From 228c5a07e04eda70074ce40b25512700f5168dc4 Mon Sep 17 00:00:00 2001
From: Ned Batchelder
Date: Sun, 28 Dec 2014 08:45:17 -0500
Subject: Make source_encoding stricter about its arguments, and test it everywhere

---
 coverage/phystokens.py | 19 +++++++++++++++----
 1 file changed, 15 insertions(+), 4 deletions(-)

(limited to 'coverage/phystokens.py')

diff --git a/coverage/phystokens.py b/coverage/phystokens.py
index bf55e8a3..95776251 100644
--- a/coverage/phystokens.py
+++ b/coverage/phystokens.py
@@ -148,11 +148,17 @@ generate_tokens = CachedTokenizer().generate_tokens
 
 
 def _source_encoding_py2(source):
-    """Determine the encoding for `source` (a string), according to PEP 263.
+    """Determine the encoding for `source`, according to PEP 263.
 
-    Returns a string, the name of the encoding.
+    Arguments:
+        source (byte string): the text of the program.
+
+    Returns:
+        string: the name of the encoding.
 
     """
+    assert isinstance(source, bytes)
+
     # Do this so the detect_encode code we copied will work.
     readline = iter(source.splitlines(True)).next
 
@@ -240,11 +246,16 @@ def _source_encoding_py2(source):
 
 
 def _source_encoding_py3(source):
-    """Determine the encoding for `source` (a string), according to PEP 263.
+    """Determine the encoding for `source`, according to PEP 263.
+
+    Arguments:
+        source (byte string): the text of the program.
 
-    Returns a string, the name of the encoding.
+    Returns:
+        string: the name of the encoding.
 
     """
+    assert isinstance(source, bytes)
     readline = iter(source.splitlines(True)).__next__
     return tokenize.detect_encoding(readline)[0]
 
--
cgit v1.2.1
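
For reference, a minimal sketch of the contract this patch tightens, using only the stdlib tokenize module that the Python 3 branch delegates to; the byte-string literal and variable names are illustrative, not part of the patch.

    # A small, self-contained demonstration of the same PEP 263 detection
    # that _source_encoding_py3 performs: the input must be bytes, and the
    # result is the declared encoding's canonical name.
    import tokenize

    source = b"# -*- coding: latin-1 -*-\nx = 1\n"   # illustrative byte string
    assert isinstance(source, bytes)                 # the contract the patch enforces

    readline = iter(source.splitlines(True)).__next__
    encoding = tokenize.detect_encoding(readline)[0]
    print(encoding)  # 'iso-8859-1' (tokenize normalizes 'latin-1')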