 Lib/idlelib/IOBinding.py                                | 16
 Lib/lib2to3/pgen2/tokenize.py                           |  9
 Lib/lib2to3/tests/data/false_encoding.py                |  2
 Lib/lib2to3/tests/test_refactor.py                      |  4
 Lib/test/test_importlib/source/test_source_encoding.py  |  6
 Lib/test/test_tokenize.py                               |  7
 Lib/tokenize.py                                         |  8
 Misc/NEWS                                               |  8
 Tools/scripts/findnocoding.py                           |  6
 9 files changed, 44 insertions(+), 22 deletions(-)
diff --git a/Lib/idlelib/IOBinding.py b/Lib/idlelib/IOBinding.py
index 4558ae6c37..cba80483a6 100644
--- a/Lib/idlelib/IOBinding.py
+++ b/Lib/idlelib/IOBinding.py
@@ -63,7 +63,7 @@ locale_encoding = locale_encoding.lower()
 
 encoding = locale_encoding  ### KBK 07Sep07  This is used all over IDLE, check!
                             ### 'encoding' is used below in encode(), check!
-coding_re = re.compile("coding[:=]\s*([-\w_.]+)")
+coding_re = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)', re.ASCII)
 
 def coding_spec(data):
     """Return the encoding declaration according to PEP 263.
@@ -84,14 +84,16 @@ def coding_spec(data):
         lines = data
     # consider only the first two lines
     if '\n' in lines:
-        lst = lines.split('\n')[:2]
+        lst = lines.split('\n', 2)[:2]
     elif '\r' in lines:
-        lst = lines.split('\r')[:2]
+        lst = lines.split('\r', 2)[:2]
+    else:
+        lst = [lines]
+    for line in lst:
+        match = coding_re.match(line)
+        if match is not None:
+            break
     else:
-        lst = list(lines)
-    str = '\n'.join(lst)
-    match = coding_re.search(str)
-    if not match:
         return None
     name = match.group(1)
     try:
diff --git a/Lib/lib2to3/pgen2/tokenize.py b/Lib/lib2to3/pgen2/tokenize.py
index 31e29698e6..83656fc19f 100644
--- a/Lib/lib2to3/pgen2/tokenize.py
+++ b/Lib/lib2to3/pgen2/tokenize.py
@@ -236,7 +236,7 @@ class Untokenizer:
                 startline = False
             toks_append(tokval)
 
-cookie_re = re.compile("coding[:=]\s*([-\w.]+)")
+cookie_re = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)', re.ASCII)
 
 def _get_normal_name(orig_enc):
     """Imitates get_normal_name in tokenizer.c."""
@@ -281,11 +281,10 @@ def detect_encoding(readline):
             line_string = line.decode('ascii')
         except UnicodeDecodeError:
             return None
-
-        matches = cookie_re.findall(line_string)
-        if not matches:
+        match = cookie_re.match(line_string)
+        if not match:
             return None
-        encoding = _get_normal_name(matches[0])
+        encoding = _get_normal_name(match.group(1))
         try:
             codec = lookup(encoding)
         except LookupError:
diff --git a/Lib/lib2to3/tests/data/false_encoding.py b/Lib/lib2to3/tests/data/false_encoding.py
new file mode 100644
index 0000000000..f4e59e787d
--- /dev/null
+++ b/Lib/lib2to3/tests/data/false_encoding.py
@@ -0,0 +1,2 @@
+#!/usr/bin/env python
+print '#coding=0'
diff --git a/Lib/lib2to3/tests/test_refactor.py b/Lib/lib2to3/tests/test_refactor.py
index 8bdebc1f3d..5ecd9b1cb3 100644
--- a/Lib/lib2to3/tests/test_refactor.py
+++ b/Lib/lib2to3/tests/test_refactor.py
@@ -271,6 +271,10 @@ from __future__ import print_function"""
         fn = os.path.join(TEST_DATA_DIR, "different_encoding.py")
         self.check_file_refactoring(fn)
 
+    def test_false_file_encoding(self):
+        fn = os.path.join(TEST_DATA_DIR, "false_encoding.py")
+        data = self.check_file_refactoring(fn)
+
     def test_bom(self):
         fn = os.path.join(TEST_DATA_DIR, "bom.py")
         data = self.check_file_refactoring(fn)
diff --git a/Lib/test/test_importlib/source/test_source_encoding.py b/Lib/test/test_importlib/source/test_source_encoding.py
index 0ca5195439..ba02b44274 100644
--- a/Lib/test/test_importlib/source/test_source_encoding.py
+++ b/Lib/test/test_importlib/source/test_source_encoding.py
@@ -10,7 +10,7 @@ import unicodedata
 import unittest
 
 
-CODING_RE = re.compile(r'coding[:=]\s*([-\w.]+)')
+CODING_RE = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)', re.ASCII)
 
 
 class EncodingTest(unittest.TestCase):
@@ -41,7 +41,7 @@ class EncodingTest(unittest.TestCase):
     def create_source(self, encoding):
         encoding_line = "# coding={0}".format(encoding)
-        assert CODING_RE.search(encoding_line)
+        assert CODING_RE.match(encoding_line)
         source_lines = [encoding_line.encode('utf-8')]
         source_lines.append(self.source_line.encode(encoding))
         return b'\n'.join(source_lines)
 
@@ -50,7 +50,7 @@ class EncodingTest(unittest.TestCase):
         # Make sure that an encoding that has never been a standard one for
         # Python works.
         encoding_line = "# coding=koi8-r"
-        assert CODING_RE.search(encoding_line)
+        assert CODING_RE.match(encoding_line)
         source = "{0}\na=42\n".format(encoding_line).encode("koi8-r")
         self.run_test(source)
 
diff --git a/Lib/test/test_tokenize.py b/Lib/test/test_tokenize.py
index b4a58f0db2..17650855eb 100644
--- a/Lib/test/test_tokenize.py
+++ b/Lib/test/test_tokenize.py
@@ -946,6 +946,13 @@ class TestDetectEncoding(TestCase):
         readline = self.get_readline((b'# coding: bad\n',))
         self.assertRaises(SyntaxError, detect_encoding, readline)
 
+    def test_false_encoding(self):
+        # Issue 18873: "Encoding" detected in non-comment lines
+        readline = self.get_readline((b'print("#coding=fake")',))
+        encoding, consumed_lines = detect_encoding(readline)
+        self.assertEqual(encoding, 'utf-8')
+        self.assertEqual(consumed_lines, [b'print("#coding=fake")'])
+
     def test_open(self):
         filename = support.TESTFN + '.py'
         self.addCleanup(support.unlink, filename)
diff --git a/Lib/tokenize.py b/Lib/tokenize.py
index cbf91ef222..f1e61d8ad5 100644
--- a/Lib/tokenize.py
+++ b/Lib/tokenize.py
@@ -31,7 +31,7 @@ from token import *
 from codecs import lookup, BOM_UTF8
 import collections
 from io import TextIOWrapper
-cookie_re = re.compile("coding[:=]\s*([-\w.]+)")
+cookie_re = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)', re.ASCII)
 
 import token
 __all__ = token.__all__ + ["COMMENT", "tokenize", "detect_encoding",
@@ -372,10 +372,10 @@ def detect_encoding(readline):
                 msg = '{} for {!r}'.format(msg, filename)
             raise SyntaxError(msg)
 
-        matches = cookie_re.findall(line_string)
-        if not matches:
+        match = cookie_re.match(line_string)
+        if not match:
             return None
-        encoding = _get_normal_name(matches[0])
+        encoding = _get_normal_name(match.group(1))
         try:
             codec = lookup(encoding)
         except LookupError:
diff --git a/Misc/NEWS b/Misc/NEWS
--- a/Misc/NEWS
+++ b/Misc/NEWS
@@ -68,6 +68,8 @@ Core and Builtins
 Library
 -------
 
+- Issue #18873: The tokenize module now detects Python source code encoding
+  only in comment lines.
+
 - Issue #17324: Fix http.server's request handling case on trailing '/'. Patch
   contributed by Vajrasky Kok.
@@ -304,6 +306,9 @@ C API
 IDLE
 ----
 
+- Issue #18873: IDLE now detects Python source code encoding only in comment
+  lines.
+
 - Issue #18988: The "Tab" key now works when a word is already autocompleted.
 
 - Issue #18489: Add tests for SearchEngine. Original patch by Phil Webster.
@@ -430,6 +435,9 @@ Documentation
 Tools/Demos
 -----------
 
+- Issue #18873: 2to3 and the findnocoding.py script now detect Python source
+  code encoding only in comment lines.
+
 - Issue #18817: Fix a resource warning in Lib/aifc.py demo.
 
 - Issue #18439: Make patchcheck work on Windows for ACKS, NEWS.
diff --git a/Tools/scripts/findnocoding.py b/Tools/scripts/findnocoding.py
index b3e9dc7361..c0997d6598 100755
--- a/Tools/scripts/findnocoding.py
+++ b/Tools/scripts/findnocoding.py
@@ -32,13 +32,13 @@ except ImportError:
                          "no sophisticated Python source file search will be done.", file=sys.stderr)
 
-decl_re = re.compile(rb"coding[=:]\s*([-\w.]+)")
+decl_re = re.compile(rb'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)')
 
 def get_declaration(line):
-    match = decl_re.search(line)
+    match = decl_re.match(line)
     if match:
         return match.group(1)
-    return ''
+    return b''
 
 def has_correct_encoding(text, codec):
     try:
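
The heart of the patch is the stricter cookie regular expression. Below is a
minimal sketch (not part of the diff; the sample strings are invented for
illustration) contrasting the old and new patterns:

    import re

    # Old pattern: searched anywhere in the line, so a string literal such as
    # print('#coding=fake') looked like an encoding declaration.
    old_re = re.compile(r"coding[:=]\s*([-\w.]+)")

    # New pattern: anchored at the start of the line and requires a comment,
    # per PEP 263; re.ASCII keeps \w from matching non-ASCII word characters.
    new_re = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)', re.ASCII)

    comment = "# -*- coding: latin-1 -*-"
    code = "print('#coding=fake')"

    assert old_re.search(comment).group(1) == 'latin-1'
    assert old_re.search(code).group(1) == 'fake'    # false positive
    assert new_re.match(comment).group(1) == 'latin-1'
    assert new_re.match(code) is None                # fixed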
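
The rewritten IDLE coding_spec() also leans on Python's for/else: the else
clause runs only when the loop finishes without hitting break. A standalone
sketch of the idiom (first_cookie is a hypothetical helper, not IDLE code):

    import re

    coding_re = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)', re.ASCII)

    def first_cookie(lines):
        # PEP 263: an encoding declaration may appear only on line 1 or 2.
        for line in lines[:2]:
            match = coding_re.match(line)
            if match is not None:
                break
        else:                   # no break: no line carried a cookie
            return None
        return match.group(1)

    assert first_cookie(['# coding: koi8-r', 'x = 1']) == 'koi8-r'
    assert first_cookie(['x = 1', 'y = 2', '# coding: koi8-r']) is None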
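
Finally, an end-to-end check of the user-visible behavior through
tokenize.detect_encoding (again a sketch, not part of the patch):

    import io
    import tokenize

    # A cookie-shaped string inside code is no longer taken for a declaration;
    # detect_encoding() falls back to the UTF-8 default.
    fake = io.BytesIO(b"print('#coding=fake')\n")
    assert tokenize.detect_encoding(fake.readline)[0] == 'utf-8'

    # A genuine comment cookie is still honored ('latin-1' is normalized
    # to 'iso-8859-1' by tokenize).
    real = io.BytesIO(b'# -*- coding: latin-1 -*-\nx = 1\n')
    assert tokenize.detect_encoding(real.readline)[0] == 'iso-8859-1'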
