diff options
author | Benjamin Peterson <benjamin@python.org> | 2009-03-24 22:30:15 +0000 |
---|---|---|
committer | Benjamin Peterson <benjamin@python.org> | 2009-03-24 22:30:15 +0000 |
commit | 9b8d24b17de49813eb53d6f9a4d615bfac574d11 (patch) | |
tree | a79e1cea434b4681bacf5f88225bd712b400d2e6 /Lib/tokenize.py | |
parent | a8abe863316b8f0bc92c9a490573dde67c7c81e6 (diff) | |
download | cpython-git-9b8d24b17de49813eb53d6f9a4d615bfac574d11.tar.gz |
reuse tokenize.detect_encoding in linecache instead of a custom solution
patch by Victor Stinner #4016
Diffstat (limited to 'Lib/tokenize.py')
-rw-r--r-- | Lib/tokenize.py | 7 |
1 files changed, 4 insertions, 3 deletions
diff --git a/Lib/tokenize.py b/Lib/tokenize.py
index 16c4f3f029..4ff859d9d0 100644
--- a/Lib/tokenize.py
+++ b/Lib/tokenize.py
@@ -27,7 +27,6 @@ __credits__ = ('GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, '
 import re, string, sys
 from token import *
 from codecs import lookup, BOM_UTF8
-from itertools import chain, repeat
 cookie_re = re.compile("coding[:=]\s*([-\w.]+)")
 
 import token
@@ -327,13 +326,15 @@ def tokenize(readline):
     which tells you which encoding was used to decode the bytes stream.
     """
     encoding, consumed = detect_encoding(readline)
-    def readline_generator():
+    def readline_generator(consumed):
+        for line in consumed:
+            yield line
         while True:
             try:
                 yield readline()
             except StopIteration:
                 return
-    chained = chain(consumed, readline_generator())
+    chained = readline_generator(consumed)
     return _tokenize(chained.__next__, encoding)