author    | Ned Batchelder <ned@nedbatchelder.com> | 2014-12-28 09:03:39 -0500
committer | Ned Batchelder <ned@nedbatchelder.com> | 2014-12-28 09:03:39 -0500
commit    | 98a7b99ed97af0f5dfc6fc7f5219ad4b026a6dfc (patch)
tree      | 48a18f35dbf2805f5c7f9952859d5014fb63a16b
parent    | 228c5a07e04eda70074ce40b25512700f5168dc4 (diff)
Move next/__next__ distinction into backward.py
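For background (not part of the commit message itself): Python 2 iterators advance via a `.next()` method, which Python 3 renamed to `.__next__()`. Code that grabs the bound method to use as a readline-style callback therefore has to pick the right name per version. A minimal sketch of the difference, with illustrative names, runnable on either version:

```python
it = iter(["a\n", "b\n"])

# Python 3 exposes it.__next__; Python 2 exposes it.next.
# getattr falls back to the Python 2 name only when __next__ is absent.
readline = getattr(it, "__next__", None) or it.next
print(readline())  # 'a\n'
print(readline())  # 'b\n'
```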
 coverage/backward.py   | 12 ++++++++++++
 coverage/phystokens.py | 12 +++++-------
 2 files changed, 17 insertions(+), 7 deletions(-)
```diff
diff --git a/coverage/backward.py b/coverage/backward.py
index f9402f41..cb62638a 100644
--- a/coverage/backward.py
+++ b/coverage/backward.py
@@ -52,6 +52,18 @@ else:
         """Produce the items from dict `d`."""
         return d.iteritems()
 
+# Getting the `next` function from an iterator is different in 2 and 3.
+try:
+    iter([]).next
+except AttributeError:
+    def iternext(seq):
+        """Get the `next` function for iterating over `seq`."""
+        return iter(seq).__next__
+else:
+    def iternext(seq):
+        """Get the `next` function for iterating over `seq`."""
+        return iter(seq).next
+
 # Python 3.x is picky about bytes and strings, so provide methods to
 # get them right, and make them no-ops in 2.x
 if sys.version_info >= (3, 0):
diff --git a/coverage/phystokens.py b/coverage/phystokens.py
index 95776251..f3f633dd 100644
--- a/coverage/phystokens.py
+++ b/coverage/phystokens.py
@@ -7,6 +7,8 @@ import sys
 import token
 import tokenize
 
+from coverage.backward import iternext
+
 
 def phys_tokens(toks):
     """Return all physical tokens, even line continuations.
@@ -135,11 +137,7 @@ class CachedTokenizer(object):
         # Check the type first so we don't compare bytes to unicode and get
         # warnings.
         if type(text) != type(self.last_text) or text != self.last_text:
             self.last_text = text
-            line_iter = iter(text.splitlines(True))
-            try:
-                readline = line_iter.next
-            except AttributeError:
-                readline = line_iter.__next__
+            readline = iternext(text.splitlines(True))
             self.last_tokens = list(tokenize.generate_tokens(readline))
         return self.last_tokens
@@ -160,7 +158,7 @@ def _source_encoding_py2(source):
     assert isinstance(source, bytes)
 
     # Do this so the detect_encode code we copied will work.
-    readline = iter(source.splitlines(True)).next
+    readline = iternext(source.splitlines(True))
 
     # This is mostly code adapted from Py3.2's tokenize module.
 
@@ -256,7 +254,7 @@ def _source_encoding_py3(source):
 
     """
     assert isinstance(source, bytes)
 
-    readline = iter(source.splitlines(True)).__next__
+    readline = iternext(source.splitlines(True))
 
     return tokenize.detect_encoding(readline)[0]
```
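For reference, a standalone sketch of the `iternext` helper this commit adds (same logic; the real definition lives in `coverage/backward.py`), together with a hypothetical call site mirroring how `phystokens.py` feeds `tokenize` a readline callback. The `source` string is invented for illustration:

```python
import tokenize

# Version probe: Python 2 iterators have .next; Python 3 raises
# AttributeError here, selecting the __next__-based definition.
try:
    iter([]).next
except AttributeError:
    def iternext(seq):
        """Get the `next` function for iterating over `seq`."""
        return iter(seq).__next__
else:
    def iternext(seq):
        """Get the `next` function for iterating over `seq`."""
        return iter(seq).next

# tokenize.generate_tokens wants a readline-style callable that returns
# one source line per call, which is exactly what iternext produces.
source = "x = 1\ny = 2\n"
readline = iternext(source.splitlines(True))
tokens = list(tokenize.generate_tokens(readline))
print(tokens[0][1])  # 'x'
```

Centralizing the try/except in `backward.py` collapses each call site to a single `iternext(...)` line, as the three `phystokens.py` hunks above show.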