| author | Seth Troisi <sethtroisi@google.com> | 2020-01-09 18:19:40 -0800 |
| committer | Seth Troisi <sethtroisi@google.com> | 2020-01-12 21:11:46 -0800 |
| commit | addf86ba5ec6d0038993f00d782101f365ddeb6d (patch) | |
| tree | ce567a2e63a080616b9995c0c19633778e25557e /numpy/lib/format.py | |
| parent | b757fb34555d4c13e159ea4698608a2fc9624b92 (diff) | |
| download | numpy-addf86ba5ec6d0038993f00d782101f365ddeb6d.tar.gz | |
MAINT: cleanup sys.version dependent code
Diffstat (limited to 'numpy/lib/format.py')
| -rw-r--r-- | numpy/lib/format.py | 7 |
1 files changed, 2 insertions, 5 deletions
diff --git a/numpy/lib/format.py b/numpy/lib/format.py
index 2ee43637c..15a74518b 100644
--- a/numpy/lib/format.py
+++ b/numpy/lib/format.py
@@ -549,9 +549,7 @@ def _filter_header(s):
 
     tokens = []
     last_token_was_number = False
-    # adding newline as python 2.7.5 workaround
-    string = s + "\n"
-    for token in tokenize.generate_tokens(StringIO(string).readline):
+    for token in tokenize.generate_tokens(StringIO(s).readline):
         token_type = token[0]
         token_string = token[1]
         if (last_token_was_number and
@@ -561,8 +559,7 @@ def _filter_header(s):
         else:
             tokens.append(token)
         last_token_was_number = (token_type == tokenize.NUMBER)
-    # removing newline (see above) as python 2.7.5 workaround
-    return tokenize.untokenize(tokens)[:-1]
+    return tokenize.untokenize(tokens)
 
 
 def _read_array_header(fp, version):
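For context, `_filter_header` strips the `L` suffix that Python 2 wrote after long integers in `.npy`/`.npz` header dictionaries, so headers produced under Python 2 can still be parsed under Python 3; this patch only removes the trailing-newline workaround that was needed for Python 2.7.5's tokenizer. Below is a minimal standalone sketch of the simplified, post-patch logic. It is not NumPy's actual function: the helper name `filter_long_suffix` and the sample header string are made up for illustration.

```python
# Sketch of the tokenize-based 'L'-suffix filtering after the patch
# (illustrative only; not NumPy's _filter_header or public API).
import tokenize
from io import StringIO


def filter_long_suffix(s):
    """Drop a stray 'L' NAME token that directly follows a NUMBER token."""
    tokens = []
    last_token_was_number = False
    # On supported Python 3 versions, generate_tokens handles input without
    # a trailing newline, so the removed `s + "\n"` workaround is unnecessary.
    for token in tokenize.generate_tokens(StringIO(s).readline):
        token_type = token[0]
        token_string = token[1]
        if (last_token_was_number and
                token_type == tokenize.NAME and
                token_string == "L"):
            continue
        else:
            tokens.append(token)
        last_token_was_number = (token_type == tokenize.NUMBER)
    return tokenize.untokenize(tokens)


# A header dict as Python 2 might have written it, with 'L' long suffixes
# (sample data, not taken from a real .npy file):
header = "{'descr': '<i8', 'fortran_order': False, 'shape': (3L, 4L), }"
print(filter_long_suffix(header))
```

Because `untokenize` receives full five-element token tuples, it reconstructs the original column positions, so dropping a token can leave an extra space where the `L` used to be; the result is nonetheless valid Python 3 literal syntax that the header parser can evaluate.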