Diffstat (limited to 'numpy/lib/format.py')
-rw-r--r--  numpy/lib/format.py  |  9
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/numpy/lib/format.py b/numpy/lib/format.py
index 7c8dfbafa..67da0d6d1 100644
--- a/numpy/lib/format.py
+++ b/numpy/lib/format.py
@@ -35,7 +35,7 @@ Capabilities
- Is straightforward to reverse engineer. Datasets often live longer than
the programs that created them. A competent developer should be
- able to create a solution in his preferred programming language to
+ able to create a solution in their preferred programming language to
read most ``.npy`` files that he has been given without much
documentation.
@@ -298,7 +298,8 @@ def _write_array_header(fp, d, version=None):
# can take advantage of our premature optimization.
current_header_len = MAGIC_LEN + 2 + len(header) + 1 # 1 for the newline
topad = 16 - (current_header_len % 16)
- header = asbytes(header + ' '*topad + '\n')
+ header = header + ' '*topad + '\n'
+ header = asbytes(_filter_header(header))
if len(header) >= (256*256) and version == (1, 0):
raise ValueError("header does not fit inside %s bytes required by the"
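
[Note: the padding-plus-filter change in the hunk above can be summarized with a minimal stand-alone sketch. It assumes the version (1, 0) layout described in this module: a 6-byte magic string plus 2 version bytes (MAGIC_LEN == 8), a 2-byte header-length prefix, then the header text and a newline, padded with spaces so the array data begin on a 16-byte boundary. The example header literal is illustrative only.]

    # Minimal sketch of the alignment arithmetic, not the module's code.
    MAGIC_LEN = 8  # 6-byte magic + 2 version bytes
    header = "{'descr': '<i8', 'fortran_order': False, 'shape': (3, 4), }"  # illustrative
    current_header_len = MAGIC_LEN + 2 + len(header) + 1  # 1 for the newline
    topad = 16 - (current_header_len % 16)
    header = header + ' ' * topad + '\n'
    # Total prefix + header length is now a multiple of 16.
    assert (MAGIC_LEN + 2 + len(header)) % 16 == 0
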
@@ -433,7 +434,7 @@ def _filter_header(s):
from io import StringIO
else:
from StringIO import StringIO
-
+
tokens = []
last_token_was_number = False
for token in tokenize.generate_tokens(StringIO(asstr(s)).read):
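
[Note: the loop touched in this hunk drops the 'L' suffix that Python 2's repr() appends to long integers, so a header written under Python 2 remains evaluable under Python 3. A simplified, Python-3-only rendering of that idea follows; it uses the documented readline-based tokenize interface, and the function name and example are illustrative, not the module's own.]

    import tokenize
    from io import StringIO

    def strip_long_suffix(s):
        # Drop a NAME token 'L' that immediately follows a NUMBER token,
        # e.g. "(10L, 3L)" -> "(10 , 3 )"; all other tokens pass through.
        tokens = []
        last_token_was_number = False
        for token in tokenize.generate_tokens(StringIO(s).readline):
            token_type, token_string = token[0], token[1]
            if not (last_token_was_number and
                    token_type == tokenize.NAME and
                    token_string == 'L'):
                tokens.append(token)
            last_token_was_number = (token_type == tokenize.NUMBER)
        return tokenize.untokenize(tokens)

    # e.g. strip_long_suffix("{'shape': (10L, 3L)}") yields a dict literal
    # that evaluates on Python 3 (whitespace in the result may shift).
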
@@ -448,7 +449,7 @@ def _filter_header(s):
last_token_was_number = (token_type == tokenize.NUMBER)
return tokenize.untokenize(tokens)
-
+
def _read_array_header(fp, version):
"""
see read_array_header_1_0