Diffstat (limited to 'numpy/lib')
-rw-r--r--  numpy/lib/arraysetops.py              |  1
-rw-r--r--  numpy/lib/npyio.py                    |  4
-rw-r--r--  numpy/lib/recfunctions.py             | 84
-rw-r--r--  numpy/lib/tests/test_io.py            | 38
-rw-r--r--  numpy/lib/tests/test_recfunctions.py  |  4
5 files changed, 85 insertions, 46 deletions
diff --git a/numpy/lib/arraysetops.py b/numpy/lib/arraysetops.py
index b53d8c03f..f3f4bc17e 100644
--- a/numpy/lib/arraysetops.py
+++ b/numpy/lib/arraysetops.py
@@ -383,6 +383,7 @@ def intersect1d(ar1, ar2, assume_unique=False, return_indices=False):
To return the indices of the values common to the input arrays
along with the intersected values:
+
>>> x = np.array([1, 1, 2, 3, 4])
>>> y = np.array([2, 1, 4, 6])
>>> xy, x_ind, y_ind = np.intersect1d(x, y, return_indices=True)
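An illustrative run (not part of the patch) of the doctest touched above, showing what `return_indices=True` yields for these inputs:

    import numpy as np

    x = np.array([1, 1, 2, 3, 4])
    y = np.array([2, 1, 4, 6])
    xy, x_ind, y_ind = np.intersect1d(x, y, return_indices=True)
    print(xy)      # [1 2 4] -- the sorted common values
    print(x_ind)   # [0 2 4] -- index of the first occurrence of each value in x
    print(y_ind)   # [1 0 2] -- index of the first occurrence of each value in y
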
diff --git a/numpy/lib/npyio.py b/numpy/lib/npyio.py
index 318dc434a..b9dc444f8 100644
--- a/numpy/lib/npyio.py
+++ b/numpy/lib/npyio.py
@@ -521,7 +521,7 @@ def save(file, arr, allow_pickle=True, fix_imports=True):
"""
own_fid = False
- if hasattr(file, 'read'):
+ if hasattr(file, 'write'):
fid = file
else:
file = os_fspath(file)
@@ -709,7 +709,7 @@ def _savez(file, args, kwds, compress, allow_pickle=True, pickle_kwargs=None):
# component of the so-called standard library.
import zipfile
- if not hasattr(file, 'read'):
+ if not hasattr(file, 'write'):
file = os_fspath(file)
if not file.endswith('.npz'):
file = file + '.npz'
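The two npyio.py hunks above switch the duck-typing probe in save() and _savez() from hasattr(file, 'read') to hasattr(file, 'write'): an object passed to a saving routine only needs to be writable, so a write-only file-like object should not be forced down the "treat it as a filename" path. A minimal sketch of the behaviour this enables, assuming the patched check is in place (the WriteOnly wrapper is only an illustration, mirroring the JustWriter/JustReader classes added to test_io.py below):

    import io
    import numpy as np

    class WriteOnly(object):
        """File-like object exposing only write()/flush(), no read()."""
        def __init__(self, base):
            self.base = base
        def write(self, data):
            return self.base.write(data)
        def flush(self):
            return self.base.flush()

    buf = io.BytesIO()
    np.save(WriteOnly(buf), np.arange(3))   # accepted once save() probes 'write'
    buf.seek(0)
    print(np.load(buf))                     # [0 1 2]
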
diff --git a/numpy/lib/recfunctions.py b/numpy/lib/recfunctions.py
index fabb509ab..6e257bb3f 100644
--- a/numpy/lib/recfunctions.py
+++ b/numpy/lib/recfunctions.py
@@ -26,10 +26,13 @@ _check_fill_value = np.ma.core._check_fill_value
__all__ = [
- 'append_fields', 'drop_fields', 'find_duplicates',
- 'get_fieldstructure', 'join_by', 'merge_arrays',
- 'rec_append_fields', 'rec_drop_fields', 'rec_join',
- 'recursive_fill_fields', 'rename_fields', 'stack_arrays',
+ 'append_fields', 'apply_along_fields', 'assign_fields_by_name',
+ 'drop_fields', 'find_duplicates', 'flatten_descr',
+ 'get_fieldstructure', 'get_names', 'get_names_flat',
+ 'join_by', 'merge_arrays', 'rec_append_fields',
+ 'rec_drop_fields', 'rec_join', 'recursive_fill_fields',
+ 'rename_fields', 'repack_fields', 'require_fields',
+ 'stack_arrays', 'structured_to_unstructured', 'unstructured_to_structured',
]
@@ -76,7 +79,7 @@ def recursive_fill_fields(input, output):
return output
-def get_fieldspec(dtype):
+def _get_fieldspec(dtype):
"""
Produce a list of name/dtype pairs corresponding to the dtype fields
@@ -91,7 +94,7 @@ def get_fieldspec(dtype):
>>> dt = np.dtype([(('a', 'A'), np.int64), ('b', np.double, 3)])
>>> dt.descr
[(('a', 'A'), '<i8'), ('b', '<f8', (3,))]
- >>> get_fieldspec(dt)
+ >>> _get_fieldspec(dt)
[(('a', 'A'), dtype('int64')), ('b', dtype(('<f8', (3,))))]
"""
@@ -204,12 +207,7 @@ def flatten_descr(ndtype):
return tuple(descr)
-def _zip_dtype_dispatcher(seqarrays, flatten=None):
- return seqarrays
-
-
-@array_function_dispatch(_zip_dtype_dispatcher)
-def zip_dtype(seqarrays, flatten=False):
+def _zip_dtype(seqarrays, flatten=False):
newdtype = []
if flatten:
for a in seqarrays:
@@ -219,14 +217,13 @@ def zip_dtype(seqarrays, flatten=False):
current = a.dtype
if current.names and len(current.names) <= 1:
# special case - dtypes of 0 or 1 field are flattened
- newdtype.extend(get_fieldspec(current))
+ newdtype.extend(_get_fieldspec(current))
else:
newdtype.append(('', current))
return np.dtype(newdtype)
-@array_function_dispatch(_zip_dtype_dispatcher)
-def zip_descr(seqarrays, flatten=False):
+def _zip_descr(seqarrays, flatten=False):
"""
Combine the dtype description of a series of arrays.
@@ -237,7 +234,7 @@ def zip_descr(seqarrays, flatten=False):
flatten : {boolean}, optional
Whether to collapse nested descriptions.
"""
- return zip_dtype(seqarrays, flatten=flatten).descr
+ return _zip_dtype(seqarrays, flatten=flatten).descr
def get_fieldstructure(adtype, lastname=None, parents=None,):
@@ -318,12 +315,7 @@ def _izip_fields(iterable):
yield element
-def _izip_records_dispatcher(seqarrays, fill_value=None, flatten=None):
- return seqarrays
-
-
-@array_function_dispatch(_izip_records_dispatcher)
-def izip_records(seqarrays, fill_value=None, flatten=True):
+def _izip_records(seqarrays, fill_value=None, flatten=True):
"""
Returns an iterator of concatenated items from a sequence of arrays.
@@ -445,7 +437,7 @@ def merge_arrays(seqarrays, fill_value=-1, flatten=False,
# Make sure we have named fields
if not seqdtype.names:
seqdtype = np.dtype([('', seqdtype)])
- if not flatten or zip_dtype((seqarrays,), flatten=True) == seqdtype:
+ if not flatten or _zip_dtype((seqarrays,), flatten=True) == seqdtype:
# Minimal processing needed: just make sure everything's a-ok
seqarrays = seqarrays.ravel()
# Find what type of array we must return
@@ -468,7 +460,7 @@ def merge_arrays(seqarrays, fill_value=-1, flatten=False,
sizes = tuple(a.size for a in seqarrays)
maxlength = max(sizes)
# Get the dtype of the output (flattening if needed)
- newdtype = zip_dtype(seqarrays, flatten=flatten)
+ newdtype = _zip_dtype(seqarrays, flatten=flatten)
# Initialize the sequences for data and mask
seqdata = []
seqmask = []
@@ -496,9 +488,9 @@ def merge_arrays(seqarrays, fill_value=-1, flatten=False,
seqdata.append(itertools.chain(data, [fval] * nbmissing))
seqmask.append(itertools.chain(mask, [fmsk] * nbmissing))
# Create an iterator for the data
- data = tuple(izip_records(seqdata, flatten=flatten))
+ data = tuple(_izip_records(seqdata, flatten=flatten))
output = ma.array(np.fromiter(data, dtype=newdtype, count=maxlength),
- mask=list(izip_records(seqmask, flatten=flatten)))
+ mask=list(_izip_records(seqmask, flatten=flatten)))
if asrecarray:
output = output.view(MaskedRecords)
else:
@@ -516,7 +508,7 @@ def merge_arrays(seqarrays, fill_value=-1, flatten=False,
else:
fval = None
seqdata.append(itertools.chain(data, [fval] * nbmissing))
- output = np.fromiter(tuple(izip_records(seqdata, flatten=flatten)),
+ output = np.fromiter(tuple(_izip_records(seqdata, flatten=flatten)),
dtype=newdtype, count=maxlength)
if asrecarray:
output = output.view(recarray)
@@ -743,7 +735,7 @@ def append_fields(base, names, data, dtypes=None,
#
output = ma.masked_all(
max(len(base), len(data)),
- dtype=get_fieldspec(base.dtype) + get_fieldspec(data.dtype))
+ dtype=_get_fieldspec(base.dtype) + _get_fieldspec(data.dtype))
output = recursive_fill_fields(base, output)
output = recursive_fill_fields(data, output)
#
@@ -834,17 +826,18 @@ def repack_fields(a, align=False, recurse=False):
Examples
--------
+ >>> from numpy.lib import recfunctions as rfn
>>> def print_offsets(d):
... print("offsets:", [d.fields[name][1] for name in d.names])
... print("itemsize:", d.itemsize)
...
- >>> dt = np.dtype('u1,<i4,<f4', align=True)
+ >>> dt = np.dtype('u1, <i8, <f8', align=True)
>>> dt
dtype({'names':['f0','f1','f2'], 'formats':['u1','<i8','<f8'], 'offsets':[0,8,16], 'itemsize':24}, align=True)
>>> print_offsets(dt)
offsets: [0, 8, 16]
itemsize: 24
- >>> packed_dt = repack_fields(dt)
+ >>> packed_dt = rfn.repack_fields(dt)
>>> packed_dt
dtype([('f0', 'u1'), ('f1', '<i8'), ('f2', '<f8')])
>>> print_offsets(packed_dt)
@@ -932,12 +925,13 @@ def structured_to_unstructured(arr, dtype=None, copy=False, casting='unsafe'):
Examples
--------
+ >>> from numpy.lib import recfunctions as rfn
>>> a = np.zeros(4, dtype=[('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)])
>>> a
array([(0, (0., 0), [0., 0.]), (0, (0., 0), [0., 0.]),
(0, (0., 0), [0., 0.]), (0, (0., 0), [0., 0.])],
dtype=[('a', '<i4'), ('b', [('f0', '<f4'), ('f1', '<u2')]), ('c', '<f4', (2,))])
- >>> structured_to_unstructured(arr)
+ >>> rfn.structured_to_unstructured(a)
array([[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
@@ -945,7 +939,7 @@ def structured_to_unstructured(arr, dtype=None, copy=False, casting='unsafe'):
>>> b = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)],
... dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')])
- >>> np.mean(structured_to_unstructured(b[['x', 'z']]), axis=-1)
+ >>> np.mean(rfn.structured_to_unstructured(b[['x', 'z']]), axis=-1)
array([ 3. , 5.5, 9. , 11. ])
"""
@@ -982,6 +976,7 @@ def structured_to_unstructured(arr, dtype=None, copy=False, casting='unsafe'):
# finally is it safe to view the packed fields as the unstructured type
return arr.view((out_dtype, (sum(counts),)))
+
def _unstructured_to_structured_dispatcher(arr, dtype=None, names=None,
align=None, copy=None, casting=None):
return (arr,)
@@ -1027,6 +1022,7 @@ def unstructured_to_structured(arr, dtype=None, names=None, align=False,
Examples
--------
+ >>> from numpy.lib import recfunctions as rfn
>>> dt = np.dtype([('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)])
>>> a = np.arange(20).reshape((4,5))
>>> a
@@ -1034,7 +1030,7 @@ def unstructured_to_structured(arr, dtype=None, names=None, align=False,
[ 5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19]])
- >>> unstructured_to_structured(a, dt)
+ >>> rfn.unstructured_to_structured(a, dt)
array([( 0, ( 1., 2), [ 3., 4.]), ( 5, ( 6., 7), [ 8., 9.]),
(10, (11., 12), [13., 14.]), (15, (16., 17), [18., 19.])],
dtype=[('a', '<i4'), ('b', [('f0', '<f4'), ('f1', '<u2')]), ('c', '<f4', (2,))])
@@ -1111,11 +1107,12 @@ def apply_along_fields(func, arr):
Examples
--------
+ >>> from numpy.lib import recfunctions as rfn
>>> b = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)],
... dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')])
- >>> apply_along_fields(np.mean, b)
+ >>> rfn.apply_along_fields(np.mean, b)
array([ 2.66666667, 5.33333333, 8.66666667, 11. ])
- >>> apply_along_fields(np.mean, b[['x', 'z']])
+ >>> rfn.apply_along_fields(np.mean, b[['x', 'z']])
array([ 3. , 5.5, 9. , 11. ])
"""
@@ -1200,14 +1197,15 @@ def require_fields(array, required_dtype):
Examples
--------
+ >>> from numpy.lib import recfunctions as rfn
>>> a = np.ones(4, dtype=[('a', 'i4'), ('b', 'f8'), ('c', 'u1')])
- >>> require_fields(a, [('b', 'f4'), ('c', 'u1')])
+ >>> rfn.require_fields(a, [('b', 'f4'), ('c', 'u1')])
array([(1., 1), (1., 1), (1., 1), (1., 1)],
dtype=[('b', '<f4'), ('c', 'u1')])
- >>> require_fields(a, [('b', 'f4'), ('newf', 'u1')])
+ >>> rfn.require_fields(a, [('b', 'f4'), ('newf', 'u1')])
array([(1., 0), (1., 0), (1., 0), (1., 0)],
dtype=[('b', '<f4'), ('newf', 'u1')])
-
+
"""
out = np.empty(array.shape, dtype=required_dtype)
assign_fields_by_name(out, array)
@@ -1270,10 +1268,10 @@ def stack_arrays(arrays, defaults=None, usemask=True, asrecarray=False,
fldnames = [d.names for d in ndtype]
#
dtype_l = ndtype[0]
- newdescr = get_fieldspec(dtype_l)
+ newdescr = _get_fieldspec(dtype_l)
names = [n for n, d in newdescr]
for dtype_n in ndtype[1:]:
- for fname, fdtype in get_fieldspec(dtype_n):
+ for fname, fdtype in _get_fieldspec(dtype_n):
if fname not in names:
newdescr.append((fname, fdtype))
names.append(fname)
@@ -1496,15 +1494,15 @@ def join_by(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
#
# Build the new description of the output array .......
# Start with the key fields
- ndtype = get_fieldspec(r1k.dtype)
+ ndtype = _get_fieldspec(r1k.dtype)
# Add the fields from r1
- for fname, fdtype in get_fieldspec(r1.dtype):
+ for fname, fdtype in _get_fieldspec(r1.dtype):
if fname not in key:
ndtype.append((fname, fdtype))
# Add the fields from r2
- for fname, fdtype in get_fieldspec(r2.dtype):
+ for fname, fdtype in _get_fieldspec(r2.dtype):
# Have we seen the current name already ?
# we need to rebuild this list every time
names = list(name for name, dtype in ndtype)
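The recfunctions.py changes above do two things: internal helpers such as get_fieldspec, zip_dtype, zip_descr and izip_records become private (underscore-prefixed, with their array_function_dispatch wrappers dropped), while the intended public API is spelled out in __all__ and its doctests now import the module explicitly. A short sketch of that public usage, following the updated doctests (the sample array is illustrative):

    import numpy as np
    from numpy.lib import recfunctions as rfn

    b = np.array([(1, 2, 5), (4, 5, 7), (7, 8, 11), (10, 11, 12)],
                 dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')])

    # view a structured array as a plain 2-D array of a common type
    print(rfn.structured_to_unstructured(b[['x', 'z']]))

    # reduce across the fields of each record
    print(rfn.apply_along_fields(np.mean, b))   # [ 2.66666667  5.33333333  8.66666667 11. ]

    # strip the padding that an aligned dtype carries
    dt = np.dtype('u1, <i8, <f8', align=True)
    print(rfn.repack_fields(dt))                # dtype([('f0', 'u1'), ('f1', '<i8'), ('f2', '<f8')])
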
diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py
index 38d4c19fa..78f9f85f3 100644
--- a/numpy/lib/tests/test_io.py
+++ b/numpy/lib/tests/test_io.py
@@ -2472,6 +2472,44 @@ def test_gzip_load():
assert_array_equal(np.load(f), a)
+# These next two classes encode the minimal API needed to save()/load() arrays.
+# The `test_ducktyping` ensures they work correctly
+class JustWriter(object):
+ def __init__(self, base):
+ self.base = base
+
+ def write(self, s):
+ return self.base.write(s)
+
+ def flush(self):
+ return self.base.flush()
+
+class JustReader(object):
+ def __init__(self, base):
+ self.base = base
+
+ def read(self, n):
+ return self.base.read(n)
+
+ def seek(self, off, whence=0):
+ return self.base.seek(off, whence)
+
+
+def test_ducktyping():
+ a = np.random.random((5, 5))
+
+ s = BytesIO()
+ f = JustWriter(s)
+
+ np.save(f, a)
+ f.flush()
+ s.seek(0)
+
+ f = JustReader(s)
+ assert_array_equal(np.load(f), a)
+
+
+
def test_gzip_loadtxt():
# Thanks to another windows brokenness, we can't use
# NamedTemporaryFile: a file created from this function cannot be
diff --git a/numpy/lib/tests/test_recfunctions.py b/numpy/lib/tests/test_recfunctions.py
index f713fb64d..0126ccaf8 100644
--- a/numpy/lib/tests/test_recfunctions.py
+++ b/numpy/lib/tests/test_recfunctions.py
@@ -12,9 +12,11 @@ from numpy.lib.recfunctions import (
find_duplicates, merge_arrays, append_fields, stack_arrays, join_by,
repack_fields, unstructured_to_structured, structured_to_unstructured,
apply_along_fields, require_fields, assign_fields_by_name)
+get_fieldspec = np.lib.recfunctions._get_fieldspec
get_names = np.lib.recfunctions.get_names
get_names_flat = np.lib.recfunctions.get_names_flat
-zip_descr = np.lib.recfunctions.zip_descr
+zip_descr = np.lib.recfunctions._zip_descr
+zip_dtype = np.lib.recfunctions._zip_dtype
class TestRecFunctions(object):