author     Eric Wieser <wieser.eric@gmail.com>   2018-05-25 23:07:21 -0700
committer  GitHub <noreply@github.com>           2018-05-25 23:07:21 -0700
commit     a10b4270d4b3f538254698874560d645c0525dc5 (patch)
tree       e8ff62b91d0477b56042b879d489e59ba807fed7 /numpy/lib
parent     0addc016ba7000b27509663f4f489c6eb1838056 (diff)
parent     c1fc882277bcec42e11f67c6eced43d68cec4d7a (diff)
Merge branch 'master' into force-tuple
Diffstat (limited to 'numpy/lib')
-rw-r--r--  numpy/lib/__init__.py                    6
-rw-r--r--  numpy/lib/_version.py                    2
-rw-r--r--  numpy/lib/arraypad.py                  289
-rw-r--r--  numpy/lib/arraysetops.py                65
-rw-r--r--  numpy/lib/format.py                     13
-rw-r--r--  numpy/lib/function_base.py             251
-rw-r--r--  numpy/lib/histograms.py                383
-rw-r--r--  numpy/lib/mixins.py                      6
-rw-r--r--  numpy/lib/nanfunctions.py              125
-rw-r--r--  numpy/lib/npyio.py                      68
-rw-r--r--  numpy/lib/polynomial.py                  5
-rw-r--r--  numpy/lib/scimath.py                     2
-rw-r--r--  numpy/lib/stride_tricks.py               7
-rw-r--r--  numpy/lib/tests/test__datasource.py      8
-rw-r--r--  numpy/lib/tests/test__iotools.py         5
-rw-r--r--  numpy/lib/tests/test__version.py         6
-rw-r--r--  numpy/lib/tests/test_arraypad.py        17
-rw-r--r--  numpy/lib/tests/test_arraysetops.py     51
-rw-r--r--  numpy/lib/tests/test_arrayterator.py     4
-rw-r--r--  numpy/lib/tests/test_financial.py        9
-rw-r--r--  numpy/lib/tests/test_format.py          19
-rw-r--r--  numpy/lib/tests/test_function_base.py  121
-rw-r--r--  numpy/lib/tests/test_histograms.py      88
-rw-r--r--  numpy/lib/tests/test_index_tricks.py   176
-rw-r--r--  numpy/lib/tests/test_io.py             123
-rw-r--r--  numpy/lib/tests/test_mixins.py           8
-rw-r--r--  numpy/lib/tests/test_nanfunctions.py   145
-rw-r--r--  numpy/lib/tests/test_packbits.py         8
-rw-r--r--  numpy/lib/tests/test_polynomial.py      44
-rw-r--r--  numpy/lib/tests/test_recfunctions.py    11
-rw-r--r--  numpy/lib/tests/test_regression.py       8
-rw-r--r--  numpy/lib/tests/test_shape_base.py      33
-rw-r--r--  numpy/lib/tests/test_stride_tricks.py   11
-rw-r--r--  numpy/lib/tests/test_twodim_base.py     69
-rw-r--r--  numpy/lib/tests/test_type_check.py       6
-rw-r--r--  numpy/lib/tests/test_ufunclike.py        5
-rw-r--r--  numpy/lib/tests/test_utils.py           12
-rw-r--r--  numpy/lib/twodim_base.py                 2
38 files changed, 1306 insertions, 905 deletions
diff --git a/numpy/lib/__init__.py b/numpy/lib/__init__.py
index cc05232a2..d764cdc7e 100644
--- a/numpy/lib/__init__.py
+++ b/numpy/lib/__init__.py
@@ -46,6 +46,6 @@ __all__ += financial.__all__
__all__ += nanfunctions.__all__
__all__ += histograms.__all__
-from numpy.testing import _numpy_tester
-test = _numpy_tester().test
-bench = _numpy_tester().bench
+from numpy.testing._private.pytesttester import PytestTester
+test = PytestTester(__name__)
+del PytestTester
diff --git a/numpy/lib/_version.py b/numpy/lib/_version.py
index 0019c5607..c3563a7fa 100644
--- a/numpy/lib/_version.py
+++ b/numpy/lib/_version.py
@@ -45,7 +45,7 @@ class NumpyVersion():
Examples
--------
>>> from numpy.lib import NumpyVersion
- >>> if NumpyVersion(np.__version__) < '1.7.0'):
+ >>> if NumpyVersion(np.__version__) < '1.7.0':
... print('skip')
skip
diff --git a/numpy/lib/arraypad.py b/numpy/lib/arraypad.py
index daaa68d06..e9ca9de4d 100644
--- a/numpy/lib/arraypad.py
+++ b/numpy/lib/arraypad.py
@@ -74,6 +74,35 @@ def _round_ifneeded(arr, dtype):
arr.round(out=arr)
+def _slice_at_axis(shape, sl, axis):
+ """
+ Construct a slice tuple the length of shape, with sl at the specified axis
+ """
+ slice_tup = (slice(None),)
+ return slice_tup * axis + (sl,) + slice_tup * (len(shape) - axis - 1)
+
+
+def _slice_first(shape, n, axis):
+ """ Construct a slice tuple to take the first n elements along axis """
+ return _slice_at_axis(shape, slice(0, n), axis=axis)
+
+
+def _slice_last(shape, n, axis):
+ """ Construct a slice tuple to take the last n elements along axis """
+ dim = shape[axis] # doing this explicitly makes n=0 work
+ return _slice_at_axis(shape, slice(dim - n, dim), axis=axis)
+
+
+def _do_prepend(arr, pad_chunk, axis):
+ return np.concatenate(
+ (pad_chunk.astype(arr.dtype, copy=False), arr), axis=axis)
+
+
+def _do_append(arr, pad_chunk, axis):
+ return np.concatenate(
+ (arr, pad_chunk.astype(arr.dtype, copy=False)), axis=axis)
+
+
def _prepend_const(arr, pad_amt, val, axis=-1):
"""
Prepend constant `val` along `axis` of `arr`.
@@ -100,12 +129,7 @@ def _prepend_const(arr, pad_amt, val, axis=-1):
return arr
padshape = tuple(x if i != axis else pad_amt
for (i, x) in enumerate(arr.shape))
- if val == 0:
- return np.concatenate((np.zeros(padshape, dtype=arr.dtype), arr),
- axis=axis)
- else:
- return np.concatenate(((np.zeros(padshape) + val).astype(arr.dtype),
- arr), axis=axis)
+ return _do_prepend(arr, np.full(padshape, val, dtype=arr.dtype), axis)
def _append_const(arr, pad_amt, val, axis=-1):
@@ -134,12 +158,8 @@ def _append_const(arr, pad_amt, val, axis=-1):
return arr
padshape = tuple(x if i != axis else pad_amt
for (i, x) in enumerate(arr.shape))
- if val == 0:
- return np.concatenate((arr, np.zeros(padshape, dtype=arr.dtype)),
- axis=axis)
- else:
- return np.concatenate(
- (arr, (np.zeros(padshape) + val).astype(arr.dtype)), axis=axis)
+ return _do_append(arr, np.full(padshape, val, dtype=arr.dtype), axis)
+
def _prepend_edge(arr, pad_amt, axis=-1):
@@ -164,15 +184,9 @@ def _prepend_edge(arr, pad_amt, axis=-1):
if pad_amt == 0:
return arr
- edge_slice = tuple(slice(None) if i != axis else 0
- for (i, x) in enumerate(arr.shape))
-
- # Shape to restore singleton dimension after slicing
- pad_singleton = tuple(x if i != axis else 1
- for (i, x) in enumerate(arr.shape))
- edge_arr = arr[edge_slice].reshape(pad_singleton)
- return np.concatenate((edge_arr.repeat(pad_amt, axis=axis), arr),
- axis=axis)
+ edge_slice = _slice_first(arr.shape, 1, axis=axis)
+ edge_arr = arr[edge_slice]
+ return _do_prepend(arr, edge_arr.repeat(pad_amt, axis=axis), axis)
def _append_edge(arr, pad_amt, axis=-1):
@@ -198,15 +212,9 @@ def _append_edge(arr, pad_amt, axis=-1):
if pad_amt == 0:
return arr
- edge_slice = tuple(slice(None) if i != axis else arr.shape[axis] - 1
- for (i, x) in enumerate(arr.shape))
-
- # Shape to restore singleton dimension after slicing
- pad_singleton = tuple(x if i != axis else 1
- for (i, x) in enumerate(arr.shape))
- edge_arr = arr[edge_slice].reshape(pad_singleton)
- return np.concatenate((arr, edge_arr.repeat(pad_amt, axis=axis)),
- axis=axis)
+ edge_slice = _slice_last(arr.shape, 1, axis=axis)
+ edge_arr = arr[edge_slice]
+ return _do_append(arr, edge_arr.repeat(pad_amt, axis=axis), axis)
def _prepend_ramp(arr, pad_amt, end, axis=-1):
@@ -244,15 +252,10 @@ def _prepend_ramp(arr, pad_amt, end, axis=-1):
reverse=True).astype(np.float64)
# Appropriate slicing to extract n-dimensional edge along `axis`
- edge_slice = tuple(slice(None) if i != axis else 0
- for (i, x) in enumerate(arr.shape))
+ edge_slice = _slice_first(arr.shape, 1, axis=axis)
- # Shape to restore singleton dimension after slicing
- pad_singleton = tuple(x if i != axis else 1
- for (i, x) in enumerate(arr.shape))
-
- # Extract edge, reshape to original rank, and extend along `axis`
- edge_pad = arr[edge_slice].reshape(pad_singleton).repeat(pad_amt, axis)
+ # Extract edge, and extend along `axis`
+ edge_pad = arr[edge_slice].repeat(pad_amt, axis)
# Linear ramp
slope = (end - edge_pad) / float(pad_amt)
@@ -261,7 +264,7 @@ def _prepend_ramp(arr, pad_amt, end, axis=-1):
_round_ifneeded(ramp_arr, arr.dtype)
# Ramp values will most likely be float, cast them to the same type as arr
- return np.concatenate((ramp_arr.astype(arr.dtype), arr), axis=axis)
+ return _do_prepend(arr, ramp_arr, axis)
def _append_ramp(arr, pad_amt, end, axis=-1):
@@ -299,15 +302,10 @@ def _append_ramp(arr, pad_amt, end, axis=-1):
reverse=False).astype(np.float64)
# Slice a chunk from the edge to calculate stats on
- edge_slice = tuple(slice(None) if i != axis else -1
- for (i, x) in enumerate(arr.shape))
-
- # Shape to restore singleton dimension after slicing
- pad_singleton = tuple(x if i != axis else 1
- for (i, x) in enumerate(arr.shape))
+ edge_slice = _slice_last(arr.shape, 1, axis=axis)
- # Extract edge, reshape to original rank, and extend along `axis`
- edge_pad = arr[edge_slice].reshape(pad_singleton).repeat(pad_amt, axis)
+ # Extract edge, and extend along `axis`
+ edge_pad = arr[edge_slice].repeat(pad_amt, axis)
# Linear ramp
slope = (end - edge_pad) / float(pad_amt)
@@ -316,7 +314,7 @@ def _append_ramp(arr, pad_amt, end, axis=-1):
_round_ifneeded(ramp_arr, arr.dtype)
# Ramp values will most likely be float, cast them to the same type as arr
- return np.concatenate((arr, ramp_arr.astype(arr.dtype)), axis=axis)
+ return _do_append(arr, ramp_arr, axis)
def _prepend_max(arr, pad_amt, num, axis=-1):
@@ -356,19 +354,13 @@ def _prepend_max(arr, pad_amt, num, axis=-1):
num = None
# Slice a chunk from the edge to calculate stats on
- max_slice = tuple(slice(None) if i != axis else slice(num)
- for (i, x) in enumerate(arr.shape))
+ max_slice = _slice_first(arr.shape, num, axis=axis)
- # Shape to restore singleton dimension after slicing
- pad_singleton = tuple(x if i != axis else 1
- for (i, x) in enumerate(arr.shape))
-
- # Extract slice, calculate max, reshape to add singleton dimension back
- max_chunk = arr[max_slice].max(axis=axis).reshape(pad_singleton)
+ # Extract slice, calculate max
+ max_chunk = arr[max_slice].max(axis=axis, keepdims=True)
# Concatenate `arr` with `max_chunk`, extended along `axis` by `pad_amt`
- return np.concatenate((max_chunk.repeat(pad_amt, axis=axis), arr),
- axis=axis)
+ return _do_prepend(arr, max_chunk.repeat(pad_amt, axis=axis), axis)
def _append_max(arr, pad_amt, num, axis=-1):
@@ -407,24 +399,16 @@ def _append_max(arr, pad_amt, num, axis=-1):
num = None
# Slice a chunk from the edge to calculate stats on
- end = arr.shape[axis] - 1
if num is not None:
- max_slice = tuple(
- slice(None) if i != axis else slice(end, end - num, -1)
- for (i, x) in enumerate(arr.shape))
+ max_slice = _slice_last(arr.shape, num, axis=axis)
else:
max_slice = tuple(slice(None) for x in arr.shape)
- # Shape to restore singleton dimension after slicing
- pad_singleton = tuple(x if i != axis else 1
- for (i, x) in enumerate(arr.shape))
-
- # Extract slice, calculate max, reshape to add singleton dimension back
- max_chunk = arr[max_slice].max(axis=axis).reshape(pad_singleton)
+ # Extract slice, calculate max
+ max_chunk = arr[max_slice].max(axis=axis, keepdims=True)
# Concatenate `arr` with `max_chunk`, extended along `axis` by `pad_amt`
- return np.concatenate((arr, max_chunk.repeat(pad_amt, axis=axis)),
- axis=axis)
+ return _do_append(arr, max_chunk.repeat(pad_amt, axis=axis), axis)
def _prepend_mean(arr, pad_amt, num, axis=-1):
@@ -463,20 +447,14 @@ def _prepend_mean(arr, pad_amt, num, axis=-1):
num = None
# Slice a chunk from the edge to calculate stats on
- mean_slice = tuple(slice(None) if i != axis else slice(num)
- for (i, x) in enumerate(arr.shape))
+ mean_slice = _slice_first(arr.shape, num, axis=axis)
- # Shape to restore singleton dimension after slicing
- pad_singleton = tuple(x if i != axis else 1
- for (i, x) in enumerate(arr.shape))
-
- # Extract slice, calculate mean, reshape to add singleton dimension back
- mean_chunk = arr[mean_slice].mean(axis).reshape(pad_singleton)
+ # Extract slice, calculate mean
+ mean_chunk = arr[mean_slice].mean(axis, keepdims=True)
_round_ifneeded(mean_chunk, arr.dtype)
# Concatenate `arr` with `mean_chunk`, extended along `axis` by `pad_amt`
- return np.concatenate((mean_chunk.repeat(pad_amt, axis).astype(arr.dtype),
- arr), axis=axis)
+ return _do_prepend(arr, mean_chunk.repeat(pad_amt, axis), axis=axis)
def _append_mean(arr, pad_amt, num, axis=-1):
@@ -515,25 +493,17 @@ def _append_mean(arr, pad_amt, num, axis=-1):
num = None
# Slice a chunk from the edge to calculate stats on
- end = arr.shape[axis] - 1
if num is not None:
- mean_slice = tuple(
- slice(None) if i != axis else slice(end, end - num, -1)
- for (i, x) in enumerate(arr.shape))
+ mean_slice = _slice_last(arr.shape, num, axis=axis)
else:
mean_slice = tuple(slice(None) for x in arr.shape)
- # Shape to restore singleton dimension after slicing
- pad_singleton = tuple(x if i != axis else 1
- for (i, x) in enumerate(arr.shape))
-
- # Extract slice, calculate mean, reshape to add singleton dimension back
- mean_chunk = arr[mean_slice].mean(axis=axis).reshape(pad_singleton)
+ # Extract slice, calculate mean
+ mean_chunk = arr[mean_slice].mean(axis=axis, keepdims=True)
_round_ifneeded(mean_chunk, arr.dtype)
# Concatenate `arr` with `mean_chunk`, extended along `axis` by `pad_amt`
- return np.concatenate(
- (arr, mean_chunk.repeat(pad_amt, axis).astype(arr.dtype)), axis=axis)
+ return _do_append(arr, mean_chunk.repeat(pad_amt, axis), axis=axis)
def _prepend_med(arr, pad_amt, num, axis=-1):
@@ -572,20 +542,14 @@ def _prepend_med(arr, pad_amt, num, axis=-1):
num = None
# Slice a chunk from the edge to calculate stats on
- med_slice = tuple(slice(None) if i != axis else slice(num)
- for (i, x) in enumerate(arr.shape))
-
- # Shape to restore singleton dimension after slicing
- pad_singleton = tuple(x if i != axis else 1
- for (i, x) in enumerate(arr.shape))
+ med_slice = _slice_first(arr.shape, num, axis=axis)
- # Extract slice, calculate median, reshape to add singleton dimension back
- med_chunk = np.median(arr[med_slice], axis=axis).reshape(pad_singleton)
+ # Extract slice, calculate median
+ med_chunk = np.median(arr[med_slice], axis=axis, keepdims=True)
_round_ifneeded(med_chunk, arr.dtype)
# Concatenate `arr` with `med_chunk`, extended along `axis` by `pad_amt`
- return np.concatenate(
- (med_chunk.repeat(pad_amt, axis).astype(arr.dtype), arr), axis=axis)
+ return _do_prepend(arr, med_chunk.repeat(pad_amt, axis), axis=axis)
def _append_med(arr, pad_amt, num, axis=-1):
@@ -624,25 +588,17 @@ def _append_med(arr, pad_amt, num, axis=-1):
num = None
# Slice a chunk from the edge to calculate stats on
- end = arr.shape[axis] - 1
if num is not None:
- med_slice = tuple(
- slice(None) if i != axis else slice(end, end - num, -1)
- for (i, x) in enumerate(arr.shape))
+ med_slice = _slice_last(arr.shape, num, axis=axis)
else:
med_slice = tuple(slice(None) for x in arr.shape)
- # Shape to restore singleton dimension after slicing
- pad_singleton = tuple(x if i != axis else 1
- for (i, x) in enumerate(arr.shape))
-
- # Extract slice, calculate median, reshape to add singleton dimension back
- med_chunk = np.median(arr[med_slice], axis=axis).reshape(pad_singleton)
+ # Extract slice, calculate median
+ med_chunk = np.median(arr[med_slice], axis=axis, keepdims=True)
_round_ifneeded(med_chunk, arr.dtype)
# Concatenate `arr` with `med_chunk`, extended along `axis` by `pad_amt`
- return np.concatenate(
- (arr, med_chunk.repeat(pad_amt, axis).astype(arr.dtype)), axis=axis)
+ return _do_append(arr, med_chunk.repeat(pad_amt, axis), axis=axis)
def _prepend_min(arr, pad_amt, num, axis=-1):
@@ -682,19 +638,13 @@ def _prepend_min(arr, pad_amt, num, axis=-1):
num = None
# Slice a chunk from the edge to calculate stats on
- min_slice = tuple(slice(None) if i != axis else slice(num)
- for (i, x) in enumerate(arr.shape))
-
- # Shape to restore singleton dimension after slicing
- pad_singleton = tuple(x if i != axis else 1
- for (i, x) in enumerate(arr.shape))
+ min_slice = _slice_first(arr.shape, num, axis=axis)
- # Extract slice, calculate min, reshape to add singleton dimension back
- min_chunk = arr[min_slice].min(axis=axis).reshape(pad_singleton)
+ # Extract slice, calculate min
+ min_chunk = arr[min_slice].min(axis=axis, keepdims=True)
# Concatenate `arr` with `min_chunk`, extended along `axis` by `pad_amt`
- return np.concatenate((min_chunk.repeat(pad_amt, axis=axis), arr),
- axis=axis)
+ return _do_prepend(arr, min_chunk.repeat(pad_amt, axis), axis=axis)
def _append_min(arr, pad_amt, num, axis=-1):
@@ -733,24 +683,16 @@ def _append_min(arr, pad_amt, num, axis=-1):
num = None
# Slice a chunk from the edge to calculate stats on
- end = arr.shape[axis] - 1
if num is not None:
- min_slice = tuple(
- slice(None) if i != axis else slice(end, end - num, -1)
- for (i, x) in enumerate(arr.shape))
+ min_slice = _slice_last(arr.shape, num, axis=axis)
else:
min_slice = tuple(slice(None) for x in arr.shape)
- # Shape to restore singleton dimension after slicing
- pad_singleton = tuple(x if i != axis else 1
- for (i, x) in enumerate(arr.shape))
-
- # Extract slice, calculate min, reshape to add singleton dimension back
- min_chunk = arr[min_slice].min(axis=axis).reshape(pad_singleton)
+ # Extract slice, calculate min
+ min_chunk = arr[min_slice].min(axis=axis, keepdims=True)
# Concatenate `arr` with `min_chunk`, extended along `axis` by `pad_amt`
- return np.concatenate((arr, min_chunk.repeat(pad_amt, axis=axis)),
- axis=axis)
+ return _do_append(arr, min_chunk.repeat(pad_amt, axis), axis=axis)
def _pad_ref(arr, pad_amt, method, axis=-1):
@@ -793,22 +735,14 @@ def _pad_ref(arr, pad_amt, method, axis=-1):
# Prepended region
# Slice off a reverse indexed chunk from near edge to pad `arr` before
- ref_slice = tuple(slice(None) if i != axis else slice(pad_amt[0], 0, -1)
- for (i, x) in enumerate(arr.shape))
+ ref_slice = _slice_at_axis(arr.shape, slice(pad_amt[0], 0, -1), axis=axis)
ref_chunk1 = arr[ref_slice]
- # Shape to restore singleton dimension after slicing
- pad_singleton = tuple(x if i != axis else 1
- for (i, x) in enumerate(arr.shape))
- if pad_amt[0] == 1:
- ref_chunk1 = ref_chunk1.reshape(pad_singleton)
-
# Memory/computationally more expensive, only do this if `method='odd'`
if 'odd' in method and pad_amt[0] > 0:
- edge_slice1 = tuple(slice(None) if i != axis else 0
- for (i, x) in enumerate(arr.shape))
- edge_chunk = arr[edge_slice1].reshape(pad_singleton)
+ edge_slice1 = _slice_first(arr.shape, 1, axis=axis)
+ edge_chunk = arr[edge_slice1]
ref_chunk1 = 2 * edge_chunk - ref_chunk1
del edge_chunk
@@ -818,19 +752,13 @@ def _pad_ref(arr, pad_amt, method, axis=-1):
# Slice off a reverse indexed chunk from far edge to pad `arr` after
start = arr.shape[axis] - pad_amt[1] - 1
end = arr.shape[axis] - 1
- ref_slice = tuple(slice(None) if i != axis else slice(start, end)
- for (i, x) in enumerate(arr.shape))
- rev_idx = tuple(slice(None) if i != axis else slice(None, None, -1)
- for (i, x) in enumerate(arr.shape))
+ ref_slice = _slice_at_axis(arr.shape, slice(start, end), axis=axis)
+ rev_idx = _slice_at_axis(arr.shape, slice(None, None, -1), axis=axis)
ref_chunk2 = arr[ref_slice][rev_idx]
- if pad_amt[1] == 1:
- ref_chunk2 = ref_chunk2.reshape(pad_singleton)
-
if 'odd' in method:
- edge_slice2 = tuple(slice(None) if i != axis else -1
- for (i, x) in enumerate(arr.shape))
- edge_chunk = arr[edge_slice2].reshape(pad_singleton)
+ edge_slice2 = _slice_last(arr.shape, 1, axis=axis)
+ edge_chunk = arr[edge_slice2]
ref_chunk2 = 2 * edge_chunk - ref_chunk2
del edge_chunk
@@ -878,23 +806,14 @@ def _pad_sym(arr, pad_amt, method, axis=-1):
# Prepended region
# Slice off a reverse indexed chunk from near edge to pad `arr` before
- sym_slice = tuple(slice(None) if i != axis else slice(0, pad_amt[0])
- for (i, x) in enumerate(arr.shape))
- rev_idx = tuple(slice(None) if i != axis else slice(None, None, -1)
- for (i, x) in enumerate(arr.shape))
+ sym_slice = _slice_first(arr.shape, pad_amt[0], axis=axis)
+ rev_idx = _slice_at_axis(arr.shape, slice(None, None, -1), axis=axis)
sym_chunk1 = arr[sym_slice][rev_idx]
- # Shape to restore singleton dimension after slicing
- pad_singleton = tuple(x if i != axis else 1
- for (i, x) in enumerate(arr.shape))
- if pad_amt[0] == 1:
- sym_chunk1 = sym_chunk1.reshape(pad_singleton)
-
# Memory/computationally more expensive, only do this if `method='odd'`
if 'odd' in method and pad_amt[0] > 0:
- edge_slice1 = tuple(slice(None) if i != axis else 0
- for (i, x) in enumerate(arr.shape))
- edge_chunk = arr[edge_slice1].reshape(pad_singleton)
+ edge_slice1 = _slice_first(arr.shape, 1, axis=axis)
+ edge_chunk = arr[edge_slice1]
sym_chunk1 = 2 * edge_chunk - sym_chunk1
del edge_chunk
@@ -902,19 +821,12 @@ def _pad_sym(arr, pad_amt, method, axis=-1):
# Appended region
# Slice off a reverse indexed chunk from far edge to pad `arr` after
- start = arr.shape[axis] - pad_amt[1]
- end = arr.shape[axis]
- sym_slice = tuple(slice(None) if i != axis else slice(start, end)
- for (i, x) in enumerate(arr.shape))
+ sym_slice = _slice_last(arr.shape, pad_amt[1], axis=axis)
sym_chunk2 = arr[sym_slice][rev_idx]
- if pad_amt[1] == 1:
- sym_chunk2 = sym_chunk2.reshape(pad_singleton)
-
if 'odd' in method:
- edge_slice2 = tuple(slice(None) if i != axis else -1
- for (i, x) in enumerate(arr.shape))
- edge_chunk = arr[edge_slice2].reshape(pad_singleton)
+ edge_slice2 = _slice_last(arr.shape, 1, axis=axis)
+ edge_chunk = arr[edge_slice2]
sym_chunk2 = 2 * edge_chunk - sym_chunk2
del edge_chunk
@@ -959,29 +871,16 @@ def _pad_wrap(arr, pad_amt, axis=-1):
# Prepended region
# Slice off a reverse indexed chunk from near edge to pad `arr` before
- start = arr.shape[axis] - pad_amt[0]
- end = arr.shape[axis]
- wrap_slice = tuple(slice(None) if i != axis else slice(start, end)
- for (i, x) in enumerate(arr.shape))
+ wrap_slice = _slice_last(arr.shape, pad_amt[0], axis=axis)
wrap_chunk1 = arr[wrap_slice]
- # Shape to restore singleton dimension after slicing
- pad_singleton = tuple(x if i != axis else 1
- for (i, x) in enumerate(arr.shape))
- if pad_amt[0] == 1:
- wrap_chunk1 = wrap_chunk1.reshape(pad_singleton)
-
##########################################################################
# Appended region
# Slice off a reverse indexed chunk from far edge to pad `arr` after
- wrap_slice = tuple(slice(None) if i != axis else slice(0, pad_amt[1])
- for (i, x) in enumerate(arr.shape))
+ wrap_slice = _slice_first(arr.shape, pad_amt[1], axis=axis)
wrap_chunk2 = arr[wrap_slice]
- if pad_amt[1] == 1:
- wrap_chunk2 = wrap_chunk2.reshape(pad_singleton)
-
# Concatenate `arr` with both chunks, extending along `axis`
return np.concatenate((wrap_chunk1, arr, wrap_chunk2), axis=axis)
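
The helpers introduced at the top of this file replace the repeated
``tuple(slice(None) if i != axis else ...)`` pattern throughout arraypad.py.
A minimal standalone sketch (mirroring the `_slice_at_axis` definition from the
hunk above) shows the key point of the refactor: a length-1 slice preserves the
sliced dimension, so the old `pad_singleton` reshape becomes unnecessary:

import numpy as np

def _slice_at_axis(shape, sl, axis):
    # A slice tuple as long as `shape`: select everything, except `sl` at `axis`.
    slice_tup = (slice(None),)
    return slice_tup * axis + (sl,) + slice_tup * (len(shape) - axis - 1)

arr = np.arange(15).reshape(3, 5)
edge = arr[_slice_at_axis(arr.shape, slice(0, 1), axis=1)]
print(edge.shape)                    # (3, 1) -- unlike arr[:, 0].shape == (3,)
print(edge.repeat(4, axis=1).shape)  # (3, 4), ready to concatenate as padding
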
diff --git a/numpy/lib/arraysetops.py b/numpy/lib/arraysetops.py
index e8eda297f..4d3f35183 100644
--- a/numpy/lib/arraysetops.py
+++ b/numpy/lib/arraysetops.py
@@ -298,7 +298,7 @@ def _unique1d(ar, return_index=False, return_inverse=False,
return ret
-def intersect1d(ar1, ar2, assume_unique=False):
+def intersect1d(ar1, ar2, assume_unique=False, return_indices=False):
"""
Find the intersection of two arrays.
@@ -307,15 +307,28 @@ def intersect1d(ar1, ar2, assume_unique=False):
Parameters
----------
ar1, ar2 : array_like
- Input arrays.
+ Input arrays. Will be flattened if not already 1D.
assume_unique : bool
If True, the input arrays are both assumed to be unique, which
can speed up the calculation. Default is False.
-
+ return_indices : bool
+ If True, the indices which correspond to the intersection of the
+ two arrays are returned. The first instance of a value is used
+ if there are multiple. Default is False.
+
+ .. versionadded:: 1.15.0
+
Returns
-------
intersect1d : ndarray
Sorted 1D array of common and unique elements.
+ comm1 : ndarray
+ The indices of the first occurrences of the common values in `ar1`.
+ Only provided if `return_indices` is True.
+ comm2 : ndarray
+ The indices of the first occurrences of the common values in `ar2`.
+ Only provided if `return_indices` is True.
+
See Also
--------
@@ -332,14 +345,49 @@ def intersect1d(ar1, ar2, assume_unique=False):
>>> from functools import reduce
>>> reduce(np.intersect1d, ([1, 3, 4, 3], [3, 1, 2, 1], [6, 3, 4, 2]))
array([3])
+
+ To return the indices of the values common to the input arrays
+ along with the intersected values:
+ >>> x = np.array([1, 1, 2, 3, 4])
+ >>> y = np.array([2, 1, 4, 6])
+ >>> xy, x_ind, y_ind = np.intersect1d(x, y, return_indices=True)
+ >>> x_ind, y_ind
+ (array([0, 2, 4]), array([1, 0, 2]))
+ >>> xy, x[x_ind], y[y_ind]
+ (array([1, 2, 4]), array([1, 2, 4]), array([1, 2, 4]))
+
"""
if not assume_unique:
- # Might be faster than unique( intersect1d( ar1, ar2 ) )?
- ar1 = unique(ar1)
- ar2 = unique(ar2)
+ if return_indices:
+ ar1, ind1 = unique(ar1, return_index=True)
+ ar2, ind2 = unique(ar2, return_index=True)
+ else:
+ ar1 = unique(ar1)
+ ar2 = unique(ar2)
+ else:
+ ar1 = ar1.ravel()
+ ar2 = ar2.ravel()
+
aux = np.concatenate((ar1, ar2))
- aux.sort()
- return aux[:-1][aux[1:] == aux[:-1]]
+ if return_indices:
+ aux_sort_indices = np.argsort(aux, kind='mergesort')
+ aux = aux[aux_sort_indices]
+ else:
+ aux.sort()
+
+ mask = aux[1:] == aux[:-1]
+ int1d = aux[:-1][mask]
+
+ if return_indices:
+ ar1_indices = aux_sort_indices[:-1][mask]
+ ar2_indices = aux_sort_indices[1:][mask] - ar1.size
+ if not assume_unique:
+ ar1_indices = ind1[ar1_indices]
+ ar2_indices = ind2[ar2_indices]
+
+ return int1d, ar1_indices, ar2_indices
+ else:
+ return int1d
def setxor1d(ar1, ar2, assume_unique=False):
"""
@@ -660,3 +708,4 @@ def setdiff1d(ar1, ar2, assume_unique=False):
ar1 = unique(ar1)
ar2 = unique(ar2)
return ar1[in1d(ar1, ar2, assume_unique=True, invert=True)]
+
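
The new ``return_indices`` path above sorts the concatenated inputs with a
stable ``mergesort`` and maps the surviving positions back through
``unique(..., return_index=True)``, so the *first* occurrence of each common
value is reported. A short usage sketch, assuming NumPy 1.15+ where this lands:

import numpy as np

x = np.array([1, 1, 2, 3, 4])
y = np.array([2, 1, 4, 6])
xy, x_ind, y_ind = np.intersect1d(x, y, return_indices=True)
print(xy)            # [1 2 4]
print(x_ind, y_ind)  # [0 2 4] [1 0 2] -- x_ind[0] is 0, the first of the two 1s
assert (x[x_ind] == xy).all() and (y[y_ind] == xy).all()
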
diff --git a/numpy/lib/format.py b/numpy/lib/format.py
index 363bb2101..23eac7e7d 100644
--- a/numpy/lib/format.py
+++ b/numpy/lib/format.py
@@ -1,5 +1,10 @@
"""
-Define a simple format for saving numpy arrays to disk with the full
+Binary serialization
+
+NPY format
+==========
+
+A simple format for saving numpy arrays to disk with the full
information about them.
The ``.npy`` format is the standard binary file format in NumPy for
@@ -143,8 +148,10 @@ data HEADER_LEN."
Notes
-----
-The ``.npy`` format, including reasons for creating it and a comparison of
-alternatives, is described fully in the "npy-format" NEP.
+The ``.npy`` format, including motivation for creating it and a comparison of
+alternatives, is described in the `"npy-format" NEP
+<http://www.numpy.org/neps/nep-0001-npy-format.html>`_; however, details have
+evolved with time and this document is more current.
"""
from __future__ import division, absolute_import, print_function
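
For context on the retitled docstring: the ``.npy`` container this module
implements is what ``np.save``/``np.load`` produce and consume. A minimal
round-trip (the file name is illustrative):

import numpy as np

arr = np.arange(6, dtype=np.int16).reshape(2, 3)
np.save('demo.npy', arr)   # writes the NPY header (dtype, shape, order) + raw data
loaded = np.load('demo.npy')
assert (loaded == arr).all() and loaded.dtype == arr.dtype
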
diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index 0434eb472..a6e3e07d3 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -1,6 +1,11 @@
from __future__ import division, absolute_import, print_function
-import collections
+try:
+ # Accessing collections abstract classes from collections
+ # has been deprecated since Python 3.3
+ import collections.abc as collections_abc
+except ImportError:
+ import collections as collections_abc
import re
import sys
import warnings
@@ -49,7 +54,8 @@ __all__ = [
'bincount', 'digitize', 'cov', 'corrcoef',
'msort', 'median', 'sinc', 'hamming', 'hanning', 'bartlett',
'blackman', 'kaiser', 'trapz', 'i0', 'add_newdoc', 'add_docstring',
- 'meshgrid', 'delete', 'insert', 'append', 'interp', 'add_newdoc_ufunc'
+ 'meshgrid', 'delete', 'insert', 'append', 'interp', 'add_newdoc_ufunc',
+ 'quantile'
]
@@ -140,7 +146,7 @@ def rot90(m, k=1, axes=(0,1)):
return flip(transpose(m, axes_list), axes[1])
-def flip(m, axis):
+def flip(m, axis=None):
"""
Reverse the order of elements in an array along the given axis.
@@ -152,9 +158,16 @@ def flip(m, axis):
----------
m : array_like
Input array.
- axis : integer
- Axis in array, which entries are reversed.
+ axis : None or int or tuple of ints, optional
+ Axis or axes along which to flip over. The default,
+ axis=None, will flip over all of the axes of the input array.
+ If axis is negative it counts from the last to the first axis.
+
+ If axis is a tuple of ints, flipping is performed on all of the axes
+ specified in the tuple.
+ .. versionchanged:: 1.15.0
+ None and tuples of axes are supported
Returns
-------
@@ -170,46 +183,60 @@ def flip(m, axis):
Notes
-----
flip(m, 0) is equivalent to flipud(m).
+
flip(m, 1) is equivalent to fliplr(m).
+
flip(m, n) corresponds to ``m[...,::-1,...]`` with ``::-1`` at position n.
+ flip(m) corresponds to ``m[::-1,::-1,...,::-1]`` with ``::-1`` at all
+ positions.
+
+ flip(m, (0, 1)) corresponds to ``m[::-1,::-1,...]`` with ``::-1`` at
+ position 0 and position 1.
+
Examples
--------
>>> A = np.arange(8).reshape((2,2,2))
>>> A
array([[[0, 1],
[2, 3]],
-
[[4, 5],
[6, 7]]])
-
>>> flip(A, 0)
array([[[4, 5],
[6, 7]],
-
[[0, 1],
[2, 3]]])
-
>>> flip(A, 1)
array([[[2, 3],
[0, 1]],
-
[[6, 7],
[4, 5]]])
-
+ >>> np.flip(A)
+ array([[[7, 6],
+ [5, 4]],
+ [[3, 2],
+ [1, 0]]])
+ >>> np.flip(A, (0, 2))
+ array([[[5, 4],
+ [7, 6]],
+ [[1, 0],
+ [3, 2]]])
>>> A = np.random.randn(3,4,5)
>>> np.all(flip(A,2) == A[:,:,::-1,...])
True
"""
if not hasattr(m, 'ndim'):
m = asarray(m)
- indexer = [slice(None)] * m.ndim
- try:
- indexer[axis] = slice(None, None, -1)
- except IndexError:
- raise ValueError("axis=%i is invalid for the %i-dimensional input array"
- % (axis, m.ndim))
- return m[tuple(indexer)]
+ if axis is None:
+ indexer = (np.s_[::-1],) * m.ndim
+ else:
+ axis = _nx.normalize_axis_tuple(axis, m.ndim)
+ indexer = [np.s_[:]] * m.ndim
+ for ax in axis:
+ indexer[ax] = np.s_[::-1]
+ indexer = tuple(indexer)
+ return m[indexer]
def iterable(y):
@@ -547,7 +574,7 @@ def piecewise(x, condlist, funclist, *args, **kw):
y = zeros(x.shape, x.dtype)
for k in range(n):
item = funclist[k]
- if not isinstance(item, collections.Callable):
+ if not isinstance(item, collections_abc.Callable):
y[condlist[k]] = item
else:
vals = x[condlist[k]]
@@ -632,7 +659,7 @@ def select(condlist, choicelist, default=0):
deprecated_ints = True
else:
raise ValueError(
- 'invalid entry in choicelist: should be boolean ndarray')
+ 'invalid entry {} in condlist: should be boolean ndarray'.format(i))
if deprecated_ints:
# 2014-02-24, 1.9
@@ -818,9 +845,9 @@ def gradient(f, *varargs, **kwargs):
Notes
-----
Assuming that :math:`f\\in C^{3}` (i.e., :math:`f` has at least 3 continuous
- derivatives) and let be :math:`h_{*}` a non homogeneous stepsize, the
- spacing the finite difference coefficients are computed by minimising
- the consistency error :math:`\\eta_{i}`:
+ derivatives) and let :math:`h_{*}` be a non-homogeneous stepsize, we
+ minimize the "consistency error" :math:`\\eta_{i}` between the true gradient
+ and its estimate from a linear combination of the neighboring grid-points:
.. math::
@@ -839,7 +866,7 @@ def gradient(f, *varargs, **kwargs):
\\left\\{
\\begin{array}{r}
\\alpha+\\beta+\\gamma=0 \\\\
- -\\beta h_{d}+\\gamma h_{s}=1 \\\\
+ \\beta h_{d}-\\gamma h_{s}=1 \\\\
\\beta h_{d}^{2}+\\gamma h_{s}^{2}=0
\\end{array}
\\right.
@@ -1255,23 +1282,13 @@ def interp(x, xp, fp, left=None, right=None, period=None):
interp_func = compiled_interp
input_dtype = np.float64
- if period is None:
- if isinstance(x, (float, int, number)):
- return interp_func([x], xp, fp, left, right).item()
- elif isinstance(x, np.ndarray) and x.ndim == 0:
- return interp_func([x], xp, fp, left, right).item()
- else:
- return interp_func(x, xp, fp, left, right)
- else:
+ if period is not None:
if period == 0:
raise ValueError("period must be a non-zero value")
period = abs(period)
left = None
right = None
- return_array = True
- if isinstance(x, (float, int, number)):
- return_array = False
- x = [x]
+
x = np.asarray(x, dtype=np.float64)
xp = np.asarray(xp, dtype=np.float64)
fp = np.asarray(fp, dtype=input_dtype)
@@ -1289,10 +1306,7 @@ def interp(x, xp, fp, left=None, right=None, period=None):
xp = np.concatenate((xp[-1:]-period, xp, xp[0:1]+period))
fp = np.concatenate((fp[-1:], fp, fp[0:1]))
- if return_array:
- return interp_func(x, xp, fp, left, right)
- else:
- return interp_func(x, xp, fp, left, right).item()
+ return interp_func(x, xp, fp, left, right)
def angle(z, deg=0):
@@ -1619,9 +1633,9 @@ def disp(mesg, device=None, linefeed=True):
Besides ``sys.stdout``, a file-like object can also be used as it has
both required methods:
- >>> from StringIO import StringIO
+ >>> from io import StringIO
>>> buf = StringIO()
- >>> np.disp('"Display" in a file', device=buf)
+ >>> np.disp(u'"Display" in a file', device=buf)
>>> buf.getvalue()
'"Display" in a file\\n'
@@ -2310,7 +2324,7 @@ def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None,
else:
X_T = (X*w).T
c = dot(X, X_T.conj())
- c *= 1. / np.float64(fact)
+ c *= np.true_divide(1, fact)
return c.squeeze()
@@ -3411,17 +3425,19 @@ def percentile(a, q, axis=None, out=None,
If True, then allow the input array `a` to be modified by intermediate
calculations, to save memory. In this case, the contents of the input
`a` after this function completes is undefined.
+
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to
- use when the desired quantile lies between two data points
+ use when the desired percentile lies between two data points
``i < j``:
- * linear: ``i + (j - i) * fraction``, where ``fraction``
- is the fractional part of the index surrounded by ``i``
- and ``j``.
- * lower: ``i``.
- * higher: ``j``.
- * nearest: ``i`` or ``j``, whichever is nearest.
- * midpoint: ``(i + j) / 2``.
+
+ * 'linear': ``i + (j - i) * fraction``, where ``fraction``
+ is the fractional part of the index surrounded by ``i``
+ and ``j``.
+ * 'lower': ``i``.
+ * 'higher': ``j``.
+ * 'nearest': ``i`` or ``j``, whichever is nearest.
+ * 'midpoint': ``(i + j) / 2``.
.. versionadded:: 1.9.0
keepdims : bool, optional
@@ -3448,6 +3464,7 @@ def percentile(a, q, axis=None, out=None,
mean
median : equivalent to ``percentile(..., 50)``
nanpercentile
+ quantile : equivalent to percentile, except with q in the range [0, 1].
Notes
-----
@@ -3488,6 +3505,34 @@ def percentile(a, q, axis=None, out=None,
array([ 7., 2.])
>>> assert not np.all(a == b)
+ The different types of interpolation can be visualized graphically:
+
+ .. plot::
+
+ import matplotlib.pyplot as plt
+
+ a = np.arange(4)
+ p = np.linspace(0, 100, 6001)
+ ax = plt.gca()
+ lines = [
+ ('linear', None),
+ ('higher', '--'),
+ ('lower', '--'),
+ ('nearest', '-.'),
+ ('midpoint', '-.'),
+ ]
+ for interpolation, style in lines:
+ ax.plot(
+ p, np.percentile(a, p, interpolation=interpolation),
+ label=interpolation, linestyle=style)
+ ax.set(
+ title='Interpolation methods for list: ' + str(a),
+ xlabel='Percentile',
+ ylabel='List item returned',
+ yticks=a)
+ ax.legend()
+ plt.show()
+
"""
q = np.true_divide(q, 100.0) # handles the asarray for us too
if not _quantile_is_valid(q):
@@ -3496,6 +3541,110 @@ def percentile(a, q, axis=None, out=None,
a, q, axis, out, overwrite_input, interpolation, keepdims)
+def quantile(a, q, axis=None, out=None,
+ overwrite_input=False, interpolation='linear', keepdims=False):
+ """
+ Compute the `q`th quantile of the data along the specified axis.
+ .. versionadded:: 1.15.0
+
+ Parameters
+ ----------
+ a : array_like
+ Input array or object that can be converted to an array.
+ q : array_like of float
+ Quantile or sequence of quantiles to compute, which must be between
+ 0 and 1 inclusive.
+ axis : {int, tuple of int, None}, optional
+ Axis or axes along which the quantiles are computed. The
+ default is to compute the quantile(s) along a flattened
+ version of the array.
+ out : ndarray, optional
+ Alternative output array in which to place the result. It must
+ have the same shape and buffer length as the expected output,
+ but the type (of the output) will be cast if necessary.
+ overwrite_input : bool, optional
+ If True, then allow the input array `a` to be modified by intermediate
+ calculations, to save memory. In this case, the contents of the input
+ `a` after this function completes is undefined.
+ interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
+ This optional parameter specifies the interpolation method to
+ use when the desired quantile lies between two data points
+ ``i < j``:
+ * linear: ``i + (j - i) * fraction``, where ``fraction``
+ is the fractional part of the index surrounded by ``i``
+ and ``j``.
+ * lower: ``i``.
+ * higher: ``j``.
+ * nearest: ``i`` or ``j``, whichever is nearest.
+ * midpoint: ``(i + j) / 2``.
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left in
+ the result as dimensions with size one. With this option, the
+ result will broadcast correctly against the original array `a`.
+
+ Returns
+ -------
+ quantile : scalar or ndarray
+ If `q` is a single quantile and `axis=None`, then the result
+ is a scalar. If multiple quantiles are given, first axis of
+ the result corresponds to the quantiles. The other axes are
+ the axes that remain after the reduction of `a`. If the input
+ contains integers or floats smaller than ``float64``, the output
+ data-type is ``float64``. Otherwise, the output data-type is the
+ same as that of the input. If `out` is specified, that array is
+ returned instead.
+
+ See Also
+ --------
+ mean
+ percentile : equivalent to quantile, but with q in the range [0, 100].
+ median : equivalent to ``quantile(..., 0.5)``
+ nanquantile
+
+ Notes
+ -----
+ Given a vector ``V`` of length ``N``, the ``q``-th quantile of
+ ``V`` is the value ``q`` of the way from the minimum to the
+ maximum in a sorted copy of ``V``. The values and distances of
+ the two nearest neighbors as well as the `interpolation` parameter
+ will determine the quantile if the normalized ranking does not
+ match the location of ``q`` exactly. This function is the same as
+ the median if ``q=0.5``, the same as the minimum if ``q=0.0`` and the
+ same as the maximum if ``q=1.0``.
+
+ Examples
+ --------
+ >>> a = np.array([[10, 7, 4], [3, 2, 1]])
+ >>> a
+ array([[10, 7, 4],
+ [ 3, 2, 1]])
+ >>> np.quantile(a, 0.5)
+ 3.5
+ >>> np.quantile(a, 0.5, axis=0)
+ array([ 6.5, 4.5, 2.5])
+ >>> np.quantile(a, 0.5, axis=1)
+ array([ 7., 2.])
+ >>> np.quantile(a, 0.5, axis=1, keepdims=True)
+ array([[ 7.],
+ [ 2.]])
+ >>> m = np.quantile(a, 0.5, axis=0)
+ >>> out = np.zeros_like(m)
+ >>> np.quantile(a, 0.5, axis=0, out=out)
+ array([ 6.5, 4.5, 2.5])
+ >>> m
+ array([ 6.5, 4.5, 2.5])
+ >>> b = a.copy()
+ >>> np.quantile(b, 0.5, axis=1, overwrite_input=True)
+ array([ 7., 2.])
+ >>> assert not np.all(a == b)
+ """
+ q = np.asanyarray(q)
+ if not _quantile_is_valid(q):
+ raise ValueError("Quantiles must be in the range [0, 1]")
+ return _quantile_unchecked(
+ a, q, axis, out, overwrite_input, interpolation, keepdims)
+
+
def _quantile_unchecked(a, q, axis=None, out=None, overwrite_input=False,
interpolation='linear', keepdims=False):
"""Assumes that q is in [0, 1], and is an ndarray"""
diff --git a/numpy/lib/histograms.py b/numpy/lib/histograms.py
index 706a532c5..2922b3a86 100644
--- a/numpy/lib/histograms.py
+++ b/numpy/lib/histograms.py
@@ -8,7 +8,11 @@ import operator
import numpy as np
from numpy.compat.py3k import basestring
-__all__ = ['histogram', 'histogramdd']
+__all__ = ['histogram', 'histogramdd', 'histogram_bin_edges']
+
+# range is a keyword argument to many functions, so save the builtin so they can
+# use it.
+_range = range
def _hist_bin_sqrt(x):
@@ -163,12 +167,22 @@ def _hist_bin_fd(x):
def _hist_bin_auto(x):
"""
Histogram bin estimator that uses the minimum width of the
- Freedman-Diaconis and Sturges estimators.
+ Freedman-Diaconis and Sturges estimators if the FD bandwidth is non zero
+ and the Sturges estimator if the FD bandwidth is 0.
The FD estimator is usually the most robust method, but its width
- estimate tends to be too large for small `x`. The Sturges estimator
- is quite good for small (<1000) datasets and is the default in the R
- language. This method gives good off the shelf behaviour.
+ estimate tends to be too large for small `x` and bad for data with limited
+ variance. The Sturges estimator is quite good for small (<1000) datasets
+ and is the default in the R language. This method gives good off the shelf
+ behaviour.
+
+ .. versionchanged:: 1.15.0
+ If there is limited variance the IQR can be 0, which results in the
+ FD bin width being 0 too. This is not a valid bin width, so
+ ``np.histogram_bin_edges`` chooses 1 bin instead, which may not be optimal.
+ If the IQR is 0, it's unlikely any variance-based estimators will be of
+ use, so we revert to the Sturges estimator, which only uses the size of the
+ dataset in its calculation.
Parameters
----------
@@ -184,10 +198,13 @@ def _hist_bin_auto(x):
--------
_hist_bin_fd, _hist_bin_sturges
"""
- # There is no need to check for zero here. If ptp is, so is IQR and
- # vice versa. Either both are zero or neither one is.
- return min(_hist_bin_fd(x), _hist_bin_sturges(x))
-
+ fd_bw = _hist_bin_fd(x)
+ sturges_bw = _hist_bin_sturges(x)
+ if fd_bw:
+ return min(fd_bw, sturges_bw)
+ else:
+ # limited variance, so we return a len dependent bw estimator
+ return sturges_bw
# Private dict initialized at module load time
_hist_bin_selectors = {'auto': _hist_bin_auto,
@@ -219,18 +236,20 @@ def _get_outer_edges(a, range):
"""
if range is not None:
first_edge, last_edge = range
+ if first_edge > last_edge:
+ raise ValueError(
+ 'max must be larger than min in range parameter.')
+ if not (np.isfinite(first_edge) and np.isfinite(last_edge)):
+ raise ValueError(
+ "supplied range of [{}, {}] is not finite".format(first_edge, last_edge))
elif a.size == 0:
# handle empty arrays. Can't determine range, so use 0-1.
first_edge, last_edge = 0, 1
else:
first_edge, last_edge = a.min(), a.max()
-
- if first_edge > last_edge:
- raise ValueError(
- 'max must be larger than min in range parameter.')
- if not (np.isfinite(first_edge) and np.isfinite(last_edge)):
- raise ValueError(
- 'range parameter must be finite.')
+ if not (np.isfinite(first_edge) and np.isfinite(last_edge)):
+ raise ValueError(
+ "autodetected range of [{}, {}] is not finite".format(first_edge, last_edge))
# expand empty range to avoid divide by zero
if first_edge == last_edge:
@@ -346,10 +365,9 @@ def _search_sorted_inclusive(a, v):
))
-def histogram(a, bins=10, range=None, normed=False, weights=None,
- density=None):
+def histogram_bin_edges(a, bins=10, range=None, weights=None):
r"""
- Compute the histogram of a set of data.
+ Function to calculate only the edges of the bins used by the `histogram` function.
Parameters
----------
@@ -361,9 +379,7 @@ def histogram(a, bins=10, range=None, normed=False, weights=None,
sequence, it defines the bin edges, including the rightmost
edge, allowing for non-uniform bin widths.
- .. versionadded:: 1.11.0
-
- If `bins` is a string from the list below, `histogram` will use
+ If `bins` is a string from the list below, `histogram_bin_edges` will use
the method chosen to calculate the optimal bin width and
consequently the number of bins (see `Notes` for more detail on
the estimators) from the data that falls within the requested
@@ -410,57 +426,24 @@ def histogram(a, bins=10, range=None, normed=False, weights=None,
computation as well. While bin width is computed to be optimal
based on the actual data within `range`, the bin count will fill
the entire range including portions containing no data.
- normed : bool, optional
- This keyword is deprecated in NumPy 1.6.0 due to confusing/buggy
- behavior. It will be removed in NumPy 2.0.0. Use the ``density``
- keyword instead. If ``False``, the result will contain the
- number of samples in each bin. If ``True``, the result is the
- value of the probability *density* function at the bin,
- normalized such that the *integral* over the range is 1. Note
- that this latter behavior is known to be buggy with unequal bin
- widths; use ``density`` instead.
+
weights : array_like, optional
An array of weights, of the same shape as `a`. Each value in
`a` only contributes its associated weight towards the bin count
- (instead of 1). If `density` is True, the weights are
- normalized, so that the integral of the density over the range
- remains 1.
- density : bool, optional
- If ``False``, the result will contain the number of samples in
- each bin. If ``True``, the result is the value of the
- probability *density* function at the bin, normalized such that
- the *integral* over the range is 1. Note that the sum of the
- histogram values will not be equal to 1 unless bins of unity
- width are chosen; it is not a probability *mass* function.
-
- Overrides the ``normed`` keyword if given.
+ (instead of 1). This is currently not used by any of the bin estimators,
+ but may be in the future.
Returns
-------
- hist : array
- The values of the histogram. See `density` and `weights` for a
- description of the possible semantics.
bin_edges : array of dtype float
- Return the bin edges ``(length(hist)+1)``.
-
+ The edges to pass into `histogram`
See Also
--------
- histogramdd, bincount, searchsorted, digitize
+ histogram
Notes
-----
- All but the last (righthand-most) bin is half-open. In other words,
- if `bins` is::
-
- [1, 2, 3, 4]
-
- then the first bin is ``[1, 2)`` (including 1, but excluding 2) and
- the second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which
- *includes* 4.
-
- .. versionadded:: 1.11.0
-
The methods to estimate the optimal number of bins are well founded
in literature, and are inspired by the choices R provides for
histogram visualisation. Note that having the number of bins
@@ -470,7 +453,7 @@ def histogram(a, bins=10, range=None, normed=False, weights=None,
below, :math:`h` is the binwidth and :math:`n_h` is the number of
bins. All estimators that compute bin counts are recast to bin width
using the `ptp` of the data. The final bin count is obtained from
- ``np.round(np.ceil(range / h))`.
+ ``np.round(np.ceil(range / h))``.
'Auto' (maximum of the 'Sturges' and 'FD' estimators)
A compromise to get a good value. For small datasets the Sturges
@@ -531,6 +514,134 @@ def histogram(a, bins=10, range=None, normed=False, weights=None,
Examples
--------
+ >>> arr = np.array([0, 0, 0, 1, 2, 3, 3, 4, 5])
+ >>> np.histogram_bin_edges(arr, bins='auto', range=(0, 1))
+ array([0. , 0.25, 0.5 , 0.75, 1. ])
+ >>> np.histogram_bin_edges(arr, bins=2)
+ array([0. , 2.5, 5. ])
+
+ For consistency with histogram, an array of pre-computed bins is
+ passed through unmodified:
+
+ >>> np.histogram_bin_edges(arr, [1, 2])
+ array([1, 2])
+
+ This function allows one set of bins to be computed, and reused across
+ multiple histograms:
+
+ >>> shared_bins = np.histogram_bin_edges(arr, bins='auto')
+ >>> shared_bins
+ array([0., 1., 2., 3., 4., 5.])
+
+ >>> group_id = np.array([0, 1, 1, 0, 1, 1, 0, 1, 1])
+ >>> hist_0, _ = np.histogram(arr[group_id == 0], bins=shared_bins)
+ >>> hist_1, _ = np.histogram(arr[group_id == 1], bins=shared_bins)
+
+ >>> hist_0; hist_1
+ array([1, 1, 0, 1, 0])
+ array([2, 0, 1, 1, 2])
+
+ Which gives more easily comparable results than using separate bins for
+ each histogram:
+
+ >>> hist_0, bins_0 = np.histogram(arr[group_id == 0], bins='auto')
+ >>> hist_1, bins_1 = np.histogram(arr[group_id == 1], bins='auto')
+ >>> hist_0; hist_1
+ array([1, 1, 1])
+ array([2, 1, 1, 2])
+ >>> bins_0; bins_1
+ array([0., 1., 2., 3.])
+ array([0. , 1.25, 2.5 , 3.75, 5. ])
+
+ """
+ a, weights = _ravel_and_check_weights(a, weights)
+ bin_edges, _ = _get_bin_edges(a, bins, range, weights)
+ return bin_edges
+
+
+def histogram(a, bins=10, range=None, normed=False, weights=None,
+ density=None):
+ r"""
+ Compute the histogram of a set of data.
+
+ Parameters
+ ----------
+ a : array_like
+ Input data. The histogram is computed over the flattened array.
+ bins : int or sequence of scalars or str, optional
+ If `bins` is an int, it defines the number of equal-width
+ bins in the given range (10, by default). If `bins` is a
+ sequence, it defines the bin edges, including the rightmost
+ edge, allowing for non-uniform bin widths.
+
+ .. versionadded:: 1.11.0
+
+ If `bins` is a string, it defines the method used to calculate the
+ optimal bin width, as defined by `histogram_bin_edges`.
+
+ range : (float, float), optional
+ The lower and upper range of the bins. If not provided, range
+ is simply ``(a.min(), a.max())``. Values outside the range are
+ ignored. The first element of the range must be less than or
+ equal to the second. `range` affects the automatic bin
+ computation as well. While bin width is computed to be optimal
+ based on the actual data within `range`, the bin count will fill
+ the entire range including portions containing no data.
+ normed : bool, optional
+
+ .. deprecated:: 1.6.0
+
+ This keyword is deprecated in NumPy 1.6.0 due to confusing/buggy
+ behavior. It will be removed in NumPy 2.0.0. Use the ``density``
+ keyword instead. If ``False``, the result will contain the
+ number of samples in each bin. If ``True``, the result is the
+ value of the probability *density* function at the bin,
+ normalized such that the *integral* over the range is 1. Note
+ that this latter behavior is known to be buggy with unequal bin
+ widths; use ``density`` instead.
+ weights : array_like, optional
+ An array of weights, of the same shape as `a`. Each value in
+ `a` only contributes its associated weight towards the bin count
+ (instead of 1). If `density` is True, the weights are
+ normalized, so that the integral of the density over the range
+ remains 1.
+ density : bool, optional
+ If ``False``, the result will contain the number of samples in
+ each bin. If ``True``, the result is the value of the
+ probability *density* function at the bin, normalized such that
+ the *integral* over the range is 1. Note that the sum of the
+ histogram values will not be equal to 1 unless bins of unity
+ width are chosen; it is not a probability *mass* function.
+
+ Overrides the ``normed`` keyword if given.
+
+ Returns
+ -------
+ hist : array
+ The values of the histogram. See `density` and `weights` for a
+ description of the possible semantics.
+ bin_edges : array of dtype float
+ Return the bin edges ``(length(hist)+1)``.
+
+
+ See Also
+ --------
+ histogramdd, bincount, searchsorted, digitize, histogram_bin_edges
+
+ Notes
+ -----
+ All but the last (righthand-most) bin is half-open. In other words,
+ if `bins` is::
+
+ [1, 2, 3, 4]
+
+ then the first bin is ``[1, 2)`` (including 1, but excluding 2) and
+ the second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which
+ *includes* 4.
+
+
+ Examples
+ --------
>>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3])
(array([0, 2, 1]), array([0, 1, 2, 3]))
>>> np.histogram(np.arange(4), bins=np.arange(5), density=True)
@@ -599,7 +710,7 @@ def histogram(a, bins=10, range=None, normed=False, weights=None,
# large arrays, it is actually faster (for example for a 10^8 array it
# is 2x as fast) and it results in a memory footprint 3x lower in the
# limit of large arrays.
- for i in np.arange(0, len(a), BLOCK):
+ for i in _range(0, len(a), BLOCK):
tmp_a = a[i:i+BLOCK]
if weights is None:
tmp_w = None
@@ -647,12 +758,12 @@ def histogram(a, bins=10, range=None, normed=False, weights=None,
# Compute via cumulative histogram
cum_n = np.zeros(bin_edges.shape, ntype)
if weights is None:
- for i in np.arange(0, len(a), BLOCK):
+ for i in _range(0, len(a), BLOCK):
sa = np.sort(a[i:i+BLOCK])
cum_n += _search_sorted_inclusive(sa, bin_edges)
else:
zero = np.zeros(1, dtype=ntype)
- for i in np.arange(0, len(a), BLOCK):
+ for i in _range(0, len(a), BLOCK):
tmp_a = a[i:i+BLOCK]
tmp_w = weights[i:i+BLOCK]
sorting_index = np.argsort(tmp_a)
@@ -685,10 +796,18 @@ def histogramdd(sample, bins=10, range=None, normed=False, weights=None):
Parameters
----------
- sample : array_like
- The data to be histogrammed. It must be an (N,D) array or data
- that can be converted to such. The rows of the resulting array
- are the coordinates of points in a D dimensional polytope.
+ sample : (N, D) array, or (D, N) array_like
+ The data to be histogrammed.
+
+ Note the unusual interpretation of sample when an array_like:
+
+ * When an array, each row is a coordinate in a D-dimensional space -
+ such as ``histogramdd(np.array([p1, p2, p3]))``.
+ * When an array_like, each element is the list of values for a single
+ coordinate - such as ``histogramdd((X, Y, Z))``.
+
+ The first form should be preferred.
+
bins : sequence or int, optional
The bin specification:
@@ -697,9 +816,12 @@ def histogramdd(sample, bins=10, range=None, normed=False, weights=None):
* The number of bins for all dimensions (nx=ny=...=bins).
range : sequence, optional
- A sequence of lower and upper bin edges to be used if the edges are
- not given explicitly in `bins`. Defaults to the minimum and maximum
- values along each dimension.
+ A sequence of length D, each an optional (lower, upper) tuple giving
+ the outer bin edges to be used if the edges are not given explicitly in
+ `bins`.
+ An entry of None in the sequence results in the minimum and maximum
+ values being used for the corresponding dimension.
+ The default, None, is equivalent to passing a tuple of D None values.
normed : bool, optional
If False, returns the number of samples in each bin. If True,
returns the bin density ``bin_count / sample_count / bin_volume``.
@@ -755,107 +877,62 @@ def histogramdd(sample, bins=10, range=None, normed=False, weights=None):
# bins is an integer
bins = D*[bins]
- # Select range for each dimension
- # Used only if number of bins is given.
+ # normalize the range argument
if range is None:
- # Handle empty input. Range can't be determined in that case, use 0-1.
- if N == 0:
- smin = np.zeros(D)
- smax = np.ones(D)
- else:
- smin = np.atleast_1d(np.array(sample.min(0), float))
- smax = np.atleast_1d(np.array(sample.max(0), float))
- else:
- if not np.all(np.isfinite(range)):
- raise ValueError(
- 'range parameter must be finite.')
- smin = np.zeros(D)
- smax = np.zeros(D)
- for i in np.arange(D):
- smin[i], smax[i] = range[i]
-
- # Make sure the bins have a finite width.
- for i in np.arange(len(smin)):
- if smin[i] == smax[i]:
- smin[i] = smin[i] - .5
- smax[i] = smax[i] + .5
-
- # avoid rounding issues for comparisons when dealing with inexact types
- if np.issubdtype(sample.dtype, np.inexact):
- edge_dt = sample.dtype
- else:
- edge_dt = float
+ range = (None,) * D
+ elif len(range) != D:
+ raise ValueError('range argument must have one entry per dimension')
+
# Create edge arrays
- for i in np.arange(D):
- if np.isscalar(bins[i]):
+ for i in _range(D):
+ if np.ndim(bins[i]) == 0:
if bins[i] < 1:
raise ValueError(
- "Element at index %s in `bins` should be a positive "
- "integer." % i)
- nbin[i] = bins[i] + 2 # +2 for outlier bins
- edges[i] = np.linspace(smin[i], smax[i], nbin[i]-1, dtype=edge_dt)
+ '`bins[{}]` must be positive, when an integer'.format(i))
+ smin, smax = _get_outer_edges(sample[:,i], range[i])
+ edges[i] = np.linspace(smin, smax, bins[i] + 1)
+ elif np.ndim(bins[i]) == 1:
+ edges[i] = np.asarray(bins[i])
+ if np.any(edges[i][:-1] > edges[i][1:]):
+ raise ValueError(
+ '`bins[{}]` must be monotonically increasing, when an array'
+ .format(i))
else:
- edges[i] = np.asarray(bins[i], edge_dt)
- nbin[i] = len(edges[i]) + 1 # +1 for outlier bins
- dedges[i] = np.diff(edges[i])
- if np.any(np.asarray(dedges[i]) <= 0):
raise ValueError(
- "Found bin edge of size <= 0. Did you specify `bins` with"
- "non-monotonic sequence?")
-
- nbin = np.asarray(nbin)
+ '`bins[{}]` must be a scalar or 1d array'.format(i))
- # Handle empty input.
- if N == 0:
- return np.zeros(nbin-2), edges
+ nbin[i] = len(edges[i]) + 1 # includes an outlier on each end
+ dedges[i] = np.diff(edges[i])
# Compute the bin number each sample falls into.
- Ncount = {}
- for i in np.arange(D):
- Ncount[i] = np.digitize(sample[:, i], edges[i])
+ Ncount = tuple(
+ # avoid np.digitize to work around gh-11022
+ np.searchsorted(edges[i], sample[:, i], side='right')
+ for i in _range(D)
+ )
# Using digitize, values that fall on an edge are put in the right bin.
# For the rightmost bin, we want values equal to the right edge to be
# counted in the last bin, and not as an outlier.
- for i in np.arange(D):
- # Rounding precision
- mindiff = dedges[i].min()
- if not np.isinf(mindiff):
- decimal = int(-np.log10(mindiff)) + 6
- # Find which points are on the rightmost edge.
- not_smaller_than_edge = (sample[:, i] >= edges[i][-1])
- on_edge = (np.around(sample[:, i], decimal) ==
- np.around(edges[i][-1], decimal))
- # Shift these points one bin to the left.
- Ncount[i][np.nonzero(on_edge & not_smaller_than_edge)[0]] -= 1
-
- # Flattened histogram matrix (1D)
- # Reshape is used so that overlarge arrays
- # will raise an error.
- hist = np.zeros(nbin, float).reshape(-1)
+ for i in _range(D):
+ # Find which points are on the rightmost edge.
+ on_edge = (sample[:, i] == edges[i][-1])
+ # Shift these points one bin to the left.
+ Ncount[i][on_edge] -= 1
# Compute the sample indices in the flattened histogram matrix.
- ni = nbin.argsort()
- xy = np.zeros(N, int)
- for i in np.arange(0, D-1):
- xy += Ncount[ni[i]] * nbin[ni[i+1:]].prod()
- xy += Ncount[ni[-1]]
+ # This raises an error if the array is too large.
+ xy = np.ravel_multi_index(Ncount, nbin)
# Compute the number of repetitions in xy and assign it to the
# flattened histmat.
- if len(xy) == 0:
- return np.zeros(nbin-2, int), edges
-
- flatcount = np.bincount(xy, weights)
- a = np.arange(len(flatcount))
- hist[a] = flatcount
+ hist = np.bincount(xy, weights, minlength=nbin.prod())
# Shape into a proper matrix
- hist = hist.reshape(np.sort(nbin))
- for i in np.arange(nbin.size):
- j = ni.argsort()[i]
- hist = hist.swapaxes(i, j)
- ni[i], ni[j] = ni[j], ni[i]
+ hist = hist.reshape(nbin)
+
+ # This preserves the (bad) behavior observed in gh-7845, for now.
+ hist = hist.astype(float, casting='safe')
# Remove outliers (indices 0 and -1 for each dimension).
core = D*(slice(1, -1),)
@@ -864,7 +941,7 @@ def histogramdd(sample, bins=10, range=None, normed=False, weights=None):
# Normalize if normed is True
if normed:
s = hist.sum()
- for i in np.arange(D):
+ for i in _range(D):
shape = np.ones(D, int)
shape[i] = nbin[i] - 2
hist = hist / dedges[i].reshape(shape)
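
Taken together, the rewritten histogramdd pipeline is: per-axis bin indices via searchsorted(..., side='right'), a shift for samples sitting on the rightmost edge, ravel_multi_index to fold the D index arrays into one flat index (with built-in bounds checking), and bincount to accumulate. A minimal 2-D sketch of the same steps, with illustrative data not taken from the patch:

    import numpy as np

    sample = np.array([[0.1, 0.1], [0.4, 0.6], [1.0, 1.0]])   # N=3, D=2
    edges = [np.array([0.0, 0.5, 1.0]), np.array([0.0, 0.5, 1.0])]
    nbin = np.array([len(e) + 1 for e in edges])  # core bins + 2 outlier bins

    # bin index per coordinate; side='right' matches np.digitize
    Ncount = [np.searchsorted(edges[i], sample[:, i], side='right')
              for i in range(2)]
    for i in range(2):
        # values equal to the rightmost edge belong in the last core bin
        Ncount[i][sample[:, i] == edges[i][-1]] -= 1

    xy = np.ravel_multi_index(tuple(Ncount), nbin)  # flat, bounds-checked
    hist = np.bincount(xy, minlength=nbin.prod()).reshape(nbin)
    core = hist[1:-1, 1:-1]                         # drop the outlier bins
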
diff --git a/numpy/lib/mixins.py b/numpy/lib/mixins.py
index fbdc2edfb..0379ecb1a 100644
--- a/numpy/lib/mixins.py
+++ b/numpy/lib/mixins.py
@@ -74,8 +74,8 @@ class NDArrayOperatorsMixin(object):
It is useful for writing classes that do not inherit from `numpy.ndarray`,
but that should support arithmetic and numpy universal functions like
- arrays as described in :ref:`A Mechanism for Overriding Ufuncs
- <neps.ufunc-overrides>`.
+ arrays as described in `A Mechanism for Overriding Ufuncs
+ <../../neps/nep-0013-ufunc-overrides.html>`_.
As a trivial example, consider this implementation of an ``ArrayLike``
class that simply wraps a NumPy array and ensures that the result of any
@@ -137,6 +137,8 @@ class NDArrayOperatorsMixin(object):
Note that unlike ``numpy.ndarray``, ``ArrayLike`` does not allow operations
with arbitrary, unrecognized types. This ensures that interactions with
ArrayLike preserve a well-defined casting hierarchy.
+
+ .. versionadded:: 1.13
"""
# Like np.ndarray, this mixin class implements "Option 1" from the ufunc
# overrides NEP.
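
For readers following the cross-reference, the ``ArrayLike`` pattern the docstring describes boils down to one hook; this is a condensed, hedged sketch (multi-output ufuncs and ``out=`` are ignored for brevity):

    import numbers
    import numpy as np

    class ArrayLike(np.lib.mixins.NDArrayOperatorsMixin):
        """Wraps an ndarray; the mixin supplies +, *, <, etc."""
        def __init__(self, value):
            self.value = np.asarray(value)

        def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
            # refuse unknown types to keep the casting hierarchy well-defined
            if not all(isinstance(x, (np.ndarray, numbers.Number, ArrayLike))
                       for x in inputs):
                return NotImplemented
            inputs = tuple(x.value if isinstance(x, ArrayLike) else x
                           for x in inputs)
            return ArrayLike(getattr(ufunc, method)(*inputs, **kwargs))

    # ArrayLike([1, 2]) + 1 -> ArrayLike wrapping array([2, 3])
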
diff --git a/numpy/lib/nanfunctions.py b/numpy/lib/nanfunctions.py
index 16e363d7c..abd2da1a2 100644
--- a/numpy/lib/nanfunctions.py
+++ b/numpy/lib/nanfunctions.py
@@ -16,6 +16,7 @@ Functions
- `nanvar` -- variance of non-NaN values
- `nanstd` -- standard deviation of non-NaN values
- `nanmedian` -- median of non-NaN values
+- `nanquantile` -- qth quantile of non-NaN values
- `nanpercentile` -- qth percentile of non-NaN values
"""
@@ -29,7 +30,7 @@ from numpy.lib import function_base
__all__ = [
'nansum', 'nanmax', 'nanmin', 'nanargmax', 'nanargmin', 'nanmean',
'nanmedian', 'nanpercentile', 'nanvar', 'nanstd', 'nanprod',
- 'nancumsum', 'nancumprod'
+ 'nancumsum', 'nancumprod', 'nanquantile'
]
@@ -1057,15 +1058,16 @@ def nanpercentile(a, q, axis=None, out=None, overwrite_input=False,
`a` after this function completes is undefined.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to
- use when the desired quantile lies between two data points
+ use when the desired percentile lies between two data points
``i < j``:
- * linear: ``i + (j - i) * fraction``, where ``fraction``
- is the fractional part of the index surrounded by ``i``
- and ``j``.
- * lower: ``i``.
- * higher: ``j``.
- * nearest: ``i`` or ``j``, whichever is nearest.
- * midpoint: ``(i + j) / 2``.
+
+ * 'linear': ``i + (j - i) * fraction``, where ``fraction``
+ is the fractional part of the index surrounded by ``i``
+ and ``j``.
+ * 'lower': ``i``.
+ * 'higher': ``j``.
+ * 'nearest': ``i`` or ``j``, whichever is nearest.
+ * 'midpoint': ``(i + j) / 2``.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in
the result as dimensions with size one. With this option, the
@@ -1094,6 +1096,7 @@ def nanpercentile(a, q, axis=None, out=None, overwrite_input=False,
nanmean
nanmedian : equivalent to ``nanpercentile(..., 50)``
percentile, median, mean
+ nanquantile : equivalent to nanpercentile, but with q in the range [0, 1].
Notes
-----
@@ -1143,6 +1146,110 @@ def nanpercentile(a, q, axis=None, out=None, overwrite_input=False,
a, q, axis, out, overwrite_input, interpolation, keepdims)
+def nanquantile(a, q, axis=None, out=None, overwrite_input=False,
+ interpolation='linear', keepdims=np._NoValue):
+ """
+ Compute the qth quantile of the data along the specified axis,
+ while ignoring nan values.
+ Returns the qth quantile(s) of the array elements.
+ .. versionadded:: 1.15.0
+
+ Parameters
+ ----------
+ a : array_like
+ Input array or object that can be converted to an array, containing
+ nan values to be ignored
+ q : array_like of float
+ Quantile or sequence of quantiles to compute, which must be between
+ 0 and 1 inclusive.
+ axis : {int, tuple of int, None}, optional
+ Axis or axes along which the quantiles are computed. The
+ default is to compute the quantile(s) along a flattened
+ version of the array.
+ out : ndarray, optional
+ Alternative output array in which to place the result. It must
+ have the same shape and buffer length as the expected output,
+ but the type (of the output) will be cast if necessary.
+ overwrite_input : bool, optional
+ If True, then allow the input array `a` to be modified by intermediate
+ calculations, to save memory. In this case, the contents of the input
+ `a` after this function completes are undefined.
+ interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
+ This optional parameter specifies the interpolation method to
+ use when the desired quantile lies between two data points
+ ``i < j``:
+
+ * 'linear': ``i + (j - i) * fraction``, where ``fraction``
+ is the fractional part of the index surrounded by ``i``
+ and ``j``.
+ * 'lower': ``i``.
+ * 'higher': ``j``.
+ * 'nearest': ``i`` or ``j``, whichever is nearest.
+ * 'midpoint': ``(i + j) / 2``.
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left in
+ the result as dimensions with size one. With this option, the
+ result will broadcast correctly against the original array `a`.
+
+ If this is anything but the default value it will be passed
+ through (in the special case of an empty array) to the
+ `mean` function of the underlying array. If the array is
+ a sub-class and `mean` does not have the kwarg `keepdims` this
+ will raise a RuntimeError.
+
+ Returns
+ -------
+ quantile : scalar or ndarray
+ If `q` is a single quantile and `axis=None`, then the result
+ is a scalar. If multiple quantiles are given, first axis of
+ the result corresponds to the quantiles. The other axes are
+ the axes that remain after the reduction of `a`. If the input
+ contains integers or floats smaller than ``float64``, the output
+ data-type is ``float64``. Otherwise, the output data-type is the
+ same as that of the input. If `out` is specified, that array is
+ returned instead.
+
+ See Also
+ --------
+ quantile
+ nanmean
+ nanmedian : equivalent to ``nanquantile(..., 0.5)``
+ nanpercentile : same as nanquantile, but with q in the range [0, 100].
+
+ Examples
+ --------
+ >>> a = np.array([[10., 7., 4.], [3., 2., 1.]])
+ >>> a[0][1] = np.nan
+ >>> a
+ array([[ 10., nan, 4.],
+ [ 3., 2., 1.]])
+ >>> np.quantile(a, 0.5)
+ nan
+ >>> np.nanquantile(a, 0.5)
+ 3.5
+ >>> np.nanquantile(a, 0.5, axis=0)
+ array([ 6.5, 2., 2.5])
+ >>> np.nanquantile(a, 0.5, axis=1, keepdims=True)
+ array([[ 7.],
+ [ 2.]])
+ >>> m = np.nanquantile(a, 0.5, axis=0)
+ >>> out = np.zeros_like(m)
+ >>> np.nanquantile(a, 0.5, axis=0, out=out)
+ array([ 6.5, 2., 2.5])
+ >>> m
+ array([ 6.5, 2. , 2.5])
+ >>> b = a.copy()
+ >>> np.nanquantile(b, 0.5, axis=1, overwrite_input=True)
+ array([ 7., 2.])
+ >>> assert not np.all(a==b)
+ """
+ a = np.asanyarray(a)
+ q = np.asanyarray(q)
+ if not function_base._quantile_is_valid(q):
+ raise ValueError("Quantiles must be in the range [0, 1]")
+ return _nanquantile_unchecked(
+ a, q, axis, out, overwrite_input, interpolation, keepdims)
+
+
def _nanquantile_unchecked(a, q, axis=None, out=None, overwrite_input=False,
interpolation='linear', keepdims=np._NoValue):
"""Assumes that q is in [0, 1], and is an ndarray"""
diff --git a/numpy/lib/npyio.py b/numpy/lib/npyio.py
index 096f1a3a4..b109d65e1 100644
--- a/numpy/lib/npyio.py
+++ b/numpy/lib/npyio.py
@@ -475,9 +475,7 @@ def save(file, arr, allow_pickle=True, fix_imports=True):
Notes
-----
- For a description of the ``.npy`` format, see the module docstring
- of `numpy.lib.format` or the NumPy Enhancement Proposal
- http://numpy.github.io/neps/npy-format.html
+ For a description of the ``.npy`` format, see :py:mod:`numpy.lib.format`.
Examples
--------
@@ -561,9 +559,7 @@ def savez(file, *args, **kwds):
The ``.npz`` file format is a zipped archive of files named after the
variables they contain. The archive is not compressed and each file
in the archive contains one variable in ``.npy`` format. For a
- description of the ``.npy`` format, see `numpy.lib.format` or the
- NumPy Enhancement Proposal
- http://numpy.github.io/neps/npy-format.html
+ description of the ``.npy`` format, see :py:mod:`numpy.lib.format`.
When opening the saved ``.npz`` file with `load` a `NpzFile` object is
returned. This is a dictionary-like object which can be queried for
@@ -642,9 +638,9 @@ def savez_compressed(file, *args, **kwds):
The ``.npz`` file format is a zipped archive of files named after the
variables they contain. The archive is compressed with
``zipfile.ZIP_DEFLATED`` and each file in the archive contains one variable
- in ``.npy`` format. For a description of the ``.npy`` format, see
- `numpy.lib.format` or the NumPy Enhancement Proposal
- http://numpy.github.io/neps/npy-format.html
+ in ``.npy`` format. For a description of the ``.npy`` format, see
+ :py:mod:`numpy.lib.format`.
+
When opening the saved ``.npz`` file with `load` a `NpzFile` object is
returned. This is a dictionary-like object which can be queried for
@@ -758,7 +754,7 @@ def _getconv(dtype):
elif issubclass(typ, np.floating):
return floatconv
elif issubclass(typ, complex):
- return lambda x: complex(asstr(x))
+ return lambda x: complex(asstr(x).replace('+-', '-'))
elif issubclass(typ, np.bytes_):
return asbytes
elif issubclass(typ, np.unicode_):
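
The extra ``.replace('+-', '-')`` exists because Python's ``complex()`` constructor rejects the sign sequence that older ``savetxt`` output could contain (gh-7895); normalizing it makes such files loadable again:

    # complex() chokes on the legacy '+-' sign sequence ...
    try:
        complex('3.142e+00+-2.718e+00j')
    except ValueError:
        pass  # "complex() arg is a malformed string"

    # ... but parses fine once the sign is normalized
    assert complex('3.142e+00+-2.718e+00j'.replace('+-', '-')) \
        == 3.142 - 2.718j
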
@@ -791,8 +787,8 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
the data-type.
comments : str or sequence of str, optional
The characters or list of characters used to indicate the start of a
- comment. For backwards compatibility, byte strings will be decoded as
- 'latin1'. The default is '#'.
+ comment. None implies no comments. For backwards compatibility, byte
+ strings will be decoded as 'latin1'. The default is '#'.
delimiter : str, optional
The string used to separate values. For backwards compatibility, byte
strings will be decoded as 'latin1'. The default is whitespace.
@@ -859,18 +855,18 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
Examples
--------
>>> from io import StringIO # StringIO behaves like a file object
- >>> c = StringIO("0 1\\n2 3")
+ >>> c = StringIO(u"0 1\\n2 3")
>>> np.loadtxt(c)
array([[ 0., 1.],
[ 2., 3.]])
- >>> d = StringIO("M 21 72\\nF 35 58")
+ >>> d = StringIO(u"M 21 72\\nF 35 58")
>>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),
... 'formats': ('S1', 'i4', 'f4')})
array([('M', 21, 72.0), ('F', 35, 58.0)],
dtype=[('gender', '|S1'), ('age', '<i4'), ('weight', '<f4')])
- >>> c = StringIO("1,0,2\\n3,0,4")
+ >>> c = StringIO(u"1,0,2\\n3,0,4")
>>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
>>> x
array([ 1., 3.])
@@ -936,7 +932,7 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
if encoding is not None:
fencoding = encoding
# we must assume local encoding
- # TOOD emit portability warning?
+ # TODO emit portability warning?
elif fencoding is None:
import locale
fencoding = locale.getpreferredencoding()
@@ -1104,11 +1100,16 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
nshape = list(X.shape)
pos = nshape[0]
nshape[0] += len(x)
- X.resize(nshape)
+ X.resize(nshape, refcheck=False)
X[pos:, ...] = x
finally:
if fown:
fh.close()
+ # recursive closures have a cyclic reference to themselves, which
+ # requires gc to collect (gh-10620). To avoid this problem, for
+ # performance and PyPy friendliness, we break the cycle:
+ flatten_dtype_internal = None
+ pack_items = None
if X is None:
X = np.array([], dtype)
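
The cycle referred to in the comment is the generic one any recursive closure forms: the function object holds a closure cell that points back at itself. Clearing the name after use breaks the cycle, so the object is freed by reference counting instead of waiting for the garbage collector. A stripped-down illustration of the pattern (names are illustrative, not from the patch):

    def parse(nested):
        def flatten(item):
            # flatten's closure cell refers to flatten itself -> a cycle
            if isinstance(item, list):
                return [x for sub in item for x in flatten(sub)]
            return [item]

        result = flatten(nested)
        flatten = None  # rebind the cell; the cycle is gone
        return result

    # parse([1, [2, [3]]]) -> [1, 2, 3]
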
@@ -1161,13 +1162,14 @@ def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
multi-format string, e.g. 'Iteration %d -- %10.5f', in which
case `delimiter` is ignored. For complex `X`, the legal options
for `fmt` are:
- a) a single specifier, `fmt='%.4e'`, resulting in numbers formatted
- like `' (%s+%sj)' % (fmt, fmt)`
- b) a full string specifying every real and imaginary part, e.g.
- `' %.4e %+.4ej %.4e %+.4ej %.4e %+.4ej'` for 3 columns
- c) a list of specifiers, one per column - in this case, the real
- and imaginary part must have separate specifiers,
- e.g. `['%.3e + %.3ej', '(%.15e%+.15ej)']` for 2 columns
+
+ * a single specifier, `fmt='%.4e'`, resulting in numbers formatted
+ like `' (%s+%sj)' % (fmt, fmt)`
+ * a full string specifying every real and imaginary part, e.g.
+ `' %.4e %+.4ej %.4e %+.4ej %.4e %+.4ej'` for 3 columns
+ * a list of specifiers, one per column - in this case, the real
+ and imaginary part must have separate specifiers,
+ e.g. `['%.3e + %.3ej', '(%.15e%+.15ej)']` for 2 columns
delimiter : str, optional
String or character separating columns.
newline : str, optional
@@ -1372,7 +1374,8 @@ def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
for number in row:
row2.append(number.real)
row2.append(number.imag)
- fh.write(format % tuple(row2) + newline)
+ s = format % tuple(row2) + newline
+ fh.write(s.replace('+-', '-'))
else:
for row in X:
try:
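
This is the write-side counterpart of the loader fix above: with a single specifier, ``savetxt`` builds ``' (%s+%sj)' % (fmt, fmt)``, whose literal ``+`` collides with the sign of a negative imaginary part. Plain string formatting reproduces both the artifact and the cure:

    fmt = ' (%.3e+%.3ej)'       # what a single '%.3e' specifier expands to
    s = fmt % (3.142, -2.718)
    # s == ' (3.142e+00+-2.718e+00j)'   <- the gh-7895 artifact
    # s.replace('+-', '-') == ' (3.142e+00-2.718e+00j)'
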
@@ -1460,9 +1463,9 @@ def fromregex(file, regexp, dtype, encoding=None):
dtype = np.dtype(dtype)
content = file.read()
- if isinstance(content, bytes) and not isinstance(regexp, bytes):
+ if isinstance(content, bytes) and isinstance(regexp, np.unicode):
regexp = asbytes(regexp)
- elif not isinstance(content, bytes) and isinstance(regexp, bytes):
+ elif isinstance(content, np.unicode) and isinstance(regexp, bytes):
regexp = asstr(regexp)
if not hasattr(regexp, 'match'):
@@ -1625,7 +1628,7 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
Comma delimited file with mixed dtype
- >>> s = StringIO("1,1.3,abcde")
+ >>> s = StringIO(u"1,1.3,abcde")
>>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'),
... ('mystring','S5')], delimiter=",")
>>> data
@@ -1652,7 +1655,7 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
An example with fixed-width columns
- >>> s = StringIO("11.3abcde")
+ >>> s = StringIO(u"11.3abcde")
>>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'],
... delimiter=[1,3,5])
>>> data
@@ -1714,7 +1717,7 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
try:
while not first_values:
first_line = _decode_line(next(fhd), encoding)
- if names is True:
+ if (names is True) and (comments is not None):
if comments in first_line:
first_line = (
''.join(first_line.split(comments)[1:]))
@@ -1728,8 +1731,9 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
# Should we take the first values as names ?
if names is True:
fval = first_values[0].strip()
- if fval in comments:
- del first_values[0]
+ if comments is not None:
+ if fval in comments:
+ del first_values[0]
# Check the columns to use: make sure `usecols` is a list
if usecols is not None:
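
With the two guards in place, ``names=True`` no longer assumes a comment string exists (gh-10780); the first line is taken verbatim as the header. A small demonstration, assuming the fixed NumPy:

    import numpy as np
    from io import StringIO

    data = StringIO(u'col1 col2\n1 2\n3 4')
    arr = np.genfromtxt(data, dtype=int, names=True, comments=None)
    # arr['col1'] -> array([1, 3]); arr['col2'] -> array([2, 4])
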
diff --git a/numpy/lib/polynomial.py b/numpy/lib/polynomial.py
index 41b5e2f64..078608bbb 100644
--- a/numpy/lib/polynomial.py
+++ b/numpy/lib/polynomial.py
@@ -113,11 +113,6 @@ def poly(seq_of_zeros):
>>> np.poly(P)
array([ 1. , 0. , 0.16666667])
- Or a square matrix object:
-
- >>> np.poly(np.matrix(P))
- array([ 1. , 0. , 0.16666667])
-
Note how in all cases the leading coefficient is always 1.
"""
diff --git a/numpy/lib/scimath.py b/numpy/lib/scimath.py
index e07caf805..f1838fee6 100644
--- a/numpy/lib/scimath.py
+++ b/numpy/lib/scimath.py
@@ -555,7 +555,7 @@ def arctanh(x):
--------
>>> np.set_printoptions(precision=4)
- >>> np.emath.arctanh(np.matrix(np.eye(2)))
+ >>> np.emath.arctanh(np.eye(2))
array([[ Inf, 0.],
[ 0., Inf]])
>>> np.emath.arctanh([1j])
diff --git a/numpy/lib/stride_tricks.py b/numpy/lib/stride_tricks.py
index 6c240db7f..2abe5cdd1 100644
--- a/numpy/lib/stride_tricks.py
+++ b/numpy/lib/stride_tricks.py
@@ -123,9 +123,12 @@ def _broadcast_to(array, shape, subok, readonly):
needs_writeable = not readonly and array.flags.writeable
extras = ['reduce_ok'] if needs_writeable else []
op_flag = 'readwrite' if needs_writeable else 'readonly'
- broadcast = np.nditer(
+ it = np.nditer(
(array,), flags=['multi_index', 'refs_ok', 'zerosize_ok'] + extras,
- op_flags=[op_flag], itershape=shape, order='C').itviews[0]
+ op_flags=[op_flag], itershape=shape, order='C')
+ with it:
+ # never really has writebackifcopy semantics
+ broadcast = it.itviews[0]
result = _maybe_view_as_subclass(array, broadcast)
if needs_writeable and not result.flags.writeable:
result.flags.writeable = True
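
Entering the nditer as a context manager settles any writeback buffers before the view escapes; the visible semantics of broadcasting are unchanged, and broadcast views remain read-only unless explicitly requested otherwise:

    import numpy as np

    b = np.broadcast_to(np.arange(3), (2, 3))  # strided view, no copy
    assert b.strides[0] == 0                   # rows share the same memory
    assert not b.flags.writeable               # read-only by default
    # b[0, 0] = 9 would raise: assignment destination is read-only
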
diff --git a/numpy/lib/tests/test__datasource.py b/numpy/lib/tests/test__datasource.py
index a9cb157f3..32812990c 100644
--- a/numpy/lib/tests/test__datasource.py
+++ b/numpy/lib/tests/test__datasource.py
@@ -5,9 +5,7 @@ import sys
from tempfile import mkdtemp, mkstemp, NamedTemporaryFile
from shutil import rmtree
-from numpy.testing import (
- run_module_suite, assert_, assert_equal, assert_raises, SkipTest,
- )
+from numpy.testing import assert_, assert_equal, assert_raises, SkipTest
import numpy.lib._datasource as datasource
if sys.version_info[0] >= 3:
@@ -342,7 +340,3 @@ class TestOpenFunc(object):
fp = datasource.open(local_file)
assert_(fp)
fp.close()
-
-
-if __name__ == "__main__":
- run_module_suite()
diff --git a/numpy/lib/tests/test__iotools.py b/numpy/lib/tests/test__iotools.py
index 54fac8da4..5f6c29a4d 100644
--- a/numpy/lib/tests/test__iotools.py
+++ b/numpy/lib/tests/test__iotools.py
@@ -6,7 +6,7 @@ from datetime import date
import numpy as np
from numpy.testing import (
- run_module_suite, assert_, assert_equal, assert_allclose, assert_raises,
+ assert_, assert_equal, assert_allclose, assert_raises,
)
from numpy.lib._iotools import (
LineSplitter, NameValidator, StringConverter,
@@ -346,6 +346,3 @@ class TestMiscFunctions(object):
dt = np.dtype([(("a", "A"), "f8"), (("b", "B"), "f8")])
dt_flat = flatten_dtype(dt)
assert_equal(dt_flat, [float, float])
-
-if __name__ == "__main__":
- run_module_suite()
diff --git a/numpy/lib/tests/test__version.py b/numpy/lib/tests/test__version.py
index 993c9d507..8e66a0c03 100644
--- a/numpy/lib/tests/test__version.py
+++ b/numpy/lib/tests/test__version.py
@@ -3,7 +3,7 @@
"""
from __future__ import division, absolute_import, print_function
-from numpy.testing import assert_, run_module_suite, assert_raises
+from numpy.testing import assert_, assert_raises
from numpy.lib import NumpyVersion
@@ -64,7 +64,3 @@ def test_dev0_a_b_rc_mixed():
def test_raises():
for ver in ['1.9', '1,9.0', '1.7.x']:
assert_raises(ValueError, NumpyVersion, ver)
-
-
-if __name__ == "__main__":
- run_module_suite()
diff --git a/numpy/lib/tests/test_arraypad.py b/numpy/lib/tests/test_arraypad.py
index fce4c451d..8ba0370b0 100644
--- a/numpy/lib/tests/test_arraypad.py
+++ b/numpy/lib/tests/test_arraypad.py
@@ -489,6 +489,19 @@ class TestConstant(object):
)
assert_allclose(test, expected)
+ def test_check_large_integers(self):
+ uint64_max = 2 ** 64 - 1
+ arr = np.full(5, uint64_max, dtype=np.uint64)
+ test = np.pad(arr, 1, mode="constant", constant_values=arr.min())
+ expected = np.full(7, uint64_max, dtype=np.uint64)
+ assert_array_equal(test, expected)
+
+ int64_max = 2 ** 63 - 1
+ arr = np.full(5, int64_max, dtype=np.int64)
+ test = np.pad(arr, 1, mode="constant", constant_values=arr.min())
+ expected = np.full(7, int64_max, dtype=np.int64)
+ assert_array_equal(test, expected)
+
class TestLinearRamp(object):
def test_check_simple(self):
@@ -1090,7 +1103,3 @@ class TestTypeError1(object):
kwargs = dict(mode='mean', stat_length=(3, ))
assert_raises(TypeError, pad, arr, ((2, 3, 4), (3, 2)),
**kwargs)
-
-
-if __name__ == "__main__":
- np.testing.run_module_suite()
diff --git a/numpy/lib/tests/test_arraysetops.py b/numpy/lib/tests/test_arraysetops.py
index 8286834a4..dace5ade8 100644
--- a/numpy/lib/tests/test_arraysetops.py
+++ b/numpy/lib/tests/test_arraysetops.py
@@ -6,9 +6,7 @@ from __future__ import division, absolute_import, print_function
import numpy as np
import sys
-from numpy.testing import (
- run_module_suite, assert_array_equal, assert_equal, assert_raises,
- )
+from numpy.testing import assert_array_equal, assert_equal, assert_raises
from numpy.lib.arraysetops import (
ediff1d, intersect1d, setxor1d, union1d, setdiff1d, unique, in1d, isin
)
@@ -34,7 +32,46 @@ class TestSetOps(object):
assert_array_equal(c, ed)
assert_array_equal([], intersect1d([], []))
-
+
+ def test_intersect1d_indices(self):
+ # unique inputs
+ a = np.array([1, 2, 3, 4])
+ b = np.array([2, 1, 4, 6])
+ c, i1, i2 = intersect1d(a, b, assume_unique=True, return_indices=True)
+ ee = np.array([1, 2, 4])
+ assert_array_equal(c, ee)
+ assert_array_equal(a[i1], ee)
+ assert_array_equal(b[i2], ee)
+
+ # non-unique inputs
+ a = np.array([1, 2, 2, 3, 4, 3, 2])
+ b = np.array([1, 8, 4, 2, 2, 3, 2, 3])
+ c, i1, i2 = intersect1d(a, b, return_indices=True)
+ ef = np.array([1, 2, 3, 4])
+ assert_array_equal(c, ef)
+ assert_array_equal(a[i1], ef)
+ assert_array_equal(b[i2], ef)
+
+ # non1d, unique inputs
+ a = np.array([[2, 4, 5, 6], [7, 8, 1, 15]])
+ b = np.array([[3, 2, 7, 6], [10, 12, 8, 9]])
+ c, i1, i2 = intersect1d(a, b, assume_unique=True, return_indices=True)
+ ui1 = np.unravel_index(i1, a.shape)
+ ui2 = np.unravel_index(i2, b.shape)
+ ea = np.array([2, 6, 7, 8])
+ assert_array_equal(ea, a[ui1])
+ assert_array_equal(ea, b[ui2])
+
+ # non-1d, not assumed to be unique inputs
+ a = np.array([[2, 4, 5, 6, 6], [4, 7, 8, 7, 2]])
+ b = np.array([[3, 2, 7, 7], [10, 12, 8, 7]])
+ c, i1, i2 = intersect1d(a, b, return_indices=True)
+ ui1 = np.unravel_index(i1, a.shape)
+ ui2 = np.unravel_index(i2, b.shape)
+ ea = np.array([2, 7, 8])
+ assert_array_equal(ea, a[ui1])
+ assert_array_equal(ea, b[ui2])
+
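
These tests cover the 1.15 addition of ``return_indices`` to ``intersect1d``: besides the sorted common values, it reports where each value was first found in either input. In brief:

    import numpy as np

    a = np.array([1, 2, 2, 3, 4])
    b = np.array([4, 2, 8])
    common, ia, ib = np.intersect1d(a, b, return_indices=True)
    # common -> array([2, 4]); a[ia] and b[ib] both equal common
    assert (a[ia] == common).all() and (b[ib] == common).all()
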
def test_setxor1d(self):
a = np.array([5, 7, 1, 2])
b = np.array([2, 4, 3, 1, 5])
@@ -76,8 +113,6 @@ class TestSetOps(object):
assert_array_equal([1,7,8], ediff1d(two_elem, to_end=[7,8]))
assert_array_equal([7,1], ediff1d(two_elem, to_begin=7))
assert_array_equal([5,6,1], ediff1d(two_elem, to_begin=[5,6]))
- assert(isinstance(ediff1d(np.matrix(1)), np.matrix))
- assert(isinstance(ediff1d(np.matrix(1), to_begin=1), np.matrix))
def test_isin(self):
# the tests for in1d cover most of isin's behavior
@@ -504,7 +539,3 @@ class TestUnique(object):
assert_array_equal(uniq[:, inv], data)
msg = "Unique's return_counts=True failed with axis=1"
assert_array_equal(cnt, np.array([2, 1, 1]), msg)
-
-
-if __name__ == "__main__":
- run_module_suite()
diff --git a/numpy/lib/tests/test_arrayterator.py b/numpy/lib/tests/test_arrayterator.py
index 64ad7f4de..2ce4456a5 100644
--- a/numpy/lib/tests/test_arrayterator.py
+++ b/numpy/lib/tests/test_arrayterator.py
@@ -46,7 +46,3 @@ def test():
# Check that all elements are iterated correctly
assert_(list(c.flat) == list(d.flat))
-
-if __name__ == '__main__':
- from numpy.testing import run_module_suite
- run_module_suite()
diff --git a/numpy/lib/tests/test_financial.py b/numpy/lib/tests/test_financial.py
index c5e92dbc0..524915041 100644
--- a/numpy/lib/tests/test_financial.py
+++ b/numpy/lib/tests/test_financial.py
@@ -4,9 +4,8 @@ from decimal import Decimal
import numpy as np
from numpy.testing import (
- run_module_suite, assert_, assert_almost_equal, assert_allclose,
- assert_equal, assert_raises
-)
+ assert_, assert_almost_equal, assert_allclose, assert_equal, assert_raises
+ )
class TestFinancial(object):
@@ -339,7 +338,3 @@ class TestFinancial(object):
Decimal('0'), [Decimal('0'), Decimal('0'), Decimal('1'), 'end', 'begin']),
[Decimal('-74.998201'), Decimal('-75.62318601'), Decimal('-75.62318601'),
Decimal('-76.88882405'), Decimal('-76.88882405')], 4)
-
-
-if __name__ == "__main__":
- run_module_suite()
diff --git a/numpy/lib/tests/test_format.py b/numpy/lib/tests/test_format.py
index d3bd2cef7..38a9b8000 100644
--- a/numpy/lib/tests/test_format.py
+++ b/numpy/lib/tests/test_format.py
@@ -1,5 +1,6 @@
from __future__ import division, absolute_import, print_function
+# doctest
r''' Test the .npy file format.
Set up:
@@ -275,18 +276,17 @@ Test the header writing.
"v\x00{'descr': [('x', '>i4', (2,)), ('y', '>f8', (2, 2)), ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n"
"\x16\x02{'descr': [('x', '>i4', (2,)),\n ('Info',\n [('value', '>c16'),\n ('y2', '>f8'),\n ('Info2',\n [('name', '|S2'),\n ('value', '>c16', (2,)),\n ('y3', '>f8', (2,)),\n ('z3', '>u4', (2,))]),\n ('name', '|S2'),\n ('z2', '|b1')]),\n ('color', '|S2'),\n ('info', [('Name', '>U8'), ('Value', '>c16')]),\n ('y', '>f8', (2, 2)),\n ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n"
'''
-
import sys
import os
import shutil
import tempfile
import warnings
+import pytest
from io import BytesIO
import numpy as np
from numpy.testing import (
- run_module_suite, assert_, assert_array_equal, assert_raises, raises,
- dec, SkipTest
+ assert_, assert_array_equal, assert_raises, raises, SkipTest
)
from numpy.lib import format
@@ -477,7 +477,7 @@ def test_long_str():
assert_array_equal(long_str_arr, long_str_arr2)
-@dec.slow
+@pytest.mark.slow
def test_memmap_roundtrip():
# Fixme: test crashes nose on windows.
if not (sys.platform == 'win32' or sys.platform == 'cygwin'):
@@ -628,7 +628,7 @@ def test_version_2_0():
assert_raises(ValueError, format.write_array, f, d, (1, 0))
-@dec.slow
+@pytest.mark.slow
def test_version_2_0_memmap():
# requires more than 2 byte for header
dt = [(("%d" % i) * 100, float) for i in range(500)]
@@ -832,8 +832,9 @@ def test_large_file_support():
assert_array_equal(r, d)
-@dec.slow
-@dec.skipif(np.dtype(np.intp).itemsize < 8, "test requires 64-bit system")
+@pytest.mark.skipif(np.dtype(np.intp).itemsize < 8,
+ reason="test requires 64-bit system")
+@pytest.mark.slow
def test_large_archive():
# Regression test for product of saving arrays with dimensions of array
# having a product that doesn't fit in int32. See gh-7598 for details.
@@ -851,7 +852,3 @@ def test_large_archive():
new_a = np.load(f)["arr"]
assert_(a.shape == new_a.shape)
-
-
-if __name__ == "__main__":
- run_module_suite()
diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py
index dc5fe3397..4103a9eb3 100644
--- a/numpy/lib/tests/test_function_base.py
+++ b/numpy/lib/tests/test_function_base.py
@@ -4,15 +4,16 @@ import operator
import warnings
import sys
import decimal
+import pytest
import numpy as np
from numpy import ma
from numpy.testing import (
- run_module_suite, assert_, assert_equal, assert_array_equal,
- assert_almost_equal, assert_array_almost_equal, assert_raises,
- assert_allclose, assert_array_max_ulp, assert_warns, assert_raises_regex,
- dec, suppress_warnings, HAS_REFCOUNT,
-)
+ assert_, assert_equal, assert_array_equal, assert_almost_equal,
+ assert_array_almost_equal, assert_raises, assert_allclose,
+ assert_array_max_ulp, assert_warns, assert_raises_regex, suppress_warnings,
+ HAS_REFCOUNT,
+ )
import numpy.lib.function_base as nfb
from numpy.random import rand
from numpy.lib import (
@@ -21,7 +22,7 @@ from numpy.lib import (
histogram, histogramdd, i0, insert, interp, kaiser, meshgrid, msort,
piecewise, place, rot90, select, setxor1d, sinc, split, trapz, trim_zeros,
unwrap, unique, vectorize
-)
+ )
from numpy.compat import long
@@ -103,9 +104,10 @@ class TestRot90(object):
class TestFlip(object):
def test_axes(self):
- assert_raises(ValueError, np.flip, np.ones(4), axis=1)
- assert_raises(ValueError, np.flip, np.ones((4, 4)), axis=2)
- assert_raises(ValueError, np.flip, np.ones((4, 4)), axis=-3)
+ assert_raises(np.AxisError, np.flip, np.ones(4), axis=1)
+ assert_raises(np.AxisError, np.flip, np.ones((4, 4)), axis=2)
+ assert_raises(np.AxisError, np.flip, np.ones((4, 4)), axis=-3)
+ assert_raises(np.AxisError, np.flip, np.ones((4, 4)), axis=(0, 3))
def test_basic_lr(self):
a = get_mat(4)
@@ -172,6 +174,35 @@ class TestFlip(object):
assert_equal(np.flip(a, i),
np.flipud(a.swapaxes(0, i)).swapaxes(i, 0))
+ def test_default_axis(self):
+ a = np.array([[1, 2, 3],
+ [4, 5, 6]])
+ b = np.array([[6, 5, 4],
+ [3, 2, 1]])
+ assert_equal(np.flip(a), b)
+
+ def test_multiple_axes(self):
+ a = np.array([[[0, 1],
+ [2, 3]],
+ [[4, 5],
+ [6, 7]]])
+
+ assert_equal(np.flip(a, axis=()), a)
+
+ b = np.array([[[5, 4],
+ [7, 6]],
+ [[1, 0],
+ [3, 2]]])
+
+ assert_equal(np.flip(a, axis=(0, 2)), b)
+
+ c = np.array([[[3, 2],
+ [1, 0]],
+ [[7, 6],
+ [5, 4]]])
+
+ assert_equal(np.flip(a, axis=(1, 2)), c)
+
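
The new cases exercise the 1.15 generalization of ``np.flip``: ``axis`` may now be omitted (flip over every axis) or passed as a tuple, both equivalent to ``::-1`` slicing:

    import numpy as np

    a = np.arange(8).reshape(2, 2, 2)
    assert (np.flip(a) == a[::-1, ::-1, ::-1]).all()            # axis=None
    assert (np.flip(a, axis=(0, 2)) == a[::-1, :, ::-1]).all()  # tuple axis
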
class TestAny(object):
@@ -256,9 +287,6 @@ class TestAverage(object):
assert_almost_equal(y5.mean(0), average(y5, 0))
assert_almost_equal(y5.mean(1), average(y5, 1))
- y6 = np.matrix(rand(5, 5))
- assert_array_equal(y6.mean(0), average(y6, 0))
-
def test_weights(self):
y = np.arange(10)
w = np.arange(10)
@@ -326,14 +354,6 @@ class TestAverage(object):
assert_equal(type(np.average(a)), subclass)
assert_equal(type(np.average(a, weights=w)), subclass)
- # also test matrices
- a = np.matrix([[1,2],[3,4]])
- w = np.matrix([[1,2],[3,4]])
-
- r = np.average(a, axis=0, weights=w)
- assert_equal(type(r), np.matrix)
- assert_equal(r, [[2.5, 10.0/3]])
-
def test_upcasting(self):
types = [('i4', 'i4', 'f8'), ('i4', 'f4', 'f8'), ('f4', 'i4', 'f8'),
('f4', 'f4', 'f4'), ('f4', 'f8', 'f8')]
@@ -1494,9 +1514,9 @@ class TestDigitize(object):
class TestUnwrap(object):
def test_simple(self):
- # check that unwrap removes jumps greather that 2*pi
+ # check that unwrap removes jumps greater than 2*pi
assert_array_equal(unwrap([1, 1 + 2 * np.pi]), [1, 1])
- # check that unwrap maintans continuity
+ # check that unwrap maintains continuity
assert_(np.all(diff(unwrap(rand(10) * 100)) < np.pi))
@@ -1592,16 +1612,6 @@ class TestTrapz(object):
xm = np.ma.array(x, mask=mask)
assert_almost_equal(trapz(y, xm), r)
- def test_matrix(self):
- # Test to make sure matrices give the same answer as ndarrays
- x = np.linspace(0, 5)
- y = x * x
- r = trapz(y, x)
- mx = np.matrix(x)
- my = np.matrix(y)
- mr = trapz(my, mx)
- assert_almost_equal(mr, r)
-
class TestSinc(object):
@@ -1752,7 +1762,9 @@ class TestCov(object):
def test_complex(self):
x = np.array([[1, 2, 3], [1j, 2j, 3j]])
- assert_allclose(cov(x), np.array([[1., -1.j], [1.j, 1.]]))
+ res = np.array([[1., -1.j], [1.j, 1.]])
+ assert_allclose(cov(x), res)
+ assert_allclose(cov(x, aweights=np.ones(3)), res)
def test_xy(self):
x = np.array([[1, 2, 3]])
@@ -2141,7 +2153,7 @@ class TestBincount(object):
"must not be negative",
lambda: np.bincount(x, minlength=-1))
- @dec.skipif(not HAS_REFCOUNT, "python has no sys.getrefcount")
+ @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
def test_dtype_reference_leaks(self):
# gh-6805
intp_refcount = sys.getrefcount(np.dtype(np.intp))
@@ -2252,8 +2264,17 @@ class TestInterp(object):
y = np.linspace(0, 1, 5)
x0 = np.array(.3)
assert_almost_equal(np.interp(x0, x, y), x0)
- x0 = np.array(.3, dtype=object)
- assert_almost_equal(np.interp(x0, x, y), .3)
+
+ xp = np.array([0, 2, 4])
+ fp = np.array([1, -1, 1])
+
+ actual = np.interp(np.array(1), xp, fp)
+ assert_equal(actual, 0)
+ assert_(isinstance(actual, np.float64))
+
+ actual = np.interp(np.array(4.5), xp, fp, period=4)
+ assert_equal(actual, 0.5)
+ assert_(isinstance(actual, np.float64))
def test_if_len_x_is_small(self):
xp = np.arange(0, 10, 0.0001)
@@ -2707,6 +2728,28 @@ class TestPercentile(object):
a, [0.3, 0.6], (0, 2), interpolation='nearest'), b)
+class TestQuantile(object):
+ # most of this is already tested by TestPercentile
+
+ def test_basic(self):
+ x = np.arange(8) * 0.5
+ assert_equal(np.quantile(x, 0), 0.)
+ assert_equal(np.quantile(x, 1), 3.5)
+ assert_equal(np.quantile(x, 0.5), 1.75)
+
+ def test_no_p_overwrite(self):
+ # this is worth retesting, because quantile does not make a copy
+ p0 = np.array([0, 0.75, 0.25, 0.5, 1.0])
+ p = p0.copy()
+ np.quantile(np.arange(100.), p, interpolation="midpoint")
+ assert_array_equal(p, p0)
+
+ p0 = p0.tolist()
+ p = p.tolist()
+ np.quantile(np.arange(100.), p, interpolation="midpoint")
+ assert_array_equal(p, p0)
+
+
class TestMedian(object):
def test_basic(self):
@@ -2976,14 +3019,10 @@ class TestAdd_newdoc_ufunc(object):
class TestAdd_newdoc(object):
- @dec.skipif(sys.flags.optimize == 2)
+ @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO")
def test_add_doc(self):
# test np.add_newdoc
tgt = "Current flat index into the array."
assert_equal(np.core.flatiter.index.__doc__[:len(tgt)], tgt)
assert_(len(np.core.ufunc.identity.__doc__) > 300)
assert_(len(np.lib.index_tricks.mgrid.__doc__) > 300)
-
-
-if __name__ == "__main__":
- run_module_suite()
diff --git a/numpy/lib/tests/test_histograms.py b/numpy/lib/tests/test_histograms.py
index a2c684a20..e16ae12c2 100644
--- a/numpy/lib/tests/test_histograms.py
+++ b/numpy/lib/tests/test_histograms.py
@@ -2,13 +2,12 @@ from __future__ import division, absolute_import, print_function
import numpy as np
-from numpy.lib.histograms import histogram, histogramdd
+from numpy.lib.histograms import histogram, histogramdd, histogram_bin_edges
from numpy.testing import (
- run_module_suite, assert_, assert_equal, assert_array_equal,
- assert_almost_equal, assert_array_almost_equal, assert_raises,
- assert_allclose, assert_array_max_ulp, assert_warns, assert_raises_regex,
- dec, suppress_warnings, HAS_REFCOUNT,
-)
+ assert_, assert_equal, assert_array_equal, assert_almost_equal,
+ assert_array_almost_equal, assert_raises, assert_allclose,
+ assert_array_max_ulp, assert_warns, assert_raises_regex, suppress_warnings,
+ )
class TestHistogram(object):
@@ -254,7 +253,7 @@ class TestHistogram(object):
one_nan = np.array([0, 1, np.nan])
all_nan = np.array([np.nan, np.nan])
- # the internal commparisons with NaN give warnings
+ # the internal comparisons with NaN give warnings
sup = suppress_warnings()
sup.filter(RuntimeWarning)
with sup:
@@ -346,6 +345,20 @@ class TestHistogram(object):
self.do_precision(np.single, np.longdouble)
self.do_precision(np.double, np.longdouble)
+ def test_histogram_bin_edges(self):
+ hist, e = histogram([1, 2, 3, 4], [1, 2])
+ edges = histogram_bin_edges([1, 2, 3, 4], [1, 2])
+ assert_array_equal(edges, e)
+
+ arr = np.array([0., 0., 0., 1., 2., 3., 3., 4., 5.])
+ hist, e = histogram(arr, bins=30, range=(-0.5, 5))
+ edges = histogram_bin_edges(arr, bins=30, range=(-0.5, 5))
+ assert_array_equal(edges, e)
+
+ hist, e = histogram(arr, bins='auto', range=(0, 1))
+ edges = histogram_bin_edges(arr, bins='auto', range=(0, 1))
+ assert_array_equal(edges, e)
+
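
The practical reason for exposing ``histogram_bin_edges`` on its own is bin reuse: compute edges once from a reference sample, then histogram several datasets against the same bins so the counts are comparable. A sketch of that workflow:

    import numpy as np

    reference = np.random.RandomState(0).normal(size=1000)
    edges = np.histogram_bin_edges(reference, bins='auto')

    h_ref, _ = np.histogram(reference, bins=edges)
    h_new, _ = np.histogram(reference + 0.1, bins=edges)  # same binning
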
class TestHistogramOptimBinNums(object):
"""
@@ -430,6 +443,24 @@ class TestHistogramOptimBinNums(object):
assert_equal(len(a), numbins, err_msg="{0} estimator, "
"No Variance test".format(estimator))
+ def test_limited_variance(self):
+ """
+ Check when IQR is 0, but variance exists, we return the sturges value
+ and not the fd value.
+ """
+ lim_var_data = np.ones(1000)
+ lim_var_data[:3] = 0
+ lim_var_data[-4:] = 100
+
+ edges_auto = histogram_bin_edges(lim_var_data, 'auto')
+ assert_equal(edges_auto, np.linspace(0, 100, 12))
+
+ edges_fd = histogram_bin_edges(lim_var_data, 'fd')
+ assert_equal(edges_fd, np.array([0, 100]))
+
+ edges_sturges = histogram_bin_edges(lim_var_data, 'sturges')
+ assert_equal(edges_sturges, np.linspace(0, 100, 12))
+
def test_outlier(self):
"""
Check the FD, Scott and Doane with outliers.
@@ -582,8 +613,6 @@ class TestHistogramdd(object):
assert_raises(ValueError, np.histogramdd, x, bins=[-1, 2, 4, 5])
assert_raises(ValueError, np.histogramdd, x, bins=[1, 0.99, 1, 1])
assert_raises(
- ValueError, np.histogramdd, x, bins=[1, 1, 1, [1, 2, 2, 3]])
- assert_raises(
ValueError, np.histogramdd, x, bins=[1, 1, 1, [1, 2, 3, -3]])
assert_(np.histogramdd(x, bins=[1, 1, 1, [1, 2, 3, 4]]))
@@ -615,7 +644,7 @@ class TestHistogramdd(object):
bins = [[0., 0.5, 1.0]]
hist, _ = histogramdd(x, bins=bins)
assert_(hist[0] == 0.0)
- assert_(hist[1] == 1.)
+ assert_(hist[1] == 0.0)
x = [1.0001]
bins = [[0., 0.5, 1.0]]
hist, _ = histogramdd(x, bins=bins)
@@ -630,6 +659,39 @@ class TestHistogramdd(object):
assert_raises(ValueError, histogramdd, vals,
range=[[0.0, 1.0], [np.nan, 0.75], [0.25, 0.5]])
-
-if __name__ == "__main__":
- run_module_suite()
+ def test_equal_edges(self):
+ """ Test that adjacent entries in an edge array can be equal """
+ x = np.array([0, 1, 2])
+ y = np.array([0, 1, 2])
+ x_edges = np.array([0, 2, 2])
+ y_edges = 1
+ hist, edges = histogramdd((x, y), bins=(x_edges, y_edges))
+
+ hist_expected = np.array([
+ [2.],
+ [1.], # x == 2 falls in the final bin
+ ])
+ assert_equal(hist, hist_expected)
+
+ def test_edge_dtype(self):
+ """ Test that if an edge array is input, its type is preserved """
+ x = np.array([0, 10, 20])
+ y = x / 10
+ x_edges = np.array([0, 5, 15, 20])
+ y_edges = x_edges / 10
+ hist, edges = histogramdd((x, y), bins=(x_edges, y_edges))
+
+ assert_equal(edges[0].dtype, x_edges.dtype)
+ assert_equal(edges[1].dtype, y_edges.dtype)
+
+ def test_large_integers(self):
+ big = 2**60 # Too large to represent with a full precision float
+
+ x = np.array([0], np.int64)
+ x_edges = np.array([-1, +1], np.int64)
+ y = big + x
+ y_edges = big + x_edges
+
+ hist, edges = histogramdd((x, y), bins=(x_edges, y_edges))
+
+ assert_equal(hist[0, 0], 1)
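
The large-integer case is exactly why the new code keeps user-supplied edge arrays in their own dtype: near 2**60 a float64 cannot tell neighbouring integers apart, so casting int64 edges to float (as the old ``edge_dt`` logic did) would collapse them:

    big = 2 ** 60                     # itself exactly representable, but ...
    assert float(big - 1) == float(big) == float(big + 1)  # neighbours merge
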
diff --git a/numpy/lib/tests/test_index_tricks.py b/numpy/lib/tests/test_index_tricks.py
index 0520ce580..089a7589a 100644
--- a/numpy/lib/tests/test_index_tricks.py
+++ b/numpy/lib/tests/test_index_tricks.py
@@ -2,9 +2,8 @@ from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.testing import (
- run_module_suite, assert_, assert_equal, assert_array_equal,
- assert_almost_equal, assert_array_almost_equal, assert_raises,
- assert_raises_regex
+ assert_, assert_equal, assert_array_equal, assert_almost_equal,
+ assert_array_almost_equal, assert_raises, assert_raises_regex
)
from numpy.lib.index_tricks import (
mgrid, ndenumerate, fill_diagonal, diag_indices, diag_indices_from,
@@ -185,37 +184,6 @@ class TestConcatenator(object):
assert_array_equal(d[:5, :], b)
assert_array_equal(d[5:, :], c)
- def test_matrix(self):
- a = [1, 2]
- b = [3, 4]
-
- ab_r = np.r_['r', a, b]
- ab_c = np.r_['c', a, b]
-
- assert_equal(type(ab_r), np.matrix)
- assert_equal(type(ab_c), np.matrix)
-
- assert_equal(np.array(ab_r), [[1,2,3,4]])
- assert_equal(np.array(ab_c), [[1],[2],[3],[4]])
-
- assert_raises(ValueError, lambda: np.r_['rc', a, b])
-
- def test_matrix_scalar(self):
- r = np.r_['r', [1, 2], 3]
- assert_equal(type(r), np.matrix)
- assert_equal(np.array(r), [[1,2,3]])
-
- def test_matrix_builder(self):
- a = np.array([1])
- b = np.array([2])
- c = np.array([3])
- d = np.array([4])
- actual = np.r_['a, b; c, d']
- expected = np.bmat([[a, b], [c, d]])
-
- assert_equal(actual, expected)
- assert_equal(type(actual), type(expected))
-
def test_0d(self):
assert_equal(r_[0, np.array(1), 2], [0, 1, 2])
assert_equal(r_[[0, 1, 2], np.array(3)], [0, 1, 2, 3])
@@ -285,71 +253,77 @@ def test_c_():
assert_equal(a, [[1, 2, 3, 0, 0, 4, 5, 6]])
-def test_fill_diagonal():
- a = np.zeros((3, 3), int)
- fill_diagonal(a, 5)
- yield (assert_array_equal, a,
- np.array([[5, 0, 0],
- [0, 5, 0],
- [0, 0, 5]]))
-
- #Test tall matrix
- a = np.zeros((10, 3), int)
- fill_diagonal(a, 5)
- yield (assert_array_equal, a,
- np.array([[5, 0, 0],
- [0, 5, 0],
- [0, 0, 5],
- [0, 0, 0],
- [0, 0, 0],
- [0, 0, 0],
- [0, 0, 0],
- [0, 0, 0],
- [0, 0, 0],
- [0, 0, 0]]))
-
- #Test tall matrix wrap
- a = np.zeros((10, 3), int)
- fill_diagonal(a, 5, True)
- yield (assert_array_equal, a,
- np.array([[5, 0, 0],
- [0, 5, 0],
- [0, 0, 5],
- [0, 0, 0],
- [5, 0, 0],
- [0, 5, 0],
- [0, 0, 5],
- [0, 0, 0],
- [5, 0, 0],
- [0, 5, 0]]))
-
- #Test wide matrix
- a = np.zeros((3, 10), int)
- fill_diagonal(a, 5)
- yield (assert_array_equal, a,
- np.array([[5, 0, 0, 0, 0, 0, 0, 0, 0, 0],
- [0, 5, 0, 0, 0, 0, 0, 0, 0, 0],
- [0, 0, 5, 0, 0, 0, 0, 0, 0, 0]]))
-
- # The same function can operate on a 4-d array:
- a = np.zeros((3, 3, 3, 3), int)
- fill_diagonal(a, 4)
- i = np.array([0, 1, 2])
- yield (assert_equal, np.where(a != 0), (i, i, i, i))
+class TestFillDiagonal(object):
+ def test_basic(self):
+ a = np.zeros((3, 3), int)
+ fill_diagonal(a, 5)
+ assert_array_equal(
+ a, np.array([[5, 0, 0],
+ [0, 5, 0],
+ [0, 0, 5]])
+ )
+
+ def test_tall_matrix(self):
+ a = np.zeros((10, 3), int)
+ fill_diagonal(a, 5)
+ assert_array_equal(
+ a, np.array([[5, 0, 0],
+ [0, 5, 0],
+ [0, 0, 5],
+ [0, 0, 0],
+ [0, 0, 0],
+ [0, 0, 0],
+ [0, 0, 0],
+ [0, 0, 0],
+ [0, 0, 0],
+ [0, 0, 0]])
+ )
+
+ def test_tall_matrix_wrap(self):
+ a = np.zeros((10, 3), int)
+ fill_diagonal(a, 5, True)
+ assert_array_equal(
+ a, np.array([[5, 0, 0],
+ [0, 5, 0],
+ [0, 0, 5],
+ [0, 0, 0],
+ [5, 0, 0],
+ [0, 5, 0],
+ [0, 0, 5],
+ [0, 0, 0],
+ [5, 0, 0],
+ [0, 5, 0]])
+ )
+
+ def test_wide_matrix(self):
+ a = np.zeros((3, 10), int)
+ fill_diagonal(a, 5)
+ assert_array_equal(
+ a, np.array([[5, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 5, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 5, 0, 0, 0, 0, 0, 0, 0]])
+ )
+
+ def test_operate_4d_array(self):
+ a = np.zeros((3, 3, 3, 3), int)
+ fill_diagonal(a, 4)
+ i = np.array([0, 1, 2])
+ assert_equal(np.where(a != 0), (i, i, i, i))
def test_diag_indices():
di = diag_indices(4)
a = np.array([[1, 2, 3, 4],
- [5, 6, 7, 8],
- [9, 10, 11, 12],
- [13, 14, 15, 16]])
+ [5, 6, 7, 8],
+ [9, 10, 11, 12],
+ [13, 14, 15, 16]])
a[di] = 100
- yield (assert_array_equal, a,
- np.array([[100, 2, 3, 4],
- [5, 100, 7, 8],
- [9, 10, 100, 12],
- [13, 14, 15, 100]]))
+ assert_array_equal(
+ a, np.array([[100, 2, 3, 4],
+ [5, 100, 7, 8],
+ [9, 10, 100, 12],
+ [13, 14, 15, 100]])
+ )
# Now, we create indices to manipulate a 3-d array:
d3 = diag_indices(2, 3)
@@ -357,12 +331,12 @@ def test_diag_indices():
# And use it to set the diagonal of a zeros array to 1:
a = np.zeros((2, 2, 2), int)
a[d3] = 1
- yield (assert_array_equal, a,
- np.array([[[1, 0],
- [0, 0]],
-
- [[0, 0],
- [0, 1]]]))
+ assert_array_equal(
+ a, np.array([[[1, 0],
+ [0, 0]],
+ [[0, 0],
+ [0, 1]]])
+ )
def test_diag_indices_from():
@@ -394,7 +368,3 @@ def test_ndindex():
# Make sure 0-sized ndindex works correctly
x = list(ndindex(*[0]))
assert_equal(x, [])
-
-
-if __name__ == "__main__":
- run_module_suite()
diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py
index d05fcd543..f58c9e33d 100644
--- a/numpy/lib/tests/test_io.py
+++ b/numpy/lib/tests/test_io.py
@@ -4,15 +4,16 @@ import sys
import gzip
import os
import threading
-from tempfile import NamedTemporaryFile
import time
import warnings
import gc
import io
+import re
+import pytest
+from tempfile import NamedTemporaryFile
from io import BytesIO, StringIO
from datetime import datetime
import locale
-import re
import numpy as np
import numpy.ma as ma
@@ -20,10 +21,10 @@ from numpy.lib._iotools import ConverterError, ConversionWarning
from numpy.compat import asbytes, bytes, unicode, Path
from numpy.ma.testutils import assert_equal
from numpy.testing import (
- run_module_suite, assert_warns, assert_, SkipTest,
- assert_raises_regex, assert_raises, assert_allclose,
- assert_array_equal, temppath, tempdir, dec, IS_PYPY, suppress_warnings
-)
+ assert_warns, assert_, SkipTest, assert_raises_regex, assert_raises,
+ assert_allclose, assert_array_equal, temppath, tempdir, IS_PYPY,
+ HAS_REFCOUNT, suppress_warnings, assert_no_gc_cycles,
+ )
class TextIO(BytesIO):
@@ -156,7 +157,7 @@ class RoundtripTest(object):
a = np.array([1, 2, 3, 4], int)
self.roundtrip(a)
- @dec.knownfailureif(sys.platform == 'win32', "Fail on Win32")
+ @pytest.mark.skipif(sys.platform == 'win32', reason="Fails on Win32")
def test_mmap(self):
a = np.array([[1, 2.5], [4, 7.3]])
self.roundtrip(a, file_on_disk=True, load_kwds={'mmap_mode': 'r'})
@@ -168,7 +169,7 @@ class RoundtripTest(object):
a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
self.check_roundtrips(a)
- @dec.slow
+ @pytest.mark.slow
def test_format_2_0(self):
dt = [(("%d" % i) * 100, float) for i in range(500)]
a = np.ones(1000, dtype=dt)
@@ -200,8 +201,8 @@ class TestSavezLoad(RoundtripTest):
self.arr_reloaded.fid.close()
os.remove(self.arr_reloaded.fid.name)
- @dec.skipif(not IS_64BIT, "Works only with 64bit systems")
- @dec.slow
+ @pytest.mark.skipif(not IS_64BIT, reason="Needs 64bit platform")
+ @pytest.mark.slow
def test_big_arrays(self):
L = (1 << 31) + 100000
a = np.empty(L, dtype=np.uint8)
@@ -277,7 +278,8 @@ class TestSavezLoad(RoundtripTest):
fp.seek(0)
assert_(not fp.closed)
- @dec.skipif(IS_PYPY, "context manager required on PyPy")
+ #FIXME: Is this still true?
+ @pytest.mark.skipif(IS_PYPY, reason="Missing context manager on PyPy")
def test_closing_fid(self):
# Test that issue #1517 (too many opened files) remains closed
# It might be a "weak" test since failed to get triggered on
@@ -466,6 +468,26 @@ class TestSaveTxt(object):
[b'(3.142e+00+2.718e+00j) (3.142e+00+2.718e+00j)\n',
b'(3.142e+00+2.718e+00j) (3.142e+00+2.718e+00j)\n'])
+ def test_complex_negative_exponent(self):
+ # Previous to 1.15, some formats generated x+-yj, gh 7895
+ ncols = 2
+ nrows = 2
+ a = np.zeros((ncols, nrows), dtype=np.complex128)
+ re = np.pi
+ im = np.e
+ a[:] = re - 1.0j * im
+ c = BytesIO()
+ np.savetxt(c, a, fmt='%.3e')
+ c.seek(0)
+ lines = c.readlines()
+ assert_equal(
+ lines,
+ [b' (3.142e+00-2.718e+00j) (3.142e+00-2.718e+00j)\n',
+ b' (3.142e+00-2.718e+00j) (3.142e+00-2.718e+00j)\n'])
+
def test_custom_writer(self):
class CustomWriter(list):
@@ -540,15 +562,17 @@ class LoadTxtBase(object):
assert_array_equal(res, wanted)
# Python2 .open does not support encoding
- @dec.skipif(MAJVER == 2)
+ @pytest.mark.skipif(MAJVER == 2, reason="Needs Python version >= 3")
def test_compressed_gzip(self):
self.check_compressed(gzip.open, ('.gz',))
- @dec.skipif(MAJVER == 2 or not HAS_BZ2)
+ @pytest.mark.skipif(not HAS_BZ2, reason="Needs bz2")
+ @pytest.mark.skipif(MAJVER == 2, reason="Needs Python version >= 3")
def test_compressed_gzip(self):
self.check_compressed(bz2.open, ('.bz2',))
- @dec.skipif(MAJVER == 2 or not HAS_LZMA)
+ @pytest.mark.skipif(not HAS_LZMA, reason="Needs lzma")
+ @pytest.mark.skipif(MAJVER == 2, reason="Needs Python version >= 3")
def test_compressed_gzip(self):
self.check_compressed(lzma.open, ('.xz', '.lzma'))
@@ -599,11 +623,11 @@ class LoadTxtBase(object):
class TestLoadTxt(LoadTxtBase):
loadfunc = staticmethod(np.loadtxt)
- def setUp(self):
+ def setup(self):
# lower chunksize for testing
self.orig_chunk = np.lib.npyio._loadtxt_chunksize
np.lib.npyio._loadtxt_chunksize = 1
- def tearDown(self):
+ def teardown(self):
np.lib.npyio._loadtxt_chunksize = self.orig_chunk
def test_record(self):
@@ -912,6 +936,26 @@ class TestLoadTxt(LoadTxtBase):
res = np.loadtxt(c, dtype=complex)
assert_equal(res, tgt)
+ def test_complex_misformatted(self):
+ # test for backward compatibility
+ # some complex formats used to generate x+-yj
+ a = np.zeros((2, 2), dtype=np.complex128)
+ re = np.pi
+ im = np.e
+ a[:] = re - 1.0j * im
+ c = BytesIO()
+ np.savetxt(c, a, fmt='%.16e')
+ c.seek(0)
+ txt = c.read()
+ c.seek(0)
+ # misformat the sign on the imaginary part, gh 7895
+ txt_bad = txt.replace(b'e+00-', b'e00+-')
+ assert_(txt_bad != txt)
+ c.write(txt_bad)
+ c.seek(0)
+ res = np.loadtxt(c, dtype=complex)
+ assert_equal(res, a)
+
def test_universal_newline(self):
with temppath() as name:
with open(name, 'w') as f:
@@ -1007,7 +1051,8 @@ class TestLoadTxt(LoadTxtBase):
dt = np.dtype([('x', int), ('a', 'S10'), ('y', int)])
np.loadtxt(c, delimiter=',', dtype=dt, comments=None) # Should succeed
- @dec.skipif(locale.getpreferredencoding() == 'ANSI_X3.4-1968')
+ @pytest.mark.skipif(locale.getpreferredencoding() == 'ANSI_X3.4-1968',
+ reason="Wrong preferred encoding")
def test_binary_load(self):
butf8 = b"5,6,7,\xc3\x95scarscar\n\r15,2,3,hello\n\r"\
b"20,2,3,\xc3\x95scar\n\r"
@@ -1073,6 +1118,13 @@ class Testfromregex(object):
x = np.fromregex(path, regexp, dt, encoding='UTF-8')
assert_array_equal(x, a)
+ def test_compiled_bytes(self):
+ regexp = re.compile(b'(\\d)')
+ c = BytesIO(b'123')
+ dt = [('num', np.float64)]
+ a = np.array([1, 2, 3], dtype=dt)
+ x = np.fromregex(c, regexp, dt)
+ assert_array_equal(x, a)
#####--------------------------------------------------------------------------
@@ -1265,6 +1317,13 @@ M 33 21.99
assert_(w[0].category is np.VisibleDeprecationWarning)
assert_equal(test, ctrl)
+ def test_names_and_comments_none(self):
+ # Test the case where names is True but comments is None (gh-10780)
+ data = TextIO('col1 col2\n 1 2\n 3 4')
+ test = np.genfromtxt(data, dtype=(int, int), comments=None, names=True)
+ control = np.array([(1, 2), (3, 4)], dtype=[('col1', int), ('col2', int)])
+ assert_equal(test, control)
+
def test_autonames_and_usecols(self):
# Tests names and usecols
data = TextIO('A B C D\n aaaa 121 45 9.1')
@@ -1977,12 +2036,11 @@ M 33 21.99
# encoding of io.open. Will need to change this for PyTest, maybe
# using pytest.mark.xfail(raises=***).
try:
- import locale
encoding = locale.getpreferredencoding()
utf8.encode(encoding)
except (UnicodeError, ImportError):
raise SkipTest('Skipping test_utf8_file_nodtype_unicode, '
- 'unable to encode utf8 in preferred encoding')
+ 'unable to encode utf8 in preferred encoding')
with temppath() as path:
with io.open(path, "wt") as f:
@@ -2182,9 +2240,9 @@ M 33 21.99
assert_equal(test['f2'], 1024)
+@pytest.mark.skipif(Path is None, reason="No pathlib.Path")
class TestPathUsage(object):
# Test that pathlib.Path can be used
- @dec.skipif(Path is None, "No pathlib.Path")
def test_loadtxt(self):
with temppath(suffix='.txt') as path:
path = Path(path)
@@ -2193,7 +2251,6 @@ class TestPathUsage(object):
x = np.loadtxt(path)
assert_array_equal(x, a)
- @dec.skipif(Path is None, "No pathlib.Path")
def test_save_load(self):
# Test that pathlib.Path instances can be used with savez.
with temppath(suffix='.npy') as path:
@@ -2203,7 +2260,6 @@ class TestPathUsage(object):
data = np.load(path)
assert_array_equal(data, a)
- @dec.skipif(Path is None, "No pathlib.Path")
def test_savez_load(self):
# Test that pathlib.Path instances can be used with savez.
with temppath(suffix='.npz') as path:
@@ -2211,8 +2267,7 @@ class TestPathUsage(object):
np.savez(path, lab='place holder')
with np.load(path) as data:
assert_array_equal(data['lab'], 'place holder')
-
- @dec.skipif(Path is None, "No pathlib.Path")
+
def test_savez_compressed_load(self):
# Test that pathlib.Path instances can be used with savez.
with temppath(suffix='.npz') as path:
@@ -2222,7 +2277,6 @@ class TestPathUsage(object):
assert_array_equal(data['lab'], 'place holder')
data.close()
- @dec.skipif(Path is None, "No pathlib.Path")
def test_genfromtxt(self):
with temppath(suffix='.txt') as path:
path = Path(path)
@@ -2231,7 +2285,6 @@ class TestPathUsage(object):
data = np.genfromtxt(path)
assert_array_equal(a, data)
- @dec.skipif(Path is None, "No pathlib.Path")
def test_ndfromtxt(self):
# Test outputting a standard ndarray
with temppath(suffix='.txt') as path:
@@ -2243,7 +2296,6 @@ class TestPathUsage(object):
test = np.ndfromtxt(path, dtype=int)
assert_array_equal(test, control)
- @dec.skipif(Path is None, "No pathlib.Path")
def test_mafromtxt(self):
# From `test_fancy_dtype_alt` above
with temppath(suffix='.txt') as path:
@@ -2255,7 +2307,6 @@ class TestPathUsage(object):
control = ma.array([(1.0, 2.0, 3.0), (4.0, 5.0, 6.0)])
assert_equal(test, control)
- @dec.skipif(Path is None, "No pathlib.Path")
def test_recfromtxt(self):
with temppath(suffix='.txt') as path:
path = Path(path)
@@ -2269,7 +2320,6 @@ class TestPathUsage(object):
assert_(isinstance(test, np.recarray))
assert_equal(test, control)
- @dec.skipif(Path is None, "No pathlib.Path")
def test_recfromcsv(self):
with temppath(suffix='.txt') as path:
path = Path(path)
@@ -2357,6 +2407,7 @@ def test_npzfile_dict():
assert_('x' in z.keys())
+@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
def test_load_refcount():
# Check that objects returned by np.load are directly freed based on
# their refcount, rather than needing the gc to collect them.
@@ -2365,17 +2416,5 @@ def test_load_refcount():
np.savez(f, [1, 2, 3])
f.seek(0)
- assert_(gc.isenabled())
- gc.disable()
- try:
- gc.collect()
+ with assert_no_gc_cycles():
np.load(f)
- # gc.collect returns the number of unreachable objects in cycles that
- # were found -- we are checking that no cycles were created by np.load
- n_objects_in_cycles = gc.collect()
- finally:
- gc.enable()
- assert_equal(n_objects_in_cycles, 0)
-
-if __name__ == "__main__":
- run_module_suite()
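
``assert_no_gc_cycles`` (added to ``numpy.testing`` for 1.15) packages the disable/collect/re-enable dance the old test did by hand, and works both on a callable and as a context manager:

    from numpy.testing import assert_no_gc_cycles

    def make_list():
        return [1, 2, 3]            # creates no self-referential objects

    assert_no_gc_cycles(make_list)  # callable form
    with assert_no_gc_cycles():     # context-manager form
        make_list()
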
diff --git a/numpy/lib/tests/test_mixins.py b/numpy/lib/tests/test_mixins.py
index 94f06c336..f2d915502 100644
--- a/numpy/lib/tests/test_mixins.py
+++ b/numpy/lib/tests/test_mixins.py
@@ -5,9 +5,7 @@ import operator
import sys
import numpy as np
-from numpy.testing import (
- run_module_suite, assert_, assert_equal, assert_raises
- )
+from numpy.testing import assert_, assert_equal, assert_raises
PY2 = sys.version_info.major < 3
@@ -213,7 +211,3 @@ class TestNDArrayOperatorsMixin(object):
np.frexp(ArrayLike(2 ** -3)), expected)
_assert_equal_type_and_value(
np.frexp(ArrayLike(np.array(2 ** -3))), expected)
-
-
-if __name__ == "__main__":
- run_module_suite()
diff --git a/numpy/lib/tests/test_nanfunctions.py b/numpy/lib/tests/test_nanfunctions.py
index 3d362fc6e..504372faf 100644
--- a/numpy/lib/tests/test_nanfunctions.py
+++ b/numpy/lib/tests/test_nanfunctions.py
@@ -4,8 +4,8 @@ import warnings
import numpy as np
from numpy.testing import (
- run_module_suite, assert_, assert_equal, assert_almost_equal,
- assert_no_warnings, assert_raises, assert_array_equal, suppress_warnings
+ assert_, assert_equal, assert_almost_equal, assert_no_warnings,
+ assert_raises, assert_array_equal, suppress_warnings
)
@@ -113,42 +113,46 @@ class TestNanFunctions_MinMax(object):
for f in self.nanfuncs:
assert_(f(0.) == 0.)
- def test_matrices(self):
+ def test_subclass(self):
+ class MyNDArray(np.ndarray):
+ pass
+
# Check that it works and that type and
# shape are preserved
- mat = np.matrix(np.eye(3))
+ mine = np.eye(3).view(MyNDArray)
for f in self.nanfuncs:
- res = f(mat, axis=0)
- assert_(isinstance(res, np.matrix))
- assert_(res.shape == (1, 3))
- res = f(mat, axis=1)
- assert_(isinstance(res, np.matrix))
- assert_(res.shape == (3, 1))
- res = f(mat)
- assert_(np.isscalar(res))
+ res = f(mine, axis=0)
+ assert_(isinstance(res, MyNDArray))
+ assert_(res.shape == (3,))
+ res = f(mine, axis=1)
+ assert_(isinstance(res, MyNDArray))
+ assert_(res.shape == (3,))
+ res = f(mine)
+ assert_(res.shape == ())
+
# check that rows of nan are dealt with for subclasses (#4628)
- mat[1] = np.nan
+ mine[1] = np.nan
for f in self.nanfuncs:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
- res = f(mat, axis=0)
- assert_(isinstance(res, np.matrix))
+ res = f(mine, axis=0)
+ assert_(isinstance(res, MyNDArray))
assert_(not np.any(np.isnan(res)))
assert_(len(w) == 0)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
- res = f(mat, axis=1)
- assert_(isinstance(res, np.matrix))
- assert_(np.isnan(res[1, 0]) and not np.isnan(res[0, 0])
- and not np.isnan(res[2, 0]))
+ res = f(mine, axis=1)
+ assert_(isinstance(res, MyNDArray))
+ assert_(np.isnan(res[1]) and not np.isnan(res[0])
+ and not np.isnan(res[2]))
assert_(len(w) == 1, 'no warning raised')
assert_(issubclass(w[0].category, RuntimeWarning))
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
- res = f(mat)
- assert_(np.isscalar(res))
+ res = f(mine)
+ assert_(res.shape == ())
assert_(res != np.nan)
assert_(len(w) == 0)
@@ -209,19 +213,22 @@ class TestNanFunctions_ArgminArgmax(object):
for f in self.nanfuncs:
assert_(f(0.) == 0.)
- def test_matrices(self):
+ def test_subclass(self):
+ class MyNDArray(np.ndarray):
+ pass
+
# Check that it works and that type and
# shape are preserved
- mat = np.matrix(np.eye(3))
+ mine = np.eye(3).view(MyNDArray)
for f in self.nanfuncs:
- res = f(mat, axis=0)
- assert_(isinstance(res, np.matrix))
- assert_(res.shape == (1, 3))
- res = f(mat, axis=1)
- assert_(isinstance(res, np.matrix))
- assert_(res.shape == (3, 1))
- res = f(mat)
- assert_(np.isscalar(res))
+ res = f(mine, axis=0)
+ assert_(isinstance(res, MyNDArray))
+ assert_(res.shape == (3,))
+ res = f(mine, axis=1)
+ assert_(isinstance(res, MyNDArray))
+ assert_(res.shape == (3,))
+ res = f(mine)
+ assert_(res.shape == ())
class TestNanFunctions_IntTypes(object):
@@ -381,19 +388,27 @@ class SharedNanFunctionsTestsMixin(object):
for f in self.nanfuncs:
assert_(f(0.) == 0.)
- def test_matrices(self):
+ def test_subclass(self):
+ class MyNDArray(np.ndarray):
+ pass
+
# Check that it works and that type and
# shape are preserved
- mat = np.matrix(np.eye(3))
+ array = np.eye(3)
+ mine = array.view(MyNDArray)
for f in self.nanfuncs:
- res = f(mat, axis=0)
- assert_(isinstance(res, np.matrix))
- assert_(res.shape == (1, 3))
- res = f(mat, axis=1)
- assert_(isinstance(res, np.matrix))
- assert_(res.shape == (3, 1))
- res = f(mat)
- assert_(np.isscalar(res))
+ expected_shape = f(array, axis=0).shape
+ res = f(mine, axis=0)
+ assert_(isinstance(res, MyNDArray))
+ assert_(res.shape == expected_shape)
+ expected_shape = f(array, axis=1).shape
+ res = f(mine, axis=1)
+ assert_(isinstance(res, MyNDArray))
+ assert_(res.shape == expected_shape)
+ expected_shape = f(array).shape
+ res = f(mine)
+ assert_(isinstance(res, MyNDArray))
+ assert_(res.shape == expected_shape)
class TestNanFunctions_SumProd(SharedNanFunctionsTestsMixin):
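
The mixin variant above goes one step further and derives the expected shape from the same call on a plain ndarray, so a single test body stays valid for both reducing and shape-preserving members of the shared set. The pattern, sketched with two arbitrary picks:

    import numpy as np

    class MyNDArray(np.ndarray):
        pass

    array = np.eye(3)
    mine = array.view(MyNDArray)

    for f in (np.nanmean, np.nancumsum):
        expected_shape = f(array, axis=0).shape   # (3,) vs (3, 3)
        res = f(mine, axis=0)
        assert isinstance(res, MyNDArray)
        assert res.shape == expected_shape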
@@ -481,18 +496,6 @@ class TestNanFunctions_CumSumProd(SharedNanFunctionsTestsMixin):
res = f(d, axis=axis)
assert_equal(res.shape, (3, 5, 7, 11))
- def test_matrices(self):
- # Check that it works and that type and
- # shape are preserved
- mat = np.matrix(np.eye(3))
- for f in self.nanfuncs:
- for axis in np.arange(2):
- res = f(mat, axis=axis)
- assert_(isinstance(res, np.matrix))
- assert_(res.shape == (3, 3))
- res = f(mat)
- assert_(res.shape == (1, 3*3))
-
def test_result_values(self):
for axis in (-2, -1, 0, 1, None):
tgt = np.cumprod(_ndat_ones, axis=axis)
@@ -888,5 +891,37 @@ class TestNanFunctions_Percentile(object):
assert_equal(np.nanpercentile(megamat, perc, axis=(1, 2)).shape, (2, 3, 6))
-if __name__ == "__main__":
- run_module_suite()
+class TestNanFunctions_Quantile(object):
+ # most of this is already tested by TestPercentile
+
+ def test_regression(self):
+ ar = np.arange(24).reshape(2, 3, 4).astype(float)
+ ar[0][1] = np.nan
+
+ assert_equal(np.nanquantile(ar, q=0.5), np.nanpercentile(ar, q=50))
+ assert_equal(np.nanquantile(ar, q=0.5, axis=0),
+ np.nanpercentile(ar, q=50, axis=0))
+ assert_equal(np.nanquantile(ar, q=0.5, axis=1),
+ np.nanpercentile(ar, q=50, axis=1))
+ assert_equal(np.nanquantile(ar, q=[0.5], axis=1),
+ np.nanpercentile(ar, q=[50], axis=1))
+ assert_equal(np.nanquantile(ar, q=[0.25, 0.5, 0.75], axis=1),
+ np.nanpercentile(ar, q=[25, 50, 75], axis=1))
+
+ def test_basic(self):
+ x = np.arange(8) * 0.5
+ assert_equal(np.nanquantile(x, 0), 0.)
+ assert_equal(np.nanquantile(x, 1), 3.5)
+ assert_equal(np.nanquantile(x, 0.5), 1.75)
+
+ def test_no_p_overwrite(self):
+ # this is worth retesting, because quantile does not make a copy
+ p0 = np.array([0, 0.75, 0.25, 0.5, 1.0])
+ p = p0.copy()
+ np.nanquantile(np.arange(100.), p, interpolation="midpoint")
+ assert_array_equal(p, p0)
+
+ p0 = p0.tolist()
+ p = p.tolist()
+ np.nanquantile(np.arange(100.), p, interpolation="midpoint")
+ assert_array_equal(p, p0)
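
np.nanquantile is the 0-1 scaled counterpart of np.nanpercentile, which is exactly the identity the regression test pins down. A quick illustration:

    import numpy as np

    a = np.arange(24, dtype=float).reshape(2, 3, 4)
    a[0, 1] = np.nan                   # NaNs are skipped, not propagated

    # quantile takes q in [0, 1]; percentile takes q in [0, 100]
    assert np.nanquantile(a, 0.5) == np.nanpercentile(a, 50)
    assert not np.isnan(np.nanquantile(a, 0.5))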
diff --git a/numpy/lib/tests/test_packbits.py b/numpy/lib/tests/test_packbits.py
index 965cbf67c..fde5c37f2 100644
--- a/numpy/lib/tests/test_packbits.py
+++ b/numpy/lib/tests/test_packbits.py
@@ -1,9 +1,7 @@
from __future__ import division, absolute_import, print_function
import numpy as np
-from numpy.testing import (
- assert_array_equal, assert_equal, assert_raises, run_module_suite
-)
+from numpy.testing import assert_array_equal, assert_equal, assert_raises
def test_packbits():
@@ -268,7 +266,3 @@ def test_unpackbits_large():
assert_array_equal(np.packbits(np.unpackbits(d, axis=1), axis=1), d)
d = d.T.copy()
assert_array_equal(np.packbits(np.unpackbits(d, axis=0), axis=0), d)
-
-
-if __name__ == "__main__":
- run_module_suite()
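
For context, the round-trip property this module keeps exercising: packbits folds a uint8 array of 0s and 1s into bytes (big-endian within each byte), and unpackbits inverts it exactly when the bit count is a multiple of 8. A small sketch:

    import numpy as np

    bits = np.array([1, 0, 1, 1, 0, 0, 1, 0], dtype=np.uint8)
    packed = np.packbits(bits)
    assert packed[0] == 0b10110010                       # big-endian bits
    assert np.array_equal(np.unpackbits(packed), bits)   # exact round trip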
diff --git a/numpy/lib/tests/test_polynomial.py b/numpy/lib/tests/test_polynomial.py
index 03915cead..7f6fca4a4 100644
--- a/numpy/lib/tests/test_polynomial.py
+++ b/numpy/lib/tests/test_polynomial.py
@@ -1,15 +1,13 @@
-from __future__ import division, absolute_import, print_function
-
'''
>>> p = np.poly1d([1.,2,3])
>>> p
-poly1d([ 1., 2., 3.])
+poly1d([1., 2., 3.])
>>> print(p)
2
1 x + 2 x + 3
>>> q = np.poly1d([3.,2,1])
>>> q
-poly1d([ 3., 2., 1.])
+poly1d([3., 2., 1.])
>>> print(q)
2
3 x + 2 x + 1
@@ -30,23 +28,23 @@ poly1d([ 3., 2., 1.])
86.0
>>> p * q
-poly1d([ 3., 8., 14., 8., 3.])
+poly1d([ 3., 8., 14., 8., 3.])
>>> p / q
-(poly1d([ 0.33333333]), poly1d([ 1.33333333, 2.66666667]))
+(poly1d([0.33333333]), poly1d([1.33333333, 2.66666667]))
>>> p + q
-poly1d([ 4., 4., 4.])
+poly1d([4., 4., 4.])
>>> p - q
poly1d([-2., 0., 2.])
>>> p ** 4
-poly1d([ 1., 8., 36., 104., 214., 312., 324., 216., 81.])
+poly1d([ 1., 8., 36., 104., 214., 312., 324., 216., 81.])
>>> p(q)
-poly1d([ 9., 12., 16., 8., 6.])
+poly1d([ 9., 12., 16., 8., 6.])
>>> q(p)
-poly1d([ 3., 12., 32., 40., 34.])
+poly1d([ 3., 12., 32., 40., 34.])
>>> np.asarray(p)
-array([ 1., 2., 3.])
+array([1., 2., 3.])
>>> len(p)
2
@@ -54,16 +52,16 @@ array([ 1., 2., 3.])
(3.0, 2.0, 1.0, 0)
>>> p.integ()
-poly1d([ 0.33333333, 1. , 3. , 0. ])
+poly1d([0.33333333, 1. , 3. , 0. ])
>>> p.integ(1)
-poly1d([ 0.33333333, 1. , 3. , 0. ])
+poly1d([0.33333333, 1. , 3. , 0. ])
>>> p.integ(5)
-poly1d([ 0.00039683, 0.00277778, 0.025 , 0. , 0. ,
- 0. , 0. , 0. ])
+poly1d([0.00039683, 0.00277778, 0.025 , 0. , 0. ,
+ 0. , 0. , 0. ])
>>> p.deriv()
-poly1d([ 2., 2.])
+poly1d([2., 2.])
>>> p.deriv(2)
-poly1d([ 2.])
+poly1d([2.])
>>> q = np.poly1d([1.,2,3], variable='y')
>>> print(q)
@@ -75,13 +73,15 @@ poly1d([ 2.])
1 lambda + 2 lambda + 3
>>> np.polydiv(np.poly1d([1,0,-1]), np.poly1d([1,1]))
-(poly1d([ 1., -1.]), poly1d([ 0.]))
+(poly1d([ 1., -1.]), poly1d([0.]))
'''
+from __future__ import division, absolute_import, print_function
+
import numpy as np
from numpy.testing import (
- run_module_suite, assert_, assert_equal, assert_array_equal,
- assert_almost_equal, assert_array_almost_equal, assert_raises, rundocs
+ assert_, assert_equal, assert_array_equal, assert_almost_equal,
+ assert_array_almost_equal, assert_raises, rundocs
)
@@ -243,7 +243,3 @@ class TestDocs(object):
p.coeffs[2] += 10
assert_equal(p.coeffs, [1, 2, 3])
-
-
-if __name__ == "__main__":
- run_module_suite()
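
The doctest churn above is not a poly1d behavior change: NumPy 1.14 stopped reserving a space for the sign in float array reprs, and these expected outputs are simply being resynced. The old output can still be reproduced for comparison (sketch):

    import numpy as np

    p = np.poly1d([1., 2, 3])
    print(repr(p))                      # poly1d([1., 2., 3.])

    np.set_printoptions(legacy='1.13')  # opt back into the pre-1.14 repr
    print(repr(p))                      # poly1d([ 1.,  2.,  3.])
    np.set_printoptions(legacy=False)   # restore the current behavior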
diff --git a/numpy/lib/tests/test_recfunctions.py b/numpy/lib/tests/test_recfunctions.py
index bc9f8d7b6..219ae24fa 100644
--- a/numpy/lib/tests/test_recfunctions.py
+++ b/numpy/lib/tests/test_recfunctions.py
@@ -1,12 +1,12 @@
from __future__ import division, absolute_import, print_function
+import pytest
+
import numpy as np
import numpy.ma as ma
from numpy.ma.mrecords import MaskedRecords
from numpy.ma.testutils import assert_equal
-from numpy.testing import (
- run_module_suite, assert_, assert_raises, dec
- )
+from numpy.testing import assert_, assert_raises
from numpy.lib.recfunctions import (
drop_fields, rename_fields, get_fieldstructure, recursive_fill_fields,
find_duplicates, merge_arrays, append_fields, stack_arrays, join_by
@@ -687,7 +687,7 @@ class TestJoinBy(object):
b = np.ones(3, dtype=[('c', 'u1'), ('b', 'f4'), ('a', 'i4')])
assert_raises(ValueError, join_by, ['a', 'b', 'b'], a, b)
- @dec.knownfailureif(True)
+ @pytest.mark.xfail(reason="See comment at gh-9343")
def test_same_name_different_dtypes_key(self):
a_dtype = np.dtype([('key', 'S5'), ('value', '<f4')])
b_dtype = np.dtype([('key', 'S10'), ('value', '<f4')])
@@ -829,6 +829,3 @@ class TestAppendFieldsObj(object):
control = np.array([(obj, 1.0, 10), (obj, 2.0, 20)],
dtype=[('A', object), ('B', float), ('C', int)])
assert_equal(test, control)
-
-if __name__ == '__main__':
- run_module_suite()
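
dec.knownfailureif came from numpy.testing's nose decorators; its pytest replacement is mark.xfail, which carries a reason string into the test report. The idiom, sketched on a dummy test:

    import pytest

    @pytest.mark.xfail(reason="See comment at gh-9343")
    def test_known_broken():
        # Still runs, but an expected failure is reported as XFAIL
        # rather than FAIL; an unexpected pass surfaces as XPASS,
        # so accidental fixes get noticed.
        assert 1 == 2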
diff --git a/numpy/lib/tests/test_regression.py b/numpy/lib/tests/test_regression.py
index d96d3422d..4c46bc46b 100644
--- a/numpy/lib/tests/test_regression.py
+++ b/numpy/lib/tests/test_regression.py
@@ -5,8 +5,8 @@ import sys
import numpy as np
from numpy.testing import (
- run_module_suite, assert_, assert_equal, assert_array_equal,
- assert_array_almost_equal, assert_raises, _assert_valid_refcount,
+ assert_, assert_equal, assert_array_equal, assert_array_almost_equal,
+ assert_raises, _assert_valid_refcount,
)
from numpy.compat import unicode
@@ -252,7 +252,3 @@ class TestRegression(object):
raise AssertionError()
finally:
out.close()
-
-
-if __name__ == "__main__":
- run_module_suite()
diff --git a/numpy/lib/tests/test_shape_base.py b/numpy/lib/tests/test_shape_base.py
index d0afeefd9..a35d90b70 100644
--- a/numpy/lib/tests/test_shape_base.py
+++ b/numpy/lib/tests/test_shape_base.py
@@ -8,8 +8,7 @@ from numpy.lib.shape_base import (
vsplit, dstack, column_stack, kron, tile, expand_dims,
)
from numpy.testing import (
- run_module_suite, assert_, assert_equal, assert_array_equal, assert_raises,
- assert_warns
+ assert_, assert_equal, assert_array_equal, assert_raises, assert_warns
)
@@ -30,19 +29,21 @@ class TestApplyAlongAxis(object):
[[27, 30, 33], [36, 39, 42], [45, 48, 51]])
def test_preserve_subclass(self):
- # this test is particularly malicious because matrix
- # refuses to become 1d
def double(row):
return row * 2
- m = np.matrix([[0, 1], [2, 3]])
- expected = np.matrix([[0, 2], [4, 6]])
+
+ class MyNDArray(np.ndarray):
+ pass
+
+ m = np.array([[0, 1], [2, 3]]).view(MyNDArray)
+ expected = np.array([[0, 2], [4, 6]]).view(MyNDArray)
result = apply_along_axis(double, 0, m)
- assert_(isinstance(result, np.matrix))
+ assert_(isinstance(result, MyNDArray))
assert_array_equal(result, expected)
result = apply_along_axis(double, 1, m)
- assert_(isinstance(result, np.matrix))
+ assert_(isinstance(result, MyNDArray))
assert_array_equal(result, expected)
def test_subclass(self):
@@ -80,7 +81,7 @@ class TestApplyAlongAxis(object):
def test_axis_insertion(self, cls=np.ndarray):
def f1to2(x):
- """produces an assymmetric non-square matrix from x"""
+ """produces an asymmetric non-square matrix from x"""
assert_equal(x.ndim, 1)
return (x[::-1] * x[1:,None]).view(cls)
@@ -124,7 +125,7 @@ class TestApplyAlongAxis(object):
def test_axis_insertion_ma(self):
def f1to2(x):
- """produces an assymmetric non-square matrix from x"""
+ """produces an asymmetric non-square matrix from x"""
assert_equal(x.ndim, 1)
res = x[::-1] * x[1:,None]
return np.ma.masked_where(res%5==0, res)
@@ -493,16 +494,10 @@ class TestSqueeze(object):
class TestKron(object):
def test_return_type(self):
- a = np.ones([2, 2])
- m = np.asmatrix(a)
- assert_equal(type(kron(a, a)), np.ndarray)
- assert_equal(type(kron(m, m)), np.matrix)
- assert_equal(type(kron(a, m)), np.matrix)
- assert_equal(type(kron(m, a)), np.matrix)
-
class myarray(np.ndarray):
__array_priority__ = 0.0
+ a = np.ones([2, 2])
ma = myarray(a.shape, a.dtype, a.data)
assert_equal(type(kron(a, a)), np.ndarray)
assert_equal(type(kron(ma, ma)), myarray)
@@ -569,7 +564,3 @@ class TestMayShareMemory(object):
def compare_results(res, desired):
for i in range(len(desired)):
assert_array_equal(res[i], desired[i])
-
-
-if __name__ == "__main__":
- run_module_suite()
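
The deleted remark about matrix "refusing to become 1d" explains the old test's choice of subclass; the property that actually matters, and that survives the rewrite, is that apply_along_axis wraps its output back into the input's type. Minimal sketch:

    import numpy as np

    class MyNDArray(np.ndarray):
        pass

    m = np.array([[0, 1], [2, 3]]).view(MyNDArray)
    result = np.apply_along_axis(lambda row: row * 2, 0, m)

    assert isinstance(result, MyNDArray)            # subclass survives
    assert np.array_equal(result, [[0, 2], [4, 6]])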
diff --git a/numpy/lib/tests/test_stride_tricks.py b/numpy/lib/tests/test_stride_tricks.py
index 0599324d7..3c2ca8b87 100644
--- a/numpy/lib/tests/test_stride_tricks.py
+++ b/numpy/lib/tests/test_stride_tricks.py
@@ -1,14 +1,13 @@
from __future__ import division, absolute_import, print_function
import numpy as np
-from numpy.core.test_rational import rational
+from numpy.core._rational_tests import rational
from numpy.testing import (
- run_module_suite, assert_equal, assert_array_equal,
- assert_raises, assert_
+ assert_equal, assert_array_equal, assert_raises, assert_
)
from numpy.lib.stride_tricks import (
as_strided, broadcast_arrays, _broadcast_shape, broadcast_to
-)
+ )
def assert_shapes_correct(input_shapes, expected_shape):
# Broadcast a list of arrays with the given input shapes and check the
@@ -432,7 +431,3 @@ def test_reference_types():
actual, _ = broadcast_arrays(input_array, np.ones(3))
assert_array_equal(expected, actual)
-
-
-if __name__ == "__main__":
- run_module_suite()
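
Beyond the rename of the private rational test dtype (the leading underscore in numpy.core._rational_tests marks it as internal scaffolding), the behavior this file guards is unchanged; the reference-types test above, for instance, checks that broadcasting object arrays hands back the same references rather than copies. Sketched under that reading:

    import numpy as np
    from numpy.lib.stride_tricks import broadcast_arrays

    obj = {'a': 1}
    input_array = np.array([obj] * 3, dtype=object)
    actual, _ = broadcast_arrays(input_array, np.ones(3))

    assert np.array_equal(actual, input_array)
    assert actual[0] is obj      # a view onto the same objects, no copy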
diff --git a/numpy/lib/tests/test_twodim_base.py b/numpy/lib/tests/test_twodim_base.py
index 8183f7ca6..d3a072af3 100644
--- a/numpy/lib/tests/test_twodim_base.py
+++ b/numpy/lib/tests/test_twodim_base.py
@@ -4,14 +4,14 @@
from __future__ import division, absolute_import, print_function
from numpy.testing import (
- run_module_suite, assert_equal, assert_array_equal, assert_array_max_ulp,
+ assert_equal, assert_array_equal, assert_array_max_ulp,
assert_array_almost_equal, assert_raises,
)
from numpy import (
- arange, add, fliplr, flipud, zeros, ones, eye, array, diag,
- histogram2d, tri, mask_indices, triu_indices, triu_indices_from,
- tril_indices, tril_indices_from, vander,
+ arange, add, fliplr, flipud, zeros, ones, eye, array, diag, histogram2d,
+ tri, mask_indices, triu_indices, triu_indices_from, tril_indices,
+ tril_indices_from, vander,
)
import numpy as np
@@ -244,32 +244,32 @@ class TestHistogram2d(object):
def test_binparameter_combination(self):
x = array(
- [0, 0.09207008, 0.64575234, 0.12875982, 0.47390599,
+ [0, 0.09207008, 0.64575234, 0.12875982, 0.47390599,
0.59944483, 1])
y = array(
- [0, 0.14344267, 0.48988575, 0.30558665, 0.44700682,
+ [0, 0.14344267, 0.48988575, 0.30558665, 0.44700682,
0.15886423, 1])
edges = (0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1)
H, xe, ye = histogram2d(x, y, (edges, 4))
answer = array(
- [[ 2., 0., 0., 0.],
- [ 0., 1., 0., 0.],
- [ 0., 0., 0., 0.],
- [ 0., 0., 0., 0.],
- [ 0., 1., 0., 0.],
- [ 1., 0., 0., 0.],
- [ 0., 1., 0., 0.],
- [ 0., 0., 0., 0.],
- [ 0., 0., 0., 0.],
- [ 0., 0., 0., 1.]])
+ [[2., 0., 0., 0.],
+ [0., 1., 0., 0.],
+ [0., 0., 0., 0.],
+ [0., 0., 0., 0.],
+ [0., 1., 0., 0.],
+ [1., 0., 0., 0.],
+ [0., 1., 0., 0.],
+ [0., 0., 0., 0.],
+ [0., 0., 0., 0.],
+ [0., 0., 0., 1.]])
assert_array_equal(H, answer)
assert_array_equal(ye, array([0., 0.25, 0.5, 0.75, 1]))
H, xe, ye = histogram2d(x, y, (4, edges))
answer = array(
- [[ 1., 1., 0., 1., 0., 0., 0., 0., 0., 0.],
- [ 0., 0., 0., 0., 1., 0., 0., 0., 0., 0.],
- [ 0., 1., 0., 0., 1., 0., 0., 0., 0., 0.],
- [ 0., 0., 0., 0., 0., 0., 0., 0., 0., 1.]])
+ [[1., 1., 0., 1., 0., 0., 0., 0., 0., 0.],
+ [0., 0., 0., 0., 1., 0., 0., 0., 0., 0.],
+ [0., 1., 0., 0., 1., 0., 0., 0., 0., 0.],
+ [0., 0., 0., 0., 0., 0., 0., 0., 0., 1.]])
assert_array_equal(H, answer)
assert_array_equal(xe, array([0., 0.25, 0.5, 0.75, 1]))
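
As the assertions above rely on, histogram2d accepts a mixed bin specification: explicit edges for one axis and a plain bin count for the other, whose edges are then computed from the data range. Compact sketch:

    import numpy as np

    x = np.array([0.1, 0.4, 0.6, 0.9])
    y = np.array([0.0, 0.3, 0.7, 1.0])

    edges = np.linspace(0, 1, 11)            # explicit edges for x
    H, xe, ye = np.histogram2d(x, y, bins=(edges, 4))

    assert H.shape == (10, 4)                # 10 x-bins by 4 y-bins
    assert np.array_equal(xe, edges)         # x edges used verbatim
    assert np.allclose(ye, [0., 0.25, 0.5, 0.75, 1.])  # y edges derived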
@@ -288,11 +288,11 @@ def test_tril_triu_ndim2():
a = np.ones((2, 2), dtype=dtype)
b = np.tril(a)
c = np.triu(a)
- yield assert_array_equal, b, [[1, 0], [1, 1]]
- yield assert_array_equal, c, b.T
+ assert_array_equal(b, [[1, 0], [1, 1]])
+ assert_array_equal(c, b.T)
# should return the same dtype as the original array
- yield assert_equal, b.dtype, a.dtype
- yield assert_equal, c.dtype, a.dtype
+ assert_equal(b.dtype, a.dtype)
+ assert_equal(c.dtype, a.dtype)
def test_tril_triu_ndim3():
@@ -314,10 +314,11 @@ def test_tril_triu_ndim3():
], dtype=dtype)
a_triu_observed = np.triu(a)
a_tril_observed = np.tril(a)
- yield assert_array_equal, a_triu_observed, a_triu_desired
- yield assert_array_equal, a_tril_observed, a_tril_desired
- yield assert_equal, a_triu_observed.dtype, a.dtype
- yield assert_equal, a_tril_observed.dtype, a.dtype
+ assert_array_equal(a_triu_observed, a_triu_desired)
+ assert_array_equal(a_tril_observed, a_tril_desired)
+ assert_equal(a_triu_observed.dtype, a.dtype)
+ assert_equal(a_tril_observed.dtype, a.dtype)
+
def test_tril_triu_with_inf():
# Issue 4859
@@ -484,12 +485,12 @@ class TestVander(object):
[16, -8, 4, -2, 1],
[81, 27, 9, 3, 1]])
# Check default value of N:
- yield (assert_array_equal, v, powers[:, 1:])
+ assert_array_equal(v, powers[:, 1:])
# Check a range of N values, including 0 and 5 (greater than default)
m = powers.shape[1]
for n in range(6):
v = vander(c, N=n)
- yield (assert_array_equal, v, powers[:, m-n:m])
+ assert_array_equal(v, powers[:, m-n:m])
def test_dtypes(self):
c = array([11, -12, 13], dtype=np.int8)
@@ -497,7 +498,7 @@ class TestVander(object):
expected = np.array([[121, 11, 1],
[144, -12, 1],
[169, 13, 1]])
- yield (assert_array_equal, v, expected)
+ assert_array_equal(v, expected)
c = array([1.0+1j, 1.0-1j])
v = vander(c, N=3)
@@ -506,8 +507,4 @@ class TestVander(object):
# The data is floating point, but the values are small integers,
# so assert_array_equal *should* be safe here (rather than, say,
# assert_array_almost_equal).
- yield (assert_array_equal, v, expected)
-
-
-if __name__ == "__main__":
- run_module_suite()
+ assert_array_equal(v, expected)
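
The remaining churn in this file converts nose-style yield tests into direct calls; pytest does not run yield-based test generators, so each yielded tuple becomes an inline assertion:

    # nose generator style, no longer collected:
    #     yield assert_array_equal, v, expected
    # pytest style, as rewritten above:
    #     assert_array_equal(v, expected)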
diff --git a/numpy/lib/tests/test_type_check.py b/numpy/lib/tests/test_type_check.py
index ce8ef2f15..2982ca31a 100644
--- a/numpy/lib/tests/test_type_check.py
+++ b/numpy/lib/tests/test_type_check.py
@@ -3,7 +3,7 @@ from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.compat import long
from numpy.testing import (
- assert_, assert_equal, assert_array_equal, run_module_suite, assert_raises
+ assert_, assert_equal, assert_array_equal, assert_raises
)
from numpy.lib.type_check import (
common_type, mintypecode, isreal, iscomplex, isposinf, isneginf,
@@ -440,7 +440,3 @@ class TestArrayConversion(object):
# other numpy function
assert_raises(TypeError,
asfarray, np.array([1, 2, 3]), dtype=np.array(1.0))
-
-
-if __name__ == "__main__":
- run_module_suite()
diff --git a/numpy/lib/tests/test_ufunclike.py b/numpy/lib/tests/test_ufunclike.py
index 128ce37ab..ad006fe17 100644
--- a/numpy/lib/tests/test_ufunclike.py
+++ b/numpy/lib/tests/test_ufunclike.py
@@ -4,7 +4,7 @@ import numpy as np
import numpy.core as nx
import numpy.lib.ufunclike as ufl
from numpy.testing import (
- run_module_suite, assert_, assert_equal, assert_array_equal, assert_warns
+ assert_, assert_equal, assert_array_equal, assert_warns
)
@@ -91,6 +91,3 @@ class TestUfunclike(object):
out = np.array(0.0)
actual = np.fix(x, out=out)
assert_(actual is out)
-
-if __name__ == "__main__":
- run_module_suite()
diff --git a/numpy/lib/tests/test_utils.py b/numpy/lib/tests/test_utils.py
index 92bcdc238..c27c3cbf5 100644
--- a/numpy/lib/tests/test_utils.py
+++ b/numpy/lib/tests/test_utils.py
@@ -1,10 +1,10 @@
from __future__ import division, absolute_import, print_function
import sys
+import pytest
+
from numpy.core import arange
-from numpy.testing import (
- run_module_suite, assert_, assert_equal, assert_raises_regex, dec
- )
+from numpy.testing import assert_, assert_equal, assert_raises_regex
from numpy.lib import deprecate
import numpy.lib.utils as utils
@@ -14,7 +14,7 @@ else:
from StringIO import StringIO
-@dec.skipif(sys.flags.optimize == 2)
+@pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO")
def test_lookfor():
out = StringIO()
utils.lookfor('eigenvalue', module='numpy', output=out,
@@ -65,7 +65,3 @@ def test_byte_bounds():
def test_assert_raises_regex_context_manager():
with assert_raises_regex(ValueError, 'no deprecation warning'):
raise ValueError('no deprecation warning')
-
-
-if __name__ == "__main__":
- run_module_suite()
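
Skips migrate the same way: numpy.testing's dec.skipif becomes pytest.mark.skipif, which makes the reason explicit and visible in the report. Sketch:

    import sys
    import pytest

    # Under -OO Python strips docstrings, so anything that searches
    # them (as np.lookfor does) has nothing to inspect.
    @pytest.mark.skipif(sys.flags.optimize == 2,
                        reason="Python running -OO")
    def test_needs_docstrings():
        def documented():
            """eigenvalue helper"""
        assert 'eigenvalue' in documented.__doc__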
diff --git a/numpy/lib/twodim_base.py b/numpy/lib/twodim_base.py
index 402c18850..cca316e9a 100644
--- a/numpy/lib/twodim_base.py
+++ b/numpy/lib/twodim_base.py
@@ -650,7 +650,7 @@ def histogram2d(x, y, bins=10, range=None, normed=False, weights=None):
N = 1
if N != 1 and N != 2:
- xedges = yedges = asarray(bins, float)
+ xedges = yedges = asarray(bins)
bins = [xedges, yedges]
hist, edges = histogramdd([x, y], bins, range, normed, weights)
return hist, edges[0], edges[1]
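
The lone non-test change here removes the forced float cast when a single edge array is given to histogram2d, letting the edges reach histogramdd with their original dtype. A sketch that merely probes the visible effect (the exact returned dtype is this change's business; the example only inspects it):

    import numpy as np

    x = y = np.array([0, 1, 2, 3])
    edges = np.array([0, 1, 2, 4])       # integer bin edges

    H, xe, ye = np.histogram2d(x, y, bins=edges)
    # Previously asarray(bins, float) upcast both edge arrays to float64;
    # with this change the edges are forwarded as given.
    print(xe.dtype, ye.dtype)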