-rw-r--r--  benchmarks/benchmarks/bench_lib.py             25
-rw-r--r--  doc/release/1.15.0-notes.rst                   11
-rw-r--r--  numpy/add_newdocs.py                            5
-rw-r--r--  numpy/core/src/multiarray/datetime.c           17
-rw-r--r--  numpy/core/tests/test_datetime.py              37
-rw-r--r--  numpy/core/tests/test_multiarray.py             1
-rw-r--r--  numpy/core/tests/test_numeric.py                1
-rw-r--r--  numpy/lib/arraypad.py                         154
-rw-r--r--  numpy/lib/arraysetops.py                       65
-rw-r--r--  numpy/lib/tests/test_arraysetops.py            41
-rw-r--r--  numpy/ma/core.py                                6
-rw-r--r--  numpy/ma/tests/test_core.py                    10
-rw-r--r--  numpy/ma/tests/test_extras.py                  15
-rw-r--r--  numpy/matrixlib/tests/test_masked_matrix.py    22
-rw-r--r--  numpy/testing/_private/utils.py                20
-rw-r--r--  numpy/testing/tests/test_utils.py              18
-rwxr-xr-x  runtests.py                                    12
17 files changed, 326 insertions(+), 134 deletions(-)
diff --git a/benchmarks/benchmarks/bench_lib.py b/benchmarks/benchmarks/bench_lib.py
new file mode 100644
index 000000000..83f26c9d1
--- /dev/null
+++ b/benchmarks/benchmarks/bench_lib.py
@@ -0,0 +1,25 @@
+"""Benchmarks for `numpy.lib`."""
+
+
+from __future__ import absolute_import, division, print_function
+
+from .common import Benchmark
+
+import numpy as np
+
+
+class Pad(Benchmark):
+ """Benchmarks for `numpy.pad`."""
+
+ param_names = ["shape", "pad_width", "mode"]
+ params = [
+ [(1000,), (10, 100), (10, 10, 10)],
+ [1, 3, (0, 5)],
+ ["constant", "edge", "linear_ramp", "mean", "reflect", "wrap"],
+ ]
+
+ def setup(self, shape, pad_width, mode):
+ self.array = np.empty(shape)
+
+ def time_pad(self, shape, pad_width, mode):
+ np.pad(self.array, pad_width, mode)
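[Note: the benchmark above can be approximated outside the asv harness. A minimal standalone sketch, not part of the commit, picking one shape and pad width from the `params` list:]

    import timeit

    import numpy as np

    array = np.empty((1000,))
    for mode in ["constant", "edge", "linear_ramp", "mean", "reflect", "wrap"]:
        # asv reports per-call times; timeit over 100 calls approximates that
        t = timeit.timeit(lambda: np.pad(array, 3, mode), number=100)
        print("%-12s %.2f us/call" % (mode, t / 100 * 1e6))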
diff --git a/doc/release/1.15.0-notes.rst b/doc/release/1.15.0-notes.rst
index 82e50edac..04a188c65 100644
--- a/doc/release/1.15.0-notes.rst
+++ b/doc/release/1.15.0-notes.rst
@@ -126,6 +126,11 @@ unstructured void array's ``.item`` method now returns a bytes object
This may affect code which assumed the return value was mutable, which is no
longer the case.
+``copy.copy`` and ``copy.deepcopy`` no longer turn ``masked`` into an array
+----------------------------------------------------------------------------
+Since ``np.ma.masked`` is a readonly scalar, copying should be a no-op. These
+functions now behave consistently with ``np.copy()``.
+
C API changes
=============
@@ -162,6 +167,11 @@ Creating a full iOS-compatible NumPy package requires building for the 5
architectures supported by iOS (i386, x86_64, armv7, armv7s and arm64), and
combining these 5 compiled build products into a single "fat" binary.
+``return_indices`` keyword added for ``np.intersect1d``
+-------------------------------------------------------
+New keyword ``return_indices`` returns the indices of the two input arrays
+that correspond to the common elements.
+
``np.quantile`` and ``np.nanquantile``
--------------------------------------
Like ``np.percentile`` and ``np.nanpercentile``, but takes quantiles in [0, 1]
@@ -173,6 +183,7 @@ Build system
------------
Added experimental support for the 64-bit RISC-V architecture.
+
Improvements
============
diff --git a/numpy/add_newdocs.py b/numpy/add_newdocs.py
index e8030d562..fc2130096 100644
--- a/numpy/add_newdocs.py
+++ b/numpy/add_newdocs.py
@@ -4759,6 +4759,11 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('tofile',
machines with different endianness. Some of these problems can be overcome
by outputting the data as text files, at the expense of speed and file
size.
+
+ When fid is a file object, array contents are directly written to the
+ file, bypassing the file object's ``write`` method. As a result, tofile
+ cannot be used with file objects supporting compression (e.g., GzipFile)
+ or file-like objects that do not support ``fileno()`` (e.g., BytesIO).
"""))
diff --git a/numpy/core/src/multiarray/datetime.c b/numpy/core/src/multiarray/datetime.c
index a4a028ad4..af542aecc 100644
--- a/numpy/core/src/multiarray/datetime.c
+++ b/numpy/core/src/multiarray/datetime.c
@@ -2808,9 +2808,12 @@ convert_pyobject_to_timedelta(PyArray_DatetimeMetaData *meta, PyObject *obj,
us_meta.base = NPY_FR_m;
}
else if (td % (24*60*60*1000000LL) != 0) {
- us_meta.base = NPY_FR_D;
+ us_meta.base = NPY_FR_h;
}
else if (td % (7*24*60*60*1000000LL) != 0) {
+ us_meta.base = NPY_FR_D;
+ }
+ else {
us_meta.base = NPY_FR_W;
}
us_meta.num = 1;
@@ -3679,11 +3682,11 @@ recursive_find_object_datetime64_type(PyObject *obj,
return 0;
}
- /* Python date object -> 'D' */
- else if (PyDate_Check(obj)) {
+ /* Python datetime object -> 'us' */
+ else if (PyDateTime_Check(obj)) {
PyArray_DatetimeMetaData tmp_meta;
- tmp_meta.base = NPY_FR_D;
+ tmp_meta.base = NPY_FR_us;
tmp_meta.num = 1;
/* Combine it with 'meta' */
@@ -3694,11 +3697,11 @@ recursive_find_object_datetime64_type(PyObject *obj,
return 0;
}
- /* Python datetime object -> 'us' */
- else if (PyDateTime_Check(obj)) {
+ /* Python date object -> 'D' */
+ else if (PyDate_Check(obj)) {
PyArray_DatetimeMetaData tmp_meta;
- tmp_meta.base = NPY_FR_us;
+ tmp_meta.base = NPY_FR_D;
tmp_meta.num = 1;
/* Combine it with 'meta' */
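[Note: the two swapped branches matter because `datetime.datetime` is a subclass of `datetime.date`, so the `PyDate_Check` branch used to capture datetime objects and tag them with day resolution. A sketch of the resulting Python-level behavior; the first two lines mirror the tests added below, while the timedelta line is inferred from the reordered unit cascade above:]

    import datetime

    import numpy as np

    d = datetime.date(1970, 1, 1)
    dt = datetime.datetime(1970, 1, 1, 12, 30, 40)

    print(np.array([d, d]).astype('datetime64').dtype)    # datetime64[D]
    print(np.array([dt, dt]).astype('datetime64').dtype)  # datetime64[us]

    # Unit cascade after the fix: not whole days -> 'h';
    # whole days but not whole weeks -> 'D'; whole weeks -> 'W'.
    print(np.timedelta64(datetime.timedelta(hours=36)))   # 36 hours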
diff --git a/numpy/core/tests/test_datetime.py b/numpy/core/tests/test_datetime.py
index dca2d2541..e433877e8 100644
--- a/numpy/core/tests/test_datetime.py
+++ b/numpy/core/tests/test_datetime.py
@@ -124,7 +124,7 @@ class TestDateTime(object):
assert_(not np.can_cast('M8[h]', 'M8', casting='safe'))
def test_compare_generic_nat(self):
- # regression tests for GH6452
+ # regression tests for gh-6452
assert_equal(np.datetime64('NaT'),
np.datetime64('2000') + np.timedelta64('NaT'))
# nb. we may want to make NaT != NaT true in the future
@@ -236,18 +236,25 @@ class TestDateTime(object):
# find "supertype" for non-dates and dates
b = np.bool_(True)
- dt = np.datetime64('1970-01-01', 'M')
- arr = np.array([b, dt])
+ dm = np.datetime64('1970-01-01', 'M')
+ d = datetime.date(1970, 1, 1)
+ dt = datetime.datetime(1970, 1, 1, 12, 30, 40)
+
+ arr = np.array([b, dm])
assert_equal(arr.dtype, np.dtype('O'))
- dt = datetime.date(1970, 1, 1)
- arr = np.array([b, dt])
+ arr = np.array([b, d])
assert_equal(arr.dtype, np.dtype('O'))
- dt = datetime.datetime(1970, 1, 1, 12, 30, 40)
arr = np.array([b, dt])
assert_equal(arr.dtype, np.dtype('O'))
+ arr = np.array([d, d]).astype('datetime64')
+ assert_equal(arr.dtype, np.dtype('M8[D]'))
+
+ arr = np.array([dt, dt]).astype('datetime64')
+ assert_equal(arr.dtype, np.dtype('M8[us]'))
+
def test_timedelta_scalar_construction(self):
# Construct with different units
assert_equal(np.timedelta64(7, 'D'),
@@ -324,6 +331,24 @@ class TestDateTime(object):
a = np.timedelta64(1, 'Y')
assert_raises(TypeError, np.timedelta64, a, 'D')
assert_raises(TypeError, np.timedelta64, a, 'm')
+ a = datetime.timedelta(seconds=3)
+ assert_raises(TypeError, np.timedelta64, a, 'M')
+ assert_raises(TypeError, np.timedelta64, a, 'Y')
+ a = datetime.timedelta(weeks=3)
+ assert_raises(TypeError, np.timedelta64, a, 'M')
+ assert_raises(TypeError, np.timedelta64, a, 'Y')
+ a = datetime.timedelta()
+ assert_raises(TypeError, np.timedelta64, a, 'M')
+ assert_raises(TypeError, np.timedelta64, a, 'Y')
+
+ def test_timedelta_object_array_conversion(self):
+ # Regression test for gh-11096
+ inputs = [datetime.timedelta(28),
+ datetime.timedelta(30),
+ datetime.timedelta(31)]
+ expected = np.array([28, 30, 31], dtype='timedelta64[D]')
+ actual = np.array(inputs, dtype='timedelta64[D]')
+ assert_equal(expected, actual)
def test_timedelta_scalar_construction_units(self):
# String construction detecting units
diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py
index 3ca201edd..a60f2cd92 100644
--- a/numpy/core/tests/test_multiarray.py
+++ b/numpy/core/tests/test_multiarray.py
@@ -6517,6 +6517,7 @@ class TestNewBufferProtocol(object):
a = np.empty((1,) * 32)
self._check_roundtrip(a)
+ @pytest.mark.skipif(sys.version_info < (2, 7, 7), reason="See gh-11115")
def test_error_too_many_dims(self):
def make_ctype(shape, scalar_type):
t = scalar_type
diff --git a/numpy/core/tests/test_numeric.py b/numpy/core/tests/test_numeric.py
index 95e9f8497..53486dc51 100644
--- a/numpy/core/tests/test_numeric.py
+++ b/numpy/core/tests/test_numeric.py
@@ -552,7 +552,6 @@ class TestFloatExceptions(object):
self.assert_raises_fpe(fpeerr, flop, sc1, sc2[()])
self.assert_raises_fpe(fpeerr, flop, sc1[()], sc2[()])
- @pytest.mark.xfail(reason="See ticket #2350")
def test_floating_exceptions(self):
# Test basic arithmetic function errors
with np.errstate(all='raise'):
diff --git a/numpy/lib/arraypad.py b/numpy/lib/arraypad.py
index 600301c56..e9ca9de4d 100644
--- a/numpy/lib/arraypad.py
+++ b/numpy/lib/arraypad.py
@@ -74,6 +74,35 @@ def _round_ifneeded(arr, dtype):
arr.round(out=arr)
+def _slice_at_axis(shape, sl, axis):
+ """
+ Construct a slice tuple the length of shape, with sl at the specified axis
+ """
+ slice_tup = (slice(None),)
+ return slice_tup * axis + (sl,) + slice_tup * (len(shape) - axis - 1)
+
+
+def _slice_first(shape, n, axis):
+ """ Construct a slice tuple to take the first n elements along axis """
+ return _slice_at_axis(shape, slice(0, n), axis=axis)
+
+
+def _slice_last(shape, n, axis):
+ """ Construct a slice tuple to take the last n elements along axis """
+ dim = shape[axis] # doing this explicitly makes n=0 work
+ return _slice_at_axis(shape, slice(dim - n, dim), axis=axis)
+
+
+def _do_prepend(arr, pad_chunk, axis):
+ return np.concatenate(
+ (pad_chunk.astype(arr.dtype, copy=False), arr), axis=axis)
+
+
+def _do_append(arr, pad_chunk, axis):
+ return np.concatenate(
+ (arr, pad_chunk.astype(arr.dtype, copy=False)), axis=axis)
+
+
def _prepend_const(arr, pad_amt, val, axis=-1):
"""
Prepend constant `val` along `axis` of `arr`.
@@ -100,8 +129,7 @@ def _prepend_const(arr, pad_amt, val, axis=-1):
return arr
padshape = tuple(x if i != axis else pad_amt
for (i, x) in enumerate(arr.shape))
- return np.concatenate((np.full(padshape, val, dtype=arr.dtype), arr),
- axis=axis)
+ return _do_prepend(arr, np.full(padshape, val, dtype=arr.dtype), axis)
def _append_const(arr, pad_amt, val, axis=-1):
@@ -130,8 +158,8 @@ def _append_const(arr, pad_amt, val, axis=-1):
return arr
padshape = tuple(x if i != axis else pad_amt
for (i, x) in enumerate(arr.shape))
- return np.concatenate((arr, np.full(padshape, val, dtype=arr.dtype)),
- axis=axis)
+ return _do_append(arr, np.full(padshape, val, dtype=arr.dtype), axis)
+
def _prepend_edge(arr, pad_amt, axis=-1):
@@ -156,11 +184,9 @@ def _prepend_edge(arr, pad_amt, axis=-1):
if pad_amt == 0:
return arr
- edge_slice = tuple(slice(None) if i != axis else slice(0, 1)
- for (i, x) in enumerate(arr.shape))
+ edge_slice = _slice_first(arr.shape, 1, axis=axis)
edge_arr = arr[edge_slice]
- return np.concatenate((edge_arr.repeat(pad_amt, axis=axis), arr),
- axis=axis)
+ return _do_prepend(arr, edge_arr.repeat(pad_amt, axis=axis), axis)
def _append_edge(arr, pad_amt, axis=-1):
@@ -186,11 +212,9 @@ def _append_edge(arr, pad_amt, axis=-1):
if pad_amt == 0:
return arr
- edge_slice = tuple(slice(None) if i != axis else slice(x - 1, x)
- for (i, x) in enumerate(arr.shape))
+ edge_slice = _slice_last(arr.shape, 1, axis=axis)
edge_arr = arr[edge_slice]
- return np.concatenate((arr, edge_arr.repeat(pad_amt, axis=axis)),
- axis=axis)
+ return _do_append(arr, edge_arr.repeat(pad_amt, axis=axis), axis)
def _prepend_ramp(arr, pad_amt, end, axis=-1):
@@ -228,8 +252,7 @@ def _prepend_ramp(arr, pad_amt, end, axis=-1):
reverse=True).astype(np.float64)
# Appropriate slicing to extract n-dimensional edge along `axis`
- edge_slice = tuple(slice(None) if i != axis else slice(0, 1)
- for (i, x) in enumerate(arr.shape))
+ edge_slice = _slice_first(arr.shape, 1, axis=axis)
# Extract edge, and extend along `axis`
edge_pad = arr[edge_slice].repeat(pad_amt, axis)
@@ -241,7 +264,7 @@ def _prepend_ramp(arr, pad_amt, end, axis=-1):
_round_ifneeded(ramp_arr, arr.dtype)
# Ramp values will most likely be float, cast them to the same type as arr
- return np.concatenate((ramp_arr.astype(arr.dtype), arr), axis=axis)
+ return _do_prepend(arr, ramp_arr, axis)
def _append_ramp(arr, pad_amt, end, axis=-1):
@@ -279,8 +302,7 @@ def _append_ramp(arr, pad_amt, end, axis=-1):
reverse=False).astype(np.float64)
# Slice a chunk from the edge to calculate stats on
- edge_slice = tuple(slice(None) if i != axis else slice(x - 1, x)
- for (i, x) in enumerate(arr.shape))
+ edge_slice = _slice_last(arr.shape, 1, axis=axis)
# Extract edge, and extend along `axis`
edge_pad = arr[edge_slice].repeat(pad_amt, axis)
@@ -292,7 +314,7 @@ def _append_ramp(arr, pad_amt, end, axis=-1):
_round_ifneeded(ramp_arr, arr.dtype)
# Ramp values will most likely be float, cast them to the same type as arr
- return np.concatenate((arr, ramp_arr.astype(arr.dtype)), axis=axis)
+ return _do_append(arr, ramp_arr, axis)
def _prepend_max(arr, pad_amt, num, axis=-1):
@@ -332,15 +354,13 @@ def _prepend_max(arr, pad_amt, num, axis=-1):
num = None
# Slice a chunk from the edge to calculate stats on
- max_slice = tuple(slice(None) if i != axis else slice(num)
- for (i, x) in enumerate(arr.shape))
+ max_slice = _slice_first(arr.shape, num, axis=axis)
# Extract slice, calculate max
max_chunk = arr[max_slice].max(axis=axis, keepdims=True)
# Concatenate `arr` with `max_chunk`, extended along `axis` by `pad_amt`
- return np.concatenate((max_chunk.repeat(pad_amt, axis=axis), arr),
- axis=axis)
+ return _do_prepend(arr, max_chunk.repeat(pad_amt, axis=axis), axis)
def _append_max(arr, pad_amt, num, axis=-1):
@@ -379,11 +399,8 @@ def _append_max(arr, pad_amt, num, axis=-1):
num = None
# Slice a chunk from the edge to calculate stats on
- end = arr.shape[axis] - 1
if num is not None:
- max_slice = tuple(
- slice(None) if i != axis else slice(end, end - num, -1)
- for (i, x) in enumerate(arr.shape))
+ max_slice = _slice_last(arr.shape, num, axis=axis)
else:
max_slice = tuple(slice(None) for x in arr.shape)
@@ -391,8 +408,7 @@ def _append_max(arr, pad_amt, num, axis=-1):
max_chunk = arr[max_slice].max(axis=axis, keepdims=True)
# Concatenate `arr` with `max_chunk`, extended along `axis` by `pad_amt`
- return np.concatenate((arr, max_chunk.repeat(pad_amt, axis=axis)),
- axis=axis)
+ return _do_append(arr, max_chunk.repeat(pad_amt, axis=axis), axis)
def _prepend_mean(arr, pad_amt, num, axis=-1):
@@ -431,16 +447,14 @@ def _prepend_mean(arr, pad_amt, num, axis=-1):
num = None
# Slice a chunk from the edge to calculate stats on
- mean_slice = tuple(slice(None) if i != axis else slice(num)
- for (i, x) in enumerate(arr.shape))
+ mean_slice = _slice_first(arr.shape, num, axis=axis)
# Extract slice, calculate mean
mean_chunk = arr[mean_slice].mean(axis, keepdims=True)
_round_ifneeded(mean_chunk, arr.dtype)
# Concatenate `arr` with `mean_chunk`, extended along `axis` by `pad_amt`
- return np.concatenate((mean_chunk.repeat(pad_amt, axis).astype(arr.dtype),
- arr), axis=axis)
+ return _do_prepend(arr, mean_chunk.repeat(pad_amt, axis), axis=axis)
def _append_mean(arr, pad_amt, num, axis=-1):
@@ -479,11 +493,8 @@ def _append_mean(arr, pad_amt, num, axis=-1):
num = None
# Slice a chunk from the edge to calculate stats on
- end = arr.shape[axis] - 1
if num is not None:
- mean_slice = tuple(
- slice(None) if i != axis else slice(end, end - num, -1)
- for (i, x) in enumerate(arr.shape))
+ mean_slice = _slice_last(arr.shape, num, axis=axis)
else:
mean_slice = tuple(slice(None) for x in arr.shape)
@@ -492,8 +503,7 @@ def _append_mean(arr, pad_amt, num, axis=-1):
_round_ifneeded(mean_chunk, arr.dtype)
# Concatenate `arr` with `mean_chunk`, extended along `axis` by `pad_amt`
- return np.concatenate(
- (arr, mean_chunk.repeat(pad_amt, axis).astype(arr.dtype)), axis=axis)
+ return _do_append(arr, mean_chunk.repeat(pad_amt, axis), axis=axis)
def _prepend_med(arr, pad_amt, num, axis=-1):
@@ -532,16 +542,14 @@ def _prepend_med(arr, pad_amt, num, axis=-1):
num = None
# Slice a chunk from the edge to calculate stats on
- med_slice = tuple(slice(None) if i != axis else slice(num)
- for (i, x) in enumerate(arr.shape))
+ med_slice = _slice_first(arr.shape, num, axis=axis)
# Extract slice, calculate median
med_chunk = np.median(arr[med_slice], axis=axis, keepdims=True)
_round_ifneeded(med_chunk, arr.dtype)
# Concatenate `arr` with `med_chunk`, extended along `axis` by `pad_amt`
- return np.concatenate(
- (med_chunk.repeat(pad_amt, axis).astype(arr.dtype), arr), axis=axis)
+ return _do_prepend(arr, med_chunk.repeat(pad_amt, axis), axis=axis)
def _append_med(arr, pad_amt, num, axis=-1):
@@ -580,11 +588,8 @@ def _append_med(arr, pad_amt, num, axis=-1):
num = None
# Slice a chunk from the edge to calculate stats on
- end = arr.shape[axis] - 1
if num is not None:
- med_slice = tuple(
- slice(None) if i != axis else slice(end, end - num, -1)
- for (i, x) in enumerate(arr.shape))
+ med_slice = _slice_last(arr.shape, num, axis=axis)
else:
med_slice = tuple(slice(None) for x in arr.shape)
@@ -593,8 +598,7 @@ def _append_med(arr, pad_amt, num, axis=-1):
_round_ifneeded(med_chunk, arr.dtype)
# Concatenate `arr` with `med_chunk`, extended along `axis` by `pad_amt`
- return np.concatenate(
- (arr, med_chunk.repeat(pad_amt, axis).astype(arr.dtype)), axis=axis)
+ return _do_append(arr, med_chunk.repeat(pad_amt, axis), axis=axis)
def _prepend_min(arr, pad_amt, num, axis=-1):
@@ -634,15 +638,13 @@ def _prepend_min(arr, pad_amt, num, axis=-1):
num = None
# Slice a chunk from the edge to calculate stats on
- min_slice = tuple(slice(None) if i != axis else slice(num)
- for (i, x) in enumerate(arr.shape))
+ min_slice = _slice_first(arr.shape, num, axis=axis)
# Extract slice, calculate min
min_chunk = arr[min_slice].min(axis=axis, keepdims=True)
# Concatenate `arr` with `min_chunk`, extended along `axis` by `pad_amt`
- return np.concatenate((min_chunk.repeat(pad_amt, axis=axis), arr),
- axis=axis)
+ return _do_prepend(arr, min_chunk.repeat(pad_amt, axis), axis=axis)
def _append_min(arr, pad_amt, num, axis=-1):
@@ -681,11 +683,8 @@ def _append_min(arr, pad_amt, num, axis=-1):
num = None
# Slice a chunk from the edge to calculate stats on
- end = arr.shape[axis] - 1
if num is not None:
- min_slice = tuple(
- slice(None) if i != axis else slice(end, end - num, -1)
- for (i, x) in enumerate(arr.shape))
+ min_slice = _slice_last(arr.shape, num, axis=axis)
else:
min_slice = tuple(slice(None) for x in arr.shape)
@@ -693,8 +692,7 @@ def _append_min(arr, pad_amt, num, axis=-1):
min_chunk = arr[min_slice].min(axis=axis, keepdims=True)
# Concatenate `arr` with `min_chunk`, extended along `axis` by `pad_amt`
- return np.concatenate((arr, min_chunk.repeat(pad_amt, axis=axis)),
- axis=axis)
+ return _do_append(arr, min_chunk.repeat(pad_amt, axis), axis=axis)
def _pad_ref(arr, pad_amt, method, axis=-1):
@@ -737,15 +735,13 @@ def _pad_ref(arr, pad_amt, method, axis=-1):
# Prepended region
# Slice off a reverse indexed chunk from near edge to pad `arr` before
- ref_slice = tuple(slice(None) if i != axis else slice(pad_amt[0], 0, -1)
- for (i, x) in enumerate(arr.shape))
+ ref_slice = _slice_at_axis(arr.shape, slice(pad_amt[0], 0, -1), axis=axis)
ref_chunk1 = arr[ref_slice]
# Memory/computationally more expensive, only do this if `method='odd'`
if 'odd' in method and pad_amt[0] > 0:
- edge_slice1 = tuple(slice(None) if i != axis else slice(0, 1)
- for (i, x) in enumerate(arr.shape))
+ edge_slice1 = _slice_first(arr.shape, 1, axis=axis)
edge_chunk = arr[edge_slice1]
ref_chunk1 = 2 * edge_chunk - ref_chunk1
del edge_chunk
@@ -756,15 +752,12 @@ def _pad_ref(arr, pad_amt, method, axis=-1):
# Slice off a reverse indexed chunk from far edge to pad `arr` after
start = arr.shape[axis] - pad_amt[1] - 1
end = arr.shape[axis] - 1
- ref_slice = tuple(slice(None) if i != axis else slice(start, end)
- for (i, x) in enumerate(arr.shape))
- rev_idx = tuple(slice(None) if i != axis else slice(None, None, -1)
- for (i, x) in enumerate(arr.shape))
+ ref_slice = _slice_at_axis(arr.shape, slice(start, end), axis=axis)
+ rev_idx = _slice_at_axis(arr.shape, slice(None, None, -1), axis=axis)
ref_chunk2 = arr[ref_slice][rev_idx]
if 'odd' in method:
- edge_slice2 = tuple(slice(None) if i != axis else slice(x - 1, x)
- for (i, x) in enumerate(arr.shape))
+ edge_slice2 = _slice_last(arr.shape, 1, axis=axis)
edge_chunk = arr[edge_slice2]
ref_chunk2 = 2 * edge_chunk - ref_chunk2
del edge_chunk
@@ -813,16 +806,13 @@ def _pad_sym(arr, pad_amt, method, axis=-1):
# Prepended region
# Slice off a reverse indexed chunk from near edge to pad `arr` before
- sym_slice = tuple(slice(None) if i != axis else slice(0, pad_amt[0])
- for (i, x) in enumerate(arr.shape))
- rev_idx = tuple(slice(None) if i != axis else slice(None, None, -1)
- for (i, x) in enumerate(arr.shape))
+ sym_slice = _slice_first(arr.shape, pad_amt[0], axis=axis)
+ rev_idx = _slice_at_axis(arr.shape, slice(None, None, -1), axis=axis)
sym_chunk1 = arr[sym_slice][rev_idx]
# Memory/computationally more expensive, only do this if `method='odd'`
if 'odd' in method and pad_amt[0] > 0:
- edge_slice1 = tuple(slice(None) if i != axis else slice(0, 1)
- for (i, x) in enumerate(arr.shape))
+ edge_slice1 = _slice_first(arr.shape, 1, axis=axis)
edge_chunk = arr[edge_slice1]
sym_chunk1 = 2 * edge_chunk - sym_chunk1
del edge_chunk
@@ -831,15 +821,11 @@ def _pad_sym(arr, pad_amt, method, axis=-1):
# Appended region
# Slice off a reverse indexed chunk from far edge to pad `arr` after
- start = arr.shape[axis] - pad_amt[1]
- end = arr.shape[axis]
- sym_slice = tuple(slice(None) if i != axis else slice(start, end)
- for (i, x) in enumerate(arr.shape))
+ sym_slice = _slice_last(arr.shape, pad_amt[1], axis=axis)
sym_chunk2 = arr[sym_slice][rev_idx]
if 'odd' in method:
- edge_slice2 = tuple(slice(None) if i != axis else slice(x - 1, x)
- for (i, x) in enumerate(arr.shape))
+ edge_slice2 = _slice_last(arr.shape, 1, axis=axis)
edge_chunk = arr[edge_slice2]
sym_chunk2 = 2 * edge_chunk - sym_chunk2
del edge_chunk
@@ -885,18 +871,14 @@ def _pad_wrap(arr, pad_amt, axis=-1):
# Prepended region
# Slice off a reverse indexed chunk from near edge to pad `arr` before
- start = arr.shape[axis] - pad_amt[0]
- end = arr.shape[axis]
- wrap_slice = tuple(slice(None) if i != axis else slice(start, end)
- for (i, x) in enumerate(arr.shape))
+ wrap_slice = _slice_last(arr.shape, pad_amt[0], axis=axis)
wrap_chunk1 = arr[wrap_slice]
##########################################################################
# Appended region
# Slice off a reverse indexed chunk from far edge to pad `arr` after
- wrap_slice = tuple(slice(None) if i != axis else slice(0, pad_amt[1])
- for (i, x) in enumerate(arr.shape))
+ wrap_slice = _slice_first(arr.shape, pad_amt[1], axis=axis)
wrap_chunk2 = arr[wrap_slice]
# Concatenate `arr` with both chunks, extending along `axis`
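[Note: the refactoring above replaces the repeated `tuple(slice(None) if i != axis else ...)` constructions with three helpers. Their semantics, restated as a standalone sketch:]

    import numpy as np

    def slice_at_axis(shape, sl, axis):
        """Slice tuple of len(shape) selecting `sl` along `axis` only."""
        return ((slice(None),) * axis + (sl,)
                + (slice(None),) * (len(shape) - axis - 1))

    arr = np.arange(12).reshape(3, 4)

    # _slice_first(shape, 2, axis=1): the first two columns
    print(arr[slice_at_axis(arr.shape, slice(0, 2), axis=1)].shape)  # (3, 2)

    # _slice_last(shape, 0, axis=1): slice(dim - 0, dim) is empty, so n=0
    # yields a zero-width selection instead of wrapping around
    print(arr[slice_at_axis(arr.shape, slice(4, 4), axis=1)].shape)  # (3, 0)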
diff --git a/numpy/lib/arraysetops.py b/numpy/lib/arraysetops.py
index e8eda297f..4d3f35183 100644
--- a/numpy/lib/arraysetops.py
+++ b/numpy/lib/arraysetops.py
@@ -298,7 +298,7 @@ def _unique1d(ar, return_index=False, return_inverse=False,
return ret
-def intersect1d(ar1, ar2, assume_unique=False):
+def intersect1d(ar1, ar2, assume_unique=False, return_indices=False):
"""
Find the intersection of two arrays.
@@ -307,15 +307,28 @@ def intersect1d(ar1, ar2, assume_unique=False):
Parameters
----------
ar1, ar2 : array_like
- Input arrays.
+ Input arrays. Will be flattened if not already 1D.
assume_unique : bool
If True, the input arrays are both assumed to be unique, which
can speed up the calculation. Default is False.
-
+ return_indices : bool
+ If True, the indices which correspond to the intersection of the
+ two arrays are returned. The first instance of a value is used
+ if there are multiple. Default is False.
+
+ .. versionadded:: 1.15.0
+
Returns
-------
intersect1d : ndarray
Sorted 1D array of common and unique elements.
+ comm1 : ndarray
+ The indices of the first occurrences of the common values in `ar1`.
+ Only provided if `return_indices` is True.
+ comm2 : ndarray
+ The indices of the first occurrences of the common values in `ar2`.
+ Only provided if `return_indices` is True.
+
See Also
--------
@@ -332,14 +345,49 @@ def intersect1d(ar1, ar2, assume_unique=False):
>>> from functools import reduce
>>> reduce(np.intersect1d, ([1, 3, 4, 3], [3, 1, 2, 1], [6, 3, 4, 2]))
array([3])
+
+ To return the indices of the values common to the input arrays
+ along with the intersected values:
+
+ >>> x = np.array([1, 1, 2, 3, 4])
+ >>> y = np.array([2, 1, 4, 6])
+ >>> xy, x_ind, y_ind = np.intersect1d(x, y, return_indices=True)
+ >>> x_ind, y_ind
+ (array([0, 2, 4]), array([1, 0, 2]))
+ >>> xy, x[x_ind], y[y_ind]
+ (array([1, 2, 4]), array([1, 2, 4]), array([1, 2, 4]))
+
"""
if not assume_unique:
- # Might be faster than unique( intersect1d( ar1, ar2 ) )?
- ar1 = unique(ar1)
- ar2 = unique(ar2)
+ if return_indices:
+ ar1, ind1 = unique(ar1, return_index=True)
+ ar2, ind2 = unique(ar2, return_index=True)
+ else:
+ ar1 = unique(ar1)
+ ar2 = unique(ar2)
+ else:
+ ar1 = ar1.ravel()
+ ar2 = ar2.ravel()
+
aux = np.concatenate((ar1, ar2))
- aux.sort()
- return aux[:-1][aux[1:] == aux[:-1]]
+ if return_indices:
+ aux_sort_indices = np.argsort(aux, kind='mergesort')
+ aux = aux[aux_sort_indices]
+ else:
+ aux.sort()
+
+ mask = aux[1:] == aux[:-1]
+ int1d = aux[:-1][mask]
+
+ if return_indices:
+ ar1_indices = aux_sort_indices[:-1][mask]
+ ar2_indices = aux_sort_indices[1:][mask] - ar1.size
+ if not assume_unique:
+ ar1_indices = ind1[ar1_indices]
+ ar2_indices = ind2[ar2_indices]
+
+ return int1d, ar1_indices, ar2_indices
+ else:
+ return int1d
def setxor1d(ar1, ar2, assume_unique=False):
"""
@@ -660,3 +708,4 @@ def setdiff1d(ar1, ar2, assume_unique=False):
ar1 = unique(ar1)
ar2 = unique(ar2)
return ar1[in1d(ar1, ar2, assume_unique=True, invert=True)]
+
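[Note: the index bookkeeping in the new `return_indices` path, in miniature. Once both inputs are unique, every common value appears exactly twice in `aux`, and the stable mergesort keeps the copy from `ar1` first, so the matched positions map straight back to each source array:]

    import numpy as np

    ar1 = np.array([1, 2, 3, 4])
    ar2 = np.array([2, 1, 4, 6])

    aux = np.concatenate((ar1, ar2))
    order = np.argsort(aux, kind='mergesort')  # stable: ar1's copy sorts first
    aux = aux[order]

    mask = aux[1:] == aux[:-1]
    print(aux[:-1][mask])              # [1 2 4]  common values
    print(order[:-1][mask])            # [0 1 3]  indices into ar1
    print(order[1:][mask] - ar1.size)  # [1 0 2]  indices into ar2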
diff --git a/numpy/lib/tests/test_arraysetops.py b/numpy/lib/tests/test_arraysetops.py
index 984a3b15e..dace5ade8 100644
--- a/numpy/lib/tests/test_arraysetops.py
+++ b/numpy/lib/tests/test_arraysetops.py
@@ -32,7 +32,46 @@ class TestSetOps(object):
assert_array_equal(c, ed)
assert_array_equal([], intersect1d([], []))
-
+
+ def test_intersect1d_indices(self):
+ # unique inputs
+ a = np.array([1, 2, 3, 4])
+ b = np.array([2, 1, 4, 6])
+ c, i1, i2 = intersect1d(a, b, assume_unique=True, return_indices=True)
+ ee = np.array([1, 2, 4])
+ assert_array_equal(c, ee)
+ assert_array_equal(a[i1], ee)
+ assert_array_equal(b[i2], ee)
+
+ # non-unique inputs
+ a = np.array([1, 2, 2, 3, 4, 3, 2])
+ b = np.array([1, 8, 4, 2, 2, 3, 2, 3])
+ c, i1, i2 = intersect1d(a, b, return_indices=True)
+ ef = np.array([1, 2, 3, 4])
+ assert_array_equal(c, ef)
+ assert_array_equal(a[i1], ef)
+ assert_array_equal(b[i2], ef)
+
+ # non-1d, unique inputs
+ a = np.array([[2, 4, 5, 6], [7, 8, 1, 15]])
+ b = np.array([[3, 2, 7, 6], [10, 12, 8, 9]])
+ c, i1, i2 = intersect1d(a, b, assume_unique=True, return_indices=True)
+ ui1 = np.unravel_index(i1, a.shape)
+ ui2 = np.unravel_index(i2, b.shape)
+ ea = np.array([2, 6, 7, 8])
+ assert_array_equal(ea, a[ui1])
+ assert_array_equal(ea, b[ui2])
+
+ # non-1d inputs, not assumed to be unique
+ a = np.array([[2, 4, 5, 6, 6], [4, 7, 8, 7, 2]])
+ b = np.array([[3, 2, 7, 7], [10, 12, 8, 7]])
+ c, i1, i2 = intersect1d(a, b, return_indices=True)
+ ui1 = np.unravel_index(i1, a.shape)
+ ui2 = np.unravel_index(i2, b.shape)
+ ea = np.array([2, 7, 8])
+ assert_array_equal(ea, a[ui1])
+ assert_array_equal(ea, b[ui2])
+
def test_setxor1d(self):
a = np.array([5, 7, 1, 2])
b = np.array([2, 4, 3, 1, 5])
diff --git a/numpy/ma/core.py b/numpy/ma/core.py
index 42db94cad..17682d13f 100644
--- a/numpy/ma/core.py
+++ b/numpy/ma/core.py
@@ -6310,6 +6310,12 @@ class MaskedConstant(MaskedArray):
# precedent for this with `np.bool_` scalars.
return self
+ def __copy__(self):
+ return self
+
+ def __deepcopy__(self, memo):
+ return self
+
def __setattr__(self, attr, value):
if not self.__has_singleton():
# allow the singleton to be initialized
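[Note: the net effect of the two new methods, as a quick check assuming the usual singleton invariant of `np.ma.masked`:]

    import copy

    import numpy as np

    assert copy.copy(np.ma.masked) is np.ma.masked
    assert copy.deepcopy(np.ma.masked) is np.ma.masked
    assert np.ma.masked.copy() is np.ma.masked  # was already true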
diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py
index 63703f6cd..4c7440aab 100644
--- a/numpy/ma/tests/test_core.py
+++ b/numpy/ma/tests/test_core.py
@@ -4826,6 +4826,16 @@ class TestMaskedConstant(object):
np.ma.masked.copy() is np.ma.masked,
np.True_.copy() is np.True_)
+ def test__copy(self):
+ import copy
+ assert_(
+ copy.copy(np.ma.masked) is np.ma.masked)
+
+ def test_deepcopy(self):
+ import copy
+ assert_(
+ copy.deepcopy(np.ma.masked) is np.ma.masked)
+
def test_immutable(self):
orig = np.ma.masked
assert_raises(np.ma.core.MaskError, operator.setitem, orig, (), 1)
diff --git a/numpy/ma/tests/test_extras.py b/numpy/ma/tests/test_extras.py
index 2d5e30b2c..c29bec2bd 100644
--- a/numpy/ma/tests/test_extras.py
+++ b/numpy/ma/tests/test_extras.py
@@ -307,21 +307,6 @@ class TestConcatenator(object):
assert_array_equal(d[5:,:], b_2)
assert_array_equal(d.mask, np.r_[m_1, m_2])
- def test_matrix_builder(self):
- assert_raises(np.ma.MAError, lambda: mr_['1, 2; 3, 4'])
-
- def test_matrix(self):
- # Test consistency with unmasked version. If we ever deprecate
- # matrix, this test should either still pass, or both actual and
- # expected should fail to be build.
- actual = mr_['r', 1, 2, 3]
- expected = np.ma.array(np.r_['r', 1, 2, 3])
- assert_array_equal(actual, expected)
-
- # outer type is masked array, inner type is matrix
- assert_equal(type(actual), type(expected))
- assert_equal(type(actual.data), type(expected.data))
-
def test_masked_constant(self):
actual = mr_[np.ma.masked, 1]
assert_equal(actual.mask, [True, False])
diff --git a/numpy/matrixlib/tests/test_masked_matrix.py b/numpy/matrixlib/tests/test_masked_matrix.py
index 80d1cacca..0a0d985c4 100644
--- a/numpy/matrixlib/tests/test_masked_matrix.py
+++ b/numpy/matrixlib/tests/test_masked_matrix.py
@@ -3,10 +3,12 @@ from __future__ import division, absolute_import, print_function
import pickle
import numpy as np
-from numpy.ma.testutils import assert_, assert_equal
+from numpy.ma.testutils import (assert_, assert_equal, assert_raises,
+ assert_array_equal)
from numpy.ma.core import (masked_array, masked_values, masked, allequal,
MaskType, getmask, MaskedArray, nomask,
log, add, hypot, divide)
+from numpy.ma.extras import mr_
class MMatrix(MaskedArray, np.matrix,):
@@ -209,3 +211,21 @@ class TestSubclassing(object):
assert_(isinstance(divide(mx, mx), MMatrix))
assert_(isinstance(divide(mx, x), MMatrix))
assert_equal(divide(mx, mx), divide(xmx, xmx))
+
+class TestConcatenator(object):
+ # Tests for mr_, the equivalent of r_ for masked arrays.
+
+ def test_matrix_builder(self):
+ assert_raises(np.ma.MAError, lambda: mr_['1, 2; 3, 4'])
+
+ def test_matrix(self):
+ # Test consistency with unmasked version. If we ever deprecate
+ # matrix, this test should either still pass, or both actual and
+ # expected should fail to be built.
+ actual = mr_['r', 1, 2, 3]
+ expected = np.ma.array(np.r_['r', 1, 2, 3])
+ assert_array_equal(actual, expected)
+
+ # outer type is masked array, inner type is matrix
+ assert_equal(type(actual), type(expected))
+ assert_equal(type(actual.data), type(expected.data))
diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py
index b0c0b0c48..a7935f175 100644
--- a/numpy/testing/_private/utils.py
+++ b/numpy/testing/_private/utils.py
@@ -771,7 +771,11 @@ def assert_array_compare(comparison, x, y, err_msg='', verbose=True,
reduced = val.ravel()
cond = reduced.all()
reduced = reduced.tolist()
- if not cond:
+ # The below comparison is a hack to ensure that fully masked
+ # results, for which val.ravel().all() returns np.ma.masked,
+ # do not trigger a failure (np.ma.masked != True evaluates as
+ # np.ma.masked, which is falsy).
+ if cond != True:
match = 100-100.0*reduced.count(1)/len(reduced)
msg = build_err_msg([x, y],
err_msg
@@ -1369,16 +1373,20 @@ def _assert_valid_refcount(op):
"""
if not HAS_REFCOUNT:
return True
- import numpy as np
+ import numpy as np, gc
b = np.arange(100*100).reshape(100, 100)
c = b
i = 1
- rc = sys.getrefcount(i)
- for j in range(15):
- d = op(b, c)
- assert_(sys.getrefcount(i) >= rc)
+ gc.disable()
+ try:
+ rc = sys.getrefcount(i)
+ for j in range(15):
+ d = op(b, c)
+ assert_(sys.getrefcount(i) >= rc)
+ finally:
+ gc.enable()
del d # for pyflakes
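[Note: why `if not cond:` misfired for fully masked input, sketched at the REPL; `np.ma.masked` prints as `--`:]

    import numpy as np

    cond = np.ma.MaskedArray(3.5, mask=True).ravel().all()
    print(cond)          # --  (np.ma.masked)
    print(bool(cond))    # False, so `not cond` wrongly signalled a mismatch
    print(cond != True)  # --  (masked, falsy), so the new branch is skipped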
diff --git a/numpy/testing/tests/test_utils.py b/numpy/testing/tests/test_utils.py
index c9e8384c2..602cdf5f2 100644
--- a/numpy/testing/tests/test_utils.py
+++ b/numpy/testing/tests/test_utils.py
@@ -366,6 +366,24 @@ class TestArrayAlmostEqual(_GenericTest):
self._assert_func(b, a)
self._assert_func(b, b)
+ # Test fully masked as well (see gh-11123).
+ a = np.ma.MaskedArray(3.5, mask=True)
+ b = np.array([3., 4., 6.5])
+ self._test_equal(a, b)
+ self._test_equal(b, a)
+ a = np.ma.masked
+ b = np.array([3., 4., 6.5])
+ self._test_equal(a, b)
+ self._test_equal(b, a)
+ a = np.ma.MaskedArray([3., 4., 6.5], mask=[True, True, True])
+ b = np.array([1., 2., 3.])
+ self._test_equal(a, b)
+ self._test_equal(b, a)
+ a = np.ma.MaskedArray([3., 4., 6.5], mask=[True, True, True])
+ b = np.array(1.)
+ self._test_equal(a, b)
+ self._test_equal(b, a)
+
def test_subclass_that_cannot_be_bool(self):
# While we cannot guarantee testing functions will always work for
# subclasses, the tests should ideally rely only on subclasses having
diff --git a/runtests.py b/runtests.py
index 12e3f2886..68192e86e 100755
--- a/runtests.py
+++ b/runtests.py
@@ -311,6 +311,8 @@ def build_project(args):
"""
+ import distutils.sysconfig
+
root_ok = [os.path.exists(os.path.join(ROOT_DIR, fn))
for fn in PROJECT_ROOT_FILES]
if not all(root_ok):
@@ -325,14 +327,18 @@ def build_project(args):
# Always use ccache, if installed
env['PATH'] = os.pathsep.join(EXTRA_PATH + env.get('PATH', '').split(os.pathsep))
-
+ cvars = distutils.sysconfig.get_config_vars()
+ if 'gcc' in cvars['CC']:
+ # add flags used as werrors in tools/travis-test.sh
+ warnings_as_errors = (' -Werror=declaration-after-statement -Werror=vla'
+ ' -Werror=nonnull -Werror=pointer-arith'
+ ' -Wlogical-op')
+ env['CFLAGS'] = warnings_as_errors + env.get('CFLAGS', '')
if args.debug or args.gcov:
# assume everyone uses gcc/gfortran
env['OPT'] = '-O0 -ggdb'
env['FOPT'] = '-O0 -ggdb'
if args.gcov:
- import distutils.sysconfig
- cvars = distutils.sysconfig.get_config_vars()
env['OPT'] = '-O0 -ggdb'
env['FOPT'] = '-O0 -ggdb'
env['CC'] = cvars['CC'] + ' --coverage'
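[Note: the flag injection above, isolated into a runnable sketch using the same flags and the same `distutils.sysconfig` lookup as the hunk:]

    import distutils.sysconfig
    import os

    env = dict(os.environ)
    cvars = distutils.sysconfig.get_config_vars()
    if 'gcc' in cvars.get('CC', ''):
        warnings_as_errors = (' -Werror=declaration-after-statement -Werror=vla'
                              ' -Werror=nonnull -Werror=pointer-arith'
                              ' -Wlogical-op')
        env['CFLAGS'] = warnings_as_errors + env.get('CFLAGS', '')
    print(env.get('CFLAGS', '(no CFLAGS set)'))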