summaryrefslogtreecommitdiff
path: root/numpy
diff options
context:
space:
mode:
Diffstat (limited to 'numpy')
-rw-r--r--numpy/__init__.py25
-rw-r--r--numpy/add_newdocs.py88
-rw-r--r--numpy/core/_methods.py21
-rw-r--r--numpy/core/arrayprint.py5
-rw-r--r--numpy/core/code_generators/generate_umath.py6
-rw-r--r--numpy/core/code_generators/numpy_api.py2
-rw-r--r--numpy/core/einsumfunc.py2
-rw-r--r--numpy/core/fromnumeric.py100
-rw-r--r--numpy/core/include/numpy/ndarrayobject.h13
-rw-r--r--numpy/core/include/numpy/npy_interrupt.h2
-rw-r--r--numpy/core/include/numpy/npy_math.h11
-rw-r--r--numpy/core/include/numpy/ufuncobject.h2
-rw-r--r--numpy/core/numeric.py10
-rw-r--r--numpy/core/setup.py2
-rw-r--r--numpy/core/src/multiarray/_multiarray_tests.c.src15
-rw-r--r--numpy/core/src/multiarray/array_assign_scalar.c4
-rw-r--r--numpy/core/src/multiarray/arrayobject.c3
-rw-r--r--numpy/core/src/multiarray/arraytypes.c.src100
-rw-r--r--numpy/core/src/multiarray/ctors.c85
-rw-r--r--numpy/core/src/multiarray/datetime.c17
-rw-r--r--numpy/core/src/multiarray/descriptor.c30
-rw-r--r--numpy/core/src/multiarray/dtype_transfer.c9
-rw-r--r--numpy/core/src/multiarray/einsum.c.src438
-rw-r--r--numpy/core/src/multiarray/lowlevel_strided_loops.c.src4
-rw-r--r--numpy/core/src/multiarray/mapping.c17
-rw-r--r--numpy/core/src/multiarray/multiarraymodule.c6
-rw-r--r--numpy/core/src/multiarray/nditer_pywrap.c38
-rw-r--r--numpy/core/src/multiarray/number.c4
-rw-r--r--numpy/core/src/multiarray/scalartypes.c.src2
-rw-r--r--numpy/core/src/npymath/ieee754.c.src75
-rw-r--r--numpy/core/src/umath/_umath_tests.c.src62
-rw-r--r--numpy/core/src/umath/extobj.c2
-rw-r--r--numpy/core/src/umath/loops.c.src17
-rw-r--r--numpy/core/src/umath/override.c15
-rw-r--r--numpy/core/src/umath/reduction.c2
-rw-r--r--numpy/core/src/umath/scalarmath.c.src14
-rw-r--r--numpy/core/src/umath/simd.inc.src7
-rw-r--r--numpy/core/src/umath/ufunc_object.c571
-rw-r--r--numpy/core/src/umath/umathmodule.c91
-rw-r--r--numpy/core/tests/test_api.py13
-rw-r--r--numpy/core/tests/test_arrayprint.py2
-rw-r--r--numpy/core/tests/test_datetime.py37
-rw-r--r--numpy/core/tests/test_deprecations.py16
-rw-r--r--numpy/core/tests/test_einsum.py10
-rw-r--r--numpy/core/tests/test_indexing.py13
-rw-r--r--numpy/core/tests/test_longdouble.py2
-rw-r--r--numpy/core/tests/test_multiarray.py138
-rw-r--r--numpy/core/tests/test_nditer.py60
-rw-r--r--numpy/core/tests/test_numeric.py12
-rw-r--r--numpy/core/tests/test_print.py2
-rw-r--r--numpy/core/tests/test_regression.py11
-rw-r--r--numpy/core/tests/test_scalarprint.py38
-rw-r--r--numpy/core/tests/test_shape_base.py4
-rw-r--r--numpy/core/tests/test_ufunc.py176
-rw-r--r--numpy/core/tests/test_umath.py82
-rw-r--r--numpy/distutils/misc_util.py13
-rw-r--r--numpy/distutils/system_info.py131
-rw-r--r--numpy/f2py/src/fortranobject.c2
-rw-r--r--numpy/lib/arraypad.py289
-rw-r--r--numpy/lib/arraysetops.py65
-rw-r--r--numpy/lib/format.py13
-rw-r--r--numpy/lib/function_base.py114
-rw-r--r--numpy/lib/histograms.py36
-rw-r--r--numpy/lib/index_tricks.py2
-rw-r--r--numpy/lib/nanfunctions.py110
-rw-r--r--numpy/lib/npyio.py30
-rw-r--r--numpy/lib/polynomial.py5
-rw-r--r--numpy/lib/scimath.py2
-rw-r--r--numpy/lib/shape_base.py227
-rw-r--r--numpy/lib/tests/test_arraypad.py13
-rw-r--r--numpy/lib/tests/test_arraysetops.py43
-rw-r--r--numpy/lib/tests/test_function_base.py47
-rw-r--r--numpy/lib/tests/test_histograms.py43
-rw-r--r--numpy/lib/tests/test_index_tricks.py42
-rw-r--r--numpy/lib/tests/test_io.py2
-rw-r--r--numpy/lib/tests/test_nanfunctions.py141
-rw-r--r--numpy/lib/tests/test_shape_base.py118
-rw-r--r--numpy/lib/twodim_base.py2
-rw-r--r--numpy/linalg/linalg.py133
-rw-r--r--numpy/linalg/tests/test_linalg.py476
-rw-r--r--numpy/linalg/umath_linalg.c.src110
-rw-r--r--numpy/ma/core.py81
-rw-r--r--numpy/ma/extras.py47
-rw-r--r--numpy/ma/tests/test_core.py143
-rw-r--r--numpy/ma/tests/test_extras.py12
-rw-r--r--numpy/ma/tests/test_old_ma.py6
-rw-r--r--numpy/ma/tests/test_regression.py10
-rw-r--r--numpy/ma/tests/test_subclassing.py45
-rw-r--r--numpy/matrixlib/defmatrix.py115
-rw-r--r--numpy/matrixlib/tests/test_defmatrix.py2
-rw-r--r--numpy/matrixlib/tests/test_interaction.py361
-rw-r--r--numpy/matrixlib/tests/test_masked_matrix.py231
-rw-r--r--numpy/matrixlib/tests/test_matrix_linalg.py95
-rw-r--r--numpy/polynomial/tests/test_polyutils.py9
-rw-r--r--numpy/random/mtrand/mtrand.pyx40
-rw-r--r--numpy/testing/_private/decorators.py2
-rw-r--r--numpy/testing/_private/utils.py20
-rw-r--r--numpy/testing/tests/test_utils.py48
98 files changed, 3876 insertions, 2133 deletions
diff --git a/numpy/__init__.py b/numpy/__init__.py
index d10a1ecd3..d250ed5ac 100644
--- a/numpy/__init__.py
+++ b/numpy/__init__.py
@@ -194,3 +194,28 @@ else:
from numpy.testing._private.pytesttester import PytestTester
test = PytestTester(__name__)
del PytestTester
+
+
+ def _sanity_check():
+ """
+ Quick sanity checks for common bugs caused by environment.
+ There are some cases e.g. with wrong BLAS ABI that cause wrong
+ results under specific runtime conditions that are not necessarily
+ achieved during test suite runs, and it is useful to catch those early.
+
+ See https://github.com/numpy/numpy/issues/8577 and other
+ similar bug reports.
+
+ """
+ try:
+ x = ones(2, dtype=float32)
+ if not abs(x.dot(x) - 2.0) < 1e-5:
+ raise AssertionError()
+ except AssertionError:
+ msg = ("The current Numpy installation ({!r}) fails to "
+ "pass simple sanity checks. This can be caused for example "
+ "by incorrect BLAS library being linked in.")
+ raise RuntimeError(msg.format(__file__))
+
+ _sanity_check()
+ del _sanity_check
diff --git a/numpy/add_newdocs.py b/numpy/add_newdocs.py
index 8e8339355..fc2130096 100644
--- a/numpy/add_newdocs.py
+++ b/numpy/add_newdocs.py
@@ -257,6 +257,7 @@ add_newdoc('numpy.core', 'nditer',
dtypes : tuple of dtype(s)
The data types of the values provided in `value`. This may be
different from the operand data types if buffering is enabled.
+ Valid only before the iterator is closed.
finished : bool
Whether the iteration over the operands is finished or not.
has_delayed_bufalloc : bool
@@ -282,7 +283,8 @@ add_newdoc('numpy.core', 'nditer',
Size of the iterator.
itviews
Structured view(s) of `operands` in memory, matching the reordered
- and optimized iterator access pattern.
+ and optimized iterator access pattern. Valid only before the iterator
+ is closed.
multi_index
When the "multi_index" flag was used, this property
provides access to the index. Raises a ValueError if accessed
@@ -292,7 +294,8 @@ add_newdoc('numpy.core', 'nditer',
nop : int
The number of iterator operands.
operands : tuple of operand(s)
- The array(s) to be iterated over.
+ The array(s) to be iterated over. Valid only before the iterator is
+ closed.
shape : tuple of ints
Shape tuple, the shape of the iterator.
value
@@ -331,12 +334,12 @@ add_newdoc('numpy.core', 'nditer',
it = np.nditer([x, y, out], [],
[['readonly'], ['readonly'], ['writeonly','allocate']])
+ with it:
+ while not it.finished:
+ addop(it[0], it[1], out=it[2])
+ it.iternext()
- while not it.finished:
- addop(it[0], it[1], out=it[2])
- it.iternext()
-
- return it.operands[2]
+ return it.operands[2]
Here is an example outer product function::
@@ -351,7 +354,7 @@ add_newdoc('numpy.core', 'nditer',
with it:
for (a, b, c) in it:
mulop(a, b, out=c)
- return it.operands[2]
+ return it.operands[2]
>>> a = np.arange(2)+1
>>> b = np.arange(3)+1
@@ -374,7 +377,7 @@ add_newdoc('numpy.core', 'nditer',
while not it.finished:
it[0] = lamdaexpr(*it[1:])
it.iternext()
- return it.operands[0]
+ return it.operands[0]
>>> a = np.arange(5)
>>> b = np.ones(5)
@@ -382,10 +385,11 @@ add_newdoc('numpy.core', 'nditer',
array([ 0.5, 1.5, 4.5, 9.5, 16.5])
If operand flags `"writeonly"` or `"readwrite"` are used the operands may
- be views into the original data with the WRITEBACKIFCOPY flag. In this case
- nditer must be used as a context manager. The temporary
- data will be written back to the original data when the `` __exit__``
- function is called but not before::
+ be views into the original data with the `WRITEBACKIFCOPY` flag. In this case
+ nditer must be used as a context manager or the nditer.close
+ method must be called before using the result. The temporary
+ data will be written back to the original data when the `__exit__`
+ function is called but not before:
>>> a = np.arange(6, dtype='i4')[::-2]
>>> with nditer(a, [],
@@ -402,7 +406,7 @@ add_newdoc('numpy.core', 'nditer',
references (like `x` in the example) may or may not share data with
the original data `a`. If writeback semantics were active, i.e. if
`x.base.flags.writebackifcopy` is `True`, then exiting the iterator
- will sever the connection between `x` and `a`, writing to `x` will
+ will sever the connection between `x` and `a`, writing to `x` will
no longer write to `a`. If writeback semantics are not active, then
`x.data` will still point at some part of `a.data`, and writing to
one will affect the other.
@@ -430,6 +434,13 @@ add_newdoc('numpy.core', 'nditer', ('copy',
"""))
+add_newdoc('numpy.core', 'nditer', ('operands',
+ """
+ operands[`Slice`]
+
+ The array(s) to be iterated over. Valid only before the iterator is closed.
+ """))
+
add_newdoc('numpy.core', 'nditer', ('debug_print',
"""
debug_print()
@@ -556,6 +567,11 @@ add_newdoc('numpy.core', 'nditer', ('close',
Resolve all writeback semantics in writeable operands.
+ See Also
+ --------
+
+ :ref:`nditer-context-manager`
+
"""))
@@ -4743,6 +4759,11 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('tofile',
machines with different endianness. Some of these problems can be overcome
by outputting the data as text files, at the expense of speed and file
size.
+
+ When fid is a file object, array contents are directly written to the
+ file, bypassing the file object's ``write`` method. As a result, tofile
+ cannot be used with file objects supporting compression (e.g., GzipFile)
+ or file-like objects that do not support ``fileno()`` (e.g., BytesIO).
"""))
@@ -5631,10 +5652,13 @@ add_newdoc('numpy.core', 'ufunc',
Alternate array object(s) in which to put the result; if provided, it
must have a shape that the inputs broadcast to. A tuple of arrays
(possible only as a keyword argument) must have length equal to the
- number of outputs; use `None` for outputs to be allocated by the ufunc.
+ number of outputs; use `None` for uninitialized outputs to be
+ allocated by the ufunc.
where : array_like, optional
Values of True indicate to calculate the ufunc at that position, values
- of False indicate to leave the value in the output alone.
+ of False indicate to leave the value in the output alone. Note that if
+ an uninitialized return array is created via the default ``out=None``,
+ then the elements where the values are False will remain uninitialized.
**kwargs
For other keyword-only arguments, see the :ref:`ufunc docs <ufuncs.kwargs>`.
@@ -5642,7 +5666,8 @@ add_newdoc('numpy.core', 'ufunc',
-------
r : ndarray or tuple of ndarray
`r` will have the shape that the arrays in `x` broadcast to; if `out` is
- provided, `r` will be equal to `out`. If the function has more than one
+ provided, it will be returned. If not, `r` will be allocated and
+ may contain uninitialized values. If the function has more than one
output, then the result will be a tuple of arrays.
""")
@@ -5840,7 +5865,7 @@ add_newdoc('numpy.core', 'ufunc', ('signature',
add_newdoc('numpy.core', 'ufunc', ('reduce',
"""
- reduce(a, axis=0, dtype=None, out=None, keepdims=False)
+ reduce(a, axis=0, dtype=None, out=None, keepdims=False, initial=<no value>)
Reduces `a`'s dimension by one, by applying ufunc along one axis.
@@ -5896,6 +5921,14 @@ add_newdoc('numpy.core', 'ufunc', ('reduce',
the result will broadcast correctly against the original `arr`.
.. versionadded:: 1.7.0
+ initial : scalar, optional
+ The value with which to start the reduction.
+ If the ufunc has no identity or the dtype is object, this defaults
+ to None - otherwise it defaults to ufunc.identity.
+ If ``None`` is given, the first element of the reduction is used,
+ and an error is thrown if the reduction is empty.
+
+ .. versionadded:: 1.15.0
Returns
-------
@@ -5927,7 +5960,24 @@ add_newdoc('numpy.core', 'ufunc', ('reduce',
>>> np.add.reduce(X, 2)
array([[ 1, 5],
[ 9, 13]])
-
+
+ You can use the ``initial`` keyword argument to initialize the reduction with a
+ different value.
+
+ >>> np.add.reduce([10], initial=5)
+ 15
+ >>> np.add.reduce(np.ones((2, 2, 2)), axis=(0, 2), initial=10)
+ array([14., 14.])
+
+ Allows reductions of empty arrays where they would normally fail, i.e.
+ for ufuncs without an identity.
+
+ >>> np.minimum.reduce([], initial=np.inf)
+ inf
+ >>> np.minimum.reduce([])
+ Traceback (most recent call last):
+ ...
+ ValueError: zero-size array to reduction operation minimum which has no identity
"""))
add_newdoc('numpy.core', 'ufunc', ('accumulate',
diff --git a/numpy/core/_methods.py b/numpy/core/_methods.py
index 0f928676b..33f6d01a8 100644
--- a/numpy/core/_methods.py
+++ b/numpy/core/_methods.py
@@ -11,6 +11,7 @@ from numpy.core import multiarray as mu
from numpy.core import umath as um
from numpy.core.numeric import asanyarray
from numpy.core import numerictypes as nt
+from numpy._globals import _NoValue
# save those O(100) nanoseconds!
umr_maximum = um.maximum.reduce
@@ -22,17 +23,21 @@ umr_all = um.logical_and.reduce
# avoid keyword arguments to speed up parsing, saves about 15%-20% for very
# small reductions
-def _amax(a, axis=None, out=None, keepdims=False):
- return umr_maximum(a, axis, None, out, keepdims)
+def _amax(a, axis=None, out=None, keepdims=False,
+ initial=_NoValue):
+ return umr_maximum(a, axis, None, out, keepdims, initial)
-def _amin(a, axis=None, out=None, keepdims=False):
- return umr_minimum(a, axis, None, out, keepdims)
+def _amin(a, axis=None, out=None, keepdims=False,
+ initial=_NoValue):
+ return umr_minimum(a, axis, None, out, keepdims, initial)
-def _sum(a, axis=None, dtype=None, out=None, keepdims=False):
- return umr_sum(a, axis, dtype, out, keepdims)
+def _sum(a, axis=None, dtype=None, out=None, keepdims=False,
+ initial=_NoValue):
+ return umr_sum(a, axis, dtype, out, keepdims, initial)
-def _prod(a, axis=None, dtype=None, out=None, keepdims=False):
- return umr_prod(a, axis, dtype, out, keepdims)
+def _prod(a, axis=None, dtype=None, out=None, keepdims=False,
+ initial=_NoValue):
+ return umr_prod(a, axis, dtype, out, keepdims, initial)
def _any(a, axis=None, dtype=None, out=None, keepdims=False):
return umr_any(a, axis, dtype, out, keepdims)
diff --git a/numpy/core/arrayprint.py b/numpy/core/arrayprint.py
index adbbab6ed..6d15cb23f 100644
--- a/numpy/core/arrayprint.py
+++ b/numpy/core/arrayprint.py
@@ -647,6 +647,9 @@ def array2string(a, max_line_width=None, precision=None,
options.update(overrides)
if options['legacy'] == '1.13':
+ if style is np._NoValue:
+ style = repr
+
if a.shape == () and not a.dtype.names:
return style(a.item())
elif style is not np._NoValue:
@@ -1085,7 +1088,7 @@ def format_float_positional(x, precision=None, unique=True,
Examples
--------
- >>> np.format_float_scientific(np.float32(np.pi))
+ >>> np.format_float_positional(np.float32(np.pi))
'3.1415927'
>>> np.format_float_positional(np.float16(np.pi))
'3.14'
diff --git a/numpy/core/code_generators/generate_umath.py b/numpy/core/code_generators/generate_umath.py
index 7492baf9d..632bcb41f 100644
--- a/numpy/core/code_generators/generate_umath.py
+++ b/numpy/core/code_generators/generate_umath.py
@@ -908,8 +908,8 @@ if sys.version_info[0] >= 3:
del defdict['divide']
def indent(st, spaces):
- indention = ' '*spaces
- indented = indention + st.replace('\n', '\n'+indention)
+ indentation = ' '*spaces
+ indented = indentation + st.replace('\n', '\n'+indentation)
# trim off any trailing spaces
indented = re.sub(r' +$', r'', indented)
return indented
@@ -972,7 +972,7 @@ def make_arrays(funcdict):
for vt in t.simd:
code2list.append(textwrap.dedent("""\
#ifdef HAVE_ATTRIBUTE_TARGET_{ISA}
- if (npy_cpu_supports("{ISA}")) {{
+ if (npy_cpu_supports("{isa}")) {{
{fname}_functions[{idx}] = {type}_{fname}_{isa};
}}
#endif
diff --git a/numpy/core/code_generators/numpy_api.py b/numpy/core/code_generators/numpy_api.py
index 157fa3447..6cfbbbcc7 100644
--- a/numpy/core/code_generators/numpy_api.py
+++ b/numpy/core/code_generators/numpy_api.py
@@ -6,7 +6,7 @@ Each dictionary contains name -> index pair.
Whenever you change one index, you break the ABI (and the ABI version number
should be incremented). Whenever you add an item to one of the dict, the API
needs to be updated in both setup_common.py and by adding an appropriate
-entry to cversion.txt (generate the hash via "python cversions.py".
+entry to cversion.txt (generate the hash via "python cversions.py").
When adding a function, make sure to use the next integer not used as an index
(in case you use an existing index or jump, the build will stop and raise an
diff --git a/numpy/core/einsumfunc.py b/numpy/core/einsumfunc.py
index 8cd6eae12..bb6767c4f 100644
--- a/numpy/core/einsumfunc.py
+++ b/numpy/core/einsumfunc.py
@@ -1148,7 +1148,7 @@ def einsum(*operands, **kwargs):
# Do the contraction
new_view = c_einsum(einsum_str, *tmp_operands, **einsum_kwargs)
- # Append new items and derefernce what we can
+ # Append new items and dereference what we can
operands.append(new_view)
del tmp_operands, new_view
diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py
index 948c2139d..d1aae0aa0 100644
--- a/numpy/core/fromnumeric.py
+++ b/numpy/core/fromnumeric.py
@@ -140,6 +140,7 @@ def take(a, indices, axis=None, out=None, mode='raise'):
--------
compress : Take elements using a boolean mask
ndarray.take : equivalent method
+ take_along_axis : Take elements by matching the array and the index arrays
Notes
-----
@@ -478,6 +479,7 @@ def put(a, ind, v, mode='raise'):
See Also
--------
putmask, place
+ put_along_axis : Put elements by matching the array and the index arrays
Examples
--------
@@ -723,7 +725,9 @@ def argpartition(a, kth, axis=-1, kind='introselect', order=None):
-------
index_array : ndarray, int
Array of indices that partition `a` along the specified axis.
- In other words, ``a[index_array]`` yields a partitioned `a`.
+ If `a` is one-dimensional, ``a[index_array]`` yields a partitioned `a`.
+ More generally, ``np.take_along_axis(a, index_array, axis=a)`` always
+ yields the partitioned `a`, irrespective of dimensionality.
See Also
--------
@@ -904,6 +908,8 @@ def argsort(a, axis=-1, kind='quicksort', order=None):
index_array : ndarray, int
Array of indices that sort `a` along the specified axis.
If `a` is one-dimensional, ``a[index_array]`` yields a sorted `a`.
+ More generally, ``np.take_along_axis(a, index_array, axis=a)`` always
+ yields the sorted `a`, irrespective of dimensionality.
See Also
--------
@@ -1336,10 +1342,11 @@ def diagonal(a, offset=0, axis1=0, axis2=1):
Returns
-------
array_of_diagonals : ndarray
- If `a` is 2-D and not a `matrix`, a 1-D array of the same type as `a`
- containing the diagonal is returned. If `a` is a `matrix`, a 1-D
- array containing the diagonal is returned in order to maintain
- backward compatibility.
+ If `a` is 2-D, then a 1-D array containing the diagonal and of the
+ same type as `a` is returned unless `a` is a `matrix`, in which case
+ a 1-D array rather than a (2-D) `matrix` is returned in order to
+ maintain backward compatibility.
+
If ``a.ndim > 2``, then the dimensions specified by `axis1` and `axis2`
are removed, and a new axis inserted at the end corresponding to the
diagonal.
@@ -1496,10 +1503,9 @@ def ravel(a, order='C'):
Returns
-------
y : array_like
- If `a` is a matrix, y is a 1-D ndarray, otherwise y is an array of
- the same subtype as `a`. The shape of the returned array is
- ``(a.size,)``. Matrices are special cased for backward
- compatibility.
+ y is an array of the same subtype as `a`, with shape ``(a.size,)``.
+ Note that matrices are special cased for backward compatibility, if `a`
+ is a matrix, then y is a 1-D ndarray.
See Also
--------
@@ -1812,7 +1818,7 @@ def clip(a, a_min, a_max, out=None):
return _wrapfunc(a, 'clip', a_min, a_max, out=out)
-def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
+def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, initial=np._NoValue):
"""
Sum of array elements over a given axis.
@@ -1851,6 +1857,10 @@ def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims` any
exceptions will be raised.
+ initial : scalar, optional
+ Starting value for the sum. See `~numpy.ufunc.reduce` for details.
+
+ .. versionadded:: 1.15.0
Returns
-------
@@ -1898,6 +1908,10 @@ def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
>>> np.ones(128, dtype=np.int8).sum(dtype=np.int8)
-128
+ You can also start the sum with a value other than zero:
+
+ >>> np.sum([10], initial=5)
+ 15
"""
if isinstance(a, _gentype):
# 2018-02-25, 1.15.0
@@ -1912,7 +1926,8 @@ def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
return out
return res
- return _wrapreduction(a, np.add, 'sum', axis, dtype, out, keepdims=keepdims)
+ return _wrapreduction(a, np.add, 'sum', axis, dtype, out, keepdims=keepdims,
+ initial=initial)
def any(a, axis=None, out=None, keepdims=np._NoValue):
@@ -2209,7 +2224,7 @@ def ptp(a, axis=None, out=None, keepdims=np._NoValue):
return _methods._ptp(a, axis=axis, out=out, **kwargs)
-def amax(a, axis=None, out=None, keepdims=np._NoValue):
+def amax(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue):
"""
Return the maximum of an array or maximum along an axis.
@@ -2241,6 +2256,13 @@ def amax(a, axis=None, out=None, keepdims=np._NoValue):
sub-class' method does not implement `keepdims` any
exceptions will be raised.
+ initial : scalar, optional
+ The minimum value of an output element. Must be present to allow
+ computation on empty slice. See `~numpy.ufunc.reduce` for details.
+
+ .. versionadded:: 1.15.0
+
+
Returns
-------
amax : ndarray or scalar
@@ -2293,11 +2315,26 @@ def amax(a, axis=None, out=None, keepdims=np._NoValue):
>>> np.nanmax(b)
4.0
+ You can use an initial value to compute the maximum of an empty slice, or
+ to initialize it to a different value:
+
+ >>> np.max([[-50], [10]], axis=-1, initial=0)
+ array([ 0, 10])
+
+ Notice that the initial value is used as one of the elements for which the
+ maximum is determined, unlike for the default argument of Python's max
+ function, which is only used for empty iterables.
+
+ >>> np.max([5], initial=6)
+ 6
+ >>> max([5], default=6)
+ 5
"""
- return _wrapreduction(a, np.maximum, 'max', axis, None, out, keepdims=keepdims)
+ return _wrapreduction(a, np.maximum, 'max', axis, None, out, keepdims=keepdims,
+ initial=initial)
-def amin(a, axis=None, out=None, keepdims=np._NoValue):
+def amin(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue):
"""
Return the minimum of an array or minimum along an axis.
@@ -2329,6 +2366,12 @@ def amin(a, axis=None, out=None, keepdims=np._NoValue):
sub-class' method does not implement `keepdims` any
exceptions will be raised.
+ initial : scalar, optional
+ The maximum value of an output element. Must be present to allow
+ computation on empty slice. See `~numpy.ufunc.reduce` for details.
+
+ .. versionadded:: 1.15.0
+
Returns
-------
amin : ndarray or scalar
@@ -2381,8 +2424,22 @@ def amin(a, axis=None, out=None, keepdims=np._NoValue):
>>> np.nanmin(b)
0.0
+ >>> np.min([[-50], [10]], axis=-1, initial=0)
+ array([-50, 0])
+
+ Notice that the initial value is used as one of the elements for which the
+ minimum is determined, unlike for the default argument of Python's min
+ function, which is only used for empty iterables.
+
+ Notice that this isn't the same as Python's ``default`` argument.
+
+ >>> np.min([6], initial=5)
+ 5
+ >>> min([6], default=5)
+ 6
"""
- return _wrapreduction(a, np.minimum, 'min', axis, None, out, keepdims=keepdims)
+ return _wrapreduction(a, np.minimum, 'min', axis, None, out, keepdims=keepdims,
+ initial=initial)
def alen(a):
@@ -2418,7 +2475,7 @@ def alen(a):
return len(array(a, ndmin=1))
-def prod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
+def prod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, initial=np._NoValue):
"""
Return the product of array elements over a given axis.
@@ -2458,6 +2515,10 @@ def prod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims` any
exceptions will be raised.
+ initial : scalar, optional
+ The starting value for this product. See `~numpy.ufunc.reduce` for details.
+
+ .. versionadded:: 1.15.0
Returns
-------
@@ -2515,8 +2576,13 @@ def prod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
>>> np.prod(x).dtype == int
True
+ You can also start the product with a value other than one:
+
+ >>> np.prod([1, 2], initial=5)
+ 10
"""
- return _wrapreduction(a, np.multiply, 'prod', axis, dtype, out, keepdims=keepdims)
+ return _wrapreduction(a, np.multiply, 'prod', axis, dtype, out, keepdims=keepdims,
+ initial=initial)
def cumprod(a, axis=None, dtype=None, out=None):
diff --git a/numpy/core/include/numpy/ndarrayobject.h b/numpy/core/include/numpy/ndarrayobject.h
index ec0fd1ee9..12fc7098c 100644
--- a/numpy/core/include/numpy/ndarrayobject.h
+++ b/numpy/core/include/numpy/ndarrayobject.h
@@ -170,14 +170,17 @@ extern "C" CONFUSE_EMACS
(k)*PyArray_STRIDES(obj)[2] + \
(l)*PyArray_STRIDES(obj)[3]))
+/* Move to arrayobject.c once PyArray_XDECREF_ERR is removed */
static NPY_INLINE void
PyArray_DiscardWritebackIfCopy(PyArrayObject *arr)
{
- if (arr != NULL) {
- if ((PyArray_FLAGS(arr) & NPY_ARRAY_WRITEBACKIFCOPY) ||
- (PyArray_FLAGS(arr) & NPY_ARRAY_UPDATEIFCOPY)) {
- PyArrayObject *base = (PyArrayObject *)PyArray_BASE(arr);
- PyArray_ENABLEFLAGS(base, NPY_ARRAY_WRITEABLE);
+ PyArrayObject_fields *fa = (PyArrayObject_fields *)arr;
+ if (fa && fa->base) {
+ if ((fa->flags & NPY_ARRAY_UPDATEIFCOPY) ||
+ (fa->flags & NPY_ARRAY_WRITEBACKIFCOPY)) {
+ PyArray_ENABLEFLAGS((PyArrayObject*)fa->base, NPY_ARRAY_WRITEABLE);
+ Py_DECREF(fa->base);
+ fa->base = NULL;
PyArray_CLEARFLAGS(arr, NPY_ARRAY_WRITEBACKIFCOPY);
PyArray_CLEARFLAGS(arr, NPY_ARRAY_UPDATEIFCOPY);
}
diff --git a/numpy/core/include/numpy/npy_interrupt.h b/numpy/core/include/numpy/npy_interrupt.h
index f71fd689e..40cb7ac5e 100644
--- a/numpy/core/include/numpy/npy_interrupt.h
+++ b/numpy/core/include/numpy/npy_interrupt.h
@@ -55,7 +55,7 @@ Ideas:
Simple Interface:
-In your C-extension: around a block of code you want to be interruptable
+In your C-extension: around a block of code you want to be interruptible
with a SIGINT
NPY_SIGINT_ON
diff --git a/numpy/core/include/numpy/npy_math.h b/numpy/core/include/numpy/npy_math.h
index ba32bcdd3..582390cdc 100644
--- a/numpy/core/include/numpy/npy_math.h
+++ b/numpy/core/include/numpy/npy_math.h
@@ -524,8 +524,17 @@ npy_clongdouble npy_catanhl(npy_clongdouble z);
#define NPY_FPE_UNDERFLOW 4
#define NPY_FPE_INVALID 8
-int npy_get_floatstatus(void);
+int npy_clear_floatstatus_barrier(char*);
+int npy_get_floatstatus_barrier(char*);
+/*
+ * use caution with these - clang and gcc8.1 are known to reorder calls
+ * to this form of the function which can defeat the check. The _barrier
+ * form of the call is preferable, where the argument is
+ * (char*)&local_variable
+ */
int npy_clear_floatstatus(void);
+int npy_get_floatstatus(void);
+
void npy_set_floatstatus_divbyzero(void);
void npy_set_floatstatus_overflow(void);
void npy_set_floatstatus_underflow(void);
diff --git a/numpy/core/include/numpy/ufuncobject.h b/numpy/core/include/numpy/ufuncobject.h
index d0ac1fd7d..4b1b3d325 100644
--- a/numpy/core/include/numpy/ufuncobject.h
+++ b/numpy/core/include/numpy/ufuncobject.h
@@ -167,7 +167,7 @@ typedef struct _tagPyUFuncObject {
int *core_dim_ixs;
/*
* positions of 1st core dimensions of each
- * argument in core_dim_ixs
+ * argument in core_dim_ixs, equivalent to cumsum(core_num_dims)
*/
int *core_offsets;
/* signature string for printing purpose */
diff --git a/numpy/core/numeric.py b/numpy/core/numeric.py
index 1108d4667..7ade3d224 100644
--- a/numpy/core/numeric.py
+++ b/numpy/core/numeric.py
@@ -489,9 +489,9 @@ def asarray(a, dtype=None, order=None):
Contrary to `asanyarray`, ndarray subclasses are not passed through:
- >>> issubclass(np.matrix, np.ndarray)
+ >>> issubclass(np.recarray, np.ndarray)
True
- >>> a = np.matrix([[1, 2]])
+ >>> a = np.array([(1.0, 2), (3.0, 4)], dtype='f4,i4').view(np.recarray)
>>> np.asarray(a) is a
False
>>> np.asanyarray(a) is a
@@ -545,7 +545,7 @@ def asanyarray(a, dtype=None, order=None):
Instances of `ndarray` subclasses are passed through as-is:
- >>> a = np.matrix([1, 2])
+ >>> a = np.array([(1.0, 2), (3.0, 4)], dtype='f4,i4').view(np.recarray)
>>> np.asanyarray(a) is a
True
@@ -2035,7 +2035,7 @@ def binary_repr(num, width=None):
'11101'
"""
- def warn_if_insufficient(width, binwdith):
+ def warn_if_insufficient(width, binwidth):
if width is not None and width < binwidth:
warnings.warn(
"Insufficient bit width provided. This behavior "
@@ -2280,7 +2280,7 @@ def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
relative difference (`rtol` * abs(`b`)) and the absolute difference
`atol` are added together to compare against the absolute difference
between `a` and `b`.
-
+
.. warning:: The default `atol` is not appropriate for comparing numbers
that are much smaller than one (see Notes).
diff --git a/numpy/core/setup.py b/numpy/core/setup.py
index 15f6e1522..7d8bab557 100644
--- a/numpy/core/setup.py
+++ b/numpy/core/setup.py
@@ -664,7 +664,7 @@ def configuration(parent_package='',top_path=None):
def get_mathlib_info(*args):
# Another ugly hack: the mathlib info is known once build_src is run,
# but we cannot use add_installed_pkg_config here either, so we only
- # update the substition dictionary during npymath build
+ # update the substitution dictionary during npymath build
config_cmd = config.get_config_cmd()
# Check that the toolchain works, to fail early if it doesn't
diff --git a/numpy/core/src/multiarray/_multiarray_tests.c.src b/numpy/core/src/multiarray/_multiarray_tests.c.src
index 38698887a..0299f1a1b 100644
--- a/numpy/core/src/multiarray/_multiarray_tests.c.src
+++ b/numpy/core/src/multiarray/_multiarray_tests.c.src
@@ -687,6 +687,18 @@ npy_resolve(PyObject* NPY_UNUSED(self), PyObject* args)
Py_RETURN_NONE;
}
+/* resolve WRITEBACKIFCOPY */
+static PyObject*
+npy_discard(PyObject* NPY_UNUSED(self), PyObject* args)
+{
+ if (!PyArray_Check(args)) {
+ PyErr_SetString(PyExc_TypeError, "test needs ndarray input");
+ return NULL;
+ }
+ PyArray_DiscardWritebackIfCopy((PyArrayObject*)args);
+ Py_RETURN_NONE;
+}
+
#if !defined(NPY_PY3K)
static PyObject *
int_subclass(PyObject *dummy, PyObject *args)
@@ -1857,6 +1869,9 @@ static PyMethodDef Multiarray_TestsMethods[] = {
{"npy_resolve",
npy_resolve,
METH_O, NULL},
+ {"npy_discard",
+ npy_discard,
+ METH_O, NULL},
#if !defined(NPY_PY3K)
{"test_int_subclass",
int_subclass,
diff --git a/numpy/core/src/multiarray/array_assign_scalar.c b/numpy/core/src/multiarray/array_assign_scalar.c
index 3d259ae05..17de99cb9 100644
--- a/numpy/core/src/multiarray/array_assign_scalar.c
+++ b/numpy/core/src/multiarray/array_assign_scalar.c
@@ -245,6 +245,10 @@ PyArray_AssignRawScalar(PyArrayObject *dst,
allocated_src_data = 1;
}
+ if (PyDataType_FLAGCHK(PyArray_DESCR(dst), NPY_NEEDS_INIT)) {
+ memset(tmp_src_data, 0, PyArray_DESCR(dst)->elsize);
+ }
+
if (PyArray_CastRawArrays(1, src_data, tmp_src_data, 0, 0,
src_dtype, PyArray_DESCR(dst), 0) != NPY_SUCCEED) {
src_data = tmp_src_data;
diff --git a/numpy/core/src/multiarray/arrayobject.c b/numpy/core/src/multiarray/arrayobject.c
index 69538c6b7..6f4d3d349 100644
--- a/numpy/core/src/multiarray/arrayobject.c
+++ b/numpy/core/src/multiarray/arrayobject.c
@@ -86,7 +86,8 @@ NPY_NO_EXPORT int
PyArray_SetUpdateIfCopyBase(PyArrayObject *arr, PyArrayObject *base)
{
int ret;
- /* 2017-Nov-10 1.14 */
+    /* 2017-Nov-10 1.14 (for PyPy only) */
+ /* 2018-April-21 1.15 (all Python implementations) */
if (DEPRECATE("PyArray_SetUpdateIfCopyBase is deprecated, use "
"PyArray_SetWritebackIfCopyBase instead, and be sure to call "
"PyArray_ResolveWritebackIfCopy before the array is deallocated, "
diff --git a/numpy/core/src/multiarray/arraytypes.c.src b/numpy/core/src/multiarray/arraytypes.c.src
index 5e6804a5c..972147bb0 100644
--- a/numpy/core/src/multiarray/arraytypes.c.src
+++ b/numpy/core/src/multiarray/arraytypes.c.src
@@ -182,6 +182,15 @@ npy_strtoull(const char *str, char **endptr, int base)
*****************************************************************************
*/
+#define _ALIGN(type) offsetof(struct {char c; type v;}, v)
+/*
+ * Disable harmless compiler warning "4116: unnamed type definition in
+ * parentheses" which is caused by the _ALIGN macro.
+ */
+#if defined(_MSC_VER)
+#pragma warning(disable:4116)
+#endif
+
/**begin repeat
*
@@ -246,8 +255,10 @@ static int
}
return -1;
}
- if (ap == NULL || PyArray_ISBEHAVED(ap))
+ if (ap == NULL || PyArray_ISBEHAVED(ap)) {
+ assert(npy_is_aligned(ov, _ALIGN(@type@)));
*((@type@ *)ov)=temp;
+ }
else {
PyArray_DESCR(ap)->f->copyswap(ov, &temp, PyArray_ISBYTESWAPPED(ap),
ap);
@@ -655,9 +666,7 @@ VOID_getitem(void *input, void *vap)
{
PyArrayObject *ap = vap;
char *ip = input;
- PyArrayObject *u = NULL;
PyArray_Descr* descr;
- int itemsize;
descr = PyArray_DESCR(ap);
if (PyDataType_HASFIELDS(descr)) {
@@ -731,68 +740,7 @@ VOID_getitem(void *input, void *vap)
return (PyObject *)ret;
}
- /* 2017-11-26, 1.14 */
- if (DEPRECATE_FUTUREWARNING(
- "the `.item()` method of unstructured void types will return an "
- "immutable `bytes` object in the near future, the same as "
- "returned by `bytes(void_obj)`, instead of the mutable memoryview "
- "or integer array returned in numpy 1.13.") < 0) {
- return NULL;
- }
- /*
- * In the future all the code below will be replaced by
- *
- * For unstructured void types like V4, return a bytes object (copy).
- * return PyBytes_FromStringAndSize(PyArray_DATA(ap), descr->elsize);
- */
-
- if (PyDataType_FLAGCHK(descr, NPY_ITEM_HASOBJECT)
- || PyDataType_FLAGCHK(descr, NPY_ITEM_IS_POINTER)) {
- PyErr_SetString(PyExc_ValueError,
- "tried to get void-array with object members as buffer.");
- return NULL;
- }
- itemsize = PyArray_DESCR(ap)->elsize;
-
-#if defined(NPY_PY3K)
- /*
- * Return a byte array; there are no plain buffer objects on Py3
- */
- {
- npy_intp dims[1], strides[1];
- dims[0] = itemsize;
- strides[0] = 1;
- descr = PyArray_DescrNewFromType(NPY_BYTE);
- u = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type,
- descr, 1, dims, strides, ip,
- PyArray_ISWRITEABLE(ap) ? NPY_ARRAY_WRITEABLE : 0,
- NULL);
- Py_INCREF(ap);
- if (PyArray_SetBaseObject(u, (PyObject *)ap) < 0) {
- Py_DECREF(u);
- return NULL;
- }
- }
-#else
- /*
- * default is to return buffer object pointing to
- * current item a view of it
- */
- if (PyArray_ISWRITEABLE(ap)) {
- if (array_might_be_written(ap) < 0) {
- return NULL;
- }
- u = (PyArrayObject *)PyBuffer_FromReadWriteMemory(ip, itemsize);
- }
- else {
- u = (PyArrayObject *)PyBuffer_FromMemory(ip, itemsize);
- }
-#endif
-
- if (u == NULL) {
- return NULL;
- }
- return (PyObject *)u;
+ return PyBytes_FromStringAndSize(PyArray_DATA(ap), descr->elsize);
}
@@ -809,7 +757,7 @@ NPY_NO_EXPORT int PyArray_CopyObject(PyArrayObject *, PyObject *);
*/
NPY_NO_EXPORT int
_setup_field(int i, PyArray_Descr *descr, PyArrayObject *arr,
- npy_intp *offset_p)
+ npy_intp *offset_p, char *dstdata)
{
PyObject *key;
PyObject *tup;
@@ -823,7 +771,8 @@ _setup_field(int i, PyArray_Descr *descr, PyArrayObject *arr,
}
((PyArrayObject_fields *)(arr))->descr = new;
- if ((new->alignment > 1) && ((offset % new->alignment) != 0)) {
+ if ((new->alignment > 1) &&
+ ((((uintptr_t)dstdata + offset) % new->alignment) != 0)) {
PyArray_CLEARFLAGS(arr, NPY_ARRAY_ALIGNED);
}
else {
@@ -851,7 +800,7 @@ _copy_and_return_void_setitem(PyArray_Descr *dstdescr, char *dstdata,
if (PyArray_EquivTypes(srcdescr, dstdescr)) {
for (i = 0; i < names_size; i++) {
/* neither line can ever fail, in principle */
- if (_setup_field(i, dstdescr, dummy, &offset)) {
+ if (_setup_field(i, dstdescr, dummy, &offset, dstdata)) {
return -1;
}
PyArray_DESCR(dummy)->f->copyswap(dstdata + offset,
@@ -921,7 +870,7 @@ VOID_setitem(PyObject *op, void *input, void *vap)
PyObject *item;
/* temporarily make ap have only this field */
- if (_setup_field(i, descr, ap, &offset) == -1) {
+ if (_setup_field(i, descr, ap, &offset, ip) == -1) {
failed = 1;
break;
}
@@ -943,7 +892,7 @@ VOID_setitem(PyObject *op, void *input, void *vap)
for (i = 0; i < names_size; i++) {
/* temporarily make ap have only this field */
- if (_setup_field(i, descr, ap, &offset) == -1) {
+ if (_setup_field(i, descr, ap, &offset, ip) == -1) {
failed = 1;
break;
}
@@ -4256,17 +4205,6 @@ small_correlate(const char * d_, npy_intp dstride,
*****************************************************************************
*/
-
-#define _ALIGN(type) offsetof(struct {char c; type v;}, v)
-/*
- * Disable harmless compiler warning "4116: unnamed type definition in
- * parentheses" which is caused by the _ALIGN macro.
- */
-#if defined(_MSC_VER)
-#pragma warning(disable:4116)
-#endif
-
-
/**begin repeat
*
* #from = VOID, STRING, UNICODE#
diff --git a/numpy/core/src/multiarray/ctors.c b/numpy/core/src/multiarray/ctors.c
index 59eb2457c..5d3cee647 100644
--- a/numpy/core/src/multiarray/ctors.c
+++ b/numpy/core/src/multiarray/ctors.c
@@ -1276,42 +1276,31 @@ PyArray_New(PyTypeObject *subtype, int nd, npy_intp *dims, int type_num,
}
-NPY_NO_EXPORT int
-_array_from_buffer_3118(PyObject *obj, PyObject **out)
+/* Steals a reference to the memory view */
+NPY_NO_EXPORT PyObject *
+_array_from_buffer_3118(PyObject *memoryview)
{
/* PEP 3118 */
- PyObject *memoryview;
Py_buffer *view;
PyArray_Descr *descr = NULL;
- PyObject *r;
- int nd, flags, k;
+ PyObject *r = NULL;
+ int nd, flags;
Py_ssize_t d;
npy_intp shape[NPY_MAXDIMS], strides[NPY_MAXDIMS];
- memoryview = PyMemoryView_FromObject(obj);
- if (memoryview == NULL) {
- PyErr_Clear();
- return -1;
- }
-
view = PyMemoryView_GET_BUFFER(memoryview);
if (view->format != NULL) {
descr = _descriptor_from_pep3118_format(view->format);
if (descr == NULL) {
- PyObject *msg;
- msg = PyBytes_FromFormat("Invalid PEP 3118 format string: '%s'",
- view->format);
- PyErr_WarnEx(PyExc_RuntimeWarning, PyBytes_AS_STRING(msg), 0);
- Py_DECREF(msg);
goto fail;
}
/* Sanity check */
if (descr->elsize != view->itemsize) {
- PyErr_WarnEx(PyExc_RuntimeWarning,
- "Item size computed from the PEP 3118 buffer format "
- "string does not match the actual item size.",
- 0);
+ PyErr_SetString(
+ PyExc_RuntimeError,
+ "Item size computed from the PEP 3118 buffer format "
+ "string does not match the actual item size.");
goto fail;
}
}
@@ -1322,13 +1311,13 @@ _array_from_buffer_3118(PyObject *obj, PyObject **out)
nd = view->ndim;
if (view->shape != NULL) {
- if (nd >= NPY_MAXDIMS || nd < 0) {
+ int k;
+ if (nd > NPY_MAXDIMS || nd < 0) {
+ PyErr_Format(PyExc_RuntimeError,
+ "PEP3118 dimensions do not satisfy 0 <= ndim <= NPY_MAXDIMS");
goto fail;
}
for (k = 0; k < nd; ++k) {
- if (k >= NPY_MAXDIMS) {
- goto fail;
- }
shape[k] = view->shape[k];
}
if (view->strides != NULL) {
@@ -1352,10 +1341,9 @@ _array_from_buffer_3118(PyObject *obj, PyObject **out)
strides[0] = view->itemsize;
}
else if (nd > 1) {
- PyErr_WarnEx(PyExc_RuntimeWarning,
- "ndim computed from the PEP 3118 buffer format "
- "is greater than 1, but shape is NULL.",
- 0);
+ PyErr_SetString(PyExc_RuntimeError,
+ "ndim computed from the PEP 3118 buffer format "
+ "is greater than 1, but shape is NULL.");
goto fail;
}
}
@@ -1364,21 +1352,21 @@ _array_from_buffer_3118(PyObject *obj, PyObject **out)
r = PyArray_NewFromDescr(&PyArray_Type, descr,
nd, shape, strides, view->buf,
flags, NULL);
- if (r == NULL ||
- PyArray_SetBaseObject((PyArrayObject *)r, memoryview) < 0) {
- Py_XDECREF(r);
- Py_DECREF(memoryview);
- return -1;
+ if (r == NULL) {
+ goto fail;
+ }
+ if (PyArray_SetBaseObject((PyArrayObject *)r, memoryview) < 0) {
+ goto fail;
}
PyArray_UpdateFlags((PyArrayObject *)r, NPY_ARRAY_UPDATE_ALL);
- *out = r;
- return 0;
+ return r;
fail:
+ Py_XDECREF(r);
Py_XDECREF(descr);
Py_DECREF(memoryview);
- return -1;
+ return NULL;
}
@@ -1490,14 +1478,25 @@ PyArray_GetArrayParamsFromObject(PyObject *op,
}
/* If op supports the PEP 3118 buffer interface */
- if (!PyBytes_Check(op) && !PyUnicode_Check(op) &&
- _array_from_buffer_3118(op, (PyObject **)out_arr) == 0) {
- if (writeable
- && PyArray_FailUnlessWriteable(*out_arr, "PEP 3118 buffer") < 0) {
- Py_DECREF(*out_arr);
- return -1;
+ if (!PyBytes_Check(op) && !PyUnicode_Check(op)) {
+
+ PyObject *memoryview = PyMemoryView_FromObject(op);
+ if (memoryview == NULL) {
+ PyErr_Clear();
+ }
+ else {
+ PyObject *arr = _array_from_buffer_3118(memoryview);
+ if (arr == NULL) {
+ return -1;
+ }
+ if (writeable
+ && PyArray_FailUnlessWriteable((PyArrayObject *)arr, "PEP 3118 buffer") < 0) {
+ Py_DECREF(arr);
+ return -1;
+ }
+ *out_arr = (PyArrayObject *)arr;
+ return 0;
}
- return (*out_arr) == NULL ? -1 : 0;
}
/* If op supports the __array_struct__ or __array_interface__ interface */
diff --git a/numpy/core/src/multiarray/datetime.c b/numpy/core/src/multiarray/datetime.c
index a4a028ad4..af542aecc 100644
--- a/numpy/core/src/multiarray/datetime.c
+++ b/numpy/core/src/multiarray/datetime.c
@@ -2808,9 +2808,12 @@ convert_pyobject_to_timedelta(PyArray_DatetimeMetaData *meta, PyObject *obj,
us_meta.base = NPY_FR_m;
}
else if (td % (24*60*60*1000000LL) != 0) {
- us_meta.base = NPY_FR_D;
+ us_meta.base = NPY_FR_h;
}
else if (td % (7*24*60*60*1000000LL) != 0) {
+ us_meta.base = NPY_FR_D;
+ }
+ else {
us_meta.base = NPY_FR_W;
}
us_meta.num = 1;
@@ -3679,11 +3682,11 @@ recursive_find_object_datetime64_type(PyObject *obj,
return 0;
}
- /* Python date object -> 'D' */
- else if (PyDate_Check(obj)) {
+ /* Python datetime object -> 'us' */
+ else if (PyDateTime_Check(obj)) {
PyArray_DatetimeMetaData tmp_meta;
- tmp_meta.base = NPY_FR_D;
+ tmp_meta.base = NPY_FR_us;
tmp_meta.num = 1;
/* Combine it with 'meta' */
@@ -3694,11 +3697,11 @@ recursive_find_object_datetime64_type(PyObject *obj,
return 0;
}
- /* Python datetime object -> 'us' */
- else if (PyDateTime_Check(obj)) {
+ /* Python date object -> 'D' */
+ else if (PyDate_Check(obj)) {
PyArray_DatetimeMetaData tmp_meta;
- tmp_meta.base = NPY_FR_us;
+ tmp_meta.base = NPY_FR_D;
tmp_meta.num = 1;
/* Combine it with 'meta' */
diff --git a/numpy/core/src/multiarray/descriptor.c b/numpy/core/src/multiarray/descriptor.c
index c1c1ce568..bb3cc9d4e 100644
--- a/numpy/core/src/multiarray/descriptor.c
+++ b/numpy/core/src/multiarray/descriptor.c
@@ -18,6 +18,7 @@
#include "templ_common.h" /* for npy_mul_with_overflow_intp */
#include "descriptor.h"
#include "alloc.h"
+#include "assert.h"
/*
* offset: A starting offset.
@@ -1938,33 +1939,26 @@ arraydescr_shape_get(PyArray_Descr *self)
if (!PyDataType_HASSUBARRAY(self)) {
return PyTuple_New(0);
}
- /*TODO
- * self->subarray->shape should always be a tuple,
- * so this check should be unnecessary
- */
- if (PyTuple_Check(self->subarray->shape)) {
- Py_INCREF(self->subarray->shape);
- return (PyObject *)(self->subarray->shape);
- }
- return Py_BuildValue("(O)", self->subarray->shape);
+ assert(PyTuple_Check(self->subarray->shape));
+ Py_INCREF(self->subarray->shape);
+ return self->subarray->shape;
}
static PyObject *
arraydescr_ndim_get(PyArray_Descr *self)
{
+ Py_ssize_t ndim;
+
if (!PyDataType_HASSUBARRAY(self)) {
return PyInt_FromLong(0);
}
- /*TODO
- * self->subarray->shape should always be a tuple,
- * so this check should be unnecessary
+
+ /*
+ * PyTuple_Size has built in check
+ * for tuple argument
*/
- if (PyTuple_Check(self->subarray->shape)) {
- Py_ssize_t ndim = PyTuple_Size(self->subarray->shape);
- return PyInt_FromLong(ndim);
- }
- /* consistent with arraydescr_shape_get */
- return PyInt_FromLong(1);
+ ndim = PyTuple_Size(self->subarray->shape);
+ return PyInt_FromLong(ndim);
}
diff --git a/numpy/core/src/multiarray/dtype_transfer.c b/numpy/core/src/multiarray/dtype_transfer.c
index 9c27255aa..9f9aa6757 100644
--- a/numpy/core/src/multiarray/dtype_transfer.c
+++ b/numpy/core/src/multiarray/dtype_transfer.c
@@ -3400,6 +3400,7 @@ PyArray_GetDTypeTransferFunction(int aligned,
{
npy_intp src_itemsize, dst_itemsize;
int src_type_num, dst_type_num;
+ int is_builtin;
#if NPY_DT_DBG_TRACING
printf("Calculating dtype transfer from ");
@@ -3439,6 +3440,7 @@ PyArray_GetDTypeTransferFunction(int aligned,
dst_itemsize = dst_dtype->elsize;
src_type_num = src_dtype->type_num;
dst_type_num = dst_dtype->type_num;
+ is_builtin = src_type_num < NPY_NTYPES && dst_type_num < NPY_NTYPES;
/* Common special case - number -> number NBO cast */
if (PyTypeNum_ISNUMBER(src_type_num) &&
@@ -3462,13 +3464,14 @@ PyArray_GetDTypeTransferFunction(int aligned,
}
/*
- * If there are no references and the data types are equivalent,
+ * If there are no references and the data types are equivalent and builtin,
* return a simple copy
*/
if (PyArray_EquivTypes(src_dtype, dst_dtype) &&
!PyDataType_REFCHK(src_dtype) && !PyDataType_REFCHK(dst_dtype) &&
( !PyDataType_HASFIELDS(dst_dtype) ||
- is_dtype_struct_simple_unaligned_layout(dst_dtype)) ) {
+ is_dtype_struct_simple_unaligned_layout(dst_dtype)) &&
+ is_builtin) {
/*
* We can't pass through the aligned flag because it's not
* appropriate. Consider a size-8 string, it will say it's
@@ -3494,7 +3497,7 @@ PyArray_GetDTypeTransferFunction(int aligned,
!PyDataType_HASSUBARRAY(dst_dtype) &&
src_type_num != NPY_DATETIME && src_type_num != NPY_TIMEDELTA) {
/* A custom data type requires that we use its copy/swap */
- if (src_type_num >= NPY_NTYPES || dst_type_num >= NPY_NTYPES) {
+ if (!is_builtin) {
/*
* If the sizes and kinds are identical, but they're different
* custom types, then get a cast function
diff --git a/numpy/core/src/multiarray/einsum.c.src b/numpy/core/src/multiarray/einsum.c.src
index 5dbc30aa9..0eab25299 100644
--- a/numpy/core/src/multiarray/einsum.c.src
+++ b/numpy/core/src/multiarray/einsum.c.src
@@ -591,7 +591,7 @@ finish_after_unrolled_loop:
accum += @from@(data0[@i@]) * @from@(data1[@i@]);
/**end repeat2**/
case 0:
- *(@type@ *)dataptr[2] += @to@(accum);
+ *(@type@ *)dataptr[2] = @to@(@from@(*(@type@ *)dataptr[2]) + accum);
return;
}
@@ -749,7 +749,7 @@ finish_after_unrolled_loop:
accum += @from@(data1[@i@]);
/**end repeat2**/
case 0:
- *(@type@ *)dataptr[2] += @to@(value0 * accum);
+ *(@type@ *)dataptr[2] = @to@(@from@(*(@type@ *)dataptr[2]) + value0 * accum);
return;
}
@@ -848,7 +848,7 @@ finish_after_unrolled_loop:
accum += @from@(data0[@i@]);
/**end repeat2**/
case 0:
- *(@type@ *)dataptr[2] += @to@(accum * value1);
+ *(@type@ *)dataptr[2] = @to@(@from@(*(@type@ *)dataptr[2]) + accum * value1);
return;
}
@@ -1776,138 +1776,94 @@ get_sum_of_products_function(int nop, int type_num,
return _unspecialized_table[type_num][nop <= 3 ? nop : 0];
}
+
/*
- * Parses the subscripts for one operand into an output
- * of 'ndim' labels
+ * Parses the subscripts for one operand into an output of 'ndim'
+ * labels. The resulting 'op_labels' array will have:
+ * - the ASCII code of the label for the first occurrence of a label;
+ * - the (negative) offset to the first occurrence of the label for
+ * repeated labels;
+ * - zero for broadcast dimensions, if subscripts has an ellipsis.
+ * For example:
+ * - subscripts="abbcbc", ndim=6 -> op_labels=[97, 98, -1, 99, -3, -2]
+ * - subscripts="ab...bc", ndim=6 -> op_labels=[97, 98, 0, 0, -3, 99]
*/
+
static int
parse_operand_subscripts(char *subscripts, int length,
- int ndim,
- int iop, char *out_labels,
- char *out_label_counts,
- int *out_min_label,
- int *out_max_label,
- int *out_num_labels)
+ int ndim, int iop, char *op_labels,
+ char *label_counts, int *min_label, int *max_label)
{
- int i, idim, ndim_left, label;
- int ellipsis = 0;
+ int i;
+ int idim = 0;
+ int ellipsis = -1;
- /* Process the labels from the end until the ellipsis */
- idim = ndim-1;
- for (i = length-1; i >= 0; --i) {
- label = subscripts[i];
- /* A label for an axis */
+ /* Process all labels for this operand */
+ for (i = 0; i < length; ++i) {
+ int label = subscripts[i];
+
+ /* A proper label for an axis. */
if (label > 0 && isalpha(label)) {
- if (idim >= 0) {
- out_labels[idim--] = label;
- /* Calculate the min and max labels */
- if (label < *out_min_label) {
- *out_min_label = label;
- }
- if (label > *out_max_label) {
- *out_max_label = label;
- }
- /* If it's the first time we see this label, count it */
- if (out_label_counts[label] == 0) {
- (*out_num_labels)++;
- }
- out_label_counts[label]++;
- }
- else {
+ /* Check we don't exceed the operator dimensions. */
+ if (idim >= ndim) {
PyErr_Format(PyExc_ValueError,
- "einstein sum subscripts string contains "
- "too many subscripts for operand %d", iop);
- return 0;
+ "einstein sum subscripts string contains "
+ "too many subscripts for operand %d", iop);
+ return -1;
+ }
+
+ op_labels[idim++] = label;
+ if (label < *min_label) {
+ *min_label = label;
}
+ if (label > *max_label) {
+ *max_label = label;
+ }
+ label_counts[label]++;
}
- /* The end of the ellipsis */
+ /* The beginning of the ellipsis. */
else if (label == '.') {
- /* A valid ellipsis */
- if (i >= 2 && subscripts[i-1] == '.' && subscripts[i-2] == '.') {
- ellipsis = 1;
- length = i-2;
- break;
- }
- else {
+ /* Check it's a proper ellipsis. */
+ if (ellipsis != -1 || i + 2 >= length
+ || subscripts[++i] != '.' || subscripts[++i] != '.') {
PyErr_Format(PyExc_ValueError,
- "einstein sum subscripts string contains a "
- "'.' that is not part of an ellipsis ('...') in "
- "operand %d", iop);
- return 0;
-
+ "einstein sum subscripts string contains a "
+ "'.' that is not part of an ellipsis ('...') "
+ "in operand %d", iop);
+ return -1;
}
+
+ ellipsis = idim;
}
else if (label != ' ') {
PyErr_Format(PyExc_ValueError,
- "invalid subscript '%c' in einstein sum "
- "subscripts string, subscripts must "
- "be letters", (char)label);
- return 0;
+ "invalid subscript '%c' in einstein sum "
+ "subscripts string, subscripts must "
+ "be letters", (char)label);
+ return -1;
}
}
- if (!ellipsis && idim != -1) {
- PyErr_Format(PyExc_ValueError,
- "operand has more dimensions than subscripts "
- "given in einstein sum, but no '...' ellipsis "
- "provided to broadcast the extra dimensions.");
- return 0;
- }
-
- /* Reduce ndim to just the dimensions left to fill at the beginning */
- ndim_left = idim+1;
- idim = 0;
-
- /*
- * If we stopped because of an ellipsis, start again from the beginning.
- * The length was truncated to end at the ellipsis in this case.
- */
- if (i > 0) {
- for (i = 0; i < length; ++i) {
- label = subscripts[i];
- /* A label for an axis */
- if (label > 0 && isalnum(label)) {
- if (idim < ndim_left) {
- out_labels[idim++] = label;
- /* Calculate the min and max labels */
- if (label < *out_min_label) {
- *out_min_label = label;
- }
- if (label > *out_max_label) {
- *out_max_label = label;
- }
- /* If it's the first time we see this label, count it */
- if (out_label_counts[label] == 0) {
- (*out_num_labels)++;
- }
- out_label_counts[label]++;
- }
- else {
- PyErr_Format(PyExc_ValueError,
- "einstein sum subscripts string contains "
- "too many subscripts for operand %d", iop);
- return 0;
- }
- }
- else if (label == '.') {
- PyErr_Format(PyExc_ValueError,
- "einstein sum subscripts string contains a "
- "'.' that is not part of an ellipsis ('...') in "
- "operand %d", iop);
- }
- else if (label != ' ') {
- PyErr_Format(PyExc_ValueError,
- "invalid subscript '%c' in einstein sum "
- "subscripts string, subscripts must "
- "be letters", (char)label);
- return 0;
- }
+ /* No ellipsis found, labels must match dimensions exactly. */
+ if (ellipsis == -1) {
+ if (idim != ndim) {
+ PyErr_Format(PyExc_ValueError,
+ "operand has more dimensions than subscripts "
+ "given in einstein sum, but no '...' ellipsis "
+ "provided to broadcast the extra dimensions.");
+ return -1;
}
}
-
- /* Set the remaining labels to 0 */
- while (idim < ndim_left) {
- out_labels[idim++] = 0;
+ /* Ellipsis found, may have to add broadcast dimensions. */
+ else if (idim < ndim) {
+ /* Move labels after ellipsis to the end. */
+ for (i = 0; i < idim - ellipsis; ++i) {
+ op_labels[ndim - i - 1] = op_labels[idim - i - 1];
+ }
+ /* Set all broadcast dimensions to zero. */
+ for (i = 0; i < ndim - idim; ++i) {
+ op_labels[ellipsis + i] = 0;
+ }
}
/*
@@ -1918,158 +1874,116 @@ parse_operand_subscripts(char *subscripts, int length,
* twos complement arithmetic the char is ok either way here, and
* later where it matters the char is cast to a signed char.
*/
- for (idim = 0; idim < ndim-1; ++idim) {
- char *next;
- /* If this is a proper label, find any duplicates of it */
- label = out_labels[idim];
+ for (idim = 0; idim < ndim - 1; ++idim) {
+ int label = op_labels[idim];
+ /* If it is a proper label, find any duplicates of it. */
if (label > 0) {
- /* Search for the next matching label */
- next = (char *)memchr(out_labels+idim+1, label,
- ndim-idim-1);
+ /* Search for the next matching label. */
+ char *next = memchr(op_labels + idim + 1, label, ndim - idim - 1);
+
while (next != NULL) {
- /* The offset from next to out_labels[idim] (negative) */
- *next = (char)((out_labels+idim)-next);
- /* Search for the next matching label */
- next = (char *)memchr(next+1, label,
- out_labels+ndim-1-next);
+ /* The offset from next to op_labels[idim] (negative). */
+ *next = (char)((op_labels + idim) - next);
+ /* Search for the next matching label. */
+ next = memchr(next + 1, label, op_labels + ndim - 1 - next);
}
}
}
- return 1;
+ return 0;
}
+
/*
- * Parses the subscripts for the output operand into an output
- * that requires 'ndim_broadcast' unlabeled dimensions, returning
- * the number of output dimensions. Returns -1 if there is an error.
+ * Parses the subscripts for the output operand into an output that
+ * includes 'ndim_broadcast' unlabeled dimensions, and returns the total
+ * number of output dimensions, or -1 if there is an error. Similarly
+ * to parse_operand_subscripts, the 'out_labels' array will have, for
+ * each dimension:
+ * - the ASCII code of the corresponding label;
+ * - zero for broadcast dimensions, if subscripts has an ellipsis.
*/
static int
parse_output_subscripts(char *subscripts, int length,
int ndim_broadcast,
- const char *label_counts,
- char *out_labels)
+ const char *label_counts, char *out_labels)
{
- int i, nlabels, label, idim, ndim, ndim_left;
+ int i, bdim;
+ int ndim = 0;
int ellipsis = 0;
- /* Count the labels, making sure they're all unique and valid */
- nlabels = 0;
+ /* Process all the output labels. */
for (i = 0; i < length; ++i) {
- label = subscripts[i];
- if (label > 0 && isalpha(label)) {
- /* Check if it occurs again */
- if (memchr(subscripts+i+1, label, length-i-1) == NULL) {
- /* Check that it was used in the inputs */
- if (label_counts[label] == 0) {
- PyErr_Format(PyExc_ValueError,
- "einstein sum subscripts string included "
- "output subscript '%c' which never appeared "
- "in an input", (char)label);
- return -1;
- }
+ int label = subscripts[i];
- nlabels++;
- }
- else {
+ /* A proper label for an axis. */
+ if (label > 0 && isalpha(label)) {
+ /* Check that it doesn't occur again. */
+ if (memchr(subscripts + i + 1, label, length - i - 1) != NULL) {
PyErr_Format(PyExc_ValueError,
- "einstein sum subscripts string includes "
- "output subscript '%c' multiple times",
- (char)label);
+ "einstein sum subscripts string includes "
+ "output subscript '%c' multiple times",
+ (char)label);
return -1;
}
- }
- else if (label != '.' && label != ' ') {
- PyErr_Format(PyExc_ValueError,
- "invalid subscript '%c' in einstein sum "
- "subscripts string, subscripts must "
- "be letters", (char)label);
- return -1;
- }
- }
-
- /* The number of output dimensions */
- ndim = ndim_broadcast + nlabels;
-
- /* Process the labels from the end until the ellipsis */
- idim = ndim-1;
- for (i = length-1; i >= 0; --i) {
- label = subscripts[i];
- /* A label for an axis */
- if (label != '.' && label != ' ') {
- if (idim >= 0) {
- out_labels[idim--] = label;
+ /* Check that it was used in the inputs. */
+ if (label_counts[label] == 0) {
+ PyErr_Format(PyExc_ValueError,
+ "einstein sum subscripts string included "
+ "output subscript '%c' which never appeared "
+ "in an input", (char)label);
+ return -1;
}
- else {
+ /* Check that there is room in out_labels for this label. */
+ if (ndim >= NPY_MAXDIMS) {
PyErr_Format(PyExc_ValueError,
- "einstein sum subscripts string contains "
- "too many output subscripts");
+ "einstein sum subscripts string contains "
+ "too many subscripts in the output");
return -1;
}
+
+ out_labels[ndim++] = label;
}
- /* The end of the ellipsis */
+ /* The beginning of the ellipsis. */
else if (label == '.') {
- /* A valid ellipsis */
- if (i >= 2 && subscripts[i-1] == '.' && subscripts[i-2] == '.') {
- ellipsis = 1;
- length = i-2;
- break;
- }
- else {
+ /* Check it is a proper ellipsis. */
+ if (ellipsis || i + 2 >= length
+ || subscripts[++i] != '.' || subscripts[++i] != '.') {
PyErr_SetString(PyExc_ValueError,
- "einstein sum subscripts string contains a "
- "'.' that is not part of an ellipsis ('...') "
- "in the output");
+ "einstein sum subscripts string "
+ "contains a '.' that is not part of "
+ "an ellipsis ('...') in the output");
return -1;
-
}
- }
- }
-
- if (!ellipsis && idim != -1) {
- PyErr_SetString(PyExc_ValueError,
- "output has more dimensions than subscripts "
- "given in einstein sum, but no '...' ellipsis "
- "provided to broadcast the extra dimensions.");
- return 0;
- }
-
- /* Reduce ndim to just the dimensions left to fill at the beginning */
- ndim_left = idim+1;
- idim = 0;
-
- /*
- * If we stopped because of an ellipsis, start again from the beginning.
- * The length was truncated to end at the ellipsis in this case.
- */
- if (i > 0) {
- for (i = 0; i < length; ++i) {
- label = subscripts[i];
- if (label == '.') {
- PyErr_SetString(PyExc_ValueError,
- "einstein sum subscripts string contains a "
- "'.' that is not part of an ellipsis ('...') "
- "in the output");
+ /* Check there is room in out_labels for broadcast dims. */
+ if (ndim + ndim_broadcast > NPY_MAXDIMS) {
+ PyErr_Format(PyExc_ValueError,
+ "einstein sum subscripts string contains "
+ "too many subscripts in the output");
return -1;
}
- /* A label for an axis */
- else if (label != ' ') {
- if (idim < ndim_left) {
- out_labels[idim++] = label;
- }
- else {
- PyErr_Format(PyExc_ValueError,
- "einstein sum subscripts string contains "
- "too many subscripts for the output");
- return -1;
- }
+
+ ellipsis = 1;
+ for (bdim = 0; bdim < ndim_broadcast; ++bdim) {
+ out_labels[ndim++] = 0;
}
}
+ else if (label != ' ') {
+ PyErr_Format(PyExc_ValueError,
+ "invalid subscript '%c' in einstein sum "
+ "subscripts string, subscripts must "
+ "be letters", (char)label);
+ return -1;
+ }
}
- /* Set the remaining output labels to 0 */
- while (idim < ndim_left) {
- out_labels[idim++] = 0;
+ /* If no ellipsis was found there should be no broadcast dimensions. */
+ if (!ellipsis && ndim_broadcast > 0) {
+ PyErr_SetString(PyExc_ValueError,
+ "output has more dimensions than subscripts "
+ "given in einstein sum, but no '...' ellipsis "
+ "provided to broadcast the extra dimensions.");
+ return -1;
}
return ndim;
@@ -2121,7 +2035,7 @@ get_single_op_view(PyArrayObject *op, int iop, char *labels,
if (ibroadcast == ndim_output) {
PyErr_SetString(PyExc_ValueError,
"output had too few broadcast dimensions");
- return 0;
+ return -1;
}
new_dims[ibroadcast] = PyArray_DIM(op, idim);
new_strides[ibroadcast] = PyArray_STRIDE(op, idim);
@@ -2144,7 +2058,7 @@ get_single_op_view(PyArrayObject *op, int iop, char *labels,
"index '%c' don't match (%d != %d)",
iop, label, (int)new_dims[i],
(int)PyArray_DIM(op, idim));
- return 0;
+ return -1;
}
new_dims[i] = PyArray_DIM(op, idim);
new_strides[i] += PyArray_STRIDE(op, idim);
@@ -2162,14 +2076,14 @@ get_single_op_view(PyArrayObject *op, int iop, char *labels,
(PyObject *)op);
if (*ret == NULL) {
- return 0;
+ return -1;
}
if (!PyArray_Check(*ret)) {
Py_DECREF(*ret);
*ret = NULL;
PyErr_SetString(PyExc_RuntimeError,
"NewFromDescr failed to return an array");
- return 0;
+ return -1;
}
PyArray_UpdateFlags(*ret,
NPY_ARRAY_C_CONTIGUOUS|
@@ -2179,14 +2093,14 @@ get_single_op_view(PyArrayObject *op, int iop, char *labels,
if (PyArray_SetBaseObject(*ret, (PyObject *)op) < 0) {
Py_DECREF(*ret);
*ret = NULL;
- return 0;
+ return -1;
}
- return 1;
+ return 0;
}
/* Return success, but that we couldn't make a view */
*ret = NULL;
- return 1;
+ return 0;
}
static PyArrayObject *
@@ -2332,7 +2246,7 @@ prepare_op_axes(int ndim, int iop, char *labels, int *axes,
}
}
- return 1;
+ return 0;
}
static int
@@ -2613,7 +2527,7 @@ PyArray_EinsteinSum(char *subscripts, npy_intp nop,
NPY_ORDER order, NPY_CASTING casting,
PyArrayObject *out)
{
- int iop, label, min_label = 127, max_label = 0, num_labels;
+ int iop, label, min_label = 127, max_label = 0;
char label_counts[128];
char op_labels[NPY_MAXARGS][NPY_MAXDIMS];
char output_labels[NPY_MAXDIMS], *iter_labels;
@@ -2644,7 +2558,6 @@ PyArray_EinsteinSum(char *subscripts, npy_intp nop,
/* Parse the subscripts string into label_counts and op_labels */
memset(label_counts, 0, sizeof(label_counts));
- num_labels = 0;
for (iop = 0; iop < nop; ++iop) {
int length = (int)strcspn(subscripts, ",-");
@@ -2661,10 +2574,10 @@ PyArray_EinsteinSum(char *subscripts, npy_intp nop,
return NULL;
}
- if (!parse_operand_subscripts(subscripts, length,
+ if (parse_operand_subscripts(subscripts, length,
PyArray_NDIM(op_in[iop]),
iop, op_labels[iop], label_counts,
- &min_label, &max_label, &num_labels)) {
+ &min_label, &max_label) < 0) {
return NULL;
}
@@ -2698,21 +2611,18 @@ PyArray_EinsteinSum(char *subscripts, npy_intp nop,
}
/*
- * If there is no output signature, create one using each label
- * that appeared once, in alphabetical order
+ * If there is no output signature, fill output_labels and ndim_output
+ * using each label that appeared once, in alphabetical order.
*/
if (subscripts[0] == '\0') {
- char outsubscripts[NPY_MAXDIMS + 3];
- int length;
- /* If no output was specified, always broadcast left (like normal) */
- outsubscripts[0] = '.';
- outsubscripts[1] = '.';
- outsubscripts[2] = '.';
- length = 3;
+ /* If no output was specified, always broadcast left, as usual. */
+ for (ndim_output = 0; ndim_output < ndim_broadcast; ++ndim_output) {
+ output_labels[ndim_output] = 0;
+ }
for (label = min_label; label <= max_label; ++label) {
if (label_counts[label] == 1) {
- if (length < NPY_MAXDIMS-1) {
- outsubscripts[length++] = label;
+ if (ndim_output < NPY_MAXDIMS) {
+ output_labels[ndim_output++] = label;
}
else {
PyErr_SetString(PyExc_ValueError,
@@ -2722,10 +2632,6 @@ PyArray_EinsteinSum(char *subscripts, npy_intp nop,
}
}
}
- /* Parse the output subscript string */
- ndim_output = parse_output_subscripts(outsubscripts, length,
- ndim_broadcast, label_counts,
- output_labels);
}
else {
if (subscripts[0] != '-' || subscripts[1] != '>') {
@@ -2736,13 +2642,13 @@ PyArray_EinsteinSum(char *subscripts, npy_intp nop,
}
subscripts += 2;
- /* Parse the output subscript string */
+ /* Parse the output subscript string. */
ndim_output = parse_output_subscripts(subscripts, strlen(subscripts),
ndim_broadcast, label_counts,
output_labels);
- }
- if (ndim_output < 0) {
- return NULL;
+ if (ndim_output < 0) {
+ return NULL;
+ }
}
if (out != NULL && PyArray_NDIM(out) != ndim_output) {
@@ -2776,9 +2682,9 @@ PyArray_EinsteinSum(char *subscripts, npy_intp nop,
if (iop == 0 && nop == 1 && out == NULL) {
ret = NULL;
- if (!get_single_op_view(op_in[iop], iop, labels,
- ndim_output, output_labels,
- &ret)) {
+ if (get_single_op_view(op_in[iop], iop, labels,
+ ndim_output, output_labels,
+ &ret) < 0) {
return NULL;
}
@@ -2840,8 +2746,8 @@ PyArray_EinsteinSum(char *subscripts, npy_intp nop,
for (iop = 0; iop < nop; ++iop) {
op_axes[iop] = op_axes_arrays[iop];
- if (!prepare_op_axes(PyArray_NDIM(op[iop]), iop, op_labels[iop],
- op_axes[iop], ndim_iter, iter_labels)) {
+ if (prepare_op_axes(PyArray_NDIM(op[iop]), iop, op_labels[iop],
+ op_axes[iop], ndim_iter, iter_labels) < 0) {
goto fail;
}
}
diff --git a/numpy/core/src/multiarray/lowlevel_strided_loops.c.src b/numpy/core/src/multiarray/lowlevel_strided_loops.c.src
index 397aaf209..fa68af19a 100644
--- a/numpy/core/src/multiarray/lowlevel_strided_loops.c.src
+++ b/numpy/core/src/multiarray/lowlevel_strided_loops.c.src
@@ -1373,7 +1373,7 @@ PyArray_TransferMaskedStridedToNDim(npy_intp ndim,
/*
* Advanded indexing iteration of arrays when there is a single indexing
* array which has the same memory order as the value array and both
- * can be trivally iterated (single stride, aligned, no casting necessary).
+ * can be trivially iterated (single stride, aligned, no casting necessary).
*/
NPY_NO_EXPORT int
mapiter_trivial_@name@(PyArrayObject *self, PyArrayObject *ind,
@@ -1747,7 +1747,7 @@ mapiter_@name@(PyArrayMapIterObject *mit)
}
else {
/*
- * faster resetting if the subspace iteration is trival.
+ * faster resetting if the subspace iteration is trivial.
* reset_offsets are zero for positive strides,
* for negative strides this shifts the pointer to the last
* item.
diff --git a/numpy/core/src/multiarray/mapping.c b/numpy/core/src/multiarray/mapping.c
index 4b2c6aa5a..42dbc3cce 100644
--- a/numpy/core/src/multiarray/mapping.c
+++ b/numpy/core/src/multiarray/mapping.c
@@ -293,8 +293,7 @@ unpack_indices(PyObject *index, PyObject **result, npy_intp result_n)
if (commit_to_unpack) {
/* propagate errors */
if (tmp_obj == NULL) {
- multi_DECREF(result, i);
- return -1;
+ goto fail;
}
}
else {
@@ -313,6 +312,16 @@ unpack_indices(PyObject *index, PyObject **result, npy_intp result_n)
|| PySlice_Check(tmp_obj)
|| tmp_obj == Py_Ellipsis
|| tmp_obj == Py_None) {
+ if (DEPRECATE_FUTUREWARNING(
+ "Using a non-tuple sequence for multidimensional "
+ "indexing is deprecated; use `arr[tuple(seq)]` "
+ "instead of `arr[seq]`. In the future this will be "
+ "interpreted as an array index, `arr[np.array(seq)]`, "
+ "which will result either in an error or a different "
+ "result.") < 0) {
+ i++; /* since loop update doesn't run */
+ goto fail;
+ }
commit_to_unpack = 1;
}
}
@@ -328,6 +337,10 @@ unpack_indices(PyObject *index, PyObject **result, npy_intp result_n)
multi_DECREF(result, i);
return unpack_scalar(index, result, result_n);
}
+
+fail:
+ multi_DECREF(result, i);
+ return -1;
}
/**
diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c
index 7eccb4a4b..6d323dbd8 100644
--- a/numpy/core/src/multiarray/multiarraymodule.c
+++ b/numpy/core/src/multiarray/multiarraymodule.c
@@ -197,7 +197,7 @@ PyArray_CompareLists(npy_intp *l1, npy_intp *l2, int n)
}
/*
- * simulates a C-style 1-3 dimensional array which can be accesed using
+ * simulates a C-style 1-3 dimensional array which can be accessed using
* ptr[i] or ptr[i][j] or ptr[i][j][k] -- requires pointer allocation
* for 2-d and 3-d.
*
@@ -3605,7 +3605,7 @@ as_buffer(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds)
/*
- * Prints floating-point scalars usign the Dragon4 algorithm, scientific mode.
+ * Prints floating-point scalars using the Dragon4 algorithm, scientific mode.
* See docstring of `np.format_float_scientific` for description of arguments.
* The differences is that a value of -1 is valid for pad_left, exp_digits,
* precision, which is equivalent to `None`.
@@ -3661,7 +3661,7 @@ dragon4_scientific(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds)
}
/*
- * Prints floating-point scalars usign the Dragon4 algorithm, positional mode.
+ * Prints floating-point scalars using the Dragon4 algorithm, positional mode.
* See docstring of `np.format_float_positional` for description of arguments.
* The differences is that a value of -1 is valid for pad_left, pad_right,
* precision, which is equivalent to `None`.
diff --git a/numpy/core/src/multiarray/nditer_pywrap.c b/numpy/core/src/multiarray/nditer_pywrap.c
index d36be61f5..4505e645b 100644
--- a/numpy/core/src/multiarray/nditer_pywrap.c
+++ b/numpy/core/src/multiarray/nditer_pywrap.c
@@ -20,16 +20,14 @@
typedef struct NewNpyArrayIterObject_tag NewNpyArrayIterObject;
-enum NPYITER_CONTEXT {CONTEXT_NOTENTERED, CONTEXT_INSIDE, CONTEXT_EXITED};
-
struct NewNpyArrayIterObject_tag {
PyObject_HEAD
/* The iterator */
NpyIter *iter;
/* Flag indicating iteration started/stopped */
char started, finished;
- /* iter must used as a context manager if writebackifcopy semantics used */
- char managed;
+ /* iter operands cannot be referenced if iter is closed */
+ npy_bool is_closed;
/* Child to update for nested iteration */
NewNpyArrayIterObject *nested_child;
/* Cached values from the iterator */
@@ -89,7 +87,7 @@ npyiter_new(PyTypeObject *subtype, PyObject *args, PyObject *kwds)
if (self != NULL) {
self->iter = NULL;
self->nested_child = NULL;
- self->managed = CONTEXT_NOTENTERED;
+ self->is_closed = 0;
}
return (PyObject *)self;
@@ -1419,7 +1417,7 @@ static PyObject *npyiter_value_get(NewNpyArrayIterObject *self)
ret = npyiter_seq_item(self, 0);
}
else {
- if (self->managed == CONTEXT_EXITED) {
+ if (self->is_closed) {
PyErr_SetString(PyExc_ValueError,
"Iterator is closed");
return NULL;
@@ -1454,7 +1452,7 @@ static PyObject *npyiter_operands_get(NewNpyArrayIterObject *self)
"Iterator is invalid");
return NULL;
}
- if (self->managed == CONTEXT_EXITED) {
+ if (self->is_closed) {
PyErr_SetString(PyExc_ValueError,
"Iterator is closed");
return NULL;
@@ -1489,7 +1487,7 @@ static PyObject *npyiter_itviews_get(NewNpyArrayIterObject *self)
return NULL;
}
- if (self->managed == CONTEXT_EXITED) {
+ if (self->is_closed) {
PyErr_SetString(PyExc_ValueError,
"Iterator is closed");
return NULL;
@@ -1517,7 +1515,8 @@ static PyObject *npyiter_itviews_get(NewNpyArrayIterObject *self)
static PyObject *
npyiter_next(NewNpyArrayIterObject *self)
{
- if (self->iter == NULL || self->iternext == NULL || self->finished) {
+ if (self->iter == NULL || self->iternext == NULL ||
+ self->finished || self->is_closed) {
return NULL;
}
@@ -1912,7 +1911,7 @@ static PyObject *npyiter_dtypes_get(NewNpyArrayIterObject *self)
return NULL;
}
- if (self->managed == CONTEXT_EXITED) {
+ if (self->is_closed) {
PyErr_SetString(PyExc_ValueError,
"Iterator is closed");
return NULL;
@@ -2014,7 +2013,7 @@ npyiter_seq_item(NewNpyArrayIterObject *self, Py_ssize_t i)
return NULL;
}
- if (self->managed == CONTEXT_EXITED) {
+ if (self->is_closed) {
PyErr_SetString(PyExc_ValueError,
"Iterator is closed");
return NULL;
@@ -2104,7 +2103,7 @@ npyiter_seq_slice(NewNpyArrayIterObject *self,
return NULL;
}
- if (self->managed == CONTEXT_EXITED) {
+ if (self->is_closed) {
PyErr_SetString(PyExc_ValueError,
"Iterator is closed");
return NULL;
@@ -2170,7 +2169,7 @@ npyiter_seq_ass_item(NewNpyArrayIterObject *self, Py_ssize_t i, PyObject *v)
return -1;
}
- if (self->managed == CONTEXT_EXITED) {
+ if (self->is_closed) {
PyErr_SetString(PyExc_ValueError,
"Iterator is closed");
return -1;
@@ -2250,7 +2249,7 @@ npyiter_seq_ass_slice(NewNpyArrayIterObject *self, Py_ssize_t ilow,
return -1;
}
- if (self->managed == CONTEXT_EXITED) {
+ if (self->is_closed) {
PyErr_SetString(PyExc_ValueError,
"Iterator is closed");
return -1;
@@ -2307,7 +2306,7 @@ npyiter_subscript(NewNpyArrayIterObject *self, PyObject *op)
return NULL;
}
- if (self->managed == CONTEXT_EXITED) {
+ if (self->is_closed) {
PyErr_SetString(PyExc_ValueError,
"Iterator is closed");
return NULL;
@@ -2362,7 +2361,7 @@ npyiter_ass_subscript(NewNpyArrayIterObject *self, PyObject *op,
return -1;
}
- if (self->managed == CONTEXT_EXITED) {
+ if (self->is_closed) {
PyErr_SetString(PyExc_ValueError,
"Iterator is closed");
return -1;
@@ -2402,11 +2401,10 @@ npyiter_enter(NewNpyArrayIterObject *self)
PyErr_SetString(PyExc_RuntimeError, "operation on non-initialized iterator");
return NULL;
}
- if (self->managed == CONTEXT_EXITED) {
- PyErr_SetString(PyExc_ValueError, "cannot reuse iterator after exit");
+ if (self->is_closed) {
+ PyErr_SetString(PyExc_ValueError, "cannot reuse closed iterator");
return NULL;
}
- self->managed = CONTEXT_INSIDE;
Py_INCREF(self);
return (PyObject *)self;
}
@@ -2420,6 +2418,7 @@ npyiter_close(NewNpyArrayIterObject *self)
Py_RETURN_NONE;
}
ret = NpyIter_Close(iter);
+ self->is_closed = 1;
if (ret < 0) {
return NULL;
}
@@ -2429,7 +2428,6 @@ npyiter_close(NewNpyArrayIterObject *self)
static PyObject *
npyiter_exit(NewNpyArrayIterObject *self, PyObject *args)
{
- self->managed = CONTEXT_EXITED;
/* even if called via exception handling, writeback any data */
return npyiter_close(self);
}
diff --git a/numpy/core/src/multiarray/number.c b/numpy/core/src/multiarray/number.c
index 915d743c8..14389a925 100644
--- a/numpy/core/src/multiarray/number.c
+++ b/numpy/core/src/multiarray/number.c
@@ -476,7 +476,9 @@ fast_scalar_power(PyArrayObject *a1, PyObject *o2, int inplace,
double exponent;
NPY_SCALARKIND kind; /* NPY_NOSCALAR is not scalar */
- if (PyArray_Check(a1) && ((kind=is_scalar_with_conversion(o2, &exponent))>0)) {
+ if (PyArray_Check(a1) &&
+ !PyArray_ISOBJECT(a1) &&
+ ((kind=is_scalar_with_conversion(o2, &exponent))>0)) {
PyObject *fastop = NULL;
if (PyArray_ISFLOAT(a1) || PyArray_ISCOMPLEX(a1)) {
if (exponent == 1.0) {
diff --git a/numpy/core/src/multiarray/scalartypes.c.src b/numpy/core/src/multiarray/scalartypes.c.src
index cb4af0d12..6dc7e5a3e 100644
--- a/numpy/core/src/multiarray/scalartypes.c.src
+++ b/numpy/core/src/multiarray/scalartypes.c.src
@@ -4201,7 +4201,7 @@ doubletype_print(PyObject *o, FILE *fp, int flags)
return -1;
}
- ret = PyObject_Print(to_print, fp, flags);
+ ret = PyObject_Print(to_print, fp, Py_PRINT_RAW);
Py_DECREF(to_print);
return ret;
}
diff --git a/numpy/core/src/npymath/ieee754.c.src b/numpy/core/src/npymath/ieee754.c.src
index bca690b4d..5405c8fe3 100644
--- a/numpy/core/src/npymath/ieee754.c.src
+++ b/numpy/core/src/npymath/ieee754.c.src
@@ -6,6 +6,7 @@
*/
#include "npy_math_common.h"
#include "npy_math_private.h"
+#include "numpy/utils.h"
#ifndef HAVE_COPYSIGN
double npy_copysign(double x, double y)
@@ -557,6 +558,15 @@ npy_longdouble npy_nextafterl(npy_longdouble x, npy_longdouble y)
}
#endif
+int npy_clear_floatstatus(void) {
+    char x=0;
+    return npy_clear_floatstatus_barrier(&x);
+}
+int npy_get_floatstatus(void) {
+    char x=0;
+    return npy_get_floatstatus_barrier(&x);
+}
+
/*
* Functions to set the floating point status word.
* keep in sync with NO_FLOATING_POINT_SUPPORT in ufuncobject.h
@@ -574,18 +584,24 @@ npy_longdouble npy_nextafterl(npy_longdouble x, npy_longdouble y)
defined(__NetBSD__)
#include <ieeefp.h>
-int npy_get_floatstatus(void)
+int npy_get_floatstatus_barrier(char * param)
{
int fpstatus = fpgetsticky();
+ /*
+ * By using a volatile, the compiler cannot reorder this call
+ */
+ if (param != NULL) {
+ volatile char NPY_UNUSED(c) = *(char*)param;
+ }
return ((FP_X_DZ & fpstatus) ? NPY_FPE_DIVIDEBYZERO : 0) |
((FP_X_OFL & fpstatus) ? NPY_FPE_OVERFLOW : 0) |
((FP_X_UFL & fpstatus) ? NPY_FPE_UNDERFLOW : 0) |
((FP_X_INV & fpstatus) ? NPY_FPE_INVALID : 0);
}
-int npy_clear_floatstatus(void)
+int npy_clear_floatstatus_barrier(char * param)
{
- int fpstatus = npy_get_floatstatus();
+ int fpstatus = npy_get_floatstatus_barrier(param);
fpsetsticky(0);
return fpstatus;
@@ -617,10 +633,16 @@ void npy_set_floatstatus_invalid(void)
(defined(__FreeBSD__) && (__FreeBSD_version >= 502114))
# include <fenv.h>
-int npy_get_floatstatus(void)
+int npy_get_floatstatus_barrier(char* param)
{
int fpstatus = fetestexcept(FE_DIVBYZERO | FE_OVERFLOW |
FE_UNDERFLOW | FE_INVALID);
+ /*
+ * By using a volatile, the compiler cannot reorder this call
+ */
+ if (param != NULL) {
+ volatile char NPY_UNUSED(c) = *(char*)param;
+ }
return ((FE_DIVBYZERO & fpstatus) ? NPY_FPE_DIVIDEBYZERO : 0) |
((FE_OVERFLOW & fpstatus) ? NPY_FPE_OVERFLOW : 0) |
@@ -628,10 +650,10 @@ int npy_get_floatstatus(void)
((FE_INVALID & fpstatus) ? NPY_FPE_INVALID : 0);
}
-int npy_clear_floatstatus(void)
+int npy_clear_floatstatus_barrier(char * param)
{
/* testing float status is 50-100 times faster than clearing on x86 */
- int fpstatus = npy_get_floatstatus();
+ int fpstatus = npy_get_floatstatus_barrier(param);
if (fpstatus != 0) {
feclearexcept(FE_DIVBYZERO | FE_OVERFLOW |
FE_UNDERFLOW | FE_INVALID);
@@ -665,18 +687,24 @@ void npy_set_floatstatus_invalid(void)
#include <float.h>
#include <fpxcp.h>
-int npy_get_floatstatus(void)
+int npy_get_floatstatus_barrier(char *param)
{
int fpstatus = fp_read_flag();
+ /*
+ * By using a volatile, the compiler cannot reorder this call
+ */
+ if (param != NULL) {
+ volatile char NPY_UNUSED(c) = *(char*)param;
+ }
return ((FP_DIV_BY_ZERO & fpstatus) ? NPY_FPE_DIVIDEBYZERO : 0) |
((FP_OVERFLOW & fpstatus) ? NPY_FPE_OVERFLOW : 0) |
((FP_UNDERFLOW & fpstatus) ? NPY_FPE_UNDERFLOW : 0) |
((FP_INVALID & fpstatus) ? NPY_FPE_INVALID : 0);
}
-int npy_clear_floatstatus(void)
+int npy_clear_floatstatus_barrier(char * param)
{
- int fpstatus = npy_get_floatstatus();
+ int fpstatus = npy_get_floatstatus_barrier(param);
fp_swap_flag(0);
return fpstatus;
@@ -710,8 +738,11 @@ void npy_set_floatstatus_invalid(void)
#include <float.h>
-int npy_get_floatstatus(void)
+int npy_get_floatstatus_barrier(char *param)
{
+ /*
+ * By using a volatile, the compiler cannot reorder this call
+ */
#if defined(_WIN64)
int fpstatus = _statusfp();
#else
@@ -720,15 +751,18 @@ int npy_get_floatstatus(void)
_statusfp2(&fpstatus, &fpstatus2);
fpstatus |= fpstatus2;
#endif
+ if (param != NULL) {
+ volatile char NPY_UNUSED(c) = *(char*)param;
+ }
return ((SW_ZERODIVIDE & fpstatus) ? NPY_FPE_DIVIDEBYZERO : 0) |
((SW_OVERFLOW & fpstatus) ? NPY_FPE_OVERFLOW : 0) |
((SW_UNDERFLOW & fpstatus) ? NPY_FPE_UNDERFLOW : 0) |
((SW_INVALID & fpstatus) ? NPY_FPE_INVALID : 0);
}
-int npy_clear_floatstatus(void)
+int npy_clear_floatstatus_barrier(char *param)
{
- int fpstatus = npy_get_floatstatus();
+ int fpstatus = npy_get_floatstatus_barrier(param);
_clearfp();
return fpstatus;
@@ -739,18 +773,24 @@ int npy_clear_floatstatus(void)
#include <machine/fpu.h>
-int npy_get_floatstatus(void)
+int npy_get_floatstatus_barrier(char *param)
{
unsigned long fpstatus = ieee_get_fp_control();
+ /*
+ * By using a volatile, the compiler cannot reorder this call
+ */
+ if (param != NULL) {
+ volatile char NPY_UNUSED(c) = *(char*)param;
+ }
return ((IEEE_STATUS_DZE & fpstatus) ? NPY_FPE_DIVIDEBYZERO : 0) |
((IEEE_STATUS_OVF & fpstatus) ? NPY_FPE_OVERFLOW : 0) |
((IEEE_STATUS_UNF & fpstatus) ? NPY_FPE_UNDERFLOW : 0) |
((IEEE_STATUS_INV & fpstatus) ? NPY_FPE_INVALID : 0);
}
-int npy_clear_floatstatus(void)
+int npy_clear_floatstatus_barrier(char *param)
{
- long fpstatus = npy_get_floatstatus();
+ int fpstatus = npy_get_floatstatus_barrier(param);
/* clear status bits as well as disable exception mode if on */
ieee_set_fp_control(0);
@@ -759,13 +799,14 @@ int npy_clear_floatstatus(void)
#else
-int npy_get_floatstatus(void)
+int npy_get_floatstatus_barrier(char* NPY_UNUSED(param))
{
return 0;
}
-int npy_clear_floatstatus(void)
+int npy_clear_floatstatus_barrier(char *param)
{
+    npy_get_floatstatus_barrier(param);
return 0;
}
diff --git a/numpy/core/src/umath/_umath_tests.c.src b/numpy/core/src/umath/_umath_tests.c.src
index 120ce0332..76af40439 100644
--- a/numpy/core/src/umath/_umath_tests.c.src
+++ b/numpy/core/src/umath/_umath_tests.c.src
@@ -317,13 +317,16 @@ addUfuncs(PyObject *dictionary) {
static PyObject *
UMath_Tests_test_signature(PyObject *NPY_UNUSED(dummy), PyObject *args)
{
- int nin, nout;
+ int nin, nout, i;
PyObject *signature, *sig_str;
- PyObject *f;
+ PyUFuncObject *f = NULL;
+ PyObject *core_num_dims = NULL, *core_dim_ixs = NULL;
int core_enabled;
+ int core_num_ixs = 0;
- if (!PyArg_ParseTuple(args, "iiO", &nin, &nout, &signature)) return NULL;
-
+ if (!PyArg_ParseTuple(args, "iiO", &nin, &nout, &signature)) {
+ return NULL;
+ }
if (PyString_Check(signature)) {
sig_str = signature;
@@ -334,17 +337,60 @@ UMath_Tests_test_signature(PyObject *NPY_UNUSED(dummy), PyObject *args)
return NULL;
}
- f = PyUFunc_FromFuncAndDataAndSignature(NULL, NULL, NULL,
+ f = (PyUFuncObject*)PyUFunc_FromFuncAndDataAndSignature(
+ NULL, NULL, NULL,
0, nin, nout, PyUFunc_None, "no name",
"doc:none",
1, PyString_AS_STRING(sig_str));
if (sig_str != signature) {
Py_DECREF(sig_str);
}
- if (f == NULL) return NULL;
- core_enabled = ((PyUFuncObject*)f)->core_enabled;
+ if (f == NULL) {
+ return NULL;
+ }
+ core_enabled = f->core_enabled;
+ /*
+ * Don't presume core_num_dims and core_dim_ixs are defined;
+ * they currently are even if core_enabled=0, but there's no real
+ * reason they should be. So avoid segfaults if we change our mind.
+ */
+ if (f->core_num_dims != NULL) {
+ core_num_dims = PyTuple_New(f->nargs);
+ if (core_num_dims == NULL) {
+ goto fail;
+ }
+ for (i = 0; i < f->nargs; i++) {
+ PyObject *val = PyLong_FromLong(f->core_num_dims[i]);
+ PyTuple_SET_ITEM(core_num_dims, i, val);
+ core_num_ixs += f->core_num_dims[i];
+ }
+ }
+ else {
+ Py_INCREF(Py_None);
+ core_num_dims = Py_None;
+ }
+ if (f->core_dim_ixs != NULL) {
+ core_dim_ixs = PyTuple_New(core_num_ixs);
+        if (core_dim_ixs == NULL) {
+ goto fail;
+ }
+ for (i = 0; i < core_num_ixs; i++) {
+ PyObject * val = PyLong_FromLong(f->core_dim_ixs[i]);
+ PyTuple_SET_ITEM(core_dim_ixs, i, val);
+ }
+ }
+ else {
+ Py_INCREF(Py_None);
+ core_dim_ixs = Py_None;
+ }
Py_DECREF(f);
- return Py_BuildValue("i", core_enabled);
+ return Py_BuildValue("iOO", core_enabled, core_num_dims, core_dim_ixs);
+
+fail:
+ Py_XDECREF(f);
+ Py_XDECREF(core_num_dims);
+ Py_XDECREF(core_dim_ixs);
+ return NULL;
}
static PyMethodDef UMath_TestsMethods[] = {
diff --git a/numpy/core/src/umath/extobj.c b/numpy/core/src/umath/extobj.c
index e44036358..188054e22 100644
--- a/numpy/core/src/umath/extobj.c
+++ b/numpy/core/src/umath/extobj.c
@@ -284,7 +284,7 @@ _check_ufunc_fperr(int errmask, PyObject *extobj, const char *ufunc_name) {
if (!errmask) {
return 0;
}
- fperr = PyUFunc_getfperr();
+ fperr = npy_get_floatstatus_barrier((char*)extobj);
if (!fperr) {
return 0;
}
diff --git a/numpy/core/src/umath/loops.c.src b/numpy/core/src/umath/loops.c.src
index 8b1c7e703..1ca298b30 100644
--- a/numpy/core/src/umath/loops.c.src
+++ b/numpy/core/src/umath/loops.c.src
@@ -1819,7 +1819,7 @@ NPY_NO_EXPORT void
*((npy_bool *)op1) = @func@(in1) != 0;
}
}
- npy_clear_floatstatus();
+ npy_clear_floatstatus_barrier((char*)dimensions);
}
/**end repeat1**/
@@ -1866,6 +1866,9 @@ NPY_NO_EXPORT void
const @type@ in2 = *(@type@ *)ip2;
io1 = (io1 @OP@ in2 || npy_isnan(io1)) ? io1 : in2;
}
+ if (npy_isnan(io1)) {
+ npy_set_floatstatus_invalid();
+ }
*((@type@ *)iop1) = io1;
}
}
@@ -1901,7 +1904,7 @@ NPY_NO_EXPORT void
*((@type@ *)op1) = (in1 @OP@ in2 || npy_isnan(in2)) ? in1 : in2;
}
}
- npy_clear_floatstatus();
+ npy_clear_floatstatus_barrier((char*)dimensions);
}
/**end repeat1**/
@@ -1991,7 +1994,7 @@ NPY_NO_EXPORT void
*((@type@ *)op1) = tmp + 0;
}
}
- npy_clear_floatstatus();
+ npy_clear_floatstatus_barrier((char*)dimensions);
}
NPY_NO_EXPORT void
@@ -2177,7 +2180,7 @@ HALF_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED
const npy_half in1 = *(npy_half *)ip1;
*((npy_bool *)op1) = @func@(in1) != 0;
}
- npy_clear_floatstatus();
+ npy_clear_floatstatus_barrier((char*)dimensions);
}
/**end repeat**/
@@ -2239,7 +2242,7 @@ HALF_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED
const npy_half in2 = *(npy_half *)ip2;
*((npy_half *)op1) = (@OP@(in1, in2) || npy_half_isnan(in2)) ? in1 : in2;
}
- npy_clear_floatstatus();
+ npy_clear_floatstatus_barrier((char*)dimensions);
}
/**end repeat**/
@@ -2681,7 +2684,7 @@ NPY_NO_EXPORT void
const @ftype@ in1i = ((@ftype@ *)ip1)[1];
*((npy_bool *)op1) = @func@(in1r) @OP@ @func@(in1i);
}
- npy_clear_floatstatus();
+ npy_clear_floatstatus_barrier((char*)dimensions);
}
/**end repeat1**/
@@ -2790,7 +2793,7 @@ NPY_NO_EXPORT void
((@ftype@ *)op1)[1] = in2i;
}
}
- npy_clear_floatstatus();
+ npy_clear_floatstatus_barrier((char*)dimensions);
}
/**end repeat1**/
diff --git a/numpy/core/src/umath/override.c b/numpy/core/src/umath/override.c
index 0aef093b0..123d9af87 100644
--- a/numpy/core/src/umath/override.c
+++ b/numpy/core/src/umath/override.c
@@ -123,11 +123,16 @@ normalize_reduce_args(PyUFuncObject *ufunc, PyObject *args,
npy_intp nargs = PyTuple_GET_SIZE(args);
npy_intp i;
PyObject *obj;
- static char *kwlist[] = {"array", "axis", "dtype", "out", "keepdims"};
+ static PyObject *NoValue = NULL;
+ static char *kwlist[] = {"array", "axis", "dtype", "out", "keepdims",
+ "initial"};
+
+ npy_cache_import("numpy", "_NoValue", &NoValue);
+ if (NoValue == NULL) return -1;
- if (nargs < 1 || nargs > 5) {
+ if (nargs < 1 || nargs > 6) {
PyErr_Format(PyExc_TypeError,
- "ufunc.reduce() takes from 1 to 5 positional "
+ "ufunc.reduce() takes from 1 to 6 positional "
"arguments but %"NPY_INTP_FMT" were given", nargs);
return -1;
}
@@ -151,6 +156,10 @@ normalize_reduce_args(PyUFuncObject *ufunc, PyObject *args,
}
obj = PyTuple_GetSlice(args, 3, 4);
}
+ /* Remove initial=np._NoValue */
+ if (i == 5 && obj == NoValue) {
+ continue;
+ }
PyDict_SetItemString(*normal_kwds, kwlist[i], obj);
if (i == 3) {
Py_DECREF(obj);
diff --git a/numpy/core/src/umath/reduction.c b/numpy/core/src/umath/reduction.c
index 681d3fefa..5c3a84e21 100644
--- a/numpy/core/src/umath/reduction.c
+++ b/numpy/core/src/umath/reduction.c
@@ -537,7 +537,7 @@ PyUFunc_ReduceWrapper(PyArrayObject *operand, PyArrayObject *out,
}
/* Start with the floating-point exception flags cleared */
- PyUFunc_clearfperr();
+ npy_clear_floatstatus_barrier((char*)&iter);
if (NpyIter_GetIterSize(iter) != 0) {
NpyIter_IterNextFunc *iternext;
diff --git a/numpy/core/src/umath/scalarmath.c.src b/numpy/core/src/umath/scalarmath.c.src
index 6e1fb1ee8..3e29c4b4e 100644
--- a/numpy/core/src/umath/scalarmath.c.src
+++ b/numpy/core/src/umath/scalarmath.c.src
@@ -848,7 +848,7 @@ static PyObject *
}
#if @fperr@
- PyUFunc_clearfperr();
+ npy_clear_floatstatus_barrier((char*)&out);
#endif
/*
@@ -863,7 +863,7 @@ static PyObject *
#if @fperr@
/* Check status flag. If it is set, then look up what to do */
- retstatus = PyUFunc_getfperr();
+ retstatus = npy_get_floatstatus_barrier((char*)&out);
if (retstatus) {
int bufsize, errmask;
PyObject *errobj;
@@ -993,7 +993,7 @@ static PyObject *
return Py_NotImplemented;
}
- PyUFunc_clearfperr();
+ npy_clear_floatstatus_barrier((char*)&out);
/*
* here we do the actual calculation with arg1 and arg2
@@ -1008,7 +1008,7 @@ static PyObject *
}
/* Check status flag. If it is set, then look up what to do */
- retstatus = PyUFunc_getfperr();
+ retstatus = npy_get_floatstatus_barrier((char*)&out);
if (retstatus) {
int bufsize, errmask;
PyObject *errobj;
@@ -1072,7 +1072,7 @@ static PyObject *
return Py_NotImplemented;
}
- PyUFunc_clearfperr();
+ npy_clear_floatstatus_barrier((char*)&out);
/*
* here we do the actual calculation with arg1 and arg2
@@ -1136,7 +1136,7 @@ static PyObject *
return Py_NotImplemented;
}
- PyUFunc_clearfperr();
+ npy_clear_floatstatus_barrier((char*)&out);
/*
* here we do the actual calculation with arg1 and arg2
@@ -1150,7 +1150,7 @@ static PyObject *
}
/* Check status flag. If it is set, then look up what to do */
- retstatus = PyUFunc_getfperr();
+ retstatus = npy_get_floatstatus_barrier((char*)&out);
if (retstatus) {
int bufsize, errmask;
PyObject *errobj;
diff --git a/numpy/core/src/umath/simd.inc.src b/numpy/core/src/umath/simd.inc.src
index 2241414ac..5c0568c12 100644
--- a/numpy/core/src/umath/simd.inc.src
+++ b/numpy/core/src/umath/simd.inc.src
@@ -1031,7 +1031,7 @@ sse2_@kind@_@TYPE@(@type@ * ip, @type@ * op, const npy_intp n)
i += 2 * stride;
/* minps/minpd will set invalid flag if nan is encountered */
- npy_clear_floatstatus();
+ npy_clear_floatstatus_barrier((char*)&c1);
LOOP_BLOCKED(@type@, 32) {
@vtype@ v1 = @vpre@_load_@vsuf@((@type@*)&ip[i]);
@vtype@ v2 = @vpre@_load_@vsuf@((@type@*)&ip[i + stride]);
@@ -1040,7 +1040,7 @@ sse2_@kind@_@TYPE@(@type@ * ip, @type@ * op, const npy_intp n)
}
c1 = @vpre@_@VOP@_@vsuf@(c1, c2);
- if (npy_get_floatstatus() & NPY_FPE_INVALID) {
+ if (npy_get_floatstatus_barrier((char*)&c1) & NPY_FPE_INVALID) {
*op = @nan@;
}
else {
@@ -1051,6 +1051,9 @@ sse2_@kind@_@TYPE@(@type@ * ip, @type@ * op, const npy_intp n)
LOOP_BLOCKED_END {
*op = (*op @OP@ ip[i] || npy_isnan(*op)) ? *op : ip[i];
}
+ if (npy_isnan(*op)) {
+ npy_set_floatstatus_invalid();
+ }
}
/**end repeat1**/
diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c
index e0423630b..af415362b 100644
--- a/numpy/core/src/umath/ufunc_object.c
+++ b/numpy/core/src/umath/ufunc_object.c
@@ -65,6 +65,28 @@
#endif
/**********************************************/
+typedef struct {
+ PyObject *in; /* The input arguments to the ufunc, a tuple */
+ PyObject *out; /* The output arguments, a tuple. If no non-None outputs are
+ provided, then this is NULL. */
+} ufunc_full_args;
+
+/* Get the arg tuple to pass in the context argument to __array_wrap__ and
+ * __array_prepare__.
+ *
+ * Output arguments are only passed if at least one is non-None.
+ */
+static PyObject *
+_get_wrap_prepare_args(ufunc_full_args full_args) {
+ if (full_args.out == NULL) {
+ Py_INCREF(full_args.in);
+ return full_args.in;
+ }
+ else {
+ return PySequence_Concat(full_args.in, full_args.out);
+ }
+}
+
/* ---------------------------------------------------------------- */
static int
@@ -78,7 +100,8 @@ PyUFunc_getfperr(void)
* non-clearing get was only added in 1.9 so this function always cleared
* keep it so just in case third party code relied on the clearing
*/
- return npy_clear_floatstatus();
+ char param = 0;
+ return npy_clear_floatstatus_barrier(&param);
}
#define HANDLEIT(NAME, str) {if (retstatus & NPY_FPE_##NAME) { \
@@ -111,7 +134,8 @@ NPY_NO_EXPORT int
PyUFunc_checkfperr(int errmask, PyObject *errobj, int *first)
{
/* clearing is done for backward compatibility */
- int retstatus = npy_clear_floatstatus();
+ int retstatus;
+ retstatus = npy_clear_floatstatus_barrier((char*)&retstatus);
return PyUFunc_handlefperr(errmask, errobj, retstatus, first);
}
@@ -122,7 +146,8 @@ PyUFunc_checkfperr(int errmask, PyObject *errobj, int *first)
NPY_NO_EXPORT void
PyUFunc_clearfperr()
{
- npy_clear_floatstatus();
+ char param = 0;
+ npy_clear_floatstatus_barrier(&param);
}
/*
@@ -132,7 +157,7 @@ PyUFunc_clearfperr()
* defines the method.
*/
static PyObject*
-_find_array_method(PyObject *args, int nin, PyObject *method_name)
+_find_array_method(PyObject *args, PyObject *method_name)
{
int i, n_methods;
PyObject *obj;
@@ -140,7 +165,7 @@ _find_array_method(PyObject *args, int nin, PyObject *method_name)
PyObject *method = NULL;
n_methods = 0;
- for (i = 0; i < nin; i++) {
+ for (i = 0; i < PyTuple_GET_SIZE(args); i++) {
obj = PyTuple_GET_ITEM(args, i);
if (PyArray_CheckExact(obj) || PyArray_IsAnyScalar(obj)) {
continue;
@@ -238,17 +263,17 @@ _get_output_array_method(PyObject *obj, PyObject *method,
* should just have PyArray_Return called.
*/
static void
-_find_array_prepare(PyObject *args, PyObject *kwds,
+_find_array_prepare(ufunc_full_args args,
PyObject **output_prep, int nin, int nout)
{
- Py_ssize_t nargs;
int i;
+ PyObject *prep;
/*
* Determine the prepping function given by the input arrays
* (could be NULL).
*/
- PyObject *prep = _find_array_method(args, nin, npy_um_str_array_prepare);
+ prep = _find_array_method(args.in, npy_um_str_array_prepare);
/*
* For all the output arrays decide what to do.
*
@@ -261,29 +286,16 @@ _find_array_prepare(PyObject *args, PyObject *kwds,
* exact ndarray so that no PyArray_Return is
* done in that case.
*/
- nargs = PyTuple_GET_SIZE(args);
- for (i = 0; i < nout; i++) {
- int j = nin + i;
- PyObject *obj = NULL;
- if (j < nargs) {
- obj = PyTuple_GET_ITEM(args, j);
- /* Output argument one may also be in a keyword argument */
- if (i == 0 && obj == Py_None && kwds != NULL) {
- obj = PyDict_GetItem(kwds, npy_um_str_out);
- }
- }
- /* Output argument one may also be in a keyword argument */
- else if (i == 0 && kwds != NULL) {
- obj = PyDict_GetItem(kwds, npy_um_str_out);
- }
-
- if (obj == NULL) {
+ if (args.out == NULL) {
+ for (i = 0; i < nout; i++) {
Py_XINCREF(prep);
output_prep[i] = prep;
}
- else {
+ }
+ else {
+ for (i = 0; i < nout; i++) {
output_prep[i] = _get_output_array_method(
- obj, npy_um_str_array_prepare, prep);
+ PyTuple_GET_ITEM(args.out, i), npy_um_str_array_prepare, prep);
}
}
Py_XDECREF(prep);
@@ -556,7 +568,8 @@ get_ufunc_arguments(PyUFuncObject *ufunc,
PyObject **out_typetup,
int *out_subok,
PyArrayObject **out_wheremask,
- PyObject **out_axes)
+ PyObject **out_axes,
+ int *out_keepdims)
{
int i, nargs;
int nin = ufunc->nin;
@@ -811,9 +824,10 @@ get_ufunc_arguments(PyUFuncObject *ufunc,
switch (str[0]) {
case 'a':
- /* possible axis argument for generalized ufunc */
+ /* possible axes argument for generalized ufunc */
if (out_axes != NULL && strcmp(str, "axes") == 0) {
*out_axes = value;
+
bad_arg = 0;
}
break;
@@ -855,6 +869,17 @@ get_ufunc_arguments(PyUFuncObject *ufunc,
bad_arg = 0;
}
break;
+ case 'k':
+ if (out_keepdims != NULL && strcmp(str, "keepdims") == 0) {
+ if (!PyBool_Check(value)) {
+ PyErr_SetString(PyExc_TypeError,
+ "'keepdims' must be a boolean");
+ goto fail;
+ }
+ *out_keepdims = (value == Py_True);
+ bad_arg = 0;
+ }
+ break;
case 'o':
/*
* Output arrays may be specified as a keyword argument,
@@ -1141,22 +1166,31 @@ static int
prepare_ufunc_output(PyUFuncObject *ufunc,
PyArrayObject **op,
PyObject *arr_prep,
- PyObject *arr_prep_args,
+ ufunc_full_args full_args,
int i)
{
if (arr_prep != NULL && arr_prep != Py_None) {
PyObject *res;
PyArrayObject *arr;
+ PyObject *args_tup;
- res = PyObject_CallFunction(arr_prep, "O(OOi)",
- *op, ufunc, arr_prep_args, i);
- if ((res == NULL) || (res == Py_None) || !PyArray_Check(res)) {
- if (!PyErr_Occurred()){
- PyErr_SetString(PyExc_TypeError,
- "__array_prepare__ must return an "
- "ndarray or subclass thereof");
- }
- Py_XDECREF(res);
+ /* Call with the context argument */
+ args_tup = _get_wrap_prepare_args(full_args);
+ if (args_tup == NULL) {
+ return -1;
+ }
+ res = PyObject_CallFunction(
+ arr_prep, "O(OOi)", *op, ufunc, args_tup, i);
+ Py_DECREF(args_tup);
+
+ if (res == NULL) {
+ return -1;
+ }
+ else if (!PyArray_Check(res)) {
+ PyErr_SetString(PyExc_TypeError,
+ "__array_prepare__ must return an "
+ "ndarray or subclass thereof");
+ Py_DECREF(res);
return -1;
}
arr = (PyArrayObject *)res;
@@ -1199,7 +1233,7 @@ iterator_loop(PyUFuncObject *ufunc,
NPY_ORDER order,
npy_intp buffersize,
PyObject **arr_prep,
- PyObject *arr_prep_args,
+ ufunc_full_args full_args,
PyUFuncGenericFunction innerloop,
void *innerloopdata)
{
@@ -1261,7 +1295,7 @@ iterator_loop(PyUFuncObject *ufunc,
continue;
}
if (prepare_ufunc_output(ufunc, &op[nin+i],
- arr_prep[i], arr_prep_args, i) < 0) {
+ arr_prep[i], full_args, i) < 0) {
return -1;
}
}
@@ -1289,7 +1323,7 @@ iterator_loop(PyUFuncObject *ufunc,
/* Call the __array_prepare__ functions for the new array */
if (prepare_ufunc_output(ufunc, &op[nin+i],
- arr_prep[i], arr_prep_args, i) < 0) {
+ arr_prep[i], full_args, i) < 0) {
NpyIter_Close(iter);
NpyIter_Deallocate(iter);
return -1;
@@ -1369,7 +1403,7 @@ execute_legacy_ufunc_loop(PyUFuncObject *ufunc,
NPY_ORDER order,
npy_intp buffersize,
PyObject **arr_prep,
- PyObject *arr_prep_args)
+ ufunc_full_args full_args)
{
npy_intp nin = ufunc->nin, nout = ufunc->nout;
PyUFuncGenericFunction innerloop;
@@ -1406,7 +1440,7 @@ execute_legacy_ufunc_loop(PyUFuncObject *ufunc,
    /* Call the __array_prepare__ if necessary */
if (prepare_ufunc_output(ufunc, &op[1],
- arr_prep[0], arr_prep_args, 0) < 0) {
+ arr_prep[0], full_args, 0) < 0) {
return -1;
}
@@ -1423,7 +1457,7 @@ execute_legacy_ufunc_loop(PyUFuncObject *ufunc,
    /* Call the __array_prepare__ if necessary */
if (prepare_ufunc_output(ufunc, &op[1],
- arr_prep[0], arr_prep_args, 0) < 0) {
+ arr_prep[0], full_args, 0) < 0) {
return -1;
}
@@ -1465,7 +1499,7 @@ execute_legacy_ufunc_loop(PyUFuncObject *ufunc,
    /* Call the __array_prepare__ if necessary */
if (prepare_ufunc_output(ufunc, &op[2],
- arr_prep[0], arr_prep_args, 0) < 0) {
+ arr_prep[0], full_args, 0) < 0) {
return -1;
}
@@ -1484,7 +1518,7 @@ execute_legacy_ufunc_loop(PyUFuncObject *ufunc,
    /* Call the __array_prepare__ if necessary */
if (prepare_ufunc_output(ufunc, &op[2],
- arr_prep[0], arr_prep_args, 0) < 0) {
+ arr_prep[0], full_args, 0) < 0) {
return -1;
}
@@ -1503,7 +1537,7 @@ execute_legacy_ufunc_loop(PyUFuncObject *ufunc,
NPY_UF_DBG_PRINT("iterator loop\n");
if (iterator_loop(ufunc, op, dtypes, order,
- buffersize, arr_prep, arr_prep_args,
+ buffersize, arr_prep, full_args,
innerloop, innerloopdata) < 0) {
return -1;
}
@@ -1530,7 +1564,7 @@ execute_fancy_ufunc_loop(PyUFuncObject *ufunc,
NPY_ORDER order,
npy_intp buffersize,
PyObject **arr_prep,
- PyObject *arr_prep_args)
+ ufunc_full_args full_args)
{
int retval, i, nin = ufunc->nin, nout = ufunc->nout;
int nop = nin + nout;
@@ -1643,7 +1677,7 @@ execute_fancy_ufunc_loop(PyUFuncObject *ufunc,
Py_INCREF(op_tmp);
if (prepare_ufunc_output(ufunc, &op_tmp,
- arr_prep[i], arr_prep_args, i) < 0) {
+ arr_prep[i], full_args, i) < 0) {
NpyIter_Close(iter);
NpyIter_Deallocate(iter);
return -1;
@@ -1727,42 +1761,109 @@ execute_fancy_ufunc_loop(PyUFuncObject *ufunc,
return retval;
}
-static PyObject *
-make_arr_prep_args(npy_intp nin, PyObject *args, PyObject *kwds)
+static npy_bool
+tuple_all_none(PyObject *tup) {
+ npy_intp i;
+ for (i = 0; i < PyTuple_GET_SIZE(tup); ++i) {
+ if (PyTuple_GET_ITEM(tup, i) != Py_None) {
+ return NPY_FALSE;
+ }
+ }
+ return NPY_TRUE;
+}
+
+/*
+ * Convert positional args and the out kwarg into an input and output tuple.
+ *
+ * If the output tuple would be all None, full_args->out is set to NULL instead.
+ *
+ * This duplicates logic in many places, so further refactoring is needed:
+ * - get_ufunc_arguments
+ * - PyUFunc_WithOverride
+ * - normalize___call___args
+ */
+static int
+make_full_arg_tuple(
+ ufunc_full_args *full_args,
+ npy_intp nin, npy_intp nout,
+ PyObject *args, PyObject *kwds)
{
- PyObject *out = kwds ? PyDict_GetItem(kwds, npy_um_str_out) : NULL;
- PyObject *arr_prep_args;
+ PyObject *out_kwd = NULL;
+ npy_intp nargs = PyTuple_GET_SIZE(args);
+ npy_intp i;
- if (out == NULL) {
- Py_INCREF(args);
- return args;
+ /* This should have been checked by the caller */
+ assert(nin <= nargs && nargs <= nin + nout);
+
+ /* Initialize so we can XDECREF safely */
+ full_args->in = NULL;
+ full_args->out = NULL;
+
+ /* Get the input arguments */
+ full_args->in = PyTuple_GetSlice(args, 0, nin);
+ if (full_args->in == NULL) {
+ goto fail;
}
- else {
- npy_intp i, nargs = PyTuple_GET_SIZE(args), n;
- n = nargs;
- if (n < nin + 1) {
- n = nin + 1;
- }
- arr_prep_args = PyTuple_New(n);
- if (arr_prep_args == NULL) {
- return NULL;
+
+ /* Look for output keyword arguments */
+ out_kwd = kwds ? PyDict_GetItem(kwds, npy_um_str_out) : NULL;
+
+ if (out_kwd != NULL) {
+ assert(nargs == nin);
+ if (out_kwd == Py_None) {
+ return 0;
}
- /* Copy the tuple, but set the nin-th item to the keyword arg */
- for (i = 0; i < nin; ++i) {
- PyObject *item = PyTuple_GET_ITEM(args, i);
- Py_INCREF(item);
- PyTuple_SET_ITEM(arr_prep_args, i, item);
+ else if (PyTuple_Check(out_kwd)) {
+ assert(PyTuple_GET_SIZE(out_kwd) == nout);
+ if (tuple_all_none(out_kwd)) {
+ return 0;
+ }
+ Py_INCREF(out_kwd);
+ full_args->out = out_kwd;
+ return 0;
}
- Py_INCREF(out);
- PyTuple_SET_ITEM(arr_prep_args, nin, out);
- for (i = nin+1; i < n; ++i) {
- PyObject *item = PyTuple_GET_ITEM(args, i);
- Py_INCREF(item);
- PyTuple_SET_ITEM(arr_prep_args, i, item);
+ else {
+ /* A single argument x is promoted to (x, None, None ...) */
+ full_args->out = PyTuple_New(nout);
+ if (full_args->out == NULL) {
+ goto fail;
+ }
+ Py_INCREF(out_kwd);
+ PyTuple_SET_ITEM(full_args->out, 0, out_kwd);
+ for (i = 1; i < nout; ++i) {
+ Py_INCREF(Py_None);
+ PyTuple_SET_ITEM(full_args->out, i, Py_None);
+ }
+ return 0;
}
+ }
- return arr_prep_args;
+ /* copy across positional output arguments, adding trailing Nones */
+ full_args->out = PyTuple_New(nout);
+ if (full_args->out == NULL) {
+ goto fail;
+ }
+ for (i = nin; i < nargs; ++i) {
+ PyObject *item = PyTuple_GET_ITEM(args, i);
+ Py_INCREF(item);
+ PyTuple_SET_ITEM(full_args->out, i - nin, item);
+ }
+ for (i = nargs; i < nin + nout; ++i) {
+ Py_INCREF(Py_None);
+ PyTuple_SET_ITEM(full_args->out, i - nin, Py_None);
}
+
+ /* don't return a tuple full of None */
+ if (tuple_all_none(full_args->out)) {
+ Py_DECREF(full_args->out);
+ full_args->out = NULL;
+ }
+ return 0;
+
+fail:
+ Py_XDECREF(full_args->in);
+ Py_XDECREF(full_args->out);
+ return -1;
}
/*
@@ -1780,6 +1881,35 @@ _has_output_coredims(PyUFuncObject *ufunc) {
}
/*
+ * Check whether the gufunc can be used with keepdims, i.e., that all its
+ * input arguments have the same number of core dimensions, and all output
+ * arguments have no core dimensions. Returns 0 if all is fine, and sets
+ * an error and returns -1 if not.
+ */
+static int
+_check_keepdims_support(PyUFuncObject *ufunc) {
+ int i;
+ int nin = ufunc->nin, nout = ufunc->nout;
+ int input_core_dims = ufunc->core_num_dims[0];
+ for (i = 1; i < nin + nout; i++) {
+ if (ufunc->core_num_dims[i] != (i < nin ? input_core_dims : 0)) {
+ PyErr_Format(PyExc_TypeError,
+ "%s does not support keepdims: its signature %s requires "
+ "that %s %d has %d core dimensions, but keepdims can only "
+ "be used when all inputs have the same number of core "
+ "dimensions and all outputs have no core dimensions.",
+ ufunc_get_name_cstr(ufunc),
+ ufunc->core_signature,
+ i < nin ? "input" : "output",
+ i < nin ? i : i - nin,
+ ufunc->core_num_dims[i]);
+ return -1;
+ }
+ }
+ return 0;
+}
+
+/*
* Interpret a possible axes keyword argument, using it to fill the remap_axis
* array which maps default to actual axes for each operand, indexed as
* as remap_axis[iop][iaxis]. The default axis order has first all broadcast
@@ -1788,8 +1918,8 @@ _has_output_coredims(PyUFuncObject *ufunc) {
* Returns 0 on success, and -1 on failure
*/
static int
-_parse_axes_arg(PyUFuncObject *ufunc, PyObject *axes, PyArrayObject **op,
- int broadcast_ndim, int **remap_axis) {
+_parse_axes_arg(PyUFuncObject *ufunc, int core_num_dims[], PyObject *axes,
+ PyArrayObject **op, int broadcast_ndim, int **remap_axis) {
int nin = ufunc->nin;
int nout = ufunc->nout;
int nop = nin + nout;
@@ -1819,7 +1949,7 @@ _parse_axes_arg(PyUFuncObject *ufunc, PyObject *axes, PyArrayObject **op,
PyObject *op_axes_tuple, *axis_item;
int axis, op_axis;
- op_ncore = ufunc->core_num_dims[iop];
+ op_ncore = core_num_dims[iop];
if (op[iop] != NULL) {
op_ndim = PyArray_NDIM(op[iop]);
op_nbroadcast = op_ndim - op_ncore;
@@ -2069,6 +2199,8 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc,
/* Use remapped axes for generalized ufunc */
int broadcast_ndim, iter_ndim;
+ int core_num_dims_array[NPY_MAXARGS];
+ int *core_num_dims;
int op_axes_arrays[NPY_MAXARGS][NPY_MAXDIMS];
int *op_axes[NPY_MAXARGS];
@@ -2097,17 +2229,15 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc,
int **remap_axis = NULL;
/* The __array_prepare__ function to call for each output */
PyObject *arr_prep[NPY_MAXARGS];
- /*
- * This is either args, or args with the out= parameter from
- * kwds added appropriately.
- */
- PyObject *arr_prep_args = NULL;
+ /* The separated input and output arguments, parsed from args and kwds */
+ ufunc_full_args full_args = {NULL, NULL};
NPY_ORDER order = NPY_KEEPORDER;
/* Use the default assignment casting rule */
NPY_CASTING casting = NPY_DEFAULT_ASSIGN_CASTING;
/* When provided, extobj, typetup, and axes contain borrowed references */
PyObject *extobj = NULL, *type_tup = NULL, *axes = NULL;
+ int keepdims = -1;
if (ufunc == NULL) {
PyErr_SetString(PyExc_ValueError, "function not supported");
@@ -2134,25 +2264,53 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc,
/* Get all the arguments */
retval = get_ufunc_arguments(ufunc, args, kwds,
op, &order, &casting, &extobj,
- &type_tup, &subok, NULL, &axes);
+ &type_tup, &subok, NULL, &axes, &keepdims);
if (retval < 0) {
goto fail;
}
-
+ /*
+ * If keepdims was passed in (and thus changed from the initial value
+ * on top), check the gufunc is suitable, i.e., that its inputs share
+ * the same number of core dimensions, and its outputs have none.
+ */
+ if (keepdims != -1) {
+ retval = _check_keepdims_support(ufunc);
+ if (retval < 0) {
+ goto fail;
+ }
+ }
+ /*
+ * If keepdims is set and true, signal all dimensions will be the same.
+ */
+ if (keepdims == 1) {
+ int num_dims = ufunc->core_num_dims[0];
+ for (i = 0; i < nop; ++i) {
+ core_num_dims_array[i] = num_dims;
+ }
+ core_num_dims = core_num_dims_array;
+ }
+ else {
+ /* keepdims was not set or was false; no adjustment necessary */
+ core_num_dims = ufunc->core_num_dims;
+ keepdims = 0;
+ }
/*
* Check that operands have the minimum dimensions required.
* (Just checks core; broadcast dimensions are tested by the iterator.)
*/
for (i = 0; i < nop; i++) {
- if (op[i] != NULL && PyArray_NDIM(op[i]) < ufunc->core_num_dims[i]) {
+ if (op[i] != NULL && PyArray_NDIM(op[i]) < core_num_dims[i]) {
PyErr_Format(PyExc_ValueError,
"%s: %s operand %d does not have enough "
"dimensions (has %d, gufunc core with "
"signature %s requires %d)",
- ufunc_get_name_cstr(ufunc),
+ ufunc_name,
i < nin ? "Input" : "Output",
- i < nin ? i : i - nin, PyArray_NDIM(op[i]),
- ufunc->core_signature, ufunc->core_num_dims[i]);
+ i < nin ? i : i - nin,
+ PyArray_NDIM(op[i]),
+ ufunc->core_signature,
+ core_num_dims[i]);
+ retval = -1;
goto fail;
}
}
@@ -2164,7 +2322,7 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc,
*/
broadcast_ndim = 0;
for (i = 0; i < nin; ++i) {
- int n = PyArray_NDIM(op[i]) - ufunc->core_num_dims[i];
+ int n = PyArray_NDIM(op[i]) - core_num_dims[i];
if (n > broadcast_ndim) {
broadcast_ndim = n;
}
@@ -2178,7 +2336,7 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc,
*/
iter_ndim = broadcast_ndim;
for (i = nin; i < nop; ++i) {
- iter_ndim += ufunc->core_num_dims[i];
+ iter_ndim += core_num_dims[i];
}
if (iter_ndim > NPY_MAXDIMS) {
PyErr_Format(PyExc_ValueError,
@@ -2200,7 +2358,7 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc,
for (i=0; i < nop; i++) {
remap_axis[i] = remap_axis_memory + i * NPY_MAXDIMS;
}
- retval = _parse_axes_arg(ufunc, axes, op, broadcast_ndim,
+ retval = _parse_axes_arg(ufunc, core_num_dims, axes, op, broadcast_ndim,
remap_axis);
if(retval < 0) {
goto fail;
@@ -2222,12 +2380,13 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc,
j = broadcast_ndim;
for (i = 0; i < nop; ++i) {
int n;
+
if (op[i]) {
/*
* Note that n may be negative if broadcasting
* extends into the core dimensions.
*/
- n = PyArray_NDIM(op[i]) - ufunc->core_num_dims[i];
+ n = PyArray_NDIM(op[i]) - core_num_dims[i];
}
else {
n = broadcast_ndim;
@@ -2251,10 +2410,15 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc,
/* Except for when it belongs to this output */
if (i >= nin) {
int dim_offset = ufunc->core_offsets[i];
- int num_dims = ufunc->core_num_dims[i];
- /* Fill in 'iter_shape' and 'op_axes' for this output */
+ int num_dims = core_num_dims[i];
+ /*
+ * Fill in 'iter_shape' and 'op_axes' for the core dimensions
+ * of this output. Here, we have to be careful: if keepdims
+ * was used, then this axis is not a real core dimension,
+ * but is being added back for broadcasting, so its size is 1.
+ */
for (idim = 0; idim < num_dims; ++idim) {
- iter_shape[j] = core_dim_sizes[
+ iter_shape[j] = keepdims ? 1 : core_dim_sizes[
ufunc->core_dim_ixs[dim_offset + idim]];
op_axes_arrays[i][j] = REMAP_AXIS(i, n + idim);
++j;
@@ -2300,19 +2464,15 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc,
#endif
if (subok) {
+ if (make_full_arg_tuple(&full_args, nin, nout, args, kwds) < 0) {
+ goto fail;
+ }
+
/*
* Get the appropriate __array_prepare__ function to call
* for each output
*/
- _find_array_prepare(args, kwds, arr_prep, nin, nout);
-
- /* Set up arr_prep_args if a prep function was needed */
- for (i = 0; i < nout; ++i) {
- if (arr_prep[i] != NULL && arr_prep[i] != Py_None) {
- arr_prep_args = make_arr_prep_args(nin, args, kwds);
- break;
- }
- }
+ _find_array_prepare(full_args, arr_prep, nin, nout);
}
/* If the loop wants the arrays, provide them */
@@ -2378,7 +2538,7 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc,
*/
core_dim_ixs_size = 0;
for (i = 0; i < nop; ++i) {
- core_dim_ixs_size += ufunc->core_num_dims[i];
+ core_dim_ixs_size += core_num_dims[i];
}
inner_strides = (npy_intp *)PyArray_malloc(
NPY_SIZEOF_INTP * (nop+core_dim_ixs_size));
@@ -2390,7 +2550,7 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc,
/* Copy the strides after the first nop */
idim = nop;
for (i = 0; i < nop; ++i) {
- int num_dims = ufunc->core_num_dims[i];
+ int num_dims = core_num_dims[i];
int core_start_dim = PyArray_NDIM(op[i]) - num_dims;
/*
* Need to use the arrays in the iterator, not op, because
@@ -2459,7 +2619,7 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc,
#endif
/* Start with the floating-point exception flags cleared */
- PyUFunc_clearfperr();
+ npy_clear_floatstatus_barrier((char*)&iter);
NPY_UF_DBG_PRINT("Executing inner loop\n");
@@ -2543,7 +2703,8 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc,
Py_XDECREF(arr_prep[i]);
}
Py_XDECREF(type_tup);
- Py_XDECREF(arr_prep_args);
+ Py_XDECREF(full_args.in);
+ Py_XDECREF(full_args.out);
NPY_UF_DBG_PRINT("Returning Success\n");
@@ -2561,7 +2722,8 @@ fail:
Py_XDECREF(arr_prep[i]);
}
Py_XDECREF(type_tup);
- Py_XDECREF(arr_prep_args);
+ Py_XDECREF(full_args.in);
+ Py_XDECREF(full_args.out);
PyArray_free(remap_axis_memory);
PyArray_free(remap_axis);
return retval;
@@ -2599,7 +2761,7 @@ PyUFunc_GenericFunction(PyUFuncObject *ufunc,
     * The separated input and output arguments, parsed from
     * args and kwds.
     */
- PyObject *arr_prep_args = NULL;
+ ufunc_full_args full_args = {NULL, NULL};
int trivial_loop_ok = 0;
@@ -2638,7 +2800,7 @@ PyUFunc_GenericFunction(PyUFuncObject *ufunc,
/* Get all the arguments */
retval = get_ufunc_arguments(ufunc, args, kwds,
op, &order, &casting, &extobj,
- &type_tup, &subok, &wheremask, NULL);
+ &type_tup, &subok, &wheremask, NULL, NULL);
if (retval < 0) {
goto fail;
}
@@ -2691,23 +2853,18 @@ PyUFunc_GenericFunction(PyUFuncObject *ufunc,
#endif
if (subok) {
+ if (make_full_arg_tuple(&full_args, nin, nout, args, kwds) < 0) {
+ goto fail;
+ }
/*
* Get the appropriate __array_prepare__ function to call
* for each output
*/
- _find_array_prepare(args, kwds, arr_prep, nin, nout);
-
- /* Set up arr_prep_args if a prep function was needed */
- for (i = 0; i < nout; ++i) {
- if (arr_prep[i] != NULL && arr_prep[i] != Py_None) {
- arr_prep_args = make_arr_prep_args(nin, args, kwds);
- break;
- }
- }
+ _find_array_prepare(full_args, arr_prep, nin, nout);
}
/* Start with the floating-point exception flags cleared */
- PyUFunc_clearfperr();
+ npy_clear_floatstatus_barrier((char*)&ufunc);
/* Do the ufunc loop */
if (need_fancy) {
@@ -2715,14 +2872,14 @@ PyUFunc_GenericFunction(PyUFuncObject *ufunc,
retval = execute_fancy_ufunc_loop(ufunc, wheremask,
op, dtypes, order,
- buffersize, arr_prep, arr_prep_args);
+ buffersize, arr_prep, full_args);
}
else {
NPY_UF_DBG_PRINT("Executing legacy inner loop\n");
retval = execute_legacy_ufunc_loop(ufunc, trivial_loop_ok,
op, dtypes, order,
- buffersize, arr_prep, arr_prep_args);
+ buffersize, arr_prep, full_args);
}
if (retval < 0) {
goto fail;
@@ -2742,7 +2899,8 @@ PyUFunc_GenericFunction(PyUFuncObject *ufunc,
Py_XDECREF(arr_prep[i]);
}
Py_XDECREF(type_tup);
- Py_XDECREF(arr_prep_args);
+ Py_XDECREF(full_args.in);
+ Py_XDECREF(full_args.out);
Py_XDECREF(wheremask);
NPY_UF_DBG_PRINT("Returning Success\n");
@@ -2758,7 +2916,8 @@ fail:
Py_XDECREF(arr_prep[i]);
}
Py_XDECREF(type_tup);
- Py_XDECREF(arr_prep_args);
+ Py_XDECREF(full_args.in);
+ Py_XDECREF(full_args.out);
Py_XDECREF(wheremask);
return retval;
@@ -3019,20 +3178,25 @@ finish_loop:
*/
static PyArrayObject *
PyUFunc_Reduce(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out,
- int naxes, int *axes, PyArray_Descr *odtype, int keepdims)
+ int naxes, int *axes, PyArray_Descr *odtype, int keepdims,
+ PyObject *initial)
{
int iaxes, ndim;
npy_bool reorderable;
npy_bool axis_flags[NPY_MAXDIMS];
PyArray_Descr *dtype;
PyArrayObject *result;
- PyObject *identity = NULL;
+ PyObject *identity;
const char *ufunc_name = ufunc_get_name_cstr(ufunc);
/* These parameters come from a TLS global */
int buffersize = 0, errormask = 0;
+ static PyObject *NoValue = NULL;
NPY_UF_DBG_PRINT1("\nEvaluating ufunc %s.reduce\n", ufunc_name);
+ npy_cache_import("numpy", "_NoValue", &NoValue);
+ if (NoValue == NULL) return NULL;
+
ndim = PyArray_NDIM(arr);
/* Create an array of flags for reduction */
@@ -3056,19 +3220,28 @@ PyUFunc_Reduce(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out,
if (identity == NULL) {
return NULL;
}
- /*
- * The identity for a dynamic dtype like
- * object arrays can't be used in general
- */
- if (identity != Py_None && PyArray_ISOBJECT(arr) && PyArray_SIZE(arr) != 0) {
+
+ /* Get the initial value */
+ if (initial == NULL || initial == NoValue) {
+ initial = identity;
+
+ /*
+ * The identity for a dynamic dtype like
+ * object arrays can't be used in general
+ */
+ if (initial != Py_None && PyArray_ISOBJECT(arr) && PyArray_SIZE(arr) != 0) {
+ Py_DECREF(initial);
+ initial = Py_None;
+ Py_INCREF(initial);
+ }
+ } else {
Py_DECREF(identity);
- identity = Py_None;
- Py_INCREF(identity);
+ Py_INCREF(initial); /* match the reference count in the if above */
}
/* Get the reduction dtype */
if (reduce_type_resolver(ufunc, arr, odtype, &dtype) < 0) {
- Py_DECREF(identity);
+ Py_DECREF(initial);
return NULL;
}
@@ -3076,12 +3249,12 @@ PyUFunc_Reduce(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out,
NPY_UNSAFE_CASTING,
axis_flags, reorderable,
keepdims, 0,
- identity,
+ initial,
reduce_loop,
ufunc, buffersize, ufunc_name, errormask);
Py_DECREF(dtype);
- Py_DECREF(identity);
+ Py_DECREF(initial);
return result;
}
@@ -3472,7 +3645,7 @@ PyUFunc_Reduceat(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *ind,
op_axes_arrays[2]};
npy_uint32 op_flags[3];
int i, idim, ndim, otype_final;
- int need_outer_iterator;
+ int need_outer_iterator = 0;
NpyIter *iter = NULL;
@@ -3845,8 +4018,9 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, PyObject *args,
PyArray_Descr *otype = NULL;
PyArrayObject *out = NULL;
int keepdims = 0;
+ PyObject *initial = NULL;
static char *reduce_kwlist[] = {
- "array", "axis", "dtype", "out", "keepdims", NULL};
+ "array", "axis", "dtype", "out", "keepdims", "initial", NULL};
static char *accumulate_kwlist[] = {
"array", "axis", "dtype", "out", NULL};
static char *reduceat_kwlist[] = {
@@ -3918,13 +4092,13 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, PyObject *args,
}
}
else {
- if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|OO&O&i:reduce",
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|OO&O&iO:reduce",
reduce_kwlist,
&op,
&axes_in,
PyArray_DescrConverter2, &otype,
PyArray_OutputConverter, &out,
- &keepdims)) {
+ &keepdims, &initial)) {
goto fail;
}
}
@@ -4055,7 +4229,7 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, PyObject *args,
switch(operation) {
case UFUNC_REDUCE:
ret = PyUFunc_Reduce(ufunc, mp, out, naxes, axes,
- otype, keepdims);
+ otype, keepdims, initial);
break;
case UFUNC_ACCUMULATE:
if (naxes != 1) {
@@ -4127,11 +4301,10 @@ fail:
* should just have PyArray_Return called.
*/
static void
-_find_array_wrap(PyObject *args, PyObject *kwds,
+_find_array_wrap(ufunc_full_args args, PyObject *kwds,
PyObject **output_wrap, int nin, int nout)
{
- Py_ssize_t nargs;
- int i, idx_offset, start_idx;
+ int i;
PyObject *obj;
PyObject *wrap = NULL;
@@ -4151,7 +4324,7 @@ _find_array_wrap(PyObject *args, PyObject *kwds,
* Determine the wrapping function given by the input arrays
* (could be NULL).
*/
- wrap = _find_array_method(args, nin, npy_um_str_array_wrap);
+ wrap = _find_array_method(args.in, npy_um_str_array_wrap);
/*
* For all the output arrays decide what to do.
@@ -4166,44 +4339,16 @@ _find_array_wrap(PyObject *args, PyObject *kwds,
* done in that case.
*/
handle_out:
- nargs = PyTuple_GET_SIZE(args);
- /* Default is using positional arguments */
- obj = args;
- idx_offset = nin;
- start_idx = 0;
- if (nin == nargs && kwds != NULL) {
- /* There may be a keyword argument we can use instead */
- obj = PyDict_GetItem(kwds, npy_um_str_out);
- if (obj == NULL) {
- /* No, go back to positional (even though there aren't any) */
- obj = args;
- }
- else {
- idx_offset = 0;
- if (PyTuple_Check(obj)) {
- /* If a tuple, must have all nout items */
- nargs = nout;
- }
- else {
- /* If the kwarg is not a tuple then it is an array (or None) */
- output_wrap[0] = _get_output_array_method(
- obj, npy_um_str_array_wrap, wrap);
- start_idx = 1;
- nargs = 1;
- }
+ if (args.out == NULL) {
+ for (i = 0; i < nout; i++) {
+ Py_XINCREF(wrap);
+ output_wrap[i] = wrap;
}
}
-
- for (i = start_idx; i < nout; ++i) {
- int j = idx_offset + i;
-
- if (j < nargs) {
+ else {
+ for (i = 0; i < nout; i++) {
output_wrap[i] = _get_output_array_method(
- PyTuple_GET_ITEM(obj, j), npy_um_str_array_wrap, wrap);
- }
- else {
- output_wrap[i] = wrap;
- Py_XINCREF(wrap);
+ PyTuple_GET_ITEM(args.out, i), npy_um_str_array_wrap, wrap);
}
}
@@ -4216,12 +4361,11 @@ static PyObject *
ufunc_generic_call(PyUFuncObject *ufunc, PyObject *args, PyObject *kwds)
{
int i;
- PyTupleObject *ret;
PyArrayObject *mps[NPY_MAXARGS];
PyObject *retobj[NPY_MAXARGS];
PyObject *wraparr[NPY_MAXARGS];
- PyObject *res;
PyObject *override = NULL;
+ ufunc_full_args full_args = {NULL, NULL};
int errval;
errval = PyUFunc_CheckOverride(ufunc, "__call__", args, kwds, &override);
@@ -4286,20 +4430,37 @@ ufunc_generic_call(PyUFuncObject *ufunc, PyObject *args, PyObject *kwds)
* None --- array-object passed in don't call PyArray_Return
* method --- the __array_wrap__ method to call.
*/
- _find_array_wrap(args, kwds, wraparr, ufunc->nin, ufunc->nout);
+ if (make_full_arg_tuple(&full_args, ufunc->nin, ufunc->nout, args, kwds) < 0) {
+ goto fail;
+ }
+ _find_array_wrap(full_args, kwds, wraparr, ufunc->nin, ufunc->nout);
/* wrap outputs */
for (i = 0; i < ufunc->nout; i++) {
int j = ufunc->nin+i;
PyObject *wrap = wraparr[i];
- if (wrap != NULL) {
- if (wrap == Py_None) {
- Py_DECREF(wrap);
- retobj[i] = (PyObject *)mps[j];
- continue;
+ if (wrap == NULL) {
+ /* default behavior */
+ retobj[i] = PyArray_Return(mps[j]);
+ }
+ else if (wrap == Py_None) {
+ Py_DECREF(wrap);
+ retobj[i] = (PyObject *)mps[j];
+ }
+ else {
+ PyObject *res;
+ PyObject *args_tup;
+
+ /* Call the method with appropriate context */
+ args_tup = _get_wrap_prepare_args(full_args);
+ if (args_tup == NULL) {
+ goto fail;
}
- res = PyObject_CallFunction(wrap, "O(OOi)", mps[j], ufunc, args, i);
+ res = PyObject_CallFunction(
+ wrap, "O(OOi)", mps[j], ufunc, args_tup, i);
+ Py_DECREF(args_tup);
+
/* Handle __array_wrap__ that does not accept a context argument */
if (res == NULL && PyErr_ExceptionMatches(PyExc_TypeError)) {
PyErr_Clear();
@@ -4309,23 +4470,21 @@ ufunc_generic_call(PyUFuncObject *ufunc, PyObject *args, PyObject *kwds)
if (res == NULL) {
goto fail;
}
- else {
- Py_DECREF(mps[j]);
- retobj[i] = res;
- continue;
- }
- }
- else {
- /* default behavior */
- retobj[i] = PyArray_Return(mps[j]);
- }
+ Py_DECREF(mps[j]);
+ retobj[i] = res;
+ }
}
+ Py_XDECREF(full_args.in);
+ Py_XDECREF(full_args.out);
+
if (ufunc->nout == 1) {
return retobj[0];
}
else {
+ PyTupleObject *ret;
+
ret = (PyTupleObject *)PyTuple_New(ufunc->nout);
for (i = 0; i < ufunc->nout; i++) {
PyTuple_SET_ITEM(ret, i, retobj[i]);
@@ -4334,6 +4493,8 @@ ufunc_generic_call(PyUFuncObject *ufunc, PyObject *args, PyObject *kwds)
}
fail:
+ Py_XDECREF(full_args.in);
+ Py_XDECREF(full_args.out);
for (i = ufunc->nin; i < ufunc->nargs; i++) {
Py_XDECREF(mps[i]);
}
@@ -4439,7 +4600,7 @@ PyUFunc_FromFuncAndData(PyUFuncGenericFunction *func, void **data,
const char *name, const char *doc, int unused)
{
return PyUFunc_FromFuncAndDataAndSignature(func, data, types, ntypes,
- nin, nout, identity, name, doc, 0, NULL);
+ nin, nout, identity, name, doc, unused, NULL);
}
/*UFUNC_API*/
diff --git a/numpy/core/src/umath/umathmodule.c b/numpy/core/src/umath/umathmodule.c
index 15da831b2..5567b9bbf 100644
--- a/numpy/core/src/umath/umathmodule.c
+++ b/numpy/core/src/umath/umathmodule.c
@@ -87,11 +87,12 @@ ufunc_frompyfunc(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *NPY_UNUS
/* Keywords are ignored for now */
PyObject *function, *pyname = NULL;
- int nin, nout, i;
+ int nin, nout, i, nargs;
PyUFunc_PyFuncData *fdata;
PyUFuncObject *self;
- char *fname, *str;
+ char *fname, *str, *types, *doc;
Py_ssize_t fname_len = -1;
+ void * ptr, **data;
int offset[2];
if (!PyArg_ParseTuple(args, "Oii:frompyfunc", &function, &nin, &nout)) {
@@ -101,43 +102,7 @@ ufunc_frompyfunc(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *NPY_UNUS
PyErr_SetString(PyExc_TypeError, "function must be callable");
return NULL;
}
- if (nin + nout > NPY_MAXARGS) {
- PyErr_Format(PyExc_ValueError,
- "Cannot construct a ufunc with more than %d operands "
- "(requested number were: inputs = %d and outputs = %d)",
- NPY_MAXARGS, nin, nout);
- return NULL;
- }
- self = PyArray_malloc(sizeof(PyUFuncObject));
- if (self == NULL) {
- return NULL;
- }
- PyObject_Init((PyObject *)self, &PyUFunc_Type);
-
- self->userloops = NULL;
- self->nin = nin;
- self->nout = nout;
- self->nargs = nin + nout;
- self->identity = PyUFunc_None;
- self->functions = pyfunc_functions;
- self->ntypes = 1;
-
- /* generalized ufunc */
- self->core_enabled = 0;
- self->core_num_dim_ix = 0;
- self->core_num_dims = NULL;
- self->core_dim_ixs = NULL;
- self->core_offsets = NULL;
- self->core_signature = NULL;
- self->op_flags = PyArray_malloc(sizeof(npy_uint32)*self->nargs);
- if (self->op_flags == NULL) {
- return PyErr_NoMemory();
- }
- memset(self->op_flags, 0, sizeof(npy_uint32)*self->nargs);
- self->iter_flags = 0;
-
- self->type_resolver = &object_ufunc_type_resolver;
- self->legacy_inner_loop_selector = &object_ufunc_loop_selector;
+ nargs = nin + nout;
pyname = PyObject_GetAttrString(function, "__name__");
if (pyname) {
@@ -150,7 +115,7 @@ ufunc_frompyfunc(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *NPY_UNUS
}
/*
- * self->ptr holds a pointer for enough memory for
+ * ptr will be assigned to self->ptr; it points to enough memory for
* self->data[0] (fdata)
* self->data
* self->name
@@ -164,39 +129,51 @@ ufunc_frompyfunc(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *NPY_UNUS
if (i) {
offset[0] += (sizeof(void *) - i);
}
- offset[1] = self->nargs;
- i = (self->nargs % sizeof(void *));
+ offset[1] = nargs;
+ i = (nargs % sizeof(void *));
if (i) {
offset[1] += (sizeof(void *)-i);
}
- self->ptr = PyArray_malloc(offset[0] + offset[1] + sizeof(void *) +
+ ptr = PyArray_malloc(offset[0] + offset[1] + sizeof(void *) +
(fname_len + 14));
- if (self->ptr == NULL) {
+ if (ptr == NULL) {
Py_XDECREF(pyname);
return PyErr_NoMemory();
}
- Py_INCREF(function);
- self->obj = function;
- fdata = (PyUFunc_PyFuncData *)(self->ptr);
+ fdata = (PyUFunc_PyFuncData *)(ptr);
+ fdata->callable = function;
fdata->nin = nin;
fdata->nout = nout;
- fdata->callable = function;
- self->data = (void **)(((char *)self->ptr) + offset[0]);
- self->data[0] = (void *)fdata;
- self->types = (char *)self->data + sizeof(void *);
- for (i = 0; i < self->nargs; i++) {
- self->types[i] = NPY_OBJECT;
+ data = (void **)(((char *)ptr) + offset[0]);
+ data[0] = (void *)fdata;
+ types = (char *)data + sizeof(void *);
+ for (i = 0; i < nargs; i++) {
+ types[i] = NPY_OBJECT;
}
- str = self->types + offset[1];
+ str = types + offset[1];
memcpy(str, fname, fname_len);
memcpy(str+fname_len, " (vectorized)", 14);
- self->name = str;
-
Py_XDECREF(pyname);
/* Do a better job someday */
- self->doc = "dynamic ufunc based on a python function";
+ doc = "dynamic ufunc based on a python function";
+
+ self = (PyUFuncObject *)PyUFunc_FromFuncAndData(
+ (PyUFuncGenericFunction *)pyfunc_functions, data,
+ types, /* ntypes */ 1, nin, nout, PyUFunc_None,
+ str, doc, /* unused */ 0);
+
+ if (self == NULL) {
+ PyArray_free(ptr);
+ return NULL;
+ }
+ Py_INCREF(function);
+ self->obj = function;
+ self->ptr = ptr;
+
+ self->type_resolver = &object_ufunc_type_resolver;
+ self->legacy_inner_loop_selector = &object_ufunc_loop_selector;
return (PyObject *)self;
}
diff --git a/numpy/core/tests/test_api.py b/numpy/core/tests/test_api.py
index a927968a4..9755e7b36 100644
--- a/numpy/core/tests/test_api.py
+++ b/numpy/core/tests/test_api.py
@@ -223,22 +223,25 @@ def test_array_astype():
b = a.astype('f4', subok=0, copy=False)
assert_(a is b)
- a = np.matrix([[0, 1, 2], [3, 4, 5]], dtype='f4')
+ class MyNDArray(np.ndarray):
+ pass
- # subok=True passes through a matrix
+ a = np.array([[0, 1, 2], [3, 4, 5]], dtype='f4').view(MyNDArray)
+
+ # subok=True passes through a subclass
b = a.astype('f4', subok=True, copy=False)
assert_(a is b)
# subok=True is default, and creates a subtype on a cast
b = a.astype('i4', copy=False)
assert_equal(a, b)
- assert_equal(type(b), np.matrix)
+ assert_equal(type(b), MyNDArray)
- # subok=False never returns a matrix
+ # subok=False never returns a subclass
b = a.astype('f4', subok=False, copy=False)
assert_equal(a, b)
assert_(not (a is b))
- assert_(type(b) is not np.matrix)
+ assert_(type(b) is not MyNDArray)
# Make sure converting from string object to fixed length string
# does not truncate.
diff --git a/numpy/core/tests/test_arrayprint.py b/numpy/core/tests/test_arrayprint.py
index 2c142f82b..6214e325c 100644
--- a/numpy/core/tests/test_arrayprint.py
+++ b/numpy/core/tests/test_arrayprint.py
@@ -491,6 +491,8 @@ class TestPrintOptions(object):
np.array(1.), style=repr)
# but not in legacy mode
np.array2string(np.array(1.), style=repr, legacy='1.13')
+ # gh-10934 style was broken in legacy mode, check it works
+ np.array2string(np.array(1.), legacy='1.13')
def test_float_spacing(self):
x = np.array([1., 2., 3.])
diff --git a/numpy/core/tests/test_datetime.py b/numpy/core/tests/test_datetime.py
index dca2d2541..e433877e8 100644
--- a/numpy/core/tests/test_datetime.py
+++ b/numpy/core/tests/test_datetime.py
@@ -124,7 +124,7 @@ class TestDateTime(object):
assert_(not np.can_cast('M8[h]', 'M8', casting='safe'))
def test_compare_generic_nat(self):
- # regression tests for GH6452
+ # regression tests for gh-6452
assert_equal(np.datetime64('NaT'),
np.datetime64('2000') + np.timedelta64('NaT'))
# nb. we may want to make NaT != NaT true in the future
@@ -236,18 +236,25 @@ class TestDateTime(object):
# find "supertype" for non-dates and dates
b = np.bool_(True)
- dt = np.datetime64('1970-01-01', 'M')
- arr = np.array([b, dt])
+ dm = np.datetime64('1970-01-01', 'M')
+ d = datetime.date(1970, 1, 1)
+ dt = datetime.datetime(1970, 1, 1, 12, 30, 40)
+
+ arr = np.array([b, dm])
assert_equal(arr.dtype, np.dtype('O'))
- dt = datetime.date(1970, 1, 1)
- arr = np.array([b, dt])
+ arr = np.array([b, d])
assert_equal(arr.dtype, np.dtype('O'))
- dt = datetime.datetime(1970, 1, 1, 12, 30, 40)
arr = np.array([b, dt])
assert_equal(arr.dtype, np.dtype('O'))
+ arr = np.array([d, d]).astype('datetime64')
+ assert_equal(arr.dtype, np.dtype('M8[D]'))
+
+ arr = np.array([dt, dt]).astype('datetime64')
+ assert_equal(arr.dtype, np.dtype('M8[us]'))
+
def test_timedelta_scalar_construction(self):
# Construct with different units
assert_equal(np.timedelta64(7, 'D'),
@@ -324,6 +331,24 @@ class TestDateTime(object):
a = np.timedelta64(1, 'Y')
assert_raises(TypeError, np.timedelta64, a, 'D')
assert_raises(TypeError, np.timedelta64, a, 'm')
+ a = datetime.timedelta(seconds=3)
+ assert_raises(TypeError, np.timedelta64, a, 'M')
+ assert_raises(TypeError, np.timedelta64, a, 'Y')
+ a = datetime.timedelta(weeks=3)
+ assert_raises(TypeError, np.timedelta64, a, 'M')
+ assert_raises(TypeError, np.timedelta64, a, 'Y')
+ a = datetime.timedelta()
+ assert_raises(TypeError, np.timedelta64, a, 'M')
+ assert_raises(TypeError, np.timedelta64, a, 'Y')
+
+ def test_timedelta_object_array_conversion(self):
+ # Regression test for gh-11096
+ inputs = [datetime.timedelta(28),
+ datetime.timedelta(30),
+ datetime.timedelta(31)]
+ expected = np.array([28, 30, 31], dtype='timedelta64[D]')
+ actual = np.array(inputs, dtype='timedelta64[D]')
+ assert_equal(expected, actual)
def test_timedelta_scalar_construction_units(self):
# String construction detecting units
diff --git a/numpy/core/tests/test_deprecations.py b/numpy/core/tests/test_deprecations.py
index 5d59d8226..60a7c72f7 100644
--- a/numpy/core/tests/test_deprecations.py
+++ b/numpy/core/tests/test_deprecations.py
@@ -134,6 +134,22 @@ class _VisibleDeprecationTestCase(_DeprecationTestCase):
warning_cls = np.VisibleDeprecationWarning
+class TestNonTupleNDIndexDeprecation(object):
+ def test_basic(self):
+ a = np.zeros((5, 5))
+ with warnings.catch_warnings():
+ warnings.filterwarnings('always')
+ assert_warns(FutureWarning, a.__getitem__, [[0, 1], [0, 1]])
+ assert_warns(FutureWarning, a.__getitem__, [slice(None)])
+
+ warnings.filterwarnings('error')
+ assert_raises(FutureWarning, a.__getitem__, [[0, 1], [0, 1]])
+ assert_raises(FutureWarning, a.__getitem__, [slice(None)])
+
+ # a[[0, 1]] always was advanced indexing, so no error/warning
+ a[[0, 1]]
+
+
class TestRankDeprecation(_DeprecationTestCase):
"""Test that np.rank is deprecated. The function should simply be
removed. The VisibleDeprecationWarning may become unnecessary.
diff --git a/numpy/core/tests/test_einsum.py b/numpy/core/tests/test_einsum.py
index 792b9e0a2..104dd1986 100644
--- a/numpy/core/tests/test_einsum.py
+++ b/numpy/core/tests/test_einsum.py
@@ -502,6 +502,16 @@ class TestEinSum(object):
optimize=optimize),
np.full((1, 5), 5))
+ # Cases which were failing (gh-10899)
+ x = np.eye(2, dtype=dtype)
+ y = np.ones(2, dtype=dtype)
+ assert_array_equal(np.einsum("ji,i->", x, y, optimize=optimize),
+ [2.]) # contig_contig_outstride0_two
+ assert_array_equal(np.einsum("i,ij->", y, x, optimize=optimize),
+ [2.]) # stride0_contig_outstride0_two
+ assert_array_equal(np.einsum("ij,i->", x, y, optimize=optimize),
+ [2.]) # contig_stride0_outstride0_two
+
def test_einsum_sums_int8(self):
self.check_einsum_sums('i1')
diff --git a/numpy/core/tests/test_indexing.py b/numpy/core/tests/test_indexing.py
index 65852e577..88f5deabc 100644
--- a/numpy/core/tests/test_indexing.py
+++ b/numpy/core/tests/test_indexing.py
@@ -576,19 +576,6 @@ class TestSubclasses(object):
assert_(isinstance(s[[0, 1, 2]], SubClass))
assert_(isinstance(s[s > 0], SubClass))
- def test_matrix_fancy(self):
- # The matrix class messes with the shape. While this is always
- # weird (getitem is not used, it does not have setitem nor knows
- # about fancy indexing), this tests gh-3110
- m = np.matrix([[1, 2], [3, 4]])
-
- assert_(isinstance(m[[0,1,0], :], np.matrix))
-
- # gh-3110. Note the transpose currently because matrices do *not*
- # support dimension fixing for fancy indexing correctly.
- x = np.asmatrix(np.arange(50).reshape(5,10))
- assert_equal(x[:2, np.array(-1)], x[:2, -1].T)
-
def test_finalize_gets_full_info(self):
# Array finalize should be called on the filled array.
class SubClass(np.ndarray):
diff --git a/numpy/core/tests/test_longdouble.py b/numpy/core/tests/test_longdouble.py
index 513a71b99..cf50d5d5c 100644
--- a/numpy/core/tests/test_longdouble.py
+++ b/numpy/core/tests/test_longdouble.py
@@ -6,7 +6,7 @@ import numpy as np
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_array_equal, temppath,
)
-from ._locales import CommaDecimalPointLocale
+from numpy.core.tests._locales import CommaDecimalPointLocale
LD_INFO = np.finfo(np.longdouble)
longdouble_longer_than_double = (LD_INFO.eps < np.finfo(np.double).eps)
diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py
index 806a3b083..a60f2cd92 100644
--- a/numpy/core/tests/test_multiarray.py
+++ b/numpy/core/tests/test_multiarray.py
@@ -34,7 +34,7 @@ from numpy.testing import (
assert_allclose, IS_PYPY, HAS_REFCOUNT, assert_array_less, runstring,
SkipTest, temppath, suppress_warnings
)
-from ._locales import CommaDecimalPointLocale
+from numpy.core.tests._locales import CommaDecimalPointLocale
# Need to test an object that does not fully implement math interface
from datetime import timedelta, datetime
@@ -1745,13 +1745,6 @@ class TestMethods(object):
assert_equal(r, np.array([('a', 1), ('c', 3), ('b', 255), ('d', 258)],
dtype=mydtype))
- def test_sort_matrix_none(self):
- a = np.matrix([[2, 1, 0]])
- actual = np.sort(a, axis=None)
- expected = np.matrix([[0, 1, 2]])
- assert_equal(actual, expected)
- assert_(type(expected) is np.matrix)
-
def test_argsort(self):
# all c scalar argsorts use the same code with different types
# so it suffices to run a quick check with one type. The number
@@ -2497,14 +2490,6 @@ class TestMethods(object):
assert_array_equal(np.partition(d, kth)[kth], tgt,
err_msg="data: %r\n kth: %r" % (d, kth))
- def test_partition_matrix_none(self):
- # gh-4301
- a = np.matrix([[2, 1, 0]])
- actual = np.partition(a, 1, axis=None)
- expected = np.matrix([[0, 1, 2]])
- assert_equal(actual, expected)
- assert_(type(expected) is np.matrix)
-
def test_argpartition_gh5524(self):
# A test for functionality of argpartition on lists.
d = [6,7,3,2,9,0]
@@ -3332,7 +3317,39 @@ class TestBinop(object):
with assert_raises(NotImplementedError):
a ** 2
+ def test_pow_array_object_dtype(self):
+ # test pow on arrays of object dtype
+ class SomeClass(object):
+ def __init__(self, num=None):
+ self.num = num
+
+ # want to ensure a fast pow path is not taken
+ def __mul__(self, other):
+ raise AssertionError('__mul__ should not be called')
+
+ def __div__(self, other):
+ raise AssertionError('__div__ should not be called')
+
+ def __pow__(self, exp):
+ return SomeClass(num=self.num ** exp)
+
+ def __eq__(self, other):
+ if isinstance(other, SomeClass):
+ return self.num == other.num
+
+ __rpow__ = __pow__
+
+ def pow_for(exp, arr):
+ return np.array([x ** exp for x in arr])
+
+ obj_arr = np.array([SomeClass(1), SomeClass(2), SomeClass(3)])
+ assert_equal(obj_arr ** 0.5, pow_for(0.5, obj_arr))
+ assert_equal(obj_arr ** 0, pow_for(0, obj_arr))
+ assert_equal(obj_arr ** 1, pow_for(1, obj_arr))
+ assert_equal(obj_arr ** -1, pow_for(-1, obj_arr))
+ assert_equal(obj_arr ** 2, pow_for(2, obj_arr))
+
class TestTemporaryElide(object):
# elision is only triggered on relatively large arrays
@@ -5279,13 +5296,6 @@ class TestDot(object):
assert_equal(np.dot(b, a), res)
assert_equal(np.dot(b, b), res)
- def test_dot_scalar_and_matrix_of_objects(self):
- # Ticket #2469
- arr = np.matrix([1, 2], dtype=object)
- desired = np.matrix([[3, 6]], dtype=object)
- assert_equal(np.dot(arr, 3), desired)
- assert_equal(np.dot(3, arr), desired)
-
def test_accelerate_framework_sgemv_fix(self):
def aligned_array(shape, align, dtype, order='C'):
@@ -5641,21 +5651,6 @@ class TestInner(object):
assert_equal(np.inner(vec, sca), desired)
assert_equal(np.inner(sca, vec), desired)
- def test_inner_scalar_and_matrix(self):
- for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?':
- sca = np.array(3, dtype=dt)[()]
- arr = np.matrix([[1, 2], [3, 4]], dtype=dt)
- desired = np.matrix([[3, 6], [9, 12]], dtype=dt)
- assert_equal(np.inner(arr, sca), desired)
- assert_equal(np.inner(sca, arr), desired)
-
- def test_inner_scalar_and_matrix_of_objects(self):
- # Ticket #4482
- arr = np.matrix([1, 2], dtype=object)
- desired = np.matrix([[3, 6]], dtype=object)
- assert_equal(np.inner(arr, 3), desired)
- assert_equal(np.inner(3, arr), desired)
-
def test_vecself(self):
# Ticket 844.
# Inner product of a vector with itself segfaults or give
@@ -6204,7 +6199,10 @@ class TestPEP3118Dtype(object):
self._check('i', 'i')
self._check('i:f0:', [('f0', 'i')])
+
class TestNewBufferProtocol(object):
+ """ Test PEP3118 buffers """
+
def _check_roundtrip(self, obj):
obj = np.asarray(obj)
x = memoryview(obj)
@@ -6515,6 +6513,36 @@ class TestNewBufferProtocol(object):
with assert_raises(ValueError):
memoryview(arr)
+ def test_max_dims(self):
+ a = np.empty((1,) * 32)
+ self._check_roundtrip(a)
+
+ @pytest.mark.skipif(sys.version_info < (2, 7, 7), reason="See gh-11115")
+ def test_error_too_many_dims(self):
+ def make_ctype(shape, scalar_type):
+ t = scalar_type
+ for dim in shape[::-1]:
+ t = dim * t
+ return t
+
+ # construct a memoryview with 33 dimensions
+ c_u8_33d = make_ctype((1,)*33, ctypes.c_uint8)
+ m = memoryview(c_u8_33d())
+ assert_equal(m.ndim, 33)
+
+ assert_raises_regex(
+ RuntimeError, "ndim",
+ np.array, m)
+
+ def test_error_pointer_type(self):
+ # gh-6741
+ m = memoryview(ctypes.pointer(ctypes.c_uint8()))
+ assert_('&' in m.format)
+
+ assert_raises_regex(
+ ValueError, "format string",
+ np.array, m)
+
class TestArrayAttributeDeletion(object):
@@ -7246,16 +7274,20 @@ class TestWritebackIfCopy(object):
def test_view_assign(self):
from numpy.core._multiarray_tests import npy_create_writebackifcopy, npy_resolve
+
arr = np.arange(9).reshape(3, 3).T
arr_wb = npy_create_writebackifcopy(arr)
assert_(arr_wb.flags.writebackifcopy)
assert_(arr_wb.base is arr)
- arr_wb[:] = -100
+ arr_wb[...] = -100
npy_resolve(arr_wb)
+ # arr changes after resolve, even though we assigned to arr_wb
assert_equal(arr, -100)
# after resolve, the two arrays no longer reference each other
- assert_(not arr_wb.ctypes.data == 0)
- arr_wb[:] = 100
+ assert_(arr_wb.ctypes.data != 0)
+ assert_equal(arr_wb.base, None)
+ # assigning to arr_wb does not get transferred to arr
+ arr_wb[...] = 100
assert_equal(arr, -100)
def test_dealloc_warning(self):
@@ -7266,6 +7298,30 @@ class TestWritebackIfCopy(object):
_multiarray_tests.npy_abuse_writebackifcopy(v)
assert len(sup.log) == 1
+ def test_view_discard_refcount(self):
+ from numpy.core._multiarray_tests import npy_create_writebackifcopy, npy_discard
+
+ arr = np.arange(9).reshape(3, 3).T
+ orig = arr.copy()
+ if HAS_REFCOUNT:
+ arr_cnt = sys.getrefcount(arr)
+ arr_wb = npy_create_writebackifcopy(arr)
+ assert_(arr_wb.flags.writebackifcopy)
+ assert_(arr_wb.base is arr)
+ arr_wb[...] = -100
+ npy_discard(arr_wb)
+ # arr remains unchanged after discard
+ assert_equal(arr, orig)
+ # after discard, the two arrays no longer reference each other
+ assert_(arr_wb.ctypes.data != 0)
+ assert_equal(arr_wb.base, None)
+ if HAS_REFCOUNT:
+ assert_equal(arr_cnt, sys.getrefcount(arr))
+ # assigning to arr_wb does not get transferred to arr
+ arr_wb[...] = 100
+ assert_equal(arr, orig)
+
+
class TestArange(object):
def test_infinite(self):
assert_raises_regex(
diff --git a/numpy/core/tests/test_nditer.py b/numpy/core/tests/test_nditer.py
index bc9456536..a0096efdb 100644
--- a/numpy/core/tests/test_nditer.py
+++ b/numpy/core/tests/test_nditer.py
@@ -811,7 +811,7 @@ def test_iter_nbo_align_contig():
assert_equal(i.operands[0], a)
i.operands[0][:] = 2
assert_equal(au, [2]*6)
- i = None # should not raise a DeprecationWarning
+ del i # should not raise a warning
# Byte order change by requesting NBO
a = np.arange(6, dtype='f4')
au = a.byteswap().newbyteorder()
@@ -1469,26 +1469,25 @@ def test_iter_allocate_output_types_scalar():
def test_iter_allocate_output_subtype():
# Make sure that the subtype with priority wins
+ class MyNDArray(np.ndarray):
+ __array_priority__ = 15
- # matrix vs ndarray
- a = np.matrix([[1, 2], [3, 4]])
+ # subclass vs ndarray
+ a = np.array([[1, 2], [3, 4]]).view(MyNDArray)
b = np.arange(4).reshape(2, 2).T
i = nditer([a, b, None], [],
- [['readonly'], ['readonly'], ['writeonly', 'allocate']])
+ [['readonly'], ['readonly'], ['writeonly', 'allocate']])
assert_equal(type(a), type(i.operands[2]))
- assert_(type(b) != type(i.operands[2]))
+ assert_(type(b) is not type(i.operands[2]))
assert_equal(i.operands[2].shape, (2, 2))
- # matrix always wants things to be 2D
- b = np.arange(4).reshape(1, 2, 2)
- assert_raises(RuntimeError, nditer, [a, b, None], [],
- [['readonly'], ['readonly'], ['writeonly', 'allocate']])
- # but if subtypes are disabled, the result can still work
+ # If subtypes are disabled, we should get back an ndarray.
i = nditer([a, b, None], [],
- [['readonly'], ['readonly'], ['writeonly', 'allocate', 'no_subtype']])
+ [['readonly'], ['readonly'],
+ ['writeonly', 'allocate', 'no_subtype']])
assert_equal(type(b), type(i.operands[2]))
- assert_(type(a) != type(i.operands[2]))
- assert_equal(i.operands[2].shape, (1, 2, 2))
+ assert_(type(a) is not type(i.operands[2]))
+ assert_equal(i.operands[2].shape, (2, 2))
def test_iter_allocate_output_errors():
# Check that the iterator will throw errors for bad output allocations
@@ -2838,16 +2837,34 @@ def test_writebacks():
it = nditer(au, [],
[['readwrite', 'updateifcopy']],
casting='equiv', op_dtypes=[np.dtype('f4')])
- au = None
+ # reentering works
+ with it:
+ with it:
+ for x in it:
+ x[...] = 123
+
+ it = nditer(au, [],
+ [['readwrite', 'updateifcopy']],
+ casting='equiv', op_dtypes=[np.dtype('f4')])
+ # make sure exiting the inner context manager closes the iterator
+ with it:
+ with it:
+ for x in it:
+ x[...] = 123
+ assert_raises(ValueError, getattr, it, 'operands')
# do not crash if original data array is decrefed
+ it = nditer(au, [],
+ [['readwrite', 'updateifcopy']],
+ casting='equiv', op_dtypes=[np.dtype('f4')])
+ del au
with it:
for x in it:
x[...] = 123
- # make sure we cannot reenter the iterand
+ # make sure we cannot reenter the closed iterator
enter = it.__enter__
assert_raises(ValueError, enter)
-def test_close():
+def test_close_equivalent():
''' using a context manager and using nditer.close are equivalent
'''
def add_close(x, y, out=None):
@@ -2856,8 +2873,10 @@ def test_close():
[['readonly'], ['readonly'], ['writeonly','allocate']])
for (a, b, c) in it:
addop(a, b, out=c)
+ ret = it.operands[2]
it.close()
- return it.operands[2]
+ return ret
+
def add_context(x, y, out=None):
addop = np.add
it = np.nditer([x, y, out], [],
@@ -2871,6 +2890,13 @@ def test_close():
z = add_context(range(5), range(5))
assert_equal(z, range(0, 10, 2))
+def test_close_raises():
+ it = np.nditer(np.arange(3))
+ assert_equal(next(it), 0)
+ it.close()
+ assert_raises(StopIteration, next, it)
+ assert_raises(ValueError, getattr, it, 'operands')
+
def test_warn_noclose():
a = np.arange(6, dtype='f4')
au = a.byteswap().newbyteorder()
diff --git a/numpy/core/tests/test_numeric.py b/numpy/core/tests/test_numeric.py
index 40cccd404..53486dc51 100644
--- a/numpy/core/tests/test_numeric.py
+++ b/numpy/core/tests/test_numeric.py
@@ -552,7 +552,6 @@ class TestFloatExceptions(object):
self.assert_raises_fpe(fpeerr, flop, sc1, sc2[()])
self.assert_raises_fpe(fpeerr, flop, sc1[()], sc2[()])
- @pytest.mark.xfail(reason="See ticket #2350")
def test_floating_exceptions(self):
# Test basic arithmetic function errors
with np.errstate(all='raise'):
@@ -905,7 +904,7 @@ class TestTypes(object):
fi = np.finfo(dt)
assert_(np.can_cast(fi.min, dt))
assert_(np.can_cast(fi.max, dt))
-
+
# Custom exception class to test exception propagation in fromiter
class NIterError(Exception):
@@ -2201,13 +2200,16 @@ class TestLikeFuncs(object):
self.compare_array_value(dz, value, fill_value)
# Test the 'subok' parameter
- a = np.matrix([[1, 2], [3, 4]])
+ class MyNDArray(np.ndarray):
+ pass
+
+ a = np.array([[1, 2], [3, 4]]).view(MyNDArray)
b = like_function(a, **fill_kwarg)
- assert_(type(b) is np.matrix)
+ assert_(type(b) is MyNDArray)
b = like_function(a, subok=False, **fill_kwarg)
- assert_(type(b) is not np.matrix)
+ assert_(type(b) is not MyNDArray)
def test_ones_like(self):
self.check_like_function(np.ones_like, 1)
diff --git a/numpy/core/tests/test_print.py b/numpy/core/tests/test_print.py
index 746ad0e4b..433208748 100644
--- a/numpy/core/tests/test_print.py
+++ b/numpy/core/tests/test_print.py
@@ -4,7 +4,7 @@ import sys
import numpy as np
from numpy.testing import assert_, assert_equal, SkipTest
-from ._locales import CommaDecimalPointLocale
+from numpy.core.tests._locales import CommaDecimalPointLocale
if sys.version_info[0] >= 3:
diff --git a/numpy/core/tests/test_regression.py b/numpy/core/tests/test_regression.py
index b3cb3e610..d6dcaa982 100644
--- a/numpy/core/tests/test_regression.py
+++ b/numpy/core/tests/test_regression.py
@@ -2325,13 +2325,10 @@ class TestRegression(object):
def test_void_item_memview(self):
va = np.zeros(10, 'V4')
- # for now, there is just a futurewarning
- assert_warns(FutureWarning, va[:1].item)
- # in the future, test we got a bytes copy:
- #x = va[:1].item()
- #va[0] = b'\xff\xff\xff\xff'
- #del va
- #assert_equal(x, b'\x00\x00\x00\x00')
+ x = va[:1].item()
+ va[0] = b'\xff\xff\xff\xff'
+ del va
+ assert_equal(x, b'\x00\x00\x00\x00')
def test_structarray_title(self):
# The following used to segfault on pypy, due to NPY_TITLE_KEY
diff --git a/numpy/core/tests/test_scalarprint.py b/numpy/core/tests/test_scalarprint.py
index 94d8294f1..a20ec9f74 100644
--- a/numpy/core/tests/test_scalarprint.py
+++ b/numpy/core/tests/test_scalarprint.py
@@ -4,9 +4,10 @@
"""
from __future__ import division, absolute_import, print_function
-import tempfile
+import code, sys
+from tempfile import TemporaryFile
import numpy as np
-from numpy.testing import assert_, assert_equal
+from numpy.testing import assert_, assert_equal, suppress_warnings
class TestRealScalars(object):
@@ -53,7 +54,7 @@ class TestRealScalars(object):
# output to a "real file" (ie, not a StringIO). Make sure we don't
# inherit it.
x = np.double(0.1999999999999)
- with tempfile.TemporaryFile('r+t') as f:
+ with TemporaryFile('r+t') as f:
print(x, file=f)
f.seek(0)
output = f.read()
@@ -62,6 +63,37 @@ class TestRealScalars(object):
# precision as '0.2', but we want numpy's np.double('0.1999999999999')
# to print the unique value, '0.1999999999999'.
+ # gh-11031
+ # Only in the python2 interactive shell and when stdout is a "real"
+ # file, the output of the last command is printed to stdout without
+ # Py_PRINT_RAW (unlike the print statement) so `>>> x` and `>>> print
+ # x` are potentially different. Make sure they are the same. The only
+ # way I found to get prompt-like output is using an actual prompt from
+ # the 'code' module. Again, must use tempfile to get a "real" file.
+
+ # dummy user-input which enters one line and then ctrl-Ds.
+ def userinput():
+ yield 'np.sqrt(2)'
+ raise EOFError
+ gen = userinput()
+ input_func = lambda prompt="": next(gen)
+
+ with TemporaryFile('r+t') as fo, TemporaryFile('r+t') as fe:
+ orig_stdout, orig_stderr = sys.stdout, sys.stderr
+ sys.stdout, sys.stderr = fo, fe
+
+ # py2 code.interact sends irrelevant internal DeprecationWarnings
+ with suppress_warnings() as sup:
+ sup.filter(DeprecationWarning)
+ code.interact(local={'np': np}, readfunc=input_func, banner='')
+
+ sys.stdout, sys.stderr = orig_stdout, orig_stderr
+
+ fo.seek(0)
+ capture = fo.read().strip()
+
+ assert_equal(capture, repr(np.sqrt(2)))
+
def test_dragon4(self):
# these tests are adapted from Ryan Juckett's dragon4 implementation,
# see dragon4.c for details.
diff --git a/numpy/core/tests/test_shape_base.py b/numpy/core/tests/test_shape_base.py
index 1d91a651e..72b3451a4 100644
--- a/numpy/core/tests/test_shape_base.py
+++ b/numpy/core/tests/test_shape_base.py
@@ -364,10 +364,6 @@ def test_stack():
stack, [np.zeros((3, 3)), np.zeros(3)], axis=1)
assert_raises_regex(ValueError, 'must have the same shape',
stack, [np.arange(2), np.arange(3)])
- # np.matrix
- m = np.matrix([[1, 2], [3, 4]])
- assert_raises_regex(ValueError, 'shape too large to be a matrix',
- stack, [m, m])
class TestBlock(object):
diff --git a/numpy/core/tests/test_ufunc.py b/numpy/core/tests/test_ufunc.py
index 7a276c04d..b7fda3f2e 100644
--- a/numpy/core/tests/test_ufunc.py
+++ b/numpy/core/tests/test_ufunc.py
@@ -5,6 +5,7 @@ import itertools
import numpy as np
import numpy.core._umath_tests as umt
+import numpy.linalg._umath_linalg as uml
import numpy.core._operand_flag_tests as opflag_tests
import numpy.core._rational_tests as _rational_tests
from numpy.testing import (
@@ -284,10 +285,16 @@ class TestUfunc(object):
def test_signature(self):
# the arguments to test_signature are: nin, nout, core_signature
# pass
- assert_equal(umt.test_signature(2, 1, "(i),(i)->()"), 1)
+ enabled, num_dims, ixs = umt.test_signature(2, 1, "(i),(i)->()")
+ assert_equal(enabled, 1)
+ assert_equal(num_dims, (1, 1, 0))
+ assert_equal(ixs, (0, 0))
- # pass. empty core signature; treat as plain ufunc (with trivial core)
- assert_equal(umt.test_signature(2, 1, "(),()->()"), 0)
+ # empty core signature; treat as plain ufunc (with trivial core)
+ enabled, num_dims, ixs = umt.test_signature(2, 1, "(),()->()")
+ assert_equal(enabled, 0)
+ assert_equal(num_dims, (0, 0, 0))
+ assert_equal(ixs, ())
# in the following calls, a ValueError should be raised because
# of error in core signature
@@ -326,7 +333,10 @@ class TestUfunc(object):
pass
# more complicated names for variables
- assert_equal(umt.test_signature(2, 1, "(i1,i2),(J_1)->(_kAB)"), 1)
+ enabled, num_dims, ixs = umt.test_signature(2, 1, "(i1,i2),(J_1)->(_kAB)")
+ assert_equal(enabled, 1)
+ assert_equal(num_dims, (2, 1, 1))
+ assert_equal(ixs, (0, 1, 2, 3))
def test_get_signature(self):
assert_equal(umt.inner1d.signature, "(i),(i)->()")
@@ -494,6 +504,17 @@ class TestUfunc(object):
d += d
assert_almost_equal(d, 2. + 2j)
+ def test_sum_initial(self):
+ # Integer, single axis
+ assert_equal(np.sum([3], initial=2), 5)
+
+ # Floating point
+ assert_almost_equal(np.sum([0.2], initial=0.1), 0.3)
+
+ # Multiple non-adjacent axes
+ assert_equal(np.sum(np.ones((2, 3, 5), dtype=np.int64), axis=(0, 2), initial=2),
+ [12, 12, 12])
+
def test_inner1d(self):
a = np.arange(6).reshape((2, 3))
assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1))
@@ -600,49 +621,49 @@ class TestUfunc(object):
def test_axes_argument(self):
# inner1d signature: '(i),(i)->()'
- in1d = umt.inner1d
+ inner1d = umt.inner1d
a = np.arange(27.).reshape((3, 3, 3))
b = np.arange(10., 19.).reshape((3, 1, 3))
# basic tests on inputs (outputs tested below with matrix_multiply).
- c = in1d(a, b)
+ c = inner1d(a, b)
assert_array_equal(c, (a * b).sum(-1))
# default
- c = in1d(a, b, axes=[(-1,), (-1,), ()])
+ c = inner1d(a, b, axes=[(-1,), (-1,), ()])
assert_array_equal(c, (a * b).sum(-1))
# integers ok for single axis.
- c = in1d(a, b, axes=[-1, -1, ()])
+ c = inner1d(a, b, axes=[-1, -1, ()])
assert_array_equal(c, (a * b).sum(-1))
# mix fine
- c = in1d(a, b, axes=[(-1,), -1, ()])
+ c = inner1d(a, b, axes=[(-1,), -1, ()])
assert_array_equal(c, (a * b).sum(-1))
# can omit last axis.
- c = in1d(a, b, axes=[-1, -1])
+ c = inner1d(a, b, axes=[-1, -1])
assert_array_equal(c, (a * b).sum(-1))
# can pass in other types of integer (with __index__ protocol)
- c = in1d(a, b, axes=[np.int8(-1), np.array(-1, dtype=np.int32)])
+ c = inner1d(a, b, axes=[np.int8(-1), np.array(-1, dtype=np.int32)])
assert_array_equal(c, (a * b).sum(-1))
# swap some axes
- c = in1d(a, b, axes=[0, 0])
+ c = inner1d(a, b, axes=[0, 0])
assert_array_equal(c, (a * b).sum(0))
- c = in1d(a, b, axes=[0, 2])
+ c = inner1d(a, b, axes=[0, 2])
assert_array_equal(c, (a.transpose(1, 2, 0) * b).sum(-1))
# Check errors for improperly constructed axes arguments.
# should have list.
- assert_raises(TypeError, in1d, a, b, axes=-1)
+ assert_raises(TypeError, inner1d, a, b, axes=-1)
# needs enough elements
- assert_raises(ValueError, in1d, a, b, axes=[-1])
+ assert_raises(ValueError, inner1d, a, b, axes=[-1])
# should pass in indices.
- assert_raises(TypeError, in1d, a, b, axes=[-1.0, -1.0])
- assert_raises(TypeError, in1d, a, b, axes=[(-1.0,), -1])
- assert_raises(TypeError, in1d, a, b, axes=[None, 1])
+ assert_raises(TypeError, inner1d, a, b, axes=[-1.0, -1.0])
+ assert_raises(TypeError, inner1d, a, b, axes=[(-1.0,), -1])
+ assert_raises(TypeError, inner1d, a, b, axes=[None, 1])
# cannot pass an index unless there is only one dimension
# (output is wrong in this case)
- assert_raises(TypeError, in1d, a, b, axes=[-1, -1, -1])
+ assert_raises(TypeError, inner1d, a, b, axes=[-1, -1, -1])
# or pass in generally the wrong number of axes
- assert_raises(ValueError, in1d, a, b, axes=[-1, -1, (-1,)])
- assert_raises(ValueError, in1d, a, b, axes=[-1, (-2, -1), ()])
+ assert_raises(ValueError, inner1d, a, b, axes=[-1, -1, (-1,)])
+ assert_raises(ValueError, inner1d, a, b, axes=[-1, (-2, -1), ()])
# axes need to have same length.
- assert_raises(ValueError, in1d, a, b, axes=[0, 1])
+ assert_raises(ValueError, inner1d, a, b, axes=[0, 1])
# matrix_multiply signature: '(m,n),(n,p)->(m,p)'
mm = umt.matrix_multiply
@@ -697,6 +718,73 @@ class TestUfunc(object):
assert_raises(ValueError, mm, z[1], z, axes=[0, 1])
assert_raises(ValueError, mm, z, z, out=z[0], axes=[0, 1])
+ def test_keepdims_argument(self):
+ # inner1d signature: '(i),(i)->()'
+ inner1d = umt.inner1d
+ a = np.arange(27.).reshape((3, 3, 3))
+ b = np.arange(10., 19.).reshape((3, 1, 3))
+ c = inner1d(a, b)
+ assert_array_equal(c, (a * b).sum(-1))
+ c = inner1d(a, b, keepdims=False)
+ assert_array_equal(c, (a * b).sum(-1))
+ c = inner1d(a, b, keepdims=True)
+ assert_array_equal(c, (a * b).sum(-1, keepdims=True))
+ out = np.zeros_like(c)
+ d = inner1d(a, b, keepdims=True, out=out)
+ assert_(d is out)
+ assert_array_equal(d, c)
+ # Now combined with axes.
+ c = inner1d(a, b, axes=[(-1,), (-1,), ()], keepdims=False)
+ assert_array_equal(c, (a * b).sum(-1))
+ c = inner1d(a, b, axes=[(-1,), (-1,), (-1,)], keepdims=True)
+ assert_array_equal(c, (a * b).sum(-1, keepdims=True))
+ c = inner1d(a, b, axes=[0, 0], keepdims=False)
+ assert_array_equal(c, (a * b).sum(0))
+ c = inner1d(a, b, axes=[0, 0, 0], keepdims=True)
+ assert_array_equal(c, (a * b).sum(0, keepdims=True))
+ c = inner1d(a, b, axes=[0, 2], keepdims=False)
+ assert_array_equal(c, (a.transpose(1, 2, 0) * b).sum(-1))
+ c = inner1d(a, b, axes=[0, 2], keepdims=True)
+ assert_array_equal(c, (a.transpose(1, 2, 0) * b).sum(-1,
+ keepdims=True))
+ c = inner1d(a, b, axes=[0, 2, 2], keepdims=True)
+ assert_array_equal(c, (a.transpose(1, 2, 0) * b).sum(-1,
+ keepdims=True))
+ c = inner1d(a, b, axes=[0, 2, 0], keepdims=True)
+ assert_array_equal(c, (a * b.transpose(2, 0, 1)).sum(0, keepdims=True))
+ # Hardly useful, but should work.
+ c = inner1d(a, b, axes=[0, 2, 1], keepdims=True)
+ assert_array_equal(c, (a.transpose(1, 0, 2) * b.transpose(0, 2, 1))
+ .sum(1, keepdims=True))
+ # Check with two core dimensions.
+ a = np.eye(3) * np.arange(4.)[:, np.newaxis, np.newaxis]
+ expected = uml.det(a)
+ c = uml.det(a, keepdims=False)
+ assert_array_equal(c, expected)
+ c = uml.det(a, keepdims=True)
+ assert_array_equal(c, expected[:, np.newaxis, np.newaxis])
+ a = np.eye(3) * np.arange(4.)[:, np.newaxis, np.newaxis]
+ expected_s, expected_l = uml.slogdet(a)
+ cs, cl = uml.slogdet(a, keepdims=False)
+ assert_array_equal(cs, expected_s)
+ assert_array_equal(cl, expected_l)
+ cs, cl = uml.slogdet(a, keepdims=True)
+ assert_array_equal(cs, expected_s[:, np.newaxis, np.newaxis])
+ assert_array_equal(cl, expected_l[:, np.newaxis, np.newaxis])
+ # Sanity check on innerwt.
+ a = np.arange(6).reshape((2, 3))
+ b = np.arange(10, 16).reshape((2, 3))
+ w = np.arange(20, 26).reshape((2, 3))
+ assert_array_equal(umt.innerwt(a, b, w, keepdims=True),
+ np.sum(a * b * w, axis=-1, keepdims=True))
+ # Check errors.
+ # Not a boolean
+ assert_raises(TypeError, inner1d, a, b, keepdims='true')
+ # 1 core dimension only.
+ mm = umt.matrix_multiply
+ assert_raises(TypeError, mm, a, b, keepdims=True)
+ assert_raises(TypeError, mm, a, b, keepdims=False)
+
def test_innerwt(self):
a = np.arange(6).reshape((2, 3))
b = np.arange(10, 16).reshape((2, 3))
@@ -844,6 +932,7 @@ class TestUfunc(object):
assert_equal(np.min(a), False)
assert_equal(np.array([[1]], dtype=object).sum(), 1)
assert_equal(np.array([[[1, 2]]], dtype=object).sum((0, 1)), [1, 2])
+ assert_equal(np.array([1], dtype=object).sum(initial=1), 2)
def test_object_array_accumulate_inplace(self):
# Checks that in-place accumulates work, see also gh-7402
@@ -880,13 +969,6 @@ class TestUfunc(object):
np.add.reduceat(arr, np.arange(4), out=arr, axis=-1)
assert_array_equal(arr, out)
- def test_object_scalar_multiply(self):
- # Tickets #2469 and #4482
- arr = np.matrix([1, 2], dtype=object)
- desired = np.matrix([[3, 6]], dtype=object)
- assert_equal(np.multiply(arr, 3), desired)
- assert_equal(np.multiply(3, arr), desired)
-
def test_zerosize_reduction(self):
# Test with default dtype and object dtype
for a in [[], np.array([], dtype=object)]:
@@ -987,7 +1069,7 @@ class TestUfunc(object):
assert_equal(np.sqrt(a, where=m), [1])
def check_identityless_reduction(self, a):
- # np.minimum.reduce is a identityless reduction
+ # np.minimum.reduce is an identityless reduction
# Verify that it sees the zero at various positions
a[...] = 1
@@ -1056,6 +1138,35 @@ class TestUfunc(object):
a = a[1:, 1:, 1:]
self.check_identityless_reduction(a)
+ def test_initial_reduction(self):
+ # np.minimum.reduce is an identityless reduction
+
+ # For cases like np.maximum(np.abs(...), initial=0)
+ # More generally, a supremum over non-negative numbers.
+ assert_equal(np.maximum.reduce([], initial=0), 0)
+
+ # For cases like reduction of an empty array over the reals.
+ assert_equal(np.minimum.reduce([], initial=np.inf), np.inf)
+ assert_equal(np.maximum.reduce([], initial=-np.inf), -np.inf)
+
+ # Random tests
+ assert_equal(np.minimum.reduce([5], initial=4), 4)
+ assert_equal(np.maximum.reduce([4], initial=5), 5)
+ assert_equal(np.maximum.reduce([5], initial=4), 5)
+ assert_equal(np.minimum.reduce([4], initial=5), 4)
+
+ # Check initial=None raises ValueError for both types of ufunc reductions
+ assert_raises(ValueError, np.minimum.reduce, [], initial=None)
+ assert_raises(ValueError, np.add.reduce, [], initial=None)
+
+ # Check that np._NoValue gives default behavior.
+ assert_equal(np.add.reduce([], initial=np._NoValue), 0)
+
+ # Check that initial kwarg behaves as intended for dtype=object
+ a = np.array([10], dtype=object)
+ res = np.add.reduce(a, initial=5)
+ assert_equal(res, 15)
+
def test_identityless_reduction_nonreorderable(self):
a = np.array([[8.0, 2.0, 2.0], [1.0, 0.5, 0.25]])
@@ -1407,15 +1518,18 @@ class TestUfunc(object):
assert_equal(f(d, 0, None, None), r)
assert_equal(f(d, 0, None, None, keepdims=False), r)
assert_equal(f(d, 0, None, None, True), r.reshape((1,) + r.shape))
+ assert_equal(f(d, 0, None, None, False, 0), r)
+ assert_equal(f(d, 0, None, None, False, initial=0), r)
# multiple keywords
assert_equal(f(d, axis=0, dtype=None, out=None, keepdims=False), r)
assert_equal(f(d, 0, dtype=None, out=None, keepdims=False), r)
assert_equal(f(d, 0, None, out=None, keepdims=False), r)
+ assert_equal(f(d, 0, None, out=None, keepdims=False, initial=0), r)
# too little
assert_raises(TypeError, f)
# too much
- assert_raises(TypeError, f, d, 0, None, None, False, 1)
+ assert_raises(TypeError, f, d, 0, None, None, False, 0, 1)
# invalid axis
assert_raises(TypeError, f, d, "invalid")
assert_raises(TypeError, f, d, axis="invalid")
diff --git a/numpy/core/tests/test_umath.py b/numpy/core/tests/test_umath.py
index 9da6abd4b..2a42b1ed1 100644
--- a/numpy/core/tests/test_umath.py
+++ b/numpy/core/tests/test_umath.py
@@ -1328,6 +1328,17 @@ class TestMinMax(object):
assert_equal(d.max(), d[0])
assert_equal(d.min(), d[0])
+ def test_reduce_warns(self):
+ # gh 10370, 11029 Some compilers reorder the call to npy_getfloatstatus
+ # and put it before the call to an intrinsic function that causes
+ # invalid status to be set. Also make sure warnings are emitted
+ for n in (2, 4, 8, 16, 32):
+ with suppress_warnings() as sup:
+ sup.record(RuntimeWarning)
+ for r in np.diagflat([np.nan] * n):
+ assert_equal(np.min(r), np.nan)
+ assert_equal(len(sup.log), n)
+
class TestAbsoluteNegative(object):
def test_abs_neg_blocked(self):
@@ -1413,6 +1424,57 @@ class TestSpecialMethods(object):
assert_equal(args[1], a)
assert_equal(i, 0)
+ def test_wrap_and_prepare_out(self):
+ # Calling convention for out should not affect how special methods are
+ # called
+
+ class StoreArrayPrepareWrap(np.ndarray):
+ _wrap_args = None
+ _prepare_args = None
+ def __new__(cls):
+ return np.empty(()).view(cls)
+ def __array_wrap__(self, obj, context):
+ self._wrap_args = context[1]
+ return obj
+ def __array_prepare__(self, obj, context):
+ self._prepare_args = context[1]
+ return obj
+ @property
+ def args(self):
+ # We need to ensure these are fetched at the same time, before
+ # any other ufuncs are called by the assertions
+ return (self._prepare_args, self._wrap_args)
+ def __repr__(self):
+ return "a" # for short test output
+
+ def do_test(f_call, f_expected):
+ a = StoreArrayPrepareWrap()
+ f_call(a)
+ p, w = a.args
+ expected = f_expected(a)
+ try:
+ assert_equal(p, expected)
+ assert_equal(w, expected)
+ except AssertionError as e:
+ # assert_equal produces truly useless error messages
+ raise AssertionError("\n".join([
+ "Bad arguments passed in ufunc call",
+ " expected: {}".format(expected),
+ " __array_prepare__ got: {}".format(p),
+ " __array_wrap__ got: {}".format(w)
+ ]))
+
+ # method not on the out argument
+ do_test(lambda a: np.add(a, 0), lambda a: (a, 0))
+ do_test(lambda a: np.add(a, 0, None), lambda a: (a, 0))
+ do_test(lambda a: np.add(a, 0, out=None), lambda a: (a, 0))
+ do_test(lambda a: np.add(a, 0, out=(None,)), lambda a: (a, 0))
+
+ # method on the out argument
+ do_test(lambda a: np.add(0, 0, a), lambda a: (0, 0, a))
+ do_test(lambda a: np.add(0, 0, out=a), lambda a: (0, 0, a))
+ do_test(lambda a: np.add(0, 0, out=(a,)), lambda a: (0, 0, a))
+
def test_wrap_with_iterable(self):
# test fix for bug #1026:
@@ -1613,13 +1675,16 @@ class TestSpecialMethods(object):
assert_equal(ncu.maximum(a, C()), 0)
def test_ufunc_override(self):
-
+ # check override works even with instance with high priority.
class A(object):
def __array_ufunc__(self, func, method, *inputs, **kwargs):
return self, func, method, inputs, kwargs
+ class MyNDArray(np.ndarray):
+ __array_priority__ = 100
+
a = A()
- b = np.matrix([1])
+ b = np.array([1]).view(MyNDArray)
res0 = np.multiply(a, b)
res1 = np.multiply(b, b, out=a)
@@ -1759,7 +1824,7 @@ class TestSpecialMethods(object):
# reduce, kwargs
res = np.multiply.reduce(a, axis='axis0', dtype='dtype0', out='out0',
- keepdims='keep0')
+ keepdims='keep0', initial='init0')
assert_equal(res[0], a)
assert_equal(res[1], np.multiply)
assert_equal(res[2], 'reduce')
@@ -1767,7 +1832,8 @@ class TestSpecialMethods(object):
assert_equal(res[4], {'dtype':'dtype0',
'out': ('out0',),
'keepdims': 'keep0',
- 'axis': 'axis0'})
+ 'axis': 'axis0',
+ 'initial': 'init0'})
# reduce, output equal to None removed, but not other explicit ones,
# even if they are at their default value.
@@ -1777,6 +1843,14 @@ class TestSpecialMethods(object):
assert_equal(res[4], {'axis': 0, 'keepdims': True})
res = np.multiply.reduce(a, None, out=(None,), dtype=None)
assert_equal(res[4], {'axis': None, 'dtype': None})
+ res = np.multiply.reduce(a, 0, None, None, False, 2)
+ assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False, 'initial': 2})
+ # np._NoValue ignored for initial.
+ res = np.multiply.reduce(a, 0, None, None, False, np._NoValue)
+ assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False})
+ # None kept for initial.
+ res = np.multiply.reduce(a, 0, None, None, False, None)
+ assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False, 'initial': None})
# reduce, wrong args
assert_raises(ValueError, np.multiply.reduce, a, out=())
diff --git a/numpy/distutils/misc_util.py b/numpy/distutils/misc_util.py
index cb7414a04..41f0b1f61 100644
--- a/numpy/distutils/misc_util.py
+++ b/numpy/distutils/misc_util.py
@@ -256,6 +256,11 @@ def minrelpath(path):
return ''
return os.sep.join(l)
+def sorted_glob(fileglob):
+ """sorts output of python glob for http://bugs.python.org/issue30461
+ to allow extensions to have reproducible build results"""
+ return sorted(glob.glob(fileglob))
+
def _fix_paths(paths, local_path, include_non_existing):
assert is_sequence(paths), repr(type(paths))
new_paths = []
@@ -263,8 +268,8 @@ def _fix_paths(paths, local_path, include_non_existing):
for n in paths:
if is_string(n):
if '*' in n or '?' in n:
- p = glob.glob(n)
- p2 = glob.glob(njoin(local_path, n))
+ p = sorted_glob(n)
+ p2 = sorted_glob(njoin(local_path, n))
if p2:
new_paths.extend(p2)
elif p:
@@ -528,7 +533,7 @@ def _get_headers(directory_list):
# get *.h files from list of directories
headers = []
for d in directory_list:
- head = glob.glob(os.path.join(d, "*.h")) #XXX: *.hpp files??
+ head = sorted_glob(os.path.join(d, "*.h")) #XXX: *.hpp files??
headers.extend(head)
return headers
@@ -882,7 +887,7 @@ class Configuration(object):
caller_level = 1):
l = subpackage_name.split('.')
subpackage_path = njoin([self.local_path]+l)
- dirs = [_m for _m in glob.glob(subpackage_path) if os.path.isdir(_m)]
+ dirs = [_m for _m in sorted_glob(subpackage_path) if os.path.isdir(_m)]
config_list = []
for d in dirs:
if not os.path.isfile(njoin(d, '__init__.py')):
diff --git a/numpy/distutils/system_info.py b/numpy/distutils/system_info.py
index 2a3ff2e52..65d7de316 100644
--- a/numpy/distutils/system_info.py
+++ b/numpy/distutils/system_info.py
@@ -385,6 +385,7 @@ def get_info(name, notfound_action=0):
'blis': blis_info, # use blas_opt instead
'lapack_mkl': lapack_mkl_info, # use lapack_opt instead
'blas_mkl': blas_mkl_info, # use blas_opt instead
+ 'accelerate': accelerate_info, # use blas_opt instead
'x11': x11_info,
'fft_opt': fft_opt_info,
'fftw': fftw_info,
@@ -1551,39 +1552,10 @@ class lapack_opt_info(system_info):
if not atlas_info:
atlas_info = get_info('atlas')
- if sys.platform == 'darwin' \
- and not os.getenv('_PYTHON_HOST_PLATFORM', None) \
- and not (atlas_info or openblas_info or
- lapack_mkl_info):
- # Use the system lapack from Accelerate or vecLib under OSX
- args = []
- link_args = []
- if get_platform()[-4:] == 'i386' or 'intel' in get_platform() or \
- 'x86_64' in get_platform() or \
- 'i386' in platform.platform():
- intel = 1
- else:
- intel = 0
- if os.path.exists('/System/Library/Frameworks'
- '/Accelerate.framework/'):
- if intel:
- args.extend(['-msse3'])
- else:
- args.extend(['-faltivec'])
- link_args.extend(['-Wl,-framework', '-Wl,Accelerate'])
- elif os.path.exists('/System/Library/Frameworks'
- '/vecLib.framework/'):
- if intel:
- args.extend(['-msse3'])
- else:
- args.extend(['-faltivec'])
- link_args.extend(['-Wl,-framework', '-Wl,vecLib'])
- if args:
- self.set_info(extra_compile_args=args,
- extra_link_args=link_args,
- define_macros=[('NO_ATLAS_INFO', 3),
- ('HAVE_CBLAS', None)])
- return
+ accelerate_info = get_info('accelerate')
+ if accelerate_info and not atlas_info:
+ self.set_info(**accelerate_info)
+ return
need_lapack = 0
need_blas = 0
@@ -1659,43 +1631,10 @@ class blas_opt_info(system_info):
if not atlas_info:
atlas_info = get_info('atlas_blas')
- if sys.platform == 'darwin' \
- and not os.getenv('_PYTHON_HOST_PLATFORM', None) \
- and not (atlas_info or openblas_info or
- blas_mkl_info or blis_info):
- # Use the system BLAS from Accelerate or vecLib under OSX
- args = []
- link_args = []
- if get_platform()[-4:] == 'i386' or 'intel' in get_platform() or \
- 'x86_64' in get_platform() or \
- 'i386' in platform.platform():
- intel = 1
- else:
- intel = 0
- if os.path.exists('/System/Library/Frameworks'
- '/Accelerate.framework/'):
- if intel:
- args.extend(['-msse3'])
- else:
- args.extend(['-faltivec'])
- args.extend([
- '-I/System/Library/Frameworks/vecLib.framework/Headers'])
- link_args.extend(['-Wl,-framework', '-Wl,Accelerate'])
- elif os.path.exists('/System/Library/Frameworks'
- '/vecLib.framework/'):
- if intel:
- args.extend(['-msse3'])
- else:
- args.extend(['-faltivec'])
- args.extend([
- '-I/System/Library/Frameworks/vecLib.framework/Headers'])
- link_args.extend(['-Wl,-framework', '-Wl,vecLib'])
- if args:
- self.set_info(extra_compile_args=args,
- extra_link_args=link_args,
- define_macros=[('NO_ATLAS_INFO', 3),
- ('HAVE_CBLAS', None)])
- return
+ accelerate_info = get_info('accelerate')
+ if accelerate_info and not atlas_info:
+ self.set_info(**accelerate_info)
+ return
need_blas = 0
info = {}
@@ -1939,6 +1878,58 @@ class blis_info(blas_info):
include_dirs=incl_dirs)
self.set_info(**info)
+class accelerate_info(system_info):
+ section = 'accelerate'
+ notfounderror = BlasNotFoundError
+
+ def calc_info(self):
+ # Make possible to enable/disable from config file/env var
+ libraries = os.environ.get('ACCELERATE')
+ if libraries:
+ libraries = [libraries]
+ else:
+ libraries = self.get_libs('libraries', ['accelerate', 'veclib'])
+ libraries = [lib.strip().lower() for lib in libraries]
+
+ if (sys.platform == 'darwin' and
+ not os.getenv('_PYTHON_HOST_PLATFORM', None)):
+ # Use the system BLAS from Accelerate or vecLib under OSX
+ args = []
+ link_args = []
+ if get_platform()[-4:] == 'i386' or 'intel' in get_platform() or \
+ 'x86_64' in get_platform() or \
+ 'i386' in platform.platform():
+ intel = 1
+ else:
+ intel = 0
+ if (os.path.exists('/System/Library/Frameworks'
+ '/Accelerate.framework/') and
+ 'accelerate' in libraries):
+ if intel:
+ args.extend(['-msse3'])
+ else:
+ args.extend(['-faltivec'])
+ args.extend([
+ '-I/System/Library/Frameworks/vecLib.framework/Headers'])
+ link_args.extend(['-Wl,-framework', '-Wl,Accelerate'])
+ elif (os.path.exists('/System/Library/Frameworks'
+ '/vecLib.framework/') and
+ 'veclib' in libraries):
+ if intel:
+ args.extend(['-msse3'])
+ else:
+ args.extend(['-faltivec'])
+ args.extend([
+ '-I/System/Library/Frameworks/vecLib.framework/Headers'])
+ link_args.extend(['-Wl,-framework', '-Wl,vecLib'])
+
+ if args:
+ self.set_info(extra_compile_args=args,
+ extra_link_args=link_args,
+ define_macros=[('NO_ATLAS_INFO', 3),
+ ('HAVE_CBLAS', None)])
+
+ return
class blas_src_info(system_info):
section = 'blas_src'
diff --git a/numpy/f2py/src/fortranobject.c b/numpy/f2py/src/fortranobject.c
index dd2484eb4..78b06f066 100644
--- a/numpy/f2py/src/fortranobject.c
+++ b/numpy/f2py/src/fortranobject.c
@@ -539,7 +539,7 @@ void f2py_report_on_exit(int exit_flag,void *name) {
fprintf(stderr,"(d) f2py call-back interface, %6d calls : %8d msec\n",
cb_passed_counter,cb_passed_time);
- fprintf(stderr,"(e) wrapped (Fortran/C) functions (acctual) : %8d msec\n\n",
+ fprintf(stderr,"(e) wrapped (Fortran/C) functions (actual) : %8d msec\n\n",
passed_call_time-cb_passed_call_time-cb_passed_time);
fprintf(stderr,"Use -DF2PY_REPORT_ATEXIT_DISABLE to disable this message.\n");
fprintf(stderr,"Exit status: %d\n",exit_flag);
diff --git a/numpy/lib/arraypad.py b/numpy/lib/arraypad.py
index daaa68d06..e9ca9de4d 100644
--- a/numpy/lib/arraypad.py
+++ b/numpy/lib/arraypad.py
@@ -74,6 +74,35 @@ def _round_ifneeded(arr, dtype):
arr.round(out=arr)
+def _slice_at_axis(shape, sl, axis):
+ """
+ Construct a slice tuple the length of shape, with sl at the specified axis
+ """
+ slice_tup = (slice(None),)
+ return slice_tup * axis + (sl,) + slice_tup * (len(shape) - axis - 1)
+
+
+def _slice_first(shape, n, axis):
+ """ Construct a slice tuple to take the first n elements along axis """
+ return _slice_at_axis(shape, slice(0, n), axis=axis)
+
+
+def _slice_last(shape, n, axis):
+ """ Construct a slice tuple to take the last n elements along axis """
+ dim = shape[axis] # doing this explicitly makes n=0 work
+ return _slice_at_axis(shape, slice(dim - n, dim), axis=axis)
+
+
+def _do_prepend(arr, pad_chunk, axis):
+ return np.concatenate(
+ (pad_chunk.astype(arr.dtype, copy=False), arr), axis=axis)
+
+
+def _do_append(arr, pad_chunk, axis):
+ return np.concatenate(
+ (arr, pad_chunk.astype(arr.dtype, copy=False)), axis=axis)
+
+
def _prepend_const(arr, pad_amt, val, axis=-1):
"""
Prepend constant `val` along `axis` of `arr`.
@@ -100,12 +129,7 @@ def _prepend_const(arr, pad_amt, val, axis=-1):
return arr
padshape = tuple(x if i != axis else pad_amt
for (i, x) in enumerate(arr.shape))
- if val == 0:
- return np.concatenate((np.zeros(padshape, dtype=arr.dtype), arr),
- axis=axis)
- else:
- return np.concatenate(((np.zeros(padshape) + val).astype(arr.dtype),
- arr), axis=axis)
+ return _do_prepend(arr, np.full(padshape, val, dtype=arr.dtype), axis)
def _append_const(arr, pad_amt, val, axis=-1):
@@ -134,12 +158,8 @@ def _append_const(arr, pad_amt, val, axis=-1):
return arr
padshape = tuple(x if i != axis else pad_amt
for (i, x) in enumerate(arr.shape))
- if val == 0:
- return np.concatenate((arr, np.zeros(padshape, dtype=arr.dtype)),
- axis=axis)
- else:
- return np.concatenate(
- (arr, (np.zeros(padshape) + val).astype(arr.dtype)), axis=axis)
+ return _do_append(arr, np.full(padshape, val, dtype=arr.dtype), axis)
+
def _prepend_edge(arr, pad_amt, axis=-1):
@@ -164,15 +184,9 @@ def _prepend_edge(arr, pad_amt, axis=-1):
if pad_amt == 0:
return arr
- edge_slice = tuple(slice(None) if i != axis else 0
- for (i, x) in enumerate(arr.shape))
-
- # Shape to restore singleton dimension after slicing
- pad_singleton = tuple(x if i != axis else 1
- for (i, x) in enumerate(arr.shape))
- edge_arr = arr[edge_slice].reshape(pad_singleton)
- return np.concatenate((edge_arr.repeat(pad_amt, axis=axis), arr),
- axis=axis)
+ edge_slice = _slice_first(arr.shape, 1, axis=axis)
+ edge_arr = arr[edge_slice]
+ return _do_prepend(arr, edge_arr.repeat(pad_amt, axis=axis), axis)
def _append_edge(arr, pad_amt, axis=-1):
@@ -198,15 +212,9 @@ def _append_edge(arr, pad_amt, axis=-1):
if pad_amt == 0:
return arr
- edge_slice = tuple(slice(None) if i != axis else arr.shape[axis] - 1
- for (i, x) in enumerate(arr.shape))
-
- # Shape to restore singleton dimension after slicing
- pad_singleton = tuple(x if i != axis else 1
- for (i, x) in enumerate(arr.shape))
- edge_arr = arr[edge_slice].reshape(pad_singleton)
- return np.concatenate((arr, edge_arr.repeat(pad_amt, axis=axis)),
- axis=axis)
+ edge_slice = _slice_last(arr.shape, 1, axis=axis)
+ edge_arr = arr[edge_slice]
+ return _do_append(arr, edge_arr.repeat(pad_amt, axis=axis), axis)
def _prepend_ramp(arr, pad_amt, end, axis=-1):
@@ -244,15 +252,10 @@ def _prepend_ramp(arr, pad_amt, end, axis=-1):
reverse=True).astype(np.float64)
# Appropriate slicing to extract n-dimensional edge along `axis`
- edge_slice = tuple(slice(None) if i != axis else 0
- for (i, x) in enumerate(arr.shape))
+ edge_slice = _slice_first(arr.shape, 1, axis=axis)
- # Shape to restore singleton dimension after slicing
- pad_singleton = tuple(x if i != axis else 1
- for (i, x) in enumerate(arr.shape))
-
- # Extract edge, reshape to original rank, and extend along `axis`
- edge_pad = arr[edge_slice].reshape(pad_singleton).repeat(pad_amt, axis)
+ # Extract edge, and extend along `axis`
+ edge_pad = arr[edge_slice].repeat(pad_amt, axis)
# Linear ramp
slope = (end - edge_pad) / float(pad_amt)
@@ -261,7 +264,7 @@ def _prepend_ramp(arr, pad_amt, end, axis=-1):
_round_ifneeded(ramp_arr, arr.dtype)
# Ramp values will most likely be float, cast them to the same type as arr
- return np.concatenate((ramp_arr.astype(arr.dtype), arr), axis=axis)
+ return _do_prepend(arr, ramp_arr, axis)
def _append_ramp(arr, pad_amt, end, axis=-1):
@@ -299,15 +302,10 @@ def _append_ramp(arr, pad_amt, end, axis=-1):
reverse=False).astype(np.float64)
# Slice a chunk from the edge to calculate stats on
- edge_slice = tuple(slice(None) if i != axis else -1
- for (i, x) in enumerate(arr.shape))
-
- # Shape to restore singleton dimension after slicing
- pad_singleton = tuple(x if i != axis else 1
- for (i, x) in enumerate(arr.shape))
+ edge_slice = _slice_last(arr.shape, 1, axis=axis)
- # Extract edge, reshape to original rank, and extend along `axis`
- edge_pad = arr[edge_slice].reshape(pad_singleton).repeat(pad_amt, axis)
+ # Extract edge, and extend along `axis`
+ edge_pad = arr[edge_slice].repeat(pad_amt, axis)
# Linear ramp
slope = (end - edge_pad) / float(pad_amt)
@@ -316,7 +314,7 @@ def _append_ramp(arr, pad_amt, end, axis=-1):
_round_ifneeded(ramp_arr, arr.dtype)
# Ramp values will most likely be float, cast them to the same type as arr
- return np.concatenate((arr, ramp_arr.astype(arr.dtype)), axis=axis)
+ return _do_append(arr, ramp_arr, axis)
def _prepend_max(arr, pad_amt, num, axis=-1):
@@ -356,19 +354,13 @@ def _prepend_max(arr, pad_amt, num, axis=-1):
num = None
# Slice a chunk from the edge to calculate stats on
- max_slice = tuple(slice(None) if i != axis else slice(num)
- for (i, x) in enumerate(arr.shape))
+ max_slice = _slice_first(arr.shape, num, axis=axis)
- # Shape to restore singleton dimension after slicing
- pad_singleton = tuple(x if i != axis else 1
- for (i, x) in enumerate(arr.shape))
-
- # Extract slice, calculate max, reshape to add singleton dimension back
- max_chunk = arr[max_slice].max(axis=axis).reshape(pad_singleton)
+ # Extract slice, calculate max
+ max_chunk = arr[max_slice].max(axis=axis, keepdims=True)
# Concatenate `arr` with `max_chunk`, extended along `axis` by `pad_amt`
- return np.concatenate((max_chunk.repeat(pad_amt, axis=axis), arr),
- axis=axis)
+ return _do_prepend(arr, max_chunk.repeat(pad_amt, axis=axis), axis)
def _append_max(arr, pad_amt, num, axis=-1):
@@ -407,24 +399,16 @@ def _append_max(arr, pad_amt, num, axis=-1):
num = None
# Slice a chunk from the edge to calculate stats on
- end = arr.shape[axis] - 1
if num is not None:
- max_slice = tuple(
- slice(None) if i != axis else slice(end, end - num, -1)
- for (i, x) in enumerate(arr.shape))
+ max_slice = _slice_last(arr.shape, num, axis=axis)
else:
max_slice = tuple(slice(None) for x in arr.shape)
- # Shape to restore singleton dimension after slicing
- pad_singleton = tuple(x if i != axis else 1
- for (i, x) in enumerate(arr.shape))
-
- # Extract slice, calculate max, reshape to add singleton dimension back
- max_chunk = arr[max_slice].max(axis=axis).reshape(pad_singleton)
+ # Extract slice, calculate max
+ max_chunk = arr[max_slice].max(axis=axis, keepdims=True)
# Concatenate `arr` with `max_chunk`, extended along `axis` by `pad_amt`
- return np.concatenate((arr, max_chunk.repeat(pad_amt, axis=axis)),
- axis=axis)
+ return _do_append(arr, max_chunk.repeat(pad_amt, axis=axis), axis)
def _prepend_mean(arr, pad_amt, num, axis=-1):
@@ -463,20 +447,14 @@ def _prepend_mean(arr, pad_amt, num, axis=-1):
num = None
# Slice a chunk from the edge to calculate stats on
- mean_slice = tuple(slice(None) if i != axis else slice(num)
- for (i, x) in enumerate(arr.shape))
+ mean_slice = _slice_first(arr.shape, num, axis=axis)
- # Shape to restore singleton dimension after slicing
- pad_singleton = tuple(x if i != axis else 1
- for (i, x) in enumerate(arr.shape))
-
- # Extract slice, calculate mean, reshape to add singleton dimension back
- mean_chunk = arr[mean_slice].mean(axis).reshape(pad_singleton)
+ # Extract slice, calculate mean
+ mean_chunk = arr[mean_slice].mean(axis, keepdims=True)
_round_ifneeded(mean_chunk, arr.dtype)
# Concatenate `arr` with `mean_chunk`, extended along `axis` by `pad_amt`
- return np.concatenate((mean_chunk.repeat(pad_amt, axis).astype(arr.dtype),
- arr), axis=axis)
+ return _do_prepend(arr, mean_chunk.repeat(pad_amt, axis), axis=axis)
def _append_mean(arr, pad_amt, num, axis=-1):
@@ -515,25 +493,17 @@ def _append_mean(arr, pad_amt, num, axis=-1):
num = None
# Slice a chunk from the edge to calculate stats on
- end = arr.shape[axis] - 1
if num is not None:
- mean_slice = tuple(
- slice(None) if i != axis else slice(end, end - num, -1)
- for (i, x) in enumerate(arr.shape))
+ mean_slice = _slice_last(arr.shape, num, axis=axis)
else:
mean_slice = tuple(slice(None) for x in arr.shape)
- # Shape to restore singleton dimension after slicing
- pad_singleton = tuple(x if i != axis else 1
- for (i, x) in enumerate(arr.shape))
-
- # Extract slice, calculate mean, reshape to add singleton dimension back
- mean_chunk = arr[mean_slice].mean(axis=axis).reshape(pad_singleton)
+ # Extract slice, calculate mean
+ mean_chunk = arr[mean_slice].mean(axis=axis, keepdims=True)
_round_ifneeded(mean_chunk, arr.dtype)
# Concatenate `arr` with `mean_chunk`, extended along `axis` by `pad_amt`
- return np.concatenate(
- (arr, mean_chunk.repeat(pad_amt, axis).astype(arr.dtype)), axis=axis)
+ return _do_append(arr, mean_chunk.repeat(pad_amt, axis), axis=axis)
def _prepend_med(arr, pad_amt, num, axis=-1):
@@ -572,20 +542,14 @@ def _prepend_med(arr, pad_amt, num, axis=-1):
num = None
# Slice a chunk from the edge to calculate stats on
- med_slice = tuple(slice(None) if i != axis else slice(num)
- for (i, x) in enumerate(arr.shape))
-
- # Shape to restore singleton dimension after slicing
- pad_singleton = tuple(x if i != axis else 1
- for (i, x) in enumerate(arr.shape))
+ med_slice = _slice_first(arr.shape, num, axis=axis)
- # Extract slice, calculate median, reshape to add singleton dimension back
- med_chunk = np.median(arr[med_slice], axis=axis).reshape(pad_singleton)
+ # Extract slice, calculate median
+ med_chunk = np.median(arr[med_slice], axis=axis, keepdims=True)
_round_ifneeded(med_chunk, arr.dtype)
# Concatenate `arr` with `med_chunk`, extended along `axis` by `pad_amt`
- return np.concatenate(
- (med_chunk.repeat(pad_amt, axis).astype(arr.dtype), arr), axis=axis)
+ return _do_prepend(arr, med_chunk.repeat(pad_amt, axis), axis=axis)
def _append_med(arr, pad_amt, num, axis=-1):
@@ -624,25 +588,17 @@ def _append_med(arr, pad_amt, num, axis=-1):
num = None
# Slice a chunk from the edge to calculate stats on
- end = arr.shape[axis] - 1
if num is not None:
- med_slice = tuple(
- slice(None) if i != axis else slice(end, end - num, -1)
- for (i, x) in enumerate(arr.shape))
+ med_slice = _slice_last(arr.shape, num, axis=axis)
else:
med_slice = tuple(slice(None) for x in arr.shape)
- # Shape to restore singleton dimension after slicing
- pad_singleton = tuple(x if i != axis else 1
- for (i, x) in enumerate(arr.shape))
-
- # Extract slice, calculate median, reshape to add singleton dimension back
- med_chunk = np.median(arr[med_slice], axis=axis).reshape(pad_singleton)
+ # Extract slice, calculate median
+ med_chunk = np.median(arr[med_slice], axis=axis, keepdims=True)
_round_ifneeded(med_chunk, arr.dtype)
# Concatenate `arr` with `med_chunk`, extended along `axis` by `pad_amt`
- return np.concatenate(
- (arr, med_chunk.repeat(pad_amt, axis).astype(arr.dtype)), axis=axis)
+ return _do_append(arr, med_chunk.repeat(pad_amt, axis), axis=axis)
def _prepend_min(arr, pad_amt, num, axis=-1):
@@ -682,19 +638,13 @@ def _prepend_min(arr, pad_amt, num, axis=-1):
num = None
# Slice a chunk from the edge to calculate stats on
- min_slice = tuple(slice(None) if i != axis else slice(num)
- for (i, x) in enumerate(arr.shape))
-
- # Shape to restore singleton dimension after slicing
- pad_singleton = tuple(x if i != axis else 1
- for (i, x) in enumerate(arr.shape))
+ min_slice = _slice_first(arr.shape, num, axis=axis)
- # Extract slice, calculate min, reshape to add singleton dimension back
- min_chunk = arr[min_slice].min(axis=axis).reshape(pad_singleton)
+ # Extract slice, calculate min
+ min_chunk = arr[min_slice].min(axis=axis, keepdims=True)
# Concatenate `arr` with `min_chunk`, extended along `axis` by `pad_amt`
- return np.concatenate((min_chunk.repeat(pad_amt, axis=axis), arr),
- axis=axis)
+ return _do_prepend(arr, min_chunk.repeat(pad_amt, axis), axis=axis)
def _append_min(arr, pad_amt, num, axis=-1):
@@ -733,24 +683,16 @@ def _append_min(arr, pad_amt, num, axis=-1):
num = None
# Slice a chunk from the edge to calculate stats on
- end = arr.shape[axis] - 1
if num is not None:
- min_slice = tuple(
- slice(None) if i != axis else slice(end, end - num, -1)
- for (i, x) in enumerate(arr.shape))
+ min_slice = _slice_last(arr.shape, num, axis=axis)
else:
min_slice = tuple(slice(None) for x in arr.shape)
- # Shape to restore singleton dimension after slicing
- pad_singleton = tuple(x if i != axis else 1
- for (i, x) in enumerate(arr.shape))
-
- # Extract slice, calculate min, reshape to add singleton dimension back
- min_chunk = arr[min_slice].min(axis=axis).reshape(pad_singleton)
+ # Extract slice, calculate min
+ min_chunk = arr[min_slice].min(axis=axis, keepdims=True)
# Concatenate `arr` with `min_chunk`, extended along `axis` by `pad_amt`
- return np.concatenate((arr, min_chunk.repeat(pad_amt, axis=axis)),
- axis=axis)
+ return _do_append(arr, min_chunk.repeat(pad_amt, axis), axis=axis)
def _pad_ref(arr, pad_amt, method, axis=-1):
@@ -793,22 +735,14 @@ def _pad_ref(arr, pad_amt, method, axis=-1):
# Prepended region
# Slice off a reverse indexed chunk from near edge to pad `arr` before
- ref_slice = tuple(slice(None) if i != axis else slice(pad_amt[0], 0, -1)
- for (i, x) in enumerate(arr.shape))
+ ref_slice = _slice_at_axis(arr.shape, slice(pad_amt[0], 0, -1), axis=axis)
ref_chunk1 = arr[ref_slice]
- # Shape to restore singleton dimension after slicing
- pad_singleton = tuple(x if i != axis else 1
- for (i, x) in enumerate(arr.shape))
- if pad_amt[0] == 1:
- ref_chunk1 = ref_chunk1.reshape(pad_singleton)
-
# Memory/computationally more expensive, only do this if `method='odd'`
if 'odd' in method and pad_amt[0] > 0:
- edge_slice1 = tuple(slice(None) if i != axis else 0
- for (i, x) in enumerate(arr.shape))
- edge_chunk = arr[edge_slice1].reshape(pad_singleton)
+ edge_slice1 = _slice_first(arr.shape, 1, axis=axis)
+ edge_chunk = arr[edge_slice1]
ref_chunk1 = 2 * edge_chunk - ref_chunk1
del edge_chunk
@@ -818,19 +752,13 @@ def _pad_ref(arr, pad_amt, method, axis=-1):
# Slice off a reverse indexed chunk from far edge to pad `arr` after
start = arr.shape[axis] - pad_amt[1] - 1
end = arr.shape[axis] - 1
- ref_slice = tuple(slice(None) if i != axis else slice(start, end)
- for (i, x) in enumerate(arr.shape))
- rev_idx = tuple(slice(None) if i != axis else slice(None, None, -1)
- for (i, x) in enumerate(arr.shape))
+ ref_slice = _slice_at_axis(arr.shape, slice(start, end), axis=axis)
+ rev_idx = _slice_at_axis(arr.shape, slice(None, None, -1), axis=axis)
ref_chunk2 = arr[ref_slice][rev_idx]
- if pad_amt[1] == 1:
- ref_chunk2 = ref_chunk2.reshape(pad_singleton)
-
if 'odd' in method:
- edge_slice2 = tuple(slice(None) if i != axis else -1
- for (i, x) in enumerate(arr.shape))
- edge_chunk = arr[edge_slice2].reshape(pad_singleton)
+ edge_slice2 = _slice_last(arr.shape, 1, axis=axis)
+ edge_chunk = arr[edge_slice2]
ref_chunk2 = 2 * edge_chunk - ref_chunk2
del edge_chunk
@@ -878,23 +806,14 @@ def _pad_sym(arr, pad_amt, method, axis=-1):
# Prepended region
# Slice off a reverse indexed chunk from near edge to pad `arr` before
- sym_slice = tuple(slice(None) if i != axis else slice(0, pad_amt[0])
- for (i, x) in enumerate(arr.shape))
- rev_idx = tuple(slice(None) if i != axis else slice(None, None, -1)
- for (i, x) in enumerate(arr.shape))
+ sym_slice = _slice_first(arr.shape, pad_amt[0], axis=axis)
+ rev_idx = _slice_at_axis(arr.shape, slice(None, None, -1), axis=axis)
sym_chunk1 = arr[sym_slice][rev_idx]
- # Shape to restore singleton dimension after slicing
- pad_singleton = tuple(x if i != axis else 1
- for (i, x) in enumerate(arr.shape))
- if pad_amt[0] == 1:
- sym_chunk1 = sym_chunk1.reshape(pad_singleton)
-
# Memory/computationally more expensive, only do this if `method='odd'`
if 'odd' in method and pad_amt[0] > 0:
- edge_slice1 = tuple(slice(None) if i != axis else 0
- for (i, x) in enumerate(arr.shape))
- edge_chunk = arr[edge_slice1].reshape(pad_singleton)
+ edge_slice1 = _slice_first(arr.shape, 1, axis=axis)
+ edge_chunk = arr[edge_slice1]
sym_chunk1 = 2 * edge_chunk - sym_chunk1
del edge_chunk
@@ -902,19 +821,12 @@ def _pad_sym(arr, pad_amt, method, axis=-1):
# Appended region
# Slice off a reverse indexed chunk from far edge to pad `arr` after
- start = arr.shape[axis] - pad_amt[1]
- end = arr.shape[axis]
- sym_slice = tuple(slice(None) if i != axis else slice(start, end)
- for (i, x) in enumerate(arr.shape))
+ sym_slice = _slice_last(arr.shape, pad_amt[1], axis=axis)
sym_chunk2 = arr[sym_slice][rev_idx]
- if pad_amt[1] == 1:
- sym_chunk2 = sym_chunk2.reshape(pad_singleton)
-
if 'odd' in method:
- edge_slice2 = tuple(slice(None) if i != axis else -1
- for (i, x) in enumerate(arr.shape))
- edge_chunk = arr[edge_slice2].reshape(pad_singleton)
+ edge_slice2 = _slice_last(arr.shape, 1, axis=axis)
+ edge_chunk = arr[edge_slice2]
sym_chunk2 = 2 * edge_chunk - sym_chunk2
del edge_chunk
@@ -959,29 +871,16 @@ def _pad_wrap(arr, pad_amt, axis=-1):
# Prepended region
# Slice off a reverse indexed chunk from near edge to pad `arr` before
- start = arr.shape[axis] - pad_amt[0]
- end = arr.shape[axis]
- wrap_slice = tuple(slice(None) if i != axis else slice(start, end)
- for (i, x) in enumerate(arr.shape))
+ wrap_slice = _slice_last(arr.shape, pad_amt[0], axis=axis)
wrap_chunk1 = arr[wrap_slice]
- # Shape to restore singleton dimension after slicing
- pad_singleton = tuple(x if i != axis else 1
- for (i, x) in enumerate(arr.shape))
- if pad_amt[0] == 1:
- wrap_chunk1 = wrap_chunk1.reshape(pad_singleton)
-
##########################################################################
# Appended region
# Slice off a reverse indexed chunk from far edge to pad `arr` after
- wrap_slice = tuple(slice(None) if i != axis else slice(0, pad_amt[1])
- for (i, x) in enumerate(arr.shape))
+ wrap_slice = _slice_first(arr.shape, pad_amt[1], axis=axis)
wrap_chunk2 = arr[wrap_slice]
- if pad_amt[1] == 1:
- wrap_chunk2 = wrap_chunk2.reshape(pad_singleton)
-
# Concatenate `arr` with both chunks, extending along `axis`
return np.concatenate((wrap_chunk1, arr, wrap_chunk2), axis=axis)
diff --git a/numpy/lib/arraysetops.py b/numpy/lib/arraysetops.py
index e8eda297f..4d3f35183 100644
--- a/numpy/lib/arraysetops.py
+++ b/numpy/lib/arraysetops.py
@@ -298,7 +298,7 @@ def _unique1d(ar, return_index=False, return_inverse=False,
return ret
-def intersect1d(ar1, ar2, assume_unique=False):
+def intersect1d(ar1, ar2, assume_unique=False, return_indices=False):
"""
Find the intersection of two arrays.
@@ -307,15 +307,28 @@ def intersect1d(ar1, ar2, assume_unique=False):
Parameters
----------
ar1, ar2 : array_like
- Input arrays.
+ Input arrays. Will be flattened if not already 1D.
assume_unique : bool
If True, the input arrays are both assumed to be unique, which
can speed up the calculation. Default is False.
-
+ return_indices : bool
+ If True, the indices which correspond to the intersection of the
+ two arrays are returned. The first instance of a value is used
+ if there are multiple. Default is False.
+
+ .. versionadded:: 1.15.0
+
Returns
-------
intersect1d : ndarray
Sorted 1D array of common and unique elements.
+ comm1 : ndarray
+ The indices of the first occurrences of the common values in `ar1`.
+ Only provided if `return_indices` is True.
+ comm2 : ndarray
+ The indices of the first occurrences of the common values in `ar2`.
+ Only provided if `return_indices` is True.
+
See Also
--------
@@ -332,14 +345,49 @@ def intersect1d(ar1, ar2, assume_unique=False):
>>> from functools import reduce
>>> reduce(np.intersect1d, ([1, 3, 4, 3], [3, 1, 2, 1], [6, 3, 4, 2]))
array([3])
+
+ To return the indices of the values common to the input arrays
+ along with the intersected values:
+ >>> x = np.array([1, 1, 2, 3, 4])
+ >>> y = np.array([2, 1, 4, 6])
+ >>> xy, x_ind, y_ind = np.intersect1d(x, y, return_indices=True)
+ >>> x_ind, y_ind
+ (array([0, 2, 4]), array([1, 0, 2]))
+ >>> xy, x[x_ind], y[y_ind]
+ (array([1, 2, 4]), array([1, 2, 4]), array([1, 2, 4]))
+
"""
if not assume_unique:
- # Might be faster than unique( intersect1d( ar1, ar2 ) )?
- ar1 = unique(ar1)
- ar2 = unique(ar2)
+ if return_indices:
+ ar1, ind1 = unique(ar1, return_index=True)
+ ar2, ind2 = unique(ar2, return_index=True)
+ else:
+ ar1 = unique(ar1)
+ ar2 = unique(ar2)
+ else:
+ ar1 = ar1.ravel()
+ ar2 = ar2.ravel()
+
aux = np.concatenate((ar1, ar2))
- aux.sort()
- return aux[:-1][aux[1:] == aux[:-1]]
+ if return_indices:
+ aux_sort_indices = np.argsort(aux, kind='mergesort')
+ aux = aux[aux_sort_indices]
+ else:
+ aux.sort()
+
+ mask = aux[1:] == aux[:-1]
+ int1d = aux[:-1][mask]
+
+ if return_indices:
+ ar1_indices = aux_sort_indices[:-1][mask]
+ ar2_indices = aux_sort_indices[1:][mask] - ar1.size
+ if not assume_unique:
+ ar1_indices = ind1[ar1_indices]
+ ar2_indices = ind2[ar2_indices]
+
+ return int1d, ar1_indices, ar2_indices
+ else:
+ return int1d
def setxor1d(ar1, ar2, assume_unique=False):
"""
@@ -660,3 +708,4 @@ def setdiff1d(ar1, ar2, assume_unique=False):
ar1 = unique(ar1)
ar2 = unique(ar2)
return ar1[in1d(ar1, ar2, assume_unique=True, invert=True)]
+
diff --git a/numpy/lib/format.py b/numpy/lib/format.py
index 363bb2101..23eac7e7d 100644
--- a/numpy/lib/format.py
+++ b/numpy/lib/format.py
@@ -1,5 +1,10 @@
"""
-Define a simple format for saving numpy arrays to disk with the full
+Binary serialization
+
+NPY format
+==========
+
+A simple format for saving numpy arrays to disk with the full
information about them.
The ``.npy`` format is the standard binary file format in NumPy for
@@ -143,8 +148,10 @@ data HEADER_LEN."
Notes
-----
-The ``.npy`` format, including reasons for creating it and a comparison of
-alternatives, is described fully in the "npy-format" NEP.
+The ``.npy`` format, including motivation for creating it and a comparison of
+alternatives, is described in the `"npy-format" NEP
+<http://www.numpy.org/neps/nep-0001-npy-format.html>`_, however details have
+evolved with time and this document is more current.
"""
from __future__ import division, absolute_import, print_function
diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index 099b63c40..a6e3e07d3 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -54,7 +54,8 @@ __all__ = [
'bincount', 'digitize', 'cov', 'corrcoef',
'msort', 'median', 'sinc', 'hamming', 'hanning', 'bartlett',
'blackman', 'kaiser', 'trapz', 'i0', 'add_newdoc', 'add_docstring',
- 'meshgrid', 'delete', 'insert', 'append', 'interp', 'add_newdoc_ufunc'
+ 'meshgrid', 'delete', 'insert', 'append', 'interp', 'add_newdoc_ufunc',
+ 'quantile'
]
@@ -1632,9 +1633,9 @@ def disp(mesg, device=None, linefeed=True):
Besides ``sys.stdout``, a file-like object can also be used as it has
both required methods:
- >>> from StringIO import StringIO
+ >>> from io import StringIO
>>> buf = StringIO()
- >>> np.disp('"Display" in a file', device=buf)
+ >>> np.disp(u'"Display" in a file', device=buf)
>>> buf.getvalue()
'"Display" in a file\\n'
@@ -3427,7 +3428,7 @@ def percentile(a, q, axis=None, out=None,
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to
- use when the desired quantile lies between two data points
+ use when the desired percentile lies between two data points
``i < j``:
* 'linear': ``i + (j - i) * fraction``, where ``fraction``
@@ -3463,6 +3464,7 @@ def percentile(a, q, axis=None, out=None,
mean
median : equivalent to ``percentile(..., 50)``
nanpercentile
+ quantile : equivalent to percentile, except with q in the range [0, 1].
Notes
-----
@@ -3539,6 +3541,110 @@ def percentile(a, q, axis=None, out=None,
a, q, axis, out, overwrite_input, interpolation, keepdims)
+def quantile(a, q, axis=None, out=None,
+ overwrite_input=False, interpolation='linear', keepdims=False):
+ """
+ Compute the `q`th quantile of the data along the specified axis.
+ .. versionadded:: 1.15.0
+
+ Parameters
+ ----------
+ a : array_like
+ Input array or object that can be converted to an array.
+ q : array_like of float
+ Quantile or sequence of quantiles to compute, which must be between
+ 0 and 1 inclusive.
+ axis : {int, tuple of int, None}, optional
+ Axis or axes along which the quantiles are computed. The
+ default is to compute the quantile(s) along a flattened
+ version of the array.
+ out : ndarray, optional
+ Alternative output array in which to place the result. It must
+ have the same shape and buffer length as the expected output,
+ but the type (of the output) will be cast if necessary.
+ overwrite_input : bool, optional
+ If True, then allow the input array `a` to be modified by intermediate
+ calculations, to save memory. In this case, the contents of the input
+ `a` after this function completes is undefined.
+ interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
+ This optional parameter specifies the interpolation method to
+ use when the desired quantile lies between two data points
+ ``i < j``:
+ * linear: ``i + (j - i) * fraction``, where ``fraction``
+ is the fractional part of the index surrounded by ``i``
+ and ``j``.
+ * lower: ``i``.
+ * higher: ``j``.
+ * nearest: ``i`` or ``j``, whichever is nearest.
+ * midpoint: ``(i + j) / 2``.
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left in
+ the result as dimensions with size one. With this option, the
+ result will broadcast correctly against the original array `a`.
+
+ Returns
+ -------
+ quantile : scalar or ndarray
+ If `q` is a single quantile and `axis=None`, then the result
+ is a scalar. If multiple quantiles are given, first axis of
+ the result corresponds to the quantiles. The other axes are
+ the axes that remain after the reduction of `a`. If the input
+ contains integers or floats smaller than ``float64``, the output
+ data-type is ``float64``. Otherwise, the output data-type is the
+ same as that of the input. If `out` is specified, that array is
+ returned instead.
+
+ See Also
+ --------
+ mean
+ percentile : equivalent to quantile, but with q in the range [0, 100].
+ median : equivalent to ``quantile(..., 0.5)``
+ nanquantile
+
+ Notes
+ -----
+ Given a vector ``V`` of length ``N``, the ``q``-th quantile of
+ ``V`` is the value ``q`` of the way from the minimum to the
+ maximum in a sorted copy of ``V``. The values and distances of
+ the two nearest neighbors as well as the `interpolation` parameter
+ will determine the quantile if the normalized ranking does not
+ match the location of ``q`` exactly. This function is the same as
+ the median if ``q=0.5``, the same as the minimum if ``q=0.0`` and the
+ same as the maximum if ``q=1.0``.
+
+ Examples
+ --------
+ >>> a = np.array([[10, 7, 4], [3, 2, 1]])
+ >>> a
+ array([[10, 7, 4],
+ [ 3, 2, 1]])
+ >>> np.quantile(a, 0.5)
+ 3.5
+ >>> np.quantile(a, 0.5, axis=0)
+ array([[ 6.5, 4.5, 2.5]])
+ >>> np.quantile(a, 0.5, axis=1)
+ array([ 7., 2.])
+ >>> np.quantile(a, 0.5, axis=1, keepdims=True)
+ array([[ 7.],
+ [ 2.]])
+ >>> m = np.quantile(a, 0.5, axis=0)
+ >>> out = np.zeros_like(m)
+ >>> np.quantile(a, 0.5, axis=0, out=out)
+ array([[ 6.5, 4.5, 2.5]])
+ >>> m
+ array([[ 6.5, 4.5, 2.5]])
+ >>> b = a.copy()
+ >>> np.quantile(b, 0.5, axis=1, overwrite_input=True)
+ array([ 7., 2.])
+ >>> assert not np.all(a == b)
+ """
+ q = np.asanyarray(q)
+ if not _quantile_is_valid(q):
+ raise ValueError("Quantiles must be in the range [0, 1]")
+ return _quantile_unchecked(
+ a, q, axis, out, overwrite_input, interpolation, keepdims)
+
+
def _quantile_unchecked(a, q, axis=None, out=None, overwrite_input=False,
interpolation='linear', keepdims=False):
"""Assumes that q is in [0, 1], and is an ndarray"""
diff --git a/numpy/lib/histograms.py b/numpy/lib/histograms.py
index d2a398a0a..2922b3a86 100644
--- a/numpy/lib/histograms.py
+++ b/numpy/lib/histograms.py
@@ -877,12 +877,6 @@ def histogramdd(sample, bins=10, range=None, normed=False, weights=None):
# bins is an integer
bins = D*[bins]
- # avoid rounding issues for comparisons when dealing with inexact types
- if np.issubdtype(sample.dtype, np.inexact):
- edge_dt = sample.dtype
- else:
- edge_dt = float
-
# normalize the range argument
if range is None:
range = (None,) * D
@@ -896,13 +890,12 @@ def histogramdd(sample, bins=10, range=None, normed=False, weights=None):
raise ValueError(
'`bins[{}]` must be positive, when an integer'.format(i))
smin, smax = _get_outer_edges(sample[:,i], range[i])
- edges[i] = np.linspace(smin, smax, bins[i] + 1, dtype=edge_dt)
+ edges[i] = np.linspace(smin, smax, bins[i] + 1)
elif np.ndim(bins[i]) == 1:
- edges[i] = np.asarray(bins[i], edge_dt)
- # not just monotonic, due to the use of mindiff below
- if np.any(edges[i][:-1] >= edges[i][1:]):
+ edges[i] = np.asarray(bins[i])
+ if np.any(edges[i][:-1] > edges[i][1:]):
raise ValueError(
- '`bins[{}]` must be strictly increasing, when an array'
+ '`bins[{}]` must be monotonically increasing, when an array'
.format(i))
else:
raise ValueError(
@@ -911,13 +904,10 @@ def histogramdd(sample, bins=10, range=None, normed=False, weights=None):
nbin[i] = len(edges[i]) + 1 # includes an outlier on each end
dedges[i] = np.diff(edges[i])
- # Handle empty input.
- if N == 0:
- return np.zeros(nbin-2), edges
-
# Compute the bin number each sample falls into.
Ncount = tuple(
- np.digitize(sample[:, i], edges[i])
+ # avoid np.digitize to work around gh-11022
+ np.searchsorted(edges[i], sample[:, i], side='right')
for i in _range(D)
)
@@ -925,16 +915,10 @@ def histogramdd(sample, bins=10, range=None, normed=False, weights=None):
# For the rightmost bin, we want values equal to the right edge to be
# counted in the last bin, and not as an outlier.
for i in _range(D):
- # Rounding precision
- mindiff = dedges[i].min()
- if not np.isinf(mindiff):
- decimal = int(-np.log10(mindiff)) + 6
- # Find which points are on the rightmost edge.
- not_smaller_than_edge = (sample[:, i] >= edges[i][-1])
- on_edge = (np.around(sample[:, i], decimal) ==
- np.around(edges[i][-1], decimal))
- # Shift these points one bin to the left.
- Ncount[i][on_edge & not_smaller_than_edge] -= 1
+ # Find which points are on the rightmost edge.
+ on_edge = (sample[:, i] == edges[i][-1])
+ # Shift these points one bin to the left.
+ Ncount[i][on_edge] -= 1
# Compute the sample indices in the flattened histogram matrix.
# This raises an error if the array is too large.
diff --git a/numpy/lib/index_tricks.py b/numpy/lib/index_tricks.py
index 43fdc5627..d2139338e 100644
--- a/numpy/lib/index_tricks.py
+++ b/numpy/lib/index_tricks.py
@@ -201,7 +201,7 @@ class nd_grid(object):
slobj = [_nx.newaxis]*len(size)
for k in range(len(size)):
slobj[k] = slice(None, None)
- nn[k] = nn[k][slobj]
+ nn[k] = nn[k][tuple(slobj)]
slobj[k] = _nx.newaxis
return nn
except (IndexError, TypeError):
diff --git a/numpy/lib/nanfunctions.py b/numpy/lib/nanfunctions.py
index dddc0e5b8..abd2da1a2 100644
--- a/numpy/lib/nanfunctions.py
+++ b/numpy/lib/nanfunctions.py
@@ -16,6 +16,7 @@ Functions
- `nanvar` -- variance of non-NaN values
- `nanstd` -- standard deviation of non-NaN values
- `nanmedian` -- median of non-NaN values
+- `nanquantile` -- qth quantile of non-NaN values
- `nanpercentile` -- qth percentile of non-NaN values
"""
@@ -29,7 +30,7 @@ from numpy.lib import function_base
__all__ = [
'nansum', 'nanmax', 'nanmin', 'nanargmax', 'nanargmin', 'nanmean',
'nanmedian', 'nanpercentile', 'nanvar', 'nanstd', 'nanprod',
- 'nancumsum', 'nancumprod'
+ 'nancumsum', 'nancumprod', 'nanquantile'
]
@@ -1057,7 +1058,7 @@ def nanpercentile(a, q, axis=None, out=None, overwrite_input=False,
`a` after this function completes is undefined.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to
- use when the desired quantile lies between two data points
+ use when the desired percentile lies between two data points
``i < j``:
* 'linear': ``i + (j - i) * fraction``, where ``fraction``
@@ -1095,6 +1096,7 @@ def nanpercentile(a, q, axis=None, out=None, overwrite_input=False,
nanmean
nanmedian : equivalent to ``nanpercentile(..., 50)``
percentile, median, mean
+ nanquantile : equivalent to nanpercentile, but with q in the range [0, 1].
Notes
-----
@@ -1144,6 +1146,110 @@ def nanpercentile(a, q, axis=None, out=None, overwrite_input=False,
a, q, axis, out, overwrite_input, interpolation, keepdims)
+def nanquantile(a, q, axis=None, out=None, overwrite_input=False,
+ interpolation='linear', keepdims=np._NoValue):
+ """
+ Compute the qth quantile of the data along the specified axis,
+ while ignoring nan values.
+ Returns the qth quantile(s) of the array elements.
+ .. versionadded:: 1.15.0
+
+ Parameters
+ ----------
+ a : array_like
+ Input array or object that can be converted to an array, containing
+ nan values to be ignored
+ q : array_like of float
+ Quantile or sequence of quantiles to compute, which must be between
+ 0 and 1 inclusive.
+ axis : {int, tuple of int, None}, optional
+ Axis or axes along which the quantiles are computed. The
+ default is to compute the quantile(s) along a flattened
+ version of the array.
+ out : ndarray, optional
+ Alternative output array in which to place the result. It must
+ have the same shape and buffer length as the expected output,
+ but the type (of the output) will be cast if necessary.
+ overwrite_input : bool, optional
+ If True, then allow the input array `a` to be modified by intermediate
+ calculations, to save memory. In this case, the contents of the input
+ `a` after this function completes is undefined.
+ interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
+ This optional parameter specifies the interpolation method to
+ use when the desired quantile lies between two data points
+ ``i < j``:
+ * linear: ``i + (j - i) * fraction``, where ``fraction``
+ is the fractional part of the index surrounded by ``i``
+ and ``j``.
+ * lower: ``i``.
+ * higher: ``j``.
+ * nearest: ``i`` or ``j``, whichever is nearest.
+ * midpoint: ``(i + j) / 2``.
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left in
+ the result as dimensions with size one. With this option, the
+ result will broadcast correctly against the original array `a`.
+
+ If this is anything but the default value it will be passed
+ through (in the special case of an empty array) to the
+ `mean` function of the underlying array. If the array is
+ a sub-class and `mean` does not have the kwarg `keepdims` this
+ will raise a RuntimeError.
+
+ Returns
+ -------
+ quantile : scalar or ndarray
+ If `q` is a single quantile and `axis=None`, then the result
+ is a scalar. If multiple quantiles are given, first axis of
+ the result corresponds to the quantiles. The other axes are
+ the axes that remain after the reduction of `a`. If the input
+ contains integers or floats smaller than ``float64``, the output
+ data-type is ``float64``. Otherwise, the output data-type is the
+ same as that of the input. If `out` is specified, that array is
+ returned instead.
+
+ See Also
+ --------
+ quantile
+ nanmean, nanmedian
+ nanmedian : equivalent to ``nanquantile(..., 0.5)``
+ nanpercentile : same as nanquantile, but with q in the range [0, 100].
+
+ Examples
+ --------
+ >>> a = np.array([[10., 7., 4.], [3., 2., 1.]])
+ >>> a[0][1] = np.nan
+ >>> a
+ array([[ 10., nan, 4.],
+ [ 3., 2., 1.]])
+ >>> np.quantile(a, 0.5)
+ nan
+ >>> np.nanquantile(a, 0.5)
+ 3.5
+ >>> np.nanquantile(a, 0.5, axis=0)
+ array([ 6.5, 2., 2.5])
+ >>> np.nanquantile(a, 0.5, axis=1, keepdims=True)
+ array([[ 7.],
+ [ 2.]])
+ >>> m = np.nanquantile(a, 0.5, axis=0)
+ >>> out = np.zeros_like(m)
+ >>> np.nanquantile(a, 0.5, axis=0, out=out)
+ array([ 6.5, 2., 2.5])
+ >>> m
+ array([ 6.5, 2. , 2.5])
+ >>> b = a.copy()
+ >>> np.nanquantile(b, 0.5, axis=1, overwrite_input=True)
+ array([ 7., 2.])
+ >>> assert not np.all(a==b)
+ """
+ a = np.asanyarray(a)
+ q = np.asanyarray(q)
+ if not function_base._quantile_is_valid(q):
+ raise ValueError("Quantiles must be in the range [0, 1]")
+ return _nanquantile_unchecked(
+ a, q, axis, out, overwrite_input, interpolation, keepdims)
+
+
def _nanquantile_unchecked(a, q, axis=None, out=None, overwrite_input=False,
interpolation='linear', keepdims=np._NoValue):
"""Assumes that q is in [0, 1], and is an ndarray"""
diff --git a/numpy/lib/npyio.py b/numpy/lib/npyio.py
index 36589ce82..390927601 100644
--- a/numpy/lib/npyio.py
+++ b/numpy/lib/npyio.py
@@ -480,9 +480,7 @@ def save(file, arr, allow_pickle=True, fix_imports=True):
Notes
-----
- For a description of the ``.npy`` format, see the module docstring
- of `numpy.lib.format` or the NumPy Enhancement Proposal
- http://numpy.github.io/neps/npy-format.html
+ For a description of the ``.npy`` format, see :py:mod:`numpy.lib.format`.
Examples
--------
@@ -566,9 +564,7 @@ def savez(file, *args, **kwds):
The ``.npz`` file format is a zipped archive of files named after the
variables they contain. The archive is not compressed and each file
in the archive contains one variable in ``.npy`` format. For a
- description of the ``.npy`` format, see `numpy.lib.format` or the
- NumPy Enhancement Proposal
- http://numpy.github.io/neps/npy-format.html
+ description of the ``.npy`` format, see :py:mod:`numpy.lib.format`.
When opening the saved ``.npz`` file with `load` a `NpzFile` object is
returned. This is a dictionary-like object which can be queried for
@@ -647,9 +643,9 @@ def savez_compressed(file, *args, **kwds):
The ``.npz`` file format is a zipped archive of files named after the
variables they contain. The archive is compressed with
``zipfile.ZIP_DEFLATED`` and each file in the archive contains one variable
- in ``.npy`` format. For a description of the ``.npy`` format, see
- `numpy.lib.format` or the NumPy Enhancement Proposal
- http://numpy.github.io/neps/npy-format.html
+ in ``.npy`` format. For a description of the ``.npy`` format, see
+ :py:mod:`numpy.lib.format`.
+
When opening the saved ``.npz`` file with `load` a `NpzFile` object is
returned. This is a dictionary-like object which can be queried for
@@ -796,8 +792,8 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
the data-type.
comments : str or sequence of str, optional
The characters or list of characters used to indicate the start of a
- comment. For backwards compatibility, byte strings will be decoded as
- 'latin1'. The default is '#'.
+ comment. None implies no comments. For backwards compatibility, byte
+ strings will be decoded as 'latin1'. The default is '#'.
delimiter : str, optional
The string used to separate values. For backwards compatibility, byte
strings will be decoded as 'latin1'. The default is whitespace.
@@ -864,18 +860,18 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
Examples
--------
>>> from io import StringIO # StringIO behaves like a file object
- >>> c = StringIO("0 1\\n2 3")
+ >>> c = StringIO(u"0 1\\n2 3")
>>> np.loadtxt(c)
array([[ 0., 1.],
[ 2., 3.]])
- >>> d = StringIO("M 21 72\\nF 35 58")
+ >>> d = StringIO(u"M 21 72\\nF 35 58")
>>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),
... 'formats': ('S1', 'i4', 'f4')})
array([('M', 21, 72.0), ('F', 35, 58.0)],
dtype=[('gender', '|S1'), ('age', '<i4'), ('weight', '<f4')])
- >>> c = StringIO("1,0,2\\n3,0,4")
+ >>> c = StringIO(u"1,0,2\\n3,0,4")
>>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
>>> x
array([ 1., 3.])
@@ -941,7 +937,7 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
if encoding is not None:
fencoding = encoding
# we must assume local encoding
- # TOOD emit portability warning?
+ # TODO emit portability warning?
elif fencoding is None:
import locale
fencoding = locale.getpreferredencoding()
@@ -1637,7 +1633,7 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
Comma delimited file with mixed dtype
- >>> s = StringIO("1,1.3,abcde")
+ >>> s = StringIO(u"1,1.3,abcde")
>>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'),
... ('mystring','S5')], delimiter=",")
>>> data
@@ -1664,7 +1660,7 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
An example with fixed-width columns
- >>> s = StringIO("11.3abcde")
+ >>> s = StringIO(u"11.3abcde")
>>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'],
... delimiter=[1,3,5])
>>> data
diff --git a/numpy/lib/polynomial.py b/numpy/lib/polynomial.py
index 41b5e2f64..078608bbb 100644
--- a/numpy/lib/polynomial.py
+++ b/numpy/lib/polynomial.py
@@ -113,11 +113,6 @@ def poly(seq_of_zeros):
>>> np.poly(P)
array([ 1. , 0. , 0.16666667])
- Or a square matrix object:
-
- >>> np.poly(np.matrix(P))
- array([ 1. , 0. , 0.16666667])
-
Note how in all cases the leading coefficient is always 1.
"""
diff --git a/numpy/lib/scimath.py b/numpy/lib/scimath.py
index e07caf805..f1838fee6 100644
--- a/numpy/lib/scimath.py
+++ b/numpy/lib/scimath.py
@@ -555,7 +555,7 @@ def arctanh(x):
--------
>>> np.set_printoptions(precision=4)
- >>> np.emath.arctanh(np.matrix(np.eye(2)))
+ >>> np.emath.arctanh(np.eye(2))
array([[ Inf, 0.],
[ 0., Inf]])
>>> np.emath.arctanh([1j])
diff --git a/numpy/lib/shape_base.py b/numpy/lib/shape_base.py
index 41ef28ef3..65104115a 100644
--- a/numpy/lib/shape_base.py
+++ b/numpy/lib/shape_base.py
@@ -16,10 +16,235 @@ from numpy.matrixlib.defmatrix import matrix # this raises all the right alarm
__all__ = [
'column_stack', 'row_stack', 'dstack', 'array_split', 'split',
'hsplit', 'vsplit', 'dsplit', 'apply_over_axes', 'expand_dims',
- 'apply_along_axis', 'kron', 'tile', 'get_array_wrap'
+ 'apply_along_axis', 'kron', 'tile', 'get_array_wrap', 'take_along_axis',
+ 'put_along_axis'
]
+def _make_along_axis_idx(arr_shape, indices, axis):
+ # compute dimensions to iterate over
+ if not _nx.issubdtype(indices.dtype, _nx.integer):
+ raise IndexError('`indices` must be an integer array')
+ if len(arr_shape) != indices.ndim:
+ raise ValueError(
+ "`indices` and `arr` must have the same number of dimensions")
+ shape_ones = (1,) * indices.ndim
+ dest_dims = list(range(axis)) + [None] + list(range(axis+1, indices.ndim))
+
+ # build a fancy index, consisting of orthogonal aranges, with the
+ # requested index inserted at the right location
+ fancy_index = []
+ for dim, n in zip(dest_dims, arr_shape):
+ if dim is None:
+ fancy_index.append(indices)
+ else:
+ ind_shape = shape_ones[:dim] + (-1,) + shape_ones[dim+1:]
+ fancy_index.append(_nx.arange(n).reshape(ind_shape))
+
+ return tuple(fancy_index)
+
+
+def take_along_axis(arr, indices, axis):
+ """
+ Take values from the input array by matching 1d index and data slices.
+
+ This iterates over matching 1d slices oriented along the specified axis in
+ the index and data arrays, and uses the former to look up values in the
+ latter. These slices can be different lengths.
+
+ Functions returning an index along an axis, like `argsort` and
+ `argpartition`, produce suitable indices for this function.
+
+ .. versionadded:: 1.15.0
+
+ Parameters
+ ----------
+ arr: ndarray (Ni..., M, Nk...)
+ Source array
+ indices: ndarray (Ni..., J, Nk...)
+ Indices to take along each 1d slice of `arr`. This must match the
+ dimension of arr, but dimensions Ni and Nk only need to broadcast
+ against `arr`.
+ axis: int
+ The axis to take 1d slices along. If axis is None, the input array is
+ treated as if it had first been flattened to 1d, for consistency with
+ `sort` and `argsort`.
+
+ Returns
+ -------
+ out: ndarray (Ni..., J, Nk...)
+ The indexed result.
+
+ Notes
+ -----
+ This is equivalent to (but faster than) the following use of `ndindex` and
+ `s_`, which sets each of ``ii`` and ``kk`` to a tuple of indices::
+
+ Ni, M, Nk = a.shape[:axis], a.shape[axis], a.shape[axis+1:]
+ J = indices.shape[axis] # Need not equal M
+ out = np.empty(Ni + (J,) + Nk)
+
+ for ii in ndindex(Ni):
+ for kk in ndindex(Nk):
+ a_1d = a [ii + s_[:,] + kk]
+ indices_1d = indices[ii + s_[:,] + kk]
+ out_1d = out [ii + s_[:,] + kk]
+ for j in range(J):
+ out_1d[j] = a_1d[indices_1d[j]]
+
+ Equivalently, eliminating the inner loop, the last two lines would be::
+
+ out_1d[:] = a_1d[indices_1d]
+
+ See Also
+ --------
+ take : Take along an axis, using the same indices for every 1d slice
+ put_along_axis :
+ Put values into the destination array by matching 1d index and data slices
+
+ Examples
+ --------
+
+ For this sample array
+
+ >>> a = np.array([[10, 30, 20], [60, 40, 50]])
+
+ We can sort either by using sort directly, or argsort and this function
+
+ >>> np.sort(a, axis=1)
+ array([[10, 20, 30],
+ [40, 50, 60]])
+ >>> ai = np.argsort(a, axis=1); ai
+ array([[0, 2, 1],
+ [1, 2, 0]], dtype=int64)
+ >>> np.take_along_axis(a, ai, axis=1)
+ array([[10, 20, 30],
+ [40, 50, 60]])
+
+ The same works for max and min, if you expand the dimensions:
+
+ >>> np.expand_dims(np.max(a, axis=1), axis=1)
+ array([[30],
+ [60]])
+ >>> ai = np.expand_dims(np.argmax(a, axis=1), axis=1)
+ >>> ai
+ array([[1],
+ [0]], dtype=int64)
+ >>> np.take_along_axis(a, ai, axis=1)
+ array([[30],
+ [60]])
+
+ If we want to get the max and min at the same time, we can stack the
+ indices first
+
+ >>> ai_min = np.expand_dims(np.argmin(a, axis=1), axis=1)
+ >>> ai_max = np.expand_dims(np.argmax(a, axis=1), axis=1)
+ >>> ai = np.concatenate([ai_min, ai_max], axis=1)
+ >>> ai
+ array([[0, 1],
+ [1, 0]], dtype=int64)
+ >>> np.take_along_axis(a, ai, axis=1)
+ array([[10, 30],
+ [40, 60]])
+ """
+ # normalize inputs
+ if axis is None:
+ arr = arr.flat
+ arr_shape = (len(arr),) # flatiter has no .shape
+ axis = 0
+ else:
+ axis = normalize_axis_index(axis, arr.ndim)
+ arr_shape = arr.shape
+
+ # use the fancy index
+ return arr[_make_along_axis_idx(arr_shape, indices, axis)]
+
+
+def put_along_axis(arr, indices, values, axis):
+ """
+ Put values into the destination array by matching 1d index and data slices.
+
+ This iterates over matching 1d slices oriented along the specified axis in
+ the index and data arrays, and uses the former to place values into the
+ latter. These slices can be different lengths.
+
+ Functions returning an index along an axis, like `argsort` and
+ `argpartition`, produce suitable indices for this function.
+
+ .. versionadded:: 1.15.0
+
+ Parameters
+ ----------
+ arr: ndarray (Ni..., M, Nk...)
+ Destination array.
+ indices: ndarray (Ni..., J, Nk...)
+ Indices to change along each 1d slice of `arr`. This must match the
+ dimension of arr, but dimensions in Ni and Nk may be 1 to broadcast
+ against `arr`.
+ values: array_like (Ni..., J, Nk...)
+ values to insert at those indices. Its shape and dimension are
+ broadcast to match that of `indices`.
+ axis: int
+ The axis to take 1d slices along. If axis is None, the destination
+ array is treated as if a flattened 1d view had been created of it.
+
+ Notes
+ -----
+ This is equivalent to (but faster than) the following use of `ndindex` and
+ `s_`, which sets each of ``ii`` and ``kk`` to a tuple of indices::
+
+ Ni, M, Nk = a.shape[:axis], a.shape[axis], a.shape[axis+1:]
+ J = indices.shape[axis] # Need not equal M
+
+ for ii in ndindex(Ni):
+ for kk in ndindex(Nk):
+ a_1d = a [ii + s_[:,] + kk]
+ indices_1d = indices[ii + s_[:,] + kk]
+ values_1d = values [ii + s_[:,] + kk]
+ for j in range(J):
+ a_1d[indices_1d[j]] = values_1d[j]
+
+ Equivalently, eliminating the inner loop, the last two lines would be::
+
+ a_1d[indices_1d] = values_1d
+
+ See Also
+ --------
+ take_along_axis :
+ Take values from the input array by matching 1d index and data slices
+
+ Examples
+ --------
+
+ For this sample array
+
+ >>> a = np.array([[10, 30, 20], [60, 40, 50]])
+
+ We can replace the maximum values with:
+
+ >>> ai = np.expand_dims(np.argmax(a, axis=1), axis=1)
+ >>> ai
+ array([[1],
+ [0]], dtype=int64)
+ >>> np.put_along_axis(a, ai, 99, axis=1)
+ >>> a
+ array([[10, 99, 20],
+ [99, 40, 50]])
+
+ """
+ # normalize inputs
+ if axis is None:
+ arr = arr.flat
+ axis = 0
+ arr_shape = (len(arr),) # flatiter has no .shape
+ else:
+ axis = normalize_axis_index(axis, arr.ndim)
+ arr_shape = arr.shape
+
+ # use the fancy index
+ arr[_make_along_axis_idx(arr_shape, indices, axis)] = values
+
+
def apply_along_axis(func1d, axis, arr, *args, **kwargs):
"""
Apply a function to 1-D slices along the given axis.
diff --git a/numpy/lib/tests/test_arraypad.py b/numpy/lib/tests/test_arraypad.py
index 8be49ce67..8ba0370b0 100644
--- a/numpy/lib/tests/test_arraypad.py
+++ b/numpy/lib/tests/test_arraypad.py
@@ -489,6 +489,19 @@ class TestConstant(object):
)
assert_allclose(test, expected)
+ def test_check_large_integers(self):
+ uint64_max = 2 ** 64 - 1
+ arr = np.full(5, uint64_max, dtype=np.uint64)
+ test = np.pad(arr, 1, mode="constant", constant_values=arr.min())
+ expected = np.full(7, uint64_max, dtype=np.uint64)
+ assert_array_equal(test, expected)
+
+ int64_max = 2 ** 63 - 1
+ arr = np.full(5, int64_max, dtype=np.int64)
+ test = np.pad(arr, 1, mode="constant", constant_values=arr.min())
+ expected = np.full(7, int64_max, dtype=np.int64)
+ assert_array_equal(test, expected)
+
class TestLinearRamp(object):
def test_check_simple(self):
diff --git a/numpy/lib/tests/test_arraysetops.py b/numpy/lib/tests/test_arraysetops.py
index 76c36c53e..dace5ade8 100644
--- a/numpy/lib/tests/test_arraysetops.py
+++ b/numpy/lib/tests/test_arraysetops.py
@@ -32,7 +32,46 @@ class TestSetOps(object):
assert_array_equal(c, ed)
assert_array_equal([], intersect1d([], []))
-
+
+ def test_intersect1d_indices(self):
+ # unique inputs
+ a = np.array([1, 2, 3, 4])
+ b = np.array([2, 1, 4, 6])
+ c, i1, i2 = intersect1d(a, b, assume_unique=True, return_indices=True)
+ ee = np.array([1, 2, 4])
+ assert_array_equal(c, ee)
+ assert_array_equal(a[i1], ee)
+ assert_array_equal(b[i2], ee)
+
+ # non-unique inputs
+ a = np.array([1, 2, 2, 3, 4, 3, 2])
+ b = np.array([1, 8, 4, 2, 2, 3, 2, 3])
+ c, i1, i2 = intersect1d(a, b, return_indices=True)
+ ef = np.array([1, 2, 3, 4])
+ assert_array_equal(c, ef)
+ assert_array_equal(a[i1], ef)
+ assert_array_equal(b[i2], ef)
+
+ # non1d, unique inputs
+ a = np.array([[2, 4, 5, 6], [7, 8, 1, 15]])
+ b = np.array([[3, 2, 7, 6], [10, 12, 8, 9]])
+ c, i1, i2 = intersect1d(a, b, assume_unique=True, return_indices=True)
+ ui1 = np.unravel_index(i1, a.shape)
+ ui2 = np.unravel_index(i2, b.shape)
+ ea = np.array([2, 6, 7, 8])
+ assert_array_equal(ea, a[ui1])
+ assert_array_equal(ea, b[ui2])
+
+ # non1d, not assumed to be unique inputs
+ a = np.array([[2, 4, 5, 6, 6], [4, 7, 8, 7, 2]])
+ b = np.array([[3, 2, 7, 7], [10, 12, 8, 7]])
+ c, i1, i2 = intersect1d(a, b, return_indices=True)
+ ui1 = np.unravel_index(i1, a.shape)
+ ui2 = np.unravel_index(i2, b.shape)
+ ea = np.array([2, 7, 8])
+ assert_array_equal(ea, a[ui1])
+ assert_array_equal(ea, b[ui2])
+
def test_setxor1d(self):
a = np.array([5, 7, 1, 2])
b = np.array([2, 4, 3, 1, 5])
@@ -74,8 +113,6 @@ class TestSetOps(object):
assert_array_equal([1,7,8], ediff1d(two_elem, to_end=[7,8]))
assert_array_equal([7,1], ediff1d(two_elem, to_begin=7))
assert_array_equal([5,6,1], ediff1d(two_elem, to_begin=[5,6]))
- assert(isinstance(ediff1d(np.matrix(1)), np.matrix))
- assert(isinstance(ediff1d(np.matrix(1), to_begin=1), np.matrix))
def test_isin(self):
# the tests for in1d cover most of isin's behavior
diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py
index 6653b5ba1..4103a9eb3 100644
--- a/numpy/lib/tests/test_function_base.py
+++ b/numpy/lib/tests/test_function_base.py
@@ -287,9 +287,6 @@ class TestAverage(object):
assert_almost_equal(y5.mean(0), average(y5, 0))
assert_almost_equal(y5.mean(1), average(y5, 1))
- y6 = np.matrix(rand(5, 5))
- assert_array_equal(y6.mean(0), average(y6, 0))
-
def test_weights(self):
y = np.arange(10)
w = np.arange(10)
@@ -357,14 +354,6 @@ class TestAverage(object):
assert_equal(type(np.average(a)), subclass)
assert_equal(type(np.average(a, weights=w)), subclass)
- # also test matrices
- a = np.matrix([[1,2],[3,4]])
- w = np.matrix([[1,2],[3,4]])
-
- r = np.average(a, axis=0, weights=w)
- assert_equal(type(r), np.matrix)
- assert_equal(r, [[2.5, 10.0/3]])
-
def test_upcasting(self):
types = [('i4', 'i4', 'f8'), ('i4', 'f4', 'f8'), ('f4', 'i4', 'f8'),
('f4', 'f4', 'f4'), ('f4', 'f8', 'f8')]
@@ -1525,9 +1514,9 @@ class TestDigitize(object):
class TestUnwrap(object):
def test_simple(self):
- # check that unwrap removes jumps greather that 2*pi
+ # check that unwrap removes jumps greater than 2*pi
assert_array_equal(unwrap([1, 1 + 2 * np.pi]), [1, 1])
- # check that unwrap maintans continuity
+ # check that unwrap maintains continuity
assert_(np.all(diff(unwrap(rand(10) * 100)) < np.pi))
@@ -1623,16 +1612,6 @@ class TestTrapz(object):
xm = np.ma.array(x, mask=mask)
assert_almost_equal(trapz(y, xm), r)
- def test_matrix(self):
- # Test to make sure matrices give the same answer as ndarrays
- x = np.linspace(0, 5)
- y = x * x
- r = trapz(y, x)
- mx = np.matrix(x)
- my = np.matrix(y)
- mr = trapz(my, mx)
- assert_almost_equal(mr, r)
-
class TestSinc(object):
@@ -2749,6 +2728,28 @@ class TestPercentile(object):
a, [0.3, 0.6], (0, 2), interpolation='nearest'), b)
+class TestQuantile(object):
+ # most of this is already tested by TestPercentile
+
+ def test_basic(self):
+ x = np.arange(8) * 0.5
+ assert_equal(np.quantile(x, 0), 0.)
+ assert_equal(np.quantile(x, 1), 3.5)
+ assert_equal(np.quantile(x, 0.5), 1.75)
+
+ def test_no_p_overwrite(self):
+ # this is worth retesting, because quantile does not make a copy
+ p0 = np.array([0, 0.75, 0.25, 0.5, 1.0])
+ p = p0.copy()
+ np.quantile(np.arange(100.), p, interpolation="midpoint")
+ assert_array_equal(p, p0)
+
+ p0 = p0.tolist()
+ p = p.tolist()
+ np.quantile(np.arange(100.), p, interpolation="midpoint")
+ assert_array_equal(p, p0)
+
+
class TestMedian(object):
def test_basic(self):
diff --git a/numpy/lib/tests/test_histograms.py b/numpy/lib/tests/test_histograms.py
index 06daacbdc..e16ae12c2 100644
--- a/numpy/lib/tests/test_histograms.py
+++ b/numpy/lib/tests/test_histograms.py
@@ -253,7 +253,7 @@ class TestHistogram(object):
one_nan = np.array([0, 1, np.nan])
all_nan = np.array([np.nan, np.nan])
- # the internal commparisons with NaN give warnings
+ # the internal comparisons with NaN give warnings
sup = suppress_warnings()
sup.filter(RuntimeWarning)
with sup:
@@ -613,8 +613,6 @@ class TestHistogramdd(object):
assert_raises(ValueError, np.histogramdd, x, bins=[-1, 2, 4, 5])
assert_raises(ValueError, np.histogramdd, x, bins=[1, 0.99, 1, 1])
assert_raises(
- ValueError, np.histogramdd, x, bins=[1, 1, 1, [1, 2, 2, 3]])
- assert_raises(
ValueError, np.histogramdd, x, bins=[1, 1, 1, [1, 2, 3, -3]])
assert_(np.histogramdd(x, bins=[1, 1, 1, [1, 2, 3, 4]]))
@@ -646,7 +644,7 @@ class TestHistogramdd(object):
bins = [[0., 0.5, 1.0]]
hist, _ = histogramdd(x, bins=bins)
assert_(hist[0] == 0.0)
- assert_(hist[1] == 1.)
+ assert_(hist[1] == 0.0)
x = [1.0001]
bins = [[0., 0.5, 1.0]]
hist, _ = histogramdd(x, bins=bins)
@@ -660,3 +658,40 @@ class TestHistogramdd(object):
range=[[0.0, 1.0], [0.25, 0.75], [0.25, np.inf]])
assert_raises(ValueError, histogramdd, vals,
range=[[0.0, 1.0], [np.nan, 0.75], [0.25, 0.5]])
+
+ def test_equal_edges(self):
+ """ Test that adjacent entries in an edge array can be equal """
+ x = np.array([0, 1, 2])
+ y = np.array([0, 1, 2])
+ x_edges = np.array([0, 2, 2])
+ y_edges = 1
+ hist, edges = histogramdd((x, y), bins=(x_edges, y_edges))
+
+ hist_expected = np.array([
+ [2.],
+ [1.], # x == 2 falls in the final bin
+ ])
+ assert_equal(hist, hist_expected)
+
+ def test_edge_dtype(self):
+ """ Test that if an edge array is input, its type is preserved """
+ x = np.array([0, 10, 20])
+ y = x / 10
+ x_edges = np.array([0, 5, 15, 20])
+ y_edges = x_edges / 10
+ hist, edges = histogramdd((x, y), bins=(x_edges, y_edges))
+
+ assert_equal(edges[0].dtype, x_edges.dtype)
+ assert_equal(edges[1].dtype, y_edges.dtype)
+
+ def test_large_integers(self):
+ big = 2**60 # Too large to represent with a full precision float
+
+ x = np.array([0], np.int64)
+ x_edges = np.array([-1, +1], np.int64)
+ y = big + x
+ y_edges = big + x_edges
+
+ hist, edges = histogramdd((x, y), bins=(x_edges, y_edges))
+
+ assert_equal(hist[0, 0], 1)
diff --git a/numpy/lib/tests/test_index_tricks.py b/numpy/lib/tests/test_index_tricks.py
index f934e952a..315251daa 100644
--- a/numpy/lib/tests/test_index_tricks.py
+++ b/numpy/lib/tests/test_index_tricks.py
@@ -6,7 +6,7 @@ from numpy.testing import (
assert_array_almost_equal, assert_raises, assert_raises_regex
)
from numpy.lib.index_tricks import (
- mgrid, ndenumerate, fill_diagonal, diag_indices, diag_indices_from,
+ mgrid, ogrid, ndenumerate, fill_diagonal, diag_indices, diag_indices_from,
index_exp, ndindex, r_, s_, ix_
)
@@ -156,6 +156,15 @@ class TestGrid(object):
assert_array_almost_equal(d[1, :, 1] - d[1, :, 0],
0.2*np.ones(20, 'd'), 11)
+ def test_sparse(self):
+ grid_full = mgrid[-1:1:10j, -2:2:10j]
+ grid_sparse = ogrid[-1:1:10j, -2:2:10j]
+
+ # sparse grids can be made dense by broadcasting
+ grid_broadcast = np.broadcast_arrays(*grid_sparse)
+ for f, b in zip(grid_full, grid_broadcast):
+ assert_equal(f, b)
+
class TestConcatenator(object):
def test_1d(self):
@@ -184,37 +193,6 @@ class TestConcatenator(object):
assert_array_equal(d[:5, :], b)
assert_array_equal(d[5:, :], c)
- def test_matrix(self):
- a = [1, 2]
- b = [3, 4]
-
- ab_r = np.r_['r', a, b]
- ab_c = np.r_['c', a, b]
-
- assert_equal(type(ab_r), np.matrix)
- assert_equal(type(ab_c), np.matrix)
-
- assert_equal(np.array(ab_r), [[1,2,3,4]])
- assert_equal(np.array(ab_c), [[1],[2],[3],[4]])
-
- assert_raises(ValueError, lambda: np.r_['rc', a, b])
-
- def test_matrix_scalar(self):
- r = np.r_['r', [1, 2], 3]
- assert_equal(type(r), np.matrix)
- assert_equal(np.array(r), [[1,2,3]])
-
- def test_matrix_builder(self):
- a = np.array([1])
- b = np.array([2])
- c = np.array([3])
- d = np.array([4])
- actual = np.r_['a, b; c, d']
- expected = np.bmat([[a, b], [c, d]])
-
- assert_equal(actual, expected)
- assert_equal(type(actual), type(expected))
-
def test_0d(self):
assert_equal(r_[0, np.array(1), 2], [0, 1, 2])
assert_equal(r_[[0, 1, 2], np.array(3)], [0, 1, 2, 3])
diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py
index 0ce44f28b..f58c9e33d 100644
--- a/numpy/lib/tests/test_io.py
+++ b/numpy/lib/tests/test_io.py
@@ -937,7 +937,7 @@ class TestLoadTxt(LoadTxtBase):
assert_equal(res, tgt)
def test_complex_misformatted(self):
- # test for backward compatability
+ # test for backward compatibility
# some complex formats used to generate x+-yj
a = np.zeros((2, 2), dtype=np.complex128)
re = np.pi
diff --git a/numpy/lib/tests/test_nanfunctions.py b/numpy/lib/tests/test_nanfunctions.py
index 1f403f7b8..504372faf 100644
--- a/numpy/lib/tests/test_nanfunctions.py
+++ b/numpy/lib/tests/test_nanfunctions.py
@@ -113,42 +113,46 @@ class TestNanFunctions_MinMax(object):
for f in self.nanfuncs:
assert_(f(0.) == 0.)
- def test_matrices(self):
+ def test_subclass(self):
+ class MyNDArray(np.ndarray):
+ pass
+
# Check that it works and that type and
# shape are preserved
- mat = np.matrix(np.eye(3))
+ mine = np.eye(3).view(MyNDArray)
for f in self.nanfuncs:
- res = f(mat, axis=0)
- assert_(isinstance(res, np.matrix))
- assert_(res.shape == (1, 3))
- res = f(mat, axis=1)
- assert_(isinstance(res, np.matrix))
- assert_(res.shape == (3, 1))
- res = f(mat)
- assert_(np.isscalar(res))
+ res = f(mine, axis=0)
+ assert_(isinstance(res, MyNDArray))
+ assert_(res.shape == (3,))
+ res = f(mine, axis=1)
+ assert_(isinstance(res, MyNDArray))
+ assert_(res.shape == (3,))
+ res = f(mine)
+ assert_(res.shape == ())
+
# check that rows of nan are dealt with for subclasses (#4628)
- mat[1] = np.nan
+ mine[1] = np.nan
for f in self.nanfuncs:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
- res = f(mat, axis=0)
- assert_(isinstance(res, np.matrix))
+ res = f(mine, axis=0)
+ assert_(isinstance(res, MyNDArray))
assert_(not np.any(np.isnan(res)))
assert_(len(w) == 0)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
- res = f(mat, axis=1)
- assert_(isinstance(res, np.matrix))
- assert_(np.isnan(res[1, 0]) and not np.isnan(res[0, 0])
- and not np.isnan(res[2, 0]))
+ res = f(mine, axis=1)
+ assert_(isinstance(res, MyNDArray))
+ assert_(np.isnan(res[1]) and not np.isnan(res[0])
+ and not np.isnan(res[2]))
assert_(len(w) == 1, 'no warning raised')
assert_(issubclass(w[0].category, RuntimeWarning))
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
- res = f(mat)
- assert_(np.isscalar(res))
+ res = f(mine)
+ assert_(res.shape == ())
assert_(res != np.nan)
assert_(len(w) == 0)
@@ -209,19 +213,22 @@ class TestNanFunctions_ArgminArgmax(object):
for f in self.nanfuncs:
assert_(f(0.) == 0.)
- def test_matrices(self):
+ def test_subclass(self):
+ class MyNDArray(np.ndarray):
+ pass
+
# Check that it works and that type and
# shape are preserved
- mat = np.matrix(np.eye(3))
+ mine = np.eye(3).view(MyNDArray)
for f in self.nanfuncs:
- res = f(mat, axis=0)
- assert_(isinstance(res, np.matrix))
- assert_(res.shape == (1, 3))
- res = f(mat, axis=1)
- assert_(isinstance(res, np.matrix))
- assert_(res.shape == (3, 1))
- res = f(mat)
- assert_(np.isscalar(res))
+ res = f(mine, axis=0)
+ assert_(isinstance(res, MyNDArray))
+ assert_(res.shape == (3,))
+ res = f(mine, axis=1)
+ assert_(isinstance(res, MyNDArray))
+ assert_(res.shape == (3,))
+ res = f(mine)
+ assert_(res.shape == ())
class TestNanFunctions_IntTypes(object):
@@ -381,19 +388,27 @@ class SharedNanFunctionsTestsMixin(object):
for f in self.nanfuncs:
assert_(f(0.) == 0.)
- def test_matrices(self):
+ def test_subclass(self):
+ class MyNDArray(np.ndarray):
+ pass
+
# Check that it works and that type and
# shape are preserved
- mat = np.matrix(np.eye(3))
+ array = np.eye(3)
+ mine = array.view(MyNDArray)
for f in self.nanfuncs:
- res = f(mat, axis=0)
- assert_(isinstance(res, np.matrix))
- assert_(res.shape == (1, 3))
- res = f(mat, axis=1)
- assert_(isinstance(res, np.matrix))
- assert_(res.shape == (3, 1))
- res = f(mat)
- assert_(np.isscalar(res))
+ expected_shape = f(array, axis=0).shape
+ res = f(mine, axis=0)
+ assert_(isinstance(res, MyNDArray))
+ assert_(res.shape == expected_shape)
+ expected_shape = f(array, axis=1).shape
+ res = f(mine, axis=1)
+ assert_(isinstance(res, MyNDArray))
+ assert_(res.shape == expected_shape)
+ expected_shape = f(array).shape
+ res = f(mine)
+ assert_(isinstance(res, MyNDArray))
+ assert_(res.shape == expected_shape)
class TestNanFunctions_SumProd(SharedNanFunctionsTestsMixin):
@@ -481,18 +496,6 @@ class TestNanFunctions_CumSumProd(SharedNanFunctionsTestsMixin):
res = f(d, axis=axis)
assert_equal(res.shape, (3, 5, 7, 11))
- def test_matrices(self):
- # Check that it works and that type and
- # shape are preserved
- mat = np.matrix(np.eye(3))
- for f in self.nanfuncs:
- for axis in np.arange(2):
- res = f(mat, axis=axis)
- assert_(isinstance(res, np.matrix))
- assert_(res.shape == (3, 3))
- res = f(mat)
- assert_(res.shape == (1, 3*3))
-
def test_result_values(self):
for axis in (-2, -1, 0, 1, None):
tgt = np.cumprod(_ndat_ones, axis=axis)
@@ -886,3 +889,39 @@ class TestNanFunctions_Percentile(object):
megamat = np.ones((3, 4, 5, 6))
assert_equal(np.nanpercentile(megamat, perc, axis=(1, 2)).shape, (2, 3, 6))
+
+
+class TestNanFunctions_Quantile(object):
+ # most of this is already tested by TestPercentile
+
+ def test_regression(self):
+ ar = np.arange(24).reshape(2, 3, 4).astype(float)
+ ar[0][1] = np.nan
+
+ assert_equal(np.nanquantile(ar, q=0.5), np.nanpercentile(ar, q=50))
+ assert_equal(np.nanquantile(ar, q=0.5, axis=0),
+ np.nanpercentile(ar, q=50, axis=0))
+ assert_equal(np.nanquantile(ar, q=0.5, axis=1),
+ np.nanpercentile(ar, q=50, axis=1))
+ assert_equal(np.nanquantile(ar, q=[0.5], axis=1),
+ np.nanpercentile(ar, q=[50], axis=1))
+ assert_equal(np.nanquantile(ar, q=[0.25, 0.5, 0.75], axis=1),
+ np.nanpercentile(ar, q=[25, 50, 75], axis=1))
+
+ def test_basic(self):
+ x = np.arange(8) * 0.5
+ assert_equal(np.nanquantile(x, 0), 0.)
+ assert_equal(np.nanquantile(x, 1), 3.5)
+ assert_equal(np.nanquantile(x, 0.5), 1.75)
+
+ def test_no_p_overwrite(self):
+ # this is worth retesting, because quantile does not make a copy
+ p0 = np.array([0, 0.75, 0.25, 0.5, 1.0])
+ p = p0.copy()
+ np.nanquantile(np.arange(100.), p, interpolation="midpoint")
+ assert_array_equal(p, p0)
+
+ p0 = p0.tolist()
+ p = p.tolist()
+ np.nanquantile(np.arange(100.), p, interpolation="midpoint")
+ assert_array_equal(p, p0)
diff --git a/numpy/lib/tests/test_shape_base.py b/numpy/lib/tests/test_shape_base.py
index 080fd066d..c95894f94 100644
--- a/numpy/lib/tests/test_shape_base.py
+++ b/numpy/lib/tests/test_shape_base.py
@@ -2,16 +2,106 @@ from __future__ import division, absolute_import, print_function
import numpy as np
import warnings
+import functools
from numpy.lib.shape_base import (
apply_along_axis, apply_over_axes, array_split, split, hsplit, dsplit,
- vsplit, dstack, column_stack, kron, tile, expand_dims,
+ vsplit, dstack, column_stack, kron, tile, expand_dims, take_along_axis,
+ put_along_axis
)
from numpy.testing import (
assert_, assert_equal, assert_array_equal, assert_raises, assert_warns
)
+def _add_keepdims(func):
+ """ hack in keepdims behavior into a function taking an axis """
+ @functools.wraps(func)
+ def wrapped(a, axis, **kwargs):
+ res = func(a, axis=axis, **kwargs)
+ if axis is None:
+ axis = 0 # res is now a scalar, so we can insert this anywhere
+ return np.expand_dims(res, axis=axis)
+ return wrapped
+
+
+class TestTakeAlongAxis(object):
+ def test_argequivalent(self):
+ """ Test it translates from arg<func> to <func> """
+ from numpy.random import rand
+ a = rand(3, 4, 5)
+
+ funcs = [
+ (np.sort, np.argsort, dict()),
+ (_add_keepdims(np.min), _add_keepdims(np.argmin), dict()),
+ (_add_keepdims(np.max), _add_keepdims(np.argmax), dict()),
+ (np.partition, np.argpartition, dict(kth=2)),
+ ]
+
+ for func, argfunc, kwargs in funcs:
+ for axis in list(range(a.ndim)) + [None]:
+ a_func = func(a, axis=axis, **kwargs)
+ ai_func = argfunc(a, axis=axis, **kwargs)
+ assert_equal(a_func, take_along_axis(a, ai_func, axis=axis))
+
+ def test_invalid(self):
+ """ Test it errors when indices has too few dimensions """
+ a = np.ones((10, 10))
+ ai = np.ones((10, 2), dtype=np.intp)
+
+ # sanity check
+ take_along_axis(a, ai, axis=1)
+
+ # not enough indices
+ assert_raises(ValueError, take_along_axis, a, np.array(1), axis=1)
+ # bool arrays not allowed
+ assert_raises(IndexError, take_along_axis, a, ai.astype(bool), axis=1)
+ # float arrays not allowed
+ assert_raises(IndexError, take_along_axis, a, ai.astype(float), axis=1)
+ # invalid axis
+ assert_raises(np.AxisError, take_along_axis, a, ai, axis=10)
+
+ def test_empty(self):
+ """ Test everything is ok with empty results, even with inserted dims """
+ a = np.ones((3, 4, 5))
+ ai = np.ones((3, 0, 5), dtype=np.intp)
+
+ actual = take_along_axis(a, ai, axis=1)
+ assert_equal(actual.shape, ai.shape)
+
+ def test_broadcast(self):
+ """ Test that non-indexing dimensions are broadcast in both directions """
+ a = np.ones((3, 4, 1))
+ ai = np.ones((1, 2, 5), dtype=np.intp)
+ actual = take_along_axis(a, ai, axis=1)
+ assert_equal(actual.shape, (3, 2, 5))
+
+
+class TestPutAlongAxis(object):
+ def test_replace_max(self):
+ a_base = np.array([[10, 30, 20], [60, 40, 50]])
+
+ for axis in list(range(a_base.ndim)) + [None]:
+ # we mutate this in the loop
+ a = a_base.copy()
+
+ # replace the max with a small value
+ i_max = _add_keepdims(np.argmax)(a, axis=axis)
+ put_along_axis(a, i_max, -99, axis=axis)
+
+ # find the new minimum, which should be where the max was
+ i_min = _add_keepdims(np.argmin)(a, axis=axis)
+
+ assert_equal(i_min, i_max)
+
+ def test_broadcast(self):
+ """ Test that non-indexing dimensions are broadcast in both directions """
+ a = np.ones((3, 4, 1))
+ ai = np.arange(10, dtype=np.intp).reshape((1, 2, 5)) % 4
+ put_along_axis(a, ai, 20, axis=1)
+ assert_equal(take_along_axis(a, ai, axis=1), 20)
+
+
class TestApplyAlongAxis(object):
def test_simple(self):
a = np.ones((20, 10), 'd')
@@ -29,19 +119,21 @@ class TestApplyAlongAxis(object):
[[27, 30, 33], [36, 39, 42], [45, 48, 51]])
def test_preserve_subclass(self):
- # this test is particularly malicious because matrix
- # refuses to become 1d
def double(row):
return row * 2
- m = np.matrix([[0, 1], [2, 3]])
- expected = np.matrix([[0, 2], [4, 6]])
+
+ class MyNDArray(np.ndarray):
+ pass
+
+ m = np.array([[0, 1], [2, 3]]).view(MyNDArray)
+ expected = np.array([[0, 2], [4, 6]]).view(MyNDArray)
result = apply_along_axis(double, 0, m)
- assert_(isinstance(result, np.matrix))
+ assert_(isinstance(result, MyNDArray))
assert_array_equal(result, expected)
result = apply_along_axis(double, 1, m)
- assert_(isinstance(result, np.matrix))
+ assert_(isinstance(result, MyNDArray))
assert_array_equal(result, expected)
def test_subclass(self):
@@ -79,7 +171,7 @@ class TestApplyAlongAxis(object):
def test_axis_insertion(self, cls=np.ndarray):
def f1to2(x):
- """produces an assymmetric non-square matrix from x"""
+ """produces an asymmetric non-square matrix from x"""
assert_equal(x.ndim, 1)
return (x[::-1] * x[1:,None]).view(cls)
@@ -123,7 +215,7 @@ class TestApplyAlongAxis(object):
def test_axis_insertion_ma(self):
def f1to2(x):
- """produces an assymmetric non-square matrix from x"""
+ """produces an asymmetric non-square matrix from x"""
assert_equal(x.ndim, 1)
res = x[::-1] * x[1:,None]
return np.ma.masked_where(res%5==0, res)
@@ -492,16 +584,10 @@ class TestSqueeze(object):
class TestKron(object):
def test_return_type(self):
- a = np.ones([2, 2])
- m = np.asmatrix(a)
- assert_equal(type(kron(a, a)), np.ndarray)
- assert_equal(type(kron(m, m)), np.matrix)
- assert_equal(type(kron(a, m)), np.matrix)
- assert_equal(type(kron(m, a)), np.matrix)
-
class myarray(np.ndarray):
__array_priority__ = 0.0
+ a = np.ones([2, 2])
ma = myarray(a.shape, a.dtype, a.data)
assert_equal(type(kron(a, a)), np.ndarray)
assert_equal(type(kron(ma, ma)), myarray)
diff --git a/numpy/lib/twodim_base.py b/numpy/lib/twodim_base.py
index 402c18850..cca316e9a 100644
--- a/numpy/lib/twodim_base.py
+++ b/numpy/lib/twodim_base.py
@@ -650,7 +650,7 @@ def histogram2d(x, y, bins=10, range=None, normed=False, weights=None):
N = 1
if N != 1 and N != 2:
- xedges = yedges = asarray(bins, float)
+ xedges = yedges = asarray(bins)
bins = [xedges, yedges]
hist, edges = histogramdd([x, y], bins, range, normed, weights)
return hist, edges[0], edges[1]
diff --git a/numpy/linalg/linalg.py b/numpy/linalg/linalg.py
index 5ee230f92..98af0733b 100644
--- a/numpy/linalg/linalg.py
+++ b/numpy/linalg/linalg.py
@@ -16,20 +16,20 @@ __all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv',
'svd', 'eig', 'eigh', 'lstsq', 'norm', 'qr', 'cond', 'matrix_rank',
'LinAlgError', 'multi_dot']
+import operator
import warnings
from numpy.core import (
array, asarray, zeros, empty, empty_like, intc, single, double,
- csingle, cdouble, inexact, complexfloating, newaxis, ravel, all, Inf, dot,
- add, multiply, sqrt, maximum, fastCopyAndTranspose, sum, isfinite, size,
- finfo, errstate, geterrobj, longdouble, moveaxis, amin, amax, product, abs,
- broadcast, atleast_2d, intp, asanyarray, object_, ones, matmul,
- swapaxes, divide, count_nonzero, ndarray, isnan
+ csingle, cdouble, inexact, complexfloating, newaxis, all, Inf, dot,
+ add, multiply, sqrt, fastCopyAndTranspose, sum, isfinite,
+ finfo, errstate, geterrobj, moveaxis, amin, amax, product, abs,
+ atleast_2d, intp, asanyarray, object_, matmul,
+ swapaxes, divide, count_nonzero, isnan
)
from numpy.core.multiarray import normalize_axis_index
-from numpy.lib import triu, asfarray
+from numpy.lib.twodim_base import triu, eye
from numpy.linalg import lapack_lite, _umath_linalg
-from numpy.matrixlib.defmatrix import matrix_power
# For Python2/3 compatibility
_N = b'N'
@@ -210,7 +210,8 @@ def _assertSquareness(*arrays):
def _assertNdSquareness(*arrays):
for a in arrays:
- if max(a.shape[-2:]) != min(a.shape[-2:]):
+ m, n = a.shape[-2:]
+ if m != n:
raise LinAlgError('Last 2 dimensions of the array must be square')
def _assertFinite(*arrays):
@@ -532,6 +533,109 @@ def inv(a):
return wrap(ainv.astype(result_t, copy=False))
+def matrix_power(a, n):
+ """
+ Raise a square matrix to the (integer) power `n`.
+
+ For positive integers `n`, the power is computed by repeated matrix
+ squarings and matrix multiplications. If ``n == 0``, the identity matrix
+ of the same shape as `a` is returned. If ``n < 0``, the inverse
+ is computed and then raised to the power ``abs(n)``.
+
+ Parameters
+ ----------
+ a : (..., M, M) array_like
+ Matrix to be "powered."
+ n : int
+ The exponent can be any integer or long integer, positive,
+ negative, or zero.
+
+ Returns
+ -------
+ a**n : (..., M, M) ndarray or matrix object
+ The return value is the same shape and type as `a`;
+ if the exponent is positive or zero then the type of the
+ elements is the same as those of `a`. If the exponent is
+ negative the elements are floating-point.
+
+ Raises
+ ------
+ LinAlgError
+ For matrices that are not square or that (for negative powers) cannot
+ be inverted numerically.
+
+ Examples
+ --------
+ >>> from numpy.linalg import matrix_power
+ >>> i = np.array([[0, 1], [-1, 0]]) # matrix equiv. of the imaginary unit
+ >>> matrix_power(i, 3) # should = -i
+ array([[ 0, -1],
+ [ 1, 0]])
+ >>> matrix_power(i, 0)
+ array([[1, 0],
+ [0, 1]])
+ >>> matrix_power(i, -3) # should = 1/(-i) = i, but w/ f.p. elements
+ array([[ 0., 1.],
+ [-1., 0.]])
+
+ Somewhat more sophisticated example
+
+ >>> q = np.zeros((4, 4))
+ >>> q[0:2, 0:2] = -i
+ >>> q[2:4, 2:4] = i
+ >>> q # one of the three quaternion units not equal to 1
+ array([[ 0., -1., 0., 0.],
+ [ 1., 0., 0., 0.],
+ [ 0., 0., 0., 1.],
+ [ 0., 0., -1., 0.]])
+ >>> matrix_power(q, 2) # = -np.eye(4)
+ array([[-1., 0., 0., 0.],
+ [ 0., -1., 0., 0.],
+ [ 0., 0., -1., 0.],
+ [ 0., 0., 0., -1.]])
+
+ """
+ a = asanyarray(a)
+ _assertRankAtLeast2(a)
+ _assertNdSquareness(a)
+
+ try:
+ n = operator.index(n)
+ except TypeError:
+ raise TypeError("exponent must be an integer")
+
+ if n == 0:
+ a = empty_like(a)
+ a[...] = eye(a.shape[-2], dtype=a.dtype)
+ return a
+
+ elif n < 0:
+ a = inv(a)
+ n = abs(n)
+
+ # short-cuts.
+ if n == 1:
+ return a
+
+ elif n == 2:
+ return matmul(a, a)
+
+ elif n == 3:
+ return matmul(matmul(a, a), a)
+
+ # Use binary decomposition to reduce the number of matrix multiplications.
+ # Here, we iterate over the bits of n, from LSB to MSB, raise `a` to
+ # increasing powers of 2, and multiply into the result as needed.
+ z = result = None
+ while n > 0:
+ z = a if z is None else matmul(z, z)
+ n, bit = divmod(n, 2)
+ if bit:
+ result = z if result is None else matmul(result, z)
+
+ return result
+
+
# Cholesky decomposition
def cholesky(a):
@@ -1429,8 +1533,7 @@ def svd(a, full_matrices=True, compute_uv=True):
extobj = get_linalg_error_extobj(_raise_linalgerror_svd_nonconvergence)
- m = a.shape[-2]
- n = a.shape[-1]
+ m, n = a.shape[-2:]
if compute_uv:
if full_matrices:
if m < n:
@@ -1750,7 +1853,8 @@ def pinv(a, rcond=1e-15 ):
a, wrap = _makearray(a)
rcond = asarray(rcond)
if _isEmpty2d(a):
- res = empty(a.shape[:-2] + (a.shape[-1], a.shape[-2]), dtype=a.dtype)
+ m, n = a.shape[-2:]
+ res = empty(a.shape[:-2] + (n, m), dtype=a.dtype)
return wrap(res)
a = a.conjugate()
u, s, vt = svd(a, full_matrices=False)
@@ -2007,10 +2111,9 @@ def lstsq(a, b, rcond="warn"):
b = b[:, newaxis]
_assertRank2(a, b)
_assertNoEmpty2d(a, b) # TODO: relax this constraint
- m = a.shape[0]
- n = a.shape[1]
- n_rhs = b.shape[1]
- if m != b.shape[0]:
+ m, n = a.shape[-2:]
+ m2, n_rhs = b.shape[-2:]
+ if m != m2:
raise LinAlgError('Incompatible dimensions')
t, result_t = _commonType(a, b)
diff --git a/numpy/linalg/tests/test_linalg.py b/numpy/linalg/tests/test_linalg.py
index 4a87330c7..87dfe988a 100644
--- a/numpy/linalg/tests/test_linalg.py
+++ b/numpy/linalg/tests/test_linalg.py
@@ -7,11 +7,12 @@ import os
import sys
import itertools
import traceback
-import warnings
+import textwrap
+import subprocess
import pytest
import numpy as np
-from numpy import array, single, double, csingle, cdouble, dot, identity
+from numpy import array, single, double, csingle, cdouble, dot, identity, matmul
from numpy import multiply, atleast_2d, inf, asarray, matrix
from numpy import linalg
from numpy.linalg import matrix_power, norm, matrix_rank, multi_dot, LinAlgError
@@ -22,12 +23,11 @@ from numpy.testing import (
)
-def ifthen(a, b):
- return not a or b
-
-
-def imply(a, b):
- return not a or b
+def consistent_subclass(out, in_):
+ # For ndarray subclass input, our output should have the same subclass
+ # (non-ndarray input gets converted to ndarray).
+ return type(out) is (type(in_) if isinstance(in_, np.ndarray)
+ else np.ndarray)
old_assert_almost_equal = assert_almost_equal
@@ -65,6 +65,7 @@ all_tags = {
'generalized', 'size-0', 'strided' # optional additions
}
+
class LinalgCase(object):
def __init__(self, name, a, b, tags=set()):
"""
@@ -86,6 +87,7 @@ class LinalgCase(object):
def __repr__(self):
return "<LinalgCase: %s>" % (self.name,)
+
def apply_tag(tag, cases):
"""
Add the given tag (a string) to each of the cases (a list of LinalgCase
@@ -129,10 +131,6 @@ CASES += apply_tag('square', [
np.empty((0, 0), dtype=double),
np.empty((0,), dtype=double),
tags={'size-0'}),
- LinalgCase("0x0_matrix",
- np.empty((0, 0), dtype=double).view(np.matrix),
- np.empty((0, 1), dtype=double).view(np.matrix),
- tags={'size-0'}),
LinalgCase("8x8",
np.random.rand(8, 8),
np.random.rand(8)),
@@ -142,12 +140,6 @@ CASES += apply_tag('square', [
LinalgCase("nonarray",
[[1, 2], [3, 4]],
[2, 1]),
- LinalgCase("matrix_b_only",
- array([[1., 2.], [3., 4.]]),
- matrix([2., 1.]).T),
- LinalgCase("matrix_a_and_b",
- matrix([[1., 2.], [3., 4.]]),
- matrix([2., 1.]).T),
])
# non-square test-cases
@@ -231,9 +223,6 @@ CASES += apply_tag('hermitian', [
LinalgCase("matrix_b_only",
array([[1., 2.], [2., 1.]]),
None),
- LinalgCase("hmatrix_a_and_b",
- matrix([[1., 2.], [2., 1.]]),
- None),
LinalgCase("hmatrix_1x1",
np.random.rand(1, 1),
None),
@@ -270,12 +259,13 @@ def _make_generalized_cases():
return new_cases
+
CASES += _make_generalized_cases()
+
#
# Generate stride combination variations of the above
#
-
def _stride_comb_iter(x):
"""
Generate cartesian product of strides for all axes
@@ -323,6 +313,7 @@ def _stride_comb_iter(x):
xi = np.lib.stride_tricks.as_strided(x, strides=s)
yield xi, "stride_xxx_0_0"
+
def _make_strided_cases():
new_cases = []
for case in CASES:
@@ -333,94 +324,104 @@ def _make_strided_cases():
new_cases.append(new_case)
return new_cases
+
CASES += _make_strided_cases()
#
# Test different routines against the above cases
#
+class LinalgTestCase(object):
+ TEST_CASES = CASES
-def _check_cases(func, require=set(), exclude=set()):
- """
- Run func on each of the cases with all of the tags in require, and none
- of the tags in exclude
- """
- for case in CASES:
- # filter by require and exclude
- if case.tags & require != require:
- continue
- if case.tags & exclude:
- continue
+ def check_cases(self, require=set(), exclude=set()):
+ """
+ Run func on each of the cases with all of the tags in require, and none
+ of the tags in exclude
+ """
+ for case in self.TEST_CASES:
+ # filter by require and exclude
+ if case.tags & require != require:
+ continue
+ if case.tags & exclude:
+ continue
- try:
- case.check(func)
- except Exception:
- msg = "In test case: %r\n\n" % case
- msg += traceback.format_exc()
- raise AssertionError(msg)
+ try:
+ case.check(self.do)
+ except Exception:
+ msg = "In test case: %r\n\n" % case
+ msg += traceback.format_exc()
+ raise AssertionError(msg)
-class LinalgSquareTestCase(object):
+class LinalgSquareTestCase(LinalgTestCase):
def test_sq_cases(self):
- _check_cases(self.do, require={'square'}, exclude={'generalized', 'size-0'})
+ self.check_cases(require={'square'},
+ exclude={'generalized', 'size-0'})
def test_empty_sq_cases(self):
- _check_cases(self.do, require={'square', 'size-0'}, exclude={'generalized'})
+ self.check_cases(require={'square', 'size-0'},
+ exclude={'generalized'})
-class LinalgNonsquareTestCase(object):
+class LinalgNonsquareTestCase(LinalgTestCase):
def test_nonsq_cases(self):
- _check_cases(self.do, require={'nonsquare'}, exclude={'generalized', 'size-0'})
+ self.check_cases(require={'nonsquare'},
+ exclude={'generalized', 'size-0'})
def test_empty_nonsq_cases(self):
- _check_cases(self.do, require={'nonsquare', 'size-0'}, exclude={'generalized'})
+ self.check_cases(require={'nonsquare', 'size-0'},
+ exclude={'generalized'})
-class HermitianTestCase(object):
+
+class HermitianTestCase(LinalgTestCase):
def test_herm_cases(self):
- _check_cases(self.do, require={'hermitian'}, exclude={'generalized', 'size-0'})
+ self.check_cases(require={'hermitian'},
+ exclude={'generalized', 'size-0'})
def test_empty_herm_cases(self):
- _check_cases(self.do, require={'hermitian', 'size-0'}, exclude={'generalized'})
+ self.check_cases(require={'hermitian', 'size-0'},
+ exclude={'generalized'})
-class LinalgGeneralizedSquareTestCase(object):
+class LinalgGeneralizedSquareTestCase(LinalgTestCase):
@pytest.mark.slow
def test_generalized_sq_cases(self):
- _check_cases(self.do, require={'generalized', 'square'}, exclude={'size-0'})
+ self.check_cases(require={'generalized', 'square'},
+ exclude={'size-0'})
@pytest.mark.slow
def test_generalized_empty_sq_cases(self):
- _check_cases(self.do, require={'generalized', 'square', 'size-0'})
+ self.check_cases(require={'generalized', 'square', 'size-0'})
-class LinalgGeneralizedNonsquareTestCase(object):
+class LinalgGeneralizedNonsquareTestCase(LinalgTestCase):
@pytest.mark.slow
def test_generalized_nonsq_cases(self):
- _check_cases(self.do, require={'generalized', 'nonsquare'}, exclude={'size-0'})
+ self.check_cases(require={'generalized', 'nonsquare'},
+ exclude={'size-0'})
@pytest.mark.slow
def test_generalized_empty_nonsq_cases(self):
- _check_cases(self.do, require={'generalized', 'nonsquare', 'size-0'})
+ self.check_cases(require={'generalized', 'nonsquare', 'size-0'})
-class HermitianGeneralizedTestCase(object):
+class HermitianGeneralizedTestCase(LinalgTestCase):
@pytest.mark.slow
def test_generalized_herm_cases(self):
- _check_cases(self.do,
- require={'generalized', 'hermitian'},
- exclude={'size-0'})
+ self.check_cases(require={'generalized', 'hermitian'},
+ exclude={'size-0'})
@pytest.mark.slow
def test_generalized_empty_herm_cases(self):
- _check_cases(self.do,
- require={'generalized', 'hermitian', 'size-0'},
- exclude={'none'})
+ self.check_cases(require={'generalized', 'hermitian', 'size-0'},
+ exclude={'none'})
def dot_generalized(a, b):
@@ -446,20 +447,21 @@ def identity_like_generalized(a):
a = asarray(a)
if a.ndim >= 3:
r = np.empty(a.shape, dtype=a.dtype)
- for c in itertools.product(*map(range, a.shape[:-2])):
- r[c] = identity(a.shape[-2])
+ r[...] = identity(a.shape[-2])
return r
else:
return identity(a.shape[0])
-class TestSolve(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
-
+class SolveCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
+ # kept apart from TestSolve for use for testing with matrices.
def do(self, a, b, tags):
x = linalg.solve(a, b)
assert_almost_equal(b, dot_generalized(a, x))
- assert_(imply(isinstance(b, matrix), isinstance(x, matrix)))
+ assert_(consistent_subclass(x, b))
+
+class TestSolve(SolveCases):
def test_types(self):
def check(dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
@@ -519,14 +521,16 @@ class TestSolve(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
assert_(isinstance(result, ArraySubclass))
-class TestInv(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
+class InvCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
def do(self, a, b, tags):
a_inv = linalg.inv(a)
assert_almost_equal(dot_generalized(a, a_inv),
identity_like_generalized(a))
- assert_(imply(isinstance(a, matrix), isinstance(a_inv, matrix)))
+ assert_(consistent_subclass(a_inv, a))
+
+class TestInv(InvCases):
def test_types(self):
def check(dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
@@ -551,13 +555,15 @@ class TestInv(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
assert_(isinstance(res, ArraySubclass))
-class TestEigvals(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
+class EigvalsCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
def do(self, a, b, tags):
ev = linalg.eigvals(a)
evalues, evectors = linalg.eig(a)
assert_almost_equal(ev, evalues)
+
+class TestEigvals(EigvalsCases):
def test_types(self):
def check(dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
@@ -586,15 +592,17 @@ class TestEigvals(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
assert_(isinstance(res, np.ndarray))
-class TestEig(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
+class EigCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
def do(self, a, b, tags):
evalues, evectors = linalg.eig(a)
assert_allclose(dot_generalized(a, evectors),
np.asarray(evectors) * np.asarray(evalues)[..., None, :],
rtol=get_rtol(evalues.dtype))
- assert_(imply(isinstance(a, matrix), isinstance(evectors, matrix)))
+ assert_(consistent_subclass(evectors, a))
+
+class TestEig(EigCases):
def test_types(self):
def check(dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
@@ -633,7 +641,7 @@ class TestEig(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
assert_(isinstance(a, np.ndarray))
-class TestSVD(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
+class SVDCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
def do(self, a, b, tags):
if 'size-0' in tags:
@@ -644,9 +652,11 @@ class TestSVD(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
assert_allclose(a, dot_generalized(np.asarray(u) * np.asarray(s)[..., None, :],
np.asarray(vt)),
rtol=get_rtol(u.dtype))
- assert_(imply(isinstance(a, matrix), isinstance(u, matrix)))
- assert_(imply(isinstance(a, matrix), isinstance(vt, matrix)))
+ assert_(consistent_subclass(u, a))
+ assert_(consistent_subclass(vt, a))
+
+class TestSVD(SVDCases):
def test_types(self):
def check(dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
@@ -671,7 +681,7 @@ class TestSVD(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
assert_raises(linalg.LinAlgError, linalg.svd, a)
-class TestCond(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
+class CondCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
# cond(x, p) for p in (None, 2, -2)
def do(self, a, b, tags):
@@ -716,6 +726,8 @@ class TestCond(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
* (abs(cinv)**2).sum(-1).sum(-1)),
single_decimal=5, double_decimal=11)
+
+class TestCond(CondCases):
def test_basic_nonsvd(self):
# Smoketest the non-svd norms
A = array([[1., 0, 1], [0, -2., 0], [0, 0, 3.]])
@@ -779,20 +791,24 @@ class TestCond(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
assert_(np.isfinite(c[1,0]))
-class TestPinv(LinalgSquareTestCase,
- LinalgNonsquareTestCase,
- LinalgGeneralizedSquareTestCase,
- LinalgGeneralizedNonsquareTestCase):
+class PinvCases(LinalgSquareTestCase,
+ LinalgNonsquareTestCase,
+ LinalgGeneralizedSquareTestCase,
+ LinalgGeneralizedNonsquareTestCase):
def do(self, a, b, tags):
a_ginv = linalg.pinv(a)
# `a @ a_ginv == I` does not hold if a is singular
dot = dot_generalized
assert_almost_equal(dot(dot(a, a_ginv), a), a, single_decimal=5, double_decimal=11)
- assert_(imply(isinstance(a, matrix), isinstance(a_ginv, matrix)))
+ assert_(consistent_subclass(a_ginv, a))
+
+
+class TestPinv(PinvCases):
+ pass
-class TestDet(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
+class DetCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
def do(self, a, b, tags):
d = linalg.det(a)
@@ -811,6 +827,8 @@ class TestDet(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
assert_almost_equal(np.abs(s[m]), 1)
assert_equal(ld[~m], -inf)
+
+class TestDet(DetCases):
def test_zero(self):
assert_equal(linalg.det([[0.0]]), 0.0)
assert_equal(type(linalg.det([[0.0]])), double)
@@ -854,7 +872,7 @@ class TestDet(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
assert_(res[1].dtype.type is np.float64)
-class TestLstsq(LinalgSquareTestCase, LinalgNonsquareTestCase):
+class LstsqCases(LinalgSquareTestCase, LinalgNonsquareTestCase):
def do(self, a, b, tags):
if 'size-0' in tags:
@@ -882,9 +900,11 @@ class TestLstsq(LinalgSquareTestCase, LinalgNonsquareTestCase):
expect_resids = np.array([]).view(type(x))
assert_almost_equal(residuals, expect_resids)
assert_(np.issubdtype(residuals.dtype, np.floating))
- assert_(imply(isinstance(b, matrix), isinstance(x, matrix)))
- assert_(imply(isinstance(b, matrix), isinstance(residuals, matrix)))
+ assert_(consistent_subclass(x, b))
+ assert_(consistent_subclass(residuals, b))
+
+class TestLstsq(LstsqCases):
def test_future_rcond(self):
a = np.array([[0., 1., 0., 1., 2., 0.],
[0., 2., 0., 0., 1., 0.],
@@ -903,20 +923,26 @@ class TestLstsq(LinalgSquareTestCase, LinalgNonsquareTestCase):
# Warning should be raised exactly once (first command)
assert_(len(w) == 1)
+
class TestMatrixPower(object):
R90 = array([[0, 1], [-1, 0]])
Arb22 = array([[4, -7], [-2, 10]])
noninv = array([[1, 0], [0, 0]])
- arbfloat = array([[0.1, 3.2], [1.2, 0.7]])
+ arbfloat = array([[[0.1, 3.2], [1.2, 0.7]],
+ [[0.2, 6.4], [2.4, 1.4]]])
large = identity(10)
t = large[1, :].copy()
- large[1, :] = large[0,:]
+ large[1, :] = large[0, :]
large[0, :] = t
def test_large_power(self):
assert_equal(
matrix_power(self.R90, 2 ** 100 + 2 ** 10 + 2 ** 5 + 1), self.R90)
+ assert_equal(
+ matrix_power(self.R90, 2 ** 100 + 2 ** 10 + 1), self.R90)
+ assert_equal(
+ matrix_power(self.R90, 2 ** 100 + 2 + 1), -self.R90)
def test_large_power_trailing_zero(self):
assert_equal(
@@ -925,7 +951,7 @@ class TestMatrixPower(object):
def testip_zero(self):
def tz(M):
mz = matrix_power(M, 0)
- assert_equal(mz, identity(M.shape[0]))
+ assert_equal(mz, identity_like_generalized(M))
assert_equal(mz.dtype, M.dtype)
for M in [self.Arb22, self.arbfloat, self.large]:
tz(M)
@@ -941,7 +967,7 @@ class TestMatrixPower(object):
def testip_two(self):
def tz(M):
mz = matrix_power(M, 2)
- assert_equal(mz, dot(M, M))
+ assert_equal(mz, matmul(M, M))
assert_equal(mz.dtype, M.dtype)
for M in [self.Arb22, self.arbfloat, self.large]:
tz(M)
@@ -949,14 +975,19 @@ class TestMatrixPower(object):
def testip_invert(self):
def tz(M):
mz = matrix_power(M, -1)
- assert_almost_equal(identity(M.shape[0]), dot(mz, M))
+ assert_almost_equal(matmul(mz, M), identity_like_generalized(M))
for M in [self.R90, self.Arb22, self.arbfloat, self.large]:
tz(M)
def test_invert_noninvertible(self):
- import numpy.linalg
- assert_raises(numpy.linalg.linalg.LinAlgError,
- lambda: matrix_power(self.noninv, -1))
+ assert_raises(LinAlgError, matrix_power, self.noninv, -1)
+
+ def test_invalid(self):
+ assert_raises(TypeError, matrix_power, self.R90, 1.5)
+ assert_raises(TypeError, matrix_power, self.R90, [1])
+ assert_raises(LinAlgError, matrix_power, np.array([1]), 1)
+ assert_raises(LinAlgError, matrix_power, np.array([[1], [2]]), 1)
+ assert_raises(LinAlgError, matrix_power, np.ones((4, 3, 2)), 1)
class TestBoolPower(object):
@@ -966,7 +997,7 @@ class TestBoolPower(object):
assert_equal(matrix_power(A, 2), A)
-class TestEigvalsh(HermitianTestCase, HermitianGeneralizedTestCase):
+class TestEigvalshCases(HermitianTestCase, HermitianGeneralizedTestCase):
def do(self, a, b, tags):
# note that eigenvalue arrays returned by eig must be sorted since
@@ -979,6 +1010,8 @@ class TestEigvalsh(HermitianTestCase, HermitianGeneralizedTestCase):
ev2 = linalg.eigvalsh(a, 'U')
assert_allclose(ev2, evalues, rtol=get_rtol(ev.dtype))
+
+class TestEigvalsh(object):
def test_types(self):
def check(dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
@@ -1034,7 +1067,7 @@ class TestEigvalsh(HermitianTestCase, HermitianGeneralizedTestCase):
assert_(isinstance(res, np.ndarray))
-class TestEigh(HermitianTestCase, HermitianGeneralizedTestCase):
+class TestEighCases(HermitianTestCase, HermitianGeneralizedTestCase):
def do(self, a, b, tags):
# note that eigenvalue arrays returned by eig must be sorted since
@@ -1055,6 +1088,8 @@ class TestEigh(HermitianTestCase, HermitianGeneralizedTestCase):
np.asarray(ev2)[..., None, :] * np.asarray(evc2),
rtol=get_rtol(ev.dtype), err_msg=repr(a))
+
+class TestEigh(object):
def test_types(self):
def check(dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
@@ -1115,11 +1150,13 @@ class TestEigh(HermitianTestCase, HermitianGeneralizedTestCase):
assert_(isinstance(a, np.ndarray))
-class _TestNorm(object):
-
+class _TestNormBase(object):
dt = None
dec = None
+
+class _TestNormGeneral(_TestNormBase):
+
def test_empty(self):
assert_equal(norm([]), 0.0)
assert_equal(norm(array([], dtype=self.dt)), 0.0)
@@ -1166,57 +1203,6 @@ class _TestNorm(object):
assert_(issubclass(an.dtype.type, np.floating))
assert_almost_equal(an, 1.0)
- def test_matrix_return_type(self):
- a = np.array([[1, 0, 1], [0, 1, 1]])
-
- exact_types = np.typecodes['AllInteger']
-
- # float32, complex64, float64, complex128 types are the only types
- # allowed by `linalg`, which performs the matrix operations used
- # within `norm`.
- inexact_types = 'fdFD'
-
- all_types = exact_types + inexact_types
-
- for each_inexact_types in all_types:
- at = a.astype(each_inexact_types)
-
- an = norm(at, -np.inf)
- assert_(issubclass(an.dtype.type, np.floating))
- assert_almost_equal(an, 2.0)
-
- with suppress_warnings() as sup:
- sup.filter(RuntimeWarning, "divide by zero encountered")
- an = norm(at, -1)
- assert_(issubclass(an.dtype.type, np.floating))
- assert_almost_equal(an, 1.0)
-
- an = norm(at, 1)
- assert_(issubclass(an.dtype.type, np.floating))
- assert_almost_equal(an, 2.0)
-
- an = norm(at, 2)
- assert_(issubclass(an.dtype.type, np.floating))
- assert_almost_equal(an, 3.0**(1.0/2.0))
-
- an = norm(at, -2)
- assert_(issubclass(an.dtype.type, np.floating))
- assert_almost_equal(an, 1.0)
-
- an = norm(at, np.inf)
- assert_(issubclass(an.dtype.type, np.floating))
- assert_almost_equal(an, 2.0)
-
- an = norm(at, 'fro')
- assert_(issubclass(an.dtype.type, np.floating))
- assert_almost_equal(an, 2.0)
-
- an = norm(at, 'nuc')
- assert_(issubclass(an.dtype.type, np.floating))
- # Lower bar needed to support low precision floats.
- # They end up being off by 1 in the 7th place.
- old_assert_almost_equal(an, 2.7320508075688772, decimal=6)
-
def test_vector(self):
a = [1, 2, 3, 4]
b = [-1, -2, -3, -4]
@@ -1247,39 +1233,6 @@ class _TestNorm(object):
array(c, dtype=self.dt)):
_test(v)
- def test_matrix_2x2(self):
- A = matrix([[1, 3], [5, 7]], dtype=self.dt)
- assert_almost_equal(norm(A), 84 ** 0.5)
- assert_almost_equal(norm(A, 'fro'), 84 ** 0.5)
- assert_almost_equal(norm(A, 'nuc'), 10.0)
- assert_almost_equal(norm(A, inf), 12.0)
- assert_almost_equal(norm(A, -inf), 4.0)
- assert_almost_equal(norm(A, 1), 10.0)
- assert_almost_equal(norm(A, -1), 6.0)
- assert_almost_equal(norm(A, 2), 9.1231056256176615)
- assert_almost_equal(norm(A, -2), 0.87689437438234041)
-
- assert_raises(ValueError, norm, A, 'nofro')
- assert_raises(ValueError, norm, A, -3)
- assert_raises(ValueError, norm, A, 0)
-
- def test_matrix_3x3(self):
- # This test has been added because the 2x2 example
- # happened to have equal nuclear norm and induced 1-norm.
- # The 1/10 scaling factor accommodates the absolute tolerance
- # used in assert_almost_equal.
- A = (1 / 10) * \
- np.array([[1, 2, 3], [6, 0, 5], [3, 2, 1]], dtype=self.dt)
- assert_almost_equal(norm(A), (1 / 10) * 89 ** 0.5)
- assert_almost_equal(norm(A, 'fro'), (1 / 10) * 89 ** 0.5)
- assert_almost_equal(norm(A, 'nuc'), 1.3366836911774836)
- assert_almost_equal(norm(A, inf), 1.1)
- assert_almost_equal(norm(A, -inf), 0.6)
- assert_almost_equal(norm(A, 1), 1.0)
- assert_almost_equal(norm(A, -1), 0.4)
- assert_almost_equal(norm(A, 2), 0.88722940323461277)
- assert_almost_equal(norm(A, -2), 0.19456584790481812)
-
def test_axis(self):
# Vector norms.
# Compare the use of `axis` with computing the norm of each row
@@ -1359,10 +1312,103 @@ class _TestNorm(object):
assert_(found.shape == expected_shape,
shape_err.format(found.shape, expected_shape, order, k))
+
+class _TestNorm2D(_TestNormBase):
+ # Define the part for 2d arrays separately, so we can subclass this
+ # and run the tests using np.matrix in matrixlib.tests.test_matrix_linalg.
+ array = np.array
+
+ def test_matrix_empty(self):
+ assert_equal(norm(self.array([[]], dtype=self.dt)), 0.0)
+
+ def test_matrix_return_type(self):
+ a = self.array([[1, 0, 1], [0, 1, 1]])
+
+ exact_types = np.typecodes['AllInteger']
+
+ # float32, complex64, float64, complex128 types are the only types
+ # allowed by `linalg`, which performs the matrix operations used
+ # within `norm`.
+ inexact_types = 'fdFD'
+
+ all_types = exact_types + inexact_types
+
+ for each_inexact_types in all_types:
+ at = a.astype(each_inexact_types)
+
+ an = norm(at, -np.inf)
+ assert_(issubclass(an.dtype.type, np.floating))
+ assert_almost_equal(an, 2.0)
+
+ with suppress_warnings() as sup:
+ sup.filter(RuntimeWarning, "divide by zero encountered")
+ an = norm(at, -1)
+ assert_(issubclass(an.dtype.type, np.floating))
+ assert_almost_equal(an, 1.0)
+
+ an = norm(at, 1)
+ assert_(issubclass(an.dtype.type, np.floating))
+ assert_almost_equal(an, 2.0)
+
+ an = norm(at, 2)
+ assert_(issubclass(an.dtype.type, np.floating))
+ assert_almost_equal(an, 3.0**(1.0/2.0))
+
+ an = norm(at, -2)
+ assert_(issubclass(an.dtype.type, np.floating))
+ assert_almost_equal(an, 1.0)
+
+ an = norm(at, np.inf)
+ assert_(issubclass(an.dtype.type, np.floating))
+ assert_almost_equal(an, 2.0)
+
+ an = norm(at, 'fro')
+ assert_(issubclass(an.dtype.type, np.floating))
+ assert_almost_equal(an, 2.0)
+
+ an = norm(at, 'nuc')
+ assert_(issubclass(an.dtype.type, np.floating))
+ # Lower bar needed to support low precision floats.
+ # They end up being off by 1 in the 7th place.
+ np.testing.assert_almost_equal(an, 2.7320508075688772, decimal=6)
+
+ def test_matrix_2x2(self):
+ A = self.array([[1, 3], [5, 7]], dtype=self.dt)
+ assert_almost_equal(norm(A), 84 ** 0.5)
+ assert_almost_equal(norm(A, 'fro'), 84 ** 0.5)
+ assert_almost_equal(norm(A, 'nuc'), 10.0)
+ assert_almost_equal(norm(A, inf), 12.0)
+ assert_almost_equal(norm(A, -inf), 4.0)
+ assert_almost_equal(norm(A, 1), 10.0)
+ assert_almost_equal(norm(A, -1), 6.0)
+ assert_almost_equal(norm(A, 2), 9.1231056256176615)
+ assert_almost_equal(norm(A, -2), 0.87689437438234041)
+
+ assert_raises(ValueError, norm, A, 'nofro')
+ assert_raises(ValueError, norm, A, -3)
+ assert_raises(ValueError, norm, A, 0)
+
+ def test_matrix_3x3(self):
+ # This test has been added because the 2x2 example
+ # happened to have equal nuclear norm and induced 1-norm.
+ # The 1/10 scaling factor accommodates the absolute tolerance
+ # used in assert_almost_equal.
+ A = (1 / 10) * \
+ self.array([[1, 2, 3], [6, 0, 5], [3, 2, 1]], dtype=self.dt)
+ assert_almost_equal(norm(A), (1 / 10) * 89 ** 0.5)
+ assert_almost_equal(norm(A, 'fro'), (1 / 10) * 89 ** 0.5)
+ assert_almost_equal(norm(A, 'nuc'), 1.3366836911774836)
+ assert_almost_equal(norm(A, inf), 1.1)
+ assert_almost_equal(norm(A, -inf), 0.6)
+ assert_almost_equal(norm(A, 1), 1.0)
+ assert_almost_equal(norm(A, -1), 0.4)
+ assert_almost_equal(norm(A, 2), 0.88722940323461277)
+ assert_almost_equal(norm(A, -2), 0.19456584790481812)
+
def test_bad_args(self):
# Check that bad arguments raise the appropriate exceptions.
- A = array([[1, 2, 3], [4, 5, 6]], dtype=self.dt)
+ A = self.array([[1, 2, 3], [4, 5, 6]], dtype=self.dt)
B = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4)
# Using `axis=<integer>` or passing in a 1-D array implies vector
@@ -1386,6 +1432,10 @@ class _TestNorm(object):
assert_raises(ValueError, norm, B, None, (0, 1, 2))
+class _TestNorm(_TestNorm2D, _TestNormGeneral):
+ pass
+
+
class TestNorm_NonSystematic(object):
def test_longdouble_norm(self):
@@ -1413,21 +1463,34 @@ class TestNorm_NonSystematic(object):
old_assert_almost_equal(np.linalg.norm(d, ord=3), res, decimal=5)
-class TestNormDouble(_TestNorm):
+# Separate definitions so we can use them for matrix tests.
+class _TestNormDoubleBase(_TestNormBase):
dt = np.double
dec = 12
-class TestNormSingle(_TestNorm):
+class _TestNormSingleBase(_TestNormBase):
dt = np.float32
dec = 6
-class TestNormInt64(_TestNorm):
+class _TestNormInt64Base(_TestNormBase):
dt = np.int64
dec = 12
+class TestNormDouble(_TestNorm, _TestNormDoubleBase):
+ pass
+
+
+class TestNormSingle(_TestNorm, _TestNormSingleBase):
+ pass
+
+
+class TestNormInt64(_TestNorm, _TestNormInt64Base):
+ pass
+
+
class TestMatrixRank(object):
def test_matrix_rank(self):
@@ -1478,6 +1541,8 @@ def test_reduced_rank():
class TestQR(object):
+ # Define the array class here, so run this on matrices elsewhere.
+ array = np.array
def check_qr(self, a):
# This test expects the argument `a` to be an ndarray or
@@ -1528,7 +1593,7 @@ class TestQR(object):
# of the functions in lapack_lite. Consequently, this test is
# very limited in scope. Note that the results are in FORTRAN
# order, hence the h arrays are transposed.
- a = array([[1, 2], [3, 4], [5, 6]], dtype=np.double)
+ a = self.array([[1, 2], [3, 4], [5, 6]], dtype=np.double)
# Test double
h, tau = linalg.qr(a, mode='raw')
@@ -1544,22 +1609,21 @@ class TestQR(object):
assert_(tau.shape == (2,))
def test_mode_all_but_economic(self):
- a = array([[1, 2], [3, 4]])
- b = array([[1, 2], [3, 4], [5, 6]])
+ a = self.array([[1, 2], [3, 4]])
+ b = self.array([[1, 2], [3, 4], [5, 6]])
for dt in "fd":
m1 = a.astype(dt)
m2 = b.astype(dt)
self.check_qr(m1)
self.check_qr(m2)
self.check_qr(m2.T)
- self.check_qr(matrix(m1))
+
for dt in "fd":
m1 = 1 + 1j * a.astype(dt)
m2 = 1 + 1j * b.astype(dt)
self.check_qr(m1)
self.check_qr(m2)
self.check_qr(m2.T)
- self.check_qr(matrix(m1))
def test_0_size(self):
# There may be good ways to do (some of this) reasonably:
@@ -1699,6 +1763,40 @@ def test_xerbla_override():
raise SkipTest('Numpy xerbla not linked in.')
+def test_sdot_bug_8577():
+ # Regression test that loading certain other libraries does not
+ # result to wrong results in float32 linear algebra.
+ #
+ # There's a bug gh-8577 on OSX that can trigger this, and perhaps
+ # there are also other situations in which it occurs.
+ #
+ # Do the check in a separate process.
+
+ bad_libs = ['PyQt5.QtWidgets', 'IPython']
+
+ template = textwrap.dedent("""
+ import sys
+ {before}
+ try:
+ import {bad_lib}
+ except ImportError:
+ sys.exit(0)
+ {after}
+ x = np.ones(2, dtype=np.float32)
+ sys.exit(0 if np.allclose(x.dot(x), 2.0) else 1)
+ """)
+
+ for bad_lib in bad_libs:
+ code = template.format(before="import numpy as np", after="",
+ bad_lib=bad_lib)
+ subprocess.check_call([sys.executable, "-c", code])
+
+ # Swapped import order
+ code = template.format(after="import numpy as np", before="",
+ bad_lib=bad_lib)
+ subprocess.check_call([sys.executable, "-c", code])
+
+
class TestMultiDot(object):
def test_basic_function_with_three_arguments(self):
diff --git a/numpy/linalg/umath_linalg.c.src b/numpy/linalg/umath_linalg.c.src
index 03fdd387a..7dc1cb0cb 100644
--- a/numpy/linalg/umath_linalg.c.src
+++ b/numpy/linalg/umath_linalg.c.src
@@ -382,17 +382,11 @@ typedef f2c_doublecomplex fortran_doublecomplex;
*****************************************************************************
*/
-static NPY_INLINE void *
-offset_ptr(void* ptr, ptrdiff_t offset)
-{
- return (void*)((npy_uint8*)ptr + offset);
-}
-
static NPY_INLINE int
get_fp_invalid_and_clear(void)
{
int status;
- status = npy_clear_floatstatus();
+ status = npy_clear_floatstatus_barrier((char*)&status);
return !!(status & NPY_FPE_INVALID);
}
@@ -403,7 +397,7 @@ set_fp_invalid_or_clear(int error_occurred)
npy_set_floatstatus_invalid();
}
else {
- npy_clear_floatstatus();
+ npy_clear_floatstatus_barrier((char*)&error_occurred);
}
}
@@ -577,104 +571,6 @@ dump_linearize_data(const char* name, const LINEARIZE_DATA_t* params)
params->row_strides, params->column_strides);
}
-
-static NPY_INLINE float
-FLOAT_add(float op1, float op2)
-{
- return op1 + op2;
-}
-
-static NPY_INLINE double
-DOUBLE_add(double op1, double op2)
-{
- return op1 + op2;
-}
-
-static NPY_INLINE COMPLEX_t
-CFLOAT_add(COMPLEX_t op1, COMPLEX_t op2)
-{
- COMPLEX_t result;
- result.array[0] = op1.array[0] + op2.array[0];
- result.array[1] = op1.array[1] + op2.array[1];
-
- return result;
-}
-
-static NPY_INLINE DOUBLECOMPLEX_t
-CDOUBLE_add(DOUBLECOMPLEX_t op1, DOUBLECOMPLEX_t op2)
-{
- DOUBLECOMPLEX_t result;
- result.array[0] = op1.array[0] + op2.array[0];
- result.array[1] = op1.array[1] + op2.array[1];
-
- return result;
-}
-
-static NPY_INLINE float
-FLOAT_mul(float op1, float op2)
-{
- return op1*op2;
-}
-
-static NPY_INLINE double
-DOUBLE_mul(double op1, double op2)
-{
- return op1*op2;
-}
-
-
-static NPY_INLINE COMPLEX_t
-CFLOAT_mul(COMPLEX_t op1, COMPLEX_t op2)
-{
- COMPLEX_t result;
- result.array[0] = op1.array[0]*op2.array[0] - op1.array[1]*op2.array[1];
- result.array[1] = op1.array[1]*op2.array[0] + op1.array[0]*op2.array[1];
-
- return result;
-}
-
-static NPY_INLINE DOUBLECOMPLEX_t
-CDOUBLE_mul(DOUBLECOMPLEX_t op1, DOUBLECOMPLEX_t op2)
-{
- DOUBLECOMPLEX_t result;
- result.array[0] = op1.array[0]*op2.array[0] - op1.array[1]*op2.array[1];
- result.array[1] = op1.array[1]*op2.array[0] + op1.array[0]*op2.array[1];
-
- return result;
-}
-
-static NPY_INLINE float
-FLOAT_mulc(float op1, float op2)
-{
- return op1*op2;
-}
-
-static NPY_INLINE double
-DOUBLE_mulc(float op1, float op2)
-{
- return op1*op2;
-}
-
-static NPY_INLINE COMPLEX_t
-CFLOAT_mulc(COMPLEX_t op1, COMPLEX_t op2)
-{
- COMPLEX_t result;
- result.array[0] = op1.array[0]*op2.array[0] + op1.array[1]*op2.array[1];
- result.array[1] = op1.array[0]*op2.array[1] - op1.array[1]*op2.array[0];
-
- return result;
-}
-
-static NPY_INLINE DOUBLECOMPLEX_t
-CDOUBLE_mulc(DOUBLECOMPLEX_t op1, DOUBLECOMPLEX_t op2)
-{
- DOUBLECOMPLEX_t result;
- result.array[0] = op1.array[0]*op2.array[0] + op1.array[1]*op2.array[1];
- result.array[1] = op1.array[0]*op2.array[1] - op1.array[1]*op2.array[0];
-
- return result;
-}
-
static NPY_INLINE void
print_FLOAT(npy_float s)
{
@@ -3306,7 +3202,7 @@ static void
for (i = 0; i < nrhs; i++) {
@ftyp@ *vector = components + i*m;
/* Numpy and fortran floating types are the same size,
- * so this case is safe */
+ * so this cast is safe */
@basetyp@ abs2 = @TYPE@_abs2((@typ@ *)vector, excess);
memcpy(
resid + i*r_out.column_strides,
diff --git a/numpy/ma/core.py b/numpy/ma/core.py
index 91cf8ed0f..5ed086db3 100644
--- a/numpy/ma/core.py
+++ b/numpy/ma/core.py
@@ -2799,13 +2799,8 @@ class MaskedArray(ndarray):
# FIXME _sharedmask is never used.
_sharedmask = True
# Process mask.
- # Number of named fields (or zero if none)
- names_ = _data.dtype.names or ()
# Type of the mask
- if names_:
- mdtype = make_mask_descr(_data.dtype)
- else:
- mdtype = MaskType
+ mdtype = make_mask_descr(_data.dtype)
if mask is nomask:
# Case 1. : no mask in input.
@@ -2831,14 +2826,12 @@ class MaskedArray(ndarray):
_data._mask = mask
_data._sharedmask = False
else:
+ _data._sharedmask = not copy
if copy:
_data._mask = _data._mask.copy()
- _data._sharedmask = False
# Reset the shape of the original mask
if getmask(data) is not nomask:
data._mask.shape = data.shape
- else:
- _data._sharedmask = True
else:
# Case 2. : With a mask in input.
# If mask is boolean, create an array of True or False
@@ -2875,7 +2868,7 @@ class MaskedArray(ndarray):
_data._mask = mask
_data._sharedmask = not copy
else:
- if names_:
+ if _data.dtype.names:
def _recursive_or(a, b):
"do a|=b on each field of a, recursively"
for name in a.dtype.names:
@@ -2884,7 +2877,7 @@ class MaskedArray(ndarray):
_recursive_or(af, bf)
else:
af |= bf
- return
+
_recursive_or(_data._mask, mask)
else:
_data._mask = np.logical_or(mask, _data._mask)
@@ -2999,7 +2992,9 @@ class MaskedArray(ndarray):
order = "K"
_mask = _mask.astype(_mask_dtype, order)
-
+ else:
+ # Take a view so shape changes, etc., do not propagate back.
+ _mask = _mask.view()
else:
_mask = nomask
@@ -3089,7 +3084,7 @@ class MaskedArray(ndarray):
returned object (this is equivalent to setting the ``type``
parameter).
type : Python type, optional
- Type of the returned view, e.g., ndarray or matrix. Again, the
+ Type of the returned view, either ndarray or a subclass. The
default None results in type preservation.
Notes
@@ -3344,17 +3339,35 @@ class MaskedArray(ndarray):
_mask[indx] = mindx
return
- def __setattr__(self, attr, value):
- super(MaskedArray, self).__setattr__(attr, value)
- if attr == 'dtype' and self._mask is not nomask:
- self._mask = self._mask.view(make_mask_descr(value), ndarray)
- # Try to reset the shape of the mask (if we don't have a void)
- # This raises a ValueError if the dtype change won't work
+ # Define so that we can overwrite the setter.
+ @property
+ def dtype(self):
+ return super(MaskedArray, self).dtype
+
+ @dtype.setter
+ def dtype(self, dtype):
+ super(MaskedArray, type(self)).dtype.__set__(self, dtype)
+ if self._mask is not nomask:
+ self._mask = self._mask.view(make_mask_descr(dtype), ndarray)
+ # Try to reset the shape of the mask (if we don't have a void).
+ # This raises a ValueError if the dtype change won't work.
try:
self._mask.shape = self.shape
except (AttributeError, TypeError):
pass
+ @property
+ def shape(self):
+ return super(MaskedArray, self).shape
+
+ @shape.setter
+ def shape(self, shape):
+ super(MaskedArray, type(self)).shape.__set__(self, shape)
+ # Cannot use self._mask, since it may not (yet) exist when a
+ # masked matrix sets the shape.
+ if getmask(self) is not nomask:
+ self._mask.shape = self.shape
+
def __setmask__(self, mask, copy=False):
"""
Set the mask.
@@ -3673,14 +3686,14 @@ class MaskedArray(ndarray):
>>> type(x.filled())
<type 'numpy.ndarray'>
- Subclassing is preserved. This means that if the data part of the masked
- array is a matrix, `filled` returns a matrix:
-
- >>> x = np.ma.array(np.matrix([[1, 2], [3, 4]]), mask=[[0, 1], [1, 0]])
- >>> x.filled()
- matrix([[ 1, 999999],
- [999999, 4]])
+ Subclassing is preserved. This means that if, e.g., the data part of
+ the masked array is a recarray, `filled` returns a recarray:
+ >>> x = np.array([(-1, 2), (-3, 4)], dtype='i8,i8').view(np.recarray)
+ >>> m = np.ma.array(x, mask=[(True, False), (False, True)])
+ >>> m.filled()
+ rec.array([(999999, 2), ( -3, 999999)],
+ dtype=[('f0', '<i8'), ('f1', '<i8')])
"""
m = self._mask
if m is nomask:
@@ -5531,15 +5544,7 @@ class MaskedArray(ndarray):
sidx = self.argsort(axis=axis, kind=kind, order=order,
fill_value=fill_value, endwith=endwith)
- # save memory for 1d arrays
- if self.ndim == 1:
- idx = sidx
- else:
- idx = list(np.ix_(*[np.arange(x) for x in self.shape]))
- idx[axis] = sidx
- idx = tuple(idx)
-
- self[...] = self[idx]
+ self[...] = np.take_along_axis(self, sidx, axis=axis)
def min(self, axis=None, out=None, fill_value=None, keepdims=np._NoValue):
"""
@@ -6317,6 +6322,12 @@ class MaskedConstant(MaskedArray):
# precedent for this with `np.bool_` scalars.
return self
+ def __copy__(self):
+ return self
+
+ def __deepcopy__(self, memo):
+ return self
+
def __setattr__(self, attr, value):
if not self.__has_singleton():
# allow the singleton to be initialized
diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py
index 8272dced9..3be4d3625 100644
--- a/numpy/ma/extras.py
+++ b/numpy/ma/extras.py
@@ -747,19 +747,17 @@ def _median(a, axis=None, out=None, overwrite_input=False):
return np.ma.minimum_fill_value(asorted)
return s
- counts = count(asorted, axis=axis)
+ counts = count(asorted, axis=axis, keepdims=True)
h = counts // 2
- # create indexing mesh grid for all but reduced axis
- axes_grid = [np.arange(x) for i, x in enumerate(asorted.shape)
- if i != axis]
- ind = np.meshgrid(*axes_grid, sparse=True, indexing='ij')
+ # duplicate high if odd number of elements so mean does nothing
+ odd = counts % 2 == 1
+ l = np.where(odd, h, h-1)
- # insert indices of low and high median
- ind.insert(axis, h - 1)
- low = asorted[tuple(ind)]
- ind[axis] = np.minimum(h, asorted.shape[axis] - 1)
- high = asorted[tuple(ind)]
+ lh = np.concatenate([l,h], axis=axis)
+
+ # get low and high median
+ low_high = np.take_along_axis(asorted, lh, axis=axis)
def replace_masked(s):
# Replace masked entries with minimum_full_value unless it all values
@@ -767,30 +765,20 @@ def _median(a, axis=None, out=None, overwrite_input=False):
# larger than the fill value is undefined and a valid value placed
# elsewhere, e.g. [4, --, inf].
if np.ma.is_masked(s):
- rep = (~np.all(asorted.mask, axis=axis)) & s.mask
+ rep = (~np.all(asorted.mask, axis=axis, keepdims=True)) & s.mask
s.data[rep] = np.ma.minimum_fill_value(asorted)
s.mask[rep] = False
- replace_masked(low)
- replace_masked(high)
-
- # duplicate high if odd number of elements so mean does nothing
- odd = counts % 2 == 1
- np.copyto(low, high, where=odd)
- # not necessary for scalar True/False masks
- try:
- np.copyto(low.mask, high.mask, where=odd)
- except Exception:
- pass
+ replace_masked(low_high)
if np.issubdtype(asorted.dtype, np.inexact):
# avoid inf / x = masked
- s = np.ma.sum([low, high], axis=0, out=out)
+ s = np.ma.sum(low_high, axis=axis, out=out)
np.true_divide(s.data, 2., casting='unsafe', out=s.data)
s = np.lib.utils._median_nancheck(asorted, s, axis, out)
else:
- s = np.ma.mean([low, high], axis=0, out=out)
+ s = np.ma.mean(low_high, axis=axis, out=out)
return s
@@ -1465,9 +1453,14 @@ class MAxisConcatenator(AxisConcatenator):
"""
concatenate = staticmethod(concatenate)
- @staticmethod
- def makemat(arr):
- return array(arr.data.view(np.matrix), mask=arr.mask)
+ @classmethod
+ def makemat(cls, arr):
+ # There used to be a view as np.matrix here, but we may eventually
+ # deprecate that class. In preparation, we use the unmasked version
+ # to construct the matrix (with copy=False for backwards compatibility
+ # with the .view)
+ data = super(MAxisConcatenator, cls).makemat(arr.data, copy=False)
+ return array(data, mask=arr.mask)
def __getitem__(self, key):
# matrix builder syntax, like 'a, b; c, d'
diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py
index 9caf38b56..51616f214 100644
--- a/numpy/ma/tests/test_core.py
+++ b/numpy/ma/tests/test_core.py
@@ -335,49 +335,6 @@ class TestMaskedArray(object):
assert_equal(s1, s2)
assert_(x1[1:1].shape == (0,))
- def test_matrix_indexing(self):
- # Tests conversions and indexing
- x1 = np.matrix([[1, 2, 3], [4, 3, 2]])
- x2 = array(x1, mask=[[1, 0, 0], [0, 1, 0]])
- x3 = array(x1, mask=[[0, 1, 0], [1, 0, 0]])
- x4 = array(x1)
- # test conversion to strings
- str(x2) # raises?
- repr(x2) # raises?
- # tests of indexing
- assert_(type(x2[1, 0]) is type(x1[1, 0]))
- assert_(x1[1, 0] == x2[1, 0])
- assert_(x2[1, 1] is masked)
- assert_equal(x1[0, 2], x2[0, 2])
- assert_equal(x1[0, 1:], x2[0, 1:])
- assert_equal(x1[:, 2], x2[:, 2])
- assert_equal(x1[:], x2[:])
- assert_equal(x1[1:], x3[1:])
- x1[0, 2] = 9
- x2[0, 2] = 9
- assert_equal(x1, x2)
- x1[0, 1:] = 99
- x2[0, 1:] = 99
- assert_equal(x1, x2)
- x2[0, 1] = masked
- assert_equal(x1, x2)
- x2[0, 1:] = masked
- assert_equal(x1, x2)
- x2[0, :] = x1[0, :]
- x2[0, 1] = masked
- assert_(allequal(getmask(x2), np.array([[0, 1, 0], [0, 1, 0]])))
- x3[1, :] = masked_array([1, 2, 3], [1, 1, 0])
- assert_(allequal(getmask(x3)[1], array([1, 1, 0])))
- assert_(allequal(getmask(x3[1]), array([1, 1, 0])))
- x4[1, :] = masked_array([1, 2, 3], [1, 1, 0])
- assert_(allequal(getmask(x4[1]), array([1, 1, 0])))
- assert_(allequal(x4[1], array([1, 2, 3])))
- x1 = np.matrix(np.arange(5) * 1.0)
- x2 = masked_values(x1, 3.0)
- assert_equal(x1, x2)
- assert_(allequal(array([0, 0, 0, 1, 0], MaskType), x2.mask))
- assert_equal(3.0, x2.fill_value)
-
@suppress_copy_mask_on_assignment
def test_copy(self):
# Tests of some subtle points of copying and sizing.
@@ -395,9 +352,11 @@ class TestMaskedArray(object):
assert_equal(y1._mask.__array_interface__, m.__array_interface__)
y1a = array(y1)
+ # Default for masked array is not to copy; see gh-10318.
assert_(y1a._data.__array_interface__ ==
y1._data.__array_interface__)
- assert_(y1a.mask is y1.mask)
+ assert_(y1a._mask.__array_interface__ ==
+ y1._mask.__array_interface__)
y2 = array(x1, mask=m3)
assert_(y2._data.__array_interface__ == x1.__array_interface__)
@@ -611,11 +570,13 @@ class TestMaskedArray(object):
def test_pickling_subbaseclass(self):
# Test pickling w/ a subclass of ndarray
- a = array(np.matrix(list(range(10))), mask=[1, 0, 1, 0, 0] * 2)
+ x = np.array([(1.0, 2), (3.0, 4)],
+ dtype=[('x', float), ('y', int)]).view(np.recarray)
+ a = masked_array(x, mask=[(True, False), (False, True)])
a_pickled = pickle.loads(a.dumps())
assert_equal(a_pickled._mask, a._mask)
assert_equal(a_pickled, a)
- assert_(isinstance(a_pickled._data, np.matrix))
+ assert_(isinstance(a_pickled._data, np.recarray))
def test_pickling_maskedconstant(self):
# Test pickling MaskedConstant
@@ -1448,16 +1409,6 @@ class TestMaskedArrayArithmetic(object):
assert_(result is output)
assert_(output[0] is masked)
- def test_count_mean_with_matrix(self):
- m = np.ma.array(np.matrix([[1,2],[3,4]]), mask=np.zeros((2,2)))
-
- assert_equal(m.count(axis=0).shape, (1,2))
- assert_equal(m.count(axis=1).shape, (2,1))
-
- #make sure broadcasting inside mean and var work
- assert_equal(m.mean(axis=0), [[2., 3.]])
- assert_equal(m.mean(axis=1), [[1.5], [3.5]])
-
def test_eq_on_structured(self):
# Test the equality of structured arrays
ndtype = [('A', int), ('B', int)]
@@ -1740,23 +1691,6 @@ class TestMaskedArrayAttributes(object):
def test_flat(self):
# Test that flat can return all types of items [#4585, #4615]
- # test simple access
- test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1])
- assert_equal(test.flat[1], 2)
- assert_equal(test.flat[2], masked)
- assert_(np.all(test.flat[0:2] == test[0, 0:2]))
- # Test flat on masked_matrices
- test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1])
- test.flat = masked_array([3, 2, 1], mask=[1, 0, 0])
- control = masked_array(np.matrix([[3, 2, 1]]), mask=[1, 0, 0])
- assert_equal(test, control)
- # Test setting
- test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1])
- testflat = test.flat
- testflat[:] = testflat[[2, 1, 0]]
- assert_equal(test, control)
- testflat[0] = 9
- assert_equal(test[0, 0], 9)
# test 2-D record array
# ... on structured array w/ masked records
x = array([[(1, 1.1, 'one'), (2, 2.2, 'two'), (3, 3.3, 'thr')],
@@ -1784,12 +1718,6 @@ class TestMaskedArrayAttributes(object):
if i >= x.shape[-1]:
i = 0
j += 1
- # test that matrices keep the correct shape (#4615)
- a = masked_array(np.matrix(np.eye(2)), mask=0)
- b = a.flat
- b01 = b[:2]
- assert_equal(b01.data, array([[1., 0.]]))
- assert_equal(b01.mask, array([[False, False]]))
def test_assign_dtype(self):
# check that the mask's dtype is updated when dtype is changed
@@ -2893,32 +2821,6 @@ class TestMaskedArrayMethods(object):
assert_equal(mxsmall.any(0), [True, True, False])
assert_equal(mxsmall.any(1), [True, True, False])
- def test_allany_onmatrices(self):
- x = np.array([[0.13, 0.26, 0.90],
- [0.28, 0.33, 0.63],
- [0.31, 0.87, 0.70]])
- X = np.matrix(x)
- m = np.array([[True, False, False],
- [False, False, False],
- [True, True, False]], dtype=np.bool_)
- mX = masked_array(X, mask=m)
- mXbig = (mX > 0.5)
- mXsmall = (mX < 0.5)
-
- assert_(not mXbig.all())
- assert_(mXbig.any())
- assert_equal(mXbig.all(0), np.matrix([False, False, True]))
- assert_equal(mXbig.all(1), np.matrix([False, False, True]).T)
- assert_equal(mXbig.any(0), np.matrix([False, False, True]))
- assert_equal(mXbig.any(1), np.matrix([True, True, True]).T)
-
- assert_(not mXsmall.all())
- assert_(mXsmall.any())
- assert_equal(mXsmall.all(0), np.matrix([True, True, False]))
- assert_equal(mXsmall.all(1), np.matrix([False, False, False]).T)
- assert_equal(mXsmall.any(0), np.matrix([True, True, False]))
- assert_equal(mXsmall.any(1), np.matrix([True, True, False]).T)
-
def test_allany_oddities(self):
# Some fun with all and any
store = empty((), dtype=bool)
@@ -3017,14 +2919,6 @@ class TestMaskedArrayMethods(object):
b = a.compressed()
assert_equal(b, [2, 3, 4])
- a = array(np.matrix([1, 2, 3, 4]), mask=[0, 0, 0, 0])
- b = a.compressed()
- assert_equal(b, a)
- assert_(isinstance(b, np.matrix))
- a[0, 0] = masked
- b = a.compressed()
- assert_equal(b, [[2, 3, 4]])
-
def test_empty(self):
# Tests empty/like
datatype = [('a', int), ('b', float), ('c', '|S8')]
@@ -3139,10 +3033,6 @@ class TestMaskedArrayMethods(object):
a = array([0, 0], mask=[1, 1])
aravel = a.ravel()
assert_equal(aravel._mask.shape, a.shape)
- a = array(np.matrix([1, 2, 3, 4, 5]), mask=[[0, 1, 0, 0, 0]])
- aravel = a.ravel()
- assert_equal(aravel.shape, (1, 5))
- assert_equal(aravel._mask.shape, a.shape)
# Checks that small_mask is preserved
a = array([1, 2, 3, 4], mask=[0, 0, 0, 0], shrink=False)
assert_equal(a.ravel()._mask, [0, 0, 0, 0])
@@ -4607,10 +4497,6 @@ class TestMaskedFields(object):
assert_equal(test, data)
assert_equal(test.mask, controlmask.reshape(-1, 2))
- test = a.view((float, 2), np.matrix)
- assert_equal(test, data)
- assert_(isinstance(test, np.matrix))
-
def test_getitem(self):
ndtype = [('a', float), ('b', float)]
a = array(list(zip(np.random.rand(10), np.arange(10))), dtype=ndtype)
@@ -4794,11 +4680,12 @@ class TestMaskedView(object):
def test_view_to_dtype_and_type(self):
(data, a, controlmask) = self.data
- test = a.view((float, 2), np.matrix)
+ test = a.view((float, 2), np.recarray)
assert_equal(test, data)
- assert_(isinstance(test, np.matrix))
+ assert_(isinstance(test, np.recarray))
assert_(not isinstance(test, MaskedArray))
+
class TestOptionalArgs(object):
def test_ndarrayfuncs(self):
# test axis arg behaves the same as ndarray (including multiple axes)
@@ -4941,6 +4828,16 @@ class TestMaskedConstant(object):
np.ma.masked.copy() is np.ma.masked,
np.True_.copy() is np.True_)
+ def test__copy(self):
+ import copy
+ assert_(
+ copy.copy(np.ma.masked) is np.ma.masked)
+
+ def test_deepcopy(self):
+ import copy
+ assert_(
+ copy.deepcopy(np.ma.masked) is np.ma.masked)
+
def test_immutable(self):
orig = np.ma.masked
assert_raises(np.ma.core.MaskError, operator.setitem, orig, (), 1)
diff --git a/numpy/ma/tests/test_extras.py b/numpy/ma/tests/test_extras.py
index a7a32b628..c29bec2bd 100644
--- a/numpy/ma/tests/test_extras.py
+++ b/numpy/ma/tests/test_extras.py
@@ -307,18 +307,6 @@ class TestConcatenator(object):
assert_array_equal(d[5:,:], b_2)
assert_array_equal(d.mask, np.r_[m_1, m_2])
- def test_matrix_builder(self):
- assert_raises(np.ma.MAError, lambda: mr_['1, 2; 3, 4'])
-
- def test_matrix(self):
- actual = mr_['r', 1, 2, 3]
- expected = np.ma.array(np.r_['r', 1, 2, 3])
- assert_array_equal(actual, expected)
-
- # outer type is masked array, inner type is matrix
- assert_equal(type(actual), type(expected))
- assert_equal(type(actual.data), type(expected.data))
-
def test_masked_constant(self):
actual = mr_[np.ma.masked, 1]
assert_equal(actual.mask, [True, False])
diff --git a/numpy/ma/tests/test_old_ma.py b/numpy/ma/tests/test_old_ma.py
index 70eab0edc..d7b1e3c18 100644
--- a/numpy/ma/tests/test_old_ma.py
+++ b/numpy/ma/tests/test_old_ma.py
@@ -273,7 +273,11 @@ class TestMa(object):
assert_(y1.mask is m)
y1a = array(y1, copy=0)
- assert_(y1a.mask is y1.mask)
+ # For copy=False, one might expect that the array would just
+ # be passed on, i.e., that it would be "is" instead of "==".
+ # See gh-4043 for discussion.
+ assert_(y1a._mask.__array_interface__ ==
+ y1._mask.__array_interface__)
y2 = array(x1, mask=m3, copy=0)
assert_(y2.mask is m3)
diff --git a/numpy/ma/tests/test_regression.py b/numpy/ma/tests/test_regression.py
index 04e10d9d1..96c418a51 100644
--- a/numpy/ma/tests/test_regression.py
+++ b/numpy/ma/tests/test_regression.py
@@ -74,3 +74,13 @@ class TestRegression(object):
r1 = np.ma.corrcoef(x, y, ddof=1)
# ddof should not have an effect (it gets cancelled out)
assert_allclose(r0.data, r1.data)
+
+ def test_mask_not_backmangled(self):
+ # See gh-10314. Test case taken from gh-3140.
+ a = np.ma.MaskedArray([1., 2.], mask=[False, False])
+ assert_(a.mask.shape == (2,))
+ b = np.tile(a, (2, 1))
+ # Check that the above no longer changes a.shape to (1, 2)
+ assert_(a.mask.shape == (2,))
+ assert_(b.shape == (2, 2))
+ assert_(b.mask.shape == (2, 2))
diff --git a/numpy/ma/tests/test_subclassing.py b/numpy/ma/tests/test_subclassing.py
index b61a46278..f8ab52bb9 100644
--- a/numpy/ma/tests/test_subclassing.py
+++ b/numpy/ma/tests/test_subclassing.py
@@ -75,27 +75,6 @@ class MSubArray(SubArray, MaskedArray):
msubarray = MSubArray
-class MMatrix(MaskedArray, np.matrix,):
-
- def __new__(cls, data, mask=nomask):
- mat = np.matrix(data)
- _data = MaskedArray.__new__(cls, data=mat, mask=mask)
- return _data
-
- def __array_finalize__(self, obj):
- np.matrix.__array_finalize__(self, obj)
- MaskedArray.__array_finalize__(self, obj)
- return
-
- def _get_series(self):
- _view = self.view(MaskedArray)
- _view._sharedmask = False
- return _view
- _series = property(fget=_get_series)
-
-mmatrix = MMatrix
-
-
# Also a subclass that overrides __str__, __repr__ and __setitem__, disallowing
# setting to non-class values (and thus np.ma.core.masked_print_option)
# and overrides __array_wrap__, updating the info dict, to check that this
@@ -180,7 +159,7 @@ class TestSubclassing(object):
def setup(self):
x = np.arange(5, dtype='float')
- mx = mmatrix(x, mask=[0, 1, 0, 0, 0])
+ mx = msubarray(x, mask=[0, 1, 0, 0, 0])
self.data = (x, mx)
def test_data_subclassing(self):
@@ -196,34 +175,34 @@ class TestSubclassing(object):
def test_maskedarray_subclassing(self):
# Tests subclassing MaskedArray
(x, mx) = self.data
- assert_(isinstance(mx._data, np.matrix))
+ assert_(isinstance(mx._data, subarray))
def test_masked_unary_operations(self):
# Tests masked_unary_operation
(x, mx) = self.data
with np.errstate(divide='ignore'):
- assert_(isinstance(log(mx), mmatrix))
+ assert_(isinstance(log(mx), msubarray))
assert_equal(log(x), np.log(x))
def test_masked_binary_operations(self):
# Tests masked_binary_operation
(x, mx) = self.data
- # Result should be a mmatrix
- assert_(isinstance(add(mx, mx), mmatrix))
- assert_(isinstance(add(mx, x), mmatrix))
+ # Result should be a msubarray
+ assert_(isinstance(add(mx, mx), msubarray))
+ assert_(isinstance(add(mx, x), msubarray))
# Result should work
assert_equal(add(mx, x), mx+x)
- assert_(isinstance(add(mx, mx)._data, np.matrix))
- assert_(isinstance(add.outer(mx, mx), mmatrix))
- assert_(isinstance(hypot(mx, mx), mmatrix))
- assert_(isinstance(hypot(mx, x), mmatrix))
+ assert_(isinstance(add(mx, mx)._data, subarray))
+ assert_(isinstance(add.outer(mx, mx), msubarray))
+ assert_(isinstance(hypot(mx, mx), msubarray))
+ assert_(isinstance(hypot(mx, x), msubarray))
def test_masked_binary_operations2(self):
# Tests domained_masked_binary_operation
(x, mx) = self.data
xmx = masked_array(mx.data.__array__(), mask=mx.mask)
- assert_(isinstance(divide(mx, mx), mmatrix))
- assert_(isinstance(divide(mx, x), mmatrix))
+ assert_(isinstance(divide(mx, mx), msubarray))
+ assert_(isinstance(divide(mx, x), msubarray))
assert_equal(divide(mx, mx), divide(xmx, xmx))
def test_attributepropagation(self):
diff --git a/numpy/matrixlib/defmatrix.py b/numpy/matrixlib/defmatrix.py
index 1f5c94921..9909fec8d 100644
--- a/numpy/matrixlib/defmatrix.py
+++ b/numpy/matrixlib/defmatrix.py
@@ -5,8 +5,11 @@ __all__ = ['matrix', 'bmat', 'mat', 'asmatrix']
import sys
import ast
import numpy.core.numeric as N
-from numpy.core.numeric import concatenate, isscalar, binary_repr, identity, asanyarray
-from numpy.core.numerictypes import issubdtype
+from numpy.core.numeric import concatenate, isscalar
+# While not in __all__, matrix_power used to be defined here, so we import
+# it for backward compatibility.
+from numpy.linalg import matrix_power
+
def _convert_from_string(data):
for char in '[]':
@@ -63,114 +66,6 @@ def asmatrix(data, dtype=None):
"""
return matrix(data, dtype=dtype, copy=False)
-def matrix_power(M, n):
- """
- Raise a square matrix to the (integer) power `n`.
-
- For positive integers `n`, the power is computed by repeated matrix
- squarings and matrix multiplications. If ``n == 0``, the identity matrix
- of the same shape as M is returned. If ``n < 0``, the inverse
- is computed and then raised to the ``abs(n)``.
-
- Parameters
- ----------
- M : ndarray or matrix object
- Matrix to be "powered." Must be square, i.e. ``M.shape == (m, m)``,
- with `m` a positive integer.
- n : int
- The exponent can be any integer or long integer, positive,
- negative, or zero.
-
- Returns
- -------
- M**n : ndarray or matrix object
- The return value is the same shape and type as `M`;
- if the exponent is positive or zero then the type of the
- elements is the same as those of `M`. If the exponent is
- negative the elements are floating-point.
-
- Raises
- ------
- LinAlgError
- If the matrix is not numerically invertible.
-
- See Also
- --------
- matrix
- Provides an equivalent function as the exponentiation operator
- (``**``, not ``^``).
-
- Examples
- --------
- >>> from numpy import linalg as LA
- >>> i = np.array([[0, 1], [-1, 0]]) # matrix equiv. of the imaginary unit
- >>> LA.matrix_power(i, 3) # should = -i
- array([[ 0, -1],
- [ 1, 0]])
- >>> LA.matrix_power(np.matrix(i), 3) # matrix arg returns matrix
- matrix([[ 0, -1],
- [ 1, 0]])
- >>> LA.matrix_power(i, 0)
- array([[1, 0],
- [0, 1]])
- >>> LA.matrix_power(i, -3) # should = 1/(-i) = i, but w/ f.p. elements
- array([[ 0., 1.],
- [-1., 0.]])
-
- Somewhat more sophisticated example
-
- >>> q = np.zeros((4, 4))
- >>> q[0:2, 0:2] = -i
- >>> q[2:4, 2:4] = i
- >>> q # one of the three quaternion units not equal to 1
- array([[ 0., -1., 0., 0.],
- [ 1., 0., 0., 0.],
- [ 0., 0., 0., 1.],
- [ 0., 0., -1., 0.]])
- >>> LA.matrix_power(q, 2) # = -np.eye(4)
- array([[-1., 0., 0., 0.],
- [ 0., -1., 0., 0.],
- [ 0., 0., -1., 0.],
- [ 0., 0., 0., -1.]])
-
- """
- M = asanyarray(M)
- if M.ndim != 2 or M.shape[0] != M.shape[1]:
- raise ValueError("input must be a square array")
- if not issubdtype(type(n), N.integer):
- raise TypeError("exponent must be an integer")
-
- from numpy.linalg import inv
-
- if n==0:
- M = M.copy()
- M[:] = identity(M.shape[0])
- return M
- elif n<0:
- M = inv(M)
- n *= -1
-
- result = M
- if n <= 3:
- for _ in range(n-1):
- result=N.dot(result, M)
- return result
-
- # binary decomposition to reduce the number of Matrix
- # multiplications for n > 3.
- beta = binary_repr(n)
- Z, q, t = M, 0, len(beta)
- while beta[t-q-1] == '0':
- Z = N.dot(Z, Z)
- q += 1
- result = Z
- for k in range(q+1, t):
- Z = N.dot(Z, Z)
- if beta[t-k-1] == '1':
- result = N.dot(result, Z)
- return result
-
-
class matrix(N.ndarray):
"""
matrix(data, dtype=None, copy=True)
diff --git a/numpy/matrixlib/tests/test_defmatrix.py b/numpy/matrixlib/tests/test_defmatrix.py
index a02a05c09..d160490b3 100644
--- a/numpy/matrixlib/tests/test_defmatrix.py
+++ b/numpy/matrixlib/tests/test_defmatrix.py
@@ -13,7 +13,7 @@ from numpy.testing import (
assert_, assert_equal, assert_almost_equal, assert_array_equal,
assert_array_almost_equal, assert_raises
)
-from numpy.matrixlib.defmatrix import matrix_power
+from numpy.linalg import matrix_power
from numpy.matrixlib import mat
class TestCtor(object):
diff --git a/numpy/matrixlib/tests/test_interaction.py b/numpy/matrixlib/tests/test_interaction.py
new file mode 100644
index 000000000..fefb159c6
--- /dev/null
+++ b/numpy/matrixlib/tests/test_interaction.py
@@ -0,0 +1,361 @@
+"""Tests of interaction of matrix with other parts of numpy.
+
+Note that tests with MaskedArray and linalg are done in separate files.
+"""
+from __future__ import division, absolute_import, print_function
+
+import textwrap
+import warnings
+
+import numpy as np
+from numpy.testing import (assert_, assert_equal, assert_raises,
+ assert_raises_regex, assert_array_equal,
+ assert_almost_equal, assert_array_almost_equal)
+
+
+def test_fancy_indexing():
+ # The matrix class messes with the shape. While this is always
+ # weird (getitem is not used, it does not have setitem nor knows
+ # about fancy indexing), this tests gh-3110
+ # 2018-04-29: moved here from core.tests.test_index.
+ m = np.matrix([[1, 2], [3, 4]])
+
+ assert_(isinstance(m[[0, 1, 0], :], np.matrix))
+
+ # gh-3110. Note the transpose currently because matrices do *not*
+ # support dimension fixing for fancy indexing correctly.
+ x = np.asmatrix(np.arange(50).reshape(5, 10))
+ assert_equal(x[:2, np.array(-1)], x[:2, -1].T)
+
+
+def test_polynomial_mapdomain():
+ # test that polynomial preserved matrix subtype.
+ # 2018-04-29: moved here from polynomial.tests.polyutils.
+ dom1 = [0, 4]
+ dom2 = [1, 3]
+ x = np.matrix([dom1, dom1])
+ res = np.polynomial.polyutils.mapdomain(x, dom1, dom2)
+ assert_(isinstance(res, np.matrix))
+
+
+def test_sort_matrix_none():
+ # 2018-04-29: moved here from core.tests.test_multiarray
+ a = np.matrix([[2, 1, 0]])
+ actual = np.sort(a, axis=None)
+ expected = np.matrix([[0, 1, 2]])
+ assert_equal(actual, expected)
+ assert_(type(expected) is np.matrix)
+
+
+def test_partition_matrix_none():
+ # gh-4301
+ # 2018-04-29: moved here from core.tests.test_multiarray
+ a = np.matrix([[2, 1, 0]])
+ actual = np.partition(a, 1, axis=None)
+ expected = np.matrix([[0, 1, 2]])
+ assert_equal(actual, expected)
+ assert_(type(expected) is np.matrix)
+
+
+def test_dot_scalar_and_matrix_of_objects():
+ # Ticket #2469
+ # 2018-04-29: moved here from core.tests.test_multiarray
+ arr = np.matrix([1, 2], dtype=object)
+ desired = np.matrix([[3, 6]], dtype=object)
+ assert_equal(np.dot(arr, 3), desired)
+ assert_equal(np.dot(3, arr), desired)
+
+
+def test_inner_scalar_and_matrix():
+ # 2018-04-29: moved here from core.tests.test_multiarray
+ for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?':
+ sca = np.array(3, dtype=dt)[()]
+ arr = np.matrix([[1, 2], [3, 4]], dtype=dt)
+ desired = np.matrix([[3, 6], [9, 12]], dtype=dt)
+ assert_equal(np.inner(arr, sca), desired)
+ assert_equal(np.inner(sca, arr), desired)
+
+
+def test_inner_scalar_and_matrix_of_objects():
+ # Ticket #4482
+ # 2018-04-29: moved here from core.tests.test_multiarray
+ arr = np.matrix([1, 2], dtype=object)
+ desired = np.matrix([[3, 6]], dtype=object)
+ assert_equal(np.inner(arr, 3), desired)
+ assert_equal(np.inner(3, arr), desired)
+
+
+def test_iter_allocate_output_subtype():
+ # Make sure that the subtype with priority wins
+ # 2018-04-29: moved here from core.tests.test_nditer, given the
+ # matrix specific shape test.
+
+ # matrix vs ndarray
+ a = np.matrix([[1, 2], [3, 4]])
+ b = np.arange(4).reshape(2, 2).T
+ i = np.nditer([a, b, None], [],
+ [['readonly'], ['readonly'], ['writeonly', 'allocate']])
+ assert_(type(i.operands[2]) is np.matrix)
+ assert_(type(i.operands[2]) is not np.ndarray)
+ assert_equal(i.operands[2].shape, (2, 2))
+
+ # matrix always wants things to be 2D
+ b = np.arange(4).reshape(1, 2, 2)
+ assert_raises(RuntimeError, np.nditer, [a, b, None], [],
+ [['readonly'], ['readonly'], ['writeonly', 'allocate']])
+ # but if subtypes are disabled, the result can still work
+ i = np.nditer([a, b, None], [],
+ [['readonly'], ['readonly'],
+ ['writeonly', 'allocate', 'no_subtype']])
+ assert_(type(i.operands[2]) is np.ndarray)
+ assert_(type(i.operands[2]) is not np.matrix)
+ assert_equal(i.operands[2].shape, (1, 2, 2))
+
+
+def like_function():
+ # 2018-04-29: moved here from core.tests.test_numeric
+ a = np.matrix([[1, 2], [3, 4]])
+ for like_function in np.zeros_like, np.ones_like, np.empty_like:
+ b = like_function(a)
+ assert_(type(b) is np.matrix)
+
+ c = like_function(a, subok=False)
+ assert_(type(c) is not np.matrix)
+
+
+def test_array_astype():
+ # 2018-04-29: copied here from core.tests.test_api
+ # subok=True passes through a matrix
+ a = np.matrix([[0, 1, 2], [3, 4, 5]], dtype='f4')
+ b = a.astype('f4', subok=True, copy=False)
+ assert_(a is b)
+
+ # subok=True is default, and creates a subtype on a cast
+ b = a.astype('i4', copy=False)
+ assert_equal(a, b)
+ assert_equal(type(b), np.matrix)
+
+ # subok=False never returns a matrix
+ b = a.astype('f4', subok=False, copy=False)
+ assert_equal(a, b)
+ assert_(not (a is b))
+ assert_(type(b) is not np.matrix)
+
+
+def test_stack():
+ # 2018-04-29: copied here from core.tests.test_shape_base
+ # check np.matrix cannot be stacked
+ m = np.matrix([[1, 2], [3, 4]])
+ assert_raises_regex(ValueError, 'shape too large to be a matrix',
+ np.stack, [m, m])
+
+
+def test_object_scalar_multiply():
+ # Tickets #2469 and #4482
+ # 2018-04-29: moved here from core.tests.test_ufunc
+ arr = np.matrix([1, 2], dtype=object)
+ desired = np.matrix([[3, 6]], dtype=object)
+ assert_equal(np.multiply(arr, 3), desired)
+ assert_equal(np.multiply(3, arr), desired)
+
+
+def test_nanfunctions_matrices():
+ # Check that it works and that type and
+ # shape are preserved
+ # 2018-04-29: moved here from core.tests.test_nanfunctions
+ mat = np.matrix(np.eye(3))
+ for f in [np.nanmin, np.nanmax]:
+ res = f(mat, axis=0)
+ assert_(isinstance(res, np.matrix))
+ assert_(res.shape == (1, 3))
+ res = f(mat, axis=1)
+ assert_(isinstance(res, np.matrix))
+ assert_(res.shape == (3, 1))
+ res = f(mat)
+ assert_(np.isscalar(res))
+ # check that rows of nan are dealt with for subclasses (#4628)
+ mat[1] = np.nan
+ for f in [np.nanmin, np.nanmax]:
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter('always')
+ res = f(mat, axis=0)
+ assert_(isinstance(res, np.matrix))
+ assert_(not np.any(np.isnan(res)))
+ assert_(len(w) == 0)
+
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter('always')
+ res = f(mat, axis=1)
+ assert_(isinstance(res, np.matrix))
+ assert_(np.isnan(res[1, 0]) and not np.isnan(res[0, 0])
+ and not np.isnan(res[2, 0]))
+ assert_(len(w) == 1, 'no warning raised')
+ assert_(issubclass(w[0].category, RuntimeWarning))
+
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter('always')
+ res = f(mat)
+ assert_(np.isscalar(res))
+ assert_(res != np.nan)
+ assert_(len(w) == 0)
+
+
+def test_nanfunctions_matrices_general():
+ # Check that it works and that type and
+ # shape are preserved
+ # 2018-04-29: moved here from core.tests.test_nanfunctions
+ mat = np.matrix(np.eye(3))
+ for f in (np.nanargmin, np.nanargmax, np.nansum, np.nanprod,
+ np.nanmean, np.nanvar, np.nanstd):
+ res = f(mat, axis=0)
+ assert_(isinstance(res, np.matrix))
+ assert_(res.shape == (1, 3))
+ res = f(mat, axis=1)
+ assert_(isinstance(res, np.matrix))
+ assert_(res.shape == (3, 1))
+ res = f(mat)
+ assert_(np.isscalar(res))
+
+ for f in np.nancumsum, np.nancumprod:
+ res = f(mat, axis=0)
+ assert_(isinstance(res, np.matrix))
+ assert_(res.shape == (3, 3))
+ res = f(mat, axis=1)
+ assert_(isinstance(res, np.matrix))
+ assert_(res.shape == (3, 3))
+ res = f(mat)
+ assert_(isinstance(res, np.matrix))
+ assert_(res.shape == (1, 3*3))
+
+
+def test_average_matrix():
+ # 2018-04-29: moved here from core.tests.test_function_base.
+ y = np.matrix(np.random.rand(5, 5))
+ assert_array_equal(y.mean(0), np.average(y, 0))
+
+ a = np.matrix([[1, 2], [3, 4]])
+ w = np.matrix([[1, 2], [3, 4]])
+
+ r = np.average(a, axis=0, weights=w)
+ assert_equal(type(r), np.matrix)
+ assert_equal(r, [[2.5, 10.0/3]])
+
+
+def test_trapz_matrix():
+ # Test to make sure matrices give the same answer as ndarrays
+ # 2018-04-29: moved here from core.tests.test_function_base.
+ x = np.linspace(0, 5)
+ y = x * x
+ r = np.trapz(y, x)
+ mx = np.matrix(x)
+ my = np.matrix(y)
+ mr = np.trapz(my, mx)
+ assert_almost_equal(mr, r)
+
+
+def test_ediff1d_matrix():
+ # 2018-04-29: moved here from core.tests.test_arraysetops.
+ assert(isinstance(np.ediff1d(np.matrix(1)), np.matrix))
+ assert(isinstance(np.ediff1d(np.matrix(1), to_begin=1), np.matrix))
+
+
+def test_apply_along_axis_matrix():
+ # this test is particularly malicious because matrix
+ # refuses to become 1d
+ # 2018-04-29: moved here from core.tests.test_shape_base.
+ def double(row):
+ return row * 2
+
+ m = np.matrix([[0, 1], [2, 3]])
+ expected = np.matrix([[0, 2], [4, 6]])
+
+ result = np.apply_along_axis(double, 0, m)
+ assert_(isinstance(result, np.matrix))
+ assert_array_equal(result, expected)
+
+ result = np.apply_along_axis(double, 1, m)
+ assert_(isinstance(result, np.matrix))
+ assert_array_equal(result, expected)
+
+
+def test_kron_matrix():
+ # 2018-04-29: moved here from core.tests.test_shape_base.
+ a = np.ones([2, 2])
+ m = np.asmatrix(a)
+ assert_equal(type(np.kron(a, a)), np.ndarray)
+ assert_equal(type(np.kron(m, m)), np.matrix)
+ assert_equal(type(np.kron(a, m)), np.matrix)
+ assert_equal(type(np.kron(m, a)), np.matrix)
+
+
+class TestConcatenatorMatrix(object):
+ # 2018-04-29: moved here from core.tests.test_index_tricks.
+ def test_matrix(self):
+ a = [1, 2]
+ b = [3, 4]
+
+ ab_r = np.r_['r', a, b]
+ ab_c = np.r_['c', a, b]
+
+ assert_equal(type(ab_r), np.matrix)
+ assert_equal(type(ab_c), np.matrix)
+
+ assert_equal(np.array(ab_r), [[1, 2, 3, 4]])
+ assert_equal(np.array(ab_c), [[1], [2], [3], [4]])
+
+ assert_raises(ValueError, lambda: np.r_['rc', a, b])
+
+ def test_matrix_scalar(self):
+ r = np.r_['r', [1, 2], 3]
+ assert_equal(type(r), np.matrix)
+ assert_equal(np.array(r), [[1, 2, 3]])
+
+ def test_matrix_builder(self):
+ a = np.array([1])
+ b = np.array([2])
+ c = np.array([3])
+ d = np.array([4])
+ actual = np.r_['a, b; c, d']
+ expected = np.bmat([[a, b], [c, d]])
+
+ assert_equal(actual, expected)
+ assert_equal(type(actual), type(expected))
+
+
+def test_array_equal_error_message_matrix():
+ # 2018-04-29: moved here from testing.tests.test_utils.
+ try:
+ assert_equal(np.array([1, 2]), np.matrix([1, 2]))
+ except AssertionError as e:
+ msg = str(e)
+ msg2 = msg.replace("shapes (2L,), (1L, 2L)", "shapes (2,), (1, 2)")
+ msg_reference = textwrap.dedent("""\
+
+ Arrays are not equal
+
+ (shapes (2,), (1, 2) mismatch)
+ x: array([1, 2])
+ y: matrix([[1, 2]])""")
+ try:
+ assert_equal(msg, msg_reference)
+ except AssertionError:
+ assert_equal(msg2, msg_reference)
+ else:
+ raise AssertionError("Did not raise")
+
+
+def test_array_almost_equal_matrix():
+ # Matrix slicing keeps things 2-D, while array does not necessarily.
+ # See gh-8452.
+ # 2018-04-29: moved here from testing.tests.test_utils.
+ m1 = np.matrix([[1., 2.]])
+ m2 = np.matrix([[1., np.nan]])
+ m3 = np.matrix([[1., -np.inf]])
+ m4 = np.matrix([[np.nan, np.inf]])
+ m5 = np.matrix([[1., 2.], [np.nan, np.inf]])
+ for assert_func in assert_array_almost_equal, assert_almost_equal:
+ for m in m1, m2, m3, m4, m5:
+ assert_func(m, m)
+ a = np.array(m)
+ assert_func(a, m)
+ assert_func(m, a)
diff --git a/numpy/matrixlib/tests/test_masked_matrix.py b/numpy/matrixlib/tests/test_masked_matrix.py
new file mode 100644
index 000000000..0a0d985c4
--- /dev/null
+++ b/numpy/matrixlib/tests/test_masked_matrix.py
@@ -0,0 +1,231 @@
+from __future__ import division, absolute_import, print_function
+
+import pickle
+
+import numpy as np
+from numpy.ma.testutils import (assert_, assert_equal, assert_raises,
+ assert_array_equal)
+from numpy.ma.core import (masked_array, masked_values, masked, allequal,
+ MaskType, getmask, MaskedArray, nomask,
+ log, add, hypot, divide)
+from numpy.ma.extras import mr_
+
+
+class MMatrix(MaskedArray, np.matrix,):
+
+ def __new__(cls, data, mask=nomask):
+ mat = np.matrix(data)
+ _data = MaskedArray.__new__(cls, data=mat, mask=mask)
+ return _data
+
+ def __array_finalize__(self, obj):
+ np.matrix.__array_finalize__(self, obj)
+ MaskedArray.__array_finalize__(self, obj)
+ return
+
+ def _get_series(self):
+ _view = self.view(MaskedArray)
+ _view._sharedmask = False
+ return _view
+ _series = property(fget=_get_series)
+
+
+class TestMaskedMatrix(object):
+ def test_matrix_indexing(self):
+ # Tests conversions and indexing
+ x1 = np.matrix([[1, 2, 3], [4, 3, 2]])
+ x2 = masked_array(x1, mask=[[1, 0, 0], [0, 1, 0]])
+ x3 = masked_array(x1, mask=[[0, 1, 0], [1, 0, 0]])
+ x4 = masked_array(x1)
+ # test conversion to strings
+ str(x2) # raises?
+ repr(x2) # raises?
+ # tests of indexing
+ assert_(type(x2[1, 0]) is type(x1[1, 0]))
+ assert_(x1[1, 0] == x2[1, 0])
+ assert_(x2[1, 1] is masked)
+ assert_equal(x1[0, 2], x2[0, 2])
+ assert_equal(x1[0, 1:], x2[0, 1:])
+ assert_equal(x1[:, 2], x2[:, 2])
+ assert_equal(x1[:], x2[:])
+ assert_equal(x1[1:], x3[1:])
+ x1[0, 2] = 9
+ x2[0, 2] = 9
+ assert_equal(x1, x2)
+ x1[0, 1:] = 99
+ x2[0, 1:] = 99
+ assert_equal(x1, x2)
+ x2[0, 1] = masked
+ assert_equal(x1, x2)
+ x2[0, 1:] = masked
+ assert_equal(x1, x2)
+ x2[0, :] = x1[0, :]
+ x2[0, 1] = masked
+ assert_(allequal(getmask(x2), np.array([[0, 1, 0], [0, 1, 0]])))
+ x3[1, :] = masked_array([1, 2, 3], [1, 1, 0])
+ assert_(allequal(getmask(x3)[1], masked_array([1, 1, 0])))
+ assert_(allequal(getmask(x3[1]), masked_array([1, 1, 0])))
+ x4[1, :] = masked_array([1, 2, 3], [1, 1, 0])
+ assert_(allequal(getmask(x4[1]), masked_array([1, 1, 0])))
+ assert_(allequal(x4[1], masked_array([1, 2, 3])))
+ x1 = np.matrix(np.arange(5) * 1.0)
+ x2 = masked_values(x1, 3.0)
+ assert_equal(x1, x2)
+ assert_(allequal(masked_array([0, 0, 0, 1, 0], dtype=MaskType),
+ x2.mask))
+ assert_equal(3.0, x2.fill_value)
+
+ def test_pickling_subbaseclass(self):
+ # Test pickling w/ a subclass of ndarray
+ a = masked_array(np.matrix(list(range(10))), mask=[1, 0, 1, 0, 0] * 2)
+ a_pickled = pickle.loads(a.dumps())
+ assert_equal(a_pickled._mask, a._mask)
+ assert_equal(a_pickled, a)
+ assert_(isinstance(a_pickled._data, np.matrix))
+
+ def test_count_mean_with_matrix(self):
+ m = masked_array(np.matrix([[1, 2], [3, 4]]), mask=np.zeros((2, 2)))
+
+ assert_equal(m.count(axis=0).shape, (1, 2))
+ assert_equal(m.count(axis=1).shape, (2, 1))
+
+ # Make sure broadcasting inside mean and var work
+ assert_equal(m.mean(axis=0), [[2., 3.]])
+ assert_equal(m.mean(axis=1), [[1.5], [3.5]])
+
+ def test_flat(self):
+ # Test that flat can return items even for matrices [#4585, #4615]
+ # test simple access
+ test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1])
+ assert_equal(test.flat[1], 2)
+ assert_equal(test.flat[2], masked)
+ assert_(np.all(test.flat[0:2] == test[0, 0:2]))
+ # Test flat on masked_matrices
+ test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1])
+ test.flat = masked_array([3, 2, 1], mask=[1, 0, 0])
+ control = masked_array(np.matrix([[3, 2, 1]]), mask=[1, 0, 0])
+ assert_equal(test, control)
+ # Test setting
+ test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1])
+ testflat = test.flat
+ testflat[:] = testflat[[2, 1, 0]]
+ assert_equal(test, control)
+ testflat[0] = 9
+ # test that matrices keep the correct shape (#4615)
+ a = masked_array(np.matrix(np.eye(2)), mask=0)
+ b = a.flat
+ b01 = b[:2]
+ assert_equal(b01.data, np.array([[1., 0.]]))
+ assert_equal(b01.mask, np.array([[False, False]]))
+
+ def test_allany_onmatrices(self):
+ x = np.array([[0.13, 0.26, 0.90],
+ [0.28, 0.33, 0.63],
+ [0.31, 0.87, 0.70]])
+ X = np.matrix(x)
+ m = np.array([[True, False, False],
+ [False, False, False],
+ [True, True, False]], dtype=np.bool_)
+ mX = masked_array(X, mask=m)
+ mXbig = (mX > 0.5)
+ mXsmall = (mX < 0.5)
+
+ assert_(not mXbig.all())
+ assert_(mXbig.any())
+ assert_equal(mXbig.all(0), np.matrix([False, False, True]))
+ assert_equal(mXbig.all(1), np.matrix([False, False, True]).T)
+ assert_equal(mXbig.any(0), np.matrix([False, False, True]))
+ assert_equal(mXbig.any(1), np.matrix([True, True, True]).T)
+
+ assert_(not mXsmall.all())
+ assert_(mXsmall.any())
+ assert_equal(mXsmall.all(0), np.matrix([True, True, False]))
+ assert_equal(mXsmall.all(1), np.matrix([False, False, False]).T)
+ assert_equal(mXsmall.any(0), np.matrix([True, True, False]))
+ assert_equal(mXsmall.any(1), np.matrix([True, True, False]).T)
+
+ def test_compressed(self):
+ a = masked_array(np.matrix([1, 2, 3, 4]), mask=[0, 0, 0, 0])
+ b = a.compressed()
+ assert_equal(b, a)
+ assert_(isinstance(b, np.matrix))
+ a[0, 0] = masked
+ b = a.compressed()
+ assert_equal(b, [[2, 3, 4]])
+
+ def test_ravel(self):
+ a = masked_array(np.matrix([1, 2, 3, 4, 5]), mask=[[0, 1, 0, 0, 0]])
+ aravel = a.ravel()
+ assert_equal(aravel.shape, (1, 5))
+ assert_equal(aravel._mask.shape, a.shape)
+
+ def test_view(self):
+ # Test view w/ flexible dtype
+ iterator = list(zip(np.arange(10), np.random.rand(10)))
+ data = np.array(iterator)
+ a = masked_array(iterator, dtype=[('a', float), ('b', float)])
+ a.mask[0] = (1, 0)
+ test = a.view((float, 2), np.matrix)
+ assert_equal(test, data)
+ assert_(isinstance(test, np.matrix))
+ assert_(not isinstance(test, MaskedArray))
+
+
+class TestSubclassing(object):
+ # Test suite for masked subclasses of ndarray.
+
+ def setup(self):
+ x = np.arange(5, dtype='float')
+ mx = MMatrix(x, mask=[0, 1, 0, 0, 0])
+ self.data = (x, mx)
+
+ def test_maskedarray_subclassing(self):
+ # Tests subclassing MaskedArray
+ (x, mx) = self.data
+ assert_(isinstance(mx._data, np.matrix))
+
+ def test_masked_unary_operations(self):
+ # Tests masked_unary_operation
+ (x, mx) = self.data
+ with np.errstate(divide='ignore'):
+ assert_(isinstance(log(mx), MMatrix))
+ assert_equal(log(x), np.log(x))
+
+ def test_masked_binary_operations(self):
+ # Tests masked_binary_operation
+ (x, mx) = self.data
+ # Result should be a MMatrix
+ assert_(isinstance(add(mx, mx), MMatrix))
+ assert_(isinstance(add(mx, x), MMatrix))
+ # Result should work
+ assert_equal(add(mx, x), mx+x)
+ assert_(isinstance(add(mx, mx)._data, np.matrix))
+ assert_(isinstance(add.outer(mx, mx), MMatrix))
+ assert_(isinstance(hypot(mx, mx), MMatrix))
+ assert_(isinstance(hypot(mx, x), MMatrix))
+
+ def test_masked_binary_operations2(self):
+ # Tests domained_masked_binary_operation
+ (x, mx) = self.data
+ xmx = masked_array(mx.data.__array__(), mask=mx.mask)
+ assert_(isinstance(divide(mx, mx), MMatrix))
+ assert_(isinstance(divide(mx, x), MMatrix))
+ assert_equal(divide(mx, mx), divide(xmx, xmx))
+
+class TestConcatenator(object):
+ # Tests for mr_, the equivalent of r_ for masked arrays.
+
+ def test_matrix_builder(self):
+ assert_raises(np.ma.MAError, lambda: mr_['1, 2; 3, 4'])
+
+ def test_matrix(self):
+ # Test consistency with unmasked version. If we ever deprecate
+ # matrix, this test should either still pass, or both actual and
+ # expected should fail to be built.
+ actual = mr_['r', 1, 2, 3]
+ expected = np.ma.array(np.r_['r', 1, 2, 3])
+ assert_array_equal(actual, expected)
+
+ # outer type is masked array, inner type is matrix
+ assert_equal(type(actual), type(expected))
+ assert_equal(type(actual.data), type(expected.data))
diff --git a/numpy/matrixlib/tests/test_matrix_linalg.py b/numpy/matrixlib/tests/test_matrix_linalg.py
new file mode 100644
index 000000000..6fc733c2e
--- /dev/null
+++ b/numpy/matrixlib/tests/test_matrix_linalg.py
@@ -0,0 +1,95 @@
+""" Test functions for linalg module using the matrix class."""
+from __future__ import division, absolute_import, print_function
+
+import numpy as np
+
+from numpy.linalg.tests.test_linalg import (
+ LinalgCase, apply_tag, TestQR as _TestQR, LinalgTestCase,
+ _TestNorm2D, _TestNormDoubleBase, _TestNormSingleBase, _TestNormInt64Base,
+ SolveCases, InvCases, EigvalsCases, EigCases, SVDCases, CondCases,
+ PinvCases, DetCases, LstsqCases)
+
+
+CASES = []
+
+# square test cases
+CASES += apply_tag('square', [
+ LinalgCase("0x0_matrix",
+ np.empty((0, 0), dtype=np.double).view(np.matrix),
+ np.empty((0, 1), dtype=np.double).view(np.matrix),
+ tags={'size-0'}),
+ LinalgCase("matrix_b_only",
+ np.array([[1., 2.], [3., 4.]]),
+ np.matrix([2., 1.]).T),
+ LinalgCase("matrix_a_and_b",
+ np.matrix([[1., 2.], [3., 4.]]),
+ np.matrix([2., 1.]).T),
+])
+
+# hermitian test-cases
+CASES += apply_tag('hermitian', [
+ LinalgCase("hmatrix_a_and_b",
+ np.matrix([[1., 2.], [2., 1.]]),
+ None),
+])
+# No need to make generalized or strided cases for matrices.
+
+
+class MatrixTestCase(LinalgTestCase):
+ TEST_CASES = CASES
+
+
+class TestSolveMatrix(SolveCases, MatrixTestCase):
+ pass
+
+
+class TestInvMatrix(InvCases, MatrixTestCase):
+ pass
+
+
+class TestEigvalsMatrix(EigvalsCases, MatrixTestCase):
+ pass
+
+
+class TestEigMatrix(EigCases, MatrixTestCase):
+ pass
+
+
+class TestSVDMatrix(SVDCases, MatrixTestCase):
+ pass
+
+
+class TestCondMatrix(CondCases, MatrixTestCase):
+ pass
+
+
+class TestPinvMatrix(PinvCases, MatrixTestCase):
+ pass
+
+
+class TestDetMatrix(DetCases, MatrixTestCase):
+ pass
+
+
+class TestLstsqMatrix(LstsqCases, MatrixTestCase):
+ pass
+
+
+class _TestNorm2DMatrix(_TestNorm2D):
+ array = np.matrix
+
+
+class TestNormDoubleMatrix(_TestNorm2DMatrix, _TestNormDoubleBase):
+ pass
+
+
+class TestNormSingleMatrix(_TestNorm2DMatrix, _TestNormSingleBase):
+ pass
+
+
+class TestNormInt64Matrix(_TestNorm2DMatrix, _TestNormInt64Base):
+ pass
+
+
+class TestQRMatrix(_TestQR):
+ array = np.matrix
diff --git a/numpy/polynomial/tests/test_polyutils.py b/numpy/polynomial/tests/test_polyutils.py
index 32ea55716..801c558cc 100644
--- a/numpy/polynomial/tests/test_polyutils.py
+++ b/numpy/polynomial/tests/test_polyutils.py
@@ -63,7 +63,7 @@ class TestDomain(object):
dom1 = [0, 4]
dom2 = [1, 3]
tgt = dom2
- res = pu. mapdomain(dom1, dom1, dom2)
+ res = pu.mapdomain(dom1, dom1, dom2)
assert_almost_equal(res, tgt)
# test for complex values
@@ -83,11 +83,14 @@ class TestDomain(object):
assert_almost_equal(res, tgt)
# test that subtypes are preserved.
+ class MyNDArray(np.ndarray):
+ pass
+
dom1 = [0, 4]
dom2 = [1, 3]
- x = np.matrix([dom1, dom1])
+ x = np.array([dom1, dom1]).view(MyNDArray)
res = pu.mapdomain(x, dom1, dom2)
- assert_(isinstance(res, np.matrix))
+ assert_(isinstance(res, MyNDArray))
def test_mapparms(self):
# test for real values
diff --git a/numpy/random/mtrand/mtrand.pyx b/numpy/random/mtrand/mtrand.pyx
index 4dabaa093..b45b3146f 100644
--- a/numpy/random/mtrand/mtrand.pyx
+++ b/numpy/random/mtrand/mtrand.pyx
@@ -3817,7 +3817,7 @@ cdef class RandomState:
Draw samples from a negative binomial distribution.
Samples are drawn from a negative binomial distribution with specified
- parameters, `n` trials and `p` probability of success where `n` is an
+ parameters, `n` successes and `p` probability of success where `n` is an
integer > 0 and `p` is in the interval [0, 1].
Parameters
@@ -3837,21 +3837,19 @@ cdef class RandomState:
-------
out : ndarray or scalar
Drawn samples from the parameterized negative binomial distribution,
- where each sample is equal to N, the number of trials it took to
- achieve n - 1 successes, N - (n - 1) failures, and a success on the,
- (N + n)th trial.
+ where each sample is equal to N, the number of failures that
+ occurred before a total of n successes was reached.
Notes
-----
The probability density for the negative binomial distribution is
- .. math:: P(N;n,p) = \\binom{N+n-1}{n-1}p^{n}(1-p)^{N},
+ .. math:: P(N;n,p) = \\binom{N+n-1}{N}p^{n}(1-p)^{N},
- where :math:`n-1` is the number of successes, :math:`p` is the
- probability of success, and :math:`N+n-1` is the number of trials.
- The negative binomial distribution gives the probability of n-1
- successes and N failures in N+n-1 trials, and success on the (N+n)th
- trial.
+ where :math:`n` is the number of successes, :math:`p` is the
+ probability of success, and :math:`N+n` is the number of trials.
+ The negative binomial distribution gives the probability of N
+ failures given n successes, with a success on the last trial.
If one throws a die repeatedly until the third time a "1" appears,
then the probability distribution of the number of non-"1"s that
@@ -4903,10 +4901,24 @@ cdef class RandomState:
"""
if isinstance(x, (int, long, np.integer)):
arr = np.arange(x)
- else:
- arr = np.array(x)
- self.shuffle(arr)
- return arr
+ self.shuffle(arr)
+ return arr
+
+ arr = np.asarray(x)
+
+ # shuffle has fast-path for 1-d
+ if arr.ndim == 1:
+ # must return a copy
+ if arr is x:
+ arr = np.array(arr)
+ self.shuffle(arr)
+ return arr
+
+ # Shuffle an index array instead; intp dtype ensures the 1-d fast path
+ idx = np.arange(arr.shape[0], dtype=np.intp)
+ self.shuffle(idx)
+ return arr[idx]
+
_rand = RandomState()
seed = _rand.seed
diff --git a/numpy/testing/_private/decorators.py b/numpy/testing/_private/decorators.py
index 60d3f968f..24c4e385d 100644
--- a/numpy/testing/_private/decorators.py
+++ b/numpy/testing/_private/decorators.py
@@ -34,7 +34,7 @@ def slow(t):
The exact definition of a slow test is obviously both subjective and
hardware-dependent, but in general any individual test that requires more
- than a second or two should be labeled as slow (the whole suite consits of
+ than a second or two should be labeled as slow (the whole suite consists of
thousands of tests, so even a second is significant).
Parameters
diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py
index b0c0b0c48..a7935f175 100644
--- a/numpy/testing/_private/utils.py
+++ b/numpy/testing/_private/utils.py
@@ -771,7 +771,11 @@ def assert_array_compare(comparison, x, y, err_msg='', verbose=True,
reduced = val.ravel()
cond = reduced.all()
reduced = reduced.tolist()
- if not cond:
+ # The below comparison is a hack to ensure that fully masked
+ # results, for which val.ravel().all() returns np.ma.masked,
+ # do not trigger a failure (np.ma.masked != True evaluates as
+ # np.ma.masked, which is falsy).
+ if cond != True:
match = 100-100.0*reduced.count(1)/len(reduced)
msg = build_err_msg([x, y],
err_msg
@@ -1369,16 +1373,20 @@ def _assert_valid_refcount(op):
"""
if not HAS_REFCOUNT:
return True
- import numpy as np
+ import numpy as np, gc
b = np.arange(100*100).reshape(100, 100)
c = b
i = 1
- rc = sys.getrefcount(i)
- for j in range(15):
- d = op(b, c)
- assert_(sys.getrefcount(i) >= rc)
+ gc.disable()
+ try:
+ rc = sys.getrefcount(i)
+ for j in range(15):
+ d = op(b, c)
+ assert_(sys.getrefcount(i) >= rc)
+ finally:
+ gc.enable()
del d # for pyflakes
diff --git a/numpy/testing/tests/test_utils.py b/numpy/testing/tests/test_utils.py
index 0592e62f8..602cdf5f2 100644
--- a/numpy/testing/tests/test_utils.py
+++ b/numpy/testing/tests/test_utils.py
@@ -286,7 +286,7 @@ class TestEqual(TestArrayEqual):
def test_error_message(self):
try:
- self._assert_func(np.array([1, 2]), np.matrix([1, 2]))
+ self._assert_func(np.array([1, 2]), np.array([[1, 2]]))
except AssertionError as e:
msg = str(e)
msg2 = msg.replace("shapes (2L,), (1L, 2L)", "shapes (2,), (1, 2)")
@@ -296,7 +296,7 @@ class TestEqual(TestArrayEqual):
(shapes (2,), (1, 2) mismatch)
x: array([1, 2])
- y: matrix([[1, 2]])""")
+ y: array([[1, 2]])""")
try:
assert_equal(msg, msg_reference)
except AssertionError:
@@ -366,19 +366,23 @@ class TestArrayAlmostEqual(_GenericTest):
self._assert_func(b, a)
self._assert_func(b, b)
- def test_matrix(self):
- # Matrix slicing keeps things 2-D, while array does not necessarily.
- # See gh-8452.
- m1 = np.matrix([[1., 2.]])
- m2 = np.matrix([[1., np.nan]])
- m3 = np.matrix([[1., -np.inf]])
- m4 = np.matrix([[np.nan, np.inf]])
- m5 = np.matrix([[1., 2.], [np.nan, np.inf]])
- for m in m1, m2, m3, m4, m5:
- self._assert_func(m, m)
- a = np.array(m)
- self._assert_func(a, m)
- self._assert_func(m, a)
+ # Test fully masked as well (see gh-11123).
+ a = np.ma.MaskedArray(3.5, mask=True)
+ b = np.array([3., 4., 6.5])
+ self._test_equal(a, b)
+ self._test_equal(b, a)
+ a = np.ma.masked
+ b = np.array([3., 4., 6.5])
+ self._test_equal(a, b)
+ self._test_equal(b, a)
+ a = np.ma.MaskedArray([3., 4., 6.5], mask=[True, True, True])
+ b = np.array([1., 2., 3.])
+ self._test_equal(a, b)
+ self._test_equal(b, a)
+ a = np.ma.MaskedArray([3., 4., 6.5], mask=[True, True, True])
+ b = np.array(1.)
+ self._test_equal(a, b)
+ self._test_equal(b, a)
def test_subclass_that_cannot_be_bool(self):
# While we cannot guarantee testing functions will always work for
@@ -479,20 +483,6 @@ class TestAlmostEqual(_GenericTest):
# remove anything that's not the array string
assert_equal(str(e).split('%)\n ')[1], b)
- def test_matrix(self):
- # Matrix slicing keeps things 2-D, while array does not necessarily.
- # See gh-8452.
- m1 = np.matrix([[1., 2.]])
- m2 = np.matrix([[1., np.nan]])
- m3 = np.matrix([[1., -np.inf]])
- m4 = np.matrix([[np.nan, np.inf]])
- m5 = np.matrix([[1., 2.], [np.nan, np.inf]])
- for m in m1, m2, m3, m4, m5:
- self._assert_func(m, m)
- a = np.array(m)
- self._assert_func(a, m)
- self._assert_func(m, a)
-
def test_subclass_that_cannot_be_bool(self):
# While we cannot guarantee testing functions will always work for
# subclasses, the tests should ideally rely only on subclasses having