Diffstat (limited to 'numpy')
-rw-r--r--  numpy/__init__.py | 25
-rw-r--r--  numpy/add_newdocs.py | 169
-rw-r--r--  numpy/core/_methods.py | 21
-rw-r--r--  numpy/core/arrayprint.py | 169
-rw-r--r--  numpy/core/code_generators/cversions.txt | 3
-rw-r--r--  numpy/core/code_generators/generate_umath.py | 12
-rw-r--r--  numpy/core/code_generators/numpy_api.py | 5
-rw-r--r--  numpy/core/code_generators/ufunc_docstrings.py | 34
-rw-r--r--  numpy/core/einsumfunc.py | 2
-rw-r--r--  numpy/core/fromnumeric.py | 100
-rw-r--r--  numpy/core/include/numpy/ndarrayobject.h | 13
-rw-r--r--  numpy/core/include/numpy/npy_interrupt.h | 2
-rw-r--r--  numpy/core/include/numpy/npy_math.h | 11
-rw-r--r--  numpy/core/numeric.py | 46
-rw-r--r--  numpy/core/setup.py | 2
-rw-r--r--  numpy/core/setup_common.py | 3
-rw-r--r--  numpy/core/src/multiarray/_multiarray_tests.c.src | 108
-rw-r--r--  numpy/core/src/multiarray/array_assign_scalar.c | 4
-rw-r--r--  numpy/core/src/multiarray/arrayobject.c | 64
-rw-r--r--  numpy/core/src/multiarray/arraytypes.c.src | 100
-rw-r--r--  numpy/core/src/multiarray/ctors.c | 87
-rw-r--r--  numpy/core/src/multiarray/datetime.c | 17
-rw-r--r--  numpy/core/src/multiarray/descriptor.c | 48
-rw-r--r--  numpy/core/src/multiarray/dtype_transfer.c | 9
-rw-r--r--  numpy/core/src/multiarray/einsum.c.src | 6
-rw-r--r--  numpy/core/src/multiarray/lowlevel_strided_loops.c.src | 4
-rw-r--r--  numpy/core/src/multiarray/mapping.c | 18
-rw-r--r--  numpy/core/src/multiarray/multiarraymodule.c | 37
-rw-r--r--  numpy/core/src/multiarray/nditer_api.c | 42
-rw-r--r--  numpy/core/src/multiarray/nditer_constr.c | 11
-rw-r--r--  numpy/core/src/multiarray/nditer_impl.h | 2
-rw-r--r--  numpy/core/src/multiarray/nditer_pywrap.c | 111
-rw-r--r--  numpy/core/src/multiarray/number.c | 4
-rw-r--r--  numpy/core/src/multiarray/scalartypes.c.src | 5
-rw-r--r--  numpy/core/src/npymath/ieee754.c.src | 75
-rw-r--r--  numpy/core/src/umath/extobj.c | 2
-rw-r--r--  numpy/core/src/umath/loops.c.src | 42
-rw-r--r--  numpy/core/src/umath/loops.h.src | 6
-rw-r--r--  numpy/core/src/umath/override.c | 15
-rw-r--r--  numpy/core/src/umath/reduction.c | 2
-rw-r--r--  numpy/core/src/umath/scalarmath.c.src | 14
-rw-r--r--  numpy/core/src/umath/simd.inc.src | 7
-rw-r--r--  numpy/core/src/umath/ufunc_object.c | 644
-rw-r--r--  numpy/core/src/umath/umathmodule.c | 105
-rw-r--r--  numpy/core/tests/test_api.py | 13
-rw-r--r--  numpy/core/tests/test_arrayprint.py | 2
-rw-r--r--  numpy/core/tests/test_datetime.py | 37
-rw-r--r--  numpy/core/tests/test_deprecations.py | 16
-rw-r--r--  numpy/core/tests/test_dtype.py | 8
-rw-r--r--  numpy/core/tests/test_einsum.py | 10
-rw-r--r--  numpy/core/tests/test_indexing.py | 13
-rw-r--r--  numpy/core/tests/test_longdouble.py | 2
-rw-r--r--  numpy/core/tests/test_multiarray.py | 145
-rw-r--r--  numpy/core/tests/test_nditer.py | 611
-rw-r--r--  numpy/core/tests/test_numeric.py | 12
-rw-r--r--  numpy/core/tests/test_print.py | 2
-rw-r--r--  numpy/core/tests/test_regression.py | 11
-rw-r--r--  numpy/core/tests/test_scalarprint.py | 38
-rw-r--r--  numpy/core/tests/test_shape_base.py | 4
-rw-r--r--  numpy/core/tests/test_ufunc.py | 174
-rw-r--r--  numpy/core/tests/test_umath.py | 82
-rw-r--r--  numpy/distutils/misc_util.py | 25
-rw-r--r--  numpy/distutils/system_info.py | 133
-rw-r--r--  numpy/doc/constants.py | 3
-rw-r--r--  numpy/doc/glossary.py | 30
-rw-r--r--  numpy/doc/misc.py | 2
-rw-r--r--  numpy/doc/structured_arrays.py | 4
-rw-r--r--  numpy/f2py/src/fortranobject.c | 2
-rw-r--r--  numpy/fft/fftpack_litemodule.c | 9
-rw-r--r--  numpy/lib/_version.py | 2
-rw-r--r--  numpy/lib/arraypad.py | 289
-rw-r--r--  numpy/lib/arraysetops.py | 65
-rw-r--r--  numpy/lib/format.py | 13
-rw-r--r--  numpy/lib/function_base.py | 193
-rw-r--r--  numpy/lib/histograms.py | 36
-rw-r--r--  numpy/lib/mixins.py | 4
-rw-r--r--  numpy/lib/nanfunctions.py | 125
-rw-r--r--  numpy/lib/npyio.py | 57
-rw-r--r--  numpy/lib/polynomial.py | 5
-rw-r--r--  numpy/lib/scimath.py | 2
-rw-r--r--  numpy/lib/shape_base.py | 227
-rw-r--r--  numpy/lib/stride_tricks.py | 7
-rw-r--r--  numpy/lib/tests/test_arraypad.py | 13
-rw-r--r--  numpy/lib/tests/test_arraysetops.py | 43
-rw-r--r--  numpy/lib/tests/test_function_base.py | 83
-rw-r--r--  numpy/lib/tests/test_histograms.py | 43
-rw-r--r--  numpy/lib/tests/test_index_tricks.py | 31
-rw-r--r--  numpy/lib/tests/test_io.py | 60
-rw-r--r--  numpy/lib/tests/test_nanfunctions.py | 141
-rw-r--r--  numpy/lib/tests/test_polynomial.py | 36
-rw-r--r--  numpy/lib/tests/test_shape_base.py | 118
-rw-r--r--  numpy/lib/twodim_base.py | 2
-rw-r--r--  numpy/linalg/lapack_litemodule.c | 8
-rw-r--r--  numpy/linalg/linalg.py | 119
-rw-r--r--  numpy/linalg/tests/test_linalg.py | 476
-rw-r--r--  numpy/linalg/umath_linalg.c.src | 117
-rw-r--r--  numpy/ma/core.py | 47
-rw-r--r--  numpy/ma/extras.py | 47
-rw-r--r--  numpy/ma/tests/test_core.py | 139
-rw-r--r--  numpy/ma/tests/test_extras.py | 12
-rw-r--r--  numpy/ma/tests/test_subclassing.py | 45
-rw-r--r--  numpy/matrixlib/defmatrix.py | 115
-rw-r--r--  numpy/matrixlib/tests/test_defmatrix.py | 2
-rw-r--r--  numpy/matrixlib/tests/test_interaction.py | 361
-rw-r--r--  numpy/matrixlib/tests/test_masked_matrix.py | 231
-rw-r--r--  numpy/matrixlib/tests/test_matrix_linalg.py | 95
-rw-r--r--  numpy/polynomial/tests/test_polyutils.py | 9
-rw-r--r--  numpy/random/mtrand/mtrand.pyx | 98
-rw-r--r--  numpy/testing/_private/decorators.py | 2
-rw-r--r--  numpy/testing/_private/utils.py | 110
-rw-r--r--  numpy/testing/tests/test_utils.py | 124
-rw-r--r--  numpy/testing/utils.py | 2
112 files changed, 4966 insertions, 2382 deletions
diff --git a/numpy/__init__.py b/numpy/__init__.py
index d10a1ecd3..d250ed5ac 100644
--- a/numpy/__init__.py
+++ b/numpy/__init__.py
@@ -194,3 +194,28 @@ else:
from numpy.testing._private.pytesttester import PytestTester
test = PytestTester(__name__)
del PytestTester
+
+
+ def _sanity_check():
+ """
+ Quick sanity checks for common bugs caused by environment.
+ There are some cases e.g. with wrong BLAS ABI that cause wrong
+ results under specific runtime conditions that are not necessarily
+ achieved during test suite runs, and it is useful to catch those early.
+
+ See https://github.com/numpy/numpy/issues/8577 and other
+ similar bug reports.
+
+ """
+ try:
+ x = ones(2, dtype=float32)
+ if not abs(x.dot(x) - 2.0) < 1e-5:
+ raise AssertionError()
+ except AssertionError:
+ msg = ("The current Numpy installation ({!r}) fails to "
+ "pass simple sanity checks. This can be caused for example "
+ "by incorrect BLAS library being linked in.")
+ raise RuntimeError(msg.format(__file__))
+
+ _sanity_check()
+ del _sanity_check
diff --git a/numpy/add_newdocs.py b/numpy/add_newdocs.py
index d8ae98c12..fc2130096 100644
--- a/numpy/add_newdocs.py
+++ b/numpy/add_newdocs.py
@@ -257,6 +257,7 @@ add_newdoc('numpy.core', 'nditer',
dtypes : tuple of dtype(s)
The data types of the values provided in `value`. This may be
different from the operand data types if buffering is enabled.
+ Valid only before the iterator is closed.
finished : bool
Whether the iteration over the operands is finished or not.
has_delayed_bufalloc : bool
@@ -282,7 +283,8 @@ add_newdoc('numpy.core', 'nditer',
Size of the iterator.
itviews
Structured view(s) of `operands` in memory, matching the reordered
- and optimized iterator access pattern.
+ and optimized iterator access pattern. Valid only before the iterator
+ is closed.
multi_index
When the "multi_index" flag was used, this property
provides access to the index. Raises a ValueError if accessed
@@ -292,7 +294,8 @@ add_newdoc('numpy.core', 'nditer',
nop : int
The number of iterator operands.
operands : tuple of operand(s)
- The array(s) to be iterated over.
+ The array(s) to be iterated over. Valid only before the iterator is
+ closed.
shape : tuple of ints
Shape tuple, the shape of the iterator.
value
@@ -319,8 +322,9 @@ add_newdoc('numpy.core', 'nditer',
addop = np.add
it = np.nditer([x, y, out], [],
[['readonly'], ['readonly'], ['writeonly','allocate']])
- for (a, b, c) in it:
- addop(a, b, out=c)
+ with it:
+ for (a, b, c) in it:
+ addop(a, b, out=c)
return it.operands[2]
Here is the same function, but following the C-style pattern::
@@ -330,12 +334,12 @@ add_newdoc('numpy.core', 'nditer',
it = np.nditer([x, y, out], [],
[['readonly'], ['readonly'], ['writeonly','allocate']])
+ with it:
+ while not it.finished:
+ addop(it[0], it[1], out=it[2])
+ it.iternext()
- while not it.finished:
- addop(it[0], it[1], out=it[2])
- it.iternext()
-
- return it.operands[2]
+ return it.operands[2]
Here is an example outer product function::
@@ -344,14 +348,13 @@ add_newdoc('numpy.core', 'nditer',
it = np.nditer([x, y, out], ['external_loop'],
[['readonly'], ['readonly'], ['writeonly', 'allocate']],
- op_axes=[range(x.ndim)+[-1]*y.ndim,
- [-1]*x.ndim+range(y.ndim),
+ op_axes=[list(range(x.ndim)) + [-1] * y.ndim,
+ [-1] * x.ndim + list(range(y.ndim)),
None])
-
- for (a, b, c) in it:
- mulop(a, b, out=c)
-
- return it.operands[2]
+ with it:
+ for (a, b, c) in it:
+ mulop(a, b, out=c)
+ return it.operands[2]
>>> a = np.arange(2)+1
>>> b = np.arange(3)+1
@@ -374,13 +377,40 @@ add_newdoc('numpy.core', 'nditer',
while not it.finished:
it[0] = lamdaexpr(*it[1:])
it.iternext()
- return it.operands[0]
+ return it.operands[0]
>>> a = np.arange(5)
>>> b = np.ones(5)
>>> luf(lambda i,j:i*i + j/2, a, b)
array([ 0.5, 1.5, 4.5, 9.5, 16.5])
+ If operand flags `"writeonly"` or `"readwrite"` are used, the operands may
+ be views into the original data with the `WRITEBACKIFCOPY` flag. In this
+ case `nditer` must be used as a context manager, or the `nditer.close`
+ method must be called before using the result. The temporary
+ data will be written back to the original data when the `__exit__`
+ method is called, but not before:
+
+ >>> a = np.arange(6, dtype='i4')[::-2]
+ >>> with np.nditer(a, [],
+ ... [['writeonly', 'updateifcopy']],
+ ... casting='unsafe',
+ ... op_dtypes=[np.dtype('f4')]) as i:
+ ... x = i.operands[0]
+ ... x[:] = [-1, -2, -3]
+ ... # a still unchanged here
+ >>> a, x
+ (array([-1, -2, -3], dtype=int32), array([-1., -2., -3.], dtype=float32))
+
+ It is important to note that once the iterator is exited, dangling
+ references (like `x` in the example) may or may not share data with
+ the original data `a`. If writeback semantics were active, i.e. if
+ `x.base.flags.writebackifcopy` is `True`, then exiting the iterator
+ will sever the connection between `x` and `a`, and writing to `x` will
+ no longer write to `a`. If writeback semantics are not active, then
+ `x.data` will still point at some part of `a.data`, and writing to
+ one will affect the other.
+
""")
# nditer methods
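A minimal sketch continuing the example above; ``np.shares_memory`` and the
``writebackifcopy`` flag are used purely as illustrations of the severed link:

    >>> np.shares_memory(x, a)     # x was a float32 scratch copy of a
    False
    >>> x.flags.writebackifcopy    # writeback already resolved by __exit__
    False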
@@ -404,6 +434,13 @@ add_newdoc('numpy.core', 'nditer', ('copy',
"""))
+add_newdoc('numpy.core', 'nditer', ('operands',
+ """
+ operands[`Slice`]
+
+ The array(s) to be iterated over. Valid only before the iterator is closed.
+ """))
+
add_newdoc('numpy.core', 'nditer', ('debug_print',
"""
debug_print()
@@ -524,6 +561,18 @@ add_newdoc('numpy.core', 'nested_iters',
""")
+add_newdoc('numpy.core', 'nditer', ('close',
+ """
+ close()
+
+ Resolve all writeback semantics in writeable operands.
+
+ See Also
+ --------
+
+ :ref:`nditer-context-manager`
+
+ """))
###############################################################################
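A minimal sketch of calling ``close`` directly instead of using the context
manager; no casting is requested here, so the operand is a plain view and
``close`` simply resolves any pending writeback:

    >>> a = np.arange(3)
    >>> it = np.nditer(a, [], [['readwrite']])
    >>> it.operands[0][...] += 1
    >>> it.close()
    >>> a
    array([1, 2, 3])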
@@ -784,7 +833,15 @@ add_newdoc('numpy.core.multiarray', 'array',
See Also
--------
- empty, empty_like, zeros, zeros_like, ones, ones_like, full, full_like
+ empty_like : Return an empty array with shape and type of input.
+ ones_like : Return an array of ones with shape and type of input.
+ zeros_like : Return an array of zeros with shape and type of input.
+ full_like : Return a new array with shape of input filled with value.
+ empty : Return a new uninitialized array.
+ ones : Return a new array setting values to one.
+ zeros : Return a new array setting values to zero.
+ full : Return a new array of given shape filled with value.
+
Notes
-----
@@ -862,7 +919,11 @@ add_newdoc('numpy.core.multiarray', 'empty',
See Also
--------
- empty_like, zeros, ones
+ empty_like : Return an empty array with shape and type of input.
+ ones : Return a new array setting values to one.
+ zeros : Return a new array setting values to zero.
+ full : Return a new array of given shape filled with value.
+
Notes
-----
@@ -920,9 +981,8 @@ add_newdoc('numpy.core.multiarray', 'empty_like',
--------
ones_like : Return an array of ones with shape and type of input.
zeros_like : Return an array of zeros with shape and type of input.
+ full_like : Return a new array with shape of input filled with value.
empty : Return a new uninitialized array.
- ones : Return a new array setting values to one.
- zeros : Return a new array setting values to zero.
Notes
-----
@@ -984,10 +1044,9 @@ add_newdoc('numpy.core.multiarray', 'zeros',
See Also
--------
zeros_like : Return an array of zeros with shape and type of input.
- ones_like : Return an array of ones with shape and type of input.
- empty_like : Return an empty array with shape and type of input.
- ones : Return a new array setting values to one.
empty : Return a new uninitialized array.
+ ones : Return a new array setting values to one.
+ full : Return a new array of given shape filled with value.
Examples
--------
@@ -4700,6 +4759,11 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('tofile',
machines with different endianness. Some of these problems can be overcome
by outputting the data as text files, at the expense of speed and file
size.
+
+ When fid is a file object, array contents are directly written to the
+ file, bypassing the file object's ``write`` method. As a result, tofile
+ cannot be used with file objects supporting compression (e.g., GzipFile)
+ or file-like objects that do not support ``fileno()`` (e.g., BytesIO).
"""))
@@ -5588,10 +5652,13 @@ add_newdoc('numpy.core', 'ufunc',
Alternate array object(s) in which to put the result; if provided, it
must have a shape that the inputs broadcast to. A tuple of arrays
(possible only as a keyword argument) must have length equal to the
- number of outputs; use `None` for outputs to be allocated by the ufunc.
+ number of outputs; use `None` for uninitialized outputs to be
+ allocated by the ufunc.
where : array_like, optional
Values of True indicate to calculate the ufunc at that position, values
- of False indicate to leave the value in the output alone.
+ of False indicate to leave the value in the output alone. Note that if
+ an uninitialized return array is created via the default ``out=None``,
+ then the elements where the values are False will remain uninitialized.
**kwargs
For other keyword-only arguments, see the :ref:`ufunc docs <ufuncs.kwargs>`.
@@ -5599,7 +5666,8 @@ add_newdoc('numpy.core', 'ufunc',
-------
r : ndarray or tuple of ndarray
`r` will have the shape that the arrays in `x` broadcast to; if `out` is
- provided, `r` will be equal to `out`. If the function has more than one
+ provided, it will be returned. If not, `r` will be allocated and
+ may contain uninitialized values. If the function has more than one
output, then the result will be a tuple of arrays.
""")
@@ -5797,7 +5865,7 @@ add_newdoc('numpy.core', 'ufunc', ('signature',
add_newdoc('numpy.core', 'ufunc', ('reduce',
"""
- reduce(a, axis=0, dtype=None, out=None, keepdims=False)
+ reduce(a, axis=0, dtype=None, out=None, keepdims=False, initial=<no value>)
Reduces `a`'s dimension by one, by applying ufunc along one axis.
@@ -5853,6 +5921,14 @@ add_newdoc('numpy.core', 'ufunc', ('reduce',
the result will broadcast correctly against the original `arr`.
.. versionadded:: 1.7.0
+ initial : scalar, optional
+ The value with which to start the reduction.
+ If the ufunc has no identity or the dtype is object, this defaults
+ to None; otherwise it defaults to ``ufunc.identity``.
+ If ``None`` is given, the first element of the reduction is used,
+ and an error is thrown if the reduction is empty.
+
+ .. versionadded:: 1.15.0
Returns
-------
@@ -5884,7 +5960,24 @@ add_newdoc('numpy.core', 'ufunc', ('reduce',
>>> np.add.reduce(X, 2)
array([[ 1, 5],
[ 9, 13]])
-
+
+ You can use the ``initial`` keyword argument to initialize the reduction with a
+ different value.
+
+ >>> np.add.reduce([10], initial=5)
+ 15
+ >>> np.add.reduce(np.ones((2, 2, 2)), axis=(0, 2), initial=10)
+ array([14., 14.])
+
+ Allows reductions of empty arrays where they would normally fail, i.e.
+ for ufuncs without an identity.
+
+ >>> np.minimum.reduce([], initial=np.inf)
+ inf
+ >>> np.minimum.reduce([])
+ Traceback (most recent call last):
+ ...
+ ValueError: zero-size array to reduction operation minimum which has no identity
"""))
add_newdoc('numpy.core', 'ufunc', ('accumulate',
@@ -6143,10 +6236,10 @@ add_newdoc('numpy.core', 'ufunc', ('at',
Performs unbuffered in place operation on operand 'a' for elements
specified by 'indices'. For addition ufunc, this method is equivalent to
- `a[indices] += b`, except that results are accumulated for elements that
- are indexed more than once. For example, `a[[0,0]] += 1` will only
+ ``a[indices] += b``, except that results are accumulated for elements that
+ are indexed more than once. For example, ``a[[0,0]] += 1`` will only
increment the first element once because of buffering, whereas
- `add.at(a, [0,0], 1)` will increment the first element twice.
+ ``add.at(a, [0,0], 1)`` will increment the first element twice.
.. versionadded:: 1.8.0
@@ -6171,8 +6264,6 @@ add_newdoc('numpy.core', 'ufunc', ('at',
>>> print(a)
array([-1, -2, 3, 4])
- ::
-
Increment items 0 and 1, and increment item 2 twice:
>>> a = np.array([1, 2, 3, 4])
@@ -6180,8 +6271,6 @@ add_newdoc('numpy.core', 'ufunc', ('at',
>>> print(a)
array([2, 3, 5, 4])
- ::
-
Add items 0 and 1 in first array to second array,
and store results in first array:
@@ -6968,7 +7057,7 @@ add_newdoc('numpy.core.multiarray', 'datetime_as_string',
arr : array_like of datetime64
The array of UTC timestamps to format.
unit : str
- One of None, 'auto', or a datetime unit.
+ One of None, 'auto', or a :ref:`datetime unit <arrays.dtypes.dateunits>`.
timezone : {'naive', 'UTC', 'local'} or tzinfo
Timezone information to use when displaying the datetime. If 'UTC', end
with a Z to indicate UTC time. If 'local', convert to the local timezone
@@ -6996,13 +7085,13 @@ add_newdoc('numpy.core.multiarray', 'datetime_as_string',
'2002-10-27T07:30Z'], dtype='<U35')
Note that we picked datetimes that cross a DST boundary. Passing in a
- ``pytz`` timezone object will print the appropriate offset::
+ ``pytz`` timezone object will print the appropriate offset
>>> np.datetime_as_string(d, timezone=pytz.timezone('US/Eastern'))
array(['2002-10-27T00:30-0400', '2002-10-27T01:30-0400',
'2002-10-27T01:30-0500', '2002-10-27T02:30-0500'], dtype='<U39')
- Passing in a unit will change the precision::
+ Passing in a unit will change the precision
>>> np.datetime_as_string(d, unit='h')
array(['2002-10-27T04', '2002-10-27T05', '2002-10-27T06', '2002-10-27T07'],
@@ -7011,7 +7100,7 @@ add_newdoc('numpy.core.multiarray', 'datetime_as_string',
array(['2002-10-27T04:30:00', '2002-10-27T05:30:00', '2002-10-27T06:30:00',
'2002-10-27T07:30:00'], dtype='<U38')
- But can be made to not lose precision::
+ 'casting' can be used to specify whether precision can be changed
>>> np.datetime_as_string(d, unit='h', casting='safe')
TypeError: Cannot create a datetime string as units 'h' from a NumPy
diff --git a/numpy/core/_methods.py b/numpy/core/_methods.py
index 0f928676b..33f6d01a8 100644
--- a/numpy/core/_methods.py
+++ b/numpy/core/_methods.py
@@ -11,6 +11,7 @@ from numpy.core import multiarray as mu
from numpy.core import umath as um
from numpy.core.numeric import asanyarray
from numpy.core import numerictypes as nt
+from numpy._globals import _NoValue
# save those O(100) nanoseconds!
umr_maximum = um.maximum.reduce
@@ -22,17 +23,21 @@ umr_all = um.logical_and.reduce
# avoid keyword arguments to speed up parsing, saves about 15%-20% for very
# small reductions
-def _amax(a, axis=None, out=None, keepdims=False):
- return umr_maximum(a, axis, None, out, keepdims)
+def _amax(a, axis=None, out=None, keepdims=False,
+ initial=_NoValue):
+ return umr_maximum(a, axis, None, out, keepdims, initial)
-def _amin(a, axis=None, out=None, keepdims=False):
- return umr_minimum(a, axis, None, out, keepdims)
+def _amin(a, axis=None, out=None, keepdims=False,
+ initial=_NoValue):
+ return umr_minimum(a, axis, None, out, keepdims, initial)
-def _sum(a, axis=None, dtype=None, out=None, keepdims=False):
- return umr_sum(a, axis, dtype, out, keepdims)
+def _sum(a, axis=None, dtype=None, out=None, keepdims=False,
+ initial=_NoValue):
+ return umr_sum(a, axis, dtype, out, keepdims, initial)
-def _prod(a, axis=None, dtype=None, out=None, keepdims=False):
- return umr_prod(a, axis, dtype, out, keepdims)
+def _prod(a, axis=None, dtype=None, out=None, keepdims=False,
+ initial=_NoValue):
+ return umr_prod(a, axis, dtype, out, keepdims, initial)
def _any(a, axis=None, dtype=None, out=None, keepdims=False):
return umr_any(a, axis, dtype, out, keepdims)
diff --git a/numpy/core/arrayprint.py b/numpy/core/arrayprint.py
index 7dc73d6de..6d15cb23f 100644
--- a/numpy/core/arrayprint.py
+++ b/numpy/core/arrayprint.py
@@ -132,44 +132,45 @@ def set_printoptions(precision=None, threshold=None, edgeitems=None,
formatting function applies to. Callables should return a string.
Types that are not specified (by their corresponding keys) are handled
by the default formatters. Individual types for which a formatter
- can be set are::
-
- - 'bool'
- - 'int'
- - 'timedelta' : a `numpy.timedelta64`
- - 'datetime' : a `numpy.datetime64`
- - 'float'
- - 'longfloat' : 128-bit floats
- - 'complexfloat'
- - 'longcomplexfloat' : composed of two 128-bit floats
- - 'numpystr' : types `numpy.string_` and `numpy.unicode_`
- - 'object' : `np.object_` arrays
- - 'str' : all other strings
-
- Other keys that can be used to set a group of types at once are::
-
- - 'all' : sets all types
- - 'int_kind' : sets 'int'
- - 'float_kind' : sets 'float' and 'longfloat'
- - 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat'
- - 'str_kind' : sets 'str' and 'numpystr'
+ can be set are:
+
+ - 'bool'
+ - 'int'
+ - 'timedelta' : a `numpy.timedelta64`
+ - 'datetime' : a `numpy.datetime64`
+ - 'float'
+ - 'longfloat' : 128-bit floats
+ - 'complexfloat'
+ - 'longcomplexfloat' : composed of two 128-bit floats
+ - 'numpystr' : types `numpy.string_` and `numpy.unicode_`
+ - 'object' : `np.object_` arrays
+ - 'str' : all other strings
+
+ Other keys that can be used to set a group of types at once are:
+
+ - 'all' : sets all types
+ - 'int_kind' : sets 'int'
+ - 'float_kind' : sets 'float' and 'longfloat'
+ - 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat'
+ - 'str_kind' : sets 'str' and 'numpystr'
floatmode : str, optional
Controls the interpretation of the `precision` option for
floating-point types. Can take the following values:
- - 'fixed' : Always print exactly `precision` fractional digits,
- even if this would print more or fewer digits than
- necessary to specify the value uniquely.
- - 'unique : Print the minimum number of fractional digits necessary
- to represent each value uniquely. Different elements may
- have a different number of digits. The value of the
- `precision` option is ignored.
- - 'maxprec' : Print at most `precision` fractional digits, but if
- an element can be uniquely represented with fewer digits
- only print it with that many.
- - 'maxprec_equal' : Print at most `precision` fractional digits,
- but if every element in the array can be uniquely
- represented with an equal number of fewer digits, use that
- many digits for all elements.
+
+ * 'fixed': Always print exactly `precision` fractional digits,
+ even if this would print more or fewer digits than
+ necessary to specify the value uniquely.
+ * 'unique': Print the minimum number of fractional digits necessary
+ to represent each value uniquely. Different elements may
+ have a different number of digits. The value of the
+ `precision` option is ignored.
+ * 'maxprec': Print at most `precision` fractional digits, but if
+ an element can be uniquely represented with fewer digits
+ only print it with that many.
+ * 'maxprec_equal': Print at most `precision` fractional digits,
+ but if every element in the array can be uniquely
+ represented with an equal number of fewer digits, use that
+ many digits for all elements.
legacy : string or `False`, optional
If set to the string `'1.13'` enables 1.13 legacy printing mode. This
approximates numpy 1.13 print output by including a space in the sign
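A brief sketch of the ``floatmode`` values listed above (outputs assume the
default, non-legacy printing):

    >>> np.set_printoptions(precision=4, floatmode='fixed')
    >>> np.array([0.5, 0.123456789])
    array([0.5000, 0.1235])
    >>> np.set_printoptions(floatmode='maxprec')
    >>> np.array([0.5, 0.123456789])
    array([0.5   , 0.1235])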
@@ -536,27 +537,27 @@ def array2string(a, max_line_width=None, precision=None,
formatting function applies to. Callables should return a string.
Types that are not specified (by their corresponding keys) are handled
by the default formatters. Individual types for which a formatter
- can be set are::
-
- - 'bool'
- - 'int'
- - 'timedelta' : a `numpy.timedelta64`
- - 'datetime' : a `numpy.datetime64`
- - 'float'
- - 'longfloat' : 128-bit floats
- - 'complexfloat'
- - 'longcomplexfloat' : composed of two 128-bit floats
- - 'void' : type `numpy.void`
- - 'numpystr' : types `numpy.string_` and `numpy.unicode_`
- - 'str' : all other strings
-
- Other keys that can be used to set a group of types at once are::
-
- - 'all' : sets all types
- - 'int_kind' : sets 'int'
- - 'float_kind' : sets 'float' and 'longfloat'
- - 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat'
- - 'str_kind' : sets 'str' and 'numpystr'
+ can be set are:
+
+ - 'bool'
+ - 'int'
+ - 'timedelta' : a `numpy.timedelta64`
+ - 'datetime' : a `numpy.datetime64`
+ - 'float'
+ - 'longfloat' : 128-bit floats
+ - 'complexfloat'
+ - 'longcomplexfloat' : composed of two 128-bit floats
+ - 'void' : type `numpy.void`
+ - 'numpystr' : types `numpy.string_` and `numpy.unicode_`
+ - 'str' : all other strings
+
+ Other keys that can be used to set a group of types at once are:
+
+ - 'all' : sets all types
+ - 'int_kind' : sets 'int'
+ - 'float_kind' : sets 'float' and 'longfloat'
+ - 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat'
+ - 'str_kind' : sets 'str' and 'numpystr'
threshold : int, optional
Total number of array elements which trigger summarization
rather than full repr.
@@ -571,20 +572,21 @@ def array2string(a, max_line_width=None, precision=None,
floatmode : str, optional
Controls the interpretation of the `precision` option for
floating-point types. Can take the following values:
- - 'fixed' : Always print exactly `precision` fractional digits,
- even if this would print more or fewer digits than
- necessary to specify the value uniquely.
- - 'unique : Print the minimum number of fractional digits necessary
- to represent each value uniquely. Different elements may
- have a different number of digits. The value of the
- `precision` option is ignored.
- - 'maxprec' : Print at most `precision` fractional digits, but if
- an element can be uniquely represented with fewer digits
- only print it with that many.
- - 'maxprec_equal' : Print at most `precision` fractional digits,
- but if every element in the array can be uniquely
- represented with an equal number of fewer digits, use that
- many digits for all elements.
+
+ - 'fixed': Always print exactly `precision` fractional digits,
+ even if this would print more or fewer digits than
+ necessary to specify the value uniquely.
+ - 'unique': Print the minimum number of fractional digits necessary
+ to represent each value uniquely. Different elements may
+ have a different number of digits. The value of the
+ `precision` option is ignored.
+ - 'maxprec': Print at most `precision` fractional digits, but if
+ an element can be uniquely represented with fewer digits
+ only print it with that many.
+ - 'maxprec_equal': Print at most `precision` fractional digits,
+ but if every element in the array can be uniquely
+ represented with an equal number of fewer digits, use that
+ many digits for all elements.
legacy : string or `False`, optional
If set to the string `'1.13'` enables 1.13 legacy printing mode. This
approximates numpy 1.13 print output by including a space in the sign
@@ -645,6 +647,9 @@ def array2string(a, max_line_width=None, precision=None,
options.update(overrides)
if options['legacy'] == '1.13':
+ if style is np._NoValue:
+ style = repr
+
if a.shape == () and not a.dtype.names:
return style(a.item())
elif style is not np._NoValue:
@@ -984,11 +989,12 @@ def format_float_scientific(x, precision=None, unique=True, trim='k',
value.
trim : one of 'k', '.', '0', '-', optional
Controls post-processing trimming of trailing digits, as follows:
- k : keep trailing zeros, keep decimal point (no trimming)
- . : trim all trailing zeros, leave decimal point
- 0 : trim all but the zero before the decimal point. Insert the
- zero if it is missing.
- - : trim trailing zeros and any trailing decimal point
+
+ * 'k' : keep trailing zeros, keep decimal point (no trimming)
+ * '.' : trim all trailing zeros, leave decimal point
+ * '0' : trim all but the zero before the decimal point. Insert the
+ zero if it is missing.
+ * '-' : trim trailing zeros and any trailing decimal point
sign : boolean, optional
Whether to show the sign for positive values.
pad_left : non-negative integer, optional
@@ -1056,11 +1062,12 @@ def format_float_positional(x, precision=None, unique=True,
digits, before or after the decimal point, ignoring leading zeros.
trim : one of 'k', '.', '0', '-', optional
Controls post-processing trimming of trailing digits, as follows:
- k : keep trailing zeros, keep decimal point (no trimming)
- . : trim all trailing zeros, leave decimal point
- 0 : trim all but the zero before the decimal point. Insert the
- zero if it is missing.
- - : trim trailing zeros and any trailing decimal point
+
+ * 'k' : keep trailing zeros, keep decimal point (no trimming)
+ * '.' : trim all trailing zeros, leave decimal point
+ * '0' : trim all but the zero before the decimal point. Insert the
+ zero if it is missing.
+ * '-' : trim trailing zeros and any trailing decimal point
sign : boolean, optional
Whether to show the sign for positive values.
pad_left : non-negative integer, optional
@@ -1081,7 +1088,7 @@ def format_float_positional(x, precision=None, unique=True,
Examples
--------
- >>> np.format_float_scientific(np.float32(np.pi))
+ >>> np.format_float_positional(np.float32(np.pi))
'3.1415927'
>>> np.format_float_positional(np.float16(np.pi))
'3.14'
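The four ``trim`` values enumerated above, sketched with a fixed precision so
that trailing zeros actually occur:

    >>> np.format_float_positional(2.0, unique=False, precision=4, trim='k')
    '2.0000'
    >>> np.format_float_positional(2.0, unique=False, precision=4, trim='.')
    '2.'
    >>> np.format_float_positional(2.0, unique=False, precision=4, trim='0')
    '2.0'
    >>> np.format_float_positional(2.0, unique=False, precision=4, trim='-')
    '2'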
diff --git a/numpy/core/code_generators/cversions.txt b/numpy/core/code_generators/cversions.txt
index 68ac5109c..cc6c3a5fb 100644
--- a/numpy/core/code_generators/cversions.txt
+++ b/numpy/core/code_generators/cversions.txt
@@ -41,3 +41,6 @@
# Version 12 (NumPy 1.14) Added PyArray_ResolveWritebackIfCopy,
# PyArray_SetWritebackIfCopyBase and deprecated PyArray_SetUpdateIfCopyBase.
0x0000000c = a1bc756c5782853ec2e3616cf66869d8
+
+# Version 13 (NumPy 1.15) Added NpyIter_Close
+0x0000000d = 4386e829d65aafce6bd09a85b142d585
diff --git a/numpy/core/code_generators/generate_umath.py b/numpy/core/code_generators/generate_umath.py
index 1d3550e06..632bcb41f 100644
--- a/numpy/core/code_generators/generate_umath.py
+++ b/numpy/core/code_generators/generate_umath.py
@@ -420,36 +420,42 @@ defdict = {
docstrings.get('numpy.core.umath.greater'),
'PyUFunc_SimpleBinaryComparisonTypeResolver',
TD(all, out='?', simd=[('avx2', ints)]),
+ [TypeDescription('O', FullTypeDescr, 'OO', 'O')],
),
'greater_equal':
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.greater_equal'),
'PyUFunc_SimpleBinaryComparisonTypeResolver',
TD(all, out='?', simd=[('avx2', ints)]),
+ [TypeDescription('O', FullTypeDescr, 'OO', 'O')],
),
'less':
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.less'),
'PyUFunc_SimpleBinaryComparisonTypeResolver',
TD(all, out='?', simd=[('avx2', ints)]),
+ [TypeDescription('O', FullTypeDescr, 'OO', 'O')],
),
'less_equal':
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.less_equal'),
'PyUFunc_SimpleBinaryComparisonTypeResolver',
TD(all, out='?', simd=[('avx2', ints)]),
+ [TypeDescription('O', FullTypeDescr, 'OO', 'O')],
),
'equal':
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.equal'),
'PyUFunc_SimpleBinaryComparisonTypeResolver',
TD(all, out='?', simd=[('avx2', ints)]),
+ [TypeDescription('O', FullTypeDescr, 'OO', 'O')],
),
'not_equal':
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.not_equal'),
'PyUFunc_SimpleBinaryComparisonTypeResolver',
TD(all, out='?', simd=[('avx2', ints)]),
+ [TypeDescription('O', FullTypeDescr, 'OO', 'O')],
),
'logical_and':
Ufunc(2, 1, One,
@@ -902,8 +908,8 @@ if sys.version_info[0] >= 3:
del defdict['divide']
def indent(st, spaces):
- indention = ' '*spaces
- indented = indention + st.replace('\n', '\n'+indention)
+ indentation = ' '*spaces
+ indented = indentation + st.replace('\n', '\n'+indentation)
# trim off any trailing spaces
indented = re.sub(r' +$', r'', indented)
return indented
@@ -966,7 +972,7 @@ def make_arrays(funcdict):
for vt in t.simd:
code2list.append(textwrap.dedent("""\
#ifdef HAVE_ATTRIBUTE_TARGET_{ISA}
- if (npy_cpu_supports("{ISA}")) {{
+ if (npy_cpu_supports("{isa}")) {{
{fname}_functions[{idx}] = {type}_{fname}_{isa};
}}
#endif
diff --git a/numpy/core/code_generators/numpy_api.py b/numpy/core/code_generators/numpy_api.py
index a454d95b0..6cfbbbcc7 100644
--- a/numpy/core/code_generators/numpy_api.py
+++ b/numpy/core/code_generators/numpy_api.py
@@ -5,7 +5,8 @@ Each dictionary contains name -> index pair.
Whenever you change one index, you break the ABI (and the ABI version number
should be incremented). Whenever you add an item to one of the dict, the API
-needs to be updated.
+needs to be updated: bump C_API_VERSION in setup_common.py and add an
+appropriate entry to cversions.txt (generate the hash via "python cversions.py").
When adding a function, make sure to use the next integer not used as an index
(in case you use an existing index or jump, the build will stop and raise an
@@ -349,6 +350,8 @@ multiarray_funcs_api = {
'PyArray_ResolveWritebackIfCopy': (302,),
'PyArray_SetWritebackIfCopyBase': (303,),
# End 1.14 API
+ 'NpyIter_Close': (304,),
+ # End 1.15 API
}
ufunc_types_api = {
diff --git a/numpy/core/code_generators/ufunc_docstrings.py b/numpy/core/code_generators/ufunc_docstrings.py
index c7e5cf600..bd90d0460 100644
--- a/numpy/core/code_generators/ufunc_docstrings.py
+++ b/numpy/core/code_generators/ufunc_docstrings.py
@@ -1077,8 +1077,9 @@ add_newdoc('numpy.core.umath', 'equal',
Returns
-------
- out : ndarray or bool
- Output array of bools.
+ out : ndarray or scalar
+ Output array, element-wise comparison of `x1` and `x2`.
+ Typically of type bool, unless ``dtype=object`` is passed.
$OUT_SCALAR_2
See Also
@@ -1415,8 +1416,9 @@ add_newdoc('numpy.core.umath', 'greater',
Returns
-------
- out : bool or ndarray of bool
- Array of bools.
+ out : ndarray or scalar
+ Output array, element-wise comparison of `x1` and `x2`.
+ Typically of type bool, unless ``dtype=object`` is passed.
$OUT_SCALAR_2
@@ -1453,7 +1455,8 @@ add_newdoc('numpy.core.umath', 'greater_equal',
Returns
-------
out : bool or ndarray of bool
- Array of bools.
+ Output array, element-wise comparison of `x1` and `x2`.
+ Typically of type bool, unless ``dtype=object`` is passed.
$OUT_SCALAR_2
See Also
@@ -1820,8 +1823,9 @@ add_newdoc('numpy.core.umath', 'less',
Returns
-------
- out : bool or ndarray of bool
- Array of bools.
+ out : ndarray or scalar
+ Output array, element-wise comparison of `x1` and `x2`.
+ Typically of type bool, unless ``dtype=object`` is passed.
$OUT_SCALAR_2
See Also
@@ -1849,8 +1853,9 @@ add_newdoc('numpy.core.umath', 'less_equal',
Returns
-------
- out : bool or ndarray of bool
- Array of bools.
+ out : ndarray or scalar
+ Output array, element-wise comparison of `x1` and `x2`.
+ Typically of type bool, unless ``dtype=object`` is passed.
$OUT_SCALAR_2
See Also
@@ -2668,12 +2673,11 @@ add_newdoc('numpy.core.umath', 'not_equal',
Returns
-------
- not_equal : ndarray bool, scalar bool
- For each element in `x1, x2`, return True if `x1` is not equal
- to `x2` and False otherwise.
+ out : ndarray or scalar
+ Output array, element-wise comparison of `x1` and `x2`.
+ Typically of type bool, unless ``dtype=object`` is passed.
$OUT_SCALAR_2
-
See Also
--------
equal, greater, greater_equal, less, less_equal
@@ -3740,7 +3744,7 @@ add_newdoc('numpy.core.umath', 'ldexp',
add_newdoc('numpy.core.umath', 'gcd',
"""
- Returns the greatest common divisor of |x1| and |x2|
+ Returns the greatest common divisor of ``|x1|`` and ``|x2|``
Parameters
----------
@@ -3770,7 +3774,7 @@ add_newdoc('numpy.core.umath', 'gcd',
add_newdoc('numpy.core.umath', 'lcm',
"""
- Returns the lowest common multiple of |x1| and |x2|
+ Returns the lowest common multiple of ``|x1|`` and ``|x2|``
Parameters
----------
diff --git a/numpy/core/einsumfunc.py b/numpy/core/einsumfunc.py
index 8cd6eae12..bb6767c4f 100644
--- a/numpy/core/einsumfunc.py
+++ b/numpy/core/einsumfunc.py
@@ -1148,7 +1148,7 @@ def einsum(*operands, **kwargs):
# Do the contraction
new_view = c_einsum(einsum_str, *tmp_operands, **einsum_kwargs)
- # Append new items and derefernce what we can
+ # Append new items and dereference what we can
operands.append(new_view)
del tmp_operands, new_view
diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py
index 948c2139d..d1aae0aa0 100644
--- a/numpy/core/fromnumeric.py
+++ b/numpy/core/fromnumeric.py
@@ -140,6 +140,7 @@ def take(a, indices, axis=None, out=None, mode='raise'):
--------
compress : Take elements using a boolean mask
ndarray.take : equivalent method
+ take_along_axis : Take elements by matching the array and the index arrays
Notes
-----
@@ -478,6 +479,7 @@ def put(a, ind, v, mode='raise'):
See Also
--------
putmask, place
+ put_along_axis : Put elements by matching the array and the index arrays
Examples
--------
@@ -723,7 +725,9 @@ def argpartition(a, kth, axis=-1, kind='introselect', order=None):
-------
index_array : ndarray, int
Array of indices that partition `a` along the specified axis.
- In other words, ``a[index_array]`` yields a partitioned `a`.
+ If `a` is one-dimensional, ``a[index_array]`` yields a partitioned `a`.
+ More generally, ``np.take_along_axis(a, index_array, axis=axis)`` always
+ yields the partitioned `a`, irrespective of dimensionality.
See Also
--------
@@ -904,6 +908,8 @@ def argsort(a, axis=-1, kind='quicksort', order=None):
index_array : ndarray, int
Array of indices that sort `a` along the specified axis.
If `a` is one-dimensional, ``a[index_array]`` yields a sorted `a`.
+ More generally, ``np.take_along_axis(a, index_array, axis=axis)`` always
+ yields the sorted `a`, irrespective of dimensionality.
See Also
--------
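A sketch of the ``take_along_axis`` pattern that the amended argsort and
argpartition text refers to (``np.take_along_axis`` is new in this release;
the values are illustrative):

    >>> x = np.array([[3, 1, 2], [9, 7, 8]])
    >>> ind = np.argsort(x, axis=1)
    >>> np.take_along_axis(x, ind, axis=1)   # same as np.sort(x, axis=1)
    array([[1, 2, 3],
           [7, 8, 9]])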
@@ -1336,10 +1342,11 @@ def diagonal(a, offset=0, axis1=0, axis2=1):
Returns
-------
array_of_diagonals : ndarray
- If `a` is 2-D and not a `matrix`, a 1-D array of the same type as `a`
- containing the diagonal is returned. If `a` is a `matrix`, a 1-D
- array containing the diagonal is returned in order to maintain
- backward compatibility.
+ If `a` is 2-D, then a 1-D array containing the diagonal and of the
+ same type as `a` is returned unless `a` is a `matrix`, in which case
+ a 1-D array rather than a (2-D) `matrix` is returned in order to
+ maintain backward compatibility.
+
If ``a.ndim > 2``, then the dimensions specified by `axis1` and `axis2`
are removed, and a new axis inserted at the end corresponding to the
diagonal.
@@ -1496,10 +1503,9 @@ def ravel(a, order='C'):
Returns
-------
y : array_like
- If `a` is a matrix, y is a 1-D ndarray, otherwise y is an array of
- the same subtype as `a`. The shape of the returned array is
- ``(a.size,)``. Matrices are special cased for backward
- compatibility.
+ y is an array of the same subtype as `a`, with shape ``(a.size,)``.
+ Note that matrices are special cased for backward compatibility, if `a`
+ is a matrix, then y is a 1-D ndarray.
See Also
--------
@@ -1812,7 +1818,7 @@ def clip(a, a_min, a_max, out=None):
return _wrapfunc(a, 'clip', a_min, a_max, out=out)
-def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
+def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, initial=np._NoValue):
"""
Sum of array elements over a given axis.
@@ -1851,6 +1857,10 @@ def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims` any
exceptions will be raised.
+ initial : scalar, optional
+ Starting value for the sum. See `~numpy.ufunc.reduce` for details.
+
+ .. versionadded:: 1.15.0
Returns
-------
@@ -1898,6 +1908,10 @@ def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
>>> np.ones(128, dtype=np.int8).sum(dtype=np.int8)
-128
+ You can also start the sum with a value other than zero:
+
+ >>> np.sum([10], initial=5)
+ 15
"""
if isinstance(a, _gentype):
# 2018-02-25, 1.15.0
@@ -1912,7 +1926,8 @@ def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
return out
return res
- return _wrapreduction(a, np.add, 'sum', axis, dtype, out, keepdims=keepdims)
+ return _wrapreduction(a, np.add, 'sum', axis, dtype, out, keepdims=keepdims,
+ initial=initial)
def any(a, axis=None, out=None, keepdims=np._NoValue):
@@ -2209,7 +2224,7 @@ def ptp(a, axis=None, out=None, keepdims=np._NoValue):
return _methods._ptp(a, axis=axis, out=out, **kwargs)
-def amax(a, axis=None, out=None, keepdims=np._NoValue):
+def amax(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue):
"""
Return the maximum of an array or maximum along an axis.
@@ -2241,6 +2256,13 @@ def amax(a, axis=None, out=None, keepdims=np._NoValue):
sub-class' method does not implement `keepdims` any
exceptions will be raised.
+ initial : scalar, optional
+ The minimum value of an output element. Must be present to allow
+ computation on an empty slice. See `~numpy.ufunc.reduce` for details.
+
+ .. versionadded:: 1.15.0
+
+
Returns
-------
amax : ndarray or scalar
@@ -2293,11 +2315,26 @@ def amax(a, axis=None, out=None, keepdims=np._NoValue):
>>> np.nanmax(b)
4.0
+ You can use an initial value to compute the maximum of an empty slice, or
+ to initialize it to a different value:
+
+ >>> np.max([[-50], [10]], axis=-1, initial=0)
+ array([ 0, 10])
+
+ Notice that the initial value is used as one of the elements for which the
+ maximum is determined, unlike for the ``default`` argument of Python's
+ ``max`` function, which is only used for empty iterables.
+
+ >>> np.max([5], initial=6)
+ 6
+ >>> max([5], default=6)
+ 5
"""
- return _wrapreduction(a, np.maximum, 'max', axis, None, out, keepdims=keepdims)
+ return _wrapreduction(a, np.maximum, 'max', axis, None, out, keepdims=keepdims,
+ initial=initial)
-def amin(a, axis=None, out=None, keepdims=np._NoValue):
+def amin(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue):
"""
Return the minimum of an array or minimum along an axis.
@@ -2329,6 +2366,12 @@ def amin(a, axis=None, out=None, keepdims=np._NoValue):
sub-class' method does not implement `keepdims` any
exceptions will be raised.
+ initial : scalar, optional
+ The maximum value of an output element. Must be present to allow
+ computation on an empty slice. See `~numpy.ufunc.reduce` for details.
+
+ .. versionadded:: 1.15.0
+
Returns
-------
amin : ndarray or scalar
@@ -2381,8 +2424,22 @@ def amin(a, axis=None, out=None, keepdims=np._NoValue):
>>> np.nanmin(b)
0.0
+ >>> np.min([[-50], [10]], axis=-1, initial=0)
+ array([-50, 0])
+
+ Notice that the initial value is used as one of the elements for which the
+ minimum is determined, unlike for the ``default`` argument of Python's
+ ``min`` function, which is only used for empty iterables.
+
+ >>> np.min([6], initial=5)
+ 5
+ >>> min([6], default=5)
+ 6
"""
- return _wrapreduction(a, np.minimum, 'min', axis, None, out, keepdims=keepdims)
+ return _wrapreduction(a, np.minimum, 'min', axis, None, out, keepdims=keepdims,
+ initial=initial)
def alen(a):
@@ -2418,7 +2475,7 @@ def alen(a):
return len(array(a, ndmin=1))
-def prod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
+def prod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, initial=np._NoValue):
"""
Return the product of array elements over a given axis.
@@ -2458,6 +2515,10 @@ def prod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims` any
exceptions will be raised.
+ initial : scalar, optional
+ The starting value for this product. See `~numpy.ufunc.reduce` for details.
+
+ .. versionadded:: 1.15.0
Returns
-------
@@ -2515,8 +2576,13 @@ def prod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
>>> np.prod(x).dtype == int
True
+ You can also start the product with a value other than one:
+
+ >>> np.prod([1, 2], initial=5)
+ 10
"""
- return _wrapreduction(a, np.multiply, 'prod', axis, dtype, out, keepdims=keepdims)
+ return _wrapreduction(a, np.multiply, 'prod', axis, dtype, out, keepdims=keepdims,
+ initial=initial)
def cumprod(a, axis=None, dtype=None, out=None):
diff --git a/numpy/core/include/numpy/ndarrayobject.h b/numpy/core/include/numpy/ndarrayobject.h
index ec0fd1ee9..12fc7098c 100644
--- a/numpy/core/include/numpy/ndarrayobject.h
+++ b/numpy/core/include/numpy/ndarrayobject.h
@@ -170,14 +170,17 @@ extern "C" CONFUSE_EMACS
(k)*PyArray_STRIDES(obj)[2] + \
(l)*PyArray_STRIDES(obj)[3]))
+/* Move to arrayobject.c once PyArray_XDECREF_ERR is removed */
static NPY_INLINE void
PyArray_DiscardWritebackIfCopy(PyArrayObject *arr)
{
- if (arr != NULL) {
- if ((PyArray_FLAGS(arr) & NPY_ARRAY_WRITEBACKIFCOPY) ||
- (PyArray_FLAGS(arr) & NPY_ARRAY_UPDATEIFCOPY)) {
- PyArrayObject *base = (PyArrayObject *)PyArray_BASE(arr);
- PyArray_ENABLEFLAGS(base, NPY_ARRAY_WRITEABLE);
+ PyArrayObject_fields *fa = (PyArrayObject_fields *)arr;
+ if (fa && fa->base) {
+ if ((fa->flags & NPY_ARRAY_UPDATEIFCOPY) ||
+ (fa->flags & NPY_ARRAY_WRITEBACKIFCOPY)) {
+ PyArray_ENABLEFLAGS((PyArrayObject*)fa->base, NPY_ARRAY_WRITEABLE);
+ Py_DECREF(fa->base);
+ fa->base = NULL;
PyArray_CLEARFLAGS(arr, NPY_ARRAY_WRITEBACKIFCOPY);
PyArray_CLEARFLAGS(arr, NPY_ARRAY_UPDATEIFCOPY);
}
diff --git a/numpy/core/include/numpy/npy_interrupt.h b/numpy/core/include/numpy/npy_interrupt.h
index f71fd689e..40cb7ac5e 100644
--- a/numpy/core/include/numpy/npy_interrupt.h
+++ b/numpy/core/include/numpy/npy_interrupt.h
@@ -55,7 +55,7 @@ Ideas:
Simple Interface:
-In your C-extension: around a block of code you want to be interruptable
+In your C-extension: around a block of code you want to be interruptible
with a SIGINT
NPY_SIGINT_ON
diff --git a/numpy/core/include/numpy/npy_math.h b/numpy/core/include/numpy/npy_math.h
index ba32bcdd3..582390cdc 100644
--- a/numpy/core/include/numpy/npy_math.h
+++ b/numpy/core/include/numpy/npy_math.h
@@ -524,8 +524,17 @@ npy_clongdouble npy_catanhl(npy_clongdouble z);
#define NPY_FPE_UNDERFLOW 4
#define NPY_FPE_INVALID 8
-int npy_get_floatstatus(void);
+int npy_clear_floatstatus_barrier(char*);
+int npy_get_floatstatus_barrier(char*);
+/*
+ * use caution with these - clang and gcc8.1 are known to reorder calls
+ * to this form of the function which can defeat the check. The _barrier
+ * form of the call is preferable, where the argument is
+ * (char*)&local_variable
+ */
int npy_clear_floatstatus(void);
+int npy_get_floatstatus(void);
+
void npy_set_floatstatus_divbyzero(void);
void npy_set_floatstatus_overflow(void);
void npy_set_floatstatus_underflow(void);
diff --git a/numpy/core/numeric.py b/numpy/core/numeric.py
index d154206c5..7ade3d224 100644
--- a/numpy/core/numeric.py
+++ b/numpy/core/numeric.py
@@ -123,11 +123,10 @@ def zeros_like(a, dtype=None, order='K', subok=True):
See Also
--------
- ones_like : Return an array of ones with shape and type of input.
empty_like : Return an empty array with shape and type of input.
+ ones_like : Return an array of ones with shape and type of input.
+ full_like : Return a new array with shape of input filled with value.
zeros : Return a new array setting values to zero.
- ones : Return a new array setting values to one.
- empty : Return a new uninitialized array.
Examples
--------
@@ -177,7 +176,11 @@ def ones(shape, dtype=None, order='C'):
See Also
--------
- zeros, ones_like
+ ones_like : Return an array of ones with shape and type of input.
+ empty : Return a new uninitialized array.
+ zeros : Return a new array setting values to zero.
+ full : Return a new array of given shape filled with value.
+
Examples
--------
@@ -234,11 +237,10 @@ def ones_like(a, dtype=None, order='K', subok=True):
See Also
--------
- zeros_like : Return an array of zeros with shape and type of input.
empty_like : Return an empty array with shape and type of input.
- zeros : Return a new array setting values to zero.
+ zeros_like : Return an array of zeros with shape and type of input.
+ full_like : Return a new array with shape of input filled with value.
ones : Return a new array setting values to one.
- empty : Return a new uninitialized array.
Examples
--------
@@ -287,13 +289,10 @@ def full(shape, fill_value, dtype=None, order='C'):
See Also
--------
- zeros_like : Return an array of zeros with shape and type of input.
- ones_like : Return an array of ones with shape and type of input.
- empty_like : Return an empty array with shape and type of input.
- full_like : Fill an array with shape and type of input.
- zeros : Return a new array setting values to zero.
- ones : Return a new array setting values to one.
+ full_like : Return a new array with shape of input filled with value.
empty : Return a new uninitialized array.
+ ones : Return a new array setting values to one.
+ zeros : Return a new array setting values to zero.
Examples
--------
@@ -342,13 +341,10 @@ def full_like(a, fill_value, dtype=None, order='K', subok=True):
See Also
--------
- zeros_like : Return an array of zeros with shape and type of input.
- ones_like : Return an array of ones with shape and type of input.
empty_like : Return an empty array with shape and type of input.
- zeros : Return a new array setting values to zero.
- ones : Return a new array setting values to one.
- empty : Return a new uninitialized array.
- full : Fill a new array.
+ ones_like : Return an array of ones with shape and type of input.
+ zeros_like : Return an array of zeros with shape and type of input.
+ full : Return a new array of given shape filled with value.
Examples
--------
@@ -493,9 +489,9 @@ def asarray(a, dtype=None, order=None):
Contrary to `asanyarray`, ndarray subclasses are not passed through:
- >>> issubclass(np.matrix, np.ndarray)
+ >>> issubclass(np.recarray, np.ndarray)
True
- >>> a = np.matrix([[1, 2]])
+ >>> a = np.array([(1.0, 2), (3.0, 4)], dtype='f4,i4').view(np.recarray)
>>> np.asarray(a) is a
False
>>> np.asanyarray(a) is a
@@ -549,7 +545,7 @@ def asanyarray(a, dtype=None, order=None):
Instances of `ndarray` subclasses are passed through as-is:
- >>> a = np.matrix([1, 2])
+ >>> a = np.array([(1.0, 2), (3.0, 4)], dtype='f4,i4').view(np.recarray)
>>> np.asanyarray(a) is a
True
@@ -2039,7 +2035,7 @@ def binary_repr(num, width=None):
'11101'
"""
- def warn_if_insufficient(width, binwdith):
+ def warn_if_insufficient(width, binwidth):
if width is not None and width < binwidth:
warnings.warn(
"Insufficient bit width provided. This behavior "
@@ -2284,7 +2280,7 @@ def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
relative difference (`rtol` * abs(`b`)) and the absolute difference
`atol` are added together to compare against the absolute difference
between `a` and `b`.
-
+
.. warning:: The default `atol` is not appropriate for comparing numbers
that are much smaller than one (see Notes).
@@ -2533,7 +2529,7 @@ def seterr(all=None, divide=None, over=None, under=None, invalid=None):
Notes
-----
- The floating-point exceptions are defined in the IEEE 754 standard [1]:
+ The floating-point exceptions are defined in the IEEE 754 standard [1]_:
- Division by zero: infinite result obtained from finite numbers.
- Overflow: result too large to be expressed.
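A small sketch of turning one of these exceptions into a Python error; the
overflowing int16 product is an illustrative example:

    >>> old = np.seterr(over='raise')
    >>> np.int16(32000) * np.int16(3)
    Traceback (most recent call last):
        ...
    FloatingPointError: overflow encountered in short_scalars
    >>> _ = np.seterr(**old)   # restore the previous settings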
diff --git a/numpy/core/setup.py b/numpy/core/setup.py
index 15f6e1522..7d8bab557 100644
--- a/numpy/core/setup.py
+++ b/numpy/core/setup.py
@@ -664,7 +664,7 @@ def configuration(parent_package='',top_path=None):
def get_mathlib_info(*args):
# Another ugly hack: the mathlib info is known once build_src is run,
# but we cannot use add_installed_pkg_config here either, so we only
- # update the substition dictionary during npymath build
+ # update the substitution dictionary during npymath build
config_cmd = config.get_config_cmd()
# Check that the toolchain works, to fail early if it doesn't
diff --git a/numpy/core/setup_common.py b/numpy/core/setup_common.py
index f36d61f55..a8aba40bd 100644
--- a/numpy/core/setup_common.py
+++ b/numpy/core/setup_common.py
@@ -40,7 +40,8 @@ C_ABI_VERSION = 0x01000009
# 0x0000000a - 1.12.x
# 0x0000000b - 1.13.x
# 0x0000000c - 1.14.x
-C_API_VERSION = 0x0000000c
+# 0x0000000d - 1.15.x
+C_API_VERSION = 0x0000000d
class MismatchCAPIWarning(Warning):
pass
diff --git a/numpy/core/src/multiarray/_multiarray_tests.c.src b/numpy/core/src/multiarray/_multiarray_tests.c.src
index afc6db1aa..0299f1a1b 100644
--- a/numpy/core/src/multiarray/_multiarray_tests.c.src
+++ b/numpy/core/src/multiarray/_multiarray_tests.c.src
@@ -657,6 +657,24 @@ npy_create_writebackifcopy(PyObject* NPY_UNUSED(self), PyObject* args)
return array;
}
+/* used to test that WRITEBACKIFCOPY without resolution emits a runtime warning */
+static PyObject*
+npy_abuse_writebackifcopy(PyObject* NPY_UNUSED(self), PyObject* args)
+{
+ int flags;
+ PyObject* array;
+ if (!PyArray_Check(args)) {
+ PyErr_SetString(PyExc_TypeError, "test needs ndarray input");
+ return NULL;
+ }
+ flags = NPY_ARRAY_CARRAY | NPY_ARRAY_WRITEBACKIFCOPY;
+ array = PyArray_FromArray((PyArrayObject*)args, NULL, flags);
+ if (array == NULL)
+ return NULL;
+ Py_DECREF(array); /* calls array_dealloc even on PyPy */
+ Py_RETURN_NONE;
+}
+
/* resolve WRITEBACKIFCOPY */
static PyObject*
npy_resolve(PyObject* NPY_UNUSED(self), PyObject* args)
@@ -669,6 +687,18 @@ npy_resolve(PyObject* NPY_UNUSED(self), PyObject* args)
Py_RETURN_NONE;
}
+/* discard WRITEBACKIFCOPY */
+static PyObject*
+npy_discard(PyObject* NPY_UNUSED(self), PyObject* args)
+{
+ if (!PyArray_Check(args)) {
+ PyErr_SetString(PyExc_TypeError, "test needs ndarray input");
+ return NULL;
+ }
+ PyArray_DiscardWritebackIfCopy((PyArrayObject*)args);
+ Py_RETURN_NONE;
+}
+
#if !defined(NPY_PY3K)
static PyObject *
int_subclass(PyObject *dummy, PyObject *args)
@@ -1009,6 +1039,75 @@ test_nditer_too_large(PyObject *NPY_UNUSED(self), PyObject *args) {
return NULL;
}
+static PyObject *
+test_nditer_writeback(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kwds)
+{
+ /* like npyiter_init */
+ PyObject *op_in = NULL, *op_dtypes_in = NULL, *value = NULL;
+ PyArrayObject * opview;
+ int iop, nop = 0;
+ PyArrayObject *op[NPY_MAXARGS];
+ npy_uint32 flags = 0;
+ NPY_ORDER order = NPY_KEEPORDER;
+ NPY_CASTING casting = NPY_EQUIV_CASTING;
+ npy_uint32 op_flags[NPY_MAXARGS];
+ PyArray_Descr *op_request_dtypes[NPY_MAXARGS];
+ int retval;
+ unsigned char do_close;
+ int buffersize = 0;
+ NpyIter *iter = NULL;
+ static char *kwlist[] = {"value", "do_close", "input", "op_dtypes", NULL};
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwds,
+ "ObO|O:test_nditer_writeback", kwlist,
+ &value,
+ &do_close,
+ &op_in,
+ &op_dtypes_in)) {
+ return NULL;
+ }
+ /* op and op_flags */
+ if (! PyArray_Check(op_in)) {
+ return NULL;
+ }
+ nop = 1;
+ op[0] = (PyArrayObject*)op_in;
+ op_flags[0] = NPY_ITER_READWRITE|NPY_ITER_UPDATEIFCOPY;
+
+ /* Set the dtypes */
+ for (iop=0; iop<nop; iop++) {
+ PyObject *dtype = PySequence_GetItem(op_dtypes_in, iop);
+ PyArray_DescrConverter2(dtype, &op_request_dtypes[iop]);
+ }
+
+ iter = NpyIter_AdvancedNew(nop, op, flags, order, casting, op_flags,
+ op_request_dtypes,
+ -1, NULL, NULL,
+ buffersize);
+ if (iter == NULL) {
+ goto fail;
+ }
+
+ opview = NpyIter_GetIterView(iter, 0);
+ retval = PyArray_FillWithScalar(opview, value);
+ Py_DECREF(opview);
+ if (retval < 0) {
+ NpyIter_Deallocate(iter);
+ return NULL;
+ }
+ if (do_close != 0) {
+ NpyIter_Close(iter);
+ }
+ NpyIter_Deallocate(iter);
+ Py_RETURN_NONE;
+
+fail:
+ for (iop = 0; iop < nop; ++iop) {
+ Py_XDECREF(op[iop]);
+ Py_XDECREF(op_request_dtypes[iop]);
+ }
+ return NULL;
+}
static PyObject *
array_solve_diophantine(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kwds)
@@ -1764,9 +1863,15 @@ static PyMethodDef Multiarray_TestsMethods[] = {
{"npy_create_writebackifcopy",
npy_create_writebackifcopy,
METH_O, NULL},
+ {"npy_abuse_writebackifcopy",
+ npy_abuse_writebackifcopy,
+ METH_O, NULL},
{"npy_resolve",
npy_resolve,
METH_O, NULL},
+ {"npy_discard",
+ npy_discard,
+ METH_O, NULL},
#if !defined(NPY_PY3K)
{"test_int_subclass",
int_subclass,
@@ -1784,6 +1889,9 @@ static PyMethodDef Multiarray_TestsMethods[] = {
{"test_nditer_too_large",
test_nditer_too_large,
METH_VARARGS, NULL},
+ {"test_nditer_writeback",
+ (PyCFunction)test_nditer_writeback,
+ METH_VARARGS | METH_KEYWORDS, NULL},
{"solve_diophantine",
(PyCFunction)array_solve_diophantine,
METH_VARARGS | METH_KEYWORDS, NULL},
diff --git a/numpy/core/src/multiarray/array_assign_scalar.c b/numpy/core/src/multiarray/array_assign_scalar.c
index 3d259ae05..17de99cb9 100644
--- a/numpy/core/src/multiarray/array_assign_scalar.c
+++ b/numpy/core/src/multiarray/array_assign_scalar.c
@@ -245,6 +245,10 @@ PyArray_AssignRawScalar(PyArrayObject *dst,
allocated_src_data = 1;
}
+ if (PyDataType_FLAGCHK(PyArray_DESCR(dst), NPY_NEEDS_INIT)) {
+ memset(tmp_src_data, 0, PyArray_DESCR(dst)->elsize);
+ }
+
if (PyArray_CastRawArrays(1, src_data, tmp_src_data, 0, 0,
src_dtype, PyArray_DESCR(dst), 0) != NPY_SUCCEED) {
src_data = tmp_src_data;
diff --git a/numpy/core/src/multiarray/arrayobject.c b/numpy/core/src/multiarray/arrayobject.c
index 0aaf27b27..6f4d3d349 100644
--- a/numpy/core/src/multiarray/arrayobject.c
+++ b/numpy/core/src/multiarray/arrayobject.c
@@ -86,17 +86,8 @@ NPY_NO_EXPORT int
PyArray_SetUpdateIfCopyBase(PyArrayObject *arr, PyArrayObject *base)
{
int ret;
-#ifdef PYPY_VERSION
- #ifndef DEPRECATE_UPDATEIFCOPY
- #define DEPRECATE_UPDATEIFCOPY
- #endif
-#endif
-
-#ifdef DEPRECATE_UPDATEIFCOPY
- /* TODO: enable this once a solution for UPDATEIFCOPY
- * and nditer are resolved, also pending the fix for GH7054
- */
- /* 2017-Nov-10 1.14 */
+ /* 2017-Nov-10 1.14 (for PyPy only) */
+ /* 2018-April-21 1.15 (all Python implementations) */
if (DEPRECATE("PyArray_SetUpdateIfCopyBase is deprecated, use "
"PyArray_SetWritebackIfCopyBase instead, and be sure to call "
"PyArray_ResolveWritebackIfCopy before the array is deallocated, "
@@ -104,7 +95,6 @@ PyArray_SetUpdateIfCopyBase(PyArrayObject *arr, PyArrayObject *base)
"error, PyArray_DiscardWritebackIfCopy may be called instead to "
"throw away the scratch buffer.") < 0)
return -1;
-#endif
ret = PyArray_SetWritebackIfCopyBase(arr, base);
if (ret >=0) {
PyArray_ENABLEFLAGS(arr, NPY_ARRAY_UPDATEIFCOPY);
@@ -453,6 +443,27 @@ PyArray_ResolveWritebackIfCopy(PyArrayObject * self)
/*********************** end C-API functions **********************/
+
+/*
+ * dealloc must not raise an error; make a best-effort attempt to write
+ * the warning to stderr and clear the error state.
+ */
+
+static NPY_INLINE void
+WARN_IN_DEALLOC(PyObject* warning, const char * msg) {
+ if (PyErr_WarnEx(warning, msg, 1) < 0) {
+ PyObject * s;
+
+ s = PyUString_FromString("array_dealloc");
+ if (s) {
+ PyErr_WriteUnraisable(s);
+ Py_DECREF(s);
+ }
+ else {
+ PyErr_WriteUnraisable(Py_None);
+ }
+ }
+}
+
/* array object functions */
static void
@@ -469,17 +480,15 @@ array_dealloc(PyArrayObject *self)
int retval;
if (PyArray_FLAGS(self) & NPY_ARRAY_WRITEBACKIFCOPY)
{
- char * msg = "WRITEBACKIFCOPY requires a call to "
- "PyArray_ResolveWritebackIfCopy or "
- "PyArray_DiscardWritebackIfCopy before array_dealloc is "
- "called.";
- /* 2017-Nov-10 1.14 */
- if (DEPRECATE(msg) < 0) {
- /* dealloc cannot raise an error, best effort try to write
- to stderr and clear the error
- */
- PyErr_WriteUnraisable((PyObject *)&PyArray_Type);
- }
+ char const * msg = "WRITEBACKIFCOPY detected in array_dealloc. "
+ " Required call to PyArray_ResolveWritebackIfCopy or "
+ "PyArray_DiscardWritebackIfCopy is missing. This could also "
+ "be caused by using a nditer without a context manager";
+ Py_INCREF(self); /* hold on to self in next call since if
+ * refcount == 0 it will recurse back into
+ * array_dealloc
+ */
+ WARN_IN_DEALLOC(PyExc_RuntimeWarning, msg);
retval = PyArray_ResolveWritebackIfCopy(self);
if (retval < 0)
{
@@ -489,10 +498,15 @@ array_dealloc(PyArrayObject *self)
}
if (PyArray_FLAGS(self) & NPY_ARRAY_UPDATEIFCOPY) {
/* DEPRECATED, remove once the flag is removed */
+ char const * msg = "UPDATEIFCOPY detected in array_dealloc. "
+ " Required call to PyArray_ResolveWritebackIfCopy or "
+ "PyArray_DiscardWritebackIfCopy is missing";
Py_INCREF(self); /* hold on to self in next call since if
- * refcount == 0 it will recurse back into
+ * refcount == 0 it will recurse back into
*array_dealloc
*/
+ /* 2017-Nov-10 1.14 */
+ WARN_IN_DEALLOC(PyExc_DeprecationWarning, msg);
retval = PyArray_ResolveWritebackIfCopy(self);
if (retval < 0)
{
@@ -501,7 +515,7 @@ array_dealloc(PyArrayObject *self)
}
}
/*
- * In any case base is pointing to something that we need
+ * If fa->base is non-NULL, it is something
* to DECREF -- either a view or a buffer object
*/
Py_XDECREF(fa->base);
diff --git a/numpy/core/src/multiarray/arraytypes.c.src b/numpy/core/src/multiarray/arraytypes.c.src
index 5e6804a5c..972147bb0 100644
--- a/numpy/core/src/multiarray/arraytypes.c.src
+++ b/numpy/core/src/multiarray/arraytypes.c.src
@@ -182,6 +182,15 @@ npy_strtoull(const char *str, char **endptr, int base)
*****************************************************************************
*/
+#define _ALIGN(type) offsetof(struct {char c; type v;}, v)
+/*
+ * Disable harmless compiler warning "4116: unnamed type definition in
+ * parentheses" which is caused by the _ALIGN macro.
+ */
+#if defined(_MSC_VER)
+#pragma warning(disable:4116)
+#endif
+
/**begin repeat
*
@@ -246,8 +255,10 @@ static int
}
return -1;
}
- if (ap == NULL || PyArray_ISBEHAVED(ap))
+ if (ap == NULL || PyArray_ISBEHAVED(ap)) {
+ assert(npy_is_aligned(ov, _ALIGN(@type@)));
*((@type@ *)ov)=temp;
+ }
else {
PyArray_DESCR(ap)->f->copyswap(ov, &temp, PyArray_ISBYTESWAPPED(ap),
ap);
@@ -655,9 +666,7 @@ VOID_getitem(void *input, void *vap)
{
PyArrayObject *ap = vap;
char *ip = input;
- PyArrayObject *u = NULL;
PyArray_Descr* descr;
- int itemsize;
descr = PyArray_DESCR(ap);
if (PyDataType_HASFIELDS(descr)) {
@@ -731,68 +740,7 @@ VOID_getitem(void *input, void *vap)
return (PyObject *)ret;
}
- /* 2017-11-26, 1.14 */
- if (DEPRECATE_FUTUREWARNING(
- "the `.item()` method of unstructured void types will return an "
- "immutable `bytes` object in the near future, the same as "
- "returned by `bytes(void_obj)`, instead of the mutable memoryview "
- "or integer array returned in numpy 1.13.") < 0) {
- return NULL;
- }
- /*
- * In the future all the code below will be replaced by
- *
- * For unstructured void types like V4, return a bytes object (copy).
- * return PyBytes_FromStringAndSize(PyArray_DATA(ap), descr->elsize);
- */
-
- if (PyDataType_FLAGCHK(descr, NPY_ITEM_HASOBJECT)
- || PyDataType_FLAGCHK(descr, NPY_ITEM_IS_POINTER)) {
- PyErr_SetString(PyExc_ValueError,
- "tried to get void-array with object members as buffer.");
- return NULL;
- }
- itemsize = PyArray_DESCR(ap)->elsize;
-
-#if defined(NPY_PY3K)
- /*
- * Return a byte array; there are no plain buffer objects on Py3
- */
- {
- npy_intp dims[1], strides[1];
- dims[0] = itemsize;
- strides[0] = 1;
- descr = PyArray_DescrNewFromType(NPY_BYTE);
- u = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type,
- descr, 1, dims, strides, ip,
- PyArray_ISWRITEABLE(ap) ? NPY_ARRAY_WRITEABLE : 0,
- NULL);
- Py_INCREF(ap);
- if (PyArray_SetBaseObject(u, (PyObject *)ap) < 0) {
- Py_DECREF(u);
- return NULL;
- }
- }
-#else
- /*
- * default is to return buffer object pointing to
- * current item a view of it
- */
- if (PyArray_ISWRITEABLE(ap)) {
- if (array_might_be_written(ap) < 0) {
- return NULL;
- }
- u = (PyArrayObject *)PyBuffer_FromReadWriteMemory(ip, itemsize);
- }
- else {
- u = (PyArrayObject *)PyBuffer_FromMemory(ip, itemsize);
- }
-#endif
-
- if (u == NULL) {
- return NULL;
- }
- return (PyObject *)u;
+ return PyBytes_FromStringAndSize(ip, descr->elsize);
}
@@ -809,7 +757,7 @@ NPY_NO_EXPORT int PyArray_CopyObject(PyArrayObject *, PyObject *);
*/
NPY_NO_EXPORT int
_setup_field(int i, PyArray_Descr *descr, PyArrayObject *arr,
- npy_intp *offset_p)
+ npy_intp *offset_p, char *dstdata)
{
PyObject *key;
PyObject *tup;
@@ -823,7 +771,8 @@ _setup_field(int i, PyArray_Descr *descr, PyArrayObject *arr,
}
((PyArrayObject_fields *)(arr))->descr = new;
- if ((new->alignment > 1) && ((offset % new->alignment) != 0)) {
+ if ((new->alignment > 1) &&
+ ((((uintptr_t)dstdata + offset) % new->alignment) != 0)) {
PyArray_CLEARFLAGS(arr, NPY_ARRAY_ALIGNED);
}
else {
@@ -851,7 +800,7 @@ _copy_and_return_void_setitem(PyArray_Descr *dstdescr, char *dstdata,
if (PyArray_EquivTypes(srcdescr, dstdescr)) {
for (i = 0; i < names_size; i++) {
/* neither line can ever fail, in principle */
- if (_setup_field(i, dstdescr, dummy, &offset)) {
+ if (_setup_field(i, dstdescr, dummy, &offset, dstdata)) {
return -1;
}
PyArray_DESCR(dummy)->f->copyswap(dstdata + offset,
@@ -921,7 +870,7 @@ VOID_setitem(PyObject *op, void *input, void *vap)
PyObject *item;
/* temporarily make ap have only this field */
- if (_setup_field(i, descr, ap, &offset) == -1) {
+ if (_setup_field(i, descr, ap, &offset, ip) == -1) {
failed = 1;
break;
}
@@ -943,7 +892,7 @@ VOID_setitem(PyObject *op, void *input, void *vap)
for (i = 0; i < names_size; i++) {
/* temporarily make ap have only this field */
- if (_setup_field(i, descr, ap, &offset) == -1) {
+ if (_setup_field(i, descr, ap, &offset, ip) == -1) {
failed = 1;
break;
}
@@ -4256,17 +4205,6 @@ small_correlate(const char * d_, npy_intp dstride,
*****************************************************************************
*/
-
-#define _ALIGN(type) offsetof(struct {char c; type v;}, v)
-/*
- * Disable harmless compiler warning "4116: unnamed type definition in
- * parentheses" which is caused by the _ALIGN macro.
- */
-#if defined(_MSC_VER)
-#pragma warning(disable:4116)
-#endif
-
-
/**begin repeat
*
* #from = VOID, STRING, UNICODE#
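
The VOID_getitem rewrite above lands the behavior the removed 1.14 FutureWarning announced: fetching an item from an unstructured void array now yields an immutable bytes copy rather than a mutable buffer/memoryview tied to the array. A minimal Python sketch of the new behavior (illustrative only, assuming NumPy 1.15 semantics):

    import numpy as np

    a = np.zeros(2, dtype='V4')   # unstructured void elements
    item = a[0].item()            # a bytes copy, e.g. b'\x00\x00\x00\x00'
    print(type(item))             # <class 'bytes'>, detached from `a`
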
diff --git a/numpy/core/src/multiarray/ctors.c b/numpy/core/src/multiarray/ctors.c
index 0eba077da..5d3cee647 100644
--- a/numpy/core/src/multiarray/ctors.c
+++ b/numpy/core/src/multiarray/ctors.c
@@ -1014,7 +1014,7 @@ PyArray_NewFromDescr_int(PyTypeObject *subtype, PyArray_Descr *descr, int nd,
}
else {
fa->flags = (flags & ~NPY_ARRAY_WRITEBACKIFCOPY);
- fa->flags = (fa->flags & ~NPY_ARRAY_UPDATEIFCOPY);
+ fa->flags &= ~NPY_ARRAY_UPDATEIFCOPY;
}
fa->descr = descr;
fa->base = (PyObject *)NULL;
@@ -1276,42 +1276,31 @@ PyArray_New(PyTypeObject *subtype, int nd, npy_intp *dims, int type_num,
}
-NPY_NO_EXPORT int
-_array_from_buffer_3118(PyObject *obj, PyObject **out)
+/* Steals a reference to the memory view */
+NPY_NO_EXPORT PyObject *
+_array_from_buffer_3118(PyObject *memoryview)
{
/* PEP 3118 */
- PyObject *memoryview;
Py_buffer *view;
PyArray_Descr *descr = NULL;
- PyObject *r;
- int nd, flags, k;
+ PyObject *r = NULL;
+ int nd, flags;
Py_ssize_t d;
npy_intp shape[NPY_MAXDIMS], strides[NPY_MAXDIMS];
- memoryview = PyMemoryView_FromObject(obj);
- if (memoryview == NULL) {
- PyErr_Clear();
- return -1;
- }
-
view = PyMemoryView_GET_BUFFER(memoryview);
if (view->format != NULL) {
descr = _descriptor_from_pep3118_format(view->format);
if (descr == NULL) {
- PyObject *msg;
- msg = PyBytes_FromFormat("Invalid PEP 3118 format string: '%s'",
- view->format);
- PyErr_WarnEx(PyExc_RuntimeWarning, PyBytes_AS_STRING(msg), 0);
- Py_DECREF(msg);
goto fail;
}
/* Sanity check */
if (descr->elsize != view->itemsize) {
- PyErr_WarnEx(PyExc_RuntimeWarning,
- "Item size computed from the PEP 3118 buffer format "
- "string does not match the actual item size.",
- 0);
+ PyErr_SetString(
+ PyExc_RuntimeError,
+ "Item size computed from the PEP 3118 buffer format "
+ "string does not match the actual item size.");
goto fail;
}
}
@@ -1322,13 +1311,13 @@ _array_from_buffer_3118(PyObject *obj, PyObject **out)
nd = view->ndim;
if (view->shape != NULL) {
- if (nd >= NPY_MAXDIMS || nd < 0) {
+ int k;
+ if (nd > NPY_MAXDIMS || nd < 0) {
+ PyErr_Format(PyExc_RuntimeError,
+ "PEP3118 dimensions do not satisfy 0 <= ndim <= NPY_MAXDIMS");
goto fail;
}
for (k = 0; k < nd; ++k) {
- if (k >= NPY_MAXDIMS) {
- goto fail;
- }
shape[k] = view->shape[k];
}
if (view->strides != NULL) {
@@ -1352,10 +1341,9 @@ _array_from_buffer_3118(PyObject *obj, PyObject **out)
strides[0] = view->itemsize;
}
else if (nd > 1) {
- PyErr_WarnEx(PyExc_RuntimeWarning,
- "ndim computed from the PEP 3118 buffer format "
- "is greater than 1, but shape is NULL.",
- 0);
+ PyErr_SetString(PyExc_RuntimeError,
+ "ndim computed from the PEP 3118 buffer format "
+ "is greater than 1, but shape is NULL.");
goto fail;
}
}
@@ -1364,21 +1352,21 @@ _array_from_buffer_3118(PyObject *obj, PyObject **out)
r = PyArray_NewFromDescr(&PyArray_Type, descr,
nd, shape, strides, view->buf,
flags, NULL);
- if (r == NULL ||
- PyArray_SetBaseObject((PyArrayObject *)r, memoryview) < 0) {
- Py_XDECREF(r);
- Py_DECREF(memoryview);
- return -1;
+ if (r == NULL) {
+ goto fail;
+ }
+ if (PyArray_SetBaseObject((PyArrayObject *)r, memoryview) < 0) {
+ goto fail;
}
PyArray_UpdateFlags((PyArrayObject *)r, NPY_ARRAY_UPDATE_ALL);
- *out = r;
- return 0;
+ return r;
fail:
+ Py_XDECREF(r);
Py_XDECREF(descr);
Py_DECREF(memoryview);
- return -1;
+ return NULL;
}
@@ -1490,14 +1478,25 @@ PyArray_GetArrayParamsFromObject(PyObject *op,
}
/* If op supports the PEP 3118 buffer interface */
- if (!PyBytes_Check(op) && !PyUnicode_Check(op) &&
- _array_from_buffer_3118(op, (PyObject **)out_arr) == 0) {
- if (writeable
- && PyArray_FailUnlessWriteable(*out_arr, "PEP 3118 buffer") < 0) {
- Py_DECREF(*out_arr);
- return -1;
+ if (!PyBytes_Check(op) && !PyUnicode_Check(op)) {
+
+ PyObject *memoryview = PyMemoryView_FromObject(op);
+ if (memoryview == NULL) {
+ PyErr_Clear();
+ }
+ else {
+ PyObject *arr = _array_from_buffer_3118(memoryview);
+ if (arr == NULL) {
+ return -1;
+ }
+ if (writeable
+ && PyArray_FailUnlessWriteable((PyArrayObject *)arr, "PEP 3118 buffer") < 0) {
+ Py_DECREF(arr);
+ return -1;
+ }
+ *out_arr = (PyArrayObject *)arr;
+ return 0;
}
- return (*out_arr) == NULL ? -1 : 0;
}
/* If op supports the __array_struct__ or __array_interface__ interface */
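
_array_from_buffer_3118 now receives an already-built memoryview (stealing its reference) and reports bad PEP 3118 metadata as a RuntimeError instead of a warning plus silent fallback. The well-formed path is unchanged; a small sketch of it (illustrative only):

    import numpy as np

    buf = bytearray(b'\x01\x02\x03\x04')
    a = np.asarray(memoryview(buf))   # PEP 3118 path: zero-copy view, base is the memoryview
    a[0] = 255
    print(buf[0])                     # 255 -- the array and the bytearray share memory
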
diff --git a/numpy/core/src/multiarray/datetime.c b/numpy/core/src/multiarray/datetime.c
index a4a028ad4..af542aecc 100644
--- a/numpy/core/src/multiarray/datetime.c
+++ b/numpy/core/src/multiarray/datetime.c
@@ -2808,9 +2808,12 @@ convert_pyobject_to_timedelta(PyArray_DatetimeMetaData *meta, PyObject *obj,
us_meta.base = NPY_FR_m;
}
else if (td % (24*60*60*1000000LL) != 0) {
- us_meta.base = NPY_FR_D;
+ us_meta.base = NPY_FR_h;
}
else if (td % (7*24*60*60*1000000LL) != 0) {
+ us_meta.base = NPY_FR_D;
+ }
+ else {
us_meta.base = NPY_FR_W;
}
us_meta.num = 1;
@@ -3679,11 +3682,11 @@ recursive_find_object_datetime64_type(PyObject *obj,
return 0;
}
- /* Python date object -> 'D' */
- else if (PyDate_Check(obj)) {
+ /* Python datetime object -> 'us' */
+ else if (PyDateTime_Check(obj)) {
PyArray_DatetimeMetaData tmp_meta;
- tmp_meta.base = NPY_FR_D;
+ tmp_meta.base = NPY_FR_us;
tmp_meta.num = 1;
/* Combine it with 'meta' */
@@ -3694,11 +3697,11 @@ recursive_find_object_datetime64_type(PyObject *obj,
return 0;
}
- /* Python datetime object -> 'us' */
- else if (PyDateTime_Check(obj)) {
+ /* Python date object -> 'D' */
+ else if (PyDate_Check(obj)) {
PyArray_DatetimeMetaData tmp_meta;
- tmp_meta.base = NPY_FR_us;
+ tmp_meta.base = NPY_FR_D;
tmp_meta.num = 1;
/* Combine it with 'meta' */
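
The two datetime.c hunks fix unit deduction. The timedelta divisibility cascade now steps minutes -> hours -> days -> weeks, and the PyDateTime_Check branch runs before PyDate_Check, which matters because datetime.datetime is a subclass of datetime.date and previously matched the 'D' branch first. A sketch of the corrected results (illustrative, assuming NumPy 1.15 semantics):

    import datetime
    import numpy as np

    print(np.timedelta64(datetime.timedelta(hours=36)))                    # 36 hours, not 1 day
    print(np.array(datetime.datetime(2018, 4, 21, 12), dtype='M8').dtype) # datetime64[us]
    print(np.array(datetime.date(2018, 4, 21), dtype='M8').dtype)         # datetime64[D]
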
diff --git a/numpy/core/src/multiarray/descriptor.c b/numpy/core/src/multiarray/descriptor.c
index 8d983ffc9..bb3cc9d4e 100644
--- a/numpy/core/src/multiarray/descriptor.c
+++ b/numpy/core/src/multiarray/descriptor.c
@@ -18,6 +18,7 @@
#include "templ_common.h" /* for npy_mul_with_overflow_intp */
#include "descriptor.h"
#include "alloc.h"
+#include "assert.h"
/*
* offset: A starting offset.
@@ -243,7 +244,7 @@ is_datetime_typestr(char *type, Py_ssize_t len)
}
static PyArray_Descr *
-_convert_from_tuple(PyObject *obj)
+_convert_from_tuple(PyObject *obj, int align)
{
PyArray_Descr *type, *res;
PyObject *val;
@@ -252,9 +253,16 @@ _convert_from_tuple(PyObject *obj)
if (PyTuple_GET_SIZE(obj) != 2) {
return NULL;
}
- if (!PyArray_DescrConverter(PyTuple_GET_ITEM(obj,0), &type)) {
- return NULL;
+ if (align) {
+ if (!PyArray_DescrAlignConverter(PyTuple_GET_ITEM(obj, 0), &type)) {
+ return NULL;
+ }
}
+ else {
+ if (!PyArray_DescrConverter(PyTuple_GET_ITEM(obj, 0), &type)) {
+ return NULL;
+ }
+ }
val = PyTuple_GET_ITEM(obj,1);
/* try to interpret next item as a type */
res = _use_inherit(type, val, &errflag);
@@ -1547,7 +1555,7 @@ PyArray_DescrConverter(PyObject *obj, PyArray_Descr **at)
}
else if (PyTuple_Check(obj)) {
/* or a tuple */
- *at = _convert_from_tuple(obj);
+ *at = _convert_from_tuple(obj, 0);
if (*at == NULL){
if (PyErr_Occurred()) {
return NPY_FAIL;
@@ -1931,33 +1939,26 @@ arraydescr_shape_get(PyArray_Descr *self)
if (!PyDataType_HASSUBARRAY(self)) {
return PyTuple_New(0);
}
- /*TODO
- * self->subarray->shape should always be a tuple,
- * so this check should be unnecessary
- */
- if (PyTuple_Check(self->subarray->shape)) {
- Py_INCREF(self->subarray->shape);
- return (PyObject *)(self->subarray->shape);
- }
- return Py_BuildValue("(O)", self->subarray->shape);
+ assert(PyTuple_Check(self->subarray->shape));
+ Py_INCREF(self->subarray->shape);
+ return self->subarray->shape;
}
static PyObject *
arraydescr_ndim_get(PyArray_Descr *self)
{
+ Py_ssize_t ndim;
+
if (!PyDataType_HASSUBARRAY(self)) {
return PyInt_FromLong(0);
}
- /*TODO
- * self->subarray->shape should always be a tuple,
- * so this check should be unnecessary
+
+ /*
+ * PyTuple_Size has built in check
+ * for tuple argument
*/
- if (PyTuple_Check(self->subarray->shape)) {
- Py_ssize_t ndim = PyTuple_Size(self->subarray->shape);
- return PyInt_FromLong(ndim);
- }
- /* consistent with arraydescr_shape_get */
- return PyInt_FromLong(1);
+ ndim = PyTuple_Size(self->subarray->shape);
+ return PyInt_FromLong(ndim);
}
@@ -2928,6 +2929,9 @@ PyArray_DescrAlignConverter(PyObject *obj, PyArray_Descr **at)
*at = _convert_from_commastring(tmp, 1);
Py_DECREF(tmp);
}
+ else if (PyTuple_Check(obj)) {
+ *at = _convert_from_tuple(obj, 1);
+ }
else if (PyList_Check(obj)) {
*at = _convert_from_array_descr(obj, 1);
}
diff --git a/numpy/core/src/multiarray/dtype_transfer.c b/numpy/core/src/multiarray/dtype_transfer.c
index 9c27255aa..9f9aa6757 100644
--- a/numpy/core/src/multiarray/dtype_transfer.c
+++ b/numpy/core/src/multiarray/dtype_transfer.c
@@ -3400,6 +3400,7 @@ PyArray_GetDTypeTransferFunction(int aligned,
{
npy_intp src_itemsize, dst_itemsize;
int src_type_num, dst_type_num;
+ int is_builtin;
#if NPY_DT_DBG_TRACING
printf("Calculating dtype transfer from ");
@@ -3439,6 +3440,7 @@ PyArray_GetDTypeTransferFunction(int aligned,
dst_itemsize = dst_dtype->elsize;
src_type_num = src_dtype->type_num;
dst_type_num = dst_dtype->type_num;
+ is_builtin = src_type_num < NPY_NTYPES && dst_type_num < NPY_NTYPES;
/* Common special case - number -> number NBO cast */
if (PyTypeNum_ISNUMBER(src_type_num) &&
@@ -3462,13 +3464,14 @@ PyArray_GetDTypeTransferFunction(int aligned,
}
/*
- * If there are no references and the data types are equivalent,
+ * If there are no references and the data types are equivalent and builtin,
* return a simple copy
*/
if (PyArray_EquivTypes(src_dtype, dst_dtype) &&
!PyDataType_REFCHK(src_dtype) && !PyDataType_REFCHK(dst_dtype) &&
( !PyDataType_HASFIELDS(dst_dtype) ||
- is_dtype_struct_simple_unaligned_layout(dst_dtype)) ) {
+ is_dtype_struct_simple_unaligned_layout(dst_dtype)) &&
+ is_builtin) {
/*
* We can't pass through the aligned flag because it's not
* appropriate. Consider a size-8 string, it will say it's
@@ -3494,7 +3497,7 @@ PyArray_GetDTypeTransferFunction(int aligned,
!PyDataType_HASSUBARRAY(dst_dtype) &&
src_type_num != NPY_DATETIME && src_type_num != NPY_TIMEDELTA) {
/* A custom data type requires that we use its copy/swap */
- if (src_type_num >= NPY_NTYPES || dst_type_num >= NPY_NTYPES) {
+ if (!is_builtin) {
/*
* If the sizes and kinds are identical, but they're different
* custom types, then get a cast function
diff --git a/numpy/core/src/multiarray/einsum.c.src b/numpy/core/src/multiarray/einsum.c.src
index 5dbc30aa9..470a5fff9 100644
--- a/numpy/core/src/multiarray/einsum.c.src
+++ b/numpy/core/src/multiarray/einsum.c.src
@@ -591,7 +591,7 @@ finish_after_unrolled_loop:
accum += @from@(data0[@i@]) * @from@(data1[@i@]);
/**end repeat2**/
case 0:
- *(@type@ *)dataptr[2] += @to@(accum);
+ *(@type@ *)dataptr[2] = @to@(@from@(*(@type@ *)dataptr[2]) + accum);
return;
}
@@ -749,7 +749,7 @@ finish_after_unrolled_loop:
accum += @from@(data1[@i@]);
/**end repeat2**/
case 0:
- *(@type@ *)dataptr[2] += @to@(value0 * accum);
+ *(@type@ *)dataptr[2] = @to@(@from@(*(@type@ *)dataptr[2]) + value0 * accum);
return;
}
@@ -848,7 +848,7 @@ finish_after_unrolled_loop:
accum += @from@(data0[@i@]);
/**end repeat2**/
case 0:
- *(@type@ *)dataptr[2] += @to@(accum * value1);
+ *(@type@ *)dataptr[2] = @to@(@from@(*(@type@ *)dataptr[2]) + accum * value1);
return;
}
diff --git a/numpy/core/src/multiarray/lowlevel_strided_loops.c.src b/numpy/core/src/multiarray/lowlevel_strided_loops.c.src
index 397aaf209..fa68af19a 100644
--- a/numpy/core/src/multiarray/lowlevel_strided_loops.c.src
+++ b/numpy/core/src/multiarray/lowlevel_strided_loops.c.src
@@ -1373,7 +1373,7 @@ PyArray_TransferMaskedStridedToNDim(npy_intp ndim,
/*
* Advanced indexing iteration of arrays when there is a single indexing
* array which has the same memory order as the value array and both
- * can be trivally iterated (single stride, aligned, no casting necessary).
+ * can be trivially iterated (single stride, aligned, no casting necessary).
*/
NPY_NO_EXPORT int
mapiter_trivial_@name@(PyArrayObject *self, PyArrayObject *ind,
@@ -1747,7 +1747,7 @@ mapiter_@name@(PyArrayMapIterObject *mit)
}
else {
/*
- * faster resetting if the subspace iteration is trival.
+ * faster resetting if the subspace iteration is trivial.
* reset_offsets are zero for positive strides,
* for negative strides this shifts the pointer to the last
* item.
diff --git a/numpy/core/src/multiarray/mapping.c b/numpy/core/src/multiarray/mapping.c
index eca4e98be..42dbc3cce 100644
--- a/numpy/core/src/multiarray/mapping.c
+++ b/numpy/core/src/multiarray/mapping.c
@@ -293,8 +293,7 @@ unpack_indices(PyObject *index, PyObject **result, npy_intp result_n)
if (commit_to_unpack) {
/* propagate errors */
if (tmp_obj == NULL) {
- multi_DECREF(result, i);
- return -1;
+ goto fail;
}
}
else {
@@ -313,6 +312,16 @@ unpack_indices(PyObject *index, PyObject **result, npy_intp result_n)
|| PySlice_Check(tmp_obj)
|| tmp_obj == Py_Ellipsis
|| tmp_obj == Py_None) {
+ if (DEPRECATE_FUTUREWARNING(
+ "Using a non-tuple sequence for multidimensional "
+ "indexing is deprecated; use `arr[tuple(seq)]` "
+ "instead of `arr[seq]`. In the future this will be "
+ "interpreted as an array index, `arr[np.array(seq)]`, "
+ "which will result either in an error or a different "
+ "result.") < 0) {
+ i++; /* since loop update doesn't run */
+ goto fail;
+ }
commit_to_unpack = 1;
}
}
@@ -328,6 +337,10 @@ unpack_indices(PyObject *index, PyObject **result, npy_intp result_n)
multi_DECREF(result, i);
return unpack_scalar(index, result, result_n);
}
+
+fail:
+ multi_DECREF(result, i);
+ return -1;
}
/**
@@ -3374,6 +3387,7 @@ PyArray_MapIterArray(PyArrayObject * a, PyObject * index)
static void
arraymapiter_dealloc(PyArrayMapIterObject *mit)
{
+ PyArray_ResolveWritebackIfCopy(mit->array);
Py_XDECREF(mit->array);
Py_XDECREF(mit->ait);
Py_XDECREF(mit->subspace);
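
The FutureWarning added to unpack_indices deprecates multidimensional indexing with a non-tuple sequence, since such a sequence is ambiguous between an index tuple and an array index. A sketch of the supported spelling (illustrative):

    import numpy as np

    a = np.arange(12).reshape(3, 4)
    idx = [slice(None), 2]     # a list of index objects
    col = a[tuple(idx)]        # supported; plain a[idx] now emits a FutureWarning
    print(col)                 # [ 2  6 10]
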
diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c
index 0008cb04b..6d323dbd8 100644
--- a/numpy/core/src/multiarray/multiarraymodule.c
+++ b/numpy/core/src/multiarray/multiarraymodule.c
@@ -197,7 +197,7 @@ PyArray_CompareLists(npy_intp *l1, npy_intp *l2, int n)
}
/*
- * simulates a C-style 1-3 dimensional array which can be accesed using
+ * simulates a C-style 1-3 dimensional array which can be accessed using
* ptr[i] or ptr[i][j] or ptr[i][j][k] -- requires pointer allocation
* for 2-d and 3-d.
*
@@ -3605,7 +3605,7 @@ as_buffer(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds)
/*
- * Prints floating-point scalars usign the Dragon4 algorithm, scientific mode.
+ * Prints floating-point scalars using the Dragon4 algorithm, scientific mode.
* See docstring of `np.format_float_scientific` for description of arguments.
* The difference is that a value of -1 is valid for pad_left, exp_digits,
* precision, which is equivalent to `None`.
@@ -3661,7 +3661,7 @@ dragon4_scientific(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds)
}
/*
- * Prints floating-point scalars usign the Dragon4 algorithm, positional mode.
+ * Prints floating-point scalars using the Dragon4 algorithm, positional mode.
* See docstring of `np.format_float_positional` for description of arguments.
* The difference is that a value of -1 is valid for pad_left, pad_right,
* precision, which is equivalent to `None`.
@@ -4732,10 +4732,10 @@ static struct PyModuleDef moduledef = {
/* Initialization function for the module */
#if defined(NPY_PY3K)
-#define RETVAL m
+#define RETVAL(x) x
PyMODINIT_FUNC PyInit_multiarray(void) {
#else
-#define RETVAL
+#define RETVAL(x)
PyMODINIT_FUNC initmultiarray(void) {
#endif
PyObject *m, *d, *s;
@@ -4763,6 +4763,10 @@ PyMODINIT_FUNC initmultiarray(void) {
/* Initialize access to the PyDateTime API */
numpy_pydatetime_import();
+ if (PyErr_Occurred()) {
+ goto err;
+ }
+
/* Add some symbolic constants to the module */
d = PyModule_GetDict(m);
if (!d) {
@@ -4776,7 +4780,7 @@ PyMODINIT_FUNC initmultiarray(void) {
*/
PyArray_Type.tp_hash = PyObject_HashNotImplemented;
if (PyType_Ready(&PyArray_Type) < 0) {
- return RETVAL;
+ goto err;
}
if (setup_scalartypes(d) < 0) {
goto err;
@@ -4786,32 +4790,32 @@ PyMODINIT_FUNC initmultiarray(void) {
PyArrayMultiIter_Type.tp_iter = PyObject_SelfIter;
PyArrayMultiIter_Type.tp_free = PyArray_free;
if (PyType_Ready(&PyArrayIter_Type) < 0) {
- return RETVAL;
+ goto err;
}
if (PyType_Ready(&PyArrayMapIter_Type) < 0) {
- return RETVAL;
+ goto err;
}
if (PyType_Ready(&PyArrayMultiIter_Type) < 0) {
- return RETVAL;
+ goto err;
}
PyArrayNeighborhoodIter_Type.tp_new = PyType_GenericNew;
if (PyType_Ready(&PyArrayNeighborhoodIter_Type) < 0) {
- return RETVAL;
+ goto err;
}
if (PyType_Ready(&NpyIter_Type) < 0) {
- return RETVAL;
+ goto err;
}
PyArrayDescr_Type.tp_hash = PyArray_DescrHash;
if (PyType_Ready(&PyArrayDescr_Type) < 0) {
- return RETVAL;
+ goto err;
}
if (PyType_Ready(&PyArrayFlags_Type) < 0) {
- return RETVAL;
+ goto err;
}
NpyBusDayCalendar_Type.tp_new = PyType_GenericNew;
if (PyType_Ready(&NpyBusDayCalendar_Type) < 0) {
- return RETVAL;
+ goto err;
}
c_api = NpyCapsule_FromVoidPtr((void *)PyArray_API, NULL);
@@ -4897,12 +4901,13 @@ PyMODINIT_FUNC initmultiarray(void) {
if (set_typeinfo(d) != 0) {
goto err;
}
- return RETVAL;
+
+ return RETVAL(m);
err:
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"cannot load multiarray module.");
}
- return RETVAL;
+ return RETVAL(NULL);
}
diff --git a/numpy/core/src/multiarray/nditer_api.c b/numpy/core/src/multiarray/nditer_api.c
index f2bc23715..152955940 100644
--- a/numpy/core/src/multiarray/nditer_api.c
+++ b/numpy/core/src/multiarray/nditer_api.c
@@ -1391,6 +1391,47 @@ NpyIter_GetInnerLoopSizePtr(NpyIter *iter)
}
/*NUMPY_API
+ * Resolves all writebackifcopy scratch buffers; it is not safe to use
+ * the iterator operands after this call, whether in this iterator or
+ * in any copies of it.
+ * Returns 0 on success, -1 on failure.
+ */
+NPY_NO_EXPORT int
+NpyIter_Close(NpyIter *iter)
+{
+ int ret=0, iop, nop;
+ PyArrayObject ** operands;
+ npyiter_opitflags *op_itflags;
+ if (iter == NULL) {
+ return 0;
+ }
+ nop = NIT_NOP(iter);
+ operands = NIT_OPERANDS(iter);
+ op_itflags = NIT_OPITFLAGS(iter);
+ /* If the NPY_OP_ITFLAG_HAS_WRITEBACK flag is set on an operand,
+ * resolve it. If resolution fails (which should never happen),
+ * discard the writeback scratch buffers of the remaining operands
+ * and return a failure status.
+ */
+ for (iop=0; iop<nop; iop++) {
+ if (op_itflags[iop] & NPY_OP_ITFLAG_HAS_WRITEBACK) {
+ op_itflags[iop] &= ~NPY_OP_ITFLAG_HAS_WRITEBACK;
+ if (PyArray_ResolveWritebackIfCopy(operands[iop]) < 0) {
+ ret = -1;
+ iop++;
+ break;
+ }
+ }
+ }
+ for (; iop<nop; iop++) {
+ if (op_itflags[iop] & NPY_OP_ITFLAG_HAS_WRITEBACK) {
+ op_itflags[iop] &= ~NPY_OP_ITFLAG_HAS_WRITEBACK;
+ PyArray_DiscardWritebackIfCopy(operands[iop]);
+ }
+ }
+ return ret;
+}
+
+/*NUMPY_API
* For debugging
*/
NPY_NO_EXPORT void
@@ -2799,5 +2840,4 @@ npyiter_checkreducesize(NpyIter *iter, npy_intp count,
}
return count * (*reduce_innersize);
}
-
#undef NPY_ITERATOR_IMPLEMENTATION_CODE
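
NpyIter_Close is the single point where WRITEBACKIFCOPY scratch buffers are written back; the nditer.close() method added later in this diff calls into it. A Python sketch of the resulting semantics (illustrative, assuming NumPy 1.15; the 'updateifcopy' operand flag now produces a writeback copy that is resolved at close):

    import numpy as np

    a = np.arange(6, dtype='f4')
    it = np.nditer(a, op_flags=[['readwrite', 'updateifcopy']],
                   op_dtypes=['f8'], casting='same_kind')
    for x in it:
        x[...] = x * 2
    it.close()                 # resolves the scratch copy back into `a`
    print(a)                   # doubled only after close()
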
diff --git a/numpy/core/src/multiarray/nditer_constr.c b/numpy/core/src/multiarray/nditer_constr.c
index b74aca01c..c512cf208 100644
--- a/numpy/core/src/multiarray/nditer_constr.c
+++ b/numpy/core/src/multiarray/nditer_constr.c
@@ -403,6 +403,7 @@ NpyIter_AdvancedNew(int nop, PyArrayObject **op_in, npy_uint32 flags,
*/
if (!npyiter_allocate_arrays(iter, flags, op_dtype, subtype, op_flags,
op_itflags, op_axes)) {
+ NpyIter_Close(iter);
NpyIter_Deallocate(iter);
return NULL;
}
@@ -464,12 +465,14 @@ NpyIter_AdvancedNew(int nop, PyArrayObject **op_in, npy_uint32 flags,
/* If buffering is set without delayed allocation */
if (itflags & NPY_ITFLAG_BUFFER) {
if (!npyiter_allocate_transfer_functions(iter)) {
+ NpyIter_Close(iter);
NpyIter_Deallocate(iter);
return NULL;
}
if (!(itflags & NPY_ITFLAG_DELAYBUF)) {
/* Allocate the buffers */
if (!npyiter_allocate_buffers(iter, NULL)) {
+ NpyIter_Close(iter);
NpyIter_Deallocate(iter);
return NULL;
}
@@ -2716,7 +2719,7 @@ npyiter_allocate_arrays(NpyIter *iter,
*
* If any write operand has memory overlap with any read operand,
* eliminate all overlap by making temporary copies, by enabling
- * NPY_OP_ITFLAG_FORCECOPY for the write operand to force UPDATEIFCOPY.
+ * NPY_OP_ITFLAG_FORCECOPY for the write operand to force WRITEBACKIFCOPY.
*
* Operands with NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE enabled are not
* considered overlapping if the arrays are exactly the same. In this
@@ -2920,13 +2923,15 @@ npyiter_allocate_arrays(NpyIter *iter,
return 0;
}
}
- /* If the data will be written to, set UPDATEIFCOPY */
+ /* If the data will be written to, set WRITEBACKIFCOPY
+ * and require a context manager */
if (op_itflags[iop] & NPY_OP_ITFLAG_WRITE) {
Py_INCREF(op[iop]);
- if (PyArray_SetUpdateIfCopyBase(temp, op[iop]) < 0) {
+ if (PyArray_SetWritebackIfCopyBase(temp, op[iop]) < 0) {
Py_DECREF(temp);
return 0;
}
+ op_itflags[iop] |= NPY_OP_ITFLAG_HAS_WRITEBACK;
}
Py_DECREF(op[iop]);
diff --git a/numpy/core/src/multiarray/nditer_impl.h b/numpy/core/src/multiarray/nditer_impl.h
index 7788d327b..5fb146026 100644
--- a/numpy/core/src/multiarray/nditer_impl.h
+++ b/numpy/core/src/multiarray/nditer_impl.h
@@ -124,6 +124,8 @@
#define NPY_OP_ITFLAG_USINGBUFFER 0x0100
/* The operand must be copied (with WRITEBACKIFCOPY if also ITFLAG_WRITE) */
#define NPY_OP_ITFLAG_FORCECOPY 0x0200
+/* The operand has temporary data, write it back at dealloc */
+#define NPY_OP_ITFLAG_HAS_WRITEBACK 0x0400
/*
* The data layout of the iterator is fully specified by
diff --git a/numpy/core/src/multiarray/nditer_pywrap.c b/numpy/core/src/multiarray/nditer_pywrap.c
index 0d318178f..4505e645b 100644
--- a/numpy/core/src/multiarray/nditer_pywrap.c
+++ b/numpy/core/src/multiarray/nditer_pywrap.c
@@ -26,6 +26,8 @@ struct NewNpyArrayIterObject_tag {
NpyIter *iter;
/* Flag indicating iteration started/stopped */
char started, finished;
+ /* iter operands cannot be referenced if iter is closed */
+ npy_bool is_closed;
/* Child to update for nested iteration */
NewNpyArrayIterObject *nested_child;
/* Cached values from the iterator */
@@ -85,6 +87,7 @@ npyiter_new(PyTypeObject *subtype, PyObject *args, PyObject *kwds)
if (self != NULL) {
self->iter = NULL;
self->nested_child = NULL;
+ self->is_closed = 0;
}
return (PyObject *)self;
@@ -704,7 +707,7 @@ npyiter_convert_ops(PyObject *op_in, PyObject *op_flags_in,
PyErr_SetString(PyExc_TypeError,
"Iterator operand is flagged as writeable, "
"but is an object which cannot be written "
- "back to via UPDATEIFCOPY");
+ "back to via WRITEBACKIFCOPY");
}
for (iop = 0; iop < nop; ++iop) {
Py_DECREF(op[iop]);
@@ -1414,6 +1417,12 @@ static PyObject *npyiter_value_get(NewNpyArrayIterObject *self)
ret = npyiter_seq_item(self, 0);
}
else {
+ if (self->is_closed) {
+ PyErr_SetString(PyExc_ValueError,
+ "Iterator is closed");
+ return NULL;
+ }
+
ret = PyTuple_New(nop);
if (ret == NULL) {
return NULL;
@@ -1443,6 +1452,11 @@ static PyObject *npyiter_operands_get(NewNpyArrayIterObject *self)
"Iterator is invalid");
return NULL;
}
+ if (self->is_closed) {
+ PyErr_SetString(PyExc_ValueError,
+ "Iterator is closed");
+ return NULL;
+ }
nop = NpyIter_GetNOp(self->iter);
operands = self->operands;
@@ -1473,6 +1487,12 @@ static PyObject *npyiter_itviews_get(NewNpyArrayIterObject *self)
return NULL;
}
+ if (self->is_closed) {
+ PyErr_SetString(PyExc_ValueError,
+ "Iterator is closed");
+ return NULL;
+ }
+
nop = NpyIter_GetNOp(self->iter);
ret = PyTuple_New(nop);
@@ -1495,7 +1515,8 @@ static PyObject *npyiter_itviews_get(NewNpyArrayIterObject *self)
static PyObject *
npyiter_next(NewNpyArrayIterObject *self)
{
- if (self->iter == NULL || self->iternext == NULL || self->finished) {
+ if (self->iter == NULL || self->iternext == NULL ||
+ self->finished || self->is_closed) {
return NULL;
}
@@ -1890,6 +1911,12 @@ static PyObject *npyiter_dtypes_get(NewNpyArrayIterObject *self)
return NULL;
}
+ if (self->is_closed) {
+ PyErr_SetString(PyExc_ValueError,
+ "Iterator is closed");
+ return NULL;
+ }
+
nop = NpyIter_GetNOp(self->iter);
ret = PyTuple_New(nop);
@@ -1986,6 +2013,12 @@ npyiter_seq_item(NewNpyArrayIterObject *self, Py_ssize_t i)
return NULL;
}
+ if (self->is_closed) {
+ PyErr_SetString(PyExc_ValueError,
+ "Iterator is closed");
+ return NULL;
+ }
+
nop = NpyIter_GetNOp(self->iter);
/* Negative indexing */
@@ -2070,6 +2103,12 @@ npyiter_seq_slice(NewNpyArrayIterObject *self,
return NULL;
}
+ if (self->is_closed) {
+ PyErr_SetString(PyExc_ValueError,
+ "Iterator is closed");
+ return NULL;
+ }
+
nop = NpyIter_GetNOp(self->iter);
if (ilow < 0) {
ilow = 0;
@@ -2130,6 +2169,12 @@ npyiter_seq_ass_item(NewNpyArrayIterObject *self, Py_ssize_t i, PyObject *v)
return -1;
}
+ if (self->is_closed) {
+ PyErr_SetString(PyExc_ValueError,
+ "Iterator is closed");
+ return -1;
+ }
+
nop = NpyIter_GetNOp(self->iter);
/* Negative indexing */
@@ -2204,6 +2249,12 @@ npyiter_seq_ass_slice(NewNpyArrayIterObject *self, Py_ssize_t ilow,
return -1;
}
+ if (self->is_closed) {
+ PyErr_SetString(PyExc_ValueError,
+ "Iterator is closed");
+ return -1;
+ }
+
nop = NpyIter_GetNOp(self->iter);
if (ilow < 0) {
ilow = 0;
@@ -2255,6 +2306,12 @@ npyiter_subscript(NewNpyArrayIterObject *self, PyObject *op)
return NULL;
}
+ if (self->is_closed) {
+ PyErr_SetString(PyExc_ValueError,
+ "Iterator is closed");
+ return NULL;
+ }
+
if (PyInt_Check(op) || PyLong_Check(op) ||
(PyIndex_Check(op) && !PySequence_Check(op))) {
npy_intp i = PyArray_PyIntAsIntp(op);
@@ -2304,6 +2361,12 @@ npyiter_ass_subscript(NewNpyArrayIterObject *self, PyObject *op,
return -1;
}
+ if (self->is_closed) {
+ PyErr_SetString(PyExc_ValueError,
+ "Iterator is closed");
+ return -1;
+ }
+
if (PyInt_Check(op) || PyLong_Check(op) ||
(PyIndex_Check(op) && !PySequence_Check(op))) {
npy_intp i = PyArray_PyIntAsIntp(op);
@@ -2331,6 +2394,44 @@ npyiter_ass_subscript(NewNpyArrayIterObject *self, PyObject *op,
return -1;
}
+static PyObject *
+npyiter_enter(NewNpyArrayIterObject *self)
+{
+ if (self->iter == NULL) {
+ PyErr_SetString(PyExc_RuntimeError, "operation on non-initialized iterator");
+ return NULL;
+ }
+ if (self->is_closed) {
+ PyErr_SetString(PyExc_ValueError, "cannot reuse closed iterator");
+ return NULL;
+ }
+ Py_INCREF(self);
+ return (PyObject *)self;
+}
+
+static PyObject *
+npyiter_close(NewNpyArrayIterObject *self)
+{
+ NpyIter *iter = self->iter;
+ int ret;
+ if (self->iter == NULL) {
+ Py_RETURN_NONE;
+ }
+ ret = NpyIter_Close(iter);
+ self->is_closed = 1;
+ if (ret < 0) {
+ return NULL;
+ }
+ Py_RETURN_NONE;
+}
+
+static PyObject *
+npyiter_exit(NewNpyArrayIterObject *self, PyObject *args)
+{
+ /* even if called via exception handling, write back any data */
+ return npyiter_close(self);
+}
+
static PyMethodDef npyiter_methods[] = {
{"reset",
(PyCFunction)npyiter_reset,
@@ -2356,6 +2457,12 @@ static PyMethodDef npyiter_methods[] = {
{"debug_print",
(PyCFunction)npyiter_debug_print,
METH_NOARGS, NULL},
+ {"__enter__", (PyCFunction)npyiter_enter,
+ METH_NOARGS, NULL},
+ {"__exit__", (PyCFunction)npyiter_exit,
+ METH_VARARGS, NULL},
+ {"close", (PyCFunction)npyiter_close,
+ METH_NOARGS, NULL},
{NULL, NULL, 0, NULL},
};
diff --git a/numpy/core/src/multiarray/number.c b/numpy/core/src/multiarray/number.c
index 915d743c8..14389a925 100644
--- a/numpy/core/src/multiarray/number.c
+++ b/numpy/core/src/multiarray/number.c
@@ -476,7 +476,9 @@ fast_scalar_power(PyArrayObject *a1, PyObject *o2, int inplace,
double exponent;
NPY_SCALARKIND kind; /* NPY_NOSCALAR is not scalar */
- if (PyArray_Check(a1) && ((kind=is_scalar_with_conversion(o2, &exponent))>0)) {
+ if (PyArray_Check(a1) &&
+ !PyArray_ISOBJECT(a1) &&
+ ((kind=is_scalar_with_conversion(o2, &exponent))>0)) {
PyObject *fastop = NULL;
if (PyArray_ISFLOAT(a1) || PyArray_ISCOMPLEX(a1)) {
if (exponent == 1.0) {
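
The added PyArray_ISOBJECT guard keeps object arrays out of the fast scalar-power shortcut, so arr ** 2 reaches each element's own __pow__ instead of being rewritten as a square (which multiplies). A sketch (illustrative):

    import numpy as np

    class C:
        def __mul__(self, other):
            return 'mul'
        def __pow__(self, exp):
            return 'pow'

    a = np.array([C()], dtype=object)
    print((a ** 2)[0])         # 'pow'; the fast path previously produced 'mul'
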
diff --git a/numpy/core/src/multiarray/scalartypes.c.src b/numpy/core/src/multiarray/scalartypes.c.src
index 9df635dee..6dc7e5a3e 100644
--- a/numpy/core/src/multiarray/scalartypes.c.src
+++ b/numpy/core/src/multiarray/scalartypes.c.src
@@ -1607,7 +1607,8 @@ static PyGetSetDef gentype_getsets[] = {
/* 0-dim array from scalar object */
-static char doc_getarray[] = "sc.__array__(|type) return 0-dim array";
+static char doc_getarray[] = "sc.__array__(dtype) returns a 0-dim array from "
+ "the scalar with the specified dtype";
static PyObject *
gentype_getarray(PyObject *scalar, PyObject *args)
@@ -4200,7 +4201,7 @@ doubletype_print(PyObject *o, FILE *fp, int flags)
return -1;
}
- ret = PyObject_Print(to_print, fp, flags);
+ ret = PyObject_Print(to_print, fp, Py_PRINT_RAW);
Py_DECREF(to_print);
return ret;
}
diff --git a/numpy/core/src/npymath/ieee754.c.src b/numpy/core/src/npymath/ieee754.c.src
index bca690b4d..5405c8fe3 100644
--- a/numpy/core/src/npymath/ieee754.c.src
+++ b/numpy/core/src/npymath/ieee754.c.src
@@ -6,6 +6,7 @@
*/
#include "npy_math_common.h"
#include "npy_math_private.h"
+#include "numpy/utils.h"
#ifndef HAVE_COPYSIGN
double npy_copysign(double x, double y)
@@ -557,6 +558,15 @@ npy_longdouble npy_nextafterl(npy_longdouble x, npy_longdouble y)
}
#endif
+int npy_clear_floatstatus(void) {
+ char x = 0;
+ return npy_clear_floatstatus_barrier(&x);
+}
+int npy_get_floatstatus(void) {
+ char x = 0;
+ return npy_get_floatstatus_barrier(&x);
+}
+
/*
* Functions to set the floating point status word.
* keep in sync with NO_FLOATING_POINT_SUPPORT in ufuncobject.h
@@ -574,18 +584,24 @@ npy_longdouble npy_nextafterl(npy_longdouble x, npy_longdouble y)
defined(__NetBSD__)
#include <ieeefp.h>
-int npy_get_floatstatus(void)
+int npy_get_floatstatus_barrier(char * param)
{
int fpstatus = fpgetsticky();
+ /*
+ * By using a volatile, the compiler cannot reorder this call
+ */
+ if (param != NULL) {
+ volatile char NPY_UNUSED(c) = *(char*)param;
+ }
return ((FP_X_DZ & fpstatus) ? NPY_FPE_DIVIDEBYZERO : 0) |
((FP_X_OFL & fpstatus) ? NPY_FPE_OVERFLOW : 0) |
((FP_X_UFL & fpstatus) ? NPY_FPE_UNDERFLOW : 0) |
((FP_X_INV & fpstatus) ? NPY_FPE_INVALID : 0);
}
-int npy_clear_floatstatus(void)
+int npy_clear_floatstatus_barrier(char * param)
{
- int fpstatus = npy_get_floatstatus();
+ int fpstatus = npy_get_floatstatus_barrier(param);
fpsetsticky(0);
return fpstatus;
@@ -617,10 +633,16 @@ void npy_set_floatstatus_invalid(void)
(defined(__FreeBSD__) && (__FreeBSD_version >= 502114))
# include <fenv.h>
-int npy_get_floatstatus(void)
+int npy_get_floatstatus_barrier(char* param)
{
int fpstatus = fetestexcept(FE_DIVBYZERO | FE_OVERFLOW |
FE_UNDERFLOW | FE_INVALID);
+ /*
+ * By using a volatile, the compiler cannot reorder this call
+ */
+ if (param != NULL) {
+ volatile char NPY_UNUSED(c) = *(char*)param;
+ }
return ((FE_DIVBYZERO & fpstatus) ? NPY_FPE_DIVIDEBYZERO : 0) |
((FE_OVERFLOW & fpstatus) ? NPY_FPE_OVERFLOW : 0) |
@@ -628,10 +650,10 @@ int npy_get_floatstatus(void)
((FE_INVALID & fpstatus) ? NPY_FPE_INVALID : 0);
}
-int npy_clear_floatstatus(void)
+int npy_clear_floatstatus_barrier(char * param)
{
/* testing float status is 50-100 times faster than clearing on x86 */
- int fpstatus = npy_get_floatstatus();
+ int fpstatus = npy_get_floatstatus_barrier(param);
if (fpstatus != 0) {
feclearexcept(FE_DIVBYZERO | FE_OVERFLOW |
FE_UNDERFLOW | FE_INVALID);
@@ -665,18 +687,24 @@ void npy_set_floatstatus_invalid(void)
#include <float.h>
#include <fpxcp.h>
-int npy_get_floatstatus(void)
+int npy_get_floatstatus_barrier(char *param)
{
int fpstatus = fp_read_flag();
+ /*
+ * By using a volatile, the compiler cannot reorder this call
+ */
+ if (param != NULL) {
+ volatile char NPY_UNUSED(c) = *(char*)param;
+ }
return ((FP_DIV_BY_ZERO & fpstatus) ? NPY_FPE_DIVIDEBYZERO : 0) |
((FP_OVERFLOW & fpstatus) ? NPY_FPE_OVERFLOW : 0) |
((FP_UNDERFLOW & fpstatus) ? NPY_FPE_UNDERFLOW : 0) |
((FP_INVALID & fpstatus) ? NPY_FPE_INVALID : 0);
}
-int npy_clear_floatstatus(void)
+int npy_clear_floatstatus_barrier(char * param)
{
- int fpstatus = npy_get_floatstatus();
+ int fpstatus = npy_get_floatstatus_barrier(param);
fp_swap_flag(0);
return fpstatus;
@@ -710,8 +738,11 @@ void npy_set_floatstatus_invalid(void)
#include <float.h>
-int npy_get_floatstatus(void)
+int npy_get_floatstatus_barrier(char *param)
{
+ /*
+ * By using a volatile, the compiler cannot reorder this call
+ */
#if defined(_WIN64)
int fpstatus = _statusfp();
#else
@@ -720,15 +751,18 @@ int npy_get_floatstatus(void)
_statusfp2(&fpstatus, &fpstatus2);
fpstatus |= fpstatus2;
#endif
+ if (param != NULL) {
+ volatile char NPY_UNUSED(c) = *(char*)param;
+ }
return ((SW_ZERODIVIDE & fpstatus) ? NPY_FPE_DIVIDEBYZERO : 0) |
((SW_OVERFLOW & fpstatus) ? NPY_FPE_OVERFLOW : 0) |
((SW_UNDERFLOW & fpstatus) ? NPY_FPE_UNDERFLOW : 0) |
((SW_INVALID & fpstatus) ? NPY_FPE_INVALID : 0);
}
-int npy_clear_floatstatus(void)
+int npy_clear_floatstatus_barrier(char *param)
{
- int fpstatus = npy_get_floatstatus();
+ int fpstatus = npy_get_floatstatus_barrier(param);
_clearfp();
return fpstatus;
@@ -739,18 +773,24 @@ int npy_clear_floatstatus(void)
#include <machine/fpu.h>
-int npy_get_floatstatus(void)
+int npy_get_floatstatus_barrier(char *param)
{
unsigned long fpstatus = ieee_get_fp_control();
+ /*
+ * By using a volatile, the compiler cannot reorder this call
+ */
+ if (param != NULL) {
+ volatile char NPY_UNUSED(c) = *(char*)param;
+ }
return ((IEEE_STATUS_DZE & fpstatus) ? NPY_FPE_DIVIDEBYZERO : 0) |
((IEEE_STATUS_OVF & fpstatus) ? NPY_FPE_OVERFLOW : 0) |
((IEEE_STATUS_UNF & fpstatus) ? NPY_FPE_UNDERFLOW : 0) |
((IEEE_STATUS_INV & fpstatus) ? NPY_FPE_INVALID : 0);
}
-int npy_clear_floatstatus(void)
+int npy_clear_floatstatus_barrier(char *param)
{
- long fpstatus = npy_get_floatstatus();
+ int fpstatus = npy_get_floatstatus_barrier(param);
/* clear status bits as well as disable exception mode if on */
ieee_set_fp_control(0);
@@ -759,13 +799,14 @@ int npy_clear_floatstatus(void)
#else
-int npy_get_floatstatus(void)
+int npy_get_floatstatus_barrier(char *NPY_UNUSED(param))
{
return 0;
}
-int npy_clear_floatstatus(void)
+int npy_clear_floatstatus_barrier(char *param)
{
+ int fpstatus = npy_get_floatstatus_barrier(param);
return 0;
}
diff --git a/numpy/core/src/umath/extobj.c b/numpy/core/src/umath/extobj.c
index e44036358..188054e22 100644
--- a/numpy/core/src/umath/extobj.c
+++ b/numpy/core/src/umath/extobj.c
@@ -284,7 +284,7 @@ _check_ufunc_fperr(int errmask, PyObject *extobj, const char *ufunc_name) {
if (!errmask) {
return 0;
}
- fperr = PyUFunc_getfperr();
+ fperr = npy_get_floatstatus_barrier((char*)extobj);
if (!fperr) {
return 0;
}
diff --git a/numpy/core/src/umath/loops.c.src b/numpy/core/src/umath/loops.c.src
index d196a8d4e..1ca298b30 100644
--- a/numpy/core/src/umath/loops.c.src
+++ b/numpy/core/src/umath/loops.c.src
@@ -1819,7 +1819,7 @@ NPY_NO_EXPORT void
*((npy_bool *)op1) = @func@(in1) != 0;
}
}
- npy_clear_floatstatus();
+ npy_clear_floatstatus_barrier((char*)dimensions);
}
/**end repeat1**/
@@ -1866,6 +1866,9 @@ NPY_NO_EXPORT void
const @type@ in2 = *(@type@ *)ip2;
io1 = (io1 @OP@ in2 || npy_isnan(io1)) ? io1 : in2;
}
+ if (npy_isnan(io1)) {
+ npy_set_floatstatus_invalid();
+ }
*((@type@ *)iop1) = io1;
}
}
@@ -1901,7 +1904,7 @@ NPY_NO_EXPORT void
*((@type@ *)op1) = (in1 @OP@ in2 || npy_isnan(in2)) ? in1 : in2;
}
}
- npy_clear_floatstatus();
+ npy_clear_floatstatus_barrier((char*)dimensions);
}
/**end repeat1**/
@@ -1991,7 +1994,7 @@ NPY_NO_EXPORT void
*((@type@ *)op1) = tmp + 0;
}
}
- npy_clear_floatstatus();
+ npy_clear_floatstatus_barrier((char*)dimensions);
}
NPY_NO_EXPORT void
@@ -2177,7 +2180,7 @@ HALF_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED
const npy_half in1 = *(npy_half *)ip1;
*((npy_bool *)op1) = @func@(in1) != 0;
}
- npy_clear_floatstatus();
+ npy_clear_floatstatus_barrier((char*)dimensions);
}
/**end repeat**/
@@ -2239,7 +2242,7 @@ HALF_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED
const npy_half in2 = *(npy_half *)ip2;
*((npy_half *)op1) = (@OP@(in1, in2) || npy_half_isnan(in2)) ? in1 : in2;
}
- npy_clear_floatstatus();
+ npy_clear_floatstatus_barrier((char*)dimensions);
}
/**end repeat**/
@@ -2681,7 +2684,7 @@ NPY_NO_EXPORT void
const @ftype@ in1i = ((@ftype@ *)ip1)[1];
*((npy_bool *)op1) = @func@(in1r) @OP@ @func@(in1i);
}
- npy_clear_floatstatus();
+ npy_clear_floatstatus_barrier((char*)dimensions);
}
/**end repeat1**/
@@ -2790,7 +2793,7 @@ NPY_NO_EXPORT void
((@ftype@ *)op1)[1] = in2i;
}
}
- npy_clear_floatstatus();
+ npy_clear_floatstatus_barrier((char*)dimensions);
}
/**end repeat1**/
@@ -2840,10 +2843,14 @@ NPY_NO_EXPORT void
* #OP = EQ, NE, GT, GE, LT, LE#
* #identity = NPY_TRUE, NPY_FALSE, -1*4#
*/
+
+/**begin repeat1
+ * #suffix = , _OO_O#
+ * #as_bool = 1, 0#
+ */
NPY_NO_EXPORT void
-OBJECT_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func)) {
+OBJECT@suffix@_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func)) {
BINARY_LOOP {
- int ret;
PyObject *ret_obj;
PyObject *in1 = *(PyObject **)ip1;
PyObject *in2 = *(PyObject **)ip2;
@@ -2860,14 +2867,21 @@ OBJECT_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUS
if (ret_obj == NULL) {
return;
}
- ret = PyObject_IsTrue(ret_obj);
- Py_DECREF(ret_obj);
- if (ret == -1) {
- return;
+#if @as_bool@
+ {
+ int ret = PyObject_IsTrue(ret_obj);
+ Py_DECREF(ret_obj);
+ if (ret == -1) {
+ return;
+ }
+ *((npy_bool *)op1) = (npy_bool)ret;
}
- *((npy_bool *)op1) = (npy_bool)ret;
+#else
+ *((PyObject **)op1) = ret_obj;
+#endif
}
}
+/**end repeat1**/
/**end repeat**/
NPY_NO_EXPORT void
diff --git a/numpy/core/src/umath/loops.h.src b/numpy/core/src/umath/loops.h.src
index a01ef1529..5c2b2c22c 100644
--- a/numpy/core/src/umath/loops.h.src
+++ b/numpy/core/src/umath/loops.h.src
@@ -496,8 +496,12 @@ TIMEDELTA_mm_d_divide(char **args, npy_intp *dimensions, npy_intp *steps, void *
* #kind = equal, not_equal, greater, greater_equal, less, less_equal#
* #OP = EQ, NE, GT, GE, LT, LE#
*/
+/**begin repeat1
+ * #suffix = , _OO_O#
+ */
NPY_NO_EXPORT void
-OBJECT_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+OBJECT@suffix@_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+/**end repeat1**/
/**end repeat**/
NPY_NO_EXPORT void
diff --git a/numpy/core/src/umath/override.c b/numpy/core/src/umath/override.c
index 0aef093b0..123d9af87 100644
--- a/numpy/core/src/umath/override.c
+++ b/numpy/core/src/umath/override.c
@@ -123,11 +123,16 @@ normalize_reduce_args(PyUFuncObject *ufunc, PyObject *args,
npy_intp nargs = PyTuple_GET_SIZE(args);
npy_intp i;
PyObject *obj;
- static char *kwlist[] = {"array", "axis", "dtype", "out", "keepdims"};
+ static PyObject *NoValue = NULL;
+ static char *kwlist[] = {"array", "axis", "dtype", "out", "keepdims",
+ "initial"};
+
+ npy_cache_import("numpy", "_NoValue", &NoValue);
+ if (NoValue == NULL) {
+ return -1;
+ }
- if (nargs < 1 || nargs > 5) {
+ if (nargs < 1 || nargs > 6) {
PyErr_Format(PyExc_TypeError,
- "ufunc.reduce() takes from 1 to 5 positional "
+ "ufunc.reduce() takes from 1 to 6 positional "
"arguments but %"NPY_INTP_FMT" were given", nargs);
return -1;
}
@@ -151,6 +156,10 @@ normalize_reduce_args(PyUFuncObject *ufunc, PyObject *args,
}
obj = PyTuple_GetSlice(args, 3, 4);
}
+ /* Remove initial=np._NoValue */
+ if (i == 5 && obj == NoValue) {
+ continue;
+ }
PyDict_SetItemString(*normal_kwds, kwlist[i], obj);
if (i == 3) {
Py_DECREF(obj);
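
normalize_reduce_args grows a sixth slot so that ufunc.reduce overrides see the new initial keyword, with the np._NoValue placeholder stripped when the caller did not pass one. The keyword itself seeds the reduction, which also gives empty reductions a defined result. A sketch (illustrative, assuming NumPy 1.15):

    import numpy as np

    print(np.add.reduce([], initial=5))             # 5.0: the seed value
    print(np.minimum.reduce([3, 1, 4], initial=0))  # 0: the seed participates in the reduce
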
diff --git a/numpy/core/src/umath/reduction.c b/numpy/core/src/umath/reduction.c
index 681d3fefa..5c3a84e21 100644
--- a/numpy/core/src/umath/reduction.c
+++ b/numpy/core/src/umath/reduction.c
@@ -537,7 +537,7 @@ PyUFunc_ReduceWrapper(PyArrayObject *operand, PyArrayObject *out,
}
/* Start with the floating-point exception flags cleared */
- PyUFunc_clearfperr();
+ npy_clear_floatstatus_barrier((char*)&iter);
if (NpyIter_GetIterSize(iter) != 0) {
NpyIter_IterNextFunc *iternext;
diff --git a/numpy/core/src/umath/scalarmath.c.src b/numpy/core/src/umath/scalarmath.c.src
index 6e1fb1ee8..3e29c4b4e 100644
--- a/numpy/core/src/umath/scalarmath.c.src
+++ b/numpy/core/src/umath/scalarmath.c.src
@@ -848,7 +848,7 @@ static PyObject *
}
#if @fperr@
- PyUFunc_clearfperr();
+ npy_clear_floatstatus_barrier((char*)&out);
#endif
/*
@@ -863,7 +863,7 @@ static PyObject *
#if @fperr@
/* Check status flag. If it is set, then look up what to do */
- retstatus = PyUFunc_getfperr();
+ retstatus = npy_get_floatstatus_barrier((char*)&out);
if (retstatus) {
int bufsize, errmask;
PyObject *errobj;
@@ -993,7 +993,7 @@ static PyObject *
return Py_NotImplemented;
}
- PyUFunc_clearfperr();
+ npy_clear_floatstatus_barrier((char*)&out);
/*
* here we do the actual calculation with arg1 and arg2
@@ -1008,7 +1008,7 @@ static PyObject *
}
/* Check status flag. If it is set, then look up what to do */
- retstatus = PyUFunc_getfperr();
+ retstatus = npy_get_floatstatus_barrier((char*)&out);
if (retstatus) {
int bufsize, errmask;
PyObject *errobj;
@@ -1072,7 +1072,7 @@ static PyObject *
return Py_NotImplemented;
}
- PyUFunc_clearfperr();
+ npy_clear_floatstatus_barrier((char*)&out);
/*
* here we do the actual calculation with arg1 and arg2
@@ -1136,7 +1136,7 @@ static PyObject *
return Py_NotImplemented;
}
- PyUFunc_clearfperr();
+ npy_clear_floatstatus_barrier((char*)&out);
/*
* here we do the actual calculation with arg1 and arg2
@@ -1150,7 +1150,7 @@ static PyObject *
}
/* Check status flag. If it is set, then look up what to do */
- retstatus = PyUFunc_getfperr();
+ retstatus = npy_get_floatstatus_barrier((char*)&out);
if (retstatus) {
int bufsize, errmask;
PyObject *errobj;
diff --git a/numpy/core/src/umath/simd.inc.src b/numpy/core/src/umath/simd.inc.src
index 2241414ac..5c0568c12 100644
--- a/numpy/core/src/umath/simd.inc.src
+++ b/numpy/core/src/umath/simd.inc.src
@@ -1031,7 +1031,7 @@ sse2_@kind@_@TYPE@(@type@ * ip, @type@ * op, const npy_intp n)
i += 2 * stride;
/* minps/minpd will set invalid flag if nan is encountered */
- npy_clear_floatstatus();
+ npy_clear_floatstatus_barrier((char*)&c1);
LOOP_BLOCKED(@type@, 32) {
@vtype@ v1 = @vpre@_load_@vsuf@((@type@*)&ip[i]);
@vtype@ v2 = @vpre@_load_@vsuf@((@type@*)&ip[i + stride]);
@@ -1040,7 +1040,7 @@ sse2_@kind@_@TYPE@(@type@ * ip, @type@ * op, const npy_intp n)
}
c1 = @vpre@_@VOP@_@vsuf@(c1, c2);
- if (npy_get_floatstatus() & NPY_FPE_INVALID) {
+ if (npy_get_floatstatus_barrier((char*)&c1) & NPY_FPE_INVALID) {
*op = @nan@;
}
else {
@@ -1051,6 +1051,9 @@ sse2_@kind@_@TYPE@(@type@ * ip, @type@ * op, const npy_intp n)
LOOP_BLOCKED_END {
*op = (*op @OP@ ip[i] || npy_isnan(*op)) ? *op : ip[i];
}
+ if (npy_isnan(*op)) {
+ npy_set_floatstatus_invalid();
+ }
}
/**end repeat1**/
diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c
index bf5a4ead3..af415362b 100644
--- a/numpy/core/src/umath/ufunc_object.c
+++ b/numpy/core/src/umath/ufunc_object.c
@@ -65,6 +65,28 @@
#endif
/**********************************************/
+typedef struct {
+ PyObject *in; /* The input arguments to the ufunc, a tuple */
+ PyObject *out; /* The output arguments, a tuple. If no non-None outputs are
+ provided, then this is NULL. */
+} ufunc_full_args;
+
+/* Get the arg tuple to pass in the context argument to __array_wrap__ and
+ * __array_prepare__.
+ *
+ * Output arguments are only passed if at least one is non-None.
+ */
+static PyObject *
+_get_wrap_prepare_args(ufunc_full_args full_args) {
+ if (full_args.out == NULL) {
+ Py_INCREF(full_args.in);
+ return full_args.in;
+ }
+ else {
+ return PySequence_Concat(full_args.in, full_args.out);
+ }
+}
+
/* ---------------------------------------------------------------- */
static int
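
The ufunc_full_args pair above centralizes how the context tuple for __array_prepare__ / __array_wrap__ is built: outputs are appended only when at least one was actually supplied. A sketch of what a subclass observes (illustrative; __array_prepare__ is the pre-loop hook and still supported in 1.15):

    import numpy as np

    class Logged(np.ndarray):
        def __array_prepare__(self, arr, context=None):
            ufunc, args, i = context
            print(ufunc.__name__, 'context has', len(args), 'argument(s)')
            return arr

    a = np.arange(3.).view(Logged)
    a + 1                      # context args: (a, 1) -- inputs only
    np.add(a, 1, out=a)        # context args: (a, 1, a) -- out included when given
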
@@ -78,7 +100,8 @@ PyUFunc_getfperr(void)
* the non-clearing get was only added in 1.9, so this function always
* cleared; keep it that way just in case third-party code relied on the clearing
*/
- return npy_clear_floatstatus();
+ char param = 0;
+ return npy_clear_floatstatus_barrier(&param);
}
#define HANDLEIT(NAME, str) {if (retstatus & NPY_FPE_##NAME) { \
@@ -111,7 +134,8 @@ NPY_NO_EXPORT int
PyUFunc_checkfperr(int errmask, PyObject *errobj, int *first)
{
/* clearing is done for backward compatibility */
- int retstatus = npy_clear_floatstatus();
+ int retstatus;
+ retstatus = npy_clear_floatstatus_barrier((char*)&retstatus);
return PyUFunc_handlefperr(errmask, errobj, retstatus, first);
}
@@ -122,7 +146,8 @@ PyUFunc_checkfperr(int errmask, PyObject *errobj, int *first)
NPY_NO_EXPORT void
PyUFunc_clearfperr()
{
- npy_clear_floatstatus();
+ char param = 0;
+ npy_clear_floatstatus_barrier(&param);
}
/*
@@ -132,7 +157,7 @@ PyUFunc_clearfperr()
* defines the method.
*/
static PyObject*
-_find_array_method(PyObject *args, int nin, PyObject *method_name)
+_find_array_method(PyObject *args, PyObject *method_name)
{
int i, n_methods;
PyObject *obj;
@@ -140,7 +165,7 @@ _find_array_method(PyObject *args, int nin, PyObject *method_name)
PyObject *method = NULL;
n_methods = 0;
- for (i = 0; i < nin; i++) {
+ for (i = 0; i < PyTuple_GET_SIZE(args); i++) {
obj = PyTuple_GET_ITEM(args, i);
if (PyArray_CheckExact(obj) || PyArray_IsAnyScalar(obj)) {
continue;
@@ -238,17 +263,17 @@ _get_output_array_method(PyObject *obj, PyObject *method,
* should just have PyArray_Return called.
*/
static void
-_find_array_prepare(PyObject *args, PyObject *kwds,
+_find_array_prepare(ufunc_full_args args,
PyObject **output_prep, int nin, int nout)
{
- Py_ssize_t nargs;
int i;
+ PyObject *prep;
/*
* Determine the prepping function given by the input arrays
* (could be NULL).
*/
- PyObject *prep = _find_array_method(args, nin, npy_um_str_array_prepare);
+ prep = _find_array_method(args.in, npy_um_str_array_prepare);
/*
* For all the output arrays decide what to do.
*
@@ -261,29 +286,16 @@ _find_array_prepare(PyObject *args, PyObject *kwds,
* exact ndarray so that no PyArray_Return is
* done in that case.
*/
- nargs = PyTuple_GET_SIZE(args);
- for (i = 0; i < nout; i++) {
- int j = nin + i;
- PyObject *obj = NULL;
- if (j < nargs) {
- obj = PyTuple_GET_ITEM(args, j);
- /* Output argument one may also be in a keyword argument */
- if (i == 0 && obj == Py_None && kwds != NULL) {
- obj = PyDict_GetItem(kwds, npy_um_str_out);
- }
- }
- /* Output argument one may also be in a keyword argument */
- else if (i == 0 && kwds != NULL) {
- obj = PyDict_GetItem(kwds, npy_um_str_out);
- }
-
- if (obj == NULL) {
+ if (args.out == NULL) {
+ for (i = 0; i < nout; i++) {
Py_XINCREF(prep);
output_prep[i] = prep;
}
- else {
+ }
+ else {
+ for (i = 0; i < nout; i++) {
output_prep[i] = _get_output_array_method(
- obj, npy_um_str_array_prepare, prep);
+ PyTuple_GET_ITEM(args.out, i), npy_um_str_array_prepare, prep);
}
}
Py_XDECREF(prep);
@@ -556,7 +568,8 @@ get_ufunc_arguments(PyUFuncObject *ufunc,
PyObject **out_typetup,
int *out_subok,
PyArrayObject **out_wheremask,
- PyObject **out_axes)
+ PyObject **out_axes,
+ int *out_keepdims)
{
int i, nargs;
int nin = ufunc->nin;
@@ -811,9 +824,10 @@ get_ufunc_arguments(PyUFuncObject *ufunc,
switch (str[0]) {
case 'a':
- /* possible axis argument for generalized ufunc */
+ /* possible axes argument for generalized ufunc */
if (out_axes != NULL && strcmp(str, "axes") == 0) {
*out_axes = value;
bad_arg = 0;
}
break;
@@ -855,6 +869,17 @@ get_ufunc_arguments(PyUFuncObject *ufunc,
bad_arg = 0;
}
break;
+ case 'k':
+ if (out_keepdims != NULL && strcmp(str, "keepdims") == 0) {
+ if (!PyBool_Check(value)) {
+ PyErr_SetString(PyExc_TypeError,
+ "'keepdims' must be a boolean");
+ goto fail;
+ }
+ *out_keepdims = (value == Py_True);
+ bad_arg = 0;
+ }
+ break;
case 'o':
/*
* Output arrays may be specified as a keyword argument,
@@ -1141,22 +1166,31 @@ static int
prepare_ufunc_output(PyUFuncObject *ufunc,
PyArrayObject **op,
PyObject *arr_prep,
- PyObject *arr_prep_args,
+ ufunc_full_args full_args,
int i)
{
if (arr_prep != NULL && arr_prep != Py_None) {
PyObject *res;
PyArrayObject *arr;
+ PyObject *args_tup;
- res = PyObject_CallFunction(arr_prep, "O(OOi)",
- *op, ufunc, arr_prep_args, i);
- if ((res == NULL) || (res == Py_None) || !PyArray_Check(res)) {
- if (!PyErr_Occurred()){
- PyErr_SetString(PyExc_TypeError,
- "__array_prepare__ must return an "
- "ndarray or subclass thereof");
- }
- Py_XDECREF(res);
+ /* Call with the context argument */
+ args_tup = _get_wrap_prepare_args(full_args);
+ if (args_tup == NULL) {
+ return -1;
+ }
+ res = PyObject_CallFunction(
+ arr_prep, "O(OOi)", *op, ufunc, args_tup, i);
+ Py_DECREF(args_tup);
+
+ if (res == NULL) {
+ return -1;
+ }
+ else if (!PyArray_Check(res)) {
+ PyErr_SetString(PyExc_TypeError,
+ "__array_prepare__ must return an "
+ "ndarray or subclass thereof");
+ Py_DECREF(res);
return -1;
}
arr = (PyArrayObject *)res;
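The context tuple handed to __array_prepare__ is now always built from the normalized full arguments, so a subclass sees (ufunc, inputs + outputs, output index) no matter how out was spelled. A small illustration with a hypothetical subclass:

    import numpy as np

    class Loud(np.ndarray):
        def __array_prepare__(self, arr, context=None):
            ufunc, ctx_args, i = context  # (ufunc, normalized args, out index)
            print(ufunc.__name__, len(ctx_args), i)
            return arr

    a = np.arange(3.).view(Loud)
    np.multiply(a, 2)                     # prints: multiply 2 0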
@@ -1199,11 +1233,11 @@ iterator_loop(PyUFuncObject *ufunc,
NPY_ORDER order,
npy_intp buffersize,
PyObject **arr_prep,
- PyObject *arr_prep_args,
+ ufunc_full_args full_args,
PyUFuncGenericFunction innerloop,
void *innerloopdata)
{
- npy_intp i, iop, nin = ufunc->nin, nout = ufunc->nout;
+ npy_intp i, nin = ufunc->nin, nout = ufunc->nout;
npy_intp nop = nin + nout;
npy_uint32 op_flags[NPY_MAXARGS];
NpyIter *iter;
@@ -1216,6 +1250,7 @@ iterator_loop(PyUFuncObject *ufunc,
PyArrayObject **op_it;
npy_uint32 iter_flags;
+ int retval;
NPY_BEGIN_THREADS_DEF;
@@ -1260,7 +1295,7 @@ iterator_loop(PyUFuncObject *ufunc,
continue;
}
if (prepare_ufunc_output(ufunc, &op[nin+i],
- arr_prep[i], arr_prep_args, i) < 0) {
+ arr_prep[i], full_args, i) < 0) {
return -1;
}
}
@@ -1288,13 +1323,8 @@ iterator_loop(PyUFuncObject *ufunc,
/* Call the __array_prepare__ functions for the new array */
if (prepare_ufunc_output(ufunc, &op[nin+i],
- arr_prep[i], arr_prep_args, i) < 0) {
- for(iop = 0; iop < nin+i; ++iop) {
- if (op_it[iop] != op[iop]) {
- /* ignore errors */
- PyArray_ResolveWritebackIfCopy(op_it[iop]);
- }
- }
+ arr_prep[i], full_args, i) < 0) {
+ NpyIter_Close(iter);
NpyIter_Deallocate(iter);
return -1;
}
@@ -1323,6 +1353,7 @@ iterator_loop(PyUFuncObject *ufunc,
baseptrs[i] = PyArray_BYTES(op_it[i]);
}
if (NpyIter_ResetBasePointers(iter, baseptrs, NULL) != NPY_SUCCEED) {
+ NpyIter_Close(iter);
NpyIter_Deallocate(iter);
return -1;
}
@@ -1330,6 +1361,7 @@ iterator_loop(PyUFuncObject *ufunc,
/* Get the variables needed for the loop */
iternext = NpyIter_GetIterNext(iter, NULL);
if (iternext == NULL) {
+ NpyIter_Close(iter);
NpyIter_Deallocate(iter);
return -1;
}
@@ -1347,13 +1379,9 @@ iterator_loop(PyUFuncObject *ufunc,
NPY_END_THREADS;
}
- for(iop = 0; iop < nop; ++iop) {
- if (op_it[iop] != op[iop]) {
- PyArray_ResolveWritebackIfCopy(op_it[iop]);
- }
- }
+ retval = NpyIter_Close(iter);
NpyIter_Deallocate(iter);
- return 0;
+ return retval;
}
/*
@@ -1375,7 +1403,7 @@ execute_legacy_ufunc_loop(PyUFuncObject *ufunc,
NPY_ORDER order,
npy_intp buffersize,
PyObject **arr_prep,
- PyObject *arr_prep_args)
+ ufunc_full_args full_args)
{
npy_intp nin = ufunc->nin, nout = ufunc->nout;
PyUFuncGenericFunction innerloop;
@@ -1412,7 +1440,7 @@ execute_legacy_ufunc_loop(PyUFuncObject *ufunc,
/* Call the __array_prepare__ if necessary */
if (prepare_ufunc_output(ufunc, &op[1],
- arr_prep[0], arr_prep_args, 0) < 0) {
+ arr_prep[0], full_args, 0) < 0) {
return -1;
}
@@ -1429,7 +1457,7 @@ execute_legacy_ufunc_loop(PyUFuncObject *ufunc,
/* Call the __array_prepare__ if necessary */
if (prepare_ufunc_output(ufunc, &op[1],
- arr_prep[0], arr_prep_args, 0) < 0) {
+ arr_prep[0], full_args, 0) < 0) {
return -1;
}
@@ -1471,7 +1499,7 @@ execute_legacy_ufunc_loop(PyUFuncObject *ufunc,
/* Call the __array_prepare__ if necessary */
if (prepare_ufunc_output(ufunc, &op[2],
- arr_prep[0], arr_prep_args, 0) < 0) {
+ arr_prep[0], full_args, 0) < 0) {
return -1;
}
@@ -1490,7 +1518,7 @@ execute_legacy_ufunc_loop(PyUFuncObject *ufunc,
/* Call the __array_prepare__ if necessary */
if (prepare_ufunc_output(ufunc, &op[2],
- arr_prep[0], arr_prep_args, 0) < 0) {
+ arr_prep[0], full_args, 0) < 0) {
return -1;
}
@@ -1509,7 +1537,7 @@ execute_legacy_ufunc_loop(PyUFuncObject *ufunc,
NPY_UF_DBG_PRINT("iterator loop\n");
if (iterator_loop(ufunc, op, dtypes, order,
- buffersize, arr_prep, arr_prep_args,
+ buffersize, arr_prep, full_args,
innerloop, innerloopdata) < 0) {
return -1;
}
@@ -1536,7 +1564,7 @@ execute_fancy_ufunc_loop(PyUFuncObject *ufunc,
NPY_ORDER order,
npy_intp buffersize,
PyObject **arr_prep,
- PyObject *arr_prep_args)
+ ufunc_full_args full_args)
{
int retval, i, nin = ufunc->nin, nout = ufunc->nout;
int nop = nin + nout;
@@ -1649,7 +1677,8 @@ execute_fancy_ufunc_loop(PyUFuncObject *ufunc,
Py_INCREF(op_tmp);
if (prepare_ufunc_output(ufunc, &op_tmp,
- arr_prep[i], arr_prep_args, i) < 0) {
+ arr_prep[i], full_args, i) < 0) {
+ NpyIter_Close(iter);
NpyIter_Deallocate(iter);
return -1;
}
@@ -1660,6 +1689,7 @@ execute_fancy_ufunc_loop(PyUFuncObject *ufunc,
"The __array_prepare__ functions modified the data "
"pointer addresses in an invalid fashion");
Py_DECREF(op_tmp);
+ NpyIter_Close(iter);
NpyIter_Deallocate(iter);
return -1;
}
@@ -1694,6 +1724,7 @@ execute_fancy_ufunc_loop(PyUFuncObject *ufunc,
wheremask != NULL ? fixed_strides[nop]
: fixed_strides[nop + nin],
&innerloop, &innerloopdata, &needs_api) < 0) {
+ NpyIter_Close(iter);
NpyIter_Deallocate(iter);
return -1;
}
@@ -1701,6 +1732,7 @@ execute_fancy_ufunc_loop(PyUFuncObject *ufunc,
/* Get the variables needed for the loop */
iternext = NpyIter_GetIterNext(iter, NULL);
if (iternext == NULL) {
+ NpyIter_Close(iter);
NpyIter_Deallocate(iter);
return -1;
}
@@ -1724,54 +1756,114 @@ execute_fancy_ufunc_loop(PyUFuncObject *ufunc,
NPY_AUXDATA_FREE(innerloopdata);
}
- retval = 0;
- nop = NpyIter_GetNOp(iter);
- for(i=0; i< nop; ++i) {
- if (PyArray_ResolveWritebackIfCopy(NpyIter_GetOperandArray(iter)[i]) < 0) {
- retval = -1;
- }
- }
-
+ retval = NpyIter_Close(iter);
NpyIter_Deallocate(iter);
return retval;
}
-static PyObject *
-make_arr_prep_args(npy_intp nin, PyObject *args, PyObject *kwds)
+static npy_bool
+tuple_all_none(PyObject *tup) {
+ npy_intp i;
+ for (i = 0; i < PyTuple_GET_SIZE(tup); ++i) {
+ if (PyTuple_GET_ITEM(tup, i) != Py_None) {
+ return NPY_FALSE;
+ }
+ }
+ return NPY_TRUE;
+}
+
+/*
+ * Convert positional args and the out kwarg into an input and output tuple.
+ *
+ * If the output tuple would be all None, return NULL instead.
+ *
+ * This duplicates logic in many places, so further refactoring is needed:
+ * - get_ufunc_arguments
+ * - PyUFunc_WithOverride
+ * - normalize___call___args
+ */
+static int
+make_full_arg_tuple(
+ ufunc_full_args *full_args,
+ npy_intp nin, npy_intp nout,
+ PyObject *args, PyObject *kwds)
{
- PyObject *out = kwds ? PyDict_GetItem(kwds, npy_um_str_out) : NULL;
- PyObject *arr_prep_args;
+ PyObject *out_kwd = NULL;
+ npy_intp nargs = PyTuple_GET_SIZE(args);
+ npy_intp i;
- if (out == NULL) {
- Py_INCREF(args);
- return args;
+ /* This should have been checked by the caller */
+ assert(nin <= nargs && nargs <= nin + nout);
+
+ /* Initialize so we can XDECREF safely */
+ full_args->in = NULL;
+ full_args->out = NULL;
+
+ /* Get the input arguments */
+ full_args->in = PyTuple_GetSlice(args, 0, nin);
+ if (full_args->in == NULL) {
+ goto fail;
}
- else {
- npy_intp i, nargs = PyTuple_GET_SIZE(args), n;
- n = nargs;
- if (n < nin + 1) {
- n = nin + 1;
- }
- arr_prep_args = PyTuple_New(n);
- if (arr_prep_args == NULL) {
- return NULL;
+
+ /* Look for output keyword arguments */
+ out_kwd = kwds ? PyDict_GetItem(kwds, npy_um_str_out) : NULL;
+
+ if (out_kwd != NULL) {
+ assert(nargs == nin);
+ if (out_kwd == Py_None) {
+ return 0;
}
- /* Copy the tuple, but set the nin-th item to the keyword arg */
- for (i = 0; i < nin; ++i) {
- PyObject *item = PyTuple_GET_ITEM(args, i);
- Py_INCREF(item);
- PyTuple_SET_ITEM(arr_prep_args, i, item);
+ else if (PyTuple_Check(out_kwd)) {
+ assert(PyTuple_GET_SIZE(out_kwd) == nout);
+ if (tuple_all_none(out_kwd)) {
+ return 0;
+ }
+ Py_INCREF(out_kwd);
+ full_args->out = out_kwd;
+ return 0;
}
- Py_INCREF(out);
- PyTuple_SET_ITEM(arr_prep_args, nin, out);
- for (i = nin+1; i < n; ++i) {
- PyObject *item = PyTuple_GET_ITEM(args, i);
- Py_INCREF(item);
- PyTuple_SET_ITEM(arr_prep_args, i, item);
+ else {
+ /* A single argument x is promoted to (x, None, None ...) */
+ full_args->out = PyTuple_New(nout);
+ if (full_args->out == NULL) {
+ goto fail;
+ }
+ Py_INCREF(out_kwd);
+ PyTuple_SET_ITEM(full_args->out, 0, out_kwd);
+ for (i = 1; i < nout; ++i) {
+ Py_INCREF(Py_None);
+ PyTuple_SET_ITEM(full_args->out, i, Py_None);
+ }
+ return 0;
}
+ }
+
+ /* copy across positional output arguments, adding trailing Nones */
+ full_args->out = PyTuple_New(nout);
+ if (full_args->out == NULL) {
+ goto fail;
+ }
+ for (i = nin; i < nargs; ++i) {
+ PyObject *item = PyTuple_GET_ITEM(args, i);
+ Py_INCREF(item);
+ PyTuple_SET_ITEM(full_args->out, i - nin, item);
+ }
+ for (i = nargs; i < nin + nout; ++i) {
+ Py_INCREF(Py_None);
+ PyTuple_SET_ITEM(full_args->out, i - nin, Py_None);
+ }
- return arr_prep_args;
+ /* don't return a tuple full of None */
+ if (tuple_all_none(full_args->out)) {
+ Py_DECREF(full_args->out);
+ full_args->out = NULL;
}
+ return 0;
+
+fail:
+ Py_XDECREF(full_args->in);
+ Py_XDECREF(full_args->out);
+ return -1;
}
/*
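At the Python level this normalization amounts to the following; a minimal sketch (hypothetical pure-Python mirror of make_full_arg_tuple, not part of the commit):

    _NOT_GIVEN = object()

    def full_arg_tuple(nin, nout, args, out_kwd=_NOT_GIVEN):
        assert nin <= len(args) <= nin + nout
        ins = tuple(args[:nin])
        if out_kwd is not _NOT_GIVEN:
            if out_kwd is None:
                return ins, None          # out=None: no outputs given
            if isinstance(out_kwd, tuple):
                outs = out_kwd            # must already have nout items
            else:
                # a single argument x is promoted to (x, None, None, ...)
                outs = (out_kwd,) + (None,) * (nout - 1)
        else:
            # copy across positional outputs, adding trailing Nones
            outs = tuple(args[nin:]) + (None,) * (nin + nout - len(args))
        if all(o is None for o in outs):
            outs = None                   # don't return a tuple full of None
        return ins, outs

    full_arg_tuple(2, 1, ('a', 'b'), out_kwd='c')   # (('a', 'b'), ('c',))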
@@ -1789,6 +1881,35 @@ _has_output_coredims(PyUFuncObject *ufunc) {
}
/*
+ * Check whether the gufunc can be used with keepdims, i.e., that all its
+ * input arguments have the same number of core dimensions, and all output
+ * arguments have no core dimensions. Returns 0 if all is fine, and sets
+ * an error and returns -1 if not.
+ */
+static int
+_check_keepdims_support(PyUFuncObject *ufunc) {
+ int i;
+ int nin = ufunc->nin, nout = ufunc->nout;
+ int input_core_dims = ufunc->core_num_dims[0];
+ for (i = 1; i < nin + nout; i++) {
+ if (ufunc->core_num_dims[i] != (i < nin ? input_core_dims : 0)) {
+ PyErr_Format(PyExc_TypeError,
+ "%s does not support keepdims: its signature %s requires "
+ "that %s %d has %d core dimensions, but keepdims can only "
+ "be used when all inputs have the same number of core "
+ "dimensions and all outputs have no core dimensions.",
+ ufunc_get_name_cstr(ufunc),
+ ufunc->core_signature,
+ i < nin ? "input" : "output",
+ i < nin ? i : i - nin,
+ ufunc->core_num_dims[i]);
+ return -1;
+ }
+ }
+ return 0;
+}
+
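In practice this admits reduction-like signatures only. Assuming the gufunc test helpers that ship with numpy in this era (numpy.core.umath_tests), the user-visible effect is:

    import numpy as np
    from numpy.core.umath_tests import inner1d, matrix_multiply

    a = np.ones((3, 5))
    inner1d(a, a, keepdims=True)    # ok: signature (i),(i)->()
    try:
        matrix_multiply(a, a.T, keepdims=True)
    except TypeError:
        pass    # rejected: (m,n),(n,p)->(m,p) has output core dimensions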
+/*
* Interpret a possible axes keyword argument, using it to fill the remap_axis
* array which maps default to actual axes for each operand, indexed as
* remap_axis[iop][iaxis]. The default axis order has first all broadcast
@@ -1797,8 +1918,8 @@ _has_output_coredims(PyUFuncObject *ufunc) {
* Returns 0 on success, and -1 on failure
*/
static int
-_parse_axes_arg(PyUFuncObject *ufunc, PyObject *axes, PyArrayObject **op,
- int broadcast_ndim, int **remap_axis) {
+_parse_axes_arg(PyUFuncObject *ufunc, int core_num_dims[], PyObject *axes,
+ PyArrayObject **op, int broadcast_ndim, int **remap_axis) {
int nin = ufunc->nin;
int nout = ufunc->nout;
int nop = nin + nout;
@@ -1828,7 +1949,7 @@ _parse_axes_arg(PyUFuncObject *ufunc, PyObject *axes, PyArrayObject **op,
PyObject *op_axes_tuple, *axis_item;
int axis, op_axis;
- op_ncore = ufunc->core_num_dims[iop];
+ op_ncore = core_num_dims[iop];
if (op[iop] != NULL) {
op_ndim = PyArray_NDIM(op[iop]);
op_nbroadcast = op_ndim - op_ncore;
@@ -2078,6 +2199,8 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc,
/* Use remapped axes for generalized ufunc */
int broadcast_ndim, iter_ndim;
+ int core_num_dims_array[NPY_MAXARGS];
+ int *core_num_dims;
int op_axes_arrays[NPY_MAXARGS][NPY_MAXDIMS];
int *op_axes[NPY_MAXARGS];
@@ -2106,17 +2229,15 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc,
int **remap_axis = NULL;
/* The __array_prepare__ function to call for each output */
PyObject *arr_prep[NPY_MAXARGS];
- /*
- * This is either args, or args with the out= parameter from
- * kwds added appropriately.
- */
- PyObject *arr_prep_args = NULL;
+ /* The separated input and output arguments, parsed from args and kwds */
+ ufunc_full_args full_args = {NULL, NULL};
NPY_ORDER order = NPY_KEEPORDER;
/* Use the default assignment casting rule */
NPY_CASTING casting = NPY_DEFAULT_ASSIGN_CASTING;
/* When provided, extobj, typetup, and axes contain borrowed references */
PyObject *extobj = NULL, *type_tup = NULL, *axes = NULL;
+ int keepdims = -1;
if (ufunc == NULL) {
PyErr_SetString(PyExc_ValueError, "function not supported");
@@ -2143,25 +2264,53 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc,
/* Get all the arguments */
retval = get_ufunc_arguments(ufunc, args, kwds,
op, &order, &casting, &extobj,
- &type_tup, &subok, NULL, &axes);
+ &type_tup, &subok, NULL, &axes, &keepdims);
if (retval < 0) {
goto fail;
}
-
+ /*
+ * If keepdims was passed in (and thus changed from the initial value
+ * at the top), check that the gufunc is suitable, i.e., that its inputs share
+ * the same number of core dimensions, and its outputs have none.
+ */
+ if (keepdims != -1) {
+ retval = _check_keepdims_support(ufunc);
+ if (retval < 0) {
+ goto fail;
+ }
+ }
+ /*
+ * If keepdims is set and true, signal all dimensions will be the same.
+ */
+ if (keepdims == 1) {
+ int num_dims = ufunc->core_num_dims[0];
+ for (i = 0; i < nop; ++i) {
+ core_num_dims_array[i] = num_dims;
+ }
+ core_num_dims = core_num_dims_array;
+ }
+ else {
+ /* keepdims was not set or was false; no adjustment necessary */
+ core_num_dims = ufunc->core_num_dims;
+ keepdims = 0;
+ }
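With keepdims requested, every operand is treated as having the input's number of core dimensions, and the reduced core axes of the outputs come back as broadcastable length-1 dimensions. For example (same umath_tests assumption as above):

    import numpy as np
    from numpy.core.umath_tests import inner1d

    a = np.ones((4, 3, 5))
    inner1d(a, a).shape                  # (4, 3): core axis i reduced away
    inner1d(a, a, keepdims=True).shape   # (4, 3, 1): kept as a size-1 axis
    a / inner1d(a, a, keepdims=True)     # broadcasts back against the input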
/*
* Check that operands have the minimum dimensions required.
* (Just checks core; broadcast dimensions are tested by the iterator.)
*/
for (i = 0; i < nop; i++) {
- if (op[i] != NULL && PyArray_NDIM(op[i]) < ufunc->core_num_dims[i]) {
+ if (op[i] != NULL && PyArray_NDIM(op[i]) < core_num_dims[i]) {
PyErr_Format(PyExc_ValueError,
"%s: %s operand %d does not have enough "
"dimensions (has %d, gufunc core with "
"signature %s requires %d)",
- ufunc_get_name_cstr(ufunc),
+ ufunc_name,
i < nin ? "Input" : "Output",
- i < nin ? i : i - nin, PyArray_NDIM(op[i]),
- ufunc->core_signature, ufunc->core_num_dims[i]);
+ i < nin ? i : i - nin,
+ PyArray_NDIM(op[i]),
+ ufunc->core_signature,
+ core_num_dims[i]);
+ retval = -1;
goto fail;
}
}
@@ -2173,7 +2322,7 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc,
*/
broadcast_ndim = 0;
for (i = 0; i < nin; ++i) {
- int n = PyArray_NDIM(op[i]) - ufunc->core_num_dims[i];
+ int n = PyArray_NDIM(op[i]) - core_num_dims[i];
if (n > broadcast_ndim) {
broadcast_ndim = n;
}
@@ -2187,7 +2336,7 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc,
*/
iter_ndim = broadcast_ndim;
for (i = nin; i < nop; ++i) {
- iter_ndim += ufunc->core_num_dims[i];
+ iter_ndim += core_num_dims[i];
}
if (iter_ndim > NPY_MAXDIMS) {
PyErr_Format(PyExc_ValueError,
@@ -2209,7 +2358,7 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc,
for (i=0; i < nop; i++) {
remap_axis[i] = remap_axis_memory + i * NPY_MAXDIMS;
}
- retval = _parse_axes_arg(ufunc, axes, op, broadcast_ndim,
+ retval = _parse_axes_arg(ufunc, core_num_dims, axes, op, broadcast_ndim,
remap_axis);
if(retval < 0) {
goto fail;
@@ -2231,12 +2380,13 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc,
j = broadcast_ndim;
for (i = 0; i < nop; ++i) {
int n;
+
if (op[i]) {
/*
* Note that n may be negative if broadcasting
* extends into the core dimensions.
*/
- n = PyArray_NDIM(op[i]) - ufunc->core_num_dims[i];
+ n = PyArray_NDIM(op[i]) - core_num_dims[i];
}
else {
n = broadcast_ndim;
@@ -2260,10 +2410,15 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc,
/* Except for when it belongs to this output */
if (i >= nin) {
int dim_offset = ufunc->core_offsets[i];
- int num_dims = ufunc->core_num_dims[i];
- /* Fill in 'iter_shape' and 'op_axes' for this output */
+ int num_dims = core_num_dims[i];
+ /*
+ * Fill in 'iter_shape' and 'op_axes' for the core dimensions
+ * of this output. Here, we have to be careful: if keepdims
+ * was used, then this axis is not a real core dimension,
+ * but is being added back for broadcasting, so its size is 1.
+ */
for (idim = 0; idim < num_dims; ++idim) {
- iter_shape[j] = core_dim_sizes[
+ iter_shape[j] = keepdims ? 1 : core_dim_sizes[
ufunc->core_dim_ixs[dim_offset + idim]];
op_axes_arrays[i][j] = REMAP_AXIS(i, n + idim);
++j;
@@ -2309,19 +2464,15 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc,
#endif
if (subok) {
+ if (make_full_arg_tuple(&full_args, nin, nout, args, kwds) < 0) {
+ goto fail;
+ }
+
/*
* Get the appropriate __array_prepare__ function to call
* for each output
*/
- _find_array_prepare(args, kwds, arr_prep, nin, nout);
-
- /* Set up arr_prep_args if a prep function was needed */
- for (i = 0; i < nout; ++i) {
- if (arr_prep[i] != NULL && arr_prep[i] != Py_None) {
- arr_prep_args = make_arr_prep_args(nin, args, kwds);
- break;
- }
- }
+ _find_array_prepare(full_args, arr_prep, nin, nout);
}
/* If the loop wants the arrays, provide them */
@@ -2387,7 +2538,7 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc,
*/
core_dim_ixs_size = 0;
for (i = 0; i < nop; ++i) {
- core_dim_ixs_size += ufunc->core_num_dims[i];
+ core_dim_ixs_size += core_num_dims[i];
}
inner_strides = (npy_intp *)PyArray_malloc(
NPY_SIZEOF_INTP * (nop+core_dim_ixs_size));
@@ -2399,7 +2550,7 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc,
/* Copy the strides after the first nop */
idim = nop;
for (i = 0; i < nop; ++i) {
- int num_dims = ufunc->core_num_dims[i];
+ int num_dims = core_num_dims[i];
int core_start_dim = PyArray_NDIM(op[i]) - num_dims;
/*
* Need to use the arrays in the iterator, not op, because
@@ -2468,7 +2619,7 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc,
#endif
/* Start with the floating-point exception flags cleared */
- PyUFunc_clearfperr();
+ npy_clear_floatstatus_barrier((char*)&iter);
NPY_UF_DBG_PRINT("Executing inner loop\n");
@@ -2537,11 +2688,11 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc,
}
/* Write back any temporary data from PyArray_SetWritebackIfCopyBase */
- for(i=nin; i< nop; ++i)
- if (PyArray_ResolveWritebackIfCopy(NpyIter_GetOperandArray(iter)[i]) < 0)
- goto fail;
+ if (NpyIter_Close(iter) < 0) {
+ goto fail;
+ }
PyArray_free(inner_strides);
NpyIter_Deallocate(iter);
/* The caller takes ownership of all the references in op */
for (i = 0; i < nop; ++i) {
@@ -2549,7 +2703,8 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc,
Py_XDECREF(arr_prep[i]);
}
Py_XDECREF(type_tup);
- Py_XDECREF(arr_prep_args);
+ Py_XDECREF(full_args.in);
+ Py_XDECREF(full_args.out);
NPY_UF_DBG_PRINT("Returning Success\n");
@@ -2558,6 +2713,7 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc,
fail:
NPY_UF_DBG_PRINT1("Returning failure code %d\n", retval);
PyArray_free(inner_strides);
+ NpyIter_Close(iter);
NpyIter_Deallocate(iter);
for (i = 0; i < nop; ++i) {
Py_XDECREF(op[i]);
@@ -2566,7 +2722,8 @@ fail:
Py_XDECREF(arr_prep[i]);
}
Py_XDECREF(type_tup);
- Py_XDECREF(arr_prep_args);
+ Py_XDECREF(full_args.in);
+ Py_XDECREF(full_args.out);
PyArray_free(remap_axis_memory);
PyArray_free(remap_axis);
return retval;
@@ -2604,7 +2761,7 @@ PyUFunc_GenericFunction(PyUFuncObject *ufunc,
* This is either args, or args with the out= parameter from
* kwds added appropriately.
*/
- PyObject *arr_prep_args = NULL;
+ ufunc_full_args full_args = {NULL, NULL};
int trivial_loop_ok = 0;
@@ -2643,7 +2800,7 @@ PyUFunc_GenericFunction(PyUFuncObject *ufunc,
/* Get all the arguments */
retval = get_ufunc_arguments(ufunc, args, kwds,
op, &order, &casting, &extobj,
- &type_tup, &subok, &wheremask, NULL);
+ &type_tup, &subok, &wheremask, NULL, NULL);
if (retval < 0) {
goto fail;
}
@@ -2696,23 +2853,18 @@ PyUFunc_GenericFunction(PyUFuncObject *ufunc,
#endif
if (subok) {
+ if (make_full_arg_tuple(&full_args, nin, nout, args, kwds) < 0) {
+ goto fail;
+ }
/*
* Get the appropriate __array_prepare__ function to call
* for each output
*/
- _find_array_prepare(args, kwds, arr_prep, nin, nout);
-
- /* Set up arr_prep_args if a prep function was needed */
- for (i = 0; i < nout; ++i) {
- if (arr_prep[i] != NULL && arr_prep[i] != Py_None) {
- arr_prep_args = make_arr_prep_args(nin, args, kwds);
- break;
- }
- }
+ _find_array_prepare(full_args, arr_prep, nin, nout);
}
/* Start with the floating-point exception flags cleared */
- PyUFunc_clearfperr();
+ npy_clear_floatstatus_barrier((char*)&ufunc);
/* Do the ufunc loop */
if (need_fancy) {
@@ -2720,14 +2872,14 @@ PyUFunc_GenericFunction(PyUFuncObject *ufunc,
retval = execute_fancy_ufunc_loop(ufunc, wheremask,
op, dtypes, order,
- buffersize, arr_prep, arr_prep_args);
+ buffersize, arr_prep, full_args);
}
else {
NPY_UF_DBG_PRINT("Executing legacy inner loop\n");
retval = execute_legacy_ufunc_loop(ufunc, trivial_loop_ok,
op, dtypes, order,
- buffersize, arr_prep, arr_prep_args);
+ buffersize, arr_prep, full_args);
}
if (retval < 0) {
goto fail;
@@ -2747,7 +2899,8 @@ PyUFunc_GenericFunction(PyUFuncObject *ufunc,
Py_XDECREF(arr_prep[i]);
}
Py_XDECREF(type_tup);
- Py_XDECREF(arr_prep_args);
+ Py_XDECREF(full_args.in);
+ Py_XDECREF(full_args.out);
Py_XDECREF(wheremask);
NPY_UF_DBG_PRINT("Returning Success\n");
@@ -2763,7 +2916,8 @@ fail:
Py_XDECREF(arr_prep[i]);
}
Py_XDECREF(type_tup);
- Py_XDECREF(arr_prep_args);
+ Py_XDECREF(full_args.in);
+ Py_XDECREF(full_args.out);
Py_XDECREF(wheremask);
return retval;
@@ -3024,20 +3178,25 @@ finish_loop:
*/
static PyArrayObject *
PyUFunc_Reduce(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out,
- int naxes, int *axes, PyArray_Descr *odtype, int keepdims)
+ int naxes, int *axes, PyArray_Descr *odtype, int keepdims,
+ PyObject *initial)
{
int iaxes, ndim;
npy_bool reorderable;
npy_bool axis_flags[NPY_MAXDIMS];
PyArray_Descr *dtype;
PyArrayObject *result;
- PyObject *identity = NULL;
+ PyObject *identity;
const char *ufunc_name = ufunc_get_name_cstr(ufunc);
/* These parameters come from a TLS global */
int buffersize = 0, errormask = 0;
+ static PyObject *NoValue = NULL;
NPY_UF_DBG_PRINT1("\nEvaluating ufunc %s.reduce\n", ufunc_name);
+ npy_cache_import("numpy", "_NoValue", &NoValue);
+ if (NoValue == NULL) return NULL;
+
ndim = PyArray_NDIM(arr);
/* Create an array of flags for reduction */
@@ -3061,19 +3220,28 @@ PyUFunc_Reduce(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out,
if (identity == NULL) {
return NULL;
}
- /*
- * The identity for a dynamic dtype like
- * object arrays can't be used in general
- */
- if (identity != Py_None && PyArray_ISOBJECT(arr) && PyArray_SIZE(arr) != 0) {
+
+ /* Get the initial value */
+ if (initial == NULL || initial == NoValue) {
+ initial = identity;
+
+ /*
+ * The identity for a dynamic dtype like
+ * object arrays can't be used in general
+ */
+ if (initial != Py_None && PyArray_ISOBJECT(arr) && PyArray_SIZE(arr) != 0) {
+ Py_DECREF(initial);
+ initial = Py_None;
+ Py_INCREF(initial);
+ }
+ } else {
Py_DECREF(identity);
- identity = Py_None;
- Py_INCREF(identity);
+ Py_INCREF(initial); /* match the reference count in the if above */
}
/* Get the reduction dtype */
if (reduce_type_resolver(ufunc, arr, odtype, &dtype) < 0) {
- Py_DECREF(identity);
+ Py_DECREF(initial);
return NULL;
}
@@ -3081,12 +3249,12 @@ PyUFunc_Reduce(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out,
NPY_UNSAFE_CASTING,
axis_flags, reorderable,
keepdims, 0,
- identity,
+ initial,
reduce_loop,
ufunc, buffersize, ufunc_name, errormask);
Py_DECREF(dtype);
- Py_DECREF(identity);
+ Py_DECREF(initial);
return result;
}
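The net effect of threading initial through PyUFunc_Reduce: the fold starts from the caller's value instead of the ufunc identity, which also gives empty and identity-less reductions a well-defined answer. For example:

    import numpy as np

    np.add.reduce(np.arange(5), initial=10)   # 20: the fold starts at 10
    np.minimum.reduce([5, 4], initial=3)      # 3: initial joins the min
    np.minimum.reduce([], initial=np.inf)     # inf instead of a ValueError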
@@ -3215,9 +3383,15 @@ PyUFunc_Accumulate(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out,
*/
ndim_iter = ndim;
flags |= NPY_ITER_MULTI_INDEX;
- /* Add some more flags */
- op_flags[0] |= NPY_ITER_UPDATEIFCOPY|NPY_ITER_ALIGNED;
- op_flags[1] |= NPY_ITER_COPY|NPY_ITER_ALIGNED;
+ /*
+ * Add some more flags.
+ *
+ * The accumulation outer loop is 'elementwise' over the array, so turn
+ * on NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE. That is, in-place
+ * accumulate(x, out=x) is safe to do without temporary copies.
+ */
+ op_flags[0] |= NPY_ITER_UPDATEIFCOPY|NPY_ITER_ALIGNED|NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE;
+ op_flags[1] |= NPY_ITER_COPY|NPY_ITER_ALIGNED|NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE;
op_dtypes_param = op_dtypes;
op_dtypes[1] = op_dtypes[0];
NPY_UF_DBG_PRINT("Allocating outer iterator\n");
@@ -3419,9 +3593,12 @@ PyUFunc_Accumulate(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out,
}
finish:
- /* Write back any temporary data from PyArray_SetWritebackIfCopyBase */
- if (PyArray_ResolveWritebackIfCopy(op[0]) < 0)
+ if (NpyIter_Close(iter) < 0) {
goto fail;
+ }
+ if (NpyIter_Close(iter_inner) < 0) {
+ goto fail;
+ }
Py_XDECREF(op_dtypes[0]);
NpyIter_Deallocate(iter);
NpyIter_Deallocate(iter_inner);
@@ -3468,7 +3645,7 @@ PyUFunc_Reduceat(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *ind,
op_axes_arrays[2]};
npy_uint32 op_flags[3];
int i, idim, ndim, otype_final;
- int need_outer_iterator;
+ int need_outer_iterator = 0;
NpyIter *iter = NULL;
@@ -3804,7 +3981,7 @@ PyUFunc_Reduceat(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *ind,
}
finish:
- if (op[0] && PyArray_ResolveWritebackIfCopy(op[0]) < 0) {
+ if (NpyIter_Close(iter) < 0) {
goto fail;
}
Py_XDECREF(op_dtypes[0]);
@@ -3841,8 +4018,9 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, PyObject *args,
PyArray_Descr *otype = NULL;
PyArrayObject *out = NULL;
int keepdims = 0;
+ PyObject *initial = NULL;
static char *reduce_kwlist[] = {
- "array", "axis", "dtype", "out", "keepdims", NULL};
+ "array", "axis", "dtype", "out", "keepdims", "initial", NULL};
static char *accumulate_kwlist[] = {
"array", "axis", "dtype", "out", NULL};
static char *reduceat_kwlist[] = {
@@ -3914,13 +4092,13 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, PyObject *args,
}
}
else {
- if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|OO&O&i:reduce",
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|OO&O&iO:reduce",
reduce_kwlist,
&op,
&axes_in,
PyArray_DescrConverter2, &otype,
PyArray_OutputConverter, &out,
- &keepdims)) {
+ &keepdims, &initial)) {
goto fail;
}
}
@@ -4051,7 +4229,7 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, PyObject *args,
switch(operation) {
case UFUNC_REDUCE:
ret = PyUFunc_Reduce(ufunc, mp, out, naxes, axes,
- otype, keepdims);
+ otype, keepdims, initial);
break;
case UFUNC_ACCUMULATE:
if (naxes != 1) {
@@ -4123,11 +4301,10 @@ fail:
* should just have PyArray_Return called.
*/
static void
-_find_array_wrap(PyObject *args, PyObject *kwds,
+_find_array_wrap(ufunc_full_args args, PyObject *kwds,
PyObject **output_wrap, int nin, int nout)
{
- Py_ssize_t nargs;
- int i, idx_offset, start_idx;
+ int i;
PyObject *obj;
PyObject *wrap = NULL;
@@ -4147,7 +4324,7 @@ _find_array_wrap(PyObject *args, PyObject *kwds,
* Determine the wrapping function given by the input arrays
* (could be NULL).
*/
- wrap = _find_array_method(args, nin, npy_um_str_array_wrap);
+ wrap = _find_array_method(args.in, npy_um_str_array_wrap);
/*
* For all the output arrays decide what to do.
@@ -4162,44 +4339,16 @@ _find_array_wrap(PyObject *args, PyObject *kwds,
* done in that case.
*/
handle_out:
- nargs = PyTuple_GET_SIZE(args);
- /* Default is using positional arguments */
- obj = args;
- idx_offset = nin;
- start_idx = 0;
- if (nin == nargs && kwds != NULL) {
- /* There may be a keyword argument we can use instead */
- obj = PyDict_GetItem(kwds, npy_um_str_out);
- if (obj == NULL) {
- /* No, go back to positional (even though there aren't any) */
- obj = args;
- }
- else {
- idx_offset = 0;
- if (PyTuple_Check(obj)) {
- /* If a tuple, must have all nout items */
- nargs = nout;
- }
- else {
- /* If the kwarg is not a tuple then it is an array (or None) */
- output_wrap[0] = _get_output_array_method(
- obj, npy_um_str_array_wrap, wrap);
- start_idx = 1;
- nargs = 1;
- }
+ if (args.out == NULL) {
+ for (i = 0; i < nout; i++) {
+ Py_XINCREF(wrap);
+ output_wrap[i] = wrap;
}
}
-
- for (i = start_idx; i < nout; ++i) {
- int j = idx_offset + i;
-
- if (j < nargs) {
+ else {
+ for (i = 0; i < nout; i++) {
output_wrap[i] = _get_output_array_method(
- PyTuple_GET_ITEM(obj, j), npy_um_str_array_wrap, wrap);
- }
- else {
- output_wrap[i] = wrap;
- Py_XINCREF(wrap);
+ PyTuple_GET_ITEM(args.out, i), npy_um_str_array_wrap, wrap);
}
}
@@ -4212,12 +4361,11 @@ static PyObject *
ufunc_generic_call(PyUFuncObject *ufunc, PyObject *args, PyObject *kwds)
{
int i;
- PyTupleObject *ret;
PyArrayObject *mps[NPY_MAXARGS];
PyObject *retobj[NPY_MAXARGS];
PyObject *wraparr[NPY_MAXARGS];
- PyObject *res;
PyObject *override = NULL;
+ ufunc_full_args full_args = {NULL, NULL};
int errval;
errval = PyUFunc_CheckOverride(ufunc, "__call__", args, kwds, &override);
@@ -4282,20 +4430,37 @@ ufunc_generic_call(PyUFuncObject *ufunc, PyObject *args, PyObject *kwds)
* None --- array-object passed in don't call PyArray_Return
* method --- the __array_wrap__ method to call.
*/
- _find_array_wrap(args, kwds, wraparr, ufunc->nin, ufunc->nout);
+ if (make_full_arg_tuple(&full_args, ufunc->nin, ufunc->nout, args, kwds) < 0) {
+ goto fail;
+ }
+ _find_array_wrap(full_args, kwds, wraparr, ufunc->nin, ufunc->nout);
/* wrap outputs */
for (i = 0; i < ufunc->nout; i++) {
int j = ufunc->nin+i;
PyObject *wrap = wraparr[i];
- if (wrap != NULL) {
- if (wrap == Py_None) {
- Py_DECREF(wrap);
- retobj[i] = (PyObject *)mps[j];
- continue;
+ if (wrap == NULL) {
+ /* default behavior */
+ retobj[i] = PyArray_Return(mps[j]);
+ }
+ else if (wrap == Py_None) {
+ Py_DECREF(wrap);
+ retobj[i] = (PyObject *)mps[j];
+ }
+ else {
+ PyObject *res;
+ PyObject *args_tup;
+
+ /* Call the method with appropriate context */
+ args_tup = _get_wrap_prepare_args(full_args);
+ if (args_tup == NULL) {
+ goto fail;
}
- res = PyObject_CallFunction(wrap, "O(OOi)", mps[j], ufunc, args, i);
+ res = PyObject_CallFunction(
+ wrap, "O(OOi)", mps[j], ufunc, args_tup, i);
+ Py_DECREF(args_tup);
+
/* Handle __array_wrap__ that does not accept a context argument */
if (res == NULL && PyErr_ExceptionMatches(PyExc_TypeError)) {
PyErr_Clear();
@@ -4305,23 +4470,21 @@ ufunc_generic_call(PyUFuncObject *ufunc, PyObject *args, PyObject *kwds)
if (res == NULL) {
goto fail;
}
- else {
- Py_DECREF(mps[j]);
- retobj[i] = res;
- continue;
- }
- }
- else {
- /* default behavior */
- retobj[i] = PyArray_Return(mps[j]);
- }
+ Py_DECREF(mps[j]);
+ retobj[i] = res;
+ }
}
+ Py_XDECREF(full_args.in);
+ Py_XDECREF(full_args.out);
+
if (ufunc->nout == 1) {
return retobj[0];
}
else {
+ PyTupleObject *ret;
+
ret = (PyTupleObject *)PyTuple_New(ufunc->nout);
for (i = 0; i < ufunc->nout; i++) {
PyTuple_SET_ITEM(ret, i, retobj[i]);
@@ -4330,6 +4493,8 @@ ufunc_generic_call(PyUFuncObject *ufunc, PyObject *args, PyObject *kwds)
}
fail:
+ Py_XDECREF(full_args.in);
+ Py_XDECREF(full_args.out);
for (i = ufunc->nin; i < ufunc->nargs; i++) {
Py_XDECREF(mps[i]);
}
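__array_wrap__ is thus invoked with a context tuple rebuilt from the normalized (in, out) arguments, while a wrapper that accepts only the array still works via the TypeError fallback. A sketch of both signatures (hypothetical subclasses):

    import numpy as np

    class WithContext(np.ndarray):
        def __array_wrap__(self, arr, context=None):
            ufunc, ctx_args, i = context      # rebuilt from full_args
            return np.ndarray.__array_wrap__(self, arr, context)

    class NoContext(np.ndarray):
        def __array_wrap__(self, arr):        # the context call raises
            return arr.view(NoContext)        # TypeError; retried bare

    np.negative(np.arange(3.).view(WithContext))
    np.negative(np.arange(3.).view(NoContext))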
@@ -4435,7 +4600,7 @@ PyUFunc_FromFuncAndData(PyUFuncGenericFunction *func, void **data,
const char *name, const char *doc, int unused)
{
return PyUFunc_FromFuncAndDataAndSignature(func, data, types, ntypes,
- nin, nout, identity, name, doc, 0, NULL);
+ nin, nout, identity, name, doc, unused, NULL);
}
/*UFUNC_API*/
@@ -5262,6 +5427,7 @@ ufunc_at(PyUFuncObject *ufunc, PyObject *args)
iternext = NpyIter_GetIterNext(iter_buffer, NULL);
if (iternext == NULL) {
+ NpyIter_Close(iter_buffer);
NpyIter_Deallocate(iter_buffer);
goto fail;
}
@@ -5331,11 +5497,9 @@ ufunc_at(PyUFuncObject *ufunc, PyObject *args)
PyErr_SetString(PyExc_ValueError, err_msg);
}
+ NpyIter_Close(iter_buffer);
NpyIter_Deallocate(iter_buffer);
- if (op1_array != (PyArrayObject*)op1) {
- PyArray_ResolveWritebackIfCopy(op1_array);
- }
Py_XDECREF(op2_array);
Py_XDECREF(iter);
Py_XDECREF(iter2);
@@ -5351,9 +5515,9 @@ ufunc_at(PyUFuncObject *ufunc, PyObject *args)
}
fail:
-
+ /* iter_buffer has already been deallocated, don't use NpyIter_Close */
if (op1_array != (PyArrayObject*)op1) {
- PyArray_ResolveWritebackIfCopy(op1_array);
+ PyArray_DiscardWritebackIfCopy(op1_array);
}
Py_XDECREF(op2_array);
Py_XDECREF(iter);
diff --git a/numpy/core/src/umath/umathmodule.c b/numpy/core/src/umath/umathmodule.c
index 03bf5bfd8..5567b9bbf 100644
--- a/numpy/core/src/umath/umathmodule.c
+++ b/numpy/core/src/umath/umathmodule.c
@@ -87,11 +87,12 @@ ufunc_frompyfunc(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *NPY_UNUS
/* Keywords are ignored for now */
PyObject *function, *pyname = NULL;
- int nin, nout, i;
+ int nin, nout, i, nargs;
PyUFunc_PyFuncData *fdata;
PyUFuncObject *self;
- char *fname, *str;
+ char *fname, *str, *types, *doc;
Py_ssize_t fname_len = -1;
+ void *ptr, **data;
int offset[2];
if (!PyArg_ParseTuple(args, "Oii:frompyfunc", &function, &nin, &nout)) {
@@ -101,43 +102,7 @@ ufunc_frompyfunc(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *NPY_UNUS
PyErr_SetString(PyExc_TypeError, "function must be callable");
return NULL;
}
- if (nin + nout > NPY_MAXARGS) {
- PyErr_Format(PyExc_ValueError,
- "Cannot construct a ufunc with more than %d operands "
- "(requested number were: inputs = %d and outputs = %d)",
- NPY_MAXARGS, nin, nout);
- return NULL;
- }
- self = PyArray_malloc(sizeof(PyUFuncObject));
- if (self == NULL) {
- return NULL;
- }
- PyObject_Init((PyObject *)self, &PyUFunc_Type);
-
- self->userloops = NULL;
- self->nin = nin;
- self->nout = nout;
- self->nargs = nin + nout;
- self->identity = PyUFunc_None;
- self->functions = pyfunc_functions;
- self->ntypes = 1;
-
- /* generalized ufunc */
- self->core_enabled = 0;
- self->core_num_dim_ix = 0;
- self->core_num_dims = NULL;
- self->core_dim_ixs = NULL;
- self->core_offsets = NULL;
- self->core_signature = NULL;
- self->op_flags = PyArray_malloc(sizeof(npy_uint32)*self->nargs);
- if (self->op_flags == NULL) {
- return PyErr_NoMemory();
- }
- memset(self->op_flags, 0, sizeof(npy_uint32)*self->nargs);
- self->iter_flags = 0;
-
- self->type_resolver = &object_ufunc_type_resolver;
- self->legacy_inner_loop_selector = &object_ufunc_loop_selector;
+ nargs = nin + nout;
pyname = PyObject_GetAttrString(function, "__name__");
if (pyname) {
@@ -150,7 +115,7 @@ ufunc_frompyfunc(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *NPY_UNUS
}
/*
- * self->ptr holds a pointer for enough memory for
+ * ptr will be assigned to self->ptr; it holds enough memory for
* self->data[0] (fdata)
* self->data
* self->name
@@ -164,39 +129,51 @@ ufunc_frompyfunc(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *NPY_UNUS
if (i) {
offset[0] += (sizeof(void *) - i);
}
- offset[1] = self->nargs;
- i = (self->nargs % sizeof(void *));
+ offset[1] = nargs;
+ i = (nargs % sizeof(void *));
if (i) {
offset[1] += (sizeof(void *)-i);
}
- self->ptr = PyArray_malloc(offset[0] + offset[1] + sizeof(void *) +
+ ptr = PyArray_malloc(offset[0] + offset[1] + sizeof(void *) +
(fname_len + 14));
- if (self->ptr == NULL) {
+ if (ptr == NULL) {
Py_XDECREF(pyname);
return PyErr_NoMemory();
}
- Py_INCREF(function);
- self->obj = function;
- fdata = (PyUFunc_PyFuncData *)(self->ptr);
+ fdata = (PyUFunc_PyFuncData *)(ptr);
+ fdata->callable = function;
fdata->nin = nin;
fdata->nout = nout;
- fdata->callable = function;
- self->data = (void **)(((char *)self->ptr) + offset[0]);
- self->data[0] = (void *)fdata;
- self->types = (char *)self->data + sizeof(void *);
- for (i = 0; i < self->nargs; i++) {
- self->types[i] = NPY_OBJECT;
+ data = (void **)(((char *)ptr) + offset[0]);
+ data[0] = (void *)fdata;
+ types = (char *)data + sizeof(void *);
+ for (i = 0; i < nargs; i++) {
+ types[i] = NPY_OBJECT;
}
- str = self->types + offset[1];
+ str = types + offset[1];
memcpy(str, fname, fname_len);
memcpy(str+fname_len, " (vectorized)", 14);
- self->name = str;
-
Py_XDECREF(pyname);
/* Do a better job someday */
- self->doc = "dynamic ufunc based on a python function";
+ doc = "dynamic ufunc based on a python function";
+
+ self = (PyUFuncObject *)PyUFunc_FromFuncAndData(
+ (PyUFuncGenericFunction *)pyfunc_functions, data,
+ types, /* ntypes */ 1, nin, nout, PyUFunc_None,
+ str, doc, /* unused */ 0);
+
+ if (self == NULL) {
+ PyArray_free(ptr);
+ return NULL;
+ }
+ Py_INCREF(function);
+ self->obj = function;
+ self->ptr = ptr;
+
+ self->type_resolver = &object_ufunc_type_resolver;
+ self->legacy_inner_loop_selector = &object_ufunc_loop_selector;
return (PyObject *)self;
}
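After the refactor, frompyfunc only packs the callable into fdata and delegates construction to PyUFunc_FromFuncAndData; nothing changes at the Python level:

    import numpy as np

    succ = np.frompyfunc(lambda x: x + 1, 1, 1)
    succ(np.arange(3))   # array([1, 2, 3], dtype=object): loops stay object
    succ.__name__        # '<lambda> (vectorized)'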
@@ -310,10 +287,10 @@ static struct PyModuleDef moduledef = {
#include <stdio.h>
#if defined(NPY_PY3K)
-#define RETVAL m
+#define RETVAL(x) x
PyMODINIT_FUNC PyInit_umath(void)
#else
-#define RETVAL
+#define RETVAL(x)
PyMODINIT_FUNC initumath(void)
#endif
{
@@ -330,7 +307,7 @@ PyMODINIT_FUNC initumath(void)
m = Py_InitModule("umath", methods);
#endif
if (!m) {
- return RETVAL;
+ goto err;
}
/* Import the array */
@@ -339,12 +316,12 @@ PyMODINIT_FUNC initumath(void)
PyErr_SetString(PyExc_ImportError,
"umath failed: Could not import array core.");
}
- return RETVAL;
+ goto err;
}
/* Initialize the types */
if (PyType_Ready(&PyUFunc_Type) < 0)
- return RETVAL;
+ goto err;
/* Add some symbolic constants to the module */
d = PyModule_GetDict(m);
@@ -426,7 +403,7 @@ PyMODINIT_FUNC initumath(void)
goto err;
}
- return RETVAL;
+ return RETVAL(m);
err:
/* Check for errors */
@@ -434,5 +411,5 @@ PyMODINIT_FUNC initumath(void)
PyErr_SetString(PyExc_RuntimeError,
"cannot load umath module.");
}
- return RETVAL;
+ return RETVAL(NULL);
}
diff --git a/numpy/core/tests/test_api.py b/numpy/core/tests/test_api.py
index a927968a4..9755e7b36 100644
--- a/numpy/core/tests/test_api.py
+++ b/numpy/core/tests/test_api.py
@@ -223,22 +223,25 @@ def test_array_astype():
b = a.astype('f4', subok=0, copy=False)
assert_(a is b)
- a = np.matrix([[0, 1, 2], [3, 4, 5]], dtype='f4')
+ class MyNDArray(np.ndarray):
+ pass
- # subok=True passes through a matrix
+ a = np.array([[0, 1, 2], [3, 4, 5]], dtype='f4').view(MyNDArray)
+
+ # subok=True passes through a subclass
b = a.astype('f4', subok=True, copy=False)
assert_(a is b)
# subok=True is default, and creates a subtype on a cast
b = a.astype('i4', copy=False)
assert_equal(a, b)
- assert_equal(type(b), np.matrix)
+ assert_equal(type(b), MyNDArray)
- # subok=False never returns a matrix
+ # subok=False never returns a subclass
b = a.astype('f4', subok=False, copy=False)
assert_equal(a, b)
assert_(not (a is b))
- assert_(type(b) is not np.matrix)
+ assert_(type(b) is not MyNDArray)
# Make sure converting from string object to fixed length string
# does not truncate.
diff --git a/numpy/core/tests/test_arrayprint.py b/numpy/core/tests/test_arrayprint.py
index 2c142f82b..6214e325c 100644
--- a/numpy/core/tests/test_arrayprint.py
+++ b/numpy/core/tests/test_arrayprint.py
@@ -491,6 +491,8 @@ class TestPrintOptions(object):
np.array(1.), style=repr)
# but not in legacy mode
np.array2string(np.array(1.), style=repr, legacy='1.13')
+ # gh-10934 style was broken in legacy mode, check it works
+ np.array2string(np.array(1.), legacy='1.13')
def test_float_spacing(self):
x = np.array([1., 2., 3.])
diff --git a/numpy/core/tests/test_datetime.py b/numpy/core/tests/test_datetime.py
index dca2d2541..e433877e8 100644
--- a/numpy/core/tests/test_datetime.py
+++ b/numpy/core/tests/test_datetime.py
@@ -124,7 +124,7 @@ class TestDateTime(object):
assert_(not np.can_cast('M8[h]', 'M8', casting='safe'))
def test_compare_generic_nat(self):
- # regression tests for GH6452
+ # regression tests for gh-6452
assert_equal(np.datetime64('NaT'),
np.datetime64('2000') + np.timedelta64('NaT'))
# nb. we may want to make NaT != NaT true in the future
@@ -236,18 +236,25 @@ class TestDateTime(object):
# find "supertype" for non-dates and dates
b = np.bool_(True)
- dt = np.datetime64('1970-01-01', 'M')
- arr = np.array([b, dt])
+ dm = np.datetime64('1970-01-01', 'M')
+ d = datetime.date(1970, 1, 1)
+ dt = datetime.datetime(1970, 1, 1, 12, 30, 40)
+
+ arr = np.array([b, dm])
assert_equal(arr.dtype, np.dtype('O'))
- dt = datetime.date(1970, 1, 1)
- arr = np.array([b, dt])
+ arr = np.array([b, d])
assert_equal(arr.dtype, np.dtype('O'))
- dt = datetime.datetime(1970, 1, 1, 12, 30, 40)
arr = np.array([b, dt])
assert_equal(arr.dtype, np.dtype('O'))
+ arr = np.array([d, d]).astype('datetime64')
+ assert_equal(arr.dtype, np.dtype('M8[D]'))
+
+ arr = np.array([dt, dt]).astype('datetime64')
+ assert_equal(arr.dtype, np.dtype('M8[us]'))
+
def test_timedelta_scalar_construction(self):
# Construct with different units
assert_equal(np.timedelta64(7, 'D'),
@@ -324,6 +331,24 @@ class TestDateTime(object):
a = np.timedelta64(1, 'Y')
assert_raises(TypeError, np.timedelta64, a, 'D')
assert_raises(TypeError, np.timedelta64, a, 'm')
+ a = datetime.timedelta(seconds=3)
+ assert_raises(TypeError, np.timedelta64, a, 'M')
+ assert_raises(TypeError, np.timedelta64, a, 'Y')
+ a = datetime.timedelta(weeks=3)
+ assert_raises(TypeError, np.timedelta64, a, 'M')
+ assert_raises(TypeError, np.timedelta64, a, 'Y')
+ a = datetime.timedelta()
+ assert_raises(TypeError, np.timedelta64, a, 'M')
+ assert_raises(TypeError, np.timedelta64, a, 'Y')
+
+ def test_timedelta_object_array_conversion(self):
+ # Regression test for gh-11096
+ inputs = [datetime.timedelta(28),
+ datetime.timedelta(30),
+ datetime.timedelta(31)]
+ expected = np.array([28, 30, 31], dtype='timedelta64[D]')
+ actual = np.array(inputs, dtype='timedelta64[D]')
+ assert_equal(expected, actual)
def test_timedelta_scalar_construction_units(self):
# String construction detecting units
diff --git a/numpy/core/tests/test_deprecations.py b/numpy/core/tests/test_deprecations.py
index 5d59d8226..60a7c72f7 100644
--- a/numpy/core/tests/test_deprecations.py
+++ b/numpy/core/tests/test_deprecations.py
@@ -134,6 +134,22 @@ class _VisibleDeprecationTestCase(_DeprecationTestCase):
warning_cls = np.VisibleDeprecationWarning
+class TestNonTupleNDIndexDeprecation(object):
+ def test_basic(self):
+ a = np.zeros((5, 5))
+ with warnings.catch_warnings():
+ warnings.filterwarnings('always')
+ assert_warns(FutureWarning, a.__getitem__, [[0, 1], [0, 1]])
+ assert_warns(FutureWarning, a.__getitem__, [slice(None)])
+
+ warnings.filterwarnings('error')
+ assert_raises(FutureWarning, a.__getitem__, [[0, 1], [0, 1]])
+ assert_raises(FutureWarning, a.__getitem__, [slice(None)])
+
+ # a[[0, 1]] was always advanced indexing, so no error/warning
+ a[[0, 1]]
+
+
class TestRankDeprecation(_DeprecationTestCase):
"""Test that np.rank is deprecated. The function should simply be
removed. The VisibleDeprecationWarning may become unnecessary.
diff --git a/numpy/core/tests/test_dtype.py b/numpy/core/tests/test_dtype.py
index 35b8f6868..27fbb10d5 100644
--- a/numpy/core/tests/test_dtype.py
+++ b/numpy/core/tests/test_dtype.py
@@ -205,6 +205,14 @@ class TestRecord(object):
assert_equal(dt3.itemsize, 11)
assert_equal(dt1, dt2)
assert_equal(dt2, dt3)
+ # Array of subtype should preserve alignment
+ dt1 = np.dtype([('a', '|i1'),
+ ('b', [('f0', '<i2'),
+ ('f1', '<f4')], 2)], align=True)
+ assert_equal(dt1.descr, [('a', '|i1'), ('', '|V3'),
+ ('b', [('f0', '<i2'), ('', '|V2'),
+ ('f1', '<f4')], (2,))])
+
def test_union_struct(self):
# Should be able to create union dtypes
diff --git a/numpy/core/tests/test_einsum.py b/numpy/core/tests/test_einsum.py
index 792b9e0a2..104dd1986 100644
--- a/numpy/core/tests/test_einsum.py
+++ b/numpy/core/tests/test_einsum.py
@@ -502,6 +502,16 @@ class TestEinSum(object):
optimize=optimize),
np.full((1, 5), 5))
+ # Cases which were failing (gh-10899)
+ x = np.eye(2, dtype=dtype)
+ y = np.ones(2, dtype=dtype)
+ assert_array_equal(np.einsum("ji,i->", x, y, optimize=optimize),
+ [2.]) # contig_contig_outstride0_two
+ assert_array_equal(np.einsum("i,ij->", y, x, optimize=optimize),
+ [2.]) # stride0_contig_outstride0_two
+ assert_array_equal(np.einsum("ij,i->", x, y, optimize=optimize),
+ [2.]) # contig_stride0_outstride0_two
+
def test_einsum_sums_int8(self):
self.check_einsum_sums('i1')
diff --git a/numpy/core/tests/test_indexing.py b/numpy/core/tests/test_indexing.py
index 65852e577..88f5deabc 100644
--- a/numpy/core/tests/test_indexing.py
+++ b/numpy/core/tests/test_indexing.py
@@ -576,19 +576,6 @@ class TestSubclasses(object):
assert_(isinstance(s[[0, 1, 2]], SubClass))
assert_(isinstance(s[s > 0], SubClass))
- def test_matrix_fancy(self):
- # The matrix class messes with the shape. While this is always
- # weird (getitem is not used, it does not have setitem nor knows
- # about fancy indexing), this tests gh-3110
- m = np.matrix([[1, 2], [3, 4]])
-
- assert_(isinstance(m[[0,1,0], :], np.matrix))
-
- # gh-3110. Note the transpose currently because matrices do *not*
- # support dimension fixing for fancy indexing correctly.
- x = np.asmatrix(np.arange(50).reshape(5,10))
- assert_equal(x[:2, np.array(-1)], x[:2, -1].T)
-
def test_finalize_gets_full_info(self):
# Array finalize should be called on the filled array.
class SubClass(np.ndarray):
diff --git a/numpy/core/tests/test_longdouble.py b/numpy/core/tests/test_longdouble.py
index 513a71b99..cf50d5d5c 100644
--- a/numpy/core/tests/test_longdouble.py
+++ b/numpy/core/tests/test_longdouble.py
@@ -6,7 +6,7 @@ import numpy as np
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_array_equal, temppath,
)
-from ._locales import CommaDecimalPointLocale
+from numpy.core.tests._locales import CommaDecimalPointLocale
LD_INFO = np.finfo(np.longdouble)
longdouble_longer_than_double = (LD_INFO.eps < np.finfo(np.double).eps)
diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py
index 4bc85ad97..a60f2cd92 100644
--- a/numpy/core/tests/test_multiarray.py
+++ b/numpy/core/tests/test_multiarray.py
@@ -34,7 +34,7 @@ from numpy.testing import (
assert_allclose, IS_PYPY, HAS_REFCOUNT, assert_array_less, runstring,
SkipTest, temppath, suppress_warnings
)
-from ._locales import CommaDecimalPointLocale
+from numpy.core.tests._locales import CommaDecimalPointLocale
# Need to test an object that does not fully implement math interface
from datetime import timedelta, datetime
@@ -1745,13 +1745,6 @@ class TestMethods(object):
assert_equal(r, np.array([('a', 1), ('c', 3), ('b', 255), ('d', 258)],
dtype=mydtype))
- def test_sort_matrix_none(self):
- a = np.matrix([[2, 1, 0]])
- actual = np.sort(a, axis=None)
- expected = np.matrix([[0, 1, 2]])
- assert_equal(actual, expected)
- assert_(type(expected) is np.matrix)
-
def test_argsort(self):
# all c scalar argsorts use the same code with different types
# so it suffices to run a quick check with one type. The number
@@ -2497,14 +2490,6 @@ class TestMethods(object):
assert_array_equal(np.partition(d, kth)[kth], tgt,
err_msg="data: %r\n kth: %r" % (d, kth))
- def test_partition_matrix_none(self):
- # gh-4301
- a = np.matrix([[2, 1, 0]])
- actual = np.partition(a, 1, axis=None)
- expected = np.matrix([[0, 1, 2]])
- assert_equal(actual, expected)
- assert_(type(expected) is np.matrix)
-
def test_argpartition_gh5524(self):
# A test for functionality of argpartition on lists.
d = [6,7,3,2,9,0]
@@ -3332,7 +3317,39 @@ class TestBinop(object):
with assert_raises(NotImplementedError):
a ** 2
+ def test_pow_array_object_dtype(self):
+ # test pow on arrays of object dtype
+ class SomeClass(object):
+ def __init__(self, num=None):
+ self.num = num
+
+ # want to ensure a fast pow path is not taken
+ def __mul__(self, other):
+ raise AssertionError('__mul__ should not be called')
+
+ def __div__(self, other):
+ raise AssertionError('__div__ should not be called')
+
+ def __pow__(self, exp):
+ return SomeClass(num=self.num ** exp)
+
+ def __eq__(self, other):
+ if isinstance(other, SomeClass):
+ return self.num == other.num
+
+ __rpow__ = __pow__
+
+ def pow_for(exp, arr):
+ return np.array([x ** exp for x in arr])
+
+ obj_arr = np.array([SomeClass(1), SomeClass(2), SomeClass(3)])
+ assert_equal(obj_arr ** 0.5, pow_for(0.5, obj_arr))
+ assert_equal(obj_arr ** 0, pow_for(0, obj_arr))
+ assert_equal(obj_arr ** 1, pow_for(1, obj_arr))
+ assert_equal(obj_arr ** -1, pow_for(-1, obj_arr))
+ assert_equal(obj_arr ** 2, pow_for(2, obj_arr))
+
class TestTemporaryElide(object):
# elision is only triggered on relatively large arrays
@@ -5279,13 +5296,6 @@ class TestDot(object):
assert_equal(np.dot(b, a), res)
assert_equal(np.dot(b, b), res)
- def test_dot_scalar_and_matrix_of_objects(self):
- # Ticket #2469
- arr = np.matrix([1, 2], dtype=object)
- desired = np.matrix([[3, 6]], dtype=object)
- assert_equal(np.dot(arr, 3), desired)
- assert_equal(np.dot(3, arr), desired)
-
def test_accelerate_framework_sgemv_fix(self):
def aligned_array(shape, align, dtype, order='C'):
@@ -5641,21 +5651,6 @@ class TestInner(object):
assert_equal(np.inner(vec, sca), desired)
assert_equal(np.inner(sca, vec), desired)
- def test_inner_scalar_and_matrix(self):
- for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?':
- sca = np.array(3, dtype=dt)[()]
- arr = np.matrix([[1, 2], [3, 4]], dtype=dt)
- desired = np.matrix([[3, 6], [9, 12]], dtype=dt)
- assert_equal(np.inner(arr, sca), desired)
- assert_equal(np.inner(sca, arr), desired)
-
- def test_inner_scalar_and_matrix_of_objects(self):
- # Ticket #4482
- arr = np.matrix([1, 2], dtype=object)
- desired = np.matrix([[3, 6]], dtype=object)
- assert_equal(np.inner(arr, 3), desired)
- assert_equal(np.inner(3, arr), desired)
-
def test_vecself(self):
# Ticket 844.
# Inner product of a vector with itself segfaults or give
@@ -6204,7 +6199,10 @@ class TestPEP3118Dtype(object):
self._check('i', 'i')
self._check('i:f0:', [('f0', 'i')])
+
class TestNewBufferProtocol(object):
+ """ Test PEP3118 buffers """
+
def _check_roundtrip(self, obj):
obj = np.asarray(obj)
x = memoryview(obj)
@@ -6515,6 +6513,36 @@ class TestNewBufferProtocol(object):
with assert_raises(ValueError):
memoryview(arr)
+ def test_max_dims(self):
+ a = np.empty((1,) * 32)
+ self._check_roundtrip(a)
+
+ @pytest.mark.skipif(sys.version_info < (2, 7, 7), reason="See gh-11115")
+ def test_error_too_many_dims(self):
+ def make_ctype(shape, scalar_type):
+ t = scalar_type
+ for dim in shape[::-1]:
+ t = dim * t
+ return t
+
+ # construct a memoryview with 33 dimensions
+ c_u8_33d = make_ctype((1,)*33, ctypes.c_uint8)
+ m = memoryview(c_u8_33d())
+ assert_equal(m.ndim, 33)
+
+ assert_raises_regex(
+ RuntimeError, "ndim",
+ np.array, m)
+
+ def test_error_pointer_type(self):
+ # gh-6741
+ m = memoryview(ctypes.pointer(ctypes.c_uint8()))
+ assert_('&' in m.format)
+
+ assert_raises_regex(
+ ValueError, "format string",
+ np.array, m)
+
class TestArrayAttributeDeletion(object):
@@ -7246,18 +7274,53 @@ class TestWritebackIfCopy(object):
def test_view_assign(self):
from numpy.core._multiarray_tests import npy_create_writebackifcopy, npy_resolve
+
arr = np.arange(9).reshape(3, 3).T
arr_wb = npy_create_writebackifcopy(arr)
assert_(arr_wb.flags.writebackifcopy)
assert_(arr_wb.base is arr)
- arr_wb[:] = -100
+ arr_wb[...] = -100
npy_resolve(arr_wb)
+ # arr changes after resolve, even though we assigned to arr_wb
assert_equal(arr, -100)
# after resolve, the two arrays no longer reference each other
- assert_(not arr_wb.ctypes.data == 0)
- arr_wb[:] = 100
+ assert_(arr_wb.ctypes.data != 0)
+ assert_equal(arr_wb.base, None)
+ # assigning to arr_wb does not get transferred to arr
+ arr_wb[...] = 100
assert_equal(arr, -100)
+ def test_dealloc_warning(self):
+ with suppress_warnings() as sup:
+ sup.record(RuntimeWarning)
+ arr = np.arange(9).reshape(3, 3)
+ v = arr.T
+ _multiarray_tests.npy_abuse_writebackifcopy(v)
+ assert len(sup.log) == 1
+
+ def test_view_discard_refcount(self):
+ from numpy.core._multiarray_tests import npy_create_writebackifcopy, npy_discard
+
+ arr = np.arange(9).reshape(3, 3).T
+ orig = arr.copy()
+ if HAS_REFCOUNT:
+ arr_cnt = sys.getrefcount(arr)
+ arr_wb = npy_create_writebackifcopy(arr)
+ assert_(arr_wb.flags.writebackifcopy)
+ assert_(arr_wb.base is arr)
+ arr_wb[...] = -100
+ npy_discard(arr_wb)
+ # arr remains unchanged after discard
+ assert_equal(arr, orig)
+ # after discard, the two arrays no longer reference each other
+ assert_(arr_wb.ctypes.data != 0)
+ assert_equal(arr_wb.base, None)
+ if HAS_REFCOUNT:
+ assert_equal(arr_cnt, sys.getrefcount(arr))
+ # assigning to arr_wb does not get transferred to arr
+ arr_wb[...] = 100
+ assert_equal(arr, orig)
+
class TestArange(object):
def test_infinite(self):
diff --git a/numpy/core/tests/test_nditer.py b/numpy/core/tests/test_nditer.py
index 0e29876eb..a0096efdb 100644
--- a/numpy/core/tests/test_nditer.py
+++ b/numpy/core/tests/test_nditer.py
@@ -43,13 +43,14 @@ def test_iter_refcount():
dt = np.dtype('f4').newbyteorder()
rc_a = sys.getrefcount(a)
rc_dt = sys.getrefcount(dt)
- it = nditer(a, [],
+ with nditer(a, [],
[['readwrite', 'updateifcopy']],
casting='unsafe',
- op_dtypes=[dt])
- assert_(not it.iterationneedsapi)
- assert_(sys.getrefcount(a) > rc_a)
- assert_(sys.getrefcount(dt) > rc_dt)
+ op_dtypes=[dt]) as it:
+ assert_(not it.iterationneedsapi)
+ assert_(sys.getrefcount(a) > rc_a)
+ assert_(sys.getrefcount(dt) > rc_dt)
+ # del 'it'
it = None
assert_equal(sys.getrefcount(a), rc_a)
assert_equal(sys.getrefcount(dt), rc_dt)
@@ -766,12 +767,32 @@ def test_iter_flags_errors():
def test_iter_slice():
a, b, c = np.arange(3), np.arange(3), np.arange(3.)
i = nditer([a, b, c], [], ['readwrite'])
- i[0:2] = (3, 3)
- assert_equal(a, [3, 1, 2])
- assert_equal(b, [3, 1, 2])
- assert_equal(c, [0, 1, 2])
- i[1] = 12
- assert_equal(i[0:2], [3, 12])
+ with i:
+ i[0:2] = (3, 3)
+ assert_equal(a, [3, 1, 2])
+ assert_equal(b, [3, 1, 2])
+ assert_equal(c, [0, 1, 2])
+ i[1] = 12
+ assert_equal(i[0:2], [3, 12])
+
+def test_iter_assign_mapping():
+ a = np.arange(24, dtype='f8').reshape(2, 3, 4).T
+ it = np.nditer(a, [], [['readwrite', 'updateifcopy']],
+ casting='same_kind', op_dtypes=[np.dtype('f4')])
+ with it:
+ it.operands[0][...] = 3
+ it.operands[0][...] = 14
+ assert_equal(a, 14)
+ it = np.nditer(a, [], [['readwrite', 'updateifcopy']],
+ casting='same_kind', op_dtypes=[np.dtype('f4')])
+ with it:
+ x = it.operands[0][-1:1]
+ x[...] = 14
+ it.operands[0][...] = -1234
+ assert_equal(a, -1234)
+ # check for no warnings on dealloc
+ x = None
+ it = None
def test_iter_nbo_align_contig():
# Check that byte order, alignment, and contig changes work
@@ -783,23 +804,26 @@ def test_iter_nbo_align_contig():
i = nditer(au, [], [['readwrite', 'updateifcopy']],
casting='equiv',
op_dtypes=[np.dtype('f4')])
- assert_equal(i.dtypes[0].byteorder, a.dtype.byteorder)
- assert_equal(i.operands[0].dtype.byteorder, a.dtype.byteorder)
- assert_equal(i.operands[0], a)
- i.operands[0][:] = 2
- i = None
+ with i:
+ # context manager triggers UPDATEIFCOPY on i at exit
+ assert_equal(i.dtypes[0].byteorder, a.dtype.byteorder)
+ assert_equal(i.operands[0].dtype.byteorder, a.dtype.byteorder)
+ assert_equal(i.operands[0], a)
+ i.operands[0][:] = 2
assert_equal(au, [2]*6)
-
+ del i # should not raise a warning
# Byte order change by requesting NBO
a = np.arange(6, dtype='f4')
au = a.byteswap().newbyteorder()
assert_(a.dtype.byteorder != au.dtype.byteorder)
- i = nditer(au, [], [['readwrite', 'updateifcopy', 'nbo']], casting='equiv')
- assert_equal(i.dtypes[0].byteorder, a.dtype.byteorder)
- assert_equal(i.operands[0].dtype.byteorder, a.dtype.byteorder)
- assert_equal(i.operands[0], a)
- i.operands[0][:] = 2
- i = None
+ with nditer(au, [], [['readwrite', 'updateifcopy', 'nbo']],
+ casting='equiv') as i:
+ # context manager triggers UPDATEIFCOPY on i at exit
+ assert_equal(i.dtypes[0].byteorder, a.dtype.byteorder)
+ assert_equal(i.operands[0].dtype.byteorder, a.dtype.byteorder)
+ assert_equal(i.operands[0], a)
+ i.operands[0][:] = 12345
+ i.operands[0][:] = 2
assert_equal(au, [2]*6)
# Unaligned input
@@ -812,11 +836,11 @@ def test_iter_nbo_align_contig():
assert_(not i.operands[0].flags.aligned)
assert_equal(i.operands[0], a)
# With 'aligned', should make a copy
- i = nditer(a, [], [['readwrite', 'updateifcopy', 'aligned']])
- assert_(i.operands[0].flags.aligned)
- assert_equal(i.operands[0], a)
- i.operands[0][:] = 3
- i = None
+ with nditer(a, [], [['readwrite', 'updateifcopy', 'aligned']]) as i:
+ assert_(i.operands[0].flags.aligned)
+ # context manager triggers UPDATEIFCOPY on i at exit
+ assert_equal(i.operands[0], a)
+ i.operands[0][:] = 3
assert_equal(a, [3]*6)
# Discontiguous input
@@ -838,16 +862,17 @@ def test_iter_array_cast():
# No cast 'f4' -> 'f4'
a = np.arange(6, dtype='f4').reshape(2, 3)
i = nditer(a, [], [['readwrite']], op_dtypes=[np.dtype('f4')])
- assert_equal(i.operands[0], a)
- assert_equal(i.operands[0].dtype, np.dtype('f4'))
+ with i:
+ assert_equal(i.operands[0], a)
+ assert_equal(i.operands[0].dtype, np.dtype('f4'))
# Byte-order cast '<f4' -> '>f4'
a = np.arange(6, dtype='<f4').reshape(2, 3)
- i = nditer(a, [], [['readwrite', 'updateifcopy']],
+ with nditer(a, [], [['readwrite', 'updateifcopy']],
casting='equiv',
- op_dtypes=[np.dtype('>f4')])
- assert_equal(i.operands[0], a)
- assert_equal(i.operands[0].dtype, np.dtype('>f4'))
+ op_dtypes=[np.dtype('>f4')]) as i:
+ assert_equal(i.operands[0], a)
+ assert_equal(i.operands[0].dtype, np.dtype('>f4'))
# Safe case 'f4' -> 'f8'
a = np.arange(24, dtype='f4').reshape(2, 3, 4).swapaxes(1, 2)
@@ -869,30 +894,28 @@ def test_iter_array_cast():
# Same-kind cast 'f8' -> 'f4' -> 'f8'
a = np.arange(24, dtype='f8').reshape(2, 3, 4).T
- i = nditer(a, [],
+ with nditer(a, [],
[['readwrite', 'updateifcopy']],
casting='same_kind',
- op_dtypes=[np.dtype('f4')])
- assert_equal(i.operands[0], a)
- assert_equal(i.operands[0].dtype, np.dtype('f4'))
- assert_equal(i.operands[0].strides, (4, 16, 48))
- # Check that UPDATEIFCOPY is activated
- i.operands[0][2, 1, 1] = -12.5
- assert_(a[2, 1, 1] != -12.5)
- i = None
+ op_dtypes=[np.dtype('f4')]) as i:
+ assert_equal(i.operands[0], a)
+ assert_equal(i.operands[0].dtype, np.dtype('f4'))
+ assert_equal(i.operands[0].strides, (4, 16, 48))
+ # Check that WRITEBACKIFCOPY is activated at exit
+ i.operands[0][2, 1, 1] = -12.5
+ assert_(a[2, 1, 1] != -12.5)
assert_equal(a[2, 1, 1], -12.5)
a = np.arange(6, dtype='i4')[::-2]
- i = nditer(a, [],
+ with nditer(a, [],
[['writeonly', 'updateifcopy']],
casting='unsafe',
- op_dtypes=[np.dtype('f4')])
- assert_equal(i.operands[0].dtype, np.dtype('f4'))
- # Even though the stride was negative in 'a', it
- # becomes positive in the temporary
- assert_equal(i.operands[0].strides, (4,))
- i.operands[0][:] = [1, 2, 3]
- i = None
+ op_dtypes=[np.dtype('f4')]) as i:
+ assert_equal(i.operands[0].dtype, np.dtype('f4'))
+ # Even though the stride was negative in 'a', it
+ # becomes positive in the temporary
+ assert_equal(i.operands[0].strides, (4,))
+ i.operands[0][:] = [1, 2, 3]
assert_equal(a, [1, 2, 3])
def test_iter_array_cast_errors():
@@ -1027,9 +1050,10 @@ def test_iter_object_arrays_basic():
i = nditer(a.reshape(2, 2).T, ['refs_ok', 'buffered'],
['readwrite'], order='C')
- for x in i:
- x[...] = None
- vals, i, x = [None]*3
+ with i:
+ for x in i:
+ x[...] = None
+ vals, i, x = [None]*3
if HAS_REFCOUNT:
assert_(sys.getrefcount(obj) == rc-1)
assert_equal(a, np.array([None]*4, dtype='O'))
@@ -1039,15 +1063,17 @@ def test_iter_object_arrays_conversions():
a = np.arange(6, dtype='O')
i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'],
casting='unsafe', op_dtypes='i4')
- for x in i:
- x[...] += 1
+ with i:
+ for x in i:
+ x[...] += 1
assert_equal(a, np.arange(6)+1)
a = np.arange(6, dtype='i4')
i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'],
casting='unsafe', op_dtypes='O')
- for x in i:
- x[...] += 1
+ with i:
+ for x in i:
+ x[...] += 1
assert_equal(a, np.arange(6)+1)
# Non-contiguous object array
@@ -1056,8 +1082,9 @@ def test_iter_object_arrays_conversions():
a[:] = np.arange(6)
i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'],
casting='unsafe', op_dtypes='i4')
- for x in i:
- x[...] += 1
+ with i:
+ for x in i:
+ x[...] += 1
assert_equal(a, np.arange(6)+1)
#Non-contiguous value array
@@ -1066,11 +1093,12 @@ def test_iter_object_arrays_conversions():
a[:] = np.arange(6) + 98172488
i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'],
casting='unsafe', op_dtypes='O')
- ob = i[0][()]
- if HAS_REFCOUNT:
- rc = sys.getrefcount(ob)
- for x in i:
- x[...] += 1
+ with i:
+ ob = i[0][()]
+ if HAS_REFCOUNT:
+ rc = sys.getrefcount(ob)
+ for x in i:
+ x[...] += 1
if HAS_REFCOUNT:
assert_(sys.getrefcount(ob) == rc-1)
assert_equal(a, np.arange(6)+98172489)
@@ -1146,14 +1174,15 @@ def test_iter_copy_if_overlap():
for flag in ['readonly', 'writeonly', 'readwrite']:
a = arange(10)
i = nditer([a], ['copy_if_overlap'], [[flag]])
- assert_(i.operands[0] is a)
+ with i:
+ assert_(i.operands[0] is a)
# Copy needed, 2 ops, read-write overlap
x = arange(10)
a = x[1:]
b = x[:-1]
- i = nditer([a, b], ['copy_if_overlap'], [['readonly'], ['readwrite']])
- assert_(not np.shares_memory(*i.operands))
+ with nditer([a, b], ['copy_if_overlap'], [['readonly'], ['readwrite']]) as i:
+ assert_(not np.shares_memory(*i.operands))
# Copy not needed with elementwise, 2 ops, exactly same arrays
x = arange(10)
@@ -1161,9 +1190,10 @@ def test_iter_copy_if_overlap():
b = x
i = nditer([a, b], ['copy_if_overlap'], [['readonly', 'overlap_assume_elementwise'],
['readwrite', 'overlap_assume_elementwise']])
- assert_(i.operands[0] is a and i.operands[1] is b)
- i = nditer([a, b], ['copy_if_overlap'], [['readonly'], ['readwrite']])
- assert_(i.operands[0] is a and not np.shares_memory(i.operands[1], b))
+ with i:
+ assert_(i.operands[0] is a and i.operands[1] is b)
+ with nditer([a, b], ['copy_if_overlap'], [['readonly'], ['readwrite']]) as i:
+ assert_(i.operands[0] is a and not np.shares_memory(i.operands[1], b))
# Copy not needed, 2 ops, no overlap
x = arange(10)
@@ -1176,8 +1206,8 @@ def test_iter_copy_if_overlap():
x = arange(4, dtype=np.int8)
a = x[3:]
b = x.view(np.int32)[:1]
- i = nditer([a, b], ['copy_if_overlap'], [['readonly'], ['writeonly']])
- assert_(not np.shares_memory(*i.operands))
+ with nditer([a, b], ['copy_if_overlap'], [['readonly'], ['writeonly']]) as i:
+ assert_(not np.shares_memory(*i.operands))
# Copy needed, 3 ops, read-write overlap
for flag in ['writeonly', 'readwrite']:
@@ -1185,11 +1215,11 @@ def test_iter_copy_if_overlap():
a = x
b = x.T
c = x
- i = nditer([a, b, c], ['copy_if_overlap'],
- [['readonly'], ['readonly'], [flag]])
- a2, b2, c2 = i.operands
- assert_(not np.shares_memory(a2, c2))
- assert_(not np.shares_memory(b2, c2))
+ with nditer([a, b, c], ['copy_if_overlap'],
+ [['readonly'], ['readonly'], [flag]]) as i:
+ a2, b2, c2 = i.operands
+ assert_(not np.shares_memory(a2, c2))
+ assert_(not np.shares_memory(b2, c2))
# Copy not needed, 3 ops, read-only overlap
x = np.ones([10, 10])
@@ -1324,17 +1354,15 @@ def test_iter_copy():
assert_equal([x[()] for x in i], [x[()] for x in j])
# Casting iterator
- i = nditer(a, ['buffered'], order='F', casting='unsafe',
- op_dtypes='f8', buffersize=5)
- j = i.copy()
- i = None
+ with nditer(a, ['buffered'], order='F', casting='unsafe',
+ op_dtypes='f8', buffersize=5) as i:
+ j = i.copy()
assert_equal([x[()] for x in j], a.ravel(order='F'))
a = arange(24, dtype='<i4').reshape(2, 3, 4)
- i = nditer(a, ['buffered'], order='F', casting='unsafe',
- op_dtypes='>f8', buffersize=5)
- j = i.copy()
- i = None
+ with nditer(a, ['buffered'], order='F', casting='unsafe',
+ op_dtypes='>f8', buffersize=5) as i:
+ j = i.copy()
assert_equal([x[()] for x in j], a.ravel(order='F'))
def test_iter_allocate_output_simple():
@@ -1353,11 +1381,12 @@ def test_iter_allocate_output_buffered_readwrite():
a = arange(6)
i = nditer([a, None], ['buffered', 'delay_bufalloc'],
[['readonly'], ['allocate', 'readwrite']])
- i.operands[1][:] = 1
- i.reset()
- for x in i:
- x[1][...] += x[0][...]
- assert_equal(i.operands[1], a+1)
+ with i:
+ i.operands[1][:] = 1
+ i.reset()
+ for x in i:
+ x[1][...] += x[0][...]
+ assert_equal(i.operands[1], a+1)
def test_iter_allocate_output_itorder():
# The allocated output should match the iteration order
@@ -1440,26 +1469,25 @@ def test_iter_allocate_output_types_scalar():
def test_iter_allocate_output_subtype():
# Make sure that the subtype with priority wins
+ class MyNDArray(np.ndarray):
+ __array_priority__ = 15
- # matrix vs ndarray
- a = np.matrix([[1, 2], [3, 4]])
+ # subclass vs ndarray
+ a = np.array([[1, 2], [3, 4]]).view(MyNDArray)
b = np.arange(4).reshape(2, 2).T
i = nditer([a, b, None], [],
- [['readonly'], ['readonly'], ['writeonly', 'allocate']])
+ [['readonly'], ['readonly'], ['writeonly', 'allocate']])
assert_equal(type(a), type(i.operands[2]))
- assert_(type(b) != type(i.operands[2]))
+ assert_(type(b) is not type(i.operands[2]))
assert_equal(i.operands[2].shape, (2, 2))
- # matrix always wants things to be 2D
- b = np.arange(4).reshape(1, 2, 2)
- assert_raises(RuntimeError, nditer, [a, b, None], [],
- [['readonly'], ['readonly'], ['writeonly', 'allocate']])
- # but if subtypes are disabled, the result can still work
+ # If subtypes are disabled, we should get back an ndarray.
i = nditer([a, b, None], [],
- [['readonly'], ['readonly'], ['writeonly', 'allocate', 'no_subtype']])
+ [['readonly'], ['readonly'],
+ ['writeonly', 'allocate', 'no_subtype']])
assert_equal(type(b), type(i.operands[2]))
- assert_(type(a) != type(i.operands[2]))
- assert_equal(i.operands[2].shape, (1, 2, 2))
+ assert_(type(a) is not type(i.operands[2]))
+ assert_equal(i.operands[2].shape, (2, 2))
def test_iter_allocate_output_errors():
# Check that the iterator will throw errors for bad output allocations
@@ -1652,10 +1680,11 @@ def test_iter_write_buffering():
order='C',
buffersize=16)
x = 0
- while not i.finished:
- i[0] = x
- x += 1
- i.iternext()
+ with i:
+ while not i.finished:
+ i[0] = x
+ x += 1
+ i.iternext()
assert_equal(a.ravel(order='C'), np.arange(24))
def test_iter_buffering_delayed_alloc():
@@ -1679,10 +1708,11 @@ def test_iter_buffering_delayed_alloc():
i.reset()
assert_(not i.has_delayed_bufalloc)
assert_equal(i.multi_index, (0,))
- assert_equal(i[0], 0)
- i[1] = 1
- assert_equal(i[0:2], [0, 1])
- assert_equal([[x[0][()], x[1][()]] for x in i], list(zip(range(6), [1]*6)))
+ with i:
+ assert_equal(i[0], 0)
+ i[1] = 1
+ assert_equal(i[0:2], [0, 1])
+ assert_equal([[x[0][()], x[1][()]] for x in i], list(zip(range(6), [1]*6)))
def test_iter_buffered_cast_simple():
# Test that buffering can handle a simple cast
@@ -1693,8 +1723,9 @@ def test_iter_buffered_cast_simple():
casting='same_kind',
op_dtypes=[np.dtype('f8')],
buffersize=3)
- for v in i:
- v[...] *= 2
+ with i:
+ for v in i:
+ v[...] *= 2
assert_equal(a, 2*np.arange(10, dtype='f4'))
@@ -1707,8 +1738,9 @@ def test_iter_buffered_cast_byteswapped():
casting='same_kind',
op_dtypes=[np.dtype('f8').newbyteorder()],
buffersize=3)
- for v in i:
- v[...] *= 2
+ with i:
+ for v in i:
+ v[...] *= 2
assert_equal(a, 2*np.arange(10, dtype='f4'))
@@ -1721,8 +1753,9 @@ def test_iter_buffered_cast_byteswapped():
casting='unsafe',
op_dtypes=[np.dtype('c8').newbyteorder()],
buffersize=3)
- for v in i:
- v[...] *= 2
+ with i:
+ for v in i:
+ v[...] *= 2
assert_equal(a, 2*np.arange(10, dtype='f8'))
@@ -1736,8 +1769,9 @@ def test_iter_buffered_cast_byteswapped_complex():
casting='same_kind',
op_dtypes=[np.dtype('c16')],
buffersize=3)
- for v in i:
- v[...] *= 2
+ with i:
+ for v in i:
+ v[...] *= 2
assert_equal(a, 2*np.arange(10, dtype='c8') + 4j)
a = np.arange(10, dtype='c8')
@@ -1747,8 +1781,9 @@ def test_iter_buffered_cast_byteswapped_complex():
casting='same_kind',
op_dtypes=[np.dtype('c16').newbyteorder()],
buffersize=3)
- for v in i:
- v[...] *= 2
+ with i:
+ for v in i:
+ v[...] *= 2
assert_equal(a, 2*np.arange(10, dtype='c8') + 4j)
a = np.arange(10, dtype=np.clongdouble).newbyteorder().byteswap()
@@ -1758,8 +1793,9 @@ def test_iter_buffered_cast_byteswapped_complex():
casting='same_kind',
op_dtypes=[np.dtype('c16')],
buffersize=3)
- for v in i:
- v[...] *= 2
+ with i:
+ for v in i:
+ v[...] *= 2
assert_equal(a, 2*np.arange(10, dtype=np.clongdouble) + 4j)
a = np.arange(10, dtype=np.longdouble).newbyteorder().byteswap()
@@ -1768,8 +1804,9 @@ def test_iter_buffered_cast_byteswapped_complex():
casting='same_kind',
op_dtypes=[np.dtype('f4')],
buffersize=7)
- for v in i:
- v[...] *= 2
+ with i:
+ for v in i:
+ v[...] *= 2
assert_equal(a, 2*np.arange(10, dtype=np.longdouble))
def test_iter_buffered_cast_structured_type():
@@ -1880,12 +1917,13 @@ def test_iter_buffered_cast_subarray():
i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'],
casting='unsafe',
op_dtypes=sdt2)
- assert_equal(i[0].dtype, np.dtype(sdt2))
- count = 0
- for x in i:
- assert_(np.all(x['a'] == count))
- x['a'][0] += 2
- count += 1
+ with i:
+ assert_equal(i[0].dtype, np.dtype(sdt2))
+ count = 0
+ for x in i:
+ assert_(np.all(x['a'] == count))
+ x['a'][0] += 2
+ count += 1
assert_equal(a['a'], np.arange(6).reshape(6, 1, 1)+2)
# many -> one element -> back (copies just element 0)
@@ -1896,12 +1934,13 @@ def test_iter_buffered_cast_subarray():
i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'],
casting='unsafe',
op_dtypes=sdt2)
- assert_equal(i[0].dtype, np.dtype(sdt2))
- count = 0
- for x in i:
- assert_equal(x['a'], count)
- x['a'] += 2
- count += 1
+ with i:
+ assert_equal(i[0].dtype, np.dtype(sdt2))
+ count = 0
+ for x in i:
+ assert_equal(x['a'], count)
+ x['a'] += 2
+ count += 1
assert_equal(a['a'], np.arange(6).reshape(6, 1, 1, 1)*np.ones((1, 3, 2, 2))+2)
# many -> one element -> back (copies just element 0)
@@ -2109,27 +2148,29 @@ def test_iter_buffered_reduce_reuse():
nditer2 = np.nditer([arr.copy(), None],
op_axes=op_axes, flags=flags, op_flags=op_flags,
op_dtypes=op_dtypes)
- nditer2.operands[-1][...] = 0
- nditer2.reset()
- nditer2.iterindex = skip
+ with nditer2:
+ nditer2.operands[-1][...] = 0
+ nditer2.reset()
+ nditer2.iterindex = skip
- for (a2_in, b2_in) in nditer2:
- b2_in += a2_in.astype(np.int_)
+ for (a2_in, b2_in) in nditer2:
+ b2_in += a2_in.astype(np.int_)
- comp_res = nditer2.operands[-1]
+ comp_res = nditer2.operands[-1]
for bufsize in range(0, 3**3):
nditer1 = np.nditer([arr, None],
op_axes=op_axes, flags=flags, op_flags=op_flags,
buffersize=bufsize, op_dtypes=op_dtypes)
- nditer1.operands[-1][...] = 0
- nditer1.reset()
- nditer1.iterindex = skip
+ with nditer1:
+ nditer1.operands[-1][...] = 0
+ nditer1.reset()
+ nditer1.iterindex = skip
- for (a1_in, b1_in) in nditer1:
- b1_in += a1_in.astype(np.int_)
+ for (a1_in, b1_in) in nditer1:
+ b1_in += a1_in.astype(np.int_)
- res = nditer1.operands[-1]
+ res = nditer1.operands[-1]
assert_array_equal(res, comp_res)
@@ -2288,7 +2329,21 @@ class TestIterNested(object):
assert_equal(vals, [[0, 1, 2], [3, 4, 5]])
vals = None
- # updateifcopy
+ # writebackifcopy - using context manager
+ a = arange(6, dtype='f4').reshape(2, 3)
+ i, j = np.nested_iters(a, [[0], [1]],
+ op_flags=['readwrite', 'updateifcopy'],
+ casting='same_kind',
+ op_dtypes='f8')
+ with i, j:
+ assert_equal(j[0].dtype, np.dtype('f8'))
+ for x in i:
+ for y in j:
+ y[...] += 1
+ assert_equal(a, [[0, 1, 2], [3, 4, 5]])
+ assert_equal(a, [[1, 2, 3], [4, 5, 6]])
+
+ # writebackifcopy - using close()
a = arange(6, dtype='f4').reshape(2, 3)
i, j = np.nested_iters(a, [[0], [1]],
op_flags=['readwrite', 'updateifcopy'],
@@ -2299,9 +2354,11 @@ class TestIterNested(object):
for y in j:
y[...] += 1
assert_equal(a, [[0, 1, 2], [3, 4, 5]])
- i, j, x, y = (None,)*4 # force the updateifcopy
+ i.close()
+ j.close()
assert_equal(a, [[1, 2, 3], [4, 5, 6]])
+
def test_dtype_buffered(self):
# Test nested iteration with buffering to change dtype
@@ -2338,6 +2395,21 @@ class TestIterNested(object):
vals.append([z for z in k])
assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]])
+ def test_iter_nested_iters_dtype_buffered(self):
+ # Test nested iteration with buffering to change dtype
+
+ a = arange(6, dtype='f4').reshape(2, 3)
+ i, j = np.nested_iters(a, [[0], [1]],
+ flags=['buffered'],
+ op_flags=['readwrite'],
+ casting='same_kind',
+ op_dtypes='f8')
+ with i, j:
+ assert_equal(j[0].dtype, np.dtype('f8'))
+ for x in i:
+ for y in j:
+ y[...] += 1
+ assert_equal(a, [[1, 2, 3], [4, 5, 6]])
def test_iter_reduction_error():
@@ -2359,33 +2431,35 @@ def test_iter_reduction():
[['readonly'], ['readwrite', 'allocate']],
op_axes=[[0], [-1]])
# Need to initialize the output operand to the addition unit
- i.operands[1][...] = 0
- # Do the reduction
- for x, y in i:
- y[...] += x
- # Since no axes were specified, should have allocated a scalar
- assert_equal(i.operands[1].ndim, 0)
- assert_equal(i.operands[1], np.sum(a))
+ with i:
+ i.operands[1][...] = 0
+ # Do the reduction
+ for x, y in i:
+ y[...] += x
+ # Since no axes were specified, should have allocated a scalar
+ assert_equal(i.operands[1].ndim, 0)
+ assert_equal(i.operands[1], np.sum(a))
a = np.arange(6).reshape(2, 3)
i = nditer([a, None], ['reduce_ok', 'external_loop'],
[['readonly'], ['readwrite', 'allocate']],
op_axes=[[0, 1], [-1, -1]])
# Need to initialize the output operand to the addition unit
- i.operands[1][...] = 0
- # Reduction shape/strides for the output
- assert_equal(i[1].shape, (6,))
- assert_equal(i[1].strides, (0,))
- # Do the reduction
- for x, y in i:
- # Use a for loop instead of ``y[...] += x``
- # (equivalent to ``y[...] = y[...].copy() + x``),
- # because y has zero strides we use for the reduction
- for j in range(len(y)):
- y[j] += x[j]
- # Since no axes were specified, should have allocated a scalar
- assert_equal(i.operands[1].ndim, 0)
- assert_equal(i.operands[1], np.sum(a))
+ with i:
+ i.operands[1][...] = 0
+ # Reduction shape/strides for the output
+ assert_equal(i[1].shape, (6,))
+ assert_equal(i[1].strides, (0,))
+ # Do the reduction
+ for x, y in i:
+ # Use a for loop instead of ``y[...] += x``
+ # (equivalent to ``y[...] = y[...].copy() + x``),
+ # because y has zero strides we use for the reduction
+ for j in range(len(y)):
+ y[j] += x[j]
+ # Since no axes were specified, should have allocated a scalar
+ assert_equal(i.operands[1].ndim, 0)
+ assert_equal(i.operands[1], np.sum(a))
# This is a tricky reduction case for the buffering double loop
# to handle
@@ -2397,15 +2471,16 @@ def test_iter_reduction():
'buffered', 'delay_bufalloc'],
[['readonly'], ['readwrite', 'allocate']],
op_axes=[None, [0, -1, 1]], buffersize=10)
- it1.operands[1].fill(0)
- it2.operands[1].fill(0)
- it2.reset()
- for x in it1:
- x[1][...] += x[0]
- for x in it2:
- x[1][...] += x[0]
- assert_equal(it1.operands[1], it2.operands[1])
- assert_equal(it2.operands[1].sum(), a.size)
+ with it1, it2:
+ it1.operands[1].fill(0)
+ it2.operands[1].fill(0)
+ it2.reset()
+ for x in it1:
+ x[1][...] += x[0]
+ for x in it2:
+ x[1][...] += x[0]
+ assert_equal(it1.operands[1], it2.operands[1])
+ assert_equal(it2.operands[1].sum(), a.size)
def test_iter_buffering_reduction():
# Test doing buffered reductions with the iterator
@@ -2415,11 +2490,12 @@ def test_iter_buffering_reduction():
i = nditer([a, b], ['reduce_ok', 'buffered'],
[['readonly'], ['readwrite', 'nbo']],
op_axes=[[0], [-1]])
- assert_equal(i[1].dtype, np.dtype('f8'))
- assert_(i[1].dtype != b.dtype)
- # Do the reduction
- for x, y in i:
- y[...] += x
+ with i:
+ assert_equal(i[1].dtype, np.dtype('f8'))
+ assert_(i[1].dtype != b.dtype)
+ # Do the reduction
+ for x, y in i:
+ y[...] += x
# Since no axes were specified, should have allocated a scalar
assert_equal(b, np.sum(a))
@@ -2429,15 +2505,16 @@ def test_iter_buffering_reduction():
[['readonly'], ['readwrite', 'nbo']],
op_axes=[[0, 1], [0, -1]])
# Reduction shape/strides for the output
- assert_equal(i[1].shape, (3,))
- assert_equal(i[1].strides, (0,))
- # Do the reduction
- for x, y in i:
- # Use a for loop instead of ``y[...] += x``
- # (equivalent to ``y[...] = y[...].copy() + x``),
- # because y has zero strides we use for the reduction
- for j in range(len(y)):
- y[j] += x[j]
+ with i:
+ assert_equal(i[1].shape, (3,))
+ assert_equal(i[1].strides, (0,))
+ # Do the reduction
+ for x, y in i:
+ # Use a for loop instead of ``y[...] += x``
+ # (equivalent to ``y[...] = y[...].copy() + x``),
+ # because y has zero strides we use for the reduction
+ for j in range(len(y)):
+ y[j] += x[j]
assert_equal(b, np.sum(a, axis=1))
# Iterator inner double loop was wrong on this one
@@ -2447,9 +2524,10 @@ def test_iter_buffering_reduction():
[['readonly'], ['readwrite', 'allocate']],
op_axes=[[-1, 0], [-1, -1]],
itershape=(2, 2))
- it.operands[1].fill(0)
- it.reset()
- assert_equal(it[0], [1, 2, 1, 2])
+ with it:
+ it.operands[1].fill(0)
+ it.reset()
+ assert_equal(it[0], [1, 2, 1, 2])
# Iterator inner loop should take argument contiguity into account
x = np.ones((7, 13, 8), np.int8)[4:6,1:11:6,1:5].transpose(1, 2, 0)
@@ -2461,8 +2539,9 @@ def test_iter_buffering_reduction():
it = np.nditer([y, x],
['buffered', 'external_loop', 'reduce_ok'],
[['readwrite'], ['readonly']])
- for a, b in it:
- a.fill(2)
+ with it:
+ for a, b in it:
+ a.fill(2)
assert_equal(y_base[1::2], y_base_copy[1::2])
assert_equal(y_base[::2], 2)
@@ -2479,8 +2558,9 @@ def test_iter_buffering_reduction_reuse_reduce_loops():
buffersize=5)
bufsizes = []
- for x, y in it:
- bufsizes.append(x.shape[0])
+ with it:
+ for x, y in it:
+ bufsizes.append(x.shape[0])
assert_equal(bufsizes, [5, 2, 5, 2])
assert_equal(sum(bufsizes), a.size)
@@ -2559,8 +2639,9 @@ def test_iter_writemasked():
it = np.nditer([a, msk], [],
[['readwrite', 'writemasked'],
['readonly', 'arraymask']])
- for x, m in it:
- x[...] = 1
+ with it:
+ for x, m in it:
+ x[...] = 1
# Because we violated the semantics, all the values became 1
assert_equal(a, [1, 1, 1])
@@ -2569,8 +2650,9 @@ def test_iter_writemasked():
it = np.nditer([a, msk], ['buffered'],
[['readwrite', 'writemasked'],
['readonly', 'arraymask']])
- for x, m in it:
- x[...] = 2.5
+ with it:
+ for x, m in it:
+ x[...] = 2.5
# Because we violated the semantics, all the values became 2.5
assert_equal(a, [2.5, 2.5, 2.5])
@@ -2582,8 +2664,9 @@ def test_iter_writemasked():
['readonly', 'arraymask']],
op_dtypes=['i8', None],
casting='unsafe')
- for x, m in it:
- x[...] = 3
+ with it:
+ for x, m in it:
+ x[...] = 3
# Even though we violated the semantics, only the selected values
# were copied back
assert_equal(a, [3, 3, 2.5])
@@ -2711,3 +2794,115 @@ def test_iter_too_large_with_multiindex():
# an axis with size 1 is removed:
with assert_raises(ValueError):
_multiarray_tests.test_nditer_too_large(arrays, i*2 + 1, mode)
+
+def test_writebacks():
+ a = np.arange(6, dtype='f4')
+ au = a.byteswap().newbyteorder()
+ assert_(a.dtype.byteorder != au.dtype.byteorder)
+ it = nditer(au, [], [['readwrite', 'updateifcopy']],
+ casting='equiv', op_dtypes=[np.dtype('f4')])
+ with it:
+ it.operands[0][:] = 100
+ assert_equal(au, 100)
+ # do it again, this time raise an error,
+ it = nditer(au, [], [['readwrite', 'updateifcopy']],
+ casting='equiv', op_dtypes=[np.dtype('f4')])
+ try:
+ with it:
+ assert_equal(au.flags.writeable, False)
+ it.operands[0][:] = 0
+ raise ValueError('exit context manager on exception')
+ except ValueError:
+ pass
+ assert_equal(au, 0)
+ assert_equal(au.flags.writeable, True)
+ # cannot reuse i outside context manager
+ assert_raises(ValueError, getattr, it, 'operands')
+
+ it = nditer(au, [], [['readwrite', 'updateifcopy']],
+ casting='equiv', op_dtypes=[np.dtype('f4')])
+ with it:
+ x = it.operands[0]
+ x[:] = 6
+ assert_(x.flags.writebackifcopy)
+ assert_equal(au, 6)
+ assert_(not x.flags.writebackifcopy)
+ x[:] = 123 # x.data still valid
+ assert_equal(au, 6) # but not connected to au
+
+ do_close = 1
+ # test like above, only in C, and with an option to skip the NpyIter_Close
+ _multiarray_tests.test_nditer_writeback(3, do_close, au, op_dtypes=[np.dtype('f4')])
+ assert_equal(au, 3)
+ it = nditer(au, [],
+ [['readwrite', 'updateifcopy']],
+ casting='equiv', op_dtypes=[np.dtype('f4')])
+ # reentering works
+ with it:
+ with it:
+ for x in it:
+ x[...] = 123
+
+ it = nditer(au, [],
+ [['readwrite', 'updateifcopy']],
+ casting='equiv', op_dtypes=[np.dtype('f4')])
+ # make sure exiting the inner context manager closes the iterator
+ with it:
+ with it:
+ for x in it:
+ x[...] = 123
+ assert_raises(ValueError, getattr, it, 'operands')
+ # do not crash if original data array is decrefed
+ it = nditer(au, [],
+ [['readwrite', 'updateifcopy']],
+ casting='equiv', op_dtypes=[np.dtype('f4')])
+ del au
+ with it:
+ for x in it:
+ x[...] = 123
+ # make sure we cannot reenter the closed iterator
+ enter = it.__enter__
+ assert_raises(ValueError, enter)
+
+def test_close_equivalent():
+ '''Using a context manager and using nditer.close are equivalent.
+ '''
+ def add_close(x, y, out=None):
+ addop = np.add
+ it = np.nditer([x, y, out], [],
+ [['readonly'], ['readonly'], ['writeonly','allocate']])
+ for (a, b, c) in it:
+ addop(a, b, out=c)
+ ret = it.operands[2]
+ it.close()
+ return ret
+
+ def add_context(x, y, out=None):
+ addop = np.add
+ it = np.nditer([x, y, out], [],
+ [['readonly'], ['readonly'], ['writeonly','allocate']])
+ with it:
+ for (a, b, c) in it:
+ addop(a, b, out=c)
+ return it.operands[2]
+ z = add_close(range(5), range(5))
+ assert_equal(z, range(0, 10, 2))
+ z = add_context(range(5), range(5))
+ assert_equal(z, range(0, 10, 2))
+
+def test_close_raises():
+ it = np.nditer(np.arange(3))
+ assert_equal(next(it), 0)
+ it.close()
+ assert_raises(StopIteration, next, it)
+ assert_raises(ValueError, getattr, it, 'operands')
+
+def test_warn_noclose():
+ a = np.arange(6, dtype='f4')
+ au = a.byteswap().newbyteorder()
+ do_close = 0
+ with suppress_warnings() as sup:
+ sup.record(RuntimeWarning)
+ # test like above, only in C, and with an option to skip the NpyIter_Close
+ _multiarray_tests.test_nditer_writeback(3, do_close, au, op_dtypes=[np.dtype('f4')])
+ assert len(sup.log) == 1
diff --git a/numpy/core/tests/test_numeric.py b/numpy/core/tests/test_numeric.py
index 40cccd404..53486dc51 100644
--- a/numpy/core/tests/test_numeric.py
+++ b/numpy/core/tests/test_numeric.py
@@ -552,7 +552,6 @@ class TestFloatExceptions(object):
self.assert_raises_fpe(fpeerr, flop, sc1, sc2[()])
self.assert_raises_fpe(fpeerr, flop, sc1[()], sc2[()])
- @pytest.mark.xfail(reason="See ticket #2350")
def test_floating_exceptions(self):
# Test basic arithmetic function errors
with np.errstate(all='raise'):
@@ -905,7 +904,7 @@ class TestTypes(object):
fi = np.finfo(dt)
assert_(np.can_cast(fi.min, dt))
assert_(np.can_cast(fi.max, dt))
-
+
# Custom exception class to test exception propagation in fromiter
class NIterError(Exception):
@@ -2201,13 +2200,16 @@ class TestLikeFuncs(object):
self.compare_array_value(dz, value, fill_value)
# Test the 'subok' parameter
- a = np.matrix([[1, 2], [3, 4]])
+ class MyNDArray(np.ndarray):
+ pass
+
+ a = np.array([[1, 2], [3, 4]]).view(MyNDArray)
b = like_function(a, **fill_kwarg)
- assert_(type(b) is np.matrix)
+ assert_(type(b) is MyNDArray)
b = like_function(a, subok=False, **fill_kwarg)
- assert_(type(b) is not np.matrix)
+ assert_(type(b) is not MyNDArray)
def test_ones_like(self):
self.check_like_function(np.ones_like, 1)
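
The 'subok' parameter decides whether the *_like functions preserve the input's ndarray subclass; a minimal sketch with a trivial subclass (mirroring the MyNDArray used above):

    import numpy as np

    class MyNDArray(np.ndarray):
        pass

    a = np.array([[1, 2], [3, 4]]).view(MyNDArray)
    assert type(np.ones_like(a)) is MyNDArray                # subclass kept
    assert type(np.ones_like(a, subok=False)) is np.ndarray  # plain ndarray
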
diff --git a/numpy/core/tests/test_print.py b/numpy/core/tests/test_print.py
index 746ad0e4b..433208748 100644
--- a/numpy/core/tests/test_print.py
+++ b/numpy/core/tests/test_print.py
@@ -4,7 +4,7 @@ import sys
import numpy as np
from numpy.testing import assert_, assert_equal, SkipTest
-from ._locales import CommaDecimalPointLocale
+from numpy.core.tests._locales import CommaDecimalPointLocale
if sys.version_info[0] >= 3:
diff --git a/numpy/core/tests/test_regression.py b/numpy/core/tests/test_regression.py
index b3cb3e610..d6dcaa982 100644
--- a/numpy/core/tests/test_regression.py
+++ b/numpy/core/tests/test_regression.py
@@ -2325,13 +2325,10 @@ class TestRegression(object):
def test_void_item_memview(self):
va = np.zeros(10, 'V4')
- # for now, there is just a futurewarning
- assert_warns(FutureWarning, va[:1].item)
- # in the future, test we got a bytes copy:
- #x = va[:1].item()
- #va[0] = b'\xff\xff\xff\xff'
- #del va
- #assert_equal(x, b'\x00\x00\x00\x00')
+ x = va[:1].item()
+ va[0] = b'\xff\xff\xff\xff'
+ del va
+ assert_equal(x, b'\x00\x00\x00\x00')
def test_structarray_title(self):
# The following used to segfault on pypy, due to NPY_TITLE_KEY
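
The rewritten assertions rely on .item() for void scalars returning an independent bytes copy; a minimal sketch, assuming the post-FutureWarning behavior this hunk enables:

    import numpy as np

    va = np.zeros(10, 'V4')
    x = va[:1].item()            # a bytes copy of the first element
    va[0] = b'\xff\xff\xff\xff'  # mutating the array leaves the copy alone
    del va
    assert x == b'\x00\x00\x00\x00'
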
diff --git a/numpy/core/tests/test_scalarprint.py b/numpy/core/tests/test_scalarprint.py
index 94d8294f1..a20ec9f74 100644
--- a/numpy/core/tests/test_scalarprint.py
+++ b/numpy/core/tests/test_scalarprint.py
@@ -4,9 +4,10 @@
"""
from __future__ import division, absolute_import, print_function
-import tempfile
+import code, sys
+from tempfile import TemporaryFile
import numpy as np
-from numpy.testing import assert_, assert_equal
+from numpy.testing import assert_, assert_equal, suppress_warnings
class TestRealScalars(object):
@@ -53,7 +54,7 @@ class TestRealScalars(object):
# output to a "real file" (ie, not a StringIO). Make sure we don't
# inherit it.
x = np.double(0.1999999999999)
- with tempfile.TemporaryFile('r+t') as f:
+ with TemporaryFile('r+t') as f:
print(x, file=f)
f.seek(0)
output = f.read()
@@ -62,6 +63,37 @@ class TestRealScalars(object):
# precision as '0.2', but we want numpy's np.double('0.1999999999999')
# to print the unique value, '0.1999999999999'.
+ # gh-11031
+ # Only in the python2 interactive shell and when stdout is a "real"
+ # file, the output of the last command is printed to stdout without
+ # Py_PRINT_RAW (unlike the print statement) so `>>> x` and `>>> print
+ # x` are potentially different. Make sure they are the same. The only
+ # way I found to get prompt-like output is using an actual prompt from
+ # the 'code' module. Again, must use tempfile to get a "real" file.
+
+ # dummy user-input which enters one line and then ctrl-Ds.
+ def userinput():
+ yield 'np.sqrt(2)'
+ raise EOFError
+ gen = userinput()
+ input_func = lambda prompt="": next(gen)
+
+ with TemporaryFile('r+t') as fo, TemporaryFile('r+t') as fe:
+ orig_stdout, orig_stderr = sys.stdout, sys.stderr
+ sys.stdout, sys.stderr = fo, fe
+
+ # py2 code.interact sends irrelevant internal DeprecationWarnings
+ with suppress_warnings() as sup:
+ sup.filter(DeprecationWarning)
+ code.interact(local={'np': np}, readfunc=input_func, banner='')
+
+ sys.stdout, sys.stderr = orig_stdout, orig_stderr
+
+ fo.seek(0)
+ capture = fo.read().strip()
+
+ assert_equal(capture, repr(np.sqrt(2)))
+
def test_dragon4(self):
# these tests are adapted from Ryan Juckett's dragon4 implementation,
# see dragon4.c for details.
diff --git a/numpy/core/tests/test_shape_base.py b/numpy/core/tests/test_shape_base.py
index 1d91a651e..72b3451a4 100644
--- a/numpy/core/tests/test_shape_base.py
+++ b/numpy/core/tests/test_shape_base.py
@@ -364,10 +364,6 @@ def test_stack():
stack, [np.zeros((3, 3)), np.zeros(3)], axis=1)
assert_raises_regex(ValueError, 'must have the same shape',
stack, [np.arange(2), np.arange(3)])
- # np.matrix
- m = np.matrix([[1, 2], [3, 4]])
- assert_raises_regex(ValueError, 'shape too large to be a matrix',
- stack, [m, m])
class TestBlock(object):
diff --git a/numpy/core/tests/test_ufunc.py b/numpy/core/tests/test_ufunc.py
index ea9ca021c..a5b9ce76f 100644
--- a/numpy/core/tests/test_ufunc.py
+++ b/numpy/core/tests/test_ufunc.py
@@ -5,6 +5,7 @@ import itertools
import numpy as np
import numpy.core._umath_tests as umt
+import numpy.linalg._umath_linalg as uml
import numpy.core._operand_flag_tests as opflag_tests
import numpy.core._rational_tests as _rational_tests
from numpy.testing import (
@@ -494,6 +495,17 @@ class TestUfunc(object):
d += d
assert_almost_equal(d, 2. + 2j)
+ def test_sum_initial(self):
+ # Integer, single axis
+ assert_equal(np.sum([3], initial=2), 5)
+
+ # Floating point
+ assert_almost_equal(np.sum([0.2], initial=0.1), 0.3)
+
+ # Multiple non-adjacent axes
+ assert_equal(np.sum(np.ones((2, 3, 5), dtype=np.int64), axis=(0, 2), initial=2),
+ [12, 12, 12])
+
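
The new `initial` keyword seeds the reduction, so the starting value participates once per reduced lane; a minimal sketch:

    import numpy as np

    assert np.sum([3], initial=2) == 5          # 2 + 3
    out = np.sum(np.ones((2, 3), dtype=np.int64), axis=0, initial=10)
    assert out.tolist() == [12, 12, 12]         # 10 + two ones per column
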
def test_inner1d(self):
a = np.arange(6).reshape((2, 3))
assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1))
@@ -600,49 +612,49 @@ class TestUfunc(object):
def test_axes_argument(self):
# inner1d signature: '(i),(i)->()'
- in1d = umt.inner1d
+ inner1d = umt.inner1d
a = np.arange(27.).reshape((3, 3, 3))
b = np.arange(10., 19.).reshape((3, 1, 3))
# basic tests on inputs (outputs tested below with matrix_multiply).
- c = in1d(a, b)
+ c = inner1d(a, b)
assert_array_equal(c, (a * b).sum(-1))
# default
- c = in1d(a, b, axes=[(-1,), (-1,), ()])
+ c = inner1d(a, b, axes=[(-1,), (-1,), ()])
assert_array_equal(c, (a * b).sum(-1))
# integers ok for single axis.
- c = in1d(a, b, axes=[-1, -1, ()])
+ c = inner1d(a, b, axes=[-1, -1, ()])
assert_array_equal(c, (a * b).sum(-1))
# mix fine
- c = in1d(a, b, axes=[(-1,), -1, ()])
+ c = inner1d(a, b, axes=[(-1,), -1, ()])
assert_array_equal(c, (a * b).sum(-1))
# can omit last axis.
- c = in1d(a, b, axes=[-1, -1])
+ c = inner1d(a, b, axes=[-1, -1])
assert_array_equal(c, (a * b).sum(-1))
# can pass in other types of integer (with __index__ protocol)
- c = in1d(a, b, axes=[np.int8(-1), np.array(-1, dtype=np.int32)])
+ c = inner1d(a, b, axes=[np.int8(-1), np.array(-1, dtype=np.int32)])
assert_array_equal(c, (a * b).sum(-1))
# swap some axes
- c = in1d(a, b, axes=[0, 0])
+ c = inner1d(a, b, axes=[0, 0])
assert_array_equal(c, (a * b).sum(0))
- c = in1d(a, b, axes=[0, 2])
+ c = inner1d(a, b, axes=[0, 2])
assert_array_equal(c, (a.transpose(1, 2, 0) * b).sum(-1))
# Check errors for improperly constructed axes arguments.
# should have list.
- assert_raises(TypeError, in1d, a, b, axes=-1)
+ assert_raises(TypeError, inner1d, a, b, axes=-1)
# needs enough elements
- assert_raises(ValueError, in1d, a, b, axes=[-1])
+ assert_raises(ValueError, inner1d, a, b, axes=[-1])
# should pass in indices.
- assert_raises(TypeError, in1d, a, b, axes=[-1.0, -1.0])
- assert_raises(TypeError, in1d, a, b, axes=[(-1.0,), -1])
- assert_raises(TypeError, in1d, a, b, axes=[None, 1])
+ assert_raises(TypeError, inner1d, a, b, axes=[-1.0, -1.0])
+ assert_raises(TypeError, inner1d, a, b, axes=[(-1.0,), -1])
+ assert_raises(TypeError, inner1d, a, b, axes=[None, 1])
# cannot pass an index unless there is only one dimension
# (output is wrong in this case)
- assert_raises(TypeError, in1d, a, b, axes=[-1, -1, -1])
+ assert_raises(TypeError, inner1d, a, b, axes=[-1, -1, -1])
# or pass in generally the wrong number of axes
- assert_raises(ValueError, in1d, a, b, axes=[-1, -1, (-1,)])
- assert_raises(ValueError, in1d, a, b, axes=[-1, (-2, -1), ()])
+ assert_raises(ValueError, inner1d, a, b, axes=[-1, -1, (-1,)])
+ assert_raises(ValueError, inner1d, a, b, axes=[-1, (-2, -1), ()])
# axes need to have same length.
- assert_raises(ValueError, in1d, a, b, axes=[0, 1])
+ assert_raises(ValueError, inner1d, a, b, axes=[0, 1])
# matrix_multiply signature: '(m,n),(n,p)->(m,p)'
mm = umt.matrix_multiply
@@ -697,6 +709,73 @@ class TestUfunc(object):
assert_raises(ValueError, mm, z[1], z, axes=[0, 1])
assert_raises(ValueError, mm, z, z, out=z[0], axes=[0, 1])
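
The `axes` argument remaps a generalized ufunc's core dimensions onto arbitrary operand axes; a minimal sketch using the same private test gufunc as the suite (numpy.core._umath_tests is internal, so this is illustration only):

    import numpy as np
    import numpy.core._umath_tests as umt

    a = np.arange(27.).reshape(3, 3, 3)
    b = np.arange(10., 19.).reshape(3, 1, 3)
    # inner1d has signature '(i),(i)->()'; here axis 0 of 'a' and
    # axis 2 of 'b' play the role of 'i'
    c = umt.inner1d(a, b, axes=[0, 2])
    assert np.allclose(c, (a.transpose(1, 2, 0) * b).sum(-1))
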
+ def test_keepdims_argument(self):
+ # inner1d signature: '(i),(i)->()'
+ inner1d = umt.inner1d
+ a = np.arange(27.).reshape((3, 3, 3))
+ b = np.arange(10., 19.).reshape((3, 1, 3))
+ c = inner1d(a, b)
+ assert_array_equal(c, (a * b).sum(-1))
+ c = inner1d(a, b, keepdims=False)
+ assert_array_equal(c, (a * b).sum(-1))
+ c = inner1d(a, b, keepdims=True)
+ assert_array_equal(c, (a * b).sum(-1, keepdims=True))
+ out = np.zeros_like(c)
+ d = inner1d(a, b, keepdims=True, out=out)
+ assert_(d is out)
+ assert_array_equal(d, c)
+ # Now combined with axes.
+ c = inner1d(a, b, axes=[(-1,), (-1,), ()], keepdims=False)
+ assert_array_equal(c, (a * b).sum(-1))
+ c = inner1d(a, b, axes=[(-1,), (-1,), (-1,)], keepdims=True)
+ assert_array_equal(c, (a * b).sum(-1, keepdims=True))
+ c = inner1d(a, b, axes=[0, 0], keepdims=False)
+ assert_array_equal(c, (a * b).sum(0))
+ c = inner1d(a, b, axes=[0, 0, 0], keepdims=True)
+ assert_array_equal(c, (a * b).sum(0, keepdims=True))
+ c = inner1d(a, b, axes=[0, 2], keepdims=False)
+ assert_array_equal(c, (a.transpose(1, 2, 0) * b).sum(-1))
+ c = inner1d(a, b, axes=[0, 2], keepdims=True)
+ assert_array_equal(c, (a.transpose(1, 2, 0) * b).sum(-1,
+ keepdims=True))
+ c = inner1d(a, b, axes=[0, 2, 2], keepdims=True)
+ assert_array_equal(c, (a.transpose(1, 2, 0) * b).sum(-1,
+ keepdims=True))
+ c = inner1d(a, b, axes=[0, 2, 0], keepdims=True)
+ assert_array_equal(c, (a * b.transpose(2, 0, 1)).sum(0, keepdims=True))
+ # Hardly useful, but should work.
+ c = inner1d(a, b, axes=[0, 2, 1], keepdims=True)
+ assert_array_equal(c, (a.transpose(1, 0, 2) * b.transpose(0, 2, 1))
+ .sum(1, keepdims=True))
+ # Check with two core dimensions.
+ a = np.eye(3) * np.arange(4.)[:, np.newaxis, np.newaxis]
+ expected = uml.det(a)
+ c = uml.det(a, keepdims=False)
+ assert_array_equal(c, expected)
+ c = uml.det(a, keepdims=True)
+ assert_array_equal(c, expected[:, np.newaxis, np.newaxis])
+ a = np.eye(3) * np.arange(4.)[:, np.newaxis, np.newaxis]
+ expected_s, expected_l = uml.slogdet(a)
+ cs, cl = uml.slogdet(a, keepdims=False)
+ assert_array_equal(cs, expected_s)
+ assert_array_equal(cl, expected_l)
+ cs, cl = uml.slogdet(a, keepdims=True)
+ assert_array_equal(cs, expected_s[:, np.newaxis, np.newaxis])
+ assert_array_equal(cl, expected_l[:, np.newaxis, np.newaxis])
+ # Sanity check on innerwt.
+ a = np.arange(6).reshape((2, 3))
+ b = np.arange(10, 16).reshape((2, 3))
+ w = np.arange(20, 26).reshape((2, 3))
+ assert_array_equal(umt.innerwt(a, b, w, keepdims=True),
+ np.sum(a * b * w, axis=-1, keepdims=True))
+ # Check errors.
+ # Not a boolean
+ assert_raises(TypeError, inner1d, a, b, keepdims='true')
+ # 1 core dimension only.
+ mm = umt.matrix_multiply
+ assert_raises(TypeError, mm, a, b, keepdims=True)
+ assert_raises(TypeError, mm, a, b, keepdims=False)
+
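
`keepdims` reinserts the removed core dimensions as size-1 axes; per the error checks above it is only accepted when the output has no core dimensions of its own, which is why matrix_multiply rejects it. A minimal sketch:

    import numpy as np
    import numpy.core._umath_tests as umt  # private test gufunc, illustration only

    a = np.arange(27.).reshape(3, 3, 3)
    b = np.arange(10., 19.).reshape(3, 1, 3)
    c = umt.inner1d(a, b, keepdims=True)
    assert c.shape == (3, 3, 1)  # the '(i)' core axis kept as length 1
    assert np.allclose(c, (a * b).sum(-1, keepdims=True))
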
def test_innerwt(self):
a = np.arange(6).reshape((2, 3))
b = np.arange(10, 16).reshape((2, 3))
@@ -814,6 +893,21 @@ class TestUfunc(object):
assert_equal(np.logical_or.reduce(a), 3)
assert_equal(np.logical_and.reduce(a), None)
+ def test_object_comparison(self):
+ class HasComparisons(object):
+ def __eq__(self, other):
+ return '=='
+
+ arr0d = np.array(HasComparisons())
+ assert_equal(arr0d == arr0d, True)
+ assert_equal(np.equal(arr0d, arr0d), True) # normal behavior is a cast
+ assert_equal(np.equal(arr0d, arr0d, dtype=object), '==')
+
+ arr1d = np.array([HasComparisons()])
+ assert_equal(arr1d == arr1d, np.array([True]))
+ assert_equal(np.equal(arr1d, arr1d), np.array([True])) # normal behavior is a cast
+ assert_equal(np.equal(arr1d, arr1d, dtype=object), np.array(['==']))
+
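
Passing `dtype=object` keeps np.equal in the object loop, so a rich `__eq__` return value survives instead of being cast to bool; a minimal sketch mirroring the test:

    import numpy as np

    class HasComparisons(object):
        def __eq__(self, other):
            return '=='

    arr = np.array([HasComparisons()])
    assert (arr == arr).tolist() == [True]                      # cast to bool
    assert np.equal(arr, arr, dtype=object).tolist() == ['==']  # raw result
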
def test_object_array_reduction(self):
# Reductions on object arrays
a = np.array(['a', 'b', 'c'], dtype=object)
@@ -829,6 +923,7 @@ class TestUfunc(object):
assert_equal(np.min(a), False)
assert_equal(np.array([[1]], dtype=object).sum(), 1)
assert_equal(np.array([[[1, 2]]], dtype=object).sum((0, 1)), [1, 2])
+ assert_equal(np.array([1], dtype=object).sum(initial=1), 2)
def test_object_array_accumulate_inplace(self):
# Checks that in-place accumulates work, see also gh-7402
@@ -865,13 +960,6 @@ class TestUfunc(object):
np.add.reduceat(arr, np.arange(4), out=arr, axis=-1)
assert_array_equal(arr, out)
- def test_object_scalar_multiply(self):
- # Tickets #2469 and #4482
- arr = np.matrix([1, 2], dtype=object)
- desired = np.matrix([[3, 6]], dtype=object)
- assert_equal(np.multiply(arr, 3), desired)
- assert_equal(np.multiply(3, arr), desired)
-
def test_zerosize_reduction(self):
# Test with default dtype and object dtype
for a in [[], np.array([], dtype=object)]:
@@ -972,7 +1060,7 @@ class TestUfunc(object):
assert_equal(np.sqrt(a, where=m), [1])
def check_identityless_reduction(self, a):
- # np.minimum.reduce is a identityless reduction
+ # np.minimum.reduce is an identityless reduction
# Verify that it sees the zero at various positions
a[...] = 1
@@ -1041,6 +1129,35 @@ class TestUfunc(object):
a = a[1:, 1:, 1:]
self.check_identityless_reduction(a)
+ def test_initial_reduction(self):
+ # np.minimum.reduce is an identityless reduction
+
+ # For cases like np.maximum.reduce(np.abs(...), initial=0)
+ # More generally, a supremum over non-negative numbers.
+ assert_equal(np.maximum.reduce([], initial=0), 0)
+
+ # For cases like reduction of an empty array over the reals.
+ assert_equal(np.minimum.reduce([], initial=np.inf), np.inf)
+ assert_equal(np.maximum.reduce([], initial=-np.inf), -np.inf)
+
+ # Random tests
+ assert_equal(np.minimum.reduce([5], initial=4), 4)
+ assert_equal(np.maximum.reduce([4], initial=5), 5)
+ assert_equal(np.maximum.reduce([5], initial=4), 5)
+ assert_equal(np.minimum.reduce([4], initial=5), 4)
+
+ # Check initial=None raises ValueError for both types of ufunc reductions
+ assert_raises(ValueError, np.minimum.reduce, [], initial=None)
+ assert_raises(ValueError, np.add.reduce, [], initial=None)
+
+ # Check that np._NoValue gives default behavior.
+ assert_equal(np.add.reduce([], initial=np._NoValue), 0)
+
+ # Check that initial kwarg behaves as intended for dtype=object
+ a = np.array([10], dtype=object)
+ res = np.add.reduce(a, initial=5)
+ assert_equal(res, 15)
+
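
For identityless reductions, `initial` both seeds the computation and makes empty reductions well defined; a minimal sketch:

    import numpy as np

    # np.maximum has no identity, so reducing [] normally raises
    assert np.maximum.reduce([], initial=-np.inf) == -np.inf
    # a supremum over non-negative numbers
    assert np.maximum.reduce(np.abs(np.array([-3., 1.])), initial=0) == 3.0
    assert np.add.reduce([], initial=5) == 5
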
def test_identityless_reduction_nonreorderable(self):
a = np.array([[8.0, 2.0, 2.0], [1.0, 0.5, 0.25]])
@@ -1392,15 +1509,18 @@ class TestUfunc(object):
assert_equal(f(d, 0, None, None), r)
assert_equal(f(d, 0, None, None, keepdims=False), r)
assert_equal(f(d, 0, None, None, True), r.reshape((1,) + r.shape))
+ assert_equal(f(d, 0, None, None, False, 0), r)
+ assert_equal(f(d, 0, None, None, False, initial=0), r)
# multiple keywords
assert_equal(f(d, axis=0, dtype=None, out=None, keepdims=False), r)
assert_equal(f(d, 0, dtype=None, out=None, keepdims=False), r)
assert_equal(f(d, 0, None, out=None, keepdims=False), r)
+ assert_equal(f(d, 0, None, out=None, keepdims=False, initial=0), r)
# too little
assert_raises(TypeError, f)
# too much
- assert_raises(TypeError, f, d, 0, None, None, False, 1)
+ assert_raises(TypeError, f, d, 0, None, None, False, 0, 1)
# invalid axis
assert_raises(TypeError, f, d, "invalid")
assert_raises(TypeError, f, d, axis="invalid")
diff --git a/numpy/core/tests/test_umath.py b/numpy/core/tests/test_umath.py
index 9da6abd4b..2a42b1ed1 100644
--- a/numpy/core/tests/test_umath.py
+++ b/numpy/core/tests/test_umath.py
@@ -1328,6 +1328,17 @@ class TestMinMax(object):
assert_equal(d.max(), d[0])
assert_equal(d.min(), d[0])
+ def test_reduce_warns(self):
+ # gh 10370, 11029 Some compilers reorder the call to npy_getfloatstatus
+ # and put it before the call to an intrinsic function that causes
+ # invalid status to be set. Also make sure warnings are emitted
+ for n in (2, 4, 8, 16, 32):
+ with suppress_warnings() as sup:
+ sup.record(RuntimeWarning)
+ for r in np.diagflat([np.nan] * n):
+ assert_equal(np.min(r), np.nan)
+ assert_equal(len(sup.log), n)
+
class TestAbsoluteNegative(object):
def test_abs_neg_blocked(self):
@@ -1413,6 +1424,57 @@ class TestSpecialMethods(object):
assert_equal(args[1], a)
assert_equal(i, 0)
+ def test_wrap_and_prepare_out(self):
+ # Calling convention for out should not affect how special methods are
+ # called
+
+ class StoreArrayPrepareWrap(np.ndarray):
+ _wrap_args = None
+ _prepare_args = None
+ def __new__(cls):
+ return np.empty(()).view(cls)
+ def __array_wrap__(self, obj, context):
+ self._wrap_args = context[1]
+ return obj
+ def __array_prepare__(self, obj, context):
+ self._prepare_args = context[1]
+ return obj
+ @property
+ def args(self):
+ # We need to ensure these are fetched at the same time, before
+ # any other ufuncs are called by the assertions
+ return (self._prepare_args, self._wrap_args)
+ def __repr__(self):
+ return "a" # for short test output
+
+ def do_test(f_call, f_expected):
+ a = StoreArrayPrepareWrap()
+ f_call(a)
+ p, w = a.args
+ expected = f_expected(a)
+ try:
+ assert_equal(p, expected)
+ assert_equal(w, expected)
+ except AssertionError as e:
+ # assert_equal produces truly useless error messages
+ raise AssertionError("\n".join([
+ "Bad arguments passed in ufunc call",
+ " expected: {}".format(expected),
+ " __array_prepare__ got: {}".format(p),
+ " __array_wrap__ got: {}".format(w)
+ ]))
+
+ # method not on the out argument
+ do_test(lambda a: np.add(a, 0), lambda a: (a, 0))
+ do_test(lambda a: np.add(a, 0, None), lambda a: (a, 0))
+ do_test(lambda a: np.add(a, 0, out=None), lambda a: (a, 0))
+ do_test(lambda a: np.add(a, 0, out=(None,)), lambda a: (a, 0))
+
+ # method on the out argument
+ do_test(lambda a: np.add(0, 0, a), lambda a: (0, 0, a))
+ do_test(lambda a: np.add(0, 0, out=a), lambda a: (0, 0, a))
+ do_test(lambda a: np.add(0, 0, out=(a,)), lambda a: (0, 0, a))
+
def test_wrap_with_iterable(self):
# test fix for bug #1026:
@@ -1613,13 +1675,16 @@ class TestSpecialMethods(object):
assert_equal(ncu.maximum(a, C()), 0)
def test_ufunc_override(self):
-
+ # check override works even with instance with high priority.
class A(object):
def __array_ufunc__(self, func, method, *inputs, **kwargs):
return self, func, method, inputs, kwargs
+ class MyNDArray(np.ndarray):
+ __array_priority__ = 100
+
a = A()
- b = np.matrix([1])
+ b = np.array([1]).view(MyNDArray)
res0 = np.multiply(a, b)
res1 = np.multiply(b, b, out=a)
@@ -1759,7 +1824,7 @@ class TestSpecialMethods(object):
# reduce, kwargs
res = np.multiply.reduce(a, axis='axis0', dtype='dtype0', out='out0',
- keepdims='keep0')
+ keepdims='keep0', initial='init0')
assert_equal(res[0], a)
assert_equal(res[1], np.multiply)
assert_equal(res[2], 'reduce')
@@ -1767,7 +1832,8 @@ class TestSpecialMethods(object):
assert_equal(res[4], {'dtype':'dtype0',
'out': ('out0',),
'keepdims': 'keep0',
- 'axis': 'axis0'})
+ 'axis': 'axis0',
+ 'initial': 'init0'})
# reduce, output equal to None removed, but not other explicit ones,
# even if they are at their default value.
@@ -1777,6 +1843,14 @@ class TestSpecialMethods(object):
assert_equal(res[4], {'axis': 0, 'keepdims': True})
res = np.multiply.reduce(a, None, out=(None,), dtype=None)
assert_equal(res[4], {'axis': None, 'dtype': None})
+ res = np.multiply.reduce(a, 0, None, None, False, 2)
+ assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False, 'initial': 2})
+ # np._NoValue ignored for initial.
+ res = np.multiply.reduce(a, 0, None, None, False, np._NoValue)
+ assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False})
+ # None kept for initial.
+ res = np.multiply.reduce(a, 0, None, None, False, None)
+ assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False, 'initial': None})
# reduce, wrong args
assert_raises(ValueError, np.multiply.reduce, a, out=())
diff --git a/numpy/distutils/misc_util.py b/numpy/distutils/misc_util.py
index 1d08942f6..41f0b1f61 100644
--- a/numpy/distutils/misc_util.py
+++ b/numpy/distutils/misc_util.py
@@ -256,6 +256,11 @@ def minrelpath(path):
return ''
return os.sep.join(l)
+def sorted_glob(fileglob):
+ """sorts output of python glob for http://bugs.python.org/issue30461
+ to allow extensions to have reproducible build results"""
+ return sorted(glob.glob(fileglob))
+
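
glob.glob returns entries in filesystem order, so two checkouts can hand the compiler differently ordered source lists; sorting fixes that. A usage sketch:

    import glob

    # equivalent to sorted_glob('*.h'): a stable, reproducible header list
    headers = sorted(glob.glob('*.h'))
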
def _fix_paths(paths, local_path, include_non_existing):
assert is_sequence(paths), repr(type(paths))
new_paths = []
@@ -263,8 +268,8 @@ def _fix_paths(paths, local_path, include_non_existing):
for n in paths:
if is_string(n):
if '*' in n or '?' in n:
- p = glob.glob(n)
- p2 = glob.glob(njoin(local_path, n))
+ p = sorted_glob(n)
+ p2 = sorted_glob(njoin(local_path, n))
if p2:
new_paths.extend(p2)
elif p:
@@ -528,7 +533,7 @@ def _get_headers(directory_list):
# get *.h files from list of directories
headers = []
for d in directory_list:
- head = glob.glob(os.path.join(d, "*.h")) #XXX: *.hpp files??
+ head = sorted_glob(os.path.join(d, "*.h")) #XXX: *.hpp files??
headers.extend(head)
return headers
@@ -882,7 +887,7 @@ class Configuration(object):
caller_level = 1):
l = subpackage_name.split('.')
subpackage_path = njoin([self.local_path]+l)
- dirs = [_m for _m in glob.glob(subpackage_path) if os.path.isdir(_m)]
+ dirs = [_m for _m in sorted_glob(subpackage_path) if os.path.isdir(_m)]
config_list = []
for d in dirs:
if not os.path.isfile(njoin(d, '__init__.py')):
@@ -1218,15 +1223,15 @@ class Configuration(object):
#. file.txt -> (., file.txt)-> parent/file.txt
#. foo/file.txt -> (foo, foo/file.txt) -> parent/foo/file.txt
#. /foo/bar/file.txt -> (., /foo/bar/file.txt) -> parent/file.txt
- #. *.txt -> parent/a.txt, parent/b.txt
- #. foo/*.txt -> parent/foo/a.txt, parent/foo/b.txt
- #. */*.txt -> (*, */*.txt) -> parent/c/a.txt, parent/d/b.txt
+ #. ``*``.txt -> parent/a.txt, parent/b.txt
+ #. foo/``*``.txt -> parent/foo/a.txt, parent/foo/b.txt
+ #. ``*/*.txt`` -> (``*``, ``*``/``*``.txt) -> parent/c/a.txt, parent/d/b.txt
#. (sun, file.txt) -> parent/sun/file.txt
#. (sun, bar/file.txt) -> parent/sun/file.txt
#. (sun, /foo/bar/file.txt) -> parent/sun/file.txt
- #. (sun, *.txt) -> parent/sun/a.txt, parent/sun/b.txt
- #. (sun, bar/*.txt) -> parent/sun/a.txt, parent/sun/b.txt
- #. (sun/*, */*.txt) -> parent/sun/c/a.txt, parent/d/b.txt
+ #. (sun, ``*``.txt) -> parent/sun/a.txt, parent/sun/b.txt
+ #. (sun, bar/``*``.txt) -> parent/sun/a.txt, parent/sun/b.txt
+ #. (sun/``*``, ``*``/``*``.txt) -> parent/sun/c/a.txt, parent/d/b.txt
An additional feature is that the path to a data-file can actually be
a function that takes no arguments and returns the actual path(s) to
diff --git a/numpy/distutils/system_info.py b/numpy/distutils/system_info.py
index d12381028..65d7de316 100644
--- a/numpy/distutils/system_info.py
+++ b/numpy/distutils/system_info.py
@@ -385,6 +385,7 @@ def get_info(name, notfound_action=0):
'blis': blis_info, # use blas_opt instead
'lapack_mkl': lapack_mkl_info, # use lapack_opt instead
'blas_mkl': blas_mkl_info, # use blas_opt instead
+ 'accelerate': accelerate_info, # use blas_opt instead
'x11': x11_info,
'fft_opt': fft_opt_info,
'fftw': fftw_info,
@@ -1551,39 +1552,10 @@ class lapack_opt_info(system_info):
if not atlas_info:
atlas_info = get_info('atlas')
- if sys.platform == 'darwin' \
- and not os.getenv('_PYTHON_HOST_PLATFORM', None) \
- and not (atlas_info or openblas_info or
- lapack_mkl_info):
- # Use the system lapack from Accelerate or vecLib under OSX
- args = []
- link_args = []
- if get_platform()[-4:] == 'i386' or 'intel' in get_platform() or \
- 'x86_64' in get_platform() or \
- 'i386' in platform.platform():
- intel = 1
- else:
- intel = 0
- if os.path.exists('/System/Library/Frameworks'
- '/Accelerate.framework/'):
- if intel:
- args.extend(['-msse3'])
- else:
- args.extend(['-faltivec'])
- link_args.extend(['-Wl,-framework', '-Wl,Accelerate'])
- elif os.path.exists('/System/Library/Frameworks'
- '/vecLib.framework/'):
- if intel:
- args.extend(['-msse3'])
- else:
- args.extend(['-faltivec'])
- link_args.extend(['-Wl,-framework', '-Wl,vecLib'])
- if args:
- self.set_info(extra_compile_args=args,
- extra_link_args=link_args,
- define_macros=[('NO_ATLAS_INFO', 3),
- ('HAVE_CBLAS', None)])
- return
+ accelerate_info = get_info('accelerate')
+ if accelerate_info and not atlas_info:
+ self.set_info(**accelerate_info)
+ return
need_lapack = 0
need_blas = 0
@@ -1659,43 +1631,10 @@ class blas_opt_info(system_info):
if not atlas_info:
atlas_info = get_info('atlas_blas')
- if sys.platform == 'darwin' \
- and not os.getenv('_PYTHON_HOST_PLATFORM', None) \
- and not (atlas_info or openblas_info or
- blas_mkl_info or blis_info):
- # Use the system BLAS from Accelerate or vecLib under OSX
- args = []
- link_args = []
- if get_platform()[-4:] == 'i386' or 'intel' in get_platform() or \
- 'x86_64' in get_platform() or \
- 'i386' in platform.platform():
- intel = 1
- else:
- intel = 0
- if os.path.exists('/System/Library/Frameworks'
- '/Accelerate.framework/'):
- if intel:
- args.extend(['-msse3'])
- else:
- args.extend(['-faltivec'])
- args.extend([
- '-I/System/Library/Frameworks/vecLib.framework/Headers'])
- link_args.extend(['-Wl,-framework', '-Wl,Accelerate'])
- elif os.path.exists('/System/Library/Frameworks'
- '/vecLib.framework/'):
- if intel:
- args.extend(['-msse3'])
- else:
- args.extend(['-faltivec'])
- args.extend([
- '-I/System/Library/Frameworks/vecLib.framework/Headers'])
- link_args.extend(['-Wl,-framework', '-Wl,vecLib'])
- if args:
- self.set_info(extra_compile_args=args,
- extra_link_args=link_args,
- define_macros=[('NO_ATLAS_INFO', 3),
- ('HAVE_CBLAS', None)])
- return
+ accelerate_info = get_info('accelerate')
+ if accelerate_info and not atlas_info:
+ self.set_info(**accelerate_info)
+ return
need_blas = 0
info = {}
@@ -1882,7 +1821,7 @@ class openblas_lapack_info(openblas_info):
c = customized_ccompiler()
tmpdir = tempfile.mkdtemp()
- s = """void zungqr();
+ s = """void zungqr_();
int main(int argc, const char *argv[])
{
zungqr_();
@@ -1939,6 +1878,58 @@ class blis_info(blas_info):
include_dirs=incl_dirs)
self.set_info(**info)
+class accelerate_info(system_info):
+ section = 'accelerate'
+ notfounderror = BlasNotFoundError
+
+ def calc_info(self):
+ # Make it possible to enable/disable from a config file or an env var
+ libraries = os.environ.get('ACCELERATE')
+ if libraries:
+ libraries = [libraries]
+ else:
+ libraries = self.get_libs('libraries', ['accelerate', 'veclib'])
+ libraries = [lib.strip().lower() for lib in libraries]
+
+ if (sys.platform == 'darwin' and
+ not os.getenv('_PYTHON_HOST_PLATFORM', None)):
+ # Use the system BLAS from Accelerate or vecLib under OSX
+ args = []
+ link_args = []
+ if get_platform()[-4:] == 'i386' or 'intel' in get_platform() or \
+ 'x86_64' in get_platform() or \
+ 'i386' in platform.platform():
+ intel = 1
+ else:
+ intel = 0
+ if (os.path.exists('/System/Library/Frameworks'
+ '/Accelerate.framework/') and
+ 'accelerate' in libraries):
+ if intel:
+ args.extend(['-msse3'])
+ else:
+ args.extend(['-faltivec'])
+ args.extend([
+ '-I/System/Library/Frameworks/vecLib.framework/Headers'])
+ link_args.extend(['-Wl,-framework', '-Wl,Accelerate'])
+ elif (os.path.exists('/System/Library/Frameworks'
+ '/vecLib.framework/') and
+ 'veclib' in libraries):
+ if intel:
+ args.extend(['-msse3'])
+ else:
+ args.extend(['-faltivec'])
+ args.extend([
+ '-I/System/Library/Frameworks/vecLib.framework/Headers'])
+ link_args.extend(['-Wl,-framework', '-Wl,vecLib'])
+
+ if args:
+ self.set_info(extra_compile_args=args,
+ extra_link_args=link_args,
+ define_macros=[('NO_ATLAS_INFO', 3),
+ ('HAVE_CBLAS', None)])
+
+ return
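
With accelerate_info in place, the framework choice can be steered through the usual system_info channels; a sketch of the knobs read by calc_info above (values are examples, not requirements):

    import os

    # prefer vecLib over Accelerate for this build
    os.environ['ACCELERATE'] = 'veclib'

    # or equivalently in site.cfg:
    #   [accelerate]
    #   libraries = veclib
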
class blas_src_info(system_info):
section = 'blas_src'
diff --git a/numpy/doc/constants.py b/numpy/doc/constants.py
index f3b835085..21c7a3c67 100644
--- a/numpy/doc/constants.py
+++ b/numpy/doc/constants.py
@@ -296,8 +296,9 @@ add_newdoc('numpy', 'nan',
See Also
--------
isnan : Shows which elements are Not a Number.
+
isfinite : Shows which elements are finite (not one of
- Not a Number, positive infinity and negative infinity)
+ Not a Number, positive infinity and negative infinity)
Notes
-----
diff --git a/numpy/doc/glossary.py b/numpy/doc/glossary.py
index 9b7d613ba..0e1df495b 100644
--- a/numpy/doc/glossary.py
+++ b/numpy/doc/glossary.py
@@ -48,7 +48,7 @@ Glossary
array([(1, 2.0), (3, 4.0)],
dtype=[('x', '<i4'), ('y', '<f8')])
- Fast element-wise operations, called :term:`ufuncs`, operate on arrays.
+ Fast element-wise operations, each called a :term:`ufunc`, operate on arrays.
array_like
Any sequence that can be interpreted as an ndarray. This includes
@@ -62,6 +62,12 @@ Glossary
>>> x.shape
(3,)
+ big-endian
+ When storing a multi-byte value in memory as a sequence of bytes, the
+ sequence addresses/sends/stores the most significant byte first (lowest
+ address) and the least significant byte last (highest address). Common in
+ microprocessors and used for transmission of data over network protocols.
+
BLAS
`Basic Linear Algebra Subprograms <http://en.wikipedia.org/wiki/BLAS>`_
@@ -151,6 +157,11 @@ Glossary
For more information on dictionaries, read the
`Python tutorial <http://docs.python.org/tut>`_.
+ field
+ In a :term:`structured data type`, each sub-type is called a `field`.
+ The `field` has a name (a string), a type (any valid :term:`dtype`), and
+ an optional `title`. See :ref:`arrays.dtypes`.
+
Fortran order
See `column-major`
@@ -158,6 +169,12 @@ Glossary
Collapsed to a one-dimensional array. See `numpy.ndarray.flatten`
for details.
+ homogenous
+ Describes a block of memory made up of equally sized blocks of items,
+ every block interpreted in exactly the same way. In the simplest case
+ each block contains a single item, for instance int32 or float64.
+
immutable
An object that cannot be modified after execution is called
immutable. Two common examples are strings and tuples.
@@ -224,6 +241,12 @@ Glossary
tutorial <http://docs.python.org/tut>`_. For a mapping
type (key-value), see *dictionary*.
+ little-endian
+ When storing a multi-byte value in memory as a sequence of bytes, the
+ sequence addresses/sends/stores the least significant byte first (lowest
+ address) and the most significant byte last (highest address). Common in
+ x86 processors.
+
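
The two entries above map directly onto dtype byte-order flags; a minimal sketch:

    import numpy as np

    x = np.array([1], dtype='<u2')    # little-endian: low byte first
    assert x.tobytes() == b'\x01\x00'
    assert x.astype('>u2').tobytes() == b'\x00\x01'  # big-endian: high byte first
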
mask
A boolean array, used to select only certain elements for an operation::
@@ -285,7 +308,7 @@ Glossary
See *array*.
record array
- An :term:`ndarray` with :term:`structured data type`_ which has been
+ An :term:`ndarray` with :term:`structured data type` which has been
subclassed as ``np.recarray`` and whose dtype is of type ``np.record``,
making the fields of its data type to be accessible by attribute.
@@ -350,6 +373,9 @@ Glossary
>>> x[:, 1]
array([2, 4])
+ structure
+ See :term:`structured data type`
+
structured data type
A data type composed of other datatypes
diff --git a/numpy/doc/misc.py b/numpy/doc/misc.py
index 5d6708a0d..24369871c 100644
--- a/numpy/doc/misc.py
+++ b/numpy/doc/misc.py
@@ -209,7 +209,7 @@ Only a survey of the choices. Little detail on how each works.
Interfacing to Fortran:
-----------------------
The clear choice to wrap Fortran code is
-`f2py <http://docs.scipy.org/doc/numpy-dev/f2py/>`_.
+`f2py <http://docs.scipy.org/doc/numpy/f2py/>`_.
Pyfort is an older alternative, but not supported any longer.
Fwrap is a newer project that looked promising but isn't being developed any
diff --git a/numpy/doc/structured_arrays.py b/numpy/doc/structured_arrays.py
index af02e2173..ba667da59 100644
--- a/numpy/doc/structured_arrays.py
+++ b/numpy/doc/structured_arrays.py
@@ -284,7 +284,7 @@ the desired underlying dtype, and fields and flags will be copied from
``dtype``. This dtype is similar to a 'union' in C.
Indexing and Assignment to Structured arrays
-=============================================
+============================================
Assigning data to a Structured Array
------------------------------------
@@ -293,7 +293,7 @@ There are a number of ways to assign values to a structured array: Using python
tuples, using scalar values, or using other structured arrays.
Assignment from Python Native Types (Tuples)
-```````````````````````````````````````````
+````````````````````````````````````````````
The simplest way to assign values to a structured array is using python tuples.
Each assigned value should be a tuple of length equal to the number of fields
diff --git a/numpy/f2py/src/fortranobject.c b/numpy/f2py/src/fortranobject.c
index dd2484eb4..78b06f066 100644
--- a/numpy/f2py/src/fortranobject.c
+++ b/numpy/f2py/src/fortranobject.c
@@ -539,7 +539,7 @@ void f2py_report_on_exit(int exit_flag,void *name) {
fprintf(stderr,"(d) f2py call-back interface, %6d calls : %8d msec\n",
cb_passed_counter,cb_passed_time);
- fprintf(stderr,"(e) wrapped (Fortran/C) functions (acctual) : %8d msec\n\n",
+ fprintf(stderr,"(e) wrapped (Fortran/C) functions (actual) : %8d msec\n\n",
passed_call_time-cb_passed_call_time-cb_passed_time);
fprintf(stderr,"Use -DF2PY_REPORT_ATEXIT_DISABLE to disable this message.\n");
fprintf(stderr,"Exit status: %d\n",exit_flag);
diff --git a/numpy/fft/fftpack_litemodule.c b/numpy/fft/fftpack_litemodule.c
index dfa0d211b..bd6cfc120 100644
--- a/numpy/fft/fftpack_litemodule.c
+++ b/numpy/fft/fftpack_litemodule.c
@@ -330,10 +330,10 @@ static struct PyModuleDef moduledef = {
/* Initialization function for the module */
#if PY_MAJOR_VERSION >= 3
-#define RETVAL m
+#define RETVAL(x) x
PyMODINIT_FUNC PyInit_fftpack_lite(void)
#else
-#define RETVAL
+#define RETVAL(x)
PyMODINIT_FUNC
initfftpack_lite(void)
#endif
@@ -348,6 +348,9 @@ initfftpack_lite(void)
fftpack_module_documentation,
(PyObject*)NULL,PYTHON_API_VERSION);
#endif
+ if (m == NULL) {
+ return RETVAL(NULL);
+ }
/* Import the array object */
import_array();
@@ -359,5 +362,5 @@ initfftpack_lite(void)
/* XXXX Add constants here */
- return RETVAL;
+ return RETVAL(m);
}
diff --git a/numpy/lib/_version.py b/numpy/lib/_version.py
index 0019c5607..c3563a7fa 100644
--- a/numpy/lib/_version.py
+++ b/numpy/lib/_version.py
@@ -45,7 +45,7 @@ class NumpyVersion():
Examples
--------
>>> from numpy.lib import NumpyVersion
- >>> if NumpyVersion(np.__version__) < '1.7.0'):
+ >>> if NumpyVersion(np.__version__) < '1.7.0':
... print('skip')
skip
diff --git a/numpy/lib/arraypad.py b/numpy/lib/arraypad.py
index daaa68d06..e9ca9de4d 100644
--- a/numpy/lib/arraypad.py
+++ b/numpy/lib/arraypad.py
@@ -74,6 +74,35 @@ def _round_ifneeded(arr, dtype):
arr.round(out=arr)
+def _slice_at_axis(shape, sl, axis):
+ """
+ Construct a slice tuple the length of shape, with sl at the specified axis
+ """
+ slice_tup = (slice(None),)
+ return slice_tup * axis + (sl,) + slice_tup * (len(shape) - axis - 1)
+
+
+def _slice_first(shape, n, axis):
+ """ Construct a slice tuple to take the first n elements along axis """
+ return _slice_at_axis(shape, slice(0, n), axis=axis)
+
+
+def _slice_last(shape, n, axis):
+ """ Construct a slice tuple to take the last n elements along axis """
+ dim = shape[axis] # doing this explicitly makes n=0 work
+ return _slice_at_axis(shape, slice(dim - n, dim), axis=axis)
+
+
+def _do_prepend(arr, pad_chunk, axis):
+ return np.concatenate(
+ (pad_chunk.astype(arr.dtype, copy=False), arr), axis=axis)
+
+
+def _do_append(arr, pad_chunk, axis):
+ return np.concatenate(
+ (arr, pad_chunk.astype(arr.dtype, copy=False)), axis=axis)
+
+
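A quick sketch of what the new helpers build (illustrative only): `_slice_at_axis` produces a full-length index tuple with the interesting slice at one axis, so `_slice_first`/`_slice_last` replace the repeated `tuple(... for (i, x) in enumerate(arr.shape))` constructions below.

    import numpy as np

    def _slice_at_axis(shape, sl, axis):
        slice_tup = (slice(None),)
        return slice_tup * axis + (sl,) + slice_tup * (len(shape) - axis - 1)

    arr = np.arange(60).reshape(3, 4, 5)
    # Equivalent to arr[:, 0:2, :]
    assert arr[_slice_at_axis(arr.shape, slice(0, 2), axis=1)].shape == (3, 2, 5)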
def _prepend_const(arr, pad_amt, val, axis=-1):
"""
Prepend constant `val` along `axis` of `arr`.
@@ -100,12 +129,7 @@ def _prepend_const(arr, pad_amt, val, axis=-1):
return arr
padshape = tuple(x if i != axis else pad_amt
for (i, x) in enumerate(arr.shape))
- if val == 0:
- return np.concatenate((np.zeros(padshape, dtype=arr.dtype), arr),
- axis=axis)
- else:
- return np.concatenate(((np.zeros(padshape) + val).astype(arr.dtype),
- arr), axis=axis)
+ return _do_prepend(arr, np.full(padshape, val, dtype=arr.dtype), axis)
def _append_const(arr, pad_amt, val, axis=-1):
@@ -134,12 +158,8 @@ def _append_const(arr, pad_amt, val, axis=-1):
return arr
padshape = tuple(x if i != axis else pad_amt
for (i, x) in enumerate(arr.shape))
- if val == 0:
- return np.concatenate((arr, np.zeros(padshape, dtype=arr.dtype)),
- axis=axis)
- else:
- return np.concatenate(
- (arr, (np.zeros(padshape) + val).astype(arr.dtype)), axis=axis)
+ return _do_append(arr, np.full(padshape, val, dtype=arr.dtype), axis)
+
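The switch to `np.full` matters for exactness, not just brevity: the old `np.zeros(padshape) + val` path went through float64 for non-zero values, which cannot represent large 64-bit integers. A small sketch (illustrative only):

    import numpy as np

    val = 2**64 - 1
    # np.full fills in the target dtype directly, so the exact integer survives.
    assert np.full(3, val, dtype=np.uint64)[0] == val
    print(float(val) == val)   # False: 2**64 - 1 is not representable in float64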
def _prepend_edge(arr, pad_amt, axis=-1):
@@ -164,15 +184,9 @@ def _prepend_edge(arr, pad_amt, axis=-1):
if pad_amt == 0:
return arr
- edge_slice = tuple(slice(None) if i != axis else 0
- for (i, x) in enumerate(arr.shape))
-
- # Shape to restore singleton dimension after slicing
- pad_singleton = tuple(x if i != axis else 1
- for (i, x) in enumerate(arr.shape))
- edge_arr = arr[edge_slice].reshape(pad_singleton)
- return np.concatenate((edge_arr.repeat(pad_amt, axis=axis), arr),
- axis=axis)
+ edge_slice = _slice_first(arr.shape, 1, axis=axis)
+ edge_arr = arr[edge_slice]
+ return _do_prepend(arr, edge_arr.repeat(pad_amt, axis=axis), axis)
def _append_edge(arr, pad_amt, axis=-1):
@@ -198,15 +212,9 @@ def _append_edge(arr, pad_amt, axis=-1):
if pad_amt == 0:
return arr
- edge_slice = tuple(slice(None) if i != axis else arr.shape[axis] - 1
- for (i, x) in enumerate(arr.shape))
-
- # Shape to restore singleton dimension after slicing
- pad_singleton = tuple(x if i != axis else 1
- for (i, x) in enumerate(arr.shape))
- edge_arr = arr[edge_slice].reshape(pad_singleton)
- return np.concatenate((arr, edge_arr.repeat(pad_amt, axis=axis)),
- axis=axis)
+ edge_slice = _slice_last(arr.shape, 1, axis=axis)
+ edge_arr = arr[edge_slice]
+ return _do_append(arr, edge_arr.repeat(pad_amt, axis=axis), axis)
def _prepend_ramp(arr, pad_amt, end, axis=-1):
@@ -244,15 +252,10 @@ def _prepend_ramp(arr, pad_amt, end, axis=-1):
reverse=True).astype(np.float64)
# Appropriate slicing to extract n-dimensional edge along `axis`
- edge_slice = tuple(slice(None) if i != axis else 0
- for (i, x) in enumerate(arr.shape))
+ edge_slice = _slice_first(arr.shape, 1, axis=axis)
- # Shape to restore singleton dimension after slicing
- pad_singleton = tuple(x if i != axis else 1
- for (i, x) in enumerate(arr.shape))
-
- # Extract edge, reshape to original rank, and extend along `axis`
- edge_pad = arr[edge_slice].reshape(pad_singleton).repeat(pad_amt, axis)
+ # Extract edge, and extend along `axis`
+ edge_pad = arr[edge_slice].repeat(pad_amt, axis)
# Linear ramp
slope = (end - edge_pad) / float(pad_amt)
@@ -261,7 +264,7 @@ def _prepend_ramp(arr, pad_amt, end, axis=-1):
_round_ifneeded(ramp_arr, arr.dtype)
# Ramp values will most likely be float, cast them to the same type as arr
- return np.concatenate((ramp_arr.astype(arr.dtype), arr), axis=axis)
+ return _do_prepend(arr, ramp_arr, axis)
def _append_ramp(arr, pad_amt, end, axis=-1):
@@ -299,15 +302,10 @@ def _append_ramp(arr, pad_amt, end, axis=-1):
reverse=False).astype(np.float64)
# Slice a chunk from the edge to calculate stats on
- edge_slice = tuple(slice(None) if i != axis else -1
- for (i, x) in enumerate(arr.shape))
-
- # Shape to restore singleton dimension after slicing
- pad_singleton = tuple(x if i != axis else 1
- for (i, x) in enumerate(arr.shape))
+ edge_slice = _slice_last(arr.shape, 1, axis=axis)
- # Extract edge, reshape to original rank, and extend along `axis`
- edge_pad = arr[edge_slice].reshape(pad_singleton).repeat(pad_amt, axis)
+ # Extract edge, and extend along `axis`
+ edge_pad = arr[edge_slice].repeat(pad_amt, axis)
# Linear ramp
slope = (end - edge_pad) / float(pad_amt)
@@ -316,7 +314,7 @@ def _append_ramp(arr, pad_amt, end, axis=-1):
_round_ifneeded(ramp_arr, arr.dtype)
# Ramp values will most likely be float, cast them to the same type as arr
- return np.concatenate((arr, ramp_arr.astype(arr.dtype)), axis=axis)
+ return _do_append(arr, ramp_arr, axis)
def _prepend_max(arr, pad_amt, num, axis=-1):
@@ -356,19 +354,13 @@ def _prepend_max(arr, pad_amt, num, axis=-1):
num = None
# Slice a chunk from the edge to calculate stats on
- max_slice = tuple(slice(None) if i != axis else slice(num)
- for (i, x) in enumerate(arr.shape))
+ max_slice = _slice_first(arr.shape, num, axis=axis)
- # Shape to restore singleton dimension after slicing
- pad_singleton = tuple(x if i != axis else 1
- for (i, x) in enumerate(arr.shape))
-
- # Extract slice, calculate max, reshape to add singleton dimension back
- max_chunk = arr[max_slice].max(axis=axis).reshape(pad_singleton)
+ # Extract slice, calculate max
+ max_chunk = arr[max_slice].max(axis=axis, keepdims=True)
# Concatenate `arr` with `max_chunk`, extended along `axis` by `pad_amt`
- return np.concatenate((max_chunk.repeat(pad_amt, axis=axis), arr),
- axis=axis)
+ return _do_prepend(arr, max_chunk.repeat(pad_amt, axis=axis), axis)
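The `keepdims=True` form is a drop-in replacement for the old reshape-to-`pad_singleton` dance used throughout this file; a minimal check (illustrative only):

    import numpy as np

    a = np.arange(12).reshape(3, 4)
    # keepdims=True keeps the reduced axis as size 1, exactly what
    # max(axis).reshape(pad_singleton) reconstructed by hand.
    assert np.array_equal(a.max(axis=1, keepdims=True), a.max(axis=1).reshape(3, 1))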
def _append_max(arr, pad_amt, num, axis=-1):
@@ -407,24 +399,16 @@ def _append_max(arr, pad_amt, num, axis=-1):
num = None
# Slice a chunk from the edge to calculate stats on
- end = arr.shape[axis] - 1
if num is not None:
- max_slice = tuple(
- slice(None) if i != axis else slice(end, end - num, -1)
- for (i, x) in enumerate(arr.shape))
+ max_slice = _slice_last(arr.shape, num, axis=axis)
else:
max_slice = tuple(slice(None) for x in arr.shape)
- # Shape to restore singleton dimension after slicing
- pad_singleton = tuple(x if i != axis else 1
- for (i, x) in enumerate(arr.shape))
-
- # Extract slice, calculate max, reshape to add singleton dimension back
- max_chunk = arr[max_slice].max(axis=axis).reshape(pad_singleton)
+ # Extract slice, calculate max
+ max_chunk = arr[max_slice].max(axis=axis, keepdims=True)
# Concatenate `arr` with `max_chunk`, extended along `axis` by `pad_amt`
- return np.concatenate((arr, max_chunk.repeat(pad_amt, axis=axis)),
- axis=axis)
+ return _do_append(arr, max_chunk.repeat(pad_amt, axis=axis), axis)
def _prepend_mean(arr, pad_amt, num, axis=-1):
@@ -463,20 +447,14 @@ def _prepend_mean(arr, pad_amt, num, axis=-1):
num = None
# Slice a chunk from the edge to calculate stats on
- mean_slice = tuple(slice(None) if i != axis else slice(num)
- for (i, x) in enumerate(arr.shape))
+ mean_slice = _slice_first(arr.shape, num, axis=axis)
- # Shape to restore singleton dimension after slicing
- pad_singleton = tuple(x if i != axis else 1
- for (i, x) in enumerate(arr.shape))
-
- # Extract slice, calculate mean, reshape to add singleton dimension back
- mean_chunk = arr[mean_slice].mean(axis).reshape(pad_singleton)
+ # Extract slice, calculate mean
+ mean_chunk = arr[mean_slice].mean(axis, keepdims=True)
_round_ifneeded(mean_chunk, arr.dtype)
# Concatenate `arr` with `mean_chunk`, extended along `axis` by `pad_amt`
- return np.concatenate((mean_chunk.repeat(pad_amt, axis).astype(arr.dtype),
- arr), axis=axis)
+ return _do_prepend(arr, mean_chunk.repeat(pad_amt, axis), axis=axis)
def _append_mean(arr, pad_amt, num, axis=-1):
@@ -515,25 +493,17 @@ def _append_mean(arr, pad_amt, num, axis=-1):
num = None
# Slice a chunk from the edge to calculate stats on
- end = arr.shape[axis] - 1
if num is not None:
- mean_slice = tuple(
- slice(None) if i != axis else slice(end, end - num, -1)
- for (i, x) in enumerate(arr.shape))
+ mean_slice = _slice_last(arr.shape, num, axis=axis)
else:
mean_slice = tuple(slice(None) for x in arr.shape)
- # Shape to restore singleton dimension after slicing
- pad_singleton = tuple(x if i != axis else 1
- for (i, x) in enumerate(arr.shape))
-
- # Extract slice, calculate mean, reshape to add singleton dimension back
- mean_chunk = arr[mean_slice].mean(axis=axis).reshape(pad_singleton)
+ # Extract slice, calculate mean
+ mean_chunk = arr[mean_slice].mean(axis=axis, keepdims=True)
_round_ifneeded(mean_chunk, arr.dtype)
# Concatenate `arr` with `mean_chunk`, extended along `axis` by `pad_amt`
- return np.concatenate(
- (arr, mean_chunk.repeat(pad_amt, axis).astype(arr.dtype)), axis=axis)
+ return _do_append(arr, mean_chunk.repeat(pad_amt, axis), axis=axis)
def _prepend_med(arr, pad_amt, num, axis=-1):
@@ -572,20 +542,14 @@ def _prepend_med(arr, pad_amt, num, axis=-1):
num = None
# Slice a chunk from the edge to calculate stats on
- med_slice = tuple(slice(None) if i != axis else slice(num)
- for (i, x) in enumerate(arr.shape))
-
- # Shape to restore singleton dimension after slicing
- pad_singleton = tuple(x if i != axis else 1
- for (i, x) in enumerate(arr.shape))
+ med_slice = _slice_first(arr.shape, num, axis=axis)
- # Extract slice, calculate median, reshape to add singleton dimension back
- med_chunk = np.median(arr[med_slice], axis=axis).reshape(pad_singleton)
+ # Extract slice, calculate median
+ med_chunk = np.median(arr[med_slice], axis=axis, keepdims=True)
_round_ifneeded(med_chunk, arr.dtype)
# Concatenate `arr` with `med_chunk`, extended along `axis` by `pad_amt`
- return np.concatenate(
- (med_chunk.repeat(pad_amt, axis).astype(arr.dtype), arr), axis=axis)
+ return _do_prepend(arr, med_chunk.repeat(pad_amt, axis), axis=axis)
def _append_med(arr, pad_amt, num, axis=-1):
@@ -624,25 +588,17 @@ def _append_med(arr, pad_amt, num, axis=-1):
num = None
# Slice a chunk from the edge to calculate stats on
- end = arr.shape[axis] - 1
if num is not None:
- med_slice = tuple(
- slice(None) if i != axis else slice(end, end - num, -1)
- for (i, x) in enumerate(arr.shape))
+ med_slice = _slice_last(arr.shape, num, axis=axis)
else:
med_slice = tuple(slice(None) for x in arr.shape)
- # Shape to restore singleton dimension after slicing
- pad_singleton = tuple(x if i != axis else 1
- for (i, x) in enumerate(arr.shape))
-
- # Extract slice, calculate median, reshape to add singleton dimension back
- med_chunk = np.median(arr[med_slice], axis=axis).reshape(pad_singleton)
+ # Extract slice, calculate median
+ med_chunk = np.median(arr[med_slice], axis=axis, keepdims=True)
_round_ifneeded(med_chunk, arr.dtype)
# Concatenate `arr` with `med_chunk`, extended along `axis` by `pad_amt`
- return np.concatenate(
- (arr, med_chunk.repeat(pad_amt, axis).astype(arr.dtype)), axis=axis)
+ return _do_append(arr, med_chunk.repeat(pad_amt, axis), axis=axis)
def _prepend_min(arr, pad_amt, num, axis=-1):
@@ -682,19 +638,13 @@ def _prepend_min(arr, pad_amt, num, axis=-1):
num = None
# Slice a chunk from the edge to calculate stats on
- min_slice = tuple(slice(None) if i != axis else slice(num)
- for (i, x) in enumerate(arr.shape))
-
- # Shape to restore singleton dimension after slicing
- pad_singleton = tuple(x if i != axis else 1
- for (i, x) in enumerate(arr.shape))
+ min_slice = _slice_first(arr.shape, num, axis=axis)
- # Extract slice, calculate min, reshape to add singleton dimension back
- min_chunk = arr[min_slice].min(axis=axis).reshape(pad_singleton)
+ # Extract slice, calculate min
+ min_chunk = arr[min_slice].min(axis=axis, keepdims=True)
# Concatenate `arr` with `min_chunk`, extended along `axis` by `pad_amt`
- return np.concatenate((min_chunk.repeat(pad_amt, axis=axis), arr),
- axis=axis)
+ return _do_prepend(arr, min_chunk.repeat(pad_amt, axis), axis=axis)
def _append_min(arr, pad_amt, num, axis=-1):
@@ -733,24 +683,16 @@ def _append_min(arr, pad_amt, num, axis=-1):
num = None
# Slice a chunk from the edge to calculate stats on
- end = arr.shape[axis] - 1
if num is not None:
- min_slice = tuple(
- slice(None) if i != axis else slice(end, end - num, -1)
- for (i, x) in enumerate(arr.shape))
+ min_slice = _slice_last(arr.shape, num, axis=axis)
else:
min_slice = tuple(slice(None) for x in arr.shape)
- # Shape to restore singleton dimension after slicing
- pad_singleton = tuple(x if i != axis else 1
- for (i, x) in enumerate(arr.shape))
-
- # Extract slice, calculate min, reshape to add singleton dimension back
- min_chunk = arr[min_slice].min(axis=axis).reshape(pad_singleton)
+ # Extract slice, calculate min
+ min_chunk = arr[min_slice].min(axis=axis, keepdims=True)
# Concatenate `arr` with `min_chunk`, extended along `axis` by `pad_amt`
- return np.concatenate((arr, min_chunk.repeat(pad_amt, axis=axis)),
- axis=axis)
+ return _do_append(arr, min_chunk.repeat(pad_amt, axis), axis=axis)
def _pad_ref(arr, pad_amt, method, axis=-1):
@@ -793,22 +735,14 @@ def _pad_ref(arr, pad_amt, method, axis=-1):
# Prepended region
# Slice off a reverse indexed chunk from near edge to pad `arr` before
- ref_slice = tuple(slice(None) if i != axis else slice(pad_amt[0], 0, -1)
- for (i, x) in enumerate(arr.shape))
+ ref_slice = _slice_at_axis(arr.shape, slice(pad_amt[0], 0, -1), axis=axis)
ref_chunk1 = arr[ref_slice]
- # Shape to restore singleton dimension after slicing
- pad_singleton = tuple(x if i != axis else 1
- for (i, x) in enumerate(arr.shape))
- if pad_amt[0] == 1:
- ref_chunk1 = ref_chunk1.reshape(pad_singleton)
-
# Memory/computationally more expensive, only do this if `method='odd'`
if 'odd' in method and pad_amt[0] > 0:
- edge_slice1 = tuple(slice(None) if i != axis else 0
- for (i, x) in enumerate(arr.shape))
- edge_chunk = arr[edge_slice1].reshape(pad_singleton)
+ edge_slice1 = _slice_first(arr.shape, 1, axis=axis)
+ edge_chunk = arr[edge_slice1]
ref_chunk1 = 2 * edge_chunk - ref_chunk1
del edge_chunk
@@ -818,19 +752,13 @@ def _pad_ref(arr, pad_amt, method, axis=-1):
# Slice off a reverse indexed chunk from far edge to pad `arr` after
start = arr.shape[axis] - pad_amt[1] - 1
end = arr.shape[axis] - 1
- ref_slice = tuple(slice(None) if i != axis else slice(start, end)
- for (i, x) in enumerate(arr.shape))
- rev_idx = tuple(slice(None) if i != axis else slice(None, None, -1)
- for (i, x) in enumerate(arr.shape))
+ ref_slice = _slice_at_axis(arr.shape, slice(start, end), axis=axis)
+ rev_idx = _slice_at_axis(arr.shape, slice(None, None, -1), axis=axis)
ref_chunk2 = arr[ref_slice][rev_idx]
- if pad_amt[1] == 1:
- ref_chunk2 = ref_chunk2.reshape(pad_singleton)
-
if 'odd' in method:
- edge_slice2 = tuple(slice(None) if i != axis else -1
- for (i, x) in enumerate(arr.shape))
- edge_chunk = arr[edge_slice2].reshape(pad_singleton)
+ edge_slice2 = _slice_last(arr.shape, 1, axis=axis)
+ edge_chunk = arr[edge_slice2]
ref_chunk2 = 2 * edge_chunk - ref_chunk2
del edge_chunk
@@ -878,23 +806,14 @@ def _pad_sym(arr, pad_amt, method, axis=-1):
# Prepended region
# Slice off a reverse indexed chunk from near edge to pad `arr` before
- sym_slice = tuple(slice(None) if i != axis else slice(0, pad_amt[0])
- for (i, x) in enumerate(arr.shape))
- rev_idx = tuple(slice(None) if i != axis else slice(None, None, -1)
- for (i, x) in enumerate(arr.shape))
+ sym_slice = _slice_first(arr.shape, pad_amt[0], axis=axis)
+ rev_idx = _slice_at_axis(arr.shape, slice(None, None, -1), axis=axis)
sym_chunk1 = arr[sym_slice][rev_idx]
- # Shape to restore singleton dimension after slicing
- pad_singleton = tuple(x if i != axis else 1
- for (i, x) in enumerate(arr.shape))
- if pad_amt[0] == 1:
- sym_chunk1 = sym_chunk1.reshape(pad_singleton)
-
# Memory/computationally more expensive, only do this if `method='odd'`
if 'odd' in method and pad_amt[0] > 0:
- edge_slice1 = tuple(slice(None) if i != axis else 0
- for (i, x) in enumerate(arr.shape))
- edge_chunk = arr[edge_slice1].reshape(pad_singleton)
+ edge_slice1 = _slice_first(arr.shape, 1, axis=axis)
+ edge_chunk = arr[edge_slice1]
sym_chunk1 = 2 * edge_chunk - sym_chunk1
del edge_chunk
@@ -902,19 +821,12 @@ def _pad_sym(arr, pad_amt, method, axis=-1):
# Appended region
# Slice off a reverse indexed chunk from far edge to pad `arr` after
- start = arr.shape[axis] - pad_amt[1]
- end = arr.shape[axis]
- sym_slice = tuple(slice(None) if i != axis else slice(start, end)
- for (i, x) in enumerate(arr.shape))
+ sym_slice = _slice_last(arr.shape, pad_amt[1], axis=axis)
sym_chunk2 = arr[sym_slice][rev_idx]
- if pad_amt[1] == 1:
- sym_chunk2 = sym_chunk2.reshape(pad_singleton)
-
if 'odd' in method:
- edge_slice2 = tuple(slice(None) if i != axis else -1
- for (i, x) in enumerate(arr.shape))
- edge_chunk = arr[edge_slice2].reshape(pad_singleton)
+ edge_slice2 = _slice_last(arr.shape, 1, axis=axis)
+ edge_chunk = arr[edge_slice2]
sym_chunk2 = 2 * edge_chunk - sym_chunk2
del edge_chunk
@@ -959,29 +871,16 @@ def _pad_wrap(arr, pad_amt, axis=-1):
# Prepended region
# Slice off a reverse indexed chunk from near edge to pad `arr` before
- start = arr.shape[axis] - pad_amt[0]
- end = arr.shape[axis]
- wrap_slice = tuple(slice(None) if i != axis else slice(start, end)
- for (i, x) in enumerate(arr.shape))
+ wrap_slice = _slice_last(arr.shape, pad_amt[0], axis=axis)
wrap_chunk1 = arr[wrap_slice]
- # Shape to restore singleton dimension after slicing
- pad_singleton = tuple(x if i != axis else 1
- for (i, x) in enumerate(arr.shape))
- if pad_amt[0] == 1:
- wrap_chunk1 = wrap_chunk1.reshape(pad_singleton)
-
##########################################################################
# Appended region
# Slice off a reverse indexed chunk from far edge to pad `arr` after
- wrap_slice = tuple(slice(None) if i != axis else slice(0, pad_amt[1])
- for (i, x) in enumerate(arr.shape))
+ wrap_slice = _slice_first(arr.shape, pad_amt[1], axis=axis)
wrap_chunk2 = arr[wrap_slice]
- if pad_amt[1] == 1:
- wrap_chunk2 = wrap_chunk2.reshape(pad_singleton)
-
# Concatenate `arr` with both chunks, extending along `axis`
return np.concatenate((wrap_chunk1, arr, wrap_chunk2), axis=axis)
diff --git a/numpy/lib/arraysetops.py b/numpy/lib/arraysetops.py
index e8eda297f..4d3f35183 100644
--- a/numpy/lib/arraysetops.py
+++ b/numpy/lib/arraysetops.py
@@ -298,7 +298,7 @@ def _unique1d(ar, return_index=False, return_inverse=False,
return ret
-def intersect1d(ar1, ar2, assume_unique=False):
+def intersect1d(ar1, ar2, assume_unique=False, return_indices=False):
"""
Find the intersection of two arrays.
@@ -307,15 +307,28 @@ def intersect1d(ar1, ar2, assume_unique=False):
Parameters
----------
ar1, ar2 : array_like
- Input arrays.
+ Input arrays. Will be flattened if not already 1D.
assume_unique : bool
If True, the input arrays are both assumed to be unique, which
can speed up the calculation. Default is False.
-
+ return_indices : bool
+ If True, the indices which correspond to the intersection of the
+ two arrays are returned. The first instance of a value is used
+ if there are multiple. Default is False.
+
+ .. versionadded:: 1.15.0
+
Returns
-------
intersect1d : ndarray
Sorted 1D array of common and unique elements.
+ comm1 : ndarray
+ The indices of the first occurrences of the common values in `ar1`.
+ Only provided if `return_indices` is True.
+ comm2 : ndarray
+ The indices of the first occurrences of the common values in `ar2`.
+ Only provided if `return_indices` is True.
+
See Also
--------
@@ -332,14 +345,49 @@ def intersect1d(ar1, ar2, assume_unique=False):
>>> from functools import reduce
>>> reduce(np.intersect1d, ([1, 3, 4, 3], [3, 1, 2, 1], [6, 3, 4, 2]))
array([3])
+
+ To return the indices of the values common to the input arrays
+ along with the intersected values:
+
+ >>> x = np.array([1, 1, 2, 3, 4])
+ >>> y = np.array([2, 1, 4, 6])
+ >>> xy, x_ind, y_ind = np.intersect1d(x, y, return_indices=True)
+ >>> x_ind, y_ind
+ (array([0, 2, 4]), array([1, 0, 2]))
+ >>> xy, x[x_ind], y[y_ind]
+ (array([1, 2, 4]), array([1, 2, 4]), array([1, 2, 4]))
+
"""
if not assume_unique:
- # Might be faster than unique( intersect1d( ar1, ar2 ) )?
- ar1 = unique(ar1)
- ar2 = unique(ar2)
+ if return_indices:
+ ar1, ind1 = unique(ar1, return_index=True)
+ ar2, ind2 = unique(ar2, return_index=True)
+ else:
+ ar1 = unique(ar1)
+ ar2 = unique(ar2)
+ else:
+ ar1 = ar1.ravel()
+ ar2 = ar2.ravel()
+
aux = np.concatenate((ar1, ar2))
- aux.sort()
- return aux[:-1][aux[1:] == aux[:-1]]
+ if return_indices:
+ aux_sort_indices = np.argsort(aux, kind='mergesort')
+ aux = aux[aux_sort_indices]
+ else:
+ aux.sort()
+
+ mask = aux[1:] == aux[:-1]
+ int1d = aux[:-1][mask]
+
+ if return_indices:
+ ar1_indices = aux_sort_indices[:-1][mask]
+ ar2_indices = aux_sort_indices[1:][mask] - ar1.size
+ if not assume_unique:
+ ar1_indices = ind1[ar1_indices]
+ ar2_indices = ind2[ar2_indices]
+
+ return int1d, ar1_indices, ar2_indices
+ else:
+ return int1d
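How the index recovery works, in short: after `unique`, equal values from the two arrays become adjacent in the stable mergesort of the concatenation, so the left partner of each equal pair indexes into `ar1` and the right partner (offset by `ar1.size`) into `ar2`. A usage sketch (assumes NumPy >= 1.15 for `return_indices`):

    import numpy as np

    x = np.array([1, 1, 2, 3, 4])
    y = np.array([2, 1, 4, 6])
    xy, x_ind, y_ind = np.intersect1d(x, y, return_indices=True)
    print(xy, x[x_ind], y[y_ind])   # [1 2 4] [1 2 4] [1 2 4]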
def setxor1d(ar1, ar2, assume_unique=False):
"""
@@ -660,3 +708,4 @@ def setdiff1d(ar1, ar2, assume_unique=False):
ar1 = unique(ar1)
ar2 = unique(ar2)
return ar1[in1d(ar1, ar2, assume_unique=True, invert=True)]
+
diff --git a/numpy/lib/format.py b/numpy/lib/format.py
index 363bb2101..23eac7e7d 100644
--- a/numpy/lib/format.py
+++ b/numpy/lib/format.py
@@ -1,5 +1,10 @@
"""
-Define a simple format for saving numpy arrays to disk with the full
+Binary serialization
+
+NPY format
+==========
+
+A simple format for saving numpy arrays to disk with the full
information about them.
The ``.npy`` format is the standard binary file format in NumPy for
@@ -143,8 +148,10 @@ data HEADER_LEN."
Notes
-----
-The ``.npy`` format, including reasons for creating it and a comparison of
-alternatives, is described fully in the "npy-format" NEP.
+The ``.npy`` format, including motivation for creating it and a comparison of
+alternatives, is described in the `"npy-format" NEP
+<http://www.numpy.org/neps/nep-0001-npy-format.html>`_; however, details have
+evolved with time and this document is more current.
"""
from __future__ import division, absolute_import, print_function
diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index 8440be52e..a6e3e07d3 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -54,7 +54,8 @@ __all__ = [
'bincount', 'digitize', 'cov', 'corrcoef',
'msort', 'median', 'sinc', 'hamming', 'hanning', 'bartlett',
'blackman', 'kaiser', 'trapz', 'i0', 'add_newdoc', 'add_docstring',
- 'meshgrid', 'delete', 'insert', 'append', 'interp', 'add_newdoc_ufunc'
+ 'meshgrid', 'delete', 'insert', 'append', 'interp', 'add_newdoc_ufunc',
+ 'quantile'
]
@@ -145,7 +146,7 @@ def rot90(m, k=1, axes=(0,1)):
return flip(transpose(m, axes_list), axes[1])
-def flip(m, axis):
+def flip(m, axis=None):
"""
Reverse the order of elements in an array along the given axis.
@@ -157,9 +158,16 @@ def flip(m, axis):
----------
m : array_like
Input array.
- axis : integer
- Axis in array, which entries are reversed.
+ axis : None or int or tuple of ints, optional
+ Axis or axes along which to flip over. The default,
+ axis=None, will flip over all of the axes of the input array.
+ If axis is negative it counts from the last to the first axis.
+
+ If axis is a tuple of ints, flipping is performed on all of the axes
+ specified in the tuple.
+
+ .. versionchanged:: 1.15.0
+ None and tuples of axes are supported
Returns
-------
@@ -175,9 +183,17 @@ def flip(m, axis):
Notes
-----
flip(m, 0) is equivalent to flipud(m).
+
flip(m, 1) is equivalent to fliplr(m).
+
flip(m, n) corresponds to ``m[...,::-1,...]`` with ``::-1`` at position n.
+ flip(m) corresponds to ``m[::-1,::-1,...,::-1]`` with ``::-1`` at all
+ positions.
+
+ flip(m, (0, 1)) corresponds to ``m[::-1,::-1,...]`` with ``::-1`` at
+ position 0 and position 1.
+
Examples
--------
>>> A = np.arange(8).reshape((2,2,2))
@@ -186,32 +202,41 @@ def flip(m, axis):
[2, 3]],
[[4, 5],
[6, 7]]])
-
>>> flip(A, 0)
array([[[4, 5],
[6, 7]],
[[0, 1],
[2, 3]]])
-
>>> flip(A, 1)
array([[[2, 3],
[0, 1]],
[[6, 7],
[4, 5]]])
-
+ >>> np.flip(A)
+ array([[[7, 6],
+ [5, 4]],
+ [[3, 2],
+ [1, 0]]])
+ >>> np.flip(A, (0, 2))
+ array([[[5, 4],
+ [7, 6]],
+ [[1, 0],
+ [3, 2]]])
>>> A = np.random.randn(3,4,5)
>>> np.all(flip(A,2) == A[:,:,::-1,...])
True
"""
if not hasattr(m, 'ndim'):
m = asarray(m)
- indexer = [slice(None)] * m.ndim
- try:
- indexer[axis] = slice(None, None, -1)
- except IndexError:
- raise ValueError("axis=%i is invalid for the %i-dimensional input array"
- % (axis, m.ndim))
- return m[tuple(indexer)]
+ if axis is None:
+ indexer = (np.s_[::-1],) * m.ndim
+ else:
+ axis = _nx.normalize_axis_tuple(axis, m.ndim)
+ indexer = [np.s_[:]] * m.ndim
+ for ax in axis:
+ indexer[ax] = np.s_[::-1]
+ indexer = tuple(indexer)
+ return m[indexer]
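A short check of the new axis handling (assumes NumPy >= 1.15 for `axis=None` and tuple axes):

    import numpy as np

    A = np.arange(8).reshape(2, 2, 2)
    # axis=None reverses every axis; a tuple reverses only the listed axes.
    assert np.array_equal(np.flip(A), A[::-1, ::-1, ::-1])
    assert np.array_equal(np.flip(A, (0, 2)), A[::-1, :, ::-1])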
def iterable(y):
@@ -1608,9 +1633,9 @@ def disp(mesg, device=None, linefeed=True):
Besides ``sys.stdout``, a file-like object can also be used as it has
both required methods:
- >>> from StringIO import StringIO
+ >>> from io import StringIO
>>> buf = StringIO()
- >>> np.disp('"Display" in a file', device=buf)
+ >>> np.disp(u'"Display" in a file', device=buf)
>>> buf.getvalue()
'"Display" in a file\\n'
@@ -3400,17 +3425,19 @@ def percentile(a, q, axis=None, out=None,
If True, then allow the input array `a` to be modified by intermediate
calculations, to save memory. In this case, the contents of the input
`a` after this function completes is undefined.
+
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to
- use when the desired quantile lies between two data points
+ use when the desired percentile lies between two data points
``i < j``:
- * linear: ``i + (j - i) * fraction``, where ``fraction``
- is the fractional part of the index surrounded by ``i``
- and ``j``.
- * lower: ``i``.
- * higher: ``j``.
- * nearest: ``i`` or ``j``, whichever is nearest.
- * midpoint: ``(i + j) / 2``.
+
+ * 'linear': ``i + (j - i) * fraction``, where ``fraction``
+ is the fractional part of the index surrounded by ``i``
+ and ``j``.
+ * 'lower': ``i``.
+ * 'higher': ``j``.
+ * 'nearest': ``i`` or ``j``, whichever is nearest.
+ * 'midpoint': ``(i + j) / 2``.
.. versionadded:: 1.9.0
keepdims : bool, optional
@@ -3437,6 +3464,7 @@ def percentile(a, q, axis=None, out=None,
mean
median : equivalent to ``percentile(..., 50)``
nanpercentile
+ quantile : equivalent to percentile, except with q in the range [0, 1].
Notes
-----
@@ -3479,18 +3507,19 @@ def percentile(a, q, axis=None, out=None,
The different types of interpolation can be visualized graphically:
- ..plot::
+ .. plot::
+
import matplotlib.pyplot as plt
a = np.arange(4)
p = np.linspace(0, 100, 6001)
ax = plt.gca()
lines = [
- ('linear', None)
- ('higher', '--')
- ('lower', '--')
- ('nearest', '-.')
- ('midpoint', '-.')
+ ('linear', None),
+ ('higher', '--'),
+ ('lower', '--'),
+ ('nearest', '-.'),
+ ('midpoint', '-.'),
]
for interpolation, style in lines:
ax.plot(
@@ -3512,6 +3541,110 @@ def percentile(a, q, axis=None, out=None,
a, q, axis, out, overwrite_input, interpolation, keepdims)
+def quantile(a, q, axis=None, out=None,
+ overwrite_input=False, interpolation='linear', keepdims=False):
+ """
+ Compute the `q`th quantile of the data along the specified axis.
+
+ .. versionadded:: 1.15.0
+
+ Parameters
+ ----------
+ a : array_like
+ Input array or object that can be converted to an array.
+ q : array_like of float
+ Quantile or sequence of quantiles to compute, which must be between
+ 0 and 1 inclusive.
+ axis : {int, tuple of int, None}, optional
+ Axis or axes along which the quantiles are computed. The
+ default is to compute the quantile(s) along a flattened
+ version of the array.
+ out : ndarray, optional
+ Alternative output array in which to place the result. It must
+ have the same shape and buffer length as the expected output,
+ but the type (of the output) will be cast if necessary.
+ overwrite_input : bool, optional
+ If True, then allow the input array `a` to be modified by intermediate
+ calculations, to save memory. In this case, the contents of the input
+ `a` after this function completes is undefined.
+ interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
+ This optional parameter specifies the interpolation method to
+ use when the desired quantile lies between two data points
+ ``i < j``:
+
+ * linear: ``i + (j - i) * fraction``, where ``fraction``
+ is the fractional part of the index surrounded by ``i``
+ and ``j``.
+ * lower: ``i``.
+ * higher: ``j``.
+ * nearest: ``i`` or ``j``, whichever is nearest.
+ * midpoint: ``(i + j) / 2``.
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left in
+ the result as dimensions with size one. With this option, the
+ result will broadcast correctly against the original array `a`.
+
+ Returns
+ -------
+ quantile : scalar or ndarray
+ If `q` is a single quantile and `axis=None`, then the result
+ is a scalar. If multiple quantiles are given, first axis of
+ the result corresponds to the quantiles. The other axes are
+ the axes that remain after the reduction of `a`. If the input
+ contains integers or floats smaller than ``float64``, the output
+ data-type is ``float64``. Otherwise, the output data-type is the
+ same as that of the input. If `out` is specified, that array is
+ returned instead.
+
+ See Also
+ --------
+ mean
+ percentile : equivalent to quantile, but with q in the range [0, 100].
+ median : equivalent to ``quantile(..., 0.5)``
+ nanquantile
+
+ Notes
+ -----
+ Given a vector ``V`` of length ``N``, the ``q``-th quantile of
+ ``V`` is the value ``q`` of the way from the minimum to the
+ maximum in a sorted copy of ``V``. The values and distances of
+ the two nearest neighbors as well as the `interpolation` parameter
+ will determine the quantile if the normalized ranking does not
+ match the location of ``q`` exactly. This function is the same as
+ the median if ``q=0.5``, the same as the minimum if ``q=0.0`` and the
+ same as the maximum if ``q=1.0``.
+
+ Examples
+ --------
+ >>> a = np.array([[10, 7, 4], [3, 2, 1]])
+ >>> a
+ array([[10, 7, 4],
+ [ 3, 2, 1]])
+ >>> np.quantile(a, 0.5)
+ 3.5
+ >>> np.quantile(a, 0.5, axis=0)
+ array([ 6.5, 4.5, 2.5])
+ >>> np.quantile(a, 0.5, axis=1)
+ array([ 7., 2.])
+ >>> np.quantile(a, 0.5, axis=1, keepdims=True)
+ array([[ 7.],
+ [ 2.]])
+ >>> m = np.quantile(a, 0.5, axis=0)
+ >>> out = np.zeros_like(m)
+ >>> np.quantile(a, 0.5, axis=0, out=out)
+ array([ 6.5, 4.5, 2.5])
+ >>> m
+ array([ 6.5, 4.5, 2.5])
+ >>> b = a.copy()
+ >>> np.quantile(b, 0.5, axis=1, overwrite_input=True)
+ array([ 7., 2.])
+ >>> assert not np.all(a == b)
+ """
+ q = np.asanyarray(q)
+ if not _quantile_is_valid(q):
+ raise ValueError("Quantiles must be in the range [0, 1]")
+ return _quantile_unchecked(
+ a, q, axis, out, overwrite_input, interpolation, keepdims)
+
+
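A small consistency check between the new function and `percentile` (assumes NumPy >= 1.15):

    import numpy as np

    a = np.array([[10, 7, 4], [3, 2, 1]])
    # quantile(q) and percentile(100 * q) share the same code path.
    assert np.quantile(a, 0.25) == np.percentile(a, 25)
    assert np.array_equal(np.quantile(a, 0.5, axis=0), np.percentile(a, 50, axis=0))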
def _quantile_unchecked(a, q, axis=None, out=None, overwrite_input=False,
interpolation='linear', keepdims=False):
"""Assumes that q is in [0, 1], and is an ndarray"""
diff --git a/numpy/lib/histograms.py b/numpy/lib/histograms.py
index d2a398a0a..2922b3a86 100644
--- a/numpy/lib/histograms.py
+++ b/numpy/lib/histograms.py
@@ -877,12 +877,6 @@ def histogramdd(sample, bins=10, range=None, normed=False, weights=None):
# bins is an integer
bins = D*[bins]
- # avoid rounding issues for comparisons when dealing with inexact types
- if np.issubdtype(sample.dtype, np.inexact):
- edge_dt = sample.dtype
- else:
- edge_dt = float
-
# normalize the range argument
if range is None:
range = (None,) * D
@@ -896,13 +890,12 @@ def histogramdd(sample, bins=10, range=None, normed=False, weights=None):
raise ValueError(
'`bins[{}]` must be positive, when an integer'.format(i))
smin, smax = _get_outer_edges(sample[:,i], range[i])
- edges[i] = np.linspace(smin, smax, bins[i] + 1, dtype=edge_dt)
+ edges[i] = np.linspace(smin, smax, bins[i] + 1)
elif np.ndim(bins[i]) == 1:
- edges[i] = np.asarray(bins[i], edge_dt)
- # not just monotonic, due to the use of mindiff below
- if np.any(edges[i][:-1] >= edges[i][1:]):
+ edges[i] = np.asarray(bins[i])
+ if np.any(edges[i][:-1] > edges[i][1:]):
raise ValueError(
- '`bins[{}]` must be strictly increasing, when an array'
+ '`bins[{}]` must be monotonically increasing, when an array'
.format(i))
else:
raise ValueError(
@@ -911,13 +904,10 @@ def histogramdd(sample, bins=10, range=None, normed=False, weights=None):
nbin[i] = len(edges[i]) + 1 # includes an outlier on each end
dedges[i] = np.diff(edges[i])
- # Handle empty input.
- if N == 0:
- return np.zeros(nbin-2), edges
-
# Compute the bin number each sample falls into.
Ncount = tuple(
- np.digitize(sample[:, i], edges[i])
+ # avoid np.digitize to work around gh-11022
+ np.searchsorted(edges[i], sample[:, i], side='right')
for i in _range(D)
)
@@ -925,16 +915,10 @@ def histogramdd(sample, bins=10, range=None, normed=False, weights=None):
# For the rightmost bin, we want values equal to the right edge to be
# counted in the last bin, and not as an outlier.
for i in _range(D):
- # Rounding precision
- mindiff = dedges[i].min()
- if not np.isinf(mindiff):
- decimal = int(-np.log10(mindiff)) + 6
- # Find which points are on the rightmost edge.
- not_smaller_than_edge = (sample[:, i] >= edges[i][-1])
- on_edge = (np.around(sample[:, i], decimal) ==
- np.around(edges[i][-1], decimal))
- # Shift these points one bin to the left.
- Ncount[i][on_edge & not_smaller_than_edge] -= 1
+ # Find which points are on the rightmost edge.
+ on_edge = (sample[:, i] == edges[i][-1])
+ # Shift these points one bin to the left.
+ Ncount[i][on_edge] -= 1
# Compute the sample indices in the flattened histogram matrix.
# This raises an error if the array is too large.
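With the exact `==` comparison replacing the old rounding heuristic, a sample sitting exactly on the last edge still lands in the last bin rather than being treated as an outlier (illustrative only):

    import numpy as np

    hist, edges = np.histogramdd(np.array([[1.0]]), bins=(2,), range=((0.0, 1.0),))
    print(hist)   # [ 0.  1.]  -- the on-edge sample is shifted into the last bin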
diff --git a/numpy/lib/mixins.py b/numpy/lib/mixins.py
index 3220f6534..0379ecb1a 100644
--- a/numpy/lib/mixins.py
+++ b/numpy/lib/mixins.py
@@ -74,8 +74,8 @@ class NDArrayOperatorsMixin(object):
It is useful for writing classes that do not inherit from `numpy.ndarray`,
but that should support arithmetic and numpy universal functions like
- arrays as described in :ref:`A Mechanism for Overriding Ufuncs
- <neps.ufunc-overrides>`.
+ arrays as described in `A Mechanism for Overriding Ufuncs
+ <../../neps/nep-0013-ufunc-overrides.html>`_.
As a trivial example, consider this implementation of an ``ArrayLike``
class that simply wraps a NumPy array and ensures that the result of any
diff --git a/numpy/lib/nanfunctions.py b/numpy/lib/nanfunctions.py
index 16e363d7c..abd2da1a2 100644
--- a/numpy/lib/nanfunctions.py
+++ b/numpy/lib/nanfunctions.py
@@ -16,6 +16,7 @@ Functions
- `nanvar` -- variance of non-NaN values
- `nanstd` -- standard deviation of non-NaN values
- `nanmedian` -- median of non-NaN values
+- `nanquantile` -- qth quantile of non-NaN values
- `nanpercentile` -- qth percentile of non-NaN values
"""
@@ -29,7 +30,7 @@ from numpy.lib import function_base
__all__ = [
'nansum', 'nanmax', 'nanmin', 'nanargmax', 'nanargmin', 'nanmean',
'nanmedian', 'nanpercentile', 'nanvar', 'nanstd', 'nanprod',
- 'nancumsum', 'nancumprod'
+ 'nancumsum', 'nancumprod', 'nanquantile'
]
@@ -1057,15 +1058,16 @@ def nanpercentile(a, q, axis=None, out=None, overwrite_input=False,
`a` after this function completes is undefined.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to
- use when the desired quantile lies between two data points
+ use when the desired percentile lies between two data points
``i < j``:
- * linear: ``i + (j - i) * fraction``, where ``fraction``
- is the fractional part of the index surrounded by ``i``
- and ``j``.
- * lower: ``i``.
- * higher: ``j``.
- * nearest: ``i`` or ``j``, whichever is nearest.
- * midpoint: ``(i + j) / 2``.
+
+ * 'linear': ``i + (j - i) * fraction``, where ``fraction``
+ is the fractional part of the index surrounded by ``i``
+ and ``j``.
+ * 'lower': ``i``.
+ * 'higher': ``j``.
+ * 'nearest': ``i`` or ``j``, whichever is nearest.
+ * 'midpoint': ``(i + j) / 2``.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in
the result as dimensions with size one. With this option, the
@@ -1094,6 +1096,7 @@ def nanpercentile(a, q, axis=None, out=None, overwrite_input=False,
nanmean
nanmedian : equivalent to ``nanpercentile(..., 50)``
percentile, median, mean
+ nanquantile : equivalent to nanpercentile, but with q in the range [0, 1].
Notes
-----
@@ -1143,6 +1146,110 @@ def nanpercentile(a, q, axis=None, out=None, overwrite_input=False,
a, q, axis, out, overwrite_input, interpolation, keepdims)
+def nanquantile(a, q, axis=None, out=None, overwrite_input=False,
+ interpolation='linear', keepdims=np._NoValue):
+ """
+ Compute the qth quantile of the data along the specified axis,
+ while ignoring nan values.
+
+ Returns the qth quantile(s) of the array elements.
+
+ .. versionadded:: 1.15.0
+
+ Parameters
+ ----------
+ a : array_like
+ Input array or object that can be converted to an array, containing
+ nan values to be ignored
+ q : array_like of float
+ Quantile or sequence of quantiles to compute, which must be between
+ 0 and 1 inclusive.
+ axis : {int, tuple of int, None}, optional
+ Axis or axes along which the quantiles are computed. The
+ default is to compute the quantile(s) along a flattened
+ version of the array.
+ out : ndarray, optional
+ Alternative output array in which to place the result. It must
+ have the same shape and buffer length as the expected output,
+ but the type (of the output) will be cast if necessary.
+ overwrite_input : bool, optional
+ If True, then allow the input array `a` to be modified by intermediate
+ calculations, to save memory. In this case, the contents of the input
+ `a` after this function completes is undefined.
+ interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
+ This optional parameter specifies the interpolation method to
+ use when the desired quantile lies between two data points
+ ``i < j``:
+
+ * linear: ``i + (j - i) * fraction``, where ``fraction``
+ is the fractional part of the index surrounded by ``i``
+ and ``j``.
+ * lower: ``i``.
+ * higher: ``j``.
+ * nearest: ``i`` or ``j``, whichever is nearest.
+ * midpoint: ``(i + j) / 2``.
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left in
+ the result as dimensions with size one. With this option, the
+ result will broadcast correctly against the original array `a`.
+
+ If this is anything but the default value it will be passed
+ through (in the special case of an empty array) to the
+ `mean` function of the underlying array. If the array is
+ a sub-class and `mean` does not have the kwarg `keepdims` this
+ will raise a RuntimeError.
+
+ Returns
+ -------
+ quantile : scalar or ndarray
+ If `q` is a single quantile and `axis=None`, then the result
+ is a scalar. If multiple quantiles are given, first axis of
+ the result corresponds to the quantiles. The other axes are
+ the axes that remain after the reduction of `a`. If the input
+ contains integers or floats smaller than ``float64``, the output
+ data-type is ``float64``. Otherwise, the output data-type is the
+ same as that of the input. If `out` is specified, that array is
+ returned instead.
+
+ See Also
+ --------
+ quantile
+ nanmean
+ nanmedian : equivalent to ``nanquantile(..., 0.5)``
+ nanpercentile : same as nanquantile, but with q in the range [0, 100].
+
+ Examples
+ --------
+ >>> a = np.array([[10., 7., 4.], [3., 2., 1.]])
+ >>> a[0][1] = np.nan
+ >>> a
+ array([[ 10., nan, 4.],
+ [ 3., 2., 1.]])
+ >>> np.quantile(a, 0.5)
+ nan
+ >>> np.nanquantile(a, 0.5)
+ 3.5
+ >>> np.nanquantile(a, 0.5, axis=0)
+ array([ 6.5, 2., 2.5])
+ >>> np.nanquantile(a, 0.5, axis=1, keepdims=True)
+ array([[ 7.],
+ [ 2.]])
+ >>> m = np.nanquantile(a, 0.5, axis=0)
+ >>> out = np.zeros_like(m)
+ >>> np.nanquantile(a, 0.5, axis=0, out=out)
+ array([ 6.5, 2., 2.5])
+ >>> m
+ array([ 6.5, 2. , 2.5])
+ >>> b = a.copy()
+ >>> np.nanquantile(b, 0.5, axis=1, overwrite_input=True)
+ array([ 7., 2.])
+ >>> assert not np.all(a==b)
+ """
+ a = np.asanyarray(a)
+ q = np.asanyarray(q)
+ if not function_base._quantile_is_valid(q):
+ raise ValueError("Quantiles must be in the range [0, 1]")
+ return _nanquantile_unchecked(
+ a, q, axis, out, overwrite_input, interpolation, keepdims)
+
+
def _nanquantile_unchecked(a, q, axis=None, out=None, overwrite_input=False,
interpolation='linear', keepdims=np._NoValue):
"""Assumes that q is in [0, 1], and is an ndarray"""
diff --git a/numpy/lib/npyio.py b/numpy/lib/npyio.py
index 0f338d781..b109d65e1 100644
--- a/numpy/lib/npyio.py
+++ b/numpy/lib/npyio.py
@@ -475,9 +475,7 @@ def save(file, arr, allow_pickle=True, fix_imports=True):
Notes
-----
- For a description of the ``.npy`` format, see the module docstring
- of `numpy.lib.format` or the NumPy Enhancement Proposal
- http://numpy.github.io/neps/npy-format.html
+ For a description of the ``.npy`` format, see :py:mod:`numpy.lib.format`.
Examples
--------
@@ -561,9 +559,7 @@ def savez(file, *args, **kwds):
The ``.npz`` file format is a zipped archive of files named after the
variables they contain. The archive is not compressed and each file
in the archive contains one variable in ``.npy`` format. For a
- description of the ``.npy`` format, see `numpy.lib.format` or the
- NumPy Enhancement Proposal
- http://numpy.github.io/neps/npy-format.html
+ description of the ``.npy`` format, see :py:mod:`numpy.lib.format`.
When opening the saved ``.npz`` file with `load` a `NpzFile` object is
returned. This is a dictionary-like object which can be queried for
@@ -642,9 +638,9 @@ def savez_compressed(file, *args, **kwds):
The ``.npz`` file format is a zipped archive of files named after the
variables they contain. The archive is compressed with
``zipfile.ZIP_DEFLATED`` and each file in the archive contains one variable
- in ``.npy`` format. For a description of the ``.npy`` format, see
- `numpy.lib.format` or the NumPy Enhancement Proposal
- http://numpy.github.io/neps/npy-format.html
+ in ``.npy`` format. For a description of the ``.npy`` format, see
+ :py:mod:`numpy.lib.format`.
+
When opening the saved ``.npz`` file with `load` a `NpzFile` object is
returned. This is a dictionary-like object which can be queried for
@@ -758,7 +754,7 @@ def _getconv(dtype):
elif issubclass(typ, np.floating):
return floatconv
elif issubclass(typ, complex):
- return lambda x: complex(asstr(x))
+ return lambda x: complex(asstr(x).replace('+-', '-'))
elif issubclass(typ, np.bytes_):
return asbytes
elif issubclass(typ, np.unicode_):
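Why both the reader and writer strip ``'+-'``: the legacy single-specifier complex format glues the real and imaginary parts together with a literal ``+``, which produces ``+-`` for negative imaginary parts. A sketch (illustrative only):

    fmt = '%.3e'
    pair = ' (' + fmt + '+' + fmt + 'j)'
    s = pair % (1.0, -2.0)
    print(s)                       # (1.000e+00+-2.000e+00j)
    print(s.replace('+-', '-'))    # (1.000e+00-2.000e+00j)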
@@ -791,8 +787,8 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
the data-type.
comments : str or sequence of str, optional
The characters or list of characters used to indicate the start of a
- comment. For backwards compatibility, byte strings will be decoded as
- 'latin1'. The default is '#'.
+ comment. None implies no comments. For backwards compatibility, byte
+ strings will be decoded as 'latin1'. The default is '#'.
delimiter : str, optional
The string used to separate values. For backwards compatibility, byte
strings will be decoded as 'latin1'. The default is whitespace.
@@ -859,18 +855,18 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
Examples
--------
>>> from io import StringIO # StringIO behaves like a file object
- >>> c = StringIO("0 1\\n2 3")
+ >>> c = StringIO(u"0 1\\n2 3")
>>> np.loadtxt(c)
array([[ 0., 1.],
[ 2., 3.]])
- >>> d = StringIO("M 21 72\\nF 35 58")
+ >>> d = StringIO(u"M 21 72\\nF 35 58")
>>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),
... 'formats': ('S1', 'i4', 'f4')})
array([('M', 21, 72.0), ('F', 35, 58.0)],
dtype=[('gender', '|S1'), ('age', '<i4'), ('weight', '<f4')])
- >>> c = StringIO("1,0,2\\n3,0,4")
+ >>> c = StringIO(u"1,0,2\\n3,0,4")
>>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
>>> x
array([ 1., 3.])
@@ -936,7 +932,7 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
if encoding is not None:
fencoding = encoding
# we must assume local encoding
- # TOOD emit portability warning?
+ # TODO emit portability warning?
elif fencoding is None:
import locale
fencoding = locale.getpreferredencoding()
@@ -1166,13 +1162,14 @@ def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
multi-format string, e.g. 'Iteration %d -- %10.5f', in which
case `delimiter` is ignored. For complex `X`, the legal options
for `fmt` are:
- a) a single specifier, `fmt='%.4e'`, resulting in numbers formatted
- like `' (%s+%sj)' % (fmt, fmt)`
- b) a full string specifying every real and imaginary part, e.g.
- `' %.4e %+.4ej %.4e %+.4ej %.4e %+.4ej'` for 3 columns
- c) a list of specifiers, one per column - in this case, the real
- and imaginary part must have separate specifiers,
- e.g. `['%.3e + %.3ej', '(%.15e%+.15ej)']` for 2 columns
+
+ * a single specifier, `fmt='%.4e'`, resulting in numbers formatted
+ like `' (%s+%sj)' % (fmt, fmt)`
+ * a full string specifying every real and imaginary part, e.g.
+ `' %.4e %+.4ej %.4e %+.4ej %.4e %+.4ej'` for 3 columns
+ * a list of specifiers, one per column - in this case, the real
+ and imaginary part must have separate specifiers,
+ e.g. `['%.3e + %.3ej', '(%.15e%+.15ej)']` for 2 columns
delimiter : str, optional
String or character separating columns.
newline : str, optional
@@ -1377,7 +1374,8 @@ def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
for number in row:
row2.append(number.real)
row2.append(number.imag)
- fh.write(format % tuple(row2) + newline)
+ s = format % tuple(row2) + newline
+ fh.write(s.replace('+-', '-'))
else:
for row in X:
try:
@@ -1630,7 +1628,7 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
Comma delimited file with mixed dtype
- >>> s = StringIO("1,1.3,abcde")
+ >>> s = StringIO(u"1,1.3,abcde")
>>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'),
... ('mystring','S5')], delimiter=",")
>>> data
@@ -1657,7 +1655,7 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
An example with fixed-width columns
- >>> s = StringIO("11.3abcde")
+ >>> s = StringIO(u"11.3abcde")
>>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'],
... delimiter=[1,3,5])
>>> data
@@ -1719,7 +1717,7 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
try:
while not first_values:
first_line = _decode_line(next(fhd), encoding)
- if names is True:
+ if (names is True) and (comments is not None):
if comments in first_line:
first_line = (
''.join(first_line.split(comments)[1:]))
@@ -1733,8 +1731,9 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
# Should we take the first values as names ?
if names is True:
fval = first_values[0].strip()
- if fval in comments:
- del first_values[0]
+ if comments is not None:
+ if fval in comments:
+ del first_values[0]
# Check the columns to use: make sure `usecols` is a list
if usecols is not None:
diff --git a/numpy/lib/polynomial.py b/numpy/lib/polynomial.py
index 41b5e2f64..078608bbb 100644
--- a/numpy/lib/polynomial.py
+++ b/numpy/lib/polynomial.py
@@ -113,11 +113,6 @@ def poly(seq_of_zeros):
>>> np.poly(P)
array([ 1. , 0. , 0.16666667])
- Or a square matrix object:
-
- >>> np.poly(np.matrix(P))
- array([ 1. , 0. , 0.16666667])
-
Note how in all cases the leading coefficient is always 1.
"""
diff --git a/numpy/lib/scimath.py b/numpy/lib/scimath.py
index e07caf805..f1838fee6 100644
--- a/numpy/lib/scimath.py
+++ b/numpy/lib/scimath.py
@@ -555,7 +555,7 @@ def arctanh(x):
--------
>>> np.set_printoptions(precision=4)
- >>> np.emath.arctanh(np.matrix(np.eye(2)))
+ >>> np.emath.arctanh(np.eye(2))
array([[ Inf, 0.],
[ 0., Inf]])
>>> np.emath.arctanh([1j])
diff --git a/numpy/lib/shape_base.py b/numpy/lib/shape_base.py
index 41ef28ef3..65104115a 100644
--- a/numpy/lib/shape_base.py
+++ b/numpy/lib/shape_base.py
@@ -16,10 +16,235 @@ from numpy.matrixlib.defmatrix import matrix # this raises all the right alarm
__all__ = [
'column_stack', 'row_stack', 'dstack', 'array_split', 'split',
'hsplit', 'vsplit', 'dsplit', 'apply_over_axes', 'expand_dims',
- 'apply_along_axis', 'kron', 'tile', 'get_array_wrap'
+ 'apply_along_axis', 'kron', 'tile', 'get_array_wrap', 'take_along_axis',
+ 'put_along_axis'
]
+def _make_along_axis_idx(arr_shape, indices, axis):
+ # compute dimensions to iterate over
+ if not _nx.issubdtype(indices.dtype, _nx.integer):
+ raise IndexError('`indices` must be an integer array')
+ if len(arr_shape) != indices.ndim:
+ raise ValueError(
+ "`indices` and `arr` must have the same number of dimensions")
+ shape_ones = (1,) * indices.ndim
+ dest_dims = list(range(axis)) + [None] + list(range(axis+1, indices.ndim))
+
+ # build a fancy index, consisting of orthogonal aranges, with the
+ # requested index inserted at the right location
+ fancy_index = []
+ for dim, n in zip(dest_dims, arr_shape):
+ if dim is None:
+ fancy_index.append(indices)
+ else:
+ ind_shape = shape_ones[:dim] + (-1,) + shape_ones[dim+1:]
+ fancy_index.append(_nx.arange(n).reshape(ind_shape))
+
+ return tuple(fancy_index)
+
+
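The helper builds an "orthogonal" fancy index: an `arange` reshaped for broadcasting on every axis except the requested one, where the caller's indices slot in. A hand-rolled sketch of the same idea for a 2-d array (illustrative only):

    import numpy as np

    a = np.array([[10, 30, 20], [60, 40, 50]])
    idx = np.argsort(a, axis=1)
    rows = np.arange(2).reshape(2, 1)   # the orthogonal arange for axis 0
    # a[rows, idx] picks a[i, idx[i, j]], i.e. each row reordered by its indices.
    assert np.array_equal(a[rows, idx], np.sort(a, axis=1))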
+def take_along_axis(arr, indices, axis):
+ """
+ Take values from the input array by matching 1d index and data slices.
+
+ This iterates over matching 1d slices oriented along the specified axis in
+ the index and data arrays, and uses the former to look up values in the
+ latter. These slices can be different lengths.
+
+ Functions returning an index along an axis, like `argsort` and
+ `argpartition`, produce suitable indices for this function.
+
+ .. versionadded:: 1.15.0
+
+ Parameters
+ ----------
+ arr: ndarray (Ni..., M, Nk...)
+ Source array.
+ indices: ndarray (Ni..., J, Nk...)
+ Indices to take along each 1d slice of `arr`. This must match the
+ dimension of arr, but dimensions Ni and Nk only need to broadcast
+ against `arr`.
+ axis: int
+ The axis to take 1d slices along. If axis is None, the input array is
+ treated as if it had first been flattened to 1d, for consistency with
+ `sort` and `argsort`.
+
+ Returns
+ -------
+ out: ndarray (Ni..., J, Nk...)
+ The indexed result.
+
+ Notes
+ -----
+ This is equivalent to (but faster than) the following use of `ndindex` and
+ `s_`, which sets each of ``ii`` and ``kk`` to a tuple of indices::
+
+ Ni, M, Nk = a.shape[:axis], a.shape[axis], a.shape[axis+1:]
+ J = indices.shape[axis] # Need not equal M
+ out = np.empty(Ni + (J,) + Nk)
+
+ for ii in ndindex(Ni):
+ for kk in ndindex(Nk):
+ a_1d = a [ii + s_[:,] + kk]
+ indices_1d = indices[ii + s_[:,] + kk]
+ out_1d = out [ii + s_[:,] + kk]
+ for j in range(J):
+ out_1d[j] = a_1d[indices_1d[j]]
+
+ Equivalently, eliminating the inner loop, the last two lines would be::
+
+ out_1d[:] = a_1d[indices_1d]
+
+ See Also
+ --------
+ take : Take along an axis, using the same indices for every 1d slice
+ put_along_axis :
+ Put values into the destination array by matching 1d index and data slices
+
+ Examples
+ --------
+
+ For this sample array
+
+ >>> a = np.array([[10, 30, 20], [60, 40, 50]])
+
+ We can sort either by using sort directly, or argsort and this function
+
+ >>> np.sort(a, axis=1)
+ array([[10, 20, 30],
+ [40, 50, 60]])
+ >>> ai = np.argsort(a, axis=1); ai
+ array([[0, 2, 1],
+ [1, 2, 0]], dtype=int64)
+ >>> np.take_along_axis(a, ai, axis=1)
+ array([[10, 20, 30],
+ [40, 50, 60]])
+
+ The same works for max and min, if you expand the dimensions:
+
+ >>> np.expand_dims(np.max(a, axis=1), axis=1)
+ array([[30],
+ [60]])
+ >>> ai = np.expand_dims(np.argmax(a, axis=1), axis=1)
+ >>> ai
+ array([[1],
+ [0]], dtype=int64)
+ >>> np.take_along_axis(a, ai, axis=1)
+ array([[30],
+ [60]])
+
+ If we want to get the max and min at the same time, we can stack the
+ indices first
+
+ >>> ai_min = np.expand_dims(np.argmin(a, axis=1), axis=1)
+ >>> ai_max = np.expand_dims(np.argmax(a, axis=1), axis=1)
+ >>> ai = np.concatenate([ai_min, ai_max], axis=1)
+ >>> ai
+ array([[0, 1],
+ [1, 0]], dtype=int64)
+ >>> np.take_along_axis(a, ai, axis=1)
+ array([[10, 30],
+ [40, 60]])
+ """
+ # normalize inputs
+ if axis is None:
+ arr = arr.flat
+ arr_shape = (len(arr),) # flatiter has no .shape
+ axis = 0
+ else:
+ axis = normalize_axis_index(axis, arr.ndim)
+ arr_shape = arr.shape
+
+ # use the fancy index
+ return arr[_make_along_axis_idx(arr_shape, indices, axis)]
+
+
+def put_along_axis(arr, indices, values, axis):
+ """
+ Put values into the destination array by matching 1d index and data slices.
+
+ This iterates over matching 1d slices oriented along the specified axis in
+ the index and data arrays, and uses the former to place values into the
+ latter. These slices can be different lengths.
+
+ Functions returning an index along an axis, like `argsort` and
+ `argpartition`, produce suitable indices for this function.
+
+ .. versionadded:: 1.15.0
+
+ Parameters
+ ----------
+ arr: ndarray (Ni..., M, Nk...)
+ Destination array.
+ indices: ndarray (Ni..., J, Nk...)
+ Indices to change along each 1d slice of `arr`. This must match the
+ dimension of arr, but dimensions in Ni and Nk may be 1 to broadcast
+ against `arr`.
+ values: array_like (Ni..., J, Nk...)
+ values to insert at those indices. Its shape and dimension are
+ broadcast to match that of `indices`.
+ axis: int
+ The axis to take 1d slices along. If axis is None, the destination
+ array is treated as if a flattened 1d view had been created of it.
+
+ Notes
+ -----
+ This is equivalent to (but faster than) the following use of `ndindex` and
+ `s_`, which sets each of ``ii`` and ``kk`` to a tuple of indices::
+
+ Ni, M, Nk = a.shape[:axis], a.shape[axis], a.shape[axis+1:]
+ J = indices.shape[axis] # Need not equal M
+
+ for ii in ndindex(Ni):
+ for kk in ndindex(Nk):
+ a_1d = a[ii + s_[:,] + kk]
+ indices_1d = indices[ii + s_[:,] + kk]
+ values_1d = values[ii + s_[:,] + kk]
+ for j in range(J):
+ a_1d[indices_1d[j]] = values_1d[j]
+
+ Equivalently, eliminating the inner loop, the last two lines would be::
+
+ a_1d[indices_1d] = values_1d
+
+ See Also
+ --------
+ take_along_axis :
+ Take values from the input array by matching 1d index and data slices
+
+ Examples
+ --------
+
+ For this sample array
+
+ >>> a = np.array([[10, 30, 20], [60, 40, 50]])
+
+ We can replace the maximum values with:
+
+ >>> ai = np.expand_dims(np.argmax(a, axis=1), axis=1)
+ >>> ai
+ array([[1],
+ [0]], dtype=int64)
+ >>> np.put_along_axis(a, ai, 99, axis=1)
+ >>> a
+ array([[10, 99, 20],
+ [99, 40, 50]])
+
+ """
+ # normalize inputs
+ if axis is None:
+ arr = arr.flat
+ axis = 0
+ arr_shape = (len(arr),) # flatiter has no .shape
+ else:
+ axis = normalize_axis_index(axis, arr.ndim)
+ arr_shape = arr.shape
+
+ # use the fancy index
+ arr[_make_along_axis_idx(arr_shape, indices, axis)] = values
+
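A quick illustration of the ``axis=None`` branch above (example values, not
part of the patch): writing through the flatiter mutates the original array
in place::

    import numpy as np

    a = np.array([[10, 30, 20], [60, 40, 50]])
    ai = np.expand_dims(np.argmax(a), axis=0)  # flat index of the max, shape (1,)
    np.put_along_axis(a, ai, -1, axis=None)
    # a is now array([[10, 30, 20], [-1, 40, 50]])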
+
def apply_along_axis(func1d, axis, arr, *args, **kwargs):
"""
Apply a function to 1-D slices along the given axis.
diff --git a/numpy/lib/stride_tricks.py b/numpy/lib/stride_tricks.py
index 6c240db7f..2abe5cdd1 100644
--- a/numpy/lib/stride_tricks.py
+++ b/numpy/lib/stride_tricks.py
@@ -123,9 +123,12 @@ def _broadcast_to(array, shape, subok, readonly):
needs_writeable = not readonly and array.flags.writeable
extras = ['reduce_ok'] if needs_writeable else []
op_flag = 'readwrite' if needs_writeable else 'readonly'
- broadcast = np.nditer(
+ it = np.nditer(
(array,), flags=['multi_index', 'refs_ok', 'zerosize_ok'] + extras,
- op_flags=[op_flag], itershape=shape, order='C').itviews[0]
+ op_flags=[op_flag], itershape=shape, order='C')
+ with it:
+ # never really has writebackifcopy semantics
+ broadcast = it.itviews[0]
result = _maybe_view_as_subclass(array, broadcast)
if needs_writeable and not result.flags.writeable:
result.flags.writeable = True
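The new ``with it:`` block follows the nditer context-manager protocol that
accompanies the writeback-if-copy semantics: on exit, any pending write-back
buffers are resolved into the operands (the comment notes that `itviews`
never actually needs this). A standalone sketch of the pattern, separate
from the patched function::

    import numpy as np

    a = np.arange(6.0).reshape(2, 3)
    it = np.nditer(a, flags=['multi_index'], op_flags=['readwrite'])
    with it:  # ensures pending write-backs are flushed when the block exits
        for x in it:
            x[...] = x * 2
    # a has been doubled in place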
diff --git a/numpy/lib/tests/test_arraypad.py b/numpy/lib/tests/test_arraypad.py
index 8be49ce67..8ba0370b0 100644
--- a/numpy/lib/tests/test_arraypad.py
+++ b/numpy/lib/tests/test_arraypad.py
@@ -489,6 +489,19 @@ class TestConstant(object):
)
assert_allclose(test, expected)
+ def test_check_large_integers(self):
+ uint64_max = 2 ** 64 - 1
+ arr = np.full(5, uint64_max, dtype=np.uint64)
+ test = np.pad(arr, 1, mode="constant", constant_values=arr.min())
+ expected = np.full(7, uint64_max, dtype=np.uint64)
+ assert_array_equal(test, expected)
+
+ int64_max = 2 ** 63 - 1
+ arr = np.full(5, int64_max, dtype=np.int64)
+ test = np.pad(arr, 1, mode="constant", constant_values=arr.min())
+ expected = np.full(7, int64_max, dtype=np.int64)
+ assert_array_equal(test, expected)
+
class TestLinearRamp(object):
def test_check_simple(self):
diff --git a/numpy/lib/tests/test_arraysetops.py b/numpy/lib/tests/test_arraysetops.py
index 76c36c53e..dace5ade8 100644
--- a/numpy/lib/tests/test_arraysetops.py
+++ b/numpy/lib/tests/test_arraysetops.py
@@ -32,7 +32,46 @@ class TestSetOps(object):
assert_array_equal(c, ed)
assert_array_equal([], intersect1d([], []))
-
+
+ def test_intersect1d_indices(self):
+ # unique inputs
+ a = np.array([1, 2, 3, 4])
+ b = np.array([2, 1, 4, 6])
+ c, i1, i2 = intersect1d(a, b, assume_unique=True, return_indices=True)
+ ee = np.array([1, 2, 4])
+ assert_array_equal(c, ee)
+ assert_array_equal(a[i1], ee)
+ assert_array_equal(b[i2], ee)
+
+ # non-unique inputs
+ a = np.array([1, 2, 2, 3, 4, 3, 2])
+ b = np.array([1, 8, 4, 2, 2, 3, 2, 3])
+ c, i1, i2 = intersect1d(a, b, return_indices=True)
+ ef = np.array([1, 2, 3, 4])
+ assert_array_equal(c, ef)
+ assert_array_equal(a[i1], ef)
+ assert_array_equal(b[i2], ef)
+
+ # non1d, unique inputs
+ a = np.array([[2, 4, 5, 6], [7, 8, 1, 15]])
+ b = np.array([[3, 2, 7, 6], [10, 12, 8, 9]])
+ c, i1, i2 = intersect1d(a, b, assume_unique=True, return_indices=True)
+ ui1 = np.unravel_index(i1, a.shape)
+ ui2 = np.unravel_index(i2, b.shape)
+ ea = np.array([2, 6, 7, 8])
+ assert_array_equal(ea, a[ui1])
+ assert_array_equal(ea, b[ui2])
+
+ # non1d, inputs not assumed to be unique
+ a = np.array([[2, 4, 5, 6, 6], [4, 7, 8, 7, 2]])
+ b = np.array([[3, 2, 7, 7], [10, 12, 8, 7]])
+ c, i1, i2 = intersect1d(a, b, return_indices=True)
+ ui1 = np.unravel_index(i1, a.shape)
+ ui2 = np.unravel_index(i2, b.shape)
+ ea = np.array([2, 7, 8])
+ assert_array_equal(ea, a[ui1])
+ assert_array_equal(ea, b[ui2])
+
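With ``return_indices=True`` and n-d inputs, the indices returned by
`intersect1d` refer to the flattened arrays, which is why the test unravels
them. A condensed sketch of that relationship (example values, not part of
the patch)::

    import numpy as np

    a = np.array([[2, 4], [7, 8]])
    b = np.array([[3, 2], [8, 9]])
    c, i1, i2 = np.intersect1d(a, b, assume_unique=True, return_indices=True)
    assert (a.ravel()[i1] == c).all()    # i1 indexes a.ravel()
    ui1 = np.unravel_index(i1, a.shape)  # back to 2-d coordinates
    assert (a[ui1] == c).all()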
def test_setxor1d(self):
a = np.array([5, 7, 1, 2])
b = np.array([2, 4, 3, 1, 5])
@@ -74,8 +113,6 @@ class TestSetOps(object):
assert_array_equal([1,7,8], ediff1d(two_elem, to_end=[7,8]))
assert_array_equal([7,1], ediff1d(two_elem, to_begin=7))
assert_array_equal([5,6,1], ediff1d(two_elem, to_begin=[5,6]))
- assert(isinstance(ediff1d(np.matrix(1)), np.matrix))
- assert(isinstance(ediff1d(np.matrix(1), to_begin=1), np.matrix))
def test_isin(self):
# the tests for in1d cover most of isin's behavior
diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py
index 0a4c7c370..4103a9eb3 100644
--- a/numpy/lib/tests/test_function_base.py
+++ b/numpy/lib/tests/test_function_base.py
@@ -104,9 +104,10 @@ class TestRot90(object):
class TestFlip(object):
def test_axes(self):
- assert_raises(ValueError, np.flip, np.ones(4), axis=1)
- assert_raises(ValueError, np.flip, np.ones((4, 4)), axis=2)
- assert_raises(ValueError, np.flip, np.ones((4, 4)), axis=-3)
+ assert_raises(np.AxisError, np.flip, np.ones(4), axis=1)
+ assert_raises(np.AxisError, np.flip, np.ones((4, 4)), axis=2)
+ assert_raises(np.AxisError, np.flip, np.ones((4, 4)), axis=-3)
+ assert_raises(np.AxisError, np.flip, np.ones((4, 4)), axis=(0, 3))
def test_basic_lr(self):
a = get_mat(4)
@@ -173,6 +174,35 @@ class TestFlip(object):
assert_equal(np.flip(a, i),
np.flipud(a.swapaxes(0, i)).swapaxes(i, 0))
+ def test_default_axis(self):
+ a = np.array([[1, 2, 3],
+ [4, 5, 6]])
+ b = np.array([[6, 5, 4],
+ [3, 2, 1]])
+ assert_equal(np.flip(a), b)
+
+ def test_multiple_axes(self):
+ a = np.array([[[0, 1],
+ [2, 3]],
+ [[4, 5],
+ [6, 7]]])
+
+ assert_equal(np.flip(a, axis=()), a)
+
+ b = np.array([[[5, 4],
+ [7, 6]],
+ [[1, 0],
+ [3, 2]]])
+
+ assert_equal(np.flip(a, axis=(0, 2)), b)
+
+ c = np.array([[[3, 2],
+ [1, 0]],
+ [[7, 6],
+ [5, 4]]])
+
+ assert_equal(np.flip(a, axis=(1, 2)), c)
+
class TestAny(object):
@@ -257,9 +287,6 @@ class TestAverage(object):
assert_almost_equal(y5.mean(0), average(y5, 0))
assert_almost_equal(y5.mean(1), average(y5, 1))
- y6 = np.matrix(rand(5, 5))
- assert_array_equal(y6.mean(0), average(y6, 0))
-
def test_weights(self):
y = np.arange(10)
w = np.arange(10)
@@ -327,14 +354,6 @@ class TestAverage(object):
assert_equal(type(np.average(a)), subclass)
assert_equal(type(np.average(a, weights=w)), subclass)
- # also test matrices
- a = np.matrix([[1,2],[3,4]])
- w = np.matrix([[1,2],[3,4]])
-
- r = np.average(a, axis=0, weights=w)
- assert_equal(type(r), np.matrix)
- assert_equal(r, [[2.5, 10.0/3]])
-
def test_upcasting(self):
types = [('i4', 'i4', 'f8'), ('i4', 'f4', 'f8'), ('f4', 'i4', 'f8'),
('f4', 'f4', 'f4'), ('f4', 'f8', 'f8')]
@@ -1495,9 +1514,9 @@ class TestDigitize(object):
class TestUnwrap(object):
def test_simple(self):
- # check that unwrap removes jumps greather that 2*pi
+ # check that unwrap removes jumps greater than 2*pi
assert_array_equal(unwrap([1, 1 + 2 * np.pi]), [1, 1])
- # check that unwrap maintans continuity
+ # check that unwrap maintains continuity
assert_(np.all(diff(unwrap(rand(10) * 100)) < np.pi))
@@ -1593,16 +1612,6 @@ class TestTrapz(object):
xm = np.ma.array(x, mask=mask)
assert_almost_equal(trapz(y, xm), r)
- def test_matrix(self):
- # Test to make sure matrices give the same answer as ndarrays
- x = np.linspace(0, 5)
- y = x * x
- r = trapz(y, x)
- mx = np.matrix(x)
- my = np.matrix(y)
- mr = trapz(my, mx)
- assert_almost_equal(mr, r)
-
class TestSinc(object):
@@ -2719,6 +2728,28 @@ class TestPercentile(object):
a, [0.3, 0.6], (0, 2), interpolation='nearest'), b)
+class TestQuantile(object):
+ # most of this is already tested by TestPercentile
+
+ def test_basic(self):
+ x = np.arange(8) * 0.5
+ assert_equal(np.quantile(x, 0), 0.)
+ assert_equal(np.quantile(x, 1), 3.5)
+ assert_equal(np.quantile(x, 0.5), 1.75)
+
+ def test_no_p_overwrite(self):
+ # this is worth retesting, because quantile does not make a copy
+ p0 = np.array([0, 0.75, 0.25, 0.5, 1.0])
+ p = p0.copy()
+ np.quantile(np.arange(100.), p, interpolation="midpoint")
+ assert_array_equal(p, p0)
+
+ p0 = p0.tolist()
+ p = p.tolist()
+ np.quantile(np.arange(100.), p, interpolation="midpoint")
+ assert_array_equal(p, p0)
+
+
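The new `np.quantile` is the 0..1-scaled counterpart of `np.percentile`, so
most behavior is already covered by the percentile tests; the relationship
can be sketched as::

    import numpy as np

    x = np.arange(8) * 0.5
    assert np.quantile(x, 0.5) == np.percentile(x, 50)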
class TestMedian(object):
def test_basic(self):
diff --git a/numpy/lib/tests/test_histograms.py b/numpy/lib/tests/test_histograms.py
index 06daacbdc..e16ae12c2 100644
--- a/numpy/lib/tests/test_histograms.py
+++ b/numpy/lib/tests/test_histograms.py
@@ -253,7 +253,7 @@ class TestHistogram(object):
one_nan = np.array([0, 1, np.nan])
all_nan = np.array([np.nan, np.nan])
- # the internal commparisons with NaN give warnings
+ # the internal comparisons with NaN give warnings
sup = suppress_warnings()
sup.filter(RuntimeWarning)
with sup:
@@ -613,8 +613,6 @@ class TestHistogramdd(object):
assert_raises(ValueError, np.histogramdd, x, bins=[-1, 2, 4, 5])
assert_raises(ValueError, np.histogramdd, x, bins=[1, 0.99, 1, 1])
assert_raises(
- ValueError, np.histogramdd, x, bins=[1, 1, 1, [1, 2, 2, 3]])
- assert_raises(
ValueError, np.histogramdd, x, bins=[1, 1, 1, [1, 2, 3, -3]])
assert_(np.histogramdd(x, bins=[1, 1, 1, [1, 2, 3, 4]]))
@@ -646,7 +644,7 @@ class TestHistogramdd(object):
bins = [[0., 0.5, 1.0]]
hist, _ = histogramdd(x, bins=bins)
assert_(hist[0] == 0.0)
- assert_(hist[1] == 1.)
+ assert_(hist[1] == 0.0)
x = [1.0001]
bins = [[0., 0.5, 1.0]]
hist, _ = histogramdd(x, bins=bins)
@@ -660,3 +658,40 @@ class TestHistogramdd(object):
range=[[0.0, 1.0], [0.25, 0.75], [0.25, np.inf]])
assert_raises(ValueError, histogramdd, vals,
range=[[0.0, 1.0], [np.nan, 0.75], [0.25, 0.5]])
+
+ def test_equal_edges(self):
+ """ Test that adjacent entries in an edge array can be equal """
+ x = np.array([0, 1, 2])
+ y = np.array([0, 1, 2])
+ x_edges = np.array([0, 2, 2])
+ y_edges = 1
+ hist, edges = histogramdd((x, y), bins=(x_edges, y_edges))
+
+ hist_expected = np.array([
+ [2.],
+ [1.], # x == 2 falls in the final bin
+ ])
+ assert_equal(hist, hist_expected)
+
+ def test_edge_dtype(self):
+ """ Test that if an edge array is input, its type is preserved """
+ x = np.array([0, 10, 20])
+ y = x / 10
+ x_edges = np.array([0, 5, 15, 20])
+ y_edges = x_edges / 10
+ hist, edges = histogramdd((x, y), bins=(x_edges, y_edges))
+
+ assert_equal(edges[0].dtype, x_edges.dtype)
+ assert_equal(edges[1].dtype, y_edges.dtype)
+
+ def test_large_integers(self):
+ big = 2**60 # Too large to represent with a full precision float
+
+ x = np.array([0], np.int64)
+ x_edges = np.array([-1, +1], np.int64)
+ y = big + x
+ y_edges = big + x_edges
+
+ hist, edges = histogramdd((x, y), bins=(x_edges, y_edges))
+
+ assert_equal(hist[0, 0], 1)
diff --git a/numpy/lib/tests/test_index_tricks.py b/numpy/lib/tests/test_index_tricks.py
index f934e952a..089a7589a 100644
--- a/numpy/lib/tests/test_index_tricks.py
+++ b/numpy/lib/tests/test_index_tricks.py
@@ -184,37 +184,6 @@ class TestConcatenator(object):
assert_array_equal(d[:5, :], b)
assert_array_equal(d[5:, :], c)
- def test_matrix(self):
- a = [1, 2]
- b = [3, 4]
-
- ab_r = np.r_['r', a, b]
- ab_c = np.r_['c', a, b]
-
- assert_equal(type(ab_r), np.matrix)
- assert_equal(type(ab_c), np.matrix)
-
- assert_equal(np.array(ab_r), [[1,2,3,4]])
- assert_equal(np.array(ab_c), [[1],[2],[3],[4]])
-
- assert_raises(ValueError, lambda: np.r_['rc', a, b])
-
- def test_matrix_scalar(self):
- r = np.r_['r', [1, 2], 3]
- assert_equal(type(r), np.matrix)
- assert_equal(np.array(r), [[1,2,3]])
-
- def test_matrix_builder(self):
- a = np.array([1])
- b = np.array([2])
- c = np.array([3])
- d = np.array([4])
- actual = np.r_['a, b; c, d']
- expected = np.bmat([[a, b], [c, d]])
-
- assert_equal(actual, expected)
- assert_equal(type(actual), type(expected))
-
def test_0d(self):
assert_equal(r_[0, np.array(1), 2], [0, 1, 2])
assert_equal(r_[[0, 1, 2], np.array(3)], [0, 1, 2, 3])
diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py
index 06c57d49c..f58c9e33d 100644
--- a/numpy/lib/tests/test_io.py
+++ b/numpy/lib/tests/test_io.py
@@ -23,7 +23,7 @@ from numpy.ma.testutils import assert_equal
from numpy.testing import (
assert_warns, assert_, SkipTest, assert_raises_regex, assert_raises,
assert_allclose, assert_array_equal, temppath, tempdir, IS_PYPY,
- HAS_REFCOUNT, suppress_warnings,
+ HAS_REFCOUNT, suppress_warnings, assert_no_gc_cycles,
)
@@ -468,6 +468,26 @@ class TestSaveTxt(object):
[b'(3.142e+00+2.718e+00j) (3.142e+00+2.718e+00j)\n',
b'(3.142e+00+2.718e+00j) (3.142e+00+2.718e+00j)\n'])
+ def test_complex_negative_exponent(self):
+ # Prior to 1.15, some formats generated x+-yj, see gh-7895
+ ncols = 2
+ nrows = 2
+ a = np.zeros((ncols, nrows), dtype=np.complex128)
+ re = np.pi
+ im = np.e
+ a[:] = re - 1.0j * im
+ c = BytesIO()
+ np.savetxt(c, a, fmt='%.3e')
+ c.seek(0)
+ lines = c.readlines()
+ assert_equal(
+ lines,
+ [b' (3.142e+00-2.718e+00j) (3.142e+00-2.718e+00j)\n',
+ b' (3.142e+00-2.718e+00j) (3.142e+00-2.718e+00j)\n'])
+
def test_custom_writer(self):
class CustomWriter(list):
@@ -916,6 +936,26 @@ class TestLoadTxt(LoadTxtBase):
res = np.loadtxt(c, dtype=complex)
assert_equal(res, tgt)
+ def test_complex_misformatted(self):
+ # test for backward compatibility
+ # some complex formats used to generate x+-yj
+ a = np.zeros((2, 2), dtype=np.complex128)
+ re = np.pi
+ im = np.e
+ a[:] = re - 1.0j * im
+ c = BytesIO()
+ np.savetxt(c, a, fmt='%.16e')
+ c.seek(0)
+ txt = c.read()
+ c.seek(0)
+ # misformat the sign on the imaginary part, gh 7895
+ txt_bad = txt.replace(b'e+00-', b'e00+-')
+ assert_(txt_bad != txt)
+ c.write(txt_bad)
+ c.seek(0)
+ res = np.loadtxt(c, dtype=complex)
+ assert_equal(res, a)
+
def test_universal_newline(self):
with temppath() as name:
with open(name, 'w') as f:
@@ -1277,6 +1317,13 @@ M 33 21.99
assert_(w[0].category is np.VisibleDeprecationWarning)
assert_equal(test, ctrl)
+ def test_names_and_comments_none(self):
+ # Tests case when names is true but comments is None (gh-10780)
+ data = TextIO('col1 col2\n 1 2\n 3 4')
+ test = np.genfromtxt(data, dtype=(int, int), comments=None, names=True)
+ control = np.array([(1, 2), (3, 4)], dtype=[('col1', int), ('col2', int)])
+ assert_equal(test, control)
+
def test_autonames_and_usecols(self):
# Tests names and usecols
data = TextIO('A B C D\n aaaa 121 45 9.1')
@@ -2369,14 +2416,5 @@ def test_load_refcount():
np.savez(f, [1, 2, 3])
f.seek(0)
- assert_(gc.isenabled())
- gc.disable()
- try:
- gc.collect()
+ with assert_no_gc_cycles():
np.load(f)
- # gc.collect returns the number of unreachable objects in cycles that
- # were found -- we are checking that no cycles were created by np.load
- n_objects_in_cycles = gc.collect()
- finally:
- gc.enable()
- assert_equal(n_objects_in_cycles, 0)
diff --git a/numpy/lib/tests/test_nanfunctions.py b/numpy/lib/tests/test_nanfunctions.py
index 1f403f7b8..504372faf 100644
--- a/numpy/lib/tests/test_nanfunctions.py
+++ b/numpy/lib/tests/test_nanfunctions.py
@@ -113,42 +113,46 @@ class TestNanFunctions_MinMax(object):
for f in self.nanfuncs:
assert_(f(0.) == 0.)
- def test_matrices(self):
+ def test_subclass(self):
+ class MyNDArray(np.ndarray):
+ pass
+
# Check that it works and that type and
# shape are preserved
- mat = np.matrix(np.eye(3))
+ mine = np.eye(3).view(MyNDArray)
for f in self.nanfuncs:
- res = f(mat, axis=0)
- assert_(isinstance(res, np.matrix))
- assert_(res.shape == (1, 3))
- res = f(mat, axis=1)
- assert_(isinstance(res, np.matrix))
- assert_(res.shape == (3, 1))
- res = f(mat)
- assert_(np.isscalar(res))
+ res = f(mine, axis=0)
+ assert_(isinstance(res, MyNDArray))
+ assert_(res.shape == (3,))
+ res = f(mine, axis=1)
+ assert_(isinstance(res, MyNDArray))
+ assert_(res.shape == (3,))
+ res = f(mine)
+ assert_(res.shape == ())
+
# check that rows of nan are dealt with for subclasses (#4628)
- mat[1] = np.nan
+ mine[1] = np.nan
for f in self.nanfuncs:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
- res = f(mat, axis=0)
- assert_(isinstance(res, np.matrix))
+ res = f(mine, axis=0)
+ assert_(isinstance(res, MyNDArray))
assert_(not np.any(np.isnan(res)))
assert_(len(w) == 0)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
- res = f(mat, axis=1)
- assert_(isinstance(res, np.matrix))
- assert_(np.isnan(res[1, 0]) and not np.isnan(res[0, 0])
- and not np.isnan(res[2, 0]))
+ res = f(mine, axis=1)
+ assert_(isinstance(res, MyNDArray))
+ assert_(np.isnan(res[1]) and not np.isnan(res[0])
+ and not np.isnan(res[2]))
assert_(len(w) == 1, 'no warning raised')
assert_(issubclass(w[0].category, RuntimeWarning))
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
- res = f(mat)
- assert_(np.isscalar(res))
+ res = f(mine)
+ assert_(res.shape == ())
+ assert_(not np.isnan(res))
assert_(len(w) == 0)
@@ -209,19 +213,22 @@ class TestNanFunctions_ArgminArgmax(object):
for f in self.nanfuncs:
assert_(f(0.) == 0.)
- def test_matrices(self):
+ def test_subclass(self):
+ class MyNDArray(np.ndarray):
+ pass
+
# Check that it works and that type and
# shape are preserved
- mat = np.matrix(np.eye(3))
+ mine = np.eye(3).view(MyNDArray)
for f in self.nanfuncs:
- res = f(mat, axis=0)
- assert_(isinstance(res, np.matrix))
- assert_(res.shape == (1, 3))
- res = f(mat, axis=1)
- assert_(isinstance(res, np.matrix))
- assert_(res.shape == (3, 1))
- res = f(mat)
- assert_(np.isscalar(res))
+ res = f(mine, axis=0)
+ assert_(isinstance(res, MyNDArray))
+ assert_(res.shape == (3,))
+ res = f(mine, axis=1)
+ assert_(isinstance(res, MyNDArray))
+ assert_(res.shape == (3,))
+ res = f(mine)
+ assert_(res.shape == ())
class TestNanFunctions_IntTypes(object):
@@ -381,19 +388,27 @@ class SharedNanFunctionsTestsMixin(object):
for f in self.nanfuncs:
assert_(f(0.) == 0.)
- def test_matrices(self):
+ def test_subclass(self):
+ class MyNDArray(np.ndarray):
+ pass
+
# Check that it works and that type and
# shape are preserved
- mat = np.matrix(np.eye(3))
+ array = np.eye(3)
+ mine = array.view(MyNDArray)
for f in self.nanfuncs:
- res = f(mat, axis=0)
- assert_(isinstance(res, np.matrix))
- assert_(res.shape == (1, 3))
- res = f(mat, axis=1)
- assert_(isinstance(res, np.matrix))
- assert_(res.shape == (3, 1))
- res = f(mat)
- assert_(np.isscalar(res))
+ expected_shape = f(array, axis=0).shape
+ res = f(mine, axis=0)
+ assert_(isinstance(res, MyNDArray))
+ assert_(res.shape == expected_shape)
+ expected_shape = f(array, axis=1).shape
+ res = f(mine, axis=1)
+ assert_(isinstance(res, MyNDArray))
+ assert_(res.shape == expected_shape)
+ expected_shape = f(array).shape
+ res = f(mine)
+ assert_(isinstance(res, MyNDArray))
+ assert_(res.shape == expected_shape)
class TestNanFunctions_SumProd(SharedNanFunctionsTestsMixin):
@@ -481,18 +496,6 @@ class TestNanFunctions_CumSumProd(SharedNanFunctionsTestsMixin):
res = f(d, axis=axis)
assert_equal(res.shape, (3, 5, 7, 11))
- def test_matrices(self):
- # Check that it works and that type and
- # shape are preserved
- mat = np.matrix(np.eye(3))
- for f in self.nanfuncs:
- for axis in np.arange(2):
- res = f(mat, axis=axis)
- assert_(isinstance(res, np.matrix))
- assert_(res.shape == (3, 3))
- res = f(mat)
- assert_(res.shape == (1, 3*3))
-
def test_result_values(self):
for axis in (-2, -1, 0, 1, None):
tgt = np.cumprod(_ndat_ones, axis=axis)
@@ -886,3 +889,39 @@ class TestNanFunctions_Percentile(object):
megamat = np.ones((3, 4, 5, 6))
assert_equal(np.nanpercentile(megamat, perc, axis=(1, 2)).shape, (2, 3, 6))
+
+
+class TestNanFunctions_Quantile(object):
+ # most of this is already tested by TestPercentile
+
+ def test_regression(self):
+ ar = np.arange(24).reshape(2, 3, 4).astype(float)
+ ar[0][1] = np.nan
+
+ assert_equal(np.nanquantile(ar, q=0.5), np.nanpercentile(ar, q=50))
+ assert_equal(np.nanquantile(ar, q=0.5, axis=0),
+ np.nanpercentile(ar, q=50, axis=0))
+ assert_equal(np.nanquantile(ar, q=0.5, axis=1),
+ np.nanpercentile(ar, q=50, axis=1))
+ assert_equal(np.nanquantile(ar, q=[0.5], axis=1),
+ np.nanpercentile(ar, q=[50], axis=1))
+ assert_equal(np.nanquantile(ar, q=[0.25, 0.5, 0.75], axis=1),
+ np.nanpercentile(ar, q=[25, 50, 75], axis=1))
+
+ def test_basic(self):
+ x = np.arange(8) * 0.5
+ assert_equal(np.nanquantile(x, 0), 0.)
+ assert_equal(np.nanquantile(x, 1), 3.5)
+ assert_equal(np.nanquantile(x, 0.5), 1.75)
+
+ def test_no_p_overwrite(self):
+ # this is worth retesting, because quantile does not make a copy
+ p0 = np.array([0, 0.75, 0.25, 0.5, 1.0])
+ p = p0.copy()
+ np.nanquantile(np.arange(100.), p, interpolation="midpoint")
+ assert_array_equal(p, p0)
+
+ p0 = p0.tolist()
+ p = p.tolist()
+ np.nanquantile(np.arange(100.), p, interpolation="midpoint")
+ assert_array_equal(p, p0)
diff --git a/numpy/lib/tests/test_polynomial.py b/numpy/lib/tests/test_polynomial.py
index 1d7b8cbac..7f6fca4a4 100644
--- a/numpy/lib/tests/test_polynomial.py
+++ b/numpy/lib/tests/test_polynomial.py
@@ -1,15 +1,13 @@
-from __future__ import division, absolute_import, print_function
-
'''
>>> p = np.poly1d([1.,2,3])
>>> p
-poly1d([ 1., 2., 3.])
+poly1d([1., 2., 3.])
>>> print(p)
2
1 x + 2 x + 3
>>> q = np.poly1d([3.,2,1])
>>> q
-poly1d([ 3., 2., 1.])
+poly1d([3., 2., 1.])
>>> print(q)
2
3 x + 2 x + 1
@@ -30,23 +28,23 @@ poly1d([ 3., 2., 1.])
86.0
>>> p * q
-poly1d([ 3., 8., 14., 8., 3.])
+poly1d([ 3., 8., 14., 8., 3.])
>>> p / q
-(poly1d([ 0.33333333]), poly1d([ 1.33333333, 2.66666667]))
+(poly1d([0.33333333]), poly1d([1.33333333, 2.66666667]))
>>> p + q
-poly1d([ 4., 4., 4.])
+poly1d([4., 4., 4.])
>>> p - q
poly1d([-2., 0., 2.])
>>> p ** 4
-poly1d([ 1., 8., 36., 104., 214., 312., 324., 216., 81.])
+poly1d([ 1., 8., 36., 104., 214., 312., 324., 216., 81.])
>>> p(q)
-poly1d([ 9., 12., 16., 8., 6.])
+poly1d([ 9., 12., 16., 8., 6.])
>>> q(p)
-poly1d([ 3., 12., 32., 40., 34.])
+poly1d([ 3., 12., 32., 40., 34.])
>>> np.asarray(p)
-array([ 1., 2., 3.])
+array([1., 2., 3.])
>>> len(p)
2
@@ -54,16 +52,16 @@ array([ 1., 2., 3.])
(3.0, 2.0, 1.0, 0)
>>> p.integ()
-poly1d([ 0.33333333, 1. , 3. , 0. ])
+poly1d([0.33333333, 1. , 3. , 0. ])
>>> p.integ(1)
-poly1d([ 0.33333333, 1. , 3. , 0. ])
+poly1d([0.33333333, 1. , 3. , 0. ])
>>> p.integ(5)
-poly1d([ 0.00039683, 0.00277778, 0.025 , 0. , 0. ,
- 0. , 0. , 0. ])
+poly1d([0.00039683, 0.00277778, 0.025 , 0. , 0. ,
+ 0. , 0. , 0. ])
>>> p.deriv()
-poly1d([ 2., 2.])
+poly1d([2., 2.])
>>> p.deriv(2)
-poly1d([ 2.])
+poly1d([2.])
>>> q = np.poly1d([1.,2,3], variable='y')
>>> print(q)
@@ -75,9 +73,11 @@ poly1d([ 2.])
1 lambda + 2 lambda + 3
>>> np.polydiv(np.poly1d([1,0,-1]), np.poly1d([1,1]))
-(poly1d([ 1., -1.]), poly1d([ 0.]))
+(poly1d([ 1., -1.]), poly1d([0.]))
'''
+from __future__ import division, absolute_import, print_function
+
import numpy as np
from numpy.testing import (
assert_, assert_equal, assert_array_equal, assert_almost_equal,
diff --git a/numpy/lib/tests/test_shape_base.py b/numpy/lib/tests/test_shape_base.py
index 080fd066d..c95894f94 100644
--- a/numpy/lib/tests/test_shape_base.py
+++ b/numpy/lib/tests/test_shape_base.py
@@ -2,16 +2,106 @@ from __future__ import division, absolute_import, print_function
import numpy as np
import warnings
+import functools
from numpy.lib.shape_base import (
apply_along_axis, apply_over_axes, array_split, split, hsplit, dsplit,
- vsplit, dstack, column_stack, kron, tile, expand_dims,
+ vsplit, dstack, column_stack, kron, tile, expand_dims, take_along_axis,
+ put_along_axis
)
from numpy.testing import (
assert_, assert_equal, assert_array_equal, assert_raises, assert_warns
)
+def _add_keepdims(func):
+ """ hack in keepdims behavior into a function taking an axis """
+ @functools.wraps(func)
+ def wrapped(a, axis, **kwargs):
+ res = func(a, axis=axis, **kwargs)
+ if axis is None:
+ axis = 0 # res is now a scalar, so we can insert this anywhere
+ return np.expand_dims(res, axis=axis)
+ return wrapped
+
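What the helper produces, sketched with example values (not part of the
patch)::

    import numpy as np

    a = np.array([[10, 30, 20], [60, 40, 50]])
    argmax_kd = _add_keepdims(np.argmax)
    argmax_kd(a, axis=1)     # shape (2, 1) -- ready for take_along_axis
    argmax_kd(a, axis=None)  # shape (1,) -- the flat argmax wrapped in 1d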
+
+class TestTakeAlongAxis(object):
+ def test_argequivalent(self):
+ """ Test it translates from arg<func> to <func> """
+ from numpy.random import rand
+ a = rand(3, 4, 5)
+
+ funcs = [
+ (np.sort, np.argsort, dict()),
+ (_add_keepdims(np.min), _add_keepdims(np.argmin), dict()),
+ (_add_keepdims(np.max), _add_keepdims(np.argmax), dict()),
+ (np.partition, np.argpartition, dict(kth=2)),
+ ]
+
+ for func, argfunc, kwargs in funcs:
+ for axis in list(range(a.ndim)) + [None]:
+ a_func = func(a, axis=axis, **kwargs)
+ ai_func = argfunc(a, axis=axis, **kwargs)
+ assert_equal(a_func, take_along_axis(a, ai_func, axis=axis))
+
+ def test_invalid(self):
+ """ Test it errors when indices has too few dimensions """
+ a = np.ones((10, 10))
+ ai = np.ones((10, 2), dtype=np.intp)
+
+ # sanity check
+ take_along_axis(a, ai, axis=1)
+
+ # not enough indices
+ assert_raises(ValueError, take_along_axis, a, np.array(1), axis=1)
+ # bool arrays not allowed
+ assert_raises(IndexError, take_along_axis, a, ai.astype(bool), axis=1)
+ # float arrays not allowed
+ assert_raises(IndexError, take_along_axis, a, ai.astype(float), axis=1)
+ # invalid axis
+ assert_raises(np.AxisError, take_along_axis, a, ai, axis=10)
+
+ def test_empty(self):
+ """ Test everything is ok with empty results, even with inserted dims """
+ a = np.ones((3, 4, 5))
+ ai = np.ones((3, 0, 5), dtype=np.intp)
+
+ actual = take_along_axis(a, ai, axis=1)
+ assert_equal(actual.shape, ai.shape)
+
+ def test_broadcast(self):
+ """ Test that non-indexing dimensions are broadcast in both directions """
+ a = np.ones((3, 4, 1))
+ ai = np.ones((1, 2, 5), dtype=np.intp)
+ actual = take_along_axis(a, ai, axis=1)
+ assert_equal(actual.shape, (3, 2, 5))
+
+
+class TestPutAlongAxis(object):
+ def test_replace_max(self):
+ a_base = np.array([[10, 30, 20], [60, 40, 50]])
+
+ for axis in list(range(a_base.ndim)) + [None]:
+ # we mutate this in the loop
+ a = a_base.copy()
+
+ # replace the max with a small value
+ i_max = _add_keepdims(np.argmax)(a, axis=axis)
+ put_along_axis(a, i_max, -99, axis=axis)
+
+ # find the new minimum, which should be where the max was
+ i_min = _add_keepdims(np.argmin)(a, axis=axis)
+
+ assert_equal(i_min, i_max)
+
+ def test_broadcast(self):
+ """ Test that non-indexing dimensions are broadcast in both directions """
+ a = np.ones((3, 4, 1))
+ ai = np.arange(10, dtype=np.intp).reshape((1, 2, 5)) % 4
+ put_along_axis(a, ai, 20, axis=1)
+ assert_equal(take_along_axis(a, ai, axis=1), 20)
+
+
class TestApplyAlongAxis(object):
def test_simple(self):
a = np.ones((20, 10), 'd')
@@ -29,19 +119,21 @@ class TestApplyAlongAxis(object):
[[27, 30, 33], [36, 39, 42], [45, 48, 51]])
def test_preserve_subclass(self):
- # this test is particularly malicious because matrix
- # refuses to become 1d
def double(row):
return row * 2
- m = np.matrix([[0, 1], [2, 3]])
- expected = np.matrix([[0, 2], [4, 6]])
+
+ class MyNDArray(np.ndarray):
+ pass
+
+ m = np.array([[0, 1], [2, 3]]).view(MyNDArray)
+ expected = np.array([[0, 2], [4, 6]]).view(MyNDArray)
result = apply_along_axis(double, 0, m)
- assert_(isinstance(result, np.matrix))
+ assert_(isinstance(result, MyNDArray))
assert_array_equal(result, expected)
result = apply_along_axis(double, 1, m)
- assert_(isinstance(result, np.matrix))
+ assert_(isinstance(result, MyNDArray))
assert_array_equal(result, expected)
def test_subclass(self):
@@ -79,7 +171,7 @@ class TestApplyAlongAxis(object):
def test_axis_insertion(self, cls=np.ndarray):
def f1to2(x):
- """produces an assymmetric non-square matrix from x"""
+ """produces an asymmetric non-square matrix from x"""
assert_equal(x.ndim, 1)
return (x[::-1] * x[1:,None]).view(cls)
@@ -123,7 +215,7 @@ class TestApplyAlongAxis(object):
def test_axis_insertion_ma(self):
def f1to2(x):
- """produces an assymmetric non-square matrix from x"""
+ """produces an asymmetric non-square matrix from x"""
assert_equal(x.ndim, 1)
res = x[::-1] * x[1:,None]
return np.ma.masked_where(res%5==0, res)
@@ -492,16 +584,10 @@ class TestSqueeze(object):
class TestKron(object):
def test_return_type(self):
- a = np.ones([2, 2])
- m = np.asmatrix(a)
- assert_equal(type(kron(a, a)), np.ndarray)
- assert_equal(type(kron(m, m)), np.matrix)
- assert_equal(type(kron(a, m)), np.matrix)
- assert_equal(type(kron(m, a)), np.matrix)
-
class myarray(np.ndarray):
__array_priority__ = 0.0
+ a = np.ones([2, 2])
ma = myarray(a.shape, a.dtype, a.data)
assert_equal(type(kron(a, a)), np.ndarray)
assert_equal(type(kron(ma, ma)), myarray)
diff --git a/numpy/lib/twodim_base.py b/numpy/lib/twodim_base.py
index 402c18850..cca316e9a 100644
--- a/numpy/lib/twodim_base.py
+++ b/numpy/lib/twodim_base.py
@@ -650,7 +650,7 @@ def histogram2d(x, y, bins=10, range=None, normed=False, weights=None):
N = 1
if N != 1 and N != 2:
- xedges = yedges = asarray(bins, float)
+ xedges = yedges = asarray(bins)
bins = [xedges, yedges]
hist, edges = histogramdd([x, y], bins, range, normed, weights)
return hist, edges[0], edges[1]
diff --git a/numpy/linalg/lapack_litemodule.c b/numpy/linalg/lapack_litemodule.c
index bdde2e22d..696a6d874 100644
--- a/numpy/linalg/lapack_litemodule.c
+++ b/numpy/linalg/lapack_litemodule.c
@@ -331,10 +331,10 @@ static struct PyModuleDef moduledef = {
/* Initialization function for the module */
#if PY_MAJOR_VERSION >= 3
-#define RETVAL m
+#define RETVAL(x) x
PyMODINIT_FUNC PyInit_lapack_lite(void)
#else
-#define RETVAL
+#define RETVAL(x)
PyMODINIT_FUNC
initlapack_lite(void)
#endif
@@ -347,12 +347,12 @@ initlapack_lite(void)
"", (PyObject*)NULL,PYTHON_API_VERSION);
#endif
if (m == NULL) {
- return RETVAL;
+ return RETVAL(NULL);
}
import_array();
d = PyModule_GetDict(m);
LapackError = PyErr_NewException("lapack_lite.LapackError", NULL, NULL);
PyDict_SetItemString(d, "LapackError", LapackError);
- return RETVAL;
+ return RETVAL(m);
}
diff --git a/numpy/linalg/linalg.py b/numpy/linalg/linalg.py
index 8ecb90dc9..98af0733b 100644
--- a/numpy/linalg/linalg.py
+++ b/numpy/linalg/linalg.py
@@ -16,20 +16,20 @@ __all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv',
'svd', 'eig', 'eigh', 'lstsq', 'norm', 'qr', 'cond', 'matrix_rank',
'LinAlgError', 'multi_dot']
+import operator
import warnings
from numpy.core import (
array, asarray, zeros, empty, empty_like, intc, single, double,
- csingle, cdouble, inexact, complexfloating, newaxis, ravel, all, Inf, dot,
- add, multiply, sqrt, maximum, fastCopyAndTranspose, sum, isfinite, size,
- finfo, errstate, geterrobj, longdouble, moveaxis, amin, amax, product, abs,
- broadcast, atleast_2d, intp, asanyarray, object_, ones, matmul,
- swapaxes, divide, count_nonzero, ndarray, isnan
+ csingle, cdouble, inexact, complexfloating, newaxis, all, Inf, dot,
+ add, multiply, sqrt, fastCopyAndTranspose, sum, isfinite,
+ finfo, errstate, geterrobj, moveaxis, amin, amax, product, abs,
+ atleast_2d, intp, asanyarray, object_, matmul,
+ swapaxes, divide, count_nonzero, isnan
)
from numpy.core.multiarray import normalize_axis_index
-from numpy.lib import triu, asfarray
+from numpy.lib.twodim_base import triu, eye
from numpy.linalg import lapack_lite, _umath_linalg
-from numpy.matrixlib.defmatrix import matrix_power
# For Python2/3 compatibility
_N = b'N'
@@ -533,6 +533,109 @@ def inv(a):
return wrap(ainv.astype(result_t, copy=False))
+def matrix_power(a, n):
+ """
+ Raise a square matrix to the (integer) power `n`.
+
+ For positive integers `n`, the power is computed by repeated matrix
+ squarings and matrix multiplications. If ``n == 0``, the identity matrix
+ of the same shape as `a` is returned. If ``n < 0``, the inverse
+ is computed and then raised to the power ``abs(n)``.
+
+ Parameters
+ ----------
+ a : (..., M, M) array_like
+ Matrix to be "powered."
+ n : int
+ The exponent can be any integer or long integer, positive,
+ negative, or zero.
+
+ Returns
+ -------
+ a**n : (..., M, M) ndarray or matrix object
+ The return value is the same shape and type as `a`;
+ if the exponent is positive or zero then the type of the
+ elements is the same as those of `a`. If the exponent is
+ negative the elements are floating-point.
+
+ Raises
+ ------
+ LinAlgError
+ For matrices that are not square or that (for negative powers) cannot
+ be inverted numerically.
+
+ Examples
+ --------
+ >>> from numpy.linalg import matrix_power
+ >>> i = np.array([[0, 1], [-1, 0]]) # matrix equiv. of the imaginary unit
+ >>> matrix_power(i, 3) # should = -i
+ array([[ 0, -1],
+ [ 1, 0]])
+ >>> matrix_power(i, 0)
+ array([[1, 0],
+ [0, 1]])
+ >>> matrix_power(i, -3) # should = 1/(-i) = i, but w/ f.p. elements
+ array([[ 0., 1.],
+ [-1., 0.]])
+
+ Somewhat more sophisticated example
+
+ >>> q = np.zeros((4, 4))
+ >>> q[0:2, 0:2] = -i
+ >>> q[2:4, 2:4] = i
+ >>> q # one of the three quaternion units not equal to 1
+ array([[ 0., -1., 0., 0.],
+ [ 1., 0., 0., 0.],
+ [ 0., 0., 0., 1.],
+ [ 0., 0., -1., 0.]])
+ >>> matrix_power(q, 2) # = -np.eye(4)
+ array([[-1., 0., 0., 0.],
+ [ 0., -1., 0., 0.],
+ [ 0., 0., -1., 0.],
+ [ 0., 0., 0., -1.]])
+
+ """
+ a = asanyarray(a)
+ _assertRankAtLeast2(a)
+ _assertNdSquareness(a)
+
+ try:
+ n = operator.index(n)
+ except TypeError:
+ raise TypeError("exponent must be an integer")
+
+ if n == 0:
+ a = empty_like(a)
+ a[...] = eye(a.shape[-2], dtype=a.dtype)
+ return a
+
+ elif n < 0:
+ a = inv(a)
+ n = abs(n)
+
+ # short-cuts.
+ if n == 1:
+ return a
+
+ elif n == 2:
+ return matmul(a, a)
+
+ elif n == 3:
+ return matmul(matmul(a, a), a)
+
+ # Use binary decomposition to reduce the number of matrix multiplications.
+ # Here, we iterate over the bits of n, from LSB to MSB, raise `a` to
+ # increasing powers of 2, and multiply into the result as needed.
+ z = result = None
+ while n > 0:
+ z = a if z is None else matmul(z, z)
+ n, bit = divmod(n, 2)
+ if bit:
+ result = z if result is None else matmul(result, z)
+
+ return result
+
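To make the binary decomposition concrete: for ``n == 13`` (binary ``1101``),
``z`` walks through ``a``, ``a**2``, ``a**4``, ``a**8``, and the set bits
select ``a @ a**4 @ a**8``, i.e. five matrix multiplications instead of
twelve. A quick check of the equivalence (example matrix, not part of the
patch)::

    import numpy as np

    a = np.array([[1, 1], [0, 1]])
    expected = np.linalg.multi_dot([a] * 13)  # naive chained product
    assert (np.linalg.matrix_power(a, 13) == expected).all()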
+
# Cholesky decomposition
def cholesky(a):
@@ -1988,7 +2091,7 @@ def lstsq(a, b, rcond="warn"):
[ 2., 1.],
[ 3., 1.]])
- >>> m, c = np.linalg.lstsq(A, y)[0]
+ >>> m, c = np.linalg.lstsq(A, y, rcond=None)[0]
>>> print(m, c)
1.0 -0.95
diff --git a/numpy/linalg/tests/test_linalg.py b/numpy/linalg/tests/test_linalg.py
index 4a87330c7..87dfe988a 100644
--- a/numpy/linalg/tests/test_linalg.py
+++ b/numpy/linalg/tests/test_linalg.py
@@ -7,11 +7,12 @@ import os
import sys
import itertools
import traceback
-import warnings
+import textwrap
+import subprocess
import pytest
import numpy as np
-from numpy import array, single, double, csingle, cdouble, dot, identity
+from numpy import array, single, double, csingle, cdouble, dot, identity, matmul
from numpy import multiply, atleast_2d, inf, asarray, matrix
from numpy import linalg
from numpy.linalg import matrix_power, norm, matrix_rank, multi_dot, LinAlgError
@@ -22,12 +23,11 @@ from numpy.testing import (
)
-def ifthen(a, b):
- return not a or b
-
-
-def imply(a, b):
- return not a or b
+def consistent_subclass(out, in_):
+ # For ndarray subclass input, our output should have the same subclass
+ # (non-ndarray input gets converted to ndarray).
+ return type(out) is (type(in_) if isinstance(in_, np.ndarray)
+ else np.ndarray)
old_assert_almost_equal = assert_almost_equal
@@ -65,6 +65,7 @@ all_tags = {
'generalized', 'size-0', 'strided' # optional additions
}
+
class LinalgCase(object):
def __init__(self, name, a, b, tags=set()):
"""
@@ -86,6 +87,7 @@ class LinalgCase(object):
def __repr__(self):
return "<LinalgCase: %s>" % (self.name,)
+
def apply_tag(tag, cases):
"""
Add the given tag (a string) to each of the cases (a list of LinalgCase
@@ -129,10 +131,6 @@ CASES += apply_tag('square', [
np.empty((0, 0), dtype=double),
np.empty((0,), dtype=double),
tags={'size-0'}),
- LinalgCase("0x0_matrix",
- np.empty((0, 0), dtype=double).view(np.matrix),
- np.empty((0, 1), dtype=double).view(np.matrix),
- tags={'size-0'}),
LinalgCase("8x8",
np.random.rand(8, 8),
np.random.rand(8)),
@@ -142,12 +140,6 @@ CASES += apply_tag('square', [
LinalgCase("nonarray",
[[1, 2], [3, 4]],
[2, 1]),
- LinalgCase("matrix_b_only",
- array([[1., 2.], [3., 4.]]),
- matrix([2., 1.]).T),
- LinalgCase("matrix_a_and_b",
- matrix([[1., 2.], [3., 4.]]),
- matrix([2., 1.]).T),
])
# non-square test-cases
@@ -231,9 +223,6 @@ CASES += apply_tag('hermitian', [
LinalgCase("matrix_b_only",
array([[1., 2.], [2., 1.]]),
None),
- LinalgCase("hmatrix_a_and_b",
- matrix([[1., 2.], [2., 1.]]),
- None),
LinalgCase("hmatrix_1x1",
np.random.rand(1, 1),
None),
@@ -270,12 +259,13 @@ def _make_generalized_cases():
return new_cases
+
CASES += _make_generalized_cases()
+
#
# Generate stride combination variations of the above
#
-
def _stride_comb_iter(x):
"""
Generate cartesian product of strides for all axes
@@ -323,6 +313,7 @@ def _stride_comb_iter(x):
xi = np.lib.stride_tricks.as_strided(x, strides=s)
yield xi, "stride_xxx_0_0"
+
def _make_strided_cases():
new_cases = []
for case in CASES:
@@ -333,94 +324,104 @@ def _make_strided_cases():
new_cases.append(new_case)
return new_cases
+
CASES += _make_strided_cases()
#
# Test different routines against the above cases
#
+class LinalgTestCase(object):
+ TEST_CASES = CASES
-def _check_cases(func, require=set(), exclude=set()):
- """
- Run func on each of the cases with all of the tags in require, and none
- of the tags in exclude
- """
- for case in CASES:
- # filter by require and exclude
- if case.tags & require != require:
- continue
- if case.tags & exclude:
- continue
+ def check_cases(self, require=set(), exclude=set()):
+ """
+ Run func on each of the cases with all of the tags in require, and none
+ of the tags in exclude
+ """
+ for case in self.TEST_CASES:
+ # filter by require and exclude
+ if case.tags & require != require:
+ continue
+ if case.tags & exclude:
+ continue
- try:
- case.check(func)
- except Exception:
- msg = "In test case: %r\n\n" % case
- msg += traceback.format_exc()
- raise AssertionError(msg)
+ try:
+ case.check(self.do)
+ except Exception:
+ msg = "In test case: %r\n\n" % case
+ msg += traceback.format_exc()
+ raise AssertionError(msg)
-class LinalgSquareTestCase(object):
+class LinalgSquareTestCase(LinalgTestCase):
def test_sq_cases(self):
- _check_cases(self.do, require={'square'}, exclude={'generalized', 'size-0'})
+ self.check_cases(require={'square'},
+ exclude={'generalized', 'size-0'})
def test_empty_sq_cases(self):
- _check_cases(self.do, require={'square', 'size-0'}, exclude={'generalized'})
+ self.check_cases(require={'square', 'size-0'},
+ exclude={'generalized'})
-class LinalgNonsquareTestCase(object):
+class LinalgNonsquareTestCase(LinalgTestCase):
def test_nonsq_cases(self):
- _check_cases(self.do, require={'nonsquare'}, exclude={'generalized', 'size-0'})
+ self.check_cases(require={'nonsquare'},
+ exclude={'generalized', 'size-0'})
def test_empty_nonsq_cases(self):
- _check_cases(self.do, require={'nonsquare', 'size-0'}, exclude={'generalized'})
+ self.check_cases(require={'nonsquare', 'size-0'},
+ exclude={'generalized'})
-class HermitianTestCase(object):
+
+class HermitianTestCase(LinalgTestCase):
def test_herm_cases(self):
- _check_cases(self.do, require={'hermitian'}, exclude={'generalized', 'size-0'})
+ self.check_cases(require={'hermitian'},
+ exclude={'generalized', 'size-0'})
def test_empty_herm_cases(self):
- _check_cases(self.do, require={'hermitian', 'size-0'}, exclude={'generalized'})
+ self.check_cases(require={'hermitian', 'size-0'},
+ exclude={'generalized'})
-class LinalgGeneralizedSquareTestCase(object):
+class LinalgGeneralizedSquareTestCase(LinalgTestCase):
@pytest.mark.slow
def test_generalized_sq_cases(self):
- _check_cases(self.do, require={'generalized', 'square'}, exclude={'size-0'})
+ self.check_cases(require={'generalized', 'square'},
+ exclude={'size-0'})
@pytest.mark.slow
def test_generalized_empty_sq_cases(self):
- _check_cases(self.do, require={'generalized', 'square', 'size-0'})
+ self.check_cases(require={'generalized', 'square', 'size-0'})
-class LinalgGeneralizedNonsquareTestCase(object):
+class LinalgGeneralizedNonsquareTestCase(LinalgTestCase):
@pytest.mark.slow
def test_generalized_nonsq_cases(self):
- _check_cases(self.do, require={'generalized', 'nonsquare'}, exclude={'size-0'})
+ self.check_cases(require={'generalized', 'nonsquare'},
+ exclude={'size-0'})
@pytest.mark.slow
def test_generalized_empty_nonsq_cases(self):
- _check_cases(self.do, require={'generalized', 'nonsquare', 'size-0'})
+ self.check_cases(require={'generalized', 'nonsquare', 'size-0'})
-class HermitianGeneralizedTestCase(object):
+class HermitianGeneralizedTestCase(LinalgTestCase):
@pytest.mark.slow
def test_generalized_herm_cases(self):
- _check_cases(self.do,
- require={'generalized', 'hermitian'},
- exclude={'size-0'})
+ self.check_cases(require={'generalized', 'hermitian'},
+ exclude={'size-0'})
@pytest.mark.slow
def test_generalized_empty_herm_cases(self):
- _check_cases(self.do,
- require={'generalized', 'hermitian', 'size-0'},
- exclude={'none'})
+ self.check_cases(require={'generalized', 'hermitian', 'size-0'},
+ exclude={'none'})
def dot_generalized(a, b):
@@ -446,20 +447,21 @@ def identity_like_generalized(a):
a = asarray(a)
if a.ndim >= 3:
r = np.empty(a.shape, dtype=a.dtype)
- for c in itertools.product(*map(range, a.shape[:-2])):
- r[c] = identity(a.shape[-2])
+ r[...] = identity(a.shape[-2])
return r
else:
return identity(a.shape[0])
-class TestSolve(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
-
+class SolveCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
+ # kept apart from TestSolve, for use in testing with matrices.
def do(self, a, b, tags):
x = linalg.solve(a, b)
assert_almost_equal(b, dot_generalized(a, x))
- assert_(imply(isinstance(b, matrix), isinstance(x, matrix)))
+ assert_(consistent_subclass(x, b))
+
+class TestSolve(SolveCases):
def test_types(self):
def check(dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
@@ -519,14 +521,16 @@ class TestSolve(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
assert_(isinstance(result, ArraySubclass))
-class TestInv(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
+class InvCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
def do(self, a, b, tags):
a_inv = linalg.inv(a)
assert_almost_equal(dot_generalized(a, a_inv),
identity_like_generalized(a))
- assert_(imply(isinstance(a, matrix), isinstance(a_inv, matrix)))
+ assert_(consistent_subclass(a_inv, a))
+
+class TestInv(InvCases):
def test_types(self):
def check(dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
@@ -551,13 +555,15 @@ class TestInv(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
assert_(isinstance(res, ArraySubclass))
-class TestEigvals(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
+class EigvalsCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
def do(self, a, b, tags):
ev = linalg.eigvals(a)
evalues, evectors = linalg.eig(a)
assert_almost_equal(ev, evalues)
+
+class TestEigvals(EigvalsCases):
def test_types(self):
def check(dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
@@ -586,15 +592,17 @@ class TestEigvals(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
assert_(isinstance(res, np.ndarray))
-class TestEig(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
+class EigCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
def do(self, a, b, tags):
evalues, evectors = linalg.eig(a)
assert_allclose(dot_generalized(a, evectors),
np.asarray(evectors) * np.asarray(evalues)[..., None, :],
rtol=get_rtol(evalues.dtype))
- assert_(imply(isinstance(a, matrix), isinstance(evectors, matrix)))
+ assert_(consistent_subclass(evectors, a))
+
+class TestEig(EigCases):
def test_types(self):
def check(dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
@@ -633,7 +641,7 @@ class TestEig(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
assert_(isinstance(a, np.ndarray))
-class TestSVD(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
+class SVDCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
def do(self, a, b, tags):
if 'size-0' in tags:
@@ -644,9 +652,11 @@ class TestSVD(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
assert_allclose(a, dot_generalized(np.asarray(u) * np.asarray(s)[..., None, :],
np.asarray(vt)),
rtol=get_rtol(u.dtype))
- assert_(imply(isinstance(a, matrix), isinstance(u, matrix)))
- assert_(imply(isinstance(a, matrix), isinstance(vt, matrix)))
+ assert_(consistent_subclass(u, a))
+ assert_(consistent_subclass(vt, a))
+
+class TestSVD(SVDCases):
def test_types(self):
def check(dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
@@ -671,7 +681,7 @@ class TestSVD(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
assert_raises(linalg.LinAlgError, linalg.svd, a)
-class TestCond(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
+class CondCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
# cond(x, p) for p in (None, 2, -2)
def do(self, a, b, tags):
@@ -716,6 +726,8 @@ class TestCond(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
* (abs(cinv)**2).sum(-1).sum(-1)),
single_decimal=5, double_decimal=11)
+
+class TestCond(CondCases):
def test_basic_nonsvd(self):
# Smoketest the non-svd norms
A = array([[1., 0, 1], [0, -2., 0], [0, 0, 3.]])
@@ -779,20 +791,24 @@ class TestCond(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
assert_(np.isfinite(c[1,0]))
-class TestPinv(LinalgSquareTestCase,
- LinalgNonsquareTestCase,
- LinalgGeneralizedSquareTestCase,
- LinalgGeneralizedNonsquareTestCase):
+class PinvCases(LinalgSquareTestCase,
+ LinalgNonsquareTestCase,
+ LinalgGeneralizedSquareTestCase,
+ LinalgGeneralizedNonsquareTestCase):
def do(self, a, b, tags):
a_ginv = linalg.pinv(a)
# `a @ a_ginv == I` does not hold if a is singular
dot = dot_generalized
assert_almost_equal(dot(dot(a, a_ginv), a), a, single_decimal=5, double_decimal=11)
- assert_(imply(isinstance(a, matrix), isinstance(a_ginv, matrix)))
+ assert_(consistent_subclass(a_ginv, a))
+
+
+class TestPinv(PinvCases):
+ pass
-class TestDet(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
+class DetCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
def do(self, a, b, tags):
d = linalg.det(a)
@@ -811,6 +827,8 @@ class TestDet(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
assert_almost_equal(np.abs(s[m]), 1)
assert_equal(ld[~m], -inf)
+
+class TestDet(DetCases):
def test_zero(self):
assert_equal(linalg.det([[0.0]]), 0.0)
assert_equal(type(linalg.det([[0.0]])), double)
@@ -854,7 +872,7 @@ class TestDet(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
assert_(res[1].dtype.type is np.float64)
-class TestLstsq(LinalgSquareTestCase, LinalgNonsquareTestCase):
+class LstsqCases(LinalgSquareTestCase, LinalgNonsquareTestCase):
def do(self, a, b, tags):
if 'size-0' in tags:
@@ -882,9 +900,11 @@ class TestLstsq(LinalgSquareTestCase, LinalgNonsquareTestCase):
expect_resids = np.array([]).view(type(x))
assert_almost_equal(residuals, expect_resids)
assert_(np.issubdtype(residuals.dtype, np.floating))
- assert_(imply(isinstance(b, matrix), isinstance(x, matrix)))
- assert_(imply(isinstance(b, matrix), isinstance(residuals, matrix)))
+ assert_(consistent_subclass(x, b))
+ assert_(consistent_subclass(residuals, b))
+
+class TestLstsq(LstsqCases):
def test_future_rcond(self):
a = np.array([[0., 1., 0., 1., 2., 0.],
[0., 2., 0., 0., 1., 0.],
@@ -903,20 +923,26 @@ class TestLstsq(LinalgSquareTestCase, LinalgNonsquareTestCase):
# Warning should be raised exactly once (first command)
assert_(len(w) == 1)
+
class TestMatrixPower(object):
R90 = array([[0, 1], [-1, 0]])
Arb22 = array([[4, -7], [-2, 10]])
noninv = array([[1, 0], [0, 0]])
- arbfloat = array([[0.1, 3.2], [1.2, 0.7]])
+ arbfloat = array([[[0.1, 3.2], [1.2, 0.7]],
+ [[0.2, 6.4], [2.4, 1.4]]])
large = identity(10)
t = large[1, :].copy()
- large[1, :] = large[0,:]
+ large[1, :] = large[0, :]
large[0, :] = t
def test_large_power(self):
assert_equal(
matrix_power(self.R90, 2 ** 100 + 2 ** 10 + 2 ** 5 + 1), self.R90)
+ assert_equal(
+ matrix_power(self.R90, 2 ** 100 + 2 ** 10 + 1), self.R90)
+ assert_equal(
+ matrix_power(self.R90, 2 ** 100 + 2 + 1), -self.R90)
def test_large_power_trailing_zero(self):
assert_equal(
@@ -925,7 +951,7 @@ class TestMatrixPower(object):
def testip_zero(self):
def tz(M):
mz = matrix_power(M, 0)
- assert_equal(mz, identity(M.shape[0]))
+ assert_equal(mz, identity_like_generalized(M))
assert_equal(mz.dtype, M.dtype)
for M in [self.Arb22, self.arbfloat, self.large]:
tz(M)
@@ -941,7 +967,7 @@ class TestMatrixPower(object):
def testip_two(self):
def tz(M):
mz = matrix_power(M, 2)
- assert_equal(mz, dot(M, M))
+ assert_equal(mz, matmul(M, M))
assert_equal(mz.dtype, M.dtype)
for M in [self.Arb22, self.arbfloat, self.large]:
tz(M)
@@ -949,14 +975,19 @@ class TestMatrixPower(object):
def testip_invert(self):
def tz(M):
mz = matrix_power(M, -1)
- assert_almost_equal(identity(M.shape[0]), dot(mz, M))
+ assert_almost_equal(matmul(mz, M), identity_like_generalized(M))
for M in [self.R90, self.Arb22, self.arbfloat, self.large]:
tz(M)
def test_invert_noninvertible(self):
- import numpy.linalg
- assert_raises(numpy.linalg.linalg.LinAlgError,
- lambda: matrix_power(self.noninv, -1))
+ assert_raises(LinAlgError, matrix_power, self.noninv, -1)
+
+ def test_invalid(self):
+ assert_raises(TypeError, matrix_power, self.R90, 1.5)
+ assert_raises(TypeError, matrix_power, self.R90, [1])
+ assert_raises(LinAlgError, matrix_power, np.array([1]), 1)
+ assert_raises(LinAlgError, matrix_power, np.array([[1], [2]]), 1)
+ assert_raises(LinAlgError, matrix_power, np.ones((4, 3, 2)), 1)
class TestBoolPower(object):
@@ -966,7 +997,7 @@ class TestBoolPower(object):
assert_equal(matrix_power(A, 2), A)
-class TestEigvalsh(HermitianTestCase, HermitianGeneralizedTestCase):
+class TestEigvalshCases(HermitianTestCase, HermitianGeneralizedTestCase):
def do(self, a, b, tags):
# note that eigenvalue arrays returned by eig must be sorted since
@@ -979,6 +1010,8 @@ class TestEigvalsh(HermitianTestCase, HermitianGeneralizedTestCase):
ev2 = linalg.eigvalsh(a, 'U')
assert_allclose(ev2, evalues, rtol=get_rtol(ev.dtype))
+
+class TestEigvalsh(object):
def test_types(self):
def check(dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
@@ -1034,7 +1067,7 @@ class TestEigvalsh(HermitianTestCase, HermitianGeneralizedTestCase):
assert_(isinstance(res, np.ndarray))
-class TestEigh(HermitianTestCase, HermitianGeneralizedTestCase):
+class TestEighCases(HermitianTestCase, HermitianGeneralizedTestCase):
def do(self, a, b, tags):
# note that eigenvalue arrays returned by eig must be sorted since
@@ -1055,6 +1088,8 @@ class TestEigh(HermitianTestCase, HermitianGeneralizedTestCase):
np.asarray(ev2)[..., None, :] * np.asarray(evc2),
rtol=get_rtol(ev.dtype), err_msg=repr(a))
+
+class TestEigh(object):
def test_types(self):
def check(dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
@@ -1115,11 +1150,13 @@ class TestEigh(HermitianTestCase, HermitianGeneralizedTestCase):
assert_(isinstance(a, np.ndarray))
-class _TestNorm(object):
-
+class _TestNormBase(object):
dt = None
dec = None
+
+class _TestNormGeneral(_TestNormBase):
+
def test_empty(self):
assert_equal(norm([]), 0.0)
assert_equal(norm(array([], dtype=self.dt)), 0.0)
@@ -1166,57 +1203,6 @@ class _TestNorm(object):
assert_(issubclass(an.dtype.type, np.floating))
assert_almost_equal(an, 1.0)
- def test_matrix_return_type(self):
- a = np.array([[1, 0, 1], [0, 1, 1]])
-
- exact_types = np.typecodes['AllInteger']
-
- # float32, complex64, float64, complex128 types are the only types
- # allowed by `linalg`, which performs the matrix operations used
- # within `norm`.
- inexact_types = 'fdFD'
-
- all_types = exact_types + inexact_types
-
- for each_inexact_types in all_types:
- at = a.astype(each_inexact_types)
-
- an = norm(at, -np.inf)
- assert_(issubclass(an.dtype.type, np.floating))
- assert_almost_equal(an, 2.0)
-
- with suppress_warnings() as sup:
- sup.filter(RuntimeWarning, "divide by zero encountered")
- an = norm(at, -1)
- assert_(issubclass(an.dtype.type, np.floating))
- assert_almost_equal(an, 1.0)
-
- an = norm(at, 1)
- assert_(issubclass(an.dtype.type, np.floating))
- assert_almost_equal(an, 2.0)
-
- an = norm(at, 2)
- assert_(issubclass(an.dtype.type, np.floating))
- assert_almost_equal(an, 3.0**(1.0/2.0))
-
- an = norm(at, -2)
- assert_(issubclass(an.dtype.type, np.floating))
- assert_almost_equal(an, 1.0)
-
- an = norm(at, np.inf)
- assert_(issubclass(an.dtype.type, np.floating))
- assert_almost_equal(an, 2.0)
-
- an = norm(at, 'fro')
- assert_(issubclass(an.dtype.type, np.floating))
- assert_almost_equal(an, 2.0)
-
- an = norm(at, 'nuc')
- assert_(issubclass(an.dtype.type, np.floating))
- # Lower bar needed to support low precision floats.
- # They end up being off by 1 in the 7th place.
- old_assert_almost_equal(an, 2.7320508075688772, decimal=6)
-
def test_vector(self):
a = [1, 2, 3, 4]
b = [-1, -2, -3, -4]
@@ -1247,39 +1233,6 @@ class _TestNorm(object):
array(c, dtype=self.dt)):
_test(v)
- def test_matrix_2x2(self):
- A = matrix([[1, 3], [5, 7]], dtype=self.dt)
- assert_almost_equal(norm(A), 84 ** 0.5)
- assert_almost_equal(norm(A, 'fro'), 84 ** 0.5)
- assert_almost_equal(norm(A, 'nuc'), 10.0)
- assert_almost_equal(norm(A, inf), 12.0)
- assert_almost_equal(norm(A, -inf), 4.0)
- assert_almost_equal(norm(A, 1), 10.0)
- assert_almost_equal(norm(A, -1), 6.0)
- assert_almost_equal(norm(A, 2), 9.1231056256176615)
- assert_almost_equal(norm(A, -2), 0.87689437438234041)
-
- assert_raises(ValueError, norm, A, 'nofro')
- assert_raises(ValueError, norm, A, -3)
- assert_raises(ValueError, norm, A, 0)
-
- def test_matrix_3x3(self):
- # This test has been added because the 2x2 example
- # happened to have equal nuclear norm and induced 1-norm.
- # The 1/10 scaling factor accommodates the absolute tolerance
- # used in assert_almost_equal.
- A = (1 / 10) * \
- np.array([[1, 2, 3], [6, 0, 5], [3, 2, 1]], dtype=self.dt)
- assert_almost_equal(norm(A), (1 / 10) * 89 ** 0.5)
- assert_almost_equal(norm(A, 'fro'), (1 / 10) * 89 ** 0.5)
- assert_almost_equal(norm(A, 'nuc'), 1.3366836911774836)
- assert_almost_equal(norm(A, inf), 1.1)
- assert_almost_equal(norm(A, -inf), 0.6)
- assert_almost_equal(norm(A, 1), 1.0)
- assert_almost_equal(norm(A, -1), 0.4)
- assert_almost_equal(norm(A, 2), 0.88722940323461277)
- assert_almost_equal(norm(A, -2), 0.19456584790481812)
-
def test_axis(self):
# Vector norms.
# Compare the use of `axis` with computing the norm of each row
@@ -1359,10 +1312,103 @@ class _TestNorm(object):
assert_(found.shape == expected_shape,
shape_err.format(found.shape, expected_shape, order, k))
+
+class _TestNorm2D(_TestNormBase):
+ # Define the part for 2d arrays separately, so we can subclass this
+ # and run the tests using np.matrix in matrixlib.tests.test_matrix_linalg.
+ array = np.array
+
+ def test_matrix_empty(self):
+ assert_equal(norm(self.array([[]], dtype=self.dt)), 0.0)
+
+ def test_matrix_return_type(self):
+ a = self.array([[1, 0, 1], [0, 1, 1]])
+
+ exact_types = np.typecodes['AllInteger']
+
+ # float32, complex64, float64, complex128 types are the only types
+ # allowed by `linalg`, which performs the matrix operations used
+ # within `norm`.
+ inexact_types = 'fdFD'
+
+ all_types = exact_types + inexact_types
+
+ for each_inexact_types in all_types:
+ at = a.astype(each_inexact_types)
+
+ an = norm(at, -np.inf)
+ assert_(issubclass(an.dtype.type, np.floating))
+ assert_almost_equal(an, 2.0)
+
+ with suppress_warnings() as sup:
+ sup.filter(RuntimeWarning, "divide by zero encountered")
+ an = norm(at, -1)
+ assert_(issubclass(an.dtype.type, np.floating))
+ assert_almost_equal(an, 1.0)
+
+ an = norm(at, 1)
+ assert_(issubclass(an.dtype.type, np.floating))
+ assert_almost_equal(an, 2.0)
+
+ an = norm(at, 2)
+ assert_(issubclass(an.dtype.type, np.floating))
+ assert_almost_equal(an, 3.0**(1.0/2.0))
+
+ an = norm(at, -2)
+ assert_(issubclass(an.dtype.type, np.floating))
+ assert_almost_equal(an, 1.0)
+
+ an = norm(at, np.inf)
+ assert_(issubclass(an.dtype.type, np.floating))
+ assert_almost_equal(an, 2.0)
+
+ an = norm(at, 'fro')
+ assert_(issubclass(an.dtype.type, np.floating))
+ assert_almost_equal(an, 2.0)
+
+ an = norm(at, 'nuc')
+ assert_(issubclass(an.dtype.type, np.floating))
+ # Lower bar needed to support low precision floats.
+ # They end up being off by 1 in the 7th place.
+ np.testing.assert_almost_equal(an, 2.7320508075688772, decimal=6)
+
+ def test_matrix_2x2(self):
+ A = self.array([[1, 3], [5, 7]], dtype=self.dt)
+ assert_almost_equal(norm(A), 84 ** 0.5)
+ assert_almost_equal(norm(A, 'fro'), 84 ** 0.5)
+ assert_almost_equal(norm(A, 'nuc'), 10.0)
+ assert_almost_equal(norm(A, inf), 12.0)
+ assert_almost_equal(norm(A, -inf), 4.0)
+ assert_almost_equal(norm(A, 1), 10.0)
+ assert_almost_equal(norm(A, -1), 6.0)
+ assert_almost_equal(norm(A, 2), 9.1231056256176615)
+ assert_almost_equal(norm(A, -2), 0.87689437438234041)
+
+ assert_raises(ValueError, norm, A, 'nofro')
+ assert_raises(ValueError, norm, A, -3)
+ assert_raises(ValueError, norm, A, 0)
+
+ def test_matrix_3x3(self):
+ # This test has been added because the 2x2 example
+ # happened to have equal nuclear norm and induced 1-norm.
+ # The 1/10 scaling factor accommodates the absolute tolerance
+ # used in assert_almost_equal.
+ A = (1 / 10) * \
+ self.array([[1, 2, 3], [6, 0, 5], [3, 2, 1]], dtype=self.dt)
+ assert_almost_equal(norm(A), (1 / 10) * 89 ** 0.5)
+ assert_almost_equal(norm(A, 'fro'), (1 / 10) * 89 ** 0.5)
+ assert_almost_equal(norm(A, 'nuc'), 1.3366836911774836)
+ assert_almost_equal(norm(A, inf), 1.1)
+ assert_almost_equal(norm(A, -inf), 0.6)
+ assert_almost_equal(norm(A, 1), 1.0)
+ assert_almost_equal(norm(A, -1), 0.4)
+ assert_almost_equal(norm(A, 2), 0.88722940323461277)
+ assert_almost_equal(norm(A, -2), 0.19456584790481812)
+
def test_bad_args(self):
# Check that bad arguments raise the appropriate exceptions.
- A = array([[1, 2, 3], [4, 5, 6]], dtype=self.dt)
+ A = self.array([[1, 2, 3], [4, 5, 6]], dtype=self.dt)
B = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4)
# Using `axis=<integer>` or passing in a 1-D array implies vector
@@ -1386,6 +1432,10 @@ class _TestNorm(object):
assert_raises(ValueError, norm, B, None, (0, 1, 2))
+class _TestNorm(_TestNorm2D, _TestNormGeneral):
+ pass
+
+
class TestNorm_NonSystematic(object):
def test_longdouble_norm(self):
@@ -1413,21 +1463,34 @@ class TestNorm_NonSystematic(object):
old_assert_almost_equal(np.linalg.norm(d, ord=3), res, decimal=5)
-class TestNormDouble(_TestNorm):
+# Separate definitions so we can use them for matrix tests.
+class _TestNormDoubleBase(_TestNormBase):
dt = np.double
dec = 12
-class TestNormSingle(_TestNorm):
+class _TestNormSingleBase(_TestNormBase):
dt = np.float32
dec = 6
-class TestNormInt64(_TestNorm):
+class _TestNormInt64Base(_TestNormBase):
dt = np.int64
dec = 12
+class TestNormDouble(_TestNorm, _TestNormDoubleBase):
+ pass
+
+
+class TestNormSingle(_TestNorm, _TestNormSingleBase):
+ pass
+
+
+class TestNormInt64(_TestNorm, _TestNormInt64Base):
+ pass
+
+
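Aside (not part of the diff): the empty `pass` classes above follow a common mixin pattern: dtype parameters (`dt`, `dec`) live in small base classes, the test bodies live in mixins, and only the concrete combinations are collected by the test runner. A minimal sketch with hypothetical names:

    class _ParamsBase(object):
        dt = None                   # dtype under test

    class _Cases(_ParamsBase):
        def test_has_dtype(self):   # body shared across dtypes
            assert self.dt is not None

    class TestDouble(_Cases, _ParamsBase):
        dt = float                  # collected; runs _Cases with dt=float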
class TestMatrixRank(object):
def test_matrix_rank(self):
@@ -1478,6 +1541,8 @@ def test_reduced_rank():
class TestQR(object):
+ # Define the array class here, so we can run the tests on matrices elsewhere.
+ array = np.array
def check_qr(self, a):
# This test expects the argument `a` to be an ndarray or
@@ -1528,7 +1593,7 @@ class TestQR(object):
# of the functions in lapack_lite. Consequently, this test is
# very limited in scope. Note that the results are in FORTRAN
# order, hence the h arrays are transposed.
- a = array([[1, 2], [3, 4], [5, 6]], dtype=np.double)
+ a = self.array([[1, 2], [3, 4], [5, 6]], dtype=np.double)
# Test double
h, tau = linalg.qr(a, mode='raw')
@@ -1544,22 +1609,21 @@ class TestQR(object):
assert_(tau.shape == (2,))
def test_mode_all_but_economic(self):
- a = array([[1, 2], [3, 4]])
- b = array([[1, 2], [3, 4], [5, 6]])
+ a = self.array([[1, 2], [3, 4]])
+ b = self.array([[1, 2], [3, 4], [5, 6]])
for dt in "fd":
m1 = a.astype(dt)
m2 = b.astype(dt)
self.check_qr(m1)
self.check_qr(m2)
self.check_qr(m2.T)
- self.check_qr(matrix(m1))
+
for dt in "fd":
m1 = 1 + 1j * a.astype(dt)
m2 = 1 + 1j * b.astype(dt)
self.check_qr(m1)
self.check_qr(m2)
self.check_qr(m2.T)
- self.check_qr(matrix(m1))
def test_0_size(self):
# There may be good ways to do (some of this) reasonably:
@@ -1699,6 +1763,40 @@ def test_xerbla_override():
raise SkipTest('Numpy xerbla not linked in.')
+def test_sdot_bug_8577():
+ # Regression test that loading certain other libraries does not
+ # lead to incorrect results in float32 linear algebra.
+ #
+ # There's a bug (gh-8577) on OSX that can trigger this, and perhaps
+ # there are also other situations in which it occurs.
+ #
+ # Do the check in a separate process.
+
+ bad_libs = ['PyQt5.QtWidgets', 'IPython']
+
+ template = textwrap.dedent("""
+ import sys
+ {before}
+ try:
+ import {bad_lib}
+ except ImportError:
+ sys.exit(0)
+ {after}
+ x = np.ones(2, dtype=np.float32)
+ sys.exit(0 if np.allclose(x.dot(x), 2.0) else 1)
+ """)
+
+ for bad_lib in bad_libs:
+ code = template.format(before="import numpy as np", after="",
+ bad_lib=bad_lib)
+ subprocess.check_call([sys.executable, "-c", code])
+
+ # Swapped import order
+ code = template.format(after="import numpy as np", before="",
+ bad_lib=bad_lib)
+ subprocess.check_call([sys.executable, "-c", code])
+
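Aside (not part of the diff): `template.format` merely substitutes the import-order slots, so the first iteration runs roughly this program in a subprocess (module name taken from `bad_libs`):

    import sys
    import numpy as np
    try:
        import PyQt5.QtWidgets
    except ImportError:
        sys.exit(0)

    x = np.ones(2, dtype=np.float32)
    sys.exit(0 if np.allclose(x.dot(x), 2.0) else 1)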
+
class TestMultiDot(object):
def test_basic_function_with_three_arguments(self):
diff --git a/numpy/linalg/umath_linalg.c.src b/numpy/linalg/umath_linalg.c.src
index 71dfa556d..7dc1cb0cb 100644
--- a/numpy/linalg/umath_linalg.c.src
+++ b/numpy/linalg/umath_linalg.c.src
@@ -382,17 +382,11 @@ typedef f2c_doublecomplex fortran_doublecomplex;
*****************************************************************************
*/
-static NPY_INLINE void *
-offset_ptr(void* ptr, ptrdiff_t offset)
-{
- return (void*)((npy_uint8*)ptr + offset);
-}
-
static NPY_INLINE int
get_fp_invalid_and_clear(void)
{
int status;
- status = npy_clear_floatstatus();
+ status = npy_clear_floatstatus_barrier((char*)&status);
return !!(status & NPY_FPE_INVALID);
}
@@ -403,7 +397,7 @@ set_fp_invalid_or_clear(int error_occurred)
npy_set_floatstatus_invalid();
}
else {
- npy_clear_floatstatus();
+ npy_clear_floatstatus_barrier((char*)&error_occurred);
}
}
@@ -577,104 +571,6 @@ dump_linearize_data(const char* name, const LINEARIZE_DATA_t* params)
params->row_strides, params->column_strides);
}
-
-static NPY_INLINE float
-FLOAT_add(float op1, float op2)
-{
- return op1 + op2;
-}
-
-static NPY_INLINE double
-DOUBLE_add(double op1, double op2)
-{
- return op1 + op2;
-}
-
-static NPY_INLINE COMPLEX_t
-CFLOAT_add(COMPLEX_t op1, COMPLEX_t op2)
-{
- COMPLEX_t result;
- result.array[0] = op1.array[0] + op2.array[0];
- result.array[1] = op1.array[1] + op2.array[1];
-
- return result;
-}
-
-static NPY_INLINE DOUBLECOMPLEX_t
-CDOUBLE_add(DOUBLECOMPLEX_t op1, DOUBLECOMPLEX_t op2)
-{
- DOUBLECOMPLEX_t result;
- result.array[0] = op1.array[0] + op2.array[0];
- result.array[1] = op1.array[1] + op2.array[1];
-
- return result;
-}
-
-static NPY_INLINE float
-FLOAT_mul(float op1, float op2)
-{
- return op1*op2;
-}
-
-static NPY_INLINE double
-DOUBLE_mul(double op1, double op2)
-{
- return op1*op2;
-}
-
-
-static NPY_INLINE COMPLEX_t
-CFLOAT_mul(COMPLEX_t op1, COMPLEX_t op2)
-{
- COMPLEX_t result;
- result.array[0] = op1.array[0]*op2.array[0] - op1.array[1]*op2.array[1];
- result.array[1] = op1.array[1]*op2.array[0] + op1.array[0]*op2.array[1];
-
- return result;
-}
-
-static NPY_INLINE DOUBLECOMPLEX_t
-CDOUBLE_mul(DOUBLECOMPLEX_t op1, DOUBLECOMPLEX_t op2)
-{
- DOUBLECOMPLEX_t result;
- result.array[0] = op1.array[0]*op2.array[0] - op1.array[1]*op2.array[1];
- result.array[1] = op1.array[1]*op2.array[0] + op1.array[0]*op2.array[1];
-
- return result;
-}
-
-static NPY_INLINE float
-FLOAT_mulc(float op1, float op2)
-{
- return op1*op2;
-}
-
-static NPY_INLINE double
-DOUBLE_mulc(float op1, float op2)
-{
- return op1*op2;
-}
-
-static NPY_INLINE COMPLEX_t
-CFLOAT_mulc(COMPLEX_t op1, COMPLEX_t op2)
-{
- COMPLEX_t result;
- result.array[0] = op1.array[0]*op2.array[0] + op1.array[1]*op2.array[1];
- result.array[1] = op1.array[0]*op2.array[1] - op1.array[1]*op2.array[0];
-
- return result;
-}
-
-static NPY_INLINE DOUBLECOMPLEX_t
-CDOUBLE_mulc(DOUBLECOMPLEX_t op1, DOUBLECOMPLEX_t op2)
-{
- DOUBLECOMPLEX_t result;
- result.array[0] = op1.array[0]*op2.array[0] + op1.array[1]*op2.array[1];
- result.array[1] = op1.array[0]*op2.array[1] - op1.array[1]*op2.array[0];
-
- return result;
-}
-
static NPY_INLINE void
print_FLOAT(npy_float s)
{
@@ -3740,10 +3636,10 @@ static struct PyModuleDef moduledef = {
#endif
#if defined(NPY_PY3K)
-#define RETVAL m
+#define RETVAL(x) x
PyObject *PyInit__umath_linalg(void)
#else
-#define RETVAL
+#define RETVAL(x)
PyMODINIT_FUNC
init_umath_linalg(void)
#endif
@@ -3759,7 +3655,7 @@ init_umath_linalg(void)
m = Py_InitModule(UMATH_LINALG_MODULE_NAME, UMath_LinAlgMethods);
#endif
if (m == NULL) {
- return RETVAL;
+ return RETVAL(NULL);
}
import_array();
@@ -3777,7 +3673,8 @@ init_umath_linalg(void)
if (PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"cannot load _umath_linalg module.");
+ return RETVAL(NULL);
}
- return RETVAL;
+ return RETVAL(m);
}
diff --git a/numpy/ma/core.py b/numpy/ma/core.py
index 91cf8ed0f..fdffc7360 100644
--- a/numpy/ma/core.py
+++ b/numpy/ma/core.py
@@ -2799,13 +2799,8 @@ class MaskedArray(ndarray):
# FIXME _sharedmask is never used.
_sharedmask = True
# Process mask.
- # Number of named fields (or zero if none)
- names_ = _data.dtype.names or ()
# Type of the mask
- if names_:
- mdtype = make_mask_descr(_data.dtype)
- else:
- mdtype = MaskType
+ mdtype = make_mask_descr(_data.dtype)
if mask is nomask:
# Case 1. : no mask in input.
@@ -2831,14 +2826,12 @@ class MaskedArray(ndarray):
_data._mask = mask
_data._sharedmask = False
else:
+ _data._sharedmask = not copy
if copy:
_data._mask = _data._mask.copy()
- _data._sharedmask = False
# Reset the shape of the original mask
if getmask(data) is not nomask:
data._mask.shape = data.shape
- else:
- _data._sharedmask = True
else:
# Case 2. : With a mask in input.
# If mask is boolean, create an array of True or False
@@ -2875,7 +2868,7 @@ class MaskedArray(ndarray):
_data._mask = mask
_data._sharedmask = not copy
else:
- if names_:
+ if _data.dtype.names:
def _recursive_or(a, b):
"do a|=b on each field of a, recursively"
for name in a.dtype.names:
@@ -2884,7 +2877,7 @@ class MaskedArray(ndarray):
_recursive_or(af, bf)
else:
af |= bf
- return
+
_recursive_or(_data._mask, mask)
else:
_data._mask = np.logical_or(mask, _data._mask)
@@ -3089,7 +3082,7 @@ class MaskedArray(ndarray):
returned object (this is equivalent to setting the ``type``
parameter).
type : Python type, optional
- Type of the returned view, e.g., ndarray or matrix. Again, the
+ Type of the returned view, either ndarray or a subclass. The
default None results in type preservation.
Notes
@@ -3673,14 +3666,14 @@ class MaskedArray(ndarray):
>>> type(x.filled())
<type 'numpy.ndarray'>
- Subclassing is preserved. This means that if the data part of the masked
- array is a matrix, `filled` returns a matrix:
-
- >>> x = np.ma.array(np.matrix([[1, 2], [3, 4]]), mask=[[0, 1], [1, 0]])
- >>> x.filled()
- matrix([[ 1, 999999],
- [999999, 4]])
+ Subclassing is preserved. This means that if, e.g., the data part of
+ the masked array is a recarray, `filled` returns a recarray:
+
+ >>> x = np.array([(-1, 2), (-3, 4)], dtype='i8,i8').view(np.recarray)
+ >>> m = np.ma.array(x, mask=[(True, False), (False, True)])
+ >>> m.filled()
+ rec.array([(999999, 2), ( -3, 999999)],
+ dtype=[('f0', '<i8'), ('f1', '<i8')])
"""
m = self._mask
if m is nomask:
@@ -5531,15 +5524,7 @@ class MaskedArray(ndarray):
sidx = self.argsort(axis=axis, kind=kind, order=order,
fill_value=fill_value, endwith=endwith)
- # save memory for 1d arrays
- if self.ndim == 1:
- idx = sidx
- else:
- idx = list(np.ix_(*[np.arange(x) for x in self.shape]))
- idx[axis] = sidx
- idx = tuple(idx)
-
- self[...] = self[idx]
+ self[...] = np.take_along_axis(self, sidx, axis=axis)
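Aside (not part of the diff): `np.take_along_axis` (added in NumPy 1.15) gathers values using an index array of matching shape, which is exactly what `argsort` produces; that is what lets it replace the `np.ix_` index-grid construction above. A minimal sketch:

    import numpy as np

    a = np.array([[3, 1, 2], [9, 7, 8]])
    sidx = np.argsort(a, axis=1)
    print(np.take_along_axis(a, sidx, axis=1))  # [[1 2 3] [7 8 9]]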
def min(self, axis=None, out=None, fill_value=None, keepdims=np._NoValue):
"""
@@ -6317,6 +6302,12 @@ class MaskedConstant(MaskedArray):
# precedent for this with `np.bool_` scalars.
return self
+ def __copy__(self):
+ return self
+
+ def __deepcopy__(self, memo):
+ return self
+
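Aside (not part of the diff): with these two methods in place, the `copy` module round-trips the masked singleton instead of manufacturing a second instance:

    import copy
    import numpy as np

    assert copy.copy(np.ma.masked) is np.ma.masked
    assert copy.deepcopy(np.ma.masked) is np.ma.masked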
def __setattr__(self, attr, value):
if not self.__has_singleton():
# allow the singleton to be initialized
diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py
index 8272dced9..3be4d3625 100644
--- a/numpy/ma/extras.py
+++ b/numpy/ma/extras.py
@@ -747,19 +747,17 @@ def _median(a, axis=None, out=None, overwrite_input=False):
return np.ma.minimum_fill_value(asorted)
return s
- counts = count(asorted, axis=axis)
+ counts = count(asorted, axis=axis, keepdims=True)
h = counts // 2
- # create indexing mesh grid for all but reduced axis
- axes_grid = [np.arange(x) for i, x in enumerate(asorted.shape)
- if i != axis]
- ind = np.meshgrid(*axes_grid, sparse=True, indexing='ij')
+ # duplicate the high index when the count is odd, so the mean is a no-op
+ odd = counts % 2 == 1
+ l = np.where(odd, h, h-1)
- # insert indices of low and high median
- ind.insert(axis, h - 1)
- low = asorted[tuple(ind)]
- ind[axis] = np.minimum(h, asorted.shape[axis] - 1)
- high = asorted[tuple(ind)]
+ lh = np.concatenate([l, h], axis=axis)
+
+ # get low and high median
+ low_high = np.take_along_axis(asorted, lh, axis=axis)
def replace_masked(s):
# Replace masked entries with minimum_full_value unless it all values
@@ -767,30 +765,20 @@ def _median(a, axis=None, out=None, overwrite_input=False):
# larger than the fill value is undefined and a valid value placed
# elsewhere, e.g. [4, --, inf].
if np.ma.is_masked(s):
- rep = (~np.all(asorted.mask, axis=axis)) & s.mask
+ rep = (~np.all(asorted.mask, axis=axis, keepdims=True)) & s.mask
s.data[rep] = np.ma.minimum_fill_value(asorted)
s.mask[rep] = False
- replace_masked(low)
- replace_masked(high)
-
- # duplicate high if odd number of elements so mean does nothing
- odd = counts % 2 == 1
- np.copyto(low, high, where=odd)
- # not necessary for scalar True/False masks
- try:
- np.copyto(low.mask, high.mask, where=odd)
- except Exception:
- pass
+ replace_masked(low_high)
if np.issubdtype(asorted.dtype, np.inexact):
# avoid inf / x = masked
- s = np.ma.sum([low, high], axis=0, out=out)
+ s = np.ma.sum(low_high, axis=axis, out=out)
np.true_divide(s.data, 2., casting='unsafe', out=s.data)
s = np.lib.utils._median_nancheck(asorted, s, axis, out)
else:
- s = np.ma.mean([low, high], axis=0, out=out)
+ s = np.ma.mean(low_high, axis=axis, out=out)
return s
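Aside (not part of the diff): the reworked `_median` gathers the low and high middle elements with a single `take_along_axis` call and averages them, duplicating the high index for odd counts so the mean is a no-op. A minimal sketch on plain (unmasked) sorted data, mirroring the names above:

    import numpy as np

    s = np.sort(np.array([[1, 3, 2, 4], [9, 5, 7, 6]]), axis=1)
    counts = np.full((2, 1), 4)              # valid entries, keepdims shape
    h = counts // 2
    l = np.where(counts % 2 == 1, h, h - 1)  # low index; equals h when odd
    lh = np.concatenate([l, h], axis=1)
    low_high = np.take_along_axis(s, lh, axis=1)
    print(low_high.mean(axis=1))             # [2.5 6.5]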
@@ -1465,9 +1453,14 @@ class MAxisConcatenator(AxisConcatenator):
"""
concatenate = staticmethod(concatenate)
- @staticmethod
- def makemat(arr):
- return array(arr.data.view(np.matrix), mask=arr.mask)
+ @classmethod
+ def makemat(cls, arr):
+ # There used to be a view as np.matrix here, but we may eventually
+ # deprecate that class. In preparation, we use the unmasked version
+ # to construct the matrix (with copy=False for backwards compatibility
+ # with the .view)
+ data = super(MAxisConcatenator, cls).makemat(arr.data, copy=False)
+ return array(data, mask=arr.mask)
def __getitem__(self, key):
# matrix builder syntax, like 'a, b; c, d'
diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py
index 9caf38b56..4c7440aab 100644
--- a/numpy/ma/tests/test_core.py
+++ b/numpy/ma/tests/test_core.py
@@ -335,49 +335,6 @@ class TestMaskedArray(object):
assert_equal(s1, s2)
assert_(x1[1:1].shape == (0,))
- def test_matrix_indexing(self):
- # Tests conversions and indexing
- x1 = np.matrix([[1, 2, 3], [4, 3, 2]])
- x2 = array(x1, mask=[[1, 0, 0], [0, 1, 0]])
- x3 = array(x1, mask=[[0, 1, 0], [1, 0, 0]])
- x4 = array(x1)
- # test conversion to strings
- str(x2) # raises?
- repr(x2) # raises?
- # tests of indexing
- assert_(type(x2[1, 0]) is type(x1[1, 0]))
- assert_(x1[1, 0] == x2[1, 0])
- assert_(x2[1, 1] is masked)
- assert_equal(x1[0, 2], x2[0, 2])
- assert_equal(x1[0, 1:], x2[0, 1:])
- assert_equal(x1[:, 2], x2[:, 2])
- assert_equal(x1[:], x2[:])
- assert_equal(x1[1:], x3[1:])
- x1[0, 2] = 9
- x2[0, 2] = 9
- assert_equal(x1, x2)
- x1[0, 1:] = 99
- x2[0, 1:] = 99
- assert_equal(x1, x2)
- x2[0, 1] = masked
- assert_equal(x1, x2)
- x2[0, 1:] = masked
- assert_equal(x1, x2)
- x2[0, :] = x1[0, :]
- x2[0, 1] = masked
- assert_(allequal(getmask(x2), np.array([[0, 1, 0], [0, 1, 0]])))
- x3[1, :] = masked_array([1, 2, 3], [1, 1, 0])
- assert_(allequal(getmask(x3)[1], array([1, 1, 0])))
- assert_(allequal(getmask(x3[1]), array([1, 1, 0])))
- x4[1, :] = masked_array([1, 2, 3], [1, 1, 0])
- assert_(allequal(getmask(x4[1]), array([1, 1, 0])))
- assert_(allequal(x4[1], array([1, 2, 3])))
- x1 = np.matrix(np.arange(5) * 1.0)
- x2 = masked_values(x1, 3.0)
- assert_equal(x1, x2)
- assert_(allequal(array([0, 0, 0, 1, 0], MaskType), x2.mask))
- assert_equal(3.0, x2.fill_value)
-
@suppress_copy_mask_on_assignment
def test_copy(self):
# Tests of some subtle points of copying and sizing.
@@ -611,11 +568,13 @@ class TestMaskedArray(object):
def test_pickling_subbaseclass(self):
# Test pickling w/ a subclass of ndarray
- a = array(np.matrix(list(range(10))), mask=[1, 0, 1, 0, 0] * 2)
+ x = np.array([(1.0, 2), (3.0, 4)],
+ dtype=[('x', float), ('y', int)]).view(np.recarray)
+ a = masked_array(x, mask=[(True, False), (False, True)])
a_pickled = pickle.loads(a.dumps())
assert_equal(a_pickled._mask, a._mask)
assert_equal(a_pickled, a)
- assert_(isinstance(a_pickled._data, np.matrix))
+ assert_(isinstance(a_pickled._data, np.recarray))
def test_pickling_maskedconstant(self):
# Test pickling MaskedConstant
@@ -1448,16 +1407,6 @@ class TestMaskedArrayArithmetic(object):
assert_(result is output)
assert_(output[0] is masked)
- def test_count_mean_with_matrix(self):
- m = np.ma.array(np.matrix([[1,2],[3,4]]), mask=np.zeros((2,2)))
-
- assert_equal(m.count(axis=0).shape, (1,2))
- assert_equal(m.count(axis=1).shape, (2,1))
-
- #make sure broadcasting inside mean and var work
- assert_equal(m.mean(axis=0), [[2., 3.]])
- assert_equal(m.mean(axis=1), [[1.5], [3.5]])
-
def test_eq_on_structured(self):
# Test the equality of structured arrays
ndtype = [('A', int), ('B', int)]
@@ -1740,23 +1689,6 @@ class TestMaskedArrayAttributes(object):
def test_flat(self):
# Test that flat can return all types of items [#4585, #4615]
- # test simple access
- test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1])
- assert_equal(test.flat[1], 2)
- assert_equal(test.flat[2], masked)
- assert_(np.all(test.flat[0:2] == test[0, 0:2]))
- # Test flat on masked_matrices
- test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1])
- test.flat = masked_array([3, 2, 1], mask=[1, 0, 0])
- control = masked_array(np.matrix([[3, 2, 1]]), mask=[1, 0, 0])
- assert_equal(test, control)
- # Test setting
- test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1])
- testflat = test.flat
- testflat[:] = testflat[[2, 1, 0]]
- assert_equal(test, control)
- testflat[0] = 9
- assert_equal(test[0, 0], 9)
# test 2-D record array
# ... on structured array w/ masked records
x = array([[(1, 1.1, 'one'), (2, 2.2, 'two'), (3, 3.3, 'thr')],
@@ -1784,12 +1716,6 @@ class TestMaskedArrayAttributes(object):
if i >= x.shape[-1]:
i = 0
j += 1
- # test that matrices keep the correct shape (#4615)
- a = masked_array(np.matrix(np.eye(2)), mask=0)
- b = a.flat
- b01 = b[:2]
- assert_equal(b01.data, array([[1., 0.]]))
- assert_equal(b01.mask, array([[False, False]]))
def test_assign_dtype(self):
# check that the mask's dtype is updated when dtype is changed
@@ -2893,32 +2819,6 @@ class TestMaskedArrayMethods(object):
assert_equal(mxsmall.any(0), [True, True, False])
assert_equal(mxsmall.any(1), [True, True, False])
- def test_allany_onmatrices(self):
- x = np.array([[0.13, 0.26, 0.90],
- [0.28, 0.33, 0.63],
- [0.31, 0.87, 0.70]])
- X = np.matrix(x)
- m = np.array([[True, False, False],
- [False, False, False],
- [True, True, False]], dtype=np.bool_)
- mX = masked_array(X, mask=m)
- mXbig = (mX > 0.5)
- mXsmall = (mX < 0.5)
-
- assert_(not mXbig.all())
- assert_(mXbig.any())
- assert_equal(mXbig.all(0), np.matrix([False, False, True]))
- assert_equal(mXbig.all(1), np.matrix([False, False, True]).T)
- assert_equal(mXbig.any(0), np.matrix([False, False, True]))
- assert_equal(mXbig.any(1), np.matrix([True, True, True]).T)
-
- assert_(not mXsmall.all())
- assert_(mXsmall.any())
- assert_equal(mXsmall.all(0), np.matrix([True, True, False]))
- assert_equal(mXsmall.all(1), np.matrix([False, False, False]).T)
- assert_equal(mXsmall.any(0), np.matrix([True, True, False]))
- assert_equal(mXsmall.any(1), np.matrix([True, True, False]).T)
-
def test_allany_oddities(self):
# Some fun with all and any
store = empty((), dtype=bool)
@@ -3017,14 +2917,6 @@ class TestMaskedArrayMethods(object):
b = a.compressed()
assert_equal(b, [2, 3, 4])
- a = array(np.matrix([1, 2, 3, 4]), mask=[0, 0, 0, 0])
- b = a.compressed()
- assert_equal(b, a)
- assert_(isinstance(b, np.matrix))
- a[0, 0] = masked
- b = a.compressed()
- assert_equal(b, [[2, 3, 4]])
-
def test_empty(self):
# Tests empty/like
datatype = [('a', int), ('b', float), ('c', '|S8')]
@@ -3139,10 +3031,6 @@ class TestMaskedArrayMethods(object):
a = array([0, 0], mask=[1, 1])
aravel = a.ravel()
assert_equal(aravel._mask.shape, a.shape)
- a = array(np.matrix([1, 2, 3, 4, 5]), mask=[[0, 1, 0, 0, 0]])
- aravel = a.ravel()
- assert_equal(aravel.shape, (1, 5))
- assert_equal(aravel._mask.shape, a.shape)
# Checks that small_mask is preserved
a = array([1, 2, 3, 4], mask=[0, 0, 0, 0], shrink=False)
assert_equal(a.ravel()._mask, [0, 0, 0, 0])
@@ -4607,10 +4495,6 @@ class TestMaskedFields(object):
assert_equal(test, data)
assert_equal(test.mask, controlmask.reshape(-1, 2))
- test = a.view((float, 2), np.matrix)
- assert_equal(test, data)
- assert_(isinstance(test, np.matrix))
-
def test_getitem(self):
ndtype = [('a', float), ('b', float)]
a = array(list(zip(np.random.rand(10), np.arange(10))), dtype=ndtype)
@@ -4794,11 +4678,12 @@ class TestMaskedView(object):
def test_view_to_dtype_and_type(self):
(data, a, controlmask) = self.data
- test = a.view((float, 2), np.matrix)
+ test = a.view((float, 2), np.recarray)
assert_equal(test, data)
- assert_(isinstance(test, np.matrix))
+ assert_(isinstance(test, np.recarray))
assert_(not isinstance(test, MaskedArray))
+
class TestOptionalArgs(object):
def test_ndarrayfuncs(self):
# test axis arg behaves the same as ndarray (including multiple axes)
@@ -4941,6 +4826,16 @@ class TestMaskedConstant(object):
np.ma.masked.copy() is np.ma.masked,
np.True_.copy() is np.True_)
+ def test__copy(self):
+ import copy
+ assert_(
+ copy.copy(np.ma.masked) is np.ma.masked)
+
+ def test_deepcopy(self):
+ import copy
+ assert_(
+ copy.deepcopy(np.ma.masked) is np.ma.masked)
+
def test_immutable(self):
orig = np.ma.masked
assert_raises(np.ma.core.MaskError, operator.setitem, orig, (), 1)
diff --git a/numpy/ma/tests/test_extras.py b/numpy/ma/tests/test_extras.py
index a7a32b628..c29bec2bd 100644
--- a/numpy/ma/tests/test_extras.py
+++ b/numpy/ma/tests/test_extras.py
@@ -307,18 +307,6 @@ class TestConcatenator(object):
assert_array_equal(d[5:,:], b_2)
assert_array_equal(d.mask, np.r_[m_1, m_2])
- def test_matrix_builder(self):
- assert_raises(np.ma.MAError, lambda: mr_['1, 2; 3, 4'])
-
- def test_matrix(self):
- actual = mr_['r', 1, 2, 3]
- expected = np.ma.array(np.r_['r', 1, 2, 3])
- assert_array_equal(actual, expected)
-
- # outer type is masked array, inner type is matrix
- assert_equal(type(actual), type(expected))
- assert_equal(type(actual.data), type(expected.data))
-
def test_masked_constant(self):
actual = mr_[np.ma.masked, 1]
assert_equal(actual.mask, [True, False])
diff --git a/numpy/ma/tests/test_subclassing.py b/numpy/ma/tests/test_subclassing.py
index b61a46278..f8ab52bb9 100644
--- a/numpy/ma/tests/test_subclassing.py
+++ b/numpy/ma/tests/test_subclassing.py
@@ -75,27 +75,6 @@ class MSubArray(SubArray, MaskedArray):
msubarray = MSubArray
-class MMatrix(MaskedArray, np.matrix,):
-
- def __new__(cls, data, mask=nomask):
- mat = np.matrix(data)
- _data = MaskedArray.__new__(cls, data=mat, mask=mask)
- return _data
-
- def __array_finalize__(self, obj):
- np.matrix.__array_finalize__(self, obj)
- MaskedArray.__array_finalize__(self, obj)
- return
-
- def _get_series(self):
- _view = self.view(MaskedArray)
- _view._sharedmask = False
- return _view
- _series = property(fget=_get_series)
-
-mmatrix = MMatrix
-
-
# Also a subclass that overrides __str__, __repr__ and __setitem__, disallowing
# setting to non-class values (and thus np.ma.core.masked_print_option)
# and overrides __array_wrap__, updating the info dict, to check that this
@@ -180,7 +159,7 @@ class TestSubclassing(object):
def setup(self):
x = np.arange(5, dtype='float')
- mx = mmatrix(x, mask=[0, 1, 0, 0, 0])
+ mx = msubarray(x, mask=[0, 1, 0, 0, 0])
self.data = (x, mx)
def test_data_subclassing(self):
@@ -196,34 +175,34 @@ class TestSubclassing(object):
def test_maskedarray_subclassing(self):
# Tests subclassing MaskedArray
(x, mx) = self.data
- assert_(isinstance(mx._data, np.matrix))
+ assert_(isinstance(mx._data, subarray))
def test_masked_unary_operations(self):
# Tests masked_unary_operation
(x, mx) = self.data
with np.errstate(divide='ignore'):
- assert_(isinstance(log(mx), mmatrix))
+ assert_(isinstance(log(mx), msubarray))
assert_equal(log(x), np.log(x))
def test_masked_binary_operations(self):
# Tests masked_binary_operation
(x, mx) = self.data
- # Result should be a mmatrix
- assert_(isinstance(add(mx, mx), mmatrix))
- assert_(isinstance(add(mx, x), mmatrix))
+ # Result should be a msubarray
+ assert_(isinstance(add(mx, mx), msubarray))
+ assert_(isinstance(add(mx, x), msubarray))
# Result should work
assert_equal(add(mx, x), mx+x)
- assert_(isinstance(add(mx, mx)._data, np.matrix))
- assert_(isinstance(add.outer(mx, mx), mmatrix))
- assert_(isinstance(hypot(mx, mx), mmatrix))
- assert_(isinstance(hypot(mx, x), mmatrix))
+ assert_(isinstance(add(mx, mx)._data, subarray))
+ assert_(isinstance(add.outer(mx, mx), msubarray))
+ assert_(isinstance(hypot(mx, mx), msubarray))
+ assert_(isinstance(hypot(mx, x), msubarray))
def test_masked_binary_operations2(self):
# Tests domained_masked_binary_operation
(x, mx) = self.data
xmx = masked_array(mx.data.__array__(), mask=mx.mask)
- assert_(isinstance(divide(mx, mx), mmatrix))
- assert_(isinstance(divide(mx, x), mmatrix))
+ assert_(isinstance(divide(mx, mx), msubarray))
+ assert_(isinstance(divide(mx, x), msubarray))
assert_equal(divide(mx, mx), divide(xmx, xmx))
def test_attributepropagation(self):
diff --git a/numpy/matrixlib/defmatrix.py b/numpy/matrixlib/defmatrix.py
index 1f5c94921..9909fec8d 100644
--- a/numpy/matrixlib/defmatrix.py
+++ b/numpy/matrixlib/defmatrix.py
@@ -5,8 +5,11 @@ __all__ = ['matrix', 'bmat', 'mat', 'asmatrix']
import sys
import ast
import numpy.core.numeric as N
-from numpy.core.numeric import concatenate, isscalar, binary_repr, identity, asanyarray
-from numpy.core.numerictypes import issubdtype
+from numpy.core.numeric import concatenate, isscalar
+# While not in __all__, matrix_power used to be defined here, so we import
+# it for backward compatibility.
+from numpy.linalg import matrix_power
+
def _convert_from_string(data):
for char in '[]':
@@ -63,114 +66,6 @@ def asmatrix(data, dtype=None):
"""
return matrix(data, dtype=dtype, copy=False)
-def matrix_power(M, n):
- """
- Raise a square matrix to the (integer) power `n`.
-
- For positive integers `n`, the power is computed by repeated matrix
- squarings and matrix multiplications. If ``n == 0``, the identity matrix
- of the same shape as M is returned. If ``n < 0``, the inverse
- is computed and then raised to the ``abs(n)``.
-
- Parameters
- ----------
- M : ndarray or matrix object
- Matrix to be "powered." Must be square, i.e. ``M.shape == (m, m)``,
- with `m` a positive integer.
- n : int
- The exponent can be any integer or long integer, positive,
- negative, or zero.
-
- Returns
- -------
- M**n : ndarray or matrix object
- The return value is the same shape and type as `M`;
- if the exponent is positive or zero then the type of the
- elements is the same as those of `M`. If the exponent is
- negative the elements are floating-point.
-
- Raises
- ------
- LinAlgError
- If the matrix is not numerically invertible.
-
- See Also
- --------
- matrix
- Provides an equivalent function as the exponentiation operator
- (``**``, not ``^``).
-
- Examples
- --------
- >>> from numpy import linalg as LA
- >>> i = np.array([[0, 1], [-1, 0]]) # matrix equiv. of the imaginary unit
- >>> LA.matrix_power(i, 3) # should = -i
- array([[ 0, -1],
- [ 1, 0]])
- >>> LA.matrix_power(np.matrix(i), 3) # matrix arg returns matrix
- matrix([[ 0, -1],
- [ 1, 0]])
- >>> LA.matrix_power(i, 0)
- array([[1, 0],
- [0, 1]])
- >>> LA.matrix_power(i, -3) # should = 1/(-i) = i, but w/ f.p. elements
- array([[ 0., 1.],
- [-1., 0.]])
-
- Somewhat more sophisticated example
-
- >>> q = np.zeros((4, 4))
- >>> q[0:2, 0:2] = -i
- >>> q[2:4, 2:4] = i
- >>> q # one of the three quaternion units not equal to 1
- array([[ 0., -1., 0., 0.],
- [ 1., 0., 0., 0.],
- [ 0., 0., 0., 1.],
- [ 0., 0., -1., 0.]])
- >>> LA.matrix_power(q, 2) # = -np.eye(4)
- array([[-1., 0., 0., 0.],
- [ 0., -1., 0., 0.],
- [ 0., 0., -1., 0.],
- [ 0., 0., 0., -1.]])
-
- """
- M = asanyarray(M)
- if M.ndim != 2 or M.shape[0] != M.shape[1]:
- raise ValueError("input must be a square array")
- if not issubdtype(type(n), N.integer):
- raise TypeError("exponent must be an integer")
-
- from numpy.linalg import inv
-
- if n==0:
- M = M.copy()
- M[:] = identity(M.shape[0])
- return M
- elif n<0:
- M = inv(M)
- n *= -1
-
- result = M
- if n <= 3:
- for _ in range(n-1):
- result=N.dot(result, M)
- return result
-
- # binary decomposition to reduce the number of Matrix
- # multiplications for n > 3.
- beta = binary_repr(n)
- Z, q, t = M, 0, len(beta)
- while beta[t-q-1] == '0':
- Z = N.dot(Z, Z)
- q += 1
- result = Z
- for k in range(q+1, t):
- Z = N.dot(Z, Z)
- if beta[t-k-1] == '1':
- result = N.dot(result, Z)
- return result
-
-
class matrix(N.ndarray):
"""
matrix(data, dtype=None, copy=True)
diff --git a/numpy/matrixlib/tests/test_defmatrix.py b/numpy/matrixlib/tests/test_defmatrix.py
index a02a05c09..d160490b3 100644
--- a/numpy/matrixlib/tests/test_defmatrix.py
+++ b/numpy/matrixlib/tests/test_defmatrix.py
@@ -13,7 +13,7 @@ from numpy.testing import (
assert_, assert_equal, assert_almost_equal, assert_array_equal,
assert_array_almost_equal, assert_raises
)
-from numpy.matrixlib.defmatrix import matrix_power
+from numpy.linalg import matrix_power
from numpy.matrixlib import mat
class TestCtor(object):
diff --git a/numpy/matrixlib/tests/test_interaction.py b/numpy/matrixlib/tests/test_interaction.py
new file mode 100644
index 000000000..fefb159c6
--- /dev/null
+++ b/numpy/matrixlib/tests/test_interaction.py
@@ -0,0 +1,361 @@
+"""Tests of interaction of matrix with other parts of numpy.
+
+Note that tests with MaskedArray and linalg are done in separate files.
+"""
+from __future__ import division, absolute_import, print_function
+
+import textwrap
+import warnings
+
+import numpy as np
+from numpy.testing import (assert_, assert_equal, assert_raises,
+ assert_raises_regex, assert_array_equal,
+ assert_almost_equal, assert_array_almost_equal)
+
+
+def test_fancy_indexing():
+ # The matrix class messes with the shape. While this is always
+ # weird (it does not use getitem, has no setitem, and does not know
+ # about fancy indexing), this tests gh-3110.
+ # 2018-04-29: moved here from core.tests.test_index.
+ m = np.matrix([[1, 2], [3, 4]])
+
+ assert_(isinstance(m[[0, 1, 0], :], np.matrix))
+
+ # gh-3110. Note the transpose: matrices currently do *not* support
+ # dimension fixing for fancy indexing correctly.
+ x = np.asmatrix(np.arange(50).reshape(5, 10))
+ assert_equal(x[:2, np.array(-1)], x[:2, -1].T)
+
+
+def test_polynomial_mapdomain():
+ # test that polynomial utilities preserve the matrix subtype.
+ # 2018-04-29: moved here from polynomial.tests.polyutils.
+ dom1 = [0, 4]
+ dom2 = [1, 3]
+ x = np.matrix([dom1, dom1])
+ res = np.polynomial.polyutils.mapdomain(x, dom1, dom2)
+ assert_(isinstance(res, np.matrix))
+
+
+def test_sort_matrix_none():
+ # 2018-04-29: moved here from core.tests.test_multiarray
+ a = np.matrix([[2, 1, 0]])
+ actual = np.sort(a, axis=None)
+ expected = np.matrix([[0, 1, 2]])
+ assert_equal(actual, expected)
+ assert_(type(actual) is np.matrix)
+
+
+def test_partition_matrix_none():
+ # gh-4301
+ # 2018-04-29: moved here from core.tests.test_multiarray
+ a = np.matrix([[2, 1, 0]])
+ actual = np.partition(a, 1, axis=None)
+ expected = np.matrix([[0, 1, 2]])
+ assert_equal(actual, expected)
+ assert_(type(actual) is np.matrix)
+
+
+def test_dot_scalar_and_matrix_of_objects():
+ # Ticket #2469
+ # 2018-04-29: moved here from core.tests.test_multiarray
+ arr = np.matrix([1, 2], dtype=object)
+ desired = np.matrix([[3, 6]], dtype=object)
+ assert_equal(np.dot(arr, 3), desired)
+ assert_equal(np.dot(3, arr), desired)
+
+
+def test_inner_scalar_and_matrix():
+ # 2018-04-29: moved here from core.tests.test_multiarray
+ for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?':
+ sca = np.array(3, dtype=dt)[()]
+ arr = np.matrix([[1, 2], [3, 4]], dtype=dt)
+ desired = np.matrix([[3, 6], [9, 12]], dtype=dt)
+ assert_equal(np.inner(arr, sca), desired)
+ assert_equal(np.inner(sca, arr), desired)
+
+
+def test_inner_scalar_and_matrix_of_objects():
+ # Ticket #4482
+ # 2018-04-29: moved here from core.tests.test_multiarray
+ arr = np.matrix([1, 2], dtype=object)
+ desired = np.matrix([[3, 6]], dtype=object)
+ assert_equal(np.inner(arr, 3), desired)
+ assert_equal(np.inner(3, arr), desired)
+
+
+def test_iter_allocate_output_subtype():
+ # Make sure that the subtype with priority wins
+ # 2018-04-29: moved here from core.tests.test_nditer, given the
+ # matrix specific shape test.
+
+ # matrix vs ndarray
+ a = np.matrix([[1, 2], [3, 4]])
+ b = np.arange(4).reshape(2, 2).T
+ i = np.nditer([a, b, None], [],
+ [['readonly'], ['readonly'], ['writeonly', 'allocate']])
+ assert_(type(i.operands[2]) is np.matrix)
+ assert_(type(i.operands[2]) is not np.ndarray)
+ assert_equal(i.operands[2].shape, (2, 2))
+
+ # matrix always wants things to be 2D
+ b = np.arange(4).reshape(1, 2, 2)
+ assert_raises(RuntimeError, np.nditer, [a, b, None], [],
+ [['readonly'], ['readonly'], ['writeonly', 'allocate']])
+ # but if subtypes are disabled, the result can still work
+ i = np.nditer([a, b, None], [],
+ [['readonly'], ['readonly'],
+ ['writeonly', 'allocate', 'no_subtype']])
+ assert_(type(i.operands[2]) is np.ndarray)
+ assert_(type(i.operands[2]) is not np.matrix)
+ assert_equal(i.operands[2].shape, (1, 2, 2))
+
+
+def test_like_function():
+ # 2018-04-29: moved here from core.tests.test_numeric
+ a = np.matrix([[1, 2], [3, 4]])
+ for like_function in np.zeros_like, np.ones_like, np.empty_like:
+ b = like_function(a)
+ assert_(type(b) is np.matrix)
+
+ c = like_function(a, subok=False)
+ assert_(type(c) is not np.matrix)
+
+
+def test_array_astype():
+ # 2018-04-29: copied here from core.tests.test_api
+ # subok=True passes through a matrix
+ a = np.matrix([[0, 1, 2], [3, 4, 5]], dtype='f4')
+ b = a.astype('f4', subok=True, copy=False)
+ assert_(a is b)
+
+ # subok=True is default, and creates a subtype on a cast
+ b = a.astype('i4', copy=False)
+ assert_equal(a, b)
+ assert_equal(type(b), np.matrix)
+
+ # subok=False never returns a matrix
+ b = a.astype('f4', subok=False, copy=False)
+ assert_equal(a, b)
+ assert_(not (a is b))
+ assert_(type(b) is not np.matrix)
+
+
+def test_stack():
+ # 2018-04-29: copied here from core.tests.test_shape_base
+ # check np.matrix cannot be stacked
+ m = np.matrix([[1, 2], [3, 4]])
+ assert_raises_regex(ValueError, 'shape too large to be a matrix',
+ np.stack, [m, m])
+
+
+def test_object_scalar_multiply():
+ # Tickets #2469 and #4482
+ # 2018-04-29: moved here from core.tests.test_ufunc
+ arr = np.matrix([1, 2], dtype=object)
+ desired = np.matrix([[3, 6]], dtype=object)
+ assert_equal(np.multiply(arr, 3), desired)
+ assert_equal(np.multiply(3, arr), desired)
+
+
+def test_nanfunctions_matrices():
+ # Check that it works and that type and
+ # shape are preserved
+ # 2018-04-29: moved here from core.tests.test_nanfunctions
+ mat = np.matrix(np.eye(3))
+ for f in [np.nanmin, np.nanmax]:
+ res = f(mat, axis=0)
+ assert_(isinstance(res, np.matrix))
+ assert_(res.shape == (1, 3))
+ res = f(mat, axis=1)
+ assert_(isinstance(res, np.matrix))
+ assert_(res.shape == (3, 1))
+ res = f(mat)
+ assert_(np.isscalar(res))
+ # check that rows of nan are dealt with for subclasses (#4628)
+ mat[1] = np.nan
+ for f in [np.nanmin, np.nanmax]:
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter('always')
+ res = f(mat, axis=0)
+ assert_(isinstance(res, np.matrix))
+ assert_(not np.any(np.isnan(res)))
+ assert_(len(w) == 0)
+
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter('always')
+ res = f(mat, axis=1)
+ assert_(isinstance(res, np.matrix))
+ assert_(np.isnan(res[1, 0]) and not np.isnan(res[0, 0])
+ and not np.isnan(res[2, 0]))
+ assert_(len(w) == 1, 'no warning raised')
+ assert_(issubclass(w[0].category, RuntimeWarning))
+
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter('always')
+ res = f(mat)
+ assert_(np.isscalar(res))
+ assert_(not np.isnan(res))
+ assert_(len(w) == 0)
+
+
+def test_nanfunctions_matrices_general():
+ # Check that it works and that type and
+ # shape are preserved
+ # 2018-04-29: moved here from core.tests.test_nanfunctions
+ mat = np.matrix(np.eye(3))
+ for f in (np.nanargmin, np.nanargmax, np.nansum, np.nanprod,
+ np.nanmean, np.nanvar, np.nanstd):
+ res = f(mat, axis=0)
+ assert_(isinstance(res, np.matrix))
+ assert_(res.shape == (1, 3))
+ res = f(mat, axis=1)
+ assert_(isinstance(res, np.matrix))
+ assert_(res.shape == (3, 1))
+ res = f(mat)
+ assert_(np.isscalar(res))
+
+ for f in np.nancumsum, np.nancumprod:
+ res = f(mat, axis=0)
+ assert_(isinstance(res, np.matrix))
+ assert_(res.shape == (3, 3))
+ res = f(mat, axis=1)
+ assert_(isinstance(res, np.matrix))
+ assert_(res.shape == (3, 3))
+ res = f(mat)
+ assert_(isinstance(res, np.matrix))
+ assert_(res.shape == (1, 3*3))
+
+
+def test_average_matrix():
+ # 2018-04-29: moved here from core.tests.test_function_base.
+ y = np.matrix(np.random.rand(5, 5))
+ assert_array_equal(y.mean(0), np.average(y, 0))
+
+ a = np.matrix([[1, 2], [3, 4]])
+ w = np.matrix([[1, 2], [3, 4]])
+
+ r = np.average(a, axis=0, weights=w)
+ assert_equal(type(r), np.matrix)
+ assert_equal(r, [[2.5, 10.0/3]])
+
+
+def test_trapz_matrix():
+ # Test to make sure matrices give the same answer as ndarrays
+ # 2018-04-29: moved here from core.tests.test_function_base.
+ x = np.linspace(0, 5)
+ y = x * x
+ r = np.trapz(y, x)
+ mx = np.matrix(x)
+ my = np.matrix(y)
+ mr = np.trapz(my, mx)
+ assert_almost_equal(mr, r)
+
+
+def test_ediff1d_matrix():
+ # 2018-04-29: moved here from core.tests.test_arraysetops.
+ assert_(isinstance(np.ediff1d(np.matrix(1)), np.matrix))
+ assert_(isinstance(np.ediff1d(np.matrix(1), to_begin=1), np.matrix))
+
+
+def test_apply_along_axis_matrix():
+ # this test is particularly malicious because matrix
+ # refuses to become 1d
+ # 2018-04-29: moved here from core.tests.test_shape_base.
+ def double(row):
+ return row * 2
+
+ m = np.matrix([[0, 1], [2, 3]])
+ expected = np.matrix([[0, 2], [4, 6]])
+
+ result = np.apply_along_axis(double, 0, m)
+ assert_(isinstance(result, np.matrix))
+ assert_array_equal(result, expected)
+
+ result = np.apply_along_axis(double, 1, m)
+ assert_(isinstance(result, np.matrix))
+ assert_array_equal(result, expected)
+
+
+def test_kron_matrix():
+ # 2018-04-29: moved here from core.tests.test_shape_base.
+ a = np.ones([2, 2])
+ m = np.asmatrix(a)
+ assert_equal(type(np.kron(a, a)), np.ndarray)
+ assert_equal(type(np.kron(m, m)), np.matrix)
+ assert_equal(type(np.kron(a, m)), np.matrix)
+ assert_equal(type(np.kron(m, a)), np.matrix)
+
+
+class TestConcatenatorMatrix(object):
+ # 2018-04-29: moved here from core.tests.test_index_tricks.
+ def test_matrix(self):
+ a = [1, 2]
+ b = [3, 4]
+
+ ab_r = np.r_['r', a, b]
+ ab_c = np.r_['c', a, b]
+
+ assert_equal(type(ab_r), np.matrix)
+ assert_equal(type(ab_c), np.matrix)
+
+ assert_equal(np.array(ab_r), [[1, 2, 3, 4]])
+ assert_equal(np.array(ab_c), [[1], [2], [3], [4]])
+
+ assert_raises(ValueError, lambda: np.r_['rc', a, b])
+
+ def test_matrix_scalar(self):
+ r = np.r_['r', [1, 2], 3]
+ assert_equal(type(r), np.matrix)
+ assert_equal(np.array(r), [[1, 2, 3]])
+
+ def test_matrix_builder(self):
+ a = np.array([1])
+ b = np.array([2])
+ c = np.array([3])
+ d = np.array([4])
+ actual = np.r_['a, b; c, d']
+ expected = np.bmat([[a, b], [c, d]])
+
+ assert_equal(actual, expected)
+ assert_equal(type(actual), type(expected))
+
+
+def test_array_equal_error_message_matrix():
+ # 2018-04-29: moved here from testing.tests.test_utils.
+ try:
+ assert_equal(np.array([1, 2]), np.matrix([1, 2]))
+ except AssertionError as e:
+ msg = str(e)
+ msg2 = msg.replace("shapes (2L,), (1L, 2L)", "shapes (2,), (1, 2)")
+ msg_reference = textwrap.dedent("""\
+
+ Arrays are not equal
+
+ (shapes (2,), (1, 2) mismatch)
+ x: array([1, 2])
+ y: matrix([[1, 2]])""")
+ try:
+ assert_equal(msg, msg_reference)
+ except AssertionError:
+ assert_equal(msg2, msg_reference)
+ else:
+ raise AssertionError("Did not raise")
+
+
+def test_array_almost_equal_matrix():
+ # Matrix slicing keeps things 2-D, while array does not necessarily.
+ # See gh-8452.
+ # 2018-04-29: moved here from testing.tests.test_utils.
+ m1 = np.matrix([[1., 2.]])
+ m2 = np.matrix([[1., np.nan]])
+ m3 = np.matrix([[1., -np.inf]])
+ m4 = np.matrix([[np.nan, np.inf]])
+ m5 = np.matrix([[1., 2.], [np.nan, np.inf]])
+ for assert_func in assert_array_almost_equal, assert_almost_equal:
+ for m in m1, m2, m3, m4, m5:
+ assert_func(m, m)
+ a = np.array(m)
+ assert_func(a, m)
+ assert_func(m, a)
diff --git a/numpy/matrixlib/tests/test_masked_matrix.py b/numpy/matrixlib/tests/test_masked_matrix.py
new file mode 100644
index 000000000..0a0d985c4
--- /dev/null
+++ b/numpy/matrixlib/tests/test_masked_matrix.py
@@ -0,0 +1,231 @@
+from __future__ import division, absolute_import, print_function
+
+import pickle
+
+import numpy as np
+from numpy.ma.testutils import (assert_, assert_equal, assert_raises,
+ assert_array_equal)
+from numpy.ma.core import (masked_array, masked_values, masked, allequal,
+ MaskType, getmask, MaskedArray, nomask,
+ log, add, hypot, divide)
+from numpy.ma.extras import mr_
+
+
+class MMatrix(MaskedArray, np.matrix,):
+
+ def __new__(cls, data, mask=nomask):
+ mat = np.matrix(data)
+ _data = MaskedArray.__new__(cls, data=mat, mask=mask)
+ return _data
+
+ def __array_finalize__(self, obj):
+ np.matrix.__array_finalize__(self, obj)
+ MaskedArray.__array_finalize__(self, obj)
+ return
+
+ def _get_series(self):
+ _view = self.view(MaskedArray)
+ _view._sharedmask = False
+ return _view
+ _series = property(fget=_get_series)
+
+
+class TestMaskedMatrix(object):
+ def test_matrix_indexing(self):
+ # Tests conversions and indexing
+ x1 = np.matrix([[1, 2, 3], [4, 3, 2]])
+ x2 = masked_array(x1, mask=[[1, 0, 0], [0, 1, 0]])
+ x3 = masked_array(x1, mask=[[0, 1, 0], [1, 0, 0]])
+ x4 = masked_array(x1)
+ # test conversion to strings
+ str(x2) # raises?
+ repr(x2) # raises?
+ # tests of indexing
+ assert_(type(x2[1, 0]) is type(x1[1, 0]))
+ assert_(x1[1, 0] == x2[1, 0])
+ assert_(x2[1, 1] is masked)
+ assert_equal(x1[0, 2], x2[0, 2])
+ assert_equal(x1[0, 1:], x2[0, 1:])
+ assert_equal(x1[:, 2], x2[:, 2])
+ assert_equal(x1[:], x2[:])
+ assert_equal(x1[1:], x3[1:])
+ x1[0, 2] = 9
+ x2[0, 2] = 9
+ assert_equal(x1, x2)
+ x1[0, 1:] = 99
+ x2[0, 1:] = 99
+ assert_equal(x1, x2)
+ x2[0, 1] = masked
+ assert_equal(x1, x2)
+ x2[0, 1:] = masked
+ assert_equal(x1, x2)
+ x2[0, :] = x1[0, :]
+ x2[0, 1] = masked
+ assert_(allequal(getmask(x2), np.array([[0, 1, 0], [0, 1, 0]])))
+ x3[1, :] = masked_array([1, 2, 3], [1, 1, 0])
+ assert_(allequal(getmask(x3)[1], masked_array([1, 1, 0])))
+ assert_(allequal(getmask(x3[1]), masked_array([1, 1, 0])))
+ x4[1, :] = masked_array([1, 2, 3], [1, 1, 0])
+ assert_(allequal(getmask(x4[1]), masked_array([1, 1, 0])))
+ assert_(allequal(x4[1], masked_array([1, 2, 3])))
+ x1 = np.matrix(np.arange(5) * 1.0)
+ x2 = masked_values(x1, 3.0)
+ assert_equal(x1, x2)
+ assert_(allequal(masked_array([0, 0, 0, 1, 0], dtype=MaskType),
+ x2.mask))
+ assert_equal(3.0, x2.fill_value)
+
+ def test_pickling_subbaseclass(self):
+ # Test pickling w/ a subclass of ndarray
+ a = masked_array(np.matrix(list(range(10))), mask=[1, 0, 1, 0, 0] * 2)
+ a_pickled = pickle.loads(a.dumps())
+ assert_equal(a_pickled._mask, a._mask)
+ assert_equal(a_pickled, a)
+ assert_(isinstance(a_pickled._data, np.matrix))
+
+ def test_count_mean_with_matrix(self):
+ m = masked_array(np.matrix([[1, 2], [3, 4]]), mask=np.zeros((2, 2)))
+
+ assert_equal(m.count(axis=0).shape, (1, 2))
+ assert_equal(m.count(axis=1).shape, (2, 1))
+
+ # Make sure broadcasting inside mean and var work
+ assert_equal(m.mean(axis=0), [[2., 3.]])
+ assert_equal(m.mean(axis=1), [[1.5], [3.5]])
+
+ def test_flat(self):
+ # Test that flat can return items even for matrices [#4585, #4615]
+ # test simple access
+ test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1])
+ assert_equal(test.flat[1], 2)
+ assert_equal(test.flat[2], masked)
+ assert_(np.all(test.flat[0:2] == test[0, 0:2]))
+ # Test flat on masked_matrices
+ test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1])
+ test.flat = masked_array([3, 2, 1], mask=[1, 0, 0])
+ control = masked_array(np.matrix([[3, 2, 1]]), mask=[1, 0, 0])
+ assert_equal(test, control)
+ # Test setting
+ test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1])
+ testflat = test.flat
+ testflat[:] = testflat[[2, 1, 0]]
+ assert_equal(test, control)
+ testflat[0] = 9
+ # test that matrices keep the correct shape (#4615)
+ a = masked_array(np.matrix(np.eye(2)), mask=0)
+ b = a.flat
+ b01 = b[:2]
+ assert_equal(b01.data, np.array([[1., 0.]]))
+ assert_equal(b01.mask, np.array([[False, False]]))
+
+ def test_allany_onmatrices(self):
+ x = np.array([[0.13, 0.26, 0.90],
+ [0.28, 0.33, 0.63],
+ [0.31, 0.87, 0.70]])
+ X = np.matrix(x)
+ m = np.array([[True, False, False],
+ [False, False, False],
+ [True, True, False]], dtype=np.bool_)
+ mX = masked_array(X, mask=m)
+ mXbig = (mX > 0.5)
+ mXsmall = (mX < 0.5)
+
+ assert_(not mXbig.all())
+ assert_(mXbig.any())
+ assert_equal(mXbig.all(0), np.matrix([False, False, True]))
+ assert_equal(mXbig.all(1), np.matrix([False, False, True]).T)
+ assert_equal(mXbig.any(0), np.matrix([False, False, True]))
+ assert_equal(mXbig.any(1), np.matrix([True, True, True]).T)
+
+ assert_(not mXsmall.all())
+ assert_(mXsmall.any())
+ assert_equal(mXsmall.all(0), np.matrix([True, True, False]))
+ assert_equal(mXsmall.all(1), np.matrix([False, False, False]).T)
+ assert_equal(mXsmall.any(0), np.matrix([True, True, False]))
+ assert_equal(mXsmall.any(1), np.matrix([True, True, False]).T)
+
+ def test_compressed(self):
+ a = masked_array(np.matrix([1, 2, 3, 4]), mask=[0, 0, 0, 0])
+ b = a.compressed()
+ assert_equal(b, a)
+ assert_(isinstance(b, np.matrix))
+ a[0, 0] = masked
+ b = a.compressed()
+ assert_equal(b, [[2, 3, 4]])
+
+ def test_ravel(self):
+ a = masked_array(np.matrix([1, 2, 3, 4, 5]), mask=[[0, 1, 0, 0, 0]])
+ aravel = a.ravel()
+ assert_equal(aravel.shape, (1, 5))
+ assert_equal(aravel._mask.shape, a.shape)
+
+ def test_view(self):
+ # Test view w/ flexible dtype
+ iterator = list(zip(np.arange(10), np.random.rand(10)))
+ data = np.array(iterator)
+ a = masked_array(iterator, dtype=[('a', float), ('b', float)])
+ a.mask[0] = (1, 0)
+ test = a.view((float, 2), np.matrix)
+ assert_equal(test, data)
+ assert_(isinstance(test, np.matrix))
+ assert_(not isinstance(test, MaskedArray))
+
+
+class TestSubclassing(object):
+ # Test suite for masked subclasses of ndarray.
+
+ def setup(self):
+ x = np.arange(5, dtype='float')
+ mx = MMatrix(x, mask=[0, 1, 0, 0, 0])
+ self.data = (x, mx)
+
+ def test_maskedarray_subclassing(self):
+ # Tests subclassing MaskedArray
+ (x, mx) = self.data
+ assert_(isinstance(mx._data, np.matrix))
+
+ def test_masked_unary_operations(self):
+ # Tests masked_unary_operation
+ (x, mx) = self.data
+ with np.errstate(divide='ignore'):
+ assert_(isinstance(log(mx), MMatrix))
+ assert_equal(log(x), np.log(x))
+
+ def test_masked_binary_operations(self):
+ # Tests masked_binary_operation
+ (x, mx) = self.data
+ # Result should be a MMatrix
+ assert_(isinstance(add(mx, mx), MMatrix))
+ assert_(isinstance(add(mx, x), MMatrix))
+ # Result should work
+ assert_equal(add(mx, x), mx+x)
+ assert_(isinstance(add(mx, mx)._data, np.matrix))
+ assert_(isinstance(add.outer(mx, mx), MMatrix))
+ assert_(isinstance(hypot(mx, mx), MMatrix))
+ assert_(isinstance(hypot(mx, x), MMatrix))
+
+ def test_masked_binary_operations2(self):
+ # Tests domained_masked_binary_operation
+ (x, mx) = self.data
+ xmx = masked_array(mx.data.__array__(), mask=mx.mask)
+ assert_(isinstance(divide(mx, mx), MMatrix))
+ assert_(isinstance(divide(mx, x), MMatrix))
+ assert_equal(divide(mx, mx), divide(xmx, xmx))
+
+
+class TestConcatenator(object):
+ # Tests for mr_, the equivalent of r_ for masked arrays.
+
+ def test_matrix_builder(self):
+ assert_raises(np.ma.MAError, lambda: mr_['1, 2; 3, 4'])
+
+ def test_matrix(self):
+ # Test consistency with unmasked version. If we ever deprecate
+ # matrix, this test should either still pass, or both actual and
+ # expected should fail to build.
+ actual = mr_['r', 1, 2, 3]
+ expected = np.ma.array(np.r_['r', 1, 2, 3])
+ assert_array_equal(actual, expected)
+
+ # outer type is masked array, inner type is matrix
+ assert_equal(type(actual), type(expected))
+ assert_equal(type(actual.data), type(expected.data))
diff --git a/numpy/matrixlib/tests/test_matrix_linalg.py b/numpy/matrixlib/tests/test_matrix_linalg.py
new file mode 100644
index 000000000..6fc733c2e
--- /dev/null
+++ b/numpy/matrixlib/tests/test_matrix_linalg.py
@@ -0,0 +1,95 @@
+""" Test functions for linalg module using the matrix class."""
+from __future__ import division, absolute_import, print_function
+
+import numpy as np
+
+from numpy.linalg.tests.test_linalg import (
+ LinalgCase, apply_tag, TestQR as _TestQR, LinalgTestCase,
+ _TestNorm2D, _TestNormDoubleBase, _TestNormSingleBase, _TestNormInt64Base,
+ SolveCases, InvCases, EigvalsCases, EigCases, SVDCases, CondCases,
+ PinvCases, DetCases, LstsqCases)
+
+
+CASES = []
+
+# square test cases
+CASES += apply_tag('square', [
+ LinalgCase("0x0_matrix",
+ np.empty((0, 0), dtype=np.double).view(np.matrix),
+ np.empty((0, 1), dtype=np.double).view(np.matrix),
+ tags={'size-0'}),
+ LinalgCase("matrix_b_only",
+ np.array([[1., 2.], [3., 4.]]),
+ np.matrix([2., 1.]).T),
+ LinalgCase("matrix_a_and_b",
+ np.matrix([[1., 2.], [3., 4.]]),
+ np.matrix([2., 1.]).T),
+])
+
+# hermitian test-cases
+CASES += apply_tag('hermitian', [
+ LinalgCase("hmatrix_a_and_b",
+ np.matrix([[1., 2.], [2., 1.]]),
+ None),
+])
+# No need to make generalized or strided cases for matrices.
+
+
+class MatrixTestCase(LinalgTestCase):
+ TEST_CASES = CASES
+
+
+class TestSolveMatrix(SolveCases, MatrixTestCase):
+ pass
+
+
+class TestInvMatrix(InvCases, MatrixTestCase):
+ pass
+
+
+class TestEigvalsMatrix(EigvalsCases, MatrixTestCase):
+ pass
+
+
+class TestEigMatrix(EigCases, MatrixTestCase):
+ pass
+
+
+class TestSVDMatrix(SVDCases, MatrixTestCase):
+ pass
+
+
+class TestCondMatrix(CondCases, MatrixTestCase):
+ pass
+
+
+class TestPinvMatrix(PinvCases, MatrixTestCase):
+ pass
+
+
+class TestDetMatrix(DetCases, MatrixTestCase):
+ pass
+
+
+class TestLstsqMatrix(LstsqCases, MatrixTestCase):
+ pass
+
+
+class _TestNorm2DMatrix(_TestNorm2D):
+ array = np.matrix
+
+
+class TestNormDoubleMatrix(_TestNorm2DMatrix, _TestNormDoubleBase):
+ pass
+
+
+class TestNormSingleMatrix(_TestNorm2DMatrix, _TestNormSingleBase):
+ pass
+
+
+class TestNormInt64Matrix(_TestNorm2DMatrix, _TestNormInt64Base):
+ pass
+
+
+class TestQRMatrix(_TestQR):
+ array = np.matrix
diff --git a/numpy/polynomial/tests/test_polyutils.py b/numpy/polynomial/tests/test_polyutils.py
index 32ea55716..801c558cc 100644
--- a/numpy/polynomial/tests/test_polyutils.py
+++ b/numpy/polynomial/tests/test_polyutils.py
@@ -63,7 +63,7 @@ class TestDomain(object):
dom1 = [0, 4]
dom2 = [1, 3]
tgt = dom2
- res = pu. mapdomain(dom1, dom1, dom2)
+ res = pu.mapdomain(dom1, dom1, dom2)
assert_almost_equal(res, tgt)
# test for complex values
@@ -83,11 +83,14 @@ class TestDomain(object):
assert_almost_equal(res, tgt)
# test that subtypes are preserved.
+ class MyNDArray(np.ndarray):
+ pass
+
dom1 = [0, 4]
dom2 = [1, 3]
- x = np.matrix([dom1, dom1])
+ x = np.array([dom1, dom1]).view(MyNDArray)
res = pu.mapdomain(x, dom1, dom2)
- assert_(isinstance(res, np.matrix))
+ assert_(isinstance(res, MyNDArray))
def test_mapparms(self):
# test for real values
diff --git a/numpy/random/mtrand/mtrand.pyx b/numpy/random/mtrand/mtrand.pyx
index 16d649c4a..b45b3146f 100644
--- a/numpy/random/mtrand/mtrand.pyx
+++ b/numpy/random/mtrand/mtrand.pyx
@@ -1284,7 +1284,7 @@ cdef class RandomState:
probability density function:
>>> import matplotlib.pyplot as plt
- >>> count, bins, ignored = plt.hist(s, 15, normed=True)
+ >>> count, bins, ignored = plt.hist(s, 15, density=True)
>>> plt.plot(bins, np.ones_like(bins), linewidth=2, color='r')
>>> plt.show()
@@ -1394,7 +1394,7 @@ cdef class RandomState:
See Also
--------
- random.standard_normal : Similar, but takes a tuple as its argument.
+ standard_normal : Similar, but takes a tuple as its argument.
Notes
-----
@@ -1457,7 +1457,7 @@ cdef class RandomState:
See Also
--------
- random.randint : Similar to `random_integers`, only for the half-open
+ randint : Similar to `random_integers`, only for the half-open
interval [`low`, `high`), and 0 is the lowest value if `high` is
omitted.
@@ -1495,7 +1495,7 @@ cdef class RandomState:
Display results as a histogram:
>>> import matplotlib.pyplot as plt
- >>> count, bins, ignored = plt.hist(dsums, 11, normed=True)
+ >>> count, bins, ignored = plt.hist(dsums, 11, density=True)
>>> plt.show()
"""
@@ -1631,7 +1631,7 @@ cdef class RandomState:
the probability density function:
>>> import matplotlib.pyplot as plt
- >>> count, bins, ignored = plt.hist(s, 30, normed=True)
+ >>> count, bins, ignored = plt.hist(s, 30, density=True)
>>> plt.plot(bins, 1/(sigma * np.sqrt(2 * np.pi)) *
... np.exp( - (bins - mu)**2 / (2 * sigma**2) ),
... linewidth=2, color='r')
@@ -1874,7 +1874,7 @@ cdef class RandomState:
>>> import matplotlib.pyplot as plt
>>> import scipy.special as sps
- >>> count, bins, ignored = plt.hist(s, 50, normed=True)
+ >>> count, bins, ignored = plt.hist(s, 50, density=True)
>>> y = bins**(shape-1) * ((np.exp(-bins/scale))/ \\
... (sps.gamma(shape) * scale**shape))
>>> plt.plot(bins, y, linewidth=2, color='r')
@@ -1964,7 +1964,7 @@ cdef class RandomState:
>>> import matplotlib.pyplot as plt
>>> import scipy.special as sps
- >>> count, bins, ignored = plt.hist(s, 50, normed=True)
+ >>> count, bins, ignored = plt.hist(s, 50, density=True)
>>> y = bins**(shape-1)*(np.exp(-bins/scale) /
... (sps.gamma(shape)*scale**shape))
>>> plt.plot(bins, y, linewidth=2, color='r')
@@ -2164,9 +2164,9 @@ cdef class RandomState:
>>> dfden = 20 # within groups degrees of freedom
>>> nonc = 3.0
>>> nc_vals = np.random.noncentral_f(dfnum, dfden, nonc, 1000000)
- >>> NF = np.histogram(nc_vals, bins=50, normed=True)
+ >>> NF = np.histogram(nc_vals, bins=50, density=True)
>>> c_vals = np.random.f(dfnum, dfden, 1000000)
- >>> F = np.histogram(c_vals, bins=50, normed=True)
+ >>> F = np.histogram(c_vals, bins=50, density=True)
>>> plt.plot(F[1][1:], F[0])
>>> plt.plot(NF[1][1:], NF[0])
>>> plt.show()
@@ -2342,7 +2342,7 @@ cdef class RandomState:
>>> import matplotlib.pyplot as plt
>>> values = plt.hist(np.random.noncentral_chisquare(3, 20, 100000),
- ... bins=200, normed=True)
+ ... bins=200, density=True)
>>> plt.show()
Draw values from a noncentral chisquare with very small noncentrality,
@@ -2350,9 +2350,9 @@ cdef class RandomState:
>>> plt.figure()
>>> values = plt.hist(np.random.noncentral_chisquare(3, .0000001, 100000),
- ... bins=np.arange(0., 25, .1), normed=True)
+ ... bins=np.arange(0., 25, .1), density=True)
>>> values2 = plt.hist(np.random.chisquare(3, 100000),
- ... bins=np.arange(0., 25, .1), normed=True)
+ ... bins=np.arange(0., 25, .1), density=True)
>>> plt.plot(values[1][0:-1], values[0]-values2[0], 'ob')
>>> plt.show()
@@ -2361,7 +2361,7 @@ cdef class RandomState:
>>> plt.figure()
>>> values = plt.hist(np.random.noncentral_chisquare(3, 20, 100000),
- ... bins=200, normed=True)
+ ... bins=200, density=True)
>>> plt.show()
"""
@@ -2529,7 +2529,7 @@ cdef class RandomState:
>>> t = (np.mean(intake)-7725)/(intake.std(ddof=1)/np.sqrt(len(intake)))
>>> import matplotlib.pyplot as plt
- >>> h = plt.hist(s, bins=100, normed=True)
+ >>> h = plt.hist(s, bins=100, density=True)
For a one-sided t-test, how far out in the distribution does the t
statistic appear?
@@ -2630,7 +2630,7 @@ cdef class RandomState:
>>> import matplotlib.pyplot as plt
>>> from scipy.special import i0
- >>> plt.hist(s, 50, normed=True)
+ >>> plt.hist(s, 50, density=True)
>>> x = np.linspace(-np.pi, np.pi, num=51)
>>> y = np.exp(kappa*np.cos(x-mu))/(2*np.pi*i0(kappa))
>>> plt.plot(x, y, linewidth=2, color='r')
@@ -2744,7 +2744,7 @@ cdef class RandomState:
density function:
>>> import matplotlib.pyplot as plt
- >>> count, bins, _ = plt.hist(s, 100, normed=True)
+ >>> count, bins, _ = plt.hist(s, 100, density=True)
>>> fit = a*m**a / bins**(a+1)
>>> plt.plot(bins, max(count)*fit/max(fit), linewidth=2, color='r')
>>> plt.show()
@@ -2957,17 +2957,17 @@ cdef class RandomState:
>>> powpdf = stats.powerlaw.pdf(xx,5)
>>> plt.figure()
- >>> plt.hist(rvs, bins=50, normed=True)
+ >>> plt.hist(rvs, bins=50, density=True)
>>> plt.plot(xx,powpdf,'r-')
>>> plt.title('np.random.power(5)')
>>> plt.figure()
- >>> plt.hist(1./(1.+rvsp), bins=50, normed=True)
+ >>> plt.hist(1./(1.+rvsp), bins=50, density=True)
>>> plt.plot(xx,powpdf,'r-')
>>> plt.title('inverse of 1 + np.random.pareto(5)')
>>> plt.figure()
- >>> plt.hist(1./(1.+rvsp), bins=50, normed=True)
+ >>> plt.hist(1./(1.+rvsp), bins=50, density=True)
>>> plt.plot(xx,powpdf,'r-')
>>> plt.title('inverse of stats.pareto(5)')
@@ -3055,7 +3055,7 @@ cdef class RandomState:
the probability density function:
>>> import matplotlib.pyplot as plt
- >>> count, bins, ignored = plt.hist(s, 30, normed=True)
+ >>> count, bins, ignored = plt.hist(s, 30, density=True)
>>> x = np.arange(-8., 8., .01)
>>> pdf = np.exp(-abs(x-loc)/scale)/(2.*scale)
>>> plt.plot(x, pdf)
@@ -3171,7 +3171,7 @@ cdef class RandomState:
the probability density function:
>>> import matplotlib.pyplot as plt
- >>> count, bins, ignored = plt.hist(s, 30, normed=True)
+ >>> count, bins, ignored = plt.hist(s, 30, density=True)
>>> plt.plot(bins, (1/beta)*np.exp(-(bins - mu)/beta)
... * np.exp( -np.exp( -(bins - mu) /beta) ),
... linewidth=2, color='r')
@@ -3186,7 +3186,7 @@ cdef class RandomState:
... a = np.random.normal(mu, beta, 1000)
... means.append(a.mean())
... maxima.append(a.max())
- >>> count, bins, ignored = plt.hist(maxima, 30, normed=True)
+ >>> count, bins, ignored = plt.hist(maxima, 30, density=True)
>>> beta = np.std(maxima) * np.sqrt(6) / np.pi
>>> mu = np.mean(maxima) - 0.57721*beta
>>> plt.plot(bins, (1/beta)*np.exp(-(bins - mu)/beta)
@@ -3381,7 +3381,7 @@ cdef class RandomState:
the probability density function:
>>> import matplotlib.pyplot as plt
- >>> count, bins, ignored = plt.hist(s, 100, normed=True, align='mid')
+ >>> count, bins, ignored = plt.hist(s, 100, density=True, align='mid')
>>> x = np.linspace(min(bins), max(bins), 10000)
>>> pdf = (np.exp(-(np.log(x) - mu)**2 / (2 * sigma**2))
@@ -3403,7 +3403,7 @@ cdef class RandomState:
... b.append(np.product(a))
>>> b = np.array(b) / np.min(b) # scale values to be positive
- >>> count, bins, ignored = plt.hist(b, 100, normed=True, align='mid')
+ >>> count, bins, ignored = plt.hist(b, 100, density=True, align='mid')
>>> sigma = np.std(np.log(b))
>>> mu = np.mean(np.log(b))
@@ -3480,7 +3480,7 @@ cdef class RandomState:
--------
Draw values from the distribution and plot the histogram
- >>> values = hist(np.random.rayleigh(3, 100000), bins=200, normed=True)
+ >>> values = hist(np.random.rayleigh(3, 100000), bins=200, density=True)
Wave heights tend to follow a Rayleigh distribution. If the mean wave
height is 1 meter, what fraction of waves are likely to be larger than 3
@@ -3572,7 +3572,7 @@ cdef class RandomState:
Draw values from the distribution and plot the histogram:
>>> import matplotlib.pyplot as plt
- >>> h = plt.hist(np.random.wald(3, 2, 100000), bins=200, normed=True)
+ >>> h = plt.hist(np.random.wald(3, 2, 100000), bins=200, density=True)
>>> plt.show()
"""
@@ -3659,7 +3659,7 @@ cdef class RandomState:
>>> import matplotlib.pyplot as plt
>>> h = plt.hist(np.random.triangular(-3, 0, 8, 100000), bins=200,
- ... normed=True)
+ ... density=True)
>>> plt.show()
"""
@@ -3817,7 +3817,7 @@ cdef class RandomState:
Draw samples from a negative binomial distribution.
Samples are drawn from a negative binomial distribution with specified
- parameters, `n` trials and `p` probability of success where `n` is an
+ parameters, `n` successes and `p` probability of success where `n` is an
integer > 0 and `p` is in the interval [0, 1].
Parameters
@@ -3837,21 +3837,19 @@ cdef class RandomState:
-------
out : ndarray or scalar
Drawn samples from the parameterized negative binomial distribution,
- where each sample is equal to N, the number of trials it took to
- achieve n - 1 successes, N - (n - 1) failures, and a success on the,
- (N + n)th trial.
+ where each sample is equal to N, the number of failures that
+ occurred before a total of n successes was reached.
Notes
-----
The probability density for the negative binomial distribution is
- .. math:: P(N;n,p) = \\binom{N+n-1}{n-1}p^{n}(1-p)^{N},
+ .. math:: P(N;n,p) = \\binom{N+n-1}{N}p^{n}(1-p)^{N},
- where :math:`n-1` is the number of successes, :math:`p` is the
- probability of success, and :math:`N+n-1` is the number of trials.
- The negative binomial distribution gives the probability of n-1
- successes and N failures in N+n-1 trials, and success on the (N+n)th
- trial.
+ where :math:`n` is the number of successes, :math:`p` is the
+ probability of success, and :math:`N+n` is the number of trials.
+ The negative binomial distribution gives the probability of N
+ failures given n successes, with a success on the last trial.
If one throws a die repeatedly until the third time a "1" appears,
then the probability distribution of the number of non-"1"s that
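The corrected PMF, binom(N+n-1, N) * p**n * (1-p)**N, is easy to check against the sampler itself. A sketch, assuming SciPy is available for the binomial coefficient:

    import numpy as np
    from scipy.special import comb

    n, p, N = 3, 0.3, 5
    samples = np.random.RandomState(0).negative_binomial(n, p, size=200000)
    pmf = comb(N + n - 1, N) * p**n * (1 - p)**N   # the corrected formula
    # the fraction of samples equal to N failures should approximate the PMF
    assert abs(np.mean(samples == N) - pmf) < 0.005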
@@ -3969,7 +3967,7 @@ cdef class RandomState:
Display histogram of the sample:
>>> import matplotlib.pyplot as plt
- >>> count, bins, ignored = plt.hist(s, 14, normed=True)
+ >>> count, bins, ignored = plt.hist(s, 14, density=True)
>>> plt.show()
Draw each 100 values for lambda 100 and 500:
@@ -4066,7 +4064,7 @@ cdef class RandomState:
Truncate s values at 50 so plot is interesting:
- >>> count, bins, ignored = plt.hist(s[s<50], 50, normed=True)
+ >>> count, bins, ignored = plt.hist(s[s<50], 50, density=True)
>>> x = np.arange(1., 50.)
>>> y = x**(-a) / special.zetac(a)
>>> plt.plot(x, y/max(y), linewidth=2, color='r')
@@ -4903,10 +4901,24 @@ cdef class RandomState:
"""
if isinstance(x, (int, long, np.integer)):
arr = np.arange(x)
- else:
- arr = np.array(x)
- self.shuffle(arr)
- return arr
+ self.shuffle(arr)
+ return arr
+
+ arr = np.asarray(x)
+
+ # shuffle has fast-path for 1-d
+ if arr.ndim == 1:
+ # must return a copy
+ if arr is x:
+ arr = np.array(arr)
+ self.shuffle(arr)
+ return arr
+
+    # Shuffle an index array instead; the intp dtype keeps shuffle on its 1-d fast path
+ idx = np.arange(arr.shape[0], dtype=np.intp)
+ self.shuffle(idx)
+ return arr[idx]
+
_rand = RandomState()
seed = _rand.seed
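The permutation rewrite above splits the work three ways: integers get np.arange plus an in-place shuffle, 1-d arrays are copied and shuffled directly, and n-d arrays are reordered through a shuffled intp index array along axis 0. A small behavioural check of the result:

    import numpy as np

    rs = np.random.RandomState(0)
    a = np.arange(12).reshape(4, 3)
    p = rs.permutation(a)
    # only the order of the rows changes; each row survives intact
    assert sorted(map(tuple, p.tolist())) == sorted(map(tuple, a.tolist()))
    # and the input is never shuffled in place
    assert not np.shares_memory(p, a)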
diff --git a/numpy/testing/_private/decorators.py b/numpy/testing/_private/decorators.py
index 60d3f968f..24c4e385d 100644
--- a/numpy/testing/_private/decorators.py
+++ b/numpy/testing/_private/decorators.py
@@ -34,7 +34,7 @@ def slow(t):
The exact definition of a slow test is obviously both subjective and
hardware-dependent, but in general any individual test that requires more
- than a second or two should be labeled as slow (the whole suite consits of
+ than a second or two should be labeled as slow (the whole suite consists of
thousands of tests, so even a second is significant).
Parameters
diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py
index 507ecb1e2..a7935f175 100644
--- a/numpy/testing/_private/utils.py
+++ b/numpy/testing/_private/utils.py
@@ -7,6 +7,7 @@ from __future__ import division, absolute_import, print_function
import os
import sys
import re
+import gc
import operator
import warnings
from functools import partial, wraps
@@ -14,6 +15,7 @@ import shutil
import contextlib
from tempfile import mkdtemp, mkstemp
from unittest.case import SkipTest
+import pprint
from numpy.core import(
float32, empty, arange, array_repr, ndarray, isnat, array)
@@ -35,7 +37,7 @@ __all__ = [
'assert_allclose', 'IgnoreException', 'clear_and_catch_warnings',
'SkipTest', 'KnownFailureException', 'temppath', 'tempdir', 'IS_PYPY',
'HAS_REFCOUNT', 'suppress_warnings', 'assert_array_compare',
- '_assert_valid_refcount', '_gen_alignment_data',
+ '_assert_valid_refcount', '_gen_alignment_data', 'assert_no_gc_cycles',
]
@@ -769,7 +771,11 @@ def assert_array_compare(comparison, x, y, err_msg='', verbose=True,
reduced = val.ravel()
cond = reduced.all()
reduced = reduced.tolist()
- if not cond:
+ # The below comparison is a hack to ensure that fully masked
+ # results, for which val.ravel().all() returns np.ma.masked,
+ # do not trigger a failure (np.ma.masked != True evaluates as
+ # np.ma.masked, which is falsy).
+ if cond != True:
match = 100-100.0*reduced.count(1)/len(reduced)
msg = build_err_msg([x, y],
err_msg
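The quirk that `cond != True` works around is simple to reproduce: np.ma.masked is falsy, and comparing it with anything yields np.ma.masked again, which is also falsy:

    import numpy as np

    cond = np.ma.masked        # what val.ravel().all() returns when fully masked
    assert not bool(cond)      # masked is falsy, so `if not cond:` would misfire
    assert not bool(cond != True)  # masked != True is masked too: branch skipped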
@@ -1367,16 +1373,20 @@ def _assert_valid_refcount(op):
"""
if not HAS_REFCOUNT:
return True
- import numpy as np
+ import numpy as np, gc
b = np.arange(100*100).reshape(100, 100)
c = b
i = 1
- rc = sys.getrefcount(i)
- for j in range(15):
- d = op(b, c)
- assert_(sys.getrefcount(i) >= rc)
+ gc.disable()
+ try:
+ rc = sys.getrefcount(i)
+ for j in range(15):
+ d = op(b, c)
+ assert_(sys.getrefcount(i) >= rc)
+ finally:
+ gc.enable()
del d # for pyflakes
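Wrapping the loop in gc.disable()/gc.enable() keeps a collection from running between the two refcount samples; presumably a collection that frees cycles mid-loop could drop references to the cached int and fail the >= check spuriously. The pattern in isolation:

    import gc
    import sys

    gc.disable()                 # no collections while refcounts are sampled
    try:
        obj = object()
        before = sys.getrefcount(obj)
        refs = [obj] * 10        # take extra references...
        del refs                 # ...and release them again
        assert sys.getrefcount(obj) == before
    finally:
        gc.enable()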
@@ -2272,3 +2282,89 @@ class suppress_warnings(object):
return func(*args, **kwargs)
return new_func
+
+
+@contextlib.contextmanager
+def _assert_no_gc_cycles_context(name=None):
+ __tracebackhide__ = True # Hide traceback for py.test
+
+ # not meaningful to test if there is no refcounting
+ if not HAS_REFCOUNT:
+        yield  # a contextmanager generator must yield even on this no-op path
+        return
+
+ assert_(gc.isenabled())
+ gc.disable()
+ gc_debug = gc.get_debug()
+ try:
+ for i in range(100):
+ if gc.collect() == 0:
+ break
+ else:
+ raise RuntimeError(
+ "Unable to fully collect garbage - perhaps a __del__ method is "
+ "creating more reference cycles?")
+
+ gc.set_debug(gc.DEBUG_SAVEALL)
+ yield
+ # gc.collect returns the number of unreachable objects in cycles that
+ # were found -- we are checking that no cycles were created in the context
+ n_objects_in_cycles = gc.collect()
+ objects_in_cycles = gc.garbage[:]
+ finally:
+ del gc.garbage[:]
+ gc.set_debug(gc_debug)
+ gc.enable()
+
+ if n_objects_in_cycles:
+ name_str = " when calling %s" % name if name is not None else ""
+ raise AssertionError(
+ "Reference cycles were found{}: {} objects were collected, "
+ "of which {} are shown below:{}"
+ .format(
+ name_str,
+ n_objects_in_cycles,
+ len(objects_in_cycles),
+ ''.join(
+ "\n {} object with id={}:\n {}".format(
+ type(o).__name__,
+ id(o),
+ pprint.pformat(o).replace('\n', '\n ')
+ ) for o in objects_in_cycles
+ )
+ )
+ )
+
+
+def assert_no_gc_cycles(*args, **kwargs):
+ """
+ Fail if the given callable produces any reference cycles.
+
+ If called with all arguments omitted, may be used as a context manager:
+
+ with assert_no_gc_cycles():
+ do_something()
+
+ .. versionadded:: 1.15.0
+
+ Parameters
+ ----------
+ func : callable
+ The callable to test.
+ \\*args : Arguments
+ Arguments passed to `func`.
+ \\*\\*kwargs : Kwargs
+ Keyword arguments passed to `func`.
+
+ Returns
+ -------
+ Nothing. The result is deliberately discarded to ensure that all cycles
+ are found.
+
+ """
+ if not args:
+ return _assert_no_gc_cycles_context()
+
+ func = args[0]
+ args = args[1:]
+ with _assert_no_gc_cycles_context(name=func.__name__):
+ func(*args, **kwargs)
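A usage sketch for the new helper, mirroring the tests added further down (CPython-only in effect, since the check is a no-op when HAS_REFCOUNT is false):

    from numpy.testing import assert_no_gc_cycles

    def no_cycle():
        b = []
        b.append([])
        return b

    assert_no_gc_cycles(no_cycle)    # callable form: passes, no cycle created

    def make_cycle():
        a = []
        a.append(a)                  # a list referencing itself: a cycle

    try:
        with assert_no_gc_cycles():  # context-manager form
            make_cycle()
    except AssertionError:
        pass                         # the cycle is detected and reported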
diff --git a/numpy/testing/tests/test_utils.py b/numpy/testing/tests/test_utils.py
index 35f81d8a7..602cdf5f2 100644
--- a/numpy/testing/tests/test_utils.py
+++ b/numpy/testing/tests/test_utils.py
@@ -6,6 +6,7 @@ import os
import itertools
import textwrap
import pytest
+import weakref
import numpy as np
from numpy.testing import (
@@ -14,7 +15,7 @@ from numpy.testing import (
assert_raises, assert_warns, assert_no_warnings, assert_allclose,
assert_approx_equal, assert_array_almost_equal_nulp, assert_array_max_ulp,
clear_and_catch_warnings, suppress_warnings, assert_string_equal, assert_,
- tempdir, temppath,
+ tempdir, temppath, assert_no_gc_cycles, HAS_REFCOUNT
)
@@ -285,7 +286,7 @@ class TestEqual(TestArrayEqual):
def test_error_message(self):
try:
- self._assert_func(np.array([1, 2]), np.matrix([1, 2]))
+ self._assert_func(np.array([1, 2]), np.array([[1, 2]]))
except AssertionError as e:
msg = str(e)
msg2 = msg.replace("shapes (2L,), (1L, 2L)", "shapes (2,), (1, 2)")
@@ -295,7 +296,7 @@ class TestEqual(TestArrayEqual):
(shapes (2,), (1, 2) mismatch)
x: array([1, 2])
- y: matrix([[1, 2]])""")
+ y: array([[1, 2]])""")
try:
assert_equal(msg, msg_reference)
except AssertionError:
@@ -365,19 +366,23 @@ class TestArrayAlmostEqual(_GenericTest):
self._assert_func(b, a)
self._assert_func(b, b)
- def test_matrix(self):
- # Matrix slicing keeps things 2-D, while array does not necessarily.
- # See gh-8452.
- m1 = np.matrix([[1., 2.]])
- m2 = np.matrix([[1., np.nan]])
- m3 = np.matrix([[1., -np.inf]])
- m4 = np.matrix([[np.nan, np.inf]])
- m5 = np.matrix([[1., 2.], [np.nan, np.inf]])
- for m in m1, m2, m3, m4, m5:
- self._assert_func(m, m)
- a = np.array(m)
- self._assert_func(a, m)
- self._assert_func(m, a)
+ # Test fully masked as well (see gh-11123).
+ a = np.ma.MaskedArray(3.5, mask=True)
+ b = np.array([3., 4., 6.5])
+ self._test_equal(a, b)
+ self._test_equal(b, a)
+ a = np.ma.masked
+ b = np.array([3., 4., 6.5])
+ self._test_equal(a, b)
+ self._test_equal(b, a)
+ a = np.ma.MaskedArray([3., 4., 6.5], mask=[True, True, True])
+ b = np.array([1., 2., 3.])
+ self._test_equal(a, b)
+ self._test_equal(b, a)
+ a = np.ma.MaskedArray([3., 4., 6.5], mask=[True, True, True])
+ b = np.array(1.)
+ self._test_equal(a, b)
+ self._test_equal(b, a)
def test_subclass_that_cannot_be_bool(self):
# While we cannot guarantee testing functions will always work for
@@ -478,20 +483,6 @@ class TestAlmostEqual(_GenericTest):
# remove anything that's not the array string
assert_equal(str(e).split('%)\n ')[1], b)
- def test_matrix(self):
- # Matrix slicing keeps things 2-D, while array does not necessarily.
- # See gh-8452.
- m1 = np.matrix([[1., 2.]])
- m2 = np.matrix([[1., np.nan]])
- m3 = np.matrix([[1., -np.inf]])
- m4 = np.matrix([[np.nan, np.inf]])
- m5 = np.matrix([[1., 2.], [np.nan, np.inf]])
- for m in m1, m2, m3, m4, m5:
- self._assert_func(m, m)
- a = np.array(m)
- self._assert_func(a, m)
- self._assert_func(m, a)
-
def test_subclass_that_cannot_be_bool(self):
# While we cannot guarantee testing functions will always work for
# subclasses, the tests should ideally rely only on subclasses having
@@ -1360,3 +1351,76 @@ def test_clear_and_catch_warnings_inherit():
warnings.simplefilter('ignore')
warnings.warn('Some warning')
assert_equal(my_mod.__warningregistry__, {})
+
+
+@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+class TestAssertNoGcCycles(object):
+ """ Test assert_no_gc_cycles """
+ def test_passes(self):
+ def no_cycle():
+ b = []
+ b.append([])
+ return b
+
+ with assert_no_gc_cycles():
+ no_cycle()
+
+ assert_no_gc_cycles(no_cycle)
+
+
+ def test_asserts(self):
+ def make_cycle():
+ a = []
+ a.append(a)
+ a.append(a)
+ return a
+
+ with assert_raises(AssertionError):
+ with assert_no_gc_cycles():
+ make_cycle()
+
+ with assert_raises(AssertionError):
+ assert_no_gc_cycles(make_cycle)
+
+
+ def test_fails(self):
+ """
+ Test that in cases where the garbage cannot be collected, we raise an
+        error instead of hanging forever trying to clear it.
+ """
+
+ class ReferenceCycleInDel(object):
+ """
+ An object that not only contains a reference cycle, but creates new
+ cycles whenever it's garbage-collected and its __del__ runs
+ """
+ make_cycle = True
+
+ def __init__(self):
+ self.cycle = self
+
+ def __del__(self):
+ # break the current cycle so that `self` can be freed
+ self.cycle = None
+
+ if ReferenceCycleInDel.make_cycle:
+ # but create a new one so that the garbage collector has more
+ # work to do.
+ ReferenceCycleInDel()
+
+ try:
+ w = weakref.ref(ReferenceCycleInDel())
+ try:
+ with assert_raises(RuntimeError):
+ # this will be unable to get a baseline empty garbage
+ assert_no_gc_cycles(lambda: None)
+ except AssertionError:
+ # the above test is only necessary if the GC actually tried to free
+ # our object anyway, which python 2.7 does not.
+ if w() is not None:
+ pytest.skip("GC does not call __del__ on cyclic objects")
+ raise
+
+ finally:
+ # make sure that we stop creating reference cycles
+ ReferenceCycleInDel.make_cycle = False
diff --git a/numpy/testing/utils.py b/numpy/testing/utils.py
index 78cf405cf..184adcc74 100644
--- a/numpy/testing/utils.py
+++ b/numpy/testing/utils.py
@@ -25,5 +25,5 @@ __all__ = [
'assert_allclose', 'IgnoreException', 'clear_and_catch_warnings',
'SkipTest', 'KnownFailureException', 'temppath', 'tempdir', 'IS_PYPY',
'HAS_REFCOUNT', 'suppress_warnings', 'assert_array_compare',
- '_assert_valid_refcount', '_gen_alignment_data',
+ '_assert_valid_refcount', '_gen_alignment_data', 'assert_no_gc_cycles'
]