Diffstat (limited to 'numpy')
-rw-r--r--  numpy/__init__.pxd | 224
-rw-r--r--  numpy/compat/setup.py | 2
-rw-r--r--  numpy/core/_add_newdocs.py | 10
-rw-r--r--  numpy/core/code_generators/cversions.txt | 1
-rw-r--r--  numpy/core/code_generators/generate_umath.py | 9
-rw-r--r--  numpy/core/defchararray.py | 2
-rw-r--r--  numpy/core/einsumfunc.py | 21
-rw-r--r--  numpy/core/fromnumeric.py | 66
-rw-r--r--  numpy/core/getlimits.py | 17
-rw-r--r--  numpy/core/include/numpy/numpyconfig.h | 1
-rw-r--r--  numpy/core/machar.py | 4
-rw-r--r--  numpy/core/multiarray.py | 48
-rw-r--r--  numpy/core/numeric.py | 76
-rw-r--r--  numpy/core/records.py | 54
-rw-r--r--  numpy/core/setup.py | 4
-rw-r--r--  numpy/core/shape_base.py | 29
-rw-r--r--  numpy/core/src/common/mem_overlap.c | 2
-rw-r--r--  numpy/core/src/common/npy_import.h | 2
-rw-r--r--  numpy/core/src/multiarray/array_assign_array.c | 15
-rw-r--r--  numpy/core/src/multiarray/array_assign_scalar.c | 15
-rw-r--r--  numpy/core/src/multiarray/arraytypes.c.src | 10
-rw-r--r--  numpy/core/src/multiarray/common.h | 9
-rw-r--r--  numpy/core/src/multiarray/convert_datatype.c | 48
-rw-r--r--  numpy/core/src/multiarray/convert_datatype.h | 8
-rw-r--r--  numpy/core/src/multiarray/ctors.c | 37
-rw-r--r--  numpy/core/src/multiarray/datetime.c | 2
-rw-r--r--  numpy/core/src/multiarray/datetime_strings.c | 2
-rw-r--r--  numpy/core/src/multiarray/dragon4.c | 2
-rw-r--r--  numpy/core/src/multiarray/dtype_transfer.c | 10
-rw-r--r--  numpy/core/src/multiarray/mapping.c | 2
-rw-r--r--  numpy/core/src/multiarray/methods.c | 147
-rw-r--r--  numpy/core/src/multiarray/methods.h | 2
-rw-r--r--  numpy/core/src/multiarray/multiarraymodule.c | 26
-rw-r--r--  numpy/core/src/multiarray/nditer_constr.c | 42
-rw-r--r--  numpy/core/src/umath/fast_loop_macros.h | 6
-rw-r--r--  numpy/core/src/umath/loops.c.src | 7
-rw-r--r--  numpy/core/src/umath/scalarmath.c.src | 9
-rw-r--r--  numpy/core/src/umath/simd.inc.src | 6
-rw-r--r--  numpy/core/src/umath/ufunc_type_resolution.c | 19
-rw-r--r--  numpy/core/tests/test_api.py | 39
-rw-r--r--  numpy/core/tests/test_einsum.py | 7
-rw-r--r--  numpy/core/tests/test_multiarray.py | 35
-rw-r--r--  numpy/core/tests/test_numeric.py | 30
-rw-r--r--  numpy/core/tests/test_umath.py | 29
-rw-r--r--  numpy/distutils/fcompiler/intel.py | 8
-rw-r--r--  numpy/distutils/setup.py | 2
-rw-r--r--  numpy/doc/structured_arrays.py | 2
-rw-r--r--  numpy/f2py/setup.py | 3
-rw-r--r--  numpy/fft/setup.py | 2
-rw-r--r--  numpy/lib/_iotools.py | 138
-rw-r--r--  numpy/lib/financial.py | 2
-rw-r--r--  numpy/lib/function_base.py | 71
-rw-r--r--  numpy/lib/histograms.py | 10
-rw-r--r--  numpy/lib/setup.py | 3
-rw-r--r--  numpy/lib/shape_base.py | 8
-rw-r--r--  numpy/lib/tests/test__iotools.py | 4
-rw-r--r--  numpy/lib/tests/test_function_base.py | 69
-rw-r--r--  numpy/lib/tests/test_io.py | 7
-rw-r--r--  numpy/linalg/setup.py | 2
-rw-r--r--  numpy/ma/core.py | 60
-rw-r--r--  numpy/ma/setup.py | 2
-rw-r--r--  numpy/ma/tests/test_core.py | 42
-rw-r--r--  numpy/matrixlib/defmatrix.py | 4
-rw-r--r--  numpy/matrixlib/setup.py | 2
-rw-r--r--  numpy/polynomial/setup.py | 2
-rw-r--r--  numpy/random/_generator.pyx | 2
-rw-r--r--  numpy/random/_mt19937.pyx | 17
-rw-r--r--  numpy/random/mtrand.pyx | 2
-rw-r--r--  numpy/random/setup.py | 3
-rw-r--r--  numpy/random/src/mt19937/mt19937-jump.c | 118
-rw-r--r--  numpy/random/src/mt19937/mt19937-jump.h | 140
-rw-r--r--  numpy/random/src/mt19937/mt19937-poly.h | 207
-rw-r--r--  numpy/random/src/mt19937/mt19937.c | 3
-rw-r--r--  numpy/random/tests/test_extending.py | 16
-rw-r--r--  numpy/random/tests/test_generator_mt19937.py | 54
-rw-r--r--  numpy/setup.py | 2
-rwxr-xr-x  numpy/testing/setup.py | 2
77 files changed, 1255 insertions, 890 deletions
diff --git a/numpy/__init__.pxd b/numpy/__init__.pxd
index 23bd22e36..d5c50d9bf 100644
--- a/numpy/__init__.pxd
+++ b/numpy/__init__.pxd
@@ -254,20 +254,8 @@ cdef extern from "numpy/arrayobject.h":
npy_intp *shape "dimensions"
npy_intp *strides
dtype descr # deprecated since NumPy 1.7 !
- PyObject* base
+ PyObject* base # NOT PUBLIC, DO NOT USE !
- # Note: This syntax (function definition in pxd files) is an
- # experimental exception made for __getbuffer__ and __releasebuffer__
- # -- the details of this may change.
- def __getbuffer__(ndarray self, Py_buffer* info, int flags):
- PyObject_GetBuffer(<object>self, info, flags);
-
- def __releasebuffer__(ndarray self, Py_buffer* info):
- # We should call a possible tp_bufferrelease(self, info) but no
- # interface to that is exposed by cython or python. And currently
- # the function is NULL in numpy, we rely on refcounting to release
- # info when self is collected
- pass
ctypedef unsigned char npy_bool
@@ -345,103 +333,107 @@ cdef extern from "numpy/arrayobject.h":
int len
int _import_array() except -1
+ # A second definition so _import_array isn't marked as used when we use it here.
+ # Do not use - subject to change any time.
+ int __pyx_import_array "_import_array"() except -1
#
# Macros from ndarrayobject.h
#
- bint PyArray_CHKFLAGS(ndarray m, int flags)
- bint PyArray_IS_C_CONTIGUOUS(ndarray arr)
- bint PyArray_IS_F_CONTIGUOUS(ndarray arr)
- bint PyArray_ISCONTIGUOUS(ndarray m)
- bint PyArray_ISWRITEABLE(ndarray m)
- bint PyArray_ISALIGNED(ndarray m)
-
- int PyArray_NDIM(ndarray)
- bint PyArray_ISONESEGMENT(ndarray)
- bint PyArray_ISFORTRAN(ndarray)
- int PyArray_FORTRANIF(ndarray)
-
- void* PyArray_DATA(ndarray)
- char* PyArray_BYTES(ndarray)
- npy_intp* PyArray_DIMS(ndarray)
- npy_intp* PyArray_STRIDES(ndarray)
- npy_intp PyArray_DIM(ndarray, size_t)
- npy_intp PyArray_STRIDE(ndarray, size_t)
-
- PyObject *PyArray_BASE(ndarray) # returns borrowed reference!
- PyArray_Descr *PyArray_DESCR(ndarray) # returns borrowed reference to dtype!
- int PyArray_FLAGS(ndarray)
- npy_intp PyArray_ITEMSIZE(ndarray)
- int PyArray_TYPE(ndarray arr)
+ bint PyArray_CHKFLAGS(ndarray m, int flags) nogil
+ bint PyArray_IS_C_CONTIGUOUS(ndarray arr) nogil
+ bint PyArray_IS_F_CONTIGUOUS(ndarray arr) nogil
+ bint PyArray_ISCONTIGUOUS(ndarray m) nogil
+ bint PyArray_ISWRITEABLE(ndarray m) nogil
+ bint PyArray_ISALIGNED(ndarray m) nogil
+
+ int PyArray_NDIM(ndarray) nogil
+ bint PyArray_ISONESEGMENT(ndarray) nogil
+ bint PyArray_ISFORTRAN(ndarray) nogil
+ int PyArray_FORTRANIF(ndarray) nogil
+
+ void* PyArray_DATA(ndarray) nogil
+ char* PyArray_BYTES(ndarray) nogil
+
+ npy_intp* PyArray_DIMS(ndarray) nogil
+ npy_intp* PyArray_STRIDES(ndarray) nogil
+ npy_intp PyArray_DIM(ndarray, size_t) nogil
+ npy_intp PyArray_STRIDE(ndarray, size_t) nogil
+
+ PyObject *PyArray_BASE(ndarray) nogil # returns borrowed reference!
+ PyArray_Descr *PyArray_DESCR(ndarray) nogil # returns borrowed reference to dtype!
+ int PyArray_FLAGS(ndarray) nogil
+ npy_intp PyArray_ITEMSIZE(ndarray) nogil
+ int PyArray_TYPE(ndarray arr) nogil
object PyArray_GETITEM(ndarray arr, void *itemptr)
int PyArray_SETITEM(ndarray arr, void *itemptr, object obj)
- bint PyTypeNum_ISBOOL(int)
- bint PyTypeNum_ISUNSIGNED(int)
- bint PyTypeNum_ISSIGNED(int)
- bint PyTypeNum_ISINTEGER(int)
- bint PyTypeNum_ISFLOAT(int)
- bint PyTypeNum_ISNUMBER(int)
- bint PyTypeNum_ISSTRING(int)
- bint PyTypeNum_ISCOMPLEX(int)
- bint PyTypeNum_ISPYTHON(int)
- bint PyTypeNum_ISFLEXIBLE(int)
- bint PyTypeNum_ISUSERDEF(int)
- bint PyTypeNum_ISEXTENDED(int)
- bint PyTypeNum_ISOBJECT(int)
-
- bint PyDataType_ISBOOL(dtype)
- bint PyDataType_ISUNSIGNED(dtype)
- bint PyDataType_ISSIGNED(dtype)
- bint PyDataType_ISINTEGER(dtype)
- bint PyDataType_ISFLOAT(dtype)
- bint PyDataType_ISNUMBER(dtype)
- bint PyDataType_ISSTRING(dtype)
- bint PyDataType_ISCOMPLEX(dtype)
- bint PyDataType_ISPYTHON(dtype)
- bint PyDataType_ISFLEXIBLE(dtype)
- bint PyDataType_ISUSERDEF(dtype)
- bint PyDataType_ISEXTENDED(dtype)
- bint PyDataType_ISOBJECT(dtype)
- bint PyDataType_HASFIELDS(dtype)
- bint PyDataType_HASSUBARRAY(dtype)
-
- bint PyArray_ISBOOL(ndarray)
- bint PyArray_ISUNSIGNED(ndarray)
- bint PyArray_ISSIGNED(ndarray)
- bint PyArray_ISINTEGER(ndarray)
- bint PyArray_ISFLOAT(ndarray)
- bint PyArray_ISNUMBER(ndarray)
- bint PyArray_ISSTRING(ndarray)
- bint PyArray_ISCOMPLEX(ndarray)
- bint PyArray_ISPYTHON(ndarray)
- bint PyArray_ISFLEXIBLE(ndarray)
- bint PyArray_ISUSERDEF(ndarray)
- bint PyArray_ISEXTENDED(ndarray)
- bint PyArray_ISOBJECT(ndarray)
- bint PyArray_HASFIELDS(ndarray)
-
- bint PyArray_ISVARIABLE(ndarray)
-
- bint PyArray_SAFEALIGNEDCOPY(ndarray)
- bint PyArray_ISNBO(char) # works on ndarray.byteorder
- bint PyArray_IsNativeByteOrder(char) # works on ndarray.byteorder
- bint PyArray_ISNOTSWAPPED(ndarray)
- bint PyArray_ISBYTESWAPPED(ndarray)
-
- bint PyArray_FLAGSWAP(ndarray, int)
-
- bint PyArray_ISCARRAY(ndarray)
- bint PyArray_ISCARRAY_RO(ndarray)
- bint PyArray_ISFARRAY(ndarray)
- bint PyArray_ISFARRAY_RO(ndarray)
- bint PyArray_ISBEHAVED(ndarray)
- bint PyArray_ISBEHAVED_RO(ndarray)
-
-
- bint PyDataType_ISNOTSWAPPED(dtype)
- bint PyDataType_ISBYTESWAPPED(dtype)
+ bint PyTypeNum_ISBOOL(int) nogil
+ bint PyTypeNum_ISUNSIGNED(int) nogil
+ bint PyTypeNum_ISSIGNED(int) nogil
+ bint PyTypeNum_ISINTEGER(int) nogil
+ bint PyTypeNum_ISFLOAT(int) nogil
+ bint PyTypeNum_ISNUMBER(int) nogil
+ bint PyTypeNum_ISSTRING(int) nogil
+ bint PyTypeNum_ISCOMPLEX(int) nogil
+ bint PyTypeNum_ISPYTHON(int) nogil
+ bint PyTypeNum_ISFLEXIBLE(int) nogil
+ bint PyTypeNum_ISUSERDEF(int) nogil
+ bint PyTypeNum_ISEXTENDED(int) nogil
+ bint PyTypeNum_ISOBJECT(int) nogil
+
+ bint PyDataType_ISBOOL(dtype) nogil
+ bint PyDataType_ISUNSIGNED(dtype) nogil
+ bint PyDataType_ISSIGNED(dtype) nogil
+ bint PyDataType_ISINTEGER(dtype) nogil
+ bint PyDataType_ISFLOAT(dtype) nogil
+ bint PyDataType_ISNUMBER(dtype) nogil
+ bint PyDataType_ISSTRING(dtype) nogil
+ bint PyDataType_ISCOMPLEX(dtype) nogil
+ bint PyDataType_ISPYTHON(dtype) nogil
+ bint PyDataType_ISFLEXIBLE(dtype) nogil
+ bint PyDataType_ISUSERDEF(dtype) nogil
+ bint PyDataType_ISEXTENDED(dtype) nogil
+ bint PyDataType_ISOBJECT(dtype) nogil
+ bint PyDataType_HASFIELDS(dtype) nogil
+ bint PyDataType_HASSUBARRAY(dtype) nogil
+
+ bint PyArray_ISBOOL(ndarray) nogil
+ bint PyArray_ISUNSIGNED(ndarray) nogil
+ bint PyArray_ISSIGNED(ndarray) nogil
+ bint PyArray_ISINTEGER(ndarray) nogil
+ bint PyArray_ISFLOAT(ndarray) nogil
+ bint PyArray_ISNUMBER(ndarray) nogil
+ bint PyArray_ISSTRING(ndarray) nogil
+ bint PyArray_ISCOMPLEX(ndarray) nogil
+ bint PyArray_ISPYTHON(ndarray) nogil
+ bint PyArray_ISFLEXIBLE(ndarray) nogil
+ bint PyArray_ISUSERDEF(ndarray) nogil
+ bint PyArray_ISEXTENDED(ndarray) nogil
+ bint PyArray_ISOBJECT(ndarray) nogil
+ bint PyArray_HASFIELDS(ndarray) nogil
+
+ bint PyArray_ISVARIABLE(ndarray) nogil
+
+ bint PyArray_SAFEALIGNEDCOPY(ndarray) nogil
+ bint PyArray_ISNBO(char) nogil # works on ndarray.byteorder
+ bint PyArray_IsNativeByteOrder(char) nogil # works on ndarray.byteorder
+ bint PyArray_ISNOTSWAPPED(ndarray) nogil
+ bint PyArray_ISBYTESWAPPED(ndarray) nogil
+
+ bint PyArray_FLAGSWAP(ndarray, int) nogil
+
+ bint PyArray_ISCARRAY(ndarray) nogil
+ bint PyArray_ISCARRAY_RO(ndarray) nogil
+ bint PyArray_ISFARRAY(ndarray) nogil
+ bint PyArray_ISFARRAY_RO(ndarray) nogil
+ bint PyArray_ISBEHAVED(ndarray) nogil
+ bint PyArray_ISBEHAVED_RO(ndarray) nogil
+
+
+ bint PyDataType_ISNOTSWAPPED(dtype) nogil
+ bint PyDataType_ISBYTESWAPPED(dtype) nogil
bint PyArray_DescrCheck(object)
@@ -461,10 +453,11 @@ cdef extern from "numpy/arrayobject.h":
bint PyArray_IsPythonScalar(object)
bint PyArray_IsAnyScalar(object)
bint PyArray_CheckAnyScalar(object)
+
ndarray PyArray_GETCONTIGUOUS(ndarray)
- bint PyArray_SAMESHAPE(ndarray, ndarray)
- npy_intp PyArray_SIZE(ndarray)
- npy_intp PyArray_NBYTES(ndarray)
+ bint PyArray_SAMESHAPE(ndarray, ndarray) nogil
+ npy_intp PyArray_SIZE(ndarray) nogil
+ npy_intp PyArray_NBYTES(ndarray) nogil
object PyArray_FROM_O(object)
object PyArray_FROM_OF(object m, int flags)
@@ -477,16 +470,16 @@ cdef extern from "numpy/arrayobject.h":
npy_intp PyArray_REFCOUNT(object)
object PyArray_ContiguousFromAny(op, int, int min_depth, int max_depth)
unsigned char PyArray_EquivArrTypes(ndarray a1, ndarray a2)
- bint PyArray_EquivByteorders(int b1, int b2)
+ bint PyArray_EquivByteorders(int b1, int b2) nogil
object PyArray_SimpleNew(int nd, npy_intp* dims, int typenum)
object PyArray_SimpleNewFromData(int nd, npy_intp* dims, int typenum, void* data)
#object PyArray_SimpleNewFromDescr(int nd, npy_intp* dims, dtype descr)
object PyArray_ToScalar(void* data, ndarray arr)
- void* PyArray_GETPTR1(ndarray m, npy_intp i)
- void* PyArray_GETPTR2(ndarray m, npy_intp i, npy_intp j)
- void* PyArray_GETPTR3(ndarray m, npy_intp i, npy_intp j, npy_intp k)
- void* PyArray_GETPTR4(ndarray m, npy_intp i, npy_intp j, npy_intp k, npy_intp l)
+ void* PyArray_GETPTR1(ndarray m, npy_intp i) nogil
+ void* PyArray_GETPTR2(ndarray m, npy_intp i, npy_intp j) nogil
+ void* PyArray_GETPTR3(ndarray m, npy_intp i, npy_intp j, npy_intp k) nogil
+ void* PyArray_GETPTR4(ndarray m, npy_intp i, npy_intp j, npy_intp k, npy_intp l) nogil
void PyArray_XDECREF_ERR(ndarray)
# Cannot be supported due to out arg
@@ -614,7 +607,7 @@ cdef extern from "numpy/arrayobject.h":
object PyArray_Choose (ndarray, object, ndarray, NPY_CLIPMODE)
int PyArray_Sort (ndarray, int, NPY_SORTKIND)
object PyArray_ArgSort (ndarray, int, NPY_SORTKIND)
- object PyArray_SearchSorted (ndarray, object, NPY_SEARCHSIDE)
+ object PyArray_SearchSorted (ndarray, object, NPY_SEARCHSIDE, object)
object PyArray_ArgMax (ndarray, int, ndarray)
object PyArray_ArgMin (ndarray, int, ndarray)
object PyArray_Reshape (ndarray, object)
@@ -961,7 +954,7 @@ cdef inline object get_array_base(ndarray arr):
# Cython code.
cdef inline int import_array() except -1:
try:
- _import_array()
+ __pyx_import_array()
except Exception:
raise ImportError("numpy.core.multiarray failed to import")
@@ -976,3 +969,10 @@ cdef inline int import_ufunc() except -1:
_import_umath()
except Exception:
raise ImportError("numpy.core.umath failed to import")
+
+cdef extern from *:
+ # Leave a marker that the NumPy declarations came from this file
+ # See https://github.com/cython/cython/issues/3573
+ """
+ /* NumPy API declarations from "numpy/__init__.pxd" */
+ """
diff --git a/numpy/compat/setup.py b/numpy/compat/setup.py
index afa511673..c1b34a2cc 100644
--- a/numpy/compat/setup.py
+++ b/numpy/compat/setup.py
@@ -2,7 +2,7 @@ def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('compat', parent_package, top_path)
- config.add_data_dir('tests')
+ config.add_subpackage('tests')
return config
if __name__ == '__main__':
diff --git a/numpy/core/_add_newdocs.py b/numpy/core/_add_newdocs.py
index e54103634..f43b77c44 100644
--- a/numpy/core/_add_newdocs.py
+++ b/numpy/core/_add_newdocs.py
@@ -5009,7 +5009,7 @@ add_newdoc('numpy.core', 'ufunc', ('reduceat',
"""))
add_newdoc('numpy.core', 'ufunc', ('outer',
- """
+ r"""
outer(A, B, **kwargs)
Apply the ufunc `op` to all pairs (a, b) with a in `A` and b in `B`.
@@ -5043,7 +5043,13 @@ add_newdoc('numpy.core', 'ufunc', ('outer',
See Also
--------
- numpy.outer
+ numpy.outer : A less powerful version of ``np.multiply.outer``
+ that `ravel`\ s all inputs to 1D. This exists
+ primarily for compatibility with old code.
+
+ tensordot : ``np.tensordot(a, b, axes=((), ()))`` and
+ ``np.multiply.outer(a, b)`` behave the same for all
+ dimensions of a and b.
Examples
--------
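
The equivalences named in the new ``See Also`` entries are easy to check; a minimal sketch (illustrative only, not part of the patch) showing that the three routines agree for 1-D inputs:

    >>> import numpy as np
    >>> a = np.array([1, 2, 3])
    >>> b = np.array([10, 20])
    >>> np.array_equal(np.outer(a, b), np.multiply.outer(a, b))
    True
    >>> np.array_equal(np.multiply.outer(a, b), np.tensordot(a, b, axes=((), ())))
    True
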
diff --git a/numpy/core/code_generators/cversions.txt b/numpy/core/code_generators/cversions.txt
index 5daa52d79..528113a9e 100644
--- a/numpy/core/code_generators/cversions.txt
+++ b/numpy/core/code_generators/cversions.txt
@@ -50,4 +50,5 @@
# Version 13 (NumPy 1.17) No change.
# Version 13 (NumPy 1.18) No change.
# Version 13 (NumPy 1.19) No change.
+# Version 13 (NumPy 1.20) No change.
0x0000000d = 5b0e8bbded00b166125974fc71e80a33
diff --git a/numpy/core/code_generators/generate_umath.py b/numpy/core/code_generators/generate_umath.py
index 52ae3cdd7..f10ce9f0f 100644
--- a/numpy/core/code_generators/generate_umath.py
+++ b/numpy/core/code_generators/generate_umath.py
@@ -531,7 +531,7 @@ defdict = {
TD(flts, f="logaddexp", astype={'e':'f'})
),
'logaddexp2':
- Ufunc(2, 1, None,
+ Ufunc(2, 1, MinusInfinity,
docstrings.get('numpy.core.umath.logaddexp2'),
None,
TD(flts, f="logaddexp2", astype={'e':'f'})
@@ -1028,8 +1028,11 @@ def make_arrays(funcdict):
funclist.append('NULL')
try:
thedict = arity_lookup[uf.nin, uf.nout]
- except KeyError:
- raise ValueError("Could not handle {}[{}]".format(name, t.type))
+ except KeyError as e:
+ raise ValueError(
+ f"Could not handle {name}[{t.type}] "
+ f"with nin={uf.nin}, nout={uf.nout}"
+ ) from None
astype = ''
if not t.astype is None:
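
Giving ``logaddexp2`` a ``MinusInfinity`` identity makes empty reductions well defined, since ``-inf`` is the neutral element of ``log2(2**a + 2**b)``. A rough sketch of the expected user-visible behaviour, assuming the generated ufunc picks up the identity (not taken from the patch):

    >>> import numpy as np
    >>> np.logaddexp2.identity
    -inf
    >>> np.logaddexp2.reduce(np.array([], dtype=np.float64))
    -inf
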
diff --git a/numpy/core/defchararray.py b/numpy/core/defchararray.py
index cd01c0e77..1d447b86a 100644
--- a/numpy/core/defchararray.py
+++ b/numpy/core/defchararray.py
@@ -1779,7 +1779,7 @@ def isdecimal(a):
Calls `unicode.isdecimal` element-wise.
Decimal characters include digit characters, and all characters
- that that can be used to form decimal-radix numbers,
+ that can be used to form decimal-radix numbers,
e.g. ``U+0660, ARABIC-INDIC DIGIT ZERO``.
Parameters
diff --git a/numpy/core/einsumfunc.py b/numpy/core/einsumfunc.py
index a1e2efdb4..c46ae173d 100644
--- a/numpy/core/einsumfunc.py
+++ b/numpy/core/einsumfunc.py
@@ -3,6 +3,7 @@ Implementation of optimized einsum.
"""
import itertools
+import operator
from numpy.core.multiarray import c_einsum
from numpy.core.numeric import asanyarray, tensordot
@@ -576,11 +577,13 @@ def _parse_einsum_input(operands):
for s in sub:
if s is Ellipsis:
subscripts += "..."
- elif isinstance(s, int):
- subscripts += einsum_symbols[s]
else:
- raise TypeError("For this input type lists must contain "
- "either int or Ellipsis")
+ try:
+ s = operator.index(s)
+ except TypeError as e:
+ raise TypeError("For this input type lists must contain "
+ "either int or Ellipsis") from e
+ subscripts += einsum_symbols[s]
if num != last:
subscripts += ","
@@ -589,11 +592,13 @@ def _parse_einsum_input(operands):
for s in output_list:
if s is Ellipsis:
subscripts += "..."
- elif isinstance(s, int):
- subscripts += einsum_symbols[s]
else:
- raise TypeError("For this input type lists must contain "
- "either int or Ellipsis")
+ try:
+ s = operator.index(s)
+ except TypeError as e:
+ raise TypeError("For this input type lists must contain "
+ "either int or Ellipsis") from e
+ subscripts += einsum_symbols[s]
# Check for proper "->"
if ("-" in subscripts) or (">" in subscripts):
invalid = (subscripts.count("-") > 1) or (subscripts.count(">") > 1)
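
Using ``operator.index`` instead of an ``isinstance(s, int)`` check means any integer-like subscript, for example a NumPy integer scalar, is accepted in the list form of ``einsum``. A small sketch of the behaviour this enables (illustrative, not from the patch):

    >>> import numpy as np
    >>> a = np.arange(6).reshape(2, 3)
    >>> np.einsum(a, [np.int32(0), np.int32(1)], [np.int32(1), np.int32(0)])
    array([[0, 3],
           [1, 4],
           [2, 5]])
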
diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py
index b32ad8d35..7193af839 100644
--- a/numpy/core/fromnumeric.py
+++ b/numpy/core/fromnumeric.py
@@ -2494,6 +2494,14 @@ def ptp(a, axis=None, out=None, keepdims=np._NoValue):
The name of the function comes from the acronym for 'peak to peak'.
+ .. warning::
+ `ptp` preserves the data type of the array. This means the
+ return value for an input of signed integers with n bits
+ (e.g. `np.int8`, `np.int16`, etc) is also a signed integer
+ with n bits. In that case, peak-to-peak values greater than
+ ``2**(n-1)-1`` will be returned as negative values. An example
+ with a work-around is shown below.
+
Parameters
----------
a : array_like
@@ -2531,16 +2539,33 @@ def ptp(a, axis=None, out=None, keepdims=np._NoValue):
Examples
--------
- >>> x = np.arange(4).reshape((2,2))
- >>> x
- array([[0, 1],
- [2, 3]])
+ >>> x = np.array([[4, 9, 2, 10],
+ ... [6, 9, 7, 12]])
+
+ >>> np.ptp(x, axis=1)
+ array([8, 6])
>>> np.ptp(x, axis=0)
- array([2, 2])
+ array([2, 0, 5, 2])
- >>> np.ptp(x, axis=1)
- array([1, 1])
+ >>> np.ptp(x)
+ 10
+
+ This example shows that a negative value can be returned when
+ the input is an array of signed integers.
+
+ >>> y = np.array([[1, 127],
+ ... [0, 127],
+ ... [-1, 127],
+ ... [-2, 127]], dtype=np.int8)
+ >>> np.ptp(y, axis=1)
+ array([ 126, 127, -128, -127], dtype=int8)
+
+ A work-around is to use the `view()` method to view the result as
+ unsigned integers with the same bit width:
+
+ >>> np.ptp(y, axis=1).view(np.uint8)
+ array([126, 127, 128, 129], dtype=uint8)
"""
kwargs = {}
@@ -3411,17 +3436,18 @@ def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
Notes
-----
The standard deviation is the square root of the average of the squared
- deviations from the mean, i.e., ``std = sqrt(mean(abs(x - x.mean())**2))``.
-
- The average squared deviation is normally calculated as
- ``x.sum() / N``, where ``N = len(x)``. If, however, `ddof` is specified,
- the divisor ``N - ddof`` is used instead. In standard statistical
- practice, ``ddof=1`` provides an unbiased estimator of the variance
- of the infinite population. ``ddof=0`` provides a maximum likelihood
- estimate of the variance for normally distributed variables. The
- standard deviation computed in this function is the square root of
- the estimated variance, so even with ``ddof=1``, it will not be an
- unbiased estimate of the standard deviation per se.
+ deviations from the mean, i.e., ``std = sqrt(mean(x))``, where
+ ``x = abs(a - a.mean())**2``.
+
+ The average squared deviation is typically calculated as ``x.sum() / N``,
+ where ``N = len(x)``. If, however, `ddof` is specified, the divisor
+ ``N - ddof`` is used instead. In standard statistical practice, ``ddof=1``
+ provides an unbiased estimator of the variance of the infinite population.
+ ``ddof=0`` provides a maximum likelihood estimate of the variance for
+ normally distributed variables. The standard deviation computed in this
+ function is the square root of the estimated variance, so even with
+ ``ddof=1``, it will not be an unbiased estimate of the standard deviation
+ per se.
Note that, for complex numbers, `std` takes the absolute
value before squaring, so that the result is always real and nonnegative.
@@ -3536,9 +3562,9 @@ def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
Notes
-----
The variance is the average of the squared deviations from the mean,
- i.e., ``var = mean(abs(x - x.mean())**2)``.
+ i.e., ``var = mean(x)``, where ``x = abs(a - a.mean())**2``.
- The mean is normally calculated as ``x.sum() / N``, where ``N = len(x)``.
+ The mean is typically calculated as ``x.sum() / N``, where ``N = len(x)``.
If, however, `ddof` is specified, the divisor ``N - ddof`` is used
instead. In standard statistical practice, ``ddof=1`` provides an
unbiased estimator of the variance of a hypothetical infinite population.
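
The reworded formula can be verified directly; a quick sanity check (not part of the patch):

    >>> import numpy as np
    >>> a = np.array([1.0, 2.0, 3.0, 4.0])
    >>> x = np.abs(a - a.mean())**2
    >>> np.std(a) == np.sqrt(x.mean())    # default ddof=0
    True
    >>> np.var(a) == x.mean()
    True
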
diff --git a/numpy/core/getlimits.py b/numpy/core/getlimits.py
index e2ff49393..f73c21f67 100644
--- a/numpy/core/getlimits.py
+++ b/numpy/core/getlimits.py
@@ -337,8 +337,8 @@ class finfo:
The approximate decimal resolution of this type, i.e.,
``10**-precision``.
tiny : float
- The smallest positive usable number. Type of `tiny` is an
- appropriate floating point type.
+ The smallest positive floating point number with full precision
+ (see Notes).
Parameters
----------
@@ -359,6 +359,18 @@ class finfo:
impacts import times. These objects are cached, so calling ``finfo()``
repeatedly inside your functions is not a problem.
+ Note that ``tiny`` is not actually the smallest positive representable
+ value in a NumPy floating point type. As in the IEEE-754 standard [1]_,
+ NumPy floating point types make use of subnormal numbers to fill the
+ gap between 0 and ``tiny``. However, subnormal numbers may have
+ significantly reduced precision [2]_.
+
+ References
+ ----------
+ .. [1] IEEE Standard for Floating-Point Arithmetic, IEEE Std 754-2008,
+ pp.1-70, 2008, http://www.doi.org/10.1109/IEEESTD.2008.4610935
+ .. [2] Wikipedia, "Denormal Numbers",
+ https://en.wikipedia.org/wiki/Denormal_number
"""
_finfo_cache = {}
@@ -546,4 +558,3 @@ class iinfo:
def __repr__(self):
return "%s(min=%s, max=%s, dtype=%s)" % (self.__class__.__name__,
self.min, self.max, self.dtype)
-
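
The subnormal range described in the new note is easy to observe; a short illustration (not from the patch):

    >>> import numpy as np
    >>> np.finfo(np.float64).tiny
    2.2250738585072014e-308
    >>> np.nextafter(0., 1.)    # smallest positive subnormal, well below ``tiny``
    5e-324
    >>> np.nextafter(0., 1.) < np.finfo(np.float64).tiny
    True
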
diff --git a/numpy/core/include/numpy/numpyconfig.h b/numpy/core/include/numpy/numpyconfig.h
index 4df4ea438..8eaf446b7 100644
--- a/numpy/core/include/numpy/numpyconfig.h
+++ b/numpy/core/include/numpy/numpyconfig.h
@@ -41,5 +41,6 @@
#define NPY_1_17_API_VERSION 0x00000008
#define NPY_1_18_API_VERSION 0x00000008
#define NPY_1_19_API_VERSION 0x00000008
+#define NPY_1_20_API_VERSION 0x00000008
#endif
diff --git a/numpy/core/machar.py b/numpy/core/machar.py
index a48dc3d50..55285fe59 100644
--- a/numpy/core/machar.py
+++ b/numpy/core/machar.py
@@ -40,8 +40,8 @@ class MachAr:
Smallest (most negative) power of `ibeta` consistent with there
being no leading zeros in the mantissa.
xmin : float
- Floating point number ``beta**minexp`` (the smallest [in
- magnitude] usable floating value).
+ Floating-point number ``beta**minexp`` (the smallest [in
+ magnitude] positive floating point number with full precision).
maxexp : int
Smallest (positive) power of `ibeta` that causes overflow.
xmax : float
diff --git a/numpy/core/multiarray.py b/numpy/core/multiarray.py
index ec36f4f7e..5ae6a4272 100644
--- a/numpy/core/multiarray.py
+++ b/numpy/core/multiarray.py
@@ -171,14 +171,15 @@ def concatenate(arrays, axis=None, out=None):
array_split : Split an array into multiple sub-arrays of equal or
near-equal size.
split : Split array into a list of multiple sub-arrays of equal size.
- hsplit : Split array into multiple sub-arrays horizontally (column wise)
- vsplit : Split array into multiple sub-arrays vertically (row wise)
+ hsplit : Split array into multiple sub-arrays horizontally (column wise).
+ vsplit : Split array into multiple sub-arrays vertically (row wise).
dsplit : Split array into multiple sub-arrays along the 3rd axis (depth).
stack : Stack a sequence of arrays along a new axis.
- hstack : Stack arrays in sequence horizontally (column wise)
- vstack : Stack arrays in sequence vertically (row wise)
- dstack : Stack arrays in sequence depth wise (along third dimension)
block : Assemble arrays from blocks.
+ hstack : Stack arrays in sequence horizontally (column wise).
+ vstack : Stack arrays in sequence vertically (row wise).
+ dstack : Stack arrays in sequence depth wise (along third dimension).
+ column_stack : Stack 1-D arrays as columns into a 2-D array.
Notes
-----
@@ -1266,7 +1267,13 @@ def shares_memory(a, b, max_work=None):
"""
shares_memory(a, b, max_work=None)
- Determine if two arrays share memory
+ Determine if two arrays share memory.
+
+ .. warning::
+
+ This function can be exponentially slow for some inputs, unless
+ `max_work` is set to a finite number or ``MAY_SHARE_BOUNDS``.
+ If in doubt, use `numpy.may_share_memory` instead.
Parameters
----------
@@ -1279,7 +1286,8 @@ def shares_memory(a, b, max_work=None):
max_work=MAY_SHARE_EXACT (default)
The problem is solved exactly. In this case, the function returns
- True only if there is an element shared between the arrays.
+ True only if there is an element shared between the arrays. Finding
+ the exact solution may take extremely long in some cases.
max_work=MAY_SHARE_BOUNDS
Only the memory bounds of a and b are checked.
@@ -1298,9 +1306,33 @@ def shares_memory(a, b, max_work=None):
Examples
--------
- >>> np.may_share_memory(np.array([1,2]), np.array([5,8,9]))
+ >>> x = np.array([1, 2, 3, 4])
+ >>> np.shares_memory(x, np.array([5, 6, 7]))
+ False
+ >>> np.shares_memory(x[::2], x)
+ True
+ >>> np.shares_memory(x[::2], x[1::2])
False
+ Checking whether two arrays share memory is NP-complete, and
+ runtime may increase exponentially in the number of
+ dimensions. Hence, `max_work` should generally be set to a finite
+ number, as it is possible to construct examples that take
+ extremely long to run:
+
+ >>> from numpy.lib.stride_tricks import as_strided
+ >>> x = np.zeros([192163377], dtype=np.int8)
+ >>> x1 = as_strided(x, strides=(36674, 61119, 85569), shape=(1049, 1049, 1049))
+ >>> x2 = as_strided(x[64023025:], strides=(12223, 12224, 1), shape=(1049, 1049, 1))
+ >>> np.shares_memory(x1, x2, max_work=1000)
+ Traceback (most recent call last):
+ ...
+ numpy.TooHardError: Exceeded max_work
+
+ Running ``np.shares_memory(x1, x2)`` without `max_work` set takes
+ around 1 minute for this case. It is possible to find problems
+ that still take significantly longer.
+
"""
return (a, b)
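
A small sketch contrasting the exact check with the cheap bounds-only check mentioned in the new warning (illustrative, not part of the patch):

    >>> import numpy as np
    >>> x = np.arange(8)
    >>> np.shares_memory(x[::2], x[1::2])     # exact: no element is shared
    False
    >>> np.shares_memory(x[::2], x[1::2], max_work=np.MAY_SHARE_BOUNDS)
    True
    >>> np.may_share_memory(x[::2], x[1::2])  # equivalent bounds-only check
    True
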
diff --git a/numpy/core/numeric.py b/numpy/core/numeric.py
index 83d985a7c..05f0b7820 100644
--- a/numpy/core/numeric.py
+++ b/numpy/core/numeric.py
@@ -857,8 +857,11 @@ def outer(a, b, out=None):
--------
inner
einsum : ``einsum('i,j->ij', a.ravel(), b.ravel())`` is the equivalent.
- ufunc.outer : A generalization to N dimensions and other operations.
- ``np.multiply.outer(a.ravel(), b.ravel())`` is the equivalent.
+ ufunc.outer : A generalization to dimensions other than 1D and other
+ operations. ``np.multiply.outer(a.ravel(), b.ravel())``
+ is the equivalent.
+ tensordot : ``np.tensordot(a.ravel(), b.ravel(), axes=((), ()))``
+ is the equivalent.
References
----------
@@ -1226,11 +1229,39 @@ def rollaxis(a, axis, start=0):
a : ndarray
Input array.
axis : int
- The axis to roll backwards. The positions of the other axes do not
+ The axis to be rolled. The positions of the other axes do not
change relative to one another.
start : int, optional
- The axis is rolled until it lies before this position. The default,
- 0, results in a "complete" roll.
+ When ``start <= axis``, the axis is rolled back until it lies in
+ this position. When ``start > axis``, the axis is rolled until it
+ lies before this position. The default, 0, results in a "complete"
+ roll. The following table describes how negative values of ``start``
+ are interpreted:
+
+ .. table::
+ :align: left
+
+ +-------------------+----------------------+
+ | ``start`` | Normalized ``start`` |
+ +===================+======================+
+ | ``-(arr.ndim+1)`` | raise ``AxisError`` |
+ +-------------------+----------------------+
+ | ``-arr.ndim`` | 0 |
+ +-------------------+----------------------+
+ | |vdots| | |vdots| |
+ +-------------------+----------------------+
+ | ``-1`` | ``arr.ndim-1`` |
+ +-------------------+----------------------+
+ | ``0`` | ``0`` |
+ +-------------------+----------------------+
+ | |vdots| | |vdots| |
+ +-------------------+----------------------+
+ | ``arr.ndim`` | ``arr.ndim`` |
+ +-------------------+----------------------+
+ | ``arr.ndim + 1`` | raise ``AxisError`` |
+ +-------------------+----------------------+
+
+ .. |vdots| unicode:: U+22EE .. Vertical Ellipsis
Returns
-------
@@ -2199,6 +2230,7 @@ def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
See Also
--------
allclose
+ math.isclose
Notes
-----
@@ -2279,12 +2311,12 @@ def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
return cond[()] # Flatten 0d arrays to scalars
-def _array_equal_dispatcher(a1, a2):
+def _array_equal_dispatcher(a1, a2, equal_nan=None):
return (a1, a2)
@array_function_dispatch(_array_equal_dispatcher)
-def array_equal(a1, a2):
+def array_equal(a1, a2, equal_nan=False):
"""
True if two arrays have the same shape and elements, False otherwise.
@@ -2292,6 +2324,12 @@ def array_equal(a1, a2):
----------
a1, a2 : array_like
Input arrays.
+ equal_nan : bool
+ Whether to compare NaN's as equal. If the dtype of a1 and a2 is
+ complex, values will be considered equal if either the real or the
+ imaginary component of a given value is ``nan``.
+
+ .. versionadded:: 1.19.0
Returns
-------
@@ -2315,7 +2353,21 @@ def array_equal(a1, a2):
False
>>> np.array_equal([1, 2], [1, 4])
False
+ >>> a = np.array([1, np.nan])
+ >>> np.array_equal(a, a)
+ False
+ >>> np.array_equal(a, a, equal_nan=True)
+ True
+ When ``equal_nan`` is True, complex values with nan components are
+ considered equal if either the real *or* the imaginary components are nan.
+
+ >>> a = np.array([1 + 1j])
+ >>> b = a.copy()
+ >>> a.real = np.nan
+ >>> b.imag = np.nan
+ >>> np.array_equal(a, b, equal_nan=True)
+ True
"""
try:
a1, a2 = asarray(a1), asarray(a2)
@@ -2323,7 +2375,15 @@ def array_equal(a1, a2):
return False
if a1.shape != a2.shape:
return False
- return bool(asarray(a1 == a2).all())
+ if not equal_nan:
+ return bool(asarray(a1 == a2).all())
+ # Handling NaN values if equal_nan is True
+ a1nan, a2nan = isnan(a1), isnan(a2)
+ # NaN's occur at different locations
+ if not (a1nan == a2nan).all():
+ return False
+ # Shapes of a1, a2 and masks are guaranteed to be consistent by this point
+ return bool(asarray(a1[~a1nan] == a2[~a1nan]).all())
def _array_equiv_dispatcher(a1, a2):
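
The new ``start`` table for ``rollaxis`` can be exercised directly; a quick sketch (not from the patch) using the shapes from the existing examples:

    >>> import numpy as np
    >>> a = np.ones((3, 4, 5, 6))
    >>> np.rollaxis(a, 3, 1).shape    # roll axis 3 back until it lies at position 1
    (3, 6, 4, 5)
    >>> np.rollaxis(a, 2, -4).shape   # start=-4 normalizes to 0, a "complete" roll
    (5, 3, 4, 6)
    >>> np.rollaxis(a, 0, -2).shape   # start=-2 normalizes to a.ndim - 2 == 2
    (4, 3, 5, 6)
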
diff --git a/numpy/core/records.py b/numpy/core/records.py
index 04c970cf4..af59de425 100644
--- a/numpy/core/records.py
+++ b/numpy/core/records.py
@@ -772,8 +772,58 @@ def fromrecords(recList, dtype=None, shape=None, formats=None, names=None,
def fromstring(datastring, dtype=None, shape=None, offset=0, formats=None,
names=None, titles=None, aligned=False, byteorder=None):
- """Create a (read-only) record array from binary data contained in
- a string"""
+ """Create a record array from binary data
+
+ Note that despite the name of this function it does not accept `str`
+ instances.
+
+ Parameters
+ ----------
+ datastring : bytes-like
+ Buffer of binary data
+ dtype : data-type, optional
+ Valid dtype for all arrays
+ shape : int or tuple of ints, optional
+ Shape of each array.
+ offset : int, optional
+ Position in the buffer to start reading from.
+ formats, names, titles, aligned, byteorder :
+ If `dtype` is ``None``, these arguments are passed to
+ `numpy.format_parser` to construct a dtype. See that function for
+ detailed documentation.
+
+
+ Returns
+ -------
+ np.recarray
+ Record array view into the data in datastring. This will be readonly
+ if `datastring` is readonly.
+
+ See Also
+ --------
+ numpy.frombuffer
+
+ Examples
+ --------
+ >>> a = b'\x01\x02\x03abc'
+ >>> np.core.records.fromstring(a, dtype='u1,u1,u1,S3')
+ rec.array([(1, 2, 3, b'abc')],
+ dtype=[('f0', 'u1'), ('f1', 'u1'), ('f2', 'u1'), ('f3', 'S3')])
+
+ >>> grades_dtype = [('Name', (np.str_, 10)), ('Marks', np.float64),
+ ... ('GradeLevel', np.int32)]
+ >>> grades_array = np.array([('Sam', 33.3, 3), ('Mike', 44.4, 5),
+ ... ('Aadi', 66.6, 6)], dtype=grades_dtype)
+ >>> np.core.records.fromstring(grades_array.tobytes(), dtype=grades_dtype)
+ rec.array([('Sam', 33.3, 3), ('Mike', 44.4, 5), ('Aadi', 66.6, 6)],
+ dtype=[('Name', '<U10'), ('Marks', '<f8'), ('GradeLevel', '<i4')])
+
+ >>> s = '\x01\x02\x03abc'
+ >>> np.core.records.fromstring(s, dtype='u1,u1,u1,S3')
+ Traceback (most recent call last):
+ ...
+ TypeError: a bytes-like object is required, not 'str'
+ """
if dtype is None and formats is None:
raise TypeError("fromstring() needs a 'dtype' or 'formats' argument")
diff --git a/numpy/core/setup.py b/numpy/core/setup.py
index 15e732614..fcc422545 100644
--- a/numpy/core/setup.py
+++ b/numpy/core/setup.py
@@ -520,7 +520,7 @@ def configuration(parent_package='',top_path=None):
def generate_numpyconfig_h(ext, build_dir):
"""Depends on config.h: generate_config_h has to be called before !"""
# put common include directory in build_dir on search path
- # allows using code generation in headers headers
+ # allows using code generation in headers
config.add_include_dirs(join(build_dir, "src", "common"))
config.add_include_dirs(join(build_dir, "src", "npymath"))
@@ -961,7 +961,7 @@ def configuration(parent_package='',top_path=None):
config.add_extension('_operand_flag_tests',
sources=[join('src', 'umath', '_operand_flag_tests.c.src')])
- config.add_data_dir('tests')
+ config.add_subpackage('tests')
config.add_data_dir('tests/data')
config.make_svn_version_py()
diff --git a/numpy/core/shape_base.py b/numpy/core/shape_base.py
index ee56dbe43..7a76bbf9d 100644
--- a/numpy/core/shape_base.py
+++ b/numpy/core/shape_base.py
@@ -247,12 +247,13 @@ def vstack(tup):
See Also
--------
+ concatenate : Join a sequence of arrays along an existing axis.
stack : Join a sequence of arrays along a new axis.
+ block : Assemble an nd-array from nested lists of blocks.
hstack : Stack arrays in sequence horizontally (column wise).
- dstack : Stack arrays in sequence depth wise (along third dimension).
- concatenate : Join a sequence of arrays along an existing axis.
- vsplit : Split array into a list of multiple sub-arrays vertically.
- block : Assemble arrays from blocks.
+ dstack : Stack arrays in sequence depth wise (along third axis).
+ column_stack : Stack 1-D arrays as columns into a 2-D array.
+ vsplit : Split an array into multiple sub-arrays vertically (row-wise).
Examples
--------
@@ -309,12 +310,13 @@ def hstack(tup):
See Also
--------
+ concatenate : Join a sequence of arrays along an existing axis.
stack : Join a sequence of arrays along a new axis.
+ block : Assemble an nd-array from nested lists of blocks.
vstack : Stack arrays in sequence vertically (row wise).
dstack : Stack arrays in sequence depth wise (along third axis).
- concatenate : Join a sequence of arrays along an existing axis.
- hsplit : Split array along second axis.
- block : Assemble arrays from blocks.
+ column_stack : Stack 1-D arrays as columns into a 2-D array.
+ hsplit : Split an array into multiple sub-arrays horizontally (column-wise).
Examples
--------
@@ -385,8 +387,8 @@ def stack(arrays, axis=0, out=None):
See Also
--------
concatenate : Join a sequence of arrays along an existing axis.
+ block : Assemble an nd-array from nested lists of blocks.
split : Split array into a list of multiple sub-arrays of equal size.
- block : Assemble arrays from blocks.
Examples
--------
@@ -723,12 +725,13 @@ def block(arrays):
See Also
--------
- concatenate : Join a sequence of arrays together.
- stack : Stack arrays in sequence along a new dimension.
- hstack : Stack arrays in sequence horizontally (column wise).
+ concatenate : Join a sequence of arrays along an existing axis.
+ stack : Join a sequence of arrays along a new axis.
vstack : Stack arrays in sequence vertically (row wise).
- dstack : Stack arrays in sequence depth wise (along third dimension).
- vsplit : Split array into a list of multiple sub-arrays vertically.
+ hstack : Stack arrays in sequence horizontally (column wise).
+ dstack : Stack arrays in sequence depth wise (along third axis).
+ column_stack : Stack 1-D arrays as columns into a 2-D array.
+ vsplit : Split an array into multiple sub-arrays vertically (row-wise).
Notes
-----
diff --git a/numpy/core/src/common/mem_overlap.c b/numpy/core/src/common/mem_overlap.c
index 21db1893b..9da33bfc1 100644
--- a/numpy/core/src/common/mem_overlap.c
+++ b/numpy/core/src/common/mem_overlap.c
@@ -127,7 +127,7 @@
ends up considering all values x3=0...5 separately.
The upper bound for work done is prod(shape_a)*prod(shape_b), which scales
- faster than than work done by binary ufuncs, after broadcasting,
+ faster than work done by binary ufuncs, after broadcasting,
prod(shape_a). The bound may be loose, but it is possible to construct hard
instances where ufunc is faster (adapted from [2,3])::
diff --git a/numpy/core/src/common/npy_import.h b/numpy/core/src/common/npy_import.h
index 221e1e645..f485514d1 100644
--- a/numpy/core/src/common/npy_import.h
+++ b/numpy/core/src/common/npy_import.h
@@ -19,7 +19,7 @@
NPY_INLINE static void
npy_cache_import(const char *module, const char *attr, PyObject **cache)
{
- if (*cache == NULL) {
+ if (NPY_UNLIKELY(*cache == NULL)) {
PyObject *mod = PyImport_ImportModule(module);
if (mod != NULL) {
diff --git a/numpy/core/src/multiarray/array_assign_array.c b/numpy/core/src/multiarray/array_assign_array.c
index e40b6c719..b8dc7d516 100644
--- a/numpy/core/src/multiarray/array_assign_array.c
+++ b/numpy/core/src/multiarray/array_assign_array.c
@@ -305,19 +305,8 @@ PyArray_AssignArray(PyArrayObject *dst, PyArrayObject *src,
/* Check the casting rule */
if (!PyArray_CanCastTypeTo(PyArray_DESCR(src),
PyArray_DESCR(dst), casting)) {
- PyObject *errmsg;
- errmsg = PyUString_FromString("Cannot cast scalar from ");
- PyUString_ConcatAndDel(&errmsg,
- PyObject_Repr((PyObject *)PyArray_DESCR(src)));
- PyUString_ConcatAndDel(&errmsg,
- PyUString_FromString(" to "));
- PyUString_ConcatAndDel(&errmsg,
- PyObject_Repr((PyObject *)PyArray_DESCR(dst)));
- PyUString_ConcatAndDel(&errmsg,
- PyUString_FromFormat(" according to the rule %s",
- npy_casting_to_string(casting)));
- PyErr_SetObject(PyExc_TypeError, errmsg);
- Py_DECREF(errmsg);
+ npy_set_invalid_cast_error(
+ PyArray_DESCR(src), PyArray_DESCR(dst), casting, NPY_FALSE);
goto fail;
}
diff --git a/numpy/core/src/multiarray/array_assign_scalar.c b/numpy/core/src/multiarray/array_assign_scalar.c
index 6bc9bcfee..41eb75f1c 100644
--- a/numpy/core/src/multiarray/array_assign_scalar.c
+++ b/numpy/core/src/multiarray/array_assign_scalar.c
@@ -203,19 +203,8 @@ PyArray_AssignRawScalar(PyArrayObject *dst,
/* Check the casting rule */
if (!can_cast_scalar_to(src_dtype, src_data,
PyArray_DESCR(dst), casting)) {
- PyObject *errmsg;
- errmsg = PyUString_FromString("Cannot cast scalar from ");
- PyUString_ConcatAndDel(&errmsg,
- PyObject_Repr((PyObject *)src_dtype));
- PyUString_ConcatAndDel(&errmsg,
- PyUString_FromString(" to "));
- PyUString_ConcatAndDel(&errmsg,
- PyObject_Repr((PyObject *)PyArray_DESCR(dst)));
- PyUString_ConcatAndDel(&errmsg,
- PyUString_FromFormat(" according to the rule %s",
- npy_casting_to_string(casting)));
- PyErr_SetObject(PyExc_TypeError, errmsg);
- Py_DECREF(errmsg);
+ npy_set_invalid_cast_error(
+ src_dtype, PyArray_DESCR(dst), casting, NPY_TRUE);
return -1;
}
diff --git a/numpy/core/src/multiarray/arraytypes.c.src b/numpy/core/src/multiarray/arraytypes.c.src
index 5e07f0df4..38d5f21eb 100644
--- a/numpy/core/src/multiarray/arraytypes.c.src
+++ b/numpy/core/src/multiarray/arraytypes.c.src
@@ -1489,6 +1489,7 @@ OBJECT_to_@TOTYPE@(void *input, void *output, npy_intp n,
*
* #from = STRING*23, UNICODE*23, VOID*23#
* #fromtyp = npy_char*69#
+ * #is_string_to_bool = 1, 0*22, 1, 0*22, 0*23#
* #to = (BOOL,
* BYTE, UBYTE, SHORT, USHORT, INT, UINT,
* LONG, ULONG, LONGLONG, ULONGLONG,
@@ -1525,6 +1526,13 @@ static void
if (temp == NULL) {
return;
}
+#if @is_string_to_bool@
+ /* Legacy behaviour converts strings to integers before going to bool */
+ Py_SETREF(temp, PyNumber_Long(temp));
+ if (temp == NULL) {
+ return;
+ }
+#endif
if (@to@_setitem(temp, op, aop)) {
Py_DECREF(temp);
return;
@@ -3838,7 +3846,7 @@ static void
*/
/*
- * Compute correlation of data with with small kernels
+ * Compute correlation of data with small kernels
* Calling a BLAS dot product for the inner loop of the correlation is overkill
* for small kernels. It is faster to compute it directly.
* Intended to be used by _pyarray_correlate so no input verifications is done
diff --git a/numpy/core/src/multiarray/common.h b/numpy/core/src/multiarray/common.h
index 4913eb202..78a15a63c 100644
--- a/numpy/core/src/multiarray/common.h
+++ b/numpy/core/src/multiarray/common.h
@@ -5,6 +5,7 @@
#include <numpy/npy_cpu.h>
#include <numpy/ndarraytypes.h>
#include <limits.h>
+#include "npy_import.h"
#define error_converting(x) (((x) == -1) && PyErr_Occurred())
@@ -148,13 +149,9 @@ check_and_adjust_axis_msg(int *axis, int ndim, PyObject *msg_prefix)
static PyObject *AxisError_cls = NULL;
PyObject *exc;
+ npy_cache_import("numpy.core._exceptions", "AxisError", &AxisError_cls);
if (AxisError_cls == NULL) {
- PyObject *mod = PyImport_ImportModule("numpy.core._exceptions");
-
- if (mod != NULL) {
- AxisError_cls = PyObject_GetAttrString(mod, "AxisError");
- Py_DECREF(mod);
- }
+ return -1;
}
/* Invoke the AxisError constructor */
diff --git a/numpy/core/src/multiarray/convert_datatype.c b/numpy/core/src/multiarray/convert_datatype.c
index d59a62ed8..0390c92fc 100644
--- a/numpy/core/src/multiarray/convert_datatype.c
+++ b/numpy/core/src/multiarray/convert_datatype.c
@@ -985,6 +985,54 @@ PyArray_CanCastArrayTo(PyArrayObject *arr, PyArray_Descr *to,
return PyArray_CanCastTypeTo(from, to, casting);
}
+
+NPY_NO_EXPORT const char *
+npy_casting_to_string(NPY_CASTING casting)
+{
+ switch (casting) {
+ case NPY_NO_CASTING:
+ return "'no'";
+ case NPY_EQUIV_CASTING:
+ return "'equiv'";
+ case NPY_SAFE_CASTING:
+ return "'safe'";
+ case NPY_SAME_KIND_CASTING:
+ return "'same_kind'";
+ case NPY_UNSAFE_CASTING:
+ return "'unsafe'";
+ default:
+ return "<unknown>";
+ }
+}
+
+
+/**
+ * Helper function to set a useful error when casting is not possible.
+ *
+ * @param src_dtype
+ * @param dst_dtype
+ * @param casting
+ * @param scalar Whether this was a "scalar" cast (includes 0-D array with
+ * PyArray_CanCastArrayTo result).
+ */
+NPY_NO_EXPORT void
+npy_set_invalid_cast_error(
+ PyArray_Descr *src_dtype, PyArray_Descr *dst_dtype,
+ NPY_CASTING casting, npy_bool scalar)
+{
+ char *msg;
+
+ if (!scalar) {
+ msg = "Cannot cast array data from %R to %R according to the rule %s";
+ }
+ else {
+ msg = "Cannot cast scalar from %R to %R according to the rule %s";
+ }
+ PyErr_Format(PyExc_TypeError,
+ msg, src_dtype, dst_dtype, npy_casting_to_string(casting));
+}
+
+
/*NUMPY_API
* See if array scalars can be cast.
*
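
At the Python level the new helper produces ``TypeError`` messages along the lines of the sketch below; the exact wording is illustrative and depends on which caller reaches the helper (not part of the patch):

    >>> import numpy as np
    >>> np.zeros(3, dtype=np.float64).astype(np.int64, casting='safe')
    Traceback (most recent call last):
        ...
    TypeError: Cannot cast array data from dtype('float64') to dtype('int64') according to the rule 'safe'
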
diff --git a/numpy/core/src/multiarray/convert_datatype.h b/numpy/core/src/multiarray/convert_datatype.h
index 72867ead8..4a7d85187 100644
--- a/numpy/core/src/multiarray/convert_datatype.h
+++ b/numpy/core/src/multiarray/convert_datatype.h
@@ -22,6 +22,14 @@ NPY_NO_EXPORT int
should_use_min_scalar(npy_intp narrs, PyArrayObject **arr,
npy_intp ndtypes, PyArray_Descr **dtypes);
+NPY_NO_EXPORT const char *
+npy_casting_to_string(NPY_CASTING casting);
+
+NPY_NO_EXPORT void
+npy_set_invalid_cast_error(
+ PyArray_Descr *src_dtype, PyArray_Descr *dst_dtype,
+ NPY_CASTING casting, npy_bool scalar);
+
/*
* This function calls Py_DECREF on flex_dtype, and replaces it with
* a new dtype that has been adapted based on the values in data_dtype
diff --git a/numpy/core/src/multiarray/ctors.c b/numpy/core/src/multiarray/ctors.c
index 12bf9eace..9283eefce 100644
--- a/numpy/core/src/multiarray/ctors.c
+++ b/numpy/core/src/multiarray/ctors.c
@@ -21,7 +21,6 @@
#include "shape.h"
#include "npy_buffer.h"
#include "lowlevel_strided_loops.h"
-#include "methods.h"
#include "_datetime.h"
#include "datetime_strings.h"
#include "array_assign.h"
@@ -2181,41 +2180,9 @@ PyArray_FromArray(PyArrayObject *arr, PyArray_Descr *newtype, int flags)
/* Raise an error if the casting rule isn't followed */
if (!PyArray_CanCastArrayTo(arr, newtype, casting)) {
- PyObject *errmsg;
- PyArray_Descr *arr_descr = NULL;
- PyObject *arr_descr_repr = NULL;
- PyObject *newtype_repr = NULL;
-
PyErr_Clear();
- errmsg = PyUString_FromString("Cannot cast array data from ");
- arr_descr = PyArray_DESCR(arr);
- if (arr_descr == NULL) {
- Py_DECREF(newtype);
- Py_DECREF(errmsg);
- return NULL;
- }
- arr_descr_repr = PyObject_Repr((PyObject *)arr_descr);
- if (arr_descr_repr == NULL) {
- Py_DECREF(newtype);
- Py_DECREF(errmsg);
- return NULL;
- }
- PyUString_ConcatAndDel(&errmsg, arr_descr_repr);
- PyUString_ConcatAndDel(&errmsg,
- PyUString_FromString(" to "));
- newtype_repr = PyObject_Repr((PyObject *)newtype);
- if (newtype_repr == NULL) {
- Py_DECREF(newtype);
- Py_DECREF(errmsg);
- return NULL;
- }
- PyUString_ConcatAndDel(&errmsg, newtype_repr);
- PyUString_ConcatAndDel(&errmsg,
- PyUString_FromFormat(" according to the rule %s",
- npy_casting_to_string(casting)));
- PyErr_SetObject(PyExc_TypeError, errmsg);
- Py_DECREF(errmsg);
-
+ npy_set_invalid_cast_error(
+ PyArray_DESCR(arr), newtype, casting, PyArray_NDIM(arr) == 0);
Py_DECREF(newtype);
return NULL;
}
diff --git a/numpy/core/src/multiarray/datetime.c b/numpy/core/src/multiarray/datetime.c
index 67ed3ca85..cfe801898 100644
--- a/numpy/core/src/multiarray/datetime.c
+++ b/numpy/core/src/multiarray/datetime.c
@@ -22,9 +22,9 @@
#include "common.h"
#include "numpy/arrayscalars.h"
-#include "methods.h"
#include "_datetime.h"
#include "datetime_strings.h"
+#include "convert_datatype.h"
/*
* Computes the python `ret, d = divmod(d, unit)`.
diff --git a/numpy/core/src/multiarray/datetime_strings.c b/numpy/core/src/multiarray/datetime_strings.c
index 4574c05d8..f847c7ea8 100644
--- a/numpy/core/src/multiarray/datetime_strings.c
+++ b/numpy/core/src/multiarray/datetime_strings.c
@@ -20,7 +20,7 @@
#include "npy_pycompat.h"
#include "numpy/arrayscalars.h"
-#include "methods.h"
+#include "convert_datatype.h"
#include "_datetime.h"
#include "datetime_strings.h"
diff --git a/numpy/core/src/multiarray/dragon4.c b/numpy/core/src/multiarray/dragon4.c
index 282cdad28..553d0effb 100644
--- a/numpy/core/src/multiarray/dragon4.c
+++ b/numpy/core/src/multiarray/dragon4.c
@@ -1566,7 +1566,7 @@ Dragon4(BigInt *bigints, const npy_int32 exponent,
*
* scientific - boolean controlling whether scientific notation is used
* digit_mode - whether to use unique or fixed fractional output
- * cutoff_mode - whether 'precision' refers to to all digits, or digits past
+ * cutoff_mode - whether 'precision' refers to all digits, or digits past
* the decimal point.
* precision - When negative, prints as many digits as needed for a unique
* number. When positive specifies the maximum number of
diff --git a/numpy/core/src/multiarray/dtype_transfer.c b/numpy/core/src/multiarray/dtype_transfer.c
index ecaa680ec..a26426d41 100644
--- a/numpy/core/src/multiarray/dtype_transfer.c
+++ b/numpy/core/src/multiarray/dtype_transfer.c
@@ -696,17 +696,15 @@ get_nbo_cast_numeric_transfer_function(int aligned,
if (PyTypeNum_ISCOMPLEX(src_type_num) &&
!PyTypeNum_ISCOMPLEX(dst_type_num) &&
!PyTypeNum_ISBOOL(dst_type_num)) {
- PyObject *cls = NULL, *obj = NULL;
+ static PyObject *cls = NULL;
int ret;
- obj = PyImport_ImportModule("numpy.core");
- if (obj) {
- cls = PyObject_GetAttrString(obj, "ComplexWarning");
- Py_DECREF(obj);
+ npy_cache_import("numpy.core", "ComplexWarning", &cls);
+ if (cls == NULL) {
+ return NPY_FAIL;
}
ret = PyErr_WarnEx(cls,
"Casting complex values to real discards "
"the imaginary part", 1);
- Py_XDECREF(cls);
if (ret < 0) {
return NPY_FAIL;
}
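
The warning class being cached here is the ordinary ``ComplexWarning`` users see when a complex array is cast to a real type; a short illustration (not from the patch):

    >>> import warnings
    >>> import numpy as np
    >>> with warnings.catch_warnings(record=True) as w:
    ...     warnings.simplefilter("always")
    ...     real = np.array([1 + 2j]).astype(np.float64)
    ...
    >>> real
    array([1.])
    >>> w[0].category is np.ComplexWarning
    True
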
diff --git a/numpy/core/src/multiarray/mapping.c b/numpy/core/src/multiarray/mapping.c
index 7047304eb..43dbde2f1 100644
--- a/numpy/core/src/multiarray/mapping.c
+++ b/numpy/core/src/multiarray/mapping.c
@@ -2689,7 +2689,7 @@ PyArray_MapIterNew(npy_index_info *indices , int index_num, int index_type,
if (mit->numiter == 0) {
/*
* For MapIterArray, it is possible that there is no fancy index.
- * to support this case, add a a dummy iterator.
+ * to support this case, add a dummy iterator.
* Since it is 0-d its transpose, etc. does not matter.
*/
diff --git a/numpy/core/src/multiarray/methods.c b/numpy/core/src/multiarray/methods.c
index 7bfbeca15..262514ec6 100644
--- a/numpy/core/src/multiarray/methods.c
+++ b/numpy/core/src/multiarray/methods.c
@@ -54,33 +54,6 @@ NpyArg_ParseKeywords(PyObject *keys, const char *format, char **kwlist, ...)
return ret;
}
-static PyObject *
-get_forwarding_ndarray_method(const char *name)
-{
- PyObject *module_methods, *callable;
-
- /* Get a reference to the function we're calling */
- module_methods = PyImport_ImportModule("numpy.core._methods");
- if (module_methods == NULL) {
- return NULL;
- }
- callable = _PyDict_GetItemStringWithError(PyModule_GetDict(module_methods), name);
- if (callable == NULL && PyErr_Occurred()) {
- Py_DECREF(module_methods);
- return NULL;
- }
- if (callable == NULL) {
- Py_DECREF(module_methods);
- PyErr_Format(PyExc_RuntimeError,
- "NumPy internal error: could not find function "
- "numpy.core._methods.%s", name);
- }
- else {
- Py_INCREF(callable);
- }
- Py_DECREF(module_methods);
- return callable;
-}
/*
* Forwards an ndarray method to a the Python function
@@ -121,11 +94,9 @@ forward_ndarray_method(PyArrayObject *self, PyObject *args, PyObject *kwds,
*/
#define NPY_FORWARD_NDARRAY_METHOD(name) \
static PyObject *callable = NULL; \
+ npy_cache_import("numpy.core._methods", name, &callable); \
if (callable == NULL) { \
- callable = get_forwarding_ndarray_method(name); \
- if (callable == NULL) { \
- return NULL; \
- } \
+ return NULL; \
} \
return forward_ndarray_method(self, args, kwds, callable)
@@ -146,8 +117,15 @@ array_take(PyArrayObject *self, PyObject *args, PyObject *kwds)
PyArray_ClipmodeConverter, &mode))
return NULL;
- return PyArray_Return((PyArrayObject *)
- PyArray_TakeFrom(self, indices, dimension, out, mode));
+ PyObject *ret = PyArray_TakeFrom(self, indices, dimension, out, mode);
+
+ /* this matches the unpacking behavior of ufuncs */
+ if (out == NULL) {
+ return PyArray_Return((PyArrayObject *)ret);
+ }
+ else {
+ return ret;
+ }
}
static PyObject *
@@ -303,7 +281,15 @@ array_argmax(PyArrayObject *self, PyObject *args, PyObject *kwds)
PyArray_OutputConverter, &out))
return NULL;
- return PyArray_Return((PyArrayObject *)PyArray_ArgMax(self, axis, out));
+ PyObject *ret = PyArray_ArgMax(self, axis, out);
+
+ /* this matches the unpacking behavior of ufuncs */
+ if (out == NULL) {
+ return PyArray_Return((PyArrayObject *)ret);
+ }
+ else {
+ return ret;
+ }
}
static PyObject *
@@ -318,7 +304,15 @@ array_argmin(PyArrayObject *self, PyObject *args, PyObject *kwds)
PyArray_OutputConverter, &out))
return NULL;
- return PyArray_Return((PyArrayObject *)PyArray_ArgMin(self, axis, out));
+ PyObject *ret = PyArray_ArgMin(self, axis, out);
+
+ /* this matches the unpacking behavior of ufuncs */
+ if (out == NULL) {
+ return PyArray_Return((PyArrayObject *)ret);
+ }
+ else {
+ return ret;
+ }
}
static PyObject *
@@ -789,24 +783,6 @@ array_setscalar(PyArrayObject *self, PyObject *args)
}
}
-NPY_NO_EXPORT const char *
-npy_casting_to_string(NPY_CASTING casting)
-{
- switch (casting) {
- case NPY_NO_CASTING:
- return "'no'";
- case NPY_EQUIV_CASTING:
- return "'equiv'";
- case NPY_SAFE_CASTING:
- return "'safe'";
- case NPY_SAME_KIND_CASTING:
- return "'same_kind'";
- case NPY_UNSAFE_CASTING:
- return "'unsafe'";
- default:
- return "<unknown>";
- }
-}
static PyObject *
array_astype(PyArrayObject *self, PyObject *args, PyObject *kwds)
@@ -876,19 +852,8 @@ array_astype(PyArrayObject *self, PyObject *args, PyObject *kwds)
return (PyObject *)ret;
}
else {
- PyObject *errmsg;
- errmsg = PyUString_FromString("Cannot cast array from ");
- PyUString_ConcatAndDel(&errmsg,
- PyObject_Repr((PyObject *)PyArray_DESCR(self)));
- PyUString_ConcatAndDel(&errmsg,
- PyUString_FromString(" to "));
- PyUString_ConcatAndDel(&errmsg,
- PyObject_Repr((PyObject *)dtype));
- PyUString_ConcatAndDel(&errmsg,
- PyUString_FromFormat(" according to the rule %s",
- npy_casting_to_string(casting)));
- PyErr_SetObject(PyExc_TypeError, errmsg);
- Py_DECREF(errmsg);
+ npy_set_invalid_cast_error(
+ PyArray_DESCR(self), dtype, casting, PyArray_NDIM(self) == 0);
Py_DECREF(dtype);
return NULL;
}
@@ -1247,7 +1212,15 @@ array_choose(PyArrayObject *self, PyObject *args, PyObject *kwds)
return NULL;
}
- return PyArray_Return((PyArrayObject *)PyArray_Choose(self, choices, out, clipmode));
+ PyObject *ret = PyArray_Choose(self, choices, out, clipmode);
+
+ /* this matches the unpacking behavior of ufuncs */
+ if (out == NULL) {
+ return PyArray_Return((PyArrayObject *)ret);
+ }
+ else {
+ return ret;
+ }
}
static PyObject *
@@ -1889,13 +1862,8 @@ array_reduce_ex(PyArrayObject *self, PyObject *args)
* method that involves using a temporary bytes allocation. */
return array_reduce_ex_regular(self, protocol);
}
- else if (protocol == 5) {
- return array_reduce_ex_picklebuffer(self, protocol);
- }
else {
- PyErr_Format(PyExc_ValueError,
- "__reduce_ex__ called with protocol > 5");
- return NULL;
+ return array_reduce_ex_picklebuffer(self, protocol);
}
}
@@ -2353,8 +2321,16 @@ array_compress(PyArrayObject *self, PyObject *args, PyObject *kwds)
PyArray_OutputConverter, &out)) {
return NULL;
}
- return PyArray_Return(
- (PyArrayObject *)PyArray_Compress(self, condition, axis, out));
+
+ PyObject *ret = PyArray_Compress(self, condition, axis, out);
+
+ /* this matches the unpacking behavior of ufuncs */
+ if (out == NULL) {
+ return PyArray_Return((PyArrayObject *)ret);
+ }
+ else {
+ return ret;
+ }
}
@@ -2389,7 +2365,15 @@ array_trace(PyArrayObject *self, PyObject *args, PyObject *kwds)
rtype = _CHKTYPENUM(dtype);
Py_XDECREF(dtype);
- return PyArray_Return((PyArrayObject *)PyArray_Trace(self, offset, axis1, axis2, rtype, out));
+ PyObject *ret = PyArray_Trace(self, offset, axis1, axis2, rtype, out);
+
+ /* this matches the unpacking behavior of ufuncs */
+ if (out == NULL) {
+ return PyArray_Return((PyArrayObject *)ret);
+ }
+ else {
+ return ret;
+ }
}
#undef _CHKTYPENUM
@@ -2474,7 +2458,16 @@ array_round(PyArrayObject *self, PyObject *args, PyObject *kwds)
PyArray_OutputConverter, &out)) {
return NULL;
}
- return PyArray_Return((PyArrayObject *)PyArray_Round(self, decimals, out));
+
+ PyObject *ret = PyArray_Round(self, decimals, out);
+
+ /* this matches the unpacking behavior of ufuncs */
+ if (out == NULL) {
+ return PyArray_Return((PyArrayObject *)ret);
+ }
+ else {
+ return ret;
+ }
}
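
A sketch of the "unpacking" behaviour these methods now share with ufuncs (assumed, not taken from the patch): when ``out=`` is passed, the 0-d output array is handed back unchanged instead of being converted to a scalar:

    >>> import numpy as np
    >>> a = np.arange(6)
    >>> out = np.empty((), dtype=np.intp)
    >>> res = a.argmax(out=out)
    >>> res is out     # the provided 0-d array is returned as-is
    True
    >>> a.argmax()     # without ``out=`` the 0-d result is unwrapped to a scalar
    5
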
diff --git a/numpy/core/src/multiarray/methods.h b/numpy/core/src/multiarray/methods.h
index 7a9a24a00..c0de23c35 100644
--- a/numpy/core/src/multiarray/methods.h
+++ b/numpy/core/src/multiarray/methods.h
@@ -5,8 +5,6 @@
extern NPY_NO_EXPORT PyMethodDef array_methods[];
-NPY_NO_EXPORT const char *
-npy_casting_to_string(NPY_CASTING casting);
/*
* Pathlib support, takes a borrowed reference and returns a new one.
diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c
index 4c316052d..6915371d8 100644
--- a/numpy/core/src/multiarray/multiarraymodule.c
+++ b/numpy/core/src/multiarray/multiarraymodule.c
@@ -2438,7 +2438,6 @@ einsum_list_to_subscripts(PyObject *obj, char *subscripts, int subsize)
}
size = PySequence_Size(obj);
-
for (i = 0; i < size; ++i) {
item = PySequence_Fast_GET_ITEM(obj, i);
/* Ellipsis */
@@ -2461,8 +2460,16 @@ einsum_list_to_subscripts(PyObject *obj, char *subscripts, int subsize)
ellipsis = 1;
}
/* Subscript */
- else if (PyInt_Check(item) || PyLong_Check(item)) {
- long s = PyInt_AsLong(item);
+ else {
+ npy_intp s = PyArray_PyIntAsIntp(item);
+ /* Invalid */
+ if (error_converting(s)) {
+ PyErr_SetString(PyExc_TypeError,
+ "each subscript must be either an integer "
+ "or an ellipsis");
+ Py_DECREF(obj);
+ return -1;
+ }
npy_bool bad_input = 0;
if (subindex + 1 >= subsize) {
@@ -2472,7 +2479,7 @@ einsum_list_to_subscripts(PyObject *obj, char *subscripts, int subsize)
return -1;
}
- if ( s < 0 ) {
+ if (s < 0) {
bad_input = 1;
}
else if (s < 26) {
@@ -2490,16 +2497,9 @@ einsum_list_to_subscripts(PyObject *obj, char *subscripts, int subsize)
"subscript is not within the valid range [0, 52)");
Py_DECREF(obj);
return -1;
- }
- }
- /* Invalid */
- else {
- PyErr_SetString(PyExc_ValueError,
- "each subscript must be either an integer "
- "or an ellipsis");
- Py_DECREF(obj);
- return -1;
+ }
}
+
}
Py_DECREF(obj);
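
Illustrative sketch (not part of the patch): with the hunk above, subscripts in the interleaved list form of `einsum` may be any integer-like object (e.g. `np.int64`), not only Python ints (gh-15961). A short sketch assuming a build with this change:

    import numpy as np

    a = np.arange(9).reshape(3, 3)
    subscripts = np.array([0, 0])     # entries are np.int64, not Python int
    assert np.einsum(a, subscripts) == np.trace(a)
    assert np.einsum(a, list(subscripts)) == np.trace(a)
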
diff --git a/numpy/core/src/multiarray/nditer_constr.c b/numpy/core/src/multiarray/nditer_constr.c
index e40a2d594..620c7d593 100644
--- a/numpy/core/src/multiarray/nditer_constr.c
+++ b/numpy/core/src/multiarray/nditer_constr.c
@@ -1308,21 +1308,11 @@ npyiter_check_casting(int nop, PyArrayObject **op,
!PyArray_CanCastArrayTo(op[iop],
op_dtype[iop],
casting)) {
- PyObject *errmsg;
- errmsg = PyUString_FromFormat(
- "Iterator operand %d dtype could not be cast from ",
- iop);
- PyUString_ConcatAndDel(&errmsg,
- PyObject_Repr((PyObject *)PyArray_DESCR(op[iop])));
- PyUString_ConcatAndDel(&errmsg,
- PyUString_FromString(" to "));
- PyUString_ConcatAndDel(&errmsg,
- PyObject_Repr((PyObject *)op_dtype[iop]));
- PyUString_ConcatAndDel(&errmsg,
- PyUString_FromFormat(" according to the rule %s",
- npyiter_casting_to_string(casting)));
- PyErr_SetObject(PyExc_TypeError, errmsg);
- Py_DECREF(errmsg);
+ PyErr_Format(PyExc_TypeError,
+ "Iterator operand %d dtype could not be cast from "
+ "%R to %R according to the rule %s",
+ iop, PyArray_DESCR(op[iop]), op_dtype[iop],
+ npyiter_casting_to_string(casting));
return 0;
}
/* Check write (temp -> op) casting */
@@ -1330,22 +1320,12 @@ npyiter_check_casting(int nop, PyArrayObject **op,
!PyArray_CanCastTypeTo(op_dtype[iop],
PyArray_DESCR(op[iop]),
casting)) {
- PyObject *errmsg;
- errmsg = PyUString_FromString(
- "Iterator requested dtype could not be cast from ");
- PyUString_ConcatAndDel(&errmsg,
- PyObject_Repr((PyObject *)op_dtype[iop]));
- PyUString_ConcatAndDel(&errmsg,
- PyUString_FromString(" to "));
- PyUString_ConcatAndDel(&errmsg,
- PyObject_Repr((PyObject *)PyArray_DESCR(op[iop])));
- PyUString_ConcatAndDel(&errmsg,
- PyUString_FromFormat(", the operand %d dtype, "
- "according to the rule %s",
- iop,
- npyiter_casting_to_string(casting)));
- PyErr_SetObject(PyExc_TypeError, errmsg);
- Py_DECREF(errmsg);
+ PyErr_Format(PyExc_TypeError,
+ "Iterator requested dtype could not be cast from "
+ "%R to %R, the operand %d dtype, "
+ "according to the rule %s",
+ op_dtype[iop], PyArray_DESCR(op[iop]), iop,
+ npyiter_casting_to_string(casting));
return 0;
}
diff --git a/numpy/core/src/umath/fast_loop_macros.h b/numpy/core/src/umath/fast_loop_macros.h
index e6789e1d6..74bf01643 100644
--- a/numpy/core/src/umath/fast_loop_macros.h
+++ b/numpy/core/src/umath/fast_loop_macros.h
@@ -10,7 +10,11 @@
#ifndef _NPY_UMATH_FAST_LOOP_MACROS_H_
#define _NPY_UMATH_FAST_LOOP_MACROS_H_
-#include "simd.inc"
+static NPY_INLINE npy_uintp
+abs_ptrdiff(char *a, char *b)
+{
+ return (a > b) ? (a - b) : (b - a);
+}
/**
* Simple unoptimized loop macros that iterate over the ufunc arguments in
diff --git a/numpy/core/src/umath/loops.c.src b/numpy/core/src/umath/loops.c.src
index eea82309c..a5c663a47 100644
--- a/numpy/core/src/umath/loops.c.src
+++ b/numpy/core/src/umath/loops.c.src
@@ -38,6 +38,9 @@
#define NPY_MAX_SIMD_SIZE 1024
#endif
+/** Provides the various *_LOOP macros */
+#include "fast_loop_macros.h"
+
/*
* include vectorized functions and dispatchers
* this file is safe to include also for generic builds
@@ -46,10 +49,6 @@
*/
#include "simd.inc"
-/** Provides the various *_LOOP macros */
-#include "fast_loop_macros.h"
-
-
/******************************************************************************
** GENERIC FLOAT LOOPS **
*****************************************************************************/
diff --git a/numpy/core/src/umath/scalarmath.c.src b/numpy/core/src/umath/scalarmath.c.src
index bb2915e09..90cc7a513 100644
--- a/numpy/core/src/umath/scalarmath.c.src
+++ b/numpy/core/src/umath/scalarmath.c.src
@@ -16,6 +16,7 @@
#include "numpy/ufuncobject.h"
#include "numpy/arrayscalars.h"
+#include "npy_import.h"
#include "npy_pycompat.h"
#include "numpy/halffloat.h"
@@ -1339,13 +1340,9 @@ static int
emit_complexwarning(void)
{
static PyObject *cls = NULL;
+ npy_cache_import("numpy.core", "ComplexWarning", &cls);
if (cls == NULL) {
- PyObject *mod;
- mod = PyImport_ImportModule("numpy.core");
- assert(mod != NULL);
- cls = PyObject_GetAttrString(mod, "ComplexWarning");
- assert(cls != NULL);
- Py_DECREF(mod);
+ return -1;
}
return PyErr_WarnEx(cls,
"Casting complex values to real discards the imaginary part", 1);
diff --git a/numpy/core/src/umath/simd.inc.src b/numpy/core/src/umath/simd.inc.src
index 106c7e7c9..6b0bcc3dc 100644
--- a/numpy/core/src/umath/simd.inc.src
+++ b/numpy/core/src/umath/simd.inc.src
@@ -50,12 +50,6 @@
*/
#define MAX_STEP_SIZE 2097152
-static NPY_INLINE npy_uintp
-abs_ptrdiff(char *a, char *b)
-{
- return (a > b) ? (a - b) : (b - a);
-}
-
/*
* nomemoverlap - returns true if two strided arrays have an overlapping
* region in memory. ip_size/op_size = size of the arrays which can be negative
diff --git a/numpy/core/src/umath/ufunc_type_resolution.c b/numpy/core/src/umath/ufunc_type_resolution.c
index 2534ff78a..ea20bb24f 100644
--- a/numpy/core/src/umath/ufunc_type_resolution.c
+++ b/numpy/core/src/umath/ufunc_type_resolution.c
@@ -51,25 +51,6 @@ npy_casting_to_py_object(NPY_CASTING casting)
}
-static const char *
-npy_casting_to_string(NPY_CASTING casting)
-{
- switch (casting) {
- case NPY_NO_CASTING:
- return "'no'";
- case NPY_EQUIV_CASTING:
- return "'equiv'";
- case NPY_SAFE_CASTING:
- return "'safe'";
- case NPY_SAME_KIND_CASTING:
- return "'same_kind'";
- case NPY_UNSAFE_CASTING:
- return "'unsafe'";
- default:
- return "<unknown>";
- }
-}
-
/**
* Always returns -1 to indicate the exception was raised, for convenience
*/
diff --git a/numpy/core/tests/test_api.py b/numpy/core/tests/test_api.py
index 71b46e551..2600d409a 100644
--- a/numpy/core/tests/test_api.py
+++ b/numpy/core/tests/test_api.py
@@ -1,6 +1,7 @@
import sys
import numpy as np
+from numpy.core._rational_tests import rational
import pytest
from numpy.testing import (
assert_, assert_equal, assert_array_equal, assert_raises, assert_warns,
@@ -141,6 +142,16 @@ def test_array_array():
assert_equal(np.array([(1.0,) * 10] * 10, dtype=np.float64),
np.ones((10, 10), dtype=np.float64))
+@pytest.mark.parametrize("array", [True, False])
+def test_array_impossible_casts(array):
+ # All builtin types can be force-cast, at least theoretically,
+ # but user dtypes cannot necessarily.
+ rt = rational(1, 2)
+ if array:
+ rt = np.array(rt)
+ with assert_raises(ValueError):
+ np.array(rt, dtype="M8")
+
def test_fastCopyAndTranspose():
# 0D array
@@ -278,6 +289,34 @@ def test_array_astype_warning(t):
a = np.array(10, dtype=np.complex_)
assert_warns(np.ComplexWarning, a.astype, t)
+@pytest.mark.parametrize(["dtype", "out_dtype"],
+ [(np.bytes_, np.bool_),
+ (np.unicode, np.bool_),
+ (np.dtype("S10,S9"), np.dtype("?,?"))])
+def test_string_to_boolean_cast(dtype, out_dtype):
+ """
+ Currently, for `astype`, strings are effectively cast to booleans by
+ calling `bool(int(string))`. This is not consistent (see gh-9875) and
+ will eventually be deprecated.
+ """
+ arr = np.array(["10", "10\0\0\0", "0\0\0", "0"], dtype=dtype)
+ expected = np.array([True, True, False, False], dtype=out_dtype)
+ assert_array_equal(arr.astype(out_dtype), expected)
+
+@pytest.mark.parametrize(["dtype", "out_dtype"],
+ [(np.bytes_, np.bool_),
+ (np.unicode, np.bool_),
+ (np.dtype("S10,S9"), np.dtype("?,?"))])
+def test_string_to_boolean_cast_errors(dtype, out_dtype):
+ """
+ These currently error out, since the cast to integers fails, but should not
+ error out in the future.
+ """
+ for invalid in ["False", "True", "", "\0", "non-empty"]:
+ arr = np.array([invalid], dtype=dtype)
+ with assert_raises(ValueError):
+ arr.astype(out_dtype)
+
def test_copyto_fromscalar():
a = np.arange(6, dtype='f4').reshape(2, 3)
diff --git a/numpy/core/tests/test_einsum.py b/numpy/core/tests/test_einsum.py
index 68491681a..da84735a0 100644
--- a/numpy/core/tests/test_einsum.py
+++ b/numpy/core/tests/test_einsum.py
@@ -274,6 +274,13 @@ class TestEinsum:
assert_equal(np.einsum(a, [0, 0], optimize=do_opt),
np.trace(a).astype(dtype))
+ # gh-15961: should accept numpy int64 type in subscript list
+ np_array = np.asarray([0, 0])
+ assert_equal(np.einsum(a, np_array, optimize=do_opt),
+ np.trace(a).astype(dtype))
+ assert_equal(np.einsum(a, list(np_array), optimize=do_opt),
+ np.trace(a).astype(dtype))
+
# multiply(a, b)
assert_equal(np.einsum("..., ...", 3, 4), 12) # scalar case
for n in range(1, 17):
diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py
index f36c27c6c..a698370b6 100644
--- a/numpy/core/tests/test_multiarray.py
+++ b/numpy/core/tests/test_multiarray.py
@@ -1582,6 +1582,11 @@ class TestMethods:
# gh-12031, caused SEGFAULT
assert_raises(TypeError, oned.choose,np.void(0), [oned])
+ out = np.array(0)
+ ret = np.choose(np.array(1), [10, 20, 30], out=out)
+ assert out is ret
+ assert_equal(out[()], 20)
+
# gh-6272 check overlap on out
x = np.arange(5)
y = np.choose([0,0,0], [x[:3], x[:3], x[:3]], out=x[1:4], mode='wrap')
@@ -1658,7 +1663,7 @@ class TestMethods:
out = np.zeros_like(arr)
res = arr.round(*round_args, out=out)
assert_equal(out, expected)
- assert_equal(out, res)
+ assert out is res
check_round(np.array([1.2, 1.5]), [1, 2])
check_round(np.array(1.5), 2)
@@ -3023,6 +3028,10 @@ class TestMethods:
assert_equal(b.trace(0, 1, 2), [3, 11])
assert_equal(b.trace(offset=1, axis1=0, axis2=2), [1, 3])
+ out = np.array(1)
+ ret = a.trace(out=out)
+ assert ret is out
+
def test_trace_subclass(self):
# The class would need to overwrite trace to ensure single-element
# output also has the right subclass.
@@ -4126,6 +4135,13 @@ class TestArgmax:
a.argmax(-1, out=out)
assert_equal(out, a.argmax(-1))
+ @pytest.mark.parametrize('ndim', [0, 1])
+ def test_ret_is_out(self, ndim):
+ a = np.ones((4,) + (3,)*ndim)
+ out = np.empty((3,)*ndim, dtype=np.intp)
+ ret = a.argmax(axis=0, out=out)
+ assert ret is out
+
def test_argmax_unicode(self):
d = np.zeros(6031, dtype='<U9')
d[5942] = "as"
@@ -4275,6 +4291,13 @@ class TestArgmin:
a.argmin(-1, out=out)
assert_equal(out, a.argmin(-1))
+ @pytest.mark.parametrize('ndim', [0, 1])
+ def test_ret_is_out(self, ndim):
+ a = np.ones((4,) + (3,)*ndim)
+ out = np.empty((3,)*ndim, dtype=np.intp)
+ ret = a.argmin(axis=0, out=out)
+ assert ret is out
+
def test_argmin_unicode(self):
d = np.ones(6031, dtype='<U9')
d[6001] = "0"
@@ -4552,6 +4575,16 @@ class TestTake:
y = np.take(x, [1, 2, 3], out=x[2:5], mode='wrap')
assert_equal(y, np.array([1, 2, 3]))
+ @pytest.mark.parametrize('shape', [(1, 2), (1,), ()])
+ def test_ret_is_out(self, shape):
+ # 0d arrays should not be an exception to this rule
+ x = np.arange(5)
+ inds = np.zeros(shape, dtype=np.intp)
+ out = np.zeros(shape, dtype=x.dtype)
+ ret = np.take(x, inds, out=out)
+ assert ret is out
+
+
class TestLexsort:
@pytest.mark.parametrize('dtype',[
np.uint8, np.uint16, np.uint32, np.uint64,
diff --git a/numpy/core/tests/test_numeric.py b/numpy/core/tests/test_numeric.py
index bcc6a0c4e..acd442e2f 100644
--- a/numpy/core/tests/test_numeric.py
+++ b/numpy/core/tests/test_numeric.py
@@ -1446,6 +1446,36 @@ class TestArrayComparisons:
assert_(res)
assert_(type(res) is bool)
+ def test_array_equal_equal_nan(self):
+ # Test array_equal with equal_nan kwarg
+ a1 = np.array([1, 2, np.nan])
+ a2 = np.array([1, np.nan, 2])
+ a3 = np.array([1, 2, np.inf])
+
+ # equal_nan=False by default
+ assert_(not np.array_equal(a1, a1))
+ assert_(np.array_equal(a1, a1, equal_nan=True))
+ assert_(not np.array_equal(a1, a2, equal_nan=True))
+ # nan's not conflated with inf's
+ assert_(not np.array_equal(a1, a3, equal_nan=True))
+ # 0-D arrays
+ a = np.array(np.nan)
+ assert_(not np.array_equal(a, a))
+ assert_(np.array_equal(a, a, equal_nan=True))
+ # Non-float dtype - equal_nan should have no effect
+ a = np.array([1, 2, 3], dtype=int)
+ assert_(np.array_equal(a, a))
+ assert_(np.array_equal(a, a, equal_nan=True))
+ # Multi-dimensional array
+ a = np.array([[0, 1], [np.nan, 1]])
+ assert_(not np.array_equal(a, a))
+ assert_(np.array_equal(a, a, equal_nan=True))
+ # Complex values
+ a, b = [np.array([1 + 1j])]*2
+ a.real, b.imag = np.nan, np.nan
+ assert_(not np.array_equal(a, b, equal_nan=False))
+ assert_(np.array_equal(a, b, equal_nan=True))
+
def test_none_compares_elementwise(self):
a = np.array([None, 1, None], dtype=object)
assert_equal(a == None, [True, False, True])
diff --git a/numpy/core/tests/test_umath.py b/numpy/core/tests/test_umath.py
index 60c9fe437..e7965c0ca 100644
--- a/numpy/core/tests/test_umath.py
+++ b/numpy/core/tests/test_umath.py
@@ -21,6 +21,27 @@ def on_powerpc():
platform.machine().startswith('ppc')
+def bad_arcsinh():
+ """The blacklisted trig functions are not accurate on aarch64 for
+ complex256. Rather than dig through the actual problem, skip the
+ test. This should be fixed when we can move past glibc2.17,
+ which is the version in manylinux2014.
+ """
+ x = 1.78e-10
+ v1 = np.arcsinh(np.float128(x))
+ v2 = np.arcsinh(np.complex256(x)).real
+ # The eps for float128 is 1e-33, so this is way bigger
+ return abs((v1 / v2) - 1.0) > 1e-23
+
+if platform.machine() == 'aarch64' and bad_arcsinh():
+ skip_longcomplex_msg = ('Trig functions of np.longcomplex values known to be '
+ 'inaccurate on aarch64 for some compilation '
+ 'configurations, should be fixed by building on a '
+ 'platform using glibc>2.17')
+else:
+ skip_longcomplex_msg = ''
+
+
class _FilterInvalids:
def setup(self):
self.olderr = np.seterr(invalid='ignore')
@@ -618,6 +639,12 @@ class TestLogAddExp2(_FilterInvalids):
assert_(np.isnan(np.logaddexp2(0, np.nan)))
assert_(np.isnan(np.logaddexp2(np.nan, np.nan)))
+ def test_reduce(self):
+ assert_equal(np.logaddexp2.identity, -np.inf)
+ assert_equal(np.logaddexp2.reduce([]), -np.inf)
+ assert_equal(np.logaddexp2.reduce([-np.inf]), -np.inf)
+ assert_equal(np.logaddexp2.reduce([-np.inf, 0]), 0)
+
class TestLog:
def test_log_values(self):
@@ -2823,6 +2850,8 @@ class TestComplexFunctions:
# are accurate down to a few epsilons. (Eg. on Linux 64-bit)
# So, give more leeway for long complex tests here:
# Can use 2.1 for > Ubuntu LTS Trusty (2014), glibc = 2.19.
+ if skip_longcomplex_msg:
+ pytest.skip(skip_longcomplex_msg)
check(x_series, 50.0*eps)
else:
check(x_series, 2.1*eps)
diff --git a/numpy/distutils/fcompiler/intel.py b/numpy/distutils/fcompiler/intel.py
index d84f38c76..c7b3c2340 100644
--- a/numpy/distutils/fcompiler/intel.py
+++ b/numpy/distutils/fcompiler/intel.py
@@ -59,7 +59,7 @@ class IntelFCompiler(BaseIntelFCompiler):
def get_flags_opt(self): # Scipy test failures with -O2
v = self.get_version()
mpopt = 'openmp' if v and v < '15' else 'qopenmp'
- return ['-fp-model strict -O1 -{}'.format(mpopt)]
+ return ['-fp-model', 'strict', '-O1', '-{}'.format(mpopt)]
def get_flags_arch(self):
return []
@@ -125,10 +125,10 @@ class IntelEM64TFCompiler(IntelFCompiler):
def get_flags_opt(self): # Scipy test failures with -O2
v = self.get_version()
mpopt = 'openmp' if v and v < '15' else 'qopenmp'
- return ['-fp-model strict -O1 -{}'.format(mpopt)]
+ return ['-fp-model', 'strict', '-O1', '-{}'.format(mpopt)]
def get_flags_arch(self):
- return ['']
+ return []
# Is there no difference in the version string between the above compilers
# and the Visual compilers?
@@ -210,7 +210,7 @@ class IntelEM64VisualFCompiler(IntelVisualFCompiler):
version_match = simple_version_match(start=r'Intel\(R\).*?64,')
def get_flags_arch(self):
- return ['']
+ return []
if __name__ == '__main__':
diff --git a/numpy/distutils/setup.py b/numpy/distutils/setup.py
index 69d35f5c2..88cd1a160 100644
--- a/numpy/distutils/setup.py
+++ b/numpy/distutils/setup.py
@@ -4,7 +4,7 @@ def configuration(parent_package='',top_path=None):
config = Configuration('distutils', parent_package, top_path)
config.add_subpackage('command')
config.add_subpackage('fcompiler')
- config.add_data_dir('tests')
+ config.add_subpackage('tests')
config.add_data_files('site.cfg')
config.add_data_files('mingw/gfortran_vs2003_hack.c')
config.make_config_py()
diff --git a/numpy/doc/structured_arrays.py b/numpy/doc/structured_arrays.py
index 72990cf89..359d4f7f4 100644
--- a/numpy/doc/structured_arrays.py
+++ b/numpy/doc/structured_arrays.py
@@ -341,7 +341,7 @@ structured datatype has just a single field::
>>> nostruct[:] = twofield
Traceback (most recent call last):
...
- TypeError: Cannot cast scalar from dtype([('A', '<i4'), ('B', '<i4')]) to dtype('int32') according to the rule 'unsafe'
+ TypeError: Cannot cast array data from dtype([('A', '<i4'), ('B', '<i4')]) to dtype('int32') according to the rule 'unsafe'
Assignment from other Structured Arrays
```````````````````````````````````````
diff --git a/numpy/f2py/setup.py b/numpy/f2py/setup.py
index 6314c5af3..80b47e527 100644
--- a/numpy/f2py/setup.py
+++ b/numpy/f2py/setup.py
@@ -25,7 +25,8 @@ from __version__ import version
def configuration(parent_package='', top_path=None):
config = Configuration('f2py', parent_package, top_path)
- config.add_data_dir('tests')
+ config.add_subpackage('tests')
+ config.add_data_dir('tests/src')
config.add_data_files(
'src/fortranobject.c',
'src/fortranobject.h')
diff --git a/numpy/fft/setup.py b/numpy/fft/setup.py
index e8204fcd3..9ed824e4f 100644
--- a/numpy/fft/setup.py
+++ b/numpy/fft/setup.py
@@ -4,7 +4,7 @@ def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('fft', parent_package, top_path)
- config.add_data_dir('tests')
+ config.add_subpackage('tests')
# AIX needs to be told to use large file support - at all times
defs = [('_LARGE_FILES', None)] if sys.platform[:3] == "aix" else []
diff --git a/numpy/lib/_iotools.py b/numpy/lib/_iotools.py
index ff5b94342..7560bf4da 100644
--- a/numpy/lib/_iotools.py
+++ b/numpy/lib/_iotools.py
@@ -165,7 +165,6 @@ class LineSplitter:
"""
return lambda input: [_.strip() for _ in method(input)]
- #
def __init__(self, delimiter=None, comments='#', autostrip=True,
encoding=None):
@@ -195,7 +194,6 @@ class LineSplitter:
else:
self._handyman = _handyman
self.encoding = encoding
- #
def _delimited_splitter(self, line):
"""Chop off comments, strip, and split at delimiter. """
@@ -205,7 +203,6 @@ class LineSplitter:
if not line:
return []
return line.split(self.delimiter)
- #
def _fixedwidth_splitter(self, line):
if self.comments is not None:
@@ -216,7 +213,6 @@ class LineSplitter:
fixed = self.delimiter
slices = [slice(i, i + fixed) for i in range(0, len(line), fixed)]
return [line[s] for s in slices]
- #
def _variablewidth_splitter(self, line):
if self.comments is not None:
@@ -225,7 +221,6 @@ class LineSplitter:
return []
slices = self.delimiter
return [line[s] for s in slices]
- #
def __call__(self, line):
return self._handyman(_decode_line(line, self.encoding))
@@ -282,10 +277,9 @@ class NameValidator:
('EXCL', 'FIELD2', 'NO_Q', 'WITH_SPACE', 'CASE')
"""
- #
+
defaultexcludelist = ['return', 'file', 'print']
defaultdeletechars = set(r"""~!@#$%^&*()-=+~\|]}[{';: /?.>,<""")
- #
def __init__(self, excludelist=None, deletechars=None,
case_sensitive=None, replace_space='_'):
@@ -311,7 +305,7 @@ class NameValidator:
else:
msg = 'unrecognized case_sensitive value %s.' % case_sensitive
raise ValueError(msg)
- #
+
self.replace_space = replace_space
def validate(self, names, defaultfmt="f%i", nbfields=None):
@@ -362,7 +356,7 @@ class NameValidator:
validatednames = []
seen = dict()
nbempty = 0
- #
+
for item in names:
item = case_converter(item).strip()
if replace_space:
@@ -383,7 +377,6 @@ class NameValidator:
validatednames.append(item)
seen[item] = cnt + 1
return tuple(validatednames)
- #
def __call__(self, names, defaultfmt="f%i", nbfields=None):
return self.validate(names, defaultfmt=defaultfmt, nbfields=nbfields)
@@ -502,7 +495,6 @@ class StringConverter:
upgrade or not. Default is False.
"""
- #
_mapper = [(nx.bool_, str2bool, False),
(nx.int_, int, -1),]
@@ -514,63 +506,62 @@ class StringConverter:
_mapper.extend([(nx.float64, float, nx.nan),
(nx.complex128, complex, nx.nan + 0j),
(nx.longdouble, nx.longdouble, nx.nan),
- (nx.unicode_, asunicode, '???'),
- (nx.string_, asbytes, '???'),
# If a non-default dtype is passed, fall back to generic
# ones (should only be used for the converter)
(nx.integer, int, -1),
(nx.floating, float, nx.nan),
- (nx.complexfloating, complex, nx.nan + 0j),])
-
- (_defaulttype, _defaultfunc, _defaultfill) = zip(*_mapper)
+ (nx.complexfloating, complex, nx.nan + 0j),
+ # Last, try with the string types (must be last, because
+ # `_mapper[-1]` is used as default in some cases)
+ (nx.unicode_, asunicode, '???'),
+ (nx.string_, asbytes, '???'),
+ ])
@classmethod
def _getdtype(cls, val):
"""Returns the dtype of the input variable."""
return np.array(val).dtype
- #
@classmethod
def _getsubdtype(cls, val):
"""Returns the type of the dtype of the input variable."""
return np.array(val).dtype.type
- #
- # This is a bit annoying. We want to return the "general" type in most
- # cases (ie. "string" rather than "S10"), but we want to return the
- # specific type for datetime64 (ie. "datetime64[us]" rather than
- # "datetime64").
@classmethod
def _dtypeortype(cls, dtype):
"""Returns dtype for datetime64 and type of dtype otherwise."""
+
+ # This is a bit annoying. We want to return the "general" type in most
+ # cases (ie. "string" rather than "S10"), but we want to return the
+ # specific type for datetime64 (ie. "datetime64[us]" rather than
+ # "datetime64").
if dtype.type == np.datetime64:
return dtype
return dtype.type
- #
@classmethod
def upgrade_mapper(cls, func, default=None):
"""
- Upgrade the mapper of a StringConverter by adding a new function and
- its corresponding default.
+ Upgrade the mapper of a StringConverter by adding a new function and
+ its corresponding default.
- The input function (or sequence of functions) and its associated
- default value (if any) is inserted in penultimate position of the
- mapper. The corresponding type is estimated from the dtype of the
- default value.
+ The input function (or sequence of functions) and its associated
+ default value (if any) is inserted in penultimate position of the
+ mapper. The corresponding type is estimated from the dtype of the
+ default value.
- Parameters
- ----------
- func : var
- Function, or sequence of functions
-
- Examples
- --------
- >>> import dateutil.parser
- >>> import datetime
- >>> dateparser = dateutil.parser.parse
- >>> defaultdate = datetime.date(2000, 1, 1)
- >>> StringConverter.upgrade_mapper(dateparser, default=defaultdate)
+ Parameters
+ ----------
+ func : var
+ Function, or sequence of functions
+
+ Examples
+ --------
+ >>> import dateutil.parser
+ >>> import datetime
+ >>> dateparser = dateutil.parser.parse
+ >>> defaultdate = datetime.date(2000, 1, 1)
+ >>> StringConverter.upgrade_mapper(dateparser, default=defaultdate)
"""
# Func is a single functions
if hasattr(func, '__call__'):
@@ -586,9 +577,22 @@ class StringConverter:
else:
default = list(default)
default.append([None] * (len(func) - len(default)))
- for (fct, dft) in zip(func, default):
+ for fct, dft in zip(func, default):
cls._mapper.insert(-1, (cls._getsubdtype(dft), fct, dft))
- #
+
+ @classmethod
+ def _find_map_entry(cls, dtype):
+ # if a converter for the specific dtype is available use that
+ for i, (deftype, func, default_def) in enumerate(cls._mapper):
+ if dtype.type == deftype:
+ return i, (deftype, func, default_def)
+
+ # otherwise find an inexact match
+ for i, (deftype, func, default_def) in enumerate(cls._mapper):
+ if np.issubdtype(dtype.type, deftype):
+ return i, (deftype, func, default_def)
+
+ raise LookupError
def __init__(self, dtype_or_func=None, default=None, missing_values=None,
locked=False):
@@ -621,36 +625,26 @@ class StringConverter:
except ValueError:
default = None
dtype = self._getdtype(default)
- # Set the status according to the dtype
- _status = -1
- for (i, (deftype, func, default_def)) in enumerate(self._mapper):
- if np.issubdtype(dtype.type, deftype):
- _status = i
- if default is None:
- self.default = default_def
- else:
- self.default = default
- break
- # if a converter for the specific dtype is available use that
- last_func = func
- for (i, (deftype, func, default_def)) in enumerate(self._mapper):
- if dtype.type == deftype:
- _status = i
- last_func = func
- if default is None:
- self.default = default_def
- else:
- self.default = default
- break
- func = last_func
- if _status == -1:
- # We never found a match in the _mapper...
- _status = 0
+
+ # find the best match in our mapper
+ try:
+ self._status, (_, func, default_def) = self._find_map_entry(dtype)
+ except LookupError:
+ # no match
self.default = default
- self._status = _status
+ _, func, _ = self._mapper[-1]
+ self._status = 0
+ else:
+ # use the found default only if we did not already have one
+ if default is None:
+ self.default = default_def
+ else:
+ self.default = default
+
# If the input was a dtype, set the function to the last we saw
if self.func is None:
self.func = func
+
# If the status is 1 (int), change the function to
# something more robust.
if self.func == self._mapper[1][1]:
@@ -667,19 +661,17 @@ class StringConverter:
if isinstance(missing_values, str):
missing_values = missing_values.split(",")
self.missing_values = set(list(missing_values) + [''])
- #
+
self._callingfunction = self._strict_call
self.type = self._dtypeortype(dtype)
self._checked = False
self._initial_default = default
- #
def _loose_call(self, value):
try:
return self.func(value)
except ValueError:
return self.default
- #
def _strict_call(self, value):
try:
@@ -705,11 +697,9 @@ class StringConverter:
self._checked = False
return self.default
raise ValueError("Cannot convert string '%s'" % value)
- #
def __call__(self, value):
return self._callingfunction(value)
- #
def _do_upgrade(self):
# Raise an exception if we locked the converter...
diff --git a/numpy/lib/financial.py b/numpy/lib/financial.py
index b055bb1ec..709a79dc0 100644
--- a/numpy/lib/financial.py
+++ b/numpy/lib/financial.py
@@ -466,7 +466,7 @@ def _rbl(rate, per, pmt, pv, when):
This function is here to simply have a different name for the 'fv'
function to not interfere with the 'fv' keyword argument within the 'ipmt'
function. It is the 'remaining balance on loan' which might be useful as
- it's own function, but is easily calculated with the 'fv' function.
+ its own function, but is easily calculated with the 'fv' function.
"""
return fv(rate, (per - 1), pmt, pv, when)
diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index 74ae3ed6e..0b23dbebd 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -764,6 +764,30 @@ def copy(a, order='K', subok=False):
>>> x[0] == z[0]
False
+ Note that np.copy is a shallow copy and will not copy object
+ elements within arrays. This is mainly important for arrays
+ containing Python objects. The new array will contain the
+ same object, which may lead to surprises if that object can
+ be modified (is mutable):
+
+ >>> a = np.array([1, 'm', [2, 3, 4]], dtype=object)
+ >>> b = np.copy(a)
+ >>> b[2][0] = 10
+ >>> a
+ array([1, 'm', list([10, 3, 4])], dtype=object)
+
+ To ensure all elements within an ``object`` array are copied,
+ use `copy.deepcopy`:
+
+ >>> import copy
+ >>> a = np.array([1, 'm', [2, 3, 4]], dtype=object)
+ >>> c = copy.deepcopy(a)
+ >>> c[2][0] = 10
+ >>> c
+ array([1, 'm', list([10, 3, 4])], dtype=object)
+ >>> a
+ array([1, 'm', list([2, 3, 4])], dtype=object)
+
"""
return array(a, order=order, subok=subok, copy=True)
@@ -2026,7 +2050,7 @@ class vectorize:
self.pyfunc = pyfunc
self.cache = cache
self.signature = signature
- self._ufunc = None # Caching to improve default performance
+ self._ufunc = {} # Caching to improve default performance
if doc is None:
self.__doc__ = pyfunc.__doc__
@@ -2091,14 +2115,22 @@ class vectorize:
if self.otypes is not None:
otypes = self.otypes
- nout = len(otypes)
- # Note logic here: We only *use* self._ufunc if func is self.pyfunc
- # even though we set self._ufunc regardless.
- if func is self.pyfunc and self._ufunc is not None:
- ufunc = self._ufunc
+ # self._ufunc is a dictionary whose keys are the number of
+ # arguments (i.e. len(args)) and whose values are ufuncs created
+ # by frompyfunc. len(args) can be different for different calls if
+ # self.pyfunc has parameters with default values. We only use the
+ # cache when func is self.pyfunc, which occurs when the call uses
+ # only positional arguments and no arguments are excluded.
+
+ nin = len(args)
+ nout = len(self.otypes)
+ if func is not self.pyfunc or nin not in self._ufunc:
+ ufunc = frompyfunc(func, nin, nout)
else:
- ufunc = self._ufunc = frompyfunc(func, len(args), nout)
+ ufunc = None # We'll get it from self._ufunc
+ if func is self.pyfunc:
+ ufunc = self._ufunc.setdefault(nin, ufunc)
else:
# Get number of outputs and output types by calling the function on
# the first entries of args. We also cache the result to prevent
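
Illustrative sketch (not part of the patch): because `self._ufunc` is now keyed by the number of positional arguments, a vectorized function with optional parameters can be called with different call signatures without hitting a stale cached ufunc (gh-1620). A sketch assuming a build with this change; the helper name is invented for illustration:

    import numpy as np

    def scale_floor(x, y=1.0):
        return y * np.floor(x)

    f = np.vectorize(scale_floor, otypes=[float])
    r1 = f(np.arange(3.0), 2.0)   # caches a ufunc with nin=2
    r2 = f(np.arange(3.0))        # caches a ufunc with nin=1; previously raised ValueError
    assert np.array_equal(r2, np.floor(np.arange(3.0)))
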
@@ -3910,42 +3942,29 @@ def _quantile_ureduce_func(a, q, axis=None, out=None, overwrite_input=False,
if np.issubdtype(a.dtype, np.inexact):
indices_above = concatenate((indices_above, [-1]))
- weights_above = indices - indices_below
- weights_below = 1 - weights_above
-
- weights_shape = [1, ] * ap.ndim
- weights_shape[axis] = len(indices)
- weights_below.shape = weights_shape
- weights_above.shape = weights_shape
-
ap.partition(concatenate((indices_below, indices_above)), axis=axis)
# ensure axis with q-th is first
ap = np.moveaxis(ap, axis, 0)
- weights_below = np.moveaxis(weights_below, axis, 0)
- weights_above = np.moveaxis(weights_above, axis, 0)
axis = 0
+ weights_shape = [1] * ap.ndim
+ weights_shape[axis] = len(indices)
+ weights_above = (indices - indices_below).reshape(weights_shape)
+
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact):
indices_above = indices_above[:-1]
n = np.isnan(ap[-1:, ...])
- x1 = take(ap, indices_below, axis=axis) * weights_below
+ x1 = take(ap, indices_below, axis=axis) * (1 - weights_above)
x2 = take(ap, indices_above, axis=axis) * weights_above
- # ensure axis with q-th is first
- x1 = np.moveaxis(x1, axis, 0)
- x2 = np.moveaxis(x2, axis, 0)
-
if zerod:
x1 = x1.squeeze(0)
x2 = x2.squeeze(0)
- if out is not None:
- r = add(x1, x2, out=out)
- else:
- r = add(x1, x2)
+ r = add(x1, x2, out=out)
if np.any(n):
if zerod:
diff --git a/numpy/lib/histograms.py b/numpy/lib/histograms.py
index f080cc392..1a9b41ced 100644
--- a/numpy/lib/histograms.py
+++ b/numpy/lib/histograms.py
@@ -1047,7 +1047,15 @@ def histogramdd(sample, bins=10, range=None, normed=None, weights=None,
raise ValueError(
'`bins[{}]` must be positive, when an integer'.format(i))
smin, smax = _get_outer_edges(sample[:,i], range[i])
- edges[i] = np.linspace(smin, smax, bins[i] + 1)
+ try:
+ n = operator.index(bins[i])
+
+ except TypeError as e:
+ raise TypeError(
+ "`bins[{}]` must be an integer, when a scalar".format(i)
+ ) from e
+
+ edges[i] = np.linspace(smin, smax, n + 1)
elif np.ndim(bins[i]) == 1:
edges[i] = np.asarray(bins[i])
if np.any(edges[i][:-1] > edges[i][1:]):
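
Illustrative sketch (not part of the patch): with the `operator.index` check above, a per-dimension bin count that is not an integer fails with an explicit `TypeError` naming the offending entry. A sketch assuming a build with this change:

    import numpy as np

    sample = np.random.rand(20, 2)
    np.histogramdd(sample, bins=[3, 4])        # integer bin counts per dimension are fine
    try:
        np.histogramdd(sample, bins=[3.0, 4])  # a float bin count is rejected
    except TypeError as exc:
        print(exc)                             # "`bins[0]` must be an integer, when a scalar"
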
diff --git a/numpy/lib/setup.py b/numpy/lib/setup.py
index 5d0341d86..b3f441f38 100644
--- a/numpy/lib/setup.py
+++ b/numpy/lib/setup.py
@@ -2,7 +2,8 @@ def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('lib', parent_package, top_path)
- config.add_data_dir('tests')
+ config.add_subpackage('tests')
+ config.add_data_dir('tests/data')
return config
if __name__ == '__main__':
diff --git a/numpy/lib/shape_base.py b/numpy/lib/shape_base.py
index 72a7f79d7..78703555e 100644
--- a/numpy/lib/shape_base.py
+++ b/numpy/lib/shape_base.py
@@ -688,10 +688,12 @@ def dstack(tup):
See Also
--------
- stack : Join a sequence of arrays along a new axis.
- vstack : Stack along first axis.
- hstack : Stack along second axis.
concatenate : Join a sequence of arrays along an existing axis.
+ stack : Join a sequence of arrays along a new axis.
+ block : Assemble an nd-array from nested lists of blocks.
+ vstack : Stack arrays in sequence vertically (row wise).
+ hstack : Stack arrays in sequence horizontally (column wise).
+ column_stack : Stack 1-D arrays as columns into a 2-D array.
dsplit : Split array along third axis.
Examples
diff --git a/numpy/lib/tests/test__iotools.py b/numpy/lib/tests/test__iotools.py
index 6964c1128..a5b787025 100644
--- a/numpy/lib/tests/test__iotools.py
+++ b/numpy/lib/tests/test__iotools.py
@@ -177,12 +177,12 @@ class TestStringConverter:
# test str
# note that the longdouble type has been skipped, so the
# _status increases by 2. Everything should succeed with
- # unicode conversion (5).
+ # unicode conversion (8).
for s in ['a', b'a']:
res = converter.upgrade(s)
assert_(type(res) is str)
assert_equal(res, 'a')
- assert_equal(converter._status, 5 + status_offset)
+ assert_equal(converter._status, 8 + status_offset)
def test_missing(self):
"Tests the use of missing values."
diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py
index 23bf3296d..b4e928273 100644
--- a/numpy/lib/tests/test_function_base.py
+++ b/numpy/lib/tests/test_function_base.py
@@ -3,6 +3,7 @@ import warnings
import sys
import decimal
from fractions import Fraction
+import math
import pytest
import numpy as np
@@ -1221,6 +1222,16 @@ class TestExtins:
assert_array_equal(a, ac)
+# _foo1 and _foo2 are used in some tests in TestVectorize.
+
+def _foo1(x, y=1.0):
+ return y*math.floor(x)
+
+
+def _foo2(x, y=1.0, z=0.0):
+ return y*math.floor(x) + z
+
+
class TestVectorize:
def test_simple(self):
@@ -1252,7 +1263,6 @@ class TestVectorize:
assert_array_equal(y, x)
def test_ufunc(self):
- import math
f = vectorize(math.cos)
args = np.array([0, 0.5 * np.pi, np.pi, 1.5 * np.pi, 2 * np.pi])
r1 = f(args)
@@ -1273,6 +1283,63 @@ class TestVectorize:
r2 = np.array([3, 4, 5])
assert_array_equal(r1, r2)
+ def test_keywords_with_otypes_order1(self):
+ # gh-1620: The second call of f would crash with
+ # `ValueError: invalid number of arguments`.
+ f = vectorize(_foo1, otypes=[float])
+ # We're testing the caching of ufuncs by vectorize, so the order
+ # of these function calls is an important part of the test.
+ r1 = f(np.arange(3.0), 1.0)
+ r2 = f(np.arange(3.0))
+ assert_array_equal(r1, r2)
+
+ def test_keywords_with_otypes_order2(self):
+ # gh-1620: The second call of f would crash with
+ # `ValueError: non-broadcastable output operand with shape ()
+ # doesn't match the broadcast shape (3,)`.
+ f = vectorize(_foo1, otypes=[float])
+ # We're testing the caching of ufuncs by vectorize, so the order
+ # of these function calls is an important part of the test.
+ r1 = f(np.arange(3.0))
+ r2 = f(np.arange(3.0), 1.0)
+ assert_array_equal(r1, r2)
+
+ def test_keywords_with_otypes_order3(self):
+ # gh-1620: The third call of f would crash with
+ # `ValueError: invalid number of arguments`.
+ f = vectorize(_foo1, otypes=[float])
+ # We're testing the caching of ufuncs by vectorize, so the order
+ # of these function calls is an important part of the test.
+ r1 = f(np.arange(3.0))
+ r2 = f(np.arange(3.0), y=1.0)
+ r3 = f(np.arange(3.0))
+ assert_array_equal(r1, r2)
+ assert_array_equal(r1, r3)
+
+ def test_keywords_with_otypes_several_kwd_args1(self):
+ # gh-1620 Make sure different uses of keyword arguments
+ # don't break the vectorized function.
+ f = vectorize(_foo2, otypes=[float])
+ # We're testing the caching of ufuncs by vectorize, so the order
+ # of these function calls is an important part of the test.
+ r1 = f(10.4, z=100)
+ r2 = f(10.4, y=-1)
+ r3 = f(10.4)
+ assert_equal(r1, _foo2(10.4, z=100))
+ assert_equal(r2, _foo2(10.4, y=-1))
+ assert_equal(r3, _foo2(10.4))
+
+ def test_keywords_with_otypes_several_kwd_args2(self):
+ # gh-1620 Make sure different uses of keyword arguments
+ # don't break the vectorized function.
+ f = vectorize(_foo2, otypes=[float])
+ # We're testing the caching of ufuncs by vectorize, so the order
+ # of these function calls is an important part of the test.
+ r1 = f(z=100, x=10.4, y=-1)
+ r2 = f(1, 2, 3)
+ assert_equal(r1, _foo2(z=100, x=10.4, y=-1))
+ assert_equal(r2, _foo2(1, 2, 3))
+
def test_keywords_no_func_code(self):
# This needs to test a function that has keywords but
# no func_code attribute, since otherwise vectorize will
diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py
index 9abde3e11..99d119362 100644
--- a/numpy/lib/tests/test_io.py
+++ b/numpy/lib/tests/test_io.py
@@ -1567,6 +1567,13 @@ M 33 21.99
test = np.genfromtxt(TextIO(data), delimiter=";",
dtype=ndtype, converters=converters)
+ def test_dtype_with_object_no_converter(self):
+ # Object without a converter uses bytes:
+ parsed = np.genfromtxt(TextIO("1"), dtype=object)
+ assert parsed[()] == b"1"
+ parsed = np.genfromtxt(TextIO("string"), dtype=object)
+ assert parsed[()] == b"string"
+
def test_userconverters_with_explicit_dtype(self):
# Test user_converters w/ explicit (standard) dtype
data = TextIO('skip,skip,2001-01-01,1.0,skip')
diff --git a/numpy/linalg/setup.py b/numpy/linalg/setup.py
index acfab0a68..57fdd502b 100644
--- a/numpy/linalg/setup.py
+++ b/numpy/linalg/setup.py
@@ -6,7 +6,7 @@ def configuration(parent_package='', top_path=None):
from numpy.distutils.system_info import get_info, system_info
config = Configuration('linalg', parent_package, top_path)
- config.add_data_dir('tests')
+ config.add_subpackage('tests')
# Configure lapack_lite
diff --git a/numpy/ma/core.py b/numpy/ma/core.py
index a7214f9bf..8d612b8ed 100644
--- a/numpy/ma/core.py
+++ b/numpy/ma/core.py
@@ -191,14 +191,17 @@ for v in ["Y", "M", "W", "D", "h", "m", "s", "ms", "us", "ns", "ps",
default_filler["M8[" + v + "]"] = np.datetime64("NaT", v)
default_filler["m8[" + v + "]"] = np.timedelta64("NaT", v)
+float_types_list = [np.half, np.single, np.double, np.longdouble,
+ np.csingle, np.cdouble, np.clongdouble]
max_filler = ntypes._minvals
-max_filler.update([(k, -np.inf) for k in [np.float32, np.float64]])
+max_filler.update([(k, -np.inf) for k in float_types_list[:4]])
+max_filler.update([(k, complex(-np.inf, -np.inf)) for k in float_types_list[-3:]])
+
min_filler = ntypes._maxvals
-min_filler.update([(k, +np.inf) for k in [np.float32, np.float64]])
-if 'float128' in ntypes.typeDict:
- max_filler.update([(np.float128, -np.inf)])
- min_filler.update([(np.float128, +np.inf)])
+min_filler.update([(k, +np.inf) for k in float_types_list[:4]])
+min_filler.update([(k, complex(+np.inf, +np.inf)) for k in float_types_list[-3:]])
+del float_types_list
def _recursive_fill_value(dtype, f):
"""
@@ -5860,6 +5863,14 @@ class MaskedArray(ndarray):
Return (maximum - minimum) along the given dimension
(i.e. peak-to-peak value).
+ .. warning::
+ `ptp` preserves the data type of the array. This means the
+ return value for an input of signed integers with n bits
+ (e.g. `np.int8`, `np.int16`, etc.) is also a signed integer
+ with n bits. In that case, peak-to-peak values greater than
+ ``2**(n-1)-1`` will be returned as negative values. An example
+ with a work-around is shown below.
+
Parameters
----------
axis : {None, int}, optional
@@ -5882,6 +5893,45 @@ class MaskedArray(ndarray):
A new array holding the result, unless ``out`` was
specified, in which case a reference to ``out`` is returned.
+ Examples
+ --------
+ >>> x = np.ma.MaskedArray([[4, 9, 2, 10],
+ ... [6, 9, 7, 12]])
+
+ >>> x.ptp(axis=1)
+ masked_array(data=[8, 6],
+ mask=False,
+ fill_value=999999)
+
+ >>> x.ptp(axis=0)
+ masked_array(data=[2, 0, 5, 2],
+ mask=False,
+ fill_value=999999)
+
+ >>> x.ptp()
+ 10
+
+ This example shows that a negative value can be returned when
+ the input is an array of signed integers.
+
+ >>> y = np.ma.MaskedArray([[1, 127],
+ ... [0, 127],
+ ... [-1, 127],
+ ... [-2, 127]], dtype=np.int8)
+ >>> y.ptp(axis=1)
+ masked_array(data=[ 126, 127, -128, -127],
+ mask=False,
+ fill_value=999999,
+ dtype=int8)
+
+ A work-around is to use the `view()` method to view the result as
+ unsigned integers with the same bit width:
+
+ >>> y.ptp(axis=1).view(np.uint8)
+ masked_array(data=[126, 127, 128, 129],
+ mask=False,
+ fill_value=999999,
+ dtype=uint8)
"""
if out is None:
result = self.max(axis=axis, fill_value=fill_value,
diff --git a/numpy/ma/setup.py b/numpy/ma/setup.py
index 144a961c2..d3f34c874 100644
--- a/numpy/ma/setup.py
+++ b/numpy/ma/setup.py
@@ -2,7 +2,7 @@
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('ma', parent_package, top_path)
- config.add_data_dir('tests')
+ config.add_subpackage('tests')
return config
if __name__ == "__main__":
diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py
index 98fc7dd97..6f34144bb 100644
--- a/numpy/ma/tests/test_core.py
+++ b/numpy/ma/tests/test_core.py
@@ -1245,6 +1245,48 @@ class TestMaskedArrayArithmetic:
assert_(x.max() is masked)
assert_(x.ptp() is masked)
+ def test_minmax_dtypes(self):
+ # Additional tests on max/min for non-standard float and complex dtypes
+ x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
+ a10 = 10.
+ an10 = -10.0
+ m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
+ xm = masked_array(x, mask=m1)
+ xm.set_fill_value(1e+20)
+ float_dtypes = [np.half, np.single, np.double,
+ np.longdouble, np.cfloat, np.cdouble, np.clongdouble]
+ for float_dtype in float_dtypes:
+ assert_equal(masked_array(x, mask=m1, dtype=float_dtype).max(),
+ float_dtype(a10))
+ assert_equal(masked_array(x, mask=m1, dtype=float_dtype).min(),
+ float_dtype(an10))
+
+ assert_equal(xm.min(), an10)
+ assert_equal(xm.max(), a10)
+
+ # Non-complex type only test
+ for float_dtype in float_dtypes[:4]:
+ assert_equal(masked_array(x, mask=m1, dtype=float_dtype).max(),
+ float_dtype(a10))
+ assert_equal(masked_array(x, mask=m1, dtype=float_dtype).min(),
+ float_dtype(an10))
+
+ # Complex types only test
+ for float_dtype in float_dtypes[-3:]:
+ ym = masked_array([1e20+1j, 1e20-2j, 1e20-1j], mask=[0, 1, 0],
+ dtype=float_dtype)
+ assert_equal(ym.min(), float_dtype(1e20-1j))
+ assert_equal(ym.max(), float_dtype(1e20+1j))
+
+ zm = masked_array([np.inf+2j, np.inf+3j, -np.inf-1j], mask=[0, 1, 0],
+ dtype=float_dtype)
+ assert_equal(zm.min(), float_dtype(-np.inf-1j))
+ assert_equal(zm.max(), float_dtype(np.inf+2j))
+
+ cmax = np.inf - 1j * np.finfo(np.float64).max
+ assert masked_array([-cmax, 0], mask=[0, 1]).max() == -cmax
+ assert masked_array([cmax, 0], mask=[0, 1]).min() == cmax
+
def test_addsumprod(self):
# Tests add, sum, product.
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
diff --git a/numpy/matrixlib/defmatrix.py b/numpy/matrixlib/defmatrix.py
index ac7d472bc..a9ee74a5b 100644
--- a/numpy/matrixlib/defmatrix.py
+++ b/numpy/matrixlib/defmatrix.py
@@ -1024,8 +1024,8 @@ def _from_string(str, gdict, ldict):
except KeyError:
try:
thismat = gdict[col]
- except KeyError:
- raise KeyError("%s not found" % (col,))
+ except KeyError as e:
+ raise NameError(f"name {col!r} is not defined") from None
coltup.append(thismat)
rowtup.append(concatenate(coltup, axis=-1))
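
Illustrative sketch (not part of the patch): `np.bmat` resolves names in a string specification via the caller's namespace; with the change above, an unknown name surfaces as a `NameError` rather than a bare `KeyError`. A sketch assuming a build with this change, run at module level so `A` is visible to `bmat`:

    import numpy as np

    A = np.matrix('1 2; 3 4')
    np.bmat('A A; A A')   # names are looked up in the caller's frame
    try:
        np.bmat('B B')    # B is not defined anywhere
    except NameError as exc:
        print(exc)        # "name 'B' is not defined"
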
diff --git a/numpy/matrixlib/setup.py b/numpy/matrixlib/setup.py
index 529d2a2eb..19b3bb2de 100644
--- a/numpy/matrixlib/setup.py
+++ b/numpy/matrixlib/setup.py
@@ -2,7 +2,7 @@
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('matrixlib', parent_package, top_path)
- config.add_data_dir('tests')
+ config.add_subpackage('tests')
return config
if __name__ == "__main__":
diff --git a/numpy/polynomial/setup.py b/numpy/polynomial/setup.py
index 8fc82cba1..641464518 100644
--- a/numpy/polynomial/setup.py
+++ b/numpy/polynomial/setup.py
@@ -1,7 +1,7 @@
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('polynomial', parent_package, top_path)
- config.add_data_dir('tests')
+ config.add_subpackage('tests')
return config
if __name__ == '__main__':
diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx
index 274dba8c4..111c2790c 100644
--- a/numpy/random/_generator.pyx
+++ b/numpy/random/_generator.pyx
@@ -516,7 +516,7 @@ cdef class Generator:
@cython.wraparound(True)
def choice(self, a, size=None, replace=True, p=None, axis=0, bint shuffle=True):
"""
- choice(a, size=None, replace=True, p=None, axis=0, shuffle=True):
+ choice(a, size=None, replace=True, p=None, axis=0, shuffle=True)
Generates a random sample from a given 1-D array
diff --git a/numpy/random/_mt19937.pyx b/numpy/random/_mt19937.pyx
index 919a96a4c..16a377cc6 100644
--- a/numpy/random/_mt19937.pyx
+++ b/numpy/random/_mt19937.pyx
@@ -226,6 +226,23 @@ cdef class MT19937(BitGenerator):
-------
bit_generator : MT19937
New instance of generator jumped iter times
+
+ Notes
+ -----
+ The jump step is computed using a modified version of Matsumoto's
+ implementation of Horner's method. The step polynomial is precomputed
+ to perform 2**128 steps. The jumped state has been verified to match
+ the state produced using Matsumoto's original code.
+
+ References
+ ----------
+ .. [1] Matsumoto, M, Generating multiple disjoint streams of
+ pseudorandom number sequences. Accessed on: May 6, 2020.
+ http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/JUMP/
+ .. [2] Hiroshi Haramoto, Makoto Matsumoto, Takuji Nishimura, François
+ Panneton, Pierre L\'Ecuyer, "Efficient Jump Ahead for F2-Linear
+ Random Number Generators", INFORMS JOURNAL ON COMPUTING, Vol. 20,
+ No. 3, Summer 2008, pp. 385-390.
"""
cdef MT19937 bit_generator
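
Illustrative sketch (not part of the patch): the precomputed 2**128-step polynomial lets `jumped()` produce widely separated, effectively independent MT19937 streams, for example one per worker. A sketch assuming a build with this change:

    from numpy.random import Generator, MT19937

    bit_gen = MT19937(12345)
    generators = []
    for _ in range(4):
        generators.append(Generator(bit_gen))
        bit_gen = bit_gen.jumped()   # new bit generator advanced by 2**128 steps

    samples = [g.standard_normal(3) for g in generators]
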
diff --git a/numpy/random/mtrand.pyx b/numpy/random/mtrand.pyx
index 6f2ba871c..f2805871d 100644
--- a/numpy/random/mtrand.pyx
+++ b/numpy/random/mtrand.pyx
@@ -807,7 +807,7 @@ cdef class RandomState:
Generates a random sample from a given 1-D array
- .. versionadded:: 1.7.0
+ .. versionadded:: 1.7.0
.. note::
New code should use the ``choice`` method of a ``default_rng()``
diff --git a/numpy/random/setup.py b/numpy/random/setup.py
index 90ec42671..88ddb1268 100644
--- a/numpy/random/setup.py
+++ b/numpy/random/setup.py
@@ -31,7 +31,8 @@ def configuration(parent_package='', top_path=None):
('_LARGEFILE64_SOURCE', '1')]
defs.append(('NPY_NO_DEPRECATED_API', 0))
- config.add_data_dir('tests')
+ config.add_subpackage('tests')
+ config.add_data_dir('tests/data')
config.add_data_dir('_examples')
EXTRA_LINK_ARGS = []
diff --git a/numpy/random/src/mt19937/mt19937-jump.c b/numpy/random/src/mt19937/mt19937-jump.c
index 46b28cf96..1a83a4c2e 100644
--- a/numpy/random/src/mt19937/mt19937-jump.c
+++ b/numpy/random/src/mt19937/mt19937-jump.c
@@ -10,28 +10,6 @@ unsigned long get_coef(unsigned long *pf, unsigned int deg) {
return (0);
}
-/* 32-bit function */
-/* set the coefficient of the polynomial pf with v */
-void set_coef(unsigned long *pf, unsigned int deg, unsigned long v) {
- if (v != 0)
- pf[deg >> 5] ^= (LSB << (deg & 0x1ful));
- else
- ;
-}
-
-void gray_code(unsigned long *h) {
- unsigned int i, j = 1, l = 1, term = LL;
-
- h[0] = 0;
-
- for (i = 1; i <= QQ; i++) {
- l = (l << 1);
- term = (term >> 1);
- for (; j < l; j++)
- h[j] = h[l - j - 1] ^ term;
- }
-}
-
void copy_state(mt19937_state *target_state, mt19937_state *state) {
int i;
@@ -83,69 +61,6 @@ void add_state(mt19937_state *state1, mt19937_state *state2) {
}
}
-/*
-void gen_vec_h(mt19937_state *state, mt19937_state *vec_h,
- unsigned long *h) {
- int i;
- unsigned long k, g;
- mt19937_state v;
-
- gray_code(h);
-
- copy_state(&vec_h[0], state);
-
- for (i = 0; i < QQ; i++)
- gen_next(&vec_h[0]);
-
- for (i = 1; i < LL; i++) {
- copy_state(&v, state);
- g = h[i] ^ h[i - 1];
- for (k = 1; k < g; k = (k << 1))
- gen_next(&v);
- copy_state(&vec_h[h[i]], &vec_h[h[i - 1]]);
- add_state(&vec_h[h[i]], &v);
- }
-}
-*/
-
-/* compute pf(ss) using Sliding window algorithm */
-/*
-void calc_state(unsigned long *pf, mt19937_state *state,
- mt19937_state *vec_h) {
- mt19937_state *temp1;
- int i = MEXP - 1, j, digit, skip = 0;
-
- temp1 = (mt19937_state *)calloc(1, sizeof(mt19937_state));
-
- while (get_coef(pf, i) == 0)
- i--;
-
- for (; i >= QQ; i--) {
- if (get_coef(pf, i) != 0) {
- for (j = 0; j < QQ + 1; j++)
- gen_next(temp1);
- digit = 0;
- for (j = 0; j < QQ; j++)
- digit = (digit << 1) ^ get_coef(pf, i - j - 1);
- add_state(temp1, &vec_h[digit]);
- i -= QQ;
- } else
- gen_next(temp1);
- }
-
- for (; i > -1; i--) {
- gen_next(temp1);
- if (get_coef(pf, i) == 1)
- add_state(temp1, state);
- else
- ;
- }
-
- copy_state(state, temp1);
- free(temp1);
-}
-*/
-
/* compute pf(ss) using standard Horner method */
void horner1(unsigned long *pf, mt19937_state *state) {
int i = MEXP - 1;
@@ -180,18 +95,15 @@ void horner1(unsigned long *pf, mt19937_state *state) {
free(temp);
}
-void mt19937_jump_state(mt19937_state *state, const char *jump_str) {
+void mt19937_jump_state(mt19937_state *state) {
unsigned long *pf;
int i;
pf = (unsigned long *)calloc(P_SIZE, sizeof(unsigned long));
-
- for (i = MEXP - 1; i > -1; i--) {
- if (jump_str[i] == '1')
- set_coef(pf, i, 1);
+ for (i = 0; i < P_SIZE; i++) {
+ pf[i] = poly_coef[i];
}
- /* TODO: Should generate the next set and start from 0, but doesn't matter ??
- */
+
if (state->pos >= N) {
state->pos = 0;
}
@@ -200,25 +112,3 @@ void mt19937_jump_state(mt19937_state *state, const char *jump_str) {
free(pf);
}
-/*
-void mt19937_jump(mt19937_state *state, const char *jump_str)
-{
- unsigned long h[LL];
- mt19937_state vec_h[LL];
- unsigned long *pf;
- int i;
-
- pf = (unsigned long *)calloc(P_SIZE, sizeof(unsigned long));
-
- for (i = MEXP - 1; i > -1; i--)
- {
- if (jump_str[i] == '1')
- set_coef(pf, i, 1);
- }
-
- gen_vec_h(state, &vec_h, &h);
- calc_state(pf, state, &vec_h);
-
- free(pf);
-}
-*/ \ No newline at end of file
diff --git a/numpy/random/src/mt19937/mt19937-jump.h b/numpy/random/src/mt19937/mt19937-jump.h
index 394c150a0..8371cbd5f 100644
--- a/numpy/random/src/mt19937/mt19937-jump.h
+++ b/numpy/random/src/mt19937/mt19937-jump.h
@@ -10,6 +10,142 @@
#define QQ 7
#define LL 128 /* LL = 2^(QQ) */
-void mt19937_jump_state(mt19937_state *state, const char *jump_str);
+void mt19937_jump_state(mt19937_state *state);
-void set_coef(unsigned long *pf, unsigned int deg, unsigned long v); \ No newline at end of file
+void set_coef(unsigned long *pf, unsigned int deg, unsigned long v);
+
+/*
+ * 2**128 step polynomial produced using the file mt19937-generate-jump-poly.c
+ * (randomgen) which is a modified version of minipoly_mt19937.c as distributed
+ * in
+ * http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/JUMP/jump_ahead_1.02.tar.gz
+ *
+ * These files are not part of NumPy.
+ */
+
+static const unsigned long poly_coef[624] = {
+ 1927166307UL, 3044056772UL, 2284297142UL, 2820929765UL, 651705945UL,
+ 69149273UL, 3892165397UL, 2337412983UL, 1219880790UL, 3207074517UL,
+ 3836784057UL, 189286826UL, 1049791363UL, 3916249550UL, 2942382547UL,
+ 166392552UL, 861176918UL, 3246476411UL, 2302311555UL, 4273801148UL,
+ 29196903UL, 1363664063UL, 3802562022UL, 2600400244UL, 3090369801UL,
+ 4040416970UL, 1432485208UL, 3632558139UL, 4015816763UL, 3013316418UL,
+ 551532385UL, 3592224467UL, 3479125595UL, 1195467127UL, 2391032553UL,
+ 2393493419UL, 1482493632UL, 1625159565UL, 748389672UL, 4042774030UL,
+ 2998615036UL, 3393119101UL, 2177492569UL, 2265897321UL, 2507383006UL,
+ 3461498961UL, 2003319700UL, 1942857197UL, 1455226044UL, 4097545580UL,
+ 529653268UL, 3204756480UL, 2486748289UL, 495294513UL, 3396001954UL,
+ 2643963605UL, 2655404568UL, 3881604377UL, 624710790UL, 3443737948UL,
+ 1941294296UL, 2139259604UL, 3368734020UL, 422436761UL, 3602810182UL,
+ 1384691081UL, 3035786407UL, 2551797119UL, 537227499UL, 65486120UL,
+ 642436100UL, 2023822537UL, 2515598203UL, 1122953367UL, 2882306242UL,
+ 1743213032UL, 321965189UL, 336496623UL, 2436602518UL, 3556266590UL,
+ 1055117829UL, 463541647UL, 743234441UL, 527083645UL, 2606668346UL,
+ 2274046499UL, 2761475053UL, 2760669048UL, 2538258534UL, 487125077UL,
+ 3365962306UL, 3604906217UL, 2714700608UL, 680709708UL, 2217161159UL,
+ 1614899374UL, 3710119533UL, 3201300658UL, 3752620679UL, 2755041105UL,
+ 3129723037UL, 1247297753UL, 2812642690UL, 4114340845UL, 3485092247UL,
+ 2752814364UL, 3586551747UL, 4073138437UL, 3462966585UL, 2924318358UL,
+ 4061374901UL, 3314086806UL, 2640385723UL, 744590670UL, 3007586513UL,
+ 3959120371UL, 997207767UL, 3420235506UL, 2092400998UL, 3190305685UL,
+ 60965738UL, 549507222UL, 3784354415UL, 3209279509UL, 1238863299UL,
+ 2605037827UL, 178570440UL, 1743491299UL, 4079686640UL, 2136795825UL,
+ 3435430548UL, 1679732443UL, 1835708342UL, 2159367000UL, 1924487218UL,
+ 4059723674UL, 996192116UL, 2308091645UL, 1336281586UL, 674600050UL,
+ 1642572529UL, 1383973289UL, 2202960007UL, 3165481279UL, 3385474038UL,
+ 2501318550UL, 2671842890UL, 3084085109UL, 3475033915UL, 1551329147UL,
+ 4101397249UL, 1205851807UL, 3641536021UL, 3607635071UL, 1609126163UL,
+ 2910426664UL, 3324508658UL, 4244311266UL, 254034382UL, 1258304384UL,
+ 1914048768UL, 1358592011UL, 527610138UL, 3072108727UL, 4289413885UL,
+ 1417001678UL, 2445445945UL, 896462712UL, 339855811UL, 3699378285UL,
+ 2529457297UL, 3049459401UL, 2723472429UL, 2838633181UL, 2520397330UL,
+ 3272339035UL, 1667003847UL, 3742634787UL, 942706520UL, 2301027215UL,
+ 1907791250UL, 2306299096UL, 1021173342UL, 1539334516UL, 2907834628UL,
+ 3199959207UL, 1556251860UL, 3642580275UL, 2355865416UL, 285806145UL,
+ 867932457UL, 1177354172UL, 3291107470UL, 4022765061UL, 1613380116UL,
+ 588147929UL, 650574324UL, 1236855601UL, 1371354511UL, 2085218212UL,
+ 1203081931UL, 420526905UL, 1022192219UL, 2903287064UL, 2470845899UL,
+ 3649873273UL, 2502333582UL, 3972385637UL, 4246356763UL, 199084157UL,
+ 1567178788UL, 2107121836UL, 4293612856UL, 1902910177UL, 332397359UL,
+ 83422598UL, 3614961721UL, 456321943UL, 2277615967UL, 2302518510UL,
+ 3258315116UL, 2521897172UL, 3900282042UL, 4186973154UL, 3146532165UL,
+ 2299685029UL, 3889120948UL, 1293301857UL, 187455105UL, 3395849230UL,
+ 913321567UL, 3093513909UL, 1440944571UL, 1923481911UL, 338680924UL,
+ 1204882963UL, 2739724491UL, 2886241328UL, 2408907774UL, 1299817192UL,
+ 2474012871UL, 45400213UL, 553186784UL, 134558656UL, 2180943666UL,
+ 2870807589UL, 76511085UL, 3053566760UL, 2516601415UL, 4172865902UL,
+ 1751297915UL, 1251975234UL, 2964780642UL, 1412975316UL, 2739978478UL,
+ 2171013719UL, 637935041UL, 975972384UL, 3044407449UL, 3111425639UL,
+ 1938684970UL, 2860857400UL, 13419586UL, 2772079268UL, 3484375614UL,
+ 3184054178UL, 159924837UL, 1386213021UL, 2765617231UL, 2523689118UL,
+ 1283505218UL, 3510789588UL, 4125878259UL, 2990287597UL, 2152014833UL,
+ 3084155970UL, 2815101609UL, 1932985704UL, 114887365UL, 1712687646UL,
+ 2550515629UL, 3299051916UL, 2022747614UL, 2143630992UL, 2244188960UL,
+ 3309469192UL, 3234358520UL, 800720365UL, 3278176634UL, 554357439UL,
+ 2415629802UL, 1620877315UL, 2389462898UL, 2229691332UL, 1007748450UL,
+ 1966873768UL, 2264971043UL, 1214524156UL, 346854700UL, 3471905342UL,
+ 3984889660UL, 4034246840UL, 216712649UL, 4027196762UL, 3754772604UL,
+ 2121785562UL, 2347070732UL, 7457687UL, 1443375102UL, 683948143UL,
+ 2940226032UL, 3211475670UL, 2836507357UL, 774899409UL, 1588968308UL,
+ 780438009UL, 3278878781UL, 2217181540UL, 2184194887UL, 1642129086UL,
+ 69346830UL, 297114710UL, 3841068188UL, 2631265450UL, 4167492314UL,
+ 2613519651UL, 1388582503UL, 2171556668UL, 1201873758UL, 2698772382UL,
+ 207791958UL, 3936134563UL, 3725025702UL, 3306317801UL, 1055730422UL,
+ 4069230694UL, 1767821343UL, 4252407395UL, 2422583118UL, 3158834399UL,
+ 3754582617UL, 1112422556UL, 376187931UL, 3137549150UL, 712221089UL,
+ 3300799453UL, 3868250200UL, 1165257666UL, 2494837767UL, 131304831UL,
+ 1619349427UL, 1958236644UL, 3678218946UL, 3651007751UL, 2261987899UL,
+ 1567368524UL, 2193599522UL, 3034394674UL, 2994602555UL, 3072727647UL,
+ 889094521UL, 1089692095UL, 1822324824UL, 3876999182UL, 1703361286UL,
+ 902229515UL, 4213728487UL, 3838170364UL, 672727494UL, 2240733828UL,
+ 3858539469UL, 1149254245UL, 4166055926UL, 4193525313UL, 1709921593UL,
+ 2278290377UL, 3190784116UL, 2919588882UL, 1012709717UL, 3640562031UL,
+ 2931984863UL, 3515665246UL, 250577343UL, 1147230194UL, 1183856202UL,
+ 3734511989UL, 3243867808UL, 3499383067UL, 2985115159UL, 2036821626UL,
+ 3298159553UL, 2726542838UL, 1686910320UL, 1778823772UL, 965412224UL,
+ 233509772UL, 3843098861UL, 1312622954UL, 500855830UL, 2950562091UL,
+ 1915683607UL, 3405781138UL, 596073719UL, 2195150546UL, 3381728478UL,
+ 546426436UL, 3527890868UL, 2324975353UL, 2241074266UL, 3992514859UL,
+ 2576108287UL, 4077653225UL, 2632319392UL, 3127212632UL, 917000669UL,
+ 2498161805UL, 3980835128UL, 2259526768UL, 1083920509UL, 1187452089UL,
+ 97018536UL, 3056075838UL, 2059706760UL, 2373335692UL, 182196406UL,
+ 2136713111UL, 1762080153UL, 1572125803UL, 1145919955UL, 1023966754UL,
+ 3921694345UL, 1632005969UL, 1418372326UL, 354407429UL, 2438288265UL,
+ 1620072033UL, 1586320921UL, 1044153697UL, 969324572UL, 613487980UL,
+ 4230993062UL, 397726764UL, 2194259193UL, 735511759UL, 2066049260UL,
+ 88093248UL, 1562536153UL, 2114157419UL, 3630951546UL, 589238503UL,
+ 3120654384UL, 2521793793UL, 2746692127UL, 2557723425UL, 889897693UL,
+ 2778878177UL, 643269509UL, 3342389831UL, 19218890UL, 3442706236UL,
+ 3314581273UL, 3503147052UL, 1546343434UL, 1448529060UL, 529038801UL,
+ 2748942264UL, 2213019208UL, 111314040UL, 2488697563UL, 1180642808UL,
+ 2605272289UL, 4207476668UL, 1502558669UL, 2972370981UL, 4204339995UL,
+ 1046225278UL, 992840610UL, 3847290298UL, 2387673094UL, 2221565747UL,
+ 1045901716UL, 3997739302UL, 1556952765UL, 1103336648UL, 279418400UL,
+ 2711316466UL, 2336215718UL, 2317900806UL, 974624729UL, 909575434UL,
+ 1675610631UL, 1922393214UL, 2054896570UL, 3197007361UL, 3932554569UL,
+ 1008619802UL, 3349254938UL, 113511461UL, 932630384UL, 2098759268UL,
+ 3436837432UL, 3119972401UL, 1612590197UL, 2281609013UL, 4174211248UL,
+ 4016332246UL, 2097525539UL, 1398632760UL, 1543697535UL, 2419227174UL,
+ 1676465074UL, 2882923045UL, 23216933UL, 808195649UL, 3690720147UL,
+ 484419260UL, 2254772642UL, 2975434733UL, 288528113UL, 204598404UL,
+ 589968818UL, 3021152400UL, 2463155141UL, 1397846755UL, 157285579UL,
+ 4230258857UL, 2469135246UL, 625357422UL, 3435224647UL, 465239124UL,
+ 1022535736UL, 2823317040UL, 274194469UL, 2214966446UL, 3661001613UL,
+ 518802547UL, 2293436304UL, 1335881988UL, 2247010176UL, 1856732584UL,
+ 1088028094UL, 1877563709UL, 1015352636UL, 1700817932UL, 2960695857UL,
+ 1882229300UL, 1666906557UL, 1838841022UL, 3983797810UL, 1667630361UL,
+ 385998221UL, 241341791UL, 403550441UL, 2629200403UL, 3552759102UL,
+ 2029750442UL, 2247999048UL, 2726665298UL, 2507798776UL, 2419064129UL,
+ 1266444923UL, 526255242UL, 2384866697UL, 1886200981UL, 3954956408UL,
+ 2171436866UL, 2295200753UL, 1047315850UL, 1967809707UL, 2860382973UL,
+ 3918334466UL, 3057439479UL, 952682588UL, 1925559679UL, 3112119050UL,
+ 3833190964UL, 1430139895UL, 2089165610UL, 3009202424UL, 3989186157UL,
+ 3395807230UL, 347600520UL, 120428923UL, 3017004655UL, 1384933954UL,
+ 303039929UL, 234010146UL, 2278760249UL, 315514836UL, 3987659575UL,
+ 1239335668UL, 2387869477UL, 3885908826UL, 1983922602UL, 698609264UL,
+ 3009002846UL, 1520611399UL, 809159940UL, 3089771783UL, 374838722UL,
+ 2789914419UL, 2500831937UL, 3751970335UL, 4279852547UL, 2362894437UL,
+ 1588814060UL, 1671213155UL, 434218829UL, 2126587176UL, 2002526422UL,
+ 2756464095UL, 141700479UL, 2965974322UL, 2211530172UL, 992085992UL,
+ 1943691492UL, 2705131817UL, 2519208889UL, 1938768395UL, 3949294294UL,
+ 354046666UL, 2158272751UL, 602858583UL, 0UL};
diff --git a/numpy/random/src/mt19937/mt19937-poly.h b/numpy/random/src/mt19937/mt19937-poly.h
deleted file mode 100644
index b03747881..000000000
--- a/numpy/random/src/mt19937/mt19937-poly.h
+++ /dev/null
@@ -1,207 +0,0 @@
-static const char * poly =
-"0001000111110111011100100010101111000000010100100101000001110111100010101000110100101001011001010"
-"1110101101100101011100101101011001110011100011110100001000001011100101100010100000010011101110011"
-"0100001001111010000100100101001011100111101101001100000111001000011101100100010000001111110100010"
-"0000111101000101000101101111001011000011001001001011010011001000001000011100100010110101111111101"
-"0010001001100010011011101111101110111010111000010000011010110011111101100000100100101001010000001"
-"1001111000011010011101001101011000111001110010110000011000110101111010110011011000001110110010001"
-"1001101011011101000011001011111111100011001010111100000001111011111101000101000011000011111100101"
-"0100001111101010101100000110100110010010101011011100110011000101100101011110010101110000101011100"
-"0001010100010110100000111001100000011101011001101000001000101101010100010101100000100011110110011"
-"0101100110111101010111100010100110100011111011100111000001110110010000000100000110101010111001111"
-"0011110010000110101101010001110010100111111111100100101010010011101111011000010111101001110110110"
-"1011101101101100110111000100101100111001011111110101001000011111010011000111110011100100001101111"
-"1001010110110001000100001001000010000000001011011100101010010100011000110101001000010101100111101"
-"0011110101100110111100000111001011011001100101111011000101001011011111110110100010001100101001100"
-"1111110011111111110111011011100011000100110011011011011001101011100110010001111100001111100100001"
-"1000100011001010100101010100111110001100111111011111100100011110011101101000110100101110010111111"
-"1001010110000101001110010110001011011010101111111001110001100100011001000010111001011011000111100"
-"1101001011110111111010011000110100001010000000101010101001111101111110101111110101110101010010100"
-"1100100101010110011111001101100110001011000101010001000110011011111101111110001100000010110110101"
-"1111110100001011101011101110111101100001111000011100000110110100100100100101011000111000100110001"
-"0110110001001000111110101111000000100100010100100101101111100011010100111101110010000001011111111"
-"1101010000011001010101111001111110001111100010100010100001011001110001010010100001011111110110111"
-"1100100100001111000111110111000100010101010110100111100001011001101001111101001110010110110011010"
-"1000010011000110000000110110110000111010010000111001100010100101010101111100010111000000011101110"
-"1100011010110001101100110000001010001100111101101011100111110111000110010011011011001101001111100"
-"1011111001100011010110101111100110111101011100000011000010001010001101001011000001111000101000100"
-"0110001011001010110000001101100000011000011110010000101000011010011110001101111111010010101100100"
-"1111010100000011011001111111011011111001101110101010110111110110101000100001011110111010100111100"
-"0000001001111100111111111000100000100100010001011001100001111100100000001111011101100010011000111"
-"0011110110100011011001110011100011011000010000000101101101001010111000010000010101111110000000100"
-"1011010100001001000011001100011000000111100111100101010100000111000000110111011101011111100010101"
-"0011001100110000010101111001000111001001010100011000110010011011101001001100101100000000111000111"
-"0111111000010010010100000101010010000100101011111111111001100101101010011010100010111001011100011"
-"1001001011010000110000111100010110110100000100110010000010010000001000110010101000110101101100100"
-"0001100001100011110110010000100000100010011001010010110111100011011000101011001100001111110110110"
-"0001100110010100011001101000100001110011011111101001101011110011011011111110111110101110010011001"
-"1000000101100000101100100000100000001011000100100001100100101101010111101010111101010001001010110"
-"0011111011001101001110110010100100000011001001111010001001100101110000000010111101000111111101010"
-"0110101110101110001001110000111110100000101101100110010001111101111011001000101110111010110111110"
-"0011001101011010001011000010000111111111101001011100110101011000000001111000101100011101011011100"
-"1111101110000000000110001110011001101100111111010001110000111110100011000100001100110010000110111"
-"1001011011001111011100000000011011000100000011000010010111000111101000011001001100011010001111000"
-"0011110010100010001101011101010011001100000010101001001101111101000111001110110000000010111101001"
-"1110110011101110111010011100101001010101100100011111100110001111011111110010100000011100110110001"
-"1011100000101000010100011101000010111100101111101100110001010001010000101110000000110100010110011"
-"1111110100101010011010100001100110110110011111110010000100001010011110010110001000000100000111000"
-"0111001010011001000010111001100110100110110101111011110111001001000101010010010011000111110010101"
-"1100110001100101001000010001101010011001110011001110001110010100010000000000000110111001010101000"
-"0111111011011101000111011001011011000101110100010001111100101110000100001011111101111101010011001"
-"0010001100011011101100010010101011001000001001010101100110001111001110011100110111111010110010001"
-"1111111101111001001101101001001010011001110000101000110010111110010110111111000100101000101011010"
-"0000101101101100000110101000101000010001111000100000111110011111111110010010001010001111011001100"
-"0011110111000000111111000100001111101110100010101011001010110110011001010010001011100001010110101"
-"0100000010101101000011001101110010000010110011000101100100000111111100011001110011010011001110000"
-"1110011110000000001001001010100000111001010110001110011100011010010010001110010011001010111100000"
-"1110000101101001011010001001010000111000010011010100001010110000101101110110011000011100111100001"
-"1001000011010001110110111001100100001111110010110010011111000010100000001101110100000000101101000"
-"0011000000100011000111110001000011100111110110000110101111101100011110100111111000000011011110110"
-"1101011010111010010001001101000110110010000010101000000001100100100000001111011001001010110100011"
-"1011000010101111010111000001001100111110000010110010011011110011111001000101111011010011010100001"
-"0110011111100001011111101010010100110001001001001000100010101011011000011100111000110101110000001"
-"1100001111100011110010000101011000010101111010001101010101100001100101100000100100000101011001100"
-"0011001000101010101010100111000100100010101000111111101010000000101010101001000101010100100111001"
-"1001100001010001100110111101010001111010011100000001001110100010010011110100001000011111100010001"
-"0010001000100110101011001110100110101110110110100101111000110101101101001000001110011010110011001"
-"0111111101011011101001111001011100001010110111000001100010110110100011010111011000111010100011000"
-"1111010110001001010000110001000101101100010100000000100001111100000010111001000011000101010100001"
-"0001101100011100010100101110010100000010011011010100000111110110000110101011011010010001110000111"
-"0110101000110101110010011100010010100111001101110110010001101001101101010100001010001110111011011"
-"1010011001010111101001011000100111001110011000000001101000001111001100001100000011001110100110011"
-"0011000110001001010111111111110110111111000111100010010101110000101100101000001010001011010100010"
-"1010010100010011101111100111010010010001110101011110110100001000001001000111001110010001001100100"
-"1100100010001010011011110100000101101011101010110110100100010001110000111010111001111011111001011"
-"0000000000011000100100100111001000101111000000110001011110101111110111100000000100101011000111011"
-"1011010011101000001011001001110001111010000100101101010111001010001000100001000111011010000110111"
-"1010110001001110001100001110011000101100000101100000000110101000000110101100100101110001100100100"
-"0110000110101011100001010001010000011101111011111011011000100100101011110101111000001011110010110"
-"0111011011100111101010110001111011010011111000010111110100001001010001011001000110111100000101011"
-"0010111111010100000110111101001100000100001011101010100011010010000001101100100101001000100011000"
-"0101010111111100100000111011101111100000011011111111010001100011001100101101011110101011101100001"
-"0100010011101111111011000111111101001000101101111001111000101110010111001010101011010111000000101"
-"0110010000010010101111100010111110000000011101001000011111001011111100111100100101100101111010110"
-"1010101001110011111100111110100000111100100000111111000010100001111011111110110010001001000000000"
-"1110100110010111100101111111001010001111001101100001011000111011100010100001000010100000011001000"
-"0000111000110111001001100010111010100111111001111101100101000011001001110011100110101110001101110"
-"1110000010110110010110000111001110110000011011100111000101100101000000001110011011001001111001111"
-"0000101100001000000111100110110000110111111001101001111111010000001011110011011011100100110000110"
-"1001011111101100100111111000000010001110111011010011011101001100000011001010000010101111111010110"
-"0001000100101110101101100001001010100110010000110110100110011001000111011110110011001110111110101"
-"0000011111011011001111010010101011000010011101001011100001010001111001000110000010000101010011111"
-"0110011000001111101001110001101011111111001010010110100001101000000011101000101011101000110101111"
-"0000101110011010010000110100000101100011000100101111100011001111011101001010100111001110100001101"
-"0000110111011000000110011001101011110000101100110110000101100000110110100001001001110001110001001"
-"1100110111111100101001100010010110011011110001000111111111001101111110010000011001011010111101001"
-"1101111110101110110100101100110001101101001010111101101000000011111111100101000101110001000011001"
-"1000111110111011010010101011110110110001010001001001100111111010011101111000000111011000011010011"
-"0111010101001110010100101101000110000110001100010101001110101011010100000110110111111111110011110"
-"0100011110100011001000110101111010000001011011110101001100111100010100101100010000010110011001111"
-"0011011110001110010010100100011111110000110011011100010110110101001110011010101111011001010101011"
-"1001001111001000001100100111000001000110110101100111000101011000000100001000100010011000001110011"
-"0000111100000111001101011111010000010001100000010101101000111100001000010011110000001011001001100"
-"0011011011111011100000111101001011101000010010001001111110010101111010110101101110110111010000101"
-"1100011000000000110110100011010100100010001101010101101110110111111011010110011101011010110101011"
-"1101000000010010011111000000101000110001000011100001101111010101100000100000100111111111100000000"
-"0011100011100101110010111100010111110010101110101000011000111111001110111111000001101101011011111"
-"1100110101001000011111001111000000001010001001010101101000001100111010101100010111001001111100000"
-"1110101101110001011100011101101100001001001011100111100110011101111000100010010001111100001010010"
-"1011001001010100101100010010000110010000101010111111001000011100000000101101110010001101110101001"
-"1110000011100101010000011110000010001000001010110001010000100111001100110001111000100100011100110"
-"1100010011110111001001100000100111001010000000000011100011111111101110010101111010100010000100001"
-"0101101001010111111110000110110010100000001011110100010111110111010000001011110110111000000110010"
-"0001100100111110001100010101000010011111100000100010000101110000111001101100100000011111111100010"
-"1001101101001000001111000100100001010110111011110110001001010001110001001100011001001100000000101"
-"1100011110101101011001100001010110001010000111100000011011011001000010101100010101110011001101110"
-"0000101011010001010011111001011000010101010100110110111110101000111110001000010100000000100010100"
-"1000111111000110110010001111000010101011101101111101011110101111100111111100111101000101000010011"
-"0010111010100010011001000000010111100010000101001011001101100011100001001111010100100110101111111"
-"1000010011110101001010011111111011101001110100001001100010000100001001100101101111011100100011001"
-"1111010001011001111101011110101101000111110101001010011101010010010101001000000000011001100110001"
-"0001000010101010101000010100111000001110000111001110001101111111000010101010111001011101001001011"
-"0011001111011010101110101111110001001100100111010001011000010100000100000001001100000011000011101"
-"1100000110000001011001110000101001010111101000110101000011000000111011100101010000111000010010101"
-"1010100101100001011011011110110011000100100101010011111101000000100001001101000011000101010111101"
-"1110111111100010111000111000010110111010010110000000000100101001000111101101100000000110111011001"
-"0100000000100100011110111011101101101101010110001110100001100001001011000000111111110100011110011"
-"0010000010000000010100110011110000000010000011111000111101011110000000000010101101001100000010010"
-"1011001001101110110011100001100011101001101011110011010001011101000100011111001010100000011111111"
-"1010101100000010001000110000110000101000110100110011100000110010110100011111010001000011100001001"
-"1000101000010111111011100010111000111001010100110000000010011011101010101111000110001000110111011"
-"1011100001100011010001101011010100110110011100000010111001011111110010100110100010001100000011100"
-"0001011001011000101011010000001010011010001011000111000011000011110011111001111010001101011010010"
-"0010010001001001101000101001011011101110001100010001010100010111111001100100000010001111100010111"
-"0100001111001100101001011101010010110010100010001100011010100110000100011010111110001011011001000"
-"1001001111011010010011101110100001111100000110101001010111110001101100110010111010111001011111010"
-"1110111011111110000001110010000010011111000111011011000011000010011110011111111101100101111011100"
-"0101101100000110101110000111111111111010110101010100111000011111011001100000100000101011000101110"
-"1011010010100000000100100000010111101110111001000011111011111110100011010010000110001101111101100"
-"1100010111001011011001011001010100100110100101001000111011011001100011001010010101111001100100110"
-"1000110000111011100101110101101000011001010010100011000001111001110110101101010010110110001100100"
-"0100001011101100111001010001111011010110010010110010110111110001001001111001111010010001010001101"
-"1110100110101100011110100100110111000111010110011000100100110110001101111100111110100001000110000"
-"1110011011001101100101100000001010100011101000010100111011111100011010000110000001011100010000101"
-"0100101000010001110010001100010110011111111101111000011001110111011100110010010100100010001000010"
-"0100001110010000011000110001101011101001110100100011011001000111010101110100110011010111001100001"
-"0100001001101010010111110101110111000000010100111101011010101001000001001000001000101101111000000"
-"0110000101110100001111001101110111011110010111101000100101110111010101001101100001110001101101101"
-"0010101100100101000100100100110111000111000111100111000001100001000111101011000110111110001010000"
-"0100110010001101100011010111000111010111000111110000110000101111101110010110111001011000111010001"
-"1011000010010101010010011001000011010110111011010001001010100111001000010110110110101110000110000"
-"1110110010011001011011000100011101001001000111011100100000000000100001101101000101000100000111001"
-"0011100001100110101011011101110111101111000100100011100001010001011001110010101010001110101101110"
-"1011001110111111111010101101000010111111011011011100011100101010001011011100011111011100101011000"
-"1000110100101000011111010011110000000101101110010000101100001000100000000010010110000000000110011"
-"1000000000001111001001000100000111001110111111001111101100001100111000101100011000100111111110011"
-"1110010101011010111100110010110001010000101111111101001010100010001001111010111000010000010010001"
-"1111111101100100001101011011100001010101000111110111111101011010011111111101000111011001011011000"
-"0000101011100011101110110011101111011110011110010000011001111001110111011011111010011011001110111"
-"0101100111110100000100010110010010101001010100010111000101111001011011001001110010100011101111110"
-"1101011110010101101011010010011111110000011010011101000000010000111010100100111110111000001101010"
-"0101100001111001111010101011110001001010000011010110010100011100100100111110100110000010011111001"
-"0100010011001001010101110111111010011101101100000101011110111010011110001111110100111011110011010"
-"0111001010110101010110000011001010000000101101010101001101011000011011010110101010101111101101100"
-"1100100000111101010111011011011110011001100010010000010100101000111111101011100111010101011000111"
-"1100110010101100010011111100000110011111101011100100001110001100001010101001001100010011001000100"
-"1101101000101101110010000001101001001110101111000110111000011101111110100100110111000000101011110"
-"0001100100001010101001101111001000001100000011010000100101100000001110100010010000110110101010111"
-"1100010100000110011100101010111110010110111100000010110011011001011110111001010011011110010001110"
-"1101110000001011101101011111101011111110110110000111110011101100110100010000100000110100010010110"
-"0011000011000110101001110100111010110000100010110101110111100010110001000111100111001011011110010"
-"0001001110101001101101011010111001001101100011101001011011001110011010001010110100111001111100101"
-"1000111001010010000010111010101110001100110111111000011101001000001010010011101000111001100111110"
-"1110100100100110010111111101010011101111011011111011011010011110100101100001011000001001001010010"
-"1100001000000110110011011101010001011110010001001110110100100001101101001011101010001110111111010"
-"1100011100101000011110111110110011111111100010110010110111010010001111101110011011010110000001000"
-"0010110100010101110100001000010011100110001110001110010100010010010110011100100110010100001110011"
-"1100001011010000001101011011011110100000001110100111001000101000001000001001000010000111010000100"
-"0111100000101010110010111010010101100000001100110101001001000110001110111011110001010010010011000"
-"1100001111101101100001111000101100110010001000111001101101011110100110100011101000011111011010101"
-"0101000011111010010110001001100110110111000100100011011101000010001010110001111001111101110001111"
-"0100100000010111010011111110000101001001011110100100010011101110011010100101100001010000001110100"
-"0011111101111000100110011000011001100100001010110011111100111010100011110100010101011110011001000"
-"0000110000100100001011101110111010001001011110010101111100001111101101111011011110001010000100010"
-"1001100100100100110010010101100110000000100000000111110011100111101001010000010000000000101011100"
-"0011101011100110000001100101010101011111111011010011110010011011001010011101010010100010001011010"
-"1100010011101011010111110100001010100011000011001001011011101111110011001110010001100101011001101"
-"0100010001111111100000101000001011010100011100111011010111001100110110001100110101000011010001010"
-"1011100001001010011110001010100100001101110011101011100100101010001100110011110010001100100001000"
-"0110001001110110010111101011101101010111001010011010101110000010100010000111011000010110011000001"
-"1000110010100001110001100010010000001101111110000010010110100000000000001111110010001110111100001"
-"0100111101000011101110010101011011000101011010111100111111001011110001110011110011011010010111101"
-"1010111011101101000001110111001010011001110010100100100100001010001100101010111001110100000110111"
-"1010000111000011101101100101101001100000011100100111100110010110011100101000111110111000110111110"
-"1101100101011101100111011111111001111000011110111110101100000111000101100100110111000010100101000"
-"0110000011011101111101111000110101011000010111010000111011000000100011101010100111001111101010111"
-"0001110100001000100001011101001010001110100000101100001011101111100111101011111001111100101101111"
-"0101100001110011111110110100110010000011011111101101110110000110110011100110111000111101000010111"
-"0111101011100100000000011101111011000100001000111000000111011010101010110000111111101010110001111"
-"0000110100111101111011001010101110000011001101001101000010011001101011111110111101010111010011100"
-"0101010011001111101111001100101000101000111110111001011111100000001101111011000001001100111111111"
-"1010111101000001111011110010001001001110100111110010000011110000011000000101001100011110110011001"
-"1010101001000010001010110000010011110101011110010111010001010111101100001001100011101001111101001"
-"0110110100111001110011100011111010010010100010111000001100001011010010000100100110101010111001001"
-"0110000101011011011100110111111001010000001001011010101010010001011010111100111010101101000101101"
-"0100100001011101110111111001111111110110111011000101010000010000011111001000100101100100100110110"
-"1100000111110010110011010100000100011111110001110010110001000001001111001101110110110101101010111"
-"0000100111101100010001110010110111100011100101100011";
diff --git a/numpy/random/src/mt19937/mt19937.c b/numpy/random/src/mt19937/mt19937.c
index e5ca9e0cf..bec518af8 100644
--- a/numpy/random/src/mt19937/mt19937.c
+++ b/numpy/random/src/mt19937/mt19937.c
@@ -1,6 +1,5 @@
#include "mt19937.h"
#include "mt19937-jump.h"
-#include "mt19937-poly.h"
void mt19937_seed(mt19937_state *state, uint32_t seed) {
int pos;
@@ -104,4 +103,4 @@ extern inline uint32_t mt19937_next32(mt19937_state *state);
extern inline double mt19937_next_double(mt19937_state *state);
-void mt19937_jump(mt19937_state *state) { mt19937_jump_state(state, poly); }
+void mt19937_jump(mt19937_state *state) { mt19937_jump_state(state); }
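With the jump polynomial now compiled in as the coefficient table in mt19937-jump.h, mt19937_jump_state no longer takes the string polynomial and mt19937_jump simply forwards the state. At the Python level this machinery backs MT19937.jumped(), which returns a bit generator whose state has been advanced as if 2**128 draws had been made. A minimal usage sketch (not part of this diff) of building independent streams with the existing public API:

from numpy.random import Generator, MT19937, SeedSequence

# Derive several non-overlapping streams by repeatedly jumping the
# same MT19937 state 2**128 steps ahead.
bit_gen = MT19937(SeedSequence(12345))
streams = [Generator(bit_gen)]
for _ in range(3):
    bit_gen = bit_gen.jumped()   # new MT19937, advanced as if 2**128 draws were made
    streams.append(Generator(bit_gen))

print([g.standard_normal() for g in streams])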
diff --git a/numpy/random/tests/test_extending.py b/numpy/random/tests/test_extending.py
index f7efafba9..77353463e 100644
--- a/numpy/random/tests/test_extending.py
+++ b/numpy/random/tests/test_extending.py
@@ -46,14 +46,24 @@ def test_cython(tmp_path):
srcdir = os.path.join(os.path.dirname(__file__), '..')
shutil.copytree(srcdir, tmp_path / 'random')
# build the examples and "install" them into a temporary directory
- env = os.environ.copy()
+ build_dir = tmp_path / 'random' / '_examples' / 'cython'
subprocess.check_call([sys.executable, 'setup.py', 'build', 'install',
'--prefix', str(tmp_path / 'installdir'),
'--single-version-externally-managed',
'--record', str(tmp_path/ 'tmp_install_log.txt'),
],
- cwd=str(tmp_path / 'random' / '_examples' / 'cython'),
- env=env)
+ cwd=str(build_dir),
+ )
+ # gh-16162: make sure numpy's __init__.pxd was used by Cython
+ # not really part of this test, but it is a convenient place to check
+ with open(build_dir / 'extending.c') as fid:
+ txt_to_find = 'NumPy API declarations from "numpy/__init__.pxd"'
+ for i, line in enumerate(fid):
+ if txt_to_find in line:
+ break
+ else:
+ assert False, ("Could not find '{}' in C file, "
+ "wrong pxd used".format(txt_to_find))
# get the path to the so's
so1 = so2 = None
with open(tmp_path /'tmp_install_log.txt') as fid:
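The check added above scans the Cython-generated extending.c for the marker comment identifying numpy/__init__.pxd as the source of the NumPy declarations. A small stand-alone sketch of the same idea (the helper name and glob pattern are illustrative, not part of the test suite):

import glob

MARKER = 'NumPy API declarations from "numpy/__init__.pxd"'

def uses_bundled_pxd(build_dir):
    # True if any generated C file in build_dir declares the NumPy API
    # via numpy's bundled __init__.pxd (see gh-16162).
    for c_file in glob.glob(str(build_dir) + '/*.c'):
        with open(c_file) as fid:
            if any(MARKER in line for line in fid):
                return True
    return False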
diff --git a/numpy/random/tests/test_generator_mt19937.py b/numpy/random/tests/test_generator_mt19937.py
index a28b7ca11..f72b748ba 100644
--- a/numpy/random/tests/test_generator_mt19937.py
+++ b/numpy/random/tests/test_generator_mt19937.py
@@ -1,4 +1,5 @@
import sys
+import hashlib
import pytest
@@ -13,6 +14,26 @@ from numpy.random import Generator, MT19937, SeedSequence
random = Generator(MT19937())
+JUMP_TEST_DATA = [
+ {
+ "seed": 0,
+ "steps": 10,
+ "initial": {"key_md5": "64eaf265d2203179fb5ffb73380cd589", "pos": 9},
+ "jumped": {"key_md5": "8cb7b061136efceef5217a9ce2cc9a5a", "pos": 598},
+ },
+ {
+ "seed":384908324,
+ "steps":312,
+ "initial": {"key_md5": "e99708a47b82ff51a2c7b0625b81afb5", "pos": 311},
+ "jumped": {"key_md5": "2ecdbfc47a895b253e6e19ccb2e74b90", "pos": 276},
+ },
+ {
+ "seed": [839438204, 980239840, 859048019, 821],
+ "steps": 511,
+ "initial": {"key_md5": "9fcd6280df9199785e17e93162ce283c", "pos": 510},
+ "jumped": {"key_md5": "433b85229f2ed853cde06cd872818305", "pos": 475},
+ },
+]
@pytest.fixture(scope='module', params=[True, False])
def endpoint(request):
@@ -462,7 +483,6 @@ class TestIntegers:
assert_array_equal(scalar, array)
def test_repeatability(self, endpoint):
- import hashlib
# We use an md5 hash of generated sequences of 1000 samples
# in the range [0, 6) for all but bool, where the range
# is [0, 2). Hashes are for little-endian numbers.
@@ -487,7 +507,7 @@ class TestIntegers:
val = random.integers(0, 6 - endpoint, size=1000, endpoint=endpoint,
dtype=dt).byteswap()
- res = hashlib.md5(val.view(np.int8)).hexdigest()
+ res = hashlib.md5(val).hexdigest()
assert_(tgt[np.dtype(dt).name] == res)
# bools do not depend on endianness
@@ -885,8 +905,6 @@ class TestRandomDist:
assert actual.dtype == np.int64
def test_choice_large_sample(self):
- import hashlib
-
choice_hash = 'd44962a0b1e92f4a3373c23222244e21'
random = Generator(MT19937(self.seed))
actual = random.choice(10000, 5000, replace=False)
@@ -2351,3 +2369,31 @@ class TestSingleEltArrayInput:
out = func(self.argOne, self.argTwo[0], self.argThree)
assert_equal(out.shape, self.tgtShape)
+
+
+@pytest.mark.parametrize("config", JUMP_TEST_DATA)
+def test_jumped(config):
+ # Each config contains the initial seed, a number of raw steps,
+ # the md5 hashes of the initial and the final states' keys, and
+ # the position of the initial and the final state.
+ # These were produced using the original C implementation.
+ seed = config["seed"]
+ steps = config["steps"]
+
+ mt19937 = MT19937(seed)
+ # Burn-in: advance the generator by the requested number of raw draws
+ mt19937.random_raw(steps)
+ key = mt19937.state["state"]["key"]
+ if sys.byteorder == 'big':
+ key = key.byteswap()
+ md5 = hashlib.md5(key)
+ assert mt19937.state["state"]["pos"] == config["initial"]["pos"]
+ assert md5.hexdigest() == config["initial"]["key_md5"]
+
+ jumped = mt19937.jumped()
+ key = jumped.state["state"]["key"]
+ if sys.byteorder == 'big':
+ key = key.byteswap()
+ md5 = hashlib.md5(key)
+ assert jumped.state["state"]["pos"] == config["jumped"]["pos"]
+ assert md5.hexdigest() == config["jumped"]["key_md5"]
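The JUMP_TEST_DATA hashes were produced with the original C implementation, but entries of the same shape can be reproduced from Python with the current bindings. A minimal sketch (the fingerprint helper is illustrative) mirroring what test_jumped checks for the first config:

import hashlib
import sys

from numpy.random import MT19937

def fingerprint(bit_gen):
    # md5 of the 624-word key, hashed little-endian, plus the position.
    key = bit_gen.state["state"]["key"]
    if sys.byteorder == 'big':
        key = key.byteswap()
    return hashlib.md5(key).hexdigest(), bit_gen.state["state"]["pos"]

mt = MT19937(0)
mt.random_raw(10)                  # the "steps" burn-in from the first config
print("initial:", fingerprint(mt))
print("jumped: ", fingerprint(mt.jumped()))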
diff --git a/numpy/setup.py b/numpy/setup.py
index fb9b36b78..52db6a68b 100644
--- a/numpy/setup.py
+++ b/numpy/setup.py
@@ -18,7 +18,7 @@ def configuration(parent_package='',top_path=None):
config.add_subpackage('random')
config.add_subpackage('testing')
config.add_data_dir('doc')
- config.add_data_dir('tests')
+ config.add_subpackage('tests')
config.make_config_py() # installs __config__.py
return config
diff --git a/numpy/testing/setup.py b/numpy/testing/setup.py
index f4970991c..13191f13f 100755
--- a/numpy/testing/setup.py
+++ b/numpy/testing/setup.py
@@ -5,7 +5,7 @@ def configuration(parent_package='',top_path=None):
config = Configuration('testing', parent_package, top_path)
config.add_subpackage('_private')
- config.add_data_dir('tests')
+ config.add_subpackage('tests')
return config
if __name__ == '__main__':