Diffstat (limited to 'numpy')
33 files changed, 414 insertions, 165 deletions
diff --git a/numpy/core/code_generators/cversions.txt b/numpy/core/code_generators/cversions.txt index 5daa52d79..528113a9e 100644 --- a/numpy/core/code_generators/cversions.txt +++ b/numpy/core/code_generators/cversions.txt @@ -50,4 +50,5 @@ # Version 13 (NumPy 1.17) No change. # Version 13 (NumPy 1.18) No change. # Version 13 (NumPy 1.19) No change. +# Version 13 (NumPy 1.20) No change. 0x0000000d = 5b0e8bbded00b166125974fc71e80a33 diff --git a/numpy/core/code_generators/generate_umath.py b/numpy/core/code_generators/generate_umath.py index 52ae3cdd7..f10ce9f0f 100644 --- a/numpy/core/code_generators/generate_umath.py +++ b/numpy/core/code_generators/generate_umath.py @@ -531,7 +531,7 @@ defdict = { TD(flts, f="logaddexp", astype={'e':'f'}) ), 'logaddexp2': - Ufunc(2, 1, None, + Ufunc(2, 1, MinusInfinity, docstrings.get('numpy.core.umath.logaddexp2'), None, TD(flts, f="logaddexp2", astype={'e':'f'}) @@ -1028,8 +1028,11 @@ def make_arrays(funcdict): funclist.append('NULL') try: thedict = arity_lookup[uf.nin, uf.nout] - except KeyError: - raise ValueError("Could not handle {}[{}]".format(name, t.type)) + except KeyError as e: + raise ValueError( + f"Could not handle {name}[{t.type}] " + f"with nin={uf.nin}, nout={uf.nout}" + ) from None astype = '' if not t.astype is None: diff --git a/numpy/core/defchararray.py b/numpy/core/defchararray.py index cd01c0e77..1d447b86a 100644 --- a/numpy/core/defchararray.py +++ b/numpy/core/defchararray.py @@ -1779,7 +1779,7 @@ def isdecimal(a): Calls `unicode.isdecimal` element-wise. Decimal characters include digit characters, and all characters - that that can be used to form decimal-radix numbers, + that can be used to form decimal-radix numbers, e.g. ``U+0660, ARABIC-INDIC DIGIT ZERO``. Parameters diff --git a/numpy/core/einsumfunc.py b/numpy/core/einsumfunc.py index a1e2efdb4..c46ae173d 100644 --- a/numpy/core/einsumfunc.py +++ b/numpy/core/einsumfunc.py @@ -3,6 +3,7 @@ Implementation of optimized einsum. """ import itertools +import operator from numpy.core.multiarray import c_einsum from numpy.core.numeric import asanyarray, tensordot @@ -576,11 +577,13 @@ def _parse_einsum_input(operands): for s in sub: if s is Ellipsis: subscripts += "..." - elif isinstance(s, int): - subscripts += einsum_symbols[s] else: - raise TypeError("For this input type lists must contain " - "either int or Ellipsis") + try: + s = operator.index(s) + except TypeError as e: + raise TypeError("For this input type lists must contain " + "either int or Ellipsis") from e + subscripts += einsum_symbols[s] if num != last: subscripts += "," @@ -589,11 +592,13 @@ def _parse_einsum_input(operands): for s in output_list: if s is Ellipsis: subscripts += "..." - elif isinstance(s, int): - subscripts += einsum_symbols[s] else: - raise TypeError("For this input type lists must contain " - "either int or Ellipsis") + try: + s = operator.index(s) + except TypeError as e: + raise TypeError("For this input type lists must contain " + "either int or Ellipsis") from e + subscripts += einsum_symbols[s] # Check for proper "->" if ("-" in subscripts) or (">" in subscripts): invalid = (subscripts.count("-") > 1) or (subscripts.count(">") > 1) diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py index b32ad8d35..7193af839 100644 --- a/numpy/core/fromnumeric.py +++ b/numpy/core/fromnumeric.py @@ -2494,6 +2494,14 @@ def ptp(a, axis=None, out=None, keepdims=np._NoValue): The name of the function comes from the acronym for 'peak to peak'. + .. 
warning:: + `ptp` preserves the data type of the array. This means the + return value for an input of signed integers with n bits + (e.g. `np.int8`, `np.int16`, etc) is also a signed integer + with n bits. In that case, peak-to-peak values greater than + ``2**(n-1)-1`` will be returned as negative values. An example + with a work-around is shown below. + Parameters ---------- a : array_like @@ -2531,16 +2539,33 @@ def ptp(a, axis=None, out=None, keepdims=np._NoValue): Examples -------- - >>> x = np.arange(4).reshape((2,2)) - >>> x - array([[0, 1], - [2, 3]]) + >>> x = np.array([[4, 9, 2, 10], + ... [6, 9, 7, 12]]) + + >>> np.ptp(x, axis=1) + array([8, 6]) >>> np.ptp(x, axis=0) - array([2, 2]) + array([2, 0, 5, 2]) - >>> np.ptp(x, axis=1) - array([1, 1]) + >>> np.ptp(x) + 10 + + This example shows that a negative value can be returned when + the input is an array of signed integers. + + >>> y = np.array([[1, 127], + ... [0, 127], + ... [-1, 127], + ... [-2, 127]], dtype=np.int8) + >>> np.ptp(y, axis=1) + array([ 126, 127, -128, -127], dtype=int8) + + A work-around is to use the `view()` method to view the result as + unsigned integers with the same bit width: + + >>> np.ptp(y, axis=1).view(np.uint8) + array([126, 127, 128, 129], dtype=uint8) """ kwargs = {} @@ -3411,17 +3436,18 @@ def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue): Notes ----- The standard deviation is the square root of the average of the squared - deviations from the mean, i.e., ``std = sqrt(mean(abs(x - x.mean())**2))``. - - The average squared deviation is normally calculated as - ``x.sum() / N``, where ``N = len(x)``. If, however, `ddof` is specified, - the divisor ``N - ddof`` is used instead. In standard statistical - practice, ``ddof=1`` provides an unbiased estimator of the variance - of the infinite population. ``ddof=0`` provides a maximum likelihood - estimate of the variance for normally distributed variables. The - standard deviation computed in this function is the square root of - the estimated variance, so even with ``ddof=1``, it will not be an - unbiased estimate of the standard deviation per se. + deviations from the mean, i.e., ``std = sqrt(mean(x))``, where + ``x = abs(a - a.mean())**2``. + + The average squared deviation is typically calculated as ``x.sum() / N``, + where ``N = len(x)``. If, however, `ddof` is specified, the divisor + ``N - ddof`` is used instead. In standard statistical practice, ``ddof=1`` + provides an unbiased estimator of the variance of the infinite population. + ``ddof=0`` provides a maximum likelihood estimate of the variance for + normally distributed variables. The standard deviation computed in this + function is the square root of the estimated variance, so even with + ``ddof=1``, it will not be an unbiased estimate of the standard deviation + per se. Note that, for complex numbers, `std` takes the absolute value before squaring, so that the result is always real and nonnegative. @@ -3536,9 +3562,9 @@ def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue): Notes ----- The variance is the average of the squared deviations from the mean, - i.e., ``var = mean(abs(x - x.mean())**2)``. + i.e., ``var = mean(x)``, where ``x = abs(a - a.mean())**2``. - The mean is normally calculated as ``x.sum() / N``, where ``N = len(x)``. + The mean is typically calculated as ``x.sum() / N``, where ``N = len(x)``. If, however, `ddof` is specified, the divisor ``N - ddof`` is used instead. 
In standard statistical practice, ``ddof=1`` provides an unbiased estimator of the variance of a hypothetical infinite population. diff --git a/numpy/core/getlimits.py b/numpy/core/getlimits.py index e2ff49393..f73c21f67 100644 --- a/numpy/core/getlimits.py +++ b/numpy/core/getlimits.py @@ -337,8 +337,8 @@ class finfo: The approximate decimal resolution of this type, i.e., ``10**-precision``. tiny : float - The smallest positive usable number. Type of `tiny` is an - appropriate floating point type. + The smallest positive floating point number with full precision + (see Notes). Parameters ---------- @@ -359,6 +359,18 @@ class finfo: impacts import times. These objects are cached, so calling ``finfo()`` repeatedly inside your functions is not a problem. + Note that ``tiny`` is not actually the smallest positive representable + value in a NumPy floating point type. As in the IEEE-754 standard [1]_, + NumPy floating point types make use of subnormal numbers to fill the + gap between 0 and ``tiny``. However, subnormal numbers may have + significantly reduced precision [2]_. + + References + ---------- + .. [1] IEEE Standard for Floating-Point Arithmetic, IEEE Std 754-2008, + pp.1-70, 2008, http://www.doi.org/10.1109/IEEESTD.2008.4610935 + .. [2] Wikipedia, "Denormal Numbers", + https://en.wikipedia.org/wiki/Denormal_number """ _finfo_cache = {} @@ -546,4 +558,3 @@ class iinfo: def __repr__(self): return "%s(min=%s, max=%s, dtype=%s)" % (self.__class__.__name__, self.min, self.max, self.dtype) - diff --git a/numpy/core/include/numpy/numpyconfig.h b/numpy/core/include/numpy/numpyconfig.h index 4df4ea438..8eaf446b7 100644 --- a/numpy/core/include/numpy/numpyconfig.h +++ b/numpy/core/include/numpy/numpyconfig.h @@ -41,5 +41,6 @@ #define NPY_1_17_API_VERSION 0x00000008 #define NPY_1_18_API_VERSION 0x00000008 #define NPY_1_19_API_VERSION 0x00000008 +#define NPY_1_20_API_VERSION 0x00000008 #endif diff --git a/numpy/core/machar.py b/numpy/core/machar.py index a48dc3d50..55285fe59 100644 --- a/numpy/core/machar.py +++ b/numpy/core/machar.py @@ -40,8 +40,8 @@ class MachAr: Smallest (most negative) power of `ibeta` consistent with there being no leading zeros in the mantissa. xmin : float - Floating point number ``beta**minexp`` (the smallest [in - magnitude] usable floating value). + Floating-point number ``beta**minexp`` (the smallest [in + magnitude] positive floating point number with full precision). maxexp : int Smallest (positive) power of `ibeta` that causes overflow. xmax : float diff --git a/numpy/core/multiarray.py b/numpy/core/multiarray.py index f3d48459a..5ae6a4272 100644 --- a/numpy/core/multiarray.py +++ b/numpy/core/multiarray.py @@ -171,14 +171,15 @@ def concatenate(arrays, axis=None, out=None): array_split : Split an array into multiple sub-arrays of equal or near-equal size. split : Split array into a list of multiple sub-arrays of equal size. - hsplit : Split array into multiple sub-arrays horizontally (column wise) - vsplit : Split array into multiple sub-arrays vertically (row wise) + hsplit : Split array into multiple sub-arrays horizontally (column wise). + vsplit : Split array into multiple sub-arrays vertically (row wise). dsplit : Split array into multiple sub-arrays along the 3rd axis (depth). stack : Stack a sequence of arrays along a new axis. - hstack : Stack arrays in sequence horizontally (column wise) - vstack : Stack arrays in sequence vertically (row wise) - dstack : Stack arrays in sequence depth wise (along third dimension) block : Assemble arrays from blocks. 
+ hstack : Stack arrays in sequence horizontally (column wise). + vstack : Stack arrays in sequence vertically (row wise). + dstack : Stack arrays in sequence depth wise (along third dimension). + column_stack : Stack 1-D arrays as columns into a 2-D array. Notes ----- diff --git a/numpy/core/numeric.py b/numpy/core/numeric.py index 8bd4e241b..05f0b7820 100644 --- a/numpy/core/numeric.py +++ b/numpy/core/numeric.py @@ -2230,6 +2230,7 @@ def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False): See Also -------- allclose + math.isclose Notes ----- diff --git a/numpy/core/records.py b/numpy/core/records.py index 04c970cf4..af59de425 100644 --- a/numpy/core/records.py +++ b/numpy/core/records.py @@ -772,8 +772,58 @@ def fromrecords(recList, dtype=None, shape=None, formats=None, names=None, def fromstring(datastring, dtype=None, shape=None, offset=0, formats=None, names=None, titles=None, aligned=False, byteorder=None): - """Create a (read-only) record array from binary data contained in - a string""" + """Create a record array from binary data + + Note that despite the name of this function it does not accept `str` + instances. + + Parameters + ---------- + datastring : bytes-like + Buffer of binary data + dtype : data-type, optional + Valid dtype for all arrays + shape : int or tuple of ints, optional + Shape of each array. + offset : int, optional + Position in the buffer to start reading from. + formats, names, titles, aligned, byteorder : + If `dtype` is ``None``, these arguments are passed to + `numpy.format_parser` to construct a dtype. See that function for + detailed documentation. + + + Returns + ------- + np.recarray + Record array view into the data in datastring. This will be readonly + if `datastring` is readonly. + + See Also + -------- + numpy.frombuffer + + Examples + -------- + >>> a = b'\x01\x02\x03abc' + >>> np.core.records.fromstring(a, dtype='u1,u1,u1,S3') + rec.array([(1, 2, 3, b'abc')], + dtype=[('f0', 'u1'), ('f1', 'u1'), ('f2', 'u1'), ('f3', 'S3')]) + + >>> grades_dtype = [('Name', (np.str_, 10)), ('Marks', np.float64), + ... ('GradeLevel', np.int32)] + >>> grades_array = np.array([('Sam', 33.3, 3), ('Mike', 44.4, 5), + ... ('Aadi', 66.6, 6)], dtype=grades_dtype) + >>> np.core.records.fromstring(grades_array.tobytes(), dtype=grades_dtype) + rec.array([('Sam', 33.3, 3), ('Mike', 44.4, 5), ('Aadi', 66.6, 6)], + dtype=[('Name', '<U10'), ('Marks', '<f8'), ('GradeLevel', '<i4')]) + + >>> s = '\x01\x02\x03abc' + >>> np.core.records.fromstring(s, dtype='u1,u1,u1,S3') + Traceback (most recent call last) + ... 
+ TypeError: a bytes-like object is required, not 'str' + """ if dtype is None and formats is None: raise TypeError("fromstring() needs a 'dtype' or 'formats' argument") diff --git a/numpy/core/setup.py b/numpy/core/setup.py index 76f3f5abe..fcc422545 100644 --- a/numpy/core/setup.py +++ b/numpy/core/setup.py @@ -520,7 +520,7 @@ def configuration(parent_package='',top_path=None): def generate_numpyconfig_h(ext, build_dir): """Depends on config.h: generate_config_h has to be called before !""" # put common include directory in build_dir on search path - # allows using code generation in headers headers + # allows using code generation in headers config.add_include_dirs(join(build_dir, "src", "common")) config.add_include_dirs(join(build_dir, "src", "npymath")) diff --git a/numpy/core/shape_base.py b/numpy/core/shape_base.py index ee56dbe43..7a76bbf9d 100644 --- a/numpy/core/shape_base.py +++ b/numpy/core/shape_base.py @@ -247,12 +247,13 @@ def vstack(tup): See Also -------- + concatenate : Join a sequence of arrays along an existing axis. stack : Join a sequence of arrays along a new axis. + block : Assemble an nd-array from nested lists of blocks. hstack : Stack arrays in sequence horizontally (column wise). - dstack : Stack arrays in sequence depth wise (along third dimension). - concatenate : Join a sequence of arrays along an existing axis. - vsplit : Split array into a list of multiple sub-arrays vertically. - block : Assemble arrays from blocks. + dstack : Stack arrays in sequence depth wise (along third axis). + column_stack : Stack 1-D arrays as columns into a 2-D array. + vsplit : Split an array into multiple sub-arrays vertically (row-wise). Examples -------- @@ -309,12 +310,13 @@ def hstack(tup): See Also -------- + concatenate : Join a sequence of arrays along an existing axis. stack : Join a sequence of arrays along a new axis. + block : Assemble an nd-array from nested lists of blocks. vstack : Stack arrays in sequence vertically (row wise). dstack : Stack arrays in sequence depth wise (along third axis). - concatenate : Join a sequence of arrays along an existing axis. - hsplit : Split array along second axis. - block : Assemble arrays from blocks. + column_stack : Stack 1-D arrays as columns into a 2-D array. + hsplit : Split an array into multiple sub-arrays horizontally (column-wise). Examples -------- @@ -385,8 +387,8 @@ def stack(arrays, axis=0, out=None): See Also -------- concatenate : Join a sequence of arrays along an existing axis. + block : Assemble an nd-array from nested lists of blocks. split : Split array into a list of multiple sub-arrays of equal size. - block : Assemble arrays from blocks. Examples -------- @@ -723,12 +725,13 @@ def block(arrays): See Also -------- - concatenate : Join a sequence of arrays together. - stack : Stack arrays in sequence along a new dimension. - hstack : Stack arrays in sequence horizontally (column wise). + concatenate : Join a sequence of arrays along an existing axis. + stack : Join a sequence of arrays along a new axis. vstack : Stack arrays in sequence vertically (row wise). - dstack : Stack arrays in sequence depth wise (along third dimension). - vsplit : Split array into a list of multiple sub-arrays vertically. + hstack : Stack arrays in sequence horizontally (column wise). + dstack : Stack arrays in sequence depth wise (along third axis). + column_stack : Stack 1-D arrays as columns into a 2-D array. + vsplit : Split an array into multiple sub-arrays vertically (row-wise). 
Notes ----- diff --git a/numpy/core/src/common/mem_overlap.c b/numpy/core/src/common/mem_overlap.c index 21db1893b..9da33bfc1 100644 --- a/numpy/core/src/common/mem_overlap.c +++ b/numpy/core/src/common/mem_overlap.c @@ -127,7 +127,7 @@ ends up considering all values x3=0...5 separately. The upper bound for work done is prod(shape_a)*prod(shape_b), which scales - faster than than work done by binary ufuncs, after broadcasting, + faster than work done by binary ufuncs, after broadcasting, prod(shape_a). The bound may be loose, but it is possible to construct hard instances where ufunc is faster (adapted from [2,3]):: diff --git a/numpy/core/src/common/npy_import.h b/numpy/core/src/common/npy_import.h index 221e1e645..f485514d1 100644 --- a/numpy/core/src/common/npy_import.h +++ b/numpy/core/src/common/npy_import.h @@ -19,7 +19,7 @@ NPY_INLINE static void npy_cache_import(const char *module, const char *attr, PyObject **cache) { - if (*cache == NULL) { + if (NPY_UNLIKELY(*cache == NULL)) { PyObject *mod = PyImport_ImportModule(module); if (mod != NULL) { diff --git a/numpy/core/src/multiarray/arraytypes.c.src b/numpy/core/src/multiarray/arraytypes.c.src index 024dcab8c..38d5f21eb 100644 --- a/numpy/core/src/multiarray/arraytypes.c.src +++ b/numpy/core/src/multiarray/arraytypes.c.src @@ -3846,7 +3846,7 @@ static void */ /* - * Compute correlation of data with with small kernels + * Compute correlation of data with small kernels * Calling a BLAS dot product for the inner loop of the correlation is overkill * for small kernels. It is faster to compute it directly. * Intended to be used by _pyarray_correlate so no input verifications is done diff --git a/numpy/core/src/multiarray/common.h b/numpy/core/src/multiarray/common.h index 4913eb202..78a15a63c 100644 --- a/numpy/core/src/multiarray/common.h +++ b/numpy/core/src/multiarray/common.h @@ -5,6 +5,7 @@ #include <numpy/npy_cpu.h> #include <numpy/ndarraytypes.h> #include <limits.h> +#include "npy_import.h" #define error_converting(x) (((x) == -1) && PyErr_Occurred()) @@ -148,13 +149,9 @@ check_and_adjust_axis_msg(int *axis, int ndim, PyObject *msg_prefix) static PyObject *AxisError_cls = NULL; PyObject *exc; + npy_cache_import("numpy.core._exceptions", "AxisError", &AxisError_cls); if (AxisError_cls == NULL) { - PyObject *mod = PyImport_ImportModule("numpy.core._exceptions"); - - if (mod != NULL) { - AxisError_cls = PyObject_GetAttrString(mod, "AxisError"); - Py_DECREF(mod); - } + return -1; } /* Invoke the AxisError constructor */ diff --git a/numpy/core/src/multiarray/ctors.c b/numpy/core/src/multiarray/ctors.c index df92544fd..9283eefce 100644 --- a/numpy/core/src/multiarray/ctors.c +++ b/numpy/core/src/multiarray/ctors.c @@ -2183,6 +2183,7 @@ PyArray_FromArray(PyArrayObject *arr, PyArray_Descr *newtype, int flags) PyErr_Clear(); npy_set_invalid_cast_error( PyArray_DESCR(arr), newtype, casting, PyArray_NDIM(arr) == 0); + Py_DECREF(newtype); return NULL; } diff --git a/numpy/core/src/multiarray/dragon4.c b/numpy/core/src/multiarray/dragon4.c index 282cdad28..553d0effb 100644 --- a/numpy/core/src/multiarray/dragon4.c +++ b/numpy/core/src/multiarray/dragon4.c @@ -1566,7 +1566,7 @@ Dragon4(BigInt *bigints, const npy_int32 exponent, * * scientific - boolean controlling whether scientific notation is used * digit_mode - whether to use unique or fixed fractional output - * cutoff_mode - whether 'precision' refers to to all digits, or digits past + * cutoff_mode - whether 'precision' refers to all digits, or digits past * the decimal point. 
* precision - When negative, prints as many digits as needed for a unique * number. When positive specifies the maximum number of diff --git a/numpy/core/src/multiarray/dtype_transfer.c b/numpy/core/src/multiarray/dtype_transfer.c index ecaa680ec..a26426d41 100644 --- a/numpy/core/src/multiarray/dtype_transfer.c +++ b/numpy/core/src/multiarray/dtype_transfer.c @@ -696,17 +696,15 @@ get_nbo_cast_numeric_transfer_function(int aligned, if (PyTypeNum_ISCOMPLEX(src_type_num) && !PyTypeNum_ISCOMPLEX(dst_type_num) && !PyTypeNum_ISBOOL(dst_type_num)) { - PyObject *cls = NULL, *obj = NULL; + static PyObject *cls = NULL; int ret; - obj = PyImport_ImportModule("numpy.core"); - if (obj) { - cls = PyObject_GetAttrString(obj, "ComplexWarning"); - Py_DECREF(obj); + npy_cache_import("numpy.core", "ComplexWarning", &cls); + if (cls == NULL) { + return NPY_FAIL; } ret = PyErr_WarnEx(cls, "Casting complex values to real discards " "the imaginary part", 1); - Py_XDECREF(cls); if (ret < 0) { return NPY_FAIL; } diff --git a/numpy/core/src/multiarray/mapping.c b/numpy/core/src/multiarray/mapping.c index 7047304eb..43dbde2f1 100644 --- a/numpy/core/src/multiarray/mapping.c +++ b/numpy/core/src/multiarray/mapping.c @@ -2689,7 +2689,7 @@ PyArray_MapIterNew(npy_index_info *indices , int index_num, int index_type, if (mit->numiter == 0) { /* * For MapIterArray, it is possible that there is no fancy index. - * to support this case, add a a dummy iterator. + * to support this case, add a dummy iterator. * Since it is 0-d its transpose, etc. does not matter. */ diff --git a/numpy/core/src/multiarray/methods.c b/numpy/core/src/multiarray/methods.c index e2026ec1c..262514ec6 100644 --- a/numpy/core/src/multiarray/methods.c +++ b/numpy/core/src/multiarray/methods.c @@ -54,33 +54,6 @@ NpyArg_ParseKeywords(PyObject *keys, const char *format, char **kwlist, ...) 
return ret; } -static PyObject * -get_forwarding_ndarray_method(const char *name) -{ - PyObject *module_methods, *callable; - - /* Get a reference to the function we're calling */ - module_methods = PyImport_ImportModule("numpy.core._methods"); - if (module_methods == NULL) { - return NULL; - } - callable = _PyDict_GetItemStringWithError(PyModule_GetDict(module_methods), name); - if (callable == NULL && PyErr_Occurred()) { - Py_DECREF(module_methods); - return NULL; - } - if (callable == NULL) { - Py_DECREF(module_methods); - PyErr_Format(PyExc_RuntimeError, - "NumPy internal error: could not find function " - "numpy.core._methods.%s", name); - } - else { - Py_INCREF(callable); - } - Py_DECREF(module_methods); - return callable; -} /* * Forwards an ndarray method to a the Python function @@ -121,11 +94,9 @@ forward_ndarray_method(PyArrayObject *self, PyObject *args, PyObject *kwds, */ #define NPY_FORWARD_NDARRAY_METHOD(name) \ static PyObject *callable = NULL; \ + npy_cache_import("numpy.core._methods", name, &callable); \ if (callable == NULL) { \ - callable = get_forwarding_ndarray_method(name); \ - if (callable == NULL) { \ - return NULL; \ - } \ + return NULL; \ } \ return forward_ndarray_method(self, args, kwds, callable) @@ -146,8 +117,15 @@ array_take(PyArrayObject *self, PyObject *args, PyObject *kwds) PyArray_ClipmodeConverter, &mode)) return NULL; - return PyArray_Return((PyArrayObject *) - PyArray_TakeFrom(self, indices, dimension, out, mode)); + PyObject *ret = PyArray_TakeFrom(self, indices, dimension, out, mode); + + /* this matches the unpacking behavior of ufuncs */ + if (out == NULL) { + return PyArray_Return((PyArrayObject *)ret); + } + else { + return ret; + } } static PyObject * @@ -303,7 +281,15 @@ array_argmax(PyArrayObject *self, PyObject *args, PyObject *kwds) PyArray_OutputConverter, &out)) return NULL; - return PyArray_Return((PyArrayObject *)PyArray_ArgMax(self, axis, out)); + PyObject *ret = PyArray_ArgMax(self, axis, out); + + /* this matches the unpacking behavior of ufuncs */ + if (out == NULL) { + return PyArray_Return((PyArrayObject *)ret); + } + else { + return ret; + } } static PyObject * @@ -318,7 +304,15 @@ array_argmin(PyArrayObject *self, PyObject *args, PyObject *kwds) PyArray_OutputConverter, &out)) return NULL; - return PyArray_Return((PyArrayObject *)PyArray_ArgMin(self, axis, out)); + PyObject *ret = PyArray_ArgMin(self, axis, out); + + /* this matches the unpacking behavior of ufuncs */ + if (out == NULL) { + return PyArray_Return((PyArrayObject *)ret); + } + else { + return ret; + } } static PyObject * @@ -1218,7 +1212,15 @@ array_choose(PyArrayObject *self, PyObject *args, PyObject *kwds) return NULL; } - return PyArray_Return((PyArrayObject *)PyArray_Choose(self, choices, out, clipmode)); + PyObject *ret = PyArray_Choose(self, choices, out, clipmode); + + /* this matches the unpacking behavior of ufuncs */ + if (out == NULL) { + return PyArray_Return((PyArrayObject *)ret); + } + else { + return ret; + } } static PyObject * @@ -2319,8 +2321,16 @@ array_compress(PyArrayObject *self, PyObject *args, PyObject *kwds) PyArray_OutputConverter, &out)) { return NULL; } - return PyArray_Return( - (PyArrayObject *)PyArray_Compress(self, condition, axis, out)); + + PyObject *ret = PyArray_Compress(self, condition, axis, out); + + /* this matches the unpacking behavior of ufuncs */ + if (out == NULL) { + return PyArray_Return((PyArrayObject *)ret); + } + else { + return ret; + } } @@ -2355,7 +2365,15 @@ array_trace(PyArrayObject *self, PyObject *args, 
PyObject *kwds) rtype = _CHKTYPENUM(dtype); Py_XDECREF(dtype); - return PyArray_Return((PyArrayObject *)PyArray_Trace(self, offset, axis1, axis2, rtype, out)); + PyObject *ret = PyArray_Trace(self, offset, axis1, axis2, rtype, out); + + /* this matches the unpacking behavior of ufuncs */ + if (out == NULL) { + return PyArray_Return((PyArrayObject *)ret); + } + else { + return ret; + } } #undef _CHKTYPENUM @@ -2440,7 +2458,16 @@ array_round(PyArrayObject *self, PyObject *args, PyObject *kwds) PyArray_OutputConverter, &out)) { return NULL; } - return PyArray_Return((PyArrayObject *)PyArray_Round(self, decimals, out)); + + PyObject *ret = PyArray_Round(self, decimals, out); + + /* this matches the unpacking behavior of ufuncs */ + if (out == NULL) { + return PyArray_Return((PyArrayObject *)ret); + } + else { + return ret; + } } diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c index 4c316052d..6915371d8 100644 --- a/numpy/core/src/multiarray/multiarraymodule.c +++ b/numpy/core/src/multiarray/multiarraymodule.c @@ -2438,7 +2438,6 @@ einsum_list_to_subscripts(PyObject *obj, char *subscripts, int subsize) } size = PySequence_Size(obj); - for (i = 0; i < size; ++i) { item = PySequence_Fast_GET_ITEM(obj, i); /* Ellipsis */ @@ -2461,8 +2460,16 @@ einsum_list_to_subscripts(PyObject *obj, char *subscripts, int subsize) ellipsis = 1; } /* Subscript */ - else if (PyInt_Check(item) || PyLong_Check(item)) { - long s = PyInt_AsLong(item); + else { + npy_intp s = PyArray_PyIntAsIntp(item); + /* Invalid */ + if (error_converting(s)) { + PyErr_SetString(PyExc_TypeError, + "each subscript must be either an integer " + "or an ellipsis"); + Py_DECREF(obj); + return -1; + } npy_bool bad_input = 0; if (subindex + 1 >= subsize) { @@ -2472,7 +2479,7 @@ einsum_list_to_subscripts(PyObject *obj, char *subscripts, int subsize) return -1; } - if ( s < 0 ) { + if (s < 0) { bad_input = 1; } else if (s < 26) { @@ -2490,16 +2497,9 @@ einsum_list_to_subscripts(PyObject *obj, char *subscripts, int subsize) "subscript is not within the valid range [0, 52)"); Py_DECREF(obj); return -1; - } - } - /* Invalid */ - else { - PyErr_SetString(PyExc_ValueError, - "each subscript must be either an integer " - "or an ellipsis"); - Py_DECREF(obj); - return -1; + } } + } Py_DECREF(obj); diff --git a/numpy/core/src/umath/scalarmath.c.src b/numpy/core/src/umath/scalarmath.c.src index bb2915e09..90cc7a513 100644 --- a/numpy/core/src/umath/scalarmath.c.src +++ b/numpy/core/src/umath/scalarmath.c.src @@ -16,6 +16,7 @@ #include "numpy/ufuncobject.h" #include "numpy/arrayscalars.h" +#include "npy_import.h" #include "npy_pycompat.h" #include "numpy/halffloat.h" @@ -1339,13 +1340,9 @@ static int emit_complexwarning(void) { static PyObject *cls = NULL; + npy_cache_import("numpy.core", "ComplexWarning", &cls); if (cls == NULL) { - PyObject *mod; - mod = PyImport_ImportModule("numpy.core"); - assert(mod != NULL); - cls = PyObject_GetAttrString(mod, "ComplexWarning"); - assert(cls != NULL); - Py_DECREF(mod); + return -1; } return PyErr_WarnEx(cls, "Casting complex values to real discards the imaginary part", 1); diff --git a/numpy/core/tests/test_einsum.py b/numpy/core/tests/test_einsum.py index 68491681a..da84735a0 100644 --- a/numpy/core/tests/test_einsum.py +++ b/numpy/core/tests/test_einsum.py @@ -274,6 +274,13 @@ class TestEinsum: assert_equal(np.einsum(a, [0, 0], optimize=do_opt), np.trace(a).astype(dtype)) + # gh-15961: should accept numpy int64 type in subscript list + np_array = 
np.asarray([0, 0]) + assert_equal(np.einsum(a, np_array, optimize=do_opt), + np.trace(a).astype(dtype)) + assert_equal(np.einsum(a, list(np_array), optimize=do_opt), + np.trace(a).astype(dtype)) + # multiply(a, b) assert_equal(np.einsum("..., ...", 3, 4), 12) # scalar case for n in range(1, 17): diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py index f36c27c6c..a698370b6 100644 --- a/numpy/core/tests/test_multiarray.py +++ b/numpy/core/tests/test_multiarray.py @@ -1582,6 +1582,11 @@ class TestMethods: # gh-12031, caused SEGFAULT assert_raises(TypeError, oned.choose,np.void(0), [oned]) + out = np.array(0) + ret = np.choose(np.array(1), [10, 20, 30], out=out) + assert out is ret + assert_equal(out[()], 20) + # gh-6272 check overlap on out x = np.arange(5) y = np.choose([0,0,0], [x[:3], x[:3], x[:3]], out=x[1:4], mode='wrap') @@ -1658,7 +1663,7 @@ class TestMethods: out = np.zeros_like(arr) res = arr.round(*round_args, out=out) assert_equal(out, expected) - assert_equal(out, res) + assert out is res check_round(np.array([1.2, 1.5]), [1, 2]) check_round(np.array(1.5), 2) @@ -3023,6 +3028,10 @@ class TestMethods: assert_equal(b.trace(0, 1, 2), [3, 11]) assert_equal(b.trace(offset=1, axis1=0, axis2=2), [1, 3]) + out = np.array(1) + ret = a.trace(out=out) + assert ret is out + def test_trace_subclass(self): # The class would need to overwrite trace to ensure single-element # output also has the right subclass. @@ -4126,6 +4135,13 @@ class TestArgmax: a.argmax(-1, out=out) assert_equal(out, a.argmax(-1)) + @pytest.mark.parametrize('ndim', [0, 1]) + def test_ret_is_out(self, ndim): + a = np.ones((4,) + (3,)*ndim) + out = np.empty((3,)*ndim, dtype=np.intp) + ret = a.argmax(axis=0, out=out) + assert ret is out + def test_argmax_unicode(self): d = np.zeros(6031, dtype='<U9') d[5942] = "as" @@ -4275,6 +4291,13 @@ class TestArgmin: a.argmin(-1, out=out) assert_equal(out, a.argmin(-1)) + @pytest.mark.parametrize('ndim', [0, 1]) + def test_ret_is_out(self, ndim): + a = np.ones((4,) + (3,)*ndim) + out = np.empty((3,)*ndim, dtype=np.intp) + ret = a.argmin(axis=0, out=out) + assert ret is out + def test_argmin_unicode(self): d = np.ones(6031, dtype='<U9') d[6001] = "0" @@ -4552,6 +4575,16 @@ class TestTake: y = np.take(x, [1, 2, 3], out=x[2:5], mode='wrap') assert_equal(y, np.array([1, 2, 3])) + @pytest.mark.parametrize('shape', [(1, 2), (1,), ()]) + def test_ret_is_out(self, shape): + # 0d arrays should not be an exception to this rule + x = np.arange(5) + inds = np.zeros(shape, dtype=np.intp) + out = np.zeros(shape, dtype=x.dtype) + ret = np.take(x, inds, out=out) + assert ret is out + + class TestLexsort: @pytest.mark.parametrize('dtype',[ np.uint8, np.uint16, np.uint32, np.uint64, diff --git a/numpy/core/tests/test_umath.py b/numpy/core/tests/test_umath.py index 10c652ad4..e7965c0ca 100644 --- a/numpy/core/tests/test_umath.py +++ b/numpy/core/tests/test_umath.py @@ -639,6 +639,12 @@ class TestLogAddExp2(_FilterInvalids): assert_(np.isnan(np.logaddexp2(0, np.nan))) assert_(np.isnan(np.logaddexp2(np.nan, np.nan))) + def test_reduce(self): + assert_equal(np.logaddexp2.identity, -np.inf) + assert_equal(np.logaddexp2.reduce([]), -np.inf) + assert_equal(np.logaddexp2.reduce([-np.inf]), -np.inf) + assert_equal(np.logaddexp2.reduce([-np.inf, 0]), 0) + class TestLog: def test_log_values(self): diff --git a/numpy/lib/financial.py b/numpy/lib/financial.py index b055bb1ec..709a79dc0 100644 --- a/numpy/lib/financial.py +++ b/numpy/lib/financial.py @@ -466,7 +466,7 @@ def 
_rbl(rate, per, pmt, pv, when): This function is here to simply have a different name for the 'fv' function to not interfere with the 'fv' keyword argument within the 'ipmt' function. It is the 'remaining balance on loan' which might be useful as - it's own function, but is easily calculated with the 'fv' function. + its own function, but is easily calculated with the 'fv' function. """ return fv(rate, (per - 1), pmt, pv, when) diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index dea01d12d..2d1adc362 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -3943,42 +3943,29 @@ def _quantile_ureduce_func(a, q, axis=None, out=None, overwrite_input=False, if np.issubdtype(a.dtype, np.inexact): indices_above = concatenate((indices_above, [-1])) - weights_above = indices - indices_below - weights_below = 1 - weights_above - - weights_shape = [1, ] * ap.ndim - weights_shape[axis] = len(indices) - weights_below.shape = weights_shape - weights_above.shape = weights_shape - ap.partition(concatenate((indices_below, indices_above)), axis=axis) # ensure axis with q-th is first ap = np.moveaxis(ap, axis, 0) - weights_below = np.moveaxis(weights_below, axis, 0) - weights_above = np.moveaxis(weights_above, axis, 0) axis = 0 + weights_shape = [1] * ap.ndim + weights_shape[axis] = len(indices) + weights_above = (indices - indices_below).reshape(weights_shape) + # Check if the array contains any nan's if np.issubdtype(a.dtype, np.inexact): indices_above = indices_above[:-1] n = np.isnan(ap[-1:, ...]) - x1 = take(ap, indices_below, axis=axis) * weights_below + x1 = take(ap, indices_below, axis=axis) * (1 - weights_above) x2 = take(ap, indices_above, axis=axis) * weights_above - # ensure axis with q-th is first - x1 = np.moveaxis(x1, axis, 0) - x2 = np.moveaxis(x2, axis, 0) - if zerod: x1 = x1.squeeze(0) x2 = x2.squeeze(0) - if out is not None: - r = add(x1, x2, out=out) - else: - r = add(x1, x2) + r = add(x1, x2, out=out) if np.any(n): if zerod: diff --git a/numpy/lib/shape_base.py b/numpy/lib/shape_base.py index 72a7f79d7..78703555e 100644 --- a/numpy/lib/shape_base.py +++ b/numpy/lib/shape_base.py @@ -688,10 +688,12 @@ def dstack(tup): See Also -------- - stack : Join a sequence of arrays along a new axis. - vstack : Stack along first axis. - hstack : Stack along second axis. concatenate : Join a sequence of arrays along an existing axis. + stack : Join a sequence of arrays along a new axis. + block : Assemble an nd-array from nested lists of blocks. + vstack : Stack arrays in sequence vertically (row wise). + hstack : Stack arrays in sequence horizontally (column wise). + column_stack : Stack 1-D arrays as columns into a 2-D array. dsplit : Split array along third axis. 
Examples diff --git a/numpy/ma/core.py b/numpy/ma/core.py index a7214f9bf..8d612b8ed 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -191,14 +191,17 @@ for v in ["Y", "M", "W", "D", "h", "m", "s", "ms", "us", "ns", "ps", default_filler["M8[" + v + "]"] = np.datetime64("NaT", v) default_filler["m8[" + v + "]"] = np.timedelta64("NaT", v) +float_types_list = [np.half, np.single, np.double, np.longdouble, + np.csingle, np.cdouble, np.clongdouble] max_filler = ntypes._minvals -max_filler.update([(k, -np.inf) for k in [np.float32, np.float64]]) +max_filler.update([(k, -np.inf) for k in float_types_list[:4]]) +max_filler.update([(k, complex(-np.inf, -np.inf)) for k in float_types_list[-3:]]) + min_filler = ntypes._maxvals -min_filler.update([(k, +np.inf) for k in [np.float32, np.float64]]) -if 'float128' in ntypes.typeDict: - max_filler.update([(np.float128, -np.inf)]) - min_filler.update([(np.float128, +np.inf)]) +min_filler.update([(k, +np.inf) for k in float_types_list[:4]]) +min_filler.update([(k, complex(+np.inf, +np.inf)) for k in float_types_list[-3:]]) +del float_types_list def _recursive_fill_value(dtype, f): """ @@ -5860,6 +5863,14 @@ class MaskedArray(ndarray): Return (maximum - minimum) along the given dimension (i.e. peak-to-peak value). + .. warning:: + `ptp` preserves the data type of the array. This means the + return value for an input of signed integers with n bits + (e.g. `np.int8`, `np.int16`, etc) is also a signed integer + with n bits. In that case, peak-to-peak values greater than + ``2**(n-1)-1`` will be returned as negative values. An example + with a work-around is shown below. + Parameters ---------- axis : {None, int}, optional @@ -5882,6 +5893,45 @@ class MaskedArray(ndarray): A new array holding the result, unless ``out`` was specified, in which case a reference to ``out`` is returned. + Examples + -------- + >>> x = np.ma.MaskedArray([[4, 9, 2, 10], + ... [6, 9, 7, 12]]) + + >>> x.ptp(axis=1) + masked_array(data=[8, 6], + mask=False, + fill_value=999999) + + >>> x.ptp(axis=0) + masked_array(data=[2, 0, 5, 2], + mask=False, + fill_value=999999) + + >>> x.ptp() + 10 + + This example shows that a negative value can be returned when + the input is an array of signed integers. + + >>> y = np.ma.MaskedArray([[1, 127], + ... [0, 127], + ... [-1, 127], + ... [-2, 127]], dtype=np.int8) + >>> y.ptp(axis=1) + masked_array(data=[ 126, 127, -128, -127], + mask=False, + fill_value=999999, + dtype=int8) + + A work-around is to use the `view()` method to view the result as + unsigned integers with the same bit width: + + >>> y.ptp(axis=1).view(np.uint8) + masked_array(data=[126, 127, 128, 129], + mask=False, + fill_value=999999, + dtype=uint8) """ if out is None: result = self.max(axis=axis, fill_value=fill_value, diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py index 98fc7dd97..6f34144bb 100644 --- a/numpy/ma/tests/test_core.py +++ b/numpy/ma/tests/test_core.py @@ -1245,6 +1245,48 @@ class TestMaskedArrayArithmetic: assert_(x.max() is masked) assert_(x.ptp() is masked) + def test_minmax_dtypes(self): + # Additional tests on max/min for non-standard float and complex dtypes + x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.]) + a10 = 10. 
+ an10 = -10.0 + m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] + xm = masked_array(x, mask=m1) + xm.set_fill_value(1e+20) + float_dtypes = [np.half, np.single, np.double, + np.longdouble, np.cfloat, np.cdouble, np.clongdouble] + for float_dtype in float_dtypes: + assert_equal(masked_array(x, mask=m1, dtype=float_dtype).max(), + float_dtype(a10)) + assert_equal(masked_array(x, mask=m1, dtype=float_dtype).min(), + float_dtype(an10)) + + assert_equal(xm.min(), an10) + assert_equal(xm.max(), a10) + + # Non-complex type only test + for float_dtype in float_dtypes[:4]: + assert_equal(masked_array(x, mask=m1, dtype=float_dtype).max(), + float_dtype(a10)) + assert_equal(masked_array(x, mask=m1, dtype=float_dtype).min(), + float_dtype(an10)) + + # Complex types only test + for float_dtype in float_dtypes[-3:]: + ym = masked_array([1e20+1j, 1e20-2j, 1e20-1j], mask=[0, 1, 0], + dtype=float_dtype) + assert_equal(ym.min(), float_dtype(1e20-1j)) + assert_equal(ym.max(), float_dtype(1e20+1j)) + + zm = masked_array([np.inf+2j, np.inf+3j, -np.inf-1j], mask=[0, 1, 0], + dtype=float_dtype) + assert_equal(zm.min(), float_dtype(-np.inf-1j)) + assert_equal(zm.max(), float_dtype(np.inf+2j)) + + cmax = np.inf - 1j * np.finfo(np.float64).max + assert masked_array([-cmax, 0], mask=[0, 1]).max() == -cmax + assert masked_array([cmax, 0], mask=[0, 1]).min() == cmax + def test_addsumprod(self): # Tests add, sum, product. (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d diff --git a/numpy/matrixlib/defmatrix.py b/numpy/matrixlib/defmatrix.py index d1a1211aa..a13ff23f4 100644 --- a/numpy/matrixlib/defmatrix.py +++ b/numpy/matrixlib/defmatrix.py @@ -1024,8 +1024,8 @@ def _from_string(str, gdict, ldict): except KeyError: try: thismat = gdict[col] - except KeyError: - raise KeyError("%s not found" % (col,)) + except KeyError as e: + raise NameError(f"name {col!r} is not defined") from None coltup.append(thismat) rowtup.append(concatenate(coltup, axis=-1)) |
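The generate_umath.py change above gives logaddexp2 an identity of minus infinity (logaddexp2(-inf, x) == x), which is what makes reductions over empty or single-element inputs well defined. A minimal sketch of the resulting behaviour, mirroring the new TestLogAddExp2.test_reduce case:

>>> import numpy as np
>>> np.logaddexp2.identity
-inf
>>> np.logaddexp2.reduce([])            # empty reduce falls back to the identity
-inf
>>> np.logaddexp2.reduce([-np.inf, 0])  # -inf behaves as a neutral element
0.0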
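The einsumfunc.py and multiarraymodule.c changes replace the isinstance/PyInt_Check tests with operator.index and PyArray_PyIntAsIntp, so the subscript-list form of einsum accepts any integer-like object, not just built-in ints (gh-15961). A short sketch of what the new test exercises:

>>> import numpy as np
>>> a = np.arange(9).reshape(3, 3)
>>> np.einsum(a, [0, 0])                # built-in ints already worked
12
>>> np.einsum(a, np.array([0, 0]))      # np.int64 subscripts are now accepted too
12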
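The finfo docstring change clarifies that ``tiny`` is the smallest positive number with full (normal) precision, not the smallest representable value; subnormal numbers fill the gap below it at reduced precision. A quick illustration for float64:

>>> import numpy as np
>>> np.finfo(np.float64).tiny           # smallest positive normal double
2.2250738585072014e-308
>>> np.nextafter(0., 1.)                # smallest positive subnormal is far smaller
5e-324
>>> np.nextafter(0., 1.) < np.finfo(np.float64).tiny
True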
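The methods.c changes make argmax, argmin, take, choose, compress, trace and round return the ``out`` array itself when ``out=`` is supplied, instead of unpacking 0-d results to scalars, matching ufunc behaviour. A sketch based on the new tests in test_multiarray.py:

>>> import numpy as np
>>> a = np.ones((4, 3))
>>> out = np.empty(3, dtype=np.intp)
>>> a.argmax(axis=0, out=out) is out
True
>>> out0 = np.array(0)
>>> np.choose(np.array(1), [10, 20, 30], out=out0) is out0   # 0-d out is not unpacked
True
>>> out0[()]
20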
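The _quantile_ureduce_func refactor in function_base.py keeps the same linear-interpolation arithmetic; it now computes a single weights_above array and uses ``1 - weights_above`` for the lower neighbour. A worked example of that interpolation (default 'linear' mode), with the arithmetic spelled out by hand for comparison:

>>> import numpy as np
>>> a = np.array([1., 2., 3., 4., 5.])
>>> q = 0.375
>>> pos = q * (a.size - 1)              # 1.5: halfway between indices 1 and 2
>>> below, above = int(np.floor(pos)), int(np.ceil(pos))
>>> w_above = pos - below               # 0.5
>>> a[below] * (1 - w_above) + a[above] * w_above
2.5
>>> np.quantile(a, q)                   # the library routine agrees
2.5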
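The defmatrix.py change turns the KeyError raised for an unknown name in a matrix string into a NameError, so the failure reads like any other undefined Python name. A sketch, assuming ``B`` is deliberately left undefined in the calling namespace:

>>> import numpy as np
>>> A = np.matrix('1 2; 3 4')
>>> np.bmat('A B')
Traceback (most recent call last):
    ...
NameError: name 'B' is not defined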