summaryrefslogtreecommitdiff
path: root/numpy
diff options
context:
space:
mode:
authorFrançois Le Lay <mfworx@gmail.com>2021-02-17 07:24:39 -0500
committerGitHub <noreply@github.com>2021-02-17 07:24:39 -0500
commitdd20b6241590622e847eb4538a224e87eff483f9 (patch)
treece19093db8a459c59819d64e887bb893f5ed11c6 /numpy
parent98bf466b42aea9bb804275af6f11d1c7cfdebbad (diff)
parent6f65e1fc25f265ff36bdbfb3aa482f65fd84a684 (diff)
downloadnumpy-dd20b6241590622e847eb4538a224e87eff483f9.tar.gz
Merge branch 'master' into random-standard_t-doc
Diffstat (limited to 'numpy')
-rw-r--r--numpy/__init__.pyi19
-rw-r--r--numpy/core/code_generators/ufunc_docstrings.py4
-rw-r--r--numpy/core/einsumfunc.pyi138
-rw-r--r--numpy/core/numeric.py6
-rw-r--r--numpy/core/src/common/lowlevel_strided_loops.h3
-rw-r--r--numpy/core/src/multiarray/dtype_transfer.c326
-rw-r--r--numpy/core/src/multiarray/item_selection.c233
-rw-r--r--numpy/core/src/multiarray/multiarraymodule.c7
-rw-r--r--numpy/core/tests/test_nditer.py31
-rw-r--r--numpy/core/tests/test_numeric.py18
-rw-r--r--numpy/distutils/ccompiler.py6
-rw-r--r--numpy/distutils/command/config.py2
-rw-r--r--numpy/distutils/misc_util.py5
-rw-r--r--numpy/f2py/cb_rules.py8
-rwxr-xr-xnumpy/f2py/crackfortran.py23
-rw-r--r--numpy/f2py/tests/test_callback.py48
-rw-r--r--numpy/f2py/tests/test_crackfortran.py24
-rw-r--r--numpy/lib/__init__.pyi9
-rw-r--r--numpy/lib/arraysetops.py25
-rw-r--r--numpy/lib/histograms.py3
-rw-r--r--numpy/lib/shape_base.py2
-rw-r--r--numpy/lib/tests/test_arraysetops.py79
-rw-r--r--numpy/lib/ufunclike.py6
-rw-r--r--numpy/lib/ufunclike.pyi50
-rw-r--r--numpy/lib/utils.py7
-rwxr-xr-xnumpy/linalg/lapack_lite/make_lite.py4
-rw-r--r--numpy/linalg/tests/test_build.py4
-rw-r--r--numpy/linalg/tests/test_linalg.py4
-rw-r--r--numpy/random/__init__.pyi17
-rw-r--r--numpy/random/_common.pyx4
-rw-r--r--numpy/random/_generator.pyi534
-rw-r--r--numpy/random/_generator.pyx18
-rw-r--r--numpy/random/_mt19937.pyi28
-rw-r--r--numpy/random/_pcg64.pyi34
-rw-r--r--numpy/random/_philox.pyi42
-rw-r--r--numpy/random/_philox.pyx5
-rw-r--r--numpy/random/_sfc64.pyi34
-rw-r--r--numpy/random/bit_generator.pyi124
-rw-r--r--numpy/random/bit_generator.pyx9
-rw-r--r--numpy/random/tests/test_generator_mt19937.py8
-rw-r--r--numpy/typing/__init__.py1
-rw-r--r--numpy/typing/_char_codes.py4
-rw-r--r--numpy/typing/_dtype_like.py8
-rw-r--r--numpy/typing/tests/data/fail/einsumfunc.py15
-rw-r--r--numpy/typing/tests/data/fail/ufunclike.py21
-rw-r--r--numpy/typing/tests/data/pass/einsumfunc.py37
-rw-r--r--numpy/typing/tests/data/pass/ufunclike.py48
-rw-r--r--numpy/typing/tests/data/reveal/einsumfunc.py32
-rw-r--r--numpy/typing/tests/data/reveal/ufunclike.py29
-rw-r--r--numpy/typing/tests/test_typing.py2
50 files changed, 1683 insertions, 465 deletions
diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi
index 19711d80b..1c52c7285 100644
--- a/numpy/__init__.pyi
+++ b/numpy/__init__.pyi
@@ -1,4 +1,5 @@
import builtins
+import os
import sys
import datetime as dt
from abc import abstractmethod
@@ -280,6 +281,11 @@ from numpy.core.arrayprint import (
printoptions as printoptions,
)
+from numpy.core.einsumfunc import (
+ einsum as einsum,
+ einsum_path as einsum_path,
+)
+
from numpy.core.numeric import (
zeros_like as zeros_like,
ones as ones,
@@ -332,6 +338,12 @@ from numpy.core.shape_base import (
vstack as vstack,
)
+from numpy.lib.ufunclike import (
+ fix as fix,
+ isposinf as isposinf,
+ isneginf as isneginf,
+)
+
__all__: List[str]
__path__: List[str]
__version__: str
@@ -394,14 +406,11 @@ dot: Any
dsplit: Any
dstack: Any
ediff1d: Any
-einsum: Any
-einsum_path: Any
expand_dims: Any
extract: Any
eye: Any
fill_diagonal: Any
finfo: Any
-fix: Any
flip: Any
fliplr: Any
flipud: Any
@@ -437,8 +446,6 @@ is_busday: Any
iscomplex: Any
iscomplexobj: Any
isin: Any
-isneginf: Any
-isposinf: Any
isreal: Any
isrealobj: Any
iterable: Any
@@ -904,7 +911,7 @@ class _ArrayOrScalarCommon:
# NOTE: `tostring()` is deprecated and therefore excluded
# def tostring(self, order=...): ...
def tofile(
- self, fid: Union[IO[bytes], str], sep: str = ..., format: str = ...
+ self, fid: Union[IO[bytes], str, bytes, os.PathLike[Any]], sep: str = ..., format: str = ...
) -> None: ...
# generics and 0d arrays return builtin scalars
def tolist(self) -> Any: ...
diff --git a/numpy/core/code_generators/ufunc_docstrings.py b/numpy/core/code_generators/ufunc_docstrings.py
index 04181fbc2..2f75cb41f 100644
--- a/numpy/core/code_generators/ufunc_docstrings.py
+++ b/numpy/core/code_generators/ufunc_docstrings.py
@@ -2808,8 +2808,8 @@ add_newdoc('numpy.core.umath', 'matmul',
(9, 5, 7, 3)
>>> # n is 7, k is 4, m is 3
- The matmul function implements the semantics of the `@` operator introduced
- in Python 3.5 following PEP465.
+ The matmul function implements the semantics of the ``@`` operator introduced
+ in Python 3.5 following :pep:`465`.
Examples
--------
diff --git a/numpy/core/einsumfunc.pyi b/numpy/core/einsumfunc.pyi
new file mode 100644
index 000000000..b33aff29f
--- /dev/null
+++ b/numpy/core/einsumfunc.pyi
@@ -0,0 +1,138 @@
+import sys
+from typing import List, TypeVar, Optional, Any, overload, Union, Tuple, Sequence
+
+from numpy import (
+ ndarray,
+ dtype,
+ bool_,
+ unsignedinteger,
+ signedinteger,
+ floating,
+ complexfloating,
+ number,
+ _OrderKACF,
+)
+from numpy.typing import (
+ _ArrayOrScalar,
+ _ArrayLikeBool_co,
+ _ArrayLikeUInt_co,
+ _ArrayLikeInt_co,
+ _ArrayLikeFloat_co,
+ _ArrayLikeComplex_co,
+ _DTypeLikeBool,
+ _DTypeLikeUInt,
+ _DTypeLikeInt,
+ _DTypeLikeFloat,
+ _DTypeLikeComplex,
+ _DTypeLikeComplex_co,
+)
+
+if sys.version_info >= (3, 8):
+ from typing import Literal
+else:
+ from typing_extensions import Literal
+
+_ArrayType = TypeVar(
+ "_ArrayType",
+ bound=ndarray[Any, dtype[Union[bool_, number[Any]]]],
+)
+
+_OptimizeKind = Union[
+ None, bool, Literal["greedy", "optimal"], Sequence[Any]
+]
+_CastingSafe = Literal["no", "equiv", "safe", "same_kind"]
+_CastingUnsafe = Literal["unsafe"]
+
+__all__: List[str]
+
+# TODO: Properly handle the `casting`-based combinatorics
+@overload
+def einsum(
+ __subscripts: str,
+ *operands: _ArrayLikeBool_co,
+ out: None = ...,
+ dtype: Optional[_DTypeLikeBool] = ...,
+ order: _OrderKACF = ...,
+ casting: _CastingSafe = ...,
+ optimize: _OptimizeKind = ...,
+) -> _ArrayOrScalar[bool_]: ...
+@overload
+def einsum(
+ __subscripts: str,
+ *operands: _ArrayLikeUInt_co,
+ out: None = ...,
+ dtype: Optional[_DTypeLikeUInt] = ...,
+ order: _OrderKACF = ...,
+ casting: _CastingSafe = ...,
+ optimize: _OptimizeKind = ...,
+) -> _ArrayOrScalar[unsignedinteger[Any]]: ...
+@overload
+def einsum(
+ __subscripts: str,
+ *operands: _ArrayLikeInt_co,
+ out: None = ...,
+ dtype: Optional[_DTypeLikeInt] = ...,
+ order: _OrderKACF = ...,
+ casting: _CastingSafe = ...,
+ optimize: _OptimizeKind = ...,
+) -> _ArrayOrScalar[signedinteger[Any]]: ...
+@overload
+def einsum(
+ __subscripts: str,
+ *operands: _ArrayLikeFloat_co,
+ out: None = ...,
+ dtype: Optional[_DTypeLikeFloat] = ...,
+ order: _OrderKACF = ...,
+ casting: _CastingSafe = ...,
+ optimize: _OptimizeKind = ...,
+) -> _ArrayOrScalar[floating[Any]]: ...
+@overload
+def einsum(
+ __subscripts: str,
+ *operands: _ArrayLikeComplex_co,
+ out: None = ...,
+ dtype: Optional[_DTypeLikeComplex] = ...,
+ order: _OrderKACF = ...,
+ casting: _CastingSafe = ...,
+ optimize: _OptimizeKind = ...,
+) -> _ArrayOrScalar[complexfloating[Any, Any]]: ...
+@overload
+def einsum(
+ __subscripts: str,
+ *operands: Any,
+ casting: _CastingUnsafe,
+ dtype: Optional[_DTypeLikeComplex_co] = ...,
+ out: None = ...,
+ order: _OrderKACF = ...,
+ optimize: _OptimizeKind = ...,
+) -> _ArrayOrScalar[Any]: ...
+@overload
+def einsum(
+ __subscripts: str,
+ *operands: _ArrayLikeComplex_co,
+ out: _ArrayType,
+ dtype: Optional[_DTypeLikeComplex_co] = ...,
+ order: _OrderKACF = ...,
+ casting: _CastingSafe = ...,
+ optimize: _OptimizeKind = ...,
+) -> _ArrayType: ...
+@overload
+def einsum(
+ __subscripts: str,
+ *operands: Any,
+ out: _ArrayType,
+ casting: _CastingUnsafe,
+ dtype: Optional[_DTypeLikeComplex_co] = ...,
+ order: _OrderKACF = ...,
+ optimize: _OptimizeKind = ...,
+) -> _ArrayType: ...
+
+# NOTE: `einsum_call` is a hidden kwarg unavailable for public use.
+# It is therefore excluded from the signatures below.
+# NOTE: In practice the list consists of a `str` (first element)
+# and a variable number of integer tuples.
+def einsum_path(
+ __subscripts: str,
+ *operands: _ArrayLikeComplex_co,
+ optimize: _OptimizeKind = ...,
+) -> Tuple[List[Any], str]: ...
diff --git a/numpy/core/numeric.py b/numpy/core/numeric.py
index 89f56fa09..7675386e7 100644
--- a/numpy/core/numeric.py
+++ b/numpy/core/numeric.py
@@ -710,6 +710,7 @@ def correlate(a, v, mode='valid'):
--------
convolve : Discrete, linear convolution of two one-dimensional sequences.
multiarray.correlate : Old, no conjugate, version of correlate.
+ scipy.signal.correlate : uses FFT which has superior performance on large arrays.
Notes
-----
@@ -720,6 +721,11 @@ def correlate(a, v, mode='valid'):
which is related to ``c_{av}[k]`` by ``c'_{av}[k] = c_{av}[-k]``.
+ `numpy.correlate` may perform slowly in large arrays (i.e. n = 1e5) because it does
+ not use the FFT to compute the convolution; in that case, `scipy.signal.correlate` might
+ be preferable.
+
+
Examples
--------
>>> np.correlate([1, 2, 3], [0, 1, 0.5])
diff --git a/numpy/core/src/common/lowlevel_strided_loops.h b/numpy/core/src/common/lowlevel_strided_loops.h
index bda9bb5e4..014103f13 100644
--- a/numpy/core/src/common/lowlevel_strided_loops.h
+++ b/numpy/core/src/common/lowlevel_strided_loops.h
@@ -159,8 +159,7 @@ PyArray_GetDTypeCopySwapFn(int aligned,
* Should be the dst stride if it will always be the same,
* NPY_MAX_INTP otherwise.
* src_dtype:
- * The data type of source data. If this is NULL, a transfer
- * function which sets the destination to zeros is produced.
+ * The data type of source data. Must not be NULL.
* dst_dtype:
* The data type of destination data. If this is NULL and
* move_references is 1, a transfer function which decrements
diff --git a/numpy/core/src/multiarray/dtype_transfer.c b/numpy/core/src/multiarray/dtype_transfer.c
index 9b8e5f32f..ae3834e15 100644
--- a/numpy/core/src/multiarray/dtype_transfer.c
+++ b/numpy/core/src/multiarray/dtype_transfer.c
@@ -81,29 +81,6 @@ get_decsrcref_transfer_function(int aligned,
NpyAuxData **out_transferdata,
int *out_needs_api);
-/*
- * Returns a transfer function which zeros out the dest values.
- *
- * Returns NPY_SUCCEED or NPY_FAIL.
- */
-static int
-get_setdstzero_transfer_function(int aligned,
- npy_intp dst_stride,
- PyArray_Descr *dst_dtype,
- PyArray_StridedUnaryOp **out_stransfer,
- NpyAuxData **out_transferdata,
- int *out_needs_api);
-
-/*
- * Returns a transfer function which sets a boolean type to ones.
- *
- * Returns NPY_SUCCEED or NPY_FAIL.
- */
-NPY_NO_EXPORT int
-get_bool_setdstone_transfer_function(npy_intp dst_stride,
- PyArray_StridedUnaryOp **out_stransfer,
- NpyAuxData **out_transferdata,
- int *NPY_UNUSED(out_needs_api));
/*************************** COPY REFERENCES *******************************/
@@ -3125,69 +3102,6 @@ get_decsrcref_fields_transfer_function(int aligned,
return NPY_SUCCEED;
}
-static int
-get_setdestzero_fields_transfer_function(int aligned,
- npy_intp dst_stride,
- PyArray_Descr *dst_dtype,
- PyArray_StridedUnaryOp **out_stransfer,
- NpyAuxData **out_transferdata,
- int *out_needs_api)
-{
- PyObject *names, *key, *tup, *title;
- PyArray_Descr *dst_fld_dtype;
- npy_int i, names_size, field_count, structsize;
- int dst_offset;
- _field_transfer_data *data;
- _single_field_transfer *fields;
-
- names = dst_dtype->names;
- names_size = PyTuple_GET_SIZE(dst_dtype->names);
-
- field_count = names_size;
- structsize = sizeof(_field_transfer_data) +
- field_count * sizeof(_single_field_transfer);
- /* Allocate the data and populate it */
- data = (_field_transfer_data *)PyArray_malloc(structsize);
- if (data == NULL) {
- PyErr_NoMemory();
- return NPY_FAIL;
- }
- data->base.free = &_field_transfer_data_free;
- data->base.clone = &_field_transfer_data_clone;
- fields = &data->fields;
-
- for (i = 0; i < names_size; ++i) {
- key = PyTuple_GET_ITEM(names, i);
- tup = PyDict_GetItem(dst_dtype->fields, key);
- if (!PyArg_ParseTuple(tup, "Oi|O", &dst_fld_dtype,
- &dst_offset, &title)) {
- PyArray_free(data);
- return NPY_FAIL;
- }
- if (get_setdstzero_transfer_function(0,
- dst_stride,
- dst_fld_dtype,
- &fields[i].stransfer,
- &fields[i].data,
- out_needs_api) != NPY_SUCCEED) {
- for (i = i-1; i >= 0; --i) {
- NPY_AUXDATA_FREE(fields[i].data);
- }
- PyArray_free(data);
- return NPY_FAIL;
- }
- fields[i].src_offset = 0;
- fields[i].dst_offset = dst_offset;
- fields[i].src_itemsize = 0;
- }
-
- data->field_count = field_count;
-
- *out_stransfer = &_strided_to_strided_field_transfer;
- *out_transferdata = (NpyAuxData *)data;
-
- return NPY_SUCCEED;
-}
/************************* MASKED TRANSFER WRAPPER *************************/
@@ -3341,228 +3255,7 @@ _strided_masked_wrapper_transfer_function(
}
-/************************* DEST BOOL SETONE *******************************/
-
-static int
-_null_to_strided_set_bool_one(char *dst,
- npy_intp dst_stride,
- char *NPY_UNUSED(src), npy_intp NPY_UNUSED(src_stride),
- npy_intp N, npy_intp NPY_UNUSED(src_itemsize),
- NpyAuxData *NPY_UNUSED(data))
-{
- /* bool type is one byte, so can just use the char */
-
- while (N > 0) {
- *dst = 1;
-
- dst += dst_stride;
- --N;
- }
- return 0;
-}
-
-static int
-_null_to_contig_set_bool_one(char *dst,
- npy_intp NPY_UNUSED(dst_stride),
- char *NPY_UNUSED(src), npy_intp NPY_UNUSED(src_stride),
- npy_intp N, npy_intp NPY_UNUSED(src_itemsize),
- NpyAuxData *NPY_UNUSED(data))
-{
- /* bool type is one byte, so can just use the char */
-
- memset(dst, 1, N);
- return 0;
-}
-
-/* Only for the bool type, sets the destination to 1 */
-NPY_NO_EXPORT int
-get_bool_setdstone_transfer_function(npy_intp dst_stride,
- PyArray_StridedUnaryOp **out_stransfer,
- NpyAuxData **out_transferdata,
- int *NPY_UNUSED(out_needs_api))
-{
- if (dst_stride == 1) {
- *out_stransfer = &_null_to_contig_set_bool_one;
- }
- else {
- *out_stransfer = &_null_to_strided_set_bool_one;
- }
- *out_transferdata = NULL;
-
- return NPY_SUCCEED;
-}
-
-/*************************** DEST SETZERO *******************************/
-
-/* Sets dest to zero */
-typedef struct {
- NpyAuxData base;
- npy_intp dst_itemsize;
-} _dst_memset_zero_data;
-
-/* zero-padded data copy function */
-static NpyAuxData *_dst_memset_zero_data_clone(NpyAuxData *data)
-{
- _dst_memset_zero_data *newdata =
- (_dst_memset_zero_data *)PyArray_malloc(
- sizeof(_dst_memset_zero_data));
- if (newdata == NULL) {
- return NULL;
- }
-
- memcpy(newdata, data, sizeof(_dst_memset_zero_data));
-
- return (NpyAuxData *)newdata;
-}
-
-static int
-_null_to_strided_memset_zero(char *dst,
- npy_intp dst_stride,
- char *NPY_UNUSED(src), npy_intp NPY_UNUSED(src_stride),
- npy_intp N, npy_intp NPY_UNUSED(src_itemsize),
- NpyAuxData *data)
-{
- _dst_memset_zero_data *d = (_dst_memset_zero_data *)data;
- npy_intp dst_itemsize = d->dst_itemsize;
-
- while (N > 0) {
- memset(dst, 0, dst_itemsize);
- dst += dst_stride;
- --N;
- }
- return 0;
-}
-
-static int
-_null_to_contig_memset_zero(char *dst,
- npy_intp dst_stride,
- char *NPY_UNUSED(src), npy_intp NPY_UNUSED(src_stride),
- npy_intp N, npy_intp NPY_UNUSED(src_itemsize),
- NpyAuxData *data)
-{
- _dst_memset_zero_data *d = (_dst_memset_zero_data *)data;
- npy_intp dst_itemsize = d->dst_itemsize;
-
- memset(dst, 0, N*dst_itemsize);
- return 0;
-}
-
-static int
-_null_to_strided_reference_setzero(char *dst,
- npy_intp dst_stride,
- char *NPY_UNUSED(src), npy_intp NPY_UNUSED(src_stride),
- npy_intp N, npy_intp NPY_UNUSED(src_itemsize),
- NpyAuxData *NPY_UNUSED(data))
-{
- PyObject *dst_ref = NULL;
-
- while (N > 0) {
- memcpy(&dst_ref, dst, sizeof(dst_ref));
-
- /* Release the reference in dst and set it to NULL */
- NPY_DT_DBG_REFTRACE("dec dest ref (to set zero)", dst_ref);
- Py_XDECREF(dst_ref);
- memset(dst, 0, sizeof(PyObject *));
-
- dst += dst_stride;
- --N;
- }
- return 0;
-}
-
-NPY_NO_EXPORT int
-get_setdstzero_transfer_function(int aligned,
- npy_intp dst_stride,
- PyArray_Descr *dst_dtype,
- PyArray_StridedUnaryOp **out_stransfer,
- NpyAuxData **out_transferdata,
- int *out_needs_api)
-{
- _dst_memset_zero_data *data;
-
- /* If there are no references, just set the whole thing to zero */
- if (!PyDataType_REFCHK(dst_dtype)) {
- data = (_dst_memset_zero_data *)
- PyArray_malloc(sizeof(_dst_memset_zero_data));
- if (data == NULL) {
- PyErr_NoMemory();
- return NPY_FAIL;
- }
-
- data->base.free = (NpyAuxData_FreeFunc *)(&PyArray_free);
- data->base.clone = &_dst_memset_zero_data_clone;
- data->dst_itemsize = dst_dtype->elsize;
-
- if (dst_stride == data->dst_itemsize) {
- *out_stransfer = &_null_to_contig_memset_zero;
- }
- else {
- *out_stransfer = &_null_to_strided_memset_zero;
- }
- *out_transferdata = (NpyAuxData *)data;
- }
- /* If it's exactly one reference, use the decref function */
- else if (dst_dtype->type_num == NPY_OBJECT) {
- if (out_needs_api) {
- *out_needs_api = 1;
- }
-
- *out_stransfer = &_null_to_strided_reference_setzero;
- *out_transferdata = NULL;
- }
- /* If there are subarrays, need to wrap it */
- else if (PyDataType_HASSUBARRAY(dst_dtype)) {
- PyArray_Dims dst_shape = {NULL, -1};
- npy_intp dst_size = 1;
- PyArray_StridedUnaryOp *contig_stransfer;
- NpyAuxData *contig_data;
-
- if (out_needs_api) {
- *out_needs_api = 1;
- }
-
- if (!(PyArray_IntpConverter(dst_dtype->subarray->shape,
- &dst_shape))) {
- PyErr_SetString(PyExc_ValueError,
- "invalid subarray shape");
- return NPY_FAIL;
- }
- dst_size = PyArray_MultiplyList(dst_shape.ptr, dst_shape.len);
- npy_free_cache_dim_obj(dst_shape);
-
- /* Get a function for contiguous dst of the subarray type */
- if (get_setdstzero_transfer_function(aligned,
- dst_dtype->subarray->base->elsize,
- dst_dtype->subarray->base,
- &contig_stransfer, &contig_data,
- out_needs_api) != NPY_SUCCEED) {
- return NPY_FAIL;
- }
-
- if (wrap_transfer_function_n_to_n(contig_stransfer, contig_data,
- 0, dst_stride,
- 0, dst_dtype->subarray->base->elsize,
- dst_size,
- out_stransfer, out_transferdata) != NPY_SUCCEED) {
- NPY_AUXDATA_FREE(contig_data);
- return NPY_FAIL;
- }
- }
- /* If there are fields, need to do each field */
- else if (PyDataType_HASFIELDS(dst_dtype)) {
- if (out_needs_api) {
- *out_needs_api = 1;
- }
-
- return get_setdestzero_fields_transfer_function(aligned,
- dst_stride, dst_dtype,
- out_stransfer,
- out_transferdata,
- out_needs_api);
- }
-
- return NPY_SUCCEED;
-}
+/*************************** CLEAR SRC *******************************/
static int
_dec_src_ref_nop(char *NPY_UNUSED(dst),
@@ -3775,13 +3468,6 @@ PyArray_LegacyGetDTypeTransferFunction(int aligned,
return NPY_SUCCEED;
}
}
- else if (src_dtype == NULL) {
- return get_setdstzero_transfer_function(aligned,
- dst_dtype->elsize,
- dst_dtype,
- out_stransfer, out_transferdata,
- out_needs_api);
- }
src_itemsize = src_dtype->elsize;
dst_itemsize = dst_dtype->elsize;
@@ -4468,6 +4154,8 @@ PyArray_GetDTypeTransferFunction(int aligned,
NpyAuxData **out_transferdata,
int *out_needs_api)
{
+ assert(src_dtype != NULL);
+
#if NPY_USE_NEW_CASTINGIMPL
/*
* If one of the dtypes is NULL, we give back either a src decref
@@ -4492,13 +4180,6 @@ PyArray_GetDTypeTransferFunction(int aligned,
return NPY_SUCCEED;
}
}
- else if (src_dtype == NULL) {
- return get_setdstzero_transfer_function(aligned,
- dst_dtype->elsize,
- dst_dtype,
- out_stransfer, out_transferdata,
- out_needs_api);
- }
if (get_transferfunction_for_descrs(aligned,
src_stride, dst_stride,
@@ -4625,6 +4306,7 @@ PyArray_GetMaskedDTypeTransferFunction(int aligned,
/* TODO: Special case some important cases so they're fast */
/* Fall back to wrapping a non-masked transfer function */
+ assert(dst_dtype != NULL);
if (PyArray_GetDTypeTransferFunction(aligned,
src_stride, dst_stride,
src_dtype, dst_dtype,
diff --git a/numpy/core/src/multiarray/item_selection.c b/numpy/core/src/multiarray/item_selection.c
index 8e4b2ebe1..fb354ce54 100644
--- a/numpy/core/src/multiarray/item_selection.c
+++ b/numpy/core/src/multiarray/item_selection.c
@@ -2130,7 +2130,6 @@ count_nonzero_bytes_384(const npy_uint64 * w)
}
#if NPY_SIMD
-
/* Count the zero bytes between `*d` and `end`, updating `*d` to point to where to keep counting from. */
static NPY_INLINE NPY_GCC_OPT_3 npyv_u8
count_zero_bytes_u8(const npy_uint8 **d, const npy_uint8 *end, npy_uint8 max_count)
@@ -2166,18 +2165,18 @@ count_zero_bytes_u16(const npy_uint8 **d, const npy_uint8 *end, npy_uint16 max_c
}
return vsum16;
}
-
+#endif // NPY_SIMD
/*
* Counts the number of non-zero values in a raw array.
* The one loop process is shown below(take SSE2 with 128bits vector for example):
- * |------------16 lanes---------|
+ * |------------16 lanes---------|
*[vsum8] 255 255 255 ... 255 255 255 255 count_zero_bytes_u8: counting 255*16 elements
* !!
- * |------------8 lanes---------|
+ * |------------8 lanes---------|
*[vsum16] 65535 65535 65535 ... 65535 count_zero_bytes_u16: counting (2*16-1)*16 elements
* 65535 65535 65535 ... 65535
* !!
- * |------------4 lanes---------|
+ * |------------4 lanes---------|
*[sum_32_0] 65535 65535 65535 65535 count_nonzero_bytes
* 65535 65535 65535 65535
*[sum_32_1] 65535 65535 65535 65535
@@ -2186,40 +2185,143 @@ count_zero_bytes_u16(const npy_uint8 **d, const npy_uint8 *end, npy_uint16 max_c
* (2*16-1)*16
*/
static NPY_INLINE NPY_GCC_OPT_3 npy_intp
-count_nonzero_bytes(const npy_uint8 *d, npy_uintp unrollx)
+count_nonzero_u8(const char *data, npy_intp bstride, npy_uintp len)
{
- npy_intp zero_count = 0;
- const npy_uint8 *end = d + unrollx;
- while (d < end) {
- npyv_u16x2 vsum16 = count_zero_bytes_u16(&d, end, NPY_MAX_UINT16);
- npyv_u32x2 sum_32_0 = npyv_expand_u32_u16(vsum16.val[0]);
- npyv_u32x2 sum_32_1 = npyv_expand_u32_u16(vsum16.val[1]);
- zero_count += npyv_sum_u32(npyv_add_u32(
- npyv_add_u32(sum_32_0.val[0], sum_32_0.val[1]),
- npyv_add_u32(sum_32_1.val[0], sum_32_1.val[1])
- ));
- }
- return unrollx - zero_count;
+ npy_intp count = 0;
+ if (bstride == 1) {
+ #if NPY_SIMD
+ npy_uintp len_m = len & -npyv_nlanes_u8;
+ npy_uintp zcount = 0;
+ for (const char *end = data + len_m; data < end;) {
+ npyv_u16x2 vsum16 = count_zero_bytes_u16((const npy_uint8**)&data, (const npy_uint8*)end, NPY_MAX_UINT16);
+ npyv_u32x2 sum_32_0 = npyv_expand_u32_u16(vsum16.val[0]);
+ npyv_u32x2 sum_32_1 = npyv_expand_u32_u16(vsum16.val[1]);
+ zcount += npyv_sum_u32(npyv_add_u32(
+ npyv_add_u32(sum_32_0.val[0], sum_32_0.val[1]),
+ npyv_add_u32(sum_32_1.val[0], sum_32_1.val[1])
+ ));
+ }
+ len -= len_m;
+ count = len_m - zcount;
+ #else
+ if (!NPY_ALIGNMENT_REQUIRED || npy_is_aligned(data, sizeof(npy_uint64))) {
+ int step = 6 * sizeof(npy_uint64);
+ int left_bytes = len % step;
+ for (const char *end = data + len; data < end - left_bytes; data += step) {
+ count += count_nonzero_bytes_384((const npy_uint64 *)data);
+ }
+ len = left_bytes;
+ }
+ #endif // NPY_SIMD
+ }
+ for (; len > 0; --len, data += bstride) {
+ count += (*data != 0);
+ }
+ return count;
}
+static NPY_INLINE NPY_GCC_OPT_3 npy_intp
+count_nonzero_u16(const char *data, npy_intp bstride, npy_uintp len)
+{
+ npy_intp count = 0;
+#if NPY_SIMD
+ if (bstride == sizeof(npy_uint16)) {
+ npy_uintp zcount = 0, len_m = len & -npyv_nlanes_u16;
+ const npyv_u16 vone = npyv_setall_u16(1);
+ const npyv_u16 vzero = npyv_zero_u16();
+
+ for (npy_uintp lenx = len_m; lenx > 0;) {
+ npyv_u16 vsum16 = npyv_zero_u16();
+ npy_uintp max16 = PyArray_MIN(lenx, NPY_MAX_UINT16*npyv_nlanes_u16);
+
+ for (const char *end = data + max16*bstride; data < end; data += NPY_SIMD_WIDTH) {
+ npyv_u16 mask = npyv_cvt_u16_b16(npyv_cmpeq_u16(npyv_load_u16((npy_uint16*)data), vzero));
+ mask = npyv_and_u16(mask, vone);
+ vsum16 = npyv_add_u16(vsum16, mask);
+ }
+ lenx -= max16;
+ zcount += npyv_sumup_u16(vsum16);
+ }
+ len -= len_m;
+ count = len_m - zcount;
+ }
+#endif
+ for (; len > 0; --len, data += bstride) {
+ count += (*(npy_uint16*)data != 0);
+ }
+ return count;
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 npy_intp
+count_nonzero_u32(const char *data, npy_intp bstride, npy_uintp len)
+{
+ npy_intp count = 0;
+#if NPY_SIMD
+ if (bstride == sizeof(npy_uint32)) {
+ const npy_uintp max_iter = NPY_MAX_UINT32*npyv_nlanes_u32;
+ const npy_uintp len_m = (len > max_iter ? max_iter : len) & -npyv_nlanes_u32;
+ const npyv_u32 vone = npyv_setall_u32(1);
+ const npyv_u32 vzero = npyv_zero_u32();
+
+ npyv_u32 vsum32 = npyv_zero_u32();
+ for (const char *end = data + len_m*bstride; data < end; data += NPY_SIMD_WIDTH) {
+ npyv_u32 mask = npyv_cvt_u32_b32(npyv_cmpeq_u32(npyv_load_u32((npy_uint32*)data), vzero));
+ mask = npyv_and_u32(mask, vone);
+ vsum32 = npyv_add_u32(vsum32, mask);
+ }
+ const npyv_u32 maskevn = npyv_reinterpret_u32_u64(npyv_setall_u64(0xffffffffULL));
+ npyv_u64 odd = npyv_shri_u64(npyv_reinterpret_u64_u32(vsum32), 32);
+ npyv_u64 even = npyv_reinterpret_u64_u32(npyv_and_u32(vsum32, maskevn));
+ count = len_m - npyv_sum_u64(npyv_add_u64(odd, even));
+ len -= len_m;
+ }
+#endif
+ for (; len > 0; --len, data += bstride) {
+ count += (*(npy_uint32*)data != 0);
+ }
+ return count;
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 npy_intp
+count_nonzero_u64(const char *data, npy_intp bstride, npy_uintp len)
+{
+ npy_intp count = 0;
+#if NPY_SIMD
+ if (bstride == sizeof(npy_uint64)) {
+ const npy_uintp len_m = len & -npyv_nlanes_u64;
+ const npyv_u64 vone = npyv_setall_u64(1);
+ const npyv_u64 vzero = npyv_zero_u64();
+
+ npyv_u64 vsum64 = npyv_zero_u64();
+ for (const char *end = data + len_m*bstride; data < end; data += NPY_SIMD_WIDTH) {
+ npyv_u64 mask = npyv_cvt_u64_b64(npyv_cmpeq_u64(npyv_load_u64((npy_uint64*)data), vzero));
+ mask = npyv_and_u64(mask, vone);
+ vsum64 = npyv_add_u64(vsum64, mask);
+ }
+ len -= len_m;
+ count = len_m - npyv_sum_u64(vsum64);
+ }
#endif
+ for (; len > 0; --len, data += bstride) {
+ count += (*(npy_uint64*)data != 0);
+ }
+ return count;
+}
/*
* Counts the number of True values in a raw boolean array. This
* is a low-overhead function which does no heap allocations.
*
* Returns -1 on error.
*/
-NPY_NO_EXPORT npy_intp
-count_boolean_trues(int ndim, char *data, npy_intp const *ashape, npy_intp const *astrides)
+static NPY_GCC_OPT_3 npy_intp
+count_nonzero_int(int ndim, char *data, const npy_intp *ashape, const npy_intp *astrides, int elsize)
{
-
+ assert(elsize <= 8);
int idim;
npy_intp shape[NPY_MAXDIMS], strides[NPY_MAXDIMS];
- npy_intp i, coord[NPY_MAXDIMS];
- npy_intp count = 0;
- NPY_BEGIN_THREADS_DEF;
+ npy_intp coord[NPY_MAXDIMS];
- /* Use raw iteration with no heap memory allocation */
+ // Use raw iteration with no heap memory allocation
if (PyArray_PrepareOneRawArrayIter(
ndim, ashape,
data, astrides,
@@ -2228,51 +2330,44 @@ count_boolean_trues(int ndim, char *data, npy_intp const *ashape, npy_intp const
return -1;
}
- /* Handle zero-sized array */
+ // Handle zero-sized array
if (shape[0] == 0) {
return 0;
}
+ NPY_BEGIN_THREADS_DEF;
NPY_BEGIN_THREADS_THRESHOLDED(shape[0]);
- /* Special case for contiguous inner loop */
- if (strides[0] == 1) {
- NPY_RAW_ITER_START(idim, ndim, coord, shape) {
- /* Process the innermost dimension */
- const char *d = data;
- const char *e = data + shape[0];
-#if NPY_SIMD
- npy_uintp stride = shape[0] & -npyv_nlanes_u8;
- count += count_nonzero_bytes((const npy_uint8 *)d, stride);
- d += stride;
-#else
- if (!NPY_ALIGNMENT_REQUIRED ||
- npy_is_aligned(d, sizeof(npy_uint64))) {
- npy_uintp stride = 6 * sizeof(npy_uint64);
- for (; d < e - (shape[0] % stride); d += stride) {
- count += count_nonzero_bytes_384((const npy_uint64 *)d);
- }
- }
-#endif
- for (; d < e; ++d) {
- count += (*d != 0);
- }
- } NPY_RAW_ITER_ONE_NEXT(idim, ndim, coord, shape, data, strides);
- }
- /* General inner loop */
- else {
- NPY_RAW_ITER_START(idim, ndim, coord, shape) {
- char *d = data;
- /* Process the innermost dimension */
- for (i = 0; i < shape[0]; ++i, d += strides[0]) {
- count += (*d != 0);
- }
- } NPY_RAW_ITER_ONE_NEXT(idim, ndim, coord, shape, data, strides);
+
+ #define NONZERO_CASE(LEN, SFX) \
+ case LEN: \
+ NPY_RAW_ITER_START(idim, ndim, coord, shape) { \
+ count += count_nonzero_##SFX(data, strides[0], shape[0]); \
+ } NPY_RAW_ITER_ONE_NEXT(idim, ndim, coord, shape, data, strides); \
+ break
+
+ npy_intp count = 0;
+ switch(elsize) {
+ NONZERO_CASE(1, u8);
+ NONZERO_CASE(2, u16);
+ NONZERO_CASE(4, u32);
+ NONZERO_CASE(8, u64);
}
+ #undef NONZERO_CASE
NPY_END_THREADS;
-
return count;
}
+/*
+ * Counts the number of True values in a raw boolean array. This
+ * is a low-overhead function which does no heap allocations.
+ *
+ * Returns -1 on error.
+ */
+NPY_NO_EXPORT NPY_GCC_OPT_3 npy_intp
+count_boolean_trues(int ndim, char *data, npy_intp const *ashape, npy_intp const *astrides)
+{
+ return count_nonzero_int(ndim, data, ashape, astrides, 1);
+}
/*NUMPY_API
* Counts the number of non-zero elements in the array.
@@ -2295,14 +2390,22 @@ PyArray_CountNonzero(PyArrayObject *self)
npy_intp *strideptr, *innersizeptr;
NPY_BEGIN_THREADS_DEF;
- /* Special low-overhead version specific to the boolean type */
+ // Special low-overhead version specific to the boolean/int types
dtype = PyArray_DESCR(self);
- if (dtype->type_num == NPY_BOOL) {
- return count_boolean_trues(PyArray_NDIM(self), PyArray_DATA(self),
- PyArray_DIMS(self), PyArray_STRIDES(self));
+ switch(dtype->kind) {
+ case 'u':
+ case 'i':
+ case 'b':
+ if (dtype->elsize > 8) {
+ break;
+ }
+ return count_nonzero_int(
+ PyArray_NDIM(self), PyArray_BYTES(self), PyArray_DIMS(self),
+ PyArray_STRIDES(self), dtype->elsize
+ );
}
- nonzero = PyArray_DESCR(self)->f->nonzero;
+ nonzero = PyArray_DESCR(self)->f->nonzero;
/* If it's a trivial one-dimensional loop, don't use an iterator */
if (PyArray_TRIVIALLY_ITERABLE(self)) {
needs_api = PyDataType_FLAGCHK(dtype, NPY_NEEDS_PYAPI);
diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c
index 2c00c498b..7915c75be 100644
--- a/numpy/core/src/multiarray/multiarraymodule.c
+++ b/numpy/core/src/multiarray/multiarraymodule.c
@@ -1829,6 +1829,8 @@ array_empty(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kwds)
array_function_result = array_implement_c_array_function_creation(
"empty", args, kwds);
if (array_function_result != Py_NotImplemented) {
+ Py_XDECREF(typecode);
+ npy_free_cache_dim_obj(shape);
return array_function_result;
}
@@ -2026,6 +2028,8 @@ array_zeros(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kwds)
array_function_result = array_implement_c_array_function_creation(
"zeros", args, kwds);
if (array_function_result != Py_NotImplemented) {
+ Py_XDECREF(typecode);
+ npy_free_cache_dim_obj(shape);
return array_function_result;
}
@@ -2139,11 +2143,13 @@ array_fromfile(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *keywds)
array_function_result = array_implement_c_array_function_creation(
"fromfile", args, keywds);
if (array_function_result != Py_NotImplemented) {
+ Py_XDECREF(type);
return array_function_result;
}
file = NpyPath_PathlikeToFspath(file);
if (file == NULL) {
+ Py_XDECREF(type);
return NULL;
}
@@ -2250,6 +2256,7 @@ array_frombuffer(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *keywds
array_function_result = array_implement_c_array_function_creation(
"frombuffer", args, keywds);
if (array_function_result != Py_NotImplemented) {
+ Py_XDECREF(type);
return array_function_result;
}
diff --git a/numpy/core/tests/test_nditer.py b/numpy/core/tests/test_nditer.py
index 4271d2d96..411095199 100644
--- a/numpy/core/tests/test_nditer.py
+++ b/numpy/core/tests/test_nditer.py
@@ -2746,6 +2746,37 @@ def test_iter_writemasked():
# were copied back
assert_equal(a, [3, 3, 2.5])
+def test_iter_writemasked_decref():
+ # force casting (to make it interesting) by using a structured dtype.
+ arr = np.arange(10000).astype(">i,O")
+ original = arr.copy()
+ mask = np.random.randint(0, 2, size=10000).astype(bool)
+
+ it = np.nditer([arr, mask], ['buffered', "refs_ok"],
+ [['readwrite', 'writemasked'],
+ ['readonly', 'arraymask']],
+ op_dtypes=["<i,O", "?"])
+ singleton = object()
+ if HAS_REFCOUNT:
+ count = sys.getrefcount(singleton)
+ for buf, mask_buf in it:
+ buf[...] = (3, singleton)
+
+ del buf, mask_buf, it # delete everything to ensure correct cleanup
+
+ if HAS_REFCOUNT:
+ # The buffer would have included additional items; they must be
+ # cleared correctly:
+ assert sys.getrefcount(singleton) - count == np.count_nonzero(mask)
+
+ assert_array_equal(arr[~mask], original[~mask])
+ assert (arr[mask] == np.array((3, singleton), arr.dtype)).all()
+ del arr
+
+ if HAS_REFCOUNT:
+ assert sys.getrefcount(singleton) == count
+
+
def test_iter_non_writable_attribute_deletion():
it = np.nditer(np.ones(2))
attr = ["value", "shape", "operands", "itviews", "has_delayed_bufalloc",
diff --git a/numpy/core/tests/test_numeric.py b/numpy/core/tests/test_numeric.py
index a697e5faf..06511822e 100644
--- a/numpy/core/tests/test_numeric.py
+++ b/numpy/core/tests/test_numeric.py
@@ -1266,20 +1266,30 @@ class TestNonzero:
assert_equal(np.count_nonzero(x), 4)
assert_equal(np.nonzero(x), ([0, 2, 3, 6],))
- x = np.array([(1, 2), (0, 0), (1, 1), (-1, 3), (0, 7)],
- dtype=[('a', 'i4'), ('b', 'i2')])
+ # x = np.array([(1, 2), (0, 0), (1, 1), (-1, 3), (0, 7)],
+ # dtype=[('a', 'i4'), ('b', 'i2')])
+ x = np.array([(1, 2, -5, -3), (0, 0, 2, 7), (1, 1, 0, 1), (-1, 3, 1, 0), (0, 7, 0, 4)],
+ dtype=[('a', 'i4'), ('b', 'i2'), ('c', 'i1'), ('d', 'i8')])
assert_equal(np.count_nonzero(x['a']), 3)
assert_equal(np.count_nonzero(x['b']), 4)
+ assert_equal(np.count_nonzero(x['c']), 3)
+ assert_equal(np.count_nonzero(x['d']), 4)
assert_equal(np.nonzero(x['a']), ([0, 2, 3],))
assert_equal(np.nonzero(x['b']), ([0, 2, 3, 4],))
def test_nonzero_twodim(self):
x = np.array([[0, 1, 0], [2, 0, 3]])
- assert_equal(np.count_nonzero(x), 3)
+ assert_equal(np.count_nonzero(x.astype('i1')), 3)
+ assert_equal(np.count_nonzero(x.astype('i2')), 3)
+ assert_equal(np.count_nonzero(x.astype('i4')), 3)
+ assert_equal(np.count_nonzero(x.astype('i8')), 3)
assert_equal(np.nonzero(x), ([0, 1, 1], [1, 0, 2]))
x = np.eye(3)
- assert_equal(np.count_nonzero(x), 3)
+ assert_equal(np.count_nonzero(x.astype('i1')), 3)
+ assert_equal(np.count_nonzero(x.astype('i2')), 3)
+ assert_equal(np.count_nonzero(x.astype('i4')), 3)
+ assert_equal(np.count_nonzero(x.astype('i8')), 3)
assert_equal(np.nonzero(x), ([0, 1, 2], [0, 1, 2]))
x = np.array([[(0, 1), (0, 0), (1, 11)],
diff --git a/numpy/distutils/ccompiler.py b/numpy/distutils/ccompiler.py
index 106436e64..f025c8904 100644
--- a/numpy/distutils/ccompiler.py
+++ b/numpy/distutils/ccompiler.py
@@ -26,10 +26,8 @@ from numpy.distutils.misc_util import cyg2win32, is_sequence, mingw32, \
_commandline_dep_string
# globals for parallel build management
-try:
- import threading
-except ImportError:
- import dummy_threading as threading
+import threading
+
_job_semaphore = None
_global_lock = threading.Lock()
_processing_files = set()
diff --git a/numpy/distutils/command/config.py b/numpy/distutils/command/config.py
index 60881f4a3..8b735677a 100644
--- a/numpy/distutils/command/config.py
+++ b/numpy/distutils/command/config.py
@@ -92,6 +92,8 @@ class config(old_config):
save_compiler = self.compiler
if lang in ['f77', 'f90']:
self.compiler = self.fcompiler
+ if self.compiler is None:
+ raise CompileError('%s compiler is not set' % (lang,))
try:
ret = mth(*((self,)+args))
except (DistutilsExecError, CompileError) as e:
diff --git a/numpy/distutils/misc_util.py b/numpy/distutils/misc_util.py
index 5392663d6..37e120072 100644
--- a/numpy/distutils/misc_util.py
+++ b/numpy/distutils/misc_util.py
@@ -10,13 +10,10 @@ import shutil
import multiprocessing
import textwrap
import importlib.util
+from threading import local as tlocal
import distutils
from distutils.errors import DistutilsError
-try:
- from threading import local as tlocal
-except ImportError:
- from dummy_threading import local as tlocal
# stores temporary directory of each thread to only create one per thread
_tdata = tlocal()
diff --git a/numpy/f2py/cb_rules.py b/numpy/f2py/cb_rules.py
index 60bc1ad11..62aa2fca9 100644
--- a/numpy/f2py/cb_rules.py
+++ b/numpy/f2py/cb_rules.py
@@ -70,7 +70,8 @@ static #name#_t *get_active_#name#(void) {
/*typedef #rctype#(*#name#_typedef)(#optargs_td##args_td##strarglens_td##noargs#);*/
#static# #rctype# #callbackname# (#optargs##args##strarglens##noargs#) {
- #name#_t *cb;
+ #name#_t cb_local = { NULL, NULL, 0 };
+ #name#_t *cb = NULL;
PyTupleObject *capi_arglist = NULL;
PyObject *capi_return = NULL;
PyObject *capi_tmp = NULL;
@@ -82,12 +83,17 @@ static #name#_t *get_active_#name#(void) {
f2py_cb_start_clock();
#endif
cb = get_active_#name#();
+ if (cb == NULL) {
+ capi_longjmp_ok = 0;
+ cb = &cb_local;
+ }
capi_arglist = cb->args_capi;
CFUNCSMESS(\"cb:Call-back function #name# (maxnofargs=#maxnofargs#(-#nofoptargs#))\\n\");
CFUNCSMESSPY(\"cb:#name#_capi=\",cb->capi);
if (cb->capi==NULL) {
capi_longjmp_ok = 0;
cb->capi = PyObject_GetAttrString(#modulename#_module,\"#argname#\");
+ CFUNCSMESSPY(\"cb:#name#_capi=\",cb->capi);
}
if (cb->capi==NULL) {
PyErr_SetString(#modulename#_error,\"cb: Callback #argname# not defined (as an argument or module #modulename# attribute).\\n\");
diff --git a/numpy/f2py/crackfortran.py b/numpy/f2py/crackfortran.py
index 1149633c0..660cdd206 100755
--- a/numpy/f2py/crackfortran.py
+++ b/numpy/f2py/crackfortran.py
@@ -341,7 +341,9 @@ def readfortrancode(ffile, dowithline=show, istop=1):
if ffile == []:
return
localdolowercase = dolowercase
- cont = 0
+ # cont: set to True when the content of the last line read
+ # indicates statement continuation
+ cont = False
finalline = ''
ll = ''
includeline = re.compile(
@@ -392,14 +394,26 @@ def readfortrancode(ffile, dowithline=show, istop=1):
if rl[:5].lower() == '!f2py': # f2py directive
l, _ = split_by_unquoted(l + 4 * ' ' + rl[5:], '!')
if l.strip() == '': # Skip empty line
- cont = 0
+ if sourcecodeform == 'free':
+ # In free form, a statement continues in the next line
+ # that is not a comment line [3.3.2.4^1], lines with
+ # blanks are comment lines [3.3.2.3^1]. Hence, the
+ # line continuation flag must retain its state.
+ pass
+ else:
+ # In fixed form, statement continuation is determined
+ # by a non-blank character at the 6-th position. Empty
+ # line indicates a start of a new statement
+ # [3.3.3.3^1]. Hence, the line continuation flag must
+ # be reset.
+ cont = False
continue
if sourcecodeform == 'fix':
if l[0] in ['*', 'c', '!', 'C', '#']:
if l[1:5].lower() == 'f2py': # f2py directive
l = ' ' + l[5:]
else: # Skip comment line
- cont = 0
+ cont = False
continue
elif strictf77:
if len(l) > 72:
@@ -560,8 +574,7 @@ groupends = (r'end|endprogram|endblockdata|endmodule|endpythonmodule|'
r'endinterface|endsubroutine|endfunction')
endpattern = re.compile(
beforethisafter % ('', groupends, groupends, r'[\w\s]*'), re.I), 'end'
-# endifs='end\s*(if|do|where|select|while|forall)'
-endifs = r'(end\s*(if|do|where|select|while|forall))|(module\s*procedure)'
+endifs = r'(end\s*(if|do|where|select|while|forall|associate|block|critical|enum|team))|(module\s*procedure)'
endifpattern = re.compile(
beforethisafter % (r'[\w]*?', endifs, endifs, r'[\w\s]*'), re.I), 'endif'
#
diff --git a/numpy/f2py/tests/test_callback.py b/numpy/f2py/tests/test_callback.py
index 4d4f2b443..2cb429ec2 100644
--- a/numpy/f2py/tests/test_callback.py
+++ b/numpy/f2py/tests/test_callback.py
@@ -61,6 +61,21 @@ cf2py intent(out) a
a = callback(cu, lencu)
end
+
+ subroutine hidden_callback(a, r)
+ external global_f
+cf2py intent(callback, hide) global_f
+ integer a, r, global_f
+cf2py intent(out) r
+ r = global_f(a)
+ end
+
+ subroutine hidden_callback2(a, r)
+ external global_f
+ integer a, r, global_f
+cf2py intent(out) r
+ r = global_f(a)
+ end
"""
@pytest.mark.parametrize('name', 't,t2'.split(','))
@@ -204,6 +219,39 @@ cf2py intent(out) a
if errors:
raise AssertionError(errors)
+ def test_hidden_callback(self):
+ try:
+ self.module.hidden_callback(2)
+ except Exception as msg:
+ assert_(str(msg).startswith('Callback global_f not defined'))
+
+ try:
+ self.module.hidden_callback2(2)
+ except Exception as msg:
+ assert_(str(msg).startswith('cb: Callback global_f not defined'))
+
+ self.module.global_f = lambda x: x + 1
+ r = self.module.hidden_callback(2)
+ assert_(r == 3)
+
+ self.module.global_f = lambda x: x + 2
+ r = self.module.hidden_callback(2)
+ assert_(r == 4)
+
+ del self.module.global_f
+ try:
+ self.module.hidden_callback(2)
+ except Exception as msg:
+ assert_(str(msg).startswith('Callback global_f not defined'))
+
+ self.module.global_f = lambda x=0: x + 3
+ r = self.module.hidden_callback(2)
+ assert_(r == 5)
+
+ # reproducer of gh18341
+ r = self.module.hidden_callback2(2)
+ assert_(r == 3)
+
class TestF77CallbackPythonTLS(TestF77Callback):
"""
diff --git a/numpy/f2py/tests/test_crackfortran.py b/numpy/f2py/tests/test_crackfortran.py
index 827c71ae9..d26917f0c 100644
--- a/numpy/f2py/tests/test_crackfortran.py
+++ b/numpy/f2py/tests/test_crackfortran.py
@@ -115,3 +115,27 @@ class TestExternal(util.F2PyTest):
return x + 123
r = self.module.external_as_attribute(incr)
assert r == 123
+
+class TestCrackFortran(util.F2PyTest):
+
+ suffix = '.f90'
+
+ code = textwrap.dedent("""
+ subroutine gh2848( &
+ ! first 2 parameters
+ par1, par2,&
+ ! last 2 parameters
+ par3, par4)
+
+ integer, intent(in) :: par1, par2
+ integer, intent(out) :: par3, par4
+
+ par3 = par1
+ par4 = par2
+
+ end subroutine gh2848
+ """)
+
+ def test_gh2848(self):
+ r = self.module.gh2848(1, 2)
+ assert r == (1, 2)
diff --git a/numpy/lib/__init__.pyi b/numpy/lib/__init__.pyi
index a8eb24207..4468d27e9 100644
--- a/numpy/lib/__init__.pyi
+++ b/numpy/lib/__init__.pyi
@@ -1,5 +1,11 @@
from typing import Any, List
+from numpy.lib.ufunclike import (
+ fix as fix,
+ isposinf as isposinf,
+ isneginf as isneginf,
+)
+
__all__: List[str]
emath: Any
@@ -108,9 +114,6 @@ tril_indices: Any
tril_indices_from: Any
triu_indices: Any
triu_indices_from: Any
-fix: Any
-isneginf: Any
-isposinf: Any
pad: Any
poly: Any
roots: Any
diff --git a/numpy/lib/arraysetops.py b/numpy/lib/arraysetops.py
index 6c6c1ff80..7600e17be 100644
--- a/numpy/lib/arraysetops.py
+++ b/numpy/lib/arraysetops.py
@@ -209,6 +209,16 @@ def unique(ar, return_index=False, return_inverse=False,
flattened subarrays are sorted in lexicographic order starting with the
first element.
+ .. versionchanged:: 1.21
+ If nan values are in the input array, a single nan is put
+ to the end of the sorted unique values.
+
+ Also for complex arrays all NaN values are considered equivalent
+ (no matter whether the NaN is in the real or imaginary part).
+ The representative chosen for the returned array is the smallest in
+ lexicographical order - see np.sort for how the lexicographical order
+ is defined for complex arrays.
+
Examples
--------
>>> np.unique([1, 1, 2, 2, 3, 3])
@@ -324,7 +334,16 @@ def _unique1d(ar, return_index=False, return_inverse=False,
aux = ar
mask = np.empty(aux.shape, dtype=np.bool_)
mask[:1] = True
- mask[1:] = aux[1:] != aux[:-1]
+ if aux.shape[0] > 0 and aux.dtype.kind in "cfmM" and np.isnan(aux[-1]):
+ if aux.dtype.kind == "c": # for complex all NaNs are considered equivalent
+ aux_firstnan = np.searchsorted(np.isnan(aux), True, side='left')
+ else:
+ aux_firstnan = np.searchsorted(aux, aux[-1], side='left')
+ mask[1:aux_firstnan] = (aux[1:aux_firstnan] != aux[:aux_firstnan - 1])
+ mask[aux_firstnan] = True
+ mask[aux_firstnan + 1:] = False
+ else:
+ mask[1:] = aux[1:] != aux[:-1]
ret = (aux[mask],)
if return_index:
@@ -565,6 +584,10 @@ def in1d(ar1, ar2, assume_unique=False, invert=False):
ar1 = np.asarray(ar1).ravel()
ar2 = np.asarray(ar2).ravel()
+ # Ensure that iteration through object arrays yields size-1 arrays
+ if ar2.dtype == object:
+ ar2 = ar2.reshape(-1, 1)
+
# Check if one of the arrays may contain arbitrary objects
contains_object = ar1.dtype.hasobject or ar2.dtype.hasobject
diff --git a/numpy/lib/histograms.py b/numpy/lib/histograms.py
index 7af67a7ee..b6909bc1d 100644
--- a/numpy/lib/histograms.py
+++ b/numpy/lib/histograms.py
@@ -562,7 +562,8 @@ def histogram_bin_edges(a, bins=10, range=None, weights=None):
below, :math:`h` is the binwidth and :math:`n_h` is the number of
bins. All estimators that compute bin counts are recast to bin width
using the `ptp` of the data. The final bin count is obtained from
- ``np.round(np.ceil(range / h))``.
+ ``np.round(np.ceil(range / h))``. The final bin width is often less
+ than what is returned by the estimators below.
'auto' (maximum of the 'sturges' and 'fd' estimators)
A compromise to get a good value. For small datasets the Sturges
diff --git a/numpy/lib/shape_base.py b/numpy/lib/shape_base.py
index 9dfeee527..d19bfb8f8 100644
--- a/numpy/lib/shape_base.py
+++ b/numpy/lib/shape_base.py
@@ -775,7 +775,7 @@ def array_split(ary, indices_or_sections, axis=0):
# indices_or_sections is a scalar, not an array.
Nsections = int(indices_or_sections)
if Nsections <= 0:
- raise ValueError('number sections must be larger than 0.')
+ raise ValueError('number sections must be larger than 0.') from None
Neach_section, extras = divmod(Ntotal, Nsections)
section_sizes = ([0] +
extras * [Neach_section+1] +
diff --git a/numpy/lib/tests/test_arraysetops.py b/numpy/lib/tests/test_arraysetops.py
index 847e6cb8a..d62da9efb 100644
--- a/numpy/lib/tests/test_arraysetops.py
+++ b/numpy/lib/tests/test_arraysetops.py
@@ -358,6 +358,39 @@ class TestSetOps:
result = np.in1d(ar1, ar2)
assert_array_equal(result, expected)
+ def test_in1d_with_arrays_containing_tuples(self):
+ ar1 = np.array([(1,), 2], dtype=object)
+ ar2 = np.array([(1,), 2], dtype=object)
+ expected = np.array([True, True])
+ result = np.in1d(ar1, ar2)
+ assert_array_equal(result, expected)
+ result = np.in1d(ar1, ar2, invert=True)
+ assert_array_equal(result, np.invert(expected))
+
+ # An integer is added at the end of the array to make sure
+ # that the array builder will create the array with tuples
+ # and after it's created the integer is removed.
+ # There's a bug in the array constructor that doesn't handle
+ # tuples properly and adding the integer fixes that.
+ ar1 = np.array([(1,), (2, 1), 1], dtype=object)
+ ar1 = ar1[:-1]
+ ar2 = np.array([(1,), (2, 1), 1], dtype=object)
+ ar2 = ar2[:-1]
+ expected = np.array([True, True])
+ result = np.in1d(ar1, ar2)
+ assert_array_equal(result, expected)
+ result = np.in1d(ar1, ar2, invert=True)
+ assert_array_equal(result, np.invert(expected))
+
+ ar1 = np.array([(1,), (2, 3), 1], dtype=object)
+ ar1 = ar1[:-1]
+ ar2 = np.array([(1,), 2], dtype=object)
+ expected = np.array([True, False])
+ result = np.in1d(ar1, ar2)
+ assert_array_equal(result, expected)
+ result = np.in1d(ar1, ar2, invert=True)
+ assert_array_equal(result, np.invert(expected))
+
def test_union1d(self):
a = np.array([5, 4, 7, 1, 2])
b = np.array([2, 4, 3, 3, 2, 1, 5])
@@ -531,6 +564,52 @@ class TestUnique:
assert_equal(a3_idx.dtype, np.intp)
assert_equal(a3_inv.dtype, np.intp)
+ # test for ticket 2111 - float
+ a = [2.0, np.nan, 1.0, np.nan]
+ ua = [1.0, 2.0, np.nan]
+ ua_idx = [2, 0, 1]
+ ua_inv = [1, 2, 0, 2]
+ ua_cnt = [1, 1, 2]
+ assert_equal(np.unique(a), ua)
+ assert_equal(np.unique(a, return_index=True), (ua, ua_idx))
+ assert_equal(np.unique(a, return_inverse=True), (ua, ua_inv))
+ assert_equal(np.unique(a, return_counts=True), (ua, ua_cnt))
+
+ # test for ticket 2111 - complex
+ a = [2.0-1j, np.nan, 1.0+1j, complex(0.0, np.nan), complex(1.0, np.nan)]
+ ua = [1.0+1j, 2.0-1j, complex(0.0, np.nan)]
+ ua_idx = [2, 0, 3]
+ ua_inv = [1, 2, 0, 2, 2]
+ ua_cnt = [1, 1, 3]
+ assert_equal(np.unique(a), ua)
+ assert_equal(np.unique(a, return_index=True), (ua, ua_idx))
+ assert_equal(np.unique(a, return_inverse=True), (ua, ua_inv))
+ assert_equal(np.unique(a, return_counts=True), (ua, ua_cnt))
+
+ # test for ticket 2111 - datetime64
+ nat = np.datetime64('nat')
+ a = [np.datetime64('2020-12-26'), nat, np.datetime64('2020-12-24'), nat]
+ ua = [np.datetime64('2020-12-24'), np.datetime64('2020-12-26'), nat]
+ ua_idx = [2, 0, 1]
+ ua_inv = [1, 2, 0, 2]
+ ua_cnt = [1, 1, 2]
+ assert_equal(np.unique(a), ua)
+ assert_equal(np.unique(a, return_index=True), (ua, ua_idx))
+ assert_equal(np.unique(a, return_inverse=True), (ua, ua_inv))
+ assert_equal(np.unique(a, return_counts=True), (ua, ua_cnt))
+
+ # test for ticket 2111 - timedelta
+ nat = np.timedelta64('nat')
+ a = [np.timedelta64(1, 'D'), nat, np.timedelta64(1, 'h'), nat]
+ ua = [np.timedelta64(1, 'h'), np.timedelta64(1, 'D'), nat]
+ ua_idx = [2, 0, 1]
+ ua_inv = [1, 2, 0, 2]
+ ua_cnt = [1, 1, 2]
+ assert_equal(np.unique(a), ua)
+ assert_equal(np.unique(a, return_index=True), (ua, ua_idx))
+ assert_equal(np.unique(a, return_inverse=True), (ua, ua_inv))
+ assert_equal(np.unique(a, return_counts=True), (ua, ua_cnt))
+
def test_unique_axis_errors(self):
assert_raises(TypeError, self._run_axis_tests, object)
assert_raises(TypeError, self._run_axis_tests,
diff --git a/numpy/lib/ufunclike.py b/numpy/lib/ufunclike.py
index 1f26a1845..0956de82b 100644
--- a/numpy/lib/ufunclike.py
+++ b/numpy/lib/ufunclike.py
@@ -189,7 +189,8 @@ def isposinf(x, out=None):
try:
signbit = ~nx.signbit(x)
except TypeError as e:
- raise TypeError('This operation is not supported for complex values '
+ dtype = nx.asanyarray(x).dtype
+ raise TypeError(f'This operation is not supported for {dtype} values '
'because it would be ambiguous.') from e
else:
return nx.logical_and(is_inf, signbit, out)
@@ -260,7 +261,8 @@ def isneginf(x, out=None):
try:
signbit = nx.signbit(x)
except TypeError as e:
- raise TypeError('This operation is not supported for complex values '
+ dtype = nx.asanyarray(x).dtype
+ raise TypeError(f'This operation is not supported for {dtype} values '
'because it would be ambiguous.') from e
else:
return nx.logical_and(is_inf, signbit, out)
diff --git a/numpy/lib/ufunclike.pyi b/numpy/lib/ufunclike.pyi
new file mode 100644
index 000000000..3443fa7ae
--- /dev/null
+++ b/numpy/lib/ufunclike.pyi
@@ -0,0 +1,50 @@
+from typing import Any, overload, TypeVar, List, Union
+
+from numpy import floating, bool_, ndarray
+from numpy.typing import (
+ _ArrayLikeFloat_co,
+ _ArrayLikeObject_co,
+ _ArrayOrScalar,
+)
+
+_ArrayType = TypeVar("_ArrayType", bound=ndarray[Any, Any])
+
+__all__: List[str]
+
+@overload
+def fix(
+ x: _ArrayLikeFloat_co,
+ out: None = ...,
+) -> _ArrayOrScalar[floating[Any]]: ...
+@overload
+def fix(
+ x: _ArrayLikeObject_co,
+ out: None = ...,
+) -> Any: ...
+@overload
+def fix(
+ x: Union[_ArrayLikeFloat_co, _ArrayLikeObject_co],
+ out: _ArrayType,
+) -> _ArrayType: ...
+
+@overload
+def isposinf(
+ x: _ArrayLikeFloat_co,
+ out: None = ...,
+) -> _ArrayOrScalar[bool_]: ...
+@overload
+def isposinf(
+ x: _ArrayLikeFloat_co,
+ out: _ArrayType,
+) -> _ArrayType: ...
+
+@overload
+def isneginf(
+ x: _ArrayLikeFloat_co,
+ out: None = ...,
+) -> _ArrayOrScalar[bool_]: ...
+@overload
+def isneginf(
+ x: _ArrayLikeFloat_co,
+ out: _ArrayType,
+) -> _ArrayType: ...
diff --git a/numpy/lib/utils.py b/numpy/lib/utils.py
index f7e176cf3..24252c834 100644
--- a/numpy/lib/utils.py
+++ b/numpy/lib/utils.py
@@ -1015,8 +1015,8 @@ def _median_nancheck(data, result, axis, out):
Input data to median function
result : Array or MaskedArray
Result of median function
- axis : {int, sequence of int, None}, optional
- Axis or axes along which the median was computed.
+ axis : int
+ Axis along which the median was computed.
out : ndarray, optional
Output array in which to place the result.
Returns
@@ -1026,8 +1026,7 @@ def _median_nancheck(data, result, axis, out):
"""
if data.size == 0:
return result
- data = np.moveaxis(data, axis, -1)
- n = np.isnan(data[..., -1])
+ n = np.isnan(data.take(-1, axis=axis))
# masked NaN values are ok
if np.ma.isMaskedArray(n):
n = n.filled(False)
diff --git a/numpy/linalg/lapack_lite/make_lite.py b/numpy/linalg/lapack_lite/make_lite.py
index cf15b2541..b145f6c4f 100755
--- a/numpy/linalg/lapack_lite/make_lite.py
+++ b/numpy/linalg/lapack_lite/make_lite.py
@@ -261,8 +261,8 @@ def runF2C(fortran_filename, output_dir):
subprocess.check_call(
["f2c"] + F2C_ARGS + ['-d', output_dir, fortran_filename]
)
- except subprocess.CalledProcessError:
- raise F2CError
+ except subprocess.CalledProcessError as e:
+ raise F2CError from e
def scrubF2CSource(c_file):
with open(c_file) as fo:
diff --git a/numpy/linalg/tests/test_build.py b/numpy/linalg/tests/test_build.py
index 4859226d9..868341ff2 100644
--- a/numpy/linalg/tests/test_build.py
+++ b/numpy/linalg/tests/test_build.py
@@ -15,8 +15,8 @@ class FindDependenciesLdd:
try:
p = Popen(self.cmd, stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate()
- except OSError:
- raise RuntimeError(f'command {self.cmd} cannot be run')
+ except OSError as e:
+ raise RuntimeError(f'command {self.cmd} cannot be run') from e
def get_dependencies(self, lfile):
p = Popen(self.cmd + [lfile], stdout=PIPE, stderr=PIPE)
diff --git a/numpy/linalg/tests/test_linalg.py b/numpy/linalg/tests/test_linalg.py
index 21fab58e1..8a270f194 100644
--- a/numpy/linalg/tests/test_linalg.py
+++ b/numpy/linalg/tests/test_linalg.py
@@ -348,10 +348,10 @@ class LinalgTestCase:
try:
case.check(self.do)
- except Exception:
+ except Exception as e:
msg = f'In test case: {case!r}\n\n'
msg += traceback.format_exc()
- raise AssertionError(msg)
+ raise AssertionError(msg) from e
class LinalgSquareTestCase(LinalgTestCase):
diff --git a/numpy/random/__init__.pyi b/numpy/random/__init__.pyi
index bd5ece536..b99c002ae 100644
--- a/numpy/random/__init__.pyi
+++ b/numpy/random/__init__.pyi
@@ -1,5 +1,14 @@
from typing import Any, List
+from numpy.random._generator import Generator as Generator
+from numpy.random._generator import default_rng as default_rng
+from numpy.random._mt19937 import MT19937 as MT19937
+from numpy.random._pcg64 import PCG64 as PCG64
+from numpy.random._philox import Philox as Philox
+from numpy.random._sfc64 import SFC64 as SFC64
+from numpy.random.bit_generator import BitGenerator as BitGenerator
+from numpy.random.bit_generator import SeedSequence as SeedSequence
+
__all__: List[str]
beta: Any
@@ -52,12 +61,4 @@ vonmises: Any
wald: Any
weibull: Any
zipf: Any
-Generator: Any
RandomState: Any
-SeedSequence: Any
-MT19937: Any
-Philox: Any
-PCG64: Any
-SFC64: Any
-default_rng: Any
-BitGenerator: Any
diff --git a/numpy/random/_common.pyx b/numpy/random/_common.pyx
index 6d77aed03..19fb34d4d 100644
--- a/numpy/random/_common.pyx
+++ b/numpy/random/_common.pyx
@@ -25,11 +25,11 @@ cdef uint64_t MAXSIZE = <uint64_t>sys.maxsize
cdef object benchmark(bitgen_t *bitgen, object lock, Py_ssize_t cnt, object method):
"""Benchmark command used by BitGenerator"""
cdef Py_ssize_t i
- if method==u'uint64':
+ if method=='uint64':
with lock, nogil:
for i in range(cnt):
bitgen.next_uint64(bitgen.state)
- elif method==u'double':
+ elif method=='double':
with lock, nogil:
for i in range(cnt):
bitgen.next_double(bitgen.state)
diff --git a/numpy/random/_generator.pyi b/numpy/random/_generator.pyi
new file mode 100644
index 000000000..aadc4d0f8
--- /dev/null
+++ b/numpy/random/_generator.pyi
@@ -0,0 +1,534 @@
+import sys
+from typing import Any, Callable, Dict, Literal, Optional, Sequence, Tuple, Type, Union, overload
+
+from numpy import (
+ bool_,
+ double,
+ dtype,
+ float32,
+ float64,
+ int8,
+ int16,
+ int32,
+ int64,
+ int_,
+ ndarray,
+ single,
+ uint,
+ uint8,
+ uint16,
+ uint32,
+ uint64,
+)
+from numpy.random import BitGenerator, SeedSequence
+from numpy.typing import (
+ ArrayLike,
+ _ArrayLikeFloat_co,
+ _ArrayLikeInt_co,
+ _BoolCodes,
+ _DoubleCodes,
+ _DTypeLikeBool,
+ _DTypeLikeInt,
+ _DTypeLikeUInt,
+ _Float32Codes,
+ _Float64Codes,
+ _Int8Codes,
+ _Int16Codes,
+ _Int32Codes,
+ _Int64Codes,
+ _IntCodes,
+ _ShapeLike,
+ _SingleCodes,
+ _SupportsDType,
+ _UInt8Codes,
+ _UInt16Codes,
+ _UInt32Codes,
+ _UInt64Codes,
+ _UIntCodes,
+)
+
+if sys.version_info >= (3, 8):
+ from typing import Literal
+else:
+ from typing_extensions import Literal
+
+_DTypeLikeFloat32 = Union[
+ dtype[float32],
+ _SupportsDType[dtype[float32]],
+ Type[float32],
+ _Float32Codes,
+ _SingleCodes,
+]
+
+_DTypeLikeFloat64 = Union[
+ dtype[float64],
+ _SupportsDType[dtype[float64]],
+ Type[float],
+ Type[float64],
+ _Float64Codes,
+ _DoubleCodes,
+]
+
+class Generator:
+ # COMPLETE
+ def __init__(self, bit_generator: BitGenerator) -> None: ...
+ def __repr__(self) -> str: ...
+ def __str__(self) -> str: ...
+ # Pickling support:
+ def __getstate__(self) -> Dict[str, Any]: ...
+ def __setstate__(self, state: Dict[str, Any]) -> None: ...
+ def __reduce__(self) -> Tuple[Callable[[str], BitGenerator], Tuple[str], Dict[str, Any]]: ...
+ @property
+ def bit_generator(self) -> BitGenerator: ...
+ def bytes(self, length: int) -> str: ...
+ @overload
+ def standard_normal( # type: ignore[misc]
+ self,
+ size: None = ...,
+ dtype: Union[_DTypeLikeFloat32, _DTypeLikeFloat64] = ...,
+ out: None = ...,
+ ) -> float: ...
+ @overload
+ def standard_normal( # type: ignore[misc]
+ self,
+ size: _ShapeLike = ...,
+ dtype: Union[_DTypeLikeFloat32, _DTypeLikeFloat64] = ...,
+ out: Optional[ndarray[Any, dtype[Union[float32, float64]]]] = ...,
+ ) -> ndarray[Any, dtype[Union[float32, float64]]]: ...
+ @overload
+ def permutation(self, x: int, axis: int = ...) -> ndarray[Any, dtype[int64]]: ...
+ @overload
+ def permutation(self, x: ArrayLike, axis: int = ...) -> ndarray[Any, Any]: ...
+ @overload
+ def standard_cauchy(self, size: None = ...) -> float: ... # type: ignore[misc]
+ @overload
+ def standard_cauchy(self, size: Optional[_ShapeLike] = ...) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def standard_exponential( # type: ignore[misc]
+ self,
+ size: None = ...,
+ dtype: Union[_DTypeLikeFloat32, _DTypeLikeFloat64] = ...,
+ method: Literal["zig", "inv"] = ...,
+ out: None = ...,
+ ) -> float: ...
+ @overload
+ def standard_exponential(
+ self,
+ size: _ShapeLike = ...,
+ dtype: Union[_DTypeLikeFloat32, _DTypeLikeFloat64] = ...,
+ method: Literal["zig", "inv"] = ...,
+ out: Optional[ndarray[Any, dtype[Union[float32, float64]]]] = ...,
+ ) -> ndarray[Any, dtype[Union[float32, float64]]]: ...
+ @overload
+ def standard_exponential(
+ self,
+ size: Optional[_ShapeLike] = ...,
+ dtype: Union[_DTypeLikeFloat32, _DTypeLikeFloat64] = ...,
+ method: Literal["zig", "inv"] = ...,
+ out: ndarray[Any, dtype[Union[float32, float64]]] = ...,
+ ) -> ndarray[Any, dtype[Union[float32, float64]]]: ...
+ @overload
+ def random( # type: ignore[misc]
+ self,
+ size: None = ...,
+ dtype: Union[_DTypeLikeFloat32, _DTypeLikeFloat64] = ...,
+ out: None = ...,
+ ) -> float: ...
+ @overload
+ def random(
+ self,
+ size: _ShapeLike = ...,
+ dtype: Union[_DTypeLikeFloat32, _DTypeLikeFloat64] = ...,
+ out: Optional[ndarray[Any, dtype[Union[float32, float64]]]] = ...,
+ ) -> ndarray[Any, dtype[Union[float32, float64]]]: ...
+ @overload
+ def beta(self, a: float, b: float, size: None = ...) -> float: ... # type: ignore[misc]
+ @overload
+ def beta(
+ self, a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ...
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def exponential(self, scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
+ @overload
+ def exponential(
+ self, scale: _ArrayLikeFloat_co = ..., size: Optional[_ShapeLike] = ...
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def integers( # type: ignore[misc]
+ self,
+ low: int,
+ high: Optional[int] = ...,
+ size: None = ...,
+ dtype: _DTypeLikeBool = ...,
+ endpoint: bool = ...,
+ ) -> bool: ...
+ @overload
+ def integers( # type: ignore[misc]
+ self,
+ low: int,
+ high: Optional[int] = ...,
+ size: None = ...,
+ dtype: Union[_DTypeLikeInt, _DTypeLikeUInt] = ...,
+ endpoint: bool = ...,
+ ) -> int: ...
+ @overload
+ def integers( # type: ignore[misc]
+ self,
+ low: _ArrayLikeInt_co,
+ high: Optional[_ArrayLikeInt_co] = ...,
+ size: Optional[_ShapeLike] = ...,
+ dtype: Union[
+ dtype[bool_], Type[bool], Type[bool_], _BoolCodes, _SupportsDType[dtype[bool_]]
+ ] = ...,
+ endpoint: bool = ...,
+ ) -> ndarray[Any, dtype[bool_]]: ...
+ @overload
+ def integers( # type: ignore[misc]
+ self,
+ low: _ArrayLikeInt_co,
+ high: Optional[_ArrayLikeInt_co] = ...,
+ size: Optional[_ShapeLike] = ...,
+ dtype: Union[
+ dtype[int_], Type[int], Type[int_], _IntCodes, _SupportsDType[dtype[int_]]
+ ] = ...,
+ endpoint: bool = ...,
+ ) -> ndarray[Any, dtype[int_]]: ...
+ @overload
+ def integers( # type: ignore[misc]
+ self,
+ low: _ArrayLikeInt_co,
+ high: Optional[_ArrayLikeInt_co] = ...,
+ size: Optional[_ShapeLike] = ...,
+ dtype: Union[dtype[uint], Type[uint], _UIntCodes, _SupportsDType[dtype[uint]]] = ...,
+ endpoint: bool = ...,
+ ) -> ndarray[Any, dtype[uint]]: ...
+ @overload
+ def integers( # type: ignore[misc]
+ self,
+ low: _ArrayLikeInt_co,
+ high: Optional[_ArrayLikeInt_co] = ...,
+ size: Optional[_ShapeLike] = ...,
+ dtype: Union[dtype[int8], Type[int8], _Int8Codes, _SupportsDType[dtype[int8]]] = ...,
+ endpoint: bool = ...,
+ ) -> ndarray[Any, dtype[int8]]: ...
+ @overload
+ def integers( # type: ignore[misc]
+ self,
+ low: _ArrayLikeInt_co,
+ high: Optional[_ArrayLikeInt_co] = ...,
+ size: Optional[_ShapeLike] = ...,
+ dtype: Union[dtype[int16], Type[int16], _Int16Codes, _SupportsDType[dtype[int16]]] = ...,
+ endpoint: bool = ...,
+ ) -> ndarray[Any, dtype[int16]]: ...
+ @overload
+ def integers( # type: ignore[misc]
+ self,
+ low: _ArrayLikeInt_co,
+ high: Optional[_ArrayLikeInt_co] = ...,
+ size: Optional[_ShapeLike] = ...,
+ dtype: Union[dtype[int32], Type[int32], _Int32Codes, _SupportsDType[dtype[int32]]] = ...,
+ endpoint: bool = ...,
+ ) -> ndarray[Any, dtype[Union[int32]]]: ...
+ @overload
+ def integers( # type: ignore[misc]
+ self,
+ low: _ArrayLikeInt_co,
+ high: Optional[_ArrayLikeInt_co] = ...,
+ size: Optional[_ShapeLike] = ...,
+ dtype: Optional[
+ Union[dtype[int64], Type[int64], _Int64Codes, _SupportsDType[dtype[int64]]]
+ ] = ...,
+ endpoint: bool = ...,
+ ) -> ndarray[Any, dtype[int64]]: ...
+ @overload
+ def integers( # type: ignore[misc]
+ self,
+ low: _ArrayLikeInt_co,
+ high: Optional[_ArrayLikeInt_co] = ...,
+ size: Optional[_ShapeLike] = ...,
+ dtype: Union[dtype[uint8], Type[uint8], _UInt8Codes, _SupportsDType[dtype[uint8]]] = ...,
+ endpoint: bool = ...,
+ ) -> ndarray[Any, dtype[uint8]]: ...
+ @overload
+ def integers( # type: ignore[misc]
+ self,
+ low: _ArrayLikeInt_co,
+ high: Optional[_ArrayLikeInt_co] = ...,
+ size: Optional[_ShapeLike] = ...,
+ dtype: Union[
+ dtype[uint16], Type[uint16], _UInt16Codes, _SupportsDType[dtype[uint16]]
+ ] = ...,
+ endpoint: bool = ...,
+ ) -> ndarray[Any, dtype[Union[uint16]]]: ...
+ @overload
+ def integers( # type: ignore[misc]
+ self,
+ low: _ArrayLikeInt_co,
+ high: Optional[_ArrayLikeInt_co] = ...,
+ size: Optional[_ShapeLike] = ...,
+ dtype: Union[
+ dtype[uint32], Type[uint32], _UInt32Codes, _SupportsDType[dtype[uint32]]
+ ] = ...,
+ endpoint: bool = ...,
+ ) -> ndarray[Any, dtype[uint32]]: ...
+ @overload
+ def integers( # type: ignore[misc]
+ self,
+ low: _ArrayLikeInt_co,
+ high: Optional[_ArrayLikeInt_co] = ...,
+ size: Optional[_ShapeLike] = ...,
+ dtype: Union[
+ dtype[uint64], Type[uint64], _UInt64Codes, _SupportsDType[dtype[uint64]]
+ ] = ...,
+ endpoint: bool = ...,
+ ) -> ndarray[Any, dtype[uint64]]: ...
+ # TODO: Use a TypeVar _T here to get away from Any output? Should be int->ndarray[Any,dtype[int64]], ArrayLike[_T] -> Union[_T, ndarray[Any,Any]]
+ def choice(
+ self,
+ a: ArrayLike,
+ size: Optional[_ShapeLike] = ...,
+ replace: bool = ...,
+ p: Optional[_ArrayLikeFloat_co] = ...,
+ axis: Optional[int] = ...,
+ shuffle: bool = ...,
+ ) -> Any: ...
+ @overload
+ def uniform(self, low: float = ..., high: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
+ @overload
+ def uniform(
+ self,
+ low: _ArrayLikeFloat_co = ...,
+ high: _ArrayLikeFloat_co = ...,
+ size: Optional[_ShapeLike] = ...,
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def normal(self, loc: float = ..., scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
+ @overload
+ def normal(
+ self,
+ loc: _ArrayLikeFloat_co = ...,
+ scale: _ArrayLikeFloat_co = ...,
+ size: Optional[_ShapeLike] = ...,
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def standard_gamma( # type: ignore[misc]
+ self,
+ shape: float,
+ size: None = ...,
+ dtype: Union[_DTypeLikeFloat32, _DTypeLikeFloat64] = ...,
+ out: None = ...,
+ ) -> float: ...
+ @overload
+ def standard_gamma(
+ self,
+ shape: _ArrayLikeFloat_co,
+ size: Optional[_ShapeLike] = ...,
+ dtype: Union[_DTypeLikeFloat32, _DTypeLikeFloat64] = ...,
+ out: Optional[ndarray[Any, dtype[Union[float32, float64]]]] = ...,
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def gamma(self, shape: float, scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
+ @overload
+ def gamma(
+ self,
+ shape: _ArrayLikeFloat_co,
+ scale: _ArrayLikeFloat_co = ...,
+ size: Optional[_ShapeLike] = ...,
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def f(self, dfnum: float, dfden: float, size: None = ...) -> float: ... # type: ignore[misc]
+ @overload
+ def f(
+ self, dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ...
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def noncentral_f(self, dfnum: float, dfden: float, nonc: float, size: None = ...) -> float: ... # type: ignore[misc]
+ @overload
+ def noncentral_f(
+ self,
+ dfnum: _ArrayLikeFloat_co,
+ dfden: _ArrayLikeFloat_co,
+ nonc: _ArrayLikeFloat_co,
+ size: Optional[_ShapeLike] = ...,
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def chisquare(self, df: float, size: None = ...) -> float: ... # type: ignore[misc]
+ @overload
+ def chisquare(
+ self, df: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ...
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def noncentral_chisquare(self, df: float, nonc: float, size: None = ...) -> float: ... # type: ignore[misc]
+ @overload
+ def noncentral_chisquare(
+ self, df: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ...
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def standard_t(self, df: float, size: None = ...) -> float: ... # type: ignore[misc]
+ @overload
+ def standard_t(
+ self, df: _ArrayLikeFloat_co, size: None = ...
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def standard_t(
+ self, df: _ArrayLikeFloat_co, size: _ShapeLike = ...
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def vonmises(self, mu: float, kappa: float, size: None = ...) -> float: ... # type: ignore[misc]
+ @overload
+ def vonmises(
+ self, mu: _ArrayLikeFloat_co, kappa: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ...
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def pareto(self, a: float, size: None = ...) -> float: ... # type: ignore[misc]
+ @overload
+ def pareto(
+ self, a: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ...
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def weibull(self, a: float, size: None = ...) -> float: ... # type: ignore[misc]
+ @overload
+ def weibull(
+ self, a: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ...
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def power(self, a: float, size: None = ...) -> float: ... # type: ignore[misc]
+ @overload
+ def power(
+ self, a: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ...
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def laplace(self, loc: float = ..., scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
+ @overload
+ def laplace(
+ self,
+ loc: _ArrayLikeFloat_co = ...,
+ scale: _ArrayLikeFloat_co = ...,
+ size: Optional[_ShapeLike] = ...,
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def gumbel(self, loc: float = ..., scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
+ @overload
+ def gumbel(
+ self,
+ loc: _ArrayLikeFloat_co = ...,
+ scale: _ArrayLikeFloat_co = ...,
+ size: Optional[_ShapeLike] = ...,
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def logistic(self, loc: float = ..., scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
+ @overload
+ def logistic(
+ self,
+ loc: _ArrayLikeFloat_co = ...,
+ scale: _ArrayLikeFloat_co = ...,
+ size: Optional[_ShapeLike] = ...,
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def lognormal(self, mean: float = ..., sigma: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
+ @overload
+ def lognormal(
+ self,
+ mean: _ArrayLikeFloat_co = ...,
+ sigma: _ArrayLikeFloat_co = ...,
+ size: Optional[_ShapeLike] = ...,
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def rayleigh(self, scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
+ @overload
+ def rayleigh(
+ self, scale: _ArrayLikeFloat_co = ..., size: Optional[_ShapeLike] = ...
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def wald(self, mean: float, scale: float, size: None = ...) -> float: ... # type: ignore[misc]
+ @overload
+ def wald(
+ self, mean: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ...
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def triangular(self, left: float, mode: float, right: float, size: None = ...) -> float: ... # type: ignore[misc]
+ @overload
+ def triangular(
+ self,
+ left: _ArrayLikeFloat_co,
+ mode: _ArrayLikeFloat_co,
+ right: _ArrayLikeFloat_co,
+ size: Optional[_ShapeLike] = ...,
+ ) -> ndarray[Any, dtype[float64]]: ...
+ # Complicated, discrete distributions:
+ @overload
+ def binomial(self, n: int, p: float, size: None = ...) -> int: ... # type: ignore[misc]
+ @overload
+ def binomial(
+ self, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ...
+ ) -> ndarray[Any, dtype[int64]]: ...
+ @overload
+ def negative_binomial(self, n: float, p: float, size: None = ...) -> int: ... # type: ignore[misc]
+ @overload
+ def negative_binomial(
+ self, n: _ArrayLikeFloat_co, p: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ...
+ ) -> ndarray[Any, dtype[int64]]: ...
+ @overload
+ def poisson(self, lam: float = ..., size: None = ...) -> int: ... # type: ignore[misc]
+ @overload
+ def poisson(
+ self, lam: _ArrayLikeFloat_co = ..., size: Optional[_ShapeLike] = ...
+ ) -> ndarray[Any, dtype[int64]]: ...
+ @overload
+ def zipf(self, a: float, size: None = ...) -> int: ... # type: ignore[misc]
+ @overload
+ def zipf(
+ self, a: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ...
+ ) -> ndarray[Any, dtype[int64]]: ...
+ @overload
+ def geometric(self, p: float, size: None = ...) -> int: ... # type: ignore[misc]
+ @overload
+ def geometric(
+ self, p: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ...
+ ) -> ndarray[Any, dtype[int64]]: ...
+ @overload
+ def hypergeometric(self, ngood: int, nbad: int, nsample: int, size: None = ...) -> int: ... # type: ignore[misc]
+ @overload
+ def hypergeometric(
+ self,
+ ngood: _ArrayLikeInt_co,
+ nbad: _ArrayLikeInt_co,
+ nsample: _ArrayLikeInt_co,
+ size: Optional[_ShapeLike] = ...,
+ ) -> ndarray[Any, dtype[int64]]: ...
+ @overload
+ def logseries(self, p: float, size: None = ...) -> int: ... # type: ignore[misc]
+ @overload
+ def logseries(
+ self, p: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ...
+ ) -> ndarray[Any, dtype[int64]]: ...
+ # Multivariate distributions:
+ def multivariate_normal(
+ self,
+ mean: _ArrayLikeFloat_co,
+ cov: _ArrayLikeFloat_co,
+ size: Optional[_ShapeLike] = ...,
+ check_valid: Literal["warn", "raise", "ignore"] = ...,
+ tol: float = ...,
+ *,
+ method: Literal["svd", "eigh", "cholesky"] = ...
+ ) -> ndarray[Any, dtype[float64]]: ...
+ def multinomial(
+ self, n: _ArrayLikeInt_co, pvals: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ...
+ ) -> ndarray[Any, dtype[int64]]: ...
+ def multivariate_hypergeometric(
+ self,
+ colors: _ArrayLikeInt_co,
+ nsample: int,
+ size: Optional[_ShapeLike] = ...,
+ method: Literal["marginals", "count"] = ...,
+ ) -> ndarray[Any, dtype[int64]]: ...
+ def dirichlet(
+ self, alpha: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ...
+ ) -> ndarray[Any, dtype[float64]]: ...
+ def permuted(
+ self, x: ArrayLike, *, axis: Optional[int] = ..., out: Optional[ndarray[Any, Any]] = ...
+ ) -> ndarray[Any, Any]: ...
+ def shuffle(self, x: ArrayLike, axis: int = ...) -> Sequence[Any]: ...
+
+def default_rng(seed: Union[None, _ArrayLikeInt_co, SeedSequence] = ...) -> Generator: ...
diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx
index 3a84470ea..1903dce37 100644
--- a/numpy/random/_generator.pyx
+++ b/numpy/random/_generator.pyx
@@ -386,7 +386,7 @@ cdef class Generator:
0.0, '', CONS_NONE,
None)
- def standard_exponential(self, size=None, dtype=np.float64, method=u'zig', out=None):
+ def standard_exponential(self, size=None, dtype=np.float64, method='zig', out=None):
"""
standard_exponential(size=None, dtype=np.float64, method='zig', out=None)
@@ -426,12 +426,12 @@ cdef class Generator:
"""
_dtype = np.dtype(dtype)
if _dtype == np.float64:
- if method == u'zig':
+ if method == 'zig':
return double_fill(&random_standard_exponential_fill, &self._bitgen, size, self.lock, out)
else:
return double_fill(&random_standard_exponential_inv_fill, &self._bitgen, size, self.lock, out)
elif _dtype == np.float32:
- if method == u'zig':
+ if method == 'zig':
return float_fill(&random_standard_exponential_fill_f, &self._bitgen, size, self.lock, out)
else:
return float_fill(&random_standard_exponential_inv_fill_f, &self._bitgen, size, self.lock, out)
@@ -579,13 +579,13 @@ cdef class Generator:
Returns
-------
- out : str
+ out : bytes
String of length `length`.
Examples
--------
>>> np.random.default_rng().bytes(10)
- ' eh\\x85\\x022SZ\\xbf\\xa4' #random
+ b'\xfeC\x9b\x86\x17\xf2\xa1\xafcp' # random
"""
cdef Py_ssize_t n_uint32 = ((length - 1) // 4 + 1)
@@ -996,7 +996,7 @@ cdef class Generator:
--------
>>> rng = np.random.default_rng()
>>> rng.standard_normal()
- 2.1923875335537315 #random
+ 2.1923875335537315 # random
>>> s = rng.standard_normal(8000)
>>> s
@@ -3208,7 +3208,7 @@ cdef class Generator:
How many trials succeeded after a single run?
>>> (z == 1).sum() / 10000.
- 0.34889999999999999 #random
+ 0.34889999999999999 # random
"""
return disc(&random_geometric, &self._bitgen, size, self.lock, 1, 0,
@@ -4410,7 +4410,9 @@ cdef class Generator:
char* x_ptr
char* buf_ptr
- axis = normalize_axis_index(axis, np.ndim(x))
+ if isinstance(x, np.ndarray):
+ # Only call ndim on ndarrays, see GH 18142
+ axis = normalize_axis_index(axis, np.ndim(x))
if type(x) is np.ndarray and x.ndim == 1 and x.size:
# Fast, statically typed path: shuffle the underlying buffer.
diff --git a/numpy/random/_mt19937.pyi b/numpy/random/_mt19937.pyi
new file mode 100644
index 000000000..1b8bacdae
--- /dev/null
+++ b/numpy/random/_mt19937.pyi
@@ -0,0 +1,28 @@
+import sys
+from typing import Any, Union
+
+from numpy import dtype, ndarray, uint32
+from numpy.random.bit_generator import BitGenerator, SeedSequence
+from numpy.typing import _ArrayLikeInt_co
+
+if sys.version_info >= (3, 8):
+ from typing import TypedDict
+else:
+ from typing_extensions import TypedDict
+
+class _MT19937Internal(TypedDict):
+ key: ndarray[Any, dtype[uint32]]
+ pos: int
+
+class _MT19937State(TypedDict):
+ bit_generator: str
+ state: _MT19937Internal
+
+class MT19937(BitGenerator):
+ def __init__(self, seed: Union[None, _ArrayLikeInt_co, SeedSequence] = ...) -> None: ...
+ def _legacy_seeding(self, seed: _ArrayLikeInt_co) -> None: ...
+ def jumped(self, jumps: int = ...) -> MT19937: ...
+ @property
+ def state(self) -> _MT19937State: ...
+ @state.setter
+ def state(self, value: _MT19937State) -> None: ...
diff --git a/numpy/random/_pcg64.pyi b/numpy/random/_pcg64.pyi
new file mode 100644
index 000000000..a4f6e0760
--- /dev/null
+++ b/numpy/random/_pcg64.pyi
@@ -0,0 +1,34 @@
+import sys
+from typing import Union
+
+from numpy.random.bit_generator import BitGenerator, SeedSequence
+from numpy.typing import _ArrayLikeInt_co
+
+if sys.version_info >= (3, 8):
+ from typing import TypedDict
+else:
+ from typing_extensions import TypedDict
+
+class _PCG64Internal(TypedDict):
+ state: int
+ inc: int
+
+class _PCG64State(TypedDict):
+ bit_generator: str
+ state: _PCG64Internal
+ has_uint32: int
+ uinteger: int
+
+class PCG64(BitGenerator):
+ def __init__(self, seed: Union[None, _ArrayLikeInt_co, SeedSequence] = ...) -> None: ...
+ def jumped(self, jumps: int = ...) -> PCG64: ...
+ @property
+ def state(
+ self,
+ ) -> _PCG64State: ...
+ @state.setter
+ def state(
+ self,
+ value: _PCG64State,
+ ) -> None: ...
+ def advance(self, delta: int) -> PCG64: ...
diff --git a/numpy/random/_philox.pyi b/numpy/random/_philox.pyi
new file mode 100644
index 000000000..f6a5b9b9b
--- /dev/null
+++ b/numpy/random/_philox.pyi
@@ -0,0 +1,42 @@
+import sys
+from typing import Any, Union
+
+from numpy import dtype, ndarray, uint64
+from numpy.random.bit_generator import BitGenerator, SeedSequence
+from numpy.typing import _ArrayLikeInt_co
+
+if sys.version_info >= (3, 8):
+ from typing import TypedDict
+else:
+ from typing_extensions import TypedDict
+
+class _PhiloxInternal(TypedDict):
+ counter: ndarray[Any, dtype[uint64]]
+ key: ndarray[Any, dtype[uint64]]
+
+class _PhiloxState(TypedDict):
+ bit_generator: str
+ state: _PhiloxInternal
+ buffer: ndarray[Any, dtype[uint64]]
+ buffer_pos: int
+ has_uint32: int
+ uinteger: int
+
+class Philox(BitGenerator):
+ def __init__(
+ self,
+ seed: Union[None, _ArrayLikeInt_co, SeedSequence] = ...,
+ counter: Union[None, _ArrayLikeInt_co] = ...,
+ key: Union[None, _ArrayLikeInt_co] = ...,
+ ) -> None: ...
+ @property
+ def state(
+ self,
+ ) -> _PhiloxState: ...
+ @state.setter
+ def state(
+ self,
+ value: _PhiloxState,
+ ) -> None: ...
+ def jumped(self, jumps: int = ...) -> Philox: ...
+ def advance(self, delta: int) -> Philox: ...
diff --git a/numpy/random/_philox.pyx b/numpy/random/_philox.pyx
index 7e8880490..0fe8ebd7c 100644
--- a/numpy/random/_philox.pyx
+++ b/numpy/random/_philox.pyx
@@ -1,10 +1,5 @@
from cpython.pycapsule cimport PyCapsule_New
-try:
- from threading import Lock
-except ImportError:
- from dummy_threading import Lock
-
import numpy as np
cimport numpy as np
diff --git a/numpy/random/_sfc64.pyi b/numpy/random/_sfc64.pyi
new file mode 100644
index 000000000..72a271c92
--- /dev/null
+++ b/numpy/random/_sfc64.pyi
@@ -0,0 +1,34 @@
+import sys
+from typing import Any, Union
+
+from numpy import dtype as dtype
+from numpy import ndarray as ndarray
+from numpy import uint64
+from numpy.random.bit_generator import BitGenerator, SeedSequence
+from numpy.typing import _ArrayLikeInt_co
+
+if sys.version_info >= (3, 8):
+ from typing import TypedDict
+else:
+ from typing_extensions import TypedDict
+
+class _SFC64Internal(TypedDict):
+ state: ndarray[Any, dtype[uint64]]
+
+class _SFC64State(TypedDict):
+ bit_generator: str
+ state: _SFC64Internal
+ has_uint32: int
+ uinteger: int
+
+class SFC64(BitGenerator):
+ def __init__(self, seed: Union[None, _ArrayLikeInt_co, SeedSequence] = ...) -> None: ...
+ @property
+ def state(
+ self,
+ ) -> _SFC64State: ...
+ @state.setter
+ def state(
+ self,
+ value: _SFC64State,
+ ) -> None: ...
diff --git a/numpy/random/bit_generator.pyi b/numpy/random/bit_generator.pyi
new file mode 100644
index 000000000..80a2e829b
--- /dev/null
+++ b/numpy/random/bit_generator.pyi
@@ -0,0 +1,124 @@
+import abc
+import sys
+from threading import Lock
+from typing import (
+ Any,
+ Callable,
+ Dict,
+ List,
+ Mapping,
+ NamedTuple,
+ Optional,
+ Sequence,
+ Tuple,
+ Type,
+ TypedDict,
+ TypeVar,
+ Union,
+ overload,
+)
+
+from numpy import dtype, ndarray, uint32, uint64, unsignedinteger
+from numpy.typing import DTypeLike, _ArrayLikeInt_co, _DTypeLikeUInt, _ShapeLike, _SupportsDType
+
+if sys.version_info >= (3, 8):
+ from typing import Literal
+else:
+ from typing_extensions import Literal
+
+_T = TypeVar("_T")
+
+_UIntType = TypeVar("_UIntType", uint64, uint32)
+_DTypeLike = Union[
+ Type[_UIntType],
+ dtype[_UIntType],
+ _SupportsDType[dtype[_UIntType]],
+]
+
+class _SeedSeqState(TypedDict):
+ entropy: Union[None, int, Sequence[int]]
+ spawn_key: Tuple[int, ...]
+ pool_size: int
+ n_children_spawned: int
+
+class _Interface(NamedTuple):
+ state_address: Any
+ state: Any
+ next_uint64: Any
+ next_uint32: Any
+ next_double: Any
+ bit_generator: Any
+
+class ISeedSequence(abc.ABC):
+ @overload
+ @abc.abstractmethod
+ def generate_state(
+ self, n_words: int, dtype: _DTypeLike[_UIntType] = ...
+ ) -> ndarray[Any, dtype[_UIntType]]: ...
+ @overload
+ @abc.abstractmethod
+ def generate_state(
+ self, n_words: int, dtype: _DTypeLikeUInt = ...
+ ) -> ndarray[Any, dtype[unsignedinteger[Any]]]: ...
+
+class ISpawnableSeedSequence(ISeedSequence):
+ @abc.abstractmethod
+ def spawn(self: _T, n_children: int) -> List[_T]: ...
+
+class SeedlessSeedSequence(ISpawnableSeedSequence):
+ @overload
+ def generate_state(
+ self, n_words: int, dtype: _DTypeLike[_UIntType] = ...
+ ) -> ndarray[Any, dtype[_UIntType]]: ...
+ @overload
+ def generate_state(
+ self, n_words: int, dtype: _DTypeLikeUInt = ...
+ ) -> ndarray[Any, dtype[unsignedinteger[Any]]]: ...
+ def spawn(self: _T, n_children: int) -> List[_T]: ...
+
+class SeedSequence(ISpawnableSeedSequence):
+ entropy: Union[None, int, Sequence[int]]
+ spawn_key: Tuple[int, ...]
+ pool_size: int
+ n_children_spawned: int
+ pool: ndarray[Any, dtype[uint32]]
+ def __init__(
+ self,
+ entropy: Union[None, int, Sequence[int]] = ...,
+ *,
+ spawn_key: Sequence[int] = ...,
+ pool_size: int = ...,
+ n_children_spawned: int = ...,
+ ) -> None: ...
+ def __repr__(self) -> str: ...
+ @property
+ def state(
+ self,
+ ) -> _SeedSeqState: ...
+ def generate_state(self, n_words: int, dtype: DTypeLike = ...) -> ndarray[Any, Any]: ...
+ def spawn(self, n_children: int) -> List[SeedSequence]: ...
+
+class BitGenerator(abc.ABC):
+ lock: Lock
+ def __init__(self, seed: Union[None, _ArrayLikeInt_co, SeedSequence] = ...) -> None: ...
+ def __getstate__(self) -> Dict[str, Any]: ...
+ def __setstate__(self, state: Dict[str, Any]) -> None: ...
+ def __reduce__(
+ self,
+ ) -> Tuple[Callable[[str], BitGenerator], Tuple[str], Tuple[Dict[str, Any]]]: ...
+ @abc.abstractmethod
+ @property
+ def state(self) -> Mapping[str, Any]: ...
+ @state.setter
+ def state(self, value: Mapping[str, Any]) -> None: ...
+ @overload
+ def random_raw(self, size: None = ..., output: Literal[True] = ...) -> int: ... # type: ignore[misc]
+ @overload
+ def random_raw(self, size: _ShapeLike = ..., output: Literal[True] = ...) -> ndarray[Any, dtype[uint64]]: ... # type: ignore[misc]
+ @overload
+ def random_raw(self, size: Optional[_ShapeLike] = ..., output: Literal[False] = ...) -> None: ... # type: ignore[misc]
+ def _benchmark(self, cnt: int, method: str = ...) -> None: ...
+ @property
+ def ctypes(self) -> _Interface: ...
+ @property
+ def cffi(self) -> _Interface: ...
diff --git a/numpy/random/bit_generator.pyx b/numpy/random/bit_generator.pyx
index 9b0c363ef..123d77b40 100644
--- a/numpy/random/bit_generator.pyx
+++ b/numpy/random/bit_generator.pyx
@@ -43,10 +43,7 @@ except ImportError:
from random import SystemRandom
randbits = SystemRandom().getrandbits
-try:
- from threading import Lock
-except ImportError:
- from dummy_threading import Lock
+from threading import Lock
from cpython.pycapsule cimport PyCapsule_New
@@ -587,8 +584,8 @@ cdef class BitGenerator():
"""
return random_raw(&self._bitgen, self.lock, size, output)
- def _benchmark(self, Py_ssize_t cnt, method=u'uint64'):
- '''Used in tests'''
+ def _benchmark(self, Py_ssize_t cnt, method='uint64'):
+ """Used in tests"""
return benchmark(&self._bitgen, self.lock, cnt, method)
@property
diff --git a/numpy/random/tests/test_generator_mt19937.py b/numpy/random/tests/test_generator_mt19937.py
index 47c81584c..d68bcd38b 100644
--- a/numpy/random/tests/test_generator_mt19937.py
+++ b/numpy/random/tests/test_generator_mt19937.py
@@ -2526,3 +2526,11 @@ def test_broadcast_size_scalar():
random.normal(mu, sigma, size=3)
with pytest.raises(ValueError):
random.normal(mu, sigma, size=2)
+
+
+def test_ragged_shuffle():
+ # GH 18142
+ seq = [[], [], 1]
+ gen = Generator(MT19937(0))
+ assert_no_warnings(gen.shuffle, seq)
+ assert seq == [1, [], []]
diff --git a/numpy/typing/__init__.py b/numpy/typing/__init__.py
index 8f5df483b..61d780b85 100644
--- a/numpy/typing/__init__.py
+++ b/numpy/typing/__init__.py
@@ -317,6 +317,7 @@ from ._dtype_like import (
_DTypeLikeVoid,
_DTypeLikeStr,
_DTypeLikeBytes,
+ _DTypeLikeComplex_co,
)
from ._array_like import (
ArrayLike as ArrayLike,
diff --git a/numpy/typing/_char_codes.py b/numpy/typing/_char_codes.py
index 143644e88..6b6f7ae88 100644
--- a/numpy/typing/_char_codes.py
+++ b/numpy/typing/_char_codes.py
@@ -48,11 +48,11 @@ if TYPE_CHECKING or HAVE_LITERAL:
_HalfCodes = Literal["half", "e", "=e", "<e", ">e"]
_SingleCodes = Literal["single", "f", "=f", "<f", ">f"]
- _DoubleCodes = Literal["double" "float", "float_", "d", "=d", "<d", ">d"]
+ _DoubleCodes = Literal["double", "float", "float_", "d", "=d", "<d", ">d"]
_LongDoubleCodes = Literal["longdouble", "longfloat", "g", "=g", "<g", ">g"]
_CSingleCodes = Literal["csingle", "singlecomplex", "F", "=F", "<F", ">F"]
- _CDoubleCodes = Literal["cdouble" "complex", "complex_", "cfloat", "D", "=D", "<D", ">D"]
+ _CDoubleCodes = Literal["cdouble", "complex", "complex_", "cfloat", "D", "=D", "<D", ">D"]
_CLongDoubleCodes = Literal["clongdouble", "clongfloat", "longcomplex", "G", "=G", "<G", ">G"]
_StrCodes = Literal["str", "str_", "str0", "unicode", "unicode_", "U", "=U", "<U", ">U"]
diff --git a/numpy/typing/_dtype_like.py b/numpy/typing/_dtype_like.py
index f86b4a67c..a41e2f358 100644
--- a/numpy/typing/_dtype_like.py
+++ b/numpy/typing/_dtype_like.py
@@ -228,3 +228,11 @@ _DTypeLikeObject = Union[
"_SupportsDType[np.dtype[np.object_]]",
_ObjectCodes,
]
+
+_DTypeLikeComplex_co = Union[
+ _DTypeLikeBool,
+ _DTypeLikeUInt,
+ _DTypeLikeInt,
+ _DTypeLikeFloat,
+ _DTypeLikeComplex,
+]
diff --git a/numpy/typing/tests/data/fail/einsumfunc.py b/numpy/typing/tests/data/fail/einsumfunc.py
new file mode 100644
index 000000000..33722f861
--- /dev/null
+++ b/numpy/typing/tests/data/fail/einsumfunc.py
@@ -0,0 +1,15 @@
+from typing import List, Any
+import numpy as np
+
+AR_i: np.ndarray[Any, np.dtype[np.int64]]
+AR_f: np.ndarray[Any, np.dtype[np.float64]]
+AR_m: np.ndarray[Any, np.dtype[np.timedelta64]]
+AR_O: np.ndarray[Any, np.dtype[np.object_]]
+AR_U: np.ndarray[Any, np.dtype[np.str_]]
+
+np.einsum("i,i->i", AR_i, AR_m) # E: incompatible type
+np.einsum("i,i->i", AR_O, AR_O) # E: incompatible type
+np.einsum("i,i->i", AR_f, AR_f, dtype=np.int32) # E: incompatible type
+np.einsum("i,i->i", AR_i, AR_i, dtype=np.timedelta64, casting="unsafe") # E: No overload variant
+np.einsum("i,i->i", AR_i, AR_i, out=AR_U) # E: Value of type variable "_ArrayType" of "einsum" cannot be
+np.einsum("i,i->i", AR_i, AR_i, out=AR_U, casting="unsafe") # E: No overload variant
diff --git a/numpy/typing/tests/data/fail/ufunclike.py b/numpy/typing/tests/data/fail/ufunclike.py
new file mode 100644
index 000000000..82a5f3a1d
--- /dev/null
+++ b/numpy/typing/tests/data/fail/ufunclike.py
@@ -0,0 +1,21 @@
+from typing import List, Any
+import numpy as np
+
+AR_c: np.ndarray[Any, np.dtype[np.complex128]]
+AR_m: np.ndarray[Any, np.dtype[np.timedelta64]]
+AR_M: np.ndarray[Any, np.dtype[np.datetime64]]
+AR_O: np.ndarray[Any, np.dtype[np.object_]]
+
+np.fix(AR_c) # E: incompatible type
+np.fix(AR_m) # E: incompatible type
+np.fix(AR_M) # E: incompatible type
+
+np.isposinf(AR_c) # E: incompatible type
+np.isposinf(AR_m) # E: incompatible type
+np.isposinf(AR_M) # E: incompatible type
+np.isposinf(AR_O) # E: incompatible type
+
+np.isneginf(AR_c) # E: incompatible type
+np.isneginf(AR_m) # E: incompatible type
+np.isneginf(AR_M) # E: incompatible type
+np.isneginf(AR_O) # E: incompatible type
diff --git a/numpy/typing/tests/data/pass/einsumfunc.py b/numpy/typing/tests/data/pass/einsumfunc.py
new file mode 100644
index 000000000..914eed4cc
--- /dev/null
+++ b/numpy/typing/tests/data/pass/einsumfunc.py
@@ -0,0 +1,37 @@
+from __future__ import annotations
+
+from typing import List, Any
+
+import pytest
+import numpy as np
+
+AR_LIKE_b = [True, True, True]
+AR_LIKE_u = [np.uint32(1), np.uint32(2), np.uint32(3)]
+AR_LIKE_i = [1, 2, 3]
+AR_LIKE_f = [1.0, 2.0, 3.0]
+AR_LIKE_c = [1j, 2j, 3j]
+AR_LIKE_U = ["1", "2", "3"]
+
+OUT_c: np.ndarray[Any, np.dtype[np.complex128]] = np.empty(3, dtype=np.complex128)
+
+np.einsum("i,i->i", AR_LIKE_b, AR_LIKE_b)
+np.einsum("i,i->i", AR_LIKE_u, AR_LIKE_u)
+np.einsum("i,i->i", AR_LIKE_i, AR_LIKE_i)
+np.einsum("i,i->i", AR_LIKE_f, AR_LIKE_f)
+np.einsum("i,i->i", AR_LIKE_c, AR_LIKE_c)
+np.einsum("i,i->i", AR_LIKE_b, AR_LIKE_i)
+np.einsum("i,i,i,i->i", AR_LIKE_b, AR_LIKE_u, AR_LIKE_i, AR_LIKE_c)
+
+np.einsum("i,i->i", AR_LIKE_f, AR_LIKE_f, dtype="c16")
+np.einsum("i,i->i", AR_LIKE_U, AR_LIKE_U, dtype=bool, casting="unsafe")
+np.einsum("i,i->i", AR_LIKE_f, AR_LIKE_f, out=OUT_c)
+with pytest.raises(np.ComplexWarning):
+ np.einsum("i,i->i", AR_LIKE_U, AR_LIKE_U, dtype=float, casting="unsafe", out=OUT_c)
+
+np.einsum_path("i,i->i", AR_LIKE_b, AR_LIKE_b)
+np.einsum_path("i,i->i", AR_LIKE_u, AR_LIKE_u)
+np.einsum_path("i,i->i", AR_LIKE_i, AR_LIKE_i)
+np.einsum_path("i,i->i", AR_LIKE_f, AR_LIKE_f)
+np.einsum_path("i,i->i", AR_LIKE_c, AR_LIKE_c)
+np.einsum_path("i,i->i", AR_LIKE_b, AR_LIKE_i)
+np.einsum_path("i,i,i,i->i", AR_LIKE_b, AR_LIKE_u, AR_LIKE_i, AR_LIKE_c)
diff --git a/numpy/typing/tests/data/pass/ufunclike.py b/numpy/typing/tests/data/pass/ufunclike.py
new file mode 100644
index 000000000..448ee66bb
--- /dev/null
+++ b/numpy/typing/tests/data/pass/ufunclike.py
@@ -0,0 +1,48 @@
+from __future__ import annotations
+from typing import Any
+import numpy as np
+
+
+class Object:
+ def __ceil__(self) -> Object:
+ return self
+
+ def __floor__(self) -> Object:
+ return self
+
+ def __ge__(self, value: object) -> bool:
+ return True
+
+ def __array__(
+ self, dtype: None = None
+ ) -> np.ndarray[Any, np.dtype[np.object_]]:
+ ret = np.empty((), dtype=object)
+ ret[()] = self
+ return ret
+
+
+AR_LIKE_b = [True, True, False]
+AR_LIKE_u = [np.uint32(1), np.uint32(2), np.uint32(3)]
+AR_LIKE_i = [1, 2, 3]
+AR_LIKE_f = [1.0, 2.0, 3.0]
+AR_LIKE_O = [Object(), Object(), Object()]
+AR_U: np.ndarray[Any, np.dtype[np.str_]] = np.zeros(3, dtype="U5")
+
+np.fix(AR_LIKE_b)
+np.fix(AR_LIKE_u)
+np.fix(AR_LIKE_i)
+np.fix(AR_LIKE_f)
+np.fix(AR_LIKE_O)
+np.fix(AR_LIKE_f, out=AR_U)
+
+np.isposinf(AR_LIKE_b)
+np.isposinf(AR_LIKE_u)
+np.isposinf(AR_LIKE_i)
+np.isposinf(AR_LIKE_f)
+np.isposinf(AR_LIKE_f, out=AR_U)
+
+np.isneginf(AR_LIKE_b)
+np.isneginf(AR_LIKE_u)
+np.isneginf(AR_LIKE_i)
+np.isneginf(AR_LIKE_f)
+np.isneginf(AR_LIKE_f, out=AR_U)
diff --git a/numpy/typing/tests/data/reveal/einsumfunc.py b/numpy/typing/tests/data/reveal/einsumfunc.py
new file mode 100644
index 000000000..18c192b0b
--- /dev/null
+++ b/numpy/typing/tests/data/reveal/einsumfunc.py
@@ -0,0 +1,32 @@
+from typing import List, Any
+import numpy as np
+
+AR_LIKE_b: List[bool]
+AR_LIKE_u: List[np.uint32]
+AR_LIKE_i: List[int]
+AR_LIKE_f: List[float]
+AR_LIKE_c: List[complex]
+AR_LIKE_U: List[str]
+
+OUT_f: np.ndarray[Any, np.dtype[np.float64]]
+
+reveal_type(np.einsum("i,i->i", AR_LIKE_b, AR_LIKE_b)) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
+reveal_type(np.einsum("i,i->i", AR_LIKE_u, AR_LIKE_u)) # E: Union[numpy.unsignedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[Any]]]
+reveal_type(np.einsum("i,i->i", AR_LIKE_i, AR_LIKE_i)) # E: Union[numpy.signedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]
+reveal_type(np.einsum("i,i->i", AR_LIKE_f, AR_LIKE_f)) # E: Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
+reveal_type(np.einsum("i,i->i", AR_LIKE_c, AR_LIKE_c)) # E: Union[numpy.complexfloating[Any, Any], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]
+reveal_type(np.einsum("i,i->i", AR_LIKE_b, AR_LIKE_i)) # E: Union[numpy.signedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]
+reveal_type(np.einsum("i,i,i,i->i", AR_LIKE_b, AR_LIKE_u, AR_LIKE_i, AR_LIKE_c)) # E: Union[numpy.complexfloating[Any, Any], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]
+
+reveal_type(np.einsum("i,i->i", AR_LIKE_c, AR_LIKE_c, out=OUT_f)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]
+reveal_type(np.einsum("i,i->i", AR_LIKE_U, AR_LIKE_U, dtype=bool, casting="unsafe", out=OUT_f)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]
+reveal_type(np.einsum("i,i->i", AR_LIKE_f, AR_LIKE_f, dtype="c16")) # E: Union[numpy.complexfloating[Any, Any], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]
+reveal_type(np.einsum("i,i->i", AR_LIKE_U, AR_LIKE_U, dtype=bool, casting="unsafe")) # E: Any
+
+reveal_type(np.einsum_path("i,i->i", AR_LIKE_b, AR_LIKE_b)) # E: Tuple[builtins.list[Any], builtins.str]
+reveal_type(np.einsum_path("i,i->i", AR_LIKE_u, AR_LIKE_u)) # E: Tuple[builtins.list[Any], builtins.str]
+reveal_type(np.einsum_path("i,i->i", AR_LIKE_i, AR_LIKE_i)) # E: Tuple[builtins.list[Any], builtins.str]
+reveal_type(np.einsum_path("i,i->i", AR_LIKE_f, AR_LIKE_f)) # E: Tuple[builtins.list[Any], builtins.str]
+reveal_type(np.einsum_path("i,i->i", AR_LIKE_c, AR_LIKE_c)) # E: Tuple[builtins.list[Any], builtins.str]
+reveal_type(np.einsum_path("i,i->i", AR_LIKE_b, AR_LIKE_i)) # E: Tuple[builtins.list[Any], builtins.str]
+reveal_type(np.einsum_path("i,i,i,i->i", AR_LIKE_b, AR_LIKE_u, AR_LIKE_i, AR_LIKE_c)) # E: Tuple[builtins.list[Any], builtins.str]
diff --git a/numpy/typing/tests/data/reveal/ufunclike.py b/numpy/typing/tests/data/reveal/ufunclike.py
new file mode 100644
index 000000000..35e45a824
--- /dev/null
+++ b/numpy/typing/tests/data/reveal/ufunclike.py
@@ -0,0 +1,29 @@
+from typing import List, Any
+import numpy as np
+
+AR_LIKE_b: List[bool]
+AR_LIKE_u: List[np.uint32]
+AR_LIKE_i: List[int]
+AR_LIKE_f: List[float]
+AR_LIKE_O: List[np.object_]
+
+AR_U: np.ndarray[Any, np.dtype[np.str_]]
+
+reveal_type(np.fix(AR_LIKE_b)) # E: Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]]
+reveal_type(np.fix(AR_LIKE_u)) # E: Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]]
+reveal_type(np.fix(AR_LIKE_i)) # E: Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]]
+reveal_type(np.fix(AR_LIKE_f)) # E: Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]]
+reveal_type(np.fix(AR_LIKE_O)) # E: Any
+reveal_type(np.fix(AR_LIKE_f, out=AR_U)) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]]
+
+reveal_type(np.isposinf(AR_LIKE_b)) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]]
+reveal_type(np.isposinf(AR_LIKE_u)) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]]
+reveal_type(np.isposinf(AR_LIKE_i)) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]]
+reveal_type(np.isposinf(AR_LIKE_f)) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]]
+reveal_type(np.isposinf(AR_LIKE_f, out=AR_U)) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]]
+
+reveal_type(np.isneginf(AR_LIKE_b)) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]]
+reveal_type(np.isneginf(AR_LIKE_u)) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]]
+reveal_type(np.isneginf(AR_LIKE_i)) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]]
+reveal_type(np.isneginf(AR_LIKE_f)) # E: Union[numpy.bool_, numpy.ndarray[Any, numpy.dtype[numpy.bool_]]]
+reveal_type(np.isneginf(AR_LIKE_f, out=AR_U)) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]]
diff --git a/numpy/typing/tests/test_typing.py b/numpy/typing/tests/test_typing.py
index eb7e0b09e..e80282420 100644
--- a/numpy/typing/tests/test_typing.py
+++ b/numpy/typing/tests/test_typing.py
@@ -91,7 +91,7 @@ def test_success(path):
# Alias `OUTPUT_MYPY` so that it appears in the local namespace
output_mypy = OUTPUT_MYPY
if path in output_mypy:
- raise AssertionError("\n".join(v for v in output_mypy[path].values()))
+ raise AssertionError("\n".join(v for v in output_mypy[path]))
@pytest.mark.slow