summaryrefslogtreecommitdiff
path: root/numpy
diff options
context:
space:
mode:
Diffstat (limited to 'numpy')
-rw-r--r--numpy/__init__.py8
-rw-r--r--numpy/__init__.pyi55
-rw-r--r--numpy/core/_add_newdocs.py23
-rw-r--r--numpy/core/_internal.py9
-rw-r--r--numpy/core/shape_base.pyi65
-rw-r--r--numpy/core/src/multiarray/array_method.c8
-rw-r--r--numpy/core/src/multiarray/common.h28
-rw-r--r--numpy/core/src/multiarray/conversion_utils.c6
-rw-r--r--numpy/core/src/multiarray/conversion_utils.h11
-rw-r--r--numpy/core/src/multiarray/convert_datatype.c24
-rw-r--r--numpy/core/src/multiarray/descriptor.c4
-rw-r--r--numpy/core/src/multiarray/getset.c24
-rw-r--r--numpy/core/src/multiarray/iterators.c39
-rw-r--r--numpy/core/src/umath/reduction.c46
-rw-r--r--numpy/core/src/umath/ufunc_object.c523
-rw-r--r--numpy/core/src/umath/ufunc_type_resolution.c18
-rw-r--r--numpy/core/tests/test_dtype.py18
-rw-r--r--numpy/core/tests/test_multiarray.py13
-rw-r--r--numpy/core/tests/test_umath.py27
-rw-r--r--numpy/f2py/cfuncs.py180
-rwxr-xr-xnumpy/f2py/rules.py42
-rw-r--r--numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c2
-rw-r--r--numpy/f2py/tests/test_return_character.py6
-rw-r--r--numpy/f2py/tests/test_string.py148
-rw-r--r--numpy/lib/function_base.py7
-rw-r--r--numpy/lib/twodim_base.pyi265
-rw-r--r--numpy/lib/type_check.pyi246
-rw-r--r--numpy/typing/tests/data/fail/array_constructors.py4
-rw-r--r--numpy/typing/tests/data/fail/twodim_base.py37
-rw-r--r--numpy/typing/tests/data/fail/type_check.py13
-rw-r--r--numpy/typing/tests/data/reveal/array_constructors.py34
-rw-r--r--numpy/typing/tests/data/reveal/dtype.py51
-rw-r--r--numpy/typing/tests/data/reveal/twodim_base.py72
-rw-r--r--numpy/typing/tests/data/reveal/type_check.py73
34 files changed, 1532 insertions, 597 deletions
diff --git a/numpy/__init__.py b/numpy/__init__.py
index baff5e141..8546238ec 100644
--- a/numpy/__init__.py
+++ b/numpy/__init__.py
@@ -383,10 +383,10 @@ else:
error_message = "{}: {}".format(w[-1].category.__name__, str(w[-1].message))
msg = (
"Polyfit sanity test emitted a warning, most likely due "
- "to using a buggy Accelerate backend. If you compiled "
- "yourself, more information is available at "
- "https://numpy.org/doc/stable/user/building.html#accelerated-blas-lapack-libraries "
- "Otherwise report this to the vendor "
+ "to using a buggy Accelerate backend."
+ "\nIf you compiled yourself, more information is available at:"
+ "\nhttps://numpy.org/doc/stable/user/building.html#accelerated-blas-lapack-libraries"
+ "\nOtherwise report this to the vendor "
"that provided NumPy.\n{}\n".format(error_message))
raise RuntimeError(msg)
del _mac_os_check
diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi
index 6e24f6bff..1786535a2 100644
--- a/numpy/__init__.pyi
+++ b/numpy/__init__.pyi
@@ -5,7 +5,7 @@ import mmap
import array as _array
import datetime as dt
from abc import abstractmethod
-from types import TracebackType
+from types import TracebackType, MappingProxyType
from contextlib import ContextDecorator
from numpy.core._internal import _ctypes
@@ -897,7 +897,7 @@ _DTypeScalar_co = TypeVar("_DTypeScalar_co", covariant=True, bound=generic)
_ByteOrder = L["S", "<", ">", "=", "|", "L", "B", "N", "I"]
class dtype(Generic[_DTypeScalar_co]):
- names: Optional[Tuple[str, ...]]
+ names: None | Tuple[str, ...]
# Overload for subclass of generic
@overload
def __new__(
@@ -920,7 +920,7 @@ class dtype(Generic[_DTypeScalar_co]):
@overload
def __new__(cls, dtype: Type[int], align: bool = ..., copy: bool = ...) -> dtype[int_]: ...
@overload
- def __new__(cls, dtype: Optional[Type[float]], align: bool = ..., copy: bool = ...) -> dtype[float_]: ...
+ def __new__(cls, dtype: None | Type[float], align: bool = ..., copy: bool = ...) -> dtype[float_]: ...
@overload
def __new__(cls, dtype: Type[complex], align: bool = ..., copy: bool = ...) -> dtype[complex_]: ...
@overload
@@ -1051,22 +1051,24 @@ class dtype(Generic[_DTypeScalar_co]):
@overload
def __getitem__(self: dtype[void], key: List[str]) -> dtype[void]: ...
@overload
- def __getitem__(self: dtype[void], key: Union[str, int]) -> dtype[Any]: ...
+ def __getitem__(self: dtype[void], key: str | SupportsIndex) -> dtype[Any]: ...
- # NOTE: In the future 1-based multiplications will also yield `void` dtypes
- @overload
- def __mul__(self, value: L[0]) -> None: ... # type: ignore[misc]
+ # NOTE: In the future 1-based multiplications will also yield `flexible` dtypes
@overload
def __mul__(self: _DType, value: L[1]) -> _DType: ...
@overload
- def __mul__(self, value: int) -> dtype[void]: ...
+ def __mul__(self: _FlexDType, value: SupportsIndex) -> _FlexDType: ...
+ @overload
+ def __mul__(self, value: SupportsIndex) -> dtype[void]: ...
# NOTE: `__rmul__` seems to be broken when used in combination with
- # literals as of mypy 0.800. Set the return-type to `Any` for now.
- def __rmul__(self, value: int) -> Any: ...
+ # literals as of mypy 0.902. Set the return-type to `dtype[Any]` for
+ # now for non-flexible dtypes.
+ @overload
+ def __rmul__(self: _FlexDType, value: SupportsIndex) -> _FlexDType: ...
+ @overload
+ def __rmul__(self, value: SupportsIndex) -> dtype[Any]: ...
- def __eq__(self, other: DTypeLike) -> bool: ...
- def __ne__(self, other: DTypeLike) -> bool: ...
def __gt__(self, other: DTypeLike) -> bool: ...
def __ge__(self, other: DTypeLike) -> bool: ...
def __lt__(self, other: DTypeLike) -> bool: ...
@@ -1074,17 +1076,17 @@ class dtype(Generic[_DTypeScalar_co]):
@property
def alignment(self) -> int: ...
@property
- def base(self: _DType) -> _DType: ...
+ def base(self) -> dtype[Any]: ...
@property
def byteorder(self) -> str: ...
@property
def char(self) -> str: ...
@property
- def descr(self) -> List[Union[Tuple[str, str], Tuple[str, str, _Shape]]]: ...
+ def descr(self) -> List[Tuple[str, str] | Tuple[str, str, _Shape]]: ...
@property
def fields(
self,
- ) -> Optional[Mapping[str, Union[Tuple[dtype[Any], int], Tuple[dtype[Any], int, Any]]]]: ...
+ ) -> None | MappingProxyType[str, Tuple[dtype[Any], int] | Tuple[dtype[Any], int, Any]]: ...
@property
def flags(self) -> int: ...
@property
@@ -1100,19 +1102,17 @@ class dtype(Generic[_DTypeScalar_co]):
@property
def kind(self) -> str: ...
@property
- def metadata(self) -> Optional[Mapping[str, Any]]: ...
+ def metadata(self) -> None | MappingProxyType[str, Any]: ...
@property
def name(self) -> str: ...
@property
- def names(self) -> Optional[Tuple[str, ...]]: ...
- @property
def num(self) -> int: ...
@property
def shape(self) -> _Shape: ...
@property
def ndim(self) -> int: ...
@property
- def subdtype(self: _DType) -> Optional[Tuple[_DType, _Shape]]: ...
+ def subdtype(self) -> None | Tuple[dtype[Any], _Shape]: ...
def newbyteorder(self: _DType, __new_order: _ByteOrder = ...) -> _DType: ...
# Leave str and type for end to avoid having to use `builtins.str`
# everywhere. See https://github.com/python/mypy/issues/3775
@@ -1596,6 +1596,7 @@ class _ArrayOrScalarCommon:
_DType = TypeVar("_DType", bound=dtype[Any])
_DType_co = TypeVar("_DType_co", covariant=True, bound=dtype[Any])
+_FlexDType = TypeVar("_FlexDType", bound=dtype[flexible])
# TODO: Set the `bound` to something more suitable once we
# have proper shape support
@@ -1635,6 +1636,14 @@ _ArrayTD64_co = NDArray[Union[bool_, integer[Any], timedelta64]]
class _SupportsItem(Protocol[_T_co]):
def item(self, __args: Any) -> _T_co: ...
+class _SupportsReal(Protocol[_T_co]):
+ @property
+ def real(self) -> _T_co: ...
+
+class _SupportsImag(Protocol[_T_co]):
+ @property
+ def imag(self) -> _T_co: ...
+
class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]):
@property
def base(self) -> Optional[ndarray]: ...
@@ -1643,11 +1652,15 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]):
@property
def size(self) -> int: ...
@property
- def real(self: _ArraySelf) -> _ArraySelf: ...
+ def real(
+ self: NDArray[_SupportsReal[_ScalarType]], # type: ignore[type-var]
+ ) -> ndarray[_ShapeType, dtype[_ScalarType]]: ...
@real.setter
def real(self, value: ArrayLike) -> None: ...
@property
- def imag(self: _ArraySelf) -> _ArraySelf: ...
+ def imag(
+ self: NDArray[_SupportsImag[_ScalarType]], # type: ignore[type-var]
+ ) -> ndarray[_ShapeType, dtype[_ScalarType]]: ...
@imag.setter
def imag(self, value: ArrayLike) -> None: ...
def __new__(
diff --git a/numpy/core/_add_newdocs.py b/numpy/core/_add_newdocs.py
index 15b2b3ad3..a29e2e8a8 100644
--- a/numpy/core/_add_newdocs.py
+++ b/numpy/core/_add_newdocs.py
@@ -1585,8 +1585,8 @@ add_newdoc('numpy.core.multiarray', 'arange',
For integer arguments the function is equivalent to the Python built-in
`range` function, but returns an ndarray rather than a list.
- When using a non-integer step, such as 0.1, the results will often not
- be consistent. It is better to use `numpy.linspace` for these cases.
+ When using a non-integer step, such as 0.1, it is often better to use
+ `numpy.linspace`. See the warnings section below for more information.
Parameters
----------
@@ -1619,6 +1619,25 @@ add_newdoc('numpy.core.multiarray', 'arange',
this rule may result in the last element of `out` being greater
than `stop`.
+ Warnings
+ --------
+ The length of the output might not be numerically stable.
+
+ Another stability issue is due to the internal implementation of
+ `numpy.arange`.
+ The actual step value used to populate the array is
+ ``dtype(start + step) - dtype(start)`` and not `step`. Precision loss
+ can occur here, due to casting or due to using floating points when
+ `start` is much larger than `step`. This can lead to unexpected
+ behaviour. For example::
+
+ >>> np.arange(0, 5, 0.5, dtype=int)
+ array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
+ >>> np.arange(-3, 3, 0.5, dtype=int)
+ array([-3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8])
+
+ In such cases, the use of `numpy.linspace` should be preferred.
+
See Also
--------
numpy.linspace : Evenly spaced numbers with careful handling of endpoints.
diff --git a/numpy/core/_internal.py b/numpy/core/_internal.py
index 3b0c46467..3ed22818f 100644
--- a/numpy/core/_internal.py
+++ b/numpy/core/_internal.py
@@ -324,10 +324,10 @@ class _ctypes:
"""
(c_intp*self.ndim): A ctypes array of length self.ndim where
the basetype is the C-integer corresponding to ``dtype('p')`` on this
- platform. This base-type could be `ctypes.c_int`, `ctypes.c_long`, or
- `ctypes.c_longlong` depending on the platform.
- The c_intp type is defined accordingly in `numpy.ctypeslib`.
- The ctypes array contains the shape of the underlying array.
+ platform (see `~numpy.ctypeslib.c_intp`). This base-type could be
+ `ctypes.c_int`, `ctypes.c_long`, or `ctypes.c_longlong` depending on
+ the platform. The ctypes array contains the shape of
+ the underlying array.
"""
return self.shape_as(_getintp_ctype())
@@ -907,4 +907,3 @@ class recursive:
self.func = func
def __call__(self, *args, **kwargs):
return self.func(self, *args, **kwargs)
-
diff --git a/numpy/core/shape_base.pyi b/numpy/core/shape_base.pyi
index ec40a8814..9aaeceed7 100644
--- a/numpy/core/shape_base.pyi
+++ b/numpy/core/shape_base.pyi
@@ -1,39 +1,72 @@
import sys
-from typing import TypeVar, overload, List, Sequence
+from typing import TypeVar, overload, List, Sequence, Any
-from numpy import ndarray
-from numpy.typing import ArrayLike
+from numpy import generic, dtype
+from numpy.typing import ArrayLike, NDArray, _NestedSequence, _SupportsArray
if sys.version_info >= (3, 8):
from typing import SupportsIndex
else:
from typing_extensions import SupportsIndex
-_ArrayType = TypeVar("_ArrayType", bound=ndarray)
+_SCT = TypeVar("_SCT", bound=generic)
+_ArrayType = TypeVar("_ArrayType", bound=NDArray[Any])
+_ArrayLike = _NestedSequence[_SupportsArray[dtype[_SCT]]]
+
+__all__: List[str]
+
+@overload
+def atleast_1d(__arys: _ArrayLike[_SCT]) -> NDArray[_SCT]: ...
+@overload
+def atleast_1d(__arys: ArrayLike) -> NDArray[Any]: ...
+@overload
+def atleast_1d(*arys: ArrayLike) -> List[NDArray[Any]]: ...
+
+@overload
+def atleast_2d(__arys: _ArrayLike[_SCT]) -> NDArray[_SCT]: ...
+@overload
+def atleast_2d(__arys: ArrayLike) -> NDArray[Any]: ...
@overload
-def atleast_1d(__arys: ArrayLike) -> ndarray: ...
+def atleast_2d(*arys: ArrayLike) -> List[NDArray[Any]]: ...
+
+@overload
+def atleast_3d(__arys: _ArrayLike[_SCT]) -> NDArray[_SCT]: ...
@overload
-def atleast_1d(*arys: ArrayLike) -> List[ndarray]: ...
+def atleast_3d(__arys: ArrayLike) -> NDArray[Any]: ...
+@overload
+def atleast_3d(*arys: ArrayLike) -> List[NDArray[Any]]: ...
@overload
-def atleast_2d(__arys: ArrayLike) -> ndarray: ...
+def vstack(tup: Sequence[_ArrayLike[_SCT]]) -> NDArray[_SCT]: ...
@overload
-def atleast_2d(*arys: ArrayLike) -> List[ndarray]: ...
+def vstack(tup: Sequence[ArrayLike]) -> NDArray[Any]: ...
@overload
-def atleast_3d(__arys: ArrayLike) -> ndarray: ...
+def hstack(tup: Sequence[_ArrayLike[_SCT]]) -> NDArray[_SCT]: ...
@overload
-def atleast_3d(*arys: ArrayLike) -> List[ndarray]: ...
+def hstack(tup: Sequence[ArrayLike]) -> NDArray[Any]: ...
-def vstack(tup: Sequence[ArrayLike]) -> ndarray: ...
-def hstack(tup: Sequence[ArrayLike]) -> ndarray: ...
@overload
def stack(
- arrays: Sequence[ArrayLike], axis: SupportsIndex = ..., out: None = ...
-) -> ndarray: ...
+ arrays: Sequence[_ArrayLike[_SCT]],
+ axis: SupportsIndex = ...,
+ out: None = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def stack(
+ arrays: Sequence[ArrayLike],
+ axis: SupportsIndex = ...,
+ out: None = ...,
+) -> NDArray[Any]: ...
@overload
def stack(
- arrays: Sequence[ArrayLike], axis: SupportsIndex = ..., out: _ArrayType = ...
+ arrays: Sequence[ArrayLike],
+ axis: SupportsIndex = ...,
+ out: _ArrayType = ...,
) -> _ArrayType: ...
-def block(arrays: ArrayLike) -> ndarray: ...
+
+@overload
+def block(arrays: _ArrayLike[_SCT]) -> NDArray[_SCT]: ...
+@overload
+def block(arrays: ArrayLike) -> NDArray[Any]: ...
diff --git a/numpy/core/src/multiarray/array_method.c b/numpy/core/src/multiarray/array_method.c
index 3ecc20d1d..ab992a3ae 100644
--- a/numpy/core/src/multiarray/array_method.c
+++ b/numpy/core/src/multiarray/array_method.c
@@ -36,6 +36,7 @@
#include "dtypemeta.h"
#include "common_dtype.h"
#include "convert_datatype.h"
+#include "common.h"
/*
@@ -471,14 +472,11 @@ static PyObject *
boundarraymethod_repr(PyBoundArrayMethodObject *self)
{
int nargs = self->method->nin + self->method->nout;
- PyObject *dtypes = PyTuple_New(nargs);
+ PyObject *dtypes = PyArray_TupleFromItems(
+ nargs, (PyObject **)self->dtypes, 0);
if (dtypes == NULL) {
return NULL;
}
- for (int i = 0; i < nargs; i++) {
- Py_INCREF(self->dtypes[i]);
- PyTuple_SET_ITEM(dtypes, i, (PyObject *)self->dtypes[i]);
- }
return PyUnicode_FromFormat(
"<np._BoundArrayMethod `%s` for dtypes %S>",
self->method->name, dtypes);
diff --git a/numpy/core/src/multiarray/common.h b/numpy/core/src/multiarray/common.h
index 83209cd38..203decaa0 100644
--- a/numpy/core/src/multiarray/common.h
+++ b/numpy/core/src/multiarray/common.h
@@ -291,6 +291,34 @@ npy_memchr(char * haystack, char needle,
return p;
}
+
+/*
+ * Simple helper to create a tuple from an array of items. The `make_null_none`
+ * flag means that NULL entries are replaced with None, which is occasionally
+ * useful.
+ */
+static NPY_INLINE PyObject *
+PyArray_TupleFromItems(int n, PyObject *const *items, int make_null_none)
+{
+ PyObject *tuple = PyTuple_New(n);
+ if (tuple == NULL) {
+ return NULL;
+ }
+ for (int i = 0; i < n; i ++) {
+ PyObject *tmp;
+ if (!make_null_none || items[i] != NULL) {
+ tmp = items[i];
+ }
+ else {
+ tmp = Py_None;
+ }
+ Py_INCREF(tmp);
+ PyTuple_SET_ITEM(tuple, i, tmp);
+ }
+ return tuple;
+}
+
+
#include "ucsnarrow.h"
/*
diff --git a/numpy/core/src/multiarray/conversion_utils.c b/numpy/core/src/multiarray/conversion_utils.c
index 3c4c21ded..adfff1129 100644
--- a/numpy/core/src/multiarray/conversion_utils.c
+++ b/numpy/core/src/multiarray/conversion_utils.c
@@ -1222,11 +1222,7 @@ PyArray_IntTupleFromIntp(int len, npy_intp const *vals)
goto fail;
}
for (i = 0; i < len; i++) {
-#if NPY_SIZEOF_INTP <= NPY_SIZEOF_LONG
- PyObject *o = PyLong_FromLong((long) vals[i]);
-#else
- PyObject *o = PyLong_FromLongLong((npy_longlong) vals[i]);
-#endif
+ PyObject *o = PyArray_PyIntFromIntp(vals[i]);
if (!o) {
Py_DECREF(intTuple);
intTuple = NULL;
diff --git a/numpy/core/src/multiarray/conversion_utils.h b/numpy/core/src/multiarray/conversion_utils.h
index 7d1871c43..55c0cdd35 100644
--- a/numpy/core/src/multiarray/conversion_utils.h
+++ b/numpy/core/src/multiarray/conversion_utils.h
@@ -39,6 +39,17 @@ PyArray_IntpFromSequence(PyObject *seq, npy_intp *vals, int maxvals);
NPY_NO_EXPORT int
PyArray_TypestrConvert(int itemsize, int gentype);
+
+static NPY_INLINE PyObject *
+PyArray_PyIntFromIntp(npy_intp const value)
+{
+#if NPY_SIZEOF_INTP <= NPY_SIZEOF_LONG
+ return PyLong_FromLong((long)value);
+#else
+ return PyLong_FromLongLong((npy_longlong)value);
+#endif
+}
+
NPY_NO_EXPORT PyObject *
PyArray_IntTupleFromIntp(int len, npy_intp const *vals);
diff --git a/numpy/core/src/multiarray/convert_datatype.c b/numpy/core/src/multiarray/convert_datatype.c
index 1a962ef78..d197a4bea 100644
--- a/numpy/core/src/multiarray/convert_datatype.c
+++ b/numpy/core/src/multiarray/convert_datatype.c
@@ -476,6 +476,7 @@ PyArray_CheckCastSafety(NPY_CASTING casting,
if (PyArray_MinCastSafety(castingimpl->casting, casting) == casting) {
/* No need to check using `castingimpl.resolve_descriptors()` */
+ Py_DECREF(meth);
return 1;
}
@@ -1648,14 +1649,14 @@ PyArray_ResultType(
Py_DECREF(all_DTypes[i]);
}
if (common_dtype == NULL) {
- goto finish;
+ goto error;
}
if (common_dtype->abstract) {
/* (ab)use default descriptor to define a default */
PyArray_Descr *tmp_descr = common_dtype->default_descr(common_dtype);
if (tmp_descr == NULL) {
- goto finish;
+ goto error;
}
Py_INCREF(NPY_DTYPE(tmp_descr));
Py_SETREF(common_dtype, NPY_DTYPE(tmp_descr));
@@ -1688,20 +1689,18 @@ PyArray_ResultType(
PyObject *tmp = PyArray_GETITEM(
arrs[i-ndtypes], PyArray_BYTES(arrs[i-ndtypes]));
if (tmp == NULL) {
- Py_SETREF(result, NULL);
- goto finish;
+ goto error;
}
curr = common_dtype->discover_descr_from_pyobject(common_dtype, tmp);
Py_DECREF(tmp);
}
if (curr == NULL) {
- Py_SETREF(result, NULL);
- goto finish;
+ goto error;
}
Py_SETREF(result, common_dtype->common_instance(result, curr));
Py_DECREF(curr);
if (result == NULL) {
- goto finish;
+ goto error;
}
}
}
@@ -1722,16 +1721,21 @@ PyArray_ResultType(
* Going from error to success should not really happen, but is
* probably OK if it does.
*/
- Py_SETREF(result, NULL);
- goto finish;
+ goto error;
}
/* Return the old "legacy" result (could warn here if different) */
Py_SETREF(result, legacy_result);
}
- finish:
+ Py_DECREF(common_dtype);
PyMem_Free(info_on_heap);
return result;
+
+ error:
+ Py_XDECREF(result);
+ Py_XDECREF(common_dtype);
+ PyMem_Free(info_on_heap);
+ return NULL;
}
diff --git a/numpy/core/src/multiarray/descriptor.c b/numpy/core/src/multiarray/descriptor.c
index f0dfac55d..b8b477e5d 100644
--- a/numpy/core/src/multiarray/descriptor.c
+++ b/numpy/core/src/multiarray/descriptor.c
@@ -3228,7 +3228,9 @@ arraydescr_richcompare(PyArray_Descr *self, PyObject *other, int cmp_op)
{
PyArray_Descr *new = _convert_from_any(other, 0);
if (new == NULL) {
- return NULL;
+ /* Cannot convert `other` to dtype */
+ PyErr_Clear();
+ Py_RETURN_NOTIMPLEMENTED;
}
npy_bool ret;
diff --git a/numpy/core/src/multiarray/getset.c b/numpy/core/src/multiarray/getset.c
index 3575d6fad..bccbb7b0c 100644
--- a/numpy/core/src/multiarray/getset.c
+++ b/numpy/core/src/multiarray/getset.c
@@ -419,33 +419,13 @@ array_itemsize_get(PyArrayObject *self)
static PyObject *
array_size_get(PyArrayObject *self)
{
- npy_intp size=PyArray_SIZE(self);
-#if NPY_SIZEOF_INTP <= NPY_SIZEOF_LONG
- return PyLong_FromLong((long) size);
-#else
- if (size > NPY_MAX_LONG || size < NPY_MIN_LONG) {
- return PyLong_FromLongLong(size);
- }
- else {
- return PyLong_FromLong((long) size);
- }
-#endif
+ return PyArray_PyIntFromIntp(PyArray_SIZE(self));
}
static PyObject *
array_nbytes_get(PyArrayObject *self)
{
- npy_intp nbytes = PyArray_NBYTES(self);
-#if NPY_SIZEOF_INTP <= NPY_SIZEOF_LONG
- return PyLong_FromLong((long) nbytes);
-#else
- if (nbytes > NPY_MAX_LONG || nbytes < NPY_MIN_LONG) {
- return PyLong_FromLongLong(nbytes);
- }
- else {
- return PyLong_FromLong((long) nbytes);
- }
-#endif
+ return PyArray_PyIntFromIntp(PyArray_NBYTES(self));
}
diff --git a/numpy/core/src/multiarray/iterators.c b/numpy/core/src/multiarray/iterators.c
index 3ebd4c858..576ea89b3 100644
--- a/numpy/core/src/multiarray/iterators.c
+++ b/numpy/core/src/multiarray/iterators.c
@@ -15,6 +15,7 @@
#include "iterators.h"
#include "ctors.h"
#include "common.h"
+#include "conversion_utils.h"
#include "array_coercion.h"
#define NEWAXIS_INDEX -1
@@ -1062,14 +1063,16 @@ static PyMemberDef iter_members[] = {
T_OBJECT,
offsetof(PyArrayIterObject, ao),
READONLY, NULL},
- {"index",
- T_INT,
- offsetof(PyArrayIterObject, index),
- READONLY, NULL},
{NULL, 0, 0, 0, NULL},
};
static PyObject *
+iter_index_get(PyArrayIterObject *self)
+{
+ return PyArray_PyIntFromIntp(self->index);
+}
+
+static PyObject *
iter_coords_get(PyArrayIterObject *self)
{
int nd;
@@ -1095,10 +1098,12 @@ iter_coords_get(PyArrayIterObject *self)
}
static PyGetSetDef iter_getsets[] = {
+ {"index",
+ (getter)iter_index_get,
+ NULL, NULL, NULL},
{"coords",
(getter)iter_coords_get,
- NULL,
- NULL, NULL},
+ NULL, NULL, NULL},
{NULL, NULL, NULL, NULL, NULL},
};
@@ -1410,31 +1415,13 @@ arraymultiter_dealloc(PyArrayMultiIterObject *multi)
static PyObject *
arraymultiter_size_get(PyArrayMultiIterObject *self)
{
-#if NPY_SIZEOF_INTP <= NPY_SIZEOF_LONG
- return PyLong_FromLong((long) self->size);
-#else
- if (self->size < NPY_MAX_LONG) {
- return PyLong_FromLong((long) self->size);
- }
- else {
- return PyLong_FromLongLong((npy_longlong) self->size);
- }
-#endif
+ return PyArray_PyIntFromIntp(self->size);
}
static PyObject *
arraymultiter_index_get(PyArrayMultiIterObject *self)
{
-#if NPY_SIZEOF_INTP <= NPY_SIZEOF_LONG
- return PyLong_FromLong((long) self->index);
-#else
- if (self->size < NPY_MAX_LONG) {
- return PyLong_FromLong((long) self->index);
- }
- else {
- return PyLong_FromLongLong((npy_longlong) self->index);
- }
-#endif
+ return PyArray_PyIntFromIntp(self->index);
}
static PyObject *
diff --git a/numpy/core/src/umath/reduction.c b/numpy/core/src/umath/reduction.c
index f1423d8b9..86cc20eb1 100644
--- a/numpy/core/src/umath/reduction.c
+++ b/numpy/core/src/umath/reduction.c
@@ -166,7 +166,7 @@ PyArray_CopyInitialReduceValues(
* identity : If Py_None, PyArray_CopyInitialReduceValues is used, otherwise
* this value is used to initialize the result to
* the reduction's unit.
- * loop : The loop which does the reduction.
+ * loop : `reduce_loop` from `ufunc_object.c`. TODO: Refactor
* data : Data which is passed to the inner loop.
* buffersize : Buffer size for the iterator. For the default, pass in 0.
* funcname : The name of the reduction function, for error messages.
@@ -182,18 +182,15 @@ PyArray_CopyInitialReduceValues(
* generalized ufuncs!)
*/
NPY_NO_EXPORT PyArrayObject *
-PyUFunc_ReduceWrapper(PyArrayObject *operand, PyArrayObject *out,
- PyArrayObject *wheremask,
- PyArray_Descr *operand_dtype,
- PyArray_Descr *result_dtype,
- NPY_CASTING casting,
- npy_bool *axis_flags, int reorderable,
- int keepdims,
- PyObject *identity,
- PyArray_ReduceLoopFunc *loop,
- void *data, npy_intp buffersize, const char *funcname,
- int errormask)
+PyUFunc_ReduceWrapper(
+ PyArrayObject *operand, PyArrayObject *out, PyArrayObject *wheremask,
+ PyArray_Descr *operand_dtype, PyArray_Descr *result_dtype,
+ NPY_CASTING casting,
+ npy_bool *axis_flags, int reorderable, int keepdims,
+ PyObject *identity, PyArray_ReduceLoopFunc *loop,
+ void *data, npy_intp buffersize, const char *funcname, int errormask)
{
+ assert(loop != NULL);
PyArrayObject *result = NULL;
npy_intp skip_first_count = 0;
@@ -201,7 +198,7 @@ PyUFunc_ReduceWrapper(PyArrayObject *operand, PyArrayObject *out,
NpyIter *iter = NULL;
PyArrayObject *op[3];
PyArray_Descr *op_dtypes[3];
- npy_uint32 flags, op_flags[3];
+ npy_uint32 it_flags, op_flags[3];
/* More than one axis means multiple orders are possible */
if (!reorderable && count_axes(PyArray_NDIM(operand), axis_flags) > 1) {
@@ -227,7 +224,7 @@ PyUFunc_ReduceWrapper(PyArrayObject *operand, PyArrayObject *out,
op_dtypes[0] = result_dtype;
op_dtypes[1] = operand_dtype;
- flags = NPY_ITER_BUFFERED |
+ it_flags = NPY_ITER_BUFFERED |
NPY_ITER_EXTERNAL_LOOP |
NPY_ITER_GROWINNER |
NPY_ITER_DONT_NEGATE_STRIDES |
@@ -293,7 +290,7 @@ PyUFunc_ReduceWrapper(PyArrayObject *operand, PyArrayObject *out,
}
}
- iter = NpyIter_AdvancedNew(wheremask == NULL ? 2 : 3, op, flags,
+ iter = NpyIter_AdvancedNew(wheremask == NULL ? 2 : 3, op, it_flags,
NPY_KEEPORDER, casting,
op_flags,
op_dtypes,
@@ -304,11 +301,14 @@ PyUFunc_ReduceWrapper(PyArrayObject *operand, PyArrayObject *out,
result = NpyIter_GetOperandArray(iter)[0];
+ int needs_api = NpyIter_IterationNeedsAPI(iter);
+ /* Start with the floating-point exception flags cleared */
+ npy_clear_floatstatus_barrier((char*)&iter);
+
/*
* Initialize the result to the reduction unit if possible,
* otherwise copy the initial values and get a view to the rest.
*/
-
if (identity != Py_None) {
if (PyArray_FillWithScalar(result, identity) < 0) {
goto fail;
@@ -331,15 +331,11 @@ PyUFunc_ReduceWrapper(PyArrayObject *operand, PyArrayObject *out,
goto fail;
}
- /* Start with the floating-point exception flags cleared */
- npy_clear_floatstatus_barrier((char*)&iter);
-
if (NpyIter_GetIterSize(iter) != 0) {
NpyIter_IterNextFunc *iternext;
char **dataptr;
npy_intp *strideptr;
npy_intp *countptr;
- int needs_api;
iternext = NpyIter_GetIterNext(iter, NULL);
if (iternext == NULL) {
@@ -349,16 +345,6 @@ PyUFunc_ReduceWrapper(PyArrayObject *operand, PyArrayObject *out,
strideptr = NpyIter_GetInnerStrideArray(iter);
countptr = NpyIter_GetInnerLoopSizePtr(iter);
- needs_api = NpyIter_IterationNeedsAPI(iter);
-
- /* Straightforward reduction */
- if (loop == NULL) {
- PyErr_Format(PyExc_RuntimeError,
- "reduction operation %s did not supply an "
- "inner loop function", funcname);
- goto fail;
- }
-
if (loop(iter, dataptr, strideptr, countptr,
iternext, needs_api, skip_first_count, data) < 0) {
goto fail;
diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c
index 0644a28c0..cdb5b720d 100644
--- a/numpy/core/src/umath/ufunc_object.c
+++ b/numpy/core/src/umath/ufunc_object.c
@@ -1263,23 +1263,10 @@ iterator_loop(PyUFuncObject *ufunc,
void *innerloopdata,
npy_uint32 *op_flags)
{
- npy_intp i, nin = ufunc->nin, nout = ufunc->nout;
- npy_intp nop = nin + nout;
- NpyIter *iter;
- char *baseptrs[NPY_MAXARGS];
-
- NpyIter_IterNextFunc *iternext;
- char **dataptr;
- npy_intp *stride;
- npy_intp *count_ptr;
- int needs_api;
-
- PyArrayObject **op_it;
- npy_uint32 iter_flags;
-
- NPY_BEGIN_THREADS_DEF;
+ int nin = ufunc->nin, nout = ufunc->nout;
+ int nop = nin + nout;
- iter_flags = ufunc->iter_flags |
+ npy_uint32 iter_flags = ufunc->iter_flags |
NPY_ITER_EXTERNAL_LOOP |
NPY_ITER_REFS_OK |
NPY_ITER_ZEROSIZE_OK |
@@ -1288,16 +1275,17 @@ iterator_loop(PyUFuncObject *ufunc,
NPY_ITER_DELAY_BUFALLOC |
NPY_ITER_COPY_IF_OVERLAP;
- /* Call the __array_prepare__ functions for already existing output arrays.
+ /*
+ * Call the __array_prepare__ functions for already existing output arrays.
* Do this before creating the iterator, as the iterator may UPDATEIFCOPY
* some of them.
*/
- for (i = 0; i < nout; ++i) {
+ for (int i = 0; i < nout; i++) {
if (op[nin+i] == NULL) {
continue;
}
if (prepare_ufunc_output(ufunc, &op[nin+i],
- arr_prep[i], full_args, i) < 0) {
+ arr_prep[i], full_args, i) < 0) {
return -1;
}
}
@@ -1307,7 +1295,7 @@ iterator_loop(PyUFuncObject *ufunc,
* were already checked, we use the casting rule 'unsafe' which
* is faster to calculate.
*/
- iter = NpyIter_AdvancedNew(nop, op,
+ NpyIter *iter = NpyIter_AdvancedNew(nop, op,
iter_flags,
order, NPY_UNSAFE_CASTING,
op_flags, dtype,
@@ -1316,16 +1304,20 @@ iterator_loop(PyUFuncObject *ufunc,
return -1;
}
- /* Copy any allocated outputs */
- op_it = NpyIter_GetOperandArray(iter);
- for (i = 0; i < nout; ++i) {
- if (op[nin+i] == NULL) {
- op[nin+i] = op_it[nin+i];
- Py_INCREF(op[nin+i]);
+ NPY_UF_DBG_PRINT("Made iterator\n");
+
+ /* Call the __array_prepare__ functions for newly allocated arrays */
+ PyArrayObject **op_it = NpyIter_GetOperandArray(iter);
+ char *baseptrs[NPY_MAXARGS];
+
+ for (int i = 0; i < nout; ++i) {
+ if (op[nin + i] == NULL) {
+ op[nin + i] = op_it[nin + i];
+ Py_INCREF(op[nin + i]);
/* Call the __array_prepare__ functions for the new array */
- if (prepare_ufunc_output(ufunc, &op[nin+i],
- arr_prep[i], full_args, i) < 0) {
+ if (prepare_ufunc_output(ufunc,
+ &op[nin + i], arr_prep[i], full_args, i) < 0) {
NpyIter_Deallocate(iter);
return -1;
}
@@ -1340,45 +1332,59 @@ iterator_loop(PyUFuncObject *ufunc,
* with other operands --- the op[nin+i] array passed to it is newly
* allocated and doesn't have any overlap.
*/
- baseptrs[nin+i] = PyArray_BYTES(op[nin+i]);
+ baseptrs[nin + i] = PyArray_BYTES(op[nin + i]);
}
else {
- baseptrs[nin+i] = PyArray_BYTES(op_it[nin+i]);
+ baseptrs[nin + i] = PyArray_BYTES(op_it[nin + i]);
}
}
-
/* Only do the loop if the iteration size is non-zero */
- if (NpyIter_GetIterSize(iter) != 0) {
- /* Reset the iterator with the base pointers from possible __array_prepare__ */
- for (i = 0; i < nin; ++i) {
- baseptrs[i] = PyArray_BYTES(op_it[i]);
- }
- if (NpyIter_ResetBasePointers(iter, baseptrs, NULL) != NPY_SUCCEED) {
- NpyIter_Deallocate(iter);
+ npy_intp full_size = NpyIter_GetIterSize(iter);
+ if (full_size == 0) {
+ if (!NpyIter_Deallocate(iter)) {
return -1;
}
+ return 0;
+ }
- /* Get the variables needed for the loop */
- iternext = NpyIter_GetIterNext(iter, NULL);
- if (iternext == NULL) {
- NpyIter_Deallocate(iter);
- return -1;
- }
- dataptr = NpyIter_GetDataPtrArray(iter);
- stride = NpyIter_GetInnerStrideArray(iter);
- count_ptr = NpyIter_GetInnerLoopSizePtr(iter);
- needs_api = NpyIter_IterationNeedsAPI(iter);
+ /*
+ * Reset the iterator with the base pointers possibly modified by
+ * `__array_prepare__`.
+ */
+ for (int i = 0; i < nin; i++) {
+ baseptrs[i] = PyArray_BYTES(op_it[i]);
+ }
+ if (NpyIter_ResetBasePointers(iter, baseptrs, NULL) != NPY_SUCCEED) {
+ NpyIter_Deallocate(iter);
+ return -1;
+ }
- NPY_BEGIN_THREADS_NDITER(iter);
+ /* Get the variables needed for the loop */
+ NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(iter, NULL);
+ if (iternext == NULL) {
+ NpyIter_Deallocate(iter);
+ return -1;
+ }
+ char **dataptr = NpyIter_GetDataPtrArray(iter);
+ npy_intp *strides = NpyIter_GetInnerStrideArray(iter);
+ npy_intp *countptr = NpyIter_GetInnerLoopSizePtr(iter);
+ int needs_api = NpyIter_IterationNeedsAPI(iter);
- /* Execute the loop */
- do {
- NPY_UF_DBG_PRINT1("iterator loop count %d\n", (int)*count_ptr);
- innerloop(dataptr, count_ptr, stride, innerloopdata);
- } while (!(needs_api && PyErr_Occurred()) && iternext(iter));
+ NPY_BEGIN_THREADS_DEF;
- NPY_END_THREADS;
+ if (!needs_api) {
+ NPY_BEGIN_THREADS_THRESHOLDED(full_size);
}
+
+ NPY_UF_DBG_PRINT("Actual inner loop:\n");
+ /* Execute the loop */
+ do {
+ NPY_UF_DBG_PRINT1("iterator loop count %d\n", (int)*count_ptr);
+ innerloop(dataptr, countptr, strides, innerloopdata);
+ } while (!(needs_api && PyErr_Occurred()) && iternext(iter));
+
+ NPY_END_THREADS;
+
/*
* Currently `innerloop` may leave an error set, in this case
* NpyIter_Deallocate will always return an error as well.
@@ -1517,24 +1523,24 @@ execute_fancy_ufunc_loop(PyUFuncObject *ufunc,
/* Call the __array_prepare__ functions where necessary */
op_it = NpyIter_GetOperandArray(iter);
- for (i = nin; i < nop; ++i) {
- PyArrayObject *op_tmp, *orig_op_tmp;
+ for (i = 0; i < nout; ++i) {
+ PyArrayObject *op_tmp;
/*
* The array can be allocated by the iterator -- it is placed in op[i]
* and returned to the caller, and this needs an extra incref.
*/
- if (op[i] == NULL) {
- op_tmp = op_it[i];
+ if (op[i+nin] == NULL) {
+ op_tmp = op_it[i+nin];
Py_INCREF(op_tmp);
}
else {
- op_tmp = op[i];
+ op_tmp = op[i+nin];
+ op[i+nin] = NULL;
}
/* prepare_ufunc_output may decref & replace the pointer */
- orig_op_tmp = op_tmp;
- Py_INCREF(op_tmp);
+ char *original_data = PyArray_BYTES(op_tmp);
if (prepare_ufunc_output(ufunc, &op_tmp,
arr_prep[i], full_args, i) < 0) {
@@ -1543,7 +1549,7 @@ execute_fancy_ufunc_loop(PyUFuncObject *ufunc,
}
/* Validate that the prepare_ufunc_output didn't mess with pointers */
- if (PyArray_BYTES(op_tmp) != PyArray_BYTES(orig_op_tmp)) {
+ if (PyArray_BYTES(op_tmp) != original_data) {
PyErr_SetString(PyExc_ValueError,
"The __array_prepare__ functions modified the data "
"pointer addresses in an invalid fashion");
@@ -1553,12 +1559,11 @@ execute_fancy_ufunc_loop(PyUFuncObject *ufunc,
}
/*
- * Put the updated operand back and undo the DECREF above. If
- * COPY_IF_OVERLAP made a temporary copy, the output will be copied
- * by UPDATEIFCOPY even if op[i] was changed by prepare_ufunc_output.
+ * Put the updated operand back. If COPY_IF_OVERLAP made a temporary
+ * copy, the output will be copied by WRITEBACKIFCOPY even if op[i]
+ * was changed by prepare_ufunc_output.
*/
- op[i] = op_tmp;
- Py_DECREF(op_tmp);
+ op[i+nin] = op_tmp;
}
/* Only do the loop if the iteration size is non-zero */
@@ -2105,9 +2110,10 @@ _initialize_variable_parts(PyUFuncObject *ufunc,
}
static int
-PyUFunc_GeneralizedFunctionInternal(PyUFuncObject *ufunc, PyArrayObject **op,
- ufunc_full_args full_args, PyObject *type_tup, PyObject *extobj,
- NPY_CASTING casting, NPY_ORDER order, npy_bool subok,
+PyUFunc_GeneralizedFunctionInternal(PyUFuncObject *ufunc,
+ PyArray_Descr *operation_descrs[],
+ PyArrayObject *op[], PyObject *extobj,
+ NPY_ORDER order,
PyObject *axis, PyObject *axes, int keepdims)
{
int nin, nout;
@@ -2116,8 +2122,6 @@ PyUFunc_GeneralizedFunctionInternal(PyUFuncObject *ufunc, PyArrayObject **op,
int retval;
int needs_api = 0;
- PyArray_Descr *dtypes[NPY_MAXARGS];
-
/* Use remapped axes for generalized ufunc */
int broadcast_ndim, iter_ndim;
int op_core_num_dims[NPY_MAXARGS];
@@ -2148,8 +2152,6 @@ PyUFunc_GeneralizedFunctionInternal(PyUFuncObject *ufunc, PyArrayObject **op,
/* swapping around of axes */
int *remap_axis_memory = NULL;
int **remap_axis = NULL;
- /* The __array_prepare__ function to call for each output */
- PyObject *arr_prep[NPY_MAXARGS];
nin = ufunc->nin;
nout = ufunc->nout;
@@ -2159,11 +2161,6 @@ PyUFunc_GeneralizedFunctionInternal(PyUFuncObject *ufunc, PyArrayObject **op,
NPY_UF_DBG_PRINT1("\nEvaluating ufunc %s\n", ufunc_name);
- /* Initialize all dtypes and __array_prepare__ call-backs to NULL */
- for (i = 0; i < nop; ++i) {
- dtypes[i] = NULL;
- arr_prep[i] = NULL;
- }
/* Initialize possibly variable parts to the values from the ufunc */
retval = _initialize_variable_parts(ufunc, op_core_num_dims,
core_dim_sizes, core_dim_flags);
@@ -2369,12 +2366,6 @@ PyUFunc_GeneralizedFunctionInternal(PyUFuncObject *ufunc, PyArrayObject **op,
NPY_UF_DBG_PRINT("Finding inner loop\n");
-
- retval = ufunc->type_resolver(ufunc, casting,
- op, type_tup, dtypes);
- if (retval < 0) {
- goto fail;
- }
/*
* We don't write to all elements, and the iterator may make
* UPDATEIFCOPY temporary copies. The output arrays (unless they are
@@ -2388,34 +2379,12 @@ PyUFunc_GeneralizedFunctionInternal(PyUFuncObject *ufunc, PyArrayObject **op,
NPY_UFUNC_DEFAULT_OUTPUT_FLAGS,
op_flags);
/* For the generalized ufunc, we get the loop right away too */
- retval = ufunc->legacy_inner_loop_selector(ufunc, dtypes,
- &innerloop, &innerloopdata, &needs_api);
+ retval = ufunc->legacy_inner_loop_selector(ufunc,
+ operation_descrs, &innerloop, &innerloopdata, &needs_api);
if (retval < 0) {
goto fail;
}
-#if NPY_UF_DBG_TRACING
- printf("input types:\n");
- for (i = 0; i < nin; ++i) {
- PyObject_Print((PyObject *)dtypes[i], stdout, 0);
- printf(" ");
- }
- printf("\noutput types:\n");
- for (i = nin; i < nop; ++i) {
- PyObject_Print((PyObject *)dtypes[i], stdout, 0);
- printf(" ");
- }
- printf("\n");
-#endif
-
- if (subok) {
- /*
- * Get the appropriate __array_prepare__ function to call
- * for each output
- */
- _find_array_prepare(full_args, arr_prep, nout);
- }
-
/*
* Set up the iterator per-op flags. For generalized ufuncs, we
* can't do buffering, so must COPY or UPDATEIFCOPY.
@@ -2430,7 +2399,7 @@ PyUFunc_GeneralizedFunctionInternal(PyUFuncObject *ufunc, PyArrayObject **op,
/* Create the iterator */
iter = NpyIter_AdvancedNew(nop, op, iter_flags,
order, NPY_UNSAFE_CASTING, op_flags,
- dtypes, iter_ndim,
+ operation_descrs, iter_ndim,
op_axes, iter_shape, 0);
if (iter == NULL) {
retval = -1;
@@ -2589,11 +2558,6 @@ PyUFunc_GeneralizedFunctionInternal(PyUFuncObject *ufunc, PyArrayObject **op,
retval = -1;
}
- /* The caller takes ownership of all the references in op */
- for (i = 0; i < nop; ++i) {
- Py_XDECREF(dtypes[i]);
- Py_XDECREF(arr_prep[i]);
- }
PyArray_free(remap_axis_memory);
PyArray_free(remap_axis);
@@ -2605,10 +2569,6 @@ fail:
NPY_UF_DBG_PRINT1("Returning failure code %d\n", retval);
PyArray_free(inner_strides);
NpyIter_Deallocate(iter);
- for (i = 0; i < nop; ++i) {
- Py_XDECREF(dtypes[i]);
- Py_XDECREF(arr_prep[i]);
- }
PyArray_free(remap_axis_memory);
PyArray_free(remap_axis);
return retval;
@@ -2616,56 +2576,33 @@ fail:
static int
-PyUFunc_GenericFunctionInternal(PyUFuncObject *ufunc, PyArrayObject **op,
- ufunc_full_args full_args, PyObject *type_tup, PyObject *extobj,
- NPY_CASTING casting, NPY_ORDER order, npy_bool subok,
+PyUFunc_GenericFunctionInternal(PyUFuncObject *ufunc,
+ PyArray_Descr *operation_descrs[],
+ PyArrayObject *op[], PyObject *extobj, NPY_ORDER order,
+ PyObject *output_array_prepare[], ufunc_full_args full_args,
PyArrayObject *wheremask)
{
- int nin, nout;
- int i, nop;
- const char *ufunc_name;
+ int nin = ufunc->nin, nout = ufunc->nout, nop = nin + nout;
+
+ const char *ufunc_name = ufunc_get_name_cstr(ufunc);
int retval = -1;
npy_uint32 op_flags[NPY_MAXARGS];
npy_intp default_op_out_flags;
- PyArray_Descr *dtypes[NPY_MAXARGS];
-
/* These parameters come from extobj= or from a TLS global */
int buffersize = 0, errormask = 0;
- /* The __array_prepare__ function to call for each output */
- PyObject *arr_prep[NPY_MAXARGS];
-
int trivial_loop_ok = 0;
- nin = ufunc->nin;
- nout = ufunc->nout;
- nop = nin + nout;
-
- ufunc_name = ufunc_get_name_cstr(ufunc);
-
NPY_UF_DBG_PRINT1("\nEvaluating ufunc %s\n", ufunc_name);
- /* Initialize all the dtypes and __array_prepare__ callbacks to NULL */
- for (i = 0; i < nop; ++i) {
- dtypes[i] = NULL;
- arr_prep[i] = NULL;
- }
-
/* Get the buffersize and errormask */
if (_get_bufsize_errmask(extobj, ufunc_name, &buffersize, &errormask) < 0) {
- retval = -1;
- goto fail;
+ return -1;
}
NPY_UF_DBG_PRINT("Finding inner loop\n");
- retval = ufunc->type_resolver(ufunc, casting,
- op, type_tup, dtypes);
- if (retval < 0) {
- goto fail;
- }
-
if (wheremask != NULL) {
/* Set up the flags. */
default_op_out_flags = NPY_ITER_NO_SUBTYPE |
@@ -2682,31 +2619,9 @@ PyUFunc_GenericFunctionInternal(PyUFuncObject *ufunc, PyArrayObject **op,
default_op_out_flags, op_flags);
}
-#if NPY_UF_DBG_TRACING
- printf("input types:\n");
- for (i = 0; i < nin; ++i) {
- PyObject_Print((PyObject *)dtypes[i], stdout, 0);
- printf(" ");
- }
- printf("\noutput types:\n");
- for (i = nin; i < nop; ++i) {
- PyObject_Print((PyObject *)dtypes[i], stdout, 0);
- printf(" ");
- }
- printf("\n");
-#endif
-
- if (subok) {
- /*
- * Get the appropriate __array_prepare__ function to call
- * for each output
- */
- _find_array_prepare(full_args, arr_prep, nout);
- }
-
/* Do the ufunc loop */
if (wheremask != NULL) {
- NPY_UF_DBG_PRINT("Executing fancy inner loop\n");
+ NPY_UF_DBG_PRINT("Executing masked inner loop\n");
if (nop + 1 > NPY_MAXARGS) {
PyErr_SetString(PyExc_ValueError,
@@ -2714,14 +2629,15 @@ PyUFunc_GenericFunctionInternal(PyUFuncObject *ufunc, PyArrayObject **op,
return -1;
}
op[nop] = wheremask;
- dtypes[nop] = NULL;
+ operation_descrs[nop] = NULL;
/* Set up the flags */
npy_clear_floatstatus_barrier((char*)&ufunc);
retval = execute_fancy_ufunc_loop(ufunc, wheremask,
- op, dtypes, order,
- buffersize, arr_prep, full_args, op_flags);
+ op, operation_descrs, order,
+ buffersize, output_array_prepare,
+ full_args, op_flags);
}
else {
NPY_UF_DBG_PRINT("Executing legacy inner loop\n");
@@ -2732,20 +2648,22 @@ PyUFunc_GenericFunctionInternal(PyUFuncObject *ufunc, PyArrayObject **op,
* Since it requires dtypes, it can only be called after
* ufunc->type_resolver
*/
- trivial_loop_ok = check_for_trivial_loop(ufunc, op, dtypes, buffersize);
+ trivial_loop_ok = check_for_trivial_loop(ufunc,
+ op, operation_descrs, buffersize);
if (trivial_loop_ok < 0) {
- goto fail;
+ return -1;
}
/* check_for_trivial_loop on half-floats can overflow */
npy_clear_floatstatus_barrier((char*)&ufunc);
retval = execute_legacy_ufunc_loop(ufunc, trivial_loop_ok,
- op, dtypes, order,
- buffersize, arr_prep, full_args, op_flags);
+ op, operation_descrs, order,
+ buffersize, output_array_prepare,
+ full_args, op_flags);
}
if (retval < 0) {
- goto fail;
+ return -1;
}
/*
@@ -2755,26 +2673,7 @@ PyUFunc_GenericFunctionInternal(PyUFuncObject *ufunc, PyArrayObject **op,
*/
if (PyErr_Occurred() ||
_check_ufunc_fperr(errormask, extobj, ufunc_name) < 0) {
- retval = -1;
- goto fail;
- }
-
-
- /* The caller takes ownership of all the references in op */
- for (i = 0; i < nop; ++i) {
- Py_XDECREF(dtypes[i]);
- Py_XDECREF(arr_prep[i]);
- }
-
- NPY_UF_DBG_PRINT("Returning success code 0\n");
-
- return 0;
-
-fail:
- NPY_UF_DBG_PRINT1("Returning failure code %d\n", retval);
- for (i = 0; i < nop; ++i) {
- Py_XDECREF(dtypes[i]);
- Py_XDECREF(arr_prep[i]);
+ return -1;
}
return retval;
@@ -3186,7 +3085,7 @@ PyUFunc_Accumulate(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out,
int idim, ndim, otype_final;
int needs_api, need_outer_iterator;
- NpyIter *iter = NULL, *iter_inner = NULL;
+ NpyIter *iter = NULL;
/* The selected inner loop */
PyUFuncGenericFunction innerloop = NULL;
@@ -3512,9 +3411,6 @@ finish:
if (!NpyIter_Deallocate(iter)) {
res = -1;
}
- if (!NpyIter_Deallocate(iter_inner)) {
- res = -1;
- }
if (res < 0) {
Py_DECREF(out);
return NULL;
@@ -3527,7 +3423,6 @@ fail:
Py_XDECREF(op_dtypes[0]);
NpyIter_Deallocate(iter);
- NpyIter_Deallocate(iter_inner);
return NULL;
}
@@ -4263,8 +4158,8 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc,
*/
int typenum = PyArray_TYPE(mp);
if ((PyTypeNum_ISBOOL(typenum) || PyTypeNum_ISINTEGER(typenum))
- && ((strcmp(ufunc->name,"add") == 0)
- || (strcmp(ufunc->name,"multiply") == 0))) {
+ && ((strcmp(ufunc->name, "add") == 0)
+ || (strcmp(ufunc->name, "multiply") == 0))) {
if (PyTypeNum_ISBOOL(typenum)) {
typenum = NPY_LONG;
}
@@ -4310,9 +4205,9 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc,
"reduceat does not allow multiple axes");
goto fail;
}
- ret = (PyArrayObject *)PyUFunc_Reduceat(ufunc, mp, indices, out,
- axes[0], otype->type_num);
- Py_DECREF(indices);
+ ret = (PyArrayObject *)PyUFunc_Reduceat(ufunc,
+ mp, indices, out, axes[0], otype->type_num);
+ Py_SETREF(indices, NULL);
break;
}
Py_DECREF(mp);
@@ -4354,6 +4249,7 @@ fail:
Py_XDECREF(otype);
Py_XDECREF(mp);
Py_XDECREF(wheremask);
+ Py_XDECREF(indices);
Py_XDECREF(full_args.in);
Py_XDECREF(full_args.out);
return NULL;
@@ -4667,6 +4563,81 @@ _get_normalized_typetup(PyUFuncObject *ufunc,
}
+/**
+ * Wraps all outputs and returns the result (which may be NULL on error).
+ *
+ * Use __array_wrap__ on all outputs
+ * if present on one of the input arguments.
+ * If present for multiple inputs:
+ * use __array_wrap__ of input object with largest
+ * __array_priority__ (default = 0.0)
+ *
+ * Exception: we should not wrap outputs for items already
+ * passed in as output-arguments. These items should either
+ * be left unwrapped or wrapped by calling their own __array_wrap__
+ * routine.
+ *
+ * For each output argument, wrap will be either
+ * NULL --- call PyArray_Return() -- default if no output arguments given
+ * None --- array-object passed in don't call PyArray_Return
+ * method --- the __array_wrap__ method to call.
+ *
+ * @param ufunc
+ * @param full_args Original inputs and outputs
+ * @param subok Whether subclasses are allowed
+ * @param result_arrays The ufunc result(s). REFERENCES ARE STOLEN!
+ */
+static PyObject *
+replace_with_wrapped_result_and_return(PyUFuncObject *ufunc,
+ ufunc_full_args full_args, npy_bool subok,
+ PyArrayObject *result_arrays[])
+{
+ PyObject *retobj[NPY_MAXARGS];
+ PyObject *wraparr[NPY_MAXARGS];
+ _find_array_wrap(full_args, subok, wraparr, ufunc->nin, ufunc->nout);
+
+ /* wrap outputs */
+ for (int i = 0; i < ufunc->nout; i++) {
+ _ufunc_context context;
+
+ context.ufunc = ufunc;
+ context.args = full_args;
+ context.out_i = i;
+
+ retobj[i] = _apply_array_wrap(wraparr[i], result_arrays[i], &context);
+ result_arrays[i] = NULL; /* Was DECREF'ed and (probably) wrapped */
+ if (retobj[i] == NULL) {
+ goto fail;
+ }
+ }
+
+ if (ufunc->nout == 1) {
+ return retobj[0];
+ }
+ else {
+ PyObject *result = PyTuple_New(ufunc->nout);
+ if (result == NULL) {
+ return NULL;
+ }
+ for (int i = 0; i < ufunc->nout; i++) {
+ PyTuple_SET_ITEM(result, i, retobj[i]);
+ }
+ return result;
+ }
+
+ fail:
+ for (int i = 0; i < ufunc->nout; i++) {
+ if (result_arrays[i] != NULL) {
+ Py_DECREF(result_arrays[i]);
+ }
+ else {
+ Py_XDECREF(retobj[i]);
+ }
+ }
+ return NULL;
+}
+
+
/*
* Main ufunc call implementation.
*
@@ -4681,16 +4652,22 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc,
PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames,
npy_bool outer)
{
- PyArrayObject *operands[NPY_MAXARGS] = {NULL};
- PyObject *retobj[NPY_MAXARGS];
- PyObject *wraparr[NPY_MAXARGS];
- PyObject *override = NULL;
- ufunc_full_args full_args = {NULL, NULL};
- PyObject *typetup = NULL;
-
int errval;
int nin = ufunc->nin, nout = ufunc->nout, nop = ufunc->nargs;
+ /* All following variables are cleared in the `fail` error path */
+ ufunc_full_args full_args;
+ PyArrayObject *wheremask = NULL;
+ PyObject *typetup = NULL;
+
+ PyArrayObject *operands[NPY_MAXARGS];
+ PyArray_Descr *operation_descrs[NPY_MAXARGS];
+ PyObject *output_array_prepare[NPY_MAXARGS];
+ /* Initialize all arrays (we usually only need a small part) */
+ memset(operands, 0, nop * sizeof(*operands));
+ memset(operation_descrs, 0, nop * sizeof(*operation_descrs));
+ memset(output_array_prepare, 0, nout * sizeof(*output_array_prepare));
+
/*
* Note that the input (and possibly output) arguments are passed in as
* positional arguments. We extract these first and check for `out`
@@ -4709,15 +4686,10 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc,
}
/* Fetch input arguments. */
- full_args.in = PyTuple_New(ufunc->nin);
+ full_args.in = PyArray_TupleFromItems(ufunc->nin, args, 0);
if (full_args.in == NULL) {
return NULL;
}
- for (int i = 0; i < ufunc->nin; i++) {
- PyObject *tmp = args[i];
- Py_INCREF(tmp);
- PyTuple_SET_ITEM(full_args.in, i, tmp);
- }
/*
* If there are more arguments, they define the out args. Otherwise
@@ -4838,6 +4810,7 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc,
method = "outer";
}
/* We now have all the information required to check for Overrides */
+ PyObject *override = NULL;
errval = PyUFunc_CheckOverride(ufunc, method,
full_args.in, full_args.out,
args, len_args, kwnames, &override);
@@ -4871,7 +4844,6 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc,
NPY_CASTING casting = NPY_DEFAULT_ASSIGN_CASTING;
npy_bool subok = NPY_TRUE;
int keepdims = -1; /* We need to know if it was passed */
- PyArrayObject *wheremask = NULL;
if (convert_ufunc_arguments(ufunc, full_args, operands,
order_obj, &order,
casting_obj, &casting,
@@ -4881,15 +4853,30 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc,
goto fail;
}
+ if (ufunc->type_resolver(ufunc,
+ casting, operands, typetup, operation_descrs) < 0) {
+ goto fail;
+ }
+
+ if (subok) {
+ _find_array_prepare(full_args, output_array_prepare, nout);
+ }
+
+ /*
+ * Do the final preparations and call the inner-loop.
+ */
if (!ufunc->core_enabled) {
- errval = PyUFunc_GenericFunctionInternal(ufunc, operands,
- full_args, typetup, extobj, casting, order, subok,
+ errval = PyUFunc_GenericFunctionInternal(ufunc,
+ operation_descrs, operands,
+ extobj, order,
+ output_array_prepare, full_args, /* for __array_prepare__ */
wheremask);
- Py_XDECREF(wheremask);
}
else {
- errval = PyUFunc_GeneralizedFunctionInternal(ufunc, operands,
- full_args, typetup, extobj, casting, order, subok,
+ errval = PyUFunc_GeneralizedFunctionInternal(ufunc,
+ operation_descrs, operands,
+ extobj, order,
+ /* GUFuncs never (ever) called __array_prepare__! */
axis_obj, axes_obj, keepdims);
}
@@ -4897,74 +4884,40 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc,
goto fail;
}
- /* Free the input references */
- for (int i = 0; i < ufunc->nin; i++) {
- Py_XSETREF(operands[i], NULL);
- }
-
/*
- * Use __array_wrap__ on all outputs
- * if present on one of the input arguments.
- * If present for multiple inputs:
- * use __array_wrap__ of input object with largest
- * __array_priority__ (default = 0.0)
- *
- * Exception: we should not wrap outputs for items already
- * passed in as output-arguments. These items should either
- * be left unwrapped or wrapped by calling their own __array_wrap__
- * routine.
- *
- * For each output argument, wrap will be either
- * NULL --- call PyArray_Return() -- default if no output arguments given
- * None --- array-object passed in don't call PyArray_Return
- * method --- the __array_wrap__ method to call.
+ * Clear all variables which are not needed any further.
+ * (From here on, we cannot `goto fail` any more.)
*/
- _find_array_wrap(full_args, subok, wraparr, ufunc->nin, ufunc->nout);
-
- /* wrap outputs */
- for (int i = 0; i < ufunc->nout; i++) {
- int j = ufunc->nin+i;
- _ufunc_context context;
- PyObject *wrapped;
-
- context.ufunc = ufunc;
- context.args = full_args;
- context.out_i = i;
-
- wrapped = _apply_array_wrap(wraparr[i], operands[j], &context);
- operands[j] = NULL; /* Prevent fail double-freeing this */
- if (wrapped == NULL) {
- for (int j = 0; j < i; j++) {
- Py_DECREF(retobj[j]);
- }
- goto fail;
+ Py_XDECREF(wheremask);
+ for (int i = 0; i < nop; i++) {
+ Py_DECREF(operation_descrs[i]);
+ if (i < nin) {
+ Py_DECREF(operands[i]);
+ }
+ else {
+ Py_XDECREF(output_array_prepare[i-nin]);
}
-
- retobj[i] = wrapped;
}
-
Py_XDECREF(typetup);
+
+ /* The following steals the references to the outputs: */
+ PyObject *result = replace_with_wrapped_result_and_return(ufunc,
+ full_args, subok, operands+nin);
Py_XDECREF(full_args.in);
Py_XDECREF(full_args.out);
- if (ufunc->nout == 1) {
- return retobj[0];
- }
- else {
- PyTupleObject *ret;
-
- ret = (PyTupleObject *)PyTuple_New(ufunc->nout);
- for (int i = 0; i < ufunc->nout; i++) {
- PyTuple_SET_ITEM(ret, i, retobj[i]);
- }
- return (PyObject *)ret;
- }
+ return result;
fail:
Py_XDECREF(typetup);
Py_XDECREF(full_args.in);
Py_XDECREF(full_args.out);
+ Py_XDECREF(wheremask);
for (int i = 0; i < ufunc->nargs; i++) {
Py_XDECREF(operands[i]);
+ Py_XDECREF(operation_descrs[i]);
+ if (i < nout) {
+ Py_XDECREF(output_array_prepare[i]);
+ }
}
return NULL;
}
diff --git a/numpy/core/src/umath/ufunc_type_resolution.c b/numpy/core/src/umath/ufunc_type_resolution.c
index 2834235e4..211d837dd 100644
--- a/numpy/core/src/umath/ufunc_type_resolution.c
+++ b/numpy/core/src/umath/ufunc_type_resolution.c
@@ -94,9 +94,6 @@ raise_no_loop_found_error(
PyUFuncObject *ufunc, PyArray_Descr **dtypes)
{
static PyObject *exc_type = NULL;
- PyObject *exc_value;
- PyObject *dtypes_tup;
- npy_intp i;
npy_cache_import(
"numpy.core._exceptions", "_UFuncNoLoopError",
@@ -105,22 +102,13 @@ raise_no_loop_found_error(
return -1;
}
- /* convert dtypes to a tuple */
- dtypes_tup = PyTuple_New(ufunc->nargs);
+ PyObject *dtypes_tup = PyArray_TupleFromItems(
+ ufunc->nargs, (PyObject **)dtypes, 1);
if (dtypes_tup == NULL) {
return -1;
}
- for (i = 0; i < ufunc->nargs; ++i) {
- PyObject *tmp = Py_None;
- if (dtypes[i] != NULL) {
- tmp = (PyObject *)dtypes[i];
- }
- Py_INCREF(tmp);
- PyTuple_SET_ITEM(dtypes_tup, i, tmp);
- }
-
/* produce an error object */
- exc_value = PyTuple_Pack(2, ufunc, dtypes_tup);
+ PyObject *exc_value = PyTuple_Pack(2, ufunc, dtypes_tup);
Py_DECREF(dtypes_tup);
if (exc_value == NULL) {
return -1;
diff --git a/numpy/core/tests/test_dtype.py b/numpy/core/tests/test_dtype.py
index 8a6b7dcd5..3d15009ea 100644
--- a/numpy/core/tests/test_dtype.py
+++ b/numpy/core/tests/test_dtype.py
@@ -88,6 +88,24 @@ class TestBuiltin:
assert_raises(TypeError, np.dtype, 'q8')
assert_raises(TypeError, np.dtype, 'Q8')
+ def test_richcompare_invalid_dtype_equality(self):
+ # Make sure objects that cannot be converted to valid
+ # dtypes results in False/True when compared to valid dtypes.
+ # Here 7 cannot be converted to dtype. No exceptions should be raised
+
+ assert not np.dtype(np.int32) == 7, "dtype richcompare failed for =="
+ assert np.dtype(np.int32) != 7, "dtype richcompare failed for !="
+
+ @pytest.mark.parametrize(
+ 'operation',
+ [operator.le, operator.lt, operator.ge, operator.gt])
+ def test_richcompare_invalid_dtype_comparison(self, operation):
+ # Make sure TypeError is raised for comparison operators
+ # for invalid dtypes. Here 7 is an invalid dtype.
+
+ with pytest.raises(TypeError):
+ operation(np.dtype(np.int32), 7)
+
@pytest.mark.parametrize("dtype",
['Bool', 'Complex32', 'Complex64', 'Float16', 'Float32', 'Float64',
'Int8', 'Int16', 'Int32', 'Int64', 'Object0', 'Timedelta64',
diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py
index 25dd76256..7ee7253ef 100644
--- a/numpy/core/tests/test_multiarray.py
+++ b/numpy/core/tests/test_multiarray.py
@@ -5364,6 +5364,17 @@ class TestFlat:
assert_(abs(sys.getrefcount(ind) - rc_ind) < 50)
assert_(abs(sys.getrefcount(indtype) - rc_indtype) < 50)
+ def test_index_getset(self):
+ it = np.arange(10).reshape(2, 1, 5).flat
+ with pytest.raises(AttributeError):
+ it.index = 10
+
+ for _ in it:
+ pass
+ # Check the value of `.index` is updated correctly (see also gh-19153)
+ # If the type was incorrect, this would show up on big-endian machines
+ assert it.index == it.base.size
+
class TestResize:
@@ -6723,7 +6734,7 @@ class TestMatmulOperator(MatmulCommon):
def test_matmul_raises(self):
assert_raises(TypeError, self.matmul, np.int8(5), np.int8(5))
assert_raises(TypeError, self.matmul, np.void(b'abc'), np.void(b'abc'))
- assert_raises(ValueError, self.matmul, np.arange(10), np.void(b'abc'))
+ assert_raises(TypeError, self.matmul, np.arange(10), np.void(b'abc'))
def test_matmul_inplace():
# It would be nice to support in-place matmul eventually, but for now
diff --git a/numpy/core/tests/test_umath.py b/numpy/core/tests/test_umath.py
index 2378b11e9..a2d6b3989 100644
--- a/numpy/core/tests/test_umath.py
+++ b/numpy/core/tests/test_umath.py
@@ -2102,6 +2102,10 @@ class TestSpecialMethods:
do_test(lambda a: np.add(0, 0, out=a), lambda a: (0, 0, a))
do_test(lambda a: np.add(0, 0, out=(a,)), lambda a: (0, 0, a))
+ # Also check the where mask handling:
+ do_test(lambda a: np.add(a, 0, where=False), lambda a: (a, 0))
+ do_test(lambda a: np.add(0, 0, a, where=False), lambda a: (0, 0, a))
+
def test_wrap_with_iterable(self):
# test fix for bug #1026:
@@ -2251,7 +2255,8 @@ class TestSpecialMethods:
assert_equal(x, np.zeros(1))
assert_equal(type(x), np.ndarray)
- def test_prepare(self):
+ @pytest.mark.parametrize("use_where", [True, False])
+ def test_prepare(self, use_where):
class with_prepare(np.ndarray):
__array_priority__ = 10
@@ -2261,11 +2266,18 @@ class TestSpecialMethods:
return np.array(arr).view(type=with_prepare)
a = np.array(1).view(type=with_prepare)
- x = np.add(a, a)
+ if use_where:
+ # Currently raises, due to the array being replaced during prepare
+ with pytest.raises(ValueError):
+ x = np.add(a, a, where=np.array(True))
+ return
+ else:
+ x = np.add(a, a)
assert_equal(x, np.array(2))
assert_equal(type(x), with_prepare)
- def test_prepare_out(self):
+ @pytest.mark.parametrize("use_where", [True, False])
+ def test_prepare_out(self, use_where):
class with_prepare(np.ndarray):
__array_priority__ = 10
@@ -2274,7 +2286,13 @@ class TestSpecialMethods:
return np.array(arr).view(type=with_prepare)
a = np.array([1]).view(type=with_prepare)
- x = np.add(a, a, a)
+ if use_where:
+ # Currently raises, due to the array being replaced during prepare
+ with pytest.raises(ValueError):
+ x = np.add(a, a, a, where=[True])
+ return
+ else:
+ x = np.add(a, a, a)
# Returned array is new, because of the strange
# __array_prepare__ above
assert_(not np.shares_memory(x, a))
@@ -2292,6 +2310,7 @@ class TestSpecialMethods:
a = A()
assert_raises(RuntimeError, ncu.maximum, a, a)
+ assert_raises(RuntimeError, ncu.maximum, a, a, where=False)
def test_array_too_many_args(self):
diff --git a/numpy/f2py/cfuncs.py b/numpy/f2py/cfuncs.py
index f403a66b5..714f9a932 100644
--- a/numpy/f2py/cfuncs.py
+++ b/numpy/f2py/cfuncs.py
@@ -469,7 +469,7 @@ cppmacros['MEMCOPY'] = """\
"""
cppmacros['STRINGMALLOC'] = """\
#define STRINGMALLOC(str,len)\\
- if ((str = (string)malloc(sizeof(char)*(len+1))) == NULL) {\\
+ if ((str = (string)malloc(len+1)) == NULL) {\\
PyErr_SetString(PyExc_MemoryError, \"out of memory\");\\
goto capi_fail;\\
} else {\\
@@ -479,20 +479,41 @@ cppmacros['STRINGMALLOC'] = """\
cppmacros['STRINGFREE'] = """\
#define STRINGFREE(str) do {if (!(str == NULL)) free(str);} while (0)
"""
+needs['STRINGPADN'] = ['string.h']
+cppmacros['STRINGPADN'] = """\
+/*
+STRINGPADN replaces null values with padding values from the right.
+
+`to` must have size of at least N bytes.
+
+If the `to[N-1]` has null value, then replace it and all the
+preceding nulls with the given padding.
+
+STRINGPADN(to, N, PADDING, NULLVALUE) is an inverse operation.
+*/
+#define STRINGPADN(to, N, NULLVALUE, PADDING) \\
+ do { \\
+ int _m = (N); \\
+ char *_to = (to); \\
+ for (_m -= 1; _m >= 0 && _to[_m] == NULLVALUE; _m--) { \\
+ _to[_m] = PADDING; \\
+ } \\
+ } while (0)
+"""
needs['STRINGCOPYN'] = ['string.h', 'FAILNULL']
cppmacros['STRINGCOPYN'] = """\
-#define STRINGCOPYN(to,from,buf_size) \\
+/*
+STRINGCOPYN copies N bytes.
+
+`to` and `from` buffers must have sizes of at least N bytes.
+*/
+#define STRINGCOPYN(to,from,N) \\
do { \\
- int _m = (buf_size); \\
+ int _m = (N); \\
char *_to = (to); \\
char *_from = (from); \\
FAILNULL(_to); FAILNULL(_from); \\
- (void)strncpy(_to, _from, sizeof(char)*_m); \\
- _to[_m-1] = '\\0'; \\
- /* Padding with spaces instead of nulls */ \\
- for (_m -= 2; _m >= 0 && _to[_m] == '\\0'; _m--) { \\
- _to[_m] = ' '; \\
- } \\
+ (void)strncpy(_to, _from, _m); \\
} while (0)
"""
needs['STRINGCOPY'] = ['string.h', 'FAILNULL']
@@ -623,71 +644,127 @@ static int *nextforcomb(void) {
}"""
needs['try_pyarr_from_string'] = ['STRINGCOPYN', 'PRINTPYOBJERR', 'string']
cfuncs['try_pyarr_from_string'] = """\
-static int try_pyarr_from_string(PyObject *obj,const string str) {
- PyArrayObject *arr = NULL;
- if (PyArray_Check(obj) && (!((arr = (PyArrayObject *)obj) == NULL)))
- { STRINGCOPYN(PyArray_DATA(arr),str,PyArray_NBYTES(arr)); }
- return 1;
+/*
+ try_pyarr_from_string copies str[:len(obj)] to the data of an `ndarray`.
+
+ If obj is an `ndarray`, it is assumed to be contiguous.
+
+ If the specified len==-1, str must be null-terminated.
+*/
+static int try_pyarr_from_string(PyObject *obj,
+ const string str, const int len) {
+#ifdef DEBUGCFUNCS
+fprintf(stderr, "try_pyarr_from_string(str='%s', len=%d, obj=%p)\\n",
+ (char*)str,len, obj);
+#endif
+ if (PyArray_Check(obj)) {
+ PyArrayObject *arr = (PyArrayObject *)obj;
+ assert(ISCONTIGUOUS(arr));
+ string buf = PyArray_DATA(arr);
+ npy_intp n = len;
+ if (n == -1) {
+ /* Assuming null-terminated str. */
+ n = strlen(str);
+ }
+ if (n > PyArray_NBYTES(arr)) {
+ n = PyArray_NBYTES(arr);
+ }
+ STRINGCOPYN(buf, str, n);
+ return 1;
+ }
capi_fail:
PRINTPYOBJERR(obj);
- PyErr_SetString(#modulename#_error,\"try_pyarr_from_string failed\");
+ PyErr_SetString(#modulename#_error, \"try_pyarr_from_string failed\");
return 0;
}
"""
needs['string_from_pyobj'] = ['string', 'STRINGMALLOC', 'STRINGCOPYN']
cfuncs['string_from_pyobj'] = """\
+/*
+ Create a new string buffer `str` of at most length `len` from a
+ Python string-like object `obj`.
+
+ The string buffer has given size (len) or the size of inistr when len==-1.
+
+ The string buffer is padded with blanks: in Fortran, trailing blanks
+ are insignificant contrary to C nulls.
+ */
static int
-string_from_pyobj(string *str,int *len,const string inistr,PyObject *obj,const char *errmess)
+string_from_pyobj(string *str, int *len, const string inistr, PyObject *obj,
+ const char *errmess)
{
- PyArrayObject *arr = NULL;
PyObject *tmp = NULL;
+ string buf = NULL;
+ npy_intp n = -1;
#ifdef DEBUGCFUNCS
-fprintf(stderr,\"string_from_pyobj(str='%s',len=%d,inistr='%s',obj=%p)\\n\",(char*)str,*len,(char *)inistr,obj);
+fprintf(stderr,\"string_from_pyobj(str='%s',len=%d,inistr='%s',obj=%p)\\n\",
+ (char*)str, *len, (char *)inistr, obj);
#endif
if (obj == Py_None) {
- if (*len == -1)
- *len = strlen(inistr); /* Will this cause problems? */
- STRINGMALLOC(*str,*len);
- STRINGCOPYN(*str,inistr,*len+1);
- return 1;
+ n = strlen(inistr);
+ buf = inistr;
}
- if (PyArray_Check(obj)) {
- if ((arr = (PyArrayObject *)obj) == NULL)
- goto capi_fail;
+ else if (PyArray_Check(obj)) {
+ PyArrayObject *arr = (PyArrayObject *)obj;
if (!ISCONTIGUOUS(arr)) {
- PyErr_SetString(PyExc_ValueError,\"array object is non-contiguous.\");
+ PyErr_SetString(PyExc_ValueError,
+ \"array object is non-contiguous.\");
goto capi_fail;
}
- if (*len == -1)
- *len = (PyArray_ITEMSIZE(arr))*PyArray_SIZE(arr);
- STRINGMALLOC(*str,*len);
- STRINGCOPYN(*str,PyArray_DATA(arr),*len+1);
- return 1;
- }
- if (PyBytes_Check(obj)) {
- tmp = obj;
- Py_INCREF(tmp);
- }
- else if (PyUnicode_Check(obj)) {
- tmp = PyUnicode_AsASCIIString(obj);
+ n = PyArray_NBYTES(arr);
+ buf = PyArray_DATA(arr);
+ n = strnlen(buf, n);
}
else {
- PyObject *tmp2;
- tmp2 = PyObject_Str(obj);
- if (tmp2) {
- tmp = PyUnicode_AsASCIIString(tmp2);
- Py_DECREF(tmp2);
+ if (PyBytes_Check(obj)) {
+ tmp = obj;
+ Py_INCREF(tmp);
+ }
+ else if (PyUnicode_Check(obj)) {
+ tmp = PyUnicode_AsASCIIString(obj);
}
else {
- tmp = NULL;
+ PyObject *tmp2;
+ tmp2 = PyObject_Str(obj);
+ if (tmp2) {
+ tmp = PyUnicode_AsASCIIString(tmp2);
+ Py_DECREF(tmp2);
+ }
+ else {
+ tmp = NULL;
+ }
+ }
+ if (tmp == NULL) goto capi_fail;
+ n = PyBytes_GET_SIZE(tmp);
+ buf = PyBytes_AS_STRING(tmp);
+ }
+ if (*len == -1) {
+ /* TODO: change the type of `len` so that we can remove this */
+ if (n > NPY_MAX_INT) {
+ PyErr_SetString(PyExc_OverflowError,
+ "object too large for a 32-bit int");
+ goto capi_fail;
}
+ *len = n;
}
- if (tmp == NULL) goto capi_fail;
- if (*len == -1)
- *len = PyBytes_GET_SIZE(tmp);
- STRINGMALLOC(*str,*len);
- STRINGCOPYN(*str,PyBytes_AS_STRING(tmp),*len+1);
- Py_DECREF(tmp);
+ else if (*len < n) {
+ /* discard the last (len-n) bytes of input buf */
+ n = *len;
+ }
+ if (n < 0 || *len < 0 || buf == NULL) {
+ goto capi_fail;
+ }
+ STRINGMALLOC(*str, *len); // *str is allocated with size (*len + 1)
+ if (n < *len) {
+ /*
+ Pad fixed-width string with nulls. The caller will replace
+ nulls with blanks when the corresponding argument is not
+ intent(c).
+ */
+ memset(*str + n, '\\0', *len - n);
+ }
+ STRINGCOPYN(*str, buf, n);
+ Py_XDECREF(tmp);
return 1;
capi_fail:
Py_XDECREF(tmp);
@@ -702,7 +779,6 @@ capi_fail:
}
"""
-
needs['char_from_pyobj'] = ['int_from_pyobj']
cfuncs['char_from_pyobj'] = """\
static int
diff --git a/numpy/f2py/rules.py b/numpy/f2py/rules.py
index 63e47baa2..587ae2e5f 100755
--- a/numpy/f2py/rules.py
+++ b/numpy/f2py/rules.py
@@ -561,7 +561,8 @@ rout_rules = [
'\tint #name#_return_value_len = 0;'],
'callfortran':'#name#_return_value,#name#_return_value_len,',
'callfortranroutine':['\t#name#_return_value_len = #rlength#;',
- '\tif ((#name#_return_value = (string)malloc(sizeof(char)*(#name#_return_value_len+1))) == NULL) {',
+ '\tif ((#name#_return_value = (string)malloc('
+ '#name#_return_value_len+1)) == NULL) {',
'\t\tPyErr_SetString(PyExc_MemoryError, \"out of memory\");',
'\t\tf2py_success = 0;',
'\t} else {',
@@ -942,19 +943,35 @@ if (#varname#_cb.capi==Py_None) {
'\tPyObject *#varname#_capi = Py_None;'],
'callfortran':'#varname#,',
'callfortranappend':'slen(#varname#),',
- 'pyobjfrom':{debugcapi: '\tfprintf(stderr,"#vardebugshowvalue#\\n",slen(#varname#),#varname#);'},
+ 'pyobjfrom':[
+ {debugcapi:
+ '\tfprintf(stderr,'
+ '"#vardebugshowvalue#\\n",slen(#varname#),#varname#);'},
+ # The trailing null value for Fortran is blank.
+ {l_and(isintent_out, l_not(isintent_c)):
+ "\t\tSTRINGPADN(#varname#, slen(#varname#), ' ', '\\0');"},
+ ],
'return': {isintent_out: ',#varname#'},
- 'need': ['len..'], # 'STRINGFREE'],
+ 'need': ['len..',
+ {l_and(isintent_out, l_not(isintent_c)): 'STRINGPADN'}],
'_check':isstring
}, { # Common
- 'frompyobj': """\
+ 'frompyobj': [
+ """\
\tslen(#varname#) = #length#;
-\tf2py_success = #ctype#_from_pyobj(&#varname#,&slen(#varname#),#init#,#varname#_capi,\"#ctype#_from_pyobj failed in converting #nth# `#varname#\' of #pyname# to C #ctype#\");
+\tf2py_success = #ctype#_from_pyobj(&#varname#,&slen(#varname#),#init#,"""
+"""#varname#_capi,\"#ctype#_from_pyobj failed in converting #nth# """
+"""`#varname#\' of #pyname# to C #ctype#\");
\tif (f2py_success) {""",
+ # The trailing null value for Fortran is blank.
+ {l_not(isintent_c):
+ "\t\tSTRINGPADN(#varname#, slen(#varname#), '\\0', ' ');"},
+ ],
'cleanupfrompyobj': """\
\t\tSTRINGFREE(#varname#);
\t} /*if (f2py_success) of #varname#*/""",
- 'need': ['#ctype#_from_pyobj', 'len..', 'STRINGFREE'],
+ 'need': ['#ctype#_from_pyobj', 'len..', 'STRINGFREE',
+ {l_not(isintent_c): 'STRINGPADN'}],
'_check':isstring,
'_depend':''
}, { # Not hidden
@@ -962,11 +979,16 @@ if (#varname#_cb.capi==Py_None) {
'keyformat': {isoptional: 'O'},
'args_capi': {isrequired: ',&#varname#_capi'},
'keys_capi': {isoptional: ',&#varname#_capi'},
- 'pyobjfrom': {isintent_inout: '''\
-\tf2py_success = try_pyarr_from_#ctype#(#varname#_capi,#varname#);
-\tif (f2py_success) {'''},
+ 'pyobjfrom': [
+ {l_and(isintent_inout, l_not(isintent_c)):
+ "\t\tSTRINGPADN(#varname#, slen(#varname#), ' ', '\\0');"},
+ {isintent_inout: '''\
+\tf2py_success = try_pyarr_from_#ctype#(#varname#_capi, #varname#,
+\t slen(#varname#));
+\tif (f2py_success) {'''}],
'closepyobjfrom': {isintent_inout: '\t} /*if (f2py_success) of #varname# pyobjfrom*/'},
- 'need': {isintent_inout: 'try_pyarr_from_#ctype#'},
+ 'need': {isintent_inout: 'try_pyarr_from_#ctype#',
+ l_and(isintent_inout, l_not(isintent_c)): 'STRINGPADN'},
'_check': l_and(isstring, isintent_nothide)
}, { # Hidden
'_check': l_and(isstring, isintent_hide)
diff --git a/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c b/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c
index 0411b62e0..fe21d4b9b 100644
--- a/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c
+++ b/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c
@@ -93,7 +93,7 @@ static PyObject *f2py_rout_wrap_attrs(PyObject *capi_self,
PyObject *strides = NULL;
char s[100];
int i;
- memset(s,0,100*sizeof(char));
+ memset(s,0,100);
if (!PyArg_ParseTuple(capi_args,"O!|:wrap.attrs",
&PyArray_Type,&arr_capi))
return NULL;
diff --git a/numpy/f2py/tests/test_return_character.py b/numpy/f2py/tests/test_return_character.py
index 429e69bb4..7d4ced914 100644
--- a/numpy/f2py/tests/test_return_character.py
+++ b/numpy/f2py/tests/test_return_character.py
@@ -21,11 +21,11 @@ class TestReturnCharacter(util.F2PyTest):
#assert_(_raises(ValueError, t, array([77,87])))
#assert_(_raises(ValueError, t, array(77)))
elif tname in ['ts', 'ss']:
- assert_(t(23) == b'23 ', repr(t(23)))
+ assert_(t(23) == b'23', repr(t(23)))
assert_(t('123456789abcdef') == b'123456789a')
elif tname in ['t5', 's5']:
- assert_(t(23) == b'23 ', repr(t(23)))
- assert_(t('ab') == b'ab ', repr(t('ab')))
+ assert_(t(23) == b'23', repr(t(23)))
+ assert_(t('ab') == b'ab', repr(t('ab')))
assert_(t('123456789abcdef') == b'12345')
else:
raise NotImplementedError
diff --git a/numpy/f2py/tests/test_string.py b/numpy/f2py/tests/test_string.py
index e3ec96af9..7b27f8786 100644
--- a/numpy/f2py/tests/test_string.py
+++ b/numpy/f2py/tests/test_string.py
@@ -1,6 +1,6 @@
import os
import pytest
-
+import textwrap
from numpy.testing import assert_array_equal
import numpy as np
from . import util
@@ -9,14 +9,158 @@ from . import util
def _path(*a):
return os.path.join(*((os.path.dirname(__file__),) + a))
+
class TestString(util.F2PyTest):
sources = [_path('src', 'string', 'char.f90')]
@pytest.mark.slow
def test_char(self):
strings = np.array(['ab', 'cd', 'ef'], dtype='c').T
- inp, out = self.module.char_test.change_strings(strings, strings.shape[1])
+ inp, out = self.module.char_test.change_strings(strings,
+ strings.shape[1])
assert_array_equal(inp, strings)
expected = strings.copy()
expected[1, :] = 'AAA'
assert_array_equal(out, expected)
+
+
+class TestDocStringArguments(util.F2PyTest):
+ suffix = '.f'
+
+ code = """
+C FILE: STRING.F
+ SUBROUTINE FOO(A,B,C,D)
+ CHARACTER*5 A, B
+ CHARACTER*(*) C,D
+Cf2py intent(in) a,c
+Cf2py intent(inout) b,d
+ PRINT*, "A=",A
+ PRINT*, "B=",B
+ PRINT*, "C=",C
+ PRINT*, "D=",D
+ PRINT*, "CHANGE A,B,C,D"
+ A(1:1) = 'A'
+ B(1:1) = 'B'
+ C(1:1) = 'C'
+ D(1:1) = 'D'
+ PRINT*, "A=",A
+ PRINT*, "B=",B
+ PRINT*, "C=",C
+ PRINT*, "D=",D
+ END
+C END OF FILE STRING.F
+ """
+
+ def test_example(self):
+ a = np.array(b'123\0\0')
+ b = np.array(b'123\0\0')
+ c = np.array(b'123')
+ d = np.array(b'123')
+
+ self.module.foo(a, b, c, d)
+
+ assert a.tobytes() == b'123\0\0'
+ assert b.tobytes() == b'B23\0\0', (b.tobytes(),)
+ assert c.tobytes() == b'123'
+ assert d.tobytes() == b'D23'
+
+
+class TestFixedString(util.F2PyTest):
+ suffix = '.f90'
+
+ code = textwrap.dedent("""
+ function sint(s) result(i)
+ implicit none
+ character(len=*) :: s
+ integer :: j, i
+ i = 0
+ do j=len(s), 1, -1
+ if (.not.((i.eq.0).and.(s(j:j).eq.' '))) then
+ i = i + ichar(s(j:j)) * 10 ** (j - 1)
+ endif
+ end do
+ return
+ end function sint
+
+ function test_in_bytes4(a) result (i)
+ implicit none
+ integer :: sint
+ character(len=4) :: a
+ integer :: i
+ i = sint(a)
+ a(1:1) = 'A'
+ return
+ end function test_in_bytes4
+
+ function test_inout_bytes4(a) result (i)
+ implicit none
+ integer :: sint
+ character(len=4), intent(inout) :: a
+ integer :: i
+ if (a(1:1).ne.' ') then
+ a(1:1) = 'E'
+ endif
+ i = sint(a)
+ return
+ end function test_inout_bytes4
+ """)
+
+ @staticmethod
+ def _sint(s, start=0, end=None):
+ """Return the content of a string buffer as integer value.
+
+ For example:
+ _sint('1234') -> 4321
+ _sint('123A') -> 17321
+ """
+ if isinstance(s, np.ndarray):
+ s = s.tobytes()
+ elif isinstance(s, str):
+ s = s.encode()
+ assert isinstance(s, bytes)
+ if end is None:
+ end = len(s)
+ i = 0
+ for j in range(start, min(end, len(s))):
+ i += s[j] * 10 ** j
+ return i
+
+ def _get_input(self, intent='in'):
+ if intent in ['in']:
+ yield ''
+ yield '1'
+ yield '1234'
+ yield '12345'
+ yield b''
+ yield b'\0'
+ yield b'1'
+ yield b'\01'
+ yield b'1\0'
+ yield b'1234'
+ yield b'12345'
+ yield np.ndarray((), np.bytes_, buffer=b'') # array(b'', dtype='|S0')
+ yield np.array(b'') # array(b'', dtype='|S1')
+ yield np.array(b'\0')
+ yield np.array(b'1')
+ yield np.array(b'1\0')
+ yield np.array(b'\01')
+ yield np.array(b'1234')
+ yield np.array(b'123\0')
+ yield np.array(b'12345')
+
+ def test_intent_in(self):
+ for s in self._get_input():
+ r = self.module.test_in_bytes4(s)
+ # also checks that s is not changed inplace
+ expected = self._sint(s, end=4)
+ assert r == expected, (s)
+
+ def test_intent_inout(self):
+ for s in self._get_input(intent='inout'):
+ rest = self._sint(s, start=4)
+ r = self.module.test_inout_bytes4(s)
+ expected = self._sint(s, end=4)
+ assert r == expected
+
+ # check that the rest of input string is preserved
+ assert rest == self._sint(s, start=4)
diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index 783d45c2f..2e9ae6644 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -88,8 +88,11 @@ def rot90(m, k=1, axes=(0, 1)):
Notes
-----
- rot90(m, k=1, axes=(1,0)) is the reverse of rot90(m, k=1, axes=(0,1))
- rot90(m, k=1, axes=(1,0)) is equivalent to rot90(m, k=-1, axes=(0,1))
+ ``rot90(m, k=1, axes=(1,0))`` is the reverse of
+ ``rot90(m, k=1, axes=(0,1))``
+
+ ``rot90(m, k=1, axes=(1,0))`` is equivalent to
+ ``rot90(m, k=-1, axes=(0,1))``
Examples
--------
diff --git a/numpy/lib/twodim_base.pyi b/numpy/lib/twodim_base.pyi
index 79b9511b8..007338d77 100644
--- a/numpy/lib/twodim_base.pyi
+++ b/numpy/lib/twodim_base.pyi
@@ -1,32 +1,255 @@
-from typing import List, Optional, Any
+from typing import (
+ Any,
+ Callable,
+ List,
+ Sequence,
+ overload,
+ Tuple,
+ Type,
+ TypeVar,
+ Union,
+)
-from numpy import ndarray, _OrderCF
-from numpy.typing import ArrayLike, DTypeLike
+from numpy import (
+ ndarray,
+ dtype,
+ generic,
+ number,
+ bool_,
+ timedelta64,
+ datetime64,
+ int_,
+ intp,
+ float64,
+ signedinteger,
+ floating,
+ complexfloating,
+ object_,
+ _OrderCF,
+)
+
+from numpy.typing import (
+ DTypeLike,
+ _SupportsDType,
+ ArrayLike,
+ NDArray,
+ _NestedSequence,
+ _SupportsArray,
+ _ArrayLikeInt_co,
+ _ArrayLikeFloat_co,
+ _ArrayLikeComplex_co,
+ _ArrayLikeObject_co,
+)
+
+_T = TypeVar("_T")
+_SCT = TypeVar("_SCT", bound=generic)
+
+# The returned arrays dtype must be compatible with `np.equal`
+_MaskFunc = Callable[
+ [NDArray[int_], _T],
+ NDArray[Union[number[Any], bool_, timedelta64, datetime64, object_]],
+]
+
+_DTypeLike = Union[
+ Type[_SCT],
+ dtype[_SCT],
+ _SupportsDType[dtype[_SCT]],
+]
+_ArrayLike = _NestedSequence[_SupportsArray[dtype[_SCT]]]
__all__: List[str]
-def fliplr(m): ...
-def flipud(m): ...
+@overload
+def fliplr(m: _ArrayLike[_SCT]) -> NDArray[_SCT]: ...
+@overload
+def fliplr(m: ArrayLike) -> NDArray[Any]: ...
+
+@overload
+def flipud(m: _ArrayLike[_SCT]) -> NDArray[_SCT]: ...
+@overload
+def flipud(m: ArrayLike) -> NDArray[Any]: ...
+@overload
def eye(
N: int,
- M: Optional[int] = ...,
+ M: None | int = ...,
+ k: int = ...,
+ dtype: None = ...,
+ order: _OrderCF = ...,
+ *,
+ like: None | ArrayLike = ...,
+) -> NDArray[float64]: ...
+@overload
+def eye(
+ N: int,
+ M: None | int = ...,
+ k: int = ...,
+ dtype: _DTypeLike[_SCT] = ...,
+ order: _OrderCF = ...,
+ *,
+ like: None | ArrayLike = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def eye(
+ N: int,
+ M: None | int = ...,
k: int = ...,
dtype: DTypeLike = ...,
order: _OrderCF = ...,
*,
- like: Optional[ArrayLike] = ...
-) -> ndarray[Any, Any]: ...
-
-def diag(v, k=...): ...
-def diagflat(v, k=...): ...
-def tri(N, M=..., k=..., dtype = ..., *, like=...): ...
-def tril(m, k=...): ...
-def triu(m, k=...): ...
-def vander(x, N=..., increasing=...): ...
-def histogram2d(x, y, bins=..., range=..., normed=..., weights=..., density=...): ...
-def mask_indices(n, mask_func, k=...): ...
-def tril_indices(n, k=..., m=...): ...
-def tril_indices_from(arr, k=...): ...
-def triu_indices(n, k=..., m=...): ...
-def triu_indices_from(arr, k=...): ...
+ like: None | ArrayLike = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def diag(v: _ArrayLike[_SCT], k: int = ...) -> NDArray[_SCT]: ...
+@overload
+def diag(v: ArrayLike, k: int = ...) -> NDArray[Any]: ...
+
+@overload
+def diagflat(v: _ArrayLike[_SCT], k: int = ...) -> NDArray[_SCT]: ...
+@overload
+def diagflat(v: ArrayLike, k: int = ...) -> NDArray[Any]: ...
+
+@overload
+def tri(
+ N: int,
+ M: None | int = ...,
+ k: int = ...,
+ dtype: None = ...,
+ *,
+ like: None | ArrayLike = ...
+) -> NDArray[float64]: ...
+@overload
+def tri(
+ N: int,
+ M: None | int = ...,
+ k: int = ...,
+ dtype: _DTypeLike[_SCT] = ...,
+ *,
+ like: None | ArrayLike = ...
+) -> NDArray[_SCT]: ...
+@overload
+def tri(
+ N: int,
+ M: None | int = ...,
+ k: int = ...,
+ dtype: DTypeLike = ...,
+ *,
+ like: None | ArrayLike = ...
+) -> NDArray[Any]: ...
+
+@overload
+def tril(v: _ArrayLike[_SCT], k: int = ...) -> NDArray[_SCT]: ...
+@overload
+def tril(v: ArrayLike, k: int = ...) -> NDArray[Any]: ...
+
+@overload
+def triu(v: _ArrayLike[_SCT], k: int = ...) -> NDArray[_SCT]: ...
+@overload
+def triu(v: ArrayLike, k: int = ...) -> NDArray[Any]: ...
+
+@overload
+def vander( # type: ignore[misc]
+ x: _ArrayLikeInt_co,
+ N: None | int = ...,
+ increasing: bool = ...,
+) -> NDArray[signedinteger[Any]]: ...
+@overload
+def vander( # type: ignore[misc]
+ x: _ArrayLikeFloat_co,
+ N: None | int = ...,
+ increasing: bool = ...,
+) -> NDArray[floating[Any]]: ...
+@overload
+def vander(
+ x: _ArrayLikeComplex_co,
+ N: None | int = ...,
+ increasing: bool = ...,
+) -> NDArray[complexfloating[Any, Any]]: ...
+@overload
+def vander(
+ x: _ArrayLikeObject_co,
+ N: None | int = ...,
+ increasing: bool = ...,
+) -> NDArray[object_]: ...
+
+@overload
+def histogram2d( # type: ignore[misc]
+ x: _ArrayLikeFloat_co,
+ y: _ArrayLikeFloat_co,
+ bins: int | Sequence[int] = ...,
+ range: None | _ArrayLikeFloat_co = ...,
+ normed: None | bool = ...,
+ weights: None | _ArrayLikeFloat_co = ...,
+ density: None | bool = ...,
+) -> Tuple[
+ NDArray[float64],
+ NDArray[floating[Any]],
+ NDArray[floating[Any]],
+]: ...
+@overload
+def histogram2d(
+ x: _ArrayLikeComplex_co,
+ y: _ArrayLikeComplex_co,
+ bins: int | Sequence[int] = ...,
+ range: None | _ArrayLikeFloat_co = ...,
+ normed: None | bool = ...,
+ weights: None | _ArrayLikeFloat_co = ...,
+ density: None | bool = ...,
+) -> Tuple[
+ NDArray[float64],
+ NDArray[complexfloating[Any, Any]],
+ NDArray[complexfloating[Any, Any]],
+]: ...
+@overload # TODO: Sort out `bins`
+def histogram2d(
+ x: _ArrayLikeComplex_co,
+ y: _ArrayLikeComplex_co,
+ bins: Sequence[_ArrayLikeInt_co],
+ range: None | _ArrayLikeFloat_co = ...,
+ normed: None | bool = ...,
+ weights: None | _ArrayLikeFloat_co = ...,
+ density: None | bool = ...,
+) -> Tuple[
+ NDArray[float64],
+ NDArray[Any],
+ NDArray[Any],
+]: ...
+
+# NOTE: we're assuming/demanding here the `mask_func` returns
+# an ndarray of shape `(n, n)`; otherwise there is the possibility
+# of the output tuple having more or less than 2 elements
+@overload
+def mask_indices(
+ n: int,
+ mask_func: _MaskFunc[int],
+ k: int = ...,
+) -> Tuple[NDArray[intp], NDArray[intp]]: ...
+@overload
+def mask_indices(
+ n: int,
+ mask_func: _MaskFunc[_T],
+ k: _T,
+) -> Tuple[NDArray[intp], NDArray[intp]]: ...
+
+def tril_indices(
+ n: int,
+ k: int = ...,
+ m: None | int = ...,
+) -> Tuple[NDArray[int_], NDArray[int_]]: ...
+
+def tril_indices_from(
+ arr: NDArray[Any],
+ k: int = ...,
+) -> Tuple[NDArray[int_], NDArray[int_]]: ...
+
+def triu_indices(
+ n: int,
+ k: int = ...,
+ m: None | int = ...,
+) -> Tuple[NDArray[int_], NDArray[int_]]: ...
+
+def triu_indices_from(
+ arr: NDArray[Any],
+ k: int = ...,
+) -> Tuple[NDArray[int_], NDArray[int_]]: ...
diff --git a/numpy/lib/type_check.pyi b/numpy/lib/type_check.pyi
index 7da02bb9f..fbe325858 100644
--- a/numpy/lib/type_check.pyi
+++ b/numpy/lib/type_check.pyi
@@ -1,19 +1,235 @@
-from typing import List
+import sys
+from typing import (
+ Any,
+ Container,
+ Iterable,
+ List,
+ overload,
+ Type,
+ TypeVar,
+)
+
+from numpy import (
+ dtype,
+ generic,
+ bool_,
+ floating,
+ float64,
+ complexfloating,
+ integer,
+)
+
+from numpy.typing import (
+ ArrayLike,
+ DTypeLike,
+ NBitBase,
+ NDArray,
+ _64Bit,
+ _SupportsDType,
+ _ScalarLike_co,
+ _NestedSequence,
+ _SupportsArray,
+ _DTypeLikeComplex,
+)
+
+if sys.version_info >= (3, 8):
+ from typing import Protocol, Literal as L
+else:
+ from typing_extensions import Protocol, Literal as L
+
+_T = TypeVar("_T")
+_T_co = TypeVar("_T_co", covariant=True)
+_SCT = TypeVar("_SCT", bound=generic)
+_NBit1 = TypeVar("_NBit1", bound=NBitBase)
+_NBit2 = TypeVar("_NBit2", bound=NBitBase)
+
+_ArrayLike = _NestedSequence[_SupportsArray[dtype[_SCT]]]
+
+class _SupportsReal(Protocol[_T_co]):
+ @property
+ def real(self) -> _T_co: ...
+
+class _SupportsImag(Protocol[_T_co]):
+ @property
+ def imag(self) -> _T_co: ...
__all__: List[str]
-def mintypecode(typechars, typeset=..., default=...): ...
-def asfarray(a, dtype = ...): ...
-def real(val): ...
-def imag(val): ...
-def iscomplex(x): ...
-def isreal(x): ...
-def iscomplexobj(x): ...
-def isrealobj(x): ...
-def nan_to_num(x, copy=..., nan=..., posinf=..., neginf=...): ...
-def real_if_close(a, tol=...): ...
-def typename(char): ...
-def common_type(*arrays): ...
-
-# NOTE: Deprecated
+def mintypecode(
+ typechars: Iterable[str | ArrayLike],
+ typeset: Container[str] = ...,
+ default: str = ...,
+) -> str: ...
+
+# `asfarray` ignores dtypes if they're not inexact
+
+@overload
+def asfarray(
+ a: object,
+ dtype: None | Type[float] = ...,
+) -> NDArray[float64]: ...
+@overload
+def asfarray( # type: ignore[misc]
+ a: Any,
+ dtype: _DTypeLikeComplex,
+) -> NDArray[complexfloating[Any, Any]]: ...
+@overload
+def asfarray(
+ a: Any,
+ dtype: DTypeLike,
+) -> NDArray[floating[Any]]: ...
+
+@overload
+def real(val: _SupportsReal[_T]) -> _T: ...
+@overload
+def real(val: ArrayLike) -> NDArray[Any]: ...
+
+@overload
+def imag(val: _SupportsImag[_T]) -> _T: ...
+@overload
+def imag(val: ArrayLike) -> NDArray[Any]: ...
+
+@overload
+def iscomplex(x: _ScalarLike_co) -> bool_: ... # type: ignore[misc]
+@overload
+def iscomplex(x: ArrayLike) -> NDArray[bool_]: ...
+
+@overload
+def isreal(x: _ScalarLike_co) -> bool_: ... # type: ignore[misc]
+@overload
+def isreal(x: ArrayLike) -> NDArray[bool_]: ...
+
+def iscomplexobj(x: _SupportsDType[dtype[Any]] | ArrayLike) -> bool: ...
+
+def isrealobj(x: _SupportsDType[dtype[Any]] | ArrayLike) -> bool: ...
+
+@overload
+def nan_to_num( # type: ignore[misc]
+ x: _SCT,
+ copy: bool = ...,
+ nan: float = ...,
+ posinf: None | float = ...,
+ neginf: None | float = ...,
+) -> _SCT: ...
+@overload
+def nan_to_num(
+ x: _ScalarLike_co,
+ copy: bool = ...,
+ nan: float = ...,
+ posinf: None | float = ...,
+ neginf: None | float = ...,
+) -> Any: ...
+@overload
+def nan_to_num(
+ x: _ArrayLike[_SCT],
+ copy: bool = ...,
+ nan: float = ...,
+ posinf: None | float = ...,
+ neginf: None | float = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def nan_to_num(
+ x: ArrayLike,
+ copy: bool = ...,
+ nan: float = ...,
+ posinf: None | float = ...,
+ neginf: None | float = ...,
+) -> NDArray[Any]: ...
+
+# If one passes a complex array to `real_if_close`, then one is reasonably
+# expected to verify the output dtype (so we can return an unsafe union here)
+
+@overload
+def real_if_close( # type: ignore[misc]
+ a: _ArrayLike[complexfloating[_NBit1, _NBit1]],
+ tol: float = ...,
+) -> NDArray[floating[_NBit1]] | NDArray[complexfloating[_NBit1, _NBit1]]: ...
+@overload
+def real_if_close(
+ a: _ArrayLike[_SCT],
+ tol: float = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def real_if_close(
+ a: ArrayLike,
+ tol: float = ...,
+) -> NDArray[Any]: ...
+
+# NOTE: deprecated
# def asscalar(a): ...
+
+@overload
+def typename(char: L['S1']) -> L['character']: ...
+@overload
+def typename(char: L['?']) -> L['bool']: ...
+@overload
+def typename(char: L['b']) -> L['signed char']: ...
+@overload
+def typename(char: L['B']) -> L['unsigned char']: ...
+@overload
+def typename(char: L['h']) -> L['short']: ...
+@overload
+def typename(char: L['H']) -> L['unsigned short']: ...
+@overload
+def typename(char: L['i']) -> L['integer']: ...
+@overload
+def typename(char: L['I']) -> L['unsigned integer']: ...
+@overload
+def typename(char: L['l']) -> L['long integer']: ...
+@overload
+def typename(char: L['L']) -> L['unsigned long integer']: ...
+@overload
+def typename(char: L['q']) -> L['long long integer']: ...
+@overload
+def typename(char: L['Q']) -> L['unsigned long long integer']: ...
+@overload
+def typename(char: L['f']) -> L['single precision']: ...
+@overload
+def typename(char: L['d']) -> L['double precision']: ...
+@overload
+def typename(char: L['g']) -> L['long precision']: ...
+@overload
+def typename(char: L['F']) -> L['complex single precision']: ...
+@overload
+def typename(char: L['D']) -> L['complex double precision']: ...
+@overload
+def typename(char: L['G']) -> L['complex long double precision']: ...
+@overload
+def typename(char: L['S']) -> L['string']: ...
+@overload
+def typename(char: L['U']) -> L['unicode']: ...
+@overload
+def typename(char: L['V']) -> L['void']: ...
+@overload
+def typename(char: L['O']) -> L['object']: ...
+
+@overload
+def common_type( # type: ignore[misc]
+ *arrays: _SupportsDType[dtype[
+ integer[Any]
+ ]]
+) -> Type[floating[_64Bit]]: ...
+@overload
+def common_type( # type: ignore[misc]
+ *arrays: _SupportsDType[dtype[
+ floating[_NBit1]
+ ]]
+) -> Type[floating[_NBit1]]: ...
+@overload
+def common_type( # type: ignore[misc]
+ *arrays: _SupportsDType[dtype[
+ integer[Any] | floating[_NBit1]
+ ]]
+) -> Type[floating[_NBit1 | _64Bit]]: ...
+@overload
+def common_type( # type: ignore[misc]
+ *arrays: _SupportsDType[dtype[
+ floating[_NBit1] | complexfloating[_NBit2, _NBit2]
+ ]]
+) -> Type[complexfloating[_NBit1 | _NBit2, _NBit1 | _NBit2]]: ...
+@overload
+def common_type(
+ *arrays: _SupportsDType[dtype[
+ integer[Any] | floating[_NBit1] | complexfloating[_NBit2, _NBit2]
+ ]]
+) -> Type[complexfloating[_64Bit | _NBit1 | _NBit2, _64Bit | _NBit1 | _NBit2]]: ...
diff --git a/numpy/typing/tests/data/fail/array_constructors.py b/numpy/typing/tests/data/fail/array_constructors.py
index eb57e5c00..0e2250513 100644
--- a/numpy/typing/tests/data/fail/array_constructors.py
+++ b/numpy/typing/tests/data/fail/array_constructors.py
@@ -27,5 +27,5 @@ np.logspace(0, 2, base=None) # E: Argument "base"
np.geomspace(None, 'bob') # E: Argument 1
np.stack(generator) # E: No overload variant
-np.hstack({1, 2}) # E: incompatible type
-np.vstack(1) # E: incompatible type
+np.hstack({1, 2}) # E: No overload variant
+np.vstack(1) # E: No overload variant
diff --git a/numpy/typing/tests/data/fail/twodim_base.py b/numpy/typing/tests/data/fail/twodim_base.py
new file mode 100644
index 000000000..ab34a374c
--- /dev/null
+++ b/numpy/typing/tests/data/fail/twodim_base.py
@@ -0,0 +1,37 @@
+from typing import Any, List, TypeVar
+
+import numpy as np
+import numpy.typing as npt
+
+
+def func1(ar: npt.NDArray[Any], a: int) -> npt.NDArray[np.str_]:
+ pass
+
+
+def func2(ar: npt.NDArray[Any], a: float) -> float:
+ pass
+
+
+AR_b: npt.NDArray[np.bool_]
+AR_m: npt.NDArray[np.timedelta64]
+
+AR_LIKE_b: List[bool]
+
+np.eye(10, M=20.0) # E: No overload variant
+np.eye(10, k=2.5, dtype=int) # E: No overload variant
+
+np.diag(AR_b, k=0.5) # E: No overload variant
+np.diagflat(AR_b, k=0.5) # E: No overload variant
+
+np.tri(10, M=20.0) # E: No overload variant
+np.tri(10, k=2.5, dtype=int) # E: No overload variant
+
+np.tril(AR_b, k=0.5) # E: No overload variant
+np.triu(AR_b, k=0.5) # E: No overload variant
+
+np.vander(AR_m) # E: incompatible type
+
+np.histogram2d(AR_m) # E: No overload variant
+
+np.mask_indices(10, func1) # E: incompatible type
+np.mask_indices(10, func2, 10.5) # E: incompatible type
diff --git a/numpy/typing/tests/data/fail/type_check.py b/numpy/typing/tests/data/fail/type_check.py
new file mode 100644
index 000000000..95f52bfbd
--- /dev/null
+++ b/numpy/typing/tests/data/fail/type_check.py
@@ -0,0 +1,13 @@
+import numpy as np
+import numpy.typing as npt
+
+DTYPE_i8: np.dtype[np.int64]
+
+np.mintypecode(DTYPE_i8) # E: incompatible type
+np.iscomplexobj(DTYPE_i8) # E: incompatible type
+np.isrealobj(DTYPE_i8) # E: incompatible type
+
+np.typename(DTYPE_i8) # E: No overload variant
+np.typename("invalid") # E: No overload variant
+
+np.common_type(np.timedelta64()) # E: incompatible type
diff --git a/numpy/typing/tests/data/reveal/array_constructors.py b/numpy/typing/tests/data/reveal/array_constructors.py
index 1b9006220..44c85e988 100644
--- a/numpy/typing/tests/data/reveal/array_constructors.py
+++ b/numpy/typing/tests/data/reveal/array_constructors.py
@@ -147,25 +147,27 @@ reveal_type(np.fromfunction(func, (3, 5))) # E: SubClass[{float64}]
reveal_type(np.identity(10)) # E: numpy.ndarray[Any, Any]
-reveal_type(np.atleast_1d(A)) # E: numpy.ndarray[Any, Any]
-reveal_type(np.atleast_1d(C)) # E: numpy.ndarray[Any, Any]
-reveal_type(np.atleast_1d(A, A)) # E: list[numpy.ndarray[Any, Any]]
-reveal_type(np.atleast_1d(A, C)) # E: list[numpy.ndarray[Any, Any]]
-reveal_type(np.atleast_1d(C, C)) # E: list[numpy.ndarray[Any, Any]]
+reveal_type(np.atleast_1d(A)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.atleast_1d(C)) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+reveal_type(np.atleast_1d(A, A)) # E: list[numpy.ndarray[Any, numpy.dtype[Any]]]
+reveal_type(np.atleast_1d(A, C)) # E: list[numpy.ndarray[Any, numpy.dtype[Any]]]
+reveal_type(np.atleast_1d(C, C)) # E: list[numpy.ndarray[Any, numpy.dtype[Any]]]
-reveal_type(np.atleast_2d(A)) # E: numpy.ndarray[Any, Any]
+reveal_type(np.atleast_2d(A)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
-reveal_type(np.atleast_3d(A)) # E: numpy.ndarray[Any, Any]
+reveal_type(np.atleast_3d(A)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
-reveal_type(np.vstack([A, A])) # E: numpy.ndarray[Any, Any]
-reveal_type(np.vstack([A, C])) # E: numpy.ndarray[Any, Any]
-reveal_type(np.vstack([C, C])) # E: numpy.ndarray[Any, Any]
+reveal_type(np.vstack([A, A])) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.vstack([A, C])) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+reveal_type(np.vstack([C, C])) # E: numpy.ndarray[Any, numpy.dtype[Any]]
-reveal_type(np.hstack([A, A])) # E: numpy.ndarray[Any, Any]
+reveal_type(np.hstack([A, A])) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
-reveal_type(np.stack([A, A])) # E: numpy.ndarray[Any, Any]
-reveal_type(np.stack([A, A], axis=0)) # E: numpy.ndarray[Any, Any]
-reveal_type(np.stack([A, A], out=B)) # E: SubClass
+reveal_type(np.stack([A, A])) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.stack([A, C])) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+reveal_type(np.stack([C, C])) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+reveal_type(np.stack([A, A], axis=0)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.stack([A, A], out=B)) # E: SubClass[{float64}]
-reveal_type(np.block([[A, A], [A, A]])) # E: numpy.ndarray[Any, Any]
-reveal_type(np.block(C)) # E: numpy.ndarray[Any, Any]
+reveal_type(np.block([[A, A], [A, A]])) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.block(C)) # E: numpy.ndarray[Any, numpy.dtype[Any]]
diff --git a/numpy/typing/tests/data/reveal/dtype.py b/numpy/typing/tests/data/reveal/dtype.py
index 215d89ead..eb1489bf3 100644
--- a/numpy/typing/tests/data/reveal/dtype.py
+++ b/numpy/typing/tests/data/reveal/dtype.py
@@ -1,7 +1,8 @@
import numpy as np
-dtype_obj: np.dtype[np.str_]
-void_dtype_obj: np.dtype[np.void]
+dtype_U: np.dtype[np.str_]
+dtype_V: np.dtype[np.void]
+dtype_i8: np.dtype[np.int64]
reveal_type(np.dtype(np.float64)) # E: numpy.dtype[{float64}]
reveal_type(np.dtype(np.int64)) # E: numpy.dtype[{int64}]
@@ -36,22 +37,30 @@ reveal_type(np.dtype("S8")) # E: numpy.dtype
reveal_type(np.dtype(("U", 10))) # E: numpy.dtype[numpy.void]
# Methods and attributes
-reveal_type(dtype_obj.base) # E: numpy.dtype[numpy.str_]
-reveal_type(dtype_obj.subdtype) # E: Union[Tuple[numpy.dtype[numpy.str_], builtins.tuple[builtins.int]], None]
-reveal_type(dtype_obj.newbyteorder()) # E: numpy.dtype[numpy.str_]
-reveal_type(dtype_obj.type) # E: Type[numpy.str_]
-reveal_type(dtype_obj.name) # E: str
-reveal_type(dtype_obj.names) # E: Union[builtins.tuple[builtins.str], None]
-
-reveal_type(dtype_obj * 0) # E: None
-reveal_type(dtype_obj * 1) # E: numpy.dtype[numpy.str_]
-reveal_type(dtype_obj * 2) # E: numpy.dtype[numpy.void]
-
-reveal_type(0 * dtype_obj) # E: Any
-reveal_type(1 * dtype_obj) # E: Any
-reveal_type(2 * dtype_obj) # E: Any
-
-reveal_type(void_dtype_obj["f0"]) # E: numpy.dtype[Any]
-reveal_type(void_dtype_obj[0]) # E: numpy.dtype[Any]
-reveal_type(void_dtype_obj[["f0", "f1"]]) # E: numpy.dtype[numpy.void]
-reveal_type(void_dtype_obj[["f0"]]) # E: numpy.dtype[numpy.void]
+reveal_type(dtype_U.base) # E: numpy.dtype[Any]
+reveal_type(dtype_U.subdtype) # E: Union[None, Tuple[numpy.dtype[Any], builtins.tuple[builtins.int]]]
+reveal_type(dtype_U.newbyteorder()) # E: numpy.dtype[numpy.str_]
+reveal_type(dtype_U.type) # E: Type[numpy.str_]
+reveal_type(dtype_U.name) # E: str
+reveal_type(dtype_U.names) # E: Union[None, builtins.tuple[builtins.str]]
+
+reveal_type(dtype_U * 0) # E: numpy.dtype[numpy.str_]
+reveal_type(dtype_U * 1) # E: numpy.dtype[numpy.str_]
+reveal_type(dtype_U * 2) # E: numpy.dtype[numpy.str_]
+
+reveal_type(dtype_i8 * 0) # E: numpy.dtype[numpy.void]
+reveal_type(dtype_i8 * 1) # E: numpy.dtype[{int64}]
+reveal_type(dtype_i8 * 2) # E: numpy.dtype[numpy.void]
+
+reveal_type(0 * dtype_U) # E: numpy.dtype[numpy.str_]
+reveal_type(1 * dtype_U) # E: numpy.dtype[numpy.str_]
+reveal_type(2 * dtype_U) # E: numpy.dtype[numpy.str_]
+
+reveal_type(0 * dtype_i8) # E: numpy.dtype[Any]
+reveal_type(1 * dtype_i8) # E: numpy.dtype[Any]
+reveal_type(2 * dtype_i8) # E: numpy.dtype[Any]
+
+reveal_type(dtype_V["f0"]) # E: numpy.dtype[Any]
+reveal_type(dtype_V[0]) # E: numpy.dtype[Any]
+reveal_type(dtype_V[["f0", "f1"]]) # E: numpy.dtype[numpy.void]
+reveal_type(dtype_V[["f0"]]) # E: numpy.dtype[numpy.void]
diff --git a/numpy/typing/tests/data/reveal/twodim_base.py b/numpy/typing/tests/data/reveal/twodim_base.py
new file mode 100644
index 000000000..b95fbc71e
--- /dev/null
+++ b/numpy/typing/tests/data/reveal/twodim_base.py
@@ -0,0 +1,72 @@
+from typing import Any, List, TypeVar
+
+import numpy as np
+import numpy.typing as npt
+
+_SCT = TypeVar("_SCT", bound=np.generic)
+
+
+def func1(ar: npt.NDArray[_SCT], a: int) -> npt.NDArray[_SCT]:
+ pass
+
+
+def func2(ar: npt.NDArray[np.number[Any]], a: str) -> npt.NDArray[np.float64]:
+ pass
+
+
+AR_b: npt.NDArray[np.bool_]
+AR_u: npt.NDArray[np.uint64]
+AR_i: npt.NDArray[np.int64]
+AR_f: npt.NDArray[np.float64]
+AR_c: npt.NDArray[np.complex128]
+AR_O: npt.NDArray[np.object_]
+
+AR_LIKE_b: List[bool]
+
+reveal_type(np.fliplr(AR_b)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
+reveal_type(np.fliplr(AR_LIKE_b)) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+
+reveal_type(np.flipud(AR_b)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
+reveal_type(np.flipud(AR_LIKE_b)) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+
+reveal_type(np.eye(10)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.eye(10, M=20, dtype=np.int64)) # E: numpy.ndarray[Any, numpy.dtype[{int64}]]
+reveal_type(np.eye(10, k=2, dtype=int)) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+
+reveal_type(np.diag(AR_b)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
+reveal_type(np.diag(AR_LIKE_b, k=0)) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+
+reveal_type(np.diagflat(AR_b)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
+reveal_type(np.diagflat(AR_LIKE_b, k=0)) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+
+reveal_type(np.tri(10)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.tri(10, M=20, dtype=np.int64)) # E: numpy.ndarray[Any, numpy.dtype[{int64}]]
+reveal_type(np.tri(10, k=2, dtype=int)) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+
+reveal_type(np.tril(AR_b)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
+reveal_type(np.tril(AR_LIKE_b, k=0)) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+
+reveal_type(np.triu(AR_b)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
+reveal_type(np.triu(AR_LIKE_b, k=0)) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+
+reveal_type(np.vander(AR_b)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]
+reveal_type(np.vander(AR_u)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]
+reveal_type(np.vander(AR_i, N=2)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]
+reveal_type(np.vander(AR_f, increasing=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
+reveal_type(np.vander(AR_c)) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]
+reveal_type(np.vander(AR_O)) # E: numpy.ndarray[Any, numpy.dtype[numpy.object_]]
+
+reveal_type(np.histogram2d(AR_i, AR_b)) # E: Tuple[numpy.ndarray[Any, numpy.dtype[{float64}]], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]]
+reveal_type(np.histogram2d(AR_f, AR_f)) # E: Tuple[numpy.ndarray[Any, numpy.dtype[{float64}]], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]]
+reveal_type(np.histogram2d(AR_f, AR_c, weights=AR_LIKE_b)) # E: Tuple[numpy.ndarray[Any, numpy.dtype[{float64}]], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]]
+
+reveal_type(np.mask_indices(10, func1)) # E: Tuple[numpy.ndarray[Any, numpy.dtype[{intp}]], numpy.ndarray[Any, numpy.dtype[{intp}]]]
+reveal_type(np.mask_indices(8, func2, "0")) # E: Tuple[numpy.ndarray[Any, numpy.dtype[{intp}]], numpy.ndarray[Any, numpy.dtype[{intp}]]]
+
+reveal_type(np.tril_indices(10)) # E: Tuple[numpy.ndarray[Any, numpy.dtype[{int_}]], numpy.ndarray[Any, numpy.dtype[{int_}]]]
+
+reveal_type(np.tril_indices_from(AR_b)) # E: Tuple[numpy.ndarray[Any, numpy.dtype[{int_}]], numpy.ndarray[Any, numpy.dtype[{int_}]]]
+
+reveal_type(np.triu_indices(10)) # E: Tuple[numpy.ndarray[Any, numpy.dtype[{int_}]], numpy.ndarray[Any, numpy.dtype[{int_}]]]
+
+reveal_type(np.triu_indices_from(AR_b)) # E: Tuple[numpy.ndarray[Any, numpy.dtype[{int_}]], numpy.ndarray[Any, numpy.dtype[{int_}]]]
diff --git a/numpy/typing/tests/data/reveal/type_check.py b/numpy/typing/tests/data/reveal/type_check.py
new file mode 100644
index 000000000..416dd42a8
--- /dev/null
+++ b/numpy/typing/tests/data/reveal/type_check.py
@@ -0,0 +1,73 @@
+from typing import List
+import numpy as np
+import numpy.typing as npt
+
+f8: np.float64
+f: float
+
+# NOTE: Avoid importing the platform specific `np.float128` type
+AR_i8: npt.NDArray[np.int64]
+AR_i4: npt.NDArray[np.int32]
+AR_f2: npt.NDArray[np.float16]
+AR_f8: npt.NDArray[np.float64]
+AR_f16: npt.NDArray[np.floating[npt._128Bit]]
+AR_c8: npt.NDArray[np.complex64]
+AR_c16: npt.NDArray[np.complex128]
+
+AR_LIKE_f: List[float]
+
+class RealObj:
+ real: slice
+
+class ImagObj:
+ imag: slice
+
+reveal_type(np.mintypecode(["f8"], typeset="qfQF"))  # E: str
+
+reveal_type(np.asfarray(AR_f8)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.asfarray(AR_LIKE_f)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.asfarray(AR_f8, dtype="c16")) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]
+reveal_type(np.asfarray(AR_f8, dtype="i8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
+
+reveal_type(np.real(RealObj())) # E: slice
+reveal_type(np.real(AR_f8)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.real(AR_c16)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.real(AR_LIKE_f)) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+
+reveal_type(np.imag(ImagObj())) # E: slice
+reveal_type(np.imag(AR_f8)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.imag(AR_c16)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.imag(AR_LIKE_f)) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+
+reveal_type(np.iscomplex(f8)) # E: numpy.bool_
+reveal_type(np.iscomplex(AR_f8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
+reveal_type(np.iscomplex(AR_LIKE_f)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
+
+reveal_type(np.isreal(f8)) # E: numpy.bool_
+reveal_type(np.isreal(AR_f8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
+reveal_type(np.isreal(AR_LIKE_f)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
+
+reveal_type(np.iscomplexobj(f8)) # E: bool
+reveal_type(np.isrealobj(f8)) # E: bool
+
+reveal_type(np.nan_to_num(f8)) # E: {float64}
+reveal_type(np.nan_to_num(f, copy=True)) # E: Any
+reveal_type(np.nan_to_num(AR_f8, nan=1.5)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.nan_to_num(AR_LIKE_f, posinf=9999)) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+
+reveal_type(np.real_if_close(AR_f8)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.real_if_close(AR_c16)) # E: Union[numpy.ndarray[Any, numpy.dtype[{float64}]], numpy.ndarray[Any, numpy.dtype[{complex128}]]]
+reveal_type(np.real_if_close(AR_c8)) # E: Union[numpy.ndarray[Any, numpy.dtype[{float32}]], numpy.ndarray[Any, numpy.dtype[{complex64}]]]
+reveal_type(np.real_if_close(AR_LIKE_f)) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+
+reveal_type(np.typename("h")) # E: Literal['short']
+reveal_type(np.typename("B")) # E: Literal['unsigned char']
+reveal_type(np.typename("V")) # E: Literal['void']
+reveal_type(np.typename("S1")) # E: Literal['character']
+
+reveal_type(np.common_type(AR_i4)) # E: Type[{float64}]
+reveal_type(np.common_type(AR_f2)) # E: Type[{float16}]
+reveal_type(np.common_type(AR_f2, AR_i4)) # E: Type[{float64}]
+reveal_type(np.common_type(AR_f16, AR_i4)) # E: Type[{float128}]
+reveal_type(np.common_type(AR_c8, AR_f2)) # E: Type[{complex64}]
+reveal_type(np.common_type(AR_f2, AR_c8, AR_i4)) # E: Type[{complex128}]