summaryrefslogtreecommitdiff
path: root/numpy
diff options
context:
space:
mode:
Diffstat (limited to 'numpy')
-rw-r--r--numpy/__init__.pyi31
-rw-r--r--numpy/core/_add_newdocs.py17
-rw-r--r--numpy/core/code_generators/ufunc_docstrings.py2
-rw-r--r--numpy/core/fromnumeric.pyi448
-rw-r--r--numpy/core/include/numpy/ndarraytypes.h8
-rw-r--r--numpy/core/multiarray.pyi16
-rw-r--r--numpy/core/numeric.py8
-rw-r--r--numpy/core/overrides.py2
-rw-r--r--numpy/core/setup.py25
-rw-r--r--numpy/core/src/common/array_assign.c4
-rw-r--r--numpy/core/src/common/npy_partition.h27
-rw-r--r--numpy/core/src/common/npy_partition.h.src134
-rw-r--r--numpy/core/src/multiarray/argfunc.dispatch.c.src394
-rw-r--r--numpy/core/src/multiarray/array_method.c2
-rw-r--r--numpy/core/src/multiarray/arrayfunction_override.c14
-rw-r--r--numpy/core/src/multiarray/arraytypes.c.src127
-rw-r--r--numpy/core/src/multiarray/arraytypes.h.src (renamed from numpy/core/src/multiarray/arraytypes.h)21
-rw-r--r--numpy/core/src/multiarray/buffer.c20
-rw-r--r--numpy/core/src/multiarray/convert_datatype.c30
-rw-r--r--numpy/core/src/multiarray/convert_datatype.h3
-rw-r--r--numpy/core/src/multiarray/ctors.c14
-rw-r--r--numpy/core/src/multiarray/datetime.c14
-rw-r--r--numpy/core/src/multiarray/descriptor.c12
-rw-r--r--numpy/core/src/multiarray/descriptor.h12
-rw-r--r--numpy/core/src/multiarray/dlpack.c29
-rw-r--r--numpy/core/src/multiarray/dtype_transfer.c4
-rw-r--r--numpy/core/src/multiarray/dtypemeta.c129
-rw-r--r--numpy/core/src/multiarray/dtypemeta.h4
-rw-r--r--numpy/core/src/multiarray/flagsobject.c42
-rw-r--r--numpy/core/src/multiarray/mapping.c130
-rw-r--r--numpy/core/src/multiarray/methods.c2
-rw-r--r--numpy/core/src/multiarray/multiarraymodule.c5
-rw-r--r--numpy/core/src/multiarray/shape.c7
-rw-r--r--numpy/core/src/multiarray/textreading/growth.h8
-rw-r--r--numpy/core/src/multiarray/textreading/parser_config.h8
-rw-r--r--numpy/core/src/multiarray/textreading/readtext.h3
-rw-r--r--numpy/core/src/multiarray/textreading/stream.h8
-rw-r--r--numpy/core/src/multiarray/textreading/tokenize.cpp (renamed from numpy/core/src/multiarray/textreading/tokenize.c.src)88
-rw-r--r--numpy/core/src/multiarray/textreading/tokenize.h8
-rw-r--r--numpy/core/src/npysort/binsearch.cpp218
-rw-r--r--numpy/core/src/npysort/selection.cpp371
-rw-r--r--numpy/core/src/umath/dispatching.c34
-rw-r--r--numpy/core/src/umath/legacy_array_method.c6
-rw-r--r--numpy/core/src/umath/loops_hyperbolic.dispatch.c.src2
-rw-r--r--numpy/core/src/umath/loops_trigonometric.dispatch.c.src2
-rw-r--r--numpy/core/src/umath/ufunc_type_resolution.c57
-rw-r--r--numpy/core/tests/test_api.py38
-rw-r--r--numpy/core/tests/test_deprecations.py16
-rw-r--r--numpy/core/tests/test_dlpack.py10
-rw-r--r--numpy/core/tests/test_dtype.py121
-rw-r--r--numpy/core/tests/test_indexing.py6
-rw-r--r--numpy/core/tests/test_multiarray.py177
-rw-r--r--numpy/core/tests/test_overrides.py1
-rw-r--r--numpy/core/tests/test_regression.py12
-rw-r--r--numpy/distutils/__init__.py13
-rw-r--r--numpy/f2py/__init__.py24
-rwxr-xr-xnumpy/f2py/f2py2e.py8
-rw-r--r--numpy/lib/function_base.py41
-rw-r--r--numpy/lib/tests/test_function_base.py13
-rw-r--r--numpy/lib/tests/test_loadtxt.py6
-rw-r--r--numpy/lib/utils.py3
-rw-r--r--numpy/linalg/linalg.py14
-rw-r--r--numpy/typing/_nested_sequence.py2
-rw-r--r--numpy/typing/_ufunc.pyi10
-rw-r--r--numpy/typing/tests/data/fail/false_positives.pyi11
-rw-r--r--numpy/typing/tests/data/fail/fromnumeric.pyi54
-rw-r--r--numpy/typing/tests/data/fail/ndarray_misc.pyi2
-rw-r--r--numpy/typing/tests/data/reveal/array_constructors.pyi5
-rw-r--r--numpy/typing/tests/data/reveal/chararray.pyi3
-rw-r--r--numpy/typing/tests/data/reveal/false_positives.pyi10
-rw-r--r--numpy/typing/tests/data/reveal/fromnumeric.pyi108
-rw-r--r--numpy/typing/tests/data/reveal/memmap.pyi2
-rw-r--r--numpy/typing/tests/data/reveal/ndarray_misc.pyi4
-rw-r--r--numpy/typing/tests/data/reveal/rec.pyi1
74 files changed, 2043 insertions, 1222 deletions
diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi
index 80b4047fc..297c482e5 100644
--- a/numpy/__init__.pyi
+++ b/numpy/__init__.pyi
@@ -676,6 +676,7 @@ _NdArraySubClass = TypeVar("_NdArraySubClass", bound=ndarray)
_DTypeScalar_co = TypeVar("_DTypeScalar_co", covariant=True, bound=generic)
_ByteOrder = L["S", "<", ">", "=", "|", "L", "B", "N", "I"]
+@final
class dtype(Generic[_DTypeScalar_co]):
names: None | tuple[builtins.str, ...]
# Overload for subclass of generic
@@ -930,6 +931,7 @@ _ArrayLikeInt = Union[
_FlatIterSelf = TypeVar("_FlatIterSelf", bound=flatiter)
+@final
class flatiter(Generic[_NdArraySubClass]):
@property
def base(self) -> _NdArraySubClass: ...
@@ -1095,7 +1097,7 @@ class _ArrayOrScalarCommon:
@overload
def argmax(
self,
- axis: _ShapeLike = ...,
+ axis: SupportsIndex = ...,
out: None = ...,
*,
keepdims: bool = ...,
@@ -1103,7 +1105,7 @@ class _ArrayOrScalarCommon:
@overload
def argmax(
self,
- axis: None | _ShapeLike = ...,
+ axis: None | SupportsIndex = ...,
out: _NdArraySubClass = ...,
*,
keepdims: bool = ...,
@@ -1120,7 +1122,7 @@ class _ArrayOrScalarCommon:
@overload
def argmin(
self,
- axis: _ShapeLike = ...,
+ axis: SupportsIndex = ...,
out: None = ...,
*,
keepdims: bool = ...,
@@ -1128,7 +1130,7 @@ class _ArrayOrScalarCommon:
@overload
def argmin(
self,
- axis: None | _ShapeLike = ...,
+ axis: None | SupportsIndex = ...,
out: _NdArraySubClass = ...,
*,
keepdims: bool = ...,
@@ -1357,7 +1359,7 @@ class _ArrayOrScalarCommon:
axis: None | _ShapeLike = ...,
dtype: DTypeLike = ...,
out: None = ...,
- ddof: int = ...,
+ ddof: float = ...,
keepdims: bool = ...,
*,
where: _ArrayLikeBool_co = ...,
@@ -1368,7 +1370,7 @@ class _ArrayOrScalarCommon:
axis: None | _ShapeLike = ...,
dtype: DTypeLike = ...,
out: _NdArraySubClass = ...,
- ddof: int = ...,
+ ddof: float = ...,
keepdims: bool = ...,
*,
where: _ArrayLikeBool_co = ...,
@@ -1401,7 +1403,7 @@ class _ArrayOrScalarCommon:
axis: None | _ShapeLike = ...,
dtype: DTypeLike = ...,
out: None = ...,
- ddof: int = ...,
+ ddof: float = ...,
keepdims: bool = ...,
*,
where: _ArrayLikeBool_co = ...,
@@ -1412,7 +1414,7 @@ class _ArrayOrScalarCommon:
axis: None | _ShapeLike = ...,
dtype: DTypeLike = ...,
out: _NdArraySubClass = ...,
- ddof: int = ...,
+ ddof: float = ...,
keepdims: bool = ...,
*,
where: _ArrayLikeBool_co = ...,
@@ -1524,7 +1526,9 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]):
kwargs: Mapping[str, Any],
) -> Any: ...
- @property
+ # NOTE: In practice any object is accepted by `obj`, but as `__array_finalize__`
+ # is a pseudo-abstract method the type has been narrowed down in order to
+ # grant subclasses a bit more flexibility
def __array_finalize__(self, obj: None | NDArray[Any], /) -> None: ...
def __array_wrap__(
@@ -3136,6 +3140,7 @@ UFUNC_PYVALS_NAME: L["UFUNC_PYVALS"]
newaxis: None
# See `npt._ufunc` for more concrete nin-/nout-specific stubs
+@final
class ufunc:
@property
def __name__(self) -> str: ...
@@ -3372,6 +3377,7 @@ class DataSource:
# TODO: The type of each `__next__` and `iters` return-type depends
# on the length and dtype of `args`; we can't describe this behavior yet
# as we lack variadics (PEP 646).
+@final
class broadcast:
def __new__(cls, *args: ArrayLike) -> broadcast: ...
@property
@@ -3392,6 +3398,7 @@ class broadcast:
def __iter__(self: _T) -> _T: ...
def reset(self) -> None: ...
+@final
class busdaycalendar:
def __new__(
cls,
@@ -3694,7 +3701,7 @@ class memmap(ndarray[_ShapeType, _DType_co]):
shape: None | int | tuple[int, ...] = ...,
order: _OrderKACF = ...,
) -> memmap[Any, dtype[Any]]: ...
- def __array_finalize__(self, obj: memmap[Any, Any]) -> None: ...
+ def __array_finalize__(self, obj: object) -> None: ...
def __array_wrap__(
self,
array: memmap[_ShapeType, _DType_co],
@@ -3806,7 +3813,7 @@ class matrix(ndarray[_ShapeType, _DType_co]):
dtype: DTypeLike = ...,
copy: bool = ...,
) -> matrix[Any, Any]: ...
- def __array_finalize__(self, obj: NDArray[Any]) -> None: ...
+ def __array_finalize__(self, obj: object) -> None: ...
@overload
def __getitem__(self, key: (
@@ -3967,7 +3974,7 @@ class chararray(ndarray[_ShapeType, _CharDType]):
order: _OrderKACF = ...,
) -> chararray[Any, dtype[str_]]: ...
- def __array_finalize__(self, obj: NDArray[str_ | bytes_]) -> None: ...
+ def __array_finalize__(self, obj: object) -> None: ...
def __mul__(self, other: _ArrayLikeInt_co) -> chararray[Any, _CharDType]: ...
def __rmul__(self, other: _ArrayLikeInt_co) -> chararray[Any, _CharDType]: ...
def __mod__(self, i: Any) -> chararray[Any, _CharDType]: ...
diff --git a/numpy/core/_add_newdocs.py b/numpy/core/_add_newdocs.py
index e432f6a11..6ac9951fb 100644
--- a/numpy/core/_add_newdocs.py
+++ b/numpy/core/_add_newdocs.py
@@ -1598,15 +1598,24 @@ add_newdoc('numpy.core.multiarray', 'arange',
Return evenly spaced values within a given interval.
- Values are generated within the half-open interval ``[start, stop)``
- (in other words, the interval including `start` but excluding `stop`).
+ ``arange`` can be called with a varying number of positional arguments:
+
+ * ``arange(stop)``: Values are generated within the half-open interval
+ ``[0, stop)`` (in other words, the interval including `start` but
+ excluding `stop`).
+ * ``arange(start, stop)``: Values are generated within the half-open
+ interval ``[start, stop)``.
+ * ``arange(start, stop, step)`` Values are generated within the half-open
+ interval ``[start, stop)``, with spacing between values given by
+ ``step``.
+
For integer arguments the function is roughly equivalent to the Python
built-in :py:class:`range`, but returns an ndarray rather than a ``range``
instance.
When using a non-integer step, such as 0.1, it is often better to use
`numpy.linspace`.
-
+
See the Warning sections below for more information.
Parameters
@@ -1623,7 +1632,7 @@ add_newdoc('numpy.core.multiarray', 'arange',
between two adjacent values, ``out[i+1] - out[i]``. The default
step size is 1. If `step` is specified as a position argument,
`start` must also be given.
- dtype : dtype
+ dtype : dtype, optional
The type of the output array. If `dtype` is not given, infer the data
type from the other input arguments.
${ARRAY_FUNCTION_LIKE}
diff --git a/numpy/core/code_generators/ufunc_docstrings.py b/numpy/core/code_generators/ufunc_docstrings.py
index d9035fbab..ba3a3a188 100644
--- a/numpy/core/code_generators/ufunc_docstrings.py
+++ b/numpy/core/code_generators/ufunc_docstrings.py
@@ -512,7 +512,7 @@ add_newdoc('numpy.core.umath', 'arctan2',
>>> np.arctan2([1., -1.], [0., 0.])
array([ 1.57079633, -1.57079633])
>>> np.arctan2([0., 0., np.inf], [+0., -0., np.inf])
- array([ 0. , 3.14159265, 0.78539816])
+ array([0. , 3.14159265, 0.78539816])
""")
diff --git a/numpy/core/fromnumeric.pyi b/numpy/core/fromnumeric.pyi
index 9f52ae18d..7d8671c76 100644
--- a/numpy/core/fromnumeric.pyi
+++ b/numpy/core/fromnumeric.pyi
@@ -1,12 +1,19 @@
import datetime as dt
from collections.abc import Sequence
-from typing import Union, Any, overload, TypeVar, Literal
+from typing import Union, Any, overload, TypeVar, Literal, SupportsIndex
from numpy import (
ndarray,
number,
+ uint64,
+ int_,
+ int64,
intp,
+ float16,
bool_,
+ floating,
+ complexfloating,
+ object_,
generic,
_OrderKACF,
_OrderACF,
@@ -24,13 +31,20 @@ from numpy.typing import (
_ShapeLike,
_Shape,
_ArrayLikeBool_co,
+ _ArrayLikeUInt_co,
_ArrayLikeInt_co,
+ _ArrayLikeFloat_co,
+ _ArrayLikeComplex_co,
+ _ArrayLikeObject_co,
_IntLike_co,
+ _BoolLike_co,
+ _ComplexLike_co,
_NumberLike_co,
_ScalarLike_co,
)
_SCT = TypeVar("_SCT", bound=generic)
+_SCT_uifcO = TypeVar("_SCT_uifcO", bound=number[Any] | object_)
_ArrayType = TypeVar("_ArrayType", bound=NDArray[Any])
__all__: list[str]
@@ -47,7 +61,7 @@ def take(
def take(
a: ArrayLike,
indices: _IntLike_co,
- axis: None | int = ...,
+ axis: None | SupportsIndex = ...,
out: None = ...,
mode: _ModeKind = ...,
) -> Any: ...
@@ -55,7 +69,7 @@ def take(
def take(
a: _ArrayLike[_SCT],
indices: _ArrayLikeInt_co,
- axis: None | int = ...,
+ axis: None | SupportsIndex = ...,
out: None = ...,
mode: _ModeKind = ...,
) -> NDArray[_SCT]: ...
@@ -63,7 +77,7 @@ def take(
def take(
a: ArrayLike,
indices: _ArrayLikeInt_co,
- axis: None | int = ...,
+ axis: None | SupportsIndex = ...,
out: None = ...,
mode: _ModeKind = ...,
) -> NDArray[Any]: ...
@@ -71,7 +85,7 @@ def take(
def take(
a: ArrayLike,
indices: _ArrayLikeInt_co,
- axis: None | int = ...,
+ axis: None | SupportsIndex = ...,
out: _ArrayType = ...,
mode: _ModeKind = ...,
) -> _ArrayType: ...
@@ -122,13 +136,13 @@ def choose(
def repeat(
a: _ArrayLike[_SCT],
repeats: _ArrayLikeInt_co,
- axis: None | int = ...,
+ axis: None | SupportsIndex = ...,
) -> NDArray[_SCT]: ...
@overload
def repeat(
a: ArrayLike,
repeats: _ArrayLikeInt_co,
- axis: None | int = ...,
+ axis: None | SupportsIndex = ...,
) -> NDArray[Any]: ...
def put(
@@ -141,14 +155,14 @@ def put(
@overload
def swapaxes(
a: _ArrayLike[_SCT],
- axis1: int,
- axis2: int,
+ axis1: SupportsIndex,
+ axis2: SupportsIndex,
) -> NDArray[_SCT]: ...
@overload
def swapaxes(
a: ArrayLike,
- axis1: int,
- axis2: int,
+ axis1: SupportsIndex,
+ axis2: SupportsIndex,
) -> NDArray[Any]: ...
@overload
@@ -166,7 +180,7 @@ def transpose(
def partition(
a: _ArrayLike[_SCT],
kth: _ArrayLikeInt_co,
- axis: None | int = ...,
+ axis: None | SupportsIndex = ...,
kind: _PartitionKind = ...,
order: None | str | Sequence[str] = ...,
) -> NDArray[_SCT]: ...
@@ -174,7 +188,7 @@ def partition(
def partition(
a: ArrayLike,
kth: _ArrayLikeInt_co,
- axis: None | int = ...,
+ axis: None | SupportsIndex = ...,
kind: _PartitionKind = ...,
order: None | str | Sequence[str] = ...,
) -> NDArray[Any]: ...
@@ -182,7 +196,7 @@ def partition(
def argpartition(
a: ArrayLike,
kth: _ArrayLikeInt_co,
- axis: None | int = ...,
+ axis: None | SupportsIndex = ...,
kind: _PartitionKind = ...,
order: None | str | Sequence[str] = ...,
) -> NDArray[intp]: ...
@@ -190,21 +204,21 @@ def argpartition(
@overload
def sort(
a: _ArrayLike[_SCT],
- axis: None | int = ...,
+ axis: None | SupportsIndex = ...,
kind: None | _SortKind = ...,
order: None | str | Sequence[str] = ...,
) -> NDArray[_SCT]: ...
@overload
def sort(
a: ArrayLike,
- axis: None | int = ...,
+ axis: None | SupportsIndex = ...,
kind: None | _SortKind = ...,
order: None | str | Sequence[str] = ...,
) -> NDArray[Any]: ...
def argsort(
a: ArrayLike,
- axis: None | int = ...,
+ axis: None | SupportsIndex = ...,
kind: None | _SortKind = ...,
order: None | str | Sequence[str] = ...,
) -> NDArray[intp]: ...
@@ -220,7 +234,7 @@ def argmax(
@overload
def argmax(
a: ArrayLike,
- axis: None | int = ...,
+ axis: None | SupportsIndex = ...,
out: None = ...,
*,
keepdims: bool = ...,
@@ -228,7 +242,7 @@ def argmax(
@overload
def argmax(
a: ArrayLike,
- axis: None | int = ...,
+ axis: None | SupportsIndex = ...,
out: _ArrayType = ...,
*,
keepdims: bool = ...,
@@ -245,7 +259,7 @@ def argmin(
@overload
def argmin(
a: ArrayLike,
- axis: None | int = ...,
+ axis: None | SupportsIndex = ...,
out: None = ...,
*,
keepdims: bool = ...,
@@ -253,7 +267,7 @@ def argmin(
@overload
def argmin(
a: ArrayLike,
- axis: None | int = ...,
+ axis: None | SupportsIndex = ...,
out: _ArrayType = ...,
*,
keepdims: bool = ...,
@@ -304,33 +318,33 @@ def squeeze(
@overload
def diagonal(
a: _ArrayLike[_SCT],
- offset: int = ...,
- axis1: int = ...,
- axis2: int = ..., # >= 2D array
+ offset: SupportsIndex = ...,
+ axis1: SupportsIndex = ...,
+ axis2: SupportsIndex = ..., # >= 2D array
) -> NDArray[_SCT]: ...
@overload
def diagonal(
a: ArrayLike,
- offset: int = ...,
- axis1: int = ...,
- axis2: int = ..., # >= 2D array
+ offset: SupportsIndex = ...,
+ axis1: SupportsIndex = ...,
+ axis2: SupportsIndex = ..., # >= 2D array
) -> NDArray[Any]: ...
@overload
def trace(
a: ArrayLike, # >= 2D array
- offset: int = ...,
- axis1: int = ...,
- axis2: int = ...,
+ offset: SupportsIndex = ...,
+ axis1: SupportsIndex = ...,
+ axis2: SupportsIndex = ...,
dtype: DTypeLike = ...,
out: None = ...,
) -> Any: ...
@overload
def trace(
a: ArrayLike, # >= 2D array
- offset: int = ...,
- axis1: int = ...,
- axis2: int = ...,
+ offset: SupportsIndex = ...,
+ axis1: SupportsIndex = ...,
+ axis2: SupportsIndex = ...,
dtype: DTypeLike = ...,
out: _ArrayType = ...,
) -> _ArrayType: ...
@@ -348,21 +362,21 @@ def shape(a: ArrayLike) -> _Shape: ...
def compress(
condition: _ArrayLikeBool_co, # 1D bool array
a: _ArrayLike[_SCT],
- axis: None | int = ...,
+ axis: None | SupportsIndex = ...,
out: None = ...,
) -> NDArray[_SCT]: ...
@overload
def compress(
condition: _ArrayLikeBool_co, # 1D bool array
a: ArrayLike,
- axis: None | int = ...,
+ axis: None | SupportsIndex = ...,
out: None = ...,
) -> NDArray[Any]: ...
@overload
def compress(
condition: _ArrayLikeBool_co, # 1D bool array
a: ArrayLike,
- axis: None | int = ...,
+ axis: None | SupportsIndex = ...,
out: _ArrayType = ...,
) -> _ArrayType: ...
@@ -541,35 +555,35 @@ def any(
@overload
def cumsum(
a: _ArrayLike[_SCT],
- axis: None | int = ...,
+ axis: None | SupportsIndex = ...,
dtype: None = ...,
out: None = ...,
) -> NDArray[_SCT]: ...
@overload
def cumsum(
a: ArrayLike,
- axis: None | int = ...,
+ axis: None | SupportsIndex = ...,
dtype: None = ...,
out: None = ...,
) -> NDArray[Any]: ...
@overload
def cumsum(
a: ArrayLike,
- axis: None | int = ...,
+ axis: None | SupportsIndex = ...,
dtype: _DTypeLike[_SCT] = ...,
out: None = ...,
) -> NDArray[_SCT]: ...
@overload
def cumsum(
a: ArrayLike,
- axis: None | int = ...,
+ axis: None | SupportsIndex = ...,
dtype: DTypeLike = ...,
out: None = ...,
) -> NDArray[Any]: ...
@overload
def cumsum(
a: ArrayLike,
- axis: None | int = ...,
+ axis: None | SupportsIndex = ...,
dtype: DTypeLike = ...,
out: _ArrayType = ...,
) -> _ArrayType: ...
@@ -659,61 +673,377 @@ def amin(
# Note that the same situation holds for all wrappers around
# `np.ufunc.reduce`, e.g. `np.sum()` (`.__add__()`).
+@overload
def prod(
- a: ArrayLike,
+ a: _ArrayLikeBool_co,
+ axis: None = ...,
+ dtype: None = ...,
+ out: None = ...,
+ keepdims: Literal[False] = ...,
+ initial: _NumberLike_co = ...,
+ where: _ArrayLikeBool_co = ...,
+) -> int_: ...
+@overload
+def prod(
+ a: _ArrayLikeUInt_co,
+ axis: None = ...,
+ dtype: None = ...,
+ out: None = ...,
+ keepdims: Literal[False] = ...,
+ initial: _NumberLike_co = ...,
+ where: _ArrayLikeBool_co = ...,
+) -> uint64: ...
+@overload
+def prod(
+ a: _ArrayLikeInt_co,
+ axis: None = ...,
+ dtype: None = ...,
+ out: None = ...,
+ keepdims: Literal[False] = ...,
+ initial: _NumberLike_co = ...,
+ where: _ArrayLikeBool_co = ...,
+) -> int64: ...
+@overload
+def prod(
+ a: _ArrayLikeFloat_co,
+ axis: None = ...,
+ dtype: None = ...,
+ out: None = ...,
+ keepdims: Literal[False] = ...,
+ initial: _NumberLike_co = ...,
+ where: _ArrayLikeBool_co = ...,
+) -> floating[Any]: ...
+@overload
+def prod(
+ a: _ArrayLikeComplex_co,
+ axis: None = ...,
+ dtype: None = ...,
+ out: None = ...,
+ keepdims: Literal[False] = ...,
+ initial: _NumberLike_co = ...,
+ where: _ArrayLikeBool_co = ...,
+) -> complexfloating[Any, Any]: ...
+@overload
+def prod(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
axis: None | _ShapeLike = ...,
- dtype: DTypeLike = ...,
- out: None | ndarray = ...,
+ dtype: None = ...,
+ out: None = ...,
+ keepdims: bool = ...,
+ initial: _NumberLike_co = ...,
+ where: _ArrayLikeBool_co = ...,
+) -> Any: ...
+@overload
+def prod(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ axis: None = ...,
+ dtype: _DTypeLike[_SCT] = ...,
+ out: None = ...,
+ keepdims: Literal[False] = ...,
+ initial: _NumberLike_co = ...,
+ where: _ArrayLikeBool_co = ...,
+) -> _SCT: ...
+@overload
+def prod(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ axis: None | _ShapeLike = ...,
+ dtype: None | DTypeLike = ...,
+ out: None = ...,
keepdims: bool = ...,
initial: _NumberLike_co = ...,
where: _ArrayLikeBool_co = ...,
) -> Any: ...
+@overload
+def prod(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ axis: None | _ShapeLike = ...,
+ dtype: None | DTypeLike = ...,
+ out: _ArrayType = ...,
+ keepdims: bool = ...,
+ initial: _NumberLike_co = ...,
+ where: _ArrayLikeBool_co = ...,
+) -> _ArrayType: ...
+@overload
def cumprod(
- a: ArrayLike,
- axis: None | int = ...,
+ a: _ArrayLikeBool_co,
+ axis: None | SupportsIndex = ...,
+ dtype: None = ...,
+ out: None = ...,
+) -> NDArray[int_]: ...
+@overload
+def cumprod(
+ a: _ArrayLikeUInt_co,
+ axis: None | SupportsIndex = ...,
+ dtype: None = ...,
+ out: None = ...,
+) -> NDArray[uint64]: ...
+@overload
+def cumprod(
+ a: _ArrayLikeInt_co,
+ axis: None | SupportsIndex = ...,
+ dtype: None = ...,
+ out: None = ...,
+) -> NDArray[int64]: ...
+@overload
+def cumprod(
+ a: _ArrayLikeFloat_co,
+ axis: None | SupportsIndex = ...,
+ dtype: None = ...,
+ out: None = ...,
+) -> NDArray[floating[Any]]: ...
+@overload
+def cumprod(
+ a: _ArrayLikeComplex_co,
+ axis: None | SupportsIndex = ...,
+ dtype: None = ...,
+ out: None = ...,
+) -> NDArray[complexfloating[Any, Any]]: ...
+@overload
+def cumprod(
+ a: _ArrayLikeObject_co,
+ axis: None | SupportsIndex = ...,
+ dtype: None = ...,
+ out: None = ...,
+) -> NDArray[object_]: ...
+@overload
+def cumprod(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ axis: None | SupportsIndex = ...,
+ dtype: _DTypeLike[_SCT] = ...,
+ out: None = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def cumprod(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ axis: None | SupportsIndex = ...,
+ dtype: DTypeLike = ...,
+ out: None = ...,
+) -> NDArray[Any]: ...
+@overload
+def cumprod(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ axis: None | SupportsIndex = ...,
dtype: DTypeLike = ...,
- out: None | ndarray = ...,
-) -> ndarray: ...
+ out: _ArrayType = ...,
+) -> _ArrayType: ...
def ndim(a: ArrayLike) -> int: ...
def size(a: ArrayLike, axis: None | int = ...) -> int: ...
+@overload
def around(
- a: ArrayLike,
- decimals: int = ...,
- out: None | ndarray = ...,
+ a: _BoolLike_co,
+ decimals: SupportsIndex = ...,
+ out: None = ...,
+) -> float16: ...
+@overload
+def around(
+ a: _SCT_uifcO,
+ decimals: SupportsIndex = ...,
+ out: None = ...,
+) -> _SCT_uifcO: ...
+@overload
+def around(
+ a: _ComplexLike_co | object_,
+ decimals: SupportsIndex = ...,
+ out: None = ...,
) -> Any: ...
+@overload
+def around(
+ a: _ArrayLikeBool_co,
+ decimals: SupportsIndex = ...,
+ out: None = ...,
+) -> NDArray[float16]: ...
+@overload
+def around(
+ a: _ArrayLike[_SCT_uifcO],
+ decimals: SupportsIndex = ...,
+ out: None = ...,
+) -> NDArray[_SCT_uifcO]: ...
+@overload
+def around(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ decimals: SupportsIndex = ...,
+ out: None = ...,
+) -> NDArray[Any]: ...
+@overload
+def around(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ decimals: SupportsIndex = ...,
+ out: _ArrayType = ...,
+) -> _ArrayType: ...
+@overload
def mean(
- a: ArrayLike,
+ a: _ArrayLikeFloat_co,
+ axis: None = ...,
+ dtype: None = ...,
+ out: None = ...,
+ keepdims: Literal[False] = ...,
+ *,
+ where: _ArrayLikeBool_co = ...,
+) -> floating[Any]: ...
+@overload
+def mean(
+ a: _ArrayLikeComplex_co,
+ axis: None = ...,
+ dtype: None = ...,
+ out: None = ...,
+ keepdims: Literal[False] = ...,
+ *,
+ where: _ArrayLikeBool_co = ...,
+) -> complexfloating[Any, Any]: ...
+@overload
+def mean(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ axis: None | _ShapeLike = ...,
+ dtype: None = ...,
+ out: None = ...,
+ keepdims: bool = ...,
+ *,
+ where: _ArrayLikeBool_co = ...,
+) -> Any: ...
+@overload
+def mean(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ axis: None = ...,
+ dtype: _DTypeLike[_SCT] = ...,
+ out: None = ...,
+ keepdims: Literal[False] = ...,
+ *,
+ where: _ArrayLikeBool_co = ...,
+) -> _SCT: ...
+@overload
+def mean(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
axis: None | _ShapeLike = ...,
dtype: DTypeLike = ...,
- out: None | ndarray = ...,
+ out: None = ...,
keepdims: bool = ...,
*,
where: _ArrayLikeBool_co = ...,
) -> Any: ...
+@overload
+def mean(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ axis: None | _ShapeLike = ...,
+ dtype: DTypeLike = ...,
+ out: _ArrayType = ...,
+ keepdims: bool = ...,
+ *,
+ where: _ArrayLikeBool_co = ...,
+) -> _ArrayType: ...
+@overload
def std(
- a: ArrayLike,
+ a: _ArrayLikeComplex_co,
+ axis: None = ...,
+ dtype: None = ...,
+ out: None = ...,
+ ddof: float = ...,
+ keepdims: Literal[False] = ...,
+ *,
+ where: _ArrayLikeBool_co = ...,
+) -> floating[Any]: ...
+@overload
+def std(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ axis: None | _ShapeLike = ...,
+ dtype: None = ...,
+ out: None = ...,
+ ddof: float = ...,
+ keepdims: bool = ...,
+ *,
+ where: _ArrayLikeBool_co = ...,
+) -> Any: ...
+@overload
+def std(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ axis: None = ...,
+ dtype: _DTypeLike[_SCT] = ...,
+ out: None = ...,
+ ddof: float = ...,
+ keepdims: Literal[False] = ...,
+ *,
+ where: _ArrayLikeBool_co = ...,
+) -> _SCT: ...
+@overload
+def std(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
axis: None | _ShapeLike = ...,
dtype: DTypeLike = ...,
- out: None | ndarray = ...,
- ddof: int = ...,
+ out: None = ...,
+ ddof: float = ...,
keepdims: bool = ...,
*,
where: _ArrayLikeBool_co = ...,
) -> Any: ...
+@overload
+def std(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ axis: None | _ShapeLike = ...,
+ dtype: DTypeLike = ...,
+ out: _ArrayType = ...,
+ ddof: float = ...,
+ keepdims: bool = ...,
+ *,
+ where: _ArrayLikeBool_co = ...,
+) -> _ArrayType: ...
+@overload
def var(
- a: ArrayLike,
+ a: _ArrayLikeComplex_co,
+ axis: None = ...,
+ dtype: None = ...,
+ out: None = ...,
+ ddof: float = ...,
+ keepdims: Literal[False] = ...,
+ *,
+ where: _ArrayLikeBool_co = ...,
+) -> floating[Any]: ...
+@overload
+def var(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ axis: None | _ShapeLike = ...,
+ dtype: None = ...,
+ out: None = ...,
+ ddof: float = ...,
+ keepdims: bool = ...,
+ *,
+ where: _ArrayLikeBool_co = ...,
+) -> Any: ...
+@overload
+def var(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ axis: None = ...,
+ dtype: _DTypeLike[_SCT] = ...,
+ out: None = ...,
+ ddof: float = ...,
+ keepdims: Literal[False] = ...,
+ *,
+ where: _ArrayLikeBool_co = ...,
+) -> _SCT: ...
+@overload
+def var(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
axis: None | _ShapeLike = ...,
dtype: DTypeLike = ...,
- out: None | ndarray = ...,
- ddof: int = ...,
+ out: None = ...,
+ ddof: float = ...,
keepdims: bool = ...,
*,
where: _ArrayLikeBool_co = ...,
) -> Any: ...
+@overload
+def var(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ axis: None | _ShapeLike = ...,
+ dtype: DTypeLike = ...,
+ out: _ArrayType = ...,
+ ddof: float = ...,
+ keepdims: bool = ...,
+ *,
+ where: _ArrayLikeBool_co = ...,
+) -> _ArrayType: ...
diff --git a/numpy/core/include/numpy/ndarraytypes.h b/numpy/core/include/numpy/ndarraytypes.h
index 35d82ec03..c295f34bb 100644
--- a/numpy/core/include/numpy/ndarraytypes.h
+++ b/numpy/core/include/numpy/ndarraytypes.h
@@ -834,11 +834,9 @@ typedef int (PyArray_FinalizeFunc)(PyArrayObject *, PyObject *);
* 1-d array is C_CONTIGUOUS it is also F_CONTIGUOUS. Arrays with
* more then one dimension can be C_CONTIGUOUS and F_CONTIGUOUS
* at the same time if they have either zero or one element.
- * If NPY_RELAXED_STRIDES_CHECKING is set, a higher dimensional
- * array is always C_CONTIGUOUS and F_CONTIGUOUS if it has zero elements
- * and the array is contiguous if ndarray.squeeze() is contiguous.
- * I.e. dimensions for which `ndarray.shape[dimension] == 1` are
- * ignored.
+ * A higher dimensional array always has the same contiguity flags as
+ * `array.squeeze()`; dimensions with `array.shape[dimension] == 1` are
+ * effectively ignored when checking for contiguity.
*/
/*
diff --git a/numpy/core/multiarray.pyi b/numpy/core/multiarray.pyi
index 5e924c2f4..9117ce17c 100644
--- a/numpy/core/multiarray.pyi
+++ b/numpy/core/multiarray.pyi
@@ -11,6 +11,7 @@ from typing import (
SupportsIndex,
final,
Final,
+ Protocol,
)
from numpy import (
@@ -77,6 +78,8 @@ from numpy.typing import (
_TD64Like_co,
)
+_T_co = TypeVar("_T_co", covariant=True)
+_T_contra = TypeVar("_T_contra", contravariant=True)
_SCT = TypeVar("_SCT", bound=generic)
_ArrayType = TypeVar("_ArrayType", bound=NDArray[Any])
@@ -105,6 +108,10 @@ _RollKind = L[ # `raise` is deliberately excluded
"modifiedpreceding",
]
+class _SupportsLenAndGetItem(Protocol[_T_contra, _T_co]):
+ def __len__(self) -> int: ...
+ def __getitem__(self, key: _T_contra, /) -> _T_co: ...
+
__all__: list[str]
ALLOW_THREADS: Final[int] # 0 or 1 (system-specific)
@@ -292,6 +299,7 @@ def ravel_multi_index(
order: _OrderCF = ...,
) -> NDArray[intp]: ...
+# NOTE: Allow any sequence of array-like objects
@overload
def concatenate( # type: ignore[misc]
arrays: _ArrayLike[_SCT],
@@ -304,7 +312,7 @@ def concatenate( # type: ignore[misc]
) -> NDArray[_SCT]: ...
@overload
def concatenate( # type: ignore[misc]
- arrays: ArrayLike,
+ arrays: _SupportsLenAndGetItem[int, ArrayLike],
/,
axis: None | SupportsIndex = ...,
out: None = ...,
@@ -314,7 +322,7 @@ def concatenate( # type: ignore[misc]
) -> NDArray[Any]: ...
@overload
def concatenate( # type: ignore[misc]
- arrays: ArrayLike,
+ arrays: _SupportsLenAndGetItem[int, ArrayLike],
/,
axis: None | SupportsIndex = ...,
out: None = ...,
@@ -324,7 +332,7 @@ def concatenate( # type: ignore[misc]
) -> NDArray[_SCT]: ...
@overload
def concatenate( # type: ignore[misc]
- arrays: ArrayLike,
+ arrays: _SupportsLenAndGetItem[int, ArrayLike],
/,
axis: None | SupportsIndex = ...,
out: None = ...,
@@ -334,7 +342,7 @@ def concatenate( # type: ignore[misc]
) -> NDArray[Any]: ...
@overload
def concatenate(
- arrays: ArrayLike,
+ arrays: _SupportsLenAndGetItem[int, ArrayLike],
/,
axis: None | SupportsIndex = ...,
out: _ArrayType = ...,
diff --git a/numpy/core/numeric.py b/numpy/core/numeric.py
index 3e9b6c414..2c5265709 100644
--- a/numpy/core/numeric.py
+++ b/numpy/core/numeric.py
@@ -364,7 +364,7 @@ def full_like(a, fill_value, dtype=None, order='K', subok=True, shape=None):
a : array_like
The shape and data-type of `a` define these same attributes of
the returned array.
- fill_value : scalar
+ fill_value : array_like
Fill value.
dtype : data-type, optional
Overrides the data type of the result.
@@ -412,6 +412,12 @@ def full_like(a, fill_value, dtype=None, order='K', subok=True, shape=None):
>>> np.full_like(y, 0.1)
array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1])
+ >>> y = np.zeros([2, 2, 3], dtype=int)
+ >>> np.full_like(y, [0, 0, 255])
+ array([[[ 0, 0, 255],
+ [ 0, 0, 255]],
+ [[ 0, 0, 255],
+ [ 0, 0, 255]]])
"""
res = empty_like(a, dtype=dtype, order=order, subok=subok, shape=shape)
multiarray.copyto(res, fill_value, casting='unsafe')
diff --git a/numpy/core/overrides.py b/numpy/core/overrides.py
index 840cf38c9..cb550152e 100644
--- a/numpy/core/overrides.py
+++ b/numpy/core/overrides.py
@@ -12,7 +12,7 @@ ARRAY_FUNCTION_ENABLED = bool(
int(os.environ.get('NUMPY_EXPERIMENTAL_ARRAY_FUNCTION', 1)))
array_function_like_doc = (
- """like : array_like
+ """like : array_like, optional
Reference object to allow the creation of arrays which are not
NumPy arrays. If an array-like passed in as ``like`` supports
the ``__array_function__`` protocol, the result will be defined
diff --git a/numpy/core/setup.py b/numpy/core/setup.py
index c5465196f..f6b31075d 100644
--- a/numpy/core/setup.py
+++ b/numpy/core/setup.py
@@ -17,6 +17,10 @@ from setup_common import * # noqa: F403
# Set to True to enable relaxed strides checking. This (mostly) means
# that `strides[dim]` is ignored if `shape[dim] == 1` when setting flags.
NPY_RELAXED_STRIDES_CHECKING = (os.environ.get('NPY_RELAXED_STRIDES_CHECKING', "1") != "0")
+if not NPY_RELAXED_STRIDES_CHECKING:
+ raise SystemError(
+ "Support for NPY_RELAXED_STRIDES_CHECKING=0 has been removed as of "
+ "NumPy 1.23. This error will eventually be removed entirely.")
# Put NPY_RELAXED_STRIDES_DEBUG=1 in the environment if you want numpy to use a
# bogus value for affected strides in order to help smoke out bad stride usage
@@ -482,13 +486,9 @@ def configuration(parent_package='',top_path=None):
if can_link_svml():
moredefs.append(('NPY_CAN_LINK_SVML', 1))
- # Use relaxed stride checking
- if NPY_RELAXED_STRIDES_CHECKING:
- moredefs.append(('NPY_RELAXED_STRIDES_CHECKING', 1))
- else:
- moredefs.append(('NPY_RELAXED_STRIDES_CHECKING', 0))
-
- # Use bogus stride debug aid when relaxed strides are enabled
+ # Use bogus stride debug aid to flush out bugs where users use
+ # strides of dimensions with length 1 to index a full contiguous
+ # array.
if NPY_RELAXED_STRIDES_DEBUG:
moredefs.append(('NPY_RELAXED_STRIDES_DEBUG', 1))
else:
@@ -584,9 +584,6 @@ def configuration(parent_package='',top_path=None):
moredefs.extend(cocache.check_ieee_macros(config_cmd)[1])
moredefs.extend(cocache.check_complex(config_cmd, mathlibs)[1])
- if NPY_RELAXED_STRIDES_CHECKING:
- moredefs.append(('NPY_RELAXED_STRIDES_CHECKING', 1))
-
if NPY_RELAXED_STRIDES_DEBUG:
moredefs.append(('NPY_RELAXED_STRIDES_DEBUG', 1))
@@ -833,7 +830,7 @@ def configuration(parent_package='',top_path=None):
multiarray_deps = [
join('src', 'multiarray', 'abstractdtypes.h'),
join('src', 'multiarray', 'arrayobject.h'),
- join('src', 'multiarray', 'arraytypes.h'),
+ join('src', 'multiarray', 'arraytypes.h.src'),
join('src', 'multiarray', 'arrayfunction_override.h'),
join('src', 'multiarray', 'array_coercion.h'),
join('src', 'multiarray', 'array_method.h'),
@@ -895,7 +892,9 @@ def configuration(parent_package='',top_path=None):
join('src', 'multiarray', 'abstractdtypes.c'),
join('src', 'multiarray', 'alloc.c'),
join('src', 'multiarray', 'arrayobject.c'),
+ join('src', 'multiarray', 'arraytypes.h.src'),
join('src', 'multiarray', 'arraytypes.c.src'),
+ join('src', 'multiarray', 'argfunc.dispatch.c.src'),
join('src', 'multiarray', 'array_coercion.c'),
join('src', 'multiarray', 'array_method.c'),
join('src', 'multiarray', 'array_assign_scalar.c'),
@@ -954,7 +953,7 @@ def configuration(parent_package='',top_path=None):
join('src', 'npysort', 'timsort.cpp'),
join('src', 'npysort', 'heapsort.cpp'),
join('src', 'npysort', 'radixsort.cpp'),
- join('src', 'common', 'npy_partition.h.src'),
+ join('src', 'common', 'npy_partition.h'),
join('src', 'npysort', 'selection.cpp'),
join('src', 'common', 'npy_binsearch.h'),
join('src', 'npysort', 'binsearch.cpp'),
@@ -965,7 +964,7 @@ def configuration(parent_package='',top_path=None):
join('src', 'multiarray', 'textreading', 'rows.c'),
join('src', 'multiarray', 'textreading', 'stream_pyobject.c'),
join('src', 'multiarray', 'textreading', 'str_to_int.c'),
- join('src', 'multiarray', 'textreading', 'tokenize.c.src'),
+ join('src', 'multiarray', 'textreading', 'tokenize.cpp'),
]
#######################################################################
diff --git a/numpy/core/src/common/array_assign.c b/numpy/core/src/common/array_assign.c
index b7495fc09..956e55d30 100644
--- a/numpy/core/src/common/array_assign.c
+++ b/numpy/core/src/common/array_assign.c
@@ -110,7 +110,6 @@ raw_array_is_aligned(int ndim, npy_intp const *shape,
int i;
for (i = 0; i < ndim; i++) {
-#if NPY_RELAXED_STRIDES_CHECKING
/* skip dim == 1 as it is not required to have stride 0 */
if (shape[i] > 1) {
/* if shape[i] == 1, the stride is never used */
@@ -120,9 +119,6 @@ raw_array_is_aligned(int ndim, npy_intp const *shape,
/* an array with zero elements is always aligned */
return 1;
}
-#else /* not NPY_RELAXED_STRIDES_CHECKING */
- align_check |= (npy_uintp)strides[i];
-#endif /* not NPY_RELAXED_STRIDES_CHECKING */
}
return npy_is_aligned((void *)align_check, alignment);
diff --git a/numpy/core/src/common/npy_partition.h b/numpy/core/src/common/npy_partition.h
new file mode 100644
index 000000000..85a0727c5
--- /dev/null
+++ b/numpy/core/src/common/npy_partition.h
@@ -0,0 +1,27 @@
+#ifndef NUMPY_CORE_SRC_COMMON_PARTITION_H_
+#define NUMPY_CORE_SRC_COMMON_PARTITION_H_
+
+#include "npy_sort.h"
+
+/* Python include is for future object sorts */
+#include <Python.h>
+
+#include <numpy/ndarraytypes.h>
+#include <numpy/npy_common.h>
+
+#define NPY_MAX_PIVOT_STACK 50
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+NPY_NO_EXPORT PyArray_PartitionFunc *
+get_partition_func(int type, NPY_SELECTKIND which);
+NPY_NO_EXPORT PyArray_ArgPartitionFunc *
+get_argpartition_func(int type, NPY_SELECTKIND which);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/numpy/core/src/common/npy_partition.h.src b/numpy/core/src/common/npy_partition.h.src
deleted file mode 100644
index 5ba652b41..000000000
--- a/numpy/core/src/common/npy_partition.h.src
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- *****************************************************************************
- ** IMPORTANT NOTE for npy_partition.h.src -> npy_partition.h **
- *****************************************************************************
- * The template file loops.h.src is not automatically converted into
- * loops.h by the build system. If you edit this file, you must manually
- * do the conversion using numpy/distutils/conv_template.py from the
- * command line as follows:
- *
- * $ cd <NumPy source root directory>
- * $ python numpy/distutils/conv_template.py numpy/core/src/private/npy_partition.h.src
- * $
- */
-
-
-#ifndef __NPY_PARTITION_H__
-#define __NPY_PARTITION_H__
-
-
-#include "npy_sort.h"
-
-/* Python include is for future object sorts */
-#include <Python.h>
-#include <numpy/npy_common.h>
-#include <numpy/ndarraytypes.h>
-
-#define ARRAY_SIZE(a) (sizeof(a)/sizeof(a[0]))
-
-#define NPY_MAX_PIVOT_STACK 50
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/**begin repeat
- *
- * #TYPE = BOOL, BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG,
- * LONGLONG, ULONGLONG, HALF, FLOAT, DOUBLE, LONGDOUBLE,
- * CFLOAT, CDOUBLE, CLONGDOUBLE#
- * #suff = bool, byte, ubyte, short, ushort, int, uint, long, ulong,
- * longlong, ulonglong, half, float, double, longdouble,
- * cfloat, cdouble, clongdouble#
- * #type = npy_bool, npy_byte, npy_ubyte, npy_short, npy_ushort, npy_int,
- * npy_uint, npy_long, npy_ulong, npy_longlong, npy_ulonglong,
- * npy_ushort, npy_float, npy_double, npy_longdouble, npy_cfloat,
- * npy_cdouble, npy_clongdouble#
- */
-
-NPY_NO_EXPORT int introselect_@suff@(@type@ *v, npy_intp num,
- npy_intp kth,
- npy_intp * pivots,
- npy_intp * npiv,
- void *NOT_USED);
-NPY_NO_EXPORT int aintroselect_@suff@(@type@ *v, npy_intp* tosort, npy_intp num,
- npy_intp kth,
- npy_intp * pivots,
- npy_intp * npiv,
- void *NOT_USED);
-
-
-/**end repeat**/
-
-#ifdef __cplusplus
-}
-#endif
-
-typedef struct {
- int typenum;
- PyArray_PartitionFunc * part[NPY_NSELECTS];
- PyArray_ArgPartitionFunc * argpart[NPY_NSELECTS];
-} part_map;
-
-static part_map _part_map[] = {
-/**begin repeat
- *
- * #TYPE = BOOL, BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG,
- * LONGLONG, ULONGLONG, HALF, FLOAT, DOUBLE, LONGDOUBLE,
- * CFLOAT, CDOUBLE, CLONGDOUBLE#
- * #suff = bool, byte, ubyte, short, ushort, int, uint, long, ulong,
- * longlong, ulonglong, half, float, double, longdouble,
- * cfloat, cdouble, clongdouble#
- * #type = npy_bool, npy_byte, npy_ubyte, npy_short, npy_ushort, npy_int,
- * npy_uint, npy_long, npy_ulong, npy_longlong, npy_ulonglong,
- * npy_ushort, npy_float, npy_double, npy_longdouble, npy_cfloat,
- * npy_cdouble, npy_clongdouble#
- */
- {
- NPY_@TYPE@,
- {
- (PyArray_PartitionFunc *)&introselect_@suff@,
- },
- {
- (PyArray_ArgPartitionFunc *)&aintroselect_@suff@,
- }
- },
-/**end repeat**/
-};
-
-
-static NPY_INLINE PyArray_PartitionFunc *
-get_partition_func(int type, NPY_SELECTKIND which)
-{
- npy_intp i;
- npy_intp ntypes = ARRAY_SIZE(_part_map);
-
- if (which >= NPY_NSELECTS) {
- return NULL;
- }
- for (i = 0; i < ntypes; i++) {
- if (type == _part_map[i].typenum) {
- return _part_map[i].part[which];
- }
- }
- return NULL;
-}
-
-
-static NPY_INLINE PyArray_ArgPartitionFunc *
-get_argpartition_func(int type, NPY_SELECTKIND which)
-{
- npy_intp i;
- npy_intp ntypes = ARRAY_SIZE(_part_map);
-
- for (i = 0; i < ntypes; i++) {
- if (type == _part_map[i].typenum) {
- return _part_map[i].argpart[which];
- }
- }
- return NULL;
-}
-
-#undef ARRAY_SIZE
-
-#endif
diff --git a/numpy/core/src/multiarray/argfunc.dispatch.c.src b/numpy/core/src/multiarray/argfunc.dispatch.c.src
new file mode 100644
index 000000000..cbfaebdb4
--- /dev/null
+++ b/numpy/core/src/multiarray/argfunc.dispatch.c.src
@@ -0,0 +1,394 @@
+/* -*- c -*- */
+/*@targets
+ ** $maxopt baseline
+ ** sse2 sse42 xop avx2 avx512_skx
+ ** vsx2
+ ** neon asimd
+ **/
+
+#define NPY_NO_DEPRECATED_API NPY_API_VERSION
+
+#include "simd/simd.h"
+#include "numpy/npy_math.h"
+
+#include "arraytypes.h"
+
+#define MIN(a,b) (((a)<(b))?(a):(b))
+
+#if NPY_SIMD
+#if NPY_SIMD > 512 || NPY_SIMD < 0
+ #error "the following 8/16-bit argmax kernel isn't applicable for larger SIMD"
+ // TODO: add special loop for large SIMD width.
+    // i.e. avoiding unroll by x4 should be numerically safe up to 2048-bit SIMD width
+ // or maybe expand the indices to 32|64-bit vectors(slower).
+#endif
+/**begin repeat
+ * #sfx = u8, s8, u16, s16#
+ * #usfx = u8, u8, u16, u16#
+ * #bsfx = b8, b8, b16, b16#
+ * #idx_max = NPY_MAX_UINT8*2, NPY_MAX_UINT16*2#
+ */
+/**begin repeat1
+ * #intrin = cmpgt, cmplt#
+ * #func = argmax, argmin#
+ * #op = >, <#
+ */
+static inline npy_intp
+simd_@func@_@sfx@(npyv_lanetype_@sfx@ *ip, npy_intp len)
+{
+ npyv_lanetype_@sfx@ s_acc = *ip;
+ npy_intp ret_idx = 0, i = 0;
+
+ const int vstep = npyv_nlanes_@sfx@;
+ const int wstep = vstep*4;
+ npyv_lanetype_@usfx@ d_vindices[npyv_nlanes_@sfx@*4];
+ for (int vi = 0; vi < wstep; ++vi) {
+ d_vindices[vi] = vi;
+ }
+ const npyv_@usfx@ vindices_0 = npyv_load_@usfx@(d_vindices);
+ const npyv_@usfx@ vindices_1 = npyv_load_@usfx@(d_vindices + vstep);
+ const npyv_@usfx@ vindices_2 = npyv_load_@usfx@(d_vindices + vstep*2);
+ const npyv_@usfx@ vindices_3 = npyv_load_@usfx@(d_vindices + vstep*3);
+
+ const npy_intp max_block = @idx_max@*wstep & -wstep;
+ npy_intp len0 = len & -wstep;
+ while (i < len0) {
+ npyv_@sfx@ acc = npyv_setall_@sfx@(s_acc);
+ npyv_@usfx@ acc_indices = npyv_zero_@usfx@();
+ npyv_@usfx@ acc_indices_scale = npyv_zero_@usfx@();
+
+ npy_intp n = i + MIN(len0 - i, max_block);
+ npy_intp ik = i, i2 = 0;
+ for (; i < n; i += wstep, ++i2) {
+ npyv_@usfx@ vi = npyv_setall_@usfx@((npyv_lanetype_@usfx@)i2);
+ npyv_@sfx@ a = npyv_load_@sfx@(ip + i);
+ npyv_@sfx@ b = npyv_load_@sfx@(ip + i + vstep);
+ npyv_@sfx@ c = npyv_load_@sfx@(ip + i + vstep*2);
+ npyv_@sfx@ d = npyv_load_@sfx@(ip + i + vstep*3);
+
+ // reverse to put lowest index first in case of matched values
+ npyv_@bsfx@ m_ba = npyv_@intrin@_@sfx@(b, a);
+ npyv_@bsfx@ m_dc = npyv_@intrin@_@sfx@(d, c);
+ npyv_@sfx@ x_ba = npyv_select_@sfx@(m_ba, b, a);
+ npyv_@sfx@ x_dc = npyv_select_@sfx@(m_dc, d, c);
+ npyv_@bsfx@ m_dcba = npyv_@intrin@_@sfx@(x_dc, x_ba);
+ npyv_@sfx@ x_dcba = npyv_select_@sfx@(m_dcba, x_dc, x_ba);
+
+ npyv_@usfx@ idx_ba = npyv_select_@usfx@(m_ba, vindices_1, vindices_0);
+ npyv_@usfx@ idx_dc = npyv_select_@usfx@(m_dc, vindices_3, vindices_2);
+ npyv_@usfx@ idx_dcba = npyv_select_@usfx@(m_dcba, idx_dc, idx_ba);
+ npyv_@bsfx@ m_acc = npyv_@intrin@_@sfx@(x_dcba, acc);
+ acc = npyv_select_@sfx@(m_acc, x_dcba, acc);
+ acc_indices = npyv_select_@usfx@(m_acc, idx_dcba, acc_indices);
+ acc_indices_scale = npyv_select_@usfx@(m_acc, vi, acc_indices_scale);
+ }
+ // reduce
+ npyv_lanetype_@sfx@ dacc[npyv_nlanes_@sfx@];
+ npyv_lanetype_@usfx@ dacc_i[npyv_nlanes_@sfx@];
+ npyv_lanetype_@usfx@ dacc_s[npyv_nlanes_@sfx@];
+ npyv_store_@sfx@(dacc, acc);
+ npyv_store_@usfx@(dacc_i, acc_indices);
+ npyv_store_@usfx@(dacc_s, acc_indices_scale);
+
+ for (int vi = 0; vi < vstep; ++vi) {
+ if (dacc[vi] @op@ s_acc) {
+ s_acc = dacc[vi];
+ ret_idx = ik + (npy_intp)dacc_s[vi]*wstep + dacc_i[vi];
+ }
+ }
+ // get the lowest index in case of matched values
+ for (int vi = 0; vi < vstep; ++vi) {
+ npy_intp idx = ik + (npy_intp)dacc_s[vi]*wstep + dacc_i[vi];
+ if (s_acc == dacc[vi] && ret_idx > idx) {
+ ret_idx = idx;
+ }
+ }
+ }
+ for (; i < len; ++i) {
+ npyv_lanetype_@sfx@ a = ip[i];
+ if (a @op@ s_acc) {
+ s_acc = a;
+ ret_idx = i;
+ }
+ }
+ return ret_idx;
+}
+/**end repeat1**/
+/**end repeat**/
+#endif
+
+/**begin repeat
+ * #sfx = u32, s32, u64, s64, f32, f64#
+ * #usfx = u32, u32, u64, u64, u32, u64#
+ * #bsfx = b32, b32, b64, b64, b32, b64#
+ * #is_fp = 0*4, 1*2#
+ * #is_idx32 = 1*2, 0*2, 1, 0#
+ * #chk_simd = NPY_SIMD*5, NPY_SIMD_F64#
+ */
+#if @chk_simd@
+/**begin repeat1
+ * #intrin = cmpgt, cmplt#
+ * #func = argmax, argmin#
+ * #op = >, <#
+ * #iop = <, >#
+ */
+static inline npy_intp
+simd_@func@_@sfx@(npyv_lanetype_@sfx@ *ip, npy_intp len)
+{
+ npyv_lanetype_@sfx@ s_acc = *ip;
+ npy_intp ret_idx = 0, i = 0;
+ const int vstep = npyv_nlanes_@sfx@;
+ const int wstep = vstep*4;
+ // loop by a scalar will perform better for small arrays
+ if (len < wstep) {
+ goto scalar_loop;
+ }
+ npy_intp len0 = len;
+ // guard against wraparound vector addition for 32-bit indices
+    // in case the array length is larger than 16 GB
+#if @is_idx32@
+ if (len0 > NPY_MAX_UINT32) {
+ len0 = NPY_MAX_UINT32;
+ }
+#endif
+ // create index for vector indices
+ npyv_lanetype_@usfx@ d_vindices[npyv_nlanes_@sfx@*4];
+ for (int vi = 0; vi < wstep; ++vi) {
+ d_vindices[vi] = vi;
+ }
+ const npyv_@usfx@ vindices_0 = npyv_load_@usfx@(d_vindices);
+ const npyv_@usfx@ vindices_1 = npyv_load_@usfx@(d_vindices + vstep);
+ const npyv_@usfx@ vindices_2 = npyv_load_@usfx@(d_vindices + vstep*2);
+ const npyv_@usfx@ vindices_3 = npyv_load_@usfx@(d_vindices + vstep*3);
+ // initialize vector accumulator for highest values and its indexes
+ npyv_@usfx@ acc_indices = npyv_zero_@usfx@();
+ npyv_@sfx@ acc = npyv_setall_@sfx@(s_acc);
+ for (npy_intp n = len0 & -wstep; i < n; i += wstep) {
+ npyv_@usfx@ vi = npyv_setall_@usfx@((npyv_lanetype_@usfx@)i);
+ npyv_@sfx@ a = npyv_load_@sfx@(ip + i);
+ npyv_@sfx@ b = npyv_load_@sfx@(ip + i + vstep);
+ npyv_@sfx@ c = npyv_load_@sfx@(ip + i + vstep*2);
+ npyv_@sfx@ d = npyv_load_@sfx@(ip + i + vstep*3);
+
+ // reverse to put lowest index first in case of matched values
+ npyv_@bsfx@ m_ba = npyv_@intrin@_@sfx@(b, a);
+ npyv_@bsfx@ m_dc = npyv_@intrin@_@sfx@(d, c);
+ npyv_@sfx@ x_ba = npyv_select_@sfx@(m_ba, b, a);
+ npyv_@sfx@ x_dc = npyv_select_@sfx@(m_dc, d, c);
+ npyv_@bsfx@ m_dcba = npyv_@intrin@_@sfx@(x_dc, x_ba);
+ npyv_@sfx@ x_dcba = npyv_select_@sfx@(m_dcba, x_dc, x_ba);
+
+ npyv_@usfx@ idx_ba = npyv_select_@usfx@(m_ba, vindices_1, vindices_0);
+ npyv_@usfx@ idx_dc = npyv_select_@usfx@(m_dc, vindices_3, vindices_2);
+ npyv_@usfx@ idx_dcba = npyv_select_@usfx@(m_dcba, idx_dc, idx_ba);
+ npyv_@bsfx@ m_acc = npyv_@intrin@_@sfx@(x_dcba, acc);
+ acc = npyv_select_@sfx@(m_acc, x_dcba, acc);
+ acc_indices = npyv_select_@usfx@(m_acc, npyv_add_@usfx@(vi, idx_dcba), acc_indices);
+
+ #if @is_fp@
+ npyv_@bsfx@ nnan_a = npyv_notnan_@sfx@(a);
+ npyv_@bsfx@ nnan_b = npyv_notnan_@sfx@(b);
+ npyv_@bsfx@ nnan_c = npyv_notnan_@sfx@(c);
+ npyv_@bsfx@ nnan_d = npyv_notnan_@sfx@(d);
+ npyv_@bsfx@ nnan_ab = npyv_and_@bsfx@(nnan_a, nnan_b);
+ npyv_@bsfx@ nnan_cd = npyv_and_@bsfx@(nnan_c, nnan_d);
+ npy_uint64 nnan = npyv_tobits_@bsfx@(npyv_and_@bsfx@(nnan_ab, nnan_cd));
+ if (nnan != ((1LL << vstep) - 1)) {
+ npy_uint64 nnan_4[4];
+ nnan_4[0] = npyv_tobits_@bsfx@(nnan_a);
+ nnan_4[1] = npyv_tobits_@bsfx@(nnan_b);
+ nnan_4[2] = npyv_tobits_@bsfx@(nnan_c);
+ nnan_4[3] = npyv_tobits_@bsfx@(nnan_d);
+ for (int ni = 0; ni < 4; ++ni) {
+ for (int vi = 0; vi < vstep; ++vi) {
+ if (!((nnan_4[ni] >> vi) & 1)) {
+ return i + ni*vstep + vi;
+ }
+ }
+ }
+ }
+ #endif
+ }
+ for (npy_intp n = len0 & -vstep; i < n; i += vstep) {
+ npyv_@usfx@ vi = npyv_setall_@usfx@((npyv_lanetype_@usfx@)i);
+ npyv_@sfx@ a = npyv_load_@sfx@(ip + i);
+ npyv_@bsfx@ m_acc = npyv_@intrin@_@sfx@(a, acc);
+ acc = npyv_select_@sfx@(m_acc, a, acc);
+ acc_indices = npyv_select_@usfx@(m_acc, npyv_add_@usfx@(vi, vindices_0), acc_indices);
+ #if @is_fp@
+ npyv_@bsfx@ nnan_a = npyv_notnan_@sfx@(a);
+ npy_uint64 nnan = npyv_tobits_@bsfx@(nnan_a);
+ if (nnan != ((1LL << vstep) - 1)) {
+ for (int vi = 0; vi < vstep; ++vi) {
+ if (!((nnan >> vi) & 1)) {
+ return i + vi;
+ }
+ }
+ }
+ #endif
+ }
+
+ // reduce
+ npyv_lanetype_@sfx@ dacc[npyv_nlanes_@sfx@];
+ npyv_lanetype_@usfx@ dacc_i[npyv_nlanes_@sfx@];
+ npyv_store_@usfx@(dacc_i, acc_indices);
+ npyv_store_@sfx@(dacc, acc);
+
+ s_acc = dacc[0];
+ ret_idx = dacc_i[0];
+ for (int vi = 1; vi < vstep; ++vi) {
+ if (dacc[vi] @op@ s_acc) {
+ s_acc = dacc[vi];
+ ret_idx = (npy_intp)dacc_i[vi];
+ }
+ }
+ // get the lowest index in case of matched values
+ for (int vi = 0; vi < vstep; ++vi) {
+ if (s_acc == dacc[vi] && ret_idx > (npy_intp)dacc_i[vi]) {
+ ret_idx = dacc_i[vi];
+ }
+ }
+scalar_loop:
+ for (; i < len; ++i) {
+ npyv_lanetype_@sfx@ a = ip[i];
+ #if @is_fp@
+ if (!(a @iop@= s_acc)) { // negated, for correct nan handling
+ #else
+ if (a @op@ s_acc) {
+ #endif
+ s_acc = a;
+ ret_idx = i;
+ #if @is_fp@
+ if (npy_isnan(s_acc)) {
+            // nan encountered, it's maximal|minimal
+ return ret_idx;
+ }
+ #endif
+ }
+ }
+ return ret_idx;
+}
+/**end repeat1**/
+#endif // chk_simd
+/**end repeat**/
+
+/**begin repeat
+ * #TYPE = UBYTE, USHORT, UINT, ULONG, ULONGLONG,
+ * BYTE, SHORT, INT, LONG, LONGLONG,
+ * FLOAT, DOUBLE, LONGDOUBLE#
+ *
+ * #BTYPE = BYTE, SHORT, INT, LONG, LONGLONG,
+ * BYTE, SHORT, INT, LONG, LONGLONG,
+ * FLOAT, DOUBLE, LONGDOUBLE#
+ * #type = npy_ubyte, npy_ushort, npy_uint, npy_ulong, npy_ulonglong,
+ * npy_byte, npy_short, npy_int, npy_long, npy_longlong,
+ * npy_float, npy_double, npy_longdouble#
+ *
+ * #is_fp = 0*10, 1*3#
+ * #is_unsigned = 1*5, 0*5, 0*3#
+ */
+#undef TO_SIMD_SFX
+#if 0
+/**begin repeat1
+ * #len = 8, 16, 32, 64#
+ */
+#elif NPY_SIMD && NPY_BITSOF_@BTYPE@ == @len@
+ #if @is_fp@
+ #define TO_SIMD_SFX(X) X##_f@len@
+ #if NPY_BITSOF_@BTYPE@ == 64 && !NPY_SIMD_F64
+ #undef TO_SIMD_SFX
+ #endif
+ #elif @is_unsigned@
+ #define TO_SIMD_SFX(X) X##_u@len@
+ #else
+ #define TO_SIMD_SFX(X) X##_s@len@
+ #endif
+/**end repeat1**/
+#endif
+
+/**begin repeat1
+ * #func = argmax, argmin#
+ * #op = >, <#
+ * #iop = <, >#
+ */
+NPY_NO_EXPORT int NPY_CPU_DISPATCH_CURFX(@TYPE@_@func@)
+(@type@ *ip, npy_intp n, npy_intp *mindx, PyArrayObject *NPY_UNUSED(aip))
+{
+#if @is_fp@
+ if (npy_isnan(*ip)) {
+ // nan encountered; it's maximal|minimal
+ *mindx = 0;
+ return 0;
+ }
+#endif
+#ifdef TO_SIMD_SFX
+ *mindx = TO_SIMD_SFX(simd_@func@)((TO_SIMD_SFX(npyv_lanetype)*)ip, n);
+ npyv_cleanup();
+#else
+ @type@ mp = *ip;
+ *mindx = 0;
+ npy_intp i = 1;
+
+ for (; i < n; ++i) {
+ @type@ a = ip[i];
+ #if @is_fp@
+ if (!(a @iop@= mp)) { // negated, for correct nan handling
+ #else
+ if (a @op@ mp) {
+ #endif
+ mp = a;
+ *mindx = i;
+ #if @is_fp@
+ if (npy_isnan(mp)) {
+ // nan encountered, it's maximal|minimal
+ break;
+ }
+ #endif
+ }
+ }
+#endif // TO_SIMD_SFX
+ return 0;
+}
+/**end repeat1**/
+/**end repeat**/
+
+NPY_NO_EXPORT int NPY_CPU_DISPATCH_CURFX(BOOL_argmax)
+(npy_bool *ip, npy_intp len, npy_intp *mindx, PyArrayObject *NPY_UNUSED(aip))
+
+{
+ npy_intp i = 0;
+#if NPY_SIMD
+ const npyv_u8 zero = npyv_zero_u8();
+ const int vstep = npyv_nlanes_u8;
+ const int wstep = vstep * 4;
+ for (npy_intp n = len & -wstep; i < n; i += wstep) {
+ npyv_u8 a = npyv_load_u8(ip + i + vstep*0);
+ npyv_u8 b = npyv_load_u8(ip + i + vstep*1);
+ npyv_u8 c = npyv_load_u8(ip + i + vstep*2);
+ npyv_u8 d = npyv_load_u8(ip + i + vstep*3);
+ npyv_b8 m_a = npyv_cmpeq_u8(a, zero);
+ npyv_b8 m_b = npyv_cmpeq_u8(b, zero);
+ npyv_b8 m_c = npyv_cmpeq_u8(c, zero);
+ npyv_b8 m_d = npyv_cmpeq_u8(d, zero);
+ npyv_b8 m_ab = npyv_and_b8(m_a, m_b);
+ npyv_b8 m_cd = npyv_and_b8(m_c, m_d);
+ npy_uint64 m = npyv_tobits_b8(npyv_and_b8(m_ab, m_cd));
+ #if NPY_SIMD == 512
+ if (m != NPY_MAX_UINT64) {
+ #else
+ if ((npy_int64)m != ((1LL << vstep) - 1)) {
+ #endif
+ break;
+ }
+ }
+ npyv_cleanup();
+#endif // NPY_SIMD
+ for (; i < len; ++i) {
+ if (ip[i]) {
+ *mindx = i;
+ return 0;
+ }
+ }
+ *mindx = 0;
+ return 0;
+}
diff --git a/numpy/core/src/multiarray/array_method.c b/numpy/core/src/multiarray/array_method.c
index e3436c573..a56781527 100644
--- a/numpy/core/src/multiarray/array_method.c
+++ b/numpy/core/src/multiarray/array_method.c
@@ -68,7 +68,7 @@ default_resolve_descriptors(
for (int i = 0; i < nin + nout; i++) {
PyArray_DTypeMeta *dtype = dtypes[i];
if (input_descrs[i] != NULL) {
- output_descrs[i] = ensure_dtype_nbo(input_descrs[i]);
+ output_descrs[i] = NPY_DT_CALL_ensure_canonical(input_descrs[i]);
}
else {
output_descrs[i] = NPY_DT_CALL_default_descr(dtype);
diff --git a/numpy/core/src/multiarray/arrayfunction_override.c b/numpy/core/src/multiarray/arrayfunction_override.c
index 463a2d4d8..4800ed360 100644
--- a/numpy/core/src/multiarray/arrayfunction_override.c
+++ b/numpy/core/src/multiarray/arrayfunction_override.c
@@ -333,7 +333,7 @@ NPY_NO_EXPORT PyObject *
array_implement_array_function(
PyObject *NPY_UNUSED(dummy), PyObject *positional_args)
{
- PyObject *implementation, *public_api, *relevant_args, *args, *kwargs;
+ PyObject *res, *implementation, *public_api, *relevant_args, *args, *kwargs;
if (!PyArg_UnpackTuple(
positional_args, "implement_array_function", 5, 5,
@@ -357,10 +357,20 @@ array_implement_array_function(
}
Py_DECREF(tmp_has_override);
PyDict_DelItem(kwargs, npy_ma_str_like);
+
+ /*
+ * If `like=` kwarg was removed, `implementation` points to the NumPy
+ * public API, as `public_api` is in that case the wrapper dispatcher
+ * function. For example, in the `np.full` case, `implementation` is
+ * `np.full`, whereas `public_api` is `_full_with_like`. This is done
+ * to ensure `__array_function__` implementations can do
+ * equality/identity comparisons when `like=` is present.
+ */
+ public_api = implementation;
}
}
- PyObject *res = array_implement_array_function_internal(
+ res = array_implement_array_function_internal(
public_api, relevant_args, args, kwargs);
if (res == Py_NotImplemented) {
diff --git a/numpy/core/src/multiarray/arraytypes.c.src b/numpy/core/src/multiarray/arraytypes.c.src
index 71401c60e..1dc6c9bb1 100644
--- a/numpy/core/src/multiarray/arraytypes.c.src
+++ b/numpy/core/src/multiarray/arraytypes.c.src
@@ -27,12 +27,6 @@
#include "arrayobject.h"
#include "alloc.h"
#include "typeinfo.h"
-#if defined(__ARM_NEON__) || defined (__ARM_NEON)
-#include <arm_neon.h>
-#endif
-#ifdef NPY_HAVE_SSE2_INTRINSICS
-#include <emmintrin.h>
-#endif
#include "npy_longdouble.h"
#include "numpyos.h"
@@ -42,7 +36,7 @@
#include "npy_cblas.h"
#include "npy_buffer.h"
-
+#include "arraytypes.h"
/*
* Define a stack allocated dummy array with only the minimum information set:
* 1. The descr, the main field interesting here.
@@ -3176,77 +3170,21 @@ finish:
** ARGFUNC **
*****************************************************************************
*/
-#if defined(__ARM_NEON__) || defined (__ARM_NEON)
- int32_t _mm_movemask_epi8_neon(uint8x16_t input)
- {
- int8x8_t m0 = vcreate_s8(0x0706050403020100ULL);
- uint8x16_t v0 = vshlq_u8(vshrq_n_u8(input, 7), vcombine_s8(m0, m0));
- uint64x2_t v1 = vpaddlq_u32(vpaddlq_u16(vpaddlq_u8(v0)));
- return (int)vgetq_lane_u64(v1, 0) + ((int)vgetq_lane_u64(v1, 1) << 8);
- }
-#endif
-#define _LESS_THAN_OR_EQUAL(a,b) ((a) <= (b))
-static int
-BOOL_argmax(npy_bool *ip, npy_intp n, npy_intp *max_ind,
- PyArrayObject *NPY_UNUSED(aip))
-
-{
- npy_intp i = 0;
- /* memcmp like logical_and on i386 is maybe slower for small arrays */
-#ifdef NPY_HAVE_SSE2_INTRINSICS
- const __m128i zero = _mm_setzero_si128();
- for (; i < n - (n % 32); i+=32) {
- __m128i d1 = _mm_loadu_si128((__m128i*)&ip[i]);
- __m128i d2 = _mm_loadu_si128((__m128i*)&ip[i + 16]);
- d1 = _mm_cmpeq_epi8(d1, zero);
- d2 = _mm_cmpeq_epi8(d2, zero);
- if (_mm_movemask_epi8(_mm_min_epu8(d1, d2)) != 0xFFFF) {
- break;
- }
- }
-#else
- #if defined(__ARM_NEON__) || defined (__ARM_NEON)
- uint8x16_t zero = vdupq_n_u8(0);
- for(; i < n - (n % 32); i+=32) {
- uint8x16_t d1 = vld1q_u8((uint8_t *)&ip[i]);
- uint8x16_t d2 = vld1q_u8((uint8_t *)&ip[i + 16]);
- d1 = vceqq_u8(d1, zero);
- d2 = vceqq_u8(d2, zero);
- if(_mm_movemask_epi8_neon(vminq_u8(d1, d2)) != 0xFFFF) {
- break;
- }
- }
- #endif
-#endif
- for (; i < n; i++) {
- if (ip[i]) {
- *max_ind = i;
- return 0;
- }
- }
- *max_ind = 0;
- return 0;
-}
+#define _LESS_THAN_OR_EQUAL(a,b) ((a) <= (b))
/**begin repeat
*
- * #fname = BYTE, UBYTE, SHORT, USHORT, INT, UINT,
- * LONG, ULONG, LONGLONG, ULONGLONG,
- * HALF, FLOAT, DOUBLE, LONGDOUBLE,
- * CFLOAT, CDOUBLE, CLONGDOUBLE,
+ * #fname = HALF, CFLOAT, CDOUBLE, CLONGDOUBLE,
* DATETIME, TIMEDELTA#
- * #type = npy_byte, npy_ubyte, npy_short, npy_ushort, npy_int, npy_uint,
- * npy_long, npy_ulong, npy_longlong, npy_ulonglong,
- * npy_half, npy_float, npy_double, npy_longdouble,
- * npy_float, npy_double, npy_longdouble,
+ * #type = npy_half, npy_float, npy_double, npy_longdouble,
* npy_datetime, npy_timedelta#
- * #isfloat = 0*10, 1*7, 0*2#
- * #isnan = nop*10, npy_half_isnan, npy_isnan*6, nop*2#
- * #le = _LESS_THAN_OR_EQUAL*10, npy_half_le, _LESS_THAN_OR_EQUAL*8#
- * #iscomplex = 0*14, 1*3, 0*2#
- * #incr = ip++*14, ip+=2*3, ip++*2#
- * #isdatetime = 0*17, 1*2#
+ * #isfloat = 1*4, 0*2#
+ * #isnan = npy_half_isnan, npy_isnan*3, nop*2#
+ * #le = npy_half_le, _LESS_THAN_OR_EQUAL*5#
+ * #iscomplex = 0, 1*3, 0*2#
+ * #incr = ip++, ip+=2*3, ip++*2#
+ * #isdatetime = 0*4, 1*2#
*/
static int
@fname@_argmax(@type@ *ip, npy_intp n, npy_intp *max_ind,
@@ -3337,22 +3275,16 @@ BOOL_argmin(npy_bool *ip, npy_intp n, npy_intp *min_ind,
/**begin repeat
*
- * #fname = BYTE, UBYTE, SHORT, USHORT, INT, UINT,
- * LONG, ULONG, LONGLONG, ULONGLONG,
- * HALF, FLOAT, DOUBLE, LONGDOUBLE,
- * CFLOAT, CDOUBLE, CLONGDOUBLE,
+ * #fname = HALF, CFLOAT, CDOUBLE, CLONGDOUBLE,
* DATETIME, TIMEDELTA#
- * #type = npy_byte, npy_ubyte, npy_short, npy_ushort, npy_int, npy_uint,
- * npy_long, npy_ulong, npy_longlong, npy_ulonglong,
- * npy_half, npy_float, npy_double, npy_longdouble,
- * npy_float, npy_double, npy_longdouble,
+ * #type = npy_half, npy_float, npy_double, npy_longdouble,
* npy_datetime, npy_timedelta#
- * #isfloat = 0*10, 1*7, 0*2#
- * #isnan = nop*10, npy_half_isnan, npy_isnan*6, nop*2#
- * #le = _LESS_THAN_OR_EQUAL*10, npy_half_le, _LESS_THAN_OR_EQUAL*8#
- * #iscomplex = 0*14, 1*3, 0*2#
- * #incr = ip++*14, ip+=2*3, ip++*2#
- * #isdatetime = 0*17, 1*2#
+ * #isfloat = 1*4, 0*2#
+ * #isnan = npy_half_isnan, npy_isnan*3, nop*2#
+ * #le = npy_half_le, _LESS_THAN_OR_EQUAL*5#
+ * #iscomplex = 0, 1*3, 0*2#
+ * #incr = ip++, ip+=2*3, ip++*2#
+ * #isdatetime = 0*4, 1*2#
*/
static int
@fname@_argmin(@type@ *ip, npy_intp n, npy_intp *min_ind,
@@ -3409,7 +3341,7 @@ static int
*min_ind = i;
break;
}
-#endif
+#endif
if (!@le@(mp, *ip)) { /* negated, for correct nan handling */
mp = *ip;
*min_ind = i;
@@ -4494,6 +4426,27 @@ set_typeinfo(PyObject *dict)
PyArray_Descr *dtype;
PyObject *cobj, *key;
+ // SIMD runtime dispatching
+ #ifndef NPY_DISABLE_OPTIMIZATION
+ #include "argfunc.dispatch.h"
+ #endif
+ /**begin repeat
+ * #FROM = BYTE, UBYTE, SHORT, USHORT, INT, UINT,
+ * LONG, ULONG, LONGLONG, ULONGLONG,
+ * FLOAT, DOUBLE, LONGDOUBLE#
+ *
+ * #NAME = Byte, UByte, Short, UShort, Int, UInt,
+ * Long, ULong, LongLong, ULongLong,
+ * Float, Double, LongDouble#
+ */
+ /**begin repeat1
+ * #func = argmax, argmin#
+ */
+ NPY_CPU_DISPATCH_CALL_XB(_Py@NAME@_ArrFuncs.@func@ = (PyArray_ArgFunc*)@FROM@_@func@);
+ /**end repeat1**/
+ /**end repeat**/
+ NPY_CPU_DISPATCH_CALL_XB(_PyBool_ArrFuncs.argmax = (PyArray_ArgFunc*)BOOL_argmax);
+
/*
* Override the base class for all types, eventually all of this logic
* should be defined on the class and inherited to the scalar.
diff --git a/numpy/core/src/multiarray/arraytypes.h b/numpy/core/src/multiarray/arraytypes.h.src
index b3a13b297..4c7487189 100644
--- a/numpy/core/src/multiarray/arraytypes.h
+++ b/numpy/core/src/multiarray/arraytypes.h.src
@@ -28,4 +28,25 @@ small_correlate(const char * d_, npy_intp dstride,
npy_intp nk, enum NPY_TYPES ktype,
char * out_, npy_intp ostride);
+#ifndef NPY_DISABLE_OPTIMIZATION
+ #include "argfunc.dispatch.h"
+#endif
+/**begin repeat
+ * #TYPE = BYTE, UBYTE, SHORT, USHORT, INT, UINT,
+ * LONG, ULONG, LONGLONG, ULONGLONG,
+ * FLOAT, DOUBLE, LONGDOUBLE#
+ * #type = byte, ubyte, short, ushort, int, uint,
+ * long, ulong, longlong, ulonglong,
+ * float, double, longdouble#
+ */
+/**begin repeat1
+ * #func = argmax, argmin#
+ */
+NPY_CPU_DISPATCH_DECLARE(NPY_NO_EXPORT int @TYPE@_@func@,
+ (npy_@type@ *ip, npy_intp n, npy_intp *max_ind, PyArrayObject *aip))
+/**end repeat1**/
+/**end repeat**/
+NPY_CPU_DISPATCH_DECLARE(NPY_NO_EXPORT int BOOL_argmax,
+ (npy_bool *ip, npy_intp n, npy_intp *max_ind, PyArrayObject *aip))
+
#endif /* NUMPY_CORE_SRC_MULTIARRAY_ARRAYTYPES_H_ */
diff --git a/numpy/core/src/multiarray/buffer.c b/numpy/core/src/multiarray/buffer.c
index 13d7038d3..0307d41a8 100644
--- a/numpy/core/src/multiarray/buffer.c
+++ b/numpy/core/src/multiarray/buffer.c
@@ -498,14 +498,11 @@ _buffer_info_new(PyObject *obj, int flags)
assert((size_t)info->shape % sizeof(npy_intp) == 0);
info->strides = info->shape + PyArray_NDIM(arr);
-#if NPY_RELAXED_STRIDES_CHECKING
/*
- * When NPY_RELAXED_STRIDES_CHECKING is used, some buffer users
- * may expect a contiguous buffer to have well formatted strides
- * also when a dimension is 1, but we do not guarantee this
- * internally. Thus, recalculate strides for contiguous arrays.
- * (This is unnecessary, but has no effect in the case where
- * NPY_RELAXED_STRIDES CHECKING is disabled.)
+ * Some buffer users may expect a contiguous buffer to have well
+ * formatted strides also when a dimension is 1, but we do not
+ * guarantee this internally. Thus, recalculate strides for
+ * contiguous arrays.
*/
int f_contiguous = (flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS;
if (PyArray_IS_C_CONTIGUOUS(arr) && !(
@@ -526,11 +523,6 @@ _buffer_info_new(PyObject *obj, int flags)
}
}
else {
-#else /* NPY_RELAXED_STRIDES_CHECKING */
- /* We can always use the arrays strides directly */
- {
-#endif
-
for (k = 0; k < PyArray_NDIM(arr); ++k) {
info->shape[k] = PyArray_DIMS(arr)[k];
info->strides[k] = PyArray_STRIDES(arr)[k];
@@ -708,8 +700,8 @@ _buffer_get_info(void **buffer_info_cache_ptr, PyObject *obj, int flags)
if (info->ndim > 1 && next_info != NULL) {
/*
* Some arrays are C- and F-contiguous and if they have more
- * than one dimension, the buffer-info may differ between
- * the two due to RELAXED_STRIDES_CHECKING.
+ * than one dimension, the buffer-info may differ between the
+ * two because strides for length 1 dimension may be adjusted.
* If we export both buffers, the first stored one may be
* the one for the other contiguity, so check both.
* This is generally very unlikely in all other cases, since
diff --git a/numpy/core/src/multiarray/convert_datatype.c b/numpy/core/src/multiarray/convert_datatype.c
index b8d443752..b4a7aad34 100644
--- a/numpy/core/src/multiarray/convert_datatype.c
+++ b/numpy/core/src/multiarray/convert_datatype.c
@@ -933,22 +933,6 @@ promote_types(PyArray_Descr *type1, PyArray_Descr *type2,
}
-/*
- * Returns a new reference to type if it is already NBO, otherwise
- * returns a copy converted to NBO.
- */
-NPY_NO_EXPORT PyArray_Descr *
-ensure_dtype_nbo(PyArray_Descr *type)
-{
- if (PyArray_ISNBO(type->byteorder)) {
- Py_INCREF(type);
- return type;
- }
- else {
- return PyArray_DescrNewByteorder(type, NPY_NATIVE);
- }
-}
-
/**
* This function should possibly become public API eventually. At this
@@ -1642,7 +1626,7 @@ PyArray_ResultType(
"no arrays or types available to calculate result type");
return NULL;
}
- return ensure_dtype_nbo(result);
+ return NPY_DT_CALL_ensure_canonical(result);
}
void **info_on_heap = NULL;
@@ -2321,7 +2305,7 @@ legacy_same_dtype_resolve_descriptors(
loop_descrs[0] = given_descrs[0];
if (given_descrs[1] == NULL) {
- loop_descrs[1] = ensure_dtype_nbo(loop_descrs[0]);
+ loop_descrs[1] = NPY_DT_CALL_ensure_canonical(loop_descrs[0]);
if (loop_descrs[1] == NULL) {
Py_DECREF(loop_descrs[0]);
return -1;
@@ -2386,12 +2370,12 @@ simple_cast_resolve_descriptors(
{
assert(NPY_DT_is_legacy(dtypes[0]) && NPY_DT_is_legacy(dtypes[1]));
- loop_descrs[0] = ensure_dtype_nbo(given_descrs[0]);
+ loop_descrs[0] = NPY_DT_CALL_ensure_canonical(given_descrs[0]);
if (loop_descrs[0] == NULL) {
return -1;
}
if (given_descrs[1] != NULL) {
- loop_descrs[1] = ensure_dtype_nbo(given_descrs[1]);
+ loop_descrs[1] = NPY_DT_CALL_ensure_canonical(given_descrs[1]);
if (loop_descrs[1] == NULL) {
Py_DECREF(loop_descrs[0]);
return -1;
@@ -2678,14 +2662,14 @@ cast_to_string_resolve_descriptors(
}
else {
/* The legacy loop can handle mismatching itemsizes */
- loop_descrs[1] = ensure_dtype_nbo(given_descrs[1]);
+ loop_descrs[1] = NPY_DT_CALL_ensure_canonical(given_descrs[1]);
if (loop_descrs[1] == NULL) {
return -1;
}
}
/* Set the input one as well (late for easier error management) */
- loop_descrs[0] = ensure_dtype_nbo(given_descrs[0]);
+ loop_descrs[0] = NPY_DT_CALL_ensure_canonical(given_descrs[0]);
if (loop_descrs[0] == NULL) {
return -1;
}
@@ -2760,7 +2744,7 @@ string_to_string_resolve_descriptors(
loop_descrs[0] = given_descrs[0];
if (given_descrs[1] == NULL) {
- loop_descrs[1] = ensure_dtype_nbo(loop_descrs[0]);
+ loop_descrs[1] = NPY_DT_CALL_ensure_canonical(loop_descrs[0]);
if (loop_descrs[1] == NULL) {
return -1;
}
diff --git a/numpy/core/src/multiarray/convert_datatype.h b/numpy/core/src/multiarray/convert_datatype.h
index 6b4413959..d1865d1c2 100644
--- a/numpy/core/src/multiarray/convert_datatype.h
+++ b/numpy/core/src/multiarray/convert_datatype.h
@@ -36,9 +36,6 @@ NPY_NO_EXPORT npy_bool
can_cast_scalar_to(PyArray_Descr *scal_type, char *scal_data,
PyArray_Descr *to, NPY_CASTING casting);
-NPY_NO_EXPORT PyArray_Descr *
-ensure_dtype_nbo(PyArray_Descr *type);
-
NPY_NO_EXPORT int
should_use_min_scalar(npy_intp narrs, PyArrayObject **arr,
npy_intp ndtypes, PyArray_Descr **dtypes);
diff --git a/numpy/core/src/multiarray/ctors.c b/numpy/core/src/multiarray/ctors.c
index c2842d7ba..58ba0c2a1 100644
--- a/numpy/core/src/multiarray/ctors.c
+++ b/numpy/core/src/multiarray/ctors.c
@@ -4020,7 +4020,6 @@ _array_fill_strides(npy_intp *strides, npy_intp const *dims, int nd, size_t item
int inflag, int *objflags)
{
int i;
-#if NPY_RELAXED_STRIDES_CHECKING
npy_bool not_cf_contig = 0;
npy_bool nod = 0; /* A dim != 1 was found */
@@ -4034,7 +4033,6 @@ _array_fill_strides(npy_intp *strides, npy_intp const *dims, int nd, size_t item
nod = 1;
}
}
-#endif /* NPY_RELAXED_STRIDES_CHECKING */
/* Only make Fortran strides if not contiguous as well */
if ((inflag & (NPY_ARRAY_F_CONTIGUOUS|NPY_ARRAY_C_CONTIGUOUS)) ==
@@ -4044,7 +4042,6 @@ _array_fill_strides(npy_intp *strides, npy_intp const *dims, int nd, size_t item
if (dims[i]) {
itemsize *= dims[i];
}
-#if NPY_RELAXED_STRIDES_CHECKING
else {
not_cf_contig = 0;
}
@@ -4054,13 +4051,8 @@ _array_fill_strides(npy_intp *strides, npy_intp const *dims, int nd, size_t item
strides[i] = NPY_MAX_INTP;
}
#endif /* NPY_RELAXED_STRIDES_DEBUG */
-#endif /* NPY_RELAXED_STRIDES_CHECKING */
}
-#if NPY_RELAXED_STRIDES_CHECKING
if (not_cf_contig) {
-#else /* not NPY_RELAXED_STRIDES_CHECKING */
- if ((nd > 1) && ((strides[0] != strides[nd-1]) || (dims[nd-1] > 1))) {
-#endif /* not NPY_RELAXED_STRIDES_CHECKING */
*objflags = ((*objflags)|NPY_ARRAY_F_CONTIGUOUS) &
~NPY_ARRAY_C_CONTIGUOUS;
}
@@ -4074,7 +4066,6 @@ _array_fill_strides(npy_intp *strides, npy_intp const *dims, int nd, size_t item
if (dims[i]) {
itemsize *= dims[i];
}
-#if NPY_RELAXED_STRIDES_CHECKING
else {
not_cf_contig = 0;
}
@@ -4084,13 +4075,8 @@ _array_fill_strides(npy_intp *strides, npy_intp const *dims, int nd, size_t item
strides[i] = NPY_MAX_INTP;
}
#endif /* NPY_RELAXED_STRIDES_DEBUG */
-#endif /* NPY_RELAXED_STRIDES_CHECKING */
}
-#if NPY_RELAXED_STRIDES_CHECKING
if (not_cf_contig) {
-#else /* not NPY_RELAXED_STRIDES_CHECKING */
- if ((nd > 1) && ((strides[0] != strides[nd-1]) || (dims[0] > 1))) {
-#endif /* not NPY_RELAXED_STRIDES_CHECKING */
*objflags = ((*objflags)|NPY_ARRAY_C_CONTIGUOUS) &
~NPY_ARRAY_F_CONTIGUOUS;
}
diff --git a/numpy/core/src/multiarray/datetime.c b/numpy/core/src/multiarray/datetime.c
index 03ebaa7ce..99096be56 100644
--- a/numpy/core/src/multiarray/datetime.c
+++ b/numpy/core/src/multiarray/datetime.c
@@ -3761,7 +3761,7 @@ time_to_time_resolve_descriptors(
Py_INCREF(given_descrs[0]);
loop_descrs[0] = given_descrs[0];
if (given_descrs[1] == NULL) {
- loop_descrs[1] = ensure_dtype_nbo(given_descrs[0]);
+ loop_descrs[1] = NPY_DT_CALL_ensure_canonical(given_descrs[0]);
}
else {
Py_INCREF(given_descrs[1]);
@@ -3880,8 +3880,8 @@ time_to_time_get_loop(
return 0;
}
- PyArray_Descr *src_wrapped_dtype = ensure_dtype_nbo(descrs[0]);
- PyArray_Descr *dst_wrapped_dtype = ensure_dtype_nbo(descrs[1]);
+ PyArray_Descr *src_wrapped_dtype = NPY_DT_CALL_ensure_canonical(descrs[0]);
+ PyArray_Descr *dst_wrapped_dtype = NPY_DT_CALL_ensure_canonical(descrs[1]);
int needs_api = 0;
int res = wrap_aligned_transferfunction(
@@ -3906,7 +3906,7 @@ datetime_to_timedelta_resolve_descriptors(
PyArray_Descr *given_descrs[2],
PyArray_Descr *loop_descrs[2])
{
- loop_descrs[0] = ensure_dtype_nbo(given_descrs[0]);
+ loop_descrs[0] = NPY_DT_CALL_ensure_canonical(given_descrs[0]);
if (loop_descrs[0] == NULL) {
return -1;
}
@@ -3916,7 +3916,7 @@ datetime_to_timedelta_resolve_descriptors(
loop_descrs[1] = create_datetime_dtype(dtypes[1]->type_num, meta);
}
else {
- loop_descrs[1] = ensure_dtype_nbo(given_descrs[1]);
+ loop_descrs[1] = NPY_DT_CALL_ensure_canonical(given_descrs[1]);
}
if (loop_descrs[1] == NULL) {
Py_DECREF(loop_descrs[0]);
@@ -3973,7 +3973,7 @@ time_to_string_resolve_descriptors(
loop_descrs[1]->elsize = size;
}
- loop_descrs[0] = ensure_dtype_nbo(given_descrs[0]);
+ loop_descrs[0] = NPY_DT_CALL_ensure_canonical(given_descrs[0]);
if (loop_descrs[0] == NULL) {
Py_DECREF(loop_descrs[1]);
return -1;
@@ -4028,7 +4028,7 @@ string_to_datetime_cast_resolve_descriptors(
}
}
else {
- loop_descrs[1] = ensure_dtype_nbo(given_descrs[1]);
+ loop_descrs[1] = NPY_DT_CALL_ensure_canonical(given_descrs[1]);
if (loop_descrs[1] == NULL) {
return -1;
}
diff --git a/numpy/core/src/multiarray/descriptor.c b/numpy/core/src/multiarray/descriptor.c
index 07abc755f..a23ee6d2c 100644
--- a/numpy/core/src/multiarray/descriptor.c
+++ b/numpy/core/src/multiarray/descriptor.c
@@ -22,18 +22,6 @@
#include "npy_buffer.h"
#include "dtypemeta.h"
-/*
- * offset: A starting offset.
- * alignment: A power-of-two alignment.
- *
- * This macro returns the smallest value >= 'offset'
- * that is divisible by 'alignment'. Because 'alignment'
- * is a power of two and integers are twos-complement,
- * it is possible to use some simple bit-fiddling to do this.
- */
-#define NPY_NEXT_ALIGNED_OFFSET(offset, alignment) \
- (((offset) + (alignment) - 1) & (-(alignment)))
-
#ifndef PyDictProxy_Check
#define PyDictProxy_Check(obj) (Py_TYPE(obj) == &PyDictProxy_Type)
#endif
diff --git a/numpy/core/src/multiarray/descriptor.h b/numpy/core/src/multiarray/descriptor.h
index f832958da..7e6f212f2 100644
--- a/numpy/core/src/multiarray/descriptor.h
+++ b/numpy/core/src/multiarray/descriptor.h
@@ -6,6 +6,18 @@ NPY_NO_EXPORT PyObject *arraydescr_protocol_typestr_get(
NPY_NO_EXPORT PyObject *arraydescr_protocol_descr_get(
PyArray_Descr *self, void *);
+/*
+ * offset: A starting offset.
+ * alignment: A power-of-two alignment.
+ *
+ * This macro returns the smallest value >= 'offset'
+ * that is divisible by 'alignment'. Because 'alignment'
+ * is a power of two and integers are twos-complement,
+ * it is possible to use some simple bit-fiddling to do this.
+ */
+#define NPY_NEXT_ALIGNED_OFFSET(offset, alignment) \
+ (((offset) + (alignment) - 1) & (-(alignment)))
+
NPY_NO_EXPORT PyObject *
array_set_typeDict(PyObject *NPY_UNUSED(ignored), PyObject *args);
diff --git a/numpy/core/src/multiarray/dlpack.c b/numpy/core/src/multiarray/dlpack.c
index 291e60a22..980b85395 100644
--- a/numpy/core/src/multiarray/dlpack.c
+++ b/numpy/core/src/multiarray/dlpack.c
@@ -15,8 +15,7 @@ static void
array_dlpack_deleter(DLManagedTensor *self)
{
PyArrayObject *array = (PyArrayObject *)self->manager_ctx;
- // This will also free the strides as it's one allocation.
- PyMem_Free(self->dl_tensor.shape);
+ // This will also free the shape and strides as it's one allocation.
PyMem_Free(self);
Py_XDECREF(array);
}
@@ -88,6 +87,12 @@ array_get_dl_device(PyArrayObject *self) {
ret.device_type = kDLCPU;
ret.device_id = 0;
PyObject *base = PyArray_BASE(self);
+
+ // walk the bases (see gh-20340)
+ while (base != NULL && PyArray_Check(base)) {
+ base = PyArray_BASE((PyArrayObject *)base);
+ }
+
// The outer if is due to the fact that NumPy arrays are on the CPU
// by default (if not created from DLPack).
if (PyCapsule_IsValid(base, NPY_DLPACK_INTERNAL_CAPSULE_NAME)) {
@@ -191,12 +196,17 @@ array_dlpack(PyArrayObject *self,
return NULL;
}
- DLManagedTensor *managed = PyMem_Malloc(sizeof(DLManagedTensor));
- if (managed == NULL) {
+ // ensure alignment
+ int offset = sizeof(DLManagedTensor) % sizeof(void *);
+ void *ptr = PyMem_Malloc(sizeof(DLManagedTensor) + offset +
+ (sizeof(int64_t) * ndim * 2));
+ if (ptr == NULL) {
PyErr_NoMemory();
return NULL;
}
+ DLManagedTensor *managed = ptr;
+
/*
* Note: the `dlpack.h` header suggests/standardizes that `data` must be
* 256-byte aligned. We ignore this intentionally, because `__dlpack__`
@@ -215,12 +225,8 @@ array_dlpack(PyArrayObject *self,
managed->dl_tensor.device = device;
managed->dl_tensor.dtype = managed_dtype;
- int64_t *managed_shape_strides = PyMem_Malloc(sizeof(int64_t) * ndim * 2);
- if (managed_shape_strides == NULL) {
- PyErr_NoMemory();
- PyMem_Free(managed);
- return NULL;
- }
+ int64_t *managed_shape_strides = (int64_t *)((char *)ptr +
+ sizeof(DLManagedTensor) + offset);
int64_t *managed_shape = managed_shape_strides;
int64_t *managed_strides = managed_shape_strides + ndim;
@@ -243,8 +249,7 @@ array_dlpack(PyArrayObject *self,
PyObject *capsule = PyCapsule_New(managed, NPY_DLPACK_CAPSULE_NAME,
dlpack_capsule_deleter);
if (capsule == NULL) {
- PyMem_Free(managed);
- PyMem_Free(managed_shape_strides);
+ PyMem_Free(ptr);
return NULL;
}
diff --git a/numpy/core/src/multiarray/dtype_transfer.c b/numpy/core/src/multiarray/dtype_transfer.c
index 91b8aac98..18de5d132 100644
--- a/numpy/core/src/multiarray/dtype_transfer.c
+++ b/numpy/core/src/multiarray/dtype_transfer.c
@@ -3457,11 +3457,11 @@ get_wrapped_legacy_cast_function(int aligned,
* If we are here, use the legacy code to wrap the above cast (which
* does not support unaligned data) into copyswapn.
*/
- PyArray_Descr *src_wrapped_dtype = ensure_dtype_nbo(src_dtype);
+ PyArray_Descr *src_wrapped_dtype = NPY_DT_CALL_ensure_canonical(src_dtype);
if (src_wrapped_dtype == NULL) {
goto fail;
}
- PyArray_Descr *dst_wrapped_dtype = ensure_dtype_nbo(dst_dtype);
+ PyArray_Descr *dst_wrapped_dtype = NPY_DT_CALL_ensure_canonical(dst_dtype);
if (dst_wrapped_dtype == NULL) {
goto fail;
}
diff --git a/numpy/core/src/multiarray/dtypemeta.c b/numpy/core/src/multiarray/dtypemeta.c
index 53f38e8e8..519b998d7 100644
--- a/numpy/core/src/multiarray/dtypemeta.c
+++ b/numpy/core/src/multiarray/dtypemeta.c
@@ -12,6 +12,7 @@
#include "common.h"
#include "dtypemeta.h"
+#include "descriptor.h"
#include "_datetime.h"
#include "array_coercion.h"
#include "scalartypes.h"
@@ -222,6 +223,23 @@ nonparametric_default_descr(PyArray_DTypeMeta *cls)
}
+/*
+ * For most builtin (and legacy) dtypes, the canonical property means to
+ * ensure native byte-order. (We do not care about metadata here.)
+ */
+static PyArray_Descr *
+ensure_native_byteorder(PyArray_Descr *descr)
+{
+ if (PyArray_ISNBO(descr->byteorder)) {
+ Py_INCREF(descr);
+ return descr;
+ }
+ else {
+ return PyArray_DescrNewByteorder(descr, NPY_NATIVE);
+ }
+}
+
+
/* Ensure a copy of the singleton (just in case we do adapt it somewhere) */
static PyArray_Descr *
datetime_and_timedelta_default_descr(PyArray_DTypeMeta *cls)
@@ -265,10 +283,115 @@ static PyArray_Descr *
string_unicode_common_instance(PyArray_Descr *descr1, PyArray_Descr *descr2)
{
if (descr1->elsize >= descr2->elsize) {
- return ensure_dtype_nbo(descr1);
+ return NPY_DT_CALL_ensure_canonical(descr1);
+ }
+ else {
+ return NPY_DT_CALL_ensure_canonical(descr2);
+ }
+}
+
+
+static PyArray_Descr *
+void_ensure_canonical(PyArray_Descr *self)
+{
+ if (self->subarray != NULL) {
+ PyArray_Descr *new_base = NPY_DT_CALL_ensure_canonical(
+ self->subarray->base);
+ if (new_base == NULL) {
+ return NULL;
+ }
+ if (new_base == self->subarray->base) {
+ /* just return self, no need to modify */
+ Py_DECREF(new_base);
+ Py_INCREF(self);
+ return self;
+ }
+ PyArray_Descr *new = PyArray_DescrNew(self);
+ if (new == NULL) {
+ return NULL;
+ }
+ Py_SETREF(new->subarray->base, new_base);
+ return new;
+ }
+ else if (self->names != NULL) {
+ /*
+ * This branch is fairly complex, since it needs to build a new
+ * descriptor that is in canonical form. This means that the new
+ * descriptor should be an aligned struct if the old one was, and
+ * otherwise it should be an unaligned struct.
+ * Any unnecessary empty space is stripped from the struct.
+ *
+ * TODO: In principle we could/should try to provide the identity when
+ * no change is necessary. (Simple if we add a flag.)
+ */
+ Py_ssize_t field_num = PyTuple_GET_SIZE(self->names);
+
+ PyArray_Descr *new = PyArray_DescrNew(self);
+ if (new == NULL) {
+ return NULL;
+ }
+ Py_SETREF(new->fields, PyDict_New());
+ if (new->fields == NULL) {
+ Py_DECREF(new);
+ return NULL;
+ }
+ int aligned = PyDataType_FLAGCHK(new, NPY_ALIGNED_STRUCT);
+ new->flags = new->flags & ~NPY_FROM_FIELDS;
+ new->flags |= NPY_NEEDS_PYAPI; /* always needed for field access */
+ int totalsize = 0;
+ int maxalign = 1;
+ for (Py_ssize_t i = 0; i < field_num; i++) {
+ PyObject *name = PyTuple_GET_ITEM(self->names, i);
+ PyObject *tuple = PyDict_GetItem(self->fields, name);
+ PyObject *new_tuple = PyTuple_New(PyTuple_GET_SIZE(tuple));
+ PyArray_Descr *field_descr = NPY_DT_CALL_ensure_canonical(
+ (PyArray_Descr *)PyTuple_GET_ITEM(tuple, 0));
+ if (field_descr == NULL) {
+ Py_DECREF(new_tuple);
+ Py_DECREF(new);
+ return NULL;
+ }
+ new->flags |= field_descr->flags & NPY_FROM_FIELDS;
+ PyTuple_SET_ITEM(new_tuple, 0, (PyObject *)field_descr);
+
+ if (aligned) {
+ totalsize = NPY_NEXT_ALIGNED_OFFSET(
+ totalsize, field_descr->alignment);
+ maxalign = PyArray_MAX(maxalign, field_descr->alignment);
+ }
+ PyObject *offset_obj = PyLong_FromLong(totalsize);
+ if (offset_obj == NULL) {
+ Py_DECREF(new_tuple);
+ Py_DECREF(new);
+ return NULL;
+ }
+ PyTuple_SET_ITEM(new_tuple, 1, (PyObject *)offset_obj);
+ if (PyDict_SetItem(new->fields, name, new_tuple) < 0) {
+ Py_DECREF(new_tuple);
+ Py_DECREF(new);
+ return NULL;
+ }
+ Py_DECREF(new_tuple); /* Reference now owned by new->fields */
+ if (PyTuple_GET_SIZE(new_tuple) == 3) {
+ PyObject *title = PyTuple_GET_ITEM(tuple, 2);
+ Py_INCREF(title);
+ PyTuple_SET_ITEM(new_tuple, 2, title);
+ if (PyDict_SetItem(new->fields, title, new_tuple) < 0) {
+ Py_DECREF(new);
+ return NULL;
+ }
+ }
+ totalsize += field_descr->elsize;
+ }
+ totalsize = NPY_NEXT_ALIGNED_OFFSET(totalsize, maxalign);
+ new->elsize = totalsize;
+ new->alignment = maxalign;
+ return new;
}
else {
- return ensure_dtype_nbo(descr2);
+ /* unstructured voids are always canonical. */
+ Py_INCREF(self);
+ return self;
}
}
@@ -621,6 +744,7 @@ dtypemeta_wrap_legacy_descriptor(PyArray_Descr *descr)
dt_slots->is_known_scalar_type = python_builtins_are_known_scalar_types;
dt_slots->common_dtype = default_builtin_common_dtype;
dt_slots->common_instance = NULL;
+ dt_slots->ensure_canonical = ensure_native_byteorder;
if (PyTypeNum_ISSIGNED(dtype_class->type_num)) {
/* Convert our scalars (raise on too large unsigned and NaN, etc.) */
@@ -652,6 +776,7 @@ dtypemeta_wrap_legacy_descriptor(PyArray_Descr *descr)
dt_slots->discover_descr_from_pyobject = (
void_discover_descr_from_pyobject);
dt_slots->common_instance = void_common_instance;
+ dt_slots->ensure_canonical = void_ensure_canonical;
}
else {
dt_slots->default_descr = string_and_unicode_default_descr;
diff --git a/numpy/core/src/multiarray/dtypemeta.h b/numpy/core/src/multiarray/dtypemeta.h
index 2a61fe39d..e7d5505d8 100644
--- a/numpy/core/src/multiarray/dtypemeta.h
+++ b/numpy/core/src/multiarray/dtypemeta.h
@@ -25,6 +25,7 @@ typedef PyArray_DTypeMeta *(common_dtype_function)(
PyArray_DTypeMeta *dtype1, PyArray_DTypeMeta *dtype2);
typedef PyArray_Descr *(common_instance_function)(
PyArray_Descr *dtype1, PyArray_Descr *dtype2);
+typedef PyArray_Descr *(ensure_canonical_function)(PyArray_Descr *dtype);
/*
* TODO: These two functions are currently only used for experimental DType
@@ -44,6 +45,7 @@ typedef struct {
default_descr_function *default_descr;
common_dtype_function *common_dtype;
common_instance_function *common_instance;
+ ensure_canonical_function *ensure_canonical;
/*
* Currently only used for experimental user DTypes.
* Typing as `void *` until NumPy itself uses these (directly).
@@ -93,6 +95,8 @@ typedef struct {
NPY_DT_SLOTS(dtype)->default_descr(dtype)
#define NPY_DT_CALL_common_dtype(dtype, other) \
NPY_DT_SLOTS(dtype)->common_dtype(dtype, other)
+#define NPY_DT_CALL_ensure_canonical(descr) \
+ NPY_DT_SLOTS(NPY_DTYPE(descr))->ensure_canonical(descr)
#define NPY_DT_CALL_getitem(descr, data_ptr) \
NPY_DT_SLOTS(NPY_DTYPE(descr))->getitem(descr, data_ptr)
#define NPY_DT_CALL_setitem(descr, value, data_ptr) \
diff --git a/numpy/core/src/multiarray/flagsobject.c b/numpy/core/src/multiarray/flagsobject.c
index b5bd7c8c1..adbfb22e7 100644
--- a/numpy/core/src/multiarray/flagsobject.c
+++ b/numpy/core/src/multiarray/flagsobject.c
@@ -105,20 +105,11 @@ PyArray_UpdateFlags(PyArrayObject *ret, int flagmask)
*
* According to these rules, a 0- or 1-dimensional array is either both
* C- and F-contiguous, or neither; and an array with 2+ dimensions
- * can be C- or F- contiguous, or neither, but not both. Though there
- * there are exceptions for arrays with zero or one item, in the first
- * case the check is relaxed up to and including the first dimension
- * with shape[i] == 0. In the second case `strides == itemsize` will
- * can be true for all dimensions and both flags are set.
- *
- * When NPY_RELAXED_STRIDES_CHECKING is set, we use a more accurate
- * definition of C- and F-contiguity, in which all 0-sized arrays are
- * contiguous (regardless of dimensionality), and if shape[i] == 1
- * then we ignore strides[i] (since it has no affect on memory layout).
- * With these new rules, it is possible for e.g. a 10x1 array to be both
- * C- and F-contiguous -- but, they break downstream code which assumes
- * that for contiguous arrays strides[-1] (resp. strides[0]) always
- * contains the itemsize.
+ * can be C- or F- contiguous, or neither, but not both (unless it has only
+ * a single element).
+ * We correct this, however. When a dimension has length 1, its stride is
+ * never used and thus has no effect on the memory layout.
+ * The above rules thus only apply when ignoring all size 1 dimensions.
*/
static void
_UpdateContiguousFlags(PyArrayObject *ap)
@@ -131,7 +122,6 @@ _UpdateContiguousFlags(PyArrayObject *ap)
sd = PyArray_ITEMSIZE(ap);
for (i = PyArray_NDIM(ap) - 1; i >= 0; --i) {
dim = PyArray_DIMS(ap)[i];
-#if NPY_RELAXED_STRIDES_CHECKING
/* contiguous by definition */
if (dim == 0) {
PyArray_ENABLEFLAGS(ap, NPY_ARRAY_C_CONTIGUOUS);
@@ -144,17 +134,6 @@ _UpdateContiguousFlags(PyArrayObject *ap)
}
sd *= dim;
}
-#else /* not NPY_RELAXED_STRIDES_CHECKING */
- if (PyArray_STRIDES(ap)[i] != sd) {
- is_c_contig = 0;
- break;
- }
- /* contiguous, if it got this far */
- if (dim == 0) {
- break;
- }
- sd *= dim;
-#endif /* not NPY_RELAXED_STRIDES_CHECKING */
}
if (is_c_contig) {
PyArray_ENABLEFLAGS(ap, NPY_ARRAY_C_CONTIGUOUS);
@@ -167,7 +146,6 @@ _UpdateContiguousFlags(PyArrayObject *ap)
sd = PyArray_ITEMSIZE(ap);
for (i = 0; i < PyArray_NDIM(ap); ++i) {
dim = PyArray_DIMS(ap)[i];
-#if NPY_RELAXED_STRIDES_CHECKING
if (dim != 1) {
if (PyArray_STRIDES(ap)[i] != sd) {
PyArray_CLEARFLAGS(ap, NPY_ARRAY_F_CONTIGUOUS);
@@ -175,16 +153,6 @@ _UpdateContiguousFlags(PyArrayObject *ap)
}
sd *= dim;
}
-#else /* not NPY_RELAXED_STRIDES_CHECKING */
- if (PyArray_STRIDES(ap)[i] != sd) {
- PyArray_CLEARFLAGS(ap, NPY_ARRAY_F_CONTIGUOUS);
- return;
- }
- if (dim == 0) {
- break;
- }
- sd *= dim;
-#endif /* not NPY_RELAXED_STRIDES_CHECKING */
}
PyArray_ENABLEFLAGS(ap, NPY_ARRAY_F_CONTIGUOUS);
return;
diff --git a/numpy/core/src/multiarray/mapping.c b/numpy/core/src/multiarray/mapping.c
index 5d515d013..1a2ade11b 100644
--- a/numpy/core/src/multiarray/mapping.c
+++ b/numpy/core/src/multiarray/mapping.c
@@ -197,21 +197,8 @@ unpack_scalar(PyObject *index, PyObject **result, npy_intp NPY_UNUSED(result_n))
/**
* Turn an index argument into a c-array of `PyObject *`s, one for each index.
*
- * When a scalar is passed, this is written directly to the buffer. When a
- * tuple is passed, the tuple elements are unpacked into the buffer.
- *
- * When some other sequence is passed, this implements the following section
- * from the advanced indexing docs to decide whether to unpack or just write
- * one element:
- *
- * > In order to remain backward compatible with a common usage in Numeric,
- * > basic slicing is also initiated if the selection object is any non-ndarray
- * > sequence (such as a list) containing slice objects, the Ellipsis object,
- * > or the newaxis object, but not for integer arrays or other embedded
- * > sequences.
- *
- * It might be worth deprecating this behaviour (gh-4434), in which case the
- * entire function should become a simple check of PyTuple_Check.
+ * When a tuple is passed, the tuple elements are unpacked into the buffer.
+ * Anything else is handled by unpack_scalar().
*
* @param index The index object, which may or may not be a tuple. This is
* a borrowed reference.
@@ -228,129 +215,32 @@ unpack_scalar(PyObject *index, PyObject **result, npy_intp NPY_UNUSED(result_n))
NPY_NO_EXPORT npy_intp
unpack_indices(PyObject *index, PyObject **result, npy_intp result_n)
{
- npy_intp n, i;
- npy_bool commit_to_unpack;
+ /* It is likely that the logic here can be simplified. See the discussion
+ * on https://github.com/numpy/numpy/pull/21029
+ */
/* Fast route for passing a tuple */
if (PyTuple_CheckExact(index)) {
return unpack_tuple((PyTupleObject *)index, result, result_n);
}
- /* Obvious single-entry cases */
- if (0 /* to aid macros below */
- || PyLong_CheckExact(index)
- || index == Py_None
- || PySlice_Check(index)
- || PyArray_Check(index)
- || !PySequence_Check(index)
- || PyUnicode_Check(index)) {
-
- return unpack_scalar(index, result, result_n);
- }
-
/*
* Passing a tuple subclass - coerce to the base type. This incurs an
- * allocation, but doesn't need to be a fast path anyway
+ * allocation, but doesn't need to be a fast path anyway. Note that by
+ * calling `PySequence_Tuple`, we ensure that the subclass `__iter__` is
+ * called.
*/
if (PyTuple_Check(index)) {
PyTupleObject *tup = (PyTupleObject *) PySequence_Tuple(index);
if (tup == NULL) {
return -1;
}
- n = unpack_tuple(tup, result, result_n);
+ npy_intp n = unpack_tuple(tup, result, result_n);
Py_DECREF(tup);
return n;
}
- /*
- * At this point, we're left with a non-tuple, non-array, sequence:
- * typically, a list. We use some somewhat-arbitrary heuristics from here
- * onwards to decided whether to treat that list as a single index, or a
- * list of indices.
- */
-
- /* if len fails, treat like a scalar */
- n = PySequence_Size(index);
- if (n < 0) {
- PyErr_Clear();
- return unpack_scalar(index, result, result_n);
- }
-
- /*
- * Backwards compatibility only takes effect for short sequences - otherwise
- * we treat it like any other scalar.
- *
- * Sequences < NPY_MAXDIMS with any slice objects
- * or newaxis, Ellipsis or other arrays or sequences
- * embedded, are considered equivalent to an indexing
- * tuple. (`a[[[1,2], [3,4]]] == a[[1,2], [3,4]]`)
- */
- if (n >= NPY_MAXDIMS) {
- return unpack_scalar(index, result, result_n);
- }
-
- /* In case we change result_n elsewhere */
- assert(n <= result_n);
-
- /*
- * Some other type of short sequence - assume we should unpack it like a
- * tuple, and then decide whether that was actually necessary.
- */
- commit_to_unpack = 0;
- for (i = 0; i < n; i++) {
- PyObject *tmp_obj = result[i] = PySequence_GetItem(index, i);
-
- if (commit_to_unpack) {
- /* propagate errors */
- if (tmp_obj == NULL) {
- goto fail;
- }
- }
- else {
- /*
- * if getitem fails (unusual) before we've committed, then stop
- * unpacking
- */
- if (tmp_obj == NULL) {
- PyErr_Clear();
- break;
- }
-
- /* decide if we should treat this sequence like a tuple */
- if (PyArray_Check(tmp_obj)
- || PySequence_Check(tmp_obj)
- || PySlice_Check(tmp_obj)
- || tmp_obj == Py_Ellipsis
- || tmp_obj == Py_None) {
- if (DEPRECATE_FUTUREWARNING(
- "Using a non-tuple sequence for multidimensional "
- "indexing is deprecated; use `arr[tuple(seq)]` "
- "instead of `arr[seq]`. In the future this will be "
- "interpreted as an array index, `arr[np.array(seq)]`, "
- "which will result either in an error or a different "
- "result.") < 0) {
- i++; /* since loop update doesn't run */
- goto fail;
- }
- commit_to_unpack = 1;
- }
- }
- }
-
- /* unpacking was the right thing to do, and we already did it */
- if (commit_to_unpack) {
- return n;
- }
- /* got to the end, never found an indication that we should have unpacked */
- else {
- /* we partially filled result, so empty it first */
- multi_DECREF(result, i);
- return unpack_scalar(index, result, result_n);
- }
-
-fail:
- multi_DECREF(result, i);
- return -1;
+ return unpack_scalar(index, result, result_n);
}
/**
diff --git a/numpy/core/src/multiarray/methods.c b/numpy/core/src/multiarray/methods.c
index 792686cf0..bd6318206 100644
--- a/numpy/core/src/multiarray/methods.c
+++ b/numpy/core/src/multiarray/methods.c
@@ -2205,7 +2205,7 @@ array_setstate(PyArrayObject *self, PyObject *args)
Py_DECREF(typecode);
}
else {
- memcpy(PyArray_DATA(self), datastr, num);
+ memcpy(PyArray_DATA(self), datastr, PyArray_NBYTES(self));
}
PyArray_ENABLEFLAGS(self, NPY_ARRAY_OWNDATA);
fa->base = NULL;
diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c
index a7b6898e1..12923a6c6 100644
--- a/numpy/core/src/multiarray/multiarraymodule.c
+++ b/numpy/core/src/multiarray/multiarraymodule.c
@@ -1533,8 +1533,9 @@ PyArray_EquivTypenums(int typenum1, int typenum2)
/*** END C-API FUNCTIONS **/
/*
- * NPY_RELAXED_STRIDES_CHECKING: If the strides logic is changed, the
- * order specific stride setting is not necessary.
+ * NOTE: The order specific stride setting is not necessary to preserve
+ * contiguity and could be removed. However, this way the resulting
+ * strides look better for Fortran order inputs.
*/
static NPY_STEALS_REF_TO_ARG(1) PyObject *
_prepend_ones(PyArrayObject *arr, int nd, int ndmin, NPY_ORDER order)
diff --git a/numpy/core/src/multiarray/shape.c b/numpy/core/src/multiarray/shape.c
index 162abd6a4..98f65415b 100644
--- a/numpy/core/src/multiarray/shape.c
+++ b/numpy/core/src/multiarray/shape.c
@@ -244,11 +244,9 @@ PyArray_Newshape(PyArrayObject *self, PyArray_Dims *newdims,
* in order to get the right orientation and
* because we can't just re-use the buffer with the
* data in the order it is in.
- * NPY_RELAXED_STRIDES_CHECKING: size check is unnecessary when set.
*/
Py_INCREF(self);
- if ((PyArray_SIZE(self) > 1) &&
- ((order == NPY_CORDER && !PyArray_IS_C_CONTIGUOUS(self)) ||
+ if (((order == NPY_CORDER && !PyArray_IS_C_CONTIGUOUS(self)) ||
(order == NPY_FORTRANORDER && !PyArray_IS_F_CONTIGUOUS(self)))) {
int success = 0;
success = _attempt_nocopy_reshape(self, ndim, dimensions,
@@ -1000,7 +998,6 @@ PyArray_Flatten(PyArrayObject *a, NPY_ORDER order)
* If an axis flagged for removal has a shape larger than one,
* the aligned flag (and in the future the contiguous flags),
* may need explicit update.
- * (check also NPY_RELAXED_STRIDES_CHECKING)
*
* For example, this can be used to remove the reduction axes
* from a reduction result once its computation is complete.
@@ -1024,6 +1021,6 @@ PyArray_RemoveAxesInPlace(PyArrayObject *arr, const npy_bool *flags)
/* The final number of dimensions */
fa->nd = idim_out;
- /* May not be necessary for NPY_RELAXED_STRIDES_CHECKING (see comment) */
+ /* NOTE: This is only necessary if a dimension with size != 1 was removed */
PyArray_UpdateFlags(arr, NPY_ARRAY_C_CONTIGUOUS | NPY_ARRAY_F_CONTIGUOUS);
}
diff --git a/numpy/core/src/multiarray/textreading/growth.h b/numpy/core/src/multiarray/textreading/growth.h
index 237b77ad3..c7ebe3651 100644
--- a/numpy/core/src/multiarray/textreading/growth.h
+++ b/numpy/core/src/multiarray/textreading/growth.h
@@ -1,7 +1,15 @@
#ifndef NUMPY_CORE_SRC_MULTIARRAY_TEXTREADING_GROWTH_H_
#define NUMPY_CORE_SRC_MULTIARRAY_TEXTREADING_GROWTH_H_
+#ifdef __cplusplus
+extern "C" {
+#endif
+
NPY_NO_EXPORT npy_intp
grow_size_and_multiply(npy_intp *size, npy_intp min_grow, npy_intp itemsize);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* NUMPY_CORE_SRC_MULTIARRAY_TEXTREADING_GROWTH_H_ */
diff --git a/numpy/core/src/multiarray/textreading/parser_config.h b/numpy/core/src/multiarray/textreading/parser_config.h
index 00e911667..67b5c8483 100644
--- a/numpy/core/src/multiarray/textreading/parser_config.h
+++ b/numpy/core/src/multiarray/textreading/parser_config.h
@@ -4,6 +4,10 @@
#include <stdbool.h>
+#ifdef __cplusplus
+extern "C" {
+#endif
+
typedef struct {
/*
* Field delimiter character.
@@ -58,4 +62,8 @@ typedef struct {
} parser_config;
+#ifdef __cplusplus
+}
+#endif
+
#endif /* NUMPY_CORE_SRC_MULTIARRAY_TEXTREADING_PARSER_CONFIG_H_ */
diff --git a/numpy/core/src/multiarray/textreading/readtext.h b/numpy/core/src/multiarray/textreading/readtext.h
index 5cf48c555..133c7883e 100644
--- a/numpy/core/src/multiarray/textreading/readtext.h
+++ b/numpy/core/src/multiarray/textreading/readtext.h
@@ -2,6 +2,7 @@
#define NUMPY_CORE_SRC_MULTIARRAY_TEXTREADING_READTEXT_H_
NPY_NO_EXPORT PyObject *
-_load_from_filelike(PyObject *self, PyObject *args, PyObject *kwargs);
+_load_from_filelike(PyObject *mod,
+ PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames);
#endif /* NUMPY_CORE_SRC_MULTIARRAY_TEXTREADING_READTEXT_H_ */
diff --git a/numpy/core/src/multiarray/textreading/stream.h b/numpy/core/src/multiarray/textreading/stream.h
index 59bd14074..42ca654db 100644
--- a/numpy/core/src/multiarray/textreading/stream.h
+++ b/numpy/core/src/multiarray/textreading/stream.h
@@ -3,6 +3,10 @@
#include <stdint.h>
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/*
* When getting the next line, we hope that the buffer provider can already
* give some information about the newlines, because for Python iterables
@@ -38,4 +42,8 @@ typedef struct _stream {
((s)->stream_nextbuf((s), start, end, kind))
#define stream_close(s) ((s)->stream_close((s)))
+#ifdef __cplusplus
+}
+#endif
+
#endif /* NUMPY_CORE_SRC_MULTIARRAY_TEXTREADING_STREAM_H_ */
diff --git a/numpy/core/src/multiarray/textreading/tokenize.c.src b/numpy/core/src/multiarray/textreading/tokenize.cpp
index 6ddba3345..b6d9f882b 100644
--- a/numpy/core/src/multiarray/textreading/tokenize.c.src
+++ b/numpy/core/src/multiarray/textreading/tokenize.cpp
@@ -1,11 +1,6 @@
#include <Python.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <stdbool.h>
-#include <string.h>
-
#define NPY_NO_DEPRECATED_API NPY_API_VERSION
#define _MULTIARRAYMODULE
#include "numpy/ndarraytypes.h"
@@ -15,7 +10,6 @@
#include "textreading/parser_config.h"
#include "textreading/growth.h"
-
/*
How parsing quoted fields works:
@@ -45,12 +39,10 @@
*/
-/**begin repeat
- * #type = Py_UCS1, Py_UCS2, Py_UCS4#
- */
+template <typename UCS>
static NPY_INLINE int
-copy_to_field_buffer_@type@(tokenizer_state *ts,
- const @type@ *chunk_start, const @type@ *chunk_end)
+copy_to_field_buffer(tokenizer_state *ts,
+ const UCS *chunk_start, const UCS *chunk_end)
{
npy_intp chunk_length = chunk_end - chunk_start;
npy_intp size = chunk_length + ts->field_buffer_pos + 2;
@@ -62,8 +54,8 @@ copy_to_field_buffer_@type@(tokenizer_state *ts,
"line too long to handle while reading file.");
return -1;
}
- Py_UCS4 *grown = PyMem_Realloc(ts->field_buffer, alloc_size);
- if (grown == NULL) {
+ Py_UCS4 *grown = (Py_UCS4 *)PyMem_Realloc(ts->field_buffer, alloc_size);
+ if (grown == nullptr) {
PyErr_NoMemory();
return -1;
}
@@ -79,7 +71,6 @@ copy_to_field_buffer_@type@(tokenizer_state *ts,
ts->field_buffer_pos += chunk_length;
return 0;
}
-/**end repeat**/
static NPY_INLINE int
@@ -99,8 +90,8 @@ add_field(tokenizer_state *ts)
"too many columns found; cannot read file.");
return -1;
}
- field_info *fields = PyMem_Realloc(ts->fields, alloc_size);
- if (fields == NULL) {
+ field_info *fields = (field_info *)PyMem_Realloc(ts->fields, alloc_size);
+ if (fields == nullptr) {
PyErr_NoMemory();
return -1;
}
@@ -117,16 +108,13 @@ add_field(tokenizer_state *ts)
}
-/**begin repeat
- * #kind = PyUnicode_1BYTE_KIND, PyUnicode_2BYTE_KIND, PyUnicode_4BYTE_KIND#
- * #type = Py_UCS1, Py_UCS2, Py_UCS4#
- */
+template <typename UCS>
static NPY_INLINE int
-tokenizer_core_@type@(tokenizer_state *ts, parser_config *const config)
+tokenizer_core(tokenizer_state *ts, parser_config *const config)
{
- @type@ *pos = (@type@ *)ts->pos;
- @type@ *stop = (@type@ *)ts->end;
- @type@ *chunk_start;
+ UCS *pos = (UCS *)ts->pos;
+ UCS *stop = (UCS *)ts->end;
+ UCS *chunk_start;
if (ts->state == TOKENIZE_CHECK_QUOTED) {
/* before we can check for quotes, strip leading whitespace */
@@ -174,7 +162,7 @@ tokenizer_core_@type@(tokenizer_state *ts, parser_config *const config)
break;
}
}
- if (copy_to_field_buffer_@type@(ts, chunk_start, pos) < 0) {
+ if (copy_to_field_buffer(ts, chunk_start, pos) < 0) {
return -1;
}
pos++;
@@ -201,7 +189,7 @@ tokenizer_core_@type@(tokenizer_state *ts, parser_config *const config)
break;
}
}
- if (copy_to_field_buffer_@type@(ts, chunk_start, pos) < 0) {
+ if (copy_to_field_buffer(ts, chunk_start, pos) < 0) {
return -1;
}
pos++;
@@ -215,7 +203,7 @@ tokenizer_core_@type@(tokenizer_state *ts, parser_config *const config)
break;
}
}
- if (copy_to_field_buffer_@type@(ts, chunk_start, pos) < 0) {
+ if (copy_to_field_buffer(ts, chunk_start, pos) < 0) {
return -1;
}
pos++;
@@ -224,7 +212,7 @@ tokenizer_core_@type@(tokenizer_state *ts, parser_config *const config)
case TOKENIZE_QUOTED_CHECK_DOUBLE_QUOTE:
if (*pos == config->quote) {
/* Copy the quote character directly from the config: */
- if (copy_to_field_buffer_Py_UCS4(ts,
+ if (copy_to_field_buffer(ts,
&config->quote, &config->quote+1) < 0) {
return -1;
}
@@ -271,7 +259,6 @@ tokenizer_core_@type@(tokenizer_state *ts, parser_config *const config)
ts->pos = (char *)pos;
return 0;
}
-/**end repeat**/
/*
@@ -308,7 +295,7 @@ tokenize(stream *s, tokenizer_state *ts, parser_config *const config)
ts->field_buffer_pos = 0;
ts->num_fields = 0;
- while (1) {
+ while (true) {
/*
* This loop adds new fields to the result (to make up a full row)
* until the row ends (typically a line end or the file end)
@@ -352,14 +339,14 @@ tokenize(stream *s, tokenizer_state *ts, parser_config *const config)
}
int status;
if (ts->unicode_kind == PyUnicode_1BYTE_KIND) {
- status = tokenizer_core_Py_UCS1(ts, config);
+ status = tokenizer_core<Py_UCS1>(ts, config);
}
else if (ts->unicode_kind == PyUnicode_2BYTE_KIND) {
- status = tokenizer_core_Py_UCS2(ts, config);
+ status = tokenizer_core<Py_UCS2>(ts, config);
}
else {
assert(ts->unicode_kind == PyUnicode_4BYTE_KIND);
- status = tokenizer_core_Py_UCS4(ts, config);
+ status = tokenizer_core<Py_UCS4>(ts, config);
}
if (status < 0) {
return -1;
@@ -391,13 +378,22 @@ tokenize(stream *s, tokenizer_state *ts, parser_config *const config)
ts->num_fields -= 1;
/*
- * If have one field, but that field is completely empty, this is an
- * empty line, and we just ignore it.
+ * We always start a new field (at the very beginning and whenever a
+ * delimiter was found).
+ * This gives us two scenarios where we need to ignore the last field
+ * if it is empty:
+ * 1. If there is exactly one empty (unquoted) field, the whole line is
+ * empty.
+ * 2. If we are splitting on whitespace we always ignore a last empty
+ * field to match Python's splitting: `" 1 ".split()`.
*/
if (ts->num_fields == 1
- && ts->fields[1].offset - ts->fields[0].offset == 1
- && !ts->fields->quoted) {
- ts->num_fields--;
+ || ts->unquoted_state == TOKENIZE_UNQUOTED_WHITESPACE) {
+ size_t offset_last = ts->fields[ts->num_fields-1].offset;
+ size_t end_last = ts->fields[ts->num_fields].offset;
+ if (!ts->fields->quoted && end_last - offset_last == 1) {
+ ts->num_fields--;
+ }
}
ts->state = TOKENIZE_INIT;
return finished_reading_file;
@@ -408,11 +404,11 @@ NPY_NO_EXPORT void
tokenizer_clear(tokenizer_state *ts)
{
PyMem_FREE(ts->field_buffer);
- ts->field_buffer = NULL;
+ ts->field_buffer = nullptr;
ts->field_buffer_length = 0;
PyMem_FREE(ts->fields);
- ts->fields = NULL;
+ ts->fields = nullptr;
ts->fields_size = 0;
}
@@ -437,18 +433,18 @@ tokenizer_init(tokenizer_state *ts, parser_config *config)
ts->num_fields = 0;
ts->buf_state = 0;
- ts->pos = NULL;
- ts->end = NULL;
+ ts->pos = nullptr;
+ ts->end = nullptr;
- ts->field_buffer = PyMem_Malloc(32 * sizeof(Py_UCS4));
- if (ts->field_buffer == NULL) {
+ ts->field_buffer = (Py_UCS4 *)PyMem_Malloc(32 * sizeof(Py_UCS4));
+ if (ts->field_buffer == nullptr) {
PyErr_NoMemory();
return -1;
}
ts->field_buffer_length = 32;
- ts->fields = PyMem_Malloc(4 * sizeof(*ts->fields));
- if (ts->fields == NULL) {
+ ts->fields = (field_info *)PyMem_Malloc(4 * sizeof(*ts->fields));
+ if (ts->fields == nullptr) {
PyErr_NoMemory();
return -1;
}
diff --git a/numpy/core/src/multiarray/textreading/tokenize.h b/numpy/core/src/multiarray/textreading/tokenize.h
index fa10bb9b0..a78c6d936 100644
--- a/numpy/core/src/multiarray/textreading/tokenize.h
+++ b/numpy/core/src/multiarray/textreading/tokenize.h
@@ -8,6 +8,10 @@
#include "textreading/stream.h"
#include "textreading/parser_config.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
typedef enum {
/* Initialization of fields */
@@ -75,4 +79,8 @@ tokenizer_init(tokenizer_state *ts, parser_config *config);
NPY_NO_EXPORT int
tokenize(stream *s, tokenizer_state *ts, parser_config *const config);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* NUMPY_CORE_SRC_MULTIARRAY_TEXTREADING_TOKENIZE_H_ */
diff --git a/numpy/core/src/npysort/binsearch.cpp b/numpy/core/src/npysort/binsearch.cpp
index cd5f03470..8dd72c094 100644
--- a/numpy/core/src/npysort/binsearch.cpp
+++ b/numpy/core/src/npysort/binsearch.cpp
@@ -2,46 +2,65 @@
#define NPY_NO_DEPRECATED_API NPY_API_VERSION
-#include "npy_sort.h"
-#include "numpy_tag.h"
-#include <numpy/npy_common.h>
-#include <numpy/ndarraytypes.h>
+#include "numpy/ndarraytypes.h"
+#include "numpy/npy_common.h"
#include "npy_binsearch.h"
+#include "npy_sort.h"
+#include "numpy_tag.h"
#include <array>
#include <functional> // for std::less and std::less_equal
// Enumerators for the variant of binsearch
-enum arg_t { noarg, arg};
-enum side_t { left, right};
+enum arg_t
+{
+ noarg,
+ arg
+};
+enum side_t
+{
+ left,
+ right
+};
// Mapping from enumerators to comparators
-template<class Tag, side_t side>
+template <class Tag, side_t side>
struct side_to_cmp;
-template<class Tag>
-struct side_to_cmp<Tag, left> { static constexpr auto value = Tag::less; };
-template<class Tag>
-struct side_to_cmp<Tag, right> { static constexpr auto value = Tag::less_equal; };
-template<side_t side>
+template <class Tag>
+struct side_to_cmp<Tag, left> {
+ static constexpr auto value = Tag::less;
+};
+
+template <class Tag>
+struct side_to_cmp<Tag, right> {
+ static constexpr auto value = Tag::less_equal;
+};
+
+template <side_t side>
struct side_to_generic_cmp;
-template<>
-struct side_to_generic_cmp<left> { using type = std::less<int>; };
-template<>
-struct side_to_generic_cmp<right> { using type = std::less_equal<int>; };
+
+template <>
+struct side_to_generic_cmp<left> {
+ using type = std::less<int>;
+};
+
+template <>
+struct side_to_generic_cmp<right> {
+ using type = std::less_equal<int>;
+};
/*
*****************************************************************************
** NUMERIC SEARCHES **
*****************************************************************************
*/
-template<class Tag, side_t side>
+template <class Tag, side_t side>
static void
-binsearch(const char *arr, const char *key, char *ret,
- npy_intp arr_len, npy_intp key_len,
- npy_intp arr_str, npy_intp key_str, npy_intp ret_str,
- PyArrayObject*)
+binsearch(const char *arr, const char *key, char *ret, npy_intp arr_len,
+ npy_intp key_len, npy_intp arr_str, npy_intp key_str,
+ npy_intp ret_str, PyArrayObject *)
{
using T = typename Tag::type;
auto cmp = side_to_cmp<Tag, side>::value;
@@ -73,7 +92,7 @@ binsearch(const char *arr, const char *key, char *ret,
while (min_idx < max_idx) {
const npy_intp mid_idx = min_idx + ((max_idx - min_idx) >> 1);
- const T mid_val = *(const T *)(arr + mid_idx*arr_str);
+ const T mid_val = *(const T *)(arr + mid_idx * arr_str);
if (cmp(mid_val, key_val)) {
min_idx = mid_idx + 1;
}
@@ -85,13 +104,12 @@ binsearch(const char *arr, const char *key, char *ret,
}
}
-template<class Tag, side_t side>
+template <class Tag, side_t side>
static int
-argbinsearch(const char *arr, const char *key,
- const char *sort, char *ret,
- npy_intp arr_len, npy_intp key_len,
- npy_intp arr_str, npy_intp key_str,
- npy_intp sort_str, npy_intp ret_str, PyArrayObject*)
+argbinsearch(const char *arr, const char *key, const char *sort, char *ret,
+ npy_intp arr_len, npy_intp key_len, npy_intp arr_str,
+ npy_intp key_str, npy_intp sort_str, npy_intp ret_str,
+ PyArrayObject *)
{
using T = typename Tag::type;
auto cmp = side_to_cmp<Tag, side>::value;
@@ -123,14 +141,14 @@ argbinsearch(const char *arr, const char *key,
while (min_idx < max_idx) {
const npy_intp mid_idx = min_idx + ((max_idx - min_idx) >> 1);
- const npy_intp sort_idx = *(npy_intp *)(sort + mid_idx*sort_str);
+ const npy_intp sort_idx = *(npy_intp *)(sort + mid_idx * sort_str);
T mid_val;
if (sort_idx < 0 || sort_idx >= arr_len) {
return -1;
}
- mid_val = *(const T *)(arr + sort_idx*arr_str);
+ mid_val = *(const T *)(arr + sort_idx * arr_str);
if (cmp(mid_val, key_val)) {
min_idx = mid_idx + 1;
@@ -150,12 +168,11 @@ argbinsearch(const char *arr, const char *key,
*****************************************************************************
*/
-template<side_t side>
+template <side_t side>
static void
-npy_binsearch(const char *arr, const char *key, char *ret,
- npy_intp arr_len, npy_intp key_len,
- npy_intp arr_str, npy_intp key_str, npy_intp ret_str,
- PyArrayObject *cmp)
+npy_binsearch(const char *arr, const char *key, char *ret, npy_intp arr_len,
+ npy_intp key_len, npy_intp arr_str, npy_intp key_str,
+ npy_intp ret_str, PyArrayObject *cmp)
{
using Cmp = typename side_to_generic_cmp<side>::type;
PyArray_CompareFunc *compare = PyArray_DESCR(cmp)->f->compare;
@@ -181,7 +198,7 @@ npy_binsearch(const char *arr, const char *key, char *ret,
while (min_idx < max_idx) {
const npy_intp mid_idx = min_idx + ((max_idx - min_idx) >> 1);
- const char *arr_ptr = arr + mid_idx*arr_str;
+ const char *arr_ptr = arr + mid_idx * arr_str;
if (Cmp{}(compare(arr_ptr, key, cmp), 0)) {
min_idx = mid_idx + 1;
@@ -194,14 +211,12 @@ npy_binsearch(const char *arr, const char *key, char *ret,
}
}
-template<side_t side>
+template <side_t side>
static int
-npy_argbinsearch(const char *arr, const char *key,
- const char *sort, char *ret,
- npy_intp arr_len, npy_intp key_len,
- npy_intp arr_str, npy_intp key_str,
- npy_intp sort_str, npy_intp ret_str,
- PyArrayObject *cmp)
+npy_argbinsearch(const char *arr, const char *key, const char *sort, char *ret,
+ npy_intp arr_len, npy_intp key_len, npy_intp arr_str,
+ npy_intp key_str, npy_intp sort_str, npy_intp ret_str,
+ PyArrayObject *cmp)
{
using Cmp = typename side_to_generic_cmp<side>::type;
PyArray_CompareFunc *compare = PyArray_DESCR(cmp)->f->compare;
@@ -227,14 +242,14 @@ npy_argbinsearch(const char *arr, const char *key,
while (min_idx < max_idx) {
const npy_intp mid_idx = min_idx + ((max_idx - min_idx) >> 1);
- const npy_intp sort_idx = *(npy_intp *)(sort + mid_idx*sort_str);
+ const npy_intp sort_idx = *(npy_intp *)(sort + mid_idx * sort_str);
const char *arr_ptr;
if (sort_idx < 0 || sort_idx >= arr_len) {
return -1;
}
- arr_ptr = arr + sort_idx*arr_str;
+ arr_ptr = arr + sort_idx * arr_str;
if (Cmp{}(compare(arr_ptr, key, cmp), 0)) {
min_idx = mid_idx + 1;
@@ -254,88 +269,86 @@ npy_argbinsearch(const char *arr, const char *key,
*****************************************************************************
*/
-template<arg_t arg>
+template <arg_t arg>
struct binsearch_base;
-template<>
+template <>
struct binsearch_base<arg> {
- using function_type = PyArray_ArgBinSearchFunc*;
+ using function_type = PyArray_ArgBinSearchFunc *;
struct value_type {
int typenum;
function_type binsearch[NPY_NSEARCHSIDES];
};
- template<class... Tags>
- static constexpr std::array<value_type, sizeof...(Tags)> make_binsearch_map(npy::taglist<Tags...>) {
+ template <class... Tags>
+ static constexpr std::array<value_type, sizeof...(Tags)>
+ make_binsearch_map(npy::taglist<Tags...>)
+ {
return std::array<value_type, sizeof...(Tags)>{
- value_type{
- Tags::type_value,
- {
- (function_type)&argbinsearch<Tags, left>,
- (function_type)argbinsearch<Tags, right>
- }
- }...
- };
+ value_type{Tags::type_value,
+ {(function_type)&argbinsearch<Tags, left>,
+ (function_type)argbinsearch<Tags, right>}}...};
}
static constexpr std::array<function_type, 2> npy_map = {
- (function_type)&npy_argbinsearch<left>,
- (function_type)&npy_argbinsearch<right>
- };
+ (function_type)&npy_argbinsearch<left>,
+ (function_type)&npy_argbinsearch<right>};
};
-constexpr std::array<binsearch_base<arg>::function_type, 2> binsearch_base<arg>::npy_map;
+constexpr std::array<binsearch_base<arg>::function_type, 2>
+ binsearch_base<arg>::npy_map;
-template<>
+template <>
struct binsearch_base<noarg> {
- using function_type = PyArray_BinSearchFunc*;
+ using function_type = PyArray_BinSearchFunc *;
struct value_type {
int typenum;
function_type binsearch[NPY_NSEARCHSIDES];
};
- template<class... Tags>
- static constexpr std::array<value_type, sizeof...(Tags)> make_binsearch_map(npy::taglist<Tags...>) {
+ template <class... Tags>
+ static constexpr std::array<value_type, sizeof...(Tags)>
+ make_binsearch_map(npy::taglist<Tags...>)
+ {
return std::array<value_type, sizeof...(Tags)>{
- value_type{
- Tags::type_value,
- {
- (function_type)&binsearch<Tags, left>,
- (function_type)binsearch<Tags, right>
- }
- }...
- };
+ value_type{Tags::type_value,
+ {(function_type)&binsearch<Tags, left>,
+ (function_type)binsearch<Tags, right>}}...};
}
static constexpr std::array<function_type, 2> npy_map = {
- (function_type)&npy_binsearch<left>,
- (function_type)&npy_binsearch<right>
- };
+ (function_type)&npy_binsearch<left>,
+ (function_type)&npy_binsearch<right>};
};
-constexpr std::array<binsearch_base<noarg>::function_type, 2> binsearch_base<noarg>::npy_map;
+constexpr std::array<binsearch_base<noarg>::function_type, 2>
+ binsearch_base<noarg>::npy_map;
// Handle generation of all binsearch variants
-template<arg_t arg>
+template <arg_t arg>
struct binsearch_t : binsearch_base<arg> {
using binsearch_base<arg>::make_binsearch_map;
using value_type = typename binsearch_base<arg>::value_type;
using taglist = npy::taglist<
- /* If adding new types, make sure to keep them ordered by type num */
- npy::bool_tag, npy::byte_tag, npy::ubyte_tag, npy::short_tag,
- npy::ushort_tag, npy::int_tag, npy::uint_tag, npy::long_tag,
- npy::ulong_tag, npy::longlong_tag, npy::ulonglong_tag, npy::half_tag,
- npy::float_tag, npy::double_tag, npy::longdouble_tag, npy::cfloat_tag,
- npy::cdouble_tag, npy::clongdouble_tag, npy::datetime_tag,
- npy::timedelta_tag>;
-
- static constexpr std::array<value_type, taglist::size> map = make_binsearch_map(taglist());
+ /* If adding new types, make sure to keep them ordered by type num
+ */
+ npy::bool_tag, npy::byte_tag, npy::ubyte_tag, npy::short_tag,
+ npy::ushort_tag, npy::int_tag, npy::uint_tag, npy::long_tag,
+ npy::ulong_tag, npy::longlong_tag, npy::ulonglong_tag,
+ npy::half_tag, npy::float_tag, npy::double_tag,
+ npy::longdouble_tag, npy::cfloat_tag, npy::cdouble_tag,
+ npy::clongdouble_tag, npy::datetime_tag, npy::timedelta_tag>;
+
+ static constexpr std::array<value_type, taglist::size> map =
+ make_binsearch_map(taglist());
};
-template<arg_t arg>
-constexpr std::array<typename binsearch_t<arg>::value_type, binsearch_t<arg>::taglist::size> binsearch_t<arg>::map;
+template <arg_t arg>
+constexpr std::array<typename binsearch_t<arg>::value_type,
+ binsearch_t<arg>::taglist::size>
+ binsearch_t<arg>::map;
-template<arg_t arg>
+template <arg_t arg>
static NPY_INLINE typename binsearch_t<arg>::function_type
_get_binsearch_func(PyArray_Descr *dtype, NPY_SEARCHSIDE side)
{
using binsearch = binsearch_t<arg>;
- npy_intp nfuncs = binsearch::map.size();;
+ npy_intp nfuncs = binsearch::map.size();
npy_intp min_idx = 0;
npy_intp max_idx = nfuncs;
int type = dtype->type_num;
@@ -359,8 +372,7 @@ _get_binsearch_func(PyArray_Descr *dtype, NPY_SEARCHSIDE side)
}
}
- if (min_idx < nfuncs &&
- binsearch::map[min_idx].typenum == type) {
+ if (min_idx < nfuncs && binsearch::map[min_idx].typenum == type) {
return binsearch::map[min_idx].binsearch[side];
}
@@ -371,17 +383,21 @@ _get_binsearch_func(PyArray_Descr *dtype, NPY_SEARCHSIDE side)
return NULL;
}
-
/*
*****************************************************************************
** C INTERFACE **
*****************************************************************************
*/
extern "C" {
- NPY_NO_EXPORT PyArray_BinSearchFunc* get_binsearch_func(PyArray_Descr *dtype, NPY_SEARCHSIDE side) {
- return _get_binsearch_func<noarg>(dtype, side);
- }
- NPY_NO_EXPORT PyArray_ArgBinSearchFunc* get_argbinsearch_func(PyArray_Descr *dtype, NPY_SEARCHSIDE side) {
- return _get_binsearch_func<arg>(dtype, side);
- }
+NPY_NO_EXPORT PyArray_BinSearchFunc *
+get_binsearch_func(PyArray_Descr *dtype, NPY_SEARCHSIDE side)
+{
+ return _get_binsearch_func<noarg>(dtype, side);
+}
+
+NPY_NO_EXPORT PyArray_ArgBinSearchFunc *
+get_argbinsearch_func(PyArray_Descr *dtype, NPY_SEARCHSIDE side)
+{
+ return _get_binsearch_func<arg>(dtype, side);
+}
}
diff --git a/numpy/core/src/npysort/selection.cpp b/numpy/core/src/npysort/selection.cpp
index 8eb4a57aa..7fd04a660 100644
--- a/numpy/core/src/npysort/selection.cpp
+++ b/numpy/core/src/npysort/selection.cpp
@@ -22,6 +22,7 @@
#include "npysort_common.h"
#include "numpy_tag.h"
+#include <array>
#include <cstdlib>
#include <utility>
@@ -110,13 +111,16 @@ median3_swap_(type *v, npy_intp *tosort, npy_intp low, npy_intp mid,
Idx<arg> idx(tosort);
Sortee<type, arg> sortee(v, tosort);
- if (Tag::less(v[idx(high)], v[idx(mid)]))
+ if (Tag::less(v[idx(high)], v[idx(mid)])) {
std::swap(sortee(high), sortee(mid));
- if (Tag::less(v[idx(high)], v[idx(low)]))
+ }
+ if (Tag::less(v[idx(high)], v[idx(low)])) {
std::swap(sortee(high), sortee(low));
+ }
/* move pivot to low */
- if (Tag::less(v[idx(low)], v[idx(mid)]))
+ if (Tag::less(v[idx(low)], v[idx(mid)])) {
std::swap(sortee(low), sortee(mid));
+ }
/* move 3-lowest element to low + 1 */
std::swap(sortee(mid), sortee(low + 1));
}
@@ -174,13 +178,16 @@ unguarded_partition_(type *v, npy_intp *tosort, const type pivot, npy_intp *ll,
Sortee<type, arg> sortee(v, tosort);
for (;;) {
- do (*ll)++;
- while (Tag::less(v[idx(*ll)], pivot));
- do (*hh)--;
- while (Tag::less(pivot, v[idx(*hh)]));
-
- if (*hh < *ll)
+ do {
+ (*ll)++;
+ } while (Tag::less(v[idx(*ll)], pivot));
+ do {
+ (*hh)--;
+ } while (Tag::less(pivot, v[idx(*hh)]));
+
+ if (*hh < *ll) {
break;
+ }
std::swap(sortee(*ll), sortee(*hh));
}
@@ -208,8 +215,9 @@ median_of_median5_(type *v, npy_intp *tosort, const npy_intp num,
std::swap(sortee(subleft + m), sortee(i));
}
- if (nmed > 2)
+ if (nmed > 2) {
introselect_<Tag, arg>(v, tosort, nmed, nmed / 2, pivots, npiv);
+ }
return nmed / 2;
}
@@ -267,8 +275,9 @@ introselect_(type *v, npy_intp *tosort, npy_intp num, npy_intp kth,
npy_intp high = num - 1;
int depth_limit;
- if (npiv == NULL)
+ if (npiv == NULL) {
pivots = NULL;
+ }
while (pivots != NULL && *npiv > 0) {
if (pivots[*npiv - 1] > kth) {
@@ -360,10 +369,12 @@ introselect_(type *v, npy_intp *tosort, npy_intp num, npy_intp kth,
store_pivot(hh, kth, pivots, npiv);
}
- if (hh >= kth)
+ if (hh >= kth) {
high = hh - 1;
- if (hh <= kth)
+ }
+ if (hh <= kth) {
low = ll;
+ }
}
/* two elements */
@@ -377,269 +388,105 @@ introselect_(type *v, npy_intp *tosort, npy_intp num, npy_intp kth,
return 0;
}
-/***************************************
- * C > C++ dispatch
- ***************************************/
+/*
+ *****************************************************************************
+ ** GENERATOR **
+ *****************************************************************************
+ */
-extern "C" {
-NPY_NO_EXPORT int
-introselect_bool(npy_bool *v, npy_intp num, npy_intp kth, npy_intp *pivots,
- npy_intp *npiv, void *NOT_USED)
-{
- return introselect_<npy::bool_tag, false>(v, nullptr, num, kth, pivots,
- npiv);
-}
-NPY_NO_EXPORT int
-introselect_byte(npy_byte *v, npy_intp num, npy_intp kth, npy_intp *pivots,
- npy_intp *npiv, void *NOT_USED)
-{
- return introselect_<npy::byte_tag, false>(v, nullptr, num, kth, pivots,
- npiv);
-}
-NPY_NO_EXPORT int
-introselect_ubyte(npy_ubyte *v, npy_intp num, npy_intp kth, npy_intp *pivots,
- npy_intp *npiv, void *NOT_USED)
-{
- return introselect_<npy::ubyte_tag, false>(v, nullptr, num, kth, pivots,
- npiv);
-}
-NPY_NO_EXPORT int
-introselect_short(npy_short *v, npy_intp num, npy_intp kth, npy_intp *pivots,
- npy_intp *npiv, void *NOT_USED)
-{
- return introselect_<npy::short_tag, false>(v, nullptr, num, kth, pivots,
- npiv);
-}
-NPY_NO_EXPORT int
-introselect_ushort(npy_ushort *v, npy_intp num, npy_intp kth, npy_intp *pivots,
- npy_intp *npiv, void *NOT_USED)
-{
- return introselect_<npy::ushort_tag, false>(v, nullptr, num, kth, pivots,
- npiv);
-}
-NPY_NO_EXPORT int
-introselect_int(npy_int *v, npy_intp num, npy_intp kth, npy_intp *pivots,
- npy_intp *npiv, void *NOT_USED)
-{
- return introselect_<npy::int_tag, false>(v, nullptr, num, kth, pivots,
- npiv);
-}
-NPY_NO_EXPORT int
-introselect_uint(npy_uint *v, npy_intp num, npy_intp kth, npy_intp *pivots,
- npy_intp *npiv, void *NOT_USED)
-{
- return introselect_<npy::uint_tag, false>(v, nullptr, num, kth, pivots,
- npiv);
-}
-NPY_NO_EXPORT int
-introselect_long(npy_long *v, npy_intp num, npy_intp kth, npy_intp *pivots,
- npy_intp *npiv, void *NOT_USED)
-{
- return introselect_<npy::long_tag, false>(v, nullptr, num, kth, pivots,
- npiv);
-}
-NPY_NO_EXPORT int
-introselect_ulong(npy_ulong *v, npy_intp num, npy_intp kth, npy_intp *pivots,
- npy_intp *npiv, void *NOT_USED)
-{
- return introselect_<npy::ulong_tag, false>(v, nullptr, num, kth, pivots,
- npiv);
-}
-NPY_NO_EXPORT int
-introselect_longlong(npy_longlong *v, npy_intp num, npy_intp kth,
- npy_intp *pivots, npy_intp *npiv, void *NOT_USED)
-{
- return introselect_<npy::longlong_tag, false>(v, nullptr, num, kth, pivots,
- npiv);
-}
-NPY_NO_EXPORT int
-introselect_ulonglong(npy_ulonglong *v, npy_intp num, npy_intp kth,
- npy_intp *pivots, npy_intp *npiv, void *NOT_USED)
-{
- return introselect_<npy::ulonglong_tag, false>(v, nullptr, num, kth,
- pivots, npiv);
-}
-NPY_NO_EXPORT int
-introselect_half(npy_half *v, npy_intp num, npy_intp kth, npy_intp *pivots,
- npy_intp *npiv, void *NOT_USED)
-{
- return introselect_<npy::half_tag, false>(v, nullptr, num, kth, pivots,
- npiv);
-}
-NPY_NO_EXPORT int
-introselect_float(npy_float *v, npy_intp num, npy_intp kth, npy_intp *pivots,
- npy_intp *npiv, void *NOT_USED)
-{
- return introselect_<npy::float_tag, false>(v, nullptr, num, kth, pivots,
- npiv);
-}
-NPY_NO_EXPORT int
-introselect_double(npy_double *v, npy_intp num, npy_intp kth, npy_intp *pivots,
- npy_intp *npiv, void *NOT_USED)
-{
- return introselect_<npy::double_tag, false>(v, nullptr, num, kth, pivots,
- npiv);
-}
-NPY_NO_EXPORT int
-introselect_longdouble(npy_longdouble *v, npy_intp num, npy_intp kth,
- npy_intp *pivots, npy_intp *npiv, void *NOT_USED)
-{
- return introselect_<npy::longdouble_tag, false>(v, nullptr, num, kth,
- pivots, npiv);
-}
-NPY_NO_EXPORT int
-introselect_cfloat(npy_cfloat *v, npy_intp num, npy_intp kth, npy_intp *pivots,
- npy_intp *npiv, void *NOT_USED)
-{
- return introselect_<npy::cfloat_tag, false>(v, nullptr, num, kth, pivots,
- npiv);
-}
-NPY_NO_EXPORT int
-introselect_cdouble(npy_cdouble *v, npy_intp num, npy_intp kth,
- npy_intp *pivots, npy_intp *npiv, void *NOT_USED)
-{
- return introselect_<npy::cdouble_tag, false>(v, nullptr, num, kth, pivots,
- npiv);
-}
-NPY_NO_EXPORT int
-introselect_clongdouble(npy_clongdouble *v, npy_intp num, npy_intp kth,
- npy_intp *pivots, npy_intp *npiv, void *NOT_USED)
+template <typename Tag>
+static int
+introselect_noarg(void *v, npy_intp num, npy_intp kth, npy_intp *pivots,
+ npy_intp *npiv, void *)
{
- return introselect_<npy::clongdouble_tag, false>(v, nullptr, num, kth,
- pivots, npiv);
+ return introselect_<Tag, false>((typename Tag::type *)v, nullptr, num, kth,
+ pivots, npiv);
}
-NPY_NO_EXPORT int
-aintroselect_bool(npy_bool *v, npy_intp *tosort, npy_intp num, npy_intp kth,
- npy_intp *pivots, npy_intp *npiv, void *NOT_USED)
-{
- return introselect_<npy::bool_tag, true>(v, tosort, num, kth, pivots,
- npiv);
-}
-NPY_NO_EXPORT int
-aintroselect_byte(npy_byte *v, npy_intp *tosort, npy_intp num, npy_intp kth,
- npy_intp *pivots, npy_intp *npiv, void *NOT_USED)
-{
- return introselect_<npy::byte_tag, true>(v, tosort, num, kth, pivots,
- npiv);
-}
-NPY_NO_EXPORT int
-aintroselect_ubyte(npy_ubyte *v, npy_intp *tosort, npy_intp num, npy_intp kth,
- npy_intp *pivots, npy_intp *npiv, void *NOT_USED)
-{
- return introselect_<npy::ubyte_tag, true>(v, tosort, num, kth, pivots,
- npiv);
-}
-NPY_NO_EXPORT int
-aintroselect_short(npy_short *v, npy_intp *tosort, npy_intp num, npy_intp kth,
- npy_intp *pivots, npy_intp *npiv, void *NOT_USED)
-{
- return introselect_<npy::short_tag, true>(v, tosort, num, kth, pivots,
- npiv);
-}
-NPY_NO_EXPORT int
-aintroselect_ushort(npy_ushort *v, npy_intp *tosort, npy_intp num,
- npy_intp kth, npy_intp *pivots, npy_intp *npiv,
- void *NOT_USED)
-{
- return introselect_<npy::ushort_tag, true>(v, tosort, num, kth, pivots,
- npiv);
-}
-NPY_NO_EXPORT int
-aintroselect_int(npy_int *v, npy_intp *tosort, npy_intp num, npy_intp kth,
- npy_intp *pivots, npy_intp *npiv, void *NOT_USED)
-{
- return introselect_<npy::int_tag, true>(v, tosort, num, kth, pivots, npiv);
-}
-NPY_NO_EXPORT int
-aintroselect_uint(npy_uint *v, npy_intp *tosort, npy_intp num, npy_intp kth,
- npy_intp *pivots, npy_intp *npiv, void *NOT_USED)
-{
- return introselect_<npy::uint_tag, true>(v, tosort, num, kth, pivots,
- npiv);
-}
-NPY_NO_EXPORT int
-aintroselect_long(npy_long *v, npy_intp *tosort, npy_intp num, npy_intp kth,
- npy_intp *pivots, npy_intp *npiv, void *NOT_USED)
-{
- return introselect_<npy::long_tag, true>(v, tosort, num, kth, pivots,
- npiv);
-}
-NPY_NO_EXPORT int
-aintroselect_ulong(npy_ulong *v, npy_intp *tosort, npy_intp num, npy_intp kth,
- npy_intp *pivots, npy_intp *npiv, void *NOT_USED)
-{
- return introselect_<npy::ulong_tag, true>(v, tosort, num, kth, pivots,
- npiv);
-}
-NPY_NO_EXPORT int
-aintroselect_longlong(npy_longlong *v, npy_intp *tosort, npy_intp num,
- npy_intp kth, npy_intp *pivots, npy_intp *npiv,
- void *NOT_USED)
-{
- return introselect_<npy::longlong_tag, true>(v, tosort, num, kth, pivots,
- npiv);
-}
-NPY_NO_EXPORT int
-aintroselect_ulonglong(npy_ulonglong *v, npy_intp *tosort, npy_intp num,
- npy_intp kth, npy_intp *pivots, npy_intp *npiv,
- void *NOT_USED)
-{
- return introselect_<npy::ulonglong_tag, true>(v, tosort, num, kth, pivots,
- npiv);
-}
-NPY_NO_EXPORT int
-aintroselect_half(npy_half *v, npy_intp *tosort, npy_intp num, npy_intp kth,
- npy_intp *pivots, npy_intp *npiv, void *NOT_USED)
-{
- return introselect_<npy::half_tag, true>(v, tosort, num, kth, pivots,
- npiv);
-}
-NPY_NO_EXPORT int
-aintroselect_float(npy_float *v, npy_intp *tosort, npy_intp num, npy_intp kth,
- npy_intp *pivots, npy_intp *npiv, void *NOT_USED)
+template <typename Tag>
+static int
+introselect_arg(void *v, npy_intp *tosort, npy_intp num, npy_intp kth,
+ npy_intp *pivots, npy_intp *npiv, void *)
{
- return introselect_<npy::float_tag, true>(v, tosort, num, kth, pivots,
- npiv);
+ return introselect_<Tag, true>((typename Tag::type *)v, tosort, num, kth,
+ pivots, npiv);
}
-NPY_NO_EXPORT int
-aintroselect_double(npy_double *v, npy_intp *tosort, npy_intp num,
- npy_intp kth, npy_intp *pivots, npy_intp *npiv,
- void *NOT_USED)
+
+struct arg_map {
+ int typenum;
+ PyArray_PartitionFunc *part[NPY_NSELECTS];
+ PyArray_ArgPartitionFunc *argpart[NPY_NSELECTS];
+};
+
+template <class... Tags>
+static constexpr std::array<arg_map, sizeof...(Tags)>
+make_partition_map(npy::taglist<Tags...>)
{
- return introselect_<npy::double_tag, true>(v, tosort, num, kth, pivots,
- npiv);
+ return std::array<arg_map, sizeof...(Tags)>{
+ arg_map{Tags::type_value, &introselect_noarg<Tags>,
+ &introselect_arg<Tags>}...};
}
-NPY_NO_EXPORT int
-aintroselect_longdouble(npy_longdouble *v, npy_intp *tosort, npy_intp num,
- npy_intp kth, npy_intp *pivots, npy_intp *npiv,
- void *NOT_USED)
+
+struct partition_t {
+ using taglist =
+ npy::taglist<npy::bool_tag, npy::byte_tag, npy::ubyte_tag,
+ npy::short_tag, npy::ushort_tag, npy::int_tag,
+ npy::uint_tag, npy::long_tag, npy::ulong_tag,
+ npy::longlong_tag, npy::ulonglong_tag, npy::half_tag,
+ npy::float_tag, npy::double_tag, npy::longdouble_tag,
+ npy::cfloat_tag, npy::cdouble_tag,
+ npy::clongdouble_tag>;
+
+ static constexpr std::array<arg_map, taglist::size> map =
+ make_partition_map(taglist());
+};
+constexpr std::array<arg_map, partition_t::taglist::size> partition_t::map;
+
+static NPY_INLINE PyArray_PartitionFunc *
+_get_partition_func(int type, NPY_SELECTKIND which)
{
- return introselect_<npy::longdouble_tag, true>(v, tosort, num, kth, pivots,
- npiv);
+ npy_intp i;
+ npy_intp ntypes = partition_t::map.size();
+
+ if (which >= NPY_NSELECTS) {
+ return NULL;
+ }
+ for (i = 0; i < ntypes; i++) {
+ if (type == partition_t::map[i].typenum) {
+ return partition_t::map[i].part[which];
+ }
+ }
+ return NULL;
}
-NPY_NO_EXPORT int
-aintroselect_cfloat(npy_cfloat *v, npy_intp *tosort, npy_intp num,
- npy_intp kth, npy_intp *pivots, npy_intp *npiv,
- void *NOT_USED)
+
+static NPY_INLINE PyArray_ArgPartitionFunc *
+_get_argpartition_func(int type, NPY_SELECTKIND which)
{
- return introselect_<npy::cfloat_tag, true>(v, tosort, num, kth, pivots,
- npiv);
+ npy_intp i;
+ npy_intp ntypes = partition_t::map.size();
+
+ for (i = 0; i < ntypes; i++) {
+ if (type == partition_t::map[i].typenum) {
+ return partition_t::map[i].argpart[which];
+ }
+ }
+ return NULL;
}
-NPY_NO_EXPORT int
-aintroselect_cdouble(npy_cdouble *v, npy_intp *tosort, npy_intp num,
- npy_intp kth, npy_intp *pivots, npy_intp *npiv,
- void *NOT_USED)
+
+/*
+ *****************************************************************************
+ ** C INTERFACE **
+ *****************************************************************************
+ */
+extern "C" {
+NPY_NO_EXPORT PyArray_PartitionFunc *
+get_partition_func(int type, NPY_SELECTKIND which)
{
- return introselect_<npy::cdouble_tag, true>(v, tosort, num, kth, pivots,
- npiv);
+ return _get_partition_func(type, which);
}
-NPY_NO_EXPORT int
-aintroselect_clongdouble(npy_clongdouble *v, npy_intp *tosort, npy_intp num,
- npy_intp kth, npy_intp *pivots, npy_intp *npiv,
- void *NOT_USED)
+NPY_NO_EXPORT PyArray_ArgPartitionFunc *
+get_argpartition_func(int type, NPY_SELECTKIND which)
{
- return introselect_<npy::clongdouble_tag, true>(v, tosort, num, kth,
- pivots, npiv);
+ return _get_argpartition_func(type, which);
}
}
diff --git a/numpy/core/src/umath/dispatching.c b/numpy/core/src/umath/dispatching.c
index c3f0e1e67..b8f102b3d 100644
--- a/numpy/core/src/umath/dispatching.c
+++ b/numpy/core/src/umath/dispatching.c
@@ -746,6 +746,40 @@ promote_and_get_info_and_ufuncimpl(PyUFuncObject *ufunc,
}
info = promote_and_get_info_and_ufuncimpl(ufunc,
ops, signature, new_op_dtypes, NPY_FALSE);
+ if (info == NULL) {
+ /*
+ * NOTE: This block exists solely to support numba's DUFuncs which add
+ * new loops dynamically, so our list may get outdated. Thus, we
+ * have to make sure that the loop exists.
+ *
+ * Before adding a new loop, ensure that it actually exists. There
+ * is a tiny chance that this would not work, but it would require an
+ * extension to additionally have a custom loop getter.
+ * This check should ensure the right error message, but in principle
+ * we could try to call the loop getter here.
+ */
+ char *types = ufunc->types;
+ npy_bool loop_exists = NPY_FALSE;
+ for (int i = 0; i < ufunc->ntypes; ++i) {
+ loop_exists = NPY_TRUE; /* assume it exists, break if not */
+ for (int j = 0; j < ufunc->nargs; ++j) {
+ if (types[j] != new_op_dtypes[j]->type_num) {
+ loop_exists = NPY_FALSE;
+ break;
+ }
+ }
+ if (loop_exists) {
+ break;
+ }
+ types += ufunc->nargs;
+ }
+
+ if (loop_exists) {
+ info = add_and_return_legacy_wrapping_ufunc_loop(
+ ufunc, new_op_dtypes, 0);
+ }
+ }
+
for (int i = 0; i < ufunc->nargs; i++) {
Py_XDECREF(new_op_dtypes[i]);
}
diff --git a/numpy/core/src/umath/legacy_array_method.c b/numpy/core/src/umath/legacy_array_method.c
index 171b53efd..c3d421d9b 100644
--- a/numpy/core/src/umath/legacy_array_method.c
+++ b/numpy/core/src/umath/legacy_array_method.c
@@ -136,7 +136,7 @@ simple_legacy_resolve_descriptors(
* (identity) at least currently. This is because `op[0] is op[2]`.
* (If the output descriptor is not passed, the below works.)
*/
- output_descrs[2] = ensure_dtype_nbo(given_descrs[2]);
+ output_descrs[2] = NPY_DT_CALL_ensure_canonical(given_descrs[2]);
if (output_descrs[2] == NULL) {
Py_CLEAR(output_descrs[2]);
return -1;
@@ -149,7 +149,7 @@ simple_legacy_resolve_descriptors(
output_descrs[1] = output_descrs[2];
}
else {
- output_descrs[1] = ensure_dtype_nbo(given_descrs[1]);
+ output_descrs[1] = NPY_DT_CALL_ensure_canonical(given_descrs[1]);
if (output_descrs[1] == NULL) {
i = 2;
goto fail;
@@ -160,7 +160,7 @@ simple_legacy_resolve_descriptors(
for (; i < nin + nout; i++) {
if (given_descrs[i] != NULL) {
- output_descrs[i] = ensure_dtype_nbo(given_descrs[i]);
+ output_descrs[i] = NPY_DT_CALL_ensure_canonical(given_descrs[i]);
}
else if (dtypes[i] == dtypes[0] && i > 0) {
/* Preserve metadata from the first operand if same dtype */
diff --git a/numpy/core/src/umath/loops_hyperbolic.dispatch.c.src b/numpy/core/src/umath/loops_hyperbolic.dispatch.c.src
index 367b102c6..8cccc18f0 100644
--- a/numpy/core/src/umath/loops_hyperbolic.dispatch.c.src
+++ b/numpy/core/src/umath/loops_hyperbolic.dispatch.c.src
@@ -1,7 +1,7 @@
/*@targets
** $maxopt baseline
** (avx2 fma3) AVX512_SKX
- ** vsx2
+ ** vsx2 vsx4
** neon_vfpv4
**/
#include "numpy/npy_math.h"
diff --git a/numpy/core/src/umath/loops_trigonometric.dispatch.c.src b/numpy/core/src/umath/loops_trigonometric.dispatch.c.src
index cd9b2ed54..44c47d14f 100644
--- a/numpy/core/src/umath/loops_trigonometric.dispatch.c.src
+++ b/numpy/core/src/umath/loops_trigonometric.dispatch.c.src
@@ -1,7 +1,7 @@
/*@targets
** $maxopt baseline
** (avx2 fma3) avx512f
- ** vsx2
+ ** vsx2 vsx3 vsx4
** neon_vfpv4
**/
#include "numpy/npy_math.h"
diff --git a/numpy/core/src/umath/ufunc_type_resolution.c b/numpy/core/src/umath/ufunc_type_resolution.c
index a7df09b8f..6edd00e65 100644
--- a/numpy/core/src/umath/ufunc_type_resolution.c
+++ b/numpy/core/src/umath/ufunc_type_resolution.c
@@ -41,6 +41,7 @@
#include "ufunc_object.h"
#include "common.h"
#include "convert_datatype.h"
+#include "dtypemeta.h"
#include "mem_overlap.h"
#if defined(HAVE_CBLAS)
@@ -421,7 +422,7 @@ PyUFunc_SimpleBinaryComparisonTypeResolver(PyUFuncObject *ufunc,
operands, type_tup, out_dtypes);
}
- out_dtypes[0] = ensure_dtype_nbo(descr);
+ out_dtypes[0] = NPY_DT_CALL_ensure_canonical(descr);
if (out_dtypes[0] == NULL) {
return -1;
}
@@ -545,7 +546,8 @@ PyUFunc_SimpleUniformOperationTypeResolver(
if (type_tup == NULL) {
/* PyArray_ResultType forgets to force a byte order when n == 1 */
if (ufunc->nin == 1){
- out_dtypes[0] = ensure_dtype_nbo(PyArray_DESCR(operands[0]));
+ out_dtypes[0] = NPY_DT_CALL_ensure_canonical(
+ PyArray_DESCR(operands[0]));
}
else {
int iop;
@@ -629,7 +631,7 @@ PyUFunc_SimpleUniformOperationTypeResolver(
/* Prefer the input descriptor if it matches (preserve metadata) */
descr = PyArray_DESCR(operands[0]);
}
- out_dtypes[0] = ensure_dtype_nbo(descr);
+ out_dtypes[0] = NPY_DT_CALL_ensure_canonical(descr);
}
/* All types are the same - copy the first one to the rest */
@@ -695,7 +697,7 @@ PyUFunc_IsNaTTypeResolver(PyUFuncObject *ufunc,
return -1;
}
- out_dtypes[0] = ensure_dtype_nbo(PyArray_DESCR(operands[0]));
+ out_dtypes[0] = NPY_DT_CALL_ensure_canonical(PyArray_DESCR(operands[0]));
out_dtypes[1] = PyArray_DescrFromType(NPY_BOOL);
return 0;
@@ -714,7 +716,7 @@ PyUFunc_IsFiniteTypeResolver(PyUFuncObject *ufunc,
type_tup, out_dtypes);
}
- out_dtypes[0] = ensure_dtype_nbo(PyArray_DESCR(operands[0]));
+ out_dtypes[0] = NPY_DT_CALL_ensure_canonical(PyArray_DESCR(operands[0]));
out_dtypes[1] = PyArray_DescrFromType(NPY_BOOL);
return 0;
@@ -816,7 +818,8 @@ PyUFunc_AdditionTypeResolver(PyUFuncObject *ufunc,
/* m8[<A>] + int => m8[<A>] + m8[<A>] */
else if (PyTypeNum_ISINTEGER(type_num2) ||
PyTypeNum_ISBOOL(type_num2)) {
- out_dtypes[0] = ensure_dtype_nbo(PyArray_DESCR(operands[0]));
+ out_dtypes[0] = NPY_DT_CALL_ensure_canonical(
+ PyArray_DESCR(operands[0]));
if (out_dtypes[0] == NULL) {
return -1;
}
@@ -852,7 +855,8 @@ PyUFunc_AdditionTypeResolver(PyUFuncObject *ufunc,
/* M8[<A>] + int => M8[<A>] + m8[<A>] */
else if (PyTypeNum_ISINTEGER(type_num2) ||
PyTypeNum_ISBOOL(type_num2)) {
- out_dtypes[0] = ensure_dtype_nbo(PyArray_DESCR(operands[0]));
+ out_dtypes[0] = NPY_DT_CALL_ensure_canonical(
+ PyArray_DESCR(operands[0]));
if (out_dtypes[0] == NULL) {
return -1;
}
@@ -876,7 +880,8 @@ PyUFunc_AdditionTypeResolver(PyUFuncObject *ufunc,
else if (PyTypeNum_ISINTEGER(type_num1) || PyTypeNum_ISBOOL(type_num1)) {
/* int + m8[<A>] => m8[<A>] + m8[<A>] */
if (type_num2 == NPY_TIMEDELTA) {
- out_dtypes[0] = ensure_dtype_nbo(PyArray_DESCR(operands[1]));
+ out_dtypes[0] = NPY_DT_CALL_ensure_canonical(
+ PyArray_DESCR(operands[1]));
if (out_dtypes[0] == NULL) {
return -1;
}
@@ -894,7 +899,8 @@ PyUFunc_AdditionTypeResolver(PyUFuncObject *ufunc,
if (out_dtypes[0] == NULL) {
return -1;
}
- out_dtypes[1] = ensure_dtype_nbo(PyArray_DESCR(operands[1]));
+ out_dtypes[1] = NPY_DT_CALL_ensure_canonical(
+ PyArray_DESCR(operands[1]));
if (out_dtypes[1] == NULL) {
Py_DECREF(out_dtypes[0]);
out_dtypes[0] = NULL;
@@ -985,7 +991,8 @@ PyUFunc_SubtractionTypeResolver(PyUFuncObject *ufunc,
/* m8[<A>] - int => m8[<A>] - m8[<A>] */
else if (PyTypeNum_ISINTEGER(type_num2) ||
PyTypeNum_ISBOOL(type_num2)) {
- out_dtypes[0] = ensure_dtype_nbo(PyArray_DESCR(operands[0]));
+ out_dtypes[0] = NPY_DT_CALL_ensure_canonical(
+ PyArray_DESCR(operands[0]));
if (out_dtypes[0] == NULL) {
return -1;
}
@@ -1021,7 +1028,8 @@ PyUFunc_SubtractionTypeResolver(PyUFuncObject *ufunc,
/* M8[<A>] - int => M8[<A>] - m8[<A>] */
else if (PyTypeNum_ISINTEGER(type_num2) ||
PyTypeNum_ISBOOL(type_num2)) {
- out_dtypes[0] = ensure_dtype_nbo(PyArray_DESCR(operands[0]));
+ out_dtypes[0] = NPY_DT_CALL_ensure_canonical(
+ PyArray_DESCR(operands[0]));
if (out_dtypes[0] == NULL) {
return -1;
}
@@ -1061,7 +1069,8 @@ PyUFunc_SubtractionTypeResolver(PyUFuncObject *ufunc,
else if (PyTypeNum_ISINTEGER(type_num1) || PyTypeNum_ISBOOL(type_num1)) {
/* int - m8[<A>] => m8[<A>] - m8[<A>] */
if (type_num2 == NPY_TIMEDELTA) {
- out_dtypes[0] = ensure_dtype_nbo(PyArray_DESCR(operands[1]));
+ out_dtypes[0] = NPY_DT_CALL_ensure_canonical(
+ PyArray_DESCR(operands[1]));
if (out_dtypes[0] == NULL) {
return -1;
}
@@ -1122,7 +1131,8 @@ PyUFunc_MultiplicationTypeResolver(PyUFuncObject *ufunc,
if (type_num1 == NPY_TIMEDELTA) {
/* m8[<A>] * int## => m8[<A>] * int64 */
if (PyTypeNum_ISINTEGER(type_num2) || PyTypeNum_ISBOOL(type_num2)) {
- out_dtypes[0] = ensure_dtype_nbo(PyArray_DESCR(operands[0]));
+ out_dtypes[0] = NPY_DT_CALL_ensure_canonical(
+ PyArray_DESCR(operands[0]));
if (out_dtypes[0] == NULL) {
return -1;
}
@@ -1139,7 +1149,8 @@ PyUFunc_MultiplicationTypeResolver(PyUFuncObject *ufunc,
}
/* m8[<A>] * float## => m8[<A>] * float64 */
else if (PyTypeNum_ISFLOAT(type_num2)) {
- out_dtypes[0] = ensure_dtype_nbo(PyArray_DESCR(operands[0]));
+ out_dtypes[0] = NPY_DT_CALL_ensure_canonical(
+ PyArray_DESCR(operands[0]));
if (out_dtypes[0] == NULL) {
return -1;
}
@@ -1165,7 +1176,8 @@ PyUFunc_MultiplicationTypeResolver(PyUFuncObject *ufunc,
if (out_dtypes[0] == NULL) {
return -1;
}
- out_dtypes[1] = ensure_dtype_nbo(PyArray_DESCR(operands[1]));
+ out_dtypes[1] = NPY_DT_CALL_ensure_canonical(
+ PyArray_DESCR(operands[1]));
if (out_dtypes[1] == NULL) {
Py_DECREF(out_dtypes[0]);
out_dtypes[0] = NULL;
@@ -1187,7 +1199,8 @@ PyUFunc_MultiplicationTypeResolver(PyUFuncObject *ufunc,
if (out_dtypes[0] == NULL) {
return -1;
}
- out_dtypes[1] = ensure_dtype_nbo(PyArray_DESCR(operands[1]));
+ out_dtypes[1] = NPY_DT_CALL_ensure_canonical(
+ PyArray_DESCR(operands[1]));
if (out_dtypes[1] == NULL) {
Py_DECREF(out_dtypes[0]);
out_dtypes[0] = NULL;
@@ -1278,7 +1291,8 @@ PyUFunc_DivisionTypeResolver(PyUFuncObject *ufunc,
}
/* m8[<A>] / int## => m8[<A>] / int64 */
else if (PyTypeNum_ISINTEGER(type_num2)) {
- out_dtypes[0] = ensure_dtype_nbo(PyArray_DESCR(operands[0]));
+ out_dtypes[0] = NPY_DT_CALL_ensure_canonical(
+ PyArray_DESCR(operands[0]));
if (out_dtypes[0] == NULL) {
return -1;
}
@@ -1295,7 +1309,8 @@ PyUFunc_DivisionTypeResolver(PyUFuncObject *ufunc,
}
/* m8[<A>] / float## => m8[<A>] / float64 */
else if (PyTypeNum_ISFLOAT(type_num2)) {
- out_dtypes[0] = ensure_dtype_nbo(PyArray_DESCR(operands[0]));
+ out_dtypes[0] = NPY_DT_CALL_ensure_canonical(
+ PyArray_DESCR(operands[0]));
if (out_dtypes[0] == NULL) {
return -1;
}
@@ -1672,7 +1687,8 @@ set_ufunc_loop_data_types(PyUFuncObject *self, PyArrayObject **op,
}
else if (op[i] != NULL &&
PyArray_DESCR(op[i])->type_num == type_nums[i]) {
- out_dtypes[i] = ensure_dtype_nbo(PyArray_DESCR(op[i]));
+ out_dtypes[i] = NPY_DT_CALL_ensure_canonical(
+ PyArray_DESCR(op[i]));
}
/*
* For outputs, copy the dtype from op[0] if the type_num
@@ -1680,7 +1696,8 @@ set_ufunc_loop_data_types(PyUFuncObject *self, PyArrayObject **op,
*/
else if (i >= nin && op[0] != NULL &&
PyArray_DESCR(op[0])->type_num == type_nums[i]) {
- out_dtypes[i] = ensure_dtype_nbo(PyArray_DESCR(op[0]));
+ out_dtypes[i] = NPY_DT_CALL_ensure_canonical(
+ PyArray_DESCR(op[0]));
}
/* Otherwise create a plain descr from the type number */
else {
diff --git a/numpy/core/tests/test_api.py b/numpy/core/tests/test_api.py
index 22a06619c..b3f3e947d 100644
--- a/numpy/core/tests/test_api.py
+++ b/numpy/core/tests/test_api.py
@@ -8,9 +8,6 @@ from numpy.testing import (
HAS_REFCOUNT
)
-# Switch between new behaviour when NPY_RELAXED_STRIDES_CHECKING is set.
-NPY_RELAXED_STRIDES_CHECKING = np.ones((10, 1), order='C').flags.f_contiguous
-
def test_array_array():
tobj = type(object)
@@ -482,13 +479,6 @@ def test_copy_order():
assert_equal(x, y)
assert_equal(res.flags.c_contiguous, ccontig)
assert_equal(res.flags.f_contiguous, fcontig)
- # This check is impossible only because
- # NPY_RELAXED_STRIDES_CHECKING changes the strides actively
- if not NPY_RELAXED_STRIDES_CHECKING:
- if strides:
- assert_equal(x.strides, y.strides)
- else:
- assert_(x.strides != y.strides)
# Validate the initial state of a, b, and c
assert_(a.flags.c_contiguous)
@@ -542,8 +532,7 @@ def test_copy_order():
def test_contiguous_flags():
a = np.ones((4, 4, 1))[::2,:,:]
- if NPY_RELAXED_STRIDES_CHECKING:
- a.strides = a.strides[:2] + (-123,)
+ a.strides = a.strides[:2] + (-123,)
b = np.ones((2, 2, 1, 2, 2)).swapaxes(3, 4)
def check_contig(a, ccontig, fcontig):
@@ -553,12 +542,8 @@ def test_contiguous_flags():
# Check if new arrays are correct:
check_contig(a, False, False)
check_contig(b, False, False)
- if NPY_RELAXED_STRIDES_CHECKING:
- check_contig(np.empty((2, 2, 0, 2, 2)), True, True)
- check_contig(np.array([[[1], [2]]], order='F'), True, True)
- else:
- check_contig(np.empty((2, 2, 0, 2, 2)), True, False)
- check_contig(np.array([[[1], [2]]], order='F'), False, True)
+ check_contig(np.empty((2, 2, 0, 2, 2)), True, True)
+ check_contig(np.array([[[1], [2]]], order='F'), True, True)
check_contig(np.empty((2, 2)), True, False)
check_contig(np.empty((2, 2), order='F'), False, True)
@@ -567,18 +552,11 @@ def test_contiguous_flags():
check_contig(np.array(a, copy=False, order='C'), True, False)
check_contig(np.array(a, ndmin=4, copy=False, order='F'), False, True)
- if NPY_RELAXED_STRIDES_CHECKING:
- # Check slicing update of flags and :
- check_contig(a[0], True, True)
- check_contig(a[None, ::4, ..., None], True, True)
- check_contig(b[0, 0, ...], False, True)
- check_contig(b[:,:, 0:0,:,:], True, True)
- else:
- # Check slicing update of flags:
- check_contig(a[0], True, False)
- # Would be nice if this was C-Contiguous:
- check_contig(a[None, 0, ..., None], False, False)
- check_contig(b[0, 0, 0, ...], False, True)
+ # Check slicing update of flags and :
+ check_contig(a[0], True, True)
+ check_contig(a[None, ::4, ..., None], True, True)
+ check_contig(b[0, 0, ...], False, True)
+ check_contig(b[:, :, 0:0, :, :], True, True)
# Test ravel and squeeze.
check_contig(a.ravel(), True, True)
diff --git a/numpy/core/tests/test_deprecations.py b/numpy/core/tests/test_deprecations.py
index 89fcc48bb..c46b294eb 100644
--- a/numpy/core/tests/test_deprecations.py
+++ b/numpy/core/tests/test_deprecations.py
@@ -138,22 +138,6 @@ class _VisibleDeprecationTestCase(_DeprecationTestCase):
warning_cls = np.VisibleDeprecationWarning
-class TestNonTupleNDIndexDeprecation:
- def test_basic(self):
- a = np.zeros((5, 5))
- with warnings.catch_warnings():
- warnings.filterwarnings('always')
- assert_warns(FutureWarning, a.__getitem__, [[0, 1], [0, 1]])
- assert_warns(FutureWarning, a.__getitem__, [slice(None)])
-
- warnings.filterwarnings('error')
- assert_raises(FutureWarning, a.__getitem__, [[0, 1], [0, 1]])
- assert_raises(FutureWarning, a.__getitem__, [slice(None)])
-
- # a a[[0, 1]] always was advanced indexing, so no error/warning
- a[[0, 1]]
-
-
class TestComparisonDeprecations(_DeprecationTestCase):
"""This tests the deprecation, for non-element-wise comparison logic.
This used to mean that when an error occurred during element-wise comparison
diff --git a/numpy/core/tests/test_dlpack.py b/numpy/core/tests/test_dlpack.py
index f848b2008..203cf02c0 100644
--- a/numpy/core/tests/test_dlpack.py
+++ b/numpy/core/tests/test_dlpack.py
@@ -91,7 +91,10 @@ class TestDLPack:
def test_dlpack_device(self):
x = np.arange(5)
assert x.__dlpack_device__() == (1, 0)
- assert np._from_dlpack(x).__dlpack_device__() == (1, 0)
+ y = np._from_dlpack(x)
+ assert y.__dlpack_device__() == (1, 0)
+ z = y[::2]
+ assert z.__dlpack_device__() == (1, 0)
def dlpack_deleter_exception(self):
x = np.arange(5)
@@ -107,3 +110,8 @@ class TestDLPack:
x.flags.writeable = False
with pytest.raises(TypeError):
x.__dlpack__()
+
+ def test_ndim0(self):
+ x = np.array(1.0)
+ y = np._from_dlpack(x)
+ assert_array_equal(x, y)
diff --git a/numpy/core/tests/test_dtype.py b/numpy/core/tests/test_dtype.py
index e49604e4d..1a8e747e1 100644
--- a/numpy/core/tests/test_dtype.py
+++ b/numpy/core/tests/test_dtype.py
@@ -14,6 +14,11 @@ from numpy.testing import (
IS_PYSTON)
from numpy.compat import pickle
from itertools import permutations
+import random
+
+import hypothesis
+from hypothesis.extra import numpy as hynp
+
def assert_dtype_equal(a, b):
@@ -1060,6 +1065,122 @@ class TestDtypeAttributes:
assert_equal(np.dtype(user_def_subcls).name, 'user_def_subcls')
+class TestDTypeMakeCanonical:
+ def check_canonical(self, dtype, canonical):
+ """
+ Check most properties relevant to "canonical" versions of a dtype,
+ which is mainly native byte order for datatypes supporting this.
+
+ The main work is checking structured dtypes with fields, where we
+ reproduce most the actual logic used in the C-code.
+ """
+ assert type(dtype) is type(canonical)
+
+ # a canonical DType should always have equivalent casting (both ways)
+ assert np.can_cast(dtype, canonical, casting="equiv")
+ assert np.can_cast(canonical, dtype, casting="equiv")
+ # a canonical dtype (and its fields) is always native (checks fields):
+ assert canonical.isnative
+
+ # Check that canonical of canonical is the same (no casting):
+ assert np.result_type(canonical) == canonical
+
+ if not dtype.names:
+ # The flags currently never change for unstructured dtypes
+ assert dtype.flags == canonical.flags
+ return
+
+ # Must have the needs-API flag set:
+ assert dtype.flags & 0b10000
+
+ # Check that the fields are identical (including titles):
+ assert dtype.fields.keys() == canonical.fields.keys()
+
+ def aligned_offset(offset, alignment):
+ # round up offset:
+ return - (-offset // alignment) * alignment
+
+ totalsize = 0
+ max_alignment = 1
+ for name in dtype.names:
+ # each field is also canonical:
+ new_field_descr = canonical.fields[name][0]
+ self.check_canonical(dtype.fields[name][0], new_field_descr)
+
+ # Must have the "inherited" object related flags:
+ expected = 0b11011 & new_field_descr.flags
+ assert (canonical.flags & expected) == expected
+
+ if canonical.isalignedstruct:
+ totalsize = aligned_offset(totalsize, new_field_descr.alignment)
+ max_alignment = max(new_field_descr.alignment, max_alignment)
+
+ assert canonical.fields[name][1] == totalsize
+ # if a title exists, they must match (otherwise empty tuple):
+ assert dtype.fields[name][2:] == canonical.fields[name][2:]
+
+ totalsize += new_field_descr.itemsize
+
+ if canonical.isalignedstruct:
+ totalsize = aligned_offset(totalsize, max_alignment)
+ assert canonical.itemsize == totalsize
+ assert canonical.alignment == max_alignment
+
+ def test_simple(self):
+ dt = np.dtype(">i4")
+ assert np.result_type(dt).isnative
+ assert np.result_type(dt).num == dt.num
+
+ # dtype with empty space:
+ struct_dt = np.dtype(">i4,<i1,i8,V3")[["f0", "f2"]]
+ canonical = np.result_type(struct_dt)
+ assert canonical.itemsize == 4+8
+ assert canonical.isnative
+
+ # aligned struct dtype with empty space:
+ struct_dt = np.dtype(">i1,<i4,i8,V3", align=True)[["f0", "f2"]]
+ canonical = np.result_type(struct_dt)
+ assert canonical.isalignedstruct
+ assert canonical.itemsize == np.dtype("i8").alignment + 8
+ assert canonical.isnative
+
+ def test_object_flag_not_inherited(self):
+ # The following dtype still indicates "object", because it's included
+ # in the inaccessible space (maybe this could change at some point):
+ arr = np.ones(3, "i,O,i")[["f0", "f2"]]
+ assert arr.dtype.hasobject
+ canonical_dt = np.result_type(arr.dtype)
+ assert not canonical_dt.hasobject
+
+ @pytest.mark.slow
+ @hypothesis.given(dtype=hynp.nested_dtypes())
+ def test_make_canonical_hypothesis(self, dtype):
+ canonical = np.result_type(dtype)
+ self.check_canonical(dtype, canonical)
+
+ @pytest.mark.slow
+ @hypothesis.given(
+ dtype=hypothesis.extra.numpy.array_dtypes(
+ subtype_strategy=hypothesis.extra.numpy.array_dtypes(),
+ min_size=5, max_size=10, allow_subarrays=True))
+ def test_structured(self, dtype):
+ # Pick 4 of the fields at random. This will leave empty space in the
+ # dtype (since we do not canonicalize it here).
+ field_subset = random.sample(dtype.names, k=4)
+ dtype_with_empty_space = dtype[field_subset]
+ assert dtype_with_empty_space.itemsize == dtype.itemsize
+ canonicalized = np.result_type(dtype_with_empty_space)
+ self.check_canonical(dtype_with_empty_space, canonicalized)
+
+ # Ensure that we also check aligned struct (check the opposite, in
+ # case hypothesis grows support for `align`). Then repeat the test:
+ dtype_aligned = np.dtype(dtype.descr, align=not dtype.isalignedstruct)
+ dtype_with_empty_space = dtype_aligned[field_subset]
+ assert dtype_with_empty_space.itemsize == dtype_aligned.itemsize
+ canonicalized = np.result_type(dtype_with_empty_space)
+ self.check_canonical(dtype_with_empty_space, canonicalized)
+
+
class TestPickling:
def check_pickling(self, dtype):
diff --git a/numpy/core/tests/test_indexing.py b/numpy/core/tests/test_indexing.py
index ff999a7b9..efcb92c2e 100644
--- a/numpy/core/tests/test_indexing.py
+++ b/numpy/core/tests/test_indexing.py
@@ -587,6 +587,12 @@ class TestIndexing:
assert arr.dtype is dt
+ def test_nontuple_ndindex(self):
+ a = np.arange(25).reshape((5, 5))
+ assert_equal(a[[0, 1]], np.array([a[0], a[1]]))
+ assert_equal(a[[0, 1], [0, 1]], np.array([0, 6]))
+ assert_raises(IndexError, a.__getitem__, [slice(None)])
+
class TestFieldIndexing:
def test_scalar_return_type(self):
diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py
index 73bb5e2d8..6ba90a97f 100644
--- a/numpy/core/tests/test_multiarray.py
+++ b/numpy/core/tests/test_multiarray.py
@@ -1635,6 +1635,15 @@ class TestZeroSizeFlexible:
assert_equal(zs.dtype, zs2.dtype)
+ def test_pickle_empty(self):
+ """Checking if an empty array pickled and un-pickled will not cause a
+ segmentation fault"""
+ arr = np.array([]).reshape(999999, 0)
+ pk_dmp = pickle.dumps(arr)
+ pk_load = pickle.loads(pk_dmp)
+
+ assert pk_load.size == 0
+
@pytest.mark.skipif(pickle.HIGHEST_PROTOCOL < 5,
reason="requires pickle protocol 5")
def test_pickle_with_buffercallback(self):
@@ -3295,11 +3304,11 @@ class TestMethods:
assert_equal(a.ravel('C'), [3, 2, 1, 0])
assert_equal(a.ravel('K'), [3, 2, 1, 0])
- # 1-element tidy strides test (NPY_RELAXED_STRIDES_CHECKING):
+ # 1-element tidy strides test:
a = np.array([[1]])
a.strides = (123, 432)
- # If the stride is not 8, NPY_RELAXED_STRIDES_CHECKING is messing
- # them up on purpose:
+ # If the following stride is not 8, NPY_RELAXED_STRIDES_DEBUG is
+ # messing them up on purpose:
if np.ones(1).strides == (8,):
assert_(np.may_share_memory(a.ravel('K'), a))
assert_equal(a.ravel('K').strides, (a.dtype.itemsize,))
@@ -4190,7 +4199,8 @@ class TestArgmaxArgminCommon:
sizes = [(), (3,), (3, 2), (2, 3),
(3, 3), (2, 3, 4), (4, 3, 2),
(1, 2, 3, 4), (2, 3, 4, 1),
- (3, 4, 1, 2), (4, 1, 2, 3)]
+ (3, 4, 1, 2), (4, 1, 2, 3),
+ (64,), (128,), (256,)]
@pytest.mark.parametrize("size, axis", itertools.chain(*[[(size, axis)
for axis in list(range(-len(size), len(size))) + [None]]
@@ -4304,9 +4314,9 @@ class TestArgmaxArgminCommon:
@pytest.mark.parametrize('ndim', [0, 1])
@pytest.mark.parametrize('method', ['argmax', 'argmin'])
def test_ret_is_out(self, ndim, method):
- a = np.ones((4,) + (3,)*ndim)
+ a = np.ones((4,) + (256,)*ndim)
arg_method = getattr(a, method)
- out = np.empty((3,)*ndim, dtype=np.intp)
+ out = np.empty((256,)*ndim, dtype=np.intp)
ret = arg_method(axis=0, out=out)
assert ret is out
@@ -4357,12 +4367,44 @@ class TestArgmaxArgminCommon:
assert_equal(arg_method(), 1)
class TestArgmax:
-
- nan_arr = [
- ([0, 1, 2, 3, np.nan], 4),
- ([0, 1, 2, np.nan, 3], 3),
- ([np.nan, 0, 1, 2, 3], 0),
- ([np.nan, 0, np.nan, 2, 3], 0),
+ usg_data = [
+ ([1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], 0),
+ ([3, 3, 3, 3, 2, 2, 2, 2], 0),
+ ([0, 1, 2, 3, 4, 5, 6, 7], 7),
+ ([7, 6, 5, 4, 3, 2, 1, 0], 0)
+ ]
+ sg_data = usg_data + [
+ ([1, 2, 3, 4, -4, -3, -2, -1], 3),
+ ([1, 2, 3, 4, -1, -2, -3, -4], 3)
+ ]
+ darr = [(np.array(d[0], dtype=t), d[1]) for d, t in (
+ itertools.product(usg_data, (
+ np.uint8, np.uint16, np.uint32, np.uint64
+ ))
+ )]
+ darr = darr + [(np.array(d[0], dtype=t), d[1]) for d, t in (
+ itertools.product(sg_data, (
+ np.int8, np.int16, np.int32, np.int64, np.float32, np.float64
+ ))
+ )]
+ darr = darr + [(np.array(d[0], dtype=t), d[1]) for d, t in (
+ itertools.product((
+ ([0, 1, 2, 3, np.nan], 4),
+ ([0, 1, 2, np.nan, 3], 3),
+ ([np.nan, 0, 1, 2, 3], 0),
+ ([np.nan, 0, np.nan, 2, 3], 0),
+ # To hit the tail of SIMD multi-level(x4, x1) inner loops
+ # on variant SIMD widths
+ ([1] * (2*5-1) + [np.nan], 2*5-1),
+ ([1] * (4*5-1) + [np.nan], 4*5-1),
+ ([1] * (8*5-1) + [np.nan], 8*5-1),
+ ([1] * (16*5-1) + [np.nan], 16*5-1),
+ ([1] * (32*5-1) + [np.nan], 32*5-1)
+ ), (
+ np.float32, np.float64
+ ))
+ )]
+ nan_arr = darr + [
([0, 1, 2, 3, complex(0, np.nan)], 4),
([0, 1, 2, 3, complex(np.nan, 0)], 4),
([0, 1, 2, complex(np.nan, 0), 3], 3),
@@ -4432,28 +4474,80 @@ class TestArgmax:
assert_equal(np.argmax(arr), pos, err_msg="%r" % arr)
assert_equal(arr[np.argmax(arr)], val, err_msg="%r" % arr)
+ # add padding to test SIMD loops
+ rarr = np.repeat(arr, 129)
+ rpos = pos * 129
+ assert_equal(np.argmax(rarr), rpos, err_msg="%r" % rarr)
+ assert_equal(rarr[np.argmax(rarr)], val, err_msg="%r" % rarr)
+
+ padd = np.repeat(np.min(arr), 513)
+ rarr = np.concatenate((arr, padd))
+ rpos = pos
+ assert_equal(np.argmax(rarr), rpos, err_msg="%r" % rarr)
+ assert_equal(rarr[np.argmax(rarr)], val, err_msg="%r" % rarr)
+
+
def test_maximum_signed_integers(self):
a = np.array([1, 2**7 - 1, -2**7], dtype=np.int8)
assert_equal(np.argmax(a), 1)
+ a.repeat(129)
+ assert_equal(np.argmax(a), 1)
a = np.array([1, 2**15 - 1, -2**15], dtype=np.int16)
assert_equal(np.argmax(a), 1)
+ a.repeat(129)
+ assert_equal(np.argmax(a), 1)
a = np.array([1, 2**31 - 1, -2**31], dtype=np.int32)
assert_equal(np.argmax(a), 1)
+ a.repeat(129)
+ assert_equal(np.argmax(a), 1)
a = np.array([1, 2**63 - 1, -2**63], dtype=np.int64)
assert_equal(np.argmax(a), 1)
-
+ a.repeat(129)
+ assert_equal(np.argmax(a), 1)
class TestArgmin:
-
- nan_arr = [
- ([0, 1, 2, 3, np.nan], 4),
- ([0, 1, 2, np.nan, 3], 3),
- ([np.nan, 0, 1, 2, 3], 0),
- ([np.nan, 0, np.nan, 2, 3], 0),
+ usg_data = [
+ ([1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], 8),
+ ([3, 3, 3, 3, 2, 2, 2, 2], 4),
+ ([0, 1, 2, 3, 4, 5, 6, 7], 0),
+ ([7, 6, 5, 4, 3, 2, 1, 0], 7)
+ ]
+ sg_data = usg_data + [
+ ([1, 2, 3, 4, -4, -3, -2, -1], 4),
+ ([1, 2, 3, 4, -1, -2, -3, -4], 7)
+ ]
+ darr = [(np.array(d[0], dtype=t), d[1]) for d, t in (
+ itertools.product(usg_data, (
+ np.uint8, np.uint16, np.uint32, np.uint64
+ ))
+ )]
+ darr = darr + [(np.array(d[0], dtype=t), d[1]) for d, t in (
+ itertools.product(sg_data, (
+ np.int8, np.int16, np.int32, np.int64, np.float32, np.float64
+ ))
+ )]
+ darr = darr + [(np.array(d[0], dtype=t), d[1]) for d, t in (
+ itertools.product((
+ ([0, 1, 2, 3, np.nan], 4),
+ ([0, 1, 2, np.nan, 3], 3),
+ ([np.nan, 0, 1, 2, 3], 0),
+ ([np.nan, 0, np.nan, 2, 3], 0),
+ # To hit the tail of SIMD multi-level(x4, x1) inner loops
+ # on variant SIMD widths
+ ([1] * (2*5-1) + [np.nan], 2*5-1),
+ ([1] * (4*5-1) + [np.nan], 4*5-1),
+ ([1] * (8*5-1) + [np.nan], 8*5-1),
+ ([1] * (16*5-1) + [np.nan], 16*5-1),
+ ([1] * (32*5-1) + [np.nan], 32*5-1)
+ ), (
+ np.float32, np.float64
+ ))
+ )]
+ nan_arr = darr + [
([0, 1, 2, 3, complex(0, np.nan)], 4),
([0, 1, 2, 3, complex(np.nan, 0)], 4),
([0, 1, 2, complex(np.nan, 0), 3], 3),
@@ -4512,30 +4606,50 @@ class TestArgmin:
([False, True, False, True, True], 0),
]
- def test_combinations(self):
- for arr, pos in self.nan_arr:
- with suppress_warnings() as sup:
- sup.filter(RuntimeWarning,
- "invalid value encountered in reduce")
- min_val = np.min(arr)
+ @pytest.mark.parametrize('data', nan_arr)
+ def test_combinations(self, data):
+ arr, pos = data
+ with suppress_warnings() as sup:
+ sup.filter(RuntimeWarning,
+ "invalid value encountered in reduce")
+ min_val = np.min(arr)
+
+ assert_equal(np.argmin(arr), pos, err_msg="%r" % arr)
+ assert_equal(arr[np.argmin(arr)], min_val, err_msg="%r" % arr)
+
+ # add padding to test SIMD loops
+ rarr = np.repeat(arr, 129)
+ rpos = pos * 129
+ assert_equal(np.argmin(rarr), rpos, err_msg="%r" % rarr)
+ assert_equal(rarr[np.argmin(rarr)], min_val, err_msg="%r" % rarr)
- assert_equal(np.argmin(arr), pos, err_msg="%r" % arr)
- assert_equal(arr[np.argmin(arr)], min_val, err_msg="%r" % arr)
+ padd = np.repeat(np.max(arr), 513)
+ rarr = np.concatenate((arr, padd))
+ rpos = pos
+ assert_equal(np.argmin(rarr), rpos, err_msg="%r" % rarr)
+ assert_equal(rarr[np.argmin(rarr)], min_val, err_msg="%r" % rarr)
def test_minimum_signed_integers(self):
a = np.array([1, -2**7, -2**7 + 1, 2**7 - 1], dtype=np.int8)
assert_equal(np.argmin(a), 1)
+ a.repeat(129)
+ assert_equal(np.argmin(a), 1)
a = np.array([1, -2**15, -2**15 + 1, 2**15 - 1], dtype=np.int16)
assert_equal(np.argmin(a), 1)
+ a.repeat(129)
+ assert_equal(np.argmin(a), 1)
a = np.array([1, -2**31, -2**31 + 1, 2**31 - 1], dtype=np.int32)
assert_equal(np.argmin(a), 1)
+ a.repeat(129)
+ assert_equal(np.argmin(a), 1)
a = np.array([1, -2**63, -2**63 + 1, 2**63 - 1], dtype=np.int64)
assert_equal(np.argmin(a), 1)
-
+ a.repeat(129)
+ assert_equal(np.argmin(a), 1)
class TestMinMax:
@@ -7540,7 +7654,7 @@ class TestNewBufferProtocol:
assert_equal(y.format, 'T{b:a:=h:b:i:c:l:d:q:dx:B:e:@H:f:=I:g:L:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
else:
assert_equal(y.format, 'T{b:a:=h:b:i:c:q:d:q:dx:B:e:@H:f:=I:g:Q:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
- # Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides
+ # Cannot test if NPY_RELAXED_STRIDES_DEBUG changes the strides
if not (np.ones(1).strides[0] == np.iinfo(np.intp).max):
assert_equal(y.strides, (sz,))
assert_equal(y.itemsize, sz)
@@ -7630,10 +7744,7 @@ class TestNewBufferProtocol:
def test_relaxed_strides(self, c=np.ones((1, 10, 10), dtype='i8')):
# Note: c defined as parameter so that it is persistent and leak
# checks will notice gh-16934 (buffer info cache leak).
-
- # Check for NPY_RELAXED_STRIDES_CHECKING:
- if np.ones((10, 1), order="C").flags.f_contiguous:
- c.strides = (-1, 80, 8)
+ c.strides = (-1, 80, 8) # strides need to be fixed at export
assert_(memoryview(c).strides == (800, 80, 8))
diff --git a/numpy/core/tests/test_overrides.py b/numpy/core/tests/test_overrides.py
index 9216a3f5f..36970dbc0 100644
--- a/numpy/core/tests/test_overrides.py
+++ b/numpy/core/tests/test_overrides.py
@@ -437,6 +437,7 @@ class TestArrayLike:
self.function = function
def __array_function__(self, func, types, args, kwargs):
+ assert func is getattr(np, func.__name__)
try:
my_func = getattr(self, func.__name__)
except AttributeError:
diff --git a/numpy/core/tests/test_regression.py b/numpy/core/tests/test_regression.py
index 21cc8c159..e073df376 100644
--- a/numpy/core/tests/test_regression.py
+++ b/numpy/core/tests/test_regression.py
@@ -658,10 +658,10 @@ class TestRegression:
a = np.ones((0, 2))
a.shape = (-1, 2)
- # Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides.
- # With NPY_RELAXED_STRIDES_CHECKING the test becomes superfluous.
+ # Cannot test if NPY_RELAXED_STRIDES_DEBUG changes the strides.
+ # With NPY_RELAXED_STRIDES_DEBUG the test becomes superfluous.
@pytest.mark.skipif(np.ones(1).strides[0] == np.iinfo(np.intp).max,
- reason="Using relaxed stride checking")
+ reason="Using relaxed stride debug")
def test_reshape_trailing_ones_strides(self):
# GitHub issue gh-2949, bad strides for trailing ones of new shape
a = np.zeros(12, dtype=np.int32)[::2] # not contiguous
@@ -918,11 +918,11 @@ class TestRegression:
# Ticket #658
np.indices((0, 3, 4)).T.reshape(-1, 3)
- # Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides.
- # With NPY_RELAXED_STRIDES_CHECKING the test becomes superfluous,
+ # Cannot test if NPY_RELAXED_STRIDES_DEBUG changes the strides.
+ # With NPY_RELAXED_STRIDES_DEBUG the test becomes superfluous,
# 0-sized reshape itself is tested elsewhere.
@pytest.mark.skipif(np.ones(1).strides[0] == np.iinfo(np.intp).max,
- reason="Using relaxed stride checking")
+ reason="Using relaxed stride debug")
def test_copy_detection_corner_case2(self):
# Ticket #771: strides are not set correctly when reshaping 0-sized
# arrays
diff --git a/numpy/distutils/__init__.py b/numpy/distutils/__init__.py
index 79974d1c2..f74ed4d3f 100644
--- a/numpy/distutils/__init__.py
+++ b/numpy/distutils/__init__.py
@@ -19,6 +19,8 @@ LAPACK, and for setting include paths and similar build options, please see
"""
+import warnings
+
# Must import local ccompiler ASAP in order to get
# customized CCompiler.spawn effective.
from . import ccompiler
@@ -26,6 +28,17 @@ from . import unixccompiler
from .npy_pkg_config import *
+warnings.warn("\n\n"
+ " `numpy.distutils` is deprecated since NumPy 1.23.0, as a result\n"
+ " of the deprecation of `distutils` itself. It will be removed for\n"
+ " Python >= 3.12. For older Python versions it will remain present.\n"
+ " It is recommended to use `setuptools < 60.0` for those Python versions.\n"
+ " For more details, see:\n"
+ " https://numpy.org/devdocs/reference/distutils_status_migration.html \n\n",
+ DeprecationWarning, stacklevel=2
+)
+del warnings
+
# If numpy is installed, add distutils.test()
try:
from . import __config__
diff --git a/numpy/f2py/__init__.py b/numpy/f2py/__init__.py
index b1cb74fae..15c219ad4 100644
--- a/numpy/f2py/__init__.py
+++ b/numpy/f2py/__init__.py
@@ -47,11 +47,11 @@ def compile(source,
source_fn : str, optional
Name of the file where the fortran source is written.
The default is to use a temporary file with the extension
- provided by the `extension` parameter
- extension : {'.f', '.f90'}, optional
+ provided by the ``extension`` parameter
+ extension : ``{'.f', '.f90'}``, optional
Filename extension if `source_fn` is not provided.
The extension tells which fortran standard is used.
- The default is `.f`, which implies F77 standard.
+ The default is ``.f``, which implies F77 standard.
.. versionadded:: 1.11.0
@@ -124,7 +124,7 @@ def compile(source,
def get_include():
"""
- Return the directory that contains the fortranobject.c and .h files.
+ Return the directory that contains the ``fortranobject.c`` and ``.h`` files.
.. note::
@@ -151,15 +151,15 @@ def get_include():
building a Python extension using a ``.pyf`` signature file is a two-step
process. For a module ``mymod``:
- - Step 1: run ``python -m numpy.f2py mymod.pyf --quiet``. This
- generates ``_mymodmodule.c`` and (if needed)
- ``_fblas-f2pywrappers.f`` files next to ``mymod.pyf``.
- - Step 2: build your Python extension module. This requires the
- following source files:
+ * Step 1: run ``python -m numpy.f2py mymod.pyf --quiet``. This
+ generates ``_mymodmodule.c`` and (if needed)
+ ``_fblas-f2pywrappers.f`` files next to ``mymod.pyf``.
+ * Step 2: build your Python extension module. This requires the
+ following source files:
- - ``_mymodmodule.c``
- - ``_mymod-f2pywrappers.f`` (if it was generated in step 1)
- - ``fortranobject.c``
+ * ``_mymodmodule.c``
+ * ``_mymod-f2pywrappers.f`` (if it was generated in Step 1)
+ * ``fortranobject.c``
See Also
--------
diff --git a/numpy/f2py/f2py2e.py b/numpy/f2py/f2py2e.py
index bb5b32878..5bc9113af 100755
--- a/numpy/f2py/f2py2e.py
+++ b/numpy/f2py/f2py2e.py
@@ -411,14 +411,16 @@ def run_main(comline_list):
where ``<args>=string.join(<list>,' ')``, but in Python. Unless
``-h`` is used, this function returns a dictionary containing
information on generated modules and their dependencies on source
- files. For example, the command ``f2py -m scalar scalar.f`` can be
- executed from Python as follows
+ files.
You cannot build extension modules with this function, that is,
- using ``-c`` is not allowed. Use ``compile`` command instead
+ using ``-c`` is not allowed. Use the ``compile`` command instead.
Examples
--------
+ The command ``f2py -m scalar scalar.f`` can be executed from Python as
+ follows.
+
.. literalinclude:: ../../source/f2py/code/results/run_main_session.dat
:language: python
diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index 900538134..ff56196c3 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -4190,7 +4190,7 @@ def quantile(a,
8. 'median_unbiased'
9. 'normal_unbiased'
- The first three methods are discontiuous. NumPy further defines the
+ The first three methods are discontinuous. NumPy further defines the
following discontinuous variations of the default 'linear' (7.) option:
* 'lower'
@@ -4241,10 +4241,10 @@ def quantile(a,
same as the median if ``q=0.5``, the same as the minimum if ``q=0.0`` and
the same as the maximum if ``q=1.0``.
- This optional `method` parameter specifies the method to use when the
+ The optional `method` parameter specifies the method to use when the
desired quantile lies between two data points ``i < j``.
- If ``g`` is the fractional part of the index surrounded by ``i`` and
- alpha and beta are correction constants modifying i and j.
+ Here ``g`` is the fractional part of the index surrounded by ``i`` and ``j``,
+ and alpha and beta are correction constants modifying i and j:
.. math::
i + g = (q - alpha) / ( n - alpha - beta + 1 )
@@ -4259,38 +4259,38 @@ def quantile(a,
averaged_inverted_cdf:
method 2 of H&F [1]_.
- This method give discontinuous results:
+ This method gives discontinuous results:
* if g > 0 ; then take j
* if g = 0 ; then average between bounds
closest_observation:
method 3 of H&F [1]_.
- This method give discontinuous results:
+ This method gives discontinuous results:
* if g > 0 ; then take j
* if g = 0 and index is odd ; then take j
* if g = 0 and index is even ; then take i
interpolated_inverted_cdf:
method 4 of H&F [1]_.
- This method give continuous results using:
+ This method gives continuous results using:
* alpha = 0
* beta = 1
hazen:
method 5 of H&F [1]_.
- This method give continuous results using:
+ This method gives continuous results using:
* alpha = 1/2
* beta = 1/2
weibull:
method 6 of H&F [1]_.
- This method give continuous results using:
+ This method gives continuous results using:
* alpha = 0
* beta = 0
linear:
method 7 of H&F [1]_.
- This method give continuous results using:
+ This method gives continuous results using:
* alpha = 1
* beta = 1
@@ -4298,7 +4298,7 @@ def quantile(a,
method 8 of H&F [1]_.
This method is probably the best method if the sample
distribution function is unknown (see reference).
- This method give continuous results using:
+ This method gives continuous results using:
* alpha = 1/3
* beta = 1/3
@@ -4306,7 +4306,7 @@ def quantile(a,
method 9 of H&F [1]_.
This method is probably the best method if the sample
distribution function is known to be normal.
- This method give continuous results using:
+ This method gives continuous results using:
* alpha = 3/8
* beta = 3/8
@@ -5094,6 +5094,18 @@ def delete(arr, obj, axis=None):
return new
if isinstance(obj, (int, integer)) and not isinstance(obj, bool):
+ single_value = True
+ else:
+ single_value = False
+ _obj = obj
+ obj = np.asarray(obj)
+ if obj.size == 0 and not isinstance(_obj, np.ndarray):
+ obj = obj.astype(intp)
+ elif obj.size == 1 and obj.dtype != bool:
+ obj = obj.astype(intp).reshape(())
+ single_value = True
+
+ if single_value:
# optimization for a single value
if (obj < -N or obj >= N):
raise IndexError(
@@ -5110,11 +5122,6 @@ def delete(arr, obj, axis=None):
slobj2[axis] = slice(obj+1, None)
new[tuple(slobj)] = arr[tuple(slobj2)]
else:
- _obj = obj
- obj = np.asarray(obj)
- if obj.size == 0 and not isinstance(_obj, np.ndarray):
- obj = obj.astype(intp)
-
if obj.dtype == bool:
if obj.shape != (N,):
raise ValueError('boolean array argument obj to delete '
diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py
index b67a31b18..874754a64 100644
--- a/numpy/lib/tests/test_function_base.py
+++ b/numpy/lib/tests/test_function_base.py
@@ -890,6 +890,19 @@ class TestDelete:
with pytest.raises(IndexError):
np.delete([0, 1, 2], np.array([], dtype=float))
+ def test_single_item_array(self):
+ a_del = delete(self.a, 1)
+ a_del_arr = delete(self.a, np.array([1]))
+ a_del_lst = delete(self.a, [1])
+ a_del_obj = delete(self.a, np.array([1], dtype=object))
+ assert_equal(a_del_arr, a_del); assert_equal(a_del_lst, a_del); assert_equal(a_del_obj, a_del)
+
+ nd_a_del = delete(self.nd_a, 1, axis=1)
+ nd_a_del_arr = delete(self.nd_a, np.array([1]), axis=1)
+ nd_a_del_lst = delete(self.nd_a, [1], axis=1)
+ nd_a_del_obj = delete(self.nd_a, np.array([1], dtype=object), axis=1)
+ assert_equal(nd_a_del_arr, nd_a_del); assert_equal(nd_a_del_lst, nd_a_del); assert_equal(nd_a_del_obj, nd_a_del)
+
class TestGradient:
diff --git a/numpy/lib/tests/test_loadtxt.py b/numpy/lib/tests/test_loadtxt.py
index cca328b16..f50e3b8ad 100644
--- a/numpy/lib/tests/test_loadtxt.py
+++ b/numpy/lib/tests/test_loadtxt.py
@@ -165,6 +165,7 @@ def test_bad_ndmin(badval):
@pytest.mark.parametrize(
"ws",
(
+ " ", # space
"\t", # tab
"\u2003", # em
"\u00A0", # non-break
@@ -173,7 +174,10 @@ def test_bad_ndmin(badval):
)
def test_blank_lines_spaces_delimit(ws):
txt = StringIO(
- f"1 2{ws}30\n\n4 5 60\n {ws} \n7 8 {ws} 90\n # comment\n3 2 1"
+ f"1 2{ws}30\n\n{ws}\n"
+ f"4 5 60{ws}\n {ws} \n"
+ f"7 8 {ws} 90\n # comment\n"
+ f"3 2 1"
)
# NOTE: It is unclear that the ` # comment` should succeed. Except
# for delimiter=None, which should use any whitespace (and maybe
diff --git a/numpy/lib/utils.py b/numpy/lib/utils.py
index c74ee127d..e8f4952d3 100644
--- a/numpy/lib/utils.py
+++ b/numpy/lib/utils.py
@@ -25,8 +25,7 @@ def get_include():
Notes
-----
- When using ``distutils``, for example in ``setup.py``.
- ::
+ When using ``distutils``, for example in ``setup.py``::
import numpy as np
...
diff --git a/numpy/linalg/linalg.py b/numpy/linalg/linalg.py
index d831886c0..d3acc5938 100644
--- a/numpy/linalg/linalg.py
+++ b/numpy/linalg/linalg.py
@@ -697,8 +697,8 @@ def cholesky(a):
Returns
-------
L : (..., M, M) array_like
- Upper or lower-triangular Cholesky factor of `a`. Returns a
- matrix object if `a` is a matrix object.
+ Lower-triangular Cholesky factor of `a`. Returns a matrix object if
+ `a` is a matrix object.
Raises
------
@@ -1478,10 +1478,12 @@ def svd(a, full_matrices=True, compute_uv=True, hermitian=False):
"""
Singular Value Decomposition.
- When `a` is a 2D array, it is factorized as ``u @ np.diag(s) @ vh
- = (u * s) @ vh``, where `u` and `vh` are 2D unitary arrays and `s` is a 1D
- array of `a`'s singular values. When `a` is higher-dimensional, SVD is
- applied in stacked mode as explained below.
+ When `a` is a 2D array, and ``full_matrices=False``, then it is
+ factorized as ``u @ np.diag(s) @ vh = (u * s) @ vh``, where
+ `u` and the Hermitian transpose of `vh` are 2D arrays with
+ orthonormal columns and `s` is a 1D array of `a`'s singular
+ values. When `a` is higher-dimensional, SVD is applied in
+ stacked mode as explained below.
Parameters
----------
diff --git a/numpy/typing/_nested_sequence.py b/numpy/typing/_nested_sequence.py
index a853303ca..3db226ddf 100644
--- a/numpy/typing/_nested_sequence.py
+++ b/numpy/typing/_nested_sequence.py
@@ -25,7 +25,7 @@ class _NestedSequence(Protocol[_T_co]):
See Also
--------
- `collections.abc.Sequence`
+ collections.abc.Sequence
ABCs for read-only and mutable :term:`sequences`.
Examples
diff --git a/numpy/typing/_ufunc.pyi b/numpy/typing/_ufunc.pyi
index 703b7f925..ee0317cf9 100644
--- a/numpy/typing/_ufunc.pyi
+++ b/numpy/typing/_ufunc.pyi
@@ -48,7 +48,7 @@ _NameType = TypeVar("_NameType", bound=str)
# NOTE: If 2 output types are returned then `out` must be a
# 2-tuple of arrays. Otherwise `None` or a plain array are also acceptable
-class _UFunc_Nin1_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]):
+class _UFunc_Nin1_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc]
@property
def __name__(self) -> _NameType: ...
@property
@@ -108,7 +108,7 @@ class _UFunc_Nin1_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]):
/,
) -> None: ...
-class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]):
+class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc]
@property
def __name__(self) -> _NameType: ...
@property
@@ -223,7 +223,7 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]):
extobj: list[Any] = ...,
) -> NDArray[Any]: ...
-class _UFunc_Nin1_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]):
+class _UFunc_Nin1_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc]
@property
def __name__(self) -> _NameType: ...
@property
@@ -281,7 +281,7 @@ class _UFunc_Nin1_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]):
extobj: list[Any] = ...,
) -> _2Tuple[NDArray[Any]]: ...
-class _UFunc_Nin2_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]):
+class _UFunc_Nin2_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc]
@property
def __name__(self) -> _NameType: ...
@property
@@ -341,7 +341,7 @@ class _UFunc_Nin2_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]):
extobj: list[Any] = ...,
) -> _2Tuple[NDArray[Any]]: ...
-class _GUFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]):
+class _GUFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc]
@property
def __name__(self) -> _NameType: ...
@property
diff --git a/numpy/typing/tests/data/fail/false_positives.pyi b/numpy/typing/tests/data/fail/false_positives.pyi
new file mode 100644
index 000000000..7e7923066
--- /dev/null
+++ b/numpy/typing/tests/data/fail/false_positives.pyi
@@ -0,0 +1,11 @@
+import numpy as np
+import numpy.typing as npt
+
+AR_f8: npt.NDArray[np.float64]
+
+# NOTE: Mypy bug presumably due to the special-casing of heterogeneous tuples;
+# xref numpy/numpy#20901
+#
+# The expected output should be no different than, e.g., when using a
+# list instead of a tuple
+np.concatenate(([1], AR_f8)) # E: Argument 1 to "concatenate" has incompatible type
diff --git a/numpy/typing/tests/data/fail/fromnumeric.pyi b/numpy/typing/tests/data/fail/fromnumeric.pyi
index 5f292999a..b679703c7 100644
--- a/numpy/typing/tests/data/fail/fromnumeric.pyi
+++ b/numpy/typing/tests/data/fail/fromnumeric.pyi
@@ -1,9 +1,11 @@
"""Tests for :mod:`numpy.core.fromnumeric`."""
import numpy as np
+import numpy.typing as npt
A = np.array(True, ndmin=2, dtype=bool)
A.setflags(write=False)
+AR_U: npt.NDArray[np.str_]
a = np.bool_(True)
@@ -124,30 +126,36 @@ np.amin(a, out=1.0) # E: No overload variant
np.amin(a, initial=[1.0]) # E: No overload variant
np.amin(a, where=[1.0]) # E: incompatible type
-np.prod(a, axis=1.0) # E: incompatible type
-np.prod(a, out=False) # E: incompatible type
-np.prod(a, keepdims=1.0) # E: incompatible type
-np.prod(a, initial=int) # E: incompatible type
-np.prod(a, where=1.0) # E: incompatible type
+np.prod(a, axis=1.0) # E: No overload variant
+np.prod(a, out=False) # E: No overload variant
+np.prod(a, keepdims=1.0) # E: No overload variant
+np.prod(a, initial=int) # E: No overload variant
+np.prod(a, where=1.0) # E: No overload variant
+np.prod(AR_U) # E: incompatible type
-np.cumprod(a, axis=1.0) # E: Argument "axis" to "cumprod" has incompatible type
-np.cumprod(a, out=False) # E: Argument "out" to "cumprod" has incompatible type
+np.cumprod(a, axis=1.0) # E: No overload variant
+np.cumprod(a, out=False) # E: No overload variant
+np.cumprod(AR_U) # E: incompatible type
np.size(a, axis=1.0) # E: Argument "axis" to "size" has incompatible type
-np.around(a, decimals=1.0) # E: incompatible type
-np.around(a, out=type) # E: incompatible type
-
-np.mean(a, axis=1.0) # E: incompatible type
-np.mean(a, out=False) # E: incompatible type
-np.mean(a, keepdims=1.0) # E: incompatible type
-
-np.std(a, axis=1.0) # E: incompatible type
-np.std(a, out=False) # E: incompatible type
-np.std(a, ddof='test') # E: incompatible type
-np.std(a, keepdims=1.0) # E: incompatible type
-
-np.var(a, axis=1.0) # E: incompatible type
-np.var(a, out=False) # E: incompatible type
-np.var(a, ddof='test') # E: incompatible type
-np.var(a, keepdims=1.0) # E: incompatible type
+np.around(a, decimals=1.0) # E: No overload variant
+np.around(a, out=type) # E: No overload variant
+np.around(AR_U) # E: incompatible type
+
+np.mean(a, axis=1.0) # E: No overload variant
+np.mean(a, out=False) # E: No overload variant
+np.mean(a, keepdims=1.0) # E: No overload variant
+np.mean(AR_U) # E: incompatible type
+
+np.std(a, axis=1.0) # E: No overload variant
+np.std(a, out=False) # E: No overload variant
+np.std(a, ddof='test') # E: No overload variant
+np.std(a, keepdims=1.0) # E: No overload variant
+np.std(AR_U) # E: incompatible type
+
+np.var(a, axis=1.0) # E: No overload variant
+np.var(a, out=False) # E: No overload variant
+np.var(a, ddof='test') # E: No overload variant
+np.var(a, keepdims=1.0) # E: No overload variant
+np.var(AR_U) # E: incompatible type
diff --git a/numpy/typing/tests/data/fail/ndarray_misc.pyi b/numpy/typing/tests/data/fail/ndarray_misc.pyi
index 8320a44f3..77bd9a44e 100644
--- a/numpy/typing/tests/data/fail/ndarray_misc.pyi
+++ b/numpy/typing/tests/data/fail/ndarray_misc.pyi
@@ -39,3 +39,5 @@ AR_b.__index__() # E: Invalid self argument
AR_f8[1.5] # E: No overload variant
AR_f8["field_a"] # E: No overload variant
AR_f8[["field_a", "field_b"]] # E: Invalid index type
+
+AR_f8.__array_finalize__(object()) # E: incompatible type
diff --git a/numpy/typing/tests/data/reveal/array_constructors.pyi b/numpy/typing/tests/data/reveal/array_constructors.pyi
index c629454df..52aabd126 100644
--- a/numpy/typing/tests/data/reveal/array_constructors.pyi
+++ b/numpy/typing/tests/data/reveal/array_constructors.pyi
@@ -39,6 +39,11 @@ reveal_type(np.empty([1, 5, 6], dtype=np.int64)) # E: ndarray[Any, dtype[{int64
reveal_type(np.empty([1, 5, 6], dtype='c16')) # E: ndarray[Any, dtype[Any]]
reveal_type(np.concatenate(A)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.concatenate([A, A])) # E: Any
+reveal_type(np.concatenate([[1], A])) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.concatenate([[1], [1]])) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.concatenate((A, A))) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.concatenate(([1], [1]))) # E: ndarray[Any, dtype[Any]]
reveal_type(np.concatenate([1, 1.0])) # E: ndarray[Any, dtype[Any]]
reveal_type(np.concatenate(A, dtype=np.int64)) # E: ndarray[Any, dtype[{int64}]]
reveal_type(np.concatenate(A, dtype='c16')) # E: ndarray[Any, dtype[Any]]
diff --git a/numpy/typing/tests/data/reveal/chararray.pyi b/numpy/typing/tests/data/reveal/chararray.pyi
index 3da2e1599..61906c860 100644
--- a/numpy/typing/tests/data/reveal/chararray.pyi
+++ b/numpy/typing/tests/data/reveal/chararray.pyi
@@ -127,3 +127,6 @@ reveal_type(AR_S.istitle()) # E: ndarray[Any, dtype[bool_]]
reveal_type(AR_U.isupper()) # E: ndarray[Any, dtype[bool_]]
reveal_type(AR_S.isupper()) # E: ndarray[Any, dtype[bool_]]
+
+reveal_type(AR_U.__array_finalize__(object())) # E: None
+reveal_type(AR_S.__array_finalize__(object())) # E: None
diff --git a/numpy/typing/tests/data/reveal/false_positives.pyi b/numpy/typing/tests/data/reveal/false_positives.pyi
new file mode 100644
index 000000000..2d7156642
--- /dev/null
+++ b/numpy/typing/tests/data/reveal/false_positives.pyi
@@ -0,0 +1,10 @@
+from typing import Any
+import numpy.typing as npt
+
+AR_Any: npt.NDArray[Any]
+
+# Mypy bug where overload ambiguity is ignored for `Any`-parametrized types;
+# xref numpy/numpy#20099 and python/mypy#11347
+#
+# The expected output would be something akin to `ndarray[Any, dtype[Any]]`
+reveal_type(AR_Any + 2) # E: ndarray[Any, dtype[signedinteger[Any]]]
diff --git a/numpy/typing/tests/data/reveal/fromnumeric.pyi b/numpy/typing/tests/data/reveal/fromnumeric.pyi
index f9f0ff625..6adbc35bf 100644
--- a/numpy/typing/tests/data/reveal/fromnumeric.pyi
+++ b/numpy/typing/tests/data/reveal/fromnumeric.pyi
@@ -8,11 +8,15 @@ class NDArraySubclass(npt.NDArray[np.complex128]):
AR_b: npt.NDArray[np.bool_]
AR_f4: npt.NDArray[np.float32]
+AR_c16: npt.NDArray[np.complex128]
+AR_u8: npt.NDArray[np.uint64]
AR_i8: npt.NDArray[np.int64]
+AR_O: npt.NDArray[np.object_]
AR_subclass: NDArraySubclass
b: np.bool_
f4: np.float32
+i8: np.int64
f: float
reveal_type(np.take(b, 0)) # E: bool_
@@ -214,23 +218,28 @@ reveal_type(np.amin(AR_b, keepdims=True)) # E: Any
reveal_type(np.amin(AR_f4, keepdims=True)) # E: Any
reveal_type(np.amin(AR_f4, out=AR_subclass)) # E: NDArraySubclass
-reveal_type(np.prod(b)) # E: Any
-reveal_type(np.prod(f4)) # E: Any
-reveal_type(np.prod(f)) # E: Any
-reveal_type(np.prod(AR_b)) # E: Any
-reveal_type(np.prod(AR_f4)) # E: Any
-reveal_type(np.prod(AR_b, axis=0)) # E: Any
+reveal_type(np.prod(AR_b)) # E: {int_}
+reveal_type(np.prod(AR_u8)) # E: {uint64}
+reveal_type(np.prod(AR_i8)) # E: {int64}
+reveal_type(np.prod(AR_f4)) # E: floating[Any]
+reveal_type(np.prod(AR_c16)) # E: complexfloating[Any, Any]
+reveal_type(np.prod(AR_O)) # E: Any
reveal_type(np.prod(AR_f4, axis=0)) # E: Any
-reveal_type(np.prod(AR_b, keepdims=True)) # E: Any
reveal_type(np.prod(AR_f4, keepdims=True)) # E: Any
-reveal_type(np.prod(f4, out=AR_i8)) # E: Any
-reveal_type(np.prod(AR_f4, out=AR_i8)) # E: Any
-
-reveal_type(np.cumprod(b)) # E: ndarray[Any, Any]
-reveal_type(np.cumprod(f4)) # E: ndarray[Any, Any]
-reveal_type(np.cumprod(f)) # E: ndarray[Any, Any]
-reveal_type(np.cumprod(AR_b)) # E: ndarray[Any, Any]
-reveal_type(np.cumprod(AR_f4)) # E: ndarray[Any, Any]
+reveal_type(np.prod(AR_f4, dtype=np.float64)) # E: {float64}
+reveal_type(np.prod(AR_f4, dtype=float)) # E: Any
+reveal_type(np.prod(AR_f4, out=AR_subclass)) # E: NDArraySubclass
+
+reveal_type(np.cumprod(AR_b)) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(np.cumprod(AR_u8)) # E: ndarray[Any, dtype[{uint64}]]
+reveal_type(np.cumprod(AR_i8)) # E: ndarray[Any, dtype[{int64}]]
+reveal_type(np.cumprod(AR_f4)) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(np.cumprod(AR_c16)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+reveal_type(np.cumprod(AR_O)) # E: ndarray[Any, dtype[object_]]
+reveal_type(np.cumprod(AR_f4, axis=0)) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(np.cumprod(AR_f4, dtype=np.float64)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.cumprod(AR_f4, dtype=float)) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.cumprod(AR_f4, out=AR_subclass)) # E: NDArraySubclass
reveal_type(np.ndim(b)) # E: int
reveal_type(np.ndim(f4)) # E: int
@@ -244,44 +253,45 @@ reveal_type(np.size(f)) # E: int
reveal_type(np.size(AR_b)) # E: int
reveal_type(np.size(AR_f4)) # E: int
-reveal_type(np.around(b)) # E: Any
-reveal_type(np.around(f4)) # E: Any
+reveal_type(np.around(b)) # E: {float16}
reveal_type(np.around(f)) # E: Any
-reveal_type(np.around(AR_b)) # E: Any
-reveal_type(np.around(AR_f4)) # E: Any
-
-reveal_type(np.mean(b)) # E: Any
-reveal_type(np.mean(f4)) # E: Any
-reveal_type(np.mean(f)) # E: Any
-reveal_type(np.mean(AR_b)) # E: Any
-reveal_type(np.mean(AR_f4)) # E: Any
-reveal_type(np.mean(AR_b, axis=0)) # E: Any
+reveal_type(np.around(i8)) # E: {int64}
+reveal_type(np.around(f4)) # E: {float32}
+reveal_type(np.around(AR_b)) # E: ndarray[Any, dtype[{float16}]]
+reveal_type(np.around(AR_i8)) # E: ndarray[Any, dtype[{int64}]]
+reveal_type(np.around(AR_f4)) # E: ndarray[Any, dtype[{float32}]]
+reveal_type(np.around([1.5])) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.around(AR_f4, out=AR_subclass)) # E: NDArraySubclass
+
+reveal_type(np.mean(AR_b)) # E: floating[Any]
+reveal_type(np.mean(AR_i8)) # E: floating[Any]
+reveal_type(np.mean(AR_f4)) # E: floating[Any]
+reveal_type(np.mean(AR_c16)) # E: complexfloating[Any, Any]
+reveal_type(np.mean(AR_O)) # E: Any
reveal_type(np.mean(AR_f4, axis=0)) # E: Any
-reveal_type(np.mean(AR_b, keepdims=True)) # E: Any
reveal_type(np.mean(AR_f4, keepdims=True)) # E: Any
-reveal_type(np.mean(f4, out=AR_i8)) # E: Any
-reveal_type(np.mean(AR_f4, out=AR_i8)) # E: Any
-
-reveal_type(np.std(b)) # E: Any
-reveal_type(np.std(f4)) # E: Any
-reveal_type(np.std(f)) # E: Any
-reveal_type(np.std(AR_b)) # E: Any
-reveal_type(np.std(AR_f4)) # E: Any
-reveal_type(np.std(AR_b, axis=0)) # E: Any
+reveal_type(np.mean(AR_f4, dtype=float)) # E: Any
+reveal_type(np.mean(AR_f4, dtype=np.float64)) # E: {float64}
+reveal_type(np.mean(AR_f4, out=AR_subclass)) # E: NDArraySubclass
+
+reveal_type(np.std(AR_b)) # E: floating[Any]
+reveal_type(np.std(AR_i8)) # E: floating[Any]
+reveal_type(np.std(AR_f4)) # E: floating[Any]
+reveal_type(np.std(AR_c16)) # E: floating[Any]
+reveal_type(np.std(AR_O)) # E: Any
reveal_type(np.std(AR_f4, axis=0)) # E: Any
-reveal_type(np.std(AR_b, keepdims=True)) # E: Any
reveal_type(np.std(AR_f4, keepdims=True)) # E: Any
-reveal_type(np.std(f4, out=AR_i8)) # E: Any
-reveal_type(np.std(AR_f4, out=AR_i8)) # E: Any
-
-reveal_type(np.var(b)) # E: Any
-reveal_type(np.var(f4)) # E: Any
-reveal_type(np.var(f)) # E: Any
-reveal_type(np.var(AR_b)) # E: Any
-reveal_type(np.var(AR_f4)) # E: Any
-reveal_type(np.var(AR_b, axis=0)) # E: Any
+reveal_type(np.std(AR_f4, dtype=float)) # E: Any
+reveal_type(np.std(AR_f4, dtype=np.float64)) # E: {float64}
+reveal_type(np.std(AR_f4, out=AR_subclass)) # E: NDArraySubclass
+
+reveal_type(np.var(AR_b)) # E: floating[Any]
+reveal_type(np.var(AR_i8)) # E: floating[Any]
+reveal_type(np.var(AR_f4)) # E: floating[Any]
+reveal_type(np.var(AR_c16)) # E: floating[Any]
+reveal_type(np.var(AR_O)) # E: Any
reveal_type(np.var(AR_f4, axis=0)) # E: Any
-reveal_type(np.var(AR_b, keepdims=True)) # E: Any
reveal_type(np.var(AR_f4, keepdims=True)) # E: Any
-reveal_type(np.var(f4, out=AR_i8)) # E: Any
-reveal_type(np.var(AR_f4, out=AR_i8)) # E: Any
+reveal_type(np.var(AR_f4, dtype=float)) # E: Any
+reveal_type(np.var(AR_f4, dtype=np.float64)) # E: {float64}
+reveal_type(np.var(AR_f4, out=AR_subclass)) # E: NDArraySubclass
diff --git a/numpy/typing/tests/data/reveal/memmap.pyi b/numpy/typing/tests/data/reveal/memmap.pyi
index 86de8eb08..af7307499 100644
--- a/numpy/typing/tests/data/reveal/memmap.pyi
+++ b/numpy/typing/tests/data/reveal/memmap.pyi
@@ -14,3 +14,5 @@ reveal_type(np.memmap("file.txt", offset=5)) # E: memmap[Any, dtype[{uint8}]]
reveal_type(np.memmap(b"file.txt", dtype=np.float64, shape=(10, 3))) # E: memmap[Any, dtype[{float64}]]
with open("file.txt", "rb") as f:
reveal_type(np.memmap(f, dtype=float, order="K")) # E: memmap[Any, dtype[Any]]
+
+reveal_type(memmap_obj.__array_finalize__(object())) # E: None
diff --git a/numpy/typing/tests/data/reveal/ndarray_misc.pyi b/numpy/typing/tests/data/reveal/ndarray_misc.pyi
index f91d6351b..c9a42b3e8 100644
--- a/numpy/typing/tests/data/reveal/ndarray_misc.pyi
+++ b/numpy/typing/tests/data/reveal/ndarray_misc.pyi
@@ -212,3 +212,7 @@ reveal_type(AR_f8.dump("test_file")) # E: None
reveal_type(AR_f8.dump(b"test_file")) # E: None
with open("test_file", "wb") as f:
reveal_type(AR_f8.dump(f)) # E: None
+
+reveal_type(AR_f8.__array_finalize__(None)) # E: None
+reveal_type(AR_f8.__array_finalize__(B)) # E: None
+reveal_type(AR_f8.__array_finalize__(AR_f8)) # E: None
diff --git a/numpy/typing/tests/data/reveal/rec.pyi b/numpy/typing/tests/data/reveal/rec.pyi
index b2eaca899..8ea4a6ee8 100644
--- a/numpy/typing/tests/data/reveal/rec.pyi
+++ b/numpy/typing/tests/data/reveal/rec.pyi
@@ -33,6 +33,7 @@ reveal_type(REC_AR_V.field(0, AR_i8)) # E: None
reveal_type(REC_AR_V.field("field_a", AR_i8)) # E: None
reveal_type(REC_AR_V["field_a"]) # E: Any
reveal_type(REC_AR_V.field_a) # E: Any
+reveal_type(REC_AR_V.__array_finalize__(object())) # E: None
reveal_type(np.recarray( # recarray[Any, dtype[record]]
shape=(10, 5),