summaryrefslogtreecommitdiff
path: root/numpy
diff options
context:
space:
mode:
Diffstat (limited to 'numpy')
-rw-r--r--numpy/__init__.pyi384
-rw-r--r--numpy/core/fromnumeric.py12
-rw-r--r--numpy/core/fromnumeric.pyi28
-rw-r--r--numpy/core/src/multiarray/_multiarray_tests.c.src2
-rw-r--r--numpy/core/src/multiarray/array_coercion.c8
-rw-r--r--numpy/core/src/multiarray/ctors.c127
-rw-r--r--numpy/core/src/multiarray/nditer_constr.c6
-rw-r--r--numpy/core/src/umath/_umath_tests.c.src2
-rw-r--r--numpy/core/src/umath/ufunc_object.c9
-rw-r--r--numpy/core/src/umath/ufunc_type_resolution.c251
-rw-r--r--numpy/core/src/umath/ufunc_type_resolution.h1
-rw-r--r--numpy/core/tests/test_deprecations.py5
-rw-r--r--numpy/core/tests/test_memmap.py19
-rw-r--r--numpy/core/tests/test_multiarray.py398
-rw-r--r--numpy/core/tests/test_regression.py16
-rw-r--r--numpy/core/tests/test_scalar_methods.py3
-rw-r--r--numpy/core/tests/test_ufunc.py28
-rw-r--r--numpy/distutils/checks/extra_avx512f_reduce.c2
-rw-r--r--numpy/ma/extras.py2
-rw-r--r--numpy/ma/tests/test_extras.py17
-rw-r--r--numpy/ma/tests/test_mrecords.py2
-rw-r--r--numpy/typing/tests/data/fail/datasource.py15
-rw-r--r--numpy/typing/tests/data/fail/fromnumeric.py4
-rw-r--r--numpy/typing/tests/data/fail/ndarray_misc.py9
-rw-r--r--numpy/typing/tests/data/pass/multiarray.py37
-rw-r--r--numpy/typing/tests/data/pass/ndarray_misc.py32
-rw-r--r--numpy/typing/tests/data/pass/scalars.py48
-rw-r--r--numpy/typing/tests/data/reveal/datasource.py21
-rw-r--r--numpy/typing/tests/data/reveal/multiarray.py35
-rw-r--r--numpy/typing/tests/data/reveal/ndarray_misc.py189
-rw-r--r--numpy/typing/tests/data/reveal/scalars.py79
31 files changed, 1233 insertions, 558 deletions
diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi
index 318c39fc3..2d23f926d 100644
--- a/numpy/__init__.pyi
+++ b/numpy/__init__.pyi
@@ -42,6 +42,7 @@ from numpy.typing import (
_ComplexLike_co,
_TD64Like_co,
_NumberLike_co,
+ _ScalarLike_co,
# `number` precision
NBitBase,
@@ -558,13 +559,6 @@ __git_version__: str
#
# Placeholders for classes
# TODO: Remove `__getattr__` once the classes are stubbed out
-class DataSource:
- def __init__(self, destpath: Any = ...) -> None: ...
- def __del__(self): ...
- def abspath(self, path): ...
- def exists(self, path): ...
- def open(self, path, mode=..., encoding=..., newline=...): ...
-
class MachAr:
def __init__(
self,
@@ -576,10 +570,6 @@ class MachAr:
) -> None: ...
def __getattr__(self, key: str) -> Any: ...
-class broadcast:
- def __new__(cls: Any, *args: Any) -> Any: ...
- def __getattr__(self, key: str) -> Any: ...
-
class busdaycalendar:
def __new__(cls, weekmask: Any = ..., holidays: Any = ...) -> Any: ...
def __getattr__(self, key: str) -> Any: ...
@@ -1207,14 +1197,6 @@ _PartitionKind = Literal["introselect"]
_SortKind = Literal["quicksort", "mergesort", "heapsort", "stable"]
_SortSide = Literal["left", "right"]
-_ArrayLikeBool = Union[_BoolLike_co, Sequence[_BoolLike_co], ndarray]
-_ArrayLikeIntOrBool = Union[
- _IntLike_co,
- ndarray,
- Sequence[_IntLike_co],
- Sequence[Sequence[Any]], # TODO: wait for support for recursive types
-]
-
_ArraySelf = TypeVar("_ArraySelf", bound=_ArrayOrScalarCommon)
class _ArrayOrScalarCommon:
@@ -1247,19 +1229,9 @@ class _ArrayOrScalarCommon:
def copy(self: _ArraySelf, order: _OrderKACF = ...) -> _ArraySelf: ...
def dump(self, file: str) -> None: ...
def dumps(self) -> bytes: ...
- def flatten(self, order: _OrderKACF = ...) -> ndarray: ...
def getfield(
self: _ArraySelf, dtype: DTypeLike, offset: int = ...
) -> _ArraySelf: ...
- def ravel(self, order: _OrderKACF = ...) -> ndarray: ...
- @overload
- def reshape(
- self, __shape: _ShapeLike, *, order: _OrderACF = ...
- ) -> ndarray: ...
- @overload
- def reshape(
- self, *shape: SupportsIndex, order: _OrderACF = ...
- ) -> ndarray: ...
def tobytes(self, order: _OrderKACF = ...) -> bytes: ...
# NOTE: `tostring()` is deprecated and therefore excluded
# def tostring(self, order=...): ...
@@ -1482,7 +1454,7 @@ class _ArrayOrScalarCommon:
out: None = ...,
keepdims: bool = ...,
initial: _NumberLike_co = ...,
- where: _ArrayLikeBool = ...,
+ where: _ArrayLikeBool_co = ...,
) -> Any: ...
@overload
def max(
@@ -1491,7 +1463,7 @@ class _ArrayOrScalarCommon:
out: _NdArraySubClass = ...,
keepdims: bool = ...,
initial: _NumberLike_co = ...,
- where: _ArrayLikeBool = ...,
+ where: _ArrayLikeBool_co = ...,
) -> _NdArraySubClass: ...
@overload
@@ -1518,7 +1490,7 @@ class _ArrayOrScalarCommon:
out: None = ...,
keepdims: bool = ...,
initial: _NumberLike_co = ...,
- where: _ArrayLikeBool = ...,
+ where: _ArrayLikeBool_co = ...,
) -> Any: ...
@overload
def min(
@@ -1527,7 +1499,7 @@ class _ArrayOrScalarCommon:
out: _NdArraySubClass = ...,
keepdims: bool = ...,
initial: _NumberLike_co = ...,
- where: _ArrayLikeBool = ...,
+ where: _ArrayLikeBool_co = ...,
) -> _NdArraySubClass: ...
def newbyteorder(
@@ -1543,7 +1515,7 @@ class _ArrayOrScalarCommon:
out: None = ...,
keepdims: bool = ...,
initial: _NumberLike_co = ...,
- where: _ArrayLikeBool = ...,
+ where: _ArrayLikeBool_co = ...,
) -> Any: ...
@overload
def prod(
@@ -1553,7 +1525,7 @@ class _ArrayOrScalarCommon:
out: _NdArraySubClass = ...,
keepdims: bool = ...,
initial: _NumberLike_co = ...,
- where: _ArrayLikeBool = ...,
+ where: _ArrayLikeBool_co = ...,
) -> _NdArraySubClass: ...
@overload
@@ -1571,12 +1543,6 @@ class _ArrayOrScalarCommon:
keepdims: bool = ...,
) -> _NdArraySubClass: ...
- def repeat(
- self,
- repeats: _ArrayLikeIntOrBool,
- axis: Optional[SupportsIndex] = ...,
- ) -> ndarray: ...
-
@overload
def round(
self: _ArraySelf,
@@ -1617,7 +1583,7 @@ class _ArrayOrScalarCommon:
out: None = ...,
keepdims: bool = ...,
initial: _NumberLike_co = ...,
- where: _ArrayLikeBool = ...,
+ where: _ArrayLikeBool_co = ...,
) -> Any: ...
@overload
def sum(
@@ -1627,32 +1593,7 @@ class _ArrayOrScalarCommon:
out: _NdArraySubClass = ...,
keepdims: bool = ...,
initial: _NumberLike_co = ...,
- where: _ArrayLikeBool = ...,
- ) -> _NdArraySubClass: ...
-
- @overload
- def take(
- self,
- indices: _IntLike_co,
- axis: Optional[SupportsIndex] = ...,
- out: None = ...,
- mode: _ModeKind = ...,
- ) -> Any: ...
- @overload
- def take(
- self,
- indices: _ArrayLikeIntOrBool,
- axis: Optional[SupportsIndex] = ...,
- out: None = ...,
- mode: _ModeKind = ...,
- ) -> ndarray: ...
- @overload
- def take(
- self,
- indices: _ArrayLikeIntOrBool,
- axis: Optional[SupportsIndex] = ...,
- out: _NdArraySubClass = ...,
- mode: _ModeKind = ...,
+ where: _ArrayLikeBool_co = ...,
) -> _NdArraySubClass: ...
@overload
@@ -1684,6 +1625,7 @@ _NumberType = TypeVar("_NumberType", bound=number[Any])
_BufferType = Union[ndarray, bytes, bytearray, memoryview]
_T = TypeVar("_T")
+_T_co = TypeVar("_T_co", covariant=True)
_2Tuple = Tuple[_T, _T]
_Casting = Literal["no", "equiv", "safe", "same_kind", "unsafe"]
@@ -1694,6 +1636,9 @@ _ArrayComplex_co = NDArray[Union[bool_, integer[Any], floating[Any], complexfloa
_ArrayNumber_co = NDArray[Union[bool_, number[Any]]]
_ArrayTD64_co = NDArray[Union[bool_, integer[Any], timedelta64]]
+class _SupportsItem(Protocol[_T_co]):
+ def item(self, __args: Any) -> _T_co: ...
+
class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]):
@property
def base(self) -> Optional[ndarray]: ...
@@ -1727,84 +1672,131 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]):
@property
def shape(self) -> _Shape: ...
@shape.setter
- def shape(self, value: _ShapeLike): ...
+ def shape(self, value: _ShapeLike) -> None: ...
@property
def strides(self) -> _Shape: ...
@strides.setter
- def strides(self, value: _ShapeLike): ...
+ def strides(self, value: _ShapeLike) -> None: ...
def byteswap(self: _ArraySelf, inplace: bool = ...) -> _ArraySelf: ...
def fill(self, value: Any) -> None: ...
@property
def flat(self: _NdArraySubClass) -> flatiter[_NdArraySubClass]: ...
+
+ # Use the same output type as that of the underlying `generic`
@overload
- def item(self, *args: SupportsIndex) -> Any: ...
+ def item(
+ self: ndarray[Any, dtype[_SupportsItem[_T]]], # type: ignore[type-var]
+ *args: SupportsIndex,
+ ) -> _T: ...
@overload
- def item(self, __args: Tuple[SupportsIndex, ...]) -> Any: ...
+ def item(
+ self: ndarray[Any, dtype[_SupportsItem[_T]]], # type: ignore[type-var]
+ __args: Tuple[SupportsIndex, ...],
+ ) -> _T: ...
+
@overload
def itemset(self, __value: Any) -> None: ...
@overload
def itemset(self, __item: _ShapeLike, __value: Any) -> None: ...
+
@overload
def resize(self, __new_shape: _ShapeLike, *, refcheck: bool = ...) -> None: ...
@overload
def resize(self, *new_shape: SupportsIndex, refcheck: bool = ...) -> None: ...
+
def setflags(
self, write: bool = ..., align: bool = ..., uic: bool = ...
) -> None: ...
+
def squeeze(
- self: _ArraySelf, axis: Union[SupportsIndex, Tuple[SupportsIndex, ...]] = ...
- ) -> _ArraySelf: ...
- def swapaxes(self: _ArraySelf, axis1: SupportsIndex, axis2: SupportsIndex) -> _ArraySelf: ...
+ self,
+ axis: Union[SupportsIndex, Tuple[SupportsIndex, ...]] = ...,
+ ) -> ndarray[Any, _DType_co]: ...
+
+ def swapaxes(
+ self,
+ axis1: SupportsIndex,
+ axis2: SupportsIndex,
+ ) -> ndarray[Any, _DType_co]: ...
+
@overload
def transpose(self: _ArraySelf, __axes: _ShapeLike) -> _ArraySelf: ...
@overload
def transpose(self: _ArraySelf, *axes: SupportsIndex) -> _ArraySelf: ...
+
def argpartition(
self,
- kth: _ArrayLikeIntOrBool,
+ kth: _ArrayLikeInt_co,
axis: Optional[SupportsIndex] = ...,
kind: _PartitionKind = ...,
order: Union[None, str, Sequence[str]] = ...,
- ) -> ndarray: ...
+ ) -> ndarray[Any, dtype[intp]]: ...
+
def diagonal(
- self: _ArraySelf,
+ self,
offset: SupportsIndex = ...,
axis1: SupportsIndex = ...,
axis2: SupportsIndex = ...,
- ) -> _ArraySelf: ...
+ ) -> ndarray[Any, _DType_co]: ...
+
+ # 1D + 1D returns a scalar;
+    # all others with at least 1 non-0D array return an ndarray.
+ @overload
+ def dot(self, b: _ScalarLike_co, out: None = ...) -> ndarray: ...
@overload
- def dot(self, b: ArrayLike, out: None = ...) -> ndarray: ...
+ def dot(self, b: ArrayLike, out: None = ...) -> Any: ... # type: ignore[misc]
@overload
- def dot(self, b: ArrayLike, out: _NdArraySubClass = ...) -> _NdArraySubClass: ...
+ def dot(self, b: ArrayLike, out: _NdArraySubClass) -> _NdArraySubClass: ...
+
# `nonzero()` is deprecated for 0d arrays/generics
- def nonzero(self) -> Tuple[ndarray, ...]: ...
+ def nonzero(self) -> Tuple[ndarray[Any, dtype[intp]], ...]: ...
+
def partition(
self,
- kth: _ArrayLikeIntOrBool,
+ kth: _ArrayLikeInt_co,
axis: SupportsIndex = ...,
kind: _PartitionKind = ...,
order: Union[None, str, Sequence[str]] = ...,
) -> None: ...
+
# `put` is technically available to `generic`,
# but is pointless as `generic`s are immutable
def put(
- self, ind: _ArrayLikeIntOrBool, v: ArrayLike, mode: _ModeKind = ...
+ self,
+ ind: _ArrayLikeInt_co,
+ v: ArrayLike,
+ mode: _ModeKind = ...,
) -> None: ...
+
+ @overload
+ def searchsorted( # type: ignore[misc]
+ self, # >= 1D array
+ v: _ScalarLike_co, # 0D array-like
+ side: _SortSide = ...,
+ sorter: Optional[_ArrayLikeInt_co] = ...,
+ ) -> intp: ...
+ @overload
def searchsorted(
self, # >= 1D array
v: ArrayLike,
side: _SortSide = ...,
- sorter: Optional[_ArrayLikeIntOrBool] = ..., # 1D int array
- ) -> ndarray: ...
+ sorter: Optional[_ArrayLikeInt_co] = ...,
+ ) -> ndarray[Any, dtype[intp]]: ...
+
def setfield(
- self, val: ArrayLike, dtype: DTypeLike, offset: SupportsIndex = ...
+ self,
+ val: ArrayLike,
+ dtype: DTypeLike,
+ offset: SupportsIndex = ...,
) -> None: ...
+
def sort(
self,
axis: SupportsIndex = ...,
kind: Optional[_SortKind] = ...,
order: Union[None, str, Sequence[str]] = ...,
) -> None: ...
+
@overload
def trace(
self, # >= 2D array
@@ -1823,17 +1815,78 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]):
dtype: DTypeLike = ...,
out: _NdArraySubClass = ...,
) -> _NdArraySubClass: ...
- # Many of these special methods are irrelevant currently, since protocols
- # aren't supported yet. That said, I'm adding them for completeness.
- # https://docs.python.org/3/reference/datamodel.html
- def __int__(self) -> int: ...
- def __float__(self) -> float: ...
- def __complex__(self) -> complex: ...
+
+ @overload
+ def take( # type: ignore[misc]
+ self: ndarray[Any, dtype[_ScalarType]],
+ indices: _IntLike_co,
+ axis: Optional[SupportsIndex] = ...,
+ out: None = ...,
+ mode: _ModeKind = ...,
+ ) -> _ScalarType: ...
+ @overload
+ def take( # type: ignore[misc]
+ self,
+ indices: _ArrayLikeInt_co,
+ axis: Optional[SupportsIndex] = ...,
+ out: None = ...,
+ mode: _ModeKind = ...,
+ ) -> ndarray[Any, _DType_co]: ...
+ @overload
+ def take(
+ self,
+ indices: _ArrayLikeInt_co,
+ axis: Optional[SupportsIndex] = ...,
+ out: _NdArraySubClass = ...,
+ mode: _ModeKind = ...,
+ ) -> _NdArraySubClass: ...
+
+ def repeat(
+ self,
+ repeats: _ArrayLikeInt_co,
+ axis: Optional[SupportsIndex] = ...,
+ ) -> ndarray[Any, _DType_co]: ...
+
+ def flatten(
+ self,
+ order: _OrderKACF = ...,
+ ) -> ndarray[Any, _DType_co]: ...
+
+ def ravel(
+ self,
+ order: _OrderKACF = ...,
+ ) -> ndarray[Any, _DType_co]: ...
+
+ @overload
+ def reshape(
+ self, __shape: _ShapeLike, *, order: _OrderACF = ...
+ ) -> ndarray[Any, _DType_co]: ...
+ @overload
+ def reshape(
+ self, *shape: SupportsIndex, order: _OrderACF = ...
+ ) -> ndarray[Any, _DType_co]: ...
+
+ # Dispatch to the underlying `generic` via protocols
+ def __int__(
+ self: ndarray[Any, dtype[SupportsInt]], # type: ignore[type-var]
+ ) -> int: ...
+
+ def __float__(
+ self: ndarray[Any, dtype[SupportsFloat]], # type: ignore[type-var]
+ ) -> float: ...
+
+ def __complex__(
+ self: ndarray[Any, dtype[SupportsComplex]], # type: ignore[type-var]
+ ) -> complex: ...
+
+ def __index__(
+ self: ndarray[Any, dtype[SupportsIndex]], # type: ignore[type-var]
+ ) -> int: ...
+
def __len__(self) -> int: ...
def __setitem__(self, key, value): ...
def __iter__(self) -> Any: ...
def __contains__(self, key) -> bool: ...
- def __index__(self) -> int: ...
# The last overload is for catching recursive objects whose
# nesting is too deep.
@@ -2786,9 +2839,60 @@ class generic(_ArrayOrScalarCommon):
@property
def flat(self: _ScalarType) -> flatiter[ndarray[Any, dtype[_ScalarType]]]: ...
def item(
- self: _ScalarType,
+ self,
__args: Union[Literal[0], Tuple[()], Tuple[Literal[0]]] = ...,
) -> Any: ...
+
+ @overload
+ def take( # type: ignore[misc]
+ self: _ScalarType,
+ indices: _IntLike_co,
+ axis: Optional[SupportsIndex] = ...,
+ out: None = ...,
+ mode: _ModeKind = ...,
+ ) -> _ScalarType: ...
+ @overload
+ def take( # type: ignore[misc]
+ self: _ScalarType,
+ indices: _ArrayLikeInt_co,
+ axis: Optional[SupportsIndex] = ...,
+ out: None = ...,
+ mode: _ModeKind = ...,
+ ) -> ndarray[Any, dtype[_ScalarType]]: ...
+ @overload
+ def take(
+ self,
+ indices: _ArrayLikeInt_co,
+ axis: Optional[SupportsIndex] = ...,
+ out: _NdArraySubClass = ...,
+ mode: _ModeKind = ...,
+ ) -> _NdArraySubClass: ...
+
+ def repeat(
+ self: _ScalarType,
+ repeats: _ArrayLikeInt_co,
+ axis: Optional[SupportsIndex] = ...,
+ ) -> ndarray[Any, dtype[_ScalarType]]: ...
+
+ def flatten(
+ self: _ScalarType,
+ order: _OrderKACF = ...,
+ ) -> ndarray[Any, dtype[_ScalarType]]: ...
+
+ def ravel(
+ self: _ScalarType,
+ order: _OrderKACF = ...,
+ ) -> ndarray[Any, dtype[_ScalarType]]: ...
+
+ @overload
+ def reshape(
+ self: _ScalarType, __shape: _ShapeLike, *, order: _OrderACF = ...
+ ) -> ndarray[Any, dtype[_ScalarType]]: ...
+ @overload
+ def reshape(
+ self: _ScalarType, *shape: SupportsIndex, order: _OrderACF = ...
+ ) -> ndarray[Any, dtype[_ScalarType]]: ...
+
def squeeze(
self: _ScalarType, axis: Union[Literal[0], Tuple[()]] = ...
) -> _ScalarType: ...
@@ -2828,6 +2932,11 @@ class number(generic, Generic[_NBit1]): # type: ignore
class bool_(generic):
def __init__(self, __value: object = ...) -> None: ...
+ def item(
+ self,
+ __args: Union[Literal[0], Tuple[()], Tuple[Literal[0]]] = ...,
+ ) -> bool: ...
+ def tolist(self) -> bool: ...
@property
def real(self: _ArraySelf) -> _ArraySelf: ...
@property
@@ -2876,6 +2985,11 @@ class object_(generic):
def real(self: _ArraySelf) -> _ArraySelf: ...
@property
def imag(self: _ArraySelf) -> _ArraySelf: ...
+ # The 3 protocols below may or may not raise,
+ # depending on the underlying object
+ def __int__(self) -> int: ...
+ def __float__(self) -> float: ...
+ def __complex__(self) -> complex: ...
object0 = object_
@@ -2889,7 +3003,8 @@ class _DatetimeScalar(Protocol):
@property
def year(self) -> int: ...
-
+# TODO: `item`/`tolist` returns either `dt.date`, `dt.datetime` or `int`
+# depending on the unit
class datetime64(generic):
@overload
def __init__(
@@ -2928,6 +3043,11 @@ else:
class integer(number[_NBit1]): # type: ignore
# NOTE: `__index__` is technically defined in the bottom-most
# sub-classes (`int64`, `uint32`, etc)
+ def item(
+ self,
+ __args: Union[Literal[0], Tuple[()], Tuple[Literal[0]]] = ...,
+ ) -> int: ...
+ def tolist(self) -> int: ...
def __index__(self) -> int: ...
__truediv__: _IntTrueDiv[_NBit1]
__rtruediv__: _IntTrueDiv[_NBit1]
@@ -2986,12 +3106,17 @@ int0 = signedinteger[_NBitIntP]
int_ = signedinteger[_NBitInt]
longlong = signedinteger[_NBitLongLong]
+# TODO: `item`/`tolist` returns either `dt.timedelta` or `int`
+# depending on the unit
class timedelta64(generic):
def __init__(
self,
__value: Union[None, int, _CharLike_co, dt.timedelta, timedelta64] = ...,
__format: Union[_CharLike_co, Tuple[_CharLike_co, _IntLike_co]] = ...,
) -> None: ...
+
+ # NOTE: Only a limited number of units support conversion
+ # to builtin scalar types: `Y`, `M`, `ns`, `ps`, `fs`, `as`
def __int__(self) -> int: ...
def __float__(self) -> float: ...
def __complex__(self) -> complex: ...
@@ -3065,6 +3190,11 @@ _FloatType = TypeVar('_FloatType', bound=floating)
class floating(inexact[_NBit1]):
def __init__(self, __value: _FloatValue = ...) -> None: ...
+ def item(
+ self,
+ __args: Union[Literal[0], Tuple[()], Tuple[Literal[0]]] = ...,
+ ) -> float: ...
+ def tolist(self) -> float: ...
__add__: _FloatOp[_NBit1]
__radd__: _FloatOp[_NBit1]
__sub__: _FloatOp[_NBit1]
@@ -3099,6 +3229,11 @@ longfloat = floating[_NBitLongDouble]
class complexfloating(inexact[_NBit1], Generic[_NBit1, _NBit2]):
def __init__(self, __value: _ComplexValue = ...) -> None: ...
+ def item(
+ self,
+ __args: Union[Literal[0], Tuple[()], Tuple[Literal[0]]] = ...,
+ ) -> complex: ...
+ def tolist(self) -> complex: ...
@property
def real(self) -> floating[_NBit1]: ... # type: ignore[override]
@property
@@ -3131,8 +3266,11 @@ longcomplex = complexfloating[_NBitLongDouble, _NBitLongDouble]
class flexible(generic): ... # type: ignore
+# TODO: `item`/`tolist` returns either `bytes` or `tuple`
+# depending on whether or not it's used as an opaque bytes sequence
+# or a structure
class void(flexible):
- def __init__(self, __value: Union[_IntLike_co, bytes]): ...
+ def __init__(self, __value: Union[_IntLike_co, bytes]) -> None: ...
@property
def real(self: _ArraySelf) -> _ArraySelf: ...
@property
@@ -3159,6 +3297,11 @@ class bytes_(character, bytes):
def __init__(
self, __value: str, encoding: str = ..., errors: str = ...
) -> None: ...
+ def item(
+ self,
+ __args: Union[Literal[0], Tuple[()], Tuple[Literal[0]]] = ...,
+ ) -> bytes: ...
+ def tolist(self) -> bytes: ...
string_ = bytes_
bytes0 = bytes_
@@ -3170,6 +3313,11 @@ class str_(character, str):
def __init__(
self, __value: bytes, encoding: str = ..., errors: str = ...
) -> None: ...
+ def item(
+ self,
+ __args: Union[Literal[0], Tuple[()], Tuple[Literal[0]]] = ...,
+ ) -> str: ...
+ def tolist(self) -> str: ...
unicode_ = str_
str0 = str_
@@ -3479,3 +3627,45 @@ class ndindex:
def __init__(self, *shape: SupportsIndex) -> None: ...
def __iter__(self: _T) -> _T: ...
def __next__(self) -> _Shape: ...
+
+class DataSource:
+ def __init__(
+ self,
+ destpath: Union[None, str, os.PathLike[str]] = ...,
+ ) -> None: ...
+ def __del__(self) -> None: ...
+ def abspath(self, path: str) -> str: ...
+ def exists(self, path: str) -> bool: ...
+
+ # Whether the file-object is opened in string or bytes mode (by default)
+ # depends on the file-extension of `path`
+ def open(
+ self,
+ path: str,
+ mode: str = ...,
+ encoding: Optional[str] = ...,
+ newline: Optional[str] = ...,
+ ) -> IO[Any]: ...
+
+# TODO: The type of each `__next__` and `iters` return-type depends
+# on the length and dtype of `args`; we can't describe this behavior yet
+# as we lack variadics (PEP 646).
+class broadcast:
+ def __new__(cls, *args: ArrayLike) -> broadcast: ...
+ @property
+ def index(self) -> int: ...
+ @property
+ def iters(self) -> Tuple[flatiter[Any], ...]: ...
+ @property
+ def nd(self) -> int: ...
+ @property
+ def ndim(self) -> int: ...
+ @property
+ def numiter(self) -> int: ...
+ @property
+ def shape(self) -> _Shape: ...
+ @property
+ def size(self) -> int: ...
+ def __next__(self) -> Tuple[Any, ...]: ...
+ def __iter__(self: _T) -> _T: ...
+ def reset(self) -> None: ...
diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py
index 5c7b3372b..65a42eb1e 100644
--- a/numpy/core/fromnumeric.py
+++ b/numpy/core/fromnumeric.py
@@ -2498,6 +2498,10 @@ def cumsum(a, axis=None, dtype=None, out=None):
Arithmetic is modular when using integer types, and no error is
raised on overflow.
+ ``cumsum(a)[-1]`` may not be equal to ``sum(a)`` for floating-point
+ values since ``sum`` may use a pairwise summation routine, reducing
+ the roundoff-error. See `sum` for more information.
+
Examples
--------
>>> a = np.array([[1,2,3], [4,5,6]])
@@ -2516,6 +2520,14 @@ def cumsum(a, axis=None, dtype=None, out=None):
array([[ 1, 3, 6],
[ 4, 9, 15]])
+ ``cumsum(b)[-1]`` may not be equal to ``sum(b)``
+
+ >>> b = np.array([1, 2e-9, 3e-9] * 1000000)
+ >>> b.cumsum()[-1]
+ 1000000.0050045159
+ >>> b.sum()
+ 1000000.0050000029
+
"""
return _wrapfunc(a, 'cumsum', axis=axis, dtype=dtype, out=out)
diff --git a/numpy/core/fromnumeric.pyi b/numpy/core/fromnumeric.pyi
index 26a43c1a0..3342ec3ac 100644
--- a/numpy/core/fromnumeric.pyi
+++ b/numpy/core/fromnumeric.pyi
@@ -11,8 +11,6 @@ from numpy import (
generic,
_OrderKACF,
_OrderACF,
- _ArrayLikeBool,
- _ArrayLikeIntOrBool,
_ModeKind,
_PartitionKind,
_SortKind,
@@ -23,6 +21,8 @@ from numpy.typing import (
ArrayLike,
_ShapeLike,
_Shape,
+ _ArrayLikeBool_co,
+ _ArrayLikeInt_co,
_NumberLike_co,
)
@@ -52,7 +52,7 @@ _Number = TypeVar("_Number", bound=number)
# 4. An array-like object comes in; an ndarray or generic comes out
def take(
a: ArrayLike,
- indices: _ArrayLikeIntOrBool,
+ indices: _ArrayLikeInt_co,
axis: Optional[int] = ...,
out: Optional[ndarray] = ...,
mode: _ModeKind = ...,
@@ -65,7 +65,7 @@ def reshape(
) -> ndarray: ...
def choose(
- a: _ArrayLikeIntOrBool,
+ a: _ArrayLikeInt_co,
choices: ArrayLike,
out: Optional[ndarray] = ...,
mode: _ModeKind = ...,
@@ -73,13 +73,13 @@ def choose(
def repeat(
a: ArrayLike,
- repeats: _ArrayLikeIntOrBool,
+ repeats: _ArrayLikeInt_co,
axis: Optional[int] = ...,
) -> ndarray: ...
def put(
a: ndarray,
- ind: _ArrayLikeIntOrBool,
+ ind: _ArrayLikeInt_co,
v: ArrayLike,
mode: _ModeKind = ...,
) -> None: ...
@@ -97,7 +97,7 @@ def transpose(
def partition(
a: ArrayLike,
- kth: _ArrayLikeIntOrBool,
+ kth: _ArrayLikeInt_co,
axis: Optional[int] = ...,
kind: _PartitionKind = ...,
order: Union[None, str, Sequence[str]] = ...,
@@ -105,7 +105,7 @@ def partition(
def argpartition(
a: ArrayLike,
- kth: _ArrayLikeIntOrBool,
+ kth: _ArrayLikeInt_co,
axis: Optional[int] = ...,
kind: _PartitionKind = ...,
order: Union[None, str, Sequence[str]] = ...,
@@ -156,14 +156,14 @@ def searchsorted(
a: ArrayLike,
v: _Scalar,
side: _SortSide = ...,
- sorter: Optional[_ArrayLikeIntOrBool] = ..., # 1D int array
+ sorter: Optional[_ArrayLikeInt_co] = ..., # 1D int array
) -> intp: ...
@overload
def searchsorted(
a: ArrayLike,
v: ArrayLike,
side: _SortSide = ...,
- sorter: Optional[_ArrayLikeIntOrBool] = ..., # 1D int array
+ sorter: Optional[_ArrayLikeInt_co] = ..., # 1D int array
) -> ndarray: ...
def resize(
@@ -235,7 +235,7 @@ def sum(
out: Optional[ndarray] = ...,
keepdims: bool = ...,
initial: _NumberLike_co = ...,
- where: _ArrayLikeBool = ...,
+ where: _ArrayLikeBool_co = ...,
) -> Any: ...
@overload
@@ -288,7 +288,7 @@ def amax(
out: Optional[ndarray] = ...,
keepdims: bool = ...,
initial: _NumberLike_co = ...,
- where: _ArrayLikeBool = ...,
+ where: _ArrayLikeBool_co = ...,
) -> Any: ...
def amin(
@@ -297,7 +297,7 @@ def amin(
out: Optional[ndarray] = ...,
keepdims: bool = ...,
initial: _NumberLike_co = ...,
- where: _ArrayLikeBool = ...,
+ where: _ArrayLikeBool_co = ...,
) -> Any: ...
# TODO: `np.prod()``: For object arrays `initial` does not necessarily
@@ -314,7 +314,7 @@ def prod(
out: Optional[ndarray] = ...,
keepdims: bool = ...,
initial: _NumberLike_co = ...,
- where: _ArrayLikeBool = ...,
+ where: _ArrayLikeBool_co = ...,
) -> Any: ...
def cumprod(
diff --git a/numpy/core/src/multiarray/_multiarray_tests.c.src b/numpy/core/src/multiarray/_multiarray_tests.c.src
index febcc8512..ba10573d9 100644
--- a/numpy/core/src/multiarray/_multiarray_tests.c.src
+++ b/numpy/core/src/multiarray/_multiarray_tests.c.src
@@ -407,7 +407,7 @@ test_neighborhood_iterator_oob(PyObject* NPY_UNUSED(self), PyObject* args)
niterx2 = (PyArrayNeighborhoodIterObject*)PyArray_NeighborhoodIterNew(
(PyArrayIterObject*)niterx1, bounds,
mode2, NULL);
- if (niterx1 == NULL) {
+ if (niterx2 == NULL) {
goto clean_niterx1;
}
diff --git a/numpy/core/src/multiarray/array_coercion.c b/numpy/core/src/multiarray/array_coercion.c
index ef99ae479..6b7c3888d 100644
--- a/numpy/core/src/multiarray/array_coercion.c
+++ b/numpy/core/src/multiarray/array_coercion.c
@@ -622,8 +622,12 @@ handle_promotion(PyArray_Descr **out_descr, PyArray_Descr *descr,
}
PyArray_Descr *new_descr = PyArray_PromoteTypes(descr, *out_descr);
if (NPY_UNLIKELY(new_descr == NULL)) {
- if (fixed_DType != NULL) {
- /* If a DType is fixed, promotion must not fail. */
+ if (fixed_DType != NULL || PyErr_ExceptionMatches(PyExc_FutureWarning)) {
+ /*
+ * If a DType is fixed, promotion must not fail. Do not catch
+ * FutureWarning (raised for string+numeric promotions). We could
+ * only catch TypeError here or even always raise the error.
+ */
return -1;
}
PyErr_Clear();
diff --git a/numpy/core/src/multiarray/ctors.c b/numpy/core/src/multiarray/ctors.c
index 7907fb930..671ce49e4 100644
--- a/numpy/core/src/multiarray/ctors.c
+++ b/numpy/core/src/multiarray/ctors.c
@@ -665,13 +665,11 @@ PyArray_NewFromDescr_int(
int allow_emptystring)
{
PyArrayObject_fields *fa;
- int i;
npy_intp nbytes;
- if ((unsigned int)nd > (unsigned int)NPY_MAXDIMS) {
+ if (nd > NPY_MAXDIMS || nd < 0) {
PyErr_Format(PyExc_ValueError,
- "number of dimensions must be within [0, %d]",
- NPY_MAXDIMS);
+ "number of dimensions must be within [0, %d]", NPY_MAXDIMS);
Py_DECREF(descr);
return NULL;
}
@@ -718,39 +716,6 @@ PyArray_NewFromDescr_int(
}
}
- /* Check dimensions and multiply them to nbytes */
- for (i = 0; i < nd; i++) {
- npy_intp dim = dims[i];
-
- if (dim == 0) {
- /*
- * Compare to PyArray_OverflowMultiplyList that
- * returns 0 in this case.
- */
- continue;
- }
-
- if (dim < 0) {
- PyErr_SetString(PyExc_ValueError,
- "negative dimensions are not allowed");
- Py_DECREF(descr);
- return NULL;
- }
-
- /*
- * Care needs to be taken to avoid integer overflow when
- * multiplying the dimensions together to get the total size of the
- * array.
- */
- if (npy_mul_with_overflow_intp(&nbytes, nbytes, dim)) {
- PyErr_SetString(PyExc_ValueError,
- "array is too big; `arr.size * arr.dtype.itemsize` "
- "is larger than the maximum possible size.");
- Py_DECREF(descr);
- return NULL;
- }
- }
-
fa = (PyArrayObject_fields *) subtype->tp_alloc(subtype, 0);
if (fa == NULL) {
Py_DECREF(descr);
@@ -786,26 +751,57 @@ PyArray_NewFromDescr_int(
goto fail;
}
fa->strides = fa->dimensions + nd;
- if (nd) {
- memcpy(fa->dimensions, dims, sizeof(npy_intp)*nd);
+
+ /* Copy dimensions, check them, and find total array size `nbytes` */
+ for (int i = 0; i < nd; i++) {
+ fa->dimensions[i] = dims[i];
+
+ if (fa->dimensions[i] == 0) {
+ /*
+ * Compare to PyArray_OverflowMultiplyList that
+ * returns 0 in this case.
+ */
+ continue;
+ }
+
+ if (fa->dimensions[i] < 0) {
+ PyErr_SetString(PyExc_ValueError,
+ "negative dimensions are not allowed");
+ goto fail;
+ }
+
+ /*
+ * Care needs to be taken to avoid integer overflow when multiplying
+ * the dimensions together to get the total size of the array.
+ */
+ if (npy_mul_with_overflow_intp(&nbytes, nbytes, fa->dimensions[i])) {
+ PyErr_SetString(PyExc_ValueError,
+ "array is too big; `arr.size * arr.dtype.itemsize` "
+ "is larger than the maximum possible size.");
+ goto fail;
+ }
}
- if (strides == NULL) { /* fill it in */
+
+ /* Fill the strides (or copy them if they were passed in) */
+ if (strides == NULL) {
+ /* fill the strides and set the contiguity flags */
_array_fill_strides(fa->strides, dims, nd, descr->elsize,
flags, &(fa->flags));
}
else {
- /*
- * we allow strides even when we create
- * the memory, but be careful with this...
- */
- if (nd) {
- memcpy(fa->strides, strides, sizeof(npy_intp)*nd);
+        /* User-provided strides (user is responsible for correctness) */
+ for (int i = 0; i < nd; i++) {
+ fa->strides[i] = strides[i];
}
+        /* Since the strides were passed in, we must update the contiguity */
+ PyArray_UpdateFlags((PyArrayObject *)fa,
+ NPY_ARRAY_C_CONTIGUOUS|NPY_ARRAY_F_CONTIGUOUS);
}
}
else {
- fa->dimensions = fa->strides = NULL;
- fa->flags |= NPY_ARRAY_F_CONTIGUOUS;
+ fa->dimensions = NULL;
+ fa->strides = NULL;
+ fa->flags |= NPY_ARRAY_C_CONTIGUOUS|NPY_ARRAY_F_CONTIGUOUS;
}
if (data == NULL) {
@@ -844,12 +840,11 @@ PyArray_NewFromDescr_int(
fa->data = data;
/*
- * always update the flags to get the right CONTIGUOUS, ALIGN properties
- * not owned data and input strides may not be aligned and on some
- * platforms (debian sparc) malloc does not provide enough alignment for
- * long double types
+ * Always update the aligned flag. Not owned data or input strides may
+ * not be aligned. Also on some platforms (debian sparc) malloc does not
+ * provide enough alignment for long double types.
*/
- PyArray_UpdateFlags((PyArrayObject *)fa, NPY_ARRAY_UPDATE_ALL);
+ PyArray_UpdateFlags((PyArrayObject *)fa, NPY_ARRAY_ALIGNED);
/* Set the base object. It's important to do it here so that
* __array_finalize__ below receives it
@@ -862,15 +857,20 @@ PyArray_NewFromDescr_int(
}
/*
- * call the __array_finalize__
- * method if a subtype.
- * If obj is NULL, then call method with Py_None
+ * call the __array_finalize__ method if a subtype was requested.
+ * If obj is NULL use Py_None for the Python callback.
*/
- if ((subtype != &PyArray_Type)) {
- PyObject *res, *func, *args;
+ if (subtype != &PyArray_Type) {
+ PyObject *res, *func;
func = PyObject_GetAttr((PyObject *)fa, npy_ma_str_array_finalize);
- if (func && func != Py_None) {
+ if (func == NULL) {
+ goto fail;
+ }
+ else if (func == Py_None) {
+ Py_DECREF(func);
+ }
+ else {
if (PyCapsule_CheckExact(func)) {
/* A C-function is stored here */
PyArray_FinalizeFunc *cfunc;
@@ -884,14 +884,10 @@ PyArray_NewFromDescr_int(
}
}
else {
- args = PyTuple_New(1);
if (obj == NULL) {
- obj=Py_None;
+ obj = Py_None;
}
- Py_INCREF(obj);
- PyTuple_SET_ITEM(args, 0, obj);
- res = PyObject_Call(func, args, NULL);
- Py_DECREF(args);
+ res = PyObject_CallFunctionObjArgs(func, obj, NULL);
Py_DECREF(func);
if (res == NULL) {
goto fail;
@@ -901,7 +897,6 @@ PyArray_NewFromDescr_int(
}
}
}
- else Py_XDECREF(func);
}
return (PyObject *)fa;
diff --git a/numpy/core/src/multiarray/nditer_constr.c b/numpy/core/src/multiarray/nditer_constr.c
index 2197fe798..a0154e474 100644
--- a/numpy/core/src/multiarray/nditer_constr.c
+++ b/numpy/core/src/multiarray/nditer_constr.c
@@ -594,8 +594,10 @@ NpyIter_Copy(NpyIter *iter)
if (buffers[iop] == NULL) {
out_of_memory = 1;
}
- if (PyDataType_FLAGCHK(dtypes[iop], NPY_NEEDS_INIT)) {
- memset(buffers[iop], '\0', itemsize*buffersize);
+ else {
+ if (PyDataType_FLAGCHK(dtypes[iop], NPY_NEEDS_INIT)) {
+ memset(buffers[iop], '\0', itemsize*buffersize);
+ }
}
}
}
diff --git a/numpy/core/src/umath/_umath_tests.c.src b/numpy/core/src/umath/_umath_tests.c.src
index 7cc74a4f3..2e79d377e 100644
--- a/numpy/core/src/umath/_umath_tests.c.src
+++ b/numpy/core/src/umath/_umath_tests.c.src
@@ -532,7 +532,7 @@ UMath_Tests_test_signature(PyObject *NPY_UNUSED(dummy), PyObject *args)
}
if (f->core_dim_ixs != NULL) {
core_dim_ixs = PyTuple_New(core_num_ixs);
- if (core_num_dims == NULL) {
+ if (core_dim_ixs == NULL) {
goto fail;
}
for (i = 0; i < core_num_ixs; i++) {
diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c
index 7dffb482f..0644a28c0 100644
--- a/numpy/core/src/umath/ufunc_object.c
+++ b/numpy/core/src/umath/ufunc_object.c
@@ -4542,10 +4542,15 @@ _get_normalized_typetup(PyUFuncObject *ufunc,
"Cannot provide `dtype` when a ufunc has no outputs");
return -1;
}
- signature[nin] = _get_dtype(dtype_obj);
- if (signature[nin] == NULL) {
+ PyArray_DTypeMeta *dtype = _get_dtype(dtype_obj);
+ if (dtype == NULL) {
return -1;
}
+ for (int i = nin; i < nop; i++) {
+ Py_INCREF(dtype);
+ signature[i] = dtype;
+ }
+ Py_DECREF(dtype);
res = _make_new_typetup(nop, signature, out_typetup);
goto finish;
}
diff --git a/numpy/core/src/umath/ufunc_type_resolution.c b/numpy/core/src/umath/ufunc_type_resolution.c
index a3f97a8f3..2834235e4 100644
--- a/numpy/core/src/umath/ufunc_type_resolution.c
+++ b/numpy/core/src/umath/ufunc_type_resolution.c
@@ -288,7 +288,7 @@ PyUFunc_DefaultTypeResolver(PyUFuncObject *ufunc,
} else {
/* Find the specified ufunc inner loop, and fill in the dtypes */
retval = type_tuple_type_resolver(ufunc, type_tup,
- operands, casting, any_object, out_dtypes);
+ operands, input_casting, casting, any_object, out_dtypes);
}
return retval;
@@ -558,6 +558,11 @@ PyUFunc_SimpleUniformOperationTypeResolver(
* This is a fast-path, since all descriptors will be identical, mainly
* when only a single descriptor was passed (which would set the out
* one in the tuple), there is no need to check all loops.
+ * Note that this also allows (None, None, float64) to resolve to
+ * (float64, float64, float64), even when the inputs do not match,
+ * i.e. fixing the output part of the signature can fix all of them.
+ * This is necessary to support `nextafter(1., inf, dtype=float32)`,
+ * where it is "clear" we want to cast 1. and inf to float32.
*/
PyArray_Descr *descr = NULL;
if (PyTuple_CheckExact(type_tup) &&
@@ -565,7 +570,12 @@ PyUFunc_SimpleUniformOperationTypeResolver(
for (int i = 0; i < nop; i++) {
PyObject *item = PyTuple_GET_ITEM(type_tup, i);
if (item == Py_None) {
- continue;
+ if (i < ufunc->nin) {
+ continue;
+ }
+ /* All outputs must be set (this could be relaxed) */
+ descr = NULL;
+ break;
}
if (!PyArray_DescrCheck(item)) {
/* Defer to default resolver (will raise an error there) */
@@ -1661,6 +1671,9 @@ ufunc_loop_matches(PyUFuncObject *self,
if (types[i] == NPY_OBJECT && !any_object && self->ntypes > 1) {
return 0;
}
+ if (types[i] == NPY_NOTYPE) {
+ continue; /* Matched by being explicitly specified. */
+ }
/*
* If type num is NPY_VOID and struct dtypes have been passed in,
@@ -1710,6 +1723,9 @@ ufunc_loop_matches(PyUFuncObject *self,
* outputs.
*/
for (i = nin; i < nop; ++i) {
+ if (types[i] == NPY_NOTYPE) {
+ continue; /* Matched by being explicitly specified. */
+ }
if (op[i] != NULL) {
PyArray_Descr *tmp = PyArray_DescrFromType(types[i]);
if (tmp == NULL) {
@@ -1728,7 +1744,6 @@ ufunc_loop_matches(PyUFuncObject *self,
Py_DECREF(tmp);
}
}
-
return 1;
}
@@ -1869,12 +1884,15 @@ type_tuple_userloop_type_resolver(PyUFuncObject *self,
int n_specified,
int *specified_types,
PyArrayObject **op,
+ NPY_CASTING input_casting,
NPY_CASTING casting,
int any_object,
int use_min_scalar,
PyArray_Descr **out_dtype)
{
int i, j, nin = self->nin, nop = nin + self->nout;
+ assert(n_specified == nop);
+ int types[NPY_MAXARGS];
/* Use this to try to avoid repeating the same userdef loop search */
int last_userdef = -1;
@@ -1907,28 +1925,31 @@ type_tuple_userloop_type_resolver(PyUFuncObject *self,
return -1;
}
for (; funcdata != NULL; funcdata = funcdata->next) {
- int *types = funcdata->arg_types;
- int matched = 1;
-
- if (n_specified == nop) {
- for (j = 0; j < nop; ++j) {
- if (types[j] != specified_types[j] &&
- specified_types[j] != NPY_NOTYPE) {
- matched = 0;
- break;
- }
+ int *orig_types = funcdata->arg_types;
+
+ /*
+ * Copy the types into an int array for matching
+ * (Mostly duplicated in `type_tuple_type_resolver`)
+ */
+ for (j = 0; j < nop; ++j) {
+ if (specified_types[j] == NPY_NOTYPE) {
+ types[j] = orig_types[j];
+ continue;
}
- } else {
- if (types[nin] != specified_types[0]) {
- matched = 0;
+ if (orig_types[j] != specified_types[j]) {
+ break;
}
+                /* Indicate that we do not have to check this type anymore. */
+ types[j] = NPY_NOTYPE;
}
- if (!matched) {
+
+ if (j != nop) {
+ /* no match */
continue;
}
switch (ufunc_loop_matches(self, op,
- casting, casting,
+ input_casting, casting,
any_object, use_min_scalar,
types, NULL,
&no_castable_output, &err_src_typecode,
@@ -1936,7 +1957,19 @@ type_tuple_userloop_type_resolver(PyUFuncObject *self,
/* It works */
case 1:
set_ufunc_loop_data_types(self, op,
- out_dtype, types, NULL);
+ out_dtype, orig_types, NULL);
+ /*
+ * In principle, we only need to validate the
+ * NPY_NOTYPE ones
+ */
+ if (PyUFunc_ValidateCasting(self,
+ casting, op, out_dtype) < 0) {
+ for (j = 0; j < self->nargs; j++) {
+ Py_DECREF(out_dtype[j]);
+ out_dtype[j] = NULL;
+ }
+ return -1;
+ }
return 1;
/* Didn't match */
case 0:
@@ -2069,6 +2102,94 @@ linear_search_type_resolver(PyUFuncObject *self,
return -1;
}
+
+static int
+type_tuple_type_resolver_core(PyUFuncObject *self,
+ PyArrayObject **op,
+ NPY_CASTING input_casting, NPY_CASTING casting,
+ int specified_types[],
+ int any_object,
+ int no_castable_output, int use_min_scalar,
+ PyArray_Descr **out_dtype)
+{
+ int i, j;
+ int nop = self->nargs;
+ int types[NPY_MAXARGS];
+
+ /* For making a better error message on coercion error */
+ char err_dst_typecode = '-', err_src_typecode = '-';
+
+ /* If the ufunc has userloops, search for them. */
+ if (self->userloops) {
+ switch (type_tuple_userloop_type_resolver(self,
+ nop, specified_types,
+ op, input_casting, casting,
+ any_object, use_min_scalar,
+ out_dtype)) {
+ /* Error */
+ case -1:
+ return -1;
+ /* Found matching loop */
+ case 1:
+ return 0;
+ }
+ }
+
+ for (i = 0; i < self->ntypes; ++i) {
+ char *orig_types = self->types + i*self->nargs;
+
+ /*
+ * Check specified types and copy into an int array for matching
+ * (Mostly duplicated in `type_tuple_userloop_type_resolver`)
+ */
+ for (j = 0; j < nop; ++j) {
+ if (specified_types[j] == NPY_NOTYPE) {
+ types[j] = orig_types[j];
+ continue;
+ }
+ if (orig_types[j] != specified_types[j]) {
+ break;
+ }
+            /* Indicate that we do not have to check this type anymore. */
+ types[j] = NPY_NOTYPE;
+ }
+ if (j < nop) {
+ /* no match */
+ continue;
+ }
+
+ switch (ufunc_loop_matches(self, op,
+ input_casting, casting,
+ any_object, use_min_scalar,
+ types, NULL,
+ &no_castable_output, &err_src_typecode,
+ &err_dst_typecode)) {
+ case -1:
+ /* Error */
+ return -1;
+ case 0:
+ /* Cannot cast inputs */
+ continue;
+ case 1:
+            /* Success; also fill the NPY_NOTYPE entries (cast from char to int) */
+ for (j = 0; j < nop; j++) {
+ types[j] = orig_types[j];
+ }
+ set_ufunc_loop_data_types(self, op, out_dtype, types, NULL);
+ /* In principle, we only need to validate the NPY_NOTYPE ones */
+ if (PyUFunc_ValidateCasting(self, casting, op, out_dtype) < 0) {
+ for (j = 0; j < self->nargs; j++) {
+ Py_DECREF(out_dtype[j]);
+ out_dtype[j] = NULL;
+ }
+ return -1;
+ }
+ return 0;
+ }
+ }
+ return -2;
+}
+
/*
* Does a linear search for the inner loop of the ufunc specified by type_tup.
*
@@ -2079,18 +2200,16 @@ NPY_NO_EXPORT int
type_tuple_type_resolver(PyUFuncObject *self,
PyObject *type_tup,
PyArrayObject **op,
+ NPY_CASTING input_casting,
NPY_CASTING casting,
int any_object,
PyArray_Descr **out_dtype)
{
- int i, j, nin = self->nin, nop = nin + self->nout;
- int specified_types[NPY_MAXARGS], types[NPY_MAXARGS];
+ int nin = self->nin, nop = nin + self->nout;
+ int specified_types[NPY_MAXARGS];
const char *ufunc_name;
int no_castable_output = 0, use_min_scalar;
- /* For making a better error message on coercion error */
- char err_dst_typecode = '-', err_src_typecode = '-';
-
ufunc_name = ufunc_get_name_cstr(self);
use_min_scalar = should_use_min_scalar(nin, op, 0, NULL);
@@ -2112,7 +2231,7 @@ type_tuple_type_resolver(PyUFuncObject *self,
PyErr_SetString(PyExc_RuntimeError, bad_type_tup_msg);
return -1;
}
- for (i = 0; i < nop; ++i) {
+ for (int i = 0; i < nop; ++i) {
PyObject *item = PyTuple_GET_ITEM(type_tup, i);
if (item == Py_None) {
specified_types[i] = NPY_NOTYPE;
@@ -2131,57 +2250,51 @@ type_tuple_type_resolver(PyUFuncObject *self,
return -1;
}
- /* If the ufunc has userloops, search for them. */
- if (self->userloops) {
- switch (type_tuple_userloop_type_resolver(self,
- nop, specified_types,
- op, casting,
- any_object, use_min_scalar,
- out_dtype)) {
- /* Error */
- case -1:
- return -1;
- /* Found matching loop */
- case 1:
- return 0;
- }
- }
-
- for (i = 0; i < self->ntypes; ++i) {
- char *orig_types = self->types + i*self->nargs;
+ int res = type_tuple_type_resolver_core(self,
+ op, input_casting, casting, specified_types, any_object,
+ no_castable_output, use_min_scalar, out_dtype);
- /* Copy the types into an int array for matching */
- for (j = 0; j < nop; ++j) {
- types[j] = orig_types[j];
- }
+ if (res != -2) {
+ return res;
+ }
- for (j = 0; j < nop; ++j) {
- if (types[j] != specified_types[j] &&
- specified_types[j] != NPY_NOTYPE) {
+ /*
+ * When the user passes `dtype=dtype`, it gets translated to
+ * `signature=(None,)*nin + (dtype,)*nout`. If the signature matches that
+ * exactly (could be relaxed but that is not necessary for backcompat),
+ * we also try `signature=(dtype,)*(nin+nout)`.
+ * This used to be the main meaning for `dtype=dtype`, but some calls broke
+ * the expectation, and changing it allows for `dtype=dtype` to be useful
+ * for ufuncs like `np.ldexp` in the future while also normalizing it to
+ * a `signature` early on.
+ */
+ int homogeneous_type = NPY_NOTYPE;
+ if (self->nout > 0) {
+ homogeneous_type = specified_types[nin];
+ for (int i = nin+1; i < nop; i++) {
+ if (specified_types[i] != homogeneous_type) {
+ homogeneous_type = NPY_NOTYPE;
break;
}
}
- if (j < nop) {
- /* no match */
- continue;
+ }
+ if (homogeneous_type != NPY_NOTYPE) {
+ for (int i = 0; i < nin; i++) {
+ if (specified_types[i] != NPY_NOTYPE) {
+ homogeneous_type = NPY_NOTYPE;
+ break;
+ }
+ specified_types[i] = homogeneous_type;
}
+ }
+ if (homogeneous_type != NPY_NOTYPE) {
+ /* Try again with the homogeneous specified types. */
+ res = type_tuple_type_resolver_core(self,
+ op, input_casting, casting, specified_types, any_object,
+ no_castable_output, use_min_scalar, out_dtype);
- switch (ufunc_loop_matches(self, op,
- casting, casting,
- any_object, use_min_scalar,
- types, NULL,
- &no_castable_output, &err_src_typecode,
- &err_dst_typecode)) {
- case -1:
- /* Error */
- return -1;
- case 0:
- /* Cannot cast inputs */
- continue;
- case 1:
- /* Success */
- set_ufunc_loop_data_types(self, op, out_dtype, types, NULL);
- return 0;
+ if (res != -2) {
+ return res;
}
}
diff --git a/numpy/core/src/umath/ufunc_type_resolution.h b/numpy/core/src/umath/ufunc_type_resolution.h
index 1d6ad3358..b11c69852 100644
--- a/numpy/core/src/umath/ufunc_type_resolution.h
+++ b/numpy/core/src/umath/ufunc_type_resolution.h
@@ -123,6 +123,7 @@ NPY_NO_EXPORT int
type_tuple_type_resolver(PyUFuncObject *self,
PyObject *type_tup,
PyArrayObject **op,
+ NPY_CASTING input_casting,
NPY_CASTING casting,
int any_object,
PyArray_Descr **out_dtype);
diff --git a/numpy/core/tests/test_deprecations.py b/numpy/core/tests/test_deprecations.py
index ec4112e69..ed1688374 100644
--- a/numpy/core/tests/test_deprecations.py
+++ b/numpy/core/tests/test_deprecations.py
@@ -1121,10 +1121,7 @@ class TestStringPromotion(_DeprecationTestCase):
self.assert_deprecated(lambda: np.concatenate((arr1, arr2), axis=0))
self.assert_deprecated(lambda: np.concatenate((arr1, arr2), axis=None))
- # coercing to an array is similar, but will fall-back to `object`
- # (when raising the FutureWarning, this already happens)
- self.assert_deprecated(lambda: np.array([arr1[0], arr2[0]]),
- exceptions=())
+ self.assert_deprecated(lambda: np.array([arr1[0], arr2[0]]))
@pytest.mark.parametrize("dtype", "?bhilqpBHILQPefdgFDG")
@pytest.mark.parametrize("string_dt", ["S", "U"])
diff --git a/numpy/core/tests/test_memmap.py b/numpy/core/tests/test_memmap.py
index a1e0c8f8f..e4f0a6b3f 100644
--- a/numpy/core/tests/test_memmap.py
+++ b/numpy/core/tests/test_memmap.py
@@ -1,10 +1,9 @@
import sys
import os
-import shutil
import mmap
import pytest
from pathlib import Path
-from tempfile import NamedTemporaryFile, TemporaryFile, mktemp, mkdtemp
+from tempfile import NamedTemporaryFile, TemporaryFile
from numpy import (
memmap, sum, average, product, ndarray, isscalar, add, subtract, multiply)
@@ -18,7 +17,6 @@ from numpy.testing import (
class TestMemmap:
def setup(self):
self.tmpfp = NamedTemporaryFile(prefix='mmap')
- self.tempdir = mkdtemp()
self.shape = (3, 4)
self.dtype = 'float32'
self.data = arange(12, dtype=self.dtype)
@@ -30,7 +28,6 @@ class TestMemmap:
if IS_PYPY:
break_cycles()
break_cycles()
- shutil.rmtree(self.tempdir)
def test_roundtrip(self):
# Write data to file
@@ -46,8 +43,8 @@ class TestMemmap:
assert_array_equal(self.data, newfp)
assert_equal(newfp.flags.writeable, False)
- def test_open_with_filename(self):
- tmpname = mktemp('', 'mmap', dir=self.tempdir)
+ def test_open_with_filename(self, tmp_path):
+ tmpname = tmp_path / 'mmap'
fp = memmap(tmpname, dtype=self.dtype, mode='w+',
shape=self.shape)
fp[:] = self.data[:]
@@ -67,11 +64,11 @@ class TestMemmap:
assert_equal(mode, fp.mode)
del fp
- def test_filename(self):
- tmpname = mktemp('', 'mmap', dir=self.tempdir)
+ def test_filename(self, tmp_path):
+ tmpname = tmp_path / "mmap"
fp = memmap(tmpname, dtype=self.dtype, mode='w+',
shape=self.shape)
- abspath = os.path.abspath(tmpname)
+ abspath = Path(os.path.abspath(tmpname))
fp[:] = self.data[:]
assert_equal(abspath, fp.filename)
b = fp[:1]
@@ -79,8 +76,8 @@ class TestMemmap:
del b
del fp
- def test_path(self):
- tmpname = mktemp('', 'mmap', dir=self.tempdir)
+ def test_path(self, tmp_path):
+ tmpname = tmp_path / "mmap"
fp = memmap(Path(tmpname), dtype=self.dtype, mode='w+',
shape=self.shape)
# os.path.realpath does not resolve symlinks on Windows
diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py
index b355c4618..d567653f5 100644
--- a/numpy/core/tests/test_multiarray.py
+++ b/numpy/core/tests/test_multiarray.py
@@ -1,7 +1,6 @@
import collections.abc
import tempfile
import sys
-import shutil
import warnings
import operator
import io
@@ -4811,17 +4810,23 @@ class TestLexsort:
class TestIO:
"""Test tofile, fromfile, tobytes, and fromstring"""
- def setup(self):
+ @pytest.fixture()
+ def x(self):
shape = (2, 4, 3)
rand = np.random.random
- self.x = rand(shape) + rand(shape).astype(complex)*1j
- self.x[0,:, 1] = [np.nan, np.inf, -np.inf, np.nan]
- self.dtype = self.x.dtype
- self.tempdir = tempfile.mkdtemp()
- self.filename = tempfile.mktemp(dir=self.tempdir)
+ x = rand(shape) + rand(shape).astype(complex) * 1j
+ x[0, :, 1] = [np.nan, np.inf, -np.inf, np.nan]
+ return x
- def teardown(self):
- shutil.rmtree(self.tempdir)
+ @pytest.fixture(params=["string", "path_obj"])
+ def tmp_filename(self, tmp_path, request):
+ # This fixture covers two cases:
+ # one where the filename is a string and
+ # another where it is a pathlib object
+ filename = tmp_path / "file"
+ if request.param == "string":
+ filename = str(filename)
+ yield filename
def test_nofile(self):
# this should probably be supported as a file
@@ -4852,54 +4857,48 @@ class TestIO:
d = np.fromstring("1,2", sep=",", dtype=np.int64, count=0)
assert d.shape == (0,)
- def test_empty_files_binary(self):
- with open(self.filename, 'w') as f:
+ def test_empty_files_text(self, tmp_filename):
+ with open(tmp_filename, 'w') as f:
pass
- y = np.fromfile(self.filename)
+ y = np.fromfile(tmp_filename)
assert_(y.size == 0, "Array not empty")
- def test_empty_files_text(self):
- with open(self.filename, 'wb') as f:
+ def test_empty_files_binary(self, tmp_filename):
+ with open(tmp_filename, 'wb') as f:
pass
- y = np.fromfile(self.filename, sep=" ")
+ y = np.fromfile(tmp_filename, sep=" ")
assert_(y.size == 0, "Array not empty")
- def test_roundtrip_file(self):
- with open(self.filename, 'wb') as f:
- self.x.tofile(f)
+ def test_roundtrip_file(self, x, tmp_filename):
+ with open(tmp_filename, 'wb') as f:
+ x.tofile(f)
# NB. doesn't work with flush+seek, due to use of C stdio
- with open(self.filename, 'rb') as f:
- y = np.fromfile(f, dtype=self.dtype)
- assert_array_equal(y, self.x.flat)
-
- def test_roundtrip_filename(self):
- self.x.tofile(self.filename)
- y = np.fromfile(self.filename, dtype=self.dtype)
- assert_array_equal(y, self.x.flat)
-
- def test_roundtrip_pathlib(self):
- p = pathlib.Path(self.filename)
- self.x.tofile(p)
- y = np.fromfile(p, dtype=self.dtype)
- assert_array_equal(y, self.x.flat)
-
- def test_roundtrip_dump_pathlib(self):
- p = pathlib.Path(self.filename)
- self.x.dump(p)
+ with open(tmp_filename, 'rb') as f:
+ y = np.fromfile(f, dtype=x.dtype)
+ assert_array_equal(y, x.flat)
+
+ def test_roundtrip(self, x, tmp_filename):
+ x.tofile(tmp_filename)
+ y = np.fromfile(tmp_filename, dtype=x.dtype)
+ assert_array_equal(y, x.flat)
+
+ def test_roundtrip_dump_pathlib(self, x, tmp_filename):
+ p = pathlib.Path(tmp_filename)
+ x.dump(p)
y = np.load(p, allow_pickle=True)
- assert_array_equal(y, self.x)
+ assert_array_equal(y, x)
- def test_roundtrip_binary_str(self):
- s = self.x.tobytes()
- y = np.frombuffer(s, dtype=self.dtype)
- assert_array_equal(y, self.x.flat)
+ def test_roundtrip_binary_str(self, x):
+ s = x.tobytes()
+ y = np.frombuffer(s, dtype=x.dtype)
+ assert_array_equal(y, x.flat)
- s = self.x.tobytes('F')
- y = np.frombuffer(s, dtype=self.dtype)
- assert_array_equal(y, self.x.flatten('F'))
+ s = x.tobytes('F')
+ y = np.frombuffer(s, dtype=x.dtype)
+ assert_array_equal(y, x.flatten('F'))
- def test_roundtrip_str(self):
- x = self.x.real.ravel()
+ def test_roundtrip_str(self, x):
+ x = x.real.ravel()
s = "@".join(map(str, x))
y = np.fromstring(s, sep="@")
# NB. str imbues less precision
@@ -4907,79 +4906,79 @@ class TestIO:
assert_array_equal(x[nan_mask], y[nan_mask])
assert_array_almost_equal(x[~nan_mask], y[~nan_mask], decimal=5)
- def test_roundtrip_repr(self):
- x = self.x.real.ravel()
+ def test_roundtrip_repr(self, x):
+ x = x.real.ravel()
s = "@".join(map(repr, x))
y = np.fromstring(s, sep="@")
assert_array_equal(x, y)
- def test_unseekable_fromfile(self):
+ def test_unseekable_fromfile(self, x, tmp_filename):
# gh-6246
- self.x.tofile(self.filename)
+ x.tofile(tmp_filename)
def fail(*args, **kwargs):
raise IOError('Can not tell or seek')
- with io.open(self.filename, 'rb', buffering=0) as f:
+ with io.open(tmp_filename, 'rb', buffering=0) as f:
f.seek = fail
f.tell = fail
- assert_raises(IOError, np.fromfile, f, dtype=self.dtype)
+ assert_raises(IOError, np.fromfile, f, dtype=x.dtype)
- def test_io_open_unbuffered_fromfile(self):
+ def test_io_open_unbuffered_fromfile(self, x, tmp_filename):
# gh-6632
- self.x.tofile(self.filename)
- with io.open(self.filename, 'rb', buffering=0) as f:
- y = np.fromfile(f, dtype=self.dtype)
- assert_array_equal(y, self.x.flat)
+ x.tofile(tmp_filename)
+ with io.open(tmp_filename, 'rb', buffering=0) as f:
+ y = np.fromfile(f, dtype=x.dtype)
+ assert_array_equal(y, x.flat)
- def test_largish_file(self):
+ def test_largish_file(self, tmp_filename):
# check the fallocate path on files > 16MB
d = np.zeros(4 * 1024 ** 2)
- d.tofile(self.filename)
- assert_equal(os.path.getsize(self.filename), d.nbytes)
- assert_array_equal(d, np.fromfile(self.filename))
+ d.tofile(tmp_filename)
+ assert_equal(os.path.getsize(tmp_filename), d.nbytes)
+ assert_array_equal(d, np.fromfile(tmp_filename))
# check offset
- with open(self.filename, "r+b") as f:
+ with open(tmp_filename, "r+b") as f:
f.seek(d.nbytes)
d.tofile(f)
- assert_equal(os.path.getsize(self.filename), d.nbytes * 2)
+ assert_equal(os.path.getsize(tmp_filename), d.nbytes * 2)
# check append mode (gh-8329)
- open(self.filename, "w").close() # delete file contents
- with open(self.filename, "ab") as f:
+ open(tmp_filename, "w").close() # delete file contents
+ with open(tmp_filename, "ab") as f:
d.tofile(f)
- assert_array_equal(d, np.fromfile(self.filename))
- with open(self.filename, "ab") as f:
+ assert_array_equal(d, np.fromfile(tmp_filename))
+ with open(tmp_filename, "ab") as f:
d.tofile(f)
- assert_equal(os.path.getsize(self.filename), d.nbytes * 2)
+ assert_equal(os.path.getsize(tmp_filename), d.nbytes * 2)
- def test_io_open_buffered_fromfile(self):
+ def test_io_open_buffered_fromfile(self, x, tmp_filename):
# gh-6632
- self.x.tofile(self.filename)
- with io.open(self.filename, 'rb', buffering=-1) as f:
- y = np.fromfile(f, dtype=self.dtype)
- assert_array_equal(y, self.x.flat)
+ x.tofile(tmp_filename)
+ with io.open(tmp_filename, 'rb', buffering=-1) as f:
+ y = np.fromfile(f, dtype=x.dtype)
+ assert_array_equal(y, x.flat)
- def test_file_position_after_fromfile(self):
+ def test_file_position_after_fromfile(self, tmp_filename):
# gh-4118
sizes = [io.DEFAULT_BUFFER_SIZE//8,
io.DEFAULT_BUFFER_SIZE,
io.DEFAULT_BUFFER_SIZE*8]
for size in sizes:
- with open(self.filename, 'wb') as f:
+ with open(tmp_filename, 'wb') as f:
f.seek(size-1)
f.write(b'\0')
for mode in ['rb', 'r+b']:
err_msg = "%d %s" % (size, mode)
- with open(self.filename, mode) as f:
+ with open(tmp_filename, mode) as f:
f.read(2)
np.fromfile(f, dtype=np.float64, count=1)
pos = f.tell()
assert_equal(pos, 10, err_msg=err_msg)
- def test_file_position_after_tofile(self):
+ def test_file_position_after_tofile(self, tmp_filename):
# gh-4118
sizes = [io.DEFAULT_BUFFER_SIZE//8,
io.DEFAULT_BUFFER_SIZE,
@@ -4988,7 +4987,7 @@ class TestIO:
for size in sizes:
err_msg = "%d" % (size,)
- with open(self.filename, 'wb') as f:
+ with open(tmp_filename, 'wb') as f:
f.seek(size-1)
f.write(b'\0')
f.seek(10)
@@ -4997,58 +4996,62 @@ class TestIO:
pos = f.tell()
assert_equal(pos, 10 + 2 + 8, err_msg=err_msg)
- with open(self.filename, 'r+b') as f:
+ with open(tmp_filename, 'r+b') as f:
f.read(2)
f.seek(0, 1) # seek between read&write required by ANSI C
np.array([0], dtype=np.float64).tofile(f)
pos = f.tell()
assert_equal(pos, 10, err_msg=err_msg)
- def test_load_object_array_fromfile(self):
+ def test_load_object_array_fromfile(self, tmp_filename):
# gh-12300
- with open(self.filename, 'w') as f:
+ with open(tmp_filename, 'w') as f:
# Ensure we have a file with consistent contents
pass
- with open(self.filename, 'rb') as f:
+ with open(tmp_filename, 'rb') as f:
assert_raises_regex(ValueError, "Cannot read into object array",
np.fromfile, f, dtype=object)
assert_raises_regex(ValueError, "Cannot read into object array",
- np.fromfile, self.filename, dtype=object)
-
- def test_fromfile_offset(self):
- with open(self.filename, 'wb') as f:
- self.x.tofile(f)
-
- with open(self.filename, 'rb') as f:
- y = np.fromfile(f, dtype=self.dtype, offset=0)
- assert_array_equal(y, self.x.flat)
-
- with open(self.filename, 'rb') as f:
- count_items = len(self.x.flat) // 8
- offset_items = len(self.x.flat) // 4
- offset_bytes = self.dtype.itemsize * offset_items
- y = np.fromfile(f, dtype=self.dtype, count=count_items, offset=offset_bytes)
- assert_array_equal(y, self.x.flat[offset_items:offset_items+count_items])
+ np.fromfile, tmp_filename, dtype=object)
+
+ def test_fromfile_offset(self, x, tmp_filename):
+ with open(tmp_filename, 'wb') as f:
+ x.tofile(f)
+
+ with open(tmp_filename, 'rb') as f:
+ y = np.fromfile(f, dtype=x.dtype, offset=0)
+ assert_array_equal(y, x.flat)
+
+ with open(tmp_filename, 'rb') as f:
+ count_items = len(x.flat) // 8
+ offset_items = len(x.flat) // 4
+ offset_bytes = x.dtype.itemsize * offset_items
+ y = np.fromfile(
+ f, dtype=x.dtype, count=count_items, offset=offset_bytes
+ )
+ assert_array_equal(
+ y, x.flat[offset_items:offset_items+count_items]
+ )
# subsequent seeks should stack
- offset_bytes = self.dtype.itemsize
- z = np.fromfile(f, dtype=self.dtype, offset=offset_bytes)
- assert_array_equal(z, self.x.flat[offset_items+count_items+1:])
+ offset_bytes = x.dtype.itemsize
+ z = np.fromfile(f, dtype=x.dtype, offset=offset_bytes)
+ assert_array_equal(z, x.flat[offset_items+count_items+1:])
- with open(self.filename, 'wb') as f:
- self.x.tofile(f, sep=",")
+ with open(tmp_filename, 'wb') as f:
+ x.tofile(f, sep=",")
- with open(self.filename, 'rb') as f:
+ with open(tmp_filename, 'rb') as f:
assert_raises_regex(
TypeError,
"'offset' argument only permitted for binary files",
- np.fromfile, self.filename, dtype=self.dtype,
+ np.fromfile, tmp_filename, dtype=x.dtype,
sep=",", offset=1)
@pytest.mark.skipif(IS_PYPY, reason="bug in PyPy's PyNumber_AsSsize_t")
- def test_fromfile_bad_dup(self):
+ def test_fromfile_bad_dup(self, x, tmp_filename):
def dup_str(fd):
return 'abc'
@@ -5057,46 +5060,81 @@ class TestIO:
old_dup = os.dup
try:
- with open(self.filename, 'wb') as f:
- self.x.tofile(f)
+ with open(tmp_filename, 'wb') as f:
+ x.tofile(f)
for dup, exc in ((dup_str, TypeError), (dup_bigint, OSError)):
os.dup = dup
assert_raises(exc, np.fromfile, f)
finally:
os.dup = old_dup
- def _check_from(self, s, value, **kw):
+ def _check_from(self, s, value, filename, **kw):
if 'sep' not in kw:
y = np.frombuffer(s, **kw)
else:
y = np.fromstring(s, **kw)
assert_array_equal(y, value)
- with open(self.filename, 'wb') as f:
+ with open(filename, 'wb') as f:
f.write(s)
- y = np.fromfile(self.filename, **kw)
+ y = np.fromfile(filename, **kw)
assert_array_equal(y, value)
- def test_nan(self):
+ @pytest.fixture(params=["period", "comma"])
+ def decimal_sep_localization(self, request):
+ """
+ Including this fixture in a test will automatically
+ execute it with both types of decimal separator.
+
+ So::
+
+ def test_decimal(decimal_sep_localization):
+ pass
+
+ is equivalent to the following two tests::
+
+ def test_decimal_period_separator():
+ pass
+
+ def test_decimal_comma_separator():
+ with CommaDecimalPointLocale():
+ pass
+ """
+ if request.param == "period":
+ yield
+ elif request.param == "comma":
+ with CommaDecimalPointLocale():
+ yield
+ else:
+ assert False, request.param
+
+ def test_nan(self, tmp_filename, decimal_sep_localization):
self._check_from(
b"nan +nan -nan NaN nan(foo) +NaN(BAR) -NAN(q_u_u_x_)",
[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
+ tmp_filename,
sep=' ')
- def test_inf(self):
+ def test_inf(self, tmp_filename, decimal_sep_localization):
self._check_from(
b"inf +inf -inf infinity -Infinity iNfInItY -inF",
[np.inf, np.inf, -np.inf, np.inf, -np.inf, np.inf, -np.inf],
+ tmp_filename,
sep=' ')
- def test_numbers(self):
- self._check_from(b"1.234 -1.234 .3 .3e55 -123133.1231e+133",
- [1.234, -1.234, .3, .3e55, -123133.1231e+133], sep=' ')
+ def test_numbers(self, tmp_filename, decimal_sep_localization):
+ self._check_from(
+ b"1.234 -1.234 .3 .3e55 -123133.1231e+133",
+ [1.234, -1.234, .3, .3e55, -123133.1231e+133],
+ tmp_filename,
+ sep=' ')
- def test_binary(self):
- self._check_from(b'\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@',
- np.array([1, 2, 3, 4]),
- dtype='<f4')
+ def test_binary(self, tmp_filename):
+ self._check_from(
+ b'\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@',
+ np.array([1, 2, 3, 4]),
+ tmp_filename,
+ dtype='<f4')
@pytest.mark.slow # takes > 1 minute on mechanical hard drive
def test_big_binary(self):
@@ -5123,91 +5161,89 @@ class TestIO:
except (MemoryError, ValueError):
pass
- def test_string(self):
- self._check_from(b'1,2,3,4', [1., 2., 3., 4.], sep=',')
+ def test_string(self, tmp_filename):
+ self._check_from(b'1,2,3,4', [1., 2., 3., 4.], tmp_filename, sep=',')
- def test_counted_string(self):
- self._check_from(b'1,2,3,4', [1., 2., 3., 4.], count=4, sep=',')
- self._check_from(b'1,2,3,4', [1., 2., 3.], count=3, sep=',')
- self._check_from(b'1,2,3,4', [1., 2., 3., 4.], count=-1, sep=',')
+ def test_counted_string(self, tmp_filename, decimal_sep_localization):
+ self._check_from(
+ b'1,2,3,4', [1., 2., 3., 4.], tmp_filename, count=4, sep=',')
+ self._check_from(
+ b'1,2,3,4', [1., 2., 3.], tmp_filename, count=3, sep=',')
+ self._check_from(
+ b'1,2,3,4', [1., 2., 3., 4.], tmp_filename, count=-1, sep=',')
- def test_string_with_ws(self):
- self._check_from(b'1 2 3 4 ', [1, 2, 3, 4], dtype=int, sep=' ')
+ def test_string_with_ws(self, tmp_filename):
+ self._check_from(
+ b'1 2 3 4 ', [1, 2, 3, 4], tmp_filename, dtype=int, sep=' ')
- def test_counted_string_with_ws(self):
- self._check_from(b'1 2 3 4 ', [1, 2, 3], count=3, dtype=int,
- sep=' ')
+ def test_counted_string_with_ws(self, tmp_filename):
+ self._check_from(
+ b'1 2 3 4 ', [1, 2, 3], tmp_filename, count=3, dtype=int,
+ sep=' ')
- def test_ascii(self):
- self._check_from(b'1 , 2 , 3 , 4', [1., 2., 3., 4.], sep=',')
- self._check_from(b'1,2,3,4', [1., 2., 3., 4.], dtype=float, sep=',')
+ def test_ascii(self, tmp_filename, decimal_sep_localization):
+ self._check_from(
+ b'1 , 2 , 3 , 4', [1., 2., 3., 4.], tmp_filename, sep=',')
+ self._check_from(
+ b'1,2,3,4', [1., 2., 3., 4.], tmp_filename, dtype=float, sep=',')
- def test_malformed(self):
+ def test_malformed(self, tmp_filename, decimal_sep_localization):
with assert_warns(DeprecationWarning):
- self._check_from(b'1.234 1,234', [1.234, 1.], sep=' ')
+ self._check_from(
+ b'1.234 1,234', [1.234, 1.], tmp_filename, sep=' ')
- def test_long_sep(self):
- self._check_from(b'1_x_3_x_4_x_5', [1, 3, 4, 5], sep='_x_')
+ def test_long_sep(self, tmp_filename):
+ self._check_from(
+ b'1_x_3_x_4_x_5', [1, 3, 4, 5], tmp_filename, sep='_x_')
- def test_dtype(self):
+ def test_dtype(self, tmp_filename):
v = np.array([1, 2, 3, 4], dtype=np.int_)
- self._check_from(b'1,2,3,4', v, sep=',', dtype=np.int_)
+ self._check_from(b'1,2,3,4', v, tmp_filename, sep=',', dtype=np.int_)
- def test_dtype_bool(self):
+ def test_dtype_bool(self, tmp_filename):
# can't use _check_from because fromstring can't handle True/False
v = np.array([True, False, True, False], dtype=np.bool_)
s = b'1,0,-2.3,0'
- with open(self.filename, 'wb') as f:
+ with open(tmp_filename, 'wb') as f:
f.write(s)
- y = np.fromfile(self.filename, sep=',', dtype=np.bool_)
+ y = np.fromfile(tmp_filename, sep=',', dtype=np.bool_)
assert_(y.dtype == '?')
assert_array_equal(y, v)
- def test_tofile_sep(self):
+ def test_tofile_sep(self, tmp_filename, decimal_sep_localization):
x = np.array([1.51, 2, 3.51, 4], dtype=float)
- with open(self.filename, 'w') as f:
+ with open(tmp_filename, 'w') as f:
x.tofile(f, sep=',')
- with open(self.filename, 'r') as f:
+ with open(tmp_filename, 'r') as f:
s = f.read()
#assert_equal(s, '1.51,2.0,3.51,4.0')
y = np.array([float(p) for p in s.split(',')])
assert_array_equal(x,y)
- def test_tofile_format(self):
+ def test_tofile_format(self, tmp_filename, decimal_sep_localization):
x = np.array([1.51, 2, 3.51, 4], dtype=float)
- with open(self.filename, 'w') as f:
+ with open(tmp_filename, 'w') as f:
x.tofile(f, sep=',', format='%.2f')
- with open(self.filename, 'r') as f:
+ with open(tmp_filename, 'r') as f:
s = f.read()
assert_equal(s, '1.51,2.00,3.51,4.00')
- def test_tofile_cleanup(self):
+ def test_tofile_cleanup(self, tmp_filename):
x = np.zeros((10), dtype=object)
- with open(self.filename, 'wb') as f:
+ with open(tmp_filename, 'wb') as f:
assert_raises(IOError, lambda: x.tofile(f, sep=''))
# Dup-ed file handle should be closed or remove will fail on Windows OS
- os.remove(self.filename)
+ os.remove(tmp_filename)
# Also make sure that we close the Python handle
- assert_raises(IOError, lambda: x.tofile(self.filename))
- os.remove(self.filename)
-
- def test_locale(self):
- with CommaDecimalPointLocale():
- self.test_numbers()
- self.test_nan()
- self.test_inf()
- self.test_counted_string()
- self.test_ascii()
- self.test_malformed()
- self.test_tofile_sep()
- self.test_tofile_format()
-
- def test_fromfile_subarray_binary(self):
+ assert_raises(IOError, lambda: x.tofile(tmp_filename))
+ os.remove(tmp_filename)
+
+ def test_fromfile_subarray_binary(self, tmp_filename):
# Test subarray dtypes which are absorbed into the shape
x = np.arange(24, dtype="i4").reshape(2, 3, 4)
- x.tofile(self.filename)
- res = np.fromfile(self.filename, dtype="(3,4)i4")
+ x.tofile(tmp_filename)
+ res = np.fromfile(tmp_filename, dtype="(3,4)i4")
assert_array_equal(x, res)
x_str = x.tobytes()
@@ -5216,21 +5252,21 @@ class TestIO:
res = np.fromstring(x_str, dtype="(3,4)i4")
assert_array_equal(x, res)
- def test_parsing_subarray_unsupported(self):
+ def test_parsing_subarray_unsupported(self, tmp_filename):
# We currently do not support parsing subarray dtypes
data = "12,42,13," * 50
with pytest.raises(ValueError):
expected = np.fromstring(data, dtype="(3,)i", sep=",")
- with open(self.filename, "w") as f:
+ with open(tmp_filename, "w") as f:
f.write(data)
with pytest.raises(ValueError):
- np.fromfile(self.filename, dtype="(3,)i", sep=",")
+ np.fromfile(tmp_filename, dtype="(3,)i", sep=",")
- def test_read_shorter_than_count_subarray(self):
+ def test_read_shorter_than_count_subarray(self, tmp_filename):
# Test that requesting more values does not cause any problems
- # in conjuction with subarray dimensions being absored into the
+ # in conjunction with subarray dimensions being absorbed into the
# array dimension.
expected = np.arange(511 * 10, dtype="i").reshape(-1, 10)
@@ -5239,8 +5275,8 @@ class TestIO:
with pytest.warns(DeprecationWarning):
np.fromstring(binary, dtype="(10,)i", count=10000)
- expected.tofile(self.filename)
- res = np.fromfile(self.filename, dtype="(10,)i", count=10000)
+ expected.tofile(tmp_filename)
+ res = np.fromfile(tmp_filename, dtype="(10,)i", count=10000)
assert_array_equal(res, expected)
@@ -5946,6 +5982,7 @@ class TestStats:
res = dat.var(1)
assert_(res.info == dat.info)
+
class TestVdot:
def test_basic(self):
dt_numeric = np.typecodes['AllFloat'] + np.typecodes['AllInteger']
@@ -8664,6 +8701,15 @@ class TestArrayFinalize:
a = np.array(1).view(SavesBase)
assert_(a.saved_base is a.base)
+ def test_bad_finalize(self):
+ class BadAttributeArray(np.ndarray):
+ @property
+ def __array_finalize__(self):
+ raise RuntimeError("boohoo!")
+
+ with pytest.raises(RuntimeError, match="boohoo!"):
+ np.arange(10).view(BadAttributeArray)
+
def test_lifetime_on_error(self):
# gh-11237
class RaisesInFinalize(np.ndarray):
diff --git a/numpy/core/tests/test_regression.py b/numpy/core/tests/test_regression.py
index d1af7f1d8..25198bba9 100644
--- a/numpy/core/tests/test_regression.py
+++ b/numpy/core/tests/test_regression.py
@@ -503,8 +503,8 @@ class TestRegression:
assert_equal(np.arange(4, dtype='<c8').real.max(), 3.0)
def test_object_array_from_list(self):
- # Ticket #270
- assert_(np.array([1, 'A', None]).shape == (3,))
+ # Ticket #270 (gh-868)
+ assert_(np.array([1, None, 'A']).shape == (3,))
def test_multiple_assign(self):
# Ticket #273
@@ -2052,18 +2052,18 @@ class TestRegression:
def test_string_truncation(self):
# Ticket #1990 - Data can be truncated in creation of an array from a
- # mixed sequence of numeric values and strings
+ # mixed sequence of numeric values and strings (gh-2583)
for val in [True, 1234, 123.4, complex(1, 234)]:
- for tostr in [asunicode, asbytes]:
- b = np.array([val, tostr('xx')])
+ for tostr, dtype in [(asunicode, "U"), (asbytes, "S")]:
+ b = np.array([val, tostr('xx')], dtype=dtype)
assert_equal(tostr(b[0]), tostr(val))
- b = np.array([tostr('xx'), val])
+ b = np.array([tostr('xx'), val], dtype=dtype)
assert_equal(tostr(b[1]), tostr(val))
# test also with longer strings
- b = np.array([val, tostr('xxxxxxxxxx')])
+ b = np.array([val, tostr('xxxxxxxxxx')], dtype=dtype)
assert_equal(tostr(b[0]), tostr(val))
- b = np.array([tostr('xxxxxxxxxx'), val])
+ b = np.array([tostr('xxxxxxxxxx'), val], dtype=dtype)
assert_equal(tostr(b[1]), tostr(val))
def test_string_truncation_ucs2(self):
diff --git a/numpy/core/tests/test_scalar_methods.py b/numpy/core/tests/test_scalar_methods.py
index 4f5fd2988..3693bba59 100644
--- a/numpy/core/tests/test_scalar_methods.py
+++ b/numpy/core/tests/test_scalar_methods.py
@@ -89,7 +89,8 @@ class TestAsIntegerRatio:
])
def test_roundtrip(self, ftype, frac_vals, exp_vals):
for frac, exp in zip(frac_vals, exp_vals):
- f = np.ldexp(frac, exp, dtype=ftype)
+ f = np.ldexp(ftype(frac), exp)
+ assert f.dtype == ftype
n, d = f.as_integer_ratio()
try:
diff --git a/numpy/core/tests/test_ufunc.py b/numpy/core/tests/test_ufunc.py
index 64ecb3780..a47f1df49 100644
--- a/numpy/core/tests/test_ufunc.py
+++ b/numpy/core/tests/test_ufunc.py
@@ -457,6 +457,34 @@ class TestUfunc:
float_dtype = type(np.dtype(np.float64))
np.add(3, 4, signature=(float_dtype, float_dtype, None))
+ @pytest.mark.parametrize("casting", ["unsafe", "same_kind", "safe"])
+ def test_partial_signature_mismatch(self, casting):
+ # If the second argument matches already, no need to specify it:
+ res = np.ldexp(np.float32(1.), np.int_(2), dtype="d")
+ assert res.dtype == "d"
+ res = np.ldexp(np.float32(1.), np.int_(2), signature=(None, None, "d"))
+ assert res.dtype == "d"
+
+ # ldexp only has a loop for long input as second argument, overriding
+ # the output cannot help with that (no matter the casting)
+ with pytest.raises(TypeError):
+ np.ldexp(1., np.uint64(3), dtype="d")
+ with pytest.raises(TypeError):
+ np.ldexp(1., np.uint64(3), signature=(None, None, "d"))
+
+ def test_use_output_signature_for_all_arguments(self):
+ # Test that providing only `dtype=` or `signature=(None, None, dtype)`
+ # is sufficient if falling back to a homogeneous signature works.
+ # In this case, the `intp, intp -> intp` loop is chosen.
+ res = np.power(1.5, 2.8, dtype=np.intp, casting="unsafe")
+ assert res == 1 # the cast happens first.
+ res = np.power(1.5, 2.8, signature=(None, None, np.intp),
+ casting="unsafe")
+ assert res == 1
+ with pytest.raises(TypeError):
+ # the unsafe casting would normally cause errors though:
+ np.power(1.5, 2.8, dtype=np.intp)
+
def test_signature_errors(self):
with pytest.raises(TypeError,
match="the signature object to ufunc must be a string or"):
diff --git a/numpy/distutils/checks/extra_avx512f_reduce.c b/numpy/distutils/checks/extra_avx512f_reduce.c
index f979d504e..db01aaeef 100644
--- a/numpy/distutils/checks/extra_avx512f_reduce.c
+++ b/numpy/distutils/checks/extra_avx512f_reduce.c
@@ -8,7 +8,7 @@ int main(void)
{
__m512 one_ps = _mm512_set1_ps(1.0f);
__m512d one_pd = _mm512_set1_pd(1.0);
- __m512i one_i64 = _mm512_set1_epi64(1.0);
+ __m512i one_i64 = _mm512_set1_epi64(1);
// add
float sum_ps = _mm512_reduce_add_ps(one_ps);
double sum_pd = _mm512_reduce_add_pd(one_pd);
diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py
index 8c123bc3b..bd5fc2ca3 100644
--- a/numpy/ma/extras.py
+++ b/numpy/ma/extras.py
@@ -614,7 +614,7 @@ def average(a, axis=None, weights=None, returned=False):
"Length of weights not compatible with specified axis.")
# setup wgt to broadcast along axis
- wgt = np.broadcast_to(wgt, (a.ndim-1)*(1,) + wgt.shape)
+ wgt = np.broadcast_to(wgt, (a.ndim-1)*(1,) + wgt.shape, subok=True)
wgt = wgt.swapaxes(-1, axis)
if m is not nomask:
diff --git a/numpy/ma/tests/test_extras.py b/numpy/ma/tests/test_extras.py
index d237829cb..e735b9bc7 100644
--- a/numpy/ma/tests/test_extras.py
+++ b/numpy/ma/tests/test_extras.py
@@ -292,6 +292,23 @@ class TestAverage:
assert_almost_equal(wav1.real, expected1.real)
assert_almost_equal(wav1.imag, expected1.imag)
+ def test_masked_weights(self):
+ # Test with masked weights.
+ # (Regression test for https://github.com/numpy/numpy/issues/10438)
+ a = np.ma.array(np.arange(9).reshape(3, 3),
+ mask=[[1, 0, 0], [1, 0, 0], [0, 0, 0]])
+ weights_unmasked = masked_array([5, 28, 31], mask=False)
+ weights_masked = masked_array([5, 28, 31], mask=[1, 0, 0])
+
+ avg_unmasked = average(a, axis=0,
+ weights=weights_unmasked, returned=False)
+ expected_unmasked = np.array([6.0, 5.21875, 6.21875])
+ assert_almost_equal(avg_unmasked, expected_unmasked)
+
+ avg_masked = average(a, axis=0, weights=weights_masked, returned=False)
+ expected_masked = np.array([6.0, 5.576271186440678, 6.576271186440678])
+ assert_almost_equal(avg_masked, expected_masked)
+
class TestConcatenator:
# Tests for mr_, the equivalent of r_ for masked arrays.
diff --git a/numpy/ma/tests/test_mrecords.py b/numpy/ma/tests/test_mrecords.py
index c2f859273..27df519d2 100644
--- a/numpy/ma/tests/test_mrecords.py
+++ b/numpy/ma/tests/test_mrecords.py
@@ -405,7 +405,7 @@ class TestMRecordsImport:
for (f, l) in zip(('a', 'b', 'c'), (_a, _b, _c)):
assert_equal(getattr(mrec, f)._mask, l._mask)
# One record only
- _x = ma.array([1, 1.1, 'one'], mask=[1, 0, 0],)
+ _x = ma.array([1, 1.1, 'one'], mask=[1, 0, 0], dtype=object)
assert_equal_records(fromarrays(_x, dtype=mrec.dtype), mrec[0])
def test_fromrecords(self):
diff --git a/numpy/typing/tests/data/fail/datasource.py b/numpy/typing/tests/data/fail/datasource.py
new file mode 100644
index 000000000..345277d45
--- /dev/null
+++ b/numpy/typing/tests/data/fail/datasource.py
@@ -0,0 +1,15 @@
+from pathlib import Path
+import numpy as np
+
+path: Path
+d1: np.DataSource
+
+d1.abspath(path) # E: incompatible type
+d1.abspath(b"...") # E: incompatible type
+
+d1.exists(path) # E: incompatible type
+d1.exists(b"...") # E: incompatible type
+
+d1.open(path, "r") # E: incompatible type
+d1.open(b"...", encoding="utf8") # E: incompatible type
+d1.open(None, newline="/n") # E: incompatible type
diff --git a/numpy/typing/tests/data/fail/fromnumeric.py b/numpy/typing/tests/data/fail/fromnumeric.py
index d8f7a5d69..8fafed1b7 100644
--- a/numpy/typing/tests/data/fail/fromnumeric.py
+++ b/numpy/typing/tests/data/fail/fromnumeric.py
@@ -117,13 +117,13 @@ np.amax(a, axis=1.0) # E: incompatible type
np.amax(a, keepdims=1.0) # E: incompatible type
np.amax(a, out=1.0) # E: incompatible type
np.amax(a, initial=[1.0]) # E: incompatible type
-np.amax(a, where=[1.0]) # E: List item 0 has incompatible type
+np.amax(a, where=[1.0]) # E: incompatible type
np.amin(a, axis=1.0) # E: incompatible type
np.amin(a, keepdims=1.0) # E: incompatible type
np.amin(a, out=1.0) # E: incompatible type
np.amin(a, initial=[1.0]) # E: incompatible type
-np.amin(a, where=[1.0]) # E: List item 0 has incompatible type
+np.amin(a, where=[1.0]) # E: incompatible type
np.prod(a, axis=1.0) # E: incompatible type
np.prod(a, out=False) # E: incompatible type
diff --git a/numpy/typing/tests/data/fail/ndarray_misc.py b/numpy/typing/tests/data/fail/ndarray_misc.py
index 1e1496bfe..653b9267b 100644
--- a/numpy/typing/tests/data/fail/ndarray_misc.py
+++ b/numpy/typing/tests/data/fail/ndarray_misc.py
@@ -6,9 +6,13 @@ function-based counterpart in `../from_numeric.py`.
"""
+from typing import Any
import numpy as np
f8: np.float64
+AR_f8: np.ndarray[Any, np.dtype[np.float64]]
+AR_M: np.ndarray[Any, np.dtype[np.datetime64]]
+AR_b: np.ndarray[Any, np.dtype[np.bool_]]
f8.argpartition(0) # E: has no attribute
f8.diagonal() # E: has no attribute
@@ -19,3 +23,8 @@ f8.put(0, 2) # E: has no attribute
f8.setfield(2, np.float64) # E: has no attribute
f8.sort() # E: has no attribute
f8.trace() # E: has no attribute
+
+AR_M.__int__() # E: Invalid self argument
+AR_M.__float__() # E: Invalid self argument
+AR_M.__complex__() # E: Invalid self argument
+AR_b.__index__() # E: Invalid self argument
diff --git a/numpy/typing/tests/data/pass/multiarray.py b/numpy/typing/tests/data/pass/multiarray.py
new file mode 100644
index 000000000..e2b5d16a0
--- /dev/null
+++ b/numpy/typing/tests/data/pass/multiarray.py
@@ -0,0 +1,37 @@
+from __future__ import annotations
+
+from typing import Any
+import numpy as np
+
+AR_f8: np.ndarray[Any, np.dtype[np.float64]] = np.array([1.0])
+AR_i8: np.ndarray[Any, np.dtype[np.int_]] = np.array([1])
+
+b_f8 = np.broadcast(AR_f8)
+b_i8_f8_f8 = np.broadcast(AR_i8, AR_f8, AR_f8)
+
+next(b_f8)
+next(b_i8_f8_f8)
+
+b_f8.reset()
+b_i8_f8_f8.reset()
+
+b_f8.index
+b_i8_f8_f8.index
+
+b_f8.iters
+b_i8_f8_f8.iters
+
+b_f8.nd
+b_i8_f8_f8.nd
+
+b_f8.ndim
+b_i8_f8_f8.ndim
+
+b_f8.numiter
+b_i8_f8_f8.numiter
+
+b_f8.shape
+b_i8_f8_f8.shape
+
+b_f8.size
+b_i8_f8_f8.size
diff --git a/numpy/typing/tests/data/pass/ndarray_misc.py b/numpy/typing/tests/data/pass/ndarray_misc.py
index 6c6f5d50b..62024603c 100644
--- a/numpy/typing/tests/data/pass/ndarray_misc.py
+++ b/numpy/typing/tests/data/pass/ndarray_misc.py
@@ -6,17 +6,21 @@ function-based counterpart in `../from_numeric.py`.
"""
-from typing import cast
+from __future__ import annotations
+
+import operator
+from typing import cast, Any
+
import numpy as np
class SubClass(np.ndarray): ...
i4 = np.int32(1)
-A = np.array([[1]], dtype=np.int32)
+A: np.ndarray[Any, np.dtype[np.int32]] = np.array([[1]], dtype=np.int32)
B0 = np.empty((), dtype=np.int32).view(SubClass)
B1 = np.empty((1,), dtype=np.int32).view(SubClass)
B2 = np.empty((1, 1), dtype=np.int32).view(SubClass)
-C = np.array([0, 1, 2], dtype=np.int32)
+C: np.ndarray[Any, np.dtype[np.int32]] = np.array([0, 1, 2], dtype=np.int32)
D = np.empty(3).view(SubClass)
i4.all()
@@ -157,3 +161,25 @@ A.trace(out=B0)
void = cast(np.void, np.array(1, dtype=[("f", np.float64)]).take(0))
void.setfield(10, np.float64)
+
+A.item(0)
+C.item(0)
+
+A.ravel()
+C.ravel()
+
+A.flatten()
+C.flatten()
+
+A.reshape(1)
+C.reshape(3)
+
+int(np.array(1.0, dtype=np.float64))
+int(np.array("1", dtype=np.str_))
+
+float(np.array(1.0, dtype=np.float64))
+float(np.array("1", dtype=np.str_))
+
+complex(np.array(1.0, dtype=np.float64))
+
+operator.index(np.array(1, dtype=np.int64))
diff --git a/numpy/typing/tests/data/pass/scalars.py b/numpy/typing/tests/data/pass/scalars.py
index 815566b68..b258db49f 100644
--- a/numpy/typing/tests/data/pass/scalars.py
+++ b/numpy/typing/tests/data/pass/scalars.py
@@ -4,6 +4,14 @@ import datetime as dt
import pytest
import numpy as np
+b = np.bool_()
+u8 = np.uint64()
+i8 = np.int64()
+f8 = np.float64()
+c16 = np.complex128()
+U = np.str_()
+S = np.bytes_()
+
# Construction
class D:
@@ -204,3 +212,43 @@ np.cfloat()
np.clongdouble()
np.clongfloat()
np.longcomplex()
+
+b.item()
+i8.item()
+u8.item()
+f8.item()
+c16.item()
+U.item()
+S.item()
+
+b.tolist()
+i8.tolist()
+u8.tolist()
+f8.tolist()
+c16.tolist()
+U.tolist()
+S.tolist()
+
+b.ravel()
+i8.ravel()
+u8.ravel()
+f8.ravel()
+c16.ravel()
+U.ravel()
+S.ravel()
+
+b.flatten()
+i8.flatten()
+u8.flatten()
+f8.flatten()
+c16.flatten()
+U.flatten()
+S.flatten()
+
+b.reshape(1)
+i8.reshape(1)
+u8.reshape(1)
+f8.reshape(1)
+c16.reshape(1)
+U.reshape(1)
+S.reshape(1)
diff --git a/numpy/typing/tests/data/reveal/datasource.py b/numpy/typing/tests/data/reveal/datasource.py
new file mode 100644
index 000000000..245ac7649
--- /dev/null
+++ b/numpy/typing/tests/data/reveal/datasource.py
@@ -0,0 +1,21 @@
+from pathlib import Path
+import numpy as np
+
+path1: Path
+path2: str
+
+d1 = np.DataSource(path1)
+d2 = np.DataSource(path2)
+d3 = np.DataSource(None)
+
+reveal_type(d1.abspath("...")) # E: str
+reveal_type(d2.abspath("...")) # E: str
+reveal_type(d3.abspath("...")) # E: str
+
+reveal_type(d1.exists("...")) # E: bool
+reveal_type(d2.exists("...")) # E: bool
+reveal_type(d3.exists("...")) # E: bool
+
+reveal_type(d1.open("...", "r")) # E: IO[Any]
+reveal_type(d2.open("...", encoding="utf8")) # E: IO[Any]
+reveal_type(d3.open("...", newline="/n")) # E: IO[Any]
diff --git a/numpy/typing/tests/data/reveal/multiarray.py b/numpy/typing/tests/data/reveal/multiarray.py
new file mode 100644
index 000000000..33e9ede7c
--- /dev/null
+++ b/numpy/typing/tests/data/reveal/multiarray.py
@@ -0,0 +1,35 @@
+from typing import Any
+import numpy as np
+
+AR_f8: np.ndarray[Any, np.dtype[np.float64]]
+AR_i8: np.ndarray[Any, np.dtype[np.int64]]
+
+b_f8 = np.broadcast(AR_f8)
+b_i8_f8_f8 = np.broadcast(AR_i8, AR_f8, AR_f8)
+
+reveal_type(next(b_f8)) # E: tuple[Any]
+reveal_type(next(b_i8_f8_f8)) # E: tuple[Any]
+
+reveal_type(b_f8.reset()) # E: None
+reveal_type(b_i8_f8_f8.reset()) # E: None
+
+reveal_type(b_f8.index) # E: int
+reveal_type(b_i8_f8_f8.index) # E: int
+
+reveal_type(b_f8.iters) # E: tuple[numpy.flatiter[Any]]
+reveal_type(b_i8_f8_f8.iters) # E: tuple[numpy.flatiter[Any]]
+
+reveal_type(b_f8.nd) # E: int
+reveal_type(b_i8_f8_f8.nd) # E: int
+
+reveal_type(b_f8.ndim) # E: int
+reveal_type(b_i8_f8_f8.ndim) # E: int
+
+reveal_type(b_f8.numiter) # E: int
+reveal_type(b_i8_f8_f8.numiter) # E: int
+
+reveal_type(b_f8.shape) # E: tuple[builtins.int]
+reveal_type(b_i8_f8_f8.shape) # E: tuple[builtins.int]
+
+reveal_type(b_f8.size) # E: int
+reveal_type(b_i8_f8_f8.size) # E: int
diff --git a/numpy/typing/tests/data/reveal/ndarray_misc.py b/numpy/typing/tests/data/reveal/ndarray_misc.py
index e0f44bcbc..ecc322251 100644
--- a/numpy/typing/tests/data/reveal/ndarray_misc.py
+++ b/numpy/typing/tests/data/reveal/ndarray_misc.py
@@ -6,145 +6,174 @@ function-based counterpart in `../from_numeric.py`.
"""
+import operator
+from typing import Any
+
import numpy as np
class SubClass(np.ndarray): ...
f8: np.float64
-A: np.ndarray
B: SubClass
+AR_f8: np.ndarray[Any, np.dtype[np.float64]]
+AR_i8: np.ndarray[Any, np.dtype[np.int64]]
+AR_U: np.ndarray[Any, np.dtype[np.str_]]
reveal_type(f8.all()) # E: numpy.bool_
-reveal_type(A.all()) # E: numpy.bool_
-reveal_type(A.all(axis=0)) # E: Any
-reveal_type(A.all(keepdims=True)) # E: Any
-reveal_type(A.all(out=B)) # E: SubClass
+reveal_type(AR_f8.all()) # E: numpy.bool_
+reveal_type(AR_f8.all(axis=0)) # E: Any
+reveal_type(AR_f8.all(keepdims=True)) # E: Any
+reveal_type(AR_f8.all(out=B)) # E: SubClass
reveal_type(f8.any()) # E: numpy.bool_
-reveal_type(A.any()) # E: numpy.bool_
-reveal_type(A.any(axis=0)) # E: Any
-reveal_type(A.any(keepdims=True)) # E: Any
-reveal_type(A.any(out=B)) # E: SubClass
+reveal_type(AR_f8.any()) # E: numpy.bool_
+reveal_type(AR_f8.any(axis=0)) # E: Any
+reveal_type(AR_f8.any(keepdims=True)) # E: Any
+reveal_type(AR_f8.any(out=B)) # E: SubClass
reveal_type(f8.argmax()) # E: {intp}
-reveal_type(A.argmax()) # E: {intp}
-reveal_type(A.argmax(axis=0)) # E: Any
-reveal_type(A.argmax(out=B)) # E: SubClass
+reveal_type(AR_f8.argmax()) # E: {intp}
+reveal_type(AR_f8.argmax(axis=0)) # E: Any
+reveal_type(AR_f8.argmax(out=B)) # E: SubClass
reveal_type(f8.argmin()) # E: {intp}
-reveal_type(A.argmin()) # E: {intp}
-reveal_type(A.argmin(axis=0)) # E: Any
-reveal_type(A.argmin(out=B)) # E: SubClass
+reveal_type(AR_f8.argmin()) # E: {intp}
+reveal_type(AR_f8.argmin(axis=0)) # E: Any
+reveal_type(AR_f8.argmin(out=B)) # E: SubClass
reveal_type(f8.argsort()) # E: numpy.ndarray[Any, Any]
-reveal_type(A.argsort()) # E: numpy.ndarray[Any, Any]
+reveal_type(AR_f8.argsort()) # E: numpy.ndarray[Any, Any]
reveal_type(f8.astype(np.int64).choose([()])) # E: numpy.ndarray[Any, Any]
-reveal_type(A.choose([0])) # E: numpy.ndarray[Any, Any]
-reveal_type(A.choose([0], out=B)) # E: SubClass
+reveal_type(AR_f8.choose([0])) # E: numpy.ndarray[Any, Any]
+reveal_type(AR_f8.choose([0], out=B)) # E: SubClass
reveal_type(f8.clip(1)) # E: Any
-reveal_type(A.clip(1)) # E: Any
-reveal_type(A.clip(None, 1)) # E: Any
-reveal_type(A.clip(1, out=B)) # E: SubClass
-reveal_type(A.clip(None, 1, out=B)) # E: SubClass
+reveal_type(AR_f8.clip(1)) # E: Any
+reveal_type(AR_f8.clip(None, 1)) # E: Any
+reveal_type(AR_f8.clip(1, out=B)) # E: SubClass
+reveal_type(AR_f8.clip(None, 1, out=B)) # E: SubClass
reveal_type(f8.compress([0])) # E: numpy.ndarray[Any, Any]
-reveal_type(A.compress([0])) # E: numpy.ndarray[Any, Any]
-reveal_type(A.compress([0], out=B)) # E: SubClass
+reveal_type(AR_f8.compress([0])) # E: numpy.ndarray[Any, Any]
+reveal_type(AR_f8.compress([0], out=B)) # E: SubClass
reveal_type(f8.conj()) # E: {float64}
-reveal_type(A.conj()) # E: numpy.ndarray[Any, Any]
+reveal_type(AR_f8.conj()) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
reveal_type(B.conj()) # E: SubClass
reveal_type(f8.conjugate()) # E: {float64}
-reveal_type(A.conjugate()) # E: numpy.ndarray[Any, Any]
+reveal_type(AR_f8.conjugate()) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
reveal_type(B.conjugate()) # E: SubClass
reveal_type(f8.cumprod()) # E: numpy.ndarray[Any, Any]
-reveal_type(A.cumprod()) # E: numpy.ndarray[Any, Any]
-reveal_type(A.cumprod(out=B)) # E: SubClass
+reveal_type(AR_f8.cumprod()) # E: numpy.ndarray[Any, Any]
+reveal_type(AR_f8.cumprod(out=B)) # E: SubClass
reveal_type(f8.cumsum()) # E: numpy.ndarray[Any, Any]
-reveal_type(A.cumsum()) # E: numpy.ndarray[Any, Any]
-reveal_type(A.cumsum(out=B)) # E: SubClass
+reveal_type(AR_f8.cumsum()) # E: numpy.ndarray[Any, Any]
+reveal_type(AR_f8.cumsum(out=B)) # E: SubClass
reveal_type(f8.max()) # E: Any
-reveal_type(A.max()) # E: Any
-reveal_type(A.max(axis=0)) # E: Any
-reveal_type(A.max(keepdims=True)) # E: Any
-reveal_type(A.max(out=B)) # E: SubClass
+reveal_type(AR_f8.max()) # E: Any
+reveal_type(AR_f8.max(axis=0)) # E: Any
+reveal_type(AR_f8.max(keepdims=True)) # E: Any
+reveal_type(AR_f8.max(out=B)) # E: SubClass
reveal_type(f8.mean()) # E: Any
-reveal_type(A.mean()) # E: Any
-reveal_type(A.mean(axis=0)) # E: Any
-reveal_type(A.mean(keepdims=True)) # E: Any
-reveal_type(A.mean(out=B)) # E: SubClass
+reveal_type(AR_f8.mean()) # E: Any
+reveal_type(AR_f8.mean(axis=0)) # E: Any
+reveal_type(AR_f8.mean(keepdims=True)) # E: Any
+reveal_type(AR_f8.mean(out=B)) # E: SubClass
reveal_type(f8.min()) # E: Any
-reveal_type(A.min()) # E: Any
-reveal_type(A.min(axis=0)) # E: Any
-reveal_type(A.min(keepdims=True)) # E: Any
-reveal_type(A.min(out=B)) # E: SubClass
+reveal_type(AR_f8.min()) # E: Any
+reveal_type(AR_f8.min(axis=0)) # E: Any
+reveal_type(AR_f8.min(keepdims=True)) # E: Any
+reveal_type(AR_f8.min(out=B)) # E: SubClass
reveal_type(f8.newbyteorder()) # E: {float64}
-reveal_type(A.newbyteorder()) # E: numpy.ndarray[Any, Any]
+reveal_type(AR_f8.newbyteorder()) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
reveal_type(B.newbyteorder('|')) # E: SubClass
reveal_type(f8.prod()) # E: Any
-reveal_type(A.prod()) # E: Any
-reveal_type(A.prod(axis=0)) # E: Any
-reveal_type(A.prod(keepdims=True)) # E: Any
-reveal_type(A.prod(out=B)) # E: SubClass
+reveal_type(AR_f8.prod()) # E: Any
+reveal_type(AR_f8.prod(axis=0)) # E: Any
+reveal_type(AR_f8.prod(keepdims=True)) # E: Any
+reveal_type(AR_f8.prod(out=B)) # E: SubClass
reveal_type(f8.ptp()) # E: Any
-reveal_type(A.ptp()) # E: Any
-reveal_type(A.ptp(axis=0)) # E: Any
-reveal_type(A.ptp(keepdims=True)) # E: Any
-reveal_type(A.ptp(out=B)) # E: SubClass
+reveal_type(AR_f8.ptp()) # E: Any
+reveal_type(AR_f8.ptp(axis=0)) # E: Any
+reveal_type(AR_f8.ptp(keepdims=True)) # E: Any
+reveal_type(AR_f8.ptp(out=B)) # E: SubClass
reveal_type(f8.round()) # E: {float64}
-reveal_type(A.round()) # E: numpy.ndarray[Any, Any]
-reveal_type(A.round(out=B)) # E: SubClass
+reveal_type(AR_f8.round()) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(AR_f8.round(out=B)) # E: SubClass
-reveal_type(f8.repeat(1)) # E: numpy.ndarray[Any, Any]
-reveal_type(A.repeat(1)) # E: numpy.ndarray[Any, Any]
+reveal_type(f8.repeat(1)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(AR_f8.repeat(1)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
reveal_type(B.repeat(1)) # E: numpy.ndarray[Any, Any]
reveal_type(f8.std()) # E: Any
-reveal_type(A.std()) # E: Any
-reveal_type(A.std(axis=0)) # E: Any
-reveal_type(A.std(keepdims=True)) # E: Any
-reveal_type(A.std(out=B)) # E: SubClass
+reveal_type(AR_f8.std()) # E: Any
+reveal_type(AR_f8.std(axis=0)) # E: Any
+reveal_type(AR_f8.std(keepdims=True)) # E: Any
+reveal_type(AR_f8.std(out=B)) # E: SubClass
reveal_type(f8.sum()) # E: Any
-reveal_type(A.sum()) # E: Any
-reveal_type(A.sum(axis=0)) # E: Any
-reveal_type(A.sum(keepdims=True)) # E: Any
-reveal_type(A.sum(out=B)) # E: SubClass
+reveal_type(AR_f8.sum()) # E: Any
+reveal_type(AR_f8.sum(axis=0)) # E: Any
+reveal_type(AR_f8.sum(keepdims=True)) # E: Any
+reveal_type(AR_f8.sum(out=B)) # E: SubClass
-reveal_type(f8.take(0)) # E: Any
-reveal_type(A.take(0)) # E: Any
-reveal_type(A.take([0])) # E: numpy.ndarray[Any, Any]
-reveal_type(A.take(0, out=B)) # E: SubClass
-reveal_type(A.take([0], out=B)) # E: SubClass
+reveal_type(f8.take(0)) # E: {float64}
+reveal_type(AR_f8.take(0)) # E: {float64}
+reveal_type(AR_f8.take([0])) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(AR_f8.take(0, out=B)) # E: SubClass
+reveal_type(AR_f8.take([0], out=B)) # E: SubClass
reveal_type(f8.var()) # E: Any
-reveal_type(A.var()) # E: Any
-reveal_type(A.var(axis=0)) # E: Any
-reveal_type(A.var(keepdims=True)) # E: Any
-reveal_type(A.var(out=B)) # E: SubClass
+reveal_type(AR_f8.var()) # E: Any
+reveal_type(AR_f8.var(axis=0)) # E: Any
+reveal_type(AR_f8.var(keepdims=True)) # E: Any
+reveal_type(AR_f8.var(out=B)) # E: SubClass
+
+reveal_type(AR_f8.argpartition([0])) # E: numpy.ndarray[Any, numpy.dtype[{intp}]]
+
+reveal_type(AR_f8.diagonal()) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+
+reveal_type(AR_f8.dot(1)) # E: numpy.ndarray[Any, Any]
+reveal_type(AR_f8.dot([1])) # E: Any
+reveal_type(AR_f8.dot(1, out=B)) # E: SubClass
+
+reveal_type(AR_f8.nonzero()) # E: tuple[numpy.ndarray[Any, numpy.dtype[{intp}]]]
+
+reveal_type(AR_f8.searchsorted(1)) # E: {intp}
+reveal_type(AR_f8.searchsorted([1])) # E: numpy.ndarray[Any, numpy.dtype[{intp}]]
+
+reveal_type(AR_f8.trace()) # E: Any
+reveal_type(AR_f8.trace(out=B)) # E: SubClass
+
+reveal_type(AR_f8.item()) # E: float
+reveal_type(AR_U.item()) # E: str
+
+reveal_type(AR_f8.ravel()) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(AR_U.ravel()) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]]
-reveal_type(A.argpartition([0])) # E: numpy.ndarray[Any, Any]
+reveal_type(AR_f8.flatten()) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(AR_U.flatten()) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]]
-reveal_type(A.diagonal()) # E: numpy.ndarray[Any, Any]
+reveal_type(AR_f8.reshape(1)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(AR_U.reshape(1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]]
-reveal_type(A.dot(1)) # E: Any
-reveal_type(A.dot(1, out=B)) # E: SubClass
+reveal_type(int(AR_f8)) # E: int
+reveal_type(int(AR_U)) # E: int
-reveal_type(A.nonzero()) # E: tuple[numpy.ndarray[Any, Any]]
+reveal_type(float(AR_f8)) # E: float
+reveal_type(float(AR_U)) # E: float
-reveal_type(A.searchsorted([1])) # E: numpy.ndarray[Any, Any]
+reveal_type(complex(AR_f8)) # E: complex
-reveal_type(A.trace()) # E: Any
-reveal_type(A.trace(out=B)) # E: SubClass
+reveal_type(operator.index(AR_i8)) # E: int
diff --git a/numpy/typing/tests/data/reveal/scalars.py b/numpy/typing/tests/data/reveal/scalars.py
index fa94aa49b..d98388422 100644
--- a/numpy/typing/tests/data/reveal/scalars.py
+++ b/numpy/typing/tests/data/reveal/scalars.py
@@ -1,28 +1,35 @@
import numpy as np
-x = np.complex64(3 + 2j)
+b: np.bool_
+u8: np.uint64
+i8: np.int64
+f8: np.float64
+c8: np.complex64
+c16: np.complex128
+U: np.str_
+S: np.bytes_
-reveal_type(x.real) # E: {float32}
-reveal_type(x.imag) # E: {float32}
+reveal_type(c8.real) # E: {float32}
+reveal_type(c8.imag) # E: {float32}
-reveal_type(x.real.real) # E: {float32}
-reveal_type(x.real.imag) # E: {float32}
+reveal_type(c8.real.real) # E: {float32}
+reveal_type(c8.real.imag) # E: {float32}
-reveal_type(x.itemsize) # E: int
-reveal_type(x.shape) # E: Tuple[]
-reveal_type(x.strides) # E: Tuple[]
+reveal_type(c8.itemsize) # E: int
+reveal_type(c8.shape) # E: Tuple[]
+reveal_type(c8.strides) # E: Tuple[]
-reveal_type(x.ndim) # E: Literal[0]
-reveal_type(x.size) # E: Literal[1]
+reveal_type(c8.ndim) # E: Literal[0]
+reveal_type(c8.size) # E: Literal[1]
-reveal_type(x.squeeze()) # E: {complex64}
-reveal_type(x.byteswap()) # E: {complex64}
-reveal_type(x.transpose()) # E: {complex64}
+reveal_type(c8.squeeze()) # E: {complex64}
+reveal_type(c8.byteswap()) # E: {complex64}
+reveal_type(c8.transpose()) # E: {complex64}
-reveal_type(x.dtype) # E: numpy.dtype[{complex64}]
+reveal_type(c8.dtype) # E: numpy.dtype[{complex64}]
-reveal_type(np.complex64().real) # E: {float32}
-reveal_type(np.complex128().imag) # E: {float64}
+reveal_type(c8.real) # E: {float32}
+reveal_type(c16.imag) # E: {float64}
reveal_type(np.unicode_('foo')) # E: numpy.str_
reveal_type(np.str0('foo')) # E: numpy.str_
@@ -67,3 +74,43 @@ reveal_type(np.cfloat()) # E: {cdouble}
reveal_type(np.clongdouble()) # E: {clongdouble}
reveal_type(np.clongfloat()) # E: {clongdouble}
reveal_type(np.longcomplex()) # E: {clongdouble}
+
+reveal_type(b.item()) # E: bool
+reveal_type(i8.item()) # E: int
+reveal_type(u8.item()) # E: int
+reveal_type(f8.item()) # E: float
+reveal_type(c16.item()) # E: complex
+reveal_type(U.item()) # E: str
+reveal_type(S.item()) # E: bytes
+
+reveal_type(b.tolist()) # E: bool
+reveal_type(i8.tolist()) # E: int
+reveal_type(u8.tolist()) # E: int
+reveal_type(f8.tolist()) # E: float
+reveal_type(c16.tolist()) # E: complex
+reveal_type(U.tolist()) # E: str
+reveal_type(S.tolist()) # E: bytes
+
+reveal_type(b.ravel()) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
+reveal_type(i8.ravel()) # E: numpy.ndarray[Any, numpy.dtype[{int64}]]
+reveal_type(u8.ravel()) # E: numpy.ndarray[Any, numpy.dtype[{uint64}]]
+reveal_type(f8.ravel()) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(c16.ravel()) # E: numpy.ndarray[Any, numpy.dtype[{complex128}]]
+reveal_type(U.ravel()) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]]
+reveal_type(S.ravel()) # E: numpy.ndarray[Any, numpy.dtype[numpy.bytes_]]
+
+reveal_type(b.flatten()) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
+reveal_type(i8.flatten()) # E: numpy.ndarray[Any, numpy.dtype[{int64}]]
+reveal_type(u8.flatten()) # E: numpy.ndarray[Any, numpy.dtype[{uint64}]]
+reveal_type(f8.flatten()) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(c16.flatten()) # E: numpy.ndarray[Any, numpy.dtype[{complex128}]]
+reveal_type(U.flatten()) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]]
+reveal_type(S.flatten()) # E: numpy.ndarray[Any, numpy.dtype[numpy.bytes_]]
+
+reveal_type(b.reshape(1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
+reveal_type(i8.reshape(1)) # E: numpy.ndarray[Any, numpy.dtype[{int64}]]
+reveal_type(u8.reshape(1)) # E: numpy.ndarray[Any, numpy.dtype[{uint64}]]
+reveal_type(f8.reshape(1)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(c16.reshape(1)) # E: numpy.ndarray[Any, numpy.dtype[{complex128}]]
+reveal_type(U.reshape(1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]]
+reveal_type(S.reshape(1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bytes_]]