diff options
Diffstat (limited to 'numpy')
293 files changed, 6893 insertions, 5110 deletions
diff --git a/numpy/__init__.cython-30.pxd b/numpy/__init__.cython-30.pxd index 42a46d0b8..5fd6086e0 100644 --- a/numpy/__init__.cython-30.pxd +++ b/numpy/__init__.cython-30.pxd @@ -133,7 +133,6 @@ cdef extern from "numpy/arrayobject.h": NPY_ALIGNED NPY_NOTSWAPPED NPY_WRITEABLE - NPY_UPDATEIFCOPY NPY_ARR_HAS_DESCR NPY_BEHAVED @@ -165,7 +164,7 @@ cdef extern from "numpy/arrayobject.h": NPY_ARRAY_ALIGNED NPY_ARRAY_NOTSWAPPED NPY_ARRAY_WRITEABLE - NPY_ARRAY_UPDATEIFCOPY + NPY_ARRAY_WRITEBACKIFCOPY NPY_ARRAY_BEHAVED NPY_ARRAY_BEHAVED_NS diff --git a/numpy/__init__.pxd b/numpy/__init__.pxd index 97f3da2e5..03db9a0c1 100644 --- a/numpy/__init__.pxd +++ b/numpy/__init__.pxd @@ -130,7 +130,6 @@ cdef extern from "numpy/arrayobject.h": NPY_ALIGNED NPY_NOTSWAPPED NPY_WRITEABLE - NPY_UPDATEIFCOPY NPY_ARR_HAS_DESCR NPY_BEHAVED @@ -162,7 +161,7 @@ cdef extern from "numpy/arrayobject.h": NPY_ARRAY_ALIGNED NPY_ARRAY_NOTSWAPPED NPY_ARRAY_WRITEABLE - NPY_ARRAY_UPDATEIFCOPY + NPY_ARRAY_WRITEBACKIFCOPY NPY_ARRAY_BEHAVED NPY_ARRAY_BEHAVED_NS diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index e01df7c90..8e92e0f42 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -167,31 +167,25 @@ from numpy.typing._extended_precision import ( complex512 as complex512, ) -from typing import ( - Literal as L, - Any, - ByteString, +from collections.abc import ( Callable, Container, - Callable, - Dict, - Generic, - IO, Iterable, Iterator, - List, Mapping, - NoReturn, - Optional, - overload, Sequence, Sized, +) +from typing import ( + Literal as L, + Any, + Generic, + IO, + NoReturn, + overload, SupportsComplex, SupportsFloat, SupportsInt, - Text, - Tuple, - Type, TypeVar, Union, Protocol, @@ -199,7 +193,6 @@ from typing import ( Final, final, ClassVar, - Set, ) # Ensures that the stubs are picked up @@ -655,8 +648,8 @@ class _MemMapIOProtocol(Protocol): class _SupportsWrite(Protocol[_AnyStr_contra]): def write(self, s: _AnyStr_contra, /) -> object: ... 
-__all__: List[str] -__path__: List[str] +__all__: list[str] +__path__: list[str] __version__: str __git_version__: str test: PytestTester @@ -683,12 +676,12 @@ _DTypeScalar_co = TypeVar("_DTypeScalar_co", covariant=True, bound=generic) _ByteOrder = L["S", "<", ">", "=", "|", "L", "B", "N", "I"] class dtype(Generic[_DTypeScalar_co]): - names: None | Tuple[builtins.str, ...] + names: None | tuple[builtins.str, ...] # Overload for subclass of generic @overload def __new__( cls, - dtype: Type[_DTypeScalar_co], + dtype: type[_DTypeScalar_co], align: bool = ..., copy: bool = ..., ) -> dtype[_DTypeScalar_co]: ... @@ -702,64 +695,64 @@ class dtype(Generic[_DTypeScalar_co]): # first. # Builtin types @overload - def __new__(cls, dtype: Type[bool], align: bool = ..., copy: bool = ...) -> dtype[bool_]: ... + def __new__(cls, dtype: type[bool], align: bool = ..., copy: bool = ...) -> dtype[bool_]: ... @overload - def __new__(cls, dtype: Type[int], align: bool = ..., copy: bool = ...) -> dtype[int_]: ... + def __new__(cls, dtype: type[int], align: bool = ..., copy: bool = ...) -> dtype[int_]: ... @overload - def __new__(cls, dtype: None | Type[float], align: bool = ..., copy: bool = ...) -> dtype[float_]: ... + def __new__(cls, dtype: None | type[float], align: bool = ..., copy: bool = ...) -> dtype[float_]: ... @overload - def __new__(cls, dtype: Type[complex], align: bool = ..., copy: bool = ...) -> dtype[complex_]: ... + def __new__(cls, dtype: type[complex], align: bool = ..., copy: bool = ...) -> dtype[complex_]: ... @overload - def __new__(cls, dtype: Type[builtins.str], align: bool = ..., copy: bool = ...) -> dtype[str_]: ... + def __new__(cls, dtype: type[builtins.str], align: bool = ..., copy: bool = ...) -> dtype[str_]: ... @overload - def __new__(cls, dtype: Type[bytes], align: bool = ..., copy: bool = ...) -> dtype[bytes_]: ... + def __new__(cls, dtype: type[bytes], align: bool = ..., copy: bool = ...) -> dtype[bytes_]: ... 
# `unsignedinteger` string-based representations and ctypes @overload - def __new__(cls, dtype: _UInt8Codes | Type[ct.c_uint8], align: bool = ..., copy: bool = ...) -> dtype[uint8]: ... + def __new__(cls, dtype: _UInt8Codes | type[ct.c_uint8], align: bool = ..., copy: bool = ...) -> dtype[uint8]: ... @overload - def __new__(cls, dtype: _UInt16Codes | Type[ct.c_uint16], align: bool = ..., copy: bool = ...) -> dtype[uint16]: ... + def __new__(cls, dtype: _UInt16Codes | type[ct.c_uint16], align: bool = ..., copy: bool = ...) -> dtype[uint16]: ... @overload - def __new__(cls, dtype: _UInt32Codes | Type[ct.c_uint32], align: bool = ..., copy: bool = ...) -> dtype[uint32]: ... + def __new__(cls, dtype: _UInt32Codes | type[ct.c_uint32], align: bool = ..., copy: bool = ...) -> dtype[uint32]: ... @overload - def __new__(cls, dtype: _UInt64Codes | Type[ct.c_uint64], align: bool = ..., copy: bool = ...) -> dtype[uint64]: ... + def __new__(cls, dtype: _UInt64Codes | type[ct.c_uint64], align: bool = ..., copy: bool = ...) -> dtype[uint64]: ... @overload - def __new__(cls, dtype: _UByteCodes | Type[ct.c_ubyte], align: bool = ..., copy: bool = ...) -> dtype[ubyte]: ... + def __new__(cls, dtype: _UByteCodes | type[ct.c_ubyte], align: bool = ..., copy: bool = ...) -> dtype[ubyte]: ... @overload - def __new__(cls, dtype: _UShortCodes | Type[ct.c_ushort], align: bool = ..., copy: bool = ...) -> dtype[ushort]: ... + def __new__(cls, dtype: _UShortCodes | type[ct.c_ushort], align: bool = ..., copy: bool = ...) -> dtype[ushort]: ... @overload - def __new__(cls, dtype: _UIntCCodes | Type[ct.c_uint], align: bool = ..., copy: bool = ...) -> dtype[uintc]: ... + def __new__(cls, dtype: _UIntCCodes | type[ct.c_uint], align: bool = ..., copy: bool = ...) -> dtype[uintc]: ... 
# NOTE: We're assuming here that `uint_ptr_t == size_t`, # an assumption that does not hold in rare cases (same for `ssize_t`) @overload - def __new__(cls, dtype: _UIntPCodes | Type[ct.c_void_p] | Type[ct.c_size_t], align: bool = ..., copy: bool = ...) -> dtype[uintp]: ... + def __new__(cls, dtype: _UIntPCodes | type[ct.c_void_p] | type[ct.c_size_t], align: bool = ..., copy: bool = ...) -> dtype[uintp]: ... @overload - def __new__(cls, dtype: _UIntCodes | Type[ct.c_ulong], align: bool = ..., copy: bool = ...) -> dtype[uint]: ... + def __new__(cls, dtype: _UIntCodes | type[ct.c_ulong], align: bool = ..., copy: bool = ...) -> dtype[uint]: ... @overload - def __new__(cls, dtype: _ULongLongCodes | Type[ct.c_ulonglong], align: bool = ..., copy: bool = ...) -> dtype[ulonglong]: ... + def __new__(cls, dtype: _ULongLongCodes | type[ct.c_ulonglong], align: bool = ..., copy: bool = ...) -> dtype[ulonglong]: ... # `signedinteger` string-based representations and ctypes @overload - def __new__(cls, dtype: _Int8Codes | Type[ct.c_int8], align: bool = ..., copy: bool = ...) -> dtype[int8]: ... + def __new__(cls, dtype: _Int8Codes | type[ct.c_int8], align: bool = ..., copy: bool = ...) -> dtype[int8]: ... @overload - def __new__(cls, dtype: _Int16Codes | Type[ct.c_int16], align: bool = ..., copy: bool = ...) -> dtype[int16]: ... + def __new__(cls, dtype: _Int16Codes | type[ct.c_int16], align: bool = ..., copy: bool = ...) -> dtype[int16]: ... @overload - def __new__(cls, dtype: _Int32Codes | Type[ct.c_int32], align: bool = ..., copy: bool = ...) -> dtype[int32]: ... + def __new__(cls, dtype: _Int32Codes | type[ct.c_int32], align: bool = ..., copy: bool = ...) -> dtype[int32]: ... @overload - def __new__(cls, dtype: _Int64Codes | Type[ct.c_int64], align: bool = ..., copy: bool = ...) -> dtype[int64]: ... + def __new__(cls, dtype: _Int64Codes | type[ct.c_int64], align: bool = ..., copy: bool = ...) -> dtype[int64]: ... 
@overload - def __new__(cls, dtype: _ByteCodes | Type[ct.c_byte], align: bool = ..., copy: bool = ...) -> dtype[byte]: ... + def __new__(cls, dtype: _ByteCodes | type[ct.c_byte], align: bool = ..., copy: bool = ...) -> dtype[byte]: ... @overload - def __new__(cls, dtype: _ShortCodes | Type[ct.c_short], align: bool = ..., copy: bool = ...) -> dtype[short]: ... + def __new__(cls, dtype: _ShortCodes | type[ct.c_short], align: bool = ..., copy: bool = ...) -> dtype[short]: ... @overload - def __new__(cls, dtype: _IntCCodes | Type[ct.c_int], align: bool = ..., copy: bool = ...) -> dtype[intc]: ... + def __new__(cls, dtype: _IntCCodes | type[ct.c_int], align: bool = ..., copy: bool = ...) -> dtype[intc]: ... @overload - def __new__(cls, dtype: _IntPCodes | Type[ct.c_ssize_t], align: bool = ..., copy: bool = ...) -> dtype[intp]: ... + def __new__(cls, dtype: _IntPCodes | type[ct.c_ssize_t], align: bool = ..., copy: bool = ...) -> dtype[intp]: ... @overload - def __new__(cls, dtype: _IntCodes | Type[ct.c_long], align: bool = ..., copy: bool = ...) -> dtype[int_]: ... + def __new__(cls, dtype: _IntCodes | type[ct.c_long], align: bool = ..., copy: bool = ...) -> dtype[int_]: ... @overload - def __new__(cls, dtype: _LongLongCodes | Type[ct.c_longlong], align: bool = ..., copy: bool = ...) -> dtype[longlong]: ... + def __new__(cls, dtype: _LongLongCodes | type[ct.c_longlong], align: bool = ..., copy: bool = ...) -> dtype[longlong]: ... # `floating` string-based representations and ctypes @overload @@ -771,11 +764,11 @@ class dtype(Generic[_DTypeScalar_co]): @overload def __new__(cls, dtype: _HalfCodes, align: bool = ..., copy: bool = ...) -> dtype[half]: ... @overload - def __new__(cls, dtype: _SingleCodes | Type[ct.c_float], align: bool = ..., copy: bool = ...) -> dtype[single]: ... + def __new__(cls, dtype: _SingleCodes | type[ct.c_float], align: bool = ..., copy: bool = ...) -> dtype[single]: ... 
@overload - def __new__(cls, dtype: _DoubleCodes | Type[ct.c_double], align: bool = ..., copy: bool = ...) -> dtype[double]: ... + def __new__(cls, dtype: _DoubleCodes | type[ct.c_double], align: bool = ..., copy: bool = ...) -> dtype[double]: ... @overload - def __new__(cls, dtype: _LongDoubleCodes | Type[ct.c_longdouble], align: bool = ..., copy: bool = ...) -> dtype[longdouble]: ... + def __new__(cls, dtype: _LongDoubleCodes | type[ct.c_longdouble], align: bool = ..., copy: bool = ...) -> dtype[longdouble]: ... # `complexfloating` string-based representations @overload @@ -791,7 +784,7 @@ class dtype(Generic[_DTypeScalar_co]): # Miscellaneous string-based representations and ctypes @overload - def __new__(cls, dtype: _BoolCodes | Type[ct.c_bool], align: bool = ..., copy: bool = ...) -> dtype[bool_]: ... + def __new__(cls, dtype: _BoolCodes | type[ct.c_bool], align: bool = ..., copy: bool = ...) -> dtype[bool_]: ... @overload def __new__(cls, dtype: _TD64Codes, align: bool = ..., copy: bool = ...) -> dtype[timedelta64]: ... @overload @@ -799,11 +792,11 @@ class dtype(Generic[_DTypeScalar_co]): @overload def __new__(cls, dtype: _StrCodes, align: bool = ..., copy: bool = ...) -> dtype[str_]: ... @overload - def __new__(cls, dtype: _BytesCodes | Type[ct.c_char], align: bool = ..., copy: bool = ...) -> dtype[bytes_]: ... + def __new__(cls, dtype: _BytesCodes | type[ct.c_char], align: bool = ..., copy: bool = ...) -> dtype[bytes_]: ... @overload def __new__(cls, dtype: _VoidCodes, align: bool = ..., copy: bool = ...) -> dtype[void]: ... @overload - def __new__(cls, dtype: _ObjectCodes | Type[ct.py_object], align: bool = ..., copy: bool = ...) -> dtype[object_]: ... + def __new__(cls, dtype: _ObjectCodes | type[ct.py_object], align: bool = ..., copy: bool = ...) -> dtype[object_]: ... 
# dtype of a dtype is the same dtype @overload @@ -840,7 +833,7 @@ class dtype(Generic[_DTypeScalar_co]): @overload def __new__( cls, - dtype: Type[object], + dtype: type[object], align: bool = ..., copy: bool = ..., ) -> dtype[object_]: ... @@ -849,7 +842,7 @@ class dtype(Generic[_DTypeScalar_co]): def __class_getitem__(self, item: Any) -> GenericAlias: ... @overload - def __getitem__(self: dtype[void], key: List[builtins.str]) -> dtype[void]: ... + def __getitem__(self: dtype[void], key: list[builtins.str]) -> dtype[void]: ... @overload def __getitem__(self: dtype[void], key: builtins.str | SupportsIndex) -> dtype[Any]: ... @@ -889,11 +882,11 @@ class dtype(Generic[_DTypeScalar_co]): @property def char(self) -> builtins.str: ... @property - def descr(self) -> List[Tuple[builtins.str, builtins.str] | Tuple[builtins.str, builtins.str, _Shape]]: ... + def descr(self) -> list[tuple[builtins.str, builtins.str] | tuple[builtins.str, builtins.str, _Shape]]: ... @property def fields( self, - ) -> None | MappingProxyType[builtins.str, Tuple[dtype[Any], int] | Tuple[dtype[Any], int, Any]]: ... + ) -> None | MappingProxyType[builtins.str, tuple[dtype[Any], int] | tuple[dtype[Any], int, Any]]: ... @property def flags(self) -> int: ... @property @@ -919,12 +912,12 @@ class dtype(Generic[_DTypeScalar_co]): @property def ndim(self) -> int: ... @property - def subdtype(self) -> None | Tuple[dtype[Any], _Shape]: ... + def subdtype(self) -> None | tuple[dtype[Any], _Shape]: ... def newbyteorder(self: _DType, __new_order: _ByteOrder = ...) -> _DType: ... @property def str(self) -> builtins.str: ... @property - def type(self) -> Type[_DTypeScalar_co]: ... + def type(self) -> type[_DTypeScalar_co]: ... _ArrayLikeInt = Union[ int, @@ -950,20 +943,20 @@ class flatiter(Generic[_NdArraySubClass]): @overload def __getitem__( self: flatiter[ndarray[Any, dtype[_ScalarType]]], - key: Union[int, integer], + key: int | integer, ) -> _ScalarType: ... 
@overload def __getitem__( - self, key: Union[_ArrayLikeInt, slice, ellipsis], + self, key: _ArrayLikeInt | slice | ellipsis ) -> _NdArraySubClass: ... @overload def __array__(self: flatiter[ndarray[Any, _DType]], dtype: None = ..., /) -> ndarray[Any, _DType]: ... @overload def __array__(self, dtype: _DType, /) -> ndarray[Any, _DType]: ... -_OrderKACF = Optional[L["K", "A", "C", "F"]] -_OrderACF = Optional[L["A", "C", "F"]] -_OrderCF = Optional[L["C", "F"]] +_OrderKACF = L[None, "K", "A", "C", "F"] +_OrderACF = L[None, "A", "C", "F"] +_OrderCF = L[None, "C", "F"] _ModeKind = L["raise", "wrap", "clip"] _PartitionKind = L["introselect"] @@ -988,7 +981,7 @@ class _ArrayOrScalarCommon: def __str__(self) -> str: ... def __repr__(self) -> str: ... def __copy__(self: _ArraySelf) -> _ArraySelf: ... - def __deepcopy__(self: _ArraySelf, memo: None | Dict[int, Any], /) -> _ArraySelf: ... + def __deepcopy__(self: _ArraySelf, memo: None | dict[int, Any], /) -> _ArraySelf: ... # TODO: How to deal with the non-commutative nature of `==` and `!=`? # xref numpy/numpy#17368 @@ -1010,17 +1003,17 @@ class _ArrayOrScalarCommon: def tolist(self) -> Any: ... @property - def __array_interface__(self) -> Dict[str, Any]: ... + def __array_interface__(self) -> dict[str, Any]: ... @property def __array_priority__(self) -> float: ... @property def __array_struct__(self) -> Any: ... # builtins.PyCapsule - def __setstate__(self, state: Tuple[ + def __setstate__(self, state: tuple[ SupportsIndex, # version _ShapeLike, # Shape _DType_co, # DType bool, # F-continuous - bytes | List[Any], # Data + bytes | list[Any], # Data ], /) -> None: ... # a `bool_` is returned when `keepdims=True` and `self` is a 0d array @@ -1034,14 +1027,14 @@ class _ArrayOrScalarCommon: @overload def all( self, - axis: Optional[_ShapeLike] = ..., + axis: None | _ShapeLike = ..., out: None = ..., keepdims: bool = ..., ) -> Any: ... 
@overload def all( self, - axis: Optional[_ShapeLike] = ..., + axis: None | _ShapeLike = ..., out: _NdArraySubClass = ..., keepdims: bool = ..., ) -> _NdArraySubClass: ... @@ -1056,14 +1049,14 @@ class _ArrayOrScalarCommon: @overload def any( self, - axis: Optional[_ShapeLike] = ..., + axis: None | _ShapeLike = ..., out: None = ..., keepdims: bool = ..., ) -> Any: ... @overload def any( self, - axis: Optional[_ShapeLike] = ..., + axis: None | _ShapeLike = ..., out: _NdArraySubClass = ..., keepdims: bool = ..., ) -> _NdArraySubClass: ... @@ -1087,7 +1080,7 @@ class _ArrayOrScalarCommon: @overload def argmax( self, - axis: Optional[_ShapeLike] = ..., + axis: None | _ShapeLike = ..., out: _NdArraySubClass = ..., *, keepdims: bool = ..., @@ -1112,7 +1105,7 @@ class _ArrayOrScalarCommon: @overload def argmin( self, - axis: Optional[_ShapeLike] = ..., + axis: None | _ShapeLike = ..., out: _NdArraySubClass = ..., *, keepdims: bool = ..., @@ -1120,9 +1113,9 @@ class _ArrayOrScalarCommon: def argsort( self, - axis: Optional[SupportsIndex] = ..., - kind: Optional[_SortKind] = ..., - order: Union[None, str, Sequence[str]] = ..., + axis: None | SupportsIndex = ..., + kind: None | _SortKind = ..., + order: None | str | Sequence[str] = ..., ) -> ndarray: ... @overload @@ -1144,7 +1137,7 @@ class _ArrayOrScalarCommon: def clip( self, min: ArrayLike = ..., - max: Optional[ArrayLike] = ..., + max: None | ArrayLike = ..., out: None = ..., **kwargs: Any, ) -> ndarray: ... @@ -1160,7 +1153,7 @@ class _ArrayOrScalarCommon: def clip( self, min: ArrayLike = ..., - max: Optional[ArrayLike] = ..., + max: None | ArrayLike = ..., out: _NdArraySubClass = ..., **kwargs: Any, ) -> _NdArraySubClass: ... @@ -1177,14 +1170,14 @@ class _ArrayOrScalarCommon: def compress( self, a: ArrayLike, - axis: Optional[SupportsIndex] = ..., + axis: None | SupportsIndex = ..., out: None = ..., ) -> ndarray: ... 
@overload def compress( self, a: ArrayLike, - axis: Optional[SupportsIndex] = ..., + axis: None | SupportsIndex = ..., out: _NdArraySubClass = ..., ) -> _NdArraySubClass: ... @@ -1195,14 +1188,14 @@ class _ArrayOrScalarCommon: @overload def cumprod( self, - axis: Optional[SupportsIndex] = ..., + axis: None | SupportsIndex = ..., dtype: DTypeLike = ..., out: None = ..., ) -> ndarray: ... @overload def cumprod( self, - axis: Optional[SupportsIndex] = ..., + axis: None | SupportsIndex = ..., dtype: DTypeLike = ..., out: _NdArraySubClass = ..., ) -> _NdArraySubClass: ... @@ -1210,14 +1203,14 @@ class _ArrayOrScalarCommon: @overload def cumsum( self, - axis: Optional[SupportsIndex] = ..., + axis: None | SupportsIndex = ..., dtype: DTypeLike = ..., out: None = ..., ) -> ndarray: ... @overload def cumsum( self, - axis: Optional[SupportsIndex] = ..., + axis: None | SupportsIndex = ..., dtype: DTypeLike = ..., out: _NdArraySubClass = ..., ) -> _NdArraySubClass: ... @@ -1225,7 +1218,7 @@ class _ArrayOrScalarCommon: @overload def max( self, - axis: Optional[_ShapeLike] = ..., + axis: None | _ShapeLike = ..., out: None = ..., keepdims: bool = ..., initial: _NumberLike_co = ..., @@ -1234,7 +1227,7 @@ class _ArrayOrScalarCommon: @overload def max( self, - axis: Optional[_ShapeLike] = ..., + axis: None | _ShapeLike = ..., out: _NdArraySubClass = ..., keepdims: bool = ..., initial: _NumberLike_co = ..., @@ -1244,7 +1237,7 @@ class _ArrayOrScalarCommon: @overload def mean( self, - axis: Optional[_ShapeLike] = ..., + axis: None | _ShapeLike = ..., dtype: DTypeLike = ..., out: None = ..., keepdims: bool = ..., @@ -1252,7 +1245,7 @@ class _ArrayOrScalarCommon: @overload def mean( self, - axis: Optional[_ShapeLike] = ..., + axis: None | _ShapeLike = ..., dtype: DTypeLike = ..., out: _NdArraySubClass = ..., keepdims: bool = ..., @@ -1261,7 +1254,7 @@ class _ArrayOrScalarCommon: @overload def min( self, - axis: Optional[_ShapeLike] = ..., + axis: None | _ShapeLike = ..., out: None = ..., 
keepdims: bool = ..., initial: _NumberLike_co = ..., @@ -1270,7 +1263,7 @@ class _ArrayOrScalarCommon: @overload def min( self, - axis: Optional[_ShapeLike] = ..., + axis: None | _ShapeLike = ..., out: _NdArraySubClass = ..., keepdims: bool = ..., initial: _NumberLike_co = ..., @@ -1285,7 +1278,7 @@ class _ArrayOrScalarCommon: @overload def prod( self, - axis: Optional[_ShapeLike] = ..., + axis: None | _ShapeLike = ..., dtype: DTypeLike = ..., out: None = ..., keepdims: bool = ..., @@ -1295,7 +1288,7 @@ class _ArrayOrScalarCommon: @overload def prod( self, - axis: Optional[_ShapeLike] = ..., + axis: None | _ShapeLike = ..., dtype: DTypeLike = ..., out: _NdArraySubClass = ..., keepdims: bool = ..., @@ -1306,14 +1299,14 @@ class _ArrayOrScalarCommon: @overload def ptp( self, - axis: Optional[_ShapeLike] = ..., + axis: None | _ShapeLike = ..., out: None = ..., keepdims: bool = ..., ) -> Any: ... @overload def ptp( self, - axis: Optional[_ShapeLike] = ..., + axis: None | _ShapeLike = ..., out: _NdArraySubClass = ..., keepdims: bool = ..., ) -> _NdArraySubClass: ... 
@@ -1334,7 +1327,7 @@ class _ArrayOrScalarCommon: @overload def std( self, - axis: Optional[_ShapeLike] = ..., + axis: None | _ShapeLike = ..., dtype: DTypeLike = ..., out: None = ..., ddof: int = ..., @@ -1343,7 +1336,7 @@ class _ArrayOrScalarCommon: @overload def std( self, - axis: Optional[_ShapeLike] = ..., + axis: None | _ShapeLike = ..., dtype: DTypeLike = ..., out: _NdArraySubClass = ..., ddof: int = ..., @@ -1353,7 +1346,7 @@ class _ArrayOrScalarCommon: @overload def sum( self, - axis: Optional[_ShapeLike] = ..., + axis: None | _ShapeLike = ..., dtype: DTypeLike = ..., out: None = ..., keepdims: bool = ..., @@ -1363,7 +1356,7 @@ class _ArrayOrScalarCommon: @overload def sum( self, - axis: Optional[_ShapeLike] = ..., + axis: None | _ShapeLike = ..., dtype: DTypeLike = ..., out: _NdArraySubClass = ..., keepdims: bool = ..., @@ -1374,7 +1367,7 @@ class _ArrayOrScalarCommon: @overload def var( self, - axis: Optional[_ShapeLike] = ..., + axis: None | _ShapeLike = ..., dtype: DTypeLike = ..., out: None = ..., ddof: int = ..., @@ -1383,7 +1376,7 @@ class _ArrayOrScalarCommon: @overload def var( self, - axis: Optional[_ShapeLike] = ..., + axis: None | _ShapeLike = ..., dtype: DTypeLike = ..., out: _NdArraySubClass = ..., ddof: int = ..., @@ -1415,12 +1408,12 @@ _SupportsBuffer = Union[ _T = TypeVar("_T") _T_co = TypeVar("_T_co", covariant=True) _T_contra = TypeVar("_T_contra", contravariant=True) -_2Tuple = Tuple[_T, _T] +_2Tuple = tuple[_T, _T] _CastingKind = L["no", "equiv", "safe", "same_kind", "unsafe"] _DTypeLike = Union[ dtype[_ScalarType], - Type[_ScalarType], + type[_ScalarType], _SupportsDType[dtype[_ScalarType]], ] @@ -1451,7 +1444,7 @@ class _SupportsImag(Protocol[_T_co]): class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): @property - def base(self) -> Optional[ndarray]: ... + def base(self) -> None | ndarray: ... @property def ndim(self) -> int: ... 
@property @@ -1469,7 +1462,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): @imag.setter def imag(self, value: ArrayLike) -> None: ... def __new__( - cls: Type[_ArraySelf], + cls: type[_ArraySelf], shape: _ShapeLike, dtype: DTypeLike = ..., buffer: None | _SupportsBuffer = ..., @@ -1503,37 +1496,37 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): ) -> Any: ... @property - def __array_finalize__(self) -> None: ... + def __array_finalize__(self, obj: None | NDArray[Any], /) -> None: ... def __array_wrap__( self, array: ndarray[_ShapeType2, _DType], - context: None | Tuple[ufunc, Tuple[Any, ...], int] = ..., + context: None | tuple[ufunc, tuple[Any, ...], int] = ..., /, ) -> ndarray[_ShapeType2, _DType]: ... def __array_prepare__( self, array: ndarray[_ShapeType2, _DType], - context: None | Tuple[ufunc, Tuple[Any, ...], int] = ..., + context: None | tuple[ufunc, tuple[Any, ...], int] = ..., /, ) -> ndarray[_ShapeType2, _DType]: ... @overload - def __getitem__(self, key: Union[ - SupportsIndex, - _ArrayLikeInt_co, - Tuple[SupportsIndex | _ArrayLikeInt_co, ...], - ]) -> Any: ... + def __getitem__(self, key: ( + SupportsIndex + | _ArrayLikeInt_co + | tuple[SupportsIndex | _ArrayLikeInt_co, ...] + )) -> Any: ... @overload - def __getitem__(self, key: Union[ - None, - slice, - ellipsis, - SupportsIndex, - _ArrayLikeInt_co, - Tuple[None | slice | ellipsis | _ArrayLikeInt_co | SupportsIndex, ...], - ]) -> ndarray[Any, _DType_co]: ... + def __getitem__(self, key: ( + None + | slice + | ellipsis + | SupportsIndex + | _ArrayLikeInt_co + | tuple[None | slice | ellipsis | _ArrayLikeInt_co | SupportsIndex, ...] + )) -> ndarray[Any, _DType_co]: ... @overload def __getitem__(self: NDArray[void], key: str) -> NDArray[Any]: ... 
@overload @@ -1563,7 +1556,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): @overload def item( self: ndarray[Any, _dtype[_SupportsItem[_T]]], # type: ignore[type-var] - args: Tuple[SupportsIndex, ...], + args: tuple[SupportsIndex, ...], /, ) -> _T: ... @@ -1583,7 +1576,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): def squeeze( self, - axis: Union[SupportsIndex, Tuple[SupportsIndex, ...]] = ..., + axis: SupportsIndex | tuple[SupportsIndex, ...] = ..., ) -> ndarray[Any, _DType_co]: ... def swapaxes( @@ -1600,9 +1593,9 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): def argpartition( self, kth: _ArrayLikeInt_co, - axis: Optional[SupportsIndex] = ..., + axis: None | SupportsIndex = ..., kind: _PartitionKind = ..., - order: Union[None, str, Sequence[str]] = ..., + order: None | str | Sequence[str] = ..., ) -> ndarray[Any, _dtype[intp]]: ... def diagonal( @@ -1622,14 +1615,14 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): def dot(self, b: ArrayLike, out: _NdArraySubClass) -> _NdArraySubClass: ... # `nonzero()` is deprecated for 0d arrays/generics - def nonzero(self) -> Tuple[ndarray[Any, _dtype[intp]], ...]: ... + def nonzero(self) -> tuple[ndarray[Any, _dtype[intp]], ...]: ... def partition( self, kth: _ArrayLikeInt_co, axis: SupportsIndex = ..., kind: _PartitionKind = ..., - order: Union[None, str, Sequence[str]] = ..., + order: None | str | Sequence[str] = ..., ) -> None: ... # `put` is technically available to `generic`, @@ -1646,14 +1639,14 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): self, # >= 1D array v: _ScalarLike_co, # 0D array-like side: _SortSide = ..., - sorter: Optional[_ArrayLikeInt_co] = ..., + sorter: None | _ArrayLikeInt_co = ..., ) -> intp: ... 
@overload def searchsorted( self, # >= 1D array v: ArrayLike, side: _SortSide = ..., - sorter: Optional[_ArrayLikeInt_co] = ..., + sorter: None | _ArrayLikeInt_co = ..., ) -> ndarray[Any, _dtype[intp]]: ... def setfield( @@ -1666,8 +1659,8 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): def sort( self, axis: SupportsIndex = ..., - kind: Optional[_SortKind] = ..., - order: Union[None, str, Sequence[str]] = ..., + kind: None | _SortKind = ..., + order: None | str | Sequence[str] = ..., ) -> None: ... @overload @@ -1693,7 +1686,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): def take( # type: ignore[misc] self: ndarray[Any, _dtype[_ScalarType]], indices: _IntLike_co, - axis: Optional[SupportsIndex] = ..., + axis: None | SupportsIndex = ..., out: None = ..., mode: _ModeKind = ..., ) -> _ScalarType: ... @@ -1701,7 +1694,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): def take( # type: ignore[misc] self, indices: _ArrayLikeInt_co, - axis: Optional[SupportsIndex] = ..., + axis: None | SupportsIndex = ..., out: None = ..., mode: _ModeKind = ..., ) -> ndarray[Any, _DType_co]: ... @@ -1709,7 +1702,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): def take( self, indices: _ArrayLikeInt_co, - axis: Optional[SupportsIndex] = ..., + axis: None | SupportsIndex = ..., out: _NdArraySubClass = ..., mode: _ModeKind = ..., ) -> _NdArraySubClass: ... @@ -1717,7 +1710,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): def repeat( self, repeats: _ArrayLikeInt_co, - axis: Optional[SupportsIndex] = ..., + axis: None | SupportsIndex = ..., ) -> ndarray[Any, _DType_co]: ... def flatten( @@ -1761,7 +1754,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): @overload def view(self: _ArraySelf) -> _ArraySelf: ... @overload - def view(self, type: Type[_NdArraySubClass]) -> _NdArraySubClass: ... 
+ def view(self, type: type[_NdArraySubClass]) -> _NdArraySubClass: ... @overload def view(self, dtype: _DTypeLike[_ScalarType]) -> NDArray[_ScalarType]: ... @overload @@ -1770,7 +1763,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): def view( self, dtype: DTypeLike, - type: Type[_NdArraySubClass], + type: type[_NdArraySubClass], ) -> _NdArraySubClass: ... @overload @@ -1962,7 +1955,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): @overload def __divmod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _2Tuple[NDArray[floating[Any]]]: ... # type: ignore[misc] @overload - def __divmod__(self: _ArrayTD64_co, other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]]) -> Tuple[NDArray[int64], NDArray[timedelta64]]: ... + def __divmod__(self: _ArrayTD64_co, other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]]) -> tuple[NDArray[int64], NDArray[timedelta64]]: ... @overload def __rdivmod__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> _2Tuple[NDArray[int8]]: ... # type: ignore[misc] @@ -1973,7 +1966,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): @overload def __rdivmod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _2Tuple[NDArray[floating[Any]]]: ... # type: ignore[misc] @overload - def __rdivmod__(self: _ArrayTD64_co, other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]]) -> Tuple[NDArray[int64], NDArray[timedelta64]]: ... + def __rdivmod__(self: _ArrayTD64_co, other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]]) -> tuple[NDArray[int64], NDArray[timedelta64]]: ... @overload def __add__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... 
# type: ignore[misc] @@ -2445,12 +2438,9 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): def __ior__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ... @overload def __ior__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... - @overload - def __ior__(self: NDArray[_ScalarType], other: _RecursiveSequence) -> NDArray[_ScalarType]: ... - @overload + def __dlpack__(self: NDArray[number[Any]], *, stream: None = ...) -> _PyCapsule: ... - @overload - def __dlpack_device__(self) -> Tuple[int, L[0]]: ... + def __dlpack_device__(self) -> tuple[int, L[0]]: ... # Keep `dtype` at the bottom to avoid name conflicts with `np.dtype` @property @@ -2482,9 +2472,9 @@ class generic(_ArrayOrScalarCommon): @property def size(self) -> L[1]: ... @property - def shape(self) -> Tuple[()]: ... + def shape(self) -> tuple[()]: ... @property - def strides(self) -> Tuple[()]: ... + def strides(self) -> tuple[()]: ... def byteswap(self: _ScalarType, inplace: L[False] = ...) -> _ScalarType: ... @property def flat(self: _ScalarType) -> flatiter[ndarray[Any, _dtype[_ScalarType]]]: ... @@ -2513,19 +2503,19 @@ class generic(_ArrayOrScalarCommon): @overload def view( self: _ScalarType, - type: Type[ndarray[Any, Any]] = ..., + type: type[ndarray[Any, Any]] = ..., ) -> _ScalarType: ... @overload def view( self, dtype: _DTypeLike[_ScalarType], - type: Type[ndarray[Any, Any]] = ..., + type: type[ndarray[Any, Any]] = ..., ) -> _ScalarType: ... @overload def view( self, dtype: DTypeLike, - type: Type[ndarray[Any, Any]] = ..., + type: type[ndarray[Any, Any]] = ..., ) -> Any: ... @overload @@ -2542,14 +2532,14 @@ class generic(_ArrayOrScalarCommon): ) -> Any: ... def item( - self, args: L[0] | Tuple[()] | Tuple[L[0]] = ..., /, + self, args: L[0] | tuple[()] | tuple[L[0]] = ..., /, ) -> Any: ... 
@overload def take( # type: ignore[misc] self: _ScalarType, indices: _IntLike_co, - axis: Optional[SupportsIndex] = ..., + axis: None | SupportsIndex = ..., out: None = ..., mode: _ModeKind = ..., ) -> _ScalarType: ... @@ -2557,7 +2547,7 @@ class generic(_ArrayOrScalarCommon): def take( # type: ignore[misc] self: _ScalarType, indices: _ArrayLikeInt_co, - axis: Optional[SupportsIndex] = ..., + axis: None | SupportsIndex = ..., out: None = ..., mode: _ModeKind = ..., ) -> ndarray[Any, _dtype[_ScalarType]]: ... @@ -2565,7 +2555,7 @@ class generic(_ArrayOrScalarCommon): def take( self, indices: _ArrayLikeInt_co, - axis: Optional[SupportsIndex] = ..., + axis: None | SupportsIndex = ..., out: _NdArraySubClass = ..., mode: _ModeKind = ..., ) -> _NdArraySubClass: ... @@ -2573,7 +2563,7 @@ class generic(_ArrayOrScalarCommon): def repeat( self: _ScalarType, repeats: _ArrayLikeInt_co, - axis: Optional[SupportsIndex] = ..., + axis: None | SupportsIndex = ..., ) -> ndarray[Any, _dtype[_ScalarType]]: ... def flatten( @@ -2596,9 +2586,9 @@ class generic(_ArrayOrScalarCommon): ) -> ndarray[Any, _dtype[_ScalarType]]: ... def squeeze( - self: _ScalarType, axis: Union[L[0], Tuple[()]] = ... + self: _ScalarType, axis: L[0] | tuple[()] = ... ) -> _ScalarType: ... - def transpose(self: _ScalarType, axes: Tuple[()] = ..., /) -> _ScalarType: ... + def transpose(self: _ScalarType, axes: tuple[()] = ..., /) -> _ScalarType: ... # Keep `dtype` at the bottom to avoid name conflicts with `np.dtype` @property def dtype(self: _ScalarType) -> _dtype[_ScalarType]: ... @@ -2637,7 +2627,7 @@ class number(generic, Generic[_NBit1]): # type: ignore class bool_(generic): def __init__(self, value: object = ..., /) -> None: ... def item( - self, args: L[0] | Tuple[()] | Tuple[L[0]] = ..., /, + self, args: L[0] | tuple[()] | tuple[L[0]] = ..., /, ) -> bool: ... def tolist(self) -> bool: ... 
@property @@ -2713,14 +2703,14 @@ class datetime64(generic): def __init__( self, value: None | datetime64 | _CharLike_co | _DatetimeScalar = ..., - format: _CharLike_co | Tuple[_CharLike_co, _IntLike_co] = ..., + format: _CharLike_co | tuple[_CharLike_co, _IntLike_co] = ..., /, ) -> None: ... @overload def __init__( self, value: int, - format: _CharLike_co | Tuple[_CharLike_co, _IntLike_co], + format: _CharLike_co | tuple[_CharLike_co, _IntLike_co], /, ) -> None: ... def __add__(self, other: _TD64Like_co) -> datetime64: ... @@ -2759,7 +2749,7 @@ class integer(number[_NBit1]): # type: ignore # NOTE: `__index__` is technically defined in the bottom-most # sub-classes (`int64`, `uint32`, etc) def item( - self, args: L[0] | Tuple[()] | Tuple[L[0]] = ..., /, + self, args: L[0] | tuple[()] | tuple[L[0]] = ..., /, ) -> int: ... def tolist(self) -> int: ... def is_integer(self) -> L[True]: ... @@ -2828,7 +2818,7 @@ class timedelta64(generic): def __init__( self, value: None | int | _CharLike_co | dt.timedelta | timedelta64 = ..., - format: _CharLike_co | Tuple[_CharLike_co, _IntLike_co] = ..., + format: _CharLike_co | tuple[_CharLike_co, _IntLike_co] = ..., /, ) -> None: ... @property @@ -2856,8 +2846,8 @@ class timedelta64(generic): def __rfloordiv__(self, other: timedelta64) -> int64: ... def __mod__(self, other: timedelta64) -> timedelta64: ... def __rmod__(self, other: timedelta64) -> timedelta64: ... - def __divmod__(self, other: timedelta64) -> Tuple[int64, timedelta64]: ... - def __rdivmod__(self, other: timedelta64) -> Tuple[int64, timedelta64]: ... + def __divmod__(self, other: timedelta64) -> tuple[int64, timedelta64]: ... + def __rdivmod__(self, other: timedelta64) -> tuple[int64, timedelta64]: ... 
__lt__: _ComparisonOp[_TD64Like_co, _ArrayLikeTD64_co] __le__: _ComparisonOp[_TD64Like_co, _ArrayLikeTD64_co] __gt__: _ComparisonOp[_TD64Like_co, _ArrayLikeTD64_co] @@ -2905,7 +2895,7 @@ uint = unsignedinteger[_NBitInt] ulonglong = unsignedinteger[_NBitLongLong] class inexact(number[_NBit1]): # type: ignore - def __getnewargs__(self: inexact[_64Bit]) -> Tuple[float, ...]: ... + def __getnewargs__(self: inexact[_64Bit]) -> tuple[float, ...]: ... _IntType = TypeVar("_IntType", bound=integer) _FloatType = TypeVar('_FloatType', bound=floating) @@ -2913,20 +2903,20 @@ _FloatType = TypeVar('_FloatType', bound=floating) class floating(inexact[_NBit1]): def __init__(self, value: _FloatValue = ..., /) -> None: ... def item( - self, args: L[0] | Tuple[()] | Tuple[L[0]] = ..., + self, args: L[0] | tuple[()] | tuple[L[0]] = ..., /, ) -> float: ... def tolist(self) -> float: ... def is_integer(self) -> bool: ... def hex(self: float64) -> str: ... @classmethod - def fromhex(cls: Type[float64], string: str, /) -> float64: ... - def as_integer_ratio(self) -> Tuple[int, int]: ... + def fromhex(cls: type[float64], string: str, /) -> float64: ... + def as_integer_ratio(self) -> tuple[int, int]: ... if sys.version_info >= (3, 9): def __ceil__(self: float64) -> int: ... def __floor__(self: float64) -> int: ... def __trunc__(self: float64) -> int: ... - def __getnewargs__(self: float64) -> Tuple[float]: ... + def __getnewargs__(self: float64) -> tuple[float]: ... def __getformat__(self: float64, typestr: L["double", "float"], /) -> str: ... @overload def __round__(self, ndigits: None = ...) -> int: ... @@ -2967,7 +2957,7 @@ longfloat = floating[_NBitLongDouble] class complexfloating(inexact[_NBit1], Generic[_NBit1, _NBit2]): def __init__(self, value: _ComplexValue = ..., /) -> None: ... def item( - self, args: L[0] | Tuple[()] | Tuple[L[0]] = ..., /, + self, args: L[0] | tuple[()] | tuple[L[0]] = ..., /, ) -> complex: ... def tolist(self) -> complex: ... 
@property @@ -2975,7 +2965,7 @@ class complexfloating(inexact[_NBit1], Generic[_NBit1, _NBit2]): @property def imag(self) -> floating[_NBit2]: ... # type: ignore[override] def __abs__(self) -> floating[_NBit1]: ... # type: ignore[override] - def __getnewargs__(self: complex128) -> Tuple[float, float]: ... + def __getnewargs__(self: complex128) -> tuple[float, float]: ... # NOTE: Deprecated # def __round__(self, ndigits=...): ... __add__: _ComplexOp[_NBit1] @@ -3021,7 +3011,7 @@ class void(flexible): def __getitem__(self, key: list[str]) -> void: ... def __setitem__( self, - key: str | List[str] | SupportsIndex, + key: str | list[str] | SupportsIndex, value: ArrayLike, ) -> None: ... @@ -3042,7 +3032,7 @@ class bytes_(character, bytes): self, value: str, /, encoding: str = ..., errors: str = ... ) -> None: ... def item( - self, args: L[0] | Tuple[()] | Tuple[L[0]] = ..., /, + self, args: L[0] | tuple[()] | tuple[L[0]] = ..., /, ) -> bytes: ... def tolist(self) -> bytes: ... @@ -3057,7 +3047,7 @@ class str_(character, str): self, value: bytes, /, encoding: str = ..., errors: str = ... ) -> None: ... def item( - self, args: L[0] | Tuple[()] | Tuple[L[0]] = ..., /, + self, args: L[0] | tuple[()] | tuple[L[0]] = ..., /, ) -> str: ... def tolist(self) -> str: ... @@ -3132,7 +3122,7 @@ class ufunc: @property def ntypes(self) -> int: ... @property - def types(self) -> List[str]: ... + def types(self) -> list[str]: ... # Broad return type because it has to encompass things like # # >>> np.logical_and.identity is True @@ -3147,7 +3137,7 @@ class ufunc: def identity(self) -> Any: ... # This is None for ufuncs and a string for gufuncs. @property - def signature(self) -> Optional[str]: ... + def signature(self) -> None | str: ... # The next four methods will always exist, but they will just # raise a ValueError ufuncs with that don't accept two input # arguments and return one output argument. 
Because of that we @@ -3276,7 +3266,7 @@ class AxisError(ValueError, IndexError): @overload def __init__(self, axis: int, ndim: int, msg_prefix: None | str = ...) -> None: ... -_CallType = TypeVar("_CallType", bound=Union[_ErrFunc, _SupportsWrite[str]]) +_CallType = TypeVar("_CallType", bound=_ErrFunc | _SupportsWrite[str]) class errstate(Generic[_CallType], ContextDecorator): call: _CallType @@ -3287,18 +3277,18 @@ class errstate(Generic[_CallType], ContextDecorator): self, *, call: _CallType = ..., - all: Optional[_ErrKind] = ..., - divide: Optional[_ErrKind] = ..., - over: Optional[_ErrKind] = ..., - under: Optional[_ErrKind] = ..., - invalid: Optional[_ErrKind] = ..., + all: None | _ErrKind = ..., + divide: None | _ErrKind = ..., + over: None | _ErrKind = ..., + under: None | _ErrKind = ..., + invalid: None | _ErrKind = ..., ) -> None: ... def __enter__(self) -> None: ... def __exit__( self, - exc_type: Optional[Type[BaseException]], - exc_value: Optional[BaseException], - traceback: Optional[TracebackType], + exc_type: None | type[BaseException], + exc_value: None | BaseException, + traceback: None | TracebackType, /, ) -> None: ... @@ -3320,10 +3310,13 @@ class ndenumerate(Generic[_ScalarType]): def __new__(cls, arr: float | _NestedSequence[float]) -> ndenumerate[float_]: ... @overload def __new__(cls, arr: complex | _NestedSequence[complex]) -> ndenumerate[complex_]: ... - def __next__(self: ndenumerate[_ScalarType]) -> Tuple[_Shape, _ScalarType]: ... + def __next__(self: ndenumerate[_ScalarType]) -> tuple[_Shape, _ScalarType]: ... def __iter__(self: _T) -> _T: ... class ndindex: + @overload + def __init__(self, shape: tuple[SupportsIndex, ...], /) -> None: ... + @overload def __init__(self, *shape: SupportsIndex) -> None: ... def __iter__(self: _T) -> _T: ... def __next__(self) -> _Shape: ... 
@@ -3331,7 +3324,7 @@ class ndindex: class DataSource: def __init__( self, - destpath: Union[None, str, os.PathLike[str]] = ..., + destpath: None | str | os.PathLike[str] = ..., ) -> None: ... def __del__(self) -> None: ... def abspath(self, path: str) -> str: ... @@ -3343,8 +3336,8 @@ class DataSource: self, path: str, mode: str = ..., - encoding: Optional[str] = ..., - newline: Optional[str] = ..., + encoding: None | str = ..., + newline: None | str = ..., ) -> IO[Any]: ... # TODO: The type of each `__next__` and `iters` return-type depends @@ -3355,7 +3348,7 @@ class broadcast: @property def index(self) -> int: ... @property - def iters(self) -> Tuple[flatiter[Any], ...]: ... + def iters(self) -> tuple[flatiter[Any], ...]: ... @property def nd(self) -> int: ... @property @@ -3366,7 +3359,7 @@ class broadcast: def shape(self) -> _Shape: ... @property def size(self) -> int: ... - def __next__(self) -> Tuple[Any, ...]: ... + def __next__(self) -> tuple[Any, ...]: ... def __iter__(self: _T) -> _T: ... def reset(self) -> None: ... @@ -3374,7 +3367,7 @@ class busdaycalendar: def __new__( cls, weekmask: ArrayLike = ..., - holidays: ArrayLike = ..., + holidays: ArrayLike | dt.date | _NestedSequence[dt.date] = ..., ) -> busdaycalendar: ... @property def weekmask(self) -> NDArray[bool_]: ... @@ -3408,7 +3401,7 @@ class finfo(Generic[_FloatType]): ) -> finfo[floating[_NBit1]]: ... @overload def __new__( - cls, dtype: complex | float | Type[complex] | Type[float] + cls, dtype: complex | float | type[complex] | type[float] ) -> finfo[float_]: ... @overload def __new__( @@ -3428,7 +3421,7 @@ class iinfo(Generic[_IntType]): @overload def __new__(cls, dtype: _IntType | _DTypeLike[_IntType]) -> iinfo[_IntType]: ... @overload - def __new__(cls, dtype: int | Type[int]) -> iinfo[int_]: ... + def __new__(cls, dtype: int | type[int]) -> iinfo[int_]: ... @overload def __new__(cls, dtype: str) -> iinfo[Any]: ... 
@@ -3482,29 +3475,29 @@ class recarray(ndarray[_ShapeType, _DType_co]): def __getattribute__(self, attr: str) -> Any: ... def __setattr__(self, attr: str, val: ArrayLike) -> None: ... @overload - def __getitem__(self, indx: Union[ - SupportsIndex, - _ArrayLikeInt_co, - Tuple[SupportsIndex | _ArrayLikeInt_co, ...], - ]) -> Any: ... - @overload - def __getitem__(self: recarray[Any, dtype[void]], indx: Union[ - None, - slice, - ellipsis, - SupportsIndex, - _ArrayLikeInt_co, - Tuple[None | slice | ellipsis | _ArrayLikeInt_co | SupportsIndex, ...], - ]) -> recarray[Any, _DType_co]: ... - @overload - def __getitem__(self, indx: Union[ - None, - slice, - ellipsis, - SupportsIndex, - _ArrayLikeInt_co, - Tuple[None | slice | ellipsis | _ArrayLikeInt_co | SupportsIndex, ...], - ]) -> ndarray[Any, _DType_co]: ... + def __getitem__(self, indx: ( + SupportsIndex + | _ArrayLikeInt_co + | tuple[SupportsIndex | _ArrayLikeInt_co, ...] + )) -> Any: ... + @overload + def __getitem__(self: recarray[Any, dtype[void]], indx: ( + None + | slice + | ellipsis + | SupportsIndex + | _ArrayLikeInt_co + | tuple[None | slice | ellipsis | _ArrayLikeInt_co | SupportsIndex, ...] + )) -> recarray[Any, _DType_co]: ... + @overload + def __getitem__(self, indx: ( + None + | slice + | ellipsis + | SupportsIndex + | _ArrayLikeInt_co + | tuple[None | slice | ellipsis | _ArrayLikeInt_co | SupportsIndex, ...] + )) -> ndarray[Any, _DType_co]: ... @overload def __getitem__(self, indx: str) -> NDArray[Any]: ... @overload @@ -3574,18 +3567,18 @@ class nditer: def __enter__(self) -> nditer: ... def __exit__( self, - exc_type: None | Type[BaseException], + exc_type: None | type[BaseException], exc_value: None | BaseException, traceback: None | TracebackType, ) -> None: ... def __iter__(self) -> nditer: ... - def __next__(self) -> Tuple[NDArray[Any], ...]: ... + def __next__(self) -> tuple[NDArray[Any], ...]: ... def __len__(self) -> int: ... def __copy__(self) -> nditer: ... 
@overload def __getitem__(self, index: SupportsIndex) -> NDArray[Any]: ... @overload - def __getitem__(self, index: slice) -> Tuple[NDArray[Any], ...]: ... + def __getitem__(self, index: slice) -> tuple[NDArray[Any], ...]: ... def __setitem__(self, index: slice | SupportsIndex, value: ArrayLike) -> None: ... def close(self) -> None: ... def copy(self) -> nditer: ... @@ -3596,7 +3589,7 @@ class nditer: def remove_multi_index(self) -> None: ... def reset(self) -> None: ... @property - def dtypes(self) -> Tuple[dtype[Any], ...]: ... + def dtypes(self) -> tuple[dtype[Any], ...]: ... @property def finished(self) -> bool: ... @property @@ -3612,23 +3605,23 @@ class nditer: @property def iterindex(self) -> int: ... @property - def iterrange(self) -> Tuple[int, ...]: ... + def iterrange(self) -> tuple[int, ...]: ... @property def itersize(self) -> int: ... @property - def itviews(self) -> Tuple[NDArray[Any], ...]: ... + def itviews(self) -> tuple[NDArray[Any], ...]: ... @property - def multi_index(self) -> Tuple[int, ...]: ... + def multi_index(self) -> tuple[int, ...]: ... @property def ndim(self) -> int: ... @property def nop(self) -> int: ... @property - def operands(self) -> Tuple[NDArray[Any], ...]: ... + def operands(self) -> tuple[NDArray[Any], ...]: ... @property - def shape(self) -> Tuple[int, ...]: ... + def shape(self) -> tuple[int, ...]: ... @property - def value(self) -> Tuple[NDArray[Any], ...]: ... + def value(self) -> tuple[NDArray[Any], ...]: ... _MemMapModeKind = L[ "readonly", "r", @@ -3646,10 +3639,10 @@ class memmap(ndarray[_ShapeType, _DType_co]): def __new__( subtype, filename: str | bytes | os.PathLike[str] | os.PathLike[bytes] | _MemMapIOProtocol, - dtype: Type[uint8] = ..., + dtype: type[uint8] = ..., mode: _MemMapModeKind = ..., offset: int = ..., - shape: None | int | Tuple[int, ...] = ..., + shape: None | int | tuple[int, ...] = ..., order: _OrderKACF = ..., ) -> memmap[Any, dtype[uint8]]: ... 
@overload @@ -3659,7 +3652,7 @@ class memmap(ndarray[_ShapeType, _DType_co]): dtype: _DTypeLike[_ScalarType], mode: _MemMapModeKind = ..., offset: int = ..., - shape: None | int | Tuple[int, ...] = ..., + shape: None | int | tuple[int, ...] = ..., order: _OrderKACF = ..., ) -> memmap[Any, dtype[_ScalarType]]: ... @overload @@ -3669,14 +3662,14 @@ class memmap(ndarray[_ShapeType, _DType_co]): dtype: DTypeLike, mode: _MemMapModeKind = ..., offset: int = ..., - shape: None | int | Tuple[int, ...] = ..., + shape: None | int | tuple[int, ...] = ..., order: _OrderKACF = ..., ) -> memmap[Any, dtype[Any]]: ... def __array_finalize__(self, obj: memmap[Any, Any]) -> None: ... def __array_wrap__( self, array: memmap[_ShapeType, _DType_co], - context: None | Tuple[ufunc, Tuple[Any, ...], int] = ..., + context: None | tuple[ufunc, tuple[Any, ...], int] = ..., ) -> Any: ... def flush(self) -> None: ... @@ -3685,7 +3678,7 @@ class vectorize: cache: bool signature: None | str otypes: None | str - excluded: Set[int | str] + excluded: set[int | str] __doc__: None | str def __init__( self, @@ -3785,20 +3778,20 @@ class matrix(ndarray[_ShapeType, _DType_co]): def __array_finalize__(self, obj: NDArray[Any]) -> None: ... @overload - def __getitem__(self, key: Union[ - SupportsIndex, - _ArrayLikeInt_co, - Tuple[SupportsIndex | _ArrayLikeInt_co, ...], - ]) -> Any: ... + def __getitem__(self, key: ( + SupportsIndex + | _ArrayLikeInt_co + | tuple[SupportsIndex | _ArrayLikeInt_co, ...] + )) -> Any: ... @overload - def __getitem__(self, key: Union[ - None, - slice, - ellipsis, - SupportsIndex, - _ArrayLikeInt_co, - Tuple[None | slice | ellipsis | _ArrayLikeInt_co | SupportsIndex, ...], - ]) -> matrix[Any, _DType_co]: ... + def __getitem__(self, key: ( + None + | slice + | ellipsis + | SupportsIndex + | _ArrayLikeInt_co + | tuple[None | slice | ellipsis | _ArrayLikeInt_co | SupportsIndex, ...] + )) -> matrix[Any, _DType_co]: ... 
@overload def __getitem__(self: NDArray[void], key: str) -> matrix[Any, dtype[Any]]: ... @overload @@ -3895,7 +3888,7 @@ class matrix(ndarray[_ShapeType, _DType_co]): def ptp(self, axis: None | _ShapeLike = ..., out: _NdArraySubClass = ...) -> _NdArraySubClass: ... def squeeze(self, axis: None | _ShapeLike = ...) -> matrix[Any, _DType_co]: ... - def tolist(self: matrix[Any, dtype[_SupportsItem[_T]]]) -> List[List[_T]]: ... # type: ignore[typevar] + def tolist(self: matrix[Any, dtype[_SupportsItem[_T]]]) -> list[list[_T]]: ... # type: ignore[typevar] def ravel(self, order: _OrderKACF = ...) -> matrix[Any, _DType_co]: ... def flatten(self, order: _OrderKACF = ...) -> matrix[Any, _DType_co]: ... @@ -4342,4 +4335,3 @@ class _SupportsDLPack(Protocol[_T_contra]): def __dlpack__(self, *, stream: None | _T_contra = ...) -> _PyCapsule: ... def _from_dlpack(__obj: _SupportsDLPack[None]) -> NDArray[Any]: ... - diff --git a/numpy/_pytesttester.pyi b/numpy/_pytesttester.pyi index 0be64b3f7..67ac87b33 100644 --- a/numpy/_pytesttester.pyi +++ b/numpy/_pytesttester.pyi @@ -1,6 +1,7 @@ -from typing import List, Iterable, Literal as L +from collections.abc import Iterable +from typing import Literal as L -__all__: List[str] +__all__: list[str] class PytestTester: module_name: str diff --git a/numpy/array_api/_array_object.py b/numpy/array_api/_array_object.py index 8794c5ea5..04d9f8a24 100644 --- a/numpy/array_api/_array_object.py +++ b/numpy/array_api/_array_object.py @@ -30,9 +30,11 @@ from ._dtypes import ( ) from typing import TYPE_CHECKING, Optional, Tuple, Union, Any +import types if TYPE_CHECKING: from ._typing import Any, PyCapsule, Device, Dtype + import numpy.typing as npt import numpy as np @@ -54,6 +56,7 @@ class Array: functions, such as asarray(). """ + _array: np.ndarray # Use a custom constructor instead of __init__, as manually initializing # this class is not supported API. 
@@ -108,11 +111,22 @@ class Array: mid = np.array2string(self._array, separator=', ', prefix=prefix, suffix=suffix) return prefix + mid + suffix + # This function is not required by the spec, but we implement it here for + # convenience so that np.asarray(np.array_api.Array) will work. + def __array__(self, dtype: None | np.dtype[Any] = None) -> npt.NDArray[Any]: + """ + Warning: this method is NOT part of the array API spec. Implementers + of other libraries need not include it, and users should not assume it + will be present in other implementations. + + """ + return np.asarray(self._array, dtype=dtype) + # These are various helper functions to make the array behavior match the # spec in places where it either deviates from or is more strict than # NumPy behavior - def _check_allowed_dtypes(self, other, dtype_category, op): + def _check_allowed_dtypes(self, other: bool | int | float | Array, dtype_category: str, op: str) -> Array: """ Helper function for operators to only allow specific input dtypes @@ -188,7 +202,7 @@ class Array: return Array._new(np.array(scalar, self.dtype)) @staticmethod - def _normalize_two_args(x1, x2): + def _normalize_two_args(x1, x2) -> Tuple[Array, Array]: """ Normalize inputs to two arg functions to fix type promotion rules @@ -403,7 +417,7 @@ class Array: def __array_namespace__( self: Array, /, *, api_version: Optional[str] = None - ) -> Any: + ) -> types.ModuleType: if api_version is not None and not api_version.startswith("2021."): raise ValueError(f"Unrecognized array API version: {api_version!r}") return array_api @@ -1072,4 +1086,4 @@ class Array: # https://data-apis.org/array-api/latest/API_specification/array_object.html#t if self.ndim != 2: raise ValueError("x.T requires x to have 2 dimensions. 
Use x.mT to transpose stacks of matrices and permute_dims() to permute dimensions.") - return self._array.T + return self.__class__._new(self._array.T) diff --git a/numpy/array_api/_statistical_functions.py b/numpy/array_api/_statistical_functions.py index 7bee3f4db..5bc831ac2 100644 --- a/numpy/array_api/_statistical_functions.py +++ b/numpy/array_api/_statistical_functions.py @@ -65,8 +65,8 @@ def prod( # Note: sum() and prod() always upcast float32 to float64 for dtype=None # We need to do so here before computing the product to avoid overflow if dtype is None and x.dtype == float32: - x = asarray(x, dtype=float64) - return Array._new(np.prod(x._array, axis=axis, keepdims=keepdims)) + dtype = float64 + return Array._new(np.prod(x._array, dtype=dtype, axis=axis, keepdims=keepdims)) def std( diff --git a/numpy/array_api/tests/test_array_object.py b/numpy/array_api/tests/test_array_object.py index 12479d765..b980bacca 100644 --- a/numpy/array_api/tests/test_array_object.py +++ b/numpy/array_api/tests/test_array_object.py @@ -4,6 +4,7 @@ from numpy.testing import assert_raises import numpy as np from .. 
import ones, asarray, result_type, all, equal +from .._array_object import Array from .._dtypes import ( _all_dtypes, _boolean_dtypes, @@ -301,3 +302,23 @@ def test_device_property(): assert all(equal(asarray(a, device='cpu'), a)) assert_raises(ValueError, lambda: asarray(a, device='gpu')) + +def test_array_properties(): + a = ones((1, 2, 3)) + b = ones((2, 3)) + assert_raises(ValueError, lambda: a.T) + + assert isinstance(b.T, Array) + assert b.T.shape == (3, 2) + + assert isinstance(a.mT, Array) + assert a.mT.shape == (1, 3, 2) + assert isinstance(b.mT, Array) + assert b.mT.shape == (3, 2) + +def test___array__(): + a = ones((2, 3), dtype=int16) + assert np.asarray(a) is a._array + b = np.asarray(a, dtype=np.float64) + assert np.all(np.equal(b, np.ones((2, 3), dtype=np.float64))) + assert b.dtype == np.float64 diff --git a/numpy/compat/py3k.py b/numpy/compat/py3k.py index 1fa17621a..3d10bb988 100644 --- a/numpy/compat/py3k.py +++ b/numpy/compat/py3k.py @@ -107,7 +107,9 @@ class contextlib_nullcontext: def npy_load_module(name, fn, info=None): """ - Load a module. + Load a module. Uses ``load_module`` which will be deprecated in python + 3.12. An alternative that uses ``exec_module`` is in + numpy.distutils.misc_util.exec_mod_from_location .. 
versionadded:: 1.11.2 diff --git a/numpy/core/_add_newdocs.py b/numpy/core/_add_newdocs.py index 078c58976..219383e1e 100644 --- a/numpy/core/_add_newdocs.py +++ b/numpy/core/_add_newdocs.py @@ -2265,10 +2265,6 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_interface__', """Array protocol: Python side.""")) -add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_finalize__', - """None.""")) - - add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_priority__', """Array priority.""")) @@ -2278,12 +2274,12 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_struct__', add_newdoc('numpy.core.multiarray', 'ndarray', ('__dlpack__', """a.__dlpack__(*, stream=None) - + DLPack Protocol: Part of the Array API.""")) add_newdoc('numpy.core.multiarray', 'ndarray', ('__dlpack_device__', """a.__dlpack_device__() - + DLPack Protocol: Part of the Array API.""")) add_newdoc('numpy.core.multiarray', 'ndarray', ('base', @@ -2392,6 +2388,12 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('dtype', """ Data-type of the array's elements. + .. warning:: + + Setting ``arr.dtype`` is discouraged and may be deprecated in the + future. Setting will replace the ``dtype`` without modifying the + memory (see also `ndarray.view` and `ndarray.astype`). + Parameters ---------- None @@ -2402,6 +2404,8 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('dtype', See Also -------- + ndarray.astype : Cast the values contained in the array to a new data-type. + ndarray.view : Create a view of the same data but a different data-type. numpy.dtype Examples @@ -2477,11 +2481,6 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('flags', This array is a copy of some other array. The C-API function PyArray_ResolveWritebackIfCopy must be called before deallocating to the base array will be updated with the contents of this array. - UPDATEIFCOPY (U) - (Deprecated, use WRITEBACKIFCOPY) This array is a copy of some other array. 
- When this array is - deallocated, the base array will be updated with the contents of - this array. FNC F_CONTIGUOUS and not C_CONTIGUOUS. FORC @@ -2499,13 +2498,12 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('flags', or by using lowercased attribute names (as in ``a.flags.writeable``). Short flag names are only supported in dictionary access. - Only the WRITEBACKIFCOPY, UPDATEIFCOPY, WRITEABLE, and ALIGNED flags can be + Only the WRITEBACKIFCOPY, WRITEABLE, and ALIGNED flags can be changed by the user, via direct assignment to the attribute or dictionary entry, or by calling `ndarray.setflags`. The array flags cannot be set arbitrarily: - - UPDATEIFCOPY can only be set ``False``. - WRITEBACKIFCOPY can only be set ``False``. - ALIGNED can only be set ``True`` if the data is truly aligned. - WRITEABLE can only be set ``True`` if the array owns its own memory @@ -2633,6 +2631,11 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('shape', the array and the remaining dimensions. Reshaping an array in-place will fail if a copy is required. + .. warning:: + + Setting ``arr.shape`` is discouraged and may be deprecated in the + future. Using `ndarray.reshape` is the preferred approach. + Examples -------- >>> x = np.array([1, 2, 3, 4]) @@ -2658,8 +2661,9 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('shape', See Also -------- - numpy.reshape : similar function - ndarray.reshape : similar method + numpy.shape : Equivalent getter function. + numpy.reshape : Function similar to setting ``shape``. + ndarray.reshape : Method similar to setting ``shape``. """)) @@ -2702,6 +2706,12 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('strides', A more detailed explanation of strides can be found in the "ndarray.rst" file in the NumPy reference guide. + .. warning:: + + Setting ``arr.strides`` is discouraged and may be deprecated in the + future. `numpy.lib.stride_tricks.as_strided` should be preferred + to create a new view of the same data in a safer way. 
+ Notes ----- Imagine an array of 32-bit integers (each 4 bytes):: @@ -2797,6 +2807,14 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('__array__', """)) +add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_finalize__', + """a.__array_finalize__(obj, /) + + Present so subclasses can call super. Does nothing. + + """)) + + add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_prepare__', """a.__array_prepare__(array[, context], /) @@ -3906,13 +3924,13 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('setflags', """ a.setflags(write=None, align=None, uic=None) - Set array flags WRITEABLE, ALIGNED, (WRITEBACKIFCOPY and UPDATEIFCOPY), + Set array flags WRITEABLE, ALIGNED, WRITEBACKIFCOPY, respectively. These Boolean-valued flags affect how numpy interprets the memory area used by `a` (see Notes below). The ALIGNED flag can only be set to True if the data is actually aligned according to the type. - The WRITEBACKIFCOPY and (deprecated) UPDATEIFCOPY flags can never be set + The WRITEBACKIFCOPY and flag can never be set to True. The flag WRITEABLE can only be set to True if the array owns its own memory, or the ultimate owner of the memory exposes a writeable buffer interface, or is a string. (The exception for string is made so that @@ -3932,15 +3950,13 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('setflags', Array flags provide information about how the memory area used for the array is to be interpreted. There are 7 Boolean flags in use, only four of which can be changed by the user: - WRITEBACKIFCOPY, UPDATEIFCOPY, WRITEABLE, and ALIGNED. + WRITEBACKIFCOPY, WRITEABLE, and ALIGNED. WRITEABLE (W) the data area can be written to; ALIGNED (A) the data and strides are aligned appropriately for the hardware (as determined by the compiler); - UPDATEIFCOPY (U) (deprecated), replaced by WRITEBACKIFCOPY; - WRITEBACKIFCOPY (X) this array is a copy of some other array (referenced by .base). 
When the C-API function PyArray_ResolveWritebackIfCopy is called, the base array will be updated with the contents of this array. @@ -3964,7 +3980,6 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('setflags', WRITEABLE : True ALIGNED : True WRITEBACKIFCOPY : False - UPDATEIFCOPY : False >>> y.setflags(write=0, align=0) >>> y.flags C_CONTIGUOUS : True @@ -3973,7 +3988,6 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('setflags', WRITEABLE : False ALIGNED : False WRITEBACKIFCOPY : False - UPDATEIFCOPY : False >>> y.setflags(uic=1) Traceback (most recent call last): File "<stdin>", line 1, in <module> @@ -4459,14 +4473,13 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('view', memory. For ``a.view(some_dtype)``, if ``some_dtype`` has a different number of - bytes per entry than the previous dtype (for example, converting a - regular array to a structured array), then the behavior of the view - cannot be predicted just from the superficial appearance of ``a`` (shown - by ``print(a)``). It also depends on exactly how ``a`` is stored in - memory. Therefore if ``a`` is C-ordered versus fortran-ordered, versus - defined as a slice or transpose, etc., the view may give different - results. + bytes per entry than the previous dtype (for example, converting a regular + array to a structured array), then the last axis of ``a`` must be + contiguous. This axis will be resized in the result. + .. versionchanged:: 1.23.0 + Only the last axis needs to be contiguous. Previously, the entire array + had to be C-contiguous. 
Examples -------- @@ -4511,19 +4524,34 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('view', Views that change the dtype size (bytes per entry) should normally be avoided on arrays defined by slices, transposes, fortran-ordering, etc.: - >>> x = np.array([[1,2,3],[4,5,6]], dtype=np.int16) - >>> y = x[:, 0:2] + >>> x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int16) + >>> y = x[:, ::2] >>> y - array([[1, 2], - [4, 5]], dtype=int16) + array([[1, 3], + [4, 6]], dtype=int16) >>> y.view(dtype=[('width', np.int16), ('length', np.int16)]) Traceback (most recent call last): ... - ValueError: To change to a dtype of a different size, the array must be C-contiguous + ValueError: To change to a dtype of a different size, the last axis must be contiguous >>> z = y.copy() >>> z.view(dtype=[('width', np.int16), ('length', np.int16)]) - array([[(1, 2)], - [(4, 5)]], dtype=[('width', '<i2'), ('length', '<i2')]) + array([[(1, 3)], + [(4, 6)]], dtype=[('width', '<i2'), ('length', '<i2')]) + + However, views that change dtype are totally fine for arrays with a + contiguous last axis, even if the rest of the axes are not C-contiguous: + + >>> x = np.arange(2 * 3 * 4, dtype=np.int8).reshape(2, 3, 4) + >>> x.transpose(1, 0, 2).view(np.int16) + array([[[ 256, 770], + [3340, 3854]], + <BLANKLINE> + [[1284, 1798], + [4368, 4882]], + <BLANKLINE> + [[2312, 2826], + [5396, 5910]]], dtype=int16) + """)) diff --git a/numpy/core/_asarray.py b/numpy/core/_asarray.py index ecb4e7c39..89d422e99 100644 --- a/numpy/core/_asarray.py +++ b/numpy/core/_asarray.py @@ -78,7 +78,6 @@ def require(a, dtype=None, requirements=None, *, like=None): WRITEABLE : True ALIGNED : True WRITEBACKIFCOPY : False - UPDATEIFCOPY : False >>> y = np.require(x, dtype=np.float32, requirements=['A', 'O', 'W', 'F']) >>> y.flags @@ -88,7 +87,6 @@ def require(a, dtype=None, requirements=None, *, like=None): WRITEABLE : True ALIGNED : True WRITEBACKIFCOPY : False - UPDATEIFCOPY : False """ if like is not None: diff --git 
a/numpy/core/_asarray.pyi b/numpy/core/_asarray.pyi index fee9b7b6e..0da2de912 100644 --- a/numpy/core/_asarray.pyi +++ b/numpy/core/_asarray.pyi @@ -1,4 +1,5 @@ -from typing import TypeVar, Union, Iterable, overload, Literal +from collections.abc import Iterable +from typing import TypeVar, Union, overload, Literal from numpy import ndarray from numpy.typing import ArrayLike, DTypeLike @@ -19,7 +20,7 @@ _RequirementsWithE = Union[_Requirements, _E] def require( a: _ArrayType, dtype: None = ..., - requirements: Union[None, _Requirements, Iterable[_Requirements]] = ..., + requirements: None | _Requirements | Iterable[_Requirements] = ..., *, like: ArrayLike = ... ) -> _ArrayType: ... @@ -27,7 +28,7 @@ def require( def require( a: object, dtype: DTypeLike = ..., - requirements: Union[_E, Iterable[_RequirementsWithE]] = ..., + requirements: _E | Iterable[_RequirementsWithE] = ..., *, like: ArrayLike = ... ) -> ndarray: ... @@ -35,7 +36,7 @@ def require( def require( a: object, dtype: DTypeLike = ..., - requirements: Union[None, _Requirements, Iterable[_Requirements]] = ..., + requirements: None | _Requirements | Iterable[_Requirements] = ..., *, like: ArrayLike = ... ) -> ndarray: ... 
diff --git a/numpy/core/_internal.pyi b/numpy/core/_internal.pyi index f4bfd770f..8a25ef2cb 100644 --- a/numpy/core/_internal.pyi +++ b/numpy/core/_internal.pyi @@ -1,4 +1,4 @@ -from typing import Any, TypeVar, Type, overload, Optional, Generic +from typing import Any, TypeVar, overload, Generic import ctypes as ct from numpy import ndarray @@ -6,7 +6,7 @@ from numpy.ctypeslib import c_intp _CastT = TypeVar("_CastT", bound=ct._CanCastTo) # Copied from `ctypes.cast` _CT = TypeVar("_CT", bound=ct._CData) -_PT = TypeVar("_PT", bound=Optional[int]) +_PT = TypeVar("_PT", bound=None | int) # TODO: Let the likes of `shape_as` and `strides_as` return `None` # for 0D arrays once we've got shape-support @@ -25,6 +25,6 @@ class _ctypes(Generic[_PT]): @property def _as_parameter_(self) -> ct.c_void_p: ... - def data_as(self, obj: Type[_CastT]) -> _CastT: ... - def shape_as(self, obj: Type[_CT]) -> ct.Array[_CT]: ... - def strides_as(self, obj: Type[_CT]) -> ct.Array[_CT]: ... + def data_as(self, obj: type[_CastT]) -> _CastT: ... + def shape_as(self, obj: type[_CT]) -> ct.Array[_CT]: ... + def strides_as(self, obj: type[_CT]) -> ct.Array[_CT]: ... 
diff --git a/numpy/core/_type_aliases.pyi b/numpy/core/_type_aliases.pyi index c10d072f9..bbead0cb5 100644 --- a/numpy/core/_type_aliases.pyi +++ b/numpy/core/_type_aliases.pyi @@ -1,13 +1,13 @@ -from typing import Dict, Union, Type, List, TypedDict +from typing import TypedDict from numpy import generic, signedinteger, unsignedinteger, floating, complexfloating class _SCTypes(TypedDict): - int: List[Type[signedinteger]] - uint: List[Type[unsignedinteger]] - float: List[Type[floating]] - complex: List[Type[complexfloating]] - others: List[type] + int: list[type[signedinteger]] + uint: list[type[unsignedinteger]] + float: list[type[floating]] + complex: list[type[complexfloating]] + others: list[type] -sctypeDict: Dict[Union[int, str], Type[generic]] +sctypeDict: dict[int | str, type[generic]] sctypes: _SCTypes diff --git a/numpy/core/_ufunc_config.py b/numpy/core/_ufunc_config.py index b40e7445e..a731f6bf7 100644 --- a/numpy/core/_ufunc_config.py +++ b/numpy/core/_ufunc_config.py @@ -290,7 +290,7 @@ def seterrcall(func): >>> save_err = np.seterr(all='log') >>> np.array([1, 2, 3]) / 0.0 - LOG: Warning: divide by zero encountered in true_divide + LOG: Warning: divide by zero encountered in divide array([inf, inf, inf]) >>> np.seterrcall(saved_handler) diff --git a/numpy/core/_ufunc_config.pyi b/numpy/core/_ufunc_config.pyi index cd7129bcb..b7c2ebefc 100644 --- a/numpy/core/_ufunc_config.pyi +++ b/numpy/core/_ufunc_config.pyi @@ -1,4 +1,5 @@ -from typing import Optional, Union, Callable, Any, Literal, TypedDict +from collections.abc import Callable +from typing import Any, Literal, TypedDict from numpy import _SupportsWrite @@ -12,25 +13,25 @@ class _ErrDict(TypedDict): invalid: _ErrKind class _ErrDictOptional(TypedDict, total=False): - all: Optional[_ErrKind] - divide: Optional[_ErrKind] - over: Optional[_ErrKind] - under: Optional[_ErrKind] - invalid: Optional[_ErrKind] + all: None | _ErrKind + divide: None | _ErrKind + over: None | _ErrKind + under: None | _ErrKind 
+ invalid: None | _ErrKind def seterr( - all: Optional[_ErrKind] = ..., - divide: Optional[_ErrKind] = ..., - over: Optional[_ErrKind] = ..., - under: Optional[_ErrKind] = ..., - invalid: Optional[_ErrKind] = ..., + all: None | _ErrKind = ..., + divide: None | _ErrKind = ..., + over: None | _ErrKind = ..., + under: None | _ErrKind = ..., + invalid: None | _ErrKind = ..., ) -> _ErrDict: ... def geterr() -> _ErrDict: ... def setbufsize(size: int) -> int: ... def getbufsize() -> int: ... def seterrcall( - func: Union[None, _ErrFunc, _SupportsWrite[str]] -) -> Union[None, _ErrFunc, _SupportsWrite[str]]: ... -def geterrcall() -> Union[None, _ErrFunc, _SupportsWrite[str]]: ... + func: None | _ErrFunc | _SupportsWrite[str] +) -> None | _ErrFunc | _SupportsWrite[str]: ... +def geterrcall() -> None | _ErrFunc | _SupportsWrite[str]: ... # See `numpy/__init__.pyi` for the `errstate` class diff --git a/numpy/core/arrayprint.pyi b/numpy/core/arrayprint.pyi index 0d338206f..996d4c782 100644 --- a/numpy/core/arrayprint.pyi +++ b/numpy/core/arrayprint.pyi @@ -1,5 +1,6 @@ from types import TracebackType -from typing import Any, Optional, Callable, Union, Type, Literal, TypedDict, SupportsIndex +from collections.abc import Callable +from typing import Any, Literal, TypedDict, SupportsIndex # Using a private class is by no means ideal, but it is simply a consequence # of a `contextlib.context` returning an instance of aforementioned class @@ -50,92 +51,92 @@ class _FormatOptions(TypedDict): suppress: bool nanstr: str infstr: str - formatter: Optional[_FormatDict] + formatter: None | _FormatDict sign: Literal["-", "+", " "] floatmode: _FloatMode legacy: Literal[False, "1.13", "1.21"] def set_printoptions( - precision: Optional[SupportsIndex] = ..., - threshold: Optional[int] = ..., - edgeitems: Optional[int] = ..., - linewidth: Optional[int] = ..., - suppress: Optional[bool] = ..., - nanstr: Optional[str] = ..., - infstr: Optional[str] = ..., - formatter: Optional[_FormatDict] = ..., 
- sign: Optional[Literal["-", "+", " "]] = ..., - floatmode: Optional[_FloatMode] = ..., + precision: None | SupportsIndex = ..., + threshold: None | int = ..., + edgeitems: None | int = ..., + linewidth: None | int = ..., + suppress: None | bool = ..., + nanstr: None | str = ..., + infstr: None | str = ..., + formatter: None | _FormatDict = ..., + sign: Literal[None, "-", "+", " "] = ..., + floatmode: None | _FloatMode = ..., *, - legacy: Optional[Literal[False, "1.13", "1.21"]] = ... + legacy: Literal[None, False, "1.13", "1.21"] = ... ) -> None: ... def get_printoptions() -> _FormatOptions: ... def array2string( a: ndarray[Any, Any], - max_line_width: Optional[int] = ..., - precision: Optional[SupportsIndex] = ..., - suppress_small: Optional[bool] = ..., + max_line_width: None | int = ..., + precision: None | SupportsIndex = ..., + suppress_small: None | bool = ..., separator: str = ..., prefix: str = ..., # NOTE: With the `style` argument being deprecated, # all arguments between `formatter` and `suffix` are de facto # keyworld-only arguments *, - formatter: Optional[_FormatDict] = ..., - threshold: Optional[int] = ..., - edgeitems: Optional[int] = ..., - sign: Optional[Literal["-", "+", " "]] = ..., - floatmode: Optional[_FloatMode] = ..., + formatter: None | _FormatDict = ..., + threshold: None | int = ..., + edgeitems: None | int = ..., + sign: Literal[None, "-", "+", " "] = ..., + floatmode: None | _FloatMode = ..., suffix: str = ..., - legacy: Optional[Literal[False, "1.13", "1.21"]] = ..., + legacy: Literal[None, False, "1.13", "1.21"] = ..., ) -> str: ... def format_float_scientific( x: _FloatLike_co, - precision: Optional[int] = ..., + precision: None | int = ..., unique: bool = ..., trim: Literal["k", ".", "0", "-"] = ..., sign: bool = ..., - pad_left: Optional[int] = ..., - exp_digits: Optional[int] = ..., - min_digits: Optional[int] = ..., + pad_left: None | int = ..., + exp_digits: None | int = ..., + min_digits: None | int = ..., ) -> str: ... 
def format_float_positional( x: _FloatLike_co, - precision: Optional[int] = ..., + precision: None | int = ..., unique: bool = ..., fractional: bool = ..., trim: Literal["k", ".", "0", "-"] = ..., sign: bool = ..., - pad_left: Optional[int] = ..., - pad_right: Optional[int] = ..., - min_digits: Optional[int] = ..., + pad_left: None | int = ..., + pad_right: None | int = ..., + min_digits: None | int = ..., ) -> str: ... def array_repr( arr: ndarray[Any, Any], - max_line_width: Optional[int] = ..., - precision: Optional[SupportsIndex] = ..., - suppress_small: Optional[bool] = ..., + max_line_width: None | int = ..., + precision: None | SupportsIndex = ..., + suppress_small: None | bool = ..., ) -> str: ... def array_str( a: ndarray[Any, Any], - max_line_width: Optional[int] = ..., - precision: Optional[SupportsIndex] = ..., - suppress_small: Optional[bool] = ..., + max_line_width: None | int = ..., + precision: None | SupportsIndex = ..., + suppress_small: None | bool = ..., ) -> str: ... def set_string_function( - f: Optional[Callable[[ndarray[Any, Any]], str]], repr: bool = ... + f: None | Callable[[ndarray[Any, Any]], str], repr: bool = ... ) -> None: ... def printoptions( - precision: Optional[SupportsIndex] = ..., - threshold: Optional[int] = ..., - edgeitems: Optional[int] = ..., - linewidth: Optional[int] = ..., - suppress: Optional[bool] = ..., - nanstr: Optional[str] = ..., - infstr: Optional[str] = ..., - formatter: Optional[_FormatDict] = ..., - sign: Optional[Literal["-", "+", " "]] = ..., - floatmode: Optional[_FloatMode] = ..., + precision: None | SupportsIndex = ..., + threshold: None | int = ..., + edgeitems: None | int = ..., + linewidth: None | int = ..., + suppress: None | bool = ..., + nanstr: None | str = ..., + infstr: None | str = ..., + formatter: None | _FormatDict = ..., + sign: Literal[None, "-", "+", " "] = ..., + floatmode: None | _FloatMode = ..., *, - legacy: Optional[Literal[False, "1.13", "1.21"]] = ... 
+ legacy: Literal[None, False, "1.13", "1.21"] = ... ) -> _GeneratorContextManager[_FormatOptions]: ... diff --git a/numpy/core/code_generators/generate_umath.py b/numpy/core/code_generators/generate_umath.py index dc71fc5c9..054150b28 100644 --- a/numpy/core/code_generators/generate_umath.py +++ b/numpy/core/code_generators/generate_umath.py @@ -4,10 +4,6 @@ import struct import sys import textwrap -sys.path.insert(0, os.path.dirname(__file__)) -import ufunc_docstrings as docstrings -sys.path.pop(0) - Zero = "PyLong_FromLong(0)" One = "PyLong_FromLong(1)" True_ = "(Py_INCREF(Py_True), Py_True)" @@ -17,6 +13,16 @@ AllOnes = "PyLong_FromLong(-1)" MinusInfinity = 'PyFloat_FromDouble(-NPY_INFINITY)' ReorderableNone = "(Py_INCREF(Py_None), Py_None)" +class docstrings: + @staticmethod + def get(place): + """ + Returns the C #definition name of docstring according + to ufunc place. C #definitions are generated by generate_umath_doc.py + in a separate C header. + """ + return 'DOC_' + place.upper().replace('.', '_') + # Sentinel value to specify using the full type description in the # function name class FullTypeDescr: @@ -322,7 +328,7 @@ defdict = { ], TD(O, f='PyNumber_Multiply'), ), -#'divide' : aliased to true_divide in umathmodule.c:initumath +#'true_divide' : aliased to divide in umathmodule.c:initumath 'floor_divide': Ufunc(2, 1, None, # One is only a unit to the right, not the left docstrings.get('numpy.core.umath.floor_divide'), @@ -336,9 +342,9 @@ defdict = { ], TD(O, f='PyNumber_FloorDivide'), ), -'true_divide': +'divide': Ufunc(2, 1, None, # One is only a unit to the right, not the left - docstrings.get('numpy.core.umath.true_divide'), + docstrings.get('numpy.core.umath.divide'), 'PyUFunc_TrueDivisionTypeResolver', TD(flts+cmplx, cfunc_alias='divide', dispatch=[('loops_arithm_fp', 'fd')]), [TypeDescription('m', FullTypeDescr, 'mq', 'm', cfunc_alias='divide'), @@ -1153,14 +1159,6 @@ def make_ufuncs(funcdict): for name in names: uf = funcdict[name] mlist = [] - 
docstring = textwrap.dedent(uf.docstring).strip() - docstring = docstring.encode('unicode-escape').decode('ascii') - docstring = docstring.replace(r'"', r'\"') - docstring = docstring.replace(r"'", r"\'") - # Split the docstring because some compilers (like MS) do not like big - # string literal in C code. We split at endlines because textwrap.wrap - # do not play well with \n - docstring = '\\n\"\"'.join(docstring.split(r"\n")) if uf.signature is None: sig = "NULL" else: @@ -1173,7 +1171,7 @@ def make_ufuncs(funcdict): f = PyUFunc_FromFuncAndDataAndSignatureAndIdentity( {name}_functions, {name}_data, {name}_signatures, {nloops}, {nin}, {nout}, {identity}, "{name}", - "{doc}", 0, {sig}, identity + {doc}, 0, {sig}, identity ); if ({has_identity}) {{ Py_DECREF(identity); @@ -1188,7 +1186,7 @@ def make_ufuncs(funcdict): has_identity='0' if uf.identity is None_ else '1', identity='PyUFunc_IdentityValue', identity_expr=uf.identity, - doc=docstring, + doc=uf.docstring, sig=sig, ) @@ -1224,6 +1222,7 @@ def make_code(funcdict, filename): #include "loops.h" #include "matmul.h" #include "clip.h" + #include "_umath_doc_generated.h" %s static int diff --git a/numpy/core/code_generators/generate_umath_doc.py b/numpy/core/code_generators/generate_umath_doc.py new file mode 100644 index 000000000..9888730fd --- /dev/null +++ b/numpy/core/code_generators/generate_umath_doc.py @@ -0,0 +1,30 @@ +import sys +import os +import textwrap + +sys.path.insert(0, os.path.dirname(__file__)) +import ufunc_docstrings as docstrings +sys.path.pop(0) + +def normalize_doc(docstring): + docstring = textwrap.dedent(docstring).strip() + docstring = docstring.encode('unicode-escape').decode('ascii') + docstring = docstring.replace(r'"', r'\"') + docstring = docstring.replace(r"'", r"\'") + # Split the docstring because some compilers (like MS) do not like big + # string literal in C code. 
We split at endlines because textwrap.wrap + # do not play well with \n + docstring = '\\n\"\"'.join(docstring.split(r"\n")) + return docstring + +def write_code(target): + with open(target, 'w') as fid: + fid.write( + "#ifndef NUMPY_CORE_INCLUDE__UMATH_DOC_GENERATED_H_\n" + "#define NUMPY_CORE_INCLUDE__UMATH_DOC_GENERATED_H_\n" + ) + for place, string in docstrings.docdict.items(): + cdef_name = f"DOC_{place.upper().replace('.', '_')}" + cdef_str = normalize_doc(string) + fid.write(f"#define {cdef_name} \"{cdef_str}\"\n") + fid.write("#endif //NUMPY_CORE_INCLUDE__UMATH_DOC_GENERATED_H\n") diff --git a/numpy/core/code_generators/ufunc_docstrings.py b/numpy/core/code_generators/ufunc_docstrings.py index cd584eea7..24e2eef3f 100644 --- a/numpy/core/code_generators/ufunc_docstrings.py +++ b/numpy/core/code_generators/ufunc_docstrings.py @@ -4,18 +4,15 @@ Docstrings for generated ufuncs The syntax is designed to look like the function add_newdoc is being called from numpy.lib, but in this file add_newdoc puts the docstrings in a dictionary. This dictionary is used in -numpy/core/code_generators/generate_umath.py to generate the docstrings -for the ufuncs in numpy.core at the C level when the ufuncs are created -at compile time. +numpy/core/code_generators/generate_umath_doc.py to generate the docstrings +as a C #definitions for the ufuncs in numpy.core at the C level when the +ufuncs are created at compile time. """ import textwrap docdict = {} -def get(name): - return docdict.get(name) - # common parameter text to all ufuncs subst = { 'PARAMS': textwrap.dedent(""" @@ -1089,9 +1086,8 @@ add_newdoc('numpy.core.umath', 'divide', ----- Equivalent to ``x1`` / ``x2`` in terms of array-broadcasting. - Behavior on division by zero can be changed using ``seterr``. - - Behaves like ``true_divide``. + The ``true_divide(x1, x2)`` function is an alias for + ``divide(x1, x2)``. 
Examples -------- @@ -1100,13 +1096,9 @@ add_newdoc('numpy.core.umath', 'divide', >>> x1 = np.arange(9.0).reshape((3, 3)) >>> x2 = np.arange(3.0) >>> np.divide(x1, x2) - array([[ NaN, 1. , 1. ], - [ Inf, 4. , 2.5], - [ Inf, 7. , 4. ]]) - - >>> ignored_states = np.seterr(**old_err_state) - >>> np.divide(1, 0) - 0 + array([[nan, 1. , 1. ], + [inf, 4. , 2.5], + [inf, 7. , 4. ]]) The ``/`` operator can be used as a shorthand for ``np.divide`` on ndarrays. @@ -4052,54 +4044,6 @@ add_newdoc('numpy.core.umath', 'tanh', """) -add_newdoc('numpy.core.umath', 'true_divide', - """ - Returns a true division of the inputs, element-wise. - - Unlike 'floor division', true division adjusts the output type - to present the best answer, regardless of input types. - - Parameters - ---------- - x1 : array_like - Dividend array. - x2 : array_like - Divisor array. - $BROADCASTABLE_2 - $PARAMS - - Returns - ------- - out : ndarray or scalar - $OUT_SCALAR_2 - - Notes - ----- - In Python, ``//`` is the floor division operator and ``/`` the - true division operator. The ``true_divide(x1, x2)`` function is - equivalent to true division in Python. - - Examples - -------- - >>> x = np.arange(5) - >>> np.true_divide(x, 4) - array([ 0. , 0.25, 0.5 , 0.75, 1. ]) - - >>> x/4 - array([ 0. , 0.25, 0.5 , 0.75, 1. ]) - - >>> x//4 - array([0, 0, 0, 0, 1]) - - The ``/`` operator can be used as a shorthand for ``np.true_divide`` on - ndarrays. - - >>> x = np.arange(5) - >>> x / 4 - array([0. , 0.25, 0.5 , 0.75, 1. ]) - - """) - add_newdoc('numpy.core.umath', 'frexp', """ Decompose the elements of x into mantissa and twos exponent. 
diff --git a/numpy/core/defchararray.pyi b/numpy/core/defchararray.pyi index 28d247b05..250706eb1 100644 --- a/numpy/core/defchararray.pyi +++ b/numpy/core/defchararray.pyi @@ -3,7 +3,6 @@ from typing import ( overload, TypeVar, Any, - List, ) from numpy import ( @@ -30,7 +29,7 @@ from numpy.core.multiarray import compare_chararrays as compare_chararrays _SCT = TypeVar("_SCT", str_, bytes_) _CharArray = chararray[Any, dtype[_SCT]] -__all__: List[str] +__all__: list[str] # Comparison @overload diff --git a/numpy/core/einsumfunc.pyi b/numpy/core/einsumfunc.pyi index aabb04c47..278fa2044 100644 --- a/numpy/core/einsumfunc.pyi +++ b/numpy/core/einsumfunc.pyi @@ -1,4 +1,5 @@ -from typing import List, TypeVar, Optional, Any, overload, Union, Tuple, Sequence, Literal +from collections.abc import Sequence +from typing import TypeVar, Any, overload, Union, Literal from numpy import ( ndarray, @@ -30,13 +31,11 @@ _ArrayType = TypeVar( bound=ndarray[Any, dtype[Union[bool_, number[Any]]]], ) -_OptimizeKind = Union[ - None, bool, Literal["greedy", "optimal"], Sequence[Any] -] +_OptimizeKind = None | bool | Literal["greedy", "optimal"] | Sequence[Any] _CastingSafe = Literal["no", "equiv", "safe", "same_kind"] _CastingUnsafe = Literal["unsafe"] -__all__: List[str] +__all__: list[str] # TODO: Properly handle the `casting`-based combinatorics # TODO: We need to evaluate the content `__subscripts` in order @@ -50,7 +49,7 @@ def einsum( /, *operands: _ArrayLikeBool_co, out: None = ..., - dtype: Optional[_DTypeLikeBool] = ..., + dtype: None | _DTypeLikeBool = ..., order: _OrderKACF = ..., casting: _CastingSafe = ..., optimize: _OptimizeKind = ..., @@ -61,7 +60,7 @@ def einsum( /, *operands: _ArrayLikeUInt_co, out: None = ..., - dtype: Optional[_DTypeLikeUInt] = ..., + dtype: None | _DTypeLikeUInt = ..., order: _OrderKACF = ..., casting: _CastingSafe = ..., optimize: _OptimizeKind = ..., @@ -72,7 +71,7 @@ def einsum( /, *operands: _ArrayLikeInt_co, out: None = ..., - dtype: 
Optional[_DTypeLikeInt] = ..., + dtype: None | _DTypeLikeInt = ..., order: _OrderKACF = ..., casting: _CastingSafe = ..., optimize: _OptimizeKind = ..., @@ -83,7 +82,7 @@ def einsum( /, *operands: _ArrayLikeFloat_co, out: None = ..., - dtype: Optional[_DTypeLikeFloat] = ..., + dtype: None | _DTypeLikeFloat = ..., order: _OrderKACF = ..., casting: _CastingSafe = ..., optimize: _OptimizeKind = ..., @@ -94,7 +93,7 @@ def einsum( /, *operands: _ArrayLikeComplex_co, out: None = ..., - dtype: Optional[_DTypeLikeComplex] = ..., + dtype: None | _DTypeLikeComplex = ..., order: _OrderKACF = ..., casting: _CastingSafe = ..., optimize: _OptimizeKind = ..., @@ -105,7 +104,7 @@ def einsum( /, *operands: Any, casting: _CastingUnsafe, - dtype: Optional[_DTypeLikeComplex_co] = ..., + dtype: None | _DTypeLikeComplex_co = ..., out: None = ..., order: _OrderKACF = ..., optimize: _OptimizeKind = ..., @@ -116,7 +115,7 @@ def einsum( /, *operands: _ArrayLikeComplex_co, out: _ArrayType, - dtype: Optional[_DTypeLikeComplex_co] = ..., + dtype: None | _DTypeLikeComplex_co = ..., order: _OrderKACF = ..., casting: _CastingSafe = ..., optimize: _OptimizeKind = ..., @@ -128,7 +127,7 @@ def einsum( *operands: Any, out: _ArrayType, casting: _CastingUnsafe, - dtype: Optional[_DTypeLikeComplex_co] = ..., + dtype: None | _DTypeLikeComplex_co = ..., order: _OrderKACF = ..., optimize: _OptimizeKind = ..., ) -> _ArrayType: ... @@ -142,4 +141,4 @@ def einsum_path( /, *operands: _ArrayLikeComplex_co, optimize: _OptimizeKind = ..., -) -> Tuple[List[Any], str]: ... +) -> tuple[list[Any], str]: ... 
diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py index 3242124ac..f26f306fa 100644 --- a/numpy/core/fromnumeric.py +++ b/numpy/core/fromnumeric.py @@ -17,7 +17,7 @@ _dt_ = nt.sctype2char # functions that are methods __all__ = [ - 'alen', 'all', 'alltrue', 'amax', 'amin', 'any', 'argmax', + 'all', 'alltrue', 'amax', 'amin', 'any', 'argmax', 'argmin', 'argpartition', 'argsort', 'around', 'choose', 'clip', 'compress', 'cumprod', 'cumproduct', 'cumsum', 'diagonal', 'mean', 'ndim', 'nonzero', 'partition', 'prod', 'product', 'ptp', 'put', @@ -1980,25 +1980,27 @@ def shape(a): See Also -------- - len + len : ``len(a)`` is equivalent to ``np.shape(a)[0]`` for N-D arrays with + ``N>=1``. ndarray.shape : Equivalent array method. Examples -------- >>> np.shape(np.eye(3)) (3, 3) - >>> np.shape([[1, 2]]) + >>> np.shape([[1, 3]]) (1, 2) >>> np.shape([0]) (1,) >>> np.shape(0) () - >>> a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')]) + >>> a = np.array([(1, 2), (3, 4), (5, 6)], + ... dtype=[('x', 'i4'), ('y', 'i4')]) >>> np.shape(a) - (2,) + (3,) >>> a.shape - (2,) + (3,) """ try: @@ -2917,51 +2919,6 @@ def amin(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, keepdims=keepdims, initial=initial, where=where) -def _alen_dispathcer(a): - return (a,) - - -@array_function_dispatch(_alen_dispathcer) -def alen(a): - """ - Return the length of the first dimension of the input array. - - .. deprecated:: 1.18 - `numpy.alen` is deprecated, use `len` instead. - - Parameters - ---------- - a : array_like - Input array. - - Returns - ------- - alen : int - Length of the first dimension of `a`. 
- - See Also - -------- - shape, size - - Examples - -------- - >>> a = np.zeros((7,4,5)) - >>> a.shape[0] - 7 - >>> np.alen(a) - 7 - - """ - # NumPy 1.18.0, 2019-08-02 - warnings.warn( - "`np.alen` is deprecated, use `len` instead", - DeprecationWarning, stacklevel=2) - try: - return len(a) - except TypeError: - return len(array(a, ndmin=1)) - - def _prod_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None, initial=None, where=None): return (a, out) diff --git a/numpy/core/fromnumeric.pyi b/numpy/core/fromnumeric.pyi index 3cbe1d5c5..472c4ded5 100644 --- a/numpy/core/fromnumeric.pyi +++ b/numpy/core/fromnumeric.pyi @@ -1,5 +1,6 @@ import datetime as dt -from typing import Optional, Union, Sequence, Tuple, Any, overload, TypeVar, Literal +from collections.abc import Sequence +from typing import Union, Any, overload, TypeVar, Literal from numpy import ( ndarray, @@ -18,6 +19,7 @@ from numpy import ( from numpy.typing import ( DTypeLike, ArrayLike, + NDArray, _ShapeLike, _Shape, _ArrayLikeBool_co, @@ -47,8 +49,8 @@ _Number = TypeVar("_Number", bound=number) def take( a: ArrayLike, indices: _ArrayLikeInt_co, - axis: Optional[int] = ..., - out: Optional[ndarray] = ..., + axis: None | int = ..., + out: None | ndarray = ..., mode: _ModeKind = ..., ) -> Any: ... @@ -61,14 +63,14 @@ def reshape( def choose( a: _ArrayLikeInt_co, choices: ArrayLike, - out: Optional[ndarray] = ..., + out: None | ndarray = ..., mode: _ModeKind = ..., ) -> Any: ... def repeat( a: ArrayLike, repeats: _ArrayLikeInt_co, - axis: Optional[int] = ..., + axis: None | int = ..., ) -> ndarray: ... def put( @@ -86,52 +88,52 @@ def swapaxes( def transpose( a: ArrayLike, - axes: Union[None, Sequence[int], ndarray] = ... + axes: None | Sequence[int] | NDArray[Any] = ... ) -> ndarray: ... 
def partition( a: ArrayLike, kth: _ArrayLikeInt_co, - axis: Optional[int] = ..., + axis: None | int = ..., kind: _PartitionKind = ..., - order: Union[None, str, Sequence[str]] = ..., + order: None | str | Sequence[str] = ..., ) -> ndarray: ... def argpartition( a: ArrayLike, kth: _ArrayLikeInt_co, - axis: Optional[int] = ..., + axis: None | int = ..., kind: _PartitionKind = ..., - order: Union[None, str, Sequence[str]] = ..., + order: None | str | Sequence[str] = ..., ) -> Any: ... def sort( a: ArrayLike, - axis: Optional[int] = ..., - kind: Optional[_SortKind] = ..., - order: Union[None, str, Sequence[str]] = ..., + axis: None | int = ..., + kind: None | _SortKind = ..., + order: None | str | Sequence[str] = ..., ) -> ndarray: ... def argsort( a: ArrayLike, - axis: Optional[int] = ..., - kind: Optional[_SortKind] = ..., - order: Union[None, str, Sequence[str]] = ..., + axis: None | int = ..., + kind: None | _SortKind = ..., + order: None | str | Sequence[str] = ..., ) -> ndarray: ... @overload def argmax( a: ArrayLike, axis: None = ..., - out: Optional[ndarray] = ..., + out: None | ndarray = ..., *, keepdims: Literal[False] = ..., ) -> intp: ... @overload def argmax( a: ArrayLike, - axis: Optional[int] = ..., - out: Optional[ndarray] = ..., + axis: None | int = ..., + out: None | ndarray = ..., *, keepdims: bool = ..., ) -> Any: ... @@ -140,15 +142,15 @@ def argmax( def argmin( a: ArrayLike, axis: None = ..., - out: Optional[ndarray] = ..., + out: None | ndarray = ..., *, keepdims: Literal[False] = ..., ) -> intp: ... @overload def argmin( a: ArrayLike, - axis: Optional[int] = ..., - out: Optional[ndarray] = ..., + axis: None | int = ..., + out: None | ndarray = ..., *, keepdims: bool = ..., ) -> Any: ... @@ -158,14 +160,14 @@ def searchsorted( a: ArrayLike, v: _Scalar, side: _SortSide = ..., - sorter: Optional[_ArrayLikeInt_co] = ..., # 1D int array + sorter: None | _ArrayLikeInt_co = ..., # 1D int array ) -> intp: ... 
@overload def searchsorted( a: ArrayLike, v: ArrayLike, side: _SortSide = ..., - sorter: Optional[_ArrayLikeInt_co] = ..., # 1D int array + sorter: None | _ArrayLikeInt_co = ..., # 1D int array ) -> ndarray: ... def resize( @@ -176,12 +178,12 @@ def resize( @overload def squeeze( a: _ScalarGeneric, - axis: Optional[_ShapeLike] = ..., + axis: None | _ShapeLike = ..., ) -> _ScalarGeneric: ... @overload def squeeze( a: ArrayLike, - axis: Optional[_ShapeLike] = ..., + axis: None | _ShapeLike = ..., ) -> ndarray: ... def diagonal( @@ -197,28 +199,28 @@ def trace( axis1: int = ..., axis2: int = ..., dtype: DTypeLike = ..., - out: Optional[ndarray] = ..., + out: None | ndarray = ..., ) -> Any: ... def ravel(a: ArrayLike, order: _OrderKACF = ...) -> ndarray: ... -def nonzero(a: ArrayLike) -> Tuple[ndarray, ...]: ... +def nonzero(a: ArrayLike) -> tuple[ndarray, ...]: ... def shape(a: ArrayLike) -> _Shape: ... def compress( condition: ArrayLike, # 1D bool array a: ArrayLike, - axis: Optional[int] = ..., - out: Optional[ndarray] = ..., + axis: None | int = ..., + out: None | ndarray = ..., ) -> ndarray: ... @overload def clip( a: ArrayLike, a_min: ArrayLike, - a_max: Optional[ArrayLike], - out: Optional[ndarray] = ..., + a_max: None | ArrayLike, + out: None | ndarray = ..., **kwargs: Any, ) -> Any: ... @overload @@ -226,7 +228,7 @@ def clip( a: ArrayLike, a_min: None, a_max: ArrayLike, - out: Optional[ndarray] = ..., + out: None | ndarray = ..., **kwargs: Any, ) -> Any: ... @@ -234,7 +236,7 @@ def sum( a: ArrayLike, axis: _ShapeLike = ..., dtype: DTypeLike = ..., - out: Optional[ndarray] = ..., + out: None | ndarray = ..., keepdims: bool = ..., initial: _NumberLike_co = ..., where: _ArrayLikeBool_co = ..., @@ -250,8 +252,8 @@ def all( @overload def all( a: ArrayLike, - axis: Optional[_ShapeLike] = ..., - out: Optional[ndarray] = ..., + axis: None | _ShapeLike = ..., + out: None | ndarray = ..., keepdims: bool = ..., ) -> Any: ... 
@@ -265,29 +267,29 @@ def any( @overload def any( a: ArrayLike, - axis: Optional[_ShapeLike] = ..., - out: Optional[ndarray] = ..., + axis: None | _ShapeLike = ..., + out: None | ndarray = ..., keepdims: bool = ..., ) -> Any: ... def cumsum( a: ArrayLike, - axis: Optional[int] = ..., + axis: None | int = ..., dtype: DTypeLike = ..., - out: Optional[ndarray] = ..., + out: None | ndarray = ..., ) -> ndarray: ... def ptp( a: ArrayLike, - axis: Optional[_ShapeLike] = ..., - out: Optional[ndarray] = ..., + axis: None | _ShapeLike = ..., + out: None | ndarray = ..., keepdims: bool = ..., ) -> Any: ... def amax( a: ArrayLike, - axis: Optional[_ShapeLike] = ..., - out: Optional[ndarray] = ..., + axis: None | _ShapeLike = ..., + out: None | ndarray = ..., keepdims: bool = ..., initial: _NumberLike_co = ..., where: _ArrayLikeBool_co = ..., @@ -295,8 +297,8 @@ def amax( def amin( a: ArrayLike, - axis: Optional[_ShapeLike] = ..., - out: Optional[ndarray] = ..., + axis: None | _ShapeLike = ..., + out: None | ndarray = ..., keepdims: bool = ..., initial: _NumberLike_co = ..., where: _ArrayLikeBool_co = ..., @@ -311,9 +313,9 @@ def amin( # `np.ufunc.reduce`, e.g. `np.sum()` (`.__add__()`). def prod( a: ArrayLike, - axis: Optional[_ShapeLike] = ..., + axis: None | _ShapeLike = ..., dtype: DTypeLike = ..., - out: Optional[ndarray] = ..., + out: None | ndarray = ..., keepdims: bool = ..., initial: _NumberLike_co = ..., where: _ArrayLikeBool_co = ..., @@ -321,43 +323,43 @@ def prod( def cumprod( a: ArrayLike, - axis: Optional[int] = ..., + axis: None | int = ..., dtype: DTypeLike = ..., - out: Optional[ndarray] = ..., + out: None | ndarray = ..., ) -> ndarray: ... def ndim(a: ArrayLike) -> int: ... -def size(a: ArrayLike, axis: Optional[int] = ...) -> int: ... +def size(a: ArrayLike, axis: None | int = ...) -> int: ... def around( a: ArrayLike, decimals: int = ..., - out: Optional[ndarray] = ..., + out: None | ndarray = ..., ) -> Any: ... 
def mean( a: ArrayLike, - axis: Optional[_ShapeLike] = ..., + axis: None | _ShapeLike = ..., dtype: DTypeLike = ..., - out: Optional[ndarray] = ..., + out: None | ndarray = ..., keepdims: bool = ..., ) -> Any: ... def std( a: ArrayLike, - axis: Optional[_ShapeLike] = ..., + axis: None | _ShapeLike = ..., dtype: DTypeLike = ..., - out: Optional[ndarray] = ..., + out: None | ndarray = ..., ddof: int = ..., keepdims: bool = ..., ) -> Any: ... def var( a: ArrayLike, - axis: Optional[_ShapeLike] = ..., + axis: None | _ShapeLike = ..., dtype: DTypeLike = ..., - out: Optional[ndarray] = ..., + out: None | ndarray = ..., ddof: int = ..., keepdims: bool = ..., ) -> Any: ... diff --git a/numpy/core/function_base.pyi b/numpy/core/function_base.pyi index 68d3b3a98..b21892177 100644 --- a/numpy/core/function_base.pyi +++ b/numpy/core/function_base.pyi @@ -1,60 +1,197 @@ -from typing import overload, Tuple, Union, Sequence, Any, SupportsIndex, Literal, List +from typing import ( + Literal as L, + overload, + Union, + Any, + SupportsIndex, + TypeVar, +) -from numpy import ndarray -from numpy.typing import ArrayLike, DTypeLike, _SupportsArray, _NumberLike_co +from numpy import floating, complexfloating, generic, dtype +from numpy.typing import ( + NDArray, + ArrayLike, + DTypeLike, + _SupportsDType, + _SupportsArray, + _NumberLike_co, + _ArrayLikeFloat_co, + _ArrayLikeComplex_co, +) -# TODO: wait for support for recursive types -_ArrayLikeNested = Sequence[Sequence[Any]] -_ArrayLikeNumber = Union[ - _NumberLike_co, Sequence[_NumberLike_co], ndarray, _SupportsArray, _ArrayLikeNested +_SCT = TypeVar("_SCT", bound=generic) + +_DTypeLike = Union[ + dtype[_SCT], + type[_SCT], + _SupportsDType[dtype[_SCT]], ] -__all__: List[str] +__all__: list[str] @overload def linspace( - start: _ArrayLikeNumber, - stop: _ArrayLikeNumber, + start: _ArrayLikeFloat_co, + stop: _ArrayLikeFloat_co, + num: SupportsIndex = ..., + endpoint: bool = ..., + retstep: L[False] = ..., + dtype: None = ..., + axis: 
SupportsIndex = ..., +) -> NDArray[floating[Any]]: ... +@overload +def linspace( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, + num: SupportsIndex = ..., + endpoint: bool = ..., + retstep: L[False] = ..., + dtype: None = ..., + axis: SupportsIndex = ..., +) -> NDArray[complexfloating[Any, Any]]: ... +@overload +def linspace( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, + num: SupportsIndex = ..., + endpoint: bool = ..., + retstep: L[False] = ..., + dtype: _DTypeLike[_SCT] = ..., + axis: SupportsIndex = ..., +) -> NDArray[_SCT]: ... +@overload +def linspace( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, num: SupportsIndex = ..., endpoint: bool = ..., - retstep: Literal[False] = ..., + retstep: L[False] = ..., dtype: DTypeLike = ..., axis: SupportsIndex = ..., -) -> ndarray: ... +) -> NDArray[Any]: ... @overload def linspace( - start: _ArrayLikeNumber, - stop: _ArrayLikeNumber, + start: _ArrayLikeFloat_co, + stop: _ArrayLikeFloat_co, num: SupportsIndex = ..., endpoint: bool = ..., - retstep: Literal[True] = ..., + retstep: L[True] = ..., + dtype: None = ..., + axis: SupportsIndex = ..., +) -> tuple[NDArray[floating[Any]], floating[Any]]: ... +@overload +def linspace( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, + num: SupportsIndex = ..., + endpoint: bool = ..., + retstep: L[True] = ..., + dtype: None = ..., + axis: SupportsIndex = ..., +) -> tuple[NDArray[complexfloating[Any, Any]], complexfloating[Any, Any]]: ... +@overload +def linspace( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, + num: SupportsIndex = ..., + endpoint: bool = ..., + retstep: L[True] = ..., + dtype: _DTypeLike[_SCT] = ..., + axis: SupportsIndex = ..., +) -> tuple[NDArray[_SCT], _SCT]: ... 
+@overload +def linspace( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, + num: SupportsIndex = ..., + endpoint: bool = ..., + retstep: L[True] = ..., dtype: DTypeLike = ..., axis: SupportsIndex = ..., -) -> Tuple[ndarray, Any]: ... +) -> tuple[NDArray[Any], Any]: ... +@overload def logspace( - start: _ArrayLikeNumber, - stop: _ArrayLikeNumber, + start: _ArrayLikeFloat_co, + stop: _ArrayLikeFloat_co, num: SupportsIndex = ..., endpoint: bool = ..., - base: _ArrayLikeNumber = ..., + base: _ArrayLikeFloat_co = ..., + dtype: None = ..., + axis: SupportsIndex = ..., +) -> NDArray[floating[Any]]: ... +@overload +def logspace( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, + num: SupportsIndex = ..., + endpoint: bool = ..., + base: _ArrayLikeComplex_co = ..., + dtype: None = ..., + axis: SupportsIndex = ..., +) -> NDArray[complexfloating[Any, Any]]: ... +@overload +def logspace( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, + num: SupportsIndex = ..., + endpoint: bool = ..., + base: _ArrayLikeComplex_co = ..., + dtype: _DTypeLike[_SCT] = ..., + axis: SupportsIndex = ..., +) -> NDArray[_SCT]: ... +@overload +def logspace( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, + num: SupportsIndex = ..., + endpoint: bool = ..., + base: _ArrayLikeComplex_co = ..., dtype: DTypeLike = ..., axis: SupportsIndex = ..., -) -> ndarray: ... +) -> NDArray[Any]: ... +@overload +def geomspace( + start: _ArrayLikeFloat_co, + stop: _ArrayLikeFloat_co, + num: SupportsIndex = ..., + endpoint: bool = ..., + dtype: None = ..., + axis: SupportsIndex = ..., +) -> NDArray[floating[Any]]: ... +@overload +def geomspace( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, + num: SupportsIndex = ..., + endpoint: bool = ..., + dtype: None = ..., + axis: SupportsIndex = ..., +) -> NDArray[complexfloating[Any, Any]]: ... 
+@overload +def geomspace( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, + num: SupportsIndex = ..., + endpoint: bool = ..., + dtype: _DTypeLike[_SCT] = ..., + axis: SupportsIndex = ..., +) -> NDArray[_SCT]: ... +@overload def geomspace( - start: _ArrayLikeNumber, - stop: _ArrayLikeNumber, + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, num: SupportsIndex = ..., endpoint: bool = ..., dtype: DTypeLike = ..., axis: SupportsIndex = ..., -) -> ndarray: ... +) -> NDArray[Any]: ... # Re-exported to `np.lib.function_base` def add_newdoc( place: str, obj: str, - doc: str | Tuple[str, str] | List[Tuple[str, str]], + doc: str | tuple[str, str] | list[tuple[str, str]], warn_on_python: bool = ..., ) -> None: ... diff --git a/numpy/core/getlimits.pyi b/numpy/core/getlimits.pyi index 66d062995..da5e3c23e 100644 --- a/numpy/core/getlimits.pyi +++ b/numpy/core/getlimits.pyi @@ -1,8 +1,6 @@ -from typing import List - from numpy import ( finfo as finfo, iinfo as iinfo, ) -__all__: List[str] +__all__: list[str] diff --git a/numpy/core/include/numpy/experimental_dtype_api.h b/numpy/core/include/numpy/experimental_dtype_api.h index 554c7fb6c..83639f186 100644 --- a/numpy/core/include/numpy/experimental_dtype_api.h +++ b/numpy/core/include/numpy/experimental_dtype_api.h @@ -82,6 +82,15 @@ * The new DType API is designed in a way to make it potentially useful for * alternative "array-like" implementations. This will require careful * exposure of details and functions and is not part of this experimental API. + * + * Brief (incompatibility) changelog + * ================================= + * + * 2. None (only additions). + * 3. New `npy_intp *view_offset` argument for `resolve_descriptors`. + * This replaces the `NPY_CAST_IS_VIEW` flag. It can be set to 0 if the + * operation is a view, and is pre-initialized to `NPY_MIN_INTP` indicating + * that the operation is not a view. 
*/ #ifndef NUMPY_CORE_INCLUDE_NUMPY_EXPERIMENTAL_DTYPE_API_H_ @@ -181,6 +190,12 @@ typedef PyObject *_ufunc_addloop_fromspec_func( /* * Type of the C promoter function, which must be wrapped into a * PyCapsule with name "numpy._ufunc_promoter". + * + * Note that currently the output dtypes are always NULL unless they are + * also part of the signature. This is an implementation detail and could + * change in the future. However, in general promoters should not have a + * need for output dtypes. + * (There are potential use-cases, these are currently unsupported.) */ typedef int promoter_function(PyObject *ufunc, PyArray_DTypeMeta *op_dtypes[], PyArray_DTypeMeta *signature[], @@ -200,16 +215,6 @@ typedef int _ufunc_addpromoter_func( #define PyUFunc_AddPromoter \ (*(_ufunc_addpromoter_func *)(__experimental_dtype_api_table[1])) -/* - * In addition to the normal casting levels, NPY_CAST_IS_VIEW indicates - * that no cast operation is necessary at all (although a copy usually will be) - * - * NOTE: The most likely modification here is to add an additional - * `view_offset` output to resolve_descriptors. If set, it would - * indicate both that it is a view and what offset to use. This means that - * e.g. `arr.imag` could be implemented by an ArrayMethod. - */ -#define NPY_CAST_IS_VIEW _NPY_CAST_IS_VIEW /* * The resolve descriptors function, must be able to handle NULL values for @@ -230,7 +235,8 @@ typedef NPY_CASTING (resolve_descriptors_function)( /* Input descriptors (instances). Outputs may be NULL. */ PyArray_Descr **given_descrs, /* Exact loop descriptors to use, must not hold references on error */ - PyArray_Descr **loop_descrs); + PyArray_Descr **loop_descrs, + npy_intp *view_offset); /* NOT public yet: Signature needs adapting as external API. 
*/ #define _NPY_METH_get_loop 2 diff --git a/numpy/core/include/numpy/ndarrayobject.h b/numpy/core/include/numpy/ndarrayobject.h index 2eb951486..aaaefd7de 100644 --- a/numpy/core/include/numpy/ndarrayobject.h +++ b/numpy/core/include/numpy/ndarrayobject.h @@ -152,19 +152,16 @@ extern "C" { (k)*PyArray_STRIDES(obj)[2] + \ (l)*PyArray_STRIDES(obj)[3])) -/* Move to arrayobject.c once PyArray_XDECREF_ERR is removed */ static NPY_INLINE void PyArray_DiscardWritebackIfCopy(PyArrayObject *arr) { PyArrayObject_fields *fa = (PyArrayObject_fields *)arr; if (fa && fa->base) { - if ((fa->flags & NPY_ARRAY_UPDATEIFCOPY) || - (fa->flags & NPY_ARRAY_WRITEBACKIFCOPY)) { + if (fa->flags & NPY_ARRAY_WRITEBACKIFCOPY) { PyArray_ENABLEFLAGS((PyArrayObject*)fa->base, NPY_ARRAY_WRITEABLE); Py_DECREF(fa->base); fa->base = NULL; PyArray_CLEARFLAGS(arr, NPY_ARRAY_WRITEBACKIFCOPY); - PyArray_CLEARFLAGS(arr, NPY_ARRAY_UPDATEIFCOPY); } } } @@ -246,20 +243,6 @@ NPY_TITLE_KEY_check(PyObject *key, PyObject *value) #define DEPRECATE(msg) PyErr_WarnEx(PyExc_DeprecationWarning,msg,1) #define DEPRECATE_FUTUREWARNING(msg) PyErr_WarnEx(PyExc_FutureWarning,msg,1) -#if !defined(NPY_NO_DEPRECATED_API) || \ - (NPY_NO_DEPRECATED_API < NPY_1_14_API_VERSION) -static NPY_INLINE void -PyArray_XDECREF_ERR(PyArrayObject *arr) -{ - /* 2017-Nov-10 1.14 */ - DEPRECATE("PyArray_XDECREF_ERR is deprecated, call " - "PyArray_DiscardWritebackIfCopy then Py_XDECREF instead"); - PyArray_DiscardWritebackIfCopy(arr); - Py_XDECREF(arr); -} -#endif - - #ifdef __cplusplus } #endif diff --git a/numpy/core/include/numpy/ndarraytypes.h b/numpy/core/include/numpy/ndarraytypes.h index 6240adc0c..47d063178 100644 --- a/numpy/core/include/numpy/ndarraytypes.h +++ b/numpy/core/include/numpy/ndarraytypes.h @@ -221,13 +221,6 @@ typedef enum { NPY_SAME_KIND_CASTING=3, /* Allow any casts */ NPY_UNSAFE_CASTING=4, - /* - * Flag to allow signalling that a cast is a view, this flag is not - * valid when requesting a cast of specific safety. 
- * _NPY_CAST_IS_VIEW|NPY_EQUIV_CASTING means the same as NPY_NO_CASTING. - */ - // TODO-DTYPES: Needs to be documented. - _NPY_CAST_IS_VIEW = 1 << 16, } NPY_CASTING; typedef enum { @@ -934,7 +927,6 @@ typedef int (PyArray_FinalizeFunc)(PyArrayObject *, PyObject *); * This flag may be requested in constructor functions. * This flag may be tested for in PyArray_FLAGS(arr). */ -#define NPY_ARRAY_UPDATEIFCOPY 0x1000 /* Deprecated in 1.14 */ #define NPY_ARRAY_WRITEBACKIFCOPY 0x2000 /* @@ -965,14 +957,12 @@ typedef int (PyArray_FinalizeFunc)(PyArrayObject *, PyObject *); #define NPY_ARRAY_DEFAULT (NPY_ARRAY_CARRAY) #define NPY_ARRAY_IN_ARRAY (NPY_ARRAY_CARRAY_RO) #define NPY_ARRAY_OUT_ARRAY (NPY_ARRAY_CARRAY) -#define NPY_ARRAY_INOUT_ARRAY (NPY_ARRAY_CARRAY | \ - NPY_ARRAY_UPDATEIFCOPY) +#define NPY_ARRAY_INOUT_ARRAY (NPY_ARRAY_CARRAY) #define NPY_ARRAY_INOUT_ARRAY2 (NPY_ARRAY_CARRAY | \ NPY_ARRAY_WRITEBACKIFCOPY) #define NPY_ARRAY_IN_FARRAY (NPY_ARRAY_FARRAY_RO) #define NPY_ARRAY_OUT_FARRAY (NPY_ARRAY_FARRAY) -#define NPY_ARRAY_INOUT_FARRAY (NPY_ARRAY_FARRAY | \ - NPY_ARRAY_UPDATEIFCOPY) +#define NPY_ARRAY_INOUT_FARRAY (NPY_ARRAY_FARRAY) #define NPY_ARRAY_INOUT_FARRAY2 (NPY_ARRAY_FARRAY | \ NPY_ARRAY_WRITEBACKIFCOPY) diff --git a/numpy/core/include/numpy/noprefix.h b/numpy/core/include/numpy/noprefix.h index 2c0ce1420..cea5b0d46 100644 --- a/numpy/core/include/numpy/noprefix.h +++ b/numpy/core/include/numpy/noprefix.h @@ -165,7 +165,6 @@ #define ALIGNED NPY_ALIGNED #define NOTSWAPPED NPY_NOTSWAPPED #define WRITEABLE NPY_WRITEABLE -#define UPDATEIFCOPY NPY_UPDATEIFCOPY #define WRITEBACKIFCOPY NPY_ARRAY_WRITEBACKIFCOPY #define ARR_HAS_DESCR NPY_ARR_HAS_DESCR #define BEHAVED NPY_BEHAVED diff --git a/numpy/core/include/numpy/npy_1_7_deprecated_api.h b/numpy/core/include/numpy/npy_1_7_deprecated_api.h index 4fd4015a9..6455d40d2 100644 --- a/numpy/core/include/numpy/npy_1_7_deprecated_api.h +++ b/numpy/core/include/numpy/npy_1_7_deprecated_api.h @@ -48,7 +48,6 @@ #define 
NPY_ALIGNED NPY_ARRAY_ALIGNED #define NPY_NOTSWAPPED NPY_ARRAY_NOTSWAPPED #define NPY_WRITEABLE NPY_ARRAY_WRITEABLE -#define NPY_UPDATEIFCOPY NPY_ARRAY_UPDATEIFCOPY #define NPY_BEHAVED NPY_ARRAY_BEHAVED #define NPY_BEHAVED_NS NPY_ARRAY_BEHAVED_NS #define NPY_CARRAY NPY_ARRAY_CARRAY diff --git a/numpy/core/include/numpy/numpyconfig.h b/numpy/core/include/numpy/numpyconfig.h index 4eac083e7..b2e7c458e 100644 --- a/numpy/core/include/numpy/numpyconfig.h +++ b/numpy/core/include/numpy/numpyconfig.h @@ -23,12 +23,18 @@ #undef NPY_SIZEOF_LONGDOUBLE #undef NPY_SIZEOF_COMPLEX_LONGDOUBLE - #ifdef __x86_64 - #define NPY_SIZEOF_LONGDOUBLE 16 - #define NPY_SIZEOF_COMPLEX_LONGDOUBLE 32 - #elif defined(__arm64__) + #if defined(__arm64__) #define NPY_SIZEOF_LONGDOUBLE 8 #define NPY_SIZEOF_COMPLEX_LONGDOUBLE 16 + #elif defined(__x86_64) + #define NPY_SIZEOF_LONGDOUBLE 16 + #define NPY_SIZEOF_COMPLEX_LONGDOUBLE 32 + #elif defined (__i386) + #define NPY_SIZEOF_LONGDOUBLE 12 + #define NPY_SIZEOF_COMPLEX_LONGDOUBLE 24 + #elif defined(__ppc__) || defined (__ppc64__) + #define NPY_SIZEOF_LONGDOUBLE 16 + #define NPY_SIZEOF_COMPLEX_LONGDOUBLE 32 #else #error "unknown architecture" #endif diff --git a/numpy/core/memmap.pyi b/numpy/core/memmap.pyi index ba595bf1e..03c6b772d 100644 --- a/numpy/core/memmap.pyi +++ b/numpy/core/memmap.pyi @@ -1,5 +1,3 @@ -from typing import List - from numpy import memmap as memmap -__all__: List[str] +__all__: list[str] diff --git a/numpy/core/multiarray.pyi b/numpy/core/multiarray.pyi index a9f68e181..423aed85e 100644 --- a/numpy/core/multiarray.pyi +++ b/numpy/core/multiarray.pyi @@ -2,19 +2,13 @@ import os import datetime as dt +from collections.abc import Sequence, Callable, Iterable from typing import ( Literal as L, Any, - Callable, - Iterable, - Optional, overload, TypeVar, - List, - Type, Union, - Sequence, - Tuple, SupportsIndex, final, Final, @@ -67,6 +61,7 @@ from numpy.typing import ( NDArray, ArrayLike, _SupportsArray, + _NestedSequence, 
_FiniteNestedSequence, _ArrayLikeBool_co, _ArrayLikeUInt_co, @@ -90,7 +85,7 @@ _ArrayType = TypeVar("_ArrayType", bound=NDArray[Any]) # Subscriptable subsets of `npt.DTypeLike` and `npt.ArrayLike` _DTypeLike = Union[ dtype[_SCT], - Type[_SCT], + type[_SCT], _SupportsDType[dtype[_SCT]], ] _ArrayLike = _FiniteNestedSequence[_SupportsArray[dtype[_SCT]]] @@ -120,7 +115,7 @@ _RollKind = L[ # `raise` is deliberately excluded "modifiedpreceding", ] -__all__: List[str] +__all__: list[str] ALLOW_THREADS: Final[int] # 0 or 1 (system-specific) BUFSIZE: L[8192] @@ -138,7 +133,7 @@ def empty_like( dtype: None = ..., order: _OrderKACF = ..., subok: bool = ..., - shape: Optional[_ShapeLike] = ..., + shape: None | _ShapeLike = ..., ) -> _ArrayType: ... @overload def empty_like( @@ -146,7 +141,7 @@ def empty_like( dtype: None = ..., order: _OrderKACF = ..., subok: bool = ..., - shape: Optional[_ShapeLike] = ..., + shape: None | _ShapeLike = ..., ) -> NDArray[_SCT]: ... @overload def empty_like( @@ -154,7 +149,7 @@ def empty_like( dtype: None = ..., order: _OrderKACF = ..., subok: bool = ..., - shape: Optional[_ShapeLike] = ..., + shape: None | _ShapeLike = ..., ) -> NDArray[Any]: ... @overload def empty_like( @@ -162,7 +157,7 @@ def empty_like( dtype: _DTypeLike[_SCT], order: _OrderKACF = ..., subok: bool = ..., - shape: Optional[_ShapeLike] = ..., + shape: None | _ShapeLike = ..., ) -> NDArray[_SCT]: ... @overload def empty_like( @@ -170,7 +165,7 @@ def empty_like( dtype: DTypeLike, order: _OrderKACF = ..., subok: bool = ..., - shape: Optional[_ShapeLike] = ..., + shape: None | _ShapeLike = ..., ) -> NDArray[Any]: ... @overload @@ -284,26 +279,26 @@ def unravel_index( # type: ignore[misc] indices: _IntLike_co, shape: _ShapeLike, order: _OrderCF = ..., -) -> Tuple[intp, ...]: ... +) -> tuple[intp, ...]: ... @overload def unravel_index( indices: _ArrayLikeInt_co, shape: _ShapeLike, order: _OrderCF = ..., -) -> Tuple[NDArray[intp], ...]: ... +) -> tuple[NDArray[intp], ...]: ... 
@overload def ravel_multi_index( # type: ignore[misc] multi_index: Sequence[_IntLike_co], dims: Sequence[SupportsIndex], - mode: Union[_ModeKind, Tuple[_ModeKind, ...]] = ..., + mode: _ModeKind | tuple[_ModeKind, ...] = ..., order: _OrderCF = ..., ) -> intp: ... @overload def ravel_multi_index( multi_index: Sequence[_ArrayLikeInt_co], dims: Sequence[SupportsIndex], - mode: Union[_ModeKind, Tuple[_ModeKind, ...]] = ..., + mode: _ModeKind | tuple[_ModeKind, ...] = ..., order: _OrderCF = ..., ) -> NDArray[intp]: ... @@ -311,51 +306,51 @@ def ravel_multi_index( def concatenate( # type: ignore[misc] arrays: _ArrayLike[_SCT], /, - axis: Optional[SupportsIndex] = ..., + axis: None | SupportsIndex = ..., out: None = ..., *, dtype: None = ..., - casting: Optional[_CastingKind] = ... + casting: None | _CastingKind = ... ) -> NDArray[_SCT]: ... @overload def concatenate( # type: ignore[misc] arrays: ArrayLike, /, - axis: Optional[SupportsIndex] = ..., + axis: None | SupportsIndex = ..., out: None = ..., *, dtype: None = ..., - casting: Optional[_CastingKind] = ... + casting: None | _CastingKind = ... ) -> NDArray[Any]: ... @overload def concatenate( # type: ignore[misc] arrays: ArrayLike, /, - axis: Optional[SupportsIndex] = ..., + axis: None | SupportsIndex = ..., out: None = ..., *, dtype: _DTypeLike[_SCT], - casting: Optional[_CastingKind] = ... + casting: None | _CastingKind = ... ) -> NDArray[_SCT]: ... @overload def concatenate( # type: ignore[misc] arrays: ArrayLike, /, - axis: Optional[SupportsIndex] = ..., + axis: None | SupportsIndex = ..., out: None = ..., *, dtype: DTypeLike, - casting: Optional[_CastingKind] = ... + casting: None | _CastingKind = ... ) -> NDArray[Any]: ... @overload def concatenate( arrays: ArrayLike, /, - axis: Optional[SupportsIndex] = ..., + axis: None | SupportsIndex = ..., out: _ArrayType = ..., *, dtype: DTypeLike = ..., - casting: Optional[_CastingKind] = ... + casting: None | _CastingKind = ... ) -> _ArrayType: ... 
def inner( @@ -368,7 +363,7 @@ def inner( def where( condition: ArrayLike, /, -) -> Tuple[NDArray[intp], ...]: ... +) -> tuple[NDArray[intp], ...]: ... @overload def where( condition: ArrayLike, @@ -379,13 +374,13 @@ def where( def lexsort( keys: ArrayLike, - axis: Optional[SupportsIndex] = ..., + axis: None | SupportsIndex = ..., ) -> Any: ... def can_cast( - from_: Union[ArrayLike, DTypeLike], + from_: ArrayLike | DTypeLike, to: DTypeLike, - casting: Optional[_CastingKind] = ..., + casting: None | _CastingKind = ..., ) -> bool: ... def min_scalar_type( @@ -393,7 +388,7 @@ def min_scalar_type( ) -> dtype[Any]: ... def result_type( - *arrays_and_dtypes: Union[ArrayLike, DTypeLike], + *arrays_and_dtypes: ArrayLike | DTypeLike, ) -> dtype[Any]: ... @overload @@ -421,15 +416,15 @@ def vdot(a: Any, b: _ArrayLikeObject_co, /) -> Any: ... def bincount( x: ArrayLike, /, - weights: Optional[ArrayLike] = ..., + weights: None | ArrayLike = ..., minlength: SupportsIndex = ..., ) -> NDArray[intp]: ... def copyto( dst: NDArray[Any], src: ArrayLike, - casting: Optional[_CastingKind] = ..., - where: Optional[_ArrayLikeBool_co] = ..., + casting: None | _CastingKind = ..., + where: None | _ArrayLikeBool_co = ..., ) -> None: ... def putmask( @@ -441,15 +436,15 @@ def putmask( def packbits( a: _ArrayLikeInt_co, /, - axis: Optional[SupportsIndex] = ..., + axis: None | SupportsIndex = ..., bitorder: L["big", "little"] = ..., ) -> NDArray[uint8]: ... def unpackbits( a: _ArrayLike[uint8], /, - axis: Optional[SupportsIndex] = ..., - count: Optional[SupportsIndex] = ..., + axis: None | SupportsIndex = ..., + count: None | SupportsIndex = ..., bitorder: L["big", "little"] = ..., ) -> NDArray[uint8]: ... @@ -457,14 +452,14 @@ def shares_memory( a: object, b: object, /, - max_work: Optional[int] = ..., + max_work: None | int = ..., ) -> bool: ... def may_share_memory( a: object, b: object, /, - max_work: Optional[int] = ..., + max_work: None | int = ..., ) -> bool: ... 
@overload @@ -599,10 +594,10 @@ def asfortranarray( like: ArrayLike = ..., ) -> NDArray[Any]: ... -# In practice `List[Any]` is list with an int, int and a valid +# In practice `list[Any]` is list with an int, int and a valid # `np.seterrcall()` object -def geterrobj() -> List[Any]: ... -def seterrobj(errobj: List[Any], /) -> None: ... +def geterrobj() -> list[Any]: ... +def seterrobj(errobj: list[Any], /) -> None: ... def promote_types(__type1: DTypeLike, __type2: DTypeLike) -> dtype[Any]: ... @@ -811,35 +806,35 @@ def arange( def datetime_data( dtype: str | _DTypeLike[datetime64] | _DTypeLike[timedelta64], /, -) -> Tuple[str, int]: ... +) -> tuple[str, int]: ... # The datetime functions perform unsafe casts to `datetime64[D]`, # so a lot of different argument types are allowed here @overload def busday_count( # type: ignore[misc] - begindates: _ScalarLike_co, - enddates: _ScalarLike_co, + begindates: _ScalarLike_co | dt.date, + enddates: _ScalarLike_co | dt.date, weekmask: ArrayLike = ..., - holidays: None | ArrayLike = ..., + holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ..., busdaycal: None | busdaycalendar = ..., out: None = ..., ) -> int_: ... @overload def busday_count( # type: ignore[misc] - begindates: ArrayLike, - enddates: ArrayLike, + begindates: ArrayLike | dt.date | _NestedSequence[dt.date], + enddates: ArrayLike | dt.date | _NestedSequence[dt.date], weekmask: ArrayLike = ..., - holidays: None | ArrayLike = ..., + holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ..., busdaycal: None | busdaycalendar = ..., out: None = ..., ) -> NDArray[int_]: ... 
@overload def busday_count( - begindates: ArrayLike, - enddates: ArrayLike, + begindates: ArrayLike | dt.date | _NestedSequence[dt.date], + enddates: ArrayLike | dt.date | _NestedSequence[dt.date], weekmask: ArrayLike = ..., - holidays: None | ArrayLike = ..., + holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ..., busdaycal: None | busdaycalendar = ..., out: _ArrayType = ..., ) -> _ArrayType: ... @@ -847,100 +842,100 @@ def busday_count( # `roll="raise"` is (more or less?) equivalent to `casting="safe"` @overload def busday_offset( # type: ignore[misc] - dates: datetime64, - offsets: _TD64Like_co, + dates: datetime64 | dt.date, + offsets: _TD64Like_co | dt.timedelta, roll: L["raise"] = ..., weekmask: ArrayLike = ..., - holidays: None | ArrayLike = ..., + holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ..., busdaycal: None | busdaycalendar = ..., out: None = ..., ) -> datetime64: ... @overload def busday_offset( # type: ignore[misc] - dates: _ArrayLike[datetime64], - offsets: _ArrayLikeTD64_co, + dates: _ArrayLike[datetime64] | dt.date | _NestedSequence[dt.date], + offsets: _ArrayLikeTD64_co | dt.timedelta | _NestedSequence[dt.timedelta], roll: L["raise"] = ..., weekmask: ArrayLike = ..., - holidays: None | ArrayLike = ..., + holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ..., busdaycal: None | busdaycalendar = ..., out: None = ..., ) -> NDArray[datetime64]: ... @overload def busday_offset( # type: ignore[misc] - dates: _ArrayLike[datetime64], - offsets: _ArrayLike[timedelta64], + dates: _ArrayLike[datetime64] | dt.date | _NestedSequence[dt.date], + offsets: _ArrayLikeTD64_co | dt.timedelta | _NestedSequence[dt.timedelta], roll: L["raise"] = ..., weekmask: ArrayLike = ..., - holidays: None | ArrayLike = ..., + holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ..., busdaycal: None | busdaycalendar = ..., out: _ArrayType = ..., ) -> _ArrayType: ... 
@overload def busday_offset( # type: ignore[misc] - dates: _ScalarLike_co, - offsets: _ScalarLike_co, + dates: _ScalarLike_co | dt.date, + offsets: _ScalarLike_co | dt.timedelta, roll: _RollKind, weekmask: ArrayLike = ..., - holidays: None | ArrayLike = ..., + holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ..., busdaycal: None | busdaycalendar = ..., out: None = ..., ) -> datetime64: ... @overload def busday_offset( # type: ignore[misc] - dates: ArrayLike, - offsets: ArrayLike, + dates: ArrayLike | dt.date | _NestedSequence[dt.date], + offsets: ArrayLike | dt.timedelta | _NestedSequence[dt.timedelta], roll: _RollKind, weekmask: ArrayLike = ..., - holidays: None | ArrayLike = ..., + holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ..., busdaycal: None | busdaycalendar = ..., out: None = ..., ) -> NDArray[datetime64]: ... @overload def busday_offset( - dates: ArrayLike, - offsets: ArrayLike, + dates: ArrayLike | dt.date | _NestedSequence[dt.date], + offsets: ArrayLike | dt.timedelta | _NestedSequence[dt.timedelta], roll: _RollKind, weekmask: ArrayLike = ..., - holidays: None | ArrayLike = ..., + holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ..., busdaycal: None | busdaycalendar = ..., out: _ArrayType = ..., ) -> _ArrayType: ... @overload def is_busday( # type: ignore[misc] - dates: _ScalarLike_co, + dates: _ScalarLike_co | dt.date, weekmask: ArrayLike = ..., - holidays: None | ArrayLike = ..., + holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ..., busdaycal: None | busdaycalendar = ..., out: None = ..., ) -> bool_: ... @overload def is_busday( # type: ignore[misc] - dates: ArrayLike, + dates: ArrayLike | _NestedSequence[dt.date], weekmask: ArrayLike = ..., - holidays: None | ArrayLike = ..., + holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ..., busdaycal: None | busdaycalendar = ..., out: None = ..., ) -> NDArray[bool_]: ... 
@overload def is_busday( - dates: ArrayLike, + dates: ArrayLike | _NestedSequence[dt.date], weekmask: ArrayLike = ..., - holidays: None | ArrayLike = ..., + holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ..., busdaycal: None | busdaycalendar = ..., out: _ArrayType = ..., ) -> _ArrayType: ... @overload def datetime_as_string( # type: ignore[misc] - arr: datetime64, + arr: datetime64 | dt.date, unit: None | L["auto"] | _UnitKind = ..., timezone: L["naive", "UTC", "local"] | dt.tzinfo = ..., casting: _CastingKind = ..., ) -> str_: ... @overload def datetime_as_string( - arr: _ArrayLikeDT64_co, + arr: _ArrayLikeDT64_co | _NestedSequence[dt.date], unit: None | L["auto"] | _UnitKind = ..., timezone: L["naive", "UTC", "local"] | dt.tzinfo = ..., casting: _CastingKind = ..., @@ -1024,4 +1019,4 @@ def nested_iters( order: _OrderKACF = ..., casting: _CastingKind = ..., buffersize: SupportsIndex = ..., -) -> Tuple[nditer, ...]: ... +) -> tuple[nditer, ...]: ... diff --git a/numpy/core/numeric.pyi b/numpy/core/numeric.pyi index d7ec30351..8b92abab4 100644 --- a/numpy/core/numeric.pyi +++ b/numpy/core/numeric.pyi @@ -1,14 +1,10 @@ +from collections.abc import Callable, Sequence from typing import ( Any, Union, - Sequence, - Tuple, - Callable, - List, overload, TypeVar, Literal, - Type, SupportsAbs, SupportsIndex, NoReturn, @@ -57,13 +53,13 @@ _ArrayType = TypeVar("_ArrayType", bound=NDArray[Any]) _DTypeLike = Union[ dtype[_SCT], - Type[_SCT], + type[_SCT], _SupportsDType[dtype[_SCT]], ] _ArrayLike = _FiniteNestedSequence[_SupportsArray[dtype[_SCT]]] _CorrelateMode = Literal["valid", "same", "full"] -__all__: List[str] +__all__: list[str] @overload def zeros_like( @@ -406,43 +402,43 @@ def outer( def tensordot( a: _ArrayLikeBool_co, b: _ArrayLikeBool_co, - axes: int | Tuple[_ShapeLike, _ShapeLike] = ..., + axes: int | tuple[_ShapeLike, _ShapeLike] = ..., ) -> NDArray[bool_]: ... 
@overload def tensordot( a: _ArrayLikeUInt_co, b: _ArrayLikeUInt_co, - axes: int | Tuple[_ShapeLike, _ShapeLike] = ..., + axes: int | tuple[_ShapeLike, _ShapeLike] = ..., ) -> NDArray[unsignedinteger[Any]]: ... @overload def tensordot( a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, - axes: int | Tuple[_ShapeLike, _ShapeLike] = ..., + axes: int | tuple[_ShapeLike, _ShapeLike] = ..., ) -> NDArray[signedinteger[Any]]: ... @overload def tensordot( a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, - axes: int | Tuple[_ShapeLike, _ShapeLike] = ..., + axes: int | tuple[_ShapeLike, _ShapeLike] = ..., ) -> NDArray[floating[Any]]: ... @overload def tensordot( a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, - axes: int | Tuple[_ShapeLike, _ShapeLike] = ..., + axes: int | tuple[_ShapeLike, _ShapeLike] = ..., ) -> NDArray[complexfloating[Any, Any]]: ... @overload def tensordot( a: _ArrayLikeTD64_co, b: _ArrayLikeTD64_co, - axes: int | Tuple[_ShapeLike, _ShapeLike] = ..., + axes: int | tuple[_ShapeLike, _ShapeLike] = ..., ) -> NDArray[timedelta64]: ... @overload def tensordot( a: _ArrayLikeObject_co, b: _ArrayLikeObject_co, - axes: int | Tuple[_ShapeLike, _ShapeLike] = ..., + axes: int | tuple[_ShapeLike, _ShapeLike] = ..., ) -> NDArray[object_]: ... @overload @@ -528,15 +524,15 @@ def cross( @overload def indices( dimensions: Sequence[int], - dtype: Type[int] = ..., + dtype: type[int] = ..., sparse: Literal[False] = ..., ) -> NDArray[int_]: ... @overload def indices( dimensions: Sequence[int], - dtype: Type[int] = ..., + dtype: type[int] = ..., sparse: Literal[True] = ..., -) -> Tuple[NDArray[int_], ...]: ... +) -> tuple[NDArray[int_], ...]: ... @overload def indices( dimensions: Sequence[int], @@ -548,7 +544,7 @@ def indices( dimensions: Sequence[int], dtype: _DTypeLike[_SCT], sparse: Literal[True], -) -> Tuple[NDArray[_SCT], ...]: ... +) -> tuple[NDArray[_SCT], ...]: ... 
@overload def indices( dimensions: Sequence[int], @@ -560,7 +556,7 @@ def indices( dimensions: Sequence[int], dtype: DTypeLike, sparse: Literal[True], -) -> Tuple[NDArray[Any], ...]: ... +) -> tuple[NDArray[Any], ...]: ... def fromfunction( function: Callable[..., _T], diff --git a/numpy/core/numerictypes.py b/numpy/core/numerictypes.py index 8e5de852b..3d1cb6fd1 100644 --- a/numpy/core/numerictypes.py +++ b/numpy/core/numerictypes.py @@ -516,7 +516,7 @@ def _scalar_type_key(typ): return (dt.kind.lower(), dt.itemsize) -ScalarType = [int, float, complex, int, bool, bytes, str, memoryview] +ScalarType = [int, float, complex, bool, bytes, str, memoryview] ScalarType += sorted(_concrete_types, key=_scalar_type_key) ScalarType = tuple(ScalarType) diff --git a/numpy/core/numerictypes.pyi b/numpy/core/numerictypes.pyi index 1d3ff773b..09c180a0f 100644 --- a/numpy/core/numerictypes.pyi +++ b/numpy/core/numerictypes.pyi @@ -1,16 +1,12 @@ import sys import types +from collections.abc import Iterable from typing import ( Literal as L, - Type, Union, - Tuple, overload, Any, TypeVar, - Dict, - List, - Iterable, Protocol, TypedDict, ) @@ -57,7 +53,7 @@ _SCT = TypeVar("_SCT", bound=generic) # A paramtrizable subset of `npt.DTypeLike` _DTypeLike = Union[ - Type[_SCT], + type[_SCT], dtype[_SCT], _SupportsDType[dtype[_SCT]], ] @@ -78,48 +74,48 @@ class _TypeCodes(TypedDict): Datetime: L['Mm'] All: L['?bhilqpBHILQPefdgFDGSUVOMm'] -class _typedict(Dict[Type[generic], _T]): +class _typedict(dict[type[generic], _T]): def __getitem__(self, key: DTypeLike) -> _T: ... 
if sys.version_info >= (3, 10): _TypeTuple = Union[ - Type[Any], + type[Any], types.UnionType, - Tuple[Union[Type[Any], types.UnionType, Tuple[Any, ...]], ...], + tuple[Union[type[Any], types.UnionType, tuple[Any, ...]], ...], ] else: _TypeTuple = Union[ - Type[Any], - Tuple[Union[Type[Any], Tuple[Any, ...]], ...], + type[Any], + tuple[Union[type[Any], tuple[Any, ...]], ...], ] -__all__: List[str] +__all__: list[str] @overload -def maximum_sctype(t: _DTypeLike[_SCT]) -> Type[_SCT]: ... +def maximum_sctype(t: _DTypeLike[_SCT]) -> type[_SCT]: ... @overload -def maximum_sctype(t: DTypeLike) -> Type[Any]: ... +def maximum_sctype(t: DTypeLike) -> type[Any]: ... @overload -def issctype(rep: dtype[Any] | Type[Any]) -> bool: ... +def issctype(rep: dtype[Any] | type[Any]) -> bool: ... @overload def issctype(rep: object) -> L[False]: ... @overload -def obj2sctype(rep: _DTypeLike[_SCT], default: None = ...) -> None | Type[_SCT]: ... +def obj2sctype(rep: _DTypeLike[_SCT], default: None = ...) -> None | type[_SCT]: ... @overload -def obj2sctype(rep: _DTypeLike[_SCT], default: _T) -> _T | Type[_SCT]: ... +def obj2sctype(rep: _DTypeLike[_SCT], default: _T) -> _T | type[_SCT]: ... @overload -def obj2sctype(rep: DTypeLike, default: None = ...) -> None | Type[Any]: ... +def obj2sctype(rep: DTypeLike, default: None = ...) -> None | type[Any]: ... @overload -def obj2sctype(rep: DTypeLike, default: _T) -> _T | Type[Any]: ... +def obj2sctype(rep: DTypeLike, default: _T) -> _T | type[Any]: ... @overload def obj2sctype(rep: object, default: None = ...) -> None: ... @overload def obj2sctype(rep: object, default: _T) -> _T: ... @overload -def issubclass_(arg1: Type[Any], arg2: _TypeTuple) -> bool: ... +def issubclass_(arg1: type[Any], arg2: _TypeTuple) -> bool: ... @overload def issubclass_(arg1: object, arg2: object) -> L[False]: ... 
@@ -137,37 +133,36 @@ def find_common_type( cast: _typedict[_CastFunc] nbytes: _typedict[int] typecodes: _TypeCodes -ScalarType: Tuple[ - Type[int], - Type[float], - Type[complex], - Type[int], - Type[bool], - Type[bytes], - Type[str], - Type[memoryview], - Type[bool_], - Type[csingle], - Type[cdouble], - Type[clongdouble], - Type[half], - Type[single], - Type[double], - Type[longdouble], - Type[byte], - Type[short], - Type[intc], - Type[int_], - Type[longlong], - Type[timedelta64], - Type[datetime64], - Type[object_], - Type[bytes_], - Type[str_], - Type[ubyte], - Type[ushort], - Type[uintc], - Type[uint], - Type[ulonglong], - Type[void], +ScalarType: tuple[ + type[int], + type[float], + type[complex], + type[bool], + type[bytes], + type[str], + type[memoryview], + type[bool_], + type[csingle], + type[cdouble], + type[clongdouble], + type[half], + type[single], + type[double], + type[longdouble], + type[byte], + type[short], + type[intc], + type[int_], + type[longlong], + type[timedelta64], + type[datetime64], + type[object_], + type[bytes_], + type[str_], + type[ubyte], + type[ushort], + type[uintc], + type[uint], + type[ulonglong], + type[void], ] diff --git a/numpy/core/records.pyi b/numpy/core/records.pyi index fda118276..4eee93d87 100644 --- a/numpy/core/records.pyi +++ b/numpy/core/records.pyi @@ -1,12 +1,9 @@ import os +from collections.abc import Sequence, Iterable from typing import ( - List, - Sequence, Any, TypeVar, - Iterable, overload, - Tuple, Protocol, ) @@ -39,7 +36,7 @@ class _SupportsReadInto(Protocol): def tell(self, /) -> int: ... def readinto(self, buffer: memoryview, /) -> int: ... -__all__: List[str] +__all__: list[str] @overload def fromarrays( @@ -67,7 +64,7 @@ def fromarrays( @overload def fromrecords( - recList: _ArrayLikeVoid_co | Tuple[Any, ...] | _NestedSequence[Tuple[Any, ...]], + recList: _ArrayLikeVoid_co | tuple[Any, ...] 
| _NestedSequence[tuple[Any, ...]], dtype: DTypeLike = ..., shape: None | _ShapeLike = ..., formats: None = ..., @@ -78,7 +75,7 @@ def fromrecords( ) -> _RecArray[record]: ... @overload def fromrecords( - recList: _ArrayLikeVoid_co | Tuple[Any, ...] | _NestedSequence[Tuple[Any, ...]], + recList: _ArrayLikeVoid_co | tuple[Any, ...] | _NestedSequence[tuple[Any, ...]], dtype: None = ..., shape: None | _ShapeLike = ..., *, @@ -181,3 +178,57 @@ def array( byteorder: None | _ByteOrder = ..., copy: bool = ..., ) -> _RecArray[record]: ... +@overload +def array( + obj: None, + dtype: DTypeLike, + shape: _ShapeLike, + offset: int = ..., + formats: None = ..., + names: None = ..., + titles: None = ..., + aligned: bool = ..., + byteorder: None = ..., + copy: bool = ..., +) -> _RecArray[Any]: ... +@overload +def array( + obj: None, + dtype: None = ..., + *, + shape: _ShapeLike, + offset: int = ..., + formats: DTypeLike, + names: None | str | Sequence[str] = ..., + titles: None | str | Sequence[str] = ..., + aligned: bool = ..., + byteorder: None | _ByteOrder = ..., + copy: bool = ..., +) -> _RecArray[record]: ... +@overload +def array( + obj: _SupportsReadInto, + dtype: DTypeLike, + shape: None | _ShapeLike = ..., + offset: int = ..., + formats: None = ..., + names: None = ..., + titles: None = ..., + aligned: bool = ..., + byteorder: None = ..., + copy: bool = ..., +) -> _RecArray[Any]: ... +@overload +def array( + obj: _SupportsReadInto, + dtype: None = ..., + shape: None | _ShapeLike = ..., + offset: int = ..., + *, + formats: DTypeLike, + names: None | str | Sequence[str] = ..., + titles: None | str | Sequence[str] = ..., + aligned: bool = ..., + byteorder: None | _ByteOrder = ..., + copy: bool = ..., +) -> _RecArray[record]: ... 
diff --git a/numpy/core/setup.py b/numpy/core/setup.py index 1ec178445..22cac1e9a 100644 --- a/numpy/core/setup.py +++ b/numpy/core/setup.py @@ -24,6 +24,11 @@ NPY_RELAXED_STRIDES_CHECKING = (os.environ.get('NPY_RELAXED_STRIDES_CHECKING', " NPY_RELAXED_STRIDES_DEBUG = (os.environ.get('NPY_RELAXED_STRIDES_DEBUG', "0") != "0") NPY_RELAXED_STRIDES_DEBUG = NPY_RELAXED_STRIDES_DEBUG and NPY_RELAXED_STRIDES_CHECKING +# Set NPY_DISABLE_SVML=1 in the environment to disable the vendored SVML +# library. This option only has significance on a Linux x86_64 host and is most +# useful to avoid improperly requiring SVML when cross compiling. +NPY_DISABLE_SVML = (os.environ.get('NPY_DISABLE_SVML', "0") == "1") + # XXX: ugly, we use a class to avoid calling twice some expensive functions in # config.h/numpyconfig.h. I don't see a better way because distutils force # config.h generation inside an Extension class, and as such sharing @@ -68,6 +73,8 @@ def can_link_svml(): """SVML library is supported only on x86_64 architecture and currently only on linux """ + if NPY_DISABLE_SVML: + return False machine = platform.machine() system = platform.system() return "x86_64" in machine and system == "Linux" @@ -411,7 +418,8 @@ def visibility_define(config): return '' def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration, dot_join + from numpy.distutils.misc_util import (Configuration, dot_join, + exec_mod_from_location) from numpy.distutils.system_info import (get_info, blas_opt_info, lapack_opt_info) @@ -428,8 +436,8 @@ def configuration(parent_package='',top_path=None): generate_umath_py = join(codegen_dir, 'generate_umath.py') n = dot_join(config.name, 'generate_umath') - generate_umath = npy_load_module('_'.join(n.split('.')), - generate_umath_py, ('.py', 'U', 1)) + generate_umath = exec_mod_from_location('_'.join(n.split('.')), + generate_umath_py) header_dir = 'include/numpy' # this is relative to config.path_in_package @@ -945,8 
+953,8 @@ def configuration(parent_package='',top_path=None): join('src', 'npysort', 'radixsort.cpp'), join('src', 'common', 'npy_partition.h.src'), join('src', 'npysort', 'selection.c.src'), - join('src', 'common', 'npy_binsearch.h.src'), - join('src', 'npysort', 'binsearch.c.src'), + join('src', 'common', 'npy_binsearch.h'), + join('src', 'npysort', 'binsearch.cpp'), ] ####################################################################### @@ -965,6 +973,21 @@ def configuration(parent_package='',top_path=None): generate_umath.__file__)) return [] + def generate_umath_doc_header(ext, build_dir): + from numpy.distutils.misc_util import exec_mod_from_location + + target = join(build_dir, header_dir, '_umath_doc_generated.h') + dir = os.path.dirname(target) + if not os.path.exists(dir): + os.makedirs(dir) + + generate_umath_doc_py = join(codegen_dir, 'generate_umath_doc.py') + if newer(generate_umath_doc_py, target): + n = dot_join(config.name, 'generate_umath_doc') + generate_umath_doc = exec_mod_from_location( + '_'.join(n.split('.')), generate_umath_doc_py) + generate_umath_doc.write_code(target) + umath_src = [ join('src', 'umath', 'umathmodule.c'), join('src', 'umath', 'reduction.c'), @@ -1005,6 +1028,7 @@ def configuration(parent_package='',top_path=None): join('src', 'umath', 'simd.inc.src'), join('src', 'umath', 'override.h'), join(codegen_dir, 'generate_ufunc_api.py'), + join(codegen_dir, 'ufunc_docstrings.py'), ] svml_path = join('numpy', 'core', 'src', 'umath', 'svml') @@ -1024,6 +1048,7 @@ def configuration(parent_package='',top_path=None): join(codegen_dir, 'generate_numpy_api.py'), join('*.py'), generate_umath_c, + generate_umath_doc_header, generate_ufunc_api, ], depends=deps + multiarray_deps + umath_deps + @@ -1066,7 +1091,7 @@ def configuration(parent_package='',top_path=None): ####################################################################### config.add_extension('_operand_flag_tests', - sources=[join('src', 'umath', 
'_operand_flag_tests.c.src')]) + sources=[join('src', 'umath', '_operand_flag_tests.c')]) ####################################################################### # SIMD module # diff --git a/numpy/core/shape_base.pyi b/numpy/core/shape_base.pyi index 159ad2781..63cbd773c 100644 --- a/numpy/core/shape_base.pyi +++ b/numpy/core/shape_base.pyi @@ -1,4 +1,5 @@ -from typing import TypeVar, overload, List, Sequence, Any, SupportsIndex +from collections.abc import Sequence +from typing import TypeVar, overload, Any, SupportsIndex from numpy import generic, dtype from numpy.typing import ArrayLike, NDArray, _FiniteNestedSequence, _SupportsArray @@ -8,28 +9,28 @@ _ArrayType = TypeVar("_ArrayType", bound=NDArray[Any]) _ArrayLike = _FiniteNestedSequence[_SupportsArray[dtype[_SCT]]] -__all__: List[str] +__all__: list[str] @overload def atleast_1d(arys: _ArrayLike[_SCT], /) -> NDArray[_SCT]: ... @overload def atleast_1d(arys: ArrayLike, /) -> NDArray[Any]: ... @overload -def atleast_1d(*arys: ArrayLike) -> List[NDArray[Any]]: ... +def atleast_1d(*arys: ArrayLike) -> list[NDArray[Any]]: ... @overload def atleast_2d(arys: _ArrayLike[_SCT], /) -> NDArray[_SCT]: ... @overload def atleast_2d(arys: ArrayLike, /) -> NDArray[Any]: ... @overload -def atleast_2d(*arys: ArrayLike) -> List[NDArray[Any]]: ... +def atleast_2d(*arys: ArrayLike) -> list[NDArray[Any]]: ... @overload def atleast_3d(arys: _ArrayLike[_SCT], /) -> NDArray[_SCT]: ... @overload def atleast_3d(arys: ArrayLike, /) -> NDArray[Any]: ... @overload -def atleast_3d(*arys: ArrayLike) -> List[NDArray[Any]]: ... +def atleast_3d(*arys: ArrayLike) -> list[NDArray[Any]]: ... @overload def vstack(tup: Sequence[_ArrayLike[_SCT]]) -> NDArray[_SCT]: ... 
diff --git a/numpy/core/src/_simd/_simd_convert.inc b/numpy/core/src/_simd/_simd_convert.inc index 73869ef1f..46e044479 100644 --- a/numpy/core/src/_simd/_simd_convert.inc +++ b/numpy/core/src/_simd/_simd_convert.inc @@ -94,6 +94,7 @@ simd_sequence_from_iterable(PyObject *obj, simd_data_type dtype, Py_ssize_t min_ "minimum acceptable size of the required sequence is %d, given(%d)", min_size, seq_size ); + Py_DECREF(seq_obj); return NULL; } npyv_lanetype_u8 *dst = simd_sequence_new(seq_size, dtype); diff --git a/numpy/core/src/common/npy_binsearch.h b/numpy/core/src/common/npy_binsearch.h new file mode 100644 index 000000000..8d2f0714d --- /dev/null +++ b/numpy/core/src/common/npy_binsearch.h @@ -0,0 +1,31 @@ +#ifndef __NPY_BINSEARCH_H__ +#define __NPY_BINSEARCH_H__ + +#include "npy_sort.h" +#include <numpy/npy_common.h> +#include <numpy/ndarraytypes.h> + + +#ifdef __cplusplus +extern "C" { +#endif + +typedef void (PyArray_BinSearchFunc)(const char*, const char*, char*, + npy_intp, npy_intp, + npy_intp, npy_intp, npy_intp, + PyArrayObject*); + +typedef int (PyArray_ArgBinSearchFunc)(const char*, const char*, + const char*, char*, + npy_intp, npy_intp, npy_intp, + npy_intp, npy_intp, npy_intp, + PyArrayObject*); + +NPY_NO_EXPORT PyArray_BinSearchFunc* get_binsearch_func(PyArray_Descr *dtype, NPY_SEARCHSIDE side); +NPY_NO_EXPORT PyArray_ArgBinSearchFunc* get_argbinsearch_func(PyArray_Descr *dtype, NPY_SEARCHSIDE side); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/numpy/core/src/common/npy_binsearch.h.src b/numpy/core/src/common/npy_binsearch.h.src deleted file mode 100644 index 052c44482..000000000 --- a/numpy/core/src/common/npy_binsearch.h.src +++ /dev/null @@ -1,144 +0,0 @@ -#ifndef __NPY_BINSEARCH_H__ -#define __NPY_BINSEARCH_H__ - -#include "npy_sort.h" -#include <numpy/npy_common.h> -#include <numpy/ndarraytypes.h> - -#define ARRAY_SIZE(a) (sizeof(a)/sizeof(a[0])) - -typedef void (PyArray_BinSearchFunc)(const char*, const char*, char*, - npy_intp, 
npy_intp, - npy_intp, npy_intp, npy_intp, - PyArrayObject*); - -typedef int (PyArray_ArgBinSearchFunc)(const char*, const char*, - const char*, char*, - npy_intp, npy_intp, npy_intp, - npy_intp, npy_intp, npy_intp, - PyArrayObject*); - -typedef struct { - int typenum; - PyArray_BinSearchFunc *binsearch[NPY_NSEARCHSIDES]; -} binsearch_map; - -typedef struct { - int typenum; - PyArray_ArgBinSearchFunc *argbinsearch[NPY_NSEARCHSIDES]; -} argbinsearch_map; - -/**begin repeat - * - * #side = left, right# - */ - -/**begin repeat1 - * - * #suff = bool, byte, ubyte, short, ushort, int, uint, long, ulong, - * longlong, ulonglong, half, float, double, longdouble, - * cfloat, cdouble, clongdouble, datetime, timedelta# - */ - -NPY_NO_EXPORT void -binsearch_@side@_@suff@(const char *arr, const char *key, char *ret, - npy_intp arr_len, npy_intp key_len, - npy_intp arr_str, npy_intp key_str, npy_intp ret_str, - PyArrayObject *unused); -NPY_NO_EXPORT int -argbinsearch_@side@_@suff@(const char *arr, const char *key, - const char *sort, char *ret, - npy_intp arr_len, npy_intp key_len, - npy_intp arr_str, npy_intp key_str, - npy_intp sort_str, npy_intp ret_str, - PyArrayObject *unused); -/**end repeat1**/ - -NPY_NO_EXPORT void -npy_binsearch_@side@(const char *arr, const char *key, char *ret, - npy_intp arr_len, npy_intp key_len, - npy_intp arr_str, npy_intp key_str, - npy_intp ret_str, PyArrayObject *cmp); -NPY_NO_EXPORT int -npy_argbinsearch_@side@(const char *arr, const char *key, - const char *sort, char *ret, - npy_intp arr_len, npy_intp key_len, - npy_intp arr_str, npy_intp key_str, - npy_intp sort_str, npy_intp ret_str, - PyArrayObject *cmp); -/**end repeat**/ - -/**begin repeat - * - * #arg = , arg# - * #Arg = , Arg# - */ - -static @arg@binsearch_map _@arg@binsearch_map[] = { - /* If adding new types, make sure to keep them ordered by type num */ - /**begin repeat1 - * - * #TYPE = BOOL, BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG, - * LONGLONG, ULONGLONG, FLOAT, 
DOUBLE, LONGDOUBLE, - * CFLOAT, CDOUBLE, CLONGDOUBLE, DATETIME, TIMEDELTA, HALF# - * #suff = bool, byte, ubyte, short, ushort, int, uint, long, ulong, - * longlong, ulonglong, float, double, longdouble, - * cfloat, cdouble, clongdouble, datetime, timedelta, half# - */ - {NPY_@TYPE@, - { - &@arg@binsearch_left_@suff@, - &@arg@binsearch_right_@suff@, - }, - }, - /**end repeat1**/ -}; - -static PyArray_@Arg@BinSearchFunc *gen@arg@binsearch_map[] = { - &npy_@arg@binsearch_left, - &npy_@arg@binsearch_right, -}; - -static NPY_INLINE PyArray_@Arg@BinSearchFunc* -get_@arg@binsearch_func(PyArray_Descr *dtype, NPY_SEARCHSIDE side) -{ - npy_intp nfuncs = ARRAY_SIZE(_@arg@binsearch_map); - npy_intp min_idx = 0; - npy_intp max_idx = nfuncs; - int type = dtype->type_num; - - if (side >= NPY_NSEARCHSIDES) { - return NULL; - } - - /* - * It seems only fair that a binary search function be searched for - * using a binary search... - */ - while (min_idx < max_idx) { - npy_intp mid_idx = min_idx + ((max_idx - min_idx) >> 1); - - if (_@arg@binsearch_map[mid_idx].typenum < type) { - min_idx = mid_idx + 1; - } - else { - max_idx = mid_idx; - } - } - - if (min_idx < nfuncs && - _@arg@binsearch_map[min_idx].typenum == type) { - return _@arg@binsearch_map[min_idx].@arg@binsearch[side]; - } - - if (dtype->f->compare) { - return gen@arg@binsearch_map[side]; - } - - return NULL; -} -/**end repeat**/ - -#undef ARRAY_SIZE - -#endif diff --git a/numpy/core/src/common/npy_config.h b/numpy/core/src/common/npy_config.h index fd0f1855c..b01eca5ab 100644 --- a/numpy/core/src/common/npy_config.h +++ b/numpy/core/src/common/npy_config.h @@ -136,11 +136,23 @@ #undef HAVE_CPOWL #undef HAVE_CEXPL +#include <cygwin/version.h> +#if CYGWIN_VERSION_DLL_MAJOR < 3003 +/* https://cygwin.com/pipermail/cygwin-announce/2021-October/010268.html */ /* Builtin abs reports overflow */ #undef HAVE_CABSL #undef HAVE_HYPOTL #endif +#if CYGWIN_VERSION_DLL_MAJOR < 3002 +/* 
https://cygwin.com/pipermail/cygwin-announce/2021-March/009987.html */ +/* Segfault */ +#undef HAVE_MODFL +/* sqrt(-inf) returns -inf instead of -nan */ +#undef HAVE_SQRTL +#endif +#endif + /* Disable broken gnu trig functions */ #if defined(HAVE_FEATURES_H) #include <features.h> diff --git a/numpy/core/src/common/npy_cpu_features.c.src b/numpy/core/src/common/npy_cpu_features.c.src index a2383c45f..1385220f9 100644 --- a/numpy/core/src/common/npy_cpu_features.c.src +++ b/numpy/core/src/common/npy_cpu_features.c.src @@ -62,6 +62,7 @@ npy_cpu_features_dict(void) * AVX512IFMA, AVX512VBMI, AVX512VBMI2, AVX512BITALG, * AVX512_KNL, AVX512_KNM, AVX512_SKX, AVX512_CLX, AVX512_CNL, AVX512_ICL, * VSX, VSX2, VSX3, + * VX, VXE, VXE2, * NEON, NEON_FP16, NEON_VFPV4, ASIMD, FPHP, ASIMDHP, ASIMDDP, ASIMDFHM# */ if (PyDict_SetItemString(dict, "@feature@", @@ -509,6 +510,42 @@ npy__cpu_init_features(void) #endif } +/***************** ZARCH ******************/ + +#elif defined(__s390x__) + +#include <sys/auxv.h> +#ifndef HWCAP_S390_VXE + #define HWCAP_S390_VXE 8192 +#endif + +#ifndef HWCAP_S390_VXRS_EXT2 + #define HWCAP_S390_VXRS_EXT2 32768 +#endif + +static void +npy__cpu_init_features(void) +{ + memset(npy__cpu_have, 0, sizeof(npy__cpu_have[0]) * NPY_CPU_FEATURE_MAX); + + unsigned int hwcap = getauxval(AT_HWCAP); + if ((hwcap & HWCAP_S390_VX) == 0) { + return; + } + + if (hwcap & HWCAP_S390_VXRS_EXT2) { + npy__cpu_have[NPY_CPU_FEATURE_VX] = + npy__cpu_have[NPY_CPU_FEATURE_VXE] = + npy__cpu_have[NPY_CPU_FEATURE_VXE2] = 1; + return; + } + + npy__cpu_have[NPY_CPU_FEATURE_VXE] = (hwcap & HWCAP_S390_VXE) != 0; + + npy__cpu_have[NPY_CPU_FEATURE_VX] = 1; +} + + /***************** ARM ******************/ #elif defined(__arm__) || defined(__aarch64__) diff --git a/numpy/core/src/common/npy_cpu_features.h b/numpy/core/src/common/npy_cpu_features.h index ce1fc822a..1f52a445d 100644 --- a/numpy/core/src/common/npy_cpu_features.h +++ b/numpy/core/src/common/npy_cpu_features.h @@ -82,6 +82,15 
@@ enum npy_cpu_features // ARMv8.2 single&half-precision multiply NPY_CPU_FEATURE_ASIMDFHM = 307, + // IBM/ZARCH + NPY_CPU_FEATURE_VX = 350, + + // Vector-Enhancements Facility 1 + NPY_CPU_FEATURE_VXE = 351, + + // Vector-Enhancements Facility 2 + NPY_CPU_FEATURE_VXE2 = 352, + NPY_CPU_FEATURE_MAX }; @@ -138,6 +147,7 @@ npy_cpu_features_dict(void); * On aarch64: ['NEON', 'NEON_FP16', 'NEON_VPFV4', 'ASIMD'] * On ppc64: [] * On ppc64le: ['VSX', 'VSX2'] + * On s390x: [] * On any other arch or if the optimization is disabled: [] */ NPY_VISIBILITY_HIDDEN PyObject * @@ -159,6 +169,7 @@ npy_cpu_baseline_list(void); * On aarch64: ['ASIMDHP', 'ASIMDDP', 'ASIMDFHM'] * On ppc64: ['VSX', 'VSX2', 'VSX3'] * On ppc64le: ['VSX3'] + * On s390x: ['VX', 'VXE', VXE2] * On any other arch or if the optimization is disabled: [] */ NPY_VISIBILITY_HIDDEN PyObject * diff --git a/numpy/core/src/common/npy_sort.h.src b/numpy/core/src/common/npy_sort.h.src index b4a1e9b0c..a3f556f56 100644 --- a/numpy/core/src/common/npy_sort.h.src +++ b/numpy/core/src/common/npy_sort.h.src @@ -18,6 +18,11 @@ static NPY_INLINE int npy_get_msb(npy_uintp unum) return depth_limit; } +#ifdef __cplusplus +extern "C" { +#endif + + /* ***************************************************************************** @@ -102,4 +107,8 @@ NPY_NO_EXPORT int npy_aheapsort(void *vec, npy_intp *ind, npy_intp cnt, void *ar NPY_NO_EXPORT int npy_amergesort(void *vec, npy_intp *ind, npy_intp cnt, void *arr); NPY_NO_EXPORT int npy_atimsort(void *vec, npy_intp *ind, npy_intp cnt, void *arr); +#ifdef __cplusplus +} +#endif + #endif diff --git a/numpy/core/src/common/numpy_tag.h b/numpy/core/src/common/numpy_tag.h index dc8d5286b..60e9b02cd 100644 --- a/numpy/core/src/common/numpy_tag.h +++ b/numpy/core/src/common/numpy_tag.h @@ -1,8 +1,15 @@ #ifndef _NPY_COMMON_TAG_H_ #define _NPY_COMMON_TAG_H_ +#include "../npysort/npysort_common.h" + namespace npy { +template<typename... 
tags> +struct taglist { + static constexpr unsigned size = sizeof...(tags); +}; + struct integral_tag { }; struct floating_point_tag { @@ -14,63 +21,203 @@ struct date_tag { struct bool_tag : integral_tag { using type = npy_bool; + static constexpr NPY_TYPES type_value = NPY_BOOL; + static int less(type const& a, type const& b) { + return BOOL_LT(a, b); + } + static int less_equal(type const& a, type const& b) { + return !less(b, a); + } }; struct byte_tag : integral_tag { using type = npy_byte; + static constexpr NPY_TYPES type_value = NPY_BYTE; + static int less(type const& a, type const& b) { + return BYTE_LT(a, b); + } + static int less_equal(type const& a, type const& b) { + return !less(b, a); + } }; struct ubyte_tag : integral_tag { using type = npy_ubyte; + static constexpr NPY_TYPES type_value = NPY_UBYTE; + static int less(type const& a, type const& b) { + return UBYTE_LT(a, b); + } + static int less_equal(type const& a, type const& b) { + return !less(b, a); + } }; struct short_tag : integral_tag { using type = npy_short; + static constexpr NPY_TYPES type_value = NPY_SHORT; + static int less(type const& a, type const& b) { + return SHORT_LT(a, b); + } + static int less_equal(type const& a, type const& b) { + return !less(b, a); + } }; struct ushort_tag : integral_tag { using type = npy_ushort; + static constexpr NPY_TYPES type_value = NPY_USHORT; + static int less(type const& a, type const& b) { + return USHORT_LT(a, b); + } + static int less_equal(type const& a, type const& b) { + return !less(b, a); + } }; struct int_tag : integral_tag { using type = npy_int; + static constexpr NPY_TYPES type_value = NPY_INT; + static int less(type const& a, type const& b) { + return INT_LT(a, b); + } + static int less_equal(type const& a, type const& b) { + return !less(b, a); + } }; struct uint_tag : integral_tag { using type = npy_uint; + static constexpr NPY_TYPES type_value = NPY_UINT; + static int less(type const& a, type const& b) { + return UINT_LT(a, b); + } + 
static int less_equal(type const& a, type const& b) { + return !less(b, a); + } }; struct long_tag : integral_tag { using type = npy_long; + static constexpr NPY_TYPES type_value = NPY_LONG; + static int less(type const& a, type const& b) { + return LONG_LT(a, b); + } + static int less_equal(type const& a, type const& b) { + return !less(b, a); + } }; struct ulong_tag : integral_tag { using type = npy_ulong; + static constexpr NPY_TYPES type_value = NPY_ULONG; + static int less(type const& a, type const& b) { + return ULONG_LT(a, b); + } + static int less_equal(type const& a, type const& b) { + return !less(b, a); + } }; struct longlong_tag : integral_tag { using type = npy_longlong; + static constexpr NPY_TYPES type_value = NPY_LONGLONG; + static int less(type const& a, type const& b) { + return LONGLONG_LT(a, b); + } + static int less_equal(type const& a, type const& b) { + return !less(b, a); + } }; struct ulonglong_tag : integral_tag { using type = npy_ulonglong; + static constexpr NPY_TYPES type_value = NPY_ULONGLONG; + static int less(type const& a, type const& b) { + return ULONGLONG_LT(a, b); + } + static int less_equal(type const& a, type const& b) { + return !less(b, a); + } }; struct half_tag { using type = npy_half; + static constexpr NPY_TYPES type_value = NPY_HALF; + static int less(type const& a, type const& b) { + return HALF_LT(a, b); + } + static int less_equal(type const& a, type const& b) { + return !less(b, a); + } }; struct float_tag : floating_point_tag { using type = npy_float; + static constexpr NPY_TYPES type_value = NPY_FLOAT; + static int less(type const& a, type const& b) { + return FLOAT_LT(a, b); + } + static int less_equal(type const& a, type const& b) { + return !less(b, a); + } }; struct double_tag : floating_point_tag { using type = npy_double; + static constexpr NPY_TYPES type_value = NPY_DOUBLE; + static int less(type const& a, type const& b) { + return DOUBLE_LT(a, b); + } + static int less_equal(type const& a, type const& b) { 
+ return !less(b, a); + } }; struct longdouble_tag : floating_point_tag { using type = npy_longdouble; + static constexpr NPY_TYPES type_value = NPY_LONGDOUBLE; + static int less(type const& a, type const& b) { + return LONGDOUBLE_LT(a, b); + } + static int less_equal(type const& a, type const& b) { + return !less(b, a); + } }; struct cfloat_tag : complex_tag { using type = npy_cfloat; + static constexpr NPY_TYPES type_value = NPY_CFLOAT; + static int less(type const& a, type const& b) { + return CFLOAT_LT(a, b); + } + static int less_equal(type const& a, type const& b) { + return !less(b, a); + } }; struct cdouble_tag : complex_tag { using type = npy_cdouble; + static constexpr NPY_TYPES type_value = NPY_CDOUBLE; + static int less(type const& a, type const& b) { + return CDOUBLE_LT(a, b); + } + static int less_equal(type const& a, type const& b) { + return !less(b, a); + } }; struct clongdouble_tag : complex_tag { using type = npy_clongdouble; + static constexpr NPY_TYPES type_value = NPY_CLONGDOUBLE; + static int less(type const& a, type const& b) { + return CLONGDOUBLE_LT(a, b); + } + static int less_equal(type const& a, type const& b) { + return !less(b, a); + } }; struct datetime_tag : date_tag { using type = npy_datetime; + static constexpr NPY_TYPES type_value = NPY_DATETIME; + static int less(type const& a, type const& b) { + return DATETIME_LT(a, b); + } + static int less_equal(type const& a, type const& b) { + return !less(b, a); + } }; struct timedelta_tag : date_tag { using type = npy_timedelta; + static constexpr NPY_TYPES type_value = NPY_TIMEDELTA; + static int less(type const& a, type const& b) { + return TIMEDELTA_LT(a, b); + } + static int less_equal(type const& a, type const& b) { + return !less(b, a); + } }; } // namespace npy diff --git a/numpy/core/src/multiarray/_multiarray_tests.c.src b/numpy/core/src/multiarray/_multiarray_tests.c.src index 9486b7cff..36937629d 100644 --- a/numpy/core/src/multiarray/_multiarray_tests.c.src +++ 
b/numpy/core/src/multiarray/_multiarray_tests.c.src @@ -797,25 +797,6 @@ npy_char_deprecation(PyObject* NPY_UNUSED(self), PyObject* NPY_UNUSED(args)) return (PyObject *)descr; } -/* used to test UPDATEIFCOPY usage emits deprecation warning */ -static PyObject* -npy_updateifcopy_deprecation(PyObject* NPY_UNUSED(self), PyObject* args) -{ - int flags; - PyObject* array; - if (!PyArray_Check(args)) { - PyErr_SetString(PyExc_TypeError, "test needs ndarray input"); - return NULL; - } - flags = NPY_ARRAY_CARRAY | NPY_ARRAY_UPDATEIFCOPY; - array = PyArray_FromArray((PyArrayObject*)args, NULL, flags); - if (array == NULL) - return NULL; - PyArray_ResolveWritebackIfCopy((PyArrayObject*)array); - Py_DECREF(array); - Py_RETURN_NONE; -} - /* used to test PyArray_As1D usage emits not implemented error */ static PyObject* npy_pyarrayas1d_deprecation(PyObject* NPY_UNUSED(self), PyObject* NPY_UNUSED(args)) @@ -1086,20 +1067,18 @@ get_all_cast_information(PyObject *NPY_UNUSED(mod), PyObject *NPY_UNUSED(args)) PyArrayMethodObject *cast = (PyArrayMethodObject *)cast_obj; /* Pass some information about this cast out! 
*/ - PyObject *cast_info = Py_BuildValue("{sOsOsisisisisisssi}", + PyObject *cast_info = Py_BuildValue("{sOsOsisisisisiss}", "from", from_dtype, "to", to_dtype, "legacy", (cast->name != NULL && strncmp(cast->name, "legacy_", 7) == 0), - "casting", cast->casting & ~_NPY_CAST_IS_VIEW, + "casting", cast->casting, "requires_pyapi", cast->flags & NPY_METH_REQUIRES_PYAPI, "supports_unaligned", cast->flags & NPY_METH_SUPPORTS_UNALIGNED, "no_floatingpoint_errors", cast->flags & NPY_METH_NO_FLOATINGPOINT_ERRORS, - "name", cast->name, - "cast_is_view", - cast->casting & _NPY_CAST_IS_VIEW); + "name", cast->name); if (cast_info == NULL) { goto fail; } @@ -2414,9 +2393,6 @@ static PyMethodDef Multiarray_TestsMethods[] = { {"npy_char_deprecation", npy_char_deprecation, METH_NOARGS, NULL}, - {"npy_updateifcopy_deprecation", - npy_updateifcopy_deprecation, - METH_O, NULL}, {"npy_pyarrayas1d_deprecation", npy_pyarrayas1d_deprecation, METH_NOARGS, NULL}, diff --git a/numpy/core/src/multiarray/array_method.c b/numpy/core/src/multiarray/array_method.c index d93dac506..b421d9e4f 100644 --- a/numpy/core/src/multiarray/array_method.c +++ b/numpy/core/src/multiarray/array_method.c @@ -48,13 +48,19 @@ * * We could allow setting the output descriptors specifically to simplify * this step. + * + * Note that the default version will indicate that the cast can be done + * as using `arr.view(new_dtype)` if the default cast-safety is + * set to "no-cast". This default function cannot be used if a view may + * be sufficient for casting but the cast is not always "no-cast". */ static NPY_CASTING default_resolve_descriptors( PyArrayMethodObject *method, PyArray_DTypeMeta **dtypes, PyArray_Descr **input_descrs, - PyArray_Descr **output_descrs) + PyArray_Descr **output_descrs, + npy_intp *view_offset) { int nin = method->nin; int nout = method->nout; @@ -76,6 +82,13 @@ default_resolve_descriptors( * abstract ones or unspecified outputs). 
We can use the common-dtype * operation to provide a default here. */ + if (method->casting == NPY_NO_CASTING) { + /* + * By (current) definition no-casting should imply viewable. This + * is currently indicated for example for object to object cast. + */ + *view_offset = 0; + } return method->casting; fail: @@ -102,9 +115,10 @@ is_contiguous( /** * The default method to fetch the correct loop for a cast or ufunc * (at the time of writing only casts). - * The default version can return loops explicitly registered during method - * creation. It does specialize contiguous loops, although has to check - * all descriptors itemsizes for this. + * Note that the default function provided here will only indicate that a cast + * can be done as a view (i.e., arr.view(new_dtype)) when this is trivially + * true, i.e., for cast safety "no-cast". It will not recognize view as an + * option for other casts (e.g., viewing '>i8' as '>i4' with an offset of 4). * * @param context * @param aligned @@ -166,7 +180,7 @@ validate_spec(PyArrayMethod_Spec *spec) "not exceed %d. (method: %s)", NPY_MAXARGS, spec->name); return -1; } - switch (spec->casting & ~_NPY_CAST_IS_VIEW) { + switch (spec->casting) { case NPY_NO_CASTING: case NPY_EQUIV_CASTING: case NPY_SAFE_CASTING: @@ -495,8 +509,9 @@ boundarraymethod_dealloc(PyObject *self) /* - * Calls resolve_descriptors() and returns the casting level and the resolved - * descriptors as a tuple. If the operation is impossible returns (-1, None). + * Calls resolve_descriptors() and returns the casting level, the resolved + * descriptors as a tuple, and a possible view-offset (integer or None). + * If the operation is impossible returns (-1, None, None). * May raise an error, but usually should not. * The function validates the casting attribute compared to the returned * casting level. 
@@ -551,14 +566,15 @@ boundarraymethod__resolve_descripors( } } + npy_intp view_offset = NPY_MIN_INTP; NPY_CASTING casting = self->method->resolve_descriptors( - self->method, self->dtypes, given_descrs, loop_descrs); + self->method, self->dtypes, given_descrs, loop_descrs, &view_offset); if (casting < 0 && PyErr_Occurred()) { return NULL; } else if (casting < 0) { - return Py_BuildValue("iO", casting, Py_None); + return Py_BuildValue("iOO", casting, Py_None, Py_None); } PyObject *result_tuple = PyTuple_New(nin + nout); @@ -570,9 +586,22 @@ boundarraymethod__resolve_descripors( PyTuple_SET_ITEM(result_tuple, i, (PyObject *)loop_descrs[i]); } + PyObject *view_offset_obj; + if (view_offset == NPY_MIN_INTP) { + Py_INCREF(Py_None); + view_offset_obj = Py_None; + } + else { + view_offset_obj = PyLong_FromSsize_t(view_offset); + if (view_offset_obj == NULL) { + Py_DECREF(result_tuple); + return NULL; + } + } + /* - * The casting flags should be the most generic casting level (except the - * cast-is-view flag. If no input is parametric, it must match exactly. + * The casting flags should be the most generic casting level. + * If no input is parametric, it must match exactly. * * (Note that these checks are only debugging checks.)
*/ @@ -584,7 +613,7 @@ boundarraymethod__resolve_descripors( } } if (self->method->casting != -1) { - NPY_CASTING cast = casting & ~_NPY_CAST_IS_VIEW; + NPY_CASTING cast = casting; if (self->method->casting != PyArray_MinCastSafety(cast, self->method->casting)) { PyErr_Format(PyExc_RuntimeError, @@ -592,6 +621,7 @@ boundarraymethod__resolve_descripors( "(set level is %d, got %d for method %s)", self->method->casting, cast, self->method->name); Py_DECREF(result_tuple); + Py_DECREF(view_offset_obj); return NULL; } if (!parametric) { @@ -608,12 +638,13 @@ boundarraymethod__resolve_descripors( "(set level is %d, got %d for method %s)", self->method->casting, cast, self->method->name); Py_DECREF(result_tuple); + Py_DECREF(view_offset_obj); return NULL; } } } - return Py_BuildValue("iN", casting, result_tuple); + return Py_BuildValue("iNN", casting, result_tuple, view_offset_obj); } @@ -694,8 +725,9 @@ boundarraymethod__simple_strided_call( return NULL; } + npy_intp view_offset = NPY_MIN_INTP; NPY_CASTING casting = self->method->resolve_descriptors( - self->method, self->dtypes, descrs, out_descrs); + self->method, self->dtypes, descrs, out_descrs, &view_offset); if (casting < 0) { PyObject *err_type = NULL, *err_value = NULL, *err_traceback = NULL; diff --git a/numpy/core/src/multiarray/array_method.h b/numpy/core/src/multiarray/array_method.h index 7b7372bd0..35b9033e0 100644 --- a/numpy/core/src/multiarray/array_method.h +++ b/numpy/core/src/multiarray/array_method.h @@ -70,7 +70,8 @@ typedef NPY_CASTING (resolve_descriptors_function)( struct PyArrayMethodObject_tag *method, PyArray_DTypeMeta **dtypes, PyArray_Descr **given_descrs, - PyArray_Descr **loop_descrs); + PyArray_Descr **loop_descrs, + npy_intp *view_offset); typedef int (get_loop_function)( diff --git a/numpy/core/src/multiarray/arrayobject.c b/numpy/core/src/multiarray/arrayobject.c index 1b197d0f2..3f080d902 100644 --- a/numpy/core/src/multiarray/arrayobject.c +++ b/numpy/core/src/multiarray/arrayobject.c 
@@ -75,36 +75,19 @@ PyArray_Size(PyObject *op) } } -/*NUMPY_API - * - * Precondition: 'arr' is a copy of 'base' (though possibly with different - * strides, ordering, etc.). This function sets the UPDATEIFCOPY flag and the - * ->base pointer on 'arr', so that when 'arr' is destructed, it will copy any - * changes back to 'base'. DEPRECATED, use PyArray_SetWritebackIfCopyBase - * - * Steals a reference to 'base'. - * - * Returns 0 on success, -1 on failure. - */ +/*NUMPY_API */ NPY_NO_EXPORT int PyArray_SetUpdateIfCopyBase(PyArrayObject *arr, PyArrayObject *base) { - int ret; - /* 2017-Nov -10 1.14 (for PyPy only) */ - /* 2018-April-21 1.15 (all Python implementations) */ - if (DEPRECATE("PyArray_SetUpdateIfCopyBase is deprecated, use " - "PyArray_SetWritebackIfCopyBase instead, and be sure to call " - "PyArray_ResolveWritebackIfCopy before the array is deallocated, " - "i.e. before the last call to Py_DECREF. If cleaning up from an " - "error, PyArray_DiscardWritebackIfCopy may be called instead to " - "throw away the scratch buffer.") < 0) - return -1; - ret = PyArray_SetWritebackIfCopyBase(arr, base); - if (ret >=0) { - PyArray_ENABLEFLAGS(arr, NPY_ARRAY_UPDATEIFCOPY); - PyArray_CLEARFLAGS(arr, NPY_ARRAY_WRITEBACKIFCOPY); - } - return ret; + /* 2021-Dec-15 1.23*/ + PyErr_SetString(PyExc_RuntimeError, + "PyArray_SetUpdateIfCopyBase is disabled, use " + "PyArray_SetWritebackIfCopyBase instead, and be sure to call " + "PyArray_ResolveWritebackIfCopy before the array is deallocated, " + "i.e. before the last call to Py_DECREF. 
If cleaning up from an " + "error, PyArray_DiscardWritebackIfCopy may be called instead to " + "throw away the scratch buffer."); + return -1; } /*NUMPY_API @@ -377,9 +360,9 @@ PyArray_ResolveWritebackIfCopy(PyArrayObject * self) { PyArrayObject_fields *fa = (PyArrayObject_fields *)self; if (fa && fa->base) { - if ((fa->flags & NPY_ARRAY_UPDATEIFCOPY) || (fa->flags & NPY_ARRAY_WRITEBACKIFCOPY)) { + if (fa->flags & NPY_ARRAY_WRITEBACKIFCOPY) { /* - * UPDATEIFCOPY or WRITEBACKIFCOPY means that fa->base's data + * WRITEBACKIFCOPY means that fa->base's data * should be updated with the contents * of self. * fa->base->flags is not WRITEABLE to protect the relationship @@ -388,7 +371,6 @@ PyArray_ResolveWritebackIfCopy(PyArrayObject * self) int retval = 0; PyArray_ENABLEFLAGS(((PyArrayObject *)fa->base), NPY_ARRAY_WRITEABLE); - PyArray_CLEARFLAGS(self, NPY_ARRAY_UPDATEIFCOPY); PyArray_CLEARFLAGS(self, NPY_ARRAY_WRITEBACKIFCOPY); retval = PyArray_CopyAnyInto((PyArrayObject *)fa->base, self); Py_DECREF(fa->base); @@ -462,25 +444,6 @@ array_dealloc(PyArrayObject *self) PyErr_Clear(); } } - if (PyArray_FLAGS(self) & NPY_ARRAY_UPDATEIFCOPY) { - /* DEPRECATED, remove once the flag is removed */ - char const * msg = "UPDATEIFCOPY detected in array_dealloc. " - " Required call to PyArray_ResolveWritebackIfCopy or " - "PyArray_DiscardWritebackIfCopy is missing"; - /* - * prevent reaching 0 twice and thus recursing into dealloc. - * Increasing sys.gettotalrefcount, but path should not be taken. 
- */ - Py_INCREF(self); - /* 2017-Nov-10 1.14 */ - WARN_IN_DEALLOC(PyExc_DeprecationWarning, msg); - retval = PyArray_ResolveWritebackIfCopy(self); - if (retval < 0) - { - PyErr_Print(); - PyErr_Clear(); - } - } /* * If fa->base is non-NULL, it is something * to DECREF -- either a view or a buffer object @@ -493,14 +456,6 @@ array_dealloc(PyArrayObject *self) if (PyDataType_FLAGCHK(fa->descr, NPY_ITEM_REFCOUNT)) { PyArray_XDECREF(self); } - /* - * Allocation will never be 0, see comment in ctors.c - * line 820 - */ - size_t nbytes = PyArray_NBYTES(self); - if (nbytes == 0) { - nbytes = fa->descr->elsize ? fa->descr->elsize : 1; - } if (fa->mem_handler == NULL) { char *env = getenv("NUMPY_WARN_IF_NO_MEM_POLICY"); if ((env != NULL) && (strncmp(env, "1", 1) == 0)) { @@ -511,7 +466,16 @@ array_dealloc(PyArrayObject *self) } // Guess at malloc/free ??? free(fa->data); - } else { + } + else { + /* + * In theory `PyArray_NBYTES_ALLOCATED`, but differs somewhere? + * So instead just use the knowledge that 0 is impossible. + */ + size_t nbytes = PyArray_NBYTES(self); + if (nbytes == 0) { + nbytes = 1; + } PyDataMem_UserFREE(fa->data, nbytes, fa->mem_handler); Py_DECREF(fa->mem_handler); } @@ -571,8 +535,6 @@ PyArray_DebugPrint(PyArrayObject *obj) printf(" NPY_ALIGNED"); if (fobj->flags & NPY_ARRAY_WRITEABLE) printf(" NPY_WRITEABLE"); - if (fobj->flags & NPY_ARRAY_UPDATEIFCOPY) - printf(" NPY_UPDATEIFCOPY"); if (fobj->flags & NPY_ARRAY_WRITEBACKIFCOPY) printf(" NPY_WRITEBACKIFCOPY"); printf("\n"); @@ -660,15 +622,15 @@ array_might_be_written(PyArrayObject *obj) /*NUMPY_API * - * This function does nothing if obj is writeable, and raises an exception - * (and returns -1) if obj is not writeable. It may also do other - * house-keeping, such as issuing warnings on arrays which are transitioning - * to become views. Always call this function at some point before writing to - * an array. + * This function does nothing and returns 0 if *obj* is writeable. 
+ * It raises an exception and returns -1 if *obj* is not writeable. + * It may also do other house-keeping, such as issuing warnings on + * arrays which are transitioning to become views. Always call this + * function at some point before writing to an array. * - * 'name' is a name for the array, used to give better error - * messages. Something like "assignment destination", "output array", or even - * just "array". + * *name* is a name for the array, used to give better error messages. + * It can be something like "assignment destination", "output array", + * or even just "array". */ NPY_NO_EXPORT int PyArray_FailUnlessWriteable(PyArrayObject *obj, const char *name) diff --git a/numpy/core/src/multiarray/calculation.c b/numpy/core/src/multiarray/calculation.c index 327f685d4..a985a2308 100644 --- a/numpy/core/src/multiarray/calculation.c +++ b/numpy/core/src/multiarray/calculation.c @@ -175,7 +175,7 @@ _PyArray_ArgMinMaxCommon(PyArrayObject *op, NPY_END_THREADS_DESCR(PyArray_DESCR(ap)); Py_DECREF(ap); - /* Trigger the UPDATEIFCOPY/WRITEBACKIFCOPY if necessary */ + /* Trigger the WRITEBACKIFCOPY if necessary */ if (out != NULL && out != rp) { PyArray_ResolveWritebackIfCopy(rp); Py_DECREF(rp); diff --git a/numpy/core/src/multiarray/common.h b/numpy/core/src/multiarray/common.h index b3526c4c1..85fd3aab1 100644 --- a/numpy/core/src/multiarray/common.h +++ b/numpy/core/src/multiarray/common.h @@ -292,6 +292,35 @@ npy_memchr(char * haystack, char needle, return p; } +/* + * Helper to work around issues with the allocation strategy currently + * allocating not 1 byte for empty arrays, but enough for an array where + * all 0 dimensions are replaced with size 1 (if the itemsize is not 0). + * + * This means that we can fill in nice (nonzero) strides and still handle + * slicing direct math without being in danger of leaving the allocated byte + * bounds. + * In practice, that probably does not matter, but in principle this would be + * undefined behaviour in C. 
Another solution may be to force the strides + * to 0 in these cases. See also gh-15788. + * + * Unlike the code in `PyArray_NewFromDescr` does no overflow checks. + */ +static NPY_INLINE npy_intp +PyArray_NBYTES_ALLOCATED(PyArrayObject *arr) +{ + if (PyArray_ITEMSIZE(arr) == 0) { + return 1; + } + npy_intp nbytes = PyArray_ITEMSIZE(arr); + for (int i = 0; i < PyArray_NDIM(arr); i++) { + if (PyArray_DIMS(arr)[i] != 0) { + nbytes *= PyArray_DIMS(arr)[i]; + } + } + return nbytes; +} + /* * Simple helper to create a tuple from an array of items. The `make_null_none` diff --git a/numpy/core/src/multiarray/conversion_utils.c b/numpy/core/src/multiarray/conversion_utils.c index ef101a78b..a1de580d9 100644 --- a/numpy/core/src/multiarray/conversion_utils.c +++ b/numpy/core/src/multiarray/conversion_utils.c @@ -172,7 +172,7 @@ PyArray_CopyConverter(PyObject *obj, _PyArray_CopyMode *copymode) { } int int_copymode; - PyObject* numpy_CopyMode = NULL; + static PyObject* numpy_CopyMode = NULL; npy_cache_import("numpy", "_CopyMode", &numpy_CopyMode); if (numpy_CopyMode != NULL && (PyObject *)Py_TYPE(obj) == numpy_CopyMode) { @@ -182,6 +182,7 @@ PyArray_CopyConverter(PyObject *obj, _PyArray_CopyMode *copymode) { } int_copymode = (int)PyLong_AsLong(mode_value); + Py_DECREF(mode_value); if (error_converting(int_copymode)) { return NPY_FAIL; } diff --git a/numpy/core/src/multiarray/convert_datatype.c b/numpy/core/src/multiarray/convert_datatype.c index 3135d6989..b21fc3cfa 100644 --- a/numpy/core/src/multiarray/convert_datatype.c +++ b/numpy/core/src/multiarray/convert_datatype.c @@ -223,14 +223,11 @@ PyArray_MinCastSafety(NPY_CASTING casting1, NPY_CASTING casting2) if (casting1 < 0 || casting2 < 0) { return -1; } - NPY_CASTING view = casting1 & casting2 & _NPY_CAST_IS_VIEW; - casting1 = casting1 & ~_NPY_CAST_IS_VIEW; - casting2 = casting2 & ~_NPY_CAST_IS_VIEW; /* larger casting values are less safe */ if (casting1 > casting2) { - return casting1 | view; + return casting1; } - 
return casting2 | view; + return casting2; } @@ -363,29 +360,41 @@ PyArray_CastAnyTo(PyArrayObject *out, PyArrayObject *mp) static NPY_CASTING _get_cast_safety_from_castingimpl(PyArrayMethodObject *castingimpl, - PyArray_DTypeMeta *dtypes[2], PyArray_Descr *from, PyArray_Descr *to) + PyArray_DTypeMeta *dtypes[2], PyArray_Descr *from, PyArray_Descr *to, + npy_intp *view_offset) { PyArray_Descr *descrs[2] = {from, to}; PyArray_Descr *out_descrs[2]; + *view_offset = NPY_MIN_INTP; NPY_CASTING casting = castingimpl->resolve_descriptors( - castingimpl, dtypes, descrs, out_descrs); + castingimpl, dtypes, descrs, out_descrs, view_offset); if (casting < 0) { return -1; } /* The returned descriptors may not match, requiring a second check */ if (out_descrs[0] != descrs[0]) { - NPY_CASTING from_casting = PyArray_GetCastSafety( - descrs[0], out_descrs[0], NULL); + npy_intp from_offset = NPY_MIN_INTP; + NPY_CASTING from_casting = PyArray_GetCastInfo( + descrs[0], out_descrs[0], NULL, &from_offset); casting = PyArray_MinCastSafety(casting, from_casting); + if (from_offset != *view_offset) { + /* `view_offset` differs: The multi-step cast cannot be a view. */ + *view_offset = NPY_MIN_INTP; + } if (casting < 0) { goto finish; } } if (descrs[1] != NULL && out_descrs[1] != descrs[1]) { - NPY_CASTING from_casting = PyArray_GetCastSafety( - descrs[1], out_descrs[1], NULL); + npy_intp from_offset = NPY_MIN_INTP; + NPY_CASTING from_casting = PyArray_GetCastInfo( + descrs[1], out_descrs[1], NULL, &from_offset); casting = PyArray_MinCastSafety(casting, from_casting); + if (from_offset != *view_offset) { + /* `view_offset` differs: The multi-step cast cannot be a view. */ + *view_offset = NPY_MIN_INTP; + } if (casting < 0) { goto finish; } @@ -396,15 +405,21 @@ _get_cast_safety_from_castingimpl(PyArrayMethodObject *castingimpl, Py_DECREF(out_descrs[1]); /* * Check for less harmful non-standard returns. The following two returns - * should never happen. 
They would be roughly equivalent, but less precise, - * versions of `(NPY_NO_CASTING|_NPY_CAST_IS_VIEW)`. - * 1. No-casting must imply cast-is-view. - * 2. Equivalent-casting + cast-is-view is (currently) the definition - * of a "no" cast (there may be reasons to relax this). - * Note that e.g. `(NPY_UNSAFE_CASTING|_NPY_CAST_IS_VIEW)` is valid. + * should never happen: + * 1. No-casting must imply a view offset of 0. + * 2. Equivalent-casting + 0 view offset is (usually) the definition + * of a "no" cast. However, changing the order of fields can also + * create descriptors that are not equivalent but views. + * Note that unsafe casts can have a view offset. For example, in + * principle, casting `<i8` to `<i4` is a cast with 0 offset. */ - assert(casting != NPY_NO_CASTING); - assert(casting != (NPY_EQUIV_CASTING|_NPY_CAST_IS_VIEW)); + if (*view_offset != 0) { + assert(casting != NPY_NO_CASTING); + } + else { + assert(casting != NPY_EQUIV_CASTING + || (PyDataType_HASFIELDS(from) && PyDataType_HASFIELDS(to))); + } return casting; } @@ -420,11 +435,13 @@ _get_cast_safety_from_castingimpl(PyArrayMethodObject *castingimpl, * @param to The descriptor to cast to (may be NULL) * @param to_dtype If `to` is NULL, must pass the to_dtype (otherwise this * is ignored). + * @param[out] view_offset * @return NPY_CASTING or -1 on error or if the cast is not possible. 
*/ NPY_NO_EXPORT NPY_CASTING -PyArray_GetCastSafety( - PyArray_Descr *from, PyArray_Descr *to, PyArray_DTypeMeta *to_dtype) +PyArray_GetCastInfo( + PyArray_Descr *from, PyArray_Descr *to, PyArray_DTypeMeta *to_dtype, + npy_intp *view_offset) { if (to != NULL) { to_dtype = NPY_DTYPE(to); @@ -441,7 +458,7 @@ PyArray_GetCastSafety( PyArrayMethodObject *castingimpl = (PyArrayMethodObject *)meth; PyArray_DTypeMeta *dtypes[2] = {NPY_DTYPE(from), to_dtype}; NPY_CASTING casting = _get_cast_safety_from_castingimpl(castingimpl, - dtypes, from, to); + dtypes, from, to, view_offset); Py_DECREF(meth); return casting; @@ -449,8 +466,8 @@ PyArray_GetCastSafety( /** - * Check whether a cast is safe, see also `PyArray_GetCastSafety` for - * a similar function. Unlike GetCastSafety, this function checks the + * Check whether a cast is safe, see also `PyArray_GetCastInfo` for + * a similar function. Unlike GetCastInfo, this function checks the * `castingimpl->casting` when available. This allows for two things: * * 1. It avoids calling `resolve_descriptors` in some cases. 
@@ -493,8 +510,9 @@ PyArray_CheckCastSafety(NPY_CASTING casting, } PyArray_DTypeMeta *dtypes[2] = {NPY_DTYPE(from), to_dtype}; + npy_intp view_offset; NPY_CASTING safety = _get_cast_safety_from_castingimpl(castingimpl, - dtypes, from, to); + dtypes, from, to, &view_offset); Py_DECREF(meth); /* If casting is the smaller (or equal) safety we match */ if (safety < 0) { @@ -971,8 +989,9 @@ PyArray_CastDescrToDType(PyArray_Descr *descr, PyArray_DTypeMeta *given_DType) PyArray_Descr *loop_descrs[2]; PyArrayMethodObject *meth = (PyArrayMethodObject *)tmp; + npy_intp view_offset = NPY_MIN_INTP; NPY_CASTING casting = meth->resolve_descriptors( - meth, dtypes, given_descrs, loop_descrs); + meth, dtypes, given_descrs, loop_descrs, &view_offset); Py_DECREF(tmp); if (casting < 0) { goto error; @@ -2289,7 +2308,8 @@ legacy_same_dtype_resolve_descriptors( PyArrayMethodObject *NPY_UNUSED(self), PyArray_DTypeMeta *NPY_UNUSED(dtypes[2]), PyArray_Descr *given_descrs[2], - PyArray_Descr *loop_descrs[2]) + PyArray_Descr *loop_descrs[2], + npy_intp *view_offset) { Py_INCREF(given_descrs[0]); loop_descrs[0] = given_descrs[0]; @@ -2315,7 +2335,8 @@ legacy_same_dtype_resolve_descriptors( */ if (PyDataType_ISNOTSWAPPED(loop_descrs[0]) == PyDataType_ISNOTSWAPPED(loop_descrs[1])) { - return NPY_NO_CASTING | _NPY_CAST_IS_VIEW; + *view_offset = 0; + return NPY_NO_CASTING; } return NPY_EQUIV_CASTING; } @@ -2354,7 +2375,8 @@ simple_cast_resolve_descriptors( PyArrayMethodObject *self, PyArray_DTypeMeta *dtypes[2], PyArray_Descr *given_descrs[2], - PyArray_Descr *loop_descrs[2]) + PyArray_Descr *loop_descrs[2], + npy_intp *view_offset) { assert(NPY_DT_is_legacy(dtypes[0]) && NPY_DT_is_legacy(dtypes[1])); @@ -2378,7 +2400,8 @@ simple_cast_resolve_descriptors( } if (PyDataType_ISNOTSWAPPED(loop_descrs[0]) == PyDataType_ISNOTSWAPPED(loop_descrs[1])) { - return NPY_NO_CASTING | _NPY_CAST_IS_VIEW; + *view_offset = 0; + return NPY_NO_CASTING; } return NPY_EQUIV_CASTING; } @@ -2572,7 +2595,8 @@ 
cast_to_string_resolve_descriptors( PyArrayMethodObject *self, PyArray_DTypeMeta *dtypes[2], PyArray_Descr *given_descrs[2], - PyArray_Descr *loop_descrs[2]) + PyArray_Descr *loop_descrs[2], + npy_intp *NPY_UNUSED(view_offset)) { /* * NOTE: The following code used to be part of PyArray_AdaptFlexibleDType @@ -2723,7 +2747,8 @@ string_to_string_resolve_descriptors( PyArrayMethodObject *NPY_UNUSED(self), PyArray_DTypeMeta *NPY_UNUSED(dtypes[2]), PyArray_Descr *given_descrs[2], - PyArray_Descr *loop_descrs[2]) + PyArray_Descr *loop_descrs[2], + npy_intp *view_offset) { Py_INCREF(given_descrs[0]); loop_descrs[0] = given_descrs[0]; @@ -2739,19 +2764,29 @@ string_to_string_resolve_descriptors( loop_descrs[1] = given_descrs[1]; } - if (loop_descrs[0]->elsize == loop_descrs[1]->elsize) { - if (PyDataType_ISNOTSWAPPED(loop_descrs[0]) == - PyDataType_ISNOTSWAPPED(loop_descrs[1])) { - return NPY_NO_CASTING | _NPY_CAST_IS_VIEW; + if (loop_descrs[0]->elsize < loop_descrs[1]->elsize) { + /* New string is longer: safe but cannot be a view */ + return NPY_SAFE_CASTING; + } + else { + /* New string fits into old: if the byte-order matches can be a view */ + int not_swapped = (PyDataType_ISNOTSWAPPED(loop_descrs[0]) + == PyDataType_ISNOTSWAPPED(loop_descrs[1])); + if (not_swapped) { + *view_offset = 0; + } + + if (loop_descrs[0]->elsize > loop_descrs[1]->elsize) { + return NPY_SAME_KIND_CASTING; + } + /* The strings have the same length: */ + if (not_swapped) { + return NPY_NO_CASTING; } else { return NPY_EQUIV_CASTING; } } - else if (loop_descrs[0]->elsize <= loop_descrs[1]->elsize) { - return NPY_SAFE_CASTING; - } - return NPY_SAME_KIND_CASTING; } @@ -2866,7 +2901,8 @@ PyArray_InitializeStringCasts(void) */ static NPY_CASTING cast_to_void_dtype_class( - PyArray_Descr **given_descrs, PyArray_Descr **loop_descrs) + PyArray_Descr **given_descrs, PyArray_Descr **loop_descrs, + npy_intp *view_offset) { /* `dtype="V"` means unstructured currently (compare final path) */ loop_descrs[1] = 
PyArray_DescrNewFromType(NPY_VOID); @@ -2876,11 +2912,13 @@ cast_to_void_dtype_class( loop_descrs[1]->elsize = given_descrs[0]->elsize; Py_INCREF(given_descrs[0]); loop_descrs[0] = given_descrs[0]; + + *view_offset = 0; if (loop_descrs[0]->type_num == NPY_VOID && loop_descrs[0]->subarray == NULL && loop_descrs[1]->names == NULL) { - return NPY_NO_CASTING | _NPY_CAST_IS_VIEW; + return NPY_NO_CASTING; } - return NPY_SAFE_CASTING | _NPY_CAST_IS_VIEW; + return NPY_SAFE_CASTING; } @@ -2889,12 +2927,13 @@ nonstructured_to_structured_resolve_descriptors( PyArrayMethodObject *NPY_UNUSED(self), PyArray_DTypeMeta *NPY_UNUSED(dtypes[2]), PyArray_Descr *given_descrs[2], - PyArray_Descr *loop_descrs[2]) + PyArray_Descr *loop_descrs[2], + npy_intp *view_offset) { NPY_CASTING casting; if (given_descrs[1] == NULL) { - return cast_to_void_dtype_class(given_descrs, loop_descrs); + return cast_to_void_dtype_class(given_descrs, loop_descrs, view_offset); } if (given_descrs[1]->subarray != NULL) { @@ -2903,12 +2942,18 @@ nonstructured_to_structured_resolve_descriptors( * possible to allow a view if the field has exactly one element. 
*/ casting = NPY_SAFE_CASTING; + npy_intp sub_view_offset = NPY_MIN_INTP; /* Subarray dtype */ - NPY_CASTING base_casting = PyArray_GetCastSafety( - given_descrs[0], given_descrs[1]->subarray->base, NULL); + NPY_CASTING base_casting = PyArray_GetCastInfo( + given_descrs[0], given_descrs[1]->subarray->base, NULL, + &sub_view_offset); if (base_casting < 0) { return -1; } + if (given_descrs[1]->elsize == given_descrs[1]->subarray->base->elsize) { + /* A single field, view is OK if sub-view is */ + *view_offset = sub_view_offset; + } casting = PyArray_MinCastSafety(casting, base_casting); } else if (given_descrs[1]->names != NULL) { @@ -2920,21 +2965,32 @@ nonstructured_to_structured_resolve_descriptors( else { /* Considered at most unsafe casting (but this could be changed) */ casting = NPY_UNSAFE_CASTING; - if (PyTuple_Size(given_descrs[1]->names) == 1) { - /* A view may be acceptable */ - casting |= _NPY_CAST_IS_VIEW; - } Py_ssize_t pos = 0; PyObject *key, *tuple; while (PyDict_Next(given_descrs[1]->fields, &pos, &key, &tuple)) { PyArray_Descr *field_descr = (PyArray_Descr *)PyTuple_GET_ITEM(tuple, 0); - NPY_CASTING field_casting = PyArray_GetCastSafety( - given_descrs[0], field_descr, NULL); + npy_intp field_view_off = NPY_MIN_INTP; + NPY_CASTING field_casting = PyArray_GetCastInfo( + given_descrs[0], field_descr, NULL, &field_view_off); casting = PyArray_MinCastSafety(casting, field_casting); if (casting < 0) { return -1; } + if (field_view_off != NPY_MIN_INTP) { + npy_intp to_off = PyLong_AsSsize_t(PyTuple_GET_ITEM(tuple, 1)); + if (error_converting(to_off)) { + return -1; + } + *view_offset = field_view_off - to_off; + } + } + if (PyTuple_Size(given_descrs[1]->names) != 1) { + /* + * Assume that a view is impossible when there is more than one + * field. (Fields could overlap, but that seems weird...) 
+ */ + *view_offset = NPY_MIN_INTP; } } } @@ -2944,15 +3000,20 @@ nonstructured_to_structured_resolve_descriptors( !PyDataType_REFCHK(given_descrs[0])) { /* * A simple view, at the moment considered "safe" (the refcheck is - * probably not necessary, but more future proof + * probably not necessary, but more future proof) */ - casting = NPY_SAFE_CASTING | _NPY_CAST_IS_VIEW; + *view_offset = 0; + casting = NPY_SAFE_CASTING; } else if (given_descrs[0]->elsize <= given_descrs[1]->elsize) { casting = NPY_SAFE_CASTING; } else { casting = NPY_UNSAFE_CASTING; + /* new elsize is smaller so a view is OK (reject refs for now) */ + if (!PyDataType_REFCHK(given_descrs[0])) { + *view_offset = 0; + } } } @@ -3048,6 +3109,8 @@ PyArray_GetGenericToVoidCastingImpl(void) method->casting = -1; method->resolve_descriptors = &nonstructured_to_structured_resolve_descriptors; method->get_strided_loop = &nonstructured_to_structured_get_loop; + method->nin = 1; + method->nout = 1; return (PyObject *)method; } @@ -3058,12 +3121,19 @@ structured_to_nonstructured_resolve_descriptors( PyArrayMethodObject *NPY_UNUSED(self), PyArray_DTypeMeta *dtypes[2], PyArray_Descr *given_descrs[2], - PyArray_Descr *loop_descrs[2]) + PyArray_Descr *loop_descrs[2], + npy_intp *view_offset) { PyArray_Descr *base_descr; + /* The structured part may allow a view (and have its own offset): */ + npy_intp struct_view_offset = NPY_MIN_INTP; if (given_descrs[0]->subarray != NULL) { base_descr = given_descrs[0]->subarray->base; + /* A view is possible if the subarray has exactly one element: */ + if (given_descrs[0]->elsize == given_descrs[0]->subarray->base->elsize) { + struct_view_offset = 0; + } } else if (given_descrs[0]->names != NULL) { if (PyTuple_Size(given_descrs[0]->names) != 1) { @@ -3073,6 +3143,10 @@ structured_to_nonstructured_resolve_descriptors( PyObject *key = PyTuple_GetItem(given_descrs[0]->names, 0); PyObject *base_tup = PyDict_GetItem(given_descrs[0]->fields, key); base_descr = (PyArray_Descr 
*)PyTuple_GET_ITEM(base_tup, 0); + struct_view_offset = PyLong_AsSsize_t(PyTuple_GET_ITEM(base_tup, 1)); + if (error_converting(struct_view_offset)) { + return -1; + } } else { /* @@ -3080,20 +3154,29 @@ structured_to_nonstructured_resolve_descriptors( * at this time they go back to legacy behaviour using getitem/setitem. */ base_descr = NULL; + struct_view_offset = 0; } /* - * The cast is always considered unsafe, so the PyArray_GetCastSafety - * result currently does not matter. + * The cast is always considered unsafe, so the PyArray_GetCastInfo + * result currently only matters for the view_offset. */ - if (base_descr != NULL && PyArray_GetCastSafety( - base_descr, given_descrs[1], dtypes[1]) < 0) { + npy_intp base_view_offset = NPY_MIN_INTP; + if (base_descr != NULL && PyArray_GetCastInfo( + base_descr, given_descrs[1], dtypes[1], &base_view_offset) < 0) { return -1; } + if (base_view_offset != NPY_MIN_INTP + && struct_view_offset != NPY_MIN_INTP) { + *view_offset = base_view_offset + struct_view_offset; + } /* Void dtypes always do the full cast. */ if (given_descrs[1] == NULL) { loop_descrs[1] = NPY_DT_CALL_default_descr(dtypes[1]); + if (loop_descrs[1] == NULL) { + return -1; + } /* * Special case strings here, it should be useless (and only actually * work for empty arrays). Possibly this should simply raise for @@ -3187,6 +3270,8 @@ PyArray_GetVoidToGenericCastingImpl(void) method->casting = -1; method->resolve_descriptors = &structured_to_nonstructured_resolve_descriptors; method->get_strided_loop = &structured_to_nonstructured_get_loop; + method->nin = 1; + method->nout = 1; return (PyObject *)method; } @@ -3201,16 +3286,19 @@ PyArray_GetVoidToGenericCastingImpl(void) * implementations on the dtype, to avoid duplicate work. 
*/ static NPY_CASTING -can_cast_fields_safety(PyArray_Descr *from, PyArray_Descr *to) +can_cast_fields_safety( + PyArray_Descr *from, PyArray_Descr *to, npy_intp *view_offset) { - NPY_CASTING casting = NPY_NO_CASTING | _NPY_CAST_IS_VIEW; - Py_ssize_t field_count = PyTuple_Size(from->names); if (field_count != PyTuple_Size(to->names)) { /* TODO: This should be rejected! */ return NPY_UNSAFE_CASTING; } + + NPY_CASTING casting = NPY_NO_CASTING; + *view_offset = 0; /* if there are no fields, a view is OK. */ for (Py_ssize_t i = 0; i < field_count; i++) { + npy_intp field_view_off = NPY_MIN_INTP; PyObject *from_key = PyTuple_GET_ITEM(from->names, i); PyObject *from_tup = PyDict_GetItemWithError(from->fields, from_key); if (from_tup == NULL) { @@ -3229,15 +3317,40 @@ can_cast_fields_safety(PyArray_Descr *from, PyArray_Descr *to) } PyArray_Descr *to_base = (PyArray_Descr*)PyTuple_GET_ITEM(to_tup, 0); - NPY_CASTING field_casting = PyArray_GetCastSafety(from_base, to_base, NULL); + NPY_CASTING field_casting = PyArray_GetCastInfo( + from_base, to_base, NULL, &field_view_off); if (field_casting < 0) { return -1; } casting = PyArray_MinCastSafety(casting, field_casting); + + /* Adjust the "view offset" by the field offsets: */ + if (field_view_off != NPY_MIN_INTP) { + npy_intp to_off = PyLong_AsSsize_t(PyTuple_GET_ITEM(to_tup, 1)); + if (error_converting(to_off)) { + return -1; + } + npy_intp from_off = PyLong_AsSsize_t(PyTuple_GET_ITEM(from_tup, 1)); + if (error_converting(from_off)) { + return -1; + } + field_view_off = field_view_off - to_off + from_off; + } + + /* + * If there is one field, use its field offset. After that propagate + * the view offset if they match and set to "invalid" if not. 
+ */ + if (i == 0) { + *view_offset = field_view_off; + } + else if (*view_offset != field_view_off) { + *view_offset = NPY_MIN_INTP; + } } - if (!(casting & _NPY_CAST_IS_VIEW)) { - assert((casting & ~_NPY_CAST_IS_VIEW) != NPY_NO_CASTING); - return casting; + if (*view_offset != 0) { + /* If the calculated `view_offset` is not 0, it can only be "equiv" */ + return PyArray_MinCastSafety(casting, NPY_EQUIV_CASTING); } /* @@ -3277,38 +3390,42 @@ void_to_void_resolve_descriptors( PyArrayMethodObject *self, PyArray_DTypeMeta *dtypes[2], PyArray_Descr *given_descrs[2], - PyArray_Descr *loop_descrs[2]) + PyArray_Descr *loop_descrs[2], + npy_intp *view_offset) { NPY_CASTING casting; if (given_descrs[1] == NULL) { /* This is weird, since it doesn't return the original descr, but... */ - return cast_to_void_dtype_class(given_descrs, loop_descrs); + return cast_to_void_dtype_class(given_descrs, loop_descrs, view_offset); } if (given_descrs[0]->names != NULL && given_descrs[1]->names != NULL) { /* From structured to structured, need to check fields */ - casting = can_cast_fields_safety(given_descrs[0], given_descrs[1]); + casting = can_cast_fields_safety( + given_descrs[0], given_descrs[1], view_offset); } else if (given_descrs[0]->names != NULL) { return structured_to_nonstructured_resolve_descriptors( - self, dtypes, given_descrs, loop_descrs); + self, dtypes, given_descrs, loop_descrs, view_offset); } else if (given_descrs[1]->names != NULL) { return nonstructured_to_structured_resolve_descriptors( - self, dtypes, given_descrs, loop_descrs); + self, dtypes, given_descrs, loop_descrs, view_offset); } else if (given_descrs[0]->subarray == NULL && given_descrs[1]->subarray == NULL) { /* Both are plain void dtypes */ if (given_descrs[0]->elsize == given_descrs[1]->elsize) { - casting = NPY_NO_CASTING | _NPY_CAST_IS_VIEW; + casting = NPY_NO_CASTING; + *view_offset = 0; } else if (given_descrs[0]->elsize < given_descrs[1]->elsize) { casting = NPY_SAFE_CASTING; } else { casting = 
NPY_SAME_KIND_CASTING; + *view_offset = 0; } } else { @@ -3322,20 +3439,51 @@ void_to_void_resolve_descriptors( /* If the shapes do not match, this is at most an unsafe cast */ casting = NPY_UNSAFE_CASTING; + /* + * We can use a view in two cases: + * 1. The shapes and elsizes matches, so any view offset applies to + * each element of the subarray identically. + * (in practice this probably implies the `view_offset` will be 0) + * 2. There is exactly one element and the subarray has no effect + * (can be tested by checking if the itemsizes of the base matches) + */ + npy_bool subarray_layout_supports_view = NPY_FALSE; if (from_sub && to_sub) { int res = PyObject_RichCompareBool(from_sub->shape, to_sub->shape, Py_EQ); if (res < 0) { return -1; } else if (res) { - /* Both are subarrays and the shape matches */ - casting = NPY_NO_CASTING | _NPY_CAST_IS_VIEW; + /* Both are subarrays and the shape matches, could be no cast */ + casting = NPY_NO_CASTING; + /* May be a view if there is one element or elsizes match */ + if (from_sub->base->elsize == to_sub->base->elsize + || given_descrs[0]->elsize == from_sub->base->elsize) { + subarray_layout_supports_view = NPY_TRUE; + } + } + } + else if (from_sub) { + /* May use a view if "from" has only a single element: */ + if (given_descrs[0]->elsize == from_sub->base->elsize) { + subarray_layout_supports_view = NPY_TRUE; + } + } + else { + /* May use a view if "from" has only a single element: */ + if (given_descrs[1]->elsize == to_sub->base->elsize) { + subarray_layout_supports_view = NPY_TRUE; } } PyArray_Descr *from_base = (from_sub == NULL) ? given_descrs[0] : from_sub->base; PyArray_Descr *to_base = (to_sub == NULL) ? 
given_descrs[1] : to_sub->base; - NPY_CASTING field_casting = PyArray_GetCastSafety(from_base, to_base, NULL); + /* An offset for */ + NPY_CASTING field_casting = PyArray_GetCastInfo( + from_base, to_base, NULL, view_offset); + if (!subarray_layout_supports_view) { + *view_offset = NPY_MIN_INTP; + } if (field_casting < 0) { return -1; } @@ -3443,7 +3591,8 @@ object_to_any_resolve_descriptors( PyArrayMethodObject *NPY_UNUSED(self), PyArray_DTypeMeta *dtypes[2], PyArray_Descr *given_descrs[2], - PyArray_Descr *loop_descrs[2]) + PyArray_Descr *loop_descrs[2], + npy_intp *NPY_UNUSED(view_offset)) { if (given_descrs[1] == NULL) { /* @@ -3513,7 +3662,8 @@ any_to_object_resolve_descriptors( PyArrayMethodObject *NPY_UNUSED(self), PyArray_DTypeMeta *dtypes[2], PyArray_Descr *given_descrs[2], - PyArray_Descr *loop_descrs[2]) + PyArray_Descr *loop_descrs[2], + npy_intp *NPY_UNUSED(view_offset)) { if (given_descrs[1] == NULL) { loop_descrs[1] = NPY_DT_CALL_default_descr(dtypes[1]); @@ -3591,10 +3741,6 @@ object_to_object_get_loop( static int PyArray_InitializeObjectToObjectCast(void) { - /* - * The object dtype does not support byte order changes, so its cast - * is always a direct view. 
- */ PyArray_DTypeMeta *Object = PyArray_DTypeFromTypeNum(NPY_OBJECT); PyArray_DTypeMeta *dtypes[2] = {Object, Object}; PyType_Slot slots[] = { @@ -3602,7 +3748,7 @@ PyArray_InitializeObjectToObjectCast(void) {0, NULL}}; PyArrayMethod_Spec spec = { .name = "object_to_object_cast", - .casting = NPY_NO_CASTING | _NPY_CAST_IS_VIEW, + .casting = NPY_NO_CASTING, .nin = 1, .nout = 1, .flags = NPY_METH_REQUIRES_PYAPI | NPY_METH_SUPPORTS_UNALIGNED, diff --git a/numpy/core/src/multiarray/convert_datatype.h b/numpy/core/src/multiarray/convert_datatype.h index 5e0682f22..6b4413959 100644 --- a/numpy/core/src/multiarray/convert_datatype.h +++ b/numpy/core/src/multiarray/convert_datatype.h @@ -68,8 +68,9 @@ NPY_NO_EXPORT NPY_CASTING PyArray_MinCastSafety(NPY_CASTING casting1, NPY_CASTING casting2); NPY_NO_EXPORT NPY_CASTING -PyArray_GetCastSafety( - PyArray_Descr *from, PyArray_Descr *to, PyArray_DTypeMeta *to_dtype); +PyArray_GetCastInfo( + PyArray_Descr *from, PyArray_Descr *to, PyArray_DTypeMeta *to_dtype, + npy_intp *view_offset); NPY_NO_EXPORT int PyArray_CheckCastSafety(NPY_CASTING casting, @@ -80,7 +81,8 @@ legacy_same_dtype_resolve_descriptors( PyArrayMethodObject *self, PyArray_DTypeMeta *dtypes[2], PyArray_Descr *given_descrs[2], - PyArray_Descr *loop_descrs[2]); + PyArray_Descr *loop_descrs[2], + npy_intp *view_offset); NPY_NO_EXPORT int legacy_cast_get_strided_loop( @@ -94,7 +96,8 @@ simple_cast_resolve_descriptors( PyArrayMethodObject *self, PyArray_DTypeMeta *dtypes[2], PyArray_Descr *input_descrs[2], - PyArray_Descr *loop_descrs[2]); + PyArray_Descr *loop_descrs[2], + npy_intp *view_offset); NPY_NO_EXPORT int PyArray_InitializeCasts(void); diff --git a/numpy/core/src/multiarray/ctors.c b/numpy/core/src/multiarray/ctors.c index b62426854..25eb91977 100644 --- a/numpy/core/src/multiarray/ctors.c +++ b/numpy/core/src/multiarray/ctors.c @@ -740,7 +740,6 @@ PyArray_NewFromDescr_int( } else { fa->flags = (flags & ~NPY_ARRAY_WRITEBACKIFCOPY); - fa->flags &= 
~NPY_ARRAY_UPDATEIFCOPY; } fa->descr = descr; fa->base = (PyObject *)NULL; @@ -754,14 +753,20 @@ PyArray_NewFromDescr_int( } fa->strides = fa->dimensions + nd; - /* Copy dimensions, check them, and find total array size `nbytes` */ + /* + * Copy dimensions, check them, and find total array size `nbytes` + * + * Note that we ignore 0-length dimensions, to match this in the `free` + * calls, `PyArray_NBYTES_ALLOCATED` is a private helper matching this + * behaviour, but without overflow checking. + */ for (int i = 0; i < nd; i++) { fa->dimensions[i] = dims[i]; if (fa->dimensions[i] == 0) { /* * Compare to PyArray_OverflowMultiplyList that - * returns 0 in this case. + * returns 0 in this case. See also `PyArray_NBYTES_ALLOCATED`. */ continue; } @@ -870,16 +875,39 @@ PyArray_NewFromDescr_int( /* * call the __array_finalize__ method if a subtype was requested. * If obj is NULL use Py_None for the Python callback. + * For speed, we skip if __array_finalize__ is inherited from ndarray + * (since that function does nothing), or, for backward compatibility, + * if it is None. */ if (subtype != &PyArray_Type) { PyObject *res, *func; - - func = PyObject_GetAttr((PyObject *)fa, npy_ma_str_array_finalize); + static PyObject *ndarray_array_finalize = NULL; + /* First time, cache ndarray's __array_finalize__ */ + if (ndarray_array_finalize == NULL) { + ndarray_array_finalize = PyObject_GetAttr( + (PyObject *)&PyArray_Type, npy_ma_str_array_finalize); + } + func = PyObject_GetAttr((PyObject *)subtype, npy_ma_str_array_finalize); if (func == NULL) { goto fail; } + else if (func == ndarray_array_finalize) { + Py_DECREF(func); + } else if (func == Py_None) { Py_DECREF(func); + /* + * 2022-01-08, NumPy 1.23; when deprecation period is over, remove this + * whole stanza so one gets a "NoneType object is not callable" TypeError. + */ + if (DEPRECATE( + "Setting __array_finalize__ = None to indicate no finalization" + "should be done is deprecated. 
Instead, just inherit from " + "ndarray or, if that is not possible, explicitly set to " + "ndarray.__array_function__; this will raise a TypeError " + "in the future. (Deprecated since NumPy 1.23)") < 0) { + goto fail; + } } else { if (PyCapsule_CheckExact(func)) { @@ -898,7 +926,7 @@ PyArray_NewFromDescr_int( if (obj == NULL) { obj = Py_None; } - res = PyObject_CallFunctionObjArgs(func, obj, NULL); + res = PyObject_CallFunctionObjArgs(func, (PyObject *)fa, obj, NULL); Py_DECREF(func); if (res == NULL) { goto fail; @@ -1152,6 +1180,16 @@ _array_from_buffer_3118(PyObject *memoryview) npy_intp shape[NPY_MAXDIMS], strides[NPY_MAXDIMS]; view = PyMemoryView_GET_BUFFER(memoryview); + + if (view->suboffsets != NULL) { + PyErr_SetString(PyExc_BufferError, + "NumPy currently does not support importing buffers which " + "include suboffsets as they are not compatible with the NumPy" + "memory layout without a copy. Consider copying the original " + "before trying to convert it to a NumPy array."); + return NULL; + } + nd = view->ndim; descr = _dtype_from_buffer_3118(memoryview); @@ -1301,9 +1339,10 @@ _array_from_array_like(PyObject *op, * We skip bytes and unicode since they are considered scalars. Unicode * would fail but bytes would be incorrectly converted to a uint8 array. */ - if (!PyBytes_Check(op) && !PyUnicode_Check(op)) { + if (PyObject_CheckBuffer(op) && !PyBytes_Check(op) && !PyUnicode_Check(op)) { PyObject *memoryview = PyMemoryView_FromObject(op); if (memoryview == NULL) { + /* TODO: Should probably not blanket ignore errors. 
*/ PyErr_Clear(); } else { @@ -1710,10 +1749,12 @@ PyArray_FromAny(PyObject *op, PyArray_Descr *newtype, int min_depth, if (flags & NPY_ARRAY_ENSURENOCOPY ) { PyErr_SetString(PyExc_ValueError, "Unable to avoid copy while creating an array."); + Py_DECREF(dtype); + npy_free_coercion_cache(cache); return NULL; } - if (cache == 0 && newtype != NULL && + if (cache == NULL && newtype != NULL && PyDataType_ISSIGNED(newtype) && PyArray_IsScalar(op, Generic)) { assert(ndim == 0); /* @@ -1740,8 +1781,7 @@ PyArray_FromAny(PyObject *op, PyArray_Descr *newtype, int min_depth, } /* There was no array (or array-like) passed in directly. */ - if ((flags & NPY_ARRAY_WRITEBACKIFCOPY) || - (flags & NPY_ARRAY_UPDATEIFCOPY)) { + if (flags & NPY_ARRAY_WRITEBACKIFCOPY) { PyErr_SetString(PyExc_TypeError, "WRITEBACKIFCOPY used for non-array input."); Py_DECREF(dtype); @@ -1810,7 +1850,6 @@ PyArray_FromAny(PyObject *op, PyArray_Descr *newtype, int min_depth, * NPY_ARRAY_WRITEABLE, * NPY_ARRAY_NOTSWAPPED, * NPY_ARRAY_ENSURECOPY, - * NPY_ARRAY_UPDATEIFCOPY, * NPY_ARRAY_WRITEBACKIFCOPY, * NPY_ARRAY_FORCECAST, * NPY_ARRAY_ENSUREARRAY, @@ -1837,9 +1876,6 @@ PyArray_FromAny(PyObject *op, PyArray_Descr *newtype, int min_depth, * Fortran arrays are always behaved (aligned, * notswapped, and writeable) and not (C) CONTIGUOUS (if > 1d). * - * NPY_ARRAY_UPDATEIFCOPY is deprecated in favor of - * NPY_ARRAY_WRITEBACKIFCOPY in 1.14 - * NPY_ARRAY_WRITEBACKIFCOPY flag sets this flag in the returned * array if a copy is made and the base argument points to the (possibly) * misbehaved array. 
Before returning to python, PyArray_ResolveWritebackIfCopy @@ -1962,6 +1998,7 @@ PyArray_FromArray(PyArrayObject *arr, PyArray_Descr *newtype, int flags) if (flags & NPY_ARRAY_ENSURENOCOPY ) { PyErr_SetString(PyExc_ValueError, "Unable to avoid copy while creating an array from given array."); + Py_DECREF(newtype); return NULL; } @@ -1990,31 +2027,8 @@ PyArray_FromArray(PyArrayObject *arr, PyArray_Descr *newtype, int flags) return NULL; } - if (flags & NPY_ARRAY_UPDATEIFCOPY) { - /* This is the ONLY place the NPY_ARRAY_UPDATEIFCOPY flag - * is still used. - * Can be deleted once the flag itself is removed - */ - /* 2017-Nov-10 1.14 */ - if (DEPRECATE( - "NPY_ARRAY_UPDATEIFCOPY, NPY_ARRAY_INOUT_ARRAY, and " - "NPY_ARRAY_INOUT_FARRAY are deprecated, use NPY_WRITEBACKIFCOPY, " - "NPY_ARRAY_INOUT_ARRAY2, or NPY_ARRAY_INOUT_FARRAY2 respectively " - "instead, and call PyArray_ResolveWritebackIfCopy before the " - "array is deallocated, i.e. before the last call to Py_DECREF.") < 0) { - Py_DECREF(ret); - return NULL; - } - Py_INCREF(arr); - if (PyArray_SetWritebackIfCopyBase(ret, arr) < 0) { - Py_DECREF(ret); - return NULL; - } - PyArray_ENABLEFLAGS(ret, NPY_ARRAY_UPDATEIFCOPY); - PyArray_CLEARFLAGS(ret, NPY_ARRAY_WRITEBACKIFCOPY); - } - else if (flags & NPY_ARRAY_WRITEBACKIFCOPY) { + if (flags & NPY_ARRAY_WRITEBACKIFCOPY) { Py_INCREF(arr); if (PyArray_SetWritebackIfCopyBase(ret, arr) < 0) { Py_DECREF(ret); diff --git a/numpy/core/src/multiarray/datetime.c b/numpy/core/src/multiarray/datetime.c index e0064c017..03ebaa7ce 100644 --- a/numpy/core/src/multiarray/datetime.c +++ b/numpy/core/src/multiarray/datetime.c @@ -3746,8 +3746,6 @@ find_object_datetime_type(PyObject *obj, int type_num) } - - /* * Describes casting within datetimes or timedelta */ @@ -3756,7 +3754,8 @@ time_to_time_resolve_descriptors( PyArrayMethodObject *NPY_UNUSED(self), PyArray_DTypeMeta *NPY_UNUSED(dtypes[2]), PyArray_Descr *given_descrs[2], - PyArray_Descr *loop_descrs[2]) + PyArray_Descr 
*loop_descrs[2], + npy_intp *view_offset) { /* This is a within-dtype cast, which currently must handle byteswapping */ Py_INCREF(given_descrs[0]); @@ -3772,14 +3771,14 @@ time_to_time_resolve_descriptors( int is_timedelta = given_descrs[0]->type_num == NPY_TIMEDELTA; if (given_descrs[0] == given_descrs[1]) { - return NPY_NO_CASTING | _NPY_CAST_IS_VIEW; + *view_offset = 0; + return NPY_NO_CASTING; } - NPY_CASTING byteorder_may_allow_view = 0; - if (PyDataType_ISNOTSWAPPED(loop_descrs[0]) == - PyDataType_ISNOTSWAPPED(loop_descrs[1])) { - byteorder_may_allow_view = _NPY_CAST_IS_VIEW; - } + npy_bool byteorder_may_allow_view = ( + PyDataType_ISNOTSWAPPED(loop_descrs[0]) + == PyDataType_ISNOTSWAPPED(loop_descrs[1])); + PyArray_DatetimeMetaData *meta1, *meta2; meta1 = get_datetime_metadata_from_dtype(loop_descrs[0]); assert(meta1 != NULL); @@ -3798,12 +3797,16 @@ time_to_time_resolve_descriptors( ((meta2->base >= 7) && (meta1->base - meta2->base == 3) && ((meta1->num / meta2->num) == 1000000000))) { if (byteorder_may_allow_view) { - return NPY_NO_CASTING | byteorder_may_allow_view; + *view_offset = 0; + return NPY_NO_CASTING; } return NPY_EQUIV_CASTING; } else if (meta1->base == NPY_FR_GENERIC) { - return NPY_SAFE_CASTING | byteorder_may_allow_view; + if (byteorder_may_allow_view) { + *view_offset = 0; + } + return NPY_SAFE_CASTING ; } else if (meta2->base == NPY_FR_GENERIC) { /* TODO: This is actually an invalid cast (casting will error) */ @@ -3931,10 +3934,11 @@ datetime_to_timedelta_resolve_descriptors( /* In the current setup both strings and unicode casts support all outputs */ static NPY_CASTING time_to_string_resolve_descriptors( - PyArrayMethodObject *self, + PyArrayMethodObject *NPY_UNUSED(self), PyArray_DTypeMeta *dtypes[2], PyArray_Descr **given_descrs, - PyArray_Descr **loop_descrs) + PyArray_Descr **loop_descrs, + npy_intp *NPY_UNUSED(view_offset)) { if (given_descrs[1] != NULL && dtypes[0]->type_num == NPY_DATETIME) { /* @@ -4013,7 +4017,8 @@ 
string_to_datetime_cast_resolve_descriptors( PyArrayMethodObject *NPY_UNUSED(self), PyArray_DTypeMeta *dtypes[2], PyArray_Descr *given_descrs[2], - PyArray_Descr *loop_descrs[2]) + PyArray_Descr *loop_descrs[2], + npy_intp *NPY_UNUSED(view_offset)) { if (given_descrs[1] == NULL) { /* NOTE: This doesn't actually work, and will error during the cast */ diff --git a/numpy/core/src/multiarray/dragon4.c b/numpy/core/src/multiarray/dragon4.c index ce0293615..5d245b106 100644 --- a/numpy/core/src/multiarray/dragon4.c +++ b/numpy/core/src/multiarray/dragon4.c @@ -1809,9 +1809,16 @@ FormatPositional(char *buffer, npy_uint32 bufferSize, BigInt *mantissa, pos--; numFractionDigits--; } - if (trim_mode == TrimMode_LeaveOneZero && buffer[pos-1] == '.') { - buffer[pos++] = '0'; - numFractionDigits++; + if (buffer[pos-1] == '.') { + /* in TrimMode_LeaveOneZero, add trailing 0 back */ + if (trim_mode == TrimMode_LeaveOneZero){ + buffer[pos++] = '0'; + numFractionDigits++; + } + /* in TrimMode_DptZeros, remove trailing decimal point */ + else if (trim_mode == TrimMode_DptZeros) { + pos--; + } } } diff --git a/numpy/core/src/multiarray/dtype_transfer.c b/numpy/core/src/multiarray/dtype_transfer.c index 8fb44c4f6..4877f8dfa 100644 --- a/numpy/core/src/multiarray/dtype_transfer.c +++ b/numpy/core/src/multiarray/dtype_transfer.c @@ -2991,7 +2991,8 @@ _strided_to_strided_multistep_cast( * transferfunction and transferdata. 
*/ static NPY_INLINE int -init_cast_info(NPY_cast_info *cast_info, NPY_CASTING *casting, +init_cast_info( + NPY_cast_info *cast_info, NPY_CASTING *casting, npy_intp *view_offset, PyArray_Descr *src_dtype, PyArray_Descr *dst_dtype, int main_step) { PyObject *meth = PyArray_GetCastingImpl( @@ -3016,7 +3017,8 @@ init_cast_info(NPY_cast_info *cast_info, NPY_CASTING *casting, PyArray_Descr *in_descr[2] = {src_dtype, dst_dtype}; *casting = cast_info->context.method->resolve_descriptors( - cast_info->context.method, dtypes, in_descr, cast_info->descriptors); + cast_info->context.method, dtypes, + in_descr, cast_info->descriptors, view_offset); if (NPY_UNLIKELY(*casting < 0)) { if (!PyErr_Occurred()) { PyErr_Format(PyExc_TypeError, @@ -3071,6 +3073,9 @@ _clear_cast_info_after_get_loop_failure(NPY_cast_info *cast_info) * transfer function from the each casting implementation (ArrayMethod). * May set the transfer function to NULL when the cast can be achieved using * a view. + * TODO: Expand the view functionality for general offsets, not just 0: + * Partial casts could be skipped also for `view_offset != 0`. + * * The `out_needs_api` flag must be initialized. 
* * NOTE: In theory casting errors here could be slightly misleading in case @@ -3101,9 +3106,12 @@ define_cast_for_descrs( castdata.main.func = NULL; castdata.to.func = NULL; castdata.from.func = NULL; + /* `view_offset` passed to `init_cast_info` but unused for the main cast */ + npy_intp view_offset = NPY_MIN_INTP; NPY_CASTING casting = -1; - if (init_cast_info(cast_info, &casting, src_dtype, dst_dtype, 1) < 0) { + if (init_cast_info( + cast_info, &casting, &view_offset, src_dtype, dst_dtype, 1) < 0) { return -1; } @@ -3123,17 +3131,18 @@ define_cast_for_descrs( */ if (NPY_UNLIKELY(src_dtype != cast_info->descriptors[0] || must_wrap)) { NPY_CASTING from_casting = -1; + npy_intp from_view_offset = NPY_MIN_INTP; /* Cast function may not support the input, wrap if necessary */ if (init_cast_info( - &castdata.from, &from_casting, + &castdata.from, &from_casting, &from_view_offset, src_dtype, cast_info->descriptors[0], 0) < 0) { goto fail; } casting = PyArray_MinCastSafety(casting, from_casting); /* Prepare the actual cast (if necessary): */ - if (from_casting & _NPY_CAST_IS_VIEW && !must_wrap) { - /* This step is not necessary and can be skipped. 
*/ + if (from_view_offset == 0 && !must_wrap) { + /* This step is not necessary and can be skipped */ castdata.from.func = &_dec_src_ref_nop; /* avoid NULL */ NPY_cast_info_xfree(&castdata.from); } @@ -3161,16 +3170,17 @@ define_cast_for_descrs( */ if (NPY_UNLIKELY(dst_dtype != cast_info->descriptors[1] || must_wrap)) { NPY_CASTING to_casting = -1; + npy_intp to_view_offset = NPY_MIN_INTP; /* Cast function may not support the output, wrap if necessary */ if (init_cast_info( - &castdata.to, &to_casting, + &castdata.to, &to_casting, &to_view_offset, cast_info->descriptors[1], dst_dtype, 0) < 0) { goto fail; } casting = PyArray_MinCastSafety(casting, to_casting); /* Prepare the actual cast (if necessary): */ - if (to_casting & _NPY_CAST_IS_VIEW && !must_wrap) { + if (to_view_offset == 0 && !must_wrap) { /* This step is not necessary and can be skipped. */ castdata.to.func = &_dec_src_ref_nop; /* avoid NULL */ NPY_cast_info_xfree(&castdata.to); diff --git a/numpy/core/src/multiarray/experimental_public_dtype_api.c b/numpy/core/src/multiarray/experimental_public_dtype_api.c index 4b9c7199b..e9d191002 100644 --- a/numpy/core/src/multiarray/experimental_public_dtype_api.c +++ b/numpy/core/src/multiarray/experimental_public_dtype_api.c @@ -16,7 +16,7 @@ #include "common_dtype.h" -#define EXPERIMENTAL_DTYPE_API_VERSION 2 +#define EXPERIMENTAL_DTYPE_API_VERSION 3 typedef struct{ diff --git a/numpy/core/src/multiarray/flagsobject.c b/numpy/core/src/multiarray/flagsobject.c index 3b1b4f406..b5bd7c8c1 100644 --- a/numpy/core/src/multiarray/flagsobject.c +++ b/numpy/core/src/multiarray/flagsobject.c @@ -238,25 +238,6 @@ _define_get_warn(NPY_ARRAY_ALIGNED| NPY_ARRAY_C_CONTIGUOUS, carray) static PyObject * -arrayflags_updateifcopy_get(PyArrayFlagsObject *self, void *NPY_UNUSED(ignored)) -{ - PyObject *item; - /* 2017-Nov-10 1.14 */ - if(DEPRECATE("UPDATEIFCOPY deprecated, use WRITEBACKIFCOPY instead") < 0) { - return NULL; - } - if ((self->flags & (NPY_ARRAY_UPDATEIFCOPY)) == 
(NPY_ARRAY_UPDATEIFCOPY)) { - item = Py_True; - } - else { - item = Py_False; - } - Py_INCREF(item); - return item; -} - - -static PyObject * arrayflags_forc_get(PyArrayFlagsObject *self, void *NPY_UNUSED(ignored)) { PyObject *item; @@ -314,36 +295,6 @@ arrayflags_num_get(PyArrayFlagsObject *self, void *NPY_UNUSED(ignored)) /* relies on setflags order being write, align, uic */ static int -arrayflags_updateifcopy_set( - PyArrayFlagsObject *self, PyObject *obj, void *NPY_UNUSED(ignored)) -{ - PyObject *res; - - if (obj == NULL) { - PyErr_SetString(PyExc_AttributeError, - "Cannot delete flags updateifcopy attribute"); - return -1; - } - if (self->arr == NULL) { - PyErr_SetString(PyExc_ValueError, - "Cannot set flags on array scalars."); - return -1; - } - /* 2017-Nov-10 1.14 */ - if(DEPRECATE("UPDATEIFCOPY deprecated, use WRITEBACKIFCOPY instead") < 0) { - return -1; - } - res = PyObject_CallMethod(self->arr, "setflags", "OOO", Py_None, Py_None, - (PyObject_IsTrue(obj) ? Py_True : Py_False)); - if (res == NULL) { - return -1; - } - Py_DECREF(res); - return 0; -} - -/* relies on setflags order being write, align, uic */ -static int arrayflags_writebackifcopy_set( PyArrayFlagsObject *self, PyObject *obj, void *NPY_UNUSED(ignored)) { @@ -473,10 +424,6 @@ static PyGetSetDef arrayflags_getsets[] = { (getter)arrayflags_fortran_get, NULL, NULL, NULL}, - {"updateifcopy", - (getter)arrayflags_updateifcopy_get, - (setter)arrayflags_updateifcopy_set, - NULL, NULL}, {"writebackifcopy", (getter)arrayflags_writebackifcopy_get, (setter)arrayflags_writebackifcopy_set, @@ -574,8 +521,6 @@ arrayflags_getitem(PyArrayFlagsObject *self, PyObject *ind) return arrayflags_aligned_get(self, NULL); case 'X': return arrayflags_writebackifcopy_get(self, NULL); - case 'U': - return arrayflags_updateifcopy_get(self, NULL); default: goto fail; } @@ -631,9 +576,6 @@ arrayflags_getitem(PyArrayFlagsObject *self, PyObject *ind) } break; case 12: - if (strncmp(key, "UPDATEIFCOPY", n) == 0) { - return 
arrayflags_updateifcopy_get(self, NULL); - } if (strncmp(key, "C_CONTIGUOUS", n) == 0) { return arrayflags_contiguous_get(self, NULL); } @@ -684,10 +626,6 @@ arrayflags_setitem(PyArrayFlagsObject *self, PyObject *ind, PyObject *item) ((n==1) && (strncmp(key, "A", n) == 0))) { return arrayflags_aligned_set(self, item, NULL); } - else if (((n==12) && (strncmp(key, "UPDATEIFCOPY", n) == 0)) || - ((n==1) && (strncmp(key, "U", n) == 0))) { - return arrayflags_updateifcopy_set(self, item, NULL); - } else if (((n==15) && (strncmp(key, "WRITEBACKIFCOPY", n) == 0)) || ((n==1) && (strncmp(key, "X", n) == 0))) { return arrayflags_writebackifcopy_set(self, item, NULL); @@ -721,16 +659,14 @@ arrayflags_print(PyArrayFlagsObject *self) return PyUnicode_FromFormat( " %s : %s\n %s : %s\n" " %s : %s\n %s : %s%s\n" - " %s : %s\n %s : %s\n" - " %s : %s\n", + " %s : %s\n %s : %s\n", "C_CONTIGUOUS", _torf_(fl, NPY_ARRAY_C_CONTIGUOUS), "F_CONTIGUOUS", _torf_(fl, NPY_ARRAY_F_CONTIGUOUS), "OWNDATA", _torf_(fl, NPY_ARRAY_OWNDATA), "WRITEABLE", _torf_(fl, NPY_ARRAY_WRITEABLE), _warn_on_write, "ALIGNED", _torf_(fl, NPY_ARRAY_ALIGNED), - "WRITEBACKIFCOPY", _torf_(fl, NPY_ARRAY_WRITEBACKIFCOPY), - "UPDATEIFCOPY", _torf_(fl, NPY_ARRAY_UPDATEIFCOPY) + "WRITEBACKIFCOPY", _torf_(fl, NPY_ARRAY_WRITEBACKIFCOPY) ); } diff --git a/numpy/core/src/multiarray/getset.c b/numpy/core/src/multiarray/getset.c index e81ca2947..a4f972ba4 100644 --- a/numpy/core/src/multiarray/getset.c +++ b/numpy/core/src/multiarray/getset.c @@ -384,15 +384,7 @@ array_data_set(PyArrayObject *self, PyObject *op, void *NPY_UNUSED(ignored)) } if (PyArray_FLAGS(self) & NPY_ARRAY_OWNDATA) { PyArray_XDECREF(self); - size_t nbytes = PyArray_NBYTES(self); - /* - * Allocation will never be 0, see comment in ctors.c - * line 820 - */ - if (nbytes == 0) { - PyArray_Descr *dtype = PyArray_DESCR(self); - nbytes = dtype->elsize ? 
dtype->elsize : 1; - } + size_t nbytes = PyArray_NBYTES_ALLOCATED(self); PyObject *handler = PyArray_HANDLER(self); if (handler == NULL) { /* This can happen if someone arbitrarily sets NPY_ARRAY_OWNDATA */ @@ -401,14 +393,13 @@ array_data_set(PyArrayObject *self, PyObject *op, void *NPY_UNUSED(ignored)) return -1; } PyDataMem_UserFREE(PyArray_DATA(self), nbytes, handler); + Py_CLEAR(((PyArrayObject_fields *)self)->mem_handler); } if (PyArray_BASE(self)) { - if ((PyArray_FLAGS(self) & NPY_ARRAY_WRITEBACKIFCOPY) || - (PyArray_FLAGS(self) & NPY_ARRAY_UPDATEIFCOPY)) { + if (PyArray_FLAGS(self) & NPY_ARRAY_WRITEBACKIFCOPY) { PyArray_ENABLEFLAGS((PyArrayObject *)PyArray_BASE(self), NPY_ARRAY_WRITEABLE); PyArray_CLEARFLAGS(self, NPY_ARRAY_WRITEBACKIFCOPY); - PyArray_CLEARFLAGS(self, NPY_ARRAY_UPDATEIFCOPY); } Py_DECREF(PyArray_BASE(self)); ((PyArrayObject_fields *)self)->base = NULL; @@ -505,9 +496,6 @@ array_descr_set(PyArrayObject *self, PyObject *arg, void *NPY_UNUSED(ignored)) /* Changing the size of the dtype results in a shape change */ if (newtype->elsize != PyArray_DESCR(self)->elsize) { - int axis; - npy_intp newdim; - /* forbidden cases */ if (PyArray_NDIM(self) == 0) { PyErr_SetString(PyExc_ValueError, @@ -522,31 +510,20 @@ array_descr_set(PyArrayObject *self, PyObject *arg, void *NPY_UNUSED(ignored)) goto fail; } - /* determine which axis to resize */ - if (PyArray_IS_C_CONTIGUOUS(self)) { - axis = PyArray_NDIM(self) - 1; - } - else if (PyArray_IS_F_CONTIGUOUS(self)) { - /* 2015-11-27 1.11.0, gh-6747 */ - if (DEPRECATE( - "Changing the shape of an F-contiguous array by " - "descriptor assignment is deprecated. 
To maintain the " - "Fortran contiguity of a multidimensional Fortran " - "array, use 'a.T.view(...).T' instead") < 0) { - goto fail; - } - axis = 0; - } - else { - /* Don't mention the deprecated F-contiguous support */ + /* resize on last axis only */ + int axis = PyArray_NDIM(self) - 1; + if (PyArray_DIMS(self)[axis] != 1 && + PyArray_STRIDES(self)[axis] != PyArray_DESCR(self)->elsize) { PyErr_SetString(PyExc_ValueError, - "To change to a dtype of a different size, the array must " - "be C-contiguous"); + "To change to a dtype of a different size, the last axis " + "must be contiguous"); goto fail; } + npy_intp newdim; + if (newtype->elsize < PyArray_DESCR(self)->elsize) { - /* if it is compatible, increase the size of the relevant axis */ + /* if it is compatible, increase the size of the last axis */ if (newtype->elsize == 0 || PyArray_DESCR(self)->elsize % newtype->elsize != 0) { PyErr_SetString(PyExc_ValueError, @@ -558,7 +535,7 @@ array_descr_set(PyArrayObject *self, PyObject *arg, void *NPY_UNUSED(ignored)) PyArray_DIMS(self)[axis] *= newdim; PyArray_STRIDES(self)[axis] = newtype->elsize; } - else if (newtype->elsize > PyArray_DESCR(self)->elsize) { + else /* newtype->elsize > PyArray_DESCR(self)->elsize */ { /* if it is compatible, decrease the size of the relevant axis */ newdim = PyArray_DIMS(self)[axis] * PyArray_DESCR(self)->elsize; if ((newdim % newtype->elsize) != 0) { @@ -633,7 +610,7 @@ array_struct_get(PyArrayObject *self, void *NPY_UNUSED(ignored)) inter->flags = inter->flags & ~NPY_ARRAY_WRITEABLE; } /* reset unused flags */ - inter->flags &= ~(NPY_ARRAY_WRITEBACKIFCOPY | NPY_ARRAY_UPDATEIFCOPY |NPY_ARRAY_OWNDATA); + inter->flags &= ~(NPY_ARRAY_WRITEBACKIFCOPY | NPY_ARRAY_OWNDATA); if (PyArray_ISNOTSWAPPED(self)) inter->flags |= NPY_ARRAY_NOTSWAPPED; /* * Copy shape and strides over since these can be reset @@ -949,15 +926,6 @@ array_transpose_get(PyArrayObject *self, void *NPY_UNUSED(ignored)) return PyArray_Transpose(self, NULL); } -/* If 
this is None, no function call is made - --- default sub-class behavior -*/ -static PyObject * -array_finalize_get(PyArrayObject *NPY_UNUSED(self), void *NPY_UNUSED(ignored)) -{ - Py_RETURN_NONE; -} - NPY_NO_EXPORT PyGetSetDef array_getsetlist[] = { {"ndim", (getter)array_ndim_get, @@ -1031,10 +999,6 @@ NPY_NO_EXPORT PyGetSetDef array_getsetlist[] = { (getter)array_priority_get, NULL, NULL, NULL}, - {"__array_finalize__", - (getter)array_finalize_get, - NULL, - NULL, NULL}, {NULL, NULL, NULL, NULL, NULL}, /* Sentinel */ }; diff --git a/numpy/core/src/multiarray/mapping.c b/numpy/core/src/multiarray/mapping.c index 014a863d5..5d515d013 100644 --- a/numpy/core/src/multiarray/mapping.c +++ b/numpy/core/src/multiarray/mapping.c @@ -3254,7 +3254,7 @@ PyArray_MapIterNew(npy_index_info *indices , int index_num, int index_type, * If copy_if_overlap != 0, check if `a` has memory overlap with any of the * arrays in `index` and with `extra_op`. If yes, make copies as appropriate * to avoid problems if `a` is modified during the iteration. - * `iter->array` may contain a copied array (UPDATEIFCOPY/WRITEBACKIFCOPY set). + * `iter->array` may contain a copied array (WRITEBACKIFCOPY set). */ NPY_NO_EXPORT PyObject * PyArray_MapIterArrayCopyIfOverlap(PyArrayObject * a, PyObject * index, diff --git a/numpy/core/src/multiarray/methods.c b/numpy/core/src/multiarray/methods.c index b0b6f42f1..33f78dff2 100644 --- a/numpy/core/src/multiarray/methods.c +++ b/numpy/core/src/multiarray/methods.c @@ -859,7 +859,7 @@ array_astype(PyArrayObject *self, * and it's not a subtype if subok is False, then we * can skip the copy. 
*/ - if (forcecopy != NPY_COPY_ALWAYS && + if (forcecopy != NPY_COPY_ALWAYS && (order == NPY_KEEPORDER || (order == NPY_ANYORDER && (PyArray_IS_C_CONTIGUOUS(self) || @@ -881,7 +881,7 @@ array_astype(PyArrayObject *self, Py_DECREF(dtype); return NULL; } - + if (!PyArray_CanCastArrayTo(self, dtype, casting)) { PyErr_Clear(); npy_set_invalid_cast_error( @@ -926,6 +926,13 @@ array_astype(PyArrayObject *self, static PyObject * +array_finalizearray(PyArrayObject *self, PyObject *obj) +{ + Py_RETURN_NONE; +} + + +static PyObject * array_wraparray(PyArrayObject *self, PyObject *args) { PyArrayObject *arr; @@ -1934,7 +1941,7 @@ array_setstate(PyArrayObject *self, PyObject *args) PyObject *rawdata = NULL; char *datastr; Py_ssize_t len; - npy_intp size, dimensions[NPY_MAXDIMS]; + npy_intp dimensions[NPY_MAXDIMS]; int nd; npy_intp nbytes; int overflowed; @@ -1976,11 +1983,7 @@ array_setstate(PyArrayObject *self, PyObject *args) * since fa could be a 0-d or scalar, and then * PyDataMem_UserFREE will be confused */ - size_t n_tofree = PyArray_NBYTES(self); - if (n_tofree == 0) { - PyArray_Descr *dtype = PyArray_DESCR(self); - n_tofree = dtype->elsize ? dtype->elsize : 1; - } + size_t n_tofree = PyArray_NBYTES_ALLOCATED(self); Py_XDECREF(PyArray_DESCR(self)); fa->descr = typecode; Py_INCREF(typecode); @@ -1988,17 +1991,39 @@ array_setstate(PyArrayObject *self, PyObject *args) if (nd < 0) { return NULL; } - size = PyArray_MultiplyList(dimensions, nd); - if (size < 0) { - /* More items than are addressable */ - return PyErr_NoMemory(); + /* + * We should do two things here: + * 1. Validate the input, that it is neither invalid, nor "too big" + * ("too big" ignores dimensios of size 0). + * 2. Find `PyArray_NBYTES` of the result, as this is what we may need to + * copy from the pickled data (may not match allocation currently if 0). + * Compare with `PyArray_NewFromDescr`, raise MemoryError for simplicity. 
+ */ + npy_bool empty = NPY_FALSE; + nbytes = 1; + for (int i = 0; i < nd; i++) { + if (dimensions[i] < 0) { + PyErr_SetString(PyExc_TypeError, + "impossible dimension while unpickling array"); + return NULL; + } + if (dimensions[i] == 0) { + empty = NPY_TRUE; + } + overflowed = npy_mul_with_overflow_intp( + &nbytes, nbytes, dimensions[i]); + if (overflowed) { + return PyErr_NoMemory(); + } } overflowed = npy_mul_with_overflow_intp( - &nbytes, size, PyArray_DESCR(self)->elsize); + &nbytes, nbytes, PyArray_DESCR(self)->elsize); if (overflowed) { - /* More bytes than are addressable */ return PyErr_NoMemory(); } + if (empty) { + nbytes = 0; + } if (PyDataType_FLAGCHK(typecode, NPY_LIST_PICKLE)) { if (!PyList_Check(rawdata)) { @@ -2039,8 +2064,7 @@ array_setstate(PyArrayObject *self, PyObject *args) if (len != nbytes) { PyErr_SetString(PyExc_ValueError, - "buffer size does not" \ - " match array size"); + "buffer size does not match array size"); Py_DECREF(rawdata); return NULL; } @@ -2065,7 +2089,6 @@ array_setstate(PyArrayObject *self, PyObject *args) fa->base = NULL; PyArray_CLEARFLAGS(self, NPY_ARRAY_WRITEBACKIFCOPY); - PyArray_CLEARFLAGS(self, NPY_ARRAY_UPDATEIFCOPY); if (PyArray_DIMS(self) != NULL) { npy_free_cache_dim_array(self); @@ -2097,21 +2120,18 @@ array_setstate(PyArrayObject *self, PyObject *args) /* Bytes should always be considered immutable, but we just grab the * pointer if they are large, to save memory. 
*/ if (!IsAligned(self) || swap || (len <= 1000)) { - npy_intp num = PyArray_NBYTES(self); - if (num == 0) { - Py_DECREF(rawdata); - Py_RETURN_NONE; - } + npy_intp num = PyArray_NBYTES_ALLOCATED(self); /* Store the handler in case the default is modified */ Py_XDECREF(fa->mem_handler); fa->mem_handler = PyDataMem_GetHandler(); if (fa->mem_handler == NULL) { + Py_CLEAR(fa->mem_handler); Py_DECREF(rawdata); return NULL; } fa->data = PyDataMem_UserNEW(num, PyArray_HANDLER(self)); if (PyArray_DATA(self) == NULL) { - Py_DECREF(fa->mem_handler); + Py_CLEAR(fa->mem_handler); Py_DECREF(rawdata); return PyErr_NoMemory(); } @@ -2158,11 +2178,8 @@ array_setstate(PyArrayObject *self, PyObject *args) } } else { - npy_intp num = PyArray_NBYTES(self); - int elsize = PyArray_DESCR(self)->elsize; - if (num == 0 || elsize == 0) { - Py_RETURN_NONE; - } + npy_intp num = PyArray_NBYTES_ALLOCATED(self); + /* Store the functions in case the default handler is modified */ Py_XDECREF(fa->mem_handler); fa->mem_handler = PyDataMem_GetHandler(); @@ -2171,7 +2188,7 @@ array_setstate(PyArrayObject *self, PyObject *args) } fa->data = PyDataMem_UserNEW(num, PyArray_HANDLER(self)); if (PyArray_DATA(self) == NULL) { - Py_DECREF(fa->mem_handler); + Py_CLEAR(fa->mem_handler); return PyErr_NoMemory(); } if (PyDataType_FLAGCHK(PyArray_DESCR(self), NPY_NEEDS_INIT)) { @@ -2180,7 +2197,6 @@ array_setstate(PyArrayObject *self, PyObject *args) PyArray_ENABLEFLAGS(self, NPY_ARRAY_OWNDATA); fa->base = NULL; if (_setlist_pkl(self, rawdata) < 0) { - Py_DECREF(fa->mem_handler); return NULL; } } @@ -2621,7 +2637,6 @@ array_setflags(PyArrayObject *self, PyObject *args, PyObject *kwds) } else { PyArray_CLEARFLAGS(self, NPY_ARRAY_WRITEBACKIFCOPY); - PyArray_CLEARFLAGS(self, NPY_ARRAY_UPDATEIFCOPY); Py_XDECREF(fa->base); fa->base = NULL; } @@ -2769,6 +2784,9 @@ NPY_NO_EXPORT PyMethodDef array_methods[] = { {"__array_prepare__", (PyCFunction)array_preparearray, METH_VARARGS, NULL}, + {"__array_finalize__", + 
(PyCFunction)array_finalizearray, + METH_O, NULL}, {"__array_wrap__", (PyCFunction)array_wraparray, METH_VARARGS, NULL}, diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c index cf0160a2b..789446d0c 100644 --- a/numpy/core/src/multiarray/multiarraymodule.c +++ b/numpy/core/src/multiarray/multiarraymodule.c @@ -1478,11 +1478,30 @@ PyArray_EquivTypes(PyArray_Descr *type1, PyArray_Descr *type2) if (type1 == type2) { return 1; } + + if (Py_TYPE(Py_TYPE(type1)) == &PyType_Type) { + /* + * 2021-12-17: This case is nonsense and should be removed eventually! + * + * boost::python has/had a bug effectively using EquivTypes with + * `type(arbitrary_obj)`. That is clearly wrong as that cannot be a + * `PyArray_Descr *`. We assume that `type(type(type(arbitrary_obj))` + * is always in practice `type` (this is the type of the metaclass), + * but for our descriptors, `type(type(descr))` is DTypeMeta. + * + * In that case, we just return False. There is a possibility that + * this actually _worked_ effectively (returning 1 sometimes). + * We ignore that possibility for simplicity; it really is not our bug. + */ + return 0; + } + /* * Do not use PyArray_CanCastTypeTo because it supports legacy flexible * dtypes as input. 
*/ - NPY_CASTING safety = PyArray_GetCastSafety(type1, type2, NULL); + npy_intp view_offset; + NPY_CASTING safety = PyArray_GetCastInfo(type1, type2, NULL, &view_offset); if (safety < 0) { PyErr_Clear(); return 0; @@ -4621,7 +4640,6 @@ set_flaginfo(PyObject *d) _addnew(FORTRAN, NPY_ARRAY_F_CONTIGUOUS, F); _addnew(CONTIGUOUS, NPY_ARRAY_C_CONTIGUOUS, C); _addnew(ALIGNED, NPY_ARRAY_ALIGNED, A); - _addnew(UPDATEIFCOPY, NPY_ARRAY_UPDATEIFCOPY, U); _addnew(WRITEBACKIFCOPY, NPY_ARRAY_WRITEBACKIFCOPY, X); _addnew(WRITEABLE, NPY_ARRAY_WRITEABLE, W); _addone(C_CONTIGUOUS, NPY_ARRAY_C_CONTIGUOUS); diff --git a/numpy/core/src/multiarray/scalartypes.c.src b/numpy/core/src/multiarray/scalartypes.c.src index db1e49db8..907761874 100644 --- a/numpy/core/src/multiarray/scalartypes.c.src +++ b/numpy/core/src/multiarray/scalartypes.c.src @@ -1203,8 +1203,7 @@ gentype_struct_get(PyObject *self, void *NPY_UNUSED(ignored)) inter->two = 2; inter->nd = 0; inter->flags = PyArray_FLAGS(arr); - inter->flags &= ~(NPY_ARRAY_UPDATEIFCOPY | NPY_ARRAY_WRITEBACKIFCOPY | - NPY_ARRAY_OWNDATA); + inter->flags &= ~(NPY_ARRAY_WRITEBACKIFCOPY | NPY_ARRAY_OWNDATA); inter->flags |= NPY_ARRAY_NOTSWAPPED; inter->typekind = PyArray_DESCR(arr)->kind; inter->itemsize = PyArray_DESCR(arr)->elsize; @@ -2585,6 +2584,7 @@ gentype_arrtype_getbuffer(PyObject *self, Py_buffer *view, int flags) "user-defined scalar %R registered for built-in dtype %S? 
" "This should be impossible.", self, descr); + Py_DECREF(descr); return -1; } view->ndim = 0; diff --git a/numpy/core/src/multiarray/temp_elide.c b/numpy/core/src/multiarray/temp_elide.c index f615aa336..34248076c 100644 --- a/numpy/core/src/multiarray/temp_elide.c +++ b/numpy/core/src/multiarray/temp_elide.c @@ -286,7 +286,6 @@ can_elide_temp(PyObject *olhs, PyObject *orhs, int *cannot) !PyArray_ISNUMBER(alhs) || !PyArray_CHKFLAGS(alhs, NPY_ARRAY_OWNDATA) || !PyArray_ISWRITEABLE(alhs) || - PyArray_CHKFLAGS(alhs, NPY_ARRAY_UPDATEIFCOPY) || PyArray_CHKFLAGS(alhs, NPY_ARRAY_WRITEBACKIFCOPY) || PyArray_NBYTES(alhs) < NPY_MIN_ELIDE_BYTES) { return 0; @@ -365,7 +364,6 @@ can_elide_temp_unary(PyArrayObject * m1) !PyArray_ISNUMBER(m1) || !PyArray_CHKFLAGS(m1, NPY_ARRAY_OWNDATA) || !PyArray_ISWRITEABLE(m1) || - PyArray_CHKFLAGS(m1, NPY_ARRAY_UPDATEIFCOPY) || PyArray_NBYTES(m1) < NPY_MIN_ELIDE_BYTES) { return 0; } diff --git a/numpy/core/src/npymath/npy_math_internal.h.src b/numpy/core/src/npymath/npy_math_internal.h.src index 5b418342f..15d35637f 100644 --- a/numpy/core/src/npymath/npy_math_internal.h.src +++ b/numpy/core/src/npymath/npy_math_internal.h.src @@ -54,6 +54,9 @@ * ==================================================== */ #include "npy_math_private.h" +#ifdef _MSC_VER +# include <intrin.h> // for __popcnt +#endif /* Magic binary numbers used by bit_count * For type T, the magic numbers are computed as follows: diff --git a/numpy/core/src/npysort/binsearch.c.src b/numpy/core/src/npysort/binsearch.c.src deleted file mode 100644 index 41165897b..000000000 --- a/numpy/core/src/npysort/binsearch.c.src +++ /dev/null @@ -1,250 +0,0 @@ -/* -*- c -*- */ -#define NPY_NO_DEPRECATED_API NPY_API_VERSION - -#include "npy_sort.h" -#include "npysort_common.h" -#include "npy_binsearch.h" - -#define NOT_USED NPY_UNUSED(unused) - -/* - ***************************************************************************** - ** NUMERIC SEARCHES ** - 
***************************************************************************** - */ - -/**begin repeat - * - * #TYPE = BOOL, BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG, - * LONGLONG, ULONGLONG, HALF, FLOAT, DOUBLE, LONGDOUBLE, - * CFLOAT, CDOUBLE, CLONGDOUBLE, DATETIME, TIMEDELTA# - * #suff = bool, byte, ubyte, short, ushort, int, uint, long, ulong, - * longlong, ulonglong, half, float, double, longdouble, - * cfloat, cdouble, clongdouble, datetime, timedelta# - * #type = npy_bool, npy_byte, npy_ubyte, npy_short, npy_ushort, npy_int, - * npy_uint, npy_long, npy_ulong, npy_longlong, npy_ulonglong, - * npy_ushort, npy_float, npy_double, npy_longdouble, npy_cfloat, - * npy_cdouble, npy_clongdouble, npy_datetime, npy_timedelta# - */ - -#define @TYPE@_LTE(a, b) (!@TYPE@_LT((b), (a))) - -/**begin repeat1 - * - * #side = left, right# - * #CMP = LT, LTE# - */ - -NPY_NO_EXPORT void -binsearch_@side@_@suff@(const char *arr, const char *key, char *ret, - npy_intp arr_len, npy_intp key_len, - npy_intp arr_str, npy_intp key_str, npy_intp ret_str, - PyArrayObject *NOT_USED) -{ - npy_intp min_idx = 0; - npy_intp max_idx = arr_len; - @type@ last_key_val; - - if (key_len == 0) { - return; - } - last_key_val = *(const @type@ *)key; - - for (; key_len > 0; key_len--, key += key_str, ret += ret_str) { - const @type@ key_val = *(const @type@ *)key; - /* - * Updating only one of the indices based on the previous key - * gives the search a big boost when keys are sorted, but slightly - * slows down things for purely random ones. - */ - if (@TYPE@_LT(last_key_val, key_val)) { - max_idx = arr_len; - } - else { - min_idx = 0; - max_idx = (max_idx < arr_len) ? 
(max_idx + 1) : arr_len; - } - - last_key_val = key_val; - - while (min_idx < max_idx) { - const npy_intp mid_idx = min_idx + ((max_idx - min_idx) >> 1); - const @type@ mid_val = *(const @type@ *)(arr + mid_idx*arr_str); - if (@TYPE@_@CMP@(mid_val, key_val)) { - min_idx = mid_idx + 1; - } - else { - max_idx = mid_idx; - } - } - *(npy_intp *)ret = min_idx; - } -} - -NPY_NO_EXPORT int -argbinsearch_@side@_@suff@(const char *arr, const char *key, - const char *sort, char *ret, - npy_intp arr_len, npy_intp key_len, - npy_intp arr_str, npy_intp key_str, - npy_intp sort_str, npy_intp ret_str, - PyArrayObject *NOT_USED) -{ - npy_intp min_idx = 0; - npy_intp max_idx = arr_len; - @type@ last_key_val; - - if (key_len == 0) { - return 0; - } - last_key_val = *(const @type@ *)key; - - for (; key_len > 0; key_len--, key += key_str, ret += ret_str) { - const @type@ key_val = *(const @type@ *)key; - /* - * Updating only one of the indices based on the previous key - * gives the search a big boost when keys are sorted, but slightly - * slows down things for purely random ones. - */ - if (@TYPE@_LT(last_key_val, key_val)) { - max_idx = arr_len; - } - else { - min_idx = 0; - max_idx = (max_idx < arr_len) ? 
(max_idx + 1) : arr_len; - } - - last_key_val = key_val; - - while (min_idx < max_idx) { - const npy_intp mid_idx = min_idx + ((max_idx - min_idx) >> 1); - const npy_intp sort_idx = *(npy_intp *)(sort + mid_idx*sort_str); - @type@ mid_val; - - if (sort_idx < 0 || sort_idx >= arr_len) { - return -1; - } - - mid_val = *(const @type@ *)(arr + sort_idx*arr_str); - - if (@TYPE@_@CMP@(mid_val, key_val)) { - min_idx = mid_idx + 1; - } - else { - max_idx = mid_idx; - } - } - *(npy_intp *)ret = min_idx; - } - return 0; -} - -/**end repeat1**/ -/**end repeat**/ - -/* - ***************************************************************************** - ** GENERIC SEARCH ** - ***************************************************************************** - */ - - /**begin repeat - * - * #side = left, right# - * #CMP = <, <=# - */ - -NPY_NO_EXPORT void -npy_binsearch_@side@(const char *arr, const char *key, char *ret, - npy_intp arr_len, npy_intp key_len, - npy_intp arr_str, npy_intp key_str, npy_intp ret_str, - PyArrayObject *cmp) -{ - PyArray_CompareFunc *compare = PyArray_DESCR(cmp)->f->compare; - npy_intp min_idx = 0; - npy_intp max_idx = arr_len; - const char *last_key = key; - - for (; key_len > 0; key_len--, key += key_str, ret += ret_str) { - /* - * Updating only one of the indices based on the previous key - * gives the search a big boost when keys are sorted, but slightly - * slows down things for purely random ones. - */ - if (compare(last_key, key, cmp) @CMP@ 0) { - max_idx = arr_len; - } - else { - min_idx = 0; - max_idx = (max_idx < arr_len) ? 
(max_idx + 1) : arr_len; - } - - last_key = key; - - while (min_idx < max_idx) { - const npy_intp mid_idx = min_idx + ((max_idx - min_idx) >> 1); - const char *arr_ptr = arr + mid_idx*arr_str; - - if (compare(arr_ptr, key, cmp) @CMP@ 0) { - min_idx = mid_idx + 1; - } - else { - max_idx = mid_idx; - } - } - *(npy_intp *)ret = min_idx; - } -} - -NPY_NO_EXPORT int -npy_argbinsearch_@side@(const char *arr, const char *key, - const char *sort, char *ret, - npy_intp arr_len, npy_intp key_len, - npy_intp arr_str, npy_intp key_str, - npy_intp sort_str, npy_intp ret_str, - PyArrayObject *cmp) -{ - PyArray_CompareFunc *compare = PyArray_DESCR(cmp)->f->compare; - npy_intp min_idx = 0; - npy_intp max_idx = arr_len; - const char *last_key = key; - - for (; key_len > 0; key_len--, key += key_str, ret += ret_str) { - /* - * Updating only one of the indices based on the previous key - * gives the search a big boost when keys are sorted, but slightly - * slows down things for purely random ones. - */ - if (compare(last_key, key, cmp) @CMP@ 0) { - max_idx = arr_len; - } - else { - min_idx = 0; - max_idx = (max_idx < arr_len) ? 
(max_idx + 1) : arr_len; - } - - last_key = key; - - while (min_idx < max_idx) { - const npy_intp mid_idx = min_idx + ((max_idx - min_idx) >> 1); - const npy_intp sort_idx = *(npy_intp *)(sort + mid_idx*sort_str); - const char *arr_ptr; - - if (sort_idx < 0 || sort_idx >= arr_len) { - return -1; - } - - arr_ptr = arr + sort_idx*arr_str; - - if (compare(arr_ptr, key, cmp) @CMP@ 0) { - min_idx = mid_idx + 1; - } - else { - max_idx = mid_idx; - } - } - *(npy_intp *)ret = min_idx; - } - return 0; -} - -/**end repeat**/ diff --git a/numpy/core/src/npysort/binsearch.cpp b/numpy/core/src/npysort/binsearch.cpp new file mode 100644 index 000000000..cd5f03470 --- /dev/null +++ b/numpy/core/src/npysort/binsearch.cpp @@ -0,0 +1,387 @@ +/* -*- c -*- */ + +#define NPY_NO_DEPRECATED_API NPY_API_VERSION + +#include "npy_sort.h" +#include "numpy_tag.h" +#include <numpy/npy_common.h> +#include <numpy/ndarraytypes.h> + +#include "npy_binsearch.h" + +#include <array> +#include <functional> // for std::less and std::less_equal + +// Enumerators for the variant of binsearch +enum arg_t { noarg, arg}; +enum side_t { left, right}; + +// Mapping from enumerators to comparators +template<class Tag, side_t side> +struct side_to_cmp; +template<class Tag> +struct side_to_cmp<Tag, left> { static constexpr auto value = Tag::less; }; +template<class Tag> +struct side_to_cmp<Tag, right> { static constexpr auto value = Tag::less_equal; }; + +template<side_t side> +struct side_to_generic_cmp; +template<> +struct side_to_generic_cmp<left> { using type = std::less<int>; }; +template<> +struct side_to_generic_cmp<right> { using type = std::less_equal<int>; }; + +/* + ***************************************************************************** + ** NUMERIC SEARCHES ** + ***************************************************************************** + */ +template<class Tag, side_t side> +static void +binsearch(const char *arr, const char *key, char *ret, + npy_intp arr_len, npy_intp key_len, + npy_intp 
arr_str, npy_intp key_str, npy_intp ret_str, + PyArrayObject*) +{ + using T = typename Tag::type; + auto cmp = side_to_cmp<Tag, side>::value; + npy_intp min_idx = 0; + npy_intp max_idx = arr_len; + T last_key_val; + + if (key_len == 0) { + return; + } + last_key_val = *(const T *)key; + + for (; key_len > 0; key_len--, key += key_str, ret += ret_str) { + const T key_val = *(const T *)key; + /* + * Updating only one of the indices based on the previous key + * gives the search a big boost when keys are sorted, but slightly + * slows down things for purely random ones. + */ + if (cmp(last_key_val, key_val)) { + max_idx = arr_len; + } + else { + min_idx = 0; + max_idx = (max_idx < arr_len) ? (max_idx + 1) : arr_len; + } + + last_key_val = key_val; + + while (min_idx < max_idx) { + const npy_intp mid_idx = min_idx + ((max_idx - min_idx) >> 1); + const T mid_val = *(const T *)(arr + mid_idx*arr_str); + if (cmp(mid_val, key_val)) { + min_idx = mid_idx + 1; + } + else { + max_idx = mid_idx; + } + } + *(npy_intp *)ret = min_idx; + } +} + +template<class Tag, side_t side> +static int +argbinsearch(const char *arr, const char *key, + const char *sort, char *ret, + npy_intp arr_len, npy_intp key_len, + npy_intp arr_str, npy_intp key_str, + npy_intp sort_str, npy_intp ret_str, PyArrayObject*) +{ + using T = typename Tag::type; + auto cmp = side_to_cmp<Tag, side>::value; + npy_intp min_idx = 0; + npy_intp max_idx = arr_len; + T last_key_val; + + if (key_len == 0) { + return 0; + } + last_key_val = *(const T *)key; + + for (; key_len > 0; key_len--, key += key_str, ret += ret_str) { + const T key_val = *(const T *)key; + /* + * Updating only one of the indices based on the previous key + * gives the search a big boost when keys are sorted, but slightly + * slows down things for purely random ones. + */ + if (cmp(last_key_val, key_val)) { + max_idx = arr_len; + } + else { + min_idx = 0; + max_idx = (max_idx < arr_len) ? 
(max_idx + 1) : arr_len; + } + + last_key_val = key_val; + + while (min_idx < max_idx) { + const npy_intp mid_idx = min_idx + ((max_idx - min_idx) >> 1); + const npy_intp sort_idx = *(npy_intp *)(sort + mid_idx*sort_str); + T mid_val; + + if (sort_idx < 0 || sort_idx >= arr_len) { + return -1; + } + + mid_val = *(const T *)(arr + sort_idx*arr_str); + + if (cmp(mid_val, key_val)) { + min_idx = mid_idx + 1; + } + else { + max_idx = mid_idx; + } + } + *(npy_intp *)ret = min_idx; + } + return 0; +} + +/* + ***************************************************************************** + ** GENERIC SEARCH ** + ***************************************************************************** + */ + +template<side_t side> +static void +npy_binsearch(const char *arr, const char *key, char *ret, + npy_intp arr_len, npy_intp key_len, + npy_intp arr_str, npy_intp key_str, npy_intp ret_str, + PyArrayObject *cmp) +{ + using Cmp = typename side_to_generic_cmp<side>::type; + PyArray_CompareFunc *compare = PyArray_DESCR(cmp)->f->compare; + npy_intp min_idx = 0; + npy_intp max_idx = arr_len; + const char *last_key = key; + + for (; key_len > 0; key_len--, key += key_str, ret += ret_str) { + /* + * Updating only one of the indices based on the previous key + * gives the search a big boost when keys are sorted, but slightly + * slows down things for purely random ones. + */ + if (Cmp{}(compare(last_key, key, cmp), 0)) { + max_idx = arr_len; + } + else { + min_idx = 0; + max_idx = (max_idx < arr_len) ? 
(max_idx + 1) : arr_len; + } + + last_key = key; + + while (min_idx < max_idx) { + const npy_intp mid_idx = min_idx + ((max_idx - min_idx) >> 1); + const char *arr_ptr = arr + mid_idx*arr_str; + + if (Cmp{}(compare(arr_ptr, key, cmp), 0)) { + min_idx = mid_idx + 1; + } + else { + max_idx = mid_idx; + } + } + *(npy_intp *)ret = min_idx; + } +} + +template<side_t side> +static int +npy_argbinsearch(const char *arr, const char *key, + const char *sort, char *ret, + npy_intp arr_len, npy_intp key_len, + npy_intp arr_str, npy_intp key_str, + npy_intp sort_str, npy_intp ret_str, + PyArrayObject *cmp) +{ + using Cmp = typename side_to_generic_cmp<side>::type; + PyArray_CompareFunc *compare = PyArray_DESCR(cmp)->f->compare; + npy_intp min_idx = 0; + npy_intp max_idx = arr_len; + const char *last_key = key; + + for (; key_len > 0; key_len--, key += key_str, ret += ret_str) { + /* + * Updating only one of the indices based on the previous key + * gives the search a big boost when keys are sorted, but slightly + * slows down things for purely random ones. + */ + if (Cmp{}(compare(last_key, key, cmp), 0)) { + max_idx = arr_len; + } + else { + min_idx = 0; + max_idx = (max_idx < arr_len) ? 
(max_idx + 1) : arr_len; + } + + last_key = key; + + while (min_idx < max_idx) { + const npy_intp mid_idx = min_idx + ((max_idx - min_idx) >> 1); + const npy_intp sort_idx = *(npy_intp *)(sort + mid_idx*sort_str); + const char *arr_ptr; + + if (sort_idx < 0 || sort_idx >= arr_len) { + return -1; + } + + arr_ptr = arr + sort_idx*arr_str; + + if (Cmp{}(compare(arr_ptr, key, cmp), 0)) { + min_idx = mid_idx + 1; + } + else { + max_idx = mid_idx; + } + } + *(npy_intp *)ret = min_idx; + } + return 0; +} + +/* + ***************************************************************************** + ** GENERATOR ** + ***************************************************************************** + */ + +template<arg_t arg> +struct binsearch_base; + +template<> +struct binsearch_base<arg> { + using function_type = PyArray_ArgBinSearchFunc*; + struct value_type { + int typenum; + function_type binsearch[NPY_NSEARCHSIDES]; + }; + template<class... Tags> + static constexpr std::array<value_type, sizeof...(Tags)> make_binsearch_map(npy::taglist<Tags...>) { + return std::array<value_type, sizeof...(Tags)>{ + value_type{ + Tags::type_value, + { + (function_type)&argbinsearch<Tags, left>, + (function_type)argbinsearch<Tags, right> + } + }... + }; + } + static constexpr std::array<function_type, 2> npy_map = { + (function_type)&npy_argbinsearch<left>, + (function_type)&npy_argbinsearch<right> + }; +}; +constexpr std::array<binsearch_base<arg>::function_type, 2> binsearch_base<arg>::npy_map; + +template<> +struct binsearch_base<noarg> { + using function_type = PyArray_BinSearchFunc*; + struct value_type { + int typenum; + function_type binsearch[NPY_NSEARCHSIDES]; + }; + template<class... Tags> + static constexpr std::array<value_type, sizeof...(Tags)> make_binsearch_map(npy::taglist<Tags...>) { + return std::array<value_type, sizeof...(Tags)>{ + value_type{ + Tags::type_value, + { + (function_type)&binsearch<Tags, left>, + (function_type)binsearch<Tags, right> + } + }... 
+ }; + } + static constexpr std::array<function_type, 2> npy_map = { + (function_type)&npy_binsearch<left>, + (function_type)&npy_binsearch<right> + }; +}; +constexpr std::array<binsearch_base<noarg>::function_type, 2> binsearch_base<noarg>::npy_map; + +// Handle generation of all binsearch variants +template<arg_t arg> +struct binsearch_t : binsearch_base<arg> { + using binsearch_base<arg>::make_binsearch_map; + using value_type = typename binsearch_base<arg>::value_type; + + using taglist = npy::taglist< + /* If adding new types, make sure to keep them ordered by type num */ + npy::bool_tag, npy::byte_tag, npy::ubyte_tag, npy::short_tag, + npy::ushort_tag, npy::int_tag, npy::uint_tag, npy::long_tag, + npy::ulong_tag, npy::longlong_tag, npy::ulonglong_tag, npy::half_tag, + npy::float_tag, npy::double_tag, npy::longdouble_tag, npy::cfloat_tag, + npy::cdouble_tag, npy::clongdouble_tag, npy::datetime_tag, + npy::timedelta_tag>; + + static constexpr std::array<value_type, taglist::size> map = make_binsearch_map(taglist()); +}; +template<arg_t arg> +constexpr std::array<typename binsearch_t<arg>::value_type, binsearch_t<arg>::taglist::size> binsearch_t<arg>::map; + + +template<arg_t arg> +static NPY_INLINE typename binsearch_t<arg>::function_type +_get_binsearch_func(PyArray_Descr *dtype, NPY_SEARCHSIDE side) +{ + using binsearch = binsearch_t<arg>; + npy_intp nfuncs = binsearch::map.size();; + npy_intp min_idx = 0; + npy_intp max_idx = nfuncs; + int type = dtype->type_num; + + if ((int)side >= (int)NPY_NSEARCHSIDES) { + return NULL; + } + + /* + * It seems only fair that a binary search function be searched for + * using a binary search... 
+ */ + while (min_idx < max_idx) { + npy_intp mid_idx = min_idx + ((max_idx - min_idx) >> 1); + + if (binsearch::map[mid_idx].typenum < type) { + min_idx = mid_idx + 1; + } + else { + max_idx = mid_idx; + } + } + + if (min_idx < nfuncs && + binsearch::map[min_idx].typenum == type) { + return binsearch::map[min_idx].binsearch[side]; + } + + if (dtype->f->compare) { + return binsearch::npy_map[side]; + } + + return NULL; +} + + +/* + ***************************************************************************** + ** C INTERFACE ** + ***************************************************************************** + */ +extern "C" { + NPY_NO_EXPORT PyArray_BinSearchFunc* get_binsearch_func(PyArray_Descr *dtype, NPY_SEARCHSIDE side) { + return _get_binsearch_func<noarg>(dtype, side); + } + NPY_NO_EXPORT PyArray_ArgBinSearchFunc* get_argbinsearch_func(PyArray_Descr *dtype, NPY_SEARCHSIDE side) { + return _get_binsearch_func<arg>(dtype, side); + } +} diff --git a/numpy/core/src/npysort/npysort_common.h b/numpy/core/src/npysort/npysort_common.h index 2a6e4d421..a537fcd08 100644 --- a/numpy/core/src/npysort/npysort_common.h +++ b/numpy/core/src/npysort/npysort_common.h @@ -4,6 +4,10 @@ #include <stdlib.h> #include <numpy/ndarraytypes.h> +#ifdef __cplusplus +extern "C" { +#endif + /* ***************************************************************************** ** SWAP MACROS ** @@ -139,14 +143,14 @@ LONGDOUBLE_LT(npy_longdouble a, npy_longdouble b) NPY_INLINE static int -npy_half_isnan(npy_half h) +_npy_half_isnan(npy_half h) { return ((h&0x7c00u) == 0x7c00u) && ((h&0x03ffu) != 0x0000u); } NPY_INLINE static int -npy_half_lt_nonan(npy_half h1, npy_half h2) +_npy_half_lt_nonan(npy_half h1, npy_half h2) { if (h1&0x8000u) { if (h2&0x8000u) { @@ -173,11 +177,11 @@ HALF_LT(npy_half a, npy_half b) { int ret; - if (npy_half_isnan(b)) { - ret = !npy_half_isnan(a); + if (_npy_half_isnan(b)) { + ret = !_npy_half_isnan(a); } else { - ret = !npy_half_isnan(a) && npy_half_lt_nonan(a, b); + 
ret = !_npy_half_isnan(a) && _npy_half_lt_nonan(a, b); } return ret; @@ -373,4 +377,8 @@ GENERIC_SWAP(char *a, char *b, size_t len) } } +#ifdef __cplusplus +} +#endif + #endif diff --git a/numpy/core/src/npysort/radixsort.cpp b/numpy/core/src/npysort/radixsort.cpp index 017ea43b6..5393869ee 100644 --- a/numpy/core/src/npysort/radixsort.cpp +++ b/numpy/core/src/npysort/radixsort.cpp @@ -14,9 +14,9 @@ */ // Reference: https://github.com/eloj/radix-sorting#-key-derivation -template <class T> -T -KEY_OF(T x) +template <class T, class UT> +UT +KEY_OF(UT x) { // Floating-point is currently disabled. // Floating-point tests succeed for double and float on macOS but not on @@ -27,12 +27,12 @@ KEY_OF(T x) // For floats, we invert the key if the sign bit is set, else we invert // the sign bit. return ((x) ^ (-((x) >> (sizeof(T) * 8 - 1)) | - ((T)1 << (sizeof(T) * 8 - 1)))); + ((UT)1 << (sizeof(T) * 8 - 1)))); } else if (std::is_signed<T>::value) { // For signed ints, we flip the sign bit so the negatives are below the // positives. 
- return ((x) ^ ((T)1 << (sizeof(T) * 8 - 1))); + return ((x) ^ ((UT)1 << (sizeof(UT) * 8 - 1))); } else { return x; @@ -46,24 +46,24 @@ nth_byte(T key, npy_intp l) return (key >> (l << 3)) & 0xFF; } -template <class T> -static T * -radixsort0(T *start, T *aux, npy_intp num) +template <class T, class UT> +static UT * +radixsort0(UT *start, UT *aux, npy_intp num) { - npy_intp cnt[sizeof(T)][1 << 8] = {{0}}; - T key0 = KEY_OF(start[0]); + npy_intp cnt[sizeof(UT)][1 << 8] = {{0}}; + UT key0 = KEY_OF<T>(start[0]); for (npy_intp i = 0; i < num; i++) { - T k = KEY_OF(start[i]); + UT k = KEY_OF<T>(start[i]); - for (size_t l = 0; l < sizeof(T); l++) { + for (size_t l = 0; l < sizeof(UT); l++) { cnt[l][nth_byte(k, l)]++; } } size_t ncols = 0; - npy_ubyte cols[sizeof(T)]; - for (size_t l = 0; l < sizeof(T); l++) { + npy_ubyte cols[sizeof(UT)]; + for (size_t l = 0; l < sizeof(UT); l++) { if (cnt[l][nth_byte(key0, l)] != num) { cols[ncols++] = l; } @@ -79,9 +79,9 @@ radixsort0(T *start, T *aux, npy_intp num) } for (size_t l = 0; l < ncols; l++) { - T *temp; + UT *temp; for (npy_intp i = 0; i < num; i++) { - T k = KEY_OF(start[i]); + UT k = KEY_OF<T>(start[i]); npy_intp dst = cnt[cols[l]][nth_byte(k, cols[l])]++; aux[dst] = start[i]; } @@ -94,18 +94,18 @@ radixsort0(T *start, T *aux, npy_intp num) return start; } -template <class T> +template <class T, class UT> static int -radixsort_(T *start, npy_intp num) +radixsort_(UT *start, npy_intp num) { if (num < 2) { return 0; } npy_bool all_sorted = 1; - T k1 = KEY_OF(start[0]), k2; + UT k1 = KEY_OF<T>(start[0]); for (npy_intp i = 1; i < num; i++) { - k2 = KEY_OF(start[i]); + UT k2 = KEY_OF<T>(start[i]); if (k1 > k2) { all_sorted = 0; break; @@ -117,14 +117,14 @@ radixsort_(T *start, npy_intp num) return 0; } - T *aux = (T *)malloc(num * sizeof(T)); + UT *aux = (UT *)malloc(num * sizeof(UT)); if (aux == nullptr) { return -NPY_ENOMEM; } - T *sorted = radixsort0(start, aux, num); + UT *sorted = radixsort0<T>(start, aux, num); if 
(sorted != start) { - memcpy(start, sorted, num * sizeof(T)); + memcpy(start, sorted, num * sizeof(UT)); } free(aux); @@ -135,27 +135,28 @@ template <class T> static int radixsort(void *start, npy_intp num) { - return radixsort_((T *)start, num); + using UT = typename std::make_unsigned<T>::type; + return radixsort_<T>((UT *)start, num); } -template <class T> +template <class T, class UT> static npy_intp * -aradixsort0(T *start, npy_intp *aux, npy_intp *tosort, npy_intp num) +aradixsort0(UT *start, npy_intp *aux, npy_intp *tosort, npy_intp num) { - npy_intp cnt[sizeof(T)][1 << 8] = {{0}}; - T key0 = KEY_OF(start[0]); + npy_intp cnt[sizeof(UT)][1 << 8] = {{0}}; + UT key0 = KEY_OF<T>(start[0]); for (npy_intp i = 0; i < num; i++) { - T k = KEY_OF(start[i]); + UT k = KEY_OF<T>(start[i]); - for (size_t l = 0; l < sizeof(T); l++) { + for (size_t l = 0; l < sizeof(UT); l++) { cnt[l][nth_byte(k, l)]++; } } size_t ncols = 0; - npy_ubyte cols[sizeof(T)]; - for (size_t l = 0; l < sizeof(T); l++) { + npy_ubyte cols[sizeof(UT)]; + for (size_t l = 0; l < sizeof(UT); l++) { if (cnt[l][nth_byte(key0, l)] != num) { cols[ncols++] = l; } @@ -173,7 +174,7 @@ aradixsort0(T *start, npy_intp *aux, npy_intp *tosort, npy_intp num) for (size_t l = 0; l < ncols; l++) { npy_intp *temp; for (npy_intp i = 0; i < num; i++) { - T k = KEY_OF(start[tosort[i]]); + UT k = KEY_OF<T>(start[tosort[i]]); npy_intp dst = cnt[cols[l]][nth_byte(k, cols[l])]++; aux[dst] = tosort[i]; } @@ -186,22 +187,22 @@ aradixsort0(T *start, npy_intp *aux, npy_intp *tosort, npy_intp num) return tosort; } -template <class T> +template <class T, class UT> static int -aradixsort_(T *start, npy_intp *tosort, npy_intp num) +aradixsort_(UT *start, npy_intp *tosort, npy_intp num) { npy_intp *sorted; npy_intp *aux; - T k1, k2; + UT k1, k2; npy_bool all_sorted = 1; if (num < 2) { return 0; } - k1 = KEY_OF(start[tosort[0]]); + k1 = KEY_OF<T>(start[tosort[0]]); for (npy_intp i = 1; i < num; i++) { - k2 = KEY_OF(start[tosort[i]]); + 
k2 = KEY_OF<T>(start[tosort[i]]); if (k1 > k2) { all_sorted = 0; break; @@ -218,7 +219,7 @@ aradixsort_(T *start, npy_intp *tosort, npy_intp num) return -NPY_ENOMEM; } - sorted = aradixsort0(start, aux, tosort, num); + sorted = aradixsort0<T>(start, aux, tosort, num); if (sorted != tosort) { memcpy(tosort, sorted, num * sizeof(npy_intp)); } @@ -231,7 +232,8 @@ template <class T> static int aradixsort(void *start, npy_intp *tosort, npy_intp num) { - return aradixsort_((T *)start, tosort, num); + using UT = typename std::make_unsigned<T>::type; + return aradixsort_<T>((UT *)start, tosort, num); } extern "C" { diff --git a/numpy/core/src/umath/_operand_flag_tests.c.src b/numpy/core/src/umath/_operand_flag_tests.c index c59e13baf..c59e13baf 100644 --- a/numpy/core/src/umath/_operand_flag_tests.c.src +++ b/numpy/core/src/umath/_operand_flag_tests.c diff --git a/numpy/core/src/umath/_scaled_float_dtype.c b/numpy/core/src/umath/_scaled_float_dtype.c index b6c19362a..a214b32aa 100644 --- a/numpy/core/src/umath/_scaled_float_dtype.c +++ b/numpy/core/src/umath/_scaled_float_dtype.c @@ -325,7 +325,8 @@ sfloat_to_sfloat_resolve_descriptors( PyArrayMethodObject *NPY_UNUSED(self), PyArray_DTypeMeta *NPY_UNUSED(dtypes[2]), PyArray_Descr *given_descrs[2], - PyArray_Descr *loop_descrs[2]) + PyArray_Descr *loop_descrs[2], + npy_intp *view_offset) { loop_descrs[0] = given_descrs[0]; Py_INCREF(loop_descrs[0]); @@ -341,7 +342,8 @@ sfloat_to_sfloat_resolve_descriptors( if (((PyArray_SFloatDescr *)loop_descrs[0])->scaling == ((PyArray_SFloatDescr *)loop_descrs[1])->scaling) { /* same scaling is just a view */ - return NPY_NO_CASTING | _NPY_CAST_IS_VIEW; + *view_offset = 0; + return NPY_NO_CASTING; } else if (-((PyArray_SFloatDescr *)loop_descrs[0])->scaling == ((PyArray_SFloatDescr *)loop_descrs[1])->scaling) { @@ -384,7 +386,8 @@ float_to_from_sfloat_resolve_descriptors( PyArrayMethodObject *NPY_UNUSED(self), PyArray_DTypeMeta *dtypes[2], PyArray_Descr *NPY_UNUSED(given_descrs[2]), - 
PyArray_Descr *loop_descrs[2]) + PyArray_Descr *loop_descrs[2], + npy_intp *view_offset) { loop_descrs[0] = NPY_DT_CALL_default_descr(dtypes[0]); if (loop_descrs[0] == NULL) { @@ -394,7 +397,8 @@ float_to_from_sfloat_resolve_descriptors( if (loop_descrs[1] == NULL) { return -1; } - return NPY_NO_CASTING | _NPY_CAST_IS_VIEW; + *view_offset = 0; + return NPY_NO_CASTING; } @@ -422,7 +426,8 @@ sfloat_to_bool_resolve_descriptors( PyArrayMethodObject *NPY_UNUSED(self), PyArray_DTypeMeta *NPY_UNUSED(dtypes[2]), PyArray_Descr *given_descrs[2], - PyArray_Descr *loop_descrs[2]) + PyArray_Descr *loop_descrs[2], + npy_intp *NPY_UNUSED(view_offset)) { Py_INCREF(given_descrs[0]); loop_descrs[0] = given_descrs[0]; @@ -541,7 +546,8 @@ multiply_sfloats_resolve_descriptors( PyArrayMethodObject *NPY_UNUSED(self), PyArray_DTypeMeta *NPY_UNUSED(dtypes[3]), PyArray_Descr *given_descrs[3], - PyArray_Descr *loop_descrs[3]) + PyArray_Descr *loop_descrs[3], + npy_intp *NPY_UNUSED(view_offset)) { /* * Multiply the scaling for the result. 
If the result was passed in we @@ -602,7 +608,8 @@ add_sfloats_resolve_descriptors( PyArrayMethodObject *NPY_UNUSED(self), PyArray_DTypeMeta *NPY_UNUSED(dtypes[3]), PyArray_Descr *given_descrs[3], - PyArray_Descr *loop_descrs[3]) + PyArray_Descr *loop_descrs[3], + npy_intp *NPY_UNUSED(view_offset)) { /* * Here we accept an output descriptor (the inner loop can deal with it), diff --git a/numpy/core/src/umath/dispatching.c b/numpy/core/src/umath/dispatching.c index 8e99c0420..81d47a0e1 100644 --- a/numpy/core/src/umath/dispatching.c +++ b/numpy/core/src/umath/dispatching.c @@ -46,19 +46,23 @@ #include "dispatching.h" #include "dtypemeta.h" +#include "common_dtype.h" #include "npy_hashtable.h" #include "legacy_array_method.h" #include "ufunc_object.h" #include "ufunc_type_resolution.h" +#define PROMOTION_DEBUG_TRACING 0 + + /* forward declaration */ static NPY_INLINE PyObject * promote_and_get_info_and_ufuncimpl(PyUFuncObject *ufunc, PyArrayObject *const ops[], PyArray_DTypeMeta *signature[], PyArray_DTypeMeta *op_dtypes[], - npy_bool allow_legacy_promotion, npy_bool cache); + npy_bool allow_legacy_promotion); /** @@ -147,6 +151,23 @@ PyUFunc_AddLoop(PyUFuncObject *ufunc, PyObject *info, int ignore_duplicate) * (Based on `isinstance()`, the knowledge that non-abstract DTypes cannot * be subclassed is used, however.) * + * NOTE: This currently does not take into account output dtypes which do not + * have to match. The possible extension here is that if an output + * is given (and thus an output dtype), but not part of the signature + * we could ignore it for matching, but *prefer* a loop that matches + * better. + * Why is this not done currently? First, it seems a niche feature that + * loops can only be distinguished based on the output dtype. Second, + * there are some nasty theoretical things because: + * + * np.add(f4, f4, out=f8) + * np.add(f4, f4, out=f8, dtype=f8) + * + * are different, the first uses the f4 loop, the second the f8 loop. 
+ * The problem is, that the current cache only uses the op_dtypes and + * both are `(f4, f4, f8)`. The cache would need to store also which + * output was provided by `dtype=`/`signature=`. + * * @param ufunc * @param op_dtypes The DTypes that are either passed in (defined by an * operand) or defined by the `signature` as also passed in as @@ -159,17 +180,35 @@ PyUFunc_AddLoop(PyUFuncObject *ufunc, PyObject *info, int ignore_duplicate) */ static int resolve_implementation_info(PyUFuncObject *ufunc, - PyArray_DTypeMeta *op_dtypes[], PyObject **out_info) + PyArray_DTypeMeta *op_dtypes[], npy_bool only_promoters, + PyObject **out_info) { int nin = ufunc->nin, nargs = ufunc->nargs; Py_ssize_t size = PySequence_Length(ufunc->_loops); PyObject *best_dtypes = NULL; PyObject *best_resolver_info = NULL; +#if PROMOTION_DEBUG_TRACING + printf("Promoting for '%s' promoters only: %d\n", + ufunc->name ? ufunc->name : "<unknown>", (int)only_promoters); + printf(" DTypes: "); + PyObject *tmp = PyArray_TupleFromItems(ufunc->nargs, op_dtypes, 1); + PyObject_Print(tmp, stdout, 0); + Py_DECREF(tmp); + printf("\n"); + Py_DECREF(tmp); +#endif + for (Py_ssize_t res_idx = 0; res_idx < size; res_idx++) { /* Test all resolvers */ PyObject *resolver_info = PySequence_Fast_GET_ITEM( ufunc->_loops, res_idx); + + if (only_promoters && PyObject_TypeCheck( + PyTuple_GET_ITEM(resolver_info, 1), &PyArrayMethod_Type)) { + continue; + } + PyObject *curr_dtypes = PyTuple_GET_ITEM(resolver_info, 0); /* * Test if the current resolver matches, it could make sense to @@ -179,20 +218,31 @@ resolve_implementation_info(PyUFuncObject *ufunc, npy_bool matches = NPY_TRUE; /* - * NOTE: We check also the output DType. In principle we do not - * have to strictly match it (unless it is provided by the - * `signature`). This assumes that a (fallback) promoter will - * unset the output DType if no exact match is found. 
+ * NOTE: We currently match the output dtype exactly here, this is + * actually only necessary if the signature includes. + * Currently, we rely that op-dtypes[nin:nout] is NULLed if not. */ for (Py_ssize_t i = 0; i < nargs; i++) { PyArray_DTypeMeta *given_dtype = op_dtypes[i]; PyArray_DTypeMeta *resolver_dtype = ( (PyArray_DTypeMeta *)PyTuple_GET_ITEM(curr_dtypes, i)); assert((PyObject *)given_dtype != Py_None); - if (given_dtype == NULL && i >= nin) { - /* Unspecified out always matches (see below for inputs) */ - continue; + if (given_dtype == NULL) { + if (i >= nin) { + /* Unspecified out always matches (see below for inputs) */ + continue; + } + /* + * This is a reduce-like operation, which always have the form + * `(res_DType, op_DType, res_DType)`. If the first and last + * dtype of the loops match, this should be reduce-compatible. + */ + if (PyTuple_GET_ITEM(curr_dtypes, 0) + == PyTuple_GET_ITEM(curr_dtypes, 2)) { + continue; + } } + if (resolver_dtype == (PyArray_DTypeMeta *)Py_None) { /* always matches */ continue; @@ -204,24 +254,7 @@ resolve_implementation_info(PyUFuncObject *ufunc, matches = NPY_FALSE; break; } - if (given_dtype == NULL) { - /* - * If an input was not specified, this is a reduce-like - * operation: reductions use `(operand_DType, NULL, out_DType)` - * as they only have a single operand. This allows special - * reduce promotion rules useful for example for sum/product. - * E.g. `np.add.reduce([True, True])` promotes to integer. - * - * Continuing here allows a promoter to handle reduce-like - * promotions explicitly if necessary. - * TODO: The `!NPY_DT_is_abstract(resolver_dtype)` currently - * ensures that this is a promoter. If we allow - * `ArrayMethods` to use abstract DTypes, we may have to - * reject it here or the `ArrayMethod` has to implement - * the reduce promotion. 
- */ - continue; - } + int subclass = PyObject_IsSubclass( (PyObject *)given_dtype, (PyObject *)resolver_dtype); if (subclass < 0) { @@ -254,8 +287,12 @@ resolve_implementation_info(PyUFuncObject *ufunc, * In all cases, we give up resolution, since it would be * necessary to compare to two "best" cases. */ - int unambiguously_equally_good = 1; for (Py_ssize_t i = 0; i < nargs; i++) { + if (i == ufunc->nin && current_best != -1) { + /* inputs prefer one loop and outputs have lower priority */ + break; + } + int best; PyObject *prev_dtype = PyTuple_GET_ITEM(best_dtypes, i); @@ -265,50 +302,18 @@ resolve_implementation_info(PyUFuncObject *ufunc, /* equivalent, so this entry does not matter */ continue; } - /* - * TODO: Even if the input is not specified, if we have - * abstract DTypes and one is a subclass of the other, - * the subclass should be considered a better match - * (subclasses are always more specific). - */ - /* Whether this (normally output) dtype was specified at all */ if (op_dtypes[i] == NULL) { /* - * When DType is completely unspecified, prefer abstract - * over concrete, assuming it will resolve. - * Furthermore, we cannot decide which abstract/None - * is "better", only concrete ones which are subclasses - * of Abstract ones are defined as worse. + * If an a dtype is NULL it always matches, so there is no + * point in defining one as more precise than the other. 
*/ - npy_bool prev_is_concrete = NPY_FALSE; - npy_bool new_is_concrete = NPY_FALSE; - if ((prev_dtype != Py_None) && - !NPY_DT_is_abstract((PyArray_DTypeMeta *)prev_dtype)) { - prev_is_concrete = NPY_TRUE; - } - if ((new_dtype != Py_None) && - !NPY_DT_is_abstract((PyArray_DTypeMeta *)new_dtype)) { - new_is_concrete = NPY_TRUE; - } - if (prev_is_concrete == new_is_concrete) { - best = -1; - } - else if (prev_is_concrete) { - unambiguously_equally_good = 0; - best = 1; - } - else { - unambiguously_equally_good = 0; - best = 0; - } + continue; } /* If either is None, the other is strictly more specific */ - else if (prev_dtype == Py_None) { - unambiguously_equally_good = 0; + if (prev_dtype == Py_None) { best = 1; } else if (new_dtype == Py_None) { - unambiguously_equally_good = 0; best = 0; } /* @@ -318,20 +323,25 @@ resolve_implementation_info(PyUFuncObject *ufunc, else if (!NPY_DT_is_abstract((PyArray_DTypeMeta *)prev_dtype) && !NPY_DT_is_abstract((PyArray_DTypeMeta *)new_dtype)) { /* - * Ambiguous unless the are identical (checked above), - * but since they are concrete it does not matter which - * best to compare. + * Ambiguous unless they are identical (checked above), + * or one matches exactly. 
*/ - best = -1; + if (prev_dtype == (PyObject *)op_dtypes[i]) { + best = 0; + } + else if (new_dtype == (PyObject *)op_dtypes[i]) { + best = 1; + } + else { + best = -1; + } } else if (!NPY_DT_is_abstract((PyArray_DTypeMeta *)prev_dtype)) { /* old is not abstract, so better (both not possible) */ - unambiguously_equally_good = 0; best = 0; } else if (!NPY_DT_is_abstract((PyArray_DTypeMeta *)new_dtype)) { /* new is not abstract, so better (both not possible) */ - unambiguously_equally_good = 0; best = 1; } /* @@ -349,6 +359,10 @@ resolve_implementation_info(PyUFuncObject *ufunc, return -1; } + if (best == -1) { + /* no new info, nothing to update */ + continue; + } if ((current_best != -1) && (current_best != best)) { /* * We need a clear best, this could be tricky, unless @@ -367,15 +381,34 @@ resolve_implementation_info(PyUFuncObject *ufunc, if (current_best == -1) { /* - * TODO: It would be nice to have a "diagnostic mode" that - * informs if this happens! (An immediate error currently - * blocks later legacy resolution, but may work in the - * future.) + * We could not find a best loop, but promoters should be + * designed in a way to disambiguate such scenarios, so we + * retry the whole lookup using only promoters. + * (There is a small chance we already got two promoters. + * We just redo it anyway for simplicity.) */ - if (unambiguously_equally_good) { - /* unset the best resolver to indicate this */ - best_resolver_info = NULL; - continue; + if (!only_promoters) { + return resolve_implementation_info(ufunc, + op_dtypes, NPY_TRUE, out_info); + } + /* + * If this is already the retry, we are out of luck. Promoters + * should be designed in a way that this cannot happen! + * (It should be noted, that the retry might not find anything + * and we still do a legacy lookup later.) 
+ */ + PyObject *given = PyArray_TupleFromItems( + ufunc->nargs, (PyObject **)op_dtypes, 1); + if (given != NULL) { + PyErr_Format(PyExc_RuntimeError, + "Could not find a loop for the inputs:\n %S\n" + "The two promoters %S and %S matched the input " + "equally well. Promoters must be designed " + "to be unambiguous. NOTE: This indicates an error " + "in NumPy or an extending library and should be " + "reported.", + given, best_dtypes, curr_dtypes); + Py_DECREF(given); } *out_info = NULL; return 0; @@ -457,10 +490,9 @@ call_promoter_and_recurse(PyUFuncObject *ufunc, PyObject *promoter, if (Py_EnterRecursiveCall(" during ufunc promotion.") != 0) { goto finish; } - /* TODO: The caching logic here may need revising: */ resolved_info = promote_and_get_info_and_ufuncimpl(ufunc, operands, signature, new_op_dtypes, - /* no legacy promotion */ NPY_FALSE, /* cache */ NPY_TRUE); + /* no legacy promotion */ NPY_FALSE); Py_LeaveRecursiveCall(); @@ -551,6 +583,10 @@ legacy_promote_using_legacy_type_resolver(PyUFuncObject *ufunc, NPY_UNSAFE_CASTING, (PyArrayObject **)ops, type_tuple, out_descrs) < 0) { Py_XDECREF(type_tuple); + /* Not all legacy resolvers clean up on failures: */ + for (int i = 0; i < nargs; i++) { + Py_CLEAR(out_descrs[i]); + } return -1; } Py_XDECREF(type_tuple); @@ -560,17 +596,19 @@ legacy_promote_using_legacy_type_resolver(PyUFuncObject *ufunc, Py_INCREF(operation_DTypes[i]); Py_DECREF(out_descrs[i]); } - if (ufunc->type_resolver == &PyUFunc_SimpleBinaryComparisonTypeResolver) { - /* - * In this one case, the deprecation means that we actually override - * the signature. - */ - for (int i = 0; i < nargs; i++) { - if (signature[i] != NULL && signature[i] != operation_DTypes[i]) { - Py_INCREF(operation_DTypes[i]); - Py_SETREF(signature[i], operation_DTypes[i]); - *out_cacheable = 0; - } + /* + * The PyUFunc_SimpleBinaryComparisonTypeResolver has a deprecation + * warning (ignoring `dtype=`) and cannot be cached. 
+ * All datetime ones *should* have a warning, but currently don't, + * but ignore all signature passing also. So they can also + * not be cached, and they mutate the signature which of course is wrong, + * but not doing it would confuse the code later. + */ + for (int i = 0; i < nargs; i++) { + if (signature[i] != NULL && signature[i] != operation_DTypes[i]) { + Py_INCREF(operation_DTypes[i]); + Py_SETREF(signature[i], operation_DTypes[i]); + *out_cacheable = 0; } } return 0; @@ -607,7 +645,7 @@ add_and_return_legacy_wrapping_ufunc_loop(PyUFuncObject *ufunc, Py_DECREF(info); return NULL; } - + Py_DECREF(info); /* now borrowed from the ufunc's list of loops */ return info; } @@ -625,7 +663,7 @@ promote_and_get_info_and_ufuncimpl(PyUFuncObject *ufunc, PyArrayObject *const ops[], PyArray_DTypeMeta *signature[], PyArray_DTypeMeta *op_dtypes[], - npy_bool allow_legacy_promotion, npy_bool cache) + npy_bool allow_legacy_promotion) { /* * Fetch the dispatching info which consists of the implementation and @@ -644,11 +682,12 @@ promote_and_get_info_and_ufuncimpl(PyUFuncObject *ufunc, } /* - * If `info == NULL`, the caching failed, repeat using the full resolution - * in `resolve_implementation_info`. + * If `info == NULL`, loading from cache failed, use the full resolution + * in `resolve_implementation_info` (which caches its result on success). */ if (info == NULL) { - if (resolve_implementation_info(ufunc, op_dtypes, &info) < 0) { + if (resolve_implementation_info(ufunc, + op_dtypes, NPY_FALSE, &info) < 0) { return NULL; } if (info != NULL && PyObject_TypeCheck( @@ -657,41 +696,12 @@ promote_and_get_info_and_ufuncimpl(PyUFuncObject *ufunc, * Found the ArrayMethod and NOT promoter. Before returning it * add it to the cache for faster lookup in the future. 
*/ - if (cache && PyArrayIdentityHash_SetItem(ufunc->_dispatch_cache, + if (PyArrayIdentityHash_SetItem(ufunc->_dispatch_cache, (PyObject **)op_dtypes, info, 0) < 0) { return NULL; } return info; } - else if (info == NULL && op_dtypes[0] == NULL) { - /* - * If we have a reduction, fill in the unspecified input/array - * assuming it should have the same dtype as the operand input - * (or the output one if given). - * Then, try again. In some cases, this will choose different - * paths, such as `ll->?` instead of an `??->?` loop for `np.equal` - * when the input is `.l->.` (`.` meaning undefined). This will - * then cause an error. But cast to `?` would always lose - * information, and in many cases important information: - * - * ```python - * from operator import eq - * from functools import reduce - * - * reduce(eq, [1, 2, 3]) != reduce(eq, [True, True, True]) - * ``` - * - * The special cases being `logical_(and|or|xor)` which can always - * cast to boolean ahead of time and still give the right answer - * (unsafe cast to bool is fine here). We special case these at - * the time of this comment (NumPy 1.21). - */ - assert(ufunc->nin == 2 && ufunc->nout == 1); - op_dtypes[0] = op_dtypes[2] != NULL ? 
op_dtypes[2] : op_dtypes[1]; - Py_INCREF(op_dtypes[0]); - return promote_and_get_info_and_ufuncimpl(ufunc, - ops, signature, op_dtypes, allow_legacy_promotion, 1); - } } /* @@ -707,6 +717,11 @@ promote_and_get_info_and_ufuncimpl(PyUFuncObject *ufunc, return NULL; } else if (info != NULL) { + /* Add result to the cache using the original types: */ + if (PyArrayIdentityHash_SetItem(ufunc->_dispatch_cache, + (PyObject **)op_dtypes, info, 0) < 0) { + return NULL; + } return info; } } @@ -730,9 +745,15 @@ promote_and_get_info_and_ufuncimpl(PyUFuncObject *ufunc, return NULL; } info = promote_and_get_info_and_ufuncimpl(ufunc, - ops, signature, new_op_dtypes, NPY_FALSE, cacheable); + ops, signature, new_op_dtypes, NPY_FALSE); for (int i = 0; i < ufunc->nargs; i++) { - Py_XDECREF(new_op_dtypes); + Py_XDECREF(new_op_dtypes[i]); + } + + /* Add this to the cache using the original types: */ + if (cacheable && PyArrayIdentityHash_SetItem(ufunc->_dispatch_cache, + (PyObject **)op_dtypes, info, 0) < 0) { + return NULL; } return info; } @@ -745,6 +766,14 @@ promote_and_get_info_and_ufuncimpl(PyUFuncObject *ufunc, * only work with DType (classes/types). This is because it has to ensure * that legacy (value-based promotion) is used when necessary. * + * NOTE: The machinery here currently ignores output arguments unless + * they are part of the signature. This slightly limits unsafe loop + * specializations, which is important for the `ensure_reduce_compatible` + * fallback mode. + * To fix this, the caching mechanism (and dispatching) can be extended. + * When/if that happens, the `ensure_reduce_compatible` could be + * deprecated (it should never kick in because promotion kick in first). + * * @param ufunc The ufunc object, used mainly for the fallback. * @param ops The array operands (used only for the fallback). * @param signature As input, the DType signature fixed explicitly by the user. 
@@ -754,9 +783,16 @@ promote_and_get_info_and_ufuncimpl(PyUFuncObject *ufunc, * either by the `signature` or by an `operand`. * (outputs and the second input can be NULL for reductions). * NOTE: In some cases, the promotion machinery may currently modify - * these. + * these including clearing the output. * @param force_legacy_promotion If set, we have to use the old type resolution * to implement value-based promotion/casting. + * @param ensure_reduce_compatible Must be set for reductions, in which case + * the found implementation is checked for reduce-like compatibility. + * If it is *not* compatible and `signature[2] != NULL`, we assume its + * output DType is correct (see NOTE above). + * If removed, promotion may require information about whether this + * is a reduction, so the more likely case is to always keep fixing this + * when necessary, but push down the handling so it can be cached. */ NPY_NO_EXPORT PyArrayMethodObject * promote_and_get_ufuncimpl(PyUFuncObject *ufunc, @@ -764,9 +800,10 @@ promote_and_get_ufuncimpl(PyUFuncObject *ufunc, PyArray_DTypeMeta *signature[], PyArray_DTypeMeta *op_dtypes[], npy_bool force_legacy_promotion, - npy_bool allow_legacy_promotion) + npy_bool allow_legacy_promotion, + npy_bool ensure_reduce_compatible) { - int nargs = ufunc->nargs; + int nin = ufunc->nin, nargs = ufunc->nargs; /* * Get the actual DTypes we operate with by mixing the operand array @@ -782,6 +819,15 @@ promote_and_get_ufuncimpl(PyUFuncObject *ufunc, Py_XSETREF(op_dtypes[i], signature[i]); assert(i >= ufunc->nin || !NPY_DT_is_abstract(signature[i])); } + else if (i >= nin) { + /* + * We currently just ignore outputs if not in signature, this will + * always give the/a correct result (limits registering specialized + * loops which include the cast). + * (See also comment in resolve_implementation_info.) 
+ */ + Py_CLEAR(op_dtypes[i]); + } } if (force_legacy_promotion) { @@ -798,7 +844,7 @@ promote_and_get_ufuncimpl(PyUFuncObject *ufunc, } PyObject *info = promote_and_get_info_and_ufuncimpl(ufunc, - ops, signature, op_dtypes, allow_legacy_promotion, NPY_TRUE); + ops, signature, op_dtypes, allow_legacy_promotion); if (info == NULL) { if (!PyErr_Occurred()) { @@ -809,8 +855,26 @@ promote_and_get_ufuncimpl(PyUFuncObject *ufunc, PyArrayMethodObject *method = (PyArrayMethodObject *)PyTuple_GET_ITEM(info, 1); - /* Fill `signature` with final DTypes used by the ArrayMethod/inner-loop */ + /* + * In certain cases (only the logical ufuncs really), the loop we found may + * not be reduce-compatible. Since the machinery can't distinguish a + * reduction with an output from a normal ufunc call, we have to assume + * the result DType is correct and force it for the input (if not forced + * already). + * NOTE: This does assume that all loops are "safe" see the NOTE in this + * comment. That could be relaxed, in which case we may need to + * cache if a call was for a reduction. + */ PyObject *all_dtypes = PyTuple_GET_ITEM(info, 0); + if (ensure_reduce_compatible && signature[0] == NULL && + PyTuple_GET_ITEM(all_dtypes, 0) != PyTuple_GET_ITEM(all_dtypes, 2)) { + signature[0] = (PyArray_DTypeMeta *)PyTuple_GET_ITEM(all_dtypes, 2); + Py_INCREF(signature[0]); + return promote_and_get_ufuncimpl(ufunc, + ops, signature, op_dtypes, + force_legacy_promotion, allow_legacy_promotion, NPY_FALSE); + } + for (int i = 0; i < nargs; i++) { if (signature[i] == NULL) { signature[i] = (PyArray_DTypeMeta *)PyTuple_GET_ITEM(all_dtypes, i); @@ -826,6 +890,112 @@ promote_and_get_ufuncimpl(PyUFuncObject *ufunc, /* + * Generic promoter used by as a final fallback on ufuncs. Most operations are + * homogeneous, so we can try to find the homogeneous dtype on the inputs + * and use that. + * We need to special case the reduction case, where op_dtypes[0] == NULL + * is possible. 
+ */ +NPY_NO_EXPORT int +default_ufunc_promoter(PyUFuncObject *ufunc, + PyArray_DTypeMeta *op_dtypes[], PyArray_DTypeMeta *signature[], + PyArray_DTypeMeta *new_op_dtypes[]) +{ + if (ufunc->type_resolver == &PyUFunc_SimpleBinaryComparisonTypeResolver + && signature[0] == NULL && signature[1] == NULL + && signature[2] != NULL && signature[2]->type_num != NPY_BOOL) { + /* bail out, this is _only_ to give future/deprecation warning! */ + return -1; + } + + /* If nin < 2 promotion is a no-op, so it should not be registered */ + assert(ufunc->nin > 1); + if (op_dtypes[0] == NULL) { + assert(ufunc->nin == 2 && ufunc->nout == 1); /* must be reduction */ + Py_INCREF(op_dtypes[1]); + new_op_dtypes[0] = op_dtypes[1]; + Py_INCREF(op_dtypes[1]); + new_op_dtypes[1] = op_dtypes[1]; + Py_INCREF(op_dtypes[1]); + new_op_dtypes[2] = op_dtypes[1]; + return 0; + } + PyArray_DTypeMeta *common = NULL; + /* + * If a signature is used and homogeneous in its outputs use that + * (Could/should likely be rather applied to inputs also, although outs + * only could have some advantage and input dtypes are rarely enforced.) + */ + for (int i = ufunc->nin; i < ufunc->nargs; i++) { + if (signature[i] != NULL) { + if (common == NULL) { + Py_INCREF(signature[i]); + common = signature[i]; + } + else if (common != signature[i]) { + Py_CLEAR(common); /* Not homogeneous, unset common */ + break; + } + } + } + /* Otherwise, use the common DType of all input operands */ + if (common == NULL) { + common = PyArray_PromoteDTypeSequence(ufunc->nin, op_dtypes); + if (common == NULL) { + if (PyErr_ExceptionMatches(PyExc_TypeError)) { + PyErr_Clear(); /* Do not propagate normal promotion errors */ + } + return -1; + } + } + + for (int i = 0; i < ufunc->nargs; i++) { + PyArray_DTypeMeta *tmp = common; + if (signature[i]) { + tmp = signature[i]; /* never replace a fixed one. 
*/ + } + Py_INCREF(tmp); + new_op_dtypes[i] = tmp; + } + for (int i = ufunc->nin; i < ufunc->nargs; i++) { + Py_XINCREF(op_dtypes[i]); + new_op_dtypes[i] = op_dtypes[i]; + } + + Py_DECREF(common); + return 0; +} + + +/* + * In some cases, we assume that there will only ever be object loops, + * and the object loop should *always* be chosen. + * (in those cases more specific loops should not really be registered, but + * we do not check that.) + * + * We default to this for "old-style" ufuncs which have exactly one loop + * consisting only of objects (during registration time, numba mutates this + * but presumably). + */ +NPY_NO_EXPORT int +object_only_ufunc_promoter(PyUFuncObject *ufunc, + PyArray_DTypeMeta *NPY_UNUSED(op_dtypes[]), + PyArray_DTypeMeta *signature[], + PyArray_DTypeMeta *new_op_dtypes[]) +{ + PyArray_DTypeMeta *object_DType = PyArray_DTypeFromTypeNum(NPY_OBJECT); + + for (int i = 0; i < ufunc->nargs; i++) { + if (signature[i] == NULL) { + Py_INCREF(object_DType); + new_op_dtypes[i] = object_DType; + } + } + Py_DECREF(object_DType); + return 0; +} + +/* * Special promoter for the logical ufuncs. The logical ufuncs can always * use the ??->? and still get the correct output (as long as the output * is not supposed to be `object`). @@ -843,6 +1013,12 @@ logical_ufunc_promoter(PyUFuncObject *NPY_UNUSED(ufunc), */ int force_object = 0; + if (signature[0] == NULL && signature[1] == NULL + && signature[2] != NULL && signature[2]->type_num != NPY_BOOL) { + /* bail out, this is _only_ to give future/deprecation warning! 
*/ + return -1; + } + for (int i = 0; i < 3; i++) { PyArray_DTypeMeta *item; if (signature[i] != NULL) { @@ -913,4 +1089,3 @@ install_logical_ufunc_promoter(PyObject *ufunc) return PyUFunc_AddLoop((PyUFuncObject *)ufunc, info, 0); } - diff --git a/numpy/core/src/umath/dispatching.h b/numpy/core/src/umath/dispatching.h index 2f314615d..a7e9e88d0 100644 --- a/numpy/core/src/umath/dispatching.h +++ b/numpy/core/src/umath/dispatching.h @@ -20,13 +20,25 @@ promote_and_get_ufuncimpl(PyUFuncObject *ufunc, PyArray_DTypeMeta *signature[], PyArray_DTypeMeta *op_dtypes[], npy_bool force_legacy_promotion, - npy_bool allow_legacy_promotion); + npy_bool allow_legacy_promotion, + npy_bool ensure_reduce_compatible); NPY_NO_EXPORT PyObject * add_and_return_legacy_wrapping_ufunc_loop(PyUFuncObject *ufunc, PyArray_DTypeMeta *operation_dtypes[], int ignore_duplicate); NPY_NO_EXPORT int +default_ufunc_promoter(PyUFuncObject *ufunc, + PyArray_DTypeMeta *op_dtypes[], PyArray_DTypeMeta *signature[], + PyArray_DTypeMeta *new_op_dtypes[]); + +NPY_NO_EXPORT int +object_only_ufunc_promoter(PyUFuncObject *ufunc, + PyArray_DTypeMeta *NPY_UNUSED(op_dtypes[]), + PyArray_DTypeMeta *signature[], + PyArray_DTypeMeta *new_op_dtypes[]); + +NPY_NO_EXPORT int install_logical_ufunc_promoter(PyObject *ufunc); diff --git a/numpy/core/src/umath/legacy_array_method.c b/numpy/core/src/umath/legacy_array_method.c index a423823d4..f4b2aed96 100644 --- a/numpy/core/src/umath/legacy_array_method.c +++ b/numpy/core/src/umath/legacy_array_method.c @@ -103,7 +103,8 @@ NPY_NO_EXPORT NPY_CASTING wrapped_legacy_resolve_descriptors(PyArrayMethodObject *NPY_UNUSED(self), PyArray_DTypeMeta *NPY_UNUSED(dtypes[]), PyArray_Descr *NPY_UNUSED(given_descrs[]), - PyArray_Descr *NPY_UNUSED(loop_descrs[])) + PyArray_Descr *NPY_UNUSED(loop_descrs[]), + npy_intp *NPY_UNUSED(view_offset)) { PyErr_SetString(PyExc_RuntimeError, "cannot use legacy wrapping ArrayMethod without calling the ufunc " @@ -121,12 +122,43 @@ 
simple_legacy_resolve_descriptors( PyArrayMethodObject *method, PyArray_DTypeMeta **dtypes, PyArray_Descr **given_descrs, - PyArray_Descr **output_descrs) + PyArray_Descr **output_descrs, + npy_intp *NPY_UNUSED(view_offset)) { + int i = 0; int nin = method->nin; int nout = method->nout; - for (int i = 0; i < nin + nout; i++) { + if (nin == 2 && nout == 1 && given_descrs[2] != NULL + && dtypes[0] == dtypes[2]) { + /* + * Could be a reduction, which requires `descr[0] is descr[2]` + * (identity) at least currently. This is because `op[0] is op[2]`. + * (If the output descriptor is not passed, the below works.) + */ + output_descrs[2] = ensure_dtype_nbo(given_descrs[2]); + if (output_descrs[2] == NULL) { + Py_CLEAR(output_descrs[2]); + return -1; + } + Py_INCREF(output_descrs[2]); + output_descrs[0] = output_descrs[2]; + if (dtypes[1] == dtypes[2]) { + /* Same for the second one (accumulation is stricter) */ + Py_INCREF(output_descrs[2]); + output_descrs[1] = output_descrs[2]; + } + else { + output_descrs[1] = ensure_dtype_nbo(given_descrs[1]); + if (output_descrs[1] == NULL) { + i = 2; + goto fail; + } + } + return NPY_NO_CASTING; + } + + for (; i < nin + nout; i++) { if (given_descrs[i] != NULL) { output_descrs[i] = ensure_dtype_nbo(given_descrs[i]); } @@ -146,7 +178,7 @@ simple_legacy_resolve_descriptors( return NPY_NO_CASTING; fail: - for (int i = 0; i < nin + nout; i++) { + for (; i >= 0; i--) { Py_CLEAR(output_descrs[i]); } return -1; @@ -194,6 +226,10 @@ get_wrapped_legacy_ufunc_loop(PyArrayMethod_Context *context, *out_loop = &generic_wrapped_legacy_loop; *out_transferdata = get_new_loop_data( loop, user_data, (*flags & NPY_METH_REQUIRES_PYAPI) != 0); + if (*out_transferdata == NULL) { + PyErr_NoMemory(); + return -1; + } return 0; } diff --git a/numpy/core/src/umath/legacy_array_method.h b/numpy/core/src/umath/legacy_array_method.h index 0dec1fb3a..d20b4fb08 100644 --- a/numpy/core/src/umath/legacy_array_method.h +++ 
b/numpy/core/src/umath/legacy_array_method.h @@ -27,7 +27,7 @@ get_wrapped_legacy_ufunc_loop(PyArrayMethod_Context *context, NPY_NO_EXPORT NPY_CASTING wrapped_legacy_resolve_descriptors(PyArrayMethodObject *, - PyArray_DTypeMeta **, PyArray_Descr **, PyArray_Descr **); + PyArray_DTypeMeta **, PyArray_Descr **, PyArray_Descr **, npy_intp *); #endif /*_NPY_LEGACY_ARRAY_METHOD_H */ diff --git a/numpy/core/src/umath/loops_exponent_log.dispatch.c.src b/numpy/core/src/umath/loops_exponent_log.dispatch.c.src index 95cce553a..2dd43fb85 100644 --- a/numpy/core/src/umath/loops_exponent_log.dispatch.c.src +++ b/numpy/core/src/umath/loops_exponent_log.dispatch.c.src @@ -386,7 +386,7 @@ avx512_permute_x8var_pd(__m512d t0, __m512d t1, __m512d t2, __m512d t3, * #and_masks =_mm256_and_ps, _mm512_kand# * #xor_masks =_mm256_xor_ps, _mm512_kxor# * #fmadd = _mm256_fmadd_ps, _mm512_fmadd_ps# - * #mask_to_int = _mm256_movemask_ps, # + * #mask_to_int = _mm256_movemask_ps, npyv_tobits_b32# * #full_mask= 0xFF, 0xFFFF# * #masked_store = _mm256_maskstore_ps, _mm512_mask_storeu_ps# * #cvtps_epi32 = _mm256_cvtps_epi32, # @@ -833,11 +833,19 @@ AVX512F_exp_DOUBLE(npy_double * op, op += num_lanes; num_remaining_elements -= num_lanes; } - if (overflow_mask) { + /* + * Don't count on the compiler for cast between mask and int registers. + * On gcc7 with flags -march>=nocona -O3 can cause FP stack overflow + * which may lead to putting NaN into certain HW/FP calculations. 
+ * + * For more details, please check the comments in: + * - https://github.com/numpy/numpy/issues/20356 + */ + if (npyv_tobits_b64(overflow_mask)) { npy_set_floatstatus_overflow(); } - if (underflow_mask) { + if (npyv_tobits_b64(underflow_mask)) { npy_set_floatstatus_underflow(); } } @@ -1062,10 +1070,10 @@ AVX512F_log_DOUBLE(npy_double * op, num_remaining_elements -= num_lanes; } - if (invalid_mask) { + if (npyv_tobits_b64(invalid_mask)) { npy_set_floatstatus_invalid(); } - if (divide_by_zero_mask) { + if (npyv_tobits_b64(divide_by_zero_mask)) { npy_set_floatstatus_divbyzero(); } } diff --git a/numpy/core/src/umath/reduction.c b/numpy/core/src/umath/reduction.c index c28c8abd8..8cb44d433 100644 --- a/numpy/core/src/umath/reduction.c +++ b/numpy/core/src/umath/reduction.c @@ -301,20 +301,6 @@ PyUFunc_ReduceWrapper(PyArrayMethod_Context *context, PyArrayMethod_StridedLoop *strided_loop; NPY_ARRAYMETHOD_FLAGS flags = 0; - npy_intp fixed_strides[3]; - NpyIter_GetInnerFixedStrideArray(iter, fixed_strides); - if (wheremask != NULL) { - if (PyArrayMethod_GetMaskedStridedLoop(context, - 1, fixed_strides, &strided_loop, &auxdata, &flags) < 0) { - goto fail; - } - } - else { - if (context->method->get_strided_loop(context, - 1, 0, fixed_strides, &strided_loop, &auxdata, &flags) < 0) { - goto fail; - } - } int needs_api = (flags & NPY_METH_REQUIRES_PYAPI) != 0; needs_api |= NpyIter_IterationNeedsAPI(iter); @@ -349,6 +335,25 @@ PyUFunc_ReduceWrapper(PyArrayMethod_Context *context, goto fail; } + /* + * Note that we need to ensure that the iterator is reset before getting + * the fixed strides. (The buffer information is unitialized before.) 
+ */ + npy_intp fixed_strides[3]; + NpyIter_GetInnerFixedStrideArray(iter, fixed_strides); + if (wheremask != NULL) { + if (PyArrayMethod_GetMaskedStridedLoop(context, + 1, fixed_strides, &strided_loop, &auxdata, &flags) < 0) { + goto fail; + } + } + else { + if (context->method->get_strided_loop(context, + 1, 0, fixed_strides, &strided_loop, &auxdata, &flags) < 0) { + goto fail; + } + } + if (NpyIter_GetIterSize(iter) != 0) { NpyIter_IterNextFunc *iternext; char **dataptr; @@ -382,6 +387,7 @@ PyUFunc_ReduceWrapper(PyArrayMethod_Context *context, } Py_INCREF(result); + NPY_AUXDATA_FREE(auxdata); if (!NpyIter_Deallocate(iter)) { Py_DECREF(result); return NULL; diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c index 186f18a62..415ff0f07 100644 --- a/numpy/core/src/umath/ufunc_object.c +++ b/numpy/core/src/umath/ufunc_object.c @@ -998,10 +998,6 @@ convert_ufunc_arguments(PyUFuncObject *ufunc, } if (*allow_legacy_promotion && (!all_scalar && any_scalar)) { *force_legacy_promotion = should_use_min_scalar(nin, out_op, 0, NULL); - /* - * TODO: if this is False, we end up in a "very slow" path that should - * be avoided. This makes `int_arr + 0.` ~40% slower. 
- */ } /* Convert and fill in output arguments */ @@ -1077,13 +1073,15 @@ check_for_trivial_loop(PyArrayMethodObject *ufuncimpl, int must_copy = !PyArray_ISALIGNED(op[i]); if (dtypes[i] != PyArray_DESCR(op[i])) { - NPY_CASTING safety = PyArray_GetCastSafety( - PyArray_DESCR(op[i]), dtypes[i], NULL); + npy_intp view_offset; + NPY_CASTING safety = PyArray_GetCastInfo( + PyArray_DESCR(op[i]), dtypes[i], NULL, &view_offset); if (safety < 0 && PyErr_Occurred()) { /* A proper error during a cast check, should be rare */ return -1; } - if (!(safety & _NPY_CAST_IS_VIEW)) { + if (view_offset != 0) { + /* NOTE: Could possibly implement non-zero view offsets */ must_copy = 1; } @@ -2717,11 +2715,11 @@ reducelike_promote_and_resolve(PyUFuncObject *ufunc, char *method) { /* - * Note that the `ops` is not realy correct. But legacy resolution + * Note that the `ops` is not really correct. But legacy resolution * cannot quite handle the correct ops (e.g. a NULL first item if `out` - * is NULL), and it should only matter in very strange cases. + * is NULL) so we pass `arr` instead in that case. */ - PyArrayObject *ops[3] = {arr, arr, NULL}; + PyArrayObject *ops[3] = {out ? 
out : arr, arr, out}; /* * TODO: If `out` is not provided, arguably `initial` could define * the first DType (and maybe also the out one), that way @@ -2741,12 +2739,11 @@ reducelike_promote_and_resolve(PyUFuncObject *ufunc, } PyArrayMethodObject *ufuncimpl = promote_and_get_ufuncimpl(ufunc, - ops, signature, operation_DTypes, NPY_FALSE, NPY_TRUE); - Py_DECREF(operation_DTypes[1]); - if (out != NULL) { - Py_DECREF(operation_DTypes[0]); - Py_DECREF(operation_DTypes[2]); - } + ops, signature, operation_DTypes, NPY_FALSE, NPY_TRUE, NPY_TRUE); + /* DTypes may currently get filled in fallbacks and XDECREF for error: */ + Py_XDECREF(operation_DTypes[0]); + Py_XDECREF(operation_DTypes[1]); + Py_XDECREF(operation_DTypes[2]); if (ufuncimpl == NULL) { return NULL; } @@ -2767,12 +2764,18 @@ reducelike_promote_and_resolve(PyUFuncObject *ufunc, * The first operand and output should be the same array, so they should * be identical. The second argument can be different for reductions, * but is checked to be identical for accumulate and reduceat. + * Ideally, the type-resolver ensures that all are identical, but we do + * not enforce this here strictly. Otherwise correct handling of + * byte-order changes (or metadata) requires a lot of care; see gh-20699. */ - if (out_descrs[0] != out_descrs[2] || ( - enforce_uniform_args && out_descrs[0] != out_descrs[1])) { + if (!PyArray_EquivTypes(out_descrs[0], out_descrs[2]) || ( + enforce_uniform_args && !PyArray_EquivTypes( + out_descrs[0], out_descrs[1]))) { PyErr_Format(PyExc_TypeError, - "the resolved dtypes are not compatible with %s.%s", - ufunc_get_name_cstr(ufunc), method); + "the resolved dtypes are not compatible with %s.%s. " + "Resolved (%R, %R, %R)", + ufunc_get_name_cstr(ufunc), method, + out_descrs[0], out_descrs[1], out_descrs[2]); goto fail; } /* TODO: This really should _not_ be unsafe casting (same above)! 
*/ @@ -3029,8 +3032,12 @@ PyUFunc_Accumulate(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out, return NULL; } - /* The below code assumes that all descriptors are identical: */ - assert(descrs[0] == descrs[1] && descrs[0] == descrs[2]); + /* + * The below code assumes that all descriptors are interchangeable, we + * allow them to not be strictly identical (but they typically should be) + */ + assert(PyArray_EquivTypes(descrs[0], descrs[1]) + && PyArray_EquivTypes(descrs[0], descrs[2])); if (PyDataType_REFCHK(descrs[2]) && descrs[2]->type_num != NPY_OBJECT) { /* This can be removed, but the initial element copy needs fixing */ @@ -3442,8 +3449,12 @@ PyUFunc_Reduceat(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *ind, return NULL; } - /* The below code assumes that all descriptors are identical: */ - assert(descrs[0] == descrs[1] && descrs[0] == descrs[2]); + /* + * The below code assumes that all descriptors are interchangeable, we + * allow them to not be strictly identical (but they typically should be) + */ + assert(PyArray_EquivTypes(descrs[0], descrs[1]) + && PyArray_EquivTypes(descrs[0], descrs[2])); if (PyDataType_REFCHK(descrs[2]) && descrs[2]->type_num != NPY_OBJECT) { /* This can be removed, but the initial element copy needs fixing */ @@ -4511,8 +4522,10 @@ resolve_descriptors(int nop, if (ufuncimpl->resolve_descriptors != &wrapped_legacy_resolve_descriptors) { /* The default: use the `ufuncimpl` as nature intended it */ + npy_intp view_offset = NPY_MIN_INTP; /* currently ignored */ + NPY_CASTING safety = ufuncimpl->resolve_descriptors(ufuncimpl, - signature, original_dtypes, dtypes); + signature, original_dtypes, dtypes, &view_offset); if (safety < 0) { goto finish; } @@ -4852,7 +4865,8 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc, */ PyArrayMethodObject *ufuncimpl = promote_and_get_ufuncimpl(ufunc, operands, signature, - operand_DTypes, force_legacy_promotion, allow_legacy_promotion); + operand_DTypes, force_legacy_promotion, 
allow_legacy_promotion, + NPY_FALSE); if (ufuncimpl == NULL) { goto fail; } @@ -4892,6 +4906,7 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc, */ Py_XDECREF(wheremask); for (int i = 0; i < nop; i++) { + Py_DECREF(signature[i]); Py_XDECREF(operand_DTypes[i]); Py_DECREF(operation_descrs[i]); if (i < nin) { @@ -4915,6 +4930,7 @@ fail: Py_XDECREF(wheremask); for (int i = 0; i < ufunc->nargs; i++) { Py_XDECREF(operands[i]); + Py_XDECREF(signature[i]); Py_XDECREF(operand_DTypes[i]); Py_XDECREF(operation_descrs[i]); if (i < nout) { @@ -5190,9 +5206,22 @@ PyUFunc_FromFuncAndDataAndSignatureAndIdentity(PyUFuncGenericFunction *func, voi info = add_and_return_legacy_wrapping_ufunc_loop(ufunc, op_dtypes, 1); if (info == NULL) { + Py_DECREF(ufunc); return NULL; } } + /* + * TODO: I tried adding a default promoter here (either all object for + * some special cases, or all homogeneous). Those are reasonable + * defaults, but short-cut a deprecated SciPy loop, where the + * homogeneous loop `ddd->d` was deprecated, but an inhomogeneous + * one `dld->d` should be picked. + * The default promoter *is* a reasonable default, but switched that + * behaviour. + * Another problem appeared due to buggy type-resolution for + * datetimes, this meant that `timedelta.sum(dtype="f8")` returned + * datetimes (and not floats or error), arguably wrong, but... 
+ */ return (PyObject *)ufunc; } @@ -5963,7 +5992,7 @@ ufunc_at(PyUFuncObject *ufunc, PyObject *args) PyArrayMethodObject *ufuncimpl = promote_and_get_ufuncimpl(ufunc, operands, signature, operand_DTypes, - force_legacy_promotion, allow_legacy_promotion); + force_legacy_promotion, allow_legacy_promotion, NPY_FALSE); if (ufuncimpl == NULL) { goto fail; } @@ -6141,7 +6170,9 @@ ufunc_at(PyUFuncObject *ufunc, PyObject *args) Py_XDECREF(op2_array); Py_XDECREF(iter); Py_XDECREF(iter2); - for (int i = 0; i < 3; i++) { + for (int i = 0; i < nop; i++) { + Py_DECREF(signature[i]); + Py_XDECREF(operand_DTypes[i]); Py_XDECREF(operation_descrs[i]); Py_XDECREF(array_operands[i]); } @@ -6167,6 +6198,8 @@ fail: Py_XDECREF(iter); Py_XDECREF(iter2); for (int i = 0; i < 3; i++) { + Py_XDECREF(signature[i]); + Py_XDECREF(operand_DTypes[i]); Py_XDECREF(operation_descrs[i]); Py_XDECREF(array_operands[i]); } diff --git a/numpy/core/src/umath/ufunc_type_resolution.c b/numpy/core/src/umath/ufunc_type_resolution.c index 9ed923cf5..90846ca55 100644 --- a/numpy/core/src/umath/ufunc_type_resolution.c +++ b/numpy/core/src/umath/ufunc_type_resolution.c @@ -1528,7 +1528,7 @@ PyUFunc_DefaultLegacyInnerLoopSelector(PyUFuncObject *ufunc, } if (j == nargs) { *out_innerloop = ufunc->functions[i]; - *out_innerloopdata = ufunc->data[i]; + *out_innerloopdata = (ufunc->data == NULL) ? NULL : ufunc->data[i]; return 0; } diff --git a/numpy/core/src/umath/umathmodule.c b/numpy/core/src/umath/umathmodule.c index 272555704..d79506000 100644 --- a/numpy/core/src/umath/umathmodule.c +++ b/numpy/core/src/umath/umathmodule.c @@ -56,7 +56,7 @@ object_ufunc_loop_selector(PyUFuncObject *ufunc, int *out_needs_api) { *out_innerloop = ufunc->functions[0]; - *out_innerloopdata = ufunc->data[0]; + *out_innerloopdata = (ufunc->data == NULL) ? 
NULL : ufunc->data[0]; *out_needs_api = 1; return 0; @@ -288,8 +288,8 @@ int initumath(PyObject *m) PyModule_AddObject(m, "NZERO", PyFloat_FromDouble(NPY_NZERO)); PyModule_AddObject(m, "NAN", PyFloat_FromDouble(NPY_NAN)); - s = PyDict_GetItemString(d, "true_divide"); - PyDict_SetItemString(d, "divide", s); + s = PyDict_GetItemString(d, "divide"); + PyDict_SetItemString(d, "true_divide", s); s = PyDict_GetItemString(d, "conjugate"); s2 = PyDict_GetItemString(d, "remainder"); diff --git a/numpy/core/tests/test_casting_unittests.py b/numpy/core/tests/test_casting_unittests.py index cb4792090..a57e46fd0 100644 --- a/numpy/core/tests/test_casting_unittests.py +++ b/numpy/core/tests/test_casting_unittests.py @@ -76,7 +76,6 @@ class Casting(enum.IntEnum): safe = 2 same_kind = 3 unsafe = 4 - cast_is_view = 1 << 16 def _get_cancast_table(): @@ -259,14 +258,14 @@ class TestCasting: del default for to_dt in [to_Dt(), to_Dt().newbyteorder()]: - casting, (from_res, to_res) = cast._resolve_descriptors( - (from_dt, to_dt)) + casting, (from_res, to_res), view_off = ( + cast._resolve_descriptors((from_dt, to_dt))) assert(type(from_res) == from_Dt) assert(type(to_res) == to_Dt) - if casting & Casting.cast_is_view: + if view_off is not None: # If a view is acceptable, this is "no" casting # and byte order must be matching. - assert casting == Casting.no | Casting.cast_is_view + assert casting == Casting.no # The above table lists this as "equivalent" assert Casting.equiv == CAST_TABLE[from_Dt][to_Dt] # Note that to_res may not be the same as from_dt @@ -299,7 +298,7 @@ class TestCasting: to_dt = to_dt.values[0] cast = get_castingimpl(type(from_dt), type(to_dt)) - casting, (from_res, to_res) = cast._resolve_descriptors( + casting, (from_res, to_res), view_off = cast._resolve_descriptors( (from_dt, to_dt)) if from_res is not from_dt or to_res is not to_dt: @@ -307,7 +306,7 @@ class TestCasting: # each of which should is tested individually. 
return - safe = (casting & ~Casting.cast_is_view) <= Casting.safe + safe = casting <= Casting.safe del from_res, to_res, casting arr1, arr2, values = self.get_data(from_dt, to_dt) @@ -355,14 +354,15 @@ class TestCasting: for time_dt in time_dtypes: cast = get_castingimpl(type(from_dt), type(time_dt)) - casting, (from_res, to_res) = cast._resolve_descriptors( + casting, (from_res, to_res), view_off = cast._resolve_descriptors( (from_dt, time_dt)) assert from_res is from_dt assert to_res is time_dt del from_res, to_res - assert(casting & CAST_TABLE[from_Dt][type(time_dt)]) + assert casting & CAST_TABLE[from_Dt][type(time_dt)] + assert view_off is None int64_dt = np.dtype(np.int64) arr1, arr2, values = self.get_data(from_dt, int64_dt) @@ -391,31 +391,37 @@ class TestCasting: assert arr2_o.tobytes() == arr2.tobytes() @pytest.mark.parametrize( - ["from_dt", "to_dt", "expected_casting", "nom", "denom"], - [("M8[ns]", None, - Casting.no | Casting.cast_is_view, 1, 1), - (str(np.dtype("M8[ns]").newbyteorder()), None, Casting.equiv, 1, 1), - ("M8", "M8[ms]", Casting.safe | Casting.cast_is_view, 1, 1), - ("M8[ms]", "M8", Casting.unsafe, 1, 1), # should be invalid cast - ("M8[5ms]", "M8[5ms]", Casting.no | Casting.cast_is_view, 1, 1), - ("M8[ns]", "M8[ms]", Casting.same_kind, 1, 10**6), - ("M8[ms]", "M8[ns]", Casting.safe, 10**6, 1), - ("M8[ms]", "M8[7ms]", Casting.same_kind, 1, 7), - ("M8[4D]", "M8[1M]", Casting.same_kind, None, + ["from_dt", "to_dt", "expected_casting", "expected_view_off", + "nom", "denom"], + [("M8[ns]", None, Casting.no, 0, 1, 1), + (str(np.dtype("M8[ns]").newbyteorder()), None, + Casting.equiv, None, 1, 1), + ("M8", "M8[ms]", Casting.safe, 0, 1, 1), + # should be invalid cast: + ("M8[ms]", "M8", Casting.unsafe, None, 1, 1), + ("M8[5ms]", "M8[5ms]", Casting.no, 0, 1, 1), + ("M8[ns]", "M8[ms]", Casting.same_kind, None, 1, 10**6), + ("M8[ms]", "M8[ns]", Casting.safe, None, 10**6, 1), + ("M8[ms]", "M8[7ms]", Casting.same_kind, None, 1, 7), + ("M8[4D]", 
"M8[1M]", Casting.same_kind, None, None, # give full values based on NumPy 1.19.x [-2**63, 0, -1, 1314, -1315, 564442610]), - ("m8[ns]", None, Casting.no | Casting.cast_is_view, 1, 1), - (str(np.dtype("m8[ns]").newbyteorder()), None, Casting.equiv, 1, 1), - ("m8", "m8[ms]", Casting.safe | Casting.cast_is_view, 1, 1), - ("m8[ms]", "m8", Casting.unsafe, 1, 1), # should be invalid cast - ("m8[5ms]", "m8[5ms]", Casting.no | Casting.cast_is_view, 1, 1), - ("m8[ns]", "m8[ms]", Casting.same_kind, 1, 10**6), - ("m8[ms]", "m8[ns]", Casting.safe, 10**6, 1), - ("m8[ms]", "m8[7ms]", Casting.same_kind, 1, 7), - ("m8[4D]", "m8[1M]", Casting.unsafe, None, + ("m8[ns]", None, Casting.no, 0, 1, 1), + (str(np.dtype("m8[ns]").newbyteorder()), None, + Casting.equiv, None, 1, 1), + ("m8", "m8[ms]", Casting.safe, 0, 1, 1), + # should be invalid cast: + ("m8[ms]", "m8", Casting.unsafe, None, 1, 1), + ("m8[5ms]", "m8[5ms]", Casting.no, 0, 1, 1), + ("m8[ns]", "m8[ms]", Casting.same_kind, None, 1, 10**6), + ("m8[ms]", "m8[ns]", Casting.safe, None, 10**6, 1), + ("m8[ms]", "m8[7ms]", Casting.same_kind, None, 1, 7), + ("m8[4D]", "m8[1M]", Casting.unsafe, None, None, # give full values based on NumPy 1.19.x [-2**63, 0, 0, 1314, -1315, 564442610])]) - def test_time_to_time(self, from_dt, to_dt, expected_casting, nom, denom): + def test_time_to_time(self, from_dt, to_dt, + expected_casting, expected_view_off, + nom, denom): from_dt = np.dtype(from_dt) if to_dt is not None: to_dt = np.dtype(to_dt) @@ -428,10 +434,12 @@ class TestCasting: DType = type(from_dt) cast = get_castingimpl(DType, DType) - casting, (from_res, to_res) = cast._resolve_descriptors((from_dt, to_dt)) + casting, (from_res, to_res), view_off = cast._resolve_descriptors( + (from_dt, to_dt)) assert from_res is from_dt assert to_res is to_dt or to_dt is None assert casting == expected_casting + assert view_off == expected_view_off if nom is not None: expected_out = (values * nom // denom).view(to_res) @@ -476,9 +484,11 @@ class 
TestCasting: expected_length = get_expected_stringlength(other_dt) string_dt = np.dtype(f"{string_char}{expected_length}") - safety, (res_other_dt, res_dt) = cast._resolve_descriptors((other_dt, None)) + safety, (res_other_dt, res_dt), view_off = cast._resolve_descriptors( + (other_dt, None)) assert res_dt.itemsize == expected_length * fact assert safety == Casting.safe # we consider to string casts "safe" + assert view_off is None assert isinstance(res_dt, string_DT) # These casts currently implement changing the string length, so @@ -490,19 +500,24 @@ class TestCasting: expected_safety = Casting.same_kind to_dt = self.string_with_modified_length(string_dt, change_length) - safety, (_, res_dt) = cast._resolve_descriptors((other_dt, to_dt)) + safety, (_, res_dt), view_off = cast._resolve_descriptors( + (other_dt, to_dt)) assert res_dt is to_dt assert safety == expected_safety + assert view_off is None # The opposite direction is always considered unsafe: cast = get_castingimpl(string_DT, other_DT) - safety, _ = cast._resolve_descriptors((string_dt, other_dt)) + safety, _, view_off = cast._resolve_descriptors((string_dt, other_dt)) assert safety == Casting.unsafe + assert view_off is None cast = get_castingimpl(string_DT, other_DT) - safety, (_, res_dt) = cast._resolve_descriptors((string_dt, None)) + safety, (_, res_dt), view_off = cast._resolve_descriptors( + (string_dt, None)) assert safety == Casting.unsafe + assert view_off is None assert other_dt is res_dt # returns the singleton for simple dtypes @pytest.mark.parametrize("string_char", ["S", "U"]) @@ -521,7 +536,8 @@ class TestCasting: cast = get_castingimpl(type(other_dt), string_DT) cast_back = get_castingimpl(string_DT, type(other_dt)) - _, (res_other_dt, string_dt) = cast._resolve_descriptors((other_dt, None)) + _, (res_other_dt, string_dt), _ = cast._resolve_descriptors( + (other_dt, None)) if res_other_dt is not other_dt: # do not support non-native byteorder, skip test in that case @@ -580,13 +596,16 
@@ class TestCasting: expected_length = other_dt.itemsize // div string_dt = np.dtype(f"{string_char}{expected_length}") - safety, (res_other_dt, res_dt) = cast._resolve_descriptors((other_dt, None)) + safety, (res_other_dt, res_dt), view_off = cast._resolve_descriptors( + (other_dt, None)) assert res_dt.itemsize == expected_length * fact assert isinstance(res_dt, string_DT) + expected_view_off = None if other_dt.char == string_char: if other_dt.isnative: - expected_safety = Casting.no | Casting.cast_is_view + expected_safety = Casting.no + expected_view_off = 0 else: expected_safety = Casting.equiv elif string_char == "U": @@ -594,13 +613,19 @@ class TestCasting: else: expected_safety = Casting.unsafe + assert view_off == expected_view_off assert expected_safety == safety for change_length in [-1, 0, 1]: to_dt = self.string_with_modified_length(string_dt, change_length) - safety, (_, res_dt) = cast._resolve_descriptors((other_dt, to_dt)) + safety, (_, res_dt), view_off = cast._resolve_descriptors( + (other_dt, to_dt)) assert res_dt is to_dt + if change_length <= 0: + assert view_off == expected_view_off + else: + assert view_off is None if expected_safety == Casting.unsafe: assert safety == expected_safety elif change_length < 0: @@ -655,12 +680,16 @@ class TestCasting: object_dtype = type(np.dtype(object)) cast = get_castingimpl(object_dtype, type(dtype)) - safety, (_, res_dt) = cast._resolve_descriptors((np.dtype("O"), dtype)) + safety, (_, res_dt), view_off = cast._resolve_descriptors( + (np.dtype("O"), dtype)) assert safety == Casting.unsafe + assert view_off is None assert res_dt is dtype - safety, (_, res_dt) = cast._resolve_descriptors((np.dtype("O"), None)) + safety, (_, res_dt), view_off = cast._resolve_descriptors( + (np.dtype("O"), None)) assert safety == Casting.unsafe + assert view_off is None assert res_dt == dtype.newbyteorder("=") @pytest.mark.parametrize("dtype", simple_dtype_instances()) @@ -669,8 +698,10 @@ class TestCasting: object_dtype = 
type(np.dtype(object)) cast = get_castingimpl(type(dtype), object_dtype) - safety, (_, res_dt) = cast._resolve_descriptors((dtype, None)) + safety, (_, res_dt), view_off = cast._resolve_descriptors( + (dtype, None)) assert safety == Casting.safe + assert view_off is None assert res_dt is np.dtype("O") @pytest.mark.parametrize("casting", ["no", "unsafe"]) @@ -681,6 +712,71 @@ class TestCasting: assert np.can_cast("V4", dtype, casting=casting) == expected assert np.can_cast(dtype, "V4", casting=casting) == expected + @pytest.mark.parametrize(["to_dt", "expected_off"], + [ # Same as `from_dt` but with both fields shifted: + (np.dtype({"names": ["a", "b"], "formats": ["i4", "f4"], + "offsets": [2, 6]}), -2), + # Additional change of the names + # TODO: Tests will need changing for order vs. name based casting: + (np.dtype({"names": ["b", "a"], "formats": ["f4", "i4"], + "offsets": [6, 2]}), -2), + # Incompatible field offset change (offsets -2 and 0) + (np.dtype({"names": ["b", "a"], "formats": ["f4", "i4"], + "offsets": [6, 0]}), None)]) + def test_structured_field_offsets(self, to_dt, expected_off): + # This checks the cast-safety and view offset for swapped and "shifted" + # fields which are viewable + from_dt = np.dtype({"names": ["a", "b"], + "formats": ["i4", "f4"], + "offsets": [0, 4]}) + cast = get_castingimpl(type(from_dt), type(to_dt)) + safety, _, view_off = cast._resolve_descriptors((from_dt, to_dt)) + assert safety == Casting.equiv + # Shifting the original data pointer by -2 will align both by + # effectively adding 2 bytes of spacing before `from_dt`. 
+ assert view_off == expected_off + + @pytest.mark.parametrize(("from_dt", "to_dt", "expected_off"), [ + # Subarray cases: + ("i", "(1,1)i", 0), + ("(1,1)i", "i", 0), + ("(2,1)i", "(2,1)i", 0), + # field cases (field to field is tested explicitly also): + ("i", dict(names=["a"], formats=["i"], offsets=[2]), -2), + (dict(names=["a"], formats=["i"], offsets=[2]), "i", 2), + # Currently considered not viewable, due to multiple fields + # even though they overlap (maybe we should not allow that?) + ("i", dict(names=["a", "b"], formats=["i", "i"], offsets=[2, 2]), + None), + # different number of fields can't work, should probably just fail + # so it never reports "viewable": + ("i,i", "i,i,i", None), + # Unstructured void cases: + ("i4", "V3", 0), # void smaller or equal + ("i4", "V4", 0), # void smaller or equal + ("i4", "V10", None), # void is larger (no view) + ("O", "V4", None), # currently reject objects for view here. + ("O", "V8", None), # currently reject objects for view here. + ("V4", "V3", 0), + ("V4", "V4", 0), + ("V3", "V4", None), + # Note that currently void-to-other cast goes via byte-strings + # and is not a "view" based cast like the opposite direction: + ("V4", "i4", None), + # completely invalid/impossible cast: + ("i,i", "i,i,i", None), + ]) + def test_structured_view_offsets_paramteric( + self, from_dt, to_dt, expected_off): + # TODO: While this test is fairly thorough, right now, it does not + # really test some paths that may have nonzero offsets (they don't + # really exists). 
+ from_dt = np.dtype(from_dt) + to_dt = np.dtype(to_dt) + cast = get_castingimpl(type(from_dt), type(to_dt)) + _, _, view_off = cast._resolve_descriptors((from_dt, to_dt)) + assert view_off == expected_off + @pytest.mark.parametrize("dtype", np.typecodes["All"]) def test_object_casts_NULL_None_equivalence(self, dtype): # None to <other> casts may succeed or fail, but a NULL'ed array must diff --git a/numpy/core/tests/test_cpu_features.py b/numpy/core/tests/test_cpu_features.py index 2ccbff41c..706cf7a7e 100644 --- a/numpy/core/tests/test_cpu_features.py +++ b/numpy/core/tests/test_cpu_features.py @@ -146,6 +146,17 @@ class Test_POWER_Features(AbstractTest): def load_flags(self): self.load_flags_auxv() + +is_zarch = re.match("^(s390x)", machine, re.IGNORECASE) +@pytest.mark.skipif(not is_linux or not is_zarch, + reason="Only for Linux and IBM Z") +class Test_ZARCH_Features(AbstractTest): + features = ["VX", "VXE", "VXE2"] + + def load_flags(self): + self.load_flags_auxv() + + is_arm = re.match("^(arm|aarch64)", machine, re.IGNORECASE) @pytest.mark.skipif(not is_linux or not is_arm, reason="Only for Linux and ARM") class Test_ARM_Features(AbstractTest): diff --git a/numpy/core/tests/test_datetime.py b/numpy/core/tests/test_datetime.py index b95d669a8..baae77a35 100644 --- a/numpy/core/tests/test_datetime.py +++ b/numpy/core/tests/test_datetime.py @@ -1437,7 +1437,7 @@ class TestDateTime: # NaTs with suppress_warnings() as sup: - sup.filter(RuntimeWarning, r".*encountered in true\_divide") + sup.filter(RuntimeWarning, r".*encountered in divide") nat = np.timedelta64('NaT') for tp in (int, float): assert_equal(np.timedelta64(1) / tp(0), nat) @@ -2029,19 +2029,25 @@ class TestDateTime: assert_equal(np.maximum.reduce(a), np.timedelta64(7, 's')) + def test_timedelta_correct_mean(self): + # test mainly because it worked only via a bug in that allowed: + # `timedelta.sum(dtype="f8")` to ignore the dtype request. 
+ a = np.arange(1000, dtype="m8[s]") + assert_array_equal(a.mean(), a.sum() / len(a)) + def test_datetime_no_subtract_reducelike(self): # subtracting two datetime64 works, but we cannot reduce it, since # the result of that subtraction will have a different dtype. arr = np.array(["2021-12-02", "2019-05-12"], dtype="M8[ms]") - msg = r"the resolved dtypes are not compatible with subtract\." + msg = r"the resolved dtypes are not compatible" - with pytest.raises(TypeError, match=msg + "reduce"): + with pytest.raises(TypeError, match=msg): np.subtract.reduce(arr) - with pytest.raises(TypeError, match=msg + "accumulate"): + with pytest.raises(TypeError, match=msg): np.subtract.accumulate(arr) - with pytest.raises(TypeError, match=msg + "reduceat"): + with pytest.raises(TypeError, match=msg): np.subtract.reduceat(arr, [0]) def test_datetime_busday_offset(self): diff --git a/numpy/core/tests/test_deprecations.py b/numpy/core/tests/test_deprecations.py index e0b66defc..d2a69d4cf 100644 --- a/numpy/core/tests/test_deprecations.py +++ b/numpy/core/tests/test_deprecations.py @@ -257,20 +257,6 @@ class TestDatetime64Timezone(_DeprecationTestCase): self.assert_deprecated(np.datetime64, args=(dt,)) -class TestNonCContiguousViewDeprecation(_DeprecationTestCase): - """View of non-C-contiguous arrays deprecated in 1.11.0. - - The deprecation will not be raised for arrays that are both C and F - contiguous, as C contiguous is dominant. There are more such arrays - with relaxed stride checking than without so the deprecation is not - as visible with relaxed stride checking in force. - """ - - def test_fortran_contiguous(self): - self.assert_deprecated(np.ones((2,2)).T.view, args=(complex,)) - self.assert_deprecated(np.ones((2,2)).T.view, args=(np.int8,)) - - class TestArrayDataAttributeAssignmentDeprecation(_DeprecationTestCase): """Assigning the 'data' attribute of an ndarray is unsafe as pointed out in gh-7093. 
Eventually, such assignment should NOT be allowed, but @@ -380,18 +366,6 @@ class TestPyArray_AS2D(_DeprecationTestCase): assert_raises(NotImplementedError, npy_pyarrayas2d_deprecation) -class Test_UPDATEIFCOPY(_DeprecationTestCase): - """ - v1.14 deprecates creating an array with the UPDATEIFCOPY flag, use - WRITEBACKIFCOPY instead - """ - def test_npy_updateifcopy_deprecation(self): - from numpy.core._multiarray_tests import npy_updateifcopy_deprecation - arr = np.arange(9).reshape(3, 3) - v = arr.T - self.assert_deprecated(npy_updateifcopy_deprecation, args=(v,)) - - class TestDatetimeEvent(_DeprecationTestCase): # 2017-08-11, 1.14.0 def test_3_tuple(self): @@ -427,11 +401,6 @@ class TestBincount(_DeprecationTestCase): self.assert_deprecated(lambda: np.bincount([1, 2, 3], minlength=None)) -class TestAlen(_DeprecationTestCase): - # 2019-08-02, 1.18.0 - def test_alen(self): - self.assert_deprecated(lambda: np.alen(np.array([1, 2, 3]))) - class TestGeneratorSum(_DeprecationTestCase): # 2018-02-25, 1.15.0 @@ -1270,3 +1239,14 @@ class TestMemEventHook(_DeprecationTestCase): with pytest.warns(DeprecationWarning, match='PyDataMem_SetEventHook is deprecated'): ma_tests.test_pydatamem_seteventhook_end() + + +class TestArrayFinalizeNone(_DeprecationTestCase): + message = "Setting __array_finalize__ = None" + + def test_use_none_is_deprecated(self): + # Deprecated way that ndarray itself showed nothing needs finalizing. 
+ class NoFinalize(np.ndarray): + __array_finalize__ = None + + self.assert_deprecated(lambda: np.array(1).view(NoFinalize)) diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py index 23182470b..708e82910 100644 --- a/numpy/core/tests/test_multiarray.py +++ b/numpy/core/tests/test_multiarray.py @@ -234,11 +234,6 @@ class TestFlags: assert_equal(self.a.flags.owndata, True) assert_equal(self.a.flags.writeable, True) assert_equal(self.a.flags.aligned, True) - with assert_warns(DeprecationWarning): - assert_equal(self.a.flags.updateifcopy, False) - with assert_warns(DeprecationWarning): - assert_equal(self.a.flags['U'], False) - assert_equal(self.a.flags['UPDATEIFCOPY'], False) assert_equal(self.a.flags.writebackifcopy, False) assert_equal(self.a.flags['X'], False) assert_equal(self.a.flags['WRITEBACKIFCOPY'], False) @@ -5381,19 +5376,8 @@ class TestFlat: assert_(c.flags.writeable is False) assert_(d.flags.writeable is False) - # for 1.14 all are set to non-writeable on the way to replacing the - # UPDATEIFCOPY array returned for non-contiguous arrays. assert_(e.flags.writeable is True) assert_(f.flags.writeable is False) - with assert_warns(DeprecationWarning): - assert_(c.flags.updateifcopy is False) - with assert_warns(DeprecationWarning): - assert_(d.flags.updateifcopy is False) - with assert_warns(DeprecationWarning): - assert_(e.flags.updateifcopy is False) - with assert_warns(DeprecationWarning): - # UPDATEIFCOPY is removed. 
- assert_(f.flags.updateifcopy is False) assert_(c.flags.writebackifcopy is False) assert_(d.flags.writebackifcopy is False) assert_(e.flags.writebackifcopy is False) @@ -6888,26 +6872,6 @@ class TestInner: assert_equal(np.inner(b, a).transpose(2,3,0,1), desired) -class TestAlen: - def test_basic(self): - with pytest.warns(DeprecationWarning): - m = np.array([1, 2, 3]) - assert_equal(np.alen(m), 3) - - m = np.array([[1, 2, 3], [4, 5, 7]]) - assert_equal(np.alen(m), 2) - - m = [1, 2, 3] - assert_equal(np.alen(m), 3) - - m = [[1, 2, 3], [4, 5, 7]] - assert_equal(np.alen(m), 2) - - def test_singleton(self): - with pytest.warns(DeprecationWarning): - assert_equal(np.alen(5), 1) - - class TestChoose: def setup(self): self.x = 2*np.ones((3,), dtype=int) @@ -7813,6 +7777,26 @@ class TestNewBufferProtocol: # Fix buffer info again before we delete (or we lose the memory) _multiarray_tests.corrupt_or_fix_bufferinfo(obj) + def test_no_suboffsets(self): + try: + import _testbuffer + except ImportError: + raise pytest.skip("_testbuffer is not available") + + for shape in [(2, 3), (2, 3, 4)]: + data = list(range(np.prod(shape))) + buffer = _testbuffer.ndarray(data, shape, format='i', + flags=_testbuffer.ND_PIL) + msg = "NumPy currently does not support.*suboffsets" + with pytest.raises(BufferError, match=msg): + np.asarray(buffer) + with pytest.raises(BufferError, match=msg): + np.asarray([buffer]) + + # Also check (unrelated and more limited but similar) frombuffer: + with pytest.raises(BufferError): + np.frombuffer(buffer) + class TestArrayCreationCopyArgument(object): @@ -7832,9 +7816,9 @@ class TestArrayCreationCopyArgument(object): pyscalar = arr.item(0) # Test never-copy raises error: - assert_raises(ValueError, np.array, scalar, + assert_raises(ValueError, np.array, scalar, copy=np._CopyMode.NEVER) - assert_raises(ValueError, np.array, pyscalar, + assert_raises(ValueError, np.array, pyscalar, copy=np._CopyMode.NEVER) assert_raises(ValueError, np.array, pyscalar, 
copy=self.RaiseOnBool()) @@ -8970,12 +8954,28 @@ class TestArrayFinalize: a = np.array(1).view(SavesBase) assert_(a.saved_base is a.base) - def test_bad_finalize(self): + def test_bad_finalize1(self): class BadAttributeArray(np.ndarray): @property def __array_finalize__(self): raise RuntimeError("boohoo!") + with pytest.raises(TypeError, match="not callable"): + np.arange(10).view(BadAttributeArray) + + def test_bad_finalize2(self): + class BadAttributeArray(np.ndarray): + def __array_finalize__(self): + raise RuntimeError("boohoo!") + + with pytest.raises(TypeError, match="takes 1 positional"): + np.arange(10).view(BadAttributeArray) + + def test_bad_finalize3(self): + class BadAttributeArray(np.ndarray): + def __array_finalize__(self, obj): + raise RuntimeError("boohoo!") + with pytest.raises(RuntimeError, match="boohoo!"): np.arange(10).view(BadAttributeArray) @@ -9013,6 +9013,14 @@ class TestArrayFinalize: break_cycles() assert_(obj_ref() is None, "no references should remain") + def test_can_use_super(self): + class SuperFinalize(np.ndarray): + def __array_finalize__(self, obj): + self.saved_result = super().__array_finalize__(obj) + + a = np.array(1).view(SuperFinalize) + assert_(a.saved_result is None) + def test_orderconverter_with_nonASCII_unicode_ordering(): # gh-7475 @@ -9207,3 +9215,66 @@ def test_getfield(): pytest.raises(ValueError, a.getfield, 'uint8', -1) pytest.raises(ValueError, a.getfield, 'uint8', 16) pytest.raises(ValueError, a.getfield, 'uint64', 0) + + +class TestViewDtype: + """ + Verify that making a view of a non-contiguous array works as expected. 
+ """ + def test_smaller_dtype_multiple(self): + # x is non-contiguous + x = np.arange(10, dtype='<i4')[::2] + with pytest.raises(ValueError, + match='the last axis must be contiguous'): + x.view('<i2') + expected = [[0, 0], [2, 0], [4, 0], [6, 0], [8, 0]] + assert_array_equal(x[:, np.newaxis].view('<i2'), expected) + + def test_smaller_dtype_not_multiple(self): + # x is non-contiguous + x = np.arange(5, dtype='<i4')[::2] + + with pytest.raises(ValueError, + match='the last axis must be contiguous'): + x.view('S3') + with pytest.raises(ValueError, + match='When changing to a smaller dtype'): + x[:, np.newaxis].view('S3') + + # Make sure the problem is because of the dtype size + expected = [[b''], [b'\x02'], [b'\x04']] + assert_array_equal(x[:, np.newaxis].view('S4'), expected) + + def test_larger_dtype_multiple(self): + # x is non-contiguous in the first dimension, contiguous in the last + x = np.arange(20, dtype='<i2').reshape(10, 2)[::2, :] + expected = np.array([[65536], [327684], [589832], + [851980], [1114128]], dtype='<i4') + assert_array_equal(x.view('<i4'), expected) + + def test_larger_dtype_not_multiple(self): + # x is non-contiguous in the first dimension, contiguous in the last + x = np.arange(20, dtype='<i2').reshape(10, 2)[::2, :] + with pytest.raises(ValueError, + match='When changing to a larger dtype'): + x.view('S3') + # Make sure the problem is because of the dtype size + expected = [[b'\x00\x00\x01'], [b'\x04\x00\x05'], [b'\x08\x00\t'], + [b'\x0c\x00\r'], [b'\x10\x00\x11']] + assert_array_equal(x.view('S4'), expected) + + def test_f_contiguous(self): + # x is F-contiguous + x = np.arange(4 * 3, dtype='<i4').reshape(4, 3).T + with pytest.raises(ValueError, + match='the last axis must be contiguous'): + x.view('<i2') + + def test_non_c_contiguous(self): + # x is contiguous in axis=-1, but not C-contiguous in other axes + x = np.arange(2 * 3 * 4, dtype='i1').\ + reshape(2, 3, 4).transpose(1, 0, 2) + expected = [[[256, 770], [3340, 3854]], + 
[[1284, 1798], [4368, 4882]], + [[2312, 2826], [5396, 5910]]] + assert_array_equal(x.view('<i2'), expected) diff --git a/numpy/core/tests/test_nditer.py b/numpy/core/tests/test_nditer.py index ed775cac6..d96c14e54 100644 --- a/numpy/core/tests/test_nditer.py +++ b/numpy/core/tests/test_nditer.py @@ -828,7 +828,7 @@ def test_iter_nbo_align_contig(): casting='equiv', op_dtypes=[np.dtype('f4')]) with i: - # context manager triggers UPDATEIFCOPY on i at exit + # context manager triggers WRITEBACKIFCOPY on i at exit assert_equal(i.dtypes[0].byteorder, a.dtype.byteorder) assert_equal(i.operands[0].dtype.byteorder, a.dtype.byteorder) assert_equal(i.operands[0], a) diff --git a/numpy/core/tests/test_scalarmath.py b/numpy/core/tests/test_scalarmath.py index 90078a2ea..8a77eca00 100644 --- a/numpy/core/tests/test_scalarmath.py +++ b/numpy/core/tests/test_scalarmath.py @@ -4,6 +4,7 @@ import warnings import itertools import operator import platform +from distutils.version import LooseVersion as _LooseVersion import pytest from hypothesis import given, settings, Verbosity from hypothesis.strategies import sampled_from @@ -680,17 +681,29 @@ class TestAbs: @pytest.mark.parametrize("dtype", floating_types + complex_floating_types) def test_builtin_abs(self, dtype): - if sys.platform == "cygwin" and dtype == np.clongdouble: + if ( + sys.platform == "cygwin" and dtype == np.clongdouble and + ( + _LooseVersion(platform.release().split("-")[0]) + < _LooseVersion("3.3.0") + ) + ): pytest.xfail( - reason="absl is computed in double precision on cygwin" + reason="absl is computed in double precision on cygwin < 3.3" ) self._test_abs_func(abs, dtype) @pytest.mark.parametrize("dtype", floating_types + complex_floating_types) def test_numpy_abs(self, dtype): - if sys.platform == "cygwin" and dtype == np.clongdouble: + if ( + sys.platform == "cygwin" and dtype == np.clongdouble and + ( + _LooseVersion(platform.release().split("-")[0]) + < _LooseVersion("3.3.0") + ) + ): pytest.xfail( - 
reason="absl is computed in double precision on cygwin" + reason="absl is computed in double precision on cygwin < 3.3" ) self._test_abs_func(np.abs, dtype) diff --git a/numpy/core/tests/test_scalarprint.py b/numpy/core/tests/test_scalarprint.py index ee21d4aa5..4deb5a0a4 100644 --- a/numpy/core/tests/test_scalarprint.py +++ b/numpy/core/tests/test_scalarprint.py @@ -306,6 +306,7 @@ class TestRealScalars: assert_equal(fpos(tp('1.2'), unique=False, precision=4, trim='-'), "1.2" if tp != np.float16 else "1.2002") assert_equal(fpos(tp('1.'), trim='-'), "1") + assert_equal(fpos(tp('1.001'), precision=1, trim='-'), "1") @pytest.mark.skipif(not platform.machine().startswith("ppc64"), reason="only applies to ppc float128 values") diff --git a/numpy/core/tests/test_ufunc.py b/numpy/core/tests/test_ufunc.py index ef0bac957..9a9d46da0 100644 --- a/numpy/core/tests/test_ufunc.py +++ b/numpy/core/tests/test_ufunc.py @@ -1762,12 +1762,15 @@ class TestUfunc: result = _rational_tests.test_add(a, b) assert_equal(result, target) - # But since we use the old type resolver, this may not work - # for dtype variations unless the output dtype is given: + # This works even more generally, so long the default common-dtype + # promoter works out: result = _rational_tests.test_add(a, b.astype(np.uint16), out=c) assert_equal(result, target) + + # But, it can be fooled, e.g. 
(use scalars, which forces legacy + # type resolution to kick in, which then fails): with assert_raises(TypeError): - _rational_tests.test_add(a, b.astype(np.uint16)) + _rational_tests.test_add(a, np.uint16(2)) def test_operand_flags(self): a = np.arange(16, dtype='l').reshape(4, 4) @@ -2123,6 +2126,17 @@ class TestUfunc: c = np.array([1., 2.]) assert_array_equal(ufunc(a, c), ufunc([True, True], True)) assert ufunc.reduce(a) == True + # check that the output has no effect: + out = np.zeros(2, dtype=np.int32) + expected = ufunc([True, True], True).astype(out.dtype) + assert_array_equal(ufunc(a, c, out=out), expected) + out = np.zeros((), dtype=np.int32) + assert ufunc.reduce(a, out=out) == True + # Last check, test reduction when out and a match (the complexity here + # is that the "i,i->?" may seem right, but should not match. + a = np.array([3], dtype="i") + out = np.zeros((), dtype=a.dtype) + assert ufunc.reduce(a, out=out) == 1 @pytest.mark.parametrize("ufunc", [np.logical_and, np.logical_or, np.logical_xor]) @@ -2134,6 +2148,60 @@ class TestUfunc: # It would be safe, but not equiv casting: ufunc(a, c, out=out, casting="equiv") + def test_reducelike_byteorder_resolution(self): + # See gh-20699, byte-order changes need some extra care in the type + # resolution to make the following succeed: + arr_be = np.arange(10, dtype=">i8") + arr_le = np.arange(10, dtype="<i8") + + assert np.add.reduce(arr_be) == np.add.reduce(arr_le) + assert_array_equal(np.add.accumulate(arr_be), np.add.accumulate(arr_le)) + assert_array_equal( + np.add.reduceat(arr_be, [1]), np.add.reduceat(arr_le, [1])) + + def test_reducelike_out_promotes(self): + # Check that the out argument to reductions is considered for + # promotion. See also gh-20455. 
+ # Note that these paths could prefer `initial=` in the future and + # do not up-cast to the default integer for add and prod + arr = np.ones(1000, dtype=np.uint8) + out = np.zeros((), dtype=np.uint16) + assert np.add.reduce(arr, out=out) == 1000 + arr[:10] = 2 + assert np.multiply.reduce(arr, out=out) == 2**10 + + # For legacy dtypes, the signature currently has to be forced if `out=` + # is passed. The two paths below should differ, without `dtype=` the + # expected result should be: `np.prod(arr.astype("f8")).astype("f4")`! + arr = np.full(5, 2**25-1, dtype=np.int64) + + # float32 and int64 promote to float64: + res = np.zeros((), dtype=np.float32) + # If `dtype=` is passed, the calculation is forced to float32: + single_res = np.zeros((), dtype=np.float32) + np.multiply.reduce(arr, out=single_res, dtype=np.float32) + assert single_res != res + + def test_reducelike_output_needs_identical_cast(self): + # Checks the case where the we have a simple byte-swap works, maily + # tests that this is not rejected directly. + # (interesting because we require descriptor identity in reducelikes). + arr = np.ones(20, dtype="f8") + out = np.empty((), dtype=arr.dtype.newbyteorder()) + expected = np.add.reduce(arr) + np.add.reduce(arr, out=out) + assert_array_equal(expected, out) + # Check reduceat: + out = np.empty(2, dtype=arr.dtype.newbyteorder()) + expected = np.add.reduceat(arr, [0, 1]) + np.add.reduceat(arr, [0, 1], out=out) + assert_array_equal(expected, out) + # And accumulate: + out = np.empty(arr.shape, dtype=arr.dtype.newbyteorder()) + expected = np.add.accumulate(arr) + np.add.accumulate(arr, out=out) + assert_array_equal(expected, out) + def test_reduce_noncontig_output(self): # Check that reduction deals with non-contiguous output arrays # appropriately. 
diff --git a/numpy/core/tests/test_umath.py b/numpy/core/tests/test_umath.py index fc7c592f0..e7fee46b7 100644 --- a/numpy/core/tests/test_umath.py +++ b/numpy/core/tests/test_umath.py @@ -17,18 +17,8 @@ from numpy.testing import ( assert_array_max_ulp, assert_allclose, assert_no_warnings, suppress_warnings, _gen_alignment_data, assert_array_almost_equal_nulp ) +from numpy.testing._private.utils import _glibc_older_than -def get_glibc_version(): - try: - ver = os.confstr('CS_GNU_LIBC_VERSION').rsplit(' ')[1] - except Exception as inst: - ver = '0.0' - - return ver - - -glibcver = get_glibc_version() -glibc_older_than = lambda x: (glibcver != '0.0' and glibcver < x) def on_powerpc(): """ True if we are running on a Power PC platform.""" @@ -1014,7 +1004,7 @@ class TestSpecialFloats: # See: https://github.com/numpy/numpy/issues/19192 @pytest.mark.xfail( - glibc_older_than("2.17"), + _glibc_older_than("2.17"), reason="Older glibc versions may not raise appropriate FP exceptions" ) def test_exp_exceptions(self): @@ -1262,6 +1252,11 @@ class TestSpecialFloats: assert_raises(FloatingPointError, np.arctanh, np.array(value, dtype=dt)) + # See: https://github.com/numpy/numpy/issues/20448 + @pytest.mark.xfail( + _glibc_older_than("2.17"), + reason="Older glibc versions may not raise appropriate FP exceptions" + ) def test_exp2(self): with np.errstate(all='ignore'): in_ = [np.nan, -np.nan, np.inf, -np.inf] @@ -1397,7 +1392,7 @@ class TestAVXFloat32Transcendental: M = np.int_(N/20) index = np.random.randint(low=0, high=N, size=M) x_f32 = np.float32(np.random.uniform(low=-100.,high=100.,size=N)) - if not glibc_older_than("2.17"): + if not _glibc_older_than("2.17"): # test coverage for elements > 117435.992f for which glibc is used # this is known to be problematic on old glibc, so skip it there x_f32[index] = np.float32(10E+10*np.random.rand(M)) @@ -3433,7 +3428,7 @@ class TestComplexFunctions: x_series = np.logspace(-20, -3.001, 200) x_basic = np.logspace(-2.999, 0, 10, 
endpoint=False) - if glibc_older_than("2.19") and dtype is np.longcomplex: + if dtype is np.longcomplex: if (platform.machine() == 'aarch64' and bad_arcsinh()): pytest.skip("Trig functions of np.longcomplex values known " "to be inaccurate on aarch64 for some compilation " diff --git a/numpy/core/tests/test_umath_accuracy.py b/numpy/core/tests/test_umath_accuracy.py index 32e2dca66..3d4d5b5aa 100644 --- a/numpy/core/tests/test_umath_accuracy.py +++ b/numpy/core/tests/test_umath_accuracy.py @@ -5,11 +5,14 @@ import sys import pytest from ctypes import c_longlong, c_double, c_float, c_int, cast, pointer, POINTER from numpy.testing import assert_array_max_ulp +from numpy.testing._private.utils import _glibc_older_than from numpy.core._multiarray_umath import __cpu_features__ IS_AVX = __cpu_features__.get('AVX512F', False) or \ (__cpu_features__.get('FMA3', False) and __cpu_features__.get('AVX2', False)) -runtest = sys.platform.startswith('linux') and IS_AVX +# only run on linux with AVX, also avoid old glibc (numpy/numpy#20448). 
+runtest = (sys.platform.startswith('linux') + and IS_AVX and not _glibc_older_than("2.17")) platform_skip = pytest.mark.skipif(not runtest, reason="avoid testing inconsistent platform " "library implementations") diff --git a/numpy/ctypeslib.py b/numpy/ctypeslib.py index 8d105a248..c4bafca1b 100644 --- a/numpy/ctypeslib.py +++ b/numpy/ctypeslib.py @@ -170,7 +170,7 @@ def _num_fromflags(flaglist): return num _flagnames = ['C_CONTIGUOUS', 'F_CONTIGUOUS', 'ALIGNED', 'WRITEABLE', - 'OWNDATA', 'UPDATEIFCOPY', 'WRITEBACKIFCOPY'] + 'OWNDATA', 'WRITEBACKIFCOPY'] def _flags_fromnum(num): res = [] for key in _flagnames: @@ -261,7 +261,6 @@ def ndpointer(dtype=None, ndim=None, shape=None, flags=None): - WRITEABLE / W - ALIGNED / A - WRITEBACKIFCOPY / X - - UPDATEIFCOPY / U Returns ------- diff --git a/numpy/ctypeslib.pyi b/numpy/ctypeslib.pyi index 1c396d240..106356873 100644 --- a/numpy/ctypeslib.pyi +++ b/numpy/ctypeslib.pyi @@ -5,21 +5,15 @@ from ctypes import c_int64 as _c_intp import os import sys import ctypes +from collections.abc import Iterable, Sequence from typing import ( Literal as L, Any, - List, Union, TypeVar, - Type, Generic, - Optional, overload, - Iterable, ClassVar, - Tuple, - Sequence, - Dict, ) from numpy import ( @@ -77,12 +71,12 @@ from numpy.typing import ( # TODO: Add a proper `_Shape` bound once we've got variadic typevars _DType = TypeVar("_DType", bound=dtype[Any]) -_DTypeOptional = TypeVar("_DTypeOptional", bound=Optional[dtype[Any]]) +_DTypeOptional = TypeVar("_DTypeOptional", bound=None | dtype[Any]) _SCT = TypeVar("_SCT", bound=generic) _DTypeLike = Union[ dtype[_SCT], - Type[_SCT], + type[_SCT], _SupportsDType[dtype[_SCT]], ] _ArrayLike = _FiniteNestedSequence[_SupportsArray[dtype[_SCT]]] @@ -93,7 +87,6 @@ _FlagsKind = L[ 'ALIGNED', 'A', 'WRITEABLE', 'W', 'OWNDATA', 'O', - 'UPDATEIFCOPY', 'U', 'WRITEBACKIFCOPY', 'X', ] @@ -104,18 +97,18 @@ class _ndptr(ctypes.c_void_p, Generic[_DTypeOptional]): _dtype_: ClassVar[_DTypeOptional] _shape_: 
ClassVar[None] _ndim_: ClassVar[None | int] - _flags_: ClassVar[None | List[_FlagsKind]] + _flags_: ClassVar[None | list[_FlagsKind]] @overload @classmethod - def from_param(cls: Type[_ndptr[None]], obj: ndarray[Any, Any]) -> _ctypes: ... + def from_param(cls: type[_ndptr[None]], obj: ndarray[Any, Any]) -> _ctypes: ... @overload @classmethod - def from_param(cls: Type[_ndptr[_DType]], obj: ndarray[Any, _DType]) -> _ctypes: ... + def from_param(cls: type[_ndptr[_DType]], obj: ndarray[Any, _DType]) -> _ctypes: ... class _concrete_ndptr(_ndptr[_DType]): _dtype_: ClassVar[_DType] - _shape_: ClassVar[Tuple[int, ...]] + _shape_: ClassVar[tuple[int, ...]] @property def contents(self) -> ndarray[Any, _DType]: ... @@ -124,7 +117,7 @@ def load_library( loader_path: str | bytes | os.PathLike[str] | os.PathLike[bytes], ) -> ctypes.CDLL: ... -__all__: List[str] +__all__: list[str] c_intp = _c_intp @@ -134,7 +127,7 @@ def ndpointer( ndim: int = ..., shape: None | _ShapeLike = ..., flags: None | _FlagsKind | Iterable[_FlagsKind] | int | flagsobj = ..., -) -> Type[_ndptr[None]]: ... +) -> type[_ndptr[None]]: ... @overload def ndpointer( dtype: _DTypeLike[_SCT], @@ -142,7 +135,7 @@ def ndpointer( *, shape: _ShapeLike, flags: None | _FlagsKind | Iterable[_FlagsKind] | int | flagsobj = ..., -) -> Type[_concrete_ndptr[dtype[_SCT]]]: ... +) -> type[_concrete_ndptr[dtype[_SCT]]]: ... @overload def ndpointer( dtype: DTypeLike, @@ -150,54 +143,54 @@ def ndpointer( *, shape: _ShapeLike, flags: None | _FlagsKind | Iterable[_FlagsKind] | int | flagsobj = ..., -) -> Type[_concrete_ndptr[dtype[Any]]]: ... +) -> type[_concrete_ndptr[dtype[Any]]]: ... @overload def ndpointer( dtype: _DTypeLike[_SCT], ndim: int = ..., shape: None = ..., flags: None | _FlagsKind | Iterable[_FlagsKind] | int | flagsobj = ..., -) -> Type[_ndptr[dtype[_SCT]]]: ... +) -> type[_ndptr[dtype[_SCT]]]: ... 
@overload def ndpointer( dtype: DTypeLike, ndim: int = ..., shape: None = ..., flags: None | _FlagsKind | Iterable[_FlagsKind] | int | flagsobj = ..., -) -> Type[_ndptr[dtype[Any]]]: ... +) -> type[_ndptr[dtype[Any]]]: ... @overload -def as_ctypes_type(dtype: _BoolCodes | _DTypeLike[bool_] | Type[ctypes.c_bool]) -> Type[ctypes.c_bool]: ... +def as_ctypes_type(dtype: _BoolCodes | _DTypeLike[bool_] | type[ctypes.c_bool]) -> type[ctypes.c_bool]: ... @overload -def as_ctypes_type(dtype: _ByteCodes | _DTypeLike[byte] | Type[ctypes.c_byte]) -> Type[ctypes.c_byte]: ... +def as_ctypes_type(dtype: _ByteCodes | _DTypeLike[byte] | type[ctypes.c_byte]) -> type[ctypes.c_byte]: ... @overload -def as_ctypes_type(dtype: _ShortCodes | _DTypeLike[short] | Type[ctypes.c_short]) -> Type[ctypes.c_short]: ... +def as_ctypes_type(dtype: _ShortCodes | _DTypeLike[short] | type[ctypes.c_short]) -> type[ctypes.c_short]: ... @overload -def as_ctypes_type(dtype: _IntCCodes | _DTypeLike[intc] | Type[ctypes.c_int]) -> Type[ctypes.c_int]: ... +def as_ctypes_type(dtype: _IntCCodes | _DTypeLike[intc] | type[ctypes.c_int]) -> type[ctypes.c_int]: ... @overload -def as_ctypes_type(dtype: _IntCodes | _DTypeLike[int_] | Type[int | ctypes.c_long]) -> Type[ctypes.c_long]: ... +def as_ctypes_type(dtype: _IntCodes | _DTypeLike[int_] | type[int | ctypes.c_long]) -> type[ctypes.c_long]: ... @overload -def as_ctypes_type(dtype: _LongLongCodes | _DTypeLike[longlong] | Type[ctypes.c_longlong]) -> Type[ctypes.c_longlong]: ... +def as_ctypes_type(dtype: _LongLongCodes | _DTypeLike[longlong] | type[ctypes.c_longlong]) -> type[ctypes.c_longlong]: ... @overload -def as_ctypes_type(dtype: _UByteCodes | _DTypeLike[ubyte] | Type[ctypes.c_ubyte]) -> Type[ctypes.c_ubyte]: ... +def as_ctypes_type(dtype: _UByteCodes | _DTypeLike[ubyte] | type[ctypes.c_ubyte]) -> type[ctypes.c_ubyte]: ... @overload -def as_ctypes_type(dtype: _UShortCodes | _DTypeLike[ushort] | Type[ctypes.c_ushort]) -> Type[ctypes.c_ushort]: ... 
+def as_ctypes_type(dtype: _UShortCodes | _DTypeLike[ushort] | type[ctypes.c_ushort]) -> type[ctypes.c_ushort]: ... @overload -def as_ctypes_type(dtype: _UIntCCodes | _DTypeLike[uintc] | Type[ctypes.c_uint]) -> Type[ctypes.c_uint]: ... +def as_ctypes_type(dtype: _UIntCCodes | _DTypeLike[uintc] | type[ctypes.c_uint]) -> type[ctypes.c_uint]: ... @overload -def as_ctypes_type(dtype: _UIntCodes | _DTypeLike[uint] | Type[ctypes.c_ulong]) -> Type[ctypes.c_ulong]: ... +def as_ctypes_type(dtype: _UIntCodes | _DTypeLike[uint] | type[ctypes.c_ulong]) -> type[ctypes.c_ulong]: ... @overload -def as_ctypes_type(dtype: _ULongLongCodes | _DTypeLike[ulonglong] | Type[ctypes.c_ulonglong]) -> Type[ctypes.c_ulonglong]: ... +def as_ctypes_type(dtype: _ULongLongCodes | _DTypeLike[ulonglong] | type[ctypes.c_ulonglong]) -> type[ctypes.c_ulonglong]: ... @overload -def as_ctypes_type(dtype: _SingleCodes | _DTypeLike[single] | Type[ctypes.c_float]) -> Type[ctypes.c_float]: ... +def as_ctypes_type(dtype: _SingleCodes | _DTypeLike[single] | type[ctypes.c_float]) -> type[ctypes.c_float]: ... @overload -def as_ctypes_type(dtype: _DoubleCodes | _DTypeLike[double] | Type[float | ctypes.c_double]) -> Type[ctypes.c_double]: ... +def as_ctypes_type(dtype: _DoubleCodes | _DTypeLike[double] | type[float | ctypes.c_double]) -> type[ctypes.c_double]: ... @overload -def as_ctypes_type(dtype: _LongDoubleCodes | _DTypeLike[longdouble] | Type[ctypes.c_longdouble]) -> Type[ctypes.c_longdouble]: ... +def as_ctypes_type(dtype: _LongDoubleCodes | _DTypeLike[longdouble] | type[ctypes.c_longdouble]) -> type[ctypes.c_longdouble]: ... @overload -def as_ctypes_type(dtype: _VoidDTypeLike) -> Type[Any]: ... # `ctypes.Union` or `ctypes.Structure` +def as_ctypes_type(dtype: _VoidDTypeLike) -> type[Any]: ... # `ctypes.Union` or `ctypes.Structure` @overload -def as_ctypes_type(dtype: str) -> Type[Any]: ... +def as_ctypes_type(dtype: str) -> type[Any]: ... 
@overload def as_array(obj: ctypes._PointerLike, shape: Sequence[int]) -> NDArray[Any]: ... diff --git a/numpy/distutils/armccompiler.py b/numpy/distutils/armccompiler.py new file mode 100644 index 000000000..968504c7b --- /dev/null +++ b/numpy/distutils/armccompiler.py @@ -0,0 +1,28 @@ +from __future__ import division, absolute_import, print_function + +from distutils.unixccompiler import UnixCCompiler + +class ArmCCompiler(UnixCCompiler): + + """ + Arm compiler. + """ + + compiler_type = 'arm' + cc_exe = 'armclang' + cxx_exe = 'armclang++' + + def __init__(self, verbose=0, dry_run=0, force=0): + UnixCCompiler.__init__(self, verbose, dry_run, force) + cc_compiler = self.cc_exe + cxx_compiler = self.cxx_exe + self.set_executables(compiler=cc_compiler + + ' -O3 -fPIC', + compiler_so=cc_compiler + + ' -O3 -fPIC', + compiler_cxx=cxx_compiler + + ' -O3 -fPIC', + linker_exe=cc_compiler + + ' -lamath', + linker_so=cc_compiler + + ' -lamath -shared') diff --git a/numpy/distutils/ccompiler.py b/numpy/distutils/ccompiler.py index 713b8c72f..16f00d8ed 100644 --- a/numpy/distutils/ccompiler.py +++ b/numpy/distutils/ccompiler.py @@ -109,7 +109,7 @@ replace_method(CCompiler, 'find_executables', CCompiler_find_executables) # Using customized CCompiler.spawn. -def CCompiler_spawn(self, cmd, display=None): +def CCompiler_spawn(self, cmd, display=None, env=None): """ Execute a command in a sub-process. @@ -120,6 +120,7 @@ def CCompiler_spawn(self, cmd, display=None): display : str or sequence of str, optional The text to add to the log file kept by `numpy.distutils`. If not given, `display` is equal to `cmd`. + env: a dictionary for environment variables, optional Returns ------- @@ -131,6 +132,7 @@ def CCompiler_spawn(self, cmd, display=None): If the command failed, i.e. the exit status was not 0. 
""" + env = env if env is not None else dict(os.environ) if display is None: display = cmd if is_sequence(display): @@ -138,9 +140,9 @@ def CCompiler_spawn(self, cmd, display=None): log.info(display) try: if self.verbose: - subprocess.check_output(cmd) + subprocess.check_output(cmd, env=env) else: - subprocess.check_output(cmd, stderr=subprocess.STDOUT) + subprocess.check_output(cmd, stderr=subprocess.STDOUT, env=env) except subprocess.CalledProcessError as exc: o = exc.output s = exc.returncode @@ -706,6 +708,9 @@ compiler_class['intelemw'] = ('intelccompiler', 'IntelEM64TCCompilerW', "Intel C Compiler for 64-bit applications on Windows") compiler_class['pathcc'] = ('pathccompiler', 'PathScaleCCompiler', "PathScale Compiler for SiCortex-based applications") +compiler_class['arm'] = ('armccompiler', 'ArmCCompiler', + "Arm C Compiler") + ccompiler._default_compilers += (('linux.*', 'intel'), ('linux.*', 'intele'), ('linux.*', 'intelem'), diff --git a/numpy/distutils/ccompiler_opt.py b/numpy/distutils/ccompiler_opt.py index 39847c20f..f1d024b94 100644 --- a/numpy/distutils/ccompiler_opt.py +++ b/numpy/distutils/ccompiler_opt.py @@ -228,6 +228,7 @@ class _Config: x64 = "SSE SSE2 SSE3", ppc64 = '', # play it safe ppc64le = "VSX VSX2", + s390x = '', armhf = '', # play it safe aarch64 = "NEON NEON_FP16 NEON_VFPV4 ASIMD" ) @@ -293,6 +294,13 @@ class _Config: VSX2 = dict(interest=2, implies="VSX", implies_detect=False), ## Power9/ISA 3.00 VSX3 = dict(interest=3, implies="VSX2", implies_detect=False), + # IBM/Z + ## VX(z13) support + VX = dict(interest=1, headers="vecintrin.h"), + ## Vector-Enhancements Facility + VXE = dict(interest=2, implies="VX", implies_detect=False), + ## Vector-Enhancements Facility 2 + VXE2 = dict(interest=3, implies="VXE", implies_detect=False), # ARM NEON = dict(interest=1, headers="arm_neon.h"), NEON_FP16 = dict(interest=2, implies="NEON"), @@ -472,6 +480,23 @@ class _Config: return partial + on_zarch = self.cc_on_s390x + if on_zarch: + partial = 
dict( + VX = dict( + flags="-march=arch11 -mzvector" + ), + VXE = dict( + flags="-march=arch12", implies_detect=False + ), + VXE2 = dict( + flags="-march=arch13", implies_detect=False + ) + ) + + return partial + + if self.cc_on_aarch64 and is_unix: return dict( NEON = dict( implies="NEON_FP16 NEON_VFPV4 ASIMD", autovec=True @@ -654,9 +679,9 @@ class _Distutils: @staticmethod def dist_load_module(name, path): """Load a module from file, required by the abstract class '_Cache'.""" - from numpy.compat import npy_load_module + from .misc_util import exec_mod_from_location try: - return npy_load_module(name, path) + return exec_mod_from_location(name, path) except Exception as e: _Distutils.dist_log(e, stderr=True) return None @@ -874,7 +899,11 @@ class _CCompiler: cc_on_x64 : bool True when the target architecture is 64-bit x86 cc_on_ppc64 : bool - True when the target architecture is 64-bit big-endian PowerPC + True when the target architecture is 64-bit big-endian powerpc + cc_on_ppc64le : bool + True when the target architecture is 64-bit litle-endian powerpc + cc_on_s390x : bool + True when the target architecture is IBM/ZARCH on linux cc_on_armhf : bool True when the target architecture is 32-bit ARMv7+ cc_on_aarch64 : bool @@ -919,6 +948,7 @@ class _CCompiler: ("cc_on_ppc64", ".*(powerpc|ppc)64.*"), ("cc_on_aarch64", ".*(aarch64|arm64).*"), ("cc_on_armhf", ".*arm.*"), + ("cc_on_s390x", ".*s390x.*"), # undefined platform ("cc_on_noarch", ""), ) @@ -983,7 +1013,8 @@ class _CCompiler: self.cc_is_gcc = True self.cc_march = "unknown" - for arch in ("x86", "x64", "ppc64", "ppc64le", "armhf", "aarch64"): + for arch in ("x86", "x64", "ppc64", "ppc64le", + "armhf", "aarch64", "s390x"): if getattr(self, "cc_on_" + arch): self.cc_march = arch break @@ -1063,7 +1094,9 @@ class _CCompiler: _cc_normalize_unix_frgx = re.compile( # 2- to remove any flags starts with # -march, -mcpu, -x(INTEL) and '-m' without '=' - r"^(?!(-mcpu=|-march=|-x[A-Z0-9\-]))(?!-m[a-z0-9\-\.]*.$)" + 
r"^(?!(-mcpu=|-march=|-x[A-Z0-9\-]|-m[a-z0-9\-\.]*.$))|" + # exclude: + r"(?:-mzvector)" ) _cc_normalize_unix_krgx = re.compile( # 3- keep only the highest of diff --git a/numpy/distutils/checks/cpu_vx.c b/numpy/distutils/checks/cpu_vx.c new file mode 100644 index 000000000..18fb7ef94 --- /dev/null +++ b/numpy/distutils/checks/cpu_vx.c @@ -0,0 +1,16 @@ +#if (__VEC__ < 10301) || (__ARCH__ < 11) + #error VX not supported +#endif + +#include <vecintrin.h> +int main(int argc, char **argv) +{ + __vector double x = vec_abs(vec_xl(argc, (double*)argv)); + __vector double y = vec_load_len((double*)argv, (unsigned int)argc); + + x = vec_round(vec_ceil(x) + vec_floor(y)); + __vector bool long long m = vec_cmpge(x, y); + __vector long long i = vec_signed(vec_sel(x, y, m)); + + return (int)vec_extract(i, 0); +} diff --git a/numpy/distutils/checks/cpu_vxe.c b/numpy/distutils/checks/cpu_vxe.c new file mode 100644 index 000000000..ca41f8434 --- /dev/null +++ b/numpy/distutils/checks/cpu_vxe.c @@ -0,0 +1,25 @@ +#if (__VEC__ < 10302) || (__ARCH__ < 12) + #error VXE not supported +#endif + +#include <vecintrin.h> +int main(int argc, char **argv) +{ + __vector float x = vec_nabs(vec_xl(argc, (float*)argv)); + __vector float y = vec_load_len((float*)argv, (unsigned int)argc); + + x = vec_round(vec_ceil(x) + vec_floor(y)); + __vector bool int m = vec_cmpge(x, y); + x = vec_sel(x, y, m); + + // need to test the existance of intrin "vflls" since vec_doublee + // is vec_doublee maps to wrong intrin "vfll". 
+ // see https://gcc.gnu.org/bugzilla/show_bug.cgi?id=100871 +#if defined(__GNUC__) && !defined(__clang__) + __vector long long i = vec_signed(__builtin_s390_vflls(x)); +#else + __vector long long i = vec_signed(vec_doublee(x)); +#endif + + return (int)vec_extract(i, 0); +} diff --git a/numpy/distutils/checks/cpu_vxe2.c b/numpy/distutils/checks/cpu_vxe2.c new file mode 100644 index 000000000..f36d57129 --- /dev/null +++ b/numpy/distutils/checks/cpu_vxe2.c @@ -0,0 +1,21 @@ +#if (__VEC__ < 10303) || (__ARCH__ < 13) + #error VXE2 not supported +#endif + +#include <vecintrin.h> + +int main(int argc, char **argv) +{ + int val; + __vector signed short large = { 'a', 'b', 'c', 'a', 'g', 'h', 'g', 'o' }; + __vector signed short search = { 'g', 'h', 'g', 'o' }; + __vector unsigned char len = { 0 }; + __vector unsigned char res = vec_search_string_cc(large, search, len, &val); + __vector float x = vec_xl(argc, (float*)argv); + __vector int i = vec_signed(x); + + i = vec_srdb(vec_sldb(i, i, 2), i, 3); + val += (int)vec_extract(res, 1); + val += vec_extract(i, 0); + return val; +} diff --git a/numpy/distutils/command/build.py b/numpy/distutils/command/build.py index a4fda537d..dc1ab3b9b 100644 --- a/numpy/distutils/command/build.py +++ b/numpy/distutils/command/build.py @@ -47,7 +47,8 @@ class build(old_build): - not part of dispatch-able features(--cpu-dispatch) - not supported by compiler or platform """ - self.simd_test = "BASELINE SSE2 SSE42 XOP FMA4 (FMA3 AVX2) AVX512F AVX512_SKX VSX VSX2 VSX3 NEON ASIMD" + self.simd_test = "BASELINE SSE2 SSE42 XOP FMA4 (FMA3 AVX2) AVX512F" \ + " AVX512_SKX VSX VSX2 VSX3 NEON ASIMD VX VXE VXE2" def finalize_options(self): build_scripts = self.build_scripts diff --git a/numpy/distutils/fcompiler/__init__.py b/numpy/distutils/fcompiler/__init__.py index c333517c0..d8dcfa899 100644 --- a/numpy/distutils/fcompiler/__init__.py +++ b/numpy/distutils/fcompiler/__init__.py @@ -743,8 +743,9 @@ _default_compilers = ( ('win32', ('gnu', 'intelv', 
'absoft', 'compaqv', 'intelev', 'gnu95', 'g95', 'intelvem', 'intelem', 'flang')), ('cygwin.*', ('gnu', 'intelv', 'absoft', 'compaqv', 'intelev', 'gnu95', 'g95')), - ('linux.*', ('gnu95', 'intel', 'lahey', 'pg', 'nv', 'absoft', 'nag', 'vast', 'compaq', - 'intele', 'intelem', 'gnu', 'g95', 'pathf95', 'nagfor', 'fujitsu')), + ('linux.*', ('arm', 'gnu95', 'intel', 'lahey', 'pg', 'nv', 'absoft', 'nag', + 'vast', 'compaq', 'intele', 'intelem', 'gnu', 'g95', + 'pathf95', 'nagfor', 'fujitsu')), ('darwin.*', ('gnu95', 'nag', 'nagfor', 'absoft', 'ibm', 'intel', 'gnu', 'g95', 'pg')), ('sunos.*', ('sun', 'gnu', 'gnu95', 'g95')), diff --git a/numpy/distutils/fcompiler/arm.py b/numpy/distutils/fcompiler/arm.py new file mode 100644 index 000000000..bc491d947 --- /dev/null +++ b/numpy/distutils/fcompiler/arm.py @@ -0,0 +1,73 @@ +from __future__ import division, absolute_import, print_function + +import sys + +from numpy.distutils.fcompiler import FCompiler, dummy_fortran_file +from sys import platform +from os.path import join, dirname, normpath + +compilers = ['ArmFlangCompiler'] + +import functools + +class ArmFlangCompiler(FCompiler): + compiler_type = 'arm' + description = 'Arm Compiler' + version_pattern = r'\s*Arm.*version (?P<version>[\d.-]+).*' + + ar_exe = 'lib.exe' + possible_executables = ['armflang'] + + executables = { + 'version_cmd': ["", "--version"], + 'compiler_f77': ["armflang", "-fPIC"], + 'compiler_fix': ["armflang", "-fPIC", "-ffixed-form"], + 'compiler_f90': ["armflang", "-fPIC"], + 'linker_so': ["armflang", "-fPIC", "-shared"], + 'archiver': ["ar", "-cr"], + 'ranlib': None + } + + pic_flags = ["-fPIC", "-DPIC"] + c_compiler = 'arm' + module_dir_switch = '-module ' # Don't remove ending space! 
+ + def get_libraries(self): + opt = FCompiler.get_libraries(self) + opt.extend(['flang', 'flangrti', 'ompstub']) + return opt + + @functools.lru_cache(maxsize=128) + def get_library_dirs(self): + """List of compiler library directories.""" + opt = FCompiler.get_library_dirs(self) + flang_dir = dirname(self.executables['compiler_f77'][0]) + opt.append(normpath(join(flang_dir, '..', 'lib'))) + + return opt + + def get_flags(self): + return [] + + def get_flags_free(self): + return [] + + def get_flags_debug(self): + return ['-g'] + + def get_flags_opt(self): + return ['-O3'] + + def get_flags_arch(self): + return [] + + def runtime_library_dir_option(self, dir): + return '-Wl,-rpath=%s' % dir + + +if __name__ == '__main__': + from distutils import log + log.set_verbosity(2) + from numpy.distutils import customized_fcompiler + print(customized_fcompiler(compiler='armflang').get_version()) + diff --git a/numpy/distutils/fcompiler/gnu.py b/numpy/distutils/fcompiler/gnu.py index 39178071d..d8143328e 100644 --- a/numpy/distutils/fcompiler/gnu.py +++ b/numpy/distutils/fcompiler/gnu.py @@ -324,7 +324,7 @@ class Gnu95FCompiler(GnuFCompiler): c_archs[c_archs.index("i386")] = "i686" # check the arches the Fortran compiler supports, and compare with # arch flags from C compiler - for arch in ["ppc", "i686", "x86_64", "ppc64"]: + for arch in ["ppc", "i686", "x86_64", "ppc64", "s390x"]: if _can_target(cmd, arch) and arch in c_archs: arch_flags.extend(["-arch", arch]) return arch_flags diff --git a/numpy/distutils/misc_util.py b/numpy/distutils/misc_util.py index b68b3af47..513be75db 100644 --- a/numpy/distutils/misc_util.py +++ b/numpy/distutils/misc_util.py @@ -31,8 +31,6 @@ def clean_up_temporary_directory(): atexit.register(clean_up_temporary_directory) -from numpy.compat import npy_load_module - __all__ = ['Configuration', 'get_numpy_include_dirs', 'default_config_dict', 'dict_append', 'appendpath', 'generate_config_py', 'get_cmd', 'allpath', 'get_mathlibs', @@ -44,7 +42,8 
@@ __all__ = ['Configuration', 'get_numpy_include_dirs', 'default_config_dict', 'dot_join', 'get_frame', 'minrelpath', 'njoin', 'is_sequence', 'is_string', 'as_list', 'gpaths', 'get_language', 'get_build_architecture', 'get_info', 'get_pkg_info', - 'get_num_build_jobs', 'sanitize_cxx_flags'] + 'get_num_build_jobs', 'sanitize_cxx_flags', + 'exec_mod_from_location'] class InstallableLib: """ @@ -945,9 +944,8 @@ class Configuration: try: setup_name = os.path.splitext(os.path.basename(setup_py))[0] n = dot_join(self.name, subpackage_name, setup_name) - setup_module = npy_load_module('_'.join(n.split('.')), - setup_py, - ('.py', 'U', 1)) + setup_module = exec_mod_from_location( + '_'.join(n.split('.')), setup_py) if not hasattr(setup_module, 'configuration'): if not self.options['assume_default_configuration']: self.warn('Assuming default configuration '\ @@ -1993,8 +1991,8 @@ class Configuration: name = os.path.splitext(os.path.basename(fn))[0] n = dot_join(self.name, name) try: - version_module = npy_load_module('_'.join(n.split('.')), - fn, info) + version_module = exec_mod_from_location( + '_'.join(n.split('.')), fn) except ImportError as e: self.warn(str(e)) version_module = None @@ -2491,3 +2489,14 @@ def sanitize_cxx_flags(cxxflags): return [flag for flag in cxxflags if flag not in _cxx_ignore_flags] +def exec_mod_from_location(modname, modfile): + ''' + Use importlib machinery to import a module `modname` from the file + `modfile`. Depending on the `spec.loader`, the module may not be + registered in sys.modules. 
+ ''' + spec = importlib.util.spec_from_file_location(modname, modfile) + foo = importlib.util.module_from_spec(spec) + spec.loader.exec_module(foo) + return foo + diff --git a/numpy/distutils/system_info.py b/numpy/distutils/system_info.py index 7f41bb07e..d5a1687da 100644 --- a/numpy/distutils/system_info.py +++ b/numpy/distutils/system_info.py @@ -501,7 +501,11 @@ def get_info(name, notfound_action=0): 1 - display warning message 2 - raise error """ - cl = {'atlas': atlas_info, # use lapack_opt or blas_opt instead + cl = {'armpl': armpl_info, + 'blas_armpl': blas_armpl_info, + 'lapack_armpl': lapack_armpl_info, + 'fftw3_armpl': fftw3_armpl_info, + 'atlas': atlas_info, # use lapack_opt or blas_opt instead 'atlas_threads': atlas_threads_info, # ditto 'atlas_blas': atlas_blas_info, 'atlas_blas_threads': atlas_blas_threads_info, @@ -1152,6 +1156,16 @@ class fftw3_info(fftw_info): 'macros':[('SCIPY_FFTW3_H', None)]}, ] + +class fftw3_armpl_info(fftw_info): + section = 'fftw3' + dir_env_var = 'ARMPL_DIR' + notfounderror = FFTWNotFoundError + ver_info = [{'name': 'fftw3', + 'libs': ['armpl_lp64_mp'], + 'includes': ['fftw3.h'], + 'macros': [('SCIPY_FFTW3_H', None)]}] + class dfftw_info(fftw_info): section = 'fftw' @@ -1311,6 +1325,31 @@ class blas_mkl_info(mkl_info): pass +class armpl_info(system_info): + section = 'armpl' + dir_env_var = 'ARMPL_DIR' + _lib_armpl = ['armpl_lp64_mp'] + + def calc_info(self): + lib_dirs = self.get_lib_dirs() + incl_dirs = self.get_include_dirs() + armpl_libs = self.get_libs('armpl_libs', self._lib_armpl) + info = self.check_libs2(lib_dirs, armpl_libs) + if info is None: + return + dict_append(info, + define_macros=[('SCIPY_MKL_H', None), + ('HAVE_CBLAS', None)], + include_dirs=incl_dirs) + self.set_info(**info) + +class lapack_armpl_info(armpl_info): + pass + +class blas_armpl_info(armpl_info): + pass + + class atlas_info(system_info): section = 'atlas' dir_env_var = 'ATLAS' @@ -1748,9 +1787,16 @@ class lapack_opt_info(system_info): 
notfounderror = LapackNotFoundError # List of all known LAPACK libraries, in the default order - lapack_order = ['mkl', 'openblas', 'flame', + lapack_order = ['armpl', 'mkl', 'openblas', 'flame', 'accelerate', 'atlas', 'lapack'] order_env_var_name = 'NPY_LAPACK_ORDER' + + def _calc_info_armpl(self): + info = get_info('lapack_armpl') + if info: + self.set_info(**info) + return True + return False def _calc_info_mkl(self): info = get_info('lapack_mkl') @@ -1925,9 +1971,16 @@ class blas_opt_info(system_info): notfounderror = BlasNotFoundError # List of all known BLAS libraries, in the default order - blas_order = ['mkl', 'blis', 'openblas', + blas_order = ['armpl', 'mkl', 'blis', 'openblas', 'accelerate', 'atlas', 'blas'] order_env_var_name = 'NPY_BLAS_ORDER' + + def _calc_info_armpl(self): + info = get_info('blas_armpl') + if info: + self.set_info(**info) + return True + return False def _calc_info_mkl(self): info = get_info('blas_mkl') diff --git a/numpy/distutils/tests/test_ccompiler_opt.py b/numpy/distutils/tests/test_ccompiler_opt.py index 1b27ab07c..6f9970c75 100644 --- a/numpy/distutils/tests/test_ccompiler_opt.py +++ b/numpy/distutils/tests/test_ccompiler_opt.py @@ -32,6 +32,7 @@ arch_compilers = dict( ppc64le = ("gcc", "clang"), armhf = ("gcc", "clang"), aarch64 = ("gcc", "clang"), + s390x = ("gcc", "clang"), noarch = ("gcc",) ) @@ -382,18 +383,19 @@ class _Test_CCompilerOpt: if o == "native" and self.cc_name() == "msvc": continue self.expect(o, - trap_files=".*cpu_(sse|vsx|neon).c", - x86="", ppc64="", armhf="" + trap_files=".*cpu_(sse|vsx|neon|vx).c", + x86="", ppc64="", armhf="", s390x="" ) self.expect(o, - trap_files=".*cpu_(sse3|vsx2|neon_vfpv4).c", + trap_files=".*cpu_(sse3|vsx2|neon_vfpv4|vxe).c", x86="sse sse2", ppc64="vsx", armhf="neon neon_fp16", - aarch64="", ppc64le="" + aarch64="", ppc64le="", s390x="vx" ) self.expect(o, trap_files=".*cpu_(popcnt|vsx3).c", x86="sse .* sse41", ppc64="vsx vsx2", - armhf="neon neon_fp16 .* asimd .*" + armhf="neon 
neon_fp16 .* asimd .*", + s390x="vx vxe vxe2" ) self.expect(o, x86_gcc=".* xop fma4 .* avx512f .* avx512_knl avx512_knm avx512_skx .*", @@ -403,13 +405,14 @@ class _Test_CCompilerOpt: # in msvc, avx512_knl avx512_knm aren't supported x86_msvc=".* xop fma4 .* avx512f .* avx512_skx .*", armhf=".* asimd asimdhp asimddp .*", - ppc64="vsx vsx2 vsx3.*" + ppc64="vsx vsx2 vsx3.*", + s390x="vx vxe vxe2.*" ) # min self.expect("min", x86="sse sse2", x64="sse sse2 sse3", armhf="", aarch64="neon neon_fp16 .* asimd", - ppc64="", ppc64le="vsx vsx2" + ppc64="", ppc64le="vsx vsx2", s390x="" ) self.expect( "min", trap_files=".*cpu_(sse2|vsx2).c", @@ -420,7 +423,7 @@ class _Test_CCompilerOpt: try: self.expect("native", trap_flags=".*(-march=native|-xHost|/QxHost).*", - x86=".*", ppc64=".*", armhf=".*" + x86=".*", ppc64=".*", armhf=".*", s390x=".*" ) if self.march() != "unknown": raise AssertionError( @@ -432,14 +435,15 @@ class _Test_CCompilerOpt: def test_flags(self): self.expect_flags( - "sse sse2 vsx vsx2 neon neon_fp16", + "sse sse2 vsx vsx2 neon neon_fp16 vx vxe", x86_gcc="-msse -msse2", x86_icc="-msse -msse2", x86_iccw="/arch:SSE2", x86_msvc="/arch:SSE2" if self.march() == "x86" else "", ppc64_gcc= "-mcpu=power8", ppc64_clang="-maltivec -mvsx -mpower8-vector", armhf_gcc="-mfpu=neon-fp16 -mfp16-format=ieee", - aarch64="" + aarch64="", + s390x="-mzvector -march=arch12" ) # testing normalize -march self.expect_flags( @@ -463,6 +467,10 @@ class _Test_CCompilerOpt: "asimddp asimdhp asimdfhm", aarch64_gcc=r"-march=armv8.2-a\+dotprod\+fp16\+fp16fml" ) + self.expect_flags( + "vx vxe vxe2", + s390x=r"-mzvector -march=arch13" + ) def test_targets_exceptions(self): for targets in ( @@ -484,7 +492,7 @@ class _Test_CCompilerOpt: try: self.expect_targets( targets, - x86="", armhf="", ppc64="" + x86="", armhf="", ppc64="", s390x="" ) if self.march() != "unknown": raise AssertionError( @@ -496,26 +504,26 @@ class _Test_CCompilerOpt: def test_targets_syntax(self): for targets in ( - "/*@targets 
$keep_baseline sse vsx neon*/", - "/*@targets,$keep_baseline,sse,vsx,neon*/", - "/*@targets*$keep_baseline*sse*vsx*neon*/", + "/*@targets $keep_baseline sse vsx neon vx*/", + "/*@targets,$keep_baseline,sse,vsx,neon vx*/", + "/*@targets*$keep_baseline*sse*vsx*neon*vx*/", """ /* ** @targets - ** $keep_baseline, sse vsx,neon + ** $keep_baseline, sse vsx,neon, vx */ """, """ /* - ************@targets************* - ** $keep_baseline, sse vsx, neon - ********************************* + ************@targets**************** + ** $keep_baseline, sse vsx, neon, vx + ************************************ */ """, """ /* /////////////@targets///////////////// - //$keep_baseline//sse//vsx//neon + //$keep_baseline//sse//vsx//neon//vx ///////////////////////////////////// */ """, @@ -523,11 +531,11 @@ class _Test_CCompilerOpt: /* @targets $keep_baseline - SSE VSX NEON*/ + SSE VSX NEON VX*/ """ ) : self.expect_targets(targets, - x86="sse", ppc64="vsx", armhf="neon", unknown="" + x86="sse", ppc64="vsx", armhf="neon", s390x="vx", unknown="" ) def test_targets(self): @@ -538,10 +546,12 @@ class _Test_CCompilerOpt: sse sse2 sse41 avx avx2 avx512f vsx vsx2 vsx3 neon neon_fp16 asimdhp asimddp + vx vxe vxe2 */ """, - baseline="avx vsx2 asimd", - x86="avx512f avx2", armhf="asimddp asimdhp", ppc64="vsx3" + baseline="avx vsx2 asimd vx vxe", + x86="avx512f avx2", armhf="asimddp asimdhp", ppc64="vsx3", + s390x="vxe2" ) # test skipping non-dispatch features self.expect_targets( @@ -550,10 +560,11 @@ class _Test_CCompilerOpt: sse41 avx avx2 avx512f vsx2 vsx3 asimd asimdhp asimddp + vx vxe vxe2 */ """, - baseline="", dispatch="sse41 avx2 vsx2 asimd asimddp", - x86="avx2 sse41", armhf="asimddp asimd", ppc64="vsx2" + baseline="", dispatch="sse41 avx2 vsx2 asimd asimddp vxe2", + x86="avx2 sse41", armhf="asimddp asimd", ppc64="vsx2", s390x="vxe2" ) # test skipping features that not supported self.expect_targets( @@ -562,11 +573,13 @@ class _Test_CCompilerOpt: sse2 sse41 avx2 avx512f vsx2 vsx3 neon 
asimdhp asimddp + vx vxe vxe2 */ """, baseline="", - trap_files=".*(avx2|avx512f|vsx3|asimddp).c", - x86="sse41 sse2", ppc64="vsx2", armhf="asimdhp neon" + trap_files=".*(avx2|avx512f|vsx3|asimddp|vxe2).c", + x86="sse41 sse2", ppc64="vsx2", armhf="asimdhp neon", + s390x="vxe vx" ) # test skipping features that implies each other self.expect_targets( @@ -598,14 +611,16 @@ class _Test_CCompilerOpt: sse2 sse42 avx2 avx512f vsx2 vsx3 neon neon_vfpv4 asimd asimddp + vx vxe vxe2 */ """, - baseline="sse41 avx2 vsx2 asimd vsx3", + baseline="sse41 avx2 vsx2 asimd vsx3 vxe", x86="avx512f avx2 sse42 sse2", ppc64="vsx3 vsx2", armhf="asimddp asimd neon_vfpv4 neon", # neon, neon_vfpv4, asimd implies each other - aarch64="asimddp asimd" + aarch64="asimddp asimd", + s390x="vxe2 vxe vx" ) # 'keep_sort', leave the sort as-is self.expect_targets( @@ -615,13 +630,15 @@ class _Test_CCompilerOpt: avx512f sse42 avx2 sse2 vsx2 vsx3 asimd neon neon_vfpv4 asimddp + vxe vxe2 */ """, x86="avx512f sse42 avx2 sse2", ppc64="vsx2 vsx3", armhf="asimd neon neon_vfpv4 asimddp", # neon, neon_vfpv4, asimd implies each other - aarch64="asimd asimddp" + aarch64="asimd asimddp", + s390x="vxe vxe2" ) # 'autovec', skipping features that can't be # vectorized by the compiler @@ -736,11 +753,13 @@ class _Test_CCompilerOpt: (sse41 avx sse42) (sse3 avx2 avx512f) (vsx vsx3 vsx2) (asimddp neon neon_vfpv4 asimd asimdhp) + (vx vxe vxe2) */ """, x86="avx avx512f", ppc64="vsx3", armhf=r"\(asimdhp asimddp\)", + s390x="vxe2" ) # test compiler variety and avoiding duplicating self.expect_targets( diff --git a/numpy/f2py/__init__.py b/numpy/f2py/__init__.py index f147f1b97..b1cb74fae 100644 --- a/numpy/f2py/__init__.py +++ b/numpy/f2py/__init__.py @@ -145,7 +145,7 @@ def get_include(): Notes ----- - .. versionadded:: 1.22.0 + .. 
versionadded:: 1.21.1 Unless the build system you are using has specific support for f2py, building a Python extension using a ``.pyf`` signature file is a two-step diff --git a/numpy/f2py/__init__.pyi b/numpy/f2py/__init__.pyi index e52e12bbd..6e3a82cf8 100644 --- a/numpy/f2py/__init__.pyi +++ b/numpy/f2py/__init__.pyi @@ -1,28 +1,29 @@ import os import subprocess -from typing import Literal as L, Any, List, Iterable, Dict, overload, TypedDict +from collections.abc import Iterable +from typing import Literal as L, Any, overload, TypedDict from numpy._pytesttester import PytestTester class _F2PyDictBase(TypedDict): - csrc: List[str] - h: List[str] + csrc: list[str] + h: list[str] class _F2PyDict(_F2PyDictBase, total=False): - fsrc: List[str] - ltx: List[str] + fsrc: list[str] + ltx: list[str] -__all__: List[str] -__path__: List[str] +__all__: list[str] +__path__: list[str] test: PytestTester -def run_main(comline_list: Iterable[str]) -> Dict[str, _F2PyDict]: ... +def run_main(comline_list: Iterable[str]) -> dict[str, _F2PyDict]: ... 
@overload def compile( # type: ignore[misc] source: str | bytes, modulename: str = ..., - extra_args: str | List[str] = ..., + extra_args: str | list[str] = ..., verbose: bool = ..., source_fn: None | str | bytes | os.PathLike[Any] = ..., extension: L[".f", ".f90"] = ..., @@ -32,7 +33,7 @@ def compile( # type: ignore[misc] def compile( source: str | bytes, modulename: str = ..., - extra_args: str | List[str] = ..., + extra_args: str | list[str] = ..., verbose: bool = ..., source_fn: None | str | bytes | os.PathLike[Any] = ..., extension: L[".f", ".f90"] = ..., diff --git a/numpy/f2py/cfuncs.py b/numpy/f2py/cfuncs.py index 1d9236dcd..528c4adee 100644 --- a/numpy/f2py/cfuncs.py +++ b/numpy/f2py/cfuncs.py @@ -845,20 +845,26 @@ int_from_pyobj(int* v, PyObject *obj, const char *errmess) return !(*v == -1 && PyErr_Occurred()); } - if (PyComplex_Check(obj)) + if (PyComplex_Check(obj)) { + PyErr_Clear(); tmp = PyObject_GetAttrString(obj,\"real\"); - else if (PyBytes_Check(obj) || PyUnicode_Check(obj)) + } + else if (PyBytes_Check(obj) || PyUnicode_Check(obj)) { /*pass*/; - else if (PySequence_Check(obj)) + } + else if (PySequence_Check(obj)) { + PyErr_Clear(); tmp = PySequence_GetItem(obj, 0); + } + if (tmp) { - PyErr_Clear(); if (int_from_pyobj(v, tmp, errmess)) { Py_DECREF(tmp); return 1; } Py_DECREF(tmp); } + { PyObject* err = PyErr_Occurred(); if (err == NULL) { @@ -888,15 +894,19 @@ long_from_pyobj(long* v, PyObject *obj, const char *errmess) { return !(*v == -1 && PyErr_Occurred()); } - if (PyComplex_Check(obj)) + if (PyComplex_Check(obj)) { + PyErr_Clear(); tmp = PyObject_GetAttrString(obj,\"real\"); - else if (PyBytes_Check(obj) || PyUnicode_Check(obj)) + } + else if (PyBytes_Check(obj) || PyUnicode_Check(obj)) { /*pass*/; - else if (PySequence_Check(obj)) - tmp = PySequence_GetItem(obj,0); + } + else if (PySequence_Check(obj)) { + PyErr_Clear(); + tmp = PySequence_GetItem(obj, 0); + } if (tmp) { - PyErr_Clear(); if (long_from_pyobj(v, tmp, errmess)) { 
Py_DECREF(tmp); return 1; @@ -934,14 +944,19 @@ long_long_from_pyobj(long_long* v, PyObject *obj, const char *errmess) return !(*v == -1 && PyErr_Occurred()); } - if (PyComplex_Check(obj)) + if (PyComplex_Check(obj)) { + PyErr_Clear(); tmp = PyObject_GetAttrString(obj,\"real\"); - else if (PyBytes_Check(obj) || PyUnicode_Check(obj)) + } + else if (PyBytes_Check(obj) || PyUnicode_Check(obj)) { /*pass*/; - else if (PySequence_Check(obj)) - tmp = PySequence_GetItem(obj,0); - if (tmp) { + } + else if (PySequence_Check(obj)) { PyErr_Clear(); + tmp = PySequence_GetItem(obj, 0); + } + + if (tmp) { if (long_long_from_pyobj(v, tmp, errmess)) { Py_DECREF(tmp); return 1; @@ -1001,14 +1016,20 @@ double_from_pyobj(double* v, PyObject *obj, const char *errmess) Py_DECREF(tmp); return !(*v == -1.0 && PyErr_Occurred()); } - if (PyComplex_Check(obj)) + + if (PyComplex_Check(obj)) { + PyErr_Clear(); tmp = PyObject_GetAttrString(obj,\"real\"); - else if (PyBytes_Check(obj) || PyUnicode_Check(obj)) + } + else if (PyBytes_Check(obj) || PyUnicode_Check(obj)) { /*pass*/; - else if (PySequence_Check(obj)) - tmp = PySequence_GetItem(obj,0); - if (tmp) { + } + else if (PySequence_Check(obj)) { PyErr_Clear(); + tmp = PySequence_GetItem(obj, 0); + } + + if (tmp) { if (double_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;} Py_DECREF(tmp); } diff --git a/numpy/f2py/crackfortran.py b/numpy/f2py/crackfortran.py index b02eb68b7..10b210d14 100755 --- a/numpy/f2py/crackfortran.py +++ b/numpy/f2py/crackfortran.py @@ -574,11 +574,16 @@ beginpattern90 = re.compile( groupends = (r'end|endprogram|endblockdata|endmodule|endpythonmodule|' r'endinterface|endsubroutine|endfunction') endpattern = re.compile( - beforethisafter % ('', groupends, groupends, r'[\w\s]*'), re.I), 'end' -endifs = r'(end\s*(if|do|where|select|while|forall|associate|block|critical|enum|team))|(module\s*procedure)' + beforethisafter % ('', groupends, groupends, r'.*'), re.I), 'end' +endifs = 
r'end\s*(if|do|where|select|while|forall|associate|block|' + \ + r'critical|enum|team)' endifpattern = re.compile( beforethisafter % (r'[\w]*?', endifs, endifs, r'[\w\s]*'), re.I), 'endif' # +moduleprocedures = r'module\s*procedure' +moduleprocedurepattern = re.compile( + beforethisafter % ('', moduleprocedures, moduleprocedures, r'.*'), re.I), \ + 'moduleprocedure' implicitpattern = re.compile( beforethisafter % ('', 'implicit', 'implicit', '.*'), re.I), 'implicit' dimensionpattern = re.compile(beforethisafter % ( @@ -727,7 +732,8 @@ def crackline(line, reset=0): callpattern, usepattern, containspattern, entrypattern, f2pyenhancementspattern, - multilinepattern + multilinepattern, + moduleprocedurepattern ]: m = pat[0].match(line) if m: @@ -797,6 +803,8 @@ def crackline(line, reset=0): expectbegin = 0 elif pat[1] == 'endif': pass + elif pat[1] == 'moduleprocedure': + analyzeline(m, pat[1], line) elif pat[1] == 'contains': if ignorecontains: return @@ -886,6 +894,9 @@ selectpattern = re.compile( r'\s*(?P<this>(@\(@.*?@\)@|\*[\d*]+|\*\s*@\(@.*?@\)@|))(?P<after>.*)\Z', re.I) nameargspattern = re.compile( r'\s*(?P<name>\b[\w$]+\b)\s*(@\(@\s*(?P<args>[\w\s,]*)\s*@\)@|)\s*((result(\s*@\(@\s*(?P<result>\b[\w$]+\b)\s*@\)@|))|(bind\s*@\(@\s*(?P<bind>.*)\s*@\)@))*\s*\Z', re.I) +operatorpattern = re.compile( + r'\s*(?P<scheme>(operator|assignment))' + r'@\(@\s*(?P<name>[^)]+)\s*@\)@\s*\Z', re.I) callnameargspattern = re.compile( r'\s*(?P<name>\b[\w$]+\b)\s*@\(@\s*(?P<args>.*)\s*@\)@\s*\Z', re.I) real16pattern = re.compile( @@ -908,6 +919,10 @@ def _resolvenameargspattern(line): m1 = nameargspattern.match(line) if m1: return m1.group('name'), m1.group('args'), m1.group('result'), m1.group('bind') + m1 = operatorpattern.match(line) + if m1: + name = m1.group('scheme') + '(' + m1.group('name') + ')' + return name, [], None, None m1 = callnameargspattern.match(line) if m1: return m1.group('name'), m1.group('args'), None, None @@ -1151,6 +1166,9 @@ def analyzeline(m, case, line): 
continue else: k = rmbadname1(m1.group('name')) + if case in ['public', 'private'] and \ + (k == 'operator' or k == 'assignment'): + k += m1.group('after') if k not in edecl: edecl[k] = {} if case == 'dimension': @@ -1193,6 +1211,9 @@ def analyzeline(m, case, line): groupcache[groupcounter]['vars'] = edecl if last_name is not None: previous_context = ('variable', last_name, groupcounter) + elif case == 'moduleprocedure': + groupcache[groupcounter]['implementedby'] = \ + [x.strip() for x in m.group('after').split(',')] elif case == 'parameter': edecl = groupcache[groupcounter]['vars'] ll = m.group('after').strip()[1:-1] @@ -2105,7 +2126,8 @@ def analyzebody(block, args, tab=''): else: as_ = args b = postcrack(b, as_, tab=tab + '\t') - if b['block'] in ['interface', 'abstract interface'] and not b['body']: + if b['block'] in ['interface', 'abstract interface'] and \ + not b['body'] and not b['implementedby']: if 'f2pyenhancements' not in b: continue if b['block'].replace(' ', '') == 'pythonmodule': @@ -2591,7 +2613,10 @@ def analyzevars(block): if dsize.contains(s): try: a, b = dsize.linear_solve(s) - solve_v = lambda s: (s - b) / a + + def solve_v(s, a=a, b=b): + return (s - b) / a + all_symbols = set(a.symbols()) all_symbols.update(b.symbols()) except RuntimeError as msg: diff --git a/numpy/f2py/f2py2e.py b/numpy/f2py/f2py2e.py index 4d79c304a..bb5b32878 100755 --- a/numpy/f2py/f2py2e.py +++ b/numpy/f2py/f2py2e.py @@ -82,6 +82,9 @@ Options: file <modulename>module.c or extension module <modulename>. Default is 'untitled'. + '-include<header>' Writes additional headers in the C wrapper, can be passed + multiple times, generates #include <header> each time. + --[no-]lower Do [not] lower the cases in <fortran files>. By default, --lower is assumed with -h key, and --no-lower without -h key. 
diff --git a/numpy/f2py/src/fortranobject.c b/numpy/f2py/src/fortranobject.c index 0b32137ef..c96378170 100644 --- a/numpy/f2py/src/fortranobject.c +++ b/numpy/f2py/src/fortranobject.c @@ -702,15 +702,14 @@ check_and_fix_dimensions(const PyArrayObject *arr, const int rank, npy_intp *dims); static int -count_negative_dimensions(const int rank, const npy_intp *dims) +find_first_negative_dimension(const int rank, const npy_intp *dims) { - int i = 0, r = 0; - while (i < rank) { - if (dims[i] < 0) - ++r; - ++i; + for (int i = 0; i < rank; ++i) { + if (dims[i] < 0) { + return i; + } } - return r; + return -1; } #ifdef DEBUG_COPY_ND_ARRAY @@ -795,15 +794,12 @@ array_from_pyobj(const int type_num, npy_intp *dims, const int rank, ((intent & F2PY_INTENT_CACHE) && (obj == Py_None)) || ((intent & F2PY_OPTIONAL) && (obj == Py_None))) { /* intent(cache), optional, intent(hide) */ - if (count_negative_dimensions(rank, dims) > 0) { - int i; - strcpy(mess, - "failed to create intent(cache|hide)|optional array" - "-- must have defined dimensions but got ("); - for (i = 0; i < rank; ++i) - sprintf(mess + strlen(mess), "%" NPY_INTP_FMT ",", dims[i]); - strcat(mess, ")"); - PyErr_SetString(PyExc_ValueError, mess); + int i = find_first_negative_dimension(rank, dims); + if (i >= 0) { + PyErr_Format(PyExc_ValueError, + "failed to create intent(cache|hide)|optional array" + " -- must have defined dimensions, but dims[%d] = %" + NPY_INTP_FMT, i, dims[i]); return NULL; } arr = (PyArrayObject *)PyArray_New(&PyArray_Type, rank, dims, type_num, diff --git a/numpy/f2py/tests/src/abstract_interface/foo.f90 b/numpy/f2py/tests/src/abstract_interface/foo.f90 new file mode 100644 index 000000000..76d16aae2 --- /dev/null +++ b/numpy/f2py/tests/src/abstract_interface/foo.f90 @@ -0,0 +1,34 @@ +module ops_module + + abstract interface + subroutine op(x, y, z) + integer, intent(in) :: x, y + integer, intent(out) :: z + end subroutine + end interface + +contains + + subroutine foo(x, y, r1, r2) + integer, 
intent(in) :: x, y + integer, intent(out) :: r1, r2 + procedure (op) add1, add2 + procedure (op), pointer::p + p=>add1 + call p(x, y, r1) + p=>add2 + call p(x, y, r2) + end subroutine +end module + +subroutine add1(x, y, z) + integer, intent(in) :: x, y + integer, intent(out) :: z + z = x + y +end subroutine + +subroutine add2(x, y, z) + integer, intent(in) :: x, y + integer, intent(out) :: z + z = x + 2 * y +end subroutine diff --git a/numpy/f2py/tests/src/abstract_interface/gh18403_mod.f90 b/numpy/f2py/tests/src/abstract_interface/gh18403_mod.f90 new file mode 100644 index 000000000..36791e469 --- /dev/null +++ b/numpy/f2py/tests/src/abstract_interface/gh18403_mod.f90 @@ -0,0 +1,6 @@ +module test + abstract interface + subroutine foo() + end subroutine + end interface +end module test diff --git a/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c b/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c index ea47e0555..c8ae7b9dc 100644 --- a/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c +++ b/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c @@ -202,7 +202,6 @@ PyMODINIT_FUNC PyInit_test_array_from_pyobj_ext(void) { ADDCONST("ENSUREARRAY", NPY_ARRAY_ENSUREARRAY); ADDCONST("ALIGNED", NPY_ARRAY_ALIGNED); ADDCONST("WRITEABLE", NPY_ARRAY_WRITEABLE); - ADDCONST("UPDATEIFCOPY", NPY_ARRAY_UPDATEIFCOPY); ADDCONST("WRITEBACKIFCOPY", NPY_ARRAY_WRITEBACKIFCOPY); ADDCONST("BEHAVED", NPY_ARRAY_BEHAVED); diff --git a/numpy/f2py/tests/src/block_docstring/foo.f b/numpy/f2py/tests/src/block_docstring/foo.f new file mode 100644 index 000000000..c8315f12c --- /dev/null +++ b/numpy/f2py/tests/src/block_docstring/foo.f @@ -0,0 +1,6 @@ + SUBROUTINE FOO() + INTEGER BAR(2, 3) + + COMMON /BLOCK/ BAR + RETURN + END diff --git a/numpy/f2py/tests/src/callback/foo.f b/numpy/f2py/tests/src/callback/foo.f new file mode 100644 index 000000000..ba397bb38 --- /dev/null +++ b/numpy/f2py/tests/src/callback/foo.f @@ -0,0 +1,62 @@ + subroutine t(fun,a) + integer a +cf2py intent(out) a + external 
fun + call fun(a) + end + + subroutine func(a) +cf2py intent(in,out) a + integer a + a = a + 11 + end + + subroutine func0(a) +cf2py intent(out) a + integer a + a = 11 + end + + subroutine t2(a) +cf2py intent(callback) fun + integer a +cf2py intent(out) a + external fun + call fun(a) + end + + subroutine string_callback(callback, a) + external callback + double precision callback + double precision a + character*1 r +cf2py intent(out) a + r = 'r' + a = callback(r) + end + + subroutine string_callback_array(callback, cu, lencu, a) + external callback + integer callback + integer lencu + character*8 cu(lencu) + integer a +cf2py intent(out) a + + a = callback(cu, lencu) + end + + subroutine hidden_callback(a, r) + external global_f +cf2py intent(callback, hide) global_f + integer a, r, global_f +cf2py intent(out) r + r = global_f(a) + end + + subroutine hidden_callback2(a, r) + external global_f + integer a, r, global_f +cf2py intent(out) r + r = global_f(a) + end diff --git a/numpy/f2py/tests/src/callback/gh17797.f90 b/numpy/f2py/tests/src/callback/gh17797.f90 new file mode 100644 index 000000000..49853afd7 --- /dev/null +++ b/numpy/f2py/tests/src/callback/gh17797.f90 @@ -0,0 +1,7 @@ +function gh17797(f, y) result(r) + external f + integer(8) :: r, f + integer(8), dimension(:) :: y + r = f(0) + r = r + sum(y) +end function gh17797 diff --git a/numpy/f2py/tests/src/callback/gh18335.f90 b/numpy/f2py/tests/src/callback/gh18335.f90 new file mode 100644 index 000000000..92b6d7540 --- /dev/null +++ b/numpy/f2py/tests/src/callback/gh18335.f90 @@ -0,0 +1,17 @@ + ! When gh18335_workaround is defined as an extension, + ! the issue cannot be reproduced. + !subroutine gh18335_workaround(f, y) + ! implicit none + ! external f + ! integer(kind=1) :: y(1) + ! 
call f(y) + !end subroutine gh18335_workaround + + function gh18335(f) result (r) + implicit none + external f + integer(kind=1) :: y(1), r + y(1) = 123 + call f(y) + r = y(1) + end function gh18335 diff --git a/numpy/f2py/tests/src/crackfortran/foo_deps.f90 b/numpy/f2py/tests/src/crackfortran/foo_deps.f90 new file mode 100644 index 000000000..e327b25c8 --- /dev/null +++ b/numpy/f2py/tests/src/crackfortran/foo_deps.f90 @@ -0,0 +1,6 @@ +module foo + type bar + character(len = 4) :: text + end type bar + type(bar), parameter :: abar = bar('abar') +end module foo diff --git a/numpy/f2py/tests/src/crackfortran/gh15035.f b/numpy/f2py/tests/src/crackfortran/gh15035.f new file mode 100644 index 000000000..1bb2e6745 --- /dev/null +++ b/numpy/f2py/tests/src/crackfortran/gh15035.f @@ -0,0 +1,16 @@ + subroutine subb(k) + real(8), intent(inout) :: k(:) + k=k+1 + endsubroutine + + subroutine subc(w,k) + real(8), intent(in) :: w(:) + real(8), intent(out) :: k(size(w)) + k=w+1 + endsubroutine + + function t0(value) + character value + character t0 + t0 = value + endfunction diff --git a/numpy/f2py/tests/src/crackfortran/gh17859.f b/numpy/f2py/tests/src/crackfortran/gh17859.f new file mode 100644 index 000000000..995953845 --- /dev/null +++ b/numpy/f2py/tests/src/crackfortran/gh17859.f @@ -0,0 +1,12 @@ + integer(8) function external_as_statement(fcn) + implicit none + external fcn + integer(8) :: fcn + external_as_statement = fcn(0) + end + + integer(8) function external_as_attribute(fcn) + implicit none + integer(8), external :: fcn + external_as_attribute = fcn(0) + end diff --git a/numpy/f2py/tests/src/crackfortran/gh2848.f90 b/numpy/f2py/tests/src/crackfortran/gh2848.f90 new file mode 100644 index 000000000..31ea9327a --- /dev/null +++ b/numpy/f2py/tests/src/crackfortran/gh2848.f90 @@ -0,0 +1,13 @@ + subroutine gh2848( & + ! first 2 parameters + par1, par2,& + ! 
last 2 parameters + par3, par4) + + integer, intent(in) :: par1, par2 + integer, intent(out) :: par3, par4 + + par3 = par1 + par4 = par2 + + end subroutine gh2848 diff --git a/numpy/f2py/tests/src/crackfortran/operators.f90 b/numpy/f2py/tests/src/crackfortran/operators.f90 new file mode 100644 index 000000000..1d060a3d2 --- /dev/null +++ b/numpy/f2py/tests/src/crackfortran/operators.f90 @@ -0,0 +1,49 @@ +module foo + type bar + character(len = 32) :: item + end type bar + interface operator(.item.) + module procedure item_int, item_real + end interface operator(.item.) + interface operator(==) + module procedure items_are_equal + end interface operator(==) + interface assignment(=) + module procedure get_int, get_real + end interface assignment(=) +contains + function item_int(val) result(elem) + integer, intent(in) :: val + type(bar) :: elem + + write(elem%item, "(I32)") val + end function item_int + + function item_real(val) result(elem) + real, intent(in) :: val + type(bar) :: elem + + write(elem%item, "(1PE32.12)") val + end function item_real + + function items_are_equal(val1, val2) result(equal) + type(bar), intent(in) :: val1, val2 + logical :: equal + + equal = (val1%item == val2%item) + end function items_are_equal + + subroutine get_real(rval, item) + real, intent(out) :: rval + type(bar), intent(in) :: item + + read(item%item, *) rval + end subroutine get_real + + subroutine get_int(rval, item) + integer, intent(out) :: rval + type(bar), intent(in) :: item + + read(item%item, *) rval + end subroutine get_int +end module foo diff --git a/numpy/f2py/tests/src/crackfortran/privatemod.f90 b/numpy/f2py/tests/src/crackfortran/privatemod.f90 new file mode 100644 index 000000000..2674c2147 --- /dev/null +++ b/numpy/f2py/tests/src/crackfortran/privatemod.f90 @@ -0,0 +1,11 @@ +module foo + private + integer :: a + public :: setA + integer :: b +contains + subroutine setA(v) + integer, intent(in) :: v + a = v + end subroutine setA +end module foo diff --git 
a/numpy/f2py/tests/src/crackfortran/publicmod.f90 b/numpy/f2py/tests/src/crackfortran/publicmod.f90 new file mode 100644 index 000000000..1db76e3fe --- /dev/null +++ b/numpy/f2py/tests/src/crackfortran/publicmod.f90 @@ -0,0 +1,10 @@ +module foo + public + integer, private :: a + public :: setA +contains + subroutine setA(v) + integer, intent(in) :: v + a = v + end subroutine setA +end module foo diff --git a/numpy/f2py/tests/src/quoted_character/foo.f b/numpy/f2py/tests/src/quoted_character/foo.f new file mode 100644 index 000000000..9dc1cfa44 --- /dev/null +++ b/numpy/f2py/tests/src/quoted_character/foo.f @@ -0,0 +1,14 @@ + SUBROUTINE FOO(OUT1, OUT2, OUT3, OUT4, OUT5, OUT6) + CHARACTER SINGLE, DOUBLE, SEMICOL, EXCLA, OPENPAR, CLOSEPAR + PARAMETER (SINGLE="'", DOUBLE='"', SEMICOL=';', EXCLA="!", + 1 OPENPAR="(", CLOSEPAR=")") + CHARACTER OUT1, OUT2, OUT3, OUT4, OUT5, OUT6 +Cf2py intent(out) OUT1, OUT2, OUT3, OUT4, OUT5, OUT6 + OUT1 = SINGLE + OUT2 = DOUBLE + OUT3 = SEMICOL + OUT4 = EXCLA + OUT5 = OPENPAR + OUT6 = CLOSEPAR + RETURN + END diff --git a/numpy/f2py/tests/src/return_character/foo77.f b/numpy/f2py/tests/src/return_character/foo77.f new file mode 100644 index 000000000..facae1016 --- /dev/null +++ b/numpy/f2py/tests/src/return_character/foo77.f @@ -0,0 +1,45 @@ + function t0(value) + character value + character t0 + t0 = value + end + function t1(value) + character*1 value + character*1 t1 + t1 = value + end + function t5(value) + character*5 value + character*5 t5 + t5 = value + end + function ts(value) + character*(*) value + character*(*) ts + ts = value + end + + subroutine s0(t0,value) + character value + character t0 +cf2py intent(out) t0 + t0 = value + end + subroutine s1(t1,value) + character*1 value + character*1 t1 +cf2py intent(out) t1 + t1 = value + end + subroutine s5(t5,value) + character*5 value + character*5 t5 +cf2py intent(out) t5 + t5 = value + end + subroutine ss(ts,value) + character*(*) value + character*10 ts +cf2py intent(out) ts + 
ts = value + end diff --git a/numpy/f2py/tests/src/return_character/foo90.f90 b/numpy/f2py/tests/src/return_character/foo90.f90 new file mode 100644 index 000000000..36182bcf2 --- /dev/null +++ b/numpy/f2py/tests/src/return_character/foo90.f90 @@ -0,0 +1,48 @@ +module f90_return_char + contains + function t0(value) + character :: value + character :: t0 + t0 = value + end function t0 + function t1(value) + character(len=1) :: value + character(len=1) :: t1 + t1 = value + end function t1 + function t5(value) + character(len=5) :: value + character(len=5) :: t5 + t5 = value + end function t5 + function ts(value) + character(len=*) :: value + character(len=10) :: ts + ts = value + end function ts + + subroutine s0(t0,value) + character :: value + character :: t0 +!f2py intent(out) t0 + t0 = value + end subroutine s0 + subroutine s1(t1,value) + character(len=1) :: value + character(len=1) :: t1 +!f2py intent(out) t1 + t1 = value + end subroutine s1 + subroutine s5(t5,value) + character(len=5) :: value + character(len=5) :: t5 +!f2py intent(out) t5 + t5 = value + end subroutine s5 + subroutine ss(ts,value) + character(len=*) :: value + character(len=10) :: ts +!f2py intent(out) ts + ts = value + end subroutine ss +end module f90_return_char diff --git a/numpy/f2py/tests/src/return_complex/foo77.f b/numpy/f2py/tests/src/return_complex/foo77.f new file mode 100644 index 000000000..37a1ec845 --- /dev/null +++ b/numpy/f2py/tests/src/return_complex/foo77.f @@ -0,0 +1,45 @@ + function t0(value) + complex value + complex t0 + t0 = value + end + function t8(value) + complex*8 value + complex*8 t8 + t8 = value + end + function t16(value) + complex*16 value + complex*16 t16 + t16 = value + end + function td(value) + double complex value + double complex td + td = value + end + + subroutine s0(t0,value) + complex value + complex t0 +cf2py intent(out) t0 + t0 = value + end + subroutine s8(t8,value) + complex*8 value + complex*8 t8 +cf2py intent(out) t8 + t8 = value + end + 
subroutine s16(t16,value) + complex*16 value + complex*16 t16 +cf2py intent(out) t16 + t16 = value + end + subroutine sd(td,value) + double complex value + double complex td +cf2py intent(out) td + td = value + end diff --git a/numpy/f2py/tests/src/return_complex/foo90.f90 b/numpy/f2py/tests/src/return_complex/foo90.f90 new file mode 100644 index 000000000..adc27b470 --- /dev/null +++ b/numpy/f2py/tests/src/return_complex/foo90.f90 @@ -0,0 +1,48 @@ +module f90_return_complex + contains + function t0(value) + complex :: value + complex :: t0 + t0 = value + end function t0 + function t8(value) + complex(kind=4) :: value + complex(kind=4) :: t8 + t8 = value + end function t8 + function t16(value) + complex(kind=8) :: value + complex(kind=8) :: t16 + t16 = value + end function t16 + function td(value) + double complex :: value + double complex :: td + td = value + end function td + + subroutine s0(t0,value) + complex :: value + complex :: t0 +!f2py intent(out) t0 + t0 = value + end subroutine s0 + subroutine s8(t8,value) + complex(kind=4) :: value + complex(kind=4) :: t8 +!f2py intent(out) t8 + t8 = value + end subroutine s8 + subroutine s16(t16,value) + complex(kind=8) :: value + complex(kind=8) :: t16 +!f2py intent(out) t16 + t16 = value + end subroutine s16 + subroutine sd(td,value) + double complex :: value + double complex :: td +!f2py intent(out) td + td = value + end subroutine sd +end module f90_return_complex diff --git a/numpy/f2py/tests/src/return_integer/foo77.f b/numpy/f2py/tests/src/return_integer/foo77.f new file mode 100644 index 000000000..1ab895b9a --- /dev/null +++ b/numpy/f2py/tests/src/return_integer/foo77.f @@ -0,0 +1,56 @@ + function t0(value) + integer value + integer t0 + t0 = value + end + function t1(value) + integer*1 value + integer*1 t1 + t1 = value + end + function t2(value) + integer*2 value + integer*2 t2 + t2 = value + end + function t4(value) + integer*4 value + integer*4 t4 + t4 = value + end + function t8(value) + integer*8 value + 
integer*8 t8 + t8 = value + end + + subroutine s0(t0,value) + integer value + integer t0 +cf2py intent(out) t0 + t0 = value + end + subroutine s1(t1,value) + integer*1 value + integer*1 t1 +cf2py intent(out) t1 + t1 = value + end + subroutine s2(t2,value) + integer*2 value + integer*2 t2 +cf2py intent(out) t2 + t2 = value + end + subroutine s4(t4,value) + integer*4 value + integer*4 t4 +cf2py intent(out) t4 + t4 = value + end + subroutine s8(t8,value) + integer*8 value + integer*8 t8 +cf2py intent(out) t8 + t8 = value + end diff --git a/numpy/f2py/tests/src/return_integer/foo90.f90 b/numpy/f2py/tests/src/return_integer/foo90.f90 new file mode 100644 index 000000000..ba9249aa2 --- /dev/null +++ b/numpy/f2py/tests/src/return_integer/foo90.f90 @@ -0,0 +1,59 @@ +module f90_return_integer + contains + function t0(value) + integer :: value + integer :: t0 + t0 = value + end function t0 + function t1(value) + integer(kind=1) :: value + integer(kind=1) :: t1 + t1 = value + end function t1 + function t2(value) + integer(kind=2) :: value + integer(kind=2) :: t2 + t2 = value + end function t2 + function t4(value) + integer(kind=4) :: value + integer(kind=4) :: t4 + t4 = value + end function t4 + function t8(value) + integer(kind=8) :: value + integer(kind=8) :: t8 + t8 = value + end function t8 + + subroutine s0(t0,value) + integer :: value + integer :: t0 +!f2py intent(out) t0 + t0 = value + end subroutine s0 + subroutine s1(t1,value) + integer(kind=1) :: value + integer(kind=1) :: t1 +!f2py intent(out) t1 + t1 = value + end subroutine s1 + subroutine s2(t2,value) + integer(kind=2) :: value + integer(kind=2) :: t2 +!f2py intent(out) t2 + t2 = value + end subroutine s2 + subroutine s4(t4,value) + integer(kind=4) :: value + integer(kind=4) :: t4 +!f2py intent(out) t4 + t4 = value + end subroutine s4 + subroutine s8(t8,value) + integer(kind=8) :: value + integer(kind=8) :: t8 +!f2py intent(out) t8 + t8 = value + end subroutine s8 +end module f90_return_integer diff --git 
a/numpy/f2py/tests/src/return_logical/foo77.f b/numpy/f2py/tests/src/return_logical/foo77.f new file mode 100644 index 000000000..ef530145f --- /dev/null +++ b/numpy/f2py/tests/src/return_logical/foo77.f @@ -0,0 +1,56 @@ + function t0(value) + logical value + logical t0 + t0 = value + end + function t1(value) + logical*1 value + logical*1 t1 + t1 = value + end + function t2(value) + logical*2 value + logical*2 t2 + t2 = value + end + function t4(value) + logical*4 value + logical*4 t4 + t4 = value + end +c function t8(value) +c logical*8 value +c logical*8 t8 +c t8 = value +c end + + subroutine s0(t0,value) + logical value + logical t0 +cf2py intent(out) t0 + t0 = value + end + subroutine s1(t1,value) + logical*1 value + logical*1 t1 +cf2py intent(out) t1 + t1 = value + end + subroutine s2(t2,value) + logical*2 value + logical*2 t2 +cf2py intent(out) t2 + t2 = value + end + subroutine s4(t4,value) + logical*4 value + logical*4 t4 +cf2py intent(out) t4 + t4 = value + end +c subroutine s8(t8,value) +c logical*8 value +c logical*8 t8 +cf2py intent(out) t8 +c t8 = value +c end diff --git a/numpy/f2py/tests/src/return_logical/foo90.f90 b/numpy/f2py/tests/src/return_logical/foo90.f90 new file mode 100644 index 000000000..a4526468e --- /dev/null +++ b/numpy/f2py/tests/src/return_logical/foo90.f90 @@ -0,0 +1,59 @@ +module f90_return_logical + contains + function t0(value) + logical :: value + logical :: t0 + t0 = value + end function t0 + function t1(value) + logical(kind=1) :: value + logical(kind=1) :: t1 + t1 = value + end function t1 + function t2(value) + logical(kind=2) :: value + logical(kind=2) :: t2 + t2 = value + end function t2 + function t4(value) + logical(kind=4) :: value + logical(kind=4) :: t4 + t4 = value + end function t4 + function t8(value) + logical(kind=8) :: value + logical(kind=8) :: t8 + t8 = value + end function t8 + + subroutine s0(t0,value) + logical :: value + logical :: t0 +!f2py intent(out) t0 + t0 = value + end subroutine s0 + subroutine 
s1(t1,value) + logical(kind=1) :: value + logical(kind=1) :: t1 +!f2py intent(out) t1 + t1 = value + end subroutine s1 + subroutine s2(t2,value) + logical(kind=2) :: value + logical(kind=2) :: t2 +!f2py intent(out) t2 + t2 = value + end subroutine s2 + subroutine s4(t4,value) + logical(kind=4) :: value + logical(kind=4) :: t4 +!f2py intent(out) t4 + t4 = value + end subroutine s4 + subroutine s8(t8,value) + logical(kind=8) :: value + logical(kind=8) :: t8 +!f2py intent(out) t8 + t8 = value + end subroutine s8 +end module f90_return_logical diff --git a/numpy/f2py/tests/src/return_real/foo77.f b/numpy/f2py/tests/src/return_real/foo77.f new file mode 100644 index 000000000..bf43dbf11 --- /dev/null +++ b/numpy/f2py/tests/src/return_real/foo77.f @@ -0,0 +1,45 @@ + function t0(value) + real value + real t0 + t0 = value + end + function t4(value) + real*4 value + real*4 t4 + t4 = value + end + function t8(value) + real*8 value + real*8 t8 + t8 = value + end + function td(value) + double precision value + double precision td + td = value + end + + subroutine s0(t0,value) + real value + real t0 +cf2py intent(out) t0 + t0 = value + end + subroutine s4(t4,value) + real*4 value + real*4 t4 +cf2py intent(out) t4 + t4 = value + end + subroutine s8(t8,value) + real*8 value + real*8 t8 +cf2py intent(out) t8 + t8 = value + end + subroutine sd(td,value) + double precision value + double precision td +cf2py intent(out) td + td = value + end diff --git a/numpy/f2py/tests/src/return_real/foo90.f90 b/numpy/f2py/tests/src/return_real/foo90.f90 new file mode 100644 index 000000000..df9719980 --- /dev/null +++ b/numpy/f2py/tests/src/return_real/foo90.f90 @@ -0,0 +1,48 @@ +module f90_return_real + contains + function t0(value) + real :: value + real :: t0 + t0 = value + end function t0 + function t4(value) + real(kind=4) :: value + real(kind=4) :: t4 + t4 = value + end function t4 + function t8(value) + real(kind=8) :: value + real(kind=8) :: t8 + t8 = value + end function t8 + function 
td(value) + double precision :: value + double precision :: td + td = value + end function td + + subroutine s0(t0,value) + real :: value + real :: t0 +!f2py intent(out) t0 + t0 = value + end subroutine s0 + subroutine s4(t4,value) + real(kind=4) :: value + real(kind=4) :: t4 +!f2py intent(out) t4 + t4 = value + end subroutine s4 + subroutine s8(t8,value) + real(kind=8) :: value + real(kind=8) :: t8 +!f2py intent(out) t8 + t8 = value + end subroutine s8 + subroutine sd(td,value) + double precision :: value + double precision :: td +!f2py intent(out) td + td = value + end subroutine sd +end module f90_return_real diff --git a/numpy/f2py/tests/src/string/fixed_string.f90 b/numpy/f2py/tests/src/string/fixed_string.f90 new file mode 100644 index 000000000..7fd158543 --- /dev/null +++ b/numpy/f2py/tests/src/string/fixed_string.f90 @@ -0,0 +1,34 @@ +function sint(s) result(i) + implicit none + character(len=*) :: s + integer :: j, i + i = 0 + do j=len(s), 1, -1 + if (.not.((i.eq.0).and.(s(j:j).eq.' '))) then + i = i + ichar(s(j:j)) * 10 ** (j - 1) + endif + end do + return + end function sint + + function test_in_bytes4(a) result (i) + implicit none + integer :: sint + character(len=4) :: a + integer :: i + i = sint(a) + a(1:1) = 'A' + return + end function test_in_bytes4 + + function test_inout_bytes4(a) result (i) + implicit none + integer :: sint + character(len=4), intent(inout) :: a + integer :: i + if (a(1:1).ne.' 
') then + a(1:1) = 'E' + endif + i = sint(a) + return + end function test_inout_bytes4 diff --git a/numpy/f2py/tests/src/string/string.f b/numpy/f2py/tests/src/string/string.f new file mode 100644 index 000000000..5210ca4dc --- /dev/null +++ b/numpy/f2py/tests/src/string/string.f @@ -0,0 +1,12 @@ +C FILE: STRING.F + SUBROUTINE FOO(A,B,C,D) + CHARACTER*5 A, B + CHARACTER*(*) C,D +Cf2py intent(in) a,c +Cf2py intent(inout) b,d + A(1:1) = 'A' + B(1:1) = 'B' + C(1:1) = 'C' + D(1:1) = 'D' + END +C END OF FILE STRING.F diff --git a/numpy/f2py/tests/test_abstract_interface.py b/numpy/f2py/tests/test_abstract_interface.py index 936c1f7bc..29e4b0647 100644 --- a/numpy/f2py/tests/test_abstract_interface.py +++ b/numpy/f2py/tests/test_abstract_interface.py @@ -1,66 +1,22 @@ +from pathlib import Path import textwrap from . import util from numpy.f2py import crackfortran class TestAbstractInterface(util.F2PyTest): - suffix = '.f90' + sources = [util.getpath("tests", "src", "abstract_interface", "foo.f90")] - skip = ['add1', 'add2'] - - code = textwrap.dedent(""" - module ops_module - - abstract interface - subroutine op(x, y, z) - integer, intent(in) :: x, y - integer, intent(out) :: z - end subroutine - end interface - - contains - - subroutine foo(x, y, r1, r2) - integer, intent(in) :: x, y - integer, intent(out) :: r1, r2 - procedure (op) add1, add2 - procedure (op), pointer::p - p=>add1 - call p(x, y, r1) - p=>add2 - call p(x, y, r2) - end subroutine - end module - - subroutine add1(x, y, z) - integer, intent(in) :: x, y - integer, intent(out) :: z - z = x + y - end subroutine - - subroutine add2(x, y, z) - integer, intent(in) :: x, y - integer, intent(out) :: z - z = x + 2 * y - end subroutine - """) + skip = ["add1", "add2"] def test_abstract_interface(self): assert self.module.ops_module.foo(3, 5) == (8, 13) - def test_parse_abstract_interface(self, tmp_path): + def test_parse_abstract_interface(self): # Test gh18403 - f_path = tmp_path / "gh18403_mod.f90" - with 
f_path.open('w') as ff: - ff.write(textwrap.dedent("""\ - module test - abstract interface - subroutine foo() - end subroutine - end interface - end module test - """)) - mod = crackfortran.crackfortran([str(f_path)]) + fpath = util.getpath("tests", "src", "abstract_interface", + "gh18403_mod.f90") + mod = crackfortran.crackfortran([str(fpath)]) assert len(mod) == 1 - assert len(mod[0]['body']) == 1 - assert mod[0]['body'][0]['block'] == 'abstract interface' + assert len(mod[0]["body"]) == 1 + assert mod[0]["body"][0]["block"] == "abstract interface" diff --git a/numpy/f2py/tests/test_array_from_pyobj.py b/numpy/f2py/tests/test_array_from_pyobj.py index 649fd1c48..5a084bc3e 100644 --- a/numpy/f2py/tests/test_array_from_pyobj.py +++ b/numpy/f2py/tests/test_array_from_pyobj.py @@ -6,7 +6,6 @@ import pytest import numpy as np -from numpy.testing import assert_, assert_equal from numpy.core.multiarray import typeinfo from . import util @@ -31,11 +30,13 @@ def setup_module(): define_macros=[]) """ d = os.path.dirname(__file__) - src = [os.path.join(d, 'src', 'array_from_pyobj', 'wrapmodule.c'), - os.path.join(d, '..', 'src', 'fortranobject.c'), - os.path.join(d, '..', 'src', 'fortranobject.h')] + src = [ + util.getpath("tests", "src", "array_from_pyobj", "wrapmodule.c"), + util.getpath("src", "fortranobject.c"), + util.getpath("src", "fortranobject.h"), + ] wrap = util.build_module_distutils(src, config_code, - 'test_array_from_pyobj_ext') + "test_array_from_pyobj_ext") def flags_info(arr): @@ -45,39 +46,48 @@ def flags_info(arr): def flags2names(flags): info = [] - for flagname in ['CONTIGUOUS', 'FORTRAN', 'OWNDATA', 'ENSURECOPY', - 'ENSUREARRAY', 'ALIGNED', 'NOTSWAPPED', 'WRITEABLE', - 'WRITEBACKIFCOPY', 'UPDATEIFCOPY', 'BEHAVED', 'BEHAVED_RO', - 'CARRAY', 'FARRAY' - ]: + for flagname in [ + "CONTIGUOUS", + "FORTRAN", + "OWNDATA", + "ENSURECOPY", + "ENSUREARRAY", + "ALIGNED", + "NOTSWAPPED", + "WRITEABLE", + "WRITEBACKIFCOPY", + "BEHAVED", + "BEHAVED_RO", + "CARRAY", 
+ "FARRAY", + ]: if abs(flags) & getattr(wrap, flagname, 0): info.append(flagname) return info class Intent: - def __init__(self, intent_list=[]): self.intent_list = intent_list[:] flags = 0 for i in intent_list: - if i == 'optional': + if i == "optional": flags |= wrap.F2PY_OPTIONAL else: - flags |= getattr(wrap, 'F2PY_INTENT_' + i.upper()) + flags |= getattr(wrap, "F2PY_INTENT_" + i.upper()) self.flags = flags def __getattr__(self, name): name = name.lower() - if name == 'in_': - name = 'in' + if name == "in_": + name = "in" return self.__class__(self.intent_list + [name]) def __str__(self): - return 'intent(%s)' % (','.join(self.intent_list)) + return "intent(%s)" % (",".join(self.intent_list)) def __repr__(self): - return 'Intent(%r)' % (self.intent_list) + return "Intent(%r)" % (self.intent_list) def is_intent(self, *names): for name in names: @@ -88,32 +98,46 @@ class Intent: def is_intent_exact(self, *names): return len(self.intent_list) == len(names) and self.is_intent(*names) -intent = Intent() - -_type_names = ['BOOL', 'BYTE', 'UBYTE', 'SHORT', 'USHORT', 'INT', 'UINT', - 'LONG', 'ULONG', 'LONGLONG', 'ULONGLONG', - 'FLOAT', 'DOUBLE', 'CFLOAT'] - -_cast_dict = {'BOOL': ['BOOL']} -_cast_dict['BYTE'] = _cast_dict['BOOL'] + ['BYTE'] -_cast_dict['UBYTE'] = _cast_dict['BOOL'] + ['UBYTE'] -_cast_dict['BYTE'] = ['BYTE'] -_cast_dict['UBYTE'] = ['UBYTE'] -_cast_dict['SHORT'] = _cast_dict['BYTE'] + ['UBYTE', 'SHORT'] -_cast_dict['USHORT'] = _cast_dict['UBYTE'] + ['BYTE', 'USHORT'] -_cast_dict['INT'] = _cast_dict['SHORT'] + ['USHORT', 'INT'] -_cast_dict['UINT'] = _cast_dict['USHORT'] + ['SHORT', 'UINT'] -_cast_dict['LONG'] = _cast_dict['INT'] + ['LONG'] -_cast_dict['ULONG'] = _cast_dict['UINT'] + ['ULONG'] - -_cast_dict['LONGLONG'] = _cast_dict['LONG'] + ['LONGLONG'] -_cast_dict['ULONGLONG'] = _cast_dict['ULONG'] + ['ULONGLONG'] - -_cast_dict['FLOAT'] = _cast_dict['SHORT'] + ['USHORT', 'FLOAT'] -_cast_dict['DOUBLE'] = _cast_dict['INT'] + ['UINT', 'FLOAT', 'DOUBLE'] 
+intent = Intent() -_cast_dict['CFLOAT'] = _cast_dict['FLOAT'] + ['CFLOAT'] +_type_names = [ + "BOOL", + "BYTE", + "UBYTE", + "SHORT", + "USHORT", + "INT", + "UINT", + "LONG", + "ULONG", + "LONGLONG", + "ULONGLONG", + "FLOAT", + "DOUBLE", + "CFLOAT", +] + +_cast_dict = {"BOOL": ["BOOL"]} +_cast_dict["BYTE"] = _cast_dict["BOOL"] + ["BYTE"] +_cast_dict["UBYTE"] = _cast_dict["BOOL"] + ["UBYTE"] +_cast_dict["BYTE"] = ["BYTE"] +_cast_dict["UBYTE"] = ["UBYTE"] +_cast_dict["SHORT"] = _cast_dict["BYTE"] + ["UBYTE", "SHORT"] +_cast_dict["USHORT"] = _cast_dict["UBYTE"] + ["BYTE", "USHORT"] +_cast_dict["INT"] = _cast_dict["SHORT"] + ["USHORT", "INT"] +_cast_dict["UINT"] = _cast_dict["USHORT"] + ["SHORT", "UINT"] + +_cast_dict["LONG"] = _cast_dict["INT"] + ["LONG"] +_cast_dict["ULONG"] = _cast_dict["UINT"] + ["ULONG"] + +_cast_dict["LONGLONG"] = _cast_dict["LONG"] + ["LONGLONG"] +_cast_dict["ULONGLONG"] = _cast_dict["ULONG"] + ["ULONGLONG"] + +_cast_dict["FLOAT"] = _cast_dict["SHORT"] + ["USHORT", "FLOAT"] +_cast_dict["DOUBLE"] = _cast_dict["INT"] + ["UINT", "FLOAT", "DOUBLE"] + +_cast_dict["CFLOAT"] = _cast_dict["FLOAT"] + ["CFLOAT"] # 32 bit system malloc typically does not provide the alignment required by # 16 byte long double types this means the inout intent cannot be satisfied @@ -121,15 +145,22 @@ _cast_dict['CFLOAT'] = _cast_dict['FLOAT'] + ['CFLOAT'] # when numpy gains an aligned allocator the tests could be enabled again # # Furthermore, on macOS ARM64, LONGDOUBLE is an alias for DOUBLE. 
-if ((np.intp().dtype.itemsize != 4 or np.clongdouble().dtype.alignment <= 8) and - sys.platform != 'win32' and - (platform.system(), platform.processor()) != ('Darwin', 'arm')): - _type_names.extend(['LONGDOUBLE', 'CDOUBLE', 'CLONGDOUBLE']) - _cast_dict['LONGDOUBLE'] = _cast_dict['LONG'] + \ - ['ULONG', 'FLOAT', 'DOUBLE', 'LONGDOUBLE'] - _cast_dict['CLONGDOUBLE'] = _cast_dict['LONGDOUBLE'] + \ - ['CFLOAT', 'CDOUBLE', 'CLONGDOUBLE'] - _cast_dict['CDOUBLE'] = _cast_dict['DOUBLE'] + ['CFLOAT', 'CDOUBLE'] +if ((np.intp().dtype.itemsize != 4 or np.clongdouble().dtype.alignment <= 8) + and sys.platform != "win32" + and (platform.system(), platform.processor()) != ("Darwin", "arm")): + _type_names.extend(["LONGDOUBLE", "CDOUBLE", "CLONGDOUBLE"]) + _cast_dict["LONGDOUBLE"] = _cast_dict["LONG"] + [ + "ULONG", + "FLOAT", + "DOUBLE", + "LONGDOUBLE", + ] + _cast_dict["CLONGDOUBLE"] = _cast_dict["LONGDOUBLE"] + [ + "CFLOAT", + "CDOUBLE", + "CLONGDOUBLE", + ] + _cast_dict["CDOUBLE"] = _cast_dict["DOUBLE"] + ["CFLOAT", "CDOUBLE"] class Type: @@ -154,8 +185,8 @@ class Type: def _init(self, name): self.NAME = name.upper() info = typeinfo[self.NAME] - self.type_num = getattr(wrap, 'NPY_' + self.NAME) - assert_equal(self.type_num, info.num) + self.type_num = getattr(wrap, "NPY_" + self.NAME) + assert self.type_num == info.num self.dtype = np.dtype(info.type) self.type = info.type self.elsize = info.bits / 8 @@ -195,7 +226,6 @@ class Type: class Array: - def __init__(self, typ, dims, intent, obj): self.type = typ self.dims = dims @@ -206,76 +236,78 @@ class Array: # arr.dtypechar may be different from typ.dtypechar self.arr = wrap.call(typ.type_num, dims, intent.flags, obj) - assert_(isinstance(self.arr, np.ndarray), repr(type(self.arr))) + assert isinstance(self.arr, np.ndarray) self.arr_attr = wrap.array_attrs(self.arr) if len(dims) > 1: - if self.intent.is_intent('c'): - assert_(intent.flags & wrap.F2PY_INTENT_C) - assert_(not self.arr.flags['FORTRAN'], - repr((self.arr.flags, 
getattr(obj, 'flags', None)))) - assert_(self.arr.flags['CONTIGUOUS']) - assert_(not self.arr_attr[6] & wrap.FORTRAN) + if self.intent.is_intent("c"): + assert (intent.flags & wrap.F2PY_INTENT_C) + assert not self.arr.flags["FORTRAN"] + assert self.arr.flags["CONTIGUOUS"] + assert (not self.arr_attr[6] & wrap.FORTRAN) else: - assert_(not intent.flags & wrap.F2PY_INTENT_C) - assert_(self.arr.flags['FORTRAN']) - assert_(not self.arr.flags['CONTIGUOUS']) - assert_(self.arr_attr[6] & wrap.FORTRAN) + assert (not intent.flags & wrap.F2PY_INTENT_C) + assert self.arr.flags["FORTRAN"] + assert not self.arr.flags["CONTIGUOUS"] + assert (self.arr_attr[6] & wrap.FORTRAN) if obj is None: self.pyarr = None self.pyarr_attr = None return - if intent.is_intent('cache'): - assert_(isinstance(obj, np.ndarray), repr(type(obj))) + if intent.is_intent("cache"): + assert isinstance(obj, np.ndarray), repr(type(obj)) self.pyarr = np.array(obj).reshape(*dims).copy() else: self.pyarr = np.array( - np.array(obj, dtype=typ.dtypechar).reshape(*dims), - order=self.intent.is_intent('c') and 'C' or 'F') - assert_(self.pyarr.dtype == typ, - repr((self.pyarr.dtype, typ))) - self.pyarr.setflags(write=self.arr.flags['WRITEABLE']) - assert_(self.pyarr.flags['OWNDATA'], (obj, intent)) + np.array(obj, dtype=typ.dtypechar).reshape(*dims), + order=self.intent.is_intent("c") and "C" or "F", + ) + assert self.pyarr.dtype == typ + self.pyarr.setflags(write=self.arr.flags["WRITEABLE"]) + assert self.pyarr.flags["OWNDATA"], (obj, intent) self.pyarr_attr = wrap.array_attrs(self.pyarr) if len(dims) > 1: - if self.intent.is_intent('c'): - assert_(not self.pyarr.flags['FORTRAN']) - assert_(self.pyarr.flags['CONTIGUOUS']) - assert_(not self.pyarr_attr[6] & wrap.FORTRAN) + if self.intent.is_intent("c"): + assert not self.pyarr.flags["FORTRAN"] + assert self.pyarr.flags["CONTIGUOUS"] + assert (not self.pyarr_attr[6] & wrap.FORTRAN) else: - assert_(self.pyarr.flags['FORTRAN']) - assert_(not 
self.pyarr.flags['CONTIGUOUS']) - assert_(self.pyarr_attr[6] & wrap.FORTRAN) + assert self.pyarr.flags["FORTRAN"] + assert not self.pyarr.flags["CONTIGUOUS"] + assert (self.pyarr_attr[6] & wrap.FORTRAN) - assert_(self.arr_attr[1] == self.pyarr_attr[1]) # nd - assert_(self.arr_attr[2] == self.pyarr_attr[2]) # dimensions + assert self.arr_attr[1] == self.pyarr_attr[1] # nd + assert self.arr_attr[2] == self.pyarr_attr[2] # dimensions if self.arr_attr[1] <= 1: - assert_(self.arr_attr[3] == self.pyarr_attr[3], - repr((self.arr_attr[3], self.pyarr_attr[3], - self.arr.tobytes(), self.pyarr.tobytes()))) # strides - assert_(self.arr_attr[5][-2:] == self.pyarr_attr[5][-2:], - repr((self.arr_attr[5], self.pyarr_attr[5]))) # descr - assert_(self.arr_attr[6] == self.pyarr_attr[6], - repr((self.arr_attr[6], self.pyarr_attr[6], - flags2names(0 * self.arr_attr[6] - self.pyarr_attr[6]), - flags2names(self.arr_attr[6]), intent))) # flags - - if intent.is_intent('cache'): - assert_(self.arr_attr[5][3] >= self.type.elsize, - repr((self.arr_attr[5][3], self.type.elsize))) + assert self.arr_attr[3] == self.pyarr_attr[3], repr(( + self.arr_attr[3], + self.pyarr_attr[3], + self.arr.tobytes(), + self.pyarr.tobytes(), + )) # strides + assert self.arr_attr[5][-2:] == self.pyarr_attr[5][-2:] # descr + assert self.arr_attr[6] == self.pyarr_attr[6], repr(( + self.arr_attr[6], + self.pyarr_attr[6], + flags2names(0 * self.arr_attr[6] - self.pyarr_attr[6]), + flags2names(self.arr_attr[6]), + intent, + )) # flags + + if intent.is_intent("cache"): + assert self.arr_attr[5][3] >= self.type.elsize else: - assert_(self.arr_attr[5][3] == self.type.elsize, - repr((self.arr_attr[5][3], self.type.elsize))) - assert_(self.arr_equal(self.pyarr, self.arr)) + assert self.arr_attr[5][3] == self.type.elsize + assert (self.arr_equal(self.pyarr, self.arr)) if isinstance(self.obj, np.ndarray): if typ.elsize == Type(obj.dtype).elsize: - if not intent.is_intent('copy') and self.arr_attr[1] <= 1: - 
assert_(self.has_shared_memory()) + if not intent.is_intent("copy") and self.arr_attr[1] <= 1: + assert self.has_shared_memory() def arr_equal(self, arr1, arr2): if arr1.shape != arr2.shape: @@ -286,8 +318,7 @@ class Array: return str(self.arr) def has_shared_memory(self): - """Check that created array shares data with input array. - """ + """Check that created array shares data with input array.""" if self.obj is self.arr: return True if not isinstance(self.obj, np.ndarray): @@ -297,300 +328,300 @@ class Array: class TestIntent: - def test_in_out(self): - assert_equal(str(intent.in_.out), 'intent(in,out)') - assert_(intent.in_.c.is_intent('c')) - assert_(not intent.in_.c.is_intent_exact('c')) - assert_(intent.in_.c.is_intent_exact('c', 'in')) - assert_(intent.in_.c.is_intent_exact('in', 'c')) - assert_(not intent.in_.is_intent('c')) + assert str(intent.in_.out) == "intent(in,out)" + assert intent.in_.c.is_intent("c") + assert not intent.in_.c.is_intent_exact("c") + assert intent.in_.c.is_intent_exact("c", "in") + assert intent.in_.c.is_intent_exact("in", "c") + assert not intent.in_.is_intent("c") class TestSharedMemory: num2seq = [1, 2] num23seq = [[1, 2, 3], [4, 5, 6]] - @pytest.fixture(autouse=True, scope='class', params=_type_names) + @pytest.fixture(autouse=True, scope="class", params=_type_names) def setup_type(self, request): request.cls.type = Type(request.param) - request.cls.array = lambda self, dims, intent, obj: \ - Array(Type(request.param), dims, intent, obj) + request.cls.array = lambda self, dims, intent, obj: Array( + Type(request.param), dims, intent, obj) def test_in_from_2seq(self): a = self.array([2], intent.in_, self.num2seq) - assert_(not a.has_shared_memory()) + assert not a.has_shared_memory() def test_in_from_2casttype(self): for t in self.type.cast_types(): obj = np.array(self.num2seq, dtype=t.dtype) a = self.array([len(self.num2seq)], intent.in_, obj) if t.elsize == self.type.elsize: - assert_( - a.has_shared_memory(), 
repr((self.type.dtype, t.dtype))) + assert a.has_shared_memory(), repr((self.type.dtype, t.dtype)) else: - assert_(not a.has_shared_memory(), repr(t.dtype)) + assert not a.has_shared_memory() - @pytest.mark.parametrize('write', ['w', 'ro']) - @pytest.mark.parametrize('order', ['C', 'F']) - @pytest.mark.parametrize('inp', ['2seq', '23seq']) + @pytest.mark.parametrize("write", ["w", "ro"]) + @pytest.mark.parametrize("order", ["C", "F"]) + @pytest.mark.parametrize("inp", ["2seq", "23seq"]) def test_in_nocopy(self, write, order, inp): - """Test if intent(in) array can be passed without copies - """ - seq = getattr(self, 'num' + inp) + """Test if intent(in) array can be passed without copies""" + seq = getattr(self, "num" + inp) obj = np.array(seq, dtype=self.type.dtype, order=order) - obj.setflags(write=(write == 'w')) - a = self.array(obj.shape, ((order=='C' and intent.in_.c) or intent.in_), obj) + obj.setflags(write=(write == "w")) + a = self.array(obj.shape, + ((order == "C" and intent.in_.c) or intent.in_), obj) assert a.has_shared_memory() def test_inout_2seq(self): obj = np.array(self.num2seq, dtype=self.type.dtype) a = self.array([len(self.num2seq)], intent.inout, obj) - assert_(a.has_shared_memory()) + assert a.has_shared_memory() try: a = self.array([2], intent.in_.inout, self.num2seq) except TypeError as msg: - if not str(msg).startswith('failed to initialize intent' - '(inout|inplace|cache) array'): + if not str(msg).startswith( + "failed to initialize intent(inout|inplace|cache) array"): raise else: - raise SystemError('intent(inout) should have failed on sequence') + raise SystemError("intent(inout) should have failed on sequence") def test_f_inout_23seq(self): - obj = np.array(self.num23seq, dtype=self.type.dtype, order='F') + obj = np.array(self.num23seq, dtype=self.type.dtype, order="F") shape = (len(self.num23seq), len(self.num23seq[0])) a = self.array(shape, intent.in_.inout, obj) - assert_(a.has_shared_memory()) + assert a.has_shared_memory() - obj = 
np.array(self.num23seq, dtype=self.type.dtype, order='C') + obj = np.array(self.num23seq, dtype=self.type.dtype, order="C") shape = (len(self.num23seq), len(self.num23seq[0])) try: a = self.array(shape, intent.in_.inout, obj) except ValueError as msg: - if not str(msg).startswith('failed to initialize intent' - '(inout) array'): + if not str(msg).startswith( + "failed to initialize intent(inout) array"): raise else: raise SystemError( - 'intent(inout) should have failed on improper array') + "intent(inout) should have failed on improper array") def test_c_inout_23seq(self): obj = np.array(self.num23seq, dtype=self.type.dtype) shape = (len(self.num23seq), len(self.num23seq[0])) a = self.array(shape, intent.in_.c.inout, obj) - assert_(a.has_shared_memory()) + assert a.has_shared_memory() def test_in_copy_from_2casttype(self): for t in self.type.cast_types(): obj = np.array(self.num2seq, dtype=t.dtype) a = self.array([len(self.num2seq)], intent.in_.copy, obj) - assert_(not a.has_shared_memory(), repr(t.dtype)) + assert not a.has_shared_memory() def test_c_in_from_23seq(self): - a = self.array([len(self.num23seq), len(self.num23seq[0])], - intent.in_, self.num23seq) - assert_(not a.has_shared_memory()) + a = self.array( + [len(self.num23seq), len(self.num23seq[0])], intent.in_, + self.num23seq) + assert not a.has_shared_memory() def test_in_from_23casttype(self): for t in self.type.cast_types(): obj = np.array(self.num23seq, dtype=t.dtype) - a = self.array([len(self.num23seq), len(self.num23seq[0])], - intent.in_, obj) - assert_(not a.has_shared_memory(), repr(t.dtype)) + a = self.array( + [len(self.num23seq), len(self.num23seq[0])], intent.in_, obj) + assert not a.has_shared_memory() def test_f_in_from_23casttype(self): for t in self.type.cast_types(): - obj = np.array(self.num23seq, dtype=t.dtype, order='F') - a = self.array([len(self.num23seq), len(self.num23seq[0])], - intent.in_, obj) + obj = np.array(self.num23seq, dtype=t.dtype, order="F") + a = self.array( + 
[len(self.num23seq), len(self.num23seq[0])], intent.in_, obj) if t.elsize == self.type.elsize: - assert_(a.has_shared_memory(), repr(t.dtype)) + assert a.has_shared_memory() else: - assert_(not a.has_shared_memory(), repr(t.dtype)) + assert not a.has_shared_memory() def test_c_in_from_23casttype(self): for t in self.type.cast_types(): obj = np.array(self.num23seq, dtype=t.dtype) - a = self.array([len(self.num23seq), len(self.num23seq[0])], - intent.in_.c, obj) + a = self.array( + [len(self.num23seq), len(self.num23seq[0])], intent.in_.c, obj) if t.elsize == self.type.elsize: - assert_(a.has_shared_memory(), repr(t.dtype)) + assert a.has_shared_memory() else: - assert_(not a.has_shared_memory(), repr(t.dtype)) + assert not a.has_shared_memory() def test_f_copy_in_from_23casttype(self): for t in self.type.cast_types(): - obj = np.array(self.num23seq, dtype=t.dtype, order='F') - a = self.array([len(self.num23seq), len(self.num23seq[0])], - intent.in_.copy, obj) - assert_(not a.has_shared_memory(), repr(t.dtype)) + obj = np.array(self.num23seq, dtype=t.dtype, order="F") + a = self.array( + [len(self.num23seq), len(self.num23seq[0])], intent.in_.copy, + obj) + assert not a.has_shared_memory() def test_c_copy_in_from_23casttype(self): for t in self.type.cast_types(): obj = np.array(self.num23seq, dtype=t.dtype) - a = self.array([len(self.num23seq), len(self.num23seq[0])], - intent.in_.c.copy, obj) - assert_(not a.has_shared_memory(), repr(t.dtype)) + a = self.array( + [len(self.num23seq), len(self.num23seq[0])], intent.in_.c.copy, + obj) + assert not a.has_shared_memory() def test_in_cache_from_2casttype(self): for t in self.type.all_types(): if t.elsize != self.type.elsize: continue obj = np.array(self.num2seq, dtype=t.dtype) - shape = (len(self.num2seq),) + shape = (len(self.num2seq), ) a = self.array(shape, intent.in_.c.cache, obj) - assert_(a.has_shared_memory(), repr(t.dtype)) + assert a.has_shared_memory() a = self.array(shape, intent.in_.cache, obj) - 
assert_(a.has_shared_memory(), repr(t.dtype)) + assert a.has_shared_memory() - obj = np.array(self.num2seq, dtype=t.dtype, order='F') + obj = np.array(self.num2seq, dtype=t.dtype, order="F") a = self.array(shape, intent.in_.c.cache, obj) - assert_(a.has_shared_memory(), repr(t.dtype)) + assert a.has_shared_memory() a = self.array(shape, intent.in_.cache, obj) - assert_(a.has_shared_memory(), repr(t.dtype)) + assert a.has_shared_memory(), repr(t.dtype) try: a = self.array(shape, intent.in_.cache, obj[::-1]) except ValueError as msg: - if not str(msg).startswith('failed to initialize' - ' intent(cache) array'): + if not str(msg).startswith( + "failed to initialize intent(cache) array"): raise else: raise SystemError( - 'intent(cache) should have failed on multisegmented array') + "intent(cache) should have failed on multisegmented array") def test_in_cache_from_2casttype_failure(self): for t in self.type.all_types(): if t.elsize >= self.type.elsize: continue obj = np.array(self.num2seq, dtype=t.dtype) - shape = (len(self.num2seq),) + shape = (len(self.num2seq), ) try: self.array(shape, intent.in_.cache, obj) # Should succeed except ValueError as msg: - if not str(msg).startswith('failed to initialize' - ' intent(cache) array'): + if not str(msg).startswith( + "failed to initialize intent(cache) array"): raise else: raise SystemError( - 'intent(cache) should have failed on smaller array') + "intent(cache) should have failed on smaller array") def test_cache_hidden(self): - shape = (2,) + shape = (2, ) a = self.array(shape, intent.cache.hide, None) - assert_(a.arr.shape == shape) + assert a.arr.shape == shape shape = (2, 3) a = self.array(shape, intent.cache.hide, None) - assert_(a.arr.shape == shape) + assert a.arr.shape == shape shape = (-1, 3) try: a = self.array(shape, intent.cache.hide, None) except ValueError as msg: - if not str(msg).startswith('failed to create intent' - '(cache|hide)|optional array'): + if not str(msg).startswith( + "failed to create 
intent(cache|hide)|optional array"): raise else: raise SystemError( - 'intent(cache) should have failed on undefined dimensions') + "intent(cache) should have failed on undefined dimensions") def test_hidden(self): - shape = (2,) + shape = (2, ) a = self.array(shape, intent.hide, None) - assert_(a.arr.shape == shape) - assert_(a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype))) + assert a.arr.shape == shape + assert a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype)) shape = (2, 3) a = self.array(shape, intent.hide, None) - assert_(a.arr.shape == shape) - assert_(a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype))) - assert_(a.arr.flags['FORTRAN'] and not a.arr.flags['CONTIGUOUS']) + assert a.arr.shape == shape + assert a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype)) + assert a.arr.flags["FORTRAN"] and not a.arr.flags["CONTIGUOUS"] shape = (2, 3) a = self.array(shape, intent.c.hide, None) - assert_(a.arr.shape == shape) - assert_(a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype))) - assert_(not a.arr.flags['FORTRAN'] and a.arr.flags['CONTIGUOUS']) + assert a.arr.shape == shape + assert a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype)) + assert not a.arr.flags["FORTRAN"] and a.arr.flags["CONTIGUOUS"] shape = (-1, 3) try: a = self.array(shape, intent.hide, None) except ValueError as msg: - if not str(msg).startswith('failed to create intent' - '(cache|hide)|optional array'): + if not str(msg).startswith( + "failed to create intent(cache|hide)|optional array"): raise else: - raise SystemError('intent(hide) should have failed' - ' on undefined dimensions') + raise SystemError( + "intent(hide) should have failed on undefined dimensions") def test_optional_none(self): - shape = (2,) + shape = (2, ) a = self.array(shape, intent.optional, None) - assert_(a.arr.shape == shape) - assert_(a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype))) + assert a.arr.shape == shape + assert a.arr_equal(a.arr, np.zeros(shape, 
dtype=self.type.dtype)) shape = (2, 3) a = self.array(shape, intent.optional, None) - assert_(a.arr.shape == shape) - assert_(a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype))) - assert_(a.arr.flags['FORTRAN'] and not a.arr.flags['CONTIGUOUS']) + assert a.arr.shape == shape + assert a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype)) + assert a.arr.flags["FORTRAN"] and not a.arr.flags["CONTIGUOUS"] shape = (2, 3) a = self.array(shape, intent.c.optional, None) - assert_(a.arr.shape == shape) - assert_(a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype))) - assert_(not a.arr.flags['FORTRAN'] and a.arr.flags['CONTIGUOUS']) + assert a.arr.shape == shape + assert a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype)) + assert not a.arr.flags["FORTRAN"] and a.arr.flags["CONTIGUOUS"] def test_optional_from_2seq(self): obj = self.num2seq - shape = (len(obj),) + shape = (len(obj), ) a = self.array(shape, intent.optional, obj) - assert_(a.arr.shape == shape) - assert_(not a.has_shared_memory()) + assert a.arr.shape == shape + assert not a.has_shared_memory() def test_optional_from_23seq(self): obj = self.num23seq shape = (len(obj), len(obj[0])) a = self.array(shape, intent.optional, obj) - assert_(a.arr.shape == shape) - assert_(not a.has_shared_memory()) + assert a.arr.shape == shape + assert not a.has_shared_memory() a = self.array(shape, intent.optional.c, obj) - assert_(a.arr.shape == shape) - assert_(not a.has_shared_memory()) + assert a.arr.shape == shape + assert not a.has_shared_memory() def test_inplace(self): obj = np.array(self.num23seq, dtype=self.type.dtype) - assert_(not obj.flags['FORTRAN'] and obj.flags['CONTIGUOUS']) + assert not obj.flags["FORTRAN"] and obj.flags["CONTIGUOUS"] shape = obj.shape a = self.array(shape, intent.inplace, obj) - assert_(obj[1][2] == a.arr[1][2], repr((obj, a.arr))) + assert obj[1][2] == a.arr[1][2], repr((obj, a.arr)) a.arr[1][2] = 54 - assert_(obj[1][2] == a.arr[1][2] == - np.array(54, 
dtype=self.type.dtype), repr((obj, a.arr))) - assert_(a.arr is obj) - assert_(obj.flags['FORTRAN']) # obj attributes are changed inplace! - assert_(not obj.flags['CONTIGUOUS']) + assert obj[1][2] == a.arr[1][2] == np.array(54, dtype=self.type.dtype) + assert a.arr is obj + assert obj.flags["FORTRAN"] # obj attributes are changed inplace! + assert not obj.flags["CONTIGUOUS"] def test_inplace_from_casttype(self): for t in self.type.cast_types(): if t is self.type: continue obj = np.array(self.num23seq, dtype=t.dtype) - assert_(obj.dtype.type == t.type) - assert_(obj.dtype.type is not self.type.type) - assert_(not obj.flags['FORTRAN'] and obj.flags['CONTIGUOUS']) + assert obj.dtype.type == t.type + assert obj.dtype.type is not self.type.type + assert not obj.flags["FORTRAN"] and obj.flags["CONTIGUOUS"] shape = obj.shape a = self.array(shape, intent.inplace, obj) - assert_(obj[1][2] == a.arr[1][2], repr((obj, a.arr))) + assert obj[1][2] == a.arr[1][2], repr((obj, a.arr)) a.arr[1][2] = 54 - assert_(obj[1][2] == a.arr[1][2] == - np.array(54, dtype=self.type.dtype), repr((obj, a.arr))) - assert_(a.arr is obj) - assert_(obj.flags['FORTRAN']) # obj attributes changed inplace! - assert_(not obj.flags['CONTIGUOUS']) - assert_(obj.dtype.type is self.type.type) # obj changed inplace! + assert obj[1][2] == a.arr[1][2] == np.array(54, + dtype=self.type.dtype) + assert a.arr is obj + assert obj.flags["FORTRAN"] # obj attributes changed inplace! + assert not obj.flags["CONTIGUOUS"] + assert obj.dtype.type is self.type.type # obj changed inplace! diff --git a/numpy/f2py/tests/test_assumed_shape.py b/numpy/f2py/tests/test_assumed_shape.py index 79e3ad138..e546c379b 100644 --- a/numpy/f2py/tests/test_assumed_shape.py +++ b/numpy/f2py/tests/test_assumed_shape.py @@ -2,35 +2,31 @@ import os import pytest import tempfile -from numpy.testing import assert_ from . 
import util -def _path(*a): - return os.path.join(*((os.path.dirname(__file__),) + a)) - - class TestAssumedShapeSumExample(util.F2PyTest): - sources = [_path('src', 'assumed_shape', 'foo_free.f90'), - _path('src', 'assumed_shape', 'foo_use.f90'), - _path('src', 'assumed_shape', 'precision.f90'), - _path('src', 'assumed_shape', 'foo_mod.f90'), - _path('src', 'assumed_shape', '.f2py_f2cmap'), - ] + sources = [ + util.getpath("tests", "src", "assumed_shape", "foo_free.f90"), + util.getpath("tests", "src", "assumed_shape", "foo_use.f90"), + util.getpath("tests", "src", "assumed_shape", "precision.f90"), + util.getpath("tests", "src", "assumed_shape", "foo_mod.f90"), + util.getpath("tests", "src", "assumed_shape", ".f2py_f2cmap"), + ] @pytest.mark.slow def test_all(self): r = self.module.fsum([1, 2]) - assert_(r == 3, repr(r)) + assert r == 3 r = self.module.sum([1, 2]) - assert_(r == 3, repr(r)) + assert r == 3 r = self.module.sum_with_use([1, 2]) - assert_(r == 3, repr(r)) + assert r == 3 r = self.module.mod.sum([1, 2]) - assert_(r == 3, repr(r)) + assert r == 3 r = self.module.mod.fsum([1, 2]) - assert_(r == 3, repr(r)) + assert r == 3 class TestF2cmapOption(TestAssumedShapeSumExample): @@ -40,7 +36,7 @@ class TestF2cmapOption(TestAssumedShapeSumExample): f2cmap_src = self.sources.pop(-1) self.f2cmap_file = tempfile.NamedTemporaryFile(delete=False) - with open(f2cmap_src, 'rb') as f: + with open(f2cmap_src, "rb") as f: self.f2cmap_file.write(f.read()) self.f2cmap_file.close() diff --git a/numpy/f2py/tests/test_block_docstring.py b/numpy/f2py/tests/test_block_docstring.py index 7d725165b..e0eacc032 100644 --- a/numpy/f2py/tests/test_block_docstring.py +++ b/numpy/f2py/tests/test_block_docstring.py @@ -2,22 +2,16 @@ import sys import pytest from . 
import util -from numpy.testing import assert_equal, IS_PYPY +from numpy.testing import IS_PYPY -class TestBlockDocString(util.F2PyTest): - code = """ - SUBROUTINE FOO() - INTEGER BAR(2, 3) - COMMON /BLOCK/ BAR - RETURN - END - """ +class TestBlockDocString(util.F2PyTest): + sources = [util.getpath("tests", "src", "block_docstring", "foo.f")] - @pytest.mark.skipif(sys.platform=='win32', - reason='Fails with MinGW64 Gfortran (Issue #9673)') + @pytest.mark.skipif(sys.platform == "win32", + reason="Fails with MinGW64 Gfortran (Issue #9673)") @pytest.mark.xfail(IS_PYPY, reason="PyPy cannot modify tp_doc after PyType_Ready") def test_block_docstring(self): expected = "bar : 'i'-array(2,3)\n" - assert_equal(self.module.block.__doc__, expected) + assert self.module.block.__doc__ == expected diff --git a/numpy/f2py/tests/test_callback.py b/numpy/f2py/tests/test_callback.py index 5d2aab94d..4e91430fd 100644 --- a/numpy/f2py/tests/test_callback.py +++ b/numpy/f2py/tests/test_callback.py @@ -7,77 +7,14 @@ import traceback import time import numpy as np -from numpy.testing import assert_, assert_equal, IS_PYPY +from numpy.testing import IS_PYPY from . 
import util class TestF77Callback(util.F2PyTest): - code = """ - subroutine t(fun,a) - integer a -cf2py intent(out) a - external fun - call fun(a) - end - - subroutine func(a) -cf2py intent(in,out) a - integer a - a = a + 11 - end - - subroutine func0(a) -cf2py intent(out) a - integer a - a = 11 - end - - subroutine t2(a) -cf2py intent(callback) fun - integer a -cf2py intent(out) a - external fun - call fun(a) - end - - subroutine string_callback(callback, a) - external callback - double precision callback - double precision a - character*1 r -cf2py intent(out) a - r = 'r' - a = callback(r) - end - - subroutine string_callback_array(callback, cu, lencu, a) - external callback - integer callback - integer lencu - character*8 cu(lencu) - integer a -cf2py intent(out) a - - a = callback(cu, lencu) - end - - subroutine hidden_callback(a, r) - external global_f -cf2py intent(callback, hide) global_f - integer a, r, global_f -cf2py intent(out) r - r = global_f(a) - end - - subroutine hidden_callback2(a, r) - external global_f - integer a, r, global_f -cf2py intent(out) r - r = global_f(a) - end - """ + sources = [util.getpath("tests", "src", "callback", "foo.f")] - @pytest.mark.parametrize('name', 't,t2'.split(',')) + @pytest.mark.parametrize("name", "t,t2".split(",")) def test_all(self, name): self.check_function(name) @@ -110,75 +47,74 @@ cf2py intent(out) r Return objects: a : int """) - assert_equal(self.module.t.__doc__, expected) + assert self.module.t.__doc__ == expected def check_function(self, name): t = getattr(self.module, name) r = t(lambda: 4) - assert_(r == 4, repr(r)) - r = t(lambda a: 5, fun_extra_args=(6,)) - assert_(r == 5, repr(r)) - r = t(lambda a: a, fun_extra_args=(6,)) - assert_(r == 6, repr(r)) - r = t(lambda a: 5 + a, fun_extra_args=(7,)) - assert_(r == 12, repr(r)) - r = t(lambda a: math.degrees(a), fun_extra_args=(math.pi,)) - assert_(r == 180, repr(r)) - r = t(math.degrees, fun_extra_args=(math.pi,)) - assert_(r == 180, repr(r)) - - r = 
t(self.module.func, fun_extra_args=(6,)) - assert_(r == 17, repr(r)) + assert r == 4 + r = t(lambda a: 5, fun_extra_args=(6, )) + assert r == 5 + r = t(lambda a: a, fun_extra_args=(6, )) + assert r == 6 + r = t(lambda a: 5 + a, fun_extra_args=(7, )) + assert r == 12 + r = t(lambda a: math.degrees(a), fun_extra_args=(math.pi, )) + assert r == 180 + r = t(math.degrees, fun_extra_args=(math.pi, )) + assert r == 180 + + r = t(self.module.func, fun_extra_args=(6, )) + assert r == 17 r = t(self.module.func0) - assert_(r == 11, repr(r)) + assert r == 11 r = t(self.module.func0._cpointer) - assert_(r == 11, repr(r)) + assert r == 11 class A: - def __call__(self): return 7 def mth(self): return 9 + a = A() r = t(a) - assert_(r == 7, repr(r)) + assert r == 7 r = t(a.mth) - assert_(r == 9, repr(r)) + assert r == 9 - @pytest.mark.skipif(sys.platform=='win32', - reason='Fails with MinGW64 Gfortran (Issue #9673)') + @pytest.mark.skipif(sys.platform == "win32", + reason="Fails with MinGW64 Gfortran (Issue #9673)") def test_string_callback(self): - def callback(code): - if code == 'r': + if code == "r": return 0 else: return 1 - f = getattr(self.module, 'string_callback') + f = getattr(self.module, "string_callback") r = f(callback) - assert_(r == 0, repr(r)) + assert r == 0 - @pytest.mark.skipif(sys.platform=='win32', - reason='Fails with MinGW64 Gfortran (Issue #9673)') + @pytest.mark.skipif(sys.platform == "win32", + reason="Fails with MinGW64 Gfortran (Issue #9673)") def test_string_callback_array(self): # See gh-10027 - cu = np.zeros((1, 8), 'S1') + cu = np.zeros((1, 8), "S1") def callback(cu, lencu): if cu.shape != (lencu, 8): return 1 - if cu.dtype != 'S1': + if cu.dtype != "S1": return 2 - if not np.all(cu == b''): + if not np.all(cu == b""): return 3 return 0 - f = getattr(self.module, 'string_callback_array') + f = getattr(self.module, "string_callback_array") res = f(callback, cu, len(cu)) - assert_(res == 0, repr(res)) + assert res == 0 def test_threadsafety(self): # 
Segfaults if the callback handling is not threadsafe @@ -192,7 +128,7 @@ cf2py intent(out) r # Check reentrancy r = self.module.t(lambda: 123) - assert_(r == 123) + assert r == 123 return 42 @@ -200,13 +136,15 @@ cf2py intent(out) r try: for j in range(50): r = self.module.t(cb) - assert_(r == 42) + assert r == 42 self.check_function(name) except Exception: errors.append(traceback.format_exc()) - threads = [threading.Thread(target=runner, args=(arg,)) - for arg in ("t", "t2") for n in range(20)] + threads = [ + threading.Thread(target=runner, args=(arg, )) + for arg in ("t", "t2") for n in range(20) + ] for t in threads: t.start() @@ -222,34 +160,34 @@ cf2py intent(out) r try: self.module.hidden_callback(2) except Exception as msg: - assert_(str(msg).startswith('Callback global_f not defined')) + assert str(msg).startswith("Callback global_f not defined") try: self.module.hidden_callback2(2) except Exception as msg: - assert_(str(msg).startswith('cb: Callback global_f not defined')) + assert str(msg).startswith("cb: Callback global_f not defined") self.module.global_f = lambda x: x + 1 r = self.module.hidden_callback(2) - assert_(r == 3) + assert r == 3 self.module.global_f = lambda x: x + 2 r = self.module.hidden_callback(2) - assert_(r == 4) + assert r == 4 del self.module.global_f try: self.module.hidden_callback(2) except Exception as msg: - assert_(str(msg).startswith('Callback global_f not defined')) + assert str(msg).startswith("Callback global_f not defined") self.module.global_f = lambda x=0: x + 3 r = self.module.hidden_callback(2) - assert_(r == 5) + assert r == 5 # reproducer of gh18341 r = self.module.hidden_callback2(2) - assert_(r == 3) + assert r == 3 class TestF77CallbackPythonTLS(TestF77Callback): @@ -257,26 +195,14 @@ class TestF77CallbackPythonTLS(TestF77Callback): Callback tests using Python thread-local storage instead of compiler-provided """ + options = ["-DF2PY_USE_PYTHON_TLS"] class TestF90Callback(util.F2PyTest): - - suffix = '.f90' - - 
code = textwrap.dedent( - """ - function gh17797(f, y) result(r) - external f - integer(8) :: r, f - integer(8), dimension(:) :: y - r = f(0) - r = r + sum(y) - end function gh17797 - """) + sources = [util.getpath("tests", "src", "callback", "gh17797.f90")] def test_gh17797(self): - def incr(x): return x + 123 @@ -291,32 +217,9 @@ class TestGH18335(util.F2PyTest): implemented as a separate test class. Do not extend this test with other tests! """ - - suffix = '.f90' - - code = textwrap.dedent( - """ - ! When gh18335_workaround is defined as an extension, - ! the issue cannot be reproduced. - !subroutine gh18335_workaround(f, y) - ! implicit none - ! external f - ! integer(kind=1) :: y(1) - ! call f(y) - !end subroutine gh18335_workaround - - function gh18335(f) result (r) - implicit none - external f - integer(kind=1) :: y(1), r - y(1) = 123 - call f(y) - r = y(1) - end function gh18335 - """) + sources = [util.getpath("tests", "src", "callback", "gh18335.f90")] def test_gh18335(self): - def foo(x): x[0] += 1 diff --git a/numpy/f2py/tests/test_common.py b/numpy/f2py/tests/test_common.py index e4bf35504..8a4b221ef 100644 --- a/numpy/f2py/tests/test_common.py +++ b/numpy/f2py/tests/test_common.py @@ -5,21 +5,14 @@ import pytest import numpy as np from . 
import util -from numpy.testing import assert_array_equal - -def _path(*a): - return os.path.join(*((os.path.dirname(__file__),) + a)) class TestCommonBlock(util.F2PyTest): - sources = [_path('src', 'common', 'block.f')] + sources = [util.getpath("tests", "src", "common", "block.f")] - @pytest.mark.skipif(sys.platform=='win32', - reason='Fails with MinGW64 Gfortran (Issue #9673)') + @pytest.mark.skipif(sys.platform == "win32", + reason="Fails with MinGW64 Gfortran (Issue #9673)") def test_common_block(self): self.module.initcb() - assert_array_equal(self.module.block.long_bn, - np.array(1.0, dtype=np.float64)) - assert_array_equal(self.module.block.string_bn, - np.array('2', dtype='|S1')) - assert_array_equal(self.module.block.ok, - np.array(3, dtype=np.int32)) + assert self.module.block.long_bn == np.array(1.0, dtype=np.float64) + assert self.module.block.string_bn == np.array("2", dtype="|S1") + assert self.module.block.ok == np.array(3, dtype=np.int32) diff --git a/numpy/f2py/tests/test_compile_function.py b/numpy/f2py/tests/test_compile_function.py index f76fd6448..3c16f3198 100644 --- a/numpy/f2py/tests/test_compile_function.py +++ b/numpy/f2py/tests/test_compile_function.py @@ -9,7 +9,6 @@ import pytest import numpy.f2py -from numpy.testing import assert_equal from . import util @@ -17,14 +16,13 @@ def setup_module(): if not util.has_c_compiler(): pytest.skip("Needs C compiler") if not util.has_f77_compiler(): - pytest.skip('Needs FORTRAN 77 compiler') + pytest.skip("Needs FORTRAN 77 compiler") # extra_args can be a list (since gh-11937) or string. 
# also test absence of extra_args -@pytest.mark.parametrize( - "extra_args", [['--noopt', '--debug'], '--noopt --debug', ''] - ) +@pytest.mark.parametrize("extra_args", + [["--noopt", "--debug"], "--noopt --debug", ""]) @pytest.mark.leaks_references(reason="Imported module seems never deleted.") def test_f2py_init_compile(extra_args): # flush through the f2py __init__ compile() function code path as a @@ -33,7 +31,7 @@ def test_f2py_init_compile(extra_args): # the Fortran 77 syntax requires 6 spaces before any commands, but # more space may be added/ - fsource = """ + fsource = """ integer function foo() foo = 10 + 5 return @@ -45,7 +43,7 @@ def test_f2py_init_compile(extra_args): modname = util.get_temp_module_name() cwd = os.getcwd() - target = os.path.join(moddir, str(uuid.uuid4()) + '.f') + target = os.path.join(moddir, str(uuid.uuid4()) + ".f") # try running compile() with and without a source_fn provided so # that the code path where a temporary file for writing Fortran # source is created is also explored @@ -54,40 +52,35 @@ def test_f2py_init_compile(extra_args): # util.py, but don't actually use build_module() because it has # its own invocation of subprocess that circumvents the # f2py.compile code block under test - try: - os.chdir(moddir) - ret_val = numpy.f2py.compile( - fsource, - modulename=modname, - extra_args=extra_args, - source_fn=source_fn - ) - finally: - os.chdir(cwd) - - # check for compile success return value - assert_equal(ret_val, 0) - - # we are not currently able to import the Python-Fortran - # interface module on Windows / Appveyor, even though we do get - # successful compilation on that platform with Python 3.x - if sys.platform != 'win32': - # check for sensible result of Fortran function; that means - # we can import the module name in Python and retrieve the - # result of the sum operation - return_check = import_module(modname) - calc_result = return_check.foo() - assert_equal(calc_result, 15) - # Removal from sys.modules, is 
not as such necessary. Even with - # removal, the module (dict) stays alive. - del sys.modules[modname] + with util.switchdir(moddir): + ret_val = numpy.f2py.compile(fsource, + modulename=modname, + extra_args=extra_args, + source_fn=source_fn) + + # check for compile success return value + assert ret_val == 0 + + # we are not currently able to import the Python-Fortran + # interface module on Windows / Appveyor, even though we do get + # successful compilation on that platform with Python 3.x + if sys.platform != "win32": + # check for sensible result of Fortran function; that means + # we can import the module name in Python and retrieve the + # result of the sum operation + return_check = import_module(modname) + calc_result = return_check.foo() + assert calc_result == 15 + # Removal from sys.modules, is not as such necessary. Even with + # removal, the module (dict) stays alive. + del sys.modules[modname] def test_f2py_init_compile_failure(): # verify an appropriate integer status value returned by # f2py.compile() when invalid Fortran is provided ret_val = numpy.f2py.compile(b"invalid") - assert_equal(ret_val, 1) + assert ret_val == 1 def test_f2py_init_compile_bad_cmd(): @@ -99,27 +92,26 @@ def test_f2py_init_compile_bad_cmd(): # downstream NOTE: how bad of an idea is this patching? 
try: temp = sys.executable - sys.executable = 'does not exist' + sys.executable = "does not exist" # the OSError should take precedence over invalid Fortran ret_val = numpy.f2py.compile(b"invalid") - assert_equal(ret_val, 127) + assert ret_val == 127 finally: sys.executable = temp -@pytest.mark.parametrize('fsource', - ['program test_f2py\nend program test_f2py', - b'program test_f2py\nend program test_f2py',]) +@pytest.mark.parametrize( + "fsource", + [ + "program test_f2py\nend program test_f2py", + b"program test_f2py\nend program test_f2py", + ], +) def test_compile_from_strings(tmpdir, fsource): # Make sure we can compile str and bytes gh-12796 - cwd = os.getcwd() - try: - os.chdir(str(tmpdir)) - ret_val = numpy.f2py.compile( - fsource, - modulename='test_compile_from_strings', - extension='.f90') - assert_equal(ret_val, 0) - finally: - os.chdir(cwd) + with util.switchdir(tmpdir): + ret_val = numpy.f2py.compile(fsource, + modulename="test_compile_from_strings", + extension=".f90") + assert ret_val == 0 diff --git a/numpy/f2py/tests/test_crackfortran.py b/numpy/f2py/tests/test_crackfortran.py index 039e085b4..e33e12d62 100644 --- a/numpy/f2py/tests/test_crackfortran.py +++ b/numpy/f2py/tests/test_crackfortran.py @@ -1,6 +1,5 @@ import pytest import numpy as np -from numpy.testing import assert_array_equal, assert_equal from numpy.f2py.crackfortran import markinnerspaces from . 
import util from numpy.f2py import crackfortran @@ -10,163 +9,109 @@ import textwrap class TestNoSpace(util.F2PyTest): # issue gh-15035: add handling for endsubroutine, endfunction with no space # between "end" and the block name - code = """ - subroutine subb(k) - real(8), intent(inout) :: k(:) - k=k+1 - endsubroutine - - subroutine subc(w,k) - real(8), intent(in) :: w(:) - real(8), intent(out) :: k(size(w)) - k=w+1 - endsubroutine - - function t0(value) - character value - character t0 - t0 = value - endfunction - """ + sources = [util.getpath("tests", "src", "crackfortran", "gh15035.f")] def test_module(self): k = np.array([1, 2, 3], dtype=np.float64) w = np.array([1, 2, 3], dtype=np.float64) self.module.subb(k) - assert_array_equal(k, w + 1) + assert np.allclose(k, w + 1) self.module.subc([w, k]) - assert_array_equal(k, w + 1) - assert self.module.t0(23) == b'2' - - -class TestPublicPrivate(): - - def test_defaultPrivate(self, tmp_path): - f_path = tmp_path / "mod.f90" - with f_path.open('w') as ff: - ff.write(textwrap.dedent("""\ - module foo - private - integer :: a - public :: setA - integer :: b - contains - subroutine setA(v) - integer, intent(in) :: v - a = v - end subroutine setA - end module foo - """)) - mod = crackfortran.crackfortran([str(f_path)]) + assert np.allclose(k, w + 1) + assert self.module.t0(23) == b"2" + + +class TestPublicPrivate: + def test_defaultPrivate(self): + fpath = util.getpath("tests", "src", "crackfortran", "privatemod.f90") + mod = crackfortran.crackfortran([str(fpath)]) assert len(mod) == 1 mod = mod[0] - assert 'private' in mod['vars']['a']['attrspec'] - assert 'public' not in mod['vars']['a']['attrspec'] - assert 'private' in mod['vars']['b']['attrspec'] - assert 'public' not in mod['vars']['b']['attrspec'] - assert 'private' not in mod['vars']['seta']['attrspec'] - assert 'public' in mod['vars']['seta']['attrspec'] + assert "private" in mod["vars"]["a"]["attrspec"] + assert "public" not in mod["vars"]["a"]["attrspec"] + 
assert "private" in mod["vars"]["b"]["attrspec"] + assert "public" not in mod["vars"]["b"]["attrspec"] + assert "private" not in mod["vars"]["seta"]["attrspec"] + assert "public" in mod["vars"]["seta"]["attrspec"] def test_defaultPublic(self, tmp_path): - f_path = tmp_path / "mod.f90" - with f_path.open('w') as ff: - ff.write(textwrap.dedent("""\ - module foo - public - integer, private :: a - public :: setA - contains - subroutine setA(v) - integer, intent(in) :: v - a = v - end subroutine setA - end module foo - """)) - mod = crackfortran.crackfortran([str(f_path)]) + fpath = util.getpath("tests", "src", "crackfortran", "publicmod.f90") + mod = crackfortran.crackfortran([str(fpath)]) + assert len(mod) == 1 + mod = mod[0] + assert "private" in mod["vars"]["a"]["attrspec"] + assert "public" not in mod["vars"]["a"]["attrspec"] + assert "private" not in mod["vars"]["seta"]["attrspec"] + assert "public" in mod["vars"]["seta"]["attrspec"] + + +class TestModuleProcedure(): + def test_moduleOperators(self, tmp_path): + fpath = util.getpath("tests", "src", "crackfortran", "operators.f90") + mod = crackfortran.crackfortran([str(fpath)]) assert len(mod) == 1 mod = mod[0] - assert 'private' in mod['vars']['a']['attrspec'] - assert 'public' not in mod['vars']['a']['attrspec'] - assert 'private' not in mod['vars']['seta']['attrspec'] - assert 'public' in mod['vars']['seta']['attrspec'] + assert "body" in mod and len(mod["body"]) == 9 + assert mod["body"][1]["name"] == "operator(.item.)" + assert "implementedby" in mod["body"][1] + assert mod["body"][1]["implementedby"] == \ + ["item_int", "item_real"] + assert mod["body"][2]["name"] == "operator(==)" + assert "implementedby" in mod["body"][2] + assert mod["body"][2]["implementedby"] == ["items_are_equal"] + assert mod["body"][3]["name"] == "assignment(=)" + assert "implementedby" in mod["body"][3] + assert mod["body"][3]["implementedby"] == \ + ["get_int", "get_real"] class TestExternal(util.F2PyTest): # issue gh-17859: add 
external attribute support - code = """ - integer(8) function external_as_statement(fcn) - implicit none - external fcn - integer(8) :: fcn - external_as_statement = fcn(0) - end - - integer(8) function external_as_attribute(fcn) - implicit none - integer(8), external :: fcn - external_as_attribute = fcn(0) - end - """ + sources = [util.getpath("tests", "src", "crackfortran", "gh17859.f")] def test_external_as_statement(self): def incr(x): return x + 123 + r = self.module.external_as_statement(incr) assert r == 123 def test_external_as_attribute(self): def incr(x): return x + 123 + r = self.module.external_as_attribute(incr) assert r == 123 class TestCrackFortran(util.F2PyTest): - - suffix = '.f90' - - code = textwrap.dedent(""" - subroutine gh2848( & - ! first 2 parameters - par1, par2,& - ! last 2 parameters - par3, par4) - - integer, intent(in) :: par1, par2 - integer, intent(out) :: par3, par4 - - par3 = par1 - par4 = par2 - - end subroutine gh2848 - """) + # gh-2848: commented lines between parameters in subroutine parameter lists + sources = [util.getpath("tests", "src", "crackfortran", "gh2848.f90")] def test_gh2848(self): r = self.module.gh2848(1, 2) assert r == (1, 2) -class TestMarkinnerspaces(): - # issue #14118: markinnerspaces does not handle multiple quotations +class TestMarkinnerspaces: + # gh-14118: markinnerspaces does not handle multiple quotations def test_do_not_touch_normal_spaces(self): test_list = ["a ", " a", "a b c", "'abcdefghij'"] for i in test_list: - assert_equal(markinnerspaces(i), i) + assert markinnerspaces(i) == i def test_one_relevant_space(self): - assert_equal(markinnerspaces("a 'b c' \\\' \\\'"), "a 'b@_@c' \\' \\'") - assert_equal(markinnerspaces(r'a "b c" \" \"'), r'a "b@_@c" \" \"') + assert markinnerspaces("a 'b c' \\' \\'") == "a 'b@_@c' \\' \\'" + assert markinnerspaces(r'a "b c" \" \"') == r'a "b@_@c" \" \"' def test_ignore_inner_quotes(self): - assert_equal(markinnerspaces('a \'b c" " d\' e'), - "a 'b@_@c\"@_@\"@_@d' 
e") - assert_equal(markinnerspaces('a "b c\' \' d" e'), - "a \"b@_@c'@_@'@_@d\" e") + assert markinnerspaces("a 'b c\" \" d' e") == "a 'b@_@c\"@_@\"@_@d' e" + assert markinnerspaces("a \"b c' ' d\" e") == "a \"b@_@c'@_@'@_@d\" e" def test_multiple_relevant_spaces(self): - assert_equal(markinnerspaces("a 'b c' 'd e'"), "a 'b@_@c' 'd@_@e'") - assert_equal(markinnerspaces(r'a "b c" "d e"'), r'a "b@_@c" "d@_@e"') + assert markinnerspaces("a 'b c' 'd e'") == "a 'b@_@c' 'd@_@e'" + assert markinnerspaces(r'a "b c" "d e"') == r'a "b@_@c" "d@_@e"' class TestDimSpec(util.F2PyTest): @@ -200,7 +145,7 @@ class TestDimSpec(util.F2PyTest): """ - suffix = '.f90' + suffix = ".f90" code_template = textwrap.dedent(""" function get_arr_size_{count}(a, n) result (length) @@ -221,33 +166,38 @@ class TestDimSpec(util.F2PyTest): end subroutine """) - linear_dimspecs = ['n', '2*n', '2:n', 'n/2', '5 - n/2', '3*n:20', - 'n*(n+1):n*(n+5)'] - nonlinear_dimspecs = ['2*n:3*n*n+2*n'] + linear_dimspecs = [ + "n", "2*n", "2:n", "n/2", "5 - n/2", "3*n:20", "n*(n+1):n*(n+5)", + "2*n, n" + ] + nonlinear_dimspecs = ["2*n:3*n*n+2*n"] all_dimspecs = linear_dimspecs + nonlinear_dimspecs - code = '' + code = "" for count, dimspec in enumerate(all_dimspecs): + lst = [(d.split(":")[0] if ":" in d else "1") for d in dimspec.split(',')] code += code_template.format( - count=count, dimspec=dimspec, - first=dimspec.split(':')[0] if ':' in dimspec else '1') + count=count, + dimspec=dimspec, + first=", ".join(lst), + ) - @pytest.mark.parametrize('dimspec', all_dimspecs) + @pytest.mark.parametrize("dimspec", all_dimspecs) def test_array_size(self, dimspec): count = self.all_dimspecs.index(dimspec) - get_arr_size = getattr(self.module, f'get_arr_size_{count}') + get_arr_size = getattr(self.module, f"get_arr_size_{count}") for n in [1, 2, 3, 4, 5]: sz, a = get_arr_size(n) - assert len(a) == sz + assert a.size == sz - @pytest.mark.parametrize('dimspec', all_dimspecs) + @pytest.mark.parametrize("dimspec", all_dimspecs) 
def test_inv_array_size(self, dimspec): count = self.all_dimspecs.index(dimspec) - get_arr_size = getattr(self.module, f'get_arr_size_{count}') - get_inv_arr_size = getattr(self.module, f'get_inv_arr_size_{count}') + get_arr_size = getattr(self.module, f"get_arr_size_{count}") + get_inv_arr_size = getattr(self.module, f"get_inv_arr_size_{count}") for n in [1, 2, 3, 4, 5]: sz, a = get_arr_size(n) @@ -266,18 +216,9 @@ class TestDimSpec(util.F2PyTest): assert sz == sz1, (n, n1, sz, sz1) -class TestModuleDeclaration(): +class TestModuleDeclaration: def test_dependencies(self, tmp_path): - f_path = tmp_path / "mod.f90" - with f_path.open('w') as ff: - ff.write(textwrap.dedent("""\ - module foo - type bar - character(len = 4) :: text - end type bar - type(bar), parameter :: abar = bar('abar') - end module foo - """)) - mod = crackfortran.crackfortran([str(f_path)]) + fpath = util.getpath("tests", "src", "crackfortran", "foo_deps.f90") + mod = crackfortran.crackfortran([str(fpath)]) assert len(mod) == 1 - assert mod[0]['vars']['abar']['='] == "bar('abar')" + assert mod[0]["vars"]["abar"]["="] == "bar('abar')" diff --git a/numpy/f2py/tests/test_kind.py b/numpy/f2py/tests/test_kind.py index a7e2b28ed..f0cb61fb6 100644 --- a/numpy/f2py/tests/test_kind.py +++ b/numpy/f2py/tests/test_kind.py @@ -1,32 +1,26 @@ import os import pytest -from numpy.testing import assert_ from numpy.f2py.crackfortran import ( _selected_int_kind_func as selected_int_kind, - _selected_real_kind_func as selected_real_kind - ) + _selected_real_kind_func as selected_real_kind, +) from . 
import util -def _path(*a): - return os.path.join(*((os.path.dirname(__file__),) + a)) - - class TestKind(util.F2PyTest): - sources = [_path('src', 'kind', 'foo.f90')] + sources = [util.getpath("tests", "src", "kind", "foo.f90")] - @pytest.mark.slow def test_all(self): selectedrealkind = self.module.selectedrealkind selectedintkind = self.module.selectedintkind for i in range(40): - assert_(selectedintkind(i) in [selected_int_kind(i), -1], - 'selectedintkind(%s): expected %r but got %r' % - (i, selected_int_kind(i), selectedintkind(i))) + assert selectedintkind(i) == selected_int_kind( + i + ), f"selectedintkind({i}): expected {selected_int_kind(i)!r} but got {selectedintkind(i)!r}" for i in range(20): - assert_(selectedrealkind(i) in [selected_real_kind(i), -1], - 'selectedrealkind(%s): expected %r but got %r' % - (i, selected_real_kind(i), selectedrealkind(i))) + assert selectedrealkind(i) == selected_real_kind( + i + ), f"selectedrealkind({i}): expected {selected_real_kind(i)!r} but got {selectedrealkind(i)!r}" diff --git a/numpy/f2py/tests/test_mixed.py b/numpy/f2py/tests/test_mixed.py index 04266ca5b..80653b7d2 100644 --- a/numpy/f2py/tests/test_mixed.py +++ b/numpy/f2py/tests/test_mixed.py @@ -2,23 +2,21 @@ import os import textwrap import pytest -from numpy.testing import assert_, assert_equal, IS_PYPY +from numpy.testing import IS_PYPY from . 
import util -def _path(*a): - return os.path.join(*((os.path.dirname(__file__),) + a)) - - class TestMixed(util.F2PyTest): - sources = [_path('src', 'mixed', 'foo.f'), - _path('src', 'mixed', 'foo_fixed.f90'), - _path('src', 'mixed', 'foo_free.f90')] + sources = [ + util.getpath("tests", "src", "mixed", "foo.f"), + util.getpath("tests", "src", "mixed", "foo_fixed.f90"), + util.getpath("tests", "src", "mixed", "foo_free.f90"), + ] def test_all(self): - assert_(self.module.bar11() == 11) - assert_(self.module.foo_fixed.bar12() == 12) - assert_(self.module.foo_free.bar13() == 13) + assert self.module.bar11() == 11 + assert self.module.foo_fixed.bar12() == 12 + assert self.module.foo_free.bar13() == 13 @pytest.mark.xfail(IS_PYPY, reason="PyPy cannot modify tp_doc after PyType_Ready") @@ -32,4 +30,4 @@ class TestMixed(util.F2PyTest): ------- a : int """) - assert_equal(self.module.bar11.__doc__, expected) + assert self.module.bar11.__doc__ == expected diff --git a/numpy/f2py/tests/test_module_doc.py b/numpy/f2py/tests/test_module_doc.py index 4b9555cee..28822d405 100644 --- a/numpy/f2py/tests/test_module_doc.py +++ b/numpy/f2py/tests/test_module_doc.py @@ -4,27 +4,24 @@ import pytest import textwrap from . 
import util -from numpy.testing import assert_equal, IS_PYPY - - -def _path(*a): - return os.path.join(*((os.path.dirname(__file__),) + a)) +from numpy.testing import IS_PYPY class TestModuleDocString(util.F2PyTest): - sources = [_path('src', 'module_data', 'module_data_docstring.f90')] + sources = [ + util.getpath("tests", "src", "module_data", + "module_data_docstring.f90") + ] - @pytest.mark.skipif(sys.platform=='win32', - reason='Fails with MinGW64 Gfortran (Issue #9673)') + @pytest.mark.skipif(sys.platform == "win32", + reason="Fails with MinGW64 Gfortran (Issue #9673)") @pytest.mark.xfail(IS_PYPY, reason="PyPy cannot modify tp_doc after PyType_Ready") def test_module_docstring(self): - assert_equal(self.module.mod.__doc__, - textwrap.dedent('''\ + assert self.module.mod.__doc__ == textwrap.dedent("""\ i : 'i'-scalar x : 'i'-array(4) a : 'f'-array(2,3) b : 'f'-array(-1,-1), not allocated\x00 foo()\n - Wrapper for ``foo``.\n\n''') - ) + Wrapper for ``foo``.\n\n""") diff --git a/numpy/f2py/tests/test_parameter.py b/numpy/f2py/tests/test_parameter.py index b61827169..2f620eaa0 100644 --- a/numpy/f2py/tests/test_parameter.py +++ b/numpy/f2py/tests/test_parameter.py @@ -2,115 +2,111 @@ import os import pytest import numpy as np -from numpy.testing import assert_raises, assert_equal from . 
import util -def _path(*a): - return os.path.join(*((os.path.dirname(__file__),) + a)) - - class TestParameters(util.F2PyTest): # Check that intent(in out) translates as intent(inout) - sources = [_path('src', 'parameter', 'constant_real.f90'), - _path('src', 'parameter', 'constant_integer.f90'), - _path('src', 'parameter', 'constant_both.f90'), - _path('src', 'parameter', 'constant_compound.f90'), - _path('src', 'parameter', 'constant_non_compound.f90'), + sources = [ + util.getpath("tests", "src", "parameter", "constant_real.f90"), + util.getpath("tests", "src", "parameter", "constant_integer.f90"), + util.getpath("tests", "src", "parameter", "constant_both.f90"), + util.getpath("tests", "src", "parameter", "constant_compound.f90"), + util.getpath("tests", "src", "parameter", "constant_non_compound.f90"), ] @pytest.mark.slow def test_constant_real_single(self): # non-contiguous should raise error x = np.arange(6, dtype=np.float32)[::2] - assert_raises(ValueError, self.module.foo_single, x) + pytest.raises(ValueError, self.module.foo_single, x) # check values with contiguous array x = np.arange(3, dtype=np.float32) self.module.foo_single(x) - assert_equal(x, [0 + 1 + 2*3, 1, 2]) + assert np.allclose(x, [0 + 1 + 2 * 3, 1, 2]) @pytest.mark.slow def test_constant_real_double(self): # non-contiguous should raise error x = np.arange(6, dtype=np.float64)[::2] - assert_raises(ValueError, self.module.foo_double, x) + pytest.raises(ValueError, self.module.foo_double, x) # check values with contiguous array x = np.arange(3, dtype=np.float64) self.module.foo_double(x) - assert_equal(x, [0 + 1 + 2*3, 1, 2]) + assert np.allclose(x, [0 + 1 + 2 * 3, 1, 2]) @pytest.mark.slow def test_constant_compound_int(self): # non-contiguous should raise error x = np.arange(6, dtype=np.int32)[::2] - assert_raises(ValueError, self.module.foo_compound_int, x) + pytest.raises(ValueError, self.module.foo_compound_int, x) # check values with contiguous array x = np.arange(3, dtype=np.int32) 
self.module.foo_compound_int(x) - assert_equal(x, [0 + 1 + 2*6, 1, 2]) + assert np.allclose(x, [0 + 1 + 2 * 6, 1, 2]) @pytest.mark.slow def test_constant_non_compound_int(self): # check values x = np.arange(4, dtype=np.int32) self.module.foo_non_compound_int(x) - assert_equal(x, [0 + 1 + 2 + 3*4, 1, 2, 3]) + assert np.allclose(x, [0 + 1 + 2 + 3 * 4, 1, 2, 3]) @pytest.mark.slow def test_constant_integer_int(self): # non-contiguous should raise error x = np.arange(6, dtype=np.int32)[::2] - assert_raises(ValueError, self.module.foo_int, x) + pytest.raises(ValueError, self.module.foo_int, x) # check values with contiguous array x = np.arange(3, dtype=np.int32) self.module.foo_int(x) - assert_equal(x, [0 + 1 + 2*3, 1, 2]) + assert np.allclose(x, [0 + 1 + 2 * 3, 1, 2]) @pytest.mark.slow def test_constant_integer_long(self): # non-contiguous should raise error x = np.arange(6, dtype=np.int64)[::2] - assert_raises(ValueError, self.module.foo_long, x) + pytest.raises(ValueError, self.module.foo_long, x) # check values with contiguous array x = np.arange(3, dtype=np.int64) self.module.foo_long(x) - assert_equal(x, [0 + 1 + 2*3, 1, 2]) + assert np.allclose(x, [0 + 1 + 2 * 3, 1, 2]) @pytest.mark.slow def test_constant_both(self): # non-contiguous should raise error x = np.arange(6, dtype=np.float64)[::2] - assert_raises(ValueError, self.module.foo, x) + pytest.raises(ValueError, self.module.foo, x) # check values with contiguous array x = np.arange(3, dtype=np.float64) self.module.foo(x) - assert_equal(x, [0 + 1*3*3 + 2*3*3, 1*3, 2*3]) + assert np.allclose(x, [0 + 1 * 3 * 3 + 2 * 3 * 3, 1 * 3, 2 * 3]) @pytest.mark.slow def test_constant_no(self): # non-contiguous should raise error x = np.arange(6, dtype=np.float64)[::2] - assert_raises(ValueError, self.module.foo_no, x) + pytest.raises(ValueError, self.module.foo_no, x) # check values with contiguous array x = np.arange(3, dtype=np.float64) self.module.foo_no(x) - assert_equal(x, [0 + 1*3*3 + 2*3*3, 1*3, 2*3]) + assert 
np.allclose(x, [0 + 1 * 3 * 3 + 2 * 3 * 3, 1 * 3, 2 * 3]) @pytest.mark.slow def test_constant_sum(self): # non-contiguous should raise error x = np.arange(6, dtype=np.float64)[::2] - assert_raises(ValueError, self.module.foo_sum, x) + pytest.raises(ValueError, self.module.foo_sum, x) # check values with contiguous array x = np.arange(3, dtype=np.float64) self.module.foo_sum(x) - assert_equal(x, [0 + 1*3*3 + 2*3*3, 1*3, 2*3]) + assert np.allclose(x, [0 + 1 * 3 * 3 + 2 * 3 * 3, 1 * 3, 2 * 3]) diff --git a/numpy/f2py/tests/test_quoted_character.py b/numpy/f2py/tests/test_quoted_character.py index 20c77666c..82671cd8e 100644 --- a/numpy/f2py/tests/test_quoted_character.py +++ b/numpy/f2py/tests/test_quoted_character.py @@ -4,29 +4,13 @@ import sys import pytest -from numpy.testing import assert_equal from . import util class TestQuotedCharacter(util.F2PyTest): - code = """ - SUBROUTINE FOO(OUT1, OUT2, OUT3, OUT4, OUT5, OUT6) - CHARACTER SINGLE, DOUBLE, SEMICOL, EXCLA, OPENPAR, CLOSEPAR - PARAMETER (SINGLE="'", DOUBLE='"', SEMICOL=';', EXCLA="!", - 1 OPENPAR="(", CLOSEPAR=")") - CHARACTER OUT1, OUT2, OUT3, OUT4, OUT5, OUT6 -Cf2py intent(out) OUT1, OUT2, OUT3, OUT4, OUT5, OUT6 - OUT1 = SINGLE - OUT2 = DOUBLE - OUT3 = SEMICOL - OUT4 = EXCLA - OUT5 = OPENPAR - OUT6 = CLOSEPAR - RETURN - END - """ + sources = [util.getpath("tests", "src", "quoted_character", "foo.f")] - @pytest.mark.skipif(sys.platform=='win32', - reason='Fails with MinGW64 Gfortran (Issue #9673)') + @pytest.mark.skipif(sys.platform == "win32", + reason="Fails with MinGW64 Gfortran (Issue #9673)") def test_quoted_character(self): - assert_equal(self.module.foo(), (b"'", b'"', b';', b'!', b'(', b')')) + assert self.module.foo() == (b"'", b'"', b";", b"!", b"(", b")") diff --git a/numpy/f2py/tests/test_regression.py b/numpy/f2py/tests/test_regression.py index b91499e4a..40b9d4327 100644 --- a/numpy/f2py/tests/test_regression.py +++ b/numpy/f2py/tests/test_regression.py @@ -2,54 +2,46 @@ import os import 
pytest import numpy as np -from numpy.testing import assert_, assert_raises, assert_equal, assert_string_equal from . import util -def _path(*a): - return os.path.join(*((os.path.dirname(__file__),) + a)) - - class TestIntentInOut(util.F2PyTest): # Check that intent(in out) translates as intent(inout) - sources = [_path('src', 'regression', 'inout.f90')] + sources = [util.getpath("tests", "src", "regression", "inout.f90")] @pytest.mark.slow def test_inout(self): # non-contiguous should raise error x = np.arange(6, dtype=np.float32)[::2] - assert_raises(ValueError, self.module.foo, x) + pytest.raises(ValueError, self.module.foo, x) # check values with contiguous array x = np.arange(3, dtype=np.float32) self.module.foo(x) - assert_equal(x, [3, 1, 2]) + assert np.allclose(x, [3, 1, 2]) class TestNumpyVersionAttribute(util.F2PyTest): # Check that th attribute __f2py_numpy_version__ is present # in the compiled module and that has the value np.__version__. - sources = [_path('src', 'regression', 'inout.f90')] + sources = [util.getpath("tests", "src", "regression", "inout.f90")] @pytest.mark.slow def test_numpy_version_attribute(self): # Check that self.module has an attribute named "__f2py_numpy_version__" - assert_(hasattr(self.module, "__f2py_numpy_version__"), - msg="Fortran module does not have __f2py_numpy_version__") + assert hasattr(self.module, "__f2py_numpy_version__") # Check that the attribute __f2py_numpy_version__ is a string - assert_(isinstance(self.module.__f2py_numpy_version__, str), - msg="__f2py_numpy_version__ is not a string") + assert isinstance(self.module.__f2py_numpy_version__, str) # Check that __f2py_numpy_version__ has the value numpy.__version__ - assert_string_equal(np.__version__, self.module.__f2py_numpy_version__) + assert np.__version__ == self.module.__f2py_numpy_version__ def test_include_path(): incdir = np.f2py.get_include() fnames_in_dir = os.listdir(incdir) - for fname in ('fortranobject.c', 'fortranobject.h'): + for fname in 
("fortranobject.c", "fortranobject.h"): assert fname in fnames_in_dir - diff --git a/numpy/f2py/tests/test_return_character.py b/numpy/f2py/tests/test_return_character.py index 2c999ed0b..21055faef 100644 --- a/numpy/f2py/tests/test_return_character.py +++ b/numpy/f2py/tests/test_return_character.py @@ -1,145 +1,45 @@ import pytest from numpy import array -from numpy.testing import assert_ from . import util import platform -IS_S390X = platform.machine() == 's390x' +IS_S390X = platform.machine() == "s390x" -class TestReturnCharacter(util.F2PyTest): +class TestReturnCharacter(util.F2PyTest): def check_function(self, t, tname): - if tname in ['t0', 't1', 's0', 's1']: - assert_(t(23) == b'2') - r = t('ab') - assert_(r == b'a', repr(r)) - r = t(array('ab')) - assert_(r == b'a', repr(r)) - r = t(array(77, 'u1')) - assert_(r == b'M', repr(r)) - #assert_(_raises(ValueError, t, array([77,87]))) - #assert_(_raises(ValueError, t, array(77))) - elif tname in ['ts', 'ss']: - assert_(t(23) == b'23', repr(t(23))) - assert_(t('123456789abcdef') == b'123456789a') - elif tname in ['t5', 's5']: - assert_(t(23) == b'23', repr(t(23))) - assert_(t('ab') == b'ab', repr(t('ab'))) - assert_(t('123456789abcdef') == b'12345') + if tname in ["t0", "t1", "s0", "s1"]: + assert t(23) == b"2" + r = t("ab") + assert r == b"a" + r = t(array("ab")) + assert r == b"a" + r = t(array(77, "u1")) + assert r == b"M" + elif tname in ["ts", "ss"]: + assert t(23) == b"23" + assert t("123456789abcdef") == b"123456789a" + elif tname in ["t5", "s5"]: + assert t(23) == b"23" + assert t("ab") == b"ab" + assert t("123456789abcdef") == b"12345" else: raise NotImplementedError -class TestF77ReturnCharacter(TestReturnCharacter): - code = """ - function t0(value) - character value - character t0 - t0 = value - end - function t1(value) - character*1 value - character*1 t1 - t1 = value - end - function t5(value) - character*5 value - character*5 t5 - t5 = value - end - function ts(value) - character*(*) value - 
character*(*) ts - ts = value - end - - subroutine s0(t0,value) - character value - character t0 -cf2py intent(out) t0 - t0 = value - end - subroutine s1(t1,value) - character*1 value - character*1 t1 -cf2py intent(out) t1 - t1 = value - end - subroutine s5(t5,value) - character*5 value - character*5 t5 -cf2py intent(out) t5 - t5 = value - end - subroutine ss(ts,value) - character*(*) value - character*10 ts -cf2py intent(out) ts - ts = value - end - """ +class TestFReturnCharacter(TestReturnCharacter): + sources = [ + util.getpath("tests", "src", "return_character", "foo77.f"), + util.getpath("tests", "src", "return_character", "foo90.f90"), + ] @pytest.mark.xfail(IS_S390X, reason="callback returns ' '") - @pytest.mark.parametrize('name', 't0,t1,t5,s0,s1,s5,ss'.split(',')) - def test_all(self, name): + @pytest.mark.parametrize("name", "t0,t1,t5,s0,s1,s5,ss".split(",")) + def test_all_f77(self, name): self.check_function(getattr(self.module, name), name) - -class TestF90ReturnCharacter(TestReturnCharacter): - suffix = ".f90" - code = """ -module f90_return_char - contains - function t0(value) - character :: value - character :: t0 - t0 = value - end function t0 - function t1(value) - character(len=1) :: value - character(len=1) :: t1 - t1 = value - end function t1 - function t5(value) - character(len=5) :: value - character(len=5) :: t5 - t5 = value - end function t5 - function ts(value) - character(len=*) :: value - character(len=10) :: ts - ts = value - end function ts - - subroutine s0(t0,value) - character :: value - character :: t0 -!f2py intent(out) t0 - t0 = value - end subroutine s0 - subroutine s1(t1,value) - character(len=1) :: value - character(len=1) :: t1 -!f2py intent(out) t1 - t1 = value - end subroutine s1 - subroutine s5(t5,value) - character(len=5) :: value - character(len=5) :: t5 -!f2py intent(out) t5 - t5 = value - end subroutine s5 - subroutine ss(ts,value) - character(len=*) :: value - character(len=10) :: ts -!f2py intent(out) ts - ts = 
value - end subroutine ss -end module f90_return_char - """ - @pytest.mark.xfail(IS_S390X, reason="callback returns ' '") - @pytest.mark.parametrize('name', 't0,t1,t5,ts,s0,s1,s5,ss'.split(',')) - def test_all(self, name): + @pytest.mark.parametrize("name", "t0,t1,t5,ts,s0,s1,s5,ss".split(",")) + def test_all_f90(self, name): self.check_function(getattr(self.module.f90_return_char, name), name) diff --git a/numpy/f2py/tests/test_return_complex.py b/numpy/f2py/tests/test_return_complex.py index 3d2e2b94f..dc5592899 100644 --- a/numpy/f2py/tests/test_return_complex.py +++ b/numpy/f2py/tests/test_return_complex.py @@ -1,163 +1,65 @@ import pytest from numpy import array -from numpy.testing import assert_, assert_raises from . import util class TestReturnComplex(util.F2PyTest): - def check_function(self, t, tname): - if tname in ['t0', 't8', 's0', 's8']: + if tname in ["t0", "t8", "s0", "s8"]: err = 1e-5 else: err = 0.0 - assert_(abs(t(234j) - 234.0j) <= err) - assert_(abs(t(234.6) - 234.6) <= err) - assert_(abs(t(234) - 234.0) <= err) - assert_(abs(t(234.6 + 3j) - (234.6 + 3j)) <= err) - #assert_( abs(t('234')-234.)<=err) - #assert_( abs(t('234.6')-234.6)<=err) - assert_(abs(t(-234) + 234.) <= err) - assert_(abs(t([234]) - 234.) <= err) - assert_(abs(t((234,)) - 234.) <= err) - assert_(abs(t(array(234)) - 234.) <= err) - assert_(abs(t(array(23 + 4j, 'F')) - (23 + 4j)) <= err) - assert_(abs(t(array([234])) - 234.) <= err) - assert_(abs(t(array([[234]])) - 234.) <= err) - assert_(abs(t(array([234], 'b')) + 22.) <= err) - assert_(abs(t(array([234], 'h')) - 234.) <= err) - assert_(abs(t(array([234], 'i')) - 234.) <= err) - assert_(abs(t(array([234], 'l')) - 234.) <= err) - assert_(abs(t(array([234], 'q')) - 234.) <= err) - assert_(abs(t(array([234], 'f')) - 234.) <= err) - assert_(abs(t(array([234], 'd')) - 234.) <= err) - assert_(abs(t(array([234 + 3j], 'F')) - (234 + 3j)) <= err) - assert_(abs(t(array([234], 'D')) - 234.) 
<= err) - - #assert_raises(TypeError, t, array([234], 'a1')) - assert_raises(TypeError, t, 'abc') - - assert_raises(IndexError, t, []) - assert_raises(IndexError, t, ()) - - assert_raises(TypeError, t, t) - assert_raises(TypeError, t, {}) + assert abs(t(234j) - 234.0j) <= err + assert abs(t(234.6) - 234.6) <= err + assert abs(t(234) - 234.0) <= err + assert abs(t(234.6 + 3j) - (234.6 + 3j)) <= err + # assert abs(t('234')-234.)<=err + # assert abs(t('234.6')-234.6)<=err + assert abs(t(-234) + 234.0) <= err + assert abs(t([234]) - 234.0) <= err + assert abs(t((234, )) - 234.0) <= err + assert abs(t(array(234)) - 234.0) <= err + assert abs(t(array(23 + 4j, "F")) - (23 + 4j)) <= err + assert abs(t(array([234])) - 234.0) <= err + assert abs(t(array([[234]])) - 234.0) <= err + assert abs(t(array([234], "b")) + 22.0) <= err + assert abs(t(array([234], "h")) - 234.0) <= err + assert abs(t(array([234], "i")) - 234.0) <= err + assert abs(t(array([234], "l")) - 234.0) <= err + assert abs(t(array([234], "q")) - 234.0) <= err + assert abs(t(array([234], "f")) - 234.0) <= err + assert abs(t(array([234], "d")) - 234.0) <= err + assert abs(t(array([234 + 3j], "F")) - (234 + 3j)) <= err + assert abs(t(array([234], "D")) - 234.0) <= err + + # pytest.raises(TypeError, t, array([234], 'a1')) + pytest.raises(TypeError, t, "abc") + + pytest.raises(IndexError, t, []) + pytest.raises(IndexError, t, ()) + + pytest.raises(TypeError, t, t) + pytest.raises(TypeError, t, {}) try: - r = t(10 ** 400) - assert_(repr(r) in ['(inf+0j)', '(Infinity+0j)'], repr(r)) + r = t(10**400) + assert repr(r) in ["(inf+0j)", "(Infinity+0j)"] except OverflowError: pass -class TestF77ReturnComplex(TestReturnComplex): - code = """ - function t0(value) - complex value - complex t0 - t0 = value - end - function t8(value) - complex*8 value - complex*8 t8 - t8 = value - end - function t16(value) - complex*16 value - complex*16 t16 - t16 = value - end - function td(value) - double complex value - double complex td - td 
= value - end +class TestFReturnComplex(TestReturnComplex): + sources = [ + util.getpath("tests", "src", "return_complex", "foo77.f"), + util.getpath("tests", "src", "return_complex", "foo90.f90"), + ] - subroutine s0(t0,value) - complex value - complex t0 -cf2py intent(out) t0 - t0 = value - end - subroutine s8(t8,value) - complex*8 value - complex*8 t8 -cf2py intent(out) t8 - t8 = value - end - subroutine s16(t16,value) - complex*16 value - complex*16 t16 -cf2py intent(out) t16 - t16 = value - end - subroutine sd(td,value) - double complex value - double complex td -cf2py intent(out) td - td = value - end - """ - - @pytest.mark.parametrize('name', 't0,t8,t16,td,s0,s8,s16,sd'.split(',')) - def test_all(self, name): + @pytest.mark.parametrize("name", "t0,t8,t16,td,s0,s8,s16,sd".split(",")) + def test_all_f77(self, name): self.check_function(getattr(self.module, name), name) - -class TestF90ReturnComplex(TestReturnComplex): - suffix = ".f90" - code = """ -module f90_return_complex - contains - function t0(value) - complex :: value - complex :: t0 - t0 = value - end function t0 - function t8(value) - complex(kind=4) :: value - complex(kind=4) :: t8 - t8 = value - end function t8 - function t16(value) - complex(kind=8) :: value - complex(kind=8) :: t16 - t16 = value - end function t16 - function td(value) - double complex :: value - double complex :: td - td = value - end function td - - subroutine s0(t0,value) - complex :: value - complex :: t0 -!f2py intent(out) t0 - t0 = value - end subroutine s0 - subroutine s8(t8,value) - complex(kind=4) :: value - complex(kind=4) :: t8 -!f2py intent(out) t8 - t8 = value - end subroutine s8 - subroutine s16(t16,value) - complex(kind=8) :: value - complex(kind=8) :: t16 -!f2py intent(out) t16 - t16 = value - end subroutine s16 - subroutine sd(td,value) - double complex :: value - double complex :: td -!f2py intent(out) td - td = value - end subroutine sd -end module f90_return_complex - """ - - @pytest.mark.parametrize('name', 
't0,t8,t16,td,s0,s8,s16,sd'.split(',')) - def test_all(self, name): - self.check_function(getattr(self.module.f90_return_complex, name), name) + @pytest.mark.parametrize("name", "t0,t8,t16,td,s0,s8,s16,sd".split(",")) + def test_all_f90(self, name): + self.check_function(getattr(self.module.f90_return_complex, name), + name) diff --git a/numpy/f2py/tests/test_return_integer.py b/numpy/f2py/tests/test_return_integer.py index 0a8121dc1..a43c677fd 100644 --- a/numpy/f2py/tests/test_return_integer.py +++ b/numpy/f2py/tests/test_return_integer.py @@ -1,175 +1,55 @@ import pytest from numpy import array -from numpy.testing import assert_, assert_raises from . import util class TestReturnInteger(util.F2PyTest): - def check_function(self, t, tname): - assert_(t(123) == 123, repr(t(123))) - assert_(t(123.6) == 123) - assert_(t('123') == 123) - assert_(t(-123) == -123) - assert_(t([123]) == 123) - assert_(t((123,)) == 123) - assert_(t(array(123)) == 123) - assert_(t(array([123])) == 123) - assert_(t(array([[123]])) == 123) - assert_(t(array([123], 'b')) == 123) - assert_(t(array([123], 'h')) == 123) - assert_(t(array([123], 'i')) == 123) - assert_(t(array([123], 'l')) == 123) - assert_(t(array([123], 'B')) == 123) - assert_(t(array([123], 'f')) == 123) - assert_(t(array([123], 'd')) == 123) - - #assert_raises(ValueError, t, array([123],'S3')) - assert_raises(ValueError, t, 'abc') - - assert_raises(IndexError, t, []) - assert_raises(IndexError, t, ()) - - assert_raises(Exception, t, t) - assert_raises(Exception, t, {}) - - if tname in ['t8', 's8']: - assert_raises(OverflowError, t, 100000000000000000000000) - assert_raises(OverflowError, t, 10000000011111111111111.23) - - -class TestF77ReturnInteger(TestReturnInteger): - code = """ - function t0(value) - integer value - integer t0 - t0 = value - end - function t1(value) - integer*1 value - integer*1 t1 - t1 = value - end - function t2(value) - integer*2 value - integer*2 t2 - t2 = value - end - function t4(value) - integer*4 
value - integer*4 t4 - t4 = value - end - function t8(value) - integer*8 value - integer*8 t8 - t8 = value - end - - subroutine s0(t0,value) - integer value - integer t0 -cf2py intent(out) t0 - t0 = value - end - subroutine s1(t1,value) - integer*1 value - integer*1 t1 -cf2py intent(out) t1 - t1 = value - end - subroutine s2(t2,value) - integer*2 value - integer*2 t2 -cf2py intent(out) t2 - t2 = value - end - subroutine s4(t4,value) - integer*4 value - integer*4 t4 -cf2py intent(out) t4 - t4 = value - end - subroutine s8(t8,value) - integer*8 value - integer*8 t8 -cf2py intent(out) t8 - t8 = value - end - """ - - @pytest.mark.parametrize('name', - 't0,t1,t2,t4,t8,s0,s1,s2,s4,s8'.split(',')) - def test_all(self, name): + assert t(123) == 123 + assert t(123.6) == 123 + assert t("123") == 123 + assert t(-123) == -123 + assert t([123]) == 123 + assert t((123, )) == 123 + assert t(array(123)) == 123 + assert t(array([123])) == 123 + assert t(array([[123]])) == 123 + assert t(array([123], "b")) == 123 + assert t(array([123], "h")) == 123 + assert t(array([123], "i")) == 123 + assert t(array([123], "l")) == 123 + assert t(array([123], "B")) == 123 + assert t(array([123], "f")) == 123 + assert t(array([123], "d")) == 123 + + # pytest.raises(ValueError, t, array([123],'S3')) + pytest.raises(ValueError, t, "abc") + + pytest.raises(IndexError, t, []) + pytest.raises(IndexError, t, ()) + + pytest.raises(Exception, t, t) + pytest.raises(Exception, t, {}) + + if tname in ["t8", "s8"]: + pytest.raises(OverflowError, t, 100000000000000000000000) + pytest.raises(OverflowError, t, 10000000011111111111111.23) + + +class TestFReturnInteger(TestReturnInteger): + sources = [ + util.getpath("tests", "src", "return_integer", "foo77.f"), + util.getpath("tests", "src", "return_integer", "foo90.f90"), + ] + + @pytest.mark.parametrize("name", + "t0,t1,t2,t4,t8,s0,s1,s2,s4,s8".split(",")) + def test_all_f77(self, name): self.check_function(getattr(self.module, name), name) - -class 
TestF90ReturnInteger(TestReturnInteger): - suffix = ".f90" - code = """ -module f90_return_integer - contains - function t0(value) - integer :: value - integer :: t0 - t0 = value - end function t0 - function t1(value) - integer(kind=1) :: value - integer(kind=1) :: t1 - t1 = value - end function t1 - function t2(value) - integer(kind=2) :: value - integer(kind=2) :: t2 - t2 = value - end function t2 - function t4(value) - integer(kind=4) :: value - integer(kind=4) :: t4 - t4 = value - end function t4 - function t8(value) - integer(kind=8) :: value - integer(kind=8) :: t8 - t8 = value - end function t8 - - subroutine s0(t0,value) - integer :: value - integer :: t0 -!f2py intent(out) t0 - t0 = value - end subroutine s0 - subroutine s1(t1,value) - integer(kind=1) :: value - integer(kind=1) :: t1 -!f2py intent(out) t1 - t1 = value - end subroutine s1 - subroutine s2(t2,value) - integer(kind=2) :: value - integer(kind=2) :: t2 -!f2py intent(out) t2 - t2 = value - end subroutine s2 - subroutine s4(t4,value) - integer(kind=4) :: value - integer(kind=4) :: t4 -!f2py intent(out) t4 - t4 = value - end subroutine s4 - subroutine s8(t8,value) - integer(kind=8) :: value - integer(kind=8) :: t8 -!f2py intent(out) t8 - t8 = value - end subroutine s8 -end module f90_return_integer - """ - - @pytest.mark.parametrize('name', - 't0,t1,t2,t4,t8,s0,s1,s2,s4,s8'.split(',')) - def test_all(self, name): - self.check_function(getattr(self.module.f90_return_integer, name), name) + @pytest.mark.parametrize("name", + "t0,t1,t2,t4,t8,s0,s1,s2,s4,s8".split(",")) + def test_all_f90(self, name): + self.check_function(getattr(self.module.f90_return_integer, name), + name) diff --git a/numpy/f2py/tests/test_return_logical.py b/numpy/f2py/tests/test_return_logical.py index 9db939c7e..6f64745ee 100644 --- a/numpy/f2py/tests/test_return_logical.py +++ b/numpy/f2py/tests/test_return_logical.py @@ -1,185 +1,64 @@ import pytest from numpy import array -from numpy.testing import assert_, assert_raises 
from . import util class TestReturnLogical(util.F2PyTest): - def check_function(self, t): - assert_(t(True) == 1, repr(t(True))) - assert_(t(False) == 0, repr(t(False))) - assert_(t(0) == 0) - assert_(t(None) == 0) - assert_(t(0.0) == 0) - assert_(t(0j) == 0) - assert_(t(1j) == 1) - assert_(t(234) == 1) - assert_(t(234.6) == 1) - assert_(t(234.6 + 3j) == 1) - assert_(t('234') == 1) - assert_(t('aaa') == 1) - assert_(t('') == 0) - assert_(t([]) == 0) - assert_(t(()) == 0) - assert_(t({}) == 0) - assert_(t(t) == 1) - assert_(t(-234) == 1) - assert_(t(10 ** 100) == 1) - assert_(t([234]) == 1) - assert_(t((234,)) == 1) - assert_(t(array(234)) == 1) - assert_(t(array([234])) == 1) - assert_(t(array([[234]])) == 1) - assert_(t(array([234], 'b')) == 1) - assert_(t(array([234], 'h')) == 1) - assert_(t(array([234], 'i')) == 1) - assert_(t(array([234], 'l')) == 1) - assert_(t(array([234], 'f')) == 1) - assert_(t(array([234], 'd')) == 1) - assert_(t(array([234 + 3j], 'F')) == 1) - assert_(t(array([234], 'D')) == 1) - assert_(t(array(0)) == 0) - assert_(t(array([0])) == 0) - assert_(t(array([[0]])) == 0) - assert_(t(array([0j])) == 0) - assert_(t(array([1])) == 1) - assert_raises(ValueError, t, array([0, 0])) - + assert t(True) == 1 + assert t(False) == 0 + assert t(0) == 0 + assert t(None) == 0 + assert t(0.0) == 0 + assert t(0j) == 0 + assert t(1j) == 1 + assert t(234) == 1 + assert t(234.6) == 1 + assert t(234.6 + 3j) == 1 + assert t("234") == 1 + assert t("aaa") == 1 + assert t("") == 0 + assert t([]) == 0 + assert t(()) == 0 + assert t({}) == 0 + assert t(t) == 1 + assert t(-234) == 1 + assert t(10**100) == 1 + assert t([234]) == 1 + assert t((234, )) == 1 + assert t(array(234)) == 1 + assert t(array([234])) == 1 + assert t(array([[234]])) == 1 + assert t(array([234], "b")) == 1 + assert t(array([234], "h")) == 1 + assert t(array([234], "i")) == 1 + assert t(array([234], "l")) == 1 + assert t(array([234], "f")) == 1 + assert t(array([234], "d")) == 1 + assert t(array([234 
+ 3j], "F")) == 1 + assert t(array([234], "D")) == 1 + assert t(array(0)) == 0 + assert t(array([0])) == 0 + assert t(array([[0]])) == 0 + assert t(array([0j])) == 0 + assert t(array([1])) == 1 + pytest.raises(ValueError, t, array([0, 0])) -class TestF77ReturnLogical(TestReturnLogical): - code = """ - function t0(value) - logical value - logical t0 - t0 = value - end - function t1(value) - logical*1 value - logical*1 t1 - t1 = value - end - function t2(value) - logical*2 value - logical*2 t2 - t2 = value - end - function t4(value) - logical*4 value - logical*4 t4 - t4 = value - end -c function t8(value) -c logical*8 value -c logical*8 t8 -c t8 = value -c end - subroutine s0(t0,value) - logical value - logical t0 -cf2py intent(out) t0 - t0 = value - end - subroutine s1(t1,value) - logical*1 value - logical*1 t1 -cf2py intent(out) t1 - t1 = value - end - subroutine s2(t2,value) - logical*2 value - logical*2 t2 -cf2py intent(out) t2 - t2 = value - end - subroutine s4(t4,value) - logical*4 value - logical*4 t4 -cf2py intent(out) t4 - t4 = value - end -c subroutine s8(t8,value) -c logical*8 value -c logical*8 t8 -cf2py intent(out) t8 -c t8 = value -c end - """ +class TestFReturnLogical(TestReturnLogical): + sources = [ + util.getpath("tests", "src", "return_logical", "foo77.f"), + util.getpath("tests", "src", "return_logical", "foo90.f90"), + ] @pytest.mark.slow - @pytest.mark.parametrize('name', 't0,t1,t2,t4,s0,s1,s2,s4'.split(',')) - def test_all(self, name): + @pytest.mark.parametrize("name", "t0,t1,t2,t4,s0,s1,s2,s4".split(",")) + def test_all_f77(self, name): self.check_function(getattr(self.module, name)) - -class TestF90ReturnLogical(TestReturnLogical): - suffix = ".f90" - code = """ -module f90_return_logical - contains - function t0(value) - logical :: value - logical :: t0 - t0 = value - end function t0 - function t1(value) - logical(kind=1) :: value - logical(kind=1) :: t1 - t1 = value - end function t1 - function t2(value) - logical(kind=2) :: value - 
logical(kind=2) :: t2 - t2 = value - end function t2 - function t4(value) - logical(kind=4) :: value - logical(kind=4) :: t4 - t4 = value - end function t4 - function t8(value) - logical(kind=8) :: value - logical(kind=8) :: t8 - t8 = value - end function t8 - - subroutine s0(t0,value) - logical :: value - logical :: t0 -!f2py intent(out) t0 - t0 = value - end subroutine s0 - subroutine s1(t1,value) - logical(kind=1) :: value - logical(kind=1) :: t1 -!f2py intent(out) t1 - t1 = value - end subroutine s1 - subroutine s2(t2,value) - logical(kind=2) :: value - logical(kind=2) :: t2 -!f2py intent(out) t2 - t2 = value - end subroutine s2 - subroutine s4(t4,value) - logical(kind=4) :: value - logical(kind=4) :: t4 -!f2py intent(out) t4 - t4 = value - end subroutine s4 - subroutine s8(t8,value) - logical(kind=8) :: value - logical(kind=8) :: t8 -!f2py intent(out) t8 - t8 = value - end subroutine s8 -end module f90_return_logical - """ - @pytest.mark.slow - @pytest.mark.parametrize('name', - 't0,t1,t2,t4,t8,s0,s1,s2,s4,s8'.split(',')) - def test_all(self, name): + @pytest.mark.parametrize("name", + "t0,t1,t2,t4,t8,s0,s1,s2,s4,s8".split(",")) + def test_all_f90(self, name): self.check_function(getattr(self.module.f90_return_logical, name)) diff --git a/numpy/f2py/tests/test_return_real.py b/numpy/f2py/tests/test_return_real.py index 8e5022a8e..d9fecef1a 100644 --- a/numpy/f2py/tests/test_return_real.py +++ b/numpy/f2py/tests/test_return_real.py @@ -2,58 +2,56 @@ import platform import pytest from numpy import array -from numpy.testing import assert_, assert_raises from . 
import util class TestReturnReal(util.F2PyTest): - def check_function(self, t, tname): - if tname in ['t0', 't4', 's0', 's4']: + if tname in ["t0", "t4", "s0", "s4"]: err = 1e-5 else: err = 0.0 - assert_(abs(t(234) - 234.0) <= err) - assert_(abs(t(234.6) - 234.6) <= err) - assert_(abs(t('234') - 234) <= err) - assert_(abs(t('234.6') - 234.6) <= err) - assert_(abs(t(-234) + 234) <= err) - assert_(abs(t([234]) - 234) <= err) - assert_(abs(t((234,)) - 234.) <= err) - assert_(abs(t(array(234)) - 234.) <= err) - assert_(abs(t(array([234])) - 234.) <= err) - assert_(abs(t(array([[234]])) - 234.) <= err) - assert_(abs(t(array([234], 'b')) + 22) <= err) - assert_(abs(t(array([234], 'h')) - 234.) <= err) - assert_(abs(t(array([234], 'i')) - 234.) <= err) - assert_(abs(t(array([234], 'l')) - 234.) <= err) - assert_(abs(t(array([234], 'B')) - 234.) <= err) - assert_(abs(t(array([234], 'f')) - 234.) <= err) - assert_(abs(t(array([234], 'd')) - 234.) <= err) - if tname in ['t0', 't4', 's0', 's4']: - assert_(t(1e200) == t(1e300)) # inf - - #assert_raises(ValueError, t, array([234], 'S1')) - assert_raises(ValueError, t, 'abc') - - assert_raises(IndexError, t, []) - assert_raises(IndexError, t, ()) - - assert_raises(Exception, t, t) - assert_raises(Exception, t, {}) + assert abs(t(234) - 234.0) <= err + assert abs(t(234.6) - 234.6) <= err + assert abs(t("234") - 234) <= err + assert abs(t("234.6") - 234.6) <= err + assert abs(t(-234) + 234) <= err + assert abs(t([234]) - 234) <= err + assert abs(t((234, )) - 234.0) <= err + assert abs(t(array(234)) - 234.0) <= err + assert abs(t(array([234])) - 234.0) <= err + assert abs(t(array([[234]])) - 234.0) <= err + assert abs(t(array([234], "b")) + 22) <= err + assert abs(t(array([234], "h")) - 234.0) <= err + assert abs(t(array([234], "i")) - 234.0) <= err + assert abs(t(array([234], "l")) - 234.0) <= err + assert abs(t(array([234], "B")) - 234.0) <= err + assert abs(t(array([234], "f")) - 234.0) <= err + assert abs(t(array([234], "d")) - 
234.0) <= err + if tname in ["t0", "t4", "s0", "s4"]: + assert t(1e200) == t(1e300) # inf + + # pytest.raises(ValueError, t, array([234], 'S1')) + pytest.raises(ValueError, t, "abc") + + pytest.raises(IndexError, t, []) + pytest.raises(IndexError, t, ()) + + pytest.raises(Exception, t, t) + pytest.raises(Exception, t, {}) try: - r = t(10 ** 400) - assert_(repr(r) in ['inf', 'Infinity'], repr(r)) + r = t(10**400) + assert repr(r) in ["inf", "Infinity"] except OverflowError: pass - @pytest.mark.skipif( - platform.system() == 'Darwin', + platform.system() == "Darwin", reason="Prone to error when run with numpy/f2py/tests on mac os, " - "but not when run in isolation") + "but not when run in isolation", +) class TestCReturnReal(TestReturnReal): suffix = ".pyf" module_name = "c_ext_return_real" @@ -86,118 +84,21 @@ end interface end python module c_ext_return_real """ - @pytest.mark.parametrize('name', 't4,t8,s4,s8'.split(',')) + @pytest.mark.parametrize("name", "t4,t8,s4,s8".split(",")) def test_all(self, name): self.check_function(getattr(self.module, name), name) -class TestF77ReturnReal(TestReturnReal): - code = """ - function t0(value) - real value - real t0 - t0 = value - end - function t4(value) - real*4 value - real*4 t4 - t4 = value - end - function t8(value) - real*8 value - real*8 t8 - t8 = value - end - function td(value) - double precision value - double precision td - td = value - end - - subroutine s0(t0,value) - real value - real t0 -cf2py intent(out) t0 - t0 = value - end - subroutine s4(t4,value) - real*4 value - real*4 t4 -cf2py intent(out) t4 - t4 = value - end - subroutine s8(t8,value) - real*8 value - real*8 t8 -cf2py intent(out) t8 - t8 = value - end - subroutine sd(td,value) - double precision value - double precision td -cf2py intent(out) td - td = value - end - """ +class TestFReturnReal(TestReturnReal): + sources = [ + util.getpath("tests", "src", "return_real", "foo77.f"), + util.getpath("tests", "src", "return_real", "foo90.f90"), + ] - 
@pytest.mark.parametrize('name', 't0,t4,t8,td,s0,s4,s8,sd'.split(',')) - def test_all(self, name): + @pytest.mark.parametrize("name", "t0,t4,t8,td,s0,s4,s8,sd".split(",")) + def test_all_f77(self, name): self.check_function(getattr(self.module, name), name) - -class TestF90ReturnReal(TestReturnReal): - suffix = ".f90" - code = """ -module f90_return_real - contains - function t0(value) - real :: value - real :: t0 - t0 = value - end function t0 - function t4(value) - real(kind=4) :: value - real(kind=4) :: t4 - t4 = value - end function t4 - function t8(value) - real(kind=8) :: value - real(kind=8) :: t8 - t8 = value - end function t8 - function td(value) - double precision :: value - double precision :: td - td = value - end function td - - subroutine s0(t0,value) - real :: value - real :: t0 -!f2py intent(out) t0 - t0 = value - end subroutine s0 - subroutine s4(t4,value) - real(kind=4) :: value - real(kind=4) :: t4 -!f2py intent(out) t4 - t4 = value - end subroutine s4 - subroutine s8(t8,value) - real(kind=8) :: value - real(kind=8) :: t8 -!f2py intent(out) t8 - t8 = value - end subroutine s8 - subroutine sd(td,value) - double precision :: value - double precision :: td -!f2py intent(out) td - td = value - end subroutine sd -end module f90_return_real - """ - - @pytest.mark.parametrize('name', 't0,t4,t8,td,s0,s4,s8,sd'.split(',')) - def test_all(self, name): + @pytest.mark.parametrize("name", "t0,t4,t8,td,s0,s4,s8,sd".split(",")) + def test_all_f90(self, name): self.check_function(getattr(self.module.f90_return_real, name), name) diff --git a/numpy/f2py/tests/test_semicolon_split.py b/numpy/f2py/tests/test_semicolon_split.py index d8b4bf222..5375543e0 100644 --- a/numpy/f2py/tests/test_semicolon_split.py +++ b/numpy/f2py/tests/test_semicolon_split.py @@ -2,17 +2,18 @@ import platform import pytest from . 
import util -from numpy.testing import assert_equal + @pytest.mark.skipif( - platform.system() == 'Darwin', + platform.system() == "Darwin", reason="Prone to error when run with numpy/f2py/tests on mac os, " - "but not when run in isolation") + "but not when run in isolation", +) class TestMultiline(util.F2PyTest): suffix = ".pyf" module_name = "multiline" - code = """ -python module {module} + code = f""" +python module {module_name} usercode ''' void foo(int* x) {{ char dummy = ';'; @@ -25,22 +26,23 @@ void foo(int* x) {{ integer intent(out) :: x end subroutine foo end interface -end python module {module} - """.format(module=module_name) +end python module {module_name} + """ def test_multiline(self): - assert_equal(self.module.foo(), 42) + assert self.module.foo() == 42 @pytest.mark.skipif( - platform.system() == 'Darwin', + platform.system() == "Darwin", reason="Prone to error when run with numpy/f2py/tests on mac os, " - "but not when run in isolation") + "but not when run in isolation", +) class TestCallstatement(util.F2PyTest): suffix = ".pyf" module_name = "callstatement" - code = """ -python module {module} + code = f""" +python module {module_name} usercode ''' void foo(int* x) {{ }} @@ -56,8 +58,8 @@ void foo(int* x) {{ }} end subroutine foo end interface -end python module {module} - """.format(module=module_name) +end python module {module_name} + """ def test_callstatement(self): - assert_equal(self.module.foo(), 42) + assert self.module.foo() == 42 diff --git a/numpy/f2py/tests/test_size.py b/numpy/f2py/tests/test_size.py index b609fa77f..bd2c349df 100644 --- a/numpy/f2py/tests/test_size.py +++ b/numpy/f2py/tests/test_size.py @@ -1,49 +1,45 @@ import os import pytest +import numpy as np -from numpy.testing import assert_equal from . 
import util -def _path(*a): - return os.path.join(*((os.path.dirname(__file__),) + a)) - - class TestSizeSumExample(util.F2PyTest): - sources = [_path('src', 'size', 'foo.f90')] + sources = [util.getpath("tests", "src", "size", "foo.f90")] @pytest.mark.slow def test_all(self): r = self.module.foo([[]]) - assert_equal(r, [0], repr(r)) + assert r == [0] r = self.module.foo([[1, 2]]) - assert_equal(r, [3], repr(r)) + assert r == [3] r = self.module.foo([[1, 2], [3, 4]]) - assert_equal(r, [3, 7], repr(r)) + assert np.allclose(r, [3, 7]) r = self.module.foo([[1, 2], [3, 4], [5, 6]]) - assert_equal(r, [3, 7, 11], repr(r)) + assert np.allclose(r, [3, 7, 11]) @pytest.mark.slow def test_transpose(self): r = self.module.trans([[]]) - assert_equal(r.T, [[]], repr(r)) + assert np.allclose(r.T, np.array([[]])) r = self.module.trans([[1, 2]]) - assert_equal(r, [[1], [2]], repr(r)) + assert np.allclose(r, [[1.], [2.]]) r = self.module.trans([[1, 2, 3], [4, 5, 6]]) - assert_equal(r, [[1, 4], [2, 5], [3, 6]], repr(r)) + assert np.allclose(r, [[1, 4], [2, 5], [3, 6]]) @pytest.mark.slow def test_flatten(self): r = self.module.flatten([[]]) - assert_equal(r, [], repr(r)) + assert np.allclose(r, []) r = self.module.flatten([[1, 2]]) - assert_equal(r, [1, 2], repr(r)) + assert np.allclose(r, [1, 2]) r = self.module.flatten([[1, 2, 3], [4, 5, 6]]) - assert_equal(r, [1, 2, 3, 4, 5, 6], repr(r)) + assert np.allclose(r, [1, 2, 3, 4, 5, 6]) diff --git a/numpy/f2py/tests/test_string.py b/numpy/f2py/tests/test_string.py index 7b27f8786..9e937188c 100644 --- a/numpy/f2py/tests/test_string.py +++ b/numpy/f2py/tests/test_string.py @@ -1,109 +1,43 @@ import os import pytest import textwrap -from numpy.testing import assert_array_equal import numpy as np from . 
import util -def _path(*a): - return os.path.join(*((os.path.dirname(__file__),) + a)) - - class TestString(util.F2PyTest): - sources = [_path('src', 'string', 'char.f90')] + sources = [util.getpath("tests", "src", "string", "char.f90")] @pytest.mark.slow def test_char(self): - strings = np.array(['ab', 'cd', 'ef'], dtype='c').T - inp, out = self.module.char_test.change_strings(strings, - strings.shape[1]) - assert_array_equal(inp, strings) + strings = np.array(["ab", "cd", "ef"], dtype="c").T + inp, out = self.module.char_test.change_strings( + strings, strings.shape[1]) + assert inp == pytest.approx(strings) expected = strings.copy() - expected[1, :] = 'AAA' - assert_array_equal(out, expected) + expected[1, :] = "AAA" + assert out == pytest.approx(expected) class TestDocStringArguments(util.F2PyTest): - suffix = '.f' - - code = """ -C FILE: STRING.F - SUBROUTINE FOO(A,B,C,D) - CHARACTER*5 A, B - CHARACTER*(*) C,D -Cf2py intent(in) a,c -Cf2py intent(inout) b,d - PRINT*, "A=",A - PRINT*, "B=",B - PRINT*, "C=",C - PRINT*, "D=",D - PRINT*, "CHANGE A,B,C,D" - A(1:1) = 'A' - B(1:1) = 'B' - C(1:1) = 'C' - D(1:1) = 'D' - PRINT*, "A=",A - PRINT*, "B=",B - PRINT*, "C=",C - PRINT*, "D=",D - END -C END OF FILE STRING.F - """ + sources = [util.getpath("tests", "src", "string", "string.f")] def test_example(self): - a = np.array(b'123\0\0') - b = np.array(b'123\0\0') - c = np.array(b'123') - d = np.array(b'123') + a = np.array(b"123\0\0") + b = np.array(b"123\0\0") + c = np.array(b"123") + d = np.array(b"123") self.module.foo(a, b, c, d) - assert a.tobytes() == b'123\0\0' - assert b.tobytes() == b'B23\0\0', (b.tobytes(),) - assert c.tobytes() == b'123' - assert d.tobytes() == b'D23' + assert a.tobytes() == b"123\0\0" + assert b.tobytes() == b"B23\0\0" + assert c.tobytes() == b"123" + assert d.tobytes() == b"D23" class TestFixedString(util.F2PyTest): - suffix = '.f90' - - code = textwrap.dedent(""" - function sint(s) result(i) - implicit none - character(len=*) :: s - integer 
:: j, i - i = 0 - do j=len(s), 1, -1 - if (.not.((i.eq.0).and.(s(j:j).eq.' '))) then - i = i + ichar(s(j:j)) * 10 ** (j - 1) - endif - end do - return - end function sint - - function test_in_bytes4(a) result (i) - implicit none - integer :: sint - character(len=4) :: a - integer :: i - i = sint(a) - a(1:1) = 'A' - return - end function test_in_bytes4 - - function test_inout_bytes4(a) result (i) - implicit none - integer :: sint - character(len=4), intent(inout) :: a - integer :: i - if (a(1:1).ne.' ') then - a(1:1) = 'E' - endif - i = sint(a) - return - end function test_inout_bytes4 - """) + sources = [util.getpath("tests", "src", "string", "fixed_string.f90")] @staticmethod def _sint(s, start=0, end=None): @@ -122,41 +56,41 @@ class TestFixedString(util.F2PyTest): end = len(s) i = 0 for j in range(start, min(end, len(s))): - i += s[j] * 10 ** j + i += s[j] * 10**j return i - def _get_input(self, intent='in'): - if intent in ['in']: - yield '' - yield '1' - yield '1234' - yield '12345' - yield b'' - yield b'\0' - yield b'1' - yield b'\01' - yield b'1\0' - yield b'1234' - yield b'12345' - yield np.ndarray((), np.bytes_, buffer=b'') # array(b'', dtype='|S0') - yield np.array(b'') # array(b'', dtype='|S1') - yield np.array(b'\0') - yield np.array(b'1') - yield np.array(b'1\0') - yield np.array(b'\01') - yield np.array(b'1234') - yield np.array(b'123\0') - yield np.array(b'12345') + def _get_input(self, intent="in"): + if intent in ["in"]: + yield "" + yield "1" + yield "1234" + yield "12345" + yield b"" + yield b"\0" + yield b"1" + yield b"\01" + yield b"1\0" + yield b"1234" + yield b"12345" + yield np.ndarray((), np.bytes_, buffer=b"") # array(b'', dtype='|S0') + yield np.array(b"") # array(b'', dtype='|S1') + yield np.array(b"\0") + yield np.array(b"1") + yield np.array(b"1\0") + yield np.array(b"\01") + yield np.array(b"1234") + yield np.array(b"123\0") + yield np.array(b"12345") def test_intent_in(self): for s in self._get_input(): r = 
self.module.test_in_bytes4(s) # also checks that s is not changed inplace expected = self._sint(s, end=4) - assert r == expected, (s) + assert r == expected, s def test_intent_inout(self): - for s in self._get_input(intent='inout'): + for s in self._get_input(intent="inout"): rest = self._sint(s, start=4) r = self.module.test_inout_bytes4(s) expected = self._sint(s, end=4) diff --git a/numpy/f2py/tests/test_symbolic.py b/numpy/f2py/tests/test_symbolic.py index 52cabac53..e8dec72f0 100644 --- a/numpy/f2py/tests/test_symbolic.py +++ b/numpy/f2py/tests/test_symbolic.py @@ -1,35 +1,56 @@ -from numpy.testing import assert_raises +import pytest + from numpy.f2py.symbolic import ( - Expr, Op, ArithOp, Language, - as_symbol, as_number, as_string, as_array, as_complex, - as_terms, as_factors, eliminate_quotes, insert_quotes, - fromstring, as_expr, as_apply, - as_numer_denom, as_ternary, as_ref, as_deref, - normalize, as_eq, as_ne, as_lt, as_gt, as_le, as_ge - ) + Expr, + Op, + ArithOp, + Language, + as_symbol, + as_number, + as_string, + as_array, + as_complex, + as_terms, + as_factors, + eliminate_quotes, + insert_quotes, + fromstring, + as_expr, + as_apply, + as_numer_denom, + as_ternary, + as_ref, + as_deref, + normalize, + as_eq, + as_ne, + as_lt, + as_gt, + as_le, + as_ge, +) from . 
import util class TestSymbolic(util.F2PyTest): - def test_eliminate_quotes(self): def worker(s): r, d = eliminate_quotes(s) s1 = insert_quotes(r, d) assert s1 == s - for kind in ['', 'mykind_']: + for kind in ["", "mykind_"]: worker(kind + '"1234" // "ABCD"') worker(kind + '"1234" // ' + kind + '"ABCD"') - worker(kind + '"1234" // \'ABCD\'') - worker(kind + '"1234" // ' + kind + '\'ABCD\'') + worker(kind + "\"1234\" // 'ABCD'") + worker(kind + '"1234" // ' + kind + "'ABCD'") worker(kind + '"1\\"2\'AB\'34"') - worker('a = ' + kind + "'1\\'2\"AB\"34'") + worker("a = " + kind + "'1\\'2\"AB\"34'") def test_sanity(self): - x = as_symbol('x') - y = as_symbol('y') - z = as_symbol('z') + x = as_symbol("x") + y = as_symbol("y") + z = as_symbol("z") assert x.op == Op.SYMBOL assert repr(x) == "Expr(Op.SYMBOL, 'x')" @@ -70,7 +91,7 @@ class TestSymbolic(util.F2PyTest): assert s != s2 a = as_array((n, m)) - b = as_array((n,)) + b = as_array((n, )) assert a.op == Op.ARRAY assert repr(a) == ("Expr(Op.ARRAY, (Expr(Op.INTEGER, (123, 4))," " Expr(Op.INTEGER, (456, 4))))") @@ -108,88 +129,90 @@ class TestSymbolic(util.F2PyTest): assert hash(e) is not None def test_tostring_fortran(self): - x = as_symbol('x') - y = as_symbol('y') - z = as_symbol('z') + x = as_symbol("x") + y = as_symbol("y") + z = as_symbol("z") n = as_number(123) m = as_number(456) a = as_array((n, m)) c = as_complex(n, m) - assert str(x) == 'x' - assert str(n) == '123' - assert str(a) == '[123, 456]' - assert str(c) == '(123, 456)' - - assert str(Expr(Op.TERMS, {x: 1})) == 'x' - assert str(Expr(Op.TERMS, {x: 2})) == '2 * x' - assert str(Expr(Op.TERMS, {x: -1})) == '-x' - assert str(Expr(Op.TERMS, {x: -2})) == '-2 * x' - assert str(Expr(Op.TERMS, {x: 1, y: 1})) == 'x + y' - assert str(Expr(Op.TERMS, {x: -1, y: -1})) == '-x - y' - assert str(Expr(Op.TERMS, {x: 2, y: 3})) == '2 * x + 3 * y' - assert str(Expr(Op.TERMS, {x: -2, y: 3})) == '-2 * x + 3 * y' - assert str(Expr(Op.TERMS, {x: 2, y: -3})) == '2 * x - 3 * y' - - 
assert str(Expr(Op.FACTORS, {x: 1})) == 'x' - assert str(Expr(Op.FACTORS, {x: 2})) == 'x ** 2' - assert str(Expr(Op.FACTORS, {x: -1})) == 'x ** -1' - assert str(Expr(Op.FACTORS, {x: -2})) == 'x ** -2' - assert str(Expr(Op.FACTORS, {x: 1, y: 1})) == 'x * y' - assert str(Expr(Op.FACTORS, {x: 2, y: 3})) == 'x ** 2 * y ** 3' + assert str(x) == "x" + assert str(n) == "123" + assert str(a) == "[123, 456]" + assert str(c) == "(123, 456)" + + assert str(Expr(Op.TERMS, {x: 1})) == "x" + assert str(Expr(Op.TERMS, {x: 2})) == "2 * x" + assert str(Expr(Op.TERMS, {x: -1})) == "-x" + assert str(Expr(Op.TERMS, {x: -2})) == "-2 * x" + assert str(Expr(Op.TERMS, {x: 1, y: 1})) == "x + y" + assert str(Expr(Op.TERMS, {x: -1, y: -1})) == "-x - y" + assert str(Expr(Op.TERMS, {x: 2, y: 3})) == "2 * x + 3 * y" + assert str(Expr(Op.TERMS, {x: -2, y: 3})) == "-2 * x + 3 * y" + assert str(Expr(Op.TERMS, {x: 2, y: -3})) == "2 * x - 3 * y" + + assert str(Expr(Op.FACTORS, {x: 1})) == "x" + assert str(Expr(Op.FACTORS, {x: 2})) == "x ** 2" + assert str(Expr(Op.FACTORS, {x: -1})) == "x ** -1" + assert str(Expr(Op.FACTORS, {x: -2})) == "x ** -2" + assert str(Expr(Op.FACTORS, {x: 1, y: 1})) == "x * y" + assert str(Expr(Op.FACTORS, {x: 2, y: 3})) == "x ** 2 * y ** 3" v = Expr(Op.FACTORS, {x: 2, Expr(Op.TERMS, {x: 1, y: 1}): 3}) - assert str(v) == 'x ** 2 * (x + y) ** 3', str(v) + assert str(v) == "x ** 2 * (x + y) ** 3", str(v) v = Expr(Op.FACTORS, {x: 2, Expr(Op.FACTORS, {x: 1, y: 1}): 3}) - assert str(v) == 'x ** 2 * (x * y) ** 3', str(v) + assert str(v) == "x ** 2 * (x * y) ** 3", str(v) - assert str(Expr(Op.APPLY, ('f', (), {}))) == 'f()' - assert str(Expr(Op.APPLY, ('f', (x,), {}))) == 'f(x)' - assert str(Expr(Op.APPLY, ('f', (x, y), {}))) == 'f(x, y)' - assert str(Expr(Op.INDEXING, ('f', x))) == 'f[x]' + assert str(Expr(Op.APPLY, ("f", (), {}))) == "f()" + assert str(Expr(Op.APPLY, ("f", (x, ), {}))) == "f(x)" + assert str(Expr(Op.APPLY, ("f", (x, y), {}))) == "f(x, y)" + assert 
str(Expr(Op.INDEXING, ("f", x))) == "f[x]" - assert str(as_ternary(x, y, z)) == 'merge(y, z, x)' - assert str(as_eq(x, y)) == 'x .eq. y' - assert str(as_ne(x, y)) == 'x .ne. y' - assert str(as_lt(x, y)) == 'x .lt. y' - assert str(as_le(x, y)) == 'x .le. y' - assert str(as_gt(x, y)) == 'x .gt. y' - assert str(as_ge(x, y)) == 'x .ge. y' + assert str(as_ternary(x, y, z)) == "merge(y, z, x)" + assert str(as_eq(x, y)) == "x .eq. y" + assert str(as_ne(x, y)) == "x .ne. y" + assert str(as_lt(x, y)) == "x .lt. y" + assert str(as_le(x, y)) == "x .le. y" + assert str(as_gt(x, y)) == "x .gt. y" + assert str(as_ge(x, y)) == "x .ge. y" def test_tostring_c(self): language = Language.C - x = as_symbol('x') - y = as_symbol('y') - z = as_symbol('z') + x = as_symbol("x") + y = as_symbol("y") + z = as_symbol("z") n = as_number(123) - assert Expr(Op.FACTORS, {x: 2}).tostring(language=language) == 'x * x' - assert Expr(Op.FACTORS, {x + y: 2}).tostring( - language=language) == '(x + y) * (x + y)' - assert Expr(Op.FACTORS, {x: 12}).tostring( - language=language) == 'pow(x, 12)' - - assert as_apply(ArithOp.DIV, x, y).tostring( - language=language) == 'x / y' - assert as_apply(ArithOp.DIV, x, x + y).tostring( - language=language) == 'x / (x + y)' - assert as_apply(ArithOp.DIV, x - y, x + y).tostring( - language=language) == '(x - y) / (x + y)' - assert (x + (x - y) / (x + y) + n).tostring( - language=language) == '123 + x + (x - y) / (x + y)' - - assert as_ternary(x, y, z).tostring(language=language) == '(x ? 
y : z)' - assert as_eq(x, y).tostring(language=language) == 'x == y' - assert as_ne(x, y).tostring(language=language) == 'x != y' - assert as_lt(x, y).tostring(language=language) == 'x < y' - assert as_le(x, y).tostring(language=language) == 'x <= y' - assert as_gt(x, y).tostring(language=language) == 'x > y' - assert as_ge(x, y).tostring(language=language) == 'x >= y' + assert Expr(Op.FACTORS, {x: 2}).tostring(language=language) == "x * x" + assert (Expr(Op.FACTORS, { + x + y: 2 + }).tostring(language=language) == "(x + y) * (x + y)") + assert Expr(Op.FACTORS, { + x: 12 + }).tostring(language=language) == "pow(x, 12)" + + assert as_apply(ArithOp.DIV, x, + y).tostring(language=language) == "x / y" + assert (as_apply(ArithOp.DIV, x, + x + y).tostring(language=language) == "x / (x + y)") + assert (as_apply(ArithOp.DIV, x - y, x + + y).tostring(language=language) == "(x - y) / (x + y)") + assert (x + (x - y) / (x + y) + + n).tostring(language=language) == "123 + x + (x - y) / (x + y)" + + assert as_ternary(x, y, z).tostring(language=language) == "(x ? 
y : z)" + assert as_eq(x, y).tostring(language=language) == "x == y" + assert as_ne(x, y).tostring(language=language) == "x != y" + assert as_lt(x, y).tostring(language=language) == "x < y" + assert as_le(x, y).tostring(language=language) == "x <= y" + assert as_gt(x, y).tostring(language=language) == "x > y" + assert as_ge(x, y).tostring(language=language) == "x >= y" def test_operations(self): - x = as_symbol('x') - y = as_symbol('y') - z = as_symbol('z') + x = as_symbol("x") + y = as_symbol("y") + z = as_symbol("z") assert x + x == Expr(Op.TERMS, {x: 2}) assert x - x == Expr(Op.INTEGER, (0, 4)) @@ -205,28 +228,35 @@ class TestSymbolic(util.F2PyTest): assert 2 * x + 3 * y == Expr(Op.TERMS, {x: 2, y: 3}) assert (x + y) * 2 == Expr(Op.TERMS, {x: 2, y: 2}) - assert x ** 2 == Expr(Op.FACTORS, {x: 2}) - assert (x + y) ** 2 == Expr(Op.TERMS, - {Expr(Op.FACTORS, {x: 2}): 1, - Expr(Op.FACTORS, {y: 2}): 1, - Expr(Op.FACTORS, {x: 1, y: 1}): 2}) - assert (x + y) * x == x ** 2 + x * y - assert (x + y) ** 2 == x ** 2 + 2 * x * y + y ** 2 - assert (x + y) ** 2 + (x - y) ** 2 == 2 * x ** 2 + 2 * y ** 2 + assert x**2 == Expr(Op.FACTORS, {x: 2}) + assert (x + y)**2 == Expr( + Op.TERMS, + { + Expr(Op.FACTORS, {x: 2}): 1, + Expr(Op.FACTORS, {y: 2}): 1, + Expr(Op.FACTORS, { + x: 1, + y: 1 + }): 2, + }, + ) + assert (x + y) * x == x**2 + x * y + assert (x + y)**2 == x**2 + 2 * x * y + y**2 + assert (x + y)**2 + (x - y)**2 == 2 * x**2 + 2 * y**2 assert (x + y) * z == x * z + y * z assert z * (x + y) == x * z + y * z assert (x / 2) == as_apply(ArithOp.DIV, x, as_number(2)) assert (2 * x / 2) == x - assert (3 * x / 2) == as_apply(ArithOp.DIV, 3*x, as_number(2)) + assert (3 * x / 2) == as_apply(ArithOp.DIV, 3 * x, as_number(2)) assert (4 * x / 2) == 2 * x - assert (5 * x / 2) == as_apply(ArithOp.DIV, 5*x, as_number(2)) + assert (5 * x / 2) == as_apply(ArithOp.DIV, 5 * x, as_number(2)) assert (6 * x / 2) == 3 * x - assert ((3*5) * x / 6) == as_apply(ArithOp.DIV, 5*x, as_number(2)) - 
assert (30*x**2*y**4 / (24*x**3*y**3)) == as_apply(ArithOp.DIV, - 5*y, 4*x) - assert ((15 * x / 6) / 5) == as_apply( - ArithOp.DIV, x, as_number(2)), ((15 * x / 6) / 5) + assert ((3 * 5) * x / 6) == as_apply(ArithOp.DIV, 5 * x, as_number(2)) + assert (30 * x**2 * y**4 / (24 * x**3 * y**3)) == as_apply( + ArithOp.DIV, 5 * y, 4 * x) + assert ((15 * x / 6) / 5) == as_apply(ArithOp.DIV, x, + as_number(2)), (15 * x / 6) / 5 assert (x / (5 / x)) == as_apply(ArithOp.DIV, x**2, as_number(5)) assert (x / 2.0) == Expr(Op.TERMS, {x: 0.5}) @@ -238,127 +268,128 @@ class TestSymbolic(util.F2PyTest): assert s // x == Expr(Op.CONCAT, (s, x)) assert x // s == Expr(Op.CONCAT, (x, s)) - c = as_complex(1., 2.) - assert -c == as_complex(-1., -2.) - assert c + c == as_expr((1+2j)*2) - assert c * c == as_expr((1+2j)**2) + c = as_complex(1.0, 2.0) + assert -c == as_complex(-1.0, -2.0) + assert c + c == as_expr((1 + 2j) * 2) + assert c * c == as_expr((1 + 2j)**2) def test_substitute(self): - x = as_symbol('x') - y = as_symbol('y') - z = as_symbol('z') + x = as_symbol("x") + y = as_symbol("y") + z = as_symbol("z") a = as_array((x, y)) assert x.substitute({x: y}) == y assert (x + y).substitute({x: z}) == y + z assert (x * y).substitute({x: z}) == y * z - assert (x ** 4).substitute({x: z}) == z ** 4 + assert (x**4).substitute({x: z}) == z**4 assert (x / y).substitute({x: z}) == z / y assert x.substitute({x: y + z}) == y + z assert a.substitute({x: y + z}) == as_array((y + z, y)) - assert as_ternary(x, y, z).substitute( - {x: y + z}) == as_ternary(y + z, y, z) - assert as_eq(x, y).substitute( - {x: y + z}) == as_eq(y + z, y) + assert as_ternary(x, y, + z).substitute({x: y + z}) == as_ternary(y + z, y, z) + assert as_eq(x, y).substitute({x: y + z}) == as_eq(y + z, y) def test_fromstring(self): - x = as_symbol('x') - y = as_symbol('y') - z = as_symbol('z') - f = as_symbol('f') + x = as_symbol("x") + y = as_symbol("y") + z = as_symbol("z") + f = as_symbol("f") s = as_string('"ABC"') t = 
as_string('"123"') a = as_array((x, y)) - assert fromstring('x') == x - assert fromstring('+ x') == x - assert fromstring('- x') == -x - assert fromstring('x + y') == x + y - assert fromstring('x + 1') == x + 1 - assert fromstring('x * y') == x * y - assert fromstring('x * 2') == x * 2 - assert fromstring('x / y') == x / y - assert fromstring('x ** 2', - language=Language.Python) == x ** 2 - assert fromstring('x ** 2 ** 3', - language=Language.Python) == x ** 2 ** 3 - assert fromstring('(x + y) * z') == (x + y) * z - - assert fromstring('f(x)') == f(x) - assert fromstring('f(x,y)') == f(x, y) - assert fromstring('f[x]') == f[x] - assert fromstring('f[x][y]') == f[x][y] + assert fromstring("x") == x + assert fromstring("+ x") == x + assert fromstring("- x") == -x + assert fromstring("x + y") == x + y + assert fromstring("x + 1") == x + 1 + assert fromstring("x * y") == x * y + assert fromstring("x * 2") == x * 2 + assert fromstring("x / y") == x / y + assert fromstring("x ** 2", language=Language.Python) == x**2 + assert fromstring("x ** 2 ** 3", language=Language.Python) == x**2**3 + assert fromstring("(x + y) * z") == (x + y) * z + + assert fromstring("f(x)") == f(x) + assert fromstring("f(x,y)") == f(x, y) + assert fromstring("f[x]") == f[x] + assert fromstring("f[x][y]") == f[x][y] assert fromstring('"ABC"') == s - assert normalize(fromstring('"ABC" // "123" ', - language=Language.Fortran)) == s // t + assert (normalize( + fromstring('"ABC" // "123" ', + language=Language.Fortran)) == s // t) assert fromstring('f("ABC")') == f(s) - assert fromstring('MYSTRKIND_"ABC"') == as_string('"ABC"', 'MYSTRKIND') - - assert fromstring('(/x, y/)') == a, fromstring('(/x, y/)') - assert fromstring('f((/x, y/))') == f(a) - assert fromstring('(/(x+y)*z/)') == as_array(((x+y)*z,)) - - assert fromstring('123') == as_number(123) - assert fromstring('123_2') == as_number(123, 2) - assert fromstring('123_myintkind') == as_number(123, 'myintkind') - - assert fromstring('123.0') == 
as_number(123.0, 4) - assert fromstring('123.0_4') == as_number(123.0, 4) - assert fromstring('123.0_8') == as_number(123.0, 8) - assert fromstring('123.0e0') == as_number(123.0, 4) - assert fromstring('123.0d0') == as_number(123.0, 8) - assert fromstring('123d0') == as_number(123.0, 8) - assert fromstring('123e-0') == as_number(123.0, 4) - assert fromstring('123d+0') == as_number(123.0, 8) - assert fromstring('123.0_myrealkind') == as_number(123.0, 'myrealkind') - assert fromstring('3E4') == as_number(30000.0, 4) - - assert fromstring('(1, 2)') == as_complex(1, 2) - assert fromstring('(1e2, PI)') == as_complex( - as_number(100.0), as_symbol('PI')) - - assert fromstring('[1, 2]') == as_array((as_number(1), as_number(2))) - - assert fromstring('POINT(x, y=1)') == as_apply( - as_symbol('POINT'), x, y=as_number(1)) - assert (fromstring('PERSON(name="John", age=50, shape=(/34, 23/))') - == as_apply(as_symbol('PERSON'), - name=as_string('"John"'), - age=as_number(50), - shape=as_array((as_number(34), as_number(23))))) - - assert fromstring('x?y:z') == as_ternary(x, y, z) - - assert fromstring('*x') == as_deref(x) - assert fromstring('**x') == as_deref(as_deref(x)) - assert fromstring('&x') == as_ref(x) - assert fromstring('(*x) * (*y)') == as_deref(x) * as_deref(y) - assert fromstring('(*x) * *y') == as_deref(x) * as_deref(y) - assert fromstring('*x * *y') == as_deref(x) * as_deref(y) - assert fromstring('*x**y') == as_deref(x) * as_deref(y) - - assert fromstring('x == y') == as_eq(x, y) - assert fromstring('x != y') == as_ne(x, y) - assert fromstring('x < y') == as_lt(x, y) - assert fromstring('x > y') == as_gt(x, y) - assert fromstring('x <= y') == as_le(x, y) - assert fromstring('x >= y') == as_ge(x, y) - - assert fromstring('x .eq. y', language=Language.Fortran) == as_eq(x, y) - assert fromstring('x .ne. y', language=Language.Fortran) == as_ne(x, y) - assert fromstring('x .lt. y', language=Language.Fortran) == as_lt(x, y) - assert fromstring('x .gt. 
y', language=Language.Fortran) == as_gt(x, y) - assert fromstring('x .le. y', language=Language.Fortran) == as_le(x, y) - assert fromstring('x .ge. y', language=Language.Fortran) == as_ge(x, y) + assert fromstring('MYSTRKIND_"ABC"') == as_string('"ABC"', "MYSTRKIND") + + assert fromstring("(/x, y/)") == a, fromstring("(/x, y/)") + assert fromstring("f((/x, y/))") == f(a) + assert fromstring("(/(x+y)*z/)") == as_array(((x + y) * z, )) + + assert fromstring("123") == as_number(123) + assert fromstring("123_2") == as_number(123, 2) + assert fromstring("123_myintkind") == as_number(123, "myintkind") + + assert fromstring("123.0") == as_number(123.0, 4) + assert fromstring("123.0_4") == as_number(123.0, 4) + assert fromstring("123.0_8") == as_number(123.0, 8) + assert fromstring("123.0e0") == as_number(123.0, 4) + assert fromstring("123.0d0") == as_number(123.0, 8) + assert fromstring("123d0") == as_number(123.0, 8) + assert fromstring("123e-0") == as_number(123.0, 4) + assert fromstring("123d+0") == as_number(123.0, 8) + assert fromstring("123.0_myrealkind") == as_number(123.0, "myrealkind") + assert fromstring("3E4") == as_number(30000.0, 4) + + assert fromstring("(1, 2)") == as_complex(1, 2) + assert fromstring("(1e2, PI)") == as_complex(as_number(100.0), + as_symbol("PI")) + + assert fromstring("[1, 2]") == as_array((as_number(1), as_number(2))) + + assert fromstring("POINT(x, y=1)") == as_apply(as_symbol("POINT"), + x, + y=as_number(1)) + assert fromstring( + 'PERSON(name="John", age=50, shape=(/34, 23/))') == as_apply( + as_symbol("PERSON"), + name=as_string('"John"'), + age=as_number(50), + shape=as_array((as_number(34), as_number(23))), + ) + + assert fromstring("x?y:z") == as_ternary(x, y, z) + + assert fromstring("*x") == as_deref(x) + assert fromstring("**x") == as_deref(as_deref(x)) + assert fromstring("&x") == as_ref(x) + assert fromstring("(*x) * (*y)") == as_deref(x) * as_deref(y) + assert fromstring("(*x) * *y") == as_deref(x) * as_deref(y) + assert 
fromstring("*x * *y") == as_deref(x) * as_deref(y) + assert fromstring("*x**y") == as_deref(x) * as_deref(y) + + assert fromstring("x == y") == as_eq(x, y) + assert fromstring("x != y") == as_ne(x, y) + assert fromstring("x < y") == as_lt(x, y) + assert fromstring("x > y") == as_gt(x, y) + assert fromstring("x <= y") == as_le(x, y) + assert fromstring("x >= y") == as_ge(x, y) + + assert fromstring("x .eq. y", language=Language.Fortran) == as_eq(x, y) + assert fromstring("x .ne. y", language=Language.Fortran) == as_ne(x, y) + assert fromstring("x .lt. y", language=Language.Fortran) == as_lt(x, y) + assert fromstring("x .gt. y", language=Language.Fortran) == as_gt(x, y) + assert fromstring("x .le. y", language=Language.Fortran) == as_le(x, y) + assert fromstring("x .ge. y", language=Language.Fortran) == as_ge(x, y) def test_traverse(self): - x = as_symbol('x') - y = as_symbol('y') - z = as_symbol('z') - f = as_symbol('f') + x = as_symbol("x") + y = as_symbol("y") + z = as_symbol("z") + f = as_symbol("f") # Use traverse to substitute a symbol def replace_visit(s, r=z): @@ -373,8 +404,9 @@ class TestSymbolic(util.F2PyTest): assert (f[y]).traverse(replace_visit) == f[y] assert (f[z]).traverse(replace_visit) == f[z] assert (x + y + z).traverse(replace_visit) == (2 * z + y) - assert (x + f(y, x - z)).traverse( - replace_visit) == (z + f(y, as_number(0))) + assert (x + + f(y, x - z)).traverse(replace_visit) == (z + + f(y, as_number(0))) assert as_eq(x, y).traverse(replace_visit) == as_eq(z, y) # Use traverse to collect symbols, method 1 @@ -416,28 +448,28 @@ class TestSymbolic(util.F2PyTest): assert symbols == {x} def test_linear_solve(self): - x = as_symbol('x') - y = as_symbol('y') - z = as_symbol('z') + x = as_symbol("x") + y = as_symbol("y") + z = as_symbol("z") assert x.linear_solve(x) == (as_number(1), as_number(0)) - assert (x+1).linear_solve(x) == (as_number(1), as_number(1)) - assert (2*x).linear_solve(x) == (as_number(2), as_number(0)) - assert 
(2*x+3).linear_solve(x) == (as_number(2), as_number(3)) + assert (x + 1).linear_solve(x) == (as_number(1), as_number(1)) + assert (2 * x).linear_solve(x) == (as_number(2), as_number(0)) + assert (2 * x + 3).linear_solve(x) == (as_number(2), as_number(3)) assert as_number(3).linear_solve(x) == (as_number(0), as_number(3)) assert y.linear_solve(x) == (as_number(0), y) - assert (y*z).linear_solve(x) == (as_number(0), y * z) + assert (y * z).linear_solve(x) == (as_number(0), y * z) - assert (x+y).linear_solve(x) == (as_number(1), y) - assert (z*x+y).linear_solve(x) == (z, y) - assert ((z+y)*x+y).linear_solve(x) == (z + y, y) - assert (z*y*x+y).linear_solve(x) == (z * y, y) + assert (x + y).linear_solve(x) == (as_number(1), y) + assert (z * x + y).linear_solve(x) == (z, y) + assert ((z + y) * x + y).linear_solve(x) == (z + y, y) + assert (z * y * x + y).linear_solve(x) == (z * y, y) - assert_raises(RuntimeError, lambda: (x*x).linear_solve(x)) + pytest.raises(RuntimeError, lambda: (x * x).linear_solve(x)) def test_as_numer_denom(self): - x = as_symbol('x') - y = as_symbol('y') + x = as_symbol("x") + y = as_symbol("y") n = as_number(123) assert as_numer_denom(x) == (x, as_number(1)) @@ -446,11 +478,11 @@ class TestSymbolic(util.F2PyTest): assert as_numer_denom(x / y) == (x, y) assert as_numer_denom(x * y) == (x * y, as_number(1)) assert as_numer_denom(n + x / y) == (x + n * y, y) - assert as_numer_denom(n + x / (y - x / n)) == (y * n ** 2, y * n - x) + assert as_numer_denom(n + x / (y - x / n)) == (y * n**2, y * n - x) def test_polynomial_atoms(self): - x = as_symbol('x') - y = as_symbol('y') + x = as_symbol("x") + y = as_symbol("y") n = as_number(123) assert x.polynomial_atoms() == {x} @@ -459,4 +491,4 @@ class TestSymbolic(util.F2PyTest): assert (y(x)).polynomial_atoms() == {y(x)} assert (y(x) + x).polynomial_atoms() == {y(x), x} assert (y(x) * x[y]).polynomial_atoms() == {y(x), x[y]} - assert (y(x) ** x).polynomial_atoms() == {y(x)} + assert 
(y(x)**x).polynomial_atoms() == {y(x)} diff --git a/numpy/f2py/tests/util.py b/numpy/f2py/tests/util.py index 1a6805e75..c115970f4 100644 --- a/numpy/f2py/tests/util.py +++ b/numpy/f2py/tests/util.py @@ -3,6 +3,7 @@ Utility functions for - building and importing modules on test time, using a temporary location - detecting if compilers are present +- determining paths to tests """ import os @@ -14,7 +15,10 @@ import atexit import textwrap import re import pytest +import contextlib +import numpy +from pathlib import Path from numpy.compat import asbytes, asstr from numpy.testing import temppath from importlib import import_module @@ -78,9 +82,11 @@ def _memoize(func): if isinstance(ret, Exception): raise ret return ret + wrapper.__name__ = func.__name__ return wrapper + # # Building modules # @@ -93,8 +99,7 @@ def build_module(source_files, options=[], skip=[], only=[], module_name=None): """ - code = ("import sys; sys.path = %s; import numpy.f2py as f2py2e; " - "f2py2e.main()" % repr(sys.path)) + code = f"import sys; sys.path = {sys.path!r}; import numpy.f2py; numpy.f2py.main()" d = get_module_dir() @@ -109,29 +114,30 @@ def build_module(source_files, options=[], skip=[], only=[], module_name=None): dst_sources.append(dst) base, ext = os.path.splitext(dst) - if ext in ('.f90', '.f', '.c', '.pyf'): + if ext in (".f90", ".f", ".c", ".pyf"): f2py_sources.append(dst) # Prepare options if module_name is None: module_name = get_temp_module_name() - f2py_opts = ['-c', '-m', module_name] + options + f2py_sources + f2py_opts = ["-c", "-m", module_name] + options + f2py_sources if skip: - f2py_opts += ['skip:'] + skip + f2py_opts += ["skip:"] + skip if only: - f2py_opts += ['only:'] + only + f2py_opts += ["only:"] + only # Build cwd = os.getcwd() try: os.chdir(d) - cmd = [sys.executable, '-c', code] + f2py_opts - p = subprocess.Popen(cmd, stdout=subprocess.PIPE, + cmd = [sys.executable, "-c", code] + f2py_opts + p = subprocess.Popen(cmd, + stdout=subprocess.PIPE, 
stderr=subprocess.STDOUT) out, err = p.communicate() if p.returncode != 0: - raise RuntimeError("Running f2py failed: %s\n%s" - % (cmd[4:], asstr(out))) + raise RuntimeError("Running f2py failed: %s\n%s" % + (cmd[4:], asstr(out))) finally: os.chdir(cwd) @@ -144,20 +150,28 @@ def build_module(source_files, options=[], skip=[], only=[], module_name=None): @_memoize -def build_code(source_code, options=[], skip=[], only=[], suffix=None, +def build_code(source_code, + options=[], + skip=[], + only=[], + suffix=None, module_name=None): """ Compile and import Fortran code using f2py. """ if suffix is None: - suffix = '.f' + suffix = ".f" with temppath(suffix=suffix) as path: - with open(path, 'w') as f: + with open(path, "w") as f: f.write(source_code) - return build_module([path], options=options, skip=skip, only=only, + return build_module([path], + options=options, + skip=skip, + only=only, module_name=module_name) + # # Check if compilers are available at all... # @@ -174,10 +188,10 @@ def _get_compiler_status(): # XXX: this is really ugly. But I don't know how to invoke Distutils # in a safer way... 
- code = textwrap.dedent("""\ + code = textwrap.dedent(f"""\ import os import sys - sys.path = %(syspath)s + sys.path = {repr(sys.path)} def configuration(parent_name='',top_path=None): global config @@ -189,7 +203,7 @@ def _get_compiler_status(): setup(configuration=configuration) config_cmd = config.get_config_cmd() - have_c = config_cmd.try_compile('void foo() {}') + have_c = config_cmd.try_compile('void foo() {{}}') print('COMPILERS:%%d,%%d,%%d' %% (have_c, config.have_f77c(), config.have_f90c())) @@ -199,23 +213,27 @@ def _get_compiler_status(): tmpdir = tempfile.mkdtemp() try: - script = os.path.join(tmpdir, 'setup.py') + script = os.path.join(tmpdir, "setup.py") - with open(script, 'w') as f: + with open(script, "w") as f: f.write(code) - cmd = [sys.executable, 'setup.py', 'config'] - p = subprocess.Popen(cmd, stdout=subprocess.PIPE, + cmd = [sys.executable, "setup.py", "config"] + p = subprocess.Popen(cmd, + stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=tmpdir) out, err = p.communicate() finally: shutil.rmtree(tmpdir) - m = re.search(br'COMPILERS:(\d+),(\d+),(\d+)', out) + m = re.search(br"COMPILERS:(\d+),(\d+),(\d+)", out) if m: - _compiler_status = (bool(int(m.group(1))), bool(int(m.group(2))), - bool(int(m.group(3)))) + _compiler_status = ( + bool(int(m.group(1))), + bool(int(m.group(2))), + bool(int(m.group(3))), + ) # Finished return _compiler_status @@ -231,6 +249,7 @@ def has_f77_compiler(): def has_f90_compiler(): return _get_compiler_status()[2] + # # Building with distutils # @@ -256,38 +275,38 @@ def build_module_distutils(source_files, config_code, module_name, **kw): # Build script config_code = textwrap.dedent(config_code).replace("\n", "\n ") - code = textwrap.dedent("""\ - import os - import sys - sys.path = %(syspath)s - - def configuration(parent_name='',top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('', parent_name, top_path) - %(config_code)s - return config + code = fr""" +import 
os +import sys +sys.path = {repr(sys.path)} - if __name__ == "__main__": - from numpy.distutils.core import setup - setup(configuration=configuration) - """) % dict(config_code=config_code, syspath=repr(sys.path)) +def configuration(parent_name='',top_path=None): + from numpy.distutils.misc_util import Configuration + config = Configuration('', parent_name, top_path) + {config_code} + return config - script = os.path.join(d, get_temp_module_name() + '.py') +if __name__ == "__main__": + from numpy.distutils.core import setup + setup(configuration=configuration) + """ + script = os.path.join(d, get_temp_module_name() + ".py") dst_sources.append(script) - with open(script, 'wb') as f: + with open(script, "wb") as f: f.write(asbytes(code)) # Build cwd = os.getcwd() try: os.chdir(d) - cmd = [sys.executable, script, 'build_ext', '-i'] - p = subprocess.Popen(cmd, stdout=subprocess.PIPE, + cmd = [sys.executable, script, "build_ext", "-i"] + p = subprocess.Popen(cmd, + stdout=subprocess.PIPE, stderr=subprocess.STDOUT) out, err = p.communicate() if p.returncode != 0: - raise RuntimeError("Running distutils build failed: %s\n%s" - % (cmd[4:], asstr(out))) + raise RuntimeError("Running distutils build failed: %s\n%s" % + (cmd[4:], asstr(out))) finally: os.chdir(cwd) @@ -299,6 +318,7 @@ def build_module_distutils(source_files, config_code, module_name, **kw): __import__(module_name) return sys.modules[module_name] + # # Unittest convenience # @@ -310,13 +330,13 @@ class F2PyTest: options = [] skip = [] only = [] - suffix = '.f' + suffix = ".f" module = None module_name = None def setup(self): - if sys.platform == 'win32': - pytest.skip('Fails with MinGW64 Gfortran (Issue #9673)') + if sys.platform == "win32": + pytest.skip("Fails with MinGW64 Gfortran (Issue #9673)") if self.module is not None: return @@ -334,9 +354,9 @@ class F2PyTest: needs_f77 = False needs_f90 = False for fn in codes: - if fn.endswith('.f'): + if str(fn).endswith(".f"): needs_f77 = True - elif 
fn.endswith('.f90'): + elif str(fn).endswith(".f90"): needs_f90 = True if needs_f77 and not has_f77_compiler(): pytest.skip("No Fortran 77 compiler available") @@ -345,12 +365,41 @@ class F2PyTest: # Build the module if self.code is not None: - self.module = build_code(self.code, options=self.options, - skip=self.skip, only=self.only, - suffix=self.suffix, - module_name=self.module_name) + self.module = build_code( + self.code, + options=self.options, + skip=self.skip, + only=self.only, + suffix=self.suffix, + module_name=self.module_name, + ) if self.sources is not None: - self.module = build_module(self.sources, options=self.options, - skip=self.skip, only=self.only, - module_name=self.module_name) + self.module = build_module( + self.sources, + options=self.options, + skip=self.skip, + only=self.only, + module_name=self.module_name, + ) + + +# +# Helper functions +# + + +def getpath(*a): + # Package root + d = Path(numpy.f2py.__file__).parent.resolve() + return d.joinpath(*a) + + +@contextlib.contextmanager +def switchdir(path): + curpath = Path.cwd() + os.chdir(path) + try: + yield + finally: + os.chdir(curpath) diff --git a/numpy/fft/__init__.pyi b/numpy/fft/__init__.pyi index 510e576d3..5518aac16 100644 --- a/numpy/fft/__init__.pyi +++ b/numpy/fft/__init__.pyi @@ -1,5 +1,3 @@ -from typing import Any, List - from numpy._pytesttester import PytestTester from numpy.fft._pocketfft import ( @@ -26,6 +24,6 @@ from numpy.fft.helper import ( rfftfreq as rfftfreq, ) -__all__: List[str] -__path__: List[str] +__all__: list[str] +__path__: list[str] test: PytestTester diff --git a/numpy/fft/_pocketfft.pyi b/numpy/fft/_pocketfft.pyi index 86cf6a60d..fa234fc3a 100644 --- a/numpy/fft/_pocketfft.pyi +++ b/numpy/fft/_pocketfft.pyi @@ -1,15 +1,12 @@ -from typing import ( - Literal as L, - List, - Sequence, -) +from collections.abc import Sequence +from typing import Literal as L from numpy import complex128, float64 from numpy.typing import ArrayLike, NDArray, 
_ArrayLikeNumber_co _NormKind = L[None, "backward", "ortho", "forward"] -__all__: List[str] +__all__: list[str] def fft( a: ArrayLike, diff --git a/numpy/fft/helper.pyi b/numpy/fft/helper.pyi index d75826f4e..a7f3bdf51 100644 --- a/numpy/fft/helper.pyi +++ b/numpy/fft/helper.pyi @@ -1,4 +1,4 @@ -from typing import List, Any, TypeVar, overload +from typing import Any, TypeVar, overload from numpy import generic, dtype, integer, floating, complexfloating from numpy.typing import ( @@ -15,7 +15,7 @@ _SCT = TypeVar("_SCT", bound=generic) _ArrayLike = _FiniteNestedSequence[_SupportsArray[dtype[_SCT]]] -__all__: List[str] +__all__: list[str] @overload def fftshift(x: _ArrayLike[_SCT], axes: None | _ShapeLike = ...) -> NDArray[_SCT]: ... diff --git a/numpy/lib/__init__.pyi b/numpy/lib/__init__.pyi index ae23b2ec4..7338fc6d6 100644 --- a/numpy/lib/__init__.pyi +++ b/numpy/lib/__init__.pyi @@ -1,5 +1,5 @@ import math as math -from typing import Any, List +from typing import Any from numpy._pytesttester import PytestTester @@ -237,8 +237,8 @@ from numpy.core.multiarray import ( tracemalloc_domain as tracemalloc_domain, ) -__all__: List[str] -__path__: List[str] +__all__: list[str] +__path__: list[str] test: PytestTester __version__ = version diff --git a/numpy/lib/_version.pyi b/numpy/lib/_version.pyi index 3581d639b..1c82c99b6 100644 --- a/numpy/lib/_version.pyi +++ b/numpy/lib/_version.pyi @@ -1,6 +1,4 @@ -from typing import Union, List - -__all__: List[str] +__all__: list[str] class NumpyVersion: vstring: str @@ -11,9 +9,9 @@ class NumpyVersion: pre_release: str is_devversion: bool def __init__(self, vstring: str) -> None: ... - def __lt__(self, other: Union[str, NumpyVersion]) -> bool: ... - def __le__(self, other: Union[str, NumpyVersion]) -> bool: ... - def __eq__(self, other: Union[str, NumpyVersion]) -> bool: ... # type: ignore[override] - def __ne__(self, other: Union[str, NumpyVersion]) -> bool: ... 
# type: ignore[override] - def __gt__(self, other: Union[str, NumpyVersion]) -> bool: ... - def __ge__(self, other: Union[str, NumpyVersion]) -> bool: ... + def __lt__(self, other: str | NumpyVersion) -> bool: ... + def __le__(self, other: str | NumpyVersion) -> bool: ... + def __eq__(self, other: str | NumpyVersion) -> bool: ... # type: ignore[override] + def __ne__(self, other: str | NumpyVersion) -> bool: ... # type: ignore[override] + def __gt__(self, other: str | NumpyVersion) -> bool: ... + def __ge__(self, other: str | NumpyVersion) -> bool: ... diff --git a/numpy/lib/arraypad.pyi b/numpy/lib/arraypad.pyi index d7c5f4844..1f74f2506 100644 --- a/numpy/lib/arraypad.pyi +++ b/numpy/lib/arraypad.pyi @@ -1,10 +1,7 @@ from typing import ( Literal as L, Any, - Dict, - List, overload, - Tuple, TypeVar, Protocol, ) @@ -25,9 +22,9 @@ class _ModeFunc(Protocol): def __call__( self, vector: NDArray[Any], - iaxis_pad_width: Tuple[int, int], + iaxis_pad_width: tuple[int, int], iaxis: int, - kwargs: Dict[str, Any], + kwargs: dict[str, Any], /, ) -> None: ... @@ -47,7 +44,7 @@ _ModeKind = L[ _ArrayLike = _FiniteNestedSequence[_SupportsArray[dtype[_SCT]]] -__all__: List[str] +__all__: list[str] # TODO: In practice each keyword argument is exclusive to one or more # specific modes. Consider adding more overloads to express this in the future. diff --git a/numpy/lib/arraysetops.pyi b/numpy/lib/arraysetops.pyi index 6f13ec74b..e7e230bf1 100644 --- a/numpy/lib/arraysetops.pyi +++ b/numpy/lib/arraysetops.pyi @@ -1,10 +1,7 @@ from typing import ( Literal as L, Any, - List, - Union, TypeVar, - Tuple, overload, SupportsIndex, ) @@ -92,7 +89,7 @@ _SCTNoCast = TypeVar( _ArrayLike = _FiniteNestedSequence[_SupportsArray[dtype[_SCT]]] -__all__: List[str] +__all__: list[str] @overload def ediff1d( @@ -148,7 +145,7 @@ def unique( return_inverse: L[False] = ..., return_counts: L[False] = ..., axis: None | SupportsIndex = ..., -) -> Tuple[NDArray[_SCT], NDArray[intp]]: ... 
+) -> tuple[NDArray[_SCT], NDArray[intp]]: ... @overload def unique( ar: ArrayLike, @@ -156,7 +153,7 @@ def unique( return_inverse: L[False] = ..., return_counts: L[False] = ..., axis: None | SupportsIndex = ..., -) -> Tuple[NDArray[Any], NDArray[intp]]: ... +) -> tuple[NDArray[Any], NDArray[intp]]: ... @overload def unique( ar: _ArrayLike[_SCT], @@ -164,7 +161,7 @@ def unique( return_inverse: L[True] = ..., return_counts: L[False] = ..., axis: None | SupportsIndex = ..., -) -> Tuple[NDArray[_SCT], NDArray[intp]]: ... +) -> tuple[NDArray[_SCT], NDArray[intp]]: ... @overload def unique( ar: ArrayLike, @@ -172,7 +169,7 @@ def unique( return_inverse: L[True] = ..., return_counts: L[False] = ..., axis: None | SupportsIndex = ..., -) -> Tuple[NDArray[Any], NDArray[intp]]: ... +) -> tuple[NDArray[Any], NDArray[intp]]: ... @overload def unique( ar: _ArrayLike[_SCT], @@ -180,7 +177,7 @@ def unique( return_inverse: L[False] = ..., return_counts: L[True] = ..., axis: None | SupportsIndex = ..., -) -> Tuple[NDArray[_SCT], NDArray[intp]]: ... +) -> tuple[NDArray[_SCT], NDArray[intp]]: ... @overload def unique( ar: ArrayLike, @@ -188,7 +185,7 @@ def unique( return_inverse: L[False] = ..., return_counts: L[True] = ..., axis: None | SupportsIndex = ..., -) -> Tuple[NDArray[Any], NDArray[intp]]: ... +) -> tuple[NDArray[Any], NDArray[intp]]: ... @overload def unique( ar: _ArrayLike[_SCT], @@ -196,7 +193,7 @@ def unique( return_inverse: L[True] = ..., return_counts: L[False] = ..., axis: None | SupportsIndex = ..., -) -> Tuple[NDArray[_SCT], NDArray[intp], NDArray[intp]]: ... +) -> tuple[NDArray[_SCT], NDArray[intp], NDArray[intp]]: ... @overload def unique( ar: ArrayLike, @@ -204,7 +201,7 @@ def unique( return_inverse: L[True] = ..., return_counts: L[False] = ..., axis: None | SupportsIndex = ..., -) -> Tuple[NDArray[Any], NDArray[intp], NDArray[intp]]: ... +) -> tuple[NDArray[Any], NDArray[intp], NDArray[intp]]: ... 
@overload def unique( ar: _ArrayLike[_SCT], @@ -212,7 +209,7 @@ def unique( return_inverse: L[False] = ..., return_counts: L[True] = ..., axis: None | SupportsIndex = ..., -) -> Tuple[NDArray[_SCT], NDArray[intp], NDArray[intp]]: ... +) -> tuple[NDArray[_SCT], NDArray[intp], NDArray[intp]]: ... @overload def unique( ar: ArrayLike, @@ -220,7 +217,7 @@ def unique( return_inverse: L[False] = ..., return_counts: L[True] = ..., axis: None | SupportsIndex = ..., -) -> Tuple[NDArray[Any], NDArray[intp], NDArray[intp]]: ... +) -> tuple[NDArray[Any], NDArray[intp], NDArray[intp]]: ... @overload def unique( ar: _ArrayLike[_SCT], @@ -228,7 +225,7 @@ def unique( return_inverse: L[True] = ..., return_counts: L[True] = ..., axis: None | SupportsIndex = ..., -) -> Tuple[NDArray[_SCT], NDArray[intp], NDArray[intp]]: ... +) -> tuple[NDArray[_SCT], NDArray[intp], NDArray[intp]]: ... @overload def unique( ar: ArrayLike, @@ -236,7 +233,7 @@ def unique( return_inverse: L[True] = ..., return_counts: L[True] = ..., axis: None | SupportsIndex = ..., -) -> Tuple[NDArray[Any], NDArray[intp], NDArray[intp]]: ... +) -> tuple[NDArray[Any], NDArray[intp], NDArray[intp]]: ... @overload def unique( ar: _ArrayLike[_SCT], @@ -244,7 +241,7 @@ def unique( return_inverse: L[True] = ..., return_counts: L[True] = ..., axis: None | SupportsIndex = ..., -) -> Tuple[NDArray[_SCT], NDArray[intp], NDArray[intp], NDArray[intp]]: ... +) -> tuple[NDArray[_SCT], NDArray[intp], NDArray[intp], NDArray[intp]]: ... @overload def unique( ar: ArrayLike, @@ -252,7 +249,7 @@ def unique( return_inverse: L[True] = ..., return_counts: L[True] = ..., axis: None | SupportsIndex = ..., -) -> Tuple[NDArray[Any], NDArray[intp], NDArray[intp], NDArray[intp]]: ... +) -> tuple[NDArray[Any], NDArray[intp], NDArray[intp], NDArray[intp]]: ... 
@overload def intersect1d( @@ -274,14 +271,14 @@ def intersect1d( ar2: _ArrayLike[_SCTNoCast], assume_unique: bool = ..., return_indices: L[True] = ..., -) -> Tuple[NDArray[_SCTNoCast], NDArray[intp], NDArray[intp]]: ... +) -> tuple[NDArray[_SCTNoCast], NDArray[intp], NDArray[intp]]: ... @overload def intersect1d( ar1: ArrayLike, ar2: ArrayLike, assume_unique: bool = ..., return_indices: L[True] = ..., -) -> Tuple[NDArray[Any], NDArray[intp], NDArray[intp]]: ... +) -> tuple[NDArray[Any], NDArray[intp], NDArray[intp]]: ... @overload def setxor1d( diff --git a/numpy/lib/arrayterator.pyi b/numpy/lib/arrayterator.pyi index 82c669206..b32dffdfc 100644 --- a/numpy/lib/arrayterator.pyi +++ b/numpy/lib/arrayterator.pyi @@ -1,11 +1,8 @@ +from collections.abc import Generator from typing import ( - List, Any, TypeVar, - Generator, - List, Union, - Tuple, overload, ) @@ -19,10 +16,10 @@ _ScalarType = TypeVar("_ScalarType", bound=generic) _Index = Union[ Union[ellipsis, int, slice], - Tuple[Union[ellipsis, int, slice], ...], + tuple[Union[ellipsis, int, slice], ...], ] -__all__: List[str] +__all__: list[str] # NOTE: In reality `Arrayterator` does not actually inherit from `ndarray`, # but its ``__getattr__` method does wrap around the former and thus has @@ -31,12 +28,12 @@ __all__: List[str] class Arrayterator(ndarray[_Shape, _DType]): var: ndarray[_Shape, _DType] # type: ignore[assignment] buf_size: None | int - start: List[int] - stop: List[int] - step: List[int] + start: list[int] + stop: list[int] + step: list[int] @property # type: ignore[misc] - def shape(self) -> Tuple[int, ...]: ... + def shape(self) -> tuple[int, ...]: ... 
@property def flat( # type: ignore[override] self: ndarray[Any, dtype[_ScalarType]] diff --git a/numpy/lib/format.pyi b/numpy/lib/format.pyi index 092245daf..a4468f52f 100644 --- a/numpy/lib/format.pyi +++ b/numpy/lib/format.pyi @@ -1,8 +1,8 @@ -from typing import Any, List, Set, Literal, Final +from typing import Any, Literal, Final -__all__: List[str] +__all__: list[str] -EXPECTED_KEYS: Final[Set[str]] +EXPECTED_KEYS: Final[set[str]] MAGIC_PREFIX: Final[bytes] MAGIC_LEN: Literal[8] ARRAY_ALIGN: Literal[64] diff --git a/numpy/lib/function_base.pyi b/numpy/lib/function_base.pyi index 7e227f9da..6e2f886cf 100644 --- a/numpy/lib/function_base.pyi +++ b/numpy/lib/function_base.pyi @@ -1,19 +1,13 @@ import sys +from collections.abc import Sequence, Iterator, Callable, Iterable from typing import ( Literal as L, - List, - Type, - Sequence, - Tuple, Union, Any, TypeVar, - Iterator, overload, - Callable, Protocol, SupportsIndex, - Iterable, SupportsInt, ) @@ -73,11 +67,11 @@ _T_co = TypeVar("_T_co", covariant=True) _SCT = TypeVar("_SCT", bound=generic) _ArrayType = TypeVar("_ArrayType", bound=NDArray[Any]) -_2Tuple = Tuple[_T, _T] +_2Tuple = tuple[_T, _T] _ArrayLike = _FiniteNestedSequence[_SupportsArray[dtype[_SCT]]] _DTypeLike = Union[ dtype[_SCT], - Type[_SCT], + type[_SCT], _SupportsDType[dtype[_SCT]], ] @@ -90,7 +84,7 @@ class _SupportsWriteFlush(Protocol): def write(self, s: str, /) -> object: ... def flush(self) -> object: ... -__all__: List[str] +__all__: list[str] # NOTE: This is in reality a re-export of `np.core.umath._add_newdoc_ufunc` def add_newdoc_ufunc(ufunc: ufunc, new_docstring: str, /) -> None: ... @@ -99,13 +93,13 @@ def add_newdoc_ufunc(ufunc: ufunc, new_docstring: str, /) -> None: ... def rot90( m: _ArrayLike[_SCT], k: int = ..., - axes: Tuple[int, int] = ..., + axes: tuple[int, int] = ..., ) -> NDArray[_SCT]: ... @overload def rot90( m: ArrayLike, k: int = ..., - axes: Tuple[int, int] = ..., + axes: tuple[int, int] = ..., ) -> NDArray[Any]: ... 
@overload @@ -654,7 +648,7 @@ def meshgrid( copy: bool = ..., sparse: bool = ..., indexing: L["xy", "ij"] = ..., -) -> List[NDArray[Any]]: ... +) -> list[NDArray[Any]]: ... @overload def delete( diff --git a/numpy/lib/histograms.pyi b/numpy/lib/histograms.pyi index 2ceb60793..81f26d910 100644 --- a/numpy/lib/histograms.pyi +++ b/numpy/lib/histograms.pyi @@ -1,10 +1,8 @@ +from collections.abc import Sequence from typing import ( Literal as L, - List, - Tuple, Any, SupportsIndex, - Sequence, ) from numpy.typing import ( @@ -23,29 +21,29 @@ _BinKind = L[ "sturges", ] -__all__: List[str] +__all__: list[str] def histogram_bin_edges( a: ArrayLike, bins: _BinKind | SupportsIndex | ArrayLike = ..., - range: None | Tuple[float, float] = ..., + range: None | tuple[float, float] = ..., weights: None | ArrayLike = ..., ) -> NDArray[Any]: ... def histogram( a: ArrayLike, bins: _BinKind | SupportsIndex | ArrayLike = ..., - range: None | Tuple[float, float] = ..., + range: None | tuple[float, float] = ..., normed: None = ..., weights: None | ArrayLike = ..., density: bool = ..., -) -> Tuple[NDArray[Any], NDArray[Any]]: ... +) -> tuple[NDArray[Any], NDArray[Any]]: ... def histogramdd( sample: ArrayLike, bins: SupportsIndex | ArrayLike = ..., - range: Sequence[Tuple[float, float]] = ..., + range: Sequence[tuple[float, float]] = ..., normed: None | bool = ..., weights: None | ArrayLike = ..., density: None | bool = ..., -) -> Tuple[NDArray[Any], List[NDArray[Any]]]: ... +) -> tuple[NDArray[Any], list[NDArray[Any]]]: ... 
diff --git a/numpy/lib/index_tricks.pyi b/numpy/lib/index_tricks.pyi index d16faf81a..923cb8a9f 100644 --- a/numpy/lib/index_tricks.pyi +++ b/numpy/lib/index_tricks.pyi @@ -1,12 +1,9 @@ +from collections.abc import Sequence from typing import ( Any, - Tuple, TypeVar, Generic, overload, - List, - Union, - Sequence, Literal, SupportsIndex, ) @@ -53,25 +50,25 @@ from numpy.core.multiarray import ( _T = TypeVar("_T") _DType = TypeVar("_DType", bound=dtype[Any]) _BoolType = TypeVar("_BoolType", Literal[True], Literal[False]) -_TupType = TypeVar("_TupType", bound=Tuple[Any, ...]) +_TupType = TypeVar("_TupType", bound=tuple[Any, ...]) _ArrayType = TypeVar("_ArrayType", bound=ndarray[Any, Any]) -__all__: List[str] +__all__: list[str] @overload -def ix_(*args: _FiniteNestedSequence[_SupportsDType[_DType]]) -> Tuple[ndarray[Any, _DType], ...]: ... +def ix_(*args: _FiniteNestedSequence[_SupportsDType[_DType]]) -> tuple[ndarray[Any, _DType], ...]: ... @overload -def ix_(*args: str | _NestedSequence[str]) -> Tuple[NDArray[str_], ...]: ... +def ix_(*args: str | _NestedSequence[str]) -> tuple[NDArray[str_], ...]: ... @overload -def ix_(*args: bytes | _NestedSequence[bytes]) -> Tuple[NDArray[bytes_], ...]: ... +def ix_(*args: bytes | _NestedSequence[bytes]) -> tuple[NDArray[bytes_], ...]: ... @overload -def ix_(*args: bool | _NestedSequence[bool]) -> Tuple[NDArray[bool_], ...]: ... +def ix_(*args: bool | _NestedSequence[bool]) -> tuple[NDArray[bool_], ...]: ... @overload -def ix_(*args: int | _NestedSequence[int]) -> Tuple[NDArray[int_], ...]: ... +def ix_(*args: int | _NestedSequence[int]) -> tuple[NDArray[int_], ...]: ... @overload -def ix_(*args: float | _NestedSequence[float]) -> Tuple[NDArray[float_], ...]: ... +def ix_(*args: float | _NestedSequence[float]) -> tuple[NDArray[float_], ...]: ... @overload -def ix_(*args: complex | _NestedSequence[complex]) -> Tuple[NDArray[complex_], ...]: ... 
+def ix_(*args: complex | _NestedSequence[complex]) -> tuple[NDArray[complex_], ...]: ... class nd_grid(Generic[_BoolType]): sparse: _BoolType @@ -79,13 +76,13 @@ class nd_grid(Generic[_BoolType]): @overload def __getitem__( self: nd_grid[Literal[False]], - key: Union[slice, Sequence[slice]], + key: slice | Sequence[slice], ) -> NDArray[Any]: ... @overload def __getitem__( self: nd_grid[Literal[True]], - key: Union[slice, Sequence[slice]], - ) -> List[NDArray[Any]]: ... + key: slice | Sequence[slice], + ) -> list[NDArray[Any]]: ... class MGridClass(nd_grid[Literal[False]]): def __init__(self) -> None: ... @@ -151,7 +148,7 @@ class IndexExpression(Generic[_BoolType]): @overload def __getitem__(self, item: _TupType) -> _TupType: ... # type: ignore[misc] @overload - def __getitem__(self: IndexExpression[Literal[True]], item: _T) -> Tuple[_T]: ... + def __getitem__(self: IndexExpression[Literal[True]], item: _T) -> tuple[_T]: ... @overload def __getitem__(self: IndexExpression[Literal[False]], item: _T) -> _T: ... @@ -159,7 +156,7 @@ index_exp: IndexExpression[Literal[True]] s_: IndexExpression[Literal[False]] def fill_diagonal(a: ndarray[Any, Any], val: Any, wrap: bool = ...) -> None: ... -def diag_indices(n: int, ndim: int = ...) -> Tuple[NDArray[int_], ...]: ... -def diag_indices_from(arr: ArrayLike) -> Tuple[NDArray[int_], ...]: ... +def diag_indices(n: int, ndim: int = ...) -> tuple[NDArray[int_], ...]: ... +def diag_indices_from(arr: ArrayLike) -> tuple[NDArray[int_], ...]: ... 
# NOTE: see `numpy/__init__.pyi` for `ndenumerate` and `ndindex` diff --git a/numpy/lib/mixins.pyi b/numpy/lib/mixins.pyi index f137bb5bc..d52c8fa0b 100644 --- a/numpy/lib/mixins.pyi +++ b/numpy/lib/mixins.pyi @@ -1,7 +1,6 @@ -from typing import List from abc import ABCMeta, abstractmethod -__all__: List[str] +__all__: list[str] # NOTE: `NDArrayOperatorsMixin` is not formally an abstract baseclass, # even though it's reliant on subclasses implementing `__array_ufunc__` diff --git a/numpy/lib/nanfunctions.pyi b/numpy/lib/nanfunctions.pyi index 54b4a7e26..8642055fe 100644 --- a/numpy/lib/nanfunctions.pyi +++ b/numpy/lib/nanfunctions.pyi @@ -1,5 +1,3 @@ -from typing import List - from numpy.core.fromnumeric import ( amin, amax, @@ -20,7 +18,7 @@ from numpy.lib.function_base import ( quantile, ) -__all__: List[str] +__all__: list[str] # NOTE: In reaility these functions are not aliases but distinct functions # with identical signatures. diff --git a/numpy/lib/npyio.py b/numpy/lib/npyio.py index 85e26f094..a6c2d4c2d 100644 --- a/numpy/lib/npyio.py +++ b/numpy/lib/npyio.py @@ -816,6 +816,38 @@ def _loadtxt_pack_items(packing, items): start += length return tuple(ret) +def _ensure_ndmin_ndarray_check_param(ndmin): + """Just checks if the param ndmin is supported on + _ensure_ndmin_ndarray. Is intented to be used as + verification before running anything expensive. + e.g. loadtxt, genfromtxt + """ + # Check correctness of the values of `ndmin` + if ndmin not in [0, 1, 2]: + raise ValueError(f"Illegal value of ndmin keyword: {ndmin}") + +def _ensure_ndmin_ndarray(a, *, ndmin: int): + """This is a helper function of loadtxt and genfromtxt to ensure + proper minimum dimension as requested + + ndim: int. Supported values 1, 2, 3 + ^^ whenever this changes, keep in sync with + _ensure_ndmin_ndarray_check_param + """ + # Verify that the array has at least dimensions `ndmin`. 
+ # Tweak the size and shape of the arrays - remove extraneous dimensions + if a.ndim > ndmin: + a = np.squeeze(a) + # and ensure we have the minimum number of dimensions asked for + # - has to be in this order for the odd case ndmin=1, a.squeeze().ndim=0 + if a.ndim < ndmin: + if ndmin == 1: + a = np.atleast_1d(a) + elif ndmin == 2: + a = np.atleast_2d(a).T + + return a + # amount of lines loadtxt reads in one chunk, can be overridden for testing _loadtxt_chunksize = 50000 @@ -983,9 +1015,7 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None, # Main body of loadtxt. # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # Check correctness of the values of `ndmin` - if ndmin not in [0, 1, 2]: - raise ValueError('Illegal value of ndmin keyword: %s' % ndmin) + _ensure_ndmin_ndarray_check_param(ndmin) # Type conversions for Py3 convenience if comments is not None: @@ -1182,17 +1212,7 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None, if X.ndim == 3 and X.shape[:2] == (1, 1): X.shape = (1, -1) - # Verify that the array has at least dimensions `ndmin`. 
- # Tweak the size and shape of the arrays - remove extraneous dimensions - if X.ndim > ndmin: - X = np.squeeze(X) - # and ensure we have the minimum number of dimensions asked for - # - has to be in this order for the odd case ndmin=1, X.squeeze().ndim=0 - if X.ndim < ndmin: - if ndmin == 1: - X = np.atleast_1d(X) - elif ndmin == 2: - X = np.atleast_2d(X).T + X = _ensure_ndmin_ndarray(X, ndmin=ndmin) if unpack: if len(dtype_types) > 1: @@ -1573,8 +1593,8 @@ def _genfromtxt_dispatcher(fname, dtype=None, comments=None, delimiter=None, names=None, excludelist=None, deletechars=None, replace_space=None, autostrip=None, case_sensitive=None, defaultfmt=None, unpack=None, usemask=None, loose=None, - invalid_raise=None, max_rows=None, encoding=None, *, - like=None): + invalid_raise=None, max_rows=None, encoding=None, + *, ndmin=None, like=None): return (like,) @@ -1587,8 +1607,8 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None, deletechars=''.join(sorted(NameValidator.defaultdeletechars)), replace_space='_', autostrip=False, case_sensitive=True, defaultfmt="f%i", unpack=None, usemask=False, loose=True, - invalid_raise=True, max_rows=None, encoding='bytes', *, - like=None): + invalid_raise=True, max_rows=None, encoding='bytes', + *, ndmin=0, like=None): """ Load data from a text file, with missing values handled as specified. @@ -1687,6 +1707,10 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None, to None the system default is used. The default value is 'bytes'. .. versionadded:: 1.14.0 + ndmin : int, optional + Same parameter as `loadtxt` + + .. versionadded:: 1.23.0 ${ARRAY_FUNCTION_LIKE} .. 
versionadded:: 1.20.0 @@ -1780,9 +1804,12 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None, case_sensitive=case_sensitive, defaultfmt=defaultfmt, unpack=unpack, usemask=usemask, loose=loose, invalid_raise=invalid_raise, max_rows=max_rows, encoding=encoding, + ndmin=ndmin, like=like ) + _ensure_ndmin_ndarray_check_param(ndmin) + if max_rows is not None: if skip_footer: raise ValueError( @@ -1807,22 +1834,21 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None, byte_converters = False # Initialize the filehandle, the LineSplitter and the NameValidator + if isinstance(fname, os_PathLike): + fname = os_fspath(fname) + if isinstance(fname, str): + fid = np.lib._datasource.open(fname, 'rt', encoding=encoding) + fid_ctx = contextlib.closing(fid) + else: + fid = fname + fid_ctx = contextlib.nullcontext(fid) try: - if isinstance(fname, os_PathLike): - fname = os_fspath(fname) - if isinstance(fname, str): - fid = np.lib._datasource.open(fname, 'rt', encoding=encoding) - fid_ctx = contextlib.closing(fid) - else: - fid = fname - fid_ctx = contextlib.nullcontext(fid) fhd = iter(fid) except TypeError as e: raise TypeError( - f"fname must be a string, filehandle, list of strings,\n" - f"or generator. Got {type(fname)} instead." + "fname must be a string, a filehandle, a sequence of strings,\n" + f"or an iterator of strings. Got {type(fname)} instead." 
) from e - with fid_ctx: split_line = LineSplitter(delimiter=delimiter, comments=comments, autostrip=autostrip, encoding=encoding) @@ -2292,7 +2318,9 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None, if usemask: output = output.view(MaskedArray) output._mask = outputmask - output = np.squeeze(output) + + output = _ensure_ndmin_ndarray(output, ndmin=ndmin) + if unpack: if names is None: return output.T diff --git a/numpy/lib/npyio.pyi b/numpy/lib/npyio.pyi index 8fd36ca6f..60684c846 100644 --- a/numpy/lib/npyio.pyi +++ b/numpy/lib/npyio.pyi @@ -2,23 +2,17 @@ import os import sys import zipfile import types +from re import Pattern +from collections.abc import Collection, Mapping, Iterator, Sequence, Callable, Iterable from typing import ( Literal as L, Any, - Mapping, TypeVar, Generic, - List, - Type, - Iterator, Union, IO, overload, - Sequence, - Callable, - Pattern, Protocol, - Iterable, ) from numpy import ( @@ -48,7 +42,7 @@ _CharType_co = TypeVar("_CharType_co", str, bytes, covariant=True) _CharType_contra = TypeVar("_CharType_contra", str, bytes, contravariant=True) _DTypeLike = Union[ - Type[_SCT], + type[_SCT], dtype[_SCT], _SupportsDType[dtype[_SCT]], ] @@ -66,17 +60,17 @@ class _SupportsReadSeek(Protocol[_CharType_co]): class _SupportsWrite(Protocol[_CharType_contra]): def write(self, s: _CharType_contra, /) -> object: ... -__all__: List[str] +__all__: list[str] class BagObj(Generic[_T_co]): def __init__(self, obj: _SupportsGetItem[str, _T_co]) -> None: ... def __getattribute__(self, key: str) -> _T_co: ... - def __dir__(self) -> List[str]: ... + def __dir__(self) -> list[str]: ... class NpzFile(Mapping[str, NDArray[Any]]): zip: zipfile.ZipFile fid: None | IO[str] - files: List[str] + files: list[str] allow_pickle: bool pickle_kwargs: None | Mapping[str, Any] # Represent `f` as a mutable property so we can access the type of `self` @@ -94,7 +88,7 @@ class NpzFile(Mapping[str, NDArray[Any]]): def __enter__(self: _T) -> _T: ... 
def __exit__( self, - exc_type: None | Type[BaseException], + exc_type: None | type[BaseException], exc_value: None | BaseException, traceback: None | types.TracebackType, /, @@ -140,7 +134,7 @@ def savez_compressed( def loadtxt( fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes], dtype: None = ..., - comments: str | Sequence[str] = ..., + comments: None | str | Sequence[str] = ..., delimiter: None | str = ..., converters: None | Mapping[int | str, Callable[[str], Any]] = ..., skiprows: int = ..., @@ -156,7 +150,7 @@ def loadtxt( def loadtxt( fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes], dtype: _DTypeLike[_SCT], - comments: str | Sequence[str] = ..., + comments: None | str | Sequence[str] = ..., delimiter: None | str = ..., converters: None | Mapping[int | str, Callable[[str], Any]] = ..., skiprows: int = ..., @@ -172,7 +166,7 @@ def loadtxt( def loadtxt( fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes], dtype: DTypeLike, - comments: str | Sequence[str] = ..., + comments: None | str | Sequence[str] = ..., delimiter: None | str = ..., converters: None | Mapping[int | str, Callable[[str], Any]] = ..., skiprows: int = ..., @@ -212,27 +206,92 @@ def fromregex( encoding: None | str = ... ) -> NDArray[Any]: ... 
-# TODO: Sort out arguments @overload def genfromtxt( fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes], dtype: None = ..., - *args: Any, - **kwargs: Any, + comments: str = ..., + delimiter: None | str | int | Iterable[int] = ..., + skip_header: int = ..., + skip_footer: int = ..., + converters: None | Mapping[int | str, Callable[[str], Any]] = ..., + missing_values: Any = ..., + filling_values: Any = ..., + usecols: None | Sequence[int] = ..., + names: L[None, True] | str | Collection[str] = ..., + excludelist: None | Sequence[str] = ..., + deletechars: str = ..., + replace_space: str = ..., + autostrip: bool = ..., + case_sensitive: bool | L['upper', 'lower'] = ..., + defaultfmt: str = ..., + unpack: None | bool = ..., + usemask: bool = ..., + loose: bool = ..., + invalid_raise: bool = ..., + max_rows: None | int = ..., + encoding: str = ..., + *, + ndmin: L[0, 1, 2] = ..., + like: None | ArrayLike = ..., ) -> NDArray[float64]: ... @overload def genfromtxt( fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes], dtype: _DTypeLike[_SCT], - *args: Any, - **kwargs: Any, + comments: str = ..., + delimiter: None | str | int | Iterable[int] = ..., + skip_header: int = ..., + skip_footer: int = ..., + converters: None | Mapping[int | str, Callable[[str], Any]] = ..., + missing_values: Any = ..., + filling_values: Any = ..., + usecols: None | Sequence[int] = ..., + names: L[None, True] | str | Collection[str] = ..., + excludelist: None | Sequence[str] = ..., + deletechars: str = ..., + replace_space: str = ..., + autostrip: bool = ..., + case_sensitive: bool | L['upper', 'lower'] = ..., + defaultfmt: str = ..., + unpack: None | bool = ..., + usemask: bool = ..., + loose: bool = ..., + invalid_raise: bool = ..., + max_rows: None | int = ..., + encoding: str = ..., + *, + ndmin: L[0, 1, 2] = ..., + like: None | ArrayLike = ..., ) -> NDArray[_SCT]: ... 
@overload def genfromtxt( fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes], dtype: DTypeLike, - *args: Any, - **kwargs: Any, + comments: str = ..., + delimiter: None | str | int | Iterable[int] = ..., + skip_header: int = ..., + skip_footer: int = ..., + converters: None | Mapping[int | str, Callable[[str], Any]] = ..., + missing_values: Any = ..., + filling_values: Any = ..., + usecols: None | Sequence[int] = ..., + names: L[None, True] | str | Collection[str] = ..., + excludelist: None | Sequence[str] = ..., + deletechars: str = ..., + replace_space: str = ..., + autostrip: bool = ..., + case_sensitive: bool | L['upper', 'lower'] = ..., + defaultfmt: str = ..., + unpack: None | bool = ..., + usemask: bool = ..., + loose: bool = ..., + invalid_raise: bool = ..., + max_rows: None | int = ..., + encoding: str = ..., + *, + ndmin: L[0, 1, 2] = ..., + like: None | ArrayLike = ..., ) -> NDArray[Any]: ... @overload diff --git a/numpy/lib/polynomial.pyi b/numpy/lib/polynomial.pyi index 00065f53b..7ba4747bd 100644 --- a/numpy/lib/polynomial.pyi +++ b/numpy/lib/polynomial.pyi @@ -1,12 +1,10 @@ from typing import ( Literal as L, - List, overload, Any, SupportsInt, SupportsIndex, TypeVar, - Tuple, NoReturn, ) @@ -38,8 +36,8 @@ from numpy.typing import ( _T = TypeVar("_T") -_2Tup = Tuple[_T, _T] -_5Tup = Tuple[ +_2Tup = tuple[_T, _T] +_5Tup = tuple[ _T, NDArray[float64], NDArray[int32], @@ -47,7 +45,7 @@ _5Tup = Tuple[ NDArray[float64], ] -__all__: List[str] +__all__: list[str] def poly(seq_of_zeros: ArrayLike) -> NDArray[floating[Any]]: ... 
diff --git a/numpy/lib/recfunctions.py b/numpy/lib/recfunctions.py index a491f612e..ee4fbcd74 100644 --- a/numpy/lib/recfunctions.py +++ b/numpy/lib/recfunctions.py @@ -784,7 +784,8 @@ def repack_fields(a, align=False, recurse=False): This method removes any overlaps and reorders the fields in memory so they have increasing byte offsets, and adds or removes padding bytes depending - on the `align` option, which behaves like the `align` option to `np.dtype`. + on the `align` option, which behaves like the `align` option to + `numpy.dtype`. If `align=False`, this method produces a "packed" memory layout in which each field starts at the byte the previous field ended, and any padding @@ -917,11 +918,12 @@ def structured_to_unstructured(arr, dtype=None, copy=False, casting='unsafe'): dtype : dtype, optional The dtype of the output unstructured array. copy : bool, optional - See copy argument to `ndarray.astype`. If true, always return a copy. - If false, and `dtype` requirements are satisfied, a view is returned. + See copy argument to `numpy.ndarray.astype`. If true, always return a + copy. If false, and `dtype` requirements are satisfied, a view is + returned. casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional - See casting argument of `ndarray.astype`. Controls what kind of data - casting may occur. + See casting argument of `numpy.ndarray.astype`. Controls what kind of + data casting may occur. Returns ------- @@ -1020,11 +1022,12 @@ def unstructured_to_structured(arr, dtype=None, names=None, align=False, align : boolean, optional Whether to create an aligned memory layout. copy : bool, optional - See copy argument to `ndarray.astype`. If true, always return a copy. - If false, and `dtype` requirements are satisfied, a view is returned. + See copy argument to `numpy.ndarray.astype`. If true, always return a + copy. If false, and `dtype` requirements are satisfied, a view is + returned. 
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional - See casting argument of `ndarray.astype`. Controls what kind of data - casting may occur. + See casting argument of `numpy.ndarray.astype`. Controls what kind of + data casting may occur. Returns ------- diff --git a/numpy/lib/scimath.pyi b/numpy/lib/scimath.pyi index d0d4af41e..1b13a6805 100644 --- a/numpy/lib/scimath.pyi +++ b/numpy/lib/scimath.pyi @@ -1,13 +1,94 @@ -from typing import List - -__all__: List[str] - -def sqrt(x): ... -def log(x): ... -def log10(x): ... -def logn(n, x): ... -def log2(x): ... -def power(x, p): ... -def arccos(x): ... -def arcsin(x): ... -def arctanh(x): ... +from typing import overload, Any + +from numpy import complexfloating + +from numpy.typing import ( + NDArray, + _ArrayLikeFloat_co, + _ArrayLikeComplex_co, + _ComplexLike_co, + _FloatLike_co, +) + +__all__: list[str] + +@overload +def sqrt(x: _FloatLike_co) -> Any: ... +@overload +def sqrt(x: _ComplexLike_co) -> complexfloating[Any, Any]: ... +@overload +def sqrt(x: _ArrayLikeFloat_co) -> NDArray[Any]: ... +@overload +def sqrt(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... + +@overload +def log(x: _FloatLike_co) -> Any: ... +@overload +def log(x: _ComplexLike_co) -> complexfloating[Any, Any]: ... +@overload +def log(x: _ArrayLikeFloat_co) -> NDArray[Any]: ... +@overload +def log(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... + +@overload +def log10(x: _FloatLike_co) -> Any: ... +@overload +def log10(x: _ComplexLike_co) -> complexfloating[Any, Any]: ... +@overload +def log10(x: _ArrayLikeFloat_co) -> NDArray[Any]: ... +@overload +def log10(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... + +@overload +def log2(x: _FloatLike_co) -> Any: ... +@overload +def log2(x: _ComplexLike_co) -> complexfloating[Any, Any]: ... +@overload +def log2(x: _ArrayLikeFloat_co) -> NDArray[Any]: ... 
+@overload +def log2(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... + +@overload +def logn(n: _FloatLike_co, x: _FloatLike_co) -> Any: ... +@overload +def logn(n: _ComplexLike_co, x: _ComplexLike_co) -> complexfloating[Any, Any]: ... +@overload +def logn(n: _ArrayLikeFloat_co, x: _ArrayLikeFloat_co) -> NDArray[Any]: ... +@overload +def logn(n: _ArrayLikeComplex_co, x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... + +@overload +def power(x: _FloatLike_co, p: _FloatLike_co) -> Any: ... +@overload +def power(x: _ComplexLike_co, p: _ComplexLike_co) -> complexfloating[Any, Any]: ... +@overload +def power(x: _ArrayLikeFloat_co, p: _ArrayLikeFloat_co) -> NDArray[Any]: ... +@overload +def power(x: _ArrayLikeComplex_co, p: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... + +@overload +def arccos(x: _FloatLike_co) -> Any: ... +@overload +def arccos(x: _ComplexLike_co) -> complexfloating[Any, Any]: ... +@overload +def arccos(x: _ArrayLikeFloat_co) -> NDArray[Any]: ... +@overload +def arccos(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... + +@overload +def arcsin(x: _FloatLike_co) -> Any: ... +@overload +def arcsin(x: _ComplexLike_co) -> complexfloating[Any, Any]: ... +@overload +def arcsin(x: _ArrayLikeFloat_co) -> NDArray[Any]: ... +@overload +def arcsin(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... + +@overload +def arctanh(x: _FloatLike_co) -> Any: ... +@overload +def arctanh(x: _ComplexLike_co) -> complexfloating[Any, Any]: ... +@overload +def arctanh(x: _ArrayLikeFloat_co) -> NDArray[Any]: ... +@overload +def arctanh(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... 
diff --git a/numpy/lib/shape_base.pyi b/numpy/lib/shape_base.pyi index 8aa283d02..82702e67c 100644 --- a/numpy/lib/shape_base.pyi +++ b/numpy/lib/shape_base.pyi @@ -1,4 +1,5 @@ -from typing import List, TypeVar, Callable, Sequence, Any, overload, Tuple, SupportsIndex, Protocol +from collections.abc import Callable, Sequence +from typing import TypeVar, Any, overload, SupportsIndex, Protocol from numpy import ( generic, @@ -18,7 +19,7 @@ from numpy.typing import ( NDArray, _ShapeLike, _FiniteNestedSequence, - _SupportsDType, + _SupportsArray, _ArrayLikeBool_co, _ArrayLikeUInt_co, _ArrayLikeInt_co, @@ -31,7 +32,7 @@ from numpy.core.shape_base import vstack _SCT = TypeVar("_SCT", bound=generic) -_ArrayLike = _FiniteNestedSequence[_SupportsDType[dtype[_SCT]]] +_ArrayLike = _FiniteNestedSequence[_SupportsArray[dtype[_SCT]]] # The signatures of `__array_wrap__` and `__array_prepare__` are the same; # give them unique names for the sake of clarity @@ -39,7 +40,7 @@ class _ArrayWrap(Protocol): def __call__( self, array: NDArray[Any], - context: None | Tuple[ufunc, Tuple[Any, ...], int] = ..., + context: None | tuple[ufunc, tuple[Any, ...], int] = ..., /, ) -> Any: ... @@ -47,7 +48,7 @@ class _ArrayPrepare(Protocol): def __call__( self, array: NDArray[Any], - context: None | Tuple[ufunc, Tuple[Any, ...], int] = ..., + context: None | tuple[ufunc, tuple[Any, ...], int] = ..., /, ) -> Any: ... @@ -59,7 +60,7 @@ class _SupportsArrayPrepare(Protocol): @property def __array_prepare__(self) -> _ArrayPrepare: ... -__all__: List[str] +__all__: list[str] row_stack = vstack @@ -125,59 +126,59 @@ def array_split( ary: _ArrayLike[_SCT], indices_or_sections: _ShapeLike, axis: SupportsIndex = ..., -) -> List[NDArray[_SCT]]: ... +) -> list[NDArray[_SCT]]: ... @overload def array_split( ary: ArrayLike, indices_or_sections: _ShapeLike, axis: SupportsIndex = ..., -) -> List[NDArray[Any]]: ... +) -> list[NDArray[Any]]: ... 
@overload def split( ary: _ArrayLike[_SCT], indices_or_sections: _ShapeLike, axis: SupportsIndex = ..., -) -> List[NDArray[_SCT]]: ... +) -> list[NDArray[_SCT]]: ... @overload def split( ary: ArrayLike, indices_or_sections: _ShapeLike, axis: SupportsIndex = ..., -) -> List[NDArray[Any]]: ... +) -> list[NDArray[Any]]: ... @overload def hsplit( ary: _ArrayLike[_SCT], indices_or_sections: _ShapeLike, -) -> List[NDArray[_SCT]]: ... +) -> list[NDArray[_SCT]]: ... @overload def hsplit( ary: ArrayLike, indices_or_sections: _ShapeLike, -) -> List[NDArray[Any]]: ... +) -> list[NDArray[Any]]: ... @overload def vsplit( ary: _ArrayLike[_SCT], indices_or_sections: _ShapeLike, -) -> List[NDArray[_SCT]]: ... +) -> list[NDArray[_SCT]]: ... @overload def vsplit( ary: ArrayLike, indices_or_sections: _ShapeLike, -) -> List[NDArray[Any]]: ... +) -> list[NDArray[Any]]: ... @overload def dsplit( ary: _ArrayLike[_SCT], indices_or_sections: _ShapeLike, -) -> List[NDArray[_SCT]]: ... +) -> list[NDArray[_SCT]]: ... @overload def dsplit( ary: ArrayLike, indices_or_sections: _ShapeLike, -) -> List[NDArray[Any]]: ... +) -> list[NDArray[Any]]: ... @overload def get_array_prepare(*args: _SupportsArrayPrepare) -> _ArrayPrepare: ... diff --git a/numpy/lib/stride_tricks.py b/numpy/lib/stride_tricks.py index 5093993a9..6794ad557 100644 --- a/numpy/lib/stride_tricks.py +++ b/numpy/lib/stride_tricks.py @@ -86,6 +86,7 @@ def as_strided(x, shape=None, strides=None, subok=False, writeable=True): Vectorized write operations on such arrays will typically be unpredictable. They may even give different results for small, large, or transposed arrays. + Since writing to these arrays has to be tested and done with great care, you may want to use ``writeable=False`` to avoid accidental write operations. 
diff --git a/numpy/lib/stride_tricks.pyi b/numpy/lib/stride_tricks.pyi index aad404107..e05a9f74c 100644 --- a/numpy/lib/stride_tricks.pyi +++ b/numpy/lib/stride_tricks.pyi @@ -1,4 +1,5 @@ -from typing import Any, List, Dict, Iterable, TypeVar, overload, SupportsIndex +from collections.abc import Iterable +from typing import Any, TypeVar, overload, SupportsIndex from numpy import dtype, generic from numpy.typing import ( @@ -13,14 +14,14 @@ from numpy.typing import ( _SCT = TypeVar("_SCT", bound=generic) _ArrayLike = _FiniteNestedSequence[_SupportsArray[dtype[_SCT]]] -__all__: List[str] +__all__: list[str] class DummyArray: - __array_interface__: Dict[str, Any] + __array_interface__: dict[str, Any] base: None | NDArray[Any] def __init__( self, - interface: Dict[str, Any], + interface: dict[str, Any], base: None | NDArray[Any] = ..., ) -> None: ... @@ -78,4 +79,4 @@ def broadcast_shapes(*args: _ShapeLike) -> _Shape: ... def broadcast_arrays( *args: ArrayLike, subok: bool = ..., -) -> List[NDArray[Any]]: ... +) -> list[NDArray[Any]]: ... 
diff --git a/numpy/lib/tests/test_arraypad.py b/numpy/lib/tests/test_arraypad.py index 75db5928b..ca3c35335 100644 --- a/numpy/lib/tests/test_arraypad.py +++ b/numpy/lib/tests/test_arraypad.py @@ -474,7 +474,7 @@ class TestStatistic: @pytest.mark.filterwarnings("ignore:Mean of empty slice:RuntimeWarning") @pytest.mark.filterwarnings( - "ignore:invalid value encountered in (true_divide|double_scalars):" + "ignore:invalid value encountered in (divide|double_scalars):" "RuntimeWarning" ) @pytest.mark.parametrize("mode", ["mean", "median"]) diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py index 5201b8e6e..b9b10bc06 100644 --- a/numpy/lib/tests/test_io.py +++ b/numpy/lib/tests/test_io.py @@ -1425,6 +1425,10 @@ class TestFromTxt(LoadTxtBase): ('F', 25.0, 60.0)], dtype=descriptor) assert_equal(test, control) + def test_bad_fname(self): + with pytest.raises(TypeError, match='fname must be a string,'): + np.genfromtxt(123) + def test_commented_header(self): # Check that names can be retrieved even if the line is commented out. 
data = TextIO(""" @@ -2438,6 +2442,17 @@ M 33 21.99 assert_equal((), test.shape) assert_equal(expected.dtype, test.dtype) + @pytest.mark.parametrize("ndim", [0, 1, 2]) + def test_ndmin_keyword(self, ndim: int): + # let's have the same behaviour of ndmin as loadtxt + # as they should be the same for non-missing values + txt = "42" + + a = np.loadtxt(StringIO(txt), ndmin=ndim) + b = np.genfromtxt(StringIO(txt), ndmin=ndim) + + assert_array_equal(a, b) + class TestPathUsage: # Test that pathlib.Path can be used diff --git a/numpy/lib/twodim_base.pyi b/numpy/lib/twodim_base.pyi index cba503ca3..76d7e5a9d 100644 --- a/numpy/lib/twodim_base.pyi +++ b/numpy/lib/twodim_base.pyi @@ -1,11 +1,7 @@ +from collections.abc import Callable, Sequence from typing import ( Any, - Callable, - List, - Sequence, overload, - Tuple, - Type, TypeVar, Union, ) @@ -51,13 +47,13 @@ _MaskFunc = Callable[ ] _DTypeLike = Union[ - Type[_SCT], + type[_SCT], dtype[_SCT], _SupportsDType[dtype[_SCT]], ] _ArrayLike = _FiniteNestedSequence[_SupportsArray[dtype[_SCT]]] -__all__: List[str] +__all__: list[str] @overload def fliplr(m: _ArrayLike[_SCT]) -> NDArray[_SCT]: ... @@ -182,7 +178,7 @@ def histogram2d( # type: ignore[misc] normed: None | bool = ..., weights: None | _ArrayLikeFloat_co = ..., density: None | bool = ..., -) -> Tuple[ +) -> tuple[ NDArray[float64], NDArray[floating[Any]], NDArray[floating[Any]], @@ -196,7 +192,7 @@ def histogram2d( normed: None | bool = ..., weights: None | _ArrayLikeFloat_co = ..., density: None | bool = ..., -) -> Tuple[ +) -> tuple[ NDArray[float64], NDArray[complexfloating[Any, Any]], NDArray[complexfloating[Any, Any]], @@ -210,7 +206,7 @@ def histogram2d( normed: None | bool = ..., weights: None | _ArrayLikeFloat_co = ..., density: None | bool = ..., -) -> Tuple[ +) -> tuple[ NDArray[float64], NDArray[Any], NDArray[Any], @@ -224,32 +220,32 @@ def mask_indices( n: int, mask_func: _MaskFunc[int], k: int = ..., -) -> Tuple[NDArray[intp], NDArray[intp]]: ... 
+) -> tuple[NDArray[intp], NDArray[intp]]: ... @overload def mask_indices( n: int, mask_func: _MaskFunc[_T], k: _T, -) -> Tuple[NDArray[intp], NDArray[intp]]: ... +) -> tuple[NDArray[intp], NDArray[intp]]: ... def tril_indices( n: int, k: int = ..., m: None | int = ..., -) -> Tuple[NDArray[int_], NDArray[int_]]: ... +) -> tuple[NDArray[int_], NDArray[int_]]: ... def tril_indices_from( arr: NDArray[Any], k: int = ..., -) -> Tuple[NDArray[int_], NDArray[int_]]: ... +) -> tuple[NDArray[int_], NDArray[int_]]: ... def triu_indices( n: int, k: int = ..., m: None | int = ..., -) -> Tuple[NDArray[int_], NDArray[int_]]: ... +) -> tuple[NDArray[int_], NDArray[int_]]: ... def triu_indices_from( arr: NDArray[Any], k: int = ..., -) -> Tuple[NDArray[int_], NDArray[int_]]: ... +) -> tuple[NDArray[int_], NDArray[int_]]: ... diff --git a/numpy/lib/type_check.py b/numpy/lib/type_check.py index 56afd83ce..94d525f51 100644 --- a/numpy/lib/type_check.py +++ b/numpy/lib/type_check.py @@ -6,7 +6,7 @@ import warnings __all__ = ['iscomplexobj', 'isrealobj', 'imag', 'iscomplex', 'isreal', 'nan_to_num', 'real', 'real_if_close', - 'typename', 'asfarray', 'mintypecode', 'asscalar', + 'typename', 'asfarray', 'mintypecode', 'common_type'] import numpy.core.numeric as _nx @@ -276,22 +276,22 @@ def isreal(x): >>> a = np.array([1+1j, 1+0j, 4.5, 3, 2, 2j], dtype=complex) >>> np.isreal(a) array([False, True, True, True, True, False]) - + The function does not work on string arrays. >>> a = np.array([2j, "a"], dtype="U") >>> np.isreal(a) # Warns about non-elementwise comparison False - + Returns True for all elements in input array of ``dtype=object`` even if any of the elements is complex. 
>>> a = np.array([1, "2", 3+4j], dtype=object) >>> np.isreal(a) array([ True, True, True]) - + isreal should not be used with object arrays - + >>> a = np.array([1+2j, 2+1j], dtype=object) >>> np.isreal(a) array([ True, True]) @@ -405,14 +405,14 @@ def _nan_to_num_dispatcher(x, copy=None, nan=None, posinf=None, neginf=None): def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None): """ Replace NaN with zero and infinity with large finite numbers (default - behaviour) or with the numbers defined by the user using the `nan`, + behaviour) or with the numbers defined by the user using the `nan`, `posinf` and/or `neginf` keywords. If `x` is inexact, NaN is replaced by zero or by the user defined value in - `nan` keyword, infinity is replaced by the largest finite floating point - values representable by ``x.dtype`` or by the user defined value in - `posinf` keyword and -infinity is replaced by the most negative finite - floating point values representable by ``x.dtype`` or by the user defined + `nan` keyword, infinity is replaced by the largest finite floating point + values representable by ``x.dtype`` or by the user defined value in + `posinf` keyword and -infinity is replaced by the most negative finite + floating point values representable by ``x.dtype`` or by the user defined value in `neginf` keyword. For complex dtypes, the above is applied to each of the real and @@ -429,27 +429,27 @@ def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None): in-place (False). The in-place operation only occurs if casting to an array does not require a copy. Default is True. - + .. versionadded:: 1.13 nan : int, float, optional - Value to be used to fill NaN values. If no value is passed + Value to be used to fill NaN values. If no value is passed then NaN values will be replaced with 0.0. - + .. versionadded:: 1.17 posinf : int, float, optional - Value to be used to fill positive infinity values. If no value is + Value to be used to fill positive infinity values. 
If no value is passed then positive infinity values will be replaced with a very large number. - + .. versionadded:: 1.17 neginf : int, float, optional - Value to be used to fill negative infinity values. If no value is + Value to be used to fill negative infinity values. If no value is passed then negative infinity values will be replaced with a very small (or negative) number. - + .. versionadded:: 1.17 - + Returns ------- @@ -483,7 +483,7 @@ def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None): array([ 1.79769313e+308, -1.79769313e+308, 0.00000000e+000, # may vary -1.28000000e+002, 1.28000000e+002]) >>> np.nan_to_num(x, nan=-9999, posinf=33333333, neginf=33333333) - array([ 3.3333333e+07, 3.3333333e+07, -9.9990000e+03, + array([ 3.3333333e+07, 3.3333333e+07, -9.9990000e+03, -1.2800000e+02, 1.2800000e+02]) >>> y = np.array([complex(np.inf, np.nan), np.nan, complex(np.nan, np.inf)]) array([ 1.79769313e+308, -1.79769313e+308, 0.00000000e+000, # may vary @@ -529,7 +529,7 @@ def _real_if_close_dispatcher(a, tol=None): @array_function_dispatch(_real_if_close_dispatcher) def real_if_close(a, tol=100): """ - If input is complex with all imaginary parts close to zero, return + If input is complex with all imaginary parts close to zero, return real parts. "Close to zero" is defined as `tol` * (machine epsilon of the type for @@ -583,40 +583,6 @@ def real_if_close(a, tol=100): return a -def _asscalar_dispatcher(a): - # 2018-10-10, 1.16 - warnings.warn('np.asscalar(a) is deprecated since NumPy v1.16, use ' - 'a.item() instead', DeprecationWarning, stacklevel=3) - return (a,) - - -@array_function_dispatch(_asscalar_dispatcher) -def asscalar(a): - """ - Convert an array of size 1 to its scalar equivalent. - - .. deprecated:: 1.16 - - Deprecated, use `numpy.ndarray.item()` instead. - - Parameters - ---------- - a : ndarray - Input array of size 1. - - Returns - ------- - out : scalar - Scalar representation of `a`. 
The output data type is the same type - returned by the input's `item` method. - - Examples - -------- - >>> np.asscalar(np.array([24])) - 24 - """ - return a.item() - #----------------------------------------------------------------------------- _namefromtype = {'S1': 'character', diff --git a/numpy/lib/type_check.pyi b/numpy/lib/type_check.pyi index 0a55dbf21..72eb777be 100644 --- a/numpy/lib/type_check.pyi +++ b/numpy/lib/type_check.pyi @@ -1,11 +1,8 @@ +from collections.abc import Container, Iterable from typing import ( Literal as L, Any, - Container, - Iterable, - List, overload, - Type, TypeVar, Protocol, ) @@ -49,7 +46,7 @@ class _SupportsImag(Protocol[_T_co]): @property def imag(self) -> _T_co: ... -__all__: List[str] +__all__: list[str] def mintypecode( typechars: Iterable[str | ArrayLike], @@ -62,7 +59,7 @@ def mintypecode( @overload def asfarray( a: object, - dtype: None | Type[float] = ..., + dtype: None | type[float] = ..., ) -> NDArray[float64]: ... @overload def asfarray( # type: ignore[misc] @@ -151,9 +148,6 @@ def real_if_close( tol: float = ..., ) -> NDArray[Any]: ... -# NOTE: deprecated -# def asscalar(a): ... - @overload def typename(char: L['S1']) -> L['character']: ... @overload @@ -204,28 +198,28 @@ def common_type( # type: ignore[misc] *arrays: _SupportsDType[dtype[ integer[Any] ]] -) -> Type[floating[_64Bit]]: ... +) -> type[floating[_64Bit]]: ... @overload def common_type( # type: ignore[misc] *arrays: _SupportsDType[dtype[ floating[_NBit1] ]] -) -> Type[floating[_NBit1]]: ... +) -> type[floating[_NBit1]]: ... @overload def common_type( # type: ignore[misc] *arrays: _SupportsDType[dtype[ integer[Any] | floating[_NBit1] ]] -) -> Type[floating[_NBit1 | _64Bit]]: ... +) -> type[floating[_NBit1 | _64Bit]]: ... @overload def common_type( # type: ignore[misc] *arrays: _SupportsDType[dtype[ floating[_NBit1] | complexfloating[_NBit2, _NBit2] ]] -) -> Type[complexfloating[_NBit1 | _NBit2, _NBit1 | _NBit2]]: ... 
+) -> type[complexfloating[_NBit1 | _NBit2, _NBit1 | _NBit2]]: ... @overload def common_type( *arrays: _SupportsDType[dtype[ integer[Any] | floating[_NBit1] | complexfloating[_NBit2, _NBit2] ]] -) -> Type[complexfloating[_64Bit | _NBit1 | _NBit2, _64Bit | _NBit1 | _NBit2]]: ... +) -> type[complexfloating[_64Bit | _NBit1 | _NBit2, _64Bit | _NBit1 | _NBit2]]: ... diff --git a/numpy/lib/ufunclike.pyi b/numpy/lib/ufunclike.pyi index 03f08ebff..9a6bbc825 100644 --- a/numpy/lib/ufunclike.pyi +++ b/numpy/lib/ufunclike.pyi @@ -1,4 +1,4 @@ -from typing import Any, overload, TypeVar, List, Union +from typing import Any, overload, TypeVar from numpy import floating, bool_, object_, ndarray from numpy.typing import ( @@ -10,7 +10,7 @@ from numpy.typing import ( _ArrayType = TypeVar("_ArrayType", bound=ndarray[Any, Any]) -__all__: List[str] +__all__: list[str] @overload def fix( # type: ignore[misc] @@ -29,7 +29,7 @@ def fix( ) -> NDArray[object_]: ... @overload def fix( - x: Union[_ArrayLikeFloat_co, _ArrayLikeObject_co], + x: _ArrayLikeFloat_co | _ArrayLikeObject_co, out: _ArrayType, ) -> _ArrayType: ... diff --git a/numpy/lib/utils.py b/numpy/lib/utils.py index 1df2ab09b..c74ee127d 100644 --- a/numpy/lib/utils.py +++ b/numpy/lib/utils.py @@ -429,7 +429,7 @@ def _makenamedict(module='numpy'): return thedict, dictlist -def _info(obj, output=sys.stdout): +def _info(obj, output=None): """Provide information about ndarray obj. Parameters @@ -455,6 +455,9 @@ def _info(obj, output=sys.stdout): strides = obj.strides endian = obj.dtype.byteorder + if output is None: + output = sys.stdout + print("class: ", nm, file=output) print("shape: ", obj.shape, file=output) print("strides: ", strides, file=output) @@ -481,7 +484,7 @@ def _info(obj, output=sys.stdout): @set_module('numpy') -def info(object=None, maxwidth=76, output=sys.stdout, toplevel='numpy'): +def info(object=None, maxwidth=76, output=None, toplevel='numpy'): """ Get help information for a function, class, or module. 
@@ -496,7 +499,8 @@ def info(object=None, maxwidth=76, output=sys.stdout, toplevel='numpy'): Printing width. output : file like object, optional File like object that the output is written to, default is - ``stdout``. The object has to be opened in 'w' or 'a' mode. + ``None``, in which case ``sys.stdout`` will be used. + The object has to be opened in 'w' or 'a' mode. toplevel : str, optional Start search at this level. @@ -541,6 +545,9 @@ def info(object=None, maxwidth=76, output=sys.stdout, toplevel='numpy'): elif hasattr(object, '_ppimport_attr'): object = object._ppimport_attr + if output is None: + output = sys.stdout + if object is None: info(info) elif isinstance(object, ndarray): diff --git a/numpy/lib/utils.pyi b/numpy/lib/utils.pyi index f0a8797ad..407ce1120 100644 --- a/numpy/lib/utils.pyi +++ b/numpy/lib/utils.pyi @@ -1,15 +1,9 @@ from ast import AST +from collections.abc import Callable, Mapping, Sequence from typing import ( Any, - Callable, - List, - Mapping, - Optional, overload, - Sequence, - Tuple, TypeVar, - Union, Protocol, ) @@ -28,17 +22,17 @@ _FuncType = TypeVar("_FuncType", bound=Callable[..., Any]) class _SupportsWrite(Protocol[_T_contra]): def write(self, s: _T_contra, /) -> Any: ... -__all__: List[str] +__all__: list[str] class _Deprecate: - old_name: Optional[str] - new_name: Optional[str] - message: Optional[str] + old_name: None | str + new_name: None | str + message: None | str def __init__( self, - old_name: Optional[str] = ..., - new_name: Optional[str] = ..., - message: Optional[str] = ..., + old_name: None | str = ..., + new_name: None | str = ..., + message: None | str = ..., ) -> None: ... # NOTE: `__call__` can in principle take arbitrary `*args` and `**kwargs`, # even though they aren't used for anything @@ -49,47 +43,47 @@ def get_include() -> str: ... 
@overload def deprecate( *, - old_name: Optional[str] = ..., - new_name: Optional[str] = ..., - message: Optional[str] = ..., + old_name: None | str = ..., + new_name: None | str = ..., + message: None | str = ..., ) -> _Deprecate: ... @overload def deprecate( func: _FuncType, /, - old_name: Optional[str] = ..., - new_name: Optional[str] = ..., - message: Optional[str] = ..., + old_name: None | str = ..., + new_name: None | str = ..., + message: None | str = ..., ) -> _FuncType: ... -def deprecate_with_doc(msg: Optional[str]) -> _Deprecate: ... +def deprecate_with_doc(msg: None | str) -> _Deprecate: ... # NOTE: In practice `byte_bounds` can (potentially) take any object # implementing the `__array_interface__` protocol. The caveat is # that certain keys, marked as optional in the spec, must be present for # `byte_bounds`. This concerns `"strides"` and `"data"`. -def byte_bounds(a: Union[generic, ndarray[Any, Any]]) -> Tuple[int, int]: ... +def byte_bounds(a: generic | ndarray[Any, Any]) -> tuple[int, int]: ... -def who(vardict: Optional[Mapping[str, ndarray[Any, Any]]] = ...) -> None: ... +def who(vardict: None | Mapping[str, ndarray[Any, Any]] = ...) -> None: ... def info( object: object = ..., maxwidth: int = ..., - output: Optional[_SupportsWrite[str]] = ..., + output: None | _SupportsWrite[str] = ..., toplevel: str = ..., ) -> None: ... def source( object: object, - output: Optional[_SupportsWrite[str]] = ..., + output: None | _SupportsWrite[str] = ..., ) -> None: ... def lookfor( what: str, - module: Union[None, str, Sequence[str]] = ..., + module: None | str | Sequence[str] = ..., import_modules: bool = ..., regenerate: bool = ..., - output: Optional[_SupportsWrite[str]] =..., + output: None | _SupportsWrite[str] =..., ) -> None: ... -def safe_eval(source: Union[str, AST]) -> Any: ... +def safe_eval(source: str | AST) -> Any: ... 
diff --git a/numpy/linalg/__init__.pyi b/numpy/linalg/__init__.pyi index d457f153a..d9acd5581 100644 --- a/numpy/linalg/__init__.pyi +++ b/numpy/linalg/__init__.pyi @@ -1,5 +1,3 @@ -from typing import Any, List - from numpy.linalg.linalg import ( matrix_power as matrix_power, solve as solve, @@ -25,8 +23,8 @@ from numpy.linalg.linalg import ( from numpy._pytesttester import PytestTester -__all__: List[str] -__path__: List[str] +__all__: list[str] +__path__: list[str] test: PytestTester class LinAlgError(Exception): ... diff --git a/numpy/linalg/linalg.py b/numpy/linalg/linalg.py index 0c27e0631..d831886c0 100644 --- a/numpy/linalg/linalg.py +++ b/numpy/linalg/linalg.py @@ -299,7 +299,13 @@ def tensorsolve(a, b, axes=None): for k in oldshape: prod *= k - a = a.reshape(-1, prod) + if a.size != prod ** 2: + raise LinAlgError( + "Input arrays must satisfy the requirement \ + prod(a.shape[b.ndim:]) == prod(a.shape[:b.ndim])" + ) + + a = a.reshape(prod, prod) b = b.ravel() res = wrap(solve(a, b)) res.shape = oldshape diff --git a/numpy/linalg/linalg.pyi b/numpy/linalg/linalg.pyi index a60b9539e..ae0d4fe12 100644 --- a/numpy/linalg/linalg.pyi +++ b/numpy/linalg/linalg.pyi @@ -1,13 +1,11 @@ +from collections.abc import Iterable from typing import ( Literal as L, - List, - Iterable, overload, TypeVar, Any, SupportsIndex, SupportsInt, - Tuple, ) from numpy import ( @@ -34,10 +32,10 @@ from numpy.typing import ( _T = TypeVar("_T") _ArrayType = TypeVar("_ArrayType", bound=NDArray[Any]) -_2Tuple = Tuple[_T, _T] +_2Tuple = tuple[_T, _T] _ModeKind = L["reduced", "complete", "r", "raw"] -__all__: List[str] +__all__: list[str] @overload def tensorsolve( @@ -141,17 +139,17 @@ def eig(a: _ArrayLikeComplex_co) -> _2Tuple[NDArray[complexfloating[Any, Any]]]: def eigh( a: _ArrayLikeInt_co, UPLO: L["L", "U", "l", "u"] = ..., -) -> Tuple[NDArray[float64], NDArray[float64]]: ... +) -> tuple[NDArray[float64], NDArray[float64]]: ... 
@overload def eigh( a: _ArrayLikeFloat_co, UPLO: L["L", "U", "l", "u"] = ..., -) -> Tuple[NDArray[floating[Any]], NDArray[floating[Any]]]: ... +) -> tuple[NDArray[floating[Any]], NDArray[floating[Any]]]: ... @overload def eigh( a: _ArrayLikeComplex_co, UPLO: L["L", "U", "l", "u"] = ..., -) -> Tuple[NDArray[floating[Any]], NDArray[complexfloating[Any, Any]]]: ... +) -> tuple[NDArray[floating[Any]], NDArray[complexfloating[Any, Any]]]: ... @overload def svd( @@ -159,7 +157,7 @@ def svd( full_matrices: bool = ..., compute_uv: L[True] = ..., hermitian: bool = ..., -) -> Tuple[ +) -> tuple[ NDArray[float64], NDArray[float64], NDArray[float64], @@ -170,7 +168,7 @@ def svd( full_matrices: bool = ..., compute_uv: L[True] = ..., hermitian: bool = ..., -) -> Tuple[ +) -> tuple[ NDArray[floating[Any]], NDArray[floating[Any]], NDArray[floating[Any]], @@ -181,7 +179,7 @@ def svd( full_matrices: bool = ..., compute_uv: L[True] = ..., hermitian: bool = ..., -) -> Tuple[ +) -> tuple[ NDArray[complexfloating[Any, Any]], NDArray[floating[Any]], NDArray[complexfloating[Any, Any]], @@ -240,21 +238,21 @@ def slogdet(a: _ArrayLikeComplex_co) -> _2Tuple[Any]: ... def det(a: _ArrayLikeComplex_co) -> Any: ... @overload -def lstsq(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, rcond: None | float = ...) -> Tuple[ +def lstsq(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, rcond: None | float = ...) -> tuple[ NDArray[float64], NDArray[float64], int32, NDArray[float64], ]: ... @overload -def lstsq(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, rcond: None | float = ...) -> Tuple[ +def lstsq(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, rcond: None | float = ...) -> tuple[ NDArray[floating[Any]], NDArray[floating[Any]], int32, NDArray[floating[Any]], ]: ... @overload -def lstsq(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, rcond: None | float = ...) -> Tuple[ +def lstsq(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, rcond: None | float = ...) 
-> tuple[ NDArray[complexfloating[Any, Any]], NDArray[floating[Any]], int32, @@ -272,7 +270,7 @@ def norm( def norm( x: ArrayLike, ord: None | float | L["fro", "nuc"] = ..., - axis: SupportsInt | SupportsIndex | Tuple[int, ...] = ..., + axis: SupportsInt | SupportsIndex | tuple[int, ...] = ..., keepdims: bool = ..., ) -> Any: ... diff --git a/numpy/linalg/tests/test_build.py b/numpy/linalg/tests/test_build.py deleted file mode 100644 index 868341ff2..000000000 --- a/numpy/linalg/tests/test_build.py +++ /dev/null @@ -1,53 +0,0 @@ -from subprocess import PIPE, Popen -import sys -import re -import pytest - -from numpy.linalg import lapack_lite -from numpy.testing import assert_ - - -class FindDependenciesLdd: - - def __init__(self): - self.cmd = ['ldd'] - - try: - p = Popen(self.cmd, stdout=PIPE, stderr=PIPE) - stdout, stderr = p.communicate() - except OSError as e: - raise RuntimeError(f'command {self.cmd} cannot be run') from e - - def get_dependencies(self, lfile): - p = Popen(self.cmd + [lfile], stdout=PIPE, stderr=PIPE) - stdout, stderr = p.communicate() - if not (p.returncode == 0): - raise RuntimeError(f'failed dependencies check for {lfile}') - - return stdout - - def grep_dependencies(self, lfile, deps): - stdout = self.get_dependencies(lfile) - - rdeps = dict([(dep, re.compile(dep)) for dep in deps]) - founds = [] - for l in stdout.splitlines(): - for k, v in rdeps.items(): - if v.search(l): - founds.append(k) - - return founds - - -class TestF77Mismatch: - - @pytest.mark.skipif(not(sys.platform[:5] == 'linux'), - reason="no fortran compiler on non-Linux platform") - def test_lapack(self): - f = FindDependenciesLdd() - deps = f.grep_dependencies(lapack_lite.__file__, - [b'libg2c', b'libgfortran']) - assert_(len(deps) <= 1, - """Both g77 and gfortran runtimes linked in lapack_lite ! This is likely to -cause random crashes and wrong results. 
See numpy INSTALL.txt for more -information.""") diff --git a/numpy/linalg/tests/test_linalg.py b/numpy/linalg/tests/test_linalg.py index c1ba84a8e..5f9f3b920 100644 --- a/numpy/linalg/tests/test_linalg.py +++ b/numpy/linalg/tests/test_linalg.py @@ -467,6 +467,11 @@ class TestSolve(SolveCases): x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) assert_equal(linalg.solve(x, x).dtype, dtype) + @pytest.mark.xfail(sys.platform == 'cygwin', + reason="Consistently fails on CI.") + def test_sq_cases(self): + super().test_sq_cases() + def test_0_size(self): class ArraySubclass(np.ndarray): pass @@ -534,6 +539,11 @@ class TestInv(InvCases): x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) assert_equal(linalg.inv(x).dtype, dtype) + @pytest.mark.xfail(sys.platform == 'cygwin', + reason="Consistently fails on CI.") + def test_sq_cases(self): + super().test_sq_cases() + def test_0_size(self): # Check that all kinds of 0-sized arrays work class ArraySubclass(np.ndarray): @@ -1773,29 +1783,34 @@ class TestQR: class TestCholesky: # TODO: are there no other tests for cholesky? 
- def test_basic_property(self): + @pytest.mark.xfail( + sys.platform == 'cygwin', reason="Consistently fails in CI" + ) + @pytest.mark.parametrize( + 'shape', [(1, 1), (2, 2), (3, 3), (50, 50), (3, 10, 10)] + ) + @pytest.mark.parametrize( + 'dtype', (np.float32, np.float64, np.complex64, np.complex128) + ) + def test_basic_property(self, shape, dtype): # Check A = L L^H - shapes = [(1, 1), (2, 2), (3, 3), (50, 50), (3, 10, 10)] - dtypes = (np.float32, np.float64, np.complex64, np.complex128) - - for shape, dtype in itertools.product(shapes, dtypes): - np.random.seed(1) - a = np.random.randn(*shape) - if np.issubdtype(dtype, np.complexfloating): - a = a + 1j*np.random.randn(*shape) + np.random.seed(1) + a = np.random.randn(*shape) + if np.issubdtype(dtype, np.complexfloating): + a = a + 1j*np.random.randn(*shape) - t = list(range(len(shape))) - t[-2:] = -1, -2 + t = list(range(len(shape))) + t[-2:] = -1, -2 - a = np.matmul(a.transpose(t).conj(), a) - a = np.asarray(a, dtype=dtype) + a = np.matmul(a.transpose(t).conj(), a) + a = np.asarray(a, dtype=dtype) - c = np.linalg.cholesky(a) + c = np.linalg.cholesky(a) - b = np.matmul(c, c.transpose(t).conj()) - assert_allclose(b, a, - err_msg=f'{shape} {dtype}\n{a}\n{c}', - atol=500 * a.shape[0] * np.finfo(dtype).eps) + b = np.matmul(c, c.transpose(t).conj()) + assert_allclose(b, a, + err_msg=f'{shape} {dtype}\n{a}\n{c}', + atol=500 * a.shape[0] * np.finfo(dtype).eps) def test_0_size(self): class ArraySubclass(np.ndarray): @@ -2103,6 +2118,29 @@ class TestTensorinv: assert_allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b)) +class TestTensorsolve: + + @pytest.mark.parametrize("a, axes", [ + (np.ones((4, 6, 8, 2)), None), + (np.ones((3, 3, 2)), (0, 2)), + ]) + def test_non_square_handling(self, a, axes): + with assert_raises(LinAlgError): + b = np.ones(a.shape[:2]) + linalg.tensorsolve(a, b, axes=axes) + + @pytest.mark.xfail(sys.platform == 'cygwin', + reason="Consistently fails on CI") + 
@pytest.mark.parametrize("shape", + [(2, 3, 6), (3, 4, 4, 3), (0, 3, 3, 0)], + ) + def test_tensorsolve_result(self, shape): + a = np.random.randn(*shape) + b = np.ones(a.shape[:2]) + x = np.linalg.tensorsolve(a, b) + assert_allclose(np.tensordot(a, x, axes=len(x.shape)), b) + + def test_unsupported_commontype(): # linalg gracefully handles unsupported type arr = np.array([[1, -2], [2, 5]], dtype='float16') diff --git a/numpy/linalg/umath_linalg.c.src b/numpy/linalg/umath_linalg.c.src index ff63ea218..f8a154445 100644 --- a/numpy/linalg/umath_linalg.c.src +++ b/numpy/linalg/umath_linalg.c.src @@ -4396,7 +4396,7 @@ static struct PyModuleDef moduledef = { NULL }; -PyObject *PyInit__umath_linalg(void) +PyMODINIT_FUNC PyInit__umath_linalg(void) { PyObject *m; PyObject *d; diff --git a/numpy/ma/__init__.pyi b/numpy/ma/__init__.pyi index 26d44b508..04368b6c4 100644 --- a/numpy/ma/__init__.pyi +++ b/numpy/ma/__init__.pyi @@ -1,5 +1,3 @@ -from typing import Any, List - from numpy._pytesttester import PytestTester from numpy.ma import extras as extras @@ -231,6 +229,6 @@ from numpy.ma.extras import ( vstack as vstack, ) -__all__: List[str] -__path__: List[str] +__all__: list[str] +__path__: list[str] test: PytestTester diff --git a/numpy/ma/core.py b/numpy/ma/core.py index 491c2c605..e0e5403a9 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -4811,7 +4811,6 @@ class MaskedArray(ndarray): WRITEABLE : True ALIGNED : True WRITEBACKIFCOPY : False - UPDATEIFCOPY : False """ return self.flags['CONTIGUOUS'] @@ -5666,9 +5665,12 @@ class MaskedArray(ndarray): Parameters ---------- - axis : {None, int}, optional + axis : None or int or tuple of ints, optional Axis along which to operate. By default, ``axis`` is None and the flattened input is used. + .. versionadded:: 1.7.0 + If this is a tuple of ints, the minimum is selected over multiple + axes, instead of a single axis or all the axes as before. 
out : array_like, optional Alternative output array in which to place the result. Must be of the same shape and buffer length as the expected output. @@ -5800,9 +5802,12 @@ class MaskedArray(ndarray): Parameters ---------- - axis : {None, int}, optional + axis : None or int or tuple of ints, optional Axis along which to operate. By default, ``axis`` is None and the flattened input is used. + .. versionadded:: 1.7.0 + If this is a tuple of ints, the maximum is selected over multiple + axes, instead of a single axis or all the axes as before. out : array_like, optional Alternative output array in which to place the result. Must be of the same shape and buffer length as the expected output. diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index bc1f45a8d..ffdb21983 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -1,4 +1,5 @@ -from typing import Any, List, TypeVar, Callable +from collections.abc import Callable +from typing import Any, TypeVar from numpy import ndarray, dtype, float64 from numpy import ( @@ -23,7 +24,7 @@ from numpy.lib.function_base import ( _ShapeType = TypeVar("_ShapeType", bound=Any) _DType_co = TypeVar("_DType_co", bound=dtype[Any], covariant=True) -__all__: List[str] +__all__: list[str] MaskType = bool_ nomask: bool_ diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py index 38bf1f0e8..56da5a7e5 100644 --- a/numpy/ma/extras.py +++ b/numpy/ma/extras.py @@ -592,7 +592,7 @@ def average(a, axis=None, weights=None, returned=False): avg = a.mean(axis) scl = avg.dtype.type(a.count(axis)) else: - wgt = np.asanyarray(weights) + wgt = asarray(weights) if issubclass(a.dtype.type, (np.integer, np.bool_)): result_dtype = np.result_type(a.dtype, wgt.dtype, 'f8') @@ -618,6 +618,7 @@ def average(a, axis=None, weights=None, returned=False): if m is not nomask: wgt = wgt*(~a.mask) + wgt.mask |= a.mask scl = wgt.sum(axis=axis, dtype=result_dtype) avg = np.multiply(a, wgt, dtype=result_dtype).sum(axis)/scl diff --git a/numpy/ma/extras.pyi 
b/numpy/ma/extras.pyi index e58e43bad..e66d7cb63 100644 --- a/numpy/ma/extras.pyi +++ b/numpy/ma/extras.pyi @@ -1,4 +1,4 @@ -from typing import Any, List +from typing import Any from numpy.lib.index_tricks import AxisConcatenator from numpy.ma.core import ( @@ -6,7 +6,7 @@ from numpy.ma.core import ( mask_rowcols as mask_rowcols, ) -__all__: List[str] +__all__: list[str] def count_masked(arr, axis=...): ... def masked_all(shape, dtype = ...): ... diff --git a/numpy/ma/mrecords.pyi b/numpy/ma/mrecords.pyi index 7bd8678cf..264807e05 100644 --- a/numpy/ma/mrecords.pyi +++ b/numpy/ma/mrecords.pyi @@ -1,9 +1,9 @@ -from typing import List, Any, TypeVar +from typing import Any, TypeVar from numpy import dtype from numpy.ma import MaskedArray -__all__: List[str] +__all__: list[str] # TODO: Set the `bound` to something more suitable once we # have proper shape support @@ -84,7 +84,7 @@ def fromtextfile( varnames=..., vartypes=..., # NOTE: deprecated: NumPy 1.22.0, 2021-09-23 - # delimitor=..., + # delimitor=..., ): ... def addfield(mrecord, newfield, newfieldname=...): ... 
diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py index bf95c999a..c8f7f4269 100644 --- a/numpy/ma/tests/test_core.py +++ b/numpy/ma/tests/test_core.py @@ -3422,6 +3422,10 @@ class TestMaskedArrayMethods: assert_equal(sortedx._data, [1, 2, -2, -1, 0]) assert_equal(sortedx._mask, [1, 1, 0, 0, 0]) + x = array([0, -1], dtype=np.int8) + sortedx = sort(x, kind="stable") + assert_equal(sortedx, array([-1, 0], dtype=np.int8)) + def test_stable_sort(self): x = array([1, 2, 3, 1, 2, 3], dtype=np.uint8) expected = array([0, 3, 1, 4, 2, 5]) diff --git a/numpy/ma/tests/test_extras.py b/numpy/ma/tests/test_extras.py index e735b9bc7..d30dfd92f 100644 --- a/numpy/ma/tests/test_extras.py +++ b/numpy/ma/tests/test_extras.py @@ -309,6 +309,31 @@ class TestAverage: expected_masked = np.array([6.0, 5.576271186440678, 6.576271186440678]) assert_almost_equal(avg_masked, expected_masked) + # weights should be masked if needed + # depending on the array mask. This is to avoid summing + # masked nan or other values that are not cancelled by a zero + a = np.ma.array([1.0, 2.0, 3.0, 4.0], + mask=[False, False, True, True]) + avg_unmasked = average(a, weights=[1, 1, 1, np.nan]) + + assert_almost_equal(avg_unmasked, 1.5) + + a = np.ma.array([ + [1.0, 2.0, 3.0, 4.0], + [5.0, 6.0, 7.0, 8.0], + [9.0, 1.0, 2.0, 3.0], + ], mask=[ + [False, True, True, False], + [True, False, True, True], + [True, False, True, False], + ]) + + avg_masked = np.ma.average(a, weights=[1, np.nan, 1], axis=0) + avg_expected = np.ma.array([1.0, np.nan, np.nan, 3.5], + mask=[False, True, True, False]) + + assert_almost_equal(avg_masked, avg_expected) + assert_equal(avg_masked.mask, avg_expected.mask) class TestConcatenator: # Tests for mr_, the equivalent of r_ for masked arrays. 
diff --git a/numpy/ma/tests/test_subclassing.py b/numpy/ma/tests/test_subclassing.py index 83a9b2f51..3491cef7f 100644 --- a/numpy/ma/tests/test_subclassing.py +++ b/numpy/ma/tests/test_subclassing.py @@ -28,8 +28,7 @@ class SubArray(np.ndarray): return x def __array_finalize__(self, obj): - if callable(getattr(super(), '__array_finalize__', None)): - super().__array_finalize__(obj) + super().__array_finalize__(obj) self.info = getattr(obj, 'info', {}).copy() return @@ -315,7 +314,7 @@ class TestSubclassing: assert_startswith(repr(mx), 'masked_array') xsub = SubArray(x) mxsub = masked_array(xsub, mask=[True, False, True, False, False]) - assert_startswith(repr(mxsub), + assert_startswith(repr(mxsub), f'masked_{SubArray.__name__}(data=[--, 1, --, 3, 4]') def test_subclass_str(self): diff --git a/numpy/matrixlib/__init__.pyi b/numpy/matrixlib/__init__.pyi index c1b82d2ec..b0ca8c9ca 100644 --- a/numpy/matrixlib/__init__.pyi +++ b/numpy/matrixlib/__init__.pyi @@ -1,5 +1,3 @@ -from typing import List - from numpy._pytesttester import PytestTester from numpy import ( @@ -12,6 +10,6 @@ from numpy.matrixlib.defmatrix import ( asmatrix as asmatrix, ) -__all__: List[str] -__path__: List[str] +__all__: list[str] +__path__: list[str] test: PytestTester diff --git a/numpy/matrixlib/defmatrix.pyi b/numpy/matrixlib/defmatrix.pyi index 6c86ea1ef..8358bb111 100644 --- a/numpy/matrixlib/defmatrix.pyi +++ b/numpy/matrixlib/defmatrix.pyi @@ -1,8 +1,9 @@ -from typing import List, Any, Sequence, Mapping +from collections.abc import Sequence, Mapping +from typing import Any from numpy import matrix as matrix from numpy.typing import ArrayLike, DTypeLike, NDArray -__all__: List[str] +__all__: list[str] def bmat( obj: str | Sequence[ArrayLike] | NDArray[Any], diff --git a/numpy/polynomial/__init__.pyi b/numpy/polynomial/__init__.pyi index e0cfedd7a..c9d1c27a9 100644 --- a/numpy/polynomial/__init__.pyi +++ b/numpy/polynomial/__init__.pyi @@ -1,5 +1,3 @@ -from typing import List - from 
numpy._pytesttester import PytestTester from numpy.polynomial import ( @@ -17,8 +15,8 @@ from numpy.polynomial.laguerre import Laguerre as Laguerre from numpy.polynomial.legendre import Legendre as Legendre from numpy.polynomial.polynomial import Polynomial as Polynomial -__all__: List[str] -__path__: List[str] +__all__: list[str] +__path__: list[str] test: PytestTester def set_default_printstyle(style): ... diff --git a/numpy/polynomial/_polybase.pyi b/numpy/polynomial/_polybase.pyi index c41601469..537221c45 100644 --- a/numpy/polynomial/_polybase.pyi +++ b/numpy/polynomial/_polybase.pyi @@ -1,7 +1,7 @@ import abc -from typing import Any, List, ClassVar +from typing import Any, ClassVar -__all__: List[str] +__all__: list[str] class ABCPolyBase(abc.ABC): __hash__: ClassVar[None] # type: ignore[assignment] diff --git a/numpy/polynomial/chebyshev.pyi b/numpy/polynomial/chebyshev.pyi index 841c0859b..e8113dbae 100644 --- a/numpy/polynomial/chebyshev.pyi +++ b/numpy/polynomial/chebyshev.pyi @@ -1,10 +1,10 @@ -from typing import Any, List +from typing import Any from numpy import ndarray, dtype, int_ from numpy.polynomial._polybase import ABCPolyBase from numpy.polynomial.polyutils import trimcoef -__all__: List[str] +__all__: list[str] chebtrim = trimcoef diff --git a/numpy/polynomial/hermite.pyi b/numpy/polynomial/hermite.pyi index 8364a5b0f..0d3556d69 100644 --- a/numpy/polynomial/hermite.pyi +++ b/numpy/polynomial/hermite.pyi @@ -1,4 +1,4 @@ -from typing import Any, List +from typing import Any from numpy import ndarray, dtype, int_, float_ from numpy.polynomial._polybase import ABCPolyBase diff --git a/numpy/polynomial/hermite_e.pyi b/numpy/polynomial/hermite_e.pyi index c029bfda7..0b7152a25 100644 --- a/numpy/polynomial/hermite_e.pyi +++ b/numpy/polynomial/hermite_e.pyi @@ -1,4 +1,4 @@ -from typing import Any, List +from typing import Any from numpy import ndarray, dtype, int_ from numpy.polynomial._polybase import ABCPolyBase diff --git 
a/numpy/polynomial/laguerre.pyi b/numpy/polynomial/laguerre.pyi index 2b9ab34e0..e546bc20a 100644 --- a/numpy/polynomial/laguerre.pyi +++ b/numpy/polynomial/laguerre.pyi @@ -1,4 +1,4 @@ -from typing import Any, List +from typing import Any from numpy import ndarray, dtype, int_ from numpy.polynomial._polybase import ABCPolyBase diff --git a/numpy/polynomial/legendre.pyi b/numpy/polynomial/legendre.pyi index 86aef1793..63a1c3f3a 100644 --- a/numpy/polynomial/legendre.pyi +++ b/numpy/polynomial/legendre.pyi @@ -1,4 +1,4 @@ -from typing import Any, List +from typing import Any from numpy import ndarray, dtype, int_ from numpy.polynomial._polybase import ABCPolyBase diff --git a/numpy/polynomial/polynomial.pyi b/numpy/polynomial/polynomial.pyi index f779300a9..3c87f9d29 100644 --- a/numpy/polynomial/polynomial.pyi +++ b/numpy/polynomial/polynomial.pyi @@ -1,4 +1,4 @@ -from typing import Any, List +from typing import Any from numpy import ndarray, dtype, int_ from numpy.polynomial._polybase import ABCPolyBase diff --git a/numpy/polynomial/polyutils.pyi b/numpy/polynomial/polyutils.pyi index 52c9cfc4a..06260a9f1 100644 --- a/numpy/polynomial/polyutils.pyi +++ b/numpy/polynomial/polyutils.pyi @@ -1,6 +1,4 @@ -from typing import List - -__all__: List[str] +__all__: list[str] class RankWarning(UserWarning): ... 
diff --git a/numpy/random/__init__.pyi b/numpy/random/__init__.pyi index bf6147697..32bd64a0b 100644 --- a/numpy/random/__init__.pyi +++ b/numpy/random/__init__.pyi @@ -1,5 +1,3 @@ -from typing import List - from numpy._pytesttester import PytestTester from numpy.random._generator import Generator as Generator @@ -67,6 +65,6 @@ from numpy.random.mtrand import ( zipf as zipf, ) -__all__: List[str] -__path__: List[str] +__all__: list[str] +__path__: list[str] test: PytestTester diff --git a/numpy/random/_common.pyx b/numpy/random/_common.pyx index ad43f2812..864150458 100644 --- a/numpy/random/_common.pyx +++ b/numpy/random/_common.pyx @@ -5,6 +5,7 @@ from cpython cimport PyFloat_AsDouble import sys import numpy as np cimport numpy as np +cimport numpy.math as npmath from libc.stdint cimport uintptr_t @@ -171,8 +172,23 @@ cdef object prepare_ctypes(bitgen_t *bitgen): return _ctypes cdef double kahan_sum(double *darr, np.npy_intp n): + """ + Parameters + ---------- + darr : reference to double array + Address of values to sum + n : intp + Length of d + + Returns + ------- + float + The sum. 0.0 if n <= 0. 
+ """ cdef double c, y, t, sum cdef np.npy_intp i + if n <= 0: + return 0.0 sum = darr[0] c = 0.0 for i in range(1, n): @@ -343,6 +359,24 @@ cdef object float_fill_from_double(void *func, bitgen_t *state, object size, obj out_array_data[i] = <float>random_func(state) return out_array +cdef int _check_array_cons_bounded_0_1(np.ndarray val, object name) except -1: + cdef double *val_data + cdef np.npy_intp i + cdef bint err = 0 + + if not np.PyArray_ISONESEGMENT(val) or np.PyArray_TYPE(val) != np.NPY_DOUBLE: + # slow path for non-contiguous arrays or any non-double dtypes + err = not np.all(np.greater_equal(val, 0)) or not np.all(np.less_equal(val, 1)) + else: + val_data = <double *>np.PyArray_DATA(val) + for i in range(np.PyArray_SIZE(val)): + err = (not (val_data[i] >= 0)) or (not val_data[i] <= 1) + if err: + break + if err: + raise ValueError(f"{name} < 0, {name} > 1 or {name} contains NaNs") + + return 0 cdef int check_array_constraint(np.ndarray val, object name, constraint_type cons) except -1: if cons == CONS_NON_NEGATIVE: @@ -354,9 +388,7 @@ cdef int check_array_constraint(np.ndarray val, object name, constraint_type con elif np.any(np.less_equal(val, 0)): raise ValueError(name + " <= 0") elif cons == CONS_BOUNDED_0_1: - if not np.all(np.greater_equal(val, 0)) or \ - not np.all(np.less_equal(val, 1)): - raise ValueError("{0} < 0, {0} > 1 or {0} contains NaNs".format(name)) + return _check_array_cons_bounded_0_1(val, name) elif cons == CONS_BOUNDED_GT_0_1: if not np.all(np.greater(val, 0)) or not np.all(np.less_equal(val, 1)): raise ValueError("{0} <= 0, {0} > 1 or {0} contains NaNs".format(name)) @@ -383,10 +415,10 @@ cdef int check_array_constraint(np.ndarray val, object name, constraint_type con cdef int check_constraint(double val, object name, constraint_type cons) except -1: cdef bint is_nan if cons == CONS_NON_NEGATIVE: - if not np.isnan(val) and np.signbit(val): + if not npmath.isnan(val) and npmath.signbit(val): raise ValueError(name + " < 0") elif 
cons == CONS_POSITIVE or cons == CONS_POSITIVE_NOT_NAN: - if cons == CONS_POSITIVE_NOT_NAN and np.isnan(val): + if cons == CONS_POSITIVE_NOT_NAN and npmath.isnan(val): raise ValueError(name + " must not be NaN") elif val <= 0: raise ValueError(name + " <= 0") diff --git a/numpy/random/_examples/cython/setup.py b/numpy/random/_examples/cython/setup.py index 7e0dd3e05..f41150fdb 100644 --- a/numpy/random/_examples/cython/setup.py +++ b/numpy/random/_examples/cython/setup.py @@ -4,6 +4,7 @@ Build the Cython demonstrations of low-level access to NumPy random Usage: python setup.py build_ext -i """ +import setuptools # triggers monkeypatching distutils from distutils.core import setup from os.path import dirname, join, abspath diff --git a/numpy/random/_generator.pyi b/numpy/random/_generator.pyi index c574bef9a..bac25ffd5 100644 --- a/numpy/random/_generator.pyi +++ b/numpy/random/_generator.pyi @@ -1,4 +1,5 @@ -from typing import Any, Callable, Dict, Optional, Tuple, Type, Union, overload, TypeVar, Literal +from collections.abc import Callable +from typing import Any, Union, overload, TypeVar, Literal from numpy import ( bool_, @@ -48,7 +49,7 @@ _ArrayType = TypeVar("_ArrayType", bound=ndarray[Any, Any]) _DTypeLikeFloat32 = Union[ dtype[float32], _SupportsDType[dtype[float32]], - Type[float32], + type[float32], _Float32Codes, _SingleCodes, ] @@ -56,8 +57,8 @@ _DTypeLikeFloat32 = Union[ _DTypeLikeFloat64 = Union[ dtype[float64], _SupportsDType[dtype[float64]], - Type[float], - Type[float64], + type[float], + type[float64], _Float64Codes, _DoubleCodes, ] @@ -66,9 +67,9 @@ class Generator: def __init__(self, bit_generator: BitGenerator) -> None: ... def __repr__(self) -> str: ... def __str__(self) -> str: ... - def __getstate__(self) -> Dict[str, Any]: ... - def __setstate__(self, state: Dict[str, Any]) -> None: ... - def __reduce__(self) -> Tuple[Callable[[str], Generator], Tuple[str], Dict[str, Any]]: ... + def __getstate__(self) -> dict[str, Any]: ... 
+ def __setstate__(self, state: dict[str, Any]) -> None: ... + def __reduce__(self) -> tuple[Callable[[str], Generator], tuple[str], dict[str, Any]]: ... @property def bit_generator(self) -> BitGenerator: ... def bytes(self, length: int) -> bytes: ... @@ -76,7 +77,7 @@ class Generator: def standard_normal( # type: ignore[misc] self, size: None = ..., - dtype: Union[_DTypeLikeFloat32, _DTypeLikeFloat64] = ..., + dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ..., out: None = ..., ) -> float: ... @overload @@ -95,14 +96,14 @@ class Generator: self, size: _ShapeLike = ..., dtype: _DTypeLikeFloat32 = ..., - out: Optional[ndarray[Any, dtype[float32]]] = ..., + out: None | ndarray[Any, dtype[float32]] = ..., ) -> ndarray[Any, dtype[float32]]: ... @overload def standard_normal( # type: ignore[misc] self, size: _ShapeLike = ..., dtype: _DTypeLikeFloat64 = ..., - out: Optional[ndarray[Any, dtype[float64]]] = ..., + out: None | ndarray[Any, dtype[float64]] = ..., ) -> ndarray[Any, dtype[float64]]: ... @overload def permutation(self, x: int, axis: int = ...) -> ndarray[Any, dtype[int64]]: ... @@ -112,7 +113,7 @@ class Generator: def standard_exponential( # type: ignore[misc] self, size: None = ..., - dtype: Union[_DTypeLikeFloat32, _DTypeLikeFloat64] = ..., + dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ..., method: Literal["zig", "inv"] = ..., out: None = ..., ) -> float: ... @@ -133,7 +134,7 @@ class Generator: size: _ShapeLike = ..., *, method: Literal["zig", "inv"] = ..., - out: Optional[ndarray[Any, dtype[float64]]] = ..., + out: None | ndarray[Any, dtype[float64]] = ..., ) -> ndarray[Any, dtype[float64]]: ... @overload def standard_exponential( @@ -141,7 +142,7 @@ class Generator: size: _ShapeLike = ..., dtype: _DTypeLikeFloat32 = ..., method: Literal["zig", "inv"] = ..., - out: Optional[ndarray[Any, dtype[float32]]] = ..., + out: None | ndarray[Any, dtype[float32]] = ..., ) -> ndarray[Any, dtype[float32]]: ... 
@overload def standard_exponential( @@ -149,13 +150,13 @@ class Generator: size: _ShapeLike = ..., dtype: _DTypeLikeFloat64 = ..., method: Literal["zig", "inv"] = ..., - out: Optional[ndarray[Any, dtype[float64]]] = ..., + out: None | ndarray[Any, dtype[float64]] = ..., ) -> ndarray[Any, dtype[float64]]: ... @overload def random( # type: ignore[misc] self, size: None = ..., - dtype: Union[_DTypeLikeFloat32, _DTypeLikeFloat64] = ..., + dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ..., out: None = ..., ) -> float: ... @overload @@ -169,45 +170,45 @@ class Generator: self, size: _ShapeLike = ..., *, - out: Optional[ndarray[Any, dtype[float64]]] = ..., + out: None | ndarray[Any, dtype[float64]] = ..., ) -> ndarray[Any, dtype[float64]]: ... @overload def random( self, size: _ShapeLike = ..., dtype: _DTypeLikeFloat32 = ..., - out: Optional[ndarray[Any, dtype[float32]]] = ..., + out: None | ndarray[Any, dtype[float32]] = ..., ) -> ndarray[Any, dtype[float32]]: ... @overload def random( self, size: _ShapeLike = ..., dtype: _DTypeLikeFloat64 = ..., - out: Optional[ndarray[Any, dtype[float64]]] = ..., + out: None | ndarray[Any, dtype[float64]] = ..., ) -> ndarray[Any, dtype[float64]]: ... @overload def beta(self, a: float, b: float, size: None = ...) -> float: ... # type: ignore[misc] @overload def beta( - self, a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... + self, a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, size: None | _ShapeLike = ... ) -> ndarray[Any, dtype[float64]]: ... @overload def exponential(self, scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc] @overload def exponential( - self, scale: _ArrayLikeFloat_co = ..., size: Optional[_ShapeLike] = ... + self, scale: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ... ) -> ndarray[Any, dtype[float64]]: ... @overload def integers( # type: ignore[misc] self, low: int, - high: Optional[int] = ..., + high: None | int = ..., ) -> int: ... 
@overload def integers( # type: ignore[misc] self, low: int, - high: Optional[int] = ..., + high: None | int = ..., size: None = ..., dtype: _DTypeLikeBool = ..., endpoint: bool = ..., @@ -216,24 +217,24 @@ class Generator: def integers( # type: ignore[misc] self, low: int, - high: Optional[int] = ..., + high: None | int = ..., size: None = ..., - dtype: Union[_DTypeLikeInt, _DTypeLikeUInt] = ..., + dtype: _DTypeLikeInt | _DTypeLikeUInt = ..., endpoint: bool = ..., ) -> int: ... @overload def integers( # type: ignore[misc] self, low: _ArrayLikeInt_co, - high: Optional[_ArrayLikeInt_co] = ..., - size: Optional[_ShapeLike] = ..., + high: None | _ArrayLikeInt_co = ..., + size: None | _ShapeLike = ..., ) -> ndarray[Any, dtype[int64]]: ... @overload def integers( # type: ignore[misc] self, low: _ArrayLikeInt_co, - high: Optional[_ArrayLikeInt_co] = ..., - size: Optional[_ShapeLike] = ..., + high: None | _ArrayLikeInt_co = ..., + size: None | _ShapeLike = ..., dtype: _DTypeLikeBool = ..., endpoint: bool = ..., ) -> ndarray[Any, dtype[bool_]]: ... @@ -241,110 +242,100 @@ class Generator: def integers( # type: ignore[misc] self, low: _ArrayLikeInt_co, - high: Optional[_ArrayLikeInt_co] = ..., - size: Optional[_ShapeLike] = ..., - dtype: Union[dtype[int8], Type[int8], _Int8Codes, _SupportsDType[dtype[int8]]] = ..., + high: None | _ArrayLikeInt_co = ..., + size: None | _ShapeLike = ..., + dtype: dtype[int8] | type[int8] | _Int8Codes | _SupportsDType[dtype[int8]] = ..., endpoint: bool = ..., ) -> ndarray[Any, dtype[int8]]: ... 
@overload def integers( # type: ignore[misc] self, low: _ArrayLikeInt_co, - high: Optional[_ArrayLikeInt_co] = ..., - size: Optional[_ShapeLike] = ..., - dtype: Union[dtype[int16], Type[int16], _Int16Codes, _SupportsDType[dtype[int16]]] = ..., + high: None | _ArrayLikeInt_co = ..., + size: None | _ShapeLike = ..., + dtype: dtype[int16] | type[int16] | _Int16Codes | _SupportsDType[dtype[int16]] = ..., endpoint: bool = ..., ) -> ndarray[Any, dtype[int16]]: ... @overload def integers( # type: ignore[misc] self, low: _ArrayLikeInt_co, - high: Optional[_ArrayLikeInt_co] = ..., - size: Optional[_ShapeLike] = ..., - dtype: Union[dtype[int32], Type[int32], _Int32Codes, _SupportsDType[dtype[int32]]] = ..., + high: None | _ArrayLikeInt_co = ..., + size: None | _ShapeLike = ..., + dtype: dtype[int32] | type[int32] | _Int32Codes | _SupportsDType[dtype[int32]] = ..., endpoint: bool = ..., - ) -> ndarray[Any, dtype[Union[int32]]]: ... + ) -> ndarray[Any, dtype[int32]]: ... @overload def integers( # type: ignore[misc] self, low: _ArrayLikeInt_co, - high: Optional[_ArrayLikeInt_co] = ..., - size: Optional[_ShapeLike] = ..., - dtype: Optional[ - Union[dtype[int64], Type[int64], _Int64Codes, _SupportsDType[dtype[int64]]] - ] = ..., + high: None | _ArrayLikeInt_co = ..., + size: None | _ShapeLike = ..., + dtype: None | dtype[int64] | type[int64] | _Int64Codes | _SupportsDType[dtype[int64]] = ..., endpoint: bool = ..., ) -> ndarray[Any, dtype[int64]]: ... @overload def integers( # type: ignore[misc] self, low: _ArrayLikeInt_co, - high: Optional[_ArrayLikeInt_co] = ..., - size: Optional[_ShapeLike] = ..., - dtype: Union[dtype[uint8], Type[uint8], _UInt8Codes, _SupportsDType[dtype[uint8]]] = ..., + high: None | _ArrayLikeInt_co = ..., + size: None | _ShapeLike = ..., + dtype: dtype[uint8] | type[uint8] | _UInt8Codes | _SupportsDType[dtype[uint8]] = ..., endpoint: bool = ..., ) -> ndarray[Any, dtype[uint8]]: ... 
@overload def integers( # type: ignore[misc] self, low: _ArrayLikeInt_co, - high: Optional[_ArrayLikeInt_co] = ..., - size: Optional[_ShapeLike] = ..., - dtype: Union[ - dtype[uint16], Type[uint16], _UInt16Codes, _SupportsDType[dtype[uint16]] - ] = ..., + high: None | _ArrayLikeInt_co = ..., + size: None | _ShapeLike = ..., + dtype: dtype[uint16] | type[uint16] | _UInt16Codes | _SupportsDType[dtype[uint16]] = ..., endpoint: bool = ..., - ) -> ndarray[Any, dtype[Union[uint16]]]: ... + ) -> ndarray[Any, dtype[uint16]]: ... @overload def integers( # type: ignore[misc] self, low: _ArrayLikeInt_co, - high: Optional[_ArrayLikeInt_co] = ..., - size: Optional[_ShapeLike] = ..., - dtype: Union[ - dtype[uint32], Type[uint32], _UInt32Codes, _SupportsDType[dtype[uint32]] - ] = ..., + high: None | _ArrayLikeInt_co = ..., + size: None | _ShapeLike = ..., + dtype: dtype[uint32] | type[uint32] | _UInt32Codes | _SupportsDType[dtype[uint32]] = ..., endpoint: bool = ..., ) -> ndarray[Any, dtype[uint32]]: ... @overload def integers( # type: ignore[misc] self, low: _ArrayLikeInt_co, - high: Optional[_ArrayLikeInt_co] = ..., - size: Optional[_ShapeLike] = ..., - dtype: Union[ - dtype[uint64], Type[uint64], _UInt64Codes, _SupportsDType[dtype[uint64]] - ] = ..., + high: None | _ArrayLikeInt_co = ..., + size: None | _ShapeLike = ..., + dtype: dtype[uint64] | type[uint64] | _UInt64Codes | _SupportsDType[dtype[uint64]] = ..., endpoint: bool = ..., ) -> ndarray[Any, dtype[uint64]]: ... @overload def integers( # type: ignore[misc] self, low: _ArrayLikeInt_co, - high: Optional[_ArrayLikeInt_co] = ..., - size: Optional[_ShapeLike] = ..., - dtype: Union[ - dtype[int_], Type[int], Type[int_], _IntCodes, _SupportsDType[dtype[int_]] - ] = ..., + high: None | _ArrayLikeInt_co = ..., + size: None | _ShapeLike = ..., + dtype: dtype[int_] | type[int] | type[int_] | _IntCodes | _SupportsDType[dtype[int_]] = ..., endpoint: bool = ..., ) -> ndarray[Any, dtype[int_]]: ... 
@overload def integers( # type: ignore[misc] self, low: _ArrayLikeInt_co, - high: Optional[_ArrayLikeInt_co] = ..., - size: Optional[_ShapeLike] = ..., - dtype: Union[dtype[uint], Type[uint], _UIntCodes, _SupportsDType[dtype[uint]]] = ..., + high: None | _ArrayLikeInt_co = ..., + size: None | _ShapeLike = ..., + dtype: dtype[uint] | type[uint] | _UIntCodes | _SupportsDType[dtype[uint]] = ..., endpoint: bool = ..., ) -> ndarray[Any, dtype[uint]]: ... - # TODO: Use a TypeVar _T here to get away from Any output? Should be int->ndarray[Any,dtype[int64]], ArrayLike[_T] -> Union[_T, ndarray[Any,Any]] + # TODO: Use a TypeVar _T here to get away from Any output? Should be int->ndarray[Any,dtype[int64]], ArrayLike[_T] -> _T | ndarray[Any,Any] @overload def choice( self, a: int, size: None = ..., replace: bool = ..., - p: Optional[_ArrayLikeFloat_co] = ..., + p: None | _ArrayLikeFloat_co = ..., axis: int = ..., shuffle: bool = ..., ) -> int: ... @@ -354,7 +345,7 @@ class Generator: a: int, size: _ShapeLike = ..., replace: bool = ..., - p: Optional[_ArrayLikeFloat_co] = ..., + p: None | _ArrayLikeFloat_co = ..., axis: int = ..., shuffle: bool = ..., ) -> ndarray[Any, dtype[int64]]: ... @@ -364,7 +355,7 @@ class Generator: a: ArrayLike, size: None = ..., replace: bool = ..., - p: Optional[_ArrayLikeFloat_co] = ..., + p: None | _ArrayLikeFloat_co = ..., axis: int = ..., shuffle: bool = ..., ) -> Any: ... @@ -374,7 +365,7 @@ class Generator: a: ArrayLike, size: _ShapeLike = ..., replace: bool = ..., - p: Optional[_ArrayLikeFloat_co] = ..., + p: None | _ArrayLikeFloat_co = ..., axis: int = ..., shuffle: bool = ..., ) -> ndarray[Any, Any]: ... @@ -385,7 +376,7 @@ class Generator: self, low: _ArrayLikeFloat_co = ..., high: _ArrayLikeFloat_co = ..., - size: Optional[_ShapeLike] = ..., + size: None | _ShapeLike = ..., ) -> ndarray[Any, dtype[float64]]: ... @overload def normal(self, loc: float = ..., scale: float = ..., size: None = ...) -> float: ... 
# type: ignore[misc] @@ -394,21 +385,21 @@ class Generator: self, loc: _ArrayLikeFloat_co = ..., scale: _ArrayLikeFloat_co = ..., - size: Optional[_ShapeLike] = ..., + size: None | _ShapeLike = ..., ) -> ndarray[Any, dtype[float64]]: ... @overload def standard_gamma( # type: ignore[misc] self, shape: float, size: None = ..., - dtype: Union[_DTypeLikeFloat32, _DTypeLikeFloat64] = ..., + dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ..., out: None = ..., ) -> float: ... @overload def standard_gamma( self, shape: _ArrayLikeFloat_co, - size: Optional[_ShapeLike] = ..., + size: None | _ShapeLike = ..., ) -> ndarray[Any, dtype[float64]]: ... @overload def standard_gamma( @@ -421,17 +412,17 @@ class Generator: def standard_gamma( self, shape: _ArrayLikeFloat_co, - size: Optional[_ShapeLike] = ..., + size: None | _ShapeLike = ..., dtype: _DTypeLikeFloat32 = ..., - out: Optional[ndarray[Any, dtype[float32]]] = ..., + out: None | ndarray[Any, dtype[float32]] = ..., ) -> ndarray[Any, dtype[float32]]: ... @overload def standard_gamma( self, shape: _ArrayLikeFloat_co, - size: Optional[_ShapeLike] = ..., + size: None | _ShapeLike = ..., dtype: _DTypeLikeFloat64 = ..., - out: Optional[ndarray[Any, dtype[float64]]] = ..., + out: None | ndarray[Any, dtype[float64]] = ..., ) -> ndarray[Any, dtype[float64]]: ... @overload def gamma(self, shape: float, scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc] @@ -440,13 +431,13 @@ class Generator: self, shape: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co = ..., - size: Optional[_ShapeLike] = ..., + size: None | _ShapeLike = ..., ) -> ndarray[Any, dtype[float64]]: ... @overload def f(self, dfnum: float, dfden: float, size: None = ...) -> float: ... # type: ignore[misc] @overload def f( - self, dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... + self, dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, size: None | _ShapeLike = ... ) -> ndarray[Any, dtype[float64]]: ... 
@overload def noncentral_f(self, dfnum: float, dfden: float, nonc: float, size: None = ...) -> float: ... # type: ignore[misc] @@ -456,19 +447,19 @@ class Generator: dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, - size: Optional[_ShapeLike] = ..., + size: None | _ShapeLike = ..., ) -> ndarray[Any, dtype[float64]]: ... @overload def chisquare(self, df: float, size: None = ...) -> float: ... # type: ignore[misc] @overload def chisquare( - self, df: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... + self, df: _ArrayLikeFloat_co, size: None | _ShapeLike = ... ) -> ndarray[Any, dtype[float64]]: ... @overload def noncentral_chisquare(self, df: float, nonc: float, size: None = ...) -> float: ... # type: ignore[misc] @overload def noncentral_chisquare( - self, df: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... + self, df: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, size: None | _ShapeLike = ... ) -> ndarray[Any, dtype[float64]]: ... @overload def standard_t(self, df: float, size: None = ...) -> float: ... # type: ignore[misc] @@ -484,25 +475,25 @@ class Generator: def vonmises(self, mu: float, kappa: float, size: None = ...) -> float: ... # type: ignore[misc] @overload def vonmises( - self, mu: _ArrayLikeFloat_co, kappa: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... + self, mu: _ArrayLikeFloat_co, kappa: _ArrayLikeFloat_co, size: None | _ShapeLike = ... ) -> ndarray[Any, dtype[float64]]: ... @overload def pareto(self, a: float, size: None = ...) -> float: ... # type: ignore[misc] @overload def pareto( - self, a: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... + self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ... ) -> ndarray[Any, dtype[float64]]: ... @overload def weibull(self, a: float, size: None = ...) -> float: ... # type: ignore[misc] @overload def weibull( - self, a: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... 
+ self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ... ) -> ndarray[Any, dtype[float64]]: ... @overload def power(self, a: float, size: None = ...) -> float: ... # type: ignore[misc] @overload def power( - self, a: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... + self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ... ) -> ndarray[Any, dtype[float64]]: ... @overload def standard_cauchy(self, size: None = ...) -> float: ... # type: ignore[misc] @@ -515,7 +506,7 @@ class Generator: self, loc: _ArrayLikeFloat_co = ..., scale: _ArrayLikeFloat_co = ..., - size: Optional[_ShapeLike] = ..., + size: None | _ShapeLike = ..., ) -> ndarray[Any, dtype[float64]]: ... @overload def gumbel(self, loc: float = ..., scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc] @@ -524,7 +515,7 @@ class Generator: self, loc: _ArrayLikeFloat_co = ..., scale: _ArrayLikeFloat_co = ..., - size: Optional[_ShapeLike] = ..., + size: None | _ShapeLike = ..., ) -> ndarray[Any, dtype[float64]]: ... @overload def logistic(self, loc: float = ..., scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc] @@ -533,7 +524,7 @@ class Generator: self, loc: _ArrayLikeFloat_co = ..., scale: _ArrayLikeFloat_co = ..., - size: Optional[_ShapeLike] = ..., + size: None | _ShapeLike = ..., ) -> ndarray[Any, dtype[float64]]: ... @overload def lognormal(self, mean: float = ..., sigma: float = ..., size: None = ...) -> float: ... # type: ignore[misc] @@ -542,19 +533,19 @@ class Generator: self, mean: _ArrayLikeFloat_co = ..., sigma: _ArrayLikeFloat_co = ..., - size: Optional[_ShapeLike] = ..., + size: None | _ShapeLike = ..., ) -> ndarray[Any, dtype[float64]]: ... @overload def rayleigh(self, scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc] @overload def rayleigh( - self, scale: _ArrayLikeFloat_co = ..., size: Optional[_ShapeLike] = ... + self, scale: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ... ) -> ndarray[Any, dtype[float64]]: ... 
@overload def wald(self, mean: float, scale: float, size: None = ...) -> float: ... # type: ignore[misc] @overload def wald( - self, mean: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... + self, mean: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, size: None | _ShapeLike = ... ) -> ndarray[Any, dtype[float64]]: ... @overload def triangular(self, left: float, mode: float, right: float, size: None = ...) -> float: ... # type: ignore[misc] @@ -564,37 +555,37 @@ class Generator: left: _ArrayLikeFloat_co, mode: _ArrayLikeFloat_co, right: _ArrayLikeFloat_co, - size: Optional[_ShapeLike] = ..., + size: None | _ShapeLike = ..., ) -> ndarray[Any, dtype[float64]]: ... @overload def binomial(self, n: int, p: float, size: None = ...) -> int: ... # type: ignore[misc] @overload def binomial( - self, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... + self, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ... ) -> ndarray[Any, dtype[int64]]: ... @overload def negative_binomial(self, n: float, p: float, size: None = ...) -> int: ... # type: ignore[misc] @overload def negative_binomial( - self, n: _ArrayLikeFloat_co, p: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... + self, n: _ArrayLikeFloat_co, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ... ) -> ndarray[Any, dtype[int64]]: ... @overload def poisson(self, lam: float = ..., size: None = ...) -> int: ... # type: ignore[misc] @overload def poisson( - self, lam: _ArrayLikeFloat_co = ..., size: Optional[_ShapeLike] = ... + self, lam: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ... ) -> ndarray[Any, dtype[int64]]: ... @overload def zipf(self, a: float, size: None = ...) -> int: ... # type: ignore[misc] @overload def zipf( - self, a: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... + self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ... ) -> ndarray[Any, dtype[int64]]: ... @overload def geometric(self, p: float, size: None = ...) 
-> int: ... # type: ignore[misc] @overload def geometric( - self, p: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... + self, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ... ) -> ndarray[Any, dtype[int64]]: ... @overload def hypergeometric(self, ngood: int, nbad: int, nsample: int, size: None = ...) -> int: ... # type: ignore[misc] @@ -604,19 +595,19 @@ class Generator: ngood: _ArrayLikeInt_co, nbad: _ArrayLikeInt_co, nsample: _ArrayLikeInt_co, - size: Optional[_ShapeLike] = ..., + size: None | _ShapeLike = ..., ) -> ndarray[Any, dtype[int64]]: ... @overload def logseries(self, p: float, size: None = ...) -> int: ... # type: ignore[misc] @overload def logseries( - self, p: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... + self, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ... ) -> ndarray[Any, dtype[int64]]: ... def multivariate_normal( self, mean: _ArrayLikeFloat_co, cov: _ArrayLikeFloat_co, - size: Optional[_ShapeLike] = ..., + size: None | _ShapeLike = ..., check_valid: Literal["warn", "raise", "ignore"] = ..., tol: float = ..., *, @@ -625,23 +616,23 @@ class Generator: def multinomial( self, n: _ArrayLikeInt_co, pvals: _ArrayLikeFloat_co, - size: Optional[_ShapeLike] = ... + size: None | _ShapeLike = ... ) -> ndarray[Any, dtype[int64]]: ... def multivariate_hypergeometric( self, colors: _ArrayLikeInt_co, nsample: int, - size: Optional[_ShapeLike] = ..., + size: None | _ShapeLike = ..., method: Literal["marginals", "count"] = ..., ) -> ndarray[Any, dtype[int64]]: ... def dirichlet( - self, alpha: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... + self, alpha: _ArrayLikeFloat_co, size: None | _ShapeLike = ... ) -> ndarray[Any, dtype[float64]]: ... def permuted( - self, x: ArrayLike, *, axis: Optional[int] = ..., out: Optional[ndarray[Any, Any]] = ... + self, x: ArrayLike, *, axis: None | int = ..., out: None | ndarray[Any, Any] = ... ) -> ndarray[Any, Any]: ... def shuffle(self, x: ArrayLike, axis: int = ...) -> None: ... 
def default_rng( - seed: Union[None, _ArrayLikeInt_co, SeedSequence, BitGenerator, Generator] = ... + seed: None | _ArrayLikeInt_co | SeedSequence | BitGenerator | Generator = ... ) -> Generator: ... diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx index 391987a1e..d7c1879e7 100644 --- a/numpy/random/_generator.pyx +++ b/numpy/random/_generator.pyx @@ -32,6 +32,8 @@ from ._common cimport (POISSON_LAM_MAX, CONS_POSITIVE, CONS_NONE, cdef extern from "numpy/arrayobject.h": int PyArray_ResolveWritebackIfCopy(np.ndarray) + int PyArray_FailUnlessWriteable(np.PyArrayObject *obj, + const char *name) except -1 object PyArray_FromArray(np.PyArrayObject *, np.PyArray_Descr *, int) enum: @@ -3572,12 +3574,35 @@ cdef class Generator: >>> y.shape (3, 3, 2) - The following is probably true, given that 0.6 is roughly twice the - standard deviation: + Here we generate 800 samples from the bivariate normal distribution + with mean [0, 0] and covariance matrix [[6, -3], [-3, 3.5]]. The + expected variances of the first and second components of the sample + are 6 and 3.5, respectively, and the expected correlation + coefficient is -3/sqrt(6*3.5) ≈ -0.65465. - >>> list((x[0,0,:] - mean) < 0.6) - [True, True] # random + >>> cov = np.array([[6, -3], [-3, 3.5]]) + >>> pts = rng.multivariate_normal([0, 0], cov, size=800) + Check that the mean, covariance, and correlation coefficient of the + sample are close to the expected values: + + >>> pts.mean(axis=0) + array([ 0.0326911 , -0.01280782]) # may vary + >>> np.cov(pts.T) + array([[ 5.96202397, -2.85602287], + [-2.85602287, 3.47613949]]) # may vary + >>> np.corrcoef(pts.T)[0, 1] + -0.6273591314603949 # may vary + + We can visualize this data with a scatter plot. The orientation + of the point cloud illustrates the negative correlation of the + components of this sample. 
+ + >>> import matplotlib.pyplot as plt + >>> plt.plot(pts[:, 0], pts[:, 1], '.', alpha=0.5) + >>> plt.axis('equal') + >>> plt.grid() + >>> plt.show() """ if method not in {'eigh', 'svd', 'cholesky'}: raise ValueError( @@ -4416,6 +4441,7 @@ cdef class Generator: else: if type(out) is not np.ndarray: raise TypeError('out must be a numpy array') + PyArray_FailUnlessWriteable(<np.PyArrayObject *>out, "out") if out.shape != x.shape: raise ValueError('out must have the same shape as x') np.copyto(out, x, casting='safe') @@ -4524,6 +4550,8 @@ cdef class Generator: char* buf_ptr if isinstance(x, np.ndarray): + if not x.flags.writeable: + raise ValueError('array is read-only') # Only call ndim on ndarrays, see GH 18142 axis = normalize_axis_index(axis, np.ndim(x)) diff --git a/numpy/random/_mt19937.pyi b/numpy/random/_mt19937.pyi index 820f27392..1494aad59 100644 --- a/numpy/random/_mt19937.pyi +++ b/numpy/random/_mt19937.pyi @@ -1,4 +1,4 @@ -from typing import Any, Union, TypedDict +from typing import Any, TypedDict from numpy import dtype, ndarray, uint32 from numpy.random.bit_generator import BitGenerator, SeedSequence @@ -13,7 +13,7 @@ class _MT19937State(TypedDict): state: _MT19937Internal class MT19937(BitGenerator): - def __init__(self, seed: Union[None, _ArrayLikeInt_co, SeedSequence] = ...) -> None: ... + def __init__(self, seed: None | _ArrayLikeInt_co | SeedSequence = ...) -> None: ... def _legacy_seeding(self, seed: _ArrayLikeInt_co) -> None: ... def jumped(self, jumps: int = ...) -> MT19937: ... @property diff --git a/numpy/random/_mt19937.pyx b/numpy/random/_mt19937.pyx index 16a377cc6..e9a703e2f 100644 --- a/numpy/random/_mt19937.pyx +++ b/numpy/random/_mt19937.pyx @@ -109,7 +109,7 @@ cdef class MT19937(BitGenerator): **Compatibility Guarantee** - ``MT19937`` makes a guarantee that a fixed seed and will always produce + ``MT19937`` makes a guarantee that a fixed seed will always produce the same random integer stream. 
References diff --git a/numpy/random/_pcg64.pyi b/numpy/random/_pcg64.pyi index 4881a987e..b1e73565e 100644 --- a/numpy/random/_pcg64.pyi +++ b/numpy/random/_pcg64.pyi @@ -1,4 +1,4 @@ -from typing import Union, TypedDict +from typing import TypedDict from numpy.random.bit_generator import BitGenerator, SeedSequence from numpy.typing import _ArrayLikeInt_co @@ -14,7 +14,7 @@ class _PCG64State(TypedDict): uinteger: int class PCG64(BitGenerator): - def __init__(self, seed: Union[None, _ArrayLikeInt_co, SeedSequence] = ...) -> None: ... + def __init__(self, seed: None | _ArrayLikeInt_co | SeedSequence = ...) -> None: ... def jumped(self, jumps: int = ...) -> PCG64: ... @property def state( @@ -28,7 +28,7 @@ class PCG64(BitGenerator): def advance(self, delta: int) -> PCG64: ... class PCG64DXSM(BitGenerator): - def __init__(self, seed: Union[None, _ArrayLikeInt_co, SeedSequence] = ...) -> None: ... + def __init__(self, seed: None | _ArrayLikeInt_co | SeedSequence = ...) -> None: ... def jumped(self, jumps: int = ...) -> PCG64DXSM: ... @property def state( diff --git a/numpy/random/_philox.pyi b/numpy/random/_philox.pyi index dd1c5e6e9..1f289f5de 100644 --- a/numpy/random/_philox.pyi +++ b/numpy/random/_philox.pyi @@ -1,4 +1,4 @@ -from typing import Any, Union, TypedDict +from typing import Any, TypedDict from numpy import dtype, ndarray, uint64 from numpy.random.bit_generator import BitGenerator, SeedSequence @@ -19,9 +19,9 @@ class _PhiloxState(TypedDict): class Philox(BitGenerator): def __init__( self, - seed: Union[None, _ArrayLikeInt_co, SeedSequence] = ..., - counter: Union[None, _ArrayLikeInt_co] = ..., - key: Union[None, _ArrayLikeInt_co] = ..., + seed: None | _ArrayLikeInt_co | SeedSequence = ..., + counter: None | _ArrayLikeInt_co = ..., + key: None | _ArrayLikeInt_co = ..., ) -> None: ... 
@property def state( diff --git a/numpy/random/_sfc64.pyi b/numpy/random/_sfc64.pyi index 94d11a210..333aa92c4 100644 --- a/numpy/random/_sfc64.pyi +++ b/numpy/random/_sfc64.pyi @@ -1,4 +1,4 @@ -from typing import Any, Union, TypedDict +from typing import Any, TypedDict from numpy import dtype as dtype from numpy import ndarray as ndarray @@ -16,7 +16,7 @@ class _SFC64State(TypedDict): uinteger: int class SFC64(BitGenerator): - def __init__(self, seed: Union[None, _ArrayLikeInt_co, SeedSequence] = ...) -> None: ... + def __init__(self, seed: None | _ArrayLikeInt_co | SeedSequence = ...) -> None: ... @property def state( self, diff --git a/numpy/random/bit_generator.pyi b/numpy/random/bit_generator.pyi index fa2f1ab12..c5f976d07 100644 --- a/numpy/random/bit_generator.pyi +++ b/numpy/random/bit_generator.pyi @@ -1,16 +1,9 @@ import abc from threading import Lock +from collections.abc import Callable, Mapping, Sequence from typing import ( Any, - Callable, - Dict, - List, - Mapping, NamedTuple, - Optional, - Sequence, - Tuple, - Type, TypedDict, TypeVar, Union, @@ -26,19 +19,19 @@ _T = TypeVar("_T") _DTypeLikeUint32 = Union[ dtype[uint32], _SupportsDType[dtype[uint32]], - Type[uint32], + type[uint32], _UInt32Codes, ] _DTypeLikeUint64 = Union[ dtype[uint64], _SupportsDType[dtype[uint64]], - Type[uint64], + type[uint64], _UInt64Codes, ] class _SeedSeqState(TypedDict): - entropy: Union[None, int, Sequence[int]] - spawn_key: Tuple[int, ...] + entropy: None | int | Sequence[int] + spawn_key: tuple[int, ...] pool_size: int n_children_spawned: int @@ -53,28 +46,28 @@ class _Interface(NamedTuple): class ISeedSequence(abc.ABC): @abc.abstractmethod def generate_state( - self, n_words: int, dtype: Union[_DTypeLikeUint32, _DTypeLikeUint64] = ... - ) -> ndarray[Any, dtype[Union[uint32, uint64]]]: ... + self, n_words: int, dtype: _DTypeLikeUint32 | _DTypeLikeUint64 = ... + ) -> ndarray[Any, dtype[uint32 | uint64]]: ... 
class ISpawnableSeedSequence(ISeedSequence): @abc.abstractmethod - def spawn(self: _T, n_children: int) -> List[_T]: ... + def spawn(self: _T, n_children: int) -> list[_T]: ... class SeedlessSeedSequence(ISpawnableSeedSequence): def generate_state( - self, n_words: int, dtype: Union[_DTypeLikeUint32, _DTypeLikeUint64] = ... - ) -> ndarray[Any, dtype[Union[uint32, uint64]]]: ... - def spawn(self: _T, n_children: int) -> List[_T]: ... + self, n_words: int, dtype: _DTypeLikeUint32 | _DTypeLikeUint64 = ... + ) -> ndarray[Any, dtype[uint32 | uint64]]: ... + def spawn(self: _T, n_children: int) -> list[_T]: ... class SeedSequence(ISpawnableSeedSequence): - entropy: Union[None, int, Sequence[int]] - spawn_key: Tuple[int, ...] + entropy: None | int | Sequence[int] + spawn_key: tuple[int, ...] pool_size: int n_children_spawned: int pool: ndarray[Any, dtype[uint32]] def __init__( self, - entropy: Union[None, int, Sequence[int], _ArrayLikeInt_co] = ..., + entropy: None | int | Sequence[int] | _ArrayLikeInt_co = ..., *, spawn_key: Sequence[int] = ..., pool_size: int = ..., @@ -86,18 +79,18 @@ class SeedSequence(ISpawnableSeedSequence): self, ) -> _SeedSeqState: ... def generate_state( - self, n_words: int, dtype: Union[_DTypeLikeUint32, _DTypeLikeUint64] = ... - ) -> ndarray[Any, dtype[Union[uint32, uint64]]]: ... - def spawn(self, n_children: int) -> List[SeedSequence]: ... + self, n_words: int, dtype: _DTypeLikeUint32 | _DTypeLikeUint64 = ... + ) -> ndarray[Any, dtype[uint32 | uint64]]: ... + def spawn(self, n_children: int) -> list[SeedSequence]: ... class BitGenerator(abc.ABC): lock: Lock - def __init__(self, seed: Union[None, _ArrayLikeInt_co, SeedSequence] = ...) -> None: ... - def __getstate__(self) -> Dict[str, Any]: ... - def __setstate__(self, state: Dict[str, Any]) -> None: ... + def __init__(self, seed: None | _ArrayLikeInt_co | SeedSequence = ...) -> None: ... + def __getstate__(self) -> dict[str, Any]: ... 
+ def __setstate__(self, state: dict[str, Any]) -> None: ... def __reduce__( self, - ) -> Tuple[Callable[[str], BitGenerator], Tuple[str], Tuple[Dict[str, Any]]]: ... + ) -> tuple[Callable[[str], BitGenerator], tuple[str], tuple[dict[str, Any]]]: ... @abc.abstractmethod @property def state(self) -> Mapping[str, Any]: ... @@ -108,7 +101,7 @@ class BitGenerator(abc.ABC): @overload def random_raw(self, size: _ShapeLike = ..., output: Literal[True] = ...) -> ndarray[Any, dtype[uint64]]: ... # type: ignore[misc] @overload - def random_raw(self, size: Optional[_ShapeLike] = ..., output: Literal[False] = ...) -> None: ... # type: ignore[misc] + def random_raw(self, size: None | _ShapeLike = ..., output: Literal[False] = ...) -> None: ... # type: ignore[misc] def _benchmark(self, cnt: int, method: str = ...) -> None: ... @property def ctypes(self) -> _Interface: ... diff --git a/numpy/random/mtrand.pyi b/numpy/random/mtrand.pyi index cbe87a299..5af449add 100644 --- a/numpy/random/mtrand.pyi +++ b/numpy/random/mtrand.pyi @@ -1,4 +1,5 @@ -from typing import Any, Callable, Dict, Optional, Tuple, Type, Union, overload, Literal +from collections.abc import Callable +from typing import Any, Union, overload, Literal from numpy import ( bool_, @@ -46,7 +47,7 @@ from numpy.typing import ( _DTypeLikeFloat32 = Union[ dtype[float32], _SupportsDType[dtype[float32]], - Type[float32], + type[float32], _Float32Codes, _SingleCodes, ] @@ -54,29 +55,29 @@ _DTypeLikeFloat32 = Union[ _DTypeLikeFloat64 = Union[ dtype[float64], _SupportsDType[dtype[float64]], - Type[float], - Type[float64], + type[float], + type[float64], _Float64Codes, _DoubleCodes, ] class RandomState: _bit_generator: BitGenerator - def __init__(self, seed: Union[None, _ArrayLikeInt_co, BitGenerator] = ...) -> None: ... + def __init__(self, seed: None | _ArrayLikeInt_co | BitGenerator = ...) -> None: ... def __repr__(self) -> str: ... def __str__(self) -> str: ... - def __getstate__(self) -> Dict[str, Any]: ... 
- def __setstate__(self, state: Dict[str, Any]) -> None: ... - def __reduce__(self) -> Tuple[Callable[[str], RandomState], Tuple[str], Dict[str, Any]]: ... - def seed(self, seed: Optional[_ArrayLikeFloat_co] = ...) -> None: ... + def __getstate__(self) -> dict[str, Any]: ... + def __setstate__(self, state: dict[str, Any]) -> None: ... + def __reduce__(self) -> tuple[Callable[[str], RandomState], tuple[str], dict[str, Any]]: ... + def seed(self, seed: None | _ArrayLikeFloat_co = ...) -> None: ... @overload - def get_state(self, legacy: Literal[False] = ...) -> Dict[str, Any]: ... + def get_state(self, legacy: Literal[False] = ...) -> dict[str, Any]: ... @overload def get_state( self, legacy: Literal[True] = ... - ) -> Union[Dict[str, Any], Tuple[str, ndarray[Any, dtype[uint32]], int, int, float]]: ... + ) -> dict[str, Any] | tuple[str, ndarray[Any, dtype[uint32]], int, int, float]: ... def set_state( - self, state: Union[Dict[str, Any], Tuple[str, ndarray[Any, dtype[uint32]], int, int, float]] + self, state: dict[str, Any] | tuple[str, ndarray[Any, dtype[uint32]], int, int, float] ) -> None: ... @overload def random_sample(self, size: None = ...) -> float: ... # type: ignore[misc] @@ -90,13 +91,13 @@ class RandomState: def beta(self, a: float, b: float, size: None = ...) -> float: ... # type: ignore[misc] @overload def beta( - self, a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... + self, a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, size: None | _ShapeLike = ... ) -> ndarray[Any, dtype[float64]]: ... @overload def exponential(self, scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc] @overload def exponential( - self, scale: _ArrayLikeFloat_co = ..., size: Optional[_ShapeLike] = ... + self, scale: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ... ) -> ndarray[Any, dtype[float64]]: ... @overload def standard_exponential(self, size: None = ...) -> float: ... 
# type: ignore[misc] @@ -110,13 +111,13 @@ class RandomState: def randint( # type: ignore[misc] self, low: int, - high: Optional[int] = ..., + high: None | int = ..., ) -> int: ... @overload def randint( # type: ignore[misc] self, low: int, - high: Optional[int] = ..., + high: None | int = ..., size: None = ..., dtype: _DTypeLikeBool = ..., ) -> bool: ... @@ -124,114 +125,104 @@ class RandomState: def randint( # type: ignore[misc] self, low: int, - high: Optional[int] = ..., + high: None | int = ..., size: None = ..., - dtype: Union[_DTypeLikeInt, _DTypeLikeUInt] = ..., + dtype: _DTypeLikeInt | _DTypeLikeUInt = ..., ) -> int: ... @overload def randint( # type: ignore[misc] self, low: _ArrayLikeInt_co, - high: Optional[_ArrayLikeInt_co] = ..., - size: Optional[_ShapeLike] = ..., + high: None | _ArrayLikeInt_co = ..., + size: None | _ShapeLike = ..., ) -> ndarray[Any, dtype[int_]]: ... @overload def randint( # type: ignore[misc] self, low: _ArrayLikeInt_co, - high: Optional[_ArrayLikeInt_co] = ..., - size: Optional[_ShapeLike] = ..., + high: None | _ArrayLikeInt_co = ..., + size: None | _ShapeLike = ..., dtype: _DTypeLikeBool = ..., ) -> ndarray[Any, dtype[bool_]]: ... @overload def randint( # type: ignore[misc] self, low: _ArrayLikeInt_co, - high: Optional[_ArrayLikeInt_co] = ..., - size: Optional[_ShapeLike] = ..., - dtype: Union[dtype[int8], Type[int8], _Int8Codes, _SupportsDType[dtype[int8]]] = ..., + high: None | _ArrayLikeInt_co = ..., + size: None | _ShapeLike = ..., + dtype: dtype[int8] | type[int8] | _Int8Codes | _SupportsDType[dtype[int8]] = ..., ) -> ndarray[Any, dtype[int8]]: ... 
@overload def randint( # type: ignore[misc] self, low: _ArrayLikeInt_co, - high: Optional[_ArrayLikeInt_co] = ..., - size: Optional[_ShapeLike] = ..., - dtype: Union[dtype[int16], Type[int16], _Int16Codes, _SupportsDType[dtype[int16]]] = ..., + high: None | _ArrayLikeInt_co = ..., + size: None | _ShapeLike = ..., + dtype: dtype[int16] | type[int16] | _Int16Codes | _SupportsDType[dtype[int16]] = ..., ) -> ndarray[Any, dtype[int16]]: ... @overload def randint( # type: ignore[misc] self, low: _ArrayLikeInt_co, - high: Optional[_ArrayLikeInt_co] = ..., - size: Optional[_ShapeLike] = ..., - dtype: Union[dtype[int32], Type[int32], _Int32Codes, _SupportsDType[dtype[int32]]] = ..., - ) -> ndarray[Any, dtype[Union[int32]]]: ... + high: None | _ArrayLikeInt_co = ..., + size: None | _ShapeLike = ..., + dtype: dtype[int32] | type[int32] | _Int32Codes | _SupportsDType[dtype[int32]] = ..., + ) -> ndarray[Any, dtype[int32]]: ... @overload def randint( # type: ignore[misc] self, low: _ArrayLikeInt_co, - high: Optional[_ArrayLikeInt_co] = ..., - size: Optional[_ShapeLike] = ..., - dtype: Optional[ - Union[dtype[int64], Type[int64], _Int64Codes, _SupportsDType[dtype[int64]]] - ] = ..., + high: None | _ArrayLikeInt_co = ..., + size: None | _ShapeLike = ..., + dtype: None | dtype[int64] | type[int64] | _Int64Codes | _SupportsDType[dtype[int64]] = ..., ) -> ndarray[Any, dtype[int64]]: ... @overload def randint( # type: ignore[misc] self, low: _ArrayLikeInt_co, - high: Optional[_ArrayLikeInt_co] = ..., - size: Optional[_ShapeLike] = ..., - dtype: Union[dtype[uint8], Type[uint8], _UInt8Codes, _SupportsDType[dtype[uint8]]] = ..., + high: None | _ArrayLikeInt_co = ..., + size: None | _ShapeLike = ..., + dtype: dtype[uint8] | type[uint8] | _UInt8Codes | _SupportsDType[dtype[uint8]] = ..., ) -> ndarray[Any, dtype[uint8]]: ... 
@overload def randint( # type: ignore[misc] self, low: _ArrayLikeInt_co, - high: Optional[_ArrayLikeInt_co] = ..., - size: Optional[_ShapeLike] = ..., - dtype: Union[ - dtype[uint16], Type[uint16], _UInt16Codes, _SupportsDType[dtype[uint16]] - ] = ..., - ) -> ndarray[Any, dtype[Union[uint16]]]: ... + high: None | _ArrayLikeInt_co = ..., + size: None | _ShapeLike = ..., + dtype: dtype[uint16] | type[uint16] | _UInt16Codes | _SupportsDType[dtype[uint16]] = ..., + ) -> ndarray[Any, dtype[uint16]]: ... @overload def randint( # type: ignore[misc] self, low: _ArrayLikeInt_co, - high: Optional[_ArrayLikeInt_co] = ..., - size: Optional[_ShapeLike] = ..., - dtype: Union[ - dtype[uint32], Type[uint32], _UInt32Codes, _SupportsDType[dtype[uint32]] - ] = ..., + high: None | _ArrayLikeInt_co = ..., + size: None | _ShapeLike = ..., + dtype: dtype[uint32] | type[uint32] | _UInt32Codes | _SupportsDType[dtype[uint32]] = ..., ) -> ndarray[Any, dtype[uint32]]: ... @overload def randint( # type: ignore[misc] self, low: _ArrayLikeInt_co, - high: Optional[_ArrayLikeInt_co] = ..., - size: Optional[_ShapeLike] = ..., - dtype: Union[ - dtype[uint64], Type[uint64], _UInt64Codes, _SupportsDType[dtype[uint64]] - ] = ..., + high: None | _ArrayLikeInt_co = ..., + size: None | _ShapeLike = ..., + dtype: dtype[uint64] | type[uint64] | _UInt64Codes | _SupportsDType[dtype[uint64]] = ..., ) -> ndarray[Any, dtype[uint64]]: ... @overload def randint( # type: ignore[misc] self, low: _ArrayLikeInt_co, - high: Optional[_ArrayLikeInt_co] = ..., - size: Optional[_ShapeLike] = ..., - dtype: Union[ - dtype[int_], Type[int], Type[int_], _IntCodes, _SupportsDType[dtype[int_]] - ] = ..., + high: None | _ArrayLikeInt_co = ..., + size: None | _ShapeLike = ..., + dtype: dtype[int_] | type[int] | type[int_] | _IntCodes | _SupportsDType[dtype[int_]] = ..., ) -> ndarray[Any, dtype[int_]]: ... 
@overload def randint( # type: ignore[misc] self, low: _ArrayLikeInt_co, - high: Optional[_ArrayLikeInt_co] = ..., - size: Optional[_ShapeLike] = ..., - dtype: Union[dtype[uint], Type[uint], _UIntCodes, _SupportsDType[dtype[uint]]] = ..., + high: None | _ArrayLikeInt_co = ..., + size: None | _ShapeLike = ..., + dtype: dtype[uint] | type[uint] | _UIntCodes | _SupportsDType[dtype[uint]] = ..., ) -> ndarray[Any, dtype[uint]]: ... def bytes(self, length: int) -> bytes: ... @overload @@ -240,7 +231,7 @@ class RandomState: a: int, size: None = ..., replace: bool = ..., - p: Optional[_ArrayLikeFloat_co] = ..., + p: None | _ArrayLikeFloat_co = ..., ) -> int: ... @overload def choice( @@ -248,7 +239,7 @@ class RandomState: a: int, size: _ShapeLike = ..., replace: bool = ..., - p: Optional[_ArrayLikeFloat_co] = ..., + p: None | _ArrayLikeFloat_co = ..., ) -> ndarray[Any, dtype[int_]]: ... @overload def choice( @@ -256,7 +247,7 @@ class RandomState: a: ArrayLike, size: None = ..., replace: bool = ..., - p: Optional[_ArrayLikeFloat_co] = ..., + p: None | _ArrayLikeFloat_co = ..., ) -> Any: ... @overload def choice( @@ -264,7 +255,7 @@ class RandomState: a: ArrayLike, size: _ShapeLike = ..., replace: bool = ..., - p: Optional[_ArrayLikeFloat_co] = ..., + p: None | _ArrayLikeFloat_co = ..., ) -> ndarray[Any, Any]: ... @overload def uniform(self, low: float = ..., high: float = ..., size: None = ...) -> float: ... # type: ignore[misc] @@ -273,7 +264,7 @@ class RandomState: self, low: _ArrayLikeFloat_co = ..., high: _ArrayLikeFloat_co = ..., - size: Optional[_ShapeLike] = ..., + size: None | _ShapeLike = ..., ) -> ndarray[Any, dtype[float64]]: ... @overload def rand(self) -> float: ... @@ -284,13 +275,13 @@ class RandomState: @overload def randn(self, *args: int) -> ndarray[Any, dtype[float64]]: ... @overload - def random_integers(self, low: int, high: Optional[int] = ..., size: None = ...) -> int: ... 
# type: ignore[misc] + def random_integers(self, low: int, high: None | int = ..., size: None = ...) -> int: ... # type: ignore[misc] @overload def random_integers( self, low: _ArrayLikeInt_co, - high: Optional[_ArrayLikeInt_co] = ..., - size: Optional[_ShapeLike] = ..., + high: None | _ArrayLikeInt_co = ..., + size: None | _ShapeLike = ..., ) -> ndarray[Any, dtype[int_]]: ... @overload def standard_normal(self, size: None = ...) -> float: ... # type: ignore[misc] @@ -305,7 +296,7 @@ class RandomState: self, loc: _ArrayLikeFloat_co = ..., scale: _ArrayLikeFloat_co = ..., - size: Optional[_ShapeLike] = ..., + size: None | _ShapeLike = ..., ) -> ndarray[Any, dtype[float64]]: ... @overload def standard_gamma( # type: ignore[misc] @@ -317,7 +308,7 @@ class RandomState: def standard_gamma( self, shape: _ArrayLikeFloat_co, - size: Optional[_ShapeLike] = ..., + size: None | _ShapeLike = ..., ) -> ndarray[Any, dtype[float64]]: ... @overload def gamma(self, shape: float, scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc] @@ -326,13 +317,13 @@ class RandomState: self, shape: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co = ..., - size: Optional[_ShapeLike] = ..., + size: None | _ShapeLike = ..., ) -> ndarray[Any, dtype[float64]]: ... @overload def f(self, dfnum: float, dfden: float, size: None = ...) -> float: ... # type: ignore[misc] @overload def f( - self, dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... + self, dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, size: None | _ShapeLike = ... ) -> ndarray[Any, dtype[float64]]: ... @overload def noncentral_f(self, dfnum: float, dfden: float, nonc: float, size: None = ...) -> float: ... # type: ignore[misc] @@ -342,19 +333,19 @@ class RandomState: dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, - size: Optional[_ShapeLike] = ..., + size: None | _ShapeLike = ..., ) -> ndarray[Any, dtype[float64]]: ... 
@overload def chisquare(self, df: float, size: None = ...) -> float: ... # type: ignore[misc] @overload def chisquare( - self, df: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... + self, df: _ArrayLikeFloat_co, size: None | _ShapeLike = ... ) -> ndarray[Any, dtype[float64]]: ... @overload def noncentral_chisquare(self, df: float, nonc: float, size: None = ...) -> float: ... # type: ignore[misc] @overload def noncentral_chisquare( - self, df: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... + self, df: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, size: None | _ShapeLike = ... ) -> ndarray[Any, dtype[float64]]: ... @overload def standard_t(self, df: float, size: None = ...) -> float: ... # type: ignore[misc] @@ -370,25 +361,25 @@ class RandomState: def vonmises(self, mu: float, kappa: float, size: None = ...) -> float: ... # type: ignore[misc] @overload def vonmises( - self, mu: _ArrayLikeFloat_co, kappa: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... + self, mu: _ArrayLikeFloat_co, kappa: _ArrayLikeFloat_co, size: None | _ShapeLike = ... ) -> ndarray[Any, dtype[float64]]: ... @overload def pareto(self, a: float, size: None = ...) -> float: ... # type: ignore[misc] @overload def pareto( - self, a: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... + self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ... ) -> ndarray[Any, dtype[float64]]: ... @overload def weibull(self, a: float, size: None = ...) -> float: ... # type: ignore[misc] @overload def weibull( - self, a: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... + self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ... ) -> ndarray[Any, dtype[float64]]: ... @overload def power(self, a: float, size: None = ...) -> float: ... # type: ignore[misc] @overload def power( - self, a: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... + self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ... ) -> ndarray[Any, dtype[float64]]: ... 
@overload def standard_cauchy(self, size: None = ...) -> float: ... # type: ignore[misc] @@ -401,7 +392,7 @@ class RandomState: self, loc: _ArrayLikeFloat_co = ..., scale: _ArrayLikeFloat_co = ..., - size: Optional[_ShapeLike] = ..., + size: None | _ShapeLike = ..., ) -> ndarray[Any, dtype[float64]]: ... @overload def gumbel(self, loc: float = ..., scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc] @@ -410,7 +401,7 @@ class RandomState: self, loc: _ArrayLikeFloat_co = ..., scale: _ArrayLikeFloat_co = ..., - size: Optional[_ShapeLike] = ..., + size: None | _ShapeLike = ..., ) -> ndarray[Any, dtype[float64]]: ... @overload def logistic(self, loc: float = ..., scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc] @@ -419,7 +410,7 @@ class RandomState: self, loc: _ArrayLikeFloat_co = ..., scale: _ArrayLikeFloat_co = ..., - size: Optional[_ShapeLike] = ..., + size: None | _ShapeLike = ..., ) -> ndarray[Any, dtype[float64]]: ... @overload def lognormal(self, mean: float = ..., sigma: float = ..., size: None = ...) -> float: ... # type: ignore[misc] @@ -428,19 +419,19 @@ class RandomState: self, mean: _ArrayLikeFloat_co = ..., sigma: _ArrayLikeFloat_co = ..., - size: Optional[_ShapeLike] = ..., + size: None | _ShapeLike = ..., ) -> ndarray[Any, dtype[float64]]: ... @overload def rayleigh(self, scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc] @overload def rayleigh( - self, scale: _ArrayLikeFloat_co = ..., size: Optional[_ShapeLike] = ... + self, scale: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ... ) -> ndarray[Any, dtype[float64]]: ... @overload def wald(self, mean: float, scale: float, size: None = ...) -> float: ... # type: ignore[misc] @overload def wald( - self, mean: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... + self, mean: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, size: None | _ShapeLike = ... ) -> ndarray[Any, dtype[float64]]: ... 
@overload def triangular(self, left: float, mode: float, right: float, size: None = ...) -> float: ... # type: ignore[misc] @@ -450,37 +441,37 @@ class RandomState: left: _ArrayLikeFloat_co, mode: _ArrayLikeFloat_co, right: _ArrayLikeFloat_co, - size: Optional[_ShapeLike] = ..., + size: None | _ShapeLike = ..., ) -> ndarray[Any, dtype[float64]]: ... @overload def binomial(self, n: int, p: float, size: None = ...) -> int: ... # type: ignore[misc] @overload def binomial( - self, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... + self, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ... ) -> ndarray[Any, dtype[int_]]: ... @overload def negative_binomial(self, n: float, p: float, size: None = ...) -> int: ... # type: ignore[misc] @overload def negative_binomial( - self, n: _ArrayLikeFloat_co, p: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... + self, n: _ArrayLikeFloat_co, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ... ) -> ndarray[Any, dtype[int_]]: ... @overload def poisson(self, lam: float = ..., size: None = ...) -> int: ... # type: ignore[misc] @overload def poisson( - self, lam: _ArrayLikeFloat_co = ..., size: Optional[_ShapeLike] = ... + self, lam: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ... ) -> ndarray[Any, dtype[int_]]: ... @overload def zipf(self, a: float, size: None = ...) -> int: ... # type: ignore[misc] @overload def zipf( - self, a: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... + self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ... ) -> ndarray[Any, dtype[int_]]: ... @overload def geometric(self, p: float, size: None = ...) -> int: ... # type: ignore[misc] @overload def geometric( - self, p: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... + self, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ... ) -> ndarray[Any, dtype[int_]]: ... @overload def hypergeometric(self, ngood: int, nbad: int, nsample: int, size: None = ...) -> int: ... 
# type: ignore[misc] @@ -490,27 +481,27 @@ class RandomState: ngood: _ArrayLikeInt_co, nbad: _ArrayLikeInt_co, nsample: _ArrayLikeInt_co, - size: Optional[_ShapeLike] = ..., + size: None | _ShapeLike = ..., ) -> ndarray[Any, dtype[int_]]: ... @overload def logseries(self, p: float, size: None = ...) -> int: ... # type: ignore[misc] @overload def logseries( - self, p: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... + self, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ... ) -> ndarray[Any, dtype[int_]]: ... def multivariate_normal( self, mean: _ArrayLikeFloat_co, cov: _ArrayLikeFloat_co, - size: Optional[_ShapeLike] = ..., + size: None | _ShapeLike = ..., check_valid: Literal["warn", "raise", "ignore"] = ..., tol: float = ..., ) -> ndarray[Any, dtype[float64]]: ... def multinomial( - self, n: _ArrayLikeInt_co, pvals: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... + self, n: _ArrayLikeInt_co, pvals: _ArrayLikeFloat_co, size: None | _ShapeLike = ... ) -> ndarray[Any, dtype[int_]]: ... def dirichlet( - self, alpha: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... + self, alpha: _ArrayLikeFloat_co, size: None | _ShapeLike = ... ) -> ndarray[Any, dtype[float64]]: ... def shuffle(self, x: ArrayLike) -> None: ... @overload diff --git a/numpy/random/mtrand.pyx b/numpy/random/mtrand.pyx index 3e13503d0..8bf74aa5d 100644 --- a/numpy/random/mtrand.pyx +++ b/numpy/random/mtrand.pyx @@ -402,7 +402,7 @@ cdef class RandomState: See Also -------- - Generator.random: which should be used for new code. + random.Generator.random: which should be used for new code. Examples -------- @@ -476,7 +476,7 @@ cdef class RandomState: See Also -------- - Generator.beta: which should be used for new code. + random.Generator.beta: which should be used for new code. """ return cont(&legacy_beta, &self._aug_state, size, self.lock, 2, a, 'a', CONS_POSITIVE, @@ -525,7 +525,7 @@ cdef class RandomState: See Also -------- - Generator.exponential: which should be used for new code. 
+ random.Generator.exponential: which should be used for new code. References ---------- @@ -570,7 +570,7 @@ cdef class RandomState: See Also -------- - Generator.standard_exponential: which should be used for new code. + random.Generator.standard_exponential: which should be used for new code. Examples -------- @@ -688,7 +688,7 @@ cdef class RandomState: random_integers : similar to `randint`, only for the closed interval [`low`, `high`], and 1 is the lowest value if `high` is omitted. - Generator.integers: which should be used for new code. + random.Generator.integers: which should be used for new code. Examples -------- @@ -790,7 +790,7 @@ cdef class RandomState: See Also -------- - Generator.bytes: which should be used for new code. + random.Generator.bytes: which should be used for new code. Examples -------- @@ -850,7 +850,7 @@ cdef class RandomState: See Also -------- randint, shuffle, permutation - Generator.choice: which should be used in new code + random.Generator.choice: which should be used in new code Notes ----- @@ -1058,7 +1058,7 @@ cdef class RandomState: rand : Convenience function that accepts dimensions as input, e.g., ``rand(2,2)`` would generate a 2-by-2 array of floats, uniformly distributed over ``[0, 1)``. - Generator.uniform: which should be used for new code. + random.Generator.uniform: which should be used for new code. Notes ----- @@ -1220,7 +1220,7 @@ cdef class RandomState: -------- standard_normal : Similar, but takes a tuple as its argument. normal : Also accepts mu and sigma arguments. - Generator.standard_normal: which should be used for new code. + random.Generator.standard_normal: which should be used for new code. Notes ----- @@ -1369,7 +1369,7 @@ cdef class RandomState: normal : Equivalent function with additional ``loc`` and ``scale`` arguments for setting the mean and standard deviation. - Generator.standard_normal: which should be used for new code. + random.Generator.standard_normal: which should be used for new code. 
Notes ----- @@ -1448,7 +1448,7 @@ cdef class RandomState: -------- scipy.stats.norm : probability density function, distribution or cumulative density function, etc. - Generator.normal: which should be used for new code. + random.Generator.normal: which should be used for new code. Notes ----- @@ -1545,7 +1545,7 @@ cdef class RandomState: -------- scipy.stats.gamma : probability density function, distribution or cumulative density function, etc. - Generator.standard_gamma: which should be used for new code. + random.Generator.standard_gamma: which should be used for new code. Notes ----- @@ -1629,7 +1629,7 @@ cdef class RandomState: -------- scipy.stats.gamma : probability density function, distribution or cumulative density function, etc. - Generator.gamma: which should be used for new code. + random.Generator.gamma: which should be used for new code. Notes ----- @@ -1717,7 +1717,7 @@ cdef class RandomState: -------- scipy.stats.f : probability density function, distribution or cumulative density function, etc. - Generator.f: which should be used for new code. + random.Generator.f: which should be used for new code. Notes ----- @@ -1810,7 +1810,7 @@ cdef class RandomState: See Also -------- - Generator.noncentral_f: which should be used for new code. + random.Generator.noncentral_f: which should be used for new code. Notes ----- @@ -1892,7 +1892,7 @@ cdef class RandomState: See Also -------- - Generator.chisquare: which should be used for new code. + random.Generator.chisquare: which should be used for new code. Notes ----- @@ -1964,7 +1964,7 @@ cdef class RandomState: See Also -------- - Generator.noncentral_chisquare: which should be used for new code. + random.Generator.noncentral_chisquare: which should be used for new code. Notes ----- @@ -2042,7 +2042,7 @@ cdef class RandomState: See Also -------- - Generator.standard_cauchy: which should be used for new code. + random.Generator.standard_cauchy: which should be used for new code. 
Notes ----- @@ -2121,7 +2121,7 @@ cdef class RandomState: See Also -------- - Generator.standard_t: which should be used for new code. + random.Generator.standard_t: which should be used for new code. Notes ----- @@ -2242,7 +2242,7 @@ cdef class RandomState: -------- scipy.stats.vonmises : probability density function, distribution, or cumulative density function, etc. - Generator.vonmises: which should be used for new code. + random.Generator.vonmises: which should be used for new code. Notes ----- @@ -2340,7 +2340,7 @@ cdef class RandomState: cumulative density function, etc. scipy.stats.genpareto : probability density function, distribution or cumulative density function, etc. - Generator.pareto: which should be used for new code. + random.Generator.pareto: which should be used for new code. Notes ----- @@ -2434,7 +2434,7 @@ cdef class RandomState: scipy.stats.weibull_min scipy.stats.genextreme gumbel - Generator.weibull: which should be used for new code. + random.Generator.weibull: which should be used for new code. Notes ----- @@ -2531,7 +2531,7 @@ cdef class RandomState: See Also -------- - Generator.power: which should be used for new code. + random.Generator.power: which should be used for new code. Notes ----- @@ -2640,7 +2640,7 @@ cdef class RandomState: See Also -------- - Generator.laplace: which should be used for new code. + random.Generator.laplace: which should be used for new code. Notes ----- @@ -2735,7 +2735,7 @@ cdef class RandomState: scipy.stats.gumbel_r scipy.stats.genextreme weibull - Generator.gumbel: which should be used for new code. + random.Generator.gumbel: which should be used for new code. Notes ----- @@ -2855,7 +2855,7 @@ cdef class RandomState: -------- scipy.stats.logistic : probability density function, distribution or cumulative density function, etc. - Generator.logistic: which should be used for new code. + random.Generator.logistic: which should be used for new code. 
Notes ----- @@ -2942,7 +2942,7 @@ cdef class RandomState: -------- scipy.stats.lognorm : probability density function, distribution, cumulative density function, etc. - Generator.lognormal: which should be used for new code. + random.Generator.lognormal: which should be used for new code. Notes ----- @@ -3050,7 +3050,7 @@ cdef class RandomState: See Also -------- - Generator.rayleigh: which should be used for new code. + random.Generator.rayleigh: which should be used for new code. Notes ----- @@ -3134,7 +3134,7 @@ cdef class RandomState: See Also -------- - Generator.wald: which should be used for new code. + random.Generator.wald: which should be used for new code. Notes ----- @@ -3211,7 +3211,7 @@ cdef class RandomState: See Also -------- - Generator.triangular: which should be used for new code. + random.Generator.triangular: which should be used for new code. Notes ----- @@ -3318,7 +3318,7 @@ cdef class RandomState: -------- scipy.stats.binom : probability density function, distribution or cumulative density function, etc. - Generator.binomial: which should be used for new code. + random.Generator.binomial: which should be used for new code. Notes ----- @@ -3466,7 +3466,7 @@ cdef class RandomState: See Also -------- - Generator.negative_binomial: which should be used for new code. + random.Generator.negative_binomial: which should be used for new code. Notes ----- @@ -3549,7 +3549,7 @@ cdef class RandomState: See Also -------- - Generator.poisson: which should be used for new code. + random.Generator.poisson: which should be used for new code. Notes ----- @@ -3636,7 +3636,7 @@ cdef class RandomState: -------- scipy.stats.zipf : probability density function, distribution, or cumulative density function, etc. - Generator.zipf: which should be used for new code. + random.Generator.zipf: which should be used for new code. Notes ----- @@ -3733,7 +3733,7 @@ cdef class RandomState: See Also -------- - Generator.geometric: which should be used for new code. 
+ random.Generator.geometric: which should be used for new code. Examples -------- @@ -3797,7 +3797,7 @@ cdef class RandomState: -------- scipy.stats.hypergeom : probability density function, distribution or cumulative density function, etc. - Generator.hypergeometric: which should be used for new code. + random.Generator.hypergeometric: which should be used for new code. Notes ----- @@ -3920,7 +3920,7 @@ cdef class RandomState: -------- scipy.stats.logser : probability density function, distribution or cumulative density function, etc. - Generator.logseries: which should be used for new code. + random.Generator.logseries: which should be used for new code. Notes ----- @@ -4023,7 +4023,7 @@ cdef class RandomState: See Also -------- - Generator.multivariate_normal: which should be used for new code. + random.Generator.multivariate_normal: which should be used for new code. Notes ----- @@ -4079,12 +4079,35 @@ cdef class RandomState: >>> x.shape (3, 3, 2) - The following is probably true, given that 0.6 is roughly twice the - standard deviation: + Here we generate 800 samples from the bivariate normal distribution + with mean [0, 0] and covariance matrix [[6, -3], [-3, 3.5]]. The + expected variances of the first and second components of the sample + are 6 and 3.5, respectively, and the expected correlation + coefficient is -3/sqrt(6*3.5) ≈ -0.65465. - >>> list((x[0,0,:] - mean) < 0.6) - [True, True] # random + >>> cov = np.array([[6, -3], [-3, 3.5]]) + >>> pts = np.random.multivariate_normal([0, 0], cov, size=800) + Check that the mean, covariance, and correlation coefficient of the + sample are close to the expected values: + + >>> pts.mean(axis=0) + array([ 0.0326911 , -0.01280782]) # may vary + >>> np.cov(pts.T) + array([[ 5.96202397, -2.85602287], + [-2.85602287, 3.47613949]]) # may vary + >>> np.corrcoef(pts.T)[0, 1] + -0.6273591314603949 # may vary + + We can visualize this data with a scatter plot. 
The orientation + of the point cloud illustrates the negative correlation of the + components of this sample. + + >>> import matplotlib.pyplot as plt + >>> plt.plot(pts[:, 0], pts[:, 1], '.', alpha=0.5) + >>> plt.axis('equal') + >>> plt.grid() + >>> plt.show() """ from numpy.linalg import svd @@ -4193,7 +4216,7 @@ cdef class RandomState: See Also -------- - Generator.multinomial: which should be used for new code. + random.Generator.multinomial: which should be used for new code. Examples -------- @@ -4234,18 +4257,21 @@ cdef class RandomState: ValueError: pvals < 0, pvals > 1 or pvals contains NaNs """ - cdef np.npy_intp d, i, sz, offset + cdef np.npy_intp d, i, sz, offset, niter cdef np.ndarray parr, mnarr cdef double *pix cdef long *mnix cdef long ni - d = len(pvals) parr = <np.ndarray>np.PyArray_FROMANY( - pvals, np.NPY_DOUBLE, 1, 1, np.NPY_ARRAY_ALIGNED | np.NPY_ARRAY_C_CONTIGUOUS) + pvals, np.NPY_DOUBLE, 0, 1, np.NPY_ARRAY_ALIGNED | np.NPY_ARRAY_C_CONTIGUOUS) + if np.PyArray_NDIM(parr) == 0: + raise TypeError("pvals must be a 1-d sequence") + d = np.PyArray_SIZE(parr) pix = <double*>np.PyArray_DATA(parr) check_array_constraint(parr, 'pvals', CONS_BOUNDED_0_1) - if kahan_sum(pix, d-1) > (1.0 + 1e-12): + # Only check if pvals is non-empty due no checks in kahan_sum + if d and kahan_sum(pix, d-1) > (1.0 + 1e-12): # When floating, but not float dtype, and close, improve the error # 1.0001 works for float16 and float32 if (isinstance(pvals, np.ndarray) @@ -4260,7 +4286,6 @@ cdef class RandomState: else: msg = "sum(pvals[:-1]) > 1.0" raise ValueError(msg) - if size is None: shape = (d,) else: @@ -4268,7 +4293,6 @@ cdef class RandomState: shape = (operator.index(size), d) except: shape = tuple(size) + (d,) - multin = np.zeros(shape, dtype=int) mnarr = <np.ndarray>multin mnix = <long*>np.PyArray_DATA(mnarr) @@ -4276,8 +4300,10 @@ cdef class RandomState: ni = n check_constraint(ni, 'n', CONS_NON_NEGATIVE) offset = 0 + # gh-20483: Avoids divide by 0 + niter = sz // d 
if d else 0 with self.lock, nogil: - for i in range(sz // d): + for i in range(niter): legacy_random_multinomial(&self._bitgen, ni, &mnix[offset], pix, d, &self._binomial) offset += d @@ -4321,7 +4347,7 @@ cdef class RandomState: See Also -------- - Generator.dirichlet: which should be used for new code. + random.Generator.dirichlet: which should be used for new code. Notes ----- @@ -4456,7 +4482,7 @@ cdef class RandomState: See Also -------- - Generator.shuffle: which should be used for new code. + random.Generator.shuffle: which should be used for new code. Examples -------- @@ -4480,6 +4506,9 @@ cdef class RandomState: char* x_ptr char* buf_ptr + if isinstance(x, np.ndarray) and not x.flags.writeable: + raise ValueError('array is read-only') + if type(x) is np.ndarray and x.ndim == 1 and x.size: # Fast, statically typed path: shuffle the underlying buffer. # Only for non-empty, 1d objects of class ndarray (subclasses such @@ -4579,7 +4608,7 @@ cdef class RandomState: See Also -------- - Generator.permutation: which should be used for new code. + random.Generator.permutation: which should be used for new code. 
Examples -------- diff --git a/numpy/random/tests/test_extending.py b/numpy/random/tests/test_extending.py index 99a819efb..d362092b5 100644 --- a/numpy/random/tests/test_extending.py +++ b/numpy/random/tests/test_extending.py @@ -5,6 +5,7 @@ import subprocess import sys import warnings import numpy as np +from numpy.distutils.misc_util import exec_mod_from_location try: import cffi @@ -75,10 +76,9 @@ def test_cython(tmp_path): assert so1 is not None assert so2 is not None # import the so's without adding the directory to sys.path - from importlib.machinery import ExtensionFileLoader - extending = ExtensionFileLoader('extending', so1).load_module() - extending_distributions = ExtensionFileLoader('extending_distributions', so2).load_module() - + exec_mod_from_location('extending', so1) + extending_distributions = exec_mod_from_location( + 'extending_distributions', so2) # actually test the cython c-extension from numpy.random import PCG64 values = extending_distributions.uniforms_ex(PCG64(0), 10, 'd') diff --git a/numpy/random/tests/test_generator_mt19937.py b/numpy/random/tests/test_generator_mt19937.py index e5411b8ef..e16a82973 100644 --- a/numpy/random/tests/test_generator_mt19937.py +++ b/numpy/random/tests/test_generator_mt19937.py @@ -1020,6 +1020,13 @@ class TestRandomDist: arr = np.ones((3, 2)) assert_raises(np.AxisError, random.shuffle, arr, 2) + def test_shuffle_not_writeable(self): + random = Generator(MT19937(self.seed)) + a = np.zeros(5) + a.flags.writeable = False + with pytest.raises(ValueError, match='read-only'): + random.shuffle(a) + def test_permutation(self): random = Generator(MT19937(self.seed)) alist = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0] @@ -1116,6 +1123,12 @@ class TestRandomDist: with pytest.raises(TypeError, match='Cannot cast'): random.permuted(x, axis=1, out=out) + def test_permuted_not_writeable(self): + x = np.zeros((2, 5)) + x.flags.writeable = False + with pytest.raises(ValueError, match='read-only'): + random.permuted(x, axis=1, out=x) + 
def test_beta(self): random = Generator(MT19937(self.seed)) actual = random.beta(.1, .9, size=(3, 2)) diff --git a/numpy/random/tests/test_random.py b/numpy/random/tests/test_random.py index 6a584a511..773b63653 100644 --- a/numpy/random/tests/test_random.py +++ b/numpy/random/tests/test_random.py @@ -564,6 +564,12 @@ class TestRandomDist: rng.shuffle(a) assert_equal(np.asarray(a), [4, 1, 0, 3, 2]) + def test_shuffle_not_writeable(self): + a = np.zeros(3) + a.flags.writeable = False + with pytest.raises(ValueError, match='read-only'): + np.random.shuffle(a) + def test_beta(self): np.random.seed(self.seed) actual = np.random.beta(.1, .9, size=(3, 2)) diff --git a/numpy/random/tests/test_randomstate_regression.py b/numpy/random/tests/test_randomstate_regression.py index 595fb5fd3..7ad19ab55 100644 --- a/numpy/random/tests/test_randomstate_regression.py +++ b/numpy/random/tests/test_randomstate_regression.py @@ -201,3 +201,16 @@ class TestRegression: [3, 4, 2, 3, 3, 1, 5, 3, 1, 3]]) assert_array_equal(random.binomial([[0], [10]], 0.25, size=(2, 10)), expected) + + +def test_multinomial_empty(): + # gh-20483 + # Ensure that empty p-vals are correctly handled + assert random.multinomial(10, []).shape == (0,) + assert random.multinomial(3, [], size=(7, 5, 3)).shape == (7, 5, 3, 0) + + +def test_multinomial_1d_pval(): + # gh-20483 + with pytest.raises(TypeError, match="pvals must be a 1-d"): + random.multinomial(10, 0.3) diff --git a/numpy/testing/__init__.pyi b/numpy/testing/__init__.pyi index def0f9f58..a981d6113 100644 --- a/numpy/testing/__init__.pyi +++ b/numpy/testing/__init__.pyi @@ -1,5 +1,3 @@ -from typing import List - from numpy._pytesttester import PytestTester from unittest import ( @@ -48,11 +46,11 @@ from numpy.testing._private.utils import ( HAS_LAPACK64 as HAS_LAPACK64, ) -__all__: List[str] -__path__: List[str] +__all__: list[str] +__path__: list[str] test: PytestTester def run_module_suite( file_to_run: None | str = ..., - argv: None | List[str] = ..., 
+ argv: None | list[str] = ..., ) -> None: ... diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index 4c6b64bc9..2c71e45bd 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -1228,13 +1228,13 @@ def rundocs(filename=None, raise_on_error=True): >>> np.lib.test(doctests=True) # doctest: +SKIP """ - from numpy.compat import npy_load_module + from numpy.distutils.misc_util import exec_mod_from_location import doctest if filename is None: f = sys._getframe(1) filename = f.f_globals['__file__'] name = os.path.splitext(os.path.basename(filename))[0] - m = npy_load_module(name, filename) + m = exec_mod_from_location(name, filename) tests = doctest.DocTestFinder().find(m) runner = doctest.DocTestRunner(verbose=False) @@ -2518,3 +2518,16 @@ def _no_tracing(func): finally: sys.settrace(original_trace) return wrapper + + +def _get_glibc_version(): + try: + ver = os.confstr('CS_GNU_LIBC_VERSION').rsplit(' ')[1] + except Exception as inst: + ver = '0.0' + + return ver + + +_glibcver = _get_glibc_version() +_glibc_older_than = lambda x: (_glibcver != '0.0' and _glibcver < x) diff --git a/numpy/testing/_private/utils.pyi b/numpy/testing/_private/utils.pyi index 4ba5d82ee..8117f18ae 100644 --- a/numpy/testing/_private/utils.pyi +++ b/numpy/testing/_private/utils.pyi @@ -5,22 +5,15 @@ import types import warnings import unittest import contextlib +from re import Pattern +from collections.abc import Callable, Iterable, Sequence from typing import ( Literal as L, Any, AnyStr, - Callable, ClassVar, - Dict, - Iterable, - List, NoReturn, overload, - Pattern, - Sequence, - Set, - Tuple, - Type, type_check_only, TypeVar, Union, @@ -59,14 +52,14 @@ _ComparisonFunc = Callable[ ], ] -__all__: List[str] +__all__: list[str] class KnownFailureException(Exception): ... class IgnoreException(Exception): ... 
class clear_and_catch_warnings(warnings.catch_warnings): - class_modules: ClassVar[Tuple[types.ModuleType, ...]] - modules: Set[types.ModuleType] + class_modules: ClassVar[tuple[types.ModuleType, ...]] + modules: set[types.ModuleType] @overload def __new__( cls, @@ -85,10 +78,10 @@ class clear_and_catch_warnings(warnings.catch_warnings): record: bool, modules: Iterable[types.ModuleType] = ..., ) -> clear_and_catch_warnings: ... - def __enter__(self) -> None | List[warnings.WarningMessage]: ... + def __enter__(self) -> None | list[warnings.WarningMessage]: ... def __exit__( self, - __exc_type: None | Type[BaseException] = ..., + __exc_type: None | type[BaseException] = ..., __exc_val: None | BaseException = ..., __exc_tb: None | types.TracebackType = ..., ) -> None: ... @@ -98,34 +91,34 @@ class clear_and_catch_warnings(warnings.catch_warnings): @type_check_only class _clear_and_catch_warnings_with_records(clear_and_catch_warnings): - def __enter__(self) -> List[warnings.WarningMessage]: ... + def __enter__(self) -> list[warnings.WarningMessage]: ... @type_check_only class _clear_and_catch_warnings_without_records(clear_and_catch_warnings): def __enter__(self) -> None: ... class suppress_warnings: - log: List[warnings.WarningMessage] + log: list[warnings.WarningMessage] def __init__( self, forwarding_rule: L["always", "module", "once", "location"] = ..., ) -> None: ... def filter( self, - category: Type[Warning] = ..., + category: type[Warning] = ..., message: str = ..., module: None | types.ModuleType = ..., ) -> None: ... def record( self, - category: Type[Warning] = ..., + category: type[Warning] = ..., message: str = ..., module: None | types.ModuleType = ..., - ) -> List[warnings.WarningMessage]: ... + ) -> list[warnings.WarningMessage]: ... def __enter__(self: _T) -> _T: ... 
def __exit__( self, - __exc_type: None | Type[BaseException] = ..., + __exc_type: None | type[BaseException] = ..., __exc_val: None | BaseException = ..., __exc_tb: None | types.TracebackType = ..., ) -> None: ... @@ -151,10 +144,10 @@ else: if sys.platform == "linux": def jiffies( _proc_pid_stat: str | bytes | os.PathLike[Any] = ..., - _load_time: List[float] = ..., + _load_time: list[float] = ..., ) -> int: ... else: - def jiffies(_load_time: List[float] = ...) -> int: ... + def jiffies(_load_time: list[float] = ...) -> int: ... def build_err_msg( arrays: Iterable[object], @@ -246,7 +239,7 @@ def assert_array_less( def runstring( astr: str | bytes | types.CodeType, - dict: None | Dict[str, Any], + dict: None | dict[str, Any], ) -> Any: ... def assert_string_equal(actual: str, desired: str) -> None: ... @@ -256,11 +249,11 @@ def rundocs( raise_on_error: bool = ..., ) -> None: ... -def raises(*args: Type[BaseException]) -> Callable[[_FT], _FT]: ... +def raises(*args: type[BaseException]) -> Callable[[_FT], _FT]: ... @overload def assert_raises( # type: ignore - expected_exception: Type[BaseException] | Tuple[Type[BaseException], ...], + expected_exception: type[BaseException] | tuple[type[BaseException], ...], callable: Callable[..., Any], /, *args: Any, @@ -268,14 +261,14 @@ def assert_raises( # type: ignore ) -> None: ... @overload def assert_raises( - expected_exception: Type[_ET] | Tuple[Type[_ET], ...], + expected_exception: type[_ET] | tuple[type[_ET], ...], *, msg: None | str = ..., ) -> unittest.case._AssertRaisesContext[_ET]: ... @overload def assert_raises_regex( - expected_exception: Type[BaseException] | Tuple[Type[BaseException], ...], + expected_exception: type[BaseException] | tuple[type[BaseException], ...], expected_regex: str | bytes | Pattern[Any], callable: Callable[..., Any], /, @@ -284,14 +277,14 @@ def assert_raises_regex( ) -> None: ... 
@overload def assert_raises_regex( - expected_exception: Type[_ET] | Tuple[Type[_ET], ...], + expected_exception: type[_ET] | tuple[type[_ET], ...], expected_regex: str | bytes | Pattern[Any], *, msg: None | str = ..., ) -> unittest.case._AssertRaisesContext[_ET]: ... def decorate_methods( - cls: Type[Any], + cls: type[Any], decorator: Callable[[Callable[..., Any]], Any], testmatch: None | str | bytes | Pattern[Any] = ..., ) -> None: ... @@ -338,11 +331,11 @@ def assert_array_max_ulp( @overload def assert_warns( - warning_class: Type[Warning], + warning_class: type[Warning], ) -> contextlib._GeneratorContextManager[None]: ... @overload def assert_warns( - warning_class: Type[Warning], + warning_class: type[Warning], func: Callable[..., _T], /, *args: Any, diff --git a/numpy/testing/print_coercion_tables.py b/numpy/testing/print_coercion_tables.py index 3a447cd2d..c1d4cdff8 100755 --- a/numpy/testing/print_coercion_tables.py +++ b/numpy/testing/print_coercion_tables.py @@ -87,11 +87,12 @@ def print_new_cast_table(*, can_cast=True, legacy=False, flags=False): from numpy.core._multiarray_tests import get_all_cast_information cast_table = { - 0 : "#", # No cast (classify as equivalent here) - 1 : "#", # equivalent casting - 2 : "=", # safe casting - 3 : "~", # same-kind casting - 4 : ".", # unsafe casting + -1: " ", + 0: "#", # No cast (classify as equivalent here) + 1: "#", # equivalent casting + 2: "=", # safe casting + 3: "~", # same-kind casting + 4: ".", # unsafe casting } flags_table = { 0 : "▗", 7: "█", diff --git a/numpy/tests/test_public_api.py b/numpy/tests/test_public_api.py index 0754df402..76deb281c 100644 --- a/numpy/tests/test_public_api.py +++ b/numpy/tests/test_public_api.py @@ -189,6 +189,7 @@ PRIVATE_BUT_PRESENT_MODULES = ['numpy.' 
+ s for s in [ "core.shape_base", "core.umath", "core.umath_tests", + "distutils.armccompiler", "distutils.ccompiler", 'distutils.ccompiler_opt', "distutils.command", @@ -214,6 +215,7 @@ PRIVATE_BUT_PRESENT_MODULES = ['numpy.' + s for s in [ "distutils.extension", "distutils.fcompiler", "distutils.fcompiler.absoft", + "distutils.fcompiler.arm", "distutils.fcompiler.compaq", "distutils.fcompiler.environment", "distutils.fcompiler.g95", @@ -315,6 +317,7 @@ SKIP_LIST = [ "numpy.core.code_generators.generate_numpy_api", "numpy.core.code_generators.generate_ufunc_api", "numpy.core.code_generators.numpy_api", + "numpy.core.code_generators.generate_umath_doc", "numpy.core.cversions", "numpy.core.generate_numpy_api", "numpy.distutils.msvc9compiler", diff --git a/numpy/typing/_callable.pyi b/numpy/typing/_callable.pyi index e1149f26a..6d7136592 100644 --- a/numpy/typing/_callable.pyi +++ b/numpy/typing/_callable.pyi @@ -11,11 +11,9 @@ See the `Mypy documentation`_ on protocols for more details. from __future__ import annotations from typing import ( - Union, TypeVar, overload, Any, - Tuple, NoReturn, Protocol, ) @@ -51,7 +49,7 @@ _T1 = TypeVar("_T1") _T2 = TypeVar("_T2") _T1_contra = TypeVar("_T1_contra", contravariant=True) _T2_contra = TypeVar("_T2_contra", contravariant=True) -_2Tuple = Tuple[_T1, _T1] +_2Tuple = tuple[_T1, _T1] _NBit1 = TypeVar("_NBit1", bound=NBitBase) _NBit2 = TypeVar("_NBit2", bound=NBitBase) @@ -310,11 +308,11 @@ class _ComplexOp(Protocol[_NBit1]): @overload def __call__( self, - other: Union[ - integer[_NBit2], - floating[_NBit2], - complexfloating[_NBit2, _NBit2], - ], /, + other: ( + integer[_NBit2] + | floating[_NBit2] + | complexfloating[_NBit2, _NBit2] + ), /, ) -> complexfloating[_NBit1 | _NBit2, _NBit1 | _NBit2]: ... 
class _NumberOp(Protocol): diff --git a/numpy/typing/_generic_alias.py b/numpy/typing/_generic_alias.py index 1eb2c8c05..0541ad77f 100644 --- a/numpy/typing/_generic_alias.py +++ b/numpy/typing/_generic_alias.py @@ -2,17 +2,11 @@ from __future__ import annotations import sys import types +from collections.abc import Generator, Iterable, Iterator from typing import ( Any, ClassVar, - FrozenSet, - Generator, - Iterable, - Iterator, - List, NoReturn, - Tuple, - Type, TypeVar, TYPE_CHECKING, ) @@ -93,38 +87,38 @@ class _GenericAlias: return super().__getattribute__("_origin") @property - def __args__(self) -> Tuple[object, ...]: + def __args__(self) -> tuple[object, ...]: return super().__getattribute__("_args") @property - def __parameters__(self) -> Tuple[TypeVar, ...]: + def __parameters__(self) -> tuple[TypeVar, ...]: """Type variables in the ``GenericAlias``.""" return super().__getattribute__("_parameters") def __init__( self, origin: type, - args: object | Tuple[object, ...], + args: object | tuple[object, ...], ) -> None: self._origin = origin self._args = args if isinstance(args, tuple) else (args,) self._parameters = tuple(_parse_parameters(self.__args__)) @property - def __call__(self) -> type: + def __call__(self) -> type[Any]: return self.__origin__ - def __reduce__(self: _T) -> Tuple[ - Type[_T], - Tuple[type, Tuple[object, ...]], + def __reduce__(self: _T) -> tuple[ + type[_T], + tuple[type[Any], tuple[object, ...]], ]: cls = type(self) return cls, (self.__origin__, self.__args__) - def __mro_entries__(self, bases: Iterable[object]) -> Tuple[type]: + def __mro_entries__(self, bases: Iterable[object]) -> tuple[type[Any]]: return (self.__origin__,) - def __dir__(self) -> List[str]: + def __dir__(self) -> list[str]: """Implement ``dir(self)``.""" cls = type(self) dir_origin = set(dir(self.__origin__)) @@ -155,7 +149,7 @@ class _GenericAlias: origin = _to_str(self.__origin__) return f"{origin}[{args}]" - def __getitem__(self: _T, key: object | Tuple[object, 
...]) -> _T: + def __getitem__(self: _T, key: object | tuple[object, ...]) -> _T: """Return ``self[key]``.""" key_tup = key if isinstance(key, tuple) else (key,) @@ -178,7 +172,7 @@ class _GenericAlias: self.__args__ == value.__args__ ) - _ATTR_EXCEPTIONS: ClassVar[FrozenSet[str]] = frozenset({ + _ATTR_EXCEPTIONS: ClassVar[frozenset[str]] = frozenset({ "__origin__", "__args__", "__parameters__", diff --git a/numpy/typing/_ufunc.pyi b/numpy/typing/_ufunc.pyi index 1be3500c1..703b7f925 100644 --- a/numpy/typing/_ufunc.pyi +++ b/numpy/typing/_ufunc.pyi @@ -10,9 +10,7 @@ four private subclasses, one for each combination of from typing import ( Any, Generic, - List, overload, - Tuple, TypeVar, Literal, SupportsIndex, @@ -27,9 +25,9 @@ from ._array_like import ArrayLike, _ArrayLikeBool_co, _ArrayLikeInt_co from ._dtype_like import DTypeLike _T = TypeVar("_T") -_2Tuple = Tuple[_T, _T] -_3Tuple = Tuple[_T, _T, _T] -_4Tuple = Tuple[_T, _T, _T, _T] +_2Tuple = tuple[_T, _T] +_3Tuple = tuple[_T, _T, _T] +_4Tuple = tuple[_T, _T, _T, _T] _NTypes = TypeVar("_NTypes", bound=int) _IDType = TypeVar("_IDType", bound=Any) @@ -86,13 +84,13 @@ class _UFunc_Nin1_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): dtype: DTypeLike = ..., subok: bool = ..., signature: str | _2Tuple[None | str] = ..., - extobj: List[Any] = ..., + extobj: list[Any] = ..., ) -> Any: ... @overload def __call__( self, __x1: ArrayLike, - out: None | NDArray[Any] | Tuple[NDArray[Any]] = ..., + out: None | NDArray[Any] | tuple[NDArray[Any]] = ..., *, where: None | _ArrayLikeBool_co = ..., casting: _CastingKind = ..., @@ -100,7 +98,7 @@ class _UFunc_Nin1_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): dtype: DTypeLike = ..., subok: bool = ..., signature: str | _2Tuple[None | str] = ..., - extobj: List[Any] = ..., + extobj: list[Any] = ..., ) -> NDArray[Any]: ... 
def at( @@ -139,14 +137,14 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): dtype: DTypeLike = ..., subok: bool = ..., signature: str | _3Tuple[None | str] = ..., - extobj: List[Any] = ..., + extobj: list[Any] = ..., ) -> Any: ... @overload def __call__( self, __x1: ArrayLike, __x2: ArrayLike, - out: None | NDArray[Any] | Tuple[NDArray[Any]] = ..., + out: None | NDArray[Any] | tuple[NDArray[Any]] = ..., *, where: None | _ArrayLikeBool_co = ..., casting: _CastingKind = ..., @@ -154,7 +152,7 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): dtype: DTypeLike = ..., subok: bool = ..., signature: str | _3Tuple[None | str] = ..., - extobj: List[Any] = ..., + extobj: list[Any] = ..., ) -> NDArray[Any]: ... def at( @@ -207,7 +205,7 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): dtype: DTypeLike = ..., subok: bool = ..., signature: str | _3Tuple[None | str] = ..., - extobj: List[Any] = ..., + extobj: list[Any] = ..., ) -> Any: ... @overload def outer( # type: ignore[misc] @@ -215,14 +213,14 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): A: ArrayLike, B: ArrayLike, /, *, - out: None | NDArray[Any] | Tuple[NDArray[Any]] = ..., + out: None | NDArray[Any] | tuple[NDArray[Any]] = ..., where: None | _ArrayLikeBool_co = ..., casting: _CastingKind = ..., order: _OrderKACF = ..., dtype: DTypeLike = ..., subok: bool = ..., signature: str | _3Tuple[None | str] = ..., - extobj: List[Any] = ..., + extobj: list[Any] = ..., ) -> NDArray[Any]: ... class _UFunc_Nin1_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): @@ -264,7 +262,7 @@ class _UFunc_Nin1_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): dtype: DTypeLike = ..., subok: bool = ..., signature: str | _3Tuple[None | str] = ..., - extobj: List[Any] = ..., + extobj: list[Any] = ..., ) -> _2Tuple[Any]: ... 
@overload def __call__( @@ -280,7 +278,7 @@ class _UFunc_Nin1_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): dtype: DTypeLike = ..., subok: bool = ..., signature: str | _3Tuple[None | str] = ..., - extobj: List[Any] = ..., + extobj: list[Any] = ..., ) -> _2Tuple[NDArray[Any]]: ... class _UFunc_Nin2_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): @@ -323,7 +321,7 @@ class _UFunc_Nin2_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): dtype: DTypeLike = ..., subok: bool = ..., signature: str | _4Tuple[None | str] = ..., - extobj: List[Any] = ..., + extobj: list[Any] = ..., ) -> _2Tuple[Any]: ... @overload def __call__( @@ -340,7 +338,7 @@ class _UFunc_Nin2_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): dtype: DTypeLike = ..., subok: bool = ..., signature: str | _4Tuple[None | str] = ..., - extobj: List[Any] = ..., + extobj: list[Any] = ..., ) -> _2Tuple[NDArray[Any]]: ... class _GUFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): @@ -385,21 +383,21 @@ class _GUFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): dtype: DTypeLike = ..., subok: bool = ..., signature: str | _3Tuple[None | str] = ..., - extobj: List[Any] = ..., - axes: List[_2Tuple[SupportsIndex]] = ..., + extobj: list[Any] = ..., + axes: list[_2Tuple[SupportsIndex]] = ..., ) -> Any: ... @overload def __call__( self, __x1: ArrayLike, __x2: ArrayLike, - out: NDArray[Any] | Tuple[NDArray[Any]], + out: NDArray[Any] | tuple[NDArray[Any]], *, casting: _CastingKind = ..., order: _OrderKACF = ..., dtype: DTypeLike = ..., subok: bool = ..., signature: str | _3Tuple[None | str] = ..., - extobj: List[Any] = ..., - axes: List[_2Tuple[SupportsIndex]] = ..., + extobj: list[Any] = ..., + axes: list[_2Tuple[SupportsIndex]] = ..., ) -> NDArray[Any]: ... 
diff --git a/numpy/typing/tests/data/fail/arithmetic.pyi b/numpy/typing/tests/data/fail/arithmetic.pyi index b99b24c1f..3bbc101cf 100644 --- a/numpy/typing/tests/data/fail/arithmetic.pyi +++ b/numpy/typing/tests/data/fail/arithmetic.pyi @@ -1,4 +1,4 @@ -from typing import List, Any +from typing import Any import numpy as np b_ = np.bool_() @@ -15,13 +15,13 @@ AR_M: np.ndarray[Any, np.dtype[np.datetime64]] ANY: Any -AR_LIKE_b: List[bool] -AR_LIKE_u: List[np.uint32] -AR_LIKE_i: List[int] -AR_LIKE_f: List[float] -AR_LIKE_c: List[complex] -AR_LIKE_m: List[np.timedelta64] -AR_LIKE_M: List[np.datetime64] +AR_LIKE_b: list[bool] +AR_LIKE_u: list[np.uint32] +AR_LIKE_i: list[int] +AR_LIKE_f: list[float] +AR_LIKE_c: list[complex] +AR_LIKE_m: list[np.timedelta64] +AR_LIKE_M: list[np.datetime64] # Array subtraction diff --git a/numpy/typing/tests/data/fail/array_constructors.pyi b/numpy/typing/tests/data/fail/array_constructors.pyi index 4f0a60b5b..065b7d8a0 100644 --- a/numpy/typing/tests/data/fail/array_constructors.pyi +++ b/numpy/typing/tests/data/fail/array_constructors.pyi @@ -21,10 +21,10 @@ np.linspace(0, 2, retstep=b'False') # E: No overload variant np.linspace(0, 2, dtype=0) # E: No overload variant np.linspace(0, 2, axis=None) # E: No overload variant -np.logspace(None, 'bob') # E: Argument 1 -np.logspace(0, 2, base=None) # E: Argument "base" +np.logspace(None, 'bob') # E: No overload variant +np.logspace(0, 2, base=None) # E: No overload variant -np.geomspace(None, 'bob') # E: Argument 1 +np.geomspace(None, 'bob') # E: No overload variant np.stack(generator) # E: No overload variant np.hstack({1, 2}) # E: No overload variant diff --git a/numpy/typing/tests/data/fail/arrayprint.pyi b/numpy/typing/tests/data/fail/arrayprint.pyi index 86297a0b2..71b921e3a 100644 --- a/numpy/typing/tests/data/fail/arrayprint.pyi +++ b/numpy/typing/tests/data/fail/arrayprint.pyi @@ -1,4 +1,5 @@ -from typing import Callable, Any +from collections.abc import Callable +from typing import 
Any import numpy as np AR: np.ndarray diff --git a/numpy/typing/tests/data/fail/einsumfunc.pyi b/numpy/typing/tests/data/fail/einsumfunc.pyi index 33722f861..f0e3f1e95 100644 --- a/numpy/typing/tests/data/fail/einsumfunc.pyi +++ b/numpy/typing/tests/data/fail/einsumfunc.pyi @@ -1,4 +1,4 @@ -from typing import List, Any +from typing import Any import numpy as np AR_i: np.ndarray[Any, np.dtype[np.int64]] diff --git a/numpy/typing/tests/data/fail/index_tricks.pyi b/numpy/typing/tests/data/fail/index_tricks.pyi index c508bf3ae..22f6f4a61 100644 --- a/numpy/typing/tests/data/fail/index_tricks.pyi +++ b/numpy/typing/tests/data/fail/index_tricks.pyi @@ -1,9 +1,9 @@ -from typing import List import numpy as np -AR_LIKE_i: List[int] -AR_LIKE_f: List[float] +AR_LIKE_i: list[int] +AR_LIKE_f: list[float] +np.ndindex([1, 2, 3]) # E: No overload variant np.unravel_index(AR_LIKE_f, (1, 2, 3)) # E: incompatible type np.ravel_multi_index(AR_LIKE_i, (1, 2, 3), mode="bob") # E: No overload variant np.mgrid[1] # E: Invalid index type diff --git a/numpy/typing/tests/data/fail/multiarray.pyi b/numpy/typing/tests/data/fail/multiarray.pyi index 22bcf8c92..a770e52c5 100644 --- a/numpy/typing/tests/data/fail/multiarray.pyi +++ b/numpy/typing/tests/data/fail/multiarray.pyi @@ -1,4 +1,3 @@ -from typing import List import numpy as np import numpy.typing as npt @@ -12,7 +11,7 @@ AR_M: npt.NDArray[np.datetime64] M: np.datetime64 -AR_LIKE_f: List[float] +AR_LIKE_f: list[float] def func(a: int) -> None: ... diff --git a/numpy/typing/tests/data/fail/nested_sequence.pyi b/numpy/typing/tests/data/fail/nested_sequence.pyi index e28661a05..c51593b1e 100644 --- a/numpy/typing/tests/data/fail/nested_sequence.pyi +++ b/numpy/typing/tests/data/fail/nested_sequence.pyi @@ -1,9 +1,9 @@ -from typing import Sequence, Tuple, List +from collections.abc import Sequence import numpy.typing as npt a: Sequence[float] -b: List[complex] -c: Tuple[str, ...] +b: list[complex] +c: tuple[str, ...] 
d: int e: str diff --git a/numpy/typing/tests/data/fail/random.pyi b/numpy/typing/tests/data/fail/random.pyi index c4d1e3e3e..f0e682019 100644 --- a/numpy/typing/tests/data/fail/random.pyi +++ b/numpy/typing/tests/data/fail/random.pyi @@ -1,9 +1,9 @@ import numpy as np -from typing import Any, List +from typing import Any SEED_FLOAT: float = 457.3 SEED_ARR_FLOAT: np.ndarray[Any, np.dtype[np.float64]] = np.array([1.0, 2, 3, 4]) -SEED_ARRLIKE_FLOAT: List[float] = [1.0, 2.0, 3.0, 4.0] +SEED_ARRLIKE_FLOAT: list[float] = [1.0, 2.0, 3.0, 4.0] SEED_SEED_SEQ: np.random.SeedSequence = np.random.SeedSequence(0) SEED_STR: str = "String seeding not allowed" # default rng diff --git a/numpy/typing/tests/data/fail/scalars.pyi b/numpy/typing/tests/data/fail/scalars.pyi index 94fe3f71e..964470538 100644 --- a/numpy/typing/tests/data/fail/scalars.pyi +++ b/numpy/typing/tests/data/fail/scalars.pyi @@ -31,7 +31,7 @@ np.complex64([]) # E: incompatible type np.complex64(1, 2) # E: Too many arguments # TODO: protocols (can't check for non-existent protocols w/ __getattr__) -np.datetime64(0) # E: non-matching overload +np.datetime64(0) # E: No overload variant class A: def __float__(self): diff --git a/numpy/typing/tests/data/fail/shape_base.pyi b/numpy/typing/tests/data/fail/shape_base.pyi new file mode 100644 index 000000000..e709741b7 --- /dev/null +++ b/numpy/typing/tests/data/fail/shape_base.pyi @@ -0,0 +1,8 @@ +import numpy as np + +class DTypeLike: + dtype: np.dtype[np.int_] + +dtype_like: DTypeLike + +np.expand_dims(dtype_like, (5, 10)) # E: No overload variant diff --git a/numpy/typing/tests/data/fail/twodim_base.pyi b/numpy/typing/tests/data/fail/twodim_base.pyi index ab34a374c..faa430095 100644 --- a/numpy/typing/tests/data/fail/twodim_base.pyi +++ b/numpy/typing/tests/data/fail/twodim_base.pyi @@ -1,4 +1,4 @@ -from typing import Any, List, TypeVar +from typing import Any, TypeVar import numpy as np import numpy.typing as npt @@ -15,7 +15,7 @@ def func2(ar: npt.NDArray[Any], 
a: float) -> float: AR_b: npt.NDArray[np.bool_] AR_m: npt.NDArray[np.timedelta64] -AR_LIKE_b: List[bool] +AR_LIKE_b: list[bool] np.eye(10, M=20.0) # E: No overload variant np.eye(10, k=2.5, dtype=int) # E: No overload variant diff --git a/numpy/typing/tests/data/fail/ufunclike.pyi b/numpy/typing/tests/data/fail/ufunclike.pyi index 82a5f3a1d..2f9fd14c8 100644 --- a/numpy/typing/tests/data/fail/ufunclike.pyi +++ b/numpy/typing/tests/data/fail/ufunclike.pyi @@ -1,4 +1,4 @@ -from typing import List, Any +from typing import Any import numpy as np AR_c: np.ndarray[Any, np.dtype[np.complex128]] diff --git a/numpy/typing/tests/data/pass/array_like.py b/numpy/typing/tests/data/pass/array_like.py index 5bd2fda20..4e9f8dded 100644 --- a/numpy/typing/tests/data/pass/array_like.py +++ b/numpy/typing/tests/data/pass/array_like.py @@ -1,4 +1,6 @@ -from typing import Any, Optional +from __future__ import annotations + +from typing import Any import numpy as np from numpy.typing import ArrayLike, _SupportsArray @@ -18,7 +20,7 @@ x12: ArrayLike = memoryview(b'foo') class A: - def __array__(self, dtype: Optional[np.dtype] = None) -> np.ndarray: + def __array__(self, dtype: None | np.dtype[Any] = None) -> np.ndarray: return np.array([1, 2, 3]) diff --git a/numpy/typing/tests/data/pass/literal.py b/numpy/typing/tests/data/pass/literal.py index 8eaeb6afb..d06431eed 100644 --- a/numpy/typing/tests/data/pass/literal.py +++ b/numpy/typing/tests/data/pass/literal.py @@ -1,5 +1,7 @@ +from __future__ import annotations + from functools import partial -from typing import Callable, List, Tuple +from collections.abc import Callable import pytest # type: ignore import numpy as np @@ -11,7 +13,7 @@ KACF = frozenset({None, "K", "A", "C", "F"}) ACF = frozenset({None, "A", "C", "F"}) CF = frozenset({None, "C", "F"}) -order_list: List[Tuple[frozenset, Callable]] = [ +order_list: list[tuple[frozenset, Callable]] = [ (KACF, partial(np.ndarray, 1)), (KACF, AR.tobytes), (KACF, partial(AR.astype, int)), 
diff --git a/numpy/typing/tests/data/pass/numeric.py b/numpy/typing/tests/data/pass/numeric.py index 34fef7270..c4a73c1e9 100644 --- a/numpy/typing/tests/data/pass/numeric.py +++ b/numpy/typing/tests/data/pass/numeric.py @@ -5,7 +5,8 @@ Does not include tests which fall under ``array_constructors``. """ -from typing import List +from __future__ import annotations + import numpy as np class SubClass(np.ndarray): @@ -14,7 +15,7 @@ class SubClass(np.ndarray): i8 = np.int64(1) A = np.arange(27).reshape(3, 3, 3) -B: List[List[List[int]]] = A.tolist() +B: list[list[list[int]]] = A.tolist() C = np.empty((27, 27)).view(SubClass) np.count_nonzero(i8) diff --git a/numpy/typing/tests/data/pass/numerictypes.py b/numpy/typing/tests/data/pass/numerictypes.py index 5af0d171c..7f1dd0945 100644 --- a/numpy/typing/tests/data/pass/numerictypes.py +++ b/numpy/typing/tests/data/pass/numerictypes.py @@ -38,9 +38,9 @@ np.nbytes[np.int64] np.ScalarType np.ScalarType[0] -np.ScalarType[4] -np.ScalarType[9] -np.ScalarType[11] +np.ScalarType[3] +np.ScalarType[8] +np.ScalarType[10] np.typecodes["Character"] np.typecodes["Complex"] diff --git a/numpy/typing/tests/data/pass/random.py b/numpy/typing/tests/data/pass/random.py index 05bd62112..9816cd2c3 100644 --- a/numpy/typing/tests/data/pass/random.py +++ b/numpy/typing/tests/data/pass/random.py @@ -1,13 +1,12 @@ from __future__ import annotations -from typing import Any, List, Dict - +from typing import Any import numpy as np SEED_NONE = None SEED_INT = 4579435749574957634658964293569 SEED_ARR: np.ndarray[Any, np.dtype[np.int64]] = np.array([1, 2, 3, 4], dtype=np.int64) -SEED_ARRLIKE: List[int] = [1, 2, 3, 4] +SEED_ARRLIKE: list[int] = [1, 2, 3, 4] SEED_SEED_SEQ: np.random.SeedSequence = np.random.SeedSequence(0) SEED_MT19937: np.random.MT19937 = np.random.MT19937(0) SEED_PCG64: np.random.PCG64 = np.random.PCG64(0) @@ -76,13 +75,13 @@ D_arr_0p9: np.ndarray[Any, np.dtype[np.float64]] = np.array([0.9]) D_arr_1p5: np.ndarray[Any, 
np.dtype[np.float64]] = np.array([1.5]) I_arr_10: np.ndarray[Any, np.dtype[np.int_]] = np.array([10], dtype=np.int_) I_arr_20: np.ndarray[Any, np.dtype[np.int_]] = np.array([20], dtype=np.int_) -D_arr_like_0p1: List[float] = [0.1] -D_arr_like_0p5: List[float] = [0.5] -D_arr_like_0p9: List[float] = [0.9] -D_arr_like_1p5: List[float] = [1.5] -I_arr_like_10: List[int] = [10] -I_arr_like_20: List[int] = [20] -D_2D_like: List[List[float]] = [[1, 2], [2, 3], [3, 4], [4, 5.1]] +D_arr_like_0p1: list[float] = [0.1] +D_arr_like_0p5: list[float] = [0.5] +D_arr_like_0p9: list[float] = [0.9] +D_arr_like_1p5: list[float] = [1.5] +I_arr_like_10: list[int] = [10] +I_arr_like_20: list[int] = [20] +D_2D_like: list[list[float]] = [[1, 2], [2, 3], [3, 4], [4, 5.1]] D_2D: np.ndarray[Any, np.dtype[np.float64]] = np.array(D_2D_like) S_out: np.ndarray[Any, np.dtype[np.float32]] = np.empty(1, dtype=np.float32) @@ -499,7 +498,7 @@ def_gen.integers([100]) def_gen.integers(0, [100]) I_bool_low: np.ndarray[Any, np.dtype[np.bool_]] = np.array([0], dtype=np.bool_) -I_bool_low_like: List[int] = [0] +I_bool_low_like: list[int] = [0] I_bool_high_open: np.ndarray[Any, np.dtype[np.bool_]] = np.array([1], dtype=np.bool_) I_bool_high_closed: np.ndarray[Any, np.dtype[np.bool_]] = np.array([1], dtype=np.bool_) @@ -528,7 +527,7 @@ def_gen.integers(I_bool_low, I_bool_high_closed, dtype=np.bool_, endpoint=True) def_gen.integers(0, I_bool_high_closed, dtype=np.bool_, endpoint=True) I_u1_low: np.ndarray[Any, np.dtype[np.uint8]] = np.array([0], dtype=np.uint8) -I_u1_low_like: List[int] = [0] +I_u1_low_like: list[int] = [0] I_u1_high_open: np.ndarray[Any, np.dtype[np.uint8]] = np.array([255], dtype=np.uint8) I_u1_high_closed: np.ndarray[Any, np.dtype[np.uint8]] = np.array([255], dtype=np.uint8) @@ -569,7 +568,7 @@ def_gen.integers(I_u1_low, I_u1_high_closed, dtype=np.uint8, endpoint=True) def_gen.integers(0, I_u1_high_closed, dtype=np.uint8, endpoint=True) I_u2_low: np.ndarray[Any, np.dtype[np.uint16]] = 
np.array([0], dtype=np.uint16) -I_u2_low_like: List[int] = [0] +I_u2_low_like: list[int] = [0] I_u2_high_open: np.ndarray[Any, np.dtype[np.uint16]] = np.array([65535], dtype=np.uint16) I_u2_high_closed: np.ndarray[Any, np.dtype[np.uint16]] = np.array([65535], dtype=np.uint16) @@ -610,7 +609,7 @@ def_gen.integers(I_u2_low, I_u2_high_closed, dtype=np.uint16, endpoint=True) def_gen.integers(0, I_u2_high_closed, dtype=np.uint16, endpoint=True) I_u4_low: np.ndarray[Any, np.dtype[np.uint32]] = np.array([0], dtype=np.uint32) -I_u4_low_like: List[int] = [0] +I_u4_low_like: list[int] = [0] I_u4_high_open: np.ndarray[Any, np.dtype[np.uint32]] = np.array([4294967295], dtype=np.uint32) I_u4_high_closed: np.ndarray[Any, np.dtype[np.uint32]] = np.array([4294967295], dtype=np.uint32) @@ -651,7 +650,7 @@ def_gen.integers(I_u4_low, I_u4_high_closed, dtype=np.uint32, endpoint=True) def_gen.integers(0, I_u4_high_closed, dtype=np.uint32, endpoint=True) I_u8_low: np.ndarray[Any, np.dtype[np.uint64]] = np.array([0], dtype=np.uint64) -I_u8_low_like: List[int] = [0] +I_u8_low_like: list[int] = [0] I_u8_high_open: np.ndarray[Any, np.dtype[np.uint64]] = np.array([18446744073709551615], dtype=np.uint64) I_u8_high_closed: np.ndarray[Any, np.dtype[np.uint64]] = np.array([18446744073709551615], dtype=np.uint64) @@ -692,7 +691,7 @@ def_gen.integers(I_u8_low, I_u8_high_closed, dtype=np.uint64, endpoint=True) def_gen.integers(0, I_u8_high_closed, dtype=np.uint64, endpoint=True) I_i1_low: np.ndarray[Any, np.dtype[np.int8]] = np.array([-128], dtype=np.int8) -I_i1_low_like: List[int] = [-128] +I_i1_low_like: list[int] = [-128] I_i1_high_open: np.ndarray[Any, np.dtype[np.int8]] = np.array([127], dtype=np.int8) I_i1_high_closed: np.ndarray[Any, np.dtype[np.int8]] = np.array([127], dtype=np.int8) @@ -733,7 +732,7 @@ def_gen.integers(I_i1_low, I_i1_high_closed, dtype=np.int8, endpoint=True) def_gen.integers(-128, I_i1_high_closed, dtype=np.int8, endpoint=True) I_i2_low: np.ndarray[Any, 
np.dtype[np.int16]] = np.array([-32768], dtype=np.int16) -I_i2_low_like: List[int] = [-32768] +I_i2_low_like: list[int] = [-32768] I_i2_high_open: np.ndarray[Any, np.dtype[np.int16]] = np.array([32767], dtype=np.int16) I_i2_high_closed: np.ndarray[Any, np.dtype[np.int16]] = np.array([32767], dtype=np.int16) @@ -774,7 +773,7 @@ def_gen.integers(I_i2_low, I_i2_high_closed, dtype=np.int16, endpoint=True) def_gen.integers(-32768, I_i2_high_closed, dtype=np.int16, endpoint=True) I_i4_low: np.ndarray[Any, np.dtype[np.int32]] = np.array([-2147483648], dtype=np.int32) -I_i4_low_like: List[int] = [-2147483648] +I_i4_low_like: list[int] = [-2147483648] I_i4_high_open: np.ndarray[Any, np.dtype[np.int32]] = np.array([2147483647], dtype=np.int32) I_i4_high_closed: np.ndarray[Any, np.dtype[np.int32]] = np.array([2147483647], dtype=np.int32) @@ -815,7 +814,7 @@ def_gen.integers(I_i4_low, I_i4_high_closed, dtype=np.int32, endpoint=True) def_gen.integers(-2147483648, I_i4_high_closed, dtype=np.int32, endpoint=True) I_i8_low: np.ndarray[Any, np.dtype[np.int64]] = np.array([-9223372036854775808], dtype=np.int64) -I_i8_low_like: List[int] = [-9223372036854775808] +I_i8_low_like: list[int] = [-9223372036854775808] I_i8_high_open: np.ndarray[Any, np.dtype[np.int64]] = np.array([9223372036854775807], dtype=np.int64) I_i8_high_closed: np.ndarray[Any, np.dtype[np.int64]] = np.array([9223372036854775807], dtype=np.int64) @@ -912,7 +911,7 @@ def_gen.shuffle(D_2D, axis=1) def_gen.__str__() def_gen.__repr__() -def_gen_state: Dict[str, Any] +def_gen_state: dict[str, Any] def_gen_state = def_gen.__getstate__() def_gen.__setstate__(def_gen_state) diff --git a/numpy/typing/tests/data/pass/simple.py b/numpy/typing/tests/data/pass/simple.py index 85965e0de..03ca3e83f 100644 --- a/numpy/typing/tests/data/pass/simple.py +++ b/numpy/typing/tests/data/pass/simple.py @@ -2,7 +2,7 @@ import operator import numpy as np -from typing import Iterable # noqa: F401 +from collections.abc import Iterable # Basic 
checks array = np.array([1, 2]) diff --git a/numpy/typing/tests/data/reveal/arithmetic.pyi b/numpy/typing/tests/data/reveal/arithmetic.pyi index c5b467469..2002727cf 100644 --- a/numpy/typing/tests/data/reveal/arithmetic.pyi +++ b/numpy/typing/tests/data/reveal/arithmetic.pyi @@ -1,4 +1,4 @@ -from typing import Any, List +from typing import Any import numpy as np import numpy.typing as npt @@ -34,14 +34,14 @@ AR_m: np.ndarray[Any, np.dtype[np.timedelta64]] AR_M: np.ndarray[Any, np.dtype[np.datetime64]] AR_O: np.ndarray[Any, np.dtype[np.object_]] -AR_LIKE_b: List[bool] -AR_LIKE_u: List[np.uint32] -AR_LIKE_i: List[int] -AR_LIKE_f: List[float] -AR_LIKE_c: List[complex] -AR_LIKE_m: List[np.timedelta64] -AR_LIKE_M: List[np.datetime64] -AR_LIKE_O: List[np.object_] +AR_LIKE_b: list[bool] +AR_LIKE_u: list[np.uint32] +AR_LIKE_i: list[int] +AR_LIKE_f: list[float] +AR_LIKE_c: list[complex] +AR_LIKE_m: list[np.timedelta64] +AR_LIKE_M: list[np.datetime64] +AR_LIKE_O: list[np.object_] # Array subtraction diff --git a/numpy/typing/tests/data/reveal/array_constructors.pyi b/numpy/typing/tests/data/reveal/array_constructors.pyi index 233988e63..c3b0c3457 100644 --- a/numpy/typing/tests/data/reveal/array_constructors.pyi +++ b/numpy/typing/tests/data/reveal/array_constructors.pyi @@ -1,4 +1,4 @@ -from typing import List, Any, TypeVar +from typing import Any, TypeVar from pathlib import Path import numpy as np @@ -12,7 +12,7 @@ i8: np.int64 A: npt.NDArray[np.float64] B: SubClass[np.float64] -C: List[int] +C: list[int] def func(i: int, j: int, **kwargs: Any) -> SubClass[np.float64]: ... 
@@ -114,10 +114,24 @@ reveal_type(np.require(B, requirements="W")) # E: SubClass[{float64}] reveal_type(np.require(B, requirements="A")) # E: SubClass[{float64}] reveal_type(np.require(C)) # E: ndarray[Any, Any] -reveal_type(np.linspace(0, 10)) # E: ndarray[Any, Any] -reveal_type(np.linspace(0, 10, retstep=True)) # E: Tuple[ndarray[Any, Any], Any] -reveal_type(np.logspace(0, 10)) # E: ndarray[Any, Any] -reveal_type(np.geomspace(1, 10)) # E: ndarray[Any, Any] +reveal_type(np.linspace(0, 10)) # E: ndarray[Any, dtype[floating[Any]]] +reveal_type(np.linspace(0, 10j)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]] +reveal_type(np.linspace(0, 10, dtype=np.int64)) # E: ndarray[Any, dtype[{int64}]] +reveal_type(np.linspace(0, 10, dtype=int)) # E: ndarray[Any, dtype[Any]] +reveal_type(np.linspace(0, 10, retstep=True)) # E: Tuple[ndarray[Any, dtype[floating[Any]]], floating[Any]] +reveal_type(np.linspace(0j, 10, retstep=True)) # E: Tuple[ndarray[Any, dtype[complexfloating[Any, Any]]], complexfloating[Any, Any]] +reveal_type(np.linspace(0, 10, retstep=True, dtype=np.int64)) # E: Tuple[ndarray[Any, dtype[{int64}]], {int64}] +reveal_type(np.linspace(0j, 10, retstep=True, dtype=int)) # E: Tuple[ndarray[Any, dtype[Any]], Any] + +reveal_type(np.logspace(0, 10)) # E: ndarray[Any, dtype[floating[Any]]] +reveal_type(np.logspace(0, 10j)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]] +reveal_type(np.logspace(0, 10, dtype=np.int64)) # E: ndarray[Any, dtype[{int64}]] +reveal_type(np.logspace(0, 10, dtype=int)) # E: ndarray[Any, dtype[Any]] + +reveal_type(np.geomspace(0, 10)) # E: ndarray[Any, dtype[floating[Any]]] +reveal_type(np.geomspace(0, 10j)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]] +reveal_type(np.geomspace(0, 10, dtype=np.int64)) # E: ndarray[Any, dtype[{int64}]] +reveal_type(np.geomspace(0, 10, dtype=int)) # E: ndarray[Any, dtype[Any]] reveal_type(np.zeros_like(A)) # E: ndarray[Any, dtype[{float64}]] reveal_type(np.zeros_like(C)) # E: ndarray[Any, 
dtype[Any]] @@ -166,17 +180,17 @@ reveal_type(np.atleast_2d(A)) # E: ndarray[Any, dtype[{float64}]] reveal_type(np.atleast_3d(A)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(np.vstack([A, A])) # E: ndarray[Any, dtype[{float64}]] +reveal_type(np.vstack([A, A])) # E: ndarray[Any, Any] reveal_type(np.vstack([A, C])) # E: ndarray[Any, dtype[Any]] reveal_type(np.vstack([C, C])) # E: ndarray[Any, dtype[Any]] -reveal_type(np.hstack([A, A])) # E: ndarray[Any, dtype[{float64}]] +reveal_type(np.hstack([A, A])) # E: ndarray[Any, Any] -reveal_type(np.stack([A, A])) # E: ndarray[Any, dtype[{float64}]] +reveal_type(np.stack([A, A])) # E: Any reveal_type(np.stack([A, C])) # E: ndarray[Any, dtype[Any]] reveal_type(np.stack([C, C])) # E: ndarray[Any, dtype[Any]] -reveal_type(np.stack([A, A], axis=0)) # E: ndarray[Any, dtype[{float64}]] +reveal_type(np.stack([A, A], axis=0)) # E: Any reveal_type(np.stack([A, A], out=B)) # E: SubClass[{float64}] -reveal_type(np.block([[A, A], [A, A]])) # E: ndarray[Any, dtype[{float64}]] +reveal_type(np.block([[A, A], [A, A]])) # E: ndarray[Any, Any] reveal_type(np.block(C)) # E: ndarray[Any, dtype[Any]] diff --git a/numpy/typing/tests/data/reveal/arraypad.pyi b/numpy/typing/tests/data/reveal/arraypad.pyi index 995f82b57..a05d44034 100644 --- a/numpy/typing/tests/data/reveal/arraypad.pyi +++ b/numpy/typing/tests/data/reveal/arraypad.pyi @@ -1,18 +1,19 @@ -from typing import List, Any, Mapping, Tuple, SupportsIndex +from collections.abc import Mapping +from typing import Any, SupportsIndex import numpy as np import numpy.typing as npt def mode_func( ar: npt.NDArray[np.number[Any]], - width: Tuple[int, int], + width: tuple[int, int], iaxis: SupportsIndex, kwargs: Mapping[str, Any], ) -> None: ... 
AR_i8: npt.NDArray[np.int64] AR_f8: npt.NDArray[np.float64] -AR_LIKE: List[int] +AR_LIKE: list[int] reveal_type(np.pad(AR_i8, (2, 3), "constant")) # E: ndarray[Any, dtype[{int64}]] reveal_type(np.pad(AR_LIKE, (2, 3), "constant")) # E: ndarray[Any, dtype[Any]] diff --git a/numpy/typing/tests/data/reveal/arrayprint.pyi b/numpy/typing/tests/data/reveal/arrayprint.pyi index e797097eb..6e65a8d8a 100644 --- a/numpy/typing/tests/data/reveal/arrayprint.pyi +++ b/numpy/typing/tests/data/reveal/arrayprint.pyi @@ -1,4 +1,5 @@ -from typing import Any, Callable +from collections.abc import Callable +from typing import Any import numpy as np AR: np.ndarray[Any, Any] diff --git a/numpy/typing/tests/data/reveal/char.pyi b/numpy/typing/tests/data/reveal/char.pyi index ce8c1b269..0563b3472 100644 --- a/numpy/typing/tests/data/reveal/char.pyi +++ b/numpy/typing/tests/data/reveal/char.pyi @@ -1,6 +1,6 @@ import numpy as np import numpy.typing as npt -from typing import Sequence +from collections.abc import Sequence AR_U: npt.NDArray[np.str_] AR_S: npt.NDArray[np.bytes_] diff --git a/numpy/typing/tests/data/reveal/einsumfunc.pyi b/numpy/typing/tests/data/reveal/einsumfunc.pyi index 5b07e6d3c..3c7146ada 100644 --- a/numpy/typing/tests/data/reveal/einsumfunc.pyi +++ b/numpy/typing/tests/data/reveal/einsumfunc.pyi @@ -1,12 +1,12 @@ -from typing import List, Any +from typing import Any import numpy as np -AR_LIKE_b: List[bool] -AR_LIKE_u: List[np.uint32] -AR_LIKE_i: List[int] -AR_LIKE_f: List[float] -AR_LIKE_c: List[complex] -AR_LIKE_U: List[str] +AR_LIKE_b: list[bool] +AR_LIKE_u: list[np.uint32] +AR_LIKE_i: list[int] +AR_LIKE_f: list[float] +AR_LIKE_c: list[complex] +AR_LIKE_U: list[str] OUT_f: np.ndarray[Any, np.dtype[np.float64]] diff --git a/numpy/typing/tests/data/reveal/emath.pyi b/numpy/typing/tests/data/reveal/emath.pyi new file mode 100644 index 000000000..9ab2d72d2 --- /dev/null +++ b/numpy/typing/tests/data/reveal/emath.pyi @@ -0,0 +1,52 @@ +import numpy as np +import 
numpy.typing as npt + +AR_f8: npt.NDArray[np.float64] +AR_c16: npt.NDArray[np.complex128] +f8: np.float64 +c16: np.complex128 + +reveal_type(np.emath.sqrt(f8)) # E: Any +reveal_type(np.emath.sqrt(AR_f8)) # E: ndarray[Any, dtype[Any]] +reveal_type(np.emath.sqrt(c16)) # E: complexfloating[Any, Any] +reveal_type(np.emath.sqrt(AR_c16)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]] + +reveal_type(np.emath.log(f8)) # E: Any +reveal_type(np.emath.log(AR_f8)) # E: ndarray[Any, dtype[Any]] +reveal_type(np.emath.log(c16)) # E: complexfloating[Any, Any] +reveal_type(np.emath.log(AR_c16)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]] + +reveal_type(np.emath.log10(f8)) # E: Any +reveal_type(np.emath.log10(AR_f8)) # E: ndarray[Any, dtype[Any]] +reveal_type(np.emath.log10(c16)) # E: complexfloating[Any, Any] +reveal_type(np.emath.log10(AR_c16)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]] + +reveal_type(np.emath.log2(f8)) # E: Any +reveal_type(np.emath.log2(AR_f8)) # E: ndarray[Any, dtype[Any]] +reveal_type(np.emath.log2(c16)) # E: complexfloating[Any, Any] +reveal_type(np.emath.log2(AR_c16)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]] + +reveal_type(np.emath.logn(f8, 2)) # E: Any +reveal_type(np.emath.logn(AR_f8, 4)) # E: ndarray[Any, dtype[Any]] +reveal_type(np.emath.logn(f8, 1j)) # E: complexfloating[Any, Any] +reveal_type(np.emath.logn(AR_c16, 1.5)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]] + +reveal_type(np.emath.power(f8, 2)) # E: Any +reveal_type(np.emath.power(AR_f8, 4)) # E: ndarray[Any, dtype[Any]] +reveal_type(np.emath.power(f8, 2j)) # E: complexfloating[Any, Any] +reveal_type(np.emath.power(AR_c16, 1.5)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]] + +reveal_type(np.emath.arccos(f8)) # E: Any +reveal_type(np.emath.arccos(AR_f8)) # E: ndarray[Any, dtype[Any]] +reveal_type(np.emath.arccos(c16)) # E: complexfloating[Any, Any] +reveal_type(np.emath.arccos(AR_c16)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]] + 
+reveal_type(np.emath.arcsin(f8)) # E: Any +reveal_type(np.emath.arcsin(AR_f8)) # E: ndarray[Any, dtype[Any]] +reveal_type(np.emath.arcsin(c16)) # E: complexfloating[Any, Any] +reveal_type(np.emath.arcsin(AR_c16)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]] + +reveal_type(np.emath.arctanh(f8)) # E: Any +reveal_type(np.emath.arctanh(AR_f8)) # E: ndarray[Any, dtype[Any]] +reveal_type(np.emath.arctanh(c16)) # E: complexfloating[Any, Any] +reveal_type(np.emath.arctanh(AR_c16)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]] diff --git a/numpy/typing/tests/data/reveal/index_tricks.pyi b/numpy/typing/tests/data/reveal/index_tricks.pyi index cee4d8c3e..4018605ea 100644 --- a/numpy/typing/tests/data/reveal/index_tricks.pyi +++ b/numpy/typing/tests/data/reveal/index_tricks.pyi @@ -1,10 +1,10 @@ -from typing import Any, List +from typing import Any import numpy as np -AR_LIKE_b: List[bool] -AR_LIKE_i: List[int] -AR_LIKE_f: List[float] -AR_LIKE_U: List[str] +AR_LIKE_b: list[bool] +AR_LIKE_i: list[int] +AR_LIKE_f: list[float] +AR_LIKE_U: list[str] AR_i8: np.ndarray[Any, np.dtype[np.int64]] @@ -24,6 +24,8 @@ reveal_type(iter(np.ndenumerate(AR_i8))) # E: Iterator[Tuple[builtins.tuple[bui reveal_type(iter(np.ndenumerate(AR_LIKE_f))) # E: Iterator[Tuple[builtins.tuple[builtins.int], {double}]] reveal_type(iter(np.ndenumerate(AR_LIKE_U))) # E: Iterator[Tuple[builtins.tuple[builtins.int], str_]] +reveal_type(np.ndindex(1, 2, 3)) # E: numpy.ndindex +reveal_type(np.ndindex((1, 2, 3))) # E: numpy.ndindex reveal_type(iter(np.ndindex(1, 2, 3))) # E: Iterator[builtins.tuple[builtins.int]] reveal_type(next(np.ndindex(1, 2, 3))) # E: builtins.tuple[builtins.int] diff --git a/numpy/typing/tests/data/reveal/lib_utils.pyi b/numpy/typing/tests/data/reveal/lib_utils.pyi index d82012707..9b1bf4123 100644 --- a/numpy/typing/tests/data/reveal/lib_utils.pyi +++ b/numpy/typing/tests/data/reveal/lib_utils.pyi @@ -1,10 +1,10 @@ from io import StringIO -from typing import Any, Dict +from 
typing import Any import numpy as np AR: np.ndarray[Any, np.dtype[np.float64]] -AR_DICT: Dict[str, np.ndarray[Any, np.dtype[np.float64]]] +AR_DICT: dict[str, np.ndarray[Any, np.dtype[np.float64]]] FILE: StringIO def func(a: int) -> bool: ... diff --git a/numpy/typing/tests/data/reveal/multiarray.pyi b/numpy/typing/tests/data/reveal/multiarray.pyi index 0e91a7afd..f8937c379 100644 --- a/numpy/typing/tests/data/reveal/multiarray.pyi +++ b/numpy/typing/tests/data/reveal/multiarray.pyi @@ -1,4 +1,5 @@ -from typing import Any, List, TypeVar +import datetime as dt +from typing import Any, TypeVar from pathlib import Path import numpy as np @@ -16,8 +17,8 @@ AR_u1: npt.NDArray[np.uint8] AR_m: npt.NDArray[np.timedelta64] AR_M: npt.NDArray[np.datetime64] -AR_LIKE_f: List[float] -AR_LIKE_i: List[int] +AR_LIKE_f: list[float] +AR_LIKE_i: list[int] m: np.timedelta64 M: np.datetime64 @@ -27,6 +28,10 @@ b_i8_f8_f8 = np.broadcast(AR_i8, AR_f8, AR_f8) nditer_obj: np.nditer +date_scalar: dt.date +date_seq: list[dt.date] +timedelta_seq: list[dt.timedelta] + def func(a: int) -> bool: ... 
reveal_type(next(b_f8)) # E: tuple[Any] @@ -108,19 +113,26 @@ reveal_type(np.datetime_data(np.dtype(np.timedelta64))) # E: Tuple[builtins.str reveal_type(np.busday_count("2011-01", "2011-02")) # E: {int_} reveal_type(np.busday_count(["2011-01"], "2011-02")) # E: ndarray[Any, dtype[{int_}]] +reveal_type(np.busday_count(["2011-01"], date_scalar)) # E: ndarray[Any, dtype[{int_}]] reveal_type(np.busday_offset(M, m)) # E: datetime64 +reveal_type(np.busday_offset(date_scalar, m)) # E: datetime64 reveal_type(np.busday_offset(M, 5)) # E: datetime64 reveal_type(np.busday_offset(AR_M, m)) # E: ndarray[Any, dtype[datetime64]] +reveal_type(np.busday_offset(M, timedelta_seq)) # E: ndarray[Any, dtype[datetime64]] reveal_type(np.busday_offset("2011-01", "2011-02", roll="forward")) # E: datetime64 reveal_type(np.busday_offset(["2011-01"], "2011-02", roll="forward")) # E: ndarray[Any, dtype[datetime64]] reveal_type(np.is_busday("2012")) # E: bool_ +reveal_type(np.is_busday(date_scalar)) # E: bool_ reveal_type(np.is_busday(["2012"])) # E: ndarray[Any, dtype[bool_]] reveal_type(np.datetime_as_string(M)) # E: str_ reveal_type(np.datetime_as_string(AR_M)) # E: ndarray[Any, dtype[str_]] +reveal_type(np.busdaycalendar(holidays=date_seq)) # E: busdaycalendar +reveal_type(np.busdaycalendar(holidays=[M])) # E: busdaycalendar + reveal_type(np.compare_chararrays("a", "b", "!=", rstrip=False)) # E: ndarray[Any, dtype[bool_]] reveal_type(np.compare_chararrays(b"a", b"a", "==", True)) # E: ndarray[Any, dtype[bool_]] diff --git a/numpy/typing/tests/data/reveal/nbit_base_example.pyi b/numpy/typing/tests/data/reveal/nbit_base_example.pyi index d34f6f69a..a7cc68194 100644 --- a/numpy/typing/tests/data/reveal/nbit_base_example.pyi +++ b/numpy/typing/tests/data/reveal/nbit_base_example.pyi @@ -1,11 +1,13 @@ -from typing import TypeVar, Union +from __future__ import annotations + +from typing import TypeVar import numpy as np import numpy.typing as npt T1 = TypeVar("T1", bound=npt.NBitBase) T2 = 
TypeVar("T2", bound=npt.NBitBase) -def add(a: np.floating[T1], b: np.integer[T2]) -> np.floating[Union[T1, T2]]: +def add(a: np.floating[T1], b: np.integer[T2]) -> np.floating[T1 | T2]: return a + b i8: np.int64 diff --git a/numpy/typing/tests/data/reveal/ndarray_misc.pyi b/numpy/typing/tests/data/reveal/ndarray_misc.pyi index cd1c3136f..f91d6351b 100644 --- a/numpy/typing/tests/data/reveal/ndarray_misc.pyi +++ b/numpy/typing/tests/data/reveal/ndarray_misc.pyi @@ -24,6 +24,9 @@ AR_V: NDArray[np.void] ctypes_obj = AR_f8.ctypes +reveal_type(AR_f8.__dlpack__()) # E: Any +reveal_type(AR_f8.__dlpack_device__()) # E: Tuple[int, Literal[0]] + reveal_type(ctypes_obj.data) # E: int reveal_type(ctypes_obj.shape) # E: ctypes.Array[{c_intp}] reveal_type(ctypes_obj.strides) # E: ctypes.Array[{c_intp}] diff --git a/numpy/typing/tests/data/reveal/nested_sequence.pyi b/numpy/typing/tests/data/reveal/nested_sequence.pyi index 4d3aad467..c9f91cfa2 100644 --- a/numpy/typing/tests/data/reveal/nested_sequence.pyi +++ b/numpy/typing/tests/data/reveal/nested_sequence.pyi @@ -1,4 +1,5 @@ -from typing import Sequence, Tuple, List, Any +from collections.abc import Sequence +from typing import Any import numpy.typing as npt a: Sequence[int] @@ -6,8 +7,8 @@ b: Sequence[Sequence[int]] c: Sequence[Sequence[Sequence[int]]] d: Sequence[Sequence[Sequence[Sequence[int]]]] e: Sequence[bool] -f: Tuple[int, ...] -g: List[int] +f: tuple[int, ...] 
+g: list[int] h: Sequence[Any] def func(a: npt._NestedSequence[int]) -> None: diff --git a/numpy/typing/tests/data/reveal/npyio.pyi b/numpy/typing/tests/data/reveal/npyio.pyi index f54fbf610..68605cf94 100644 --- a/numpy/typing/tests/data/reveal/npyio.pyi +++ b/numpy/typing/tests/data/reveal/npyio.pyi @@ -1,6 +1,6 @@ import re import pathlib -from typing import IO, List +from typing import IO import numpy.typing as npt import numpy as np @@ -14,7 +14,7 @@ bag_obj: np.lib.npyio.BagObj[int] npz_file: np.lib.npyio.NpzFile AR_i8: npt.NDArray[np.int64] -AR_LIKE_f8: List[float] +AR_LIKE_f8: list[float] class BytesWriter: def write(self, data: bytes) -> None: ... @@ -64,6 +64,7 @@ reveal_type(np.loadtxt(bytes_file)) # E: ndarray[Any, dtype[{float64}]] reveal_type(np.loadtxt(pathlib_path, dtype=np.str_)) # E: ndarray[Any, dtype[str_]] reveal_type(np.loadtxt(str_path, dtype=str, skiprows=2)) # E: ndarray[Any, dtype[Any]] reveal_type(np.loadtxt(str_file, comments="test")) # E: ndarray[Any, dtype[{float64}]] +reveal_type(np.loadtxt(str_file, comments=None)) # E: ndarray[Any, dtype[{float64}]] reveal_type(np.loadtxt(str_path, delimiter="\n")) # E: ndarray[Any, dtype[{float64}]] reveal_type(np.loadtxt(str_path, ndmin=2)) # E: ndarray[Any, dtype[{float64}]] reveal_type(np.loadtxt(["1", "2", "3"])) # E: ndarray[Any, dtype[{float64}]] @@ -76,7 +77,7 @@ reveal_type(np.fromregex(bytes_reader, "test", np.float64)) # E: ndarray[Any, d reveal_type(np.genfromtxt(bytes_file)) # E: ndarray[Any, dtype[{float64}]] reveal_type(np.genfromtxt(pathlib_path, dtype=np.str_)) # E: ndarray[Any, dtype[str_]] -reveal_type(np.genfromtxt(str_path, dtype=str, skiprows=2)) # E: ndarray[Any, dtype[Any]] +reveal_type(np.genfromtxt(str_path, dtype=str, skip_header=2)) # E: ndarray[Any, dtype[Any]] reveal_type(np.genfromtxt(str_file, comments="test")) # E: ndarray[Any, dtype[{float64}]] reveal_type(np.genfromtxt(str_path, delimiter="\n")) # E: ndarray[Any, dtype[{float64}]] 
reveal_type(np.genfromtxt(str_path, ndmin=2)) # E: ndarray[Any, dtype[{float64}]] diff --git a/numpy/typing/tests/data/reveal/numeric.pyi b/numpy/typing/tests/data/reveal/numeric.pyi index bf5653937..246d79be8 100644 --- a/numpy/typing/tests/data/reveal/numeric.pyi +++ b/numpy/typing/tests/data/reveal/numeric.pyi @@ -5,7 +5,6 @@ Does not include tests which fall under ``array_constructors``. """ -from typing import List import numpy as np import numpy.typing as npt @@ -22,7 +21,7 @@ AR_c16: npt.NDArray[np.complex128] AR_m: npt.NDArray[np.timedelta64] AR_O: npt.NDArray[np.object_] -B: List[int] +B: list[int] C: SubClass reveal_type(np.count_nonzero(i8)) # E: int diff --git a/numpy/typing/tests/data/reveal/numerictypes.pyi b/numpy/typing/tests/data/reveal/numerictypes.pyi index cc2335264..e1857557d 100644 --- a/numpy/typing/tests/data/reveal/numerictypes.pyi +++ b/numpy/typing/tests/data/reveal/numerictypes.pyi @@ -33,9 +33,9 @@ reveal_type(np.nbytes[np.int64]) # E: int reveal_type(np.ScalarType) # E: Tuple reveal_type(np.ScalarType[0]) # E: Type[builtins.int] -reveal_type(np.ScalarType[4]) # E: Type[builtins.bool] -reveal_type(np.ScalarType[9]) # E: Type[{csingle}] -reveal_type(np.ScalarType[11]) # E: Type[{clongdouble}] +reveal_type(np.ScalarType[3]) # E: Type[builtins.bool] +reveal_type(np.ScalarType[8]) # E: Type[{csingle}] +reveal_type(np.ScalarType[10]) # E: Type[{clongdouble}] reveal_type(np.typecodes["Character"]) # E: Literal['c'] reveal_type(np.typecodes["Complex"]) # E: Literal['FDG'] diff --git a/numpy/typing/tests/data/reveal/random.pyi b/numpy/typing/tests/data/reveal/random.pyi index 4e06aa7d5..edea6a291 100644 --- a/numpy/typing/tests/data/reveal/random.pyi +++ b/numpy/typing/tests/data/reveal/random.pyi @@ -1,6 +1,6 @@ from __future__ import annotations -from typing import Any, List +from typing import Any import numpy as np @@ -79,13 +79,13 @@ D_arr_0p9: np.ndarray[Any, np.dtype[np.float64]] = np.array([0.9]) D_arr_1p5: np.ndarray[Any, 
np.dtype[np.float64]] = np.array([1.5]) I_arr_10: np.ndarray[Any, np.dtype[np.int_]] = np.array([10], dtype=np.int_) I_arr_20: np.ndarray[Any, np.dtype[np.int_]] = np.array([20], dtype=np.int_) -D_arr_like_0p1: List[float] = [0.1] -D_arr_like_0p5: List[float] = [0.5] -D_arr_like_0p9: List[float] = [0.9] -D_arr_like_1p5: List[float] = [1.5] -I_arr_like_10: List[int] = [10] -I_arr_like_20: List[int] = [20] -D_2D_like: List[List[float]] = [[1, 2], [2, 3], [3, 4], [4, 5.1]] +D_arr_like_0p1: list[float] = [0.1] +D_arr_like_0p5: list[float] = [0.5] +D_arr_like_0p9: list[float] = [0.9] +D_arr_like_1p5: list[float] = [1.5] +I_arr_like_10: list[int] = [10] +I_arr_like_20: list[int] = [20] +D_2D_like: list[list[float]] = [[1, 2], [2, 3], [3, 4], [4, 5.1]] D_2D: np.ndarray[Any, np.dtype[np.float64]] = np.array(D_2D_like) S_out: np.ndarray[Any, np.dtype[np.float32]] = np.empty(1, dtype=np.float32) D_out: np.ndarray[Any, np.dtype[np.float64]] = np.empty(1) @@ -501,7 +501,7 @@ reveal_type(def_gen.integers([100])) # E: ndarray[Any, dtype[signedinteger[typi reveal_type(def_gen.integers(0, [100])) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] I_bool_low: np.ndarray[Any, np.dtype[np.bool_]] = np.array([0], dtype=np.bool_) -I_bool_low_like: List[int] = [0] +I_bool_low_like: list[int] = [0] I_bool_high_open: np.ndarray[Any, np.dtype[np.bool_]] = np.array([1], dtype=np.bool_) I_bool_high_closed: np.ndarray[Any, np.dtype[np.bool_]] = np.array([1], dtype=np.bool_) @@ -530,7 +530,7 @@ reveal_type(def_gen.integers(I_bool_low, I_bool_high_closed, dtype=np.bool_, end reveal_type(def_gen.integers(0, I_bool_high_closed, dtype=np.bool_, endpoint=True)) # E: ndarray[Any, dtype[bool_] I_u1_low: np.ndarray[Any, np.dtype[np.uint8]] = np.array([0], dtype=np.uint8) -I_u1_low_like: List[int] = [0] +I_u1_low_like: list[int] = [0] I_u1_high_open: np.ndarray[Any, np.dtype[np.uint8]] = np.array([255], dtype=np.uint8) I_u1_high_closed: np.ndarray[Any, np.dtype[np.uint8]] = np.array([255], 
dtype=np.uint8) @@ -571,7 +571,7 @@ reveal_type(def_gen.integers(I_u1_low, I_u1_high_closed, dtype=np.uint8, endpoin reveal_type(def_gen.integers(0, I_u1_high_closed, dtype=np.uint8, endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._8Bit]]] I_u2_low: np.ndarray[Any, np.dtype[np.uint16]] = np.array([0], dtype=np.uint16) -I_u2_low_like: List[int] = [0] +I_u2_low_like: list[int] = [0] I_u2_high_open: np.ndarray[Any, np.dtype[np.uint16]] = np.array([65535], dtype=np.uint16) I_u2_high_closed: np.ndarray[Any, np.dtype[np.uint16]] = np.array([65535], dtype=np.uint16) @@ -612,7 +612,7 @@ reveal_type(def_gen.integers(I_u2_low, I_u2_high_closed, dtype=np.uint16, endpoi reveal_type(def_gen.integers(0, I_u2_high_closed, dtype=np.uint16, endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._16Bit]]] I_u4_low: np.ndarray[Any, np.dtype[np.uint32]] = np.array([0], dtype=np.uint32) -I_u4_low_like: List[int] = [0] +I_u4_low_like: list[int] = [0] I_u4_high_open: np.ndarray[Any, np.dtype[np.uint32]] = np.array([4294967295], dtype=np.uint32) I_u4_high_closed: np.ndarray[Any, np.dtype[np.uint32]] = np.array([4294967295], dtype=np.uint32) @@ -678,7 +678,7 @@ reveal_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype=np.uint, endpoint reveal_type(def_gen.integers(0, I_u4_high_closed, dtype=np.uint, endpoint=True)) # E: ndarray[Any, dtype[{uint}]] I_u8_low: np.ndarray[Any, np.dtype[np.uint64]] = np.array([0], dtype=np.uint64) -I_u8_low_like: List[int] = [0] +I_u8_low_like: list[int] = [0] I_u8_high_open: np.ndarray[Any, np.dtype[np.uint64]] = np.array([18446744073709551615], dtype=np.uint64) I_u8_high_closed: np.ndarray[Any, np.dtype[np.uint64]] = np.array([18446744073709551615], dtype=np.uint64) @@ -719,7 +719,7 @@ reveal_type(def_gen.integers(I_u8_low, I_u8_high_closed, dtype=np.uint64, endpoi reveal_type(def_gen.integers(0, I_u8_high_closed, dtype=np.uint64, endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._64Bit]]] I_i1_low: np.ndarray[Any, 
np.dtype[np.int8]] = np.array([-128], dtype=np.int8) -I_i1_low_like: List[int] = [-128] +I_i1_low_like: list[int] = [-128] I_i1_high_open: np.ndarray[Any, np.dtype[np.int8]] = np.array([127], dtype=np.int8) I_i1_high_closed: np.ndarray[Any, np.dtype[np.int8]] = np.array([127], dtype=np.int8) @@ -760,7 +760,7 @@ reveal_type(def_gen.integers(I_i1_low, I_i1_high_closed, dtype=np.int8, endpoint reveal_type(def_gen.integers(-128, I_i1_high_closed, dtype=np.int8, endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._8Bit]]] I_i2_low: np.ndarray[Any, np.dtype[np.int16]] = np.array([-32768], dtype=np.int16) -I_i2_low_like: List[int] = [-32768] +I_i2_low_like: list[int] = [-32768] I_i2_high_open: np.ndarray[Any, np.dtype[np.int16]] = np.array([32767], dtype=np.int16) I_i2_high_closed: np.ndarray[Any, np.dtype[np.int16]] = np.array([32767], dtype=np.int16) @@ -801,7 +801,7 @@ reveal_type(def_gen.integers(I_i2_low, I_i2_high_closed, dtype=np.int16, endpoin reveal_type(def_gen.integers(-32768, I_i2_high_closed, dtype=np.int16, endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._16Bit]]] I_i4_low: np.ndarray[Any, np.dtype[np.int32]] = np.array([-2147483648], dtype=np.int32) -I_i4_low_like: List[int] = [-2147483648] +I_i4_low_like: list[int] = [-2147483648] I_i4_high_open: np.ndarray[Any, np.dtype[np.int32]] = np.array([2147483647], dtype=np.int32) I_i4_high_closed: np.ndarray[Any, np.dtype[np.int32]] = np.array([2147483647], dtype=np.int32) @@ -842,7 +842,7 @@ reveal_type(def_gen.integers(I_i4_low, I_i4_high_closed, dtype=np.int32, endpoin reveal_type(def_gen.integers(-2147483648, I_i4_high_closed, dtype=np.int32, endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._32Bit]]] I_i8_low: np.ndarray[Any, np.dtype[np.int64]] = np.array([-9223372036854775808], dtype=np.int64) -I_i8_low_like: List[int] = [-9223372036854775808] +I_i8_low_like: list[int] = [-9223372036854775808] I_i8_high_open: np.ndarray[Any, np.dtype[np.int64]] = 
np.array([9223372036854775807], dtype=np.int64) I_i8_high_closed: np.ndarray[Any, np.dtype[np.int64]] = np.array([9223372036854775807], dtype=np.int64) diff --git a/numpy/typing/tests/data/reveal/rec.pyi b/numpy/typing/tests/data/reveal/rec.pyi index bf51c82a3..b2eaca899 100644 --- a/numpy/typing/tests/data/reveal/rec.pyi +++ b/numpy/typing/tests/data/reveal/rec.pyi @@ -1,12 +1,12 @@ import io -from typing import Any, List +from typing import Any import numpy as np import numpy.typing as npt AR_i8: npt.NDArray[np.int64] REC_AR_V: np.recarray[Any, np.dtype[np.record]] -AR_LIST: List[npt.NDArray[np.int64]] +AR_LIST: list[npt.NDArray[np.int64]] format_parser: np.format_parser record: np.record @@ -104,3 +104,24 @@ reveal_type(np.rec.array( # recarray[Any, dtype[record]] formats=[np.int64, np.float64], names=["i8", "f8"] )) + +reveal_type(np.rec.array( # recarray[Any, dtype[Any]] + None, + dtype=np.float64, + shape=(10, 3), +)) +reveal_type(np.rec.array( # recarray[Any, dtype[Any]] + None, + formats=[np.int64, np.float64], + names=["i8", "f8"], + shape=(10, 3), +)) +reveal_type(np.rec.array( # recarray[Any, dtype[Any]] + file_obj, + dtype=np.float64, +)) +reveal_type(np.rec.array( # recarray[Any, dtype[Any]] + file_obj, + formats=[np.int64, np.float64], + names=["i8", "f8"], +)) diff --git a/numpy/typing/tests/data/reveal/shape_base.pyi b/numpy/typing/tests/data/reveal/shape_base.pyi index f13678c3a..70e85dd09 100644 --- a/numpy/typing/tests/data/reveal/shape_base.pyi +++ b/numpy/typing/tests/data/reveal/shape_base.pyi @@ -1,6 +1,6 @@ import numpy as np from numpy.typing import NDArray -from typing import Any, List +from typing import Any i8: np.int64 f8: np.float64 @@ -9,7 +9,7 @@ AR_b: NDArray[np.bool_] AR_i8: NDArray[np.int64] AR_f8: NDArray[np.float64] -AR_LIKE_f8: List[float] +AR_LIKE_f8: list[float] reveal_type(np.take_along_axis(AR_f8, AR_i8, axis=1)) # E: ndarray[Any, dtype[{float64}]] reveal_type(np.take_along_axis(f8, AR_i8, axis=None)) # E: ndarray[Any, 
dtype[{float64}]] diff --git a/numpy/typing/tests/data/reveal/stride_tricks.pyi b/numpy/typing/tests/data/reveal/stride_tricks.pyi index 0d6dcd388..4eeb42095 100644 --- a/numpy/typing/tests/data/reveal/stride_tricks.pyi +++ b/numpy/typing/tests/data/reveal/stride_tricks.pyi @@ -1,10 +1,10 @@ -from typing import List, Dict, Any +from typing import Any import numpy as np import numpy.typing as npt AR_f8: npt.NDArray[np.float64] -AR_LIKE_f: List[float] -interface_dict: Dict[str, Any] +AR_LIKE_f: list[float] +interface_dict: dict[str, Any] reveal_type(np.lib.stride_tricks.DummyArray(interface_dict)) # E: lib.stride_tricks.DummyArray diff --git a/numpy/typing/tests/data/reveal/testing.pyi b/numpy/typing/tests/data/reveal/testing.pyi index 9813dc723..fb419d48d 100644 --- a/numpy/typing/tests/data/reveal/testing.pyi +++ b/numpy/typing/tests/data/reveal/testing.pyi @@ -2,7 +2,8 @@ from __future__ import annotations import re import sys -from typing import Any, Callable, TypeVar +from collections.abc import Callable +from typing import Any, TypeVar from pathlib import Path import numpy as np diff --git a/numpy/typing/tests/data/reveal/twodim_base.pyi b/numpy/typing/tests/data/reveal/twodim_base.pyi index 0318c3cf1..0dc58d437 100644 --- a/numpy/typing/tests/data/reveal/twodim_base.pyi +++ b/numpy/typing/tests/data/reveal/twodim_base.pyi @@ -1,4 +1,4 @@ -from typing import Any, List, TypeVar +from typing import Any, TypeVar import numpy as np import numpy.typing as npt @@ -21,7 +21,7 @@ AR_f: npt.NDArray[np.float64] AR_c: npt.NDArray[np.complex128] AR_O: npt.NDArray[np.object_] -AR_LIKE_b: List[bool] +AR_LIKE_b: list[bool] reveal_type(np.fliplr(AR_b)) # E: ndarray[Any, dtype[bool_]] reveal_type(np.fliplr(AR_LIKE_b)) # E: ndarray[Any, dtype[Any]] diff --git a/numpy/typing/tests/data/reveal/type_check.pyi b/numpy/typing/tests/data/reveal/type_check.pyi index 13d41d844..40344905b 100644 --- a/numpy/typing/tests/data/reveal/type_check.pyi +++ 
b/numpy/typing/tests/data/reveal/type_check.pyi @@ -1,4 +1,3 @@ -from typing import List import numpy as np import numpy.typing as npt @@ -14,7 +13,7 @@ AR_f16: npt.NDArray[np.floating[npt._128Bit]] AR_c8: npt.NDArray[np.complex64] AR_c16: npt.NDArray[np.complex128] -AR_LIKE_f: List[float] +AR_LIKE_f: list[float] class RealObj: real: slice diff --git a/numpy/typing/tests/data/reveal/ufunclike.pyi b/numpy/typing/tests/data/reveal/ufunclike.pyi index 2d67c923f..9f06600b6 100644 --- a/numpy/typing/tests/data/reveal/ufunclike.pyi +++ b/numpy/typing/tests/data/reveal/ufunclike.pyi @@ -1,11 +1,11 @@ -from typing import List, Any +from typing import Any import numpy as np -AR_LIKE_b: List[bool] -AR_LIKE_u: List[np.uint32] -AR_LIKE_i: List[int] -AR_LIKE_f: List[float] -AR_LIKE_O: List[np.object_] +AR_LIKE_b: list[bool] +AR_LIKE_u: list[np.uint32] +AR_LIKE_i: list[int] +AR_LIKE_f: list[float] +AR_LIKE_O: list[np.object_] AR_U: np.ndarray[Any, np.dtype[np.str_]] diff --git a/numpy/typing/tests/data/reveal/warnings_and_errors.pyi b/numpy/typing/tests/data/reveal/warnings_and_errors.pyi index d5c50448a..19fa432f9 100644 --- a/numpy/typing/tests/data/reveal/warnings_and_errors.pyi +++ b/numpy/typing/tests/data/reveal/warnings_and_errors.pyi @@ -1,5 +1,3 @@ -from typing import Type - import numpy as np reveal_type(np.ModuleDeprecationWarning()) # E: ModuleDeprecationWarning diff --git a/numpy/typing/tests/test_generic_alias.py b/numpy/typing/tests/test_generic_alias.py index 39343420b..8df2eea93 100644 --- a/numpy/typing/tests/test_generic_alias.py +++ b/numpy/typing/tests/test_generic_alias.py @@ -5,7 +5,7 @@ import copy import types import pickle import weakref -from typing import TypeVar, Any, Callable, Tuple, Type, Union +from typing import TypeVar, Any, Union, Callable import pytest import numpy as np @@ -31,7 +31,7 @@ GETATTR_NAMES = sorted(set(dir(np.ndarray)) - _GenericAlias._ATTR_EXCEPTIONS) BUFFER = np.array([1], dtype=np.int64) BUFFER.setflags(write=False) -def 
_get_subclass_mro(base: type) -> Tuple[type, ...]: +def _get_subclass_mro(base: type) -> tuple[type, ...]: class Subclass(base): # type: ignore[misc,valid-type] pass return Subclass.__mro__[1:] @@ -132,7 +132,7 @@ class TestGenericAlias: def test_raise( self, name: str, - exc_type: Type[BaseException], + exc_type: type[BaseException], func: FuncType, ) -> None: """Test operations that are supposed to raise.""" diff --git a/numpy/typing/tests/test_typing.py b/numpy/typing/tests/test_typing.py index fe58a8f4c..bb3914434 100644 --- a/numpy/typing/tests/test_typing.py +++ b/numpy/typing/tests/test_typing.py @@ -136,7 +136,7 @@ def test_fail(path: str) -> None: output_mypy = OUTPUT_MYPY assert path in output_mypy for error_line in output_mypy[path]: - error_line = _strip_filename(error_line) + error_line = _strip_filename(error_line).split("\n", 1)[0] match = re.match( r"(?P<lineno>\d+): (error|note): .+$", error_line, @@ -368,6 +368,7 @@ Expression: {} Expected reveal: {!r} Observed reveal: {!r} """ +_STRIP_PATTERN = re.compile(r"(\w+\.)+(\w+)") def _test_reveal( @@ -378,9 +379,8 @@ def _test_reveal( lineno: int, ) -> None: """Error-reporting helper function for `test_reveal`.""" - strip_pattern = re.compile(r"(\w+\.)+(\w+)") - stripped_reveal = strip_pattern.sub(strip_func, reveal) - stripped_expected_reveal = strip_pattern.sub(strip_func, expected_reveal) + stripped_reveal = _STRIP_PATTERN.sub(strip_func, reveal) + stripped_expected_reveal = _STRIP_PATTERN.sub(strip_func, expected_reveal) if stripped_reveal not in stripped_expected_reveal: raise AssertionError( _REVEAL_MSG.format(lineno, |