summaryrefslogtreecommitdiff
path: root/numpy
diff options
context:
space:
mode:
authorczgdp1807 <gdp.1807@gmail.com>2021-09-03 12:17:26 +0530
committerczgdp1807 <gdp.1807@gmail.com>2021-09-03 12:17:26 +0530
commit781d0a7ac61ce007e65abcd4e30f2181e729ae61 (patch)
treef45f38a246bcefbca9ca8a08bd8ba55cbc6cdb15 /numpy
parentb341e4c3249817d2e14ddf71aa850a8a896b9303 (diff)
parent2ae1e068710174dc57b5ba5ad688517608efcf26 (diff)
downloadnumpy-781d0a7ac61ce007e65abcd4e30f2181e729ae61.tar.gz
resolved conflicts
Diffstat (limited to 'numpy')
-rw-r--r--numpy/__init__.py94
-rw-r--r--numpy/__init__.pyi173
-rw-r--r--numpy/_pytesttester.py13
-rw-r--r--numpy/_pytesttester.pyi3
-rw-r--r--numpy/array_api/__init__.py370
-rw-r--r--numpy/array_api/_array_object.py1029
-rw-r--r--numpy/array_api/_constants.py6
-rw-r--r--numpy/array_api/_creation_functions.py316
-rw-r--r--numpy/array_api/_data_type_functions.py127
-rw-r--r--numpy/array_api/_dtypes.py143
-rw-r--r--numpy/array_api/_elementwise_functions.py729
-rw-r--r--numpy/array_api/_linear_algebra_functions.py68
-rw-r--r--numpy/array_api/_manipulation_functions.py86
-rw-r--r--numpy/array_api/_searching_functions.py46
-rw-r--r--numpy/array_api/_set_functions.py31
-rw-r--r--numpy/array_api/_sorting_functions.py37
-rw-r--r--numpy/array_api/_statistical_functions.py81
-rw-r--r--numpy/array_api/_typing.py44
-rw-r--r--numpy/array_api/_utility_functions.py37
-rw-r--r--numpy/array_api/setup.py12
-rw-r--r--numpy/array_api/tests/__init__.py7
-rw-r--r--numpy/array_api/tests/test_array_object.py269
-rw-r--r--numpy/array_api/tests/test_creation_functions.py141
-rw-r--r--numpy/array_api/tests/test_elementwise_functions.py111
-rw-r--r--numpy/core/_add_newdocs.py2
-rw-r--r--numpy/core/_add_newdocs_scalars.py45
-rw-r--r--numpy/core/_asarray.pyi8
-rw-r--r--numpy/core/_type_aliases.py9
-rw-r--r--numpy/core/_type_aliases.pyi8
-rw-r--r--numpy/core/_ufunc_config.pyi10
-rw-r--r--numpy/core/arrayprint.pyi8
-rw-r--r--numpy/core/code_generators/generate_umath.py4
-rw-r--r--numpy/core/einsumfunc.pyi35
-rw-r--r--numpy/core/fromnumeric.py9
-rw-r--r--numpy/core/fromnumeric.pyi8
-rw-r--r--numpy/core/function_base.pyi8
-rw-r--r--numpy/core/include/numpy/npy_cpu.h3
-rw-r--r--numpy/core/include/numpy/npy_endian.h1
-rw-r--r--numpy/core/include/numpy/npy_math.h2
-rw-r--r--numpy/core/multiarray.pyi116
-rw-r--r--numpy/core/numeric.pyi7
-rw-r--r--numpy/core/numerictypes.pyi12
-rw-r--r--numpy/core/overrides.py38
-rw-r--r--numpy/core/records.py14
-rw-r--r--numpy/core/setup.py6
-rw-r--r--numpy/core/shape_base.pyi20
-rw-r--r--numpy/core/src/common/npy_cpu_dispatch.h8
-rw-r--r--numpy/core/src/common/simd/avx2/arithmetic.h2
-rw-r--r--numpy/core/src/multiarray/alloc.c5
-rw-r--r--numpy/core/src/multiarray/convert.c12
-rw-r--r--numpy/core/src/multiarray/convert_datatype.c2
-rw-r--r--numpy/core/src/multiarray/ctors.c2
-rw-r--r--numpy/core/src/multiarray/descriptor.c16
-rw-r--r--numpy/core/src/multiarray/lowlevel_strided_loops.c.src4
-rw-r--r--numpy/core/src/multiarray/multiarraymodule.c2
-rw-r--r--numpy/core/src/multiarray/scalartypes.c.src49
-rw-r--r--numpy/core/src/umath/_scaled_float_dtype.c67
-rw-r--r--numpy/core/src/umath/dispatching.c74
-rw-r--r--numpy/core/src/umath/dispatching.h4
-rw-r--r--numpy/core/src/umath/legacy_array_method.c4
-rw-r--r--numpy/core/src/umath/loops.c.src2
-rw-r--r--numpy/core/src/umath/loops_exponent_log.dispatch.c.src2
-rw-r--r--numpy/core/src/umath/ufunc_object.c11
-rw-r--r--numpy/core/tests/test_casting_unittests.py9
-rw-r--r--numpy/core/tests/test_custom_dtypes.py12
-rw-r--r--numpy/core/tests/test_datetime.py14
-rw-r--r--numpy/core/tests/test_deprecations.py48
-rw-r--r--numpy/core/tests/test_dtype.py9
-rw-r--r--numpy/core/tests/test_multiarray.py12
-rw-r--r--numpy/core/tests/test_numeric.py10
-rw-r--r--numpy/core/tests/test_scalar_methods.py26
-rw-r--r--numpy/core/tests/test_simd.py2
-rw-r--r--numpy/core/tests/test_ufunc.py87
-rw-r--r--numpy/core/tests/test_umath_complex.py56
-rw-r--r--numpy/distutils/ccompiler_opt.py11
-rw-r--r--numpy/distutils/cpuinfo.py4
-rw-r--r--numpy/distutils/exec_command.py2
-rw-r--r--numpy/distutils/fcompiler/compaq.py4
-rw-r--r--numpy/distutils/misc_util.py9
-rw-r--r--numpy/distutils/npy_pkg_config.py4
-rw-r--r--numpy/distutils/system_info.py4
-rw-r--r--numpy/distutils/unixccompiler.py2
-rw-r--r--numpy/f2py/__init__.pyi3
-rw-r--r--numpy/f2py/cb_rules.py1
-rwxr-xr-xnumpy/f2py/crackfortran.py4
-rwxr-xr-xnumpy/f2py/f2py2e.py62
-rw-r--r--numpy/f2py/tests/util.py2
-rw-r--r--numpy/fft/tests/test_pocketfft.py2
-rw-r--r--numpy/lib/__init__.pyi1
-rw-r--r--numpy/lib/_datasource.py2
-rw-r--r--numpy/lib/arraypad.pyi17
-rw-r--r--numpy/lib/arrayterator.pyi1
-rw-r--r--numpy/lib/format.py5
-rw-r--r--numpy/lib/format.pyi8
-rw-r--r--numpy/lib/function_base.py4
-rw-r--r--numpy/lib/index_tricks.py18
-rw-r--r--numpy/lib/index_tricks.pyi8
-rw-r--r--numpy/lib/npyio.py321
-rw-r--r--numpy/lib/npyio.pyi335
-rw-r--r--numpy/lib/polynomial.py23
-rw-r--r--numpy/lib/shape_base.pyi13
-rw-r--r--numpy/lib/stride_tricks.pyi83
-rw-r--r--numpy/lib/tests/test__datasource.py6
-rw-r--r--numpy/lib/tests/test_function_base.py20
-rw-r--r--numpy/lib/tests/test_io.py28
-rw-r--r--numpy/lib/tests/test_shape_base.py10
-rw-r--r--numpy/lib/tests/test_utils.py6
-rw-r--r--numpy/lib/type_check.pyi8
-rw-r--r--numpy/lib/utils.py6
-rw-r--r--numpy/lib/utils.pyi12
-rw-r--r--numpy/ma/core.py8
-rw-r--r--numpy/ma/mrecords.py4
-rw-r--r--numpy/ma/tests/test_old_ma.py16
-rw-r--r--numpy/random/_generator.pyi8
-rw-r--r--numpy/random/_generator.pyx2
-rw-r--r--numpy/random/_mt19937.pyi8
-rw-r--r--numpy/random/_pcg64.pyi8
-rw-r--r--numpy/random/_philox.pyi8
-rw-r--r--numpy/random/_sfc64.pyi8
-rw-r--r--numpy/random/bit_generator.pyi7
-rw-r--r--numpy/random/mtrand.pyi8
-rw-r--r--numpy/random/mtrand.pyx2
-rw-r--r--numpy/random/tests/test_generator_mt19937_regressions.py4
-rw-r--r--numpy/random/tests/test_randomstate_regression.py4
-rw-r--r--numpy/random/tests/test_regression.py4
-rw-r--r--numpy/setup.py1
-rw-r--r--numpy/testing/_private/utils.pyi31
-rw-r--r--numpy/tests/test_public_api.py21
-rw-r--r--numpy/typing/__init__.py26
-rw-r--r--numpy/typing/_array_like.py26
-rw-r--r--numpy/typing/_callable.py587
-rw-r--r--numpy/typing/_char_codes.py282
-rw-r--r--numpy/typing/_dtype_like.py51
-rw-r--r--numpy/typing/_shape.py12
-rw-r--r--numpy/typing/_ufunc.pyi28
-rw-r--r--numpy/typing/tests/data/fail/modules.py1
-rw-r--r--numpy/typing/tests/data/fail/npyio.py30
-rw-r--r--numpy/typing/tests/data/fail/scalars.py1
-rw-r--r--numpy/typing/tests/data/fail/stride_tricks.py9
-rw-r--r--numpy/typing/tests/data/reveal/arraypad.py3
-rw-r--r--numpy/typing/tests/data/reveal/npyio.py91
-rw-r--r--numpy/typing/tests/data/reveal/scalars.py2
-rw-r--r--numpy/typing/tests/data/reveal/stride_tricks.py28
-rw-r--r--numpy/typing/tests/test_runtime.py10
-rw-r--r--numpy/typing/tests/test_typing_extensions.py35
145 files changed, 5663 insertions, 1657 deletions
diff --git a/numpy/__init__.py b/numpy/__init__.py
index 5e7f74059..ffef369e3 100644
--- a/numpy/__init__.py
+++ b/numpy/__init__.py
@@ -271,70 +271,54 @@ else:
oldnumeric = 'removed'
numarray = 'removed'
- if sys.version_info[:2] >= (3, 7):
- # module level getattr is only supported in 3.7 onwards
- # https://www.python.org/dev/peps/pep-0562/
- def __getattr__(attr):
- # Warn for expired attributes, and return a dummy function
- # that always raises an exception.
- try:
- msg = __expired_functions__[attr]
- except KeyError:
- pass
- else:
- warnings.warn(msg, DeprecationWarning, stacklevel=2)
-
- def _expired(*args, **kwds):
- raise RuntimeError(msg)
-
- return _expired
-
- # Emit warnings for deprecated attributes
- try:
- val, msg = __deprecated_attrs__[attr]
- except KeyError:
- pass
- else:
- warnings.warn(msg, DeprecationWarning, stacklevel=2)
- return val
-
- # Importing Tester requires importing all of UnitTest which is not a
- # cheap import Since it is mainly used in test suits, we lazy import it
- # here to save on the order of 10 ms of import time for most users
- #
- # The previous way Tester was imported also had a side effect of adding
- # the full `numpy.testing` namespace
- if attr == 'testing':
- import numpy.testing as testing
- return testing
- elif attr == 'Tester':
- from .testing import Tester
- return Tester
-
- raise AttributeError("module {!r} has no attribute "
- "{!r}".format(__name__, attr))
-
- def __dir__():
- return list(globals().keys() | {'Tester', 'testing'})
+ def __getattr__(attr):
+ # Warn for expired attributes, and return a dummy function
+ # that always raises an exception.
+ try:
+ msg = __expired_functions__[attr]
+ except KeyError:
+ pass
+ else:
+ warnings.warn(msg, DeprecationWarning, stacklevel=2)
- else:
- # We don't actually use this ourselves anymore, but I'm not 100% sure that
- # no-one else in the world is using it (though I hope not)
- from .testing import Tester
+ def _expired(*args, **kwds):
+ raise RuntimeError(msg)
- # We weren't able to emit a warning about these, so keep them around
- globals().update({
- k: v
- for k, (v, msg) in __deprecated_attrs__.items()
- })
+ return _expired
+ # Emit warnings for deprecated attributes
+ try:
+ val, msg = __deprecated_attrs__[attr]
+ except KeyError:
+ pass
+ else:
+ warnings.warn(msg, DeprecationWarning, stacklevel=2)
+ return val
+
+ # Importing Tester requires importing all of UnitTest which is not a
+ # cheap import Since it is mainly used in test suits, we lazy import it
+ # here to save on the order of 10 ms of import time for most users
+ #
+ # The previous way Tester was imported also had a side effect of adding
+ # the full `numpy.testing` namespace
+ if attr == 'testing':
+ import numpy.testing as testing
+ return testing
+ elif attr == 'Tester':
+ from .testing import Tester
+ return Tester
+
+ raise AttributeError("module {!r} has no attribute "
+ "{!r}".format(__name__, attr))
+
+ def __dir__():
+ return list(globals().keys() | {'Tester', 'testing'})
# Pytest testing
from numpy._pytesttester import PytestTester
test = PytestTester(__name__)
del PytestTester
-
def _sanity_check():
"""
Quick sanity checks for common bugs caused by environment.
diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi
index 49c17a015..dafedeb56 100644
--- a/numpy/__init__.pyi
+++ b/numpy/__init__.pyi
@@ -165,6 +165,7 @@ from numpy.typing._extended_precision import (
)
from typing import (
+ Literal as L,
Any,
ByteString,
Callable,
@@ -190,13 +191,11 @@ from typing import (
Type,
TypeVar,
Union,
+ Protocol,
+ SupportsIndex,
+ Final,
)
-if sys.version_info >= (3, 8):
- from typing import Literal as L, Protocol, SupportsIndex, Final
-else:
- from typing_extensions import Literal as L, Protocol, SupportsIndex, Final
-
# Ensures that the stubs are picked up
from numpy import (
char as char,
@@ -517,7 +516,6 @@ from numpy.lib.npyio import (
recfromtxt as recfromtxt,
recfromcsv as recfromcsv,
load as load,
- loads as loads,
save as save,
savez as savez,
savez_compressed as savez_compressed,
@@ -624,6 +622,14 @@ from numpy.matrixlib import (
bmat as bmat,
)
+# Protocol for representing file-like-objects accepted
+# by `ndarray.tofile` and `fromfile`
+class _IOProtocol(Protocol):
+ def flush(self) -> object: ...
+ def fileno(self) -> int: ...
+ def tell(self) -> SupportsIndex: ...
+ def seek(self, offset: int, whence: int, /) -> object: ...
+
__all__: List[str]
__path__: List[str]
__version__: str
@@ -1187,9 +1193,9 @@ class flatiter(Generic[_NdArraySubClass]):
self, key: Union[_ArrayLikeInt, slice, ellipsis],
) -> _NdArraySubClass: ...
@overload
- def __array__(self: flatiter[ndarray[Any, _DType]], __dtype: None = ...) -> ndarray[Any, _DType]: ...
+ def __array__(self: flatiter[ndarray[Any, _DType]], dtype: None = ..., /) -> ndarray[Any, _DType]: ...
@overload
- def __array__(self, __dtype: _DType) -> ndarray[Any, _DType]: ...
+ def __array__(self, dtype: _DType, /) -> ndarray[Any, _DType]: ...
_OrderKACF = Optional[L["K", "A", "C", "F"]]
_OrderACF = Optional[L["A", "C", "F"]]
@@ -1218,7 +1224,7 @@ class _ArrayOrScalarCommon:
def __str__(self) -> str: ...
def __repr__(self) -> str: ...
def __copy__(self: _ArraySelf) -> _ArraySelf: ...
- def __deepcopy__(self: _ArraySelf, __memo: Optional[dict] = ...) -> _ArraySelf: ...
+ def __deepcopy__(self: _ArraySelf, memo: None | dict = ..., /) -> _ArraySelf: ...
def __eq__(self, other): ...
def __ne__(self, other): ...
def copy(self: _ArraySelf, order: _OrderKACF = ...) -> _ArraySelf: ...
@@ -1228,7 +1234,10 @@ class _ArrayOrScalarCommon:
# NOTE: `tostring()` is deprecated and therefore excluded
# def tostring(self, order=...): ...
def tofile(
- self, fid: Union[IO[bytes], str, bytes, os.PathLike[Any]], sep: str = ..., format: str = ...
+ self,
+ fid: str | bytes | os.PathLike[str] | os.PathLike[bytes] | _IOProtocol,
+ sep: str = ...,
+ format: str = ...,
) -> None: ...
# generics and 0d arrays return builtin scalars
def tolist(self) -> Any: ...
@@ -1241,7 +1250,7 @@ class _ArrayOrScalarCommon:
def __array_priority__(self) -> float: ...
@property
def __array_struct__(self): ...
- def __setstate__(self, __state): ...
+ def __setstate__(self, state, /): ...
# a `bool_` is returned when `keepdims=True` and `self` is a 0d array
@overload
@@ -1651,7 +1660,7 @@ _ArrayNumber_co = NDArray[Union[bool_, number[Any]]]
_ArrayTD64_co = NDArray[Union[bool_, integer[Any], timedelta64]]
class _SupportsItem(Protocol[_T_co]):
- def item(self, __args: Any) -> _T_co: ...
+ def item(self, args: Any, /) -> _T_co: ...
class _SupportsReal(Protocol[_T_co]):
@property
@@ -1690,20 +1699,22 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]):
order: _OrderKACF = ...,
) -> _ArraySelf: ...
@overload
- def __array__(self, __dtype: None = ...) -> ndarray[Any, _DType_co]: ...
+ def __array__(self, dtype: None = ..., /) -> ndarray[Any, _DType_co]: ...
@overload
- def __array__(self, __dtype: _DType) -> ndarray[Any, _DType]: ...
+ def __array__(self, dtype: _DType, /) -> ndarray[Any, _DType]: ...
def __array_wrap__(
self,
- __array: ndarray[_ShapeType2, _DType],
- __context: None | Tuple[ufunc, Tuple[Any, ...], int] = ...,
+ array: ndarray[_ShapeType2, _DType],
+ context: None | Tuple[ufunc, Tuple[Any, ...], int] = ...,
+ /,
) -> ndarray[_ShapeType2, _DType]: ...
def __array_prepare__(
self,
- __array: ndarray[_ShapeType2, _DType],
- __context: None | Tuple[ufunc, Tuple[Any, ...], int] = ...,
+ array: ndarray[_ShapeType2, _DType],
+ context: None | Tuple[ufunc, Tuple[Any, ...], int] = ...,
+ /,
) -> ndarray[_ShapeType2, _DType]: ...
@property
@@ -1730,16 +1741,17 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]):
@overload
def item(
self: ndarray[Any, dtype[_SupportsItem[_T]]], # type: ignore[type-var]
- __args: Tuple[SupportsIndex, ...],
+ args: Tuple[SupportsIndex, ...],
+ /,
) -> _T: ...
@overload
- def itemset(self, __value: Any) -> None: ...
+ def itemset(self, value: Any, /) -> None: ...
@overload
- def itemset(self, __item: _ShapeLike, __value: Any) -> None: ...
+ def itemset(self, item: _ShapeLike, value: Any, /) -> None: ...
@overload
- def resize(self, __new_shape: _ShapeLike, *, refcheck: bool = ...) -> None: ...
+ def resize(self, new_shape: _ShapeLike, /, *, refcheck: bool = ...) -> None: ...
@overload
def resize(self, *new_shape: SupportsIndex, refcheck: bool = ...) -> None: ...
@@ -1759,7 +1771,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]):
) -> ndarray[Any, _DType_co]: ...
@overload
- def transpose(self: _ArraySelf, __axes: _ShapeLike) -> _ArraySelf: ...
+ def transpose(self: _ArraySelf, axes: _ShapeLike, /) -> _ArraySelf: ...
@overload
def transpose(self: _ArraySelf, *axes: SupportsIndex) -> _ArraySelf: ...
@@ -1898,7 +1910,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]):
@overload
def reshape(
- self, __shape: _ShapeLike, *, order: _OrderACF = ...
+ self, shape: _ShapeLike, /, *, order: _OrderACF = ...
) -> ndarray[Any, _DType_co]: ...
@overload
def reshape(
@@ -2904,9 +2916,9 @@ class generic(_ArrayOrScalarCommon):
@abstractmethod
def __init__(self, *args: Any, **kwargs: Any) -> None: ...
@overload
- def __array__(self: _ScalarType, __dtype: None = ...) -> ndarray[Any, dtype[_ScalarType]]: ...
+ def __array__(self: _ScalarType, dtype: None = ..., /) -> ndarray[Any, dtype[_ScalarType]]: ...
@overload
- def __array__(self, __dtype: _DType) -> ndarray[Any, _DType]: ...
+ def __array__(self, dtype: _DType, /) -> ndarray[Any, _DType]: ...
@property
def base(self) -> None: ...
@property
@@ -2974,8 +2986,7 @@ class generic(_ArrayOrScalarCommon):
) -> Any: ...
def item(
- self,
- __args: Union[L[0], Tuple[()], Tuple[L[0]]] = ...,
+ self, args: L[0] | Tuple[()] | Tuple[L[0]] = ..., /,
) -> Any: ...
@overload
@@ -3021,7 +3032,7 @@ class generic(_ArrayOrScalarCommon):
@overload
def reshape(
- self: _ScalarType, __shape: _ShapeLike, *, order: _OrderACF = ...
+ self: _ScalarType, shape: _ShapeLike, /, *, order: _OrderACF = ...
) -> ndarray[Any, dtype[_ScalarType]]: ...
@overload
def reshape(
@@ -3031,7 +3042,7 @@ class generic(_ArrayOrScalarCommon):
def squeeze(
self: _ScalarType, axis: Union[L[0], Tuple[()]] = ...
) -> _ScalarType: ...
- def transpose(self: _ScalarType, __axes: Tuple[()] = ...) -> _ScalarType: ...
+ def transpose(self: _ScalarType, axes: Tuple[()] = ..., /) -> _ScalarType: ...
# Keep `dtype` at the bottom to avoid name conflicts with `np.dtype`
@property
def dtype(self: _ScalarType) -> dtype[_ScalarType]: ...
@@ -3066,10 +3077,9 @@ class number(generic, Generic[_NBit1]): # type: ignore
__ge__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co]
class bool_(generic):
- def __init__(self, __value: object = ...) -> None: ...
+ def __init__(self, value: object = ..., /) -> None: ...
def item(
- self,
- __args: Union[L[0], Tuple[()], Tuple[L[0]]] = ...,
+ self, args: L[0] | Tuple[()] | Tuple[L[0]] = ..., /,
) -> bool: ...
def tolist(self) -> bool: ...
@property
@@ -3115,7 +3125,7 @@ class bool_(generic):
bool8 = bool_
class object_(generic):
- def __init__(self, __value: object = ...) -> None: ...
+ def __init__(self, value: object = ..., /) -> None: ...
@property
def real(self: _ArraySelf) -> _ArraySelf: ...
@property
@@ -3144,14 +3154,16 @@ class datetime64(generic):
@overload
def __init__(
self,
- __value: Union[None, datetime64, _CharLike_co, _DatetimeScalar] = ...,
- __format: Union[_CharLike_co, Tuple[_CharLike_co, _IntLike_co]] = ...,
+ value: None | datetime64 | _CharLike_co | _DatetimeScalar = ...,
+ format: _CharLike_co | Tuple[_CharLike_co, _IntLike_co] = ...,
+ /,
) -> None: ...
@overload
def __init__(
self,
- __value: int,
- __format: Union[_CharLike_co, Tuple[_CharLike_co, _IntLike_co]]
+ value: int,
+ format: _CharLike_co | Tuple[_CharLike_co, _IntLike_co],
+ /,
) -> None: ...
def __add__(self, other: _TD64Like_co) -> datetime64: ...
def __radd__(self, other: _TD64Like_co) -> datetime64: ...
@@ -3165,28 +3177,16 @@ class datetime64(generic):
__gt__: _ComparisonOp[datetime64, _ArrayLikeDT64_co]
__ge__: _ComparisonOp[datetime64, _ArrayLikeDT64_co]
-# Support for `__index__` was added in python 3.8 (bpo-20092)
-if sys.version_info >= (3, 8):
- _IntValue = Union[SupportsInt, _CharLike_co, SupportsIndex]
- _FloatValue = Union[None, _CharLike_co, SupportsFloat, SupportsIndex]
- _ComplexValue = Union[
- None,
- _CharLike_co,
- SupportsFloat,
- SupportsComplex,
- SupportsIndex,
- complex, # `complex` is not a subtype of `SupportsComplex`
- ]
-else:
- _IntValue = Union[SupportsInt, _CharLike_co]
- _FloatValue = Union[None, _CharLike_co, SupportsFloat]
- _ComplexValue = Union[
- None,
- _CharLike_co,
- SupportsFloat,
- SupportsComplex,
- complex,
- ]
+_IntValue = Union[SupportsInt, _CharLike_co, SupportsIndex]
+_FloatValue = Union[None, _CharLike_co, SupportsFloat, SupportsIndex]
+_ComplexValue = Union[
+ None,
+ _CharLike_co,
+ SupportsFloat,
+ SupportsComplex,
+ SupportsIndex,
+ complex, # `complex` is not a subtype of `SupportsComplex`
+]
class integer(number[_NBit1]): # type: ignore
@property
@@ -3201,10 +3201,10 @@ class integer(number[_NBit1]): # type: ignore
# NOTE: `__index__` is technically defined in the bottom-most
# sub-classes (`int64`, `uint32`, etc)
def item(
- self,
- __args: Union[L[0], Tuple[()], Tuple[L[0]]] = ...,
+ self, args: L[0] | Tuple[()] | Tuple[L[0]] = ..., /,
) -> int: ...
def tolist(self) -> int: ...
+ def is_integer(self) -> L[True]: ...
def __index__(self) -> int: ...
__truediv__: _IntTrueDiv[_NBit1]
__rtruediv__: _IntTrueDiv[_NBit1]
@@ -3224,7 +3224,7 @@ class integer(number[_NBit1]): # type: ignore
def __rxor__(self, other: _IntLike_co) -> integer: ...
class signedinteger(integer[_NBit1]):
- def __init__(self, __value: _IntValue = ...) -> None: ...
+ def __init__(self, value: _IntValue = ..., /) -> None: ...
__add__: _SignedIntOp[_NBit1]
__radd__: _SignedIntOp[_NBit1]
__sub__: _SignedIntOp[_NBit1]
@@ -3268,8 +3268,9 @@ longlong = signedinteger[_NBitLongLong]
class timedelta64(generic):
def __init__(
self,
- __value: Union[None, int, _CharLike_co, dt.timedelta, timedelta64] = ...,
- __format: Union[_CharLike_co, Tuple[_CharLike_co, _IntLike_co]] = ...,
+ value: None | int | _CharLike_co | dt.timedelta | timedelta64 = ...,
+ format: _CharLike_co | Tuple[_CharLike_co, _IntLike_co] = ...,
+ /,
) -> None: ...
@property
def numerator(self: _ScalarType) -> _ScalarType: ...
@@ -3305,7 +3306,7 @@ class timedelta64(generic):
class unsignedinteger(integer[_NBit1]):
# NOTE: `uint64 + signedinteger -> float64`
- def __init__(self, __value: _IntValue = ...) -> None: ...
+ def __init__(self, value: _IntValue = ..., /) -> None: ...
__add__: _UnsignedIntOp[_NBit1]
__radd__: _UnsignedIntOp[_NBit1]
__sub__: _UnsignedIntOp[_NBit1]
@@ -3351,23 +3352,23 @@ _IntType = TypeVar("_IntType", bound=integer)
_FloatType = TypeVar('_FloatType', bound=floating)
class floating(inexact[_NBit1]):
- def __init__(self, __value: _FloatValue = ...) -> None: ...
+ def __init__(self, value: _FloatValue = ..., /) -> None: ...
def item(
- self,
- __args: Union[L[0], Tuple[()], Tuple[L[0]]] = ...,
+ self, args: L[0] | Tuple[()] | Tuple[L[0]] = ...,
+ /,
) -> float: ...
def tolist(self) -> float: ...
- def is_integer(self: float64) -> bool: ...
+ def is_integer(self) -> bool: ...
def hex(self: float64) -> str: ...
@classmethod
- def fromhex(cls: Type[float64], __string: str) -> float64: ...
+ def fromhex(cls: Type[float64], string: str, /) -> float64: ...
def as_integer_ratio(self) -> Tuple[int, int]: ...
if sys.version_info >= (3, 9):
def __ceil__(self: float64) -> int: ...
def __floor__(self: float64) -> int: ...
def __trunc__(self: float64) -> int: ...
def __getnewargs__(self: float64) -> Tuple[float]: ...
- def __getformat__(self: float64, __typestr: L["double", "float"]) -> str: ...
+ def __getformat__(self: float64, typestr: L["double", "float"], /) -> str: ...
@overload
def __round__(self, ndigits: None = ...) -> int: ...
@overload
@@ -3405,10 +3406,9 @@ longfloat = floating[_NBitLongDouble]
# describing the two 64 bit floats representing its real and imaginary component
class complexfloating(inexact[_NBit1], Generic[_NBit1, _NBit2]):
- def __init__(self, __value: _ComplexValue = ...) -> None: ...
+ def __init__(self, value: _ComplexValue = ..., /) -> None: ...
def item(
- self,
- __args: Union[L[0], Tuple[()], Tuple[L[0]]] = ...,
+ self, args: L[0] | Tuple[()] | Tuple[L[0]] = ..., /,
) -> complex: ...
def tolist(self) -> complex: ...
@property
@@ -3448,7 +3448,7 @@ class flexible(generic): ... # type: ignore
# depending on whether or not it's used as an opaque bytes sequence
# or a structure
class void(flexible):
- def __init__(self, __value: Union[_IntLike_co, bytes]) -> None: ...
+ def __init__(self, value: _IntLike_co | bytes, /) -> None: ...
@property
def real(self: _ArraySelf) -> _ArraySelf: ...
@property
@@ -3470,14 +3470,13 @@ class character(flexible): # type: ignore
class bytes_(character, bytes):
@overload
- def __init__(self, __value: object = ...) -> None: ...
+ def __init__(self, value: object = ..., /) -> None: ...
@overload
def __init__(
- self, __value: str, encoding: str = ..., errors: str = ...
+ self, value: str, /, encoding: str = ..., errors: str = ...
) -> None: ...
def item(
- self,
- __args: Union[L[0], Tuple[()], Tuple[L[0]]] = ...,
+ self, args: L[0] | Tuple[()] | Tuple[L[0]] = ..., /,
) -> bytes: ...
def tolist(self) -> bytes: ...
@@ -3486,14 +3485,13 @@ bytes0 = bytes_
class str_(character, str):
@overload
- def __init__(self, __value: object = ...) -> None: ...
+ def __init__(self, value: object = ..., /) -> None: ...
@overload
def __init__(
- self, __value: bytes, encoding: str = ..., errors: str = ...
+ self, value: bytes, /, encoding: str = ..., errors: str = ...
) -> None: ...
def item(
- self,
- __args: Union[L[0], Tuple[()], Tuple[L[0]]] = ...,
+ self, args: L[0] | Tuple[()] | Tuple[L[0]] = ..., /,
) -> str: ...
def tolist(self) -> str: ...
@@ -3733,9 +3731,10 @@ class errstate(Generic[_CallType], ContextDecorator):
def __enter__(self) -> None: ...
def __exit__(
self,
- __exc_type: Optional[Type[BaseException]],
- __exc_value: Optional[BaseException],
- __traceback: Optional[TracebackType],
+ exc_type: Optional[Type[BaseException]],
+ exc_value: Optional[BaseException],
+ traceback: Optional[TracebackType],
+ /,
) -> None: ...
class ndenumerate(Generic[_ScalarType]):
diff --git a/numpy/_pytesttester.py b/numpy/_pytesttester.py
index acfaa1ca5..8decb9dd7 100644
--- a/numpy/_pytesttester.py
+++ b/numpy/_pytesttester.py
@@ -137,13 +137,20 @@ class PytestTester:
# offset verbosity. The "-q" cancels a "-v".
pytest_args += ["-q"]
- # Filter out distutils cpu warnings (could be localized to
- # distutils tests). ASV has problems with top level import,
- # so fetch module for suppression here.
with warnings.catch_warnings():
warnings.simplefilter("always")
+ # Filter out distutils cpu warnings (could be localized to
+ # distutils tests). ASV has problems with top level import,
+ # so fetch module for suppression here.
from numpy.distutils import cpuinfo
+ with warnings.catch_warnings(record=True):
+ # Ignore the warning from importing the array_api submodule. This
+ # warning is done on import, so it would break pytest collection,
+ # but importing it early here prevents the warning from being
+ # issued when it imported again.
+ import numpy.array_api
+
# Filter out annoying import messages. Want these in both develop and
# release mode.
pytest_args += [
diff --git a/numpy/_pytesttester.pyi b/numpy/_pytesttester.pyi
index 693f4128a..0be64b3f7 100644
--- a/numpy/_pytesttester.pyi
+++ b/numpy/_pytesttester.pyi
@@ -1,5 +1,4 @@
-from typing import List, Iterable
-from typing_extensions import Literal as L
+from typing import List, Iterable, Literal as L
__all__: List[str]
diff --git a/numpy/array_api/__init__.py b/numpy/array_api/__init__.py
new file mode 100644
index 000000000..790157504
--- /dev/null
+++ b/numpy/array_api/__init__.py
@@ -0,0 +1,370 @@
+"""
+A NumPy sub-namespace that conforms to the Python array API standard.
+
+This submodule accompanies NEP 47, which proposes its inclusion in NumPy. It
+is still considered experimental, and will issue a warning when imported.
+
+This is a proof-of-concept namespace that wraps the corresponding NumPy
+functions to give a conforming implementation of the Python array API standard
+(https://data-apis.github.io/array-api/latest/). The standard is currently in
+an RFC phase and comments on it are both welcome and encouraged. Comments
+should be made either at https://github.com/data-apis/array-api or at
+https://github.com/data-apis/consortium-feedback/discussions.
+
+NumPy already follows the proposed spec for the most part, so this module
+serves mostly as a thin wrapper around it. However, NumPy also implements a
+lot of behavior that is not included in the spec, so this serves as a
+restricted subset of the API. Only those functions that are part of the spec
+are included in this namespace, and all functions are given with the exact
+signature given in the spec, including the use of position-only arguments, and
+omitting any extra keyword arguments implemented by NumPy but not part of the
+spec. The behavior of some functions is also modified from the NumPy behavior
+to conform to the standard. Note that the underlying array object itself is
+wrapped in a wrapper Array() class, but is otherwise unchanged. This submodule
+is implemented in pure Python with no C extensions.
+
+The array API spec is designed as a "minimal API subset" and explicitly allows
+libraries to include behaviors not specified by it. But users of this module
+that intend to write portable code should be aware that only those behaviors
+that are listed in the spec are guaranteed to be implemented across libraries.
+Consequently, the NumPy implementation was chosen to be both conforming and
+minimal, so that users can use this implementation of the array API namespace
+and be sure that behaviors that it defines will be available in conforming
+namespaces from other libraries.
+
+A few notes about the current state of this submodule:
+
+- There is a test suite that tests modules against the array API standard at
+ https://github.com/data-apis/array-api-tests. The test suite is still a work
+ in progress, but the existing tests pass on this module, with a few
+ exceptions:
+
+ - DLPack support (see https://github.com/data-apis/array-api/pull/106) is
+ not included here, as it requires a full implementation in NumPy proper
+ first.
+
+ The test suite is not yet complete, and even the tests that exist are not
+ guaranteed to give a comprehensive coverage of the spec. Therefore, when
+ reviewing and using this submodule, you should refer to the standard
+ documents themselves. There are some tests in numpy.array_api.tests, but
+ they primarily focus on things that are not tested by the official array API
+ test suite.
+
+- There is a custom array object, numpy.array_api.Array, which is returned by
+ all functions in this module. All functions in the array API namespace
+ implicitly assume that they will only receive this object as input. The only
+ way to create instances of this object is to use one of the array creation
+ functions. It does not have a public constructor on the object itself. The
+ object is a small wrapper class around numpy.ndarray. The main purpose of it
+ is to restrict the namespace of the array object to only those dtypes and
+ only those methods that are required by the spec, as well as to limit/change
+ certain behavior that differs in the spec. In particular:
+
+ - The array API namespace does not have scalar objects, only 0-D arrays.
+ Operations on Array that would create a scalar in NumPy create a 0-D
+ array.
+
+ - Indexing: Only a subset of indices supported by NumPy are required by the
+ spec. The Array object restricts indexing to only allow those types of
+ indices that are required by the spec. See the docstring of the
+ numpy.array_api.Array._validate_indices helper function for more
+ information.
+
+ - Type promotion: Some type promotion rules are different in the spec. In
+ particular, the spec does not have any value-based casting. The spec also
+ does not require cross-kind casting, like integer -> floating-point. Only
+ those promotions that are explicitly required by the array API
+ specification are allowed in this module. See NEP 47 for more info.
+
+ - Functions do not automatically call asarray() on their input, and will not
+ work if the input type is not Array. The exception is array creation
+ functions, and Python operators on the Array object, which accept Python
+ scalars of the same type as the array dtype.
+
+- All functions include type annotations, corresponding to those given in the
+ spec (see _typing.py for definitions of some custom types). These do not
+ currently fully pass mypy due to some limitations in mypy.
+
+- Dtype objects are just the NumPy dtype objects, e.g., float64 =
+ np.dtype('float64'). The spec does not require any behavior on these dtype
+ objects other than that they be accessible by name and be comparable by
+ equality, but it was considered too much extra complexity to create custom
+ objects to represent dtypes.
+
+- All places where the implementations in this submodule are known to deviate
+ from their corresponding functions in NumPy are marked with "# Note:"
+ comments.
+
+Still TODO in this module are:
+
+- DLPack support for numpy.ndarray is still in progress. See
+ https://github.com/numpy/numpy/pull/19083.
+
+- The copy=False keyword argument to asarray() is not yet implemented. This
+ requires support in numpy.asarray() first.
+
+- Some functions are not yet fully tested in the array API test suite, and may
+ require updates that are not yet known until the tests are written.
+
+- The spec is still in an RFC phase and may still have minor updates, which
+ will need to be reflected here.
+
+- The linear algebra extension in the spec will be added in a future pull
+ request.
+
+- Complex number support in array API spec is planned but not yet finalized,
+ as are the fft extension and certain linear algebra functions such as eig
+ that require complex dtypes.
+
+"""
+
+import warnings
+
+warnings.warn(
+ "The numpy.array_api submodule is still experimental. See NEP 47.", stacklevel=2
+)
+
+__all__ = []
+
+from ._constants import e, inf, nan, pi
+
+__all__ += ["e", "inf", "nan", "pi"]
+
+from ._creation_functions import (
+ asarray,
+ arange,
+ empty,
+ empty_like,
+ eye,
+ from_dlpack,
+ full,
+ full_like,
+ linspace,
+ meshgrid,
+ ones,
+ ones_like,
+ zeros,
+ zeros_like,
+)
+
+__all__ += [
+ "asarray",
+ "arange",
+ "empty",
+ "empty_like",
+ "eye",
+ "from_dlpack",
+ "full",
+ "full_like",
+ "linspace",
+ "meshgrid",
+ "ones",
+ "ones_like",
+ "zeros",
+ "zeros_like",
+]
+
+from ._data_type_functions import (
+ broadcast_arrays,
+ broadcast_to,
+ can_cast,
+ finfo,
+ iinfo,
+ result_type,
+)
+
+__all__ += [
+ "broadcast_arrays",
+ "broadcast_to",
+ "can_cast",
+ "finfo",
+ "iinfo",
+ "result_type",
+]
+
+from ._dtypes import (
+ int8,
+ int16,
+ int32,
+ int64,
+ uint8,
+ uint16,
+ uint32,
+ uint64,
+ float32,
+ float64,
+ bool,
+)
+
+__all__ += [
+ "int8",
+ "int16",
+ "int32",
+ "int64",
+ "uint8",
+ "uint16",
+ "uint32",
+ "uint64",
+ "float32",
+ "float64",
+ "bool",
+]
+
+from ._elementwise_functions import (
+ abs,
+ acos,
+ acosh,
+ add,
+ asin,
+ asinh,
+ atan,
+ atan2,
+ atanh,
+ bitwise_and,
+ bitwise_left_shift,
+ bitwise_invert,
+ bitwise_or,
+ bitwise_right_shift,
+ bitwise_xor,
+ ceil,
+ cos,
+ cosh,
+ divide,
+ equal,
+ exp,
+ expm1,
+ floor,
+ floor_divide,
+ greater,
+ greater_equal,
+ isfinite,
+ isinf,
+ isnan,
+ less,
+ less_equal,
+ log,
+ log1p,
+ log2,
+ log10,
+ logaddexp,
+ logical_and,
+ logical_not,
+ logical_or,
+ logical_xor,
+ multiply,
+ negative,
+ not_equal,
+ positive,
+ pow,
+ remainder,
+ round,
+ sign,
+ sin,
+ sinh,
+ square,
+ sqrt,
+ subtract,
+ tan,
+ tanh,
+ trunc,
+)
+
+__all__ += [
+ "abs",
+ "acos",
+ "acosh",
+ "add",
+ "asin",
+ "asinh",
+ "atan",
+ "atan2",
+ "atanh",
+ "bitwise_and",
+ "bitwise_left_shift",
+ "bitwise_invert",
+ "bitwise_or",
+ "bitwise_right_shift",
+ "bitwise_xor",
+ "ceil",
+ "cos",
+ "cosh",
+ "divide",
+ "equal",
+ "exp",
+ "expm1",
+ "floor",
+ "floor_divide",
+ "greater",
+ "greater_equal",
+ "isfinite",
+ "isinf",
+ "isnan",
+ "less",
+ "less_equal",
+ "log",
+ "log1p",
+ "log2",
+ "log10",
+ "logaddexp",
+ "logical_and",
+ "logical_not",
+ "logical_or",
+ "logical_xor",
+ "multiply",
+ "negative",
+ "not_equal",
+ "positive",
+ "pow",
+ "remainder",
+ "round",
+ "sign",
+ "sin",
+ "sinh",
+ "square",
+ "sqrt",
+ "subtract",
+ "tan",
+ "tanh",
+ "trunc",
+]
+
+# einsum is not yet implemented in the array API spec.
+
+# from ._linear_algebra_functions import einsum
+# __all__ += ['einsum']
+
+from ._linear_algebra_functions import matmul, tensordot, transpose, vecdot
+
+__all__ += ["matmul", "tensordot", "transpose", "vecdot"]
+
+from ._manipulation_functions import (
+ concat,
+ expand_dims,
+ flip,
+ reshape,
+ roll,
+ squeeze,
+ stack,
+)
+
+__all__ += ["concat", "expand_dims", "flip", "reshape", "roll", "squeeze", "stack"]
+
+from ._searching_functions import argmax, argmin, nonzero, where
+
+__all__ += ["argmax", "argmin", "nonzero", "where"]
+
+from ._set_functions import unique
+
+__all__ += ["unique"]
+
+from ._sorting_functions import argsort, sort
+
+__all__ += ["argsort", "sort"]
+
+from ._statistical_functions import max, mean, min, prod, std, sum, var
+
+__all__ += ["max", "mean", "min", "prod", "std", "sum", "var"]
+
+from ._utility_functions import all, any
+
+__all__ += ["all", "any"]
diff --git a/numpy/array_api/_array_object.py b/numpy/array_api/_array_object.py
new file mode 100644
index 000000000..2d746e78b
--- /dev/null
+++ b/numpy/array_api/_array_object.py
@@ -0,0 +1,1029 @@
+"""
+Wrapper class around the ndarray object for the array API standard.
+
+The array API standard defines some behaviors differently than ndarray, in
+particular, type promotion rules are different (the standard has no
+value-based casting). The standard also specifies a more limited subset of
+array methods and functionalities than are implemented on ndarray. Since the
+goal of the array_api namespace is to be a minimal implementation of the array
+API standard, we need to define a separate wrapper class for the array_api
+namespace.
+
+The standard compliant class is only a wrapper class. It is *not* a subclass
+of ndarray.
+"""
+
+from __future__ import annotations
+
+import operator
+from enum import IntEnum
+from ._creation_functions import asarray
+from ._dtypes import (
+ _all_dtypes,
+ _boolean_dtypes,
+ _integer_dtypes,
+ _integer_or_boolean_dtypes,
+ _floating_dtypes,
+ _numeric_dtypes,
+ _result_type,
+ _dtype_categories,
+)
+
+from typing import TYPE_CHECKING, Optional, Tuple, Union
+
+if TYPE_CHECKING:
+ from ._typing import PyCapsule, Device, Dtype
+
+import numpy as np
+
+from numpy import array_api
+
+
+class Array:
+ """
+ n-d array object for the array API namespace.
+
+ See the docstring of :py:obj:`np.ndarray <numpy.ndarray>` for more
+ information.
+
+ This is a wrapper around numpy.ndarray that restricts the usage to only
+ those things that are required by the array API namespace. Note,
+ attributes on this object that start with a single underscore are not part
+ of the API specification and should only be used internally. This object
+ should not be constructed directly. Rather, use one of the creation
+ functions, such as asarray().
+
+ """
+
+ # Use a custom constructor instead of __init__, as manually initializing
+ # this class is not supported API.
+ @classmethod
+ def _new(cls, x, /):
+ """
+ This is a private method for initializing the array API Array
+ object.
+
+ Functions outside of the array_api submodule should not use this
+ method. Use one of the creation functions instead, such as
+ ``asarray``.
+
+ """
+ obj = super().__new__(cls)
+ # Note: The spec does not have array scalars, only 0-D arrays.
+ if isinstance(x, np.generic):
+ # Convert the array scalar to a 0-D array
+ x = np.asarray(x)
+ if x.dtype not in _all_dtypes:
+ raise TypeError(
+ f"The array_api namespace does not support the dtype '{x.dtype}'"
+ )
+ obj._array = x
+ return obj
+
+ # Prevent Array() from working
+ def __new__(cls, *args, **kwargs):
+ raise TypeError(
+ "The array_api Array object should not be instantiated directly. Use an array creation function, such as asarray(), instead."
+ )
+
+ # These functions are not required by the spec, but are implemented for
+ # the sake of usability.
+
+ def __str__(self: Array, /) -> str:
+ """
+ Performs the operation __str__.
+ """
+ return self._array.__str__().replace("array", "Array")
+
+ def __repr__(self: Array, /) -> str:
+ """
+ Performs the operation __repr__.
+ """
+ return f"Array({np.array2string(self._array, separator=', ')}, dtype={self.dtype.name})"
+
+ # These are various helper functions to make the array behavior match the
+ # spec in places where it either deviates from or is more strict than
+ # NumPy behavior
+
+ def _check_allowed_dtypes(self, other, dtype_category, op):
+ """
+ Helper function for operators to only allow specific input dtypes
+
+ Use like
+
+ other = self._check_allowed_dtypes(other, 'numeric', '__add__')
+ if other is NotImplemented:
+ return other
+ """
+
+ if self.dtype not in _dtype_categories[dtype_category]:
+ raise TypeError(f"Only {dtype_category} dtypes are allowed in {op}")
+ if isinstance(other, (int, float, bool)):
+ other = self._promote_scalar(other)
+ elif isinstance(other, Array):
+ if other.dtype not in _dtype_categories[dtype_category]:
+ raise TypeError(f"Only {dtype_category} dtypes are allowed in {op}")
+ else:
+ return NotImplemented
+
+ # This will raise TypeError for type combinations that are not allowed
+ # to promote in the spec (even if the NumPy array operator would
+ # promote them).
+ res_dtype = _result_type(self.dtype, other.dtype)
+ if op.startswith("__i"):
+ # Note: NumPy will allow in-place operators in some cases where
+ # the type promoted operator does not match the left-hand side
+ # operand. For example,
+
+ # >>> a = np.array(1, dtype=np.int8)
+ # >>> a += np.array(1, dtype=np.int16)
+
+ # The spec explicitly disallows this.
+ if res_dtype != self.dtype:
+ raise TypeError(
+ f"Cannot perform {op} with dtypes {self.dtype} and {other.dtype}"
+ )
+
+ return other
+
+ # Helper function to match the type promotion rules in the spec
+ def _promote_scalar(self, scalar):
+ """
+ Returns a promoted version of a Python scalar appropriate for use with
+ operations on self.
+
+ This may raise an OverflowError in cases where the scalar is an
+ integer that is too large to fit in a NumPy integer dtype, or
+ TypeError when the scalar type is incompatible with the dtype of self.
+ """
+ if isinstance(scalar, bool):
+ if self.dtype not in _boolean_dtypes:
+ raise TypeError(
+ "Python bool scalars can only be promoted with bool arrays"
+ )
+ elif isinstance(scalar, int):
+ if self.dtype in _boolean_dtypes:
+ raise TypeError(
+ "Python int scalars cannot be promoted with bool arrays"
+ )
+ elif isinstance(scalar, float):
+ if self.dtype not in _floating_dtypes:
+ raise TypeError(
+ "Python float scalars can only be promoted with floating-point arrays."
+ )
+ else:
+ raise TypeError("'scalar' must be a Python scalar")
+
+ # Note: the spec only specifies integer-dtype/int promotion
+ # behavior for integers within the bounds of the integer dtype.
+ # Outside of those bounds we use the default NumPy behavior (either
+ # cast or raise OverflowError).
+ return Array._new(np.array(scalar, self.dtype))
+
+ @staticmethod
+ def _normalize_two_args(x1, x2):
+ """
+ Normalize inputs to two arg functions to fix type promotion rules
+
+ NumPy deviates from the spec type promotion rules in cases where one
+ argument is 0-dimensional and the other is not. For example:
+
+ >>> import numpy as np
+ >>> a = np.array([1.0], dtype=np.float32)
+ >>> b = np.array(1.0, dtype=np.float64)
+ >>> np.add(a, b) # The spec says this should be float64
+ array([2.], dtype=float32)
+
+ To fix this, we add a dimension to the 0-dimension array before passing it
+ through. This works because a dimension would be added anyway from
+ broadcasting, so the resulting shape is the same, but this prevents NumPy
+ from not promoting the dtype.
+ """
+ # Another option would be to use signature=(x1.dtype, x2.dtype, None),
+ # but that only works for ufuncs, so we would have to call the ufuncs
+ # directly in the operator methods. One should also note that this
+ # sort of trick wouldn't work for functions like searchsorted, which
+ # don't do normal broadcasting, but there aren't any functions like
+ # that in the array API namespace.
+ if x1.ndim == 0 and x2.ndim != 0:
+ # The _array[None] workaround was chosen because it is relatively
+ # performant. broadcast_to(x1._array, x2.shape) is much slower. We
+ # could also manually type promote x2, but that is more complicated
+ # and about the same performance as this.
+ x1 = Array._new(x1._array[None])
+ elif x2.ndim == 0 and x1.ndim != 0:
+ x2 = Array._new(x2._array[None])
+ return (x1, x2)
+
+ # Note: A large fraction of allowed indices are disallowed here (see the
+ # docstring below)
+ @staticmethod
+ def _validate_index(key, shape):
+ """
+ Validate an index according to the array API.
+
+ The array API specification only requires a subset of indices that are
+ supported by NumPy. This function will reject any index that is
+ allowed by NumPy but not required by the array API specification. We
+ always raise ``IndexError`` on such indices (the spec does not require
+ any specific behavior on them, but this makes the NumPy array API
+ namespace a minimal implementation of the spec). See
+ https://data-apis.org/array-api/latest/API_specification/indexing.html
+        for the full list of required indexing behavior.
+
+ This function either raises IndexError if the index ``key`` is
+        invalid, or returns a new key to be used in place of ``key`` in indexing. It
+ only raises ``IndexError`` on indices that are not already rejected by
+ NumPy, as NumPy will already raise the appropriate error on such
+ indices. ``shape`` may be None, in which case, only cases that are
+ independent of the array shape are checked.
+
+ The following cases are allowed by NumPy, but not specified by the array
+ API specification:
+
+ - The start and stop of a slice may not be out of bounds. In
+ particular, for a slice ``i:j:k`` on an axis of size ``n``, only the
+ following are allowed:
+
+ - ``i`` or ``j`` omitted (``None``).
+ - ``-n <= i <= max(0, n - 1)``.
+ - For ``k > 0`` or ``k`` omitted (``None``), ``-n <= j <= n``.
+ - For ``k < 0``, ``-n - 1 <= j <= max(0, n - 1)``.
+
+ - Boolean array indices are not allowed as part of a larger tuple
+ index.
+
+ - Integer array indices are not allowed (with the exception of 0-D
+ arrays, which are treated the same as scalars).
+
+ Additionally, it should be noted that indices that would return a
+ scalar in NumPy will return a 0-D array. Array scalars are not allowed
+ in the specification, only 0-D arrays. This is done in the
+ ``Array._new`` constructor, not this function.
+
+ """
+ if isinstance(key, slice):
+ if shape is None:
+ return key
+ if shape == ():
+ return key
+ size = shape[0]
+ # Ensure invalid slice entries are passed through.
+ if key.start is not None:
+ try:
+ operator.index(key.start)
+ except TypeError:
+ return key
+ if not (-size <= key.start <= max(0, size - 1)):
+ raise IndexError(
+ "Slices with out-of-bounds start are not allowed in the array API namespace"
+ )
+ if key.stop is not None:
+ try:
+ operator.index(key.stop)
+ except TypeError:
+ return key
+ step = 1 if key.step is None else key.step
+ if (step > 0 and not (-size <= key.stop <= size)
+ or step < 0 and not (-size - 1 <= key.stop <= max(0, size - 1))):
+ raise IndexError("Slices with out-of-bounds stop are not allowed in the array API namespace")
+ return key
+
+ elif isinstance(key, tuple):
+ key = tuple(Array._validate_index(idx, None) for idx in key)
+
+ for idx in key:
+ if (
+ isinstance(idx, np.ndarray)
+ and idx.dtype in _boolean_dtypes
+ or isinstance(idx, (bool, np.bool_))
+ ):
+ if len(key) == 1:
+ return key
+ raise IndexError(
+ "Boolean array indices combined with other indices are not allowed in the array API namespace"
+ )
+ if isinstance(idx, tuple):
+ raise IndexError(
+ "Nested tuple indices are not allowed in the array API namespace"
+ )
+
+ if shape is None:
+ return key
+ n_ellipsis = key.count(...)
+ if n_ellipsis > 1:
+ return key
+ ellipsis_i = key.index(...) if n_ellipsis else len(key)
+
+ for idx, size in list(zip(key[:ellipsis_i], shape)) + list(
+ zip(key[:ellipsis_i:-1], shape[:ellipsis_i:-1])
+ ):
+ Array._validate_index(idx, (size,))
+ return key
+ elif isinstance(key, bool):
+ return key
+ elif isinstance(key, Array):
+ if key.dtype in _integer_dtypes:
+ if key.ndim != 0:
+ raise IndexError(
+ "Non-zero dimensional integer array indices are not allowed in the array API namespace"
+ )
+ return key._array
+ elif key is Ellipsis:
+ return key
+ elif key is None:
+ raise IndexError(
+ "newaxis indices are not allowed in the array API namespace"
+ )
+ try:
+ return operator.index(key)
+ except TypeError:
+ # Note: This also omits boolean arrays that are not already in
+ # Array() form, like a list of booleans.
+ raise IndexError(
+ "Only integers, slices (`:`), ellipsis (`...`), and boolean arrays are valid indices in the array API namespace"
+ )
+
+ # Everything below this line is required by the spec.
+
+ def __abs__(self: Array, /) -> Array:
+ """
+ Performs the operation __abs__.
+ """
+ if self.dtype not in _numeric_dtypes:
+ raise TypeError("Only numeric dtypes are allowed in __abs__")
+ res = self._array.__abs__()
+ return self.__class__._new(res)
+
+ def __add__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __add__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__add__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__add__(other._array)
+ return self.__class__._new(res)
+
+ def __and__(self: Array, other: Union[int, bool, Array], /) -> Array:
+ """
+ Performs the operation __and__.
+ """
+ other = self._check_allowed_dtypes(other, "integer or boolean", "__and__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__and__(other._array)
+ return self.__class__._new(res)
+
+ def __array_namespace__(
+ self: Array, /, *, api_version: Optional[str] = None
+ ) -> object:
+ if api_version is not None and not api_version.startswith("2021."):
+ raise ValueError(f"Unrecognized array API version: {api_version!r}")
+ return array_api
+
+ def __bool__(self: Array, /) -> bool:
+ """
+ Performs the operation __bool__.
+ """
+ # Note: This is an error here.
+ if self._array.ndim != 0:
+ raise TypeError("bool is only allowed on arrays with 0 dimensions")
+ res = self._array.__bool__()
+ return res
+
+ def __dlpack__(self: Array, /, *, stream: None = None) -> PyCapsule:
+ """
+ Performs the operation __dlpack__.
+ """
+ res = self._array.__dlpack__(stream=stream)
+ return self.__class__._new(res)
+
+ def __dlpack_device__(self: Array, /) -> Tuple[IntEnum, int]:
+ """
+ Performs the operation __dlpack_device__.
+ """
+ # Note: device support is required for this
+ res = self._array.__dlpack_device__()
+ return self.__class__._new(res)
+
+ def __eq__(self: Array, other: Union[int, float, bool, Array], /) -> Array:
+ """
+ Performs the operation __eq__.
+ """
+ # Even though "all" dtypes are allowed, we still require them to be
+ # promotable with each other.
+ other = self._check_allowed_dtypes(other, "all", "__eq__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__eq__(other._array)
+ return self.__class__._new(res)
+
+ def __float__(self: Array, /) -> float:
+ """
+ Performs the operation __float__.
+ """
+ # Note: This is an error here.
+ if self._array.ndim != 0:
+ raise TypeError("float is only allowed on arrays with 0 dimensions")
+ res = self._array.__float__()
+ return res
+
+ def __floordiv__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __floordiv__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__floordiv__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__floordiv__(other._array)
+ return self.__class__._new(res)
+
+ def __ge__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __ge__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__ge__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__ge__(other._array)
+ return self.__class__._new(res)
+
+ def __getitem__(
+ self: Array,
+ key: Union[
+ int, slice, ellipsis, Tuple[Union[int, slice, ellipsis], ...], Array
+ ],
+ /,
+ ) -> Array:
+ """
+ Performs the operation __getitem__.
+ """
+ # Note: Only indices required by the spec are allowed. See the
+ # docstring of _validate_index
+ key = self._validate_index(key, self.shape)
+ res = self._array.__getitem__(key)
+ return self._new(res)
+
+ def __gt__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __gt__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__gt__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__gt__(other._array)
+ return self.__class__._new(res)
+
+ def __int__(self: Array, /) -> int:
+ """
+ Performs the operation __int__.
+ """
+ # Note: This is an error here.
+ if self._array.ndim != 0:
+ raise TypeError("int is only allowed on arrays with 0 dimensions")
+ res = self._array.__int__()
+ return res
+
+ def __invert__(self: Array, /) -> Array:
+ """
+ Performs the operation __invert__.
+ """
+ if self.dtype not in _integer_or_boolean_dtypes:
+ raise TypeError("Only integer or boolean dtypes are allowed in __invert__")
+ res = self._array.__invert__()
+ return self.__class__._new(res)
+
+ def __le__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __le__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__le__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__le__(other._array)
+ return self.__class__._new(res)
+
+ # Note: __len__ may end up being removed from the array API spec.
+ def __len__(self, /) -> int:
+ """
+ Performs the operation __len__.
+ """
+ return self._array.__len__()
+
+ def __lshift__(self: Array, other: Union[int, Array], /) -> Array:
+ """
+ Performs the operation __lshift__.
+ """
+ other = self._check_allowed_dtypes(other, "integer", "__lshift__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__lshift__(other._array)
+ return self.__class__._new(res)
+
+ def __lt__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __lt__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__lt__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__lt__(other._array)
+ return self.__class__._new(res)
+
+ def __matmul__(self: Array, other: Array, /) -> Array:
+ """
+ Performs the operation __matmul__.
+ """
+ # matmul is not defined for scalars, but without this, we may get
+ # the wrong error message from asarray.
+ other = self._check_allowed_dtypes(other, "numeric", "__matmul__")
+ if other is NotImplemented:
+ return other
+ res = self._array.__matmul__(other._array)
+ return self.__class__._new(res)
+
+ def __mod__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __mod__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__mod__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__mod__(other._array)
+ return self.__class__._new(res)
+
+ def __mul__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __mul__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__mul__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__mul__(other._array)
+ return self.__class__._new(res)
+
+ def __ne__(self: Array, other: Union[int, float, bool, Array], /) -> Array:
+ """
+ Performs the operation __ne__.
+ """
+ other = self._check_allowed_dtypes(other, "all", "__ne__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__ne__(other._array)
+ return self.__class__._new(res)
+
+ def __neg__(self: Array, /) -> Array:
+ """
+ Performs the operation __neg__.
+ """
+ if self.dtype not in _numeric_dtypes:
+ raise TypeError("Only numeric dtypes are allowed in __neg__")
+ res = self._array.__neg__()
+ return self.__class__._new(res)
+
+ def __or__(self: Array, other: Union[int, bool, Array], /) -> Array:
+ """
+ Performs the operation __or__.
+ """
+ other = self._check_allowed_dtypes(other, "integer or boolean", "__or__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__or__(other._array)
+ return self.__class__._new(res)
+
+ def __pos__(self: Array, /) -> Array:
+ """
+ Performs the operation __pos__.
+ """
+ if self.dtype not in _numeric_dtypes:
+ raise TypeError("Only numeric dtypes are allowed in __pos__")
+ res = self._array.__pos__()
+ return self.__class__._new(res)
+
+ # PEP 484 requires int to be a subtype of float, but __pow__ should not
+ # accept int.
+ def __pow__(self: Array, other: Union[float, Array], /) -> Array:
+ """
+ Performs the operation __pow__.
+ """
+ from ._elementwise_functions import pow
+
+ other = self._check_allowed_dtypes(other, "floating-point", "__pow__")
+ if other is NotImplemented:
+ return other
+ # Note: NumPy's __pow__ does not follow type promotion rules for 0-d
+ # arrays, so we use pow() here instead.
+ return pow(self, other)
+
+ def __rshift__(self: Array, other: Union[int, Array], /) -> Array:
+ """
+ Performs the operation __rshift__.
+ """
+ other = self._check_allowed_dtypes(other, "integer", "__rshift__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__rshift__(other._array)
+ return self.__class__._new(res)
+
+ def __setitem__(
+ self,
+ key: Union[
+ int, slice, ellipsis, Tuple[Union[int, slice, ellipsis], ...], Array
+ ],
+ value: Union[int, float, bool, Array],
+ /,
+ ) -> None:
+ """
+ Performs the operation __setitem__.
+ """
+ # Note: Only indices required by the spec are allowed. See the
+ # docstring of _validate_index
+ key = self._validate_index(key, self.shape)
+ self._array.__setitem__(key, asarray(value)._array)
+
+ def __sub__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __sub__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__sub__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__sub__(other._array)
+ return self.__class__._new(res)
+
+ # PEP 484 requires int to be a subtype of float, but __truediv__ should
+ # not accept int.
+ def __truediv__(self: Array, other: Union[float, Array], /) -> Array:
+ """
+ Performs the operation __truediv__.
+ """
+ other = self._check_allowed_dtypes(other, "floating-point", "__truediv__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__truediv__(other._array)
+ return self.__class__._new(res)
+
+ def __xor__(self: Array, other: Union[int, bool, Array], /) -> Array:
+ """
+ Performs the operation __xor__.
+ """
+ other = self._check_allowed_dtypes(other, "integer or boolean", "__xor__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__xor__(other._array)
+ return self.__class__._new(res)
+
+ def __iadd__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __iadd__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__iadd__")
+ if other is NotImplemented:
+ return other
+ self._array.__iadd__(other._array)
+ return self
+
+ def __radd__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __radd__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__radd__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__radd__(other._array)
+ return self.__class__._new(res)
+
+ def __iand__(self: Array, other: Union[int, bool, Array], /) -> Array:
+ """
+ Performs the operation __iand__.
+ """
+ other = self._check_allowed_dtypes(other, "integer or boolean", "__iand__")
+ if other is NotImplemented:
+ return other
+ self._array.__iand__(other._array)
+ return self
+
+ def __rand__(self: Array, other: Union[int, bool, Array], /) -> Array:
+ """
+ Performs the operation __rand__.
+ """
+ other = self._check_allowed_dtypes(other, "integer or boolean", "__rand__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__rand__(other._array)
+ return self.__class__._new(res)
+
+ def __ifloordiv__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __ifloordiv__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__ifloordiv__")
+ if other is NotImplemented:
+ return other
+ self._array.__ifloordiv__(other._array)
+ return self
+
+ def __rfloordiv__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __rfloordiv__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__rfloordiv__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__rfloordiv__(other._array)
+ return self.__class__._new(res)
+
+ def __ilshift__(self: Array, other: Union[int, Array], /) -> Array:
+ """
+ Performs the operation __ilshift__.
+ """
+ other = self._check_allowed_dtypes(other, "integer", "__ilshift__")
+ if other is NotImplemented:
+ return other
+ self._array.__ilshift__(other._array)
+ return self
+
+ def __rlshift__(self: Array, other: Union[int, Array], /) -> Array:
+ """
+ Performs the operation __rlshift__.
+ """
+ other = self._check_allowed_dtypes(other, "integer", "__rlshift__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__rlshift__(other._array)
+ return self.__class__._new(res)
+
    def __imatmul__(self: Array, other: Array, /) -> Array:
        """
        Performs the operation __imatmul__.
        """
        # Note: NumPy does not implement __imatmul__.

        # matmul is not defined for scalars, but without this, we may get
        # the wrong error message from asarray.
        other = self._check_allowed_dtypes(other, "numeric", "__imatmul__")
        if other is NotImplemented:
            return other

        # __imatmul__ can only be allowed when it would not change the shape
        # of self.
        other_shape = other.shape
        if self.shape == () or other_shape == ():
            raise ValueError("@= requires at least one dimension")
        # self @ other preserves self's shape only when other is square in its
        # last two dimensions; a 1-D other would also drop a dimension.
        if len(other_shape) == 1 or other_shape[-1] != other_shape[-2]:
            raise ValueError("@= cannot change the shape of the input array")
        # Emulate the missing ndarray.__imatmul__ by computing the matmul and
        # writing the result back into self's existing buffer.
        self._array[:] = self._array.__matmul__(other._array)
        return self
+
+ def __rmatmul__(self: Array, other: Array, /) -> Array:
+ """
+ Performs the operation __rmatmul__.
+ """
+ # matmul is not defined for scalars, but without this, we may get
+ # the wrong error message from asarray.
+ other = self._check_allowed_dtypes(other, "numeric", "__rmatmul__")
+ if other is NotImplemented:
+ return other
+ res = self._array.__rmatmul__(other._array)
+ return self.__class__._new(res)
+
+ def __imod__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __imod__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__imod__")
+ if other is NotImplemented:
+ return other
+ self._array.__imod__(other._array)
+ return self
+
+ def __rmod__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __rmod__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__rmod__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__rmod__(other._array)
+ return self.__class__._new(res)
+
+ def __imul__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __imul__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__imul__")
+ if other is NotImplemented:
+ return other
+ self._array.__imul__(other._array)
+ return self
+
+ def __rmul__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __rmul__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__rmul__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__rmul__(other._array)
+ return self.__class__._new(res)
+
+ def __ior__(self: Array, other: Union[int, bool, Array], /) -> Array:
+ """
+ Performs the operation __ior__.
+ """
+ other = self._check_allowed_dtypes(other, "integer or boolean", "__ior__")
+ if other is NotImplemented:
+ return other
+ self._array.__ior__(other._array)
+ return self
+
+ def __ror__(self: Array, other: Union[int, bool, Array], /) -> Array:
+ """
+ Performs the operation __ror__.
+ """
+ other = self._check_allowed_dtypes(other, "integer or boolean", "__ror__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__ror__(other._array)
+ return self.__class__._new(res)
+
+ def __ipow__(self: Array, other: Union[float, Array], /) -> Array:
+ """
+ Performs the operation __ipow__.
+ """
+ other = self._check_allowed_dtypes(other, "floating-point", "__ipow__")
+ if other is NotImplemented:
+ return other
+ self._array.__ipow__(other._array)
+ return self
+
+ def __rpow__(self: Array, other: Union[float, Array], /) -> Array:
+ """
+ Performs the operation __rpow__.
+ """
+ from ._elementwise_functions import pow
+
+ other = self._check_allowed_dtypes(other, "floating-point", "__rpow__")
+ if other is NotImplemented:
+ return other
+ # Note: NumPy's __pow__ does not follow the spec type promotion rules
+ # for 0-d arrays, so we use pow() here instead.
+ return pow(other, self)
+
+ def __irshift__(self: Array, other: Union[int, Array], /) -> Array:
+ """
+ Performs the operation __irshift__.
+ """
+ other = self._check_allowed_dtypes(other, "integer", "__irshift__")
+ if other is NotImplemented:
+ return other
+ self._array.__irshift__(other._array)
+ return self
+
+ def __rrshift__(self: Array, other: Union[int, Array], /) -> Array:
+ """
+ Performs the operation __rrshift__.
+ """
+ other = self._check_allowed_dtypes(other, "integer", "__rrshift__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__rrshift__(other._array)
+ return self.__class__._new(res)
+
+ def __isub__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __isub__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__isub__")
+ if other is NotImplemented:
+ return other
+ self._array.__isub__(other._array)
+ return self
+
+ def __rsub__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __rsub__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__rsub__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__rsub__(other._array)
+ return self.__class__._new(res)
+
+ def __itruediv__(self: Array, other: Union[float, Array], /) -> Array:
+ """
+ Performs the operation __itruediv__.
+ """
+ other = self._check_allowed_dtypes(other, "floating-point", "__itruediv__")
+ if other is NotImplemented:
+ return other
+ self._array.__itruediv__(other._array)
+ return self
+
+ def __rtruediv__(self: Array, other: Union[float, Array], /) -> Array:
+ """
+ Performs the operation __rtruediv__.
+ """
+ other = self._check_allowed_dtypes(other, "floating-point", "__rtruediv__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__rtruediv__(other._array)
+ return self.__class__._new(res)
+
+ def __ixor__(self: Array, other: Union[int, bool, Array], /) -> Array:
+ """
+ Performs the operation __ixor__.
+ """
+ other = self._check_allowed_dtypes(other, "integer or boolean", "__ixor__")
+ if other is NotImplemented:
+ return other
+ self._array.__ixor__(other._array)
+ return self
+
+ def __rxor__(self: Array, other: Union[int, bool, Array], /) -> Array:
+ """
+ Performs the operation __rxor__.
+ """
+ other = self._check_allowed_dtypes(other, "integer or boolean", "__rxor__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__rxor__(other._array)
+ return self.__class__._new(res)
+
    @property
    def dtype(self) -> Dtype:
        """
        Array API compatible wrapper for :py:meth:`np.ndarray.dtype <numpy.ndarray.dtype>`.

        See its docstring for more information.
        """
        # Forwards directly; dtypes in this namespace are np.dtype objects.
        return self._array.dtype
+
    @property
    def device(self) -> Device:
        # This implementation only supports CPU arrays, so the device is a
        # fixed string rather than a device object.
        return "cpu"
+
    @property
    def ndim(self) -> int:
        """
        Array API compatible wrapper for :py:meth:`np.ndarray.ndim <numpy.ndarray.ndim>`.

        See its docstring for more information.
        """
        # Number of dimensions of the wrapped ndarray.
        return self._array.ndim
+
    @property
    def shape(self) -> Tuple[int, ...]:
        """
        Array API compatible wrapper for :py:meth:`np.ndarray.shape <numpy.ndarray.shape>`.

        See its docstring for more information.
        """
        # Forwards the wrapped ndarray's shape tuple unchanged.
        return self._array.shape
+
    @property
    def size(self) -> int:
        """
        Array API compatible wrapper for :py:meth:`np.ndarray.size <numpy.ndarray.size>`.

        See its docstring for more information.
        """
        # Total number of elements (product of the shape).
        return self._array.size
+
+ @property
+ def T(self) -> Array:
+ """
+ Array API compatible wrapper for :py:meth:`np.ndarray.T <numpy.ndarray.T>`.
+
+ See its docstring for more information.
+ """
+ return self._array.T
diff --git a/numpy/array_api/_constants.py b/numpy/array_api/_constants.py
new file mode 100644
index 000000000..9541941e7
--- /dev/null
+++ b/numpy/array_api/_constants.py
@@ -0,0 +1,6 @@
import numpy as np

# Re-export the mathematical constants required by the array API spec,
# forwarding NumPy's float values unchanged.
e = np.e
inf = np.inf
nan = np.nan
pi = np.pi
diff --git a/numpy/array_api/_creation_functions.py b/numpy/array_api/_creation_functions.py
new file mode 100644
index 000000000..e9c01e7e6
--- /dev/null
+++ b/numpy/array_api/_creation_functions.py
@@ -0,0 +1,316 @@
+from __future__ import annotations
+
+
+from typing import TYPE_CHECKING, List, Optional, Tuple, Union
+
+if TYPE_CHECKING:
+ from ._typing import (
+ Array,
+ Device,
+ Dtype,
+ NestedSequence,
+ SupportsDLPack,
+ SupportsBufferProtocol,
+ )
+ from collections.abc import Sequence
+from ._dtypes import _all_dtypes
+
+import numpy as np
+
+
def _check_valid_dtype(dtype):
    """Raise ValueError unless ``dtype`` is None or one of the spec dtypes."""
    # Note: Only spelling dtypes as the dtype objects is supported.

    # Identity comparison ("is") is deliberate: the dtype objects define
    # equality with the sorts of things we want to disallow, so "in" would be
    # too permissive.
    if any(dtype is d for d in (None,) + _all_dtypes):
        return
    raise ValueError("dtype must be one of the supported dtypes")
+
+
def asarray(
    obj: Union[
        Array,
        bool,
        int,
        float,
        NestedSequence[bool | int | float],
        SupportsDLPack,
        SupportsBufferProtocol,
    ],
    /,
    *,
    dtype: Optional[Dtype] = None,
    device: Optional[Device] = None,
    copy: Optional[bool] = None,
) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.asarray <numpy.asarray>`.

    See its docstring for more information.
    """
    # _array_object imports in this file are inside the functions to avoid
    # circular imports
    from ._array_object import Array

    _check_valid_dtype(dtype)
    if device not in ["cpu", None]:
        raise ValueError(f"Unsupported device {device!r}")
    if copy is False:
        # Note: copy=False is not yet implemented in np.asarray
        raise NotImplementedError("copy=False is not yet implemented")
    if isinstance(obj, Array) and (dtype is None or obj.dtype == dtype):
        if copy is True:
            return Array._new(np.array(obj._array, copy=True, dtype=dtype))
        return obj
    # Bug fix: the representable integer range is [-(2**63), 2**64 - 1]
    # (int64 min through uint64 max). The original check used
    # ``obj > 2 ** 64``, which let the out-of-range value 2**64 itself slip
    # through to np.asarray instead of raising here.
    if dtype is None and isinstance(obj, int) and (obj > 2 ** 64 - 1 or obj < -(2 ** 63)):
        # Give a better error message in this case. NumPy would convert this
        # to an object array. TODO: This won't handle large integers in lists.
        raise OverflowError("Integer out of bounds for array dtypes")
    res = np.asarray(obj, dtype=dtype)
    return Array._new(res)
+
+
def arange(
    start: Union[int, float],
    /,
    stop: Optional[Union[int, float]] = None,
    step: Union[int, float] = 1,
    *,
    dtype: Optional[Dtype] = None,
    device: Optional[Device] = None,
) -> Array:
    """Array API wrapper for :py:func:`np.arange <numpy.arange>`; see its docstring."""
    from ._array_object import Array

    _check_valid_dtype(dtype)
    if device is not None and device != "cpu":
        raise ValueError(f"Unsupported device {device!r}")
    return Array._new(np.arange(start, stop=stop, step=step, dtype=dtype))
+
+
def empty(
    shape: Union[int, Tuple[int, ...]],
    *,
    dtype: Optional[Dtype] = None,
    device: Optional[Device] = None,
) -> Array:
    """Array API wrapper for :py:func:`np.empty <numpy.empty>`; see its docstring."""
    from ._array_object import Array

    _check_valid_dtype(dtype)
    if device is not None and device != "cpu":
        raise ValueError(f"Unsupported device {device!r}")
    return Array._new(np.empty(shape, dtype=dtype))
+
+
def empty_like(
    x: Array, /, *, dtype: Optional[Dtype] = None, device: Optional[Device] = None
) -> Array:
    """Array API wrapper for :py:func:`np.empty_like <numpy.empty_like>`; see its docstring."""
    from ._array_object import Array

    _check_valid_dtype(dtype)
    if device is not None and device != "cpu":
        raise ValueError(f"Unsupported device {device!r}")
    return Array._new(np.empty_like(x._array, dtype=dtype))
+
+
def eye(
    n_rows: int,
    n_cols: Optional[int] = None,
    /,
    *,
    k: Optional[int] = 0,
    dtype: Optional[Dtype] = None,
    device: Optional[Device] = None,
) -> Array:
    """Array API wrapper for :py:func:`np.eye <numpy.eye>`; see its docstring."""
    from ._array_object import Array

    _check_valid_dtype(dtype)
    if device is not None and device != "cpu":
        raise ValueError(f"Unsupported device {device!r}")
    # np.eye spells the column count "M"; the spec names it n_cols.
    return Array._new(np.eye(n_rows, M=n_cols, k=k, dtype=dtype))
+
+
def from_dlpack(x: object, /) -> Array:
    """Stub for DLPack import; always raises NotImplementedError."""
    # Note: dlpack support is not yet implemented on Array
    raise NotImplementedError("DLPack support is not yet implemented")
+
+
def full(
    shape: Union[int, Tuple[int, ...]],
    fill_value: Union[int, float],
    *,
    dtype: Optional[Dtype] = None,
    device: Optional[Device] = None,
) -> Array:
    """Array API wrapper for :py:func:`np.full <numpy.full>`; see its docstring."""
    from ._array_object import Array

    _check_valid_dtype(dtype)
    if device is not None and device != "cpu":
        raise ValueError(f"Unsupported device {device!r}")
    if isinstance(fill_value, Array) and fill_value.ndim == 0:
        # Unwrap a 0-D Array fill value so np.full sees the NumPy scalar.
        fill_value = fill_value._array
    res = np.full(shape, fill_value, dtype=dtype)
    if res.dtype not in _all_dtypes:
        # This will happen if the fill value is not something that NumPy
        # coerces to one of the acceptable dtypes.
        raise TypeError("Invalid input to full")
    return Array._new(res)
+
+
def full_like(
    x: Array,
    /,
    fill_value: Union[int, float],
    *,
    dtype: Optional[Dtype] = None,
    device: Optional[Device] = None,
) -> Array:
    """Array API wrapper for :py:func:`np.full_like <numpy.full_like>`; see its docstring."""
    from ._array_object import Array

    _check_valid_dtype(dtype)
    if device is not None and device != "cpu":
        raise ValueError(f"Unsupported device {device!r}")
    res = np.full_like(x._array, fill_value, dtype=dtype)
    if res.dtype not in _all_dtypes:
        # This will happen if the fill value is not something that NumPy
        # coerces to one of the acceptable dtypes.
        raise TypeError("Invalid input to full_like")
    return Array._new(res)
+
+
def linspace(
    start: Union[int, float],
    stop: Union[int, float],
    /,
    num: int,
    *,
    dtype: Optional[Dtype] = None,
    device: Optional[Device] = None,
    endpoint: bool = True,
) -> Array:
    """Array API wrapper for :py:func:`np.linspace <numpy.linspace>`; see its docstring."""
    from ._array_object import Array

    _check_valid_dtype(dtype)
    if device is not None and device != "cpu":
        raise ValueError(f"Unsupported device {device!r}")
    return Array._new(np.linspace(start, stop, num, dtype=dtype, endpoint=endpoint))
+
+
def meshgrid(*arrays: Sequence[Array], indexing: str = "xy") -> List[Array]:
    """
    Array API compatible wrapper for :py:func:`np.meshgrid <numpy.meshgrid>`.

    See its docstring for more information.
    """
    from ._array_object import Array

    # Annotation fix: the original return type was ``List[Array, ...]``,
    # which is not valid typing syntax -- typing.List takes exactly one
    # parameter (the ``, ...`` form exists only for Tuple).
    return [
        Array._new(array)
        for array in np.meshgrid(*[a._array for a in arrays], indexing=indexing)
    ]
+
+
def ones(
    shape: Union[int, Tuple[int, ...]],
    *,
    dtype: Optional[Dtype] = None,
    device: Optional[Device] = None,
) -> Array:
    """Array API wrapper for :py:func:`np.ones <numpy.ones>`; see its docstring."""
    from ._array_object import Array

    _check_valid_dtype(dtype)
    if device is not None and device != "cpu":
        raise ValueError(f"Unsupported device {device!r}")
    return Array._new(np.ones(shape, dtype=dtype))
+
+
def ones_like(
    x: Array, /, *, dtype: Optional[Dtype] = None, device: Optional[Device] = None
) -> Array:
    """Array API wrapper for :py:func:`np.ones_like <numpy.ones_like>`; see its docstring."""
    from ._array_object import Array

    _check_valid_dtype(dtype)
    if device is not None and device != "cpu":
        raise ValueError(f"Unsupported device {device!r}")
    return Array._new(np.ones_like(x._array, dtype=dtype))
+
+
def zeros(
    shape: Union[int, Tuple[int, ...]],
    *,
    dtype: Optional[Dtype] = None,
    device: Optional[Device] = None,
) -> Array:
    """Array API wrapper for :py:func:`np.zeros <numpy.zeros>`; see its docstring."""
    from ._array_object import Array

    _check_valid_dtype(dtype)
    if device is not None and device != "cpu":
        raise ValueError(f"Unsupported device {device!r}")
    return Array._new(np.zeros(shape, dtype=dtype))
+
+
def zeros_like(
    x: Array, /, *, dtype: Optional[Dtype] = None, device: Optional[Device] = None
) -> Array:
    """Array API wrapper for :py:func:`np.zeros_like <numpy.zeros_like>`; see its docstring."""
    from ._array_object import Array

    _check_valid_dtype(dtype)
    if device is not None and device != "cpu":
        raise ValueError(f"Unsupported device {device!r}")
    return Array._new(np.zeros_like(x._array, dtype=dtype))
diff --git a/numpy/array_api/_data_type_functions.py b/numpy/array_api/_data_type_functions.py
new file mode 100644
index 000000000..fd92aa250
--- /dev/null
+++ b/numpy/array_api/_data_type_functions.py
@@ -0,0 +1,127 @@
+from __future__ import annotations
+
+from ._array_object import Array
+from ._dtypes import _all_dtypes, _result_type
+
+from dataclasses import dataclass
+from typing import TYPE_CHECKING, List, Tuple, Union
+
+if TYPE_CHECKING:
+ from ._typing import Dtype
+ from collections.abc import Sequence
+
+import numpy as np
+
+
def broadcast_arrays(*arrays: Sequence[Array]) -> List[Array]:
    """Array API wrapper for :py:func:`np.broadcast_arrays <numpy.broadcast_arrays>`; see its docstring."""
    from ._array_object import Array

    broadcasted = np.broadcast_arrays(*[a._array for a in arrays])
    return [Array._new(a) for a in broadcasted]
+
+
def broadcast_to(x: Array, /, shape: Tuple[int, ...]) -> Array:
    """Array API wrapper for :py:func:`np.broadcast_to <numpy.broadcast_to>`; see its docstring."""
    from ._array_object import Array

    broadcasted = np.broadcast_to(x._array, shape)
    return Array._new(broadcasted)
+
+
def can_cast(from_: Union[Dtype, Array], to: Dtype, /) -> bool:
    """Array API wrapper for :py:func:`np.can_cast <numpy.can_cast>`; see its docstring."""
    from ._array_object import Array

    # Unwrap an Array source so np.can_cast sees the underlying ndarray.
    source = from_._array if isinstance(from_, Array) else from_
    return np.can_cast(source, to)
+
+
# These are internal objects for the return types of finfo and iinfo, since
# the NumPy versions contain extra data that isn't part of the spec.
@dataclass
class finfo_object:
    # Number of bits occupied by the floating-point dtype.
    bits: int
    # Note: The types of the float data here are float, whereas in NumPy they
    # are scalars of the corresponding float dtype.
    eps: float
    max: float
    min: float
    smallest_normal: float
+
+
@dataclass
class iinfo_object:
    # Number of bits occupied by the integer dtype, and its value range.
    bits: int
    max: int
    min: int
+
+
def finfo(type: Union[Dtype, Array], /) -> finfo_object:
    """Array API wrapper for :py:func:`np.finfo <numpy.finfo>`; see its docstring."""
    info = np.finfo(type)
    # Convert NumPy's float-dtype scalars to plain Python floats, which is
    # what the spec-level finfo_object carries.
    return finfo_object(
        info.bits,
        float(info.eps),
        float(info.max),
        float(info.min),
        float(info.smallest_normal),
    )
+
+
def iinfo(type: Union[Dtype, Array], /) -> iinfo_object:
    """Array API wrapper for :py:func:`np.iinfo <numpy.iinfo>`; see its docstring."""
    info = np.iinfo(type)
    return iinfo_object(info.bits, info.max, info.min)
+
+
def result_type(*arrays_and_dtypes: Sequence[Union[Array, Dtype]]) -> Dtype:
    """
    Array API compatible wrapper for :py:func:`np.result_type <numpy.result_type>`.

    See its docstring for more information.
    """
    # Note: we use a custom implementation that gives only the type promotions
    # required by the spec rather than using np.result_type. NumPy implements
    # too many extra type promotions like int64 + uint64 -> float64, and does
    # value-based casting on scalar arrays.
    dtypes = []
    for a in arrays_and_dtypes:
        if isinstance(a, Array):
            a = a.dtype
        elif isinstance(a, np.ndarray) or a not in _all_dtypes:
            raise TypeError("result_type() inputs must be array_api arrays or dtypes")
        dtypes.append(a)

    if not dtypes:
        raise ValueError("at least one array or dtype is required")
    # Fold the pairwise promotion over the list; a single dtype is returned
    # unchanged without consulting the promotion table.
    result = dtypes[0]
    for d in dtypes[1:]:
        result = _result_type(result, d)
    return result
diff --git a/numpy/array_api/_dtypes.py b/numpy/array_api/_dtypes.py
new file mode 100644
index 000000000..476d619fe
--- /dev/null
+++ b/numpy/array_api/_dtypes.py
@@ -0,0 +1,143 @@
+import numpy as np
+
# Note: we use dtype objects instead of dtype classes. The spec does not
# require any behavior on dtypes other than equality.
int8 = np.dtype("int8")
int16 = np.dtype("int16")
int32 = np.dtype("int32")
int64 = np.dtype("int64")
uint8 = np.dtype("uint8")
uint16 = np.dtype("uint16")
uint32 = np.dtype("uint32")
uint64 = np.dtype("uint64")
float32 = np.dtype("float32")
float64 = np.dtype("float64")
# Note: This name is changed
# (the spec names the boolean dtype "bool", so this deliberately shadows the
# Python builtin within this module).
bool = np.dtype("bool")

_all_dtypes = (
    int8,
    int16,
    int32,
    int64,
    uint8,
    uint16,
    uint32,
    uint64,
    float32,
    float64,
    bool,
)
# Category tuples consumed via _dtype_categories below to validate the dtypes
# accepted by each operator/function.
_boolean_dtypes = (bool,)
_floating_dtypes = (float32, float64)
_integer_dtypes = (int8, int16, int32, int64, uint8, uint16, uint32, uint64)
_integer_or_boolean_dtypes = (
    bool,
    int8,
    int16,
    int32,
    int64,
    uint8,
    uint16,
    uint32,
    uint64,
)
_numeric_dtypes = (
    float32,
    float64,
    int8,
    int16,
    int32,
    int64,
    uint8,
    uint16,
    uint32,
    uint64,
)

# Maps a category name string (as used in dtype checks) to the tuple of
# dtypes belonging to that category.
_dtype_categories = {
    "all": _all_dtypes,
    "numeric": _numeric_dtypes,
    "integer": _integer_dtypes,
    "integer or boolean": _integer_or_boolean_dtypes,
    "boolean": _boolean_dtypes,
    "floating-point": _floating_dtypes,
}


# Note: the spec defines a restricted type promotion table compared to NumPy.
# In particular, cross-kind promotions like integer + float or boolean +
# integer are not allowed, even for functions that accept both kinds.
# Additionally, NumPy promotes signed integer + uint64 to float64, but this
# promotion is not allowed here. To be clear, Python scalar int objects are
# allowed to promote to floating-point dtypes, but only in array operators
# (see Array._promote_scalar) method in _array_object.py.
_promotion_table = {
    (int8, int8): int8,
    (int8, int16): int16,
    (int8, int32): int32,
    (int8, int64): int64,
    (int16, int8): int16,
    (int16, int16): int16,
    (int16, int32): int32,
    (int16, int64): int64,
    (int32, int8): int32,
    (int32, int16): int32,
    (int32, int32): int32,
    (int32, int64): int64,
    (int64, int8): int64,
    (int64, int16): int64,
    (int64, int32): int64,
    (int64, int64): int64,
    (uint8, uint8): uint8,
    (uint8, uint16): uint16,
    (uint8, uint32): uint32,
    (uint8, uint64): uint64,
    (uint16, uint8): uint16,
    (uint16, uint16): uint16,
    (uint16, uint32): uint32,
    (uint16, uint64): uint64,
    (uint32, uint8): uint32,
    (uint32, uint16): uint32,
    (uint32, uint32): uint32,
    (uint32, uint64): uint64,
    (uint64, uint8): uint64,
    (uint64, uint16): uint64,
    (uint64, uint32): uint64,
    (uint64, uint64): uint64,
    # Mixed signed/unsigned pairs promote to the smallest signed type that
    # can hold both; (intN, uint64) pairs are deliberately absent, so they
    # raise in _result_type.
    (int8, uint8): int16,
    (int8, uint16): int32,
    (int8, uint32): int64,
    (int16, uint8): int16,
    (int16, uint16): int32,
    (int16, uint32): int64,
    (int32, uint8): int32,
    (int32, uint16): int32,
    (int32, uint32): int64,
    (int64, uint8): int64,
    (int64, uint16): int64,
    (int64, uint32): int64,
    (uint8, int8): int16,
    (uint16, int8): int32,
    (uint32, int8): int64,
    (uint8, int16): int16,
    (uint16, int16): int32,
    (uint32, int16): int64,
    (uint8, int32): int32,
    (uint16, int32): int32,
    (uint32, int32): int64,
    (uint8, int64): int64,
    (uint16, int64): int64,
    (uint32, int64): int64,
    (float32, float32): float32,
    (float32, float64): float64,
    (float64, float32): float64,
    (float64, float64): float64,
    (bool, bool): bool,
}
+
+
def _result_type(type1, type2):
    """Return the spec-mandated promotion of two dtypes, or raise TypeError."""
    promoted = _promotion_table.get((type1, type2))
    # The table never stores None, so a None result means the pair is absent.
    if promoted is not None:
        return promoted
    raise TypeError(f"{type1} and {type2} cannot be type promoted together")
diff --git a/numpy/array_api/_elementwise_functions.py b/numpy/array_api/_elementwise_functions.py
new file mode 100644
index 000000000..4408fe833
--- /dev/null
+++ b/numpy/array_api/_elementwise_functions.py
@@ -0,0 +1,729 @@
+from __future__ import annotations
+
+from ._dtypes import (
+ _boolean_dtypes,
+ _floating_dtypes,
+ _integer_dtypes,
+ _integer_or_boolean_dtypes,
+ _numeric_dtypes,
+ _result_type,
+)
+from ._array_object import Array
+
+import numpy as np
+
+
def abs(x: Array, /) -> Array:
    """Array API compatible wrapper for :py:func:`np.abs <numpy.abs>`; see its docstring for more information."""
    if x.dtype in _numeric_dtypes:
        return Array._new(np.abs(x._array))
    raise TypeError("Only numeric dtypes are allowed in abs")
+
+
# Note: the name differs from NumPy's ``arccos``, per the array API spec.
def acos(x: Array, /) -> Array:
    """Array API compatible wrapper for :py:func:`np.arccos <numpy.arccos>`; see its docstring for more information."""
    if x.dtype in _floating_dtypes:
        return Array._new(np.arccos(x._array))
    raise TypeError("Only floating-point dtypes are allowed in acos")
+
+
# Note: the name differs from NumPy's ``arccosh``, per the array API spec.
def acosh(x: Array, /) -> Array:
    """Array API compatible wrapper for :py:func:`np.arccosh <numpy.arccosh>`; see its docstring for more information."""
    if x.dtype in _floating_dtypes:
        return Array._new(np.arccosh(x._array))
    raise TypeError("Only floating-point dtypes are allowed in acosh")
+
+
def add(x1: Array, x2: Array, /) -> Array:
    """Array API compatible wrapper for :py:func:`np.add <numpy.add>`; see its docstring for more information."""
    if not (x1.dtype in _numeric_dtypes and x2.dtype in _numeric_dtypes):
        raise TypeError("Only numeric dtypes are allowed in add")
    # _result_type raises on dtype pairs the spec disallows together.
    _result_type(x1.dtype, x2.dtype)
    a, b = Array._normalize_two_args(x1, x2)
    return Array._new(np.add(a._array, b._array))
+
+
# Note: the name differs from NumPy's ``arcsin``, per the array API spec.
def asin(x: Array, /) -> Array:
    """Array API compatible wrapper for :py:func:`np.arcsin <numpy.arcsin>`; see its docstring for more information."""
    if x.dtype in _floating_dtypes:
        return Array._new(np.arcsin(x._array))
    raise TypeError("Only floating-point dtypes are allowed in asin")
+
+
# Note: the name differs from NumPy's ``arcsinh``, per the array API spec.
def asinh(x: Array, /) -> Array:
    """Array API compatible wrapper for :py:func:`np.arcsinh <numpy.arcsinh>`; see its docstring for more information."""
    if x.dtype in _floating_dtypes:
        return Array._new(np.arcsinh(x._array))
    raise TypeError("Only floating-point dtypes are allowed in asinh")
+
+
# Note: the name differs from NumPy's ``arctan``, per the array API spec.
def atan(x: Array, /) -> Array:
    """Array API compatible wrapper for :py:func:`np.arctan <numpy.arctan>`; see its docstring for more information."""
    if x.dtype in _floating_dtypes:
        return Array._new(np.arctan(x._array))
    raise TypeError("Only floating-point dtypes are allowed in atan")
+
+
# Note: the name differs from NumPy's ``arctan2``, per the array API spec.
def atan2(x1: Array, x2: Array, /) -> Array:
    """Array API compatible wrapper for :py:func:`np.arctan2 <numpy.arctan2>`; see its docstring for more information."""
    if not (x1.dtype in _floating_dtypes and x2.dtype in _floating_dtypes):
        raise TypeError("Only floating-point dtypes are allowed in atan2")
    # _result_type raises on dtype pairs the spec disallows together.
    _result_type(x1.dtype, x2.dtype)
    a, b = Array._normalize_two_args(x1, x2)
    return Array._new(np.arctan2(a._array, b._array))
+
+
# Note: the name differs from NumPy's ``arctanh``, per the array API spec.
def atanh(x: Array, /) -> Array:
    """Array API compatible wrapper for :py:func:`np.arctanh <numpy.arctanh>`; see its docstring for more information."""
    if x.dtype in _floating_dtypes:
        return Array._new(np.arctanh(x._array))
    raise TypeError("Only floating-point dtypes are allowed in atanh")
+
+
def bitwise_and(x1: Array, x2: Array, /) -> Array:
    """Array API compatible wrapper for :py:func:`np.bitwise_and <numpy.bitwise_and>`; see its docstring for more information."""
    if not (
        x1.dtype in _integer_or_boolean_dtypes
        and x2.dtype in _integer_or_boolean_dtypes
    ):
        raise TypeError("Only integer or boolean dtypes are allowed in bitwise_and")
    # _result_type raises on dtype pairs the spec disallows together.
    _result_type(x1.dtype, x2.dtype)
    a, b = Array._normalize_two_args(x1, x2)
    return Array._new(np.bitwise_and(a._array, b._array))
+
+
# Note: the name differs from NumPy's ``left_shift``, per the array API spec.
def bitwise_left_shift(x1: Array, x2: Array, /) -> Array:
    """Array API compatible wrapper for :py:func:`np.left_shift <numpy.left_shift>`; see its docstring for more information."""
    if not (x1.dtype in _integer_dtypes and x2.dtype in _integer_dtypes):
        raise TypeError("Only integer dtypes are allowed in bitwise_left_shift")
    # _result_type raises on dtype pairs the spec disallows together.
    _result_type(x1.dtype, x2.dtype)
    a, b = Array._normalize_two_args(x1, x2)
    # The spec only defines the shift for nonnegative shift counts.
    if np.any(b._array < 0):
        raise ValueError("bitwise_left_shift(x1, x2) is only defined for x2 >= 0")
    return Array._new(np.left_shift(a._array, b._array))
+
+
# Note: the name differs from NumPy's ``invert``, per the array API spec.
def bitwise_invert(x: Array, /) -> Array:
    """Array API compatible wrapper for :py:func:`np.invert <numpy.invert>`; see its docstring for more information."""
    if x.dtype in _integer_or_boolean_dtypes:
        return Array._new(np.invert(x._array))
    raise TypeError("Only integer or boolean dtypes are allowed in bitwise_invert")
+
+
def bitwise_or(x1: Array, x2: Array, /) -> Array:
    """Array API compatible wrapper for :py:func:`np.bitwise_or <numpy.bitwise_or>`; see its docstring for more information."""
    if not (
        x1.dtype in _integer_or_boolean_dtypes
        and x2.dtype in _integer_or_boolean_dtypes
    ):
        raise TypeError("Only integer or boolean dtypes are allowed in bitwise_or")
    # _result_type raises on dtype pairs the spec disallows together.
    _result_type(x1.dtype, x2.dtype)
    a, b = Array._normalize_two_args(x1, x2)
    return Array._new(np.bitwise_or(a._array, b._array))
+
+
# Note: the name differs from NumPy's ``right_shift``, per the array API spec.
def bitwise_right_shift(x1: Array, x2: Array, /) -> Array:
    """Array API compatible wrapper for :py:func:`np.right_shift <numpy.right_shift>`; see its docstring for more information."""
    if not (x1.dtype in _integer_dtypes and x2.dtype in _integer_dtypes):
        raise TypeError("Only integer dtypes are allowed in bitwise_right_shift")
    # _result_type raises on dtype pairs the spec disallows together.
    _result_type(x1.dtype, x2.dtype)
    a, b = Array._normalize_two_args(x1, x2)
    # The spec only defines the shift for nonnegative shift counts.
    if np.any(b._array < 0):
        raise ValueError("bitwise_right_shift(x1, x2) is only defined for x2 >= 0")
    return Array._new(np.right_shift(a._array, b._array))
+
+
def bitwise_xor(x1: Array, x2: Array, /) -> Array:
    """Array API compatible wrapper for :py:func:`np.bitwise_xor <numpy.bitwise_xor>`; see its docstring for more information."""
    if not (
        x1.dtype in _integer_or_boolean_dtypes
        and x2.dtype in _integer_or_boolean_dtypes
    ):
        raise TypeError("Only integer or boolean dtypes are allowed in bitwise_xor")
    # _result_type raises on dtype pairs the spec disallows together.
    _result_type(x1.dtype, x2.dtype)
    a, b = Array._normalize_two_args(x1, x2)
    return Array._new(np.bitwise_xor(a._array, b._array))
+
+
def ceil(x: Array, /) -> Array:
    """Array API compatible wrapper for :py:func:`np.ceil <numpy.ceil>`; see its docstring for more information."""
    if x.dtype not in _numeric_dtypes:
        raise TypeError("Only numeric dtypes are allowed in ceil")
    if x.dtype in _integer_dtypes:
        # Integers are their own ceiling; this also keeps the input dtype.
        return x
    return Array._new(np.ceil(x._array))
+
+
def cos(x: Array, /) -> Array:
    """Array API compatible wrapper for :py:func:`np.cos <numpy.cos>`; see its docstring for more information."""
    if x.dtype in _floating_dtypes:
        return Array._new(np.cos(x._array))
    raise TypeError("Only floating-point dtypes are allowed in cos")
+
+
def cosh(x: Array, /) -> Array:
    """Array API compatible wrapper for :py:func:`np.cosh <numpy.cosh>`; see its docstring for more information."""
    if x.dtype in _floating_dtypes:
        return Array._new(np.cosh(x._array))
    raise TypeError("Only floating-point dtypes are allowed in cosh")
+
+
def divide(x1: Array, x2: Array, /) -> Array:
    """Array API compatible wrapper for :py:func:`np.divide <numpy.divide>`; see its docstring for more information."""
    if not (x1.dtype in _floating_dtypes and x2.dtype in _floating_dtypes):
        raise TypeError("Only floating-point dtypes are allowed in divide")
    # _result_type raises on dtype pairs the spec disallows together.
    _result_type(x1.dtype, x2.dtype)
    a, b = Array._normalize_two_args(x1, x2)
    return Array._new(np.divide(a._array, b._array))
+
+
def equal(x1: Array, x2: Array, /) -> Array:
    """Array API compatible wrapper for :py:func:`np.equal <numpy.equal>`; see its docstring for more information."""
    # No dtype restriction for equality, but _result_type still raises on
    # dtype pairs the spec disallows together.
    _result_type(x1.dtype, x2.dtype)
    a, b = Array._normalize_two_args(x1, x2)
    return Array._new(np.equal(a._array, b._array))
+
+
def exp(x: Array, /) -> Array:
    """Array API compatible wrapper for :py:func:`np.exp <numpy.exp>`; see its docstring for more information."""
    if x.dtype in _floating_dtypes:
        return Array._new(np.exp(x._array))
    raise TypeError("Only floating-point dtypes are allowed in exp")
+
+
def expm1(x: Array, /) -> Array:
    """Array API compatible wrapper for :py:func:`np.expm1 <numpy.expm1>`; see its docstring for more information."""
    if x.dtype in _floating_dtypes:
        return Array._new(np.expm1(x._array))
    raise TypeError("Only floating-point dtypes are allowed in expm1")
+
+
def floor(x: Array, /) -> Array:
    """Array API compatible wrapper for :py:func:`np.floor <numpy.floor>`; see its docstring for more information."""
    if x.dtype not in _numeric_dtypes:
        raise TypeError("Only numeric dtypes are allowed in floor")
    if x.dtype in _integer_dtypes:
        # Integers are their own floor; this also keeps the input dtype.
        return x
    return Array._new(np.floor(x._array))
+
+
def floor_divide(x1: Array, x2: Array, /) -> Array:
    """Array API compatible wrapper for :py:func:`np.floor_divide <numpy.floor_divide>`; see its docstring for more information."""
    if not (x1.dtype in _numeric_dtypes and x2.dtype in _numeric_dtypes):
        raise TypeError("Only numeric dtypes are allowed in floor_divide")
    # _result_type raises on dtype pairs the spec disallows together.
    _result_type(x1.dtype, x2.dtype)
    a, b = Array._normalize_two_args(x1, x2)
    return Array._new(np.floor_divide(a._array, b._array))
+
+
def greater(x1: Array, x2: Array, /) -> Array:
    """Array API compatible wrapper for :py:func:`np.greater <numpy.greater>`; see its docstring for more information."""
    if not (x1.dtype in _numeric_dtypes and x2.dtype in _numeric_dtypes):
        raise TypeError("Only numeric dtypes are allowed in greater")
    # _result_type raises on dtype pairs the spec disallows together.
    _result_type(x1.dtype, x2.dtype)
    a, b = Array._normalize_two_args(x1, x2)
    return Array._new(np.greater(a._array, b._array))
+
+
def greater_equal(x1: Array, x2: Array, /) -> Array:
    """Array API compatible wrapper for :py:func:`np.greater_equal <numpy.greater_equal>`; see its docstring for more information."""
    if not (x1.dtype in _numeric_dtypes and x2.dtype in _numeric_dtypes):
        raise TypeError("Only numeric dtypes are allowed in greater_equal")
    # _result_type raises on dtype pairs the spec disallows together.
    _result_type(x1.dtype, x2.dtype)
    a, b = Array._normalize_two_args(x1, x2)
    return Array._new(np.greater_equal(a._array, b._array))
+
+
def isfinite(x: Array, /) -> Array:
    """Array API compatible wrapper for :py:func:`np.isfinite <numpy.isfinite>`; see its docstring for more information."""
    if x.dtype in _numeric_dtypes:
        return Array._new(np.isfinite(x._array))
    raise TypeError("Only numeric dtypes are allowed in isfinite")
+
+
def isinf(x: Array, /) -> Array:
    """Array API compatible wrapper for :py:func:`np.isinf <numpy.isinf>`; see its docstring for more information."""
    if x.dtype in _numeric_dtypes:
        return Array._new(np.isinf(x._array))
    raise TypeError("Only numeric dtypes are allowed in isinf")
+
+
def isnan(x: Array, /) -> Array:
    """Array API compatible wrapper for :py:func:`np.isnan <numpy.isnan>`; see its docstring for more information."""
    if x.dtype in _numeric_dtypes:
        return Array._new(np.isnan(x._array))
    raise TypeError("Only numeric dtypes are allowed in isnan")
+
+
def less(x1: Array, x2: Array, /) -> Array:
    """Array API compatible wrapper for :py:func:`np.less <numpy.less>`; see its docstring for more information."""
    if not (x1.dtype in _numeric_dtypes and x2.dtype in _numeric_dtypes):
        raise TypeError("Only numeric dtypes are allowed in less")
    # _result_type raises on dtype pairs the spec disallows together.
    _result_type(x1.dtype, x2.dtype)
    a, b = Array._normalize_two_args(x1, x2)
    return Array._new(np.less(a._array, b._array))
+
+
def less_equal(x1: Array, x2: Array, /) -> Array:
    """Array API compatible wrapper for :py:func:`np.less_equal <numpy.less_equal>`; see its docstring for more information."""
    if not (x1.dtype in _numeric_dtypes and x2.dtype in _numeric_dtypes):
        raise TypeError("Only numeric dtypes are allowed in less_equal")
    # _result_type raises on dtype pairs the spec disallows together.
    _result_type(x1.dtype, x2.dtype)
    a, b = Array._normalize_two_args(x1, x2)
    return Array._new(np.less_equal(a._array, b._array))
+
+
def log(x: Array, /) -> Array:
    """Array API compatible wrapper for :py:func:`np.log <numpy.log>`; see its docstring for more information."""
    if x.dtype in _floating_dtypes:
        return Array._new(np.log(x._array))
    raise TypeError("Only floating-point dtypes are allowed in log")
+
+
def log1p(x: Array, /) -> Array:
    """Array API compatible wrapper for :py:func:`np.log1p <numpy.log1p>`; see its docstring for more information."""
    if x.dtype in _floating_dtypes:
        return Array._new(np.log1p(x._array))
    raise TypeError("Only floating-point dtypes are allowed in log1p")
+
+
def log2(x: Array, /) -> Array:
    """Array API compatible wrapper for :py:func:`np.log2 <numpy.log2>`; see its docstring for more information."""
    if x.dtype in _floating_dtypes:
        return Array._new(np.log2(x._array))
    raise TypeError("Only floating-point dtypes are allowed in log2")
+
+
def log10(x: Array, /) -> Array:
    """Array API compatible wrapper for :py:func:`np.log10 <numpy.log10>`; see its docstring for more information."""
    if x.dtype in _floating_dtypes:
        return Array._new(np.log10(x._array))
    raise TypeError("Only floating-point dtypes are allowed in log10")
+
+
def logaddexp(x1: Array, x2: Array, /) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.logaddexp <numpy.logaddexp>`.

    See its docstring for more information.
    """
    # Note: x1 and x2 are positional-only (`/`), matching the array API spec
    # and every other two-argument function in this module; the marker was
    # previously missing here.
    if x1.dtype not in _floating_dtypes or x2.dtype not in _floating_dtypes:
        raise TypeError("Only floating-point dtypes are allowed in logaddexp")
    # Call result type here just to raise on disallowed type combinations
    _result_type(x1.dtype, x2.dtype)
    x1, x2 = Array._normalize_two_args(x1, x2)
    return Array._new(np.logaddexp(x1._array, x2._array))
+
+
def logical_and(x1: Array, x2: Array, /) -> Array:
    """Array API compatible wrapper for :py:func:`np.logical_and <numpy.logical_and>`; see its docstring for more information."""
    if not (x1.dtype in _boolean_dtypes and x2.dtype in _boolean_dtypes):
        raise TypeError("Only boolean dtypes are allowed in logical_and")
    # _result_type raises on dtype pairs the spec disallows together.
    _result_type(x1.dtype, x2.dtype)
    a, b = Array._normalize_two_args(x1, x2)
    return Array._new(np.logical_and(a._array, b._array))
+
+
def logical_not(x: Array, /) -> Array:
    """Array API compatible wrapper for :py:func:`np.logical_not <numpy.logical_not>`; see its docstring for more information."""
    if x.dtype in _boolean_dtypes:
        return Array._new(np.logical_not(x._array))
    raise TypeError("Only boolean dtypes are allowed in logical_not")
+
+
def logical_or(x1: Array, x2: Array, /) -> Array:
    """Array API compatible wrapper for :py:func:`np.logical_or <numpy.logical_or>`; see its docstring for more information."""
    if not (x1.dtype in _boolean_dtypes and x2.dtype in _boolean_dtypes):
        raise TypeError("Only boolean dtypes are allowed in logical_or")
    # _result_type raises on dtype pairs the spec disallows together.
    _result_type(x1.dtype, x2.dtype)
    a, b = Array._normalize_two_args(x1, x2)
    return Array._new(np.logical_or(a._array, b._array))
+
+
def logical_xor(x1: Array, x2: Array, /) -> Array:
    """Array API compatible wrapper for :py:func:`np.logical_xor <numpy.logical_xor>`; see its docstring for more information."""
    if not (x1.dtype in _boolean_dtypes and x2.dtype in _boolean_dtypes):
        raise TypeError("Only boolean dtypes are allowed in logical_xor")
    # _result_type raises on dtype pairs the spec disallows together.
    _result_type(x1.dtype, x2.dtype)
    a, b = Array._normalize_two_args(x1, x2)
    return Array._new(np.logical_xor(a._array, b._array))
+
+
def multiply(x1: Array, x2: Array, /) -> Array:
    """Array API compatible wrapper for :py:func:`np.multiply <numpy.multiply>`; see its docstring for more information."""
    if not (x1.dtype in _numeric_dtypes and x2.dtype in _numeric_dtypes):
        raise TypeError("Only numeric dtypes are allowed in multiply")
    # _result_type raises on dtype pairs the spec disallows together.
    _result_type(x1.dtype, x2.dtype)
    a, b = Array._normalize_two_args(x1, x2)
    return Array._new(np.multiply(a._array, b._array))
+
+
def negative(x: Array, /) -> Array:
    """Array API compatible wrapper for :py:func:`np.negative <numpy.negative>`; see its docstring for more information."""
    if x.dtype in _numeric_dtypes:
        return Array._new(np.negative(x._array))
    raise TypeError("Only numeric dtypes are allowed in negative")
+
+
def not_equal(x1: Array, x2: Array, /) -> Array:
    """Array API compatible wrapper for :py:func:`np.not_equal <numpy.not_equal>`; see its docstring for more information."""
    # No dtype restriction for inequality, but _result_type still raises on
    # dtype pairs the spec disallows together.
    _result_type(x1.dtype, x2.dtype)
    a, b = Array._normalize_two_args(x1, x2)
    return Array._new(np.not_equal(a._array, b._array))
+
+
def positive(x: Array, /) -> Array:
    """Array API compatible wrapper for :py:func:`np.positive <numpy.positive>`; see its docstring for more information."""
    if x.dtype in _numeric_dtypes:
        return Array._new(np.positive(x._array))
    raise TypeError("Only numeric dtypes are allowed in positive")
+
+
# Note: the name differs from NumPy's ``power``, per the array API spec.
def pow(x1: Array, x2: Array, /) -> Array:
    """Array API compatible wrapper for :py:func:`np.power <numpy.power>`; see its docstring for more information."""
    if not (x1.dtype in _floating_dtypes and x2.dtype in _floating_dtypes):
        raise TypeError("Only floating-point dtypes are allowed in pow")
    # _result_type raises on dtype pairs the spec disallows together.
    _result_type(x1.dtype, x2.dtype)
    a, b = Array._normalize_two_args(x1, x2)
    return Array._new(np.power(a._array, b._array))
+
+
def remainder(x1: Array, x2: Array, /) -> Array:
    """Array API compatible wrapper for :py:func:`np.remainder <numpy.remainder>`; see its docstring for more information."""
    if not (x1.dtype in _numeric_dtypes and x2.dtype in _numeric_dtypes):
        raise TypeError("Only numeric dtypes are allowed in remainder")
    # _result_type raises on dtype pairs the spec disallows together.
    _result_type(x1.dtype, x2.dtype)
    a, b = Array._normalize_two_args(x1, x2)
    return Array._new(np.remainder(a._array, b._array))
+
+
def round(x: Array, /) -> Array:
    """Array API compatible wrapper for :py:func:`np.round <numpy.round>`; see its docstring for more information."""
    if x.dtype in _numeric_dtypes:
        return Array._new(np.round(x._array))
    raise TypeError("Only numeric dtypes are allowed in round")
+
+
def sign(x: Array, /) -> Array:
    """Array API compatible wrapper for :py:func:`np.sign <numpy.sign>`; see its docstring for more information."""
    if x.dtype in _numeric_dtypes:
        return Array._new(np.sign(x._array))
    raise TypeError("Only numeric dtypes are allowed in sign")
+
+
def sin(x: Array, /) -> Array:
    """Array API compatible wrapper for :py:func:`np.sin <numpy.sin>`; see its docstring for more information."""
    if x.dtype in _floating_dtypes:
        return Array._new(np.sin(x._array))
    raise TypeError("Only floating-point dtypes are allowed in sin")
+
+
def sinh(x: Array, /) -> Array:
    """Array API compatible wrapper for :py:func:`np.sinh <numpy.sinh>`; see its docstring for more information."""
    if x.dtype in _floating_dtypes:
        return Array._new(np.sinh(x._array))
    raise TypeError("Only floating-point dtypes are allowed in sinh")
+
+
def square(x: Array, /) -> Array:
    """Array API compatible wrapper for :py:func:`np.square <numpy.square>`; see its docstring for more information."""
    if x.dtype in _numeric_dtypes:
        return Array._new(np.square(x._array))
    raise TypeError("Only numeric dtypes are allowed in square")
+
+
def sqrt(x: Array, /) -> Array:
    """Array API compatible wrapper for :py:func:`np.sqrt <numpy.sqrt>`; see its docstring for more information."""
    if x.dtype in _floating_dtypes:
        return Array._new(np.sqrt(x._array))
    raise TypeError("Only floating-point dtypes are allowed in sqrt")
+
+
def subtract(x1: Array, x2: Array, /) -> Array:
    """Array API compatible wrapper for :py:func:`np.subtract <numpy.subtract>`; see its docstring for more information."""
    if not (x1.dtype in _numeric_dtypes and x2.dtype in _numeric_dtypes):
        raise TypeError("Only numeric dtypes are allowed in subtract")
    # _result_type raises on dtype pairs the spec disallows together.
    _result_type(x1.dtype, x2.dtype)
    a, b = Array._normalize_two_args(x1, x2)
    return Array._new(np.subtract(a._array, b._array))
+
+
def tan(x: Array, /) -> Array:
    """Array API compatible wrapper for :py:func:`np.tan <numpy.tan>`; see its docstring for more information."""
    if x.dtype in _floating_dtypes:
        return Array._new(np.tan(x._array))
    raise TypeError("Only floating-point dtypes are allowed in tan")
+
+
def tanh(x: Array, /) -> Array:
    """Array API compatible wrapper for :py:func:`np.tanh <numpy.tanh>`; see its docstring for more information."""
    if x.dtype in _floating_dtypes:
        return Array._new(np.tanh(x._array))
    raise TypeError("Only floating-point dtypes are allowed in tanh")
+
+
def trunc(x: Array, /) -> Array:
    """Array API compatible wrapper for :py:func:`np.trunc <numpy.trunc>`; see its docstring for more information."""
    if x.dtype not in _numeric_dtypes:
        raise TypeError("Only numeric dtypes are allowed in trunc")
    if x.dtype in _integer_dtypes:
        # Integers truncate to themselves; this also keeps the input dtype.
        return x
    return Array._new(np.trunc(x._array))
diff --git a/numpy/array_api/_linear_algebra_functions.py b/numpy/array_api/_linear_algebra_functions.py
new file mode 100644
index 000000000..089081725
--- /dev/null
+++ b/numpy/array_api/_linear_algebra_functions.py
@@ -0,0 +1,68 @@
+from __future__ import annotations
+
+from ._array_object import Array
+from ._dtypes import _numeric_dtypes, _result_type
+
+from typing import Optional, Sequence, Tuple, Union
+
+import numpy as np
+
+# einsum is not yet implemented in the array API spec.
+
+# def einsum():
+# """
+# Array API compatible wrapper for :py:func:`np.einsum <numpy.einsum>`.
+#
+# See its docstring for more information.
+# """
+# return np.einsum()
+
+
def matmul(x1: Array, x2: Array, /) -> Array:
    """Array API compatible wrapper for :py:func:`np.matmul <numpy.matmul>`; see its docstring for more information."""
    # Unlike np.matmul, the spec restricts matmul to numeric dtypes.
    if not (x1.dtype in _numeric_dtypes and x2.dtype in _numeric_dtypes):
        raise TypeError("Only numeric dtypes are allowed in matmul")
    # _result_type raises on dtype pairs the spec disallows together.
    _result_type(x1.dtype, x2.dtype)
    return Array._new(np.matmul(x1._array, x2._array))
+
+
# Note: axes must be a tuple, unlike np.tensordot where it can be an array or array-like.
def tensordot(
    x1: Array,
    x2: Array,
    /,
    *,
    axes: Union[int, Tuple[Sequence[int], Sequence[int]]] = 2,
) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.tensordot <numpy.tensordot>`.

    See its docstring for more information.
    """
    # Note: the restriction to numeric dtypes only is different from
    # np.tensordot.
    if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes:
        raise TypeError("Only numeric dtypes are allowed in tensordot")
    # Call result type here just to raise on disallowed type combinations
    _result_type(x1.dtype, x2.dtype)

    return Array._new(np.tensordot(x1._array, x2._array, axes=axes))
+
+
def transpose(x: Array, /, *, axes: Optional[Tuple[int, ...]] = None) -> Array:
    """Array API compatible wrapper for :py:func:`np.transpose <numpy.transpose>`; see its docstring for more information."""
    transposed = np.transpose(x._array, axes=axes)
    return Array._new(transposed)
+
+
# Note: vecdot is not in NumPy
def vecdot(x1: Array, x2: Array, /, *, axis: Optional[int] = None) -> Array:
    """
    Compute the dot product of ``x1`` and ``x2`` along ``axis``.

    ``axis`` defaults to the last axis. The contraction is delegated to
    :func:`tensordot`, which also enforces the numeric-dtype restriction.
    """
    if axis is None:
        axis = -1
    return tensordot(x1, x2, axes=((axis,), (axis,)))
diff --git a/numpy/array_api/_manipulation_functions.py b/numpy/array_api/_manipulation_functions.py
new file mode 100644
index 000000000..c11866261
--- /dev/null
+++ b/numpy/array_api/_manipulation_functions.py
@@ -0,0 +1,86 @@
+from __future__ import annotations
+
+from ._array_object import Array
+from ._data_type_functions import result_type
+
+from typing import List, Optional, Tuple, Union
+
+import numpy as np
+
# Note: the name differs from NumPy's ``concatenate``, per the array API spec.
def concat(
    arrays: Union[Tuple[Array, ...], List[Array]], /, *, axis: Optional[int] = 0
) -> Array:
    """Array API compatible wrapper for :py:func:`np.concatenate <numpy.concatenate>`; see its docstring for more information."""
    # The spec's casting rules differ from the np.concatenate defaults:
    # the result dtype comes from result_type (which disallows cross-kind
    # casting), and is passed explicitly via the ``dtype`` argument.
    dtype = result_type(*arrays)
    raw = tuple(a._array for a in arrays)
    return Array._new(np.concatenate(raw, axis=axis, dtype=dtype))
+
+
def expand_dims(x: Array, /, *, axis: int) -> Array:
    """Array API compatible wrapper for :py:func:`np.expand_dims <numpy.expand_dims>`; see its docstring for more information."""
    expanded = np.expand_dims(x._array, axis)
    return Array._new(expanded)
+
+
def flip(x: Array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None) -> Array:
    """Array API compatible wrapper for :py:func:`np.flip <numpy.flip>`; see its docstring for more information."""
    flipped = np.flip(x._array, axis=axis)
    return Array._new(flipped)
+
+
def reshape(x: Array, /, shape: Tuple[int, ...]) -> Array:
    """Array API compatible wrapper for :py:func:`np.reshape <numpy.reshape>`; see its docstring for more information."""
    reshaped = np.reshape(x._array, shape)
    return Array._new(reshaped)
+
+
def roll(
    x: Array,
    /,
    shift: Union[int, Tuple[int, ...]],
    *,
    axis: Optional[Union[int, Tuple[int, ...]]] = None,
) -> Array:
    """Array API compatible wrapper for :py:func:`np.roll <numpy.roll>`; see its docstring for more information."""
    rolled = np.roll(x._array, shift, axis=axis)
    return Array._new(rolled)
+
+
def squeeze(x: Array, /, axis: Union[int, Tuple[int, ...]]) -> Array:
    """Array API compatible wrapper for :py:func:`np.squeeze <numpy.squeeze>`; see its docstring for more information."""
    squeezed = np.squeeze(x._array, axis=axis)
    return Array._new(squeezed)
+
+
def stack(arrays: Union[Tuple[Array, ...], List[Array]], /, *, axis: int = 0) -> Array:
    """Array API compatible wrapper for :py:func:`np.stack <numpy.stack>`; see its docstring for more information."""
    # result_type raises on dtype combinations the spec disallows together.
    result_type(*arrays)
    raw = tuple(a._array for a in arrays)
    return Array._new(np.stack(raw, axis=axis))
diff --git a/numpy/array_api/_searching_functions.py b/numpy/array_api/_searching_functions.py
new file mode 100644
index 000000000..3dcef61c3
--- /dev/null
+++ b/numpy/array_api/_searching_functions.py
@@ -0,0 +1,46 @@
+from __future__ import annotations
+
+from ._array_object import Array
+from ._dtypes import _result_type
+
+from typing import Optional, Tuple
+
+import numpy as np
+
+
def argmax(x: Array, /, *, axis: Optional[int] = None, keepdims: bool = False) -> Array:
    """Array API compatible wrapper for :py:func:`np.argmax <numpy.argmax>`; see its docstring for more information."""
    # asarray ensures _new receives an ndarray even when np.argmax
    # produces a NumPy scalar (axis=None case).
    res = np.argmax(x._array, axis=axis, keepdims=keepdims)
    return Array._new(np.asarray(res))
+
+
def argmin(x: Array, /, *, axis: Optional[int] = None, keepdims: bool = False) -> Array:
    """Array API compatible wrapper for :py:func:`np.argmin <numpy.argmin>`; see its docstring for more information."""
    # asarray ensures _new receives an ndarray even when np.argmin
    # produces a NumPy scalar (axis=None case).
    res = np.argmin(x._array, axis=axis, keepdims=keepdims)
    return Array._new(np.asarray(res))
+
+
def nonzero(x: Array, /) -> Tuple[Array, ...]:
    """Array API compatible wrapper for :py:func:`np.nonzero <numpy.nonzero>`; see its docstring for more information."""
    # np.nonzero returns one index array per dimension; wrap each one.
    return tuple(Array._new(idx) for idx in np.nonzero(x._array))
+
+
def where(condition: Array, x1: Array, x2: Array, /) -> Array:
    """Array API compatible wrapper for :py:func:`np.where <numpy.where>`; see its docstring for more information."""
    # _result_type raises on dtype pairs the spec disallows together.
    _result_type(x1.dtype, x2.dtype)
    return Array._new(np.where(condition._array, x1._array, x2._array))
diff --git a/numpy/array_api/_set_functions.py b/numpy/array_api/_set_functions.py
new file mode 100644
index 000000000..357f238f5
--- /dev/null
+++ b/numpy/array_api/_set_functions.py
@@ -0,0 +1,31 @@
+from __future__ import annotations
+
+from ._array_object import Array
+
+from typing import Tuple, Union
+
+import numpy as np
+
+
def unique(
    x: Array,
    /,
    *,
    return_counts: bool = False,
    return_index: bool = False,
    return_inverse: bool = False,
) -> Union[Array, Tuple[Array, ...]]:
    """Array API compatible wrapper for :py:func:`np.unique <numpy.unique>`; see its docstring for more information."""
    res = np.unique(
        x._array,
        return_counts=return_counts,
        return_index=return_index,
        return_inverse=return_inverse,
    )
    # np.unique returns a tuple when any of the return_* flags is set;
    # wrap each element (or the single array) in the spec's Array type.
    if isinstance(res, tuple):
        return tuple(Array._new(arr) for arr in res)
    return Array._new(res)
diff --git a/numpy/array_api/_sorting_functions.py b/numpy/array_api/_sorting_functions.py
new file mode 100644
index 000000000..9cd49786c
--- /dev/null
+++ b/numpy/array_api/_sorting_functions.py
@@ -0,0 +1,37 @@
+from __future__ import annotations
+
+from ._array_object import Array
+
+import numpy as np
+
+
+def argsort(
+ x: Array, /, *, axis: int = -1, descending: bool = False, stable: bool = True
+) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.argsort <numpy.argsort>`.
+
+ See its docstring for more information.
+ """
+ # Note: this keyword argument is different, and the default is different.
+ kind = "stable" if stable else "quicksort"
+ res = np.argsort(x._array, axis=axis, kind=kind)
+ if descending:
+ res = np.flip(res, axis=axis)
+ return Array._new(res)
+
+
+def sort(
+ x: Array, /, *, axis: int = -1, descending: bool = False, stable: bool = True
+) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.sort <numpy.sort>`.
+
+ See its docstring for more information.
+ """
+ # Note: this keyword argument is different, and the default is different.
+ kind = "stable" if stable else "quicksort"
+ res = np.sort(x._array, axis=axis, kind=kind)
+ if descending:
+ res = np.flip(res, axis=axis)
+ return Array._new(res)
diff --git a/numpy/array_api/_statistical_functions.py b/numpy/array_api/_statistical_functions.py
new file mode 100644
index 000000000..63790b447
--- /dev/null
+++ b/numpy/array_api/_statistical_functions.py
@@ -0,0 +1,81 @@
+from __future__ import annotations
+
+from ._array_object import Array
+
+from typing import Optional, Tuple, Union
+
+import numpy as np
+
+
+def max(
+ x: Array,
+ /,
+ *,
+ axis: Optional[Union[int, Tuple[int, ...]]] = None,
+ keepdims: bool = False,
+) -> Array:
+ return Array._new(np.max(x._array, axis=axis, keepdims=keepdims))
+
+
+def mean(
+ x: Array,
+ /,
+ *,
+ axis: Optional[Union[int, Tuple[int, ...]]] = None,
+ keepdims: bool = False,
+) -> Array:
+ return Array._new(np.mean(x._array, axis=axis, keepdims=keepdims))
+
+
+def min(
+ x: Array,
+ /,
+ *,
+ axis: Optional[Union[int, Tuple[int, ...]]] = None,
+ keepdims: bool = False,
+) -> Array:
+ return Array._new(np.min(x._array, axis=axis, keepdims=keepdims))
+
+
+def prod(
+ x: Array,
+ /,
+ *,
+ axis: Optional[Union[int, Tuple[int, ...]]] = None,
+ keepdims: bool = False,
+) -> Array:
+ return Array._new(np.prod(x._array, axis=axis, keepdims=keepdims))
+
+
+def std(
+ x: Array,
+ /,
+ *,
+ axis: Optional[Union[int, Tuple[int, ...]]] = None,
+ correction: Union[int, float] = 0.0,
+ keepdims: bool = False,
+) -> Array:
+ # Note: the keyword argument correction is different here
+ return Array._new(np.std(x._array, axis=axis, ddof=correction, keepdims=keepdims))
+
+
+def sum(
+ x: Array,
+ /,
+ *,
+ axis: Optional[Union[int, Tuple[int, ...]]] = None,
+ keepdims: bool = False,
+) -> Array:
+ return Array._new(np.sum(x._array, axis=axis, keepdims=keepdims))
+
+
+def var(
+ x: Array,
+ /,
+ *,
+ axis: Optional[Union[int, Tuple[int, ...]]] = None,
+ correction: Union[int, float] = 0.0,
+ keepdims: bool = False,
+) -> Array:
+ # Note: the keyword argument correction is different here
+ return Array._new(np.var(x._array, axis=axis, ddof=correction, keepdims=keepdims))
diff --git a/numpy/array_api/_typing.py b/numpy/array_api/_typing.py
new file mode 100644
index 000000000..d530a91ae
--- /dev/null
+++ b/numpy/array_api/_typing.py
@@ -0,0 +1,44 @@
+"""
+This file defines the types for type annotations.
+
+These names aren't part of the module namespace, but they are used in the
+annotations in the function signatures. The functions in the module are only
+valid for inputs that match the given type annotations.
+"""
+
+__all__ = [
+ "Array",
+ "Device",
+ "Dtype",
+ "SupportsDLPack",
+ "SupportsBufferProtocol",
+ "PyCapsule",
+]
+
+from typing import Any, Sequence, Type, Union
+
+from . import (
+ Array,
+ int8,
+ int16,
+ int32,
+ int64,
+ uint8,
+ uint16,
+ uint32,
+ uint64,
+ float32,
+ float64,
+)
+
+# This should really be recursive, but that isn't supported yet. See the
+# similar comment in numpy/typing/_array_like.py
+NestedSequence = Sequence[Sequence[Any]]
+
+Device = Any
+Dtype = Type[
+ Union[int8, int16, int32, int64, uint8, uint16, uint32, uint64, float32, float64]
+]
+SupportsDLPack = Any
+SupportsBufferProtocol = Any
+PyCapsule = Any
diff --git a/numpy/array_api/_utility_functions.py b/numpy/array_api/_utility_functions.py
new file mode 100644
index 000000000..5ecb4bd9f
--- /dev/null
+++ b/numpy/array_api/_utility_functions.py
@@ -0,0 +1,37 @@
+from __future__ import annotations
+
+from ._array_object import Array
+
+from typing import Optional, Tuple, Union
+
+import numpy as np
+
+
+def all(
+ x: Array,
+ /,
+ *,
+ axis: Optional[Union[int, Tuple[int, ...]]] = None,
+ keepdims: bool = False,
+) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.all <numpy.all>`.
+
+ See its docstring for more information.
+ """
+ return Array._new(np.asarray(np.all(x._array, axis=axis, keepdims=keepdims)))
+
+
+def any(
+ x: Array,
+ /,
+ *,
+ axis: Optional[Union[int, Tuple[int, ...]]] = None,
+ keepdims: bool = False,
+) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.any <numpy.any>`.
+
+ See its docstring for more information.
+ """
+ return Array._new(np.asarray(np.any(x._array, axis=axis, keepdims=keepdims)))
diff --git a/numpy/array_api/setup.py b/numpy/array_api/setup.py
new file mode 100644
index 000000000..c8bc29102
--- /dev/null
+++ b/numpy/array_api/setup.py
@@ -0,0 +1,12 @@
+def configuration(parent_package="", top_path=None):
+ from numpy.distutils.misc_util import Configuration
+
+ config = Configuration("array_api", parent_package, top_path)
+ config.add_subpackage("tests")
+ return config
+
+
+if __name__ == "__main__":
+ from numpy.distutils.core import setup
+
+ setup(configuration=configuration)
diff --git a/numpy/array_api/tests/__init__.py b/numpy/array_api/tests/__init__.py
new file mode 100644
index 000000000..536062e38
--- /dev/null
+++ b/numpy/array_api/tests/__init__.py
@@ -0,0 +1,7 @@
+"""
+Tests for the array API namespace.
+
+Note, full compliance with the array API can be tested with the official array API test
+suite https://github.com/data-apis/array-api-tests. This test suite primarily
+focuses on those things that are not tested by the official test suite.
+"""
diff --git a/numpy/array_api/tests/test_array_object.py b/numpy/array_api/tests/test_array_object.py
new file mode 100644
index 000000000..088e09b9f
--- /dev/null
+++ b/numpy/array_api/tests/test_array_object.py
@@ -0,0 +1,269 @@
+from numpy.testing import assert_raises
+import numpy as np
+
+from .. import ones, asarray, result_type
+from .._dtypes import (
+ _all_dtypes,
+ _boolean_dtypes,
+ _floating_dtypes,
+ _integer_dtypes,
+ _integer_or_boolean_dtypes,
+ _numeric_dtypes,
+ int8,
+ int16,
+ int32,
+ int64,
+ uint64,
+)
+
+
+def test_validate_index():
+ # The indexing tests in the official array API test suite test that the
+ # array object correctly handles the subset of indices that are required
+ # by the spec. But the NumPy array API implementation specifically
+ # disallows any index not required by the spec, via Array._validate_index.
+ # This test focuses on testing that non-valid indices are correctly
+ # rejected. See
+ # https://data-apis.org/array-api/latest/API_specification/indexing.html
+ # and the docstring of Array._validate_index for the exact indexing
+ # behavior that should be allowed. This does not test indices that are
+ # already invalid in NumPy itself because Array will generally just pass
+ # such indices directly to the underlying np.ndarray.
+
+ a = ones((3, 4))
+
+ # Out of bounds slices are not allowed
+ assert_raises(IndexError, lambda: a[:4])
+ assert_raises(IndexError, lambda: a[:-4])
+ assert_raises(IndexError, lambda: a[:3:-1])
+ assert_raises(IndexError, lambda: a[:-5:-1])
+ assert_raises(IndexError, lambda: a[3:])
+ assert_raises(IndexError, lambda: a[-4:])
+ assert_raises(IndexError, lambda: a[3::-1])
+ assert_raises(IndexError, lambda: a[-4::-1])
+
+ assert_raises(IndexError, lambda: a[...,:5])
+ assert_raises(IndexError, lambda: a[...,:-5])
+ assert_raises(IndexError, lambda: a[...,:4:-1])
+ assert_raises(IndexError, lambda: a[...,:-6:-1])
+ assert_raises(IndexError, lambda: a[...,4:])
+ assert_raises(IndexError, lambda: a[...,-5:])
+ assert_raises(IndexError, lambda: a[...,4::-1])
+ assert_raises(IndexError, lambda: a[...,-5::-1])
+
+ # Boolean indices cannot be part of a larger tuple index
+ assert_raises(IndexError, lambda: a[a[:,0]==1,0])
+ assert_raises(IndexError, lambda: a[a[:,0]==1,...])
+ assert_raises(IndexError, lambda: a[..., a[0]==1])
+ assert_raises(IndexError, lambda: a[[True, True, True]])
+ assert_raises(IndexError, lambda: a[(True, True, True),])
+
+ # Integer array indices are not allowed (except for 0-D)
+ idx = asarray([[0, 1]])
+ assert_raises(IndexError, lambda: a[idx])
+ assert_raises(IndexError, lambda: a[idx,])
+ assert_raises(IndexError, lambda: a[[0, 1]])
+ assert_raises(IndexError, lambda: a[(0, 1), (0, 1)])
+ assert_raises(IndexError, lambda: a[[0, 1]])
+ assert_raises(IndexError, lambda: a[np.array([[0, 1]])])
+
+ # np.newaxis is not allowed
+ assert_raises(IndexError, lambda: a[None])
+ assert_raises(IndexError, lambda: a[None, ...])
+ assert_raises(IndexError, lambda: a[..., None])
+
+
+def test_operators():
+ # For every operator, we test that it works for the required type
+ # combinations and raises TypeError otherwise
+ binary_op_dtypes = {
+ "__add__": "numeric",
+ "__and__": "integer_or_boolean",
+ "__eq__": "all",
+ "__floordiv__": "numeric",
+ "__ge__": "numeric",
+ "__gt__": "numeric",
+ "__le__": "numeric",
+ "__lshift__": "integer",
+ "__lt__": "numeric",
+ "__mod__": "numeric",
+ "__mul__": "numeric",
+ "__ne__": "all",
+ "__or__": "integer_or_boolean",
+ "__pow__": "floating",
+ "__rshift__": "integer",
+ "__sub__": "numeric",
+ "__truediv__": "floating",
+ "__xor__": "integer_or_boolean",
+ }
+
+ # Recompute each time because of in-place ops
+ def _array_vals():
+ for d in _integer_dtypes:
+ yield asarray(1, dtype=d)
+ for d in _boolean_dtypes:
+ yield asarray(False, dtype=d)
+ for d in _floating_dtypes:
+ yield asarray(1.0, dtype=d)
+
+ for op, dtypes in binary_op_dtypes.items():
+ ops = [op]
+ if op not in ["__eq__", "__ne__", "__le__", "__ge__", "__lt__", "__gt__"]:
+ rop = "__r" + op[2:]
+ iop = "__i" + op[2:]
+ ops += [rop, iop]
+ for s in [1, 1.0, False]:
+ for _op in ops:
+ for a in _array_vals():
+ # Test array op scalar. From the spec, the following combinations
+ # are supported:
+
+ # - Python bool for a bool array dtype,
+ # - a Python int within the bounds of the given dtype for integer array dtypes,
+ # - a Python int or float for floating-point array dtypes
+
+ # We do not do bounds checking for int scalars, but rather use the default
+ # NumPy behavior for casting in that case.
+
+ if ((dtypes == "all"
+ or dtypes == "numeric" and a.dtype in _numeric_dtypes
+ or dtypes == "integer" and a.dtype in _integer_dtypes
+ or dtypes == "integer_or_boolean" and a.dtype in _integer_or_boolean_dtypes
+ or dtypes == "boolean" and a.dtype in _boolean_dtypes
+ or dtypes == "floating" and a.dtype in _floating_dtypes
+ )
+ # bool is a subtype of int, which is why we avoid
+ # isinstance here.
+ and (a.dtype in _boolean_dtypes and type(s) == bool
+ or a.dtype in _integer_dtypes and type(s) == int
+ or a.dtype in _floating_dtypes and type(s) in [float, int]
+ )):
+ # Only test for no error
+ getattr(a, _op)(s)
+ else:
+ assert_raises(TypeError, lambda: getattr(a, _op)(s))
+
+ # Test array op array.
+ for _op in ops:
+ for x in _array_vals():
+ for y in _array_vals():
+ # See the promotion table in NEP 47 or the array
+ # API spec page on type promotion. Mixed kind
+ # promotion is not defined.
+ if (x.dtype == uint64 and y.dtype in [int8, int16, int32, int64]
+ or y.dtype == uint64 and x.dtype in [int8, int16, int32, int64]
+ or x.dtype in _integer_dtypes and y.dtype not in _integer_dtypes
+ or y.dtype in _integer_dtypes and x.dtype not in _integer_dtypes
+ or x.dtype in _boolean_dtypes and y.dtype not in _boolean_dtypes
+ or y.dtype in _boolean_dtypes and x.dtype not in _boolean_dtypes
+ or x.dtype in _floating_dtypes and y.dtype not in _floating_dtypes
+ or y.dtype in _floating_dtypes and x.dtype not in _floating_dtypes
+ ):
+ assert_raises(TypeError, lambda: getattr(x, _op)(y))
+ # Ensure in-place operators only promote to the same dtype as the left operand.
+ elif (
+ _op.startswith("__i")
+ and result_type(x.dtype, y.dtype) != x.dtype
+ ):
+ assert_raises(TypeError, lambda: getattr(x, _op)(y))
+ # Ensure only those dtypes that are required for every operator are allowed.
+ elif (dtypes == "all" and (x.dtype in _boolean_dtypes and y.dtype in _boolean_dtypes
+ or x.dtype in _numeric_dtypes and y.dtype in _numeric_dtypes)
+ or (dtypes == "numeric" and x.dtype in _numeric_dtypes and y.dtype in _numeric_dtypes)
+ or dtypes == "integer" and x.dtype in _integer_dtypes and y.dtype in _numeric_dtypes
+ or dtypes == "integer_or_boolean" and (x.dtype in _integer_dtypes and y.dtype in _integer_dtypes
+ or x.dtype in _boolean_dtypes and y.dtype in _boolean_dtypes)
+ or dtypes == "boolean" and x.dtype in _boolean_dtypes and y.dtype in _boolean_dtypes
+ or dtypes == "floating" and x.dtype in _floating_dtypes and y.dtype in _floating_dtypes
+ ):
+ getattr(x, _op)(y)
+ else:
+ assert_raises(TypeError, lambda: getattr(x, _op)(y))
+
+ unary_op_dtypes = {
+ "__abs__": "numeric",
+ "__invert__": "integer_or_boolean",
+ "__neg__": "numeric",
+ "__pos__": "numeric",
+ }
+ for op, dtypes in unary_op_dtypes.items():
+ for a in _array_vals():
+ if (
+ dtypes == "numeric"
+ and a.dtype in _numeric_dtypes
+ or dtypes == "integer_or_boolean"
+ and a.dtype in _integer_or_boolean_dtypes
+ ):
+ # Only test for no error
+ getattr(a, op)()
+ else:
+ assert_raises(TypeError, lambda: getattr(a, op)())
+
+ # Finally, matmul() must be tested separately, because it works a bit
+ # different from the other operations.
+ def _matmul_array_vals():
+ for a in _array_vals():
+ yield a
+ for d in _all_dtypes:
+ yield ones((3, 4), dtype=d)
+ yield ones((4, 2), dtype=d)
+ yield ones((4, 4), dtype=d)
+
+ # Scalars always error
+ for _op in ["__matmul__", "__rmatmul__", "__imatmul__"]:
+ for s in [1, 1.0, False]:
+ for a in _matmul_array_vals():
+ if (type(s) in [float, int] and a.dtype in _floating_dtypes
+ or type(s) == int and a.dtype in _integer_dtypes):
+ # Type promotion is valid, but @ is not allowed on 0-D
+ # inputs, so the error is a ValueError
+ assert_raises(ValueError, lambda: getattr(a, _op)(s))
+ else:
+ assert_raises(TypeError, lambda: getattr(a, _op)(s))
+
+ for x in _matmul_array_vals():
+ for y in _matmul_array_vals():
+ if (x.dtype == uint64 and y.dtype in [int8, int16, int32, int64]
+ or y.dtype == uint64 and x.dtype in [int8, int16, int32, int64]
+ or x.dtype in _integer_dtypes and y.dtype not in _integer_dtypes
+ or y.dtype in _integer_dtypes and x.dtype not in _integer_dtypes
+ or x.dtype in _floating_dtypes and y.dtype not in _floating_dtypes
+ or y.dtype in _floating_dtypes and x.dtype not in _floating_dtypes
+ or x.dtype in _boolean_dtypes
+ or y.dtype in _boolean_dtypes
+ ):
+ assert_raises(TypeError, lambda: x.__matmul__(y))
+ assert_raises(TypeError, lambda: y.__rmatmul__(x))
+ assert_raises(TypeError, lambda: x.__imatmul__(y))
+ elif x.shape == () or y.shape == () or x.shape[1] != y.shape[0]:
+ assert_raises(ValueError, lambda: x.__matmul__(y))
+ assert_raises(ValueError, lambda: y.__rmatmul__(x))
+ if result_type(x.dtype, y.dtype) != x.dtype:
+ assert_raises(TypeError, lambda: x.__imatmul__(y))
+ else:
+ assert_raises(ValueError, lambda: x.__imatmul__(y))
+ else:
+ x.__matmul__(y)
+ y.__rmatmul__(x)
+ if result_type(x.dtype, y.dtype) != x.dtype:
+ assert_raises(TypeError, lambda: x.__imatmul__(y))
+ elif y.shape[0] != y.shape[1]:
+ # This one fails because x @ y has a different shape from x
+ assert_raises(ValueError, lambda: x.__imatmul__(y))
+ else:
+ x.__imatmul__(y)
+
+
+def test_python_scalar_constructors():
+ a = asarray(False)
+ b = asarray(0)
+ c = asarray(0.0)
+
+ assert bool(a) == bool(b) == bool(c) == False
+ assert int(a) == int(b) == int(c) == 0
+ assert float(a) == float(b) == float(c) == 0.0
+
+ # bool/int/float should only be allowed on 0-D arrays.
+ assert_raises(TypeError, lambda: bool(asarray([False])))
+ assert_raises(TypeError, lambda: int(asarray([0])))
+ assert_raises(TypeError, lambda: float(asarray([0.0])))
diff --git a/numpy/array_api/tests/test_creation_functions.py b/numpy/array_api/tests/test_creation_functions.py
new file mode 100644
index 000000000..3cb8865cd
--- /dev/null
+++ b/numpy/array_api/tests/test_creation_functions.py
@@ -0,0 +1,141 @@
+from numpy.testing import assert_raises
+import numpy as np
+
+from .. import all
+from .._creation_functions import (
+ asarray,
+ arange,
+ empty,
+ empty_like,
+ eye,
+ from_dlpack,
+ full,
+ full_like,
+ linspace,
+ meshgrid,
+ ones,
+ ones_like,
+ zeros,
+ zeros_like,
+)
+from .._array_object import Array
+from .._dtypes import (
+ _all_dtypes,
+ _boolean_dtypes,
+ _floating_dtypes,
+ _integer_dtypes,
+ _integer_or_boolean_dtypes,
+ _numeric_dtypes,
+ int8,
+ int16,
+ int32,
+ int64,
+ uint64,
+)
+
+
+def test_asarray_errors():
+ # Test various protections against incorrect usage
+ assert_raises(TypeError, lambda: Array([1]))
+ assert_raises(TypeError, lambda: asarray(["a"]))
+ assert_raises(ValueError, lambda: asarray([1.0], dtype=np.float16))
+ assert_raises(OverflowError, lambda: asarray(2**100))
+ # Preferably this would be OverflowError
+ # assert_raises(OverflowError, lambda: asarray([2**100]))
+ assert_raises(TypeError, lambda: asarray([2**100]))
+ asarray([1], device="cpu") # Doesn't error
+ assert_raises(ValueError, lambda: asarray([1], device="gpu"))
+
+ assert_raises(ValueError, lambda: asarray([1], dtype=int))
+ assert_raises(ValueError, lambda: asarray([1], dtype="i"))
+
+
+def test_asarray_copy():
+ a = asarray([1])
+ b = asarray(a, copy=True)
+ a[0] = 0
+ assert all(b[0] == 1)
+ assert all(a[0] == 0)
+ # Once copy=False is implemented, replace this with
+ # a = asarray([1])
+ # b = asarray(a, copy=False)
+ # a[0] = 0
+ # assert all(b[0] == 0)
+ assert_raises(NotImplementedError, lambda: asarray(a, copy=False))
+
+
+def test_arange_errors():
+ arange(1, device="cpu") # Doesn't error
+ assert_raises(ValueError, lambda: arange(1, device="gpu"))
+ assert_raises(ValueError, lambda: arange(1, dtype=int))
+ assert_raises(ValueError, lambda: arange(1, dtype="i"))
+
+
+def test_empty_errors():
+ empty((1,), device="cpu") # Doesn't error
+ assert_raises(ValueError, lambda: empty((1,), device="gpu"))
+ assert_raises(ValueError, lambda: empty((1,), dtype=int))
+ assert_raises(ValueError, lambda: empty((1,), dtype="i"))
+
+
+def test_empty_like_errors():
+ empty_like(asarray(1), device="cpu") # Doesn't error
+ assert_raises(ValueError, lambda: empty_like(asarray(1), device="gpu"))
+ assert_raises(ValueError, lambda: empty_like(asarray(1), dtype=int))
+ assert_raises(ValueError, lambda: empty_like(asarray(1), dtype="i"))
+
+
+def test_eye_errors():
+ eye(1, device="cpu") # Doesn't error
+ assert_raises(ValueError, lambda: eye(1, device="gpu"))
+ assert_raises(ValueError, lambda: eye(1, dtype=int))
+ assert_raises(ValueError, lambda: eye(1, dtype="i"))
+
+
+def test_full_errors():
+ full((1,), 0, device="cpu") # Doesn't error
+ assert_raises(ValueError, lambda: full((1,), 0, device="gpu"))
+ assert_raises(ValueError, lambda: full((1,), 0, dtype=int))
+ assert_raises(ValueError, lambda: full((1,), 0, dtype="i"))
+
+
+def test_full_like_errors():
+ full_like(asarray(1), 0, device="cpu") # Doesn't error
+ assert_raises(ValueError, lambda: full_like(asarray(1), 0, device="gpu"))
+ assert_raises(ValueError, lambda: full_like(asarray(1), 0, dtype=int))
+ assert_raises(ValueError, lambda: full_like(asarray(1), 0, dtype="i"))
+
+
+def test_linspace_errors():
+ linspace(0, 1, 10, device="cpu") # Doesn't error
+ assert_raises(ValueError, lambda: linspace(0, 1, 10, device="gpu"))
+ assert_raises(ValueError, lambda: linspace(0, 1, 10, dtype=float))
+ assert_raises(ValueError, lambda: linspace(0, 1, 10, dtype="f"))
+
+
+def test_ones_errors():
+ ones((1,), device="cpu") # Doesn't error
+ assert_raises(ValueError, lambda: ones((1,), device="gpu"))
+ assert_raises(ValueError, lambda: ones((1,), dtype=int))
+ assert_raises(ValueError, lambda: ones((1,), dtype="i"))
+
+
+def test_ones_like_errors():
+ ones_like(asarray(1), device="cpu") # Doesn't error
+ assert_raises(ValueError, lambda: ones_like(asarray(1), device="gpu"))
+ assert_raises(ValueError, lambda: ones_like(asarray(1), dtype=int))
+ assert_raises(ValueError, lambda: ones_like(asarray(1), dtype="i"))
+
+
+def test_zeros_errors():
+ zeros((1,), device="cpu") # Doesn't error
+ assert_raises(ValueError, lambda: zeros((1,), device="gpu"))
+ assert_raises(ValueError, lambda: zeros((1,), dtype=int))
+ assert_raises(ValueError, lambda: zeros((1,), dtype="i"))
+
+
+def test_zeros_like_errors():
+ zeros_like(asarray(1), device="cpu") # Doesn't error
+ assert_raises(ValueError, lambda: zeros_like(asarray(1), device="gpu"))
+ assert_raises(ValueError, lambda: zeros_like(asarray(1), dtype=int))
+ assert_raises(ValueError, lambda: zeros_like(asarray(1), dtype="i"))
diff --git a/numpy/array_api/tests/test_elementwise_functions.py b/numpy/array_api/tests/test_elementwise_functions.py
new file mode 100644
index 000000000..a9274aec9
--- /dev/null
+++ b/numpy/array_api/tests/test_elementwise_functions.py
@@ -0,0 +1,111 @@
+from inspect import getfullargspec
+
+from numpy.testing import assert_raises
+
+from .. import asarray, _elementwise_functions
+from .._elementwise_functions import bitwise_left_shift, bitwise_right_shift
+from .._dtypes import (
+ _dtype_categories,
+ _boolean_dtypes,
+ _floating_dtypes,
+ _integer_dtypes,
+)
+
+
+def nargs(func):
+ return len(getfullargspec(func).args)
+
+
+def test_function_types():
+ # Test that every function accepts only the required input types. We only
+ # test the negative cases here (error). The positive cases are tested in
+ # the array API test suite.
+
+ elementwise_function_input_types = {
+ "abs": "numeric",
+ "acos": "floating-point",
+ "acosh": "floating-point",
+ "add": "numeric",
+ "asin": "floating-point",
+ "asinh": "floating-point",
+ "atan": "floating-point",
+ "atan2": "floating-point",
+ "atanh": "floating-point",
+ "bitwise_and": "integer or boolean",
+ "bitwise_invert": "integer or boolean",
+ "bitwise_left_shift": "integer",
+ "bitwise_or": "integer or boolean",
+ "bitwise_right_shift": "integer",
+ "bitwise_xor": "integer or boolean",
+ "ceil": "numeric",
+ "cos": "floating-point",
+ "cosh": "floating-point",
+ "divide": "floating-point",
+ "equal": "all",
+ "exp": "floating-point",
+ "expm1": "floating-point",
+ "floor": "numeric",
+ "floor_divide": "numeric",
+ "greater": "numeric",
+ "greater_equal": "numeric",
+ "isfinite": "numeric",
+ "isinf": "numeric",
+ "isnan": "numeric",
+ "less": "numeric",
+ "less_equal": "numeric",
+ "log": "floating-point",
+ "logaddexp": "floating-point",
+ "log10": "floating-point",
+ "log1p": "floating-point",
+ "log2": "floating-point",
+ "logical_and": "boolean",
+ "logical_not": "boolean",
+ "logical_or": "boolean",
+ "logical_xor": "boolean",
+ "multiply": "numeric",
+ "negative": "numeric",
+ "not_equal": "all",
+ "positive": "numeric",
+ "pow": "floating-point",
+ "remainder": "numeric",
+ "round": "numeric",
+ "sign": "numeric",
+ "sin": "floating-point",
+ "sinh": "floating-point",
+ "sqrt": "floating-point",
+ "square": "numeric",
+ "subtract": "numeric",
+ "tan": "floating-point",
+ "tanh": "floating-point",
+ "trunc": "numeric",
+ }
+
+ def _array_vals():
+ for d in _integer_dtypes:
+ yield asarray(1, dtype=d)
+ for d in _boolean_dtypes:
+ yield asarray(False, dtype=d)
+ for d in _floating_dtypes:
+ yield asarray(1.0, dtype=d)
+
+ for x in _array_vals():
+ for func_name, types in elementwise_function_input_types.items():
+ dtypes = _dtype_categories[types]
+ func = getattr(_elementwise_functions, func_name)
+ if nargs(func) == 2:
+ for y in _array_vals():
+ if x.dtype not in dtypes or y.dtype not in dtypes:
+ assert_raises(TypeError, lambda: func(x, y))
+ else:
+ if x.dtype not in dtypes:
+ assert_raises(TypeError, lambda: func(x))
+
+
+def test_bitwise_shift_error():
+ # bitwise shift functions should raise when the second argument is negative
+ assert_raises(
+ ValueError, lambda: bitwise_left_shift(asarray([1, 1]), asarray([1, -1]))
+ )
+ assert_raises(
+ ValueError, lambda: bitwise_right_shift(asarray([1, 1]), asarray([1, -1]))
+ )
diff --git a/numpy/core/_add_newdocs.py b/numpy/core/_add_newdocs.py
index 759a91d27..06f2a6376 100644
--- a/numpy/core/_add_newdocs.py
+++ b/numpy/core/_add_newdocs.py
@@ -3252,7 +3252,7 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('dumps',
a.dumps()
Returns the pickle of the array as a string.
- pickle.loads or numpy.loads will convert the string back to an array.
+ pickle.loads will convert the string back to an array.
Parameters
----------
diff --git a/numpy/core/_add_newdocs_scalars.py b/numpy/core/_add_newdocs_scalars.py
index 602b1db6e..8773d6c96 100644
--- a/numpy/core/_add_newdocs_scalars.py
+++ b/numpy/core/_add_newdocs_scalars.py
@@ -205,12 +205,12 @@ add_newdoc_for_scalar_type('bytes_', ['string_'],
add_newdoc_for_scalar_type('void', [],
r"""
Either an opaque sequence of bytes, or a structure.
-
+
>>> np.void(b'abcd')
void(b'\x61\x62\x63\x64')
-
+
Structured `void` scalars can only be constructed via extraction from :ref:`structured_arrays`:
-
+
>>> arr = np.array((1, 2), dtype=[('x', np.int8), ('y', np.int8)])
>>> arr[()]
(1, 2) # looks like a tuple, but is `np.void`
@@ -226,20 +226,36 @@ add_newdoc_for_scalar_type('datetime64', [],
>>> np.datetime64(10, 'Y')
numpy.datetime64('1980')
>>> np.datetime64('1980', 'Y')
- numpy.datetime64('1980')
+ numpy.datetime64('1980')
>>> np.datetime64(10, 'D')
numpy.datetime64('1970-01-11')
-
+
See :ref:`arrays.datetime` for more information.
""")
add_newdoc_for_scalar_type('timedelta64', [],
"""
A timedelta stored as a 64-bit integer.
-
+
See :ref:`arrays.datetime` for more information.
""")
+add_newdoc('numpy.core.numerictypes', "integer", ('is_integer',
+ """
+ integer.is_integer() -> bool
+
+ Return ``True`` if the number is finite with integral value.
+
+ .. versionadded:: 1.22
+
+ Examples
+ --------
+ >>> np.int64(-2).is_integer()
+ True
+ >>> np.uint32(5).is_integer()
+ True
+ """))
+
# TODO: work out how to put this on the base class, np.floating
for float_name in ('half', 'single', 'double', 'longdouble'):
add_newdoc('numpy.core.numerictypes', float_name, ('as_integer_ratio',
@@ -257,3 +273,20 @@ for float_name in ('half', 'single', 'double', 'longdouble'):
>>> np.{ftype}(-.25).as_integer_ratio()
(-1, 4)
""".format(ftype=float_name)))
+
+ add_newdoc('numpy.core.numerictypes', float_name, ('is_integer',
+ f"""
+ {float_name}.is_integer() -> bool
+
+ Return ``True`` if the floating point number is finite with integral
+ value, and ``False`` otherwise.
+
+ .. versionadded:: 1.22
+
+ Examples
+ --------
+ >>> np.{float_name}(-2.0).is_integer()
+ True
+ >>> np.{float_name}(3.2).is_integer()
+ False
+ """))
diff --git a/numpy/core/_asarray.pyi b/numpy/core/_asarray.pyi
index 1928cfe12..fee9b7b6e 100644
--- a/numpy/core/_asarray.pyi
+++ b/numpy/core/_asarray.pyi
@@ -1,14 +1,8 @@
-import sys
-from typing import TypeVar, Union, Iterable, overload
+from typing import TypeVar, Union, Iterable, overload, Literal
from numpy import ndarray
from numpy.typing import ArrayLike, DTypeLike
-if sys.version_info >= (3, 8):
- from typing import Literal
-else:
- from typing_extensions import Literal
-
_ArrayType = TypeVar("_ArrayType", bound=ndarray)
_Requirements = Literal[
diff --git a/numpy/core/_type_aliases.py b/numpy/core/_type_aliases.py
index 67addef48..3765a0d34 100644
--- a/numpy/core/_type_aliases.py
+++ b/numpy/core/_type_aliases.py
@@ -115,15 +115,6 @@ def _add_aliases():
# add forward, reverse, and string mapping to numarray
sctypeDict[char] = info.type
- # Add deprecated numeric-style type aliases manually, at some point
- # we may want to deprecate the lower case "bytes0" version as well.
- for name in ["Bytes0", "Datetime64", "Str0", "Uint32", "Uint64"]:
- if english_lower(name) not in allTypes:
- # Only one of Uint32 or Uint64, aliases of `np.uintp`, was (and is) defined, note that this
- # is not UInt32/UInt64 (capital i), which is removed.
- continue
- allTypes[name] = allTypes[english_lower(name)]
- sctypeDict[name] = sctypeDict[english_lower(name)]
_add_aliases()
diff --git a/numpy/core/_type_aliases.pyi b/numpy/core/_type_aliases.pyi
index 6a1099cd3..c10d072f9 100644
--- a/numpy/core/_type_aliases.pyi
+++ b/numpy/core/_type_aliases.pyi
@@ -1,13 +1,7 @@
-import sys
-from typing import Dict, Union, Type, List
+from typing import Dict, Union, Type, List, TypedDict
from numpy import generic, signedinteger, unsignedinteger, floating, complexfloating
-if sys.version_info >= (3, 8):
- from typing import TypedDict
-else:
- from typing_extensions import TypedDict
-
class _SCTypes(TypedDict):
int: List[Type[signedinteger]]
uint: List[Type[unsignedinteger]]
diff --git a/numpy/core/_ufunc_config.pyi b/numpy/core/_ufunc_config.pyi
index e90f1c510..9c8cc8ab6 100644
--- a/numpy/core/_ufunc_config.pyi
+++ b/numpy/core/_ufunc_config.pyi
@@ -1,16 +1,10 @@
-import sys
-from typing import Optional, Union, Callable, Any
-
-if sys.version_info >= (3, 8):
- from typing import Literal, Protocol, TypedDict
-else:
- from typing_extensions import Literal, Protocol, TypedDict
+from typing import Optional, Union, Callable, Any, Literal, Protocol, TypedDict
_ErrKind = Literal["ignore", "warn", "raise", "call", "print", "log"]
_ErrFunc = Callable[[str, int], Any]
class _SupportsWrite(Protocol):
- def write(self, __msg: str) -> Any: ...
+ def write(self, msg: str, /) -> Any: ...
class _ErrDict(TypedDict):
divide: _ErrKind
diff --git a/numpy/core/arrayprint.pyi b/numpy/core/arrayprint.pyi
index ac2b6f5a8..df22efed6 100644
--- a/numpy/core/arrayprint.pyi
+++ b/numpy/core/arrayprint.pyi
@@ -1,6 +1,5 @@
-import sys
from types import TracebackType
-from typing import Any, Optional, Callable, Union, Type
+from typing import Any, Optional, Callable, Union, Type, Literal, TypedDict, SupportsIndex
# Using a private class is by no means ideal, but it is simply a consquence
# of a `contextlib.context` returning an instance of aformentioned class
@@ -23,11 +22,6 @@ from numpy import (
)
from numpy.typing import ArrayLike, _CharLike_co, _FloatLike_co
-if sys.version_info > (3, 8):
- from typing import Literal, TypedDict, SupportsIndex
-else:
- from typing_extensions import Literal, TypedDict, SupportsIndex
-
_FloatMode = Literal["fixed", "unique", "maxprec", "maxprec_equal"]
class _FormatDict(TypedDict, total=False):
diff --git a/numpy/core/code_generators/generate_umath.py b/numpy/core/code_generators/generate_umath.py
index 1b6917ebc..4891e8f23 100644
--- a/numpy/core/code_generators/generate_umath.py
+++ b/numpy/core/code_generators/generate_umath.py
@@ -489,7 +489,6 @@ defdict = {
'PyUFunc_SimpleBinaryComparisonTypeResolver',
TD(nodatetime_or_obj, out='?', simd=[('avx2', ints)]),
TD(O, f='npy_ObjectLogicalAnd'),
- TD(O, f='npy_ObjectLogicalAnd', out='?'),
),
'logical_not':
Ufunc(1, 1, None,
@@ -497,7 +496,6 @@ defdict = {
None,
TD(nodatetime_or_obj, out='?', simd=[('avx2', ints)]),
TD(O, f='npy_ObjectLogicalNot'),
- TD(O, f='npy_ObjectLogicalNot', out='?'),
),
'logical_or':
Ufunc(2, 1, False_,
@@ -505,13 +503,13 @@ defdict = {
'PyUFunc_SimpleBinaryComparisonTypeResolver',
TD(nodatetime_or_obj, out='?', simd=[('avx2', ints)]),
TD(O, f='npy_ObjectLogicalOr'),
- TD(O, f='npy_ObjectLogicalOr', out='?'),
),
'logical_xor':
Ufunc(2, 1, False_,
docstrings.get('numpy.core.umath.logical_xor'),
'PyUFunc_SimpleBinaryComparisonTypeResolver',
TD(nodatetime_or_obj, out='?'),
+ # TODO: using obj.logical_xor() seems pretty much useless:
TD(P, f='logical_xor'),
),
'maximum':
diff --git a/numpy/core/einsumfunc.pyi b/numpy/core/einsumfunc.pyi
index 2457e8719..52025d502 100644
--- a/numpy/core/einsumfunc.pyi
+++ b/numpy/core/einsumfunc.pyi
@@ -1,5 +1,4 @@
-import sys
-from typing import List, TypeVar, Optional, Any, overload, Union, Tuple, Sequence
+from typing import List, TypeVar, Optional, Any, overload, Union, Tuple, Sequence, Literal
from numpy import (
ndarray,
@@ -26,11 +25,6 @@ from numpy.typing import (
_DTypeLikeComplex_co,
)
-if sys.version_info >= (3, 8):
- from typing import Literal
-else:
- from typing_extensions import Literal
-
_ArrayType = TypeVar(
"_ArrayType",
bound=ndarray[Any, dtype[Union[bool_, number[Any]]]],
@@ -52,7 +46,8 @@ __all__: List[str]
# Something like `is_scalar = bool(__subscripts.partition("->")[-1])`
@overload
def einsum(
- __subscripts: str,
+ subscripts: str,
+ /,
*operands: _ArrayLikeBool_co,
out: None = ...,
dtype: Optional[_DTypeLikeBool] = ...,
@@ -62,7 +57,8 @@ def einsum(
) -> Any: ...
@overload
def einsum(
- __subscripts: str,
+ subscripts: str,
+ /,
*operands: _ArrayLikeUInt_co,
out: None = ...,
dtype: Optional[_DTypeLikeUInt] = ...,
@@ -72,7 +68,8 @@ def einsum(
) -> Any: ...
@overload
def einsum(
- __subscripts: str,
+ subscripts: str,
+ /,
*operands: _ArrayLikeInt_co,
out: None = ...,
dtype: Optional[_DTypeLikeInt] = ...,
@@ -82,7 +79,8 @@ def einsum(
) -> Any: ...
@overload
def einsum(
- __subscripts: str,
+ subscripts: str,
+ /,
*operands: _ArrayLikeFloat_co,
out: None = ...,
dtype: Optional[_DTypeLikeFloat] = ...,
@@ -92,7 +90,8 @@ def einsum(
) -> Any: ...
@overload
def einsum(
- __subscripts: str,
+ subscripts: str,
+ /,
*operands: _ArrayLikeComplex_co,
out: None = ...,
dtype: Optional[_DTypeLikeComplex] = ...,
@@ -102,7 +101,8 @@ def einsum(
) -> Any: ...
@overload
def einsum(
- __subscripts: str,
+ subscripts: str,
+ /,
*operands: Any,
casting: _CastingUnsafe,
dtype: Optional[_DTypeLikeComplex_co] = ...,
@@ -112,7 +112,8 @@ def einsum(
) -> Any: ...
@overload
def einsum(
- __subscripts: str,
+ subscripts: str,
+ /,
*operands: _ArrayLikeComplex_co,
out: _ArrayType,
dtype: Optional[_DTypeLikeComplex_co] = ...,
@@ -122,7 +123,8 @@ def einsum(
) -> _ArrayType: ...
@overload
def einsum(
- __subscripts: str,
+ subscripts: str,
+ /,
*operands: Any,
out: _ArrayType,
casting: _CastingUnsafe,
@@ -136,7 +138,8 @@ def einsum(
# NOTE: In practice the list consists of a `str` (first element)
# and a variable number of integer tuples.
def einsum_path(
- __subscripts: str,
+ subscripts: str,
+ /,
*operands: _ArrayLikeComplex_co,
optimize: _OptimizeKind = ...,
) -> Tuple[List[Any], str]: ...
diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py
index 764377bc9..5ecb1e666 100644
--- a/numpy/core/fromnumeric.py
+++ b/numpy/core/fromnumeric.py
@@ -3320,18 +3320,15 @@ def around(a, decimals=0, out=None):
----------
.. [1] "Lecture Notes on the Status of IEEE 754", William Kahan,
https://people.eecs.berkeley.edu/~wkahan/ieee754status/IEEE754.PDF
- .. [2] "How Futile are Mindless Assessments of
- Roundoff in Floating-Point Computation?", William Kahan,
- https://people.eecs.berkeley.edu/~wkahan/Mindless.pdf
Examples
--------
>>> np.around([0.37, 1.64])
- array([0., 2.])
+ array([0., 2.])
>>> np.around([0.37, 1.64], decimals=1)
- array([0.4, 1.6])
+ array([0.4, 1.6])
>>> np.around([.5, 1.5, 2.5, 3.5, 4.5]) # rounds to nearest even value
- array([0., 2., 2., 4., 4.])
+ array([0., 2., 2., 4., 4.])
>>> np.around([1,2,3,11], decimals=1) # ndarray of ints is returned
array([ 1, 2, 3, 11])
>>> np.around([1,2,3,11], decimals=-1)
diff --git a/numpy/core/fromnumeric.pyi b/numpy/core/fromnumeric.pyi
index 45057e4b1..3cbe1d5c5 100644
--- a/numpy/core/fromnumeric.pyi
+++ b/numpy/core/fromnumeric.pyi
@@ -1,6 +1,5 @@
-import sys
import datetime as dt
-from typing import Optional, Union, Sequence, Tuple, Any, overload, TypeVar
+from typing import Optional, Union, Sequence, Tuple, Any, overload, TypeVar, Literal
from numpy import (
ndarray,
@@ -26,11 +25,6 @@ from numpy.typing import (
_NumberLike_co,
)
-if sys.version_info >= (3, 8):
- from typing import Literal
-else:
- from typing_extensions import Literal
-
# Various annotations for scalars
# While dt.datetime and dt.timedelta are not technically part of NumPy,
diff --git a/numpy/core/function_base.pyi b/numpy/core/function_base.pyi
index b5d6ca6ab..c35629aa7 100644
--- a/numpy/core/function_base.pyi
+++ b/numpy/core/function_base.pyi
@@ -1,14 +1,8 @@
-import sys
-from typing import overload, Tuple, Union, Sequence, Any
+from typing import overload, Tuple, Union, Sequence, Any, SupportsIndex, Literal
from numpy import ndarray
from numpy.typing import ArrayLike, DTypeLike, _SupportsArray, _NumberLike_co
-if sys.version_info >= (3, 8):
- from typing import SupportsIndex, Literal
-else:
- from typing_extensions import SupportsIndex, Literal
-
# TODO: wait for support for recursive types
_ArrayLikeNested = Sequence[Sequence[Any]]
_ArrayLikeNumber = Union[
diff --git a/numpy/core/include/numpy/npy_cpu.h b/numpy/core/include/numpy/npy_cpu.h
index bc1fad72f..e975b0105 100644
--- a/numpy/core/include/numpy/npy_cpu.h
+++ b/numpy/core/include/numpy/npy_cpu.h
@@ -18,6 +18,7 @@
* NPY_CPU_ARCEL
* NPY_CPU_ARCEB
* NPY_CPU_RISCV64
+ * NPY_CPU_LOONGARCH
* NPY_CPU_WASM
*/
#ifndef _NPY_CPUARCH_H_
@@ -103,6 +104,8 @@
#define NPY_CPU_ARCEB
#elif defined(__riscv) && defined(__riscv_xlen) && __riscv_xlen == 64
#define NPY_CPU_RISCV64
+#elif defined(__loongarch__)
+ #define NPY_CPU_LOONGARCH
#elif defined(__EMSCRIPTEN__)
/* __EMSCRIPTEN__ is defined by emscripten: an LLVM-to-Web compiler */
#define NPY_CPU_WASM
diff --git a/numpy/core/include/numpy/npy_endian.h b/numpy/core/include/numpy/npy_endian.h
index aa367a002..620595bec 100644
--- a/numpy/core/include/numpy/npy_endian.h
+++ b/numpy/core/include/numpy/npy_endian.h
@@ -49,6 +49,7 @@
|| defined(NPY_CPU_PPC64LE) \
|| defined(NPY_CPU_ARCEL) \
|| defined(NPY_CPU_RISCV64) \
+ || defined(NPY_CPU_LOONGARCH) \
|| defined(NPY_CPU_WASM)
#define NPY_BYTE_ORDER NPY_LITTLE_ENDIAN
#elif defined(NPY_CPU_PPC) \
diff --git a/numpy/core/include/numpy/npy_math.h b/numpy/core/include/numpy/npy_math.h
index f32e298f0..e9a6a30d2 100644
--- a/numpy/core/include/numpy/npy_math.h
+++ b/numpy/core/include/numpy/npy_math.h
@@ -391,7 +391,7 @@ NPY_INPLACE npy_longdouble npy_heavisidel(npy_longdouble x, npy_longdouble h0);
union { \
ctype z; \
type a[2]; \
- } z1;; \
+ } z1; \
\
z1.a[0] = (x); \
z1.a[1] = (y); \
diff --git a/numpy/core/multiarray.pyi b/numpy/core/multiarray.pyi
index 7ae831b53..97e9c3498 100644
--- a/numpy/core/multiarray.pyi
+++ b/numpy/core/multiarray.pyi
@@ -1,12 +1,11 @@
# TODO: Sort out any and all missing functions in this namespace
import os
-import sys
import datetime as dt
from typing import (
+ Literal as L,
Any,
Callable,
- IO,
Iterable,
Optional,
overload,
@@ -16,6 +15,10 @@ from typing import (
Union,
Sequence,
Tuple,
+ SupportsIndex,
+ final,
+ Final,
+ Protocol,
)
from numpy import (
@@ -47,6 +50,7 @@ from numpy import (
_CastingKind,
_ModeKind,
_SupportsBuffer,
+ _IOProtocol,
)
from numpy.typing import (
@@ -78,15 +82,6 @@ from numpy.typing import (
_TD64Like_co,
)
-from numpy.array_api import (
- _CopyMode
-)
-
-if sys.version_info >= (3, 8):
- from typing import SupportsIndex, final, Final, Literal as L
-else:
- from typing_extensions import SupportsIndex, final, Final, Literal as L
-
_SCT = TypeVar("_SCT", bound=generic)
_ArrayType = TypeVar("_ArrayType", bound=NDArray[Any])
@@ -312,7 +307,8 @@ def ravel_multi_index(
@overload
def concatenate( # type: ignore[misc]
- __arrays: _ArrayLike[_SCT],
+ arrays: _ArrayLike[_SCT],
+ /,
axis: Optional[SupportsIndex] = ...,
out: None = ...,
*,
@@ -321,7 +317,8 @@ def concatenate( # type: ignore[misc]
) -> NDArray[_SCT]: ...
@overload
def concatenate( # type: ignore[misc]
- __arrays: ArrayLike,
+ arrays: ArrayLike,
+ /,
axis: Optional[SupportsIndex] = ...,
out: None = ...,
*,
@@ -330,7 +327,8 @@ def concatenate( # type: ignore[misc]
) -> NDArray[Any]: ...
@overload
def concatenate( # type: ignore[misc]
- __arrays: ArrayLike,
+ arrays: ArrayLike,
+ /,
axis: Optional[SupportsIndex] = ...,
out: None = ...,
*,
@@ -339,7 +337,8 @@ def concatenate( # type: ignore[misc]
) -> NDArray[_SCT]: ...
@overload
def concatenate( # type: ignore[misc]
- __arrays: ArrayLike,
+ arrays: ArrayLike,
+ /,
axis: Optional[SupportsIndex] = ...,
out: None = ...,
*,
@@ -348,7 +347,8 @@ def concatenate( # type: ignore[misc]
) -> NDArray[Any]: ...
@overload
def concatenate(
- __arrays: ArrayLike,
+ arrays: ArrayLike,
+ /,
axis: Optional[SupportsIndex] = ...,
out: _ArrayType = ...,
*,
@@ -357,19 +357,22 @@ def concatenate(
) -> _ArrayType: ...
def inner(
- __a: ArrayLike,
- __b: ArrayLike,
+ a: ArrayLike,
+ b: ArrayLike,
+ /,
) -> Any: ...
@overload
def where(
- __condition: ArrayLike,
+ condition: ArrayLike,
+ /,
) -> Tuple[NDArray[intp], ...]: ...
@overload
def where(
- __condition: ArrayLike,
- __x: ArrayLike,
- __y: ArrayLike,
+ condition: ArrayLike,
+ x: ArrayLike,
+ y: ArrayLike,
+ /,
) -> NDArray[Any]: ...
def lexsort(
@@ -384,7 +387,7 @@ def can_cast(
) -> bool: ...
def min_scalar_type(
- __a: ArrayLike,
+ a: ArrayLike, /,
) -> dtype[Any]: ...
def result_type(
@@ -397,24 +400,25 @@ def dot(a: ArrayLike, b: ArrayLike, out: None = ...) -> Any: ...
def dot(a: ArrayLike, b: ArrayLike, out: _ArrayType) -> _ArrayType: ...
@overload
-def vdot(__a: _ArrayLikeBool_co, __b: _ArrayLikeBool_co) -> bool_: ... # type: ignore[misc]
+def vdot(a: _ArrayLikeBool_co, b: _ArrayLikeBool_co, /) -> bool_: ... # type: ignore[misc]
@overload
-def vdot(__a: _ArrayLikeUInt_co, __b: _ArrayLikeUInt_co) -> unsignedinteger[Any]: ... # type: ignore[misc]
+def vdot(a: _ArrayLikeUInt_co, b: _ArrayLikeUInt_co, /) -> unsignedinteger[Any]: ... # type: ignore[misc]
@overload
-def vdot(__a: _ArrayLikeInt_co, __b: _ArrayLikeInt_co) -> signedinteger[Any]: ... # type: ignore[misc]
+def vdot(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, /) -> signedinteger[Any]: ... # type: ignore[misc]
@overload
-def vdot(__a: _ArrayLikeFloat_co, __b: _ArrayLikeFloat_co) -> floating[Any]: ... # type: ignore[misc]
+def vdot(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, /) -> floating[Any]: ... # type: ignore[misc]
@overload
-def vdot(__a: _ArrayLikeComplex_co, __b: _ArrayLikeComplex_co) -> complexfloating[Any, Any]: ... # type: ignore[misc]
+def vdot(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, /) -> complexfloating[Any, Any]: ... # type: ignore[misc]
@overload
-def vdot(__a: _ArrayLikeTD64_co, __b: _ArrayLikeTD64_co) -> timedelta64: ...
+def vdot(a: _ArrayLikeTD64_co, b: _ArrayLikeTD64_co, /) -> timedelta64: ...
@overload
-def vdot(__a: _ArrayLikeObject_co, __b: Any) -> Any: ...
+def vdot(a: _ArrayLikeObject_co, b: Any, /) -> Any: ...
@overload
-def vdot(__a: Any, __b: _ArrayLikeObject_co) -> Any: ...
+def vdot(a: Any, b: _ArrayLikeObject_co, /) -> Any: ...
def bincount(
- __x: ArrayLike,
+ x: ArrayLike,
+ /,
weights: Optional[ArrayLike] = ...,
minlength: SupportsIndex = ...,
) -> NDArray[intp]: ...
@@ -433,27 +437,31 @@ def putmask(
) -> None: ...
def packbits(
- __a: _ArrayLikeInt_co,
+ a: _ArrayLikeInt_co,
+ /,
axis: Optional[SupportsIndex] = ...,
bitorder: L["big", "little"] = ...,
) -> NDArray[uint8]: ...
def unpackbits(
- __a: _ArrayLike[uint8],
+ a: _ArrayLike[uint8],
+ /,
axis: Optional[SupportsIndex] = ...,
count: Optional[SupportsIndex] = ...,
bitorder: L["big", "little"] = ...,
) -> NDArray[uint8]: ...
def shares_memory(
- __a: object,
- __b: object,
+ a: object,
+ b: object,
+ /,
max_work: Optional[int] = ...,
) -> bool: ...
def may_share_memory(
- __a: object,
- __b: object,
+ a: object,
+ b: object,
+ /,
max_work: Optional[int] = ...,
) -> bool: ...
@@ -592,7 +600,7 @@ def asfortranarray(
# In practice `List[Any]` is list with an int, int and a valid
# `np.seterrcall()` object
def geterrobj() -> List[Any]: ...
-def seterrobj(__errobj: List[Any]) -> None: ...
+def seterrobj(errobj: List[Any], /) -> None: ...
def promote_types(__type1: DTypeLike, __type2: DTypeLike) -> dtype[Any]: ...
@@ -626,7 +634,7 @@ def fromstring(
) -> NDArray[Any]: ...
def frompyfunc(
- __func: Callable[..., Any],
+ func: Callable[..., Any], /,
nin: SupportsIndex,
nout: SupportsIndex,
*,
@@ -635,7 +643,7 @@ def frompyfunc(
@overload
def fromfile(
- file: str | bytes | os.PathLike[Any] | IO[Any],
+ file: str | bytes | os.PathLike[Any] | _IOProtocol,
dtype: None = ...,
count: SupportsIndex = ...,
sep: str = ...,
@@ -645,7 +653,7 @@ def fromfile(
) -> NDArray[float64]: ...
@overload
def fromfile(
- file: str | bytes | os.PathLike[Any] | IO[Any],
+ file: str | bytes | os.PathLike[Any] | _IOProtocol,
dtype: _DTypeLike[_SCT],
count: SupportsIndex = ...,
sep: str = ...,
@@ -655,7 +663,7 @@ def fromfile(
) -> NDArray[_SCT]: ...
@overload
def fromfile(
- file: str | bytes | os.PathLike[Any] | IO[Any],
+ file: str | bytes | os.PathLike[Any] | _IOProtocol,
dtype: DTypeLike,
count: SupportsIndex = ...,
sep: str = ...,
@@ -711,8 +719,8 @@ def frombuffer(
@overload
def arange( # type: ignore[misc]
- __stop: _IntLike_co,
- *,
+ stop: _IntLike_co,
+ /, *,
dtype: None = ...,
like: ArrayLike = ...,
) -> NDArray[signedinteger[Any]]: ...
@@ -727,8 +735,8 @@ def arange( # type: ignore[misc]
) -> NDArray[signedinteger[Any]]: ...
@overload
def arange( # type: ignore[misc]
- __stop: _FloatLike_co,
- *,
+ stop: _FloatLike_co,
+ /, *,
dtype: None = ...,
like: ArrayLike = ...,
) -> NDArray[floating[Any]]: ...
@@ -743,8 +751,8 @@ def arange( # type: ignore[misc]
) -> NDArray[floating[Any]]: ...
@overload
def arange(
- __stop: _TD64Like_co,
- *,
+ stop: _TD64Like_co,
+ /, *,
dtype: None = ...,
like: ArrayLike = ...,
) -> NDArray[timedelta64]: ...
@@ -768,8 +776,8 @@ def arange( # both start and stop must always be specified for datetime64
) -> NDArray[datetime64]: ...
@overload
def arange(
- __stop: Any,
- *,
+ stop: Any,
+ /, *,
dtype: _DTypeLike[_SCT],
like: ArrayLike = ...,
) -> NDArray[_SCT]: ...
@@ -784,7 +792,7 @@ def arange(
) -> NDArray[_SCT]: ...
@overload
def arange(
- __stop: Any,
+ stop: Any, /,
*,
dtype: DTypeLike,
like: ArrayLike = ...,
@@ -800,7 +808,7 @@ def arange(
) -> NDArray[Any]: ...
def datetime_data(
- __dtype: str | _DTypeLike[datetime64] | _DTypeLike[timedelta64],
+ dtype: str | _DTypeLike[datetime64] | _DTypeLike[timedelta64], /,
) -> Tuple[str, int]: ...
# The datetime functions perform unsafe casts to `datetime64[D]`,
@@ -951,7 +959,7 @@ def compare_chararrays(
rstrip: bool,
) -> NDArray[bool_]: ...
-def add_docstring(__obj: Callable[..., Any], __docstring: str) -> None: ...
+def add_docstring(obj: Callable[..., Any], docstring: str, /) -> None: ...
_GetItemKeys = L[
"C", "CONTIGUOUS", "C_CONTIGUOUS",
diff --git a/numpy/core/numeric.pyi b/numpy/core/numeric.pyi
index 3c2b553ec..54ab4b7c8 100644
--- a/numpy/core/numeric.pyi
+++ b/numpy/core/numeric.pyi
@@ -1,4 +1,3 @@
-import sys
from typing import (
Any,
Optional,
@@ -10,16 +9,12 @@ from typing import (
overload,
TypeVar,
Iterable,
+ Literal,
)
from numpy import ndarray, generic, dtype, bool_, signedinteger, _OrderKACF, _OrderCF
from numpy.typing import ArrayLike, DTypeLike, _ShapeLike
-if sys.version_info >= (3, 8):
- from typing import Literal
-else:
- from typing_extensions import Literal
-
_T = TypeVar("_T")
_ArrayType = TypeVar("_ArrayType", bound=ndarray)
diff --git a/numpy/core/numerictypes.pyi b/numpy/core/numerictypes.pyi
index e99e1c500..1d3ff773b 100644
--- a/numpy/core/numerictypes.pyi
+++ b/numpy/core/numerictypes.pyi
@@ -1,6 +1,7 @@
import sys
import types
from typing import (
+ Literal as L,
Type,
Union,
Tuple,
@@ -10,6 +11,8 @@ from typing import (
Dict,
List,
Iterable,
+ Protocol,
+ TypedDict,
)
from numpy import (
@@ -49,11 +52,6 @@ from numpy.core._type_aliases import (
from numpy.typing import DTypeLike, ArrayLike, _SupportsDType
-if sys.version_info >= (3, 8):
- from typing import Literal as L, Protocol, TypedDict
-else:
- from typing_extensions import Literal as L, Protocol, TypedDict
-
_T = TypeVar("_T")
_SCT = TypeVar("_SCT", bound=generic)
@@ -86,8 +84,8 @@ class _typedict(Dict[Type[generic], _T]):
if sys.version_info >= (3, 10):
_TypeTuple = Union[
Type[Any],
- types.Union,
- Tuple[Union[Type[Any], types.Union, Tuple[Any, ...]], ...],
+ types.UnionType,
+ Tuple[Union[Type[Any], types.UnionType, Tuple[Any, ...]], ...],
]
else:
_TypeTuple = Union[
diff --git a/numpy/core/overrides.py b/numpy/core/overrides.py
index 70085d896..e1fdd06f2 100644
--- a/numpy/core/overrides.py
+++ b/numpy/core/overrides.py
@@ -126,18 +126,6 @@ def set_module(module):
return decorator
-
-# Call textwrap.dedent here instead of in the function so as to avoid
-# calling dedent multiple times on the same text
-_wrapped_func_source = textwrap.dedent("""
- @functools.wraps(implementation)
- def {name}(*args, **kwargs):
- relevant_args = dispatcher(*args, **kwargs)
- return implement_array_function(
- implementation, {name}, relevant_args, args, kwargs)
- """)
-
-
def array_function_dispatch(dispatcher, module=None, verify=True,
docs_from_dispatcher=False):
"""Decorator for adding dispatch with the __array_function__ protocol.
@@ -187,25 +175,15 @@ def array_function_dispatch(dispatcher, module=None, verify=True,
if docs_from_dispatcher:
add_docstring(implementation, dispatcher.__doc__)
- # Equivalently, we could define this function directly instead of using
- # exec. This version has the advantage of giving the helper function a
- # more interpettable name. Otherwise, the original function does not
- # show up at all in many cases, e.g., if it's written in C or if the
- # dispatcher gets an invalid keyword argument.
- source = _wrapped_func_source.format(name=implementation.__name__)
-
- source_object = compile(
- source, filename='<__array_function__ internals>', mode='exec')
- scope = {
- 'implementation': implementation,
- 'dispatcher': dispatcher,
- 'functools': functools,
- 'implement_array_function': implement_array_function,
- }
- exec(source_object, scope)
-
- public_api = scope[implementation.__name__]
+ @functools.wraps(implementation)
+ def public_api(*args, **kwargs):
+ relevant_args = dispatcher(*args, **kwargs)
+ return implement_array_function(
+ implementation, public_api, relevant_args, args, kwargs)
+ public_api.__code__ = public_api.__code__.replace(
+ co_name=implementation.__name__,
+ co_filename='<__array_function__ internals>')
if module is not None:
public_api.__module__ = module
diff --git a/numpy/core/records.py b/numpy/core/records.py
index b3474ad01..fd5f1ab39 100644
--- a/numpy/core/records.py
+++ b/numpy/core/records.py
@@ -664,17 +664,17 @@ def fromarrays(arrayList, dtype=None, shape=None, formats=None,
if nn > 0:
shape = shape[:-nn]
+ _array = recarray(shape, descr)
+
+ # populate the record array (makes a copy)
for k, obj in enumerate(arrayList):
nn = descr[k].ndim
testshape = obj.shape[:obj.ndim - nn]
+ name = _names[k]
if testshape != shape:
- raise ValueError("array-shape mismatch in array %d" % k)
+ raise ValueError(f'array-shape mismatch in array {k} ("{name}")')
- _array = recarray(shape, descr)
-
- # populate the record array (makes a copy)
- for i in range(len(arrayList)):
- _array[_names[i]] = arrayList[i]
+ _array[name] = obj
return _array
@@ -939,7 +939,7 @@ def fromfile(fd, dtype=None, shape=None, offset=0, formats=None,
_array = recarray(shape, descr)
nbytesread = fd.readinto(_array.data)
if nbytesread != nbytes:
- raise IOError("Didn't read as many bytes as expected")
+ raise OSError("Didn't read as many bytes as expected")
return _array
diff --git a/numpy/core/setup.py b/numpy/core/setup.py
index c20320910..ba7d83787 100644
--- a/numpy/core/setup.py
+++ b/numpy/core/setup.py
@@ -381,9 +381,9 @@ def check_mathlib(config_cmd):
mathlibs = libs
break
else:
- raise EnvironmentError("math library missing; rerun "
- "setup.py after setting the "
- "MATHLIB env variable")
+ raise RuntimeError(
+ "math library missing; rerun setup.py after setting the "
+ "MATHLIB env variable")
return mathlibs
def visibility_define(config):
diff --git a/numpy/core/shape_base.pyi b/numpy/core/shape_base.pyi
index 9aaeceed7..d7914697d 100644
--- a/numpy/core/shape_base.pyi
+++ b/numpy/core/shape_base.pyi
@@ -1,14 +1,8 @@
-import sys
-from typing import TypeVar, overload, List, Sequence, Any
+from typing import TypeVar, overload, List, Sequence, Any, SupportsIndex
from numpy import generic, dtype
from numpy.typing import ArrayLike, NDArray, _NestedSequence, _SupportsArray
-if sys.version_info >= (3, 8):
- from typing import SupportsIndex
-else:
- from typing_extensions import SupportsIndex
-
_SCT = TypeVar("_SCT", bound=generic)
_ArrayType = TypeVar("_ArrayType", bound=NDArray[Any])
@@ -17,23 +11,23 @@ _ArrayLike = _NestedSequence[_SupportsArray[dtype[_SCT]]]
__all__: List[str]
@overload
-def atleast_1d(__arys: _ArrayLike[_SCT]) -> NDArray[_SCT]: ...
+def atleast_1d(arys: _ArrayLike[_SCT], /) -> NDArray[_SCT]: ...
@overload
-def atleast_1d(__arys: ArrayLike) -> NDArray[Any]: ...
+def atleast_1d(arys: ArrayLike, /) -> NDArray[Any]: ...
@overload
def atleast_1d(*arys: ArrayLike) -> List[NDArray[Any]]: ...
@overload
-def atleast_2d(__arys: _ArrayLike[_SCT]) -> NDArray[_SCT]: ...
+def atleast_2d(arys: _ArrayLike[_SCT], /) -> NDArray[_SCT]: ...
@overload
-def atleast_2d(__arys: ArrayLike) -> NDArray[Any]: ...
+def atleast_2d(arys: ArrayLike, /) -> NDArray[Any]: ...
@overload
def atleast_2d(*arys: ArrayLike) -> List[NDArray[Any]]: ...
@overload
-def atleast_3d(__arys: _ArrayLike[_SCT]) -> NDArray[_SCT]: ...
+def atleast_3d(arys: _ArrayLike[_SCT], /) -> NDArray[_SCT]: ...
@overload
-def atleast_3d(__arys: ArrayLike) -> NDArray[Any]: ...
+def atleast_3d(arys: ArrayLike, /) -> NDArray[Any]: ...
@overload
def atleast_3d(*arys: ArrayLike) -> List[NDArray[Any]]: ...
diff --git a/numpy/core/src/common/npy_cpu_dispatch.h b/numpy/core/src/common/npy_cpu_dispatch.h
index c8411104a..09e00badf 100644
--- a/numpy/core/src/common/npy_cpu_dispatch.h
+++ b/numpy/core/src/common/npy_cpu_dispatch.h
@@ -57,7 +57,7 @@
* avoid linking duplications due to the nature of the dispatch-able sources.
*
* Example:
- * @targets baseline avx avx512_skx vsx3 asimdhp // configration statments
+ * @targets baseline avx avx512_skx vsx3 asimdhp // configuration statements
*
* void NPY_CPU_DISPATCH_CURFX(dispatch_me)(const int *src, int *dst)
* {
@@ -180,7 +180,7 @@
* Macro NPY_CPU_DISPATCH_DECLARE_XB(LEFT, ...)
*
* Same as `NPY_CPU_DISPATCH_DECLARE` but exclude the baseline declaration even
- * if it was provided within the configration statments.
+ * if it was provided within the configuration statements.
*/
#define NPY_CPU_DISPATCH_DECLARE_XB(...) \
NPY__CPU_DISPATCH_CALL(NPY_CPU_DISPATCH_DECLARE_CHK_, NPY_CPU_DISPATCH_DECLARE_CB_, __VA_ARGS__)
@@ -196,7 +196,7 @@
* Example:
* Assume we have a dispatch-able source exporting the following function:
*
- * @targets baseline avx2 avx512_skx // configration statments
+ * @targets baseline avx2 avx512_skx // configration statements
*
* void NPY_CPU_DISPATCH_CURFX(dispatch_me)(const int *src, int *dst)
* {
@@ -238,7 +238,7 @@
* Macro NPY_CPU_DISPATCH_CALL_XB(LEFT, ...)
*
* Same as `NPY_CPU_DISPATCH_DECLARE` but exclude the baseline declaration even
- * if it was provided within the configration statements.
+ * if it was provided within the configuration statements.
* Returns void.
*/
#define NPY_CPU_DISPATCH_CALL_XB_CB_(TESTED_FEATURES, TARGET_NAME, LEFT, ...) \
diff --git a/numpy/core/src/common/simd/avx2/arithmetic.h b/numpy/core/src/common/simd/avx2/arithmetic.h
index e1b170863..ad9688338 100644
--- a/numpy/core/src/common/simd/avx2/arithmetic.h
+++ b/numpy/core/src/common/simd/avx2/arithmetic.h
@@ -284,7 +284,7 @@ NPY_FINLINE npy_uint32 npyv_sum_u32(npyv_u32 a)
{
__m256i s0 = _mm256_hadd_epi32(a, a);
s0 = _mm256_hadd_epi32(s0, s0);
- __m128i s1 = _mm256_extracti128_si256(s0, 1);;
+ __m128i s1 = _mm256_extracti128_si256(s0, 1);
s1 = _mm_add_epi32(_mm256_castsi256_si128(s0), s1);
return _mm_cvtsi128_si32(s1);
}
diff --git a/numpy/core/src/multiarray/alloc.c b/numpy/core/src/multiarray/alloc.c
index 887deff53..e74056736 100644
--- a/numpy/core/src/multiarray/alloc.c
+++ b/numpy/core/src/multiarray/alloc.c
@@ -3,11 +3,6 @@
#include "structmember.h"
#include <pymem.h>
-/* public api in 3.7 */
-#if PY_VERSION_HEX < 0x03070000
-#define PyTraceMalloc_Track _PyTraceMalloc_Track
-#define PyTraceMalloc_Untrack _PyTraceMalloc_Untrack
-#endif
#define NPY_NO_DEPRECATED_API NPY_API_VERSION
#define _MULTIARRAYMODULE
diff --git a/numpy/core/src/multiarray/convert.c b/numpy/core/src/multiarray/convert.c
index 29a2bb0e8..2ad8d6d0e 100644
--- a/numpy/core/src/multiarray/convert.c
+++ b/numpy/core/src/multiarray/convert.c
@@ -61,7 +61,7 @@ npy_fallocate(npy_intp nbytes, FILE * fp)
* early exit on no space, other errors will also get found during fwrite
*/
if (r == -1 && errno == ENOSPC) {
- PyErr_Format(PyExc_IOError, "Not enough free space to write "
+ PyErr_Format(PyExc_OSError, "Not enough free space to write "
"%"NPY_INTP_FMT" bytes", nbytes);
return -1;
}
@@ -138,7 +138,7 @@ PyArray_ToFile(PyArrayObject *self, FILE *fp, char *sep, char *format)
if (n3 == 0) {
/* binary data */
if (PyDataType_FLAGCHK(PyArray_DESCR(self), NPY_LIST_PICKLE)) {
- PyErr_SetString(PyExc_IOError,
+ PyErr_SetString(PyExc_OSError,
"cannot write object arrays to a file in binary mode");
return -1;
}
@@ -182,7 +182,7 @@ PyArray_ToFile(PyArrayObject *self, FILE *fp, char *sep, char *format)
#endif
NPY_END_ALLOW_THREADS;
if (n < size) {
- PyErr_Format(PyExc_IOError,
+ PyErr_Format(PyExc_OSError,
"%ld requested and %ld written",
(long) size, (long) n);
return -1;
@@ -198,7 +198,7 @@ PyArray_ToFile(PyArrayObject *self, FILE *fp, char *sep, char *format)
(size_t) PyArray_DESCR(self)->elsize,
1, fp) < 1) {
NPY_END_THREADS;
- PyErr_Format(PyExc_IOError,
+ PyErr_Format(PyExc_OSError,
"problem writing element %" NPY_INTP_FMT
" to file", it->index);
Py_DECREF(it);
@@ -266,7 +266,7 @@ PyArray_ToFile(PyArrayObject *self, FILE *fp, char *sep, char *format)
NPY_END_ALLOW_THREADS;
Py_DECREF(byteobj);
if (n < n2) {
- PyErr_Format(PyExc_IOError,
+ PyErr_Format(PyExc_OSError,
"problem writing element %" NPY_INTP_FMT
" to file", it->index);
Py_DECREF(strobj);
@@ -276,7 +276,7 @@ PyArray_ToFile(PyArrayObject *self, FILE *fp, char *sep, char *format)
/* write separator for all but last one */
if (it->index != it->size-1) {
if (fwrite(sep, 1, n3, fp) < n3) {
- PyErr_Format(PyExc_IOError,
+ PyErr_Format(PyExc_OSError,
"problem writing separator to file");
Py_DECREF(strobj);
Py_DECREF(it);
diff --git a/numpy/core/src/multiarray/convert_datatype.c b/numpy/core/src/multiarray/convert_datatype.c
index e3b25d076..45b03a6f3 100644
--- a/numpy/core/src/multiarray/convert_datatype.c
+++ b/numpy/core/src/multiarray/convert_datatype.c
@@ -449,7 +449,7 @@ PyArray_GetCastSafety(
/**
* Check whether a cast is safe, see also `PyArray_GetCastSafety` for
- * a similiar function. Unlike GetCastSafety, this function checks the
+ * a similar function. Unlike GetCastSafety, this function checks the
* `castingimpl->casting` when available. This allows for two things:
*
* 1. It avoids calling `resolve_descriptors` in some cases.
diff --git a/numpy/core/src/multiarray/ctors.c b/numpy/core/src/multiarray/ctors.c
index 1449ddcef..deab7d2a1 100644
--- a/numpy/core/src/multiarray/ctors.c
+++ b/numpy/core/src/multiarray/ctors.c
@@ -3340,7 +3340,7 @@ array_fromfile_binary(FILE *fp, PyArray_Descr *dtype, npy_intp num, size_t *nrea
fail = 1;
}
if (fail) {
- PyErr_SetString(PyExc_IOError,
+ PyErr_SetString(PyExc_OSError,
"could not seek in file");
return NULL;
}
diff --git a/numpy/core/src/multiarray/descriptor.c b/numpy/core/src/multiarray/descriptor.c
index 50964dab8..90453e38f 100644
--- a/numpy/core/src/multiarray/descriptor.c
+++ b/numpy/core/src/multiarray/descriptor.c
@@ -1723,22 +1723,6 @@ _convert_from_str(PyObject *obj, int align)
goto fail;
}
- /* Check for a deprecated Numeric-style typecode */
- /* `Uint` has deliberately weird uppercasing */
- char *dep_tps[] = {"Bytes", "Datetime64", "Str", "Uint"};
- int ndep_tps = sizeof(dep_tps) / sizeof(dep_tps[0]);
- for (int i = 0; i < ndep_tps; ++i) {
- char *dep_tp = dep_tps[i];
-
- if (strncmp(type, dep_tp, strlen(dep_tp)) == 0) {
- /* Deprecated 2020-06-09, NumPy 1.20 */
- if (DEPRECATE("Numeric-style type codes are "
- "deprecated and will result in "
- "an error in the future.") < 0) {
- goto fail;
- }
- }
- }
/*
* Probably only ever dispatches to `_convert_from_type`, but who
* knows what users are injecting into `np.typeDict`.
diff --git a/numpy/core/src/multiarray/lowlevel_strided_loops.c.src b/numpy/core/src/multiarray/lowlevel_strided_loops.c.src
index e533e4932..e38873746 100644
--- a/numpy/core/src/multiarray/lowlevel_strided_loops.c.src
+++ b/numpy/core/src/multiarray/lowlevel_strided_loops.c.src
@@ -819,6 +819,10 @@ NPY_NO_EXPORT PyArrayMethod_StridedLoop *
# define _CONVERT_FN(x) npy_floatbits_to_halfbits(x)
# elif @is_double1@
# define _CONVERT_FN(x) npy_doublebits_to_halfbits(x)
+# elif @is_half1@
+# define _CONVERT_FN(x) (x)
+# elif @is_bool1@
+# define _CONVERT_FN(x) npy_float_to_half((float)(x!=0))
# else
# define _CONVERT_FN(x) npy_float_to_half((float)x)
# endif
diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c
index 2ca642d76..d33c7060b 100644
--- a/numpy/core/src/multiarray/multiarraymodule.c
+++ b/numpy/core/src/multiarray/multiarraymodule.c
@@ -2284,7 +2284,7 @@ array_fromfile(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *keywds)
return NULL;
}
if (npy_fseek(fp, offset, SEEK_CUR) != 0) {
- PyErr_SetFromErrno(PyExc_IOError);
+ PyErr_SetFromErrno(PyExc_OSError);
goto cleanup;
}
if (type == NULL) {
diff --git a/numpy/core/src/multiarray/scalartypes.c.src b/numpy/core/src/multiarray/scalartypes.c.src
index 40f736125..740ec8cc2 100644
--- a/numpy/core/src/multiarray/scalartypes.c.src
+++ b/numpy/core/src/multiarray/scalartypes.c.src
@@ -1908,6 +1908,39 @@ error:
}
/**end repeat**/
+/**begin repeat
+ * #name = half, float, double, longdouble#
+ * #Name = Half, Float, Double, LongDouble#
+ * #is_half = 1,0,0,0#
+ * #c = f, f, , l#
+ */
+static PyObject *
+@name@_is_integer(PyObject *self)
+{
+#if @is_half@
+ npy_double val = npy_half_to_double(PyArrayScalar_VAL(self, @Name@));
+#else
+ npy_@name@ val = PyArrayScalar_VAL(self, @Name@);
+#endif
+ PyObject *ret;
+
+ if (npy_isnan(val)) {
+ Py_RETURN_FALSE;
+ }
+ if (!npy_isfinite(val)) {
+ Py_RETURN_FALSE;
+ }
+
+ ret = (npy_floor@c@(val) == val) ? Py_True : Py_False;
+ Py_INCREF(ret);
+ return ret;
+}
+/**end repeat**/
+
+static PyObject *
+integer_is_integer(PyObject *self) {
+ Py_RETURN_TRUE;
+}
/*
* need to fill in doc-strings for these methods on import -- copy from
@@ -2167,7 +2200,7 @@ static PyMethodDef @name@type_methods[] = {
/**end repeat**/
/**begin repeat
- * #name = integer,floating, complexfloating#
+ * #name = floating, complexfloating#
*/
static PyMethodDef @name@type_methods[] = {
/* Hook for the round() builtin */
@@ -2178,6 +2211,17 @@ static PyMethodDef @name@type_methods[] = {
};
/**end repeat**/
+static PyMethodDef integertype_methods[] = {
+ /* Hook for the round() builtin */
+ {"__round__",
+ (PyCFunction)integertype_dunder_round,
+ METH_VARARGS | METH_KEYWORDS, NULL},
+ {"is_integer",
+ (PyCFunction)integer_is_integer,
+ METH_NOARGS, NULL},
+ {NULL, NULL, 0, NULL} /* sentinel */
+};
+
/**begin repeat
* #name = half,float,double,longdouble#
*/
@@ -2185,6 +2229,9 @@ static PyMethodDef @name@type_methods[] = {
{"as_integer_ratio",
(PyCFunction)@name@_as_integer_ratio,
METH_NOARGS, NULL},
+ {"is_integer",
+ (PyCFunction)@name@_is_integer,
+ METH_NOARGS, NULL},
{NULL, NULL, 0, NULL}
};
/**end repeat**/
diff --git a/numpy/core/src/umath/_scaled_float_dtype.c b/numpy/core/src/umath/_scaled_float_dtype.c
index 599774cce..cbea378f0 100644
--- a/numpy/core/src/umath/_scaled_float_dtype.c
+++ b/numpy/core/src/umath/_scaled_float_dtype.c
@@ -464,9 +464,6 @@ init_casts(void)
* 2. Addition, which needs to use the common instance, and runs into
* cast safety subtleties since we will implement it without an additional
* cast.
- *
- * NOTE: When first writing this, promotion did not exist for new-style loops,
- * if it exists, we could use promotion to implement double * sfloat.
*/
static int
multiply_sfloats(PyArrayMethod_Context *NPY_UNUSED(context),
@@ -591,7 +588,8 @@ add_sfloats_resolve_descriptors(
static int
-add_loop(const char *ufunc_name, PyBoundArrayMethodObject *bmeth)
+add_loop(const char *ufunc_name,
+ PyArray_DTypeMeta *dtypes[3], PyObject *meth_or_promoter)
{
PyObject *mod = PyImport_ImportModule("numpy");
if (mod == NULL) {
@@ -605,13 +603,12 @@ add_loop(const char *ufunc_name, PyBoundArrayMethodObject *bmeth)
"numpy.%s was not a ufunc!", ufunc_name);
return -1;
}
- PyObject *dtype_tup = PyArray_TupleFromItems(
- 3, (PyObject **)bmeth->dtypes, 0);
+ PyObject *dtype_tup = PyArray_TupleFromItems(3, (PyObject **)dtypes, 1);
if (dtype_tup == NULL) {
Py_DECREF(ufunc);
return -1;
}
- PyObject *info = PyTuple_Pack(2, dtype_tup, bmeth->method);
+ PyObject *info = PyTuple_Pack(2, dtype_tup, meth_or_promoter);
Py_DECREF(dtype_tup);
if (info == NULL) {
Py_DECREF(ufunc);
@@ -624,6 +621,28 @@ add_loop(const char *ufunc_name, PyBoundArrayMethodObject *bmeth)
}
+
+/*
+ * We add some very basic promoters to allow multiplying normal and scaled floats.
+ */
+static int
+promote_to_sfloat(PyUFuncObject *NPY_UNUSED(ufunc),
+ PyArray_DTypeMeta *const NPY_UNUSED(dtypes[3]),
+ PyArray_DTypeMeta *const signature[3],
+ PyArray_DTypeMeta *new_dtypes[3])
+{
+ for (int i = 0; i < 3; i++) {
+ PyArray_DTypeMeta *new = &PyArray_SFloatDType;
+ if (signature[i] != NULL) {
+ new = signature[i];
+ }
+ Py_INCREF(new);
+ new_dtypes[i] = new;
+ }
+ return 0;
+}
+
+
/*
* Add new ufunc loops (this is somewhat clumsy as of writing it, but should
* get less so with the introduction of public API).
@@ -650,7 +669,8 @@ init_ufuncs(void) {
if (bmeth == NULL) {
return -1;
}
- int res = add_loop("multiply", bmeth);
+ int res = add_loop("multiply",
+ bmeth->dtypes, (PyObject *)bmeth->method);
Py_DECREF(bmeth);
if (res < 0) {
return -1;
@@ -667,11 +687,40 @@ init_ufuncs(void) {
if (bmeth == NULL) {
return -1;
}
- res = add_loop("add", bmeth);
+ res = add_loop("add",
+ bmeth->dtypes, (PyObject *)bmeth->method);
Py_DECREF(bmeth);
if (res < 0) {
return -1;
}
+
+ /*
+ * Add a promoter for both directions of multiply with double.
+ */
+ PyArray_DTypeMeta *double_DType = PyArray_DTypeFromTypeNum(NPY_DOUBLE);
+ Py_DECREF(double_DType); /* immortal anyway */
+
+ PyArray_DTypeMeta *promoter_dtypes[3] = {
+ &PyArray_SFloatDType, double_DType, NULL};
+
+ PyObject *promoter = PyCapsule_New(
+ &promote_to_sfloat, "numpy._ufunc_promoter", NULL);
+ if (promoter == NULL) {
+ return -1;
+ }
+ res = add_loop("multiply", promoter_dtypes, promoter);
+ if (res < 0) {
+ Py_DECREF(promoter);
+ return -1;
+ }
+ promoter_dtypes[0] = double_DType;
+ promoter_dtypes[1] = &PyArray_SFloatDType;
+ res = add_loop("multiply", promoter_dtypes, promoter);
+ Py_DECREF(promoter);
+ if (res < 0) {
+ return -1;
+ }
+
return 0;
}
diff --git a/numpy/core/src/umath/dispatching.c b/numpy/core/src/umath/dispatching.c
index b1c5ccb6b..b97441b13 100644
--- a/numpy/core/src/umath/dispatching.c
+++ b/numpy/core/src/umath/dispatching.c
@@ -97,8 +97,9 @@ PyUFunc_AddLoop(PyUFuncObject *ufunc, PyObject *info, int ignore_duplicate)
return -1;
}
}
- if (!PyObject_TypeCheck(PyTuple_GET_ITEM(info, 1), &PyArrayMethod_Type)) {
- /* Must also accept promoters in the future. */
+ PyObject *meth_or_promoter = PyTuple_GET_ITEM(info, 1);
+ if (!PyObject_TypeCheck(meth_or_promoter, &PyArrayMethod_Type)
+ && !PyCapsule_IsValid(meth_or_promoter, "numpy._ufunc_promoter")) {
PyErr_SetString(PyExc_TypeError,
"Second argument to info must be an ArrayMethod or promoter");
return -1;
@@ -354,15 +355,68 @@ resolve_implementation_info(PyUFuncObject *ufunc,
* those defined by the `signature` unmodified).
*/
static PyObject *
-call_promoter_and_recurse(
- PyUFuncObject *NPY_UNUSED(ufunc), PyObject *NPY_UNUSED(promoter),
- PyArray_DTypeMeta *NPY_UNUSED(op_dtypes[]),
- PyArray_DTypeMeta *NPY_UNUSED(signature[]),
- PyArrayObject *const NPY_UNUSED(operands[]))
+call_promoter_and_recurse(PyUFuncObject *ufunc, PyObject *promoter,
+ PyArray_DTypeMeta *op_dtypes[], PyArray_DTypeMeta *signature[],
+ PyArrayObject *const operands[])
{
- PyErr_SetString(PyExc_NotImplementedError,
- "Internal NumPy error, promoters are not used/implemented yet.");
- return NULL;
+ int nargs = ufunc->nargs;
+ PyObject *resolved_info = NULL;
+
+ int promoter_result;
+ PyArray_DTypeMeta *new_op_dtypes[NPY_MAXARGS];
+
+ if (PyCapsule_CheckExact(promoter)) {
+ /* We could also go the other way and wrap up the python function... */
+ promoter_function *promoter_function = PyCapsule_GetPointer(promoter,
+ "numpy._ufunc_promoter");
+ if (promoter_function == NULL) {
+ return NULL;
+ }
+ promoter_result = promoter_function(ufunc,
+ op_dtypes, signature, new_op_dtypes);
+ }
+ else {
+ PyErr_SetString(PyExc_NotImplementedError,
+ "Calling python functions for promotion is not implemented.");
+ return NULL;
+ }
+ if (promoter_result < 0) {
+ return NULL;
+ }
+ /*
+ * If none of the dtypes changes, we would recurse infinitely, abort.
+ * (Of course it is nevertheless possible to recurse infinitely.)
+ */
+ int dtypes_changed = 0;
+ for (int i = 0; i < nargs; i++) {
+ if (new_op_dtypes[i] != op_dtypes[i]) {
+ dtypes_changed = 1;
+ break;
+ }
+ }
+ if (!dtypes_changed) {
+ goto finish;
+ }
+
+ /*
+ * Do a recursive call, the promotion function has to ensure that the
+ * new tuple is strictly more precise (thus guaranteeing eventual finishing)
+ */
+ if (Py_EnterRecursiveCall(" during ufunc promotion.") != 0) {
+ goto finish;
+ }
+ /* TODO: The caching logic here may need revising: */
+ resolved_info = promote_and_get_info_and_ufuncimpl(ufunc,
+ operands, signature, new_op_dtypes,
+ /* no legacy promotion */ NPY_FALSE, /* cache */ NPY_TRUE);
+
+ Py_LeaveRecursiveCall();
+
+ finish:
+ for (int i = 0; i < nargs; i++) {
+ Py_XDECREF(new_op_dtypes[i]);
+ }
+ return resolved_info;
}
diff --git a/numpy/core/src/umath/dispatching.h b/numpy/core/src/umath/dispatching.h
index b01bc79fa..8d116873c 100644
--- a/numpy/core/src/umath/dispatching.h
+++ b/numpy/core/src/umath/dispatching.h
@@ -7,6 +7,10 @@
#include "array_method.h"
+typedef int promoter_function(PyUFuncObject *ufunc,
+ PyArray_DTypeMeta *op_dtypes[], PyArray_DTypeMeta *signature[],
+ PyArray_DTypeMeta *new_op_dtypes[]);
+
NPY_NO_EXPORT int
PyUFunc_AddLoop(PyUFuncObject *ufunc, PyObject *info, int ignore_duplicate);
diff --git a/numpy/core/src/umath/legacy_array_method.c b/numpy/core/src/umath/legacy_array_method.c
index a5e123baa..4351f1d25 100644
--- a/numpy/core/src/umath/legacy_array_method.c
+++ b/numpy/core/src/umath/legacy_array_method.c
@@ -142,7 +142,7 @@ simple_legacy_resolve_descriptors(
}
}
- return NPY_SAFE_CASTING;
+ return NPY_NO_CASTING;
fail:
for (int i = 0; i < nin + nout; i++) {
@@ -244,7 +244,7 @@ PyArray_NewLegacyWrappingArrayMethod(PyUFuncObject *ufunc,
.dtypes = signature,
.flags = flags,
.slots = slots,
- .casting = NPY_EQUIV_CASTING,
+ .casting = NPY_NO_CASTING,
};
PyBoundArrayMethodObject *bound_res = PyArrayMethod_FromSpec_int(&spec, 1);
diff --git a/numpy/core/src/umath/loops.c.src b/numpy/core/src/umath/loops.c.src
index b1afa69a7..8df439aca 100644
--- a/numpy/core/src/umath/loops.c.src
+++ b/numpy/core/src/umath/loops.c.src
@@ -1340,7 +1340,7 @@ TIMEDELTA_mq_m_divide(char **args, npy_intp const *dimensions, npy_intp const *s
*((npy_timedelta *)op1) = NPY_DATETIME_NAT;
}
else {
- *((npy_timedelta *)op1) = libdivide_s64_do(in1, &fast_d);;
+ *((npy_timedelta *)op1) = libdivide_s64_do(in1, &fast_d);
}
}
}
diff --git a/numpy/core/src/umath/loops_exponent_log.dispatch.c.src b/numpy/core/src/umath/loops_exponent_log.dispatch.c.src
index b17643d23..cc0fd19bb 100644
--- a/numpy/core/src/umath/loops_exponent_log.dispatch.c.src
+++ b/numpy/core/src/umath/loops_exponent_log.dispatch.c.src
@@ -800,7 +800,7 @@ AVX512F_exp_DOUBLE(npy_double * op,
q = _mm512_fmadd_pd(q, r, mA2);
q = _mm512_fmadd_pd(q, r, mA1);
q = _mm512_mul_pd(q, r);
- __m512d p = _mm512_fmadd_pd(r, q, r2);;
+ __m512d p = _mm512_fmadd_pd(r, q, r2);
p = _mm512_add_pd(r1, p);
/* Get 2^(j/32) from lookup table */
diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c
index bed303a86..ebc6bf02a 100644
--- a/numpy/core/src/umath/ufunc_object.c
+++ b/numpy/core/src/umath/ufunc_object.c
@@ -4286,7 +4286,8 @@ _get_dtype(PyObject *dtype_obj) {
else if (NPY_UNLIKELY(out->singleton != descr)) {
/* This does not warn about `metadata`, but units is important. */
if (!PyArray_EquivTypes(out->singleton, descr)) {
- PyErr_Format(PyExc_TypeError,
+ /* Deprecated NumPy 1.21.2 (was an accidental error in 1.21) */
+ if (DEPRECATE(
"The `dtype` and `signature` arguments to "
"ufuncs only select the general DType and not details "
"such as the byte order or time unit (with rare "
@@ -4296,9 +4297,11 @@ _get_dtype(PyObject *dtype_obj) {
"In rare cases where the time unit was preserved, "
"either cast the inputs or provide an output array. "
"In the future NumPy may transition to allow providing "
- "`dtype=` to denote the outputs `dtype` as well");
- Py_DECREF(descr);
- return NULL;
+ "`dtype=` to denote the outputs `dtype` as well. "
+ "(Deprecated NumPy 1.21)") < 0) {
+ Py_DECREF(descr);
+ return NULL;
+ }
}
}
Py_INCREF(out);
diff --git a/numpy/core/tests/test_casting_unittests.py b/numpy/core/tests/test_casting_unittests.py
index 3f67f1832..a13e807e2 100644
--- a/numpy/core/tests/test_casting_unittests.py
+++ b/numpy/core/tests/test_casting_unittests.py
@@ -695,6 +695,13 @@ class TestCasting:
expected = arr_normal.astype(dtype)
except TypeError:
with pytest.raises(TypeError):
- arr_NULLs.astype(dtype)
+ arr_NULLs.astype(dtype)
else:
assert_array_equal(expected, arr_NULLs.astype(dtype))
+
+ def test_float_to_bool(self):
+ # test case corresponding to gh-19514
+ # simple test for viewing int8 data as bool (nonzero bytes become True)
+ res = np.array([0, 3, -7], dtype=np.int8).view(bool)
+ expected = [0, 1, 1]
+ assert_array_equal(res, expected)
diff --git a/numpy/core/tests/test_custom_dtypes.py b/numpy/core/tests/test_custom_dtypes.py
index 3ec2363b9..5eb82bc93 100644
--- a/numpy/core/tests/test_custom_dtypes.py
+++ b/numpy/core/tests/test_custom_dtypes.py
@@ -101,6 +101,18 @@ class TestSFloat:
expected_view = a.view(np.float64) * b.view(np.float64)
assert_array_equal(res.view(np.float64), expected_view)
+ def test_basic_multiply_promotion(self):
+ float_a = np.array([1., 2., 3.])
+ b = self._get_array(2.)
+
+ res1 = float_a * b
+ res2 = b * float_a
+ # one factor is one, so we get the factor of b:
+ assert res1.dtype == res2.dtype == b.dtype
+ expected_view = float_a * b.view(np.float64)
+ assert_array_equal(res1.view(np.float64), expected_view)
+ assert_array_equal(res2.view(np.float64), expected_view)
+
def test_basic_addition(self):
a = self._get_array(2.)
b = self._get_array(4.)
diff --git a/numpy/core/tests/test_datetime.py b/numpy/core/tests/test_datetime.py
index b4146eadf..5a490646e 100644
--- a/numpy/core/tests/test_datetime.py
+++ b/numpy/core/tests/test_datetime.py
@@ -152,7 +152,7 @@ class TestDateTime:
expected = np.arange(size)
arr = np.tile(np.datetime64('NaT'), size)
assert_equal(np.argsort(arr, kind='mergesort'), expected)
-
+
@pytest.mark.parametrize("size", [
3, 21, 217, 1000])
def test_timedelta_nat_argsort_stability(self, size):
@@ -1373,13 +1373,13 @@ class TestDateTime:
assert_equal(tda / 0.5, tdc)
assert_equal((tda / 0.5).dtype, np.dtype('m8[h]'))
# m8 / m8
- assert_equal(tda / tdb, 6.0 / 9.0)
- assert_equal(np.divide(tda, tdb), 6.0 / 9.0)
- assert_equal(np.true_divide(tda, tdb), 6.0 / 9.0)
- assert_equal(tdb / tda, 9.0 / 6.0)
+ assert_equal(tda / tdb, 6 / 9)
+ assert_equal(np.divide(tda, tdb), 6 / 9)
+ assert_equal(np.true_divide(tda, tdb), 6 / 9)
+ assert_equal(tdb / tda, 9 / 6)
assert_equal((tda / tdb).dtype, np.dtype('f8'))
- assert_equal(tda / tdd, 60.0)
- assert_equal(tdd / tda, 1.0 / 60.0)
+ assert_equal(tda / tdd, 60)
+ assert_equal(tdd / tda, 1 / 60)
# int / m8
assert_raises(TypeError, np.divide, 2, tdb)
diff --git a/numpy/core/tests/test_deprecations.py b/numpy/core/tests/test_deprecations.py
index 42e632e4a..44c76e0b8 100644
--- a/numpy/core/tests/test_deprecations.py
+++ b/numpy/core/tests/test_deprecations.py
@@ -314,21 +314,6 @@ class TestBinaryReprInsufficientWidthParameterForRepresentation(_DeprecationTest
self.assert_deprecated(np.binary_repr, args=args, kwargs=kwargs)
-class TestNumericStyleTypecodes(_DeprecationTestCase):
- """
- Most numeric style typecodes were previously deprecated (and removed)
- in 1.20. This also deprecates the remaining ones.
- """
- # 2020-06-09, NumPy 1.20
- def test_all_dtypes(self):
- deprecated_types = ['Bytes0', 'Datetime64', 'Str0']
- # Depending on intp size, either Uint32 or Uint64 is defined:
- deprecated_types.append(f"U{np.dtype(np.intp).name}")
- for dt in deprecated_types:
- self.assert_deprecated(np.dtype, exceptions=(TypeError,),
- args=(dt,))
-
-
class TestDTypeAttributeIsDTypeDeprecation(_DeprecationTestCase):
# Deprecated 2021-01-05, NumPy 1.21
message = r".*`.dtype` attribute"
@@ -1174,3 +1159,36 @@ class TestCtypesGetter(_DeprecationTestCase):
)
def test_not_deprecated(self, name: str) -> None:
self.assert_not_deprecated(lambda: getattr(self.ctypes, name))
+
+
+class TestUFuncForcedDTypeWarning(_DeprecationTestCase):
+ message = "The `dtype` and `signature` arguments to ufuncs only select the"
+
+ def test_not_deprecated(self):
+ import pickle
+ # does not warn (test relies on bad pickling behaviour; simply remove
+ # it if the `assert int64 is not int64_2` should start failing).
+ int64 = np.dtype("int64")
+ int64_2 = pickle.loads(pickle.dumps(int64))
+ assert int64 is not int64_2
+ self.assert_not_deprecated(lambda: np.add(3, 4, dtype=int64_2))
+
+ def test_deprecation(self):
+ int64 = np.dtype("int64")
+ self.assert_deprecated(lambda: np.add(3, 5, dtype=int64.newbyteorder()))
+ self.assert_deprecated(lambda: np.add(3, 5, dtype="m8[ns]"))
+
+ def test_behaviour(self):
+ int64 = np.dtype("int64")
+ arr = np.arange(10, dtype="m8[s]")
+
+ with pytest.warns(DeprecationWarning, match=self.message):
+ np.add(3, 5, dtype=int64.newbyteorder())
+ with pytest.warns(DeprecationWarning, match=self.message):
+ np.add(3, 5, dtype="m8[ns]") # previously used the "ns"
+ with pytest.warns(DeprecationWarning, match=self.message):
+ np.add(arr, arr, dtype="m8[ns]") # never preserved the "ns"
+ with pytest.warns(DeprecationWarning, match=self.message):
+ np.maximum(arr, arr, dtype="m8[ns]") # previously used the "ns"
+ with pytest.warns(DeprecationWarning, match=self.message):
+ np.maximum.reduce(arr, dtype="m8[ns]") # never preserved the "ns"
diff --git a/numpy/core/tests/test_dtype.py b/numpy/core/tests/test_dtype.py
index 4f52268f5..23269f01b 100644
--- a/numpy/core/tests/test_dtype.py
+++ b/numpy/core/tests/test_dtype.py
@@ -109,9 +109,12 @@ class TestBuiltin:
operation(np.dtype(np.int32), 7)
@pytest.mark.parametrize("dtype",
- ['Bool', 'Complex32', 'Complex64', 'Float16', 'Float32', 'Float64',
- 'Int8', 'Int16', 'Int32', 'Int64', 'Object0', 'Timedelta64',
- 'UInt8', 'UInt16', 'UInt32', 'UInt64', 'Void0',
+ ['Bool', 'Bytes0', 'Complex32', 'Complex64',
+ 'Datetime64', 'Float16', 'Float32', 'Float64',
+ 'Int8', 'Int16', 'Int32', 'Int64',
+ 'Object0', 'Str0', 'Timedelta64',
+ 'UInt8', 'UInt16', 'Uint32', 'UInt32',
+ 'Uint64', 'UInt64', 'Void0',
"Float128", "Complex128"])
def test_numeric_style_types_are_invalid(self, dtype):
with assert_raises(TypeError):
diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py
index 9c56df2ba..8f8043c30 100644
--- a/numpy/core/tests/test_multiarray.py
+++ b/numpy/core/tests/test_multiarray.py
@@ -4885,9 +4885,9 @@ class TestIO:
# this should probably be supported as a file
# but for now test for proper errors
b = io.BytesIO()
- assert_raises(IOError, np.fromfile, b, np.uint8, 80)
+ assert_raises(OSError, np.fromfile, b, np.uint8, 80)
d = np.ones(7)
- assert_raises(IOError, lambda x: x.tofile(b), d)
+ assert_raises(OSError, lambda x: x.tofile(b), d)
def test_bool_fromstring(self):
v = np.array([True, False, True, False], dtype=np.bool_)
@@ -4970,12 +4970,12 @@ class TestIO:
x.tofile(tmp_filename)
def fail(*args, **kwargs):
- raise IOError('Can not tell or seek')
+ raise OSError('Can not tell or seek')
with io.open(tmp_filename, 'rb', buffering=0) as f:
f.seek = fail
f.tell = fail
- assert_raises(IOError, np.fromfile, f, dtype=x.dtype)
+ assert_raises(OSError, np.fromfile, f, dtype=x.dtype)
def test_io_open_unbuffered_fromfile(self, x, tmp_filename):
# gh-6632
@@ -5284,12 +5284,12 @@ class TestIO:
def test_tofile_cleanup(self, tmp_filename):
x = np.zeros((10), dtype=object)
with open(tmp_filename, 'wb') as f:
- assert_raises(IOError, lambda: x.tofile(f, sep=''))
+ assert_raises(OSError, lambda: x.tofile(f, sep=''))
# Dup-ed file handle should be closed or remove will fail on Windows OS
os.remove(tmp_filename)
# Also make sure that we close the Python handle
- assert_raises(IOError, lambda: x.tofile(tmp_filename))
+ assert_raises(OSError, lambda: x.tofile(tmp_filename))
os.remove(tmp_filename)
def test_fromfile_subarray_binary(self, tmp_filename):
diff --git a/numpy/core/tests/test_numeric.py b/numpy/core/tests/test_numeric.py
index e2d648a3c..19de0a8aa 100644
--- a/numpy/core/tests/test_numeric.py
+++ b/numpy/core/tests/test_numeric.py
@@ -2381,7 +2381,7 @@ class TestClip:
shape=in_shapes[1], elements={"allow_nan": False}))
# Then calculate our result and expected result and check that they're
- # equal! See gh-12519 and gh-19457 for discussion deciding on this
+ # equal! See gh-12519 and gh-19457 for discussion deciding on this
# property and the result_type argument.
result = np.clip(arr, amin, amax)
t = np.result_type(arr, amin, amax)
@@ -2637,15 +2637,15 @@ class TestStdVar:
def test_ddof1(self):
assert_almost_equal(np.var(self.A, ddof=1),
- self.real_var*len(self.A)/float(len(self.A)-1))
+ self.real_var * len(self.A) / (len(self.A) - 1))
assert_almost_equal(np.std(self.A, ddof=1)**2,
- self.real_var*len(self.A)/float(len(self.A)-1))
+ self.real_var*len(self.A) / (len(self.A) - 1))
def test_ddof2(self):
assert_almost_equal(np.var(self.A, ddof=2),
- self.real_var*len(self.A)/float(len(self.A)-2))
+ self.real_var * len(self.A) / (len(self.A) - 2))
assert_almost_equal(np.std(self.A, ddof=2)**2,
- self.real_var*len(self.A)/float(len(self.A)-2))
+ self.real_var * len(self.A) / (len(self.A) - 2))
def test_out_scalar(self):
d = np.arange(10)
diff --git a/numpy/core/tests/test_scalar_methods.py b/numpy/core/tests/test_scalar_methods.py
index 3693bba59..94b2dd3c9 100644
--- a/numpy/core/tests/test_scalar_methods.py
+++ b/numpy/core/tests/test_scalar_methods.py
@@ -102,3 +102,29 @@ class TestAsIntegerRatio:
pytest.skip("longdouble too small on this platform")
assert_equal(nf / df, f, "{}/{}".format(n, d))
+
+
+class TestIsInteger:
+ @pytest.mark.parametrize("str_value", ["inf", "nan"])
+ @pytest.mark.parametrize("code", np.typecodes["Float"])
+ def test_special(self, code: str, str_value: str) -> None:
+ cls = np.dtype(code).type
+ value = cls(str_value)
+ assert not value.is_integer()
+
+ @pytest.mark.parametrize(
+ "code", np.typecodes["Float"] + np.typecodes["AllInteger"]
+ )
+ def test_true(self, code: str) -> None:
+ float_array = np.arange(-5, 5).astype(code)
+ for value in float_array:
+ assert value.is_integer()
+
+ @pytest.mark.parametrize("code", np.typecodes["Float"])
+ def test_false(self, code: str) -> None:
+ float_array = np.arange(-5, 5).astype(code)
+ float_array *= 1.1
+ for value in float_array:
+ if value == 0:
+ continue
+ assert not value.is_integer()
diff --git a/numpy/core/tests/test_simd.py b/numpy/core/tests/test_simd.py
index ea5bbe103..f0c60953b 100644
--- a/numpy/core/tests/test_simd.py
+++ b/numpy/core/tests/test_simd.py
@@ -850,7 +850,7 @@ class _SIMD_ALL(_Test_Utility):
return
safe_neg = lambda x: -x-1 if -x > int_max else -x
- # test round divison for signed integers
+ # test round division for signed integers
for x, d in itertools.product(rdata, divisors):
d_neg = safe_neg(d)
data = self._data(x)
diff --git a/numpy/core/tests/test_ufunc.py b/numpy/core/tests/test_ufunc.py
index dab11d948..c3ea10d93 100644
--- a/numpy/core/tests/test_ufunc.py
+++ b/numpy/core/tests/test_ufunc.py
@@ -388,6 +388,24 @@ class TestUfunc:
assert_equal(ixs, (0, 0, 0, 1, 2))
assert_equal(flags, (self.can_ignore, self.size_inferred, 0))
assert_equal(sizes, (3, -1, 9))
+
+ def test_signature9(self):
+ enabled, num_dims, ixs, flags, sizes = umt.test_signature(
+ 1, 1, "( 3) -> ( )")
+ assert_equal(enabled, 1)
+ assert_equal(num_dims, (1, 0))
+ assert_equal(ixs, (0,))
+ assert_equal(flags, (0,))
+ assert_equal(sizes, (3,))
+
+ def test_signature10(self):
+ enabled, num_dims, ixs, flags, sizes = umt.test_signature(
+ 3, 1, "( 3? ) , (3? , 3?) ,(n )-> ( 9)")
+ assert_equal(enabled, 1)
+ assert_equal(num_dims, (1, 2, 1, 1))
+ assert_equal(ixs, (0, 0, 0, 1, 2))
+ assert_equal(flags, (self.can_ignore, self.size_inferred, 0))
+ assert_equal(sizes, (3, -1, 9))
def test_signature_failure_extra_parenthesis(self):
with assert_raises(ValueError):
@@ -518,26 +536,36 @@ class TestUfunc:
np.add(arr, arr, dtype="m")
np.maximum(arr, arr, dtype="m")
- def test_forced_dtype_warning(self):
- # does not warn (test relies on bad pickling behaviour, simply remove
- # it if the `assert int64 is not int64_2` should start failing.
- int64 = np.dtype("int64")
- int64_2 = pickle.loads(pickle.dumps(int64))
- assert int64 is not int64_2
- np.add(3, 4, dtype=int64_2)
+ @pytest.mark.parametrize("ufunc", [np.add, np.sqrt])
+ def test_cast_safety(self, ufunc):
+ """Basic test for the safest casts, because ufuncs inner loops can
+ indicate a cast-safety as well (which is normally always "no").
+ """
+ def call_ufunc(arr, **kwargs):
+ return ufunc(*(arr,) * ufunc.nin, **kwargs)
+
+ arr = np.array([1., 2., 3.], dtype=np.float32)
+ arr_bs = arr.astype(arr.dtype.newbyteorder())
+ expected = call_ufunc(arr)
+ # Normally, a "no" cast:
+ res = call_ufunc(arr, casting="no")
+ assert_array_equal(expected, res)
+ # Byte-swapping is not allowed with "no" though:
+ with pytest.raises(TypeError):
+ call_ufunc(arr_bs, casting="no")
- arr = np.arange(10, dtype="m8[s]")
- msg = "The `dtype` and `signature` arguments to ufuncs only select the"
- with pytest.raises(TypeError, match=msg):
- np.add(3, 5, dtype=int64.newbyteorder())
- with pytest.raises(TypeError, match=msg):
- np.add(3, 5, dtype="m8[ns]") # previously used the "ns"
- with pytest.raises(TypeError, match=msg):
- np.add(arr, arr, dtype="m8[ns]") # never preserved the "ns"
- with pytest.raises(TypeError, match=msg):
- np.maximum(arr, arr, dtype="m8[ns]") # previously used the "ns"
- with pytest.raises(TypeError, match=msg):
- np.maximum.reduce(arr, dtype="m8[ns]") # never preserved the "ns"
+ # But is allowed with "equiv":
+ res = call_ufunc(arr_bs, casting="equiv")
+ assert_array_equal(expected, res)
+
+ # Casting to float64 is safe, but not equiv:
+ with pytest.raises(TypeError):
+ call_ufunc(arr_bs, dtype=np.float64, casting="equiv")
+
+ # but it is safe cast:
+ res = call_ufunc(arr_bs, dtype=np.float64, casting="safe")
+ expected = call_ufunc(arr.astype(np.float64)) # upcast
+ assert_array_equal(expected, res)
def test_true_divide(self):
a = np.array(10)
@@ -2049,6 +2077,27 @@ class TestUfunc:
assert_raises(TypeError, f, a, b)
assert_raises(TypeError, f, c, a)
+ @pytest.mark.parametrize("ufunc",
+ [np.logical_and, np.logical_or]) # logical_xor object loop is bad
+ @pytest.mark.parametrize("signature",
+ [(None, None, object), (object, None, None),
+ (None, object, None)])
+ def test_logical_ufuncs_object_signatures(self, ufunc, signature):
+ a = np.array([True, None, False], dtype=object)
+ res = ufunc(a, a, signature=signature)
+ assert res.dtype == object
+
+ @pytest.mark.parametrize("ufunc",
+ [np.logical_and, np.logical_or, np.logical_xor])
+ @pytest.mark.parametrize("signature",
+ [(bool, None, object), (object, None, bool),
+ (None, object, bool)])
+ def test_logical_ufuncs_mixed_object_signatures(self, ufunc, signature):
+ # Most mixed signatures fail (except those with bool out, e.g. `OO->?`)
+ a = np.array([True, None, False])
+ with pytest.raises(TypeError):
+ ufunc(a, a, signature=signature)
+
def test_reduce_noncontig_output(self):
# Check that reduction deals with non-contiguous output arrays
# appropriately.
diff --git a/numpy/core/tests/test_umath_complex.py b/numpy/core/tests/test_umath_complex.py
index c051cd61b..af5bbe59e 100644
--- a/numpy/core/tests/test_umath_complex.py
+++ b/numpy/core/tests/test_umath_complex.py
@@ -134,8 +134,7 @@ class TestClog:
x = np.array([1+0j, 1+2j])
y_r = np.log(np.abs(x)) + 1j * np.angle(x)
y = np.log(x)
- for i in range(len(x)):
- assert_almost_equal(y[i], y_r[i])
+ assert_almost_equal(y, y_r)
@platform_skip
@pytest.mark.skipif(platform.machine() == "armv5tel", reason="See gh-413.")
@@ -365,18 +364,24 @@ class TestCpow:
x = np.array([1+1j, 0+2j, 1+2j, np.inf, np.nan])
y_r = x ** 2
y = np.power(x, 2)
- for i in range(len(x)):
- assert_almost_equal(y[i], y_r[i])
+ assert_almost_equal(y, y_r)
def test_scalar(self):
x = np.array([1, 1j, 2, 2.5+.37j, np.inf, np.nan])
y = np.array([1, 1j, -0.5+1.5j, -0.5+1.5j, 2, 3])
lx = list(range(len(x)))
- # Compute the values for complex type in python
- p_r = [complex(x[i]) ** complex(y[i]) for i in lx]
- # Substitute a result allowed by C99 standard
- p_r[4] = complex(np.inf, np.nan)
- # Do the same with numpy complex scalars
+
+ # Hardcode the expected `builtins.complex` values,
+ # as complex exponentiation is broken as of bpo-44698
+ p_r = [
+ 1+0j,
+ 0.20787957635076193+0j,
+ 0.35812203996480685+0.6097119028618724j,
+ 0.12659112128185032+0.48847676699581527j,
+ complex(np.inf, np.nan),
+ complex(np.nan, np.nan),
+ ]
+
n_r = [x[i] ** y[i] for i in lx]
for i in lx:
assert_almost_equal(n_r[i], p_r[i], err_msg='Loop %d\n' % i)
@@ -385,11 +390,18 @@ class TestCpow:
x = np.array([1, 1j, 2, 2.5+.37j, np.inf, np.nan])
y = np.array([1, 1j, -0.5+1.5j, -0.5+1.5j, 2, 3])
lx = list(range(len(x)))
- # Compute the values for complex type in python
- p_r = [complex(x[i]) ** complex(y[i]) for i in lx]
- # Substitute a result allowed by C99 standard
- p_r[4] = complex(np.inf, np.nan)
- # Do the same with numpy arrays
+
+ # Hardcode the expected `builtins.complex` values,
+ # as complex exponentiation is broken as of bpo-44698
+ p_r = [
+ 1+0j,
+ 0.20787957635076193+0j,
+ 0.35812203996480685+0.6097119028618724j,
+ 0.12659112128185032+0.48847676699581527j,
+ complex(np.inf, np.nan),
+ complex(np.nan, np.nan),
+ ]
+
n_r = x ** y
for i in lx:
assert_almost_equal(n_r[i], p_r[i], err_msg='Loop %d\n' % i)
@@ -405,8 +417,7 @@ class TestCabs:
x = np.array([1+1j, 0+2j, 1+2j, np.inf, np.nan])
y_r = np.array([np.sqrt(2.), 2, np.sqrt(5), np.inf, np.nan])
y = np.abs(x)
- for i in range(len(x)):
- assert_almost_equal(y[i], y_r[i])
+ assert_almost_equal(y, y_r)
def test_fabs(self):
# Test that np.abs(x +- 0j) == np.abs(x) (as mandated by C99 for cabs)
@@ -452,9 +463,10 @@ class TestCabs:
return np.abs(complex(a, b))
xa = np.array(x, dtype=complex)
- for i in range(len(xa)):
- ref = g(x[i], y[i])
- check_real_value(f, x[i], y[i], ref)
+ assert len(xa) == len(x) == len(y)
+ for xi, yi in zip(x, y):
+ ref = g(xi, yi)
+ check_real_value(f, xi, yi, ref)
class TestCarg:
def test_simple(self):
@@ -583,7 +595,7 @@ class TestComplexAbsoluteMixedDTypes:
@pytest.mark.parametrize("stride", [-4,-3,-2,-1,1,2,3,4])
@pytest.mark.parametrize("astype", [np.complex64, np.complex128])
@pytest.mark.parametrize("func", ['abs', 'square', 'conjugate'])
-
+
def test_array(self, stride, astype, func):
dtype = [('template_id', '<i8'), ('bank_chisq','<f4'),
('bank_chisq_dof','<i8'), ('chisq', '<f4'), ('chisq_dof','<i8'),
@@ -602,9 +614,9 @@ class TestComplexAbsoluteMixedDTypes:
myfunc = getattr(np, func)
a = vec['mycomplex']
g = myfunc(a[::stride])
-
+
b = vec['mycomplex'].copy()
h = myfunc(b[::stride])
-
+
assert_array_max_ulp(h.real, g.real, 1)
assert_array_max_ulp(h.imag, g.imag, 1)
diff --git a/numpy/distutils/ccompiler_opt.py b/numpy/distutils/ccompiler_opt.py
index 47d07ad4a..e7fd494d3 100644
--- a/numpy/distutils/ccompiler_opt.py
+++ b/numpy/distutils/ccompiler_opt.py
@@ -193,7 +193,12 @@ class _Config:
clang = dict(
native = '-march=native',
opt = "-O3",
- werror = '-Werror'
+ # One of the following flags needs to be applicable for Clang to
+ # guarantee the sanity of the testing process, however in certain
+ # cases `-Werror` gets skipped during the availability test due to
+ # "unused arguments" warnings.
+ # see https://github.com/numpy/numpy/issues/19624
+ werror = '-Werror-implicit-function-declaration -Werror'
),
icc = dict(
native = '-xHost',
@@ -516,7 +521,7 @@ class _Config:
def rm_temp():
try:
shutil.rmtree(tmp)
- except IOError:
+ except OSError:
pass
atexit.register(rm_temp)
self.conf_tmp_path = tmp
@@ -2495,7 +2500,7 @@ class CCompilerOpt(_Config, _Distutils, _Cache, _CCompiler, _Feature, _Parse):
last_hash = f.readline().split("cache_hash:")
if len(last_hash) == 2 and int(last_hash[1]) == cache_hash:
return True
- except IOError:
+ except OSError:
pass
self.dist_log("generate dispatched config -> ", config_path)
diff --git a/numpy/distutils/cpuinfo.py b/numpy/distutils/cpuinfo.py
index 51ce3c129..776202109 100644
--- a/numpy/distutils/cpuinfo.py
+++ b/numpy/distutils/cpuinfo.py
@@ -27,7 +27,7 @@ from subprocess import getstatusoutput
def getoutput(cmd, successful_status=(0,), stacklevel=1):
try:
status, output = getstatusoutput(cmd)
- except EnvironmentError as e:
+ except OSError as e:
warnings.warn(str(e), UserWarning, stacklevel=stacklevel)
return False, ""
if os.WIFEXITED(status) and os.WEXITSTATUS(status) in successful_status:
@@ -109,7 +109,7 @@ class LinuxCPUInfo(CPUInfoBase):
info[0]['uname_m'] = output.strip()
try:
fo = open('/proc/cpuinfo')
- except EnvironmentError as e:
+ except OSError as e:
warnings.warn(str(e), UserWarning, stacklevel=2)
else:
for line in fo:
diff --git a/numpy/distutils/exec_command.py b/numpy/distutils/exec_command.py
index fb10d2470..79998cf5d 100644
--- a/numpy/distutils/exec_command.py
+++ b/numpy/distutils/exec_command.py
@@ -284,7 +284,7 @@ def _exec_command(command, use_shell=None, use_tee = None, **env):
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=False)
- except EnvironmentError:
+ except OSError:
# Return 127, as os.spawn*() and /bin/sh do
return 127, ''
diff --git a/numpy/distutils/fcompiler/compaq.py b/numpy/distutils/fcompiler/compaq.py
index 351a43dd7..01314c136 100644
--- a/numpy/distutils/fcompiler/compaq.py
+++ b/numpy/distutils/fcompiler/compaq.py
@@ -84,9 +84,9 @@ class CompaqVisualFCompiler(FCompiler):
print('Ignoring "%s" (I think it is msvccompiler.py bug)' % (e))
else:
raise
- except IOError as e:
+ except OSError as e:
if not "vcvarsall.bat" in str(e):
- print("Unexpected IOError in", __file__)
+ print("Unexpected OSError in", __file__)
raise
except ValueError as e:
if not "'path'" in str(e):
diff --git a/numpy/distutils/misc_util.py b/numpy/distutils/misc_util.py
index 9c65ff43e..a903f3ea3 100644
--- a/numpy/distutils/misc_util.py
+++ b/numpy/distutils/misc_util.py
@@ -42,7 +42,7 @@ __all__ = ['Configuration', 'get_numpy_include_dirs', 'default_config_dict',
'get_script_files', 'get_lib_source_files', 'get_data_files',
'dot_join', 'get_frame', 'minrelpath', 'njoin',
'is_sequence', 'is_string', 'as_list', 'gpaths', 'get_language',
- 'quote_args', 'get_build_architecture', 'get_info', 'get_pkg_info',
+ 'get_build_architecture', 'get_info', 'get_pkg_info',
'get_num_build_jobs']
class InstallableLib:
@@ -110,6 +110,13 @@ def get_num_build_jobs():
return max(x for x in cmdattr if x is not None)
def quote_args(args):
+ """Quote list of arguments.
+
+ .. deprecated:: 1.22
+ """
+ import warnings
+ warnings.warn('"quote_args" is deprecated.',
+ DeprecationWarning, stacklevel=2)
# don't used _nt_quote_args as it does not check if
# args items already have quotes or not.
args = list(args)
diff --git a/numpy/distutils/npy_pkg_config.py b/numpy/distutils/npy_pkg_config.py
index 951ce5fb8..f6e3ad397 100644
--- a/numpy/distutils/npy_pkg_config.py
+++ b/numpy/distutils/npy_pkg_config.py
@@ -9,7 +9,7 @@ __all__ = ['FormatError', 'PkgNotFound', 'LibraryInfo', 'VariableSet',
_VAR = re.compile(r'\$\{([a-zA-Z0-9_-]+)\}')
-class FormatError(IOError):
+class FormatError(OSError):
"""
Exception thrown when there is a problem parsing a configuration file.
@@ -20,7 +20,7 @@ class FormatError(IOError):
def __str__(self):
return self.msg
-class PkgNotFound(IOError):
+class PkgNotFound(OSError):
"""Exception raised when a package can not be located."""
def __init__(self, msg):
self.msg = msg
diff --git a/numpy/distutils/system_info.py b/numpy/distutils/system_info.py
index 2846d754e..8467e1c19 100644
--- a/numpy/distutils/system_info.py
+++ b/numpy/distutils/system_info.py
@@ -2424,6 +2424,10 @@ class flame_info(system_info):
if info is None:
return
+ # Add the extra flag args to info
+ extra_info = self.calc_extra_info()
+ dict_append(info, **extra_info)
+
if self.check_embedded_lapack(info):
# check if the user has supplied all information required
self.set_info(**info)
diff --git a/numpy/distutils/unixccompiler.py b/numpy/distutils/unixccompiler.py
index fb91f1789..733a9fc50 100644
--- a/numpy/distutils/unixccompiler.py
+++ b/numpy/distutils/unixccompiler.py
@@ -105,7 +105,7 @@ def UnixCCompiler_create_static_lib(self, objects, output_libname,
# and recreate.
# Also, ar on OS X doesn't handle updating universal archives
os.unlink(output_filename)
- except (IOError, OSError):
+ except OSError:
pass
self.mkpath(os.path.dirname(output_filename))
tmp_objects = objects + self.objects
diff --git a/numpy/f2py/__init__.pyi b/numpy/f2py/__init__.pyi
index 7d8e092ea..e52e12bbd 100644
--- a/numpy/f2py/__init__.pyi
+++ b/numpy/f2py/__init__.pyi
@@ -1,7 +1,6 @@
import os
import subprocess
-from typing import Any, List, Iterable, Dict, overload
-from typing_extensions import TypedDict, Literal as L
+from typing import Literal as L, Any, List, Iterable, Dict, overload, TypedDict
from numpy._pytesttester import PytestTester
diff --git a/numpy/f2py/cb_rules.py b/numpy/f2py/cb_rules.py
index 62aa2fca9..5c9ddb00a 100644
--- a/numpy/f2py/cb_rules.py
+++ b/numpy/f2py/cb_rules.py
@@ -110,6 +110,7 @@ f2py_cb_start_clock();
capi_tmp = PyObject_GetAttrString(#modulename#_module,\"#argname#_extra_args\");
if (capi_tmp) {
capi_arglist = (PyTupleObject *)PySequence_Tuple(capi_tmp);
+ Py_DECREF(capi_tmp);
if (capi_arglist==NULL) {
PyErr_SetString(#modulename#_error,\"Failed to convert #modulename#.#argname#_extra_args to tuple.\\n\");
goto capi_fail;
diff --git a/numpy/f2py/crackfortran.py b/numpy/f2py/crackfortran.py
index 3ac9b80c8..c3ec792e3 100755
--- a/numpy/f2py/crackfortran.py
+++ b/numpy/f2py/crackfortran.py
@@ -3414,8 +3414,8 @@ if __name__ == "__main__":
try:
open(l).close()
files.append(l)
- except IOError as detail:
- errmess('IOError: %s\n' % str(detail))
+ except OSError as detail:
+ errmess(f'OSError: {detail!s}\n')
else:
funcs.append(l)
if not strictf77 and f77modulename and not skipemptyends:
diff --git a/numpy/f2py/f2py2e.py b/numpy/f2py/f2py2e.py
index a14f068f1..f45374be6 100755
--- a/numpy/f2py/f2py2e.py
+++ b/numpy/f2py/f2py2e.py
@@ -275,9 +275,8 @@ def scaninputline(inputline):
with open(l):
pass
files.append(l)
- except IOError as detail:
- errmess('IOError: %s. Skipping file "%s".\n' %
- (str(detail), l))
+ except OSError as detail:
+ errmess(f'OSError: {detail!s}. Skipping file "{l!s}".\n')
elif f == -1:
skipfuncs.append(l)
elif f == 0:
@@ -359,33 +358,34 @@ def buildmodules(lst):
cfuncs.buildcfuncs()
outmess('Building modules...\n')
modules, mnames, isusedby = [], [], {}
- for i in range(len(lst)):
- if '__user__' in lst[i]['name']:
- cb_rules.buildcallbacks(lst[i])
+ for item in lst:
+ if '__user__' in item['name']:
+ cb_rules.buildcallbacks(item)
else:
- if 'use' in lst[i]:
- for u in lst[i]['use'].keys():
+ if 'use' in item:
+ for u in item['use'].keys():
if u not in isusedby:
isusedby[u] = []
- isusedby[u].append(lst[i]['name'])
- modules.append(lst[i])
- mnames.append(lst[i]['name'])
+ isusedby[u].append(item['name'])
+ modules.append(item)
+ mnames.append(item['name'])
ret = {}
- for i in range(len(mnames)):
- if mnames[i] in isusedby:
+ for module, name in zip(modules, mnames):
+ if name in isusedby:
outmess('\tSkipping module "%s" which is used by %s.\n' % (
- mnames[i], ','.join(['"%s"' % s for s in isusedby[mnames[i]]])))
+ name, ','.join('"%s"' % s for s in isusedby[name])))
else:
um = []
- if 'use' in modules[i]:
- for u in modules[i]['use'].keys():
+ if 'use' in module:
+ for u in module['use'].keys():
if u in isusedby and u in mnames:
um.append(modules[mnames.index(u)])
else:
outmess(
- '\tModule "%s" uses nonexisting "%s" which will be ignored.\n' % (mnames[i], u))
- ret[mnames[i]] = {}
- dict_append(ret[mnames[i]], rules.buildmodule(modules[i], um))
+ f'\tModule "{name}" uses nonexisting "{u}" '
+ 'which will be ignored.\n')
+ ret[name] = {}
+ dict_append(ret[name], rules.buildmodule(module, um))
return ret
@@ -429,18 +429,20 @@ def run_main(comline_list):
capi_maps.load_f2cmap_file(options['f2cmap_file'])
postlist = callcrackfortran(files, options)
isusedby = {}
- for i in range(len(postlist)):
- if 'use' in postlist[i]:
- for u in postlist[i]['use'].keys():
+ for plist in postlist:
+ if 'use' in plist:
+ for u in plist['use'].keys():
if u not in isusedby:
isusedby[u] = []
- isusedby[u].append(postlist[i]['name'])
- for i in range(len(postlist)):
- if postlist[i]['block'] == 'python module' and '__user__' in postlist[i]['name']:
- if postlist[i]['name'] in isusedby:
+ isusedby[u].append(plist['name'])
+ for plist in postlist:
+ if plist['block'] == 'python module' and '__user__' in plist['name']:
+ if plist['name'] in isusedby:
# if not quiet:
- outmess('Skipping Makefile build for module "%s" which is used by %s\n' % (
- postlist[i]['name'], ','.join(['"%s"' % s for s in isusedby[postlist[i]['name']]])))
+                outmess(
+                    'Skipping Makefile build for module "{}" '
+                    'which is used by {}\n'.format(plist['name'],
+                        ','.join(f'"{s}"' for s in isusedby[plist['name']])))
if 'signsfile' in options:
if options['verbose'] > 1:
outmess(
@@ -448,8 +450,8 @@ def run_main(comline_list):
outmess('%s %s\n' %
(os.path.basename(sys.argv[0]), options['signsfile']))
return
- for i in range(len(postlist)):
- if postlist[i]['block'] != 'python module':
+ for plist in postlist:
+ if plist['block'] != 'python module':
if 'python module' not in options:
errmess(
'Tip: If your original code is Fortran source then you must use -m option.\n')
diff --git a/numpy/f2py/tests/util.py b/numpy/f2py/tests/util.py
index d5fa76fed..eace3c9fc 100644
--- a/numpy/f2py/tests/util.py
+++ b/numpy/f2py/tests/util.py
@@ -36,7 +36,7 @@ def _cleanup():
pass
try:
shutil.rmtree(_module_dir)
- except (IOError, OSError):
+ except OSError:
pass
_module_dir = None
diff --git a/numpy/fft/tests/test_pocketfft.py b/numpy/fft/tests/test_pocketfft.py
index 604ac8fde..392644237 100644
--- a/numpy/fft/tests/test_pocketfft.py
+++ b/numpy/fft/tests/test_pocketfft.py
@@ -10,7 +10,7 @@ import queue
def fft1(x):
L = len(x)
- phase = -2j*np.pi*(np.arange(L)/float(L))
+ phase = -2j * np.pi * (np.arange(L) / L)
phase = np.arange(L).reshape(-1, 1) * phase
return np.sum(x*np.exp(phase), axis=1)
diff --git a/numpy/lib/__init__.pyi b/numpy/lib/__init__.pyi
index 25640ec07..ae23b2ec4 100644
--- a/numpy/lib/__init__.pyi
+++ b/numpy/lib/__init__.pyi
@@ -130,7 +130,6 @@ from numpy.lib.npyio import (
recfromtxt as recfromtxt,
recfromcsv as recfromcsv,
load as load,
- loads as loads,
save as save,
savez as savez,
savez_compressed as savez_compressed,
diff --git a/numpy/lib/_datasource.py b/numpy/lib/_datasource.py
index c790a6462..56b94853d 100644
--- a/numpy/lib/_datasource.py
+++ b/numpy/lib/_datasource.py
@@ -530,7 +530,7 @@ class DataSource:
return _file_openers[ext](found, mode=mode,
encoding=encoding, newline=newline)
else:
- raise IOError("%s not found." % path)
+ raise FileNotFoundError(f"{path} not found.")
class Repository (DataSource):
diff --git a/numpy/lib/arraypad.pyi b/numpy/lib/arraypad.pyi
index df9538dd7..49ce8e683 100644
--- a/numpy/lib/arraypad.pyi
+++ b/numpy/lib/arraypad.pyi
@@ -1,11 +1,12 @@
-import sys
from typing import (
+ Literal as L,
Any,
Dict,
List,
overload,
Tuple,
TypeVar,
+ Protocol,
)
from numpy import ndarray, dtype, generic
@@ -18,20 +19,16 @@ from numpy.typing import (
_SupportsArray,
)
-if sys.version_info >= (3, 8):
- from typing import Literal as L, Protocol
-else:
- from typing_extensions import Literal as L, Protocol
-
_SCT = TypeVar("_SCT", bound=generic)
class _ModeFunc(Protocol):
def __call__(
self,
- __vector: NDArray[Any],
- __iaxis_pad_width: Tuple[int, int],
- __iaxis: int,
- __kwargs: Dict[str, Any],
+ vector: NDArray[Any],
+ iaxis_pad_width: Tuple[int, int],
+ iaxis: int,
+ kwargs: Dict[str, Any],
+ /,
) -> None: ...
_ModeKind = L[
diff --git a/numpy/lib/arrayterator.pyi b/numpy/lib/arrayterator.pyi
index 39d6fd843..82c669206 100644
--- a/numpy/lib/arrayterator.pyi
+++ b/numpy/lib/arrayterator.pyi
@@ -1,4 +1,3 @@
-import sys
from typing import (
List,
Any,
diff --git a/numpy/lib/format.py b/numpy/lib/format.py
index 6ac66c22a..e566e253d 100644
--- a/numpy/lib/format.py
+++ b/numpy/lib/format.py
@@ -162,7 +162,6 @@ evolved with time and this document is more current.
"""
import numpy
-import io
import warnings
from numpy.lib.utils import safe_eval
from numpy.compat import (
@@ -831,7 +830,7 @@ def open_memmap(filename, mode='r+', dtype=None, shape=None,
------
ValueError
If the data or the mode is invalid.
- IOError
+ OSError
If the file is not found or cannot be opened correctly.
See Also
@@ -909,7 +908,7 @@ def _read_bytes(fp, size, error_template="ran out of data"):
data += r
if len(r) == 0 or len(data) == size:
break
- except io.BlockingIOError:
+ except BlockingIOError:
pass
if len(data) != size:
msg = "EOF: reading %s, expected %d bytes got %d"
diff --git a/numpy/lib/format.pyi b/numpy/lib/format.pyi
index 4c44d57bf..092245daf 100644
--- a/numpy/lib/format.pyi
+++ b/numpy/lib/format.pyi
@@ -1,10 +1,4 @@
-import sys
-from typing import Any, List, Set
-
-if sys.version_info >= (3, 8):
- from typing import Literal, Final
-else:
- from typing_extensions import Literal, Final
+from typing import Any, List, Set, Literal, Final
__all__: List[str]
diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index b43a1d666..d875a00ae 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -1512,7 +1512,7 @@ def unwrap(p, discont=None, axis=-1, *, period=2*pi):
difference from their predecessor of more than ``max(discont, period/2)``
to their `period`-complementary values.
- For the default case where `period` is :math:`2\pi` and is `discont` is
+ For the default case where `period` is :math:`2\pi` and `discont` is
:math:`\pi`, this unwraps a radian phase `p` such that adjacent differences
are never greater than :math:`\pi` by adding :math:`2k\pi` for some
integer :math:`k`.
@@ -1866,6 +1866,8 @@ def _parse_gufunc_signature(signature):
Tuple of input and output core dimensions parsed from the signature, each
of the form List[Tuple[str, ...]].
"""
+ signature = re.sub(r'\s+', '', signature)
+
if not re.match(_SIGNATURE, signature):
raise ValueError(
'not a valid gufunc signature: {}'.format(signature))
diff --git a/numpy/lib/index_tricks.py b/numpy/lib/index_tricks.py
index 8d1b6e5be..2a4402c89 100644
--- a/numpy/lib/index_tricks.py
+++ b/numpy/lib/index_tricks.py
@@ -149,9 +149,9 @@ class nd_grid:
try:
size = []
typ = int
- for k in range(len(key)):
- step = key[k].step
- start = key[k].start
+ for kk in key:
+ step = kk.step
+ start = kk.start
if start is None:
start = 0
if step is None:
@@ -161,19 +161,19 @@ class nd_grid:
typ = float
else:
size.append(
- int(math.ceil((key[k].stop - start)/(step*1.0))))
+ int(math.ceil((kk.stop - start) / (step * 1.0))))
if (isinstance(step, (_nx.floating, float)) or
isinstance(start, (_nx.floating, float)) or
- isinstance(key[k].stop, (_nx.floating, float))):
+ isinstance(kk.stop, (_nx.floating, float))):
typ = float
if self.sparse:
nn = [_nx.arange(_x, dtype=_t)
for _x, _t in zip(size, (typ,)*len(size))]
else:
nn = _nx.indices(size, typ)
- for k in range(len(size)):
- step = key[k].step
- start = key[k].start
+ for k, kk in enumerate(key):
+ step = kk.step
+ start = kk.start
if start is None:
start = 0
if step is None:
@@ -181,7 +181,7 @@ class nd_grid:
if isinstance(step, (_nx.complexfloating, complex)):
step = int(abs(step))
if step != 1:
- step = (key[k].stop - start)/float(step-1)
+ step = (kk.stop - start) / float(step - 1)
nn[k] = (nn[k]*step+start)
if self.sparse:
slobj = [_nx.newaxis]*len(size)
diff --git a/numpy/lib/index_tricks.pyi b/numpy/lib/index_tricks.pyi
index 0f9ae94a9..530be3cae 100644
--- a/numpy/lib/index_tricks.pyi
+++ b/numpy/lib/index_tricks.pyi
@@ -1,4 +1,3 @@
-import sys
from typing import (
Any,
Tuple,
@@ -8,6 +7,8 @@ from typing import (
List,
Union,
Sequence,
+ Literal,
+ SupportsIndex,
)
from numpy import (
@@ -49,11 +50,6 @@ from numpy.core.multiarray import (
ravel_multi_index as ravel_multi_index,
)
-if sys.version_info >= (3, 8):
- from typing import Literal, SupportsIndex
-else:
- from typing_extensions import Literal, SupportsIndex
-
_T = TypeVar("_T")
_DType = TypeVar("_DType", bound=dtype[Any])
_BoolType = TypeVar("_BoolType", Literal[True], Literal[False])
diff --git a/numpy/lib/npyio.py b/numpy/lib/npyio.py
index 7c73d9655..b91bf440f 100644
--- a/numpy/lib/npyio.py
+++ b/numpy/lib/npyio.py
@@ -5,7 +5,7 @@ import itertools
import warnings
import weakref
import contextlib
-from operator import itemgetter, index as opindex
+from operator import itemgetter, index as opindex, methodcaller
from collections.abc import Mapping
import numpy as np
@@ -26,18 +26,9 @@ from numpy.compat import (
)
-@set_module('numpy')
-def loads(*args, **kwargs):
- # NumPy 1.15.0, 2017-12-10
- warnings.warn(
- "np.loads is deprecated, use pickle.loads instead",
- DeprecationWarning, stacklevel=2)
- return pickle.loads(*args, **kwargs)
-
-
__all__ = [
- 'savetxt', 'loadtxt', 'genfromtxt', 'ndfromtxt', 'mafromtxt',
- 'recfromtxt', 'recfromcsv', 'load', 'loads', 'save', 'savez',
+ 'savetxt', 'loadtxt', 'genfromtxt',
+ 'recfromtxt', 'recfromcsv', 'load', 'save', 'savez',
'savez_compressed', 'packbits', 'unpackbits', 'fromregex', 'DataSource'
]
@@ -333,10 +324,12 @@ def load(file, mmap_mode=None, allow_pickle=False, fix_imports=True,
Raises
------
- IOError
+ OSError
If the input file does not exist or cannot be read.
+ UnpicklingError
+ If ``allow_pickle=True``, but the file cannot be loaded as a pickle.
ValueError
- The file contains an object array, but allow_pickle=False given.
+ The file contains an object array, but ``allow_pickle=False`` given.
See Also
--------
@@ -445,8 +438,8 @@ def load(file, mmap_mode=None, allow_pickle=False, fix_imports=True,
try:
return pickle.load(fid, **pickle_kwargs)
except Exception as e:
- raise IOError(
- "Failed to interpret file %s as a pickle" % repr(file)) from e
+ raise pickle.UnpicklingError(
+ f"Failed to interpret file {file!r} as a pickle") from e
def _save_dispatcher(file, arr, allow_pickle=None, fix_imports=None):
@@ -728,41 +721,42 @@ def _savez(file, args, kwds, compress, allow_pickle=True, pickle_kwargs=None):
zipf.close()
+def _floatconv(x):
+ try:
+ return float(x) # The fastest path.
+ except ValueError:
+ if '0x' in x: # Don't accidentally convert "a" ("0xa") to 10.
+ try:
+ return float.fromhex(x)
+ except ValueError:
+ pass
+ raise # Raise the original exception, which makes more sense.
+
+
+_CONVERTERS = [ # These converters only ever get strs (not bytes) as input.
+ (np.bool_, lambda x: bool(int(x))),
+ (np.uint64, np.uint64),
+ (np.int64, np.int64),
+ (np.integer, lambda x: int(float(x))),
+ (np.longdouble, np.longdouble),
+ (np.floating, _floatconv),
+ (complex, lambda x: complex(x.replace('+-', '-'))),
+ (np.bytes_, methodcaller('encode', 'latin-1')),
+ (np.unicode_, str),
+]
+
+
def _getconv(dtype):
- """ Find the correct dtype converter. Adapted from matplotlib """
+ """
+ Find the correct dtype converter. Adapted from matplotlib.
- def floatconv(x):
- try:
- return float(x) # The fastest path.
- except ValueError:
- if '0x' in x: # Don't accidentally convert "a" ("0xa") to 10.
- try:
- return float.fromhex(x)
- except ValueError:
- pass
- raise # Raise the original exception, which makes more sense.
-
- typ = dtype.type
- if issubclass(typ, np.bool_):
- return lambda x: bool(int(x))
- if issubclass(typ, np.uint64):
- return np.uint64
- if issubclass(typ, np.int64):
- return np.int64
- if issubclass(typ, np.integer):
- return lambda x: int(float(x))
- elif issubclass(typ, np.longdouble):
- return np.longdouble
- elif issubclass(typ, np.floating):
- return floatconv
- elif issubclass(typ, complex):
- return lambda x: complex(asstr(x).replace('+-', '-'))
- elif issubclass(typ, np.bytes_):
- return asbytes
- elif issubclass(typ, np.unicode_):
- return asunicode
- else:
- return asstr
+ Even when a lambda is returned, it is defined at the toplevel, to allow
+ testing for equality and enabling optimization for single-type data.
+ """
+ for base, conv in _CONVERTERS:
+ if issubclass(dtype.type, base):
+ return conv
+ return str
# _loadtxt_flatten_dtype_internal and _loadtxt_pack_items are loadtxt helpers
@@ -978,52 +972,13 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
# Nested functions used by loadtxt.
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- def split_line(line):
- """Chop off comments, strip, and split at delimiter. """
- line = _decode_line(line, encoding=encoding)
+ def split_line(line: str):
+ """Chop off comments, strip, and split at delimiter."""
for comment in comments: # Much faster than using a single regex.
line = line.split(comment, 1)[0]
line = line.strip('\r\n')
return line.split(delimiter) if line else []
- def read_data(chunk_size):
- """Parse each line, including the first.
-
- The file read, `fh`, is a global defined above.
-
- Parameters
- ----------
- chunk_size : int
- At most `chunk_size` lines are read at a time, with iteration
- until all lines are read.
-
- """
- X = []
- line_iter = itertools.chain([first_line], fh)
- line_iter = itertools.islice(line_iter, max_rows)
- for i, line in enumerate(line_iter):
- vals = split_line(line)
- if len(vals) == 0:
- continue
- if usecols:
- vals = [vals[j] for j in usecols]
- if len(vals) != ncols:
- line_num = i + skiprows + 1
- raise ValueError("Wrong number of columns at line %d"
- % line_num)
-
- # Convert each value according to its column and store
- items = [conv(val) for (conv, val) in zip(converters, vals)]
-
- # Then pack it according to the dtype's nesting
- items = packer(items)
- X.append(items)
- if len(X) > chunk_size:
- yield X
- X = []
- if X:
- yield X
-
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Main body of loadtxt.
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@@ -1051,14 +1006,14 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
byte_converters = True
if usecols is not None:
- # Allow usecols to be a single int or a sequence of ints
+ # Copy usecols, allowing it to be a single int or a sequence of ints.
try:
- usecols_as_list = list(usecols)
+ usecols = list(usecols)
except TypeError:
- usecols_as_list = [usecols]
- for col_idx in usecols_as_list:
+ usecols = [usecols]
+ for i, col_idx in enumerate(usecols):
try:
- opindex(col_idx)
+ usecols[i] = opindex(col_idx) # Cast to builtin int now.
except TypeError as e:
e.args = (
"usecols must be an int or a sequence of ints but "
@@ -1066,8 +1021,13 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
type(col_idx),
)
raise
- # Fall back to existing code
- usecols = usecols_as_list
+ if len(usecols) > 1:
+ usecols_getter = itemgetter(*usecols)
+ else:
+ # Get an iterable back, even if using a single column.
+ usecols_getter = lambda obj, c=usecols[0]: [obj[c]]
+ else:
+ usecols_getter = None
# Make sure we're dealing with a proper dtype
dtype = np.dtype(dtype)
@@ -1075,50 +1035,70 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
dtype_types, packer = _loadtxt_flatten_dtype_internal(dtype)
- fown = False
+ fh_closing_ctx = contextlib.nullcontext()
try:
if isinstance(fname, os_PathLike):
fname = os_fspath(fname)
if _is_string_like(fname):
fh = np.lib._datasource.open(fname, 'rt', encoding=encoding)
fencoding = getattr(fh, 'encoding', 'latin1')
- fh = iter(fh)
- fown = True
+ line_iter = iter(fh)
+ fh_closing_ctx = contextlib.closing(fh)
else:
- fh = iter(fname)
+ line_iter = iter(fname)
fencoding = getattr(fname, 'encoding', 'latin1')
+ try:
+ first_line = next(line_iter)
+ except StopIteration:
+ pass # Nothing matters if line_iter is empty.
+ else:
+ # Put first_line back.
+ line_iter = itertools.chain([first_line], line_iter)
+ if isinstance(first_line, bytes):
+ # Using latin1 matches _decode_line's behavior.
+ decoder = methodcaller(
+ "decode",
+ encoding if encoding is not None else "latin1")
+ line_iter = map(decoder, line_iter)
except TypeError as e:
raise ValueError(
f"fname must be a string, filehandle, list of strings,\n"
f"or generator. Got {type(fname)} instead."
) from e
- # input may be a python2 io stream
- if encoding is not None:
- fencoding = encoding
- # we must assume local encoding
- # TODO emit portability warning?
- elif fencoding is None:
- import locale
- fencoding = locale.getpreferredencoding()
+ with fh_closing_ctx:
+
+ # input may be a python2 io stream
+ if encoding is not None:
+ fencoding = encoding
+ # we must assume local encoding
+ # TODO emit portability warning?
+ elif fencoding is None:
+ import locale
+ fencoding = locale.getpreferredencoding()
- try:
# Skip the first `skiprows` lines
for i in range(skiprows):
- next(fh)
+ next(line_iter)
# Read until we find a line with some values, and use it to determine
# the need for decoding and estimate the number of columns.
- for first_line in fh:
+ for first_line in line_iter:
ncols = len(usecols or split_line(first_line))
if ncols:
+ # Put first_line back.
+ line_iter = itertools.chain([first_line], line_iter)
break
else: # End of lines reached
- first_line = ''
ncols = len(usecols or [])
warnings.warn('loadtxt: Empty input file: "%s"' % fname,
stacklevel=2)
+ line_iter = itertools.islice(line_iter, max_rows)
+ lineno_words_iter = filter(
+ itemgetter(1), # item[1] is words; filter skips empty lines.
+ enumerate(map(split_line, line_iter), 1 + skiprows))
+
# Now that we know ncols, create the default converters list, and
# set packing, if necessary.
if len(dtype_types) > 1:
@@ -1144,36 +1124,55 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
continue
if byte_converters:
# converters may use decode to workaround numpy's old
- # behaviour, so encode the string again before passing to
- # the user converter
- def tobytes_first(x, conv):
- if type(x) is bytes:
- return conv(x)
+ # behaviour, so encode the string again (converters are only
+ # called with strings) before passing to the user converter.
+ def tobytes_first(conv, x):
return conv(x.encode("latin1"))
- converters[i] = functools.partial(tobytes_first, conv=conv)
+ converters[i] = functools.partial(tobytes_first, conv)
else:
converters[i] = conv
- converters = [conv if conv is not bytes else
- lambda x: x.encode(fencoding) for conv in converters]
+ fencode = methodcaller("encode", fencoding)
+ converters = [conv if conv is not bytes else fencode
+ for conv in converters]
+ if len(set(converters)) == 1:
+ # Optimize single-type data. Note that this is only reached if
+ # `_getconv` returns equal callables (i.e. not local lambdas) on
+ # equal dtypes.
+ def convert_row(vals, _conv=converters[0]):
+ return [*map(_conv, vals)]
+ else:
+ def convert_row(vals):
+ return [conv(val) for conv, val in zip(converters, vals)]
# read data in chunks and fill it into an array via resize
# over-allocating and shrinking the array later may be faster but is
# probably not relevant compared to the cost of actually reading and
# converting the data
X = None
- for x in read_data(_loadtxt_chunksize):
+ while True:
+ chunk = []
+ for lineno, words in itertools.islice(
+ lineno_words_iter, _loadtxt_chunksize):
+ if usecols_getter is not None:
+ words = usecols_getter(words)
+ elif len(words) != ncols:
+ raise ValueError(
+ f"Wrong number of columns at line {lineno}")
+ # Convert each value according to its column, then pack it
+ # according to the dtype's nesting, and store it.
+ chunk.append(packer(convert_row(words)))
+ if not chunk: # The islice is empty, i.e. we're done.
+ break
+
if X is None:
- X = np.array(x, dtype)
+ X = np.array(chunk, dtype)
else:
nshape = list(X.shape)
pos = nshape[0]
- nshape[0] += len(x)
+ nshape[0] += len(chunk)
X.resize(nshape, refcheck=False)
- X[pos:, ...] = x
- finally:
- if fown:
- fh.close()
+ X[pos:, ...] = chunk
if X is None:
X = np.array([], dtype)
@@ -1475,8 +1474,11 @@ def fromregex(file, regexp, dtype, encoding=None):
Parameters
----------
- file : str or file
+ file : path or file
Filename or file object to read.
+
+ .. versionchanged:: 1.22.0
+ Now accepts `os.PathLike` implementations.
regexp : str or regexp
Regular expression used to parse the file.
Groups in the regular expression correspond to fields in the dtype.
@@ -1526,6 +1528,7 @@ def fromregex(file, regexp, dtype, encoding=None):
"""
own_fh = False
if not hasattr(file, "read"):
+ file = os.fspath(file)
file = np.lib._datasource.open(file, 'rt', encoding=encoding)
own_fh = True
@@ -1534,9 +1537,9 @@ def fromregex(file, regexp, dtype, encoding=None):
dtype = np.dtype(dtype)
content = file.read()
- if isinstance(content, bytes) and isinstance(regexp, np.compat.unicode):
+ if isinstance(content, bytes) and isinstance(regexp, str):
regexp = asbytes(regexp)
- elif isinstance(content, np.compat.unicode) and isinstance(regexp, bytes):
+ elif isinstance(content, str) and isinstance(regexp, bytes):
regexp = asstr(regexp)
if not hasattr(regexp, 'match'):
@@ -2307,62 +2310,6 @@ _genfromtxt_with_like = array_function_dispatch(
)(genfromtxt)
-def ndfromtxt(fname, **kwargs):
- """
- Load ASCII data stored in a file and return it as a single array.
-
- .. deprecated:: 1.17
- ndfromtxt` is a deprecated alias of `genfromtxt` which
- overwrites the ``usemask`` argument with `False` even when
- explicitly called as ``ndfromtxt(..., usemask=True)``.
- Use `genfromtxt` instead.
-
- Parameters
- ----------
- fname, kwargs : For a description of input parameters, see `genfromtxt`.
-
- See Also
- --------
- numpy.genfromtxt : generic function.
-
- """
- kwargs['usemask'] = False
- # Numpy 1.17
- warnings.warn(
- "np.ndfromtxt is a deprecated alias of np.genfromtxt, "
- "prefer the latter.",
- DeprecationWarning, stacklevel=2)
- return genfromtxt(fname, **kwargs)
-
-
-def mafromtxt(fname, **kwargs):
- """
- Load ASCII data stored in a text file and return a masked array.
-
- .. deprecated:: 1.17
- np.mafromtxt is a deprecated alias of `genfromtxt` which
- overwrites the ``usemask`` argument with `True` even when
- explicitly called as ``mafromtxt(..., usemask=False)``.
- Use `genfromtxt` instead.
-
- Parameters
- ----------
- fname, kwargs : For a description of input parameters, see `genfromtxt`.
-
- See Also
- --------
- numpy.genfromtxt : generic function to load ASCII data.
-
- """
- kwargs['usemask'] = True
- # Numpy 1.17
- warnings.warn(
- "np.mafromtxt is a deprecated alias of np.genfromtxt, "
- "prefer the latter.",
- DeprecationWarning, stacklevel=2)
- return genfromtxt(fname, **kwargs)
-
-
def recfromtxt(fname, **kwargs):
"""
Load ASCII data from a file and return it in a record array.
diff --git a/numpy/lib/npyio.pyi b/numpy/lib/npyio.pyi
index 508357927..4841e9e71 100644
--- a/numpy/lib/npyio.pyi
+++ b/numpy/lib/npyio.pyi
@@ -1,104 +1,265 @@
-from typing import Mapping, List, Any
+import os
+import sys
+import zipfile
+import types
+from typing import (
+ Literal as L,
+ Any,
+ Mapping,
+ TypeVar,
+ Generic,
+ List,
+ Type,
+ Iterator,
+ Union,
+ IO,
+ overload,
+ Sequence,
+ Callable,
+ Pattern,
+ Protocol,
+ Iterable,
+)
from numpy import (
DataSource as DataSource,
+ ndarray,
+ recarray,
+ dtype,
+ generic,
+ float64,
+ void,
)
+from numpy.ma.mrecords import MaskedRecords
+from numpy.typing import ArrayLike, DTypeLike, NDArray, _SupportsDType
+
from numpy.core.multiarray import (
packbits as packbits,
unpackbits as unpackbits,
)
+_T = TypeVar("_T")
+_T_contra = TypeVar("_T_contra", contravariant=True)
+_T_co = TypeVar("_T_co", covariant=True)
+_SCT = TypeVar("_SCT", bound=generic)
+_CharType_co = TypeVar("_CharType_co", str, bytes, covariant=True)
+_CharType_contra = TypeVar("_CharType_contra", str, bytes, contravariant=True)
+
+_DTypeLike = Union[
+ Type[_SCT],
+ dtype[_SCT],
+ _SupportsDType[dtype[_SCT]],
+]
+
+class _SupportsGetItem(Protocol[_T_contra, _T_co]):
+ def __getitem__(self, key: _T_contra, /) -> _T_co: ...
+
+class _SupportsRead(Protocol[_CharType_co]):
+ def read(self) -> _CharType_co: ...
+
+class _SupportsReadSeek(Protocol[_CharType_co]):
+ def read(self, n: int, /) -> _CharType_co: ...
+ def seek(self, offset: int, whence: int, /) -> object: ...
+
+class _SupportsWrite(Protocol[_CharType_contra]):
+ def write(self, s: _CharType_contra, /) -> object: ...
+
__all__: List[str]
-def loads(*args, **kwargs): ...
-
-class BagObj:
- def __init__(self, obj): ...
- def __getattribute__(self, key): ...
- def __dir__(self): ...
-
-def zipfile_factory(file, *args, **kwargs): ...
-
-class NpzFile(Mapping[Any, Any]):
- zip: Any
- fid: Any
- files: Any
- allow_pickle: Any
- pickle_kwargs: Any
- f: Any
- def __init__(self, fid, own_fid=..., allow_pickle=..., pickle_kwargs=...): ...
- def __enter__(self): ...
- def __exit__(self, exc_type, exc_value, traceback): ...
- def close(self): ...
- def __del__(self): ...
- def __iter__(self): ...
- def __len__(self): ...
- def __getitem__(self, key): ...
- def iteritems(self): ...
- def iterkeys(self): ...
-
-def load(file, mmap_mode=..., allow_pickle=..., fix_imports=..., encoding=...): ...
-def save(file, arr, allow_pickle=..., fix_imports=...): ...
-def savez(file, *args, **kwds): ...
-def savez_compressed(file, *args, **kwds): ...
+class BagObj(Generic[_T_co]):
+ def __init__(self, obj: _SupportsGetItem[str, _T_co]) -> None: ...
+ def __getattribute__(self, key: str) -> _T_co: ...
+ def __dir__(self) -> List[str]: ...
+
+class NpzFile(Mapping[str, NDArray[Any]]):
+ zip: zipfile.ZipFile
+ fid: None | IO[str]
+ files: List[str]
+ allow_pickle: bool
+ pickle_kwargs: None | Mapping[str, Any]
+ # Represent `f` as a mutable property so we can access the type of `self`
+ @property
+ def f(self: _T) -> BagObj[_T]: ...
+ @f.setter
+ def f(self: _T, value: BagObj[_T]) -> None: ...
+ def __init__(
+ self,
+ fid: IO[str],
+ own_fid: bool = ...,
+ allow_pickle: bool = ...,
+ pickle_kwargs: None | Mapping[str, Any] = ...,
+ ) -> None: ...
+ def __enter__(self: _T) -> _T: ...
+ def __exit__(
+ self,
+ exc_type: None | Type[BaseException],
+ exc_value: None | BaseException,
+ traceback: None | types.TracebackType,
+ /,
+ ) -> None: ...
+ def close(self) -> None: ...
+ def __del__(self) -> None: ...
+ def __iter__(self) -> Iterator[str]: ...
+ def __len__(self) -> int: ...
+ def __getitem__(self, key: str) -> NDArray[Any]: ...
+
+# NOTE: Returns a `NpzFile` if file is a zip file;
+# returns an `ndarray`/`memmap` otherwise
+def load(
+ file: str | bytes | os.PathLike[Any] | _SupportsReadSeek[bytes],
+ mmap_mode: L[None, "r+", "r", "w+", "c"] = ...,
+ allow_pickle: bool = ...,
+ fix_imports: bool = ...,
+ encoding: L["ASCII", "latin1", "bytes"] = ...,
+) -> Any: ...
+
+def save(
+ file: str | os.PathLike[str] | _SupportsWrite[bytes],
+ arr: ArrayLike,
+ allow_pickle: bool = ...,
+ fix_imports: bool = ...,
+) -> None: ...
+
+def savez(
+ file: str | os.PathLike[str] | _SupportsWrite[bytes],
+ *args: ArrayLike,
+ **kwds: ArrayLike,
+) -> None: ...
+
+def savez_compressed(
+ file: str | os.PathLike[str] | _SupportsWrite[bytes],
+ *args: ArrayLike,
+ **kwds: ArrayLike,
+) -> None: ...
+
+# File-like objects only have to implement `__iter__` and,
+# optionally, `encoding`
+@overload
def loadtxt(
- fname,
- dtype=...,
- comments=...,
- delimiter=...,
- converters=...,
- skiprows=...,
- usecols=...,
- unpack=...,
- ndmin=...,
- encoding=...,
- max_rows=...,
+ fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
+ dtype: None = ...,
+ comments: str | Sequence[str] = ...,
+ delimiter: None | str = ...,
+ converters: None | Mapping[int | str, Callable[[str], Any]] = ...,
+ skiprows: int = ...,
+ usecols: int | Sequence[int] = ...,
+ unpack: bool = ...,
+ ndmin: L[0, 1, 2] = ...,
+ encoding: None | str = ...,
+ max_rows: None | int = ...,
*,
- like=...,
-): ...
+ like: None | ArrayLike = ...
+) -> NDArray[float64]: ...
+@overload
+def loadtxt(
+ fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
+ dtype: _DTypeLike[_SCT],
+ comments: str | Sequence[str] = ...,
+ delimiter: None | str = ...,
+ converters: None | Mapping[int | str, Callable[[str], Any]] = ...,
+ skiprows: int = ...,
+ usecols: int | Sequence[int] = ...,
+ unpack: bool = ...,
+ ndmin: L[0, 1, 2] = ...,
+ encoding: None | str = ...,
+ max_rows: None | int = ...,
+ *,
+ like: None | ArrayLike = ...
+) -> NDArray[_SCT]: ...
+@overload
+def loadtxt(
+ fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
+ dtype: DTypeLike,
+ comments: str | Sequence[str] = ...,
+ delimiter: None | str = ...,
+ converters: None | Mapping[int | str, Callable[[str], Any]] = ...,
+ skiprows: int = ...,
+ usecols: int | Sequence[int] = ...,
+ unpack: bool = ...,
+ ndmin: L[0, 1, 2] = ...,
+ encoding: None | str = ...,
+ max_rows: None | int = ...,
+ *,
+ like: None | ArrayLike = ...
+) -> NDArray[Any]: ...
+
def savetxt(
- fname,
- X,
- fmt=...,
- delimiter=...,
- newline=...,
- header=...,
- footer=...,
- comments=...,
- encoding=...,
-): ...
-def fromregex(file, regexp, dtype, encoding=...): ...
+ fname: str | os.PathLike[str] | _SupportsWrite[str] | _SupportsWrite[bytes],
+ X: ArrayLike,
+ fmt: str | Sequence[str] = ...,
+ delimiter: str = ...,
+ newline: str = ...,
+ header: str = ...,
+ footer: str = ...,
+ comments: str = ...,
+ encoding: None | str = ...,
+) -> None: ...
+
+@overload
+def fromregex(
+ file: str | os.PathLike[str] | _SupportsRead[str] | _SupportsRead[bytes],
+ regexp: str | bytes | Pattern[Any],
+ dtype: _DTypeLike[_SCT],
+ encoding: None | str = ...
+) -> NDArray[_SCT]: ...
+@overload
+def fromregex(
+ file: str | os.PathLike[str] | _SupportsRead[str] | _SupportsRead[bytes],
+ regexp: str | bytes | Pattern[Any],
+ dtype: DTypeLike,
+ encoding: None | str = ...
+) -> NDArray[Any]: ...
+
+# TODO: Sort out arguments
+@overload
def genfromtxt(
- fname,
- dtype=...,
- comments=...,
- delimiter=...,
- skip_header=...,
- skip_footer=...,
- converters=...,
- missing_values=...,
- filling_values=...,
- usecols=...,
- names=...,
- excludelist=...,
- deletechars=...,
- replace_space=...,
- autostrip=...,
- case_sensitive=...,
- defaultfmt=...,
- unpack=...,
- usemask=...,
- loose=...,
- invalid_raise=...,
- max_rows=...,
- encoding=...,
+ fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
+ dtype: None = ...,
+ *args: Any,
+ **kwargs: Any,
+) -> NDArray[float64]: ...
+@overload
+def genfromtxt(
+ fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
+ dtype: _DTypeLike[_SCT],
+ *args: Any,
+ **kwargs: Any,
+) -> NDArray[_SCT]: ...
+@overload
+def genfromtxt(
+ fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
+ dtype: DTypeLike,
+ *args: Any,
+ **kwargs: Any,
+) -> NDArray[Any]: ...
+
+@overload
+def recfromtxt(
+ fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
+ *,
+ usemask: L[False] = ...,
+ **kwargs: Any,
+) -> recarray[Any, dtype[void]]: ...
+@overload
+def recfromtxt(
+ fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
+ *,
+ usemask: L[True],
+ **kwargs: Any,
+) -> MaskedRecords[Any, dtype[void]]: ...
+
+@overload
+def recfromcsv(
+ fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
+ *,
+ usemask: L[False] = ...,
+ **kwargs: Any,
+) -> recarray[Any, dtype[void]]: ...
+@overload
+def recfromcsv(
+ fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
*,
- like=...,
-): ...
-def recfromtxt(fname, **kwargs): ...
-def recfromcsv(fname, **kwargs): ...
-
-# NOTE: Deprecated
-# def ndfromtxt(fname, **kwargs): ...
-# def mafromtxt(fname, **kwargs): ...
+ usemask: L[True],
+ **kwargs: Any,
+) -> MaskedRecords[Any, dtype[void]]: ...
diff --git a/numpy/lib/polynomial.py b/numpy/lib/polynomial.py
index 23021cafa..c40e50a57 100644
--- a/numpy/lib/polynomial.py
+++ b/numpy/lib/polynomial.py
@@ -152,9 +152,8 @@ def poly(seq_of_zeros):
return 1.0
dt = seq_of_zeros.dtype
a = ones((1,), dtype=dt)
- for k in range(len(seq_of_zeros)):
- a = NX.convolve(a, array([1, -seq_of_zeros[k]], dtype=dt),
- mode='full')
+ for zero in seq_of_zeros:
+ a = NX.convolve(a, array([1, -zero], dtype=dt), mode='full')
if issubclass(a.dtype.type, NX.complexfloating):
# if complex roots are all complex conjugates, the roots are real.
@@ -770,8 +769,8 @@ def polyval(p, x):
else:
x = NX.asanyarray(x)
y = NX.zeros_like(x)
- for i in range(len(p)):
- y = y * x + p[i]
+ for pv in p:
+ y = y * x + pv
return y
@@ -1273,14 +1272,14 @@ class poly1d:
s = s[:-5]
return s
- for k in range(len(coeffs)):
- if not iscomplex(coeffs[k]):
- coefstr = fmt_float(real(coeffs[k]))
- elif real(coeffs[k]) == 0:
- coefstr = '%sj' % fmt_float(imag(coeffs[k]))
+ for k, coeff in enumerate(coeffs):
+ if not iscomplex(coeff):
+ coefstr = fmt_float(real(coeff))
+ elif real(coeff) == 0:
+ coefstr = '%sj' % fmt_float(imag(coeff))
else:
- coefstr = '(%s + %sj)' % (fmt_float(real(coeffs[k])),
- fmt_float(imag(coeffs[k])))
+ coefstr = '(%s + %sj)' % (fmt_float(real(coeff)),
+ fmt_float(imag(coeff)))
power = (N-k)
if power == 0:
diff --git a/numpy/lib/shape_base.pyi b/numpy/lib/shape_base.pyi
index cfb3040b7..1598dc36c 100644
--- a/numpy/lib/shape_base.pyi
+++ b/numpy/lib/shape_base.pyi
@@ -1,5 +1,4 @@
-from typing import List, TypeVar, Callable, Sequence, Any, overload, Tuple
-from typing_extensions import SupportsIndex, Protocol
+from typing import List, TypeVar, Callable, Sequence, Any, overload, Tuple, SupportsIndex, Protocol
from numpy import (
generic,
@@ -39,15 +38,17 @@ _ArrayLike = _NestedSequence[_SupportsDType[dtype[_SCT]]]
class _ArrayWrap(Protocol):
def __call__(
self,
- __array: NDArray[Any],
- __context: None | Tuple[ufunc, Tuple[Any, ...], int] = ...,
+ array: NDArray[Any],
+ context: None | Tuple[ufunc, Tuple[Any, ...], int] = ...,
+ /,
) -> Any: ...
class _ArrayPrepare(Protocol):
def __call__(
self,
- __array: NDArray[Any],
- __context: None | Tuple[ufunc, Tuple[Any, ...], int] = ...,
+ array: NDArray[Any],
+ context: None | Tuple[ufunc, Tuple[Any, ...], int] = ...,
+ /,
) -> Any: ...
class _SupportsArrayWrap(Protocol):
diff --git a/numpy/lib/stride_tricks.pyi b/numpy/lib/stride_tricks.pyi
index d2e744b5a..bafc46e9c 100644
--- a/numpy/lib/stride_tricks.pyi
+++ b/numpy/lib/stride_tricks.pyi
@@ -1,16 +1,81 @@
-from typing import Any, List
+from typing import Any, List, Dict, Iterable, TypeVar, overload, SupportsIndex
-from numpy.typing import _ShapeLike, _Shape
+from numpy import dtype, generic
+from numpy.typing import (
+ NDArray,
+ ArrayLike,
+ _ShapeLike,
+ _Shape,
+ _NestedSequence,
+ _SupportsArray,
+)
+
+_SCT = TypeVar("_SCT", bound=generic)
+_ArrayLike = _NestedSequence[_SupportsArray[dtype[_SCT]]]
__all__: List[str]
class DummyArray:
- __array_interface__: Any
- base: Any
- def __init__(self, interface, base=...): ...
+ __array_interface__: Dict[str, Any]
+ base: None | NDArray[Any]
+ def __init__(
+ self,
+ interface: Dict[str, Any],
+ base: None | NDArray[Any] = ...,
+ ) -> None: ...
+
+@overload
+def as_strided(
+ x: _ArrayLike[_SCT],
+ shape: None | Iterable[int] = ...,
+ strides: None | Iterable[int] = ...,
+ subok: bool = ...,
+ writeable: bool = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def as_strided(
+ x: ArrayLike,
+ shape: None | Iterable[int] = ...,
+ strides: None | Iterable[int] = ...,
+ subok: bool = ...,
+ writeable: bool = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def sliding_window_view(
+ x: _ArrayLike[_SCT],
+ window_shape: int | Iterable[int],
+ axis: None | SupportsIndex = ...,
+ *,
+ subok: bool = ...,
+ writeable: bool = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def sliding_window_view(
+ x: ArrayLike,
+ window_shape: int | Iterable[int],
+ axis: None | SupportsIndex = ...,
+ *,
+ subok: bool = ...,
+ writeable: bool = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def broadcast_to(
+ array: _ArrayLike[_SCT],
+ shape: int | Iterable[int],
+ subok: bool = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def broadcast_to(
+ array: ArrayLike,
+ shape: int | Iterable[int],
+ subok: bool = ...,
+) -> NDArray[Any]: ...
-def as_strided(x, shape=..., strides=..., subok=..., writeable=...): ...
-def sliding_window_view(x, window_shape, axis=..., *, subok=..., writeable=...): ...
-def broadcast_to(array, shape, subok=...): ...
def broadcast_shapes(*args: _ShapeLike) -> _Shape: ...
-def broadcast_arrays(*args, subok=...): ...
+
+def broadcast_arrays(
+ *args: ArrayLike,
+ subok: bool = ...,
+) -> List[NDArray[Any]]: ...
diff --git a/numpy/lib/tests/test__datasource.py b/numpy/lib/tests/test__datasource.py
index 1ed7815d9..2738d41c4 100644
--- a/numpy/lib/tests/test__datasource.py
+++ b/numpy/lib/tests/test__datasource.py
@@ -102,10 +102,10 @@ class TestDataSourceOpen:
def test_InvalidHTTP(self):
url = invalid_httpurl()
- assert_raises(IOError, self.ds.open, url)
+ assert_raises(OSError, self.ds.open, url)
try:
self.ds.open(url)
- except IOError as e:
+ except OSError as e:
# Regression test for bug fixed in r4342.
assert_(e.errno is None)
@@ -120,7 +120,7 @@ class TestDataSourceOpen:
def test_InvalidFile(self):
invalid_file = invalid_textfile(self.tmpdir)
- assert_raises(IOError, self.ds.open, invalid_file)
+ assert_raises(OSError, self.ds.open, invalid_file)
def test_ValidGzipFile(self):
try:
diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py
index e1b615223..829691b1c 100644
--- a/numpy/lib/tests/test_function_base.py
+++ b/numpy/lib/tests/test_function_base.py
@@ -1528,6 +1528,21 @@ class TestVectorize:
([('x',)], [('y',), ()]))
assert_equal(nfb._parse_gufunc_signature('(),(a,b,c),(d)->(d,e)'),
([(), ('a', 'b', 'c'), ('d',)], [('d', 'e')]))
+
+ # Tests to check if whitespaces are ignored
+ assert_equal(nfb._parse_gufunc_signature('(x )->()'), ([('x',)], [()]))
+ assert_equal(nfb._parse_gufunc_signature('( x , y )->( )'),
+ ([('x', 'y')], [()]))
+ assert_equal(nfb._parse_gufunc_signature('(x),( y) ->()'),
+ ([('x',), ('y',)], [()]))
+ assert_equal(nfb._parse_gufunc_signature('( x)-> (y ) '),
+ ([('x',)], [('y',)]))
+ assert_equal(nfb._parse_gufunc_signature(' (x)->( y),( )'),
+ ([('x',)], [('y',), ()]))
+ assert_equal(nfb._parse_gufunc_signature(
+ '( ), ( a, b,c ) ,( d) -> (d , e)'),
+ ([(), ('a', 'b', 'c'), ('d',)], [('d', 'e')]))
+
with assert_raises(ValueError):
nfb._parse_gufunc_signature('(x)(y)->()')
with assert_raises(ValueError):
@@ -2757,11 +2772,6 @@ class TestInterp:
assert_almost_equal(np.interp(x, xp, fp, period=360), y)
-def compare_results(res, desired):
- for i in range(len(desired)):
- assert_array_equal(res[i], desired[i])
-
-
class TestPercentile:
def test_basic(self):
diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py
index d97ad76df..11f2b7d4d 100644
--- a/numpy/lib/tests/test_io.py
+++ b/numpy/lib/tests/test_io.py
@@ -1229,9 +1229,11 @@ class Testfromregex:
a = np.array([(1312,), (1534,), (4444,)], dtype=dt)
assert_array_equal(x, a)
- def test_record_unicode(self):
+ @pytest.mark.parametrize("path_type", [str, Path])
+ def test_record_unicode(self, path_type):
utf8 = b'\xcf\x96'
- with temppath() as path:
+ with temppath() as str_path:
+ path = path_type(str_path)
with open(path, 'wb') as f:
f.write(b'1.312 foo' + utf8 + b' \n1.534 bar\n4.444 qux')
@@ -2503,28 +2505,6 @@ class TestPathUsage:
data = np.genfromtxt(path)
assert_array_equal(a, data)
- def test_ndfromtxt(self):
- # Test outputting a standard ndarray
- with temppath(suffix='.txt') as path:
- path = Path(path)
- with path.open('w') as f:
- f.write(u'1 2\n3 4')
-
- control = np.array([[1, 2], [3, 4]], dtype=int)
- test = np.genfromtxt(path, dtype=int)
- assert_array_equal(test, control)
-
- def test_mafromtxt(self):
- # From `test_fancy_dtype_alt` above
- with temppath(suffix='.txt') as path:
- path = Path(path)
- with path.open('w') as f:
- f.write(u'1,2,3.0\n4,5,6.0\n')
-
- test = np.genfromtxt(path, delimiter=',', usemask=True)
- control = ma.array([(1.0, 2.0, 3.0), (4.0, 5.0, 6.0)])
- assert_equal(test, control)
-
def test_recfromtxt(self):
with temppath(suffix='.txt') as path:
path = Path(path)
diff --git a/numpy/lib/tests/test_shape_base.py b/numpy/lib/tests/test_shape_base.py
index fb7ba7874..a148e53da 100644
--- a/numpy/lib/tests/test_shape_base.py
+++ b/numpy/lib/tests/test_shape_base.py
@@ -392,7 +392,7 @@ class TestArraySplit:
assert_(a.dtype.type is res[-1].dtype.type)
# Same thing for manual splits:
- res = array_split(a, [0, 1, 2], axis=0)
+ res = array_split(a, [0, 1], axis=0)
tgt = [np.zeros((0, 10)), np.array([np.arange(10)]),
np.array([np.arange(10)])]
compare_results(res, tgt)
@@ -713,5 +713,9 @@ class TestMayShareMemory:
# Utility
def compare_results(res, desired):
- for i in range(len(desired)):
- assert_array_equal(res[i], desired[i])
+ """Compare lists of arrays."""
+ if len(res) != len(desired):
+ raise ValueError("Iterables have different lengths")
+ # See also PEP 618 for Python 3.10
+ for x, y in zip(res, desired):
+ assert_array_equal(x, y)
diff --git a/numpy/lib/tests/test_utils.py b/numpy/lib/tests/test_utils.py
index 8a877ae69..72c91836f 100644
--- a/numpy/lib/tests/test_utils.py
+++ b/numpy/lib/tests/test_utils.py
@@ -11,6 +11,10 @@ from io import StringIO
@pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO")
+@pytest.mark.skipif(
+ sys.version_info == (3, 10, 0, "candidate", 1),
+ reason="Broken as of bpo-44524",
+)
def test_lookfor():
out = StringIO()
utils.lookfor('eigenvalue', module='numpy', output=out,
@@ -160,7 +164,7 @@ def test_info_method_heading():
class WithPublicMethods:
def first_method():
pass
-
+
def _has_method_heading(cls):
out = StringIO()
utils.info(cls, output=out)
diff --git a/numpy/lib/type_check.pyi b/numpy/lib/type_check.pyi
index fbe325858..5eb0e62d2 100644
--- a/numpy/lib/type_check.pyi
+++ b/numpy/lib/type_check.pyi
@@ -1,5 +1,5 @@
-import sys
from typing import (
+ Literal as L,
Any,
Container,
Iterable,
@@ -7,6 +7,7 @@ from typing import (
overload,
Type,
TypeVar,
+ Protocol,
)
from numpy import (
@@ -32,11 +33,6 @@ from numpy.typing import (
_DTypeLikeComplex,
)
-if sys.version_info >= (3, 8):
- from typing import Protocol, Literal as L
-else:
- from typing_extensions import Protocol, Literal as L
-
_T = TypeVar("_T")
_T_co = TypeVar("_T_co", covariant=True)
_SCT = TypeVar("_SCT", bound=generic)
diff --git a/numpy/lib/utils.py b/numpy/lib/utils.py
index b1a916d4a..1f2cb66fa 100644
--- a/numpy/lib/utils.py
+++ b/numpy/lib/utils.py
@@ -351,8 +351,7 @@ def who(vardict=None):
maxshape = 0
maxbyte = 0
totalbytes = 0
- for k in range(len(sta)):
- val = sta[k]
+ for val in sta:
if maxname < len(val[0]):
maxname = len(val[0])
if maxshape < len(val[1]):
@@ -369,8 +368,7 @@ def who(vardict=None):
prval = "Name %s Shape %s Bytes %s Type" % (sp1*' ', sp2*' ', sp3*' ')
print(prval + "\n" + "="*(len(prval)+5) + "\n")
- for k in range(len(sta)):
- val = sta[k]
+ for val in sta:
print("%s %s %s %s %s %s %s" % (val[0], ' '*(sp1-len(val[0])+4),
val[1], ' '*(sp2-len(val[1])+5),
val[2], ' '*(sp3-len(val[2])+5),
diff --git a/numpy/lib/utils.pyi b/numpy/lib/utils.pyi
index 0518655c6..f0a8797ad 100644
--- a/numpy/lib/utils.pyi
+++ b/numpy/lib/utils.pyi
@@ -1,4 +1,3 @@
-import sys
from ast import AST
from typing import (
Any,
@@ -11,6 +10,7 @@ from typing import (
Tuple,
TypeVar,
Union,
+ Protocol,
)
from numpy import ndarray, generic
@@ -21,17 +21,12 @@ from numpy.core.numerictypes import (
issubsctype as issubsctype,
)
-if sys.version_info >= (3, 8):
- from typing import Protocol
-else:
- from typing_extensions import Protocol
-
_T_contra = TypeVar("_T_contra", contravariant=True)
_FuncType = TypeVar("_FuncType", bound=Callable[..., Any])
# A file-like object opened in `w` mode
class _SupportsWrite(Protocol[_T_contra]):
- def write(self, __s: _T_contra) -> Any: ...
+ def write(self, s: _T_contra, /) -> Any: ...
__all__: List[str]
@@ -60,7 +55,8 @@ def deprecate(
) -> _Deprecate: ...
@overload
def deprecate(
- __func: _FuncType,
+ func: _FuncType,
+ /,
old_name: Optional[str] = ...,
new_name: Optional[str] = ...,
message: Optional[str] = ...,
diff --git a/numpy/ma/core.py b/numpy/ma/core.py
index d2150919f..b2ac383a2 100644
--- a/numpy/ma/core.py
+++ b/numpy/ma/core.py
@@ -3374,8 +3374,12 @@ class MaskedArray(ndarray):
_mask[indx] = mval
elif not self._hardmask:
# Set the data, then the mask
- _data[indx] = dval
- _mask[indx] = mval
+ if (isinstance(indx, masked_array) and
+ not isinstance(value, masked_array)):
+ _data[indx.data] = dval
+ else:
+ _data[indx] = dval
+ _mask[indx] = mval
elif hasattr(indx, 'dtype') and (indx.dtype == MaskType):
indx = indx * umath.logical_not(_mask)
_data[indx] = dval
diff --git a/numpy/ma/mrecords.py b/numpy/ma/mrecords.py
index 6814931b0..10b1b209c 100644
--- a/numpy/ma/mrecords.py
+++ b/numpy/ma/mrecords.py
@@ -658,8 +658,8 @@ def openfile(fname):
# Try to open the file and guess its type
try:
f = open(fname)
- except IOError as e:
- raise IOError(f"No such file: '{fname}'") from e
+ except FileNotFoundError as e:
+ raise FileNotFoundError(f"No such file: '{fname}'") from e
if f.readline()[:2] != "\\x":
f.seek(0, 0)
return f
diff --git a/numpy/ma/tests/test_old_ma.py b/numpy/ma/tests/test_old_ma.py
index ab003b94e..2b3034f9c 100644
--- a/numpy/ma/tests/test_old_ma.py
+++ b/numpy/ma/tests/test_old_ma.py
@@ -697,6 +697,22 @@ class TestMa:
assert_equal(b[0].shape, ())
assert_equal(b[1].shape, ())
+ def test_assignment_by_condition(self):
+ # Test for gh-18951
+ a = array([1, 2, 3, 4], mask=[1, 0, 1, 0])
+ c = a >= 3
+ a[c] = 5
+ assert_(a[2] is masked)
+
+ def test_assignment_by_condition_2(self):
+ # gh-19721
+ a = masked_array([0, 1], mask=[False, False])
+ b = masked_array([0, 1], mask=[True, True])
+ mask = a < 1
+ b[mask] = a[mask]
+ expected_mask = [False, True]
+ assert_equal(b.mask, expected_mask)
+
class TestUfuncs:
def setup(self):
diff --git a/numpy/random/_generator.pyi b/numpy/random/_generator.pyi
index 14dc55131..64b683d7c 100644
--- a/numpy/random/_generator.pyi
+++ b/numpy/random/_generator.pyi
@@ -1,5 +1,4 @@
-import sys
-from typing import Any, Callable, Dict, Optional, Tuple, Type, Union, overload, TypeVar
+from typing import Any, Callable, Dict, Optional, Tuple, Type, Union, overload, TypeVar, Literal
from numpy import (
bool_,
@@ -44,11 +43,6 @@ from numpy.typing import (
_UIntCodes,
)
-if sys.version_info >= (3, 8):
- from typing import Literal
-else:
- from typing_extensions import Literal
-
_ArrayType = TypeVar("_ArrayType", bound=ndarray[Any, Any])
_DTypeLikeFloat32 = Union[
diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx
index e2430d139..60b6bfc72 100644
--- a/numpy/random/_generator.pyx
+++ b/numpy/random/_generator.pyx
@@ -561,7 +561,7 @@ cdef class Generator:
raise TypeError('Unsupported dtype %r for integers' % _dtype)
- if size is None and dtype in (bool, int, np.compat.long):
+ if size is None and dtype in (bool, int):
if np.array(ret).shape == ():
return dtype(ret)
return ret
diff --git a/numpy/random/_mt19937.pyi b/numpy/random/_mt19937.pyi
index 1b8bacdae..820f27392 100644
--- a/numpy/random/_mt19937.pyi
+++ b/numpy/random/_mt19937.pyi
@@ -1,15 +1,9 @@
-import sys
-from typing import Any, Union
+from typing import Any, Union, TypedDict
from numpy import dtype, ndarray, uint32
from numpy.random.bit_generator import BitGenerator, SeedSequence
from numpy.typing import _ArrayLikeInt_co
-if sys.version_info >= (3, 8):
- from typing import TypedDict
-else:
- from typing_extensions import TypedDict
-
class _MT19937Internal(TypedDict):
key: ndarray[Any, dtype[uint32]]
pos: int
diff --git a/numpy/random/_pcg64.pyi b/numpy/random/_pcg64.pyi
index 25e2fdde6..4881a987e 100644
--- a/numpy/random/_pcg64.pyi
+++ b/numpy/random/_pcg64.pyi
@@ -1,14 +1,8 @@
-import sys
-from typing import Union
+from typing import Union, TypedDict
from numpy.random.bit_generator import BitGenerator, SeedSequence
from numpy.typing import _ArrayLikeInt_co
-if sys.version_info >= (3, 8):
- from typing import TypedDict
-else:
- from typing_extensions import TypedDict
-
class _PCG64Internal(TypedDict):
state: int
inc: int
diff --git a/numpy/random/_philox.pyi b/numpy/random/_philox.pyi
index f6a5b9b9b..dd1c5e6e9 100644
--- a/numpy/random/_philox.pyi
+++ b/numpy/random/_philox.pyi
@@ -1,15 +1,9 @@
-import sys
-from typing import Any, Union
+from typing import Any, Union, TypedDict
from numpy import dtype, ndarray, uint64
from numpy.random.bit_generator import BitGenerator, SeedSequence
from numpy.typing import _ArrayLikeInt_co
-if sys.version_info >= (3, 8):
- from typing import TypedDict
-else:
- from typing_extensions import TypedDict
-
class _PhiloxInternal(TypedDict):
counter: ndarray[Any, dtype[uint64]]
key: ndarray[Any, dtype[uint64]]
diff --git a/numpy/random/_sfc64.pyi b/numpy/random/_sfc64.pyi
index 72a271c92..94d11a210 100644
--- a/numpy/random/_sfc64.pyi
+++ b/numpy/random/_sfc64.pyi
@@ -1,5 +1,4 @@
-import sys
-from typing import Any, Union
+from typing import Any, Union, TypedDict
from numpy import dtype as dtype
from numpy import ndarray as ndarray
@@ -7,11 +6,6 @@ from numpy import uint64
from numpy.random.bit_generator import BitGenerator, SeedSequence
from numpy.typing import _ArrayLikeInt_co
-if sys.version_info >= (3, 8):
- from typing import TypedDict
-else:
- from typing_extensions import TypedDict
-
class _SFC64Internal(TypedDict):
state: ndarray[Any, dtype[uint64]]
diff --git a/numpy/random/bit_generator.pyi b/numpy/random/bit_generator.pyi
index 5b68dde6c..fa2f1ab12 100644
--- a/numpy/random/bit_generator.pyi
+++ b/numpy/random/bit_generator.pyi
@@ -1,5 +1,4 @@
import abc
-import sys
from threading import Lock
from typing import (
Any,
@@ -16,16 +15,12 @@ from typing import (
TypeVar,
Union,
overload,
+ Literal,
)
from numpy import dtype, ndarray, uint32, uint64
from numpy.typing import _ArrayLikeInt_co, _ShapeLike, _SupportsDType, _UInt32Codes, _UInt64Codes
-if sys.version_info >= (3, 8):
- from typing import Literal
-else:
- from typing_extensions import Literal
-
_T = TypeVar("_T")
_DTypeLikeUint32 = Union[
diff --git a/numpy/random/mtrand.pyi b/numpy/random/mtrand.pyi
index 3137b0a95..cbe87a299 100644
--- a/numpy/random/mtrand.pyi
+++ b/numpy/random/mtrand.pyi
@@ -1,5 +1,4 @@
-import sys
-from typing import Any, Callable, Dict, Optional, Tuple, Type, Union, overload
+from typing import Any, Callable, Dict, Optional, Tuple, Type, Union, overload, Literal
from numpy import (
bool_,
@@ -44,11 +43,6 @@ from numpy.typing import (
_UIntCodes,
)
-if sys.version_info >= (3, 8):
- from typing import Literal
-else:
- from typing_extensions import Literal
-
_DTypeLikeFloat32 = Union[
dtype[float32],
_SupportsDType[dtype[float32]],
diff --git a/numpy/random/mtrand.pyx b/numpy/random/mtrand.pyx
index 4f5862faa..c9d8ee8e3 100644
--- a/numpy/random/mtrand.pyx
+++ b/numpy/random/mtrand.pyx
@@ -763,7 +763,7 @@ cdef class RandomState:
else:
raise TypeError('Unsupported dtype %r for randint' % _dtype)
- if size is None and dtype in (bool, int, np.compat.long):
+ if size is None and dtype in (bool, int):
if np.array(ret).shape == ():
return dtype(ret)
return ret
diff --git a/numpy/random/tests/test_generator_mt19937_regressions.py b/numpy/random/tests/test_generator_mt19937_regressions.py
index 9f6dcdc6b..88d2792a6 100644
--- a/numpy/random/tests/test_generator_mt19937_regressions.py
+++ b/numpy/random/tests/test_generator_mt19937_regressions.py
@@ -32,11 +32,11 @@ class TestRegression:
# these two frequency counts should be close to theoretical
# numbers with this large sample
# theoretical large N result is 0.49706795
- freq = np.sum(rvsn == 1) / float(N)
+ freq = np.sum(rvsn == 1) / N
msg = f'Frequency was {freq:f}, should be > 0.45'
assert_(freq > 0.45, msg)
# theoretical large N result is 0.19882718
- freq = np.sum(rvsn == 2) / float(N)
+ freq = np.sum(rvsn == 2) / N
msg = f'Frequency was {freq:f}, should be < 0.23'
assert_(freq < 0.23, msg)
diff --git a/numpy/random/tests/test_randomstate_regression.py b/numpy/random/tests/test_randomstate_regression.py
index 0bf361e5e..595fb5fd3 100644
--- a/numpy/random/tests/test_randomstate_regression.py
+++ b/numpy/random/tests/test_randomstate_regression.py
@@ -43,11 +43,11 @@ class TestRegression:
# these two frequency counts should be close to theoretical
# numbers with this large sample
# theoretical large N result is 0.49706795
- freq = np.sum(rvsn == 1) / float(N)
+ freq = np.sum(rvsn == 1) / N
msg = f'Frequency was {freq:f}, should be > 0.45'
assert_(freq > 0.45, msg)
# theoretical large N result is 0.19882718
- freq = np.sum(rvsn == 2) / float(N)
+ freq = np.sum(rvsn == 2) / N
msg = f'Frequency was {freq:f}, should be < 0.23'
assert_(freq < 0.23, msg)
diff --git a/numpy/random/tests/test_regression.py b/numpy/random/tests/test_regression.py
index 54d5a3efb..8bf419875 100644
--- a/numpy/random/tests/test_regression.py
+++ b/numpy/random/tests/test_regression.py
@@ -39,11 +39,11 @@ class TestRegression:
# these two frequency counts should be close to theoretical
# numbers with this large sample
# theoretical large N result is 0.49706795
- freq = np.sum(rvsn == 1) / float(N)
+ freq = np.sum(rvsn == 1) / N
msg = f'Frequency was {freq:f}, should be > 0.45'
assert_(freq > 0.45, msg)
# theoretical large N result is 0.19882718
- freq = np.sum(rvsn == 2) / float(N)
+ freq = np.sum(rvsn == 2) / N
msg = f'Frequency was {freq:f}, should be < 0.23'
assert_(freq < 0.23, msg)
diff --git a/numpy/setup.py b/numpy/setup.py
index cbf633504..a0ca99919 100644
--- a/numpy/setup.py
+++ b/numpy/setup.py
@@ -4,6 +4,7 @@ def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('numpy', parent_package, top_path)
+ config.add_subpackage('array_api')
config.add_subpackage('compat')
config.add_subpackage('core')
config.add_subpackage('distutils')
diff --git a/numpy/testing/_private/utils.pyi b/numpy/testing/_private/utils.pyi
index 29915309f..26ce52e40 100644
--- a/numpy/testing/_private/utils.pyi
+++ b/numpy/testing/_private/utils.pyi
@@ -6,6 +6,7 @@ import warnings
import unittest
import contextlib
from typing import (
+ Literal as L,
Any,
AnyStr,
Callable,
@@ -23,6 +24,8 @@ from typing import (
type_check_only,
TypeVar,
Union,
+ Final,
+ SupportsIndex,
)
from numpy import generic, dtype, number, object_, bool_, _FloatValue
@@ -40,11 +43,6 @@ from unittest.case import (
SkipTest as SkipTest,
)
-if sys.version_info >= (3, 8):
- from typing import Final, SupportsIndex, Literal as L
-else:
- from typing_extensions import Final, SupportsIndex, Literal as L
-
_T = TypeVar("_T")
_ET = TypeVar("_ET", bound=BaseException)
_FT = TypeVar("_FT", bound=Callable[..., Any])
@@ -261,8 +259,9 @@ def raises(*args: Type[BaseException]) -> Callable[[_FT], _FT]: ...
@overload
def assert_raises( # type: ignore
- __expected_exception: Type[BaseException] | Tuple[Type[BaseException], ...],
- __callable: Callable[..., Any],
+ expected_exception: Type[BaseException] | Tuple[Type[BaseException], ...],
+ callable: Callable[..., Any],
+ /,
*args: Any,
**kwargs: Any,
) -> None: ...
@@ -275,9 +274,10 @@ def assert_raises(
@overload
def assert_raises_regex(
- __expected_exception: Type[BaseException] | Tuple[Type[BaseException], ...],
- __expected_regex: str | bytes | Pattern[Any],
- __callable: Callable[..., Any],
+ expected_exception: Type[BaseException] | Tuple[Type[BaseException], ...],
+ expected_regex: str | bytes | Pattern[Any],
+ callable: Callable[..., Any],
+ /,
*args: Any,
**kwargs: Any,
) -> None: ...
@@ -341,8 +341,9 @@ def assert_warns(
) -> contextlib._GeneratorContextManager[None]: ...
@overload
def assert_warns(
- __warning_class: Type[Warning],
- __func: Callable[..., _T],
+ warning_class: Type[Warning],
+ func: Callable[..., _T],
+ /,
*args: Any,
**kwargs: Any,
) -> _T: ...
@@ -351,7 +352,8 @@ def assert_warns(
def assert_no_warnings() -> contextlib._GeneratorContextManager[None]: ...
@overload
def assert_no_warnings(
- __func: Callable[..., _T],
+ func: Callable[..., _T],
+ /,
*args: Any,
**kwargs: Any,
) -> _T: ...
@@ -388,7 +390,8 @@ def temppath(
def assert_no_gc_cycles() -> contextlib._GeneratorContextManager[None]: ...
@overload
def assert_no_gc_cycles(
- __func: Callable[..., Any],
+ func: Callable[..., Any],
+ /,
*args: Any,
**kwargs: Any,
) -> None: ...
diff --git a/numpy/tests/test_public_api.py b/numpy/tests/test_public_api.py
index 6e4a8dee0..5b1578500 100644
--- a/numpy/tests/test_public_api.py
+++ b/numpy/tests/test_public_api.py
@@ -45,8 +45,6 @@ def test_numpy_namespace():
'fastCopyAndTranspose': 'numpy.core._multiarray_umath._fastCopyAndTranspose',
'get_array_wrap': 'numpy.lib.shape_base.get_array_wrap',
'get_include': 'numpy.lib.utils.get_include',
- 'mafromtxt': 'numpy.lib.npyio.mafromtxt',
- 'ndfromtxt': 'numpy.lib.npyio.ndfromtxt',
'recfromcsv': 'numpy.lib.npyio.recfromcsv',
'recfromtxt': 'numpy.lib.npyio.recfromtxt',
'safe_eval': 'numpy.lib.utils.safe_eval',
@@ -54,22 +52,8 @@ def test_numpy_namespace():
'show_config': 'numpy.__config__.show',
'who': 'numpy.lib.utils.who',
}
- if sys.version_info < (3, 7):
- # These built-in types are re-exported by numpy.
- builtins = {
- 'bool': 'builtins.bool',
- 'complex': 'builtins.complex',
- 'float': 'builtins.float',
- 'int': 'builtins.int',
- 'long': 'builtins.int',
- 'object': 'builtins.object',
- 'str': 'builtins.str',
- 'unicode': 'builtins.str',
- }
- allowlist = dict(undocumented, **builtins)
- else:
- # after 3.7, we override dir to not show these members
- allowlist = undocumented
+ # We override dir to not show these members
+ allowlist = undocumented
bad_results = check_dir(np)
# pytest gives better error messages with the builtin assert than with
# assert_equal
@@ -137,6 +121,7 @@ def test_NPY_NO_EXPORT():
# current status is fine. For others it may make sense to work on making them
# private, to clean up our public API and avoid confusion.
PUBLIC_MODULES = ['numpy.' + s for s in [
+ "array_api",
"ctypeslib",
"distutils",
"distutils.cpuinfo",
diff --git a/numpy/typing/__init__.py b/numpy/typing/__init__.py
index d731f00ef..d60ddb5bb 100644
--- a/numpy/typing/__init__.py
+++ b/numpy/typing/__init__.py
@@ -5,13 +5,6 @@ Typing (:mod:`numpy.typing`)
.. versionadded:: 1.20
-.. warning::
-
- Some of the types in this module rely on features only present in
- the standard library in Python 3.8 and greater. If you want to use
- these types in earlier versions of Python, you should install the
- typing-extensions_ package.
-
Large parts of the NumPy API have PEP-484-style type annotations. In
addition a number of type aliases are available to users, most prominently
the two below:
@@ -143,24 +136,7 @@ API
# NOTE: The API section will be appended with additional entries
# further down in this file
-from typing import TYPE_CHECKING, List, Any
-
-if TYPE_CHECKING:
- # typing_extensions is always available when type-checking
- from typing_extensions import Literal as L
- _HAS_TYPING_EXTENSIONS: L[True]
-else:
- try:
- import typing_extensions
- except ImportError:
- _HAS_TYPING_EXTENSIONS = False
- else:
- _HAS_TYPING_EXTENSIONS = True
-
-if TYPE_CHECKING:
- from typing_extensions import final
-else:
- def final(f): return f
+from typing import TYPE_CHECKING, List, Any, final
if not TYPE_CHECKING:
__all__ = ["ArrayLike", "DTypeLike", "NBitBase", "NDArray"]
diff --git a/numpy/typing/_array_like.py b/numpy/typing/_array_like.py
index c562f3c1f..6ea0eb662 100644
--- a/numpy/typing/_array_like.py
+++ b/numpy/typing/_array_like.py
@@ -1,7 +1,6 @@
from __future__ import annotations
-import sys
-from typing import Any, Sequence, TYPE_CHECKING, Union, TypeVar, Generic
+from typing import Any, Sequence, Protocol, Union, TypeVar
from numpy import (
ndarray,
dtype,
@@ -19,28 +18,19 @@ from numpy import (
str_,
bytes_,
)
-from . import _HAS_TYPING_EXTENSIONS
-
-if sys.version_info >= (3, 8):
- from typing import Protocol
-elif _HAS_TYPING_EXTENSIONS:
- from typing_extensions import Protocol
_T = TypeVar("_T")
_ScalarType = TypeVar("_ScalarType", bound=generic)
_DType = TypeVar("_DType", bound="dtype[Any]")
_DType_co = TypeVar("_DType_co", covariant=True, bound="dtype[Any]")
-if TYPE_CHECKING or _HAS_TYPING_EXTENSIONS or sys.version_info >= (3, 8):
- # The `_SupportsArray` protocol only cares about the default dtype
- # (i.e. `dtype=None` or no `dtype` parameter at all) of the to-be returned
- # array.
- # Concrete implementations of the protocol are responsible for adding
- # any and all remaining overloads
- class _SupportsArray(Protocol[_DType_co]):
- def __array__(self) -> ndarray[Any, _DType_co]: ...
-else:
- class _SupportsArray(Generic[_DType_co]): ...
+# The `_SupportsArray` protocol only cares about the default dtype
+# (i.e. `dtype=None` or no `dtype` parameter at all) of the to-be returned
+# array.
+# Concrete implementations of the protocol are responsible for adding
+# any and all remaining overloads
+class _SupportsArray(Protocol[_DType_co]):
+ def __array__(self) -> ndarray[Any, _DType_co]: ...
# TODO: Wait for support for recursive types
_NestedSequence = Union[
diff --git a/numpy/typing/_callable.py b/numpy/typing/_callable.py
index 8f911da3b..44ad5c291 100644
--- a/numpy/typing/_callable.py
+++ b/numpy/typing/_callable.py
@@ -10,7 +10,6 @@ See the `Mypy documentation`_ on protocols for more details.
from __future__ import annotations
-import sys
from typing import (
Union,
TypeVar,
@@ -18,7 +17,7 @@ from typing import (
Any,
Tuple,
NoReturn,
- TYPE_CHECKING,
+ Protocol,
)
from numpy import (
@@ -45,312 +44,282 @@ from ._scalars import (
_FloatLike_co,
_NumberLike_co,
)
-from . import NBitBase, _HAS_TYPING_EXTENSIONS
+from . import NBitBase
from ._generic_alias import NDArray
-if sys.version_info >= (3, 8):
- from typing import Protocol
-elif _HAS_TYPING_EXTENSIONS:
- from typing_extensions import Protocol
-
-if TYPE_CHECKING or _HAS_TYPING_EXTENSIONS or sys.version_info >= (3, 8):
- _T1 = TypeVar("_T1")
- _T2 = TypeVar("_T2")
- _2Tuple = Tuple[_T1, _T1]
-
- _NBit1 = TypeVar("_NBit1", bound=NBitBase)
- _NBit2 = TypeVar("_NBit2", bound=NBitBase)
-
- _IntType = TypeVar("_IntType", bound=integer)
- _FloatType = TypeVar("_FloatType", bound=floating)
- _NumberType = TypeVar("_NumberType", bound=number)
- _NumberType_co = TypeVar("_NumberType_co", covariant=True, bound=number)
- _GenericType_co = TypeVar("_GenericType_co", covariant=True, bound=generic)
-
- class _BoolOp(Protocol[_GenericType_co]):
- @overload
- def __call__(self, __other: _BoolLike_co) -> _GenericType_co: ...
- @overload # platform dependent
- def __call__(self, __other: int) -> int_: ...
- @overload
- def __call__(self, __other: float) -> float64: ...
- @overload
- def __call__(self, __other: complex) -> complex128: ...
- @overload
- def __call__(self, __other: _NumberType) -> _NumberType: ...
-
- class _BoolBitOp(Protocol[_GenericType_co]):
- @overload
- def __call__(self, __other: _BoolLike_co) -> _GenericType_co: ...
- @overload # platform dependent
- def __call__(self, __other: int) -> int_: ...
- @overload
- def __call__(self, __other: _IntType) -> _IntType: ...
-
- class _BoolSub(Protocol):
- # Note that `__other: bool_` is absent here
- @overload
- def __call__(self, __other: bool) -> NoReturn: ...
- @overload # platform dependent
- def __call__(self, __other: int) -> int_: ...
- @overload
- def __call__(self, __other: float) -> float64: ...
- @overload
- def __call__(self, __other: complex) -> complex128: ...
- @overload
- def __call__(self, __other: _NumberType) -> _NumberType: ...
-
- class _BoolTrueDiv(Protocol):
- @overload
- def __call__(self, __other: float | _IntLike_co) -> float64: ...
- @overload
- def __call__(self, __other: complex) -> complex128: ...
- @overload
- def __call__(self, __other: _NumberType) -> _NumberType: ...
-
- class _BoolMod(Protocol):
- @overload
- def __call__(self, __other: _BoolLike_co) -> int8: ...
- @overload # platform dependent
- def __call__(self, __other: int) -> int_: ...
- @overload
- def __call__(self, __other: float) -> float64: ...
- @overload
- def __call__(self, __other: _IntType) -> _IntType: ...
- @overload
- def __call__(self, __other: _FloatType) -> _FloatType: ...
-
- class _BoolDivMod(Protocol):
- @overload
- def __call__(self, __other: _BoolLike_co) -> _2Tuple[int8]: ...
- @overload # platform dependent
- def __call__(self, __other: int) -> _2Tuple[int_]: ...
- @overload
- def __call__(self, __other: float) -> _2Tuple[floating[_NBit1 | _NBitDouble]]: ...
- @overload
- def __call__(self, __other: _IntType) -> _2Tuple[_IntType]: ...
- @overload
- def __call__(self, __other: _FloatType) -> _2Tuple[_FloatType]: ...
-
- class _TD64Div(Protocol[_NumberType_co]):
- @overload
- def __call__(self, __other: timedelta64) -> _NumberType_co: ...
- @overload
- def __call__(self, __other: _BoolLike_co) -> NoReturn: ...
- @overload
- def __call__(self, __other: _FloatLike_co) -> timedelta64: ...
-
- class _IntTrueDiv(Protocol[_NBit1]):
- @overload
- def __call__(self, __other: bool) -> floating[_NBit1]: ...
- @overload
- def __call__(self, __other: int) -> floating[_NBit1 | _NBitInt]: ...
- @overload
- def __call__(self, __other: float) -> floating[_NBit1 | _NBitDouble]: ...
- @overload
- def __call__(
- self, __other: complex
- ) -> complexfloating[_NBit1 | _NBitDouble, _NBit1 | _NBitDouble]: ...
- @overload
- def __call__(self, __other: integer[_NBit2]) -> floating[_NBit1 | _NBit2]: ...
-
- class _UnsignedIntOp(Protocol[_NBit1]):
- # NOTE: `uint64 + signedinteger -> float64`
- @overload
- def __call__(self, __other: bool) -> unsignedinteger[_NBit1]: ...
- @overload
- def __call__(
- self, __other: int | signedinteger[Any]
- ) -> Any: ...
- @overload
- def __call__(self, __other: float) -> floating[_NBit1 | _NBitDouble]: ...
- @overload
- def __call__(
- self, __other: complex
- ) -> complexfloating[_NBit1 | _NBitDouble, _NBit1 | _NBitDouble]: ...
- @overload
- def __call__(
- self, __other: unsignedinteger[_NBit2]
- ) -> unsignedinteger[_NBit1 | _NBit2]: ...
-
- class _UnsignedIntBitOp(Protocol[_NBit1]):
- @overload
- def __call__(self, __other: bool) -> unsignedinteger[_NBit1]: ...
- @overload
- def __call__(self, __other: int) -> signedinteger[Any]: ...
- @overload
- def __call__(self, __other: signedinteger[Any]) -> signedinteger[Any]: ...
- @overload
- def __call__(
- self, __other: unsignedinteger[_NBit2]
- ) -> unsignedinteger[_NBit1 | _NBit2]: ...
-
- class _UnsignedIntMod(Protocol[_NBit1]):
- @overload
- def __call__(self, __other: bool) -> unsignedinteger[_NBit1]: ...
- @overload
- def __call__(
- self, __other: int | signedinteger[Any]
- ) -> Any: ...
- @overload
- def __call__(self, __other: float) -> floating[_NBit1 | _NBitDouble]: ...
- @overload
- def __call__(
- self, __other: unsignedinteger[_NBit2]
- ) -> unsignedinteger[_NBit1 | _NBit2]: ...
-
- class _UnsignedIntDivMod(Protocol[_NBit1]):
- @overload
- def __call__(self, __other: bool) -> _2Tuple[signedinteger[_NBit1]]: ...
- @overload
- def __call__(
- self, __other: int | signedinteger[Any]
- ) -> _2Tuple[Any]: ...
- @overload
- def __call__(self, __other: float) -> _2Tuple[floating[_NBit1 | _NBitDouble]]: ...
- @overload
- def __call__(
- self, __other: unsignedinteger[_NBit2]
- ) -> _2Tuple[unsignedinteger[_NBit1 | _NBit2]]: ...
-
- class _SignedIntOp(Protocol[_NBit1]):
- @overload
- def __call__(self, __other: bool) -> signedinteger[_NBit1]: ...
- @overload
- def __call__(self, __other: int) -> signedinteger[_NBit1 | _NBitInt]: ...
- @overload
- def __call__(self, __other: float) -> floating[_NBit1 | _NBitDouble]: ...
- @overload
- def __call__(
- self, __other: complex
- ) -> complexfloating[_NBit1 | _NBitDouble, _NBit1 | _NBitDouble]: ...
- @overload
- def __call__(
- self, __other: signedinteger[_NBit2]
- ) -> signedinteger[_NBit1 | _NBit2]: ...
-
- class _SignedIntBitOp(Protocol[_NBit1]):
- @overload
- def __call__(self, __other: bool) -> signedinteger[_NBit1]: ...
- @overload
- def __call__(self, __other: int) -> signedinteger[_NBit1 | _NBitInt]: ...
- @overload
- def __call__(
- self, __other: signedinteger[_NBit2]
- ) -> signedinteger[_NBit1 | _NBit2]: ...
-
- class _SignedIntMod(Protocol[_NBit1]):
- @overload
- def __call__(self, __other: bool) -> signedinteger[_NBit1]: ...
- @overload
- def __call__(self, __other: int) -> signedinteger[_NBit1 | _NBitInt]: ...
- @overload
- def __call__(self, __other: float) -> floating[_NBit1 | _NBitDouble]: ...
- @overload
- def __call__(
- self, __other: signedinteger[_NBit2]
- ) -> signedinteger[_NBit1 | _NBit2]: ...
-
- class _SignedIntDivMod(Protocol[_NBit1]):
- @overload
- def __call__(self, __other: bool) -> _2Tuple[signedinteger[_NBit1]]: ...
- @overload
- def __call__(self, __other: int) -> _2Tuple[signedinteger[_NBit1 | _NBitInt]]: ...
- @overload
- def __call__(self, __other: float) -> _2Tuple[floating[_NBit1 | _NBitDouble]]: ...
- @overload
- def __call__(
- self, __other: signedinteger[_NBit2]
- ) -> _2Tuple[signedinteger[_NBit1 | _NBit2]]: ...
-
- class _FloatOp(Protocol[_NBit1]):
- @overload
- def __call__(self, __other: bool) -> floating[_NBit1]: ...
- @overload
- def __call__(self, __other: int) -> floating[_NBit1 | _NBitInt]: ...
- @overload
- def __call__(self, __other: float) -> floating[_NBit1 | _NBitDouble]: ...
- @overload
- def __call__(
- self, __other: complex
- ) -> complexfloating[_NBit1 | _NBitDouble, _NBit1 | _NBitDouble]: ...
- @overload
- def __call__(
- self, __other: integer[_NBit2] | floating[_NBit2]
- ) -> floating[_NBit1 | _NBit2]: ...
-
- class _FloatMod(Protocol[_NBit1]):
- @overload
- def __call__(self, __other: bool) -> floating[_NBit1]: ...
- @overload
- def __call__(self, __other: int) -> floating[_NBit1 | _NBitInt]: ...
- @overload
- def __call__(self, __other: float) -> floating[_NBit1 | _NBitDouble]: ...
- @overload
- def __call__(
- self, __other: integer[_NBit2] | floating[_NBit2]
- ) -> floating[_NBit1 | _NBit2]: ...
-
- class _FloatDivMod(Protocol[_NBit1]):
- @overload
- def __call__(self, __other: bool) -> _2Tuple[floating[_NBit1]]: ...
- @overload
- def __call__(self, __other: int) -> _2Tuple[floating[_NBit1 | _NBitInt]]: ...
- @overload
- def __call__(self, __other: float) -> _2Tuple[floating[_NBit1 | _NBitDouble]]: ...
- @overload
- def __call__(
- self, __other: integer[_NBit2] | floating[_NBit2]
- ) -> _2Tuple[floating[_NBit1 | _NBit2]]: ...
-
- class _ComplexOp(Protocol[_NBit1]):
- @overload
- def __call__(self, __other: bool) -> complexfloating[_NBit1, _NBit1]: ...
- @overload
- def __call__(self, __other: int) -> complexfloating[_NBit1 | _NBitInt, _NBit1 | _NBitInt]: ...
- @overload
- def __call__(
- self, __other: complex
- ) -> complexfloating[_NBit1 | _NBitDouble, _NBit1 | _NBitDouble]: ...
- @overload
- def __call__(
- self,
- __other: Union[
- integer[_NBit2],
- floating[_NBit2],
- complexfloating[_NBit2, _NBit2],
- ]
- ) -> complexfloating[_NBit1 | _NBit2, _NBit1 | _NBit2]: ...
-
- class _NumberOp(Protocol):
- def __call__(self, __other: _NumberLike_co) -> Any: ...
-
- class _ComparisonOp(Protocol[_T1, _T2]):
- @overload
- def __call__(self, __other: _T1) -> bool_: ...
- @overload
- def __call__(self, __other: _T2) -> NDArray[bool_]: ...
-
-else:
- _BoolOp = Any
- _BoolBitOp = Any
- _BoolSub = Any
- _BoolTrueDiv = Any
- _BoolMod = Any
- _BoolDivMod = Any
- _TD64Div = Any
- _IntTrueDiv = Any
- _UnsignedIntOp = Any
- _UnsignedIntBitOp = Any
- _UnsignedIntMod = Any
- _UnsignedIntDivMod = Any
- _SignedIntOp = Any
- _SignedIntBitOp = Any
- _SignedIntMod = Any
- _SignedIntDivMod = Any
- _FloatOp = Any
- _FloatMod = Any
- _FloatDivMod = Any
- _ComplexOp = Any
- _NumberOp = Any
- _ComparisonOp = Any
+_T1 = TypeVar("_T1")
+_T2 = TypeVar("_T2")
+_2Tuple = Tuple[_T1, _T1]
+
+_NBit1 = TypeVar("_NBit1", bound=NBitBase)
+_NBit2 = TypeVar("_NBit2", bound=NBitBase)
+
+_IntType = TypeVar("_IntType", bound=integer)
+_FloatType = TypeVar("_FloatType", bound=floating)
+_NumberType = TypeVar("_NumberType", bound=number)
+_NumberType_co = TypeVar("_NumberType_co", covariant=True, bound=number)
+_GenericType_co = TypeVar("_GenericType_co", covariant=True, bound=generic)
+
+class _BoolOp(Protocol[_GenericType_co]):
+ @overload
+ def __call__(self, other: _BoolLike_co, /) -> _GenericType_co: ...
+ @overload # platform dependent
+ def __call__(self, other: int, /) -> int_: ...
+ @overload
+ def __call__(self, other: float, /) -> float64: ...
+ @overload
+ def __call__(self, other: complex, /) -> complex128: ...
+ @overload
+ def __call__(self, other: _NumberType, /) -> _NumberType: ...
+
+class _BoolBitOp(Protocol[_GenericType_co]):
+ @overload
+ def __call__(self, other: _BoolLike_co, /) -> _GenericType_co: ...
+ @overload # platform dependent
+ def __call__(self, other: int, /) -> int_: ...
+ @overload
+ def __call__(self, other: _IntType, /) -> _IntType: ...
+
+class _BoolSub(Protocol):
+ # Note that `other: bool_` is absent here
+ @overload
+ def __call__(self, other: bool, /) -> NoReturn: ...
+ @overload # platform dependent
+ def __call__(self, other: int, /) -> int_: ...
+ @overload
+ def __call__(self, other: float, /) -> float64: ...
+ @overload
+ def __call__(self, other: complex, /) -> complex128: ...
+ @overload
+ def __call__(self, other: _NumberType, /) -> _NumberType: ...
+
+class _BoolTrueDiv(Protocol):
+ @overload
+ def __call__(self, other: float | _IntLike_co, /) -> float64: ...
+ @overload
+ def __call__(self, other: complex, /) -> complex128: ...
+ @overload
+ def __call__(self, other: _NumberType, /) -> _NumberType: ...
+
+class _BoolMod(Protocol):
+ @overload
+ def __call__(self, other: _BoolLike_co, /) -> int8: ...
+ @overload # platform dependent
+ def __call__(self, other: int, /) -> int_: ...
+ @overload
+ def __call__(self, other: float, /) -> float64: ...
+ @overload
+ def __call__(self, other: _IntType, /) -> _IntType: ...
+ @overload
+ def __call__(self, other: _FloatType, /) -> _FloatType: ...
+
+class _BoolDivMod(Protocol):
+ @overload
+ def __call__(self, other: _BoolLike_co, /) -> _2Tuple[int8]: ...
+ @overload # platform dependent
+ def __call__(self, other: int, /) -> _2Tuple[int_]: ...
+ @overload
+ def __call__(self, other: float, /) -> _2Tuple[floating[_NBit1 | _NBitDouble]]: ...
+ @overload
+ def __call__(self, other: _IntType, /) -> _2Tuple[_IntType]: ...
+ @overload
+ def __call__(self, other: _FloatType, /) -> _2Tuple[_FloatType]: ...
+
+class _TD64Div(Protocol[_NumberType_co]):
+ @overload
+ def __call__(self, other: timedelta64, /) -> _NumberType_co: ...
+ @overload
+ def __call__(self, other: _BoolLike_co, /) -> NoReturn: ...
+ @overload
+ def __call__(self, other: _FloatLike_co, /) -> timedelta64: ...
+
+class _IntTrueDiv(Protocol[_NBit1]):
+ @overload
+ def __call__(self, other: bool, /) -> floating[_NBit1]: ...
+ @overload
+ def __call__(self, other: int, /) -> floating[_NBit1 | _NBitInt]: ...
+ @overload
+ def __call__(self, other: float, /) -> floating[_NBit1 | _NBitDouble]: ...
+ @overload
+ def __call__(
+ self, other: complex, /,
+ ) -> complexfloating[_NBit1 | _NBitDouble, _NBit1 | _NBitDouble]: ...
+ @overload
+ def __call__(self, other: integer[_NBit2], /) -> floating[_NBit1 | _NBit2]: ...
+
+class _UnsignedIntOp(Protocol[_NBit1]):
+ # NOTE: `uint64 + signedinteger -> float64`
+ @overload
+ def __call__(self, other: bool, /) -> unsignedinteger[_NBit1]: ...
+ @overload
+ def __call__(
+ self, other: int | signedinteger[Any], /
+ ) -> Any: ...
+ @overload
+ def __call__(self, other: float, /) -> floating[_NBit1 | _NBitDouble]: ...
+ @overload
+ def __call__(
+ self, other: complex, /,
+ ) -> complexfloating[_NBit1 | _NBitDouble, _NBit1 | _NBitDouble]: ...
+ @overload
+ def __call__(
+ self, other: unsignedinteger[_NBit2], /
+ ) -> unsignedinteger[_NBit1 | _NBit2]: ...
+
+class _UnsignedIntBitOp(Protocol[_NBit1]):
+ @overload
+ def __call__(self, other: bool, /) -> unsignedinteger[_NBit1]: ...
+ @overload
+ def __call__(self, other: int, /) -> signedinteger[Any]: ...
+ @overload
+ def __call__(self, other: signedinteger[Any], /) -> signedinteger[Any]: ...
+ @overload
+ def __call__(
+ self, other: unsignedinteger[_NBit2], /
+ ) -> unsignedinteger[_NBit1 | _NBit2]: ...
+
+class _UnsignedIntMod(Protocol[_NBit1]):
+ @overload
+ def __call__(self, other: bool, /) -> unsignedinteger[_NBit1]: ...
+ @overload
+ def __call__(
+ self, other: int | signedinteger[Any], /
+ ) -> Any: ...
+ @overload
+ def __call__(self, other: float, /) -> floating[_NBit1 | _NBitDouble]: ...
+ @overload
+ def __call__(
+ self, other: unsignedinteger[_NBit2], /
+ ) -> unsignedinteger[_NBit1 | _NBit2]: ...
+
+class _UnsignedIntDivMod(Protocol[_NBit1]):
+ @overload
+ def __call__(self, other: bool, /) -> _2Tuple[signedinteger[_NBit1]]: ...
+ @overload
+ def __call__(
+ self, other: int | signedinteger[Any], /
+ ) -> _2Tuple[Any]: ...
+ @overload
+ def __call__(self, other: float, /) -> _2Tuple[floating[_NBit1 | _NBitDouble]]: ...
+ @overload
+ def __call__(
+ self, other: unsignedinteger[_NBit2], /
+ ) -> _2Tuple[unsignedinteger[_NBit1 | _NBit2]]: ...
+
+class _SignedIntOp(Protocol[_NBit1]):
+ @overload
+ def __call__(self, other: bool, /) -> signedinteger[_NBit1]: ...
+ @overload
+ def __call__(self, other: int, /) -> signedinteger[_NBit1 | _NBitInt]: ...
+ @overload
+ def __call__(self, other: float, /) -> floating[_NBit1 | _NBitDouble]: ...
+ @overload
+ def __call__(
+ self, other: complex, /,
+ ) -> complexfloating[_NBit1 | _NBitDouble, _NBit1 | _NBitDouble]: ...
+ @overload
+ def __call__(
+ self, other: signedinteger[_NBit2], /,
+ ) -> signedinteger[_NBit1 | _NBit2]: ...
+
+class _SignedIntBitOp(Protocol[_NBit1]):
+ @overload
+ def __call__(self, other: bool, /) -> signedinteger[_NBit1]: ...
+ @overload
+ def __call__(self, other: int, /) -> signedinteger[_NBit1 | _NBitInt]: ...
+ @overload
+ def __call__(
+ self, other: signedinteger[_NBit2], /,
+ ) -> signedinteger[_NBit1 | _NBit2]: ...
+
+class _SignedIntMod(Protocol[_NBit1]):
+ @overload
+ def __call__(self, other: bool, /) -> signedinteger[_NBit1]: ...
+ @overload
+ def __call__(self, other: int, /) -> signedinteger[_NBit1 | _NBitInt]: ...
+ @overload
+ def __call__(self, other: float, /) -> floating[_NBit1 | _NBitDouble]: ...
+ @overload
+ def __call__(
+ self, other: signedinteger[_NBit2], /,
+ ) -> signedinteger[_NBit1 | _NBit2]: ...
+
+class _SignedIntDivMod(Protocol[_NBit1]):
+ @overload
+ def __call__(self, other: bool, /) -> _2Tuple[signedinteger[_NBit1]]: ...
+ @overload
+ def __call__(self, other: int, /) -> _2Tuple[signedinteger[_NBit1 | _NBitInt]]: ...
+ @overload
+ def __call__(self, other: float, /) -> _2Tuple[floating[_NBit1 | _NBitDouble]]: ...
+ @overload
+ def __call__(
+ self, other: signedinteger[_NBit2], /,
+ ) -> _2Tuple[signedinteger[_NBit1 | _NBit2]]: ...
+
+class _FloatOp(Protocol[_NBit1]):
+ @overload
+ def __call__(self, other: bool, /) -> floating[_NBit1]: ...
+ @overload
+ def __call__(self, other: int, /) -> floating[_NBit1 | _NBitInt]: ...
+ @overload
+ def __call__(self, other: float, /) -> floating[_NBit1 | _NBitDouble]: ...
+ @overload
+ def __call__(
+ self, other: complex, /,
+ ) -> complexfloating[_NBit1 | _NBitDouble, _NBit1 | _NBitDouble]: ...
+ @overload
+ def __call__(
+ self, other: integer[_NBit2] | floating[_NBit2], /
+ ) -> floating[_NBit1 | _NBit2]: ...
+
+class _FloatMod(Protocol[_NBit1]):
+ @overload
+ def __call__(self, other: bool, /) -> floating[_NBit1]: ...
+ @overload
+ def __call__(self, other: int, /) -> floating[_NBit1 | _NBitInt]: ...
+ @overload
+ def __call__(self, other: float, /) -> floating[_NBit1 | _NBitDouble]: ...
+ @overload
+ def __call__(
+ self, other: integer[_NBit2] | floating[_NBit2], /
+ ) -> floating[_NBit1 | _NBit2]: ...
+
+class _FloatDivMod(Protocol[_NBit1]):
+ @overload
+ def __call__(self, other: bool, /) -> _2Tuple[floating[_NBit1]]: ...
+ @overload
+ def __call__(self, other: int, /) -> _2Tuple[floating[_NBit1 | _NBitInt]]: ...
+ @overload
+ def __call__(self, other: float, /) -> _2Tuple[floating[_NBit1 | _NBitDouble]]: ...
+ @overload
+ def __call__(
+ self, other: integer[_NBit2] | floating[_NBit2], /
+ ) -> _2Tuple[floating[_NBit1 | _NBit2]]: ...
+
+class _ComplexOp(Protocol[_NBit1]):
+ @overload
+ def __call__(self, other: bool, /) -> complexfloating[_NBit1, _NBit1]: ...
+ @overload
+ def __call__(self, other: int, /) -> complexfloating[_NBit1 | _NBitInt, _NBit1 | _NBitInt]: ...
+ @overload
+ def __call__(
+ self, other: complex, /,
+ ) -> complexfloating[_NBit1 | _NBitDouble, _NBit1 | _NBitDouble]: ...
+ @overload
+ def __call__(
+ self,
+ other: Union[
+ integer[_NBit2],
+ floating[_NBit2],
+ complexfloating[_NBit2, _NBit2],
+ ], /,
+ ) -> complexfloating[_NBit1 | _NBit2, _NBit1 | _NBit2]: ...
+
+class _NumberOp(Protocol):
+ def __call__(self, other: _NumberLike_co, /) -> Any: ...
+
+class _ComparisonOp(Protocol[_T1, _T2]):
+ @overload
+ def __call__(self, other: _T1, /) -> bool_: ...
+ @overload
+ def __call__(self, other: _T2, /) -> NDArray[bool_]: ...
diff --git a/numpy/typing/_char_codes.py b/numpy/typing/_char_codes.py
index 22ee168e9..139471084 100644
--- a/numpy/typing/_char_codes.py
+++ b/numpy/typing/_char_codes.py
@@ -1,171 +1,111 @@
-import sys
-from typing import Any, TYPE_CHECKING
-
-from . import _HAS_TYPING_EXTENSIONS
-
-if sys.version_info >= (3, 8):
- from typing import Literal
-elif _HAS_TYPING_EXTENSIONS:
- from typing_extensions import Literal
-
-if TYPE_CHECKING or _HAS_TYPING_EXTENSIONS or sys.version_info >= (3, 8):
- _BoolCodes = Literal["?", "=?", "<?", ">?", "bool", "bool_", "bool8"]
-
- _UInt8Codes = Literal["uint8", "u1", "=u1", "<u1", ">u1"]
- _UInt16Codes = Literal["uint16", "u2", "=u2", "<u2", ">u2"]
- _UInt32Codes = Literal["uint32", "u4", "=u4", "<u4", ">u4"]
- _UInt64Codes = Literal["uint64", "u8", "=u8", "<u8", ">u8"]
-
- _Int8Codes = Literal["int8", "i1", "=i1", "<i1", ">i1"]
- _Int16Codes = Literal["int16", "i2", "=i2", "<i2", ">i2"]
- _Int32Codes = Literal["int32", "i4", "=i4", "<i4", ">i4"]
- _Int64Codes = Literal["int64", "i8", "=i8", "<i8", ">i8"]
-
- _Float16Codes = Literal["float16", "f2", "=f2", "<f2", ">f2"]
- _Float32Codes = Literal["float32", "f4", "=f4", "<f4", ">f4"]
- _Float64Codes = Literal["float64", "f8", "=f8", "<f8", ">f8"]
-
- _Complex64Codes = Literal["complex64", "c8", "=c8", "<c8", ">c8"]
- _Complex128Codes = Literal["complex128", "c16", "=c16", "<c16", ">c16"]
-
- _ByteCodes = Literal["byte", "b", "=b", "<b", ">b"]
- _ShortCodes = Literal["short", "h", "=h", "<h", ">h"]
- _IntCCodes = Literal["intc", "i", "=i", "<i", ">i"]
- _IntPCodes = Literal["intp", "int0", "p", "=p", "<p", ">p"]
- _IntCodes = Literal["long", "int", "int_", "l", "=l", "<l", ">l"]
- _LongLongCodes = Literal["longlong", "q", "=q", "<q", ">q"]
-
- _UByteCodes = Literal["ubyte", "B", "=B", "<B", ">B"]
- _UShortCodes = Literal["ushort", "H", "=H", "<H", ">H"]
- _UIntCCodes = Literal["uintc", "I", "=I", "<I", ">I"]
- _UIntPCodes = Literal["uintp", "uint0", "P", "=P", "<P", ">P"]
- _UIntCodes = Literal["uint", "L", "=L", "<L", ">L"]
- _ULongLongCodes = Literal["ulonglong", "Q", "=Q", "<Q", ">Q"]
-
- _HalfCodes = Literal["half", "e", "=e", "<e", ">e"]
- _SingleCodes = Literal["single", "f", "=f", "<f", ">f"]
- _DoubleCodes = Literal["double", "float", "float_", "d", "=d", "<d", ">d"]
- _LongDoubleCodes = Literal["longdouble", "longfloat", "g", "=g", "<g", ">g"]
-
- _CSingleCodes = Literal["csingle", "singlecomplex", "F", "=F", "<F", ">F"]
- _CDoubleCodes = Literal["cdouble", "complex", "complex_", "cfloat", "D", "=D", "<D", ">D"]
- _CLongDoubleCodes = Literal["clongdouble", "clongfloat", "longcomplex", "G", "=G", "<G", ">G"]
-
- _StrCodes = Literal["str", "str_", "str0", "unicode", "unicode_", "U", "=U", "<U", ">U"]
- _BytesCodes = Literal["bytes", "bytes_", "bytes0", "S", "=S", "<S", ">S"]
- _VoidCodes = Literal["void", "void0", "V", "=V", "<V", ">V"]
- _ObjectCodes = Literal["object", "object_", "O", "=O", "<O", ">O"]
-
- _DT64Codes = Literal[
- "datetime64", "=datetime64", "<datetime64", ">datetime64",
- "datetime64[Y]", "=datetime64[Y]", "<datetime64[Y]", ">datetime64[Y]",
- "datetime64[M]", "=datetime64[M]", "<datetime64[M]", ">datetime64[M]",
- "datetime64[W]", "=datetime64[W]", "<datetime64[W]", ">datetime64[W]",
- "datetime64[D]", "=datetime64[D]", "<datetime64[D]", ">datetime64[D]",
- "datetime64[h]", "=datetime64[h]", "<datetime64[h]", ">datetime64[h]",
- "datetime64[m]", "=datetime64[m]", "<datetime64[m]", ">datetime64[m]",
- "datetime64[s]", "=datetime64[s]", "<datetime64[s]", ">datetime64[s]",
- "datetime64[ms]", "=datetime64[ms]", "<datetime64[ms]", ">datetime64[ms]",
- "datetime64[us]", "=datetime64[us]", "<datetime64[us]", ">datetime64[us]",
- "datetime64[ns]", "=datetime64[ns]", "<datetime64[ns]", ">datetime64[ns]",
- "datetime64[ps]", "=datetime64[ps]", "<datetime64[ps]", ">datetime64[ps]",
- "datetime64[fs]", "=datetime64[fs]", "<datetime64[fs]", ">datetime64[fs]",
- "datetime64[as]", "=datetime64[as]", "<datetime64[as]", ">datetime64[as]",
- "M", "=M", "<M", ">M",
- "M8", "=M8", "<M8", ">M8",
- "M8[Y]", "=M8[Y]", "<M8[Y]", ">M8[Y]",
- "M8[M]", "=M8[M]", "<M8[M]", ">M8[M]",
- "M8[W]", "=M8[W]", "<M8[W]", ">M8[W]",
- "M8[D]", "=M8[D]", "<M8[D]", ">M8[D]",
- "M8[h]", "=M8[h]", "<M8[h]", ">M8[h]",
- "M8[m]", "=M8[m]", "<M8[m]", ">M8[m]",
- "M8[s]", "=M8[s]", "<M8[s]", ">M8[s]",
- "M8[ms]", "=M8[ms]", "<M8[ms]", ">M8[ms]",
- "M8[us]", "=M8[us]", "<M8[us]", ">M8[us]",
- "M8[ns]", "=M8[ns]", "<M8[ns]", ">M8[ns]",
- "M8[ps]", "=M8[ps]", "<M8[ps]", ">M8[ps]",
- "M8[fs]", "=M8[fs]", "<M8[fs]", ">M8[fs]",
- "M8[as]", "=M8[as]", "<M8[as]", ">M8[as]",
- ]
- _TD64Codes = Literal[
- "timedelta64", "=timedelta64", "<timedelta64", ">timedelta64",
- "timedelta64[Y]", "=timedelta64[Y]", "<timedelta64[Y]", ">timedelta64[Y]",
- "timedelta64[M]", "=timedelta64[M]", "<timedelta64[M]", ">timedelta64[M]",
- "timedelta64[W]", "=timedelta64[W]", "<timedelta64[W]", ">timedelta64[W]",
- "timedelta64[D]", "=timedelta64[D]", "<timedelta64[D]", ">timedelta64[D]",
- "timedelta64[h]", "=timedelta64[h]", "<timedelta64[h]", ">timedelta64[h]",
- "timedelta64[m]", "=timedelta64[m]", "<timedelta64[m]", ">timedelta64[m]",
- "timedelta64[s]", "=timedelta64[s]", "<timedelta64[s]", ">timedelta64[s]",
- "timedelta64[ms]", "=timedelta64[ms]", "<timedelta64[ms]", ">timedelta64[ms]",
- "timedelta64[us]", "=timedelta64[us]", "<timedelta64[us]", ">timedelta64[us]",
- "timedelta64[ns]", "=timedelta64[ns]", "<timedelta64[ns]", ">timedelta64[ns]",
- "timedelta64[ps]", "=timedelta64[ps]", "<timedelta64[ps]", ">timedelta64[ps]",
- "timedelta64[fs]", "=timedelta64[fs]", "<timedelta64[fs]", ">timedelta64[fs]",
- "timedelta64[as]", "=timedelta64[as]", "<timedelta64[as]", ">timedelta64[as]",
- "m", "=m", "<m", ">m",
- "m8", "=m8", "<m8", ">m8",
- "m8[Y]", "=m8[Y]", "<m8[Y]", ">m8[Y]",
- "m8[M]", "=m8[M]", "<m8[M]", ">m8[M]",
- "m8[W]", "=m8[W]", "<m8[W]", ">m8[W]",
- "m8[D]", "=m8[D]", "<m8[D]", ">m8[D]",
- "m8[h]", "=m8[h]", "<m8[h]", ">m8[h]",
- "m8[m]", "=m8[m]", "<m8[m]", ">m8[m]",
- "m8[s]", "=m8[s]", "<m8[s]", ">m8[s]",
- "m8[ms]", "=m8[ms]", "<m8[ms]", ">m8[ms]",
- "m8[us]", "=m8[us]", "<m8[us]", ">m8[us]",
- "m8[ns]", "=m8[ns]", "<m8[ns]", ">m8[ns]",
- "m8[ps]", "=m8[ps]", "<m8[ps]", ">m8[ps]",
- "m8[fs]", "=m8[fs]", "<m8[fs]", ">m8[fs]",
- "m8[as]", "=m8[as]", "<m8[as]", ">m8[as]",
- ]
-
-else:
- _BoolCodes = Any
-
- _UInt8Codes = Any
- _UInt16Codes = Any
- _UInt32Codes = Any
- _UInt64Codes = Any
-
- _Int8Codes = Any
- _Int16Codes = Any
- _Int32Codes = Any
- _Int64Codes = Any
-
- _Float16Codes = Any
- _Float32Codes = Any
- _Float64Codes = Any
-
- _Complex64Codes = Any
- _Complex128Codes = Any
-
- _ByteCodes = Any
- _ShortCodes = Any
- _IntCCodes = Any
- _IntPCodes = Any
- _IntCodes = Any
- _LongLongCodes = Any
-
- _UByteCodes = Any
- _UShortCodes = Any
- _UIntCCodes = Any
- _UIntPCodes = Any
- _UIntCodes = Any
- _ULongLongCodes = Any
-
- _HalfCodes = Any
- _SingleCodes = Any
- _DoubleCodes = Any
- _LongDoubleCodes = Any
-
- _CSingleCodes = Any
- _CDoubleCodes = Any
- _CLongDoubleCodes = Any
-
- _StrCodes = Any
- _BytesCodes = Any
- _VoidCodes = Any
- _ObjectCodes = Any
-
- _DT64Codes = Any
- _TD64Codes = Any
+from typing import Literal
+
+_BoolCodes = Literal["?", "=?", "<?", ">?", "bool", "bool_", "bool8"]
+
+_UInt8Codes = Literal["uint8", "u1", "=u1", "<u1", ">u1"]
+_UInt16Codes = Literal["uint16", "u2", "=u2", "<u2", ">u2"]
+_UInt32Codes = Literal["uint32", "u4", "=u4", "<u4", ">u4"]
+_UInt64Codes = Literal["uint64", "u8", "=u8", "<u8", ">u8"]
+
+_Int8Codes = Literal["int8", "i1", "=i1", "<i1", ">i1"]
+_Int16Codes = Literal["int16", "i2", "=i2", "<i2", ">i2"]
+_Int32Codes = Literal["int32", "i4", "=i4", "<i4", ">i4"]
+_Int64Codes = Literal["int64", "i8", "=i8", "<i8", ">i8"]
+
+_Float16Codes = Literal["float16", "f2", "=f2", "<f2", ">f2"]
+_Float32Codes = Literal["float32", "f4", "=f4", "<f4", ">f4"]
+_Float64Codes = Literal["float64", "f8", "=f8", "<f8", ">f8"]
+
+_Complex64Codes = Literal["complex64", "c8", "=c8", "<c8", ">c8"]
+_Complex128Codes = Literal["complex128", "c16", "=c16", "<c16", ">c16"]
+
+_ByteCodes = Literal["byte", "b", "=b", "<b", ">b"]
+_ShortCodes = Literal["short", "h", "=h", "<h", ">h"]
+_IntCCodes = Literal["intc", "i", "=i", "<i", ">i"]
+_IntPCodes = Literal["intp", "int0", "p", "=p", "<p", ">p"]
+_IntCodes = Literal["long", "int", "int_", "l", "=l", "<l", ">l"]
+_LongLongCodes = Literal["longlong", "q", "=q", "<q", ">q"]
+
+_UByteCodes = Literal["ubyte", "B", "=B", "<B", ">B"]
+_UShortCodes = Literal["ushort", "H", "=H", "<H", ">H"]
+_UIntCCodes = Literal["uintc", "I", "=I", "<I", ">I"]
+_UIntPCodes = Literal["uintp", "uint0", "P", "=P", "<P", ">P"]
+_UIntCodes = Literal["uint", "L", "=L", "<L", ">L"]
+_ULongLongCodes = Literal["ulonglong", "Q", "=Q", "<Q", ">Q"]
+
+_HalfCodes = Literal["half", "e", "=e", "<e", ">e"]
+_SingleCodes = Literal["single", "f", "=f", "<f", ">f"]
+_DoubleCodes = Literal["double", "float", "float_", "d", "=d", "<d", ">d"]
+_LongDoubleCodes = Literal["longdouble", "longfloat", "g", "=g", "<g", ">g"]
+
+_CSingleCodes = Literal["csingle", "singlecomplex", "F", "=F", "<F", ">F"]
+_CDoubleCodes = Literal["cdouble", "complex", "complex_", "cfloat", "D", "=D", "<D", ">D"]
+_CLongDoubleCodes = Literal["clongdouble", "clongfloat", "longcomplex", "G", "=G", "<G", ">G"]
+
+_StrCodes = Literal["str", "str_", "str0", "unicode", "unicode_", "U", "=U", "<U", ">U"]
+_BytesCodes = Literal["bytes", "bytes_", "bytes0", "S", "=S", "<S", ">S"]
+_VoidCodes = Literal["void", "void0", "V", "=V", "<V", ">V"]
+_ObjectCodes = Literal["object", "object_", "O", "=O", "<O", ">O"]
+
+_DT64Codes = Literal[
+ "datetime64", "=datetime64", "<datetime64", ">datetime64",
+ "datetime64[Y]", "=datetime64[Y]", "<datetime64[Y]", ">datetime64[Y]",
+ "datetime64[M]", "=datetime64[M]", "<datetime64[M]", ">datetime64[M]",
+ "datetime64[W]", "=datetime64[W]", "<datetime64[W]", ">datetime64[W]",
+ "datetime64[D]", "=datetime64[D]", "<datetime64[D]", ">datetime64[D]",
+ "datetime64[h]", "=datetime64[h]", "<datetime64[h]", ">datetime64[h]",
+ "datetime64[m]", "=datetime64[m]", "<datetime64[m]", ">datetime64[m]",
+ "datetime64[s]", "=datetime64[s]", "<datetime64[s]", ">datetime64[s]",
+ "datetime64[ms]", "=datetime64[ms]", "<datetime64[ms]", ">datetime64[ms]",
+ "datetime64[us]", "=datetime64[us]", "<datetime64[us]", ">datetime64[us]",
+ "datetime64[ns]", "=datetime64[ns]", "<datetime64[ns]", ">datetime64[ns]",
+ "datetime64[ps]", "=datetime64[ps]", "<datetime64[ps]", ">datetime64[ps]",
+ "datetime64[fs]", "=datetime64[fs]", "<datetime64[fs]", ">datetime64[fs]",
+ "datetime64[as]", "=datetime64[as]", "<datetime64[as]", ">datetime64[as]",
+ "M", "=M", "<M", ">M",
+ "M8", "=M8", "<M8", ">M8",
+ "M8[Y]", "=M8[Y]", "<M8[Y]", ">M8[Y]",
+ "M8[M]", "=M8[M]", "<M8[M]", ">M8[M]",
+ "M8[W]", "=M8[W]", "<M8[W]", ">M8[W]",
+ "M8[D]", "=M8[D]", "<M8[D]", ">M8[D]",
+ "M8[h]", "=M8[h]", "<M8[h]", ">M8[h]",
+ "M8[m]", "=M8[m]", "<M8[m]", ">M8[m]",
+ "M8[s]", "=M8[s]", "<M8[s]", ">M8[s]",
+ "M8[ms]", "=M8[ms]", "<M8[ms]", ">M8[ms]",
+ "M8[us]", "=M8[us]", "<M8[us]", ">M8[us]",
+ "M8[ns]", "=M8[ns]", "<M8[ns]", ">M8[ns]",
+ "M8[ps]", "=M8[ps]", "<M8[ps]", ">M8[ps]",
+ "M8[fs]", "=M8[fs]", "<M8[fs]", ">M8[fs]",
+ "M8[as]", "=M8[as]", "<M8[as]", ">M8[as]",
+]
+_TD64Codes = Literal[
+ "timedelta64", "=timedelta64", "<timedelta64", ">timedelta64",
+ "timedelta64[Y]", "=timedelta64[Y]", "<timedelta64[Y]", ">timedelta64[Y]",
+ "timedelta64[M]", "=timedelta64[M]", "<timedelta64[M]", ">timedelta64[M]",
+ "timedelta64[W]", "=timedelta64[W]", "<timedelta64[W]", ">timedelta64[W]",
+ "timedelta64[D]", "=timedelta64[D]", "<timedelta64[D]", ">timedelta64[D]",
+ "timedelta64[h]", "=timedelta64[h]", "<timedelta64[h]", ">timedelta64[h]",
+ "timedelta64[m]", "=timedelta64[m]", "<timedelta64[m]", ">timedelta64[m]",
+ "timedelta64[s]", "=timedelta64[s]", "<timedelta64[s]", ">timedelta64[s]",
+ "timedelta64[ms]", "=timedelta64[ms]", "<timedelta64[ms]", ">timedelta64[ms]",
+ "timedelta64[us]", "=timedelta64[us]", "<timedelta64[us]", ">timedelta64[us]",
+ "timedelta64[ns]", "=timedelta64[ns]", "<timedelta64[ns]", ">timedelta64[ns]",
+ "timedelta64[ps]", "=timedelta64[ps]", "<timedelta64[ps]", ">timedelta64[ps]",
+ "timedelta64[fs]", "=timedelta64[fs]", "<timedelta64[fs]", ">timedelta64[fs]",
+ "timedelta64[as]", "=timedelta64[as]", "<timedelta64[as]", ">timedelta64[as]",
+ "m", "=m", "<m", ">m",
+ "m8", "=m8", "<m8", ">m8",
+ "m8[Y]", "=m8[Y]", "<m8[Y]", ">m8[Y]",
+ "m8[M]", "=m8[M]", "<m8[M]", ">m8[M]",
+ "m8[W]", "=m8[W]", "<m8[W]", ">m8[W]",
+ "m8[D]", "=m8[D]", "<m8[D]", ">m8[D]",
+ "m8[h]", "=m8[h]", "<m8[h]", ">m8[h]",
+ "m8[m]", "=m8[m]", "<m8[m]", ">m8[m]",
+ "m8[s]", "=m8[s]", "<m8[s]", ">m8[s]",
+ "m8[ms]", "=m8[ms]", "<m8[ms]", ">m8[ms]",
+ "m8[us]", "=m8[us]", "<m8[us]", ">m8[us]",
+ "m8[ns]", "=m8[ns]", "<m8[ns]", ">m8[ns]",
+ "m8[ps]", "=m8[ps]", "<m8[ps]", ">m8[ps]",
+ "m8[fs]", "=m8[fs]", "<m8[fs]", ">m8[fs]",
+ "m8[as]", "=m8[as]", "<m8[as]", ">m8[as]",
+]
diff --git a/numpy/typing/_dtype_like.py b/numpy/typing/_dtype_like.py
index b2ce3adb4..0955f5b18 100644
--- a/numpy/typing/_dtype_like.py
+++ b/numpy/typing/_dtype_like.py
@@ -1,19 +1,10 @@
-import sys
-from typing import Any, List, Sequence, Tuple, Union, Type, TypeVar, TYPE_CHECKING
+from typing import Any, List, Sequence, Tuple, Union, Type, TypeVar, Protocol, TypedDict
import numpy as np
-from . import _HAS_TYPING_EXTENSIONS
from ._shape import _ShapeLike
from ._generic_alias import _DType as DType
-if sys.version_info >= (3, 8):
- from typing import Protocol, TypedDict
-elif _HAS_TYPING_EXTENSIONS:
- from typing_extensions import Protocol, TypedDict
-else:
- from ._generic_alias import _GenericAlias as GenericAlias
-
from ._char_codes import (
_BoolCodes,
_UInt8Codes,
@@ -59,30 +50,22 @@ from ._char_codes import (
_DTypeLikeNested = Any # TODO: wait for support for recursive types
_DType_co = TypeVar("_DType_co", covariant=True, bound=DType[Any])
-if TYPE_CHECKING or _HAS_TYPING_EXTENSIONS or sys.version_info >= (3, 8):
- # Mandatory keys
- class _DTypeDictBase(TypedDict):
- names: Sequence[str]
- formats: Sequence[_DTypeLikeNested]
-
- # Mandatory + optional keys
- class _DTypeDict(_DTypeDictBase, total=False):
- offsets: Sequence[int]
- titles: Sequence[Any] # Only `str` elements are usable as indexing aliases, but all objects are legal
- itemsize: int
- aligned: bool
-
- # A protocol for anything with the dtype attribute
- class _SupportsDType(Protocol[_DType_co]):
- @property
- def dtype(self) -> _DType_co: ...
-
-else:
- _DTypeDict = Any
-
- class _SupportsDType: ...
- _SupportsDType = GenericAlias(_SupportsDType, _DType_co)
-
+# Mandatory keys
+class _DTypeDictBase(TypedDict):
+ names: Sequence[str]
+ formats: Sequence[_DTypeLikeNested]
+
+# Mandatory + optional keys
+class _DTypeDict(_DTypeDictBase, total=False):
+ offsets: Sequence[int]
+ titles: Sequence[Any] # Only `str` elements are usable as indexing aliases, but all objects are legal
+ itemsize: int
+ aligned: bool
+
+# A protocol for anything with the dtype attribute
+class _SupportsDType(Protocol[_DType_co]):
+ @property
+ def dtype(self) -> _DType_co: ...
# Would create a dtype[np.void]
_VoidDTypeLike = Union[
diff --git a/numpy/typing/_shape.py b/numpy/typing/_shape.py
index 75698f3d3..c28859b19 100644
--- a/numpy/typing/_shape.py
+++ b/numpy/typing/_shape.py
@@ -1,14 +1,4 @@
-import sys
-from typing import Sequence, Tuple, Union, Any
-
-from . import _HAS_TYPING_EXTENSIONS
-
-if sys.version_info >= (3, 8):
- from typing import SupportsIndex
-elif _HAS_TYPING_EXTENSIONS:
- from typing_extensions import SupportsIndex
-else:
- SupportsIndex = Any
+from typing import Sequence, Tuple, Union, SupportsIndex
_Shape = Tuple[int, ...]
diff --git a/numpy/typing/_ufunc.pyi b/numpy/typing/_ufunc.pyi
index be1e654c2..1be3500c1 100644
--- a/numpy/typing/_ufunc.pyi
+++ b/numpy/typing/_ufunc.pyi
@@ -14,6 +14,8 @@ from typing import (
overload,
Tuple,
TypeVar,
+ Literal,
+ SupportsIndex,
)
from numpy import ufunc, _CastingKind, _OrderKACF
@@ -24,8 +26,6 @@ from ._scalars import _ScalarLike_co
from ._array_like import ArrayLike, _ArrayLikeBool_co, _ArrayLikeInt_co
from ._dtype_like import DTypeLike
-from typing_extensions import Literal, SupportsIndex
-
_T = TypeVar("_T")
_2Tuple = Tuple[_T, _T]
_3Tuple = Tuple[_T, _T, _T]
@@ -105,8 +105,9 @@ class _UFunc_Nin1_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]):
def at(
self,
- __a: NDArray[Any],
- __indices: _ArrayLikeInt_co,
+ a: NDArray[Any],
+ indices: _ArrayLikeInt_co,
+ /,
) -> None: ...
class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]):
@@ -158,9 +159,10 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]):
def at(
self,
- __a: NDArray[Any],
- __indices: _ArrayLikeInt_co,
- __b: ArrayLike,
+ a: NDArray[Any],
+ indices: _ArrayLikeInt_co,
+ b: ArrayLike,
+ /,
) -> None: ...
def reduce(
@@ -195,9 +197,9 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]):
@overload
def outer(
self,
- __A: _ScalarLike_co,
- __B: _ScalarLike_co,
- *,
+ A: _ScalarLike_co,
+ B: _ScalarLike_co,
+ /, *,
out: None = ...,
where: None | _ArrayLikeBool_co = ...,
casting: _CastingKind = ...,
@@ -210,9 +212,9 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]):
@overload
def outer( # type: ignore[misc]
self,
- __A: ArrayLike,
- __B: ArrayLike,
- *,
+ A: ArrayLike,
+ B: ArrayLike,
+ /, *,
out: None | NDArray[Any] | Tuple[NDArray[Any]] = ...,
where: None | _ArrayLikeBool_co = ...,
casting: _CastingKind = ...,
diff --git a/numpy/typing/tests/data/fail/modules.py b/numpy/typing/tests/data/fail/modules.py
index 7b9309329..59e724f22 100644
--- a/numpy/typing/tests/data/fail/modules.py
+++ b/numpy/typing/tests/data/fail/modules.py
@@ -12,7 +12,6 @@ np.math # E: Module has no attribute
# Public sub-modules that are not imported to their parent module by default;
# e.g. one must first execute `import numpy.lib.recfunctions`
np.lib.recfunctions # E: Module has no attribute
-np.ma.mrecords # E: Module has no attribute
np.__NUMPY_SETUP__ # E: Module has no attribute
np.__deprecated_attrs__ # E: Module has no attribute
diff --git a/numpy/typing/tests/data/fail/npyio.py b/numpy/typing/tests/data/fail/npyio.py
new file mode 100644
index 000000000..c91b4c9cb
--- /dev/null
+++ b/numpy/typing/tests/data/fail/npyio.py
@@ -0,0 +1,30 @@
+import pathlib
+from typing import IO
+
+import numpy.typing as npt
+import numpy as np
+
+str_path: str
+bytes_path: bytes
+pathlib_path: pathlib.Path
+str_file: IO[str]
+AR_i8: npt.NDArray[np.int64]
+
+np.load(str_file) # E: incompatible type
+
+np.save(bytes_path, AR_i8) # E: incompatible type
+np.save(str_file, AR_i8) # E: incompatible type
+
+np.savez(bytes_path, AR_i8) # E: incompatible type
+np.savez(str_file, AR_i8) # E: incompatible type
+
+np.savez_compressed(bytes_path, AR_i8) # E: incompatible type
+np.savez_compressed(str_file, AR_i8) # E: incompatible type
+
+np.loadtxt(bytes_path) # E: incompatible type
+
+np.fromregex(bytes_path, ".", np.int64) # E: No overload variant
+
+np.recfromtxt(bytes_path) # E: incompatible type
+
+np.recfromcsv(bytes_path) # E: incompatible type
diff --git a/numpy/typing/tests/data/fail/scalars.py b/numpy/typing/tests/data/fail/scalars.py
index 099418e67..94fe3f71e 100644
--- a/numpy/typing/tests/data/fail/scalars.py
+++ b/numpy/typing/tests/data/fail/scalars.py
@@ -87,7 +87,6 @@ round(c8) # E: No overload variant
c8.__getnewargs__() # E: Invalid self argument
f2.__getnewargs__() # E: Invalid self argument
-f2.is_integer() # E: Invalid self argument
f2.hex() # E: Invalid self argument
np.float16.fromhex("0x0.0p+0") # E: Invalid self argument
f2.__trunc__() # E: Invalid self argument
diff --git a/numpy/typing/tests/data/fail/stride_tricks.py b/numpy/typing/tests/data/fail/stride_tricks.py
new file mode 100644
index 000000000..f2bfba743
--- /dev/null
+++ b/numpy/typing/tests/data/fail/stride_tricks.py
@@ -0,0 +1,9 @@
+import numpy as np
+import numpy.typing as npt
+
+AR_f8: npt.NDArray[np.float64]
+
+np.lib.stride_tricks.as_strided(AR_f8, shape=8) # E: No overload variant
+np.lib.stride_tricks.as_strided(AR_f8, strides=8) # E: No overload variant
+
+np.lib.stride_tricks.sliding_window_view(AR_f8, axis=(1,)) # E: No overload variant
diff --git a/numpy/typing/tests/data/reveal/arraypad.py b/numpy/typing/tests/data/reveal/arraypad.py
index ba5577ee0..03c03fb4e 100644
--- a/numpy/typing/tests/data/reveal/arraypad.py
+++ b/numpy/typing/tests/data/reveal/arraypad.py
@@ -1,5 +1,4 @@
-from typing import List, Any, Mapping, Tuple
-from typing_extensions import SupportsIndex
+from typing import List, Any, Mapping, Tuple, SupportsIndex
import numpy as np
import numpy.typing as npt
diff --git a/numpy/typing/tests/data/reveal/npyio.py b/numpy/typing/tests/data/reveal/npyio.py
new file mode 100644
index 000000000..d66201dd3
--- /dev/null
+++ b/numpy/typing/tests/data/reveal/npyio.py
@@ -0,0 +1,91 @@
+import re
+import pathlib
+from typing import IO, List
+
+import numpy.typing as npt
+import numpy as np
+
+str_path: str
+pathlib_path: pathlib.Path
+str_file: IO[str]
+bytes_file: IO[bytes]
+
+bag_obj: np.lib.npyio.BagObj[int]
+npz_file: np.lib.npyio.NpzFile
+
+AR_i8: npt.NDArray[np.int64]
+AR_LIKE_f8: List[float]
+
+class BytesWriter:
+ def write(self, data: bytes) -> None: ...
+
+class BytesReader:
+ def read(self, n: int = ...) -> bytes: ...
+ def seek(self, offset: int, whence: int = ...) -> int: ...
+
+bytes_writer: BytesWriter
+bytes_reader: BytesReader
+
+reveal_type(bag_obj.a) # E: int
+reveal_type(bag_obj.b) # E: int
+
+reveal_type(npz_file.zip) # E: zipfile.ZipFile
+reveal_type(npz_file.fid) # E: Union[None, typing.IO[builtins.str]]
+reveal_type(npz_file.files) # E: list[builtins.str]
+reveal_type(npz_file.allow_pickle) # E: bool
+reveal_type(npz_file.pickle_kwargs) # E: Union[None, typing.Mapping[builtins.str, Any]]
+reveal_type(npz_file.f) # E: numpy.lib.npyio.BagObj[numpy.lib.npyio.NpzFile]
+reveal_type(npz_file["test"]) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+reveal_type(len(npz_file)) # E: int
+with npz_file as f:
+ reveal_type(f) # E: numpy.lib.npyio.NpzFile
+
+reveal_type(np.load(bytes_file)) # E: Any
+reveal_type(np.load(pathlib_path, allow_pickle=True)) # E: Any
+reveal_type(np.load(str_path, encoding="bytes")) # E: Any
+reveal_type(np.load(bytes_reader)) # E: Any
+
+reveal_type(np.save(bytes_file, AR_LIKE_f8)) # E: None
+reveal_type(np.save(pathlib_path, AR_i8, allow_pickle=True)) # E: None
+reveal_type(np.save(str_path, AR_LIKE_f8)) # E: None
+reveal_type(np.save(bytes_writer, AR_LIKE_f8)) # E: None
+
+reveal_type(np.savez(bytes_file, AR_LIKE_f8)) # E: None
+reveal_type(np.savez(pathlib_path, ar1=AR_i8, ar2=AR_i8)) # E: None
+reveal_type(np.savez(str_path, AR_LIKE_f8, ar1=AR_i8)) # E: None
+reveal_type(np.savez(bytes_writer, AR_LIKE_f8, ar1=AR_i8)) # E: None
+
+reveal_type(np.savez_compressed(bytes_file, AR_LIKE_f8)) # E: None
+reveal_type(np.savez_compressed(pathlib_path, ar1=AR_i8, ar2=AR_i8)) # E: None
+reveal_type(np.savez_compressed(str_path, AR_LIKE_f8, ar1=AR_i8)) # E: None
+reveal_type(np.savez_compressed(bytes_writer, AR_LIKE_f8, ar1=AR_i8)) # E: None
+
+reveal_type(np.loadtxt(bytes_file)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.loadtxt(pathlib_path, dtype=np.str_)) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]]
+reveal_type(np.loadtxt(str_path, dtype=str, skiprows=2)) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+reveal_type(np.loadtxt(str_file, comments="test")) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.loadtxt(str_path, delimiter="\n")) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.loadtxt(str_path, ndmin=2)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.loadtxt(["1", "2", "3"])) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+
+reveal_type(np.fromregex(bytes_file, "test", np.float64)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.fromregex(str_file, b"test", dtype=float)) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+reveal_type(np.fromregex(str_path, re.compile("test"), dtype=np.str_, encoding="utf8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]]
+reveal_type(np.fromregex(pathlib_path, "test", np.float64)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.fromregex(bytes_reader, "test", np.float64)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+
+reveal_type(np.genfromtxt(bytes_file)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.genfromtxt(pathlib_path, dtype=np.str_)) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]]
+reveal_type(np.genfromtxt(str_path, dtype=str, skiprows=2)) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+reveal_type(np.genfromtxt(str_file, comments="test")) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.genfromtxt(str_path, delimiter="\n")) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.genfromtxt(str_path, ndmin=2)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.genfromtxt(["1", "2", "3"], ndmin=2)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+
+reveal_type(np.recfromtxt(bytes_file)) # E: numpy.recarray[Any, numpy.dtype[numpy.void]]
+reveal_type(np.recfromtxt(pathlib_path, usemask=True)) # E: numpy.ma.mrecords.MaskedRecords[Any, numpy.dtype[numpy.void]]
+reveal_type(np.recfromtxt(["1", "2", "3"])) # E: numpy.recarray[Any, numpy.dtype[numpy.void]]
+
+reveal_type(np.recfromcsv(bytes_file)) # E: numpy.recarray[Any, numpy.dtype[numpy.void]]
+reveal_type(np.recfromcsv(pathlib_path, usemask=True)) # E: numpy.ma.mrecords.MaskedRecords[Any, numpy.dtype[numpy.void]]
+reveal_type(np.recfromcsv(["1", "2", "3"])) # E: numpy.recarray[Any, numpy.dtype[numpy.void]]
diff --git a/numpy/typing/tests/data/reveal/scalars.py b/numpy/typing/tests/data/reveal/scalars.py
index c36813004..e83d579e9 100644
--- a/numpy/typing/tests/data/reveal/scalars.py
+++ b/numpy/typing/tests/data/reveal/scalars.py
@@ -156,3 +156,5 @@ reveal_type(round(f8, 3)) # E: {float64}
if sys.version_info >= (3, 9):
reveal_type(f8.__ceil__()) # E: int
reveal_type(f8.__floor__()) # E: int
+
+reveal_type(i8.is_integer()) # E: Literal[True]
diff --git a/numpy/typing/tests/data/reveal/stride_tricks.py b/numpy/typing/tests/data/reveal/stride_tricks.py
new file mode 100644
index 000000000..152d9cea6
--- /dev/null
+++ b/numpy/typing/tests/data/reveal/stride_tricks.py
@@ -0,0 +1,28 @@
+from typing import List, Dict, Any
+import numpy as np
+import numpy.typing as npt
+
+AR_f8: npt.NDArray[np.float64]
+AR_LIKE_f: List[float]
+interface_dict: Dict[str, Any]
+
+reveal_type(np.lib.stride_tricks.DummyArray(interface_dict)) # E: numpy.lib.stride_tricks.DummyArray
+
+reveal_type(np.lib.stride_tricks.as_strided(AR_f8)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.lib.stride_tricks.as_strided(AR_LIKE_f)) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+reveal_type(np.lib.stride_tricks.as_strided(AR_f8, strides=(1, 5))) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.lib.stride_tricks.as_strided(AR_f8, shape=[9, 20])) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+
+reveal_type(np.lib.stride_tricks.sliding_window_view(AR_f8, 5)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.lib.stride_tricks.sliding_window_view(AR_LIKE_f, (1, 5))) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+reveal_type(np.lib.stride_tricks.sliding_window_view(AR_f8, [9], axis=1)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+
+reveal_type(np.broadcast_to(AR_f8, 5)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.broadcast_to(AR_LIKE_f, (1, 5))) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+reveal_type(np.broadcast_to(AR_f8, [4, 6], subok=True)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+
+reveal_type(np.broadcast_shapes((1, 2), [3, 1], (3, 2))) # E: tuple[builtins.int]
+reveal_type(np.broadcast_shapes((6, 7), (5, 6, 1), 7, (5, 1, 7))) # E: tuple[builtins.int]
+
+reveal_type(np.broadcast_arrays(AR_f8, AR_f8)) # E: list[numpy.ndarray[Any, numpy.dtype[Any]]]
+reveal_type(np.broadcast_arrays(AR_f8, AR_LIKE_f)) # E: list[numpy.ndarray[Any, numpy.dtype[Any]]]
diff --git a/numpy/typing/tests/test_runtime.py b/numpy/typing/tests/test_runtime.py
index e82b08ac2..151b06bed 100644
--- a/numpy/typing/tests/test_runtime.py
+++ b/numpy/typing/tests/test_runtime.py
@@ -3,18 +3,12 @@
from __future__ import annotations
import sys
-from typing import get_type_hints, Union, Tuple, NamedTuple
+from typing import get_type_hints, Union, Tuple, NamedTuple, get_args, get_origin
import pytest
import numpy as np
import numpy.typing as npt
-try:
- from typing_extensions import get_args, get_origin
- SKIP = False
-except ImportError:
- SKIP = True
-
class TypeTup(NamedTuple):
typ: type
@@ -36,7 +30,6 @@ TYPES = {
@pytest.mark.parametrize("name,tup", TYPES.items(), ids=TYPES.keys())
-@pytest.mark.skipif(SKIP, reason="requires typing-extensions")
def test_get_args(name: type, tup: TypeTup) -> None:
"""Test `typing.get_args`."""
typ, ref = tup.typ, tup.args
@@ -45,7 +38,6 @@ def test_get_args(name: type, tup: TypeTup) -> None:
@pytest.mark.parametrize("name,tup", TYPES.items(), ids=TYPES.keys())
-@pytest.mark.skipif(SKIP, reason="requires typing-extensions")
def test_get_origin(name: type, tup: TypeTup) -> None:
"""Test `typing.get_origin`."""
typ, ref = tup.typ, tup.origin
diff --git a/numpy/typing/tests/test_typing_extensions.py b/numpy/typing/tests/test_typing_extensions.py
deleted file mode 100644
index f59f222fb..000000000
--- a/numpy/typing/tests/test_typing_extensions.py
+++ /dev/null
@@ -1,35 +0,0 @@
-"""Tests for the optional typing-extensions dependency."""
-
-import sys
-import textwrap
-import subprocess
-
-CODE = textwrap.dedent(r"""
- import sys
- import importlib
-
- assert "typing_extensions" not in sys.modules
- assert "numpy.typing" not in sys.modules
-
- # Importing `typing_extensions` will now raise an `ImportError`
- sys.modules["typing_extensions"] = None
- assert importlib.import_module("numpy.typing")
-""")
-
-
-def test_no_typing_extensions() -> None:
- """Import `numpy.typing` in the absence of typing-extensions.
-
- Notes
- -----
- Ideally, we'd just run the normal typing tests in an environment where
- typing-extensions is not installed, but unfortunatelly this is currently
- impossible as it is an indirect hard dependency of pytest.
-
- """
- p = subprocess.run([sys.executable, '-c', CODE], capture_output=True)
- if p.returncode:
- raise AssertionError(
- f"Non-zero return code: {p.returncode!r}\n\n{p.stderr.decode()}"
- )
-