summaryrefslogtreecommitdiff
path: root/numpy
diff options
context:
space:
mode:
Diffstat (limited to 'numpy')
-rw-r--r--numpy/__init__.cython-30.pxd4
-rw-r--r--numpy/__init__.pxd4
-rw-r--r--numpy/__init__.py19
-rw-r--r--numpy/__init__.pyi605
-rw-r--r--numpy/char.pyi53
-rw-r--r--numpy/core/_add_newdocs.py172
-rw-r--r--numpy/core/_asarray.py94
-rw-r--r--numpy/core/arrayprint.py3
-rw-r--r--numpy/core/code_generators/generate_umath.py8
-rw-r--r--numpy/core/fromnumeric.py32
-rw-r--r--numpy/core/function_base.py4
-rw-r--r--numpy/core/function_base.pyi56
-rw-r--r--numpy/core/include/numpy/arrayscalars.h3
-rw-r--r--numpy/core/include/numpy/ndarraytypes.h4
-rw-r--r--numpy/core/include/numpy/npy_cpu.h3
-rw-r--r--numpy/core/multiarray.py14
-rw-r--r--numpy/core/numeric.py76
-rw-r--r--numpy/core/overrides.py21
-rw-r--r--numpy/core/records.py10
-rw-r--r--numpy/core/setup.py41
-rw-r--r--numpy/core/shape_base.py3
-rw-r--r--numpy/core/src/common/array_assign.c5
-rw-r--r--numpy/core/src/common/npy_binsearch.h.src8
-rw-r--r--numpy/core/src/common/npy_cblas.h35
-rw-r--r--numpy/core/src/common/npy_cpu_dispatch.h33
-rw-r--r--numpy/core/src/common/npy_cpu_features.c.src76
-rw-r--r--numpy/core/src/common/npy_partition.h.src4
-rw-r--r--numpy/core/src/common/npy_sort.h.src52
-rw-r--r--numpy/core/src/common/simd/avx2/arithmetic.h44
-rw-r--r--numpy/core/src/common/simd/avx512/arithmetic.h16
-rw-r--r--numpy/core/src/common/simd/neon/arithmetic.h43
-rw-r--r--numpy/core/src/common/simd/sse/arithmetic.h57
-rw-r--r--numpy/core/src/common/simd/vsx/arithmetic.h16
-rw-r--r--numpy/core/src/multiarray/_multiarray_tests.c.src32
-rw-r--r--numpy/core/src/multiarray/alloc.c5
-rw-r--r--numpy/core/src/multiarray/array_coercion.c2
-rw-r--r--numpy/core/src/multiarray/arrayfunction_override.c170
-rw-r--r--numpy/core/src/multiarray/arrayfunction_override.h4
-rw-r--r--numpy/core/src/multiarray/arrayobject.c2
-rw-r--r--numpy/core/src/multiarray/arraytypes.c.src28
-rw-r--r--numpy/core/src/multiarray/buffer.c4
-rw-r--r--numpy/core/src/multiarray/calculation.c4
-rw-r--r--numpy/core/src/multiarray/common.c44
-rw-r--r--numpy/core/src/multiarray/common.h38
-rw-r--r--numpy/core/src/multiarray/compiled_base.c2
-rw-r--r--numpy/core/src/multiarray/conversion_utils.c3
-rw-r--r--numpy/core/src/multiarray/convert.c9
-rw-r--r--numpy/core/src/multiarray/convert_datatype.c32
-rw-r--r--numpy/core/src/multiarray/ctors.c133
-rw-r--r--numpy/core/src/multiarray/datetime.c195
-rw-r--r--numpy/core/src/multiarray/datetime_busdaycal.c2
-rw-r--r--numpy/core/src/multiarray/descriptor.c64
-rw-r--r--numpy/core/src/multiarray/dragon4.c4
-rw-r--r--numpy/core/src/multiarray/dtype_transfer.c21
-rw-r--r--numpy/core/src/multiarray/dtypemeta.c2
-rw-r--r--numpy/core/src/multiarray/einsum.c.src1895
-rw-r--r--numpy/core/src/multiarray/einsum_debug.h28
-rw-r--r--numpy/core/src/multiarray/einsum_sumprod.c.src1897
-rw-r--r--numpy/core/src/multiarray/einsum_sumprod.h12
-rw-r--r--numpy/core/src/multiarray/flagsobject.c4
-rw-r--r--numpy/core/src/multiarray/getset.c25
-rw-r--r--numpy/core/src/multiarray/hashdescr.c2
-rw-r--r--numpy/core/src/multiarray/iterators.c8
-rw-r--r--numpy/core/src/multiarray/mapping.c14
-rw-r--r--numpy/core/src/multiarray/methods.c51
-rw-r--r--numpy/core/src/multiarray/multiarraymodule.c323
-rw-r--r--numpy/core/src/multiarray/multiarraymodule.h2
-rw-r--r--numpy/core/src/multiarray/nditer_constr.c16
-rw-r--r--numpy/core/src/multiarray/nditer_pywrap.c40
-rw-r--r--numpy/core/src/multiarray/number.c8
-rw-r--r--numpy/core/src/multiarray/refcount.c16
-rw-r--r--numpy/core/src/multiarray/scalarapi.c10
-rw-r--r--numpy/core/src/multiarray/scalartypes.c.src92
-rw-r--r--numpy/core/src/multiarray/shape.c16
-rw-r--r--numpy/core/src/multiarray/strfuncs.c186
-rw-r--r--numpy/core/src/multiarray/temp_elide.c4
-rw-r--r--numpy/core/src/multiarray/usertypes.c4
-rw-r--r--numpy/core/src/npymath/npy_math_private.h1
-rw-r--r--numpy/core/src/npysort/binsearch.c.src8
-rw-r--r--numpy/core/src/npysort/heapsort.c.src12
-rw-r--r--numpy/core/src/npysort/mergesort.c.src12
-rw-r--r--numpy/core/src/npysort/quicksort.c.src12
-rw-r--r--numpy/core/src/npysort/radixsort.c.src8
-rw-r--r--numpy/core/src/npysort/selection.c.src2
-rw-r--r--numpy/core/src/npysort/timsort.c.src16
-rw-r--r--numpy/core/src/umath/_rational_tests.c.src37
-rw-r--r--numpy/core/src/umath/_umath_tests.c.src16
-rw-r--r--numpy/core/src/umath/extobj.c8
-rw-r--r--numpy/core/src/umath/funcs.inc.src4
-rw-r--r--numpy/core/src/umath/override.c2
-rw-r--r--numpy/core/src/umath/reduction.c1
-rw-r--r--numpy/core/src/umath/ufunc_object.c55
-rw-r--r--numpy/core/src/umath/ufunc_type_resolution.c45
-rw-r--r--numpy/core/src/umath/umathmodule.c43
-rw-r--r--numpy/core/tests/test_array_coercion.py22
-rw-r--r--numpy/core/tests/test_datetime.py17
-rw-r--r--numpy/core/tests/test_deprecations.py40
-rw-r--r--numpy/core/tests/test_multiarray.py22
-rw-r--r--numpy/core/tests/test_numeric.py129
-rw-r--r--numpy/core/tests/test_overrides.py167
-rw-r--r--numpy/core/tests/test_regression.py38
-rw-r--r--numpy/core/tests/test_shape_base.py39
-rw-r--r--numpy/core/tests/test_umath.py2
-rw-r--r--numpy/ctypeslib.py8
-rw-r--r--numpy/ctypeslib.pyi7
-rw-r--r--numpy/distutils/__init__.pyi4
-rw-r--r--numpy/distutils/ccompiler_opt.py94
-rw-r--r--numpy/distutils/checks/extra_avx512bw_mask.c18
-rw-r--r--numpy/distutils/checks/extra_avx512f_reduce.c41
-rw-r--r--numpy/distutils/fcompiler/__init__.py59
-rw-r--r--numpy/distutils/fcompiler/gnu.py2
-rw-r--r--numpy/distutils/system_info.py16
-rw-r--r--numpy/distutils/tests/test_ccompiler_opt_conf.py51
-rw-r--r--numpy/distutils/unixccompiler.py3
-rw-r--r--numpy/doc/basics.py341
-rw-r--r--numpy/doc/broadcasting.py180
-rw-r--r--numpy/doc/byteswapping.py155
-rw-r--r--numpy/doc/constants.py4
-rw-r--r--numpy/doc/creation.py143
-rw-r--r--numpy/doc/dispatch.py271
-rw-r--r--numpy/doc/glossary.py475
-rw-r--r--numpy/doc/indexing.py456
-rw-r--r--numpy/doc/internals.py162
-rw-r--r--numpy/doc/misc.py226
-rw-r--r--numpy/doc/structured_arrays.py646
-rw-r--r--numpy/doc/subclassing.py752
-rw-r--r--numpy/emath.pyi11
-rw-r--r--numpy/f2py/__init__.pyi5
-rw-r--r--numpy/f2py/cfuncs.py30
-rwxr-xr-xnumpy/f2py/rules.py2
-rw-r--r--numpy/f2py/src/test/foomodule.c2
-rw-r--r--numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c29
-rw-r--r--numpy/fft/__init__.pyi20
-rw-r--r--numpy/fft/helper.py3
-rw-r--r--numpy/fft/tests/test_helper.py5
-rw-r--r--numpy/lib/__init__.pyi177
-rw-r--r--numpy/lib/_iotools.py2
-rw-r--r--numpy/lib/arraysetops.py6
-rw-r--r--numpy/lib/function_base.py83
-rw-r--r--numpy/lib/index_tricks.py9
-rw-r--r--numpy/lib/nanfunctions.py18
-rw-r--r--numpy/lib/npyio.py137
-rw-r--r--numpy/lib/scimath.py16
-rw-r--r--numpy/lib/shape_base.py2
-rw-r--r--numpy/lib/tests/test_financial_expired.py13
-rw-r--r--numpy/lib/tests/test_function_base.py12
-rw-r--r--numpy/lib/tests/test_io.py49
-rw-r--r--numpy/lib/twodim_base.py37
-rw-r--r--numpy/linalg/__init__.pyi23
-rw-r--r--numpy/linalg/umath_linalg.c.src2
-rw-r--r--numpy/ma/__init__.pyi225
-rw-r--r--numpy/ma/tests/test_core.py2
-rw-r--r--numpy/ma/timer_comparison.py4
-rw-r--r--numpy/matrixlib/__init__.pyi6
-rw-r--r--numpy/polynomial/__init__.pyi9
-rw-r--r--numpy/polynomial/chebyshev.py32
-rw-r--r--numpy/polynomial/hermite.py28
-rw-r--r--numpy/polynomial/hermite_e.py30
-rw-r--r--numpy/polynomial/laguerre.py30
-rw-r--r--numpy/polynomial/legendre.py28
-rw-r--r--numpy/polynomial/polynomial.py29
-rw-r--r--numpy/random/__init__.pyi61
-rw-r--r--numpy/random/_generator.pyx306
-rw-r--r--numpy/random/mtrand.pyx84
-rw-r--r--numpy/random/tests/test_generator_mt19937.py50
-rw-r--r--numpy/rec.pyi5
-rw-r--r--numpy/testing/__init__.pyi44
-rw-r--r--numpy/testing/tests/test_utils.py2
-rw-r--r--numpy/tests/test_public_api.py23
-rw-r--r--numpy/typing/__init__.py5
-rw-r--r--numpy/typing/setup.py (renamed from numpy/tests/setup.py)5
-rw-r--r--numpy/typing/tests/__init__.py0
-rw-r--r--numpy/typing/tests/data/fail/array_like.py (renamed from numpy/tests/typing/fail/array_like.py)0
-rw-r--r--numpy/typing/tests/data/fail/dtype.py (renamed from numpy/tests/typing/fail/dtype.py)0
-rw-r--r--numpy/typing/tests/data/fail/flatiter.py25
-rw-r--r--numpy/typing/tests/data/fail/fromnumeric.py (renamed from numpy/tests/typing/fail/fromnumeric.py)28
-rw-r--r--numpy/typing/tests/data/fail/linspace.py13
-rw-r--r--numpy/typing/tests/data/fail/modules.py3
-rw-r--r--numpy/typing/tests/data/fail/ndarray.py (renamed from numpy/tests/typing/fail/ndarray.py)0
-rw-r--r--numpy/typing/tests/data/fail/numerictypes.py (renamed from numpy/tests/typing/fail/numerictypes.py)0
-rw-r--r--numpy/typing/tests/data/fail/scalars.py (renamed from numpy/tests/typing/fail/scalars.py)11
-rw-r--r--numpy/typing/tests/data/fail/simple.py (renamed from numpy/tests/typing/fail/simple.py)0
-rw-r--r--numpy/typing/tests/data/fail/ufuncs.py (renamed from numpy/tests/typing/fail/ufuncs.py)0
-rw-r--r--numpy/typing/tests/data/fail/warnings_and_errors.py (renamed from numpy/tests/typing/fail/warnings_and_errors.py)0
-rw-r--r--numpy/typing/tests/data/mypy.ini (renamed from numpy/tests/typing/mypy.ini)0
-rw-r--r--numpy/typing/tests/data/pass/array_like.py (renamed from numpy/tests/typing/pass/array_like.py)0
-rw-r--r--numpy/typing/tests/data/pass/dtype.py (renamed from numpy/tests/typing/pass/dtype.py)0
-rw-r--r--numpy/typing/tests/data/pass/flatiter.py14
-rw-r--r--numpy/typing/tests/data/pass/fromnumeric.py (renamed from numpy/tests/typing/pass/fromnumeric.py)75
-rw-r--r--numpy/typing/tests/data/pass/linspace.py22
-rw-r--r--numpy/typing/tests/data/pass/literal.py43
-rw-r--r--numpy/typing/tests/data/pass/ndarray_conversion.py (renamed from numpy/tests/typing/pass/ndarray_conversion.py)0
-rw-r--r--numpy/typing/tests/data/pass/ndarray_shape_manipulation.py (renamed from numpy/tests/typing/pass/ndarray_shape_manipulation.py)0
-rw-r--r--numpy/typing/tests/data/pass/numerictypes.py (renamed from numpy/tests/typing/pass/numerictypes.py)0
-rw-r--r--numpy/typing/tests/data/pass/scalars.py (renamed from numpy/tests/typing/pass/scalars.py)37
-rw-r--r--numpy/typing/tests/data/pass/simple.py (renamed from numpy/tests/typing/pass/simple.py)0
-rw-r--r--numpy/typing/tests/data/pass/simple_py3.py (renamed from numpy/tests/typing/pass/simple_py3.py)0
-rw-r--r--numpy/typing/tests/data/pass/ufuncs.py (renamed from numpy/tests/typing/pass/ufuncs.py)0
-rw-r--r--numpy/typing/tests/data/pass/warnings_and_errors.py (renamed from numpy/tests/typing/pass/warnings_and_errors.py)0
-rw-r--r--numpy/typing/tests/data/reveal/constants.py (renamed from numpy/tests/typing/reveal/constants.py)0
-rw-r--r--numpy/typing/tests/data/reveal/flatiter.py14
-rw-r--r--numpy/typing/tests/data/reveal/fromnumeric.py (renamed from numpy/tests/typing/reveal/fromnumeric.py)73
-rw-r--r--numpy/typing/tests/data/reveal/linspace.py6
-rw-r--r--numpy/typing/tests/data/reveal/modules.py20
-rw-r--r--numpy/typing/tests/data/reveal/ndarray_conversion.py (renamed from numpy/tests/typing/reveal/ndarray_conversion.py)2
-rw-r--r--numpy/typing/tests/data/reveal/ndarray_shape_manipulation.py (renamed from numpy/tests/typing/reveal/ndarray_shape_manipulation.py)0
-rw-r--r--numpy/typing/tests/data/reveal/numerictypes.py (renamed from numpy/tests/typing/reveal/numerictypes.py)0
-rw-r--r--numpy/typing/tests/data/reveal/scalars.py (renamed from numpy/tests/typing/reveal/scalars.py)3
-rw-r--r--numpy/typing/tests/data/reveal/warnings_and_errors.py (renamed from numpy/tests/typing/reveal/warnings_and_errors.py)0
-rw-r--r--numpy/typing/tests/test_typing.py (renamed from numpy/tests/test_typing.py)18
-rw-r--r--numpy/version.pyi7
211 files changed, 6525 insertions, 7533 deletions
diff --git a/numpy/__init__.cython-30.pxd b/numpy/__init__.cython-30.pxd
index 24f77260a..a2c451bc1 100644
--- a/numpy/__init__.cython-30.pxd
+++ b/numpy/__init__.cython-30.pxd
@@ -329,8 +329,8 @@ cdef extern from "numpy/arrayobject.h":
ctypedef long double npy_float128
ctypedef struct npy_cfloat:
- double real
- double imag
+ float real
+ float imag
ctypedef struct npy_cdouble:
double real
diff --git a/numpy/__init__.pxd b/numpy/__init__.pxd
index 32a34eb22..fd704b7e3 100644
--- a/numpy/__init__.pxd
+++ b/numpy/__init__.pxd
@@ -290,8 +290,8 @@ cdef extern from "numpy/arrayobject.h":
ctypedef long double npy_float128
ctypedef struct npy_cfloat:
- double real
- double imag
+ float real
+ float imag
ctypedef struct npy_cdouble:
double real
diff --git a/numpy/__init__.py b/numpy/__init__.py
index c594928ce..41c3dc42d 100644
--- a/numpy/__init__.py
+++ b/numpy/__init__.py
@@ -215,12 +215,11 @@ else:
del Arrayterator
# These names were removed in NumPy 1.20. For at least one release,
- # attempts to access these names in the numpy namespace will have an
- # error message that refers to NEP 32 and points to the numpy_financial
- # library.
+ # attempts to access these names in the numpy namespace will trigger
+ # a warning, and calling the function will raise an exception.
_financial_names = ['fv', 'ipmt', 'irr', 'mirr', 'nper', 'npv', 'pmt',
'ppmt', 'pv', 'rate']
- __expired_attrs__ = {
+ __expired_functions__ = {
name: (f'In accordance with NEP 32, the function {name} was removed '
'from NumPy version 1.20. A replacement for this function '
'is available in the numpy_financial library: '
@@ -241,13 +240,19 @@ else:
# module level getattr is only supported in 3.7 onwards
# https://www.python.org/dev/peps/pep-0562/
def __getattr__(attr):
- # Raise AttributeError for expired attributes
+ # Warn for expired attributes, and return a dummy function
+ # that always raises an exception.
try:
- msg = __expired_attrs__[attr]
+ msg = __expired_functions__[attr]
except KeyError:
pass
else:
- raise AttributeError(msg)
+ warnings.warn(msg, RuntimeWarning)
+
+ def _expired(*args, **kwds):
+ raise RuntimeError(msg)
+
+ return _expired
# Emit warnings for deprecated attributes
try:
diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi
index fad5e1774..bf54207a4 100644
--- a/numpy/__init__.pyi
+++ b/numpy/__init__.pyi
@@ -23,6 +23,7 @@ from typing import (
Sequence,
Sized,
SupportsAbs,
+ SupportsBytes,
SupportsComplex,
SupportsFloat,
SupportsInt,
@@ -33,21 +34,48 @@ from typing import (
Union,
)
-if sys.version_info[0] < 3:
- class SupportsBytes: ...
-
-else:
- from typing import SupportsBytes
-
if sys.version_info >= (3, 8):
- from typing import Literal, Protocol
+ from typing import Literal, Protocol, SupportsIndex
else:
from typing_extensions import Literal, Protocol
+ class SupportsIndex(Protocol):
+ def __index__(self) -> int: ...
+
+# Ensures that the stubs are picked up
+from . import (
+ char,
+ compat,
+ core,
+ ctypeslib,
+ emath,
+ fft,
+ lib,
+ linalg,
+ ma,
+ matrixlib,
+ polynomial,
+ random,
+ rec,
+ testing,
+ version,
+)
+
+from numpy.core.function_base import (
+ linspace,
+ logspace,
+ geomspace,
+)
+
+# Add an object to `__all__` if their stubs are defined in an external file;
+# their stubs will not be recognized otherwise.
+# NOTE: This is redundant for objects defined within this file.
+__all__ = ["linspace", "logspace", "geomspace"]
# TODO: remove when the full numpy namespace is defined
def __getattr__(name: str) -> Any: ...
_NdArraySubClass = TypeVar("_NdArraySubClass", bound=ndarray)
+_ByteOrder = Literal["S", "<", ">", "=", "|", "L", "B", "N", "I"]
class dtype:
names: Optional[Tuple[str, ...]]
@@ -103,7 +131,7 @@ class dtype:
def ndim(self) -> int: ...
@property
def subdtype(self) -> Optional[Tuple[dtype, _Shape]]: ...
- def newbyteorder(self, new_order: str = ...) -> dtype: ...
+ def newbyteorder(self, __new_order: _ByteOrder = ...) -> dtype: ...
# Leave str and type for end to avoid having to use `builtins.str`
# everywhere. See https://github.com/python/mypy/issues/3775
@property
@@ -143,6 +171,14 @@ class _flagsobj:
def __getitem__(self, key: str) -> bool: ...
def __setitem__(self, key: str, value: bool) -> None: ...
+_ArrayLikeInt = Union[
+ int,
+ integer,
+ Sequence[Union[int, integer]],
+ Sequence[Sequence[Any]], # TODO: wait for support for recursive types
+ ndarray
+]
+
_FlatIterSelf = TypeVar("_FlatIterSelf", bound=flatiter)
class flatiter(Generic[_ArraySelf]):
@@ -155,6 +191,18 @@ class flatiter(Generic[_ArraySelf]):
def copy(self) -> _ArraySelf: ...
def __iter__(self: _FlatIterSelf) -> _FlatIterSelf: ...
def __next__(self) -> generic: ...
+ def __len__(self) -> int: ...
+ @overload
+ def __getitem__(self, key: Union[int, integer]) -> generic: ...
+ @overload
+ def __getitem__(
+ self, key: Union[_ArrayLikeInt, slice, ellipsis],
+ ) -> _ArraySelf: ...
+ def __array__(self, __dtype: DtypeLike = ...) -> ndarray: ...
+
+_OrderKACF = Optional[Literal["K", "A", "C", "F"]]
+_OrderACF = Optional[Literal["A", "C", "F"]]
+_OrderCF = Optional[Literal["C", "F"]]
_ArraySelf = TypeVar("_ArraySelf", bound=_ArrayOrScalarCommon)
@@ -187,18 +235,12 @@ class _ArrayOrScalarCommon(
def __int__(self) -> int: ...
def __float__(self) -> float: ...
def __complex__(self) -> complex: ...
- if sys.version_info[0] < 3:
- def __oct__(self) -> str: ...
- def __hex__(self) -> str: ...
- def __nonzero__(self) -> bool: ...
- def __unicode__(self) -> Text: ...
- else:
- def __bool__(self) -> bool: ...
- def __bytes__(self) -> bytes: ...
+ def __bool__(self) -> bool: ...
+ def __bytes__(self) -> bytes: ...
def __str__(self) -> str: ...
def __repr__(self) -> str: ...
- def __copy__(self: _ArraySelf, order: str = ...) -> _ArraySelf: ...
- def __deepcopy__(self: _ArraySelf, memo: dict) -> _ArraySelf: ...
+ def __copy__(self: _ArraySelf) -> _ArraySelf: ...
+ def __deepcopy__(self: _ArraySelf, __memo: Optional[dict] = ...) -> _ArraySelf: ...
def __lt__(self, other): ...
def __le__(self, other): ...
def __eq__(self, other): ...
@@ -207,58 +249,150 @@ class _ArrayOrScalarCommon(
def __ge__(self, other): ...
def __add__(self, other): ...
def __radd__(self, other): ...
- def __iadd__(self, other): ...
def __sub__(self, other): ...
def __rsub__(self, other): ...
- def __isub__(self, other): ...
def __mul__(self, other): ...
def __rmul__(self, other): ...
- def __imul__(self, other): ...
- if sys.version_info[0] < 3:
- def __div__(self, other): ...
- def __rdiv__(self, other): ...
- def __idiv__(self, other): ...
def __truediv__(self, other): ...
def __rtruediv__(self, other): ...
- def __itruediv__(self, other): ...
def __floordiv__(self, other): ...
def __rfloordiv__(self, other): ...
- def __ifloordiv__(self, other): ...
def __mod__(self, other): ...
def __rmod__(self, other): ...
- def __imod__(self, other): ...
def __divmod__(self, other): ...
def __rdivmod__(self, other): ...
# NumPy's __pow__ doesn't handle a third argument
def __pow__(self, other): ...
def __rpow__(self, other): ...
- def __ipow__(self, other): ...
def __lshift__(self, other): ...
def __rlshift__(self, other): ...
- def __ilshift__(self, other): ...
def __rshift__(self, other): ...
def __rrshift__(self, other): ...
- def __irshift__(self, other): ...
def __and__(self, other): ...
def __rand__(self, other): ...
- def __iand__(self, other): ...
def __xor__(self, other): ...
def __rxor__(self, other): ...
- def __ixor__(self, other): ...
def __or__(self, other): ...
def __ror__(self, other): ...
- def __ior__(self, other): ...
- if sys.version_info[:2] >= (3, 5):
- def __matmul__(self, other): ...
- def __rmatmul__(self, other): ...
def __neg__(self: _ArraySelf) -> _ArraySelf: ...
def __pos__(self: _ArraySelf) -> _ArraySelf: ...
def __abs__(self: _ArraySelf) -> _ArraySelf: ...
def __invert__(self: _ArraySelf) -> _ArraySelf: ...
- # TODO(shoyer): remove when all methods are defined
- def __getattr__(self, name) -> Any: ...
+ def astype(
+ self: _ArraySelf,
+ dtype: DtypeLike,
+ order: _OrderKACF = ...,
+ casting: _Casting = ...,
+ subok: bool = ...,
+ copy: bool = ...,
+ ) -> _ArraySelf: ...
+ def byteswap(self: _ArraySelf, inplace: bool = ...) -> _ArraySelf: ...
+ def copy(self: _ArraySelf, order: _OrderKACF = ...) -> _ArraySelf: ...
+ def dump(self, file: str) -> None: ...
+ def dumps(self) -> bytes: ...
+ def fill(self, value: Any) -> None: ...
+ @property
+ def flat(self: _ArraySelf) -> flatiter[_ArraySelf]: ...
+ def flatten(self: _ArraySelf, order: _OrderKACF = ...) -> _ArraySelf: ...
+ def getfield(
+ self: _ArraySelf, dtype: DtypeLike, offset: int = ...
+ ) -> _ArraySelf: ...
+ @overload
+ def item(self, *args: int) -> Any: ...
+ @overload
+ def item(self, args: Tuple[int, ...]) -> Any: ...
+ @overload
+ def itemset(self, __value: Any) -> None: ...
+ @overload
+ def itemset(self, __item: _ShapeLike, __value: Any) -> None: ...
+ def ravel(self: _ArraySelf, order: _OrderKACF = ...) -> _ArraySelf: ...
+ @overload
+ def reshape(
+ self: _ArraySelf, shape: Sequence[int], *, order: _OrderACF = ...
+ ) -> _ArraySelf: ...
+ @overload
+ def reshape(
+ self: _ArraySelf, *shape: int, order: _OrderACF = ...
+ ) -> _ArraySelf: ...
+ @overload
+ def resize(self, new_shape: Sequence[int], *, refcheck: bool = ...) -> None: ...
+ @overload
+ def resize(self, *new_shape: int, refcheck: bool = ...) -> None: ...
+ def setflags(
+ self, write: bool = ..., align: bool = ..., uic: bool = ...
+ ) -> None: ...
+ def squeeze(
+ self: _ArraySelf, axis: Union[int, Tuple[int, ...]] = ...
+ ) -> _ArraySelf: ...
+ def swapaxes(self: _ArraySelf, axis1: int, axis2: int) -> _ArraySelf: ...
+ def tobytes(self, order: _OrderKACF = ...) -> bytes: ...
+ def tofile(
+ self, fid: Union[IO[bytes], str], sep: str = ..., format: str = ...
+ ) -> None: ...
+ # generics and 0d arrays return builtin scalars
+ def tolist(self) -> Any: ...
+ @overload
+ def transpose(self: _ArraySelf, axes: Sequence[int]) -> _ArraySelf: ...
+ @overload
+ def transpose(self: _ArraySelf, *axes: int) -> _ArraySelf: ...
+ @overload
+ def view(self, type: Type[_NdArraySubClass]) -> _NdArraySubClass: ...
+ @overload
+ def view(self: _ArraySelf, dtype: DtypeLike = ...) -> _ArraySelf: ...
+ @overload
+ def view(
+ self, dtype: DtypeLike, type: Type[_NdArraySubClass]
+ ) -> _NdArraySubClass: ...
+
+ # TODO: Add proper signatures
+ def __getitem__(self, key) -> Any: ...
+ @property
+ def __array_interface__(self): ...
+ @property
+ def __array_priority__(self): ...
+ @property
+ def __array_struct__(self): ...
+ def __array_wrap__(array, context=...): ...
+ def __setstate__(self, __state): ...
+ def all(self, axis=..., out=..., keepdims=...): ...
+ def any(self, axis=..., out=..., keepdims=...): ...
+ def argmax(self, axis=..., out=...): ...
+ def argmin(self, axis=..., out=...): ...
+ def argpartition(self, kth, axis=..., kind=..., order=...): ...
+ def argsort(self, axis=..., kind=..., order=...): ...
+ def choose(self, choices, out=..., mode=...): ...
+ def clip(self, min=..., max=..., out=..., **kwargs): ...
+ def compress(self, condition, axis=..., out=...): ...
+ def conj(self): ...
+ def conjugate(self): ...
+ def cumprod(self, axis=..., dtype=..., out=...): ...
+ def cumsum(self, axis=..., dtype=..., out=...): ...
+ def diagonal(self, offset=..., axis1=..., axis2=...): ...
+ def dot(self, b, out=...): ...
+ def max(self, axis=..., out=..., keepdims=..., initial=..., where=...): ...
+ def mean(self, axis=..., dtype=..., out=..., keepdims=...): ...
+ def min(self, axis=..., out=..., keepdims=..., initial=..., where=...): ...
+ def newbyteorder(self, new_order=...): ...
+ def nonzero(self): ...
+ def partition(self, kth, axis=..., kind=..., order=...): ...
+ def prod(self, axis=..., dtype=..., out=..., keepdims=..., initial=..., where=...): ...
+ def ptp(self, axis=..., out=..., keepdims=...): ...
+ def put(self, indices, values, mode=...): ...
+ def repeat(self, repeats, axis=...): ...
+ def round(self, decimals=..., out=...): ...
+ def searchsorted(self, v, side=..., sorter=...): ...
+ def setfield(self, val, dtype, offset=...): ...
+ def sort(self, axis=..., kind=..., order=...): ...
+ def std(self, axis=..., dtype=..., out=..., ddof=..., keepdims=...): ...
+ def sum(self, axis=..., dtype=..., out=..., keepdims=..., initial=..., where=...): ...
+ def take(self, indices, axis=..., out=..., mode=...): ...
+ # NOTE: `tostring()` is deprecated and therefore excluded
+ # def tostring(self, order=...): ...
+ def trace(self, offset=..., axis1=..., axis2=..., dtype=..., out=...): ...
+ def var(self, axis=..., dtype=..., out=..., ddof=..., keepdims=...): ...
_BufferType = Union[ndarray, bytes, bytearray, memoryview]
+_Casting = Literal["no", "equiv", "safe", "same_kind", "unsafe"]
class ndarray(_ArrayOrScalarCommon, Iterable, Sized, Container):
@property
@@ -276,7 +410,7 @@ class ndarray(_ArrayOrScalarCommon, Iterable, Sized, Container):
buffer: _BufferType = ...,
offset: int = ...,
strides: _ShapeLike = ...,
- order: Optional[str] = ...,
+ order: _OrderKACF = ...,
) -> _ArraySelf: ...
@property
def dtype(self) -> _Dtype: ...
@@ -287,82 +421,33 @@ class ndarray(_ArrayOrScalarCommon, Iterable, Sized, Container):
@shape.setter
def shape(self, value: _ShapeLike): ...
@property
- def flat(self: _ArraySelf) -> flatiter[_ArraySelf]: ...
- @property
def strides(self) -> _Shape: ...
@strides.setter
def strides(self, value: _ShapeLike): ...
- # Array conversion
- @overload
- def item(self, *args: int) -> Any: ...
- @overload
- def item(self, args: Tuple[int, ...]) -> Any: ...
- def tolist(self) -> List[Any]: ...
- @overload
- def itemset(self, __value: Any) -> None: ...
- @overload
- def itemset(self, __item: _ShapeLike, __value: Any) -> None: ...
- def tobytes(self, order: Optional[str] = ...) -> bytes: ...
- def tofile(
- self, fid: Union[IO[bytes], str], sep: str = ..., format: str = ...
- ) -> None: ...
- def dump(self, file: str) -> None: ...
- def dumps(self) -> bytes: ...
- def astype(
- self: _ArraySelf,
- dtype: DtypeLike,
- order: str = ...,
- casting: str = ...,
- subok: bool = ...,
- copy: bool = ...,
- ) -> _ArraySelf: ...
- def byteswap(self: _ArraySelf, inplace: bool = ...) -> _ArraySelf: ...
- def copy(self: _ArraySelf, order: str = ...) -> _ArraySelf: ...
- @overload
- def view(self, type: Type[_NdArraySubClass]) -> _NdArraySubClass: ...
- @overload
- def view(self: _ArraySelf, dtype: DtypeLike = ...) -> _ArraySelf: ...
- @overload
- def view(
- self, dtype: DtypeLike, type: Type[_NdArraySubClass]
- ) -> _NdArraySubClass: ...
- def getfield(
- self: _ArraySelf, dtype: DtypeLike, offset: int = ...
- ) -> _ArraySelf: ...
- def setflags(
- self, write: bool = ..., align: bool = ..., uic: bool = ...
- ) -> None: ...
- def fill(self, value: Any) -> None: ...
- # Shape manipulation
- @overload
- def reshape(
- self: _ArraySelf, shape: Sequence[int], *, order: str = ...
- ) -> _ArraySelf: ...
- @overload
- def reshape(self: _ArraySelf, *shape: int, order: str = ...) -> _ArraySelf: ...
- @overload
- def resize(self, new_shape: Sequence[int], *, refcheck: bool = ...) -> None: ...
- @overload
- def resize(self, *new_shape: int, refcheck: bool = ...) -> None: ...
- @overload
- def transpose(self: _ArraySelf, axes: Sequence[int]) -> _ArraySelf: ...
- @overload
- def transpose(self: _ArraySelf, *axes: int) -> _ArraySelf: ...
- def swapaxes(self: _ArraySelf, axis1: int, axis2: int) -> _ArraySelf: ...
- def flatten(self: _ArraySelf, order: str = ...) -> _ArraySelf: ...
- def ravel(self: _ArraySelf, order: str = ...) -> _ArraySelf: ...
- def squeeze(
- self: _ArraySelf, axis: Union[int, Tuple[int, ...]] = ...
- ) -> _ArraySelf: ...
# Many of these special methods are irrelevant currently, since protocols
# aren't supported yet. That said, I'm adding them for completeness.
# https://docs.python.org/3/reference/datamodel.html
def __len__(self) -> int: ...
- def __getitem__(self, key) -> Any: ...
def __setitem__(self, key, value): ...
def __iter__(self) -> Any: ...
def __contains__(self, key) -> bool: ...
def __index__(self) -> int: ...
+ def __matmul__(self, other): ...
+ def __imatmul__(self, other): ...
+ def __rmatmul__(self, other): ...
+ # `np.generic` does not support inplace operations
+ def __iadd__(self, other): ...
+ def __isub__(self, other): ...
+ def __imul__(self, other): ...
+ def __itruediv__(self, other): ...
+ def __ifloordiv__(self, other): ...
+ def __imod__(self, other): ...
+ def __ipow__(self, other): ...
+ def __ilshift__(self, other): ...
+ def __irshift__(self, other): ...
+ def __iand__(self, other): ...
+ def __ixor__(self, other): ...
+ def __ior__(self, other): ...
# NOTE: while `np.generic` is not technically an instance of `ABCMeta`,
# the `@abstractmethod` decorator is herein used to (forcefully) deny
@@ -372,65 +457,87 @@ class ndarray(_ArrayOrScalarCommon, Iterable, Sized, Container):
# See https://github.com/numpy/numpy-stubs/pull/80 for more details.
+_CharLike = Union[str, bytes]
+
class generic(_ArrayOrScalarCommon):
@abstractmethod
def __init__(self, *args: Any, **kwargs: Any) -> None: ...
@property
def base(self) -> None: ...
-class _real_generic(generic): # type: ignore
+class number(generic): # type: ignore
@property
def real(self: _ArraySelf) -> _ArraySelf: ...
@property
def imag(self: _ArraySelf) -> _ArraySelf: ...
-class number(generic): ... # type: ignore
-
-class bool_(_real_generic):
+class bool_(generic):
def __init__(self, __value: object = ...) -> None: ...
+ @property
+ def real(self: _ArraySelf) -> _ArraySelf: ...
+ @property
+ def imag(self: _ArraySelf) -> _ArraySelf: ...
class object_(generic):
def __init__(self, __value: object = ...) -> None: ...
+ @property
+ def real(self: _ArraySelf) -> _ArraySelf: ...
+ @property
+ def imag(self: _ArraySelf) -> _ArraySelf: ...
-class datetime64:
+class datetime64(generic):
@overload
def __init__(
self,
- __value: Union[None, datetime64, str, dt.datetime] = ...,
- __format: str = ...
+ __value: Union[None, datetime64, _CharLike, dt.datetime] = ...,
+ __format: Union[_CharLike, Tuple[_CharLike, _IntLike]] = ...,
) -> None: ...
@overload
- def __init__(self, __value: int, __format: str) -> None: ...
+ def __init__(self, __value: int, __format: Union[_CharLike, Tuple[_CharLike, _IntLike]]) -> None: ...
def __add__(self, other: Union[timedelta64, int]) -> datetime64: ...
def __sub__(self, other: Union[timedelta64, datetime64, int]) -> timedelta64: ...
+ def __rsub__(self, other: Union[datetime64, int]) -> timedelta64: ...
+
+# Support for `__index__` was added in python 3.8 (bpo-20092)
+if sys.version_info >= (3, 8):
+ _IntValue = Union[SupportsInt, _CharLike, SupportsIndex]
+ _FloatValue = Union[None, _CharLike, SupportsFloat, SupportsIndex]
+ _ComplexValue = Union[None, _CharLike, SupportsFloat, SupportsComplex, SupportsIndex]
+else:
+ _IntValue = Union[SupportsInt, _CharLike]
+ _FloatValue = Union[None, _CharLike, SupportsFloat]
+ _ComplexValue = Union[None, _CharLike, SupportsFloat, SupportsComplex]
+
+class integer(number): # type: ignore
+ # NOTE: `__index__` is technically defined in the bottom-most
+ # sub-classes (`int64`, `uint32`, etc)
+ def __index__(self) -> int: ...
-class integer(number, _real_generic): ... # type: ignore
class signedinteger(integer): ... # type: ignore
class int8(signedinteger):
- def __init__(self, __value: SupportsInt = ...) -> None: ...
+ def __init__(self, __value: _IntValue = ...) -> None: ...
class int16(signedinteger):
- def __init__(self, __value: SupportsInt = ...) -> None: ...
+ def __init__(self, __value: _IntValue = ...) -> None: ...
class int32(signedinteger):
- def __init__(self, __value: SupportsInt = ...) -> None: ...
+ def __init__(self, __value: _IntValue = ...) -> None: ...
class int64(signedinteger):
- def __init__(self, __value: SupportsInt = ...) -> None: ...
+ def __init__(self, __value: _IntValue = ...) -> None: ...
class timedelta64(signedinteger):
- def __init__(self, __value: Any = ..., __format: str = ...) -> None: ...
+ def __init__(
+ self,
+ __value: Union[None, int, _CharLike, dt.timedelta, timedelta64] = ...,
+ __format: Union[_CharLike, Tuple[_CharLike, _IntLike]] = ...,
+ ) -> None: ...
@overload
def __add__(self, other: Union[timedelta64, int]) -> timedelta64: ...
@overload
def __add__(self, other: datetime64) -> datetime64: ...
def __sub__(self, other: Union[timedelta64, int]) -> timedelta64: ...
- if sys.version_info[0] < 3:
- @overload
- def __div__(self, other: timedelta64) -> float: ...
- @overload
- def __div__(self, other: float) -> timedelta64: ...
@overload
def __truediv__(self, other: timedelta64) -> float: ...
@overload
@@ -440,72 +547,69 @@ class timedelta64(signedinteger):
class unsignedinteger(integer): ... # type: ignore
class uint8(unsignedinteger):
- def __init__(self, __value: SupportsInt = ...) -> None: ...
+ def __init__(self, __value: _IntValue = ...) -> None: ...
class uint16(unsignedinteger):
- def __init__(self, __value: SupportsInt = ...) -> None: ...
+ def __init__(self, __value: _IntValue = ...) -> None: ...
class uint32(unsignedinteger):
- def __init__(self, __value: SupportsInt = ...) -> None: ...
+ def __init__(self, __value: _IntValue = ...) -> None: ...
class uint64(unsignedinteger):
- def __init__(self, __value: SupportsInt = ...) -> None: ...
+ def __init__(self, __value: _IntValue = ...) -> None: ...
class inexact(number): ... # type: ignore
-class floating(inexact, _real_generic): ... # type: ignore
+class floating(inexact): ... # type: ignore
+
+_FloatType = TypeVar('_FloatType', bound=floating)
class float16(floating):
- def __init__(self, __value: Optional[SupportsFloat] = ...) -> None: ...
+ def __init__(self, __value: _FloatValue = ...) -> None: ...
class float32(floating):
- def __init__(self, __value: Optional[SupportsFloat] = ...) -> None: ...
+ def __init__(self, __value: _FloatValue = ...) -> None: ...
-class float64(floating):
- def __init__(self, __value: Optional[SupportsFloat] = ...) -> None: ...
+class float64(floating, float):
+ def __init__(self, __value: _FloatValue = ...) -> None: ...
-class complexfloating(inexact): ... # type: ignore
-
-class complex64(complexfloating):
- def __init__(
- self,
- __value: Union[None, SupportsInt, SupportsFloat, SupportsComplex] = ...
- ) -> None: ...
+class complexfloating(inexact, Generic[_FloatType]): # type: ignore
@property
- def real(self) -> float32: ...
+ def real(self) -> _FloatType: ... # type: ignore[override]
@property
- def imag(self) -> float32: ...
+ def imag(self) -> _FloatType: ... # type: ignore[override]
+ def __abs__(self) -> _FloatType: ... # type: ignore[override]
-class complex128(complexfloating):
- def __init__(
- self,
- __value: Union[None, SupportsInt, SupportsFloat, SupportsComplex] = ...
- ) -> None: ...
- @property
- def real(self) -> float64: ...
- @property
- def imag(self) -> float64: ...
+class complex64(complexfloating[float32]):
+ def __init__(self, __value: _ComplexValue = ...) -> None: ...
-class flexible(_real_generic): ... # type: ignore
+class complex128(complexfloating[float64], complex):
+ def __init__(self, __value: _ComplexValue = ...) -> None: ...
+
+class flexible(generic): ... # type: ignore
class void(flexible):
- def __init__(self, __value: Union[int, integer, bool_, bytes, bytes_]): ...
+ def __init__(self, __value: Union[int, integer, bool_, bytes]): ...
+ @property
+ def real(self: _ArraySelf) -> _ArraySelf: ...
+ @property
+ def imag(self: _ArraySelf) -> _ArraySelf: ...
-class character(_real_generic): ... # type: ignore
+class character(flexible): ... # type: ignore
-class bytes_(character):
+class bytes_(character, bytes):
@overload
def __init__(self, __value: object = ...) -> None: ...
@overload
def __init__(
- self, __value: Union[str, str_], encoding: str = ..., errors: str = ...
+ self, __value: str, encoding: str = ..., errors: str = ...
) -> None: ...
-class str_(character):
+class str_(character, str):
@overload
def __init__(self, __value: object = ...) -> None: ...
@overload
def __init__(
- self, __value: Union[bytes, bytes_], encoding: str = ..., errors: str = ...
+ self, __value: bytes, encoding: str = ..., errors: str = ...
) -> None: ...
# TODO(alan): Platform dependent types
@@ -521,48 +625,66 @@ def array(
dtype: DtypeLike = ...,
*,
copy: bool = ...,
- order: Optional[str] = ...,
+ order: _OrderKACF = ...,
subok: bool = ...,
ndmin: int = ...,
+ like: ArrayLike = ...,
) -> ndarray: ...
def zeros(
- shape: _ShapeLike, dtype: DtypeLike = ..., order: Optional[str] = ...
+ shape: _ShapeLike,
+ dtype: DtypeLike = ...,
+ order: _OrderCF = ...,
+ *,
+ like: ArrayLike = ...,
) -> ndarray: ...
def ones(
- shape: _ShapeLike, dtype: DtypeLike = ..., order: Optional[str] = ...
+ shape: _ShapeLike,
+ dtype: DtypeLike = ...,
+ order: _OrderCF = ...,
+ *,
+ like: ArrayLike = ...,
) -> ndarray: ...
def empty(
- shape: _ShapeLike, dtype: DtypeLike = ..., order: Optional[str] = ...
+ shape: _ShapeLike,
+ dtype: DtypeLike = ...,
+ order: _OrderCF = ...,
+ *,
+ like: ArrayLike = ...,
) -> ndarray: ...
def zeros_like(
a: ArrayLike,
dtype: DtypeLike = ...,
- order: str = ...,
+ order: _OrderKACF = ...,
subok: bool = ...,
shape: Optional[Union[int, Sequence[int]]] = ...,
) -> ndarray: ...
def ones_like(
a: ArrayLike,
dtype: DtypeLike = ...,
- order: str = ...,
+ order: _OrderKACF = ...,
subok: bool = ...,
shape: Optional[_ShapeLike] = ...,
) -> ndarray: ...
def empty_like(
a: ArrayLike,
dtype: DtypeLike = ...,
- order: str = ...,
+ order: _OrderKACF = ...,
subok: bool = ...,
shape: Optional[_ShapeLike] = ...,
) -> ndarray: ...
def full(
- shape: _ShapeLike, fill_value: Any, dtype: DtypeLike = ..., order: str = ...
+ shape: _ShapeLike,
+ fill_value: Any,
+ dtype: DtypeLike = ...,
+ order: _OrderCF = ...,
+ *,
+ like: ArrayLike = ...,
) -> ndarray: ...
def full_like(
a: ArrayLike,
fill_value: Any,
dtype: DtypeLike = ...,
- order: str = ...,
+ order: _OrderKACF = ...,
subok: bool = ...,
shape: Optional[_ShapeLike] = ...,
) -> ndarray: ...
@@ -572,8 +694,11 @@ def count_nonzero(
def isfortran(a: ndarray) -> bool: ...
def argwhere(a: ArrayLike) -> ndarray: ...
def flatnonzero(a: ArrayLike) -> ndarray: ...
-def correlate(a: ArrayLike, v: ArrayLike, mode: str = ...) -> ndarray: ...
-def convolve(a: ArrayLike, v: ArrayLike, mode: str = ...) -> ndarray: ...
+
+_CorrelateMode = Literal["valid", "same", "full"]
+
+def correlate(a: ArrayLike, v: ArrayLike, mode: _CorrelateMode = ...) -> ndarray: ...
+def convolve(a: ArrayLike, v: ArrayLike, mode: _CorrelateMode = ...) -> ndarray: ...
def outer(a: ArrayLike, b: ArrayLike, out: ndarray = ...) -> ndarray: ...
def tensordot(
a: ArrayLike,
@@ -604,11 +729,17 @@ def cross(
def indices(
dimensions: Sequence[int], dtype: dtype = ..., sparse: bool = ...
) -> Union[ndarray, Tuple[ndarray, ...]]: ...
-def fromfunction(function: Callable, shape: Tuple[int, int], **kwargs) -> Any: ...
+def fromfunction(
+ function: Callable,
+ shape: Tuple[int, int],
+ *,
+ like: ArrayLike = ...,
+ **kwargs,
+) -> Any: ...
def isscalar(element: Any) -> bool: ...
def binary_repr(num: int, width: Optional[int] = ...) -> str: ...
def base_repr(number: int, base: int = ..., padding: int = ...) -> str: ...
-def identity(n: int, dtype: DtypeLike = ...) -> ndarray: ...
+def identity(n: int, dtype: DtypeLike = ..., *, like: ArrayLike = ...) -> ndarray: ...
def allclose(
a: ArrayLike,
b: ArrayLike,
@@ -689,10 +820,8 @@ class ufunc:
axes: List[Any] = ...,
axis: int = ...,
keepdims: bool = ...,
- # TODO: make this precise when we can use Literal.
- casting: str = ...,
- # TODO: make this precise when we can use Literal.
- order: Optional[str] = ...,
+ casting: _Casting = ...,
+ order: _OrderKACF = ...,
dtype: DtypeLike = ...,
subok: bool = ...,
signature: Union[str, Tuple[str]] = ...,
@@ -871,7 +1000,6 @@ def find_common_type(
# Functions from np.core.fromnumeric
_Mode = Literal["raise", "wrap", "clip"]
-_Order = Literal["C", "F", "A"]
_PartitionKind = Literal["introselect"]
_SortKind = Literal["quicksort", "mergesort", "heapsort", "stable"]
_Side = Literal["left", "right"]
@@ -896,9 +1024,9 @@ _Number = TypeVar('_Number', bound=number)
_NumberLike = Union[int, float, complex, number, bool_]
# An array-like object consisting of integers
-_Int = Union[int, integer]
-_Bool = Union[bool, bool_]
-_IntOrBool = Union[_Int, _Bool]
+_IntLike = Union[int, integer]
+_BoolLike = Union[bool, bool_]
+_IntOrBool = Union[_IntLike, _BoolLike]
_ArrayLikeIntNested = ArrayLike # TODO: wait for support for recursive types
_ArrayLikeBoolNested = ArrayLike # TODO: wait for support for recursive types
@@ -911,8 +1039,8 @@ _ArrayLikeIntOrBool = Union[
Sequence[_ArrayLikeBoolNested],
]
_ArrayLikeBool = Union[
- _Bool,
- Sequence[_Bool],
+ _BoolLike,
+ Sequence[_BoolLike],
ndarray
]
@@ -953,7 +1081,7 @@ def take(
out: Optional[ndarray] = ...,
mode: _Mode = ...,
) -> Union[_ScalarNumpy, ndarray]: ...
-def reshape(a: ArrayLike, newshape: _ShapeLike, order: _Order = ...) -> ndarray: ...
+def reshape(a: ArrayLike, newshape: _ShapeLike, order: _OrderACF = ...) -> ndarray: ...
@overload
def choose(
a: _ScalarIntOrBool,
@@ -1067,7 +1195,7 @@ def trace(
dtype: DtypeLike = ...,
out: Optional[ndarray] = ...,
) -> Union[number, ndarray]: ...
-def ravel(a: ArrayLike, order: _Order = ...) -> ndarray: ...
+def ravel(a: ArrayLike, order: _OrderKACF = ...) -> ndarray: ...
def nonzero(a: ArrayLike) -> Tuple[ndarray, ...]: ...
def shape(a: ArrayLike) -> _Shape: ...
def compress(
@@ -1237,3 +1365,114 @@ def amin(
initial: _NumberLike = ...,
where: _ArrayLikeBool = ...,
) -> Union[number, ndarray]: ...
+
+# TODO: `np.prod()``: For object arrays `initial` does not necessarily
+# have to be a numerical scalar.
+# The only requirement is that it is compatible
+# with the `.__mul__()` method(s) of the passed array's elements.
+
+# Note that the same situation holds for all wrappers around
+# `np.ufunc.reduce`, e.g. `np.sum()` (`.__add__()`).
+
+@overload
+def prod(
+ a: _Number,
+ axis: Optional[_ShapeLike] = ...,
+ dtype: DtypeLike = ...,
+ out: None = ...,
+ keepdims: bool = ...,
+ initial: _NumberLike = ...,
+ where: _ArrayLikeBool = ...,
+) -> _Number: ...
+@overload
+def prod(
+ a: ArrayLike,
+ axis: None = ...,
+ dtype: DtypeLike = ...,
+ out: None = ...,
+ keepdims: Literal[False] = ...,
+ initial: _NumberLike = ...,
+ where: _ArrayLikeBool = ...,
+) -> number: ...
+@overload
+def prod(
+ a: ArrayLike,
+ axis: Optional[_ShapeLike] = ...,
+ dtype: DtypeLike = ...,
+ out: Optional[ndarray] = ...,
+ keepdims: bool = ...,
+ initial: _NumberLike = ...,
+ where: _ArrayLikeBool = ...,
+) -> Union[number, ndarray]: ...
+def cumprod(
+ a: ArrayLike,
+ axis: Optional[int] = ...,
+ dtype: DtypeLike = ...,
+ out: Optional[ndarray] = ...,
+) -> ndarray: ...
+def ndim(a: ArrayLike) -> int: ...
+def size(a: ArrayLike, axis: Optional[int] = ...) -> int: ...
+@overload
+def around(
+ a: _Number, decimals: int = ..., out: Optional[ndarray] = ...
+) -> _Number: ...
+@overload
+def around(
+ a: _NumberLike, decimals: int = ..., out: Optional[ndarray] = ...
+) -> number: ...
+@overload
+def around(
+ a: ArrayLike, decimals: int = ..., out: Optional[ndarray] = ...
+) -> ndarray: ...
+@overload
+def mean(
+ a: ArrayLike,
+ axis: None = ...,
+ dtype: DtypeLike = ...,
+ out: None = ...,
+ keepdims: Literal[False] = ...,
+) -> number: ...
+@overload
+def mean(
+ a: ArrayLike,
+ axis: Optional[_ShapeLike] = ...,
+ dtype: DtypeLike = ...,
+ out: Optional[ndarray] = ...,
+ keepdims: bool = ...,
+) -> Union[number, ndarray]: ...
+@overload
+def std(
+ a: ArrayLike,
+ axis: None = ...,
+ dtype: DtypeLike = ...,
+ out: None = ...,
+ ddof: int = ...,
+ keepdims: Literal[False] = ...,
+) -> number: ...
+@overload
+def std(
+ a: ArrayLike,
+ axis: Optional[_ShapeLike] = ...,
+ dtype: DtypeLike = ...,
+ out: Optional[ndarray] = ...,
+ ddof: int = ...,
+ keepdims: bool = ...,
+) -> Union[number, ndarray]: ...
+@overload
+def var(
+ a: ArrayLike,
+ axis: None = ...,
+ dtype: DtypeLike = ...,
+ out: None = ...,
+ ddof: int = ...,
+ keepdims: Literal[False] = ...,
+) -> number: ...
+@overload
+def var(
+ a: ArrayLike,
+ axis: Optional[_ShapeLike] = ...,
+ dtype: DtypeLike = ...,
+ out: Optional[ndarray] = ...,
+ ddof: int = ...,
+ keepdims: bool = ...,
+) -> Union[number, ndarray]: ...
diff --git a/numpy/char.pyi b/numpy/char.pyi
new file mode 100644
index 000000000..0e7342c0b
--- /dev/null
+++ b/numpy/char.pyi
@@ -0,0 +1,53 @@
+from typing import Any
+
+equal: Any
+not_equal: Any
+greater_equal: Any
+less_equal: Any
+greater: Any
+less: Any
+str_len: Any
+add: Any
+multiply: Any
+mod: Any
+capitalize: Any
+center: Any
+count: Any
+decode: Any
+encode: Any
+endswith: Any
+expandtabs: Any
+find: Any
+index: Any
+isalnum: Any
+isalpha: Any
+isdigit: Any
+islower: Any
+isspace: Any
+istitle: Any
+isupper: Any
+join: Any
+ljust: Any
+lower: Any
+lstrip: Any
+partition: Any
+replace: Any
+rfind: Any
+rindex: Any
+rjust: Any
+rpartition: Any
+rsplit: Any
+rstrip: Any
+split: Any
+splitlines: Any
+startswith: Any
+strip: Any
+swapcase: Any
+title: Any
+translate: Any
+upper: Any
+zfill: Any
+isnumeric: Any
+isdecimal: Any
+array: Any
+asarray: Any
diff --git a/numpy/core/_add_newdocs.py b/numpy/core/_add_newdocs.py
index c3b4374f4..879b3645d 100644
--- a/numpy/core/_add_newdocs.py
+++ b/numpy/core/_add_newdocs.py
@@ -12,6 +12,7 @@ NOTE: Many of the methods of ndarray have corresponding functions.
from numpy.core import numerictypes as _numerictypes
from numpy.core import dtype
from numpy.core.function_base import add_newdoc
+from numpy.core.overrides import array_function_like_doc
###############################################################################
#
@@ -786,7 +787,8 @@ add_newdoc('numpy.core', 'broadcast', ('reset',
add_newdoc('numpy.core.multiarray', 'array',
"""
- array(object, dtype=None, *, copy=True, order='K', subok=False, ndmin=0)
+ array(object, dtype=None, *, copy=True, order='K', subok=False, ndmin=0,
+ like=None)
Create an array.
@@ -829,6 +831,9 @@ add_newdoc('numpy.core.multiarray', 'array',
Specifies the minimum number of dimensions that the resulting
array should have. Ones will be pre-pended to the shape as
needed to meet this requirement.
+ ${ARRAY_FUNCTION_LIKE}
+
+ .. versionadded:: 1.20.0
Returns
-------
@@ -895,11 +900,14 @@ add_newdoc('numpy.core.multiarray', 'array',
matrix([[1, 2],
[3, 4]])
- """)
+ """.replace(
+ "${ARRAY_FUNCTION_LIKE}",
+ array_function_like_doc,
+ ))
add_newdoc('numpy.core.multiarray', 'empty',
"""
- empty(shape, dtype=float, order='C')
+ empty(shape, dtype=float, order='C', *, like=None)
Return a new array of given shape and type, without initializing entries.
@@ -914,6 +922,9 @@ add_newdoc('numpy.core.multiarray', 'empty',
Whether to store multi-dimensional data in row-major
(C-style) or column-major (Fortran-style) order in
memory.
+ ${ARRAY_FUNCTION_LIKE}
+
+ .. versionadded:: 1.20.0
Returns
-------
@@ -946,7 +957,10 @@ add_newdoc('numpy.core.multiarray', 'empty',
array([[-1073741821, -1067949133],
[ 496041986, 19249760]]) #uninitialized
- """)
+ """.replace(
+ "${ARRAY_FUNCTION_LIKE}",
+ array_function_like_doc,
+ ))
add_newdoc('numpy.core.multiarray', 'scalar',
"""
@@ -964,7 +978,7 @@ add_newdoc('numpy.core.multiarray', 'scalar',
add_newdoc('numpy.core.multiarray', 'zeros',
"""
- zeros(shape, dtype=float, order='C')
+ zeros(shape, dtype=float, order='C', *, like=None)
Return a new array of given shape and type, filled with zeros.
@@ -979,6 +993,9 @@ add_newdoc('numpy.core.multiarray', 'zeros',
Whether to store multi-dimensional data in row-major
(C-style) or column-major (Fortran-style) order in
memory.
+ ${ARRAY_FUNCTION_LIKE}
+
+ .. versionadded:: 1.20.0
Returns
-------
@@ -1013,7 +1030,10 @@ add_newdoc('numpy.core.multiarray', 'zeros',
array([(0, 0), (0, 0)],
dtype=[('x', '<i4'), ('y', '<i4')])
- """)
+ """.replace(
+ "${ARRAY_FUNCTION_LIKE}",
+ array_function_like_doc,
+ ))
add_newdoc('numpy.core.multiarray', 'set_typeDict',
"""set_typeDict(dict)
@@ -1025,7 +1045,7 @@ add_newdoc('numpy.core.multiarray', 'set_typeDict',
add_newdoc('numpy.core.multiarray', 'fromstring',
"""
- fromstring(string, dtype=float, count=-1, sep='')
+ fromstring(string, dtype=float, count=-1, sep='', *, like=None)
A new 1-D array initialized from text data in a string.
@@ -1058,6 +1078,9 @@ add_newdoc('numpy.core.multiarray', 'fromstring',
text, the binary mode of `fromstring` will first encode it into
bytes using either utf-8 (python 3) or the default encoding
(python 2), neither of which produce sane results.
+ ${ARRAY_FUNCTION_LIKE}
+
+ .. versionadded:: 1.20.0
Returns
-------
@@ -1081,7 +1104,10 @@ add_newdoc('numpy.core.multiarray', 'fromstring',
>>> np.fromstring('1, 2', dtype=int, sep=',')
array([1, 2])
- """)
+ """.replace(
+ "${ARRAY_FUNCTION_LIKE}",
+ array_function_like_doc,
+ ))
add_newdoc('numpy.core.multiarray', 'compare_chararrays',
"""
@@ -1122,7 +1148,7 @@ add_newdoc('numpy.core.multiarray', 'compare_chararrays',
add_newdoc('numpy.core.multiarray', 'fromiter',
"""
- fromiter(iterable, dtype, count=-1)
+ fromiter(iterable, dtype, count=-1, *, like=None)
Create a new 1-dimensional array from an iterable object.
@@ -1135,6 +1161,9 @@ add_newdoc('numpy.core.multiarray', 'fromiter',
count : int, optional
The number of items to read from *iterable*. The default is -1,
which means all data is read.
+ ${ARRAY_FUNCTION_LIKE}
+
+ .. versionadded:: 1.20.0
Returns
-------
@@ -1152,11 +1181,14 @@ add_newdoc('numpy.core.multiarray', 'fromiter',
>>> np.fromiter(iterable, float)
array([ 0., 1., 4., 9., 16.])
- """)
+ """.replace(
+ "${ARRAY_FUNCTION_LIKE}",
+ array_function_like_doc,
+ ))
add_newdoc('numpy.core.multiarray', 'fromfile',
"""
- fromfile(file, dtype=float, count=-1, sep='', offset=0)
+ fromfile(file, dtype=float, count=-1, sep='', offset=0, *, like=None)
Construct an array from data in a text or binary file.
@@ -1195,6 +1227,9 @@ add_newdoc('numpy.core.multiarray', 'fromfile',
Only permitted for binary files.
.. versionadded:: 1.17.0
+ ${ARRAY_FUNCTION_LIKE}
+
+ .. versionadded:: 1.20.0
See also
--------
@@ -1241,11 +1276,14 @@ add_newdoc('numpy.core.multiarray', 'fromfile',
array([((10, 0), 98.25)],
dtype=[('time', [('min', '<i8'), ('sec', '<i8')]), ('temp', '<f8')])
- """)
+ """.replace(
+ "${ARRAY_FUNCTION_LIKE}",
+ array_function_like_doc,
+ ))
add_newdoc('numpy.core.multiarray', 'frombuffer',
"""
- frombuffer(buffer, dtype=float, count=-1, offset=0)
+ frombuffer(buffer, dtype=float, count=-1, offset=0, *, like=None)
Interpret a buffer as a 1-dimensional array.
@@ -1259,6 +1297,9 @@ add_newdoc('numpy.core.multiarray', 'frombuffer',
Number of items to read. ``-1`` means all data in the buffer.
offset : int, optional
Start reading the buffer from this offset (in bytes); default: 0.
+ ${ARRAY_FUNCTION_LIKE}
+
+ .. versionadded:: 1.20.0
Notes
-----
@@ -1283,7 +1324,10 @@ add_newdoc('numpy.core.multiarray', 'frombuffer',
>>> np.frombuffer(b'\\x01\\x02\\x03\\x04\\x05', dtype=np.uint8, count=3)
array([1, 2, 3], dtype=uint8)
- """)
+ """.replace(
+ "${ARRAY_FUNCTION_LIKE}",
+ array_function_like_doc,
+ ))
add_newdoc('numpy.core', 'fastCopyAndTranspose',
"""_fastCopyAndTranspose(a)""")
@@ -1293,7 +1337,7 @@ add_newdoc('numpy.core.multiarray', 'correlate',
add_newdoc('numpy.core.multiarray', 'arange',
"""
- arange([start,] stop[, step,], dtype=None)
+ arange([start,] stop[, step,], dtype=None, *, like=None)
Return evenly spaced values within a given interval.
@@ -1322,6 +1366,9 @@ add_newdoc('numpy.core.multiarray', 'arange',
dtype : dtype
The type of the output array. If `dtype` is not given, infer the data
type from the other input arguments.
+ ${ARRAY_FUNCTION_LIKE}
+
+ .. versionadded:: 1.20.0
Returns
-------
@@ -1350,7 +1397,10 @@ add_newdoc('numpy.core.multiarray', 'arange',
>>> np.arange(3,7,2)
array([3, 5])
- """)
+ """.replace(
+ "${ARRAY_FUNCTION_LIKE}",
+ array_function_like_doc,
+ ))
add_newdoc('numpy.core.multiarray', '_get_ndarray_c_version',
"""_get_ndarray_c_version()
@@ -3223,7 +3273,7 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('min',
add_newdoc('numpy.core.multiarray', 'ndarray', ('newbyteorder',
"""
- arr.newbyteorder(new_order='S')
+ arr.newbyteorder(new_order='S', /)
Return the array with the same data viewed with a different byte order.
@@ -4689,14 +4739,14 @@ add_newdoc('numpy.core', 'ufunc', ('signature',
add_newdoc('numpy.core', 'ufunc', ('reduce',
"""
- reduce(a, axis=0, dtype=None, out=None, keepdims=False, initial=<no value>, where=True)
+ reduce(array, axis=0, dtype=None, out=None, keepdims=False, initial=<no value>, where=True)
- Reduces `a`'s dimension by one, by applying ufunc along one axis.
+ Reduces `array`'s dimension by one, by applying ufunc along one axis.
- Let :math:`a.shape = (N_0, ..., N_i, ..., N_{M-1})`. Then
- :math:`ufunc.reduce(a, axis=i)[k_0, ..,k_{i-1}, k_{i+1}, .., k_{M-1}]` =
+ Let :math:`array.shape = (N_0, ..., N_i, ..., N_{M-1})`. Then
+ :math:`ufunc.reduce(array, axis=i)[k_0, ..,k_{i-1}, k_{i+1}, .., k_{M-1}]` =
the result of iterating `j` over :math:`range(N_i)`, cumulatively applying
- ufunc to each :math:`a[k_0, ..,k_{i-1}, j, k_{i+1}, .., k_{M-1}]`.
+ ufunc to each :math:`array[k_0, ..,k_{i-1}, j, k_{i+1}, .., k_{M-1}]`.
For a one-dimensional array, reduce produces results equivalent to:
::
@@ -4709,7 +4759,7 @@ add_newdoc('numpy.core', 'ufunc', ('reduce',
Parameters
----------
- a : array_like
+ array : array_like
The array to act on.
axis : None or int or tuple of ints, optional
Axis or axes along which a reduction is performed.
@@ -4742,7 +4792,7 @@ add_newdoc('numpy.core', 'ufunc', ('reduce',
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
- the result will broadcast correctly against the original `arr`.
+ the result will broadcast correctly against the original `array`.
.. versionadded:: 1.7.0
initial : scalar, optional
@@ -4756,7 +4806,7 @@ add_newdoc('numpy.core', 'ufunc', ('reduce',
where : array_like of bool, optional
A boolean array which is broadcasted to match the dimensions
- of `a`, and selects elements to include in the reduction. Note
+ of `array`, and selects elements to include in the reduction. Note
that for ufuncs like ``minimum`` that do not have an identity
defined, one has to pass in also ``initial``.
@@ -4898,28 +4948,28 @@ add_newdoc('numpy.core', 'ufunc', ('accumulate',
add_newdoc('numpy.core', 'ufunc', ('reduceat',
"""
- reduceat(a, indices, axis=0, dtype=None, out=None)
+ reduceat(array, indices, axis=0, dtype=None, out=None)
Performs a (local) reduce with specified slices over a single axis.
For i in ``range(len(indices))``, `reduceat` computes
- ``ufunc.reduce(a[indices[i]:indices[i+1]])``, which becomes the i-th
+ ``ufunc.reduce(array[indices[i]:indices[i+1]])``, which becomes the i-th
generalized "row" parallel to `axis` in the final result (i.e., in a
2-D array, for example, if `axis = 0`, it becomes the i-th row, but if
`axis = 1`, it becomes the i-th column). There are three exceptions to this:
* when ``i = len(indices) - 1`` (so for the last index),
- ``indices[i+1] = a.shape[axis]``.
+ ``indices[i+1] = array.shape[axis]``.
* if ``indices[i] >= indices[i + 1]``, the i-th generalized "row" is
- simply ``a[indices[i]]``.
- * if ``indices[i] >= len(a)`` or ``indices[i] < 0``, an error is raised.
+ simply ``array[indices[i]]``.
+ * if ``indices[i] >= len(array)`` or ``indices[i] < 0``, an error is raised.
The shape of the output depends on the size of `indices`, and may be
- larger than `a` (this happens if ``len(indices) > a.shape[axis]``).
+ larger than `array` (this happens if ``len(indices) > array.shape[axis]``).
Parameters
----------
- a : array_like
+ array : array_like
The array to act on.
indices : array_like
Paired indices, comma separated (not colon), specifying slices to
@@ -4949,14 +4999,15 @@ add_newdoc('numpy.core', 'ufunc', ('reduceat',
-----
A descriptive example:
- If `a` is 1-D, the function `ufunc.accumulate(a)` is the same as
- ``ufunc.reduceat(a, indices)[::2]`` where `indices` is
+ If `array` is 1-D, the function `ufunc.accumulate(array)` is the same as
+ ``ufunc.reduceat(array, indices)[::2]`` where `indices` is
``range(len(array) - 1)`` with a zero placed
in every other element:
- ``indices = zeros(2 * len(a) - 1)``, ``indices[1::2] = range(1, len(a))``.
+ ``indices = zeros(2 * len(array) - 1)``,
+ ``indices[1::2] = range(1, len(array))``.
- Don't be fooled by this attribute's name: `reduceat(a)` is not
- necessarily smaller than `a`.
+ Don't be fooled by this attribute's name: `reduceat(array)` is not
+ necessarily smaller than `array`.
Examples
--------
@@ -5005,7 +5056,7 @@ add_newdoc('numpy.core', 'ufunc', ('reduceat',
add_newdoc('numpy.core', 'ufunc', ('outer',
r"""
- outer(A, B, **kwargs)
+ outer(A, B, /, **kwargs)
Apply the ufunc `op` to all pairs (a, b) with a in `A` and b in `B`.
@@ -5075,7 +5126,7 @@ add_newdoc('numpy.core', 'ufunc', ('outer',
add_newdoc('numpy.core', 'ufunc', ('at',
"""
- at(a, indices, b=None)
+ at(a, indices, b=None, /)
Performs unbuffered in place operation on operand 'a' for elements
specified by 'indices'. For addition ufunc, this method is equivalent to
@@ -5491,6 +5542,45 @@ add_newdoc('numpy.core.multiarray', 'dtype', ('kind',
"""))
+add_newdoc('numpy.core.multiarray', 'dtype', ('metadata',
+ """
+ Either ``None`` or a readonly dictionary of metadata (mappingproxy).
+
+ The metadata field can be set using any dictionary at data-type
+ creation. NumPy currently has no uniform approach to propagating
+ metadata; although some array operations preserve it, there is no
+ guarantee that others will.
+
+ .. warning::
+
+ Although used in certain projects, this feature was long undocumented
+ and is not well supported. Some aspects of metadata propagation
+ are expected to change in the future.
+
+ Examples
+ --------
+
+ >>> dt = np.dtype(float, metadata={"key": "value"})
+ >>> dt.metadata["key"]
+ 'value'
+ >>> arr = np.array([1, 2, 3], dtype=dt)
+ >>> arr.dtype.metadata
+ mappingproxy({'key': 'value'})
+
+ Adding arrays with identical datatypes currently preserves the metadata:
+
+ >>> (arr + arr).dtype.metadata
+ mappingproxy({'key': 'value'})
+
+ But if the arrays have different dtype metadata, the metadata may be
+ dropped:
+
+ >>> dt2 = np.dtype(float, metadata={"key2": "value2"})
+ >>> arr2 = np.array([3, 2, 1], dtype=dt2)
+ >>> (arr + arr2).dtype.metadata is None
+ True # The metadata field is cleared so None is returned
+ """))
+
add_newdoc('numpy.core.multiarray', 'dtype', ('name',
"""
A bit-width name for this data-type.
@@ -5647,7 +5737,7 @@ add_newdoc('numpy.core.multiarray', 'dtype', ('type',
add_newdoc('numpy.core.multiarray', 'dtype', ('newbyteorder',
"""
- newbyteorder(new_order='S')
+ newbyteorder(new_order='S', /)
Return a new dtype with a different byte order.
@@ -6023,7 +6113,7 @@ add_newdoc('numpy.core.numerictypes', 'generic',
add_newdoc('numpy.core.numerictypes', 'generic', ('newbyteorder',
"""
- newbyteorder(new_order='S')
+ newbyteorder(new_order='S', /)
Return a new `dtype` with a different byte order.
diff --git a/numpy/core/_asarray.py b/numpy/core/_asarray.py
index 1b06c328f..a406308f3 100644
--- a/numpy/core/_asarray.py
+++ b/numpy/core/_asarray.py
@@ -3,7 +3,11 @@ Functions in the ``as*array`` family that promote array-likes into arrays.
`require` fits this category despite its name not matching this pattern.
"""
-from .overrides import set_module
+from .overrides import (
+ array_function_dispatch,
+ set_array_function_like_doc,
+ set_module,
+)
from .multiarray import array
@@ -11,8 +15,14 @@ __all__ = [
"asarray", "asanyarray", "ascontiguousarray", "asfortranarray", "require",
]
+
+def _asarray_dispatcher(a, dtype=None, order=None, *, like=None):
+ return (like,)
+
+
+@set_array_function_like_doc
@set_module('numpy')
-def asarray(a, dtype=None, order=None):
+def asarray(a, dtype=None, order=None, *, like=None):
"""Convert the input to an array.
Parameters
@@ -30,6 +40,9 @@ def asarray(a, dtype=None, order=None):
'A' (any) means 'F' if `a` is Fortran contiguous, 'C' otherwise
'K' (keep) preserve input order
Defaults to 'C'.
+ ${ARRAY_FUNCTION_LIKE}
+
+ .. versionadded:: 1.20.0
Returns
-------
@@ -83,11 +96,20 @@ def asarray(a, dtype=None, order=None):
True
"""
+ if like is not None:
+ return _asarray_with_like(a, dtype=dtype, order=order, like=like)
+
return array(a, dtype, copy=False, order=order)
+_asarray_with_like = array_function_dispatch(
+ _asarray_dispatcher
+)(asarray)
+
+
+@set_array_function_like_doc
@set_module('numpy')
-def asanyarray(a, dtype=None, order=None):
+def asanyarray(a, dtype=None, order=None, *, like=None):
"""Convert the input to an ndarray, but pass ndarray subclasses through.
Parameters
@@ -105,6 +127,9 @@ def asanyarray(a, dtype=None, order=None):
'A' (any) means 'F' if `a` is Fortran contiguous, 'C' otherwise
'K' (keep) preserve input order
Defaults to 'C'.
+ ${ARRAY_FUNCTION_LIKE}
+
+ .. versionadded:: 1.20.0
Returns
-------
@@ -140,11 +165,24 @@ def asanyarray(a, dtype=None, order=None):
True
"""
+ if like is not None:
+ return _asanyarray_with_like(a, dtype=dtype, order=order, like=like)
+
return array(a, dtype, copy=False, order=order, subok=True)
+_asanyarray_with_like = array_function_dispatch(
+ _asarray_dispatcher
+)(asanyarray)
+
+
+def _asarray_contiguous_fortran_dispatcher(a, dtype=None, *, like=None):
+ return (like,)
+
+
+@set_array_function_like_doc
@set_module('numpy')
-def ascontiguousarray(a, dtype=None):
+def ascontiguousarray(a, dtype=None, *, like=None):
"""
Return a contiguous array (ndim >= 1) in memory (C order).
@@ -154,6 +192,9 @@ def ascontiguousarray(a, dtype=None):
Input array.
dtype : str or dtype object, optional
Data-type of returned array.
+ ${ARRAY_FUNCTION_LIKE}
+
+ .. versionadded:: 1.20.0
Returns
-------
@@ -181,11 +222,20 @@ def ascontiguousarray(a, dtype=None):
so it will not preserve 0-d arrays.
"""
+ if like is not None:
+ return _ascontiguousarray_with_like(a, dtype=dtype, like=like)
+
return array(a, dtype, copy=False, order='C', ndmin=1)
+_ascontiguousarray_with_like = array_function_dispatch(
+ _asarray_contiguous_fortran_dispatcher
+)(ascontiguousarray)
+
+
+@set_array_function_like_doc
@set_module('numpy')
-def asfortranarray(a, dtype=None):
+def asfortranarray(a, dtype=None, *, like=None):
"""
Return an array (ndim >= 1) laid out in Fortran order in memory.
@@ -195,6 +245,9 @@ def asfortranarray(a, dtype=None):
Input array.
dtype : str or dtype object, optional
By default, the data-type is inferred from the input data.
+ ${ARRAY_FUNCTION_LIKE}
+
+ .. versionadded:: 1.20.0
Returns
-------
@@ -222,11 +275,24 @@ def asfortranarray(a, dtype=None):
so it will not preserve 0-d arrays.
"""
+ if like is not None:
+ return _asfortranarray_with_like(a, dtype=dtype, like=like)
+
return array(a, dtype, copy=False, order='F', ndmin=1)
+_asfortranarray_with_like = array_function_dispatch(
+ _asarray_contiguous_fortran_dispatcher
+)(asfortranarray)
+
+
+def _require_dispatcher(a, dtype=None, requirements=None, *, like=None):
+ return (like,)
+
+
+@set_array_function_like_doc
@set_module('numpy')
-def require(a, dtype=None, requirements=None):
+def require(a, dtype=None, requirements=None, *, like=None):
"""
Return an ndarray of the provided type that satisfies requirements.
@@ -250,6 +316,9 @@ def require(a, dtype=None, requirements=None):
* 'WRITEABLE' ('W') - ensure a writable array
* 'OWNDATA' ('O') - ensure an array that owns its own data
* 'ENSUREARRAY', ('E') - ensure a base array, instead of a subclass
+ ${ARRAY_FUNCTION_LIKE}
+
+ .. versionadded:: 1.20.0
Returns
-------
@@ -293,6 +362,14 @@ def require(a, dtype=None, requirements=None):
UPDATEIFCOPY : False
"""
+ if like is not None:
+ return _require_with_like(
+ a,
+ dtype=dtype,
+ requirements=requirements,
+ like=like,
+ )
+
possible_flags = {'C': 'C', 'C_CONTIGUOUS': 'C', 'CONTIGUOUS': 'C',
'F': 'F', 'F_CONTIGUOUS': 'F', 'FORTRAN': 'F',
'A': 'A', 'ALIGNED': 'A',
@@ -327,3 +404,8 @@ def require(a, dtype=None, requirements=None):
arr = arr.copy(order)
break
return arr
+
+
+_require_with_like = array_function_dispatch(
+ _require_dispatcher
+)(require)
diff --git a/numpy/core/arrayprint.py b/numpy/core/arrayprint.py
index 5d9642ea8..ad1530419 100644
--- a/numpy/core/arrayprint.py
+++ b/numpy/core/arrayprint.py
@@ -1628,6 +1628,3 @@ def set_string_function(f, repr=True):
return multiarray.set_string_function(_default_array_str, 0)
else:
return multiarray.set_string_function(f, repr)
-
-set_string_function(_default_array_str, False)
-set_string_function(_default_array_repr, True)
diff --git a/numpy/core/code_generators/generate_umath.py b/numpy/core/code_generators/generate_umath.py
index 86e28b104..dc5c2577a 100644
--- a/numpy/core/code_generators/generate_umath.py
+++ b/numpy/core/code_generators/generate_umath.py
@@ -8,12 +8,12 @@ sys.path.insert(0, os.path.dirname(__file__))
import ufunc_docstrings as docstrings
sys.path.pop(0)
-Zero = "PyInt_FromLong(0)"
-One = "PyInt_FromLong(1)"
+Zero = "PyLong_FromLong(0)"
+One = "PyLong_FromLong(1)"
True_ = "(Py_INCREF(Py_True), Py_True)"
False_ = "(Py_INCREF(Py_False), Py_False)"
None_ = object()
-AllOnes = "PyInt_FromLong(-1)"
+AllOnes = "PyLong_FromLong(-1)"
MinusInfinity = 'PyFloat_FromDouble(-NPY_INFINITY)'
ReorderableNone = "(Py_INCREF(Py_None), Py_None)"
@@ -1042,7 +1042,7 @@ def make_arrays(funcdict):
#ifndef NPY_DISABLE_OPTIMIZATION
#include "{dname}.dispatch.h"
#endif
- NPY_CPU_DISPATCH_CALL_XB({name}_functions[{k}] = {tname}_{name})
+ NPY_CPU_DISPATCH_CALL_XB({name}_functions[{k}] = {tname}_{name});
""").format(
dname=dname, name=name, tname=tname, k=k
))
diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py
index b07def736..b1524b891 100644
--- a/numpy/core/fromnumeric.py
+++ b/numpy/core/fromnumeric.py
@@ -460,7 +460,7 @@ def repeat(a, repeats, axis=None):
--------
tile : Tile an array.
unique : Find the unique elements of an array.
-
+
Examples
--------
>>> np.repeat(3, 4)
@@ -2007,8 +2007,8 @@ def compress(condition, a, axis=None, out=None):
--------
take, choose, diag, diagonal, select
ndarray.compress : Equivalent method in ndarray
- np.extract: Equivalent method when working on 1-D arrays
- ufuncs-output-type
+ extract: Equivalent method when working on 1-D arrays
+ :ref:`ufuncs-output-type`
Examples
--------
@@ -2082,7 +2082,7 @@ def clip(a, a_min, a_max, out=None, **kwargs):
See Also
--------
- ufuncs-output-type
+ :ref:`ufuncs-output-type`
Examples
--------
@@ -2278,7 +2278,7 @@ def any(a, axis=None, out=None, keepdims=np._NoValue):
the same shape as the expected output and its type is preserved
(e.g., if it is of type float, then it will remain so, returning
1.0 for True and 0.0 for False, regardless of the type of `a`).
- See `ufuncs-output-type` for more details.
+ See :ref:`ufuncs-output-type` for more details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
@@ -2363,7 +2363,7 @@ def all(a, axis=None, out=None, keepdims=np._NoValue):
Alternate output array in which to place the result.
It must have the same shape as the expected output and its
type is preserved (e.g., if ``dtype(out)`` is float, the result
- will consist of 0.0's and 1.0's). See `ufuncs-output-type` for more
+ will consist of 0.0's and 1.0's). See :ref:`ufuncs-output-type` for more
details.
keepdims : bool, optional
@@ -2442,7 +2442,7 @@ def cumsum(a, axis=None, dtype=None, out=None):
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output
- but the type will be cast if necessary. See `ufuncs-output-type` for
+ but the type will be cast if necessary. See :ref:`ufuncs-output-type` for
more details.
Returns
@@ -2613,7 +2613,7 @@ def amax(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue,
out : ndarray, optional
Alternative output array in which to place the result. Must
be of the same shape and buffer length as the expected output.
- See `ufuncs-output-type` for more details.
+ See :ref:`ufuncs-output-type` for more details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
@@ -2738,7 +2738,7 @@ def amin(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue,
out : ndarray, optional
Alternative output array in which to place the result. Must
be of the same shape and buffer length as the expected output.
- See `ufuncs-output-type` for more details.
+ See :ref:`ufuncs-output-type` for more details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
@@ -2948,7 +2948,7 @@ def prod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue,
See Also
--------
ndarray.prod : equivalent method
- ufuncs-output-type
+ :ref:`ufuncs-output-type`
Notes
-----
@@ -3044,7 +3044,7 @@ def cumprod(a, axis=None, dtype=None, out=None):
See Also
--------
- ufuncs-output-type
+ :ref:`ufuncs-output-type`
Notes
-----
@@ -3190,7 +3190,7 @@ def around(a, decimals=0, out=None):
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output, but the type of the output
- values will be cast if necessary. See `ufuncs-output-type` for more
+ values will be cast if necessary. See :ref:`ufuncs-output-type` for more
details.
Returns
@@ -3305,7 +3305,7 @@ def mean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
Alternate output array in which to place the result. The default
is ``None``; if provided, it must have the same shape as the
expected output, but the type will be cast if necessary.
- See `ufuncs-output-type` for more details.
+ See :ref:`ufuncs-output-type` for more details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
@@ -3440,12 +3440,12 @@ def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
See Also
--------
var, mean, nanmean, nanstd, nanvar
- ufuncs-output-type
+ :ref:`ufuncs-output-type`
Notes
-----
The standard deviation is the square root of the average of the squared
- deviations from the mean, i.e., ``std = sqrt(mean(x))``, where
+ deviations from the mean, i.e., ``std = sqrt(mean(x))``, where
``x = abs(a - a.mean())**2``.
The average squared deviation is typically calculated as ``x.sum() / N``,
@@ -3566,7 +3566,7 @@ def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
See Also
--------
std, mean, nanmean, nanstd, nanvar
- ufuncs-output-type
+ :ref:`ufuncs-output-type`
Notes
-----
diff --git a/numpy/core/function_base.py b/numpy/core/function_base.py
index b2f17cfeb..8a1fee99b 100644
--- a/numpy/core/function_base.py
+++ b/numpy/core/function_base.py
@@ -165,7 +165,7 @@ def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None,
if axis != 0:
y = _nx.moveaxis(y, 0, axis)
-
+
if _nx.issubdtype(dtype, _nx.integer):
_nx.floor(y, out=y)
@@ -207,7 +207,7 @@ def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None,
endpoint : boolean, optional
If true, `stop` is the last sample. Otherwise, it is not included.
Default is True.
- base : float, optional
+ base : array_like, optional
The base of the log space. The step size between the elements in
``ln(samples) / ln(base)`` (or ``log_base(samples)``) is uniform.
Default is 10.0.
diff --git a/numpy/core/function_base.pyi b/numpy/core/function_base.pyi
new file mode 100644
index 000000000..c6ebbd5f5
--- /dev/null
+++ b/numpy/core/function_base.pyi
@@ -0,0 +1,56 @@
+import sys
+from typing import overload, Tuple, Union, Sequence, Any
+
+from numpy import ndarray, inexact, _NumberLike
+from numpy.typing import ArrayLike, DtypeLike, _SupportsArray
+
+if sys.version_info >= (3, 8):
+ from typing import SupportsIndex, Literal
+else:
+ from typing_extensions import Literal, Protocol
+
+ class SupportsIndex(Protocol):
+ def __index__(self) -> int: ...
+
+# TODO: wait for support for recursive types
+_ArrayLikeNested = Sequence[Sequence[Any]]
+_ArrayLikeNumber = Union[
+ _NumberLike, Sequence[_NumberLike], ndarray, _SupportsArray, _ArrayLikeNested
+]
+@overload
+def linspace(
+ start: _ArrayLikeNumber,
+ stop: _ArrayLikeNumber,
+ num: SupportsIndex = ...,
+ endpoint: bool = ...,
+ retstep: Literal[False] = ...,
+ dtype: DtypeLike = ...,
+ axis: SupportsIndex = ...,
+) -> ndarray: ...
+@overload
+def linspace(
+ start: _ArrayLikeNumber,
+ stop: _ArrayLikeNumber,
+ num: SupportsIndex = ...,
+ endpoint: bool = ...,
+ retstep: Literal[True] = ...,
+ dtype: DtypeLike = ...,
+ axis: SupportsIndex = ...,
+) -> Tuple[ndarray, inexact]: ...
+def logspace(
+ start: _ArrayLikeNumber,
+ stop: _ArrayLikeNumber,
+ num: SupportsIndex = ...,
+ endpoint: bool = ...,
+ base: _ArrayLikeNumber = ...,
+ dtype: DtypeLike = ...,
+ axis: SupportsIndex = ...,
+) -> ndarray: ...
+def geomspace(
+ start: _ArrayLikeNumber,
+ stop: _ArrayLikeNumber,
+ num: SupportsIndex = ...,
+ endpoint: bool = ...,
+ dtype: DtypeLike = ...,
+ axis: SupportsIndex = ...,
+) -> ndarray: ...
diff --git a/numpy/core/include/numpy/arrayscalars.h b/numpy/core/include/numpy/arrayscalars.h
index 6dce88df3..b282a2cd4 100644
--- a/numpy/core/include/numpy/arrayscalars.h
+++ b/numpy/core/include/numpy/arrayscalars.h
@@ -134,8 +134,7 @@ typedef struct {
char obval;
} PyScalarObject;
-#define PyStringScalarObject PyStringObject
-#define PyStringScalarObject PyStringObject
+#define PyStringScalarObject PyBytesObject
typedef struct {
/* note that the PyObject_HEAD macro lives right here */
PyUnicodeObject base;
diff --git a/numpy/core/include/numpy/ndarraytypes.h b/numpy/core/include/numpy/ndarraytypes.h
index bbcf468c1..6eca4afdb 100644
--- a/numpy/core/include/numpy/ndarraytypes.h
+++ b/numpy/core/include/numpy/ndarraytypes.h
@@ -1759,8 +1759,8 @@ typedef struct {
} npy_stride_sort_item;
/************************************************************
- * This is the form of the struct that's returned pointed by the
- * PyCObject attribute of an array __array_struct__. See
+ * This is the form of the struct that's stored in the
+ * PyCapsule returned by an array's __array_struct__ attribute. See
* https://docs.scipy.org/doc/numpy/reference/arrays.interface.html for the full
* documentation.
************************************************************/
diff --git a/numpy/core/include/numpy/npy_cpu.h b/numpy/core/include/numpy/npy_cpu.h
index 509e23a51..4dbf9d84e 100644
--- a/numpy/core/include/numpy/npy_cpu.h
+++ b/numpy/core/include/numpy/npy_cpu.h
@@ -24,7 +24,6 @@
#define _NPY_CPUARCH_H_
#include "numpyconfig.h"
-#include <string.h> /* for memcpy */
#if defined( __i386__ ) || defined(i386) || defined(_M_IX86)
/*
@@ -111,8 +110,6 @@
information about your platform (OS, CPU and compiler)
#endif
-#define NPY_COPY_PYOBJECT_PTR(dst, src) memcpy(dst, src, sizeof(PyObject *))
-
#if (defined(NPY_CPU_X86) || defined(NPY_CPU_AMD64))
#define NPY_CPU_HAVE_UNALIGNED_ACCESS 1
#else
diff --git a/numpy/core/multiarray.py b/numpy/core/multiarray.py
index 10325050d..225c9554c 100644
--- a/numpy/core/multiarray.py
+++ b/numpy/core/multiarray.py
@@ -141,9 +141,9 @@ def empty_like(prototype, dtype=None, order=None, subok=None, shape=None):
@array_function_from_c_func_and_dispatcher(_multiarray_umath.concatenate)
-def concatenate(arrays, axis=None, out=None):
+def concatenate(arrays, axis=None, out=None, *, dtype=None, casting=None):
"""
- concatenate((a1, a2, ...), axis=0, out=None)
+ concatenate((a1, a2, ...), axis=0, out=None, dtype=None, casting="same_kind")
Join a sequence of arrays along an existing axis.
@@ -159,6 +159,16 @@ def concatenate(arrays, axis=None, out=None):
If provided, the destination to place the result. The shape must be
correct, matching that of what concatenate would have returned if no
out argument were specified.
+ dtype : str or dtype
+ If provided, the destination array will have this dtype. Cannot be
+ provided together with `out`.
+
+ .. versionadded:: 1.20.0
+
+ casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
+ Controls what kind of data casting may occur. Defaults to 'same_kind'.
+
+ .. versionadded:: 1.20.0
Returns
-------
diff --git a/numpy/core/numeric.py b/numpy/core/numeric.py
index 84066dd30..a023bf0da 100644
--- a/numpy/core/numeric.py
+++ b/numpy/core/numeric.py
@@ -21,7 +21,7 @@ from .multiarray import (
from . import overrides
from . import umath
from . import shape_base
-from .overrides import set_module
+from .overrides import set_array_function_like_doc, set_module
from .umath import (multiply, invert, sin, PINF, NAN)
from . import numerictypes
from .numerictypes import longlong, intc, int_, float_, complex_, bool_
@@ -141,8 +141,13 @@ def zeros_like(a, dtype=None, order='K', subok=True, shape=None):
return res
+def _ones_dispatcher(shape, dtype=None, order=None, *, like=None):
+ return (like,)
+
+
+@set_array_function_like_doc
@set_module('numpy')
-def ones(shape, dtype=None, order='C'):
+def ones(shape, dtype=None, order='C', *, like=None):
"""
Return a new array of given shape and type, filled with ones.
@@ -157,6 +162,9 @@ def ones(shape, dtype=None, order='C'):
Whether to store multi-dimensional data in row-major
(C-style) or column-major (Fortran-style) order in
memory.
+ ${ARRAY_FUNCTION_LIKE}
+
+ .. versionadded:: 1.20.0
Returns
-------
@@ -189,11 +197,19 @@ def ones(shape, dtype=None, order='C'):
[1., 1.]])
"""
+ if like is not None:
+ return _ones_with_like(shape, dtype=dtype, order=order, like=like)
+
a = empty(shape, dtype, order)
multiarray.copyto(a, 1, casting='unsafe')
return a
+_ones_with_like = array_function_dispatch(
+ _ones_dispatcher
+)(ones)
+
+
def _ones_like_dispatcher(a, dtype=None, order=None, subok=None, shape=None):
return (a,)
@@ -265,8 +281,13 @@ def ones_like(a, dtype=None, order='K', subok=True, shape=None):
return res
+def _full_dispatcher(shape, fill_value, dtype=None, order=None, *, like=None):
+ return (like,)
+
+
+@set_array_function_like_doc
@set_module('numpy')
-def full(shape, fill_value, dtype=None, order='C'):
+def full(shape, fill_value, dtype=None, order='C', *, like=None):
"""
Return a new array of given shape and type, filled with `fill_value`.
@@ -282,6 +303,9 @@ def full(shape, fill_value, dtype=None, order='C'):
order : {'C', 'F'}, optional
Whether to store multidimensional data in C- or Fortran-contiguous
(row- or column-wise) order in memory.
+ ${ARRAY_FUNCTION_LIKE}
+
+ .. versionadded:: 1.20.0
Returns
-------
@@ -309,6 +333,9 @@ def full(shape, fill_value, dtype=None, order='C'):
[1, 2]])
"""
+ if like is not None:
+ return _full_with_like(shape, fill_value, dtype=dtype, order=order, like=like)
+
if dtype is None:
fill_value = asarray(fill_value)
dtype = fill_value.dtype
@@ -317,6 +344,11 @@ def full(shape, fill_value, dtype=None, order='C'):
return a
+_full_with_like = array_function_dispatch(
+ _full_dispatcher
+)(full)
+
+
def _full_like_dispatcher(a, fill_value, dtype=None, order=None, subok=None, shape=None):
return (a,)
@@ -1754,8 +1786,13 @@ def indices(dimensions, dtype=int, sparse=False):
return res
+def _fromfunction_dispatcher(function, shape, *, dtype=None, like=None, **kwargs):
+ return (like,)
+
+
+@set_array_function_like_doc
@set_module('numpy')
-def fromfunction(function, shape, *, dtype=float, **kwargs):
+def fromfunction(function, shape, *, dtype=float, like=None, **kwargs):
"""
Construct an array by executing a function over each coordinate.
@@ -1776,6 +1813,9 @@ def fromfunction(function, shape, *, dtype=float, **kwargs):
dtype : data-type, optional
Data-type of the coordinate arrays passed to `function`.
By default, `dtype` is float.
+ ${ARRAY_FUNCTION_LIKE}
+
+ .. versionadded:: 1.20.0
Returns
-------
@@ -1806,10 +1846,18 @@ def fromfunction(function, shape, *, dtype=float, **kwargs):
[2, 3, 4]])
"""
+ if like is not None:
+ return _fromfunction_with_like(function, shape, dtype=dtype, like=like, **kwargs)
+
args = indices(shape, dtype=dtype)
return function(*args, **kwargs)
+_fromfunction_with_like = array_function_dispatch(
+ _fromfunction_dispatcher
+)(fromfunction)
+
+
def _frombuffer(buf, dtype, shape, order):
return frombuffer(buf, dtype=dtype).reshape(shape, order=order)
@@ -2082,8 +2130,13 @@ def _maketup(descr, val):
return tuple(res)
+def _identity_dispatcher(n, dtype=None, *, like=None):
+ return (like,)
+
+
+@set_array_function_like_doc
@set_module('numpy')
-def identity(n, dtype=None):
+def identity(n, dtype=None, *, like=None):
"""
Return the identity array.
@@ -2096,6 +2149,9 @@ def identity(n, dtype=None):
Number of rows (and columns) in `n` x `n` output.
dtype : data-type, optional
Data-type of the output. Defaults to ``float``.
+ ${ARRAY_FUNCTION_LIKE}
+
+ .. versionadded:: 1.20.0
Returns
-------
@@ -2111,8 +2167,16 @@ def identity(n, dtype=None):
[0., 0., 1.]])
"""
+ if like is not None:
+ return _identity_with_like(n, dtype=dtype, like=like)
+
from numpy import eye
- return eye(n, dtype=dtype)
+ return eye(n, dtype=dtype, like=like)
+
+
+_identity_with_like = array_function_dispatch(
+ _identity_dispatcher
+)(identity)
def _allclose_dispatcher(a, b, rtol=None, atol=None, equal_nan=None):
diff --git a/numpy/core/overrides.py b/numpy/core/overrides.py
index 816b11293..c2b5fb7fa 100644
--- a/numpy/core/overrides.py
+++ b/numpy/core/overrides.py
@@ -12,6 +12,27 @@ from numpy.compat._inspect import getargspec
ARRAY_FUNCTION_ENABLED = bool(
int(os.environ.get('NUMPY_EXPERIMENTAL_ARRAY_FUNCTION', 1)))
+array_function_like_doc = (
+ """like : array_like
+ Reference object to allow the creation of arrays which are not
+ NumPy arrays. If an array-like passed in as ``like`` supports
+ the ``__array_function__`` protocol, the result will be defined
+ by it. In this case, it ensures the creation of an array object
+ compatible with that passed in via this argument.
+
+ .. note::
+ The ``like`` keyword is an experimental feature pending
+ acceptance of :ref:`NEP 35 <NEP35>`."""
+)
+
+def set_array_function_like_doc(public_api):
+ if public_api.__doc__ is not None:
+ public_api.__doc__ = public_api.__doc__.replace(
+ "${ARRAY_FUNCTION_LIKE}",
+ array_function_like_doc,
+ )
+ return public_api
+
add_docstring(
implement_array_function,
diff --git a/numpy/core/records.py b/numpy/core/records.py
index e95be0e3f..c2f6c6965 100644
--- a/numpy/core/records.py
+++ b/numpy/core/records.py
@@ -374,7 +374,7 @@ class recarray(ndarray):
See Also
--------
- rec.fromrecords : Construct a record array from data.
+ core.records.fromrecords : Construct a record array from data.
record : fundamental data-type for `recarray`.
format_parser : determine a data-type from formats, names, titles.
@@ -630,7 +630,7 @@ def fromarrays(arrayList, dtype=None, shape=None, formats=None,
>>> x1[1]=34
>>> r.a
array([1, 2, 3, 4])
-
+
>>> x1 = np.array([1, 2, 3, 4])
>>> x2 = np.array(['a', 'dd', 'xyz', '12'])
>>> x3 = np.array([1.1, 2, 3,4])
@@ -911,7 +911,7 @@ def fromfile(fd, dtype=None, shape=None, offset=0, formats=None,
shape = (shape,)
if hasattr(fd, 'readinto'):
- # GH issue 2504. fd supports io.RawIOBase or io.BufferedIOBase interface.
+ # GH issue 2504. fd supports io.RawIOBase or io.BufferedIOBase interface.
# Example of fd: gzip, BytesIO, BufferedReader
# file already opened
ctx = contextlib_nullcontext(fd)
@@ -958,7 +958,7 @@ def array(obj, dtype=None, shape=None, offset=0, strides=None, formats=None,
"""
Construct a record array from a wide-variety of objects.
- A general-purpose record array constructor that dispatches to the
+ A general-purpose record array constructor that dispatches to the
appropriate `recarray` creation function based on the inputs (see Notes).
Parameters
@@ -996,7 +996,7 @@ def array(obj, dtype=None, shape=None, offset=0, strides=None, formats=None,
`obj` is a string, then call the `fromstring` constructor. If `obj` is a
list or a tuple, then if the first object is an `~numpy.ndarray`, call
`fromarrays`, otherwise call `fromrecords`. If `obj` is a
- `~numpy.recarray`, then make a copy of the data in the recarray
+ `~numpy.recarray`, then make a copy of the data in the recarray
(if ``copy=True``) and use the new formats, names, and titles. If `obj`
is a file, then call `fromfile`. Finally, if obj is an `ndarray`, then
return ``obj.view(recarray)``, making a copy of the data if ``copy=True``.
diff --git a/numpy/core/setup.py b/numpy/core/setup.py
index aede12080..92dcacede 100644
--- a/numpy/core/setup.py
+++ b/numpy/core/setup.py
@@ -9,7 +9,7 @@ from os.path import join
from numpy.distutils import log
from distutils.dep_util import newer
-from distutils.sysconfig import get_config_var
+from sysconfig import get_config_var
from numpy.compat import npy_load_module
from setup_common import * # noqa: F403
@@ -687,26 +687,6 @@ def configuration(parent_package='',top_path=None):
subst_dict)
#######################################################################
- # npysort library #
- #######################################################################
-
- # This library is created for the build but it is not installed
- npysort_sources = [join('src', 'common', 'npy_sort.h.src'),
- join('src', 'npysort', 'quicksort.c.src'),
- join('src', 'npysort', 'mergesort.c.src'),
- join('src', 'npysort', 'timsort.c.src'),
- join('src', 'npysort', 'heapsort.c.src'),
- join('src', 'npysort', 'radixsort.c.src'),
- join('src', 'common', 'npy_partition.h.src'),
- join('src', 'npysort', 'selection.c.src'),
- join('src', 'common', 'npy_binsearch.h.src'),
- join('src', 'npysort', 'binsearch.c.src'),
- ]
- config.add_library('npysort',
- sources=npysort_sources,
- include_dirs=[])
-
- #######################################################################
# multiarray_tests module #
#######################################################################
@@ -790,6 +770,8 @@ def configuration(parent_package='',top_path=None):
join('src', 'multiarray', 'descriptor.h'),
join('src', 'multiarray', 'dtypemeta.h'),
join('src', 'multiarray', 'dragon4.h'),
+ join('src', 'multiarray', 'einsum_debug.h'),
+ join('src', 'multiarray', 'einsum_sumprod.h'),
join('src', 'multiarray', 'getset.h'),
join('src', 'multiarray', 'hashdescr.h'),
join('src', 'multiarray', 'iterators.h'),
@@ -825,7 +807,7 @@ def configuration(parent_package='',top_path=None):
join('include', 'numpy', 'npy_1_7_deprecated_api.h'),
# add library sources as distuils does not consider libraries
# dependencies
- ] + npysort_sources + npymath_sources
+ ] + npymath_sources
multiarray_src = [
join('src', 'multiarray', 'abstractdtypes.c'),
@@ -853,6 +835,7 @@ def configuration(parent_package='',top_path=None):
join('src', 'multiarray', 'dragon4.c'),
join('src', 'multiarray', 'dtype_transfer.c'),
join('src', 'multiarray', 'einsum.c.src'),
+ join('src', 'multiarray', 'einsum_sumprod.c.src'),
join('src', 'multiarray', 'flagsobject.c'),
join('src', 'multiarray', 'getset.c'),
join('src', 'multiarray', 'hashdescr.c'),
@@ -877,6 +860,16 @@ def configuration(parent_package='',top_path=None):
join('src', 'multiarray', 'typeinfo.c'),
join('src', 'multiarray', 'usertypes.c'),
join('src', 'multiarray', 'vdot.c'),
+ join('src', 'common', 'npy_sort.h.src'),
+ join('src', 'npysort', 'quicksort.c.src'),
+ join('src', 'npysort', 'mergesort.c.src'),
+ join('src', 'npysort', 'timsort.c.src'),
+ join('src', 'npysort', 'heapsort.c.src'),
+ join('src', 'npysort', 'radixsort.c.src'),
+ join('src', 'common', 'npy_partition.h.src'),
+ join('src', 'npysort', 'selection.c.src'),
+ join('src', 'common', 'npy_binsearch.h.src'),
+ join('src', 'npysort', 'binsearch.c.src'),
]
#######################################################################
@@ -927,7 +920,7 @@ def configuration(parent_package='',top_path=None):
config.add_extension('_multiarray_umath',
sources=multiarray_src + umath_src +
- npymath_sources + common_src +
+ common_src +
[generate_config_h,
generate_numpyconfig_h,
generate_numpy_api,
@@ -938,7 +931,7 @@ def configuration(parent_package='',top_path=None):
],
depends=deps + multiarray_deps + umath_deps +
common_deps,
- libraries=['npymath', 'npysort'],
+ libraries=['npymath'],
extra_info=extra_info)
#######################################################################
diff --git a/numpy/core/shape_base.py b/numpy/core/shape_base.py
index 7a76bbf9d..e4dc30d4c 100644
--- a/numpy/core/shape_base.py
+++ b/numpy/core/shape_base.py
@@ -539,7 +539,8 @@ def _accumulate(values):
def _concatenate_shapes(shapes, axis):
"""Given array shapes, return the resulting shape and slices prefixes.
- These help in nested concatation.
+ These help in nested concatenation.
+
Returns
-------
shape: tuple of int
diff --git a/numpy/core/src/common/array_assign.c b/numpy/core/src/common/array_assign.c
index d626d1260..67abcae24 100644
--- a/numpy/core/src/common/array_assign.c
+++ b/numpy/core/src/common/array_assign.c
@@ -14,7 +14,6 @@
#define NPY_NO_DEPRECATED_API NPY_API_VERSION
#define _MULTIARRAYMODULE
#include <numpy/ndarraytypes.h>
-
#include "npy_config.h"
#include "npy_pycompat.h"
@@ -67,12 +66,12 @@ broadcast_strides(int ndim, npy_intp const *shape,
broadcast_error: {
PyObject *errmsg;
- errmsg = PyUString_FromFormat("could not broadcast %s from shape ",
+ errmsg = PyUnicode_FromFormat("could not broadcast %s from shape ",
strides_name);
PyUString_ConcatAndDel(&errmsg,
build_shape_string(strides_ndim, strides_shape));
PyUString_ConcatAndDel(&errmsg,
- PyUString_FromString(" into shape "));
+ PyUnicode_FromString(" into shape "));
PyUString_ConcatAndDel(&errmsg,
build_shape_string(ndim, shape));
PyErr_SetObject(PyExc_ValueError, errmsg);
diff --git a/numpy/core/src/common/npy_binsearch.h.src b/numpy/core/src/common/npy_binsearch.h.src
index ce3b34b0e..052c44482 100644
--- a/numpy/core/src/common/npy_binsearch.h.src
+++ b/numpy/core/src/common/npy_binsearch.h.src
@@ -40,12 +40,12 @@ typedef struct {
* cfloat, cdouble, clongdouble, datetime, timedelta#
*/
-NPY_VISIBILITY_HIDDEN void
+NPY_NO_EXPORT void
binsearch_@side@_@suff@(const char *arr, const char *key, char *ret,
npy_intp arr_len, npy_intp key_len,
npy_intp arr_str, npy_intp key_str, npy_intp ret_str,
PyArrayObject *unused);
-NPY_VISIBILITY_HIDDEN int
+NPY_NO_EXPORT int
argbinsearch_@side@_@suff@(const char *arr, const char *key,
const char *sort, char *ret,
npy_intp arr_len, npy_intp key_len,
@@ -54,12 +54,12 @@ argbinsearch_@side@_@suff@(const char *arr, const char *key,
PyArrayObject *unused);
/**end repeat1**/
-NPY_VISIBILITY_HIDDEN void
+NPY_NO_EXPORT void
npy_binsearch_@side@(const char *arr, const char *key, char *ret,
npy_intp arr_len, npy_intp key_len,
npy_intp arr_str, npy_intp key_str,
npy_intp ret_str, PyArrayObject *cmp);
-NPY_VISIBILITY_HIDDEN int
+NPY_NO_EXPORT int
npy_argbinsearch_@side@(const char *arr, const char *key,
const char *sort, char *ret,
npy_intp arr_len, npy_intp key_len,
diff --git a/numpy/core/src/common/npy_cblas.h b/numpy/core/src/common/npy_cblas.h
index 97308238a..072993ec2 100644
--- a/numpy/core/src/common/npy_cblas.h
+++ b/numpy/core/src/common/npy_cblas.h
@@ -47,8 +47,10 @@ enum CBLAS_SIDE {CblasLeft=141, CblasRight=142};
#ifdef HAVE_BLAS_ILP64
#define CBLAS_INT npy_int64
+#define CBLAS_INT_MAX NPY_MAX_INT64
#else
#define CBLAS_INT int
+#define CBLAS_INT_MAX INT_MAX
#endif
#define BLASNAME(name) CBLAS_FUNC(name)
@@ -59,6 +61,39 @@ enum CBLAS_SIDE {CblasLeft=141, CblasRight=142};
#undef BLASINT
#undef BLASNAME
+
+/*
+ * Convert NumPy stride to BLAS stride. Returns 0 if conversion cannot be done
+ * (BLAS won't handle negative or zero strides the way we want).
+ */
+static NPY_INLINE CBLAS_INT
+blas_stride(npy_intp stride, unsigned itemsize)
+{
+ /*
+ * Should probably check pointer alignment also, but this may cause
+ * problems if we require complex to be 16 byte aligned.
+ */
+ if (stride > 0 && (stride % itemsize) == 0) {
+ stride /= itemsize;
+ if (stride <= CBLAS_INT_MAX) {
+ return stride;
+ }
+ }
+ return 0;
+}
+
+/*
+ * Define a chunksize for CBLAS.
+ *
+ * The chunksize is the greatest power of two less than CBLAS_INT_MAX.
+ */
+#if NPY_MAX_INTP > CBLAS_INT_MAX
+# define NPY_CBLAS_CHUNK (CBLAS_INT_MAX / 2 + 1)
+#else
+# define NPY_CBLAS_CHUNK NPY_MAX_INTP
+#endif
+
+
#ifdef __cplusplus
}
#endif
diff --git a/numpy/core/src/common/npy_cpu_dispatch.h b/numpy/core/src/common/npy_cpu_dispatch.h
index 846d1ebb9..274520852 100644
--- a/numpy/core/src/common/npy_cpu_dispatch.h
+++ b/numpy/core/src/common/npy_cpu_dispatch.h
@@ -217,44 +217,49 @@
* func_type the_callee(const int *src, int *dst, func_type *cb)
* {
* // direct call
- * NPY_CPU_DISPATCH_CALL(dispatch_me, (src, dst))
+ * NPY_CPU_DISPATCH_CALL(dispatch_me, (src, dst));
* // assign the pointer
- * NPY_CPU_DISPATCH_CALL(*cb = dispatch_me)
+ * *cb = NPY_CPU_DISPATCH_CALL(dispatch_me);
+ * // or
+ * NPY_CPU_DISPATCH_CALL(*cb = dispatch_me);
* // return the pointer
- * NPY_CPU_DISPATCH_CALL(return dispatch_me)
+ * return NPY_CPU_DISPATCH_CALL(dispatch_me);
* }
*/
#define NPY_CPU_DISPATCH_CALL(...) \
- if (0) {/*DUMMY*/} \
NPY__CPU_DISPATCH_CALL(NPY_CPU_HAVE, NPY_CPU_DISPATCH_CALL_CB_, __VA_ARGS__) \
NPY__CPU_DISPATCH_BASELINE_CALL(NPY_CPU_DISPATCH_CALL_BASE_CB_, __VA_ARGS__)
// Preprocessor callbacks
#define NPY_CPU_DISPATCH_CALL_CB_(TESTED_FEATURES, TARGET_NAME, LEFT, ...) \
- else if (TESTED_FEATURES) { NPY_CAT(NPY_CAT(LEFT, _), TARGET_NAME) __VA_ARGS__; }
+ (TESTED_FEATURES) ? (NPY_CAT(NPY_CAT(LEFT, _), TARGET_NAME) __VA_ARGS__) :
#define NPY_CPU_DISPATCH_CALL_BASE_CB_(LEFT, ...) \
- else { LEFT __VA_ARGS__; }
+ (LEFT __VA_ARGS__)
/**
* Macro NPY_CPU_DISPATCH_CALL_XB(LEFT, ...)
*
- * Same as `NPY_CPU_DISPATCH_DECLARE` but exclude the baseline declration even
- * if it was provided within the configration statments.
+ * Same as `NPY_CPU_DISPATCH_CALL` but exclude the baseline call even
+ * if it was provided within the configuration statements.
+ * Returns void.
*/
+#define NPY_CPU_DISPATCH_CALL_XB_CB_(TESTED_FEATURES, TARGET_NAME, LEFT, ...) \
+ (TESTED_FEATURES) ? (void) (NPY_CAT(NPY_CAT(LEFT, _), TARGET_NAME) __VA_ARGS__) :
#define NPY_CPU_DISPATCH_CALL_XB(...) \
- if (0) {/*DUMMY*/} \
- NPY__CPU_DISPATCH_CALL(NPY_CPU_HAVE, NPY_CPU_DISPATCH_CALL_CB_, __VA_ARGS__)
+ NPY__CPU_DISPATCH_CALL(NPY_CPU_HAVE, NPY_CPU_DISPATCH_CALL_XB_CB_, __VA_ARGS__) \
+ ((void) 0 /* discarded expression value */)
/**
* Macro NPY_CPU_DISPATCH_CALL_ALL(LEFT, ...)
*
* Same as `NPY_CPU_DISPATCH_CALL` but dispatching all the required optimizations for
* the exported functions and variables instead of highest interested one.
+ * Returns void.
*/
#define NPY_CPU_DISPATCH_CALL_ALL(...) \
- NPY__CPU_DISPATCH_CALL(NPY_CPU_HAVE, NPY_CPU_DISPATCH_CALL_ALL_CB_, __VA_ARGS__) \
- NPY__CPU_DISPATCH_BASELINE_CALL(NPY_CPU_DISPATCH_CALL_ALL_BASE_CB_, __VA_ARGS__)
+ (NPY__CPU_DISPATCH_CALL(NPY_CPU_HAVE, NPY_CPU_DISPATCH_CALL_ALL_CB_, __VA_ARGS__) \
+ NPY__CPU_DISPATCH_BASELINE_CALL(NPY_CPU_DISPATCH_CALL_ALL_BASE_CB_, __VA_ARGS__))
// Preprocessor callbacks
#define NPY_CPU_DISPATCH_CALL_ALL_CB_(TESTED_FEATURES, TARGET_NAME, LEFT, ...) \
- if (TESTED_FEATURES) { NPY_CAT(NPY_CAT(LEFT, _), TARGET_NAME) __VA_ARGS__; }
+ ((TESTED_FEATURES) ? (NPY_CAT(NPY_CAT(LEFT, _), TARGET_NAME) __VA_ARGS__) : (void) 0),
#define NPY_CPU_DISPATCH_CALL_ALL_BASE_CB_(LEFT, ...) \
- { LEFT __VA_ARGS__; }
+ ( LEFT __VA_ARGS__ )
#endif // NPY_CPU_DISPATCH_H_
diff --git a/numpy/core/src/common/npy_cpu_features.c.src b/numpy/core/src/common/npy_cpu_features.c.src
index dfcf98c74..69bbc83a2 100644
--- a/numpy/core/src/common/npy_cpu_features.c.src
+++ b/numpy/core/src/common/npy_cpu_features.c.src
@@ -19,11 +19,11 @@ npy__cpu_init_features(void);
* Multiple features can be present, and separated by space, comma, or tab.
* Raises an error if parsing fails or if the feature was not enabled
*/
-static void
+static int
npy__cpu_try_disable_env(void);
/* Ensure the build's CPU baseline features are supported at runtime */
-static void
+static int
npy__cpu_validate_baseline(void);
/******************** Public Definitions *********************/
@@ -40,11 +40,12 @@ NPY_VISIBILITY_HIDDEN int
npy_cpu_init(void)
{
npy__cpu_init_features();
- npy__cpu_validate_baseline();
- npy__cpu_try_disable_env();
-
- if (PyErr_Occurred())
+ if (npy__cpu_validate_baseline() < 0) {
+ return -1;
+ }
+ if (npy__cpu_try_disable_env() < 0) {
return -1;
+ }
return 0;
}
@@ -142,7 +143,7 @@ npy__cpu_dispatch_fid(const char *feature)
return 0;
}
-static void
+static int
npy__cpu_validate_baseline(void)
{
#if !defined(NPY_DISABLE_OPTIMIZATION) && NPY_WITH_CPU_BASELINE_N > 0
@@ -165,16 +166,18 @@ npy__cpu_validate_baseline(void)
"(" NPY_WITH_CPU_BASELINE ") but your machine doesn't support:\n(%s).",
baseline_failure
);
+ return -1;
}
#endif
+ return 0;
}
-static void
+static int
npy__cpu_try_disable_env(void)
{
char *disenv = getenv("NPY_DISABLE_CPU_FEATURES");
if (disenv == NULL || disenv[0] == 0) {
- return;
+ return 0;
}
#define NPY__CPU_ENV_ERR_HEAD \
"During parsing environment variable 'NPY_DISABLE_CPU_FEATURES':\n"
@@ -187,7 +190,7 @@ npy__cpu_try_disable_env(void)
"Length of environment variable 'NPY_DISABLE_CPU_FEATURES' is %d, only %d accepted",
var_len, NPY__MAX_VAR_LEN - 1
);
- return;
+ return -1;
}
char disable_features[NPY__MAX_VAR_LEN];
memcpy(disable_features, disenv, var_len);
@@ -210,7 +213,7 @@ npy__cpu_try_disable_env(void)
"(" NPY_WITH_CPU_BASELINE ").",
feature
);
- break;
+ return -1;
}
// check if the feature is part of dispatched features
int feature_id = npy__cpu_dispatch_fid(feature);
@@ -236,36 +239,43 @@ npy__cpu_try_disable_env(void)
*nexist_cur = '\0';
if (nexist[0] != '\0') {
*(nexist_cur-1) = '\0'; // trim the last space
- PyErr_WarnFormat(PyExc_RuntimeWarning, 1,
- NPY__CPU_ENV_ERR_HEAD
- "You cannot disable CPU features (%s), since "
- "they are not part of the dispatched optimizations\n"
- "(" NPY_WITH_CPU_DISPATCH ").",
- nexist
- );
+ if (PyErr_WarnFormat(PyExc_RuntimeWarning, 1,
+ NPY__CPU_ENV_ERR_HEAD
+ "You cannot disable CPU features (%s), since "
+ "they are not part of the dispatched optimizations\n"
+ "(" NPY_WITH_CPU_DISPATCH ").",
+ nexist
+ ) < 0) {
+ return -1;
+ }
}
*notsupp_cur = '\0';
if (notsupp[0] != '\0') {
*(notsupp_cur-1) = '\0'; // trim the last space
- PyErr_WarnFormat(PyExc_RuntimeWarning, 1,
- NPY__CPU_ENV_ERR_HEAD
- "You cannot disable CPU features (%s), since "
- "they are not supported by your machine.",
- notsupp
- );
+ if (PyErr_WarnFormat(PyExc_RuntimeWarning, 1,
+ NPY__CPU_ENV_ERR_HEAD
+ "You cannot disable CPU features (%s), since "
+ "they are not supported by your machine.",
+ notsupp
+ ) < 0) {
+ return -1;
+ }
}
#else
- PyErr_WarnFormat(PyExc_RuntimeWarning, 1,
- NPY__CPU_ENV_ERR_HEAD
- "You cannot use environment variable 'NPY_DISABLE_CPU_FEATURES', since "
- #ifdef NPY_DISABLE_OPTIMIZATION
- "the NumPy library was compiled with optimization disabled."
- #else
- "the NumPy library was compiled without any dispatched optimizations."
- #endif
- );
+ if (PyErr_WarnFormat(PyExc_RuntimeWarning, 1,
+ NPY__CPU_ENV_ERR_HEAD
+ "You cannot use environment variable 'NPY_DISABLE_CPU_FEATURES', since "
+ #ifdef NPY_DISABLE_OPTIMIZATION
+ "the NumPy library was compiled with optimization disabled."
+ #else
+ "the NumPy library was compiled without any dispatched optimizations."
+ #endif
+ ) < 0) {
+ return -1;
+ }
#endif
+ return 0;
}
/****************************************************************
diff --git a/numpy/core/src/common/npy_partition.h.src b/numpy/core/src/common/npy_partition.h.src
index 97dc2536b..72c2095f1 100644
--- a/numpy/core/src/common/npy_partition.h.src
+++ b/numpy/core/src/common/npy_partition.h.src
@@ -42,12 +42,12 @@
* npy_cdouble, npy_clongdouble#
*/
-NPY_VISIBILITY_HIDDEN int introselect_@suff@(@type@ *v, npy_intp num,
+NPY_NO_EXPORT int introselect_@suff@(@type@ *v, npy_intp num,
npy_intp kth,
npy_intp * pivots,
npy_intp * npiv,
void *NOT_USED);
-NPY_VISIBILITY_HIDDEN int aintroselect_@suff@(@type@ *v, npy_intp* tosort, npy_intp num,
+NPY_NO_EXPORT int aintroselect_@suff@(@type@ *v, npy_intp* tosort, npy_intp num,
npy_intp kth,
npy_intp * pivots,
npy_intp * npiv,
diff --git a/numpy/core/src/common/npy_sort.h.src b/numpy/core/src/common/npy_sort.h.src
index 16a105499..ddbde0c9b 100644
--- a/numpy/core/src/common/npy_sort.h.src
+++ b/numpy/core/src/common/npy_sort.h.src
@@ -33,14 +33,14 @@ static NPY_INLINE int npy_get_msb(npy_uintp unum)
* cfloat, cdouble, clongdouble, datetime, timedelta#
*/
-int quicksort_@suff@(void *vec, npy_intp cnt, void *null);
-int heapsort_@suff@(void *vec, npy_intp cnt, void *null);
-int mergesort_@suff@(void *vec, npy_intp cnt, void *null);
-int timsort_@suff@(void *vec, npy_intp cnt, void *null);
-int aquicksort_@suff@(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int aheapsort_@suff@(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int amergesort_@suff@(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int atimsort_@suff@(void *vec, npy_intp *ind, npy_intp cnt, void *null);
+NPY_NO_EXPORT int quicksort_@suff@(void *vec, npy_intp cnt, void *null);
+NPY_NO_EXPORT int heapsort_@suff@(void *vec, npy_intp cnt, void *null);
+NPY_NO_EXPORT int mergesort_@suff@(void *vec, npy_intp cnt, void *null);
+NPY_NO_EXPORT int timsort_@suff@(void *vec, npy_intp cnt, void *null);
+NPY_NO_EXPORT int aquicksort_@suff@(void *vec, npy_intp *ind, npy_intp cnt, void *null);
+NPY_NO_EXPORT int aheapsort_@suff@(void *vec, npy_intp *ind, npy_intp cnt, void *null);
+NPY_NO_EXPORT int amergesort_@suff@(void *vec, npy_intp *ind, npy_intp cnt, void *null);
+NPY_NO_EXPORT int atimsort_@suff@(void *vec, npy_intp *ind, npy_intp cnt, void *null);
/**end repeat**/
@@ -50,8 +50,8 @@ int atimsort_@suff@(void *vec, npy_intp *ind, npy_intp cnt, void *null);
* longlong, ulonglong#
*/
-int radixsort_@suff@(void *vec, npy_intp cnt, void *null);
-int aradixsort_@suff@(void *vec, npy_intp *ind, npy_intp cnt, void *null);
+NPY_NO_EXPORT int radixsort_@suff@(void *vec, npy_intp cnt, void *null);
+NPY_NO_EXPORT int aradixsort_@suff@(void *vec, npy_intp *ind, npy_intp cnt, void *null);
/**end repeat**/
@@ -69,14 +69,14 @@ int aradixsort_@suff@(void *vec, npy_intp *ind, npy_intp cnt, void *null);
* #suff = string, unicode#
*/
-int quicksort_@suff@(void *vec, npy_intp cnt, void *arr);
-int heapsort_@suff@(void *vec, npy_intp cnt, void *arr);
-int mergesort_@suff@(void *vec, npy_intp cnt, void *arr);
-int timsort_@suff@(void *vec, npy_intp cnt, void *arr);
-int aquicksort_@suff@(void *vec, npy_intp *ind, npy_intp cnt, void *arr);
-int aheapsort_@suff@(void *vec, npy_intp *ind, npy_intp cnt, void *arr);
-int amergesort_@suff@(void *vec, npy_intp *ind, npy_intp cnt, void *arr);
-int atimsort_@suff@(void *vec, npy_intp *ind, npy_intp cnt, void *arr);
+NPY_NO_EXPORT int quicksort_@suff@(void *vec, npy_intp cnt, void *arr);
+NPY_NO_EXPORT int heapsort_@suff@(void *vec, npy_intp cnt, void *arr);
+NPY_NO_EXPORT int mergesort_@suff@(void *vec, npy_intp cnt, void *arr);
+NPY_NO_EXPORT int timsort_@suff@(void *vec, npy_intp cnt, void *arr);
+NPY_NO_EXPORT int aquicksort_@suff@(void *vec, npy_intp *ind, npy_intp cnt, void *arr);
+NPY_NO_EXPORT int aheapsort_@suff@(void *vec, npy_intp *ind, npy_intp cnt, void *arr);
+NPY_NO_EXPORT int amergesort_@suff@(void *vec, npy_intp *ind, npy_intp cnt, void *arr);
+NPY_NO_EXPORT int atimsort_@suff@(void *vec, npy_intp *ind, npy_intp cnt, void *arr);
/**end repeat**/
@@ -88,13 +88,13 @@ int atimsort_@suff@(void *vec, npy_intp *ind, npy_intp cnt, void *arr);
*/
-int npy_quicksort(void *vec, npy_intp cnt, void *arr);
-int npy_heapsort(void *vec, npy_intp cnt, void *arr);
-int npy_mergesort(void *vec, npy_intp cnt, void *arr);
-int npy_timsort(void *vec, npy_intp cnt, void *arr);
-int npy_aquicksort(void *vec, npy_intp *ind, npy_intp cnt, void *arr);
-int npy_aheapsort(void *vec, npy_intp *ind, npy_intp cnt, void *arr);
-int npy_amergesort(void *vec, npy_intp *ind, npy_intp cnt, void *arr);
-int npy_atimsort(void *vec, npy_intp *ind, npy_intp cnt, void *arr);
+NPY_NO_EXPORT int npy_quicksort(void *vec, npy_intp cnt, void *arr);
+NPY_NO_EXPORT int npy_heapsort(void *vec, npy_intp cnt, void *arr);
+NPY_NO_EXPORT int npy_mergesort(void *vec, npy_intp cnt, void *arr);
+NPY_NO_EXPORT int npy_timsort(void *vec, npy_intp cnt, void *arr);
+NPY_NO_EXPORT int npy_aquicksort(void *vec, npy_intp *ind, npy_intp cnt, void *arr);
+NPY_NO_EXPORT int npy_aheapsort(void *vec, npy_intp *ind, npy_intp cnt, void *arr);
+NPY_NO_EXPORT int npy_amergesort(void *vec, npy_intp *ind, npy_intp cnt, void *arr);
+NPY_NO_EXPORT int npy_atimsort(void *vec, npy_intp *ind, npy_intp cnt, void *arr);
#endif
diff --git a/numpy/core/src/common/simd/avx2/arithmetic.h b/numpy/core/src/common/simd/avx2/arithmetic.h
index 9d8b4ab5e..4af9e4d17 100644
--- a/numpy/core/src/common/simd/avx2/arithmetic.h
+++ b/numpy/core/src/common/simd/avx2/arithmetic.h
@@ -72,4 +72,48 @@
#define npyv_div_f32 _mm256_div_ps
#define npyv_div_f64 _mm256_div_pd
+/***************************
+ * FUSED
+ ***************************/
+#ifdef NPY_HAVE_FMA3
+ // multiply and add, a*b + c
+ #define npyv_muladd_f32 _mm256_fmadd_ps
+ #define npyv_muladd_f64 _mm256_fmadd_pd
+ // multiply and subtract, a*b - c
+ #define npyv_mulsub_f32 _mm256_fmsub_ps
+ #define npyv_mulsub_f64 _mm256_fmsub_pd
+ // negate multiply and add, -(a*b) + c
+ #define npyv_nmuladd_f32 _mm256_fnmadd_ps
+ #define npyv_nmuladd_f64 _mm256_fnmadd_pd
+ // negate multiply and subtract, -(a*b) - c
+ #define npyv_nmulsub_f32 _mm256_fnmsub_ps
+ #define npyv_nmulsub_f64 _mm256_fnmsub_pd
+#else
+ // multiply and add, a*b + c
+ NPY_FINLINE npyv_f32 npyv_muladd_f32(npyv_f32 a, npyv_f32 b, npyv_f32 c)
+ { return npyv_add_f32(npyv_mul_f32(a, b), c); }
+ NPY_FINLINE npyv_f64 npyv_muladd_f64(npyv_f64 a, npyv_f64 b, npyv_f64 c)
+ { return npyv_add_f64(npyv_mul_f64(a, b), c); }
+ // multiply and subtract, a*b - c
+ NPY_FINLINE npyv_f32 npyv_mulsub_f32(npyv_f32 a, npyv_f32 b, npyv_f32 c)
+ { return npyv_sub_f32(npyv_mul_f32(a, b), c); }
+ NPY_FINLINE npyv_f64 npyv_mulsub_f64(npyv_f64 a, npyv_f64 b, npyv_f64 c)
+ { return npyv_sub_f64(npyv_mul_f64(a, b), c); }
+ // negate multiply and add, -(a*b) + c
+ NPY_FINLINE npyv_f32 npyv_nmuladd_f32(npyv_f32 a, npyv_f32 b, npyv_f32 c)
+ { return npyv_sub_f32(c, npyv_mul_f32(a, b)); }
+ NPY_FINLINE npyv_f64 npyv_nmuladd_f64(npyv_f64 a, npyv_f64 b, npyv_f64 c)
+ { return npyv_sub_f64(c, npyv_mul_f64(a, b)); }
+ // negate multiply and subtract, -(a*b) - c
+ NPY_FINLINE npyv_f32 npyv_nmulsub_f32(npyv_f32 a, npyv_f32 b, npyv_f32 c)
+ {
+ npyv_f32 neg_a = npyv_xor_f32(a, npyv_setall_f32(-0.0f));
+ return npyv_sub_f32(npyv_mul_f32(neg_a, b), c);
+ }
+ NPY_FINLINE npyv_f64 npyv_nmulsub_f64(npyv_f64 a, npyv_f64 b, npyv_f64 c)
+ {
+ npyv_f64 neg_a = npyv_xor_f64(a, npyv_setall_f64(-0.0));
+ return npyv_sub_f64(npyv_mul_f64(neg_a, b), c);
+ }
+#endif // !NPY_HAVE_FMA3
#endif // _NPY_SIMD_AVX2_ARITHMETIC_H
diff --git a/numpy/core/src/common/simd/avx512/arithmetic.h b/numpy/core/src/common/simd/avx512/arithmetic.h
index fcaef0efd..824ae818e 100644
--- a/numpy/core/src/common/simd/avx512/arithmetic.h
+++ b/numpy/core/src/common/simd/avx512/arithmetic.h
@@ -113,4 +113,20 @@ NPY_FINLINE __m512i npyv_mul_u8(__m512i a, __m512i b)
#define npyv_div_f32 _mm512_div_ps
#define npyv_div_f64 _mm512_div_pd
+/***************************
+ * FUSED
+ ***************************/
+// multiply and add, a*b + c
+#define npyv_muladd_f32 _mm512_fmadd_ps
+#define npyv_muladd_f64 _mm512_fmadd_pd
+// multiply and subtract, a*b - c
+#define npyv_mulsub_f32 _mm512_fmsub_ps
+#define npyv_mulsub_f64 _mm512_fmsub_pd
+// negate multiply and add, -(a*b) + c
+#define npyv_nmuladd_f32 _mm512_fnmadd_ps
+#define npyv_nmuladd_f64 _mm512_fnmadd_pd
+// negate multiply and subtract, -(a*b) - c
+#define npyv_nmulsub_f32 _mm512_fnmsub_ps
+#define npyv_nmulsub_f64 _mm512_fnmsub_pd
+
#endif // _NPY_SIMD_AVX512_ARITHMETIC_H
diff --git a/numpy/core/src/common/simd/neon/arithmetic.h b/numpy/core/src/common/simd/neon/arithmetic.h
index ec8b8ecd0..5eeee1bb6 100644
--- a/numpy/core/src/common/simd/neon/arithmetic.h
+++ b/numpy/core/src/common/simd/neon/arithmetic.h
@@ -75,4 +75,47 @@
#endif
#define npyv_div_f64 vdivq_f64
+/***************************
+ * FUSED F32
+ ***************************/
+#ifdef NPY_HAVE_NEON_VFPV4 // FMA
+ // multiply and add, a*b + c
+ NPY_FINLINE npyv_f32 npyv_muladd_f32(npyv_f32 a, npyv_f32 b, npyv_f32 c)
+ { return vfmaq_f32(c, a, b); }
+ // multiply and subtract, a*b - c
+ NPY_FINLINE npyv_f32 npyv_mulsub_f32(npyv_f32 a, npyv_f32 b, npyv_f32 c)
+ { return vfmaq_f32(vnegq_f32(c), a, b); }
+ // negate multiply and add, -(a*b) + c
+ NPY_FINLINE npyv_f32 npyv_nmuladd_f32(npyv_f32 a, npyv_f32 b, npyv_f32 c)
+ { return vfmsq_f32(c, a, b); }
+ // negate multiply and subtract, -(a*b) - c
+ NPY_FINLINE npyv_f32 npyv_nmulsub_f32(npyv_f32 a, npyv_f32 b, npyv_f32 c)
+ { return vfmsq_f32(vnegq_f32(c), a, b); }
+#else
+ // multiply and add, a*b + c
+ NPY_FINLINE npyv_f32 npyv_muladd_f32(npyv_f32 a, npyv_f32 b, npyv_f32 c)
+ { return vmlaq_f32(c, a, b); }
+ // multiply and subtract, a*b - c
+ NPY_FINLINE npyv_f32 npyv_mulsub_f32(npyv_f32 a, npyv_f32 b, npyv_f32 c)
+ { return vmlaq_f32(vnegq_f32(c), a, b); }
+ // negate multiply and add, -(a*b) + c
+ NPY_FINLINE npyv_f32 npyv_nmuladd_f32(npyv_f32 a, npyv_f32 b, npyv_f32 c)
+ { return vmlsq_f32(c, a, b); }
+ // negate multiply and subtract, -(a*b) - c
+ NPY_FINLINE npyv_f32 npyv_nmulsub_f32(npyv_f32 a, npyv_f32 b, npyv_f32 c)
+ { return vmlsq_f32(vnegq_f32(c), a, b); }
+#endif
+/***************************
+ * FUSED F64
+ ***************************/
+#if NPY_SIMD_F64
+ NPY_FINLINE npyv_f64 npyv_muladd_f64(npyv_f64 a, npyv_f64 b, npyv_f64 c)
+ { return vfmaq_f64(c, a, b); }
+ NPY_FINLINE npyv_f64 npyv_mulsub_f64(npyv_f64 a, npyv_f64 b, npyv_f64 c)
+ { return vfmaq_f64(vnegq_f64(c), a, b); }
+ NPY_FINLINE npyv_f64 npyv_nmuladd_f64(npyv_f64 a, npyv_f64 b, npyv_f64 c)
+ { return vfmsq_f64(c, a, b); }
+ NPY_FINLINE npyv_f64 npyv_nmulsub_f64(npyv_f64 a, npyv_f64 b, npyv_f64 c)
+ { return vfmsq_f64(vnegq_f64(c), a, b); }
+#endif // NPY_SIMD_F64
#endif // _NPY_SIMD_NEON_ARITHMETIC_H
diff --git a/numpy/core/src/common/simd/sse/arithmetic.h b/numpy/core/src/common/simd/sse/arithmetic.h
index 12d0af05c..717dacd39 100644
--- a/numpy/core/src/common/simd/sse/arithmetic.h
+++ b/numpy/core/src/common/simd/sse/arithmetic.h
@@ -91,5 +91,60 @@ NPY_FINLINE __m128i npyv_mul_u8(__m128i a, __m128i b)
// TODO: emulate integer division
#define npyv_div_f32 _mm_div_ps
#define npyv_div_f64 _mm_div_pd
-
+/***************************
+ * FUSED
+ ***************************/
+#ifdef NPY_HAVE_FMA3
+ // multiply and add, a*b + c
+ #define npyv_muladd_f32 _mm_fmadd_ps
+ #define npyv_muladd_f64 _mm_fmadd_pd
+ // multiply and subtract, a*b - c
+ #define npyv_mulsub_f32 _mm_fmsub_ps
+ #define npyv_mulsub_f64 _mm_fmsub_pd
+ // negate multiply and add, -(a*b) + c
+ #define npyv_nmuladd_f32 _mm_fnmadd_ps
+ #define npyv_nmuladd_f64 _mm_fnmadd_pd
+ // negate multiply and subtract, -(a*b) - c
+ #define npyv_nmulsub_f32 _mm_fnmsub_ps
+ #define npyv_nmulsub_f64 _mm_fnmsub_pd
+#elif defined(NPY_HAVE_FMA4)
+ // multiply and add, a*b + c
+ #define npyv_muladd_f32 _mm_macc_ps
+ #define npyv_muladd_f64 _mm_macc_pd
+ // multiply and subtract, a*b - c
+ #define npyv_mulsub_f32 _mm_msub_ps
+ #define npyv_mulsub_f64 _mm_msub_pd
+ // negate multiply and add, -(a*b) + c
+ #define npyv_nmuladd_f32 _mm_nmacc_ps
+ #define npyv_nmuladd_f64 _mm_nmacc_pd
+#else
+ // multiply and add, a*b + c
+ NPY_FINLINE npyv_f32 npyv_muladd_f32(npyv_f32 a, npyv_f32 b, npyv_f32 c)
+ { return npyv_add_f32(npyv_mul_f32(a, b), c); }
+ NPY_FINLINE npyv_f64 npyv_muladd_f64(npyv_f64 a, npyv_f64 b, npyv_f64 c)
+ { return npyv_add_f64(npyv_mul_f64(a, b), c); }
+ // multiply and subtract, a*b - c
+ NPY_FINLINE npyv_f32 npyv_mulsub_f32(npyv_f32 a, npyv_f32 b, npyv_f32 c)
+ { return npyv_sub_f32(npyv_mul_f32(a, b), c); }
+ NPY_FINLINE npyv_f64 npyv_mulsub_f64(npyv_f64 a, npyv_f64 b, npyv_f64 c)
+ { return npyv_sub_f64(npyv_mul_f64(a, b), c); }
+ // negate multiply and add, -(a*b) + c
+ NPY_FINLINE npyv_f32 npyv_nmuladd_f32(npyv_f32 a, npyv_f32 b, npyv_f32 c)
+ { return npyv_sub_f32(c, npyv_mul_f32(a, b)); }
+ NPY_FINLINE npyv_f64 npyv_nmuladd_f64(npyv_f64 a, npyv_f64 b, npyv_f64 c)
+ { return npyv_sub_f64(c, npyv_mul_f64(a, b)); }
+#endif // NPY_HAVE_FMA3
+#ifndef NPY_HAVE_FMA3 // for FMA4 and NON-FMA3
+ // negate multiply and subtract, -(a*b) - c
+ NPY_FINLINE npyv_f32 npyv_nmulsub_f32(npyv_f32 a, npyv_f32 b, npyv_f32 c)
+ {
+ npyv_f32 neg_a = npyv_xor_f32(a, npyv_setall_f32(-0.0f));
+ return npyv_sub_f32(npyv_mul_f32(neg_a, b), c);
+ }
+ NPY_FINLINE npyv_f64 npyv_nmulsub_f64(npyv_f64 a, npyv_f64 b, npyv_f64 c)
+ {
+ npyv_f64 neg_a = npyv_xor_f64(a, npyv_setall_f64(-0.0));
+ return npyv_sub_f64(npyv_mul_f64(neg_a, b), c);
+ }
+#endif // !NPY_HAVE_FMA3
#endif // _NPY_SIMD_SSE_ARITHMETIC_H
diff --git a/numpy/core/src/common/simd/vsx/arithmetic.h b/numpy/core/src/common/simd/vsx/arithmetic.h
index dd23b5b11..6ef007676 100644
--- a/numpy/core/src/common/simd/vsx/arithmetic.h
+++ b/numpy/core/src/common/simd/vsx/arithmetic.h
@@ -100,4 +100,20 @@
#define npyv_div_f32 vec_div
#define npyv_div_f64 vec_div
+/***************************
+ * FUSED
+ ***************************/
+// multiply and add, a*b + c
+#define npyv_muladd_f32 vec_madd
+#define npyv_muladd_f64 vec_madd
+// multiply and subtract, a*b - c
+#define npyv_mulsub_f32 vec_msub
+#define npyv_mulsub_f64 vec_msub
+// negate multiply and add, -(a*b) + c
+#define npyv_nmuladd_f32 vec_nmsub // equivalent to -(a*b - c)
+#define npyv_nmuladd_f64 vec_nmsub
+// negate multiply and subtract, -(a*b) - c
+#define npyv_nmulsub_f32 vec_nmadd // equivalent to -(a*b + c)
+#define npyv_nmulsub_f64 vec_nmadd
+
#endif // _NPY_SIMD_VSX_ARITHMETIC_H
diff --git a/numpy/core/src/multiarray/_multiarray_tests.c.src b/numpy/core/src/multiarray/_multiarray_tests.c.src
index da631c830..ea04c82bd 100644
--- a/numpy/core/src/multiarray/_multiarray_tests.c.src
+++ b/numpy/core/src/multiarray/_multiarray_tests.c.src
@@ -9,8 +9,6 @@
#include "common.h"
#include "mem_overlap.h"
#include "npy_extint128.h"
-#include "common.h"
-
#if defined(MS_WIN32) || defined(__CYGWIN__)
#define EXPORT(x) __declspec(dllexport) x
@@ -188,7 +186,7 @@ test_neighborhood_iterator(PyObject* NPY_UNUSED(self), PyObject* args)
Py_DECREF(bound);
goto clean_itx;
}
- bounds[i] = PyInt_AsLong(bound);
+ bounds[i] = PyLong_AsSsize_t(bound);
Py_DECREF(bound);
}
@@ -347,7 +345,7 @@ test_neighborhood_iterator_oob(PyObject* NPY_UNUSED(self), PyObject* args)
Py_DECREF(bound);
goto clean_itx;
}
- bounds[i] = PyInt_AsLong(bound);
+ bounds[i] = PyLong_AsSsize_t(bound);
Py_DECREF(bound);
}
@@ -371,7 +369,7 @@ test_neighborhood_iterator_oob(PyObject* NPY_UNUSED(self), PyObject* args)
Py_DECREF(bound);
goto clean_itx;
}
- bounds[i] = PyInt_AsLong(bound);
+ bounds[i] = PyLong_AsSsize_t(bound);
Py_DECREF(bound);
}
@@ -1155,11 +1153,11 @@ array_solve_diophantine(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject
}
for (j = 0; j < nterms; ++j) {
- terms[j].a = (npy_int64)PyInt_AsSsize_t(PyTuple_GET_ITEM(A, j));
+ terms[j].a = (npy_int64)PyLong_AsSsize_t(PyTuple_GET_ITEM(A, j));
if (error_converting(terms[j].a)) {
goto fail;
}
- terms[j].ub = (npy_int64)PyInt_AsSsize_t(PyTuple_GET_ITEM(U, j));
+ terms[j].ub = (npy_int64)PyLong_AsSsize_t(PyTuple_GET_ITEM(U, j));
if (error_converting(terms[j].ub)) {
goto fail;
}
@@ -1735,8 +1733,8 @@ get_struct_alignments(PyObject *NPY_UNUSED(self), PyObject *args) {
/**begin repeat
* #N = 1,2,3#
*/
- alignment = PyInt_FromLong(_ALIGN(struct TestStruct@N@));
- size = PyInt_FromLong(sizeof(struct TestStruct@N@));
+ alignment = PyLong_FromLong(_ALIGN(struct TestStruct@N@));
+ size = PyLong_FromLong(sizeof(struct TestStruct@N@));
val = PyTuple_Pack(2, alignment, size);
Py_DECREF(alignment);
Py_DECREF(size);
@@ -1902,7 +1900,7 @@ PrintFloat_Printf_g(PyObject *obj, int precision)
PyOS_snprintf(str, sizeof(str), "%.*g", precision, val);
}
- return PyUString_FromString(str);
+ return PyUnicode_FromString(str);
}
@@ -1952,7 +1950,7 @@ run_byteorder_converter(PyObject* NPY_UNUSED(self), PyObject *args)
case NPY_SWAP: return PyUnicode_FromString("NPY_SWAP");
case NPY_IGNORE: return PyUnicode_FromString("NPY_IGNORE");
}
- return PyInt_FromLong(byteorder);
+ return PyLong_FromLong(byteorder);
}
static PyObject *
@@ -1967,7 +1965,7 @@ run_sortkind_converter(PyObject* NPY_UNUSED(self), PyObject *args)
case NPY_HEAPSORT: return PyUnicode_FromString("NPY_HEAPSORT");
case NPY_STABLESORT: return PyUnicode_FromString("NPY_STABLESORT");
}
- return PyInt_FromLong(kind);
+ return PyLong_FromLong(kind);
}
static PyObject *
@@ -1980,7 +1978,7 @@ run_selectkind_converter(PyObject* NPY_UNUSED(self), PyObject *args)
switch (kind) {
case NPY_INTROSELECT: return PyUnicode_FromString("NPY_INTROSELECT");
}
- return PyInt_FromLong(kind);
+ return PyLong_FromLong(kind);
}
static PyObject *
@@ -1994,7 +1992,7 @@ run_searchside_converter(PyObject* NPY_UNUSED(self), PyObject *args)
case NPY_SEARCHLEFT: return PyUnicode_FromString("NPY_SEARCHLEFT");
case NPY_SEARCHRIGHT: return PyUnicode_FromString("NPY_SEARCHRIGHT");
}
- return PyInt_FromLong(side);
+ return PyLong_FromLong(side);
}
static PyObject *
@@ -2010,7 +2008,7 @@ run_order_converter(PyObject* NPY_UNUSED(self), PyObject *args)
case NPY_FORTRANORDER: return PyUnicode_FromString("NPY_FORTRANORDER");
case NPY_KEEPORDER: return PyUnicode_FromString("NPY_KEEPORDER");
}
- return PyInt_FromLong(order);
+ return PyLong_FromLong(order);
}
static PyObject *
@@ -2025,7 +2023,7 @@ run_clipmode_converter(PyObject* NPY_UNUSED(self), PyObject *args)
case NPY_WRAP: return PyUnicode_FromString("NPY_WRAP");
case NPY_RAISE: return PyUnicode_FromString("NPY_RAISE");
}
- return PyInt_FromLong(mode);
+ return PyLong_FromLong(mode);
}
static PyObject *
@@ -2042,7 +2040,7 @@ run_casting_converter(PyObject* NPY_UNUSED(self), PyObject *args)
case NPY_SAME_KIND_CASTING: return PyUnicode_FromString("NPY_SAME_KIND_CASTING");
case NPY_UNSAFE_CASTING: return PyUnicode_FromString("NPY_UNSAFE_CASTING");
}
- return PyInt_FromLong(casting);
+ return PyLong_FromLong(casting);
}
static PyObject *
diff --git a/numpy/core/src/multiarray/alloc.c b/numpy/core/src/multiarray/alloc.c
index 795fc7315..887deff53 100644
--- a/numpy/core/src/multiarray/alloc.c
+++ b/numpy/core/src/multiarray/alloc.c
@@ -2,17 +2,12 @@
#include <Python.h>
#include "structmember.h"
-#if PY_VERSION_HEX >= 0x03060000
#include <pymem.h>
/* public api in 3.7 */
#if PY_VERSION_HEX < 0x03070000
#define PyTraceMalloc_Track _PyTraceMalloc_Track
#define PyTraceMalloc_Untrack _PyTraceMalloc_Untrack
#endif
-#else
-#define PyTraceMalloc_Track(...)
-#define PyTraceMalloc_Untrack(...)
-#endif
#define NPY_NO_DEPRECATED_API NPY_API_VERSION
#define _MULTIARRAYMODULE
diff --git a/numpy/core/src/multiarray/array_coercion.c b/numpy/core/src/multiarray/array_coercion.c
index ffb5bd632..3f3fd1387 100644
--- a/numpy/core/src/multiarray/array_coercion.c
+++ b/numpy/core/src/multiarray/array_coercion.c
@@ -548,7 +548,7 @@ update_shape(int curr_ndim, int *max_ndim,
success = -1;
if (!sequence) {
/* Remove dimensions that we cannot use: */
- *max_ndim -= new_ndim + i;
+ *max_ndim -= new_ndim - i;
}
else {
assert(i == 0);
diff --git a/numpy/core/src/multiarray/arrayfunction_override.c b/numpy/core/src/multiarray/arrayfunction_override.c
index 9ea8efdd9..613fe6b3f 100644
--- a/numpy/core/src/multiarray/arrayfunction_override.c
+++ b/numpy/core/src/multiarray/arrayfunction_override.c
@@ -26,7 +26,6 @@ static PyObject *
get_array_function(PyObject *obj)
{
static PyObject *ndarray_array_function = NULL;
- PyObject *array_function;
if (ndarray_array_function == NULL) {
ndarray_array_function = get_ndarray_array_function();
@@ -38,7 +37,7 @@ get_array_function(PyObject *obj)
return ndarray_array_function;
}
- array_function = PyArray_LookupSpecial(obj, "__array_function__");
+ PyObject *array_function = PyArray_LookupSpecial(obj, "__array_function__");
if (array_function == NULL && PyErr_Occurred()) {
PyErr_Clear(); /* TODO[gh-14801]: propagate crashes during attribute access? */
}
@@ -53,9 +52,7 @@ get_array_function(PyObject *obj)
static void
pyobject_array_insert(PyObject **array, int length, int index, PyObject *item)
{
- int j;
-
- for (j = length; j > index; j--) {
+ for (int j = length; j > index; j--) {
array[j] = array[j - 1];
}
array[index] = item;
@@ -74,18 +71,16 @@ get_implementing_args_and_methods(PyObject *relevant_args,
PyObject **methods)
{
int num_implementing_args = 0;
- Py_ssize_t i;
- int j;
PyObject **items = PySequence_Fast_ITEMS(relevant_args);
Py_ssize_t length = PySequence_Fast_GET_SIZE(relevant_args);
- for (i = 0; i < length; i++) {
+ for (Py_ssize_t i = 0; i < length; i++) {
int new_class = 1;
PyObject *argument = items[i];
/* Have we seen this type before? */
- for (j = 0; j < num_implementing_args; j++) {
+ for (int j = 0; j < num_implementing_args; j++) {
if (Py_TYPE(argument) == Py_TYPE(implementing_args[j])) {
new_class = 0;
break;
@@ -109,7 +104,7 @@ get_implementing_args_and_methods(PyObject *relevant_args,
/* "subclasses before superclasses, otherwise left to right" */
arg_index = num_implementing_args;
- for (j = 0; j < num_implementing_args; j++) {
+ for (int j = 0; j < num_implementing_args; j++) {
PyObject *other_type;
other_type = (PyObject *)Py_TYPE(implementing_args[j]);
if (PyObject_IsInstance(argument, other_type)) {
@@ -129,7 +124,7 @@ get_implementing_args_and_methods(PyObject *relevant_args,
return num_implementing_args;
fail:
- for (j = 0; j < num_implementing_args; j++) {
+ for (int j = 0; j < num_implementing_args; j++) {
Py_DECREF(implementing_args[j]);
Py_DECREF(methods[j]);
}
@@ -161,13 +156,10 @@ NPY_NO_EXPORT PyObject *
array_function_method_impl(PyObject *func, PyObject *types, PyObject *args,
PyObject *kwargs)
{
- Py_ssize_t j;
- PyObject *implementation, *result;
-
PyObject **items = PySequence_Fast_ITEMS(types);
Py_ssize_t length = PySequence_Fast_GET_SIZE(types);
- for (j = 0; j < length; j++) {
+ for (Py_ssize_t j = 0; j < length; j++) {
int is_subclass = PyObject_IsSubclass(
items[j], (PyObject *)&PyArray_Type);
if (is_subclass == -1) {
@@ -179,11 +171,11 @@ array_function_method_impl(PyObject *func, PyObject *types, PyObject *args,
}
}
- implementation = PyObject_GetAttr(func, npy_ma_str_implementation);
+ PyObject *implementation = PyObject_GetAttr(func, npy_ma_str_implementation);
if (implementation == NULL) {
return NULL;
}
- result = PyObject_Call(implementation, args, kwargs);
+ PyObject *result = PyObject_Call(implementation, args, kwargs);
Py_DECREF(implementation);
return result;
}
@@ -208,32 +200,32 @@ call_array_function(PyObject* argument, PyObject* method,
}
-/*
- * Implements the __array_function__ protocol for a function, as described in
- * in NEP-18. See numpy.core.overrides for a full docstring.
+/**
+ * Internal handler for the array-function dispatching. The helper returns
+ * either the result, or NotImplemented (as a borrowed reference).
+ *
+ * @param public_api The public API symbol used for dispatching
+ * @param relevant_args Arguments which may implement __array_function__
+ * @param args Original arguments
+ * @param kwargs Original keyword arguments
+ *
+ * @returns The result of the dispatched version, or a borrowed reference
+ * to NotImplemented to indicate the default implementation should
+ * be used.
*/
NPY_NO_EXPORT PyObject *
-array_implement_array_function(
- PyObject *NPY_UNUSED(dummy), PyObject *positional_args)
+array_implement_array_function_internal(
+ PyObject *public_api, PyObject *relevant_args,
+ PyObject *args, PyObject *kwargs)
{
- PyObject *implementation, *public_api, *relevant_args, *args, *kwargs;
-
- PyObject *types = NULL;
PyObject *implementing_args[NPY_MAXARGS];
PyObject *array_function_methods[NPY_MAXARGS];
+ PyObject *types = NULL;
- int j, any_overrides;
- int num_implementing_args = 0;
PyObject *result = NULL;
static PyObject *errmsg_formatter = NULL;
- if (!PyArg_UnpackTuple(
- positional_args, "implement_array_function", 5, 5,
- &implementation, &public_api, &relevant_args, &args, &kwargs)) {
- return NULL;
- }
-
relevant_args = PySequence_Fast(
relevant_args,
"dispatcher for __array_function__ did not return an iterable");
@@ -242,7 +234,7 @@ array_implement_array_function(
}
/* Collect __array_function__ implementations */
- num_implementing_args = get_implementing_args_and_methods(
+ int num_implementing_args = get_implementing_args_and_methods(
relevant_args, implementing_args, array_function_methods);
if (num_implementing_args == -1) {
goto cleanup;
@@ -254,15 +246,19 @@ array_implement_array_function(
* arguments implement __array_function__ at all (e.g., if they are all
* built-in types).
*/
- any_overrides = 0;
- for (j = 0; j < num_implementing_args; j++) {
+ int any_overrides = 0;
+ for (int j = 0; j < num_implementing_args; j++) {
if (!is_default_array_function(array_function_methods[j])) {
any_overrides = 1;
break;
}
}
if (!any_overrides) {
- result = PyObject_Call(implementation, args, kwargs);
+ /*
+ * When the default implementation should be called, return
+ * `Py_NotImplemented` to indicate this.
+ */
+ result = Py_NotImplemented;
goto cleanup;
}
@@ -275,14 +271,14 @@ array_implement_array_function(
if (types == NULL) {
goto cleanup;
}
- for (j = 0; j < num_implementing_args; j++) {
+ for (int j = 0; j < num_implementing_args; j++) {
PyObject *arg_type = (PyObject *)Py_TYPE(implementing_args[j]);
Py_INCREF(arg_type);
PyTuple_SET_ITEM(types, j, arg_type);
}
/* Call __array_function__ methods */
- for (j = 0; j < num_implementing_args; j++) {
+ for (int j = 0; j < num_implementing_args; j++) {
PyObject *argument = implementing_args[j];
PyObject *method = array_function_methods[j];
@@ -319,7 +315,7 @@ array_implement_array_function(
}
cleanup:
- for (j = 0; j < num_implementing_args; j++) {
+ for (int j = 0; j < num_implementing_args; j++) {
Py_DECREF(implementing_args[j]);
Py_DECREF(array_function_methods[j]);
}
@@ -330,6 +326,92 @@ cleanup:
/*
+ * Implements the __array_function__ protocol for a Python function, as
+ * described in NEP-18. See numpy.core.overrides for a full docstring.
+ */
+NPY_NO_EXPORT PyObject *
+array_implement_array_function(
+ PyObject *NPY_UNUSED(dummy), PyObject *positional_args)
+{
+ PyObject *implementation, *public_api, *relevant_args, *args, *kwargs;
+
+ if (!PyArg_UnpackTuple(
+ positional_args, "implement_array_function", 5, 5,
+ &implementation, &public_api, &relevant_args, &args, &kwargs)) {
+ return NULL;
+ }
+
+ /* Remove `like=` kwarg, which is NumPy-exclusive and thus not present
+ * in downstream libraries.
+ */
+ if (kwargs != NULL && PyDict_Contains(kwargs, npy_ma_str_like)) {
+ PyDict_DelItem(kwargs, npy_ma_str_like);
+ }
+
+ PyObject *res = array_implement_array_function_internal(
+ public_api, relevant_args, args, kwargs);
+
+ if (res == Py_NotImplemented) {
+ return PyObject_Call(implementation, args, kwargs);
+ }
+ return res;
+}
+
+
+/*
+ * Implements the __array_function__ protocol for C array creation functions
+ * only. Added as an extension to NEP-18 in an effort to bring NEP-35 to
+ * life with minimal dispatch overhead.
+ */
+NPY_NO_EXPORT PyObject *
+array_implement_c_array_function_creation(
+ const char *function_name, PyObject *args, PyObject *kwargs)
+{
+ if (kwargs == NULL) {
+ return Py_NotImplemented;
+ }
+
+ /* Remove `like=` kwarg, which is NumPy-exclusive and thus not present
+ * in downstream libraries. If that key isn't present, return
+ * NotImplemented and let the originating call continue.
+ */
+ if (!PyDict_Contains(kwargs, npy_ma_str_like)) {
+ return Py_NotImplemented;
+ }
+
+ PyObject *relevant_args = PyTuple_Pack(1,
+ PyDict_GetItem(kwargs, npy_ma_str_like));
+ if (relevant_args == NULL) {
+ return NULL;
+ }
+ PyDict_DelItem(kwargs, npy_ma_str_like);
+
+ PyObject *numpy_module = PyImport_Import(npy_ma_str_numpy);
+ if (numpy_module == NULL) {
+ return NULL;
+ }
+
+ PyObject *public_api = PyObject_GetAttrString(numpy_module, function_name);
+ Py_DECREF(numpy_module);
+ if (public_api == NULL) {
+ return NULL;
+ }
+ if (!PyCallable_Check(public_api)) {
+ Py_DECREF(public_api);
+ return PyErr_Format(PyExc_RuntimeError,
+ "numpy.%s is not callable.",
+ function_name);
+ }
+
+ PyObject* result = array_implement_array_function_internal(
+ public_api, relevant_args, args, kwargs);
+
+ Py_DECREF(public_api);
+ return result;
+}
+
+
+/*
* Python wrapper for get_implementing_args_and_methods, for testing purposes.
*/
NPY_NO_EXPORT PyObject *
@@ -337,8 +419,6 @@ array__get_implementing_args(
PyObject *NPY_UNUSED(dummy), PyObject *positional_args)
{
PyObject *relevant_args;
- int j;
- int num_implementing_args = 0;
PyObject *implementing_args[NPY_MAXARGS];
PyObject *array_function_methods[NPY_MAXARGS];
PyObject *result = NULL;
@@ -355,7 +435,7 @@ array__get_implementing_args(
return NULL;
}
- num_implementing_args = get_implementing_args_and_methods(
+ int num_implementing_args = get_implementing_args_and_methods(
relevant_args, implementing_args, array_function_methods);
if (num_implementing_args == -1) {
goto cleanup;
@@ -366,14 +446,14 @@ array__get_implementing_args(
if (result == NULL) {
goto cleanup;
}
- for (j = 0; j < num_implementing_args; j++) {
+ for (int j = 0; j < num_implementing_args; j++) {
PyObject *argument = implementing_args[j];
Py_INCREF(argument);
PyList_SET_ITEM(result, j, argument);
}
cleanup:
- for (j = 0; j < num_implementing_args; j++) {
+ for (int j = 0; j < num_implementing_args; j++) {
Py_DECREF(implementing_args[j]);
Py_DECREF(array_function_methods[j]);
}
diff --git a/numpy/core/src/multiarray/arrayfunction_override.h b/numpy/core/src/multiarray/arrayfunction_override.h
index 0d224e2b6..fdcf1746d 100644
--- a/numpy/core/src/multiarray/arrayfunction_override.h
+++ b/numpy/core/src/multiarray/arrayfunction_override.h
@@ -10,6 +10,10 @@ array__get_implementing_args(
PyObject *NPY_UNUSED(dummy), PyObject *positional_args);
NPY_NO_EXPORT PyObject *
+array_implement_c_array_function_creation(
+ const char *function_name, PyObject *args, PyObject *kwargs);
+
+NPY_NO_EXPORT PyObject *
array_function_method_impl(PyObject *func, PyObject *types, PyObject *args,
PyObject *kwargs);
diff --git a/numpy/core/src/multiarray/arrayobject.c b/numpy/core/src/multiarray/arrayobject.c
index 95c650674..5da1b5f29 100644
--- a/numpy/core/src/multiarray/arrayobject.c
+++ b/numpy/core/src/multiarray/arrayobject.c
@@ -416,7 +416,7 @@ WARN_IN_DEALLOC(PyObject* warning, const char * msg) {
if (PyErr_WarnEx(warning, msg, 1) < 0) {
PyObject * s;
- s = PyUString_FromString("array_dealloc");
+ s = PyUnicode_FromString("array_dealloc");
if (s) {
PyErr_WriteUnraisable(s);
Py_DECREF(s);
diff --git a/numpy/core/src/multiarray/arraytypes.c.src b/numpy/core/src/multiarray/arraytypes.c.src
index 9508fb5ad..ecaca72a1 100644
--- a/numpy/core/src/multiarray/arraytypes.c.src
+++ b/numpy/core/src/multiarray/arraytypes.c.src
@@ -47,7 +47,7 @@ static NPY_INLINE npy_bool
PySequence_NoString_Check(PyObject *op) {
return
PySequence_Check(op) &&
- !PyString_Check(op) &&
+ !PyBytes_Check(op) &&
!PyUnicode_Check(op) &&
!PyArray_IsZeroDim(op);
}
@@ -175,7 +175,7 @@ MyPyLong_AsUnsigned@Type@ (PyObject *obj)
*
* #TYPE = BOOL, BYTE, UBYTE, SHORT, USHORT, INT, LONG, UINT, ULONG,
* LONGLONG, ULONGLONG, HALF, FLOAT, DOUBLE#
- * #func1 = PyBool_FromLong, PyInt_FromLong*6, PyLong_FromUnsignedLong*2,
+ * #func1 = PyBool_FromLong, PyLong_FromLong*6, PyLong_FromUnsignedLong*2,
* PyLong_FromLongLong, PyLong_FromUnsignedLongLong,
* MyPyFloat_FromHalf, PyFloat_FromDouble*2#
* #func2 = PyObject_IsTrue, MyPyLong_AsLong*6, MyPyLong_AsUnsignedLong*2,
@@ -648,7 +648,7 @@ static PyObject *
OBJECT_getitem(void *ip, void *NPY_UNUSED(ap))
{
PyObject *obj;
- NPY_COPY_PYOBJECT_PTR(&obj, ip);
+ memcpy(&obj, ip, sizeof(obj));
if (obj == NULL) {
Py_RETURN_NONE;
}
@@ -664,12 +664,12 @@ OBJECT_setitem(PyObject *op, void *ov, void *NPY_UNUSED(ap))
{
PyObject *obj;
- NPY_COPY_PYOBJECT_PTR(&obj, ov);
+ memcpy(&obj, ov, sizeof(obj));
Py_INCREF(op);
Py_XDECREF(obj);
- NPY_COPY_PYOBJECT_PTR(ov, &op);
+ memcpy(ov, &op, sizeof(op));
return PyErr_Occurred() ? -1 : 0;
}
@@ -865,7 +865,7 @@ VOID_setitem(PyObject *op, void *input, void *vap)
npy_intp names_size = PyTuple_GET_SIZE(descr->names);
if (names_size != PyTuple_Size(op)) {
- errmsg = PyUString_FromFormat(
+ errmsg = PyUnicode_FromFormat(
"could not assign tuple of length %zd to structure "
"with %" NPY_INTP_FMT " fields.",
PyTuple_Size(op), names_size);
@@ -2237,11 +2237,11 @@ OBJECT_copyswapn(PyObject **dst, npy_intp dstride, PyObject **src,
dstp = (unsigned char*)dst;
srcp = (unsigned char*)src;
for (i = 0; i < n; i++) {
- NPY_COPY_PYOBJECT_PTR(&tmp, srcp);
+ memcpy(&tmp, srcp, sizeof(tmp));
Py_XINCREF(tmp);
- NPY_COPY_PYOBJECT_PTR(&tmp, dstp);
+ memcpy(&tmp, dstp, sizeof(tmp));
Py_XDECREF(tmp);
- NPY_COPY_PYOBJECT_PTR(dstp, srcp);
+ memcpy(dstp, srcp, sizeof(tmp));
dstp += dstride;
srcp += sstride;
}
@@ -2265,11 +2265,11 @@ OBJECT_copyswap(PyObject **dst, PyObject **src, int NPY_UNUSED(swap),
}
else {
PyObject *tmp;
- NPY_COPY_PYOBJECT_PTR(&tmp, src);
+ memcpy(&tmp, src, sizeof(tmp));
Py_XINCREF(tmp);
- NPY_COPY_PYOBJECT_PTR(&tmp, dst);
+ memcpy(&tmp, dst, sizeof(tmp));
Py_XDECREF(tmp);
- NPY_COPY_PYOBJECT_PTR(dst, src);
+ memcpy(dst, src, sizeof(tmp));
}
}
}
@@ -2686,7 +2686,7 @@ OBJECT_nonzero (PyObject **ip, PyArrayObject *ap)
}
else {
PyObject *obj;
- NPY_COPY_PYOBJECT_PTR(&obj, ip);
+ memcpy(&obj, ip, sizeof(obj));
if (obj == NULL) {
return NPY_FALSE;
}
@@ -4461,7 +4461,7 @@ set_typeinfo(PyObject *dict)
return -1;
}
}
- key = PyInt_FromLong(NPY_@name2@);
+ key = PyLong_FromLong(NPY_@name2@);
if (key == NULL) {
return -1;
}
diff --git a/numpy/core/src/multiarray/buffer.c b/numpy/core/src/multiarray/buffer.c
index 8b482dc03..af40cdc2c 100644
--- a/numpy/core/src/multiarray/buffer.c
+++ b/numpy/core/src/multiarray/buffer.c
@@ -267,7 +267,7 @@ _buffer_format_string(PyArray_Descr *descr, _tmp_string_t *str,
child = (PyArray_Descr*)PyTuple_GetItem(item, 0);
offset_obj = PyTuple_GetItem(item, 1);
- new_offset = PyInt_AsLong(offset_obj);
+ new_offset = PyLong_AsLong(offset_obj);
if (error_converting(new_offset)) {
return -1;
}
@@ -931,7 +931,7 @@ _descriptor_from_pep3118_format(char const *s)
}
*p = '\0';
- str = PyUString_FromStringAndSize(buf, strlen(buf));
+ str = PyUnicode_FromStringAndSize(buf, strlen(buf));
if (str == NULL) {
free(buf);
return NULL;
diff --git a/numpy/core/src/multiarray/calculation.c b/numpy/core/src/multiarray/calculation.c
index 92ab75053..43d88271b 100644
--- a/numpy/core/src/multiarray/calculation.c
+++ b/numpy/core/src/multiarray/calculation.c
@@ -392,7 +392,7 @@ __New_PyArray_Std(PyArrayObject *self, int axis, int rtype, PyArrayObject *out,
else {
val = PyArray_DIM(arrnew,i);
}
- PyTuple_SET_ITEM(newshape, i, PyInt_FromLong((long)val));
+ PyTuple_SET_ITEM(newshape, i, PyLong_FromLong((long)val));
}
arr2 = (PyArrayObject *)PyArray_Reshape(arr1, newshape);
Py_DECREF(arr1);
@@ -1023,7 +1023,7 @@ PyArray_Clip(PyArrayObject *self, PyObject *min, PyObject *max, PyArrayObject *o
if (min != NULL) {
if (PyArray_ISUNSIGNED(self)) {
int cmp;
- zero = PyInt_FromLong(0);
+ zero = PyLong_FromLong(0);
cmp = PyObject_RichCompareBool(min, zero, Py_LT);
if (cmp == -1) {
Py_DECREF(zero);
diff --git a/numpy/core/src/multiarray/common.c b/numpy/core/src/multiarray/common.c
index 2abc79167..6af71f351 100644
--- a/numpy/core/src/multiarray/common.c
+++ b/numpy/core/src/multiarray/common.c
@@ -12,7 +12,6 @@
#include "abstractdtypes.h"
#include "usertypes.h"
-#include "common.h"
#include "npy_buffer.h"
#include "get_attr_string.h"
@@ -127,26 +126,6 @@ PyArray_DTypeFromObject(PyObject *obj, int maxdims, PyArray_Descr **out_dtype)
return 0;
}
-
-/* new reference */
-NPY_NO_EXPORT PyArray_Descr *
-_array_typedescr_fromstr(char const *c_str)
-{
- PyArray_Descr *descr = NULL;
- PyObject *stringobj = PyString_FromString(c_str);
-
- if (stringobj == NULL) {
- return NULL;
- }
- if (PyArray_DescrConverter(stringobj, &descr) != NPY_SUCCEED) {
- Py_DECREF(stringobj);
- return NULL;
- }
- Py_DECREF(stringobj);
- return descr;
-}
-
-
NPY_NO_EXPORT char *
index2ptr(PyArrayObject *mp, npy_intp i)
{
@@ -169,7 +148,7 @@ NPY_NO_EXPORT int
_zerofill(PyArrayObject *ret)
{
if (PyDataType_REFCHK(PyArray_DESCR(ret))) {
- PyObject *zero = PyInt_FromLong(0);
+ PyObject *zero = PyLong_FromLong(0);
PyArray_FillObjectArray(ret, zero);
Py_DECREF(zero);
if (PyErr_Occurred()) {
@@ -264,10 +243,10 @@ convert_shape_to_string(npy_intp n, npy_intp const *vals, char *ending)
for (i = 0; i < n && vals[i] < 0; i++);
if (i == n) {
- return PyUString_FromFormat("()%s", ending);
+ return PyUnicode_FromFormat("()%s", ending);
}
else {
- ret = PyUString_FromFormat("(%" NPY_INTP_FMT, vals[i++]);
+ ret = PyUnicode_FromFormat("(%" NPY_INTP_FMT, vals[i++]);
if (ret == NULL) {
return NULL;
}
@@ -275,10 +254,10 @@ convert_shape_to_string(npy_intp n, npy_intp const *vals, char *ending)
for (; i < n; ++i) {
if (vals[i] < 0) {
- tmp = PyUString_FromString(",newaxis");
+ tmp = PyUnicode_FromString(",newaxis");
}
else {
- tmp = PyUString_FromFormat(",%" NPY_INTP_FMT, vals[i]);
+ tmp = PyUnicode_FromFormat(",%" NPY_INTP_FMT, vals[i]);
}
if (tmp == NULL) {
Py_DECREF(ret);
@@ -292,10 +271,10 @@ convert_shape_to_string(npy_intp n, npy_intp const *vals, char *ending)
}
if (i == 1) {
- tmp = PyUString_FromFormat(",)%s", ending);
+ tmp = PyUnicode_FromFormat(",)%s", ending);
}
else {
- tmp = PyUString_FromFormat(")%s", ending);
+ tmp = PyUnicode_FromFormat(")%s", ending);
}
PyUString_ConcatAndDel(&ret, tmp);
return ret;
@@ -310,7 +289,7 @@ dot_alignment_error(PyArrayObject *a, int i, PyArrayObject *b, int j)
*shape1 = NULL, *shape2 = NULL,
*shape1_i = NULL, *shape2_j = NULL;
- format = PyUString_FromString("shapes %s and %s not aligned:"
+ format = PyUnicode_FromString("shapes %s and %s not aligned:"
" %d (dim %d) != %d (dim %d)");
shape1 = convert_shape_to_string(PyArray_NDIM(a), PyArray_DIMS(a), "");
@@ -333,7 +312,7 @@ dot_alignment_error(PyArrayObject *a, int i, PyArrayObject *b, int j)
goto end;
}
- errmsg = PyUString_Format(format, fmt_args);
+ errmsg = PyUnicode_Format(format, fmt_args);
if (errmsg != NULL) {
PyErr_SetObject(PyExc_ValueError, errmsg);
}
@@ -373,10 +352,7 @@ _unpack_field(PyObject *value, PyArray_Descr **descr, npy_intp *offset)
*descr = (PyArray_Descr *)PyTuple_GET_ITEM(value, 0);
off = PyTuple_GET_ITEM(value, 1);
- if (PyInt_Check(off)) {
- *offset = PyInt_AsSsize_t(off);
- }
- else if (PyLong_Check(off)) {
+ if (PyLong_Check(off)) {
*offset = PyLong_AsSsize_t(off);
}
else {
diff --git a/numpy/core/src/multiarray/common.h b/numpy/core/src/multiarray/common.h
index 793cefaf8..ef9bc79da 100644
--- a/numpy/core/src/multiarray/common.h
+++ b/numpy/core/src/multiarray/common.h
@@ -2,7 +2,6 @@
#define _NPY_PRIVATE_COMMON_H_
#include "structmember.h"
#include <numpy/npy_common.h>
-#include <numpy/npy_cpu.h>
#include <numpy/ndarraytypes.h>
#include <limits.h>
#include "npy_import.h"
@@ -292,43 +291,6 @@ npy_memchr(char * haystack, char needle,
return p;
}
-/*
- * Convert NumPy stride to BLAS stride. Returns 0 if conversion cannot be done
- * (BLAS won't handle negative or zero strides the way we want).
- */
-static NPY_INLINE int
-blas_stride(npy_intp stride, unsigned itemsize)
-{
- /*
- * Should probably check pointer alignment also, but this may cause
- * problems if we require complex to be 16 byte aligned.
- */
- if (stride > 0 && npy_is_aligned((void *)stride, itemsize)) {
- stride /= itemsize;
-#ifndef HAVE_BLAS_ILP64
- if (stride <= INT_MAX) {
-#else
- if (stride <= NPY_MAX_INT64) {
-#endif
- return stride;
- }
- }
- return 0;
-}
-
-/*
- * Define a chunksize for CBLAS. CBLAS counts in integers.
- */
-#if NPY_MAX_INTP > INT_MAX
-# ifndef HAVE_BLAS_ILP64
-# define NPY_CBLAS_CHUNK (INT_MAX / 2 + 1)
-# else
-# define NPY_CBLAS_CHUNK (NPY_MAX_INT64 / 2 + 1)
-# endif
-#else
-# define NPY_CBLAS_CHUNK NPY_MAX_INTP
-#endif
-
#include "ucsnarrow.h"
/*
diff --git a/numpy/core/src/multiarray/compiled_base.c b/numpy/core/src/multiarray/compiled_base.c
index a8e4aa789..061db2250 100644
--- a/numpy/core/src/multiarray/compiled_base.c
+++ b/numpy/core/src/multiarray/compiled_base.c
@@ -249,7 +249,7 @@ arr__monotonicity(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwds)
NPY_END_THREADS
Py_DECREF(arr_x);
- return PyInt_FromLong(monotonic);
+ return PyLong_FromLong(monotonic);
}
/*
diff --git a/numpy/core/src/multiarray/conversion_utils.c b/numpy/core/src/multiarray/conversion_utils.c
index e41fdc8f1..dd18f71fd 100644
--- a/numpy/core/src/multiarray/conversion_utils.c
+++ b/numpy/core/src/multiarray/conversion_utils.c
@@ -6,7 +6,6 @@
#define _MULTIARRAYMODULE
#include "numpy/arrayobject.h"
#include "numpy/arrayscalars.h"
-#include "numpy/arrayobject.h"
#include "npy_config.h"
#include "npy_pycompat.h"
@@ -1152,7 +1151,7 @@ PyArray_IntTupleFromIntp(int len, npy_intp const *vals)
}
for (i = 0; i < len; i++) {
#if NPY_SIZEOF_INTP <= NPY_SIZEOF_LONG
- PyObject *o = PyInt_FromLong((long) vals[i]);
+ PyObject *o = PyLong_FromLong((long) vals[i]);
#else
PyObject *o = PyLong_FromLongLong((npy_longlong) vals[i]);
#endif
diff --git a/numpy/core/src/multiarray/convert.c b/numpy/core/src/multiarray/convert.c
index e7cbeaa77..29a2bb0e8 100644
--- a/numpy/core/src/multiarray/convert.c
+++ b/numpy/core/src/multiarray/convert.c
@@ -8,9 +8,6 @@
#define _MULTIARRAYMODULE
#include "numpy/arrayobject.h"
#include "numpy/arrayscalars.h"
-
-#include "npy_config.h"
-
#include "npy_pycompat.h"
#include "common.h"
@@ -248,13 +245,13 @@ PyArray_ToFile(PyArrayObject *self, FILE *fp, char *sep, char *format)
return -1;
}
PyTuple_SET_ITEM(tupobj,0,obj);
- obj = PyUString_FromString((const char *)format);
+ obj = PyUnicode_FromString((const char *)format);
if (obj == NULL) {
Py_DECREF(tupobj);
Py_DECREF(it);
return -1;
}
- strobj = PyUString_Format(obj, tupobj);
+ strobj = PyUnicode_Format(obj, tupobj);
Py_DECREF(obj);
Py_DECREF(tupobj);
if (strobj == NULL) {
@@ -403,7 +400,7 @@ PyArray_FillWithScalar(PyArrayObject *arr, PyObject *obj)
}
}
/* Python integer */
- else if (PyLong_Check(obj) || PyInt_Check(obj)) {
+ else if (PyLong_Check(obj)) {
/* Try long long before unsigned long long */
npy_longlong ll_v = PyLong_AsLongLong(obj);
if (error_converting(ll_v)) {
diff --git a/numpy/core/src/multiarray/convert_datatype.c b/numpy/core/src/multiarray/convert_datatype.c
index 94cd1e5fa..d9121707b 100644
--- a/numpy/core/src/multiarray/convert_datatype.c
+++ b/numpy/core/src/multiarray/convert_datatype.c
@@ -92,11 +92,14 @@ PyArray_GetCastFunc(PyArray_Descr *descr, int type_num)
PyObject *key;
PyObject *cobj;
- key = PyInt_FromLong(type_num);
+ key = PyLong_FromLong(type_num);
cobj = PyDict_GetItem(obj, key);
Py_DECREF(key);
- if (cobj && NpyCapsule_Check(cobj)) {
- castfunc = NpyCapsule_AsVoidPtr(cobj);
+ if (cobj && PyCapsule_CheckExact(cobj)) {
+ castfunc = PyCapsule_GetPointer(cobj, NULL);
+ if (castfunc == NULL) {
+ return NULL;
+ }
}
}
}
@@ -340,25 +343,6 @@ PyArray_CanCastSafely(int fromtype, int totype)
if (fromtype == totype) {
return 1;
}
- /* Special-cases for some types */
- switch (fromtype) {
- case NPY_DATETIME:
- case NPY_TIMEDELTA:
- case NPY_OBJECT:
- case NPY_VOID:
- return 0;
- case NPY_BOOL:
- return 1;
- }
- switch (totype) {
- case NPY_BOOL:
- case NPY_DATETIME:
- case NPY_TIMEDELTA:
- return 0;
- case NPY_OBJECT:
- case NPY_VOID:
- return 1;
- }
from = PyArray_DescrFromType(fromtype);
/*
@@ -1989,7 +1973,7 @@ PyArray_Zero(PyArrayObject *arr)
}
if (zero_obj == NULL) {
- zero_obj = PyInt_FromLong((long) 0);
+ zero_obj = PyLong_FromLong((long) 0);
if (zero_obj == NULL) {
return NULL;
}
@@ -2035,7 +2019,7 @@ PyArray_One(PyArrayObject *arr)
}
if (one_obj == NULL) {
- one_obj = PyInt_FromLong((long) 1);
+ one_obj = PyLong_FromLong((long) 1);
if (one_obj == NULL) {
return NULL;
}
diff --git a/numpy/core/src/multiarray/ctors.c b/numpy/core/src/multiarray/ctors.c
index 15824e9e2..956dfd3bb 100644
--- a/numpy/core/src/multiarray/ctors.c
+++ b/numpy/core/src/multiarray/ctors.c
@@ -300,12 +300,12 @@ _update_descr_and_dimensions(PyArray_Descr **des, npy_intp *newdims,
}
if (tuple) {
for (i = 0; i < numnew; i++) {
- mydim[i] = (npy_intp) PyInt_AsLong(
+ mydim[i] = (npy_intp) PyLong_AsLong(
PyTuple_GET_ITEM(old->subarray->shape, i));
}
}
else {
- mydim[0] = (npy_intp) PyInt_AsLong(old->subarray->shape);
+ mydim[0] = (npy_intp) PyLong_AsLong(old->subarray->shape);
}
if (newstrides) {
@@ -868,11 +868,14 @@ PyArray_NewFromDescr_int(
func = PyObject_GetAttr((PyObject *)fa, npy_ma_str_array_finalize);
if (func && func != Py_None) {
- if (NpyCapsule_Check(func)) {
+ if (PyCapsule_CheckExact(func)) {
/* A C-function is stored here */
PyArray_FinalizeFunc *cfunc;
- cfunc = NpyCapsule_AsVoidPtr(func);
+ cfunc = PyCapsule_GetPointer(func, NULL);
Py_DECREF(func);
+ if (cfunc == NULL) {
+ goto fail;
+ }
if (cfunc((PyArrayObject *)fa, obj) < 0) {
goto fail;
}
@@ -1575,6 +1578,7 @@ PyArray_CheckFromAny(PyObject *op, PyArray_Descr *descr, int min_depth,
return obj;
}
+
/*NUMPY_API
* steals reference to newtype --- acc. NULL
*/
@@ -1733,10 +1737,8 @@ NPY_NO_EXPORT PyObject *
PyArray_FromStructInterface(PyObject *input)
{
PyArray_Descr *thetype = NULL;
- char buf[40];
PyArrayInterface *inter;
PyObject *attr;
- PyArrayObject *ret;
char endian = NPY_NATBYTE;
attr = PyArray_LookupSpecial_OnInstance(input, "__array_struct__");
@@ -1747,7 +1749,7 @@ PyArray_FromStructInterface(PyObject *input)
return Py_NotImplemented;
}
}
- if (!NpyCapsule_Check(attr)) {
+ if (!PyCapsule_CheckExact(attr)) {
if (PyType_Check(input) && PyObject_HasAttrString(attr, "__get__")) {
/*
* If the input is a class `attr` should be a property-like object.
@@ -1759,7 +1761,10 @@ PyArray_FromStructInterface(PyObject *input)
}
goto fail;
}
- inter = NpyCapsule_AsVoidPtr(attr);
+ inter = PyCapsule_GetPointer(attr, NULL);
+ if (inter == NULL) {
+ goto fail;
+ }
if (inter->two != 2) {
goto fail;
}
@@ -1776,20 +1781,26 @@ PyArray_FromStructInterface(PyObject *input)
}
if (thetype == NULL) {
- PyOS_snprintf(buf, sizeof(buf),
- "%c%c%d", endian, inter->typekind, inter->itemsize);
- if (!(thetype=_array_typedescr_fromstr(buf))) {
+ PyObject *type_str = PyUnicode_FromFormat(
+ "%c%c%d", endian, inter->typekind, inter->itemsize);
+ if (type_str == NULL) {
+ Py_DECREF(attr);
+ return NULL;
+ }
+ int ok = PyArray_DescrConverter(type_str, &thetype);
+ Py_DECREF(type_str);
+ if (ok != NPY_SUCCEED) {
Py_DECREF(attr);
return NULL;
}
}
- ret = (PyArrayObject *)PyArray_NewFromDescrAndBase(
+ PyObject *ret = PyArray_NewFromDescrAndBase(
&PyArray_Type, thetype,
inter->nd, inter->shape, inter->strides, inter->data,
inter->flags, NULL, input);
Py_DECREF(attr);
- return (PyObject *)ret;
+ return ret;
fail:
PyErr_SetString(PyExc_ValueError, "invalid __array_struct__");
@@ -1803,41 +1814,21 @@ PyArray_FromStructInterface(PyObject *input)
*/
NPY_NO_EXPORT int
_is_default_descr(PyObject *descr, PyObject *typestr) {
- PyObject *tuple, *name, *typestr2;
- PyObject *tmp = NULL;
- int ret = 0;
-
if (!PyList_Check(descr) || PyList_GET_SIZE(descr) != 1) {
return 0;
}
- tuple = PyList_GET_ITEM(descr, 0);
+ PyObject *tuple = PyList_GET_ITEM(descr, 0);
if (!(PyTuple_Check(tuple) && PyTuple_GET_SIZE(tuple) == 2)) {
return 0;
}
- name = PyTuple_GET_ITEM(tuple, 0);
+ PyObject *name = PyTuple_GET_ITEM(tuple, 0);
if (!(PyUnicode_Check(name) && PyUnicode_GetLength(name) == 0)) {
return 0;
}
- typestr2 = PyTuple_GET_ITEM(tuple, 1);
- /* Allow unicode type strings */
- if (PyUnicode_Check(typestr2)) {
- tmp = PyUnicode_AsASCIIString(typestr2);
- if (tmp == NULL) {
- return 0;
- }
- typestr2 = tmp;
- }
- if (PyBytes_Check(typestr2) &&
- PyObject_RichCompareBool(typestr, typestr2, Py_EQ)) {
- ret = 1;
- }
- Py_XDECREF(tmp);
-
- return ret;
+ PyObject *typestr2 = PyTuple_GET_ITEM(tuple, 1);
+ return PyObject_RichCompareBool(typestr, typestr2, Py_EQ);
}
-#define PyIntOrLong_Check(obj) (PyInt_Check(obj) || PyLong_Check(obj))
-
/*NUMPY_API*/
NPY_NO_EXPORT PyObject *
PyArray_FromInterface(PyObject *origin)
@@ -1849,7 +1840,7 @@ PyArray_FromInterface(PyObject *origin)
PyArray_Descr *dtype = NULL;
char *data = NULL;
Py_buffer view;
- int res, i, n;
+ int i, n;
npy_intp dims[NPY_MAXDIMS], strides[NPY_MAXDIMS];
int dataflags = NPY_ARRAY_BEHAVED;
@@ -1889,26 +1880,15 @@ PyArray_FromInterface(PyObject *origin)
return NULL;
}
- /* Allow unicode type strings */
- if (PyUnicode_Check(attr)) {
- PyObject *tmp = PyUnicode_AsASCIIString(attr);
- if (tmp == NULL) {
- goto fail;
- }
- attr = tmp;
- }
- else {
- Py_INCREF(attr);
- }
-
- if (!PyBytes_Check(attr)) {
+ /* allow bytes for backwards compatibility */
+ if (!PyBytes_Check(attr) && !PyUnicode_Check(attr)) {
PyErr_SetString(PyExc_TypeError,
"__array_interface__ typestr must be a string");
goto fail;
}
+
/* Get dtype from type string */
- dtype = _array_typedescr_fromstr(PyString_AS_STRING(attr));
- if (dtype == NULL) {
+ if (PyArray_DescrConverter(attr, &dtype) != NPY_SUCCEED) {
goto fail;
}
@@ -1922,16 +1902,24 @@ PyArray_FromInterface(PyObject *origin)
goto fail;
}
PyArray_Descr *new_dtype = NULL;
+ if (descr != NULL) {
+ int is_default = _is_default_descr(descr, attr);
+ if (is_default < 0) {
+ goto fail;
+ }
+ if (!is_default) {
+ if (PyArray_DescrConverter2(descr, &new_dtype) != NPY_SUCCEED) {
+ goto fail;
+ }
+ if (new_dtype != NULL) {
+ Py_DECREF(dtype);
+ dtype = new_dtype;
+ }
+ }
- if (descr != NULL && !_is_default_descr(descr, attr) &&
- PyArray_DescrConverter2(descr, &new_dtype) == NPY_SUCCEED &&
- new_dtype != NULL) {
- Py_DECREF(dtype);
- dtype = new_dtype;
}
- }
- Py_DECREF(attr); /* Pairs with the unicode handling above */
+ }
/* Get shape tuple from interface specification */
attr = _PyDict_GetItemStringWithError(iface, "shape");
@@ -1990,22 +1978,16 @@ PyArray_FromInterface(PyObject *origin)
goto fail;
}
dataptr = PyTuple_GET_ITEM(attr, 0);
- if (PyString_Check(dataptr)) {
- res = sscanf(PyString_AsString(dataptr),
- "%p", (void **)&data);
- if (res < 1) {
- PyErr_SetString(PyExc_TypeError,
- "__array_interface__ data string cannot be converted");
+ if (PyLong_Check(dataptr)) {
+ data = PyLong_AsVoidPtr(dataptr);
+ if (data == NULL && PyErr_Occurred()) {
goto fail;
}
}
- else if (PyIntOrLong_Check(dataptr)) {
- data = PyLong_AsVoidPtr(dataptr);
- }
else {
PyErr_SetString(PyExc_TypeError,
"first element of __array_interface__ data tuple "
- "must be integer or string.");
+ "must be an integer.");
goto fail;
}
if (PyObject_IsTrue(PyTuple_GET_ITEM(attr,1))) {
@@ -2265,7 +2247,10 @@ PyArray_EnsureAnyArray(PyObject *op)
return PyArray_EnsureArray(op);
}
-/* TODO: Put the order parameter in PyArray_CopyAnyInto and remove this */
+/*
+ * Private implementation of PyArray_CopyAnyInto with an additional order
+ * parameter.
+ */
NPY_NO_EXPORT int
PyArray_CopyAsFlat(PyArrayObject *dst, PyArrayObject *src, NPY_ORDER order)
{
@@ -2750,7 +2735,7 @@ _calc_length(PyObject *start, PyObject *stop, PyObject *step, PyObject **next, i
return -1;
}
- zero = PyInt_FromLong(0);
+ zero = PyLong_FromLong(0);
if (!zero) {
Py_DECREF(*next);
*next = NULL;
@@ -2895,14 +2880,14 @@ PyArray_ArangeObj(PyObject *start, PyObject *stop, PyObject *step, PyArray_Descr
Py_INCREF(dtype);
}
if (!step || step == Py_None) {
- step = PyInt_FromLong(1);
+ step = PyLong_FromLong(1);
}
else {
Py_XINCREF(step);
}
if (!stop || stop == Py_None) {
stop = start;
- start = PyInt_FromLong(0);
+ start = PyLong_FromLong(0);
}
else {
Py_INCREF(start);
diff --git a/numpy/core/src/multiarray/datetime.c b/numpy/core/src/multiarray/datetime.c
index 8f3948c23..f2225809a 100644
--- a/numpy/core/src/multiarray/datetime.c
+++ b/numpy/core/src/multiarray/datetime.c
@@ -1435,14 +1435,14 @@ raise_if_datetime64_metadata_cast_error(char *object_type,
}
else {
PyObject *errmsg;
- errmsg = PyUString_FromFormat("Cannot cast %s "
+ errmsg = PyUnicode_FromFormat("Cannot cast %s "
"from metadata ", object_type);
errmsg = append_metastr_to_string(src_meta, 0, errmsg);
PyUString_ConcatAndDel(&errmsg,
- PyUString_FromString(" to "));
+ PyUnicode_FromString(" to "));
errmsg = append_metastr_to_string(dst_meta, 0, errmsg);
PyUString_ConcatAndDel(&errmsg,
- PyUString_FromFormat(" according to the rule %s",
+ PyUnicode_FromFormat(" according to the rule %s",
npy_casting_to_string(casting)));
PyErr_SetObject(PyExc_TypeError, errmsg);
Py_DECREF(errmsg);
@@ -1467,14 +1467,14 @@ raise_if_timedelta64_metadata_cast_error(char *object_type,
}
else {
PyObject *errmsg;
- errmsg = PyUString_FromFormat("Cannot cast %s "
+ errmsg = PyUnicode_FromFormat("Cannot cast %s "
"from metadata ", object_type);
errmsg = append_metastr_to_string(src_meta, 0, errmsg);
PyUString_ConcatAndDel(&errmsg,
- PyUString_FromString(" to "));
+ PyUnicode_FromString(" to "));
errmsg = append_metastr_to_string(dst_meta, 0, errmsg);
PyUString_ConcatAndDel(&errmsg,
- PyUString_FromFormat(" according to the rule %s",
+ PyUnicode_FromFormat(" according to the rule %s",
npy_casting_to_string(casting)));
PyErr_SetObject(PyExc_TypeError, errmsg);
Py_DECREF(errmsg);
@@ -1601,15 +1601,15 @@ compute_datetime_metadata_greatest_common_divisor(
incompatible_units: {
PyObject *errmsg;
- errmsg = PyUString_FromString("Cannot get "
+ errmsg = PyUnicode_FromString("Cannot get "
"a common metadata divisor for "
"NumPy datetime metadata ");
errmsg = append_metastr_to_string(meta1, 0, errmsg);
PyUString_ConcatAndDel(&errmsg,
- PyUString_FromString(" and "));
+ PyUnicode_FromString(" and "));
errmsg = append_metastr_to_string(meta2, 0, errmsg);
PyUString_ConcatAndDel(&errmsg,
- PyUString_FromString(" because they have "
+ PyUnicode_FromString(" because they have "
"incompatible nonlinear base time units"));
PyErr_SetObject(PyExc_TypeError, errmsg);
Py_DECREF(errmsg);
@@ -1617,12 +1617,12 @@ incompatible_units: {
}
units_overflow: {
PyObject *errmsg;
- errmsg = PyUString_FromString("Integer overflow "
+ errmsg = PyUnicode_FromString("Integer overflow "
"getting a common metadata divisor for "
"NumPy datetime metadata ");
errmsg = append_metastr_to_string(meta1, 0, errmsg);
PyUString_ConcatAndDel(&errmsg,
- PyUString_FromString(" and "));
+ PyUnicode_FromString(" and "));
errmsg = append_metastr_to_string(meta2, 0, errmsg);
PyErr_SetObject(PyExc_OverflowError, errmsg);
Py_DECREF(errmsg);
@@ -1717,6 +1717,10 @@ parse_datetime_unit_from_string(char const *str, Py_ssize_t len, char const *met
return NPY_FR_as;
}
}
+ else if (len == 3 && !strncmp(str, "\xce\xbcs", 3)) {
+ /* greek small letter mu, utf8-encoded */
+ return NPY_FR_us;
+ }
else if (len == 7 && !strncmp(str, "generic", 7)) {
return NPY_FR_GENERIC;
}
@@ -1747,9 +1751,9 @@ convert_datetime_metadata_to_tuple(PyArray_DatetimeMetaData *meta)
}
PyTuple_SET_ITEM(dt_tuple, 0,
- PyUString_FromString(_datetime_strings[meta->base]));
+ PyUnicode_FromString(_datetime_strings[meta->base]));
PyTuple_SET_ITEM(dt_tuple, 1,
- PyInt_FromLong(meta->num));
+ PyLong_FromLong(meta->num));
return dt_tuple;
}
@@ -1764,22 +1768,16 @@ convert_datetime_metadata_tuple_to_datetime_metadata(PyObject *tuple,
PyArray_DatetimeMetaData *out_meta,
npy_bool from_pickle)
{
- char *basestr = NULL;
- Py_ssize_t len = 0, tuple_size;
int den = 1;
- PyObject *unit_str = NULL;
if (!PyTuple_Check(tuple)) {
- PyObject *errmsg;
- errmsg = PyUString_FromString("Require tuple for tuple to NumPy "
- "datetime metadata conversion, not ");
- PyUString_ConcatAndDel(&errmsg, PyObject_Repr(tuple));
- PyErr_SetObject(PyExc_TypeError, errmsg);
- Py_DECREF(errmsg);
+ PyErr_Format(PyExc_TypeError,
+ "Require tuple for tuple to NumPy "
+ "datetime metadata conversion, not %R", tuple);
return -1;
}
- tuple_size = PyTuple_GET_SIZE(tuple);
+ Py_ssize_t tuple_size = PyTuple_GET_SIZE(tuple);
if (tuple_size < 2 || tuple_size > 4) {
PyErr_SetString(PyExc_TypeError,
"Require tuple of size 2 to 4 for "
@@ -1787,18 +1785,22 @@ convert_datetime_metadata_tuple_to_datetime_metadata(PyObject *tuple,
return -1;
}
- unit_str = PyTuple_GET_ITEM(tuple, 0);
- Py_INCREF(unit_str);
- if (PyUnicode_Check(unit_str)) {
- /* Allow unicode format strings: convert to bytes */
- PyObject *tmp = PyUnicode_AsASCIIString(unit_str);
- Py_DECREF(unit_str);
+ PyObject *unit_str = PyTuple_GET_ITEM(tuple, 0);
+ if (PyBytes_Check(unit_str)) {
+ /* Allow bytes format strings: convert to unicode */
+ PyObject *tmp = PyUnicode_FromEncodedObject(unit_str, NULL, NULL);
if (tmp == NULL) {
return -1;
}
unit_str = tmp;
}
- if (PyBytes_AsStringAndSize(unit_str, &basestr, &len) < 0) {
+ else {
+ Py_INCREF(unit_str);
+ }
+
+ Py_ssize_t len;
+ char const *basestr = PyUnicode_AsUTF8AndSize(unit_str, &len);
+ if (basestr == NULL) {
Py_DECREF(unit_str);
return -1;
}
@@ -1812,7 +1814,7 @@ convert_datetime_metadata_tuple_to_datetime_metadata(PyObject *tuple,
Py_DECREF(unit_str);
/* Convert the values to longs */
- out_meta->num = PyInt_AsLong(PyTuple_GET_ITEM(tuple, 1));
+ out_meta->num = PyLong_AsLong(PyTuple_GET_ITEM(tuple, 1));
if (error_converting(out_meta->num)) {
return -1;
}
@@ -1837,11 +1839,10 @@ convert_datetime_metadata_tuple_to_datetime_metadata(PyObject *tuple,
if (from_pickle) {
/* if (event == 1) */
PyObject *one = PyLong_FromLong(1);
- int equal_one;
if (one == NULL) {
return -1;
}
- equal_one = PyObject_RichCompareBool(event, one, Py_EQ);
+ int equal_one = PyObject_RichCompareBool(event, one, Py_EQ);
Py_DECREF(one);
if (equal_one == -1) {
return -1;
@@ -1868,7 +1869,7 @@ convert_datetime_metadata_tuple_to_datetime_metadata(PyObject *tuple,
return -1;
}
}
- den = PyInt_AsLong(PyTuple_GET_ITEM(tuple, 2));
+ den = PyLong_AsLong(PyTuple_GET_ITEM(tuple, 2));
if (error_converting(den)) {
return -1;
}
@@ -1900,26 +1901,23 @@ NPY_NO_EXPORT int
convert_pyobject_to_datetime_metadata(PyObject *obj,
PyArray_DatetimeMetaData *out_meta)
{
- PyObject *ascii = NULL;
- char *str = NULL;
- Py_ssize_t len = 0;
-
if (PyTuple_Check(obj)) {
return convert_datetime_metadata_tuple_to_datetime_metadata(
obj, out_meta, NPY_FALSE);
}
- /* Get an ASCII string */
- if (PyUnicode_Check(obj)) {
- /* Allow unicode format strings: convert to bytes */
- ascii = PyUnicode_AsASCIIString(obj);
- if (ascii == NULL) {
+    /* Get a UTF-8 string */
+ PyObject *utf8 = NULL;
+ if (PyBytes_Check(obj)) {
+ /* Allow bytes format strings: convert to unicode */
+ utf8 = PyUnicode_FromEncodedObject(obj, NULL, NULL);
+ if (utf8 == NULL) {
return -1;
}
}
- else if (PyBytes_Check(obj)) {
- ascii = obj;
- Py_INCREF(ascii);
+ else if (PyUnicode_Check(obj)) {
+ utf8 = obj;
+ Py_INCREF(utf8);
}
else {
PyErr_SetString(PyExc_TypeError,
@@ -1927,24 +1925,26 @@ convert_pyobject_to_datetime_metadata(PyObject *obj,
return -1;
}
- if (PyBytes_AsStringAndSize(ascii, &str, &len) < 0) {
- Py_DECREF(ascii);
+ Py_ssize_t len = 0;
+ char const *str = PyUnicode_AsUTF8AndSize(utf8, &len);
+ if (str == NULL) {
+ Py_DECREF(utf8);
return -1;
}
if (len > 0 && str[0] == '[') {
int r = parse_datetime_metadata_from_metastr(str, len, out_meta);
- Py_DECREF(ascii);
+ Py_DECREF(utf8);
return r;
}
else {
if (parse_datetime_extended_unit_from_string(str, len,
NULL, out_meta) < 0) {
- Py_DECREF(ascii);
+ Py_DECREF(utf8);
return -1;
}
- Py_DECREF(ascii);
+ Py_DECREF(utf8);
return 0;
}
}
@@ -1973,7 +1973,7 @@ append_metastr_to_string(PyArray_DatetimeMetaData *meta,
if (meta->base == NPY_FR_GENERIC) {
/* Without brackets, give a string "generic" */
if (skip_brackets) {
- PyUString_ConcatAndDel(&ret, PyUString_FromString("generic"));
+ PyUString_ConcatAndDel(&ret, PyUnicode_FromString("generic"));
return ret;
}
/* But with brackets, append nothing */
@@ -1994,18 +1994,18 @@ append_metastr_to_string(PyArray_DatetimeMetaData *meta,
if (num == 1) {
if (skip_brackets) {
- res = PyUString_FromFormat("%s", basestr);
+ res = PyUnicode_FromFormat("%s", basestr);
}
else {
- res = PyUString_FromFormat("[%s]", basestr);
+ res = PyUnicode_FromFormat("[%s]", basestr);
}
}
else {
if (skip_brackets) {
- res = PyUString_FromFormat("%d%s", num, basestr);
+ res = PyUnicode_FromFormat("%d%s", num, basestr);
}
else {
- res = PyUString_FromFormat("[%d%s]", num, basestr);
+ res = PyUnicode_FromFormat("[%d%s]", num, basestr);
}
}
@@ -2108,7 +2108,7 @@ convert_pydatetime_to_datetimestruct(PyObject *obj, npy_datetimestruct *out,
if (tmp == NULL) {
return -1;
}
- out->year = PyInt_AsLong(tmp);
+ out->year = PyLong_AsLong(tmp);
if (error_converting(out->year)) {
Py_DECREF(tmp);
return -1;
@@ -2120,7 +2120,7 @@ convert_pydatetime_to_datetimestruct(PyObject *obj, npy_datetimestruct *out,
if (tmp == NULL) {
return -1;
}
- out->month = PyInt_AsLong(tmp);
+ out->month = PyLong_AsLong(tmp);
if (error_converting(out->month)) {
Py_DECREF(tmp);
return -1;
@@ -2132,7 +2132,7 @@ convert_pydatetime_to_datetimestruct(PyObject *obj, npy_datetimestruct *out,
if (tmp == NULL) {
return -1;
}
- out->day = PyInt_AsLong(tmp);
+ out->day = PyLong_AsLong(tmp);
if (error_converting(out->day)) {
Py_DECREF(tmp);
return -1;
@@ -2166,7 +2166,7 @@ convert_pydatetime_to_datetimestruct(PyObject *obj, npy_datetimestruct *out,
if (tmp == NULL) {
return -1;
}
- out->hour = PyInt_AsLong(tmp);
+ out->hour = PyLong_AsLong(tmp);
if (error_converting(out->hour)) {
Py_DECREF(tmp);
return -1;
@@ -2178,7 +2178,7 @@ convert_pydatetime_to_datetimestruct(PyObject *obj, npy_datetimestruct *out,
if (tmp == NULL) {
return -1;
}
- out->min = PyInt_AsLong(tmp);
+ out->min = PyLong_AsLong(tmp);
if (error_converting(out->min)) {
Py_DECREF(tmp);
return -1;
@@ -2190,7 +2190,7 @@ convert_pydatetime_to_datetimestruct(PyObject *obj, npy_datetimestruct *out,
if (tmp == NULL) {
return -1;
}
- out->sec = PyInt_AsLong(tmp);
+ out->sec = PyLong_AsLong(tmp);
if (error_converting(out->sec)) {
Py_DECREF(tmp);
return -1;
@@ -2202,7 +2202,7 @@ convert_pydatetime_to_datetimestruct(PyObject *obj, npy_datetimestruct *out,
if (tmp == NULL) {
return -1;
}
- out->us = PyInt_AsLong(tmp);
+ out->us = PyLong_AsLong(tmp);
if (error_converting(out->us)) {
Py_DECREF(tmp);
return -1;
@@ -2350,32 +2350,33 @@ convert_pyobject_to_datetime(PyArray_DatetimeMetaData *meta, PyObject *obj,
NPY_CASTING casting, npy_datetime *out)
{
if (PyBytes_Check(obj) || PyUnicode_Check(obj)) {
- PyObject *bytes = NULL;
- char *str = NULL;
- Py_ssize_t len = 0;
- npy_datetimestruct dts;
- NPY_DATETIMEUNIT bestunit = NPY_FR_ERROR;
+ PyObject *utf8 = NULL;
- /* Convert to an ASCII string for the date parser */
- if (PyUnicode_Check(obj)) {
- bytes = PyUnicode_AsASCIIString(obj);
- if (bytes == NULL) {
+ /* Convert to an UTF8 string for the date parser */
+ if (PyBytes_Check(obj)) {
+ utf8 = PyUnicode_FromEncodedObject(obj, NULL, NULL);
+ if (utf8 == NULL) {
return -1;
}
}
else {
- bytes = obj;
- Py_INCREF(bytes);
+ utf8 = obj;
+ Py_INCREF(utf8);
}
- if (PyBytes_AsStringAndSize(bytes, &str, &len) < 0) {
- Py_DECREF(bytes);
+
+ Py_ssize_t len = 0;
+ char const *str = PyUnicode_AsUTF8AndSize(utf8, &len);
+ if (str == NULL) {
+ Py_DECREF(utf8);
return -1;
}
/* Parse the ISO date */
+ npy_datetimestruct dts;
+ NPY_DATETIMEUNIT bestunit = NPY_FR_ERROR;
if (parse_iso_8601_datetime(str, len, meta->base, casting,
&dts, &bestunit, NULL) < 0) {
- Py_DECREF(bytes);
+ Py_DECREF(utf8);
return -1;
}
@@ -2386,15 +2387,15 @@ convert_pyobject_to_datetime(PyArray_DatetimeMetaData *meta, PyObject *obj,
}
if (convert_datetimestruct_to_datetime(meta, &dts, out) < 0) {
- Py_DECREF(bytes);
+ Py_DECREF(utf8);
return -1;
}
- Py_DECREF(bytes);
+ Py_DECREF(utf8);
return 0;
}
/* Do no conversion on raw integers */
- else if (PyInt_Check(obj) || PyLong_Check(obj)) {
+ else if (PyLong_Check(obj)) {
/* Don't allow conversion from an integer without specifying a unit */
if (meta->base == NPY_FR_ERROR || meta->base == NPY_FR_GENERIC) {
PyErr_SetString(PyExc_ValueError, "Converting an integer to a "
@@ -2544,24 +2545,25 @@ convert_pyobject_to_timedelta(PyArray_DatetimeMetaData *meta, PyObject *obj,
NPY_CASTING casting, npy_timedelta *out)
{
if (PyBytes_Check(obj) || PyUnicode_Check(obj)) {
- PyObject *bytes = NULL;
- char *str = NULL;
- Py_ssize_t len = 0;
+ PyObject *utf8 = NULL;
int succeeded = 0;
- /* Convert to an ASCII string for the date parser */
- if (PyUnicode_Check(obj)) {
- bytes = PyUnicode_AsASCIIString(obj);
- if (bytes == NULL) {
+ /* Convert to an UTF8 string for the date parser */
+ if (PyBytes_Check(obj)) {
+ utf8 = PyUnicode_FromEncodedObject(obj, NULL, NULL);
+ if (utf8 == NULL) {
return -1;
}
}
else {
- bytes = obj;
- Py_INCREF(bytes);
+ utf8 = obj;
+ Py_INCREF(utf8);
}
- if (PyBytes_AsStringAndSize(bytes, &str, &len) < 0) {
- Py_DECREF(bytes);
+
+ Py_ssize_t len = 0;
+ char const *str = PyUnicode_AsUTF8AndSize(utf8, &len);
+ if (str == NULL) {
+ Py_DECREF(utf8);
return -1;
}
@@ -2582,7 +2584,7 @@ convert_pyobject_to_timedelta(PyArray_DatetimeMetaData *meta, PyObject *obj,
succeeded = 1;
}
}
- Py_DECREF(bytes);
+ Py_DECREF(utf8);
if (succeeded) {
/* Use generic units if none was specified */
@@ -2595,7 +2597,7 @@ convert_pyobject_to_timedelta(PyArray_DatetimeMetaData *meta, PyObject *obj,
}
}
/* Do no conversion on raw integers */
- else if (PyInt_Check(obj) || PyLong_Check(obj)) {
+ else if (PyLong_Check(obj)) {
/* Use the default unit if none was specified */
if (meta->base == NPY_FR_ERROR) {
meta->base = NPY_DATETIME_DEFAULTUNIT;
@@ -2699,7 +2701,7 @@ convert_pyobject_to_timedelta(PyArray_DatetimeMetaData *meta, PyObject *obj,
if (tmp == NULL) {
return -1;
}
- seconds = PyInt_AsLong(tmp);
+ seconds = PyLong_AsLong(tmp);
if (error_converting(seconds)) {
Py_DECREF(tmp);
return -1;
@@ -2711,7 +2713,7 @@ convert_pyobject_to_timedelta(PyArray_DatetimeMetaData *meta, PyObject *obj,
if (tmp == NULL) {
return -1;
}
- useconds = PyInt_AsLong(tmp);
+ useconds = PyLong_AsLong(tmp);
if (error_converting(useconds)) {
Py_DECREF(tmp);
return -1;
@@ -3320,8 +3322,7 @@ datetime_arange(PyObject *start, PyObject *stop, PyObject *step,
type_nums[2] = NPY_TIMEDELTA;
}
else {
- if (PyInt_Check(objs[1]) ||
- PyLong_Check(objs[1]) ||
+ if (PyLong_Check(objs[1]) ||
PyArray_IsScalar(objs[1], Integer) ||
is_any_numpy_timedelta(objs[1])) {
type_nums[1] = NPY_TIMEDELTA;
diff --git a/numpy/core/src/multiarray/datetime_busdaycal.c b/numpy/core/src/multiarray/datetime_busdaycal.c
index 6936a803f..2374eaa63 100644
--- a/numpy/core/src/multiarray/datetime_busdaycal.c
+++ b/numpy/core/src/multiarray/datetime_busdaycal.c
@@ -168,7 +168,7 @@ invalid_weekmask_string:
return 0;
}
- val = PyInt_AsLong(f);
+ val = PyLong_AsLong(f);
if (error_converting(val)) {
Py_DECREF(f);
Py_DECREF(obj);
diff --git a/numpy/core/src/multiarray/descriptor.c b/numpy/core/src/multiarray/descriptor.c
index 67d57975b..ee05d215e 100644
--- a/numpy/core/src/multiarray/descriptor.c
+++ b/numpy/core/src/multiarray/descriptor.c
@@ -386,7 +386,7 @@ _convert_from_tuple(PyObject *obj, int align)
}
for (int i=0; i < shape.len; i++) {
PyTuple_SET_ITEM(newdescr->subarray->shape, i,
- PyInt_FromLong((long)shape.ptr[i]));
+ PyLong_FromLong((long)shape.ptr[i]));
if (PyTuple_GET_ITEM(newdescr->subarray->shape, i) == NULL) {
Py_DECREF(newdescr);
@@ -472,7 +472,7 @@ _convert_from_array_descr(PyObject *obj, int align)
if (PyUnicode_GetLength(name) == 0) {
Py_DECREF(name);
if (title == NULL) {
- name = PyUString_FromFormat("f%d", i);
+ name = PyUnicode_FromFormat("f%d", i);
if (name == NULL) {
goto fail;
}
@@ -537,7 +537,7 @@ _convert_from_array_descr(PyObject *obj, int align)
goto fail;
}
PyTuple_SET_ITEM(tup, 0, (PyObject *)conv);
- PyTuple_SET_ITEM(tup, 1, PyInt_FromLong((long) totalsize));
+ PyTuple_SET_ITEM(tup, 1, PyLong_FromLong((long) totalsize));
/*
* Title can be "meta-data". Only insert it
@@ -660,7 +660,7 @@ _convert_from_list(PyObject *obj, int align)
}
maxalign = PyArray_MAX(maxalign, _align);
}
- PyObject *size_obj = PyInt_FromLong((long) totalsize);
+ PyObject *size_obj = PyLong_FromLong((long) totalsize);
if (!size_obj) {
Py_DECREF(conv);
goto fail;
@@ -673,7 +673,7 @@ _convert_from_list(PyObject *obj, int align)
}
PyTuple_SET_ITEM(tup, 0, (PyObject *)conv);
PyTuple_SET_ITEM(tup, 1, size_obj);
- PyObject *key = PyUString_FromFormat("f%d", i);
+ PyObject *key = PyUnicode_FromFormat("f%d", i);
if (!key) {
Py_DECREF(tup);
goto fail;
@@ -1112,7 +1112,7 @@ _convert_from_dict(PyObject *obj, int align)
/* Build item to insert (descr, offset, [title])*/
int len = 2;
PyObject *title = NULL;
- PyObject *ind = PyInt_FromLong(i);
+ PyObject *ind = PyLong_FromLong(i);
if (titles) {
title=PyObject_GetItem(titles, ind);
if (title && title != Py_None) {
@@ -1166,7 +1166,7 @@ _convert_from_dict(PyObject *obj, int align)
goto fail;
}
- PyTuple_SET_ITEM(tup, 1, PyInt_FromLong(offset));
+ PyTuple_SET_ITEM(tup, 1, PyLong_FromLong(offset));
/* Flag whether the fields are specified out of order */
if (offset < totalsize) {
has_out_of_order_fields = 1;
@@ -1190,7 +1190,7 @@ _convert_from_dict(PyObject *obj, int align)
if (align && _align > 1) {
totalsize = NPY_NEXT_ALIGNED_OFFSET(totalsize, _align);
}
- PyTuple_SET_ITEM(tup, 1, PyInt_FromLong(totalsize));
+ PyTuple_SET_ITEM(tup, 1, PyLong_FromLong(totalsize));
totalsize += newdescr->elsize;
}
if (len == 3) {
@@ -1887,10 +1887,10 @@ arraydescr_protocol_typestr_get(PyArray_Descr *self)
size >>= 2;
}
if (self->type_num == NPY_OBJECT) {
- ret = PyUString_FromFormat("%c%c", endian, basic_);
+ ret = PyUnicode_FromFormat("%c%c", endian, basic_);
}
else {
- ret = PyUString_FromFormat("%c%c%d", endian, basic_, size);
+ ret = PyUnicode_FromFormat("%c%c%d", endian, basic_, size);
}
if (PyDataType_ISDATETIME(self)) {
PyArray_DatetimeMetaData *meta;
@@ -1950,7 +1950,7 @@ arraydescr_ndim_get(PyArray_Descr *self)
Py_ssize_t ndim;
if (!PyDataType_HASSUBARRAY(self)) {
- return PyInt_FromLong(0);
+ return PyLong_FromLong(0);
}
/*
@@ -1958,7 +1958,7 @@ arraydescr_ndim_get(PyArray_Descr *self)
* for tuple argument
*/
ndim = PyTuple_Size(self->subarray->shape);
- return PyInt_FromLong(ndim);
+ return PyLong_FromLong(ndim);
}
@@ -1974,7 +1974,7 @@ arraydescr_protocol_descr_get(PyArray_Descr *self)
if (dobj == NULL) {
return NULL;
}
- PyTuple_SET_ITEM(dobj, 0, PyUString_FromString(""));
+ PyTuple_SET_ITEM(dobj, 0, PyUnicode_FromString(""));
PyTuple_SET_ITEM(dobj, 1, arraydescr_protocol_typestr_get(self));
res = PyList_New(1);
if (res == NULL) {
@@ -2010,7 +2010,7 @@ arraydescr_isbuiltin_get(PyArray_Descr *self)
if (PyTypeNum_ISUSERDEF(self->type_num)) {
val = 2;
}
- return PyInt_FromLong(val);
+ return PyLong_FromLong(val);
}
static int
@@ -2153,7 +2153,7 @@ arraydescr_names_set(PyArray_Descr *self, PyObject *val)
PyObject *item;
int valid = 1;
item = PySequence_GetItem(val, i);
- valid = PyUString_Check(item);
+ valid = PyUnicode_Check(item);
Py_DECREF(item);
if (!valid) {
PyErr_Format(PyExc_ValueError,
@@ -2391,11 +2391,11 @@ _get_pickleabletype_from_datetime_metadata(PyArray_Descr *dtype)
PyTuple_SET_ITEM(dt_tuple, 0,
PyBytes_FromString(_datetime_strings[meta->base]));
PyTuple_SET_ITEM(dt_tuple, 1,
- PyInt_FromLong(meta->num));
+ PyLong_FromLong(meta->num));
PyTuple_SET_ITEM(dt_tuple, 2,
- PyInt_FromLong(1));
+ PyLong_FromLong(1));
PyTuple_SET_ITEM(dt_tuple, 3,
- PyInt_FromLong(1));
+ PyLong_FromLong(1));
PyTuple_SET_ITEM(ret, 1, dt_tuple);
@@ -2450,7 +2450,7 @@ arraydescr_reduce(PyArray_Descr *self, PyObject *NPY_UNUSED(args))
if (self->type_num == NPY_UNICODE) {
elsize >>= 2;
}
- obj = PyUString_FromFormat("%c%d",self->kind, elsize);
+ obj = PyUnicode_FromFormat("%c%d",self->kind, elsize);
}
PyTuple_SET_ITEM(ret, 1, Py_BuildValue("(NOO)", obj, Py_False, Py_True));
@@ -2468,7 +2468,7 @@ arraydescr_reduce(PyArray_Descr *self, PyObject *NPY_UNUSED(args))
if (PyDataType_ISDATETIME(self)) {
PyObject *newobj;
state = PyTuple_New(9);
- PyTuple_SET_ITEM(state, 0, PyInt_FromLong(version));
+ PyTuple_SET_ITEM(state, 0, PyLong_FromLong(version));
/*
* newobj is a tuple of the Python metadata dictionary
* and tuple of date_time info (str, num)
@@ -2483,16 +2483,16 @@ arraydescr_reduce(PyArray_Descr *self, PyObject *NPY_UNUSED(args))
}
else if (self->metadata) {
state = PyTuple_New(9);
- PyTuple_SET_ITEM(state, 0, PyInt_FromLong(version));
+ PyTuple_SET_ITEM(state, 0, PyLong_FromLong(version));
Py_INCREF(self->metadata);
PyTuple_SET_ITEM(state, 8, self->metadata);
}
else { /* Use version 3 pickle format */
state = PyTuple_New(8);
- PyTuple_SET_ITEM(state, 0, PyInt_FromLong(3));
+ PyTuple_SET_ITEM(state, 0, PyLong_FromLong(3));
}
- PyTuple_SET_ITEM(state, 1, PyUString_FromFormat("%c", endian));
+ PyTuple_SET_ITEM(state, 1, PyUnicode_FromFormat("%c", endian));
PyTuple_SET_ITEM(state, 2, arraydescr_subdescr_get(self));
if (PyDataType_HASFIELDS(self)) {
Py_INCREF(self->names);
@@ -2516,9 +2516,9 @@ arraydescr_reduce(PyArray_Descr *self, PyObject *NPY_UNUSED(args))
elsize = -1;
alignment = -1;
}
- PyTuple_SET_ITEM(state, 5, PyInt_FromLong(elsize));
- PyTuple_SET_ITEM(state, 6, PyInt_FromLong(alignment));
- PyTuple_SET_ITEM(state, 7, PyInt_FromLong(self->flags));
+ PyTuple_SET_ITEM(state, 5, PyLong_FromLong(elsize));
+ PyTuple_SET_ITEM(state, 6, PyLong_FromLong(alignment));
+ PyTuple_SET_ITEM(state, 7, PyLong_FromLong(self->flags));
PyTuple_SET_ITEM(ret, 2, state);
return ret;
@@ -2628,7 +2628,7 @@ arraydescr_setstate(PyArray_Descr *self, PyObject *args)
default:
/* raise an error */
if (PyTuple_GET_SIZE(PyTuple_GET_ITEM(args,0)) > 5) {
- version = PyInt_AsLong(PyTuple_GET_ITEM(args, 0));
+ version = PyLong_AsLong(PyTuple_GET_ITEM(args, 0));
}
else {
version = -1;
@@ -2651,7 +2651,7 @@ arraydescr_setstate(PyArray_Descr *self, PyObject *args)
if (version == 1 || version == 0) {
if (fields != Py_None) {
PyObject *key, *list;
- key = PyInt_FromLong(-1);
+ key = PyLong_FromLong(-1);
list = PyDict_GetItemWithError(fields, key);
if (!list) {
if (!PyErr_Occurred()) {
@@ -2788,7 +2788,7 @@ arraydescr_setstate(PyArray_Descr *self, PyObject *args)
for (i = 0; i < PyTuple_GET_SIZE(names); ++i) {
name = PyTuple_GET_ITEM(names, i);
- if (!PyUString_Check(name)) {
+ if (!PyUnicode_Check(name)) {
names_ok = 0;
break;
}
@@ -2894,7 +2894,7 @@ arraydescr_setstate(PyArray_Descr *self, PyObject *args)
PyArray_DatetimeMetaData temp_dt_data;
if ((! PyTuple_Check(metadata)) || (PyTuple_Size(metadata) != 2)) {
- errmsg = PyUString_FromString("Invalid datetime dtype (metadata, c_metadata): ");
+ errmsg = PyUnicode_FromString("Invalid datetime dtype (metadata, c_metadata): ");
PyUString_ConcatAndDel(&errmsg, PyObject_Repr(metadata));
PyErr_SetObject(PyExc_ValueError, errmsg);
Py_DECREF(errmsg);
@@ -3020,7 +3020,7 @@ PyArray_DescrNewByteorder(PyArray_Descr *self, char newendian)
if (NPY_TITLE_KEY(key, value)) {
continue;
}
- if (!PyUString_Check(key) || !PyTuple_Check(value) ||
+ if (!PyUnicode_Check(key) || !PyTuple_Check(value) ||
((len=PyTuple_GET_SIZE(value)) < 2)) {
continue;
}
@@ -3393,7 +3393,7 @@ arraydescr_field_subset_view(PyArray_Descr *self, PyObject *ind)
/* disallow duplicate field indices */
if (PyDict_Contains(fields, name)) {
PyObject *msg = NULL;
- PyObject *fmt = PyUString_FromString(
+ PyObject *fmt = PyUnicode_FromString(
"duplicate field of name {!r}");
if (fmt != NULL) {
msg = PyObject_CallMethod(fmt, "format", "O", name);
diff --git a/numpy/core/src/multiarray/dragon4.c b/numpy/core/src/multiarray/dragon4.c
index 553d0effb..a7b252a77 100644
--- a/numpy/core/src/multiarray/dragon4.c
+++ b/numpy/core/src/multiarray/dragon4.c
@@ -3093,7 +3093,7 @@ Dragon4_Positional_##Type##_opt(npy_type *val, Dragon4_Options *opt)\
free_dragon4_bigint_scratch(scratch);\
return NULL;\
}\
- ret = PyUString_FromString(scratch->repr);\
+ ret = PyUnicode_FromString(scratch->repr);\
free_dragon4_bigint_scratch(scratch);\
return ret;\
}\
@@ -3130,7 +3130,7 @@ Dragon4_Scientific_##Type##_opt(npy_type *val, Dragon4_Options *opt)\
free_dragon4_bigint_scratch(scratch);\
return NULL;\
}\
- ret = PyUString_FromString(scratch->repr);\
+ ret = PyUnicode_FromString(scratch->repr);\
free_dragon4_bigint_scratch(scratch);\
return ret;\
}\
diff --git a/numpy/core/src/multiarray/dtype_transfer.c b/numpy/core/src/multiarray/dtype_transfer.c
index c9868a2c8..42c66ee7f 100644
--- a/numpy/core/src/multiarray/dtype_transfer.c
+++ b/numpy/core/src/multiarray/dtype_transfer.c
@@ -17,7 +17,6 @@
#define NPY_NO_DEPRECATED_API NPY_API_VERSION
#define _MULTIARRAYMODULE
#include <numpy/arrayobject.h>
-#include <numpy/npy_cpu.h>
#include "npy_pycompat.h"
@@ -114,18 +113,18 @@ _strided_to_strided_move_references(char *dst, npy_intp dst_stride,
{
PyObject *src_ref = NULL, *dst_ref = NULL;
while (N > 0) {
- NPY_COPY_PYOBJECT_PTR(&src_ref, src);
- NPY_COPY_PYOBJECT_PTR(&dst_ref, dst);
+ memcpy(&src_ref, src, sizeof(src_ref));
+ memcpy(&dst_ref, dst, sizeof(dst_ref));
/* Release the reference in dst */
NPY_DT_DBG_REFTRACE("dec dst ref", dst_ref);
Py_XDECREF(dst_ref);
/* Move the reference */
NPY_DT_DBG_REFTRACE("move src ref", src_ref);
- NPY_COPY_PYOBJECT_PTR(dst, &src_ref);
+ memcpy(dst, &src_ref, sizeof(src_ref));
/* Set the source reference to NULL */
src_ref = NULL;
- NPY_COPY_PYOBJECT_PTR(src, &src_ref);
+ memcpy(src, &src_ref, sizeof(src_ref));
src += src_stride;
dst += dst_stride;
@@ -143,12 +142,12 @@ _strided_to_strided_copy_references(char *dst, npy_intp dst_stride,
{
PyObject *src_ref = NULL, *dst_ref = NULL;
while (N > 0) {
- NPY_COPY_PYOBJECT_PTR(&src_ref, src);
- NPY_COPY_PYOBJECT_PTR(&dst_ref, dst);
+ memcpy(&src_ref, src, sizeof(src_ref));
+ memcpy(&dst_ref, dst, sizeof(dst_ref));
/* Copy the reference */
NPY_DT_DBG_REFTRACE("copy src ref", src_ref);
- NPY_COPY_PYOBJECT_PTR(dst, &src_ref);
+ memcpy(dst, &src_ref, sizeof(src_ref));
/* Claim the reference */
Py_XINCREF(src_ref);
/* Release the reference in dst */
@@ -694,7 +693,7 @@ _aligned_strided_to_strided_cast_decref_src(char *dst, npy_intp dst_stride,
return -1;
}
/* After casting, decrement the source ref and set it to NULL */
- NPY_COPY_PYOBJECT_PTR(&src_ref, src);
+ memcpy(&src_ref, src, sizeof(src_ref));
Py_XDECREF(src_ref);
memset(src, 0, sizeof(PyObject *));
NPY_DT_DBG_REFTRACE("dec src ref (cast object -> not object)", src_ref);
@@ -3218,7 +3217,7 @@ _null_to_strided_reference_setzero(char *dst,
PyObject *dst_ref = NULL;
while (N > 0) {
- NPY_COPY_PYOBJECT_PTR(&dst_ref, dst);
+ memcpy(&dst_ref, dst, sizeof(dst_ref));
/* Release the reference in dst and set it to NULL */
NPY_DT_DBG_REFTRACE("dec dest ref (to set zero)", dst_ref);
@@ -3349,7 +3348,7 @@ _strided_to_null_dec_src_ref_reference(char *NPY_UNUSED(dst),
while (N > 0) {
/* Release the reference in src and set it to NULL */
NPY_DT_DBG_REFTRACE("dec src ref (null dst)", src_ref);
- NPY_COPY_PYOBJECT_PTR(&src_ref, src);
+ memcpy(&src_ref, src, sizeof(src_ref));
Py_XDECREF(src_ref);
memset(src, 0, sizeof(PyObject *));
diff --git a/numpy/core/src/multiarray/dtypemeta.c b/numpy/core/src/multiarray/dtypemeta.c
index 3026e68e9..d07dc700d 100644
--- a/numpy/core/src/multiarray/dtypemeta.c
+++ b/numpy/core/src/multiarray/dtypemeta.c
@@ -253,7 +253,7 @@ datetime_known_scalar_types(
* must take charge. Otherwise we would attempt casting which does not
* truly support this. Only object arrays are special cased in this way.
*/
- return (PyType_IsSubtype(pytype, &PyString_Type) ||
+ return (PyType_IsSubtype(pytype, &PyBytes_Type) ||
PyType_IsSubtype(pytype, &PyUnicode_Type));
}
diff --git a/numpy/core/src/multiarray/einsum.c.src b/numpy/core/src/multiarray/einsum.c.src
index 2538e05c6..6ad375f67 100644
--- a/numpy/core/src/multiarray/einsum.c.src
+++ b/numpy/core/src/multiarray/einsum.c.src
@@ -16,7 +16,6 @@
#define _MULTIARRAYMODULE
#include <numpy/npy_common.h>
#include <numpy/arrayobject.h>
-#include <numpy/halffloat.h>
#include <npy_pycompat.h>
#include <ctype.h>
@@ -25,1898 +24,8 @@
#include "common.h"
#include "ctors.h"
-#ifdef NPY_HAVE_SSE_INTRINSICS
-#define EINSUM_USE_SSE1 1
-#else
-#define EINSUM_USE_SSE1 0
-#endif
-
-#ifdef NPY_HAVE_SSE2_INTRINSICS
-#define EINSUM_USE_SSE2 1
-#else
-#define EINSUM_USE_SSE2 0
-#endif
-
-#if EINSUM_USE_SSE1
-#include <xmmintrin.h>
-#endif
-
-#if EINSUM_USE_SSE2
-#include <emmintrin.h>
-#endif
-
-#define EINSUM_IS_SSE_ALIGNED(x) ((((npy_intp)x)&0xf) == 0)
-
-/********** PRINTF DEBUG TRACING **************/
-#define NPY_EINSUM_DBG_TRACING 0
-
-#if NPY_EINSUM_DBG_TRACING
-#define NPY_EINSUM_DBG_PRINT(s) printf("%s", s);
-#define NPY_EINSUM_DBG_PRINT1(s, p1) printf(s, p1);
-#define NPY_EINSUM_DBG_PRINT2(s, p1, p2) printf(s, p1, p2);
-#define NPY_EINSUM_DBG_PRINT3(s, p1, p2, p3) printf(s);
-#else
-#define NPY_EINSUM_DBG_PRINT(s)
-#define NPY_EINSUM_DBG_PRINT1(s, p1)
-#define NPY_EINSUM_DBG_PRINT2(s, p1, p2)
-#define NPY_EINSUM_DBG_PRINT3(s, p1, p2, p3)
-#endif
-/**********************************************/
-
-/**begin repeat
- * #name = byte, short, int, long, longlong,
- * ubyte, ushort, uint, ulong, ulonglong,
- * half, float, double, longdouble,
- * cfloat, cdouble, clongdouble#
- * #type = npy_byte, npy_short, npy_int, npy_long, npy_longlong,
- * npy_ubyte, npy_ushort, npy_uint, npy_ulong, npy_ulonglong,
- * npy_half, npy_float, npy_double, npy_longdouble,
- * npy_cfloat, npy_cdouble, npy_clongdouble#
- * #temptype = npy_byte, npy_short, npy_int, npy_long, npy_longlong,
- * npy_ubyte, npy_ushort, npy_uint, npy_ulong, npy_ulonglong,
- * npy_float, npy_float, npy_double, npy_longdouble,
- * npy_float, npy_double, npy_longdouble#
- * #to = ,,,,,
- * ,,,,,
- * npy_float_to_half,,,,
- * ,,#
- * #from = ,,,,,
- * ,,,,,
- * npy_half_to_float,,,,
- * ,,#
- * #complex = 0*5,
- * 0*5,
- * 0*4,
- * 1*3#
- * #float32 = 0*5,
- * 0*5,
- * 0,1,0,0,
- * 0*3#
- * #float64 = 0*5,
- * 0*5,
- * 0,0,1,0,
- * 0*3#
- */
-
-/**begin repeat1
- * #nop = 1, 2, 3, 1000#
- * #noplabel = one, two, three, any#
- */
-static void
-@name@_sum_of_products_@noplabel@(int nop, char **dataptr,
- npy_intp const *strides, npy_intp count)
-{
-#if (@nop@ == 1) || (@nop@ <= 3 && !@complex@)
- char *data0 = dataptr[0];
- npy_intp stride0 = strides[0];
-#endif
-#if (@nop@ == 2 || @nop@ == 3) && !@complex@
- char *data1 = dataptr[1];
- npy_intp stride1 = strides[1];
-#endif
-#if (@nop@ == 3) && !@complex@
- char *data2 = dataptr[2];
- npy_intp stride2 = strides[2];
-#endif
-#if (@nop@ == 1) || (@nop@ <= 3 && !@complex@)
- char *data_out = dataptr[@nop@];
- npy_intp stride_out = strides[@nop@];
-#endif
-
- NPY_EINSUM_DBG_PRINT1("@name@_sum_of_products_@noplabel@ (%d)\n", (int)count);
-
- while (count--) {
-#if !@complex@
-# if @nop@ == 1
- *(@type@ *)data_out = @to@(@from@(*(@type@ *)data0) +
- @from@(*(@type@ *)data_out));
- data0 += stride0;
- data_out += stride_out;
-# elif @nop@ == 2
- *(@type@ *)data_out = @to@(@from@(*(@type@ *)data0) *
- @from@(*(@type@ *)data1) +
- @from@(*(@type@ *)data_out));
- data0 += stride0;
- data1 += stride1;
- data_out += stride_out;
-# elif @nop@ == 3
- *(@type@ *)data_out = @to@(@from@(*(@type@ *)data0) *
- @from@(*(@type@ *)data1) *
- @from@(*(@type@ *)data2) +
- @from@(*(@type@ *)data_out));
- data0 += stride0;
- data1 += stride1;
- data2 += stride2;
- data_out += stride_out;
-# else
- @temptype@ temp = @from@(*(@type@ *)dataptr[0]);
- int i;
- for (i = 1; i < nop; ++i) {
- temp *= @from@(*(@type@ *)dataptr[i]);
- }
- *(@type@ *)dataptr[nop] = @to@(temp +
- @from@(*(@type@ *)dataptr[i]));
- for (i = 0; i <= nop; ++i) {
- dataptr[i] += strides[i];
- }
-# endif
-#else /* complex */
-# if @nop@ == 1
- ((@temptype@ *)data_out)[0] = ((@temptype@ *)data0)[0] +
- ((@temptype@ *)data_out)[0];
- ((@temptype@ *)data_out)[1] = ((@temptype@ *)data0)[1] +
- ((@temptype@ *)data_out)[1];
- data0 += stride0;
- data_out += stride_out;
-# else
-# if @nop@ <= 3
-#define _SUMPROD_NOP @nop@
-# else
-#define _SUMPROD_NOP nop
-# endif
- @temptype@ re, im, tmp;
- int i;
- re = ((@temptype@ *)dataptr[0])[0];
- im = ((@temptype@ *)dataptr[0])[1];
- for (i = 1; i < _SUMPROD_NOP; ++i) {
- tmp = re * ((@temptype@ *)dataptr[i])[0] -
- im * ((@temptype@ *)dataptr[i])[1];
- im = re * ((@temptype@ *)dataptr[i])[1] +
- im * ((@temptype@ *)dataptr[i])[0];
- re = tmp;
- }
- ((@temptype@ *)dataptr[_SUMPROD_NOP])[0] = re +
- ((@temptype@ *)dataptr[_SUMPROD_NOP])[0];
- ((@temptype@ *)dataptr[_SUMPROD_NOP])[1] = im +
- ((@temptype@ *)dataptr[_SUMPROD_NOP])[1];
-
- for (i = 0; i <= _SUMPROD_NOP; ++i) {
- dataptr[i] += strides[i];
- }
-#undef _SUMPROD_NOP
-# endif
-#endif
- }
-}
-
-#if @nop@ == 1
-
-static void
-@name@_sum_of_products_contig_one(int nop, char **dataptr,
- npy_intp const *NPY_UNUSED(strides), npy_intp count)
-{
- @type@ *data0 = (@type@ *)dataptr[0];
- @type@ *data_out = (@type@ *)dataptr[1];
-
- NPY_EINSUM_DBG_PRINT1("@name@_sum_of_products_contig_one (%d)\n",
- (int)count);
-
-/* This is placed before the main loop to make small counts faster */
-finish_after_unrolled_loop:
- switch (count) {
-/**begin repeat2
- * #i = 6, 5, 4, 3, 2, 1, 0#
- */
- case @i@+1:
-#if !@complex@
- data_out[@i@] = @to@(@from@(data0[@i@]) +
- @from@(data_out[@i@]));
-#else
- ((@temptype@ *)data_out + 2*@i@)[0] =
- ((@temptype@ *)data0 + 2*@i@)[0] +
- ((@temptype@ *)data_out + 2*@i@)[0];
- ((@temptype@ *)data_out + 2*@i@)[1] =
- ((@temptype@ *)data0 + 2*@i@)[1] +
- ((@temptype@ *)data_out + 2*@i@)[1];
-#endif
-/**end repeat2**/
- case 0:
- return;
- }
-
- /* Unroll the loop by 8 */
- while (count >= 8) {
- count -= 8;
-
-/**begin repeat2
- * #i = 0, 1, 2, 3, 4, 5, 6, 7#
- */
-#if !@complex@
- data_out[@i@] = @to@(@from@(data0[@i@]) +
- @from@(data_out[@i@]));
-#else /* complex */
- ((@temptype@ *)data_out + 2*@i@)[0] =
- ((@temptype@ *)data0 + 2*@i@)[0] +
- ((@temptype@ *)data_out + 2*@i@)[0];
- ((@temptype@ *)data_out + 2*@i@)[1] =
- ((@temptype@ *)data0 + 2*@i@)[1] +
- ((@temptype@ *)data_out + 2*@i@)[1];
-#endif
-/**end repeat2**/
- data0 += 8;
- data_out += 8;
- }
-
- /* Finish off the loop */
- goto finish_after_unrolled_loop;
-}
-
-#elif @nop@ == 2 && !@complex@
-
-static void
-@name@_sum_of_products_contig_two(int nop, char **dataptr,
- npy_intp const *NPY_UNUSED(strides), npy_intp count)
-{
- @type@ *data0 = (@type@ *)dataptr[0];
- @type@ *data1 = (@type@ *)dataptr[1];
- @type@ *data_out = (@type@ *)dataptr[2];
-
-#if EINSUM_USE_SSE1 && @float32@
- __m128 a, b;
-#elif EINSUM_USE_SSE2 && @float64@
- __m128d a, b;
-#endif
-
- NPY_EINSUM_DBG_PRINT1("@name@_sum_of_products_contig_two (%d)\n",
- (int)count);
-
-/* This is placed before the main loop to make small counts faster */
-finish_after_unrolled_loop:
- switch (count) {
-/**begin repeat2
- * #i = 6, 5, 4, 3, 2, 1, 0#
- */
- case @i@+1:
- data_out[@i@] = @to@(@from@(data0[@i@]) *
- @from@(data1[@i@]) +
- @from@(data_out[@i@]));
-/**end repeat2**/
- case 0:
- return;
- }
-
-#if EINSUM_USE_SSE1 && @float32@
- /* Use aligned instructions if possible */
- if (EINSUM_IS_SSE_ALIGNED(data0) && EINSUM_IS_SSE_ALIGNED(data1) &&
- EINSUM_IS_SSE_ALIGNED(data_out)) {
- /* Unroll the loop by 8 */
- while (count >= 8) {
- count -= 8;
-
-/**begin repeat2
- * #i = 0, 4#
- */
- a = _mm_mul_ps(_mm_load_ps(data0+@i@), _mm_load_ps(data1+@i@));
- b = _mm_add_ps(a, _mm_load_ps(data_out+@i@));
- _mm_store_ps(data_out+@i@, b);
-/**end repeat2**/
- data0 += 8;
- data1 += 8;
- data_out += 8;
- }
-
- /* Finish off the loop */
- goto finish_after_unrolled_loop;
- }
-#elif EINSUM_USE_SSE2 && @float64@
- /* Use aligned instructions if possible */
- if (EINSUM_IS_SSE_ALIGNED(data0) && EINSUM_IS_SSE_ALIGNED(data1) &&
- EINSUM_IS_SSE_ALIGNED(data_out)) {
- /* Unroll the loop by 8 */
- while (count >= 8) {
- count -= 8;
-
-/**begin repeat2
- * #i = 0, 2, 4, 6#
- */
- a = _mm_mul_pd(_mm_load_pd(data0+@i@), _mm_load_pd(data1+@i@));
- b = _mm_add_pd(a, _mm_load_pd(data_out+@i@));
- _mm_store_pd(data_out+@i@, b);
-/**end repeat2**/
- data0 += 8;
- data1 += 8;
- data_out += 8;
- }
-
- /* Finish off the loop */
- goto finish_after_unrolled_loop;
- }
-#endif
-
- /* Unroll the loop by 8 */
- while (count >= 8) {
- count -= 8;
-
-#if EINSUM_USE_SSE1 && @float32@
-/**begin repeat2
- * #i = 0, 4#
- */
- a = _mm_mul_ps(_mm_loadu_ps(data0+@i@), _mm_loadu_ps(data1+@i@));
- b = _mm_add_ps(a, _mm_loadu_ps(data_out+@i@));
- _mm_storeu_ps(data_out+@i@, b);
-/**end repeat2**/
-#elif EINSUM_USE_SSE2 && @float64@
-/**begin repeat2
- * #i = 0, 2, 4, 6#
- */
- a = _mm_mul_pd(_mm_loadu_pd(data0+@i@), _mm_loadu_pd(data1+@i@));
- b = _mm_add_pd(a, _mm_loadu_pd(data_out+@i@));
- _mm_storeu_pd(data_out+@i@, b);
-/**end repeat2**/
-#else
-/**begin repeat2
- * #i = 0, 1, 2, 3, 4, 5, 6, 7#
- */
- data_out[@i@] = @to@(@from@(data0[@i@]) *
- @from@(data1[@i@]) +
- @from@(data_out[@i@]));
-/**end repeat2**/
-#endif
- data0 += 8;
- data1 += 8;
- data_out += 8;
- }
-
- /* Finish off the loop */
- goto finish_after_unrolled_loop;
-}
-
-/* Some extra specializations for the two operand case */
-static void
-@name@_sum_of_products_stride0_contig_outcontig_two(int nop, char **dataptr,
- npy_intp const *NPY_UNUSED(strides), npy_intp count)
-{
- @temptype@ value0 = @from@(*(@type@ *)dataptr[0]);
- @type@ *data1 = (@type@ *)dataptr[1];
- @type@ *data_out = (@type@ *)dataptr[2];
-
-#if EINSUM_USE_SSE1 && @float32@
- __m128 a, b, value0_sse;
-#elif EINSUM_USE_SSE2 && @float64@
- __m128d a, b, value0_sse;
-#endif
-
- NPY_EINSUM_DBG_PRINT1("@name@_sum_of_products_stride0_contig_outcontig_two (%d)\n",
- (int)count);
-
-/* This is placed before the main loop to make small counts faster */
-finish_after_unrolled_loop:
- switch (count) {
-/**begin repeat2
- * #i = 6, 5, 4, 3, 2, 1, 0#
- */
- case @i@+1:
- data_out[@i@] = @to@(value0 *
- @from@(data1[@i@]) +
- @from@(data_out[@i@]));
-/**end repeat2**/
- case 0:
- return;
- }
-
-#if EINSUM_USE_SSE1 && @float32@
- value0_sse = _mm_set_ps1(value0);
-
- /* Use aligned instructions if possible */
- if (EINSUM_IS_SSE_ALIGNED(data1) && EINSUM_IS_SSE_ALIGNED(data_out)) {
- /* Unroll the loop by 8 */
- while (count >= 8) {
- count -= 8;
-
-/**begin repeat2
- * #i = 0, 4#
- */
- a = _mm_mul_ps(value0_sse, _mm_load_ps(data1+@i@));
- b = _mm_add_ps(a, _mm_load_ps(data_out+@i@));
- _mm_store_ps(data_out+@i@, b);
-/**end repeat2**/
- data1 += 8;
- data_out += 8;
- }
-
- /* Finish off the loop */
- if (count > 0) {
- goto finish_after_unrolled_loop;
- }
- else {
- return;
- }
- }
-#elif EINSUM_USE_SSE2 && @float64@
- value0_sse = _mm_set1_pd(value0);
-
- /* Use aligned instructions if possible */
- if (EINSUM_IS_SSE_ALIGNED(data1) && EINSUM_IS_SSE_ALIGNED(data_out)) {
- /* Unroll the loop by 8 */
- while (count >= 8) {
- count -= 8;
-
-/**begin repeat2
- * #i = 0, 2, 4, 6#
- */
- a = _mm_mul_pd(value0_sse, _mm_load_pd(data1+@i@));
- b = _mm_add_pd(a, _mm_load_pd(data_out+@i@));
- _mm_store_pd(data_out+@i@, b);
-/**end repeat2**/
- data1 += 8;
- data_out += 8;
- }
-
- /* Finish off the loop */
- if (count > 0) {
- goto finish_after_unrolled_loop;
- }
- else {
- return;
- }
- }
-#endif
-
- /* Unroll the loop by 8 */
- while (count >= 8) {
- count -= 8;
-
-#if EINSUM_USE_SSE1 && @float32@
-/**begin repeat2
- * #i = 0, 4#
- */
- a = _mm_mul_ps(value0_sse, _mm_loadu_ps(data1+@i@));
- b = _mm_add_ps(a, _mm_loadu_ps(data_out+@i@));
- _mm_storeu_ps(data_out+@i@, b);
-/**end repeat2**/
-#elif EINSUM_USE_SSE2 && @float64@
-/**begin repeat2
- * #i = 0, 2, 4, 6#
- */
- a = _mm_mul_pd(value0_sse, _mm_loadu_pd(data1+@i@));
- b = _mm_add_pd(a, _mm_loadu_pd(data_out+@i@));
- _mm_storeu_pd(data_out+@i@, b);
-/**end repeat2**/
-#else
-/**begin repeat2
- * #i = 0, 1, 2, 3, 4, 5, 6, 7#
- */
- data_out[@i@] = @to@(value0 *
- @from@(data1[@i@]) +
- @from@(data_out[@i@]));
-/**end repeat2**/
-#endif
- data1 += 8;
- data_out += 8;
- }
-
- /* Finish off the loop */
- if (count > 0) {
- goto finish_after_unrolled_loop;
- }
-}
-
-static void
-@name@_sum_of_products_contig_stride0_outcontig_two(int nop, char **dataptr,
- npy_intp const *NPY_UNUSED(strides), npy_intp count)
-{
- @type@ *data0 = (@type@ *)dataptr[0];
- @temptype@ value1 = @from@(*(@type@ *)dataptr[1]);
- @type@ *data_out = (@type@ *)dataptr[2];
-
-#if EINSUM_USE_SSE1 && @float32@
- __m128 a, b, value1_sse;
-#elif EINSUM_USE_SSE2 && @float64@
- __m128d a, b, value1_sse;
-#endif
-
- NPY_EINSUM_DBG_PRINT1("@name@_sum_of_products_contig_stride0_outcontig_two (%d)\n",
- (int)count);
-
-/* This is placed before the main loop to make small counts faster */
-finish_after_unrolled_loop:
- switch (count) {
-/**begin repeat2
- * #i = 6, 5, 4, 3, 2, 1, 0#
- */
- case @i@+1:
- data_out[@i@] = @to@(@from@(data0[@i@])*
- value1 +
- @from@(data_out[@i@]));
-/**end repeat2**/
- case 0:
- return;
- }
-
-#if EINSUM_USE_SSE1 && @float32@
- value1_sse = _mm_set_ps1(value1);
-
- /* Use aligned instructions if possible */
- if (EINSUM_IS_SSE_ALIGNED(data0) && EINSUM_IS_SSE_ALIGNED(data_out)) {
- /* Unroll the loop by 8 */
- while (count >= 8) {
- count -= 8;
-
-/**begin repeat2
- * #i = 0, 4#
- */
- a = _mm_mul_ps(_mm_load_ps(data0+@i@), value1_sse);
- b = _mm_add_ps(a, _mm_load_ps(data_out+@i@));
- _mm_store_ps(data_out+@i@, b);
-/**end repeat2**/
- data0 += 8;
- data_out += 8;
- }
-
- /* Finish off the loop */
- goto finish_after_unrolled_loop;
- }
-#elif EINSUM_USE_SSE2 && @float64@
- value1_sse = _mm_set1_pd(value1);
-
- /* Use aligned instructions if possible */
- if (EINSUM_IS_SSE_ALIGNED(data0) && EINSUM_IS_SSE_ALIGNED(data_out)) {
- /* Unroll the loop by 8 */
- while (count >= 8) {
- count -= 8;
-
-/**begin repeat2
- * #i = 0, 2, 4, 6#
- */
- a = _mm_mul_pd(_mm_load_pd(data0+@i@), value1_sse);
- b = _mm_add_pd(a, _mm_load_pd(data_out+@i@));
- _mm_store_pd(data_out+@i@, b);
-/**end repeat2**/
- data0 += 8;
- data_out += 8;
- }
-
- /* Finish off the loop */
- goto finish_after_unrolled_loop;
- }
-#endif
-
- /* Unroll the loop by 8 */
- while (count >= 8) {
- count -= 8;
-
-#if EINSUM_USE_SSE1 && @float32@
-/**begin repeat2
- * #i = 0, 4#
- */
- a = _mm_mul_ps(_mm_loadu_ps(data0+@i@), value1_sse);
- b = _mm_add_ps(a, _mm_loadu_ps(data_out+@i@));
- _mm_storeu_ps(data_out+@i@, b);
-/**end repeat2**/
-#elif EINSUM_USE_SSE2 && @float64@
-/**begin repeat2
- * #i = 0, 2, 4, 6#
- */
- a = _mm_mul_pd(_mm_loadu_pd(data0+@i@), value1_sse);
- b = _mm_add_pd(a, _mm_loadu_pd(data_out+@i@));
- _mm_storeu_pd(data_out+@i@, b);
-/**end repeat2**/
-#else
-/**begin repeat2
- * #i = 0, 1, 2, 3, 4, 5, 6, 7#
- */
- data_out[@i@] = @to@(@from@(data0[@i@])*
- value1 +
- @from@(data_out[@i@]));
-/**end repeat2**/
-#endif
- data0 += 8;
- data_out += 8;
- }
-
- /* Finish off the loop */
- goto finish_after_unrolled_loop;
-}
-
-static void
-@name@_sum_of_products_contig_contig_outstride0_two(int nop, char **dataptr,
- npy_intp const *NPY_UNUSED(strides), npy_intp count)
-{
- @type@ *data0 = (@type@ *)dataptr[0];
- @type@ *data1 = (@type@ *)dataptr[1];
- @temptype@ accum = 0;
-
-#if EINSUM_USE_SSE1 && @float32@
- __m128 a, accum_sse = _mm_setzero_ps();
-#elif EINSUM_USE_SSE2 && @float64@
- __m128d a, accum_sse = _mm_setzero_pd();
-#endif
-
- NPY_EINSUM_DBG_PRINT1("@name@_sum_of_products_contig_contig_outstride0_two (%d)\n",
- (int)count);
-
-/* This is placed before the main loop to make small counts faster */
-finish_after_unrolled_loop:
- switch (count) {
-/**begin repeat2
- * #i = 6, 5, 4, 3, 2, 1, 0#
- */
- case @i@+1:
- accum += @from@(data0[@i@]) * @from@(data1[@i@]);
-/**end repeat2**/
- case 0:
- *(@type@ *)dataptr[2] = @to@(@from@(*(@type@ *)dataptr[2]) + accum);
- return;
- }
-
-#if EINSUM_USE_SSE1 && @float32@
- /* Use aligned instructions if possible */
- if (EINSUM_IS_SSE_ALIGNED(data0) && EINSUM_IS_SSE_ALIGNED(data1)) {
- /* Unroll the loop by 8 */
- while (count >= 8) {
- count -= 8;
-
- _mm_prefetch(data0 + 512, _MM_HINT_T0);
- _mm_prefetch(data1 + 512, _MM_HINT_T0);
-
-/**begin repeat2
- * #i = 0, 4#
- */
- /*
- * NOTE: This accumulation changes the order, so will likely
- * produce slightly different results.
- */
- a = _mm_mul_ps(_mm_load_ps(data0+@i@), _mm_load_ps(data1+@i@));
- accum_sse = _mm_add_ps(accum_sse, a);
-/**end repeat2**/
- data0 += 8;
- data1 += 8;
- }
-
- /* Add the four SSE values and put in accum */
- a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(2,3,0,1));
- accum_sse = _mm_add_ps(a, accum_sse);
- a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(1,0,3,2));
- accum_sse = _mm_add_ps(a, accum_sse);
- _mm_store_ss(&accum, accum_sse);
-
- /* Finish off the loop */
- goto finish_after_unrolled_loop;
- }
-#elif EINSUM_USE_SSE2 && @float64@
- /* Use aligned instructions if possible */
- if (EINSUM_IS_SSE_ALIGNED(data0) && EINSUM_IS_SSE_ALIGNED(data1)) {
- /* Unroll the loop by 8 */
- while (count >= 8) {
- count -= 8;
-
- _mm_prefetch(data0 + 512, _MM_HINT_T0);
- _mm_prefetch(data1 + 512, _MM_HINT_T0);
-
-/**begin repeat2
- * #i = 0, 2, 4, 6#
- */
- /*
- * NOTE: This accumulation changes the order, so will likely
- * produce slightly different results.
- */
- a = _mm_mul_pd(_mm_load_pd(data0+@i@), _mm_load_pd(data1+@i@));
- accum_sse = _mm_add_pd(accum_sse, a);
-/**end repeat2**/
- data0 += 8;
- data1 += 8;
- }
-
- /* Add the two SSE2 values and put in accum */
- a = _mm_shuffle_pd(accum_sse, accum_sse, _MM_SHUFFLE2(0,1));
- accum_sse = _mm_add_pd(a, accum_sse);
- _mm_store_sd(&accum, accum_sse);
-
- /* Finish off the loop */
- goto finish_after_unrolled_loop;
- }
-#endif
-
- /* Unroll the loop by 8 */
- while (count >= 8) {
- count -= 8;
-
-#if EINSUM_USE_SSE1 && @float32@
- _mm_prefetch(data0 + 512, _MM_HINT_T0);
- _mm_prefetch(data1 + 512, _MM_HINT_T0);
-
-/**begin repeat2
- * #i = 0, 4#
- */
- /*
- * NOTE: This accumulation changes the order, so will likely
- * produce slightly different results.
- */
- a = _mm_mul_ps(_mm_loadu_ps(data0+@i@), _mm_loadu_ps(data1+@i@));
- accum_sse = _mm_add_ps(accum_sse, a);
-/**end repeat2**/
-#elif EINSUM_USE_SSE2 && @float64@
- _mm_prefetch(data0 + 512, _MM_HINT_T0);
- _mm_prefetch(data1 + 512, _MM_HINT_T0);
-
-/**begin repeat2
- * #i = 0, 2, 4, 6#
- */
- /*
- * NOTE: This accumulation changes the order, so will likely
- * produce slightly different results.
- */
- a = _mm_mul_pd(_mm_loadu_pd(data0+@i@), _mm_loadu_pd(data1+@i@));
- accum_sse = _mm_add_pd(accum_sse, a);
-/**end repeat2**/
-#else
-/**begin repeat2
- * #i = 0, 1, 2, 3, 4, 5, 6, 7#
- */
- accum += @from@(data0[@i@]) * @from@(data1[@i@]);
-/**end repeat2**/
-#endif
- data0 += 8;
- data1 += 8;
- }
-
-#if EINSUM_USE_SSE1 && @float32@
- /* Add the four SSE values and put in accum */
- a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(2,3,0,1));
- accum_sse = _mm_add_ps(a, accum_sse);
- a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(1,0,3,2));
- accum_sse = _mm_add_ps(a, accum_sse);
- _mm_store_ss(&accum, accum_sse);
-#elif EINSUM_USE_SSE2 && @float64@
- /* Add the two SSE2 values and put in accum */
- a = _mm_shuffle_pd(accum_sse, accum_sse, _MM_SHUFFLE2(0,1));
- accum_sse = _mm_add_pd(a, accum_sse);
- _mm_store_sd(&accum, accum_sse);
-#endif
-
- /* Finish off the loop */
- goto finish_after_unrolled_loop;
-}
-
-static void
-@name@_sum_of_products_stride0_contig_outstride0_two(int nop, char **dataptr,
- npy_intp const *NPY_UNUSED(strides), npy_intp count)
-{
- @temptype@ value0 = @from@(*(@type@ *)dataptr[0]);
- @type@ *data1 = (@type@ *)dataptr[1];
- @temptype@ accum = 0;
-
-#if EINSUM_USE_SSE1 && @float32@
- __m128 a, accum_sse = _mm_setzero_ps();
-#elif EINSUM_USE_SSE2 && @float64@
- __m128d a, accum_sse = _mm_setzero_pd();
-#endif
-
- NPY_EINSUM_DBG_PRINT1("@name@_sum_of_products_stride0_contig_outstride0_two (%d)\n",
- (int)count);
-
-/* This is placed before the main loop to make small counts faster */
-finish_after_unrolled_loop:
- switch (count) {
-/**begin repeat2
- * #i = 6, 5, 4, 3, 2, 1, 0#
- */
- case @i@+1:
- accum += @from@(data1[@i@]);
-/**end repeat2**/
- case 0:
- *(@type@ *)dataptr[2] = @to@(@from@(*(@type@ *)dataptr[2]) + value0 * accum);
- return;
- }
-
-#if EINSUM_USE_SSE1 && @float32@
- /* Use aligned instructions if possible */
- if (EINSUM_IS_SSE_ALIGNED(data1)) {
- /* Unroll the loop by 8 */
- while (count >= 8) {
- count -= 8;
-
-/**begin repeat2
- * #i = 0, 4#
- */
- /*
- * NOTE: This accumulation changes the order, so will likely
- * produce slightly different results.
- */
- accum_sse = _mm_add_ps(accum_sse, _mm_load_ps(data1+@i@));
-/**end repeat2**/
- data1 += 8;
- }
- /* Add the four SSE values and put in accum */
- a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(2,3,0,1));
- accum_sse = _mm_add_ps(a, accum_sse);
- a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(1,0,3,2));
- accum_sse = _mm_add_ps(a, accum_sse);
- _mm_store_ss(&accum, accum_sse);
-
- /* Finish off the loop */
- goto finish_after_unrolled_loop;
- }
-#elif EINSUM_USE_SSE2 && @float64@
- /* Use aligned instructions if possible */
- if (EINSUM_IS_SSE_ALIGNED(data1)) {
- /* Unroll the loop by 8 */
- while (count >= 8) {
- count -= 8;
-
-/**begin repeat2
- * #i = 0, 2, 4, 6#
- */
- /*
- * NOTE: This accumulation changes the order, so will likely
- * produce slightly different results.
- */
- accum_sse = _mm_add_pd(accum_sse, _mm_load_pd(data1+@i@));
-/**end repeat2**/
- data1 += 8;
- }
- /* Add the two SSE2 values and put in accum */
- a = _mm_shuffle_pd(accum_sse, accum_sse, _MM_SHUFFLE2(0,1));
- accum_sse = _mm_add_pd(a, accum_sse);
- _mm_store_sd(&accum, accum_sse);
-
- /* Finish off the loop */
- goto finish_after_unrolled_loop;
- }
-#endif
-
- /* Unroll the loop by 8 */
- while (count >= 8) {
- count -= 8;
-
-#if EINSUM_USE_SSE1 && @float32@
-/**begin repeat2
- * #i = 0, 4#
- */
- /*
- * NOTE: This accumulation changes the order, so will likely
- * produce slightly different results.
- */
- accum_sse = _mm_add_ps(accum_sse, _mm_loadu_ps(data1+@i@));
-/**end repeat2**/
-#elif EINSUM_USE_SSE2 && @float64@
-/**begin repeat2
- * #i = 0, 2, 4, 6#
- */
- /*
- * NOTE: This accumulation changes the order, so will likely
- * produce slightly different results.
- */
- accum_sse = _mm_add_pd(accum_sse, _mm_loadu_pd(data1+@i@));
-/**end repeat2**/
-#else
-/**begin repeat2
- * #i = 0, 1, 2, 3, 4, 5, 6, 7#
- */
- accum += @from@(data1[@i@]);
-/**end repeat2**/
-#endif
- data1 += 8;
- }
-
-#if EINSUM_USE_SSE1 && @float32@
- /* Add the four SSE values and put in accum */
- a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(2,3,0,1));
- accum_sse = _mm_add_ps(a, accum_sse);
- a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(1,0,3,2));
- accum_sse = _mm_add_ps(a, accum_sse);
- _mm_store_ss(&accum, accum_sse);
-#elif EINSUM_USE_SSE2 && @float64@
- /* Add the two SSE2 values and put in accum */
- a = _mm_shuffle_pd(accum_sse, accum_sse, _MM_SHUFFLE2(0,1));
- accum_sse = _mm_add_pd(a, accum_sse);
- _mm_store_sd(&accum, accum_sse);
-#endif
-
- /* Finish off the loop */
- goto finish_after_unrolled_loop;
-}
-
-static void
-@name@_sum_of_products_contig_stride0_outstride0_two(int nop, char **dataptr,
- npy_intp const *NPY_UNUSED(strides), npy_intp count)
-{
- @type@ *data0 = (@type@ *)dataptr[0];
- @temptype@ value1 = @from@(*(@type@ *)dataptr[1]);
- @temptype@ accum = 0;
-
-#if EINSUM_USE_SSE1 && @float32@
- __m128 a, accum_sse = _mm_setzero_ps();
-#elif EINSUM_USE_SSE2 && @float64@
- __m128d a, accum_sse = _mm_setzero_pd();
-#endif
-
- NPY_EINSUM_DBG_PRINT1("@name@_sum_of_products_contig_stride0_outstride0_two (%d)\n",
- (int)count);
-
-/* This is placed before the main loop to make small counts faster */
-finish_after_unrolled_loop:
- switch (count) {
-/**begin repeat2
- * #i = 6, 5, 4, 3, 2, 1, 0#
- */
- case @i@+1:
- accum += @from@(data0[@i@]);
-/**end repeat2**/
- case 0:
- *(@type@ *)dataptr[2] = @to@(@from@(*(@type@ *)dataptr[2]) + accum * value1);
- return;
- }
-
-#if EINSUM_USE_SSE1 && @float32@
- /* Use aligned instructions if possible */
- if (EINSUM_IS_SSE_ALIGNED(data0)) {
- /* Unroll the loop by 8 */
- while (count >= 8) {
- count -= 8;
-
-/**begin repeat2
- * #i = 0, 4#
- */
- /*
- * NOTE: This accumulation changes the order, so will likely
- * produce slightly different results.
- */
- accum_sse = _mm_add_ps(accum_sse, _mm_load_ps(data0+@i@));
-/**end repeat2**/
- data0 += 8;
- }
- /* Add the four SSE values and put in accum */
- a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(2,3,0,1));
- accum_sse = _mm_add_ps(a, accum_sse);
- a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(1,0,3,2));
- accum_sse = _mm_add_ps(a, accum_sse);
- _mm_store_ss(&accum, accum_sse);
- /* Finish off the loop */
- goto finish_after_unrolled_loop;
- }
-#elif EINSUM_USE_SSE2 && @float64@
- /* Use aligned instructions if possible */
- if (EINSUM_IS_SSE_ALIGNED(data0)) {
- /* Unroll the loop by 8 */
- while (count >= 8) {
- count -= 8;
-
-/**begin repeat2
- * #i = 0, 2, 4, 6#
- */
- /*
- * NOTE: This accumulation changes the order, so will likely
- * produce slightly different results.
- */
- accum_sse = _mm_add_pd(accum_sse, _mm_load_pd(data0+@i@));
-/**end repeat2**/
- data0 += 8;
- }
- /* Add the two SSE2 values and put in accum */
- a = _mm_shuffle_pd(accum_sse, accum_sse, _MM_SHUFFLE2(0,1));
- accum_sse = _mm_add_pd(a, accum_sse);
- _mm_store_sd(&accum, accum_sse);
- /* Finish off the loop */
- goto finish_after_unrolled_loop;
- }
-#endif
-
- /* Unroll the loop by 8 */
- while (count >= 8) {
- count -= 8;
-
-#if EINSUM_USE_SSE1 && @float32@
-/**begin repeat2
- * #i = 0, 4#
- */
- /*
- * NOTE: This accumulation changes the order, so will likely
- * produce slightly different results.
- */
- accum_sse = _mm_add_ps(accum_sse, _mm_loadu_ps(data0+@i@));
-/**end repeat2**/
-#elif EINSUM_USE_SSE2 && @float64@
-/**begin repeat2
- * #i = 0, 2, 4, 6#
- */
- /*
- * NOTE: This accumulation changes the order, so will likely
- * produce slightly different results.
- */
- accum_sse = _mm_add_pd(accum_sse, _mm_loadu_pd(data0+@i@));
-/**end repeat2**/
-#else
-/**begin repeat2
- * #i = 0, 1, 2, 3, 4, 5, 6, 7#
- */
- accum += @from@(data0[@i@]);
-/**end repeat2**/
-#endif
- data0 += 8;
- }
-
-#if EINSUM_USE_SSE1 && @float32@
- /* Add the four SSE values and put in accum */
- a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(2,3,0,1));
- accum_sse = _mm_add_ps(a, accum_sse);
- a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(1,0,3,2));
- accum_sse = _mm_add_ps(a, accum_sse);
- _mm_store_ss(&accum, accum_sse);
-#elif EINSUM_USE_SSE2 && @float64@
- /* Add the two SSE2 values and put in accum */
- a = _mm_shuffle_pd(accum_sse, accum_sse, _MM_SHUFFLE2(0,1));
- accum_sse = _mm_add_pd(a, accum_sse);
- _mm_store_sd(&accum, accum_sse);
-#endif
-
- /* Finish off the loop */
- goto finish_after_unrolled_loop;
-}
-
-#elif @nop@ == 3 && !@complex@
-
-static void
-@name@_sum_of_products_contig_three(int nop, char **dataptr,
- npy_intp const *NPY_UNUSED(strides), npy_intp count)
-{
- @type@ *data0 = (@type@ *)dataptr[0];
- @type@ *data1 = (@type@ *)dataptr[1];
- @type@ *data2 = (@type@ *)dataptr[2];
- @type@ *data_out = (@type@ *)dataptr[3];
-
- /* Unroll the loop by 8 */
- while (count >= 8) {
- count -= 8;
-
-/**begin repeat2
- * #i = 0, 1, 2, 3, 4, 5, 6, 7#
- */
- data_out[@i@] = @to@(@from@(data0[@i@]) *
- @from@(data1[@i@]) *
- @from@(data2[@i@]) +
- @from@(data_out[@i@]));
-/**end repeat2**/
- data0 += 8;
- data1 += 8;
- data2 += 8;
- data_out += 8;
- }
-
- /* Finish off the loop */
-
-/**begin repeat2
- * #i = 0, 1, 2, 3, 4, 5, 6, 7#
- */
- if (count-- == 0) {
- return;
- }
- data_out[@i@] = @to@(@from@(data0[@i@]) *
- @from@(data1[@i@]) *
- @from@(data2[@i@]) +
- @from@(data_out[@i@]));
-/**end repeat2**/
-}
-
-#else /* @nop@ > 3 || @complex */
-
-static void
-@name@_sum_of_products_contig_@noplabel@(int nop, char **dataptr,
- npy_intp const *NPY_UNUSED(strides), npy_intp count)
-{
- NPY_EINSUM_DBG_PRINT1("@name@_sum_of_products_contig_@noplabel@ (%d)\n",
- (int)count);
-
- while (count--) {
-#if !@complex@
- @temptype@ temp = @from@(*(@type@ *)dataptr[0]);
- int i;
- for (i = 1; i < nop; ++i) {
- temp *= @from@(*(@type@ *)dataptr[i]);
- }
- *(@type@ *)dataptr[nop] = @to@(temp +
- @from@(*(@type@ *)dataptr[i]));
- for (i = 0; i <= nop; ++i) {
- dataptr[i] += sizeof(@type@);
- }
-#else /* complex */
-# if @nop@ <= 3
-# define _SUMPROD_NOP @nop@
-# else
-# define _SUMPROD_NOP nop
-# endif
- @temptype@ re, im, tmp;
- int i;
- re = ((@temptype@ *)dataptr[0])[0];
- im = ((@temptype@ *)dataptr[0])[1];
- for (i = 1; i < _SUMPROD_NOP; ++i) {
- tmp = re * ((@temptype@ *)dataptr[i])[0] -
- im * ((@temptype@ *)dataptr[i])[1];
- im = re * ((@temptype@ *)dataptr[i])[1] +
- im * ((@temptype@ *)dataptr[i])[0];
- re = tmp;
- }
- ((@temptype@ *)dataptr[_SUMPROD_NOP])[0] = re +
- ((@temptype@ *)dataptr[_SUMPROD_NOP])[0];
- ((@temptype@ *)dataptr[_SUMPROD_NOP])[1] = im +
- ((@temptype@ *)dataptr[_SUMPROD_NOP])[1];
-
- for (i = 0; i <= _SUMPROD_NOP; ++i) {
- dataptr[i] += sizeof(@type@);
- }
-# undef _SUMPROD_NOP
-#endif
- }
-}
-
-#endif /* functions for various @nop@ */
-
-#if @nop@ == 1
-
-static void
-@name@_sum_of_products_contig_outstride0_one(int nop, char **dataptr,
- npy_intp const *strides, npy_intp count)
-{
-#if @complex@
- @temptype@ accum_re = 0, accum_im = 0;
- @temptype@ *data0 = (@temptype@ *)dataptr[0];
-#else
- @temptype@ accum = 0;
- @type@ *data0 = (@type@ *)dataptr[0];
-#endif
-
-#if EINSUM_USE_SSE1 && @float32@
- __m128 a, accum_sse = _mm_setzero_ps();
-#elif EINSUM_USE_SSE2 && @float64@
- __m128d a, accum_sse = _mm_setzero_pd();
-#endif
-
-
- NPY_EINSUM_DBG_PRINT1("@name@_sum_of_products_contig_outstride0_one (%d)\n",
- (int)count);
-
-/* This is placed before the main loop to make small counts faster */
-finish_after_unrolled_loop:
- switch (count) {
-/**begin repeat2
- * #i = 6, 5, 4, 3, 2, 1, 0#
- */
- case @i@+1:
-#if !@complex@
- accum += @from@(data0[@i@]);
-#else /* complex */
- accum_re += data0[2*@i@+0];
- accum_im += data0[2*@i@+1];
-#endif
-/**end repeat2**/
- case 0:
-#if @complex@
- ((@temptype@ *)dataptr[1])[0] += accum_re;
- ((@temptype@ *)dataptr[1])[1] += accum_im;
-#else
- *((@type@ *)dataptr[1]) = @to@(accum +
- @from@(*((@type@ *)dataptr[1])));
-#endif
- return;
- }
-
-#if EINSUM_USE_SSE1 && @float32@
- /* Use aligned instructions if possible */
- if (EINSUM_IS_SSE_ALIGNED(data0)) {
- /* Unroll the loop by 8 */
- while (count >= 8) {
- count -= 8;
-
- _mm_prefetch(data0 + 512, _MM_HINT_T0);
-
-/**begin repeat2
- * #i = 0, 4#
- */
- /*
- * NOTE: This accumulation changes the order, so will likely
- * produce slightly different results.
- */
- accum_sse = _mm_add_ps(accum_sse, _mm_load_ps(data0+@i@));
-/**end repeat2**/
- data0 += 8;
- }
-
- /* Add the four SSE values and put in accum */
- a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(2,3,0,1));
- accum_sse = _mm_add_ps(a, accum_sse);
- a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(1,0,3,2));
- accum_sse = _mm_add_ps(a, accum_sse);
- _mm_store_ss(&accum, accum_sse);
-
- /* Finish off the loop */
- goto finish_after_unrolled_loop;
- }
-#elif EINSUM_USE_SSE2 && @float64@
- /* Use aligned instructions if possible */
- if (EINSUM_IS_SSE_ALIGNED(data0)) {
- /* Unroll the loop by 8 */
- while (count >= 8) {
- count -= 8;
-
- _mm_prefetch(data0 + 512, _MM_HINT_T0);
-
-/**begin repeat2
- * #i = 0, 2, 4, 6#
- */
- /*
- * NOTE: This accumulation changes the order, so will likely
- * produce slightly different results.
- */
- accum_sse = _mm_add_pd(accum_sse, _mm_load_pd(data0+@i@));
-/**end repeat2**/
- data0 += 8;
- }
-
- /* Add the two SSE2 values and put in accum */
- a = _mm_shuffle_pd(accum_sse, accum_sse, _MM_SHUFFLE2(0,1));
- accum_sse = _mm_add_pd(a, accum_sse);
- _mm_store_sd(&accum, accum_sse);
-
- /* Finish off the loop */
- goto finish_after_unrolled_loop;
- }
-#endif
-
- /* Unroll the loop by 8 */
- while (count >= 8) {
- count -= 8;
-
-#if EINSUM_USE_SSE1 && @float32@
- _mm_prefetch(data0 + 512, _MM_HINT_T0);
-
-/**begin repeat2
- * #i = 0, 4#
- */
- /*
- * NOTE: This accumulation changes the order, so will likely
- * produce slightly different results.
- */
- accum_sse = _mm_add_ps(accum_sse, _mm_loadu_ps(data0+@i@));
-/**end repeat2**/
-#elif EINSUM_USE_SSE2 && @float64@
- _mm_prefetch(data0 + 512, _MM_HINT_T0);
-
-/**begin repeat2
- * #i = 0, 2, 4, 6#
- */
- /*
- * NOTE: This accumulation changes the order, so will likely
- * produce slightly different results.
- */
- accum_sse = _mm_add_pd(accum_sse, _mm_loadu_pd(data0+@i@));
-/**end repeat2**/
-#else
-/**begin repeat2
- * #i = 0, 1, 2, 3, 4, 5, 6, 7#
- */
-# if !@complex@
- accum += @from@(data0[@i@]);
-# else /* complex */
- accum_re += data0[2*@i@+0];
- accum_im += data0[2*@i@+1];
-# endif
-/**end repeat2**/
-#endif
-
-#if !@complex@
- data0 += 8;
-#else
- data0 += 8*2;
-#endif
- }
-
-#if EINSUM_USE_SSE1 && @float32@
- /* Add the four SSE values and put in accum */
- a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(2,3,0,1));
- accum_sse = _mm_add_ps(a, accum_sse);
- a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(1,0,3,2));
- accum_sse = _mm_add_ps(a, accum_sse);
- _mm_store_ss(&accum, accum_sse);
-#elif EINSUM_USE_SSE2 && @float64@
- /* Add the two SSE2 values and put in accum */
- a = _mm_shuffle_pd(accum_sse, accum_sse, _MM_SHUFFLE2(0,1));
- accum_sse = _mm_add_pd(a, accum_sse);
- _mm_store_sd(&accum, accum_sse);
-#endif
-
- /* Finish off the loop */
- goto finish_after_unrolled_loop;
-}
-
-#endif /* @nop@ == 1 */
-
-static void
-@name@_sum_of_products_outstride0_@noplabel@(int nop, char **dataptr,
- npy_intp const *strides, npy_intp count)
-{
-#if @complex@
- @temptype@ accum_re = 0, accum_im = 0;
-#else
- @temptype@ accum = 0;
-#endif
-
-#if (@nop@ == 1) || (@nop@ <= 3 && !@complex@)
- char *data0 = dataptr[0];
- npy_intp stride0 = strides[0];
-#endif
-#if (@nop@ == 2 || @nop@ == 3) && !@complex@
- char *data1 = dataptr[1];
- npy_intp stride1 = strides[1];
-#endif
-#if (@nop@ == 3) && !@complex@
- char *data2 = dataptr[2];
- npy_intp stride2 = strides[2];
-#endif
-
- NPY_EINSUM_DBG_PRINT1("@name@_sum_of_products_outstride0_@noplabel@ (%d)\n",
- (int)count);
-
- while (count--) {
-#if !@complex@
-# if @nop@ == 1
- accum += @from@(*(@type@ *)data0);
- data0 += stride0;
-# elif @nop@ == 2
- accum += @from@(*(@type@ *)data0) *
- @from@(*(@type@ *)data1);
- data0 += stride0;
- data1 += stride1;
-# elif @nop@ == 3
- accum += @from@(*(@type@ *)data0) *
- @from@(*(@type@ *)data1) *
- @from@(*(@type@ *)data2);
- data0 += stride0;
- data1 += stride1;
- data2 += stride2;
-# else
- @temptype@ temp = @from@(*(@type@ *)dataptr[0]);
- int i;
- for (i = 1; i < nop; ++i) {
- temp *= @from@(*(@type@ *)dataptr[i]);
- }
- accum += temp;
- for (i = 0; i < nop; ++i) {
- dataptr[i] += strides[i];
- }
-# endif
-#else /* complex */
-# if @nop@ == 1
- accum_re += ((@temptype@ *)data0)[0];
- accum_im += ((@temptype@ *)data0)[1];
- data0 += stride0;
-# else
-# if @nop@ <= 3
-#define _SUMPROD_NOP @nop@
-# else
-#define _SUMPROD_NOP nop
-# endif
- @temptype@ re, im, tmp;
- int i;
- re = ((@temptype@ *)dataptr[0])[0];
- im = ((@temptype@ *)dataptr[0])[1];
- for (i = 1; i < _SUMPROD_NOP; ++i) {
- tmp = re * ((@temptype@ *)dataptr[i])[0] -
- im * ((@temptype@ *)dataptr[i])[1];
- im = re * ((@temptype@ *)dataptr[i])[1] +
- im * ((@temptype@ *)dataptr[i])[0];
- re = tmp;
- }
- accum_re += re;
- accum_im += im;
- for (i = 0; i < _SUMPROD_NOP; ++i) {
- dataptr[i] += strides[i];
- }
-#undef _SUMPROD_NOP
-# endif
-#endif
- }
-
-#if @complex@
-# if @nop@ <= 3
- ((@temptype@ *)dataptr[@nop@])[0] += accum_re;
- ((@temptype@ *)dataptr[@nop@])[1] += accum_im;
-# else
- ((@temptype@ *)dataptr[nop])[0] += accum_re;
- ((@temptype@ *)dataptr[nop])[1] += accum_im;
-# endif
-#else
-# if @nop@ <= 3
- *((@type@ *)dataptr[@nop@]) = @to@(accum +
- @from@(*((@type@ *)dataptr[@nop@])));
-# else
- *((@type@ *)dataptr[nop]) = @to@(accum +
- @from@(*((@type@ *)dataptr[nop])));
-# endif
-#endif
-
-}
-
-/**end repeat1**/
-
-/**end repeat**/
-
-
-/* Do OR of ANDs for the boolean type */
-
-/**begin repeat
- * #nop = 1, 2, 3, 1000#
- * #noplabel = one, two, three, any#
- */
-
-static void
-bool_sum_of_products_@noplabel@(int nop, char **dataptr,
- npy_intp const *strides, npy_intp count)
-{
-#if (@nop@ <= 3)
- char *data0 = dataptr[0];
- npy_intp stride0 = strides[0];
-#endif
-#if (@nop@ == 2 || @nop@ == 3)
- char *data1 = dataptr[1];
- npy_intp stride1 = strides[1];
-#endif
-#if (@nop@ == 3)
- char *data2 = dataptr[2];
- npy_intp stride2 = strides[2];
-#endif
-#if (@nop@ <= 3)
- char *data_out = dataptr[@nop@];
- npy_intp stride_out = strides[@nop@];
-#endif
-
- while (count--) {
-#if @nop@ == 1
- *(npy_bool *)data_out = *(npy_bool *)data0 ||
- *(npy_bool *)data_out;
- data0 += stride0;
- data_out += stride_out;
-#elif @nop@ == 2
- *(npy_bool *)data_out = (*(npy_bool *)data0 &&
- *(npy_bool *)data1) ||
- *(npy_bool *)data_out;
- data0 += stride0;
- data1 += stride1;
- data_out += stride_out;
-#elif @nop@ == 3
- *(npy_bool *)data_out = (*(npy_bool *)data0 &&
- *(npy_bool *)data1 &&
- *(npy_bool *)data2) ||
- *(npy_bool *)data_out;
- data0 += stride0;
- data1 += stride1;
- data2 += stride2;
- data_out += stride_out;
-#else
- npy_bool temp = *(npy_bool *)dataptr[0];
- int i;
- for (i = 1; i < nop; ++i) {
- temp = temp && *(npy_bool *)dataptr[i];
- }
- *(npy_bool *)dataptr[nop] = temp || *(npy_bool *)dataptr[i];
- for (i = 0; i <= nop; ++i) {
- dataptr[i] += strides[i];
- }
-#endif
- }
-}
-
-static void
-bool_sum_of_products_contig_@noplabel@(int nop, char **dataptr,
- npy_intp const *strides, npy_intp count)
-{
-#if (@nop@ <= 3)
- char *data0 = dataptr[0];
-#endif
-#if (@nop@ == 2 || @nop@ == 3)
- char *data1 = dataptr[1];
-#endif
-#if (@nop@ == 3)
- char *data2 = dataptr[2];
-#endif
-#if (@nop@ <= 3)
- char *data_out = dataptr[@nop@];
-#endif
-
-#if (@nop@ <= 3)
-/* This is placed before the main loop to make small counts faster */
-finish_after_unrolled_loop:
- switch (count) {
-/**begin repeat1
- * #i = 6, 5, 4, 3, 2, 1, 0#
- */
- case @i@+1:
-# if @nop@ == 1
- ((npy_bool *)data_out)[@i@] = ((npy_bool *)data0)[@i@] ||
- ((npy_bool *)data_out)[@i@];
-# elif @nop@ == 2
- ((npy_bool *)data_out)[@i@] =
- (((npy_bool *)data0)[@i@] &&
- ((npy_bool *)data1)[@i@]) ||
- ((npy_bool *)data_out)[@i@];
-# elif @nop@ == 3
- ((npy_bool *)data_out)[@i@] =
- (((npy_bool *)data0)[@i@] &&
- ((npy_bool *)data1)[@i@] &&
- ((npy_bool *)data2)[@i@]) ||
- ((npy_bool *)data_out)[@i@];
-# endif
-/**end repeat1**/
- case 0:
- return;
- }
-#endif
-
-/* Unroll the loop by 8 for fixed-size nop */
-#if (@nop@ <= 3)
- while (count >= 8) {
- count -= 8;
-#else
- while (count--) {
-#endif
-
-# if @nop@ == 1
-/**begin repeat1
- * #i = 0, 1, 2, 3, 4, 5, 6, 7#
- */
- *((npy_bool *)data_out + @i@) = (*((npy_bool *)data0 + @i@)) ||
- (*((npy_bool *)data_out + @i@));
-/**end repeat1**/
- data0 += 8*sizeof(npy_bool);
- data_out += 8*sizeof(npy_bool);
-# elif @nop@ == 2
-/**begin repeat1
- * #i = 0, 1, 2, 3, 4, 5, 6, 7#
- */
- *((npy_bool *)data_out + @i@) =
- ((*((npy_bool *)data0 + @i@)) &&
- (*((npy_bool *)data1 + @i@))) ||
- (*((npy_bool *)data_out + @i@));
-/**end repeat1**/
- data0 += 8*sizeof(npy_bool);
- data1 += 8*sizeof(npy_bool);
- data_out += 8*sizeof(npy_bool);
-# elif @nop@ == 3
-/**begin repeat1
- * #i = 0, 1, 2, 3, 4, 5, 6, 7#
- */
- *((npy_bool *)data_out + @i@) =
- ((*((npy_bool *)data0 + @i@)) &&
- (*((npy_bool *)data1 + @i@)) &&
- (*((npy_bool *)data2 + @i@))) ||
- (*((npy_bool *)data_out + @i@));
-/**end repeat1**/
- data0 += 8*sizeof(npy_bool);
- data1 += 8*sizeof(npy_bool);
- data2 += 8*sizeof(npy_bool);
- data_out += 8*sizeof(npy_bool);
-# else
- npy_bool temp = *(npy_bool *)dataptr[0];
- int i;
- for (i = 1; i < nop; ++i) {
- temp = temp && *(npy_bool *)dataptr[i];
- }
- *(npy_bool *)dataptr[nop] = temp || *(npy_bool *)dataptr[i];
- for (i = 0; i <= nop; ++i) {
- dataptr[i] += sizeof(npy_bool);
- }
-# endif
- }
-
- /* If the loop was unrolled, we need to finish it off */
-#if (@nop@ <= 3)
- goto finish_after_unrolled_loop;
-#endif
-}
-
-static void
-bool_sum_of_products_outstride0_@noplabel@(int nop, char **dataptr,
- npy_intp const *strides, npy_intp count)
-{
- npy_bool accum = 0;
-
-#if (@nop@ <= 3)
- char *data0 = dataptr[0];
- npy_intp stride0 = strides[0];
-#endif
-#if (@nop@ == 2 || @nop@ == 3)
- char *data1 = dataptr[1];
- npy_intp stride1 = strides[1];
-#endif
-#if (@nop@ == 3)
- char *data2 = dataptr[2];
- npy_intp stride2 = strides[2];
-#endif
-
- while (count--) {
-#if @nop@ == 1
- accum = *(npy_bool *)data0 || accum;
- data0 += stride0;
-#elif @nop@ == 2
- accum = (*(npy_bool *)data0 && *(npy_bool *)data1) || accum;
- data0 += stride0;
- data1 += stride1;
-#elif @nop@ == 3
- accum = (*(npy_bool *)data0 &&
- *(npy_bool *)data1 &&
- *(npy_bool *)data2) || accum;
- data0 += stride0;
- data1 += stride1;
- data2 += stride2;
-#else
- npy_bool temp = *(npy_bool *)dataptr[0];
- int i;
- for (i = 1; i < nop; ++i) {
- temp = temp && *(npy_bool *)dataptr[i];
- }
- accum = temp || accum;
- for (i = 0; i <= nop; ++i) {
- dataptr[i] += strides[i];
- }
-#endif
- }
-
-# if @nop@ <= 3
- *((npy_bool *)dataptr[@nop@]) = accum || *((npy_bool *)dataptr[@nop@]);
-# else
- *((npy_bool *)dataptr[nop]) = accum || *((npy_bool *)dataptr[nop]);
-# endif
-}
-
-/**end repeat**/
-
-typedef void (*sum_of_products_fn)(int, char **, npy_intp const*, npy_intp);
-
-/* These tables need to match up with the type enum */
-static sum_of_products_fn
-_contig_outstride0_unary_specialization_table[NPY_NTYPES] = {
-/**begin repeat
- * #name = bool,
- * byte, ubyte,
- * short, ushort,
- * int, uint,
- * long, ulong,
- * longlong, ulonglong,
- * float, double, longdouble,
- * cfloat, cdouble, clongdouble,
- * object, string, unicode, void,
- * datetime, timedelta, half#
- * #use = 0,
- * 1, 1,
- * 1, 1,
- * 1, 1,
- * 1, 1,
- * 1, 1,
- * 1, 1, 1,
- * 1, 1, 1,
- * 0, 0, 0, 0,
- * 0, 0, 1#
- */
-#if @use@
- &@name@_sum_of_products_contig_outstride0_one,
-#else
- NULL,
-#endif
-/**end repeat**/
-}; /* End of _contig_outstride0_unary_specialization_table */
-
-static sum_of_products_fn _binary_specialization_table[NPY_NTYPES][5] = {
-/**begin repeat
- * #name = bool,
- * byte, ubyte,
- * short, ushort,
- * int, uint,
- * long, ulong,
- * longlong, ulonglong,
- * float, double, longdouble,
- * cfloat, cdouble, clongdouble,
- * object, string, unicode, void,
- * datetime, timedelta, half#
- * #use = 0,
- * 1, 1,
- * 1, 1,
- * 1, 1,
- * 1, 1,
- * 1, 1,
- * 1, 1, 1,
- * 0, 0, 0,
- * 0, 0, 0, 0,
- * 0, 0, 1#
- */
-#if @use@
-{
- &@name@_sum_of_products_stride0_contig_outstride0_two,
- &@name@_sum_of_products_stride0_contig_outcontig_two,
- &@name@_sum_of_products_contig_stride0_outstride0_two,
- &@name@_sum_of_products_contig_stride0_outcontig_two,
- &@name@_sum_of_products_contig_contig_outstride0_two,
-},
-#else
- {NULL, NULL, NULL, NULL, NULL},
-#endif
-/**end repeat**/
-}; /* End of _binary_specialization_table */
-
-static sum_of_products_fn _outstride0_specialized_table[NPY_NTYPES][4] = {
-/**begin repeat
- * #name = bool,
- * byte, ubyte,
- * short, ushort,
- * int, uint,
- * long, ulong,
- * longlong, ulonglong,
- * float, double, longdouble,
- * cfloat, cdouble, clongdouble,
- * object, string, unicode, void,
- * datetime, timedelta, half#
- * #use = 1,
- * 1, 1,
- * 1, 1,
- * 1, 1,
- * 1, 1,
- * 1, 1,
- * 1, 1, 1,
- * 1, 1, 1,
- * 0, 0, 0, 0,
- * 0, 0, 1#
- */
-#if @use@
-{
- &@name@_sum_of_products_outstride0_any,
- &@name@_sum_of_products_outstride0_one,
- &@name@_sum_of_products_outstride0_two,
- &@name@_sum_of_products_outstride0_three
-},
-#else
- {NULL, NULL, NULL, NULL},
-#endif
-/**end repeat**/
-}; /* End of _outstride0_specialized_table */
-
-static sum_of_products_fn _allcontig_specialized_table[NPY_NTYPES][4] = {
-/**begin repeat
- * #name = bool,
- * byte, ubyte,
- * short, ushort,
- * int, uint,
- * long, ulong,
- * longlong, ulonglong,
- * float, double, longdouble,
- * cfloat, cdouble, clongdouble,
- * object, string, unicode, void,
- * datetime, timedelta, half#
- * #use = 1,
- * 1, 1,
- * 1, 1,
- * 1, 1,
- * 1, 1,
- * 1, 1,
- * 1, 1, 1,
- * 1, 1, 1,
- * 0, 0, 0, 0,
- * 0, 0, 1#
- */
-#if @use@
-{
- &@name@_sum_of_products_contig_any,
- &@name@_sum_of_products_contig_one,
- &@name@_sum_of_products_contig_two,
- &@name@_sum_of_products_contig_three
-},
-#else
- {NULL, NULL, NULL, NULL},
-#endif
-/**end repeat**/
-}; /* End of _allcontig_specialized_table */
-
-static sum_of_products_fn _unspecialized_table[NPY_NTYPES][4] = {
-/**begin repeat
- * #name = bool,
- * byte, ubyte,
- * short, ushort,
- * int, uint,
- * long, ulong,
- * longlong, ulonglong,
- * float, double, longdouble,
- * cfloat, cdouble, clongdouble,
- * object, string, unicode, void,
- * datetime, timedelta, half#
- * #use = 1,
- * 1, 1,
- * 1, 1,
- * 1, 1,
- * 1, 1,
- * 1, 1,
- * 1, 1, 1,
- * 1, 1, 1,
- * 0, 0, 0, 0,
- * 0, 0, 1#
- */
-#if @use@
-{
- &@name@_sum_of_products_any,
- &@name@_sum_of_products_one,
- &@name@_sum_of_products_two,
- &@name@_sum_of_products_three
-},
-#else
- {NULL, NULL, NULL, NULL},
-#endif
-/**end repeat**/
-}; /* End of _unnspecialized_table */
-
-static sum_of_products_fn
-get_sum_of_products_function(int nop, int type_num,
- npy_intp itemsize, npy_intp const *fixed_strides)
-{
- int iop;
-
- if (type_num >= NPY_NTYPES) {
- return NULL;
- }
-
- /* contiguous reduction */
- if (nop == 1 && fixed_strides[0] == itemsize && fixed_strides[1] == 0) {
- sum_of_products_fn ret =
- _contig_outstride0_unary_specialization_table[type_num];
- if (ret != NULL) {
- return ret;
- }
- }
-
- /* nop of 2 has more specializations */
- if (nop == 2) {
- /* Encode the zero/contiguous strides */
- int code;
- code = (fixed_strides[0] == 0) ? 0 :
- (fixed_strides[0] == itemsize) ? 2*2*1 : 8;
- code += (fixed_strides[1] == 0) ? 0 :
- (fixed_strides[1] == itemsize) ? 2*1 : 8;
- code += (fixed_strides[2] == 0) ? 0 :
- (fixed_strides[2] == itemsize) ? 1 : 8;
- if (code >= 2 && code < 7) {
- sum_of_products_fn ret =
- _binary_specialization_table[type_num][code-2];
- if (ret != NULL) {
- return ret;
- }
- }
- }
-
- /* Inner loop with an output stride of 0 */
- if (fixed_strides[nop] == 0) {
- return _outstride0_specialized_table[type_num][nop <= 3 ? nop : 0];
- }
-
- /* Check for all contiguous */
- for (iop = 0; iop < nop + 1; ++iop) {
- if (fixed_strides[iop] != itemsize) {
- break;
- }
- }
-
- /* Contiguous loop */
- if (iop == nop + 1) {
- return _allcontig_specialized_table[type_num][nop <= 3 ? nop : 0];
- }
-
- /* None of the above specializations caught it, general loops */
- return _unspecialized_table[type_num][nop <= 3 ? nop : 0];
-}
+#include "einsum_sumprod.h"
+#include "einsum_debug.h"
/*
diff --git a/numpy/core/src/multiarray/einsum_debug.h b/numpy/core/src/multiarray/einsum_debug.h
new file mode 100644
index 000000000..9aa81fcbd
--- /dev/null
+++ b/numpy/core/src/multiarray/einsum_debug.h
@@ -0,0 +1,28 @@
+/*
+ * This file provides debug macros used by the other einsum files.
+ *
+ * Copyright (c) 2011 by Mark Wiebe (mwwiebe@gmail.com)
+ * The University of British Columbia
+ *
+ * See LICENSE.txt for the license.
+ */
+#ifndef _NPY_MULTIARRAY_EINSUM_DEBUG_H
+#define _NPY_MULTIARRAY_EINSUM_DEBUG_H
+
+/********** PRINTF DEBUG TRACING **************/
+#define NPY_EINSUM_DBG_TRACING 0
+
+#if NPY_EINSUM_DBG_TRACING
+#include <stdio.h>
+#define NPY_EINSUM_DBG_PRINT(s) printf("%s", s);
+#define NPY_EINSUM_DBG_PRINT1(s, p1) printf(s, p1);
+#define NPY_EINSUM_DBG_PRINT2(s, p1, p2) printf(s, p1, p2);
+#define NPY_EINSUM_DBG_PRINT3(s, p1, p2, p3) printf(s, p1, p2, p3);
+#else
+#define NPY_EINSUM_DBG_PRINT(s)
+#define NPY_EINSUM_DBG_PRINT1(s, p1)
+#define NPY_EINSUM_DBG_PRINT2(s, p1, p2)
+#define NPY_EINSUM_DBG_PRINT3(s, p1, p2, p3)
+#endif
+
+#endif
diff --git a/numpy/core/src/multiarray/einsum_sumprod.c.src b/numpy/core/src/multiarray/einsum_sumprod.c.src
new file mode 100644
index 000000000..c58e74287
--- /dev/null
+++ b/numpy/core/src/multiarray/einsum_sumprod.c.src
@@ -0,0 +1,1897 @@
+/*
+ * This file provides optimized sum of product implementations used internally
+ * by einsum.
+ *
+ * Copyright (c) 2011 by Mark Wiebe (mwwiebe@gmail.com)
+ * The University of British Columbia
+ *
+ * See LICENSE.txt for the license.
+ */
+
+#define NPY_NO_DEPRECATED_API NPY_API_VERSION
+#define _MULTIARRAYMODULE
+
+#include <numpy/npy_common.h>
+#include <numpy/ndarraytypes.h> /* for NPY_NTYPES */
+#include <numpy/halffloat.h>
+
+#include "einsum_sumprod.h"
+#include "einsum_debug.h"
+
+
+#ifdef NPY_HAVE_SSE_INTRINSICS
+#define EINSUM_USE_SSE1 1
+#else
+#define EINSUM_USE_SSE1 0
+#endif
+
+#ifdef NPY_HAVE_SSE2_INTRINSICS
+#define EINSUM_USE_SSE2 1
+#else
+#define EINSUM_USE_SSE2 0
+#endif
+
+#if EINSUM_USE_SSE1
+#include <xmmintrin.h>
+#endif
+
+#if EINSUM_USE_SSE2
+#include <emmintrin.h>
+#endif
+
+#define EINSUM_IS_SSE_ALIGNED(x) ((((npy_intp)x)&0xf) == 0)
+
+/**********************************************/
+
+/**begin repeat
+ * #name = byte, short, int, long, longlong,
+ * ubyte, ushort, uint, ulong, ulonglong,
+ * half, float, double, longdouble,
+ * cfloat, cdouble, clongdouble#
+ * #type = npy_byte, npy_short, npy_int, npy_long, npy_longlong,
+ * npy_ubyte, npy_ushort, npy_uint, npy_ulong, npy_ulonglong,
+ * npy_half, npy_float, npy_double, npy_longdouble,
+ * npy_cfloat, npy_cdouble, npy_clongdouble#
+ * #temptype = npy_byte, npy_short, npy_int, npy_long, npy_longlong,
+ * npy_ubyte, npy_ushort, npy_uint, npy_ulong, npy_ulonglong,
+ * npy_float, npy_float, npy_double, npy_longdouble,
+ * npy_float, npy_double, npy_longdouble#
+ * #to = ,,,,,
+ * ,,,,,
+ * npy_float_to_half,,,,
+ * ,,#
+ * #from = ,,,,,
+ * ,,,,,
+ * npy_half_to_float,,,,
+ * ,,#
+ * #complex = 0*5,
+ * 0*5,
+ * 0*4,
+ * 1*3#
+ * #float32 = 0*5,
+ * 0*5,
+ * 0,1,0,0,
+ * 0*3#
+ * #float64 = 0*5,
+ * 0*5,
+ * 0,0,1,0,
+ * 0*3#
+ */
+
+/**begin repeat1
+ * #nop = 1, 2, 3, 1000#
+ * #noplabel = one, two, three, any#
+ */
+static void
+@name@_sum_of_products_@noplabel@(int nop, char **dataptr,
+ npy_intp const *strides, npy_intp count)
+{
+#if (@nop@ == 1) || (@nop@ <= 3 && !@complex@)
+ char *data0 = dataptr[0];
+ npy_intp stride0 = strides[0];
+#endif
+#if (@nop@ == 2 || @nop@ == 3) && !@complex@
+ char *data1 = dataptr[1];
+ npy_intp stride1 = strides[1];
+#endif
+#if (@nop@ == 3) && !@complex@
+ char *data2 = dataptr[2];
+ npy_intp stride2 = strides[2];
+#endif
+#if (@nop@ == 1) || (@nop@ <= 3 && !@complex@)
+ char *data_out = dataptr[@nop@];
+ npy_intp stride_out = strides[@nop@];
+#endif
+
+ NPY_EINSUM_DBG_PRINT1("@name@_sum_of_products_@noplabel@ (%d)\n", (int)count);
+
+ while (count--) {
+#if !@complex@
+# if @nop@ == 1
+ *(@type@ *)data_out = @to@(@from@(*(@type@ *)data0) +
+ @from@(*(@type@ *)data_out));
+ data0 += stride0;
+ data_out += stride_out;
+# elif @nop@ == 2
+ *(@type@ *)data_out = @to@(@from@(*(@type@ *)data0) *
+ @from@(*(@type@ *)data1) +
+ @from@(*(@type@ *)data_out));
+ data0 += stride0;
+ data1 += stride1;
+ data_out += stride_out;
+# elif @nop@ == 3
+ *(@type@ *)data_out = @to@(@from@(*(@type@ *)data0) *
+ @from@(*(@type@ *)data1) *
+ @from@(*(@type@ *)data2) +
+ @from@(*(@type@ *)data_out));
+ data0 += stride0;
+ data1 += stride1;
+ data2 += stride2;
+ data_out += stride_out;
+# else
+ @temptype@ temp = @from@(*(@type@ *)dataptr[0]);
+ int i;
+ for (i = 1; i < nop; ++i) {
+ temp *= @from@(*(@type@ *)dataptr[i]);
+ }
+ *(@type@ *)dataptr[nop] = @to@(temp +
+ @from@(*(@type@ *)dataptr[i]));
+ for (i = 0; i <= nop; ++i) {
+ dataptr[i] += strides[i];
+ }
+# endif
+#else /* complex */
+# if @nop@ == 1
+ ((@temptype@ *)data_out)[0] = ((@temptype@ *)data0)[0] +
+ ((@temptype@ *)data_out)[0];
+ ((@temptype@ *)data_out)[1] = ((@temptype@ *)data0)[1] +
+ ((@temptype@ *)data_out)[1];
+ data0 += stride0;
+ data_out += stride_out;
+# else
+# if @nop@ <= 3
+#define _SUMPROD_NOP @nop@
+# else
+#define _SUMPROD_NOP nop
+# endif
+ @temptype@ re, im, tmp;
+ int i;
+ re = ((@temptype@ *)dataptr[0])[0];
+ im = ((@temptype@ *)dataptr[0])[1];
+ for (i = 1; i < _SUMPROD_NOP; ++i) {
+ tmp = re * ((@temptype@ *)dataptr[i])[0] -
+ im * ((@temptype@ *)dataptr[i])[1];
+ im = re * ((@temptype@ *)dataptr[i])[1] +
+ im * ((@temptype@ *)dataptr[i])[0];
+ re = tmp;
+ }
+ ((@temptype@ *)dataptr[_SUMPROD_NOP])[0] = re +
+ ((@temptype@ *)dataptr[_SUMPROD_NOP])[0];
+ ((@temptype@ *)dataptr[_SUMPROD_NOP])[1] = im +
+ ((@temptype@ *)dataptr[_SUMPROD_NOP])[1];
+
+ for (i = 0; i <= _SUMPROD_NOP; ++i) {
+ dataptr[i] += strides[i];
+ }
+#undef _SUMPROD_NOP
+# endif
+#endif
+ }
+}
+
+#if @nop@ == 1
+
+static void
+@name@_sum_of_products_contig_one(int nop, char **dataptr,
+ npy_intp const *NPY_UNUSED(strides), npy_intp count)
+{
+ @type@ *data0 = (@type@ *)dataptr[0];
+ @type@ *data_out = (@type@ *)dataptr[1];
+
+ NPY_EINSUM_DBG_PRINT1("@name@_sum_of_products_contig_one (%d)\n",
+ (int)count);
+
+/* This is placed before the main loop to make small counts faster */
+finish_after_unrolled_loop:
+ switch (count) {
+/**begin repeat2
+ * #i = 6, 5, 4, 3, 2, 1, 0#
+ */
+ case @i@+1:
+#if !@complex@
+ data_out[@i@] = @to@(@from@(data0[@i@]) +
+ @from@(data_out[@i@]));
+#else
+ ((@temptype@ *)data_out + 2*@i@)[0] =
+ ((@temptype@ *)data0 + 2*@i@)[0] +
+ ((@temptype@ *)data_out + 2*@i@)[0];
+ ((@temptype@ *)data_out + 2*@i@)[1] =
+ ((@temptype@ *)data0 + 2*@i@)[1] +
+ ((@temptype@ *)data_out + 2*@i@)[1];
+#endif
+/**end repeat2**/
+ case 0:
+ return;
+ }
+
+ /* Unroll the loop by 8 */
+ while (count >= 8) {
+ count -= 8;
+
+/**begin repeat2
+ * #i = 0, 1, 2, 3, 4, 5, 6, 7#
+ */
+#if !@complex@
+ data_out[@i@] = @to@(@from@(data0[@i@]) +
+ @from@(data_out[@i@]));
+#else /* complex */
+ ((@temptype@ *)data_out + 2*@i@)[0] =
+ ((@temptype@ *)data0 + 2*@i@)[0] +
+ ((@temptype@ *)data_out + 2*@i@)[0];
+ ((@temptype@ *)data_out + 2*@i@)[1] =
+ ((@temptype@ *)data0 + 2*@i@)[1] +
+ ((@temptype@ *)data_out + 2*@i@)[1];
+#endif
+/**end repeat2**/
+ data0 += 8;
+ data_out += 8;
+ }
+
+ /* Finish off the loop */
+ goto finish_after_unrolled_loop;
+}
+
+#elif @nop@ == 2 && !@complex@
+
+static void
+@name@_sum_of_products_contig_two(int nop, char **dataptr,
+ npy_intp const *NPY_UNUSED(strides), npy_intp count)
+{
+ @type@ *data0 = (@type@ *)dataptr[0];
+ @type@ *data1 = (@type@ *)dataptr[1];
+ @type@ *data_out = (@type@ *)dataptr[2];
+
+#if EINSUM_USE_SSE1 && @float32@
+ __m128 a, b;
+#elif EINSUM_USE_SSE2 && @float64@
+ __m128d a, b;
+#endif
+
+ NPY_EINSUM_DBG_PRINT1("@name@_sum_of_products_contig_two (%d)\n",
+ (int)count);
+
+/* This is placed before the main loop to make small counts faster */
+finish_after_unrolled_loop:
+ switch (count) {
+/**begin repeat2
+ * #i = 6, 5, 4, 3, 2, 1, 0#
+ */
+ case @i@+1:
+ data_out[@i@] = @to@(@from@(data0[@i@]) *
+ @from@(data1[@i@]) +
+ @from@(data_out[@i@]));
+/**end repeat2**/
+ case 0:
+ return;
+ }
+
+#if EINSUM_USE_SSE1 && @float32@
+ /* Use aligned instructions if possible */
+ if (EINSUM_IS_SSE_ALIGNED(data0) && EINSUM_IS_SSE_ALIGNED(data1) &&
+ EINSUM_IS_SSE_ALIGNED(data_out)) {
+ /* Unroll the loop by 8 */
+ while (count >= 8) {
+ count -= 8;
+
+/**begin repeat2
+ * #i = 0, 4#
+ */
+ a = _mm_mul_ps(_mm_load_ps(data0+@i@), _mm_load_ps(data1+@i@));
+ b = _mm_add_ps(a, _mm_load_ps(data_out+@i@));
+ _mm_store_ps(data_out+@i@, b);
+/**end repeat2**/
+ data0 += 8;
+ data1 += 8;
+ data_out += 8;
+ }
+
+ /* Finish off the loop */
+ goto finish_after_unrolled_loop;
+ }
+#elif EINSUM_USE_SSE2 && @float64@
+ /* Use aligned instructions if possible */
+ if (EINSUM_IS_SSE_ALIGNED(data0) && EINSUM_IS_SSE_ALIGNED(data1) &&
+ EINSUM_IS_SSE_ALIGNED(data_out)) {
+ /* Unroll the loop by 8 */
+ while (count >= 8) {
+ count -= 8;
+
+/**begin repeat2
+ * #i = 0, 2, 4, 6#
+ */
+ a = _mm_mul_pd(_mm_load_pd(data0+@i@), _mm_load_pd(data1+@i@));
+ b = _mm_add_pd(a, _mm_load_pd(data_out+@i@));
+ _mm_store_pd(data_out+@i@, b);
+/**end repeat2**/
+ data0 += 8;
+ data1 += 8;
+ data_out += 8;
+ }
+
+ /* Finish off the loop */
+ goto finish_after_unrolled_loop;
+ }
+#endif
+
+ /* Unroll the loop by 8 */
+ while (count >= 8) {
+ count -= 8;
+
+#if EINSUM_USE_SSE1 && @float32@
+/**begin repeat2
+ * #i = 0, 4#
+ */
+ a = _mm_mul_ps(_mm_loadu_ps(data0+@i@), _mm_loadu_ps(data1+@i@));
+ b = _mm_add_ps(a, _mm_loadu_ps(data_out+@i@));
+ _mm_storeu_ps(data_out+@i@, b);
+/**end repeat2**/
+#elif EINSUM_USE_SSE2 && @float64@
+/**begin repeat2
+ * #i = 0, 2, 4, 6#
+ */
+ a = _mm_mul_pd(_mm_loadu_pd(data0+@i@), _mm_loadu_pd(data1+@i@));
+ b = _mm_add_pd(a, _mm_loadu_pd(data_out+@i@));
+ _mm_storeu_pd(data_out+@i@, b);
+/**end repeat2**/
+#else
+/**begin repeat2
+ * #i = 0, 1, 2, 3, 4, 5, 6, 7#
+ */
+ data_out[@i@] = @to@(@from@(data0[@i@]) *
+ @from@(data1[@i@]) +
+ @from@(data_out[@i@]));
+/**end repeat2**/
+#endif
+ data0 += 8;
+ data1 += 8;
+ data_out += 8;
+ }
+
+ /* Finish off the loop */
+ goto finish_after_unrolled_loop;
+}
+
+/* Some extra specializations for the two operand case */
+static void
+@name@_sum_of_products_stride0_contig_outcontig_two(int nop, char **dataptr,
+ npy_intp const *NPY_UNUSED(strides), npy_intp count)
+{
+ @temptype@ value0 = @from@(*(@type@ *)dataptr[0]);
+ @type@ *data1 = (@type@ *)dataptr[1];
+ @type@ *data_out = (@type@ *)dataptr[2];
+
+#if EINSUM_USE_SSE1 && @float32@
+ __m128 a, b, value0_sse;
+#elif EINSUM_USE_SSE2 && @float64@
+ __m128d a, b, value0_sse;
+#endif
+
+ NPY_EINSUM_DBG_PRINT1("@name@_sum_of_products_stride0_contig_outcontig_two (%d)\n",
+ (int)count);
+
+/* This is placed before the main loop to make small counts faster */
+finish_after_unrolled_loop:
+ switch (count) {
+/**begin repeat2
+ * #i = 6, 5, 4, 3, 2, 1, 0#
+ */
+ case @i@+1:
+ data_out[@i@] = @to@(value0 *
+ @from@(data1[@i@]) +
+ @from@(data_out[@i@]));
+/**end repeat2**/
+ case 0:
+ return;
+ }
+
+#if EINSUM_USE_SSE1 && @float32@
+ value0_sse = _mm_set_ps1(value0);
+
+ /* Use aligned instructions if possible */
+ if (EINSUM_IS_SSE_ALIGNED(data1) && EINSUM_IS_SSE_ALIGNED(data_out)) {
+ /* Unroll the loop by 8 */
+ while (count >= 8) {
+ count -= 8;
+
+/**begin repeat2
+ * #i = 0, 4#
+ */
+ a = _mm_mul_ps(value0_sse, _mm_load_ps(data1+@i@));
+ b = _mm_add_ps(a, _mm_load_ps(data_out+@i@));
+ _mm_store_ps(data_out+@i@, b);
+/**end repeat2**/
+ data1 += 8;
+ data_out += 8;
+ }
+
+ /* Finish off the loop */
+ if (count > 0) {
+ goto finish_after_unrolled_loop;
+ }
+ else {
+ return;
+ }
+ }
+#elif EINSUM_USE_SSE2 && @float64@
+ value0_sse = _mm_set1_pd(value0);
+
+ /* Use aligned instructions if possible */
+ if (EINSUM_IS_SSE_ALIGNED(data1) && EINSUM_IS_SSE_ALIGNED(data_out)) {
+ /* Unroll the loop by 8 */
+ while (count >= 8) {
+ count -= 8;
+
+/**begin repeat2
+ * #i = 0, 2, 4, 6#
+ */
+ a = _mm_mul_pd(value0_sse, _mm_load_pd(data1+@i@));
+ b = _mm_add_pd(a, _mm_load_pd(data_out+@i@));
+ _mm_store_pd(data_out+@i@, b);
+/**end repeat2**/
+ data1 += 8;
+ data_out += 8;
+ }
+
+ /* Finish off the loop */
+ if (count > 0) {
+ goto finish_after_unrolled_loop;
+ }
+ else {
+ return;
+ }
+ }
+#endif
+
+ /* Unroll the loop by 8 */
+ while (count >= 8) {
+ count -= 8;
+
+#if EINSUM_USE_SSE1 && @float32@
+/**begin repeat2
+ * #i = 0, 4#
+ */
+ a = _mm_mul_ps(value0_sse, _mm_loadu_ps(data1+@i@));
+ b = _mm_add_ps(a, _mm_loadu_ps(data_out+@i@));
+ _mm_storeu_ps(data_out+@i@, b);
+/**end repeat2**/
+#elif EINSUM_USE_SSE2 && @float64@
+/**begin repeat2
+ * #i = 0, 2, 4, 6#
+ */
+ a = _mm_mul_pd(value0_sse, _mm_loadu_pd(data1+@i@));
+ b = _mm_add_pd(a, _mm_loadu_pd(data_out+@i@));
+ _mm_storeu_pd(data_out+@i@, b);
+/**end repeat2**/
+#else
+/**begin repeat2
+ * #i = 0, 1, 2, 3, 4, 5, 6, 7#
+ */
+ data_out[@i@] = @to@(value0 *
+ @from@(data1[@i@]) +
+ @from@(data_out[@i@]));
+/**end repeat2**/
+#endif
+ data1 += 8;
+ data_out += 8;
+ }
+
+ /* Finish off the loop */
+ if (count > 0) {
+ goto finish_after_unrolled_loop;
+ }
+}
+
+static void
+@name@_sum_of_products_contig_stride0_outcontig_two(int nop, char **dataptr,
+ npy_intp const *NPY_UNUSED(strides), npy_intp count)
+{
+ @type@ *data0 = (@type@ *)dataptr[0];
+ @temptype@ value1 = @from@(*(@type@ *)dataptr[1]);
+ @type@ *data_out = (@type@ *)dataptr[2];
+
+#if EINSUM_USE_SSE1 && @float32@
+ __m128 a, b, value1_sse;
+#elif EINSUM_USE_SSE2 && @float64@
+ __m128d a, b, value1_sse;
+#endif
+
+ NPY_EINSUM_DBG_PRINT1("@name@_sum_of_products_contig_stride0_outcontig_two (%d)\n",
+ (int)count);
+
+/* This is placed before the main loop to make small counts faster */
+finish_after_unrolled_loop:
+ switch (count) {
+/**begin repeat2
+ * #i = 6, 5, 4, 3, 2, 1, 0#
+ */
+ case @i@+1:
+ data_out[@i@] = @to@(@from@(data0[@i@])*
+ value1 +
+ @from@(data_out[@i@]));
+/**end repeat2**/
+ case 0:
+ return;
+ }
+
+#if EINSUM_USE_SSE1 && @float32@
+ value1_sse = _mm_set_ps1(value1);
+
+ /* Use aligned instructions if possible */
+ if (EINSUM_IS_SSE_ALIGNED(data0) && EINSUM_IS_SSE_ALIGNED(data_out)) {
+ /* Unroll the loop by 8 */
+ while (count >= 8) {
+ count -= 8;
+
+/**begin repeat2
+ * #i = 0, 4#
+ */
+ a = _mm_mul_ps(_mm_load_ps(data0+@i@), value1_sse);
+ b = _mm_add_ps(a, _mm_load_ps(data_out+@i@));
+ _mm_store_ps(data_out+@i@, b);
+/**end repeat2**/
+ data0 += 8;
+ data_out += 8;
+ }
+
+ /* Finish off the loop */
+ goto finish_after_unrolled_loop;
+ }
+#elif EINSUM_USE_SSE2 && @float64@
+ value1_sse = _mm_set1_pd(value1);
+
+ /* Use aligned instructions if possible */
+ if (EINSUM_IS_SSE_ALIGNED(data0) && EINSUM_IS_SSE_ALIGNED(data_out)) {
+ /* Unroll the loop by 8 */
+ while (count >= 8) {
+ count -= 8;
+
+/**begin repeat2
+ * #i = 0, 2, 4, 6#
+ */
+ a = _mm_mul_pd(_mm_load_pd(data0+@i@), value1_sse);
+ b = _mm_add_pd(a, _mm_load_pd(data_out+@i@));
+ _mm_store_pd(data_out+@i@, b);
+/**end repeat2**/
+ data0 += 8;
+ data_out += 8;
+ }
+
+ /* Finish off the loop */
+ goto finish_after_unrolled_loop;
+ }
+#endif
+
+ /* Unroll the loop by 8 */
+ while (count >= 8) {
+ count -= 8;
+
+#if EINSUM_USE_SSE1 && @float32@
+/**begin repeat2
+ * #i = 0, 4#
+ */
+ a = _mm_mul_ps(_mm_loadu_ps(data0+@i@), value1_sse);
+ b = _mm_add_ps(a, _mm_loadu_ps(data_out+@i@));
+ _mm_storeu_ps(data_out+@i@, b);
+/**end repeat2**/
+#elif EINSUM_USE_SSE2 && @float64@
+/**begin repeat2
+ * #i = 0, 2, 4, 6#
+ */
+ a = _mm_mul_pd(_mm_loadu_pd(data0+@i@), value1_sse);
+ b = _mm_add_pd(a, _mm_loadu_pd(data_out+@i@));
+ _mm_storeu_pd(data_out+@i@, b);
+/**end repeat2**/
+#else
+/**begin repeat2
+ * #i = 0, 1, 2, 3, 4, 5, 6, 7#
+ */
+ data_out[@i@] = @to@(@from@(data0[@i@])*
+ value1 +
+ @from@(data_out[@i@]));
+/**end repeat2**/
+#endif
+ data0 += 8;
+ data_out += 8;
+ }
+
+ /* Finish off the loop */
+ goto finish_after_unrolled_loop;
+}
+
+static void
+@name@_sum_of_products_contig_contig_outstride0_two(int nop, char **dataptr,
+ npy_intp const *NPY_UNUSED(strides), npy_intp count)
+{
+ @type@ *data0 = (@type@ *)dataptr[0];
+ @type@ *data1 = (@type@ *)dataptr[1];
+ @temptype@ accum = 0;
+
+#if EINSUM_USE_SSE1 && @float32@
+ __m128 a, accum_sse = _mm_setzero_ps();
+#elif EINSUM_USE_SSE2 && @float64@
+ __m128d a, accum_sse = _mm_setzero_pd();
+#endif
+
+ NPY_EINSUM_DBG_PRINT1("@name@_sum_of_products_contig_contig_outstride0_two (%d)\n",
+ (int)count);
+
+/* This is placed before the main loop to make small counts faster */
+finish_after_unrolled_loop:
+ switch (count) {
+/**begin repeat2
+ * #i = 6, 5, 4, 3, 2, 1, 0#
+ */
+ case @i@+1:
+ accum += @from@(data0[@i@]) * @from@(data1[@i@]);
+/**end repeat2**/
+ case 0:
+ *(@type@ *)dataptr[2] = @to@(@from@(*(@type@ *)dataptr[2]) + accum);
+ return;
+ }
+
+#if EINSUM_USE_SSE1 && @float32@
+ /* Use aligned instructions if possible */
+ if (EINSUM_IS_SSE_ALIGNED(data0) && EINSUM_IS_SSE_ALIGNED(data1)) {
+ /* Unroll the loop by 8 */
+ while (count >= 8) {
+ count -= 8;
+
+ _mm_prefetch(data0 + 512, _MM_HINT_T0);
+ _mm_prefetch(data1 + 512, _MM_HINT_T0);
+
+/**begin repeat2
+ * #i = 0, 4#
+ */
+ /*
+ * NOTE: This accumulation changes the order, so will likely
+ * produce slightly different results.
+ */
+ a = _mm_mul_ps(_mm_load_ps(data0+@i@), _mm_load_ps(data1+@i@));
+ accum_sse = _mm_add_ps(accum_sse, a);
+/**end repeat2**/
+ data0 += 8;
+ data1 += 8;
+ }
+
+ /* Add the four SSE values and put in accum */
+ a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(2,3,0,1));
+ accum_sse = _mm_add_ps(a, accum_sse);
+ a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(1,0,3,2));
+ accum_sse = _mm_add_ps(a, accum_sse);
+ _mm_store_ss(&accum, accum_sse);
+
+ /* Finish off the loop */
+ goto finish_after_unrolled_loop;
+ }
+#elif EINSUM_USE_SSE2 && @float64@
+ /* Use aligned instructions if possible */
+ if (EINSUM_IS_SSE_ALIGNED(data0) && EINSUM_IS_SSE_ALIGNED(data1)) {
+ /* Unroll the loop by 8 */
+ while (count >= 8) {
+ count -= 8;
+
+ _mm_prefetch(data0 + 512, _MM_HINT_T0);
+ _mm_prefetch(data1 + 512, _MM_HINT_T0);
+
+/**begin repeat2
+ * #i = 0, 2, 4, 6#
+ */
+ /*
+ * NOTE: This accumulation changes the order, so will likely
+ * produce slightly different results.
+ */
+ a = _mm_mul_pd(_mm_load_pd(data0+@i@), _mm_load_pd(data1+@i@));
+ accum_sse = _mm_add_pd(accum_sse, a);
+/**end repeat2**/
+ data0 += 8;
+ data1 += 8;
+ }
+
+ /* Add the two SSE2 values and put in accum */
+ a = _mm_shuffle_pd(accum_sse, accum_sse, _MM_SHUFFLE2(0,1));
+ accum_sse = _mm_add_pd(a, accum_sse);
+ _mm_store_sd(&accum, accum_sse);
+
+ /* Finish off the loop */
+ goto finish_after_unrolled_loop;
+ }
+#endif
+
+ /* Unroll the loop by 8 */
+ while (count >= 8) {
+ count -= 8;
+
+#if EINSUM_USE_SSE1 && @float32@
+ _mm_prefetch(data0 + 512, _MM_HINT_T0);
+ _mm_prefetch(data1 + 512, _MM_HINT_T0);
+
+/**begin repeat2
+ * #i = 0, 4#
+ */
+ /*
+ * NOTE: This accumulation changes the order, so will likely
+ * produce slightly different results.
+ */
+ a = _mm_mul_ps(_mm_loadu_ps(data0+@i@), _mm_loadu_ps(data1+@i@));
+ accum_sse = _mm_add_ps(accum_sse, a);
+/**end repeat2**/
+#elif EINSUM_USE_SSE2 && @float64@
+ _mm_prefetch(data0 + 512, _MM_HINT_T0);
+ _mm_prefetch(data1 + 512, _MM_HINT_T0);
+
+/**begin repeat2
+ * #i = 0, 2, 4, 6#
+ */
+ /*
+ * NOTE: This accumulation changes the order, so will likely
+ * produce slightly different results.
+ */
+ a = _mm_mul_pd(_mm_loadu_pd(data0+@i@), _mm_loadu_pd(data1+@i@));
+ accum_sse = _mm_add_pd(accum_sse, a);
+/**end repeat2**/
+#else
+/**begin repeat2
+ * #i = 0, 1, 2, 3, 4, 5, 6, 7#
+ */
+ accum += @from@(data0[@i@]) * @from@(data1[@i@]);
+/**end repeat2**/
+#endif
+ data0 += 8;
+ data1 += 8;
+ }
+
+#if EINSUM_USE_SSE1 && @float32@
+ /* Add the four SSE values and put in accum */
+ a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(2,3,0,1));
+ accum_sse = _mm_add_ps(a, accum_sse);
+ a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(1,0,3,2));
+ accum_sse = _mm_add_ps(a, accum_sse);
+ _mm_store_ss(&accum, accum_sse);
+#elif EINSUM_USE_SSE2 && @float64@
+ /* Add the two SSE2 values and put in accum */
+ a = _mm_shuffle_pd(accum_sse, accum_sse, _MM_SHUFFLE2(0,1));
+ accum_sse = _mm_add_pd(a, accum_sse);
+ _mm_store_sd(&accum, accum_sse);
+#endif
+
+ /* Finish off the loop */
+ goto finish_after_unrolled_loop;
+}
+
+static void
+@name@_sum_of_products_stride0_contig_outstride0_two(int nop, char **dataptr,
+ npy_intp const *NPY_UNUSED(strides), npy_intp count)
+{
+ @temptype@ value0 = @from@(*(@type@ *)dataptr[0]);
+ @type@ *data1 = (@type@ *)dataptr[1];
+ @temptype@ accum = 0;
+
+#if EINSUM_USE_SSE1 && @float32@
+ __m128 a, accum_sse = _mm_setzero_ps();
+#elif EINSUM_USE_SSE2 && @float64@
+ __m128d a, accum_sse = _mm_setzero_pd();
+#endif
+
+ NPY_EINSUM_DBG_PRINT1("@name@_sum_of_products_stride0_contig_outstride0_two (%d)\n",
+ (int)count);
+
+/* This is placed before the main loop to make small counts faster */
+finish_after_unrolled_loop:
+ switch (count) {
+/**begin repeat2
+ * #i = 6, 5, 4, 3, 2, 1, 0#
+ */
+ case @i@+1:
+ accum += @from@(data1[@i@]);
+/**end repeat2**/
+ case 0:
+ *(@type@ *)dataptr[2] = @to@(@from@(*(@type@ *)dataptr[2]) + value0 * accum);
+ return;
+ }
+
+#if EINSUM_USE_SSE1 && @float32@
+ /* Use aligned instructions if possible */
+ if (EINSUM_IS_SSE_ALIGNED(data1)) {
+ /* Unroll the loop by 8 */
+ while (count >= 8) {
+ count -= 8;
+
+/**begin repeat2
+ * #i = 0, 4#
+ */
+ /*
+ * NOTE: This accumulation changes the order, so will likely
+ * produce slightly different results.
+ */
+ accum_sse = _mm_add_ps(accum_sse, _mm_load_ps(data1+@i@));
+/**end repeat2**/
+ data1 += 8;
+ }
+ /* Add the four SSE values and put in accum */
+ a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(2,3,0,1));
+ accum_sse = _mm_add_ps(a, accum_sse);
+ a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(1,0,3,2));
+ accum_sse = _mm_add_ps(a, accum_sse);
+ _mm_store_ss(&accum, accum_sse);
+
+ /* Finish off the loop */
+ goto finish_after_unrolled_loop;
+ }
+#elif EINSUM_USE_SSE2 && @float64@
+ /* Use aligned instructions if possible */
+ if (EINSUM_IS_SSE_ALIGNED(data1)) {
+ /* Unroll the loop by 8 */
+ while (count >= 8) {
+ count -= 8;
+
+/**begin repeat2
+ * #i = 0, 2, 4, 6#
+ */
+ /*
+ * NOTE: This accumulation changes the order, so will likely
+ * produce slightly different results.
+ */
+ accum_sse = _mm_add_pd(accum_sse, _mm_load_pd(data1+@i@));
+/**end repeat2**/
+ data1 += 8;
+ }
+ /* Add the two SSE2 values and put in accum */
+ a = _mm_shuffle_pd(accum_sse, accum_sse, _MM_SHUFFLE2(0,1));
+ accum_sse = _mm_add_pd(a, accum_sse);
+ _mm_store_sd(&accum, accum_sse);
+
+ /* Finish off the loop */
+ goto finish_after_unrolled_loop;
+ }
+#endif
+
+ /* Unroll the loop by 8 */
+ while (count >= 8) {
+ count -= 8;
+
+#if EINSUM_USE_SSE1 && @float32@
+/**begin repeat2
+ * #i = 0, 4#
+ */
+ /*
+ * NOTE: This accumulation changes the order, so will likely
+ * produce slightly different results.
+ */
+ accum_sse = _mm_add_ps(accum_sse, _mm_loadu_ps(data1+@i@));
+/**end repeat2**/
+#elif EINSUM_USE_SSE2 && @float64@
+/**begin repeat2
+ * #i = 0, 2, 4, 6#
+ */
+ /*
+ * NOTE: This accumulation changes the order, so will likely
+ * produce slightly different results.
+ */
+ accum_sse = _mm_add_pd(accum_sse, _mm_loadu_pd(data1+@i@));
+/**end repeat2**/
+#else
+/**begin repeat2
+ * #i = 0, 1, 2, 3, 4, 5, 6, 7#
+ */
+ accum += @from@(data1[@i@]);
+/**end repeat2**/
+#endif
+ data1 += 8;
+ }
+
+#if EINSUM_USE_SSE1 && @float32@
+ /* Add the four SSE values and put in accum */
+ a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(2,3,0,1));
+ accum_sse = _mm_add_ps(a, accum_sse);
+ a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(1,0,3,2));
+ accum_sse = _mm_add_ps(a, accum_sse);
+ _mm_store_ss(&accum, accum_sse);
+#elif EINSUM_USE_SSE2 && @float64@
+ /* Add the two SSE2 values and put in accum */
+ a = _mm_shuffle_pd(accum_sse, accum_sse, _MM_SHUFFLE2(0,1));
+ accum_sse = _mm_add_pd(a, accum_sse);
+ _mm_store_sd(&accum, accum_sse);
+#endif
+
+ /* Finish off the loop */
+ goto finish_after_unrolled_loop;
+}
+
+static void
+@name@_sum_of_products_contig_stride0_outstride0_two(int nop, char **dataptr,
+ npy_intp const *NPY_UNUSED(strides), npy_intp count)
+{
+ @type@ *data0 = (@type@ *)dataptr[0];
+ @temptype@ value1 = @from@(*(@type@ *)dataptr[1]);
+ @temptype@ accum = 0;
+
+#if EINSUM_USE_SSE1 && @float32@
+ __m128 a, accum_sse = _mm_setzero_ps();
+#elif EINSUM_USE_SSE2 && @float64@
+ __m128d a, accum_sse = _mm_setzero_pd();
+#endif
+
+ NPY_EINSUM_DBG_PRINT1("@name@_sum_of_products_contig_stride0_outstride0_two (%d)\n",
+ (int)count);
+
+/* This is placed before the main loop to make small counts faster */
+finish_after_unrolled_loop:
+ switch (count) {
+/**begin repeat2
+ * #i = 6, 5, 4, 3, 2, 1, 0#
+ */
+ case @i@+1:
+ accum += @from@(data0[@i@]);
+/**end repeat2**/
+ case 0:
+ *(@type@ *)dataptr[2] = @to@(@from@(*(@type@ *)dataptr[2]) + accum * value1);
+ return;
+ }
+
+#if EINSUM_USE_SSE1 && @float32@
+ /* Use aligned instructions if possible */
+ if (EINSUM_IS_SSE_ALIGNED(data0)) {
+ /* Unroll the loop by 8 */
+ while (count >= 8) {
+ count -= 8;
+
+/**begin repeat2
+ * #i = 0, 4#
+ */
+ /*
+ * NOTE: This accumulation changes the order, so will likely
+ * produce slightly different results.
+ */
+ accum_sse = _mm_add_ps(accum_sse, _mm_load_ps(data0+@i@));
+/**end repeat2**/
+ data0 += 8;
+ }
+ /* Add the four SSE values and put in accum */
+ a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(2,3,0,1));
+ accum_sse = _mm_add_ps(a, accum_sse);
+ a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(1,0,3,2));
+ accum_sse = _mm_add_ps(a, accum_sse);
+ _mm_store_ss(&accum, accum_sse);
+ /* Finish off the loop */
+ goto finish_after_unrolled_loop;
+ }
+#elif EINSUM_USE_SSE2 && @float64@
+ /* Use aligned instructions if possible */
+ if (EINSUM_IS_SSE_ALIGNED(data0)) {
+ /* Unroll the loop by 8 */
+ while (count >= 8) {
+ count -= 8;
+
+/**begin repeat2
+ * #i = 0, 2, 4, 6#
+ */
+ /*
+ * NOTE: This accumulation changes the order, so will likely
+ * produce slightly different results.
+ */
+ accum_sse = _mm_add_pd(accum_sse, _mm_load_pd(data0+@i@));
+/**end repeat2**/
+ data0 += 8;
+ }
+ /* Add the two SSE2 values and put in accum */
+ a = _mm_shuffle_pd(accum_sse, accum_sse, _MM_SHUFFLE2(0,1));
+ accum_sse = _mm_add_pd(a, accum_sse);
+ _mm_store_sd(&accum, accum_sse);
+ /* Finish off the loop */
+ goto finish_after_unrolled_loop;
+ }
+#endif
+
+ /* Unroll the loop by 8 */
+ while (count >= 8) {
+ count -= 8;
+
+#if EINSUM_USE_SSE1 && @float32@
+/**begin repeat2
+ * #i = 0, 4#
+ */
+ /*
+ * NOTE: This accumulation changes the order, so will likely
+ * produce slightly different results.
+ */
+ accum_sse = _mm_add_ps(accum_sse, _mm_loadu_ps(data0+@i@));
+/**end repeat2**/
+#elif EINSUM_USE_SSE2 && @float64@
+/**begin repeat2
+ * #i = 0, 2, 4, 6#
+ */
+ /*
+ * NOTE: This accumulation changes the order, so will likely
+ * produce slightly different results.
+ */
+ accum_sse = _mm_add_pd(accum_sse, _mm_loadu_pd(data0+@i@));
+/**end repeat2**/
+#else
+/**begin repeat2
+ * #i = 0, 1, 2, 3, 4, 5, 6, 7#
+ */
+ accum += @from@(data0[@i@]);
+/**end repeat2**/
+#endif
+ data0 += 8;
+ }
+
+#if EINSUM_USE_SSE1 && @float32@
+ /* Add the four SSE values and put in accum */
+ a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(2,3,0,1));
+ accum_sse = _mm_add_ps(a, accum_sse);
+ a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(1,0,3,2));
+ accum_sse = _mm_add_ps(a, accum_sse);
+ _mm_store_ss(&accum, accum_sse);
+#elif EINSUM_USE_SSE2 && @float64@
+ /* Add the two SSE2 values and put in accum */
+ a = _mm_shuffle_pd(accum_sse, accum_sse, _MM_SHUFFLE2(0,1));
+ accum_sse = _mm_add_pd(a, accum_sse);
+ _mm_store_sd(&accum, accum_sse);
+#endif
+
+ /* Finish off the loop */
+ goto finish_after_unrolled_loop;
+}
+
+#elif @nop@ == 3 && !@complex@
+
+static void
+@name@_sum_of_products_contig_three(int nop, char **dataptr,
+ npy_intp const *NPY_UNUSED(strides), npy_intp count)
+{
+ @type@ *data0 = (@type@ *)dataptr[0];
+ @type@ *data1 = (@type@ *)dataptr[1];
+ @type@ *data2 = (@type@ *)dataptr[2];
+ @type@ *data_out = (@type@ *)dataptr[3];
+
+ /* Unroll the loop by 8 */
+ while (count >= 8) {
+ count -= 8;
+
+/**begin repeat2
+ * #i = 0, 1, 2, 3, 4, 5, 6, 7#
+ */
+ data_out[@i@] = @to@(@from@(data0[@i@]) *
+ @from@(data1[@i@]) *
+ @from@(data2[@i@]) +
+ @from@(data_out[@i@]));
+/**end repeat2**/
+ data0 += 8;
+ data1 += 8;
+ data2 += 8;
+ data_out += 8;
+ }
+
+ /* Finish off the loop */
+
+/**begin repeat2
+ * #i = 0, 1, 2, 3, 4, 5, 6, 7#
+ */
+ if (count-- == 0) {
+ return;
+ }
+ data_out[@i@] = @to@(@from@(data0[@i@]) *
+ @from@(data1[@i@]) *
+ @from@(data2[@i@]) +
+ @from@(data_out[@i@]));
+/**end repeat2**/
+}
+
+#else /* @nop@ > 3 || @complex@ */
+
+static void
+@name@_sum_of_products_contig_@noplabel@(int nop, char **dataptr,
+ npy_intp const *NPY_UNUSED(strides), npy_intp count)
+{
+ NPY_EINSUM_DBG_PRINT1("@name@_sum_of_products_contig_@noplabel@ (%d)\n",
+ (int)count);
+
+ while (count--) {
+#if !@complex@
+ @temptype@ temp = @from@(*(@type@ *)dataptr[0]);
+ int i;
+ for (i = 1; i < nop; ++i) {
+ temp *= @from@(*(@type@ *)dataptr[i]);
+ }
+ *(@type@ *)dataptr[nop] = @to@(temp +
+ @from@(*(@type@ *)dataptr[i]));
+ for (i = 0; i <= nop; ++i) {
+ dataptr[i] += sizeof(@type@);
+ }
+#else /* complex */
+# if @nop@ <= 3
+# define _SUMPROD_NOP @nop@
+# else
+# define _SUMPROD_NOP nop
+# endif
+ @temptype@ re, im, tmp;
+ int i;
+ re = ((@temptype@ *)dataptr[0])[0];
+ im = ((@temptype@ *)dataptr[0])[1];
+ for (i = 1; i < _SUMPROD_NOP; ++i) {
+ tmp = re * ((@temptype@ *)dataptr[i])[0] -
+ im * ((@temptype@ *)dataptr[i])[1];
+ im = re * ((@temptype@ *)dataptr[i])[1] +
+ im * ((@temptype@ *)dataptr[i])[0];
+ re = tmp;
+ }
+ ((@temptype@ *)dataptr[_SUMPROD_NOP])[0] = re +
+ ((@temptype@ *)dataptr[_SUMPROD_NOP])[0];
+ ((@temptype@ *)dataptr[_SUMPROD_NOP])[1] = im +
+ ((@temptype@ *)dataptr[_SUMPROD_NOP])[1];
+
+ for (i = 0; i <= _SUMPROD_NOP; ++i) {
+ dataptr[i] += sizeof(@type@);
+ }
+# undef _SUMPROD_NOP
+#endif
+ }
+}
+
+#endif /* functions for various @nop@ */
+
+#if @nop@ == 1
+
+static void
+@name@_sum_of_products_contig_outstride0_one(int nop, char **dataptr,
+ npy_intp const *strides, npy_intp count)
+{
+#if @complex@
+ @temptype@ accum_re = 0, accum_im = 0;
+ @temptype@ *data0 = (@temptype@ *)dataptr[0];
+#else
+ @temptype@ accum = 0;
+ @type@ *data0 = (@type@ *)dataptr[0];
+#endif
+
+#if EINSUM_USE_SSE1 && @float32@
+ __m128 a, accum_sse = _mm_setzero_ps();
+#elif EINSUM_USE_SSE2 && @float64@
+ __m128d a, accum_sse = _mm_setzero_pd();
+#endif
+
+
+ NPY_EINSUM_DBG_PRINT1("@name@_sum_of_products_contig_outstride0_one (%d)\n",
+ (int)count);
+
+/* This is placed before the main loop to make small counts faster */
+finish_after_unrolled_loop:
+ switch (count) {
+/**begin repeat2
+ * #i = 6, 5, 4, 3, 2, 1, 0#
+ */
+ case @i@+1:
+#if !@complex@
+ accum += @from@(data0[@i@]);
+#else /* complex */
+ accum_re += data0[2*@i@+0];
+ accum_im += data0[2*@i@+1];
+#endif
+/**end repeat2**/
+ case 0:
+#if @complex@
+ ((@temptype@ *)dataptr[1])[0] += accum_re;
+ ((@temptype@ *)dataptr[1])[1] += accum_im;
+#else
+ *((@type@ *)dataptr[1]) = @to@(accum +
+ @from@(*((@type@ *)dataptr[1])));
+#endif
+ return;
+ }
+
+#if EINSUM_USE_SSE1 && @float32@
+ /* Use aligned instructions if possible */
+ if (EINSUM_IS_SSE_ALIGNED(data0)) {
+ /* Unroll the loop by 8 */
+ while (count >= 8) {
+ count -= 8;
+
+ _mm_prefetch(data0 + 512, _MM_HINT_T0);
+
+/**begin repeat2
+ * #i = 0, 4#
+ */
+ /*
+ * NOTE: This accumulation changes the order, so will likely
+ * produce slightly different results.
+ */
+ accum_sse = _mm_add_ps(accum_sse, _mm_load_ps(data0+@i@));
+/**end repeat2**/
+ data0 += 8;
+ }
+
+ /* Add the four SSE values and put in accum */
+ a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(2,3,0,1));
+ accum_sse = _mm_add_ps(a, accum_sse);
+ a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(1,0,3,2));
+ accum_sse = _mm_add_ps(a, accum_sse);
+ _mm_store_ss(&accum, accum_sse);
+
+ /* Finish off the loop */
+ goto finish_after_unrolled_loop;
+ }
+#elif EINSUM_USE_SSE2 && @float64@
+ /* Use aligned instructions if possible */
+ if (EINSUM_IS_SSE_ALIGNED(data0)) {
+ /* Unroll the loop by 8 */
+ while (count >= 8) {
+ count -= 8;
+
+ _mm_prefetch(data0 + 512, _MM_HINT_T0);
+
+/**begin repeat2
+ * #i = 0, 2, 4, 6#
+ */
+ /*
+ * NOTE: This accumulation changes the order, so will likely
+ * produce slightly different results.
+ */
+ accum_sse = _mm_add_pd(accum_sse, _mm_load_pd(data0+@i@));
+/**end repeat2**/
+ data0 += 8;
+ }
+
+ /* Add the two SSE2 values and put in accum */
+ a = _mm_shuffle_pd(accum_sse, accum_sse, _MM_SHUFFLE2(0,1));
+ accum_sse = _mm_add_pd(a, accum_sse);
+ _mm_store_sd(&accum, accum_sse);
+
+ /* Finish off the loop */
+ goto finish_after_unrolled_loop;
+ }
+#endif
+
+ /* Unroll the loop by 8 */
+ while (count >= 8) {
+ count -= 8;
+
+#if EINSUM_USE_SSE1 && @float32@
+ _mm_prefetch(data0 + 512, _MM_HINT_T0);
+
+/**begin repeat2
+ * #i = 0, 4#
+ */
+ /*
+ * NOTE: This accumulation changes the order, so will likely
+ * produce slightly different results.
+ */
+ accum_sse = _mm_add_ps(accum_sse, _mm_loadu_ps(data0+@i@));
+/**end repeat2**/
+#elif EINSUM_USE_SSE2 && @float64@
+ _mm_prefetch(data0 + 512, _MM_HINT_T0);
+
+/**begin repeat2
+ * #i = 0, 2, 4, 6#
+ */
+ /*
+ * NOTE: This accumulation changes the order, so will likely
+ * produce slightly different results.
+ */
+ accum_sse = _mm_add_pd(accum_sse, _mm_loadu_pd(data0+@i@));
+/**end repeat2**/
+#else
+/**begin repeat2
+ * #i = 0, 1, 2, 3, 4, 5, 6, 7#
+ */
+# if !@complex@
+ accum += @from@(data0[@i@]);
+# else /* complex */
+ accum_re += data0[2*@i@+0];
+ accum_im += data0[2*@i@+1];
+# endif
+/**end repeat2**/
+#endif
+
+#if !@complex@
+ data0 += 8;
+#else
+ data0 += 8*2;
+#endif
+ }
+
+#if EINSUM_USE_SSE1 && @float32@
+ /* Add the four SSE values and put in accum */
+ a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(2,3,0,1));
+ accum_sse = _mm_add_ps(a, accum_sse);
+ a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(1,0,3,2));
+ accum_sse = _mm_add_ps(a, accum_sse);
+ _mm_store_ss(&accum, accum_sse);
+#elif EINSUM_USE_SSE2 && @float64@
+ /* Add the two SSE2 values and put in accum */
+ a = _mm_shuffle_pd(accum_sse, accum_sse, _MM_SHUFFLE2(0,1));
+ accum_sse = _mm_add_pd(a, accum_sse);
+ _mm_store_sd(&accum, accum_sse);
+#endif
+
+ /* Finish off the loop */
+ goto finish_after_unrolled_loop;
+}
+
+#endif /* @nop@ == 1 */
+
+static void
+@name@_sum_of_products_outstride0_@noplabel@(int nop, char **dataptr,
+ npy_intp const *strides, npy_intp count)
+{
+#if @complex@
+ @temptype@ accum_re = 0, accum_im = 0;
+#else
+ @temptype@ accum = 0;
+#endif
+
+#if (@nop@ == 1) || (@nop@ <= 3 && !@complex@)
+ char *data0 = dataptr[0];
+ npy_intp stride0 = strides[0];
+#endif
+#if (@nop@ == 2 || @nop@ == 3) && !@complex@
+ char *data1 = dataptr[1];
+ npy_intp stride1 = strides[1];
+#endif
+#if (@nop@ == 3) && !@complex@
+ char *data2 = dataptr[2];
+ npy_intp stride2 = strides[2];
+#endif
+
+ NPY_EINSUM_DBG_PRINT1("@name@_sum_of_products_outstride0_@noplabel@ (%d)\n",
+ (int)count);
+
+ while (count--) {
+#if !@complex@
+# if @nop@ == 1
+ accum += @from@(*(@type@ *)data0);
+ data0 += stride0;
+# elif @nop@ == 2
+ accum += @from@(*(@type@ *)data0) *
+ @from@(*(@type@ *)data1);
+ data0 += stride0;
+ data1 += stride1;
+# elif @nop@ == 3
+ accum += @from@(*(@type@ *)data0) *
+ @from@(*(@type@ *)data1) *
+ @from@(*(@type@ *)data2);
+ data0 += stride0;
+ data1 += stride1;
+ data2 += stride2;
+# else
+ @temptype@ temp = @from@(*(@type@ *)dataptr[0]);
+ int i;
+ for (i = 1; i < nop; ++i) {
+ temp *= @from@(*(@type@ *)dataptr[i]);
+ }
+ accum += temp;
+ for (i = 0; i < nop; ++i) {
+ dataptr[i] += strides[i];
+ }
+# endif
+#else /* complex */
+# if @nop@ == 1
+ accum_re += ((@temptype@ *)data0)[0];
+ accum_im += ((@temptype@ *)data0)[1];
+ data0 += stride0;
+# else
+# if @nop@ <= 3
+#define _SUMPROD_NOP @nop@
+# else
+#define _SUMPROD_NOP nop
+# endif
+ @temptype@ re, im, tmp;
+ int i;
+ re = ((@temptype@ *)dataptr[0])[0];
+ im = ((@temptype@ *)dataptr[0])[1];
+ for (i = 1; i < _SUMPROD_NOP; ++i) {
+ tmp = re * ((@temptype@ *)dataptr[i])[0] -
+ im * ((@temptype@ *)dataptr[i])[1];
+ im = re * ((@temptype@ *)dataptr[i])[1] +
+ im * ((@temptype@ *)dataptr[i])[0];
+ re = tmp;
+ }
+ accum_re += re;
+ accum_im += im;
+ for (i = 0; i < _SUMPROD_NOP; ++i) {
+ dataptr[i] += strides[i];
+ }
+#undef _SUMPROD_NOP
+# endif
+#endif
+ }
+
+#if @complex@
+# if @nop@ <= 3
+ ((@temptype@ *)dataptr[@nop@])[0] += accum_re;
+ ((@temptype@ *)dataptr[@nop@])[1] += accum_im;
+# else
+ ((@temptype@ *)dataptr[nop])[0] += accum_re;
+ ((@temptype@ *)dataptr[nop])[1] += accum_im;
+# endif
+#else
+# if @nop@ <= 3
+ *((@type@ *)dataptr[@nop@]) = @to@(accum +
+ @from@(*((@type@ *)dataptr[@nop@])));
+# else
+ *((@type@ *)dataptr[nop]) = @to@(accum +
+ @from@(*((@type@ *)dataptr[nop])));
+# endif
+#endif
+
+}
+
+/**end repeat1**/
+
+/**end repeat**/
+
+
+/* Do OR of ANDs for the boolean type */
+
+/**begin repeat
+ * #nop = 1, 2, 3, 1000#
+ * #noplabel = one, two, three, any#
+ */
+
+static void
+bool_sum_of_products_@noplabel@(int nop, char **dataptr,
+ npy_intp const *strides, npy_intp count)
+{
+#if (@nop@ <= 3)
+ char *data0 = dataptr[0];
+ npy_intp stride0 = strides[0];
+#endif
+#if (@nop@ == 2 || @nop@ == 3)
+ char *data1 = dataptr[1];
+ npy_intp stride1 = strides[1];
+#endif
+#if (@nop@ == 3)
+ char *data2 = dataptr[2];
+ npy_intp stride2 = strides[2];
+#endif
+#if (@nop@ <= 3)
+ char *data_out = dataptr[@nop@];
+ npy_intp stride_out = strides[@nop@];
+#endif
+
+ while (count--) {
+#if @nop@ == 1
+ *(npy_bool *)data_out = *(npy_bool *)data0 ||
+ *(npy_bool *)data_out;
+ data0 += stride0;
+ data_out += stride_out;
+#elif @nop@ == 2
+ *(npy_bool *)data_out = (*(npy_bool *)data0 &&
+ *(npy_bool *)data1) ||
+ *(npy_bool *)data_out;
+ data0 += stride0;
+ data1 += stride1;
+ data_out += stride_out;
+#elif @nop@ == 3
+ *(npy_bool *)data_out = (*(npy_bool *)data0 &&
+ *(npy_bool *)data1 &&
+ *(npy_bool *)data2) ||
+ *(npy_bool *)data_out;
+ data0 += stride0;
+ data1 += stride1;
+ data2 += stride2;
+ data_out += stride_out;
+#else
+ npy_bool temp = *(npy_bool *)dataptr[0];
+ int i;
+ for (i = 1; i < nop; ++i) {
+ temp = temp && *(npy_bool *)dataptr[i];
+ }
+ *(npy_bool *)dataptr[nop] = temp || *(npy_bool *)dataptr[i];
+ for (i = 0; i <= nop; ++i) {
+ dataptr[i] += strides[i];
+ }
+#endif
+ }
+}
+
+static void
+bool_sum_of_products_contig_@noplabel@(int nop, char **dataptr,
+ npy_intp const *strides, npy_intp count)
+{
+#if (@nop@ <= 3)
+ char *data0 = dataptr[0];
+#endif
+#if (@nop@ == 2 || @nop@ == 3)
+ char *data1 = dataptr[1];
+#endif
+#if (@nop@ == 3)
+ char *data2 = dataptr[2];
+#endif
+#if (@nop@ <= 3)
+ char *data_out = dataptr[@nop@];
+#endif
+
+#if (@nop@ <= 3)
+/* This is placed before the main loop to make small counts faster */
+finish_after_unrolled_loop:
+ switch (count) {
+/**begin repeat1
+ * #i = 6, 5, 4, 3, 2, 1, 0#
+ */
+ case @i@+1:
+# if @nop@ == 1
+ ((npy_bool *)data_out)[@i@] = ((npy_bool *)data0)[@i@] ||
+ ((npy_bool *)data_out)[@i@];
+# elif @nop@ == 2
+ ((npy_bool *)data_out)[@i@] =
+ (((npy_bool *)data0)[@i@] &&
+ ((npy_bool *)data1)[@i@]) ||
+ ((npy_bool *)data_out)[@i@];
+# elif @nop@ == 3
+ ((npy_bool *)data_out)[@i@] =
+ (((npy_bool *)data0)[@i@] &&
+ ((npy_bool *)data1)[@i@] &&
+ ((npy_bool *)data2)[@i@]) ||
+ ((npy_bool *)data_out)[@i@];
+# endif
+/**end repeat1**/
+ case 0:
+ return;
+ }
+#endif
+
+/* Unroll the loop by 8 for fixed-size nop */
+#if (@nop@ <= 3)
+ while (count >= 8) {
+ count -= 8;
+#else
+ while (count--) {
+#endif
+
+# if @nop@ == 1
+/**begin repeat1
+ * #i = 0, 1, 2, 3, 4, 5, 6, 7#
+ */
+ *((npy_bool *)data_out + @i@) = (*((npy_bool *)data0 + @i@)) ||
+ (*((npy_bool *)data_out + @i@));
+/**end repeat1**/
+ data0 += 8*sizeof(npy_bool);
+ data_out += 8*sizeof(npy_bool);
+# elif @nop@ == 2
+/**begin repeat1
+ * #i = 0, 1, 2, 3, 4, 5, 6, 7#
+ */
+ *((npy_bool *)data_out + @i@) =
+ ((*((npy_bool *)data0 + @i@)) &&
+ (*((npy_bool *)data1 + @i@))) ||
+ (*((npy_bool *)data_out + @i@));
+/**end repeat1**/
+ data0 += 8*sizeof(npy_bool);
+ data1 += 8*sizeof(npy_bool);
+ data_out += 8*sizeof(npy_bool);
+# elif @nop@ == 3
+/**begin repeat1
+ * #i = 0, 1, 2, 3, 4, 5, 6, 7#
+ */
+ *((npy_bool *)data_out + @i@) =
+ ((*((npy_bool *)data0 + @i@)) &&
+ (*((npy_bool *)data1 + @i@)) &&
+ (*((npy_bool *)data2 + @i@))) ||
+ (*((npy_bool *)data_out + @i@));
+/**end repeat1**/
+ data0 += 8*sizeof(npy_bool);
+ data1 += 8*sizeof(npy_bool);
+ data2 += 8*sizeof(npy_bool);
+ data_out += 8*sizeof(npy_bool);
+# else
+ npy_bool temp = *(npy_bool *)dataptr[0];
+ int i;
+ for (i = 1; i < nop; ++i) {
+ temp = temp && *(npy_bool *)dataptr[i];
+ }
+ *(npy_bool *)dataptr[nop] = temp || *(npy_bool *)dataptr[i];
+ for (i = 0; i <= nop; ++i) {
+ dataptr[i] += sizeof(npy_bool);
+ }
+# endif
+ }
+
+ /* If the loop was unrolled, we need to finish it off */
+#if (@nop@ <= 3)
+ goto finish_after_unrolled_loop;
+#endif
+}
+
+static void
+bool_sum_of_products_outstride0_@noplabel@(int nop, char **dataptr,
+ npy_intp const *strides, npy_intp count)
+{
+ npy_bool accum = 0;
+
+#if (@nop@ <= 3)
+ char *data0 = dataptr[0];
+ npy_intp stride0 = strides[0];
+#endif
+#if (@nop@ == 2 || @nop@ == 3)
+ char *data1 = dataptr[1];
+ npy_intp stride1 = strides[1];
+#endif
+#if (@nop@ == 3)
+ char *data2 = dataptr[2];
+ npy_intp stride2 = strides[2];
+#endif
+
+ while (count--) {
+#if @nop@ == 1
+ accum = *(npy_bool *)data0 || accum;
+ data0 += stride0;
+#elif @nop@ == 2
+ accum = (*(npy_bool *)data0 && *(npy_bool *)data1) || accum;
+ data0 += stride0;
+ data1 += stride1;
+#elif @nop@ == 3
+ accum = (*(npy_bool *)data0 &&
+ *(npy_bool *)data1 &&
+ *(npy_bool *)data2) || accum;
+ data0 += stride0;
+ data1 += stride1;
+ data2 += stride2;
+#else
+ npy_bool temp = *(npy_bool *)dataptr[0];
+ int i;
+ for (i = 1; i < nop; ++i) {
+ temp = temp && *(npy_bool *)dataptr[i];
+ }
+ accum = temp || accum;
+ for (i = 0; i <= nop; ++i) {
+ dataptr[i] += strides[i];
+ }
+#endif
+ }
+
+# if @nop@ <= 3
+ *((npy_bool *)dataptr[@nop@]) = accum || *((npy_bool *)dataptr[@nop@]);
+# else
+ *((npy_bool *)dataptr[nop]) = accum || *((npy_bool *)dataptr[nop]);
+# endif
+}
+
+/**end repeat**/
+
+/* These tables need to match up with the type enum */
+static sum_of_products_fn
+_contig_outstride0_unary_specialization_table[NPY_NTYPES] = {
+/**begin repeat
+ * #name = bool,
+ * byte, ubyte,
+ * short, ushort,
+ * int, uint,
+ * long, ulong,
+ * longlong, ulonglong,
+ * float, double, longdouble,
+ * cfloat, cdouble, clongdouble,
+ * object, string, unicode, void,
+ * datetime, timedelta, half#
+ * #use = 0,
+ * 1, 1,
+ * 1, 1,
+ * 1, 1,
+ * 1, 1,
+ * 1, 1,
+ * 1, 1, 1,
+ * 1, 1, 1,
+ * 0, 0, 0, 0,
+ * 0, 0, 1#
+ */
+#if @use@
+ &@name@_sum_of_products_contig_outstride0_one,
+#else
+ NULL,
+#endif
+/**end repeat**/
+}; /* End of _contig_outstride0_unary_specialization_table */
+
+static sum_of_products_fn _binary_specialization_table[NPY_NTYPES][5] = {
+/**begin repeat
+ * #name = bool,
+ * byte, ubyte,
+ * short, ushort,
+ * int, uint,
+ * long, ulong,
+ * longlong, ulonglong,
+ * float, double, longdouble,
+ * cfloat, cdouble, clongdouble,
+ * object, string, unicode, void,
+ * datetime, timedelta, half#
+ * #use = 0,
+ * 1, 1,
+ * 1, 1,
+ * 1, 1,
+ * 1, 1,
+ * 1, 1,
+ * 1, 1, 1,
+ * 0, 0, 0,
+ * 0, 0, 0, 0,
+ * 0, 0, 1#
+ */
+#if @use@
+{
+ &@name@_sum_of_products_stride0_contig_outstride0_two,
+ &@name@_sum_of_products_stride0_contig_outcontig_two,
+ &@name@_sum_of_products_contig_stride0_outstride0_two,
+ &@name@_sum_of_products_contig_stride0_outcontig_two,
+ &@name@_sum_of_products_contig_contig_outstride0_two,
+},
+#else
+ {NULL, NULL, NULL, NULL, NULL},
+#endif
+/**end repeat**/
+}; /* End of _binary_specialization_table */
+
+static sum_of_products_fn _outstride0_specialized_table[NPY_NTYPES][4] = {
+/**begin repeat
+ * #name = bool,
+ * byte, ubyte,
+ * short, ushort,
+ * int, uint,
+ * long, ulong,
+ * longlong, ulonglong,
+ * float, double, longdouble,
+ * cfloat, cdouble, clongdouble,
+ * object, string, unicode, void,
+ * datetime, timedelta, half#
+ * #use = 1,
+ * 1, 1,
+ * 1, 1,
+ * 1, 1,
+ * 1, 1,
+ * 1, 1,
+ * 1, 1, 1,
+ * 1, 1, 1,
+ * 0, 0, 0, 0,
+ * 0, 0, 1#
+ */
+#if @use@
+{
+ &@name@_sum_of_products_outstride0_any,
+ &@name@_sum_of_products_outstride0_one,
+ &@name@_sum_of_products_outstride0_two,
+ &@name@_sum_of_products_outstride0_three
+},
+#else
+ {NULL, NULL, NULL, NULL},
+#endif
+/**end repeat**/
+}; /* End of _outstride0_specialized_table */
+
+static sum_of_products_fn _allcontig_specialized_table[NPY_NTYPES][4] = {
+/**begin repeat
+ * #name = bool,
+ * byte, ubyte,
+ * short, ushort,
+ * int, uint,
+ * long, ulong,
+ * longlong, ulonglong,
+ * float, double, longdouble,
+ * cfloat, cdouble, clongdouble,
+ * object, string, unicode, void,
+ * datetime, timedelta, half#
+ * #use = 1,
+ * 1, 1,
+ * 1, 1,
+ * 1, 1,
+ * 1, 1,
+ * 1, 1,
+ * 1, 1, 1,
+ * 1, 1, 1,
+ * 0, 0, 0, 0,
+ * 0, 0, 1#
+ */
+#if @use@
+{
+ &@name@_sum_of_products_contig_any,
+ &@name@_sum_of_products_contig_one,
+ &@name@_sum_of_products_contig_two,
+ &@name@_sum_of_products_contig_three
+},
+#else
+ {NULL, NULL, NULL, NULL},
+#endif
+/**end repeat**/
+}; /* End of _allcontig_specialized_table */
+
+static sum_of_products_fn _unspecialized_table[NPY_NTYPES][4] = {
+/**begin repeat
+ * #name = bool,
+ * byte, ubyte,
+ * short, ushort,
+ * int, uint,
+ * long, ulong,
+ * longlong, ulonglong,
+ * float, double, longdouble,
+ * cfloat, cdouble, clongdouble,
+ * object, string, unicode, void,
+ * datetime, timedelta, half#
+ * #use = 1,
+ * 1, 1,
+ * 1, 1,
+ * 1, 1,
+ * 1, 1,
+ * 1, 1,
+ * 1, 1, 1,
+ * 1, 1, 1,
+ * 0, 0, 0, 0,
+ * 0, 0, 1#
+ */
+#if @use@
+{
+ &@name@_sum_of_products_any,
+ &@name@_sum_of_products_one,
+ &@name@_sum_of_products_two,
+ &@name@_sum_of_products_three
+},
+#else
+ {NULL, NULL, NULL, NULL},
+#endif
+/**end repeat**/
+}; /* End of _unnspecialized_table */
+
+NPY_VISIBILITY_HIDDEN sum_of_products_fn
+get_sum_of_products_function(int nop, int type_num,
+ npy_intp itemsize, npy_intp const *fixed_strides)
+{
+ int iop;
+
+ if (type_num >= NPY_NTYPES) {
+ return NULL;
+ }
+
+ /* contiguous reduction */
+ if (nop == 1 && fixed_strides[0] == itemsize && fixed_strides[1] == 0) {
+ sum_of_products_fn ret =
+ _contig_outstride0_unary_specialization_table[type_num];
+ if (ret != NULL) {
+ return ret;
+ }
+ }
+
+ /* nop of 2 has more specializations */
+ if (nop == 2) {
+ /* Encode the zero/contiguous strides */
+ int code;
+ code = (fixed_strides[0] == 0) ? 0 :
+ (fixed_strides[0] == itemsize) ? 2*2*1 : 8;
+ code += (fixed_strides[1] == 0) ? 0 :
+ (fixed_strides[1] == itemsize) ? 2*1 : 8;
+ code += (fixed_strides[2] == 0) ? 0 :
+ (fixed_strides[2] == itemsize) ? 1 : 8;
+ if (code >= 2 && code < 7) {
+ sum_of_products_fn ret =
+ _binary_specialization_table[type_num][code-2];
+ if (ret != NULL) {
+ return ret;
+ }
+ }
+ }
+
+ /* Inner loop with an output stride of 0 */
+ if (fixed_strides[nop] == 0) {
+ return _outstride0_specialized_table[type_num][nop <= 3 ? nop : 0];
+ }
+
+ /* Check for all contiguous */
+ for (iop = 0; iop < nop + 1; ++iop) {
+ if (fixed_strides[iop] != itemsize) {
+ break;
+ }
+ }
+
+ /* Contiguous loop */
+ if (iop == nop + 1) {
+ return _allcontig_specialized_table[type_num][nop <= 3 ? nop : 0];
+ }
+
+ /* None of the above specializations caught it, general loops */
+ return _unspecialized_table[type_num][nop <= 3 ? nop : 0];
+}
diff --git a/numpy/core/src/multiarray/einsum_sumprod.h b/numpy/core/src/multiarray/einsum_sumprod.h
new file mode 100644
index 000000000..c6cf18ec6
--- /dev/null
+++ b/numpy/core/src/multiarray/einsum_sumprod.h
@@ -0,0 +1,12 @@
+#ifndef _NPY_MULTIARRAY_EINSUM_SUMPROD_H
+#define _NPY_MULTIARRAY_EINSUM_SUMPROD_H
+
+#include <numpy/npy_common.h>
+
+typedef void (*sum_of_products_fn)(int, char **, npy_intp const*, npy_intp);
+
+NPY_VISIBILITY_HIDDEN sum_of_products_fn
+get_sum_of_products_function(int nop, int type_num,
+ npy_intp itemsize, npy_intp const *fixed_strides);
+
+#endif
diff --git a/numpy/core/src/multiarray/flagsobject.c b/numpy/core/src/multiarray/flagsobject.c
index d5f24e75a..9b7d8deae 100644
--- a/numpy/core/src/multiarray/flagsobject.c
+++ b/numpy/core/src/multiarray/flagsobject.c
@@ -307,7 +307,7 @@ arrayflags_farray_get(PyArrayFlagsObject *self)
static PyObject *
arrayflags_num_get(PyArrayFlagsObject *self)
{
- return PyInt_FromLong(self->flags);
+ return PyLong_FromLong(self->flags);
}
/* relies on setflags order being write, align, uic */
@@ -711,7 +711,7 @@ arrayflags_print(PyArrayFlagsObject *self)
if (fl & NPY_ARRAY_WARN_ON_WRITE) {
_warn_on_write = " (with WARN_ON_WRITE=True)";
}
- return PyUString_FromFormat(
+ return PyUnicode_FromFormat(
" %s : %s\n %s : %s\n"
" %s : %s\n %s : %s%s\n"
" %s : %s\n %s : %s\n"
diff --git a/numpy/core/src/multiarray/getset.c b/numpy/core/src/multiarray/getset.c
index 9066f52a8..3575d6fad 100644
--- a/numpy/core/src/multiarray/getset.c
+++ b/numpy/core/src/multiarray/getset.c
@@ -28,7 +28,7 @@
static PyObject *
array_ndim_get(PyArrayObject *self)
{
- return PyInt_FromLong(PyArray_NDIM(self));
+ return PyLong_FromLong(PyArray_NDIM(self));
}
static PyObject *
@@ -217,7 +217,7 @@ array_protocol_descr_get(PyArrayObject *self)
if (dobj == NULL) {
return NULL;
}
- PyTuple_SET_ITEM(dobj, 0, PyString_FromString(""));
+ PyTuple_SET_ITEM(dobj, 0, PyUnicode_FromString(""));
PyTuple_SET_ITEM(dobj, 1, array_typestr_get(self));
res = PyList_New(1);
if (res == NULL) {
@@ -318,7 +318,7 @@ array_interface_get(PyArrayObject *self)
return NULL;
}
- obj = PyInt_FromLong(3);
+ obj = PyLong_FromLong(3);
ret = PyDict_SetItemString(dict, "version", obj);
Py_DECREF(obj);
if (ret < 0) {
@@ -413,7 +413,7 @@ array_data_set(PyArrayObject *self, PyObject *op)
static PyObject *
array_itemsize_get(PyArrayObject *self)
{
- return PyInt_FromLong((long) PyArray_DESCR(self)->elsize);
+ return PyLong_FromLong((long) PyArray_DESCR(self)->elsize);
}
static PyObject *
@@ -421,13 +421,13 @@ array_size_get(PyArrayObject *self)
{
npy_intp size=PyArray_SIZE(self);
#if NPY_SIZEOF_INTP <= NPY_SIZEOF_LONG
- return PyInt_FromLong((long) size);
+ return PyLong_FromLong((long) size);
#else
if (size > NPY_MAX_LONG || size < NPY_MIN_LONG) {
return PyLong_FromLongLong(size);
}
else {
- return PyInt_FromLong((long) size);
+ return PyLong_FromLong((long) size);
}
#endif
}
@@ -437,13 +437,13 @@ array_nbytes_get(PyArrayObject *self)
{
npy_intp nbytes = PyArray_NBYTES(self);
#if NPY_SIZEOF_INTP <= NPY_SIZEOF_LONG
- return PyInt_FromLong((long) nbytes);
+ return PyLong_FromLong((long) nbytes);
#else
if (nbytes > NPY_MAX_LONG || nbytes < NPY_MIN_LONG) {
return PyLong_FromLongLong(nbytes);
}
else {
- return PyInt_FromLong((long) nbytes);
+ return PyLong_FromLong((long) nbytes);
}
#endif
}
@@ -621,7 +621,6 @@ static PyObject *
array_struct_get(PyArrayObject *self)
{
PyArrayInterface *inter;
- PyObject *ret;
inter = (PyArrayInterface *)PyArray_malloc(sizeof(PyArrayInterface));
if (inter==NULL) {
@@ -673,8 +672,14 @@ array_struct_get(PyArrayObject *self)
else {
inter->descr = NULL;
}
+ PyObject *ret = PyCapsule_New(inter, NULL, gentype_struct_free);
+ if (ret == NULL) {
+ return NULL;
+ }
Py_INCREF(self);
- ret = NpyCapsule_FromVoidPtrAndDesc(inter, self, gentype_struct_free);
+ if (PyCapsule_SetContext(ret, self) < 0) {
+ return NULL;
+ }
return ret;
}
diff --git a/numpy/core/src/multiarray/hashdescr.c b/numpy/core/src/multiarray/hashdescr.c
index 0b23b6c21..c596a7098 100644
--- a/numpy/core/src/multiarray/hashdescr.c
+++ b/numpy/core/src/multiarray/hashdescr.c
@@ -132,7 +132,7 @@ static int _array_descr_walk_fields(PyObject *names, PyObject* fields, PyObject*
"(Hash) names and fields inconsistent ???");
return -1;
}
- if (!PyUString_Check(key)) {
+ if (!PyUnicode_Check(key)) {
PyErr_SetString(PyExc_SystemError,
"(Hash) key of dtype dict not a string ???");
return -1;
diff --git a/numpy/core/src/multiarray/iterators.c b/numpy/core/src/multiarray/iterators.c
index ac5b90400..96f501c55 100644
--- a/numpy/core/src/multiarray/iterators.c
+++ b/numpy/core/src/multiarray/iterators.c
@@ -1411,10 +1411,10 @@ static PyObject *
arraymultiter_size_get(PyArrayMultiIterObject *self)
{
#if NPY_SIZEOF_INTP <= NPY_SIZEOF_LONG
- return PyInt_FromLong((long) self->size);
+ return PyLong_FromLong((long) self->size);
#else
if (self->size < NPY_MAX_LONG) {
- return PyInt_FromLong((long) self->size);
+ return PyLong_FromLong((long) self->size);
}
else {
return PyLong_FromLongLong((npy_longlong) self->size);
@@ -1426,10 +1426,10 @@ static PyObject *
arraymultiter_index_get(PyArrayMultiIterObject *self)
{
#if NPY_SIZEOF_INTP <= NPY_SIZEOF_LONG
- return PyInt_FromLong((long) self->index);
+ return PyLong_FromLong((long) self->index);
#else
if (self->size < NPY_MAX_LONG) {
- return PyInt_FromLong((long) self->index);
+ return PyLong_FromLong((long) self->index);
}
else {
return PyLong_FromLongLong((npy_longlong) self->index);
diff --git a/numpy/core/src/multiarray/mapping.c b/numpy/core/src/multiarray/mapping.c
index db15ff1d5..fdf248c97 100644
--- a/numpy/core/src/multiarray/mapping.c
+++ b/numpy/core/src/multiarray/mapping.c
@@ -946,9 +946,9 @@ get_view_from_index(PyArrayObject *self, PyArrayObject **view,
}
break;
case HAS_SLICE:
- if (NpySlice_GetIndicesEx(indices[i].object,
- PyArray_DIMS(self)[orig_dim],
- &start, &stop, &step, &n_steps) < 0) {
+ if (PySlice_GetIndicesEx(indices[i].object,
+ PyArray_DIMS(self)[orig_dim],
+ &start, &stop, &step, &n_steps) < 0) {
return -1;
}
if (n_steps <= 0) {
@@ -1418,7 +1418,7 @@ _get_field_view(PyArrayObject *arr, PyObject *ind, PyArrayObject **view)
return 0;
}
else if (tup == NULL){
- PyObject *errmsg = PyUString_FromString("no field of name ");
+ PyObject *errmsg = PyUnicode_FromString("no field of name ");
PyUString_Concat(&errmsg, ind);
PyErr_SetObject(PyExc_ValueError, errmsg);
Py_DECREF(errmsg);
@@ -2438,7 +2438,7 @@ mapiter_fill_info(PyArrayMapIterObject *mit, npy_index_info *indices,
* Attempt to set a meaningful exception. Could also find out
* if a boolean index was converted.
*/
- errmsg = PyUString_FromString("shape mismatch: indexing arrays could not "
+ errmsg = PyUnicode_FromString("shape mismatch: indexing arrays could not "
"be broadcast together with shapes ");
if (errmsg == NULL) {
return -1;
@@ -3183,7 +3183,7 @@ PyArray_MapIterNew(npy_index_info *indices , int index_num, int index_type,
goto finish;
broadcast_error:
- errmsg = PyUString_FromString("shape mismatch: value array "
+ errmsg = PyUnicode_FromString("shape mismatch: value array "
"of shape ");
if (errmsg == NULL) {
goto finish;
@@ -3204,7 +3204,7 @@ PyArray_MapIterNew(npy_index_info *indices , int index_num, int index_type,
goto finish;
}
- tmp = PyUString_FromString("could not be broadcast to indexing "
+ tmp = PyUnicode_FromString("could not be broadcast to indexing "
"result of shape ");
PyUString_ConcatAndDel(&errmsg, tmp);
if (errmsg == NULL) {
diff --git a/numpy/core/src/multiarray/methods.c b/numpy/core/src/multiarray/methods.c
index e0b36e80f..f7cb2185b 100644
--- a/numpy/core/src/multiarray/methods.c
+++ b/numpy/core/src/multiarray/methods.c
@@ -1508,14 +1508,14 @@ _deepcopy_call(char *iptr, char *optr, PyArray_Descr *dtype,
else {
PyObject *itemp, *otemp;
PyObject *res;
- NPY_COPY_PYOBJECT_PTR(&itemp, iptr);
- NPY_COPY_PYOBJECT_PTR(&otemp, optr);
+ memcpy(&itemp, iptr, sizeof(itemp));
+ memcpy(&otemp, optr, sizeof(otemp));
Py_XINCREF(itemp);
/* call deepcopy on this argument */
res = PyObject_CallFunctionObjArgs(deepcopy, itemp, visit, NULL);
Py_XDECREF(itemp);
Py_XDECREF(otemp);
- NPY_COPY_PYOBJECT_PTR(optr, &res);
+ memcpy(optr, &res, sizeof(res));
}
}
@@ -1676,7 +1676,7 @@ array_reduce(PyArrayObject *self, PyObject *NPY_UNUSED(args))
Py_BuildValue("ONc",
(PyObject *)Py_TYPE(self),
Py_BuildValue("(N)",
- PyInt_FromLong(0)),
+ PyLong_FromLong(0)),
/* dummy data-type */
'b'));
@@ -1701,7 +1701,7 @@ array_reduce(PyArrayObject *self, PyObject *NPY_UNUSED(args))
Py_DECREF(ret);
return NULL;
}
- PyTuple_SET_ITEM(state, 0, PyInt_FromLong(version));
+ PyTuple_SET_ITEM(state, 0, PyLong_FromLong(version));
PyTuple_SET_ITEM(state, 1, PyObject_GetAttrString((PyObject *)self,
"shape"));
descr = PyArray_DESCR(self);
@@ -1763,7 +1763,7 @@ array_reduce_ex_picklebuffer(PyArrayObject *self, int protocol)
#if PY_VERSION_HEX >= 0x03080000
/* we expect protocol 5 to be available in Python 3.8 */
pickle_module = PyImport_ImportModule("pickle");
-#elif PY_VERSION_HEX >= 0x03060000
+#else
pickle_module = PyImport_ImportModule("pickle5");
if (pickle_module == NULL) {
/* for protocol 5, raise a clear ImportError if pickle5 is not found
@@ -1772,10 +1772,6 @@ array_reduce_ex_picklebuffer(PyArrayObject *self, int protocol)
"requires the pickle5 module for Python >=3.6 and <3.8");
return NULL;
}
-#else
- PyErr_SetString(PyExc_ValueError, "pickle protocol 5 is not available "
- "for Python < 3.6");
- return NULL;
#endif
if (pickle_module == NULL){
return NULL;
@@ -2585,9 +2581,10 @@ array_complex(PyArrayObject *self, PyObject *NPY_UNUSED(args))
PyArrayObject *arr;
PyArray_Descr *dtype;
PyObject *c;
+
if (PyArray_SIZE(self) != 1) {
- PyErr_SetString(PyExc_TypeError, "only length-1 arrays can "\
- "be converted to Python scalars");
+ PyErr_SetString(PyExc_TypeError,
+ "only length-1 arrays can be converted to Python scalars");
return NULL;
}
@@ -2598,38 +2595,18 @@ array_complex(PyArrayObject *self, PyObject *NPY_UNUSED(args))
if (!PyArray_CanCastArrayTo(self, dtype, NPY_SAME_KIND_CASTING) &&
!(PyArray_TYPE(self) == NPY_OBJECT)) {
- PyObject *err, *msg_part;
+ PyObject *descr = (PyObject*)PyArray_DESCR(self);
+
Py_DECREF(dtype);
- err = PyString_FromString("unable to convert ");
- if (err == NULL) {
- return NULL;
- }
- msg_part = PyObject_Repr((PyObject*)PyArray_DESCR(self));
- if (msg_part == NULL) {
- Py_DECREF(err);
- return NULL;
- }
- PyString_ConcatAndDel(&err, msg_part);
- if (err == NULL) {
- return NULL;
- }
- msg_part = PyString_FromString(", to complex.");
- if (msg_part == NULL) {
- Py_DECREF(err);
- return NULL;
- }
- PyString_ConcatAndDel(&err, msg_part);
- if (err == NULL) {
- return NULL;
- }
- PyErr_SetObject(PyExc_TypeError, err);
- Py_DECREF(err);
+ PyErr_Format(PyExc_TypeError,
+ "Unable to convert %R to complex", descr);
return NULL;
}
if (PyArray_TYPE(self) == NPY_OBJECT) {
/* let python try calling __complex__ on the object. */
PyObject *args, *res;
+
Py_DECREF(dtype);
args = Py_BuildValue("(O)", *((PyObject**)PyArray_DATA(self)));
if (args == NULL) {
diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c
index 7c5ceb962..490a60393 100644
--- a/numpy/core/src/multiarray/multiarraymodule.c
+++ b/numpy/core/src/multiarray/multiarraymodule.c
@@ -65,7 +65,6 @@ NPY_NO_EXPORT int NPY_NUMUSERTYPES = 0;
#include "templ_common.h" /* for npy_mul_with_overflow_intp */
#include "compiled_base.h"
#include "mem_overlap.h"
-#include "alloc.h"
#include "typeinfo.h"
#include "get_attr_string.h"
@@ -363,7 +362,8 @@ PyArray_GetSubType(int narrays, PyArrayObject **arrays) {
*/
NPY_NO_EXPORT PyArrayObject *
PyArray_ConcatenateArrays(int narrays, PyArrayObject **arrays, int axis,
- PyArrayObject* ret)
+ PyArrayObject* ret, PyArray_Descr *dtype,
+ NPY_CASTING casting)
{
int iarrays, idim, ndim;
npy_intp shape[NPY_MAXDIMS];
@@ -427,6 +427,7 @@ PyArray_ConcatenateArrays(int narrays, PyArrayObject **arrays, int axis,
}
if (ret != NULL) {
+ assert(dtype == NULL);
if (PyArray_NDIM(ret) != ndim) {
PyErr_SetString(PyExc_ValueError,
"Output array has wrong dimensionality");
@@ -446,10 +447,16 @@ PyArray_ConcatenateArrays(int narrays, PyArrayObject **arrays, int axis,
/* Get the priority subtype for the array */
PyTypeObject *subtype = PyArray_GetSubType(narrays, arrays);
- /* Get the resulting dtype from combining all the arrays */
- PyArray_Descr *dtype = PyArray_ResultType(narrays, arrays, 0, NULL);
if (dtype == NULL) {
- return NULL;
+ /* Get the resulting dtype from combining all the arrays */
+ dtype = (PyArray_Descr *)PyArray_ResultType(
+ narrays, arrays, 0, NULL);
+ if (dtype == NULL) {
+ return NULL;
+ }
+ }
+ else {
+ Py_INCREF(dtype);
}
/*
@@ -495,7 +502,7 @@ PyArray_ConcatenateArrays(int narrays, PyArrayObject **arrays, int axis,
/* Copy the data for this array */
if (PyArray_AssignArray((PyArrayObject *)sliding_view, arrays[iarrays],
- NULL, NPY_SAME_KIND_CASTING) < 0) {
+ NULL, casting) < 0) {
Py_DECREF(sliding_view);
Py_DECREF(ret);
return NULL;
@@ -515,7 +522,9 @@ PyArray_ConcatenateArrays(int narrays, PyArrayObject **arrays, int axis,
*/
NPY_NO_EXPORT PyArrayObject *
PyArray_ConcatenateFlattenedArrays(int narrays, PyArrayObject **arrays,
- NPY_ORDER order, PyArrayObject *ret)
+ NPY_ORDER order, PyArrayObject *ret,
+ PyArray_Descr *dtype, NPY_CASTING casting,
+ npy_bool casting_not_passed)
{
int iarrays;
npy_intp shape = 0;
@@ -542,7 +551,10 @@ PyArray_ConcatenateFlattenedArrays(int narrays, PyArrayObject **arrays,
}
}
+ int out_passed = 0;
if (ret != NULL) {
+ assert(dtype == NULL);
+ out_passed = 1;
if (PyArray_NDIM(ret) != 1) {
PyErr_SetString(PyExc_ValueError,
"Output array must be 1D");
@@ -561,10 +573,16 @@ PyArray_ConcatenateFlattenedArrays(int narrays, PyArrayObject **arrays,
/* Get the priority subtype for the array */
PyTypeObject *subtype = PyArray_GetSubType(narrays, arrays);
- /* Get the resulting dtype from combining all the arrays */
- PyArray_Descr *dtype = PyArray_ResultType(narrays, arrays, 0, NULL);
if (dtype == NULL) {
- return NULL;
+ /* Get the resulting dtype from combining all the arrays */
+ dtype = (PyArray_Descr *)PyArray_ResultType(
+ narrays, arrays, 0, NULL);
+ if (dtype == NULL) {
+ return NULL;
+ }
+ }
+ else {
+ Py_INCREF(dtype);
}
stride = dtype->elsize;
@@ -594,10 +612,37 @@ PyArray_ConcatenateFlattenedArrays(int narrays, PyArrayObject **arrays,
return NULL;
}
+ int give_deprecation_warning = 1; /* To give warning for just one input array. */
for (iarrays = 0; iarrays < narrays; ++iarrays) {
/* Adjust the window dimensions for this array */
sliding_view->dimensions[0] = PyArray_SIZE(arrays[iarrays]);
+ if (!PyArray_CanCastArrayTo(
+ arrays[iarrays], PyArray_DESCR(ret), casting)) {
+ /* This should be an error, but was previously allowed here. */
+ if (casting_not_passed && out_passed) {
+ /* NumPy 1.20, 2020-09-03 */
+ if (give_deprecation_warning && DEPRECATE(
+ "concatenate() with `axis=None` will use same-kind "
+ "casting by default in the future. Please use "
+ "`casting='unsafe'` to retain the old behaviour. "
+ "In the future this will be a TypeError.") < 0) {
+ Py_DECREF(sliding_view);
+ Py_DECREF(ret);
+ return NULL;
+ }
+ give_deprecation_warning = 0;
+ }
+ else {
+ npy_set_invalid_cast_error(
+ PyArray_DESCR(arrays[iarrays]), PyArray_DESCR(ret),
+ casting, PyArray_NDIM(arrays[iarrays]) == 0);
+ Py_DECREF(sliding_view);
+ Py_DECREF(ret);
+ return NULL;
+ }
+ }
+
/* Copy the data for this array */
if (PyArray_CopyAsFlat((PyArrayObject *)sliding_view, arrays[iarrays],
order) < 0) {
@@ -615,8 +660,21 @@ PyArray_ConcatenateFlattenedArrays(int narrays, PyArrayObject **arrays,
return ret;
}
+
+/**
+ * Implementation for np.concatenate
+ *
+ * @param op Sequence of arrays to concatenate
+ * @param axis Axis to concatenate along
+ * @param ret output array to fill
+ * @param dtype Forced output array dtype (cannot be combined with ret)
+ * @param casting Casting mode used
+ * @param casting_not_passed Deprecation helper
+ */
NPY_NO_EXPORT PyObject *
-PyArray_ConcatenateInto(PyObject *op, int axis, PyArrayObject *ret)
+PyArray_ConcatenateInto(PyObject *op,
+ int axis, PyArrayObject *ret, PyArray_Descr *dtype,
+ NPY_CASTING casting, npy_bool casting_not_passed)
{
int iarrays, narrays;
PyArrayObject **arrays;
@@ -626,6 +684,12 @@ PyArray_ConcatenateInto(PyObject *op, int axis, PyArrayObject *ret)
"The first input argument needs to be a sequence");
return NULL;
}
+ if (ret != NULL && dtype != NULL) {
+ PyErr_SetString(PyExc_TypeError,
+ "concatenate() only takes `out` or `dtype` as an "
+ "argument, but both were provided.");
+ return NULL;
+ }
/* Convert the input list into arrays */
narrays = PySequence_Size(op);
@@ -652,10 +716,13 @@ PyArray_ConcatenateInto(PyObject *op, int axis, PyArrayObject *ret)
}
if (axis >= NPY_MAXDIMS) {
- ret = PyArray_ConcatenateFlattenedArrays(narrays, arrays, NPY_CORDER, ret);
+ ret = PyArray_ConcatenateFlattenedArrays(
+ narrays, arrays, NPY_CORDER, ret, dtype,
+ casting, casting_not_passed);
}
else {
- ret = PyArray_ConcatenateArrays(narrays, arrays, axis, ret);
+ ret = PyArray_ConcatenateArrays(
+ narrays, arrays, axis, ret, dtype, casting);
}
for (iarrays = 0; iarrays < narrays; ++iarrays) {
@@ -687,7 +754,16 @@ fail:
NPY_NO_EXPORT PyObject *
PyArray_Concatenate(PyObject *op, int axis)
{
- return PyArray_ConcatenateInto(op, axis, NULL);
+ /* retain legacy behaviour for casting */
+ NPY_CASTING casting;
+ if (axis >= NPY_MAXDIMS) {
+ casting = NPY_UNSAFE_CASTING;
+ }
+ else {
+ casting = NPY_SAME_KIND_CASTING;
+ }
+ return PyArray_ConcatenateInto(
+ op, axis, NULL, NULL, casting, 0);
}
static int
@@ -1582,13 +1658,16 @@ _array_fromobject(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kws)
npy_bool subok = NPY_FALSE;
npy_bool copy = NPY_TRUE;
int ndmin = 0, nd;
+ PyObject* like;
PyArray_Descr *type = NULL;
PyArray_Descr *oldtype = NULL;
NPY_ORDER order = NPY_KEEPORDER;
int flags = 0;
- static char *kwd[]= {"object", "dtype", "copy", "order", "subok",
- "ndmin", NULL};
+ PyObject* array_function_result = NULL;
+
+ static char *kwd[] = {"object", "dtype", "copy", "order", "subok",
+ "ndmin", "like", NULL};
if (PyTuple_GET_SIZE(args) > 2) {
PyErr_Format(PyExc_TypeError,
@@ -1597,6 +1676,12 @@ _array_fromobject(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kws)
return NULL;
}
+ array_function_result = array_implement_c_array_function_creation(
+ "array", args, kws);
+ if (array_function_result != Py_NotImplemented) {
+ return array_function_result;
+ }
+
/* super-fast path for ndarray argument calls */
if (PyTuple_GET_SIZE(args) == 0) {
goto full_path;
@@ -1674,13 +1759,14 @@ _array_fromobject(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kws)
}
full_path:
- if (!PyArg_ParseTupleAndKeywords(args, kws, "O|O&O&O&O&i:array", kwd,
+ if (!PyArg_ParseTupleAndKeywords(args, kws, "O|O&O&O&O&i$O:array", kwd,
&op,
PyArray_DescrConverter2, &type,
PyArray_BoolConverter, &copy,
PyArray_OrderConverter, &order,
PyArray_BoolConverter, &subok,
- &ndmin)) {
+ &ndmin,
+ &like)) {
goto clean_type;
}
@@ -1817,20 +1903,29 @@ static PyObject *
array_empty(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kwds)
{
- static char *kwlist[] = {"shape", "dtype", "order", NULL};
+ static char *kwlist[] = {"shape", "dtype", "order", "like", NULL};
PyArray_Descr *typecode = NULL;
PyArray_Dims shape = {NULL, 0};
NPY_ORDER order = NPY_CORDER;
+ PyObject *like = NULL;
npy_bool is_f_order;
+ PyObject *array_function_result = NULL;
PyArrayObject *ret = NULL;
- if (!PyArg_ParseTupleAndKeywords(args, kwds, "O&|O&O&:empty", kwlist,
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "O&|O&O&$O:empty", kwlist,
PyArray_IntpConverter, &shape,
PyArray_DescrConverter, &typecode,
- PyArray_OrderConverter, &order)) {
+ PyArray_OrderConverter, &order,
+ &like)) {
goto fail;
}
+ array_function_result = array_implement_c_array_function_creation(
+ "empty", args, kwds);
+ if (array_function_result != Py_NotImplemented) {
+ return array_function_result;
+ }
+
switch (order) {
case NPY_CORDER:
is_f_order = NPY_FALSE;
@@ -1956,9 +2051,9 @@ array_scalar(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kwds)
return NULL;
}
}
- if (!PyString_Check(obj)) {
+ if (!PyBytes_Check(obj)) {
PyErr_SetString(PyExc_TypeError,
- "initializing object must be a string");
+ "initializing object must be a bytes object");
Py_XDECREF(tmpobj);
return NULL;
}
@@ -1968,7 +2063,7 @@ array_scalar(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kwds)
Py_XDECREF(tmpobj);
return NULL;
}
- dptr = PyString_AS_STRING(obj);
+ dptr = PyBytes_AS_STRING(obj);
}
}
ret = PyArray_Scalar(dptr, typecode, NULL);
@@ -1984,20 +2079,29 @@ array_scalar(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kwds)
static PyObject *
array_zeros(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kwds)
{
- static char *kwlist[] = {"shape", "dtype", "order", NULL};
+ static char *kwlist[] = {"shape", "dtype", "order", "like", NULL};
PyArray_Descr *typecode = NULL;
PyArray_Dims shape = {NULL, 0};
NPY_ORDER order = NPY_CORDER;
+ PyObject *like = NULL;
npy_bool is_f_order = NPY_FALSE;
+ PyObject *array_function_result = NULL;
PyArrayObject *ret = NULL;
- if (!PyArg_ParseTupleAndKeywords(args, kwds, "O&|O&O&:zeros", kwlist,
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "O&|O&O&$O:zeros", kwlist,
PyArray_IntpConverter, &shape,
PyArray_DescrConverter, &typecode,
- PyArray_OrderConverter, &order)) {
+ PyArray_OrderConverter, &order,
+ &like)) {
goto fail;
}
+ array_function_result = array_implement_c_array_function_creation(
+ "zeros", args, kwds);
+ if (array_function_result != Py_NotImplemented) {
+ return array_function_result;
+ }
+
switch (order) {
case NPY_CORDER:
is_f_order = NPY_FALSE;
@@ -2050,16 +2154,24 @@ array_fromstring(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *keywds
Py_ssize_t nin = -1;
char *sep = NULL;
Py_ssize_t s;
- static char *kwlist[] = {"string", "dtype", "count", "sep", NULL};
+ static char *kwlist[] = {"string", "dtype", "count", "sep", "like", NULL};
+ PyObject *like = NULL;
PyArray_Descr *descr = NULL;
+ PyObject *array_function_result = NULL;
if (!PyArg_ParseTupleAndKeywords(args, keywds,
- "s#|O&" NPY_SSIZE_T_PYFMT "s:fromstring", kwlist,
- &data, &s, PyArray_DescrConverter, &descr, &nin, &sep)) {
+ "s#|O&" NPY_SSIZE_T_PYFMT "s$O:fromstring", kwlist,
+ &data, &s, PyArray_DescrConverter, &descr, &nin, &sep, &like)) {
Py_XDECREF(descr);
return NULL;
}
+ array_function_result = array_implement_c_array_function_creation(
+ "fromstring", args, keywds);
+ if (array_function_result != Py_NotImplemented) {
+ return array_function_result;
+ }
+
/* binary mode, condition copied from PyArray_FromString */
if (sep == NULL || strlen(sep) == 0) {
/* Numpy 1.14, 2017-10-19 */
@@ -2082,19 +2194,27 @@ array_fromfile(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *keywds)
PyObject *err_type = NULL, *err_value = NULL, *err_traceback = NULL;
char *sep = "";
Py_ssize_t nin = -1;
- static char *kwlist[] = {"file", "dtype", "count", "sep", "offset", NULL};
+ static char *kwlist[] = {"file", "dtype", "count", "sep", "offset", "like", NULL};
+ PyObject *like = NULL;
PyArray_Descr *type = NULL;
+ PyObject *array_function_result = NULL;
int own;
npy_off_t orig_pos = 0, offset = 0;
FILE *fp;
if (!PyArg_ParseTupleAndKeywords(args, keywds,
- "O|O&" NPY_SSIZE_T_PYFMT "s" NPY_OFF_T_PYFMT ":fromfile", kwlist,
- &file, PyArray_DescrConverter, &type, &nin, &sep, &offset)) {
+ "O|O&" NPY_SSIZE_T_PYFMT "s" NPY_OFF_T_PYFMT "$O:fromfile", kwlist,
+ &file, PyArray_DescrConverter, &type, &nin, &sep, &offset, &like)) {
Py_XDECREF(type);
return NULL;
}
+ array_function_result = array_implement_c_array_function_creation(
+ "fromfile", args, keywds);
+ if (array_function_result != Py_NotImplemented) {
+ return array_function_result;
+ }
+
file = NpyPath_PathlikeToFspath(file);
if (file == NULL) {
return NULL;
@@ -2106,7 +2226,7 @@ array_fromfile(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *keywds)
Py_DECREF(file);
return NULL;
}
- if (PyString_Check(file) || PyUnicode_Check(file)) {
+ if (PyBytes_Check(file) || PyUnicode_Check(file)) {
Py_SETREF(file, npy_PyFile_OpenFile(file, "rb"));
if (file == NULL) {
Py_XDECREF(type);
@@ -2161,15 +2281,24 @@ array_fromiter(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *keywds)
{
PyObject *iter;
Py_ssize_t nin = -1;
- static char *kwlist[] = {"iter", "dtype", "count", NULL};
+ static char *kwlist[] = {"iter", "dtype", "count", "like", NULL};
+ PyObject *like = NULL;
PyArray_Descr *descr = NULL;
+ PyObject *array_function_result = NULL;
if (!PyArg_ParseTupleAndKeywords(args, keywds,
- "OO&|" NPY_SSIZE_T_PYFMT ":fromiter", kwlist,
- &iter, PyArray_DescrConverter, &descr, &nin)) {
+ "OO&|" NPY_SSIZE_T_PYFMT "$O:fromiter", kwlist,
+ &iter, PyArray_DescrConverter, &descr, &nin, &like)) {
Py_XDECREF(descr);
return NULL;
}
+
+ array_function_result = array_implement_c_array_function_creation(
+ "fromiter", args, keywds);
+ if (array_function_result != Py_NotImplemented) {
+ return array_function_result;
+ }
+
return PyArray_FromIter(iter, descr, (npy_intp)nin);
}
@@ -2178,15 +2307,24 @@ array_frombuffer(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *keywds
{
PyObject *obj = NULL;
Py_ssize_t nin = -1, offset = 0;
- static char *kwlist[] = {"buffer", "dtype", "count", "offset", NULL};
+ static char *kwlist[] = {"buffer", "dtype", "count", "offset", "like", NULL};
+ PyObject *like = NULL;
PyArray_Descr *type = NULL;
+ PyObject *array_function_result = NULL;
if (!PyArg_ParseTupleAndKeywords(args, keywds,
- "O|O&" NPY_SSIZE_T_PYFMT NPY_SSIZE_T_PYFMT ":frombuffer", kwlist,
- &obj, PyArray_DescrConverter, &type, &nin, &offset)) {
+ "O|O&" NPY_SSIZE_T_PYFMT NPY_SSIZE_T_PYFMT "$O:frombuffer", kwlist,
+ &obj, PyArray_DescrConverter, &type, &nin, &offset, &like)) {
Py_XDECREF(type);
return NULL;
}
+
+ array_function_result = array_implement_c_array_function_creation(
+ "frombuffer", args, keywds);
+ if (array_function_result != Py_NotImplemented) {
+ return array_function_result;
+ }
+
if (type == NULL) {
type = PyArray_DescrFromType(NPY_DEFAULT_TYPE);
}
@@ -2198,11 +2336,27 @@ array_concatenate(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds)
{
PyObject *a0;
PyObject *out = NULL;
+ PyArray_Descr *dtype = NULL;
+ NPY_CASTING casting = NPY_SAME_KIND_CASTING;
+ PyObject *casting_obj = NULL;
+ PyObject *res;
int axis = 0;
- static char *kwlist[] = {"seq", "axis", "out", NULL};
-
- if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|O&O:concatenate", kwlist,
- &a0, PyArray_AxisConverter, &axis, &out)) {
+ static char *kwlist[] = {"seq", "axis", "out", "dtype", "casting", NULL};
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|O&O$O&O:concatenate", kwlist,
+ &a0, PyArray_AxisConverter, &axis, &out,
+ PyArray_DescrConverter2, &dtype, &casting_obj)) {
+ return NULL;
+ }
+ int casting_not_passed = 0;
+ if (casting_obj == NULL) {
+ /*
+ * Casting was not passed in, needed for deprecation only.
+ * This should be simplified once the deprecation is finished.
+ */
+ casting_not_passed = 1;
+ }
+ else if (!PyArray_CastingConverter(casting_obj, &casting)) {
+ Py_XDECREF(dtype);
return NULL;
}
if (out != NULL) {
@@ -2211,10 +2365,14 @@ array_concatenate(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds)
}
else if (!PyArray_Check(out)) {
PyErr_SetString(PyExc_TypeError, "'out' must be an array");
+ Py_XDECREF(dtype);
return NULL;
}
}
- return PyArray_ConcatenateInto(a0, axis, (PyArrayObject *)out);
+ res = PyArray_ConcatenateInto(a0, axis, (PyArrayObject *)out, dtype,
+ casting, casting_not_passed);
+ Py_XDECREF(dtype);
+ return res;
}
static PyObject *
@@ -2635,7 +2793,7 @@ array_einsum(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds)
arg0 = PyTuple_GET_ITEM(args, 0);
/* einsum('i,j', a, b), einsum('i,j->ij', a, b) */
- if (PyString_Check(arg0) || PyUnicode_Check(arg0)) {
+ if (PyBytes_Check(arg0) || PyUnicode_Check(arg0)) {
nop = einsum_sub_op_from_str(args, &str_obj, &subscripts, op);
}
/* einsum(a, [0], b, [1]), einsum(a, [0], b, [1], [0,1]) */
@@ -2766,17 +2924,27 @@ array_correlate2(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds)
static PyObject *
array_arange(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kws) {
PyObject *o_start = NULL, *o_stop = NULL, *o_step = NULL, *range=NULL;
- static char *kwd[]= {"start", "stop", "step", "dtype", NULL};
+ PyObject *like = NULL;
+ PyObject *array_function_result = NULL;
+ static char *kwd[] = {"start", "stop", "step", "dtype", "like", NULL};
PyArray_Descr *typecode = NULL;
- if (!PyArg_ParseTupleAndKeywords(args, kws, "O|OOO&:arange", kwd,
+ if (!PyArg_ParseTupleAndKeywords(args, kws, "O|OOO&$O:arange", kwd,
&o_start,
&o_stop,
&o_step,
- PyArray_DescrConverter2, &typecode)) {
+ PyArray_DescrConverter2, &typecode,
+ &like)) {
Py_XDECREF(typecode);
return NULL;
}
+
+ array_function_result = array_implement_c_array_function_creation(
+ "arange", args, kws);
+ if (array_function_result != Py_NotImplemented) {
+ return array_function_result;
+ }
+
range = PyArray_ArangeObj(o_start, o_stop, o_step, typecode);
Py_XDECREF(typecode);
@@ -2810,7 +2978,7 @@ array__get_ndarray_c_version(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObje
if (!PyArg_ParseTupleAndKeywords(args, kwds, "", kwlist )) {
return NULL;
}
- return PyInt_FromLong( (long) PyArray_GetNDArrayCVersion() );
+ return PyLong_FromLong( (long) PyArray_GetNDArrayCVersion() );
}
/*NUMPY_API
@@ -3708,7 +3876,7 @@ _vec_string(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *NPY_UNUSED(kw
}
if (PyArray_TYPE(char_array) == NPY_STRING) {
- method = PyObject_GetAttr((PyObject *)&PyString_Type, method_name);
+ method = PyObject_GetAttr((PyObject *)&PyBytes_Type, method_name);
}
else if (PyArray_TYPE(char_array) == NPY_UNICODE) {
method = PyObject_GetAttr((PyObject *)&PyUnicode_Type, method_name);
@@ -3950,7 +4118,7 @@ normalize_axis_index(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwds)
return NULL;
}
- return PyInt_FromLong(axis);
+ return PyLong_FromLong(axis);
}
@@ -4169,7 +4337,7 @@ setup_scalartypes(PyObject *NPY_UNUSED(dict))
if (PyType_Ready(&PyComplex_Type) < 0) {
return -1;
}
- if (PyType_Ready(&PyString_Type) < 0) {
+ if (PyType_Ready(&PyBytes_Type) < 0) {
return -1;
}
if (PyType_Ready(&PyUnicode_Type) < 0) {
@@ -4292,13 +4460,13 @@ set_flaginfo(PyObject *d)
newd = PyDict_New();
#define _addnew(key, val, one) \
- PyDict_SetItemString(newd, #key, s=PyInt_FromLong(val)); \
+ PyDict_SetItemString(newd, #key, s=PyLong_FromLong(val)); \
Py_DECREF(s); \
- PyDict_SetItemString(newd, #one, s=PyInt_FromLong(val)); \
+ PyDict_SetItemString(newd, #one, s=PyLong_FromLong(val)); \
Py_DECREF(s)
#define _addone(key, val) \
- PyDict_SetItemString(newd, #key, s=PyInt_FromLong(val)); \
+ PyDict_SetItemString(newd, #key, s=PyLong_FromLong(val)); \
Py_DECREF(s)
_addnew(OWNDATA, NPY_ARRAY_OWNDATA, O);
@@ -4331,28 +4499,33 @@ NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_dtype = NULL;
NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_ndmin = NULL;
NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_axis1 = NULL;
NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_axis2 = NULL;
+NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_like = NULL;
+NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_numpy = NULL;
static int
intern_strings(void)
{
- npy_ma_str_array = PyUString_InternFromString("__array__");
- npy_ma_str_array_prepare = PyUString_InternFromString("__array_prepare__");
- npy_ma_str_array_wrap = PyUString_InternFromString("__array_wrap__");
- npy_ma_str_array_finalize = PyUString_InternFromString("__array_finalize__");
- npy_ma_str_ufunc = PyUString_InternFromString("__array_ufunc__");
- npy_ma_str_implementation = PyUString_InternFromString("_implementation");
- npy_ma_str_order = PyUString_InternFromString("order");
- npy_ma_str_copy = PyUString_InternFromString("copy");
- npy_ma_str_dtype = PyUString_InternFromString("dtype");
- npy_ma_str_ndmin = PyUString_InternFromString("ndmin");
- npy_ma_str_axis1 = PyUString_InternFromString("axis1");
- npy_ma_str_axis2 = PyUString_InternFromString("axis2");
+ npy_ma_str_array = PyUnicode_InternFromString("__array__");
+ npy_ma_str_array_prepare = PyUnicode_InternFromString("__array_prepare__");
+ npy_ma_str_array_wrap = PyUnicode_InternFromString("__array_wrap__");
+ npy_ma_str_array_finalize = PyUnicode_InternFromString("__array_finalize__");
+ npy_ma_str_ufunc = PyUnicode_InternFromString("__array_ufunc__");
+ npy_ma_str_implementation = PyUnicode_InternFromString("_implementation");
+ npy_ma_str_order = PyUnicode_InternFromString("order");
+ npy_ma_str_copy = PyUnicode_InternFromString("copy");
+ npy_ma_str_dtype = PyUnicode_InternFromString("dtype");
+ npy_ma_str_ndmin = PyUnicode_InternFromString("ndmin");
+ npy_ma_str_axis1 = PyUnicode_InternFromString("axis1");
+ npy_ma_str_axis2 = PyUnicode_InternFromString("axis2");
+ npy_ma_str_like = PyUnicode_InternFromString("like");
+ npy_ma_str_numpy = PyUnicode_InternFromString("numpy");
return npy_ma_str_array && npy_ma_str_array_prepare &&
npy_ma_str_array_wrap && npy_ma_str_array_finalize &&
npy_ma_str_ufunc && npy_ma_str_implementation &&
npy_ma_str_order && npy_ma_str_copy && npy_ma_str_dtype &&
- npy_ma_str_ndmin && npy_ma_str_axis1 && npy_ma_str_axis2;
+ npy_ma_str_ndmin && npy_ma_str_axis1 && npy_ma_str_axis2 &&
+ npy_ma_str_like && npy_ma_str_numpy;
}
static struct PyModuleDef moduledef = {
@@ -4477,14 +4650,14 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) {
goto err;
}
- c_api = NpyCapsule_FromVoidPtr((void *)PyArray_API, NULL);
+ c_api = PyCapsule_New((void *)PyArray_API, NULL, NULL);
if (c_api == NULL) {
goto err;
}
PyDict_SetItemString(d, "_ARRAY_API", c_api);
Py_DECREF(c_api);
- c_api = NpyCapsule_FromVoidPtr((void *)PyUFunc_API, NULL);
+ c_api = PyCapsule_New((void *)PyUFunc_API, NULL, NULL);
if (c_api == NULL) {
goto err;
}
@@ -4502,11 +4675,11 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) {
*/
PyDict_SetItemString (d, "error", PyExc_Exception);
- s = PyInt_FromLong(NPY_TRACE_DOMAIN);
+ s = PyLong_FromLong(NPY_TRACE_DOMAIN);
PyDict_SetItemString(d, "tracemalloc_domain", s);
Py_DECREF(s);
- s = PyUString_FromString("3.1");
+ s = PyUnicode_FromString("3.1");
PyDict_SetItemString(d, "__version__", s);
Py_DECREF(s);
@@ -4540,7 +4713,7 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) {
}
Py_DECREF(s);
- s = NpyCapsule_FromVoidPtr((void *)_datetime_strings, NULL);
+ s = PyCapsule_New((void *)_datetime_strings, NULL, NULL);
if (s == NULL) {
goto err;
}
@@ -4548,7 +4721,7 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) {
Py_DECREF(s);
#define ADDCONST(NAME) \
- s = PyInt_FromLong(NPY_##NAME); \
+ s = PyLong_FromLong(NPY_##NAME); \
PyDict_SetItemString(d, #NAME, s); \
Py_DECREF(s)
diff --git a/numpy/core/src/multiarray/multiarraymodule.h b/numpy/core/src/multiarray/multiarraymodule.h
index dd437e091..d3ee3337c 100644
--- a/numpy/core/src/multiarray/multiarraymodule.h
+++ b/numpy/core/src/multiarray/multiarraymodule.h
@@ -13,5 +13,7 @@ NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_dtype;
NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_ndmin;
NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_axis1;
NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_axis2;
+NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_like;
+NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_numpy;
#endif
diff --git a/numpy/core/src/multiarray/nditer_constr.c b/numpy/core/src/multiarray/nditer_constr.c
index a0dda4090..4bc6d2ca1 100644
--- a/numpy/core/src/multiarray/nditer_constr.c
+++ b/numpy/core/src/multiarray/nditer_constr.c
@@ -1755,7 +1755,7 @@ broadcast_error: {
char *tmpstr;
if (op_axes == NULL) {
- errmsg = PyUString_FromString("operands could not be broadcast "
+ errmsg = PyUnicode_FromString("operands could not be broadcast "
"together with shapes ");
if (errmsg == NULL) {
return 0;
@@ -1776,7 +1776,7 @@ broadcast_error: {
}
}
if (itershape != NULL) {
- tmp = PyUString_FromString("and requested shape ");
+ tmp = PyUnicode_FromString("and requested shape ");
if (tmp == NULL) {
Py_DECREF(errmsg);
return 0;
@@ -1801,7 +1801,7 @@ broadcast_error: {
Py_DECREF(errmsg);
}
else {
- errmsg = PyUString_FromString("operands could not be broadcast "
+ errmsg = PyUnicode_FromString("operands could not be broadcast "
"together with remapped shapes "
"[original->remapped]: ");
for (iop = 0; iop < nop; ++iop) {
@@ -1843,7 +1843,7 @@ broadcast_error: {
}
}
if (itershape != NULL) {
- tmp = PyUString_FromString("and requested shape ");
+ tmp = PyUnicode_FromString("and requested shape ");
if (tmp == NULL) {
Py_DECREF(errmsg);
return 0;
@@ -1877,11 +1877,11 @@ operand_different_than_broadcast: {
/* Start of error message */
if (op_flags[iop] & NPY_ITER_READONLY) {
- errmsg = PyUString_FromString("non-broadcastable operand "
+ errmsg = PyUnicode_FromString("non-broadcastable operand "
"with shape ");
}
else {
- errmsg = PyUString_FromString("non-broadcastable output "
+ errmsg = PyUnicode_FromString("non-broadcastable output "
"operand with shape ");
}
if (errmsg == NULL) {
@@ -1913,7 +1913,7 @@ operand_different_than_broadcast: {
}
}
- tmp = PyUString_FromString(" [remapped to ");
+ tmp = PyUnicode_FromString(" [remapped to ");
if (tmp == NULL) {
return 0;
}
@@ -1932,7 +1932,7 @@ operand_different_than_broadcast: {
}
}
- tmp = PyUString_FromString(" doesn't match the broadcast shape ");
+ tmp = PyUnicode_FromString(" doesn't match the broadcast shape ");
if (tmp == NULL) {
return 0;
}
diff --git a/numpy/core/src/multiarray/nditer_pywrap.c b/numpy/core/src/multiarray/nditer_pywrap.c
index 1c68a4803..8839d1be7 100644
--- a/numpy/core/src/multiarray/nditer_pywrap.c
+++ b/numpy/core/src/multiarray/nditer_pywrap.c
@@ -894,7 +894,7 @@ NpyIter_NestedIters(PyObject *NPY_UNUSED(self),
Py_DECREF(item);
return NULL;
}
- axis = PyInt_AsLong(v);
+ axis = PyLong_AsLong(v);
Py_DECREF(v);
if (axis < 0 || axis >= NPY_MAXDIMS) {
PyErr_SetString(PyExc_ValueError,
@@ -1142,7 +1142,7 @@ npyiter_dealloc(NewNpyArrayIterObject *self)
"results.", 1) < 0) {
PyObject *s;
- s = PyUString_FromString("npyiter_dealloc");
+ s = PyUnicode_FromString("npyiter_dealloc");
if (s) {
PyErr_WriteUnraisable(s);
Py_DECREF(s);
@@ -1522,7 +1522,7 @@ static PyObject *npyiter_shape_get(NewNpyArrayIterObject *self)
if (ret != NULL) {
for (idim = 0; idim < ndim; ++idim) {
PyTuple_SET_ITEM(ret, idim,
- PyInt_FromLong(shape[idim]));
+ PyLong_FromLong(shape[idim]));
}
return ret;
}
@@ -1551,7 +1551,7 @@ static PyObject *npyiter_multi_index_get(NewNpyArrayIterObject *self)
}
for (idim = 0; idim < ndim; ++idim) {
PyTuple_SET_ITEM(ret, idim,
- PyInt_FromLong(multi_index[idim]));
+ PyLong_FromLong(multi_index[idim]));
}
return ret;
}
@@ -1605,7 +1605,7 @@ npyiter_multi_index_set(NewNpyArrayIterObject *self, PyObject *value)
}
for (idim = 0; idim < ndim; ++idim) {
PyObject *v = PySequence_GetItem(value, idim);
- multi_index[idim] = PyInt_AsLong(v);
+ multi_index[idim] = PyLong_AsLong(v);
if (error_converting(multi_index[idim])) {
Py_XDECREF(v);
return -1;
@@ -1641,7 +1641,7 @@ static PyObject *npyiter_index_get(NewNpyArrayIterObject *self)
if (NpyIter_HasIndex(self->iter)) {
npy_intp ind = *NpyIter_GetIndexPtr(self->iter);
- return PyInt_FromLong(ind);
+ return PyLong_FromLong(ind);
}
else {
PyErr_SetString(PyExc_ValueError,
@@ -1665,7 +1665,7 @@ static int npyiter_index_set(NewNpyArrayIterObject *self, PyObject *value)
if (NpyIter_HasIndex(self->iter)) {
npy_intp ind;
- ind = PyInt_AsLong(value);
+ ind = PyLong_AsLong(value);
if (error_converting(ind)) {
return -1;
}
@@ -1697,7 +1697,7 @@ static PyObject *npyiter_iterindex_get(NewNpyArrayIterObject *self)
return NULL;
}
- return PyInt_FromLong(NpyIter_GetIterIndex(self->iter));
+ return PyLong_FromLong(NpyIter_GetIterIndex(self->iter));
}
static int npyiter_iterindex_set(NewNpyArrayIterObject *self, PyObject *value)
@@ -1715,7 +1715,7 @@ static int npyiter_iterindex_set(NewNpyArrayIterObject *self, PyObject *value)
return -1;
}
- iterindex = PyInt_AsLong(value);
+ iterindex = PyLong_AsLong(value);
if (error_converting(iterindex)) {
return -1;
}
@@ -1751,8 +1751,8 @@ static PyObject *npyiter_iterrange_get(NewNpyArrayIterObject *self)
return NULL;
}
- PyTuple_SET_ITEM(ret, 0, PyInt_FromLong(istart));
- PyTuple_SET_ITEM(ret, 1, PyInt_FromLong(iend));
+ PyTuple_SET_ITEM(ret, 0, PyLong_FromLong(istart));
+ PyTuple_SET_ITEM(ret, 1, PyLong_FromLong(iend));
return ret;
}
@@ -1900,7 +1900,7 @@ static PyObject *npyiter_ndim_get(NewNpyArrayIterObject *self)
return NULL;
}
- return PyInt_FromLong(NpyIter_GetNDim(self->iter));
+ return PyLong_FromLong(NpyIter_GetNDim(self->iter));
}
static PyObject *npyiter_nop_get(NewNpyArrayIterObject *self)
@@ -1911,7 +1911,7 @@ static PyObject *npyiter_nop_get(NewNpyArrayIterObject *self)
return NULL;
}
- return PyInt_FromLong(NpyIter_GetNOp(self->iter));
+ return PyLong_FromLong(NpyIter_GetNOp(self->iter));
}
static PyObject *npyiter_itersize_get(NewNpyArrayIterObject *self)
@@ -1922,7 +1922,7 @@ static PyObject *npyiter_itersize_get(NewNpyArrayIterObject *self)
return NULL;
}
- return PyInt_FromLong(NpyIter_GetIterSize(self->iter));
+ return PyLong_FromLong(NpyIter_GetIterSize(self->iter));
}
static PyObject *npyiter_finished_get(NewNpyArrayIterObject *self)
@@ -2221,7 +2221,7 @@ npyiter_subscript(NewNpyArrayIterObject *self, PyObject *op)
return NULL;
}
- if (PyInt_Check(op) || PyLong_Check(op) ||
+ if (PyLong_Check(op) ||
(PyIndex_Check(op) && !PySequence_Check(op))) {
npy_intp i = PyArray_PyIntAsIntp(op);
if (error_converting(i)) {
@@ -2231,8 +2231,8 @@ npyiter_subscript(NewNpyArrayIterObject *self, PyObject *op)
}
else if (PySlice_Check(op)) {
Py_ssize_t istart = 0, iend = 0, istep = 0, islicelength;
- if (NpySlice_GetIndicesEx(op, NpyIter_GetNOp(self->iter),
- &istart, &iend, &istep, &islicelength) < 0) {
+ if (PySlice_GetIndicesEx(op, NpyIter_GetNOp(self->iter),
+ &istart, &iend, &istep, &islicelength) < 0) {
return NULL;
}
if (istep != 1) {
@@ -2270,7 +2270,7 @@ npyiter_ass_subscript(NewNpyArrayIterObject *self, PyObject *op,
return -1;
}
- if (PyInt_Check(op) || PyLong_Check(op) ||
+ if (PyLong_Check(op) ||
(PyIndex_Check(op) && !PySequence_Check(op))) {
npy_intp i = PyArray_PyIntAsIntp(op);
if (error_converting(i)) {
@@ -2280,8 +2280,8 @@ npyiter_ass_subscript(NewNpyArrayIterObject *self, PyObject *op,
}
else if (PySlice_Check(op)) {
Py_ssize_t istart = 0, iend = 0, istep = 0, islicelength = 0;
- if (NpySlice_GetIndicesEx(op, NpyIter_GetNOp(self->iter),
- &istart, &iend, &istep, &islicelength) < 0) {
+ if (PySlice_GetIndicesEx(op, NpyIter_GetNOp(self->iter),
+ &istart, &iend, &istep, &islicelength) < 0) {
return -1;
}
if (istep != 1) {
diff --git a/numpy/core/src/multiarray/number.c b/numpy/core/src/multiarray/number.c
index 19ac7d7f9..87c3c9b0a 100644
--- a/numpy/core/src/multiarray/number.c
+++ b/numpy/core/src/multiarray/number.c
@@ -398,7 +398,7 @@ is_scalar_with_conversion(PyObject *o2, double* out_exponent)
const int optimize_fpexps = 1;
if (PyInt_Check(o2)) {
- *out_exponent = (double)PyInt_AsLong(o2);
+ *out_exponent = (double)PyLong_AsLong(o2);
return NPY_INTPOS_SCALAR;
}
if (optimize_fpexps && PyFloat_Check(o2)) {
@@ -448,7 +448,7 @@ is_scalar_with_conversion(PyObject *o2, double* out_exponent)
}
return NPY_NOSCALAR;
}
- val = PyInt_AsSsize_t(value);
+ val = PyLong_AsSsize_t(value);
if (error_converting(val)) {
PyErr_Clear();
return NPY_NOSCALAR;
@@ -826,7 +826,7 @@ _array_nonzero(PyArrayObject *mp)
n = PyArray_SIZE(mp);
if (n == 1) {
int res;
- if (Npy_EnterRecursiveCall(" while converting array to bool")) {
+ if (Py_EnterRecursiveCall(" while converting array to bool")) {
return -1;
}
res = PyArray_DESCR(mp)->f->nonzero(PyArray_DATA(mp), mp);
@@ -880,7 +880,7 @@ array_scalar_forward(PyArrayObject *v,
/* Need to guard against recursion if our array holds references */
if (PyDataType_REFCHK(PyArray_DESCR(v))) {
PyObject *res;
- if (Npy_EnterRecursiveCall(where) != 0) {
+ if (Py_EnterRecursiveCall(where) != 0) {
Py_DECREF(scalar);
return NULL;
}
diff --git a/numpy/core/src/multiarray/refcount.c b/numpy/core/src/multiarray/refcount.c
index c869b5eea..0f84449af 100644
--- a/numpy/core/src/multiarray/refcount.c
+++ b/numpy/core/src/multiarray/refcount.c
@@ -36,7 +36,7 @@ PyArray_Item_INCREF(char *data, PyArray_Descr *descr)
return;
}
if (descr->type_num == NPY_OBJECT) {
- NPY_COPY_PYOBJECT_PTR(&temp, data);
+ memcpy(&temp, data, sizeof(temp));
Py_XINCREF(temp);
}
else if (PyDataType_HASFIELDS(descr)) {
@@ -98,7 +98,7 @@ PyArray_Item_XDECREF(char *data, PyArray_Descr *descr)
}
if (descr->type_num == NPY_OBJECT) {
- NPY_COPY_PYOBJECT_PTR(&temp, data);
+ memcpy(&temp, data, sizeof(temp));
Py_XDECREF(temp);
}
else if (PyDataType_HASFIELDS(descr)) {
@@ -181,7 +181,7 @@ PyArray_INCREF(PyArrayObject *mp)
}
else {
for( i = 0; i < n; i++, data++) {
- NPY_COPY_PYOBJECT_PTR(&temp, data);
+ memcpy(&temp, data, sizeof(temp));
Py_XINCREF(temp);
}
}
@@ -192,7 +192,7 @@ PyArray_INCREF(PyArrayObject *mp)
return -1;
}
while(it->index < it->size) {
- NPY_COPY_PYOBJECT_PTR(&temp, it->dataptr);
+ memcpy(&temp, it->dataptr, sizeof(temp));
Py_XINCREF(temp);
PyArray_ITER_NEXT(it);
}
@@ -238,7 +238,7 @@ PyArray_XDECREF(PyArrayObject *mp)
}
else {
for (i = 0; i < n; i++, data++) {
- NPY_COPY_PYOBJECT_PTR(&temp, data);
+ memcpy(&temp, data, sizeof(temp));
Py_XDECREF(temp);
}
}
@@ -246,7 +246,7 @@ PyArray_XDECREF(PyArrayObject *mp)
else { /* handles misaligned data too */
PyArray_RawIterBaseInit(&it, mp);
while(it.index < it.size) {
- NPY_COPY_PYOBJECT_PTR(&temp, it.dataptr);
+ memcpy(&temp, it.dataptr, sizeof(temp));
Py_XDECREF(temp);
PyArray_ITER_NEXT(&it);
}
@@ -292,7 +292,7 @@ static void
_fillobject(char *optr, PyObject *obj, PyArray_Descr *dtype)
{
if (!PyDataType_FLAGCHK(dtype, NPY_ITEM_REFCOUNT)) {
- if ((obj == Py_None) || (PyInt_Check(obj) && PyInt_AsLong(obj)==0)) {
+ if ((obj == Py_None) || (PyInt_Check(obj) && PyLong_AsLong(obj)==0)) {
return;
}
else {
@@ -309,7 +309,7 @@ _fillobject(char *optr, PyObject *obj, PyArray_Descr *dtype)
}
if (dtype->type_num == NPY_OBJECT) {
Py_XINCREF(obj);
- NPY_COPY_PYOBJECT_PTR(optr, &obj);
+ memcpy(optr, &obj, sizeof(obj));
}
else if (PyDataType_HASFIELDS(dtype)) {
PyObject *key, *value, *title = NULL;
diff --git a/numpy/core/src/multiarray/scalarapi.c b/numpy/core/src/multiarray/scalarapi.c
index 6f3d102a4..b2f52f554 100644
--- a/numpy/core/src/multiarray/scalarapi.c
+++ b/numpy/core/src/multiarray/scalarapi.c
@@ -138,7 +138,7 @@ scalar_value(PyObject *scalar, PyArray_Descr *descr)
}
else if (_CHK(Flexible)) {
if (_CHK(String)) {
- return (void *)PyString_AS_STRING(scalar);
+ return (void *)PyBytes_AS_STRING(scalar);
}
if (_CHK(Unicode)) {
/* Treat this the same as the NPY_UNICODE base class */
@@ -380,7 +380,7 @@ PyArray_ScalarFromObject(PyObject *object)
}
/*
* Booleans in Python are implemented as a subclass of integers,
- * so PyBool_Check must be called before PyInt_Check.
+ * so PyBool_Check must be called before PyLong_Check.
*/
if (PyBool_Check(object)) {
if (object == Py_True) {
@@ -395,7 +395,7 @@ PyArray_ScalarFromObject(PyObject *object)
if (ret == NULL) {
return NULL;
}
- PyArrayScalar_VAL(ret, Long) = PyInt_AS_LONG(object);
+ PyArrayScalar_VAL(ret, Long) = PyLong_AsLong(object);
}
else if (PyFloat_Check(object)) {
ret = PyArrayScalar_New(Double);
@@ -755,8 +755,8 @@ PyArray_Scalar(void *data, PyArray_Descr *descr, PyObject *base)
}
if (PyTypeNum_ISFLEXIBLE(type_num)) {
if (type_num == NPY_STRING) {
- destptr = PyString_AS_STRING(obj);
- ((PyStringObject *)obj)->ob_shash = -1;
+ destptr = PyBytes_AS_STRING(obj);
+ ((PyBytesObject *)obj)->ob_shash = -1;
memcpy(destptr, data, itemsize);
return obj;
}
diff --git a/numpy/core/src/multiarray/scalartypes.c.src b/numpy/core/src/multiarray/scalartypes.c.src
index 58b9e2c30..d0efaa2a0 100644
--- a/numpy/core/src/multiarray/scalartypes.c.src
+++ b/numpy/core/src/multiarray/scalartypes.c.src
@@ -149,7 +149,7 @@ static PyObject *
gentype_add(PyObject *m1, PyObject* m2)
{
/* special case str.__radd__, which should not call array_add */
- if (PyString_Check(m1) || PyUnicode_Check(m1)) {
+ if (PyBytes_Check(m1) || PyUnicode_Check(m1)) {
Py_INCREF(Py_NotImplemented);
return Py_NotImplemented;
}
@@ -447,7 +447,7 @@ _void_to_hex(const char* argbuf, const Py_ssize_t arglen,
}
memcpy(&retbuf[j], echars, strlen(echars));
- retval = PyUString_FromStringAndSize(retbuf, slen);
+ retval = PyUnicode_FromStringAndSize(retbuf, slen);
PyMem_Free(retbuf);
return retval;
@@ -518,21 +518,21 @@ datetimetype_repr(PyObject *self)
*/
if ((scal->obmeta.num == 1 && scal->obmeta.base != NPY_FR_h) ||
scal->obmeta.base == NPY_FR_GENERIC) {
- ret = PyUString_FromString("numpy.datetime64('");
+ ret = PyUnicode_FromString("numpy.datetime64('");
PyUString_ConcatAndDel(&ret,
- PyUString_FromString(iso));
+ PyUnicode_FromString(iso));
PyUString_ConcatAndDel(&ret,
- PyUString_FromString("')"));
+ PyUnicode_FromString("')"));
}
else {
- ret = PyUString_FromString("numpy.datetime64('");
+ ret = PyUnicode_FromString("numpy.datetime64('");
PyUString_ConcatAndDel(&ret,
- PyUString_FromString(iso));
+ PyUnicode_FromString(iso));
PyUString_ConcatAndDel(&ret,
- PyUString_FromString("','"));
+ PyUnicode_FromString("','"));
ret = append_metastr_to_string(&scal->obmeta, 1, ret);
PyUString_ConcatAndDel(&ret,
- PyUString_FromString("')"));
+ PyUnicode_FromString("')"));
}
return ret;
@@ -554,31 +554,31 @@ timedeltatype_repr(PyObject *self)
/* The value */
if (scal->obval == NPY_DATETIME_NAT) {
- ret = PyUString_FromString("numpy.timedelta64('NaT'");
+ ret = PyUnicode_FromString("numpy.timedelta64('NaT'");
}
else {
/*
* Can't use "%lld" if HAVE_LONG_LONG is not defined
*/
#if defined(HAVE_LONG_LONG)
- ret = PyUString_FromFormat("numpy.timedelta64(%lld",
+ ret = PyUnicode_FromFormat("numpy.timedelta64(%lld",
(long long)scal->obval);
#else
- ret = PyUString_FromFormat("numpy.timedelta64(%ld",
+ ret = PyUnicode_FromFormat("numpy.timedelta64(%ld",
(long)scal->obval);
#endif
}
/* The metadata unit */
if (scal->obmeta.base == NPY_FR_GENERIC) {
PyUString_ConcatAndDel(&ret,
- PyUString_FromString(")"));
+ PyUnicode_FromString(")"));
}
else {
PyUString_ConcatAndDel(&ret,
- PyUString_FromString(",'"));
+ PyUnicode_FromString(",'"));
ret = append_metastr_to_string(&scal->obmeta, 1, ret);
PyUString_ConcatAndDel(&ret,
- PyUString_FromString("')"));
+ PyUnicode_FromString("')"));
}
return ret;
@@ -611,7 +611,7 @@ datetimetype_str(PyObject *self)
return NULL;
}
- return PyUString_FromString(iso);
+ return PyUnicode_FromString(iso);
}
static char *_datetime_verbose_strings[NPY_DATETIME_NUMUNITS] = {
@@ -657,21 +657,21 @@ timedeltatype_str(PyObject *self)
}
if (scal->obval == NPY_DATETIME_NAT) {
- ret = PyUString_FromString("NaT");
+ ret = PyUnicode_FromString("NaT");
}
else {
/*
* Can't use "%lld" if HAVE_LONG_LONG is not defined
*/
#if defined(HAVE_LONG_LONG)
- ret = PyUString_FromFormat("%lld ",
+ ret = PyUnicode_FromFormat("%lld ",
(long long)(scal->obval * scal->obmeta.num));
#else
- ret = PyUString_FromFormat("%ld ",
+ ret = PyUnicode_FromFormat("%ld ",
(long)(scal->obval * scal->obmeta.num));
#endif
PyUString_ConcatAndDel(&ret,
- PyUString_FromString(basestr));
+ PyUnicode_FromString(basestr));
}
return ret;
@@ -795,7 +795,7 @@ legacy_@name@_format@kind@(@type@ val)
PyOS_snprintf(buf, sizeof(buf), "(%s%sj)", re, im);
}
- return PyUString_FromString(buf);
+ return PyUnicode_FromString(buf);
}
#undef _FMT1
@@ -836,7 +836,7 @@ legacy_@name@_format@kind@(npy_@name@ val){
strcpy(&buf[cnt],".0");
}
- return PyUString_FromString(buf);
+ return PyUnicode_FromString(buf);
}
#undef _FMT1
@@ -904,7 +904,7 @@ c@name@type_@kind@(PyObject *self)
return NULL;
}
- PyUString_ConcatAndDel(&istr, PyUString_FromString("j"));
+ PyUString_ConcatAndDel(&istr, PyUnicode_FromString("j"));
return istr;
}
@@ -915,13 +915,13 @@ c@name@type_@kind@(PyObject *self)
}
}
else if (npy_isnan(val.real)) {
- rstr = PyUString_FromString("nan");
+ rstr = PyUnicode_FromString("nan");
}
else if (val.real > 0){
- rstr = PyUString_FromString("inf");
+ rstr = PyUnicode_FromString("inf");
}
else {
- rstr = PyUString_FromString("-inf");
+ rstr = PyUnicode_FromString("-inf");
}
if (npy_isfinite(val.imag)) {
@@ -931,19 +931,19 @@ c@name@type_@kind@(PyObject *self)
}
}
else if (npy_isnan(val.imag)) {
- istr = PyUString_FromString("+nan");
+ istr = PyUnicode_FromString("+nan");
}
else if (val.imag > 0){
- istr = PyUString_FromString("+inf");
+ istr = PyUnicode_FromString("+inf");
}
else {
- istr = PyUString_FromString("-inf");
+ istr = PyUnicode_FromString("-inf");
}
- ret = PyUString_FromString("(");
+ ret = PyUnicode_FromString("(");
PyUString_ConcatAndDel(&ret, rstr);
PyUString_ConcatAndDel(&ret, istr);
- PyUString_ConcatAndDel(&ret, PyUString_FromString("j)"));
+ PyUString_ConcatAndDel(&ret, PyUnicode_FromString("j)"));
return ret;
}
@@ -1058,7 +1058,7 @@ gentype_richcompare(PyObject *self, PyObject *other, int cmp_op)
static PyObject *
gentype_ndim_get(PyObject *NPY_UNUSED(self))
{
- return PyInt_FromLong(0);
+ return PyLong_FromLong(0);
}
static PyObject *
@@ -1099,7 +1099,7 @@ inttype_numerator_get(PyObject *self)
static PyObject *
inttype_denominator_get(PyObject *self)
{
- return PyInt_FromLong(1);
+ return PyLong_FromLong(1);
}
@@ -1119,7 +1119,7 @@ gentype_itemsize_get(PyObject *self)
typecode = PyArray_DescrFromScalar(self);
elsize = typecode->elsize;
- ret = PyInt_FromLong((long) elsize);
+ ret = PyLong_FromLong((long) elsize);
Py_DECREF(typecode);
return ret;
}
@@ -1127,7 +1127,7 @@ gentype_itemsize_get(PyObject *self)
static PyObject *
gentype_size_get(PyObject *NPY_UNUSED(self))
{
- return PyInt_FromLong(1);
+ return PyLong_FromLong(1);
}
static PyObject *
@@ -1147,12 +1147,16 @@ gentype_sizeof(PyObject *self)
NPY_NO_EXPORT void
gentype_struct_free(PyObject *ptr)
{
- PyArrayInterface *arrif;
- PyObject *context;
-
- arrif = (PyArrayInterface*)PyCapsule_GetPointer(ptr, NULL);
- context = (PyObject *)PyCapsule_GetContext(ptr);
- Py_DECREF(context);
+ PyArrayInterface *arrif = (PyArrayInterface*)PyCapsule_GetPointer(ptr, NULL);
+ if (arrif == NULL) {
+ PyErr_WriteUnraisable(ptr);
+ return;
+ }
+ PyObject *context = (PyObject *)PyCapsule_GetContext(ptr);
+ if (context == NULL && PyErr_Occurred()) {
+ PyErr_WriteUnraisable(ptr);
+ }
+ Py_XDECREF(context);
Py_XDECREF(arrif->descr);
PyArray_free(arrif->shape);
PyArray_free(arrif);
@@ -1307,7 +1311,7 @@ gentype_imag_get(PyObject *self)
ret = PyObject_GetAttrString(obj, "imag");
if (ret == NULL) {
PyErr_Clear();
- obj = PyInt_FromLong(0);
+ obj = PyLong_FromLong(0);
newtype = PyArray_DescrFromType(NPY_OBJECT);
ret = PyArray_Scalar((char *)&obj, newtype, NULL);
Py_DECREF(newtype);
@@ -2893,7 +2897,7 @@ bool_arrtype_nonzero(PyObject *a)
* ulong, ulonglong#
* #Name = Byte, Short, Int, Long, UByte, UShort, LongLong, UInt,
* ULong, ULongLong#
- * #type = PyInt_FromLong*6, PyLong_FromLongLong*1,
+ * #type = PyLong_FromLong*6, PyLong_FromLongLong*1,
* PyLong_FromUnsignedLong*2, PyLong_FromUnsignedLongLong#
*/
static PyNumberMethods @name@_arrtype_as_number;
@@ -2922,7 +2926,7 @@ bool_index(PyObject *a)
return NULL;
}
else {
- return PyInt_FromLong(PyArrayScalar_VAL(a, Bool));
+ return PyLong_FromLong(PyArrayScalar_VAL(a, Bool));
}
}
diff --git a/numpy/core/src/multiarray/shape.c b/numpy/core/src/multiarray/shape.c
index 30507112d..1a38fe956 100644
--- a/numpy/core/src/multiarray/shape.c
+++ b/numpy/core/src/multiarray/shape.c
@@ -133,7 +133,7 @@ PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int refcheck,
if (newnbytes > oldnbytes && PyArray_ISWRITEABLE(self)) {
/* Fill new memory with zeros */
if (PyDataType_FLAGCHK(PyArray_DESCR(self), NPY_ITEM_REFCOUNT)) {
- PyObject *zero = PyInt_FromLong(0);
+ PyObject *zero = PyLong_FromLong(0);
char *optr;
optr = PyArray_BYTES(self) + oldnbytes;
npy_intp n_new = newsize - oldsize;
@@ -332,7 +332,7 @@ _putzero(char *optr, PyObject *zero, PyArray_Descr *dtype)
for (i = 0; i < nsize; i++) {
Py_INCREF(zero);
- NPY_COPY_PYOBJECT_PTR(optr, &zero);
+ memcpy(optr, &zero, sizeof(zero));
optr += sizeof(zero);
}
}
@@ -458,7 +458,7 @@ _attempt_nocopy_reshape(PyArrayObject *self, int newnd, const npy_intp *newdims,
static void
raise_reshape_size_mismatch(PyArray_Dims *newshape, PyArrayObject *arr)
{
- PyObject *msg = PyUString_FromFormat("cannot reshape array of size %zd "
+ PyObject *msg = PyUnicode_FromFormat("cannot reshape array of size %zd "
"into shape ", PyArray_SIZE(arr));
PyObject *tmp = convert_shape_to_string(newshape->len, newshape->ptr, "");
@@ -997,10 +997,10 @@ build_shape_string(npy_intp n, npy_intp const *vals)
}
if (i == n) {
- return PyUString_FromFormat("()");
+ return PyUnicode_FromFormat("()");
}
else {
- ret = PyUString_FromFormat("(%" NPY_INTP_FMT, vals[i++]);
+ ret = PyUnicode_FromFormat("(%" NPY_INTP_FMT, vals[i++]);
if (ret == NULL) {
return NULL;
}
@@ -1008,10 +1008,10 @@ build_shape_string(npy_intp n, npy_intp const *vals)
for (; i < n; ++i) {
if (vals[i] < 0) {
- tmp = PyUString_FromString(",newaxis");
+ tmp = PyUnicode_FromString(",newaxis");
}
else {
- tmp = PyUString_FromFormat(",%" NPY_INTP_FMT, vals[i]);
+ tmp = PyUnicode_FromFormat(",%" NPY_INTP_FMT, vals[i]);
}
if (tmp == NULL) {
Py_DECREF(ret);
@@ -1024,7 +1024,7 @@ build_shape_string(npy_intp n, npy_intp const *vals)
}
}
- tmp = PyUString_FromFormat(")");
+ tmp = PyUnicode_FromFormat(")");
PyUString_ConcatAndDel(&ret, tmp);
return ret;
}
diff --git a/numpy/core/src/multiarray/strfuncs.c b/numpy/core/src/multiarray/strfuncs.c
index 363cbdba2..d9d9b7c0a 100644
--- a/numpy/core/src/multiarray/strfuncs.c
+++ b/numpy/core/src/multiarray/strfuncs.c
@@ -3,14 +3,25 @@
#include <Python.h>
#include <numpy/arrayobject.h>
-
#include "npy_pycompat.h"
-
+#include "npy_import.h"
#include "strfuncs.h"
static PyObject *PyArray_StrFunction = NULL;
static PyObject *PyArray_ReprFunction = NULL;
+
+static void
+npy_PyErr_SetStringChained(PyObject *type, const char *message)
+{
+ PyObject *exc, *val, *tb;
+
+ PyErr_Fetch(&exc, &val, &tb);
+ PyErr_SetString(type, message);
+ npy_PyErr_ChainExceptionsCause(exc, val, tb);
+}
+
+
/*NUMPY_API
* Set the array print function to be a Python function.
*/
@@ -36,164 +47,52 @@ PyArray_SetStringFunction(PyObject *op, int repr)
}
-/*
- * Extend string. On failure, returns NULL and leaves *strp alone.
- * XXX we do this in multiple places; time for a string library?
- */
-static char *
-extend_str(char **strp, Py_ssize_t n, Py_ssize_t *maxp)
-{
- char *str = *strp;
- Py_ssize_t new_cap;
-
- if (n >= *maxp - 16) {
- new_cap = *maxp * 2;
-
- if (new_cap <= *maxp) { /* overflow */
- return NULL;
- }
- str = PyArray_realloc(*strp, new_cap);
- if (str != NULL) {
- *strp = str;
- *maxp = new_cap;
- }
- }
- return str;
-}
-
-
-static int
-dump_data(char **string, Py_ssize_t *n, Py_ssize_t *max_n, char *data, int nd,
- npy_intp const *dimensions, npy_intp const *strides, PyArrayObject* self)
-{
- PyObject *op = NULL, *sp = NULL;
- char *ostring;
- npy_intp i, N, ret = 0;
-
-#define CHECK_MEMORY do { \
- if (extend_str(string, *n, max_n) == NULL) { \
- ret = -1; \
- goto end; \
- } \
- } while (0)
-
- if (nd == 0) {
- if ((op = PyArray_GETITEM(self, data)) == NULL) {
- return -1;
- }
- sp = PyObject_Repr(op);
- if (sp == NULL) {
- ret = -1;
- goto end;
- }
- ostring = PyString_AsString(sp);
- N = PyString_Size(sp)*sizeof(char);
- *n += N;
- CHECK_MEMORY;
- memmove(*string + (*n - N), ostring, N);
- }
- else {
- CHECK_MEMORY;
- (*string)[*n] = '[';
- *n += 1;
- for (i = 0; i < dimensions[0]; i++) {
- if (dump_data(string, n, max_n,
- data + (*strides)*i,
- nd - 1, dimensions + 1,
- strides + 1, self) < 0) {
- return -1;
- }
- CHECK_MEMORY;
- if (i < dimensions[0] - 1) {
- (*string)[*n] = ',';
- (*string)[*n+1] = ' ';
- *n += 2;
- }
- }
- CHECK_MEMORY;
- (*string)[*n] = ']';
- *n += 1;
- }
-
-#undef CHECK_MEMORY
-
-end:
- Py_XDECREF(op);
- Py_XDECREF(sp);
- return ret;
-}
-
-
-static PyObject *
-array_repr_builtin(PyArrayObject *self, int repr)
-{
- PyObject *ret;
- char *string;
- /* max_n initial value is arbitrary, dump_data will extend it */
- Py_ssize_t n = 0, max_n = PyArray_NBYTES(self) * 4 + 7;
-
- if ((string = PyArray_malloc(max_n)) == NULL) {
- return PyErr_NoMemory();
- }
-
- if (dump_data(&string, &n, &max_n, PyArray_DATA(self),
- PyArray_NDIM(self), PyArray_DIMS(self),
- PyArray_STRIDES(self), self) < 0) {
- PyArray_free(string);
- return NULL;
- }
-
- if (repr) {
- if (PyArray_ISEXTENDED(self)) {
- ret = PyUString_FromFormat("array(%s, '%c%d')",
- string,
- PyArray_DESCR(self)->type,
- PyArray_DESCR(self)->elsize);
- }
- else {
- ret = PyUString_FromFormat("array(%s, '%c')",
- string,
- PyArray_DESCR(self)->type);
- }
- }
- else {
- ret = PyUString_FromStringAndSize(string, n);
- }
-
- PyArray_free(string);
- return ret;
-}
-
-
NPY_NO_EXPORT PyObject *
array_repr(PyArrayObject *self)
{
- PyObject *s;
+ static PyObject *repr = NULL;
- if (PyArray_ReprFunction == NULL) {
- s = array_repr_builtin(self, 1);
+ if (PyArray_ReprFunction != NULL) {
+ return PyObject_CallFunctionObjArgs(PyArray_ReprFunction, self, NULL);
}
- else {
- s = PyObject_CallFunctionObjArgs(PyArray_ReprFunction, self, NULL);
+
+ /*
+ * We need to do a delayed import here as initialization on module load
+ * leads to circular import problems.
+ */
+ npy_cache_import("numpy.core.arrayprint", "_default_array_repr", &repr);
+ if (repr == NULL) {
+ npy_PyErr_SetStringChained(PyExc_RuntimeError,
+ "Unable to configure default ndarray.__repr__");
+ return NULL;
}
- return s;
+ return PyObject_CallFunctionObjArgs(repr, self, NULL);
}
NPY_NO_EXPORT PyObject *
array_str(PyArrayObject *self)
{
- PyObject *s;
+ static PyObject *str = NULL;
- if (PyArray_StrFunction == NULL) {
- s = array_repr_builtin(self, 0);
+ if (PyArray_StrFunction != NULL) {
+ return PyObject_CallFunctionObjArgs(PyArray_StrFunction, self, NULL);
}
- else {
- s = PyObject_CallFunctionObjArgs(PyArray_StrFunction, self, NULL);
+
+ /*
+ * We need to do a delayed import here as initialization on module load leads
+ * to circular import problems.
+ */
+ npy_cache_import("numpy.core.arrayprint", "_default_array_str", &str);
+ if (str == NULL) {
+ npy_PyErr_SetStringChained(PyExc_RuntimeError,
+ "Unable to configure default ndarray.__str__");
+ return NULL;
}
- return s;
+ return PyObject_CallFunctionObjArgs(str, self, NULL);
}
+
NPY_NO_EXPORT PyObject *
array_format(PyArrayObject *self, PyObject *args)
{
@@ -221,4 +120,3 @@ array_format(PyArrayObject *self, PyObject *args)
);
}
}
-
diff --git a/numpy/core/src/multiarray/temp_elide.c b/numpy/core/src/multiarray/temp_elide.c
index 09b948218..b19dee418 100644
--- a/numpy/core/src/multiarray/temp_elide.c
+++ b/numpy/core/src/multiarray/temp_elide.c
@@ -62,12 +62,8 @@
#define NPY_ELIDE_DEBUG 0
#define NPY_MAX_STACKSIZE 10
-#if PY_VERSION_HEX >= 0x03060000
/* TODO can pep523 be used to somehow? */
#define PYFRAMEEVAL_FUNC "_PyEval_EvalFrameDefault"
-#else
-#define PYFRAMEEVAL_FUNC "PyEval_EvalFrameEx"
-#endif
/*
* Heuristic size of the array in bytes at which backtrace overhead generation
* becomes less than speed gained by in-place operations. Depends on stack depth
diff --git a/numpy/core/src/multiarray/usertypes.c b/numpy/core/src/multiarray/usertypes.c
index 0c8d49970..6b6c6bd9d 100644
--- a/numpy/core/src/multiarray/usertypes.c
+++ b/numpy/core/src/multiarray/usertypes.c
@@ -268,11 +268,11 @@ PyArray_RegisterCastFunc(PyArray_Descr *descr, int totype,
return -1;
}
}
- key = PyInt_FromLong(totype);
+ key = PyLong_FromLong(totype);
if (PyErr_Occurred()) {
return -1;
}
- cobj = NpyCapsule_FromVoidPtr((void *)castfunc, NULL);
+ cobj = PyCapsule_New((void *)castfunc, NULL, NULL);
if (cobj == NULL) {
Py_DECREF(key);
return -1;
diff --git a/numpy/core/src/npymath/npy_math_private.h b/numpy/core/src/npymath/npy_math_private.h
index e4a919db6..212d11a0b 100644
--- a/numpy/core/src/npymath/npy_math_private.h
+++ b/numpy/core/src/npymath/npy_math_private.h
@@ -25,7 +25,6 @@
#include "npy_fpmath.h"
#include "numpy/npy_math.h"
-#include "numpy/npy_cpu.h"
#include "numpy/npy_endian.h"
#include "numpy/npy_common.h"
diff --git a/numpy/core/src/npysort/binsearch.c.src b/numpy/core/src/npysort/binsearch.c.src
index c04e197b7..41165897b 100644
--- a/numpy/core/src/npysort/binsearch.c.src
+++ b/numpy/core/src/npysort/binsearch.c.src
@@ -35,7 +35,7 @@
* #CMP = LT, LTE#
*/
-NPY_VISIBILITY_HIDDEN void
+NPY_NO_EXPORT void
binsearch_@side@_@suff@(const char *arr, const char *key, char *ret,
npy_intp arr_len, npy_intp key_len,
npy_intp arr_str, npy_intp key_str, npy_intp ret_str,
@@ -81,7 +81,7 @@ binsearch_@side@_@suff@(const char *arr, const char *key, char *ret,
}
}
-NPY_VISIBILITY_HIDDEN int
+NPY_NO_EXPORT int
argbinsearch_@side@_@suff@(const char *arr, const char *key,
const char *sort, char *ret,
npy_intp arr_len, npy_intp key_len,
@@ -153,7 +153,7 @@ argbinsearch_@side@_@suff@(const char *arr, const char *key,
* #CMP = <, <=#
*/
-NPY_VISIBILITY_HIDDEN void
+NPY_NO_EXPORT void
npy_binsearch_@side@(const char *arr, const char *key, char *ret,
npy_intp arr_len, npy_intp key_len,
npy_intp arr_str, npy_intp key_str, npy_intp ret_str,
@@ -195,7 +195,7 @@ npy_binsearch_@side@(const char *arr, const char *key, char *ret,
}
}
-NPY_VISIBILITY_HIDDEN int
+NPY_NO_EXPORT int
npy_argbinsearch_@side@(const char *arr, const char *key,
const char *sort, char *ret,
npy_intp arr_len, npy_intp key_len,
diff --git a/numpy/core/src/npysort/heapsort.c.src b/numpy/core/src/npysort/heapsort.c.src
index c2e3b63cb..4bfea1388 100644
--- a/numpy/core/src/npysort/heapsort.c.src
+++ b/numpy/core/src/npysort/heapsort.c.src
@@ -60,7 +60,7 @@
* npy_cdouble, npy_clongdouble, npy_datetime, npy_timedelta#
*/
-int
+NPY_NO_EXPORT int
heapsort_@suff@(void *start, npy_intp n, void *NOT_USED)
{
@type@ tmp, *a;
@@ -111,7 +111,7 @@ heapsort_@suff@(void *start, npy_intp n, void *NOT_USED)
}
-int
+NPY_NO_EXPORT int
aheapsort_@suff@(void *vv, npy_intp *tosort, npy_intp n, void *NOT_USED)
{
@type@ *v = vv;
@@ -177,7 +177,7 @@ aheapsort_@suff@(void *vv, npy_intp *tosort, npy_intp n, void *NOT_USED)
* #type = npy_char, npy_ucs4#
*/
-int
+NPY_NO_EXPORT int
heapsort_@suff@(void *start, npy_intp n, void *varr)
{
PyArrayObject *arr = varr;
@@ -231,7 +231,7 @@ heapsort_@suff@(void *start, npy_intp n, void *varr)
}
-int
+NPY_NO_EXPORT int
aheapsort_@suff@(void *vv, npy_intp *tosort, npy_intp n, void *varr)
{
@type@ *v = vv;
@@ -291,7 +291,7 @@ aheapsort_@suff@(void *vv, npy_intp *tosort, npy_intp n, void *varr)
*/
-int
+NPY_NO_EXPORT int
npy_heapsort(void *start, npy_intp num, void *varr)
{
PyArrayObject *arr = varr;
@@ -348,7 +348,7 @@ npy_heapsort(void *start, npy_intp num, void *varr)
}
-int
+NPY_NO_EXPORT int
npy_aheapsort(void *vv, npy_intp *tosort, npy_intp n, void *varr)
{
char *v = vv;
diff --git a/numpy/core/src/npysort/mergesort.c.src b/numpy/core/src/npysort/mergesort.c.src
index 6f659617a..f83fbf758 100644
--- a/numpy/core/src/npysort/mergesort.c.src
+++ b/numpy/core/src/npysort/mergesort.c.src
@@ -103,7 +103,7 @@ mergesort0_@suff@(@type@ *pl, @type@ *pr, @type@ *pw)
}
-int
+NPY_NO_EXPORT int
mergesort_@suff@(void *start, npy_intp num, void *NOT_USED)
{
@type@ *pl, *pr, *pw;
@@ -166,7 +166,7 @@ amergesort0_@suff@(npy_intp *pl, npy_intp *pr, @type@ *v, npy_intp *pw)
}
-int
+NPY_NO_EXPORT int
amergesort_@suff@(void *v, npy_intp *tosort, npy_intp num, void *NOT_USED)
{
npy_intp *pl, *pr, *pw;
@@ -245,7 +245,7 @@ mergesort0_@suff@(@type@ *pl, @type@ *pr, @type@ *pw, @type@ *vp, size_t len)
}
-int
+NPY_NO_EXPORT int
mergesort_@suff@(void *start, npy_intp num, void *varr)
{
PyArrayObject *arr = varr;
@@ -326,7 +326,7 @@ amergesort0_@suff@(npy_intp *pl, npy_intp *pr, @type@ *v, npy_intp *pw, size_t l
}
-int
+NPY_NO_EXPORT int
amergesort_@suff@(void *v, npy_intp *tosort, npy_intp num, void *varr)
{
PyArrayObject *arr = varr;
@@ -407,7 +407,7 @@ npy_mergesort0(char *pl, char *pr, char *pw, char *vp, npy_intp elsize,
}
-int
+NPY_NO_EXPORT int
npy_mergesort(void *start, npy_intp num, void *varr)
{
PyArrayObject *arr = varr;
@@ -485,7 +485,7 @@ npy_amergesort0(npy_intp *pl, npy_intp *pr, char *v, npy_intp *pw,
}
-int
+NPY_NO_EXPORT int
npy_amergesort(void *v, npy_intp *tosort, npy_intp num, void *varr)
{
PyArrayObject *arr = varr;
diff --git a/numpy/core/src/npysort/quicksort.c.src b/numpy/core/src/npysort/quicksort.c.src
index 49a2c4906..933f75808 100644
--- a/numpy/core/src/npysort/quicksort.c.src
+++ b/numpy/core/src/npysort/quicksort.c.src
@@ -85,7 +85,7 @@
* npy_cdouble, npy_clongdouble, npy_datetime, npy_timedelta#
*/
-int
+NPY_NO_EXPORT int
quicksort_@suff@(void *start, npy_intp num, void *NOT_USED)
{
@type@ vp;
@@ -160,7 +160,7 @@ stack_pop:
}
-int
+NPY_NO_EXPORT int
aquicksort_@suff@(void *vv, npy_intp* tosort, npy_intp num, void *NOT_USED)
{
@type@ *v = vv;
@@ -253,7 +253,7 @@ stack_pop:
* #type = npy_char, npy_ucs4#
*/
-int
+NPY_NO_EXPORT int
quicksort_@suff@(void *start, npy_intp num, void *varr)
{
PyArrayObject *arr = varr;
@@ -341,7 +341,7 @@ stack_pop:
}
-int
+NPY_NO_EXPORT int
aquicksort_@suff@(void *vv, npy_intp* tosort, npy_intp num, void *varr)
{
@type@ *v = vv;
@@ -434,7 +434,7 @@ stack_pop:
*/
-int
+NPY_NO_EXPORT int
npy_quicksort(void *start, npy_intp num, void *varr)
{
PyArrayObject *arr = varr;
@@ -539,7 +539,7 @@ stack_pop:
}
-int
+NPY_NO_EXPORT int
npy_aquicksort(void *vv, npy_intp* tosort, npy_intp num, void *varr)
{
char *v = vv;
diff --git a/numpy/core/src/npysort/radixsort.c.src b/numpy/core/src/npysort/radixsort.c.src
index 72887d7e4..99d8ed42a 100644
--- a/numpy/core/src/npysort/radixsort.c.src
+++ b/numpy/core/src/npysort/radixsort.c.src
@@ -46,7 +46,7 @@ nth_byte_@suff@(@type@ key, npy_intp l) {
return (key >> (l << 3)) & 0xFF;
}
-@type@*
+static @type@*
radixsort0_@suff@(@type@ *arr, @type@ *aux, npy_intp num)
{
npy_intp cnt[sizeof(@type@)][1 << 8] = { { 0 } };
@@ -95,7 +95,7 @@ radixsort0_@suff@(@type@ *arr, @type@ *aux, npy_intp num)
return arr;
}
-int
+NPY_NO_EXPORT int
radixsort_@suff@(void *start, npy_intp num, void *NPY_UNUSED(varr))
{
void *sorted;
@@ -136,7 +136,7 @@ radixsort_@suff@(void *start, npy_intp num, void *NPY_UNUSED(varr))
return 0;
}
-npy_intp*
+static npy_intp*
aradixsort0_@suff@(@type@ *arr, npy_intp *aux, npy_intp *tosort, npy_intp num)
{
npy_intp cnt[sizeof(@type@)][1 << 8] = { { 0 } };
@@ -185,7 +185,7 @@ aradixsort0_@suff@(@type@ *arr, npy_intp *aux, npy_intp *tosort, npy_intp num)
return tosort;
}
-int
+NPY_NO_EXPORT int
aradixsort_@suff@(void *start, npy_intp* tosort, npy_intp num, void *NPY_UNUSED(varr))
{
npy_intp *sorted;
diff --git a/numpy/core/src/npysort/selection.c.src b/numpy/core/src/npysort/selection.c.src
index be645450f..4fd955200 100644
--- a/numpy/core/src/npysort/selection.c.src
+++ b/numpy/core/src/npysort/selection.c.src
@@ -280,7 +280,7 @@ static int
* kth 8: 0 1 2 3 4 5 6 [8 7] -> stack []
*
*/
-int
+NPY_NO_EXPORT int
@name@introselect_@suff@(@type@ *v,
#if @arg@
npy_intp* tosort,
diff --git a/numpy/core/src/npysort/timsort.c.src b/numpy/core/src/npysort/timsort.c.src
index 26313ca5b..3fdd46f61 100644
--- a/numpy/core/src/npysort/timsort.c.src
+++ b/numpy/core/src/npysort/timsort.c.src
@@ -42,7 +42,7 @@
-npy_intp compute_min_run(npy_intp num)
+static npy_intp compute_min_run(npy_intp num)
{
npy_intp r = 0;
@@ -476,7 +476,7 @@ force_collapse_@suff@(@type@ *arr, run *stack, npy_intp *stack_ptr,
}
-int
+NPY_NO_EXPORT int
timsort_@suff@(void *start, npy_intp num, void *NPY_UNUSED(varr))
{
int ret;
@@ -854,7 +854,7 @@ aforce_collapse_@suff@(@type@ *arr, npy_intp *tosort, run *stack,
}
-int
+NPY_NO_EXPORT int
atimsort_@suff@(void *v, npy_intp *tosort, npy_intp num,
void *NPY_UNUSED(varr))
{
@@ -904,7 +904,7 @@ cleanup:
* run length to reduce the cost of insertion sort.
*/
-npy_intp compute_min_run_short(npy_intp num)
+static npy_intp compute_min_run_short(npy_intp num)
{
npy_intp r = 0;
@@ -1303,7 +1303,7 @@ force_collapse_@suff@(@type@ *arr, run *stack, npy_intp *stack_ptr,
}
-int
+NPY_NO_EXPORT int
timsort_@suff@(void *start, npy_intp num, void *varr)
{
PyArrayObject *arr = varr;
@@ -1691,7 +1691,7 @@ aforce_collapse_@suff@(@type@ *arr, npy_intp *tosort, run *stack,
}
-int
+NPY_NO_EXPORT int
atimsort_@suff@(void *start, npy_intp *tosort, npy_intp num, void *varr)
{
PyArrayObject *arr = varr;
@@ -2128,7 +2128,7 @@ npy_force_collapse(char *arr, run *stack, npy_intp *stack_ptr,
}
-int
+NPY_NO_EXPORT int
npy_timsort(void *start, npy_intp num, void *varr)
{
PyArrayObject *arr = varr;
@@ -2524,7 +2524,7 @@ npy_aforce_collapse(char *arr, npy_intp *tosort, run *stack,
}
-int
+NPY_NO_EXPORT int
npy_atimsort(void *start, npy_intp *tosort, npy_intp num, void *varr)
{
PyArrayObject *arr = varr;
diff --git a/numpy/core/src/umath/_rational_tests.c.src b/numpy/core/src/umath/_rational_tests.c.src
index 13e33d0a5..08c259d98 100644
--- a/numpy/core/src/umath/_rational_tests.c.src
+++ b/numpy/core/src/umath/_rational_tests.c.src
@@ -406,8 +406,9 @@ pyrational_new(PyTypeObject* type, PyObject* args, PyObject* kwds) {
Py_INCREF(x[0]);
return x[0];
}
- else if (PyString_Check(x[0])) {
- const char* s = PyString_AS_STRING(x[0]);
+ // TODO: allow construction from unicode strings
+ else if (PyBytes_Check(x[0])) {
+ const char* s = PyBytes_AS_STRING(x[0]);
rational x;
if (scan_rational(&s,&x)) {
const char* p;
@@ -429,7 +430,7 @@ pyrational_new(PyTypeObject* type, PyObject* args, PyObject* kwds) {
PyObject* y;
int eq;
x[i] = PyTuple_GET_ITEM(args, i);
- n[i] = PyInt_AsLong(x[i]);
+ n[i] = PyLong_AsLong(x[i]);
if (error_converting(n[i])) {
if (PyErr_ExceptionMatches(PyExc_TypeError)) {
PyErr_Format(PyExc_TypeError,
@@ -440,7 +441,7 @@ pyrational_new(PyTypeObject* type, PyObject* args, PyObject* kwds) {
return 0;
}
/* Check that we had an exact integer */
- y = PyInt_FromLong(n[i]);
+ y = PyLong_FromLong(n[i]);
if (!y) {
return 0;
}
@@ -477,7 +478,7 @@ pyrational_new(PyTypeObject* type, PyObject* args, PyObject* kwds) {
else { \
PyObject* y_; \
int eq_; \
- long n_ = PyInt_AsLong(object); \
+ long n_ = PyLong_AsLong(object); \
if (error_converting(n_)) { \
if (PyErr_ExceptionMatches(PyExc_TypeError)) { \
PyErr_Clear(); \
@@ -486,7 +487,7 @@ pyrational_new(PyTypeObject* type, PyObject* args, PyObject* kwds) {
} \
return 0; \
} \
- y_ = PyInt_FromLong(n_); \
+ y_ = PyLong_FromLong(n_); \
if (!y_) { \
return 0; \
} \
@@ -526,11 +527,11 @@ static PyObject*
pyrational_repr(PyObject* self) {
rational x = ((PyRational*)self)->r;
if (d(x)!=1) {
- return PyUString_FromFormat(
+ return PyUnicode_FromFormat(
"rational(%ld,%ld)",(long)x.n,(long)d(x));
}
else {
- return PyUString_FromFormat(
+ return PyUnicode_FromFormat(
"rational(%ld)",(long)x.n);
}
}
@@ -539,11 +540,11 @@ static PyObject*
pyrational_str(PyObject* self) {
rational x = ((PyRational*)self)->r;
if (d(x)!=1) {
- return PyUString_FromFormat(
+ return PyUnicode_FromFormat(
"%ld/%ld",(long)x.n,(long)d(x));
}
else {
- return PyUString_FromFormat(
+ return PyUnicode_FromFormat(
"%ld",(long)x.n);
}
}
@@ -590,7 +591,7 @@ RATIONAL_BINOP_2(floor_divide,
}
RATIONAL_UNOP(negative,rational,rational_negative(x),PyRational_FromRational)
RATIONAL_UNOP(absolute,rational,rational_abs(x),PyRational_FromRational)
-RATIONAL_UNOP(int,long,rational_int(x),PyInt_FromLong)
+RATIONAL_UNOP(int,long,rational_int(x),PyLong_FromLong)
RATIONAL_UNOP(float,double,rational_double(x),PyFloat_FromDouble)
static PyObject*
@@ -646,12 +647,12 @@ static PyNumberMethods pyrational_as_number = {
static PyObject*
pyrational_n(PyObject* self, void* closure) {
- return PyInt_FromLong(((PyRational*)self)->r.n);
+ return PyLong_FromLong(((PyRational*)self)->r.n);
}
static PyObject*
pyrational_d(PyObject* self, void* closure) {
- return PyInt_FromLong(d(((PyRational*)self)->r));
+ return PyLong_FromLong(d(((PyRational*)self)->r));
}
static PyGetSetDef pyrational_getset[] = {
@@ -726,17 +727,17 @@ npyrational_setitem(PyObject* item, void* data, void* arr) {
r = ((PyRational*)item)->r;
}
else {
- long n = PyInt_AsLong(item);
+ long long n = PyLong_AsLongLong(item);
PyObject* y;
int eq;
if (error_converting(n)) {
return -1;
}
- y = PyInt_FromLong(n);
+ y = PyLong_FromLongLong(n);
if (!y) {
return -1;
}
- eq = PyObject_RichCompareBool(item,y,Py_EQ);
+ eq = PyObject_RichCompareBool(item, y, Py_EQ);
Py_DECREF(y);
if (eq<0) {
return -1;
@@ -748,7 +749,7 @@ npyrational_setitem(PyObject* item, void* data, void* arr) {
}
r = make_rational_int(n);
}
- memcpy(data,&r,sizeof(rational));
+ memcpy(data, &r, sizeof(rational));
return 0;
}
@@ -1126,7 +1127,7 @@ PyMODINIT_FUNC PyInit__rational_tests(void) {
if (PyErr_Occurred()) {
goto fail;
}
- numpy_str = PyUString_FromString("numpy");
+ numpy_str = PyUnicode_FromString("numpy");
if (!numpy_str) {
goto fail;
}
diff --git a/numpy/core/src/umath/_umath_tests.c.src b/numpy/core/src/umath/_umath_tests.c.src
index d08aabd64..660c296d6 100644
--- a/numpy/core/src/umath/_umath_tests.c.src
+++ b/numpy/core/src/umath/_umath_tests.c.src
@@ -480,7 +480,7 @@ UMath_Tests_test_signature(PyObject *NPY_UNUSED(dummy), PyObject *args)
return NULL;
}
- if (PyString_Check(signature)) {
+ if (PyBytes_Check(signature)) {
sig_str = signature;
} else if (PyUnicode_Check(signature)) {
sig_str = PyUnicode_AsUTF8String(signature);
@@ -493,7 +493,7 @@ UMath_Tests_test_signature(PyObject *NPY_UNUSED(dummy), PyObject *args)
NULL, NULL, NULL,
0, nin, nout, PyUFunc_None, "no name",
"doc:none",
- 1, PyString_AS_STRING(sig_str));
+ 1, PyBytes_AS_STRING(sig_str));
if (sig_str != signature) {
Py_DECREF(sig_str);
}
@@ -588,11 +588,11 @@ static PyObject *
UMath_Tests_test_dispatch(PyObject *NPY_UNUSED(dummy), PyObject *NPY_UNUSED(dummy2))
{
const char *highest_func, *highest_var;
- NPY_CPU_DISPATCH_CALL(highest_func = _umath_tests_dispatch_func, ())
- NPY_CPU_DISPATCH_CALL(highest_var = _umath_tests_dispatch_var)
+ NPY_CPU_DISPATCH_CALL(highest_func = _umath_tests_dispatch_func, ());
+ NPY_CPU_DISPATCH_CALL(highest_var = _umath_tests_dispatch_var);
const char *highest_func_xb = "nobase", *highest_var_xb = "nobase";
- NPY_CPU_DISPATCH_CALL_XB(highest_func_xb = _umath_tests_dispatch_func, ())
- NPY_CPU_DISPATCH_CALL_XB(highest_var_xb = _umath_tests_dispatch_var)
+ NPY_CPU_DISPATCH_CALL_XB(highest_func_xb = _umath_tests_dispatch_func, ());
+ NPY_CPU_DISPATCH_CALL_XB(highest_var_xb = _umath_tests_dispatch_var);
PyObject *dict = PyDict_New(), *item;
if (dict == NULL) {
@@ -610,7 +610,7 @@ UMath_Tests_test_dispatch(PyObject *NPY_UNUSED(dummy), PyObject *NPY_UNUSED(dumm
if (item == NULL || PyDict_SetItemString(dict, "all", item) < 0) {
goto err;
}
- NPY_CPU_DISPATCH_CALL_ALL(_umath_tests_dispatch_attach, (item))
+ NPY_CPU_DISPATCH_CALL_ALL(_umath_tests_dispatch_attach, (item));
if (PyErr_Occurred()) {
goto err;
}
@@ -671,7 +671,7 @@ PyMODINIT_FUNC PyInit__umath_tests(void) {
d = PyModule_GetDict(m);
- version = PyString_FromString("0.1");
+ version = PyUnicode_FromString("0.1");
PyDict_SetItemString(d, "__version__", version);
Py_DECREF(version);
diff --git a/numpy/core/src/umath/extobj.c b/numpy/core/src/umath/extobj.c
index 3404a0c6a..cd81f7734 100644
--- a/numpy/core/src/umath/extobj.c
+++ b/numpy/core/src/umath/extobj.c
@@ -109,8 +109,8 @@ _error_handler(int method, PyObject *errobj, char *errtype, int retstatus, int *
errtype, name);
goto fail;
}
- args = Py_BuildValue("NN", PyUString_FromString(errtype),
- PyInt_FromLong((long) retstatus));
+ args = Py_BuildValue("NN", PyUnicode_FromString(errtype),
+ PyLong_FromLong((long) retstatus));
if (args == NULL) {
goto fail;
}
@@ -212,7 +212,7 @@ _extract_pyvals(PyObject *ref, const char *name, int *bufsize,
}
if (bufsize != NULL) {
- *bufsize = PyInt_AsLong(PyList_GET_ITEM(ref, 0));
+ *bufsize = PyLong_AsLong(PyList_GET_ITEM(ref, 0));
if (error_converting(*bufsize)) {
return -1;
}
@@ -229,7 +229,7 @@ _extract_pyvals(PyObject *ref, const char *name, int *bufsize,
}
if (errmask != NULL) {
- *errmask = PyInt_AsLong(PyList_GET_ITEM(ref, 1));
+ *errmask = PyLong_AsLong(PyList_GET_ITEM(ref, 1));
if (*errmask < 0) {
if (PyErr_Occurred()) {
return -1;
diff --git a/numpy/core/src/umath/funcs.inc.src b/numpy/core/src/umath/funcs.inc.src
index 273779ee8..9b04dc779 100644
--- a/numpy/core/src/umath/funcs.inc.src
+++ b/numpy/core/src/umath/funcs.inc.src
@@ -26,13 +26,13 @@ Py_square(PyObject *o)
static PyObject *
Py_get_one(PyObject *NPY_UNUSED(o))
{
- return PyInt_FromLong(1);
+ return PyLong_FromLong(1);
}
static PyObject *
Py_reciprocal(PyObject *o)
{
- PyObject *one = PyInt_FromLong(1);
+ PyObject *one = PyLong_FromLong(1);
PyObject *result;
if (!one) {
diff --git a/numpy/core/src/umath/override.c b/numpy/core/src/umath/override.c
index bf6e5a698..a0090e302 100644
--- a/numpy/core/src/umath/override.c
+++ b/numpy/core/src/umath/override.c
@@ -605,7 +605,7 @@ PyUFunc_CheckOverride(PyUFuncObject *ufunc, char *method,
goto fail;
}
- method_name = PyUString_FromString(method);
+ method_name = PyUnicode_FromString(method);
if (method_name == NULL) {
goto fail;
}
diff --git a/numpy/core/src/umath/reduction.c b/numpy/core/src/umath/reduction.c
index d0fb2f6ed..f1423d8b9 100644
--- a/numpy/core/src/umath/reduction.c
+++ b/numpy/core/src/umath/reduction.c
@@ -16,7 +16,6 @@
#include "npy_config.h"
#include <numpy/arrayobject.h>
-#include "npy_config.h"
#include "npy_pycompat.h"
#include "ctors.h"
diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c
index b47ccd291..f693eb5c2 100644
--- a/numpy/core/src/umath/ufunc_object.c
+++ b/numpy/core/src/umath/ufunc_object.c
@@ -2432,15 +2432,15 @@ _get_identity(PyUFuncObject *ufunc, npy_bool *reorderable) {
switch(ufunc->identity) {
case PyUFunc_One:
*reorderable = 1;
- return PyInt_FromLong(1);
+ return PyLong_FromLong(1);
case PyUFunc_Zero:
*reorderable = 1;
- return PyInt_FromLong(0);
+ return PyLong_FromLong(0);
case PyUFunc_MinusOne:
*reorderable = 1;
- return PyInt_FromLong(-1);
+ return PyLong_FromLong(-1);
case PyUFunc_ReorderableNone:
*reorderable = 1;
@@ -3318,7 +3318,6 @@ get_binary_op_function(PyUFuncObject *ufunc, int *otype,
void **out_innerloopdata)
{
int i;
- PyUFunc_Loop1d *funcdata;
NPY_UF_DBG_PRINT1("Getting binary op function for type number %d\n",
*otype);
@@ -3326,7 +3325,7 @@ get_binary_op_function(PyUFuncObject *ufunc, int *otype,
/* If the type is custom and there are userloops, search for it here */
if (ufunc->userloops != NULL && PyTypeNum_ISUSERDEF(*otype)) {
PyObject *key, *obj;
- key = PyInt_FromLong(*otype);
+ key = PyLong_FromLong(*otype);
if (key == NULL) {
return -1;
}
@@ -3336,7 +3335,10 @@ get_binary_op_function(PyUFuncObject *ufunc, int *otype,
return -1;
}
else if (obj != NULL) {
- funcdata = (PyUFunc_Loop1d *)NpyCapsule_AsVoidPtr(obj);
+ PyUFunc_Loop1d *funcdata = PyCapsule_GetPointer(obj, NULL);
+ if (funcdata == NULL) {
+ return -1;
+ }
while (funcdata != NULL) {
int *types = funcdata->arg_types;
@@ -4834,8 +4836,8 @@ ufunc_geterr(PyObject *NPY_UNUSED(dummy), PyObject *args)
if (res == NULL) {
return NULL;
}
- PyList_SET_ITEM(res, 0, PyInt_FromLong(NPY_BUFSIZE));
- PyList_SET_ITEM(res, 1, PyInt_FromLong(UFUNC_ERR_DEFAULT));
+ PyList_SET_ITEM(res, 0, PyLong_FromLong(NPY_BUFSIZE));
+ PyList_SET_ITEM(res, 1, PyLong_FromLong(UFUNC_ERR_DEFAULT));
PyList_SET_ITEM(res, 2, Py_None); Py_INCREF(Py_None);
return res;
}
@@ -5155,7 +5157,7 @@ PyUFunc_RegisterLoopForDescr(PyUFuncObject *ufunc,
return -1;
}
- key = PyInt_FromLong((long) user_dtype->type_num);
+ key = PyLong_FromLong((long) user_dtype->type_num);
if (key == NULL) {
return -1;
}
@@ -5190,9 +5192,12 @@ PyUFunc_RegisterLoopForDescr(PyUFuncObject *ufunc,
result = -1;
}
else {
- PyUFunc_Loop1d *current;
int cmp = 1;
- current = (PyUFunc_Loop1d *)NpyCapsule_AsVoidPtr(cobj);
+ PyUFunc_Loop1d *current = PyCapsule_GetPointer(cobj, NULL);
+ if (current == NULL) {
+ result = -1;
+ goto done;
+ }
while (current != NULL) {
cmp = cmp_arg_types(current->arg_types,
arg_typenums, ufunc->nargs);
@@ -5226,6 +5231,7 @@ PyUFunc_RegisterLoopForDescr(PyUFuncObject *ufunc,
}
}
+done:
PyArray_free(arg_typenums);
Py_DECREF(key);
@@ -5257,7 +5263,7 @@ PyUFunc_RegisterLoopForType(PyUFuncObject *ufunc,
if (ufunc->userloops == NULL) {
ufunc->userloops = PyDict_New();
}
- key = PyInt_FromLong((long) usertype);
+ key = PyLong_FromLong((long) usertype);
if (key == NULL) {
return -1;
}
@@ -5294,7 +5300,7 @@ PyUFunc_RegisterLoopForType(PyUFuncObject *ufunc,
}
/* If it's not there, then make one and return. */
else if (cobj == NULL) {
- cobj = NpyCapsule_FromVoidPtr((void *)funcdata, _loop1d_list_free);
+ cobj = PyCapsule_New((void *)funcdata, NULL, _loop1d_list_free);
if (cobj == NULL) {
goto fail;
}
@@ -5312,7 +5318,10 @@ PyUFunc_RegisterLoopForType(PyUFuncObject *ufunc,
* is exactly like this one, then just replace.
* Otherwise insert.
*/
- current = (PyUFunc_Loop1d *)NpyCapsule_AsVoidPtr(cobj);
+ current = PyCapsule_GetPointer(cobj, NULL);
+ if (current == NULL) {
+ goto fail;
+ }
while (current != NULL) {
cmp = cmp_arg_types(current->arg_types, newtypes, ufunc->nargs);
if (cmp >= 0) {
@@ -5383,7 +5392,7 @@ ufunc_dealloc(PyUFuncObject *ufunc)
static PyObject *
ufunc_repr(PyUFuncObject *ufunc)
{
- return PyUString_FromFormat("<ufunc '%s'>", ufunc->name);
+ return PyUnicode_FromFormat("<ufunc '%s'>", ufunc->name);
}
static int
@@ -5995,7 +6004,7 @@ ufunc_get_doc(PyUFuncObject *ufunc)
}
if (ufunc->doc != NULL) {
PyUString_ConcatAndDel(&doc,
- PyUString_FromFormat("\n\n%s", ufunc->doc));
+ PyUnicode_FromFormat("\n\n%s", ufunc->doc));
}
return doc;
}
@@ -6003,25 +6012,25 @@ ufunc_get_doc(PyUFuncObject *ufunc)
static PyObject *
ufunc_get_nin(PyUFuncObject *ufunc)
{
- return PyInt_FromLong(ufunc->nin);
+ return PyLong_FromLong(ufunc->nin);
}
static PyObject *
ufunc_get_nout(PyUFuncObject *ufunc)
{
- return PyInt_FromLong(ufunc->nout);
+ return PyLong_FromLong(ufunc->nout);
}
static PyObject *
ufunc_get_nargs(PyUFuncObject *ufunc)
{
- return PyInt_FromLong(ufunc->nargs);
+ return PyLong_FromLong(ufunc->nargs);
}
static PyObject *
ufunc_get_ntypes(PyUFuncObject *ufunc)
{
- return PyInt_FromLong(ufunc->ntypes);
+ return PyLong_FromLong(ufunc->ntypes);
}
static PyObject *
@@ -6051,7 +6060,7 @@ ufunc_get_types(PyUFuncObject *ufunc)
t[ni + 2 + j] = _typecharfromnum(ufunc->types[n]);
n++;
}
- str = PyUString_FromStringAndSize(t, no + ni + 2);
+ str = PyUnicode_FromStringAndSize(t, no + ni + 2);
PyList_SET_ITEM(list, k, str);
}
PyArray_free(t);
@@ -6061,7 +6070,7 @@ ufunc_get_types(PyUFuncObject *ufunc)
static PyObject *
ufunc_get_name(PyUFuncObject *ufunc)
{
- return PyUString_FromString(ufunc->name);
+ return PyUnicode_FromString(ufunc->name);
}
static PyObject *
@@ -6077,7 +6086,7 @@ ufunc_get_signature(PyUFuncObject *ufunc)
if (!ufunc->core_enabled) {
Py_RETURN_NONE;
}
- return PyUString_FromString(ufunc->core_signature);
+ return PyUnicode_FromString(ufunc->core_signature);
}
#undef _typecharfromnum
diff --git a/numpy/core/src/umath/ufunc_type_resolution.c b/numpy/core/src/umath/ufunc_type_resolution.c
index ea20bb24f..aa6f34d59 100644
--- a/numpy/core/src/umath/ufunc_type_resolution.c
+++ b/numpy/core/src/umath/ufunc_type_resolution.c
@@ -36,17 +36,17 @@ npy_casting_to_py_object(NPY_CASTING casting)
{
switch (casting) {
case NPY_NO_CASTING:
- return PyUString_FromString("no");
+ return PyUnicode_FromString("no");
case NPY_EQUIV_CASTING:
- return PyUString_FromString("equiv");
+ return PyUnicode_FromString("equiv");
case NPY_SAFE_CASTING:
- return PyUString_FromString("safe");
+ return PyUnicode_FromString("safe");
case NPY_SAME_KIND_CASTING:
- return PyUString_FromString("same_kind");
+ return PyUnicode_FromString("same_kind");
case NPY_UNSAFE_CASTING:
- return PyUString_FromString("unsafe");
+ return PyUnicode_FromString("unsafe");
default:
- return PyInt_FromLong(casting);
+ return PyLong_FromLong(casting);
}
}
@@ -1336,7 +1336,6 @@ find_userloop(PyUFuncObject *ufunc,
void **out_innerloopdata)
{
npy_intp i, nin = ufunc->nin, j, nargs = nin + ufunc->nout;
- PyUFunc_Loop1d *funcdata;
/* Use this to try to avoid repeating the same userdef loop search */
int last_userdef = -1;
@@ -1356,7 +1355,7 @@ find_userloop(PyUFuncObject *ufunc,
last_userdef = type_num;
- key = PyInt_FromLong(type_num);
+ key = PyLong_FromLong(type_num);
if (key == NULL) {
return -1;
}
@@ -1368,9 +1367,11 @@ find_userloop(PyUFuncObject *ufunc,
else if (obj == NULL) {
continue;
}
- for (funcdata = (PyUFunc_Loop1d *)NpyCapsule_AsVoidPtr(obj);
- funcdata != NULL;
- funcdata = funcdata->next) {
+ PyUFunc_Loop1d *funcdata = PyCapsule_GetPointer(obj, NULL);
+ if (funcdata == NULL) {
+ return -1;
+ }
+ for (; funcdata != NULL; funcdata = funcdata->next) {
int *types = funcdata->arg_types;
for (j = 0; j < nargs; ++j) {
@@ -1744,7 +1745,6 @@ linear_search_userloop_type_resolver(PyUFuncObject *self,
char *out_err_dst_typecode)
{
npy_intp i, nop = self->nin + self->nout;
- PyUFunc_Loop1d *funcdata;
/* Use this to try to avoid repeating the same userdef loop search */
int last_userdef = -1;
@@ -1764,7 +1764,7 @@ linear_search_userloop_type_resolver(PyUFuncObject *self,
last_userdef = type_num;
- key = PyInt_FromLong(type_num);
+ key = PyLong_FromLong(type_num);
if (key == NULL) {
return -1;
}
@@ -1776,9 +1776,11 @@ linear_search_userloop_type_resolver(PyUFuncObject *self,
else if (obj == NULL) {
continue;
}
- for (funcdata = (PyUFunc_Loop1d *)NpyCapsule_AsVoidPtr(obj);
- funcdata != NULL;
- funcdata = funcdata->next) {
+ PyUFunc_Loop1d *funcdata = PyCapsule_GetPointer(obj, NULL);
+ if (funcdata == NULL) {
+ return -1;
+ }
+ for (; funcdata != NULL; funcdata = funcdata->next) {
int *types = funcdata->arg_types;
switch (ufunc_loop_matches(self, op,
input_casting, output_casting,
@@ -1816,7 +1818,6 @@ type_tuple_userloop_type_resolver(PyUFuncObject *self,
PyArray_Descr **out_dtype)
{
int i, j, nin = self->nin, nop = nin + self->nout;
- PyUFunc_Loop1d *funcdata;
/* Use this to try to avoid repeating the same userdef loop search */
int last_userdef = -1;
@@ -1831,7 +1832,7 @@ type_tuple_userloop_type_resolver(PyUFuncObject *self,
last_userdef = type_num;
- key = PyInt_FromLong(type_num);
+ key = PyLong_FromLong(type_num);
if (key == NULL) {
return -1;
}
@@ -1844,9 +1845,11 @@ type_tuple_userloop_type_resolver(PyUFuncObject *self,
continue;
}
- for (funcdata = (PyUFunc_Loop1d *)NpyCapsule_AsVoidPtr(obj);
- funcdata != NULL;
- funcdata = funcdata->next) {
+ PyUFunc_Loop1d *funcdata = PyCapsule_GetPointer(obj, NULL);
+ if (funcdata == NULL) {
+ return -1;
+ }
+ for (; funcdata != NULL; funcdata = funcdata->next) {
int *types = funcdata->arg_types;
int matched = 1;
diff --git a/numpy/core/src/umath/umathmodule.c b/numpy/core/src/umath/umathmodule.c
index 708a27ad0..474db0245 100644
--- a/numpy/core/src/umath/umathmodule.c
+++ b/numpy/core/src/umath/umathmodule.c
@@ -75,7 +75,8 @@ ufunc_frompyfunc(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds) {
int nin, nout, i, nargs;
PyUFunc_PyFuncData *fdata;
PyUFuncObject *self;
- char *fname, *str, *types, *doc;
+ const char *fname = NULL;
+ char *str, *types, *doc;
Py_ssize_t fname_len = -1;
void * ptr, **data;
int offset[2];
@@ -95,12 +96,12 @@ ufunc_frompyfunc(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds) {
pyname = PyObject_GetAttrString(function, "__name__");
if (pyname) {
- (void) PyString_AsStringAndSize(pyname, &fname, &fname_len);
+ fname = PyUnicode_AsUTF8AndSize(pyname, &fname_len);
}
- if (PyErr_Occurred()) {
+ if (fname == NULL) {
+ PyErr_Clear();
fname = "?";
fname_len = 1;
- PyErr_Clear();
}
/*
@@ -237,23 +238,23 @@ NPY_VISIBILITY_HIDDEN PyObject *npy_um_str_pyvals_name = NULL;
static int
intern_strings(void)
{
- if (!(npy_um_str_out = PyUString_InternFromString("out"))) return -1;
- if (!(npy_um_str_where = PyUString_InternFromString("where"))) return -1;
- if (!(npy_um_str_axes = PyUString_InternFromString("axes"))) return -1;
- if (!(npy_um_str_axis = PyUString_InternFromString("axis"))) return -1;
- if (!(npy_um_str_keepdims = PyUString_InternFromString("keepdims"))) return -1;
- if (!(npy_um_str_casting = PyUString_InternFromString("casting"))) return -1;
- if (!(npy_um_str_order = PyUString_InternFromString("order"))) return -1;
- if (!(npy_um_str_dtype = PyUString_InternFromString("dtype"))) return -1;
- if (!(npy_um_str_subok = PyUString_InternFromString("subok"))) return -1;
- if (!(npy_um_str_signature = PyUString_InternFromString("signature"))) return -1;
- if (!(npy_um_str_sig = PyUString_InternFromString("sig"))) return -1;
- if (!(npy_um_str_extobj = PyUString_InternFromString("extobj"))) return -1;
- if (!(npy_um_str_array_prepare = PyUString_InternFromString("__array_prepare__"))) return -1;
- if (!(npy_um_str_array_wrap = PyUString_InternFromString("__array_wrap__"))) return -1;
- if (!(npy_um_str_array_finalize = PyUString_InternFromString("__array_finalize__"))) return -1;
- if (!(npy_um_str_ufunc = PyUString_InternFromString("__array_ufunc__"))) return -1;
- if (!(npy_um_str_pyvals_name = PyUString_InternFromString(UFUNC_PYVALS_NAME))) return -1;
+ if (!(npy_um_str_out = PyUnicode_InternFromString("out"))) return -1;
+ if (!(npy_um_str_where = PyUnicode_InternFromString("where"))) return -1;
+ if (!(npy_um_str_axes = PyUnicode_InternFromString("axes"))) return -1;
+ if (!(npy_um_str_axis = PyUnicode_InternFromString("axis"))) return -1;
+ if (!(npy_um_str_keepdims = PyUnicode_InternFromString("keepdims"))) return -1;
+ if (!(npy_um_str_casting = PyUnicode_InternFromString("casting"))) return -1;
+ if (!(npy_um_str_order = PyUnicode_InternFromString("order"))) return -1;
+ if (!(npy_um_str_dtype = PyUnicode_InternFromString("dtype"))) return -1;
+ if (!(npy_um_str_subok = PyUnicode_InternFromString("subok"))) return -1;
+ if (!(npy_um_str_signature = PyUnicode_InternFromString("signature"))) return -1;
+ if (!(npy_um_str_sig = PyUnicode_InternFromString("sig"))) return -1;
+ if (!(npy_um_str_extobj = PyUnicode_InternFromString("extobj"))) return -1;
+ if (!(npy_um_str_array_prepare = PyUnicode_InternFromString("__array_prepare__"))) return -1;
+ if (!(npy_um_str_array_wrap = PyUnicode_InternFromString("__array_wrap__"))) return -1;
+ if (!(npy_um_str_array_finalize = PyUnicode_InternFromString("__array_finalize__"))) return -1;
+ if (!(npy_um_str_ufunc = PyUnicode_InternFromString("__array_ufunc__"))) return -1;
+ if (!(npy_um_str_pyvals_name = PyUnicode_InternFromString(UFUNC_PYVALS_NAME))) return -1;
return 0;
}
diff --git a/numpy/core/tests/test_array_coercion.py b/numpy/core/tests/test_array_coercion.py
index d18df2e9c..a6c8cc8b2 100644
--- a/numpy/core/tests/test_array_coercion.py
+++ b/numpy/core/tests/test_array_coercion.py
@@ -11,6 +11,7 @@ from itertools import product
import numpy as np
from numpy.core._rational_tests import rational
+from numpy.core._multiarray_umath import _discover_array_parameters
from numpy.testing import (
assert_array_equal, assert_warns, IS_PYPY)
@@ -478,6 +479,27 @@ class TestNested:
with pytest.raises(ValueError):
np.array([[], np.empty((0, 1))], dtype=object)
+ def test_array_of_different_depths(self):
+ # When multiple arrays (or array-likes) are included in a
+ # sequences and have different depth, we currently discover
+ # as many dimensions as they share. (see also gh-17224)
+ arr = np.zeros((3, 2))
+ mismatch_first_dim = np.zeros((1, 2))
+ mismatch_second_dim = np.zeros((3, 3))
+
+ dtype, shape = _discover_array_parameters(
+ [arr, mismatch_second_dim], dtype=np.dtype("O"))
+ assert shape == (2, 3)
+
+ dtype, shape = _discover_array_parameters(
+ [arr, mismatch_first_dim], dtype=np.dtype("O"))
+ assert shape == (2,)
+ # The second case is currently supported because the arrays
+ # can be stored as objects:
+ res = np.asarray([arr, mismatch_first_dim], dtype=np.dtype("O"))
+ assert res[0] is arr
+ assert res[1] is mismatch_first_dim
+
class TestBadSequences:
# These are tests for bad objects passed into `np.array`, in general
diff --git a/numpy/core/tests/test_datetime.py b/numpy/core/tests/test_datetime.py
index 59a3954fd..f725091c5 100644
--- a/numpy/core/tests/test_datetime.py
+++ b/numpy/core/tests/test_datetime.py
@@ -26,6 +26,7 @@ class TestDateTime:
def test_datetime_dtype_creation(self):
for unit in ['Y', 'M', 'W', 'D',
'h', 'm', 's', 'ms', 'us',
+ 'μs', # alias for us
'ns', 'ps', 'fs', 'as']:
dt1 = np.dtype('M8[750%s]' % unit)
assert_(dt1 == np.dtype('datetime64[750%s]' % unit))
@@ -2389,3 +2390,19 @@ class TestDateTimeData:
def test_basic(self):
a = np.array(['1980-03-23'], dtype=np.datetime64)
assert_equal(np.datetime_data(a.dtype), ('D', 1))
+
+ def test_bytes(self):
+ # byte units are converted to unicode
+ dt = np.datetime64('2000', (b'ms', 5))
+ assert np.datetime_data(dt.dtype) == ('ms', 5)
+
+ dt = np.datetime64('2000', b'5ms')
+ assert np.datetime_data(dt.dtype) == ('ms', 5)
+
+ def test_non_ascii(self):
+        # μs is normalized to us
+ dt = np.datetime64('2000', ('μs', 5))
+ assert np.datetime_data(dt.dtype) == ('us', 5)
+
+ dt = np.datetime64('2000', '5μs')
+ assert np.datetime_data(dt.dtype) == ('us', 5)
diff --git a/numpy/core/tests/test_deprecations.py b/numpy/core/tests/test_deprecations.py
index f0eac24ee..17391e80c 100644
--- a/numpy/core/tests/test_deprecations.py
+++ b/numpy/core/tests/test_deprecations.py
@@ -708,23 +708,23 @@ class TestRaggedArray(_DeprecationTestCase):
self.assert_deprecated(lambda: np.array([[0], arr], dtype=np.float64))
-class TestTrimZeros(_DeprecationTestCase):
- # Numpy 1.20.0, 2020-07-31
- @pytest.mark.parametrize(
- "arr,exc_type",
- [(np.random.rand(10, 10).tolist(), ValueError),
- (np.random.rand(10).astype(str), FutureWarning)]
- )
- def test_deprecated(self, arr, exc_type):
- with warnings.catch_warnings():
- warnings.simplefilter('error', DeprecationWarning)
- try:
- np.trim_zeros(arr)
- except DeprecationWarning as ex:
- ex_cause = ex.__cause__
- assert_(isinstance(ex_cause, exc_type))
- else:
- raise AssertionError("No error raised during function call")
-
- out = np.lib.function_base._trim_zeros_old(arr)
- assert_array_equal(arr, out)
+class FlatteningConcatenateUnsafeCast(_DeprecationTestCase):
+ # NumPy 1.20, 2020-09-03
+ message = "concatenate with `axis=None` will use same-kind casting"
+
+ def test_deprecated(self):
+ self.assert_deprecated(np.concatenate,
+ args=(([0.], [1.]),),
+ kwargs=dict(axis=None, out=np.empty(2, dtype=np.int64)))
+
+ def test_not_deprecated(self):
+ self.assert_not_deprecated(np.concatenate,
+ args=(([0.], [1.]),),
+ kwargs={'axis': None, 'out': np.empty(2, dtype=np.int64),
+ 'casting': "unsafe"})
+
+ with assert_raises(TypeError):
+ # Tests should notice if the deprecation warning is given first...
+ np.concatenate(([0.], [1.]), out=np.empty(2, dtype=np.int64),
+ casting="same_kind")
+
diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py
index 03f10bf2d..6f8af1757 100644
--- a/numpy/core/tests/test_multiarray.py
+++ b/numpy/core/tests/test_multiarray.py
@@ -21,7 +21,6 @@ import builtins
from decimal import Decimal
import numpy as np
-from numpy.compat import strchar
import numpy.core._multiarray_tests as _multiarray_tests
from numpy.testing import (
assert_, assert_raises, assert_warns, assert_equal, assert_almost_equal,
@@ -2031,7 +2030,7 @@ class TestMethods:
strtype = '>i2'
else:
strtype = '<i2'
- mydtype = [('name', strchar + '5'), ('col2', strtype)]
+ mydtype = [('name', 'U5'), ('col2', strtype)]
r = np.array([('a', 1), ('b', 255), ('c', 3), ('d', 258)],
dtype=mydtype)
r.sort(order='col2')
@@ -3868,13 +3867,6 @@ class TestPickling:
with pytest.raises(ImportError):
array.__reduce_ex__(5)
- elif sys.version_info[:2] < (3, 6):
- # when calling __reduce_ex__ explicitly with protocol=5 on python
- # raise a ValueError saying that protocol 5 is not available for
- # this python version
- with pytest.raises(ValueError):
- array.__reduce_ex__(5)
-
def test_record_array_with_object_dtype(self):
my_object = object()
@@ -7436,6 +7428,18 @@ def test_array_interface_offset():
arr1 = np.asarray(DummyArray())
assert_equal(arr1, arr[1:])
+def test_array_interface_unicode_typestr():
+ arr = np.array([1, 2, 3], dtype='int32')
+ interface = dict(arr.__array_interface__)
+ interface['typestr'] = '\N{check mark}'
+
+ class DummyArray:
+ __array_interface__ = interface
+
+ # should not be UnicodeEncodeError
+ with pytest.raises(TypeError):
+ np.asarray(DummyArray())
+
def test_flat_element_deletion():
it = np.ones(3).flat
try:
diff --git a/numpy/core/tests/test_numeric.py b/numpy/core/tests/test_numeric.py
index badf48b33..ae5ee4c88 100644
--- a/numpy/core/tests/test_numeric.py
+++ b/numpy/core/tests/test_numeric.py
@@ -14,6 +14,7 @@ from numpy.testing import (
assert_array_equal, assert_almost_equal, assert_array_almost_equal,
assert_warns, assert_array_max_ulp, HAS_REFCOUNT
)
+from numpy.core._rational_tests import rational
from hypothesis import assume, given, strategies as st
from hypothesis.extra import numpy as hynp
@@ -863,6 +864,30 @@ class TestTypes:
assert_equal(np.promote_types('<m8', '<m8'), np.dtype('m8'))
assert_equal(np.promote_types('>m8', '>m8'), np.dtype('m8'))
+ def test_can_cast_and_promote_usertypes(self):
+ # The rational type defines safe casting for signed integers,
+ # boolean. Rational itself *does* cast safely to double.
+ # (rational does not actually cast to all signed integers, e.g.
+ # int64 can be both long and longlong and it registers only the first)
+ valid_types = ["int8", "int16", "int32", "int64", "bool"]
+ invalid_types = "BHILQP" + "FDG" + "mM" + "f" + "V"
+
+ rational_dt = np.dtype(rational)
+ for numpy_dtype in valid_types:
+ numpy_dtype = np.dtype(numpy_dtype)
+ assert np.can_cast(numpy_dtype, rational_dt)
+ assert np.promote_types(numpy_dtype, rational_dt) is rational_dt
+
+ for numpy_dtype in invalid_types:
+ numpy_dtype = np.dtype(numpy_dtype)
+ assert not np.can_cast(numpy_dtype, rational_dt)
+ with pytest.raises(TypeError):
+ np.promote_types(numpy_dtype, rational_dt)
+
+ double_dt = np.dtype("double")
+ assert np.can_cast(rational_dt, double_dt)
+ assert np.promote_types(double_dt, rational_dt) is double_dt
+
def test_promote_types_strings(self):
assert_equal(np.promote_types('bool', 'S'), np.dtype('S5'))
assert_equal(np.promote_types('b', 'S'), np.dtype('S4'))
@@ -897,6 +922,110 @@ class TestTypes:
assert_equal(np.promote_types('u8', 'S1'), np.dtype('S20'))
assert_equal(np.promote_types('u8', 'S30'), np.dtype('S30'))
+ @pytest.mark.parametrize("dtype",
+ list(np.typecodes["All"]) +
+ ["i,i", "S3", "S100", "U3", "U100", rational])
+ def test_promote_identical_types_metadata(self, dtype):
+ # The same type passed in twice to promote types always
+ # preserves metadata
+ metadata = {1: 1}
+ dtype = np.dtype(dtype, metadata=metadata)
+
+ res = np.promote_types(dtype, dtype)
+ assert res.metadata == dtype.metadata
+
+ # byte-swapping preserves and makes the dtype native:
+ dtype = dtype.newbyteorder()
+ if dtype.isnative:
+ # The type does not have byte swapping
+ return
+
+ res = np.promote_types(dtype, dtype)
+ if res.char in "?bhilqpBHILQPefdgFDGOmM":
+ # Metadata is lost for simple promotions (they create a new dtype)
+ assert res.metadata is None
+ else:
+ assert res.metadata == metadata
+ if dtype.kind != "V":
+ # the result is native (except for structured void)
+ assert res.isnative
+
+ @pytest.mark.slow
+ @pytest.mark.parametrize(["dtype1", "dtype2"],
+ itertools.product(
+ list(np.typecodes["All"]) +
+ ["i,i", "S3", "S100", "U3", "U100", rational],
+ repeat=2))
+ def test_promote_types_metadata(self, dtype1, dtype2):
+ """Metadata handling in promotion does not appear formalized
+ right now in NumPy. This test should thus be considered to
+ document behaviour, rather than test the correct definition of it.
+
+ This test is very ugly, it was useful for rewriting part of the
+ promotion, but probably should eventually be replaced/deleted
+ (i.e. when metadata handling in promotion is better defined).
+ """
+ metadata1 = {1: 1}
+ metadata2 = {2: 2}
+ dtype1 = np.dtype(dtype1, metadata=metadata1)
+ dtype2 = np.dtype(dtype2, metadata=metadata2)
+
+ try:
+ res = np.promote_types(dtype1, dtype2)
+ except TypeError:
+ # Promotion failed, this test only checks metadata
+ return
+
+        # The rules for when metadata is preserved and which dtype's
+        # metadata will be used are very confusing and depend on multiple paths.
+ # This long if statement attempts to reproduce this:
+ if dtype1.type is rational or dtype2.type is rational:
+ # User dtype promotion preserves byte-order here:
+ if np.can_cast(res, dtype1):
+ assert res.metadata == dtype1.metadata
+ else:
+ assert res.metadata == dtype2.metadata
+
+ elif res.char in "?bhilqpBHILQPefdgFDGOmM":
+ # All simple types lose metadata (due to using promotion table):
+ assert res.metadata is None
+ elif res.kind in "SU" and dtype1 == dtype2:
+ # Strings give precedence to the second dtype:
+ assert res is dtype2
+ elif res == dtype1:
+            # If the result equals one of the inputs, it is usually returned unchanged:
+ assert res is dtype1
+ elif res == dtype2:
+            # If the result equals one of the inputs, it is usually returned unchanged:
+ assert res is dtype2
+ elif dtype1.kind == "S" and dtype2.kind == "U":
+ # Promotion creates a new unicode dtype from scratch
+ assert res.metadata is None
+ elif dtype1.kind == "U" and dtype2.kind == "S":
+ # Promotion creates a new unicode dtype from scratch
+ assert res.metadata is None
+ elif res.kind in "SU" and dtype2.kind != res.kind:
+ # We build on top of dtype1:
+ assert res.metadata == dtype1.metadata
+ elif res.kind in "SU" and res.kind == dtype1.kind:
+ assert res.metadata == dtype1.metadata
+ elif res.kind in "SU" and res.kind == dtype2.kind:
+ assert res.metadata == dtype2.metadata
+ else:
+ assert res.metadata is None
+
+ # Try again for byteswapped version
+ dtype1 = dtype1.newbyteorder()
+ assert dtype1.metadata == metadata1
+ res_bs = np.promote_types(dtype1, dtype2)
+ if res_bs.names is not None:
+ # Structured promotion doesn't remove byteswap:
+ assert res_bs.newbyteorder() == res
+ else:
+ assert res_bs == res
+ assert res_bs.metadata == res.metadata
+
+
def test_can_cast(self):
assert_(np.can_cast(np.int32, np.int64))
assert_(np.can_cast(np.float64, complex))
diff --git a/numpy/core/tests/test_overrides.py b/numpy/core/tests/test_overrides.py
index 7e73d8c03..42600a12b 100644
--- a/numpy/core/tests/test_overrides.py
+++ b/numpy/core/tests/test_overrides.py
@@ -1,5 +1,7 @@
import inspect
import sys
+import tempfile
+from io import StringIO
from unittest import mock
import numpy as np
@@ -425,3 +427,168 @@ class TestNumPyFunctions:
# note: the internal implementation of np.sum() calls the .sum() method
array = np.array(1).view(MyArray)
assert_equal(np.sum(array), 'summed')
+
+
+class TestArrayLike:
+
+ class MyArray():
+
+ def __init__(self, function=None):
+ self.function = function
+
+ def __array_function__(self, func, types, args, kwargs):
+ try:
+ my_func = getattr(TestArrayLike.MyArray, func.__name__)
+ except AttributeError:
+ return NotImplemented
+ return my_func(*args, **kwargs)
+
+ class MyNoArrayFunctionArray():
+
+ def __init__(self, function=None):
+ self.function = function
+
+ def add_method(name, arr_class, enable_value_error=False):
+ def _definition(*args, **kwargs):
+ # Check that `like=` isn't propagated downstream
+ assert 'like' not in kwargs
+
+ if enable_value_error and 'value_error' in kwargs:
+ raise ValueError
+
+ return arr_class(getattr(arr_class, name))
+ setattr(arr_class, name, _definition)
+
+ def func_args(*args, **kwargs):
+ return args, kwargs
+
+ @requires_array_function
+ def test_array_like_not_implemented(self):
+ TestArrayLike.add_method('array', TestArrayLike.MyArray)
+
+ ref = TestArrayLike.MyArray.array()
+
+ with assert_raises_regex(TypeError, 'no implementation found'):
+ array_like = np.asarray(1, like=ref)
+
+ _array_tests = [
+ ('array', *func_args((1,))),
+ ('asarray', *func_args((1,))),
+ ('asanyarray', *func_args((1,))),
+ ('ascontiguousarray', *func_args((2, 3))),
+ ('asfortranarray', *func_args((2, 3))),
+ ('require', *func_args((np.arange(6).reshape(2, 3),),
+ requirements=['A', 'F'])),
+ ('empty', *func_args((1,))),
+ ('full', *func_args((1,), 2)),
+ ('ones', *func_args((1,))),
+ ('zeros', *func_args((1,))),
+ ('arange', *func_args(3)),
+ ('frombuffer', *func_args(b'\x00' * 8, dtype=int)),
+ ('fromiter', *func_args(range(3), dtype=int)),
+ ('fromstring', *func_args('1,2', dtype=int, sep=',')),
+ ('loadtxt', *func_args(lambda: StringIO('0 1\n2 3'))),
+ ('genfromtxt', *func_args(lambda: StringIO(u'1,2.1'),
+ dtype=[('int', 'i8'), ('float', 'f8')],
+ delimiter=',')),
+ ]
+
+ @pytest.mark.parametrize('function, args, kwargs', _array_tests)
+ @pytest.mark.parametrize('numpy_ref', [True, False])
+ @requires_array_function
+ def test_array_like(self, function, args, kwargs, numpy_ref):
+ TestArrayLike.add_method('array', TestArrayLike.MyArray)
+ TestArrayLike.add_method(function, TestArrayLike.MyArray)
+ np_func = getattr(np, function)
+ my_func = getattr(TestArrayLike.MyArray, function)
+
+ if numpy_ref is True:
+ ref = np.array(1)
+ else:
+ ref = TestArrayLike.MyArray.array()
+
+ like_args = tuple(a() if callable(a) else a for a in args)
+ array_like = np_func(*like_args, **kwargs, like=ref)
+
+ if numpy_ref is True:
+ assert type(array_like) is np.ndarray
+
+ np_args = tuple(a() if callable(a) else a for a in args)
+ np_arr = np_func(*np_args, **kwargs)
+
+ # Special-case np.empty to ensure values match
+ if function == "empty":
+ np_arr.fill(1)
+ array_like.fill(1)
+
+ assert_equal(array_like, np_arr)
+ else:
+ assert type(array_like) is TestArrayLike.MyArray
+ assert array_like.function is my_func
+
+ @pytest.mark.parametrize('function, args, kwargs', _array_tests)
+ @pytest.mark.parametrize('numpy_ref', [True, False])
+ @requires_array_function
+ def test_no_array_function_like(self, function, args, kwargs, numpy_ref):
+ TestArrayLike.add_method('array', TestArrayLike.MyNoArrayFunctionArray)
+ TestArrayLike.add_method(function, TestArrayLike.MyNoArrayFunctionArray)
+ np_func = getattr(np, function)
+ my_func = getattr(TestArrayLike.MyNoArrayFunctionArray, function)
+
+ if numpy_ref is True:
+ ref = np.array(1)
+ else:
+ ref = TestArrayLike.MyNoArrayFunctionArray.array()
+
+ like_args = tuple(a() if callable(a) else a for a in args)
+ array_like = np_func(*like_args, **kwargs, like=ref)
+
+ assert type(array_like) is np.ndarray
+ if numpy_ref is True:
+ np_args = tuple(a() if callable(a) else a for a in args)
+ np_arr = np_func(*np_args, **kwargs)
+
+ # Special-case np.empty to ensure values match
+ if function == "empty":
+ np_arr.fill(1)
+ array_like.fill(1)
+
+ assert_equal(array_like, np_arr)
+
+ @pytest.mark.parametrize('numpy_ref', [True, False])
+ def test_array_like_fromfile(self, numpy_ref):
+ TestArrayLike.add_method('array', TestArrayLike.MyArray)
+ TestArrayLike.add_method("fromfile", TestArrayLike.MyArray)
+
+ if numpy_ref is True:
+ ref = np.array(1)
+ else:
+ ref = TestArrayLike.MyArray.array()
+
+ data = np.random.random(5)
+
+ fname = tempfile.mkstemp()[1]
+ data.tofile(fname)
+
+ array_like = np.fromfile(fname, like=ref)
+ if numpy_ref is True:
+ assert type(array_like) is np.ndarray
+ np_res = np.fromfile(fname, like=ref)
+ assert_equal(np_res, data)
+ assert_equal(array_like, np_res)
+ else:
+ assert type(array_like) is TestArrayLike.MyArray
+ assert array_like.function is TestArrayLike.MyArray.fromfile
+
+ @requires_array_function
+ def test_exception_handling(self):
+ TestArrayLike.add_method(
+ 'array',
+ TestArrayLike.MyArray,
+ enable_value_error=True,
+ )
+
+ ref = TestArrayLike.MyArray.array()
+
+ with assert_raises(ValueError):
+ np.array(1, value_error=True, like=ref)
diff --git a/numpy/core/tests/test_regression.py b/numpy/core/tests/test_regression.py
index 51cf7039f..2e731d4fa 100644
--- a/numpy/core/tests/test_regression.py
+++ b/numpy/core/tests/test_regression.py
@@ -14,7 +14,7 @@ from numpy.testing import (
assert_raises_regex, assert_warns, suppress_warnings,
_assert_valid_refcount, HAS_REFCOUNT,
)
-from numpy.testing._private.utils import _no_tracing
+from numpy.testing._private.utils import _no_tracing, requires_memory
from numpy.compat import asbytes, asunicode, pickle
try:
@@ -2488,3 +2488,39 @@ class TestRegression:
assert arr.size * arr.itemsize > 2 ** 31
c_arr = np.ctypeslib.as_ctypes(arr)
assert_equal(c_arr._length_, arr.size)
+
+ def test_complex_conversion_error(self):
+ # gh-17068
+ with pytest.raises(TypeError, match=r"Unable to convert dtype.*"):
+ complex(np.array("now", np.datetime64))
+
+ def test__array_interface__descr(self):
+ # gh-17068
+ dt = np.dtype(dict(names=['a', 'b'],
+ offsets=[0, 0],
+ formats=[np.int64, np.int64]))
+ descr = np.array((1, 1), dtype=dt).__array_interface__['descr']
+ assert descr == [('', '|V8')] # instead of [(b'', '|V8')]
+
+ @pytest.mark.skipif(sys.maxsize < 2 ** 31 + 1, reason='overflows 32-bit python')
+ @requires_memory(free_bytes=9e9)
+ def test_dot_big_stride(self):
+ # gh-17111
+ # blas stride = stride//itemsize > int32 max
+ int32_max = np.iinfo(np.int32).max
+ n = int32_max + 3
+ a = np.empty([n], dtype=np.float32)
+ b = a[::n-1]
+ b[...] = 1
+ assert b.strides[0] > int32_max * b.dtype.itemsize
+ assert np.dot(b, b) == 2.0
+
+ def test_frompyfunc_name(self):
+ # name conversion was failing for python 3 strings
+ # resulting in the default '?' name. Also test utf-8
+ # encoding using non-ascii name.
+ def cassé(x):
+ return x
+
+ f = np.frompyfunc(cassé, 1, 1)
+ assert str(f) == "<ufunc 'cassé (vectorized)'>"
diff --git a/numpy/core/tests/test_shape_base.py b/numpy/core/tests/test_shape_base.py
index 94a916193..4e56ace90 100644
--- a/numpy/core/tests/test_shape_base.py
+++ b/numpy/core/tests/test_shape_base.py
@@ -342,19 +342,32 @@ class TestConcatenate:
assert_raises(ValueError, concatenate, (a, b), out=np.empty((1,4)))
concatenate((a, b), out=np.empty(4))
- def test_out_dtype(self):
- out = np.empty(4, np.float32)
- res = concatenate((array([1, 2]), array([3, 4])), out=out)
- assert_(out is res)
-
- out = np.empty(4, np.complex64)
- res = concatenate((array([0.1, 0.2]), array([0.3, 0.4])), out=out)
- assert_(out is res)
-
- # invalid cast
- out = np.empty(4, np.int32)
- assert_raises(TypeError, concatenate,
- (array([0.1, 0.2]), array([0.3, 0.4])), out=out)
+ @pytest.mark.parametrize("axis", [None, 0])
+ @pytest.mark.parametrize("out_dtype", ["c8", "f4", "f8", ">f8", "i8"])
+ @pytest.mark.parametrize("casting",
+ ['no', 'equiv', 'safe', 'same_kind', 'unsafe'])
+ def test_out_and_dtype(self, axis, out_dtype, casting):
+ # Compare usage of `out=out` with `dtype=out.dtype`
+ out = np.empty(4, dtype=out_dtype)
+ to_concat = (array([1.1, 2.2]), array([3.3, 4.4]))
+
+ if not np.can_cast(to_concat[0], out_dtype, casting=casting):
+ with assert_raises(TypeError):
+ concatenate(to_concat, out=out, axis=axis, casting=casting)
+ with assert_raises(TypeError):
+ concatenate(to_concat, dtype=out.dtype,
+ axis=axis, casting=casting)
+ else:
+ res_out = concatenate(to_concat, out=out,
+ axis=axis, casting=casting)
+ res_dtype = concatenate(to_concat, dtype=out.dtype,
+ axis=axis, casting=casting)
+ assert res_out is out
+ assert_array_equal(out, res_dtype)
+ assert res_dtype.dtype == out_dtype
+
+ with assert_raises(TypeError):
+ concatenate(to_concat, out=out, dtype=out_dtype, axis=axis)
def test_stack():
diff --git a/numpy/core/tests/test_umath.py b/numpy/core/tests/test_umath.py
index ae72687ca..818b2ad6c 100644
--- a/numpy/core/tests/test_umath.py
+++ b/numpy/core/tests/test_umath.py
@@ -2457,7 +2457,7 @@ class TestSpecialMethods:
assert_raises(ValueError, inner1d, a, a, out=())
def test_ufunc_override_with_super(self):
- # NOTE: this class is given as an example in doc/subclassing.py;
+ # NOTE: this class is used in doc/source/user/basics.subclassing.rst
# if you make any changes here, do update it there too.
class A(np.ndarray):
def __array_ufunc__(self, ufunc, method, *inputs, out=None, **kwargs):
diff --git a/numpy/ctypeslib.py b/numpy/ctypeslib.py
index 76ba838b7..e8f7750fe 100644
--- a/numpy/ctypeslib.py
+++ b/numpy/ctypeslib.py
@@ -49,12 +49,11 @@ Then, we're ready to call ``foo_func``:
>>> _lib.foo_func(out, len(out)) #doctest: +SKIP
"""
-__all__ = ['load_library', 'ndpointer', 'ctypes_load_library',
- 'c_intp', 'as_ctypes', 'as_array']
+__all__ = ['load_library', 'ndpointer', 'c_intp', 'as_ctypes', 'as_array']
import os
from numpy import (
- integer, ndarray, dtype as _dtype, deprecate, array, frombuffer
+ integer, ndarray, dtype as _dtype, array, frombuffer
)
from numpy.core.multiarray import _flagdict, flagsobj
@@ -75,7 +74,6 @@ if ctypes is None:
"""
raise ImportError("ctypes is not available.")
- ctypes_load_library = _dummy
load_library = _dummy
as_ctypes = _dummy
as_array = _dummy
@@ -154,8 +152,6 @@ else:
## if no successful return in the libname_ext loop:
raise OSError("no file with expected extension")
- ctypes_load_library = deprecate(load_library, 'ctypes_load_library',
- 'load_library')
def _num_fromflags(flaglist):
num = 0
diff --git a/numpy/ctypeslib.pyi b/numpy/ctypeslib.pyi
new file mode 100644
index 000000000..cacc97d68
--- /dev/null
+++ b/numpy/ctypeslib.pyi
@@ -0,0 +1,7 @@
+from typing import Any
+
+load_library: Any
+ndpointer: Any
+c_intp: Any
+as_ctypes: Any
+as_array: Any
diff --git a/numpy/distutils/__init__.pyi b/numpy/distutils/__init__.pyi
new file mode 100644
index 000000000..3938d68de
--- /dev/null
+++ b/numpy/distutils/__init__.pyi
@@ -0,0 +1,4 @@
+from typing import Any
+
+# TODO: remove when the full numpy namespace is defined
+def __getattr__(name: str) -> Any: ...
diff --git a/numpy/distutils/ccompiler_opt.py b/numpy/distutils/ccompiler_opt.py
index 85dc2f1e8..72ea0c388 100644
--- a/numpy/distutils/ccompiler_opt.py
+++ b/numpy/distutils/ccompiler_opt.py
@@ -152,6 +152,18 @@ class _Config:
By default(None), treated as True if the feature contains at
least one applicable flag. see `feature_can_autovec()`
+ "extra_checks": str or list, optional
+ Extra test case names for the CPU feature that need to be tested
+ against the compiler.
+
+ Each test case must have a C file named ``extra_xxxx.c``, where
+ ``xxxx`` is the case name in lower case, under 'conf_check_path'.
+ It should contain at least one intrinsic or function related to the test case.
+
+ If the compiler is able to successfully compile the C file then `CCompilerOpt`
+ will add a C ``#define`` for it into the main dispatch header, e.g.
+ ``#define {conf_c_prefix}_XXXX``, where ``XXXX`` is the case name in upper case.
+
**NOTES**:
* space can be used as separator with options that supports "str or list"
* case-sensitive for all values and feature name must be in upper-case.
@@ -230,7 +242,10 @@ class _Config:
F16C = dict(interest=11, implies="AVX"),
FMA3 = dict(interest=12, implies="F16C"),
AVX2 = dict(interest=13, implies="F16C"),
- AVX512F = dict(interest=20, implies="FMA3 AVX2", implies_detect=False),
+ AVX512F = dict(
+ interest=20, implies="FMA3 AVX2", implies_detect=False,
+ extra_checks="AVX512F_REDUCE"
+ ),
AVX512CD = dict(interest=21, implies="AVX512F"),
AVX512_KNL = dict(
interest=40, implies="AVX512CD", group="AVX512ER AVX512PF",
@@ -243,7 +258,8 @@ class _Config:
),
AVX512_SKX = dict(
interest=42, implies="AVX512CD", group="AVX512VL AVX512BW AVX512DQ",
- detect="AVX512_SKX", implies_detect=False
+ detect="AVX512_SKX", implies_detect=False,
+ extra_checks="AVX512BW_MASK"
),
AVX512_CLX = dict(
interest=43, implies="AVX512_SKX", group="AVX512VNNI",
@@ -673,7 +689,7 @@ class _Distutils:
# intel and msvc compilers don't raise
# fatal errors when flags are wrong or unsupported
".*("
- "warning D9002|" # msvc, it should be work with any language.
+ "warning D9002|" # msvc, it should be work with any language.
"invalid argument for option" # intel
").*"
)
@@ -1137,7 +1153,7 @@ class _Feature:
continue
# list is used internally for these options
for option in (
- "implies", "group", "detect", "headers", "flags"
+ "implies", "group", "detect", "headers", "flags", "extra_checks"
) :
oval = feature.get(option)
if isinstance(oval, str):
@@ -1439,7 +1455,7 @@ class _Feature:
self.conf_check_path, "cpu_%s.c" % name.lower()
)
if not os.path.exists(test_path):
- self.dist_fatal("feature test file is not exist", path)
+ self.dist_fatal("feature test file is not exist", test_path)
test = self.dist_test(test_path, force_flags + self.cc_flags["werror"])
if not test:
@@ -1487,6 +1503,45 @@ class _Feature:
can = valid_flags and any(valid_flags)
return can
+ @_Cache.me
+ def feature_extra_checks(self, name):
+ """
+ Return a list of supported extra checks after testing them against
+ the compiler.
+
+ Parameters
+ ----------
+ names: str
+ CPU feature name in uppercase.
+ """
+ assert isinstance(name, str)
+ d = self.feature_supported[name]
+ extra_checks = d.get("extra_checks", [])
+ if not extra_checks:
+ return []
+
+ self.dist_log("Testing extra checks for feature '%s'" % name, extra_checks)
+ flags = self.feature_flags(name)
+ available = []
+ not_available = []
+ for chk in extra_checks:
+ test_path = os.path.join(
+ self.conf_check_path, "extra_%s.c" % chk.lower()
+ )
+ if not os.path.exists(test_path):
+ self.dist_fatal("extra check file does not exist", test_path)
+
+ is_supported = self.dist_test(test_path, flags + self.cc_flags["werror"])
+ if is_supported:
+ available.append(chk)
+ else:
+ not_available.append(chk)
+
+ if not_available:
+ self.dist_log("testing failed for checks", not_available, stderr=True)
+ return available
+
+
def feature_c_preprocessor(self, feature_name, tabs=0):
"""
Generate C preprocessor definitions and include headers of a CPU feature.
@@ -1520,14 +1575,18 @@ class _Feature:
prepr += [
"#include <%s>" % h for h in feature.get("headers", [])
]
- group = feature.get("group", [])
- for f in group:
- # Guard features in case of duplicate definitions
+
+ extra_defs = feature.get("group", [])
+ extra_defs += self.feature_extra_checks(feature_name)
+ for edef in extra_defs:
+ # Guard extra definitions in case of duplicate with
+ # another feature
prepr += [
- "#ifndef %sHAVE_%s" % (self.conf_c_prefix, f),
- "\t#define %sHAVE_%s 1" % (self.conf_c_prefix, f),
+ "#ifndef %sHAVE_%s" % (self.conf_c_prefix, edef),
+ "\t#define %sHAVE_%s 1" % (self.conf_c_prefix, edef),
"#endif",
]
+
if tabs > 0:
prepr = [('\t'*tabs) + l for l in prepr]
return '\n'.join(prepr)
@@ -2127,7 +2186,7 @@ class CCompilerOpt(_Config, _Distutils, _Cache, _CCompiler, _Feature, _Parse):
See Also
--------
- parse_targets() :
+ parse_targets :
Parsing the configuration statements of dispatch-able sources.
"""
to_compile = {}
@@ -2269,6 +2328,12 @@ class CCompilerOpt(_Config, _Distutils, _Cache, _CCompiler, _Feature, _Parse):
baseline_rows.append((
"Flags", (' '.join(baseline_flags) if baseline_flags else "none")
))
+ extra_checks = []
+ for name in baseline_names:
+ extra_checks += self.feature_extra_checks(name)
+ baseline_rows.append((
+ "Extra checks", (' '.join(extra_checks) if extra_checks else "none")
+ ))
########## dispatch ##########
if self.cc_noopt:
@@ -2307,14 +2372,21 @@ class CCompilerOpt(_Config, _Distutils, _Cache, _CCompiler, _Feature, _Parse):
else:
dispatch_rows.append(("Generated", ''))
for tar in self.feature_sorted(target_sources):
+ tar_as_seq = [tar] if isinstance(tar, str) else tar
sources = target_sources[tar]
name = tar if isinstance(tar, str) else '(%s)' % ' '.join(tar)
flags = ' '.join(self.feature_flags(tar))
implies = ' '.join(self.feature_sorted(self.feature_implies(tar)))
detect = ' '.join(self.feature_detect(tar))
+ extra_checks = []
+ for name in tar_as_seq:
+ extra_checks += self.feature_extra_checks(name)
+ extra_checks = (' '.join(extra_checks) if extra_checks else "none")
+
dispatch_rows.append(('', ''))
dispatch_rows.append((name, implies))
dispatch_rows.append(("Flags", flags))
+ dispatch_rows.append(("Extra checks", extra_checks))
dispatch_rows.append(("Detect", detect))
for src in sources:
dispatch_rows.append(("", src))
diff --git a/numpy/distutils/checks/extra_avx512bw_mask.c b/numpy/distutils/checks/extra_avx512bw_mask.c
new file mode 100644
index 000000000..9cfd0c2a5
--- /dev/null
+++ b/numpy/distutils/checks/extra_avx512bw_mask.c
@@ -0,0 +1,18 @@
+#include <immintrin.h>
+/**
+ * Test BW mask operations due to:
+ * - MSVC has supported it since vs2019 see,
+ * https://developercommunity.visualstudio.com/content/problem/518298/missing-avx512bw-mask-intrinsics.html
+ * - Clang >= v8.0
+ * - GCC >= v7.1
+ */
+int main(void)
+{
+ __mmask64 m64 = _mm512_cmpeq_epi8_mask(_mm512_set1_epi8((char)1), _mm512_set1_epi8((char)1));
+ m64 = _kor_mask64(m64, m64);
+ m64 = _kxor_mask64(m64, m64);
+ m64 = _cvtu64_mask64(_cvtmask64_u64(m64));
+ m64 = _mm512_kunpackd(m64, m64);
+ m64 = (__mmask64)_mm512_kunpackw((__mmask32)m64, (__mmask32)m64);
+ return (int)_cvtmask64_u64(m64);
+}
diff --git a/numpy/distutils/checks/extra_avx512f_reduce.c b/numpy/distutils/checks/extra_avx512f_reduce.c
new file mode 100644
index 000000000..f979d504e
--- /dev/null
+++ b/numpy/distutils/checks/extra_avx512f_reduce.c
@@ -0,0 +1,41 @@
+#include <immintrin.h>
+/**
+ * The following intrinsics don't have direct native support but compilers
+ * tend to emulate them.
+ * They're usually supported by gcc >= 7.1, clang >= 4 and icc >= 19
+ */
+int main(void)
+{
+ __m512 one_ps = _mm512_set1_ps(1.0f);
+ __m512d one_pd = _mm512_set1_pd(1.0);
+ __m512i one_i64 = _mm512_set1_epi64(1.0);
+ // add
+ float sum_ps = _mm512_reduce_add_ps(one_ps);
+ double sum_pd = _mm512_reduce_add_pd(one_pd);
+ int sum_int = (int)_mm512_reduce_add_epi64(one_i64);
+ sum_int += (int)_mm512_reduce_add_epi32(one_i64);
+ // mul
+ sum_ps += _mm512_reduce_mul_ps(one_ps);
+ sum_pd += _mm512_reduce_mul_pd(one_pd);
+ sum_int += (int)_mm512_reduce_mul_epi64(one_i64);
+ sum_int += (int)_mm512_reduce_mul_epi32(one_i64);
+ // min
+ sum_ps += _mm512_reduce_min_ps(one_ps);
+ sum_pd += _mm512_reduce_min_pd(one_pd);
+ sum_int += (int)_mm512_reduce_min_epi32(one_i64);
+ sum_int += (int)_mm512_reduce_min_epu32(one_i64);
+ sum_int += (int)_mm512_reduce_min_epi64(one_i64);
+ // max
+ sum_ps += _mm512_reduce_max_ps(one_ps);
+ sum_pd += _mm512_reduce_max_pd(one_pd);
+ sum_int += (int)_mm512_reduce_max_epi32(one_i64);
+ sum_int += (int)_mm512_reduce_max_epu32(one_i64);
+ sum_int += (int)_mm512_reduce_max_epi64(one_i64);
+ // and
+ sum_int += (int)_mm512_reduce_and_epi32(one_i64);
+ sum_int += (int)_mm512_reduce_and_epi64(one_i64);
+ // or
+ sum_int += (int)_mm512_reduce_or_epi32(one_i64);
+ sum_int += (int)_mm512_reduce_or_epi64(one_i64);
+ return (int)sum_ps + (int)sum_pd + sum_int;
+}
diff --git a/numpy/distutils/fcompiler/__init__.py b/numpy/distutils/fcompiler/__init__.py
index 1c3069363..a1c52412d 100644
--- a/numpy/distutils/fcompiler/__init__.py
+++ b/numpy/distutils/fcompiler/__init__.py
@@ -20,8 +20,6 @@ import os
import sys
import re
-from numpy.compat import open_latin1
-
from distutils.sysconfig import get_python_lib
from distutils.fancy_getopt import FancyGetopt
from distutils.errors import DistutilsModuleError, \
@@ -975,29 +973,27 @@ def is_free_format(file):
# f90 allows both fixed and free format, assuming fixed unless
# signs of free format are detected.
result = 0
- f = open_latin1(file, 'r')
- line = f.readline()
- n = 10000 # the number of non-comment lines to scan for hints
- if _has_f_header(line):
- n = 0
- elif _has_f90_header(line):
- n = 0
- result = 1
- while n>0 and line:
- line = line.rstrip()
- if line and line[0]!='!':
- n -= 1
- if (line[0]!='\t' and _free_f90_start(line[:5])) or line[-1:]=='&':
- result = 1
- break
+ with open(file, encoding='latin1') as f:
line = f.readline()
- f.close()
+ n = 10000 # the number of non-comment lines to scan for hints
+ if _has_f_header(line):
+ n = 0
+ elif _has_f90_header(line):
+ n = 0
+ result = 1
+ while n>0 and line:
+ line = line.rstrip()
+ if line and line[0]!='!':
+ n -= 1
+ if (line[0]!='\t' and _free_f90_start(line[:5])) or line[-1:]=='&':
+ result = 1
+ break
+ line = f.readline()
return result
def has_f90_header(src):
- f = open_latin1(src, 'r')
- line = f.readline()
- f.close()
+ with open(src, encoding='latin1') as f:
+ line = f.readline()
return _has_f90_header(line) or _has_fix_header(line)
_f77flags_re = re.compile(r'(c|)f77flags\s*\(\s*(?P<fcname>\w+)\s*\)\s*=\s*(?P<fflags>.*)', re.I)
@@ -1008,17 +1004,16 @@ def get_f77flags(src):
Return a dictionary {<fcompiler type>:<f77 flags>}.
"""
flags = {}
- f = open_latin1(src, 'r')
- i = 0
- for line in f:
- i += 1
- if i>20: break
- m = _f77flags_re.match(line)
- if not m: continue
- fcname = m.group('fcname').strip()
- fflags = m.group('fflags').strip()
- flags[fcname] = split_quoted(fflags)
- f.close()
+ with open(src, encoding='latin1') as f:
+ i = 0
+ for line in f:
+ i += 1
+ if i>20: break
+ m = _f77flags_re.match(line)
+ if not m: continue
+ fcname = m.group('fcname').strip()
+ fflags = m.group('fflags').strip()
+ flags[fcname] = split_quoted(fflags)
return flags
# TODO: implement get_f90flags and use it in _compile similarly to get_f77flags
diff --git a/numpy/distutils/fcompiler/gnu.py b/numpy/distutils/fcompiler/gnu.py
index 7004b5d80..0d9d769c2 100644
--- a/numpy/distutils/fcompiler/gnu.py
+++ b/numpy/distutils/fcompiler/gnu.py
@@ -231,7 +231,7 @@ class GnuFCompiler(FCompiler):
def _c_arch_flags(self):
""" Return detected arch flags from CFLAGS """
- from distutils import sysconfig
+ import sysconfig
try:
cflags = sysconfig.get_config_vars()['CFLAGS']
except KeyError:
diff --git a/numpy/distutils/system_info.py b/numpy/distutils/system_info.py
index 760bb7d5c..19f7482f2 100644
--- a/numpy/distutils/system_info.py
+++ b/numpy/distutils/system_info.py
@@ -171,7 +171,7 @@ from configparser import RawConfigParser as ConfigParser
from distutils.errors import DistutilsError
from distutils.dist import Distribution
-import distutils.sysconfig
+import sysconfig
from numpy.distutils import log
from distutils.util import get_platform
@@ -187,6 +187,7 @@ import distutils.ccompiler
import tempfile
import shutil
+__all__ = ['system_info']
# Determine number of bits
import platform
@@ -255,7 +256,7 @@ def libpaths(paths, bits):
if sys.platform == 'win32':
default_lib_dirs = ['C:\\',
- os.path.join(distutils.sysconfig.EXEC_PREFIX,
+ os.path.join(sysconfig.get_config_var('exec_prefix'),
'libs')]
default_runtime_dirs = []
default_include_dirs = []
@@ -2498,13 +2499,12 @@ class _numpy_info(system_info):
except AttributeError:
pass
- include_dirs.append(distutils.sysconfig.get_python_inc(
- prefix=os.sep.join(prefix)))
+ include_dirs.append(sysconfig.get_path('include'))
except ImportError:
pass
- py_incl_dir = distutils.sysconfig.get_python_inc()
+ py_incl_dir = sysconfig.get_path('include')
include_dirs.append(py_incl_dir)
- py_pincl_dir = distutils.sysconfig.get_python_inc(plat_specific=True)
+ py_pincl_dir = sysconfig.get_path('platinclude')
if py_pincl_dir not in include_dirs:
include_dirs.append(py_pincl_dir)
for d in default_include_dirs:
@@ -2631,8 +2631,8 @@ class boost_python_info(system_info):
break
if not src_dir:
return
- py_incl_dirs = [distutils.sysconfig.get_python_inc()]
- py_pincl_dir = distutils.sysconfig.get_python_inc(plat_specific=True)
+ py_incl_dirs = [sysconfig.get_path('include')]
+ py_pincl_dir = sysconfig.get_path('platinclude')
if py_pincl_dir not in py_incl_dirs:
py_incl_dirs.append(py_pincl_dir)
srcs_dir = os.path.join(src_dir, 'libs', 'python', 'src')
diff --git a/numpy/distutils/tests/test_ccompiler_opt_conf.py b/numpy/distutils/tests/test_ccompiler_opt_conf.py
index 2f83a59e0..244748e58 100644
--- a/numpy/distutils/tests/test_ccompiler_opt_conf.py
+++ b/numpy/distutils/tests/test_ccompiler_opt_conf.py
@@ -66,11 +66,12 @@ class _TestConfFeatures(FakeCCompilerOpt):
self.test_implies(error_msg, search_in, feature_name, feature_dict)
self.test_group(error_msg, search_in, feature_name, feature_dict)
+ self.test_extra_checks(error_msg, search_in, feature_name, feature_dict)
def test_option_types(self, error_msg, option, val):
for tp, available in (
((str, list), (
- "implies", "headers", "flags", "group", "detect"
+ "implies", "headers", "flags", "group", "detect", "extra_checks"
)),
((str,), ("disable",)),
((int,), ("interest",)),
@@ -83,29 +84,25 @@ class _TestConfFeatures(FakeCCompilerOpt):
if not isinstance(val, tp):
error_tp = [t.__name__ for t in (*tp,)]
error_tp = ' or '.join(error_tp)
- raise AssertionError(error_msg + \
+ raise AssertionError(error_msg +
"expected '%s' type for option '%s' not '%s'" % (
error_tp, option, type(val).__name__
))
break
if not found_it:
- raise AssertionError(error_msg + \
- "invalid option name '%s'" % option
- )
+ raise AssertionError(error_msg + "invalid option name '%s'" % option)
def test_duplicates(self, error_msg, option, val):
if option not in (
- "implies", "headers", "flags", "group", "detect"
+ "implies", "headers", "flags", "group", "detect", "extra_checks"
) : return
if isinstance(val, str):
val = val.split()
if len(val) != len(set(val)):
- raise AssertionError(error_msg + \
- "duplicated values in option '%s'" % option
- )
+ raise AssertionError(error_msg + "duplicated values in option '%s'" % option)
def test_implies(self, error_msg, search_in, feature_name, feature_dict):
if feature_dict.get("disabled") is not None:
@@ -117,21 +114,15 @@ class _TestConfFeatures(FakeCCompilerOpt):
implies = implies.split()
if feature_name in implies:
- raise AssertionError(error_msg + \
- "feature implies itself"
- )
+ raise AssertionError(error_msg + "feature implies itself")
for impl in implies:
impl_dict = search_in.get(impl)
if impl_dict is not None:
if "disable" in impl_dict:
- raise AssertionError(error_msg + \
- "implies disabled feature '%s'" % impl
- )
+ raise AssertionError(error_msg + "implies disabled feature '%s'" % impl)
continue
- raise AssertionError(error_msg + \
- "implies non-exist feature '%s'" % impl
- )
+ raise AssertionError(error_msg + "implies non-exist feature '%s'" % impl)
def test_group(self, error_msg, search_in, feature_name, feature_dict):
if feature_dict.get("disabled") is not None:
@@ -146,10 +137,26 @@ class _TestConfFeatures(FakeCCompilerOpt):
impl_dict = search_in.get(f)
if not impl_dict or "disable" in impl_dict:
continue
- raise AssertionError(error_msg + \
- "in option '%s', '%s' already exists as a feature name" % (
- option, f
- ))
+ raise AssertionError(error_msg +
+ "in option 'group', '%s' already exists as a feature name" % f
+ )
+
+ def test_extra_checks(self, error_msg, search_in, feature_name, feature_dict):
+ if feature_dict.get("disabled") is not None:
+ return
+ extra_checks = feature_dict.get("extra_checks", "")
+ if not extra_checks:
+ return
+ if isinstance(extra_checks, str):
+ extra_checks = extra_checks.split()
+
+ for f in extra_checks:
+ impl_dict = search_in.get(f)
+ if not impl_dict or "disable" in impl_dict:
+ continue
+ raise AssertionError(error_msg +
+ "in option 'extra_checks', extra test case '%s' already exists as a feature name" % f
+ )
class TestConfFeatures(unittest.TestCase):
def __init__(self, methodName="runTest"):
diff --git a/numpy/distutils/unixccompiler.py b/numpy/distutils/unixccompiler.py
index 5f36c439f..9bb7251d8 100644
--- a/numpy/distutils/unixccompiler.py
+++ b/numpy/distutils/unixccompiler.py
@@ -26,7 +26,8 @@ def UnixCCompiler__compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts
self.compiler_so = ccomp
# ensure OPT environment variable is read
if 'OPT' in os.environ:
- from distutils.sysconfig import get_config_vars
+ # XXX who uses this?
+ from sysconfig import get_config_vars
opt = " ".join(os.environ['OPT'].split())
gcv_opt = " ".join(get_config_vars('OPT')[0].split())
ccomp_s = " ".join(self.compiler_so)
diff --git a/numpy/doc/basics.py b/numpy/doc/basics.py
deleted file mode 100644
index 635c1b1b8..000000000
--- a/numpy/doc/basics.py
+++ /dev/null
@@ -1,341 +0,0 @@
-"""
-============
-Array basics
-============
-
-Array types and conversions between types
-=========================================
-
-NumPy supports a much greater variety of numerical types than Python does.
-This section shows which are available, and how to modify an array's data-type.
-
-The primitive types supported are tied closely to those in C:
-
-.. list-table::
- :header-rows: 1
-
- * - Numpy type
- - C type
- - Description
-
- * - `np.bool_`
- - ``bool``
- - Boolean (True or False) stored as a byte
-
- * - `np.byte`
- - ``signed char``
- - Platform-defined
-
- * - `np.ubyte`
- - ``unsigned char``
- - Platform-defined
-
- * - `np.short`
- - ``short``
- - Platform-defined
-
- * - `np.ushort`
- - ``unsigned short``
- - Platform-defined
-
- * - `np.intc`
- - ``int``
- - Platform-defined
-
- * - `np.uintc`
- - ``unsigned int``
- - Platform-defined
-
- * - `np.int_`
- - ``long``
- - Platform-defined
-
- * - `np.uint`
- - ``unsigned long``
- - Platform-defined
-
- * - `np.longlong`
- - ``long long``
- - Platform-defined
-
- * - `np.ulonglong`
- - ``unsigned long long``
- - Platform-defined
-
- * - `np.half` / `np.float16`
- -
- - Half precision float:
- sign bit, 5 bits exponent, 10 bits mantissa
-
- * - `np.single`
- - ``float``
- - Platform-defined single precision float:
- typically sign bit, 8 bits exponent, 23 bits mantissa
-
- * - `np.double`
- - ``double``
- - Platform-defined double precision float:
- typically sign bit, 11 bits exponent, 52 bits mantissa.
-
- * - `np.longdouble`
- - ``long double``
- - Platform-defined extended-precision float
-
- * - `np.csingle`
- - ``float complex``
- - Complex number, represented by two single-precision floats (real and imaginary components)
-
- * - `np.cdouble`
- - ``double complex``
- - Complex number, represented by two double-precision floats (real and imaginary components).
-
- * - `np.clongdouble`
- - ``long double complex``
- - Complex number, represented by two extended-precision floats (real and imaginary components).
-
-
-Since many of these have platform-dependent definitions, a set of fixed-size
-aliases are provided:
-
-.. list-table::
- :header-rows: 1
-
- * - Numpy type
- - C type
- - Description
-
- * - `np.int8`
- - ``int8_t``
- - Byte (-128 to 127)
-
- * - `np.int16`
- - ``int16_t``
- - Integer (-32768 to 32767)
-
- * - `np.int32`
- - ``int32_t``
- - Integer (-2147483648 to 2147483647)
-
- * - `np.int64`
- - ``int64_t``
- - Integer (-9223372036854775808 to 9223372036854775807)
-
- * - `np.uint8`
- - ``uint8_t``
- - Unsigned integer (0 to 255)
-
- * - `np.uint16`
- - ``uint16_t``
- - Unsigned integer (0 to 65535)
-
- * - `np.uint32`
- - ``uint32_t``
- - Unsigned integer (0 to 4294967295)
-
- * - `np.uint64`
- - ``uint64_t``
- - Unsigned integer (0 to 18446744073709551615)
-
- * - `np.intp`
- - ``intptr_t``
- - Integer used for indexing, typically the same as ``ssize_t``
-
- * - `np.uintp`
- - ``uintptr_t``
- - Integer large enough to hold a pointer
-
- * - `np.float32`
- - ``float``
- -
-
- * - `np.float64` / `np.float_`
- - ``double``
- - Note that this matches the precision of the builtin python `float`.
-
- * - `np.complex64`
- - ``float complex``
- - Complex number, represented by two 32-bit floats (real and imaginary components)
-
- * - `np.complex128` / `np.complex_`
- - ``double complex``
- - Note that this matches the precision of the builtin python `complex`.
-
-
-NumPy numerical types are instances of ``dtype`` (data-type) objects, each
-having unique characteristics. Once you have imported NumPy using
-
- ::
-
- >>> import numpy as np
-
-the dtypes are available as ``np.bool_``, ``np.float32``, etc.
-
-Advanced types, not listed in the table above, are explored in
-section :ref:`structured_arrays`.
-
-There are 5 basic numerical types representing booleans (bool), integers (int),
-unsigned integers (uint) floating point (float) and complex. Those with numbers
-in their name indicate the bitsize of the type (i.e. how many bits are needed
-to represent a single value in memory). Some types, such as ``int`` and
-``intp``, have differing bitsizes, dependent on the platforms (e.g. 32-bit
-vs. 64-bit machines). This should be taken into account when interfacing
-with low-level code (such as C or Fortran) where the raw memory is addressed.
-
-Data-types can be used as functions to convert python numbers to array scalars
-(see the array scalar section for an explanation), python sequences of numbers
-to arrays of that type, or as arguments to the dtype keyword that many numpy
-functions or methods accept. Some examples::
-
- >>> import numpy as np
- >>> x = np.float32(1.0)
- >>> x
- 1.0
- >>> y = np.int_([1,2,4])
- >>> y
- array([1, 2, 4])
- >>> z = np.arange(3, dtype=np.uint8)
- >>> z
- array([0, 1, 2], dtype=uint8)
-
-Array types can also be referred to by character codes, mostly to retain
-backward compatibility with older packages such as Numeric. Some
-documentation may still refer to these, for example::
-
- >>> np.array([1, 2, 3], dtype='f')
- array([ 1., 2., 3.], dtype=float32)
-
-We recommend using dtype objects instead.
-
-To convert the type of an array, use the .astype() method (preferred) or
-the type itself as a function. For example: ::
-
- >>> z.astype(float) #doctest: +NORMALIZE_WHITESPACE
- array([ 0., 1., 2.])
- >>> np.int8(z)
- array([0, 1, 2], dtype=int8)
-
-Note that, above, we use the *Python* float object as a dtype. NumPy knows
-that ``int`` refers to ``np.int_``, ``bool`` means ``np.bool_``,
-that ``float`` is ``np.float_`` and ``complex`` is ``np.complex_``.
-The other data-types do not have Python equivalents.
-
-To determine the type of an array, look at the dtype attribute::
-
- >>> z.dtype
- dtype('uint8')
-
-dtype objects also contain information about the type, such as its bit-width
-and its byte-order. The data type can also be used indirectly to query
-properties of the type, such as whether it is an integer::
-
- >>> d = np.dtype(int)
- >>> d
- dtype('int32')
-
- >>> np.issubdtype(d, np.integer)
- True
-
- >>> np.issubdtype(d, np.floating)
- False
-
-
-Array Scalars
-=============
-
-NumPy generally returns elements of arrays as array scalars (a scalar
-with an associated dtype). Array scalars differ from Python scalars, but
-for the most part they can be used interchangeably (the primary
-exception is for versions of Python older than v2.x, where integer array
-scalars cannot act as indices for lists and tuples). There are some
-exceptions, such as when code requires very specific attributes of a scalar
-or when it checks specifically whether a value is a Python scalar. Generally,
-problems are easily fixed by explicitly converting array scalars
-to Python scalars, using the corresponding Python type function
-(e.g., ``int``, ``float``, ``complex``, ``str``, ``unicode``).
-
-The primary advantage of using array scalars is that
-they preserve the array type (Python may not have a matching scalar type
-available, e.g. ``int16``). Therefore, the use of array scalars ensures
-identical behaviour between arrays and scalars, irrespective of whether the
-value is inside an array or not. NumPy scalars also have many of the same
-methods arrays do.
-
-Overflow Errors
-===============
-
-The fixed size of NumPy numeric types may cause overflow errors when a value
-requires more memory than available in the data type. For example,
-`numpy.power` evaluates ``100 * 10 ** 8`` correctly for 64-bit integers,
-but gives 1874919424 (incorrect) for a 32-bit integer.
-
- >>> np.power(100, 8, dtype=np.int64)
- 10000000000000000
- >>> np.power(100, 8, dtype=np.int32)
- 1874919424
-
-The behaviour of NumPy and Python integer types differs significantly for
-integer overflows and may confuse users expecting NumPy integers to behave
-similar to Python's ``int``. Unlike NumPy, the size of Python's ``int`` is
-flexible. This means Python integers may expand to accommodate any integer and
-will not overflow.
-
-NumPy provides `numpy.iinfo` and `numpy.finfo` to verify the
-minimum or maximum values of NumPy integer and floating point values
-respectively ::
-
- >>> np.iinfo(int) # Bounds of the default integer on this system.
- iinfo(min=-9223372036854775808, max=9223372036854775807, dtype=int64)
- >>> np.iinfo(np.int32) # Bounds of a 32-bit integer
- iinfo(min=-2147483648, max=2147483647, dtype=int32)
- >>> np.iinfo(np.int64) # Bounds of a 64-bit integer
- iinfo(min=-9223372036854775808, max=9223372036854775807, dtype=int64)
-
-If 64-bit integers are still too small the result may be cast to a
-floating point number. Floating point numbers offer a larger, but inexact,
-range of possible values.
-
- >>> np.power(100, 100, dtype=np.int64) # Incorrect even with 64-bit int
- 0
- >>> np.power(100, 100, dtype=np.float64)
- 1e+200
-
-Extended Precision
-==================
-
-Python's floating-point numbers are usually 64-bit floating-point numbers,
-nearly equivalent to ``np.float64``. In some unusual situations it may be
-useful to use floating-point numbers with more precision. Whether this
-is possible in numpy depends on the hardware and on the development
-environment: specifically, x86 machines provide hardware floating-point
-with 80-bit precision, and while most C compilers provide this as their
-``long double`` type, MSVC (standard for Windows builds) makes
-``long double`` identical to ``double`` (64 bits). NumPy makes the
-compiler's ``long double`` available as ``np.longdouble`` (and
-``np.clongdouble`` for the complex numbers). You can find out what your
-numpy provides with ``np.finfo(np.longdouble)``.
-
-NumPy does not provide a dtype with more precision than C's
-``long double``\\; in particular, the 128-bit IEEE quad precision
-data type (FORTRAN's ``REAL*16``\\) is not available.
-
-For efficient memory alignment, ``np.longdouble`` is usually stored
-padded with zero bits, either to 96 or 128 bits. Which is more efficient
-depends on hardware and development environment; typically on 32-bit
-systems they are padded to 96 bits, while on 64-bit systems they are
-typically padded to 128 bits. ``np.longdouble`` is padded to the system
-default; ``np.float96`` and ``np.float128`` are provided for users who
-want specific padding. In spite of the names, ``np.float96`` and
-``np.float128`` provide only as much precision as ``np.longdouble``,
-that is, 80 bits on most x86 machines and 64 bits in standard
-Windows builds.
-
-Be warned that even if ``np.longdouble`` offers more precision than
-python ``float``, it is easy to lose that extra precision, since
-python often forces values to pass through ``float``. For example,
-the ``%`` formatting operator requires its arguments to be converted
-to standard python types, and it is therefore impossible to preserve
-extended precision even if many decimal places are requested. It can
-be useful to test your code with the value
-``1 + np.finfo(np.longdouble).eps``.
-
-"""
diff --git a/numpy/doc/broadcasting.py b/numpy/doc/broadcasting.py
deleted file mode 100644
index 4ac1fd129..000000000
--- a/numpy/doc/broadcasting.py
+++ /dev/null
@@ -1,180 +0,0 @@
-"""
-========================
-Broadcasting over arrays
-========================
-
-.. note::
- See `this article
- <https://numpy.org/devdocs/user/theory.broadcasting.html>`_
- for illustrations of broadcasting concepts.
-
-
-The term broadcasting describes how numpy treats arrays with different
-shapes during arithmetic operations. Subject to certain constraints,
-the smaller array is "broadcast" across the larger array so that they
-have compatible shapes. Broadcasting provides a means of vectorizing
-array operations so that looping occurs in C instead of Python. It does
-this without making needless copies of data and usually leads to
-efficient algorithm implementations. There are, however, cases where
-broadcasting is a bad idea because it leads to inefficient use of memory
-that slows computation.
-
-NumPy operations are usually done on pairs of arrays on an
-element-by-element basis. In the simplest case, the two arrays must
-have exactly the same shape, as in the following example:
-
- >>> a = np.array([1.0, 2.0, 3.0])
- >>> b = np.array([2.0, 2.0, 2.0])
- >>> a * b
- array([ 2., 4., 6.])
-
-NumPy's broadcasting rule relaxes this constraint when the arrays'
-shapes meet certain constraints. The simplest broadcasting example occurs
-when an array and a scalar value are combined in an operation:
-
->>> a = np.array([1.0, 2.0, 3.0])
->>> b = 2.0
->>> a * b
-array([ 2., 4., 6.])
-
-The result is equivalent to the previous example where ``b`` was an array.
-We can think of the scalar ``b`` being *stretched* during the arithmetic
-operation into an array with the same shape as ``a``. The new elements in
-``b`` are simply copies of the original scalar. The stretching analogy is
-only conceptual. NumPy is smart enough to use the original scalar value
-without actually making copies so that broadcasting operations are as
-memory and computationally efficient as possible.
-
-The code in the second example is more efficient than that in the first
-because broadcasting moves less memory around during the multiplication
-(``b`` is a scalar rather than an array).
-
-General Broadcasting Rules
-==========================
-When operating on two arrays, NumPy compares their shapes element-wise.
-It starts with the trailing (i.e. rightmost) dimensions and works its
-way left. Two dimensions are compatible when
-
-1) they are equal, or
-2) one of them is 1
-
-If these conditions are not met, a
-``ValueError: operands could not be broadcast together`` exception is
-thrown, indicating that the arrays have incompatible shapes. The size of
-the resulting array is the size that is not 1 along each axis of the inputs.
-
-Arrays do not need to have the same *number* of dimensions. For example,
-if you have a ``256x256x3`` array of RGB values, and you want to scale
-each color in the image by a different value, you can multiply the image
-by a one-dimensional array with 3 values. Lining up the sizes of the
-trailing axes of these arrays according to the broadcast rules, shows that
-they are compatible::
-
- Image (3d array): 256 x 256 x 3
- Scale (1d array): 3
- Result (3d array): 256 x 256 x 3
-
-When either of the dimensions compared is one, the other is
-used. In other words, dimensions with size 1 are stretched or "copied"
-to match the other.
-
-In the following example, both the ``A`` and ``B`` arrays have axes with
-length one that are expanded to a larger size during the broadcast
-operation::
-
- A (4d array): 8 x 1 x 6 x 1
- B (3d array): 7 x 1 x 5
- Result (4d array): 8 x 7 x 6 x 5
-
-Here are some more examples::
-
- A (2d array): 5 x 4
- B (1d array): 1
- Result (2d array): 5 x 4
-
- A (2d array): 5 x 4
- B (1d array): 4
- Result (2d array): 5 x 4
-
- A (3d array): 15 x 3 x 5
- B (3d array): 15 x 1 x 5
- Result (3d array): 15 x 3 x 5
-
- A (3d array): 15 x 3 x 5
- B (2d array): 3 x 5
- Result (3d array): 15 x 3 x 5
-
- A (3d array): 15 x 3 x 5
- B (2d array): 3 x 1
- Result (3d array): 15 x 3 x 5
-
-Here are examples of shapes that do not broadcast::
-
- A (1d array): 3
- B (1d array): 4 # trailing dimensions do not match
-
- A (2d array): 2 x 1
- B (3d array): 8 x 4 x 3 # second from last dimensions mismatched
-
-An example of broadcasting in practice::
-
- >>> x = np.arange(4)
- >>> xx = x.reshape(4,1)
- >>> y = np.ones(5)
- >>> z = np.ones((3,4))
-
- >>> x.shape
- (4,)
-
- >>> y.shape
- (5,)
-
- >>> x + y
- ValueError: operands could not be broadcast together with shapes (4,) (5,)
-
- >>> xx.shape
- (4, 1)
-
- >>> y.shape
- (5,)
-
- >>> (xx + y).shape
- (4, 5)
-
- >>> xx + y
- array([[ 1., 1., 1., 1., 1.],
- [ 2., 2., 2., 2., 2.],
- [ 3., 3., 3., 3., 3.],
- [ 4., 4., 4., 4., 4.]])
-
- >>> x.shape
- (4,)
-
- >>> z.shape
- (3, 4)
-
- >>> (x + z).shape
- (3, 4)
-
- >>> x + z
- array([[ 1., 2., 3., 4.],
- [ 1., 2., 3., 4.],
- [ 1., 2., 3., 4.]])
-
-Broadcasting provides a convenient way of taking the outer product (or
-any other outer operation) of two arrays. The following example shows an
-outer addition operation of two 1-d arrays::
-
- >>> a = np.array([0.0, 10.0, 20.0, 30.0])
- >>> b = np.array([1.0, 2.0, 3.0])
- >>> a[:, np.newaxis] + b
- array([[ 1., 2., 3.],
- [ 11., 12., 13.],
- [ 21., 22., 23.],
- [ 31., 32., 33.]])
-
-Here the ``newaxis`` index operator inserts a new axis into ``a``,
-making it a two-dimensional ``4x1`` array. Combining the ``4x1`` array
-with ``b``, which has shape ``(3,)``, yields a ``4x3`` array.
-
-"""
diff --git a/numpy/doc/byteswapping.py b/numpy/doc/byteswapping.py
deleted file mode 100644
index fe9461977..000000000
--- a/numpy/doc/byteswapping.py
+++ /dev/null
@@ -1,155 +0,0 @@
-"""
-
-=============================
- Byteswapping and byte order
-=============================
-
-Introduction to byte ordering and ndarrays
-==========================================
-
-The ``ndarray`` is an object that provide a python array interface to data
-in memory.
-
-It often happens that the memory that you want to view with an array is
-not of the same byte ordering as the computer on which you are running
-Python.
-
-For example, I might be working on a computer with a little-endian CPU -
-such as an Intel Pentium, but I have loaded some data from a file
-written by a computer that is big-endian. Let's say I have loaded 4
-bytes from a file written by a Sun (big-endian) computer. I know that
-these 4 bytes represent two 16-bit integers. On a big-endian machine, a
-two-byte integer is stored with the Most Significant Byte (MSB) first,
-and then the Least Significant Byte (LSB). Thus the bytes are, in memory order:
-
-#. MSB integer 1
-#. LSB integer 1
-#. MSB integer 2
-#. LSB integer 2
-
-Let's say the two integers were in fact 1 and 770. Because 770 = 256 *
-3 + 2, the 4 bytes in memory would contain respectively: 0, 1, 3, 2.
-The bytes I have loaded from the file would have these contents:
-
->>> big_end_buffer = bytearray([0,1,3,2])
->>> big_end_buffer
-bytearray(b'\\x00\\x01\\x03\\x02')
-
-We might want to use an ``ndarray`` to access these integers. In that
-case, we can create an array around this memory, and tell numpy that
-there are two integers, and that they are 16 bit and big-endian:
-
->>> import numpy as np
->>> big_end_arr = np.ndarray(shape=(2,),dtype='>i2', buffer=big_end_buffer)
->>> big_end_arr[0]
-1
->>> big_end_arr[1]
-770
-
-Note the array ``dtype`` above of ``>i2``. The ``>`` means 'big-endian'
-(``<`` is little-endian) and ``i2`` means 'signed 2-byte integer'. For
-example, if our data represented a single unsigned 4-byte little-endian
-integer, the dtype string would be ``<u4``.
-
-In fact, why don't we try that?
-
->>> little_end_u4 = np.ndarray(shape=(1,),dtype='<u4', buffer=big_end_buffer)
->>> little_end_u4[0] == 1 * 256**1 + 3 * 256**2 + 2 * 256**3
-True
-
-Returning to our ``big_end_arr`` - in this case our underlying data is
-big-endian (data endianness) and we've set the dtype to match (the dtype
-is also big-endian). However, sometimes you need to flip these around.
-
-.. warning::
-
- Scalars currently do not include byte order information, so extracting
- a scalar from an array will return an integer in native byte order.
- Hence:
-
- >>> big_end_arr[0].dtype.byteorder == little_end_u4[0].dtype.byteorder
- True
-
-Changing byte ordering
-======================
-
-As you can imagine from the introduction, there are two ways you can
-affect the relationship between the byte ordering of the array and the
-underlying memory it is looking at:
-
-* Change the byte-ordering information in the array dtype so that it
- interprets the underlying data as being in a different byte order.
- This is the role of ``arr.newbyteorder()``
-* Change the byte-ordering of the underlying data, leaving the dtype
- interpretation as it was. This is what ``arr.byteswap()`` does.
-
-The common situations in which you need to change byte ordering are:
-
-#. Your data and dtype endianness don't match, and you want to change
- the dtype so that it matches the data.
-#. Your data and dtype endianness don't match, and you want to swap the
- data so that they match the dtype
-#. Your data and dtype endianness match, but you want the data swapped
- and the dtype to reflect this
-
-Data and dtype endianness don't match, change dtype to match data
------------------------------------------------------------------
-
-We make something where they don't match:
-
->>> wrong_end_dtype_arr = np.ndarray(shape=(2,),dtype='<i2', buffer=big_end_buffer)
->>> wrong_end_dtype_arr[0]
-256
-
-The obvious fix for this situation is to change the dtype so it gives
-the correct endianness:
-
->>> fixed_end_dtype_arr = wrong_end_dtype_arr.newbyteorder()
->>> fixed_end_dtype_arr[0]
-1
-
-Note the array has not changed in memory:
-
->>> fixed_end_dtype_arr.tobytes() == big_end_buffer
-True
-
-Data and type endianness don't match, change data to match dtype
-----------------------------------------------------------------
-
-You might want to do this if you need the data in memory to be a certain
-ordering. For example you might be writing the memory out to a file
-that needs a certain byte ordering.
-
->>> fixed_end_mem_arr = wrong_end_dtype_arr.byteswap()
->>> fixed_end_mem_arr[0]
-1
-
-Now the array *has* changed in memory:
-
->>> fixed_end_mem_arr.tobytes() == big_end_buffer
-False
-
-Data and dtype endianness match, swap data and dtype
-----------------------------------------------------
-
-You may have a correctly specified array dtype, but you need the array
-to have the opposite byte order in memory, and you want the dtype to
-match so the array values make sense. In this case you just do both of
-the previous operations:
-
->>> swapped_end_arr = big_end_arr.byteswap().newbyteorder()
->>> swapped_end_arr[0]
-1
->>> swapped_end_arr.tobytes() == big_end_buffer
-False
-
-An easier way of casting the data to a specific dtype and byte ordering
-can be achieved with the ndarray astype method:
-
->>> swapped_end_arr = big_end_arr.astype('<i2')
->>> swapped_end_arr[0]
-1
->>> swapped_end_arr.tobytes() == big_end_buffer
-False
-
-"""
diff --git a/numpy/doc/constants.py b/numpy/doc/constants.py
index 2c629ad33..128493d90 100644
--- a/numpy/doc/constants.py
+++ b/numpy/doc/constants.py
@@ -135,10 +135,6 @@ add_newdoc('numpy', 'newaxis',
"""
A convenient alias for None, useful for indexing arrays.
- See Also
- --------
- `numpy.doc.indexing`
-
Examples
--------
>>> newaxis is None
diff --git a/numpy/doc/creation.py b/numpy/doc/creation.py
deleted file mode 100644
index 067f8bb33..000000000
--- a/numpy/doc/creation.py
+++ /dev/null
@@ -1,143 +0,0 @@
-"""
-==============
-Array Creation
-==============
-
-Introduction
-============
-
-There are 5 general mechanisms for creating arrays:
-
-1) Conversion from other Python structures (e.g., lists, tuples)
-2) Intrinsic numpy array creation objects (e.g., arange, ones, zeros,
- etc.)
-3) Reading arrays from disk, either from standard or custom formats
-4) Creating arrays from raw bytes through the use of strings or buffers
-5) Use of special library functions (e.g., random)
-
-This section will not cover means of replicating, joining, or otherwise
-expanding or mutating existing arrays. Nor will it cover creating object
-arrays or structured arrays. Both of those are covered in their own sections.
-
-Converting Python array_like Objects to NumPy Arrays
-====================================================
-
-In general, numerical data arranged in an array-like structure in Python can
-be converted to arrays through the use of the array() function. The most
-obvious examples are lists and tuples. See the documentation for array() for
-details for its use. Some objects may support the array-protocol and allow
-conversion to arrays this way. A simple way to find out if the object can be
-converted to a numpy array using array() is simply to try it interactively and
-see if it works! (The Python Way).
-
-Examples: ::
-
- >>> x = np.array([2,3,1,0])
- >>> x = np.array([2, 3, 1, 0])
- >>> x = np.array([[1,2.0],[0,0],(1+1j,3.)]) # note mix of tuple and lists,
- and types
- >>> x = np.array([[ 1.+0.j, 2.+0.j], [ 0.+0.j, 0.+0.j], [ 1.+1.j, 3.+0.j]])
-
-Intrinsic NumPy Array Creation
-==============================
-
-NumPy has built-in functions for creating arrays from scratch:
-
-zeros(shape) will create an array filled with 0 values with the specified
-shape. The default dtype is float64. ::
-
- >>> np.zeros((2, 3))
- array([[ 0., 0., 0.], [ 0., 0., 0.]])
-
-ones(shape) will create an array filled with 1 values. It is identical to
-zeros in all other respects.
-
-arange() will create arrays with regularly incrementing values. Check the
-docstring for complete information on the various ways it can be used. A few
-examples will be given here: ::
-
- >>> np.arange(10)
- array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
- >>> np.arange(2, 10, dtype=float)
- array([ 2., 3., 4., 5., 6., 7., 8., 9.])
- >>> np.arange(2, 3, 0.1)
- array([ 2. , 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8, 2.9])
-
-Note that there are some subtleties regarding the last usage that the user
-should be aware of that are described in the arange docstring.
-
-linspace() will create arrays with a specified number of elements, and
-spaced equally between the specified beginning and end values. For
-example: ::
-
- >>> np.linspace(1., 4., 6)
- array([ 1. , 1.6, 2.2, 2.8, 3.4, 4. ])
-
-The advantage of this creation function is that one can guarantee the
-number of elements and the starting and end point, which arange()
-generally will not do for arbitrary start, stop, and step values.
-
-indices() will create a set of arrays (stacked as a one-higher dimensioned
-array), one per dimension with each representing variation in that dimension.
-An example illustrates much better than a verbal description: ::
-
- >>> np.indices((3,3))
- array([[[0, 0, 0], [1, 1, 1], [2, 2, 2]], [[0, 1, 2], [0, 1, 2], [0, 1, 2]]])
-
-This is particularly useful for evaluating functions of multiple dimensions on
-a regular grid.
-
-Reading Arrays From Disk
-========================
-
-This is presumably the most common case of large array creation. The details,
-of course, depend greatly on the format of data on disk and so this section
-can only give general pointers on how to handle various formats.
-
-Standard Binary Formats
------------------------
-
-Various fields have standard formats for array data. The following lists the
-ones with known python libraries to read them and return numpy arrays (there
-may be others for which it is possible to read and convert to numpy arrays so
-check the last section as well)
-::
-
- HDF5: h5py
- FITS: Astropy
-
-Examples of formats that cannot be read directly but for which it is not hard to
-convert are those formats supported by libraries like PIL (able to read and
-write many image formats such as jpg, png, etc).
-
-Common ASCII Formats
-------------------------
-
-Comma Separated Value files (CSV) are widely used (and an export and import
-option for programs like Excel). There are a number of ways of reading these
-files in Python. There are CSV functions in Python and functions in pylab
-(part of matplotlib).
-
-More generic ascii files can be read using the io package in scipy.
-
-Custom Binary Formats
----------------------
-
-There are a variety of approaches one can use. If the file has a relatively
-simple format then one can write a simple I/O library and use the numpy
-fromfile() function and .tofile() method to read and write numpy arrays
-directly (mind your byteorder though!) If a good C or C++ library exists that
-read the data, one can wrap that library with a variety of techniques though
-that certainly is much more work and requires significantly more advanced
-knowledge to interface with C or C++.
-
-Use of Special Libraries
-------------------------
-
-There are libraries that can be used to generate arrays for special purposes
-and it isn't possible to enumerate all of them. The most common uses are use
-of the many array generation functions in random that can generate arrays of
-random values, and some utility functions to generate special matrices (e.g.
-diagonal).
-
-"""
diff --git a/numpy/doc/dispatch.py b/numpy/doc/dispatch.py
deleted file mode 100644
index af70ed836..000000000
--- a/numpy/doc/dispatch.py
+++ /dev/null
@@ -1,271 +0,0 @@
-""".. _dispatch_mechanism:
-
-Numpy's dispatch mechanism, introduced in numpy version v1.16 is the
-recommended approach for writing custom N-dimensional array containers that are
-compatible with the numpy API and provide custom implementations of numpy
-functionality. Applications include `dask <http://dask.pydata.org>`_ arrays, an
-N-dimensional array distributed across multiple nodes, and `cupy
-<https://docs-cupy.chainer.org/en/stable/>`_ arrays, an N-dimensional array on
-a GPU.
-
-To get a feel for writing custom array containers, we'll begin with a simple
-example that has rather narrow utility but illustrates the concepts involved.
-
->>> import numpy as np
->>> class DiagonalArray:
-... def __init__(self, N, value):
-... self._N = N
-... self._i = value
-... def __repr__(self):
-... return f"{self.__class__.__name__}(N={self._N}, value={self._i})"
-... def __array__(self):
-... return self._i * np.eye(self._N)
-...
-
-Our custom array can be instantiated like:
-
->>> arr = DiagonalArray(5, 1)
->>> arr
-DiagonalArray(N=5, value=1)
-
-We can convert to a numpy array using :func:`numpy.array` or
-:func:`numpy.asarray`, which will call its ``__array__`` method to obtain a
-standard ``numpy.ndarray``.
-
->>> np.asarray(arr)
-array([[1., 0., 0., 0., 0.],
- [0., 1., 0., 0., 0.],
- [0., 0., 1., 0., 0.],
- [0., 0., 0., 1., 0.],
- [0., 0., 0., 0., 1.]])
-
-If we operate on ``arr`` with a numpy function, numpy will again use the
-``__array__`` interface to convert it to an array and then apply the function
-in the usual way.
-
->>> np.multiply(arr, 2)
-array([[2., 0., 0., 0., 0.],
- [0., 2., 0., 0., 0.],
- [0., 0., 2., 0., 0.],
- [0., 0., 0., 2., 0.],
- [0., 0., 0., 0., 2.]])
-
-
-Notice that the return type is a standard ``numpy.ndarray``.
-
->>> type(arr)
-numpy.ndarray
-
-How can we pass our custom array type through this function? Numpy allows a
-class to indicate that it would like to handle computations in a custom-defined
-way through the interfaces ``__array_ufunc__`` and ``__array_function__``. Let's
-take one at a time, starting with ``_array_ufunc__``. This method covers
-:ref:`ufuncs`, a class of functions that includes, for example,
-:func:`numpy.multiply` and :func:`numpy.sin`.
-
-The ``__array_ufunc__`` receives:
-
-- ``ufunc``, a function like ``numpy.multiply``
-- ``method``, a string, differentiating between ``numpy.multiply(...)`` and
- variants like ``numpy.multiply.outer``, ``numpy.multiply.accumulate``, and so
- on. For the common case, ``numpy.multiply(...)``, ``method == '__call__'``.
-- ``inputs``, which could be a mixture of different types
-- ``kwargs``, keyword arguments passed to the function
-
-For this example we will only handle the method ``__call__``.
-
->>> from numbers import Number
->>> class DiagonalArray:
-... def __init__(self, N, value):
-... self._N = N
-... self._i = value
-... def __repr__(self):
-... return f"{self.__class__.__name__}(N={self._N}, value={self._i})"
-... def __array__(self):
-... return self._i * np.eye(self._N)
-... def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
-... if method == '__call__':
-... N = None
-... scalars = []
-... for input in inputs:
-... if isinstance(input, Number):
-... scalars.append(input)
-... elif isinstance(input, self.__class__):
-... scalars.append(input._i)
-... if N is not None:
-... if N != self._N:
-... raise TypeError("inconsistent sizes")
-... else:
-... N = self._N
-... else:
-... return NotImplemented
-... return self.__class__(N, ufunc(*scalars, **kwargs))
-... else:
-... return NotImplemented
-...
-
-Now our custom array type passes through numpy functions.
-
->>> arr = DiagonalArray(5, 1)
->>> np.multiply(arr, 3)
-DiagonalArray(N=5, value=3)
->>> np.add(arr, 3)
-DiagonalArray(N=5, value=4)
->>> np.sin(arr)
-DiagonalArray(N=5, value=0.8414709848078965)
-
-At this point ``arr + 3`` does not work.
-
->>> arr + 3
-TypeError: unsupported operand type(s) for *: 'DiagonalArray' and 'int'
-
-To support it, we need to define the Python interfaces ``__add__``, ``__lt__``,
-and so on to dispatch to the corresponding ufunc. We can achieve this
-conveniently by inheriting from the mixin
-:class:`~numpy.lib.mixins.NDArrayOperatorsMixin`.
-
->>> import numpy.lib.mixins
->>> class DiagonalArray(numpy.lib.mixins.NDArrayOperatorsMixin):
-... def __init__(self, N, value):
-... self._N = N
-... self._i = value
-... def __repr__(self):
-... return f"{self.__class__.__name__}(N={self._N}, value={self._i})"
-... def __array__(self):
-... return self._i * np.eye(self._N)
-... def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
-... if method == '__call__':
-... N = None
-... scalars = []
-... for input in inputs:
-... if isinstance(input, Number):
-... scalars.append(input)
-... elif isinstance(input, self.__class__):
-... scalars.append(input._i)
-... if N is not None:
-... if N != self._N:
-... raise TypeError("inconsistent sizes")
-... else:
-... N = self._N
-... else:
-... return NotImplemented
-... return self.__class__(N, ufunc(*scalars, **kwargs))
-... else:
-... return NotImplemented
-...
-
->>> arr = DiagonalArray(5, 1)
->>> arr + 3
-DiagonalArray(N=5, value=4)
->>> arr > 0
-DiagonalArray(N=5, value=True)
-
-Now let's tackle ``__array_function__``. We'll create dict that maps numpy
-functions to our custom variants.
-
->>> HANDLED_FUNCTIONS = {}
->>> class DiagonalArray(numpy.lib.mixins.NDArrayOperatorsMixin):
-... def __init__(self, N, value):
-... self._N = N
-... self._i = value
-... def __repr__(self):
-... return f"{self.__class__.__name__}(N={self._N}, value={self._i})"
-... def __array__(self):
-... return self._i * np.eye(self._N)
-... def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
-... if method == '__call__':
-... N = None
-... scalars = []
-... for input in inputs:
-... # In this case we accept only scalar numbers or DiagonalArrays.
-... if isinstance(input, Number):
-... scalars.append(input)
-... elif isinstance(input, self.__class__):
-... scalars.append(input._i)
-... if N is not None:
-... if N != self._N:
-... raise TypeError("inconsistent sizes")
-... else:
-... N = self._N
-... else:
-... return NotImplemented
-... return self.__class__(N, ufunc(*scalars, **kwargs))
-... else:
-... return NotImplemented
-... def __array_function__(self, func, types, args, kwargs):
-... if func not in HANDLED_FUNCTIONS:
-... return NotImplemented
-... # Note: this allows subclasses that don't override
-... # __array_function__ to handle DiagonalArray objects.
-... if not all(issubclass(t, self.__class__) for t in types):
-... return NotImplemented
-... return HANDLED_FUNCTIONS[func](*args, **kwargs)
-...
-
-A convenient pattern is to define a decorator ``implements`` that can be used
-to add functions to ``HANDLED_FUNCTIONS``.
-
->>> def implements(np_function):
-... "Register an __array_function__ implementation for DiagonalArray objects."
-... def decorator(func):
-... HANDLED_FUNCTIONS[np_function] = func
-... return func
-... return decorator
-...
-
-Now we write implementations of numpy functions for ``DiagonalArray``.
-For completeness, to support the usage ``arr.sum()`` add a method ``sum`` that
-calls ``numpy.sum(self)``, and the same for ``mean``.
-
->>> @implements(np.sum)
-... def sum(arr):
-... "Implementation of np.sum for DiagonalArray objects"
-... return arr._i * arr._N
-...
->>> @implements(np.mean)
-... def mean(arr):
-... "Implementation of np.mean for DiagonalArray objects"
-... return arr._i / arr._N
-...
->>> arr = DiagonalArray(5, 1)
->>> np.sum(arr)
-5
->>> np.mean(arr)
-0.2
-
-If the user tries to use any numpy functions not included in
-``HANDLED_FUNCTIONS``, a ``TypeError`` will be raised by numpy, indicating that
-this operation is not supported. For example, concatenating two
-``DiagonalArrays`` does not produce another diagonal array, so it is not
-supported.
-
->>> np.concatenate([arr, arr])
-TypeError: no implementation found for 'numpy.concatenate' on types that implement __array_function__: [<class '__main__.DiagonalArray'>]
-
-Additionally, our implementations of ``sum`` and ``mean`` do not accept the
-optional arguments that numpy's implementation does.
-
->>> np.sum(arr, axis=0)
-TypeError: sum() got an unexpected keyword argument 'axis'
-
-The user always has the option of converting to a normal ``numpy.ndarray`` with
-:func:`numpy.asarray` and using standard numpy from there.
-
->>> np.concatenate([np.asarray(arr), np.asarray(arr)])
-array([[1., 0., 0., 0., 0.],
- [0., 1., 0., 0., 0.],
- [0., 0., 1., 0., 0.],
- [0., 0., 0., 1., 0.],
- [0., 0., 0., 0., 1.],
- [1., 0., 0., 0., 0.],
- [0., 1., 0., 0., 0.],
- [0., 0., 1., 0., 0.],
- [0., 0., 0., 1., 0.],
- [0., 0., 0., 0., 1.]])
-
-Refer to the `dask source code <https://github.com/dask/dask>`_ and
-`cupy source code <https://github.com/cupy/cupy>`_ for more fully-worked
-examples of custom array containers.
-
-See also :doc:`NEP 18<neps:nep-0018-array-function-protocol>`.
-"""
diff --git a/numpy/doc/glossary.py b/numpy/doc/glossary.py
deleted file mode 100644
index 31130559b..000000000
--- a/numpy/doc/glossary.py
+++ /dev/null
@@ -1,475 +0,0 @@
-"""
-========
-Glossary
-========
-
-.. glossary::
-
- along an axis
- Axes are defined for arrays with more than one dimension. A
- 2-dimensional array has two corresponding axes: the first running
- vertically downwards across rows (axis 0), and the second running
- horizontally across columns (axis 1).
-
- Many operations can take place along one of these axes. For example,
- we can sum each row of an array, in which case we operate along
- columns, or axis 1::
-
- >>> x = np.arange(12).reshape((3,4))
-
- >>> x
- array([[ 0, 1, 2, 3],
- [ 4, 5, 6, 7],
- [ 8, 9, 10, 11]])
-
- >>> x.sum(axis=1)
- array([ 6, 22, 38])
-
- array
- A homogeneous container of numerical elements. Each element in the
- array occupies a fixed amount of memory (hence homogeneous), and
- can be a numerical element of a single type (such as float, int
- or complex) or a combination (such as ``(float, int, float)``). Each
- array has an associated data-type (or ``dtype``), which describes
- the numerical type of its elements::
-
- >>> x = np.array([1, 2, 3], float)
-
- >>> x
- array([ 1., 2., 3.])
-
- >>> x.dtype # floating point number, 64 bits of memory per element
- dtype('float64')
-
-
- # More complicated data type: each array element is a combination of
- # and integer and a floating point number
- >>> np.array([(1, 2.0), (3, 4.0)], dtype=[('x', int), ('y', float)])
- array([(1, 2.0), (3, 4.0)],
- dtype=[('x', '<i4'), ('y', '<f8')])
-
- Fast element-wise operations, called a :term:`ufunc`, operate on arrays.
-
- array_like
- Any sequence that can be interpreted as an ndarray. This includes
- nested lists, tuples, scalars and existing arrays.
-
- attribute
- A property of an object that can be accessed using ``obj.attribute``,
- e.g., ``shape`` is an attribute of an array::
-
- >>> x = np.array([1, 2, 3])
- >>> x.shape
- (3,)
-
- big-endian
- When storing a multi-byte value in memory as a sequence of bytes, the
- sequence addresses/sends/stores the most significant byte first (lowest
- address) and the least significant byte last (highest address). Common in
- micro-processors and used for transmission of data over network protocols.
-
- BLAS
- `Basic Linear Algebra Subprograms <https://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms>`_
-
- broadcast
- NumPy can do operations on arrays whose shapes are mismatched::
-
- >>> x = np.array([1, 2])
- >>> y = np.array([[3], [4]])
-
- >>> x
- array([1, 2])
-
- >>> y
- array([[3],
- [4]])
-
- >>> x + y
- array([[4, 5],
- [5, 6]])
-
- See `numpy.doc.broadcasting` for more information.
-
- C order
- See `row-major`
-
- column-major
- A way to represent items in a N-dimensional array in the 1-dimensional
- computer memory. In column-major order, the leftmost index "varies the
- fastest": for example the array::
-
- [[1, 2, 3],
- [4, 5, 6]]
-
- is represented in the column-major order as::
-
- [1, 4, 2, 5, 3, 6]
-
- Column-major order is also known as the Fortran order, as the Fortran
- programming language uses it.
-
- decorator
- An operator that transforms a function. For example, a ``log``
- decorator may be defined to print debugging information upon
- function execution::
-
- >>> def log(f):
- ... def new_logging_func(*args, **kwargs):
- ... print("Logging call with parameters:", args, kwargs)
- ... return f(*args, **kwargs)
- ...
- ... return new_logging_func
-
- Now, when we define a function, we can "decorate" it using ``log``::
-
- >>> @log
- ... def add(a, b):
- ... return a + b
-
- Calling ``add`` then yields:
-
- >>> add(1, 2)
- Logging call with parameters: (1, 2) {}
- 3
-
- dictionary
- Resembling a language dictionary, which provides a mapping between
- words and descriptions thereof, a Python dictionary is a mapping
- between two objects::
-
- >>> x = {1: 'one', 'two': [1, 2]}
-
- Here, `x` is a dictionary mapping keys to values, in this case
- the integer 1 to the string "one", and the string "two" to
- the list ``[1, 2]``. The values may be accessed using their
- corresponding keys::
-
- >>> x[1]
- 'one'
-
- >>> x['two']
- [1, 2]
-
- Note that dictionaries are not stored in any specific order. Also,
- most mutable (see *immutable* below) objects, such as lists, may not
- be used as keys.
-
- For more information on dictionaries, read the
- `Python tutorial <https://docs.python.org/tutorial/>`_.
-
- field
- In a :term:`structured data type`, each sub-type is called a `field`.
- The `field` has a name (a string), a type (any valid dtype), and
- an optional `title`. See :ref:`arrays.dtypes`
-
- Fortran order
- See `column-major`
-
- flattened
- Collapsed to a one-dimensional array. See `numpy.ndarray.flatten`
- for details.
-
- homogeneous
- Describes a block of memory comprised of blocks, each block comprised of
- items and of the same size, and blocks are interpreted in exactly the
- same way. In the simplest case each block contains a single item, for
- instance int32 or float64.
-
- immutable
- An object that cannot be modified after execution is called
- immutable. Two common examples are strings and tuples.
-
- instance
- A class definition gives the blueprint for constructing an object::
-
- >>> class House:
- ... wall_colour = 'white'
-
- Yet, we have to *build* a house before it exists::
-
- >>> h = House() # build a house
-
- Now, ``h`` is called a ``House`` instance. An instance is therefore
- a specific realisation of a class.
-
- iterable
- A sequence that allows "walking" (iterating) over items, typically
- using a loop such as::
-
- >>> x = [1, 2, 3]
- >>> [item**2 for item in x]
- [1, 4, 9]
-
-      It is often used in combination with ``enumerate``::
-
-        >>> keys = ['a','b','c']
- >>> for n, k in enumerate(keys):
- ... print("Key %d: %s" % (n, k))
- ...
- Key 0: a
- Key 1: b
- Key 2: c
-
- itemsize
- The size of the dtype element in bytes.
-
- list
- A Python container that can hold any number of objects or items.
- The items do not have to be of the same type, and can even be
- lists themselves::
-
- >>> x = [2, 2.0, "two", [2, 2.0]]
-
-      The list `x` contains 4 items, each of which can be accessed individually::
-
- >>> x[2] # the string 'two'
- 'two'
-
- >>> x[3] # a list, containing an integer 2 and a float 2.0
- [2, 2.0]
-
- It is also possible to select more than one item at a time,
- using *slicing*::
-
- >>> x[0:2] # or, equivalently, x[:2]
- [2, 2.0]
-
- In code, arrays are often conveniently expressed as nested lists::
-
-
- >>> np.array([[1, 2], [3, 4]])
- array([[1, 2],
- [3, 4]])
-
- For more information, read the section on lists in the `Python
- tutorial <https://docs.python.org/tutorial/>`_. For a mapping
- type (key-value), see *dictionary*.
-
- little-endian
- When storing a multi-byte value in memory as a sequence of bytes, the
- sequence addresses/sends/stores the least significant byte first (lowest
- address) and the most significant byte last (highest address). Common in
- x86 processors.
-
- mask
- A boolean array, used to select only certain elements for an operation::
-
- >>> x = np.arange(5)
- >>> x
- array([0, 1, 2, 3, 4])
-
- >>> mask = (x > 2)
- >>> mask
- array([False, False, False, True, True])
-
- >>> x[mask] = -1
- >>> x
- array([ 0, 1, 2, -1, -1])
-
- masked array
-      Array that suppresses values indicated by a mask::
-
- >>> x = np.ma.masked_array([np.nan, 2, np.nan], [True, False, True])
- >>> x
- masked_array(data = [-- 2.0 --],
- mask = [ True False True],
- fill_value = 1e+20)
-
- >>> x + [1, 2, 3]
- masked_array(data = [-- 4.0 --],
- mask = [ True False True],
- fill_value = 1e+20)
-
-
- Masked arrays are often used when operating on arrays containing
- missing or invalid entries.
-
- matrix
- A 2-dimensional ndarray that preserves its two-dimensional nature
- throughout operations. It has certain special operations, such as ``*``
- (matrix multiplication) and ``**`` (matrix power), defined::
-
- >>> x = np.mat([[1, 2], [3, 4]])
- >>> x
- matrix([[1, 2],
- [3, 4]])
-
- >>> x**2
- matrix([[ 7, 10],
- [15, 22]])
-
- method
- A function associated with an object. For example, each ndarray has a
- method called ``repeat``::
-
- >>> x = np.array([1, 2, 3])
- >>> x.repeat(2)
- array([1, 1, 2, 2, 3, 3])
-
- ndarray
- See *array*.
-
- record array
- An :term:`ndarray` with :term:`structured data type` which has been
- subclassed as ``np.recarray`` and whose dtype is of type ``np.record``,
- making the fields of its data type to be accessible by attribute.
-
- reference
- If ``a`` is a reference to ``b``, then ``(a is b) == True``. Therefore,
- ``a`` and ``b`` are different names for the same Python object.
-
- row-major
-      A way to represent items in an N-dimensional array in the 1-dimensional
- computer memory. In row-major order, the rightmost index "varies
- the fastest": for example the array::
-
- [[1, 2, 3],
- [4, 5, 6]]
-
- is represented in the row-major order as::
-
- [1, 2, 3, 4, 5, 6]
-
- Row-major order is also known as the C order, as the C programming
- language uses it. New NumPy arrays are by default in row-major order.
-
- self
- Often seen in method signatures, ``self`` refers to the instance
- of the associated class. For example:
-
- >>> class Paintbrush:
- ... color = 'blue'
- ...
- ... def paint(self):
- ... print("Painting the city %s!" % self.color)
- ...
- >>> p = Paintbrush()
- >>> p.color = 'red'
- >>> p.paint() # self refers to 'p'
- Painting the city red!
-
- slice
- Used to select only certain elements from a sequence:
-
- >>> x = range(5)
- >>> x
- [0, 1, 2, 3, 4]
-
- >>> x[1:3] # slice from 1 to 3 (excluding 3 itself)
- [1, 2]
-
- >>> x[1:5:2] # slice from 1 to 5, but skipping every second element
- [1, 3]
-
- >>> x[::-1] # slice a sequence in reverse
- [4, 3, 2, 1, 0]
-
-      Arrays may have more than one dimension, each of which can be sliced
- individually:
-
- >>> x = np.array([[1, 2], [3, 4]])
- >>> x
- array([[1, 2],
- [3, 4]])
-
- >>> x[:, 1]
- array([2, 4])
-
- structure
- See :term:`structured data type`
-
- structured data type
- A data type composed of other datatypes
-
- subarray data type
- A :term:`structured data type` may contain a :term:`ndarray` with its
- own dtype and shape:
-
- >>> dt = np.dtype([('a', np.int32), ('b', np.float32, (3,))])
- >>> np.zeros(3, dtype=dt)
- array([(0, [0., 0., 0.]), (0, [0., 0., 0.]), (0, [0., 0., 0.])],
- dtype=[('a', '<i4'), ('b', '<f4', (3,))])
-
- title
- In addition to field names, structured array fields may have an
- associated :ref:`title <titles>` which is an alias to the name and is
- commonly used for plotting.
-
- tuple
- A sequence that may contain a variable number of types of any
- kind. A tuple is immutable, i.e., once constructed it cannot be
- changed. Similar to a list, it can be indexed and sliced::
-
- >>> x = (1, 'one', [1, 2])
- >>> x
- (1, 'one', [1, 2])
-
- >>> x[0]
- 1
-
- >>> x[:2]
- (1, 'one')
-
- A useful concept is "tuple unpacking", which allows variables to
- be assigned to the contents of a tuple::
-
- >>> x, y = (1, 2)
- >>> x, y = 1, 2
-
- This is often used when a function returns multiple values:
-
- >>> def return_many():
- ... return 1, 'alpha', None
-
- >>> a, b, c = return_many()
- >>> a, b, c
- (1, 'alpha', None)
-
- >>> a
- 1
- >>> b
- 'alpha'
-
- ufunc
- Universal function. A fast element-wise, :term:`vectorized
- <vectorization>` array operation. Examples include ``add``, ``sin`` and
- ``logical_or``.
-
- vectorization
- Optimizing a looping block by specialized code. In a traditional sense,
- vectorization performs the same operation on multiple elements with
- fixed strides between them via specialized hardware. Compilers know how
- to take advantage of well-constructed loops to implement such
- optimizations. NumPy uses :ref:`vectorization <whatis-vectorization>`
- to mean any optimization via specialized code performing the same
- operations on multiple elements, typically achieving speedups by
- avoiding some of the overhead in looking up and converting the elements.
-
- view
- An array that does not own its data, but refers to another array's
- data instead. For example, we may create a view that only shows
- every second element of another array::
-
- >>> x = np.arange(5)
- >>> x
- array([0, 1, 2, 3, 4])
-
- >>> y = x[::2]
- >>> y
- array([0, 2, 4])
-
- >>> x[0] = 3 # changing x changes y as well, since y is a view on x
- >>> y
- array([3, 2, 4])
-
- wrapper
- Python is a high-level (highly abstracted, or English-like) language.
- This abstraction comes at a price in execution speed, and sometimes
- it becomes necessary to use lower level languages to do fast
- computations. A wrapper is code that provides a bridge between
- high and the low level languages, allowing, e.g., Python to execute
- code written in C or Fortran.
-
- Examples include ctypes, SWIG and Cython (which wraps C and C++)
- and f2py (which wraps Fortran).
-
-"""
diff --git a/numpy/doc/indexing.py b/numpy/doc/indexing.py
deleted file mode 100644
index c7dda2790..000000000
--- a/numpy/doc/indexing.py
+++ /dev/null
@@ -1,456 +0,0 @@
-"""
-==============
-Array indexing
-==============
-
-Array indexing refers to any use of the square brackets ([]) to index
-array values. There are many options to indexing, which give numpy
-indexing great power, but with power comes some complexity and the
-potential for confusion. This section is just an overview of the
-various options and issues related to indexing. Aside from single
-element indexing, the details on most of these options are to be
-found in related sections.
-
-Assignment vs referencing
-=========================
-
-Most of the following examples show the use of indexing when
-referencing data in an array. The examples work just as well
-when assigning to an array. See the section at the end for
-specific examples and explanations on how assignments work.
-
-Single element indexing
-=======================
-
-Single element indexing for a 1-D array is what one expects. It works
-exactly like that for other standard Python sequences. It is 0-based,
-and accepts negative indices for indexing from the end of the array. ::
-
- >>> x = np.arange(10)
- >>> x[2]
- 2
- >>> x[-2]
- 8
-
-Unlike lists and tuples, numpy arrays support multidimensional indexing
-for multidimensional arrays. That means that it is not necessary to
-separate each dimension's index into its own set of square brackets. ::
-
- >>> x.shape = (2,5) # now x is 2-dimensional
- >>> x[1,3]
- 8
- >>> x[1,-1]
- 9
-
-Note that if one indexes a multidimensional array with fewer indices
-than dimensions, one gets a subdimensional array. For example: ::
-
- >>> x[0]
- array([0, 1, 2, 3, 4])
-
-That is, each index specified selects the array corresponding to the
-rest of the dimensions selected. In the above example, choosing 0
-means that the remaining dimension of length 5 is being left unspecified,
-and that what is returned is an array of that dimensionality and size.
-It must be noted that the returned array is not a copy of the original,
-but points to the same values in memory as does the original array.
-In this case, the 1-D array at the first position (0) is returned.
-So using a single index on the returned array, results in a single
-element being returned. That is: ::
-
- >>> x[0][2]
- 2
-
-So note that ``x[0,2] = x[0][2]`` though the second case is more
-inefficient as a new temporary array is created after the first index
-that is subsequently indexed by 2.
-
-Note to those used to IDL or Fortran memory order as it relates to
-indexing. NumPy uses C-order indexing. That means that the last
-index usually represents the most rapidly changing memory location,
-unlike Fortran or IDL, where the first index represents the most
-rapidly changing location in memory. This difference represents a
-great potential for confusion.
-
-Other indexing options
-======================
-
-It is possible to slice and stride arrays to extract arrays of the
-same number of dimensions, but of different sizes than the original.
-The slicing and striding works exactly the same way it does for lists
-and tuples except that they can be applied to multiple dimensions as
-well. A few examples illustrates best: ::
-
- >>> x = np.arange(10)
- >>> x[2:5]
- array([2, 3, 4])
- >>> x[:-7]
- array([0, 1, 2])
- >>> x[1:7:2]
- array([1, 3, 5])
- >>> y = np.arange(35).reshape(5,7)
- >>> y[1:5:2,::3]
- array([[ 7, 10, 13],
- [21, 24, 27]])
-
-Note that slices of arrays do not copy the internal array data but
-only produce new views of the original data. This is different from
-list or tuple slicing and an explicit ``copy()`` is recommended if
-the original data is not required anymore.
-
-It is possible to index arrays with other arrays for the purposes of
-selecting lists of values out of arrays into new arrays. There are
-two different ways of accomplishing this. One uses one or more arrays
-of index values. The other involves giving a boolean array of the proper
-shape to indicate the values to be selected. Index arrays are a very
-powerful tool that allow one to avoid looping over individual elements in
-arrays and thus greatly improve performance.
-
-It is possible to use special features to effectively increase the
-number of dimensions in an array through indexing so the resulting
-array acquires the shape needed for use in an expression or with a
-specific function.
-
-Index arrays
-============
-
-NumPy arrays may be indexed with other arrays (or any other sequence-
-like object that can be converted to an array, such as lists, with the
-exception of tuples; see the end of this document for why this is). The
-use of index arrays ranges from simple, straightforward cases to
-complex, hard-to-understand cases. For all cases of index arrays, what
-is returned is a copy of the original data, not a view as one gets for
-slices.
-
-Index arrays must be of integer type. Each value in the array indicates
-which value in the array to use in place of the index. To illustrate: ::
-
- >>> x = np.arange(10,1,-1)
- >>> x
- array([10, 9, 8, 7, 6, 5, 4, 3, 2])
- >>> x[np.array([3, 3, 1, 8])]
- array([7, 7, 9, 2])
-
-
-The index array consisting of the values 3, 3, 1 and 8 correspondingly
-creates an array of length 4 (same as the index array) where each index
-is replaced by the value the index array has in the array being indexed.
-
-Negative values are permitted and work as they do with single indices
-or slices: ::
-
- >>> x[np.array([3,3,-3,8])]
- array([7, 7, 4, 2])
-
-It is an error to have index values out of bounds: ::
-
- >>> x[np.array([3, 3, 20, 8])]
- <type 'exceptions.IndexError'>: index 20 out of bounds 0<=index<9
-
-Generally speaking, what is returned when index arrays are used is
-an array with the same shape as the index array, but with the type
-and values of the array being indexed. As an example, we can use a
-multidimensional index array instead: ::
-
- >>> x[np.array([[1,1],[2,3]])]
- array([[9, 9],
- [8, 7]])
-
-Indexing Multi-dimensional arrays
-=================================
-
-Things become more complex when multidimensional arrays are indexed,
-particularly with multidimensional index arrays. These tend to be
-more unusual uses, but they are permitted, and they are useful for some
-problems. We'll start with the simplest multidimensional case (using
-the array y from the previous examples): ::
-
- >>> y[np.array([0,2,4]), np.array([0,1,2])]
- array([ 0, 15, 30])
-
-In this case, if the index arrays have a matching shape, and there is
-an index array for each dimension of the array being indexed, the
-resultant array has the same shape as the index arrays, and the values
-correspond to the index set for each position in the index arrays. In
-this example, the first index value is 0 for both index arrays, and
-thus the first value of the resultant array is y[0,0]. The next value
-is y[2,1], and the last is y[4,2].
-
-If the index arrays do not have the same shape, there is an attempt to
-broadcast them to the same shape. If they cannot be broadcast to the
-same shape, an exception is raised: ::
-
- >>> y[np.array([0,2,4]), np.array([0,1])]
- <type 'exceptions.ValueError'>: shape mismatch: objects cannot be
- broadcast to a single shape
-
-The broadcasting mechanism permits index arrays to be combined with
-scalars for other indices. The effect is that the scalar value is used
-for all the corresponding values of the index arrays: ::
-
- >>> y[np.array([0,2,4]), 1]
- array([ 1, 15, 29])
-
-Jumping to the next level of complexity, it is possible to only
-partially index an array with index arrays. It takes a bit of thought
-to understand what happens in such cases. For example if we just use
-one index array with y: ::
-
- >>> y[np.array([0,2,4])]
- array([[ 0, 1, 2, 3, 4, 5, 6],
- [14, 15, 16, 17, 18, 19, 20],
- [28, 29, 30, 31, 32, 33, 34]])
-
-What results is the construction of a new array where each value of
-the index array selects one row from the array being indexed and the
-resultant array has the resulting shape (number of index elements,
-size of row).
-
-An example of where this may be useful is for a color lookup table
-where we want to map the values of an image into RGB triples for
-display. The lookup table could have a shape (nlookup, 3). Indexing
-such an array with an image with shape (ny, nx) with dtype=np.uint8
-(or any integer type so long as values are within the bounds of the
-lookup table) will result in an array of shape (ny, nx, 3) where a
-triple of RGB values is associated with each pixel location.
-
-In general, the shape of the resultant array will be the concatenation
-of the shape of the index array (or the shape that all the index arrays
-were broadcast to) with the shape of any unused dimensions (those not
-indexed) in the array being indexed.
-
-Boolean or "mask" index arrays
-==============================
-
-Boolean arrays used as indices are treated in a different manner
-entirely than index arrays. Boolean arrays must be of the same shape
-as the initial dimensions of the array being indexed. In the
-most straightforward case, the boolean array has the same shape: ::
-
- >>> b = y>20
- >>> y[b]
- array([21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34])
-
-Unlike in the case of integer index arrays, in the boolean case, the
-result is a 1-D array containing all the elements in the indexed array
-corresponding to all the true elements in the boolean array. The
-elements in the indexed array are always iterated and returned in
-:term:`row-major` (C-style) order. The result is also identical to
-``y[np.nonzero(b)]``. As with index arrays, what is returned is a copy
-of the data, not a view as one gets with slices.
-
-The result will be multidimensional if y has more dimensions than b.
-For example: ::
-
- >>> b[:,5] # use a 1-D boolean whose first dim agrees with the first dim of y
- array([False, False, False, True, True])
- >>> y[b[:,5]]
- array([[21, 22, 23, 24, 25, 26, 27],
- [28, 29, 30, 31, 32, 33, 34]])
-
-Here the 4th and 5th rows are selected from the indexed array and
-combined to make a 2-D array.
-
-In general, when the boolean array has fewer dimensions than the array
-being indexed, this is equivalent to y[b, ...], which means
-y is indexed by b followed by as many : as are needed to fill
-out the rank of y.
-Thus the shape of the result is one dimension containing the number
-of True elements of the boolean array, followed by the remaining
-dimensions of the array being indexed.
-
-For example, using a 2-D boolean array of shape (2,3)
-with four True elements to select rows from a 3-D array of shape
-(2,3,5) results in a 2-D result of shape (4,5): ::
-
- >>> x = np.arange(30).reshape(2,3,5)
- >>> x
- array([[[ 0, 1, 2, 3, 4],
- [ 5, 6, 7, 8, 9],
- [10, 11, 12, 13, 14]],
- [[15, 16, 17, 18, 19],
- [20, 21, 22, 23, 24],
- [25, 26, 27, 28, 29]]])
- >>> b = np.array([[True, True, False], [False, True, True]])
- >>> x[b]
- array([[ 0, 1, 2, 3, 4],
- [ 5, 6, 7, 8, 9],
- [20, 21, 22, 23, 24],
- [25, 26, 27, 28, 29]])
-
-For further details, consult the numpy reference documentation on array indexing.
-
-Combining index arrays with slices
-==================================
-
-Index arrays may be combined with slices. For example: ::
-
- >>> y[np.array([0, 2, 4]), 1:3]
- array([[ 1, 2],
- [15, 16],
- [29, 30]])
-
-In effect, the slice and index array operation are independent.
-The slice operation extracts columns with index 1 and 2,
-(i.e. the 2nd and 3rd columns),
-followed by the index array operation which extracts rows with
-index 0, 2 and 4 (i.e the first, third and fifth rows).
-
-This is equivalent to::
-
- >>> y[:, 1:3][np.array([0, 2, 4]), :]
- array([[ 1, 2],
- [15, 16],
- [29, 30]])
-
-Likewise, slicing can be combined with broadcasted boolean indices: ::
-
- >>> b = y > 20
- >>> b
- array([[False, False, False, False, False, False, False],
- [False, False, False, False, False, False, False],
- [False, False, False, False, False, False, False],
- [ True, True, True, True, True, True, True],
- [ True, True, True, True, True, True, True]])
- >>> y[b[:,5],1:3]
- array([[22, 23],
- [29, 30]])
-
-Structural indexing tools
-=========================
-
-To facilitate easy matching of array shapes with expressions and in
-assignments, the np.newaxis object can be used within array indices
-to add new dimensions with a size of 1. For example: ::
-
- >>> y.shape
- (5, 7)
- >>> y[:,np.newaxis,:].shape
- (5, 1, 7)
-
-Note that there are no new elements in the array, just that the
-dimensionality is increased. This can be handy to combine two
-arrays in a way that otherwise would require explicitly reshaping
-operations. For example: ::
-
- >>> x = np.arange(5)
- >>> x[:,np.newaxis] + x[np.newaxis,:]
- array([[0, 1, 2, 3, 4],
- [1, 2, 3, 4, 5],
- [2, 3, 4, 5, 6],
- [3, 4, 5, 6, 7],
- [4, 5, 6, 7, 8]])
-
-The ellipsis syntax may be used to indicate selecting in full any
-remaining unspecified dimensions. For example: ::
-
- >>> z = np.arange(81).reshape(3,3,3,3)
- >>> z[1,...,2]
- array([[29, 32, 35],
- [38, 41, 44],
- [47, 50, 53]])
-
-This is equivalent to: ::
-
- >>> z[1,:,:,2]
- array([[29, 32, 35],
- [38, 41, 44],
- [47, 50, 53]])
-
-Assigning values to indexed arrays
-==================================
-
-As mentioned, one can select a subset of an array to assign to using
-a single index, slices, and index and mask arrays. The value being
-assigned to the indexed array must be shape consistent (the same shape
-or broadcastable to the shape the index produces). For example, it is
-permitted to assign a constant to a slice: ::
-
- >>> x = np.arange(10)
- >>> x[2:7] = 1
-
-or an array of the right size: ::
-
- >>> x[2:7] = np.arange(5)
-
-Note that assignments may result in changes if assigning
-higher types to lower types (like floats to ints) or even
-exceptions (assigning complex to floats or ints): ::
-
- >>> x[1] = 1.2
- >>> x[1]
- 1
- >>> x[1] = 1.2j
- TypeError: can't convert complex to int
-
-
-Unlike some of the references (such as array and mask indices)
-assignments are always made to the original data in the array
-(indeed, nothing else would make sense!). Note though, that some
-actions may not work as one may naively expect. This particular
-example is often surprising to people: ::
-
- >>> x = np.arange(0, 50, 10)
- >>> x
- array([ 0, 10, 20, 30, 40])
- >>> x[np.array([1, 1, 3, 1])] += 1
- >>> x
- array([ 0, 11, 20, 31, 40])
-
-Where people expect that the 1st location will be incremented by 3.
-In fact, it will only be incremented by 1. The reason is because
-a new array is extracted from the original (as a temporary) containing
-the values at 1, 1, 3, 1, then the value 1 is added to the temporary,
-and then the temporary is assigned back to the original array. Thus
-the value of the array at x[1]+1 is assigned to x[1] three times,
-rather than being incremented 3 times.
-
-Dealing with variable numbers of indices within programs
-========================================================
-
-The index syntax is very powerful but limiting when dealing with
-a variable number of indices. For example, if you want to write
-a function that can handle arguments with various numbers of
-dimensions without having to write special case code for each
-number of possible dimensions, how can that be done? If one
-supplies to the index a tuple, the tuple will be interpreted
-as a list of indices. For example (using the previous definition
-for the array z): ::
-
- >>> indices = (1,1,1,1)
- >>> z[indices]
- 40
-
-So one can use code to construct tuples of any number of indices
-and then use these within an index.
-
-Slices can be specified within programs by using the slice() function
-in Python. For example: ::
-
- >>> indices = (1,1,1,slice(0,2)) # same as [1,1,1,0:2]
- >>> z[indices]
- array([39, 40])
-
-Likewise, ellipsis can be specified by code by using the Ellipsis
-object: ::
-
- >>> indices = (1, Ellipsis, 1) # same as [1,...,1]
- >>> z[indices]
- array([[28, 31, 34],
- [37, 40, 43],
- [46, 49, 52]])
-
-For this reason it is possible to use the output from the np.nonzero()
-function directly as an index since it always returns a tuple of index
-arrays.
-
-Because of the special treatment of tuples, they are not automatically
-converted to an array as a list would be. As an example: ::
-
- >>> z[[1,1,1,1]] # produces a large array
- array([[[[27, 28, 29],
- [30, 31, 32], ...
- >>> z[(1,1,1,1)] # returns a single value
- 40
-
-"""
diff --git a/numpy/doc/internals.py b/numpy/doc/internals.py
deleted file mode 100644
index 6718f1108..000000000
--- a/numpy/doc/internals.py
+++ /dev/null
@@ -1,162 +0,0 @@
-"""
-===============
-Array Internals
-===============
-
-Internal organization of numpy arrays
-=====================================
-
-It helps to understand a bit about how numpy arrays are handled under the covers to help understand numpy better. This section will not go into great detail. Those wishing to understand the full details are referred to Travis Oliphant's book "Guide to NumPy".
-
-NumPy arrays consist of two major components, the raw array data (from now on,
-referred to as the data buffer), and the information about the raw array data.
-The data buffer is typically what people think of as arrays in C or Fortran,
-a contiguous (and fixed) block of memory containing fixed sized data items.
-NumPy also contains a significant set of data that describes how to interpret
-the data in the data buffer. This extra information contains (among other things):
-
- 1) The basic data element's size in bytes
- 2) The start of the data within the data buffer (an offset relative to the
- beginning of the data buffer).
- 3) The number of dimensions and the size of each dimension
- 4) The separation between elements for each dimension (the 'stride'). This
- does not have to be a multiple of the element size
- 5) The byte order of the data (which may not be the native byte order)
- 6) Whether the buffer is read-only
- 7) Information (via the dtype object) about the interpretation of the basic
- data element. The basic data element may be as simple as a int or a float,
- or it may be a compound object (e.g., struct-like), a fixed character field,
- or Python object pointers.
-  8) Whether the array is to be interpreted as C-order or Fortran-order.
-
-This arrangement allows for very flexible use of arrays. One thing that it allows
-is simple changes of the metadata to change the interpretation of the array buffer.
-Changing the byteorder of the array is a simple change involving no rearrangement
-of the data. The shape of the array can be changed very easily without changing
-anything in the data buffer or any data copying at all.
-
-Among other things that are made possible is one can create a new array metadata
-object that uses the same data buffer
-to create a new view of that data buffer that has a different interpretation
-of the buffer (e.g., different shape, offset, byte order, strides, etc) but
-shares the same data bytes. Many operations in numpy do just this such as
-slices. Other operations, such as transpose, don't move data elements
-around in the array, but rather change the information about the shape and
-strides so that the indexing of the array changes, but the data in the
-buffer doesn't move.
-
-Typically these new versions of the array metadata, which share the same
-data buffer, are new 'views' into the data buffer. There is a different
-ndarray object, but it
-uses the same data buffer. This is why it is necessary to force copies through
-use of the .copy() method if one really wants to make a new and independent
-copy of the data buffer.
-
-New views into arrays mean the object reference counts for the data buffer
-increase. Simply doing away with the original array object will not remove the
-data buffer if other views of it still exist.
-
-Multidimensional Array Indexing Order Issues
-============================================
-
-What is the right way to index
-multi-dimensional arrays? Before you jump to conclusions about the one and
-true way to index multi-dimensional arrays, it pays to understand why this is
-a confusing issue. This section will try to explain in detail how numpy
-indexing works and why we adopt the convention we do for images, and when it
-may be appropriate to adopt other conventions.
-
-The first thing to understand is
-that there are two conflicting conventions for indexing 2-dimensional arrays.
-Matrix notation uses the first index to indicate which row is being selected and
-the second index to indicate which column is selected. This is opposite the
-geometrically oriented-convention for images where people generally think the
-first index represents x position (i.e., column) and the second represents y
-position (i.e., row). This alone is the source of much confusion;
-matrix-oriented users and image-oriented users expect two different things with
-regard to indexing.
-
-The second issue to understand is how indices correspond
-to the order the array is stored in memory. In Fortran the first index is the
-most rapidly varying index when moving through the elements of a two
-dimensional array as it is stored in memory. If you adopt the matrix
-convention for indexing, then this means the matrix is stored one column at a
-time (since the first index moves to the next row as it changes). Thus Fortran
-is considered a Column-major language. C has just the opposite convention. In
-C, the last index changes most rapidly as one moves through the array as
-stored in memory. Thus C is a Row-major language. The matrix is stored by
-rows. Note that in both cases it presumes that the matrix convention for
-indexing is being used, i.e., for both Fortran and C, the first index is the
-row. Note this convention implies that the indexing convention is invariant
-and that the data order changes to keep that so.
-
-But that's not the only way
-to look at it. Suppose one has large two-dimensional arrays (images or
-matrices) stored in data files. Suppose the data are stored by rows rather than
-by columns. If we are to preserve our index convention (whether matrix or
-image) that means that depending on the language we use, we may be forced to
-reorder the data if it is read into memory to preserve our indexing
-convention. For example if we read row-ordered data into memory without
-reordering, it will match the matrix indexing convention for C, but not for
-Fortran. Conversely, it will match the image indexing convention for Fortran,
-but not for C. For C, if one is using data stored in row order, and one wants
-to preserve the image index convention, the data must be reordered when
-reading into memory.
-
-In the end, which you do for Fortran or C depends on
-which is more important, not reordering data or preserving the indexing
-convention. For large images, reordering data is potentially expensive, and
-often the indexing convention is inverted to avoid that.
-
-The situation with
-numpy makes this issue yet more complicated. The internal machinery of numpy
-arrays is flexible enough to accept any ordering of indices. One can simply
-reorder indices by manipulating the internal stride information for arrays
-without reordering the data at all. NumPy will know how to map the new index
-order to the data without moving the data.
-
-So if this is true, why not choose
-the index order that matches what you most expect? In particular, why not define
-row-ordered images to use the image convention? (This is sometimes referred
-to as the Fortran convention vs the C convention, thus the 'C' and 'FORTRAN'
-order options for array ordering in numpy.) The drawback of doing this is
-potential performance penalties. It's common to access the data sequentially,
-either implicitly in array operations or explicitly by looping over rows of an
-image. When that is done, then the data will be accessed in non-optimal order.
-As the first index is incremented, what is actually happening is that elements
-spaced far apart in memory are being sequentially accessed, with usually poor
-memory access speeds. For example, for a two dimensional image 'im' defined so
-that im[0, 10] represents the value at x=0, y=10. To be consistent with usual
-Python behavior then im[0] would represent a column at x=0. Yet that data
-would be spread over the whole array since the data are stored in row order.
-Despite the flexibility of numpy's indexing, it can't really paper over the fact
-that basic operations are rendered inefficient because of data order, or that
-getting contiguous subarrays is still awkward (e.g., im[:,0] for the first row,
-vs im[0]). Thus one can't use an idiom such as "for row in im"; "for col in im"
-does work, but doesn't yield contiguous column data.
-
-As it turns out, numpy is
-smart enough when dealing with ufuncs to determine which index is the most
-rapidly varying one in memory and uses that for the innermost loop. Thus for
-ufuncs there is no large intrinsic advantage to either approach in most cases.
-On the other hand, use of .flat with an FORTRAN ordered array will lead to
-non-optimal memory access as adjacent elements in the flattened array (iterator,
-actually) are not contiguous in memory.
-
-Indeed, the fact is that Python
-indexing on lists and other sequences naturally leads to an outside-to inside
-ordering (the first index gets the largest grouping, the next the next largest,
-and the last gets the smallest element). Since image data are normally stored
-by rows, this corresponds to position within rows being the last item indexed.
-
-If you do want to use Fortran ordering realize that
-there are two approaches to consider: 1) accept that the first index is just not
-the most rapidly changing in memory and have all your I/O routines reorder
-your data when going from memory to disk or vice versa, or use numpy's
-mechanism for mapping the first index to the most rapidly varying data. We
-recommend the former if possible. The disadvantage of the latter is that many
-of numpy's functions will yield arrays without Fortran ordering unless you are
-careful to use the 'order' keyword. Doing this would be highly inconvenient.
-
-Otherwise we recommend simply learning to reverse the usual order of indices
-when accessing elements of an array. Granted, it goes against the grain, but
-it is more in line with Python semantics and the natural order of the data.
-
-"""
diff --git a/numpy/doc/misc.py b/numpy/doc/misc.py
deleted file mode 100644
index fc1c4cd01..000000000
--- a/numpy/doc/misc.py
+++ /dev/null
@@ -1,226 +0,0 @@
-"""
-=============
-Miscellaneous
-=============
-
-IEEE 754 Floating Point Special Values
---------------------------------------
-
-Special values defined in numpy: nan, inf,
-
-NaNs can be used as a poor-man's mask (if you don't care what the
-original value was)
-
-Note: cannot use equality to test NaNs. E.g.: ::
-
- >>> myarr = np.array([1., 0., np.nan, 3.])
- >>> np.nonzero(myarr == np.nan)
- (array([], dtype=int64),)
- >>> np.nan == np.nan # is always False! Use special numpy functions instead.
- False
- >>> myarr[myarr == np.nan] = 0. # doesn't work
- >>> myarr
- array([ 1., 0., NaN, 3.])
-    >>> myarr[np.isnan(myarr)] = 0.  # use this instead
- >>> myarr
- array([ 1., 0., 0., 3.])
-
-Other related special value functions: ::
-
- isinf(): True if value is inf
- isfinite(): True if not nan or inf
- nan_to_num(): Map nan to 0, inf to max float, -inf to min float
-
-The following corresponds to the usual functions except that nans are excluded
-from the results: ::
-
- nansum()
- nanmax()
- nanmin()
- nanargmax()
- nanargmin()
-
- >>> x = np.arange(10.)
- >>> x[3] = np.nan
- >>> x.sum()
- nan
- >>> np.nansum(x)
- 42.0
-
-How numpy handles numerical exceptions
---------------------------------------
-
-The default is to ``'warn'`` for ``invalid``, ``divide``, and ``overflow``
-and ``'ignore'`` for ``underflow``. But this can be changed, and it can be
-set individually for different kinds of exceptions. The different behaviors
-are:
-
- - 'ignore' : Take no action when the exception occurs.
- - 'warn' : Print a `RuntimeWarning` (via the Python `warnings` module).
- - 'raise' : Raise a `FloatingPointError`.
- - 'call' : Call a function specified using the `seterrcall` function.
- - 'print' : Print a warning directly to ``stdout``.
- - 'log' : Record error in a Log object specified by `seterrcall`.
-
-These behaviors can be set for all kinds of errors or specific ones:
-
- - all : apply to all numeric exceptions
- - invalid : when NaNs are generated
- - divide : divide by zero (for integers as well!)
- - overflow : floating point overflows
- - underflow : floating point underflows
-
-Note that integer divide-by-zero is handled by the same machinery.
-These behaviors are set on a per-thread basis.
-
-Examples
---------
-
-::
-
- >>> oldsettings = np.seterr(all='warn')
- >>> np.zeros(5,dtype=np.float32)/0.
- invalid value encountered in divide
- >>> j = np.seterr(under='ignore')
- >>> np.array([1.e-100])**10
- >>> j = np.seterr(invalid='raise')
- >>> np.sqrt(np.array([-1.]))
- FloatingPointError: invalid value encountered in sqrt
- >>> def errorhandler(errstr, errflag):
- ... print("saw stupid error!")
- >>> np.seterrcall(errorhandler)
- <function err_handler at 0x...>
- >>> j = np.seterr(all='call')
- >>> np.zeros(5, dtype=np.int32)/0
- FloatingPointError: invalid value encountered in divide
- saw stupid error!
- >>> j = np.seterr(**oldsettings) # restore previous
- ... # error-handling settings
-
-Interfacing to C
-----------------
-Only a survey of the choices. Little detail on how each works.
-
-1) Bare metal, wrap your own C-code manually.
-
- - Plusses:
-
- - Efficient
- - No dependencies on other tools
-
- - Minuses:
-
- - Lots of learning overhead:
-
- - need to learn basics of Python C API
- - need to learn basics of numpy C API
- - need to learn how to handle reference counting and love it.
-
- - Reference counting often difficult to get right.
-
- - getting it wrong leads to memory leaks, and worse, segfaults
-
- - API will change for Python 3.0!
-
-2) Cython
-
- - Plusses:
-
- - avoid learning C API's
- - no dealing with reference counting
- - can code in pseudo python and generate C code
- - can also interface to existing C code
- - should shield you from changes to Python C api
- - has become the de-facto standard within the scientific Python community
- - fast indexing support for arrays
-
- - Minuses:
-
- - Can write code in non-standard form which may become obsolete
- - Not as flexible as manual wrapping
-
-3) ctypes
-
- - Plusses:
-
- - part of Python standard library
- - good for interfacing to existing sharable libraries, particularly
- Windows DLLs
- - avoids API/reference counting issues
- - good numpy support: arrays have all these in their ctypes
- attribute: ::
-
- a.ctypes.data a.ctypes.get_strides
- a.ctypes.data_as a.ctypes.shape
- a.ctypes.get_as_parameter a.ctypes.shape_as
- a.ctypes.get_data a.ctypes.strides
- a.ctypes.get_shape a.ctypes.strides_as
-
- - Minuses:
-
- - can't use for writing code to be turned into C extensions, only a wrapper
- tool.
-
-4) SWIG (automatic wrapper generator)
-
- - Plusses:
-
- - around a long time
- - multiple scripting language support
- - C++ support
- - Good for wrapping large (many functions) existing C libraries
-
- - Minuses:
-
- - generates lots of code between Python and the C code
- - can cause performance problems that are nearly impossible to optimize
- out
- - interface files can be hard to write
- - doesn't necessarily avoid reference counting issues or needing to know
- API's
-
-5) scipy.weave
-
- - Plusses:
-
- - can turn many numpy expressions into C code
- - dynamic compiling and loading of generated C code
- - can embed pure C code in Python module and have weave extract, generate
- interfaces and compile, etc.
-
- - Minuses:
-
- - Future very uncertain: it's the only part of Scipy not ported to Python 3
- and is effectively deprecated in favor of Cython.
-
-6) Psyco
-
- - Plusses:
-
- - Turns pure python into efficient machine code through jit-like
- optimizations
- - very fast when it optimizes well
-
- - Minuses:
-
- - Only on intel (windows?)
- - Doesn't do much for numpy?
-
-Interfacing to Fortran:
------------------------
-The clear choice to wrap Fortran code is
-`f2py <https://docs.scipy.org/doc/numpy/f2py/>`_.
-
-Pyfort is an older alternative, but not supported any longer.
-Fwrap is a newer project that looked promising but isn't being developed any
-longer.
-
-Interfacing to C++:
--------------------
- 1) Cython
- 2) CXX
- 3) Boost.python
- 4) SWIG
- 5) SIP (used mainly in PyQT)
-
-"""
diff --git a/numpy/doc/structured_arrays.py b/numpy/doc/structured_arrays.py
deleted file mode 100644
index 359d4f7f4..000000000
--- a/numpy/doc/structured_arrays.py
+++ /dev/null
@@ -1,646 +0,0 @@
-"""
-=================
-Structured Arrays
-=================
-
-Introduction
-============
-
-Structured arrays are ndarrays whose datatype is a composition of simpler
-datatypes organized as a sequence of named :term:`fields <field>`. For example,
-::
-
- >>> x = np.array([('Rex', 9, 81.0), ('Fido', 3, 27.0)],
- ... dtype=[('name', 'U10'), ('age', 'i4'), ('weight', 'f4')])
- >>> x
- array([('Rex', 9, 81.), ('Fido', 3, 27.)],
- dtype=[('name', 'U10'), ('age', '<i4'), ('weight', '<f4')])
-
-Here ``x`` is a one-dimensional array of length two whose datatype is a
-structure with three fields: 1. A string of length 10 or less named 'name', 2.
-a 32-bit integer named 'age', and 3. a 32-bit float named 'weight'.
-
-If you index ``x`` at position 1 you get a structure::
-
- >>> x[1]
- ('Fido', 3, 27.0)
-
-You can access and modify individual fields of a structured array by indexing
-with the field name::
-
- >>> x['age']
- array([9, 3], dtype=int32)
- >>> x['age'] = 5
- >>> x
- array([('Rex', 5, 81.), ('Fido', 5, 27.)],
- dtype=[('name', 'U10'), ('age', '<i4'), ('weight', '<f4')])
-
-Structured datatypes are designed to be able to mimic 'structs' in the C
-language, and share a similar memory layout. They are meant for interfacing with
-C code and for low-level manipulation of structured buffers, for example for
-interpreting binary blobs. For these purposes they support specialized features
-such as subarrays, nested datatypes, and unions, and allow control over the
-memory layout of the structure.
-
-Users looking to manipulate tabular data, such as stored in csv files, may find
-other pydata projects more suitable, such as xarray, pandas, or DataArray.
-These provide a high-level interface for tabular data analysis and are better
-optimized for that use. For instance, the C-struct-like memory layout of
-structured arrays in numpy can lead to poor cache behavior in comparison.
-
-.. _defining-structured-types:
-
-Structured Datatypes
-====================
-
-A structured datatype can be thought of as a sequence of bytes of a certain
-length (the structure's :term:`itemsize`) which is interpreted as a collection
-of fields. Each field has a name, a datatype, and a byte offset within the
-structure. The datatype of a field may be any numpy datatype including other
-structured datatypes, and it may also be a :term:`subarray data type` which
-behaves like an ndarray of a specified shape. The offsets of the fields are
-arbitrary, and fields may even overlap. These offsets are usually determined
-automatically by numpy, but can also be specified.
-
-Structured Datatype Creation
-----------------------------
-
-Structured datatypes may be created using the function :func:`numpy.dtype`.
-There are 4 alternative forms of specification which vary in flexibility and
-conciseness. These are further documented in the
-:ref:`Data Type Objects <arrays.dtypes.constructing>` reference page, and in
-summary they are:
-
-1. A list of tuples, one tuple per field
-
- Each tuple has the form ``(fieldname, datatype, shape)`` where shape is
- optional. ``fieldname`` is a string (or tuple if titles are used, see
- :ref:`Field Titles <titles>` below), ``datatype`` may be any object
- convertible to a datatype, and ``shape`` is a tuple of integers specifying
- subarray shape.
-
- >>> np.dtype([('x', 'f4'), ('y', np.float32), ('z', 'f4', (2, 2))])
- dtype([('x', '<f4'), ('y', '<f4'), ('z', '<f4', (2, 2))])
-
- If ``fieldname`` is the empty string ``''``, the field will be given a
- default name of the form ``f#``, where ``#`` is the integer index of the
- field, counting from 0 from the left::
-
- >>> np.dtype([('x', 'f4'), ('', 'i4'), ('z', 'i8')])
- dtype([('x', '<f4'), ('f1', '<i4'), ('z', '<i8')])
-
- The byte offsets of the fields within the structure and the total
- structure itemsize are determined automatically.
-
-2. A string of comma-separated dtype specifications
-
- In this shorthand notation any of the :ref:`string dtype specifications
- <arrays.dtypes.constructing>` may be used in a string and separated by
- commas. The itemsize and byte offsets of the fields are determined
- automatically, and the field names are given the default names ``f0``,
- ``f1``, etc. ::
-
- >>> np.dtype('i8, f4, S3')
- dtype([('f0', '<i8'), ('f1', '<f4'), ('f2', 'S3')])
- >>> np.dtype('3int8, float32, (2, 3)float64')
- dtype([('f0', 'i1', (3,)), ('f1', '<f4'), ('f2', '<f8', (2, 3))])
-
-3. A dictionary of field parameter arrays
-
- This is the most flexible form of specification since it allows control
- over the byte-offsets of the fields and the itemsize of the structure.
-
- The dictionary has two required keys, 'names' and 'formats', and four
- optional keys, 'offsets', 'itemsize', 'aligned' and 'titles'. The values
- for 'names' and 'formats' should respectively be a list of field names and
- a list of dtype specifications, of the same length. The optional 'offsets'
- value should be a list of integer byte-offsets, one for each field within
- the structure. If 'offsets' is not given the offsets are determined
- automatically. The optional 'itemsize' value should be an integer
- describing the total size in bytes of the dtype, which must be large
- enough to contain all the fields.
- ::
-
- >>> np.dtype({'names': ['col1', 'col2'], 'formats': ['i4', 'f4']})
- dtype([('col1', '<i4'), ('col2', '<f4')])
- >>> np.dtype({'names': ['col1', 'col2'],
- ... 'formats': ['i4', 'f4'],
- ... 'offsets': [0, 4],
- ... 'itemsize': 12})
- dtype({'names':['col1','col2'], 'formats':['<i4','<f4'], 'offsets':[0,4], 'itemsize':12})
-
- Offsets may be chosen such that the fields overlap, though this will mean
- that assigning to one field may clobber any overlapping field's data. As
- an exception, fields of :class:`numpy.object` type cannot overlap with
- other fields, because of the risk of clobbering the internal object
- pointer and then dereferencing it.
-
- The optional 'aligned' value can be set to ``True`` to make the automatic
- offset computation use aligned offsets (see :ref:`offsets-and-alignment`),
- as if the 'align' keyword argument of :func:`numpy.dtype` had been set to
- True.
-
- The optional 'titles' value should be a list of titles of the same length
- as 'names', see :ref:`Field Titles <titles>` below.
-
-4. A dictionary of field names
-
- The use of this form of specification is discouraged, but documented here
- because older numpy code may use it. The keys of the dictionary are the
- field names and the values are tuples specifying type and offset::
-
- >>> np.dtype({'col1': ('i1', 0), 'col2': ('f4', 1)})
- dtype([('col1', 'i1'), ('col2', '<f4')])
-
- This form is discouraged because Python dictionaries do not preserve order
- in Python versions before Python 3.6, and the order of the fields in a
- structured dtype has meaning. :ref:`Field Titles <titles>` may be
- specified by using a 3-tuple, see below.
-
-Manipulating and Displaying Structured Datatypes
-------------------------------------------------
-
-The list of field names of a structured datatype can be found in the ``names``
-attribute of the dtype object::
-
- >>> d = np.dtype([('x', 'i8'), ('y', 'f4')])
- >>> d.names
- ('x', 'y')
-
-The field names may be modified by assigning to the ``names`` attribute using a
-sequence of strings of the same length.
-
-The dtype object also has a dictionary-like attribute, ``fields``, whose keys
-are the field names (and :ref:`Field Titles <titles>`, see below) and whose
-values are tuples containing the dtype and byte offset of each field. ::
-
- >>> d.fields
- mappingproxy({'x': (dtype('int64'), 0), 'y': (dtype('float32'), 8)})
-
-Both the ``names`` and ``fields`` attributes will equal ``None`` for
-unstructured arrays. The recommended way to test if a dtype is structured is
-with `if dt.names is not None` rather than `if dt.names`, to account for dtypes
-with 0 fields.
-
-The string representation of a structured datatype is shown in the "list of
-tuples" form if possible, otherwise numpy falls back to using the more general
-dictionary form.
-
-.. _offsets-and-alignment:
-
-Automatic Byte Offsets and Alignment
-------------------------------------
-
-Numpy uses one of two methods to automatically determine the field byte offsets
-and the overall itemsize of a structured datatype, depending on whether
-``align=True`` was specified as a keyword argument to :func:`numpy.dtype`.
-
-By default (``align=False``), numpy will pack the fields together such that
-each field starts at the byte offset the previous field ended, and the fields
-are contiguous in memory. ::
-
- >>> def print_offsets(d):
- ... print("offsets:", [d.fields[name][1] for name in d.names])
- ... print("itemsize:", d.itemsize)
- >>> print_offsets(np.dtype('u1, u1, i4, u1, i8, u2'))
- offsets: [0, 1, 2, 6, 7, 15]
- itemsize: 17
-
-If ``align=True`` is set, numpy will pad the structure in the same way many C
-compilers would pad a C-struct. Aligned structures can give a performance
-improvement in some cases, at the cost of increased datatype size. Padding
-bytes are inserted between fields such that each field's byte offset will be a
-multiple of that field's alignment, which is usually equal to the field's size
-in bytes for simple datatypes, see :c:member:`PyArray_Descr.alignment`. The
-structure will also have trailing padding added so that its itemsize is a
-multiple of the largest field's alignment. ::
-
- >>> print_offsets(np.dtype('u1, u1, i4, u1, i8, u2', align=True))
- offsets: [0, 1, 4, 8, 16, 24]
- itemsize: 32
-
-Note that although almost all modern C compilers pad in this way by default,
-padding in C structs is C-implementation-dependent so this memory layout is not
-guaranteed to exactly match that of a corresponding struct in a C program. Some
-work may be needed, either on the numpy side or the C side, to obtain exact
-correspondence.
-
-If offsets were specified using the optional ``offsets`` key in the
-dictionary-based dtype specification, setting ``align=True`` will check that
-each field's offset is a multiple of its size and that the itemsize is a
-multiple of the largest field size, and raise an exception if not.
-
-If the offsets of the fields and itemsize of a structured array satisfy the
-alignment conditions, the array will have the ``ALIGNED`` :attr:`flag
-<numpy.ndarray.flags>` set.
-
-A convenience function :func:`numpy.lib.recfunctions.repack_fields` converts an
-aligned dtype or array to a packed one and vice versa. It takes either a dtype
-or structured ndarray as an argument, and returns a copy with fields re-packed,
-with or without padding bytes.
-
-.. _titles:
-
-Field Titles
-------------
-
-In addition to field names, fields may also have an associated :term:`title`,
-an alternate name, which is sometimes used as an additional description or
-alias for the field. The title may be used to index an array, just like a
-field name.
-
-To add titles when using the list-of-tuples form of dtype specification, the
-field name may be specified as a tuple of two strings instead of a single
-string, which will be the field's title and field name respectively. For
-example::
-
- >>> np.dtype([(('my title', 'name'), 'f4')])
- dtype([(('my title', 'name'), '<f4')])
-
-When using the first form of dictionary-based specification, the titles may be
-supplied as an extra ``'titles'`` key as described above. When using the second
-(discouraged) dictionary-based specification, the title can be supplied by
-providing a 3-element tuple ``(datatype, offset, title)`` instead of the usual
-2-element tuple::
-
- >>> np.dtype({'name': ('i4', 0, 'my title')})
- dtype([(('my title', 'name'), '<i4')])
-
-The ``dtype.fields`` dictionary will contain titles as keys, if any
-titles are used. This means effectively that a field with a title will be
-represented twice in the fields dictionary. The tuple values for these fields
-will also have a third element, the field title. Because of this, and because
-the ``names`` attribute preserves the field order while the ``fields``
-attribute may not, it is recommended to iterate through the fields of a dtype
-using the ``names`` attribute of the dtype, which will not list titles, as
-in::
-
- >>> for name in d.names:
- ... print(d.fields[name][:2])
- (dtype('int64'), 0)
- (dtype('float32'), 8)
-
-Union types
------------
-
-Structured datatypes are implemented in numpy to have base type
-:class:`numpy.void` by default, but it is possible to interpret other numpy
-types as structured types using the ``(base_dtype, dtype)`` form of dtype
-specification described in
-:ref:`Data Type Objects <arrays.dtypes.constructing>`. Here, ``base_dtype`` is
-the desired underlying dtype, and fields and flags will be copied from
-``dtype``. This dtype is similar to a 'union' in C.
-
-Indexing and Assignment to Structured arrays
-============================================
-
-Assigning data to a Structured Array
-------------------------------------
-
-There are a number of ways to assign values to a structured array: Using python
-tuples, using scalar values, or using other structured arrays.
-
-Assignment from Python Native Types (Tuples)
-````````````````````````````````````````````
-
-The simplest way to assign values to a structured array is using python tuples.
-Each assigned value should be a tuple of length equal to the number of fields
-in the array, and not a list or array as these will trigger numpy's
-broadcasting rules. The tuple's elements are assigned to the successive fields
-of the array, from left to right::
-
- >>> x = np.array([(1, 2, 3), (4, 5, 6)], dtype='i8, f4, f8')
- >>> x[1] = (7, 8, 9)
- >>> x
- array([(1, 2., 3.), (7, 8., 9.)],
- dtype=[('f0', '<i8'), ('f1', '<f4'), ('f2', '<f8')])
-
-Assignment from Scalars
-```````````````````````
-
-A scalar assigned to a structured element will be assigned to all fields. This
-happens when a scalar is assigned to a structured array, or when an
-unstructured array is assigned to a structured array::
-
- >>> x = np.zeros(2, dtype='i8, f4, ?, S1')
- >>> x[:] = 3
- >>> x
- array([(3, 3., True, b'3'), (3, 3., True, b'3')],
- dtype=[('f0', '<i8'), ('f1', '<f4'), ('f2', '?'), ('f3', 'S1')])
- >>> x[:] = np.arange(2)
- >>> x
- array([(0, 0., False, b'0'), (1, 1., True, b'1')],
- dtype=[('f0', '<i8'), ('f1', '<f4'), ('f2', '?'), ('f3', 'S1')])
-
-Structured arrays can also be assigned to unstructured arrays, but only if the
-structured datatype has just a single field::
-
- >>> twofield = np.zeros(2, dtype=[('A', 'i4'), ('B', 'i4')])
- >>> onefield = np.zeros(2, dtype=[('A', 'i4')])
- >>> nostruct = np.zeros(2, dtype='i4')
- >>> nostruct[:] = twofield
- Traceback (most recent call last):
- ...
- TypeError: Cannot cast array data from dtype([('A', '<i4'), ('B', '<i4')]) to dtype('int32') according to the rule 'unsafe'
-
-Assignment from other Structured Arrays
-```````````````````````````````````````
-
-Assignment between two structured arrays occurs as if the source elements had
-been converted to tuples and then assigned to the destination elements. That
-is, the first field of the source array is assigned to the first field of the
-destination array, and the second field likewise, and so on, regardless of
-field names. Structured arrays with a different number of fields cannot be
-assigned to each other. Bytes of the destination structure which are not
-included in any of the fields are unaffected. ::
-
- >>> a = np.zeros(3, dtype=[('a', 'i8'), ('b', 'f4'), ('c', 'S3')])
- >>> b = np.ones(3, dtype=[('x', 'f4'), ('y', 'S3'), ('z', 'O')])
- >>> b[:] = a
- >>> b
- array([(0., b'0.0', b''), (0., b'0.0', b''), (0., b'0.0', b'')],
- dtype=[('x', '<f4'), ('y', 'S3'), ('z', 'O')])
-
-
-Assignment involving subarrays
-``````````````````````````````
-
-When assigning to fields which are subarrays, the assigned value will first be
-broadcast to the shape of the subarray.
-
-Indexing Structured Arrays
---------------------------
-
-Accessing Individual Fields
-```````````````````````````
-
-Individual fields of a structured array may be accessed and modified by indexing
-the array with the field name. ::
-
- >>> x = np.array([(1, 2), (3, 4)], dtype=[('foo', 'i8'), ('bar', 'f4')])
- >>> x['foo']
- array([1, 3])
- >>> x['foo'] = 10
- >>> x
- array([(10, 2.), (10, 4.)],
- dtype=[('foo', '<i8'), ('bar', '<f4')])
-
-The resulting array is a view into the original array. It shares the same
-memory locations and writing to the view will modify the original array. ::
-
- >>> y = x['bar']
- >>> y[:] = 11
- >>> x
- array([(10, 11.), (10, 11.)],
- dtype=[('foo', '<i8'), ('bar', '<f4')])
-
-This view has the same dtype and itemsize as the indexed field, so it is
-typically a non-structured array, except in the case of nested structures.
-
- >>> y.dtype, y.shape, y.strides
- (dtype('float32'), (2,), (12,))
-
-If the accessed field is a subarray, the dimensions of the subarray
-are appended to the shape of the result::
-
- >>> x = np.zeros((2, 2), dtype=[('a', np.int32), ('b', np.float64, (3, 3))])
- >>> x['a'].shape
- (2, 2)
- >>> x['b'].shape
- (2, 2, 3, 3)
-
-Accessing Multiple Fields
-```````````````````````````
-
-One can index and assign to a structured array with a multi-field index, where
-the index is a list of field names.
-
-.. warning::
- The behavior of multi-field indexes changed from Numpy 1.15 to Numpy 1.16.
-
-The result of indexing with a multi-field index is a view into the original
-array, as follows::
-
- >>> a = np.zeros(3, dtype=[('a', 'i4'), ('b', 'i4'), ('c', 'f4')])
- >>> a[['a', 'c']]
- array([(0, 0.), (0, 0.), (0, 0.)],
- dtype={'names':['a','c'], 'formats':['<i4','<f4'], 'offsets':[0,8], 'itemsize':12})
-
-Assignment to the view modifies the original array. The view's fields will be
-in the order they were indexed. Note that unlike for single-field indexing, the
-dtype of the view has the same itemsize as the original array, and has fields
-at the same offsets as in the original array, and unindexed fields are merely
-missing.
-
-.. warning::
- In Numpy 1.15, indexing an array with a multi-field index returned a copy of
- the result above, but with fields packed together in memory as if
- passed through :func:`numpy.lib.recfunctions.repack_fields`.
-
- The new behavior as of Numpy 1.16 leads to extra "padding" bytes at the
- location of unindexed fields compared to 1.15. You will need to update any
- code which depends on the data having a "packed" layout. For instance code
- such as::
-
- >>> a[['a', 'c']].view('i8') # Fails in Numpy 1.16
- Traceback (most recent call last):
- File "<stdin>", line 1, in <module>
- ValueError: When changing to a smaller dtype, its size must be a divisor of the size of original dtype
-
- will need to be changed. This code has raised a ``FutureWarning`` since
- Numpy 1.12, and similar code has raised ``FutureWarning`` since 1.7.
-
- In 1.16 a number of functions have been introduced in the
- :mod:`numpy.lib.recfunctions` module to help users account for this
- change. These are
- :func:`numpy.lib.recfunctions.repack_fields`.
- :func:`numpy.lib.recfunctions.structured_to_unstructured`,
- :func:`numpy.lib.recfunctions.unstructured_to_structured`,
- :func:`numpy.lib.recfunctions.apply_along_fields`,
- :func:`numpy.lib.recfunctions.assign_fields_by_name`, and
- :func:`numpy.lib.recfunctions.require_fields`.
-
- The function :func:`numpy.lib.recfunctions.repack_fields` can always be
- used to reproduce the old behavior, as it will return a packed copy of the
- structured array. The code above, for example, can be replaced with:
-
- >>> from numpy.lib.recfunctions import repack_fields
- >>> repack_fields(a[['a', 'c']]).view('i8') # supported in 1.16
- array([0, 0, 0])
-
- Furthermore, numpy now provides a new function
- :func:`numpy.lib.recfunctions.structured_to_unstructured` which is a safer
- and more efficient alternative for users who wish to convert structured
-    arrays to unstructured arrays, as the view above is often intended to do.
- This function allows safe conversion to an unstructured type taking into
- account padding, often avoids a copy, and also casts the datatypes
- as needed, unlike the view. Code such as:
-
- >>> b = np.zeros(3, dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4')])
- >>> b[['x', 'z']].view('f4')
- array([0., 0., 0., 0., 0., 0., 0., 0., 0.], dtype=float32)
-
- can be made safer by replacing with:
-
- >>> from numpy.lib.recfunctions import structured_to_unstructured
- >>> structured_to_unstructured(b[['x', 'z']])
- array([0, 0, 0])
-
-
-Assignment to an array with a multi-field index modifies the original array::
-
- >>> a[['a', 'c']] = (2, 3)
- >>> a
- array([(2, 0, 3.), (2, 0, 3.), (2, 0, 3.)],
- dtype=[('a', '<i4'), ('b', '<i4'), ('c', '<f4')])
-
-This obeys the structured array assignment rules described above. For example,
-this means that one can swap the values of two fields using appropriate
-multi-field indexes::
-
- >>> a[['a', 'c']] = a[['c', 'a']]
-
-Indexing with an Integer to get a Structured Scalar
-```````````````````````````````````````````````````
-
-Indexing a single element of a structured array (with an integer index) returns
-a structured scalar::
-
- >>> x = np.array([(1, 2., 3.)], dtype='i, f, f')
- >>> scalar = x[0]
- >>> scalar
- (1, 2., 3.)
- >>> type(scalar)
- <class 'numpy.void'>
-
-Unlike other numpy scalars, structured scalars are mutable and act like views
-into the original array, such that modifying the scalar will modify the
-original array. Structured scalars also support access and assignment by field
-name::
-
- >>> x = np.array([(1, 2), (3, 4)], dtype=[('foo', 'i8'), ('bar', 'f4')])
- >>> s = x[0]
- >>> s['bar'] = 100
- >>> x
- array([(1, 100.), (3, 4.)],
- dtype=[('foo', '<i8'), ('bar', '<f4')])
-
-Similarly to tuples, structured scalars can also be indexed with an integer::
-
- >>> scalar = np.array([(1, 2., 3.)], dtype='i, f, f')[0]
- >>> scalar[0]
- 1
- >>> scalar[1] = 4
-
-Thus, tuples might be thought of as the native Python equivalent to numpy's
-structured types, much like native python integers are the equivalent to
-numpy's integer types. Structured scalars may be converted to a tuple by
-calling :func:`ndarray.item`::
-
- >>> scalar.item(), type(scalar.item())
- ((1, 4.0, 3.0), <class 'tuple'>)
-
-Viewing Structured Arrays Containing Objects
---------------------------------------------
-
-In order to prevent clobbering object pointers in fields of
-:class:`numpy.object` type, numpy currently does not allow views of structured
-arrays containing objects.
-
-Structure Comparison
---------------------
-
-If the dtypes of two void structured arrays are equal, testing the equality of
-the arrays will result in a boolean array with the dimensions of the original
-arrays, with elements set to ``True`` where all fields of the corresponding
-structures are equal. Structured dtypes are equal if the field names,
-dtypes and titles are the same, ignoring endianness, and the fields are in
-the same order::
-
- >>> a = np.zeros(2, dtype=[('a', 'i4'), ('b', 'i4')])
- >>> b = np.ones(2, dtype=[('a', 'i4'), ('b', 'i4')])
- >>> a == b
- array([False, False])
-
-Currently, if the dtypes of two void structured arrays are not equivalent the
-comparison fails, returning the scalar value ``False``. This behavior is
-deprecated as of numpy 1.10 and will raise an error or perform elementwise
-comparison in the future.
-
-The ``<`` and ``>`` operators always return ``False`` when comparing void
-structured arrays, and arithmetic and bitwise operations are not supported.
-
-Record Arrays
-=============
-
-As an optional convenience numpy provides an ndarray subclass,
-:class:`numpy.recarray`, and associated helper functions in the
-:mod:`numpy.rec` submodule, that allows access to fields of structured arrays
-by attribute instead of only by index. Record arrays also use a special
-datatype, :class:`numpy.record`, that allows field access by attribute on the
-structured scalars obtained from the array.
-
-The simplest way to create a record array is with :func:`numpy.rec.array`::
-
- >>> recordarr = np.rec.array([(1, 2., 'Hello'), (2, 3., "World")],
- ... dtype=[('foo', 'i4'),('bar', 'f4'), ('baz', 'S10')])
- >>> recordarr.bar
- array([ 2., 3.], dtype=float32)
- >>> recordarr[1:2]
- rec.array([(2, 3., b'World')],
- dtype=[('foo', '<i4'), ('bar', '<f4'), ('baz', 'S10')])
- >>> recordarr[1:2].foo
- array([2], dtype=int32)
- >>> recordarr.foo[1:2]
- array([2], dtype=int32)
- >>> recordarr[1].baz
- b'World'
-
-:func:`numpy.rec.array` can convert a wide variety of arguments into record
-arrays, including structured arrays::
-
- >>> arr = np.array([(1, 2., 'Hello'), (2, 3., "World")],
- ... dtype=[('foo', 'i4'), ('bar', 'f4'), ('baz', 'S10')])
- >>> recordarr = np.rec.array(arr)
-
-The :mod:`numpy.rec` module provides a number of other convenience functions for
-creating record arrays, see :ref:`record array creation routines
-<routines.array-creation.rec>`.
-
-A record array representation of a structured array can be obtained using the
-appropriate `view <numpy-ndarray-view>`_::
-
- >>> arr = np.array([(1, 2., 'Hello'), (2, 3., "World")],
- ... dtype=[('foo', 'i4'),('bar', 'f4'), ('baz', 'a10')])
- >>> recordarr = arr.view(dtype=np.dtype((np.record, arr.dtype)),
- ... type=np.recarray)
-
-For convenience, viewing an ndarray as type :class:`np.recarray` will
-automatically convert to :class:`np.record` datatype, so the dtype can be left
-out of the view::
-
- >>> recordarr = arr.view(np.recarray)
- >>> recordarr.dtype
- dtype((numpy.record, [('foo', '<i4'), ('bar', '<f4'), ('baz', 'S10')]))
-
-To get back to a plain ndarray both the dtype and type must be reset. The
-following view does so, taking into account the unusual case that the
-recordarr was not a structured type::
-
- >>> arr2 = recordarr.view(recordarr.dtype.fields or recordarr.dtype, np.ndarray)
-
-Record array fields accessed by index or by attribute are returned as a record
-array if the field has a structured type but as a plain ndarray otherwise. ::
-
- >>> recordarr = np.rec.array([('Hello', (1, 2)), ("World", (3, 4))],
- ... dtype=[('foo', 'S6'),('bar', [('A', int), ('B', int)])])
- >>> type(recordarr.foo)
- <class 'numpy.ndarray'>
- >>> type(recordarr.bar)
- <class 'numpy.recarray'>
-
-Note that if a field has the same name as an ndarray attribute, the ndarray
-attribute takes precedence. Such fields will be inaccessible by attribute but
-will still be accessible by index.
-
-"""
diff --git a/numpy/doc/subclassing.py b/numpy/doc/subclassing.py
deleted file mode 100644
index 7dc10e1c8..000000000
--- a/numpy/doc/subclassing.py
+++ /dev/null
@@ -1,752 +0,0 @@
-"""=============================
-Subclassing ndarray in python
-=============================
-
-Introduction
-------------
-
-Subclassing ndarray is relatively simple, but it has some complications
-compared to other Python objects. On this page we explain the machinery
-that allows you to subclass ndarray, and the implications for
-implementing a subclass.
-
-ndarrays and object creation
-============================
-
-Subclassing ndarray is complicated by the fact that new instances of
-ndarray classes can come about in three different ways. These are:
-
-#. Explicit constructor call - as in ``MySubClass(params)``. This is
- the usual route to Python instance creation.
-#. View casting - casting an existing ndarray as a given subclass
-#. New from template - creating a new instance from a template
- instance. Examples include returning slices from a subclassed array,
- creating return types from ufuncs, and copying arrays. See
- :ref:`new-from-template` for more details
-
-The last two are characteristics of ndarrays - in order to support
-things like array slicing. The complications of subclassing ndarray are
-due to the mechanisms numpy has to support these latter two routes of
-instance creation.
-
-.. _view-casting:
-
-View casting
-------------
-
-*View casting* is the standard ndarray mechanism by which you take an
-ndarray of any subclass, and return a view of the array as another
-(specified) subclass:
-
->>> import numpy as np
->>> # create a completely useless ndarray subclass
->>> class C(np.ndarray): pass
->>> # create a standard ndarray
->>> arr = np.zeros((3,))
->>> # take a view of it, as our useless subclass
->>> c_arr = arr.view(C)
->>> type(c_arr)
-<class 'C'>
-
-.. _new-from-template:
-
-Creating new from template
---------------------------
-
-New instances of an ndarray subclass can also come about by a very
-similar mechanism to :ref:`view-casting`, when numpy finds it needs to
-create a new instance from a template instance. The most obvious place
-this has to happen is when you are taking slices of subclassed arrays.
-For example:
-
->>> v = c_arr[1:]
->>> type(v) # the view is of type 'C'
-<class 'C'>
->>> v is c_arr # but it's a new instance
-False
-
-The slice is a *view* onto the original ``c_arr`` data. So, when we
-take a view from the ndarray, we return a new ndarray, of the same
-class, that points to the data in the original.
-
-There are other points in the use of ndarrays where we need such views,
-such as copying arrays (``c_arr.copy()``), creating ufunc output arrays
-(see also :ref:`array-wrap`), and reducing methods (like
-``c_arr.mean()``).
-
-Relationship of view casting and new-from-template
---------------------------------------------------
-
-These paths both use the same machinery. We make the distinction here,
-because they result in different input to your methods. Specifically,
-:ref:`view-casting` means you have created a new instance of your array
-type from any potential subclass of ndarray. :ref:`new-from-template`
-means you have created a new instance of your class from a pre-existing
-instance, allowing you - for example - to copy across attributes that
-are particular to your subclass.
-
-Implications for subclassing
-----------------------------
-
-If we subclass ndarray, we need to deal not only with explicit
-construction of our array type, but also :ref:`view-casting` or
-:ref:`new-from-template`. NumPy has the machinery to do this, and it is
-this machinery that makes subclassing slightly non-standard.
-
-There are two aspects to the machinery that ndarray uses to support
-views and new-from-template in subclasses.
-
-The first is the use of the ``ndarray.__new__`` method for the main work
-of object initialization, rather than the more usual ``__init__``
-method. The second is the use of the ``__array_finalize__`` method to
-allow subclasses to clean up after the creation of views and new
-instances from templates.
-
-A brief Python primer on ``__new__`` and ``__init__``
-=====================================================
-
-``__new__`` is a standard Python method, and, if present, is called
-before ``__init__`` when we create a class instance. See the `python
-__new__ documentation
-<https://docs.python.org/reference/datamodel.html#object.__new__>`_ for more detail.
-
-For example, consider the following Python code:
-
-.. testcode::
-
- class C:
- def __new__(cls, *args):
- print('Cls in __new__:', cls)
- print('Args in __new__:', args)
- # The `object` type __new__ method takes a single argument.
- return object.__new__(cls)
-
- def __init__(self, *args):
- print('type(self) in __init__:', type(self))
- print('Args in __init__:', args)
-
-meaning that we get:
-
->>> c = C('hello')
-Cls in __new__: <class 'C'>
-Args in __new__: ('hello',)
-type(self) in __init__: <class 'C'>
-Args in __init__: ('hello',)
-
-When we call ``C('hello')``, the ``__new__`` method gets its own class
-as first argument, and the passed argument, which is the string
-``'hello'``. After python calls ``__new__``, it usually (see below)
-calls our ``__init__`` method, with the output of ``__new__`` as the
-first argument (now a class instance), and the passed arguments
-following.
-
-As you can see, the object can be initialized in the ``__new__``
-method or the ``__init__`` method, or both, and in fact ndarray does
-not have an ``__init__`` method, because all the initialization is
-done in the ``__new__`` method.
-
-Why use ``__new__`` rather than just the usual ``__init__``? Because
-in some cases, as for ndarray, we want to be able to return an object
-of some other class. Consider the following:
-
-.. testcode::
-
- class D(C):
- def __new__(cls, *args):
- print('D cls is:', cls)
- print('D args in __new__:', args)
- return C.__new__(C, *args)
-
- def __init__(self, *args):
- # we never get here
- print('In D __init__')
-
-meaning that:
-
->>> obj = D('hello')
-D cls is: <class 'D'>
-D args in __new__: ('hello',)
-Cls in __new__: <class 'C'>
-Args in __new__: ('hello',)
->>> type(obj)
-<class 'C'>
-
-The definition of ``C`` is the same as before, but for ``D``, the
-``__new__`` method returns an instance of class ``C`` rather than
-``D``. Note that the ``__init__`` method of ``D`` does not get
-called. In general, when the ``__new__`` method returns an object of
-class other than the class in which it is defined, the ``__init__``
-method of that class is not called.
-
-This is how subclasses of the ndarray class are able to return views
-that preserve the class type. When taking a view, the standard
-ndarray machinery creates the new ndarray object with something
-like::
-
- obj = ndarray.__new__(subtype, shape, ...
-
-where ``subtype`` is the subclass. Thus the returned view is of the
-same class as the subclass, rather than being of class ``ndarray``.
-
-That solves the problem of returning views of the same type, but now
-we have a new problem. The machinery of ndarray can set the class
-this way, in its standard methods for taking views, but the ndarray
-``__new__`` method knows nothing of what we have done in our own
-``__new__`` method in order to set attributes, and so on. (Aside -
-why not call ``obj = subtype.__new__(...`` then? Because we may not
-have a ``__new__`` method with the same call signature).
-
-The role of ``__array_finalize__``
-==================================
-
-``__array_finalize__`` is the mechanism that numpy provides to allow
-subclasses to handle the various ways that new instances get created.
-
-Remember that subclass instances can come about in these three ways:
-
-#. explicit constructor call (``obj = MySubClass(params)``). This will
- call the usual sequence of ``MySubClass.__new__`` then (if it exists)
- ``MySubClass.__init__``.
-#. :ref:`view-casting`
-#. :ref:`new-from-template`
-
-Our ``MySubClass.__new__`` method only gets called in the case of the
-explicit constructor call, so we can't rely on ``MySubClass.__new__`` or
-``MySubClass.__init__`` to deal with the view casting and
-new-from-template. It turns out that ``MySubClass.__array_finalize__``
-*does* get called for all three methods of object creation, so this is
-where our object creation housekeeping usually goes.
-
-* For the explicit constructor call, our subclass will need to create a
- new ndarray instance of its own class. In practice this means that
- we, the authors of the code, will need to make a call to
- ``ndarray.__new__(MySubClass,...)``, a class-hierarchy prepared call to
- ``super(MySubClass, cls).__new__(cls, ...)``, or do view casting of an
- existing array (see below)
-* For view casting and new-from-template, the equivalent of
- ``ndarray.__new__(MySubClass,...`` is called, at the C level.
-
-The arguments that ``__array_finalize__`` receives differ for the three
-methods of instance creation above.
-
-The following code allows us to look at the call sequences and arguments:
-
-.. testcode::
-
- import numpy as np
-
- class C(np.ndarray):
- def __new__(cls, *args, **kwargs):
- print('In __new__ with class %s' % cls)
- return super(C, cls).__new__(cls, *args, **kwargs)
-
- def __init__(self, *args, **kwargs):
- # in practice you probably will not need or want an __init__
- # method for your subclass
- print('In __init__ with class %s' % self.__class__)
-
- def __array_finalize__(self, obj):
- print('In array_finalize:')
- print(' self type is %s' % type(self))
- print(' obj type is %s' % type(obj))
-
-
-Now:
-
->>> # Explicit constructor
->>> c = C((10,))
-In __new__ with class <class 'C'>
-In array_finalize:
- self type is <class 'C'>
-   obj type is <class 'NoneType'>
-In __init__ with class <class 'C'>
->>> # View casting
->>> a = np.arange(10)
->>> cast_a = a.view(C)
-In array_finalize:
- self type is <class 'C'>
-   obj type is <class 'numpy.ndarray'>
->>> # Slicing (example of new-from-template)
->>> cv = c[:1]
-In array_finalize:
- self type is <class 'C'>
- obj type is <class 'C'>
-
-The signature of ``__array_finalize__`` is::
-
- def __array_finalize__(self, obj):
-
-One sees that the ``super`` call, which goes to
-``ndarray.__new__``, passes ``__array_finalize__`` the new object, of our
-own class (``self``) as well as the object from which the view has been
-taken (``obj``). As you can see from the output above, the ``self`` is
-always a newly created instance of our subclass, and the type of ``obj``
-differs for the three instance creation methods:
-
-* When called from the explicit constructor, ``obj`` is ``None``
-* When called from view casting, ``obj`` can be an instance of any
- subclass of ndarray, including our own.
-* When called in new-from-template, ``obj`` is another instance of our
- own subclass, that we might use to update the new ``self`` instance.
-
-Because ``__array_finalize__`` is the only method that always sees new
-instances being created, it is the sensible place to fill in instance
-defaults for new object attributes, among other tasks.
-
-This may be clearer with an example.
-
-Simple example - adding an extra attribute to ndarray
------------------------------------------------------
-
-.. testcode::
-
- import numpy as np
-
- class InfoArray(np.ndarray):
-
- def __new__(subtype, shape, dtype=float, buffer=None, offset=0,
- strides=None, order=None, info=None):
- # Create the ndarray instance of our type, given the usual
- # ndarray input arguments. This will call the standard
- # ndarray constructor, but return an object of our type.
- # It also triggers a call to InfoArray.__array_finalize__
- obj = super(InfoArray, subtype).__new__(subtype, shape, dtype,
- buffer, offset, strides,
- order)
- # set the new 'info' attribute to the value passed
- obj.info = info
- # Finally, we must return the newly created object:
- return obj
-
- def __array_finalize__(self, obj):
- # ``self`` is a new object resulting from
- # ndarray.__new__(InfoArray, ...), therefore it only has
- # attributes that the ndarray.__new__ constructor gave it -
- # i.e. those of a standard ndarray.
- #
- # We could have got to the ndarray.__new__ call in 3 ways:
- # From an explicit constructor - e.g. InfoArray():
- # obj is None
- # (we're in the middle of the InfoArray.__new__
- # constructor, and self.info will be set when we return to
- # InfoArray.__new__)
- if obj is None: return
- # From view casting - e.g arr.view(InfoArray):
- # obj is arr
- # (type(obj) can be InfoArray)
- # From new-from-template - e.g infoarr[:3]
- # type(obj) is InfoArray
- #
- # Note that it is here, rather than in the __new__ method,
- # that we set the default value for 'info', because this
- # method sees all creation of default objects - with the
- # InfoArray.__new__ constructor, but also with
- # arr.view(InfoArray).
- self.info = getattr(obj, 'info', None)
- # We do not need to return anything
-
-
-Using the object looks like this:
-
- >>> obj = InfoArray(shape=(3,)) # explicit constructor
- >>> type(obj)
- <class 'InfoArray'>
- >>> obj.info is None
- True
- >>> obj = InfoArray(shape=(3,), info='information')
- >>> obj.info
- 'information'
- >>> v = obj[1:] # new-from-template - here - slicing
- >>> type(v)
- <class 'InfoArray'>
- >>> v.info
- 'information'
- >>> arr = np.arange(10)
- >>> cast_arr = arr.view(InfoArray) # view casting
- >>> type(cast_arr)
- <class 'InfoArray'>
- >>> cast_arr.info is None
- True
-
-This class isn't very useful, because it has the same constructor as the
-bare ndarray object, including passing in buffers and shapes and so on.
-We would probably prefer the constructor to be able to take an already
-formed ndarray from the usual numpy calls to ``np.array`` and return an
-object.
-
-Slightly more realistic example - attribute added to existing array
--------------------------------------------------------------------
-
-Here is a class that takes a standard ndarray that already exists, casts
-as our type, and adds an extra attribute.
-
-.. testcode::
-
- import numpy as np
-
- class RealisticInfoArray(np.ndarray):
-
- def __new__(cls, input_array, info=None):
- # Input array is an already formed ndarray instance
- # We first cast to be our class type
- obj = np.asarray(input_array).view(cls)
- # add the new attribute to the created instance
- obj.info = info
- # Finally, we must return the newly created object:
- return obj
-
- def __array_finalize__(self, obj):
- # see InfoArray.__array_finalize__ for comments
- if obj is None: return
- self.info = getattr(obj, 'info', None)
-
-
-So:
-
- >>> arr = np.arange(5)
- >>> obj = RealisticInfoArray(arr, info='information')
- >>> type(obj)
- <class 'RealisticInfoArray'>
- >>> obj.info
- 'information'
- >>> v = obj[1:]
- >>> type(v)
- <class 'RealisticInfoArray'>
- >>> v.info
- 'information'
-
-.. _array-ufunc:
-
-``__array_ufunc__`` for ufuncs
-------------------------------
-
- .. versionadded:: 1.13
-
-A subclass can override what happens when executing numpy ufuncs on it by
-overriding the default ``ndarray.__array_ufunc__`` method. This method is
-executed *instead* of the ufunc and should return either the result of the
-operation, or :obj:`NotImplemented` if the operation requested is not
-implemented.
-
-The signature of ``__array_ufunc__`` is::
-
- def __array_ufunc__(ufunc, method, *inputs, **kwargs):
-
- - *ufunc* is the ufunc object that was called.
- - *method* is a string indicating how the Ufunc was called, either
- ``"__call__"`` to indicate it was called directly, or one of its
- :ref:`methods<ufuncs.methods>`: ``"reduce"``, ``"accumulate"``,
- ``"reduceat"``, ``"outer"``, or ``"at"``.
- - *inputs* is a tuple of the input arguments to the ``ufunc``
- - *kwargs* contains any optional or keyword arguments passed to the
- function. This includes any ``out`` arguments, which are always
- contained in a tuple.
-
-A typical implementation would convert any inputs or outputs that are
-instances of one's own class, pass everything on to a superclass using
-``super()``, and finally return the results after possible
-back-conversion. An example, taken from the test case
-``test_ufunc_override_with_super`` in ``core/tests/test_umath.py``, is the
-following.
-
-.. testcode::
-
-    import numpy as np
-
- class A(np.ndarray):
- def __array_ufunc__(self, ufunc, method, *inputs, out=None, **kwargs):
- args = []
- in_no = []
- for i, input_ in enumerate(inputs):
- if isinstance(input_, A):
- in_no.append(i)
- args.append(input_.view(np.ndarray))
- else:
- args.append(input_)
-
- outputs = out
- out_no = []
- if outputs:
- out_args = []
- for j, output in enumerate(outputs):
- if isinstance(output, A):
- out_no.append(j)
- out_args.append(output.view(np.ndarray))
- else:
- out_args.append(output)
- kwargs['out'] = tuple(out_args)
- else:
- outputs = (None,) * ufunc.nout
-
- info = {}
- if in_no:
- info['inputs'] = in_no
- if out_no:
- info['outputs'] = out_no
-
- results = super(A, self).__array_ufunc__(ufunc, method,
- *args, **kwargs)
- if results is NotImplemented:
- return NotImplemented
-
- if method == 'at':
- if isinstance(inputs[0], A):
- inputs[0].info = info
- return
-
- if ufunc.nout == 1:
- results = (results,)
-
- results = tuple((np.asarray(result).view(A)
- if output is None else output)
- for result, output in zip(results, outputs))
- if results and isinstance(results[0], A):
- results[0].info = info
-
- return results[0] if len(results) == 1 else results
-
-So, this class does not actually do anything interesting: it just
-converts any instances of its own to regular ndarray (otherwise, we'd
-get infinite recursion!), and adds an ``info`` dictionary that tells
-which inputs and outputs it converted. Hence, e.g.,
-
->>> a = np.arange(5.).view(A)
->>> b = np.sin(a)
->>> b.info
-{'inputs': [0]}
->>> b = np.sin(np.arange(5.), out=(a,))
->>> b.info
-{'outputs': [0]}
->>> a = np.arange(5.).view(A)
->>> b = np.ones(1).view(A)
->>> c = a + b
->>> c.info
-{'inputs': [0, 1]}
->>> a += b
->>> a.info
-{'inputs': [0, 1], 'outputs': [0]}
-
-Note that another approach would be to use ``getattr(ufunc,
-method)(*inputs, **kwargs)`` instead of the ``super`` call. For this example,
-the result would be identical, but there is a difference if another operand
-also defines ``__array_ufunc__``. E.g., let's assume that we evaluate
-``np.add(a, b)``, where ``b`` is an instance of another class ``B`` that has
-an override. If you use ``super`` as in the example,
-``ndarray.__array_ufunc__`` will notice that ``b`` has an override, which
-means it cannot evaluate the result itself. Thus, it will return
-`NotImplemented` and so will our class ``A``. Then, control will be passed
-over to ``b``, which either knows how to deal with us and produces a result,
-or does not and returns `NotImplemented`, raising a ``TypeError``.
-
-If instead, we replace our ``super`` call with ``getattr(ufunc, method)``, we
-effectively do ``np.add(a.view(np.ndarray), b)``. Again, ``B.__array_ufunc__``
-will be called, but now it sees an ``ndarray`` as the other argument. Likely,
-it will know how to handle this, and return a new instance of the ``B`` class
-to us. Our example class is not set up to handle this, but it might well be
-the best approach if, e.g., one were to re-implement ``MaskedArray`` using
-``__array_ufunc__``.
-
-As a final note: if the ``super`` route is suited to a given class, an
-advantage of using it is that it helps in constructing class hierarchies.
-E.g., suppose that our other class ``B`` also used the ``super`` in its
-``__array_ufunc__`` implementation, and we created a class ``C`` that depended
-on both, i.e., ``class C(A, B)`` (with, for simplicity, not another
-``__array_ufunc__`` override). Then any ufunc on an instance of ``C`` would
-pass on to ``A.__array_ufunc__``, the ``super`` call in ``A`` would go to
-``B.__array_ufunc__``, and the ``super`` call in ``B`` would go to
-``ndarray.__array_ufunc__``, thus allowing ``A`` and ``B`` to collaborate.
-
-.. _array-wrap:
-
-``__array_wrap__`` for ufuncs and other functions
--------------------------------------------------
-
-Prior to numpy 1.13, the behaviour of ufuncs could only be tuned using
-``__array_wrap__`` and ``__array_prepare__``. These two allowed one to
-change the output type of a ufunc, but, in contrast to
-``__array_ufunc__``, did not allow one to make any changes to the inputs.
-It is hoped to eventually deprecate these, but ``__array_wrap__`` is also
-used by other numpy functions and methods, such as ``squeeze``, so at the
-present time is still needed for full functionality.
-
-Conceptually, ``__array_wrap__`` "wraps up the action" in the sense of
-allowing a subclass to set the type of the return value and update
-attributes and metadata. Let's show how this works with an example. First
-we return to the simpler example subclass, but with a different name and
-some print statements:
-
-.. testcode::
-
- import numpy as np
-
- class MySubClass(np.ndarray):
-
- def __new__(cls, input_array, info=None):
- obj = np.asarray(input_array).view(cls)
- obj.info = info
- return obj
-
- def __array_finalize__(self, obj):
- print('In __array_finalize__:')
- print(' self is %s' % repr(self))
- print(' obj is %s' % repr(obj))
- if obj is None: return
- self.info = getattr(obj, 'info', None)
-
- def __array_wrap__(self, out_arr, context=None):
- print('In __array_wrap__:')
- print(' self is %s' % repr(self))
- print(' arr is %s' % repr(out_arr))
- # then just call the parent
-        return super(MySubClass, self).__array_wrap__(out_arr, context)
-
-We run a ufunc on an instance of our new array:
-
->>> obj = MySubClass(np.arange(5), info='spam')
-In __array_finalize__:
- self is MySubClass([0, 1, 2, 3, 4])
- obj is array([0, 1, 2, 3, 4])
->>> arr2 = np.arange(5)+1
->>> ret = np.add(arr2, obj)
-In __array_wrap__:
- self is MySubClass([0, 1, 2, 3, 4])
- arr is array([1, 3, 5, 7, 9])
-In __array_finalize__:
- self is MySubClass([1, 3, 5, 7, 9])
- obj is MySubClass([0, 1, 2, 3, 4])
->>> ret
-MySubClass([1, 3, 5, 7, 9])
->>> ret.info
-'spam'
-
-Note that the ufunc (``np.add``) has called the ``__array_wrap__`` method
-with arguments ``self`` as ``obj``, and ``out_arr`` as the (ndarray) result
-of the addition. In turn, the default ``__array_wrap__``
-(``ndarray.__array_wrap__``) has cast the result to class ``MySubClass``,
-and called ``__array_finalize__`` - hence the copying of the ``info``
-attribute. This has all happened at the C level.
-
-But, we could do anything we wanted:
-
-.. testcode::
-
- class SillySubClass(np.ndarray):
-
- def __array_wrap__(self, arr, context=None):
- return 'I lost your data'
-
->>> arr1 = np.arange(5)
->>> obj = arr1.view(SillySubClass)
->>> arr2 = np.arange(5)
->>> ret = np.multiply(obj, arr2)
->>> ret
-'I lost your data'
-
-So, by defining a specific ``__array_wrap__`` method for our subclass,
-we can tweak the output from ufuncs. The ``__array_wrap__`` method
-requires ``self``, then an argument - which is the result of the ufunc -
-and an optional parameter *context*. This parameter is returned by
-ufuncs as a 3-element tuple: (name of the ufunc, arguments of the ufunc,
-domain of the ufunc), but is not set by other numpy functions. Though,
-as seen above, it is possible to do otherwise, ``__array_wrap__`` should
-return an instance of its containing class. See the masked array
-subclass for an implementation.
-
-In addition to ``__array_wrap__``, which is called on the way out of the
-ufunc, there is also an ``__array_prepare__`` method which is called on
-the way into the ufunc, after the output arrays are created but before any
-computation has been performed. The default implementation does nothing
-but pass through the array. ``__array_prepare__`` should not attempt to
-access the array data or resize the array, it is intended for setting the
-output array type, updating attributes and metadata, and performing any
-checks based on the input that may be desired before computation begins.
-Like ``__array_wrap__``, ``__array_prepare__`` must return an ndarray or
-subclass thereof or raise an error.
-
-Extra gotchas - custom ``__del__`` methods and ndarray.base
------------------------------------------------------------
-
-One of the problems that ndarray solves is keeping track of memory
-ownership of ndarrays and their views. Consider the case where we have
-created an ndarray, ``arr`` and have taken a slice with ``v = arr[1:]``.
-The two objects are looking at the same memory. NumPy keeps track of
-where the data came from for a particular array or view, with the
-``base`` attribute:
-
->>> # A normal ndarray, that owns its own data
->>> arr = np.zeros((4,))
->>> # In this case, base is None
->>> arr.base is None
-True
->>> # We take a view
->>> v1 = arr[1:]
->>> # base now points to the array that it derived from
->>> v1.base is arr
-True
->>> # Take a view of a view
->>> v2 = v1[1:]
->>> # base points to the view it derived from
->>> v2.base is v1
-True
-
-In general, if the array owns its own memory, as for ``arr`` in this
-case, then ``arr.base`` will be None - there are some exceptions to this
-- see the numpy book for more details.
-
-The ``base`` attribute is useful in being able to tell whether we have
-a view or the original array. This in turn can be useful if we need
-to know whether or not to do some specific cleanup when the subclassed
-array is deleted. For example, we may only want to do the cleanup if
-the original array is deleted, but not the views. For an example of
-how this can work, have a look at the ``memmap`` class in
-``numpy.core``.
-
-Subclassing and Downstream Compatibility
-----------------------------------------
-
-When sub-classing ``ndarray`` or creating duck-types that mimic the ``ndarray``
-interface, it is your responsibility to decide how aligned your APIs will be
-with those of numpy. For convenience, many numpy functions that have a corresponding
-``ndarray`` method (e.g., ``sum``, ``mean``, ``take``, ``reshape``) work by checking
-if the first argument to a function has a method of the same name. If it exists, the
-method is called instead of coercing the arguments to a numpy array.
-
-For example, if you want your sub-class or duck-type to be compatible with
-numpy's ``sum`` function, the method signature for this object's ``sum`` method
-should be the following:
-
-.. testcode::
-
- def sum(self, axis=None, dtype=None, out=None, keepdims=False):
- ...
-
-This is the exact same method signature for ``np.sum``, so now if a user calls
-``np.sum`` on this object, numpy will call the object's own ``sum`` method and
-pass in these arguments enumerated above in the signature, and no errors will
-be raised because the signatures are completely compatible with each other.
-
-If, however, you decide to deviate from this signature and do something like this:
-
-.. testcode::
-
- def sum(self, axis=None, dtype=None):
- ...
-
-This object is no longer compatible with ``np.sum`` because if you call ``np.sum``,
-it will pass in unexpected arguments ``out`` and ``keepdims``, causing a TypeError
-to be raised.
-
-If you wish to maintain compatibility with numpy and its subsequent versions (which
-might add new keyword arguments) but do not want to surface all of numpy's arguments,
-your function's signature should accept ``**kwargs``. For example:
-
-.. testcode::
-
- def sum(self, axis=None, dtype=None, **unused_kwargs):
- ...
-
-This object is now compatible with ``np.sum`` again because any extraneous arguments
-(i.e. keywords that are not ``axis`` or ``dtype``) will be hidden away in the
-``**unused_kwargs`` parameter.
-
-"""
diff --git a/numpy/emath.pyi b/numpy/emath.pyi
new file mode 100644
index 000000000..032ec9505
--- /dev/null
+++ b/numpy/emath.pyi
@@ -0,0 +1,11 @@
+from typing import Any
+
+sqrt: Any
+log: Any
+log2: Any
+logn: Any
+log10: Any
+power: Any
+arccos: Any
+arcsin: Any
+arctanh: Any
diff --git a/numpy/f2py/__init__.pyi b/numpy/f2py/__init__.pyi
new file mode 100644
index 000000000..602517957
--- /dev/null
+++ b/numpy/f2py/__init__.pyi
@@ -0,0 +1,5 @@
+from typing import Any
+
+run_main: Any
+compile: Any
+f2py_testing: Any
diff --git a/numpy/f2py/cfuncs.py b/numpy/f2py/cfuncs.py
index ccbc9b0fb..35f77eec2 100644
--- a/numpy/f2py/cfuncs.py
+++ b/numpy/f2py/cfuncs.py
@@ -286,11 +286,11 @@ static int f2py_size(PyArrayObject* var, ...)
"""
cppmacros[
- 'pyobj_from_char1'] = '#define pyobj_from_char1(v) (PyInt_FromLong(v))'
+ 'pyobj_from_char1'] = '#define pyobj_from_char1(v) (PyLong_FromLong(v))'
cppmacros[
- 'pyobj_from_short1'] = '#define pyobj_from_short1(v) (PyInt_FromLong(v))'
+ 'pyobj_from_short1'] = '#define pyobj_from_short1(v) (PyLong_FromLong(v))'
needs['pyobj_from_int1'] = ['signed_char']
-cppmacros['pyobj_from_int1'] = '#define pyobj_from_int1(v) (PyInt_FromLong(v))'
+cppmacros['pyobj_from_int1'] = '#define pyobj_from_int1(v) (PyLong_FromLong(v))'
cppmacros[
'pyobj_from_long1'] = '#define pyobj_from_long1(v) (PyLong_FromLong(v))'
needs['pyobj_from_long_long1'] = ['long_long']
@@ -320,10 +320,10 @@ cppmacros[
'pyobj_from_complex_float1'] = '#define pyobj_from_complex_float1(v) (PyComplex_FromDoubles(v.r,v.i))'
needs['pyobj_from_string1'] = ['string']
cppmacros[
- 'pyobj_from_string1'] = '#define pyobj_from_string1(v) (PyString_FromString((char *)v))'
+ 'pyobj_from_string1'] = '#define pyobj_from_string1(v) (PyUnicode_FromString((char *)v))'
needs['pyobj_from_string1size'] = ['string']
cppmacros[
- 'pyobj_from_string1size'] = '#define pyobj_from_string1size(v,len) (PyUString_FromStringAndSize((char *)v, len))'
+ 'pyobj_from_string1size'] = '#define pyobj_from_string1size(v,len) (PyUnicode_FromStringAndSize((char *)v, len))'
needs['TRYPYARRAYTEMPLATE'] = ['PRINTPYOBJERR']
cppmacros['TRYPYARRAYTEMPLATE'] = """\
/* New SciPy */
@@ -436,9 +436,9 @@ cppmacros['GETSTRFROMPYTUPLE'] = """\
PyObject *rv_cb_str = PyTuple_GetItem((tuple),(index));\\
if (rv_cb_str == NULL)\\
goto capi_fail;\\
- if (PyString_Check(rv_cb_str)) {\\
+ if (PyBytes_Check(rv_cb_str)) {\\
str[len-1]='\\0';\\
- STRINGCOPYN((str),PyString_AS_STRING((PyStringObject*)rv_cb_str),(len));\\
+ STRINGCOPYN((str),PyString_AS_STRING((PyBytesObject*)rv_cb_str),(len));\\
} else {\\
PRINTPYOBJERR(rv_cb_str);\\
PyErr_SetString(#modulename#_error,\"string object expected\");\\
@@ -655,7 +655,7 @@ fprintf(stderr,\"string_from_pyobj(str='%s',len=%d,inistr='%s',obj=%p)\\n\",(cha
STRINGCOPYN(*str,PyArray_DATA(arr),*len+1);
return 1;
}
- if (PyString_Check(obj)) {
+ if (PyBytes_Check(obj)) {
tmp = obj;
Py_INCREF(tmp);
}
@@ -730,7 +730,7 @@ static int int_from_pyobj(int* v,PyObject *obj,const char *errmess) {
*v = (int)PyInt_AS_LONG(obj);
return 1;
}
- tmp = PyNumber_Int(obj);
+ tmp = PyNumber_Long(obj);
if (tmp) {
*v = PyInt_AS_LONG(tmp);
Py_DECREF(tmp);
@@ -738,7 +738,7 @@ static int int_from_pyobj(int* v,PyObject *obj,const char *errmess) {
}
if (PyComplex_Check(obj))
tmp = PyObject_GetAttrString(obj,\"real\");
- else if (PyString_Check(obj) || PyUnicode_Check(obj))
+ else if (PyBytes_Check(obj) || PyUnicode_Check(obj))
/*pass*/;
else if (PySequence_Check(obj))
tmp = PySequence_GetItem(obj,0);
@@ -762,7 +762,7 @@ static int long_from_pyobj(long* v,PyObject *obj,const char *errmess) {
*v = PyInt_AS_LONG(obj);
return 1;
}
- tmp = PyNumber_Int(obj);
+ tmp = PyNumber_Long(obj);
if (tmp) {
*v = PyInt_AS_LONG(tmp);
Py_DECREF(tmp);
@@ -770,7 +770,7 @@ static int long_from_pyobj(long* v,PyObject *obj,const char *errmess) {
}
if (PyComplex_Check(obj))
tmp = PyObject_GetAttrString(obj,\"real\");
- else if (PyString_Check(obj) || PyUnicode_Check(obj))
+ else if (PyBytes_Check(obj) || PyUnicode_Check(obj))
/*pass*/;
else if (PySequence_Check(obj))
tmp = PySequence_GetItem(obj,0);
@@ -807,7 +807,7 @@ static int long_long_from_pyobj(long_long* v,PyObject *obj,const char *errmess)
}
if (PyComplex_Check(obj))
tmp = PyObject_GetAttrString(obj,\"real\");
- else if (PyString_Check(obj) || PyUnicode_Check(obj))
+ else if (PyBytes_Check(obj) || PyUnicode_Check(obj))
/*pass*/;
else if (PySequence_Check(obj))
tmp = PySequence_GetItem(obj,0);
@@ -868,7 +868,7 @@ static int double_from_pyobj(double* v,PyObject *obj,const char *errmess) {
}
if (PyComplex_Check(obj))
tmp = PyObject_GetAttrString(obj,\"real\");
- else if (PyString_Check(obj) || PyUnicode_Check(obj))
+ else if (PyBytes_Check(obj) || PyUnicode_Check(obj))
/*pass*/;
else if (PySequence_Check(obj))
tmp = PySequence_GetItem(obj,0);
@@ -978,7 +978,7 @@ static int complex_double_from_pyobj(complex_double* v,PyObject *obj,const char
(*v).r = PyLong_AsDouble(obj);
return (!PyErr_Occurred());
}
- if (PySequence_Check(obj) && !(PyString_Check(obj) || PyUnicode_Check(obj))) {
+ if (PySequence_Check(obj) && !(PyBytes_Check(obj) || PyUnicode_Check(obj))) {
PyObject *tmp = PySequence_GetItem(obj,0);
if (tmp) {
if (complex_double_from_pyobj(v,tmp,errmess)) {
diff --git a/numpy/f2py/rules.py b/numpy/f2py/rules.py
index 56f2033ff..a14f60194 100755
--- a/numpy/f2py/rules.py
+++ b/numpy/f2py/rules.py
@@ -202,7 +202,7 @@ PyMODINIT_FUNC PyInit_#modulename#(void) {
\tif (PyErr_Occurred())
\t\t{PyErr_SetString(PyExc_ImportError, \"can't initialize module #modulename# (failed to import numpy)\"); return m;}
\td = PyModule_GetDict(m);
-\ts = PyString_FromString(\"$R""" + """evision: $\");
+\ts = PyUnicode_FromString(\"$R""" + """evision: $\");
\tPyDict_SetItemString(d, \"__version__\", s);
\tPy_DECREF(s);
\ts = PyUnicode_FromString(
diff --git a/numpy/f2py/src/test/foomodule.c b/numpy/f2py/src/test/foomodule.c
index caf3590d4..88ec62440 100644
--- a/numpy/f2py/src/test/foomodule.c
+++ b/numpy/f2py/src/test/foomodule.c
@@ -121,7 +121,7 @@ void initfoo() {
m = Py_InitModule("foo", foo_module_methods);
d = PyModule_GetDict(m);
- s = PyString_FromString("This module 'foo' demonstrates the usage of fortranobject.");
+ s = PyUnicode_FromString("This module 'foo' demonstrates the usage of fortranobject.");
PyDict_SetItemString(d, "__doc__", s);
/* Fortran objects: */
diff --git a/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c b/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c
index 0db33e714..0411b62e0 100644
--- a/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c
+++ b/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c
@@ -1,14 +1,9 @@
-/* File: wrapmodule.c
- * This file is auto-generated with f2py (version:2_1330).
- * Hand edited by Pearu.
- * f2py is a Fortran to Python Interface Generator (FPIG), Second Edition,
- * written by Pearu Peterson <pearu@cens.ioc.ee>.
- * See http://cens.ioc.ee/projects/f2py2e/
- * Generation date: Fri Oct 21 22:41:12 2005
- * $Revision:$
- * $Date:$
- * Do not edit this file directly unless you know what you are doing!!!
+/*
+ * This file was auto-generated with f2py (version:2_1330) and hand edited by
+ * Pearu for testing purposes. Do not edit this file unless you know what you
+ * are doing!!!
*/
+
#ifdef __cplusplus
extern "C" {
#endif
@@ -55,7 +50,7 @@ static PyObject *f2py_rout_wrap_call(PyObject *capi_self,
if (tmp == NULL) {
goto fail;
}
- dims[i] = (npy_intp)PyInt_AsLong(tmp);
+ dims[i] = (npy_intp)PyLong_AsLong(tmp);
Py_DECREF(tmp);
if (dims[i] == -1 && PyErr_Occurred()) {
goto fail;
@@ -107,8 +102,8 @@ static PyObject *f2py_rout_wrap_attrs(PyObject *capi_self,
dimensions = PyTuple_New(PyArray_NDIM(arr));
strides = PyTuple_New(PyArray_NDIM(arr));
for (i=0;i<PyArray_NDIM(arr);++i) {
- PyTuple_SetItem(dimensions,i,PyInt_FromLong(PyArray_DIM(arr,i)));
- PyTuple_SetItem(strides,i,PyInt_FromLong(PyArray_STRIDE(arr,i)));
+ PyTuple_SetItem(dimensions,i,PyLong_FromLong(PyArray_DIM(arr,i)));
+ PyTuple_SetItem(strides,i,PyLong_FromLong(PyArray_STRIDE(arr,i)));
}
return Py_BuildValue("siNNO(cciii)ii",s,PyArray_NDIM(arr),
dimensions,strides,
@@ -149,15 +144,15 @@ PyMODINIT_FUNC PyInit_test_array_from_pyobj_ext(void) {
if (PyErr_Occurred())
Py_FatalError("can't initialize module wrap (failed to import numpy)");
d = PyModule_GetDict(m);
- s = PyString_FromString("This module 'wrap' is auto-generated with f2py (version:2_1330).\nFunctions:\n"
-" arr = call(type_num,dims,intent,obj)\n"
-".");
+ s = PyUnicode_FromString("This module 'wrap' is auto-generated with f2py (version:2_1330).\nFunctions:\n"
+ " arr = call(type_num,dims,intent,obj)\n"
+ ".");
PyDict_SetItemString(d, "__doc__", s);
wrap_error = PyErr_NewException ("wrap.error", NULL, NULL);
Py_DECREF(s);
#define ADDCONST(NAME, CONST) \
- s = PyInt_FromLong(CONST); \
+ s = PyLong_FromLong(CONST); \
PyDict_SetItemString(d, NAME, s); \
Py_DECREF(s)
diff --git a/numpy/fft/__init__.pyi b/numpy/fft/__init__.pyi
new file mode 100644
index 000000000..45190517f
--- /dev/null
+++ b/numpy/fft/__init__.pyi
@@ -0,0 +1,20 @@
+from typing import Any
+
+fft: Any
+ifft: Any
+rfft: Any
+irfft: Any
+hfft: Any
+ihfft: Any
+rfftn: Any
+irfftn: Any
+rfft2: Any
+irfft2: Any
+fft2: Any
+ifft2: Any
+fftn: Any
+ifftn: Any
+fftshift: Any
+ifftshift: Any
+fftfreq: Any
+rfftfreq: Any
diff --git a/numpy/fft/helper.py b/numpy/fft/helper.py
index 3dacd9ee1..927ee1af1 100644
--- a/numpy/fft/helper.py
+++ b/numpy/fft/helper.py
@@ -2,7 +2,6 @@
Discrete Fourier Transforms - helper.py
"""
-from numpy.compat import integer_types
from numpy.core import integer, empty, arange, asarray, roll
from numpy.core.overrides import array_function_dispatch, set_module
@@ -10,7 +9,7 @@ from numpy.core.overrides import array_function_dispatch, set_module
__all__ = ['fftshift', 'ifftshift', 'fftfreq', 'rfftfreq']
-integer_types = integer_types + (integer,)
+integer_types = (int, integer)
def _fftshift_dispatcher(x, axes=None):
diff --git a/numpy/fft/tests/test_helper.py b/numpy/fft/tests/test_helper.py
index 68f5990af..3fb700bb3 100644
--- a/numpy/fft/tests/test_helper.py
+++ b/numpy/fft/tests/test_helper.py
@@ -85,7 +85,6 @@ class TestFFTShift:
def test_equal_to_original(self):
""" Test that the new (>=v1.15) implementation (see #10073) is equal to the original (<=v1.14) """
- from numpy.compat import integer_types
from numpy.core import asarray, concatenate, arange, take
def original_fftshift(x, axes=None):
@@ -94,7 +93,7 @@ class TestFFTShift:
ndim = tmp.ndim
if axes is None:
axes = list(range(ndim))
- elif isinstance(axes, integer_types):
+ elif isinstance(axes, int):
axes = (axes,)
y = tmp
for k in axes:
@@ -110,7 +109,7 @@ class TestFFTShift:
ndim = tmp.ndim
if axes is None:
axes = list(range(ndim))
- elif isinstance(axes, integer_types):
+ elif isinstance(axes, int):
axes = (axes,)
y = tmp
for k in axes:
diff --git a/numpy/lib/__init__.pyi b/numpy/lib/__init__.pyi
new file mode 100644
index 000000000..413e2ae1b
--- /dev/null
+++ b/numpy/lib/__init__.pyi
@@ -0,0 +1,177 @@
+from typing import Any
+
+emath: Any
+math: Any
+tracemalloc_domain: Any
+Arrayterator: Any
+iscomplexobj: Any
+isrealobj: Any
+imag: Any
+iscomplex: Any
+isreal: Any
+nan_to_num: Any
+real: Any
+real_if_close: Any
+typename: Any
+asfarray: Any
+mintypecode: Any
+asscalar: Any
+common_type: Any
+ravel_multi_index: Any
+unravel_index: Any
+mgrid: Any
+ogrid: Any
+r_: Any
+c_: Any
+s_: Any
+index_exp: Any
+ix_: Any
+ndenumerate: Any
+ndindex: Any
+fill_diagonal: Any
+diag_indices: Any
+diag_indices_from: Any
+select: Any
+piecewise: Any
+trim_zeros: Any
+copy: Any
+iterable: Any
+percentile: Any
+diff: Any
+gradient: Any
+angle: Any
+unwrap: Any
+sort_complex: Any
+disp: Any
+flip: Any
+rot90: Any
+extract: Any
+place: Any
+vectorize: Any
+asarray_chkfinite: Any
+average: Any
+bincount: Any
+digitize: Any
+cov: Any
+corrcoef: Any
+msort: Any
+median: Any
+sinc: Any
+hamming: Any
+hanning: Any
+bartlett: Any
+blackman: Any
+kaiser: Any
+trapz: Any
+i0: Any
+add_newdoc: Any
+add_docstring: Any
+meshgrid: Any
+delete: Any
+insert: Any
+append: Any
+interp: Any
+add_newdoc_ufunc: Any
+quantile: Any
+column_stack: Any
+row_stack: Any
+dstack: Any
+array_split: Any
+split: Any
+hsplit: Any
+vsplit: Any
+dsplit: Any
+apply_over_axes: Any
+expand_dims: Any
+apply_along_axis: Any
+kron: Any
+tile: Any
+get_array_wrap: Any
+take_along_axis: Any
+put_along_axis: Any
+broadcast_to: Any
+broadcast_arrays: Any
+diag: Any
+diagflat: Any
+eye: Any
+fliplr: Any
+flipud: Any
+tri: Any
+triu: Any
+tril: Any
+vander: Any
+histogram2d: Any
+mask_indices: Any
+tril_indices: Any
+tril_indices_from: Any
+triu_indices: Any
+triu_indices_from: Any
+fix: Any
+isneginf: Any
+isposinf: Any
+pad: Any
+poly: Any
+roots: Any
+polyint: Any
+polyder: Any
+polyadd: Any
+polysub: Any
+polymul: Any
+polydiv: Any
+polyval: Any
+poly1d: Any
+polyfit: Any
+RankWarning: Any
+issubclass_: Any
+issubsctype: Any
+issubdtype: Any
+deprecate: Any
+deprecate_with_doc: Any
+get_include: Any
+info: Any
+source: Any
+who: Any
+lookfor: Any
+byte_bounds: Any
+safe_eval: Any
+ediff1d: Any
+intersect1d: Any
+setxor1d: Any
+union1d: Any
+setdiff1d: Any
+unique: Any
+in1d: Any
+isin: Any
+savetxt: Any
+loadtxt: Any
+genfromtxt: Any
+ndfromtxt: Any
+mafromtxt: Any
+recfromtxt: Any
+recfromcsv: Any
+load: Any
+loads: Any
+save: Any
+savez: Any
+savez_compressed: Any
+packbits: Any
+unpackbits: Any
+fromregex: Any
+DataSource: Any
+nansum: Any
+nanmax: Any
+nanmin: Any
+nanargmax: Any
+nanargmin: Any
+nanmean: Any
+nanmedian: Any
+nanpercentile: Any
+nanvar: Any
+nanstd: Any
+nanprod: Any
+nancumsum: Any
+nancumprod: Any
+nanquantile: Any
+histogram: Any
+histogramdd: Any
+histogram_bin_edges: Any
diff --git a/numpy/lib/_iotools.py b/numpy/lib/_iotools.py
index 7560bf4da..f5368526d 100644
--- a/numpy/lib/_iotools.py
+++ b/numpy/lib/_iotools.py
@@ -5,7 +5,7 @@ __docformat__ = "restructuredtext en"
import numpy as np
import numpy.core.numeric as nx
-from numpy.compat import asbytes, asunicode, bytes
+from numpy.compat import asbytes, asunicode
def _decode_line(line, encoding=None):
diff --git a/numpy/lib/arraysetops.py b/numpy/lib/arraysetops.py
index df9a110c5..6a2ad004c 100644
--- a/numpy/lib/arraysetops.py
+++ b/numpy/lib/arraysetops.py
@@ -278,7 +278,7 @@ def unique(ar, return_index=False, return_inverse=False,
ar = np.moveaxis(ar, axis, 0)
except np.AxisError:
# this removes the "axis1" or "axis2" prefix from the error message
- raise np.AxisError(axis, ar.ndim)
+ raise np.AxisError(axis, ar.ndim) from None
# Must reshape to a contiguous 2D array for this to work...
orig_shape, orig_dtype = ar.shape, ar.dtype
@@ -300,10 +300,10 @@ def unique(ar, return_index=False, return_inverse=False,
# array with shape `(len(ar),)`. Because `dtype` in this case has
# itemsize 0, the total size of the result is still 0 bytes.
consolidated = np.empty(len(ar), dtype=dtype)
- except TypeError:
+ except TypeError as e:
# There's no good way to do this for object arrays, etc...
msg = 'The axis argument to unique is not supported for dtype {dt}'
- raise TypeError(msg.format(dt=ar.dtype))
+ raise TypeError(msg.format(dt=ar.dtype)) from e
def reshape_uniq(uniq):
n = len(uniq)
diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index 556227c0d..c43b2fb53 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -1625,63 +1625,7 @@ def trim_zeros(filt, trim='fb'):
[1, 2]
"""
- try:
- return _trim_zeros_new(filt, trim)
- except Exception as ex:
- # Numpy 1.20.0, 2020-07-31
- warning = DeprecationWarning(
- "in the future trim_zeros will require a 1-D array as input "
- "that supports elementwise comparisons with zero"
- )
- warning.__cause__ = ex
-
- # Fall back to the old implementation if an exception is encountered
- # Note that the same exception may or may not be raised here as well
- ret = _trim_zeros_old(filt, trim)
- warnings.warn(warning, stacklevel=3)
- return ret
-
-
-def _trim_zeros_new(filt, trim='fb'):
- """Newer optimized implementation of ``trim_zeros()``."""
- arr_any = np.asanyarray(filt)
- arr = arr_any != 0 if arr_any.dtype != bool else arr_any
- if arr is False:
- # not all dtypes support elementwise comparisons with `0` (e.g. str);
- # they will return `False` instead
- raise TypeError('elementwise comparison failed; unsupported data type')
- elif arr.ndim != 1:
- raise ValueError('trim_zeros requires an array of exactly one dimension')
- elif not len(arr):
- return filt
-
- trim_upper = trim.upper()
- first = last = None
-
- if 'F' in trim_upper:
- first = arr.argmax()
- # If `arr[first] is False` then so are all other elements
- if not arr[first]:
- return filt[:0]
-
- if 'B' in trim_upper:
- last = len(arr) - arr[::-1].argmax()
- # If `arr[last - 1] is False` then so are all other elements
- if not arr[last - 1]:
- return filt[:0]
-
- return filt[first:last]
-
-
-def _trim_zeros_old(filt, trim='fb'):
- """
- Older unoptimized implementation of ``trim_zeros()``.
-
- Used as fallback in case an exception is encountered
- in ``_trim_zeros_new()``.
-
- """
first = 0
trim = trim.upper()
if 'F' in trim:
@@ -1991,8 +1935,8 @@ class vectorize:
.. versionadded:: 1.7.0
cache : bool, optional
- If `True`, then cache the first function call that determines the number
- of outputs if `otypes` is not provided.
+ If `True`, then cache the first function call that determines the number
+ of outputs if `otypes` is not provided.
.. versionadded:: 1.7.0
@@ -2789,8 +2733,8 @@ def blackman(M):
return array([])
if M == 1:
return ones(1, float)
- n = arange(0, M)
- return 0.42 - 0.5*cos(2.0*pi*n/(M-1)) + 0.08*cos(4.0*pi*n/(M-1))
+ n = arange(1-M, M, 2)
+ return 0.42 + 0.5*cos(pi*n/(M-1)) + 0.08*cos(2.0*pi*n/(M-1))
@set_module('numpy')
@@ -2898,8 +2842,8 @@ def bartlett(M):
return array([])
if M == 1:
return ones(1, float)
- n = arange(0, M)
- return where(less_equal(n, (M-1)/2.0), 2.0*n/(M-1), 2.0 - 2.0*n/(M-1))
+ n = arange(1-M, M, 2)
+ return where(less_equal(n, 0), 1 + n/(M-1), 1 - n/(M-1))
@set_module('numpy')
@@ -3002,8 +2946,8 @@ def hanning(M):
return array([])
if M == 1:
return ones(1, float)
- n = arange(0, M)
- return 0.5 - 0.5*cos(2.0*pi*n/(M-1))
+ n = arange(1-M, M, 2)
+ return 0.5 + 0.5*cos(pi*n/(M-1))
@set_module('numpy')
@@ -3102,8 +3046,8 @@ def hamming(M):
return array([])
if M == 1:
return ones(1, float)
- n = arange(0, M)
- return 0.54 - 0.46*cos(2.0*pi*n/(M-1))
+ n = arange(1-M, M, 2)
+ return 0.54 + 0.46*cos(pi*n/(M-1))
## Code from cephes for i0
@@ -4285,10 +4229,9 @@ def meshgrid(*xi, copy=True, sparse=False, indexing='xy'):
See Also
--------
- index_tricks.mgrid : Construct a multi-dimensional "meshgrid"
- using indexing notation.
- index_tricks.ogrid : Construct an open multi-dimensional "meshgrid"
- using indexing notation.
+ mgrid : Construct a multi-dimensional "meshgrid" using indexing notation.
+ ogrid : Construct an open multi-dimensional "meshgrid" using indexing
+ notation.
Examples
--------
diff --git a/numpy/lib/index_tricks.py b/numpy/lib/index_tricks.py
index 6093f7e9d..9d3de69dd 100644
--- a/numpy/lib/index_tricks.py
+++ b/numpy/lib/index_tricks.py
@@ -1,6 +1,7 @@
import functools
import sys
import math
+import warnings
import numpy.core.numeric as _nx
from numpy.core.numeric import (
@@ -659,7 +660,15 @@ class ndindex:
Increment the multi-dimensional index by one.
This method is for backward compatibility only: do not use.
+
+ .. deprecated:: 1.20.0
+ This method has been advised against since numpy 1.8.0, but only
+ started emitting DeprecationWarning as of this version.
"""
+ # NumPy 1.20.0, 2020-09-08
+ warnings.warn(
+ "`ndindex.ndincr()` is deprecated, use `next(ndindex)` instead",
+ DeprecationWarning, stacklevel=2)
next(self)
def __next__(self):
diff --git a/numpy/lib/nanfunctions.py b/numpy/lib/nanfunctions.py
index 003550432..409016adb 100644
--- a/numpy/lib/nanfunctions.py
+++ b/numpy/lib/nanfunctions.py
@@ -244,7 +244,7 @@ def nanmin(a, axis=None, out=None, keepdims=np._NoValue):
Alternate output array in which to place the result. The default
is ``None``; if provided, it must have the same shape as the
expected output, but the type will be cast if necessary. See
- `ufuncs-output-type` for more details.
+ :ref:`ufuncs-output-type` for more details.
.. versionadded:: 1.8.0
keepdims : bool, optional
@@ -359,7 +359,7 @@ def nanmax(a, axis=None, out=None, keepdims=np._NoValue):
Alternate output array in which to place the result. The default
is ``None``; if provided, it must have the same shape as the
expected output, but the type will be cast if necessary. See
- `ufuncs-output-type` for more details.
+ :ref:`ufuncs-output-type` for more details.
.. versionadded:: 1.8.0
keepdims : bool, optional
@@ -584,7 +584,7 @@ def nansum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
Alternate output array in which to place the result. The default
is ``None``. If provided, it must have the same shape as the
expected output, but the type will be cast if necessary. See
- `ufuncs-output-type` for more details. The casting of NaN to integer
+ :ref:`ufuncs-output-type` for more details. The casting of NaN to integer
can yield unexpected results.
.. versionadded:: 1.8.0
@@ -681,7 +681,7 @@ def nanprod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
Alternate output array in which to place the result. The default
is ``None``. If provided, it must have the same shape as the
expected output, but the type will be cast if necessary. See
- `ufuncs-output-type` for more details. The casting of NaN to integer
+ :ref:`ufuncs-output-type` for more details. The casting of NaN to integer
can yield unexpected results.
keepdims : bool, optional
If True, the axes which are reduced are left in the result as
@@ -749,7 +749,7 @@ def nancumsum(a, axis=None, dtype=None, out=None):
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output
- but the type will be cast if necessary. See `ufuncs-output-type` for
+ but the type will be cast if necessary. See :ref:`ufuncs-output-type` for
more details.
Returns
@@ -888,7 +888,7 @@ def nanmean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
Alternate output array in which to place the result. The default
is ``None``; if provided, it must have the same shape as the
expected output, but the type will be cast if necessary. See
- `ufuncs-output-type` for more details.
+ :ref:`ufuncs-output-type` for more details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
@@ -1256,7 +1256,7 @@ def nanquantile(a, q, axis=None, out=None, overwrite_input=False,
Compute the qth quantile of the data along the specified axis,
while ignoring nan values.
Returns the qth quantile(s) of the array elements.
-
+
.. versionadded:: 1.15.0
Parameters
@@ -1472,7 +1472,7 @@ def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
mean : Average
var : Variance while not ignoring NaNs
nanstd, nanmean
- ufuncs-output-type
+ :ref:`ufuncs-output-type`
Notes
-----
@@ -1624,7 +1624,7 @@ def nanstd(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
--------
var, mean, std
nanvar, nanmean
- ufuncs-output-type
+ :ref:`ufuncs-output-type`
Notes
-----
diff --git a/numpy/lib/npyio.py b/numpy/lib/npyio.py
index 58affc2fc..90e16643c 100644
--- a/numpy/lib/npyio.py
+++ b/numpy/lib/npyio.py
@@ -14,7 +14,7 @@ from . import format
from ._datasource import DataSource
from numpy.core import overrides
from numpy.core.multiarray import packbits, unpackbits
-from numpy.core.overrides import set_module
+from numpy.core.overrides import set_array_function_like_doc, set_module
from numpy.core._internal import recursive
from ._iotools import (
LineSplitter, NameValidator, StringConverter, ConverterError,
@@ -23,7 +23,7 @@ from ._iotools import (
)
from numpy.compat import (
- asbytes, asstr, asunicode, bytes, os_fspath, os_PathLike,
+ asbytes, asstr, asunicode, os_fspath, os_PathLike,
pickle, contextlib_nullcontext
)
@@ -712,44 +712,14 @@ def _savez(file, args, kwds, compress, allow_pickle=True, pickle_kwargs=None):
zipf = zipfile_factory(file, mode="w", compression=compression)
- if sys.version_info >= (3, 6):
- # Since Python 3.6 it is possible to write directly to a ZIP file.
- for key, val in namedict.items():
- fname = key + '.npy'
- val = np.asanyarray(val)
- # always force zip64, gh-10776
- with zipf.open(fname, 'w', force_zip64=True) as fid:
- format.write_array(fid, val,
- allow_pickle=allow_pickle,
- pickle_kwargs=pickle_kwargs)
- else:
- # Stage arrays in a temporary file on disk, before writing to zip.
-
- # Import deferred for startup time improvement
- import tempfile
- # Since target file might be big enough to exceed capacity of a global
- # temporary directory, create temp file side-by-side with the target file.
- file_dir, file_prefix = os.path.split(file) if _is_string_like(file) else (None, 'tmp')
- fd, tmpfile = tempfile.mkstemp(prefix=file_prefix, dir=file_dir, suffix='-numpy.npy')
- os.close(fd)
- try:
- for key, val in namedict.items():
- fname = key + '.npy'
- fid = open(tmpfile, 'wb')
- try:
- format.write_array(fid, np.asanyarray(val),
- allow_pickle=allow_pickle,
- pickle_kwargs=pickle_kwargs)
- fid.close()
- fid = None
- zipf.write(tmpfile, arcname=fname)
- except IOError as exc:
- raise IOError("Failed to write to %s: %s" % (tmpfile, exc))
- finally:
- if fid:
- fid.close()
- finally:
- os.remove(tmpfile)
+ for key, val in namedict.items():
+ fname = key + '.npy'
+ val = np.asanyarray(val)
+ # always force zip64, gh-10776
+ with zipf.open(fname, 'w', force_zip64=True) as fid:
+ format.write_array(fid, val,
+ allow_pickle=allow_pickle,
+ pickle_kwargs=pickle_kwargs)
zipf.close()
@@ -790,10 +760,17 @@ def _getconv(dtype):
_loadtxt_chunksize = 50000
+def _loadtxt_dispatcher(fname, dtype=None, comments=None, delimiter=None,
+ converters=None, skiprows=None, usecols=None, unpack=None,
+ ndmin=None, encoding=None, max_rows=None, *, like=None):
+ return (like,)
+
+
+@set_array_function_like_doc
@set_module('numpy')
def loadtxt(fname, dtype=float, comments='#', delimiter=None,
converters=None, skiprows=0, usecols=None, unpack=False,
- ndmin=0, encoding='bytes', max_rows=None):
+ ndmin=0, encoding='bytes', max_rows=None, *, like=None):
r"""
Load data from a text file.
@@ -838,8 +815,9 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
fourth column the same way as ``usecols = (3,)`` would.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
- unpacked using ``x, y, z = loadtxt(...)``. When used with a structured
- data-type, arrays are returned for each field. Default is False.
+ unpacked using ``x, y, z = loadtxt(...)``. When used with a
+ structured data-type, arrays are returned for each field.
+ Default is False.
ndmin : int, optional
The returned array will have at least `ndmin` dimensions.
Otherwise mono-dimensional axes will be squeezed.
@@ -860,6 +838,9 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
is to read all the lines.
.. versionadded:: 1.16.0
+ ${ARRAY_FUNCTION_LIKE}
+
+ .. versionadded:: 1.20.0
Returns
-------
@@ -917,6 +898,14 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
[-17.57, 63.94]])
"""
+ if like is not None:
+ return _loadtxt_with_like(
+ fname, dtype=dtype, comments=comments, delimiter=delimiter,
+ converters=converters, skiprows=skiprows, usecols=usecols,
+ unpack=unpack, ndmin=ndmin, encoding=encoding,
+ max_rows=max_rows, like=like
+ )
+
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Nested functions used by loadtxt.
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@@ -1201,6 +1190,11 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
return X
+_loadtxt_with_like = array_function_dispatch(
+ _loadtxt_dispatcher
+)(loadtxt)
+
+
def _savetxt_dispatcher(fname, X, fmt=None, delimiter=None, newline=None,
header=None, footer=None, comments=None,
encoding=None):
@@ -1497,7 +1491,7 @@ def fromregex(file, regexp, dtype, encoding=None):
-----
Dtypes for structured arrays can be specified in several forms, but all
forms specify at least the data type and field name. For details see
- `doc.structured_arrays`.
+ `basics.rec`.
Examples
--------
@@ -1554,6 +1548,18 @@ def fromregex(file, regexp, dtype, encoding=None):
#####--------------------------------------------------------------------------
+def _genfromtxt_dispatcher(fname, dtype=None, comments=None, delimiter=None,
+ skip_header=None, skip_footer=None, converters=None,
+ missing_values=None, filling_values=None, usecols=None,
+ names=None, excludelist=None, deletechars=None,
+ replace_space=None, autostrip=None, case_sensitive=None,
+ defaultfmt=None, unpack=None, usemask=None, loose=None,
+ invalid_raise=None, max_rows=None, encoding=None, *,
+ like=None):
+ return (like,)
+
+
+@set_array_function_like_doc
@set_module('numpy')
def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
skip_header=0, skip_footer=0, converters=None,
@@ -1562,7 +1568,8 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
deletechars=''.join(sorted(NameValidator.defaultdeletechars)),
replace_space='_', autostrip=False, case_sensitive=True,
defaultfmt="f%i", unpack=None, usemask=False, loose=True,
- invalid_raise=True, max_rows=None, encoding='bytes'):
+ invalid_raise=True, max_rows=None, encoding='bytes', *,
+ like=None):
"""
Load data from a text file, with missing values handled as specified.
@@ -1634,7 +1641,9 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
If 'lower', field names are converted to lower case.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
- unpacked using ``x, y, z = loadtxt(...)``
+ unpacked using ``x, y, z = genfromtxt(...)``. When used with a
+ structured data-type, arrays are returned for each field.
+ Default is False.
usemask : bool, optional
If True, return a masked array.
If False, return a regular array.
@@ -1659,6 +1668,9 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
to None the system default is used. The default value is 'bytes'.
.. versionadded:: 1.14.0
+ ${ARRAY_FUNCTION_LIKE}
+
+ .. versionadded:: 1.20.0
Returns
-------
@@ -1737,6 +1749,21 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
dtype=[('f0', 'S12'), ('f1', 'S12')])
"""
+
+ if like is not None:
+ return _genfromtxt_with_like(
+ fname, dtype=dtype, comments=comments, delimiter=delimiter,
+ skip_header=skip_header, skip_footer=skip_footer,
+ converters=converters, missing_values=missing_values,
+ filling_values=filling_values, usecols=usecols, names=names,
+ excludelist=excludelist, deletechars=deletechars,
+ replace_space=replace_space, autostrip=autostrip,
+ case_sensitive=case_sensitive, defaultfmt=defaultfmt,
+ unpack=unpack, usemask=usemask, loose=loose,
+ invalid_raise=invalid_raise, max_rows=max_rows, encoding=encoding,
+ like=like
+ )
+
if max_rows is not None:
if skip_footer:
raise ValueError(
@@ -2245,9 +2272,23 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
if usemask:
output = output.view(MaskedArray)
output._mask = outputmask
+ output = np.squeeze(output)
if unpack:
- return output.squeeze().T
- return output.squeeze()
+ if names is None:
+ return output.T
+ elif len(names) == 1:
+ # squeeze single-name dtypes too
+ return output[names[0]]
+ else:
+ # For structured arrays with multiple fields,
+ # return an array for each field.
+ return [output[field] for field in names]
+ return output
+
+
+_genfromtxt_with_like = array_function_dispatch(
+ _genfromtxt_dispatcher
+)(genfromtxt)
def ndfromtxt(fname, **kwargs):
diff --git a/numpy/lib/scimath.py b/numpy/lib/scimath.py
index 555a3d5a8..2b0d38c37 100644
--- a/numpy/lib/scimath.py
+++ b/numpy/lib/scimath.py
@@ -14,6 +14,22 @@ module provide the mathematically valid answers in the complex plane::
Similarly, `sqrt`, other base logarithms, `power` and trig functions are
correctly handled. See their respective docstrings for specific examples.
+Functions
+---------
+
+.. autosummary::
+ :toctree: generated/
+
+ sqrt
+ log
+ log2
+ logn
+ log10
+ power
+ arccos
+ arcsin
+ arctanh
+
"""
import numpy.core.numeric as nx
import numpy.core.numerictypes as nt
diff --git a/numpy/lib/shape_base.py b/numpy/lib/shape_base.py
index 127338975..cbc4641d8 100644
--- a/numpy/lib/shape_base.py
+++ b/numpy/lib/shape_base.py
@@ -870,7 +870,7 @@ def split(ary, indices_or_sections, axis=0):
N = ary.shape[axis]
if N % sections:
raise ValueError(
- 'array split does not result in an equal division')
+ 'array split does not result in an equal division') from None
return array_split(ary, indices_or_sections, axis)
diff --git a/numpy/lib/tests/test_financial_expired.py b/numpy/lib/tests/test_financial_expired.py
index e1d05da0c..66bb08026 100644
--- a/numpy/lib/tests/test_financial_expired.py
+++ b/numpy/lib/tests/test_financial_expired.py
@@ -3,10 +3,11 @@ import pytest
import numpy as np
+@pytest.mark.skipif(sys.version_info[:2] < (3, 7),
+ reason="requires python 3.7 or higher")
def test_financial_expired():
- if sys.version_info[:2] >= (3, 7):
- match = 'NEP 32'
- else:
- match = None
- with pytest.raises(AttributeError, match=match):
- np.fv
+ match = 'NEP 32'
+ with pytest.warns(RuntimeWarning, match=match):
+ func = np.fv
+ with pytest.raises(RuntimeError, match=match):
+ func(1, 2, 3)
diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py
index 34a395ee4..7bddb941c 100644
--- a/numpy/lib/tests/test_function_base.py
+++ b/numpy/lib/tests/test_function_base.py
@@ -1225,6 +1225,10 @@ class TestTrimZeros:
assert_array_equal(arr, res)
+ def test_list_to_list(self):
+ res = trim_zeros(self.a.tolist())
+ assert isinstance(res, list)
+
class TestExtins:
def test_basic(self):
@@ -1801,28 +1805,28 @@ class TestFilterwindows:
def test_hanning(self):
# check symmetry
w = hanning(10)
- assert_array_almost_equal(w, flipud(w), 7)
+ assert_equal(w, flipud(w))
# check known value
assert_almost_equal(np.sum(w, axis=0), 4.500, 4)
def test_hamming(self):
# check symmetry
w = hamming(10)
- assert_array_almost_equal(w, flipud(w), 7)
+ assert_equal(w, flipud(w))
# check known value
assert_almost_equal(np.sum(w, axis=0), 4.9400, 4)
def test_bartlett(self):
# check symmetry
w = bartlett(10)
- assert_array_almost_equal(w, flipud(w), 7)
+ assert_equal(w, flipud(w))
# check known value
assert_almost_equal(np.sum(w, axis=0), 4.4444, 4)
def test_blackman(self):
# check symmetry
w = blackman(10)
- assert_array_almost_equal(w, flipud(w), 7)
+ assert_equal(w, flipud(w))
# check known value
assert_almost_equal(np.sum(w, axis=0), 3.7800, 4)
diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py
index a23c6b007..aa4499764 100644
--- a/numpy/lib/tests/test_io.py
+++ b/numpy/lib/tests/test_io.py
@@ -19,7 +19,7 @@ from ctypes import c_bool
import numpy as np
import numpy.ma as ma
from numpy.lib._iotools import ConverterError, ConversionWarning
-from numpy.compat import asbytes, bytes
+from numpy.compat import asbytes
from numpy.ma.testutils import assert_equal
from numpy.testing import (
assert_warns, assert_, assert_raises_regex, assert_raises,
@@ -1026,7 +1026,7 @@ class TestLoadTxt(LoadTxtBase):
a = np.array([b'start ', b' ', b''])
assert_array_equal(x['comment'], a)
- def test_structure_unpack(self):
+ def test_unpack_structured(self):
txt = TextIO("M 21 72\nF 35 58")
dt = {'names': ('a', 'b', 'c'), 'formats': ('|S1', '<i4', '<f4')}
a, b, c = np.loadtxt(txt, dtype=dt, unpack=True)
@@ -2358,6 +2358,51 @@ M 33 21.99
assert_equal(test['f1'], 17179869184)
assert_equal(test['f2'], 1024)
+ def test_unpack_structured(self):
+ # Regression test for gh-4341
+ # Unpacking should work on structured arrays
+ txt = TextIO("M 21 72\nF 35 58")
+ dt = {'names': ('a', 'b', 'c'), 'formats': ('S1', 'i4', 'f4')}
+ a, b, c = np.genfromtxt(txt, dtype=dt, unpack=True)
+ assert_equal(a.dtype, np.dtype('S1'))
+ assert_equal(b.dtype, np.dtype('i4'))
+ assert_equal(c.dtype, np.dtype('f4'))
+ assert_array_equal(a, np.array([b'M', b'F']))
+ assert_array_equal(b, np.array([21, 35]))
+ assert_array_equal(c, np.array([72., 58.]))
+
+ def test_unpack_auto_dtype(self):
+ # Regression test for gh-4341
+ # Unpacking should work when dtype=None
+ txt = TextIO("M 21 72.\nF 35 58.")
+ expected = (np.array(["M", "F"]), np.array([21, 35]), np.array([72., 58.]))
+ test = np.genfromtxt(txt, dtype=None, unpack=True, encoding="utf-8")
+ for arr, result in zip(expected, test):
+ assert_array_equal(arr, result)
+ assert_equal(arr.dtype, result.dtype)
+
+ def test_unpack_single_name(self):
+ # Regression test for gh-4341
+ # Unpacking should work when structured dtype has only one field
+ txt = TextIO("21\n35")
+ dt = {'names': ('a',), 'formats': ('i4',)}
+ expected = np.array([21, 35], dtype=np.int32)
+ test = np.genfromtxt(txt, dtype=dt, unpack=True)
+ assert_array_equal(expected, test)
+ assert_equal(expected.dtype, test.dtype)
+
+ def test_squeeze_scalar(self):
+ # Regression test for gh-4341
+ # Unpacking a scalar should give zero-dim output,
+ # even if dtype is structured
+ txt = TextIO("1")
+ dt = {'names': ('a',), 'formats': ('i4',)}
+ expected = np.array((1,), dtype=np.int32)
+ test = np.genfromtxt(txt, dtype=dt, unpack=True)
+ assert_array_equal(expected, test)
+ assert_equal((), test.shape)
+ assert_equal(expected.dtype, test.dtype)
+
class TestPathUsage:
# Test that pathlib.Path can be used
diff --git a/numpy/lib/twodim_base.py b/numpy/lib/twodim_base.py
index cd7484241..2b4cbdfbb 100644
--- a/numpy/lib/twodim_base.py
+++ b/numpy/lib/twodim_base.py
@@ -8,7 +8,7 @@ from numpy.core.numeric import (
asarray, where, int8, int16, int32, int64, empty, promote_types, diagonal,
nonzero
)
-from numpy.core.overrides import set_module
+from numpy.core.overrides import set_array_function_like_doc, set_module
from numpy.core import overrides
from numpy.core import iinfo
@@ -149,8 +149,13 @@ def flipud(m):
return m[::-1, ...]
+def _eye_dispatcher(N, M=None, k=None, dtype=None, order=None, *, like=None):
+ return (like,)
+
+
+@set_array_function_like_doc
@set_module('numpy')
-def eye(N, M=None, k=0, dtype=float, order='C'):
+def eye(N, M=None, k=0, dtype=float, order='C', *, like=None):
"""
Return a 2-D array with ones on the diagonal and zeros elsewhere.
@@ -171,6 +176,9 @@ def eye(N, M=None, k=0, dtype=float, order='C'):
column-major (Fortran-style) order in memory.
.. versionadded:: 1.14.0
+ ${ARRAY_FUNCTION_LIKE}
+
+ .. versionadded:: 1.20.0
Returns
-------
@@ -194,6 +202,8 @@ def eye(N, M=None, k=0, dtype=float, order='C'):
[0., 0., 0.]])
"""
+ if like is not None:
+ return _eye_with_like(N, M=M, k=k, dtype=dtype, order=order, like=like)
if M is None:
M = N
m = zeros((N, M), dtype=dtype, order=order)
@@ -207,6 +217,11 @@ def eye(N, M=None, k=0, dtype=float, order='C'):
return m
+_eye_with_like = array_function_dispatch(
+ _eye_dispatcher
+)(eye)
+
+
def _diag_dispatcher(v, k=None):
return (v,)
@@ -343,8 +358,13 @@ def diagflat(v, k=0):
return wrap(res)
+def _tri_dispatcher(N, M=None, k=None, dtype=None, *, like=None):
+ return (like,)
+
+
+@set_array_function_like_doc
@set_module('numpy')
-def tri(N, M=None, k=0, dtype=float):
+def tri(N, M=None, k=0, dtype=float, *, like=None):
"""
An array with ones at and below the given diagonal and zeros elsewhere.
@@ -361,6 +381,9 @@ def tri(N, M=None, k=0, dtype=float):
and `k` > 0 is above. The default is 0.
dtype : dtype, optional
Data type of the returned array. The default is float.
+ ${ARRAY_FUNCTION_LIKE}
+
+ .. versionadded:: 1.20.0
Returns
-------
@@ -381,6 +404,9 @@ def tri(N, M=None, k=0, dtype=float):
[1., 1., 0., 0., 0.]])
"""
+ if like is not None:
+ return _tri_with_like(N, M=M, k=k, dtype=dtype, like=like)
+
if M is None:
M = N
@@ -393,6 +419,11 @@ def tri(N, M=None, k=0, dtype=float):
return m
+_tri_with_like = array_function_dispatch(
+ _tri_dispatcher
+)(tri)
+
+
def _trilu_dispatcher(m, k=None):
return (m,)
diff --git a/numpy/linalg/__init__.pyi b/numpy/linalg/__init__.pyi
new file mode 100644
index 000000000..ffb05bb81
--- /dev/null
+++ b/numpy/linalg/__init__.pyi
@@ -0,0 +1,23 @@
+from typing import Any
+
+matrix_power: Any
+solve: Any
+tensorsolve: Any
+tensorinv: Any
+inv: Any
+cholesky: Any
+eigvals: Any
+eigvalsh: Any
+pinv: Any
+slogdet: Any
+det: Any
+svd: Any
+eig: Any
+eigh: Any
+lstsq: Any
+norm: Any
+qr: Any
+cond: Any
+matrix_rank: Any
+LinAlgError: Any
+multi_dot: Any
diff --git a/numpy/linalg/umath_linalg.c.src b/numpy/linalg/umath_linalg.c.src
index 59647c67d..1807aadcf 100644
--- a/numpy/linalg/umath_linalg.c.src
+++ b/numpy/linalg/umath_linalg.c.src
@@ -3665,7 +3665,7 @@ PyObject *PyInit__umath_linalg(void)
return NULL;
}
- version = PyString_FromString(umath_linalg_version_string);
+ version = PyUnicode_FromString(umath_linalg_version_string);
if (version == NULL) {
return NULL;
}
diff --git a/numpy/ma/__init__.pyi b/numpy/ma/__init__.pyi
new file mode 100644
index 000000000..d1259abcc
--- /dev/null
+++ b/numpy/ma/__init__.pyi
@@ -0,0 +1,225 @@
+from typing import Any
+
+core: Any
+extras: Any
+MAError: Any
+MaskError: Any
+MaskType: Any
+MaskedArray: Any
+abs: Any
+absolute: Any
+add: Any
+all: Any
+allclose: Any
+allequal: Any
+alltrue: Any
+amax: Any
+amin: Any
+angle: Any
+anom: Any
+anomalies: Any
+any: Any
+append: Any
+arange: Any
+arccos: Any
+arccosh: Any
+arcsin: Any
+arcsinh: Any
+arctan: Any
+arctan2: Any
+arctanh: Any
+argmax: Any
+argmin: Any
+argsort: Any
+around: Any
+array: Any
+asanyarray: Any
+asarray: Any
+bitwise_and: Any
+bitwise_or: Any
+bitwise_xor: Any
+bool_: Any
+ceil: Any
+choose: Any
+clip: Any
+common_fill_value: Any
+compress: Any
+compressed: Any
+concatenate: Any
+conjugate: Any
+convolve: Any
+copy: Any
+correlate: Any
+cos: Any
+cosh: Any
+count: Any
+cumprod: Any
+cumsum: Any
+default_fill_value: Any
+diag: Any
+diagonal: Any
+diff: Any
+divide: Any
+empty: Any
+empty_like: Any
+equal: Any
+exp: Any
+expand_dims: Any
+fabs: Any
+filled: Any
+fix_invalid: Any
+flatten_mask: Any
+flatten_structured_array: Any
+floor: Any
+floor_divide: Any
+fmod: Any
+frombuffer: Any
+fromflex: Any
+fromfunction: Any
+getdata: Any
+getmask: Any
+getmaskarray: Any
+greater: Any
+greater_equal: Any
+harden_mask: Any
+hypot: Any
+identity: Any
+ids: Any
+indices: Any
+inner: Any
+innerproduct: Any
+isMA: Any
+isMaskedArray: Any
+is_mask: Any
+is_masked: Any
+isarray: Any
+left_shift: Any
+less: Any
+less_equal: Any
+log: Any
+log10: Any
+log2: Any
+logical_and: Any
+logical_not: Any
+logical_or: Any
+logical_xor: Any
+make_mask: Any
+make_mask_descr: Any
+make_mask_none: Any
+mask_or: Any
+masked: Any
+masked_array: Any
+masked_equal: Any
+masked_greater: Any
+masked_greater_equal: Any
+masked_inside: Any
+masked_invalid: Any
+masked_less: Any
+masked_less_equal: Any
+masked_not_equal: Any
+masked_object: Any
+masked_outside: Any
+masked_print_option: Any
+masked_singleton: Any
+masked_values: Any
+masked_where: Any
+max: Any
+maximum: Any
+maximum_fill_value: Any
+mean: Any
+min: Any
+minimum: Any
+minimum_fill_value: Any
+mod: Any
+multiply: Any
+mvoid: Any
+ndim: Any
+negative: Any
+nomask: Any
+nonzero: Any
+not_equal: Any
+ones: Any
+outer: Any
+outerproduct: Any
+power: Any
+prod: Any
+product: Any
+ptp: Any
+put: Any
+putmask: Any
+ravel: Any
+remainder: Any
+repeat: Any
+reshape: Any
+resize: Any
+right_shift: Any
+round: Any
+round_: Any
+set_fill_value: Any
+shape: Any
+sin: Any
+sinh: Any
+size: Any
+soften_mask: Any
+sometrue: Any
+sort: Any
+sqrt: Any
+squeeze: Any
+std: Any
+subtract: Any
+sum: Any
+swapaxes: Any
+take: Any
+tan: Any
+tanh: Any
+trace: Any
+transpose: Any
+true_divide: Any
+var: Any
+where: Any
+zeros: Any
+apply_along_axis: Any
+apply_over_axes: Any
+atleast_1d: Any
+atleast_2d: Any
+atleast_3d: Any
+average: Any
+clump_masked: Any
+clump_unmasked: Any
+column_stack: Any
+compress_cols: Any
+compress_nd: Any
+compress_rowcols: Any
+compress_rows: Any
+count_masked: Any
+corrcoef: Any
+cov: Any
+diagflat: Any
+dot: Any
+dstack: Any
+ediff1d: Any
+flatnotmasked_contiguous: Any
+flatnotmasked_edges: Any
+hsplit: Any
+hstack: Any
+isin: Any
+in1d: Any
+intersect1d: Any
+mask_cols: Any
+mask_rowcols: Any
+mask_rows: Any
+masked_all: Any
+masked_all_like: Any
+median: Any
+mr_: Any
+notmasked_contiguous: Any
+notmasked_edges: Any
+polyfit: Any
+row_stack: Any
+setdiff1d: Any
+setxor1d: Any
+stack: Any
+unique: Any
+union1d: Any
+vander: Any
+vstack: Any
diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py
index 27f14a5e7..0ed2971e6 100644
--- a/numpy/ma/tests/test_core.py
+++ b/numpy/ma/tests/test_core.py
@@ -3858,8 +3858,6 @@ class TestMaskedArrayMathMethods:
assert_almost_equal(np.sqrt(mXvar0[k]),
mX[:, k].compressed().std())
- @pytest.mark.skipif(sys.platform=='win32' and sys.version_info < (3, 6),
- reason='Fails on Python < 3.6 on Windows, gh-9671')
@suppress_copy_mask_on_assignment
def test_varstd_specialcases(self):
# Test a special case for var
diff --git a/numpy/ma/timer_comparison.py b/numpy/ma/timer_comparison.py
index 83bd7852e..f5855efcf 100644
--- a/numpy/ma/timer_comparison.py
+++ b/numpy/ma/timer_comparison.py
@@ -100,9 +100,9 @@ class ModuleTester:
header=header,
names=('x', 'y'))
assert cond, msg
- except ValueError:
+ except ValueError as e:
msg = build_err_msg([x, y], err_msg, header=header, names=('x', 'y'))
- raise ValueError(msg)
+ raise ValueError(msg) from e
def assert_array_equal(self, x, y, err_msg=''):
"""
diff --git a/numpy/matrixlib/__init__.pyi b/numpy/matrixlib/__init__.pyi
new file mode 100644
index 000000000..b240bb327
--- /dev/null
+++ b/numpy/matrixlib/__init__.pyi
@@ -0,0 +1,6 @@
+from typing import Any
+
+matrix: Any
+bmat: Any
+mat: Any
+asmatrix: Any
diff --git a/numpy/polynomial/__init__.pyi b/numpy/polynomial/__init__.pyi
new file mode 100644
index 000000000..817ba22ac
--- /dev/null
+++ b/numpy/polynomial/__init__.pyi
@@ -0,0 +1,9 @@
+from typing import Any
+
+Polynomial: Any
+Chebyshev: Any
+Legendre: Any
+Hermite: Any
+HermiteE: Any
+Laguerre: Any
+set_default_printstyle: Any
diff --git a/numpy/polynomial/chebyshev.py b/numpy/polynomial/chebyshev.py
index d99fd98f5..6745c9371 100644
--- a/numpy/polynomial/chebyshev.py
+++ b/numpy/polynomial/chebyshev.py
@@ -477,8 +477,6 @@ def chebline(off, scl):
"""
Chebyshev series whose graph is a straight line.
-
-
Parameters
----------
off, scl : scalars
@@ -492,7 +490,11 @@ def chebline(off, scl):
See Also
--------
- polyline
+ numpy.polynomial.polynomial.polyline
+ numpy.polynomial.legendre.legline
+ numpy.polynomial.laguerre.lagline
+ numpy.polynomial.hermite.hermline
+ numpy.polynomial.hermite_e.hermeline
Examples
--------
@@ -545,7 +547,11 @@ def chebfromroots(roots):
See Also
--------
- polyfromroots, legfromroots, lagfromroots, hermfromroots, hermefromroots
+ numpy.polynomial.polynomial.polyfromroots
+ numpy.polynomial.legendre.legfromroots
+ numpy.polynomial.laguerre.lagfromroots
+ numpy.polynomial.hermite.hermfromroots
+ numpy.polynomial.hermite_e.hermefromroots
Examples
--------
@@ -764,7 +770,7 @@ def chebdiv(c1, c2):
See Also
--------
- chebadd, chebsub, chemulx, chebmul, chebpow
+ chebadd, chebsub, chebmulx, chebmul, chebpow
Notes
-----
@@ -1601,7 +1607,7 @@ def chebfit(x, y, deg, rcond=None, full=False, w=None):
sv -- singular values of the scaled Vandermonde matrix
rcond -- value of `rcond`.
- For more details, see `linalg.lstsq`.
+ For more details, see `numpy.linalg.lstsq`.
Warns
-----
@@ -1615,11 +1621,15 @@ def chebfit(x, y, deg, rcond=None, full=False, w=None):
See Also
--------
- polyfit, legfit, lagfit, hermfit, hermefit
+ numpy.polynomial.polynomial.polyfit
+ numpy.polynomial.legendre.legfit
+ numpy.polynomial.laguerre.lagfit
+ numpy.polynomial.hermite.hermfit
+ numpy.polynomial.hermite_e.hermefit
chebval : Evaluates a Chebyshev series.
chebvander : Vandermonde matrix of Chebyshev series.
chebweight : Chebyshev weight function.
- linalg.lstsq : Computes a least-squares fit from the matrix.
+ numpy.linalg.lstsq : Computes a least-squares fit from the matrix.
scipy.interpolate.UnivariateSpline : Computes spline fits.
Notes
@@ -1729,7 +1739,11 @@ def chebroots(c):
See Also
--------
- polyroots, legroots, lagroots, hermroots, hermeroots
+ numpy.polynomial.polynomial.polyroots
+ numpy.polynomial.legendre.legroots
+ numpy.polynomial.laguerre.lagroots
+ numpy.polynomial.hermite.hermroots
+ numpy.polynomial.hermite_e.hermeroots
Notes
-----
diff --git a/numpy/polynomial/hermite.py b/numpy/polynomial/hermite.py
index 280cad39e..c679c5298 100644
--- a/numpy/polynomial/hermite.py
+++ b/numpy/polynomial/hermite.py
@@ -233,7 +233,11 @@ def hermline(off, scl):
See Also
--------
- polyline, chebline
+ numpy.polynomial.polynomial.polyline
+ numpy.polynomial.chebyshev.chebline
+ numpy.polynomial.legendre.legline
+ numpy.polynomial.laguerre.lagline
+ numpy.polynomial.hermite_e.hermeline
Examples
--------
@@ -286,7 +290,11 @@ def hermfromroots(roots):
See Also
--------
- polyfromroots, legfromroots, lagfromroots, chebfromroots, hermefromroots
+ numpy.polynomial.polynomial.polyfromroots
+ numpy.polynomial.legendre.legfromroots
+ numpy.polynomial.laguerre.lagfromroots
+ numpy.polynomial.chebyshev.chebfromroots
+ numpy.polynomial.hermite_e.hermefromroots
Examples
--------
@@ -1322,7 +1330,7 @@ def hermfit(x, y, deg, rcond=None, full=False, w=None):
sv -- singular values of the scaled Vandermonde matrix
rcond -- value of `rcond`.
- For more details, see `linalg.lstsq`.
+ For more details, see `numpy.linalg.lstsq`.
Warns
-----
@@ -1336,11 +1344,15 @@ def hermfit(x, y, deg, rcond=None, full=False, w=None):
See Also
--------
- chebfit, legfit, lagfit, polyfit, hermefit
+ numpy.polynomial.chebyshev.chebfit
+ numpy.polynomial.legendre.legfit
+ numpy.polynomial.laguerre.lagfit
+ numpy.polynomial.polynomial.polyfit
+ numpy.polynomial.hermite_e.hermefit
hermval : Evaluates a Hermite series.
hermvander : Vandermonde matrix of Hermite series.
hermweight : Hermite weight function
- linalg.lstsq : Computes a least-squares fit from the matrix.
+ numpy.linalg.lstsq : Computes a least-squares fit from the matrix.
scipy.interpolate.UnivariateSpline : Computes spline fits.
Notes
@@ -1457,7 +1469,11 @@ def hermroots(c):
See Also
--------
- polyroots, legroots, lagroots, chebroots, hermeroots
+ numpy.polynomial.polynomial.polyroots
+ numpy.polynomial.legendre.legroots
+ numpy.polynomial.laguerre.lagroots
+ numpy.polynomial.chebyshev.chebroots
+ numpy.polynomial.hermite_e.hermeroots
Notes
-----
diff --git a/numpy/polynomial/hermite_e.py b/numpy/polynomial/hermite_e.py
index 9b3b25105..1ce8ebe04 100644
--- a/numpy/polynomial/hermite_e.py
+++ b/numpy/polynomial/hermite_e.py
@@ -218,8 +218,6 @@ def hermeline(off, scl):
"""
Hermite series whose graph is a straight line.
-
-
Parameters
----------
off, scl : scalars
@@ -233,7 +231,11 @@ def hermeline(off, scl):
See Also
--------
- polyline, chebline
+ numpy.polynomial.polynomial.polyline
+ numpy.polynomial.chebyshev.chebline
+ numpy.polynomial.legendre.legline
+ numpy.polynomial.laguerre.lagline
+ numpy.polynomial.hermite.hermline
Examples
--------
@@ -287,7 +289,11 @@ def hermefromroots(roots):
See Also
--------
- polyfromroots, legfromroots, lagfromroots, hermfromroots, chebfromroots
+ numpy.polynomial.polynomial.polyfromroots
+ numpy.polynomial.legendre.legfromroots
+ numpy.polynomial.laguerre.lagfromroots
+ numpy.polynomial.hermite.hermfromroots
+ numpy.polynomial.chebyshev.chebfromroots
Examples
--------
@@ -1315,7 +1321,7 @@ def hermefit(x, y, deg, rcond=None, full=False, w=None):
sv -- singular values of the scaled Vandermonde matrix
rcond -- value of `rcond`.
- For more details, see `linalg.lstsq`.
+ For more details, see `numpy.linalg.lstsq`.
Warns
-----
@@ -1329,11 +1335,15 @@ def hermefit(x, y, deg, rcond=None, full=False, w=None):
See Also
--------
- chebfit, legfit, polyfit, hermfit, polyfit
+ numpy.polynomial.chebyshev.chebfit
+ numpy.polynomial.legendre.legfit
+ numpy.polynomial.polynomial.polyfit
+ numpy.polynomial.hermite.hermfit
+ numpy.polynomial.laguerre.lagfit
hermeval : Evaluates a Hermite series.
hermevander : pseudo Vandermonde matrix of Hermite series.
hermeweight : HermiteE weight function.
- linalg.lstsq : Computes a least-squares fit from the matrix.
+ numpy.linalg.lstsq : Computes a least-squares fit from the matrix.
scipy.interpolate.UnivariateSpline : Computes spline fits.
Notes
@@ -1452,7 +1462,11 @@ def hermeroots(c):
See Also
--------
- polyroots, legroots, lagroots, hermroots, chebroots
+ numpy.polynomial.polynomial.polyroots
+ numpy.polynomial.legendre.legroots
+ numpy.polynomial.laguerre.lagroots
+ numpy.polynomial.hermite.hermroots
+ numpy.polynomial.chebyshev.chebroots
Notes
-----
diff --git a/numpy/polynomial/laguerre.py b/numpy/polynomial/laguerre.py
index c1db13215..9cff0b71c 100644
--- a/numpy/polynomial/laguerre.py
+++ b/numpy/polynomial/laguerre.py
@@ -214,8 +214,6 @@ def lagline(off, scl):
"""
Laguerre series whose graph is a straight line.
-
-
Parameters
----------
off, scl : scalars
@@ -229,7 +227,11 @@ def lagline(off, scl):
See Also
--------
- polyline, chebline
+ numpy.polynomial.polynomial.polyline
+ numpy.polynomial.chebyshev.chebline
+ numpy.polynomial.legendre.legline
+ numpy.polynomial.hermite.hermline
+ numpy.polynomial.hermite_e.hermeline
Examples
--------
@@ -282,7 +284,11 @@ def lagfromroots(roots):
See Also
--------
- polyfromroots, legfromroots, chebfromroots, hermfromroots, hermefromroots
+ numpy.polynomial.polynomial.polyfromroots
+ numpy.polynomial.legendre.legfromroots
+ numpy.polynomial.chebyshev.chebfromroots
+ numpy.polynomial.hermite.hermfromroots
+ numpy.polynomial.hermite_e.hermefromroots
Examples
--------
@@ -1321,7 +1327,7 @@ def lagfit(x, y, deg, rcond=None, full=False, w=None):
sv -- singular values of the scaled Vandermonde matrix
rcond -- value of `rcond`.
- For more details, see `linalg.lstsq`.
+ For more details, see `numpy.linalg.lstsq`.
Warns
-----
@@ -1335,11 +1341,15 @@ def lagfit(x, y, deg, rcond=None, full=False, w=None):
See Also
--------
- chebfit, legfit, polyfit, hermfit, hermefit
+ numpy.polynomial.polynomial.polyfit
+ numpy.polynomial.legendre.legfit
+ numpy.polynomial.chebyshev.chebfit
+ numpy.polynomial.hermite.hermfit
+ numpy.polynomial.hermite_e.hermefit
lagval : Evaluates a Laguerre series.
lagvander : pseudo Vandermonde matrix of Laguerre series.
lagweight : Laguerre weight function.
- linalg.lstsq : Computes a least-squares fit from the matrix.
+ numpy.linalg.lstsq : Computes a least-squares fit from the matrix.
scipy.interpolate.UnivariateSpline : Computes spline fits.
Notes
@@ -1455,7 +1465,11 @@ def lagroots(c):
See Also
--------
- polyroots, legroots, chebroots, hermroots, hermeroots
+ numpy.polynomial.polynomial.polyroots
+ numpy.polynomial.legendre.legroots
+ numpy.polynomial.chebyshev.chebroots
+ numpy.polynomial.hermite.hermroots
+ numpy.polynomial.hermite_e.hermeroots
Notes
-----
diff --git a/numpy/polynomial/legendre.py b/numpy/polynomial/legendre.py
index 7b5b665f2..427f9f82f 100644
--- a/numpy/polynomial/legendre.py
+++ b/numpy/polynomial/legendre.py
@@ -243,7 +243,11 @@ def legline(off, scl):
See Also
--------
- polyline, chebline
+ numpy.polynomial.polynomial.polyline
+ numpy.polynomial.chebyshev.chebline
+ numpy.polynomial.laguerre.lagline
+ numpy.polynomial.hermite.hermline
+ numpy.polynomial.hermite_e.hermeline
Examples
--------
@@ -296,7 +300,11 @@ def legfromroots(roots):
See Also
--------
- polyfromroots, chebfromroots, lagfromroots, hermfromroots, hermefromroots
+ numpy.polynomial.polynomial.polyfromroots
+ numpy.polynomial.chebyshev.chebfromroots
+ numpy.polynomial.laguerre.lagfromroots
+ numpy.polynomial.hermite.hermfromroots
+ numpy.polynomial.hermite_e.hermefromroots
Examples
--------
@@ -1343,7 +1351,7 @@ def legfit(x, y, deg, rcond=None, full=False, w=None):
sv -- singular values of the scaled Vandermonde matrix
rcond -- value of `rcond`.
- For more details, see `linalg.lstsq`.
+ For more details, see `numpy.linalg.lstsq`.
Warns
-----
@@ -1357,11 +1365,15 @@ def legfit(x, y, deg, rcond=None, full=False, w=None):
See Also
--------
- chebfit, polyfit, lagfit, hermfit, hermefit
+ numpy.polynomial.polynomial.polyfit
+ numpy.polynomial.chebyshev.chebfit
+ numpy.polynomial.laguerre.lagfit
+ numpy.polynomial.hermite.hermfit
+ numpy.polynomial.hermite_e.hermefit
legval : Evaluates a Legendre series.
legvander : Vandermonde matrix of Legendre series.
legweight : Legendre weight function (= 1).
- linalg.lstsq : Computes a least-squares fit from the matrix.
+ numpy.linalg.lstsq : Computes a least-squares fit from the matrix.
scipy.interpolate.UnivariateSpline : Computes spline fits.
Notes
@@ -1470,7 +1482,11 @@ def legroots(c):
See Also
--------
- polyroots, chebroots, lagroots, hermroots, hermeroots
+ numpy.polynomial.polynomial.polyroots
+ numpy.polynomial.chebyshev.chebroots
+ numpy.polynomial.laguerre.lagroots
+ numpy.polynomial.hermite.hermroots
+ numpy.polynomial.hermite_e.hermeroots
Notes
-----
diff --git a/numpy/polynomial/polynomial.py b/numpy/polynomial/polynomial.py
index 83693441f..1baa7d870 100644
--- a/numpy/polynomial/polynomial.py
+++ b/numpy/polynomial/polynomial.py
@@ -127,7 +127,11 @@ def polyline(off, scl):
See Also
--------
- chebline
+ numpy.polynomial.chebyshev.chebline
+ numpy.polynomial.legendre.legline
+ numpy.polynomial.laguerre.lagline
+ numpy.polynomial.hermite.hermline
+ numpy.polynomial.hermite_e.hermeline
Examples
--------
@@ -179,8 +183,11 @@ def polyfromroots(roots):
See Also
--------
- chebfromroots, legfromroots, lagfromroots, hermfromroots
- hermefromroots
+ numpy.polynomial.chebyshev.chebfromroots
+ numpy.polynomial.legendre.legfromroots
+ numpy.polynomial.laguerre.lagfromroots
+ numpy.polynomial.hermite.hermfromroots
+ numpy.polynomial.hermite_e.hermefromroots
Notes
-----
@@ -1267,7 +1274,7 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None):
sv -- singular values of the scaled Vandermonde matrix
rcond -- value of `rcond`.
- For more details, see `linalg.lstsq`.
+ For more details, see `numpy.linalg.lstsq`.
Raises
------
@@ -1281,10 +1288,14 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None):
See Also
--------
- chebfit, legfit, lagfit, hermfit, hermefit
+ numpy.polynomial.chebyshev.chebfit
+ numpy.polynomial.legendre.legfit
+ numpy.polynomial.laguerre.lagfit
+ numpy.polynomial.hermite.hermfit
+ numpy.polynomial.hermite_e.hermefit
polyval : Evaluates a polynomial.
polyvander : Vandermonde matrix for powers.
- linalg.lstsq : Computes a least-squares fit from the matrix.
+ numpy.linalg.lstsq : Computes a least-squares fit from the matrix.
scipy.interpolate.UnivariateSpline : Computes spline fits.
Notes
@@ -1411,7 +1422,11 @@ def polyroots(c):
See Also
--------
- chebroots
+ numpy.polynomial.chebyshev.chebroots
+ numpy.polynomial.legendre.legroots
+ numpy.polynomial.laguerre.lagroots
+ numpy.polynomial.hermite.hermroots
+ numpy.polynomial.hermite_e.hermeroots
Notes
-----
diff --git a/numpy/random/__init__.pyi b/numpy/random/__init__.pyi
new file mode 100644
index 000000000..f7c3cfafe
--- /dev/null
+++ b/numpy/random/__init__.pyi
@@ -0,0 +1,61 @@
+from typing import Any
+
+beta: Any
+binomial: Any
+bytes: Any
+chisquare: Any
+choice: Any
+dirichlet: Any
+exponential: Any
+f: Any
+gamma: Any
+geometric: Any
+get_state: Any
+gumbel: Any
+hypergeometric: Any
+laplace: Any
+logistic: Any
+lognormal: Any
+logseries: Any
+multinomial: Any
+multivariate_normal: Any
+negative_binomial: Any
+noncentral_chisquare: Any
+noncentral_f: Any
+normal: Any
+pareto: Any
+permutation: Any
+poisson: Any
+power: Any
+rand: Any
+randint: Any
+randn: Any
+random: Any
+random_integers: Any
+random_sample: Any
+ranf: Any
+rayleigh: Any
+sample: Any
+seed: Any
+set_state: Any
+shuffle: Any
+standard_cauchy: Any
+standard_exponential: Any
+standard_gamma: Any
+standard_normal: Any
+standard_t: Any
+triangular: Any
+uniform: Any
+vonmises: Any
+wald: Any
+weibull: Any
+zipf: Any
+Generator: Any
+RandomState: Any
+SeedSequence: Any
+MT19937: Any
+Philox: Any
+PCG64: Any
+SFC64: Any
+default_rng: Any
+BitGenerator: Any
diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx
index 66847043b..e40dcefe3 100644
--- a/numpy/random/_generator.pyx
+++ b/numpy/random/_generator.pyx
@@ -5,6 +5,7 @@ import warnings
from cpython.pycapsule cimport PyCapsule_IsValid, PyCapsule_GetPointer
from cpython cimport (Py_INCREF, PyFloat_AsDouble)
+from cpython.mem cimport PyMem_Malloc, PyMem_Free
cimport cython
import numpy as np
@@ -28,6 +29,13 @@ from ._common cimport (POISSON_LAM_MAX, CONS_POSITIVE, CONS_NONE,
validate_output_shape
)
+cdef extern from "numpy/arrayobject.h":
+ int PyArray_ResolveWritebackIfCopy(np.ndarray)
+ object PyArray_FromArray(np.PyArrayObject *, np.PyArray_Descr *, int)
+
+ enum:
+ NPY_ARRAY_WRITEBACKIFCOPY
+
np.import_array()
cdef int64_t _safe_sum_nonneg_int64(size_t num_colors, int64_t *colors):
@@ -48,6 +56,77 @@ cdef int64_t _safe_sum_nonneg_int64(size_t num_colors, int64_t *colors):
return sum
+cdef inline void _shuffle_raw_wrap(bitgen_t *bitgen, np.npy_intp n,
+ np.npy_intp first, np.npy_intp itemsize,
+ np.npy_intp stride,
+ char* data, char* buf) nogil:
+ # We trick gcc into providing a specialized implementation for
+ # the most common case, yielding a ~33% performance improvement.
+ # Note that apparently, only one branch can ever be specialized.
+ if itemsize == sizeof(np.npy_intp):
+ _shuffle_raw(bitgen, n, first, sizeof(np.npy_intp), stride, data, buf)
+ else:
+ _shuffle_raw(bitgen, n, first, itemsize, stride, data, buf)
+
+
+cdef inline void _shuffle_raw(bitgen_t *bitgen, np.npy_intp n,
+ np.npy_intp first, np.npy_intp itemsize,
+ np.npy_intp stride,
+ char* data, char* buf) nogil:
+ """
+ Parameters
+ ----------
+ bitgen
+ Pointer to a bitgen_t instance.
+ n
+ Number of elements in data
+ first
+ First observation to shuffle. Shuffles n-1,
+ n-2, ..., first, so that when first=1 the entire
+ array is shuffled
+ itemsize
+ Size in bytes of item
+ stride
+ Array stride
+ data
+ Location of data
+ buf
+ Location of buffer (itemsize)
+ """
+ cdef np.npy_intp i, j
+
+ for i in reversed(range(first, n)):
+ j = random_interval(bitgen, i)
+ string.memcpy(buf, data + j * stride, itemsize)
+ string.memcpy(data + j * stride, data + i * stride, itemsize)
+ string.memcpy(data + i * stride, buf, itemsize)
+
+
+cdef inline void _shuffle_int(bitgen_t *bitgen, np.npy_intp n,
+ np.npy_intp first, int64_t* data) nogil:
+ """
+ Parameters
+ ----------
+ bitgen
+ Pointer to a bitgen_t instance.
+ n
+ Number of elements in data
+ first
+ First observation to shuffle. Shuffles n-1,
+ n-2, ..., first, so that when first=1 the entire
+ array is shuffled
+ data
+ Location of data
+ """
+ cdef np.npy_intp i, j
+ cdef int64_t temp
+ for i in reversed(range(first, n)):
+ j = random_bounded_uint64(bitgen, 0, i, 0, 0)
+ temp = data[j]
+ data[j] = data[i]
+ data[i] = temp
+
+
cdef bint _check_bit_generator(object bitgen):
"""Check if an object satisfies the BitGenerator interface.
"""
@@ -708,8 +787,8 @@ cdef class Generator:
idx = np.PyArray_Arange(0, pop_size_i, 1, np.NPY_INT64)
idx_data = <int64_t*>(<np.ndarray>idx).data
with self.lock, nogil:
- self._shuffle_int(pop_size_i, max(pop_size_i - size_i, 1),
- idx_data)
+ _shuffle_int(&self._bitgen, pop_size_i,
+ max(pop_size_i - size_i, 1), idx_data)
# Copy to allow potentially large array backing idx to be gc
idx = idx[(pop_size - size):].copy()
else:
@@ -737,7 +816,7 @@ cdef class Generator:
hash_set[loc] = j
idx_data[j - pop_size_i + size_i] = j
if shuffle:
- self._shuffle_int(size_i, 1, idx_data)
+ _shuffle_int(&self._bitgen, size_i, 1, idx_data)
idx.shape = shape
if is_scalar and isinstance(idx, np.ndarray):
@@ -4114,7 +4193,159 @@ cdef class Generator:
return diric
- # Shuffling and permutations:
+ def permuted(self, object x, *, axis=None, out=None):
+ """
+ permuted(x, axis=None, out=None)
+
+ Randomly permute `x` along axis `axis`.
+
+ Unlike `shuffle`, each slice along the given axis is shuffled
+ independently of the others.
+
+ Parameters
+ ----------
+ x : array_like, at least one-dimensional
+ Array to be shuffled.
+ axis : int, optional
+ Slices of `x` in this axis are shuffled. Each slice
+ is shuffled independently of the others. If `axis` is
+ None, the flattened array is shuffled.
+ out : ndarray, optional
+ If given, this is the destination of the shuffled array.
+ If `out` is None, a shuffled copy of the array is returned.
+
+ Returns
+ -------
+ ndarray
+ If `out` is None, a shuffled copy of `x` is returned.
+ Otherwise, the shuffled array is stored in `out`,
+ and `out` is returned
+
+ See Also
+ --------
+ shuffle
+ permutation
+
+ Examples
+ --------
+ Create a `numpy.random.Generator` instance:
+
+ >>> rng = np.random.default_rng()
+
+ Create a test array:
+
+ >>> x = np.arange(24).reshape(3, 8)
+ >>> x
+ array([[ 0, 1, 2, 3, 4, 5, 6, 7],
+ [ 8, 9, 10, 11, 12, 13, 14, 15],
+ [16, 17, 18, 19, 20, 21, 22, 23]])
+
+ Shuffle the rows of `x`:
+
+ >>> y = rng.permuted(x, axis=1)
+ >>> y
+ array([[ 4, 3, 6, 7, 1, 2, 5, 0], # random
+ [15, 10, 14, 9, 12, 11, 8, 13],
+ [17, 16, 20, 21, 18, 22, 23, 19]])
+
+ `x` has not been modified:
+
+ >>> x
+ array([[ 0, 1, 2, 3, 4, 5, 6, 7],
+ [ 8, 9, 10, 11, 12, 13, 14, 15],
+ [16, 17, 18, 19, 20, 21, 22, 23]])
+
+ To shuffle the rows of `x` in-place, pass `x` as the `out`
+ parameter:
+
+ >>> y = rng.permuted(x, axis=1, out=x)
+ >>> x
+ array([[ 3, 0, 4, 7, 1, 6, 2, 5], # random
+ [ 8, 14, 13, 9, 12, 11, 15, 10],
+ [17, 18, 16, 22, 19, 23, 20, 21]])
+
+ Note that when the ``out`` parameter is given, the return
+ value is ``out``:
+
+ >>> y is x
+ True
+ """
+
+ cdef int ax
+ cdef np.npy_intp axlen, axstride, itemsize
+ cdef void *buf
+ cdef np.flatiter it
+ cdef np.ndarray to_shuffle
+ cdef int status
+ cdef int flags
+
+ x = np.asarray(x)
+
+ if out is None:
+ out = x.copy(order='K')
+ else:
+ if type(out) is not np.ndarray:
+ raise TypeError('out must be a numpy array')
+ if out.shape != x.shape:
+ raise ValueError('out must have the same shape as x')
+ np.copyto(out, x, casting='safe')
+
+ if axis is None:
+ if x.ndim > 1:
+ if not (np.PyArray_FLAGS(out) & (np.NPY_ARRAY_C_CONTIGUOUS |
+ np.NPY_ARRAY_F_CONTIGUOUS)):
+ flags = (np.NPY_ARRAY_C_CONTIGUOUS |
+ NPY_ARRAY_WRITEBACKIFCOPY)
+ to_shuffle = PyArray_FromArray(<np.PyArrayObject *>out,
+ <np.PyArray_Descr *>NULL, flags)
+ self.shuffle(to_shuffle.ravel(order='K'))
+ # Because we only execute this block if out is not
+ # contiguous, we know this call will always result in a
+ # copy of to_shuffle back to out. I.e. status will be 1.
+ status = PyArray_ResolveWritebackIfCopy(to_shuffle)
+ assert status == 1
+ else:
+ # out is n-d with n > 1, but is either C- or F-contiguous,
+ # so we know out.ravel(order='A') is a view.
+ self.shuffle(out.ravel(order='A'))
+ else:
+ # out is 1-d
+ self.shuffle(out)
+ return out
+
+ ax = normalize_axis_index(axis, np.ndim(out))
+ itemsize = out.itemsize
+ axlen = out.shape[ax]
+ axstride = out.strides[ax]
+
+ it = np.PyArray_IterAllButAxis(out, &ax)
+
+ buf = PyMem_Malloc(itemsize)
+ if buf == NULL:
+ raise MemoryError('memory allocation failed in permuted')
+
+ if out.dtype.hasobject:
+ # Keep the GIL when shuffling an object array.
+ with self.lock:
+ while np.PyArray_ITER_NOTDONE(it):
+ _shuffle_raw_wrap(&self._bitgen, axlen, 0, itemsize,
+ axstride,
+ <char *>np.PyArray_ITER_DATA(it),
+ <char *>buf)
+ np.PyArray_ITER_NEXT(it)
+ else:
+ # out is not an object array, so we can release the GIL.
+ with self.lock, nogil:
+ while np.PyArray_ITER_NOTDONE(it):
+ _shuffle_raw_wrap(&self._bitgen, axlen, 0, itemsize,
+ axstride,
+ <char *>np.PyArray_ITER_DATA(it),
+ <char *>buf)
+ np.PyArray_ITER_NEXT(it)
+
+ PyMem_Free(buf)
+ return out
+
def shuffle(self, object x, axis=0):
"""
shuffle(x, axis=0)
@@ -4177,14 +4408,15 @@ cdef class Generator:
# when the function exits.
buf = np.empty(itemsize, dtype=np.int8) # GC'd at function exit
buf_ptr = <char*><size_t>np.PyArray_DATA(buf)
- with self.lock:
- # We trick gcc into providing a specialized implementation for
- # the most common case, yielding a ~33% performance improvement.
- # Note that apparently, only one branch can ever be specialized.
- if itemsize == sizeof(np.npy_intp):
- self._shuffle_raw(n, 1, sizeof(np.npy_intp), stride, x_ptr, buf_ptr)
- else:
- self._shuffle_raw(n, 1, itemsize, stride, x_ptr, buf_ptr)
+ if x.dtype.hasobject:
+ with self.lock:
+ _shuffle_raw_wrap(&self._bitgen, n, 1, itemsize, stride,
+ x_ptr, buf_ptr)
+ else:
+ # Same as above, but the GIL is released.
+ with self.lock, nogil:
+ _shuffle_raw_wrap(&self._bitgen, n, 1, itemsize, stride,
+ x_ptr, buf_ptr)
elif isinstance(x, np.ndarray) and x.ndim and x.size:
x = np.swapaxes(x, 0, axis)
buf = np.empty_like(x[0, ...])
@@ -4207,56 +4439,6 @@ cdef class Generator:
j = random_interval(&self._bitgen, i)
x[i], x[j] = x[j], x[i]
- cdef inline _shuffle_raw(self, np.npy_intp n, np.npy_intp first,
- np.npy_intp itemsize, np.npy_intp stride,
- char* data, char* buf):
- """
- Parameters
- ----------
- n
- Number of elements in data
- first
- First observation to shuffle. Shuffles n-1,
- n-2, ..., first, so that when first=1 the entire
- array is shuffled
- itemsize
- Size in bytes of item
- stride
- Array stride
- data
- Location of data
- buf
- Location of buffer (itemsize)
- """
- cdef np.npy_intp i, j
- for i in reversed(range(first, n)):
- j = random_interval(&self._bitgen, i)
- string.memcpy(buf, data + j * stride, itemsize)
- string.memcpy(data + j * stride, data + i * stride, itemsize)
- string.memcpy(data + i * stride, buf, itemsize)
-
- cdef inline void _shuffle_int(self, np.npy_intp n, np.npy_intp first,
- int64_t* data) nogil:
- """
- Parameters
- ----------
- n
- Number of elements in data
- first
- First observation to shuffle. Shuffles n-1,
- n-2, ..., first, so that when first=1 the entire
- array is shuffled
- data
- Location of data
- """
- cdef np.npy_intp i, j
- cdef int64_t temp
- for i in reversed(range(first, n)):
- j = random_bounded_uint64(&self._bitgen, 0, i, 0, 0)
- temp = data[j]
- data[j] = data[i]
- data[i] = temp
-
def permutation(self, object x, axis=0):
"""
permutation(x, axis=0)
diff --git a/numpy/random/mtrand.pyx b/numpy/random/mtrand.pyx
index df305e689..d43e7f5aa 100644
--- a/numpy/random/mtrand.pyx
+++ b/numpy/random/mtrand.pyx
@@ -383,7 +383,7 @@ cdef class RandomState:
.. note::
New code should use the ``random`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
@@ -453,7 +453,7 @@ cdef class RandomState:
.. note::
New code should use the ``beta`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
@@ -503,7 +503,7 @@ cdef class RandomState:
.. note::
New code should use the ``exponential`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
@@ -552,7 +552,7 @@ cdef class RandomState:
.. note::
New code should use the ``standard_exponential`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
@@ -653,7 +653,7 @@ cdef class RandomState:
.. note::
New code should use the ``integers`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
@@ -774,7 +774,7 @@ cdef class RandomState:
.. note::
New code should use the ``bytes`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
@@ -812,7 +812,7 @@ cdef class RandomState:
.. note::
New code should use the ``choice`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
@@ -1017,7 +1017,7 @@ cdef class RandomState:
.. note::
New code should use the ``uniform`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
@@ -1185,7 +1185,7 @@ cdef class RandomState:
.. note::
New code should use the ``standard_normal`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
If positive int_like arguments are provided, `randn` generates an array
of shape ``(d0, d1, ..., dn)``, filled
@@ -1339,7 +1339,7 @@ cdef class RandomState:
.. note::
New code should use the ``standard_normal`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
@@ -1414,7 +1414,7 @@ cdef class RandomState:
.. note::
New code should use the ``normal`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
@@ -1514,7 +1514,7 @@ cdef class RandomState:
.. note::
New code should use the ``standard_gamma`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
@@ -1595,7 +1595,7 @@ cdef class RandomState:
.. note::
New code should use the ``gamma`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
@@ -1684,7 +1684,7 @@ cdef class RandomState:
.. note::
New code should use the ``f`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
@@ -1772,7 +1772,7 @@ cdef class RandomState:
.. note::
New code should use the ``noncentral_f`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
@@ -1857,7 +1857,7 @@ cdef class RandomState:
.. note::
New code should use the ``chisquare`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
@@ -1930,7 +1930,7 @@ cdef class RandomState:
.. note::
New code should use the ``noncentral_chisquare`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
@@ -2016,7 +2016,7 @@ cdef class RandomState:
.. note::
New code should use the ``standard_cauchy`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
@@ -2092,7 +2092,7 @@ cdef class RandomState:
.. note::
New code should use the ``standard_t`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
@@ -2197,7 +2197,7 @@ cdef class RandomState:
.. note::
New code should use the ``vonmises`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
@@ -2295,7 +2295,7 @@ cdef class RandomState:
.. note::
New code should use the ``pareto`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
@@ -2389,7 +2389,7 @@ cdef class RandomState:
.. note::
New code should use the ``weibull`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
@@ -2485,7 +2485,7 @@ cdef class RandomState:
.. note::
New code should use the ``power`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
@@ -2596,7 +2596,7 @@ cdef class RandomState:
.. note::
New code should use the ``laplace`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
@@ -2687,7 +2687,7 @@ cdef class RandomState:
.. note::
New code should use the ``gumbel`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
@@ -2809,7 +2809,7 @@ cdef class RandomState:
.. note::
New code should use the ``logistic`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
@@ -2896,7 +2896,7 @@ cdef class RandomState:
.. note::
New code should use the ``lognormal`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
@@ -3009,7 +3009,7 @@ cdef class RandomState:
.. note::
New code should use the ``rayleigh`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
@@ -3091,7 +3091,7 @@ cdef class RandomState:
.. note::
New code should use the ``wald`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
@@ -3164,7 +3164,7 @@ cdef class RandomState:
.. note::
New code should use the ``triangular`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
@@ -3271,7 +3271,7 @@ cdef class RandomState:
.. note::
New code should use the ``binomial`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
@@ -3421,7 +3421,7 @@ cdef class RandomState:
.. note::
New code should use the ``negative_binomial`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
@@ -3506,7 +3506,7 @@ cdef class RandomState:
.. note::
New code should use the ``poisson`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
@@ -3592,7 +3592,7 @@ cdef class RandomState:
.. note::
New code should use the ``zipf`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
@@ -3682,7 +3682,7 @@ cdef class RandomState:
.. note::
New code should use the ``geometric`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
@@ -3736,7 +3736,7 @@ cdef class RandomState:
.. note::
New code should use the ``hypergeometric`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
@@ -3867,7 +3867,7 @@ cdef class RandomState:
.. note::
New code should use the ``logseries`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
@@ -3960,7 +3960,7 @@ cdef class RandomState:
.. note::
New code should use the ``multivariate_normal`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
@@ -4134,7 +4134,7 @@ cdef class RandomState:
.. note::
New code should use the ``multinomial`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
@@ -4252,7 +4252,7 @@ cdef class RandomState:
.. note::
New code should use the ``dirichlet`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
@@ -4398,7 +4398,7 @@ cdef class RandomState:
.. note::
New code should use the ``shuffle`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
@@ -4493,7 +4493,7 @@ cdef class RandomState:
.. note::
New code should use the ``permutation`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
diff --git a/numpy/random/tests/test_generator_mt19937.py b/numpy/random/tests/test_generator_mt19937.py
index bb6d25ef1..6be7d852b 100644
--- a/numpy/random/tests/test_generator_mt19937.py
+++ b/numpy/random/tests/test_generator_mt19937.py
@@ -1039,6 +1039,56 @@ class TestRandomDist:
assert_raises(np.AxisError, random.permutation, arr, 3)
assert_raises(TypeError, random.permutation, arr, slice(1, 2, None))
+ @pytest.mark.parametrize("dtype", [int, object])
+ @pytest.mark.parametrize("axis, expected",
+ [(None, np.array([[3, 7, 0, 9, 10, 11],
+ [8, 4, 2, 5, 1, 6]])),
+ (0, np.array([[6, 1, 2, 9, 10, 11],
+ [0, 7, 8, 3, 4, 5]])),
+ (1, np.array([[ 5, 3, 4, 0, 2, 1],
+ [11, 9, 10, 6, 8, 7]]))])
+ def test_permuted(self, dtype, axis, expected):
+ random = Generator(MT19937(self.seed))
+ x = np.arange(12).reshape(2, 6).astype(dtype)
+ random.permuted(x, axis=axis, out=x)
+ assert_array_equal(x, expected)
+
+ random = Generator(MT19937(self.seed))
+ x = np.arange(12).reshape(2, 6).astype(dtype)
+ y = random.permuted(x, axis=axis)
+ assert y.dtype == dtype
+ assert_array_equal(y, expected)
+
+ def test_permuted_with_strides(self):
+ random = Generator(MT19937(self.seed))
+ x0 = np.arange(22).reshape(2, 11)
+ x1 = x0.copy()
+ x = x0[:, ::3]
+ y = random.permuted(x, axis=1, out=x)
+ expected = np.array([[0, 9, 3, 6],
+ [14, 20, 11, 17]])
+ assert_array_equal(y, expected)
+ x1[:, ::3] = expected
+ # Verify that the original x0 was modified in-place as expected.
+ assert_array_equal(x1, x0)
+
+ def test_permuted_empty(self):
+ y = random.permuted([])
+ assert_array_equal(y, [])
+
+ @pytest.mark.parametrize('outshape', [(2, 3), 5])
+ def test_permuted_out_with_wrong_shape(self, outshape):
+ a = np.array([1, 2, 3])
+ out = np.zeros(outshape, dtype=a.dtype)
+ with pytest.raises(ValueError, match='same shape'):
+ random.permuted(a, out=out)
+
+ def test_permuted_out_with_wrong_type(self):
+ out = np.zeros((3, 5), dtype=np.int32)
+ x = np.ones((3, 5))
+ with pytest.raises(TypeError, match='Cannot cast'):
+ random.permuted(x, axis=1, out=out)
+
def test_beta(self):
random = Generator(MT19937(self.seed))
actual = random.beta(.1, .9, size=(3, 2))
diff --git a/numpy/rec.pyi b/numpy/rec.pyi
new file mode 100644
index 000000000..c70ee5374
--- /dev/null
+++ b/numpy/rec.pyi
@@ -0,0 +1,5 @@
+from typing import Any
+
+record: Any
+recarray: Any
+format_parser: Any
diff --git a/numpy/testing/__init__.pyi b/numpy/testing/__init__.pyi
new file mode 100644
index 000000000..c394a387d
--- /dev/null
+++ b/numpy/testing/__init__.pyi
@@ -0,0 +1,44 @@
+from typing import Any
+
+assert_equal: Any
+assert_almost_equal: Any
+assert_approx_equal: Any
+assert_array_equal: Any
+assert_array_less: Any
+assert_string_equal: Any
+assert_array_almost_equal: Any
+assert_raises: Any
+build_err_msg: Any
+decorate_methods: Any
+jiffies: Any
+memusage: Any
+print_assert_equal: Any
+raises: Any
+rundocs: Any
+runstring: Any
+verbose: Any
+measure: Any
+assert_: Any
+assert_array_almost_equal_nulp: Any
+assert_raises_regex: Any
+assert_array_max_ulp: Any
+assert_warns: Any
+assert_no_warnings: Any
+assert_allclose: Any
+IgnoreException: Any
+clear_and_catch_warnings: Any
+SkipTest: Any
+KnownFailureException: Any
+temppath: Any
+tempdir: Any
+IS_PYPY: Any
+HAS_REFCOUNT: Any
+suppress_warnings: Any
+assert_array_compare: Any
+_assert_valid_refcount: Any
+_gen_alignment_data: Any
+assert_no_gc_cycles: Any
+break_cycles: Any
+HAS_LAPACK64: Any
+TestCase: Any
+run_module_suite: Any
diff --git a/numpy/testing/tests/test_utils.py b/numpy/testing/tests/test_utils.py
index 6a6cc664a..c3b9e04b6 100644
--- a/numpy/testing/tests/test_utils.py
+++ b/numpy/testing/tests/test_utils.py
@@ -1240,7 +1240,7 @@ def assert_warn_len_equal(mod, n_in_context, py34=None, py37=None):
if sys.version_info[:2] >= (3, 7):
if py37 is not None:
n_in_context = py37
- elif sys.version_info[:2] >= (3, 4):
+ else:
if py34 is not None:
n_in_context = py34
assert_equal(num_warns, n_in_context)
diff --git a/numpy/tests/test_public_api.py b/numpy/tests/test_public_api.py
index 5b2cbbffe..21b8b838f 100644
--- a/numpy/tests/test_public_api.py
+++ b/numpy/tests/test_public_api.py
@@ -145,18 +145,7 @@ PUBLIC_MODULES = ['numpy.' + s for s in [
"distutils.log",
"distutils.system_info",
"doc",
- "doc.basics",
- "doc.broadcasting",
- "doc.byteswapping",
"doc.constants",
- "doc.creation",
- "doc.dispatch",
- "doc.glossary",
- "doc.indexing",
- "doc.internals",
- "doc.misc",
- "doc.structured_arrays",
- "doc.subclassing",
"doc.ufuncs",
"f2py",
"fft",
@@ -367,18 +356,6 @@ def test_all_modules_are_expected():
SKIP_LIST_2 = [
'numpy.math',
'numpy.distutils.log.sys',
- 'numpy.distutils.system_info.copy',
- 'numpy.distutils.system_info.distutils',
- 'numpy.distutils.system_info.log',
- 'numpy.distutils.system_info.os',
- 'numpy.distutils.system_info.platform',
- 'numpy.distutils.system_info.re',
- 'numpy.distutils.system_info.shutil',
- 'numpy.distutils.system_info.subprocess',
- 'numpy.distutils.system_info.sys',
- 'numpy.distutils.system_info.tempfile',
- 'numpy.distutils.system_info.textwrap',
- 'numpy.distutils.system_info.warnings',
'numpy.doc.constants.re',
'numpy.doc.constants.textwrap',
'numpy.lib.emath',
diff --git a/numpy/typing/__init__.py b/numpy/typing/__init__.py
index 3a00162cb..86fd5e787 100644
--- a/numpy/typing/__init__.py
+++ b/numpy/typing/__init__.py
@@ -93,3 +93,8 @@ Please see : https://numpy.org/devdocs/reference/arrays.dtypes.html
from ._array_like import _SupportsArray, ArrayLike
from ._shape import _Shape, _ShapeLike
from ._dtype_like import DtypeLike
+
+from numpy._pytesttester import PytestTester
+test = PytestTester(__name__)
+del PytestTester
+
diff --git a/numpy/tests/setup.py b/numpy/typing/setup.py
index f034cdf95..c444e769f 100644
--- a/numpy/tests/setup.py
+++ b/numpy/typing/setup.py
@@ -1,7 +1,8 @@
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
- config = Configuration('tests', parent_package, top_path)
- config.add_data_dir('typing')
+ config = Configuration('typing', parent_package, top_path)
+ config.add_subpackage('tests')
+ config.add_data_dir('tests/data')
return config
diff --git a/numpy/typing/tests/__init__.py b/numpy/typing/tests/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/numpy/typing/tests/__init__.py
diff --git a/numpy/tests/typing/fail/array_like.py b/numpy/typing/tests/data/fail/array_like.py
index a97e72dc7..a97e72dc7 100644
--- a/numpy/tests/typing/fail/array_like.py
+++ b/numpy/typing/tests/data/fail/array_like.py
diff --git a/numpy/tests/typing/fail/dtype.py b/numpy/typing/tests/data/fail/dtype.py
index 3dc027daf..3dc027daf 100644
--- a/numpy/tests/typing/fail/dtype.py
+++ b/numpy/typing/tests/data/fail/dtype.py
diff --git a/numpy/typing/tests/data/fail/flatiter.py b/numpy/typing/tests/data/fail/flatiter.py
new file mode 100644
index 000000000..e8a82344f
--- /dev/null
+++ b/numpy/typing/tests/data/fail/flatiter.py
@@ -0,0 +1,25 @@
+from typing import Any
+
+import numpy as np
+from numpy.typing import DtypeLike, _SupportsArray
+
+
+class Index:
+ def __index__(self) -> int:
+ ...
+
+
+a: "np.flatiter[np.ndarray]"
+supports_array: _SupportsArray
+
+a.base = Any # E: Property "base" defined in "flatiter" is read-only
+a.coords = Any # E: Property "coords" defined in "flatiter" is read-only
+a.index = Any # E: Property "index" defined in "flatiter" is read-only
+a.copy(order='C') # E: Unexpected keyword argument
+
+# NOTE: Contrary to `ndarray.__getitem__` its counterpart in `flatiter`
+# does not accept objects with the `__array__` or `__index__` protocols;
+# boolean indexing is just plain broken (gh-17175)
+a[np.bool_()] # E: No overload variant of "__getitem__"
+a[Index()] # E: No overload variant of "__getitem__"
+a[supports_array] # E: No overload variant of "__getitem__"
diff --git a/numpy/tests/typing/fail/fromnumeric.py b/numpy/typing/tests/data/fail/fromnumeric.py
index 66f8a89d0..c9156895d 100644
--- a/numpy/tests/typing/fail/fromnumeric.py
+++ b/numpy/typing/tests/data/fail/fromnumeric.py
@@ -124,3 +124,31 @@ np.amin(a, keepdims=1.0) # E: No overload variant of "amin" matches argument ty
np.amin(a, out=1.0) # E: No overload variant of "amin" matches argument type
np.amin(a, initial=[1.0]) # E: No overload variant of "amin" matches argument type
np.amin(a, where=[1.0]) # E: List item 0 has incompatible type
+
+np.prod(a, axis=1.0) # E: No overload variant of "prod" matches argument type
+np.prod(a, out=False) # E: No overload variant of "prod" matches argument type
+np.prod(a, keepdims=1.0) # E: No overload variant of "prod" matches argument type
+np.prod(a, initial=int) # E: No overload variant of "prod" matches argument type
+np.prod(a, where=1.0) # E: No overload variant of "prod" matches argument type
+
+np.cumprod(a, axis=1.0) # E: Argument "axis" to "cumprod" has incompatible type
+np.cumprod(a, out=False) # E: Argument "out" to "cumprod" has incompatible type
+
+np.size(a, axis=1.0) # E: Argument "axis" to "size" has incompatible type
+
+np.around(a, decimals=1.0) # E: No overload variant of "around" matches argument type
+np.around(a, out=type) # E: No overload variant of "around" matches argument type
+
+np.mean(a, axis=1.0) # E: No overload variant of "mean" matches argument type
+np.mean(a, out=False) # E: No overload variant of "mean" matches argument type
+np.mean(a, keepdims=1.0) # E: No overload variant of "mean" matches argument type
+
+np.std(a, axis=1.0) # E: No overload variant of "std" matches argument type
+np.std(a, out=False) # E: No overload variant of "std" matches argument type
+np.std(a, ddof='test') # E: No overload variant of "std" matches argument type
+np.std(a, keepdims=1.0) # E: No overload variant of "std" matches argument type
+
+np.var(a, axis=1.0) # E: No overload variant of "var" matches argument type
+np.var(a, out=False) # E: No overload variant of "var" matches argument type
+np.var(a, ddof='test') # E: No overload variant of "var" matches argument type
+np.var(a, keepdims=1.0) # E: No overload variant of "var" matches argument type
diff --git a/numpy/typing/tests/data/fail/linspace.py b/numpy/typing/tests/data/fail/linspace.py
new file mode 100644
index 000000000..a9769c5d6
--- /dev/null
+++ b/numpy/typing/tests/data/fail/linspace.py
@@ -0,0 +1,13 @@
+import numpy as np
+
+np.linspace(None, 'bob') # E: No overload variant
+np.linspace(0, 2, num=10.0) # E: No overload variant
+np.linspace(0, 2, endpoint='True') # E: No overload variant
+np.linspace(0, 2, retstep=b'False') # E: No overload variant
+np.linspace(0, 2, dtype=0) # E: No overload variant
+np.linspace(0, 2, axis=None) # E: No overload variant
+
+np.logspace(None, 'bob') # E: Argument 1
+np.logspace(0, 2, base=None) # E: Argument "base"
+
+np.geomspace(None, 'bob') # E: Argument 1
diff --git a/numpy/typing/tests/data/fail/modules.py b/numpy/typing/tests/data/fail/modules.py
new file mode 100644
index 000000000..e7ffe8920
--- /dev/null
+++ b/numpy/typing/tests/data/fail/modules.py
@@ -0,0 +1,3 @@
+import numpy as np
+
+np.testing.bob # E: Module has no attribute
diff --git a/numpy/tests/typing/fail/ndarray.py b/numpy/typing/tests/data/fail/ndarray.py
index 5a5130d40..5a5130d40 100644
--- a/numpy/tests/typing/fail/ndarray.py
+++ b/numpy/typing/tests/data/fail/ndarray.py
diff --git a/numpy/tests/typing/fail/numerictypes.py b/numpy/typing/tests/data/fail/numerictypes.py
index dd03eacc1..dd03eacc1 100644
--- a/numpy/tests/typing/fail/numerictypes.py
+++ b/numpy/typing/tests/data/fail/numerictypes.py
diff --git a/numpy/tests/typing/fail/scalars.py b/numpy/typing/tests/data/fail/scalars.py
index 5d7221895..47c031163 100644
--- a/numpy/tests/typing/fail/scalars.py
+++ b/numpy/typing/tests/data/fail/scalars.py
@@ -32,11 +32,16 @@ dt_64 = np.datetime64(0, "D")
td_64 = np.timedelta64(1, "h")
dt_64 + dt_64 # E: Unsupported operand types
-
td_64 - dt_64 # E: Unsupported operand types
-td_64 / dt_64 # E: No overload
td_64 % 1 # E: Unsupported operand types
-td_64 % dt_64 # E: Unsupported operand types
+
+# NOTE: The 2 tests below currently don't work due to the broad
+# (i.e. untyped) signature of `generic.__truediv__()` and `.__mod__()`.
+# TODO: Revisit this once annotations are added to the
+# `_ArrayOrScalarCommon` magic methods.
+
+# td_64 / dt_64 # E: No overload
+# td_64 % dt_64 # E: Unsupported operand types
class A:
diff --git a/numpy/tests/typing/fail/simple.py b/numpy/typing/tests/data/fail/simple.py
index 57c08fb7d..57c08fb7d 100644
--- a/numpy/tests/typing/fail/simple.py
+++ b/numpy/typing/tests/data/fail/simple.py
diff --git a/numpy/tests/typing/fail/ufuncs.py b/numpy/typing/tests/data/fail/ufuncs.py
index 4da9d08ba..4da9d08ba 100644
--- a/numpy/tests/typing/fail/ufuncs.py
+++ b/numpy/typing/tests/data/fail/ufuncs.py
diff --git a/numpy/tests/typing/fail/warnings_and_errors.py b/numpy/typing/tests/data/fail/warnings_and_errors.py
index 7390cc45f..7390cc45f 100644
--- a/numpy/tests/typing/fail/warnings_and_errors.py
+++ b/numpy/typing/tests/data/fail/warnings_and_errors.py
diff --git a/numpy/tests/typing/mypy.ini b/numpy/typing/tests/data/mypy.ini
index 91d93588a..91d93588a 100644
--- a/numpy/tests/typing/mypy.ini
+++ b/numpy/typing/tests/data/mypy.ini
diff --git a/numpy/tests/typing/pass/array_like.py b/numpy/typing/tests/data/pass/array_like.py
index 6b823ca7e..6b823ca7e 100644
--- a/numpy/tests/typing/pass/array_like.py
+++ b/numpy/typing/tests/data/pass/array_like.py
diff --git a/numpy/tests/typing/pass/dtype.py b/numpy/typing/tests/data/pass/dtype.py
index cbae8c078..cbae8c078 100644
--- a/numpy/tests/typing/pass/dtype.py
+++ b/numpy/typing/tests/data/pass/dtype.py
diff --git a/numpy/typing/tests/data/pass/flatiter.py b/numpy/typing/tests/data/pass/flatiter.py
new file mode 100644
index 000000000..c0219eb2b
--- /dev/null
+++ b/numpy/typing/tests/data/pass/flatiter.py
@@ -0,0 +1,14 @@
+import numpy as np
+
+a = np.empty((2, 2)).flat
+
+a.base
+a.copy()
+a.coords
+a.index
+iter(a)
+next(a)
+a[0]
+a[[0, 1, 2]]
+a[...]
+a[:]
diff --git a/numpy/tests/typing/pass/fromnumeric.py b/numpy/typing/tests/data/pass/fromnumeric.py
index d9dd45c54..9e936e684 100644
--- a/numpy/tests/typing/pass/fromnumeric.py
+++ b/numpy/typing/tests/data/pass/fromnumeric.py
@@ -10,6 +10,7 @@ B.setflags(write=False)
a = np.bool_(True)
b = np.float32(1.0)
c = 1.0
+d = np.array(1.0, dtype=np.float32) # writeable
np.take(a, 0)
np.take(b, 0)
@@ -183,3 +184,77 @@ np.amin(A, axis=0)
np.amin(B, axis=0)
np.amin(A, keepdims=True)
np.amin(B, keepdims=True)
+
+np.prod(a)
+np.prod(b)
+np.prod(c)
+np.prod(A)
+np.prod(B)
+np.prod(a, dtype=None)
+np.prod(A, dtype=None)
+np.prod(A, axis=0)
+np.prod(B, axis=0)
+np.prod(A, keepdims=True)
+np.prod(B, keepdims=True)
+np.prod(b, out=d)
+np.prod(B, out=d)
+
+np.cumprod(a)
+np.cumprod(b)
+np.cumprod(c)
+np.cumprod(A)
+np.cumprod(B)
+
+np.ndim(a)
+np.ndim(b)
+np.ndim(c)
+np.ndim(A)
+np.ndim(B)
+
+np.size(a)
+np.size(b)
+np.size(c)
+np.size(A)
+np.size(B)
+
+np.around(a)
+np.around(b)
+np.around(c)
+np.around(A)
+np.around(B)
+
+np.mean(a)
+np.mean(b)
+np.mean(c)
+np.mean(A)
+np.mean(B)
+np.mean(A, axis=0)
+np.mean(B, axis=0)
+np.mean(A, keepdims=True)
+np.mean(B, keepdims=True)
+np.mean(b, out=d)
+np.mean(B, out=d)
+
+np.std(a)
+np.std(b)
+np.std(c)
+np.std(A)
+np.std(B)
+np.std(A, axis=0)
+np.std(B, axis=0)
+np.std(A, keepdims=True)
+np.std(B, keepdims=True)
+np.std(b, out=d)
+np.std(B, out=d)
+
+np.var(a)
+np.var(b)
+np.var(c)
+np.var(A)
+np.var(B)
+np.var(A, axis=0)
+np.var(B, axis=0)
+np.var(A, keepdims=True)
+np.var(B, keepdims=True)
+np.var(b, out=d)
+np.var(B, out=d)
diff --git a/numpy/typing/tests/data/pass/linspace.py b/numpy/typing/tests/data/pass/linspace.py
new file mode 100644
index 000000000..8c6d0d56b
--- /dev/null
+++ b/numpy/typing/tests/data/pass/linspace.py
@@ -0,0 +1,22 @@
+import numpy as np
+
+class Index:
+ def __index__(self) -> int:
+ return 0
+
+np.linspace(0, 2)
+np.linspace(0.5, [0, 1, 2])
+np.linspace([0, 1, 2], 3)
+np.linspace(0j, 2)
+np.linspace(0, 2, num=10)
+np.linspace(0, 2, endpoint=True)
+np.linspace(0, 2, retstep=True)
+np.linspace(0j, 2j, retstep=True)
+np.linspace(0, 2, dtype=bool)
+np.linspace([0, 1], [2, 3], axis=Index())
+
+np.logspace(0, 2, base=2)
+np.logspace(0, 2, base=2)
+np.logspace(0, 2, base=[1j, 2j], num=2)
+
+np.geomspace(1, 2)
diff --git a/numpy/typing/tests/data/pass/literal.py b/numpy/typing/tests/data/pass/literal.py
new file mode 100644
index 000000000..321ce3c2b
--- /dev/null
+++ b/numpy/typing/tests/data/pass/literal.py
@@ -0,0 +1,43 @@
+from functools import partial
+from typing import Callable, List, Tuple
+
+import pytest # type: ignore
+import numpy as np
+
+AR = np.array(0)
+AR.setflags(write=False)
+
+KACF = frozenset({None, "K", "A", "C", "F"})
+ACF = frozenset({None, "A", "C", "F"})
+CF = frozenset({None, "C", "F"})
+
+order_list: List[Tuple[frozenset, Callable]] = [
+ (KACF, partial(np.ndarray, 1)),
+ (KACF, AR.tobytes),
+ (KACF, partial(AR.astype, int)),
+ (KACF, AR.copy),
+ (ACF, partial(AR.reshape, 1)),
+ (KACF, AR.flatten),
+ (KACF, AR.ravel),
+ (KACF, partial(np.array, 1)),
+ (CF, partial(np.zeros, 1)),
+ (CF, partial(np.ones, 1)),
+ (CF, partial(np.empty, 1)),
+ (CF, partial(np.full, 1, 1)),
+ (KACF, partial(np.zeros_like, AR)),
+ (KACF, partial(np.ones_like, AR)),
+ (KACF, partial(np.empty_like, AR)),
+ (KACF, partial(np.full_like, AR, 1)),
+ (KACF, partial(np.add, 1, 1)), # i.e. np.ufunc.__call__
+ (ACF, partial(np.reshape, AR, 1)),
+ (KACF, partial(np.ravel, AR)),
+]
+
+for order_set, func in order_list:
+ for order in order_set:
+ func(order=order)
+
+ invalid_orders = KACF - order_set
+ for order in invalid_orders:
+ with pytest.raises(ValueError):
+ func(order=order)
diff --git a/numpy/tests/typing/pass/ndarray_conversion.py b/numpy/typing/tests/data/pass/ndarray_conversion.py
index 303cf53e4..303cf53e4 100644
--- a/numpy/tests/typing/pass/ndarray_conversion.py
+++ b/numpy/typing/tests/data/pass/ndarray_conversion.py
diff --git a/numpy/tests/typing/pass/ndarray_shape_manipulation.py b/numpy/typing/tests/data/pass/ndarray_shape_manipulation.py
index 0ca3dff39..0ca3dff39 100644
--- a/numpy/tests/typing/pass/ndarray_shape_manipulation.py
+++ b/numpy/typing/tests/data/pass/ndarray_shape_manipulation.py
diff --git a/numpy/tests/typing/pass/numerictypes.py b/numpy/typing/tests/data/pass/numerictypes.py
index 4f205cabc..4f205cabc 100644
--- a/numpy/tests/typing/pass/numerictypes.py
+++ b/numpy/typing/tests/data/pass/numerictypes.py
diff --git a/numpy/tests/typing/pass/scalars.py b/numpy/typing/tests/data/pass/scalars.py
index 1c7ace282..c02e1ed36 100644
--- a/numpy/tests/typing/pass/scalars.py
+++ b/numpy/typing/tests/data/pass/scalars.py
@@ -1,27 +1,38 @@
+import sys
+import datetime as dt
+
import numpy as np
# Construction
+class D:
+ def __index__(self) -> int:
+ return 0
+
+
class C:
- def __complex__(self):
+ def __complex__(self) -> complex:
return 3j
class B:
- def __int__(self):
+ def __int__(self) -> int:
return 4
class A:
- def __float__(self):
+ def __float__(self) -> float:
return 4.0
np.complex64(3j)
+np.complex64(A())
np.complex64(C())
np.complex128(3j)
np.complex128(C())
np.complex128(None)
+np.complex64("1.2")
+np.complex128(b"2j")
np.int8(4)
np.int16(3.4)
@@ -29,11 +40,20 @@ np.int32(4)
np.int64(-1)
np.uint8(B())
np.uint32()
+np.int32("1")
+np.int64(b"2")
np.float16(A())
np.float32(16)
np.float64(3.0)
np.float64(None)
+np.float32("1")
+np.float16(b"2.5")
+
+if sys.version_info >= (3, 8):
+ np.uint64(D())
+ np.float32(D())
+ np.complex64(D())
np.bytes_(b"hello")
np.bytes_("hello", 'utf-8')
@@ -66,14 +86,25 @@ np.uint64().shape
# Time structures
np.datetime64()
np.datetime64(0, "D")
+np.datetime64(0, b"D")
+np.datetime64(0, ('ms', 3))
np.datetime64("2019")
+np.datetime64(b"2019")
np.datetime64("2019", "D")
+np.datetime64(np.datetime64())
+np.datetime64(dt.datetime(2000, 5, 3))
np.datetime64(None)
np.datetime64(None, "D")
np.timedelta64()
np.timedelta64(0)
np.timedelta64(0, "D")
+np.timedelta64(0, ('ms', 3))
+np.timedelta64(0, b"D")
+np.timedelta64("3")
+np.timedelta64(b"5")
+np.timedelta64(np.timedelta64(2))
+np.timedelta64(dt.timedelta(2))
np.timedelta64(None)
np.timedelta64(None, "D")
diff --git a/numpy/tests/typing/pass/simple.py b/numpy/typing/tests/data/pass/simple.py
index 527050557..527050557 100644
--- a/numpy/tests/typing/pass/simple.py
+++ b/numpy/typing/tests/data/pass/simple.py
diff --git a/numpy/tests/typing/pass/simple_py3.py b/numpy/typing/tests/data/pass/simple_py3.py
index c05a1ce61..c05a1ce61 100644
--- a/numpy/tests/typing/pass/simple_py3.py
+++ b/numpy/typing/tests/data/pass/simple_py3.py
diff --git a/numpy/tests/typing/pass/ufuncs.py b/numpy/typing/tests/data/pass/ufuncs.py
index 82172952a..82172952a 100644
--- a/numpy/tests/typing/pass/ufuncs.py
+++ b/numpy/typing/tests/data/pass/ufuncs.py
diff --git a/numpy/tests/typing/pass/warnings_and_errors.py b/numpy/typing/tests/data/pass/warnings_and_errors.py
index 5b6ec2626..5b6ec2626 100644
--- a/numpy/tests/typing/pass/warnings_and_errors.py
+++ b/numpy/typing/tests/data/pass/warnings_and_errors.py
diff --git a/numpy/tests/typing/reveal/constants.py b/numpy/typing/tests/data/reveal/constants.py
index 8e00810bd..8e00810bd 100644
--- a/numpy/tests/typing/reveal/constants.py
+++ b/numpy/typing/tests/data/reveal/constants.py
diff --git a/numpy/typing/tests/data/reveal/flatiter.py b/numpy/typing/tests/data/reveal/flatiter.py
new file mode 100644
index 000000000..56cdc7a0e
--- /dev/null
+++ b/numpy/typing/tests/data/reveal/flatiter.py
@@ -0,0 +1,14 @@
+import numpy as np
+
+a: "np.flatiter[np.ndarray]"
+
+reveal_type(a.base) # E: numpy.ndarray*
+reveal_type(a.copy()) # E: numpy.ndarray*
+reveal_type(a.coords) # E: tuple[builtins.int]
+reveal_type(a.index) # E: int
+reveal_type(iter(a)) # E: Iterator[numpy.generic*]
+reveal_type(next(a)) # E: numpy.generic
+reveal_type(a[0]) # E: numpy.generic
+reveal_type(a[[0, 1, 2]]) # E: numpy.ndarray*
+reveal_type(a[...]) # E: numpy.ndarray*
+reveal_type(a[:]) # E: numpy.ndarray*
diff --git a/numpy/tests/typing/reveal/fromnumeric.py b/numpy/typing/tests/data/reveal/fromnumeric.py
index f5feb3f5f..06501f6e2 100644
--- a/numpy/tests/typing/reveal/fromnumeric.py
+++ b/numpy/typing/tests/data/reveal/fromnumeric.py
@@ -10,6 +10,7 @@ B.setflags(write=False)
a = np.bool_(True)
b = np.float32(1.0)
c = 1.0
+d = np.array(1.0, dtype=np.float32) # writeable
reveal_type(np.take(a, 0)) # E: numpy.bool_
reveal_type(np.take(b, 0)) # E: numpy.float32
@@ -203,3 +204,75 @@ reveal_type(np.amin(A, axis=0)) # E: Union[numpy.number, numpy.ndarray]
reveal_type(np.amin(B, axis=0)) # E: Union[numpy.number, numpy.ndarray]
reveal_type(np.amin(A, keepdims=True)) # E: Union[numpy.number, numpy.ndarray]
reveal_type(np.amin(B, keepdims=True)) # E: Union[numpy.number, numpy.ndarray]
+
+reveal_type(np.prod(a)) # E: numpy.number
+reveal_type(np.prod(b)) # E: numpy.float32
+reveal_type(np.prod(c)) # E: numpy.number
+reveal_type(np.prod(A)) # E: numpy.number
+reveal_type(np.prod(B)) # E: numpy.number
+reveal_type(np.prod(A, axis=0)) # E: Union[numpy.number, numpy.ndarray]
+reveal_type(np.prod(B, axis=0)) # E: Union[numpy.number, numpy.ndarray]
+reveal_type(np.prod(A, keepdims=True)) # E: Union[numpy.number, numpy.ndarray]
+reveal_type(np.prod(B, keepdims=True)) # E: Union[numpy.number, numpy.ndarray]
+reveal_type(np.prod(b, out=d)) # E: numpy.ndarray
+reveal_type(np.prod(B, out=d)) # E: numpy.ndarray
+
+reveal_type(np.cumprod(a)) # E: numpy.ndarray
+reveal_type(np.cumprod(b)) # E: numpy.ndarray
+reveal_type(np.cumprod(c)) # E: numpy.ndarray
+reveal_type(np.cumprod(A)) # E: numpy.ndarray
+reveal_type(np.cumprod(B)) # E: numpy.ndarray
+
+reveal_type(np.ndim(a)) # E: int
+reveal_type(np.ndim(b)) # E: int
+reveal_type(np.ndim(c)) # E: int
+reveal_type(np.ndim(A)) # E: int
+reveal_type(np.ndim(B)) # E: int
+
+reveal_type(np.size(a)) # E: int
+reveal_type(np.size(b)) # E: int
+reveal_type(np.size(c)) # E: int
+reveal_type(np.size(A)) # E: int
+reveal_type(np.size(B)) # E: int
+
+reveal_type(np.around(a)) # E: numpy.number
+reveal_type(np.around(b)) # E: numpy.float32
+reveal_type(np.around(c)) # E: numpy.number
+reveal_type(np.around(A)) # E: numpy.ndarray
+reveal_type(np.around(B)) # E: numpy.ndarray
+
+reveal_type(np.mean(a)) # E: numpy.number
+reveal_type(np.mean(b)) # E: numpy.number
+reveal_type(np.mean(c)) # E: numpy.number
+reveal_type(np.mean(A)) # E: numpy.number
+reveal_type(np.mean(B)) # E: numpy.number
+reveal_type(np.mean(A, axis=0)) # E: Union[numpy.number, numpy.ndarray]
+reveal_type(np.mean(B, axis=0)) # E: Union[numpy.number, numpy.ndarray]
+reveal_type(np.mean(A, keepdims=True)) # E: Union[numpy.number, numpy.ndarray]
+reveal_type(np.mean(B, keepdims=True)) # E: Union[numpy.number, numpy.ndarray]
+reveal_type(np.mean(b, out=d)) # E: numpy.ndarray
+reveal_type(np.mean(B, out=d)) # E: numpy.ndarray
+
+reveal_type(np.std(a)) # E: numpy.number
+reveal_type(np.std(b)) # E: numpy.number
+reveal_type(np.std(c)) # E: numpy.number
+reveal_type(np.std(A)) # E: numpy.number
+reveal_type(np.std(B)) # E: numpy.number
+reveal_type(np.std(A, axis=0)) # E: Union[numpy.number, numpy.ndarray]
+reveal_type(np.std(B, axis=0)) # E: Union[numpy.number, numpy.ndarray]
+reveal_type(np.std(A, keepdims=True)) # E: Union[numpy.number, numpy.ndarray]
+reveal_type(np.std(B, keepdims=True)) # E: Union[numpy.number, numpy.ndarray]
+reveal_type(np.std(b, out=d)) # E: numpy.ndarray
+reveal_type(np.std(B, out=d)) # E: numpy.ndarray
+
+reveal_type(np.var(a)) # E: numpy.number
+reveal_type(np.var(b)) # E: numpy.number
+reveal_type(np.var(c)) # E: numpy.number
+reveal_type(np.var(A)) # E: numpy.number
+reveal_type(np.var(B)) # E: numpy.number
+reveal_type(np.var(A, axis=0)) # E: Union[numpy.number, numpy.ndarray]
+reveal_type(np.var(B, axis=0)) # E: Union[numpy.number, numpy.ndarray]
+reveal_type(np.var(A, keepdims=True)) # E: Union[numpy.number, numpy.ndarray]
+reveal_type(np.var(B, keepdims=True)) # E: Union[numpy.number, numpy.ndarray]
+reveal_type(np.var(b, out=d)) # E: numpy.ndarray
+reveal_type(np.var(B, out=d)) # E: numpy.ndarray
diff --git a/numpy/typing/tests/data/reveal/linspace.py b/numpy/typing/tests/data/reveal/linspace.py
new file mode 100644
index 000000000..cfbbdf390
--- /dev/null
+++ b/numpy/typing/tests/data/reveal/linspace.py
@@ -0,0 +1,6 @@
+import numpy as np
+
+reveal_type(np.linspace(0, 10)) # E: numpy.ndarray
+reveal_type(np.linspace(0, 10, retstep=True)) # E: Tuple[numpy.ndarray, numpy.inexact]
+reveal_type(np.logspace(0, 10)) # E: numpy.ndarray
+reveal_type(np.geomspace(1, 10)) # E: numpy.ndarray
diff --git a/numpy/typing/tests/data/reveal/modules.py b/numpy/typing/tests/data/reveal/modules.py
new file mode 100644
index 000000000..406463152
--- /dev/null
+++ b/numpy/typing/tests/data/reveal/modules.py
@@ -0,0 +1,20 @@
+import numpy as np
+
+reveal_type(np) # E: ModuleType
+
+reveal_type(np.char) # E: ModuleType
+reveal_type(np.ctypeslib) # E: ModuleType
+reveal_type(np.emath) # E: ModuleType
+reveal_type(np.fft) # E: ModuleType
+reveal_type(np.lib) # E: ModuleType
+reveal_type(np.linalg) # E: ModuleType
+reveal_type(np.ma) # E: ModuleType
+reveal_type(np.matrixlib) # E: ModuleType
+reveal_type(np.polynomial) # E: ModuleType
+reveal_type(np.random) # E: ModuleType
+reveal_type(np.rec) # E: ModuleType
+reveal_type(np.testing) # E: ModuleType
+reveal_type(np.version) # E: ModuleType
+
+# TODO: Remove when annotations have been added to `np.testing.assert_equal`
+reveal_type(np.testing.assert_equal) # E: Any
diff --git a/numpy/tests/typing/reveal/ndarray_conversion.py b/numpy/typing/tests/data/reveal/ndarray_conversion.py
index 411adcf63..4ee637b75 100644
--- a/numpy/tests/typing/reveal/ndarray_conversion.py
+++ b/numpy/typing/tests/data/reveal/ndarray_conversion.py
@@ -9,7 +9,7 @@ reveal_type(nd.item(0, 1)) # E: Any
reveal_type(nd.item((0, 1))) # E: Any
# tolist
-reveal_type(nd.tolist()) # E: builtins.list[Any]
+reveal_type(nd.tolist()) # E: Any
# itemset does not return a value
# tostring is pretty simple
diff --git a/numpy/tests/typing/reveal/ndarray_shape_manipulation.py b/numpy/typing/tests/data/reveal/ndarray_shape_manipulation.py
index a44e1cfa1..a44e1cfa1 100644
--- a/numpy/tests/typing/reveal/ndarray_shape_manipulation.py
+++ b/numpy/typing/tests/data/reveal/ndarray_shape_manipulation.py
diff --git a/numpy/tests/typing/reveal/numerictypes.py b/numpy/typing/tests/data/reveal/numerictypes.py
index e026158cd..e026158cd 100644
--- a/numpy/tests/typing/reveal/numerictypes.py
+++ b/numpy/typing/tests/data/reveal/numerictypes.py
diff --git a/numpy/tests/typing/reveal/scalars.py b/numpy/typing/tests/data/reveal/scalars.py
index 8a9555fc3..882fe9612 100644
--- a/numpy/tests/typing/reveal/scalars.py
+++ b/numpy/typing/tests/data/reveal/scalars.py
@@ -28,3 +28,6 @@ reveal_type(td - 1) # E: numpy.timedelta64
reveal_type(td / 1.0) # E: numpy.timedelta64
reveal_type(td / td) # E: float
reveal_type(td % td) # E: numpy.timedelta64
+
+reveal_type(np.complex64().real) # E: numpy.float32
+reveal_type(np.complex128().imag) # E: numpy.float64
diff --git a/numpy/tests/typing/reveal/warnings_and_errors.py b/numpy/typing/tests/data/reveal/warnings_and_errors.py
index c428deb7a..c428deb7a 100644
--- a/numpy/tests/typing/reveal/warnings_and_errors.py
+++ b/numpy/typing/tests/data/reveal/warnings_and_errors.py
diff --git a/numpy/tests/test_typing.py b/numpy/typing/tests/test_typing.py
index 04ea3c64d..beb53ddec 100644
--- a/numpy/tests/test_typing.py
+++ b/numpy/typing/tests/test_typing.py
@@ -12,15 +12,13 @@ except ImportError:
else:
NO_MYPY = False
-TESTS_DIR = os.path.join(
- os.path.dirname(os.path.abspath(__file__)),
- "typing",
-)
-PASS_DIR = os.path.join(TESTS_DIR, "pass")
-FAIL_DIR = os.path.join(TESTS_DIR, "fail")
-REVEAL_DIR = os.path.join(TESTS_DIR, "reveal")
-MYPY_INI = os.path.join(TESTS_DIR, "mypy.ini")
-CACHE_DIR = os.path.join(TESTS_DIR, ".mypy_cache")
+
+DATA_DIR = os.path.join(os.path.dirname(__file__), "data")
+PASS_DIR = os.path.join(DATA_DIR, "pass")
+FAIL_DIR = os.path.join(DATA_DIR, "fail")
+REVEAL_DIR = os.path.join(DATA_DIR, "reveal")
+MYPY_INI = os.path.join(DATA_DIR, "mypy.ini")
+CACHE_DIR = os.path.join(DATA_DIR, ".mypy_cache")
def get_test_cases(directory):
@@ -89,7 +87,7 @@ def test_fail(path):
for i, line in enumerate(lines):
lineno = i + 1
- if " E:" not in line and lineno not in errors:
+ if line.startswith('#') or (" E:" not in line and lineno not in errors):
continue
target_line = lines[lineno - 1]
diff --git a/numpy/version.pyi b/numpy/version.pyi
new file mode 100644
index 000000000..6f3659e43
--- /dev/null
+++ b/numpy/version.pyi
@@ -0,0 +1,7 @@
+from typing import Any
+
+short_version: Any
+version: Any
+full_version: Any
+git_revision: Any
+release: Any