summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--.github/workflows/build_test.yml2
-rw-r--r--.gitignore1
-rw-r--r--azure-steps-windows.yml2
-rw-r--r--benchmarks/benchmarks/bench_scalar.py12
-rw-r--r--build_requirements.txt2
-rw-r--r--doc/release/upcoming_changes/22539.change.rst19
-rw-r--r--doc/release/upcoming_changes/22539.deprecation.rst29
-rw-r--r--doc/release/upcoming_changes/23713.improvement.rst9
-rw-r--r--doc/source/index.rst1
-rw-r--r--doc/source/reference/arrays.scalars.rst2
-rw-r--r--numpy/__init__.pyi33
-rw-r--r--numpy/compat/py3k.py10
-rw-r--r--numpy/compat/tests/test_compat.py3
-rw-r--r--numpy/core/_add_newdocs_scalars.py2
-rw-r--r--numpy/core/code_generators/generate_umath.py52
-rw-r--r--numpy/core/einsumfunc.pyi53
-rw-r--r--numpy/core/fromnumeric.py10
-rw-r--r--numpy/core/fromnumeric.pyi4
-rw-r--r--numpy/core/numerictypes.py19
-rw-r--r--numpy/core/numerictypes.pyi5
-rw-r--r--numpy/core/src/multiarray/dtype_traversal.c267
-rw-r--r--numpy/core/src/multiarray/dtype_traversal.h9
-rw-r--r--numpy/core/src/multiarray/dtypemeta.c3
-rw-r--r--numpy/core/src/multiarray/refcount.c7
-rw-r--r--numpy/core/src/multiarray/refcount.h3
-rw-r--r--numpy/core/src/multiarray/scalartypes.c.src38
-rw-r--r--numpy/core/src/multiarray/usertypes.c5
-rw-r--r--numpy/core/src/umath/loops.c.src42
-rw-r--r--numpy/core/src/umath/loops.h.src11
-rw-r--r--numpy/core/src/umath/ufunc_type_resolution.c33
-rw-r--r--numpy/core/tests/test_arrayprint.py5
-rw-r--r--numpy/core/tests/test_nep50_promotions.py8
-rw-r--r--numpy/core/tests/test_numerictypes.py15
-rw-r--r--numpy/core/tests/test_regression.py4
-rw-r--r--numpy/core/tests/test_umath.py58
-rw-r--r--numpy/distutils/checks/cpu_avx512_spr.c4
-rw-r--r--numpy/exceptions.pyi24
-rwxr-xr-xnumpy/f2py/crackfortran.py6
-rw-r--r--numpy/f2py/tests/src/crackfortran/gh23533.f5
-rw-r--r--numpy/f2py/tests/test_crackfortran.py14
-rw-r--r--numpy/lib/function_base.py48
-rw-r--r--numpy/lib/index_tricks.py32
-rw-r--r--numpy/lib/npyio.py2
-rw-r--r--numpy/lib/tests/test_io.py11
-rw-r--r--numpy/ma/__init__.pyi1
-rw-r--r--numpy/ma/core.pyi3
-rw-r--r--numpy/ma/tests/test_core.py2
-rw-r--r--numpy/polynomial/chebyshev.py6
-rw-r--r--numpy/polynomial/hermite.py6
-rw-r--r--numpy/polynomial/hermite_e.py6
-rw-r--r--numpy/polynomial/laguerre.py6
-rw-r--r--numpy/polynomial/legendre.py6
-rw-r--r--numpy/polynomial/polynomial.py6
-rw-r--r--numpy/random/_common.pxd40
-rw-r--r--numpy/random/_common.pyx2
-rw-r--r--numpy/random/_mt19937.pyx8
-rw-r--r--numpy/random/_pcg64.pyx17
-rw-r--r--numpy/random/_philox.pyx10
-rw-r--r--numpy/random/_sfc64.pyx6
-rw-r--r--numpy/testing/_private/utils.py2
-rw-r--r--numpy/typing/tests/data/fail/einsumfunc.pyi3
-rw-r--r--numpy/typing/tests/data/fail/numerictypes.pyi2
-rw-r--r--numpy/typing/tests/data/pass/numerictypes.py5
-rw-r--r--numpy/typing/tests/data/reveal/einsumfunc.pyi5
-rw-r--r--numpy/typing/tests/data/reveal/modules.pyi2
-rw-r--r--numpy/typing/tests/data/reveal/numerictypes.pyi2
-rw-r--r--pyproject.toml2
-rw-r--r--test_requirements.txt2
68 files changed, 808 insertions, 266 deletions
diff --git a/.github/workflows/build_test.yml b/.github/workflows/build_test.yml
index 759ebca01..70e11a4a4 100644
--- a/.github/workflows/build_test.yml
+++ b/.github/workflows/build_test.yml
@@ -365,7 +365,7 @@ jobs:
docker run --name the_container --interactive -v /:/host arm32v7/ubuntu:22.04 /bin/bash -c "
apt update &&
apt install -y git python3 python3-dev python3-pip &&
- python3 -m pip install cython==0.29.30 setuptools\<49.2.0 hypothesis==6.23.3 pytest==6.2.5 'typing_extensions>=4.2.0' &&
+ python3 -m pip install cython==0.29.34 setuptools\<49.2.0 hypothesis==6.23.3 pytest==6.2.5 'typing_extensions>=4.2.0' &&
ln -s /host/lib64 /lib64 &&
ln -s /host/lib/x86_64-linux-gnu /lib/x86_64-linux-gnu &&
ln -s /host/usr/arm-linux-gnueabihf /usr/arm-linux-gnueabihf &&
diff --git a/.gitignore b/.gitignore
index c15a486d9..e5784971e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -179,6 +179,7 @@ benchmarks/html
benchmarks/env
benchmarks/numpy
benchmarks/_asv_compare.conf.json
+test.obj
# cythonized files
cythonize.dat
numpy/random/_mtrand/_mtrand.c
diff --git a/azure-steps-windows.yml b/azure-steps-windows.yml
index a946addd6..0e2cd6cba 100644
--- a/azure-steps-windows.yml
+++ b/azure-steps-windows.yml
@@ -15,7 +15,7 @@ steps:
- powershell: |
# rtools 42+ does not support 32 bits builds.
- choco install -y rtools --noprogress --force --version=4.0.0.20220206
+ choco install --confirm --no-progress --side-by-side rtools --version=4.0.0.20220206
echo "##vso[task.setvariable variable=RTOOLS40_HOME]c:\rtools40"
displayName: 'Install rtools'
diff --git a/benchmarks/benchmarks/bench_scalar.py b/benchmarks/benchmarks/bench_scalar.py
index 650daa89d..638f66df5 100644
--- a/benchmarks/benchmarks/bench_scalar.py
+++ b/benchmarks/benchmarks/bench_scalar.py
@@ -65,3 +65,15 @@ class ScalarMath(Benchmark):
other + int32
other + int32
other + int32
+
+
+class ScalarStr(Benchmark):
+ # Test scalar to str conversion
+ params = [TYPES1]
+ param_names = ["type"]
+
+ def setup(self, typename):
+ self.a = np.array([100] * 100, dtype=typename)
+
+ def time_str_repr(self, typename):
+ res = [str(x) for x in self.a]
diff --git a/build_requirements.txt b/build_requirements.txt
index 075d454f2..de57da279 100644
--- a/build_requirements.txt
+++ b/build_requirements.txt
@@ -1,5 +1,5 @@
meson-python>=0.10.0
-Cython>=0.29.30,<3.0
+Cython>=0.29.34,<3.0
wheel==0.38.1
ninja
spin==0.3
diff --git a/doc/release/upcoming_changes/22539.change.rst b/doc/release/upcoming_changes/22539.change.rst
new file mode 100644
index 000000000..9df62be30
--- /dev/null
+++ b/doc/release/upcoming_changes/22539.change.rst
@@ -0,0 +1,19 @@
+``np.r_[]`` and ``np.c_[]`` with certain scalar values
+------------------------------------------------------
+In rare cases, using mainly ``np.r_`` with scalars can lead to different
+results. The main potential changes are highlighted by the following::
+
+ >>> np.r_[np.arange(5, dtype=np.uint8), -1].dtype
+ int16 # rather than the default integer (int64 or int32)
+ >>> np.r_[np.arange(5, dtype=np.int8), 255]
+ array([ 0, 1, 2, 3, 4, 255], dtype=int16)
+
+Where the second example returned::
+
+ array([ 0, 1, 2, 3, 4, -1], dtype=int8)
+
+The first one is due to a signed integer scalar with an unsigned integer
+array, while the second is due to ``255`` not fitting into ``int8`` and
+NumPy currently inspecting values to make this work.
+(Note that the second example is expected to change in the future due to
+:ref:`NEP 50 <NEP50>`; it will then raise an error.)
diff --git a/doc/release/upcoming_changes/22539.deprecation.rst b/doc/release/upcoming_changes/22539.deprecation.rst
new file mode 100644
index 000000000..a30434b7e
--- /dev/null
+++ b/doc/release/upcoming_changes/22539.deprecation.rst
@@ -0,0 +1,29 @@
+``np.find_common_type`` is deprecated
+-------------------------------------
+`numpy.find_common_type` is now deprecated and its use should be replaced
+with either `numpy.result_type` or `numpy.promote_types`.
+Most users leave the second ``scalar_types`` argument to ``find_common_type``
+as ``[]`` in which case ``np.result_type`` and ``np.promote_types`` are both
+faster and more robust.
+When not using ``scalar_types`` the main difference is that the replacement
+intentionally converts non-native byte-order to native byte order.
+Further, ``find_common_type`` returns ``object`` dtype rather than failing
+promotion. This leads to differences when the inputs are not all numeric.
+Importantly, this also happens for e.g. timedelta/datetime for which NumPy
+promotion rules are currently sometimes surprising.
+
+When the ``scalar_types`` argument is not ``[]`` things are more complicated.
+In most cases, using ``np.result_type`` and passing the Python values
+``0``, ``0.0``, or ``0j`` has the same result as using ``int``, ``float``,
+or ``complex`` in `scalar_types`.
+
+When ``scalar_types`` is constructed, ``np.result_type`` is the
+correct replacement and it may be passed scalar values like ``np.float32(0.0)``.
+Passing values other than 0, may lead to value-inspecting behavior
+(which ``np.find_common_type`` never used and NEP 50 may change in the future).
+The main possible change in behavior in this case, is when the array types
+are signed integers and scalar types are unsigned.
+
+If you are unsure about how to replace a use of ``scalar_types`` or when
+non-numeric dtypes are likely, please do not hesitate to open a NumPy issue
+to ask for help.
diff --git a/doc/release/upcoming_changes/23713.improvement.rst b/doc/release/upcoming_changes/23713.improvement.rst
new file mode 100644
index 000000000..15a4f412b
--- /dev/null
+++ b/doc/release/upcoming_changes/23713.improvement.rst
@@ -0,0 +1,9 @@
+Signed and unsigned integers always compare correctly
+-----------------------------------------------------
+When ``uint64`` and ``int64`` are mixed in NumPy, NumPy typically
+promotes both to ``float64``. This behavior may be argued about
+but is confusing for comparisons ``==``, ``<=``, since the results
+returned can be incorrect but the conversion is hidden since the
+result is a boolean.
+NumPy will now return the correct results for these by avoiding
+the cast to float.
diff --git a/doc/source/index.rst b/doc/source/index.rst
index f31e730d7..5e31ec0a4 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -17,7 +17,6 @@ NumPy documentation
**Version**: |version|
**Download documentation**:
-`PDF Version <https://numpy.org/doc/stable/numpy-user.pdf>`_ |
`Historical versions of documentation <https://numpy.org/doc/>`_
**Useful links**:
diff --git a/doc/source/reference/arrays.scalars.rst b/doc/source/reference/arrays.scalars.rst
index 9a592984c..4dba54d6b 100644
--- a/doc/source/reference/arrays.scalars.rst
+++ b/doc/source/reference/arrays.scalars.rst
@@ -337,8 +337,6 @@ are also provided.
.. note that these are documented with ..attribute because that is what
autoclass does for aliases under the hood.
-.. autoclass:: numpy.bool8
-
.. attribute:: int8
int16
int32
diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi
index 8627f6c60..da1e98dd6 100644
--- a/numpy/__init__.pyi
+++ b/numpy/__init__.pyi
@@ -209,6 +209,8 @@ from numpy import (
random as random,
testing as testing,
version as version,
+ exceptions as exceptions,
+ dtypes as dtypes,
)
from numpy.core import defchararray, records
@@ -394,7 +396,6 @@ from numpy.core.numerictypes import (
issubsctype as issubsctype,
issubdtype as issubdtype,
sctype2char as sctype2char,
- find_common_type as find_common_type,
nbytes as nbytes,
cast as cast,
ScalarType as ScalarType,
@@ -411,6 +412,15 @@ from numpy.core.shape_base import (
vstack as vstack,
)
+from numpy.exceptions import (
+ ComplexWarning as ComplexWarning,
+ ModuleDeprecationWarning as ModuleDeprecationWarning,
+ VisibleDeprecationWarning as VisibleDeprecationWarning,
+ TooHardError as TooHardError,
+ DTypePromotionError as DTypePromotionError,
+ AxisError as AxisError,
+)
+
from numpy.lib import (
emath as emath,
)
@@ -664,13 +674,6 @@ test: PytestTester
#
# Placeholders for classes
-# Some of these are aliases; others are wrappers with an identical signature
-round_ = around
-product = prod
-cumproduct = cumprod
-sometrue = any
-alltrue = all
-
def show_config() -> None: ...
_NdArraySubClass = TypeVar("_NdArraySubClass", bound=ndarray[Any, Any])
@@ -3319,22 +3322,8 @@ class _CopyMode(enum.Enum):
NEVER: L[2]
# Warnings
-class ModuleDeprecationWarning(DeprecationWarning): ...
-class VisibleDeprecationWarning(UserWarning): ...
-class ComplexWarning(RuntimeWarning): ...
class RankWarning(UserWarning): ...
-# Errors
-class TooHardError(RuntimeError): ...
-
-class AxisError(ValueError, IndexError):
- axis: None | int
- ndim: None | int
- @overload
- def __init__(self, axis: str, ndim: None = ..., msg_prefix: None = ...) -> None: ...
- @overload
- def __init__(self, axis: int, ndim: int, msg_prefix: None | str = ...) -> None: ...
-
_CallType = TypeVar("_CallType", bound=_ErrFunc | _SupportsWrite[str])
class errstate(Generic[_CallType], ContextDecorator):
diff --git a/numpy/compat/py3k.py b/numpy/compat/py3k.py
index 3d10bb988..d02c9f8fe 100644
--- a/numpy/compat/py3k.py
+++ b/numpy/compat/py3k.py
@@ -47,7 +47,15 @@ def asstr(s):
return str(s)
def isfileobj(f):
- return isinstance(f, (io.FileIO, io.BufferedReader, io.BufferedWriter))
+ if not isinstance(f, (io.FileIO, io.BufferedReader, io.BufferedWriter)):
+ return False
+ try:
+ # BufferedReader/Writer may raise OSError when
+ # fetching `fileno()` (e.g. when wrapping BytesIO).
+ f.fileno()
+ return True
+ except OSError:
+ return False
def open_latin1(filename, mode='r'):
return open(filename, mode=mode, encoding='iso-8859-1')
diff --git a/numpy/compat/tests/test_compat.py b/numpy/compat/tests/test_compat.py
index 2b8acbaa0..d4391565e 100644
--- a/numpy/compat/tests/test_compat.py
+++ b/numpy/compat/tests/test_compat.py
@@ -1,4 +1,5 @@
from os.path import join
+from io import BufferedReader, BytesIO
from numpy.compat import isfileobj
from numpy.testing import assert_
@@ -17,3 +18,5 @@ def test_isfileobj():
with open(filename, 'rb') as f:
assert_(isfileobj(f))
+
+ assert_(isfileobj(BufferedReader(BytesIO())) is False)
diff --git a/numpy/core/_add_newdocs_scalars.py b/numpy/core/_add_newdocs_scalars.py
index 58661ed09..f9a6ad963 100644
--- a/numpy/core/_add_newdocs_scalars.py
+++ b/numpy/core/_add_newdocs_scalars.py
@@ -93,7 +93,7 @@ def add_newdoc_for_scalar_type(obj, fixed_aliases, doc):
add_newdoc('numpy.core.numerictypes', obj, docstring)
-add_newdoc_for_scalar_type('bool_', ['bool8'],
+add_newdoc_for_scalar_type('bool_', [],
"""
Boolean type (True or False), stored as a byte.
diff --git a/numpy/core/code_generators/generate_umath.py b/numpy/core/code_generators/generate_umath.py
index d0306bb72..a170f83be 100644
--- a/numpy/core/code_generators/generate_umath.py
+++ b/numpy/core/code_generators/generate_umath.py
@@ -109,6 +109,11 @@ def _check_order(types1, types2):
if t2i > t1i:
break
+ if types1 == "QQ?" and types2 == "qQ?":
+ # Explicitly allow this mixed case, rather than figure out what order
+ # is nicer or how to encode it.
+ return
+
raise TypeError(
f"Input dtypes are unsorted or duplicate: {types1} and {types2}")
@@ -523,49 +528,67 @@ defdict = {
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.greater'),
'PyUFunc_SimpleBinaryComparisonTypeResolver',
- TD(all, out='?', dispatch=[('loops_comparison', bints+'fd')]),
- [TypeDescription('O', FullTypeDescr, 'OO', 'O')],
+ TD(bints, out='?'),
+ [TypeDescription('q', FullTypeDescr, 'qQ', '?'),
+ TypeDescription('q', FullTypeDescr, 'Qq', '?')],
+ TD(inexact+times, out='?', dispatch=[('loops_comparison', bints+'fd')]),
TD('O', out='?'),
+ [TypeDescription('O', FullTypeDescr, 'OO', 'O')],
),
'greater_equal':
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.greater_equal'),
'PyUFunc_SimpleBinaryComparisonTypeResolver',
- TD(all, out='?', dispatch=[('loops_comparison', bints+'fd')]),
- [TypeDescription('O', FullTypeDescr, 'OO', 'O')],
+ TD(bints, out='?'),
+ [TypeDescription('q', FullTypeDescr, 'qQ', '?'),
+ TypeDescription('q', FullTypeDescr, 'Qq', '?')],
+ TD(inexact+times, out='?', dispatch=[('loops_comparison', bints+'fd')]),
TD('O', out='?'),
+ [TypeDescription('O', FullTypeDescr, 'OO', 'O')],
),
'less':
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.less'),
'PyUFunc_SimpleBinaryComparisonTypeResolver',
- TD(all, out='?', dispatch=[('loops_comparison', bints+'fd')]),
- [TypeDescription('O', FullTypeDescr, 'OO', 'O')],
+ TD(bints, out='?'),
+ [TypeDescription('q', FullTypeDescr, 'qQ', '?'),
+ TypeDescription('q', FullTypeDescr, 'Qq', '?')],
+ TD(inexact+times, out='?', dispatch=[('loops_comparison', bints+'fd')]),
TD('O', out='?'),
+ [TypeDescription('O', FullTypeDescr, 'OO', 'O')],
),
'less_equal':
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.less_equal'),
'PyUFunc_SimpleBinaryComparisonTypeResolver',
- TD(all, out='?', dispatch=[('loops_comparison', bints+'fd')]),
- [TypeDescription('O', FullTypeDescr, 'OO', 'O')],
+ TD(bints, out='?'),
+ [TypeDescription('q', FullTypeDescr, 'qQ', '?'),
+ TypeDescription('q', FullTypeDescr, 'Qq', '?')],
+ TD(inexact+times, out='?', dispatch=[('loops_comparison', bints+'fd')]),
TD('O', out='?'),
+ [TypeDescription('O', FullTypeDescr, 'OO', 'O')],
),
'equal':
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.equal'),
'PyUFunc_SimpleBinaryComparisonTypeResolver',
- TD(all, out='?', dispatch=[('loops_comparison', bints+'fd')]),
- [TypeDescription('O', FullTypeDescr, 'OO', 'O')],
+ TD(bints, out='?'),
+ [TypeDescription('q', FullTypeDescr, 'qQ', '?'),
+ TypeDescription('q', FullTypeDescr, 'Qq', '?')],
+ TD(inexact+times, out='?', dispatch=[('loops_comparison', bints+'fd')]),
TD('O', out='?'),
+ [TypeDescription('O', FullTypeDescr, 'OO', 'O')],
),
'not_equal':
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.not_equal'),
'PyUFunc_SimpleBinaryComparisonTypeResolver',
- TD(all, out='?', dispatch=[('loops_comparison', bints+'fd')]),
- [TypeDescription('O', FullTypeDescr, 'OO', 'O')],
+ TD(bints, out='?'),
+ [TypeDescription('q', FullTypeDescr, 'qQ', '?'),
+ TypeDescription('q', FullTypeDescr, 'Qq', '?')],
+ TD(inexact+times, out='?', dispatch=[('loops_comparison', bints+'fd')]),
TD('O', out='?'),
+ [TypeDescription('O', FullTypeDescr, 'OO', 'O')],
),
'logical_and':
Ufunc(2, 1, True_,
@@ -1172,7 +1195,10 @@ def make_arrays(funcdict):
if t.func_data is FullTypeDescr:
tname = english_upper(chartoname[t.type])
datalist.append('(void *)NULL')
- cfunc_fname = f"{tname}_{t.in_}_{t.out}_{cfunc_alias}"
+ if t.out == "?":
+ cfunc_fname = f"{tname}_{t.in_}_bool_{cfunc_alias}"
+ else:
+ cfunc_fname = f"{tname}_{t.in_}_{t.out}_{cfunc_alias}"
elif isinstance(t.func_data, FuncNameSuffix):
datalist.append('(void *)NULL')
tname = english_upper(chartoname[t.type])
diff --git a/numpy/core/einsumfunc.pyi b/numpy/core/einsumfunc.pyi
index c811a5783..ad483bb90 100644
--- a/numpy/core/einsumfunc.pyi
+++ b/numpy/core/einsumfunc.pyi
@@ -5,10 +5,6 @@ from numpy import (
ndarray,
dtype,
bool_,
- unsignedinteger,
- signedinteger,
- floating,
- complexfloating,
number,
_OrderKACF,
)
@@ -18,12 +14,14 @@ from numpy._typing import (
_ArrayLikeInt_co,
_ArrayLikeFloat_co,
_ArrayLikeComplex_co,
+ _ArrayLikeObject_co,
_DTypeLikeBool,
_DTypeLikeUInt,
_DTypeLikeInt,
_DTypeLikeFloat,
_DTypeLikeComplex,
_DTypeLikeComplex_co,
+ _DTypeLikeObject,
)
_ArrayType = TypeVar(
@@ -132,6 +130,51 @@ def einsum(
optimize: _OptimizeKind = ...,
) -> _ArrayType: ...
+@overload
+def einsum(
+ subscripts: str | _ArrayLikeInt_co,
+ /,
+ *operands: _ArrayLikeObject_co,
+ out: None = ...,
+ dtype: None | _DTypeLikeObject = ...,
+ order: _OrderKACF = ...,
+ casting: _CastingSafe = ...,
+ optimize: _OptimizeKind = ...,
+) -> Any: ...
+@overload
+def einsum(
+ subscripts: str | _ArrayLikeInt_co,
+ /,
+ *operands: Any,
+ casting: _CastingUnsafe,
+ dtype: None | _DTypeLikeObject = ...,
+ out: None = ...,
+ order: _OrderKACF = ...,
+ optimize: _OptimizeKind = ...,
+) -> Any: ...
+@overload
+def einsum(
+ subscripts: str | _ArrayLikeInt_co,
+ /,
+ *operands: _ArrayLikeObject_co,
+ out: _ArrayType,
+ dtype: None | _DTypeLikeObject = ...,
+ order: _OrderKACF = ...,
+ casting: _CastingSafe = ...,
+ optimize: _OptimizeKind = ...,
+) -> _ArrayType: ...
+@overload
+def einsum(
+ subscripts: str | _ArrayLikeInt_co,
+ /,
+ *operands: Any,
+ out: _ArrayType,
+ casting: _CastingUnsafe,
+ dtype: None | _DTypeLikeObject = ...,
+ order: _OrderKACF = ...,
+ optimize: _OptimizeKind = ...,
+) -> _ArrayType: ...
+
# NOTE: `einsum_call` is a hidden kwarg unavailable for public use.
# It is therefore excluded from the signatures below.
# NOTE: In practice the list consists of a `str` (first element)
@@ -139,6 +182,6 @@ def einsum(
def einsum_path(
subscripts: str | _ArrayLikeInt_co,
/,
- *operands: _ArrayLikeComplex_co,
+ *operands: _ArrayLikeComplex_co | _DTypeLikeObject,
optimize: _OptimizeKind = ...,
) -> tuple[list[Any], str]: ...
diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py
index 4608bc6de..69cabb33e 100644
--- a/numpy/core/fromnumeric.py
+++ b/numpy/core/fromnumeric.py
@@ -1801,9 +1801,10 @@ def ravel(a, order='C'):
Returns
-------
y : array_like
- y is an array of the same subtype as `a`, with shape ``(a.size,)``.
- Note that matrices are special cased for backward compatibility, if `a`
- is a matrix, then y is a 1-D ndarray.
+ y is a contiguous 1-D array of the same subtype as `a`,
+ with shape ``(a.size,)``.
+ Note that matrices are special cased for backward compatibility,
+ if `a` is a matrix, then y is a 1-D ndarray.
See Also
--------
@@ -1822,7 +1823,8 @@ def ravel(a, order='C'):
column-major, Fortran-style index ordering.
When a view is desired in as many cases as possible, ``arr.reshape(-1)``
- may be preferable.
+ may be preferable. However, ``ravel`` supports ``K`` in the optional
+ ``order`` argument while ``reshape`` does not.
Examples
--------
diff --git a/numpy/core/fromnumeric.pyi b/numpy/core/fromnumeric.pyi
index 17b17819d..43d178557 100644
--- a/numpy/core/fromnumeric.pyi
+++ b/numpy/core/fromnumeric.pyi
@@ -1047,3 +1047,7 @@ def var(
*,
where: _ArrayLikeBool_co = ...,
) -> _ArrayType: ...
+
+max = amax
+min = amin
+round = around
diff --git a/numpy/core/numerictypes.py b/numpy/core/numerictypes.py
index 7a5948025..aea41bc2e 100644
--- a/numpy/core/numerictypes.py
+++ b/numpy/core/numerictypes.py
@@ -80,6 +80,7 @@ Exported symbols include:
"""
import numbers
+import warnings
from .multiarray import (
ndarray, array, dtype, datetime_data, datetime_as_string,
@@ -599,6 +600,16 @@ def find_common_type(array_types, scalar_types):
"""
Determine common type following standard coercion rules.
+ .. deprecated:: NumPy 1.25
+
+ This function is deprecated, use `numpy.promote_types` or
+ `numpy.result_type` instead. To achieve semantics for the
+ `scalar_types` argument, use `numpy.result_type` and pass the Python
+ values `0`, `0.0`, or `0j`.
+ This will give the same results in almost all cases.
+ More information and rare exception can be found in the
+ `NumPy 1.25 release notes <https://numpy.org/devdocs/release/1.25.0-notes.html>`_.
+
Parameters
----------
array_types : sequence
@@ -646,6 +657,14 @@ def find_common_type(array_types, scalar_types):
dtype('complex128')
"""
+ # Deprecated 2022-11-07, NumPy 1.25
+ warnings.warn(
+ "np.find_common_type is deprecated. Please use `np.result_type` "
+ "or `np.promote_types`.\n"
+ "See https://numpy.org/devdocs/release/1.25.0-notes.html and the "
+ "docs for more information. (Deprecated NumPy 1.25)",
+ DeprecationWarning, stacklevel=2)
+
array_types = [dtype(x) for x in array_types]
scalar_types = [dtype(x) for x in scalar_types]
diff --git a/numpy/core/numerictypes.pyi b/numpy/core/numerictypes.pyi
index d10e4822a..d05861b2e 100644
--- a/numpy/core/numerictypes.pyi
+++ b/numpy/core/numerictypes.pyi
@@ -118,11 +118,6 @@ def issubdtype(arg1: DTypeLike, arg2: DTypeLike) -> bool: ...
def sctype2char(sctype: DTypeLike) -> str: ...
-def find_common_type(
- array_types: Iterable[DTypeLike],
- scalar_types: Iterable[DTypeLike],
-) -> dtype[Any]: ...
-
cast: _typedict[_CastFunc]
nbytes: _typedict[int]
typecodes: _TypeCodes
diff --git a/numpy/core/src/multiarray/dtype_traversal.c b/numpy/core/src/multiarray/dtype_traversal.c
index 769c2e015..f76119f94 100644
--- a/numpy/core/src/multiarray/dtype_traversal.c
+++ b/numpy/core/src/multiarray/dtype_traversal.c
@@ -24,7 +24,6 @@
#include "alloc.h"
#include "array_method.h"
#include "dtypemeta.h"
-#include "refcount.h"
#include "dtype_traversal.h"
@@ -32,6 +31,11 @@
#define NPY_LOWLEVEL_BUFFER_BLOCKSIZE 128
+typedef int get_traverse_func_function(
+ void *traverse_context, PyArray_Descr *dtype, int aligned,
+ npy_intp stride, NPY_traverse_info *clear_info,
+ NPY_ARRAYMETHOD_FLAGS *flags);
+
/*
* Generic Clear function helpers:
*/
@@ -89,6 +93,45 @@ PyArray_GetClearFunction(
}
+/*
+ * Generic zerofill/fill function helper:
+ */
+
+static int
+get_zerofill_function(
+ void *traverse_context, PyArray_Descr *dtype, int aligned,
+ npy_intp stride, NPY_traverse_info *zerofill_info,
+ NPY_ARRAYMETHOD_FLAGS *flags)
+{
+ NPY_traverse_info_init(zerofill_info);
+ /* not that filling code bothers to check e.g. for floating point flags */
+ *flags = PyArrayMethod_MINIMAL_FLAGS;
+
+ get_traverse_loop_function *get_zerofill = NPY_DT_SLOTS(NPY_DTYPE(dtype))->get_fill_zero_loop;
+ if (get_zerofill == NULL) {
+ /* Allowed to be NULL (and accept it here) */
+ return 0;
+ }
+
+ if (get_zerofill(traverse_context, dtype, aligned, stride,
+ &zerofill_info->func, &zerofill_info->auxdata, flags) < 0) {
+ /* callee should clean up, but make sure outside debug mode */
+ assert(zerofill_info->func == NULL);
+ zerofill_info->func = NULL;
+ return -1;
+ }
+ if (zerofill_info->func == NULL) {
+ /* Zerofill also may return func=NULL without an error. */
+ return 0;
+ }
+
+ Py_INCREF(dtype);
+ zerofill_info->descr = dtype;
+
+ return 0;
+}
+
+
/****************** Python Object clear ***********************/
static int
@@ -157,7 +200,7 @@ npy_object_get_fill_zero_loop(void *NPY_UNUSED(traverse_context),
return 0;
}
-/**************** Structured DType clear funcationality ***************/
+/**************** Structured DType generic funcationality ***************/
/*
* Note that legacy user dtypes also make use of this. Someone managed to
@@ -172,20 +215,20 @@ npy_object_get_fill_zero_loop(void *NPY_UNUSED(traverse_context),
typedef struct {
npy_intp src_offset;
NPY_traverse_info info;
-} single_field_clear_data;
+} single_field_traverse_data;
typedef struct {
NpyAuxData base;
npy_intp field_count;
- single_field_clear_data fields[];
-} fields_clear_data;
+ single_field_traverse_data fields[];
+} fields_traverse_data;
/* traverse data free function */
static void
-fields_clear_data_free(NpyAuxData *data)
+fields_traverse_data_free(NpyAuxData *data)
{
- fields_clear_data *d = (fields_clear_data *)data;
+ fields_traverse_data *d = (fields_traverse_data *)data;
for (npy_intp i = 0; i < d->field_count; ++i) {
NPY_traverse_info_xfree(&d->fields[i].info);
@@ -196,16 +239,16 @@ fields_clear_data_free(NpyAuxData *data)
/* traverse data copy function (untested due to no direct use currently) */
static NpyAuxData *
-fields_clear_data_clone(NpyAuxData *data)
+fields_traverse_data_clone(NpyAuxData *data)
{
- fields_clear_data *d = (fields_clear_data *)data;
+ fields_traverse_data *d = (fields_traverse_data *)data;
npy_intp field_count = d->field_count;
- npy_intp structsize = sizeof(fields_clear_data) +
- field_count * sizeof(single_field_clear_data);
+ npy_intp structsize = sizeof(fields_traverse_data) +
+ field_count * sizeof(single_field_traverse_data);
/* Allocate the data and populate it */
- fields_clear_data *newdata = PyMem_Malloc(structsize);
+ fields_traverse_data *newdata = PyMem_Malloc(structsize);
if (newdata == NULL) {
return NULL;
}
@@ -213,15 +256,15 @@ fields_clear_data_clone(NpyAuxData *data)
newdata->field_count = 0;
/* Copy all the fields transfer data */
- single_field_clear_data *in_field = d->fields;
- single_field_clear_data *new_field = newdata->fields;
+ single_field_traverse_data *in_field = d->fields;
+ single_field_traverse_data *new_field = newdata->fields;
for (; newdata->field_count < field_count;
newdata->field_count++, in_field++, new_field++) {
new_field->src_offset = in_field->src_offset;
if (NPY_traverse_info_copy(&new_field->info, &in_field->info) < 0) {
- fields_clear_data_free((NpyAuxData *)newdata);
+ fields_traverse_data_free((NpyAuxData *)newdata);
return NULL;
}
}
@@ -236,7 +279,7 @@ traverse_fields_function(
char *data, npy_intp N, npy_intp stride,
NpyAuxData *auxdata)
{
- fields_clear_data *d = (fields_clear_data *)auxdata;
+ fields_traverse_data *d = (fields_traverse_data *)auxdata;
npy_intp i, field_count = d->field_count;
/* Do the traversing a block at a time for better memory caching */
@@ -245,7 +288,7 @@ traverse_fields_function(
for (;;) {
if (N > blocksize) {
for (i = 0; i < field_count; ++i) {
- single_field_clear_data field = d->fields[i];
+ single_field_traverse_data field = d->fields[i];
if (field.info.func(traverse_context,
field.info.descr, data + field.src_offset,
blocksize, stride, field.info.auxdata) < 0) {
@@ -257,7 +300,7 @@ traverse_fields_function(
}
else {
for (i = 0; i < field_count; ++i) {
- single_field_clear_data field = d->fields[i];
+ single_field_traverse_data field = d->fields[i];
if (field.info.func(traverse_context,
field.info.descr, data + field.src_offset,
N, stride, field.info.auxdata) < 0) {
@@ -271,10 +314,11 @@ traverse_fields_function(
static int
-get_clear_fields_transfer_function(
+get_fields_traverse_function(
void *traverse_context, PyArray_Descr *dtype, int NPY_UNUSED(aligned),
npy_intp stride, traverse_loop_function **out_func,
- NpyAuxData **out_auxdata, NPY_ARRAYMETHOD_FLAGS *flags)
+ NpyAuxData **out_auxdata, NPY_ARRAYMETHOD_FLAGS *flags,
+ get_traverse_func_function *get_traverse_func)
{
PyObject *names, *key, *tup, *title;
PyArray_Descr *fld_dtype;
@@ -285,19 +329,19 @@ get_clear_fields_transfer_function(
field_count = PyTuple_GET_SIZE(dtype->names);
/* Over-allocating here: less fields may be used */
- structsize = (sizeof(fields_clear_data) +
- field_count * sizeof(single_field_clear_data));
+ structsize = (sizeof(fields_traverse_data) +
+ field_count * sizeof(single_field_traverse_data));
/* Allocate the data and populate it */
- fields_clear_data *data = PyMem_Malloc(structsize);
+ fields_traverse_data *data = PyMem_Malloc(structsize);
if (data == NULL) {
PyErr_NoMemory();
return -1;
}
- data->base.free = &fields_clear_data_free;
- data->base.clone = &fields_clear_data_clone;
+ data->base.free = &fields_traverse_data_free;
+ data->base.clone = &fields_traverse_data_clone;
data->field_count = 0;
- single_field_clear_data *field = data->fields;
+ single_field_traverse_data *field = data->fields;
for (i = 0; i < field_count; ++i) {
int offset;
@@ -307,19 +351,26 @@ get_clear_fields_transfer_function(
NPY_AUXDATA_FREE((NpyAuxData *)data);
return -1;
}
- if (PyDataType_REFCHK(fld_dtype)) {
- NPY_ARRAYMETHOD_FLAGS clear_flags;
- if (get_clear_function(
- traverse_context, fld_dtype, 0,
- stride, &field->info, &clear_flags) < 0) {
- NPY_AUXDATA_FREE((NpyAuxData *)data);
- return -1;
- }
- *flags = PyArrayMethod_COMBINED_FLAGS(*flags, clear_flags);
- field->src_offset = offset;
- data->field_count++;
- field++;
+ if (get_traverse_func == &get_clear_function
+ && !PyDataType_REFCHK(fld_dtype)) {
+ /* No need to do clearing (could change to use NULL return) */
+ continue;
+ }
+ NPY_ARRAYMETHOD_FLAGS clear_flags;
+ if (get_traverse_func(
+ traverse_context, fld_dtype, 0,
+ stride, &field->info, &clear_flags) < 0) {
+ NPY_AUXDATA_FREE((NpyAuxData *)data);
+ return -1;
+ }
+ if (field->info.func == NULL) {
+ /* zerofill allows NULL func as "default" memset to zero */
+ continue;
}
+ *flags = PyArrayMethod_COMBINED_FLAGS(*flags, clear_flags);
+ field->src_offset = offset;
+ data->field_count++;
+ field++;
}
*out_func = &traverse_fields_function;
@@ -333,14 +384,14 @@ typedef struct {
NpyAuxData base;
npy_intp count;
NPY_traverse_info info;
-} subarray_clear_data;
+} subarray_traverse_data;
/* traverse data free function */
static void
-subarray_clear_data_free(NpyAuxData *data)
+subarray_traverse_data_free(NpyAuxData *data)
{
- subarray_clear_data *d = (subarray_clear_data *)data;
+ subarray_traverse_data *d = (subarray_traverse_data *)data;
NPY_traverse_info_xfree(&d->info);
PyMem_Free(d);
@@ -351,17 +402,17 @@ subarray_clear_data_free(NpyAuxData *data)
* We seem to be neither using nor exposing this right now, so leave it NULL.
* (The implementation below should be functional.)
*/
-#define subarray_clear_data_clone NULL
+#define subarray_traverse_data_clone NULL
-#ifndef subarray_clear_data_clone
+#ifndef subarray_traverse_data_clone
/* traverse data copy function */
static NpyAuxData *
-subarray_clear_data_clone(NpyAuxData *data)
+subarray_traverse_data_clone(NpyAuxData *data)
{
- subarray_clear_data *d = (subarray_clear_data *)data;
+ subarray_traverse_data *d = (subarray_traverse_data *)data;
/* Allocate the data and populate it */
- subarray_clear_data *newdata = PyMem_Malloc(sizeof(subarray_clear_data));
+ subarray_traverse_data *newdata = PyMem_Malloc(sizeof(subarray_traverse_data));
if (newdata == NULL) {
return NULL;
}
@@ -384,7 +435,7 @@ traverse_subarray_func(
char *data, npy_intp N, npy_intp stride,
NpyAuxData *auxdata)
{
- subarray_clear_data *subarr_data = (subarray_clear_data *)auxdata;
+ subarray_traverse_data *subarr_data = (subarray_traverse_data *)auxdata;
traverse_loop_function *func = subarr_data->info.func;
PyArray_Descr *sub_descr = subarr_data->info.descr;
@@ -404,27 +455,35 @@ traverse_subarray_func(
static int
-get_subarray_clear_func(
+get_subarray_traverse_func(
void *traverse_context, PyArray_Descr *dtype, int aligned,
npy_intp size, npy_intp stride, traverse_loop_function **out_func,
- NpyAuxData **out_auxdata, NPY_ARRAYMETHOD_FLAGS *flags)
+ NpyAuxData **out_auxdata, NPY_ARRAYMETHOD_FLAGS *flags,
+ get_traverse_func_function *get_traverse_func)
{
- subarray_clear_data *auxdata = PyMem_Malloc(sizeof(subarray_clear_data));
+ subarray_traverse_data *auxdata = PyMem_Malloc(sizeof(subarray_traverse_data));
if (auxdata == NULL) {
PyErr_NoMemory();
return -1;
}
auxdata->count = size;
- auxdata->base.free = &subarray_clear_data_free;
- auxdata->base.clone = subarray_clear_data_clone;
+ auxdata->base.free = &subarray_traverse_data_free;
+ auxdata->base.clone = subarray_traverse_data_clone;
- if (get_clear_function(
+ if (get_traverse_func(
traverse_context, dtype, aligned,
dtype->elsize, &auxdata->info, flags) < 0) {
PyMem_Free(auxdata);
return -1;
}
+ if (auxdata->info.func == NULL) {
+        /* zerofill allows func to be NULL, in which case we need not do anything */
+ PyMem_Free(auxdata);
+ *out_func = NULL;
+ *out_auxdata = NULL;
+ return 0;
+ }
*out_func = &traverse_subarray_func;
*out_auxdata = (NpyAuxData *)auxdata;
@@ -469,9 +528,9 @@ npy_get_clear_void_and_legacy_user_dtype_loop(
size = PyArray_MultiplyList(shape.ptr, shape.len);
npy_free_cache_dim_obj(shape);
- if (get_subarray_clear_func(
+ if (get_subarray_traverse_func(
traverse_context, dtype->subarray->base, aligned, size, stride,
- out_func, out_auxdata, flags) < 0) {
+ out_func, out_auxdata, flags, &get_clear_function) < 0) {
return -1;
}
@@ -479,9 +538,9 @@ npy_get_clear_void_and_legacy_user_dtype_loop(
}
/* If there are fields, need to do each field */
else if (PyDataType_HASFIELDS(dtype)) {
- if (get_clear_fields_transfer_function(
+ if (get_fields_traverse_function(
traverse_context, dtype, aligned, stride,
- out_func, out_auxdata, flags) < 0) {
+ out_func, out_auxdata, flags, &get_clear_function) < 0) {
return -1;
}
return 0;
@@ -507,38 +566,86 @@ npy_get_clear_void_and_legacy_user_dtype_loop(
/**************** Structured DType zero fill ***************/
+
static int
-fill_zero_void_with_objects_strided_loop(
- void *NPY_UNUSED(traverse_context), PyArray_Descr *descr,
- char *data, npy_intp size, npy_intp stride,
- NpyAuxData *NPY_UNUSED(auxdata))
+zerofill_fields_function(
+ void *traverse_context, PyArray_Descr *descr,
+ char *data, npy_intp N, npy_intp stride,
+ NpyAuxData *auxdata)
{
- PyObject *zero = PyLong_FromLong(0);
- while (size--) {
- _fillobject(data, zero, descr);
- data += stride;
+ npy_intp itemsize = descr->elsize;
+
+ /*
+ * TODO: We could optimize this by chunking, but since we currently memset
+ * each element always, just loop manually.
+ */
+ while (N--) {
+ memset(data, 0, itemsize);
+ if (traverse_fields_function(
+ traverse_context, descr, data, 1, stride, auxdata) < 0) {
+ return -1;
+ }
+        data += stride;
}
- Py_DECREF(zero);
return 0;
}
-
+/*
+ * Similar to other (e.g. clear) traversal loop getter, but unlike it, we
+ * do need to take care of zeroing out everything (in principle not gaps).
+ * So we add a memset before calling the actual traverse function for the
+ * structured path.
+ */
NPY_NO_EXPORT int
-npy_void_get_fill_zero_loop(void *NPY_UNUSED(traverse_context),
- PyArray_Descr *descr,
- int NPY_UNUSED(aligned),
- npy_intp NPY_UNUSED(fixed_stride),
- traverse_loop_function **out_loop,
- NpyAuxData **NPY_UNUSED(out_auxdata),
- NPY_ARRAYMETHOD_FLAGS *flags)
+npy_get_zerofill_void_and_legacy_user_dtype_loop(
+ void *traverse_context, PyArray_Descr *dtype, int aligned,
+ npy_intp stride, traverse_loop_function **out_func,
+ NpyAuxData **out_auxdata, NPY_ARRAYMETHOD_FLAGS *flags)
{
- *flags = NPY_METH_NO_FLOATINGPOINT_ERRORS;
- if (PyDataType_REFCHK(descr)) {
- *flags |= NPY_METH_REQUIRES_PYAPI;
- *out_loop = &fill_zero_void_with_objects_strided_loop;
+ if (PyDataType_HASSUBARRAY(dtype)) {
+ PyArray_Dims shape = {NULL, -1};
+ npy_intp size;
+
+ if (!(PyArray_IntpConverter(dtype->subarray->shape, &shape))) {
+ PyErr_SetString(PyExc_ValueError,
+ "invalid subarray shape");
+ return -1;
+ }
+ size = PyArray_MultiplyList(shape.ptr, shape.len);
+ npy_free_cache_dim_obj(shape);
+
+ if (get_subarray_traverse_func(
+ traverse_context, dtype->subarray->base, aligned, size, stride,
+ out_func, out_auxdata, flags, &get_zerofill_function) < 0) {
+ return -1;
+ }
+
+ return 0;
}
- else {
- *out_loop = NULL;
+ /* If there are fields, need to do each field */
+ else if (PyDataType_HASFIELDS(dtype)) {
+ if (get_fields_traverse_function(
+ traverse_context, dtype, aligned, stride,
+ out_func, out_auxdata, flags, &get_zerofill_function) < 0) {
+ return -1;
+ }
+ if (((fields_traverse_data *)*out_auxdata)->field_count == 0) {
+ /* If there are no fields, just return NULL for zerofill */
+ NPY_AUXDATA_FREE(*out_auxdata);
+ *out_auxdata = NULL;
+ *out_func = NULL;
+ return 0;
+ }
+ /*
+ * Traversal skips fields that have no custom zeroing, so we need
+ * to take care of it.
+ */
+ *out_func = &zerofill_fields_function;
+ return 0;
}
+
+ /* Otherwise, assume there is nothing to do (user dtypes reach here) */
+ *out_auxdata = NULL;
+ *out_func = NULL;
return 0;
}
diff --git a/numpy/core/src/multiarray/dtype_traversal.h b/numpy/core/src/multiarray/dtype_traversal.h
index a9c185382..fc12c0f7b 100644
--- a/numpy/core/src/multiarray/dtype_traversal.h
+++ b/numpy/core/src/multiarray/dtype_traversal.h
@@ -29,11 +29,10 @@ npy_object_get_fill_zero_loop(
NPY_ARRAYMETHOD_FLAGS *flags);
NPY_NO_EXPORT int
-npy_void_get_fill_zero_loop(
- void *NPY_UNUSED(traverse_context), PyArray_Descr *descr,
- int NPY_UNUSED(aligned), npy_intp NPY_UNUSED(fixed_stride),
- traverse_loop_function **out_loop, NpyAuxData **NPY_UNUSED(out_auxdata),
- NPY_ARRAYMETHOD_FLAGS *flags);
+npy_get_zerofill_void_and_legacy_user_dtype_loop(
+ void *traverse_context, PyArray_Descr *dtype, int aligned,
+ npy_intp stride, traverse_loop_function **out_func,
+ NpyAuxData **out_auxdata, NPY_ARRAYMETHOD_FLAGS *flags);
/* Helper to deal with calling or nesting simple strided loops */
diff --git a/numpy/core/src/multiarray/dtypemeta.c b/numpy/core/src/multiarray/dtypemeta.c
index 4990afa18..9c1f384d8 100644
--- a/numpy/core/src/multiarray/dtypemeta.c
+++ b/numpy/core/src/multiarray/dtypemeta.c
@@ -873,7 +873,8 @@ dtypemeta_wrap_legacy_descriptor(PyArray_Descr *descr,
void_discover_descr_from_pyobject);
dt_slots->common_instance = void_common_instance;
dt_slots->ensure_canonical = void_ensure_canonical;
- dt_slots->get_fill_zero_loop = npy_void_get_fill_zero_loop;
+ dt_slots->get_fill_zero_loop =
+ npy_get_zerofill_void_and_legacy_user_dtype_loop;
dt_slots->get_clear_loop =
npy_get_clear_void_and_legacy_user_dtype_loop;
}
diff --git a/numpy/core/src/multiarray/refcount.c b/numpy/core/src/multiarray/refcount.c
index d200957c3..876bb53e1 100644
--- a/numpy/core/src/multiarray/refcount.c
+++ b/numpy/core/src/multiarray/refcount.c
@@ -350,6 +350,11 @@ PyArray_XDECREF(PyArrayObject *mp)
return 0;
}
+
+static void
+_fillobject(char *optr, PyObject *obj, PyArray_Descr *dtype);
+
+
/*NUMPY_API
* Assumes contiguous
*/
@@ -392,7 +397,7 @@ PyArray_FillObjectArray(PyArrayObject *arr, PyObject *obj)
}
}
-NPY_NO_EXPORT void
+static void
_fillobject(char *optr, PyObject *obj, PyArray_Descr *dtype)
{
if (!PyDataType_FLAGCHK(dtype, NPY_ITEM_REFCOUNT)) {
diff --git a/numpy/core/src/multiarray/refcount.h b/numpy/core/src/multiarray/refcount.h
index 7f39b9ca4..16d34e292 100644
--- a/numpy/core/src/multiarray/refcount.h
+++ b/numpy/core/src/multiarray/refcount.h
@@ -24,7 +24,4 @@ PyArray_XDECREF(PyArrayObject *mp);
NPY_NO_EXPORT void
PyArray_FillObjectArray(PyArrayObject *arr, PyObject *obj);
-NPY_NO_EXPORT void
-_fillobject(char *optr, PyObject *obj, PyArray_Descr *dtype);
-
#endif /* NUMPY_CORE_SRC_MULTIARRAY_REFCOUNT_H_ */
diff --git a/numpy/core/src/multiarray/scalartypes.c.src b/numpy/core/src/multiarray/scalartypes.c.src
index c721800be..ff30737b5 100644
--- a/numpy/core/src/multiarray/scalartypes.c.src
+++ b/numpy/core/src/multiarray/scalartypes.c.src
@@ -279,7 +279,43 @@ static PyObject *
genint_type_str(PyObject *self)
{
PyObject *item, *item_str;
- item = gentype_generic_method(self, NULL, NULL, "item");
+ PyArray_Descr *descr = PyArray_DescrFromTypeObject((PyObject *)Py_TYPE(self));
+ void *val = scalar_value(self, descr);
+ switch (descr->type_num) {
+ case NPY_BYTE:
+ item = PyLong_FromLong(*(int8_t *)val);
+ break;
+ case NPY_UBYTE:
+ item = PyLong_FromUnsignedLong(*(uint8_t *)val);
+ break;
+ case NPY_SHORT:
+ item = PyLong_FromLong(*(int16_t *)val);
+ break;
+ case NPY_USHORT:
+ item = PyLong_FromUnsignedLong(*(uint16_t *)val);
+ break;
+ case NPY_INT:
+ item = PyLong_FromLong(*(int32_t *)val);
+ break;
+ case NPY_UINT:
+ item = PyLong_FromUnsignedLong(*(uint32_t *)val);
+ break;
+        case NPY_LONG:
+            item = PyLong_FromLong(*(long *)val);
+            break;
+        case NPY_ULONG:
+            item = PyLong_FromUnsignedLong(*(unsigned long *)val);
+ break;
+ case NPY_LONGLONG:
+ item = PyLong_FromLongLong(*(long long *)val);
+ break;
+ case NPY_ULONGLONG:
+ item = PyLong_FromUnsignedLongLong(*(unsigned long long *)val);
+ break;
+ default:
+ item = gentype_generic_method(self, NULL, NULL, "item");
+ break;
+ }
if (item == NULL) {
return NULL;
}
diff --git a/numpy/core/src/multiarray/usertypes.c b/numpy/core/src/multiarray/usertypes.c
index 16ca74ef8..5e36c914b 100644
--- a/numpy/core/src/multiarray/usertypes.c
+++ b/numpy/core/src/multiarray/usertypes.c
@@ -302,10 +302,11 @@ PyArray_RegisterDataType(PyArray_Descr *descr)
}
if (use_void_clearimpl) {
/* See comment where use_void_clearimpl is set... */
- PyArray_DTypeMeta *Void = PyArray_DTypeFromTypeNum(NPY_VOID);
NPY_DT_SLOTS(NPY_DTYPE(descr))->get_clear_loop = (
&npy_get_clear_void_and_legacy_user_dtype_loop);
- Py_DECREF(Void);
+ /* Also use the void zerofill since there may be objects */
+        NPY_DT_SLOTS(NPY_DTYPE(descr))->get_fill_zero_loop = (
+ &npy_get_zerofill_void_and_legacy_user_dtype_loop);
}
return typenum;
diff --git a/numpy/core/src/umath/loops.c.src b/numpy/core/src/umath/loops.c.src
index 397ebaca2..97a74b425 100644
--- a/numpy/core/src/umath/loops.c.src
+++ b/numpy/core/src/umath/loops.c.src
@@ -545,6 +545,48 @@ NPY_NO_EXPORT void
/**end repeat1**/
/**end repeat**/
+
+/*
+ * NOTE: It may be nice to vectorize these, OTOH, these are still faster
+ * than the cast we used to do.
+ */
+
+/**begin repeat
+ * #kind = equal, not_equal, less, less_equal, greater, greater_equal#
+ * #OP = ==, !=, <, <=, >, >=#
+ */
+NPY_NO_EXPORT void
+LONGLONG_Qq_bool_@kind@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
+{
+ BINARY_LOOP {
+ const npy_ulonglong in1 = *(npy_ulonglong *)ip1;
+ const npy_longlong in2 = *(npy_longlong *)ip2;
+ if (in2 < 0) {
+ *(npy_bool *)op1 = 0 @OP@ in2;
+ }
+ else {
+ *(npy_bool *)op1 = in1 @OP@ (npy_ulonglong)in2;
+ }
+ }
+}
+
+NPY_NO_EXPORT void
+LONGLONG_qQ_bool_@kind@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
+{
+ BINARY_LOOP {
+ const npy_longlong in1 = *(npy_longlong *)ip1;
+ const npy_ulonglong in2 = *(npy_ulonglong *)ip2;
+ if (in1 < 0) {
+ *(npy_bool *)op1 = in1 @OP@ 0;
+ }
+ else {
+ *(npy_bool *)op1 = (npy_ulonglong)in1 @OP@ in2;
+ }
+ }
+}
+/**end repeat**/
+
+
/*
*****************************************************************************
** DATETIME LOOPS **
diff --git a/numpy/core/src/umath/loops.h.src b/numpy/core/src/umath/loops.h.src
index ab54c1966..cce73aff8 100644
--- a/numpy/core/src/umath/loops.h.src
+++ b/numpy/core/src/umath/loops.h.src
@@ -211,6 +211,17 @@ NPY_NO_EXPORT void
/**end repeat1**/
/**end repeat**/
+/**begin repeat
+ * #kind = equal, not_equal, less, less_equal, greater, greater_equal#
+ * #OP = ==, !=, <, <=, >, >=#
+ */
+NPY_NO_EXPORT void
+LONGLONG_Qq_bool_@kind@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
+NPY_NO_EXPORT void
+LONGLONG_qQ_bool_@kind@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
+
+/**end repeat**/
+
#ifndef NPY_DISABLE_OPTIMIZATION
#include "loops_unary.dispatch.h"
diff --git a/numpy/core/src/umath/ufunc_type_resolution.c b/numpy/core/src/umath/ufunc_type_resolution.c
index 12187d059..decd26580 100644
--- a/numpy/core/src/umath/ufunc_type_resolution.c
+++ b/numpy/core/src/umath/ufunc_type_resolution.c
@@ -381,8 +381,28 @@ PyUFunc_SimpleBinaryComparisonTypeResolver(PyUFuncObject *ufunc,
if (out_dtypes[0] == NULL) {
return -1;
}
- out_dtypes[1] = out_dtypes[0];
- Py_INCREF(out_dtypes[1]);
+ if (PyArray_ISINTEGER(operands[0])
+ && PyArray_ISINTEGER(operands[1])
+ && !PyDataType_ISINTEGER(out_dtypes[0])) {
+ /*
+ * NumPy promotion allows uint+int to go to float, avoid it
+ * (input must have been a mix of signed and unsigned)
+ */
+ if (PyArray_ISSIGNED(operands[0])) {
+ Py_SETREF(out_dtypes[0], PyArray_DescrFromType(NPY_LONGLONG));
+ out_dtypes[1] = PyArray_DescrFromType(NPY_ULONGLONG);
+ Py_INCREF(out_dtypes[1]);
+ }
+ else {
+ Py_SETREF(out_dtypes[0], PyArray_DescrFromType(NPY_ULONGLONG));
+ out_dtypes[1] = PyArray_DescrFromType(NPY_LONGLONG);
+ Py_INCREF(out_dtypes[1]);
+ }
+ }
+ else {
+ out_dtypes[1] = out_dtypes[0];
+ Py_INCREF(out_dtypes[1]);
+ }
}
else {
/* Not doing anything will lead to a loop no found error. */
@@ -398,15 +418,8 @@ PyUFunc_SimpleBinaryComparisonTypeResolver(PyUFuncObject *ufunc,
operands, type_tup, out_dtypes);
}
- /* Output type is always boolean */
+ /* Output type is always boolean (cannot fail for builtins) */
out_dtypes[2] = PyArray_DescrFromType(NPY_BOOL);
- if (out_dtypes[2] == NULL) {
- for (i = 0; i < 2; ++i) {
- Py_DECREF(out_dtypes[i]);
- out_dtypes[i] = NULL;
- }
- return -1;
- }
/* Check against the casting rules */
if (PyUFunc_ValidateCasting(ufunc, casting, operands, out_dtypes) < 0) {
diff --git a/numpy/core/tests/test_arrayprint.py b/numpy/core/tests/test_arrayprint.py
index b92c8ae8c..6796b4077 100644
--- a/numpy/core/tests/test_arrayprint.py
+++ b/numpy/core/tests/test_arrayprint.py
@@ -258,8 +258,7 @@ class TestArray2String:
assert_(np.array2string(s, formatter={'numpystr':lambda s: s*2}) ==
'[abcabc defdef]')
-
- def test_structure_format(self):
+ def test_structure_format_mixed(self):
dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
x = np.array([('Sarah', (8.0, 7.0)), ('John', (6.0, 7.0))], dtype=dt)
assert_equal(np.array2string(x),
@@ -301,6 +300,7 @@ class TestArray2String:
( 'NaT',) ( 'NaT',) ( 'NaT',)]""")
)
+ def test_structure_format_int(self):
# See #8160
struct_int = np.array([([1, -1],), ([123, 1],)], dtype=[('B', 'i4', 2)])
assert_equal(np.array2string(struct_int),
@@ -310,6 +310,7 @@ class TestArray2String:
assert_equal(np.array2string(struct_2dint),
"[([[ 0, 1], [ 2, 3]],) ([[12, 0], [ 0, 0]],)]")
+ def test_structure_format_float(self):
# See #8172
array_scalar = np.array(
(1., 2.1234567890123456789, 3.), dtype=('f8,f8,f8'))
diff --git a/numpy/core/tests/test_nep50_promotions.py b/numpy/core/tests/test_nep50_promotions.py
index 10d91aa31..9e84c78c1 100644
--- a/numpy/core/tests/test_nep50_promotions.py
+++ b/numpy/core/tests/test_nep50_promotions.py
@@ -180,3 +180,11 @@ def test_nep50_integer_regression():
arr = np.array(1)
assert (arr + 2**63).dtype == np.float64
assert (arr[()] + 2**63).dtype == np.float64
+
+def test_nep50_with_axisconcatenator():
+ # I promised that this will be an error in the future in the 1.25
+ # release notes; test this (NEP 50 opt-in makes the deprecation an error).
+ np._set_promotion_state("weak")
+
+ with pytest.raises(OverflowError):
+ np.r_[np.arange(5, dtype=np.int8), 255]
diff --git a/numpy/core/tests/test_numerictypes.py b/numpy/core/tests/test_numerictypes.py
index 2ac77a312..bab5bf246 100644
--- a/numpy/core/tests/test_numerictypes.py
+++ b/numpy/core/tests/test_numerictypes.py
@@ -339,23 +339,28 @@ class TestEmptyField:
class TestCommonType:
def test_scalar_loses1(self):
- res = np.find_common_type(['f4', 'f4', 'i2'], ['f8'])
+ with pytest.warns(DeprecationWarning, match="np.find_common_type"):
+ res = np.find_common_type(['f4', 'f4', 'i2'], ['f8'])
assert_(res == 'f4')
def test_scalar_loses2(self):
- res = np.find_common_type(['f4', 'f4'], ['i8'])
+ with pytest.warns(DeprecationWarning, match="np.find_common_type"):
+ res = np.find_common_type(['f4', 'f4'], ['i8'])
assert_(res == 'f4')
def test_scalar_wins(self):
- res = np.find_common_type(['f4', 'f4', 'i2'], ['c8'])
+ with pytest.warns(DeprecationWarning, match="np.find_common_type"):
+ res = np.find_common_type(['f4', 'f4', 'i2'], ['c8'])
assert_(res == 'c8')
def test_scalar_wins2(self):
- res = np.find_common_type(['u4', 'i4', 'i4'], ['f4'])
+ with pytest.warns(DeprecationWarning, match="np.find_common_type"):
+ res = np.find_common_type(['u4', 'i4', 'i4'], ['f4'])
assert_(res == 'f8')
def test_scalar_wins3(self): # doesn't go up to 'f16' on purpose
- res = np.find_common_type(['u8', 'i8', 'i8'], ['f8'])
+ with pytest.warns(DeprecationWarning, match="np.find_common_type"):
+ res = np.find_common_type(['u8', 'i8', 'i8'], ['f8'])
assert_(res == 'f8')
class TestMultipleFields:
diff --git a/numpy/core/tests/test_regression.py b/numpy/core/tests/test_regression.py
index 141636034..841144790 100644
--- a/numpy/core/tests/test_regression.py
+++ b/numpy/core/tests/test_regression.py
@@ -1667,7 +1667,9 @@ class TestRegression:
def test_find_common_type_boolean(self):
# Ticket #1695
- assert_(np.find_common_type([], ['?', '?']) == '?')
+ with pytest.warns(DeprecationWarning, match="np.find_common_type"):
+ res = np.find_common_type([], ['?', '?'])
+ assert res == '?'
def test_empty_mul(self):
a = np.array([1.])
diff --git a/numpy/core/tests/test_umath.py b/numpy/core/tests/test_umath.py
index b4f8d0c69..9e3fe387b 100644
--- a/numpy/core/tests/test_umath.py
+++ b/numpy/core/tests/test_umath.py
@@ -369,6 +369,64 @@ class TestComparisons:
with pytest.raises(TypeError, match="No loop matching"):
np.equal(1, 1, sig=(None, None, "l"))
+ @pytest.mark.parametrize("dtypes", ["qQ", "Qq"])
+ @pytest.mark.parametrize('py_comp, np_comp', [
+ (operator.lt, np.less),
+ (operator.le, np.less_equal),
+ (operator.gt, np.greater),
+ (operator.ge, np.greater_equal),
+ (operator.eq, np.equal),
+ (operator.ne, np.not_equal)
+ ])
+ @pytest.mark.parametrize("vals", [(2**60, 2**60+1), (2**60+1, 2**60)])
+ def test_large_integer_direct_comparison(
+ self, dtypes, py_comp, np_comp, vals):
+ # Note that float(2**60) + 1 == float(2**60).
+ a1 = np.array([2**60], dtype=dtypes[0])
+ a2 = np.array([2**60 + 1], dtype=dtypes[1])
+ expected = py_comp(2**60, 2**60+1)
+
+ assert py_comp(a1, a2) == expected
+ assert np_comp(a1, a2) == expected
+ # Also check the scalars:
+ s1 = a1[0]
+ s2 = a2[0]
+ assert isinstance(s1, np.integer)
+ assert isinstance(s2, np.integer)
+ # The Python operator here is mainly interesting:
+ assert py_comp(s1, s2) == expected
+ assert np_comp(s1, s2) == expected
+
+ @pytest.mark.parametrize("dtype", np.typecodes['UnsignedInteger'])
+ @pytest.mark.parametrize('py_comp_func, np_comp_func', [
+ (operator.lt, np.less),
+ (operator.le, np.less_equal),
+ (operator.gt, np.greater),
+ (operator.ge, np.greater_equal),
+ (operator.eq, np.equal),
+ (operator.ne, np.not_equal)
+ ])
+ @pytest.mark.parametrize("flip", [True, False])
+ def test_unsigned_signed_direct_comparison(
+ self, dtype, py_comp_func, np_comp_func, flip):
+ if flip:
+ py_comp = lambda x, y: py_comp_func(y, x)
+ np_comp = lambda x, y: np_comp_func(y, x)
+ else:
+ py_comp = py_comp_func
+ np_comp = np_comp_func
+
+ arr = np.array([np.iinfo(dtype).max], dtype=dtype)
+ expected = py_comp(int(arr[0]), -1)
+
+ assert py_comp(arr, -1) == expected
+ assert np_comp(arr, -1) == expected
+ scalar = arr[0]
+ assert isinstance(scalar, np.integer)
+ # The Python operator here is mainly interesting:
+ assert py_comp(scalar, -1) == expected
+ assert np_comp(scalar, -1) == expected
+
class TestAdd:
def test_reduce_alignment(self):
diff --git a/numpy/distutils/checks/cpu_avx512_spr.c b/numpy/distutils/checks/cpu_avx512_spr.c
index 3c9575a57..9710d0b2f 100644
--- a/numpy/distutils/checks/cpu_avx512_spr.c
+++ b/numpy/distutils/checks/cpu_avx512_spr.c
@@ -15,6 +15,10 @@
int main(int argc, char **argv)
{
+/* clang has a bug regarding our spr code, see gh-23730. */
+#if __clang__
+#error
+#endif
__m512h a = _mm512_loadu_ph((void*)argv[argc-1]);
__m512h temp = _mm512_fmadd_ph(a, a, a);
_mm512_storeu_ph((void*)(argv[argc-1]), temp);
diff --git a/numpy/exceptions.pyi b/numpy/exceptions.pyi
index 53b7a0c16..c76a0946b 100644
--- a/numpy/exceptions.pyi
+++ b/numpy/exceptions.pyi
@@ -1,8 +1,18 @@
-from numpy.exceptions import (
- ComplexWarning as ComplexWarning,
- ModuleDeprecationWarning as ModuleDeprecationWarning,
- VisibleDeprecationWarning as VisibleDeprecationWarning,
- TooHardError as TooHardError,
- AxisError as AxisError,
-)
+from typing import overload
+__all__: list[str]
+
+class ComplexWarning(RuntimeWarning): ...
+class ModuleDeprecationWarning(DeprecationWarning): ...
+class VisibleDeprecationWarning(UserWarning): ...
+class TooHardError(RuntimeError): ...
+class DTypePromotionError(TypeError): ...
+
+class AxisError(ValueError, IndexError):
+ axis: None | int
+ ndim: None | int
+ @overload
+ def __init__(self, axis: str, ndim: None = ..., msg_prefix: None = ...) -> None: ...
+ @overload
+ def __init__(self, axis: int, ndim: int, msg_prefix: None | str = ...) -> None: ...
+ def __str__(self) -> str: ...
diff --git a/numpy/f2py/crackfortran.py b/numpy/f2py/crackfortran.py
index c0a79bcae..4871d2628 100755
--- a/numpy/f2py/crackfortran.py
+++ b/numpy/f2py/crackfortran.py
@@ -613,15 +613,15 @@ beginpattern90 = re.compile(
groupends = (r'end|endprogram|endblockdata|endmodule|endpythonmodule|'
r'endinterface|endsubroutine|endfunction')
endpattern = re.compile(
- beforethisafter % ('', groupends, groupends, r'.*'), re.I), 'end'
+ beforethisafter % ('', groupends, groupends, '.*'), re.I), 'end'
endifs = r'end\s*(if|do|where|select|while|forall|associate|block|' + \
r'critical|enum|team)'
endifpattern = re.compile(
- beforethisafter % (r'[\w]*?', endifs, endifs, r'[\w\s]*'), re.I), 'endif'
+ beforethisafter % (r'[\w]*?', endifs, endifs, '.*'), re.I), 'endif'
#
moduleprocedures = r'module\s*procedure'
moduleprocedurepattern = re.compile(
- beforethisafter % ('', moduleprocedures, moduleprocedures, r'.*'), re.I), \
+ beforethisafter % ('', moduleprocedures, moduleprocedures, '.*'), re.I), \
'moduleprocedure'
implicitpattern = re.compile(
beforethisafter % ('', 'implicit', 'implicit', '.*'), re.I), 'implicit'
diff --git a/numpy/f2py/tests/src/crackfortran/gh23533.f b/numpy/f2py/tests/src/crackfortran/gh23533.f
new file mode 100644
index 000000000..db522afa7
--- /dev/null
+++ b/numpy/f2py/tests/src/crackfortran/gh23533.f
@@ -0,0 +1,5 @@
+ SUBROUTINE EXAMPLE( )
+ IF( .TRUE. ) THEN
+ CALL DO_SOMETHING()
+ END IF ! ** .TRUE. **
+ END
diff --git a/numpy/f2py/tests/test_crackfortran.py b/numpy/f2py/tests/test_crackfortran.py
index dc0f7e27a..49bfc13af 100644
--- a/numpy/f2py/tests/test_crackfortran.py
+++ b/numpy/f2py/tests/test_crackfortran.py
@@ -135,6 +135,7 @@ class TestMarkinnerspaces:
assert markinnerspaces("a 'b c' 'd e'") == "a 'b@_@c' 'd@_@e'"
assert markinnerspaces(r'a "b c" "d e"') == r'a "b@_@c" "d@_@e"'
+
class TestDimSpec(util.F2PyTest):
"""This test suite tests various expressions that are used as dimension
specifications.
@@ -244,6 +245,7 @@ class TestModuleDeclaration:
assert len(mod) == 1
assert mod[0]["vars"]["abar"]["="] == "bar('abar')"
+
class TestEval(util.F2PyTest):
def test_eval_scalar(self):
eval_scalar = crackfortran._eval_scalar
@@ -268,6 +270,7 @@ class TestFortranReader(util.F2PyTest):
mod = crackfortran.crackfortran([str(f_path)])
assert mod[0]['name'] == 'foo'
+
class TestUnicodeComment(util.F2PyTest):
sources = [util.getpath("tests", "src", "crackfortran", "unicode_comment.f90")]
@@ -278,6 +281,7 @@ class TestUnicodeComment(util.F2PyTest):
def test_encoding_comment(self):
self.module.foo(3)
+
class TestNameArgsPatternBacktracking:
@pytest.mark.parametrize(
['adversary'],
@@ -321,3 +325,13 @@ class TestFunctionReturn(util.F2PyTest):
def test_function_rettype(self):
# gh-23598
assert self.module.intproduct(3, 4) == 12
+
+
+class TestFortranGroupCounters(util.F2PyTest):
+ def test_end_if_comment(self):
+ # gh-23533
+ fpath = util.getpath("tests", "src", "crackfortran", "gh23533.f")
+ try:
+ crackfortran.crackfortran([str(fpath)])
+ except Exception as exc:
+ assert False, f"'crackfortran.crackfortran' raised an exception {exc}"
diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index 22371a038..02e141920 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -2999,10 +2999,15 @@ def blackman(M):
>>> plt.show()
"""
+ # Ensures at least float64 via 0.0. M should be an integer, but conversion
+ # to double is safe for a range.
+ values = np.array([0.0, M])
+ M = values[1]
+
if M < 1:
- return array([], dtype=np.result_type(M, 0.0))
+ return array([], dtype=values.dtype)
if M == 1:
- return ones(1, dtype=np.result_type(M, 0.0))
+ return ones(1, dtype=values.dtype)
n = arange(1-M, M, 2)
return 0.42 + 0.5*cos(pi*n/(M-1)) + 0.08*cos(2.0*pi*n/(M-1))
@@ -3107,10 +3112,15 @@ def bartlett(M):
>>> plt.show()
"""
+ # Ensures at least float64 via 0.0. M should be an integer, but conversion
+ # to double is safe for a range.
+ values = np.array([0.0, M])
+ M = values[1]
+
if M < 1:
- return array([], dtype=np.result_type(M, 0.0))
+ return array([], dtype=values.dtype)
if M == 1:
- return ones(1, dtype=np.result_type(M, 0.0))
+ return ones(1, dtype=values.dtype)
n = arange(1-M, M, 2)
return where(less_equal(n, 0), 1 + n/(M-1), 1 - n/(M-1))
@@ -3211,10 +3221,15 @@ def hanning(M):
>>> plt.show()
"""
+ # Ensures at least float64 via 0.0. M should be an integer, but conversion
+ # to double is safe for a range.
+ values = np.array([0.0, M])
+ M = values[1]
+
if M < 1:
- return array([], dtype=np.result_type(M, 0.0))
+ return array([], dtype=values.dtype)
if M == 1:
- return ones(1, dtype=np.result_type(M, 0.0))
+ return ones(1, dtype=values.dtype)
n = arange(1-M, M, 2)
return 0.5 + 0.5*cos(pi*n/(M-1))
@@ -3311,10 +3326,15 @@ def hamming(M):
>>> plt.show()
"""
+ # Ensures at least float64 via 0.0. M should be an integer, but conversion
+ # to double is safe for a range.
+ values = np.array([0.0, M])
+ M = values[1]
+
if M < 1:
- return array([], dtype=np.result_type(M, 0.0))
+ return array([], dtype=values.dtype)
if M == 1:
- return ones(1, dtype=np.result_type(M, 0.0))
+ return ones(1, dtype=values.dtype)
n = arange(1-M, M, 2)
return 0.54 + 0.46*cos(pi*n/(M-1))
@@ -3590,11 +3610,19 @@ def kaiser(M, beta):
>>> plt.show()
"""
+ # Ensures at least float64 via 0.0. M should be an integer, but conversion
+ # to double is safe for a range. (Simplified result_type with 0.0
+ # strongly typed. result-type is not/less order sensitive, but that mainly
+ # matters for integers anyway.)
+ values = np.array([0.0, M, beta])
+ M = values[1]
+ beta = values[2]
+
if M == 1:
- return np.ones(1, dtype=np.result_type(M, 0.0))
+ return np.ones(1, dtype=values.dtype)
n = arange(0, M)
alpha = (M-1)/2.0
- return i0(beta * sqrt(1-((n-alpha)/alpha)**2.0))/i0(float(beta))
+ return i0(beta * sqrt(1-((n-alpha)/alpha)**2.0))/i0(beta)
def _sinc_dispatcher(x):
diff --git a/numpy/lib/index_tricks.py b/numpy/lib/index_tricks.py
index e89bdbbc2..6913d2b95 100644
--- a/numpy/lib/index_tricks.py
+++ b/numpy/lib/index_tricks.py
@@ -7,7 +7,7 @@ import numpy as np
from .._utils import set_module
import numpy.core.numeric as _nx
from numpy.core.numeric import ScalarType, array
-from numpy.core.numerictypes import find_common_type, issubdtype
+from numpy.core.numerictypes import issubdtype
import numpy.matrixlib as matrixlib
from .function_base import diff
@@ -340,9 +340,8 @@ class AxisConcatenator:
axis = self.axis
objs = []
- scalars = []
- arraytypes = []
- scalartypes = []
+ # dtypes or scalars for weak scalar handling in result_type
+ result_type_objs = []
for k, item in enumerate(key):
scalar = False
@@ -388,10 +387,8 @@ class AxisConcatenator:
except (ValueError, TypeError) as e:
raise ValueError("unknown special directive") from e
elif type(item) in ScalarType:
- newobj = array(item, ndmin=ndmin)
- scalars.append(len(objs))
scalar = True
- scalartypes.append(newobj.dtype)
+ newobj = item
else:
item_ndim = np.ndim(item)
newobj = array(item, copy=False, subok=True, ndmin=ndmin)
@@ -403,15 +400,20 @@ class AxisConcatenator:
defaxes = list(range(ndmin))
axes = defaxes[:k1] + defaxes[k2:] + defaxes[k1:k2]
newobj = newobj.transpose(axes)
+
objs.append(newobj)
- if not scalar and isinstance(newobj, _nx.ndarray):
- arraytypes.append(newobj.dtype)
-
- # Ensure that scalars won't up-cast unless warranted
- final_dtype = find_common_type(arraytypes, scalartypes)
- if final_dtype is not None:
- for k in scalars:
- objs[k] = objs[k].astype(final_dtype)
+ if scalar:
+ result_type_objs.append(item)
+ else:
+ result_type_objs.append(newobj.dtype)
+
+ # Ensure that scalars won't up-cast unless warranted, for 0, drops
+ # through to error in concatenate.
+ if len(result_type_objs) != 0:
+ final_dtype = _nx.result_type(*result_type_objs)
+            # concatenate could do cast, but that can be overridden:
+ objs = [array(obj, copy=False, subok=True,
+ ndmin=ndmin, dtype=final_dtype) for obj in objs]
res = self.concatenate(tuple(objs), axis=axis)
diff --git a/numpy/lib/npyio.py b/numpy/lib/npyio.py
index 22fb0eb7d..339b1dc62 100644
--- a/numpy/lib/npyio.py
+++ b/numpy/lib/npyio.py
@@ -260,7 +260,7 @@ class NpzFile(Mapping):
else:
return self.zip.read(key)
else:
- raise KeyError("%s is not a file in the archive" % key)
+ raise KeyError(f"{key} is not a file in the archive")
def __contains__(self, key):
return (key in self._files or key in self.files)
diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py
index 5a68fbc97..c1032df8e 100644
--- a/numpy/lib/tests/test_io.py
+++ b/numpy/lib/tests/test_io.py
@@ -232,6 +232,17 @@ class TestSavezLoad(RoundtripTest):
assert_equal(a, l['file_a'])
assert_equal(b, l['file_b'])
+
+ def test_tuple_getitem_raises(self):
+ # gh-23748
+ a = np.array([1, 2, 3])
+ f = BytesIO()
+ np.savez(f, a=a)
+ f.seek(0)
+ l = np.load(f)
+ with pytest.raises(KeyError, match="(1, 2)"):
+ l[1, 2]
+
def test_BagObj(self):
a = np.array([[1, 2], [3, 4]], float)
b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex)
diff --git a/numpy/ma/__init__.pyi b/numpy/ma/__init__.pyi
index 7f5cb56a8..ce72383e5 100644
--- a/numpy/ma/__init__.pyi
+++ b/numpy/ma/__init__.pyi
@@ -155,7 +155,6 @@ from numpy.ma.core import (
resize as resize,
right_shift as right_shift,
round as round,
- round_ as round_,
set_fill_value as set_fill_value,
shape as shape,
sin as sin,
diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi
index 15f37c422..e94ebce3c 100644
--- a/numpy/ma/core.pyi
+++ b/numpy/ma/core.pyi
@@ -435,8 +435,7 @@ def size(obj, axis=...): ...
def diff(a, /, n=..., axis=..., prepend=..., append=...): ...
def where(condition, x=..., y=...): ...
def choose(indices, choices, out=..., mode=...): ...
-def round_(a, decimals=..., out=...): ...
-round = round_
+def round(a, decimals=..., out=...): ...
def inner(a, b): ...
innerproduct = inner
diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py
index 55b15fc34..6ab1d7e4f 100644
--- a/numpy/ma/tests/test_core.py
+++ b/numpy/ma/tests/test_core.py
@@ -4543,7 +4543,7 @@ class TestMaskedArrayFunctions:
x = np.arange(4, dtype=np.int32)
y = np.arange(4, dtype=np.float32) * 2.2
test = where(x > 1.5, y, x).dtype
- control = np.find_common_type([np.int32, np.float32], [])
+ control = np.result_type(np.int32, np.float32)
assert_equal(test, control)
def test_where_broadcast(self):
diff --git a/numpy/polynomial/chebyshev.py b/numpy/polynomial/chebyshev.py
index c663ffab0..efbe13e0c 100644
--- a/numpy/polynomial/chebyshev.py
+++ b/numpy/polynomial/chebyshev.py
@@ -2012,6 +2012,12 @@ class Chebyshev(ABCPolyBase):
Window, see `domain` for its use. The default value is [-1, 1].
.. versionadded:: 1.6.0
+ symbol : str, optional
+ Symbol used to represent the independent variable in string
+ representations of the polynomial expression, e.g. for printing.
+ The symbol must be a valid Python identifier. Default value is 'x'.
+
+ .. versionadded:: 1.24
"""
# Virtual Functions
diff --git a/numpy/polynomial/hermite.py b/numpy/polynomial/hermite.py
index e20339121..210df25f5 100644
--- a/numpy/polynomial/hermite.py
+++ b/numpy/polynomial/hermite.py
@@ -1675,6 +1675,12 @@ class Hermite(ABCPolyBase):
Window, see `domain` for its use. The default value is [-1, 1].
.. versionadded:: 1.6.0
+ symbol : str, optional
+ Symbol used to represent the independent variable in string
+ representations of the polynomial expression, e.g. for printing.
+ The symbol must be a valid Python identifier. Default value is 'x'.
+
+ .. versionadded:: 1.24
"""
# Virtual Functions
diff --git a/numpy/polynomial/hermite_e.py b/numpy/polynomial/hermite_e.py
index 182c562c2..bdf29405b 100644
--- a/numpy/polynomial/hermite_e.py
+++ b/numpy/polynomial/hermite_e.py
@@ -1667,6 +1667,12 @@ class HermiteE(ABCPolyBase):
Window, see `domain` for its use. The default value is [-1, 1].
.. versionadded:: 1.6.0
+ symbol : str, optional
+ Symbol used to represent the independent variable in string
+ representations of the polynomial expression, e.g. for printing.
+ The symbol must be a valid Python identifier. Default value is 'x'.
+
+ .. versionadded:: 1.24
"""
# Virtual Functions
diff --git a/numpy/polynomial/laguerre.py b/numpy/polynomial/laguerre.py
index 2eacceced..925d4898e 100644
--- a/numpy/polynomial/laguerre.py
+++ b/numpy/polynomial/laguerre.py
@@ -1623,6 +1623,12 @@ class Laguerre(ABCPolyBase):
Window, see `domain` for its use. The default value is [0, 1].
.. versionadded:: 1.6.0
+ symbol : str, optional
+ Symbol used to represent the independent variable in string
+ representations of the polynomial expression, e.g. for printing.
+ The symbol must be a valid Python identifier. Default value is 'x'.
+
+ .. versionadded:: 1.24
"""
# Virtual Functions
diff --git a/numpy/polynomial/legendre.py b/numpy/polynomial/legendre.py
index 028e2fe7b..8e9c19d94 100644
--- a/numpy/polynomial/legendre.py
+++ b/numpy/polynomial/legendre.py
@@ -1636,6 +1636,12 @@ class Legendre(ABCPolyBase):
Window, see `domain` for its use. The default value is [-1, 1].
.. versionadded:: 1.6.0
+ symbol : str, optional
+ Symbol used to represent the independent variable in string
+ representations of the polynomial expression, e.g. for printing.
+ The symbol must be a valid Python identifier. Default value is 'x'.
+
+ .. versionadded:: 1.24
"""
# Virtual Functions
diff --git a/numpy/polynomial/polynomial.py b/numpy/polynomial/polynomial.py
index d102f5a30..ceadff0bf 100644
--- a/numpy/polynomial/polynomial.py
+++ b/numpy/polynomial/polynomial.py
@@ -1489,6 +1489,12 @@ class Polynomial(ABCPolyBase):
Window, see `domain` for its use. The default value is [-1, 1].
.. versionadded:: 1.6.0
+ symbol : str, optional
+ Symbol used to represent the independent variable in string
+ representations of the polynomial expression, e.g. for printing.
+ The symbol must be a valid Python identifier. Default value is 'x'.
+
+ .. versionadded:: 1.24
"""
# Virtual Functions
diff --git a/numpy/random/_common.pxd b/numpy/random/_common.pxd
index 3eaf39ddf..659da0d2d 100644
--- a/numpy/random/_common.pxd
+++ b/numpy/random/_common.pxd
@@ -39,32 +39,32 @@ cdef extern from "include/aligned_malloc.h":
cdef void *PyArray_calloc_aligned(size_t n, size_t s)
cdef void PyArray_free_aligned(void *p)
-ctypedef void (*random_double_fill)(bitgen_t *state, np.npy_intp count, double* out) nogil
-ctypedef double (*random_double_0)(void *state) nogil
-ctypedef double (*random_double_1)(void *state, double a) nogil
-ctypedef double (*random_double_2)(void *state, double a, double b) nogil
-ctypedef double (*random_double_3)(void *state, double a, double b, double c) nogil
+ctypedef void (*random_double_fill)(bitgen_t *state, np.npy_intp count, double* out) noexcept nogil
+ctypedef double (*random_double_0)(void *state) noexcept nogil
+ctypedef double (*random_double_1)(void *state, double a) noexcept nogil
+ctypedef double (*random_double_2)(void *state, double a, double b) noexcept nogil
+ctypedef double (*random_double_3)(void *state, double a, double b, double c) noexcept nogil
-ctypedef void (*random_float_fill)(bitgen_t *state, np.npy_intp count, float* out) nogil
-ctypedef float (*random_float_0)(bitgen_t *state) nogil
-ctypedef float (*random_float_1)(bitgen_t *state, float a) nogil
+ctypedef void (*random_float_fill)(bitgen_t *state, np.npy_intp count, float* out) noexcept nogil
+ctypedef float (*random_float_0)(bitgen_t *state) noexcept nogil
+ctypedef float (*random_float_1)(bitgen_t *state, float a) noexcept nogil
-ctypedef int64_t (*random_uint_0)(void *state) nogil
-ctypedef int64_t (*random_uint_d)(void *state, double a) nogil
-ctypedef int64_t (*random_uint_dd)(void *state, double a, double b) nogil
-ctypedef int64_t (*random_uint_di)(void *state, double a, uint64_t b) nogil
-ctypedef int64_t (*random_uint_i)(void *state, int64_t a) nogil
-ctypedef int64_t (*random_uint_iii)(void *state, int64_t a, int64_t b, int64_t c) nogil
+ctypedef int64_t (*random_uint_0)(void *state) noexcept nogil
+ctypedef int64_t (*random_uint_d)(void *state, double a) noexcept nogil
+ctypedef int64_t (*random_uint_dd)(void *state, double a, double b) noexcept nogil
+ctypedef int64_t (*random_uint_di)(void *state, double a, uint64_t b) noexcept nogil
+ctypedef int64_t (*random_uint_i)(void *state, int64_t a) noexcept nogil
+ctypedef int64_t (*random_uint_iii)(void *state, int64_t a, int64_t b, int64_t c) noexcept nogil
-ctypedef uint32_t (*random_uint_0_32)(bitgen_t *state) nogil
-ctypedef uint32_t (*random_uint_1_i_32)(bitgen_t *state, uint32_t a) nogil
+ctypedef uint32_t (*random_uint_0_32)(bitgen_t *state) noexcept nogil
+ctypedef uint32_t (*random_uint_1_i_32)(bitgen_t *state, uint32_t a) noexcept nogil
-ctypedef int32_t (*random_int_2_i_32)(bitgen_t *state, int32_t a, int32_t b) nogil
-ctypedef int64_t (*random_int_2_i)(bitgen_t *state, int64_t a, int64_t b) nogil
+ctypedef int32_t (*random_int_2_i_32)(bitgen_t *state, int32_t a, int32_t b) noexcept nogil
+ctypedef int64_t (*random_int_2_i)(bitgen_t *state, int64_t a, int64_t b) noexcept nogil
-cdef double kahan_sum(double *darr, np.npy_intp n)
+cdef double kahan_sum(double *darr, np.npy_intp n) noexcept
-cdef inline double uint64_to_double(uint64_t rnd) nogil:
+cdef inline double uint64_to_double(uint64_t rnd) noexcept nogil:
return (rnd >> 11) * (1.0 / 9007199254740992.0)
cdef object double_fill(void *func, bitgen_t *state, object size, object lock, object out)
diff --git a/numpy/random/_common.pyx b/numpy/random/_common.pyx
index 7b6f69303..c5e4e3297 100644
--- a/numpy/random/_common.pyx
+++ b/numpy/random/_common.pyx
@@ -171,7 +171,7 @@ cdef object prepare_ctypes(bitgen_t *bitgen):
ctypes.c_void_p(<uintptr_t>bitgen))
return _ctypes
-cdef double kahan_sum(double *darr, np.npy_intp n):
+cdef double kahan_sum(double *darr, np.npy_intp n) noexcept:
"""
Parameters
----------
diff --git a/numpy/random/_mt19937.pyx b/numpy/random/_mt19937.pyx
index 5a8d52e6b..8b991254a 100644
--- a/numpy/random/_mt19937.pyx
+++ b/numpy/random/_mt19937.pyx
@@ -28,16 +28,16 @@ cdef extern from "src/mt19937/mt19937.h":
enum:
RK_STATE_LEN
-cdef uint64_t mt19937_uint64(void *st) nogil:
+cdef uint64_t mt19937_uint64(void *st) noexcept nogil:
return mt19937_next64(<mt19937_state *> st)
-cdef uint32_t mt19937_uint32(void *st) nogil:
+cdef uint32_t mt19937_uint32(void *st) noexcept nogil:
return mt19937_next32(<mt19937_state *> st)
-cdef double mt19937_double(void *st) nogil:
+cdef double mt19937_double(void *st) noexcept nogil:
return mt19937_next_double(<mt19937_state *> st)
-cdef uint64_t mt19937_raw(void *st) nogil:
+cdef uint64_t mt19937_raw(void *st) noexcept nogil:
return <uint64_t>mt19937_next32(<mt19937_state *> st)
cdef class MT19937(BitGenerator):
diff --git a/numpy/random/_pcg64.pyx b/numpy/random/_pcg64.pyx
index c0a10a812..f7891aa85 100644
--- a/numpy/random/_pcg64.pyx
+++ b/numpy/random/_pcg64.pyx
@@ -26,26 +26,26 @@ cdef extern from "src/pcg64/pcg64.h":
void pcg64_get_state(pcg64_state *state, uint64_t *state_arr, int *has_uint32, uint32_t *uinteger)
void pcg64_set_state(pcg64_state *state, uint64_t *state_arr, int has_uint32, uint32_t uinteger)
- uint64_t pcg64_cm_next64(pcg64_state *state) nogil
- uint32_t pcg64_cm_next32(pcg64_state *state) nogil
+ uint64_t pcg64_cm_next64(pcg64_state *state) noexcept nogil
+ uint32_t pcg64_cm_next32(pcg64_state *state) noexcept nogil
void pcg64_cm_advance(pcg64_state *state, uint64_t *step)
-cdef uint64_t pcg64_uint64(void* st) nogil:
+cdef uint64_t pcg64_uint64(void* st) noexcept nogil:
return pcg64_next64(<pcg64_state *>st)
-cdef uint32_t pcg64_uint32(void *st) nogil:
+cdef uint32_t pcg64_uint32(void *st) noexcept nogil:
return pcg64_next32(<pcg64_state *> st)
-cdef double pcg64_double(void* st) nogil:
+cdef double pcg64_double(void* st) noexcept nogil:
return uint64_to_double(pcg64_next64(<pcg64_state *>st))
-cdef uint64_t pcg64_cm_uint64(void* st) nogil:
+cdef uint64_t pcg64_cm_uint64(void* st) noexcept nogil:
return pcg64_cm_next64(<pcg64_state *>st)
-cdef uint32_t pcg64_cm_uint32(void *st) nogil:
+cdef uint32_t pcg64_cm_uint32(void *st) noexcept nogil:
return pcg64_cm_next32(<pcg64_state *> st)
-cdef double pcg64_cm_double(void* st) nogil:
+cdef double pcg64_cm_double(void* st) noexcept nogil:
return uint64_to_double(pcg64_cm_next64(<pcg64_state *>st))
cdef class PCG64(BitGenerator):
@@ -515,4 +515,3 @@ cdef class PCG64DXSM(BitGenerator):
pcg64_cm_advance(&self.rng_state, <uint64_t *>np.PyArray_DATA(d))
self._reset_state_variables()
return self
-
diff --git a/numpy/random/_philox.pyx b/numpy/random/_philox.pyx
index d9a366e86..e5353460c 100644
--- a/numpy/random/_philox.pyx
+++ b/numpy/random/_philox.pyx
@@ -36,19 +36,19 @@ cdef extern from 'src/philox/philox.h':
ctypedef s_philox_state philox_state
- uint64_t philox_next64(philox_state *state) nogil
- uint32_t philox_next32(philox_state *state) nogil
+ uint64_t philox_next64(philox_state *state) noexcept nogil
+ uint32_t philox_next32(philox_state *state) noexcept nogil
void philox_jump(philox_state *state)
void philox_advance(uint64_t *step, philox_state *state)
-cdef uint64_t philox_uint64(void*st) nogil:
+cdef uint64_t philox_uint64(void*st) noexcept nogil:
return philox_next64(<philox_state *> st)
-cdef uint32_t philox_uint32(void *st) nogil:
+cdef uint32_t philox_uint32(void *st) noexcept nogil:
return philox_next32(<philox_state *> st)
-cdef double philox_double(void*st) nogil:
+cdef double philox_double(void*st) noexcept nogil:
return uint64_to_double(philox_next64(<philox_state *> st))
cdef class Philox(BitGenerator):
diff --git a/numpy/random/_sfc64.pyx b/numpy/random/_sfc64.pyx
index 1daee34f8..419045c1d 100644
--- a/numpy/random/_sfc64.pyx
+++ b/numpy/random/_sfc64.pyx
@@ -21,13 +21,13 @@ cdef extern from "src/sfc64/sfc64.h":
void sfc64_set_state(sfc64_state *state, uint64_t *state_arr, int has_uint32, uint32_t uinteger)
-cdef uint64_t sfc64_uint64(void* st) nogil:
+cdef uint64_t sfc64_uint64(void* st) noexcept nogil:
return sfc64_next64(<sfc64_state *>st)
-cdef uint32_t sfc64_uint32(void *st) nogil:
+cdef uint32_t sfc64_uint32(void *st) noexcept nogil:
return sfc64_next32(<sfc64_state *> st)
-cdef double sfc64_double(void* st) nogil:
+cdef double sfc64_double(void* st) noexcept nogil:
return uint64_to_double(sfc64_next64(<sfc64_state *>st))
diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py
index 8ef886cac..cca7b8063 100644
--- a/numpy/testing/_private/utils.py
+++ b/numpy/testing/_private/utils.py
@@ -1067,7 +1067,7 @@ def assert_array_less(x, y, err_msg='', verbose=True):
Raises
------
AssertionError
- If actual and desired objects are not equal.
+ If x is not strictly smaller than y, element-wise.
See Also
--------
diff --git a/numpy/typing/tests/data/fail/einsumfunc.pyi b/numpy/typing/tests/data/fail/einsumfunc.pyi
index f0e3f1e95..2d1f37418 100644
--- a/numpy/typing/tests/data/fail/einsumfunc.pyi
+++ b/numpy/typing/tests/data/fail/einsumfunc.pyi
@@ -4,12 +4,9 @@ import numpy as np
AR_i: np.ndarray[Any, np.dtype[np.int64]]
AR_f: np.ndarray[Any, np.dtype[np.float64]]
AR_m: np.ndarray[Any, np.dtype[np.timedelta64]]
-AR_O: np.ndarray[Any, np.dtype[np.object_]]
AR_U: np.ndarray[Any, np.dtype[np.str_]]
np.einsum("i,i->i", AR_i, AR_m) # E: incompatible type
-np.einsum("i,i->i", AR_O, AR_O) # E: incompatible type
np.einsum("i,i->i", AR_f, AR_f, dtype=np.int32) # E: incompatible type
-np.einsum("i,i->i", AR_i, AR_i, dtype=np.timedelta64, casting="unsafe") # E: No overload variant
np.einsum("i,i->i", AR_i, AR_i, out=AR_U) # E: Value of type variable "_ArrayType" of "einsum" cannot be
np.einsum("i,i->i", AR_i, AR_i, out=AR_U, casting="unsafe") # E: No overload variant
diff --git a/numpy/typing/tests/data/fail/numerictypes.pyi b/numpy/typing/tests/data/fail/numerictypes.pyi
index a5c2814ef..ce5662d5e 100644
--- a/numpy/typing/tests/data/fail/numerictypes.pyi
+++ b/numpy/typing/tests/data/fail/numerictypes.pyi
@@ -9,5 +9,3 @@ np.maximum_sctype(1) # E: No overload variant
np.issubsctype(1, np.int64) # E: incompatible type
np.issubdtype(1, np.int64) # E: incompatible type
-
-np.find_common_type(np.int64, np.int64) # E: incompatible type
diff --git a/numpy/typing/tests/data/pass/numerictypes.py b/numpy/typing/tests/data/pass/numerictypes.py
index ab86f590d..63b6ad0e2 100644
--- a/numpy/typing/tests/data/pass/numerictypes.py
+++ b/numpy/typing/tests/data/pass/numerictypes.py
@@ -23,11 +23,6 @@ np.issubdtype(np.float64, np.float32)
np.sctype2char("S1")
np.sctype2char(list)
-np.find_common_type([], [np.int64, np.float32, complex])
-np.find_common_type((), (np.int64, np.float32, complex))
-np.find_common_type([np.int64, np.float32], [])
-np.find_common_type([np.float32], [np.int64, np.float64])
-
np.cast[int]
np.cast["i8"]
np.cast[np.int64]
diff --git a/numpy/typing/tests/data/reveal/einsumfunc.pyi b/numpy/typing/tests/data/reveal/einsumfunc.pyi
index d5f930149..5f6415f27 100644
--- a/numpy/typing/tests/data/reveal/einsumfunc.pyi
+++ b/numpy/typing/tests/data/reveal/einsumfunc.pyi
@@ -1,5 +1,6 @@
from typing import Any
import numpy as np
+import numpy.typing as npt
AR_LIKE_b: list[bool]
AR_LIKE_u: list[np.uint32]
@@ -7,10 +8,12 @@ AR_LIKE_i: list[int]
AR_LIKE_f: list[float]
AR_LIKE_c: list[complex]
AR_LIKE_U: list[str]
+AR_o: npt.NDArray[np.object_]
-OUT_f: np.ndarray[Any, np.dtype[np.float64]]
+OUT_f: npt.NDArray[np.float64]
reveal_type(np.einsum("i,i->i", AR_LIKE_b, AR_LIKE_b)) # E: Any
+reveal_type(np.einsum("i,i->i", AR_o, AR_o)) # E: Any
reveal_type(np.einsum("i,i->i", AR_LIKE_u, AR_LIKE_u)) # E: Any
reveal_type(np.einsum("i,i->i", AR_LIKE_i, AR_LIKE_i)) # E: Any
reveal_type(np.einsum("i,i->i", AR_LIKE_f, AR_LIKE_f)) # E: Any
diff --git a/numpy/typing/tests/data/reveal/modules.pyi b/numpy/typing/tests/data/reveal/modules.pyi
index ba830eb0d..4191c564a 100644
--- a/numpy/typing/tests/data/reveal/modules.pyi
+++ b/numpy/typing/tests/data/reveal/modules.pyi
@@ -16,6 +16,8 @@ reveal_type(np.random) # E: ModuleType
reveal_type(np.rec) # E: ModuleType
reveal_type(np.testing) # E: ModuleType
reveal_type(np.version) # E: ModuleType
+reveal_type(np.exceptions) # E: ModuleType
+reveal_type(np.dtypes) # E: ModuleType
reveal_type(np.lib.format) # E: ModuleType
reveal_type(np.lib.mixins) # E: ModuleType
diff --git a/numpy/typing/tests/data/reveal/numerictypes.pyi b/numpy/typing/tests/data/reveal/numerictypes.pyi
index e1857557d..d4399e2b1 100644
--- a/numpy/typing/tests/data/reveal/numerictypes.pyi
+++ b/numpy/typing/tests/data/reveal/numerictypes.pyi
@@ -21,8 +21,6 @@ reveal_type(np.issubclass_(1, 1)) # E: Literal[False]
reveal_type(np.sctype2char("S8")) # E: str
reveal_type(np.sctype2char(list)) # E: str
-reveal_type(np.find_common_type([np.int64], [np.int64])) # E: dtype[Any]
-
reveal_type(np.cast[int]) # E: _CastFunc
reveal_type(np.cast["i8"]) # E: _CastFunc
reveal_type(np.cast[np.int64]) # E: _CastFunc
diff --git a/pyproject.toml b/pyproject.toml
index 903a99bca..759b538fb 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -9,7 +9,7 @@ requires = [
# doesn't list it as a runtime requirement (at least in 0.11.0) - it's
# likely to be removed as a dependency in meson-python 0.12.0.
"wheel==0.38.1",
- "Cython>=0.29.30,<3.0",
+ "Cython>=0.29.34,<3.0",
# "meson-python>=0.10.0",
]
diff --git a/test_requirements.txt b/test_requirements.txt
index b8508f161..16f448eb8 100644
--- a/test_requirements.txt
+++ b/test_requirements.txt
@@ -1,4 +1,4 @@
-cython>=0.29.30,<3.0
+cython>=0.29.34,<3.0
wheel==0.38.1
setuptools==59.2.0
hypothesis==6.24.1