summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--README.md9
-rw-r--r--doc/RELEASE_WALKTHROUGH.rst.txt4
-rw-r--r--doc/neps/nep-0035-array-creation-dispatch-with-array-function.rst4
-rw-r--r--doc/neps/nep-0042-new-dtypes.rst9
-rw-r--r--doc/neps/nep-0043-extensible-ufuncs.rst10
-rw-r--r--doc/source/conf.py1
-rw-r--r--doc/source/docs/howto_build_docs.rst27
-rw-r--r--doc/source/reference/c-api/array.rst8
-rw-r--r--doc/source/release/1.21.0-notes.rst33
-rw-r--r--doc/source/user/building.rst84
-rw-r--r--doc/source/user/how-to-how-to.rst5
-rw-r--r--numpy/__init__.pyi142
-rw-r--r--numpy/core/include/numpy/ndarraytypes.h2
-rw-r--r--numpy/core/overrides.py6
-rw-r--r--numpy/core/src/multiarray/_multiarray_tests.c.src15
-rw-r--r--numpy/core/src/multiarray/array_method.c65
-rw-r--r--numpy/core/src/multiarray/convert_datatype.c207
-rw-r--r--numpy/core/src/multiarray/datetime.c13
-rw-r--r--numpy/core/src/multiarray/dtypemeta.c13
-rw-r--r--numpy/core/tests/test_api.py13
-rw-r--r--numpy/core/tests/test_deprecations.py35
-rw-r--r--numpy/core/tests/test_half.py6
-rw-r--r--numpy/core/tests/test_multiarray.py7
-rw-r--r--numpy/core/tests/test_numeric.py67
-rw-r--r--numpy/core/tests/test_regression.py4
-rw-r--r--numpy/distutils/intelccompiler.py2
-rw-r--r--numpy/lib/_version.py4
-rw-r--r--numpy/lib/function_base.pyi2
-rw-r--r--numpy/lib/index_tricks.py6
-rw-r--r--numpy/lib/polynomial.py9
-rw-r--r--numpy/lib/tests/test__version.py2
-rw-r--r--numpy/lib/tests/test_regression.py3
-rw-r--r--numpy/polynomial/_polybase.py10
-rw-r--r--numpy/polynomial/chebyshev.py9
-rw-r--r--numpy/polynomial/hermite.py9
-rw-r--r--numpy/polynomial/hermite_e.py9
-rw-r--r--numpy/polynomial/laguerre.py9
-rw-r--r--numpy/polynomial/legendre.py9
-rw-r--r--numpy/polynomial/polynomial.py9
-rw-r--r--numpy/testing/__init__.py1
-rw-r--r--numpy/testing/__init__.pyi146
-rw-r--r--numpy/testing/_private/utils.py4
-rw-r--r--numpy/testing/_private/utils.pyi396
-rwxr-xr-xnumpy/testing/setup.py1
-rw-r--r--numpy/testing/utils.py3
-rw-r--r--numpy/typing/__init__.py14
-rw-r--r--numpy/typing/_array_like.py6
-rw-r--r--numpy/typing/_callable.py46
-rw-r--r--numpy/typing/_char_codes.py100
-rw-r--r--numpy/typing/_dtype_like.py7
-rw-r--r--numpy/typing/_extended_precision.py24
-rw-r--r--numpy/typing/_generic_alias.py17
-rw-r--r--numpy/typing/_shape.py4
-rw-r--r--numpy/typing/tests/data/fail/testing.py26
-rw-r--r--numpy/typing/tests/data/reveal/modules.py3
-rw-r--r--numpy/typing/tests/data/reveal/ndarray_conversion.py53
-rw-r--r--numpy/typing/tests/data/reveal/scalars.py12
-rw-r--r--numpy/typing/tests/data/reveal/testing.py173
-rw-r--r--numpy/typing/tests/test_generic_alias.py10
-rw-r--r--test_requirements.txt4
60 files changed, 1335 insertions, 596 deletions
diff --git a/README.md b/README.md
index 88c1151a0..4195ff917 100644
--- a/README.md
+++ b/README.md
@@ -29,10 +29,11 @@ Call for Contributions
The NumPy project welcomes your expertise and enthusiasm!
-Small improvements or fixes are always appreciated; issues labeled as "good
-first issue" may be a good starting point. If you are considering larger
-contributions to the source code, please contact us through the [mailing
-list](https://mail.python.org/mailman/listinfo/numpy-discussion) first.
+Small improvements or fixes are always appreciated; issues labeled as ["good
+first issue"](https://github.com/numpy/numpy/labels/good%20first%20issue)
+may be a good starting point. If you are considering larger contributions
+to the source code, please contact us through the [mailing
+list](https://mail.python.org/mailman/listinfo/numpy-discussion) first.
Writing code isn’t the only way to contribute to NumPy. You can also:
- review pull requests
diff --git a/doc/RELEASE_WALKTHROUGH.rst.txt b/doc/RELEASE_WALKTHROUGH.rst.txt
index 4fbc7af1c..6febd554f 100644
--- a/doc/RELEASE_WALKTHROUGH.rst.txt
+++ b/doc/RELEASE_WALKTHROUGH.rst.txt
@@ -102,8 +102,8 @@ someone else, then create a new branch for the series. If the branch already
exists skip this::
$ cd ../numpy-wheels
- $ git co master
- $ git pull upstream master
+ $ git checkout main
+ $ git pull upstream main
$ git branch v1.19.x
Checkout the new branch and edit the ``azure-pipelines.yml`` and
diff --git a/doc/neps/nep-0035-array-creation-dispatch-with-array-function.rst b/doc/neps/nep-0035-array-creation-dispatch-with-array-function.rst
index 427d91b7d..3a689a4dc 100644
--- a/doc/neps/nep-0035-array-creation-dispatch-with-array-function.rst
+++ b/doc/neps/nep-0035-array-creation-dispatch-with-array-function.rst
@@ -5,11 +5,11 @@ NEP 35 — Array Creation Dispatching With __array_function__
===========================================================
:Author: Peter Andreas Entschev <pentschev@nvidia.com>
-:Status: Draft
+:Status: Final
:Type: Standards Track
:Created: 2019-10-15
:Updated: 2020-11-06
-:Resolution:
+:Resolution: https://mail.python.org/pipermail/numpy-discussion/2021-May/081761.html
Abstract
--------
diff --git a/doc/neps/nep-0042-new-dtypes.rst b/doc/neps/nep-0042-new-dtypes.rst
index 1738bd1ab..bb85f1d10 100644
--- a/doc/neps/nep-0042-new-dtypes.rst
+++ b/doc/neps/nep-0042-new-dtypes.rst
@@ -1334,7 +1334,7 @@ Although verbose, the API will mimic the one for creating a new DType:
typedef struct{
int flags; /* e.g. whether the cast requires the API */
int nin, nout; /* Number of Input and outputs (always 1) */
- NPY_CASTING casting; /* The default casting level */
+ NPY_CASTING casting; /* The "minimal casting level" */
PyArray_DTypeMeta *dtypes; /* input and output DType class */
/* NULL terminated slots defining the methods */
PyType_Slot *slots;
@@ -1342,7 +1342,7 @@ Although verbose, the API will mimic the one for creating a new DType:
The focus differs between casting and general ufuncs. For example, for casts
``nin == nout == 1`` is always correct, while for ufuncs ``casting`` is
-expected to be usually `"safe"`.
+expected to be usually `"no"`.
**Notes:** We may initially allow users to define only a single loop.
Internally NumPy optimizes far more, and this should be made public
@@ -1357,6 +1357,11 @@ incrementally in one of two ways:
* Or, more likely, expose the ``get_loop`` function which is passed additional
information, such as the fixed strides (similar to our internal API).
+* The casting level denotes the minimal guaranteed casting level and can be
+ ``-1`` if the cast may be impossible. For most non-parametric casts, this
+ value will be the casting level. NumPy may skip the ``resolve_descriptors``
+ call for ``np.can_cast()`` when the result is ``True`` based on this level.
+
The example does not yet include setup and error handling. Since these are
similar to the UFunc machinery, they will be defined in :ref:`NEP 43 <NEP43>` and then
incorporated identically into casting.
diff --git a/doc/neps/nep-0043-extensible-ufuncs.rst b/doc/neps/nep-0043-extensible-ufuncs.rst
index 3c6407728..cd73108e4 100644
--- a/doc/neps/nep-0043-extensible-ufuncs.rst
+++ b/doc/neps/nep-0043-extensible-ufuncs.rst
@@ -262,8 +262,8 @@ to define string equality, will be added to a ufunc.
if given_descrs[2] is None:
out_descr = DTypes[2]()
- # The operation is always "safe" casting (most ufuncs are)
- return (given_descrs[0], given_descrs[1], out_descr), "safe"
+ # The operation is always "no" casting (most ufuncs are)
+ return (given_descrs[0], given_descrs[1], out_descr), "no"
def strided_loop(context, dimensions, data, strides, innerloop_data):
"""The 1-D strided loop, similar to those used in current ufuncs"""
@@ -434,7 +434,7 @@ a new ``ArrayMethod`` object:
# Casting safety information (almost always "safe", necessary to
# unify casting and universal functions)
- casting: Casting = "safe"
+ casting: Casting = "no"
# More general flags:
flags: int
@@ -751,7 +751,7 @@ This step is required to allocate output arrays and has to happen before
casting can be prepared.
While the returned casting-safety (``NPY_CASTING``) will almost always be
-"safe" for universal functions, including it has two big advantages:
+"no" for universal functions, including it has two big advantages:
* ``-1`` indicates that an error occurred. If a Python error is set, it will
be raised. If no Python error is set this will be considered an "impossible"
@@ -767,7 +767,7 @@ While the returned casting-safety (``NPY_CASTING``) will almost always be
perspective. Currently, this would use ``int64 + int64 -> int64`` and then
cast to ``int32``. An implementation that skips the cast would
have to signal that it effectively includes the "same-kind" cast and is
- thus not considered "safe".
+ thus not considered "no".
``get_loop`` method
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 5ba7f70b8..a49074922 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -295,6 +295,7 @@ intersphinx_mapping = {
'pandas': ('https://pandas.pydata.org/pandas-docs/stable', None),
'scipy-lecture-notes': ('https://scipy-lectures.org', None),
'pytest': ('https://docs.pytest.org/en/stable', None),
+ 'numpy-tutorials': ('https://numpy.org/numpy-tutorials', None),
}
diff --git a/doc/source/docs/howto_build_docs.rst b/doc/source/docs/howto_build_docs.rst
index 38ea1338c..737f69877 100644
--- a/doc/source/docs/howto_build_docs.rst
+++ b/doc/source/docs/howto_build_docs.rst
@@ -16,6 +16,33 @@ in several different formats.
.. _Sphinx: http://www.sphinx-doc.org/
+To build the documentation, you must first build NumPy.
+
+There are two options for building NumPy and its documentation: building with
+Gitpod or locally from source. Your choice depends on your operating system and
+familiarity with the command line.
+
+Gitpod
+------------
+
+Gitpod is an open-source platform that automatically creates
+the correct development environment right in your browser, reducing the need to
+install local development environments and deal with incompatible dependencies.
+
+If you are a Windows user, unfamiliar with using the command line or building
+NumPy's documentation for the first time, it is often faster to build with
+Gitpod. Here are the in-depth instructions for `building NumPy with
+Gitpod`_.
+
+.. _building NumPy with Gitpod: https://numpy.org/devdocs/dev/development_gitpod.html
+
+Building locally
+------------------
+
+Building locally on your machine gives you more granular control. If you are a
+MacOS or Linux user familiar with using the command line, you can continue with
+building NumPy locally by following the instructions below.
+
Instructions
------------
diff --git a/doc/source/reference/c-api/array.rst b/doc/source/reference/c-api/array.rst
index 1673f1d6b..cb2f4b645 100644
--- a/doc/source/reference/c-api/array.rst
+++ b/doc/source/reference/c-api/array.rst
@@ -2647,6 +2647,12 @@ cost of a slight overhead.
- If the position of iter is changed, any subsequent call to
PyArrayNeighborhoodIter_Next is undefined behavior, and
PyArrayNeighborhoodIter_Reset must be called.
+ - If the position of iter is not the beginning of the data and the
+ underlying data for iter is contiguous, the iterator will point to the
+ start of the data instead of position pointed by iter.
+ To avoid this situation, iter should be moved to the required position
+ only after the creation of iterator, and PyArrayNeighborhoodIter_Reset
+ must be called.
.. code-block:: c
@@ -2656,7 +2662,7 @@ cost of a slight overhead.
/*For a 3x3 kernel */
bounds = {-1, 1, -1, 1};
- neigh_iter = (PyArrayNeighborhoodIterObject*)PyArrayNeighborhoodIter_New(
+ neigh_iter = (PyArrayNeighborhoodIterObject*)PyArray_NeighborhoodIterNew(
iter, bounds, NPY_NEIGHBORHOOD_ITER_ZERO_PADDING, NULL);
for(i = 0; i < iter->size; ++i) {
diff --git a/doc/source/release/1.21.0-notes.rst b/doc/source/release/1.21.0-notes.rst
index ac65b8fd0..c0d283b72 100644
--- a/doc/source/release/1.21.0-notes.rst
+++ b/doc/source/release/1.21.0-notes.rst
@@ -82,39 +82,6 @@ The methods in question are:
Future Changes
==============
-Promotion of strings with numbers and bools is deprecated
----------------------------------------------------------
-Any promotion of numbers and strings is deprecated and will
-give a ``FutureWarning`` the main affected functionalities
-are:
-
-* `numpy.promote_types` and `numpy.result_type` which will raise
- an error in this case in the future.
-* `numpy.concatenate` will raise an error when concatenating a string
- and numeric array. You can use ``dtype="S"`` to explicitly request
- a string result.
-* `numpy.array` and related functions will start returning ``object``
- arrays because these functions use ``object`` as a fallback when
- no common dtype can be found. However, it may happen that future
- releases of NumPy will generally error in these cases.
-
-This will mainly affect code such as::
-
- np.asarray(['string', 0])
-
-and::
-
- np.concatenate((['string'], [0]))
-
-in both cases adding ``dtype="U"`` or ``dtype="S"`` will give the
-previous (string) result, while ``dtype=object`` will ensure an array with
-object dtype is returned.
-
-Comparisons, universal functions, and casting are not affected by this.
-
-(`gh-18116 <https://github.com/numpy/numpy/pull/18116>`__)
-
-
Expired deprecations
====================
diff --git a/doc/source/user/building.rst b/doc/source/user/building.rst
index 52d7330bf..10983ce8f 100644
--- a/doc/source/user/building.rst
+++ b/doc/source/user/building.rst
@@ -3,8 +3,30 @@
Building from source
====================
-A general overview of building NumPy from source is given here, with detailed
-instructions for specific platforms given separately.
+There are two options for building NumPy: building with Gitpod or locally from
+source. Your choice depends on your operating system and familiarity with the
+command line.
+
+Gitpod
+------------
+
+Gitpod is an open-source platform that automatically creates
+the correct development environment right in your browser, reducing the need to
+install local development environments and deal with incompatible dependencies.
+
+If you are a Windows user, unfamiliar with using the command line or building
+NumPy for the first time, it is often faster to build with Gitpod. Here are
+the in-depth instructions for `building NumPy with Gitpod`_.
+
+.. _building NumPy with Gitpod: https://numpy.org/devdocs/dev/development_gitpod.html
+
+Building locally
+------------------
+
+Building locally on your machine gives you
+more granular control. If you are a MacOS or Linux user familiar with using the
+command line, you can continue with building NumPy locally by following the
+instructions below.
..
This page is referenced from numpy/numpy/__init__.py. Please keep its
@@ -23,15 +45,16 @@ Building NumPy requires the following software installed:
2) Compilers
- To build any extension modules for Python, you'll need a C compiler.
- Various NumPy modules use FORTRAN 77 libraries, so you'll also need a
- FORTRAN 77 compiler installed.
+ While a FORTRAN 77 compiler is not necessary for building NumPy, it is
+ needed to run the ``numpy.f2py`` tests. These tests are skipped if the
+ compiler is not auto-detected.
Note that NumPy is developed mainly using GNU compilers and tested on
MSVC and Clang compilers. Compilers from other vendors such as Intel,
- Absoft, Sun, NAG, Compaq, Vast, Portland, Lahey, HP, IBM are only supported
- in the form of community feedback, and may not work out of the box.
- GCC 4.x (and later) compilers are recommended. On ARM64 (aarch64) GCC 8.x (and later) are recommended.
+ Absoft, Sun, NAG, Compaq, Vast, Portland, Lahey, HP, IBM are only
+ supported in the form of community feedback, and may not work out of the
+ box. GCC 4.x (and later) compilers are recommended. On ARM64 (aarch64)
+ GCC 8.x (and later) are recommended.
3) Linear Algebra libraries
@@ -67,7 +90,8 @@ To perform an in-place build that can be run from the source folder run::
Testing
-------
-Make sure to test your builds. To ensure everything stays in shape, see if all tests pass::
+Make sure to test your builds. To ensure everything stays in shape, see if
+all tests pass::
$ python runtests.py -v -m full
@@ -105,11 +129,12 @@ For more information see::
How to check the ABI of BLAS/LAPACK libraries
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-One relatively simple and reliable way to check for the compiler used to build
-a library is to use ldd on the library. If libg2c.so is a dependency, this
-means that g77 has been used (note: g77 is no longer supported for building NumPy).
-If libgfortran.so is a dependency, gfortran has been used. If both are dependencies,
-this means both have been used, which is almost always a very bad idea.
+One relatively simple and reliable way to check for the compiler used to
+build a library is to use ldd on the library. If libg2c.so is a dependency,
+this means that g77 has been used (note: g77 is no longer supported for
+building NumPy). If libgfortran.so is a dependency, gfortran has been used.
+If both are dependencies, this means both have been used, which is almost
+always a very bad idea.
.. _accelerated-blas-lapack-libraries:
@@ -155,11 +180,11 @@ Alternatively one may use ``!`` or ``^`` to negate all items::
NPY_BLAS_ORDER='^blas,atlas' python setup.py build
-will allow using anything **but** NetLIB BLAS and ATLAS libraries, the order of the above
-list is retained.
+will allow using anything **but** NetLIB BLAS and ATLAS libraries, the order
+of the above list is retained.
-One cannot mix negation and positives, nor have multiple negations, such cases will
-raise an error.
+One cannot mix negation and positives, nor have multiple negations, such
+cases will raise an error.
LAPACK
~~~~~~
@@ -191,19 +216,19 @@ Alternatively one may use ``!`` or ``^`` to negate all items::
NPY_LAPACK_ORDER='^lapack' python setup.py build
-will allow using anything **but** the NetLIB LAPACK library, the order of the above
-list is retained.
+will allow using anything **but** the NetLIB LAPACK library, the order of
+the above list is retained.
-One cannot mix negation and positives, nor have multiple negations, such cases will
-raise an error.
+One cannot mix negation and positives, nor have multiple negations, such
+cases will raise an error.
.. deprecated:: 1.20
The native libraries on macOS, provided by Accelerate, are not fit for use
- in NumPy since they have bugs that cause wrong output under easily reproducible
- conditions. If the vendor fixes those bugs, the library could be reinstated,
- but until then users compiling for themselves should use another linear
- algebra library or use the built-in (but slower) default, see the next
- section.
+ in NumPy since they have bugs that cause wrong output under easily
+ reproducible conditions. If the vendor fixes those bugs, the library could
+ be reinstated, but until then users compiling for themselves should use
+ another linear algebra library or use the built-in (but slower) default,
+ see the next section.
Disabling ATLAS and other accelerated libraries
@@ -257,5 +282,6 @@ Supplying additional compiler flags
Additional compiler flags can be supplied by setting the ``OPT``,
``FOPT`` (for Fortran), and ``CC`` environment variables.
-When providing options that should improve the performance of the code ensure
-that you also set ``-DNDEBUG`` so that debugging code is not executed.
+When providing options that should improve the performance of the code
+ensure that you also set ``-DNDEBUG`` so that debugging code is not
+executed.
diff --git a/doc/source/user/how-to-how-to.rst b/doc/source/user/how-to-how-to.rst
index 16a2fc7a4..13d2b405f 100644
--- a/doc/source/user/how-to-how-to.rst
+++ b/doc/source/user/how-to-how-to.rst
@@ -105,10 +105,7 @@ deep dives intended to give understanding rather than immediate assistance,
and `References`, which give complete, autoritative data on some concrete
part of NumPy (like its API) but aren't obligated to paint a broader picture.
-For more on tutorials, see the `tutorial how-to`_.
-
-.. _`tutorial how-to`: https://github.com/numpy/numpy-tutorials/blob/master/tutorial_style.ipynb
-
+For more on tutorials, see :doc:`content/tutorial-style-guide`.
******************************************************************************
Is this page an example of a how-to?
diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi
index ac37eb8ad..a83e5d9c7 100644
--- a/numpy/__init__.pyi
+++ b/numpy/__init__.pyi
@@ -1226,20 +1226,9 @@ class _ArrayOrScalarCommon:
def __deepcopy__(self: _ArraySelf, __memo: Optional[dict] = ...) -> _ArraySelf: ...
def __eq__(self, other): ...
def __ne__(self, other): ...
- def astype(
- self: _ArraySelf,
- dtype: DTypeLike,
- order: _OrderKACF = ...,
- casting: _Casting = ...,
- subok: bool = ...,
- copy: bool = ...,
- ) -> _ArraySelf: ...
def copy(self: _ArraySelf, order: _OrderKACF = ...) -> _ArraySelf: ...
def dump(self, file: str) -> None: ...
def dumps(self) -> bytes: ...
- def getfield(
- self: _ArraySelf, dtype: DTypeLike, offset: int = ...
- ) -> _ArraySelf: ...
def tobytes(self, order: _OrderKACF = ...) -> bytes: ...
# NOTE: `tostring()` is deprecated and therefore excluded
# def tostring(self, order=...): ...
@@ -1248,14 +1237,6 @@ class _ArrayOrScalarCommon:
) -> None: ...
# generics and 0d arrays return builtin scalars
def tolist(self) -> Any: ...
- @overload
- def view(self, type: Type[_NdArraySubClass]) -> _NdArraySubClass: ...
- @overload
- def view(self: _ArraySelf, dtype: DTypeLike = ...) -> _ArraySelf: ...
- @overload
- def view(
- self, dtype: DTypeLike, type: Type[_NdArraySubClass]
- ) -> _NdArraySubClass: ...
# TODO: Add proper signatures
def __getitem__(self, key) -> Any: ...
@@ -1637,6 +1618,12 @@ _T_co = TypeVar("_T_co", covariant=True)
_2Tuple = Tuple[_T, _T]
_Casting = L["no", "equiv", "safe", "same_kind", "unsafe"]
+_DTypeLike = Union[
+ dtype[_ScalarType],
+ Type[_ScalarType],
+ _SupportsDType[dtype[_ScalarType]],
+]
+
_ArrayUInt_co = NDArray[Union[bool_, unsignedinteger[Any]]]
_ArrayInt_co = NDArray[Union[bool_, integer[Any]]]
_ArrayFloat_co = NDArray[Union[bool_, integer[Any], floating[Any]]]
@@ -1874,6 +1861,53 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]):
self, *shape: SupportsIndex, order: _OrderACF = ...
) -> ndarray[Any, _DType_co]: ...
+ @overload
+ def astype(
+ self,
+ dtype: _DTypeLike[_ScalarType],
+ order: _OrderKACF = ...,
+ casting: _Casting = ...,
+ subok: bool = ...,
+ copy: bool = ...,
+ ) -> NDArray[_ScalarType]: ...
+ @overload
+ def astype(
+ self,
+ dtype: DTypeLike,
+ order: _OrderKACF = ...,
+ casting: _Casting = ...,
+ subok: bool = ...,
+ copy: bool = ...,
+ ) -> NDArray[Any]: ...
+
+ @overload
+ def view(self: _ArraySelf) -> _ArraySelf: ...
+ @overload
+ def view(self, type: Type[_NdArraySubClass]) -> _NdArraySubClass: ...
+ @overload
+ def view(self, dtype: _DTypeLike[_ScalarType]) -> NDArray[_ScalarType]: ...
+ @overload
+ def view(self, dtype: DTypeLike) -> NDArray[Any]: ...
+ @overload
+ def view(
+ self,
+ dtype: DTypeLike,
+ type: Type[_NdArraySubClass],
+ ) -> _NdArraySubClass: ...
+
+ @overload
+ def getfield(
+ self,
+ dtype: _DTypeLike[_ScalarType],
+ offset: SupportsIndex = ...
+ ) -> NDArray[_ScalarType]: ...
+ @overload
+ def getfield(
+ self,
+ dtype: DTypeLike,
+ offset: SupportsIndex = ...
+ ) -> NDArray[Any]: ...
+
# Dispatch to the underlying `generic` via protocols
def __int__(
self: ndarray[Any, dtype[SupportsInt]], # type: ignore[type-var]
@@ -2846,6 +2880,59 @@ class generic(_ArrayOrScalarCommon):
def byteswap(self: _ScalarType, inplace: L[False] = ...) -> _ScalarType: ...
@property
def flat(self: _ScalarType) -> flatiter[ndarray[Any, dtype[_ScalarType]]]: ...
+
+ @overload
+ def astype(
+ self,
+ dtype: _DTypeLike[_ScalarType],
+ order: _OrderKACF = ...,
+ casting: _Casting = ...,
+ subok: bool = ...,
+ copy: bool = ...,
+ ) -> _ScalarType: ...
+ @overload
+ def astype(
+ self,
+ dtype: DTypeLike,
+ order: _OrderKACF = ...,
+ casting: _Casting = ...,
+ subok: bool = ...,
+ copy: bool = ...,
+ ) -> Any: ...
+
+ # NOTE: `view` will perform a 0D->scalar cast,
+ # thus the array `type` is irrelevant to the output type
+ @overload
+ def view(
+ self: _ScalarType,
+ type: Type[ndarray[Any, Any]] = ...,
+ ) -> _ScalarType: ...
+ @overload
+ def view(
+ self,
+ dtype: _DTypeLike[_ScalarType],
+ type: Type[ndarray[Any, Any]] = ...,
+ ) -> _ScalarType: ...
+ @overload
+ def view(
+ self,
+ dtype: DTypeLike,
+ type: Type[ndarray[Any, Any]] = ...,
+ ) -> Any: ...
+
+ @overload
+ def getfield(
+ self,
+ dtype: _DTypeLike[_ScalarType],
+ offset: SupportsIndex = ...
+ ) -> _ScalarType: ...
+ @overload
+ def getfield(
+ self,
+ dtype: DTypeLike,
+ offset: SupportsIndex = ...
+ ) -> Any: ...
+
def item(
self,
__args: Union[L[0], Tuple[()], Tuple[L[0]]] = ...,
@@ -3042,11 +3129,24 @@ class datetime64(generic):
if sys.version_info >= (3, 8):
_IntValue = Union[SupportsInt, _CharLike_co, SupportsIndex]
_FloatValue = Union[None, _CharLike_co, SupportsFloat, SupportsIndex]
- _ComplexValue = Union[None, _CharLike_co, SupportsFloat, SupportsComplex, SupportsIndex]
+ _ComplexValue = Union[
+ None,
+ _CharLike_co,
+ SupportsFloat,
+ SupportsComplex,
+ SupportsIndex,
+ complex, # `complex` is not a subtype of `SupportsComplex`
+ ]
else:
_IntValue = Union[SupportsInt, _CharLike_co]
_FloatValue = Union[None, _CharLike_co, SupportsFloat]
- _ComplexValue = Union[None, _CharLike_co, SupportsFloat, SupportsComplex]
+ _ComplexValue = Union[
+ None,
+ _CharLike_co,
+ SupportsFloat,
+ SupportsComplex,
+ complex,
+ ]
class integer(number[_NBit1]): # type: ignore
# NOTE: `__index__` is technically defined in the bottom-most
diff --git a/numpy/core/include/numpy/ndarraytypes.h b/numpy/core/include/numpy/ndarraytypes.h
index d1acfdf26..3ea66b049 100644
--- a/numpy/core/include/numpy/ndarraytypes.h
+++ b/numpy/core/include/numpy/ndarraytypes.h
@@ -1236,6 +1236,8 @@ struct PyArrayIterObject_tag {
_PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \
for (__npy_i = 0; __npy_i<=_PyAIT(it)->nd_m1; \
__npy_i++) { \
+ _PyAIT(it)->coordinates[__npy_i] = \
+ (__npy_ind / _PyAIT(it)->factors[__npy_i]); \
_PyAIT(it)->dataptr += \
(__npy_ind / _PyAIT(it)->factors[__npy_i]) \
* _PyAIT(it)->strides[__npy_i]; \
diff --git a/numpy/core/overrides.py b/numpy/core/overrides.py
index c2b5fb7fa..70085d896 100644
--- a/numpy/core/overrides.py
+++ b/numpy/core/overrides.py
@@ -18,11 +18,7 @@ array_function_like_doc = (
NumPy arrays. If an array-like passed in as ``like`` supports
the ``__array_function__`` protocol, the result will be defined
by it. In this case, it ensures the creation of an array object
- compatible with that passed in via this argument.
-
- .. note::
- The ``like`` keyword is an experimental feature pending on
- acceptance of :ref:`NEP 35 <NEP35>`."""
+ compatible with that passed in via this argument."""
)
def set_array_function_like_doc(public_api):
diff --git a/numpy/core/src/multiarray/_multiarray_tests.c.src b/numpy/core/src/multiarray/_multiarray_tests.c.src
index bfdeae079..79140bdb7 100644
--- a/numpy/core/src/multiarray/_multiarray_tests.c.src
+++ b/numpy/core/src/multiarray/_multiarray_tests.c.src
@@ -87,7 +87,7 @@ static int copy_@name@(PyArrayIterObject *itx, PyArrayNeighborhoodIterObject *ni
* For each point in itx, copy the current neighborhood into an array which
* is appended at the output list
*/
- for (i = 0; i < itx->size; ++i) {
+ for (i = itx->index; i < itx->size; ++i) {
PyArrayNeighborhoodIter_Reset(niterx);
for (j = 0; j < PyArray_NDIM(itx->ao); ++j) {
@@ -130,7 +130,7 @@ static int copy_object(PyArrayIterObject *itx, PyArrayNeighborhoodIterObject *ni
* For each point in itx, copy the current neighborhood into an array which
* is appended at the output list
*/
- for (i = 0; i < itx->size; ++i) {
+ for (i = itx->index; i < itx->size; ++i) {
PyArrayNeighborhoodIter_Reset(niterx);
for (j = 0; j < PyArray_NDIM(itx->ao); ++j) {
@@ -161,10 +161,11 @@ test_neighborhood_iterator(PyObject* NPY_UNUSED(self), PyObject* args)
PyArrayObject *ax, *afill;
PyArrayIterObject *itx;
int i, typenum, mode, st;
+ Py_ssize_t idxstart = 0;
npy_intp bounds[NPY_MAXDIMS*2];
PyArrayNeighborhoodIterObject *niterx;
- if (!PyArg_ParseTuple(args, "OOOi", &x, &b, &fill, &mode)) {
+ if (!PyArg_ParseTuple(args, "OOOi|n", &x, &b, &fill, &mode, &idxstart)) {
return NULL;
}
@@ -224,12 +225,20 @@ test_neighborhood_iterator(PyObject* NPY_UNUSED(self), PyObject* args)
}
}
+ if (idxstart >= itx->size) {
+ PyErr_SetString(PyExc_ValueError,
+ "start index not compatible with x input");
+ goto clean_itx;
+ }
+
niterx = (PyArrayNeighborhoodIterObject*)PyArray_NeighborhoodIterNew(
(PyArrayIterObject*)itx, bounds, mode, afill);
if (niterx == NULL) {
goto clean_afill;
}
+ PyArray_ITER_GOTO1D((PyArrayIterObject*)itx, idxstart);
+
switch (typenum) {
case NPY_OBJECT:
st = copy_object(itx, niterx, bounds, &out);
diff --git a/numpy/core/src/multiarray/array_method.c b/numpy/core/src/multiarray/array_method.c
index e13da12de..3ecc20d1d 100644
--- a/numpy/core/src/multiarray/array_method.c
+++ b/numpy/core/src/multiarray/array_method.c
@@ -210,10 +210,12 @@ validate_spec(PyArrayMethod_Spec *spec)
case NPY_UNSAFE_CASTING:
break;
default:
- PyErr_Format(PyExc_TypeError,
- "ArrayMethod has invalid casting `%d`. (method: %s)",
- spec->casting, spec->name);
- return -1;
+ if (spec->casting != -1) {
+ PyErr_Format(PyExc_TypeError,
+ "ArrayMethod has invalid casting `%d`. (method: %s)",
+ spec->casting, spec->name);
+ return -1;
+ }
}
for (int i = 0; i < nargs; i++) {
@@ -301,6 +303,13 @@ fill_arraymethod_from_slots(
/* Check whether the slots are valid: */
if (meth->resolve_descriptors == &default_resolve_descriptors) {
+ if (spec->casting == -1) {
+ PyErr_Format(PyExc_TypeError,
+ "Cannot set casting to -1 (invalid) when not providing "
+ "the default `resolve_descriptors` function. "
+ "(method: %s)", spec->name);
+ return -1;
+ }
for (int i = 0; i < meth->nin + meth->nout; i++) {
if (res->dtypes[i] == NULL) {
if (i < meth->nin) {
@@ -573,6 +582,8 @@ boundarraymethod__resolve_descripors(
/*
* The casting flags should be the most generic casting level (except the
* cast-is-view flag. If no input is parametric, it must match exactly.
+ *
+ * (Note that these checks are only debugging checks.)
*/
int parametric = 0;
for (int i = 0; i < nin + nout; i++) {
@@ -581,34 +592,34 @@ boundarraymethod__resolve_descripors(
break;
}
}
- if (!parametric) {
- /*
- * Non-parametric can only mismatch if it switches from no to equiv
- * (e.g. due to byteorder changes).
- */
- if (self->method->casting != (casting & ~_NPY_CAST_IS_VIEW) &&
- !(self->method->casting == NPY_NO_CASTING &&
- casting == NPY_EQUIV_CASTING)) {
- PyErr_Format(PyExc_RuntimeError,
- "resolve_descriptors cast level did not match stored one "
- "(expected %d, got %d) for method %s",
- self->method->casting, (casting & ~_NPY_CAST_IS_VIEW),
- self->method->name);
- Py_DECREF(result_tuple);
- return NULL;
- }
- }
- else {
+ if (self->method->casting != -1) {
NPY_CASTING cast = casting & ~_NPY_CAST_IS_VIEW;
- if (cast != PyArray_MinCastSafety(cast, self->method->casting)) {
+ if (self->method->casting !=
+ PyArray_MinCastSafety(cast, self->method->casting)) {
PyErr_Format(PyExc_RuntimeError,
- "resolve_descriptors cast level did not match stored one "
- "(expected %d, got %d) for method %s",
- self->method->casting, (casting & ~_NPY_CAST_IS_VIEW),
- self->method->name);
+ "resolve_descriptors cast level did not match stored one. "
+ "(set level is %d, got %d for method %s)",
+ self->method->casting, cast, self->method->name);
Py_DECREF(result_tuple);
return NULL;
}
+ if (!parametric) {
+ /*
+ * Non-parametric can only mismatch if it switches from equiv to no
+ * (e.g. due to byteorder changes).
+ */
+ if (cast != self->method->casting &&
+ self->method->casting != NPY_EQUIV_CASTING) {
+ PyErr_Format(PyExc_RuntimeError,
+ "resolve_descriptors cast level changed even though "
+ "the cast is non-parametric where the only possible "
+ "change should be from equivalent to no casting. "
+ "(set level is %d, got %d for method %s)",
+ self->method->casting, cast, self->method->name);
+ Py_DECREF(result_tuple);
+ return NULL;
+ }
+ }
}
return Py_BuildValue("iN", casting, result_tuple);
diff --git a/numpy/core/src/multiarray/convert_datatype.c b/numpy/core/src/multiarray/convert_datatype.c
index 01ee56d16..1a962ef78 100644
--- a/numpy/core/src/multiarray/convert_datatype.c
+++ b/numpy/core/src/multiarray/convert_datatype.c
@@ -358,6 +358,45 @@ PyArray_CastAnyTo(PyArrayObject *out, PyArrayObject *mp)
}
+static NPY_CASTING
+_get_cast_safety_from_castingimpl(PyArrayMethodObject *castingimpl,
+ PyArray_DTypeMeta *dtypes[2], PyArray_Descr *from, PyArray_Descr *to)
+{
+ PyArray_Descr *descrs[2] = {from, to};
+ PyArray_Descr *out_descrs[2];
+
+ NPY_CASTING casting = castingimpl->resolve_descriptors(
+ castingimpl, dtypes, descrs, out_descrs);
+ if (casting < 0) {
+ return -1;
+ }
+ /* The returned descriptors may not match, requiring a second check */
+ if (out_descrs[0] != descrs[0]) {
+ NPY_CASTING from_casting = PyArray_GetCastSafety(
+ descrs[0], out_descrs[0], NULL);
+ casting = PyArray_MinCastSafety(casting, from_casting);
+ if (casting < 0) {
+ goto finish;
+ }
+ }
+ if (descrs[1] != NULL && out_descrs[1] != descrs[1]) {
+ NPY_CASTING from_casting = PyArray_GetCastSafety(
+ descrs[1], out_descrs[1], NULL);
+ casting = PyArray_MinCastSafety(casting, from_casting);
+ if (casting < 0) {
+ goto finish;
+ }
+ }
+
+ finish:
+ Py_DECREF(out_descrs[0]);
+ Py_DECREF(out_descrs[1]);
+ /* NPY_NO_CASTING has to be used for (NPY_EQUIV_CASTING|_NPY_CAST_IS_VIEW) */
+ assert(casting != (NPY_EQUIV_CASTING|_NPY_CAST_IS_VIEW));
+ return casting;
+}
+
+
/**
* Given two dtype instances, find the correct casting safety.
*
@@ -375,7 +414,6 @@ NPY_NO_EXPORT NPY_CASTING
PyArray_GetCastSafety(
PyArray_Descr *from, PyArray_Descr *to, PyArray_DTypeMeta *to_dtype)
{
- NPY_CASTING casting;
if (to != NULL) {
to_dtype = NPY_DTYPE(to);
}
@@ -389,41 +427,67 @@ PyArray_GetCastSafety(
}
PyArrayMethodObject *castingimpl = (PyArrayMethodObject *)meth;
-
PyArray_DTypeMeta *dtypes[2] = {NPY_DTYPE(from), to_dtype};
- PyArray_Descr *descrs[2] = {from, to};
- PyArray_Descr *out_descrs[2];
-
- casting = castingimpl->resolve_descriptors(
- castingimpl, dtypes, descrs, out_descrs);
+ NPY_CASTING casting = _get_cast_safety_from_castingimpl(castingimpl,
+ dtypes, from, to);
Py_DECREF(meth);
- if (casting < 0) {
+
+ return casting;
+}
+
+
+/**
+ * Check whether a cast is safe, see also `PyArray_GetCastSafety` for
+ * a similar function. Unlike GetCastSafety, this function checks the
+ * `castingimpl->casting` when available. This allows for two things:
+ *
+ * 1. It avoids calling `resolve_descriptors` in some cases.
+ * 2. Strings need to discover the length, but in some cases we know that the
+ * cast is valid (assuming the string length is discovered first).
+ *
+ * The latter means that a `can_cast` could return True, but the cast fail
+ * because the parametric type cannot guess the correct output descriptor.
+ * (I.e. if `object_arr.astype("S")` did _not_ inspect the objects, and the
+ * user would have to guess the string length.)
+ *
+ * @param casting the requested casting safety.
+ * @param from
+ * @param to The descriptor to cast to (may be NULL)
+ * @param to_dtype If `to` is NULL, must pass the to_dtype (otherwise this
+ * is ignored).
+ * @return 0 for an invalid cast, 1 for a valid cast, and -1 for an error.
+ */
+static int
+PyArray_CheckCastSafety(NPY_CASTING casting,
+ PyArray_Descr *from, PyArray_Descr *to, PyArray_DTypeMeta *to_dtype)
+{
+ if (to != NULL) {
+ to_dtype = NPY_DTYPE(to);
+ }
+ PyObject *meth = PyArray_GetCastingImpl(NPY_DTYPE(from), to_dtype);
+ if (meth == NULL) {
return -1;
}
- /* The returned descriptors may not match, requiring a second check */
- if (out_descrs[0] != descrs[0]) {
- NPY_CASTING from_casting = PyArray_GetCastSafety(
- descrs[0], out_descrs[0], NULL);
- casting = PyArray_MinCastSafety(casting, from_casting);
- if (casting < 0) {
- goto finish;
- }
+ if (meth == Py_None) {
+ Py_DECREF(Py_None);
+ return -1;
}
- if (descrs[1] != NULL && out_descrs[1] != descrs[1]) {
- NPY_CASTING from_casting = PyArray_GetCastSafety(
- descrs[1], out_descrs[1], NULL);
- casting = PyArray_MinCastSafety(casting, from_casting);
- if (casting < 0) {
- goto finish;
- }
+ PyArrayMethodObject *castingimpl = (PyArrayMethodObject *)meth;
+
+ if (PyArray_MinCastSafety(castingimpl->casting, casting) == casting) {
+ /* No need to check using `castingimpl.resolve_descriptors()` */
+ return 1;
}
- finish:
- Py_DECREF(out_descrs[0]);
- Py_DECREF(out_descrs[1]);
- /* NPY_NO_CASTING has to be used for (NPY_EQUIV_CASTING|_NPY_CAST_IS_VIEW) */
- assert(casting != (NPY_EQUIV_CASTING|_NPY_CAST_IS_VIEW));
- return casting;
+ PyArray_DTypeMeta *dtypes[2] = {NPY_DTYPE(from), to_dtype};
+ NPY_CASTING safety = _get_cast_safety_from_castingimpl(castingimpl,
+ dtypes, from, to);
+ Py_DECREF(meth);
+ /* If casting is the smaller (or equal) safety we match */
+ if (safety < 0) {
+ return -1;
+ }
+ return PyArray_MinCastSafety(safety, casting) == casting;
}
@@ -565,6 +629,8 @@ NPY_NO_EXPORT npy_bool
PyArray_CanCastTypeTo(PyArray_Descr *from, PyArray_Descr *to,
NPY_CASTING casting)
{
+ PyArray_DTypeMeta *to_dtype = NPY_DTYPE(to);
+
/*
* NOTE: This code supports U and S, this is identical to the code
* in `ctors.c` which does not allow these dtypes to be attached
@@ -576,21 +642,21 @@ PyArray_CanCastTypeTo(PyArray_Descr *from, PyArray_Descr *to,
* TODO: We should grow support for `np.can_cast("d", "S")` being
* different from `np.can_cast("d", "S0")` here, at least for
* the python side API.
+ * The `to = NULL` branch, which considers "S0" to be "flexible"
+ * should probably be deprecated.
+ * (This logic is duplicated in `PyArray_CanCastArrayTo`)
*/
- NPY_CASTING safety;
if (PyDataType_ISUNSIZED(to) && to->subarray == NULL) {
- safety = PyArray_GetCastSafety(from, NULL, NPY_DTYPE(to));
- }
- else {
- safety = PyArray_GetCastSafety(from, to, NPY_DTYPE(to));
+ to = NULL; /* consider mainly S0 and U0 as S and U */
}
- if (safety < 0) {
+ int is_valid = PyArray_CheckCastSafety(casting, from, to, to_dtype);
+ /* Clear any errors and consider this unsafe (should likely be changed) */
+ if (is_valid < 0) {
PyErr_Clear();
return 0;
}
- /* If casting is the smaller (or equal) safety we match */
- return PyArray_MinCastSafety(safety, casting) == casting;
+ return is_valid;
}
@@ -610,28 +676,22 @@ can_cast_scalar_to(PyArray_Descr *scal_type, char *scal_data,
/*
* If the two dtypes are actually references to the same object
* or if casting type is forced unsafe then always OK.
+ *
+ * TODO: Assuming that unsafe casting always works is not actually correct
*/
if (scal_type == to || casting == NPY_UNSAFE_CASTING ) {
return 1;
}
- /* NOTE: This is roughly the same code as `PyArray_CanCastTypeTo`: */
- NPY_CASTING safety;
- if (PyDataType_ISUNSIZED(to) && to->subarray == NULL) {
- safety = PyArray_GetCastSafety(scal_type, NULL, NPY_DTYPE(to));
- }
- else {
- safety = PyArray_GetCastSafety(scal_type, to, NPY_DTYPE(to));
- }
- if (safety < 0) {
- PyErr_Clear();
- return 0;
- }
- safety = PyArray_MinCastSafety(safety, casting);
- if (safety == casting) {
+ int valid = PyArray_CheckCastSafety(casting, scal_type, to, NPY_DTYPE(to));
+ if (valid == 1) {
/* This is definitely a valid cast. */
return 1;
}
+ if (valid < 0) {
+ /* Probably must return 0, but just keep trying for now. */
+ PyErr_Clear();
+ }
/*
* If the scalar isn't a number, value-based casting cannot kick in and
@@ -692,14 +752,29 @@ PyArray_CanCastArrayTo(PyArrayObject *arr, PyArray_Descr *to,
NPY_CASTING casting)
{
PyArray_Descr *from = PyArray_DESCR(arr);
+ PyArray_DTypeMeta *to_dtype = NPY_DTYPE(to);
- /* If it's a scalar, check the value */
- if (PyArray_NDIM(arr) == 0 && !PyArray_HASFIELDS(arr)) {
+ /* NOTE, TODO: The same logic as `PyArray_CanCastTypeTo`: */
+ if (PyDataType_ISUNSIZED(to) && to->subarray == NULL) {
+ to = NULL;
+ }
+
+ /*
+ * If it's a scalar, check the value. (This only currently matters for
+ * numeric types and for `to == NULL` it can't be numeric.)
+ */
+ if (PyArray_NDIM(arr) == 0 && !PyArray_HASFIELDS(arr) && to != NULL) {
return can_cast_scalar_to(from, PyArray_DATA(arr), to, casting);
}
- /* Otherwise, use the standard rules */
- return PyArray_CanCastTypeTo(from, to, casting);
+ /* Otherwise, use the standard rules (same as `PyArray_CanCastTypeTo`) */
+ int is_valid = PyArray_CheckCastSafety(casting, from, to, to_dtype);
+ /* Clear any errors and consider this unsafe (should likely be changed) */
+ if (is_valid < 0) {
+ PyErr_Clear();
+ return 0;
+ }
+ return is_valid;
}
@@ -2122,13 +2197,6 @@ PyArray_AddCastingImplementation(PyBoundArrayMethodObject *meth)
meth->method->name);
return -1;
}
- if ((meth->method->casting & ~_NPY_CAST_IS_VIEW) != NPY_NO_CASTING) {
- PyErr_Format(PyExc_TypeError,
- "A cast where input and output DType (class) are identical "
- "must signal `no-casting`. (method: %s)",
- meth->method->name);
- return -1;
- }
if (meth->dtypes[0]->within_dtype_castingimpl != NULL) {
PyErr_Format(PyExc_RuntimeError,
"A cast was already added for %S -> %S. (method: %s)",
@@ -2400,7 +2468,7 @@ add_numeric_cast(PyArray_DTypeMeta *from, PyArray_DTypeMeta *to)
/* Find the correct casting level, and special case no-cast */
if (dtypes[0]->kind == dtypes[1]->kind && from_itemsize == to_itemsize) {
- spec.casting = NPY_NO_CASTING;
+ spec.casting = NPY_EQUIV_CASTING;
/* When there is no casting (equivalent C-types) use byteswap loops */
slots[0].slot = NPY_METH_resolve_descriptors;
@@ -2558,7 +2626,6 @@ cast_to_string_resolve_descriptors(
dtypes[1]->type_num == NPY_STRING);
return NPY_UNSAFE_CASTING;
}
- assert(self->casting == NPY_SAFE_CASTING);
if (loop_descrs[1]->elsize >= size) {
return NPY_SAFE_CASTING;
@@ -2600,9 +2667,9 @@ add_other_to_and_from_string_cast(
.dtypes = dtypes,
.slots = slots,
};
- /* Almost everything can be safely cast to string (except unicode) */
+ /* Almost everything can be same-kind cast to string (except unicode) */
if (other->type_num != NPY_UNICODE) {
- spec.casting = NPY_SAFE_CASTING;
+ spec.casting = NPY_SAME_KIND_CASTING; /* same-kind if too short */
}
else {
spec.casting = NPY_UNSAFE_CASTING;
@@ -2722,7 +2789,7 @@ PyArray_InitializeStringCasts(void)
{0, NULL}};
PyArrayMethod_Spec spec = {
.name = "string_to_string_cast",
- .casting = NPY_NO_CASTING,
+ .casting = NPY_UNSAFE_CASTING,
.nin = 1,
.nout = 1,
.flags = (NPY_METH_REQUIRES_PYAPI |
@@ -2935,7 +3002,7 @@ PyArray_GetGenericToVoidCastingImpl(void)
method->name = "any_to_void_cast";
method->flags = NPY_METH_SUPPORTS_UNALIGNED | NPY_METH_REQUIRES_PYAPI;
- method->casting = NPY_SAFE_CASTING;
+ method->casting = -1;
method->resolve_descriptors = &nonstructured_to_structured_resolve_descriptors;
method->get_strided_loop = &nonstructured_to_structured_get_loop;
@@ -3074,7 +3141,7 @@ PyArray_GetVoidToGenericCastingImpl(void)
method->name = "void_to_any_cast";
method->flags = NPY_METH_SUPPORTS_UNALIGNED | NPY_METH_REQUIRES_PYAPI;
- method->casting = NPY_UNSAFE_CASTING;
+ method->casting = -1;
method->resolve_descriptors = &structured_to_nonstructured_resolve_descriptors;
method->get_strided_loop = &structured_to_nonstructured_get_loop;
@@ -3306,7 +3373,7 @@ PyArray_InitializeVoidToVoidCast(void)
{0, NULL}};
PyArrayMethod_Spec spec = {
.name = "void_to_void_cast",
- .casting = NPY_NO_CASTING,
+ .casting = -1, /* may not cast at all */
.nin = 1,
.nout = 1,
.flags = NPY_METH_REQUIRES_PYAPI | NPY_METH_SUPPORTS_UNALIGNED,
diff --git a/numpy/core/src/multiarray/datetime.c b/numpy/core/src/multiarray/datetime.c
index fdf4c0839..b9d81e836 100644
--- a/numpy/core/src/multiarray/datetime.c
+++ b/numpy/core/src/multiarray/datetime.c
@@ -950,10 +950,6 @@ convert_datetime_divisor_to_multiple(PyArray_DatetimeMetaData *meta,
return -1;
}
- ind = ((int)meta->base - (int)NPY_FR_Y)*2;
- totry = _multiples_table[ind];
- baseunit = _multiples_table[ind + 1];
-
num = 3;
if (meta->base == NPY_FR_W) {
num = 4;
@@ -962,6 +958,7 @@ convert_datetime_divisor_to_multiple(PyArray_DatetimeMetaData *meta,
num = 2;
}
if (meta->base >= NPY_FR_s) {
+ /* _multiples_table only has entries up to NPY_FR_s */
ind = ((int)NPY_FR_s - (int)NPY_FR_Y)*2;
totry = _multiples_table[ind];
baseunit = _multiples_table[ind + 1];
@@ -974,6 +971,11 @@ convert_datetime_divisor_to_multiple(PyArray_DatetimeMetaData *meta,
num = 0;
}
}
+ else {
+ ind = ((int)meta->base - (int)NPY_FR_Y)*2;
+ totry = _multiples_table[ind];
+ baseunit = _multiples_table[ind + 1];
+ }
for (i = 0; i < num; i++) {
q = totry[i] / den;
@@ -3952,7 +3954,6 @@ time_to_string_resolve_descriptors(
return -1;
}
- assert(self->casting == NPY_UNSAFE_CASTING);
return NPY_UNSAFE_CASTING;
}
@@ -4059,7 +4060,7 @@ PyArray_InitializeDatetimeCasts()
.name = "datetime_casts",
.nin = 1,
.nout = 1,
- .casting = NPY_NO_CASTING,
+ .casting = NPY_UNSAFE_CASTING,
.flags = NPY_METH_SUPPORTS_UNALIGNED,
.slots = slots,
.dtypes = dtypes,
diff --git a/numpy/core/src/multiarray/dtypemeta.c b/numpy/core/src/multiarray/dtypemeta.c
index 40ca9ee2a..4ee721964 100644
--- a/numpy/core/src/multiarray/dtypemeta.c
+++ b/numpy/core/src/multiarray/dtypemeta.c
@@ -415,19 +415,6 @@ string_unicode_common_dtype(PyArray_DTypeMeta *cls, PyArray_DTypeMeta *other)
Py_INCREF(Py_NotImplemented);
return (PyArray_DTypeMeta *)Py_NotImplemented;
}
- if (other->type_num != NPY_STRING && other->type_num != NPY_UNICODE) {
- /* Deprecated 2020-12-19, NumPy 1.21. */
- if (DEPRECATE_FUTUREWARNING(
- "Promotion of numbers and bools to strings is deprecated. "
- "In the future, code such as `np.concatenate((['string'], [0]))` "
- "will raise an error, while `np.asarray(['string', 0])` will "
- "return an array with `dtype=object`. To avoid the warning "
- "while retaining a string result use `dtype='U'` (or 'S'). "
- "To get an array of Python objects use `dtype=object`. "
- "(Warning added in NumPy 1.21)") < 0) {
- return NULL;
- }
- }
/*
* The builtin types are ordered by complexity (aside from object) here.
* Arguably, we should not consider numbers and strings "common", but
diff --git a/numpy/core/tests/test_api.py b/numpy/core/tests/test_api.py
index 9e99e0bc3..291cdae89 100644
--- a/numpy/core/tests/test_api.py
+++ b/numpy/core/tests/test_api.py
@@ -281,6 +281,19 @@ def test_array_astype():
a = np.array(1000, dtype='i4')
assert_raises(TypeError, a.astype, 'U1', casting='safe')
+@pytest.mark.parametrize("dt", ["S", "U"])
+def test_array_astype_to_string_discovery_empty(dt):
+ # See also gh-19085
+ arr = np.array([""], dtype=object)
+ # Note, the itemsize is the `0 -> 1` logic, which should change.
+ # The important part of the test is rather that it does not error.
+ assert arr.astype(dt).dtype.itemsize == np.dtype(f"{dt}1").itemsize
+
+ # check the same thing for `np.can_cast` (since it accepts arrays)
+ assert np.can_cast(arr, dt, casting="unsafe")
+ assert not np.can_cast(arr, dt, casting="same_kind")
+ # as well as for the object as a descriptor:
+ assert np.can_cast("O", dt, casting="unsafe")
@pytest.mark.parametrize("dt", ["d", "f", "S13", "U32"])
def test_array_astype_to_void(dt):
diff --git a/numpy/core/tests/test_deprecations.py b/numpy/core/tests/test_deprecations.py
index ffe0147b2..42e632e4a 100644
--- a/numpy/core/tests/test_deprecations.py
+++ b/numpy/core/tests/test_deprecations.py
@@ -1105,41 +1105,6 @@ class TestNoseDecoratorsDeprecated(_DeprecationTestCase):
self.assert_deprecated(_test_parametrize)
-class TestStringPromotion(_DeprecationTestCase):
- # Deprecated 2020-12-19, NumPy 1.21
- warning_cls = FutureWarning
- message = "Promotion of numbers and bools to strings is deprecated."
-
- @pytest.mark.parametrize("dtype", "?bhilqpBHILQPefdgFDG")
- @pytest.mark.parametrize("string_dt", ["S", "U"])
- def test_deprecated(self, dtype, string_dt):
- self.assert_deprecated(lambda: np.promote_types(dtype, string_dt))
-
- # concatenate has to be able to promote to find the result dtype:
- arr1 = np.ones(3, dtype=dtype)
- arr2 = np.ones(3, dtype=string_dt)
- self.assert_deprecated(lambda: np.concatenate((arr1, arr2), axis=0))
- self.assert_deprecated(lambda: np.concatenate((arr1, arr2), axis=None))
-
- self.assert_deprecated(lambda: np.array([arr1[0], arr2[0]]))
-
- @pytest.mark.parametrize("dtype", "?bhilqpBHILQPefdgFDG")
- @pytest.mark.parametrize("string_dt", ["S", "U"])
- def test_not_deprecated(self, dtype, string_dt):
- # The ufunc type resolvers run into this, but giving a futurewarning
- # here is unnecessary (it ends up as an error anyway), so test that
- # no warning is given:
- arr1 = np.ones(3, dtype=dtype)
- arr2 = np.ones(3, dtype=string_dt)
-
- # Adding two arrays uses result_type normally, which would fail:
- with pytest.raises(TypeError):
- self.assert_not_deprecated(lambda: arr1 + arr2)
- # np.equal uses a different type resolver:
- with pytest.raises(TypeError):
- self.assert_not_deprecated(lambda: np.equal(arr1, arr2))
-
-
class TestSingleElementSignature(_DeprecationTestCase):
# Deprecated 2021-04-01, NumPy 1.21
message = r"The use of a length 1"
diff --git a/numpy/core/tests/test_half.py b/numpy/core/tests/test_half.py
index 449a01d21..1b6fd21e1 100644
--- a/numpy/core/tests/test_half.py
+++ b/numpy/core/tests/test_half.py
@@ -71,10 +71,8 @@ class TestHalf:
def test_half_conversion_to_string(self, string_dt):
# Currently uses S/U32 (which is sufficient for float32)
expected_dt = np.dtype(f"{string_dt}32")
- with pytest.warns(FutureWarning):
- assert np.promote_types(np.float16, string_dt) == expected_dt
- with pytest.warns(FutureWarning):
- assert np.promote_types(string_dt, np.float16) == expected_dt
+ assert np.promote_types(np.float16, string_dt) == expected_dt
+ assert np.promote_types(string_dt, np.float16) == expected_dt
arr = np.ones(3, dtype=np.float16).astype(string_dt)
assert arr.dtype == expected_dt
diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py
index d567653f5..25dd76256 100644
--- a/numpy/core/tests/test_multiarray.py
+++ b/numpy/core/tests/test_multiarray.py
@@ -6947,6 +6947,13 @@ class TestNeighborhoodIter:
x, [-1, 0, -1, 1], 4, NEIGH_MODE['constant'])
assert_array_equal(l, r)
+ # Test with start in the middle
+ r = [np.array([[4, 0, 1], [4, 2, 3]], dtype=dt),
+ np.array([[0, 1, 4], [2, 3, 4]], dtype=dt)]
+ l = _multiarray_tests.test_neighborhood_iterator(
+ x, [-1, 0, -1, 1], 4, NEIGH_MODE['constant'], 2)
+ assert_array_equal(l, r)
+
def test_mirror2d(self, dt):
x = np.array([[0, 1], [2, 3]], dtype=dt)
r = [np.array([[0, 0, 1], [0, 0, 1]], dtype=dt),
diff --git a/numpy/core/tests/test_numeric.py b/numpy/core/tests/test_numeric.py
index aba90ece5..f5113150e 100644
--- a/numpy/core/tests/test_numeric.py
+++ b/numpy/core/tests/test_numeric.py
@@ -848,12 +848,10 @@ class TestTypes:
assert_equal(np.promote_types('<i8', '<i8'), np.dtype('i8'))
assert_equal(np.promote_types('>i8', '>i8'), np.dtype('i8'))
- with pytest.warns(FutureWarning,
- match="Promotion of numbers and bools to strings"):
- assert_equal(np.promote_types('>i8', '>U16'), np.dtype('U21'))
- assert_equal(np.promote_types('<i8', '<U16'), np.dtype('U21'))
- assert_equal(np.promote_types('>U16', '>i8'), np.dtype('U21'))
- assert_equal(np.promote_types('<U16', '<i8'), np.dtype('U21'))
+ assert_equal(np.promote_types('>i8', '>U16'), np.dtype('U21'))
+ assert_equal(np.promote_types('<i8', '<U16'), np.dtype('U21'))
+ assert_equal(np.promote_types('>U16', '>i8'), np.dtype('U21'))
+ assert_equal(np.promote_types('<U16', '<i8'), np.dtype('U21'))
assert_equal(np.promote_types('<S5', '<U8'), np.dtype('U8'))
assert_equal(np.promote_types('>S5', '>U8'), np.dtype('U8'))
@@ -901,37 +899,32 @@ class TestTypes:
S = string_dtype
- with pytest.warns(FutureWarning,
- match="Promotion of numbers and bools to strings") as record:
- # Promote numeric with unsized string:
- assert_equal(promote_types('bool', S), np.dtype(S+'5'))
- assert_equal(promote_types('b', S), np.dtype(S+'4'))
- assert_equal(promote_types('u1', S), np.dtype(S+'3'))
- assert_equal(promote_types('u2', S), np.dtype(S+'5'))
- assert_equal(promote_types('u4', S), np.dtype(S+'10'))
- assert_equal(promote_types('u8', S), np.dtype(S+'20'))
- assert_equal(promote_types('i1', S), np.dtype(S+'4'))
- assert_equal(promote_types('i2', S), np.dtype(S+'6'))
- assert_equal(promote_types('i4', S), np.dtype(S+'11'))
- assert_equal(promote_types('i8', S), np.dtype(S+'21'))
- # Promote numeric with sized string:
- assert_equal(promote_types('bool', S+'1'), np.dtype(S+'5'))
- assert_equal(promote_types('bool', S+'30'), np.dtype(S+'30'))
- assert_equal(promote_types('b', S+'1'), np.dtype(S+'4'))
- assert_equal(promote_types('b', S+'30'), np.dtype(S+'30'))
- assert_equal(promote_types('u1', S+'1'), np.dtype(S+'3'))
- assert_equal(promote_types('u1', S+'30'), np.dtype(S+'30'))
- assert_equal(promote_types('u2', S+'1'), np.dtype(S+'5'))
- assert_equal(promote_types('u2', S+'30'), np.dtype(S+'30'))
- assert_equal(promote_types('u4', S+'1'), np.dtype(S+'10'))
- assert_equal(promote_types('u4', S+'30'), np.dtype(S+'30'))
- assert_equal(promote_types('u8', S+'1'), np.dtype(S+'20'))
- assert_equal(promote_types('u8', S+'30'), np.dtype(S+'30'))
- # Promote with object:
- assert_equal(promote_types('O', S+'30'), np.dtype('O'))
-
- assert len(record) == 22 # each string promotion gave one warning
-
+ # Promote numeric with unsized string:
+ assert_equal(promote_types('bool', S), np.dtype(S+'5'))
+ assert_equal(promote_types('b', S), np.dtype(S+'4'))
+ assert_equal(promote_types('u1', S), np.dtype(S+'3'))
+ assert_equal(promote_types('u2', S), np.dtype(S+'5'))
+ assert_equal(promote_types('u4', S), np.dtype(S+'10'))
+ assert_equal(promote_types('u8', S), np.dtype(S+'20'))
+ assert_equal(promote_types('i1', S), np.dtype(S+'4'))
+ assert_equal(promote_types('i2', S), np.dtype(S+'6'))
+ assert_equal(promote_types('i4', S), np.dtype(S+'11'))
+ assert_equal(promote_types('i8', S), np.dtype(S+'21'))
+ # Promote numeric with sized string:
+ assert_equal(promote_types('bool', S+'1'), np.dtype(S+'5'))
+ assert_equal(promote_types('bool', S+'30'), np.dtype(S+'30'))
+ assert_equal(promote_types('b', S+'1'), np.dtype(S+'4'))
+ assert_equal(promote_types('b', S+'30'), np.dtype(S+'30'))
+ assert_equal(promote_types('u1', S+'1'), np.dtype(S+'3'))
+ assert_equal(promote_types('u1', S+'30'), np.dtype(S+'30'))
+ assert_equal(promote_types('u2', S+'1'), np.dtype(S+'5'))
+ assert_equal(promote_types('u2', S+'30'), np.dtype(S+'30'))
+ assert_equal(promote_types('u4', S+'1'), np.dtype(S+'10'))
+ assert_equal(promote_types('u4', S+'30'), np.dtype(S+'30'))
+ assert_equal(promote_types('u8', S+'1'), np.dtype(S+'20'))
+ assert_equal(promote_types('u8', S+'30'), np.dtype(S+'30'))
+ # Promote with object:
+ assert_equal(promote_types('O', S+'30'), np.dtype('O'))
@pytest.mark.parametrize(["dtype1", "dtype2"],
[[np.dtype("V6"), np.dtype("V10")],
diff --git a/numpy/core/tests/test_regression.py b/numpy/core/tests/test_regression.py
index dbfb75c9a..312d0683d 100644
--- a/numpy/core/tests/test_regression.py
+++ b/numpy/core/tests/test_regression.py
@@ -782,9 +782,7 @@ class TestRegression:
# Ticket #514
s = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
t = []
- with pytest.warns(FutureWarning,
- match="Promotion of numbers and bools to strings"):
- np.hstack((t, s))
+ np.hstack((t, s))
def test_arr_transpose(self):
# Ticket #516
diff --git a/numpy/distutils/intelccompiler.py b/numpy/distutils/intelccompiler.py
index 0388ad577..0fa1c11dd 100644
--- a/numpy/distutils/intelccompiler.py
+++ b/numpy/distutils/intelccompiler.py
@@ -58,7 +58,7 @@ class IntelEM64TCCompiler(UnixCCompiler):
v = self.get_version()
mpopt = 'openmp' if v and v < '15' else 'qopenmp'
- self.cc_exe = ('icc -m64 -fPIC -fp-model strict -O3 '
+ self.cc_exe = ('icc -std=c99 -m64 -fPIC -fp-model strict -O3 '
'-fomit-frame-pointer -{}').format(mpopt)
compiler = self.cc_exe
diff --git a/numpy/lib/_version.py b/numpy/lib/_version.py
index 00e00e9a7..bfac5f814 100644
--- a/numpy/lib/_version.py
+++ b/numpy/lib/_version.py
@@ -15,7 +15,7 @@ class NumpyVersion():
"""Parse and compare numpy version strings.
NumPy has the following versioning scheme (numbers given are examples; they
- can be > 9) in principle):
+ can be > 9 in principle):
- Released version: '1.8.0', '1.8.1', etc.
- Alpha: '1.8.0a1', '1.8.0a2', etc.
@@ -54,7 +54,7 @@ class NumpyVersion():
def __init__(self, vstring):
self.vstring = vstring
- ver_main = re.match(r'\d\.\d+\.\d+', vstring)
+ ver_main = re.match(r'\d+\.\d+\.\d+', vstring)
if not ver_main:
raise ValueError("Not a valid numpy version string")
diff --git a/numpy/lib/function_base.pyi b/numpy/lib/function_base.pyi
index da24ab21d..69c615c9c 100644
--- a/numpy/lib/function_base.pyi
+++ b/numpy/lib/function_base.pyi
@@ -30,7 +30,7 @@ def gradient(f, *varargs, axis=..., edge_order=...): ...
def diff(a, n=..., axis=..., prepend = ..., append = ...): ...
def interp(x, xp, fp, left=..., right=..., period=...): ...
def angle(z, deg=...): ...
-def unwrap(p, discont = ..., axis=...): ...
+def unwrap(p, discont = ..., axis=..., *, period=...): ...
def sort_complex(a): ...
def trim_zeros(filt, trim=...): ...
def extract(condition, arr): ...
diff --git a/numpy/lib/index_tricks.py b/numpy/lib/index_tricks.py
index 72d8e9de4..5140ffa61 100644
--- a/numpy/lib/index_tricks.py
+++ b/numpy/lib/index_tricks.py
@@ -631,7 +631,8 @@ class ndindex:
Examples
--------
- # dimensions as individual arguments
+ Dimensions as individual arguments
+
>>> for index in np.ndindex(3, 2, 1):
... print(index)
(0, 0, 0)
@@ -641,7 +642,8 @@ class ndindex:
(2, 0, 0)
(2, 1, 0)
- # same dimensions - but in a tuple (3, 2, 1)
+ Same dimensions - but in a tuple ``(3, 2, 1)``
+
>>> for index in np.ndindex((3, 2, 1)):
... print(index)
(0, 0, 0)
diff --git a/numpy/lib/polynomial.py b/numpy/lib/polynomial.py
index 56fcce621..23021cafa 100644
--- a/numpy/lib/polynomial.py
+++ b/numpy/lib/polynomial.py
@@ -489,8 +489,11 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
default) just the coefficients are returned, when True diagnostic
information from the singular value decomposition is also returned.
w : array_like, shape (M,), optional
- Weights to apply to the y-coordinates of the sample points. For
- gaussian uncertainties, use 1/sigma (not 1/sigma**2).
+ Weights. If not None, the weight ``w[i]`` applies to the unsquared
+ residual ``y[i] - y_hat[i]`` at ``x[i]``. Ideally the weights are
+ chosen so that the errors of the products ``w[i]*y[i]`` all have the
+ same variance. When using inverse-variance weighting, use
+ ``w[i] = 1/sigma(y[i])``. The default value is None.
cov : bool or str, optional
If given and not `False`, return not just the estimate but also its
covariance matrix. By default, the covariance are scaled by
@@ -498,7 +501,7 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
to be unreliable except in a relative sense and everything is scaled
such that the reduced chi2 is unity. This scaling is omitted if
``cov='unscaled'``, as is relevant for the case that the weights are
- 1/sigma**2, with sigma known to be a reliable estimate of the
+ w = 1/sigma, with sigma known to be a reliable estimate of the
uncertainty.
Returns
diff --git a/numpy/lib/tests/test__version.py b/numpy/lib/tests/test__version.py
index 182504631..e6d41ad93 100644
--- a/numpy/lib/tests/test__version.py
+++ b/numpy/lib/tests/test__version.py
@@ -7,7 +7,7 @@ from numpy.lib import NumpyVersion
def test_main_versions():
assert_(NumpyVersion('1.8.0') == '1.8.0')
- for ver in ['1.9.0', '2.0.0', '1.8.1']:
+ for ver in ['1.9.0', '2.0.0', '1.8.1', '10.0.1']:
assert_(NumpyVersion('1.8.0') < ver)
for ver in ['1.7.0', '1.7.1', '0.9.9']:
diff --git a/numpy/lib/tests/test_regression.py b/numpy/lib/tests/test_regression.py
index 94fac7ef0..373226277 100644
--- a/numpy/lib/tests/test_regression.py
+++ b/numpy/lib/tests/test_regression.py
@@ -64,8 +64,7 @@ class TestRegression:
def test_mem_string_concat(self):
# Ticket #469
x = np.array([])
- with pytest.warns(FutureWarning):
- np.append(x, 'asdasd\tasdasd')
+ np.append(x, 'asdasd\tasdasd')
def test_poly_div(self):
# Ticket #553
diff --git a/numpy/polynomial/_polybase.py b/numpy/polynomial/_polybase.py
index b04b8e66b..5525b232b 100644
--- a/numpy/polynomial/_polybase.py
+++ b/numpy/polynomial/_polybase.py
@@ -936,11 +936,11 @@ class ABCPolyBase(abc.ABC):
diagnostic information from the singular value decomposition is
also returned.
w : array_like, shape (M,), optional
- Weights. If not None the contribution of each point
- ``(x[i],y[i])`` to the fit is weighted by ``w[i]``. Ideally the
- weights are chosen so that the errors of the products
- ``w[i]*y[i]`` all have the same variance. The default value is
- None.
+ Weights. If not None, the weight ``w[i]`` applies to the unsquared
+ residual ``y[i] - y_hat[i]`` at ``x[i]``. Ideally the weights are
+ chosen so that the errors of the products ``w[i]*y[i]`` all have
+ the same variance. When using inverse-variance weighting, use
+ ``w[i] = 1/sigma(y[i])``. The default value is None.
.. versionadded:: 1.5.0
window : {[beg, end]}, optional
diff --git a/numpy/polynomial/chebyshev.py b/numpy/polynomial/chebyshev.py
index d24fc738f..210000ec4 100644
--- a/numpy/polynomial/chebyshev.py
+++ b/numpy/polynomial/chebyshev.py
@@ -1582,10 +1582,11 @@ def chebfit(x, y, deg, rcond=None, full=False, w=None):
default) just the coefficients are returned, when True diagnostic
information from the singular value decomposition is also returned.
w : array_like, shape (`M`,), optional
- Weights. If not None, the contribution of each point
- ``(x[i],y[i])`` to the fit is weighted by ``w[i]``. Ideally the
- weights are chosen so that the errors of the products ``w[i]*y[i]``
- all have the same variance. The default value is None.
+ Weights. If not None, the weight ``w[i]`` applies to the unsquared
+ residual ``y[i] - y_hat[i]`` at ``x[i]``. Ideally the weights are
+ chosen so that the errors of the products ``w[i]*y[i]`` all have the
+ same variance. When using inverse-variance weighting, use
+ ``w[i] = 1/sigma(y[i])``. The default value is None.
.. versionadded:: 1.5.0
diff --git a/numpy/polynomial/hermite.py b/numpy/polynomial/hermite.py
index eef5c25b2..c1b9f71c0 100644
--- a/numpy/polynomial/hermite.py
+++ b/numpy/polynomial/hermite.py
@@ -1310,10 +1310,11 @@ def hermfit(x, y, deg, rcond=None, full=False, w=None):
default) just the coefficients are returned, when True diagnostic
information from the singular value decomposition is also returned.
w : array_like, shape (`M`,), optional
- Weights. If not None, the contribution of each point
- ``(x[i],y[i])`` to the fit is weighted by ``w[i]``. Ideally the
- weights are chosen so that the errors of the products ``w[i]*y[i]``
- all have the same variance. The default value is None.
+ Weights. If not None, the weight ``w[i]`` applies to the unsquared
+ residual ``y[i] - y_hat[i]`` at ``x[i]``. Ideally the weights are
+ chosen so that the errors of the products ``w[i]*y[i]`` all have the
+ same variance. When using inverse-variance weighting, use
+ ``w[i] = 1/sigma(y[i])``. The default value is None.
Returns
-------
diff --git a/numpy/polynomial/hermite_e.py b/numpy/polynomial/hermite_e.py
index 05d1337b0..b7095c910 100644
--- a/numpy/polynomial/hermite_e.py
+++ b/numpy/polynomial/hermite_e.py
@@ -1301,10 +1301,11 @@ def hermefit(x, y, deg, rcond=None, full=False, w=None):
default) just the coefficients are returned, when True diagnostic
information from the singular value decomposition is also returned.
w : array_like, shape (`M`,), optional
- Weights. If not None, the contribution of each point
- ``(x[i],y[i])`` to the fit is weighted by ``w[i]``. Ideally the
- weights are chosen so that the errors of the products ``w[i]*y[i]``
- all have the same variance. The default value is None.
+ Weights. If not None, the weight ``w[i]`` applies to the unsquared
+ residual ``y[i] - y_hat[i]`` at ``x[i]``. Ideally the weights are
+ chosen so that the errors of the products ``w[i]*y[i]`` all have the
+ same variance. When using inverse-variance weighting, use
+ ``w[i] = 1/sigma(y[i])``. The default value is None.
Returns
-------
diff --git a/numpy/polynomial/laguerre.py b/numpy/polynomial/laguerre.py
index 69d557510..d3b6432dc 100644
--- a/numpy/polynomial/laguerre.py
+++ b/numpy/polynomial/laguerre.py
@@ -1307,10 +1307,11 @@ def lagfit(x, y, deg, rcond=None, full=False, w=None):
default) just the coefficients are returned, when True diagnostic
information from the singular value decomposition is also returned.
w : array_like, shape (`M`,), optional
- Weights. If not None, the contribution of each point
- ``(x[i],y[i])`` to the fit is weighted by ``w[i]``. Ideally the
- weights are chosen so that the errors of the products ``w[i]*y[i]``
- all have the same variance. The default value is None.
+ Weights. If not None, the weight ``w[i]`` applies to the unsquared
+ residual ``y[i] - y_hat[i]`` at ``x[i]``. Ideally the weights are
+ chosen so that the errors of the products ``w[i]*y[i]`` all have the
+ same variance. When using inverse-variance weighting, use
+ ``w[i] = 1/sigma(y[i])``. The default value is None.
Returns
-------
diff --git a/numpy/polynomial/legendre.py b/numpy/polynomial/legendre.py
index cd4da2a79..d4cf4accf 100644
--- a/numpy/polynomial/legendre.py
+++ b/numpy/polynomial/legendre.py
@@ -1321,10 +1321,11 @@ def legfit(x, y, deg, rcond=None, full=False, w=None):
default) just the coefficients are returned, when True diagnostic
information from the singular value decomposition is also returned.
w : array_like, shape (`M`,), optional
- Weights. If not None, the contribution of each point
- ``(x[i],y[i])`` to the fit is weighted by ``w[i]``. Ideally the
- weights are chosen so that the errors of the products ``w[i]*y[i]``
- all have the same variance. The default value is None.
+ Weights. If not None, the weight ``w[i]`` applies to the unsquared
+ residual ``y[i] - y_hat[i]`` at ``x[i]``. Ideally the weights are
+ chosen so that the errors of the products ``w[i]*y[i]`` all have the
+ same variance. When using inverse-variance weighting, use
+ ``w[i] = 1/sigma(y[i])``. The default value is None.
.. versionadded:: 1.5.0
diff --git a/numpy/polynomial/polynomial.py b/numpy/polynomial/polynomial.py
index 940eed5e3..d8a032068 100644
--- a/numpy/polynomial/polynomial.py
+++ b/numpy/polynomial/polynomial.py
@@ -1252,10 +1252,11 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None):
diagnostic information from the singular value decomposition (used
to solve the fit's matrix equation) is also returned.
w : array_like, shape (`M`,), optional
- Weights. If not None, the contribution of each point
- ``(x[i],y[i])`` to the fit is weighted by ``w[i]``. Ideally the
- weights are chosen so that the errors of the products ``w[i]*y[i]``
- all have the same variance. The default value is None.
+ Weights. If not None, the weight ``w[i]`` applies to the unsquared
+ residual ``y[i] - y_hat[i]`` at ``x[i]``. Ideally the weights are
+ chosen so that the errors of the products ``w[i]*y[i]`` all have the
+ same variance. When using inverse-variance weighting, use
+ ``w[i] = 1/sigma(y[i])``. The default value is None.
.. versionadded:: 1.5.0
diff --git a/numpy/testing/__init__.py b/numpy/testing/__init__.py
index e1f87621f..064a74a16 100644
--- a/numpy/testing/__init__.py
+++ b/numpy/testing/__init__.py
@@ -8,6 +8,7 @@ away.
from unittest import TestCase
from ._private.utils import *
+from ._private.utils import _assert_valid_refcount, _gen_alignment_data
from ._private import decorators as dec
from ._private.nosetester import (
run_module_suite, NoseTester as Tester
diff --git a/numpy/testing/__init__.pyi b/numpy/testing/__init__.pyi
index 395626f6b..955dae862 100644
--- a/numpy/testing/__init__.pyi
+++ b/numpy/testing/__init__.pyi
@@ -1,113 +1,53 @@
-import sys
-import warnings
-from typing import Any, List, ClassVar, Tuple, Set
-
-if sys.version_info >= (3, 8):
- from typing import Final
-else:
- from typing_extensions import Final
+from typing import List
from unittest import (
TestCase as TestCase,
)
-from unittest.case import (
+from numpy.testing._private.utils import (
+ assert_equal as assert_equal,
+ assert_almost_equal as assert_almost_equal,
+ assert_approx_equal as assert_approx_equal,
+ assert_array_equal as assert_array_equal,
+ assert_array_less as assert_array_less,
+ assert_string_equal as assert_string_equal,
+ assert_array_almost_equal as assert_array_almost_equal,
+ assert_raises as assert_raises,
+ build_err_msg as build_err_msg,
+ decorate_methods as decorate_methods,
+ jiffies as jiffies,
+ memusage as memusage,
+ print_assert_equal as print_assert_equal,
+ raises as raises,
+ rundocs as rundocs,
+ runstring as runstring,
+ verbose as verbose,
+ measure as measure,
+ assert_ as assert_,
+ assert_array_almost_equal_nulp as assert_array_almost_equal_nulp,
+ assert_raises_regex as assert_raises_regex,
+ assert_array_max_ulp as assert_array_max_ulp,
+ assert_warns as assert_warns,
+ assert_no_warnings as assert_no_warnings,
+ assert_allclose as assert_allclose,
+ IgnoreException as IgnoreException,
+ clear_and_catch_warnings as clear_and_catch_warnings,
SkipTest as SkipTest,
+ KnownFailureException as KnownFailureException,
+ temppath as temppath,
+ tempdir as tempdir,
+ IS_PYPY as IS_PYPY,
+ HAS_REFCOUNT as HAS_REFCOUNT,
+ suppress_warnings as suppress_warnings,
+ assert_array_compare as assert_array_compare,
+ assert_no_gc_cycles as assert_no_gc_cycles,
+ break_cycles as break_cycles,
+ HAS_LAPACK64 as HAS_LAPACK64,
)
__all__: List[str]
-def run_module_suite(file_to_run=..., argv=...): ...
-
-class KnownFailureException(Exception): ...
-class IgnoreException(Exception): ...
-
-class clear_and_catch_warnings(warnings.catch_warnings):
- class_modules: ClassVar[Tuple[str, ...]]
- modules: Set[str]
- def __init__(self, record=..., modules=...): ...
- def __enter__(self): ...
- def __exit__(self, *exc_info): ...
-
-class suppress_warnings:
- log: List[warnings.WarningMessage]
- def __init__(self, forwarding_rule=...): ...
- def filter(self, category=..., message=..., module=...): ...
- def record(self, category=..., message=..., module=...): ...
- def __enter__(self): ...
- def __exit__(self, *exc_info): ...
- def __call__(self, func): ...
-
-verbose: int
-IS_PYPY: Final[bool]
-HAS_REFCOUNT: Final[bool]
-HAS_LAPACK64: Final[bool]
-
-def assert_(val, msg=...): ...
-def memusage(processName=..., instance=...): ...
-def jiffies(_proc_pid_stat=..., _load_time=...): ...
-def build_err_msg(
- arrays,
- err_msg,
- header=...,
- verbose=...,
- names=...,
- precision=...,
-): ...
-def assert_equal(actual, desired, err_msg=..., verbose=...): ...
-def print_assert_equal(test_string, actual, desired): ...
-def assert_almost_equal(
- actual,
- desired,
- decimal=...,
- err_msg=...,
- verbose=...,
-): ...
-def assert_approx_equal(
- actual,
- desired,
- significant=...,
- err_msg=...,
- verbose=...,
-): ...
-def assert_array_compare(
- comparison,
- x,
- y,
- err_msg=...,
- verbose=...,
- header=...,
- precision=...,
- equal_nan=...,
- equal_inf=...,
-): ...
-def assert_array_equal(x, y, err_msg=..., verbose=...): ...
-def assert_array_almost_equal(x, y, decimal=..., err_msg=..., verbose=...): ...
-def assert_array_less(x, y, err_msg=..., verbose=...): ...
-def runstring(astr, dict): ...
-def assert_string_equal(actual, desired): ...
-def rundocs(filename=..., raise_on_error=...): ...
-def raises(*args): ...
-def assert_raises(*args, **kwargs): ...
-def assert_raises_regex(exception_class, expected_regexp, *args, **kwargs): ...
-def decorate_methods(cls, decorator, testmatch=...): ...
-def measure(code_str, times=..., label=...): ...
-def assert_allclose(
- actual,
- desired,
- rtol=...,
- atol=...,
- equal_nan=...,
- err_msg=...,
- verbose=...,
-): ...
-def assert_array_almost_equal_nulp(x, y, nulp=...): ...
-def assert_array_max_ulp(a, b, maxulp=..., dtype=...): ...
-def assert_warns(warning_class, *args, **kwargs): ...
-def assert_no_warnings(*args, **kwargs): ...
-def tempdir(*args, **kwargs): ...
-def temppath(*args, **kwargs): ...
-def assert_no_gc_cycles(*args, **kwargs): ...
-def break_cycles(): ...
-def _assert_valid_refcount(op): ...
-def _gen_alignment_data(dtype=..., type=..., max_size=...): ...
+def run_module_suite(
+ file_to_run: None | str = ...,
+ argv: None | List[str] = ...,
+) -> None: ...
diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py
index 393fedc27..487aa0b4c 100644
--- a/numpy/testing/_private/utils.py
+++ b/numpy/testing/_private/utils.py
@@ -35,8 +35,7 @@ __all__ = [
'assert_allclose', 'IgnoreException', 'clear_and_catch_warnings',
'SkipTest', 'KnownFailureException', 'temppath', 'tempdir', 'IS_PYPY',
'HAS_REFCOUNT', 'suppress_warnings', 'assert_array_compare',
- '_assert_valid_refcount', '_gen_alignment_data', 'assert_no_gc_cycles',
- 'break_cycles', 'HAS_LAPACK64'
+ 'assert_no_gc_cycles', 'break_cycles', 'HAS_LAPACK64'
]
@@ -2518,4 +2517,3 @@ def _no_tracing(func):
finally:
sys.settrace(original_trace)
return wrapper
-
diff --git a/numpy/testing/_private/utils.pyi b/numpy/testing/_private/utils.pyi
new file mode 100644
index 000000000..29915309f
--- /dev/null
+++ b/numpy/testing/_private/utils.pyi
@@ -0,0 +1,396 @@
+import os
+import sys
+import ast
+import types
+import warnings
+import unittest
+import contextlib
+from typing import (
+ Any,
+ AnyStr,
+ Callable,
+ ClassVar,
+ Dict,
+ Iterable,
+ List,
+ NoReturn,
+ overload,
+ Pattern,
+ Sequence,
+ Set,
+ Tuple,
+ Type,
+ type_check_only,
+ TypeVar,
+ Union,
+)
+
+from numpy import generic, dtype, number, object_, bool_, _FloatValue
+from numpy.typing import (
+ NDArray,
+ ArrayLike,
+ DTypeLike,
+ _ArrayLikeNumber_co,
+ _ArrayLikeObject_co,
+ _ArrayLikeTD64_co,
+ _ArrayLikeDT64_co,
+)
+
+from unittest.case import (
+ SkipTest as SkipTest,
+)
+
+if sys.version_info >= (3, 8):
+ from typing import Final, SupportsIndex, Literal as L
+else:
+ from typing_extensions import Final, SupportsIndex, Literal as L
+
+_T = TypeVar("_T")
+_ET = TypeVar("_ET", bound=BaseException)
+_FT = TypeVar("_FT", bound=Callable[..., Any])
+
+# Must return a bool or an ndarray/generic type
+# that is supported by `np.logical_and.reduce`
+_ComparisonFunc = Callable[
+ [NDArray[Any], NDArray[Any]],
+ Union[
+ bool,
+ bool_,
+ number[Any],
+ NDArray[Union[bool_, number[Any], object_]],
+ ],
+]
+
+__all__: List[str]
+
+class KnownFailureException(Exception): ...
+class IgnoreException(Exception): ...
+
+class clear_and_catch_warnings(warnings.catch_warnings):
+ class_modules: ClassVar[Tuple[types.ModuleType, ...]]
+ modules: Set[types.ModuleType]
+ @overload
+ def __new__(
+ cls,
+ record: L[False] = ...,
+ modules: Iterable[types.ModuleType] = ...,
+ ) -> _clear_and_catch_warnings_without_records: ...
+ @overload
+ def __new__(
+ cls,
+ record: L[True],
+ modules: Iterable[types.ModuleType] = ...,
+ ) -> _clear_and_catch_warnings_with_records: ...
+ @overload
+ def __new__(
+ cls,
+ record: bool,
+ modules: Iterable[types.ModuleType] = ...,
+ ) -> clear_and_catch_warnings: ...
+ def __enter__(self) -> None | List[warnings.WarningMessage]: ...
+ def __exit__(
+ self,
+ __exc_type: None | Type[BaseException] = ...,
+ __exc_val: None | BaseException = ...,
+ __exc_tb: None | types.TracebackType = ...,
+ ) -> None: ...
+
+# Type-check only `clear_and_catch_warnings` subclasses for both values of the
+# `record` parameter. Copied from the stdlib `warnings` stubs.
+
+@type_check_only
+class _clear_and_catch_warnings_with_records(clear_and_catch_warnings):
+ def __enter__(self) -> List[warnings.WarningMessage]: ...
+
+@type_check_only
+class _clear_and_catch_warnings_without_records(clear_and_catch_warnings):
+ def __enter__(self) -> None: ...
+
+class suppress_warnings:
+ log: List[warnings.WarningMessage]
+ def __init__(
+ self,
+ forwarding_rule: L["always", "module", "once", "location"] = ...,
+ ) -> None: ...
+ def filter(
+ self,
+ category: Type[Warning] = ...,
+ message: str = ...,
+ module: None | types.ModuleType = ...,
+ ) -> None: ...
+ def record(
+ self,
+ category: Type[Warning] = ...,
+ message: str = ...,
+ module: None | types.ModuleType = ...,
+ ) -> List[warnings.WarningMessage]: ...
+ def __enter__(self: _T) -> _T: ...
+ def __exit__(
+ self,
+ __exc_type: None | Type[BaseException] = ...,
+ __exc_val: None | BaseException = ...,
+ __exc_tb: None | types.TracebackType = ...,
+ ) -> None: ...
+ def __call__(self, func: _FT) -> _FT: ...
+
+verbose: int
+IS_PYPY: Final[bool]
+HAS_REFCOUNT: Final[bool]
+HAS_LAPACK64: Final[bool]
+
+def assert_(val: object, msg: str | Callable[[], str] = ...) -> None: ...
+
+# Contrary to runtime we can't do `os.name` checks while type checking,
+# only `sys.platform` checks
+if sys.platform == "win32" or sys.platform == "cygwin":
+ def memusage(processName: str = ..., instance: int = ...) -> int: ...
+elif sys.platform == "linux":
+ def memusage(_proc_pid_stat: str | bytes | os.PathLike[Any] = ...) -> None | int: ...
+else:
+ def memusage() -> NoReturn: ...
+
+if sys.platform == "linux":
+ def jiffies(
+ _proc_pid_stat: str | bytes | os.PathLike[Any] = ...,
+ _load_time: List[float] = ...,
+ ) -> int: ...
+else:
+ def jiffies(_load_time: List[float] = ...) -> int: ...
+
+def build_err_msg(
+ arrays: Iterable[object],
+ err_msg: str,
+ header: str = ...,
+ verbose: bool = ...,
+ names: Sequence[str] = ...,
+ precision: None | SupportsIndex = ...,
+) -> str: ...
+
+def assert_equal(
+ actual: object,
+ desired: object,
+ err_msg: str = ...,
+ verbose: bool = ...,
+) -> None: ...
+
+def print_assert_equal(
+ test_string: str,
+ actual: object,
+ desired: object,
+) -> None: ...
+
+def assert_almost_equal(
+ actual: _ArrayLikeNumber_co | _ArrayLikeObject_co,
+ desired: _ArrayLikeNumber_co | _ArrayLikeObject_co,
+ decimal: int = ...,
+ err_msg: str = ...,
+ verbose: bool = ...,
+) -> None: ...
+
+# Anything that can be coerced into `builtins.float`
+def assert_approx_equal(
+ actual: _FloatValue,
+ desired: _FloatValue,
+ significant: int = ...,
+ err_msg: str = ...,
+ verbose: bool = ...,
+) -> None: ...
+
+def assert_array_compare(
+ comparison: _ComparisonFunc,
+ x: ArrayLike,
+ y: ArrayLike,
+ err_msg: str = ...,
+ verbose: bool = ...,
+ header: str = ...,
+ precision: SupportsIndex = ...,
+ equal_nan: bool = ...,
+ equal_inf: bool = ...,
+) -> None: ...
+
+def assert_array_equal(
+ x: ArrayLike,
+ y: ArrayLike,
+ err_msg: str = ...,
+ verbose: bool = ...,
+) -> None: ...
+
+def assert_array_almost_equal(
+ x: _ArrayLikeNumber_co | _ArrayLikeObject_co,
+ y: _ArrayLikeNumber_co | _ArrayLikeObject_co,
+ decimal: float = ...,
+ err_msg: str = ...,
+ verbose: bool = ...,
+) -> None: ...
+
+@overload
+def assert_array_less(
+ x: _ArrayLikeNumber_co | _ArrayLikeObject_co,
+ y: _ArrayLikeNumber_co | _ArrayLikeObject_co,
+ err_msg: str = ...,
+ verbose: bool = ...,
+) -> None: ...
+@overload
+def assert_array_less(
+ x: _ArrayLikeTD64_co,
+ y: _ArrayLikeTD64_co,
+ err_msg: str = ...,
+ verbose: bool = ...,
+) -> None: ...
+@overload
+def assert_array_less(
+ x: _ArrayLikeDT64_co,
+ y: _ArrayLikeDT64_co,
+ err_msg: str = ...,
+ verbose: bool = ...,
+) -> None: ...
+
+def runstring(
+ astr: str | bytes | types.CodeType,
+ dict: None | Dict[str, Any],
+) -> Any: ...
+
+def assert_string_equal(actual: str, desired: str) -> None: ...
+
+def rundocs(
+ filename: None | str | os.PathLike[str] = ...,
+ raise_on_error: bool = ...,
+) -> None: ...
+
+def raises(*args: Type[BaseException]) -> Callable[[_FT], _FT]: ...
+
+@overload
+def assert_raises( # type: ignore
+ __expected_exception: Type[BaseException] | Tuple[Type[BaseException], ...],
+ __callable: Callable[..., Any],
+ *args: Any,
+ **kwargs: Any,
+) -> None: ...
+@overload
+def assert_raises(
+ expected_exception: Type[_ET] | Tuple[Type[_ET], ...],
+ *,
+ msg: None | str = ...,
+) -> unittest.case._AssertRaisesContext[_ET]: ...
+
+@overload
+def assert_raises_regex(
+ __expected_exception: Type[BaseException] | Tuple[Type[BaseException], ...],
+ __expected_regex: str | bytes | Pattern[Any],
+ __callable: Callable[..., Any],
+ *args: Any,
+ **kwargs: Any,
+) -> None: ...
+@overload
+def assert_raises_regex(
+ expected_exception: Type[_ET] | Tuple[Type[_ET], ...],
+ expected_regex: str | bytes | Pattern[Any],
+ *,
+ msg: None | str = ...,
+) -> unittest.case._AssertRaisesContext[_ET]: ...
+
+def decorate_methods(
+ cls: Type[Any],
+ decorator: Callable[[Callable[..., Any]], Any],
+ testmatch: None | str | bytes | Pattern[Any] = ...,
+) -> None: ...
+
+def measure(
+ code_str: str | bytes | ast.mod | ast.AST,
+ times: int = ...,
+ label: None | str = ...,
+) -> float: ...
+
+@overload
+def assert_allclose(
+ actual: _ArrayLikeNumber_co | _ArrayLikeObject_co,
+ desired: _ArrayLikeNumber_co | _ArrayLikeObject_co,
+ rtol: float = ...,
+ atol: float = ...,
+ equal_nan: bool = ...,
+ err_msg: str = ...,
+ verbose: bool = ...,
+) -> None: ...
+@overload
+def assert_allclose(
+ actual: _ArrayLikeTD64_co,
+ desired: _ArrayLikeTD64_co,
+ rtol: float = ...,
+ atol: float = ...,
+ equal_nan: bool = ...,
+ err_msg: str = ...,
+ verbose: bool = ...,
+) -> None: ...
+
+def assert_array_almost_equal_nulp(
+ x: _ArrayLikeNumber_co,
+ y: _ArrayLikeNumber_co,
+ nulp: float = ...,
+) -> None: ...
+
+def assert_array_max_ulp(
+ a: _ArrayLikeNumber_co,
+ b: _ArrayLikeNumber_co,
+ maxulp: float = ...,
+ dtype: DTypeLike = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def assert_warns(
+ warning_class: Type[Warning],
+) -> contextlib._GeneratorContextManager[None]: ...
+@overload
+def assert_warns(
+ __warning_class: Type[Warning],
+ __func: Callable[..., _T],
+ *args: Any,
+ **kwargs: Any,
+) -> _T: ...
+
+@overload
+def assert_no_warnings() -> contextlib._GeneratorContextManager[None]: ...
+@overload
+def assert_no_warnings(
+ __func: Callable[..., _T],
+ *args: Any,
+ **kwargs: Any,
+) -> _T: ...
+
+@overload
+def tempdir(
+ suffix: None = ...,
+ prefix: None = ...,
+ dir: None = ...,
+) -> contextlib._GeneratorContextManager[str]: ...
+@overload
+def tempdir(
+ suffix: None | AnyStr = ...,
+ prefix: None | AnyStr = ...,
+ dir: None | AnyStr | os.PathLike[AnyStr] = ...,
+) -> contextlib._GeneratorContextManager[AnyStr]: ...
+
+@overload
+def temppath(
+ suffix: None = ...,
+ prefix: None = ...,
+ dir: None = ...,
+ text: bool = ...,
+) -> contextlib._GeneratorContextManager[str]: ...
+@overload
+def temppath(
+ suffix: None | AnyStr = ...,
+ prefix: None | AnyStr = ...,
+ dir: None | AnyStr | os.PathLike[AnyStr] = ...,
+ text: bool = ...,
+) -> contextlib._GeneratorContextManager[AnyStr]: ...
+
+@overload
+def assert_no_gc_cycles() -> contextlib._GeneratorContextManager[None]: ...
+@overload
+def assert_no_gc_cycles(
+ __func: Callable[..., Any],
+ *args: Any,
+ **kwargs: Any,
+) -> None: ...
+
+def break_cycles() -> None: ...
diff --git a/numpy/testing/setup.py b/numpy/testing/setup.py
index 7652a94a2..6f203e872 100755
--- a/numpy/testing/setup.py
+++ b/numpy/testing/setup.py
@@ -7,6 +7,7 @@ def configuration(parent_package='',top_path=None):
config.add_subpackage('_private')
config.add_subpackage('tests')
config.add_data_files('*.pyi')
+ config.add_data_files('_private/*.pyi')
return config
if __name__ == '__main__':
diff --git a/numpy/testing/utils.py b/numpy/testing/utils.py
index 753258c13..20a883304 100644
--- a/numpy/testing/utils.py
+++ b/numpy/testing/utils.py
@@ -12,6 +12,7 @@ warnings.warn("Importing from numpy.testing.utils is deprecated "
DeprecationWarning, stacklevel=2)
from ._private.utils import *
+from ._private.utils import _assert_valid_refcount, _gen_alignment_data
__all__ = [
'assert_equal', 'assert_almost_equal', 'assert_approx_equal',
@@ -24,5 +25,5 @@ __all__ = [
'assert_allclose', 'IgnoreException', 'clear_and_catch_warnings',
'SkipTest', 'KnownFailureException', 'temppath', 'tempdir', 'IS_PYPY',
'HAS_REFCOUNT', 'suppress_warnings', 'assert_array_compare',
- '_assert_valid_refcount', '_gen_alignment_data', 'assert_no_gc_cycles'
+ 'assert_no_gc_cycles'
]
diff --git a/numpy/typing/__init__.py b/numpy/typing/__init__.py
index 252123a19..19424169a 100644
--- a/numpy/typing/__init__.py
+++ b/numpy/typing/__init__.py
@@ -164,7 +164,7 @@ API
# NOTE: The API section will be appended with additional entries
# further down in this file
-from typing import TYPE_CHECKING, List
+from typing import TYPE_CHECKING, List, Any
if TYPE_CHECKING:
# typing_extensions is always available when type-checking
@@ -376,14 +376,14 @@ if TYPE_CHECKING:
_GUFunc_Nin2_Nout1,
)
else:
- _UFunc_Nin1_Nout1 = NotImplemented
- _UFunc_Nin2_Nout1 = NotImplemented
- _UFunc_Nin1_Nout2 = NotImplemented
- _UFunc_Nin2_Nout2 = NotImplemented
- _GUFunc_Nin2_Nout1 = NotImplemented
+ _UFunc_Nin1_Nout1 = Any
+ _UFunc_Nin2_Nout1 = Any
+ _UFunc_Nin1_Nout2 = Any
+ _UFunc_Nin2_Nout2 = Any
+ _GUFunc_Nin2_Nout1 = Any
# Clean up the namespace
-del TYPE_CHECKING, final, List
+del TYPE_CHECKING, final, List, Any
if __doc__ is not None:
from ._add_docstring import _docstrings
diff --git a/numpy/typing/_array_like.py b/numpy/typing/_array_like.py
index 2b823ecc0..3bdbed8f8 100644
--- a/numpy/typing/_array_like.py
+++ b/numpy/typing/_array_like.py
@@ -1,7 +1,7 @@
from __future__ import annotations
import sys
-from typing import Any, Sequence, TYPE_CHECKING, Union, TypeVar
+from typing import Any, Sequence, TYPE_CHECKING, Union, TypeVar, Generic
from numpy import (
ndarray,
@@ -34,7 +34,7 @@ _ScalarType = TypeVar("_ScalarType", bound=generic)
_DType = TypeVar("_DType", bound="dtype[Any]")
_DType_co = TypeVar("_DType_co", covariant=True, bound="dtype[Any]")
-if TYPE_CHECKING or _HAS_TYPING_EXTENSIONS:
+if TYPE_CHECKING or _HAS_TYPING_EXTENSIONS or sys.version_info >= (3, 8):
# The `_SupportsArray` protocol only cares about the default dtype
# (i.e. `dtype=None` or no `dtype` parameter at all) of the to-be returned
# array.
@@ -43,7 +43,7 @@ if TYPE_CHECKING or _HAS_TYPING_EXTENSIONS:
class _SupportsArray(Protocol[_DType_co]):
def __array__(self) -> ndarray[Any, _DType_co]: ...
else:
- _SupportsArray = Any
+ class _SupportsArray(Generic[_DType_co]): ...
# TODO: Wait for support for recursive types
_NestedSequence = Union[
diff --git a/numpy/typing/_callable.py b/numpy/typing/_callable.py
index 54f9b1425..8f911da3b 100644
--- a/numpy/typing/_callable.py
+++ b/numpy/typing/_callable.py
@@ -53,7 +53,7 @@ if sys.version_info >= (3, 8):
elif _HAS_TYPING_EXTENSIONS:
from typing_extensions import Protocol
-if TYPE_CHECKING or _HAS_TYPING_EXTENSIONS:
+if TYPE_CHECKING or _HAS_TYPING_EXTENSIONS or sys.version_info >= (3, 8):
_T1 = TypeVar("_T1")
_T2 = TypeVar("_T2")
_2Tuple = Tuple[_T1, _T1]
@@ -332,25 +332,25 @@ if TYPE_CHECKING or _HAS_TYPING_EXTENSIONS:
def __call__(self, __other: _T2) -> NDArray[bool_]: ...
else:
- _BoolOp = NotImplemented
- _BoolBitOp = NotImplemented
- _BoolSub = NotImplemented
- _BoolTrueDiv = NotImplemented
- _BoolMod = NotImplemented
- _BoolDivMod = NotImplemented
- _TD64Div = NotImplemented
- _IntTrueDiv = NotImplemented
- _UnsignedIntOp = NotImplemented
- _UnsignedIntBitOp = NotImplemented
- _UnsignedIntMod = NotImplemented
- _UnsignedIntDivMod = NotImplemented
- _SignedIntOp = NotImplemented
- _SignedIntBitOp = NotImplemented
- _SignedIntMod = NotImplemented
- _SignedIntDivMod = NotImplemented
- _FloatOp = NotImplemented
- _FloatMod = NotImplemented
- _FloatDivMod = NotImplemented
- _ComplexOp = NotImplemented
- _NumberOp = NotImplemented
- _ComparisonOp = NotImplemented
+ _BoolOp = Any
+ _BoolBitOp = Any
+ _BoolSub = Any
+ _BoolTrueDiv = Any
+ _BoolMod = Any
+ _BoolDivMod = Any
+ _TD64Div = Any
+ _IntTrueDiv = Any
+ _UnsignedIntOp = Any
+ _UnsignedIntBitOp = Any
+ _UnsignedIntMod = Any
+ _UnsignedIntDivMod = Any
+ _SignedIntOp = Any
+ _SignedIntBitOp = Any
+ _SignedIntMod = Any
+ _SignedIntDivMod = Any
+ _FloatOp = Any
+ _FloatMod = Any
+ _FloatDivMod = Any
+ _ComplexOp = Any
+ _NumberOp = Any
+ _ComparisonOp = Any
diff --git a/numpy/typing/_char_codes.py b/numpy/typing/_char_codes.py
index 6b33f995d..22ee168e9 100644
--- a/numpy/typing/_char_codes.py
+++ b/numpy/typing/_char_codes.py
@@ -8,7 +8,7 @@ if sys.version_info >= (3, 8):
elif _HAS_TYPING_EXTENSIONS:
from typing_extensions import Literal
-if TYPE_CHECKING or _HAS_TYPING_EXTENSIONS:
+if TYPE_CHECKING or _HAS_TYPING_EXTENSIONS or sys.version_info >= (3, 8):
_BoolCodes = Literal["?", "=?", "<?", ">?", "bool", "bool_", "bool8"]
_UInt8Codes = Literal["uint8", "u1", "=u1", "<u1", ">u1"]
@@ -120,52 +120,52 @@ if TYPE_CHECKING or _HAS_TYPING_EXTENSIONS:
]
else:
- _BoolCodes = NotImplemented
-
- _UInt8Codes = NotImplemented
- _UInt16Codes = NotImplemented
- _UInt32Codes = NotImplemented
- _UInt64Codes = NotImplemented
-
- _Int8Codes = NotImplemented
- _Int16Codes = NotImplemented
- _Int32Codes = NotImplemented
- _Int64Codes = NotImplemented
-
- _Float16Codes = NotImplemented
- _Float32Codes = NotImplemented
- _Float64Codes = NotImplemented
-
- _Complex64Codes = NotImplemented
- _Complex128Codes = NotImplemented
-
- _ByteCodes = NotImplemented
- _ShortCodes = NotImplemented
- _IntCCodes = NotImplemented
- _IntPCodes = NotImplemented
- _IntCodes = NotImplemented
- _LongLongCodes = NotImplemented
-
- _UByteCodes = NotImplemented
- _UShortCodes = NotImplemented
- _UIntCCodes = NotImplemented
- _UIntPCodes = NotImplemented
- _UIntCodes = NotImplemented
- _ULongLongCodes = NotImplemented
-
- _HalfCodes = NotImplemented
- _SingleCodes = NotImplemented
- _DoubleCodes = NotImplemented
- _LongDoubleCodes = NotImplemented
-
- _CSingleCodes = NotImplemented
- _CDoubleCodes = NotImplemented
- _CLongDoubleCodes = NotImplemented
-
- _StrCodes = NotImplemented
- _BytesCodes = NotImplemented
- _VoidCodes = NotImplemented
- _ObjectCodes = NotImplemented
-
- _DT64Codes = NotImplemented
- _TD64Codes = NotImplemented
+ _BoolCodes = Any
+
+ _UInt8Codes = Any
+ _UInt16Codes = Any
+ _UInt32Codes = Any
+ _UInt64Codes = Any
+
+ _Int8Codes = Any
+ _Int16Codes = Any
+ _Int32Codes = Any
+ _Int64Codes = Any
+
+ _Float16Codes = Any
+ _Float32Codes = Any
+ _Float64Codes = Any
+
+ _Complex64Codes = Any
+ _Complex128Codes = Any
+
+ _ByteCodes = Any
+ _ShortCodes = Any
+ _IntCCodes = Any
+ _IntPCodes = Any
+ _IntCodes = Any
+ _LongLongCodes = Any
+
+ _UByteCodes = Any
+ _UShortCodes = Any
+ _UIntCCodes = Any
+ _UIntPCodes = Any
+ _UIntCodes = Any
+ _ULongLongCodes = Any
+
+ _HalfCodes = Any
+ _SingleCodes = Any
+ _DoubleCodes = Any
+ _LongDoubleCodes = Any
+
+ _CSingleCodes = Any
+ _CDoubleCodes = Any
+ _CLongDoubleCodes = Any
+
+ _StrCodes = Any
+ _BytesCodes = Any
+ _VoidCodes = Any
+ _ObjectCodes = Any
+
+ _DT64Codes = Any
+ _TD64Codes = Any
diff --git a/numpy/typing/_dtype_like.py b/numpy/typing/_dtype_like.py
index 405cc4a3c..b2ce3adb4 100644
--- a/numpy/typing/_dtype_like.py
+++ b/numpy/typing/_dtype_like.py
@@ -11,9 +11,6 @@ if sys.version_info >= (3, 8):
from typing import Protocol, TypedDict
elif _HAS_TYPING_EXTENSIONS:
from typing_extensions import Protocol, TypedDict
-
-if sys.version_info >= (3, 9):
- from types import GenericAlias
else:
from ._generic_alias import _GenericAlias as GenericAlias
@@ -62,7 +59,7 @@ from ._char_codes import (
_DTypeLikeNested = Any # TODO: wait for support for recursive types
_DType_co = TypeVar("_DType_co", covariant=True, bound=DType[Any])
-if TYPE_CHECKING or _HAS_TYPING_EXTENSIONS:
+if TYPE_CHECKING or _HAS_TYPING_EXTENSIONS or sys.version_info >= (3, 8):
# Mandatory keys
class _DTypeDictBase(TypedDict):
names: Sequence[str]
@@ -81,7 +78,7 @@ if TYPE_CHECKING or _HAS_TYPING_EXTENSIONS:
def dtype(self) -> _DType_co: ...
else:
- _DTypeDict = NotImplemented
+ _DTypeDict = Any
class _SupportsDType: ...
_SupportsDType = GenericAlias(_SupportsDType, _DType_co)
diff --git a/numpy/typing/_extended_precision.py b/numpy/typing/_extended_precision.py
index 3f1ce2038..0900bc659 100644
--- a/numpy/typing/_extended_precision.py
+++ b/numpy/typing/_extended_precision.py
@@ -28,15 +28,15 @@ if TYPE_CHECKING:
complex256 = np.complexfloating[_128Bit, _128Bit]
complex512 = np.complexfloating[_256Bit, _256Bit]
else:
- uint128 = NotImplemented
- uint256 = NotImplemented
- int128 = NotImplemented
- int256 = NotImplemented
- float80 = NotImplemented
- float96 = NotImplemented
- float128 = NotImplemented
- float256 = NotImplemented
- complex160 = NotImplemented
- complex192 = NotImplemented
- complex256 = NotImplemented
- complex512 = NotImplemented
+ uint128 = Any
+ uint256 = Any
+ int128 = Any
+ int256 = Any
+ float80 = Any
+ float96 = Any
+ float128 = Any
+ float256 = Any
+ complex160 = Any
+ complex192 = Any
+ complex256 = Any
+ complex512 = Any
diff --git a/numpy/typing/_generic_alias.py b/numpy/typing/_generic_alias.py
index 0d30f54ca..8d65ef855 100644
--- a/numpy/typing/_generic_alias.py
+++ b/numpy/typing/_generic_alias.py
@@ -93,7 +93,7 @@ class _GenericAlias:
return super().__getattribute__("_origin")
@property
- def __args__(self) -> Tuple[Any, ...]:
+ def __args__(self) -> Tuple[object, ...]:
return super().__getattribute__("_args")
@property
@@ -101,16 +101,23 @@ class _GenericAlias:
"""Type variables in the ``GenericAlias``."""
return super().__getattribute__("_parameters")
- def __init__(self, origin: type, args: Any) -> None:
+ def __init__(
+ self,
+ origin: type,
+ args: object | Tuple[object, ...],
+ ) -> None:
self._origin = origin
self._args = args if isinstance(args, tuple) else (args,)
- self._parameters = tuple(_parse_parameters(args))
+ self._parameters = tuple(_parse_parameters(self.__args__))
@property
def __call__(self) -> type:
return self.__origin__
- def __reduce__(self: _T) -> Tuple[Type[_T], Tuple[type, Tuple[Any, ...]]]:
+ def __reduce__(self: _T) -> Tuple[
+ Type[_T],
+ Tuple[type, Tuple[object, ...]],
+ ]:
cls = type(self)
return cls, (self.__origin__, self.__args__)
@@ -148,7 +155,7 @@ class _GenericAlias:
origin = _to_str(self.__origin__)
return f"{origin}[{args}]"
- def __getitem__(self: _T, key: Any) -> _T:
+ def __getitem__(self: _T, key: object | Tuple[object, ...]) -> _T:
"""Return ``self[key]``."""
key_tup = key if isinstance(key, tuple) else (key,)
diff --git a/numpy/typing/_shape.py b/numpy/typing/_shape.py
index 0742be8a9..75698f3d3 100644
--- a/numpy/typing/_shape.py
+++ b/numpy/typing/_shape.py
@@ -1,5 +1,5 @@
import sys
-from typing import Sequence, Tuple, Union
+from typing import Sequence, Tuple, Union, Any
from . import _HAS_TYPING_EXTENSIONS
@@ -8,7 +8,7 @@ if sys.version_info >= (3, 8):
elif _HAS_TYPING_EXTENSIONS:
from typing_extensions import SupportsIndex
else:
- SupportsIndex = NotImplemented
+ SupportsIndex = Any
_Shape = Tuple[int, ...]
diff --git a/numpy/typing/tests/data/fail/testing.py b/numpy/typing/tests/data/fail/testing.py
new file mode 100644
index 000000000..e753a9810
--- /dev/null
+++ b/numpy/typing/tests/data/fail/testing.py
@@ -0,0 +1,26 @@
+import numpy as np
+import numpy.typing as npt
+
+AR_U: npt.NDArray[np.str_]
+
+def func() -> bool: ...
+
+np.testing.assert_(True, msg=1) # E: incompatible type
+np.testing.build_err_msg(1, "test") # E: incompatible type
+np.testing.assert_almost_equal(AR_U, AR_U) # E: incompatible type
+np.testing.assert_approx_equal([1, 2, 3], [1, 2, 3]) # E: incompatible type
+np.testing.assert_array_almost_equal(AR_U, AR_U) # E: incompatible type
+np.testing.assert_array_less(AR_U, AR_U) # E: incompatible type
+np.testing.assert_string_equal(b"a", b"a") # E: incompatible type
+
+np.testing.assert_raises(expected_exception=TypeError, callable=func) # E: No overload variant
+np.testing.assert_raises_regex(expected_exception=TypeError, expected_regex="T", callable=func) # E: No overload variant
+
+np.testing.assert_allclose(AR_U, AR_U) # E: incompatible type
+np.testing.assert_array_almost_equal_nulp(AR_U, AR_U) # E: incompatible type
+np.testing.assert_array_max_ulp(AR_U, AR_U) # E: incompatible type
+
+np.testing.assert_warns(warning_class=RuntimeWarning, func=func) # E: No overload variant
+np.testing.assert_no_warnings(func=func) # E: No overload variant
+
+np.testing.assert_no_gc_cycles(func=func) # E: No overload variant
diff --git a/numpy/typing/tests/data/reveal/modules.py b/numpy/typing/tests/data/reveal/modules.py
index fa356969a..b045585b2 100644
--- a/numpy/typing/tests/data/reveal/modules.py
+++ b/numpy/typing/tests/data/reveal/modules.py
@@ -29,9 +29,6 @@ reveal_type(np.polynomial.laguerre) # E: ModuleType
reveal_type(np.polynomial.legendre) # E: ModuleType
reveal_type(np.polynomial.polynomial) # E: ModuleType
-# TODO: Remove when annotations have been added to `np.testing.assert_equal`
-reveal_type(np.testing.assert_equal) # E: Any
-
reveal_type(np.__path__) # E: list[builtins.str]
reveal_type(np.__version__) # E: str
reveal_type(np.__git_version__) # E: str
diff --git a/numpy/typing/tests/data/reveal/ndarray_conversion.py b/numpy/typing/tests/data/reveal/ndarray_conversion.py
index 4ee637b75..03f2faf43 100644
--- a/numpy/typing/tests/data/reveal/ndarray_conversion.py
+++ b/numpy/typing/tests/data/reveal/ndarray_conversion.py
@@ -1,12 +1,13 @@
import numpy as np
+import numpy.typing as npt
-nd = np.array([[1, 2], [3, 4]])
+nd: npt.NDArray[np.int_] = np.array([[1, 2], [3, 4]])
# item
-reveal_type(nd.item()) # E: Any
-reveal_type(nd.item(1)) # E: Any
-reveal_type(nd.item(0, 1)) # E: Any
-reveal_type(nd.item((0, 1))) # E: Any
+reveal_type(nd.item()) # E: int
+reveal_type(nd.item(1)) # E: int
+reveal_type(nd.item(0, 1)) # E: int
+reveal_type(nd.item((0, 1))) # E: int
# tolist
reveal_type(nd.tolist()) # E: Any
@@ -19,36 +20,32 @@ reveal_type(nd.tolist()) # E: Any
# dumps is pretty simple
# astype
-reveal_type(nd.astype("float")) # E: numpy.ndarray
-reveal_type(nd.astype(float)) # E: numpy.ndarray
-reveal_type(nd.astype(float, "K")) # E: numpy.ndarray
-reveal_type(nd.astype(float, "K", "unsafe")) # E: numpy.ndarray
-reveal_type(nd.astype(float, "K", "unsafe", True)) # E: numpy.ndarray
-reveal_type(nd.astype(float, "K", "unsafe", True, True)) # E: numpy.ndarray
+reveal_type(nd.astype("float")) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+reveal_type(nd.astype(float)) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+reveal_type(nd.astype(np.float64)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(nd.astype(np.float64, "K")) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(nd.astype(np.float64, "K", "unsafe")) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(nd.astype(np.float64, "K", "unsafe", True)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(nd.astype(np.float64, "K", "unsafe", True, True)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
# byteswap
-reveal_type(nd.byteswap()) # E: numpy.ndarray
-reveal_type(nd.byteswap(True)) # E: numpy.ndarray
+reveal_type(nd.byteswap()) # E: numpy.ndarray[Any, numpy.dtype[{int_}]]
+reveal_type(nd.byteswap(True)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]]
# copy
-reveal_type(nd.copy()) # E: numpy.ndarray
-reveal_type(nd.copy("C")) # E: numpy.ndarray
+reveal_type(nd.copy()) # E: numpy.ndarray[Any, numpy.dtype[{int_}]]
+reveal_type(nd.copy("C")) # E: numpy.ndarray[Any, numpy.dtype[{int_}]]
-# view
-class SubArray(np.ndarray):
- pass
-
-
-reveal_type(nd.view()) # E: numpy.ndarray
-reveal_type(nd.view(np.int64)) # E: numpy.ndarray
-# replace `Any` with `numpy.matrix` when `matrix` will be added to stubs
-reveal_type(nd.view(np.int64, np.matrix)) # E: Any
-reveal_type(nd.view(np.int64, SubArray)) # E: SubArray
+reveal_type(nd.view()) # E: numpy.ndarray[Any, numpy.dtype[{int_}]]
+reveal_type(nd.view(np.float64)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(nd.view(float)) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+reveal_type(nd.view(np.float64, np.matrix)) # E: numpy.matrix[Any, Any]
# getfield
-reveal_type(nd.getfield("float")) # E: numpy.ndarray
-reveal_type(nd.getfield(float)) # E: numpy.ndarray
-reveal_type(nd.getfield(float, 8)) # E: numpy.ndarray
+reveal_type(nd.getfield("float")) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+reveal_type(nd.getfield(float)) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+reveal_type(nd.getfield(np.float64)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(nd.getfield(np.float64, 8)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
# setflags does not return a value
# fill does not return a value
diff --git a/numpy/typing/tests/data/reveal/scalars.py b/numpy/typing/tests/data/reveal/scalars.py
index d98388422..c081a5c67 100644
--- a/numpy/typing/tests/data/reveal/scalars.py
+++ b/numpy/typing/tests/data/reveal/scalars.py
@@ -114,3 +114,15 @@ reveal_type(f8.reshape(1)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
reveal_type(c16.reshape(1)) # E: numpy.ndarray[Any, numpy.dtype[{complex128}]]
reveal_type(U.reshape(1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]]
reveal_type(S.reshape(1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bytes_]]
+
+reveal_type(i8.astype(float)) # E: Any
+reveal_type(i8.astype(np.float64)) # E: {float64}
+
+reveal_type(i8.view()) # E: {int64}
+reveal_type(i8.view(np.float64)) # E: {float64}
+reveal_type(i8.view(float)) # E: Any
+reveal_type(i8.view(np.float64, np.ndarray)) # E: {float64}
+
+reveal_type(i8.getfield(float)) # E: Any
+reveal_type(i8.getfield(np.float64)) # E: {float64}
+reveal_type(i8.getfield(np.float64, 8)) # E: {float64}
diff --git a/numpy/typing/tests/data/reveal/testing.py b/numpy/typing/tests/data/reveal/testing.py
new file mode 100644
index 000000000..e244c225c
--- /dev/null
+++ b/numpy/typing/tests/data/reveal/testing.py
@@ -0,0 +1,173 @@
+from __future__ import annotations
+
+import re
+import sys
+from typing import Any, Callable, TypeVar
+from pathlib import Path
+
+import numpy as np
+import numpy.typing as npt
+
+AR_f8: npt.NDArray[np.float64]
+AR_i8: npt.NDArray[np.int64]
+
+bool_obj: bool
+suppress_obj: np.testing.suppress_warnings
+FT = TypeVar("FT", bound=Callable[..., Any])
+
+def func() -> int: ...
+
+def func2(
+ x: npt.NDArray[np.number[Any]],
+ y: npt.NDArray[np.number[Any]],
+) -> npt.NDArray[np.bool_]: ...
+
+reveal_type(np.testing.KnownFailureException()) # E: KnownFailureException
+reveal_type(np.testing.IgnoreException()) # E: IgnoreException
+
+reveal_type(np.testing.clear_and_catch_warnings(modules=[np.testing])) # E: _clear_and_catch_warnings_without_records
+reveal_type(np.testing.clear_and_catch_warnings(True)) # E: _clear_and_catch_warnings_with_records
+reveal_type(np.testing.clear_and_catch_warnings(False)) # E: _clear_and_catch_warnings_without_records
+reveal_type(np.testing.clear_and_catch_warnings(bool_obj)) # E: clear_and_catch_warnings
+reveal_type(np.testing.clear_and_catch_warnings.class_modules) # E: tuple[_importlib_modulespec.ModuleType]
+reveal_type(np.testing.clear_and_catch_warnings.modules) # E: set[_importlib_modulespec.ModuleType]
+
+with np.testing.clear_and_catch_warnings(True) as c1:
+ reveal_type(c1) # E: builtins.list[warnings.WarningMessage]
+with np.testing.clear_and_catch_warnings() as c2:
+ reveal_type(c2) # E: None
+
+reveal_type(np.testing.suppress_warnings("once")) # E: suppress_warnings
+reveal_type(np.testing.suppress_warnings()(func)) # E: def () -> builtins.int
+reveal_type(suppress_obj.filter(RuntimeWarning)) # E: None
+reveal_type(suppress_obj.record(RuntimeWarning)) # E: list[warnings.WarningMessage]
+with suppress_obj as c3:
+ reveal_type(c3) # E: suppress_warnings
+
+reveal_type(np.testing.verbose) # E: int
+reveal_type(np.testing.IS_PYPY) # E: bool
+reveal_type(np.testing.HAS_REFCOUNT) # E: bool
+reveal_type(np.testing.HAS_LAPACK64) # E: bool
+
+reveal_type(np.testing.assert_(1, msg="test")) # E: None
+reveal_type(np.testing.assert_(2, msg=lambda: "test")) # E: None
+
+if sys.platform == "win32" or sys.platform == "cygwin":
+ reveal_type(np.testing.memusage()) # E: builtins.int
+elif sys.platform == "linux":
+ reveal_type(np.testing.memusage()) # E: Union[None, builtins.int]
+else:
+ reveal_type(np.testing.memusage()) # E: <nothing>
+
+reveal_type(np.testing.jiffies()) # E: builtins.int
+
+reveal_type(np.testing.build_err_msg([0, 1, 2], "test")) # E: str
+reveal_type(np.testing.build_err_msg(range(2), "test", header="header")) # E: str
+reveal_type(np.testing.build_err_msg(np.arange(9).reshape(3, 3), "test", verbose=False)) # E: str
+reveal_type(np.testing.build_err_msg("abc", "test", names=["x", "y"])) # E: str
+reveal_type(np.testing.build_err_msg([1.0, 2.0], "test", precision=5)) # E: str
+
+reveal_type(np.testing.assert_equal({1}, {1})) # E: None
+reveal_type(np.testing.assert_equal([1, 2, 3], [1, 2, 3], err_msg="fail")) # E: None
+reveal_type(np.testing.assert_equal(1, 1.0, verbose=True)) # E: None
+
+reveal_type(np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 1])) # E: None
+
+reveal_type(np.testing.assert_almost_equal(1.0, 1.1)) # E: None
+reveal_type(np.testing.assert_almost_equal([1, 2, 3], [1, 2, 3], err_msg="fail")) # E: None
+reveal_type(np.testing.assert_almost_equal(1, 1.0, verbose=True)) # E: None
+reveal_type(np.testing.assert_almost_equal(1, 1.0001, decimal=2)) # E: None
+
+reveal_type(np.testing.assert_approx_equal(1.0, 1.1)) # E: None
+reveal_type(np.testing.assert_approx_equal("1", "2", err_msg="fail")) # E: None
+reveal_type(np.testing.assert_approx_equal(1, 1.0, verbose=True)) # E: None
+reveal_type(np.testing.assert_approx_equal(1, 1.0001, significant=2)) # E: None
+
+reveal_type(np.testing.assert_array_compare(func2, AR_i8, AR_f8, err_msg="test")) # E: None
+reveal_type(np.testing.assert_array_compare(func2, AR_i8, AR_f8, verbose=True)) # E: None
+reveal_type(np.testing.assert_array_compare(func2, AR_i8, AR_f8, header="header")) # E: None
+reveal_type(np.testing.assert_array_compare(func2, AR_i8, AR_f8, precision=np.int64())) # E: None
+reveal_type(np.testing.assert_array_compare(func2, AR_i8, AR_f8, equal_nan=False)) # E: None
+reveal_type(np.testing.assert_array_compare(func2, AR_i8, AR_f8, equal_inf=True)) # E: None
+
+reveal_type(np.testing.assert_array_equal(AR_i8, AR_f8)) # E: None
+reveal_type(np.testing.assert_array_equal(AR_i8, AR_f8, err_msg="test")) # E: None
+reveal_type(np.testing.assert_array_equal(AR_i8, AR_f8, verbose=True)) # E: None
+
+reveal_type(np.testing.assert_array_almost_equal(AR_i8, AR_f8)) # E: None
+reveal_type(np.testing.assert_array_almost_equal(AR_i8, AR_f8, err_msg="test")) # E: None
+reveal_type(np.testing.assert_array_almost_equal(AR_i8, AR_f8, verbose=True)) # E: None
+reveal_type(np.testing.assert_array_almost_equal(AR_i8, AR_f8, decimal=1)) # E: None
+
+reveal_type(np.testing.assert_array_less(AR_i8, AR_f8)) # E: None
+reveal_type(np.testing.assert_array_less(AR_i8, AR_f8, err_msg="test")) # E: None
+reveal_type(np.testing.assert_array_less(AR_i8, AR_f8, verbose=True)) # E: None
+
+reveal_type(np.testing.runstring("1 + 1", {})) # E: Any
+reveal_type(np.testing.runstring("int64() + 1", {"int64": np.int64})) # E: Any
+
+reveal_type(np.testing.assert_string_equal("1", "1")) # E: None
+
+reveal_type(np.testing.rundocs()) # E: None
+reveal_type(np.testing.rundocs("test.py")) # E: None
+reveal_type(np.testing.rundocs(Path("test.py"), raise_on_error=True)) # E: None
+
+@np.testing.raises(RuntimeError, RuntimeWarning)
+def func3(a: int) -> bool: ...
+
+reveal_type(func3) # E: def (a: builtins.int) -> builtins.bool
+
+reveal_type(np.testing.assert_raises(RuntimeWarning)) # E: _AssertRaisesContext[builtins.RuntimeWarning]
+reveal_type(np.testing.assert_raises(RuntimeWarning, func3, 5)) # E: None
+
+reveal_type(np.testing.assert_raises_regex(RuntimeWarning, r"test")) # E: _AssertRaisesContext[builtins.RuntimeWarning]
+reveal_type(np.testing.assert_raises_regex(RuntimeWarning, b"test", func3, 5)) # E: None
+reveal_type(np.testing.assert_raises_regex(RuntimeWarning, re.compile(b"test"), func3, 5)) # E: None
+
+class Test: ...
+
+def decorate(a: FT) -> FT:
+ return a
+
+reveal_type(np.testing.decorate_methods(Test, decorate)) # E: None
+reveal_type(np.testing.decorate_methods(Test, decorate, None)) # E: None
+reveal_type(np.testing.decorate_methods(Test, decorate, "test")) # E: None
+reveal_type(np.testing.decorate_methods(Test, decorate, b"test")) # E: None
+reveal_type(np.testing.decorate_methods(Test, decorate, re.compile("test"))) # E: None
+
+reveal_type(np.testing.measure("for i in range(1000): np.sqrt(i**2)")) # E: float
+reveal_type(np.testing.measure(b"for i in range(1000): np.sqrt(i**2)", times=5)) # E: float
+
+reveal_type(np.testing.assert_allclose(AR_i8, AR_f8)) # E: None
+reveal_type(np.testing.assert_allclose(AR_i8, AR_f8, rtol=0.005)) # E: None
+reveal_type(np.testing.assert_allclose(AR_i8, AR_f8, atol=1)) # E: None
+reveal_type(np.testing.assert_allclose(AR_i8, AR_f8, equal_nan=True)) # E: None
+reveal_type(np.testing.assert_allclose(AR_i8, AR_f8, err_msg="err")) # E: None
+reveal_type(np.testing.assert_allclose(AR_i8, AR_f8, verbose=False)) # E: None
+
+reveal_type(np.testing.assert_array_almost_equal_nulp(AR_i8, AR_f8, nulp=2)) # E: None
+
+reveal_type(np.testing.assert_array_max_ulp(AR_i8, AR_f8, maxulp=2)) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+reveal_type(np.testing.assert_array_max_ulp(AR_i8, AR_f8, dtype=np.float32)) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+
+reveal_type(np.testing.assert_warns(RuntimeWarning)) # E: _GeneratorContextManager[None]
+reveal_type(np.testing.assert_warns(RuntimeWarning, func3, 5)) # E: bool
+
+reveal_type(np.testing.assert_no_warnings()) # E: _GeneratorContextManager[None]
+reveal_type(np.testing.assert_no_warnings(func3, 5)) # E: bool
+
+reveal_type(np.testing.tempdir("test_dir")) # E: _GeneratorContextManager[builtins.str]
+reveal_type(np.testing.tempdir(prefix=b"test")) # E: _GeneratorContextManager[builtins.bytes]
+reveal_type(np.testing.tempdir("test_dir", dir=Path("here"))) # E: _GeneratorContextManager[builtins.str]
+
+reveal_type(np.testing.temppath("test_dir", text=True)) # E: _GeneratorContextManager[builtins.str]
+reveal_type(np.testing.temppath(prefix=b"test")) # E: _GeneratorContextManager[builtins.bytes]
+reveal_type(np.testing.temppath("test_dir", dir=Path("here"))) # E: _GeneratorContextManager[builtins.str]
+
+reveal_type(np.testing.assert_no_gc_cycles()) # E: _GeneratorContextManager[None]
+reveal_type(np.testing.assert_no_gc_cycles(func3, 5)) # E: None
+
+reveal_type(np.testing.break_cycles()) # E: None
+
+reveal_type(np.testing.TestCase()) # E: unittest.case.TestCase
+reveal_type(np.testing.run_module_suite(file_to_run="numpy/tests/test_matlib.py")) # E: None
diff --git a/numpy/typing/tests/test_generic_alias.py b/numpy/typing/tests/test_generic_alias.py
index 0b9917439..538d7eae5 100644
--- a/numpy/typing/tests/test_generic_alias.py
+++ b/numpy/typing/tests/test_generic_alias.py
@@ -21,8 +21,8 @@ if sys.version_info >= (3, 9):
NDArray_ref = types.GenericAlias(np.ndarray, (Any, DType_ref))
FuncType = Callable[[Union[_GenericAlias, types.GenericAlias]], Any]
else:
- DType_ref = NotImplemented
- NDArray_ref = NotImplemented
+ DType_ref = Any
+ NDArray_ref = Any
FuncType = Callable[[_GenericAlias], Any]
GETATTR_NAMES = sorted(set(dir(np.ndarray)) - _GenericAlias._ATTR_EXCEPTIONS)
@@ -41,6 +41,12 @@ class TestGenericAlias:
@pytest.mark.parametrize("name,func", [
("__init__", lambda n: n),
+ ("__init__", lambda n: _GenericAlias(np.ndarray, Any)),
+ ("__init__", lambda n: _GenericAlias(np.ndarray, (Any,))),
+ ("__init__", lambda n: _GenericAlias(np.ndarray, (Any, Any))),
+ ("__init__", lambda n: _GenericAlias(np.ndarray, T1)),
+ ("__init__", lambda n: _GenericAlias(np.ndarray, (T1,))),
+ ("__init__", lambda n: _GenericAlias(np.ndarray, (T1, T2))),
("__origin__", lambda n: n.__origin__),
("__args__", lambda n: n.__args__),
("__parameters__", lambda n: n.__parameters__),
diff --git a/test_requirements.txt b/test_requirements.txt
index e2a22464e..ca0edfbb4 100644
--- a/test_requirements.txt
+++ b/test_requirements.txt
@@ -1,10 +1,10 @@
cython==0.29.23
wheel<0.36.3
setuptools<49.2.0
-hypothesis==6.13.10
+hypothesis==6.13.14
pytest==6.2.4
pytz==2021.1
-pytest-cov==2.12.0
+pytest-cov==2.12.1
pickle5; python_version == '3.7' and platform_python_implementation != 'PyPy'
# for numpy.random.test.test_extending
cffi