-rw-r--r--  .circleci/config.yml | 6
-rw-r--r--  .gitattributes | 1
-rw-r--r--  .github/workflows/build_test.yml | 26
-rw-r--r--  .github/workflows/cygwin.yml | 2
-rw-r--r--  .gitmodules | 3
-rw-r--r--  .gitpod.yml | 3
-rw-r--r--  azure-pipelines.yml | 15
-rw-r--r--  azure-steps-windows.yml | 2
-rw-r--r--  doc/changelog/1.21.3-changelog.rst | 28
-rw-r--r--  doc/neps/conf.py | 1
-rw-r--r--  doc/release/upcoming_changes/17530.improvement.rst | 5
-rw-r--r--  doc/release/upcoming_changes/19478.performance.rst | 11
-rw-r--r--  doc/release/upcoming_changes/20000.deprecation.rst | 5
-rw-r--r--  doc/release/upcoming_changes/20027.improvement.rst | 17
-rw-r--r--  doc/release/upcoming_changes/20049.change.rst | 5
-rw-r--r--  doc/source/conf.py | 1
-rw-r--r--  doc/source/reference/arrays.datetime.rst | 18
-rw-r--r--  doc/source/reference/arrays.dtypes.rst | 10
-rw-r--r--  doc/source/reference/arrays.ndarray.rst | 1
-rw-r--r--  doc/source/reference/routines.ma.rst | 3
-rw-r--r--  doc/source/release.rst | 1
-rw-r--r--  doc/source/release/1.21.3-notes.rst | 44
-rw-r--r--  doc/source/user/absolute_beginners.rst | 10
-rw-r--r--  doc/source/user/quickstart.rst | 9
-rw-r--r--  environment.yml | 1
-rw-r--r--  linter_requirements.txt | 2
-rw-r--r--  numpy/__init__.pyi | 835
-rw-r--r--  numpy/array_api/_array_object.py | 4
-rw-r--r--  numpy/array_api/_typing.py | 26
-rw-r--r--  numpy/array_api/tests/test_creation_functions.py | 15
-rw-r--r--  numpy/core/_add_newdocs.py | 67
-rw-r--r--  numpy/core/code_generators/generate_umath.py | 108
-rw-r--r--  numpy/core/code_generators/ufunc_docstrings.py | 32
-rw-r--r--  numpy/core/einsumfunc.py | 2
-rw-r--r--  numpy/core/fromnumeric.py | 10
-rw-r--r--  numpy/core/function_base.pyi | 13
-rw-r--r--  numpy/core/getlimits.py | 1
-rw-r--r--  numpy/core/include/numpy/experimental_dtype_api.h | 306
-rw-r--r--  numpy/core/include/numpy/ndarraytypes.h | 30
-rw-r--r--  numpy/core/numerictypes.py | 6
-rw-r--r--  numpy/core/overrides.py | 1
-rw-r--r--  numpy/core/setup.py | 43
-rw-r--r--  numpy/core/src/common/npy_sort.h.src | 7
-rw-r--r--  numpy/core/src/common/npy_svml.h | 41
-rw-r--r--  numpy/core/src/common/numpy_tag.h | 78
-rw-r--r--  numpy/core/src/multiarray/array_method.c | 98
-rw-r--r--  numpy/core/src/multiarray/array_method.h | 16
-rw-r--r--  numpy/core/src/multiarray/arrayobject.c | 18
-rw-r--r--  numpy/core/src/multiarray/arraytypes.c.src | 2
-rw-r--r--  numpy/core/src/multiarray/compiled_base.c | 19
-rw-r--r--  numpy/core/src/multiarray/convert_datatype.c | 61
-rw-r--r--  numpy/core/src/multiarray/datetime.c | 22
-rw-r--r--  numpy/core/src/multiarray/descriptor.c | 27
-rw-r--r--  numpy/core/src/multiarray/dtypemeta.c | 2
-rw-r--r--  numpy/core/src/multiarray/dtypemeta.h | 44
-rw-r--r--  numpy/core/src/multiarray/experimental_public_dtype_api.c | 363
-rw-r--r--  numpy/core/src/multiarray/experimental_public_dtype_api.h | 18
-rw-r--r--  numpy/core/src/multiarray/item_selection.c | 29
-rw-r--r--  numpy/core/src/multiarray/iterators.c | 36
-rw-r--r--  numpy/core/src/multiarray/multiarraymodule.c | 5
-rw-r--r--  numpy/core/src/multiarray/usertypes.c | 67
-rw-r--r--  numpy/core/src/npymath/npy_math_private.h | 18
-rw-r--r--  numpy/core/src/npysort/radixsort.c.src | 231
-rw-r--r--  numpy/core/src/npysort/radixsort.cpp | 354
-rw-r--r--  numpy/core/src/umath/_scaled_float_dtype.c | 52
-rw-r--r--  numpy/core/src/umath/_umath_tests.c.src | 35
-rw-r--r--  numpy/core/src/umath/clip.c.src | 120
-rw-r--r--  numpy/core/src/umath/clip.cpp | 282
-rw-r--r--  numpy/core/src/umath/clip.h | 73
-rw-r--r--  numpy/core/src/umath/clip.h.src | 18
-rw-r--r--  numpy/core/src/umath/dispatching.c | 177
-rw-r--r--  numpy/core/src/umath/dispatching.h | 4
-rw-r--r--  numpy/core/src/umath/legacy_array_method.c | 19
-rw-r--r--  numpy/core/src/umath/loops.h.src | 26
-rw-r--r--  numpy/core/src/umath/loops_exponent_log.dispatch.c.src | 35
-rw-r--r--  numpy/core/src/umath/loops_umath_fp.dispatch.c.src | 141
-rw-r--r--  numpy/core/src/umath/reduction.c | 57
-rw-r--r--  numpy/core/src/umath/reduction.h | 110
-rw-r--r--  numpy/core/src/umath/simd.inc.src | 4
m---------  numpy/core/src/umath/svml | 0
-rw-r--r--  numpy/core/src/umath/ufunc_object.c | 811
-rw-r--r--  numpy/core/src/umath/ufunc_type_resolution.c | 33
-rw-r--r--  numpy/core/src/umath/ufunc_type_resolution.h | 4
-rw-r--r--  numpy/core/src/umath/umathmodule.c | 29
-rw-r--r--  numpy/core/tests/data/generate_umath_validation_data.cpp | 137
-rw-r--r--  numpy/core/tests/test_arrayprint.py | 1
-rw-r--r--  numpy/core/tests/test_casting_unittests.py | 1
-rw-r--r--  numpy/core/tests/test_custom_dtypes.py | 36
-rw-r--r--  numpy/core/tests/test_datetime.py | 52
-rw-r--r--  numpy/core/tests/test_deprecations.py | 23
-rw-r--r--  numpy/core/tests/test_dtype.py | 9
-rw-r--r--  numpy/core/tests/test_multiarray.py | 68
-rw-r--r--  numpy/core/tests/test_nditer.py | 2
-rw-r--r--  numpy/core/tests/test_numeric.py | 27
-rw-r--r--  numpy/core/tests/test_scalarinherit.py | 1
-rw-r--r--  numpy/core/tests/test_scalarmath.py | 4
-rw-r--r--  numpy/core/tests/test_scalarprint.py | 1
-rw-r--r--  numpy/core/tests/test_ufunc.py | 72
-rw-r--r--  numpy/core/tests/test_umath.py | 44
-rw-r--r--  numpy/core/tests/test_umath_accuracy.py | 1
-rw-r--r--  numpy/ctypeslib.py | 13
-rw-r--r--  numpy/distutils/ccompiler.py | 10
-rw-r--r--  numpy/distutils/ccompiler_opt.py | 29
-rw-r--r--  numpy/distutils/command/build_clib.py | 49
-rw-r--r--  numpy/distutils/command/build_ext.py | 57
-rw-r--r--  numpy/distutils/core.py | 2
-rw-r--r--  numpy/distutils/extension.py | 4
-rw-r--r--  numpy/distutils/mingw32ccompiler.py | 24
-rw-r--r--  numpy/distutils/misc_util.py | 15
-rw-r--r--  numpy/distutils/tests/test_system_info.py | 4
-rw-r--r--  numpy/doc/constants.py | 1
-rw-r--r--  numpy/f2py/cb_rules.py | 22
-rwxr-xr-x  numpy/f2py/rules.py | 506
-rw-r--r--  numpy/f2py/setup.py | 3
-rw-r--r--  numpy/f2py/tests/test_callback.py | 7
-rw-r--r--  numpy/f2py/tests/util.py | 3
-rw-r--r--  numpy/lib/_datasource.py | 1
-rw-r--r--  numpy/lib/_iotools.py | 3
-rw-r--r--  numpy/lib/function_base.py | 49
-rw-r--r--  numpy/lib/function_base.pyi | 719
-rw-r--r--  numpy/lib/histograms.pyi | 52
-rw-r--r--  numpy/lib/nanfunctions.py | 174
-rw-r--r--  numpy/lib/nanfunctions.pyi | 86
-rw-r--r--  numpy/lib/polynomial.pyi | 308
-rw-r--r--  numpy/lib/tests/test_format.py | 2
-rw-r--r--  numpy/lib/tests/test_function_base.py | 112
-rw-r--r--  numpy/lib/tests/test_index_tricks.py | 1
-rw-r--r--  numpy/lib/tests/test_nanfunctions.py | 155
-rw-r--r--  numpy/lib/tests/test_regression.py | 2
-rwxr-xr-x  numpy/linalg/lapack_lite/make_lite.py | 5
-rw-r--r--  numpy/linalg/linalg.py | 2
-rw-r--r--  numpy/linalg/setup.py | 3
-rw-r--r--  numpy/linalg/tests/test_linalg.py | 3
-rw-r--r--  numpy/ma/bench.py | 1
-rw-r--r--  numpy/ma/core.py | 2
-rw-r--r--  numpy/matrixlib/__init__.pyi | 12
-rw-r--r--  numpy/matrixlib/defmatrix.pyi | 15
-rw-r--r--  numpy/polynomial/tests/test_classes.py | 2
-rw-r--r--  numpy/random/src/pcg64/pcg64.c | 3
-rw-r--r--  numpy/random/tests/test_direct.py | 22
-rw-r--r--  numpy/random/tests/test_generator_mt19937_regressions.py | 2
-rw-r--r--  numpy/testing/_private/utils.py | 4
-rw-r--r--  numpy/tests/test_ctypeslib.py | 17
-rw-r--r--  numpy/typing/_nested_sequence.py | 5
-rw-r--r--  numpy/typing/mypy_plugin.py | 5
-rw-r--r--  numpy/typing/tests/data/fail/chararray.pyi | 62
-rw-r--r--  numpy/typing/tests/data/fail/histograms.pyi | 13
-rw-r--r--  numpy/typing/tests/data/fail/lib_function_base.pyi | 53
-rw-r--r--  numpy/typing/tests/data/fail/lib_polynomial.pyi | 29
-rw-r--r--  numpy/typing/tests/data/fail/ndarray_misc.pyi | 4
-rw-r--r--  numpy/typing/tests/data/pass/array_constructors.py | 2
-rw-r--r--  numpy/typing/tests/data/pass/array_like.py | 2
-rw-r--r--  numpy/typing/tests/data/pass/einsumfunc.py | 2
-rw-r--r--  numpy/typing/tests/data/pass/lib_utils.py | 1
-rw-r--r--  numpy/typing/tests/data/pass/multiarray.py | 1
-rw-r--r--  numpy/typing/tests/data/reveal/chararray.pyi | 129
-rw-r--r--  numpy/typing/tests/data/reveal/histograms.pyi | 19
-rw-r--r--  numpy/typing/tests/data/reveal/lib_function_base.pyi | 180
-rw-r--r--  numpy/typing/tests/data/reveal/lib_polynomial.pyi | 111
-rw-r--r--  numpy/typing/tests/data/reveal/matrix.pyi | 69
-rw-r--r--  numpy/typing/tests/data/reveal/ndarray_misc.pyi | 11
-rw-r--r--  numpy/typing/tests/data/reveal/nested_sequence.pyi | 1
-rw-r--r--  numpy/typing/tests/test_typing.py | 4
-rwxr-xr-x  setup.py | 12
-rw-r--r--  test_requirements.txt | 6
-rwxr-xr-x  tools/changelog.py | 1
-rw-r--r--  tools/download-wheels.py | 1
-rw-r--r--  tools/gitpod/gitpod.Dockerfile | 1
-rw-r--r--  tools/openblas_support.py | 4
-rwxr-xr-x  tools/travis-before-install.sh | 5
-rwxr-xr-x  tools/travis-test.sh | 4
171 files changed, 6977 insertions(+), 2060 deletions(-)
diff --git a/.circleci/config.yml b/.circleci/config.yml
index c343e9168..de7f52f81 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -24,6 +24,12 @@ jobs:
if [[ -v CI_PULL_REQUEST ]] ; then git pull --ff-only origin "refs/pull/${CI_PULL_REQUEST//*pull\//}/merge" ; fi
- run:
+ name: update submodules
+ command: |
+ git submodule init
+ git submodule update
+
+ - run:
name: create virtual environment, install dependencies
command: |
sudo apt-get update
diff --git a/.gitattributes b/.gitattributes
index a0676bee4..911db2b72 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -11,6 +11,7 @@ numpy/linalg/lapack_lite/f2c.c linguist-vendored
numpy/linalg/lapack_lite/f2c.h linguist-vendored
tools/npy_tempita/* linguist-vendored
numpy/core/include/numpy/libdivide/* linguist-vendored
+numpy/core/src/umath/svml/* linguist-vendored
# Mark some files as generated
numpy/linalg/lapack_lite/f2c_*.c linguist-generated
diff --git a/.github/workflows/build_test.yml b/.github/workflows/build_test.yml
index 1c062d73c..cecb4d2af 100644
--- a/.github/workflows/build_test.yml
+++ b/.github/workflows/build_test.yml
@@ -56,7 +56,7 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
- python-version: [3.9, 3.10.0-rc.2]
+ python-version: ["3.9", "3.10"]
steps:
- uses: actions/checkout@v2
with:
@@ -202,18 +202,18 @@ jobs:
python-version: ${{ env.PYTHON_VERSION }}
- uses: ./.github/actions
- #pypy37:
- #needs: [smoke_test]
- #runs-on: ubuntu-latest
- #steps:
- #- uses: actions/checkout@v2
- #with:
- #submodules: recursive
- #fetch-depth: 0
- #- uses: actions/setup-python@v2
- #with:
- #python-version: pypy-3.7-v7.3.4
- #- uses: ./.github/actions
+ pypy38:
+ needs: [smoke_test]
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v2
+ with:
+ submodules: recursive
+ fetch-depth: 0
+ - uses: actions/setup-python@v2
+ with:
+ python-version: pypy-3.8-v7.3.6rc1
+ - uses: ./.github/actions
sdist:
needs: [smoke_test]
diff --git a/.github/workflows/cygwin.yml b/.github/workflows/cygwin.yml
index 930ce39ff..2e644b3c1 100644
--- a/.github/workflows/cygwin.yml
+++ b/.github/workflows/cygwin.yml
@@ -24,7 +24,7 @@ jobs:
python38-cython python38-pip python38-wheel python38-cffi
python38-pytz python38-setuptools python38-pytest
python38-hypothesis liblapack-devel libopenblas
- gcc-fortran git dash
+ gcc-fortran gcc-g++ git dash
- name: Set Windows PATH
uses: egor-tensin/cleanup-path@v1
with:
diff --git a/.gitmodules b/.gitmodules
index 0d6857868..1ea274daf 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -1,3 +1,6 @@
[submodule "doc/source/_static/scipy-mathjax"]
path = doc/source/_static/scipy-mathjax
url = https://github.com/scipy/scipy-mathjax.git
+[submodule "numpy/core/src/umath/svml"]
+ path = numpy/core/src/umath/svml
+ url = https://github.com/numpy/SVML.git
diff --git a/.gitpod.yml b/.gitpod.yml
index dfbee831a..f9c35fd9b 100644
--- a/.gitpod.yml
+++ b/.gitpod.yml
@@ -14,7 +14,6 @@ tasks:
python setup.py build_ext --inplace
echo "🛠 Completed rebuilding NumPy!! 🛠 "
echo "📖 Building docs 📖 "
- git submodule update --init
cd doc
make html
echo "✨ Pre-build complete! You can close this terminal ✨ "
@@ -60,4 +59,4 @@ github:
# add a "Review in Gitpod" button to the pull request's description (defaults to false)
addBadge: false
# add a label once the prebuild is ready to pull requests (defaults to false)
-  addLabel: false
\ No newline at end of file
+ addLabel: false
diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index 714f62912..f0c67b4aa 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -24,6 +24,9 @@ stages:
vmImage: 'ubuntu-20.04'
steps:
- script: |
+ git submodule update --init
+ displayName: 'Fetch submodules'
+ - script: |
if ! `gcc 2>/dev/null`; then
sudo apt install gcc
fi
@@ -72,6 +75,9 @@ stages:
vmImage: 'ubuntu-20.04'
steps:
- script: |
+ git submodule update --init
+ displayName: 'Fetch submodules'
+ - script: |
docker run -v $(pwd):/numpy -e CFLAGS="-msse2 -std=c99 -UNDEBUG" \
-e F77=gfortran-5 -e F90=gfortran-5 quay.io/pypa/manylinux2014_i686 \
/bin/bash -xc "cd numpy && \
@@ -249,6 +255,12 @@ stages:
TEST_MODE: full
BITS: 64
NPY_USE_BLAS_ILP64: '1'
+ PyPy38-64bit-fast:
+ PYTHON_VERSION: 'PyPy'
+ PYTHON_ARCH: 'x64'
+ TEST_MODE: fast
+ BITS: 64
+ NPY_USE_BLAS_ILP64: '1'
steps:
- template: azure-steps-windows.yml
@@ -259,6 +271,9 @@ stages:
vmImage: 'ubuntu-20.04'
steps:
- script: |
+ git submodule update --init
+ displayName: 'Fetch submodules'
+ - script: |
# create and activate conda environment
conda env create -f environment.yml
displayName: 'Create conda environment.'
diff --git a/azure-steps-windows.yml b/azure-steps-windows.yml
index 9a5f9bb70..34f9797de 100644
--- a/azure-steps-windows.yml
+++ b/azure-steps-windows.yml
@@ -6,7 +6,7 @@ steps:
architecture: $(PYTHON_ARCH)
condition: not(contains(variables['PYTHON_VERSION'], 'PyPy'))
- powershell: |
- $url = "http://buildbot.pypy.org/nightly/py3.7/pypy-c-jit-latest-win64.zip"
+ $url = "http://buildbot.pypy.org/nightly/py3.8/pypy-c-jit-latest-win64.zip"
$output = "pypy.zip"
$wc = New-Object System.Net.WebClient
$wc.DownloadFile($url, $output)
diff --git a/doc/changelog/1.21.3-changelog.rst b/doc/changelog/1.21.3-changelog.rst
new file mode 100644
index 000000000..767794721
--- /dev/null
+++ b/doc/changelog/1.21.3-changelog.rst
@@ -0,0 +1,28 @@
+
+Contributors
+============
+
+A total of 7 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Aaron Meurer
+* Bas van Beek
+* Charles Harris
+* Developer-Ecosystem-Engineering +
+* Kevin Sheppard
+* Sebastian Berg
+* Warren Weckesser
+
+Pull requests merged
+====================
+
+A total of 8 pull requests were merged for this release.
+
+* `#19745 <https://github.com/numpy/numpy/pull/19745>`__: ENH: Add dtype-support to 3 `generic`/`ndarray` methods
+* `#19955 <https://github.com/numpy/numpy/pull/19955>`__: BUG: Resolve Divide by Zero on Apple silicon + test failures...
+* `#19958 <https://github.com/numpy/numpy/pull/19958>`__: MAINT: Mark type-check-only ufunc subclasses as ufunc aliases...
+* `#19994 <https://github.com/numpy/numpy/pull/19994>`__: BUG: np.tan(np.inf) test failure
+* `#20080 <https://github.com/numpy/numpy/pull/20080>`__: BUG: Correct incorrect advance in PCG with emulated int128
+* `#20081 <https://github.com/numpy/numpy/pull/20081>`__: BUG: Fix NaT handling in the PyArray_CompareFunc for datetime...
+* `#20082 <https://github.com/numpy/numpy/pull/20082>`__: DOC: Ensure that we add documentation also as to the dict for...
+* `#20106 <https://github.com/numpy/numpy/pull/20106>`__: BUG: core: result_type(0, np.timedelta64(4)) would seg. fault.
diff --git a/doc/neps/conf.py b/doc/neps/conf.py
index f01ee8a51..68805e50f 100644
--- a/doc/neps/conf.py
+++ b/doc/neps/conf.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# NumPy Enhancement Proposals documentation build configuration file, created by
# sphinx-quickstart on Mon Dec 11 12:45:09 2017.
diff --git a/doc/release/upcoming_changes/17530.improvement.rst b/doc/release/upcoming_changes/17530.improvement.rst
new file mode 100644
index 000000000..07a23f0e5
--- /dev/null
+++ b/doc/release/upcoming_changes/17530.improvement.rst
@@ -0,0 +1,5 @@
+`ctypeslib.load_library` can now take any path-like object
+-----------------------------------------------------------------------
+All parameters in `ctypeslib.load_library` can now take any
+:term:`python:path-like object`. This includes strings, bytes, and objects
+implementing the :meth:`__fspath__<os.PathLike.__fspath__>` protocol.
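
For illustration, a minimal sketch of the new behavior (the library name and
directory below are assumptions, not part of this change):

    # Any os.PathLike value is accepted where a plain str used to be required.
    from pathlib import Path
    import numpy.ctypeslib as npct

    lib = npct.load_library("libexample", Path("/usr/local/lib"))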
diff --git a/doc/release/upcoming_changes/19478.performance.rst b/doc/release/upcoming_changes/19478.performance.rst
new file mode 100644
index 000000000..6a389c20e
--- /dev/null
+++ b/doc/release/upcoming_changes/19478.performance.rst
@@ -0,0 +1,11 @@
+Vectorize umath module using AVX-512
+-------------------------------------
+
+By leveraging Intel Short Vector Math Library (SVML), 18 umath functions
+(``exp2``, ``log2``, ``log10``, ``expm1``, ``log1p``, ``cbrt``, ``sin``,
+``cos``, ``tan``, ``arcsin``, ``arccos``, ``arctan``, ``sinh``, ``cosh``,
+``tanh``, ``arcsinh``, ``arccosh``, ``arctanh``) are vectorized using the
+AVX-512 instruction set for both single and double precision implementations.
+This change is currently enabled only for Linux users and on processors with
+the AVX-512 instruction set. It provides an average speedup of 32x and 14x
+for single and double precision functions, respectively.
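
A minimal sketch of a call that benefits (the dispatch to the SVML loop is
transparent; on unsupported platforms the same call uses the default loop):

    import numpy as np

    x = np.linspace(0.0, 1.0, 1_000_000, dtype=np.float32)
    y = np.arcsin(x)  # SVML-backed on AVX-512-capable Linux hosts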
diff --git a/doc/release/upcoming_changes/20000.deprecation.rst b/doc/release/upcoming_changes/20000.deprecation.rst
new file mode 100644
index 000000000..e0a56cd47
--- /dev/null
+++ b/doc/release/upcoming_changes/20000.deprecation.rst
@@ -0,0 +1,5 @@
+Passing boolean ``kth`` values to (arg-)partition has been deprecated
+---------------------------------------------------------------------
+`~numpy.partition` and `~numpy.argpartition` would previously accept boolean
+values for the ``kth`` parameter, which would subsequently be converted into
+integers. This behavior has now been deprecated.
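
A short sketch of the deprecated pattern versus the supported one:

    import numpy as np

    a = np.array([7, 0, 5, 2])
    np.partition(a, 1)     # OK: integer kth
    np.partition(a, True)  # deprecated: boolean kth (was silently cast to 1)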
diff --git a/doc/release/upcoming_changes/20027.improvement.rst b/doc/release/upcoming_changes/20027.improvement.rst
new file mode 100644
index 000000000..86b3bed74
--- /dev/null
+++ b/doc/release/upcoming_changes/20027.improvement.rst
@@ -0,0 +1,17 @@
+Missing parameters have been added to the ``nan<x>`` functions
+--------------------------------------------------------------
+A number of the ``nan<x>`` functions previously lacked parameters that were
+present in their ``<x>``-based counterparts; *e.g.*, the ``where`` parameter
+was present in `~numpy.mean` but absent from `~numpy.nanmean`.
+
+The following parameters have now been added to the ``nan<x>`` functions:
+
+* nanmin: ``initial`` & ``where``
+* nanmax: ``initial`` & ``where``
+* nanargmin: ``keepdims`` & ``out``
+* nanargmax: ``keepdims`` & ``out``
+* nansum: ``initial`` & ``where``
+* nanprod: ``initial`` & ``where``
+* nanmean: ``where``
+* nanvar: ``where``
+* nanstd: ``where``
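
A brief sketch of the newly accepted parameters (the array values here are
illustrative only):

    import numpy as np

    a = np.array([[1.0, np.nan], [3.0, 4.0]])
    mask = np.array([[True, True], [True, False]])
    np.nansum(a, initial=10.0)   # ``initial``, as in np.sum
    np.nanmean(a, where=mask)    # ``where``, as in np.mean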
diff --git a/doc/release/upcoming_changes/20049.change.rst b/doc/release/upcoming_changes/20049.change.rst
new file mode 100644
index 000000000..e1f08b343
--- /dev/null
+++ b/doc/release/upcoming_changes/20049.change.rst
@@ -0,0 +1,5 @@
+Corrected ``advance`` in ``PCG64DXSM`` and ``PCG64``
+----------------------------------------------------
+Fixed a bug in the ``advance`` method of ``PCG64DXSM`` and ``PCG64``. The bug
+only affected results when the step was larger than :math:`2^{64}` on
+platforms that do not support 128-bit integers (e.g., Windows and 32-bit
+Linux).
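
A minimal sketch of the affected code path (the seed is arbitrary; ``advance``
returns the bit generator, so the calls can be chained):

    import numpy as np

    bg = np.random.PCG64(1234).advance(2**65)  # steps > 2**64 hit the bug
    gen = np.random.Generator(bg)
    gen.integers(0, 10, size=5)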
diff --git a/doc/source/conf.py b/doc/source/conf.py
index d08f29e59..a7a885c34 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
import os
import re
import sys
diff --git a/doc/source/reference/arrays.datetime.rst b/doc/source/reference/arrays.datetime.rst
index e3b8d270d..63c93821b 100644
--- a/doc/source/reference/arrays.datetime.rst
+++ b/doc/source/reference/arrays.datetime.rst
@@ -25,7 +25,7 @@ form of the string, and can be either a :ref:`date unit <arrays.dtypes.dateunits
:ref:`time unit <arrays.dtypes.timeunits>`. The date units are years ('Y'),
months ('M'), weeks ('W'), and days ('D'), while the time units are
hours ('h'), minutes ('m'), seconds ('s'), milliseconds ('ms'), and
-some additional SI-prefix seconds-based units. The datetime64 data type
+some additional SI-prefix seconds-based units. The datetime64 data type
also accepts the string "NAT", in any combination of lowercase/uppercase
letters, for a "Not A Time" value.
@@ -74,6 +74,18 @@ datetime type with generic units.
array(['2001-01-01T12:00:00.000', '2002-02-03T13:56:03.172'],
dtype='datetime64[ms]')
+An array of datetimes can be constructed from integers representing
+POSIX timestamps with the given unit.
+
+.. admonition:: Example
+
+ >>> np.array([0, 1577836800], dtype='datetime64[s]')
+ array(['1970-01-01T00:00:00', '2020-01-01T00:00:00'],
+ dtype='datetime64[s]')
+
+ >>> np.array([0, 1577836800000]).astype('datetime64[ms]')
+ array(['1970-01-01T00:00:00.000', '2020-01-01T00:00:00.000'],
+ dtype='datetime64[ms]')
The datetime type works with many common NumPy functions, for
example :func:`arange` can be used to generate ranges of dates.
@@ -120,9 +132,9 @@ Datetime and Timedelta Arithmetic
NumPy allows the subtraction of two Datetime values, an operation which
produces a number with a time unit. Because NumPy doesn't have a physical
quantities system in its core, the timedelta64 data type was created
-to complement datetime64. The arguments for timedelta64 are a number,
+to complement datetime64. The arguments for timedelta64 are a number,
to represent the number of units, and a date/time unit, such as
-(D)ay, (M)onth, (Y)ear, (h)ours, (m)inutes, or (s)econds. The timedelta64
+(D)ay, (M)onth, (Y)ear, (h)ours, (m)inutes, or (s)econds. The timedelta64
data type also accepts the string "NAT" in place of the number for a "Not A Time" value.
.. admonition:: Example
diff --git a/doc/source/reference/arrays.dtypes.rst b/doc/source/reference/arrays.dtypes.rst
index 34b0d7085..8606bc8f1 100644
--- a/doc/source/reference/arrays.dtypes.rst
+++ b/doc/source/reference/arrays.dtypes.rst
@@ -569,3 +569,13 @@ Utility method for typing:
:toctree: generated/
dtype.__class_getitem__
+
+Comparison operations:
+
+.. autosummary::
+ :toctree: generated/
+
+ dtype.__ge__
+ dtype.__gt__
+ dtype.__le__
+ dtype.__lt__
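
These operators order dtypes by safe castability; a quick sketch of the
equivalence with `numpy.can_cast`:

    import numpy as np

    np.dtype(np.int32) <= np.dtype(np.int64)         # True
    np.can_cast(np.int32, np.int64, casting="safe")  # the equivalent call
    np.dtype(np.int64) < np.dtype(np.int32)          # False: cast is unsafe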
diff --git a/doc/source/reference/arrays.ndarray.rst b/doc/source/reference/arrays.ndarray.rst
index 7831b5f2c..0f703b475 100644
--- a/doc/source/reference/arrays.ndarray.rst
+++ b/doc/source/reference/arrays.ndarray.rst
@@ -249,7 +249,6 @@ Other attributes
ndarray.real
ndarray.imag
ndarray.flat
- ndarray.ctypes
.. _arrays.ndarray.array-interface:
diff --git a/doc/source/reference/routines.ma.rst b/doc/source/reference/routines.ma.rst
index 2db325293..5404c43d8 100644
--- a/doc/source/reference/routines.ma.rst
+++ b/doc/source/reference/routines.ma.rst
@@ -44,7 +44,9 @@ Ones and zeros
ma.masked_all
ma.masked_all_like
ma.ones
+ ma.ones_like
ma.zeros
+ ma.zeros_like
_____
@@ -331,6 +333,7 @@ Minimum/maximum
ma.max
ma.min
ma.ptp
+ ma.diff
ma.MaskedArray.argmax
ma.MaskedArray.argmin
diff --git a/doc/source/release.rst b/doc/source/release.rst
index 62bd15790..aa490b5f5 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -6,6 +6,7 @@ Release notes
:maxdepth: 3
1.22.0 <release/1.22.0-notes>
+ 1.21.3 <release/1.21.3-notes>
1.21.2 <release/1.21.2-notes>
1.21.1 <release/1.21.1-notes>
1.21.0 <release/1.21.0-notes>
diff --git a/doc/source/release/1.21.3-notes.rst b/doc/source/release/1.21.3-notes.rst
new file mode 100644
index 000000000..4058452ef
--- /dev/null
+++ b/doc/source/release/1.21.3-notes.rst
@@ -0,0 +1,44 @@
+.. currentmodule:: numpy
+
+==========================
+NumPy 1.21.3 Release Notes
+==========================
+
+NumPy 1.21.3 is a maintenance release that fixes a few bugs discovered after
+1.21.2. It also provides 64-bit Python 3.10.0 wheels. Note a few oddities about
+Python 3.10:
+
+* There are no 32-bit wheels for Windows, Mac, or Linux.
+* The Mac Intel builds are only available in universal2 wheels.
+
+The Python versions supported in this release are 3.7-3.10. If you want to
+compile your own version using gcc-11, you will need to use gcc-11.2+ to avoid
+problems.
+
+Contributors
+============
+
+A total of 7 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Aaron Meurer
+* Bas van Beek
+* Charles Harris
+* Developer-Ecosystem-Engineering +
+* Kevin Sheppard
+* Sebastian Berg
+* Warren Weckesser
+
+Pull requests merged
+====================
+
+A total of 8 pull requests were merged for this release.
+
+* `#19745 <https://github.com/numpy/numpy/pull/19745>`__: ENH: Add dtype-support to 3 ``generic``/``ndarray`` methods
+* `#19955 <https://github.com/numpy/numpy/pull/19955>`__: BUG: Resolve Divide by Zero on Apple silicon + test failures...
+* `#19958 <https://github.com/numpy/numpy/pull/19958>`__: MAINT: Mark type-check-only ufunc subclasses as ufunc aliases...
+* `#19994 <https://github.com/numpy/numpy/pull/19994>`__: BUG: np.tan(np.inf) test failure
+* `#20080 <https://github.com/numpy/numpy/pull/20080>`__: BUG: Correct incorrect advance in PCG with emulated int128
+* `#20081 <https://github.com/numpy/numpy/pull/20081>`__: BUG: Fix NaT handling in the PyArray_CompareFunc for datetime...
+* `#20082 <https://github.com/numpy/numpy/pull/20082>`__: DOC: Ensure that we add documentation also as to the dict for...
+* `#20106 <https://github.com/numpy/numpy/pull/20106>`__: BUG: core: result_type(0, np.timedelta64(4)) would seg. fault.
diff --git a/doc/source/user/absolute_beginners.rst b/doc/source/user/absolute_beginners.rst
index bb570f622..a98ca3e40 100644
--- a/doc/source/user/absolute_beginners.rst
+++ b/doc/source/user/absolute_beginners.rst
@@ -899,12 +899,18 @@ You can aggregate matrices the same way you aggregated vectors::
.. image:: images/np_matrix_aggregation.png
You can aggregate all the values in a matrix and you can aggregate them across
-columns or rows using the ``axis`` parameter::
+columns or rows using the ``axis`` parameter. To illustrate this point, let's
+look at a slightly modified dataset::
+ >>> data = np.array([[1, 2], [5, 3], [4, 6]])
+ >>> data
+ array([[1, 2],
+ [5, 3],
+ [4, 6]])
>>> data.max(axis=0)
array([5, 6])
>>> data.max(axis=1)
- array([2, 4, 6])
+ array([2, 5, 6])
.. image:: images/np_matrix_aggregation_row.png
diff --git a/doc/source/user/quickstart.rst b/doc/source/user/quickstart.rst
index dd5773878..a9cfeca31 100644
--- a/doc/source/user/quickstart.rst
+++ b/doc/source/user/quickstart.rst
@@ -45,10 +45,11 @@ NumPy's main object is the homogeneous multidimensional array. It is a
table of elements (usually numbers), all of the same type, indexed by a
tuple of non-negative integers. In NumPy dimensions are called *axes*.
-For example, the coordinates of a point in 3D space ``[1, 2, 1]`` has
-one axis. That axis has 3 elements in it, so we say it has a length
-of 3. In the example pictured below, the array has 2 axes. The first
-axis has a length of 2, the second axis has a length of 3.
+For example, the array for the coordinates of a point in 3D space,
+``[1, 2, 1]``, has one axis. That axis has 3 elements in it, so we say
+it has a length of 3. In the example pictured below, the array has 2
+axes. The first axis has a length of 2, the second axis has a length of
+3.
::
diff --git a/environment.yml b/environment.yml
index 7c9d28449..6a13499e0 100644
--- a/environment.yml
+++ b/environment.yml
@@ -27,6 +27,7 @@ dependencies:
- pandas
- matplotlib
- pydata-sphinx-theme
+ - breathe
# For linting
- pycodestyle=2.7.0
- gitpython
diff --git a/linter_requirements.txt b/linter_requirements.txt
index 51a769ee0..6ed26c5c0 100644
--- a/linter_requirements.txt
+++ b/linter_requirements.txt
@@ -1,2 +1,2 @@
-pycodestyle==2.7.0
+pycodestyle==2.8.0
GitPython==3.1.13
\ No newline at end of file
diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi
index c78d48cc6..9a788baa7 100644
--- a/numpy/__init__.pyi
+++ b/numpy/__init__.pyi
@@ -33,6 +33,8 @@ from numpy.typing import (
_ArrayLikeTD64_co,
_ArrayLikeDT64_co,
_ArrayLikeObject_co,
+ _ArrayLikeStr_co,
+ _ArrayLikeBytes_co,
# DTypes
DTypeLike,
@@ -197,6 +199,7 @@ from typing import (
Final,
final,
ClassVar,
+ Set,
)
# Ensures that the stubs are picked up
@@ -670,182 +673,6 @@ class MachAr:
) -> None: ...
def __getattr__(self, key: str) -> Any: ...
-class chararray(ndarray[_ShapeType, _DType_co]):
- def __new__(
- subtype,
- shape: Any,
- itemsize: Any = ...,
- unicode: Any = ...,
- buffer: Any = ...,
- offset: Any = ...,
- strides: Any = ...,
- order: Any = ...,
- ) -> Any: ...
- def __array_finalize__(self, obj): ...
- def argsort(self, axis=..., kind=..., order=...): ...
- def capitalize(self): ...
- def center(self, width, fillchar=...): ...
- def count(self, sub, start=..., end=...): ...
- def decode(self, encoding=..., errors=...): ...
- def encode(self, encoding=..., errors=...): ...
- def endswith(self, suffix, start=..., end=...): ...
- def expandtabs(self, tabsize=...): ...
- def find(self, sub, start=..., end=...): ...
- def index(self, sub, start=..., end=...): ...
- def isalnum(self): ...
- def isalpha(self): ...
- def isdigit(self): ...
- def islower(self): ...
- def isspace(self): ...
- def istitle(self): ...
- def isupper(self): ...
- def join(self, seq): ...
- def ljust(self, width, fillchar=...): ...
- def lower(self): ...
- def lstrip(self, chars=...): ...
- def partition(self, sep): ...
- def replace(self, old, new, count=...): ...
- def rfind(self, sub, start=..., end=...): ...
- def rindex(self, sub, start=..., end=...): ...
- def rjust(self, width, fillchar=...): ...
- def rpartition(self, sep): ...
- def rsplit(self, sep=..., maxsplit=...): ...
- def rstrip(self, chars=...): ...
- def split(self, sep=..., maxsplit=...): ...
- def splitlines(self, keepends=...): ...
- def startswith(self, prefix, start=..., end=...): ...
- def strip(self, chars=...): ...
- def swapcase(self): ...
- def title(self): ...
- def translate(self, table, deletechars=...): ...
- def upper(self): ...
- def zfill(self, width): ...
- def isnumeric(self): ...
- def isdecimal(self): ...
-
-class matrix(ndarray[_ShapeType, _DType_co]):
- def __new__(
- subtype,
- data: Any,
- dtype: Any = ...,
- copy: Any = ...,
- ) -> Any: ...
- def __array_finalize__(self, obj): ...
- def __getitem__(self, index): ...
- def __mul__(self, other): ...
- def __rmul__(self, other): ...
- def __imul__(self, other): ...
- def __pow__(self, other): ...
- def __ipow__(self, other): ...
- def __rpow__(self, other): ...
- def tolist(self): ...
- def sum(self, axis=..., dtype=..., out=...): ...
- def squeeze(self, axis=...): ...
- def flatten(self, order=...): ...
- def mean(self, axis=..., dtype=..., out=...): ...
- def std(self, axis=..., dtype=..., out=..., ddof=...): ...
- def var(self, axis=..., dtype=..., out=..., ddof=...): ...
- def prod(self, axis=..., dtype=..., out=...): ...
- def any(self, axis=..., out=...): ...
- def all(self, axis=..., out=...): ...
- def max(self, axis=..., out=...): ...
- def argmax(self, axis=..., out=...): ...
- def min(self, axis=..., out=...): ...
- def argmin(self, axis=..., out=...): ...
- def ptp(self, axis=..., out=...): ...
- def ravel(self, order=...): ...
- @property
- def T(self): ...
- @property
- def I(self): ...
- @property
- def A(self): ...
- @property
- def A1(self): ...
- @property
- def H(self): ...
- def getT(self): ...
- def getA(self): ...
- def getA1(self): ...
- def getH(self): ...
- def getI(self): ...
-
-class poly1d:
- def __init__(
- self,
- c_or_r: Any,
- r: Any = ...,
- variable: Any = ...,
- ) -> None: ...
- def __call__(self, val: Any) -> Any: ...
- __hash__: Any
- @property
- def coeffs(self): ...
- @coeffs.setter
- def coeffs(self, value): ...
- @property
- def c(self): ...
- @c.setter
- def c(self, value): ...
- @property
- def coef(self): ...
- @coef.setter
- def coef(self, value): ...
- @property
- def coefficients(self): ...
- @coefficients.setter
- def coefficients(self, value): ...
- @property
- def variable(self): ...
- @property
- def order(self): ...
- @property
- def o(self): ...
- @property
- def roots(self): ...
- @property
- def r(self): ...
- def __array__(self, t=...): ...
- def __len__(self): ...
- def __neg__(self): ...
- def __pos__(self): ...
- def __mul__(self, other): ...
- def __rmul__(self, other): ...
- def __add__(self, other): ...
- def __radd__(self, other): ...
- def __pow__(self, val): ...
- def __sub__(self, other): ...
- def __rsub__(self, other): ...
- def __div__(self, other): ...
- def __truediv__(self, other): ...
- def __rdiv__(self, other): ...
- def __rtruediv__(self, other): ...
- def __eq__(self, other): ...
- def __ne__(self, other): ...
- def __getitem__(self, val): ...
- def __setitem__(self, key, val): ...
- def __iter__(self): ...
- def integ(self, m=..., k=...): ...
- def deriv(self, m=...): ...
-
-class vectorize:
- pyfunc: Any
- cache: Any
- signature: Any
- otypes: Any
- excluded: Any
- __doc__: Any
- def __init__(
- self,
- pyfunc,
- otypes: Any = ...,
- doc: Any = ...,
- excluded: Any = ...,
- cache: Any = ...,
- signature: Any = ...,
- ) -> None: ...
- def __call__(self, *args: Any, **kwargs: Any) -> Any: ...
-
# Some of these are aliases; others are wrappers with an identical signature
round = around
round_ = around
@@ -1181,8 +1008,6 @@ class _ArrayOrScalarCommon:
# generics and 0d arrays return builtin scalars
def tolist(self) -> Any: ...
- # TODO: Add proper signatures
- def __getitem__(self, key) -> Any: ...
@property
def __array_interface__(self): ...
@property
@@ -1679,6 +1504,26 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]):
/,
) -> ndarray[_ShapeType2, _DType]: ...
+ @overload
+ def __getitem__(self, key: Union[
+ SupportsIndex,
+ _ArrayLikeInt_co,
+ Tuple[SupportsIndex | _ArrayLikeInt_co, ...],
+ ]) -> Any: ...
+ @overload
+ def __getitem__(self, key: Union[
+ None,
+ slice,
+ ellipsis,
+ SupportsIndex,
+ _ArrayLikeInt_co,
+ Tuple[None | slice | ellipsis | _ArrayLikeInt_co | SupportsIndex, ...],
+ ]) -> ndarray[Any, _DType_co]: ...
+ @overload
+ def __getitem__(self: NDArray[void], key: str) -> NDArray[Any]: ...
+ @overload
+ def __getitem__(self: NDArray[void], key: list[str]) -> ndarray[_ShapeType, dtype[void]]: ...
+
@property
def ctypes(self) -> _ctypes[int]: ...
@property
@@ -3879,3 +3724,637 @@ class memmap(ndarray[_ShapeType, _DType_co]):
) -> Any: ...
def __getitem__(self, index): ... # TODO
def flush(self) -> None: ...
+
+class vectorize:
+ pyfunc: Callable[..., Any]
+ cache: bool
+ signature: None | str
+ otypes: None | str
+ excluded: Set[int | str]
+ __doc__: None | str
+ def __init__(
+ self,
+ pyfunc: Callable[..., Any],
+ otypes: None | str | Iterable[DTypeLike] = ...,
+ doc: None | str = ...,
+ excluded: None | Iterable[int | str] = ...,
+ cache: bool = ...,
+ signature: None | str = ...,
+ ) -> None: ...
+ def __call__(self, *args: Any, **kwargs: Any) -> NDArray[Any]: ...
+
+class poly1d:
+ @property
+ def variable(self) -> str: ...
+ @property
+ def order(self) -> int: ...
+ @property
+ def o(self) -> int: ...
+ @property
+ def roots(self) -> NDArray[Any]: ...
+ @property
+ def r(self) -> NDArray[Any]: ...
+
+ @property
+ def coeffs(self) -> NDArray[Any]: ...
+ @coeffs.setter
+ def coeffs(self, value: NDArray[Any]) -> None: ...
+
+ @property
+ def c(self) -> NDArray[Any]: ...
+ @c.setter
+ def c(self, value: NDArray[Any]) -> None: ...
+
+ @property
+ def coef(self) -> NDArray[Any]: ...
+ @coef.setter
+ def coef(self, value: NDArray[Any]) -> None: ...
+
+ @property
+ def coefficients(self) -> NDArray[Any]: ...
+ @coefficients.setter
+ def coefficients(self, value: NDArray[Any]) -> None: ...
+
+ __hash__: None # type: ignore
+
+ @overload
+ def __array__(self, t: None = ...) -> NDArray[Any]: ...
+ @overload
+ def __array__(self, t: _DType) -> ndarray[Any, _DType]: ...
+
+ @overload
+ def __call__(self, val: _ScalarLike_co) -> Any: ...
+ @overload
+ def __call__(self, val: poly1d) -> poly1d: ...
+ @overload
+ def __call__(self, val: ArrayLike) -> NDArray[Any]: ...
+
+ def __init__(
+ self,
+ c_or_r: ArrayLike,
+ r: bool = ...,
+ variable: None | str = ...,
+ ) -> None: ...
+ def __len__(self) -> int: ...
+ def __neg__(self) -> poly1d: ...
+ def __pos__(self) -> poly1d: ...
+ def __mul__(self, other: ArrayLike) -> poly1d: ...
+ def __rmul__(self, other: ArrayLike) -> poly1d: ...
+ def __add__(self, other: ArrayLike) -> poly1d: ...
+ def __radd__(self, other: ArrayLike) -> poly1d: ...
+ def __pow__(self, val: _FloatLike_co) -> poly1d: ... # Integral floats are accepted
+ def __sub__(self, other: ArrayLike) -> poly1d: ...
+ def __rsub__(self, other: ArrayLike) -> poly1d: ...
+ def __div__(self, other: ArrayLike) -> poly1d: ...
+ def __truediv__(self, other: ArrayLike) -> poly1d: ...
+ def __rdiv__(self, other: ArrayLike) -> poly1d: ...
+ def __rtruediv__(self, other: ArrayLike) -> poly1d: ...
+ def __getitem__(self, val: int) -> Any: ...
+ def __setitem__(self, key: int, val: Any) -> None: ...
+ def __iter__(self) -> Iterator[Any]: ...
+ def deriv(self, m: SupportsInt | SupportsIndex = ...) -> poly1d: ...
+ def integ(
+ self,
+ m: SupportsInt | SupportsIndex = ...,
+ k: None | _ArrayLikeComplex_co | _ArrayLikeObject_co = ...,
+ ) -> poly1d: ...
+
+class matrix(ndarray[_ShapeType, _DType_co]):
+ def __new__(
+ subtype,
+ data: ArrayLike,
+ dtype: DTypeLike = ...,
+ copy: bool = ...,
+ ) -> matrix[Any, Any]: ...
+ def __array_finalize__(self, obj: NDArray[Any]) -> None: ...
+ def __getitem__(self, index): ... # TODO
+ def __mul__(self, other: ArrayLike) -> matrix[Any, Any]: ...
+ def __rmul__(self, other: ArrayLike) -> matrix[Any, Any]: ...
+ def __imul__(self, other: ArrayLike) -> matrix[_ShapeType, _DType_co]: ...
+ def __pow__(self, other: ArrayLike) -> matrix[Any, Any]: ...
+ def __ipow__(self, other: ArrayLike) -> matrix[_ShapeType, _DType_co]: ...
+
+ @overload
+ def sum(self, axis: None = ..., dtype: DTypeLike = ..., out: None = ...) -> Any: ...
+ @overload
+ def sum(self, axis: _ShapeLike, dtype: DTypeLike = ..., out: None = ...) -> matrix[Any, Any]: ...
+ @overload
+ def sum(self, axis: None | _ShapeLike = ..., dtype: DTypeLike = ..., out: _NdArraySubClass = ...) -> _NdArraySubClass: ...
+
+ @overload
+ def mean(self, axis: None = ..., dtype: DTypeLike = ..., out: None = ...) -> Any: ...
+ @overload
+ def mean(self, axis: _ShapeLike, dtype: DTypeLike = ..., out: None = ...) -> matrix[Any, Any]: ...
+ @overload
+ def mean(self, axis: None | _ShapeLike = ..., dtype: DTypeLike = ..., out: _NdArraySubClass = ...) -> _NdArraySubClass: ...
+
+ @overload
+ def std(self, axis: None = ..., dtype: DTypeLike = ..., out: None = ..., ddof: float = ...) -> Any: ...
+ @overload
+ def std(self, axis: _ShapeLike, dtype: DTypeLike = ..., out: None = ..., ddof: float = ...) -> matrix[Any, Any]: ...
+ @overload
+ def std(self, axis: None | _ShapeLike = ..., dtype: DTypeLike = ..., out: _NdArraySubClass = ..., ddof: float = ...) -> _NdArraySubClass: ...
+
+ @overload
+ def var(self, axis: None = ..., dtype: DTypeLike = ..., out: None = ..., ddof: float = ...) -> Any: ...
+ @overload
+ def var(self, axis: _ShapeLike, dtype: DTypeLike = ..., out: None = ..., ddof: float = ...) -> matrix[Any, Any]: ...
+ @overload
+ def var(self, axis: None | _ShapeLike = ..., dtype: DTypeLike = ..., out: _NdArraySubClass = ..., ddof: float = ...) -> _NdArraySubClass: ...
+
+ @overload
+ def prod(self, axis: None = ..., dtype: DTypeLike = ..., out: None = ...) -> Any: ...
+ @overload
+ def prod(self, axis: _ShapeLike, dtype: DTypeLike = ..., out: None = ...) -> matrix[Any, Any]: ...
+ @overload
+ def prod(self, axis: None | _ShapeLike = ..., dtype: DTypeLike = ..., out: _NdArraySubClass = ...) -> _NdArraySubClass: ...
+
+ @overload
+ def any(self, axis: None = ..., out: None = ...) -> bool_: ...
+ @overload
+ def any(self, axis: _ShapeLike, out: None = ...) -> matrix[Any, dtype[bool_]]: ...
+ @overload
+ def any(self, axis: None | _ShapeLike = ..., out: _NdArraySubClass = ...) -> _NdArraySubClass: ...
+
+ @overload
+ def all(self, axis: None = ..., out: None = ...) -> bool_: ...
+ @overload
+ def all(self, axis: _ShapeLike, out: None = ...) -> matrix[Any, dtype[bool_]]: ...
+ @overload
+ def all(self, axis: None | _ShapeLike = ..., out: _NdArraySubClass = ...) -> _NdArraySubClass: ...
+
+ @overload
+ def max(self: NDArray[_ScalarType], axis: None = ..., out: None = ...) -> _ScalarType: ...
+ @overload
+ def max(self, axis: _ShapeLike, out: None = ...) -> matrix[Any, _DType_co]: ...
+ @overload
+ def max(self, axis: None | _ShapeLike = ..., out: _NdArraySubClass = ...) -> _NdArraySubClass: ...
+
+ @overload
+ def min(self: NDArray[_ScalarType], axis: None = ..., out: None = ...) -> _ScalarType: ...
+ @overload
+ def min(self, axis: _ShapeLike, out: None = ...) -> matrix[Any, _DType_co]: ...
+ @overload
+ def min(self, axis: None | _ShapeLike = ..., out: _NdArraySubClass = ...) -> _NdArraySubClass: ...
+
+ @overload
+ def argmax(self: NDArray[_ScalarType], axis: None = ..., out: None = ...) -> intp: ...
+ @overload
+ def argmax(self, axis: _ShapeLike, out: None = ...) -> matrix[Any, dtype[intp]]: ...
+ @overload
+ def argmax(self, axis: None | _ShapeLike = ..., out: _NdArraySubClass = ...) -> _NdArraySubClass: ...
+
+ @overload
+ def argmin(self: NDArray[_ScalarType], axis: None = ..., out: None = ...) -> intp: ...
+ @overload
+ def argmin(self, axis: _ShapeLike, out: None = ...) -> matrix[Any, dtype[intp]]: ...
+ @overload
+ def argmin(self, axis: None | _ShapeLike = ..., out: _NdArraySubClass = ...) -> _NdArraySubClass: ...
+
+ @overload
+ def ptp(self: NDArray[_ScalarType], axis: None = ..., out: None = ...) -> _ScalarType: ...
+ @overload
+ def ptp(self, axis: _ShapeLike, out: None = ...) -> matrix[Any, _DType_co]: ...
+ @overload
+ def ptp(self, axis: None | _ShapeLike = ..., out: _NdArraySubClass = ...) -> _NdArraySubClass: ...
+
+ def squeeze(self, axis: None | _ShapeLike = ...) -> matrix[Any, _DType_co]: ...
+ def tolist(self: matrix[Any, dtype[_SupportsItem[_T]]]) -> List[List[_T]]: ... # type: ignore[typevar]
+ def ravel(self, order: _OrderKACF = ...) -> matrix[Any, _DType_co]: ...
+ def flatten(self, order: _OrderKACF = ...) -> matrix[Any, _DType_co]: ...
+
+ @property
+ def T(self) -> matrix[Any, _DType_co]: ...
+ @property
+ def I(self) -> matrix[Any, Any]: ...
+ @property
+ def A(self) -> ndarray[_ShapeType, _DType_co]: ...
+ @property
+ def A1(self) -> ndarray[Any, _DType_co]: ...
+ @property
+ def H(self) -> matrix[Any, _DType_co]: ...
+ def getT(self) -> matrix[Any, _DType_co]: ...
+ def getI(self) -> matrix[Any, Any]: ...
+ def getA(self) -> ndarray[_ShapeType, _DType_co]: ...
+ def getA1(self) -> ndarray[Any, _DType_co]: ...
+ def getH(self) -> matrix[Any, _DType_co]: ...
+
+_CharType = TypeVar("_CharType", str_, bytes_)
+_CharDType = TypeVar("_CharDType", dtype[str_], dtype[bytes_])
+_CharArray = chararray[Any, dtype[_CharType]]
+
+class chararray(ndarray[_ShapeType, _CharDType]):
+ @overload
+ def __new__(
+ subtype,
+ shape: _ShapeLike,
+ itemsize: SupportsIndex | SupportsInt = ...,
+ unicode: L[False] = ...,
+ buffer: _SupportsBuffer = ...,
+ offset: SupportsIndex = ...,
+ strides: _ShapeLike = ...,
+ order: _OrderKACF = ...,
+ ) -> chararray[Any, dtype[bytes_]]: ...
+ @overload
+ def __new__(
+ subtype,
+ shape: _ShapeLike,
+ itemsize: SupportsIndex | SupportsInt = ...,
+ unicode: L[True] = ...,
+ buffer: _SupportsBuffer = ...,
+ offset: SupportsIndex = ...,
+ strides: _ShapeLike = ...,
+ order: _OrderKACF = ...,
+ ) -> chararray[Any, dtype[str_]]: ...
+
+ def __array_finalize__(self, obj: NDArray[str_ | bytes_]) -> None: ...
+ def __mul__(self, other: _ArrayLikeInt_co) -> chararray[Any, _CharDType]: ...
+ def __rmul__(self, other: _ArrayLikeInt_co) -> chararray[Any, _CharDType]: ...
+ def __mod__(self, i: Any) -> chararray[Any, _CharDType]: ...
+
+ @overload
+ def __eq__(
+ self: _CharArray[str_],
+ other: _ArrayLikeStr_co,
+ ) -> NDArray[bool_]: ...
+ @overload
+ def __eq__(
+ self: _CharArray[bytes_],
+ other: _ArrayLikeBytes_co,
+ ) -> NDArray[bool_]: ...
+
+ @overload
+ def __ne__(
+ self: _CharArray[str_],
+ other: _ArrayLikeStr_co,
+ ) -> NDArray[bool_]: ...
+ @overload
+ def __ne__(
+ self: _CharArray[bytes_],
+ other: _ArrayLikeBytes_co,
+ ) -> NDArray[bool_]: ...
+
+ @overload
+ def __ge__(
+ self: _CharArray[str_],
+ other: _ArrayLikeStr_co,
+ ) -> NDArray[bool_]: ...
+ @overload
+ def __ge__(
+ self: _CharArray[bytes_],
+ other: _ArrayLikeBytes_co,
+ ) -> NDArray[bool_]: ...
+
+ @overload
+ def __le__(
+ self: _CharArray[str_],
+ other: _ArrayLikeStr_co,
+ ) -> NDArray[bool_]: ...
+ @overload
+ def __le__(
+ self: _CharArray[bytes_],
+ other: _ArrayLikeBytes_co,
+ ) -> NDArray[bool_]: ...
+
+ @overload
+ def __gt__(
+ self: _CharArray[str_],
+ other: _ArrayLikeStr_co,
+ ) -> NDArray[bool_]: ...
+ @overload
+ def __gt__(
+ self: _CharArray[bytes_],
+ other: _ArrayLikeBytes_co,
+ ) -> NDArray[bool_]: ...
+
+ @overload
+ def __lt__(
+ self: _CharArray[str_],
+ other: _ArrayLikeStr_co,
+ ) -> NDArray[bool_]: ...
+ @overload
+ def __lt__(
+ self: _CharArray[bytes_],
+ other: _ArrayLikeBytes_co,
+ ) -> NDArray[bool_]: ...
+
+ @overload
+ def __add__(
+ self: _CharArray[str_],
+ other: _ArrayLikeStr_co,
+ ) -> _CharArray[str_]: ...
+ @overload
+ def __add__(
+ self: _CharArray[bytes_],
+ other: _ArrayLikeBytes_co,
+ ) -> _CharArray[bytes_]: ...
+
+ @overload
+ def __radd__(
+ self: _CharArray[str_],
+ other: _ArrayLikeStr_co,
+ ) -> _CharArray[str_]: ...
+ @overload
+ def __radd__(
+ self: _CharArray[bytes_],
+ other: _ArrayLikeBytes_co,
+ ) -> _CharArray[bytes_]: ...
+
+ @overload
+ def center(
+ self: _CharArray[str_],
+ width: _ArrayLikeInt_co,
+ fillchar: _ArrayLikeStr_co = ...,
+ ) -> _CharArray[str_]: ...
+ @overload
+ def center(
+ self: _CharArray[bytes_],
+ width: _ArrayLikeInt_co,
+ fillchar: _ArrayLikeBytes_co = ...,
+ ) -> _CharArray[bytes_]: ...
+
+ @overload
+ def count(
+ self: _CharArray[str_],
+ sub: _ArrayLikeStr_co,
+ start: _ArrayLikeInt_co = ...,
+ end: None | _ArrayLikeInt_co = ...,
+ ) -> NDArray[int_]: ...
+ @overload
+ def count(
+ self: _CharArray[bytes_],
+ sub: _ArrayLikeBytes_co,
+ start: _ArrayLikeInt_co = ...,
+ end: None | _ArrayLikeInt_co = ...,
+ ) -> NDArray[int_]: ...
+
+ def decode(
+ self: _CharArray[bytes_],
+ encoding: None | str = ...,
+ errors: None | str = ...,
+ ) -> _CharArray[str_]: ...
+
+ def encode(
+ self: _CharArray[str_],
+ encoding: None | str = ...,
+ errors: None | str = ...,
+ ) -> _CharArray[bytes_]: ...
+
+ @overload
+ def endswith(
+ self: _CharArray[str_],
+ suffix: _ArrayLikeStr_co,
+ start: _ArrayLikeInt_co = ...,
+ end: None | _ArrayLikeInt_co = ...,
+ ) -> NDArray[bool_]: ...
+ @overload
+ def endswith(
+ self: _CharArray[bytes_],
+ suffix: _ArrayLikeBytes_co,
+ start: _ArrayLikeInt_co = ...,
+ end: None | _ArrayLikeInt_co = ...,
+ ) -> NDArray[bool_]: ...
+
+ def expandtabs(
+ self,
+ tabsize: _ArrayLikeInt_co = ...,
+ ) -> chararray[Any, _CharDType]: ...
+
+ @overload
+ def find(
+ self: _CharArray[str_],
+ sub: _ArrayLikeStr_co,
+ start: _ArrayLikeInt_co = ...,
+ end: None | _ArrayLikeInt_co = ...,
+ ) -> NDArray[int_]: ...
+ @overload
+ def find(
+ self: _CharArray[bytes_],
+ sub: _ArrayLikeBytes_co,
+ start: _ArrayLikeInt_co = ...,
+ end: None | _ArrayLikeInt_co = ...,
+ ) -> NDArray[int_]: ...
+
+ @overload
+ def index(
+ self: _CharArray[str_],
+ sub: _ArrayLikeStr_co,
+ start: _ArrayLikeInt_co = ...,
+ end: None | _ArrayLikeInt_co = ...,
+ ) -> NDArray[int_]: ...
+ @overload
+ def index(
+ self: _CharArray[bytes_],
+ sub: _ArrayLikeBytes_co,
+ start: _ArrayLikeInt_co = ...,
+ end: None | _ArrayLikeInt_co = ...,
+ ) -> NDArray[int_]: ...
+
+ @overload
+ def join(
+ self: _CharArray[str_],
+ seq: _ArrayLikeStr_co,
+ ) -> _CharArray[str_]: ...
+ @overload
+ def join(
+ self: _CharArray[bytes_],
+ seq: _ArrayLikeBytes_co,
+ ) -> _CharArray[bytes_]: ...
+
+ @overload
+ def ljust(
+ self: _CharArray[str_],
+ width: _ArrayLikeInt_co,
+ fillchar: _ArrayLikeStr_co = ...,
+ ) -> _CharArray[str_]: ...
+ @overload
+ def ljust(
+ self: _CharArray[bytes_],
+ width: _ArrayLikeInt_co,
+ fillchar: _ArrayLikeBytes_co = ...,
+ ) -> _CharArray[bytes_]: ...
+
+ @overload
+ def lstrip(
+ self: _CharArray[str_],
+ chars: None | _ArrayLikeStr_co = ...,
+ ) -> _CharArray[str_]: ...
+ @overload
+ def lstrip(
+ self: _CharArray[bytes_],
+ chars: None | _ArrayLikeBytes_co = ...,
+ ) -> _CharArray[bytes_]: ...
+
+ @overload
+ def partition(
+ self: _CharArray[str_],
+ sep: _ArrayLikeStr_co,
+ ) -> _CharArray[str_]: ...
+ @overload
+ def partition(
+ self: _CharArray[bytes_],
+ sep: _ArrayLikeBytes_co,
+ ) -> _CharArray[bytes_]: ...
+
+ @overload
+ def replace(
+ self: _CharArray[str_],
+ old: _ArrayLikeStr_co,
+ new: _ArrayLikeStr_co,
+ count: None | _ArrayLikeInt_co = ...,
+ ) -> _CharArray[str_]: ...
+ @overload
+ def replace(
+ self: _CharArray[bytes_],
+ old: _ArrayLikeBytes_co,
+ new: _ArrayLikeBytes_co,
+ count: None | _ArrayLikeInt_co = ...,
+ ) -> _CharArray[bytes_]: ...
+
+ @overload
+ def rfind(
+ self: _CharArray[str_],
+ sub: _ArrayLikeStr_co,
+ start: _ArrayLikeInt_co = ...,
+ end: None | _ArrayLikeInt_co = ...,
+ ) -> NDArray[int_]: ...
+ @overload
+ def rfind(
+ self: _CharArray[bytes_],
+ sub: _ArrayLikeBytes_co,
+ start: _ArrayLikeInt_co = ...,
+ end: None | _ArrayLikeInt_co = ...,
+ ) -> NDArray[int_]: ...
+
+ @overload
+ def rindex(
+ self: _CharArray[str_],
+ sub: _ArrayLikeStr_co,
+ start: _ArrayLikeInt_co = ...,
+ end: None | _ArrayLikeInt_co = ...,
+ ) -> NDArray[int_]: ...
+ @overload
+ def rindex(
+ self: _CharArray[bytes_],
+ sub: _ArrayLikeBytes_co,
+ start: _ArrayLikeInt_co = ...,
+ end: None | _ArrayLikeInt_co = ...,
+ ) -> NDArray[int_]: ...
+
+ @overload
+ def rjust(
+ self: _CharArray[str_],
+ width: _ArrayLikeInt_co,
+ fillchar: _ArrayLikeStr_co = ...,
+ ) -> _CharArray[str_]: ...
+ @overload
+ def rjust(
+ self: _CharArray[bytes_],
+ width: _ArrayLikeInt_co,
+ fillchar: _ArrayLikeBytes_co = ...,
+ ) -> _CharArray[bytes_]: ...
+
+ @overload
+ def rpartition(
+ self: _CharArray[str_],
+ sep: _ArrayLikeStr_co,
+ ) -> _CharArray[str_]: ...
+ @overload
+ def rpartition(
+ self: _CharArray[bytes_],
+ sep: _ArrayLikeBytes_co,
+ ) -> _CharArray[bytes_]: ...
+
+ @overload
+ def rsplit(
+ self: _CharArray[str_],
+ sep: None | _ArrayLikeStr_co = ...,
+ maxsplit: None | _ArrayLikeInt_co = ...,
+ ) -> NDArray[object_]: ...
+ @overload
+ def rsplit(
+ self: _CharArray[bytes_],
+ sep: None | _ArrayLikeBytes_co = ...,
+ maxsplit: None | _ArrayLikeInt_co = ...,
+ ) -> NDArray[object_]: ...
+
+ @overload
+ def rstrip(
+ self: _CharArray[str_],
+ chars: None | _ArrayLikeStr_co = ...,
+ ) -> _CharArray[str_]: ...
+ @overload
+ def rstrip(
+ self: _CharArray[bytes_],
+ chars: None | _ArrayLikeBytes_co = ...,
+ ) -> _CharArray[bytes_]: ...
+
+ @overload
+ def split(
+ self: _CharArray[str_],
+ sep: None | _ArrayLikeStr_co = ...,
+ maxsplit: None | _ArrayLikeInt_co = ...,
+ ) -> NDArray[object_]: ...
+ @overload
+ def split(
+ self: _CharArray[bytes_],
+ sep: None | _ArrayLikeBytes_co = ...,
+ maxsplit: None | _ArrayLikeInt_co = ...,
+ ) -> NDArray[object_]: ...
+
+ def splitlines(self, keepends: None | _ArrayLikeBool_co = ...) -> NDArray[object_]: ...
+
+ @overload
+ def startswith(
+ self: _CharArray[str_],
+ prefix: _ArrayLikeStr_co,
+ start: _ArrayLikeInt_co = ...,
+ end: None | _ArrayLikeInt_co = ...,
+ ) -> NDArray[bool_]: ...
+ @overload
+ def startswith(
+ self: _CharArray[bytes_],
+ prefix: _ArrayLikeBytes_co,
+ start: _ArrayLikeInt_co = ...,
+ end: None | _ArrayLikeInt_co = ...,
+ ) -> NDArray[bool_]: ...
+
+ @overload
+ def strip(
+ self: _CharArray[str_],
+ chars: None | _ArrayLikeStr_co = ...,
+ ) -> _CharArray[str_]: ...
+ @overload
+ def strip(
+ self: _CharArray[bytes_],
+ chars: None | _ArrayLikeBytes_co = ...,
+ ) -> _CharArray[bytes_]: ...
+
+ @overload
+ def translate(
+ self: _CharArray[str_],
+ table: _ArrayLikeStr_co,
+ deletechars: None | _ArrayLikeStr_co = ...,
+ ) -> _CharArray[str_]: ...
+ @overload
+ def translate(
+ self: _CharArray[bytes_],
+ table: _ArrayLikeBytes_co,
+ deletechars: None | _ArrayLikeBytes_co = ...,
+ ) -> _CharArray[bytes_]: ...
+
+ def zfill(self, width: _ArrayLikeInt_co) -> chararray[Any, _CharDType]: ...
+ def capitalize(self) -> chararray[_ShapeType, _CharDType]: ...
+ def title(self) -> chararray[_ShapeType, _CharDType]: ...
+ def swapcase(self) -> chararray[_ShapeType, _CharDType]: ...
+ def lower(self) -> chararray[_ShapeType, _CharDType]: ...
+ def upper(self) -> chararray[_ShapeType, _CharDType]: ...
+ def isalnum(self) -> ndarray[_ShapeType, dtype[bool_]]: ...
+ def isalpha(self) -> ndarray[_ShapeType, dtype[bool_]]: ...
+ def isdigit(self) -> ndarray[_ShapeType, dtype[bool_]]: ...
+ def islower(self) -> ndarray[_ShapeType, dtype[bool_]]: ...
+ def isspace(self) -> ndarray[_ShapeType, dtype[bool_]]: ...
+ def istitle(self) -> ndarray[_ShapeType, dtype[bool_]]: ...
+ def isupper(self) -> ndarray[_ShapeType, dtype[bool_]]: ...
+ def isnumeric(self) -> ndarray[_ShapeType, dtype[bool_]]: ...
+ def isdecimal(self) -> ndarray[_ShapeType, dtype[bool_]]: ...
diff --git a/numpy/array_api/_array_object.py b/numpy/array_api/_array_object.py
index 830319e8c..ef66c5efd 100644
--- a/numpy/array_api/_array_object.py
+++ b/numpy/array_api/_array_object.py
@@ -29,7 +29,7 @@ from ._dtypes import (
_dtype_categories,
)
-from typing import TYPE_CHECKING, Optional, Tuple, Union
+from typing import TYPE_CHECKING, Optional, Tuple, Union, Any
if TYPE_CHECKING:
from ._typing import PyCapsule, Device, Dtype
@@ -382,7 +382,7 @@ class Array:
def __array_namespace__(
self: Array, /, *, api_version: Optional[str] = None
- ) -> object:
+ ) -> Any:
if api_version is not None and not api_version.startswith("2021."):
raise ValueError(f"Unrecognized array API version: {api_version!r}")
return array_api
diff --git a/numpy/array_api/_typing.py b/numpy/array_api/_typing.py
index 519e8463c..dfa87b358 100644
--- a/numpy/array_api/_typing.py
+++ b/numpy/array_api/_typing.py
@@ -6,6 +6,8 @@ annotations in the function signatures. The functions in the module are only
valid for inputs that match the given type annotations.
"""
+from __future__ import annotations
+
__all__ = [
"Array",
"Device",
@@ -16,7 +18,16 @@ __all__ = [
]
import sys
-from typing import Any, Literal, Sequence, Type, Union, TYPE_CHECKING, TypeVar
+from typing import (
+ Any,
+ Literal,
+ Sequence,
+ Type,
+ Union,
+ TYPE_CHECKING,
+ TypeVar,
+ Protocol,
+)
from ._array_object import Array
from numpy import (
@@ -33,10 +44,11 @@ from numpy import (
float64,
)
-# This should really be recursive, but that isn't supported yet. See the
-# similar comment in numpy/typing/_array_like.py
-_T = TypeVar("_T")
-NestedSequence = Sequence[Sequence[_T]]
+_T_co = TypeVar("_T_co", covariant=True)
+
+class NestedSequence(Protocol[_T_co]):
+ def __getitem__(self, key: int, /) -> _T_co | NestedSequence[_T_co]: ...
+ def __len__(self, /) -> int: ...
Device = Literal["cpu"]
if TYPE_CHECKING or sys.version_info >= (3, 9):
@@ -55,6 +67,8 @@ if TYPE_CHECKING or sys.version_info >= (3, 9):
else:
Dtype = dtype
-SupportsDLPack = Any
SupportsBufferProtocol = Any
PyCapsule = Any
+
+class SupportsDLPack(Protocol):
+ def __dlpack__(self, /, *, stream: None = ...) -> PyCapsule: ...
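
A self-contained sketch of why the ``NestedSequence`` protocol above works:
plain nested lists satisfy it structurally, with no registration needed (the
helper name here is illustrative):

    from typing import Protocol, TypeVar, Union

    _T_co = TypeVar("_T_co", covariant=True)

    class NestedSequence(Protocol[_T_co]):
        def __getitem__(self, key: int) -> "Union[_T_co, NestedSequence[_T_co]]": ...
        def __len__(self) -> int: ...

    def outer_len(seq: "NestedSequence[int]") -> int:
        return len(seq)

    outer_len([[1, 2], [3, 4]])  # a list of lists matches the protocol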
diff --git a/numpy/array_api/tests/test_creation_functions.py b/numpy/array_api/tests/test_creation_functions.py
index 3cb8865cd..7b633eaf1 100644
--- a/numpy/array_api/tests/test_creation_functions.py
+++ b/numpy/array_api/tests/test_creation_functions.py
@@ -8,30 +8,15 @@ from .._creation_functions import (
empty,
empty_like,
eye,
- from_dlpack,
full,
full_like,
linspace,
- meshgrid,
ones,
ones_like,
zeros,
zeros_like,
)
from .._array_object import Array
-from .._dtypes import (
- _all_dtypes,
- _boolean_dtypes,
- _floating_dtypes,
- _integer_dtypes,
- _integer_or_boolean_dtypes,
- _numeric_dtypes,
- int8,
- int16,
- int32,
- int64,
- uint64,
-)
def test_asarray_errors():
diff --git a/numpy/core/_add_newdocs.py b/numpy/core/_add_newdocs.py
index bb0c2ea12..50291c3e7 100644
--- a/numpy/core/_add_newdocs.py
+++ b/numpy/core/_add_newdocs.py
@@ -2819,7 +2819,7 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('__class_getitem__',
>>> import numpy as np
>>> np.ndarray[Any, np.dtype[Any]]
- numpy.ndarray[typing.Any, numpy.dtype[Any]]
+ numpy.ndarray[typing.Any, numpy.dtype[typing.Any]]
Notes
-----
@@ -4044,6 +4044,9 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('partition',
The order of all elements in the partitions is undefined.
If provided with a sequence of kth it will partition all elements
indexed by kth of them into their sorted position at once.
+
+ .. deprecated:: 1.22.0
+ Passing booleans as index is deprecated.
axis : int, optional
Axis along which to sort. Default is -1, which means sort along the
last axis.
@@ -6110,6 +6113,68 @@ add_newdoc('numpy.core.multiarray', 'dtype', ('__class_getitem__',
"""))
+add_newdoc('numpy.core.multiarray', 'dtype', ('__ge__',
+ """
+ __ge__(value, /)
+
+ Return ``self >= value``.
+
+ Equivalent to ``np.can_cast(value, self, casting="safe")``.
+
+ See Also
+ --------
+ can_cast : Returns True if cast between data types can occur according to
+ the casting rule.
+
+ """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('__le__',
+ """
+ __le__(value, /)
+
+ Return ``self <= value``.
+
+ Equivalent to ``np.can_cast(self, value, casting="safe")``.
+
+ See Also
+ --------
+ can_cast : Returns True if cast between data types can occur according to
+ the casting rule.
+
+ """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('__gt__',
+ """
+    __gt__(value, /)
+
+ Return ``self > value``.
+
+ Equivalent to
+ ``self != value and np.can_cast(value, self, casting="safe")``.
+
+ See Also
+ --------
+ can_cast : Returns True if cast between data types can occur according to
+ the casting rule.
+
+ """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('__lt__',
+ """
+ __lt__(value, /)
+
+ Return ``self < value``.
+
+ Equivalent to
+ ``self != value and np.can_cast(self, value, casting="safe")``.
+
+ See Also
+ --------
+ can_cast : Returns True if cast between data types can occur according to
+ the casting rule.
+
+ """))
+
##############################################################################
#
# Datetime-related Methods
diff --git a/numpy/core/code_generators/generate_umath.py b/numpy/core/code_generators/generate_umath.py
index 4891e8f23..3a27a34cd 100644
--- a/numpy/core/code_generators/generate_umath.py
+++ b/numpy/core/code_generators/generate_umath.py
@@ -359,7 +359,7 @@ defdict = {
docstrings.get('numpy.core.umath.fmod'),
None,
TD(ints),
- TD(flts, f='fmod', astype={'e':'f'}),
+ TD(flts, f='fmod', astype={'e': 'f'}),
TD(P, f='fmod'),
),
'square':
@@ -390,7 +390,7 @@ defdict = {
docstrings.get('numpy.core.umath.power'),
None,
TD(ints),
- TD(inexact, f='pow', astype={'e':'f'}),
+ TD(inexact, f='pow', astype={'e': 'f'}),
TD(O, f='npy_ObjectPower'),
),
'float_power':
@@ -551,13 +551,13 @@ defdict = {
Ufunc(2, 1, MinusInfinity,
docstrings.get('numpy.core.umath.logaddexp'),
None,
- TD(flts, f="logaddexp", astype={'e':'f'})
+ TD(flts, f="logaddexp", astype={'e': 'f'})
),
'logaddexp2':
Ufunc(2, 1, MinusInfinity,
docstrings.get('numpy.core.umath.logaddexp2'),
None,
- TD(flts, f="logaddexp2", astype={'e':'f'})
+ TD(flts, f="logaddexp2", astype={'e': 'f'})
),
'bitwise_and':
Ufunc(2, 1, AllOnes,
@@ -605,80 +605,93 @@ defdict = {
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.heaviside'),
None,
- TD(flts, f='heaviside', astype={'e':'f'}),
+ TD(flts, f='heaviside', astype={'e': 'f'}),
),
'degrees':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.degrees'),
None,
- TD(fltsP, f='degrees', astype={'e':'f'}),
+ TD(fltsP, f='degrees', astype={'e': 'f'}),
),
'rad2deg':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.rad2deg'),
None,
- TD(fltsP, f='rad2deg', astype={'e':'f'}),
+ TD(fltsP, f='rad2deg', astype={'e': 'f'}),
),
'radians':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.radians'),
None,
- TD(fltsP, f='radians', astype={'e':'f'}),
+ TD(fltsP, f='radians', astype={'e': 'f'}),
),
'deg2rad':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.deg2rad'),
None,
- TD(fltsP, f='deg2rad', astype={'e':'f'}),
+ TD(fltsP, f='deg2rad', astype={'e': 'f'}),
),
'arccos':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.arccos'),
None,
- TD(inexact, f='acos', astype={'e':'f'}),
+ TD('e', f='acos', astype={'e': 'f'}),
+ TD('fd', dispatch=[('loops_umath_fp', 'fd')]),
+ TD(inexact, f='acos', astype={'e': 'f'}),
TD(P, f='arccos'),
),
'arccosh':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.arccosh'),
None,
- TD(inexact, f='acosh', astype={'e':'f'}),
+ TD('e', f='acosh', astype={'e': 'f'}),
+ TD('fd', dispatch=[('loops_umath_fp', 'fd')]),
+ TD(inexact, f='acosh', astype={'e': 'f'}),
TD(P, f='arccosh'),
),
'arcsin':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.arcsin'),
None,
- TD(inexact, f='asin', astype={'e':'f'}),
+ TD('e', f='asin', astype={'e': 'f'}),
+ TD('fd', dispatch=[('loops_umath_fp', 'fd')]),
+ TD(inexact, f='asin', astype={'e': 'f'}),
TD(P, f='arcsin'),
),
'arcsinh':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.arcsinh'),
None,
- TD(inexact, f='asinh', astype={'e':'f'}),
+ TD('e', f='asinh', astype={'e': 'f'}),
+ TD('fd', dispatch=[('loops_umath_fp', 'fd')]),
+ TD(inexact, f='asinh', astype={'e': 'f'}),
TD(P, f='arcsinh'),
),
'arctan':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.arctan'),
None,
- TD(inexact, f='atan', astype={'e':'f'}),
+ TD('e', f='atan', astype={'e': 'f'}),
+ TD('fd', dispatch=[('loops_umath_fp', 'fd')]),
+ TD(inexact, f='atan', astype={'e': 'f'}),
TD(P, f='arctan'),
),
'arctanh':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.arctanh'),
None,
- TD(inexact, f='atanh', astype={'e':'f'}),
+ TD('e', f='atanh', astype={'e': 'f'}),
+ TD('fd', dispatch=[('loops_umath_fp', 'fd')]),
+ TD(inexact, f='atanh', astype={'e': 'f'}),
TD(P, f='arctanh'),
),
'cos':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.cos'),
None,
- TD('e', f='cos', astype={'e':'f'}),
+ TD('e', f='cos', astype={'e': 'f'}),
TD('f', dispatch=[('loops_trigonometric', 'f')]),
+ TD('d', dispatch=[('loops_umath_fp', 'd')]),
TD('fdg' + cmplx, f='cos'),
TD(P, f='cos'),
),
@@ -686,8 +699,9 @@ defdict = {
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.sin'),
None,
- TD('e', f='sin', astype={'e':'f'}),
+ TD('e', f='sin', astype={'e': 'f'}),
TD('f', dispatch=[('loops_trigonometric', 'f')]),
+ TD('d', dispatch=[('loops_umath_fp', 'd')]),
TD('fdg' + cmplx, f='sin'),
TD(P, f='sin'),
),
@@ -695,35 +709,43 @@ defdict = {
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.tan'),
None,
- TD(inexact, f='tan', astype={'e':'f'}),
+ TD('e', f='tan', astype={'e': 'f'}),
+ TD('fd', dispatch=[('loops_umath_fp', 'fd')]),
+ TD(inexact, f='tan', astype={'e': 'f'}),
TD(P, f='tan'),
),
'cosh':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.cosh'),
None,
- TD(inexact, f='cosh', astype={'e':'f'}),
+ TD('e', f='cosh', astype={'e': 'f'}),
+ TD('fd', dispatch=[('loops_umath_fp', 'fd')]),
+ TD(inexact, f='cosh', astype={'e': 'f'}),
TD(P, f='cosh'),
),
'sinh':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.sinh'),
None,
- TD(inexact, f='sinh', astype={'e':'f'}),
+ TD('e', f='sinh', astype={'e': 'f'}),
+ TD('fd', dispatch=[('loops_umath_fp', 'fd')]),
+ TD(inexact, f='sinh', astype={'e': 'f'}),
TD(P, f='sinh'),
),
'tanh':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.tanh'),
None,
- TD(inexact, f='tanh', astype={'e':'f'}),
+ TD('e', f='tanh', astype={'e': 'f'}),
+ TD('fd', dispatch=[('loops_umath_fp', 'fd')]),
+ TD(inexact, f='tanh', astype={'e': 'f'}),
TD(P, f='tanh'),
),
'exp':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.exp'),
None,
- TD('e', f='exp', astype={'e':'f'}),
+ TD('e', f='exp', astype={'e': 'f'}),
TD('fd', dispatch=[('loops_exponent_log', 'fd')]),
TD('fdg' + cmplx, f='exp'),
TD(P, f='exp'),
@@ -732,21 +754,25 @@ defdict = {
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.exp2'),
None,
- TD(inexact, f='exp2', astype={'e':'f'}),
+ TD('e', f='exp2', astype={'e': 'f'}),
+ TD('fd', dispatch=[('loops_umath_fp', 'fd')]),
+ TD(inexact, f='exp2', astype={'e': 'f'}),
TD(P, f='exp2'),
),
'expm1':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.expm1'),
None,
- TD(inexact, f='expm1', astype={'e':'f'}),
+ TD('e', f='expm1', astype={'e': 'f'}),
+ TD('fd', dispatch=[('loops_umath_fp', 'fd')]),
+ TD(inexact, f='expm1', astype={'e': 'f'}),
TD(P, f='expm1'),
),
'log':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.log'),
None,
- TD('e', f='log', astype={'e':'f'}),
+ TD('e', f='log', astype={'e': 'f'}),
TD('fd', dispatch=[('loops_exponent_log', 'fd')]),
TD('fdg' + cmplx, f='log'),
TD(P, f='log'),
@@ -755,28 +781,34 @@ defdict = {
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.log2'),
None,
- TD(inexact, f='log2', astype={'e':'f'}),
+ TD('e', f='log2', astype={'e': 'f'}),
+ TD('fd', dispatch=[('loops_umath_fp', 'fd')]),
+ TD(inexact, f='log2', astype={'e': 'f'}),
TD(P, f='log2'),
),
'log10':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.log10'),
None,
- TD(inexact, f='log10', astype={'e':'f'}),
+ TD('e', f='log10', astype={'e': 'f'}),
+ TD('fd', dispatch=[('loops_umath_fp', 'fd')]),
+ TD(inexact, f='log10', astype={'e': 'f'}),
TD(P, f='log10'),
),
'log1p':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.log1p'),
None,
- TD(inexact, f='log1p', astype={'e':'f'}),
+ TD('e', f='log1p', astype={'e': 'f'}),
+ TD('fd', dispatch=[('loops_umath_fp', 'fd')]),
+ TD(inexact, f='log1p', astype={'e': 'f'}),
TD(P, f='log1p'),
),
'sqrt':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.sqrt'),
None,
- TD('e', f='sqrt', astype={'e':'f'}),
+ TD('e', f='sqrt', astype={'e': 'f'}),
TD(inexactvec, dispatch=[('loops_unary_fp', 'fd')]),
TD('fdg' + cmplx, f='sqrt'),
TD(P, f='sqrt'),
@@ -785,14 +817,16 @@ defdict = {
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.cbrt'),
None,
- TD(flts, f='cbrt', astype={'e':'f'}),
+ TD('e', f='cbrt', astype={'e': 'f'}),
+ TD('fd', dispatch=[('loops_umath_fp', 'fd')]),
+ TD(flts, f='cbrt', astype={'e': 'f'}),
TD(P, f='cbrt'),
),
'ceil':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.ceil'),
None,
- TD('e', f='ceil', astype={'e':'f'}),
+ TD('e', f='ceil', astype={'e': 'f'}),
TD(inexactvec, simd=[('fma', 'fd'), ('avx512f', 'fd')]),
TD('fdg', f='ceil'),
TD(O, f='npy_ObjectCeil'),
@@ -801,7 +835,7 @@ defdict = {
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.trunc'),
None,
- TD('e', f='trunc', astype={'e':'f'}),
+ TD('e', f='trunc', astype={'e': 'f'}),
TD(inexactvec, simd=[('fma', 'fd'), ('avx512f', 'fd')]),
TD('fdg', f='trunc'),
TD(O, f='npy_ObjectTrunc'),
@@ -810,14 +844,14 @@ defdict = {
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.fabs'),
None,
- TD(flts, f='fabs', astype={'e':'f'}),
+ TD(flts, f='fabs', astype={'e': 'f'}),
TD(P, f='fabs'),
),
'floor':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.floor'),
None,
- TD('e', f='floor', astype={'e':'f'}),
+ TD('e', f='floor', astype={'e': 'f'}),
TD(inexactvec, simd=[('fma', 'fd'), ('avx512f', 'fd')]),
TD('fdg', f='floor'),
TD(O, f='npy_ObjectFloor'),
@@ -826,7 +860,7 @@ defdict = {
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.rint'),
None,
- TD('e', f='rint', astype={'e':'f'}),
+ TD('e', f='rint', astype={'e': 'f'}),
TD(inexactvec, simd=[('fma', 'fd'), ('avx512f', 'fd')]),
TD('fdg' + cmplx, f='rint'),
TD(P, f='rint'),
@@ -835,7 +869,7 @@ defdict = {
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.arctan2'),
None,
- TD(flts, f='atan2', astype={'e':'f'}),
+ TD(flts, f='atan2', astype={'e': 'f'}),
TD(P, f='arctan2'),
),
'remainder':
@@ -858,7 +892,7 @@ defdict = {
Ufunc(2, 1, Zero,
docstrings.get('numpy.core.umath.hypot'),
None,
- TD(flts, f='hypot', astype={'e':'f'}),
+ TD(flts, f='hypot', astype={'e': 'f'}),
TD(P, f='hypot'),
),
'isnan':
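A note on the recurring pattern in this hunk: each transcendental ufunc now lists a half-precision entry, a dispatched ``'fd'`` (or ``'d'``) entry pointing at the new ``loops_umath_fp`` source, and then the original ``TD(inexact, f=...)`` fallback, which still covers long double and complex. A rough sketch of the intended selection, assuming earlier ``TD`` entries take precedence for a given type character (hypothetical names, not the generator's actual code)::

    # Sketch only: models "first registration per type character wins".
    loops = {}

    def register(typechars, impl):
        for t in typechars:
            loops.setdefault(t, impl)  # keep the earliest registration

    register('fd', 'svml_dispatched_loop')     # TD('fd', dispatch=[...])
    register('efdgFDG', 'libm_fallback_loop')  # TD(inexact, f='tan')

    print(loops['d'])  # svml_dispatched_loop
    print(loops['g'])  # libm_fallback_loop (no SVML kernel for long double)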
diff --git a/numpy/core/code_generators/ufunc_docstrings.py b/numpy/core/code_generators/ufunc_docstrings.py
index c15e1f042..4e1182de6 100644
--- a/numpy/core/code_generators/ufunc_docstrings.py
+++ b/numpy/core/code_generators/ufunc_docstrings.py
@@ -668,7 +668,7 @@ add_newdoc('numpy.core.umath', 'bitwise_or',
Examples
--------
- The number 13 has the binaray representation ``00001101``. Likewise,
+ The number 13 has the binary representation ``00001101``. Likewise,
16 is represented by ``00010000``. The bit-wise OR of 13 and 16 is
then ``00011101``, or 29:
@@ -1091,9 +1091,7 @@ add_newdoc('numpy.core.umath', 'divide',
Behavior on division by zero can be changed using ``seterr``.
- In Python 2, when both ``x1`` and ``x2`` are of an integer type,
- ``divide`` will behave like ``floor_divide``. In Python 3, it behaves
- like ``true_divide``.
+ Behaves like ``true_divide``.
Examples
--------
@@ -1106,27 +1104,6 @@ add_newdoc('numpy.core.umath', 'divide',
[ Inf, 4. , 2.5],
[ Inf, 7. , 4. ]])
- Note the behavior with integer types (Python 2 only):
-
- >>> np.divide(2, 4)
- 0
- >>> np.divide(2, 4.)
- 0.5
-
- Division by zero always yields zero in integer arithmetic (again,
- Python 2 only), and does not raise an exception or a warning:
-
- >>> np.divide(np.array([0, 1], dtype=int), np.array([0, 0], dtype=int))
- array([0, 0])
-
- Division by zero can, however, be caught using ``seterr``:
-
- >>> old_err_state = np.seterr(divide='raise')
- >>> np.divide(1, 0)
- Traceback (most recent call last):
- File "<stdin>", line 1, in <module>
- FloatingPointError: divide by zero encountered in divide
-
>>> ignored_states = np.seterr(**old_err_state)
>>> np.divide(1, 0)
0
@@ -4038,9 +4015,8 @@ add_newdoc('numpy.core.umath', 'true_divide',
"""
Returns a true division of the inputs, element-wise.
- Instead of the Python traditional 'floor division', this returns a true
- division. True division adjusts the output type to present the best
- answer, regardless of input types.
+ Unlike 'floor division', true division adjusts the output type
+ to present the best answer, regardless of input types.
Parameters
----------
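The simplified wording can be checked directly; on Python 3 both names refer to the same ufunc and always perform true division::

    >>> import numpy as np
    >>> np.divide(2, 4)
    0.5
    >>> np.true_divide is np.divide
    True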
diff --git a/numpy/core/einsumfunc.py b/numpy/core/einsumfunc.py
index 18157641a..c78d3db23 100644
--- a/numpy/core/einsumfunc.py
+++ b/numpy/core/einsumfunc.py
@@ -987,7 +987,7 @@ def einsum_path(*operands, optimize='greedy', einsum_call=False):
def _einsum_dispatcher(*operands, out=None, optimize=None, **kwargs):
- # Arguably we dispatch on more arguments that we really should; see note in
+ # Arguably we dispatch on more arguments than we really should; see note in
# _einsum_path_dispatcher for why.
yield from operands
yield out
diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py
index 5ecb1e666..3242124ac 100644
--- a/numpy/core/fromnumeric.py
+++ b/numpy/core/fromnumeric.py
@@ -689,6 +689,9 @@ def partition(a, kth, axis=-1, kind='introselect', order=None):
it. The order of all elements in the partitions is undefined. If
provided with a sequence of k-th it will partition all elements
indexed by k-th of them into their sorted position at once.
+
+ .. deprecated:: 1.22.0
+ Passing booleans as index is deprecated.
axis : int or None, optional
Axis along which to sort. If None, the array is flattened before
sorting. The default is -1, which sorts along the last axis.
@@ -781,6 +784,9 @@ def argpartition(a, kth, axis=-1, kind='introselect', order=None):
elements in the partitions is undefined. If provided with a
sequence of k-th it will partition all of them into their sorted
position at once.
+
+ .. deprecated:: 1.22.0
+ Passing booleans as index is deprecated.
axis : int or None, optional
Axis along which to sort. The default is -1 (the last axis). If
None, the flattened array is used.
@@ -1138,6 +1144,8 @@ def argmax(a, axis=None, out=None, *, keepdims=np._NoValue):
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the array.
+ .. versionadded:: 1.22.0
+
Returns
-------
index_array : ndarray of ints
@@ -1232,6 +1240,8 @@ def argmin(a, axis=None, out=None, *, keepdims=np._NoValue):
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the array.
+ .. versionadded:: 1.22.0
+
Returns
-------
index_array : ndarray of ints
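The ``keepdims`` flag documented above keeps the reduced axis with length one, so the index array broadcasts against the input::

    >>> import numpy as np
    >>> a = np.arange(6).reshape(2, 3)
    >>> np.argmax(a, axis=1, keepdims=True)
    array([[2],
           [2]])
    >>> np.argmin(a, axis=1, keepdims=True).shape
    (2, 1)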
diff --git a/numpy/core/function_base.pyi b/numpy/core/function_base.pyi
index c35629aa7..68d3b3a98 100644
--- a/numpy/core/function_base.pyi
+++ b/numpy/core/function_base.pyi
@@ -1,4 +1,4 @@
-from typing import overload, Tuple, Union, Sequence, Any, SupportsIndex, Literal
+from typing import overload, Tuple, Union, Sequence, Any, SupportsIndex, Literal, List
from numpy import ndarray
from numpy.typing import ArrayLike, DTypeLike, _SupportsArray, _NumberLike_co
@@ -8,6 +8,9 @@ _ArrayLikeNested = Sequence[Sequence[Any]]
_ArrayLikeNumber = Union[
_NumberLike_co, Sequence[_NumberLike_co], ndarray, _SupportsArray, _ArrayLikeNested
]
+
+__all__: List[str]
+
@overload
def linspace(
start: _ArrayLikeNumber,
@@ -47,3 +50,11 @@ def geomspace(
dtype: DTypeLike = ...,
axis: SupportsIndex = ...,
) -> ndarray: ...
+
+# Re-exported to `np.lib.function_base`
+def add_newdoc(
+ place: str,
+ obj: str,
+ doc: str | Tuple[str, str] | List[Tuple[str, str]],
+ warn_on_python: bool = ...,
+) -> None: ...
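For context on the ``doc`` parameter annotated above: the runtime helper accepts a plain docstring, a single ``(attribute, docstring)`` pair, or a list of such pairs. A small type-checking sketch (values are hypothetical)::

    from typing import List, Tuple, Union

    Doc = Union[str, Tuple[str, str], List[Tuple[str, str]]]

    doc_for_object: Doc = "Docstring for the object itself."
    doc_for_attr: Doc = ("__ge__", "Docstring for one attribute.")
    doc_for_attrs: Doc = [("__ge__", "..."), ("__le__", "...")]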
diff --git a/numpy/core/getlimits.py b/numpy/core/getlimits.py
index 0f7031bac..c96e6d5e7 100644
--- a/numpy/core/getlimits.py
+++ b/numpy/core/getlimits.py
@@ -11,7 +11,6 @@ from . import numeric
from . import numerictypes as ntypes
from .numeric import array, inf, NaN
from .umath import log10, exp2, nextafter, isnan
-from . import umath
def _fr0(a):
diff --git a/numpy/core/include/numpy/experimental_dtype_api.h b/numpy/core/include/numpy/experimental_dtype_api.h
new file mode 100644
index 000000000..22854a725
--- /dev/null
+++ b/numpy/core/include/numpy/experimental_dtype_api.h
@@ -0,0 +1,306 @@
+/*
+ * This header exports the new experimental DType API as proposed in
+ * NEPs 41 to 43. For background, please check these NEPs. Otherwise,
+ * this header also serves as documentation for the time being.
+ *
+ * Please do not hesitate to contact @seberg with questions. This is
+ * developed together with https://github.com/seberg/experimental_user_dtypes
+ * and those interested in experimenting are encouraged to contribute there.
+ *
+ * To use the functions defined in the header, call::
+ *
+ * if (import_experimental_dtype_api(version) < 0) {
+ * return NULL;
+ * }
+ *
+ * in your module init. (A version mismatch will be reported; just update
+ * to the correct one, and this will alert you to possible changes.)
+ *
+ * The two main symbols exported are:
+ *
+ * - PyUFunc_AddLoopFromSpec (Register a new loop for a ufunc)
+ * - PyArrayInitDTypeMeta_FromSpec (Create a new DType)
+ *
+ * Please check the in-line documentation for details and do not hesitate to
+ * ask for help.
+ *
+ * WARNING
+ * =======
+ *
+ * By using this header, you understand that this is a fully experimental
+ * exposure. Details are expected to change, and some options may have no
+ * effect. (Please contact @seberg if you have questions!)
+ * If the exposure stops working, please file a bug report with NumPy.
+ * Further, a DType created using this API/header should still be expected
+ * to be incompatible with some functionality inside and outside of NumPy.
+ * In this case crashes must be expected. Please report any such problems
+ * so that they can be fixed before final exposure.
+ * Furthermore, expect missing checks for programming errors which the final
+ * API is expected to have.
+ *
+ * Symbols with a leading underscore are likely not to be included in the
+ * first public version; if these are central to your use-case, please let
+ * us know so that we can reconsider.
+ *
+ * "Array-like" consumer API not yet under consideration
+ * =====================================================
+ *
+ * The new DType API is designed in a way to make it potentially useful for
+ * alternative "array-like" implementations. This will require careful
+ * exposure of details and functions and is not part of this experimental API.
+ */
+
+#ifndef NUMPY_CORE_INCLUDE_NUMPY_EXPERIMENTAL_DTYPE_API_H_
+#define NUMPY_CORE_INCLUDE_NUMPY_EXPERIMENTAL_DTYPE_API_H_
+
+#include <Python.h>
+#include "ndarraytypes.h"
+
+
+/*
+ * Just a hack so I don't forget to import it myself; I spent way too
+ * much time noticing that the first time around :).
+ */
+static void
+__not_imported(void)
+{
+ printf("*****\nCritical error, dtype API not imported\n*****\n");
+}
+static void *__uninitialized_table[] = {
+ &__not_imported, &__not_imported, &__not_imported, &__not_imported};
+
+
+static void **__experimental_dtype_api_table = __uninitialized_table;
+
+/*
+ * ******************************************************
+ * ArrayMethod API (Casting and UFuncs)
+ * ******************************************************
+ */
+/*
+ * NOTE: Expected changes:
+ * * invert logic of floating point error flag
+ * * probably split runtime and general flags into two
+ *   * should possibly not use an enum for the typedef for a more stable ABI?
+ */
+typedef enum {
+ /* Flag for whether the GIL is required */
+ NPY_METH_REQUIRES_PYAPI = 1 << 1,
+ /*
+ * Some functions cannot set floating point error flags, this flag
+ * gives us the option (not requirement) to skip floating point error
+ * setup/check. No function should set error flags and ignore them
+ * since it would interfere with chaining operations (e.g. casting).
+ */
+ NPY_METH_NO_FLOATINGPOINT_ERRORS = 1 << 2,
+ /* Whether the method supports unaligned access (not runtime) */
+ NPY_METH_SUPPORTS_UNALIGNED = 1 << 3,
+
+ /* All flags which can change at runtime */
+ NPY_METH_RUNTIME_FLAGS = (
+ NPY_METH_REQUIRES_PYAPI |
+ NPY_METH_NO_FLOATINGPOINT_ERRORS),
+} NPY_ARRAYMETHOD_FLAGS;
+
+
+/*
+ * The main object for creating a new ArrayMethod. We use the typical `slots`
+ * mechanism used by the Python limited API (see below for the slot defs).
+ */
+typedef struct {
+ const char *name;
+ int nin, nout;
+ NPY_CASTING casting;
+ NPY_ARRAYMETHOD_FLAGS flags;
+ PyObject **dtypes; /* array of DType class objects */
+ PyType_Slot *slots;
+} PyArrayMethod_Spec;
+
+
+typedef PyObject *_ufunc_addloop_fromspec_func(
+ PyObject *ufunc, PyArrayMethod_Spec *spec);
+/*
+ * The main ufunc registration function. This adds a new implementation/loop
+ * to a ufunc. It replaces `PyUFunc_RegisterLoopForType`.
+ */
+#define PyUFunc_AddLoopFromSpec \
+ (*(_ufunc_addloop_fromspec_func *)(__experimental_dtype_api_table[0]))
+
+
+/*
+ * In addition to the normal casting levels, NPY_CAST_IS_VIEW indicates
+ * that no cast operation is necessary at all (although a copy usually will be)
+ *
+ * NOTE: The most likely modification here is to add an additional
+ * `view_offset` output to resolve_descriptors. If set, it would
+ * indicate both that it is a view and what offset to use. This means that
+ * e.g. `arr.imag` could be implemented by an ArrayMethod.
+ */
+#define NPY_CAST_IS_VIEW _NPY_CAST_IS_VIEW
+
+/*
+ * The resolve descriptors function, must be able to handle NULL values for
+ * all output (but not input) `given_descrs` and fill `loop_descrs`.
+ * Return -1 on error or 0 if the operation is not possible without an error
+ * set. (This may still be in flux.)
+ * Otherwise must return the "casting safety", for normal functions, this is
+ * almost always "safe" (or even "equivalent"?).
+ *
+ * `resolve_descriptors` is optional if all output DTypes are non-parametric.
+ */
+#define NPY_METH_resolve_descriptors 1
+typedef NPY_CASTING (resolve_descriptors_function)(
+ /* "method" is currently opaque (necessary e.g. to wrap Python) */
+ PyObject *method,
+ /* DTypes the method was created for */
+ PyObject **dtypes,
+ /* Input descriptors (instances). Outputs may be NULL. */
+ PyArray_Descr **given_descrs,
+ /* Exact loop descriptors to use, must not hold references on error */
+ PyArray_Descr **loop_descrs);
+
+/* NOT public yet: Signature needs adapting as external API. */
+#define _NPY_METH_get_loop 2
+
+/*
+ * Current public API to define fast inner-loops. You must provide a
+ * strided loop. If this is a cast between two "versions" of the same dtype
+ * you must also provide an unaligned strided loop.
+ * Other loops are useful to optimize the very common contiguous case.
+ *
+ * NOTE: As of now, NumPy will NOT use unaligned loops in ufuncs!
+ */
+#define NPY_METH_strided_loop 3
+#define NPY_METH_contiguous_loop 4
+#define NPY_METH_unaligned_strided_loop 5
+#define NPY_METH_unaligned_contiguous_loop 6
+
+
+typedef struct {
+ PyObject *caller; /* E.g. the original ufunc, may be NULL */
+ PyObject *method; /* The method "self". Currently an opaque object */
+
+ /* Operand descriptors, filled in by resolve_descriptors */
+ PyArray_Descr **descriptors;
+ /* Structure may grow (this is harmless for DType authors) */
+} PyArrayMethod_Context;
+
+typedef int (PyArrayMethod_StridedLoop)(PyArrayMethod_Context *context,
+ char *const *data, const npy_intp *dimensions, const npy_intp *strides,
+ NpyAuxData *transferdata);
+
+
+
+/*
+ * ****************************
+ * DTYPE API
+ * ****************************
+ */
+
+#define NPY_DT_ABSTRACT 1 << 1
+#define NPY_DT_PARAMETRIC 1 << 2
+
+#define NPY_DT_discover_descr_from_pyobject 1
+#define _NPY_DT_is_known_scalar_type 2
+#define NPY_DT_default_descr 3
+#define NPY_DT_common_dtype 4
+#define NPY_DT_common_instance 5
+#define NPY_DT_setitem 6
+#define NPY_DT_getitem 7
+
+
+// TODO: These slots probably still need some thought, and/or a way to "grow"?
+typedef struct {
+ PyTypeObject *typeobj; /* type of python scalar or NULL */
+ int flags; /* flags, including parametric and abstract */
+ /* NULL terminated cast definitions. Use NULL for the newly created DType */
+ PyArrayMethod_Spec **casts;
+ PyType_Slot *slots;
+ /* Baseclass or NULL (will always subclass `np.dtype`) */
+ PyTypeObject *baseclass;
+} PyArrayDTypeMeta_Spec;
+
+
+/*
+ * DTypeMeta struct, the content may be made fully opaque (except the size).
+ * We may also move everything into a single `void *dt_slots`.
+ */
+typedef struct {
+ PyHeapTypeObject super;
+ PyArray_Descr *singleton;
+ int type_num;
+ PyTypeObject *scalar_type;
+ npy_uint64 flags;
+ void *dt_slots;
+ void *reserved[3];
+} PyArray_DTypeMeta;
+
+
+#define PyArrayDTypeMeta_Type \
+ (&(PyTypeObject *)__experimental_dtype_api_table[1])
+
+typedef int __dtypemeta_fromspec(
+ PyArray_DTypeMeta *DType, PyArrayDTypeMeta_Spec *dtype_spec);
+/*
+ * Finalize creation of a DTypeMeta. You must ensure that the DTypeMeta is
+ * a proper subclass. The DTypeMeta object has additional fields compared to
+ * a normal PyTypeObject!
+ * The only (easy) creation of a new DType is to create a static Type which
+ * inherits `PyArray_DescrType`, sets its type to `PyArrayDTypeMeta_Type` and
+ * uses `PyArray_DTypeMeta` defined above as the C-structure.
+ */
+#define PyArrayInitDTypeMeta_FromSpec \
+ ((__dtypemeta_fromspec *)(__experimental_dtype_api_table[2]))
+
+
+
+/*
+ * ********************************
+ * Initialization
+ * ********************************
+ *
+ * Import the experimental API, the version must match the one defined in
+ * the header to ensure changes are taken into account. NumPy will further
+ * runtime-check this.
+ * You must call this function to use the symbols defined in this file.
+ */
+#define __EXPERIMENTAL_DTYPE_VERSION 1
+
+static int
+import_experimental_dtype_api(int version)
+{
+ if (version != __EXPERIMENTAL_DTYPE_VERSION) {
+ PyErr_Format(PyExc_RuntimeError,
+ "DType API version %d did not match header version %d. Please "
+ "update the import statement and check for API changes.",
+ version, __EXPERIMENTAL_DTYPE_VERSION);
+ return -1;
+ }
+ if (__experimental_dtype_api_table != __uninitialized_table) {
+ /* already imported. */
+ return 0;
+ }
+
+ PyObject *multiarray = PyImport_ImportModule("numpy.core._multiarray_umath");
+ if (multiarray == NULL) {
+ return -1;
+ }
+
+ PyObject *api = PyObject_CallMethod(multiarray,
+ "_get_experimental_dtype_api", "i", version);
+ Py_DECREF(multiarray);
+ if (api == NULL) {
+ return -1;
+ }
+ __experimental_dtype_api_table = PyCapsule_GetPointer(api,
+ "experimental_dtype_api_table");
+ Py_DECREF(api);
+
+ if (__experimental_dtype_api_table == NULL) {
+ __experimental_dtype_api_table = __uninitialized_table;
+ return -1;
+ }
+ return 0;
+}
+
+#endif /* NUMPY_CORE_INCLUDE_NUMPY_EXPERIMENTAL_DTYPE_API_H_ */
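The Python half of this handshake can be observed from the interpreter: the header calls the private module method shown above and unwraps the returned capsule. A hedged sketch (private API, subject to change)::

    >>> from numpy.core import _multiarray_umath
    >>> api = _multiarray_umath._get_experimental_dtype_api(1)  # version must match
    >>> type(api).__name__
    'PyCapsule'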
diff --git a/numpy/core/include/numpy/ndarraytypes.h b/numpy/core/include/numpy/ndarraytypes.h
index 740223882..8d810fa64 100644
--- a/numpy/core/include/numpy/ndarraytypes.h
+++ b/numpy/core/include/numpy/ndarraytypes.h
@@ -1858,32 +1858,14 @@ typedef void (PyDataMem_EventHookFunc)(void *inp, void *outp, size_t size,
*/
#if defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD
/*
- * The Structures defined in this block are considered private API and
- * may change without warning!
+ * The Structures defined in this block are currently considered
+ * private API and may change without warning!
+     * Part of this (at least the size) is expected to be public API without
+ * further modifications.
*/
/* TODO: Make this definition public in the API, as soon as its settled */
NPY_NO_EXPORT extern PyTypeObject PyArrayDTypeMeta_Type;
- typedef struct PyArray_DTypeMeta_tag PyArray_DTypeMeta;
-
- typedef PyArray_Descr *(discover_descr_from_pyobject_function)(
- PyArray_DTypeMeta *cls, PyObject *obj);
-
- /*
- * Before making this public, we should decide whether it should pass
- * the type, or allow looking at the object. A possible use-case:
- * `np.array(np.array([0]), dtype=np.ndarray)`
- * Could consider arrays that are not `dtype=ndarray` "scalars".
- */
- typedef int (is_known_scalar_type_function)(
- PyArray_DTypeMeta *cls, PyTypeObject *obj);
-
- typedef PyArray_Descr *(default_descr_function)(PyArray_DTypeMeta *cls);
- typedef PyArray_DTypeMeta *(common_dtype_function)(
- PyArray_DTypeMeta *dtype1, PyArray_DTypeMeta *dtyep2);
- typedef PyArray_Descr *(common_instance_function)(
- PyArray_Descr *dtype1, PyArray_Descr *dtyep2);
-
/*
* While NumPy DTypes would not need to be heap types the plan is to
* make DTypes available in Python at which point they will be heap types.
@@ -1894,7 +1876,7 @@ typedef void (PyDataMem_EventHookFunc)(void *inp, void *outp, size_t size,
* it is a fairly complex construct which may be better to allow
* refactoring of.
*/
- struct PyArray_DTypeMeta_tag {
+ typedef struct {
PyHeapTypeObject super;
/*
@@ -1922,7 +1904,7 @@ typedef void (PyDataMem_EventHookFunc)(void *inp, void *outp, size_t size,
*/
void *dt_slots;
void *reserved[3];
- };
+ } PyArray_DTypeMeta;
#endif /* NPY_INTERNAL_BUILD */
diff --git a/numpy/core/numerictypes.py b/numpy/core/numerictypes.py
index 12f424fd4..8e5de852b 100644
--- a/numpy/core/numerictypes.py
+++ b/numpy/core/numerictypes.py
@@ -80,12 +80,10 @@ Exported symbols include:
"""
import numbers
-import warnings
from numpy.core.multiarray import (
- typeinfo, ndarray, array, empty, dtype, datetime_data,
- datetime_as_string, busday_offset, busday_count, is_busday,
- busdaycalendar
+ ndarray, array, dtype, datetime_data, datetime_as_string,
+ busday_offset, busday_count, is_busday, busdaycalendar
)
from numpy.core.overrides import set_module
diff --git a/numpy/core/overrides.py b/numpy/core/overrides.py
index e1fdd06f2..840cf38c9 100644
--- a/numpy/core/overrides.py
+++ b/numpy/core/overrides.py
@@ -2,7 +2,6 @@
import collections
import functools
import os
-import textwrap
from numpy.core._multiarray_umath import (
add_docstring, implement_array_function, _get_implementing_args)
diff --git a/numpy/core/setup.py b/numpy/core/setup.py
index 2f495c48b..6800f65e9 100644
--- a/numpy/core/setup.py
+++ b/numpy/core/setup.py
@@ -5,6 +5,7 @@ import copy
import warnings
import platform
import textwrap
+import glob
from os.path import join
from numpy.distutils import log
@@ -63,6 +64,20 @@ class CallOnceOnly:
out = copy.deepcopy(pickle.loads(self._check_complex))
return out
+def can_link_svml():
+    """SVML library is supported only on the x86_64 architecture and
+    currently only on Linux.
+ """
+ machine = platform.machine()
+ system = platform.system()
+ return "x86_64" in machine and system == "Linux"
+
+def check_svml_submodule(svmlpath):
+ if not os.path.exists(svmlpath + "/README.md"):
+ raise RuntimeError("Missing `SVML` submodule! Run `git submodule "
+ "update --init` to fix this.")
+ return True
+
def pythonlib_dir():
"""return path where libpython* is."""
if sys.platform == 'win32':
@@ -455,6 +470,9 @@ def configuration(parent_package='',top_path=None):
# Inline check
inline = config_cmd.check_inline()
+ if can_link_svml():
+ moredefs.append(('NPY_CAN_LINK_SVML', 1))
+
# Use relaxed stride checking
if NPY_RELAXED_STRIDES_CHECKING:
moredefs.append(('NPY_RELAXED_STRIDES_CHECKING', 1))
@@ -727,6 +745,7 @@ def configuration(parent_package='',top_path=None):
join('src', 'common', 'npy_import.h'),
join('src', 'common', 'npy_hashtable.h'),
join('src', 'common', 'npy_longdouble.h'),
+ join('src', 'common', 'npy_svml.h'),
join('src', 'common', 'templ_common.h.src'),
join('src', 'common', 'ucsnarrow.h'),
join('src', 'common', 'ufunc_override.h'),
@@ -791,6 +810,7 @@ def configuration(parent_package='',top_path=None):
join('src', 'multiarray', 'dragon4.h'),
join('src', 'multiarray', 'einsum_debug.h'),
join('src', 'multiarray', 'einsum_sumprod.h'),
+ join('src', 'multiarray', 'experimental_public_dtype_api.h'),
join('src', 'multiarray', 'getset.h'),
join('src', 'multiarray', 'hashdescr.h'),
join('src', 'multiarray', 'iterators.h'),
@@ -858,6 +878,7 @@ def configuration(parent_package='',top_path=None):
join('src', 'multiarray', 'dtype_transfer.c'),
join('src', 'multiarray', 'einsum.c.src'),
join('src', 'multiarray', 'einsum_sumprod.c.src'),
+ join('src', 'multiarray', 'experimental_public_dtype_api.c'),
join('src', 'multiarray', 'flagsobject.c'),
join('src', 'multiarray', 'getset.c'),
join('src', 'multiarray', 'hashdescr.c'),
@@ -888,7 +909,7 @@ def configuration(parent_package='',top_path=None):
join('src', 'npysort', 'mergesort.c.src'),
join('src', 'npysort', 'timsort.c.src'),
join('src', 'npysort', 'heapsort.c.src'),
- join('src', 'npysort', 'radixsort.c.src'),
+ join('src', 'npysort', 'radixsort.cpp'),
join('src', 'common', 'npy_partition.h.src'),
join('src', 'npysort', 'selection.c.src'),
join('src', 'common', 'npy_binsearch.h.src'),
@@ -923,11 +944,12 @@ def configuration(parent_package='',top_path=None):
join('src', 'umath', 'loops_arithm_fp.dispatch.c.src'),
join('src', 'umath', 'loops_arithmetic.dispatch.c.src'),
join('src', 'umath', 'loops_trigonometric.dispatch.c.src'),
+ join('src', 'umath', 'loops_umath_fp.dispatch.c.src'),
join('src', 'umath', 'loops_exponent_log.dispatch.c.src'),
join('src', 'umath', 'matmul.h.src'),
join('src', 'umath', 'matmul.c.src'),
- join('src', 'umath', 'clip.h.src'),
- join('src', 'umath', 'clip.c.src'),
+ join('src', 'umath', 'clip.h'),
+ join('src', 'umath', 'clip.cpp'),
join('src', 'umath', 'dispatching.c'),
join('src', 'umath', 'legacy_array_method.c'),
join('src', 'umath', 'ufunc_object.c'),
@@ -951,7 +973,15 @@ def configuration(parent_package='',top_path=None):
join(codegen_dir, 'generate_ufunc_api.py'),
]
+ svml_path = join('numpy', 'core', 'src', 'umath', 'svml')
+ svml_objs = []
+ if can_link_svml() and check_svml_submodule(svml_path):
+ svml_objs = glob.glob(svml_path + '/**/*.s', recursive=True)
+
config.add_extension('_multiarray_umath',
+                         # Force the C language even though we have C++ sources;
+                         # this selects the C linker and avoids linking the C++ runtime.
+ language = 'c',
sources=multiarray_src + umath_src +
common_src +
[generate_config_h,
@@ -965,7 +995,12 @@ def configuration(parent_package='',top_path=None):
depends=deps + multiarray_deps + umath_deps +
common_deps,
libraries=['npymath'],
- extra_info=extra_info)
+ extra_objects=svml_objs,
+ extra_info=extra_info,
+ extra_cxx_compile_args=['-std=c++11',
+ '-D__STDC_VERSION__=0',
+ '-fno-exceptions',
+ '-fno-rtti'])
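Taken together, the build gates SVML in two places: the ``NPY_CAN_LINK_SVML`` define lets the C sources declare the kernels, and ``extra_objects`` links the pre-assembled ``.s`` files from the submodule. A condensed sketch of that logic, using the same paths as above::

    import glob
    import os
    import platform

    def can_link_svml():
        # SVML kernels are currently x86_64- and Linux-only
        return "x86_64" in platform.machine() and platform.system() == "Linux"

    svml_path = os.path.join('numpy', 'core', 'src', 'umath', 'svml')
    svml_objs = []
    if can_link_svml() and os.path.exists(os.path.join(svml_path, 'README.md')):
        svml_objs = glob.glob(svml_path + '/**/*.s', recursive=True)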
#######################################################################
# umath_tests module #
diff --git a/numpy/core/src/common/npy_sort.h.src b/numpy/core/src/common/npy_sort.h.src
index ddbde0c9b..b4a1e9b0c 100644
--- a/numpy/core/src/common/npy_sort.h.src
+++ b/numpy/core/src/common/npy_sort.h.src
@@ -49,9 +49,14 @@ NPY_NO_EXPORT int atimsort_@suff@(void *vec, npy_intp *ind, npy_intp cnt, void *
* #suff = bool, byte, ubyte, short, ushort, int, uint, long, ulong,
* longlong, ulonglong#
*/
-
+#ifdef __cplusplus
+extern "C" {
+#endif
NPY_NO_EXPORT int radixsort_@suff@(void *vec, npy_intp cnt, void *null);
NPY_NO_EXPORT int aradixsort_@suff@(void *vec, npy_intp *ind, npy_intp cnt, void *null);
+#ifdef __cplusplus
+}
+#endif
/**end repeat**/
diff --git a/numpy/core/src/common/npy_svml.h b/numpy/core/src/common/npy_svml.h
new file mode 100644
index 000000000..4292f7090
--- /dev/null
+++ b/numpy/core/src/common/npy_svml.h
@@ -0,0 +1,41 @@
+#if NPY_SIMD && defined(NPY_HAVE_AVX512_SKX) && defined(NPY_CAN_LINK_SVML)
+extern __m512 __svml_exp2f16(__m512 x);
+extern __m512 __svml_log2f16(__m512 x);
+extern __m512 __svml_log10f16(__m512 x);
+extern __m512 __svml_expm1f16(__m512 x);
+extern __m512 __svml_log1pf16(__m512 x);
+extern __m512 __svml_cbrtf16(__m512 x);
+extern __m512 __svml_sinf16(__m512 x);
+extern __m512 __svml_cosf16(__m512 x);
+extern __m512 __svml_tanf16(__m512 x);
+extern __m512 __svml_asinf16(__m512 x);
+extern __m512 __svml_acosf16(__m512 x);
+extern __m512 __svml_atanf16(__m512 x);
+extern __m512 __svml_atan2f16(__m512 x);
+extern __m512 __svml_sinhf16(__m512 x);
+extern __m512 __svml_coshf16(__m512 x);
+extern __m512 __svml_tanhf16(__m512 x);
+extern __m512 __svml_asinhf16(__m512 x);
+extern __m512 __svml_acoshf16(__m512 x);
+extern __m512 __svml_atanhf16(__m512 x);
+
+extern __m512d __svml_exp28(__m512d x);
+extern __m512d __svml_log28(__m512d x);
+extern __m512d __svml_log108(__m512d x);
+extern __m512d __svml_expm18(__m512d x);
+extern __m512d __svml_log1p8(__m512d x);
+extern __m512d __svml_cbrt8(__m512d x);
+extern __m512d __svml_sin8(__m512d x);
+extern __m512d __svml_cos8(__m512d x);
+extern __m512d __svml_tan8(__m512d x);
+extern __m512d __svml_asin8(__m512d x);
+extern __m512d __svml_acos8(__m512d x);
+extern __m512d __svml_atan8(__m512d x);
+extern __m512d __svml_atan28(__m512d x);
+extern __m512d __svml_sinh8(__m512d x);
+extern __m512d __svml_cosh8(__m512d x);
+extern __m512d __svml_tanh8(__m512d x);
+extern __m512d __svml_asinh8(__m512d x);
+extern __m512d __svml_acosh8(__m512d x);
+extern __m512d __svml_atanh8(__m512d x);
+#endif
diff --git a/numpy/core/src/common/numpy_tag.h b/numpy/core/src/common/numpy_tag.h
new file mode 100644
index 000000000..dc8d5286b
--- /dev/null
+++ b/numpy/core/src/common/numpy_tag.h
@@ -0,0 +1,78 @@
+#ifndef _NPY_COMMON_TAG_H_
+#define _NPY_COMMON_TAG_H_
+
+namespace npy {
+
+struct integral_tag {
+};
+struct floating_point_tag {
+};
+struct complex_tag {
+};
+struct date_tag {
+};
+
+struct bool_tag : integral_tag {
+ using type = npy_bool;
+};
+struct byte_tag : integral_tag {
+ using type = npy_byte;
+};
+struct ubyte_tag : integral_tag {
+ using type = npy_ubyte;
+};
+struct short_tag : integral_tag {
+ using type = npy_short;
+};
+struct ushort_tag : integral_tag {
+ using type = npy_ushort;
+};
+struct int_tag : integral_tag {
+ using type = npy_int;
+};
+struct uint_tag : integral_tag {
+ using type = npy_uint;
+};
+struct long_tag : integral_tag {
+ using type = npy_long;
+};
+struct ulong_tag : integral_tag {
+ using type = npy_ulong;
+};
+struct longlong_tag : integral_tag {
+ using type = npy_longlong;
+};
+struct ulonglong_tag : integral_tag {
+ using type = npy_ulonglong;
+};
+struct half_tag {
+ using type = npy_half;
+};
+struct float_tag : floating_point_tag {
+ using type = npy_float;
+};
+struct double_tag : floating_point_tag {
+ using type = npy_double;
+};
+struct longdouble_tag : floating_point_tag {
+ using type = npy_longdouble;
+};
+struct cfloat_tag : complex_tag {
+ using type = npy_cfloat;
+};
+struct cdouble_tag : complex_tag {
+ using type = npy_cdouble;
+};
+struct clongdouble_tag : complex_tag {
+ using type = npy_clongdouble;
+};
+struct datetime_tag : date_tag {
+ using type = npy_datetime;
+};
+struct timedelta_tag : date_tag {
+ using type = npy_timedelta;
+};
+
+} // namespace npy
+
+#endif
diff --git a/numpy/core/src/multiarray/array_method.c b/numpy/core/src/multiarray/array_method.c
index c4db73c3b..d93dac506 100644
--- a/numpy/core/src/multiarray/array_method.c
+++ b/numpy/core/src/multiarray/array_method.c
@@ -58,16 +58,10 @@ default_resolve_descriptors(
{
int nin = method->nin;
int nout = method->nout;
- int all_defined = 1;
for (int i = 0; i < nin + nout; i++) {
PyArray_DTypeMeta *dtype = dtypes[i];
- if (dtype == NULL) {
- output_descrs[i] = NULL;
- all_defined = 0;
- continue;
- }
- if (NPY_DTYPE(input_descrs[i]) == dtype) {
+ if (input_descrs[i] != NULL) {
output_descrs[i] = ensure_dtype_nbo(input_descrs[i]);
}
else {
@@ -77,41 +71,11 @@ default_resolve_descriptors(
goto fail;
}
}
- if (all_defined) {
- return method->casting;
- }
-
- if (NPY_UNLIKELY(nin == 0 || dtypes[0] == NULL)) {
- /* Registration should reject this, so this would be indicates a bug */
- PyErr_SetString(PyExc_RuntimeError,
- "Invalid use of default resolver without inputs or with "
- "input or output DType incorrectly missing.");
- goto fail;
- }
- /* We find the common dtype of all inputs, and use it for the unknowns */
- PyArray_DTypeMeta *common_dtype = dtypes[0];
- assert(common_dtype != NULL);
- for (int i = 1; i < nin; i++) {
- Py_SETREF(common_dtype, PyArray_CommonDType(common_dtype, dtypes[i]));
- if (common_dtype == NULL) {
- goto fail;
- }
- }
- for (int i = nin; i < nin + nout; i++) {
- if (output_descrs[i] != NULL) {
- continue;
- }
- if (NPY_DTYPE(input_descrs[i]) == common_dtype) {
- output_descrs[i] = ensure_dtype_nbo(input_descrs[i]);
- }
- else {
- output_descrs[i] = NPY_DT_CALL_default_descr(common_dtype);
- }
- if (NPY_UNLIKELY(output_descrs[i] == NULL)) {
- goto fail;
- }
- }
-
+ /*
+     * If we relax the requirement of specifying all `dtypes` (e.g. to allow
+     * abstract ones or unspecified outputs), we can use the common-dtype
+     * operation to provide a default here.
+ */
return method->casting;
fail:
@@ -219,9 +183,18 @@ validate_spec(PyArrayMethod_Spec *spec)
}
for (int i = 0; i < nargs; i++) {
- if (spec->dtypes[i] == NULL && i < spec->nin) {
+ /*
+ * Note that we could allow for output dtypes to not be specified
+ * (the array-method would have to make sure to support this).
+ * We could even allow for some dtypes to be abstract.
+ * For now, assume that this is better handled in a promotion step.
+ * One problem with providing all DTypes is the definite need to
+     * hold references. We will probably, eventually, have to implement
+     * traversal and trust the GC to deal with it.
+ */
+ if (spec->dtypes[i] == NULL) {
PyErr_Format(PyExc_TypeError,
- "ArrayMethod must have well defined input DTypes. "
+ "ArrayMethod must provide all input and output DTypes. "
"(method: %s)", spec->name);
return -1;
}
@@ -231,10 +204,10 @@ validate_spec(PyArrayMethod_Spec *spec)
"(method: %s)", spec->dtypes[i], spec->name);
return -1;
}
- if (NPY_DT_is_abstract(spec->dtypes[i]) && i < spec->nin) {
+ if (NPY_DT_is_abstract(spec->dtypes[i])) {
PyErr_Format(PyExc_TypeError,
- "abstract DType %S are currently not allowed for inputs."
- "(method: %s defined at %s)", spec->dtypes[i], spec->name);
+                "abstract DType %S is currently not supported. "
+                "(method: %s)", spec->dtypes[i], spec->name);
return -1;
}
}
@@ -323,7 +296,7 @@ fill_arraymethod_from_slots(
PyErr_Format(PyExc_TypeError,
"Must specify output DTypes or use custom "
"`resolve_descriptors` when there are no inputs. "
- "(method: %s defined at %s)", spec->name);
+ "(method: %s)", spec->name);
return -1;
}
}
@@ -370,6 +343,26 @@ fill_arraymethod_from_slots(
}
+/*
+ * Public version of `PyArrayMethod_FromSpec_int` (see below).
+ *
+ * TODO: Error paths will probably need to be improved before a release into
+ * the non-experimental public API.
+ */
+NPY_NO_EXPORT PyObject *
+PyArrayMethod_FromSpec(PyArrayMethod_Spec *spec)
+{
+ for (int i = 0; i < spec->nin + spec->nout; i++) {
+ if (!PyObject_TypeCheck(spec->dtypes[i], &PyArrayDTypeMeta_Type)) {
+ PyErr_SetString(PyExc_RuntimeError,
+ "ArrayMethod spec contained a non DType.");
+ return NULL;
+ }
+ }
+ return (PyObject *)PyArrayMethod_FromSpec_int(spec, 0);
+}
+
+
/**
* Create a new ArrayMethod (internal version).
*
@@ -683,7 +676,7 @@ boundarraymethod__simple_strided_call(
"All arrays must have the same length.");
return NULL;
}
- if (i >= nout) {
+ if (i >= nin) {
if (PyArray_FailUnlessWriteable(
arrays[i], "_simple_strided_call() output") < 0) {
return NULL;
@@ -787,6 +780,13 @@ _masked_stridedloop_data_free(NpyAuxData *auxdata)
* This function wraps a regular unmasked strided-loop as a
* masked strided-loop, only calling the function for elements
* where the mask is True.
+ *
+ * TODO: Reductions also use this code to implement masked reductions.
+ * Before consolidating them, reductions had a special case for
+ *       broadcasts: when the mask stride was 0, the code did not check all
+ *       elements the way `npy_memchr` currently does.
+ * It may be worthwhile to add such an optimization again if broadcasted
+ * masks are common enough.
*/
static int
generic_masked_strided_loop(PyArrayMethod_Context *context,
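As a user-visible anchor for this machinery: masked strided loops are what back the ``where=`` keyword of ufunc calls, and unselected output elements are left untouched::

    >>> import numpy as np
    >>> out = np.zeros(3)
    >>> np.add([1., 2., 4.], [1., 2., 4.], out=out,
    ...        where=np.array([True, False, True]))
    array([2., 0., 8.])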
diff --git a/numpy/core/src/multiarray/array_method.h b/numpy/core/src/multiarray/array_method.h
index 3017abf25..7b7372bd0 100644
--- a/numpy/core/src/multiarray/array_method.h
+++ b/numpy/core/src/multiarray/array_method.h
@@ -21,6 +21,17 @@ typedef enum {
NPY_METH_NO_FLOATINGPOINT_ERRORS = 1 << 2,
/* Whether the method supports unaligned access (not runtime) */
NPY_METH_SUPPORTS_UNALIGNED = 1 << 3,
+ /*
+ * Private flag for now for *logic* functions. The logical functions
+ * `logical_or` and `logical_and` can always cast the inputs to booleans
+ * "safely" (because that is how the cast to bool is defined).
+     * @seberg: I am not sure this is the best way to handle this, so it's
+     * private for now (it is also very limited anyway).
+     * There is one "exception": NA-aware dtypes cannot cast to bool
+     * (hopefully), so the `??->?` loop should error even with this flag.
+ * But a second NA fallback loop will be necessary.
+ */
+ _NPY_METH_FORCE_CAST_INPUTS = 1 << 17,
/* All flags which can change at runtime */
NPY_METH_RUNTIME_FLAGS = (
@@ -170,6 +181,11 @@ PyArrayMethod_GetMaskedStridedLoop(
NPY_ARRAYMETHOD_FLAGS *flags);
+
+NPY_NO_EXPORT PyObject *
+PyArrayMethod_FromSpec(PyArrayMethod_Spec *spec);
+
+
/*
* TODO: This function is the internal version, and its error paths may
* need better tests when a public version is exposed.
diff --git a/numpy/core/src/multiarray/arrayobject.c b/numpy/core/src/multiarray/arrayobject.c
index 9b9df08f2..28aff5d65 100644
--- a/numpy/core/src/multiarray/arrayobject.c
+++ b/numpy/core/src/multiarray/arrayobject.c
@@ -1705,22 +1705,6 @@ array_iter(PyArrayObject *arr)
return PySeqIter_New((PyObject *)arr);
}
-static PyObject *
-array_alloc(PyTypeObject *type, Py_ssize_t NPY_UNUSED(nitems))
-{
- /* nitems will always be 0 */
- PyObject *obj = PyObject_Malloc(type->tp_basicsize);
- PyObject_Init(obj, type);
- return obj;
-}
-
-static void
-array_free(PyObject * v)
-{
- /* avoid same deallocator as PyBaseObject, see gentype_free */
- PyObject_Free(v);
-}
-
NPY_NO_EXPORT PyTypeObject PyArray_Type = {
PyVarObject_HEAD_INIT(NULL, 0)
@@ -1741,7 +1725,5 @@ NPY_NO_EXPORT PyTypeObject PyArray_Type = {
.tp_iter = (getiterfunc)array_iter,
.tp_methods = array_methods,
.tp_getset = array_getsetlist,
- .tp_alloc = (allocfunc)array_alloc,
.tp_new = (newfunc)array_new,
- .tp_free = (freefunc)array_free,
};
diff --git a/numpy/core/src/multiarray/arraytypes.c.src b/numpy/core/src/multiarray/arraytypes.c.src
index c0c087056..15782a91b 100644
--- a/numpy/core/src/multiarray/arraytypes.c.src
+++ b/numpy/core/src/multiarray/arraytypes.c.src
@@ -62,7 +62,7 @@ get_dummy_stack_array(PyArrayObject *orig)
PyArrayObject_fields new_fields;
new_fields.flags = PyArray_FLAGS(orig);
/* Set to NULL so the dummy object can be distinguished from the real one */
- Py_TYPE(&new_fields) = NULL;
+ Py_SET_TYPE(&new_fields, NULL);
new_fields.base = (PyObject *)orig;
return new_fields;
}
diff --git a/numpy/core/src/multiarray/compiled_base.c b/numpy/core/src/multiarray/compiled_base.c
index efe120b80..9910fffe6 100644
--- a/numpy/core/src/multiarray/compiled_base.c
+++ b/numpy/core/src/multiarray/compiled_base.c
@@ -1428,9 +1428,26 @@ arr_add_docstring(PyObject *NPY_UNUSED(dummy), PyObject *args)
PyCFunctionObject *new = (PyCFunctionObject *)obj;
_ADDDOC(new->m_ml->ml_doc, new->m_ml->ml_name);
}
- else if (Py_TYPE(obj) == &PyType_Type) {
+ else if (PyObject_TypeCheck(obj, &PyType_Type)) {
+ /*
+ * We add it to both `tp_doc` and `__doc__` here. Note that in theory
+ * `tp_doc` extracts the signature line, but we currently do not use
+ * it. It may make sense to only add it as `__doc__` and
+ * `__text_signature__` to the dict in the future.
+ * The dictionary path is only necessary for heaptypes (currently not
+ * used) and metaclasses.
+ * If `__doc__` as stored in `tp_dict` is None, we assume this was
+ * filled in by `PyType_Ready()` and should also be replaced.
+ */
PyTypeObject *new = (PyTypeObject *)obj;
_ADDDOC(new->tp_doc, new->tp_name);
+ if (new->tp_dict != NULL && PyDict_CheckExact(new->tp_dict) &&
+ PyDict_GetItemString(new->tp_dict, "__doc__") == Py_None) {
+ /* Warning: Modifying `tp_dict` is not generally safe! */
+ if (PyDict_SetItemString(new->tp_dict, "__doc__", str) < 0) {
+ return NULL;
+ }
+ }
}
else if (Py_TYPE(obj) == &PyMemberDescr_Type) {
PyMemberDescrObject *new = (PyMemberDescrObject *)obj;
diff --git a/numpy/core/src/multiarray/convert_datatype.c b/numpy/core/src/multiarray/convert_datatype.c
index 12dd99504..eeadad374 100644
--- a/numpy/core/src/multiarray/convert_datatype.c
+++ b/numpy/core/src/multiarray/convert_datatype.c
@@ -1548,6 +1548,40 @@ should_use_min_scalar(npy_intp narrs, PyArrayObject **arr,
}
+/*
+ * Utility function used only in PyArray_ResultType for value-based logic.
+ * See that function for the meaning and contents of the parameters.
+ */
+static PyArray_Descr *
+get_descr_from_cast_or_value(
+ npy_intp i,
+ PyArrayObject *arrs[],
+ npy_intp ndtypes,
+ PyArray_Descr *descriptor,
+ PyArray_DTypeMeta *common_dtype)
+{
+ PyArray_Descr *curr;
+ if (NPY_LIKELY(i < ndtypes ||
+ !(PyArray_FLAGS(arrs[i-ndtypes]) & _NPY_ARRAY_WAS_PYSCALAR))) {
+ curr = PyArray_CastDescrToDType(descriptor, common_dtype);
+ }
+ else {
+ /*
+ * Unlike `PyArray_CastToDTypeAndPromoteDescriptors`, deal with
+         * plain Python values "gracefully". This recovers the original
+         * value by the long route, but it should almost never happen...
+ */
+ PyObject *tmp = PyArray_GETITEM(arrs[i-ndtypes],
+ PyArray_BYTES(arrs[i-ndtypes]));
+ if (tmp == NULL) {
+ return NULL;
+ }
+ curr = NPY_DT_CALL_discover_descr_from_pyobject(common_dtype, tmp);
+ Py_DECREF(tmp);
+ }
+ return curr;
+}
+
/*NUMPY_API
*
* Produces the result type of a bunch of inputs, using the same rules
@@ -1684,28 +1718,15 @@ PyArray_ResultType(
result = NPY_DT_CALL_default_descr(common_dtype);
}
else {
- result = PyArray_CastDescrToDType(all_descriptors[0], common_dtype);
+ result = get_descr_from_cast_or_value(
+ 0, arrs, ndtypes, all_descriptors[0], common_dtype);
+ if (result == NULL) {
+ goto error;
+ }
for (npy_intp i = 1; i < ndtypes+narrs; i++) {
- PyArray_Descr *curr;
- if (NPY_LIKELY(i < ndtypes ||
- !(PyArray_FLAGS(arrs[i-ndtypes]) & _NPY_ARRAY_WAS_PYSCALAR))) {
- curr = PyArray_CastDescrToDType(all_descriptors[i], common_dtype);
- }
- else {
- /*
- * Unlike `PyArray_CastToDTypeAndPromoteDescriptors` deal with
- * plain Python values "graciously". This recovers the original
- * value the long route, but it should almost never happen...
- */
- PyObject *tmp = PyArray_GETITEM(
- arrs[i-ndtypes], PyArray_BYTES(arrs[i-ndtypes]));
- if (tmp == NULL) {
- goto error;
- }
- curr = NPY_DT_CALL_discover_descr_from_pyobject(common_dtype, tmp);
- Py_DECREF(tmp);
- }
+ PyArray_Descr *curr = get_descr_from_cast_or_value(
+ i, arrs, ndtypes, all_descriptors[i], common_dtype);
if (curr == NULL) {
goto error;
}
diff --git a/numpy/core/src/multiarray/datetime.c b/numpy/core/src/multiarray/datetime.c
index b24bc0356..e0064c017 100644
--- a/numpy/core/src/multiarray/datetime.c
+++ b/numpy/core/src/multiarray/datetime.c
@@ -13,6 +13,7 @@
#include <Python.h>
#include "numpy/arrayobject.h"
+#include "numpyos.h"
#include "npy_config.h"
#include "npy_pycompat.h"
@@ -723,12 +724,21 @@ parse_datetime_extended_unit_from_string(char const *str, Py_ssize_t len,
{
char const *substr = str, *substrend = NULL;
int den = 1;
+ npy_longlong true_meta_val;
/* First comes an optional integer multiplier */
out_meta->num = (int)strtol_const(substr, &substrend, 10);
if (substr == substrend) {
out_meta->num = 1;
}
+ else {
+ // check for 32-bit integer overflow
+ char *endptr = NULL;
+ true_meta_val = NumPyOS_strtoll(substr, &endptr, 10);
+ if (true_meta_val > INT_MAX || true_meta_val < 0) {
+ goto bad_input;
+ }
+ }
substr = substrend;
/* Next comes the unit itself, followed by either '/' or the string end */
@@ -3776,7 +3786,17 @@ time_to_time_resolve_descriptors(
meta2 = get_datetime_metadata_from_dtype(loop_descrs[1]);
assert(meta2 != NULL);
- if (meta1->base == meta2->base && meta1->num == meta2->num) {
+ if ((meta1->base == meta2->base && meta1->num == meta2->num) ||
+ // handle some common metric prefix conversions
+ // 1000 fold conversions
+ ((meta2->base >= 7) && (meta1->base - meta2->base == 1)
+ && ((meta1->num / meta2->num) == 1000)) ||
+ // 10^6 fold conversions
+ ((meta2->base >= 7) && (meta1->base - meta2->base == 2)
+ && ((meta1->num / meta2->num) == 1000000)) ||
+ // 10^9 fold conversions
+ ((meta2->base >= 7) && (meta1->base - meta2->base == 3)
+ && ((meta1->num / meta2->num) == 1000000000))) {
if (byteorder_may_allow_view) {
return NPY_NO_CASTING | byteorder_may_allow_view;
}
diff --git a/numpy/core/src/multiarray/descriptor.c b/numpy/core/src/multiarray/descriptor.c
index 082876aa2..6a09f92ac 100644
--- a/numpy/core/src/multiarray/descriptor.c
+++ b/numpy/core/src/multiarray/descriptor.c
@@ -2304,6 +2304,33 @@ arraydescr_new(PyTypeObject *subtype,
PyObject *args, PyObject *kwds)
{
if (subtype != &PyArrayDescr_Type) {
+ if (Py_TYPE(subtype) == &PyArrayDTypeMeta_Type &&
+ !(PyType_GetFlags(Py_TYPE(subtype)) & Py_TPFLAGS_HEAPTYPE) &&
+ (NPY_DT_SLOTS((PyArray_DTypeMeta *)subtype)) != NULL) {
+ /*
+ * Appears to be a properly initialized user DType. Allocate
+ * it and initialize the main part as best we can.
+ * TODO: This should probably be a user function, and enforce
+ * things like the `elsize` being correctly set.
+ * TODO: This is EXPERIMENTAL API!
+ */
+ PyArray_DTypeMeta *DType = (PyArray_DTypeMeta *)subtype;
+ PyArray_Descr *descr = (PyArray_Descr *)subtype->tp_alloc(subtype, 0);
+ if (descr == 0) {
+ PyErr_NoMemory();
+ return NULL;
+ }
+ PyObject_Init((PyObject *)descr, subtype);
+ descr->f = &NPY_DT_SLOTS(DType)->f;
+ Py_XINCREF(DType->scalar_type);
+ descr->typeobj = DType->scalar_type;
+ descr->type_num = DType->type_num;
+ descr->flags = NPY_USE_GETITEM|NPY_USE_SETITEM;
+ descr->byteorder = '|'; /* If DType uses it, let it override */
+ descr->elsize = -1; /* Initialize to invalid value */
+ descr->hash = -1;
+ return (PyObject *)descr;
+ }
/* The DTypeMeta class should prevent this from happening. */
PyErr_Format(PyExc_SystemError,
"'%S' must not inherit np.dtype.__new__().", subtype);
diff --git a/numpy/core/src/multiarray/dtypemeta.c b/numpy/core/src/multiarray/dtypemeta.c
index cbde91b76..cd489d5e7 100644
--- a/numpy/core/src/multiarray/dtypemeta.c
+++ b/numpy/core/src/multiarray/dtypemeta.c
@@ -290,7 +290,7 @@ void_common_instance(PyArray_Descr *descr1, PyArray_Descr *descr2)
return descr1;
}
-static int
+NPY_NO_EXPORT int
python_builtins_are_known_scalar_types(
PyArray_DTypeMeta *NPY_UNUSED(cls), PyTypeObject *pytype)
{
diff --git a/numpy/core/src/multiarray/dtypemeta.h b/numpy/core/src/multiarray/dtypemeta.h
index fb772c07d..05e9e2394 100644
--- a/numpy/core/src/multiarray/dtypemeta.h
+++ b/numpy/core/src/multiarray/dtypemeta.h
@@ -8,6 +8,35 @@
#define NPY_DT_PARAMETRIC 1 << 2
+typedef PyArray_Descr *(discover_descr_from_pyobject_function)(
+ PyArray_DTypeMeta *cls, PyObject *obj);
+
+/*
+ * Before making this public, we should decide whether it should pass
+ * the type, or allow looking at the object. A possible use-case:
+ * `np.array(np.array([0]), dtype=np.ndarray)`
+ * Could consider arrays that are not `dtype=ndarray` "scalars".
+ */
+typedef int (is_known_scalar_type_function)(
+ PyArray_DTypeMeta *cls, PyTypeObject *obj);
+
+typedef PyArray_Descr *(default_descr_function)(PyArray_DTypeMeta *cls);
+typedef PyArray_DTypeMeta *(common_dtype_function)(
+ PyArray_DTypeMeta *dtype1, PyArray_DTypeMeta *dtype2);
+typedef PyArray_Descr *(common_instance_function)(
+ PyArray_Descr *dtype1, PyArray_Descr *dtype2);
+
+/*
+ * TODO: These two functions are currently only used for experimental DType
+ * API support. Their relation should be "reversed": NumPy should
+ * always use them internally.
+ * There are open points about "casting safety" though, e.g. setting
+ * elements is currently always unsafe.
+ */
+typedef int(setitemfunction)(PyArray_Descr *, PyObject *, char *);
+typedef PyObject *(getitemfunction)(PyArray_Descr *, char *);
+
+
typedef struct {
/* DType methods, these could be moved into its own struct */
discover_descr_from_pyobject_function *discover_descr_from_pyobject;
@@ -16,6 +45,12 @@ typedef struct {
common_dtype_function *common_dtype;
common_instance_function *common_instance;
/*
+ * Currently only used for experimental user DTypes.
+ * Typing as `void *` until NumPy itself uses these (directly).
+ */
+ setitemfunction *setitem;
+ getitemfunction *getitem;
+ /*
* The casting implementation (ArrayMethod) to convert between two
* instances of this DType, stored explicitly for fast access:
*/
@@ -58,7 +93,10 @@ typedef struct {
NPY_DT_SLOTS(dtype)->default_descr(dtype)
#define NPY_DT_CALL_common_dtype(dtype, other) \
NPY_DT_SLOTS(dtype)->common_dtype(dtype, other)
-
+#define NPY_DT_CALL_getitem(descr, data_ptr) \
+ NPY_DT_SLOTS(NPY_DTYPE(descr))->getitem(descr, data_ptr)
+#define NPY_DT_CALL_setitem(descr, value, data_ptr) \
+ NPY_DT_SLOTS(NPY_DTYPE(descr))->setitem(descr, value, data_ptr)
/*
* This function will hopefully be phased out or replaced, but was convenient
@@ -78,6 +116,10 @@ PyArray_DTypeFromTypeNum(int typenum)
NPY_NO_EXPORT int
+python_builtins_are_known_scalar_types(
+ PyArray_DTypeMeta *cls, PyTypeObject *pytype);
+
+NPY_NO_EXPORT int
dtypemeta_wrap_legacy_descriptor(PyArray_Descr *dtypem);
#endif /* NUMPY_CORE_SRC_MULTIARRAY_DTYPEMETA_H_ */
diff --git a/numpy/core/src/multiarray/experimental_public_dtype_api.c b/numpy/core/src/multiarray/experimental_public_dtype_api.c
new file mode 100644
index 000000000..1e8abe9d6
--- /dev/null
+++ b/numpy/core/src/multiarray/experimental_public_dtype_api.c
@@ -0,0 +1,363 @@
+#include <Python.h>
+
+#define NPY_NO_DEPRECATED_API NPY_API_VERSION
+#define _UMATHMODULE
+#define _MULTIARRAYMODULE
+#include <numpy/npy_common.h>
+#include "numpy/arrayobject.h"
+#include "numpy/ufuncobject.h"
+#include "common.h"
+
+#include "experimental_public_dtype_api.h"
+#include "array_method.h"
+#include "dtypemeta.h"
+#include "array_coercion.h"
+#include "convert_datatype.h"
+
+
+#define EXPERIMENTAL_DTYPE_API_VERSION 1
+
+
+typedef struct {
+ PyTypeObject *typeobj; /* type of python scalar or NULL */
+ int flags; /* flags, including parametric and abstract */
+ /* NULL terminated cast definitions. Use NULL for the newly created DType */
+ PyArrayMethod_Spec **casts;
+ PyType_Slot *slots;
+} PyArrayDTypeMeta_Spec;
+
+
+
+static PyArray_DTypeMeta *
+dtype_does_not_promote(
+ PyArray_DTypeMeta *NPY_UNUSED(self), PyArray_DTypeMeta *NPY_UNUSED(other))
+{
+ /* `other` is guaranteed not to be `self`, so we don't have to do much... */
+ Py_INCREF(Py_NotImplemented);
+ return (PyArray_DTypeMeta *)Py_NotImplemented;
+}
+
+
+static PyArray_Descr *
+discover_as_default(PyArray_DTypeMeta *cls, PyObject *NPY_UNUSED(obj))
+{
+ return NPY_DT_CALL_default_descr(cls);
+}
+
+
+static PyArray_Descr *
+use_new_as_default(PyArray_DTypeMeta *self)
+{
+ PyObject *res = PyObject_CallObject((PyObject *)self, NULL);
+ if (res == NULL) {
+ return NULL;
+ }
+ /*
+ * Let's not trust that the DType is implemented correctly
+ * TODO: Should probably do an exact type-check (at least unless this is
+ * an abstract DType).
+ */
+ if (!PyArray_DescrCheck(res)) {
+ PyErr_Format(PyExc_RuntimeError,
+ "Instantiating %S did not return a dtype instance, this is "
+ "invalid (especially without a custom `default_descr()`).",
+ self);
+ Py_DECREF(res);
+ return NULL;
+ }
+ PyArray_Descr *descr = (PyArray_Descr *)res;
+ /*
+ * Should probably do some more sanity checks here on the descriptor
+ * to ensure the user is not being naughty. But in the end, we have
+ * only limited control anyway.
+ */
+ return descr;
+}
+
+
+static int
+legacy_setitem_using_DType(PyObject *obj, void *data, void *arr)
+{
+ if (arr == NULL) {
+ PyErr_SetString(PyExc_RuntimeError,
+ "Using legacy SETITEM with NULL array object is only "
+ "supported for basic NumPy DTypes.");
+ return -1;
+ }
+ setitemfunction *setitem;
+ setitem = NPY_DT_SLOTS(NPY_DTYPE(PyArray_DESCR(arr)))->setitem;
+ return setitem(PyArray_DESCR(arr), obj, data);
+}
+
+
+static PyObject *
+legacy_getitem_using_DType(void *data, void *arr)
+{
+ if (arr == NULL) {
+ PyErr_SetString(PyExc_RuntimeError,
+ "Using legacy SETITEM with NULL array object is only "
+ "supported for basic NumPy DTypes.");
+ return NULL;
+ }
+ getitemfunction *getitem;
+ getitem = NPY_DT_SLOTS(NPY_DTYPE(PyArray_DESCR(arr)))->getitem;
+ return getitem(PyArray_DESCR(arr), data);
+}
+
+
+/*
+ * The descr->f structure used by user-DTypes. Some functions may be filled
+ * in by the user in the future, and more could get defaults for compatibility.
+ */
+PyArray_ArrFuncs default_funcs = {
+ .setitem = &legacy_setitem_using_DType,
+ .getitem = &legacy_getitem_using_DType
+};
+
+
+/* Slot IDs are sequential, so only the number of the last slot is needed: */
+#define NUM_DTYPE_SLOTS 7
+
+
+int
+PyArrayInitDTypeMeta_FromSpec(
+ PyArray_DTypeMeta *DType, PyArrayDTypeMeta_Spec *spec)
+{
+ if (!PyObject_TypeCheck(DType, &PyArrayDTypeMeta_Type)) {
+ PyErr_SetString(PyExc_RuntimeError,
+ "Passed in DType must be a valid (initialized) DTypeMeta "
+ "instance!");
+ return -1;
+ }
+
+ if (spec->typeobj == NULL || !PyType_Check(spec->typeobj)) {
+ PyErr_SetString(PyExc_TypeError,
+ "Not giving a type object is currently not supported, but "
+ "is expected to be supported eventually. This would mean "
+ "that e.g. indexing a NumPy array will return a 0-D array "
+ "and not a scalar.");
+ return -1;
+ }
+
+ if (DType->dt_slots != NULL) {
+ PyErr_Format(PyExc_RuntimeError,
+ "DType %R appears already registered?", DType);
+ return -1;
+ }
+
+ /* Check and handle flags: */
+ if (spec->flags & ~(NPY_DT_PARAMETRIC|NPY_DT_ABSTRACT)) {
+ PyErr_SetString(PyExc_RuntimeError,
+ "invalid DType flags specified, only parametric and abstract "
+ "are valid flags for user DTypes.");
+ return -1;
+ }
+
+ DType->flags = spec->flags;
+ DType->dt_slots = PyMem_Calloc(1, sizeof(NPY_DType_Slots));
+ if (DType->dt_slots == NULL) {
+ return -1;
+ }
+
+ /* Set default values (where applicable) */
+ NPY_DT_SLOTS(DType)->discover_descr_from_pyobject = &discover_as_default;
+ NPY_DT_SLOTS(DType)->is_known_scalar_type = (
+ &python_builtins_are_known_scalar_types);
+ NPY_DT_SLOTS(DType)->default_descr = use_new_as_default;
+ NPY_DT_SLOTS(DType)->common_dtype = dtype_does_not_promote;
+ /* May need a default for non-parametric? */
+ NPY_DT_SLOTS(DType)->common_instance = NULL;
+ NPY_DT_SLOTS(DType)->setitem = NULL;
+ NPY_DT_SLOTS(DType)->getitem = NULL;
+
+ PyType_Slot *spec_slot = spec->slots;
+ while (1) {
+ int slot = spec_slot->slot;
+ void *pfunc = spec_slot->pfunc;
+ spec_slot++;
+ if (slot == 0) {
+ break;
+ }
+ if (slot > NUM_DTYPE_SLOTS || slot < 0) {
+ PyErr_Format(PyExc_RuntimeError,
+ "Invalid slot with value %d passed in.", slot);
+ return -1;
+ }
+ /*
+ * It is up to the user to get this right; the slot IDs are ordered
+ * exactly like the corresponding fields are stored in the struct:
+ */
+ void **current = (void **)(&(
+ NPY_DT_SLOTS(DType)->discover_descr_from_pyobject));
+ current += slot - 1;
+ *current = pfunc;
+ }
+ if (NPY_DT_SLOTS(DType)->setitem == NULL
+ || NPY_DT_SLOTS(DType)->getitem == NULL) {
+ PyErr_SetString(PyExc_RuntimeError,
+ "A DType must provide a getitem/setitem (there may be an "
+ "exception here in the future if no scalar type is provided)");
+ return -1;
+ }
+
+ /*
+ * Now that the spec is read we can check that all required functions were
+ * defined by the user.
+ */
+ if (spec->flags & NPY_DT_PARAMETRIC) {
+ if (NPY_DT_SLOTS(DType)->common_instance == NULL ||
+ NPY_DT_SLOTS(DType)->discover_descr_from_pyobject
+ == &discover_as_default) {
+ PyErr_SetString(PyExc_RuntimeError,
+ "Parametric DType must define a common-instance and "
+ "descriptor discovery function!");
+ return -1;
+ }
+ }
+ NPY_DT_SLOTS(DType)->f = default_funcs;
+ /* invalid type num. Ideally, we get away with it! */
+ DType->type_num = -1;
+
+ /*
+ * Handle the scalar type mapping.
+ */
+ Py_INCREF(spec->typeobj);
+ DType->scalar_type = spec->typeobj;
+ if (PyType_GetFlags(spec->typeobj) & Py_TPFLAGS_HEAPTYPE) {
+ if (PyObject_SetAttrString((PyObject *)DType->scalar_type,
+ "__associated_array_dtype__", (PyObject *)DType) < 0) {
+ Py_DECREF(DType);
+ return -1;
+ }
+ }
+ if (_PyArray_MapPyTypeToDType(DType, DType->scalar_type, 0) < 0) {
+ Py_DECREF(DType);
+ return -1;
+ }
+
+ /* Ensure cast dict is defined (not sure we have to do it here) */
+ NPY_DT_SLOTS(DType)->castingimpls = PyDict_New();
+ if (NPY_DT_SLOTS(DType)->castingimpls == NULL) {
+ return -1;
+ }
+ /*
+ * And now, register all the casts that are currently defined!
+ */
+ PyArrayMethod_Spec **next_meth_spec = spec->casts;
+ while (1) {
+ PyArrayMethod_Spec *meth_spec = *next_meth_spec;
+ next_meth_spec++;
+ if (meth_spec == NULL) {
+ break;
+ }
+ /*
+ * The user cannot reference the new DType yet (it is being created),
+ * so we fill in the NULL entries for them!
+ */
+ for (int i=0; i < meth_spec->nin + meth_spec->nout; i++) {
+ if (meth_spec->dtypes[i] == NULL) {
+ meth_spec->dtypes[i] = DType;
+ }
+ }
+ /* Register the cast! */
+ int res = PyArray_AddCastingImplementation_FromSpec(meth_spec, 0);
+
+ /* Also clean up again, so nobody can get bad ideas... */
+ for (int i=0; i < meth_spec->nin + meth_spec->nout; i++) {
+ if (meth_spec->dtypes[i] == DType) {
+ meth_spec->dtypes[i] = NULL;
+ }
+ }
+
+ if (res < 0) {
+ return -1;
+ }
+ }
+
+ if (NPY_DT_SLOTS(DType)->within_dtype_castingimpl == NULL) {
+ /*
+ * We expect this for now. We should have a default for DTypes that
+ * only need a simple copy (and possibly a byte-swap of the full
+ * itemsize).
+ */
+ PyErr_SetString(PyExc_RuntimeError,
+ "DType must provide a function to cast (or just copy) between "
+ "its own instances!");
+ return -1;
+ }
+
+ /* All casts were registered above, so we are done. */
+ return 0;
+}
+
+
+/* Function is defined in umath/dispatching.c (same/one compilation unit) */
+NPY_NO_EXPORT int
+PyUFunc_AddLoop(PyUFuncObject *ufunc, PyObject *info, int ignore_duplicate);
+
+static int
+PyUFunc_AddLoopFromSpec(PyObject *ufunc, PyArrayMethod_Spec *spec)
+{
+ if (!PyObject_TypeCheck(ufunc, &PyUFunc_Type)) {
+ PyErr_SetString(PyExc_TypeError,
+ "ufunc object passed is not a ufunc!");
+ return -1;
+ }
+ PyBoundArrayMethodObject *bmeth =
+ (PyBoundArrayMethodObject *)PyArrayMethod_FromSpec(spec);
+ if (bmeth == NULL) {
+ return -1;
+ }
+ int nargs = bmeth->method->nin + bmeth->method->nout;
+ PyObject *dtypes = PyArray_TupleFromItems(
+ nargs, (PyObject **)bmeth->dtypes, 1);
+ if (dtypes == NULL) {
+ return -1;
+ }
+ PyObject *info = PyTuple_Pack(2, dtypes, bmeth->method);
+ Py_DECREF(bmeth);
+ Py_DECREF(dtypes);
+ if (info == NULL) {
+ return -1;
+ }
+ return PyUFunc_AddLoop((PyUFuncObject *)ufunc, info, 0);
+}
+
+
+NPY_NO_EXPORT PyObject *
+_get_experimental_dtype_api(PyObject *NPY_UNUSED(mod), PyObject *arg)
+{
+ static void *experimental_api_table[] = {
+ &PyUFunc_AddLoopFromSpec,
+ &PyArrayDTypeMeta_Type,
+ &PyArrayInitDTypeMeta_FromSpec,
+ NULL,
+ };
+
+ char *env = getenv("NUMPY_EXPERIMENTAL_DTYPE_API");
+ if (env == NULL || strcmp(env, "1") != 0) {
+ PyErr_Format(PyExc_RuntimeError,
+ "The new DType API is currently in an exploratory phase and "
+ "should NOT be used for production code. "
+ "Expect modifications and crashes! "
+ "To experiment with the new API you must set "
+ "`NUMPY_EXPERIMENTAL_DTYPE_API=1` as an environment variable.");
+ return NULL;
+ }
+
+ long version = PyLong_AsLong(arg);
+ if (error_converting(version)) {
+ return NULL;
+ }
+ if (version != EXPERIMENTAL_DTYPE_API_VERSION) {
+ PyErr_Format(PyExc_RuntimeError,
+ "Experimental DType API version %d requested, but NumPy "
+ "is exporting version %d. Recompile your DType and/or upgrade "
+ "NumPy to match.",
+ version, EXPERIMENTAL_DTYPE_API_VERSION);
+ return NULL;
+ }
+
+ return PyCapsule_New(&experimental_api_table,
+ "experimental_dtype_api_table", NULL);
+}
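
A minimal Python sketch of how this entry point is consumed (it assumes the
private module path `numpy.core._multiarray_umath`, where the method is
registered further below):

    import os
    # getenv() is checked at call time, so the opt-in can be set here:
    os.environ["NUMPY_EXPERIMENTAL_DTYPE_API"] = "1"

    from numpy.core import _multiarray_umath
    # Request table version 1 (EXPERIMENTAL_DTYPE_API_VERSION); any other
    # version raises a RuntimeError.
    capsule = _multiarray_umath._get_experimental_dtype_api(1)
    print(type(capsule))  # <class 'PyCapsule'>
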
diff --git a/numpy/core/src/multiarray/experimental_public_dtype_api.h b/numpy/core/src/multiarray/experimental_public_dtype_api.h
new file mode 100644
index 000000000..270cb82bf
--- /dev/null
+++ b/numpy/core/src/multiarray/experimental_public_dtype_api.h
@@ -0,0 +1,18 @@
+/*
+ * This file exports the experimental dtype API as exposed via the
+ * `numpy/core/include/numpy/experimental_dtype_api.h`
+ * header file.
+ *
+ * This file is a stub, all important definitions are in the code file.
+ *
+ * NOTE: This file is considered in-flux, exploratory and transitional.
+ */
+
+#ifndef NUMPY_CORE_SRC_MULTIARRAY_EXPERIMENTAL_PUBLIC_DTYPE_API_H_
+#define NUMPY_CORE_SRC_MULTIARRAY_EXPERIMENTAL_PUBLIC_DTYPE_API_H_
+
+NPY_NO_EXPORT PyObject *
+_get_experimental_dtype_api(PyObject *mod, PyObject *arg);
+
+
+#endif /* NUMPY_CORE_SRC_MULTIARRAY_EXPERIMENTAL_PUBLIC_DTYPE_API_H_ */
diff --git a/numpy/core/src/multiarray/item_selection.c b/numpy/core/src/multiarray/item_selection.c
index ad5478bbf..33d378c2b 100644
--- a/numpy/core/src/multiarray/item_selection.c
+++ b/numpy/core/src/multiarray/item_selection.c
@@ -1292,7 +1292,15 @@ partition_prep_kth_array(PyArrayObject * ktharray,
npy_intp * kth;
npy_intp nkth, i;
- if (!PyArray_CanCastSafely(PyArray_TYPE(ktharray), NPY_INTP)) {
+ if (PyArray_ISBOOL(ktharray)) {
+ /* 2021-09-29, NumPy 1.22 */
+ if (DEPRECATE(
+ "Passing booleans as partition index is deprecated"
+ " (warning added in NumPy 1.22)") < 0) {
+ return NULL;
+ }
+ }
+ else if (!PyArray_ISINTEGER(ktharray)) {
PyErr_Format(PyExc_TypeError, "Partition index must be integer");
return NULL;
}
@@ -2390,19 +2398,14 @@ PyArray_CountNonzero(PyArrayObject *self)
npy_intp *strideptr, *innersizeptr;
NPY_BEGIN_THREADS_DEF;
- // Special low-overhead version specific to the boolean/int types
dtype = PyArray_DESCR(self);
- switch(dtype->kind) {
- case 'u':
- case 'i':
- case 'b':
- if (dtype->elsize > 8) {
- break;
- }
- return count_nonzero_int(
- PyArray_NDIM(self), PyArray_BYTES(self), PyArray_DIMS(self),
- PyArray_STRIDES(self), dtype->elsize
- );
+ /* Special low-overhead version specific to the boolean/int types */
+ if (PyArray_ISALIGNED(self) && (
+ PyDataType_ISBOOL(dtype) || PyDataType_ISINTEGER(dtype))) {
+ return count_nonzero_int(
+ PyArray_NDIM(self), PyArray_BYTES(self), PyArray_DIMS(self),
+ PyArray_STRIDES(self), dtype->elsize
+ );
}
nonzero = PyArray_DESCR(self)->f->nonzero;
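
A small Python sketch of the new deprecation path for boolean partition
indices (the warning text is quoted from the DEPRECATE() call above):

    import warnings
    import numpy as np

    a = np.array([3, 1, 2])
    with warnings.catch_warnings():
        warnings.simplefilter("error", DeprecationWarning)
        try:
            np.partition(a, True)  # boolean kth hits the deprecation branch
        except DeprecationWarning as e:
            print(e)  # "Passing booleans as partition index is deprecated ..."
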
diff --git a/numpy/core/src/multiarray/iterators.c b/numpy/core/src/multiarray/iterators.c
index 36bfaa7cf..f959162fd 100644
--- a/numpy/core/src/multiarray/iterators.c
+++ b/numpy/core/src/multiarray/iterators.c
@@ -1124,6 +1124,35 @@ NPY_NO_EXPORT PyTypeObject PyArrayIter_Type = {
/** END of Array Iterator **/
+
+static int
+set_shape_mismatch_exception(PyArrayMultiIterObject *mit, int i1, int i2)
+{
+ PyObject *shape1, *shape2, *msg;
+
+ shape1 = PyObject_GetAttrString((PyObject *) mit->iters[i1]->ao, "shape");
+ if (shape1 == NULL) {
+ return -1;
+ }
+ shape2 = PyObject_GetAttrString((PyObject *) mit->iters[i2]->ao, "shape");
+ if (shape2 == NULL) {
+ Py_DECREF(shape1);
+ return -1;
+ }
+ msg = PyUnicode_FromFormat("shape mismatch: objects cannot be broadcast "
+ "to a single shape. Mismatch is between arg %d "
+ "with shape %S and arg %d with shape %S.",
+ i1, shape1, i2, shape2);
+ Py_DECREF(shape1);
+ Py_DECREF(shape2);
+ if (msg == NULL) {
+ return -1;
+ }
+ PyErr_SetObject(PyExc_ValueError, msg);
+ Py_DECREF(msg);
+ return 0;
+}
+
/* Adjust dimensionality and strides for index object iterators
--- i.e. broadcast
*/
@@ -1132,6 +1161,7 @@ NPY_NO_EXPORT int
PyArray_Broadcast(PyArrayMultiIterObject *mit)
{
int i, nd, k, j;
+ int src_iter = -1; /* Initializing avoids a compiler warning. */
npy_intp tmp;
PyArrayIterObject *it;
@@ -1155,12 +1185,10 @@ PyArray_Broadcast(PyArrayMultiIterObject *mit)
}
if (mit->dimensions[i] == 1) {
mit->dimensions[i] = tmp;
+ src_iter = j;
}
else if (mit->dimensions[i] != tmp) {
- PyErr_SetString(PyExc_ValueError,
- "shape mismatch: objects" \
- " cannot be broadcast" \
- " to a single shape");
+ set_shape_mismatch_exception(mit, src_iter, j);
return -1;
}
}
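
A short Python sketch showing the improved error message (np.broadcast goes
through PyArray_Broadcast):

    import numpy as np

    try:
        np.broadcast(np.empty((3,)), np.empty((4,)))
    except ValueError as e:
        print(e)
        # shape mismatch: objects cannot be broadcast to a single shape.
        # Mismatch is between arg 0 with shape (3,) and arg 1 with shape (4,).
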
diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c
index 5ceed1678..d211f01bc 100644
--- a/numpy/core/src/multiarray/multiarraymodule.c
+++ b/numpy/core/src/multiarray/multiarraymodule.c
@@ -68,6 +68,7 @@ NPY_NO_EXPORT int NPY_NUMUSERTYPES = 0;
#include "typeinfo.h"
#include "get_attr_string.h"
+#include "experimental_public_dtype_api.h" /* _get_experimental_dtype_api */
/*
*****************************************************************************
@@ -4419,7 +4420,9 @@ static struct PyMethodDef array_module_methods[] = {
{"_discover_array_parameters", (PyCFunction)_discover_array_parameters,
METH_VARARGS | METH_KEYWORDS, NULL},
{"_get_castingimpl", (PyCFunction)_get_castingimpl,
- METH_VARARGS | METH_KEYWORDS, NULL},
+ METH_VARARGS | METH_KEYWORDS, NULL},
+ {"_get_experimental_dtype_api", (PyCFunction)_get_experimental_dtype_api,
+ METH_O, NULL},
/* from umath */
{"frompyfunc",
(PyCFunction) ufunc_frompyfunc,
diff --git a/numpy/core/src/multiarray/usertypes.c b/numpy/core/src/multiarray/usertypes.c
index d0cf53576..a338d712d 100644
--- a/numpy/core/src/multiarray/usertypes.c
+++ b/numpy/core/src/multiarray/usertypes.c
@@ -268,6 +268,56 @@ PyArray_RegisterDataType(PyArray_Descr *descr)
return typenum;
}
+
+/*
+ * Checks whether a cast to `totype` is already cached via the new
+ * casting-impl mechanism and warns if it is (the cache is never cleared,
+ * but we otherwise continue silently). Users should not modify casts after
+ * they have been used, but this may also happen accidentally during setup
+ * (and may never have mattered). See https://github.com/numpy/numpy/issues/20009
+ */
+static int
+_warn_if_cast_exists_already(
+ PyArray_Descr *descr, int totype, char *funcname)
+{
+ PyArray_DTypeMeta *to_DType = PyArray_DTypeFromTypeNum(totype);
+ if (to_DType == NULL) {
+ return -1;
+ }
+ PyObject *cast_impl = PyDict_GetItemWithError(
+ NPY_DT_SLOTS(NPY_DTYPE(descr))->castingimpls, (PyObject *)to_DType);
+ Py_DECREF(to_DType);
+ if (cast_impl == NULL) {
+ if (PyErr_Occurred()) {
+ return -1;
+ }
+ }
+ else {
+ char *extra_msg;
+ if (cast_impl == Py_None) {
+ extra_msg = "the cast will continue to be considered impossible.";
+ }
+ else {
+ extra_msg = "the previous definition will continue to be used.";
+ }
+ Py_DECREF(cast_impl);
+ PyArray_Descr *to_descr = PyArray_DescrFromType(totype);
+ int ret = PyErr_WarnFormat(PyExc_RuntimeWarning, 1,
+ "A cast from %R to %R was registered/modified using `%s` "
+ "after the cast had been used. "
+ "This registration will have (mostly) no effect: %s\n"
+ "The most likely fix is to ensure that casts are the first "
+ "thing initialized after dtype registration. "
+ "Please contact the NumPy developers with any questions!",
+ descr, to_descr, funcname, extra_msg);
+ Py_DECREF(to_descr);
+ if (ret < 0) {
+ return -1;
+ }
+ }
+ return 0;
+}
+
/*NUMPY_API
Register Casting Function
Replaces any function currently stored.
@@ -279,14 +329,19 @@ PyArray_RegisterCastFunc(PyArray_Descr *descr, int totype,
PyObject *cobj, *key;
int ret;
- if (totype < NPY_NTYPES_ABI_COMPATIBLE) {
- descr->f->cast[totype] = castfunc;
- return 0;
- }
if (totype >= NPY_NTYPES && !PyTypeNum_ISUSERDEF(totype)) {
PyErr_SetString(PyExc_TypeError, "invalid type number.");
return -1;
}
+ if (_warn_if_cast_exists_already(
+ descr, totype, "PyArray_RegisterCastFunc") < 0) {
+ return -1;
+ }
+
+ if (totype < NPY_NTYPES_ABI_COMPATIBLE) {
+ descr->f->cast[totype] = castfunc;
+ return 0;
+ }
if (descr->f->castdict == NULL) {
descr->f->castdict = PyDict_New();
if (descr->f->castdict == NULL) {
@@ -328,6 +383,10 @@ PyArray_RegisterCanCast(PyArray_Descr *descr, int totype,
"RegisterCanCast must be user-defined.");
return -1;
}
+ if (_warn_if_cast_exists_already(
+ descr, totype, "PyArray_RegisterCanCast") < 0) {
+ return -1;
+ }
if (scalar == NPY_NOSCALAR) {
/*
diff --git a/numpy/core/src/npymath/npy_math_private.h b/numpy/core/src/npymath/npy_math_private.h
index 212d11a0b..7ca0c5ba0 100644
--- a/numpy/core/src/npymath/npy_math_private.h
+++ b/numpy/core/src/npymath/npy_math_private.h
@@ -19,7 +19,13 @@
#define _NPY_MATH_PRIVATE_H_
#include <Python.h>
+#ifdef __cplusplus
+#include <cmath>
+using std::isgreater;
+using std::isless;
+#else
#include <math.h>
+#endif
#include "npy_config.h"
#include "npy_fpmath.h"
@@ -507,17 +513,29 @@ typedef union {
#else /* !_MSC_VER */
typedef union {
npy_cdouble npy_z;
+#ifdef __cplusplus
+ std::complex<double> c99_z;
+#else
complex double c99_z;
+#endif
} __npy_cdouble_to_c99_cast;
typedef union {
npy_cfloat npy_z;
+#ifdef __cplusplus
+ std::complex<float> c99_z;
+#else
complex float c99_z;
+#endif
} __npy_cfloat_to_c99_cast;
typedef union {
npy_clongdouble npy_z;
+#ifdef __cplusplus
+ std::complex<long double> c99_z;
+#else
complex long double c99_z;
+#endif
} __npy_clongdouble_to_c99_cast;
#endif /* !_MSC_VER */
diff --git a/numpy/core/src/npysort/radixsort.c.src b/numpy/core/src/npysort/radixsort.c.src
deleted file mode 100644
index 99d8ed42a..000000000
--- a/numpy/core/src/npysort/radixsort.c.src
+++ /dev/null
@@ -1,231 +0,0 @@
-#define NPY_NO_DEPRECATED_API NPY_API_VERSION
-
-#include "npy_sort.h"
-#include "npysort_common.h"
-#include <stdlib.h>
-
-/*
- *****************************************************************************
- ** INTEGER SORTS **
- *****************************************************************************
- */
-
-
-/**begin repeat
- *
- * #TYPE = BOOL, BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG,
- * LONGLONG, ULONGLONG#
- * #suff = bool, byte, ubyte, short, ushort, int, uint, long, ulong,
- * longlong, ulonglong#
- * #type = npy_ubyte, npy_ubyte, npy_ubyte, npy_ushort, npy_ushort, npy_uint,
- * npy_uint, npy_ulong, npy_ulong, npy_ulonglong, npy_ulonglong#
- * #sign = 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0#
- * #floating = 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0#
- */
-
-// Reference: https://github.com/eloj/radix-sorting#-key-derivation
-#if @sign@
- // Floating-point is currently disabled.
- // Floating-point tests succeed for double and float on macOS but not on Windows/Linux.
- // Basic sorting tests succeed but others relying on sort fail.
- // Possibly related to floating-point normalisation or multiple NaN reprs? Not sure.
- #if @floating@
- // For floats, we invert the key if the sign bit is set, else we invert the sign bit.
- #define KEY_OF(x) ((x) ^ (-((x) >> (sizeof(@type@) * 8 - 1)) | ((@type@)1 << (sizeof(@type@) * 8 - 1))))
- #else
- // For signed ints, we flip the sign bit so the negatives are below the positives.
- #define KEY_OF(x) ((x) ^ ((@type@)1 << (sizeof(@type@) * 8 - 1)))
- #endif
-#else
- // For unsigned ints, the key is as-is
- #define KEY_OF(x) (x)
-#endif
-
-static inline npy_ubyte
-nth_byte_@suff@(@type@ key, npy_intp l) {
- return (key >> (l << 3)) & 0xFF;
-}
-
-static @type@*
-radixsort0_@suff@(@type@ *arr, @type@ *aux, npy_intp num)
-{
- npy_intp cnt[sizeof(@type@)][1 << 8] = { { 0 } };
- npy_intp i;
- size_t l;
- @type@ key0 = KEY_OF(arr[0]);
- size_t ncols = 0;
- npy_ubyte cols[sizeof(@type@)];
-
- for (i = 0; i < num; i++) {
- @type@ k = KEY_OF(arr[i]);
-
- for (l = 0; l < sizeof(@type@); l++) {
- cnt[l][nth_byte_@suff@(k, l)]++;
- }
- }
-
- for (l = 0; l < sizeof(@type@); l++) {
- if (cnt[l][nth_byte_@suff@(key0, l)] != num) {
- cols[ncols++] = l;
- }
- }
-
- for (l = 0; l < ncols; l++) {
- npy_intp a = 0;
- for (i = 0; i < 256; i++) {
- npy_intp b = cnt[cols[l]][i];
- cnt[cols[l]][i] = a;
- a += b;
- }
- }
-
- for (l = 0; l < ncols; l++) {
- @type@* temp;
- for (i = 0; i < num; i++) {
- @type@ k = KEY_OF(arr[i]);
- npy_intp dst = cnt[cols[l]][nth_byte_@suff@(k, cols[l])]++;
- aux[dst] = arr[i];
- }
-
- temp = aux;
- aux = arr;
- arr = temp;
- }
-
- return arr;
-}
-
-NPY_NO_EXPORT int
-radixsort_@suff@(void *start, npy_intp num, void *NPY_UNUSED(varr))
-{
- void *sorted;
- @type@ *aux;
- @type@ *arr = start;
- @type@ k1, k2;
- npy_bool all_sorted = 1;
-
- if (num < 2) {
- return 0;
- }
-
- k1 = KEY_OF(arr[0]);
- for (npy_intp i = 1; i < num; i++) {
- k2 = KEY_OF(arr[i]);
- if (k1 > k2) {
- all_sorted = 0;
- break;
- }
- k1 = k2;
- }
-
- if (all_sorted) {
- return 0;
- }
-
- aux = malloc(num * sizeof(@type@));
- if (aux == NULL) {
- return -NPY_ENOMEM;
- }
-
- sorted = radixsort0_@suff@(start, aux, num);
- if (sorted != start) {
- memcpy(start, sorted, num * sizeof(@type@));
- }
-
- free(aux);
- return 0;
-}
-
-static npy_intp*
-aradixsort0_@suff@(@type@ *arr, npy_intp *aux, npy_intp *tosort, npy_intp num)
-{
- npy_intp cnt[sizeof(@type@)][1 << 8] = { { 0 } };
- npy_intp i;
- size_t l;
- @type@ key0 = KEY_OF(arr[0]);
- size_t ncols = 0;
- npy_ubyte cols[sizeof(@type@)];
-
- for (i = 0; i < num; i++) {
- @type@ k = KEY_OF(arr[i]);
-
- for (l = 0; l < sizeof(@type@); l++) {
- cnt[l][nth_byte_@suff@(k, l)]++;
- }
- }
-
- for (l = 0; l < sizeof(@type@); l++) {
- if (cnt[l][nth_byte_@suff@(key0, l)] != num) {
- cols[ncols++] = l;
- }
- }
-
- for (l = 0; l < ncols; l++) {
- npy_intp a = 0;
- for (i = 0; i < 256; i++) {
- npy_intp b = cnt[cols[l]][i];
- cnt[cols[l]][i] = a;
- a += b;
- }
- }
-
- for (l = 0; l < ncols; l++) {
- npy_intp* temp;
- for (i = 0; i < num; i++) {
- @type@ k = KEY_OF(arr[tosort[i]]);
- npy_intp dst = cnt[cols[l]][nth_byte_@suff@(k, cols[l])]++;
- aux[dst] = tosort[i];
- }
-
- temp = aux;
- aux = tosort;
- tosort = temp;
- }
-
- return tosort;
-}
-
-NPY_NO_EXPORT int
-aradixsort_@suff@(void *start, npy_intp* tosort, npy_intp num, void *NPY_UNUSED(varr))
-{
- npy_intp *sorted;
- npy_intp *aux;
- @type@ *arr = start;
- @type@ k1, k2;
- npy_bool all_sorted = 1;
-
- if (num < 2) {
- return 0;
- }
-
- k1 = KEY_OF(arr[tosort[0]]);
- for (npy_intp i = 1; i < num; i++) {
- k2 = KEY_OF(arr[tosort[i]]);
- if (k1 > k2) {
- all_sorted = 0;
- break;
- }
- k1 = k2;
- }
-
- if (all_sorted) {
- return 0;
- }
-
- aux = malloc(num * sizeof(npy_intp));
- if (aux == NULL) {
- return -NPY_ENOMEM;
- }
-
- sorted = aradixsort0_@suff@(start, aux, tosort, num);
- if (sorted != tosort) {
- memcpy(tosort, sorted, num * sizeof(npy_intp));
- }
-
- free(aux);
- return 0;
-}
-
-#undef KEY_OF
-
-/**end repeat**/
diff --git a/numpy/core/src/npysort/radixsort.cpp b/numpy/core/src/npysort/radixsort.cpp
new file mode 100644
index 000000000..017ea43b6
--- /dev/null
+++ b/numpy/core/src/npysort/radixsort.cpp
@@ -0,0 +1,354 @@
+#define NPY_NO_DEPRECATED_API NPY_API_VERSION
+
+#include "npy_sort.h"
+#include "npysort_common.h"
+
+#include "../common/numpy_tag.h"
+#include <stdlib.h>
+#include <type_traits>
+
+/*
+ *****************************************************************************
+ ** INTEGER SORTS **
+ *****************************************************************************
+ */
+
+// Reference: https://github.com/eloj/radix-sorting#-key-derivation
+template <class T>
+T
+KEY_OF(T x)
+{
+ // Floating-point is currently disabled.
+ // Floating-point tests succeed for double and float on macOS but not on
+ // Windows/Linux. Basic sorting tests succeed but others relying on sort
+ // fail. Possibly related to floating-point normalisation or multiple NaN
+ // reprs? Not sure.
+ if (std::is_floating_point<T>::value) {
+ // For floats, we invert the key if the sign bit is set, else we invert
+ // the sign bit.
+ return ((x) ^ (-((x) >> (sizeof(T) * 8 - 1)) |
+ ((T)1 << (sizeof(T) * 8 - 1))));
+ }
+ else if (std::is_signed<T>::value) {
+ // For signed ints, we flip the sign bit so the negatives are below the
+ // positives.
+ return ((x) ^ ((T)1 << (sizeof(T) * 8 - 1)));
+ }
+ else {
+ return x;
+ }
+}
+
+template <class T>
+static inline npy_ubyte
+nth_byte(T key, npy_intp l)
+{
+ return (key >> (l << 3)) & 0xFF;
+}
+
+template <class T>
+static T *
+radixsort0(T *start, T *aux, npy_intp num)
+{
+ npy_intp cnt[sizeof(T)][1 << 8] = {{0}};
+ T key0 = KEY_OF(start[0]);
+
+ for (npy_intp i = 0; i < num; i++) {
+ T k = KEY_OF(start[i]);
+
+ for (size_t l = 0; l < sizeof(T); l++) {
+ cnt[l][nth_byte(k, l)]++;
+ }
+ }
+
+ size_t ncols = 0;
+ npy_ubyte cols[sizeof(T)];
+ for (size_t l = 0; l < sizeof(T); l++) {
+ if (cnt[l][nth_byte(key0, l)] != num) {
+ cols[ncols++] = l;
+ }
+ }
+
+ for (size_t l = 0; l < ncols; l++) {
+ npy_intp a = 0;
+ for (npy_intp i = 0; i < 256; i++) {
+ npy_intp b = cnt[cols[l]][i];
+ cnt[cols[l]][i] = a;
+ a += b;
+ }
+ }
+
+ for (size_t l = 0; l < ncols; l++) {
+ T *temp;
+ for (npy_intp i = 0; i < num; i++) {
+ T k = KEY_OF(start[i]);
+ npy_intp dst = cnt[cols[l]][nth_byte(k, cols[l])]++;
+ aux[dst] = start[i];
+ }
+
+ temp = aux;
+ aux = start;
+ start = temp;
+ }
+
+ return start;
+}
+
+template <class T>
+static int
+radixsort_(T *start, npy_intp num)
+{
+ if (num < 2) {
+ return 0;
+ }
+
+ npy_bool all_sorted = 1;
+ T k1 = KEY_OF(start[0]), k2;
+ for (npy_intp i = 1; i < num; i++) {
+ k2 = KEY_OF(start[i]);
+ if (k1 > k2) {
+ all_sorted = 0;
+ break;
+ }
+ k1 = k2;
+ }
+
+ if (all_sorted) {
+ return 0;
+ }
+
+ T *aux = (T *)malloc(num * sizeof(T));
+ if (aux == nullptr) {
+ return -NPY_ENOMEM;
+ }
+
+ T *sorted = radixsort0(start, aux, num);
+ if (sorted != start) {
+ memcpy(start, sorted, num * sizeof(T));
+ }
+
+ free(aux);
+ return 0;
+}
+
+template <class T>
+static int
+radixsort(void *start, npy_intp num)
+{
+ return radixsort_((T *)start, num);
+}
+
+template <class T>
+static npy_intp *
+aradixsort0(T *start, npy_intp *aux, npy_intp *tosort, npy_intp num)
+{
+ npy_intp cnt[sizeof(T)][1 << 8] = {{0}};
+ T key0 = KEY_OF(start[0]);
+
+ for (npy_intp i = 0; i < num; i++) {
+ T k = KEY_OF(start[i]);
+
+ for (size_t l = 0; l < sizeof(T); l++) {
+ cnt[l][nth_byte(k, l)]++;
+ }
+ }
+
+ size_t ncols = 0;
+ npy_ubyte cols[sizeof(T)];
+ for (size_t l = 0; l < sizeof(T); l++) {
+ if (cnt[l][nth_byte(key0, l)] != num) {
+ cols[ncols++] = l;
+ }
+ }
+
+ for (size_t l = 0; l < ncols; l++) {
+ npy_intp a = 0;
+ for (npy_intp i = 0; i < 256; i++) {
+ npy_intp b = cnt[cols[l]][i];
+ cnt[cols[l]][i] = a;
+ a += b;
+ }
+ }
+
+ for (size_t l = 0; l < ncols; l++) {
+ npy_intp *temp;
+ for (npy_intp i = 0; i < num; i++) {
+ T k = KEY_OF(start[tosort[i]]);
+ npy_intp dst = cnt[cols[l]][nth_byte(k, cols[l])]++;
+ aux[dst] = tosort[i];
+ }
+
+ temp = aux;
+ aux = tosort;
+ tosort = temp;
+ }
+
+ return tosort;
+}
+
+template <class T>
+static int
+aradixsort_(T *start, npy_intp *tosort, npy_intp num)
+{
+ npy_intp *sorted;
+ npy_intp *aux;
+ T k1, k2;
+ npy_bool all_sorted = 1;
+
+ if (num < 2) {
+ return 0;
+ }
+
+ k1 = KEY_OF(start[tosort[0]]);
+ for (npy_intp i = 1; i < num; i++) {
+ k2 = KEY_OF(start[tosort[i]]);
+ if (k1 > k2) {
+ all_sorted = 0;
+ break;
+ }
+ k1 = k2;
+ }
+
+ if (all_sorted) {
+ return 0;
+ }
+
+ aux = (npy_intp *)malloc(num * sizeof(npy_intp));
+ if (aux == NULL) {
+ return -NPY_ENOMEM;
+ }
+
+ sorted = aradixsort0(start, aux, tosort, num);
+ if (sorted != tosort) {
+ memcpy(tosort, sorted, num * sizeof(npy_intp));
+ }
+
+ free(aux);
+ return 0;
+}
+
+template <class T>
+static int
+aradixsort(void *start, npy_intp *tosort, npy_intp num)
+{
+ return aradixsort_((T *)start, tosort, num);
+}
+
+extern "C" {
+NPY_NO_EXPORT int
+radixsort_bool(void *vec, npy_intp cnt, void *NPY_UNUSED(null))
+{
+ return radixsort<npy_bool>(vec, cnt);
+}
+NPY_NO_EXPORT int
+radixsort_byte(void *vec, npy_intp cnt, void *NPY_UNUSED(null))
+{
+ return radixsort<npy_byte>(vec, cnt);
+}
+NPY_NO_EXPORT int
+radixsort_ubyte(void *vec, npy_intp cnt, void *NPY_UNUSED(null))
+{
+ return radixsort<npy_ubyte>(vec, cnt);
+}
+NPY_NO_EXPORT int
+radixsort_short(void *vec, npy_intp cnt, void *NPY_UNUSED(null))
+{
+ return radixsort<npy_short>(vec, cnt);
+}
+NPY_NO_EXPORT int
+radixsort_ushort(void *vec, npy_intp cnt, void *NPY_UNUSED(null))
+{
+ return radixsort<npy_ushort>(vec, cnt);
+}
+NPY_NO_EXPORT int
+radixsort_int(void *vec, npy_intp cnt, void *NPY_UNUSED(null))
+{
+ return radixsort<npy_int>(vec, cnt);
+}
+NPY_NO_EXPORT int
+radixsort_uint(void *vec, npy_intp cnt, void *NPY_UNUSED(null))
+{
+ return radixsort<npy_uint>(vec, cnt);
+}
+NPY_NO_EXPORT int
+radixsort_long(void *vec, npy_intp cnt, void *NPY_UNUSED(null))
+{
+ return radixsort<npy_long>(vec, cnt);
+}
+NPY_NO_EXPORT int
+radixsort_ulong(void *vec, npy_intp cnt, void *NPY_UNUSED(null))
+{
+ return radixsort<npy_ulong>(vec, cnt);
+}
+NPY_NO_EXPORT int
+radixsort_longlong(void *vec, npy_intp cnt, void *NPY_UNUSED(null))
+{
+ return radixsort<npy_longlong>(vec, cnt);
+}
+NPY_NO_EXPORT int
+radixsort_ulonglong(void *vec, npy_intp cnt, void *NPY_UNUSED(null))
+{
+ return radixsort<npy_ulonglong>(vec, cnt);
+}
+NPY_NO_EXPORT int
+aradixsort_bool(void *vec, npy_intp *ind, npy_intp cnt, void *NPY_UNUSED(null))
+{
+ return aradixsort<npy_bool>(vec, ind, cnt);
+}
+NPY_NO_EXPORT int
+aradixsort_byte(void *vec, npy_intp *ind, npy_intp cnt, void *NPY_UNUSED(null))
+{
+ return aradixsort<npy_byte>(vec, ind, cnt);
+}
+NPY_NO_EXPORT int
+aradixsort_ubyte(void *vec, npy_intp *ind, npy_intp cnt,
+ void *NPY_UNUSED(null))
+{
+ return aradixsort<npy_ubyte>(vec, ind, cnt);
+}
+NPY_NO_EXPORT int
+aradixsort_short(void *vec, npy_intp *ind, npy_intp cnt,
+ void *NPY_UNUSED(null))
+{
+ return aradixsort<npy_short>(vec, ind, cnt);
+}
+NPY_NO_EXPORT int
+aradixsort_ushort(void *vec, npy_intp *ind, npy_intp cnt,
+ void *NPY_UNUSED(null))
+{
+ return aradixsort<npy_ushort>(vec, ind, cnt);
+}
+NPY_NO_EXPORT int
+aradixsort_int(void *vec, npy_intp *ind, npy_intp cnt, void *NPY_UNUSED(null))
+{
+ return aradixsort<npy_int>(vec, ind, cnt);
+}
+NPY_NO_EXPORT int
+aradixsort_uint(void *vec, npy_intp *ind, npy_intp cnt, void *NPY_UNUSED(null))
+{
+ return aradixsort<npy_uint>(vec, ind, cnt);
+}
+NPY_NO_EXPORT int
+aradixsort_long(void *vec, npy_intp *ind, npy_intp cnt, void *NPY_UNUSED(null))
+{
+ return aradixsort<npy_long>(vec, ind, cnt);
+}
+NPY_NO_EXPORT int
+aradixsort_ulong(void *vec, npy_intp *ind, npy_intp cnt,
+ void *NPY_UNUSED(null))
+{
+ return aradixsort<npy_ulong>(vec, ind, cnt);
+}
+NPY_NO_EXPORT int
+aradixsort_longlong(void *vec, npy_intp *ind, npy_intp cnt,
+ void *NPY_UNUSED(null))
+{
+ return aradixsort<npy_longlong>(vec, ind, cnt);
+}
+NPY_NO_EXPORT int
+aradixsort_ulonglong(void *vec, npy_intp *ind, npy_intp cnt,
+ void *NPY_UNUSED(null))
+{
+ return aradixsort<npy_ulonglong>(vec, ind, cnt);
+}
+}
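
The key derivation is easiest to see on 8-bit values; a minimal Python sketch
of the signed case (flipping the sign bit maps the signed range onto an
unsigned range with the same ordering):

    def key_of_int8(x):
        # Mirrors KEY_OF for a signed 8-bit type: flip the sign bit.
        return (x & 0xFF) ^ 0x80

    vals = [-128, -1, 0, 1, 127]
    keys = [key_of_int8(v) for v in vals]
    print(keys)                  # [0, 127, 128, 129, 255]
    assert keys == sorted(keys)  # key order matches value order
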
diff --git a/numpy/core/src/umath/_scaled_float_dtype.c b/numpy/core/src/umath/_scaled_float_dtype.c
index eeef33a3d..b6c19362a 100644
--- a/numpy/core/src/umath/_scaled_float_dtype.c
+++ b/numpy/core/src/umath/_scaled_float_dtype.c
@@ -398,6 +398,42 @@ float_to_from_sfloat_resolve_descriptors(
}
+/*
+ * Cast to boolean (for testing the logical functions a bit better).
+ */
+static int
+cast_sfloat_to_bool(PyArrayMethod_Context *NPY_UNUSED(context),
+ char *const data[], npy_intp const dimensions[],
+ npy_intp const strides[], NpyAuxData *NPY_UNUSED(auxdata))
+{
+ npy_intp N = dimensions[0];
+ char *in = data[0];
+ char *out = data[1];
+ for (npy_intp i = 0; i < N; i++) {
+ *(npy_bool *)out = *(double *)in != 0;
+ in += strides[0];
+ out += strides[1];
+ }
+ return 0;
+}
+
+static NPY_CASTING
+sfloat_to_bool_resolve_descriptors(
+ PyArrayMethodObject *NPY_UNUSED(self),
+ PyArray_DTypeMeta *NPY_UNUSED(dtypes[2]),
+ PyArray_Descr *given_descrs[2],
+ PyArray_Descr *loop_descrs[2])
+{
+ Py_INCREF(given_descrs[0]);
+ loop_descrs[0] = given_descrs[0];
+ if (loop_descrs[0] == NULL) {
+ return -1;
+ }
+ loop_descrs[1] = PyArray_DescrFromType(NPY_BOOL); /* cannot fail */
+ return NPY_UNSAFE_CASTING;
+}
+
+
static int
init_casts(void)
{
@@ -453,6 +489,22 @@ init_casts(void)
return -1;
}
+ slots[0].slot = NPY_METH_resolve_descriptors;
+ slots[0].pfunc = &sfloat_to_bool_resolve_descriptors;
+ slots[1].slot = NPY_METH_strided_loop;
+ slots[1].pfunc = &cast_sfloat_to_bool;
+ slots[2].slot = 0;
+ slots[2].pfunc = NULL;
+
+ spec.name = "sfloat_to_bool_cast";
+ dtypes[0] = &PyArray_SFloatDType;
+ dtypes[1] = PyArray_DTypeFromTypeNum(NPY_BOOL);
+ Py_DECREF(dtypes[1]); /* immortal anyway */
+
+ if (PyArray_AddCastingImplementation_FromSpec(&spec, 0)) {
+ return -1;
+ }
+
return 0;
}
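
Assuming the internal helper `_get_sfloat_dtype` used by NumPy's own test
suite is available, the new sfloat->bool cast can be exercised roughly like
this (astype() defaults to casting="unsafe", which is what the cast reports):

    import numpy as np
    from numpy.core._multiarray_umath import _get_sfloat_dtype

    SF = _get_sfloat_dtype()
    arr = np.array([1.0, 0.0, -2.0]).astype(SF(1.0))
    print(arr.astype(bool))  # [ True False  True]
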
diff --git a/numpy/core/src/umath/_umath_tests.c.src b/numpy/core/src/umath/_umath_tests.c.src
index ed4c617a4..ce42fc271 100644
--- a/numpy/core/src/umath/_umath_tests.c.src
+++ b/numpy/core/src/umath/_umath_tests.c.src
@@ -58,6 +58,19 @@
*****************************************************************************
*/
+static void
+always_error_loop(
+ char **NPY_UNUSED(args), npy_intp const *NPY_UNUSED(dimensions),
+ npy_intp const *NPY_UNUSED(steps), void *NPY_UNUSED(func))
+{
+ NPY_ALLOW_C_API_DEF
+ NPY_ALLOW_C_API;
+ PyErr_SetString(PyExc_RuntimeError, "How unexpected :)!");
+ NPY_DISABLE_C_API;
+ return;
+}
+
+
char *inner1d_signature = "(i),(i)->()";
/**begin repeat
@@ -348,6 +361,9 @@ defdict = {
*/
+static PyUFuncGenericFunction always_error_functions[] = { always_error_loop };
+static void *always_error_data[] = { (void *)NULL };
+static char always_error_signatures[] = { NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE };
static PyUFuncGenericFunction inner1d_functions[] = { LONG_inner1d, DOUBLE_inner1d };
static void *inner1d_data[] = { (void *)NULL, (void *)NULL };
static char inner1d_signatures[] = { NPY_LONG, NPY_LONG, NPY_LONG, NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE };
@@ -375,6 +391,25 @@ static int
addUfuncs(PyObject *dictionary) {
PyObject *f;
+ f = PyUFunc_FromFuncAndData(always_error_functions, always_error_data,
+ always_error_signatures, 1, 2, 1, PyUFunc_None, "always_error",
+ "simply, broken, ufunc that sets an error (but releases the GIL).",
+ 0);
+ if (f == NULL) {
+ return -1;
+ }
+ PyDict_SetItemString(dictionary, "always_error", f);
+ Py_DECREF(f);
+ f = PyUFunc_FromFuncAndDataAndSignature(always_error_functions,
+ always_error_data, always_error_signatures, 1, 2, 1, PyUFunc_None,
+ "always_error_gufunc",
+ "simply, broken, gufunc that sets an error (but releases the GIL).",
+ 0, "(i),()->()");
+ if (f == NULL) {
+ return -1;
+ }
+ PyDict_SetItemString(dictionary, "always_error_gufunc", f);
+ Py_DECREF(f);
f = PyUFunc_FromFuncAndDataAndSignature(inner1d_functions, inner1d_data,
inner1d_signatures, 2, 2, 1, PyUFunc_None, "inner1d",
"inner on the last dimension and broadcast on the rest \n"
diff --git a/numpy/core/src/umath/clip.c.src b/numpy/core/src/umath/clip.c.src
deleted file mode 100644
index bc966b7ac..000000000
--- a/numpy/core/src/umath/clip.c.src
+++ /dev/null
@@ -1,120 +0,0 @@
-/**
- * This module provides the inner loops for the clip ufunc
- */
-#define PY_SSIZE_T_CLEAN
-#include <Python.h>
-
-#define _UMATHMODULE
-#define _MULTIARRAYMODULE
-#define NPY_NO_DEPRECATED_API NPY_API_VERSION
-
-#include "numpy/halffloat.h"
-#include "numpy/npy_math.h"
-#include "numpy/ndarraytypes.h"
-#include "numpy/npy_common.h"
-#include "numpy/utils.h"
-#include "fast_loop_macros.h"
-
-/*
- * Produce macros that perform nan/nat-propagating min and max
- */
-
-/**begin repeat
- * #name = BOOL,
- * BYTE, UBYTE, SHORT, USHORT, INT, UINT,
- * LONG, ULONG, LONGLONG, ULONGLONG#
- */
-#define _NPY_@name@_MIN(a, b) PyArray_MIN(a, b)
-#define _NPY_@name@_MAX(a, b) PyArray_MAX(a, b)
-/**end repeat**/
-
-#define _NPY_HALF_MIN(a, b) (npy_half_isnan(a) || npy_half_le(a, b) ? (a) : (b))
-#define _NPY_HALF_MAX(a, b) (npy_half_isnan(a) || npy_half_ge(a, b) ? (a) : (b))
-
-/**begin repeat
- * #name = FLOAT, DOUBLE, LONGDOUBLE#
- */
-#define _NPY_@name@_MIN(a, b) (npy_isnan(a) ? (a) : PyArray_MIN(a, b))
-#define _NPY_@name@_MAX(a, b) (npy_isnan(a) ? (a) : PyArray_MAX(a, b))
-/**end repeat**/
-
-/**begin repeat
- * #name = CFLOAT, CDOUBLE, CLONGDOUBLE#
- */
-#define _NPY_@name@_MIN(a, b) (npy_isnan((a).real) || npy_isnan((a).imag) || PyArray_CLT(a, b) ? (a) : (b))
-#define _NPY_@name@_MAX(a, b) (npy_isnan((a).real) || npy_isnan((a).imag) || PyArray_CGT(a, b) ? (a) : (b))
-/**end repeat**/
-
-/**begin repeat
- * #name = DATETIME, TIMEDELTA#
- */
-#define _NPY_@name@_MIN(a, b) ( \
- (a) == NPY_DATETIME_NAT ? (a) : \
- (b) == NPY_DATETIME_NAT ? (b) : \
- (a) < (b) ? (a) : (b) \
-)
-#define _NPY_@name@_MAX(a, b) ( \
- (a) == NPY_DATETIME_NAT ? (a) : \
- (b) == NPY_DATETIME_NAT ? (b) : \
- (a) > (b) ? (a) : (b) \
-)
-/**end repeat**/
-
-/**begin repeat
- *
- * #name = BOOL,
- * BYTE, UBYTE, SHORT, USHORT, INT, UINT,
- * LONG, ULONG, LONGLONG, ULONGLONG,
- * HALF, FLOAT, DOUBLE, LONGDOUBLE,
- * CFLOAT, CDOUBLE, CLONGDOUBLE,
- * DATETIME, TIMEDELTA#
- * #type = npy_bool,
- * npy_byte, npy_ubyte, npy_short, npy_ushort, npy_int, npy_uint,
- * npy_long, npy_ulong, npy_longlong, npy_ulonglong,
- * npy_half, npy_float, npy_double, npy_longdouble,
- * npy_cfloat, npy_cdouble, npy_clongdouble,
- * npy_datetime, npy_timedelta#
- */
-
-#define _NPY_CLIP(x, min, max) \
- _NPY_@name@_MIN(_NPY_@name@_MAX((x), (min)), (max))
-
-NPY_NO_EXPORT void
-@name@_clip(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
-{
- if (steps[1] == 0 && steps[2] == 0) {
- /* min and max are constant throughout the loop, the most common case */
- /* NOTE: it may be possible to optimize these checks for nan */
- @type@ min_val = *(@type@ *)args[1];
- @type@ max_val = *(@type@ *)args[2];
-
- char *ip1 = args[0], *op1 = args[3];
- npy_intp is1 = steps[0], os1 = steps[3];
- npy_intp n = dimensions[0];
-
- /* contiguous, branch to let the compiler optimize */
- if (is1 == sizeof(@type@) && os1 == sizeof(@type@)) {
- for(npy_intp i = 0; i < n; i++, ip1 += is1, op1 += os1) {
- *(@type@ *)op1 = _NPY_CLIP(*(@type@ *)ip1, min_val, max_val);
- }
- }
- else {
- for(npy_intp i = 0; i < n; i++, ip1 += is1, op1 += os1) {
- *(@type@ *)op1 = _NPY_CLIP(*(@type@ *)ip1, min_val, max_val);
- }
- }
- }
- else {
- TERNARY_LOOP {
- *(@type@ *)op1 = _NPY_CLIP(*(@type@ *)ip1, *(@type@ *)ip2, *(@type@ *)ip3);
- }
- }
- npy_clear_floatstatus_barrier((char*)dimensions);
-}
-
-// clean up the macros we defined above
-#undef _NPY_CLIP
-#undef _NPY_@name@_MAX
-#undef _NPY_@name@_MIN
-
-/**end repeat**/
diff --git a/numpy/core/src/umath/clip.cpp b/numpy/core/src/umath/clip.cpp
new file mode 100644
index 000000000..19d05c848
--- /dev/null
+++ b/numpy/core/src/umath/clip.cpp
@@ -0,0 +1,282 @@
+/**
+ * This module provides the inner loops for the clip ufunc
+ */
+#define _UMATHMODULE
+#define _MULTIARRAYMODULE
+#define NPY_NO_DEPRECATED_API NPY_API_VERSION
+
+#define PY_SSIZE_T_CLEAN
+#include <Python.h>
+
+#include "numpy/halffloat.h"
+#include "numpy/ndarraytypes.h"
+#include "numpy/npy_common.h"
+#include "numpy/npy_math.h"
+#include "numpy/utils.h"
+
+#include "fast_loop_macros.h"
+
+#include "../common/numpy_tag.h"
+
+template <class T>
+T
+_NPY_MIN(T a, T b, npy::integral_tag const &)
+{
+ return PyArray_MIN(a, b);
+}
+template <class T>
+T
+_NPY_MAX(T a, T b, npy::integral_tag const &)
+{
+ return PyArray_MAX(a, b);
+}
+
+npy_half
+_NPY_MIN(npy_half a, npy_half b, npy::half_tag const &)
+{
+ return npy_half_isnan(a) || npy_half_le(a, b) ? (a) : (b);
+}
+npy_half
+_NPY_MAX(npy_half a, npy_half b, npy::half_tag const &)
+{
+ return npy_half_isnan(a) || npy_half_ge(a, b) ? (a) : (b);
+}
+
+template <class T>
+T
+_NPY_MIN(T a, T b, npy::floating_point_tag const &)
+{
+ return npy_isnan(a) ? (a) : PyArray_MIN(a, b);
+}
+template <class T>
+T
+_NPY_MAX(T a, T b, npy::floating_point_tag const &)
+{
+ return npy_isnan(a) ? (a) : PyArray_MAX(a, b);
+}
+
+template <class T>
+T
+_NPY_MIN(T a, T b, npy::complex_tag const &)
+{
+ return npy_isnan((a).real) || npy_isnan((a).imag) || PyArray_CLT(a, b)
+ ? (a)
+ : (b);
+}
+template <class T>
+T
+_NPY_MAX(T a, T b, npy::complex_tag const &)
+{
+ return npy_isnan((a).real) || npy_isnan((a).imag) || PyArray_CGT(a, b)
+ ? (a)
+ : (b);
+}
+
+template <class T>
+T
+_NPY_MIN(T a, T b, npy::date_tag const &)
+{
+ return (a) == NPY_DATETIME_NAT ? (a)
+ : (b) == NPY_DATETIME_NAT ? (b)
+ : (a) < (b) ? (a)
+ : (b);
+}
+template <class T>
+T
+_NPY_MAX(T a, T b, npy::date_tag const &)
+{
+ return (a) == NPY_DATETIME_NAT ? (a)
+ : (b) == NPY_DATETIME_NAT ? (b)
+ : (a) > (b) ? (a)
+ : (b);
+}
+
+/* generic dispatcher */
+template <class Tag, class T = typename Tag::type>
+T
+_NPY_MIN(T const &a, T const &b)
+{
+ return _NPY_MIN(a, b, Tag{});
+}
+template <class Tag, class T = typename Tag::type>
+T
+_NPY_MAX(T const &a, T const &b)
+{
+ return _NPY_MAX(a, b, Tag{});
+}
+
+template <class Tag, class T>
+T
+_NPY_CLIP(T x, T min, T max)
+{
+ return _NPY_MIN<Tag>(_NPY_MAX<Tag>((x), (min)), (max));
+}
+
+template <class Tag, class T = typename Tag::type>
+static void
+_npy_clip_(T **args, npy_intp const *dimensions, npy_intp const *steps)
+{
+ npy_intp n = dimensions[0];
+ if (steps[1] == 0 && steps[2] == 0) {
+ /* min and max are constant throughout the loop, the most common case */
+ /* NOTE: it may be possible to optimize these checks for nan */
+ T min_val = *args[1];
+ T max_val = *args[2];
+
+ T *ip1 = args[0], *op1 = args[3];
+ npy_intp is1 = steps[0] / sizeof(T), os1 = steps[3] / sizeof(T);
+
+ /* contiguous, branch to let the compiler optimize */
+ if (is1 == 1 && os1 == 1) {
+ for (npy_intp i = 0; i < n; i++, ip1++, op1++) {
+ *op1 = _NPY_CLIP<Tag>(*ip1, min_val, max_val);
+ }
+ }
+ else {
+ for (npy_intp i = 0; i < n; i++, ip1 += is1, op1 += os1) {
+ *op1 = _NPY_CLIP<Tag>(*ip1, min_val, max_val);
+ }
+ }
+ }
+ else {
+ T *ip1 = args[0], *ip2 = args[1], *ip3 = args[2], *op1 = args[3];
+ npy_intp is1 = steps[0] / sizeof(T), is2 = steps[1] / sizeof(T),
+ is3 = steps[2] / sizeof(T), os1 = steps[3] / sizeof(T);
+ for (npy_intp i = 0; i < n;
+ i++, ip1 += is1, ip2 += is2, ip3 += is3, op1 += os1)
+ *op1 = _NPY_CLIP<Tag>(*ip1, *ip2, *ip3);
+ }
+ npy_clear_floatstatus_barrier((char *)dimensions);
+}
+
+template <class Tag>
+static void
+_npy_clip(char **args, npy_intp const *dimensions, npy_intp const *steps)
+{
+ using T = typename Tag::type;
+ return _npy_clip_<Tag>((T **)args, dimensions, steps);
+}
+
+extern "C" {
+NPY_NO_EXPORT void
+BOOL_clip(char **args, npy_intp const *dimensions, npy_intp const *steps,
+ void *NPY_UNUSED(func))
+{
+ return _npy_clip<npy::bool_tag>(args, dimensions, steps);
+}
+NPY_NO_EXPORT void
+BYTE_clip(char **args, npy_intp const *dimensions, npy_intp const *steps,
+ void *NPY_UNUSED(func))
+{
+ return _npy_clip<npy::byte_tag>(args, dimensions, steps);
+}
+NPY_NO_EXPORT void
+UBYTE_clip(char **args, npy_intp const *dimensions, npy_intp const *steps,
+ void *NPY_UNUSED(func))
+{
+ return _npy_clip<npy::ubyte_tag>(args, dimensions, steps);
+}
+NPY_NO_EXPORT void
+SHORT_clip(char **args, npy_intp const *dimensions, npy_intp const *steps,
+ void *NPY_UNUSED(func))
+{
+ return _npy_clip<npy::short_tag>(args, dimensions, steps);
+}
+NPY_NO_EXPORT void
+USHORT_clip(char **args, npy_intp const *dimensions, npy_intp const *steps,
+ void *NPY_UNUSED(func))
+{
+ return _npy_clip<npy::ushort_tag>(args, dimensions, steps);
+}
+NPY_NO_EXPORT void
+INT_clip(char **args, npy_intp const *dimensions, npy_intp const *steps,
+ void *NPY_UNUSED(func))
+{
+ return _npy_clip<npy::int_tag>(args, dimensions, steps);
+}
+NPY_NO_EXPORT void
+UINT_clip(char **args, npy_intp const *dimensions, npy_intp const *steps,
+ void *NPY_UNUSED(func))
+{
+ return _npy_clip<npy::uint_tag>(args, dimensions, steps);
+}
+NPY_NO_EXPORT void
+LONG_clip(char **args, npy_intp const *dimensions, npy_intp const *steps,
+ void *NPY_UNUSED(func))
+{
+ return _npy_clip<npy::long_tag>(args, dimensions, steps);
+}
+NPY_NO_EXPORT void
+ULONG_clip(char **args, npy_intp const *dimensions, npy_intp const *steps,
+ void *NPY_UNUSED(func))
+{
+ return _npy_clip<npy::ulong_tag>(args, dimensions, steps);
+}
+NPY_NO_EXPORT void
+LONGLONG_clip(char **args, npy_intp const *dimensions, npy_intp const *steps,
+ void *NPY_UNUSED(func))
+{
+ return _npy_clip<npy::longlong_tag>(args, dimensions, steps);
+}
+NPY_NO_EXPORT void
+ULONGLONG_clip(char **args, npy_intp const *dimensions, npy_intp const *steps,
+ void *NPY_UNUSED(func))
+{
+ return _npy_clip<npy::ulonglong_tag>(args, dimensions, steps);
+}
+NPY_NO_EXPORT void
+HALF_clip(char **args, npy_intp const *dimensions, npy_intp const *steps,
+ void *NPY_UNUSED(func))
+{
+ return _npy_clip<npy::half_tag>(args, dimensions, steps);
+}
+NPY_NO_EXPORT void
+FLOAT_clip(char **args, npy_intp const *dimensions, npy_intp const *steps,
+ void *NPY_UNUSED(func))
+{
+ return _npy_clip<npy::float_tag>(args, dimensions, steps);
+}
+NPY_NO_EXPORT void
+DOUBLE_clip(char **args, npy_intp const *dimensions, npy_intp const *steps,
+ void *NPY_UNUSED(func))
+{
+ return _npy_clip<npy::double_tag>(args, dimensions, steps);
+}
+NPY_NO_EXPORT void
+LONGDOUBLE_clip(char **args, npy_intp const *dimensions, npy_intp const *steps,
+ void *NPY_UNUSED(func))
+{
+ return _npy_clip<npy::longdouble_tag>(args, dimensions, steps);
+}
+NPY_NO_EXPORT void
+CFLOAT_clip(char **args, npy_intp const *dimensions, npy_intp const *steps,
+ void *NPY_UNUSED(func))
+{
+ return _npy_clip<npy::cfloat_tag>(args, dimensions, steps);
+}
+NPY_NO_EXPORT void
+CDOUBLE_clip(char **args, npy_intp const *dimensions, npy_intp const *steps,
+ void *NPY_UNUSED(func))
+{
+ return _npy_clip<npy::cdouble_tag>(args, dimensions, steps);
+}
+NPY_NO_EXPORT void
+CLONGDOUBLE_clip(char **args, npy_intp const *dimensions,
+ npy_intp const *steps, void *NPY_UNUSED(func))
+{
+ return _npy_clip<npy::clongdouble_tag>(args, dimensions, steps);
+}
+NPY_NO_EXPORT void
+DATETIME_clip(char **args, npy_intp const *dimensions, npy_intp const *steps,
+ void *NPY_UNUSED(func))
+{
+ return _npy_clip<npy::datetime_tag>(args, dimensions, steps);
+}
+NPY_NO_EXPORT void
+TIMEDELTA_clip(char **args, npy_intp const *dimensions, npy_intp const *steps,
+ void *NPY_UNUSED(func))
+{
+ return _npy_clip<npy::timedelta_tag>(args, dimensions, steps);
+}
+}
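
Behavior is unchanged by the C++ port; a tiny Python sketch of the
nan-propagating min/max used by clip:

    import numpy as np

    # NaN in the input propagates through the nan-aware _NPY_MIN/_NPY_MAX:
    print(np.clip(np.array([-1.0, 0.5, np.nan]), 0.0, 1.0))
    # [ 0.   0.5  nan]
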
diff --git a/numpy/core/src/umath/clip.h b/numpy/core/src/umath/clip.h
new file mode 100644
index 000000000..f69ebd1e3
--- /dev/null
+++ b/numpy/core/src/umath/clip.h
@@ -0,0 +1,73 @@
+#ifndef _NPY_UMATH_CLIP_H_
+#define _NPY_UMATH_CLIP_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+NPY_NO_EXPORT void
+BOOL_clip(char **args, npy_intp const *dimensions, npy_intp const *steps,
+ void *NPY_UNUSED(func));
+NPY_NO_EXPORT void
+BYTE_clip(char **args, npy_intp const *dimensions, npy_intp const *steps,
+ void *NPY_UNUSED(func));
+NPY_NO_EXPORT void
+UBYTE_clip(char **args, npy_intp const *dimensions, npy_intp const *steps,
+ void *NPY_UNUSED(func));
+NPY_NO_EXPORT void
+SHORT_clip(char **args, npy_intp const *dimensions, npy_intp const *steps,
+ void *NPY_UNUSED(func));
+NPY_NO_EXPORT void
+USHORT_clip(char **args, npy_intp const *dimensions, npy_intp const *steps,
+ void *NPY_UNUSED(func));
+NPY_NO_EXPORT void
+INT_clip(char **args, npy_intp const *dimensions, npy_intp const *steps,
+ void *NPY_UNUSED(func));
+NPY_NO_EXPORT void
+UINT_clip(char **args, npy_intp const *dimensions, npy_intp const *steps,
+ void *NPY_UNUSED(func));
+NPY_NO_EXPORT void
+LONG_clip(char **args, npy_intp const *dimensions, npy_intp const *steps,
+ void *NPY_UNUSED(func));
+NPY_NO_EXPORT void
+ULONG_clip(char **args, npy_intp const *dimensions, npy_intp const *steps,
+ void *NPY_UNUSED(func));
+NPY_NO_EXPORT void
+LONGLONG_clip(char **args, npy_intp const *dimensions, npy_intp const *steps,
+ void *NPY_UNUSED(func));
+NPY_NO_EXPORT void
+ULONGLONG_clip(char **args, npy_intp const *dimensions, npy_intp const *steps,
+ void *NPY_UNUSED(func));
+NPY_NO_EXPORT void
+HALF_clip(char **args, npy_intp const *dimensions, npy_intp const *steps,
+ void *NPY_UNUSED(func));
+NPY_NO_EXPORT void
+FLOAT_clip(char **args, npy_intp const *dimensions, npy_intp const *steps,
+ void *NPY_UNUSED(func));
+NPY_NO_EXPORT void
+DOUBLE_clip(char **args, npy_intp const *dimensions, npy_intp const *steps,
+ void *NPY_UNUSED(func));
+NPY_NO_EXPORT void
+LONGDOUBLE_clip(char **args, npy_intp const *dimensions, npy_intp const *steps,
+ void *NPY_UNUSED(func));
+NPY_NO_EXPORT void
+CFLOAT_clip(char **args, npy_intp const *dimensions, npy_intp const *steps,
+ void *NPY_UNUSED(func));
+NPY_NO_EXPORT void
+CDOUBLE_clip(char **args, npy_intp const *dimensions, npy_intp const *steps,
+ void *NPY_UNUSED(func));
+NPY_NO_EXPORT void
+CLONGDOUBLE_clip(char **args, npy_intp const *dimensions,
+ npy_intp const *steps, void *NPY_UNUSED(func));
+NPY_NO_EXPORT void
+DATETIME_clip(char **args, npy_intp const *dimensions, npy_intp const *steps,
+ void *NPY_UNUSED(func));
+NPY_NO_EXPORT void
+TIMEDELTA_clip(char **args, npy_intp const *dimensions, npy_intp const *steps,
+ void *NPY_UNUSED(func));
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/numpy/core/src/umath/clip.h.src b/numpy/core/src/umath/clip.h.src
deleted file mode 100644
index f16856cdf..000000000
--- a/numpy/core/src/umath/clip.h.src
+++ /dev/null
@@ -1,18 +0,0 @@
-#ifndef _NPY_UMATH_CLIP_H_
-#define _NPY_UMATH_CLIP_H_
-
-
-/**begin repeat
- *
- * #name = BOOL,
- * BYTE, UBYTE, SHORT, USHORT, INT, UINT,
- * LONG, ULONG, LONGLONG, ULONGLONG,
- * HALF, FLOAT, DOUBLE, LONGDOUBLE,
- * CFLOAT, CDOUBLE, CLONGDOUBLE,
- * DATETIME, TIMEDELTA#
- */
-NPY_NO_EXPORT void
-@name@_clip(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
-/**end repeat**/
-
-#endif
diff --git a/numpy/core/src/umath/dispatching.c b/numpy/core/src/umath/dispatching.c
index 40de28754..9c76b40e0 100644
--- a/numpy/core/src/umath/dispatching.c
+++ b/numpy/core/src/umath/dispatching.c
@@ -267,8 +267,39 @@ resolve_implementation_info(PyUFuncObject *ufunc,
* the subclass should be considered a better match
* (subclasses are always more specific).
*/
+ /* Whether this (normally output) dtype was specified at all */
+ if (op_dtypes[i] == NULL) {
+ /*
+ * When DType is completely unspecified, prefer abstract
+ * over concrete, assuming it will resolve.
+ * Furthermore, we cannot decide which abstract/None
+ * is "better", only concrete ones which are subclasses
+ * of Abstract ones are defined as worse.
+ */
+ npy_bool prev_is_concrete = NPY_FALSE;
+ npy_bool new_is_concrete = NPY_FALSE;
+ if ((prev_dtype != Py_None) &&
+ !NPY_DT_is_abstract((PyArray_DTypeMeta *)prev_dtype)) {
+ prev_is_concrete = NPY_TRUE;
+ }
+ if ((new_dtype != Py_None) &&
+ !NPY_DT_is_abstract((PyArray_DTypeMeta *)new_dtype)) {
+ new_is_concrete = NPY_TRUE;
+ }
+ if (prev_is_concrete == new_is_concrete) {
+ best = -1;
+ }
+ else if (prev_is_concrete) {
+ unambiguously_equally_good = 0;
+ best = 1;
+ }
+ else {
+ unambiguously_equally_good = 0;
+ best = 0;
+ }
+ }
/* If either is None, the other is strictly more specific */
- if (prev_dtype == Py_None) {
+ else if (prev_dtype == Py_None) {
unambiguously_equally_good = 0;
best = 1;
}
@@ -289,13 +320,29 @@ resolve_implementation_info(PyUFuncObject *ufunc,
*/
best = -1;
}
+ else if (!NPY_DT_is_abstract((PyArray_DTypeMeta *)prev_dtype)) {
+ /* old is not abstract, so better (both not possible) */
+ unambiguously_equally_good = 0;
+ best = 0;
+ }
+ else if (!NPY_DT_is_abstract((PyArray_DTypeMeta *)new_dtype)) {
+ /* new is not abstract, so better (both not possible) */
+ unambiguously_equally_good = 0;
+ best = 1;
+ }
/*
- * TODO: Unreachable, but we will need logic for abstract
- * DTypes to decide if one is a subclass of the other
- * (And their subclass relation is well defined.)
+ * TODO: This will need logic for abstract DTypes to decide if
+ * one is a subclass of the other (And their subclass
+ * relation is well defined). For now, we bail out
+ * in case someone manages to get here.
*/
else {
- assert(0);
+ PyErr_SetString(PyExc_NotImplementedError,
+ "deciding which one of two abstract dtypes is "
+ "a better match is not yet implemented. This "
+ "will pick the better (or bail) in the future.");
+ *out_info = NULL;
+ return -1;
}
if ((current_best != -1) && (current_best != best)) {
@@ -612,6 +659,35 @@ promote_and_get_info_and_ufuncimpl(PyUFuncObject *ufunc,
}
return info;
}
+ else if (info == NULL && op_dtypes[0] == NULL) {
+ /*
+ * If we have a reduction, fill in the unspecified input/array
+ * assuming it should have the same dtype as the operand input
+ * (or the output one if given).
+ * Then, try again. In some cases, this will choose different
+ * paths, such as `ll->?` instead of an `??->?` loop for `np.equal`
+ * when the input is `.l->.` (`.` meaning undefined). This will
+ * then cause an error. But a cast to `?` would always lose
+ * information, and in many cases important information:
+ *
+ * ```python
+ * from operator import eq
+ * from functools import reduce
+ *
+ * reduce(eq, [1, 2, 3]) != reduce(eq, [True, True, True])
+ * ```
+ *
+ * The special cases are `logical_(and|or|xor)`, which can always
+ * cast to boolean ahead of time and still give the right answer
+ * (unsafe cast to bool is fine here). We special case these at
+ * the time of this comment (NumPy 1.21).
+ */
+ assert(ufunc->nin == 2 && ufunc->nout == 1);
+ op_dtypes[0] = op_dtypes[2] != NULL ? op_dtypes[2] : op_dtypes[1];
+ Py_INCREF(op_dtypes[0]);
+ return promote_and_get_info_and_ufuncimpl(ufunc,
+ ops, signature, op_dtypes, allow_legacy_promotion, 1);
+ }
}
/*
@@ -743,3 +819,94 @@ promote_and_get_ufuncimpl(PyUFuncObject *ufunc,
return method;
}
+
+
+/*
+ * Special promoter for the logical ufuncs. The logical ufuncs can always
+ * use the ??->? and still get the correct output (as long as the output
+ * is not supposed to be `object`).
+ */
+static int
+logical_ufunc_promoter(PyUFuncObject *NPY_UNUSED(ufunc),
+ PyArray_DTypeMeta *op_dtypes[], PyArray_DTypeMeta *signature[],
+ PyArray_DTypeMeta *new_op_dtypes[])
+{
+ /*
+ * If we find any object DType at all, we currently force to object.
+ * However, if the output is specified and not object, there is no point,
+ * it should be just as well to cast the input rather than doing the
+ * unsafe out cast.
+ */
+ int force_object = 0;
+
+ for (int i = 0; i < 3; i++) {
+ PyArray_DTypeMeta *item;
+ if (signature[i] != NULL) {
+ item = signature[i];
+ Py_INCREF(item);
+ if (item->type_num == NPY_OBJECT) {
+ force_object = 1;
+ }
+ }
+ else {
+ /* Always override to boolean */
+ item = PyArray_DTypeFromTypeNum(NPY_BOOL);
+ if (op_dtypes[i] != NULL && op_dtypes[i]->type_num == NPY_OBJECT) {
+ force_object = 1;
+ }
+ }
+ new_op_dtypes[i] = item;
+ }
+
+ if (!force_object || (op_dtypes[2] != NULL
+ && op_dtypes[2]->type_num != NPY_OBJECT)) {
+ return 0;
+ }
+ /*
+     * Actually, we have to use the OBJECT loop after all; set all we can
+     * to object (that may not work out, but we try).
+ *
+ * NOTE: Change this to check for `op_dtypes[0] == NULL` to STOP
+ * returning `object` for `np.logical_and.reduce(obj_arr)`
+ * which will also affect `np.all` and `np.any`!
+ */
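+    /*
+     * For illustration (current behavior, not something this change
+     * asserts):
+     *
+     * ```python
+     * import numpy as np
+     * obj_arr = np.array([True, False], dtype=object)
+     * np.logical_and.reduce(obj_arr)  # an `object` result, not np.bool_
+     * ```
+     */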
+ for (int i = 0; i < 3; i++) {
+ if (signature[i] != NULL) {
+ continue;
+ }
+ Py_SETREF(new_op_dtypes[i], PyArray_DTypeFromTypeNum(NPY_OBJECT));
+ }
+ return 0;
+}
+
+
+NPY_NO_EXPORT int
+install_logical_ufunc_promoter(PyObject *ufunc)
+{
+    if (Py_TYPE(ufunc) != &PyUFunc_Type) {
+        PyErr_SetString(PyExc_RuntimeError,
+                "internal NumPy error: logical ufunc was not a ufunc?!");
+ return -1;
+ }
+    PyObject *dtype_tuple = PyTuple_Pack(3,
+            &PyArrayDescr_Type, &PyArrayDescr_Type, &PyArrayDescr_Type);
+ if (dtype_tuple == NULL) {
+ return -1;
+ }
+ PyObject *promoter = PyCapsule_New(&logical_ufunc_promoter,
+ "numpy._ufunc_promoter", NULL);
+ if (promoter == NULL) {
+ Py_DECREF(dtype_tuple);
+ return -1;
+ }
+
+ PyObject *info = PyTuple_Pack(2, dtype_tuple, promoter);
+ Py_DECREF(dtype_tuple);
+ Py_DECREF(promoter);
+ if (info == NULL) {
+ return -1;
+ }
+
+ return PyUFunc_AddLoop((PyUFuncObject *)ufunc, info, 0);
+}
+
diff --git a/numpy/core/src/umath/dispatching.h b/numpy/core/src/umath/dispatching.h
index 8d116873c..2f314615d 100644
--- a/numpy/core/src/umath/dispatching.h
+++ b/numpy/core/src/umath/dispatching.h
@@ -26,4 +26,8 @@ NPY_NO_EXPORT PyObject *
add_and_return_legacy_wrapping_ufunc_loop(PyUFuncObject *ufunc,
PyArray_DTypeMeta *operation_dtypes[], int ignore_duplicate);
+NPY_NO_EXPORT int
+install_logical_ufunc_promoter(PyObject *ufunc);
+
+
#endif /*_NPY_DISPATCHING_H */
diff --git a/numpy/core/src/umath/legacy_array_method.c b/numpy/core/src/umath/legacy_array_method.c
index 77b1b9013..a423823d4 100644
--- a/numpy/core/src/umath/legacy_array_method.c
+++ b/numpy/core/src/umath/legacy_array_method.c
@@ -217,6 +217,25 @@ PyArray_NewLegacyWrappingArrayMethod(PyUFuncObject *ufunc,
*/
int any_output_flexible = 0;
NPY_ARRAYMETHOD_FLAGS flags = 0;
+ if (ufunc->nargs == 3 &&
+ signature[0]->type_num == NPY_BOOL &&
+ signature[1]->type_num == NPY_BOOL &&
+ signature[2]->type_num == NPY_BOOL && (
+ strcmp(ufunc->name, "logical_or") == 0 ||
+ strcmp(ufunc->name, "logical_and") == 0 ||
+ strcmp(ufunc->name, "logical_xor") == 0)) {
+        /*
+         * This is a logical ufunc and the `??->?` loop. It is always OK
+         * to cast any input to bool, because that cast is defined by
+         * truthiness.
+         * This ensures two things:
+         * 1. `np.all`/`np.any` know that force casting the input is OK
+         *    (they must do this since there are no `?l->?`, etc. loops)
+         * 2. The logical functions automatically work for any DType
+         *    implementing a cast to boolean.
+         */
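+        /*
+         * Illustrative sketch (not part of this change) of point 1:
+         *
+         * ```python
+         * import numpy as np
+         * np.all(np.array([1, 2, 0]))  # ints force-cast to bool -> False
+         * ```
+         */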
+ flags = _NPY_METH_FORCE_CAST_INPUTS;
+ }
for (int i = 0; i < ufunc->nin+ufunc->nout; i++) {
if (signature[i]->singleton->flags & (
diff --git a/numpy/core/src/umath/loops.h.src b/numpy/core/src/umath/loops.h.src
index 02d749a5e..0938cd050 100644
--- a/numpy/core/src/umath/loops.h.src
+++ b/numpy/core/src/umath/loops.h.src
@@ -210,6 +210,32 @@ NPY_CPU_DISPATCH_DECLARE(NPY_NO_EXPORT void @TYPE@_@kind@,
/**end repeat1**/
/**end repeat**/
+#ifndef NPY_DISABLE_OPTIMIZATION
+ #include "loops_umath_fp.dispatch.h"
+#endif
+
+/**begin repeat
+ * #TYPE = FLOAT, DOUBLE#
+ */
+/**begin repeat1
+ * #func = tanh, exp2, log2, log10, expm1, log1p, cbrt, tan, arcsin, arccos, arctan, sinh, cosh, arcsinh, arccosh, arctanh#
+ */
+
+NPY_CPU_DISPATCH_DECLARE(NPY_NO_EXPORT void @TYPE@_@func@,
+ (char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)))
+
+/**end repeat1**/
+/**end repeat**/
+
+/**begin repeat
+ * #func = sin, cos#
+ */
+
+NPY_CPU_DISPATCH_DECLARE(NPY_NO_EXPORT void DOUBLE_@func@,
+ (char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)))
+
+/**end repeat**/
+
/**begin repeat
* #TYPE = FLOAT, DOUBLE#
*/
diff --git a/numpy/core/src/umath/loops_exponent_log.dispatch.c.src b/numpy/core/src/umath/loops_exponent_log.dispatch.c.src
index cc0fd19bb..95cce553a 100644
--- a/numpy/core/src/umath/loops_exponent_log.dispatch.c.src
+++ b/numpy/core/src/umath/loops_exponent_log.dispatch.c.src
@@ -868,6 +868,32 @@ AVX512F_exp_DOUBLE(npy_double * op,
* = p(r)
* = 2((r/2) + 1/3*(r/2)^3 + 1/5*(r/2)^5 + ...)
*/
+
+/* LLVM has a bug where the AVX-512F intrinsic `_mm512_mask_mul_pd` emits an
+ * unmasked operation with a masked store. This can cause FP exceptions to
+ * occur for the lanes that are supposed to have been masked.
+ *
+ * See https://bugs.llvm.org/show_bug.cgi?id=51988
+ *
+ * Note that this affects LLVM-based compilers such as Apple Clang, Clang,
+ * and Intel's ICX.
+ */
+#if defined(__clang__)
+ #if defined(__apple_build_version__)
+ // Apple Clang
+ #if __apple_build_version__ > 11000000
+        // Apple Clang v11 and later
+ #define WORKAROUND_LLVM__mm512_mask_mul_pd
+ #endif
+ #else
+ // Clang, not Apple Clang
+        #if __clang_major__ > 9
+        // Clang v10 and later
+ #define WORKAROUND_LLVM__mm512_mask_mul_pd
+ #endif
+ #endif
+#endif
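+
+/* Sketch of the problematic pattern (for illustration only): with the bug,
+ * an expression such as
+ *
+ *     r = _mm512_mask_mul_pd(src, k, a, b);
+ *
+ * may compute `a * b` on all lanes and only apply the mask `k` to the
+ * store, so masked-off lanes can still raise spurious FP exceptions.
+ */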
+
static void
AVX512F_log_DOUBLE(npy_double * op,
npy_double * ip,
@@ -954,8 +980,12 @@ AVX512F_log_DOUBLE(npy_double * op,
denormal_mask = _mm512_cmp_epi64_mask(top12, _mm512_set1_epi64(0),
_CMP_EQ_OQ);
denormal_mask = (~zero_mask) & denormal_mask;
+ __m512d masked_x = x;
+ #ifdef WORKAROUND_LLVM__mm512_mask_mul_pd
+ masked_x = avx512_set_masked_lanes_pd(masked_x, zeros_d, (~denormal_mask));
+ #endif
ix = _mm512_castpd_si512(_mm512_mask_mul_pd(x, denormal_mask,
- x, _mm512_set1_pd(0x1p52)));
+ masked_x, _mm512_set1_pd(0x1p52)));
ix = _mm512_mask_sub_epi64(ix, denormal_mask,
ix, _mm512_set1_epi64(52ULL << 52));
@@ -1039,6 +1069,9 @@ AVX512F_log_DOUBLE(npy_double * op,
npy_set_floatstatus_divbyzero();
}
}
+
+#undef WORKAROUND_LLVM__mm512_mask_mul_pd
+
#endif // AVX512F_NOCLANG_BUG
#ifdef SIMD_AVX512_SKX
diff --git a/numpy/core/src/umath/loops_umath_fp.dispatch.c.src b/numpy/core/src/umath/loops_umath_fp.dispatch.c.src
new file mode 100644
index 000000000..852604655
--- /dev/null
+++ b/numpy/core/src/umath/loops_umath_fp.dispatch.c.src
@@ -0,0 +1,141 @@
+/*@targets
+ ** $maxopt baseline avx512_skx
+ */
+#include "numpy/npy_math.h"
+#include "simd/simd.h"
+#include "loops_utils.h"
+#include "loops.h"
+#include "npy_svml.h"
+#include "fast_loop_macros.h"
+
+#if NPY_SIMD && defined(NPY_HAVE_AVX512_SKX) && defined(NPY_CAN_LINK_SVML)
+/**begin repeat
+ * #sfx = f32, f64#
+ * #func_suffix = f16, 8#
+ */
+/**begin repeat1
+ * #func = tanh, exp2, log2, log10, expm1, log1p, cbrt, tan, asin, acos, atan, sinh, cosh, asinh, acosh, atanh#
+ * #default_val = 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0#
+ */
+static void
+simd_@func@_@sfx@(const npyv_lanetype_@sfx@ *src, npy_intp ssrc,
+ npyv_lanetype_@sfx@ *dst, npy_intp sdst, npy_intp len)
+{
+ const int vstep = npyv_nlanes_@sfx@;
+ for (; len > 0; len -= vstep, src += ssrc*vstep, dst += sdst*vstep) {
+ npyv_@sfx@ x;
+ #if @default_val@
+ if (ssrc == 1) {
+ x = npyv_load_till_@sfx@(src, len, @default_val@);
+ } else {
+ x = npyv_loadn_till_@sfx@(src, ssrc, len, @default_val@);
+ }
+ #else
+ if (ssrc == 1) {
+ x = npyv_load_tillz_@sfx@(src, len);
+ } else {
+ x = npyv_loadn_tillz_@sfx@(src, ssrc, len);
+ }
+ #endif
+ npyv_@sfx@ out = __svml_@func@@func_suffix@(x);
+ if (sdst == 1) {
+ npyv_store_till_@sfx@(dst, len, out);
+ } else {
+ npyv_storen_till_@sfx@(dst, sdst, len, out);
+ }
+ }
+ npyv_cleanup();
+}
+/**end repeat1**/
+/**end repeat**/
+
+/**begin repeat
+ * #func = sin, cos#
+ */
+static void
+simd_@func@_f64(const double *src, npy_intp ssrc,
+ double *dst, npy_intp sdst, npy_intp len)
+{
+ const int vstep = npyv_nlanes_f64;
+ for (; len > 0; len -= vstep, src += ssrc*vstep, dst += sdst*vstep) {
+ npyv_f64 x;
+ if (ssrc == 1) {
+ x = npyv_load_tillz_f64(src, len);
+ } else {
+ x = npyv_loadn_tillz_f64(src, ssrc, len);
+ }
+ npyv_f64 out = __svml_@func@8(x);
+ if (sdst == 1) {
+ npyv_store_till_f64(dst, len, out);
+ } else {
+ npyv_storen_till_f64(dst, sdst, len, out);
+ }
+ }
+ npyv_cleanup();
+}
+/**end repeat**/
+#endif
+
+/**begin repeat
+ * #TYPE = DOUBLE, FLOAT#
+ * #type = npy_double, npy_float#
+ * #vsub = , f#
+ * #sfx = f64, f32#
+ */
+/**begin repeat1
+ * #func = tanh, exp2, log2, log10, expm1, log1p, cbrt, tan, arcsin, arccos, arctan, sinh, cosh, arcsinh, arccosh, arctanh#
+ * #intrin = tanh, exp2, log2, log10, expm1, log1p, cbrt, tan, asin, acos, atan, sinh, cosh, asinh, acosh, atanh#
+ */
+NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@func@)
+(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data))
+{
+#if NPY_SIMD && defined(NPY_HAVE_AVX512_SKX) && defined(NPY_CAN_LINK_SVML)
+ const @type@ *src = (@type@*)args[0];
+ @type@ *dst = (@type@*)args[1];
+ const int lsize = sizeof(src[0]);
+ const npy_intp ssrc = steps[0] / lsize;
+ const npy_intp sdst = steps[1] / lsize;
+ const npy_intp len = dimensions[0];
+ assert(steps[0] % lsize == 0 && steps[1] % lsize == 0);
+ if (!is_mem_overlap(src, steps[0], dst, steps[1], len) &&
+ npyv_loadable_stride_@sfx@(ssrc) &&
+ npyv_storable_stride_@sfx@(sdst)) {
+ simd_@intrin@_@sfx@(src, ssrc, dst, sdst, len);
+ return;
+ }
+#endif
+ UNARY_LOOP {
+ const @type@ in1 = *(@type@ *)ip1;
+ *(@type@ *)op1 = npy_@intrin@@vsub@(in1);
+ }
+}
+/**end repeat1**/
+/**end repeat**/
+
+/**begin repeat
+ * #func = sin, cos#
+ */
+NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(DOUBLE_@func@)
+(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data))
+{
+#if NPY_SIMD && defined(NPY_HAVE_AVX512_SKX) && defined(NPY_CAN_LINK_SVML)
+ const double *src = (double*)args[0];
+ double *dst = (double*)args[1];
+ const int lsize = sizeof(src[0]);
+ const npy_intp ssrc = steps[0] / lsize;
+ const npy_intp sdst = steps[1] / lsize;
+ const npy_intp len = dimensions[0];
+ assert(steps[0] % lsize == 0 && steps[1] % lsize == 0);
+ if (!is_mem_overlap(src, steps[0], dst, steps[1], len) &&
+ npyv_loadable_stride_f64(ssrc) &&
+ npyv_storable_stride_f64(sdst)) {
+ simd_@func@_f64(src, ssrc, dst, sdst, len);
+ return;
+ }
+#endif
+ UNARY_LOOP {
+ const npy_double in1 = *(npy_double *)ip1;
+ *(npy_double *)op1 = npy_@func@(in1);
+ }
+}
+/**end repeat**/
diff --git a/numpy/core/src/umath/reduction.c b/numpy/core/src/umath/reduction.c
index d5a251368..c28c8abd8 100644
--- a/numpy/core/src/umath/reduction.c
+++ b/numpy/core/src/umath/reduction.c
@@ -145,14 +145,12 @@ PyArray_CopyInitialReduceValues(
* boilerplate code, just calling the appropriate inner loop function where
* necessary.
*
+ * context : The ArrayMethod context (with ufunc, method, and descriptors).
* operand : The array to be reduced.
* out : NULL, or the array into which to place the result.
* wheremask : NOT YET SUPPORTED, but this parameter is placed here
* so that support can be added in the future without breaking
* API compatibility. Pass in NULL.
- * operand_dtype : The dtype the inner loop expects for the operand.
- * result_dtype : The dtype the inner loop expects for the result.
- * casting : The casting rule to apply to the operands.
* axis_flags : Flags indicating the reduction axes of 'operand'.
* reorderable : If True, the reduction being done is reorderable, which
* means specifying multiple axes of reduction at once is ok,
@@ -182,10 +180,8 @@ PyArray_CopyInitialReduceValues(
* generalized ufuncs!)
*/
NPY_NO_EXPORT PyArrayObject *
-PyUFunc_ReduceWrapper(
+PyUFunc_ReduceWrapper(PyArrayMethod_Context *context,
PyArrayObject *operand, PyArrayObject *out, PyArrayObject *wheremask,
- PyArray_Descr *operand_dtype, PyArray_Descr *result_dtype,
- NPY_CASTING casting,
npy_bool *axis_flags, int reorderable, int keepdims,
PyObject *identity, PyArray_ReduceLoopFunc *loop,
void *data, npy_intp buffersize, const char *funcname, int errormask)
@@ -199,6 +195,8 @@ PyUFunc_ReduceWrapper(
PyArrayObject *op[3];
PyArray_Descr *op_dtypes[3];
npy_uint32 it_flags, op_flags[3];
+ /* Loop auxdata (must be freed on error) */
+ NpyAuxData *auxdata = NULL;
/* More than one axis means multiple orders are possible */
if (!reorderable && count_axes(PyArray_NDIM(operand), axis_flags) > 1) {
@@ -221,8 +219,8 @@ PyUFunc_ReduceWrapper(
/* Set up the iterator */
op[0] = out;
op[1] = operand;
- op_dtypes[0] = result_dtype;
- op_dtypes[1] = operand_dtype;
+ op_dtypes[0] = context->descriptors[0];
+ op_dtypes[1] = context->descriptors[1];
it_flags = NPY_ITER_BUFFERED |
NPY_ITER_EXTERNAL_LOOP |
@@ -291,7 +289,7 @@ PyUFunc_ReduceWrapper(
}
iter = NpyIter_AdvancedNew(wheremask == NULL ? 2 : 3, op, it_flags,
- NPY_KEEPORDER, casting,
+ NPY_KEEPORDER, NPY_UNSAFE_CASTING,
op_flags,
op_dtypes,
PyArray_NDIM(operand), op_axes, NULL, buffersize);
@@ -301,9 +299,29 @@ PyUFunc_ReduceWrapper(
result = NpyIter_GetOperandArray(iter)[0];
- int needs_api = NpyIter_IterationNeedsAPI(iter);
- /* Start with the floating-point exception flags cleared */
- npy_clear_floatstatus_barrier((char*)&iter);
+ PyArrayMethod_StridedLoop *strided_loop;
+ NPY_ARRAYMETHOD_FLAGS flags = 0;
+ npy_intp fixed_strides[3];
+ NpyIter_GetInnerFixedStrideArray(iter, fixed_strides);
+ if (wheremask != NULL) {
+ if (PyArrayMethod_GetMaskedStridedLoop(context,
+ 1, fixed_strides, &strided_loop, &auxdata, &flags) < 0) {
+ goto fail;
+ }
+ }
+ else {
+ if (context->method->get_strided_loop(context,
+ 1, 0, fixed_strides, &strided_loop, &auxdata, &flags) < 0) {
+ goto fail;
+ }
+ }
+
+ int needs_api = (flags & NPY_METH_REQUIRES_PYAPI) != 0;
+ needs_api |= NpyIter_IterationNeedsAPI(iter);
+ if (!(flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) {
+ /* Start with the floating-point exception flags cleared */
+ npy_clear_floatstatus_barrier((char*)&iter);
+ }
/*
* Initialize the result to the reduction unit if possible,
@@ -345,16 +363,18 @@ PyUFunc_ReduceWrapper(
strideptr = NpyIter_GetInnerStrideArray(iter);
countptr = NpyIter_GetInnerLoopSizePtr(iter);
- if (loop(iter, dataptr, strideptr, countptr,
- iternext, needs_api, skip_first_count, data) < 0) {
+ if (loop(context, strided_loop, auxdata,
+ iter, dataptr, strideptr, countptr, iternext,
+ needs_api, skip_first_count) < 0) {
goto fail;
}
}
- /* Check whether any errors occurred during the loop */
- if (PyErr_Occurred() ||
- _check_ufunc_fperr(errormask, NULL, "reduce") < 0) {
- goto fail;
+ if (!(flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) {
+ /* NOTE: We could check float errors even on error */
+ if (_check_ufunc_fperr(errormask, NULL, "reduce") < 0) {
+ goto fail;
+ }
}
if (out != NULL) {
@@ -369,6 +389,7 @@ PyUFunc_ReduceWrapper(
return result;
fail:
+ NPY_AUXDATA_FREE(auxdata);
if (iter != NULL) {
NpyIter_Deallocate(iter);
}
diff --git a/numpy/core/src/umath/reduction.h b/numpy/core/src/umath/reduction.h
index 372605dba..2170e27a7 100644
--- a/numpy/core/src/umath/reduction.h
+++ b/numpy/core/src/umath/reduction.h
@@ -19,93 +19,17 @@ typedef int (PyArray_AssignReduceIdentityFunc)(PyArrayObject *result,
void *data);
/*
- * This is a function for the reduce loop.
+ * Inner definition of the reduce loop, only used by a single static
+ * function. At some point around NumPy 1.6, there was probably an intention
+ * to make the reduce loop customizable at this level (per ufunc?).
*
- * The needs_api parameter indicates whether it's ok to release the GIL during
- * the loop, such as when the iternext() function never calls
- * a function which could raise a Python exception.
- *
- * The skip_first_count parameter indicates how many elements need to be
- * skipped based on NpyIter_IsFirstVisit checks. This can only be positive
- * when the 'assign_identity' parameter was NULL when calling
- * PyArray_ReduceWrapper.
- *
- * The loop gets two data pointers and two strides, and should
- * look roughly like this:
- * {
- * NPY_BEGIN_THREADS_DEF;
- * if (!needs_api) {
- * NPY_BEGIN_THREADS;
- * }
- * // This first-visit loop can be skipped if 'assign_identity' was non-NULL
- * if (skip_first_count > 0) {
- * do {
- * char *data0 = dataptr[0], *data1 = dataptr[1];
- * npy_intp stride0 = strideptr[0], stride1 = strideptr[1];
- * npy_intp count = *countptr;
- *
- * // Skip any first-visit elements
- * if (NpyIter_IsFirstVisit(iter, 0)) {
- * if (stride0 == 0) {
- * --count;
- * --skip_first_count;
- * data1 += stride1;
- * }
- * else {
- * skip_first_count -= count;
- * count = 0;
- * }
- * }
- *
- * while (count--) {
- * *(result_t *)data0 = my_reduce_op(*(result_t *)data0,
- * *(operand_t *)data1);
- * data0 += stride0;
- * data1 += stride1;
- * }
- *
- * // Jump to the faster loop when skipping is done
- * if (skip_first_count == 0) {
- * if (iternext(iter)) {
- * break;
- * }
- * else {
- * goto finish_loop;
- * }
- * }
- * } while (iternext(iter));
- * }
- * do {
- * char *data0 = dataptr[0], *data1 = dataptr[1];
- * npy_intp stride0 = strideptr[0], stride1 = strideptr[1];
- * npy_intp count = *countptr;
- *
- * while (count--) {
- * *(result_t *)data0 = my_reduce_op(*(result_t *)data0,
- * *(operand_t *)data1);
- * data0 += stride0;
- * data1 += stride1;
- * }
- * } while (iternext(iter));
- * finish_loop:
- * if (!needs_api) {
- * NPY_END_THREADS;
- * }
- * return (needs_api && PyErr_Occurred()) ? -1 : 0;
- * }
- *
- * If needs_api is True, this function should call PyErr_Occurred()
- * to check if an error occurred during processing, and return -1 for
- * error, 0 for success.
+ * TODO: This should be refactored/removed.
*/
-typedef int (PyArray_ReduceLoopFunc)(NpyIter *iter,
- char **dataptr,
- npy_intp const *strideptr,
- npy_intp const *countptr,
- NpyIter_IterNextFunc *iternext,
- int needs_api,
- npy_intp skip_first_count,
- void *data);
+typedef int (PyArray_ReduceLoopFunc)(PyArrayMethod_Context *context,
+ PyArrayMethod_StridedLoop *strided_loop, NpyAuxData *auxdata,
+ NpyIter *iter, char **dataptrs, npy_intp const *strides,
+ npy_intp const *countptr, NpyIter_IterNextFunc *iternext,
+ int needs_api, npy_intp skip_first_count);
/*
* This function executes all the standard NumPy reduction function
@@ -138,16 +62,10 @@ typedef int (PyArray_ReduceLoopFunc)(NpyIter *iter,
* errormask : forwarded from _get_bufsize_errmask
*/
NPY_NO_EXPORT PyArrayObject *
-PyUFunc_ReduceWrapper(PyArrayObject *operand, PyArrayObject *out,
- PyArrayObject *wheremask,
- PyArray_Descr *operand_dtype,
- PyArray_Descr *result_dtype,
- NPY_CASTING casting,
- npy_bool *axis_flags, int reorderable,
- int keepdims,
- PyObject *identity,
- PyArray_ReduceLoopFunc *loop,
- void *data, npy_intp buffersize, const char *funcname,
- int errormask);
+PyUFunc_ReduceWrapper(PyArrayMethod_Context *context,
+ PyArrayObject *operand, PyArrayObject *out, PyArrayObject *wheremask,
+ npy_bool *axis_flags, int reorderable, int keepdims,
+ PyObject *identity, PyArray_ReduceLoopFunc *loop,
+ void *data, npy_intp buffersize, const char *funcname, int errormask);
#endif
diff --git a/numpy/core/src/umath/simd.inc.src b/numpy/core/src/umath/simd.inc.src
index 654ab81cc..d47be9a30 100644
--- a/numpy/core/src/umath/simd.inc.src
+++ b/numpy/core/src/umath/simd.inc.src
@@ -116,9 +116,8 @@ run_binary_avx512f_@func@_@TYPE@(char **args, npy_intp const *dimensions, npy_in
#endif
return 0;
}
-
-
/**end repeat1**/
+
/**end repeat**/
/**begin repeat
@@ -1152,6 +1151,7 @@ NPY_FINLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ @vtype@d
* #is_finite = 0, 1, 0, 0#
* #is_signbit = 0, 0, 0, 1#
*/
+
#if defined HAVE_ATTRIBUTE_TARGET_AVX512_SKX_WITH_INTRINSICS && defined NPY_HAVE_SSE2_INTRINSICS
static NPY_INLINE NPY_GCC_TARGET_AVX512_SKX void
AVX512_SKX_@func@_@TYPE@(npy_bool* op, @type@* ip, const npy_intp array_size, const npy_intp steps)
diff --git a/numpy/core/src/umath/svml b/numpy/core/src/umath/svml
new file mode 160000
+Subproject 9f8af767ed6c75455d9a382af829048f8dd1806
diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c
index 714afb273..15385b624 100644
--- a/numpy/core/src/umath/ufunc_object.c
+++ b/numpy/core/src/umath/ufunc_object.c
@@ -616,9 +616,24 @@ _is_same_name(const char* s1, const char* s2)
}
/*
- * Sets core_num_dim_ix, core_num_dims, core_dim_ixs, core_offsets,
- * and core_signature in PyUFuncObject "ufunc". Returns 0 unless an
- * error occurred.
+ * Sets the following fields in the PyUFuncObject 'ufunc':
+ *
+ * Field Type Array Length
+ * core_enabled int (effectively bool) N/A
+ * core_num_dim_ix int N/A
+ * core_dim_flags npy_uint32 * core_num_dim_ix
+ * core_dim_sizes npy_intp * core_num_dim_ix
+ * core_num_dims int * nargs (i.e. nin+nout)
+ * core_offsets int * nargs
+ * core_dim_ixs int * sum(core_num_dims)
+ * core_signature char * strlen(signature) + 1
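+ *
+ * For example (illustrative, a matmul-like signature "(i,j),(j,k)->(i,k)"):
+ * core_num_dim_ix is 3 (for "i", "j", "k"), core_num_dims is {2, 2, 2},
+ * core_dim_ixs is {0, 1, 1, 2, 0, 2} and core_offsets is {0, 2, 4}.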
+ *
+ * The function assumes that the values that are arrays have not
+ * been set already, and sets these pointers to memory allocated
+ * with PyArray_malloc. These are freed when the ufunc dealloc
+ * method is called.
+ *
+ * Returns 0 unless an error occurred.
*/
static int
_parse_signature(PyUFuncObject *ufunc, const char *signature)
@@ -990,6 +1005,7 @@ convert_ufunc_arguments(PyUFuncObject *ufunc,
}
/* Convert and fill in output arguments */
+ memset(out_op_DTypes + nin, 0, nout * sizeof(*out_op_DTypes));
if (full_args.out != NULL) {
for (int i = 0; i < nout; i++) {
obj = PyTuple_GET_ITEM(full_args.out, i);
@@ -1047,6 +1063,7 @@ check_for_trivial_loop(PyArrayMethodObject *ufuncimpl,
PyArrayObject **op, PyArray_Descr **dtypes,
NPY_CASTING casting, npy_intp buffersize)
{
+ int force_cast_input = ufuncimpl->flags & _NPY_METH_FORCE_CAST_INPUTS;
int i, nin = ufuncimpl->nin, nop = nin + ufuncimpl->nout;
for (i = 0; i < nop; ++i) {
@@ -1070,7 +1087,13 @@ check_for_trivial_loop(PyArrayMethodObject *ufuncimpl,
must_copy = 1;
}
- if (PyArray_MinCastSafety(safety, casting) != casting) {
+ if (force_cast_input && i < nin) {
+ /*
+ * ArrayMethod flagged to ignore casting (logical funcs
+ * can force cast to bool)
+ */
+ }
+ else if (PyArray_MinCastSafety(safety, casting) != casting) {
return 0; /* the cast is not safe enough */
}
}
@@ -1325,6 +1348,14 @@ try_trivial_single_output_loop(PyArrayMethod_Context *context,
NPY_END_THREADS;
NPY_AUXDATA_FREE(auxdata);
+ /*
+     * If an error occurred, `res != 0` should already be set. But this
+     * is not strictly guaranteed for old-style ufuncs (e.g. `power`
+     * released the GIL but manually set an exception).
+ */
+ if (PyErr_Occurred()) {
+ res = -1;
+ }
if (res == 0 && !(flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) {
/* NOTE: We could check float errors even when `res < 0` */
@@ -1352,8 +1383,15 @@ validate_casting(PyArrayMethodObject *method, PyUFuncObject *ufunc,
*/
return 0;
}
- if (PyUFunc_ValidateCasting(ufunc, casting, ops, descriptors) < 0) {
- return -1;
+ if (method->flags & _NPY_METH_FORCE_CAST_INPUTS) {
+ if (PyUFunc_ValidateOutCasting(ufunc, casting, ops, descriptors) < 0) {
+ return -1;
+ }
+ }
+ else {
+ if (PyUFunc_ValidateCasting(ufunc, casting, ops, descriptors) < 0) {
+ return -1;
+ }
}
return 0;
}
@@ -2462,9 +2500,9 @@ PyUFunc_GeneralizedFunctionInternal(PyUFuncObject *ufunc,
/* Final preparation of the arraymethod call */
PyArrayMethod_Context context = {
- .caller = (PyObject *)ufunc,
- .method = ufuncimpl,
- .descriptors = operation_descrs,
+ .caller = (PyObject *)ufunc,
+ .method = ufuncimpl,
+ .descriptors = operation_descrs,
};
PyArrayMethod_StridedLoop *strided_loop;
NPY_ARRAYMETHOD_FLAGS flags = 0;
@@ -2519,7 +2557,7 @@ PyUFunc_GeneralizedFunctionInternal(PyUFuncObject *ufunc,
PyArray_free(inner_strides);
NPY_AUXDATA_FREE(auxdata);
- if (NpyIter_Deallocate(iter) < 0) {
+ if (!NpyIter_Deallocate(iter)) {
retval = -1;
}
@@ -2584,9 +2622,9 @@ PyUFunc_GenericFunctionInternal(PyUFuncObject *ufunc,
/* Final preparation of the arraymethod call */
PyArrayMethod_Context context = {
- .caller = (PyObject *)ufunc,
- .method = ufuncimpl,
- .descriptors = operation_descrs,
+ .caller = (PyObject *)ufunc,
+ .method = ufuncimpl,
+ .descriptors = operation_descrs,
};
/* Do the ufunc loop */
@@ -2653,195 +2691,129 @@ PyUFunc_GenericFunction(PyUFuncObject *NPY_UNUSED(ufunc),
/*
- * Given the output type, finds the specified binary op. The
- * ufunc must have nin==2 and nout==1. The function may modify
- * otype if the given type isn't found.
+ * Promote and resolve a reduction like operation.
*
- * Returns 0 on success, -1 on failure.
+ * @param ufunc
+ * @param arr The operation array
+ * @param out The output array or NULL if not provided. Note that NumPy always
+ * used out to mean the same as `dtype=out.dtype` and never passed
+ * the array itself to the type-resolution.
+ * @param signature The DType signature, which may already be set due to the
+ * dtype passed in by the user, or the special cases (add, multiply).
+ * (Contains strong references and may be modified.)
+ * @param enforce_uniform_args If `NPY_TRUE`, fully uniform dtypes/descriptors
+ * are enforced as required for accumulate and (currently) reduceat.
+ * @param out_descrs New references to the resolved descriptors (on success).
+ * @param method The ufunc method, "reduce", "reduceat", or "accumulate".
+ *
+ * @returns ufuncimpl The `ArrayMethod` implementation to use, or NULL if an
+ *     error occurred.
*/
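+/*
+ * Illustrative (not part of this change): `out` only influences the
+ * resolution through its dtype, e.g.
+ *
+ * ```python
+ * import numpy as np
+ * out = np.empty((), dtype=np.float32)
+ * np.add.reduce(np.array([1, 2, 3]), out=out)  # resolved as dtype=np.float32
+ * ```
+ */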
-static int
-get_binary_op_function(PyUFuncObject *ufunc, int *otype,
- PyUFuncGenericFunction *out_innerloop,
- void **out_innerloopdata)
+static PyArrayMethodObject *
+reducelike_promote_and_resolve(PyUFuncObject *ufunc,
+ PyArrayObject *arr, PyArrayObject *out,
+ PyArray_DTypeMeta *signature[3],
+ npy_bool enforce_uniform_args, PyArray_Descr *out_descrs[3],
+ char *method)
{
- int i;
-
- NPY_UF_DBG_PRINT1("Getting binary op function for type number %d\n",
- *otype);
-
- /* If the type is custom and there are userloops, search for it here */
- if (ufunc->userloops != NULL && PyTypeNum_ISUSERDEF(*otype)) {
- PyObject *key, *obj;
- key = PyLong_FromLong(*otype);
- if (key == NULL) {
- return -1;
- }
- obj = PyDict_GetItemWithError(ufunc->userloops, key);
- Py_DECREF(key);
- if (obj == NULL && PyErr_Occurred()) {
- return -1;
- }
- else if (obj != NULL) {
- PyUFunc_Loop1d *funcdata = PyCapsule_GetPointer(obj, NULL);
- if (funcdata == NULL) {
- return -1;
- }
- while (funcdata != NULL) {
- int *types = funcdata->arg_types;
-
- if (types[0] == *otype && types[1] == *otype &&
- types[2] == *otype) {
- *out_innerloop = funcdata->func;
- *out_innerloopdata = funcdata->data;
- return 0;
- }
+ /*
+     * Note that `ops` is not really correct. But legacy resolution
+ * cannot quite handle the correct ops (e.g. a NULL first item if `out`
+ * is NULL), and it should only matter in very strange cases.
+ */
+ PyArrayObject *ops[3] = {arr, arr, NULL};
+ /*
+ * TODO: If `out` is not provided, arguably `initial` could define
+ * the first DType (and maybe also the out one), that way
+ * `np.add.reduce([1, 2, 3], initial=3.4)` would return a float
+ * value. As of 1.20, it returned an integer, so that should
+ * probably go to an error/warning first.
+ */
+ PyArray_DTypeMeta *operation_DTypes[3] = {
+ NULL, NPY_DTYPE(PyArray_DESCR(arr)), NULL};
+ Py_INCREF(operation_DTypes[1]);
- funcdata = funcdata->next;
- }
- }
+ if (out != NULL) {
+ operation_DTypes[0] = NPY_DTYPE(PyArray_DESCR(out));
+ Py_INCREF(operation_DTypes[0]);
+ operation_DTypes[2] = operation_DTypes[0];
+ Py_INCREF(operation_DTypes[2]);
}
- /* Search for a function with compatible inputs */
- for (i = 0; i < ufunc->ntypes; ++i) {
- char *types = ufunc->types + i*ufunc->nargs;
-
- NPY_UF_DBG_PRINT3("Trying loop with signature %d %d -> %d\n",
- types[0], types[1], types[2]);
-
- if (PyArray_CanCastSafely(*otype, types[0]) &&
- types[0] == types[1] &&
- (*otype == NPY_OBJECT || types[0] != NPY_OBJECT)) {
- /* If the signature is "xx->x", we found the loop */
- if (types[2] == types[0]) {
- *out_innerloop = ufunc->functions[i];
- *out_innerloopdata = ufunc->data[i];
- *otype = types[0];
- return 0;
- }
- /*
- * Otherwise, we found the natural type of the reduction,
- * replace otype and search again
- */
- else {
- *otype = types[2];
- break;
- }
- }
+ PyArrayMethodObject *ufuncimpl = promote_and_get_ufuncimpl(ufunc,
+ ops, signature, operation_DTypes, NPY_FALSE, NPY_TRUE);
+ Py_DECREF(operation_DTypes[1]);
+ if (out != NULL) {
+ Py_DECREF(operation_DTypes[0]);
+ Py_DECREF(operation_DTypes[2]);
}
-
- /* Search for the exact function */
- for (i = 0; i < ufunc->ntypes; ++i) {
- char *types = ufunc->types + i*ufunc->nargs;
-
- if (PyArray_CanCastSafely(*otype, types[0]) &&
- types[0] == types[1] &&
- types[1] == types[2] &&
- (*otype == NPY_OBJECT || types[0] != NPY_OBJECT)) {
- /* Since the signature is "xx->x", we found the loop */
- *out_innerloop = ufunc->functions[i];
- *out_innerloopdata = ufunc->data[i];
- *otype = types[0];
- return 0;
- }
+ if (ufuncimpl == NULL) {
+ return NULL;
}
- return -1;
-}
-
-static int
-reduce_type_resolver(PyUFuncObject *ufunc, PyArrayObject *arr,
- PyArray_Descr *odtype, PyArray_Descr **out_dtype)
-{
- int i, retcode;
- PyArrayObject *op[3] = {arr, arr, NULL};
- PyArray_Descr *dtypes[3] = {NULL, NULL, NULL};
- const char *ufunc_name = ufunc_get_name_cstr(ufunc);
- PyObject *type_tup = NULL;
-
- *out_dtype = NULL;
-
/*
- * If odtype is specified, make a type tuple for the type
- * resolution.
+     * Find the correct descriptors for the operation. We use unsafe casting
+     * for historic reasons: the logical ufuncs required it to cast
+     * everything to boolean. However, we now special case the logical
+     * ufuncs, so the casting safety could in principle be set to the
+     * default same-kind (although this should probably happen through a
+     * deprecation).
*/
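+    /*
+     * For illustration (existing behavior, not introduced here): the unsafe
+     * casting means e.g. the following is accepted:
+     *
+     * ```python
+     * import numpy as np
+     * np.add.reduce(np.array([1.5, 2.5]), dtype=np.int64)
+     * # the float input is unsafely cast for the integer loop
+     * ```
+     */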
- if (odtype != NULL) {
- type_tup = PyTuple_Pack(3, odtype, odtype, Py_None);
- if (type_tup == NULL) {
- return -1;
- }
- }
-
- /* Use the type resolution function to find our loop */
- retcode = ufunc->type_resolver(
- ufunc, NPY_UNSAFE_CASTING,
- op, type_tup, dtypes);
- Py_DECREF(type_tup);
- if (retcode == -1) {
- return -1;
- }
- else if (retcode == -2) {
- PyErr_Format(PyExc_RuntimeError,
- "type resolution returned NotImplemented to "
- "reduce ufunc %s", ufunc_name);
- return -1;
+ if (resolve_descriptors(3, ufunc, ufuncimpl,
+ ops, out_descrs, signature, NPY_UNSAFE_CASTING) < 0) {
+ return NULL;
}
/*
- * The first two type should be equivalent. Because of how
- * reduce has historically behaved in NumPy, the return type
- * could be different, and it is the return type on which the
- * reduction occurs.
+ * The first operand and output should be the same array, so they should
+ * be identical. The second argument can be different for reductions,
+ * but is checked to be identical for accumulate and reduceat.
*/
- if (!PyArray_EquivTypes(dtypes[0], dtypes[1])) {
- for (i = 0; i < 3; ++i) {
- Py_DECREF(dtypes[i]);
- }
- PyErr_Format(PyExc_RuntimeError,
- "could not find a type resolution appropriate for "
- "reduce ufunc %s", ufunc_name);
- return -1;
+ if (out_descrs[0] != out_descrs[2] || (
+ enforce_uniform_args && out_descrs[0] != out_descrs[1])) {
+ PyErr_Format(PyExc_TypeError,
+ "the resolved dtypes are not compatible with %s.%s",
+ ufunc_get_name_cstr(ufunc), method);
+ goto fail;
+ }
+    /* TODO: This really should _not_ be unsafe casting (same as above)! */
+ if (validate_casting(ufuncimpl,
+ ufunc, ops, out_descrs, NPY_UNSAFE_CASTING) < 0) {
+ goto fail;
}
- Py_DECREF(dtypes[0]);
- Py_DECREF(dtypes[1]);
- *out_dtype = dtypes[2];
+ return ufuncimpl;
- return 0;
+ fail:
+ for (int i = 0; i < 3; ++i) {
+ Py_DECREF(out_descrs[i]);
+ }
+ return NULL;
}
+
static int
-reduce_loop(NpyIter *iter, char **dataptrs, npy_intp const *strides,
- npy_intp const *countptr, NpyIter_IterNextFunc *iternext,
- int needs_api, npy_intp skip_first_count, void *data)
+reduce_loop(PyArrayMethod_Context *context,
+ PyArrayMethod_StridedLoop *strided_loop, NpyAuxData *auxdata,
+ NpyIter *iter, char **dataptrs, npy_intp const *strides,
+ npy_intp const *countptr, NpyIter_IterNextFunc *iternext,
+ int needs_api, npy_intp skip_first_count)
{
- PyArray_Descr *dtypes[3], **iter_dtypes;
- PyUFuncObject *ufunc = (PyUFuncObject *)data;
- char *dataptrs_copy[3];
- npy_intp strides_copy[3];
+ int retval;
+ char *dataptrs_copy[4];
+ npy_intp strides_copy[4];
npy_bool masked;
- /* The normal selected inner loop */
- PyUFuncGenericFunction innerloop = NULL;
- void *innerloopdata = NULL;
-
NPY_BEGIN_THREADS_DEF;
/* Get the number of operands, to determine whether "where" is used */
masked = (NpyIter_GetNOp(iter) == 3);
- /* Get the inner loop */
- iter_dtypes = NpyIter_GetDescrArray(iter);
- dtypes[0] = iter_dtypes[0];
- dtypes[1] = iter_dtypes[1];
- dtypes[2] = iter_dtypes[0];
- if (ufunc->legacy_inner_loop_selector(ufunc, dtypes,
- &innerloop, &innerloopdata, &needs_api) < 0) {
- return -1;
+ if (!needs_api) {
+ NPY_BEGIN_THREADS_THRESHOLDED(NpyIter_GetIterSize(iter));
}
- NPY_BEGIN_THREADS_NDITER(iter);
-
if (skip_first_count > 0) {
- do {
+ assert(!masked); /* Path currently not available for masked */
+ while (1) {
npy_intp count = *countptr;
/* Skip any first-visit elements */
@@ -2864,27 +2836,23 @@ reduce_loop(NpyIter *iter, char **dataptrs, npy_intp const *strides,
strides_copy[0] = strides[0];
strides_copy[1] = strides[1];
strides_copy[2] = strides[0];
- innerloop(dataptrs_copy, &count,
- strides_copy, innerloopdata);
- if (needs_api && PyErr_Occurred()) {
+ retval = strided_loop(context,
+ dataptrs_copy, &count, strides_copy, auxdata);
+ if (retval < 0) {
goto finish_loop;
}
- /* Jump to the faster loop when skipping is done */
- if (skip_first_count == 0) {
- if (iternext(iter)) {
- break;
- }
- else {
- goto finish_loop;
- }
+ /* Advance loop, and abort on error (or finish) */
+ if (!iternext(iter)) {
+ goto finish_loop;
}
- } while (iternext(iter));
- }
- if (needs_api && PyErr_Occurred()) {
- goto finish_loop;
+ /* When skipping is done break and continue with faster loop */
+ if (skip_first_count == 0) {
+ break;
+ }
+ }
}
do {
@@ -2895,42 +2863,23 @@ reduce_loop(NpyIter *iter, char **dataptrs, npy_intp const *strides,
strides_copy[0] = strides[0];
strides_copy[1] = strides[1];
strides_copy[2] = strides[0];
-
- if (!masked) {
- innerloop(dataptrs_copy, countptr,
- strides_copy, innerloopdata);
+ if (masked) {
+ dataptrs_copy[3] = dataptrs[2];
+ strides_copy[3] = strides[2];
}
- else {
- npy_intp count = *countptr;
- char *maskptr = dataptrs[2];
- npy_intp mask_stride = strides[2];
- /* Optimization for when the mask is broadcast */
- npy_intp n = mask_stride == 0 ? count : 1;
- while (count) {
- char mask = *maskptr;
- maskptr += mask_stride;
- while (n < count && mask == *maskptr) {
- n++;
- maskptr += mask_stride;
- }
- /* If mask set, apply inner loop on this contiguous region */
- if (mask) {
- innerloop(dataptrs_copy, &n,
- strides_copy, innerloopdata);
- }
- dataptrs_copy[0] += n * strides[0];
- dataptrs_copy[1] += n * strides[1];
- dataptrs_copy[2] = dataptrs_copy[0];
- count -= n;
- n = 1;
- }
+
+ retval = strided_loop(context,
+ dataptrs_copy, countptr, strides_copy, auxdata);
+ if (retval < 0) {
+ goto finish_loop;
}
- } while (!(needs_api && PyErr_Occurred()) && iternext(iter));
+
+ } while (iternext(iter));
finish_loop:
NPY_END_THREADS;
- return (needs_api && PyErr_Occurred()) ? -1 : 0;
+ return retval;
}
/*
@@ -2951,15 +2900,14 @@ finish_loop:
* this function does not validate them.
*/
static PyArrayObject *
-PyUFunc_Reduce(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out,
- int naxes, int *axes, PyArray_Descr *odtype, int keepdims,
+PyUFunc_Reduce(PyUFuncObject *ufunc,
+ PyArrayObject *arr, PyArrayObject *out,
+ int naxes, int *axes, PyArray_DTypeMeta *signature[3], int keepdims,
PyObject *initial, PyArrayObject *wheremask)
{
int iaxes, ndim;
npy_bool reorderable;
npy_bool axis_flags[NPY_MAXDIMS];
- PyArray_Descr *dtype;
- PyArrayObject *result;
PyObject *identity;
const char *ufunc_name = ufunc_get_name_cstr(ufunc);
/* These parameters come from a TLS global */
@@ -2986,6 +2934,7 @@ PyUFunc_Reduce(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out,
}
/* Get the identity */
+ /* TODO: Both of these should be provided by the ArrayMethod! */
identity = _get_identity(ufunc, &reorderable);
if (identity == NULL) {
return NULL;
@@ -3009,21 +2958,27 @@ PyUFunc_Reduce(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out,
Py_INCREF(initial); /* match the reference count in the if above */
}
- /* Get the reduction dtype */
- if (reduce_type_resolver(ufunc, arr, odtype, &dtype) < 0) {
+ PyArray_Descr *descrs[3];
+ PyArrayMethodObject *ufuncimpl = reducelike_promote_and_resolve(ufunc,
+ arr, out, signature, NPY_FALSE, descrs, "reduce");
+ if (ufuncimpl == NULL) {
Py_DECREF(initial);
return NULL;
}
- result = PyUFunc_ReduceWrapper(arr, out, wheremask, dtype, dtype,
- NPY_UNSAFE_CASTING,
- axis_flags, reorderable,
- keepdims,
- initial,
- reduce_loop,
- ufunc, buffersize, ufunc_name, errormask);
+ PyArrayMethod_Context context = {
+ .caller = (PyObject *)ufunc,
+ .method = ufuncimpl,
+ .descriptors = descrs,
+ };
+
+ PyArrayObject *result = PyUFunc_ReduceWrapper(&context,
+ arr, out, wheremask, axis_flags, reorderable, keepdims,
+ initial, reduce_loop, ufunc, buffersize, ufunc_name, errormask);
- Py_DECREF(dtype);
+ for (int i = 0; i < 3; i++) {
+ Py_DECREF(descrs[i]);
+ }
Py_DECREF(initial);
return result;
}
@@ -3031,23 +2986,21 @@ PyUFunc_Reduce(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out,
static PyObject *
PyUFunc_Accumulate(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out,
- int axis, int otype)
+ int axis, PyArray_DTypeMeta *signature[3])
{
PyArrayObject *op[2];
- PyArray_Descr *op_dtypes[2] = {NULL, NULL};
int op_axes_arrays[2][NPY_MAXDIMS];
int *op_axes[2] = {op_axes_arrays[0], op_axes_arrays[1]};
npy_uint32 op_flags[2];
- int idim, ndim, otype_final;
+ int idim, ndim;
int needs_api, need_outer_iterator;
- NpyIter *iter = NULL;
+ int res = 0;
- /* The selected inner loop */
- PyUFuncGenericFunction innerloop = NULL;
- void *innerloopdata = NULL;
+ PyArrayMethod_StridedLoop *strided_loop;
+ NpyAuxData *auxdata = NULL;
- const char *ufunc_name = ufunc_get_name_cstr(ufunc);
+ NpyIter *iter = NULL;
/* These parameters come from extobj= or from a TLS global */
int buffersize = 0, errormask = 0;
@@ -3069,42 +3022,32 @@ PyUFunc_Accumulate(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out,
/* Take a reference to out for later returning */
Py_XINCREF(out);
- otype_final = otype;
- if (get_binary_op_function(ufunc, &otype_final,
- &innerloop, &innerloopdata) < 0) {
- PyArray_Descr *dtype = PyArray_DescrFromType(otype);
- PyErr_Format(PyExc_ValueError,
- "could not find a matching type for %s.accumulate, "
- "requested type has type code '%c'",
- ufunc_name, dtype ? dtype->type : '-');
- Py_XDECREF(dtype);
- goto fail;
+ PyArray_Descr *descrs[3];
+ PyArrayMethodObject *ufuncimpl = reducelike_promote_and_resolve(ufunc,
+ arr, out, signature, NPY_TRUE, descrs, "accumulate");
+ if (ufuncimpl == NULL) {
+ return NULL;
}
- ndim = PyArray_NDIM(arr);
+ /* The below code assumes that all descriptors are identical: */
+ assert(descrs[0] == descrs[1] && descrs[0] == descrs[2]);
- /*
- * Set up the output data type, using the input's exact
- * data type if the type number didn't change to preserve
- * metadata
- */
- if (PyArray_DESCR(arr)->type_num == otype_final) {
- if (PyArray_ISNBO(PyArray_DESCR(arr)->byteorder)) {
- op_dtypes[0] = PyArray_DESCR(arr);
- Py_INCREF(op_dtypes[0]);
- }
- else {
- op_dtypes[0] = PyArray_DescrNewByteorder(PyArray_DESCR(arr),
- NPY_NATIVE);
- }
- }
- else {
- op_dtypes[0] = PyArray_DescrFromType(otype_final);
- }
- if (op_dtypes[0] == NULL) {
+ if (PyDataType_REFCHK(descrs[2]) && descrs[2]->type_num != NPY_OBJECT) {
+ /* This can be removed, but the initial element copy needs fixing */
+ PyErr_SetString(PyExc_TypeError,
+                "among dtypes containing references, accumulation "
+                "currently only supports the `object` dtype");
goto fail;
}
+ PyArrayMethod_Context context = {
+ .caller = (PyObject *)ufunc,
+ .method = ufuncimpl,
+ .descriptors = descrs,
+ };
+
+ ndim = PyArray_NDIM(arr);
+
#if NPY_UF_DBG_TRACING
-    printf("Found %s.accumulate inner loop with dtype : ", ufunc_name);
-    PyObject_Print((PyObject *)op_dtypes[0], stdout, 0);
+    printf("Found %s.accumulate inner loop with dtype : ",
+           ufunc_get_name_cstr(ufunc));
+    PyObject_Print((PyObject *)descrs[0], stdout, 0);
@@ -3130,9 +3073,9 @@ PyUFunc_Accumulate(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out,
need_outer_iterator = (ndim > 1);
/* We can't buffer, so must do UPDATEIFCOPY */
if (!PyArray_ISALIGNED(arr) || (out && !PyArray_ISALIGNED(out)) ||
- !PyArray_EquivTypes(op_dtypes[0], PyArray_DESCR(arr)) ||
+ !PyArray_EquivTypes(descrs[1], PyArray_DESCR(arr)) ||
(out &&
- !PyArray_EquivTypes(op_dtypes[0], PyArray_DESCR(out)))) {
+ !PyArray_EquivTypes(descrs[0], PyArray_DESCR(out)))) {
need_outer_iterator = 1;
}
/* If input and output overlap in memory, use iterator to figure it out */
@@ -3145,7 +3088,6 @@ PyUFunc_Accumulate(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out,
npy_uint32 flags = NPY_ITER_ZEROSIZE_OK|
NPY_ITER_REFS_OK|
NPY_ITER_COPY_IF_OVERLAP;
- PyArray_Descr **op_dtypes_param = NULL;
/*
* The way accumulate is set up, we can't do buffering,
@@ -3162,13 +3104,11 @@ PyUFunc_Accumulate(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out,
*/
op_flags[0] |= NPY_ITER_UPDATEIFCOPY|NPY_ITER_ALIGNED|NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE;
op_flags[1] |= NPY_ITER_COPY|NPY_ITER_ALIGNED|NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE;
- op_dtypes_param = op_dtypes;
- op_dtypes[1] = op_dtypes[0];
+
NPY_UF_DBG_PRINT("Allocating outer iterator\n");
iter = NpyIter_AdvancedNew(2, op, flags,
NPY_KEEPORDER, NPY_UNSAFE_CASTING,
- op_flags,
- op_dtypes_param,
+ op_flags, descrs,
ndim_iter, op_axes, NULL, 0);
if (iter == NULL) {
goto fail;
@@ -3186,14 +3126,14 @@ PyUFunc_Accumulate(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out,
}
}
- /* Get the output */
+ /* Get the output from the iterator if it was allocated */
if (out == NULL) {
if (iter) {
op[0] = out = NpyIter_GetOperandArray(iter)[0];
Py_INCREF(out);
}
else {
- PyArray_Descr *dtype = op_dtypes[0];
+ PyArray_Descr *dtype = descrs[0];
Py_INCREF(dtype);
op[0] = out = (PyArrayObject *)PyArray_NewFromDescr(
&PyArray_Type, dtype,
@@ -3202,10 +3142,31 @@ PyUFunc_Accumulate(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out,
if (out == NULL) {
goto fail;
}
-
}
}
+ npy_intp fixed_strides[3];
+ if (need_outer_iterator) {
+ NpyIter_GetInnerFixedStrideArray(iter, fixed_strides);
+ }
+ else {
+ fixed_strides[0] = PyArray_STRIDES(op[0])[axis];
+ fixed_strides[1] = PyArray_STRIDES(op[1])[axis];
+ fixed_strides[2] = fixed_strides[0];
+ }
+
+
+ NPY_ARRAYMETHOD_FLAGS flags = 0;
+ if (ufuncimpl->get_strided_loop(&context,
+ 1, 0, fixed_strides, &strided_loop, &auxdata, &flags) < 0) {
+ goto fail;
+ }
+ needs_api = (flags & NPY_METH_REQUIRES_PYAPI) != 0;
+ if (!(flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) {
+ /* Start with the floating-point exception flags cleared */
+ npy_clear_floatstatus_barrier((char*)&iter);
+ }
+
/*
* If the reduction axis has size zero, either return the reduction
* unit for UFUNC_REDUCE, or return the zero-sized output array
@@ -3226,7 +3187,7 @@ PyUFunc_Accumulate(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out,
NpyIter_IterNextFunc *iternext;
char **dataptr;
- int itemsize = op_dtypes[0]->elsize;
+ int itemsize = descrs[0]->elsize;
/* Get the variables needed for the loop */
iternext = NpyIter_GetIterNext(iter, NULL);
@@ -3234,8 +3195,7 @@ PyUFunc_Accumulate(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out,
goto fail;
}
dataptr = NpyIter_GetDataPtrArray(iter);
- needs_api = NpyIter_IterationNeedsAPI(iter);
-
+ needs_api |= NpyIter_IterationNeedsAPI(iter);
/* Execute the loop with just the outer iterator */
count_m1 = PyArray_DIM(op[1], axis)-1;
@@ -3249,7 +3209,9 @@ PyUFunc_Accumulate(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out,
stride_copy[1] = stride1;
stride_copy[2] = stride0;
- NPY_BEGIN_THREADS_NDITER(iter);
+ if (!needs_api) {
+ NPY_BEGIN_THREADS_THRESHOLDED(NpyIter_GetIterSize(iter));
+ }
do {
dataptr_copy[0] = dataptr[0];
@@ -3262,7 +3224,7 @@ PyUFunc_Accumulate(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out,
* Output (dataptr[0]) and input (dataptr[1]) may point to
* the same memory, e.g. np.add.accumulate(a, out=a).
*/
- if (otype == NPY_OBJECT) {
+ if (descrs[2]->type_num == NPY_OBJECT) {
/*
* Incref before decref to avoid the possibility of the
* reference count being zero temporarily.
@@ -3282,18 +3244,17 @@ PyUFunc_Accumulate(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out,
dataptr_copy[2] += stride0;
NPY_UF_DBG_PRINT1("iterator loop count %d\n",
(int)count_m1);
- innerloop(dataptr_copy, &count_m1,
- stride_copy, innerloopdata);
+ res = strided_loop(&context,
+ dataptr_copy, &count_m1, stride_copy, auxdata);
}
- } while (!(needs_api && PyErr_Occurred()) && iternext(iter));
+ } while (res == 0 && iternext(iter));
NPY_END_THREADS;
}
else if (iter == NULL) {
char *dataptr_copy[3];
- npy_intp stride_copy[3];
- int itemsize = op_dtypes[0]->elsize;
+ int itemsize = descrs[0]->elsize;
/* Execute the loop with no iterators */
npy_intp count = PyArray_DIM(op[1], axis);
@@ -3307,15 +3268,11 @@ PyUFunc_Accumulate(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out,
PyArray_NDIM(op[0]))) {
PyErr_SetString(PyExc_ValueError,
"provided out is the wrong size "
- "for the reduction");
+                    "for the accumulation");
goto fail;
}
stride0 = PyArray_STRIDE(op[0], axis);
- stride_copy[0] = stride0;
- stride_copy[1] = stride1;
- stride_copy[2] = stride0;
-
/* Turn the two items into three for the inner loop */
dataptr_copy[0] = PyArray_BYTES(op[0]);
dataptr_copy[1] = PyArray_BYTES(op[1]);
@@ -3327,7 +3284,7 @@ PyUFunc_Accumulate(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out,
* Output (dataptr[0]) and input (dataptr[1]) may point to the
* same memory, e.g. np.add.accumulate(a, out=a).
*/
- if (otype == NPY_OBJECT) {
+ if (descrs[2]->type_num == NPY_OBJECT) {
/*
* Incref before decref to avoid the possibility of the
* reference count being zero temporarily.
@@ -3348,25 +3305,34 @@ PyUFunc_Accumulate(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out,
NPY_UF_DBG_PRINT1("iterator loop count %d\n", (int)count);
- needs_api = PyDataType_REFCHK(op_dtypes[0]);
+ needs_api = PyDataType_REFCHK(descrs[0]);
if (!needs_api) {
NPY_BEGIN_THREADS_THRESHOLDED(count);
}
- innerloop(dataptr_copy, &count,
- stride_copy, innerloopdata);
+ res = strided_loop(&context,
+ dataptr_copy, &count, fixed_strides, auxdata);
NPY_END_THREADS;
}
}
finish:
- Py_XDECREF(op_dtypes[0]);
- int res = 0;
+ NPY_AUXDATA_FREE(auxdata);
+ Py_DECREF(descrs[0]);
+ Py_DECREF(descrs[1]);
+ Py_DECREF(descrs[2]);
+
if (!NpyIter_Deallocate(iter)) {
res = -1;
}
+
+ if (res == 0 && !(flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) {
+ /* NOTE: We could check float errors even when `res < 0` */
+ res = _check_ufunc_fperr(errormask, NULL, "accumulate");
+ }
+
if (res < 0) {
Py_DECREF(out);
return NULL;
@@ -3376,7 +3342,11 @@ finish:
fail:
Py_XDECREF(out);
- Py_XDECREF(op_dtypes[0]);
+
+ NPY_AUXDATA_FREE(auxdata);
+ Py_XDECREF(descrs[0]);
+ Py_XDECREF(descrs[1]);
+ Py_XDECREF(descrs[2]);
NpyIter_Deallocate(iter);
@@ -3401,28 +3371,31 @@ fail:
* indices[1::2] = range(1,len(array))
*
* output shape is based on the size of indices
+ *
+ * TODO: Reduceat duplicates too much code from accumulate!
*/
static PyObject *
PyUFunc_Reduceat(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *ind,
- PyArrayObject *out, int axis, int otype)
+ PyArrayObject *out, int axis, PyArray_DTypeMeta *signature[3])
{
PyArrayObject *op[3];
- PyArray_Descr *op_dtypes[3] = {NULL, NULL, NULL};
int op_axes_arrays[3][NPY_MAXDIMS];
int *op_axes[3] = {op_axes_arrays[0], op_axes_arrays[1],
op_axes_arrays[2]};
npy_uint32 op_flags[3];
- int idim, ndim, otype_final;
- int need_outer_iterator = 0;
+ int idim, ndim;
+ int needs_api, need_outer_iterator = 0;
+
+ int res = 0;
NpyIter *iter = NULL;
+ PyArrayMethod_StridedLoop *strided_loop;
+ NpyAuxData *auxdata = NULL;
+
/* The reduceat indices - ind must be validated outside this call */
npy_intp *reduceat_ind;
npy_intp i, ind_size, red_axis_size;
- /* The selected inner loop */
- PyUFuncGenericFunction innerloop = NULL;
- void *innerloopdata = NULL;
const char *ufunc_name = ufunc_get_name_cstr(ufunc);
char *opname = "reduceat";
@@ -3462,42 +3435,32 @@ PyUFunc_Reduceat(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *ind,
/* Take a reference to out for later returning */
Py_XINCREF(out);
- otype_final = otype;
- if (get_binary_op_function(ufunc, &otype_final,
- &innerloop, &innerloopdata) < 0) {
- PyArray_Descr *dtype = PyArray_DescrFromType(otype);
- PyErr_Format(PyExc_ValueError,
- "could not find a matching type for %s.%s, "
- "requested type has type code '%c'",
- ufunc_name, opname, dtype ? dtype->type : '-');
- Py_XDECREF(dtype);
- goto fail;
+ PyArray_Descr *descrs[3];
+ PyArrayMethodObject *ufuncimpl = reducelike_promote_and_resolve(ufunc,
+ arr, out, signature, NPY_TRUE, descrs, "reduceat");
+ if (ufuncimpl == NULL) {
+ return NULL;
}
- ndim = PyArray_NDIM(arr);
+ /* The below code assumes that all descriptors are identical: */
+ assert(descrs[0] == descrs[1] && descrs[0] == descrs[2]);
- /*
- * Set up the output data type, using the input's exact
- * data type if the type number didn't change to preserve
- * metadata
- */
- if (PyArray_DESCR(arr)->type_num == otype_final) {
- if (PyArray_ISNBO(PyArray_DESCR(arr)->byteorder)) {
- op_dtypes[0] = PyArray_DESCR(arr);
- Py_INCREF(op_dtypes[0]);
- }
- else {
- op_dtypes[0] = PyArray_DescrNewByteorder(PyArray_DESCR(arr),
- NPY_NATIVE);
- }
- }
- else {
- op_dtypes[0] = PyArray_DescrFromType(otype_final);
- }
- if (op_dtypes[0] == NULL) {
+ if (PyDataType_REFCHK(descrs[2]) && descrs[2]->type_num != NPY_OBJECT) {
+ /* This can be removed, but the initial element copy needs fixing */
+ PyErr_SetString(PyExc_TypeError,
+                "among dtypes containing references, reduceat "
+                "currently only supports the `object` dtype");
goto fail;
}
+ PyArrayMethod_Context context = {
+ .caller = (PyObject *)ufunc,
+ .method = ufuncimpl,
+ .descriptors = descrs,
+ };
+
+ ndim = PyArray_NDIM(arr);
+
#if NPY_UF_DBG_TRACING
printf("Found %s.%s inner loop with dtype : ", ufunc_name, opname);
-    PyObject_Print((PyObject *)op_dtypes[0], stdout, 0);
+    PyObject_Print((PyObject *)descrs[0], stdout, 0);
@@ -3524,11 +3487,13 @@ PyUFunc_Reduceat(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *ind,
op[2] = ind;
if (out != NULL || ndim > 1 || !PyArray_ISALIGNED(arr) ||
- !PyArray_EquivTypes(op_dtypes[0], PyArray_DESCR(arr))) {
+ !PyArray_EquivTypes(descrs[0], PyArray_DESCR(arr))) {
need_outer_iterator = 1;
}
if (need_outer_iterator) {
+ PyArray_Descr *op_dtypes[3] = {descrs[0], descrs[1], NULL};
+
npy_uint32 flags = NPY_ITER_ZEROSIZE_OK|
NPY_ITER_REFS_OK|
NPY_ITER_MULTI_INDEX|
@@ -3557,8 +3522,7 @@ PyUFunc_Reduceat(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *ind,
NPY_UF_DBG_PRINT("Allocating outer iterator\n");
iter = NpyIter_AdvancedNew(3, op, flags,
NPY_KEEPORDER, NPY_UNSAFE_CASTING,
- op_flags,
- op_dtypes,
+ op_flags, op_dtypes,
ndim, op_axes, NULL, 0);
if (iter == NULL) {
goto fail;
@@ -3582,11 +3546,15 @@ PyUFunc_Reduceat(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *ind,
Py_INCREF(out);
}
}
- /* Allocate the output for when there's no outer iterator */
- else if (out == NULL) {
- Py_INCREF(op_dtypes[0]);
+ else {
+ /*
+         * Allocate the output for when there's no outer iterator; we
+         * always use the outer-iteration path when `out` is passed.
+ */
+ assert(out == NULL);
+ Py_INCREF(descrs[0]);
op[0] = out = (PyArrayObject *)PyArray_NewFromDescr(
- &PyArray_Type, op_dtypes[0],
+ &PyArray_Type, descrs[0],
1, &ind_size, NULL, NULL,
0, NULL);
if (out == NULL) {
@@ -3594,6 +3562,28 @@ PyUFunc_Reduceat(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *ind,
}
}
+ npy_intp fixed_strides[3];
+ if (need_outer_iterator) {
+ NpyIter_GetInnerFixedStrideArray(iter, fixed_strides);
+ }
+ else {
+ fixed_strides[1] = PyArray_STRIDES(op[1])[axis];
+ }
+ /* The reduce axis does not advance here in the strided-loop */
+ fixed_strides[0] = 0;
+ fixed_strides[2] = 0;
+
+ NPY_ARRAYMETHOD_FLAGS flags = 0;
+ if (ufuncimpl->get_strided_loop(&context,
+ 1, 0, fixed_strides, &strided_loop, &auxdata, &flags) < 0) {
+ goto fail;
+ }
+ needs_api = (flags & NPY_METH_REQUIRES_PYAPI) != 0;
+ if (!(flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) {
+ /* Start with the floating-point exception flags cleared */
+ npy_clear_floatstatus_barrier((char*)&iter);
+ }
+
/*
* If the output has zero elements, return now.
*/
@@ -3611,8 +3601,8 @@ PyUFunc_Reduceat(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *ind,
npy_intp stride0, stride1;
npy_intp stride0_ind = PyArray_STRIDE(op[0], axis);
- int itemsize = op_dtypes[0]->elsize;
- int needs_api = NpyIter_IterationNeedsAPI(iter);
+ int itemsize = descrs[0]->elsize;
+ needs_api |= NpyIter_IterationNeedsAPI(iter);
/* Get the variables needed for the loop */
iternext = NpyIter_GetIterNext(iter, NULL);
@@ -3632,10 +3622,11 @@ PyUFunc_Reduceat(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *ind,
stride_copy[1] = stride1;
stride_copy[2] = stride0;
- NPY_BEGIN_THREADS_NDITER(iter);
+ if (!needs_api) {
+ NPY_BEGIN_THREADS_THRESHOLDED(NpyIter_GetIterSize(iter));
+ }
do {
-
for (i = 0; i < ind_size; ++i) {
npy_intp start = reduceat_ind[i],
end = (i == ind_size-1) ? count_m1+1 :
@@ -3653,7 +3644,7 @@ PyUFunc_Reduceat(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *ind,
* to the same memory, e.g.
* np.add.reduceat(a, np.arange(len(a)), out=a).
*/
- if (otype == NPY_OBJECT) {
+ if (descrs[2]->type_num == NPY_OBJECT) {
/*
* Incref before decref to avoid the possibility of
* the reference count being zero temporarily.
@@ -3673,33 +3664,24 @@ PyUFunc_Reduceat(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *ind,
dataptr_copy[1] += stride1;
NPY_UF_DBG_PRINT1("iterator loop count %d\n",
(int)count);
- innerloop(dataptr_copy, &count,
- stride_copy, innerloopdata);
+ res = strided_loop(&context,
+ dataptr_copy, &count, stride_copy, auxdata);
}
}
- } while (!(needs_api && PyErr_Occurred()) && iternext(iter));
+ } while (res == 0 && iternext(iter));
NPY_END_THREADS;
}
else if (iter == NULL) {
char *dataptr_copy[3];
- npy_intp stride_copy[3];
- int itemsize = op_dtypes[0]->elsize;
+ int itemsize = descrs[0]->elsize;
npy_intp stride0_ind = PyArray_STRIDE(op[0], axis);
-
- /* Execute the loop with no iterators */
- npy_intp stride0 = 0, stride1 = PyArray_STRIDE(op[1], axis);
-
- int needs_api = PyDataType_REFCHK(op_dtypes[0]);
+ npy_intp stride1 = PyArray_STRIDE(op[1], axis);
NPY_UF_DBG_PRINT("UFunc: Reduce loop with no iterators\n");
- stride_copy[0] = stride0;
- stride_copy[1] = stride1;
- stride_copy[2] = stride0;
-
if (!needs_api) {
NPY_BEGIN_THREADS;
}
@@ -3721,7 +3703,7 @@ PyUFunc_Reduceat(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *ind,
* the same memory, e.g.
* np.add.reduceat(a, np.arange(len(a)), out=a).
*/
- if (otype == NPY_OBJECT) {
+ if (descrs[2]->type_num == NPY_OBJECT) {
/*
* Incref before decref to avoid the possibility of the
* reference count being zero temporarily.
@@ -3741,8 +3723,11 @@ PyUFunc_Reduceat(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *ind,
dataptr_copy[1] += stride1;
NPY_UF_DBG_PRINT1("iterator loop count %d\n",
(int)count);
- innerloop(dataptr_copy, &count,
- stride_copy, innerloopdata);
+ res = strided_loop(&context,
+ dataptr_copy, &count, fixed_strides, auxdata);
+ if (res != 0) {
+ break;
+ }
}
}
@@ -3750,8 +3735,21 @@ PyUFunc_Reduceat(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *ind,
}
finish:
- Py_XDECREF(op_dtypes[0]);
+ NPY_AUXDATA_FREE(auxdata);
+ Py_DECREF(descrs[0]);
+ Py_DECREF(descrs[1]);
+ Py_DECREF(descrs[2]);
+
if (!NpyIter_Deallocate(iter)) {
+ res = -1;
+ }
+
+ if (res == 0 && !(flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) {
+ /* NOTE: We could check float errors even when `res < 0` */
+ res = _check_ufunc_fperr(errormask, NULL, "reduceat");
+ }
+
+ if (res < 0) {
Py_DECREF(out);
return NULL;
}
@@ -3760,9 +3758,14 @@ finish:
fail:
Py_XDECREF(out);
- Py_XDECREF(op_dtypes[0]);
+
+ NPY_AUXDATA_FREE(auxdata);
+ Py_XDECREF(descrs[0]);
+ Py_XDECREF(descrs[1]);
+ Py_XDECREF(descrs[2]);
NpyIter_Deallocate(iter);
+
return NULL;
}
@@ -3860,7 +3863,7 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc,
PyArrayObject *mp = NULL, *wheremask = NULL, *ret = NULL;
PyObject *op = NULL;
PyArrayObject *indices = NULL;
- PyArray_Descr *otype = NULL;
+ PyArray_DTypeMeta *signature[3] = {NULL, NULL, NULL};
PyArrayObject *out = NULL;
int keepdims = 0;
PyObject *initial = NULL;
@@ -4004,13 +4007,10 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc,
}
if (otype_obj && otype_obj != Py_None) {
/* Use `_get_dtype` because `dtype` is a DType and not the instance */
- PyArray_DTypeMeta *dtype = _get_dtype(otype_obj);
- if (dtype == NULL) {
+ signature[0] = _get_dtype(otype_obj);
+ if (signature[0] == NULL) {
goto fail;
}
- otype = dtype->singleton;
- Py_INCREF(otype);
- Py_DECREF(dtype);
}
if (out_obj && !PyArray_OutputConverter(out_obj, &out)) {
goto fail;
@@ -4030,15 +4030,6 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc,
ndim = PyArray_NDIM(mp);
- /* Check to see that type (and otype) is not FLEXIBLE */
- if (PyArray_ISFLEXIBLE(mp) ||
- (otype && PyTypeNum_ISFLEXIBLE(otype->type_num))) {
- PyErr_Format(PyExc_TypeError,
- "cannot perform %s with flexible type",
- _reduce_type[operation]);
- goto fail;
- }
-
/* Convert the 'axis' parameter into a list of axes */
if (axes_obj == NULL) {
/* apply defaults */
@@ -4101,14 +4092,12 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc,
}
/*
- * If out is specified it determines otype
- * unless otype already specified.
+ * If no dtype is specified and out is not specified, we override the
+ * integer and bool dtype used for add and multiply.
+ *
+ * TODO: The following should be handled by a promoter!
*/
- if (otype == NULL && out != NULL) {
- otype = PyArray_DESCR(out);
- Py_INCREF(otype);
- }
- if (otype == NULL) {
+ if (signature[0] == NULL && out == NULL) {
/*
* For integer types --- make sure at least a long
* is used for add and multiply reduction to avoid overflow
@@ -4128,16 +4117,17 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc,
typenum = NPY_LONG;
}
}
+ signature[0] = PyArray_DTypeFromTypeNum(typenum);
}
- otype = PyArray_DescrFromType(typenum);
}
-
+ Py_XINCREF(signature[0]);
+ signature[2] = signature[0];
switch(operation) {
case UFUNC_REDUCE:
- ret = PyUFunc_Reduce(ufunc, mp, out, naxes, axes,
- otype, keepdims, initial, wheremask);
- Py_XDECREF(wheremask);
+ ret = PyUFunc_Reduce(ufunc,
+ mp, out, naxes, axes, signature, keepdims, initial, wheremask);
+ Py_XSETREF(wheremask, NULL);
break;
case UFUNC_ACCUMULATE:
if (ndim == 0) {
@@ -4149,8 +4139,8 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc,
"accumulate does not allow multiple axes");
goto fail;
}
- ret = (PyArrayObject *)PyUFunc_Accumulate(ufunc, mp, out, axes[0],
- otype->type_num);
+ ret = (PyArrayObject *)PyUFunc_Accumulate(ufunc,
+ mp, out, axes[0], signature);
break;
case UFUNC_REDUCEAT:
if (ndim == 0) {
@@ -4163,19 +4153,22 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc,
goto fail;
}
ret = (PyArrayObject *)PyUFunc_Reduceat(ufunc,
- mp, indices, out, axes[0], otype->type_num);
+ mp, indices, out, axes[0], signature);
Py_SETREF(indices, NULL);
break;
}
+ if (ret == NULL) {
+ goto fail;
+ }
+
+ Py_DECREF(signature[0]);
+ Py_DECREF(signature[1]);
+ Py_DECREF(signature[2]);
+
Py_DECREF(mp);
- Py_DECREF(otype);
Py_XDECREF(full_args.in);
Py_XDECREF(full_args.out);
- if (ret == NULL) {
- return NULL;
- }
-
/* Wrap and return the output */
{
/* Find __array_wrap__ - note that these rules are different to the
@@ -4203,7 +4196,10 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc,
}
fail:
- Py_XDECREF(otype);
+ Py_XDECREF(signature[0]);
+ Py_XDECREF(signature[1]);
+ Py_XDECREF(signature[2]);
+
Py_XDECREF(mp);
Py_XDECREF(wheremask);
Py_XDECREF(indices);
@@ -6162,7 +6158,12 @@ ufunc_at(PyUFuncObject *ufunc, PyObject *args)
Py_XDECREF(array_operands[i]);
}
- if (needs_api && PyErr_Occurred()) {
+ /*
+ * An error should only be possible if needs_api is true, but this is not
+ * strictly correct for old-style ufuncs (e.g. `power` released the GIL
+     * but manually set an exception).
+ */
+ if (PyErr_Occurred()) {
return NULL;
}
else {
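For context on the aliasing case the comments in PyUFunc_Reduceat guard against, a minimal sketch from Python (behavior as described by those comments, not a new API):

    import numpy as np

    # reduceat where `out` aliases the input; for object dtype the
    # incref-before-decref ordering avoids a temporarily-zero refcount.
    a = np.arange(4, dtype=object)
    np.add.reduceat(a, np.arange(4), out=a)  # each length-1 segment reduces to itself
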
diff --git a/numpy/core/src/umath/ufunc_type_resolution.c b/numpy/core/src/umath/ufunc_type_resolution.c
index 7e24bc493..9ed923cf5 100644
--- a/numpy/core/src/umath/ufunc_type_resolution.c
+++ b/numpy/core/src/umath/ufunc_type_resolution.c
@@ -247,6 +247,28 @@ PyUFunc_ValidateCasting(PyUFuncObject *ufunc,
}
+/*
+ * Same as `PyUFunc_ValidateCasting` but only checks output casting.
+ */
+NPY_NO_EXPORT int
+PyUFunc_ValidateOutCasting(PyUFuncObject *ufunc,
+ NPY_CASTING casting, PyArrayObject **operands, PyArray_Descr **dtypes)
+{
+ int i, nin = ufunc->nin, nop = nin + ufunc->nout;
+
+ for (i = nin; i < nop; ++i) {
+ if (operands[i] == NULL) {
+ continue;
+ }
+ if (!PyArray_CanCastTypeTo(dtypes[i],
+ PyArray_DESCR(operands[i]), casting)) {
+ return raise_output_casting_error(
+ ufunc, casting, dtypes[i], PyArray_DESCR(operands[i]), i);
+ }
+ }
+ return 0;
+}
+
/*UFUNC_API
*
* This function applies the default type resolution rules
@@ -2142,6 +2164,10 @@ type_tuple_type_resolver(PyUFuncObject *self,
* `signature=(None,)*nin + (dtype,)*nout`. If the signature matches that
* exactly (could be relaxed but that is not necessary for backcompat),
* we also try `signature=(dtype,)*(nin+nout)`.
+ * Since reductions pass in `(dtype, None, dtype)`, we broaden this to
+ * replacing all unspecified dtypes with the homogeneous output one.
+ * Note that this can (and often will) lead to unsafe casting. This is
+ * normally rejected (but not currently for reductions!).
* This used to be the main meaning for `dtype=dtype`, but some calls broke
* the expectation, and changing it allows for `dtype=dtype` to be useful
* for ufuncs like `np.ldexp` in the future while also normalizing it to
@@ -2160,13 +2186,12 @@ type_tuple_type_resolver(PyUFuncObject *self,
if (homogeneous_type != NPY_NOTYPE) {
for (int i = 0; i < nin; i++) {
if (specified_types[i] != NPY_NOTYPE) {
- homogeneous_type = NPY_NOTYPE;
- break;
+ /* Never replace a specified type! */
+ continue;
}
specified_types[i] = homogeneous_type;
}
- }
- if (homogeneous_type != NPY_NOTYPE) {
+
/* Try again with the homogeneous specified types. */
res = type_tuple_type_resolver_core(self,
op, input_casting, casting, specified_types, any_object,
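A hedged sketch of what the broadened matching above means for reductions (the cast noted in the comment can be unsafe):

    import numpy as np

    arr = np.arange(5, dtype=np.int8)
    # The reduction passes signature (int32, None, int32); the unspecified
    # input is filled in with the homogeneous output dtype, so the int32
    # loop is selected and the int8 input is cast.
    res = np.add.reduce(arr, dtype=np.int32)
    assert res == 10 and res.dtype == np.int32
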
diff --git a/numpy/core/src/umath/ufunc_type_resolution.h b/numpy/core/src/umath/ufunc_type_resolution.h
index dd88a081a..84a2593f4 100644
--- a/numpy/core/src/umath/ufunc_type_resolution.h
+++ b/numpy/core/src/umath/ufunc_type_resolution.h
@@ -99,6 +99,10 @@ PyUFunc_DivmodTypeResolver(PyUFuncObject *ufunc,
PyObject *type_tup,
PyArray_Descr **out_dtypes);
+NPY_NO_EXPORT int
+PyUFunc_ValidateOutCasting(PyUFuncObject *ufunc,
+ NPY_CASTING casting, PyArrayObject **operands, PyArray_Descr **dtypes);
+
/*
* Does a linear search for the best inner loop of the ufunc.
*
diff --git a/numpy/core/src/umath/umathmodule.c b/numpy/core/src/umath/umathmodule.c
index a9954dfc1..272555704 100644
--- a/numpy/core/src/umath/umathmodule.c
+++ b/numpy/core/src/umath/umathmodule.c
@@ -22,6 +22,7 @@
#include "numpy/npy_math.h"
#include "number.h"
+#include "dispatching.h"
static PyUFuncGenericFunction pyfunc_functions[] = {PyUFunc_On_Om};
@@ -305,5 +306,33 @@ int initumath(PyObject *m)
return -1;
}
+ /*
+ * Set up promoters for logical functions
+ * TODO: This should probably be done at a better place, or even in the
+ * code generator directly.
+ */
+ s = _PyDict_GetItemStringWithError(d, "logical_and");
+ if (s == NULL) {
+ return -1;
+ }
+ if (install_logical_ufunc_promoter(s) < 0) {
+ return -1;
+ }
+
+ s = _PyDict_GetItemStringWithError(d, "logical_or");
+ if (s == NULL) {
+ return -1;
+ }
+ if (install_logical_ufunc_promoter(s) < 0) {
+ return -1;
+ }
+
+ s = _PyDict_GetItemStringWithError(d, "logical_xor");
+ if (s == NULL) {
+ return -1;
+ }
+ if (install_logical_ufunc_promoter(s) < 0) {
+ return -1;
+ }
return 0;
}
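Roughly, the promoters installed above give the logical ufuncs a bool, bool -> bool fallback for otherwise non-promotable inputs (matching the tests added further down):

    import numpy as np

    # strings and floats have no common promotion, but logical ufuncs
    # now cast each operand to bool first:
    np.logical_and(np.array(["1"]), np.array([1.0]))   # array([ True])
    np.logical_or.reduce(np.array(["a", ""]))          # True
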
diff --git a/numpy/core/tests/data/generate_umath_validation_data.cpp b/numpy/core/tests/data/generate_umath_validation_data.cpp
index 9d97ff4ab..418eae670 100644
--- a/numpy/core/tests/data/generate_umath_validation_data.cpp
+++ b/numpy/core/tests/data/generate_umath_validation_data.cpp
@@ -1,41 +1,46 @@
-#include<math.h>
-#include<stdio.h>
-#include<iostream>
-#include<algorithm>
-#include<vector>
-#include<random>
-#include<fstream>
-#include<time.h>
+#include <algorithm>
+#include <fstream>
+#include <iostream>
+#include <math.h>
+#include <random>
+#include <stdio.h>
+#include <time.h>
+#include <vector>
struct ufunc {
std::string name;
- double (*f32func) (double);
- long double (*f64func) (long double);
+ double (*f32func)(double);
+ long double (*f64func)(long double);
float f32ulp;
float f64ulp;
};
-template<typename T>
-T RandomFloat(T a, T b) {
- T random = ((T) rand()) / (T) RAND_MAX;
+template <typename T>
+T
+RandomFloat(T a, T b)
+{
+ T random = ((T)rand()) / (T)RAND_MAX;
T diff = b - a;
T r = random * diff;
return a + r;
}
-template<typename T>
-void append_random_array(std::vector<T>& arr, T min, T max, size_t N)
+template <typename T>
+void
+append_random_array(std::vector<T> &arr, T min, T max, size_t N)
{
for (size_t ii = 0; ii < N; ++ii)
arr.emplace_back(RandomFloat<T>(min, max));
}
-template<typename T1, typename T2>
-std::vector<T1> computeTrueVal(const std::vector<T1>& in, T2(*mathfunc)(T2)) {
+template <typename T1, typename T2>
+std::vector<T1>
+computeTrueVal(const std::vector<T1> &in, T2 (*mathfunc)(T2))
+{
std::vector<T1> out;
for (T1 elem : in) {
- T2 elem_d = (T2) elem;
- T1 out_elem = (T1) mathfunc(elem_d);
+ T2 elem_d = (T2)elem;
+ T1 out_elem = (T1)mathfunc(elem_d);
out.emplace_back(out_elem);
}
return out;
@@ -49,17 +54,20 @@ std::vector<T1> computeTrueVal(const std::vector<T1>& in, T2(*mathfunc)(T2)) {
#define MINDEN std::numeric_limits<T>::denorm_min()
#define MINFLT std::numeric_limits<T>::min()
#define MAXFLT std::numeric_limits<T>::max()
-#define INF std::numeric_limits<T>::infinity()
-#define qNAN std::numeric_limits<T>::quiet_NaN()
-#define sNAN std::numeric_limits<T>::signaling_NaN()
+#define INF std::numeric_limits<T>::infinity()
+#define qNAN std::numeric_limits<T>::quiet_NaN()
+#define sNAN std::numeric_limits<T>::signaling_NaN()
-template<typename T>
-std::vector<T> generate_input_vector(std::string func) {
- std::vector<T> input = {MINDEN, -MINDEN, MINFLT, -MINFLT, MAXFLT, -MAXFLT,
- INF, -INF, qNAN, sNAN, -1.0, 1.0, 0.0, -0.0};
+template <typename T>
+std::vector<T>
+generate_input_vector(std::string func)
+{
+ std::vector<T> input = {MINDEN, -MINDEN, MINFLT, -MINFLT, MAXFLT,
+ -MAXFLT, INF, -INF, qNAN, sNAN,
+ -1.0, 1.0, 0.0, -0.0};
// [-1.0, 1.0]
- if ((func == "arcsin") || (func == "arccos") || (func == "arctanh")){
+ if ((func == "arcsin") || (func == "arccos") || (func == "arctanh")) {
append_random_array<T>(input, -1.0, 1.0, 700);
}
// (0.0, INF]
@@ -98,57 +106,62 @@ std::vector<T> generate_input_vector(std::string func) {
return input;
}
-int main() {
- srand (42);
+int
+main()
+{
+ srand(42);
std::vector<struct ufunc> umathfunc = {
- {"sin",sin,sin,2.37,3.3},
- {"cos",cos,cos,2.36,3.38},
- {"tan",tan,tan,3.91,3.93},
- {"arcsin",asin,asin,3.12,2.55},
- {"arccos",acos,acos,2.1,1.67},
- {"arctan",atan,atan,2.3,2.52},
- {"sinh",sinh,sinh,1.55,1.89},
- {"cosh",cosh,cosh,2.48,1.97},
- {"tanh",tanh,tanh,1.38,1.19},
- {"arcsinh",asinh,asinh,1.01,1.48},
- {"arccosh",acosh,acosh,1.16,1.05},
- {"arctanh",atanh,atanh,1.45,1.46},
- {"cbrt",cbrt,cbrt,1.94,1.82},
- //{"exp",exp,exp,3.76,1.53},
- {"exp2",exp2,exp2,1.01,1.04},
- {"expm1",expm1,expm1,2.62,2.1},
- //{"log",log,log,1.84,1.67},
- {"log10",log10,log10,3.5,1.92},
- {"log1p",log1p,log1p,1.96,1.93},
- {"log2",log2,log2,2.12,1.84},
+ {"sin", sin, sin, 2.37, 3.3},
+ {"cos", cos, cos, 2.36, 3.38},
+ {"tan", tan, tan, 3.91, 3.93},
+ {"arcsin", asin, asin, 3.12, 2.55},
+ {"arccos", acos, acos, 2.1, 1.67},
+ {"arctan", atan, atan, 2.3, 2.52},
+ {"sinh", sinh, sinh, 1.55, 1.89},
+ {"cosh", cosh, cosh, 2.48, 1.97},
+ {"tanh", tanh, tanh, 1.38, 1.19},
+ {"arcsinh", asinh, asinh, 1.01, 1.48},
+ {"arccosh", acosh, acosh, 1.16, 1.05},
+ {"arctanh", atanh, atanh, 1.45, 1.46},
+ {"cbrt", cbrt, cbrt, 1.94, 1.82},
+ //{"exp",exp,exp,3.76,1.53},
+ {"exp2", exp2, exp2, 1.01, 1.04},
+ {"expm1", expm1, expm1, 2.62, 2.1},
+ //{"log",log,log,1.84,1.67},
+ {"log10", log10, log10, 3.5, 1.92},
+ {"log1p", log1p, log1p, 1.96, 1.93},
+ {"log2", log2, log2, 2.12, 1.84},
};
for (int ii = 0; ii < umathfunc.size(); ++ii) {
- // ignore sin/cos
+ // ignore sin/cos
if ((umathfunc[ii].name != "sin") && (umathfunc[ii].name != "cos")) {
- std::string fileName = "umath-validation-set-" + umathfunc[ii].name + ".csv";
+ std::string fileName =
+ "umath-validation-set-" + umathfunc[ii].name + ".csv";
std::ofstream txtOut;
- txtOut.open (fileName, std::ofstream::trunc);
+ txtOut.open(fileName, std::ofstream::trunc);
txtOut << "dtype,input,output,ulperrortol" << std::endl;
// Single Precision
auto f32in = generate_input_vector<float>(umathfunc[ii].name);
- auto f32out = computeTrueVal<float, double>(f32in, umathfunc[ii].f32func);
+ auto f32out = computeTrueVal<float, double>(f32in,
+ umathfunc[ii].f32func);
for (int jj = 0; jj < f32in.size(); ++jj) {
- txtOut << "np.float32" << std::hex <<
- ",0x" << *reinterpret_cast<uint32_t*>(&f32in[jj]) <<
- ",0x" << *reinterpret_cast<uint32_t*>(&f32out[jj]) <<
- "," << ceil(umathfunc[ii].f32ulp) << std::endl;
+ txtOut << "np.float32" << std::hex << ",0x"
+ << *reinterpret_cast<uint32_t *>(&f32in[jj]) << ",0x"
+ << *reinterpret_cast<uint32_t *>(&f32out[jj]) << ","
+ << ceil(umathfunc[ii].f32ulp) << std::endl;
}
// Double Precision
auto f64in = generate_input_vector<double>(umathfunc[ii].name);
- auto f64out = computeTrueVal<double, long double>(f64in, umathfunc[ii].f64func);
+ auto f64out = computeTrueVal<double, long double>(
+ f64in, umathfunc[ii].f64func);
for (int jj = 0; jj < f64in.size(); ++jj) {
- txtOut << "np.float64" << std::hex <<
- ",0x" << *reinterpret_cast<uint64_t*>(&f64in[jj]) <<
- ",0x" << *reinterpret_cast<uint64_t*>(&f64out[jj]) <<
- "," << ceil(umathfunc[ii].f64ulp) << std::endl;
+ txtOut << "np.float64" << std::hex << ",0x"
+ << *reinterpret_cast<uint64_t *>(&f64in[jj]) << ",0x"
+ << *reinterpret_cast<uint64_t *>(&f64out[jj]) << ","
+ << ceil(umathfunc[ii].f64ulp) << std::endl;
}
txtOut.close();
}
diff --git a/numpy/core/tests/test_arrayprint.py b/numpy/core/tests/test_arrayprint.py
index 09cc79f72..25826d8ed 100644
--- a/numpy/core/tests/test_arrayprint.py
+++ b/numpy/core/tests/test_arrayprint.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
import sys
import gc
from hypothesis import given
diff --git a/numpy/core/tests/test_casting_unittests.py b/numpy/core/tests/test_casting_unittests.py
index b0d8ff503..cb4792090 100644
--- a/numpy/core/tests/test_casting_unittests.py
+++ b/numpy/core/tests/test_casting_unittests.py
@@ -9,7 +9,6 @@ than integration tests.
import pytest
import textwrap
import enum
-import itertools
import random
import numpy as np
diff --git a/numpy/core/tests/test_custom_dtypes.py b/numpy/core/tests/test_custom_dtypes.py
index 5eb82bc93..e502a87ed 100644
--- a/numpy/core/tests/test_custom_dtypes.py
+++ b/numpy/core/tests/test_custom_dtypes.py
@@ -101,6 +101,22 @@ class TestSFloat:
expected_view = a.view(np.float64) * b.view(np.float64)
assert_array_equal(res.view(np.float64), expected_view)
+ def test_possible_and_impossible_reduce(self):
+ # For reductions to work, the first and last operand must have the
+ # same dtype. For this parametric DType that is not necessarily true.
+ a = self._get_array(2.)
+        # Addition reduction works (as of writing, this requires passing
+        # `initial` because setting a scaled-float from the default `0` fails).
+ res = np.add.reduce(a, initial=0.)
+ assert res == a.astype(np.float64).sum()
+
+        # But each multiplication changes the factor, so a reduction is not
+        # possible (this is the relaxed version of the old refusal to handle
+        # any flexible dtype).
+ with pytest.raises(TypeError,
+ match="the resolved dtypes are not compatible"):
+ np.multiply.reduce(a)
+
def test_basic_multiply_promotion(self):
float_a = np.array([1., 2., 3.])
b = self._get_array(2.)
@@ -145,3 +161,23 @@ class TestSFloat:
# Check that casting the output fails also (done by the ufunc here)
with pytest.raises(TypeError):
np.add(a, a, out=c, casting="safe")
+
+ @pytest.mark.parametrize("ufunc",
+ [np.logical_and, np.logical_or, np.logical_xor])
+ def test_logical_ufuncs_casts_to_bool(self, ufunc):
+ a = self._get_array(2.)
+ a[0] = 0. # make sure first element is considered False.
+
+ float_equiv = a.astype(float)
+ expected = ufunc(float_equiv, float_equiv)
+ res = ufunc(a, a)
+ assert_array_equal(res, expected)
+
+ # also check that the same works for reductions:
+ expected = ufunc.reduce(float_equiv)
+ res = ufunc.reduce(a)
+ assert_array_equal(res, expected)
+
+ # The output casting does not match the bool, bool -> bool loop:
+ with pytest.raises(TypeError):
+ ufunc(a, a, out=np.empty(a.shape, dtype=int), casting="equiv")
diff --git a/numpy/core/tests/test_datetime.py b/numpy/core/tests/test_datetime.py
index 5a490646e..b95d669a8 100644
--- a/numpy/core/tests/test_datetime.py
+++ b/numpy/core/tests/test_datetime.py
@@ -63,6 +63,7 @@ class TestDateTime:
assert_raises(TypeError, np.dtype, 'm7')
assert_raises(TypeError, np.dtype, 'M16')
assert_raises(TypeError, np.dtype, 'm16')
+ assert_raises(TypeError, np.dtype, 'M8[3000000000ps]')
def test_datetime_casting_rules(self):
# Cannot cast safely/same_kind between timedelta and datetime
@@ -137,6 +138,42 @@ class TestDateTime:
assert_(not np.can_cast('M8[h]', 'M8', casting='same_kind'))
assert_(not np.can_cast('M8[h]', 'M8', casting='safe'))
+ def test_datetime_prefix_conversions(self):
+ # regression tests related to gh-19631;
+ # test metric prefixes from seconds down to
+ # attoseconds for bidirectional conversions
+ smaller_units = ['M8[7000ms]',
+ 'M8[2000us]',
+ 'M8[1000ns]',
+ 'M8[5000ns]',
+ 'M8[2000ps]',
+ 'M8[9000fs]',
+ 'M8[1000as]',
+ 'M8[2000000ps]',
+ 'M8[1000000as]',
+ 'M8[2000000000ps]',
+ 'M8[1000000000as]']
+ larger_units = ['M8[7s]',
+ 'M8[2ms]',
+ 'M8[us]',
+ 'M8[5us]',
+ 'M8[2ns]',
+ 'M8[9ps]',
+ 'M8[1fs]',
+ 'M8[2us]',
+ 'M8[1ps]',
+ 'M8[2ms]',
+ 'M8[1ns]']
+ for larger_unit, smaller_unit in zip(larger_units, smaller_units):
+ assert np.can_cast(larger_unit, smaller_unit, casting='safe')
+ assert np.can_cast(smaller_unit, larger_unit, casting='safe')
+
+ @pytest.mark.parametrize("unit", [
+ "s", "ms", "us", "ns", "ps", "fs", "as"])
+ def test_prohibit_negative_datetime(self, unit):
+ with assert_raises(TypeError):
+ np.array([1], dtype=f"M8[-1{unit}]")
+
def test_compare_generic_nat(self):
# regression tests for gh-6452
assert_(np.datetime64('NaT') !=
@@ -1992,6 +2029,21 @@ class TestDateTime:
assert_equal(np.maximum.reduce(a),
np.timedelta64(7, 's'))
+ def test_datetime_no_subtract_reducelike(self):
+ # subtracting two datetime64 works, but we cannot reduce it, since
+ # the result of that subtraction will have a different dtype.
+ arr = np.array(["2021-12-02", "2019-05-12"], dtype="M8[ms]")
+ msg = r"the resolved dtypes are not compatible with subtract\."
+
+ with pytest.raises(TypeError, match=msg + "reduce"):
+ np.subtract.reduce(arr)
+
+ with pytest.raises(TypeError, match=msg + "accumulate"):
+ np.subtract.accumulate(arr)
+
+ with pytest.raises(TypeError, match=msg + "reduceat"):
+ np.subtract.reduceat(arr, [0])
+
def test_datetime_busday_offset(self):
# First Monday in June
assert_equal(
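A quick sketch of the casting behavior these tests lock in (units with multipliers reduce to the same canonical tick size):

    import numpy as np

    # 7000 ms and 7 s denote the same step, so the cast is safe both ways:
    assert np.can_cast("M8[7000ms]", "M8[7s]", casting="safe")
    assert np.can_cast("M8[7s]", "M8[7000ms]", casting="safe")

    # negative unit multipliers are now rejected at dtype construction:
    # np.dtype("M8[-1s]")  -> TypeError
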
diff --git a/numpy/core/tests/test_deprecations.py b/numpy/core/tests/test_deprecations.py
index 1d0c5dfac..898ff8075 100644
--- a/numpy/core/tests/test_deprecations.py
+++ b/numpy/core/tests/test_deprecations.py
@@ -1192,3 +1192,26 @@ class TestUFuncForcedDTypeWarning(_DeprecationTestCase):
np.maximum(arr, arr, dtype="m8[ns]") # previously used the "ns"
with pytest.warns(DeprecationWarning, match=self.message):
np.maximum.reduce(arr, dtype="m8[ns]") # never preserved the "ns"
+
+
+PARTITION_DICT = {
+ "partition method": np.arange(10).partition,
+ "argpartition method": np.arange(10).argpartition,
+ "partition function": lambda kth: np.partition(np.arange(10), kth),
+ "argpartition function": lambda kth: np.argpartition(np.arange(10), kth),
+}
+
+
+@pytest.mark.parametrize("func", PARTITION_DICT.values(), ids=PARTITION_DICT)
+class TestPartitionBoolIndex(_DeprecationTestCase):
+ # Deprecated 2021-09-29, NumPy 1.22
+ warning_cls = DeprecationWarning
+ message = "Passing booleans as partition index is deprecated"
+
+ def test_deprecated(self, func):
+ self.assert_deprecated(lambda: func(True))
+ self.assert_deprecated(lambda: func([False, True]))
+
+ def test_not_deprecated(self, func):
+ self.assert_not_deprecated(lambda: func(1))
+ self.assert_not_deprecated(lambda: func([0, 1]))
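A short sketch of the deprecated call pattern (booleans were previously treated silently as 0/1 indices):

    import numpy as np
    import warnings

    d = np.arange(10)
    with warnings.catch_warnings(record=True) as log:
        warnings.simplefilter("always")
        np.partition(d, True)        # DeprecationWarning: boolean kth
        np.partition(d, 1)           # fine: integer kth
    assert any(w.category is DeprecationWarning for w in log)
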
diff --git a/numpy/core/tests/test_dtype.py b/numpy/core/tests/test_dtype.py
index 1c25bee00..8fe859919 100644
--- a/numpy/core/tests/test_dtype.py
+++ b/numpy/core/tests/test_dtype.py
@@ -3,7 +3,6 @@ import operator
import pytest
import ctypes
import gc
-import warnings
import types
from typing import Any
@@ -1590,6 +1589,14 @@ class TestClassGetItem:
assert np.dtype[Any]
+def test_result_type_integers_and_unitless_timedelta64():
+ # Regression test for gh-20077. The following call of `result_type`
+ # would cause a seg. fault.
+ td = np.timedelta64(4)
+ result = np.result_type(0, td)
+ assert_dtype_equal(result, td.dtype)
+
+
@pytest.mark.skipif(sys.version_info >= (3, 9), reason="Requires python 3.8")
def test_class_getitem_38() -> None:
match = "Type subscription requires python >= 3.9"
diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py
index b5f9f8af3..0da36bbea 100644
--- a/numpy/core/tests/test_multiarray.py
+++ b/numpy/core/tests/test_multiarray.py
@@ -2511,27 +2511,19 @@ class TestMethods:
assert_(not isinstance(a.searchsorted(b, 'left', s), A))
assert_(not isinstance(a.searchsorted(b, 'right', s), A))
- def test_argpartition_out_of_range(self):
+ @pytest.mark.parametrize("dtype", np.typecodes["All"])
+ def test_argpartition_out_of_range(self, dtype):
# Test out of range values in kth raise an error, gh-5469
- d = np.arange(10)
+ d = np.arange(10).astype(dtype=dtype)
assert_raises(ValueError, d.argpartition, 10)
assert_raises(ValueError, d.argpartition, -11)
- # Test also for generic type argpartition, which uses sorting
- # and used to not bound check kth
- d_obj = np.arange(10, dtype=object)
- assert_raises(ValueError, d_obj.argpartition, 10)
- assert_raises(ValueError, d_obj.argpartition, -11)
- def test_partition_out_of_range(self):
+ @pytest.mark.parametrize("dtype", np.typecodes["All"])
+ def test_partition_out_of_range(self, dtype):
# Test out of range values in kth raise an error, gh-5469
- d = np.arange(10)
+ d = np.arange(10).astype(dtype=dtype)
assert_raises(ValueError, d.partition, 10)
assert_raises(ValueError, d.partition, -11)
- # Test also for generic type partition, which uses sorting
- # and used to not bound check kth
- d_obj = np.arange(10, dtype=object)
- assert_raises(ValueError, d_obj.partition, 10)
- assert_raises(ValueError, d_obj.partition, -11)
def test_argpartition_integer(self):
# Test non-integer values in kth raise an error/
@@ -2551,26 +2543,30 @@ class TestMethods:
d_obj = np.arange(10, dtype=object)
assert_raises(TypeError, d_obj.partition, 9.)
- def test_partition_empty_array(self):
+ @pytest.mark.parametrize("kth_dtype", np.typecodes["AllInteger"])
+ def test_partition_empty_array(self, kth_dtype):
# check axis handling for multidimensional empty arrays
+ kth = np.array(0, dtype=kth_dtype)[()]
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array partition with axis={0}'.format(axis)
- assert_equal(np.partition(a, 0, axis=axis), a, msg)
+ assert_equal(np.partition(a, kth, axis=axis), a, msg)
msg = 'test empty array partition with axis=None'
- assert_equal(np.partition(a, 0, axis=None), a.ravel(), msg)
+ assert_equal(np.partition(a, kth, axis=None), a.ravel(), msg)
- def test_argpartition_empty_array(self):
+ @pytest.mark.parametrize("kth_dtype", np.typecodes["AllInteger"])
+ def test_argpartition_empty_array(self, kth_dtype):
# check axis handling for multidimensional empty arrays
+ kth = np.array(0, dtype=kth_dtype)[()]
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array argpartition with axis={0}'.format(axis)
- assert_equal(np.partition(a, 0, axis=axis),
+ assert_equal(np.partition(a, kth, axis=axis),
np.zeros_like(a, dtype=np.intp), msg)
msg = 'test empty array argpartition with axis=None'
- assert_equal(np.partition(a, 0, axis=None),
+ assert_equal(np.partition(a, kth, axis=None),
np.zeros_like(a.ravel(), dtype=np.intp), msg)
def test_partition(self):
@@ -2901,10 +2897,12 @@ class TestMethods:
assert_array_equal(np.partition(d, kth)[kth], tgt,
err_msg="data: %r\n kth: %r" % (d, kth))
- def test_argpartition_gh5524(self):
+ @pytest.mark.parametrize("kth_dtype", np.typecodes["AllInteger"])
+ def test_argpartition_gh5524(self, kth_dtype):
# A test for functionality of argpartition on lists.
- d = [6,7,3,2,9,0]
- p = np.argpartition(d,1)
+ kth = np.array(1, dtype=kth_dtype)[()]
+ d = [6, 7, 3, 2, 9, 0]
+ p = np.argpartition(d, kth)
self.assert_partitioned(np.array(d)[p],[1])
def test_flatten(self):
@@ -4200,7 +4198,7 @@ class TestArgmaxArgminCommon:
(3, 4, 1, 2), (4, 1, 2, 3)]
@pytest.mark.parametrize("size, axis", itertools.chain(*[[(size, axis)
- for axis in list(range(-len(size), len(size))) + [None]]
+ for axis in list(range(-len(size), len(size))) + [None]]
for size in sizes]))
@pytest.mark.parametrize('method', [np.argmax, np.argmin])
def test_np_argmin_argmax_keepdims(self, size, axis, method):
@@ -4221,7 +4219,7 @@ class TestArgmaxArgminCommon:
assert_equal(res, res_orig)
assert_(res.shape == new_shape)
outarray = np.empty(res.shape, dtype=res.dtype)
- res1 = method(arr, axis=axis, out=outarray,
+ res1 = method(arr, axis=axis, out=outarray,
keepdims=True)
assert_(res1 is outarray)
assert_equal(res, outarray)
@@ -4234,7 +4232,7 @@ class TestArgmaxArgminCommon:
wrong_shape[0] = 2
wrong_outarray = np.empty(wrong_shape, dtype=res.dtype)
with pytest.raises(ValueError):
- method(arr.T, axis=axis,
+ method(arr.T, axis=axis,
out=wrong_outarray, keepdims=True)
# non-contiguous arrays
@@ -4252,18 +4250,18 @@ class TestArgmaxArgminCommon:
assert_(res.shape == new_shape)
outarray = np.empty(new_shape[::-1], dtype=res.dtype)
outarray = outarray.T
- res1 = method(arr.T, axis=axis, out=outarray,
+ res1 = method(arr.T, axis=axis, out=outarray,
keepdims=True)
assert_(res1 is outarray)
assert_equal(res, outarray)
if len(size) > 0:
- # one dimension lesser for non-zero sized
+ # one dimension lesser for non-zero sized
# array should raise an error
with pytest.raises(ValueError):
- method(arr[0], axis=axis,
+ method(arr[0], axis=axis,
out=outarray, keepdims=True)
-
+
if len(size) > 0:
wrong_shape = list(new_shape)
if axis is not None:
@@ -4272,7 +4270,7 @@ class TestArgmaxArgminCommon:
wrong_shape[0] = 2
wrong_outarray = np.empty(wrong_shape, dtype=res.dtype)
with pytest.raises(ValueError):
- method(arr.T, axis=axis,
+ method(arr.T, axis=axis,
out=wrong_outarray, keepdims=True)
@pytest.mark.parametrize('method', ['max', 'min'])
@@ -4287,7 +4285,7 @@ class TestArgmaxArgminCommon:
axes.remove(i)
assert_(np.all(a_maxmin == aarg_maxmin.choose(
*a.transpose(i, *axes))))
-
+
@pytest.mark.parametrize('method', ['argmax', 'argmin'])
def test_output_shape(self, method):
# see also gh-616
@@ -4330,7 +4328,7 @@ class TestArgmaxArgminCommon:
[('argmax', np.argmax),
('argmin', np.argmin)])
def test_np_vs_ndarray(self, arr_method, np_method):
- # make sure both ndarray.argmax/argmin and
+ # make sure both ndarray.argmax/argmin and
# numpy.argmax/argmin support out/axis args
a = np.random.normal(size=(2, 3))
arg_method = getattr(a, arr_method)
@@ -4344,7 +4342,7 @@ class TestArgmaxArgminCommon:
# check keyword args
out1 = np.zeros(3, dtype=int)
out2 = np.zeros(3, dtype=int)
- assert_equal(arg_method(out=out1, axis=0),
+ assert_equal(arg_method(out=out1, axis=0),
np_method(a, out=out2, axis=0))
assert_equal(out1, out2)
@@ -4438,7 +4436,7 @@ class TestArgmax:
assert_equal(np.argmax(arr), pos, err_msg="%r" % arr)
assert_equal(arr[np.argmax(arr)], val, err_msg="%r" % arr)
-
+
def test_maximum_signed_integers(self):
a = np.array([1, 2**7 - 1, -2**7], dtype=np.int8)
diff --git a/numpy/core/tests/test_nditer.py b/numpy/core/tests/test_nditer.py
index fbf6da0e1..79f44ef80 100644
--- a/numpy/core/tests/test_nditer.py
+++ b/numpy/core/tests/test_nditer.py
@@ -3128,6 +3128,8 @@ def test_warn_noclose():
assert len(sup.log) == 1
+@pytest.mark.skipif(sys.version_info[:2] == (3, 9) and sys.platform == "win32",
+ reason="Errors with Python 3.9 on Windows")
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
@pytest.mark.parametrize(["in_dtype", "buf_dtype"],
[("i", "O"), ("O", "i"), # most simple cases
diff --git a/numpy/core/tests/test_numeric.py b/numpy/core/tests/test_numeric.py
index 4510333a1..a98c7016a 100644
--- a/numpy/core/tests/test_numeric.py
+++ b/numpy/core/tests/test_numeric.py
@@ -16,7 +16,7 @@ from numpy.testing import (
)
from numpy.core._rational_tests import rational
-from hypothesis import assume, given, strategies as st
+from hypothesis import given, strategies as st
from hypothesis.extra import numpy as hynp
@@ -932,25 +932,6 @@ class TestTypes:
# Promote with object:
assert_equal(promote_types('O', S+'30'), np.dtype('O'))
- @pytest.mark.parametrize(["dtype1", "dtype2"],
- [[np.dtype("V6"), np.dtype("V10")],
- [np.dtype([("name1", "i8")]), np.dtype([("name2", "i8")])],
- [np.dtype("i8,i8"), np.dtype("i4,i4")],
- ])
- def test_invalid_void_promotion(self, dtype1, dtype2):
- # Mainly test structured void promotion, which currently allows
- # byte-swapping, but nothing else:
- with pytest.raises(TypeError):
- np.promote_types(dtype1, dtype2)
-
- @pytest.mark.parametrize(["dtype1", "dtype2"],
- [[np.dtype("V10"), np.dtype("V10")],
- [np.dtype([("name1", "<i8")]), np.dtype([("name1", ">i8")])],
- [np.dtype("i8,i8"), np.dtype("i8,>i8")],
- ])
- def test_valid_void_promotion(self, dtype1, dtype2):
- assert np.promote_types(dtype1, dtype2) is dtype1
-
@pytest.mark.parametrize("dtype",
list(np.typecodes["All"]) +
["i,i", "S3", "S100", "U3", "U100", rational])
@@ -3511,6 +3492,12 @@ class TestBroadcast:
assert_raises(ValueError, np.broadcast, 1, **{'x': 1})
+ def test_shape_mismatch_error_message(self):
+ with pytest.raises(ValueError, match=r"arg 0 with shape \(1, 3\) and "
+ r"arg 2 with shape \(2,\)"):
+ np.broadcast([[1, 2, 3]], [[4], [5]], [6, 7])
+
+
class TestKeepdims:
class sub_array(np.ndarray):
diff --git a/numpy/core/tests/test_scalarinherit.py b/numpy/core/tests/test_scalarinherit.py
index cc53eb244..98d7f7cde 100644
--- a/numpy/core/tests/test_scalarinherit.py
+++ b/numpy/core/tests/test_scalarinherit.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
""" Test printing of scalar types.
"""
diff --git a/numpy/core/tests/test_scalarmath.py b/numpy/core/tests/test_scalarmath.py
index becd65b11..90078a2ea 100644
--- a/numpy/core/tests/test_scalarmath.py
+++ b/numpy/core/tests/test_scalarmath.py
@@ -5,14 +5,14 @@ import itertools
import operator
import platform
import pytest
-from hypothesis import given, settings, Verbosity, assume
+from hypothesis import given, settings, Verbosity
from hypothesis.strategies import sampled_from
import numpy as np
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_almost_equal,
assert_array_equal, IS_PYPY, suppress_warnings, _gen_alignment_data,
- assert_warns, assert_raises_regex,
+ assert_warns,
)
types = [np.bool_, np.byte, np.ubyte, np.short, np.ushort, np.intc, np.uintc,
diff --git a/numpy/core/tests/test_scalarprint.py b/numpy/core/tests/test_scalarprint.py
index 2f1c3bc5e..ee21d4aa5 100644
--- a/numpy/core/tests/test_scalarprint.py
+++ b/numpy/core/tests/test_scalarprint.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
""" Test printing of scalar types.
"""
diff --git a/numpy/core/tests/test_ufunc.py b/numpy/core/tests/test_ufunc.py
index 30929ce91..4b06c8668 100644
--- a/numpy/core/tests/test_ufunc.py
+++ b/numpy/core/tests/test_ufunc.py
@@ -1362,6 +1362,14 @@ class TestUfunc:
np.array([[2]*i for i in [1, 3, 6, 10]], dtype=object),
)
+ def test_object_array_accumulate_failure(self):
+ # Typical accumulation on object works as expected:
+ res = np.add.accumulate(np.array([1, 0, 2], dtype=object))
+ assert_array_equal(res, np.array([1, 1, 3], dtype=object))
+ # But errors are propagated from the inner-loop if they occur:
+ with pytest.raises(TypeError):
+ np.add.accumulate([1, None, 2])
+
def test_object_array_reduceat_inplace(self):
# Checks that in-place reduceats work, see also gh-7465
arr = np.empty(4, dtype=object)
@@ -1381,6 +1389,15 @@ class TestUfunc:
np.add.reduceat(arr, np.arange(4), out=arr, axis=-1)
assert_array_equal(arr, out)
+ def test_object_array_reduceat_failure(self):
+ # Reduceat works as expected when no invalid operation occurs (None is
+ # not involved in an operation here)
+ res = np.add.reduceat(np.array([1, None, 2], dtype=object), [1, 2])
+ assert_array_equal(res, np.array([None, 2], dtype=object))
+ # But errors when None would be involved in an operation:
+ with pytest.raises(TypeError):
+ np.add.reduceat([1, None, 2], [0, 2])
+
def test_zerosize_reduction(self):
# Test with default dtype and object dtype
for a in [[], np.array([], dtype=object)]:
@@ -2098,6 +2115,25 @@ class TestUfunc:
with pytest.raises(TypeError):
ufunc(a, a, signature=signature)
+ @pytest.mark.parametrize("ufunc",
+ [np.logical_and, np.logical_or, np.logical_xor])
+ def test_logical_ufuncs_support_anything(self, ufunc):
+ # The logical ufuncs support even input that can't be promoted:
+ a = np.array('1')
+ c = np.array([1., 2.])
+ assert_array_equal(ufunc(a, c), ufunc([True, True], True))
+ assert ufunc.reduce(a) == True
+
+ @pytest.mark.parametrize("ufunc",
+ [np.logical_and, np.logical_or, np.logical_xor])
+ def test_logical_ufuncs_out_cast_check(self, ufunc):
+ a = np.array('1')
+ c = np.array([1., 2.])
+ out = a.copy()
+ with pytest.raises(TypeError):
+ # It would be safe, but not equiv casting:
+ ufunc(a, c, out=out, casting="equiv")
+
def test_reduce_noncontig_output(self):
# Check that reduction deals with non-contiguous output arrays
# appropriately.
@@ -2119,6 +2155,22 @@ class TestUfunc:
assert_equal(y_base[1,:], y_base_copy[1,:])
assert_equal(y_base[3,:], y_base_copy[3,:])
+ @pytest.mark.parametrize("with_cast", [True, False])
+ def test_reduceat_and_accumulate_out_shape_mismatch(self, with_cast):
+ # Should raise an error mentioning "shape" or "size"
+ arr = np.arange(5)
+ out = np.arange(3) # definitely wrong shape
+ if with_cast:
+ # If a cast is necessary on the output, we can be sure to use
+ # the generic NpyIter (non-fast) path.
+ out = out.astype(np.float64)
+
+ with pytest.raises(ValueError, match="(shape|size)"):
+ np.add.reduceat(arr, [0, 3], out=out)
+
+ with pytest.raises(ValueError, match="(shape|size)"):
+ np.add.accumulate(arr, out=out)
+
@pytest.mark.parametrize('out_shape',
[(), (1,), (3,), (1, 1), (1, 3), (4, 3)])
@pytest.mark.parametrize('keepdims', [True, False])
@@ -2331,8 +2383,9 @@ def test_reduce_casterrors(offset):
out = np.array(-1, dtype=np.intp)
count = sys.getrefcount(value)
- with pytest.raises(ValueError):
- # This is an unsafe cast, but we currently always allow that:
+ with pytest.raises(ValueError, match="invalid literal"):
+ # This is an unsafe cast, but we currently always allow that.
+ # Note that the double loop is picked, but the cast fails.
np.add.reduce(arr, dtype=np.intp, out=out)
assert count == sys.getrefcount(value)
# If an error occurred during casting, the operation is done at most until
@@ -2340,3 +2393,18 @@ def test_reduce_casterrors(offset):
# if the error happened immediately.
# This does not define behaviour, the output is invalid and thus undefined
assert out[()] < value * offset
+
+
+@pytest.mark.parametrize("method",
+ [np.add.accumulate, np.add.reduce,
+ pytest.param(lambda x: np.add.reduceat(x, [0]), id="reduceat")])
+def test_reducelike_floaterrors(method):
+ # adding inf and -inf creates an invalid float and should give a warning
+ arr = np.array([np.inf, 0, -np.inf])
+ with np.errstate(all="warn"):
+ with pytest.warns(RuntimeWarning, match="invalid value"):
+ method(arr)
+
+ with np.errstate(all="raise"):
+ with pytest.raises(FloatingPointError):
+ method(arr)
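A hedged illustration of the newly consistent floating-point error handling in the reduce-like paths (see the `_check_ufunc_fperr` call added in PyUFunc_Reduceat above):

    import numpy as np

    arr = np.array([np.inf, 0.0, -np.inf])
    with np.errstate(invalid="raise"):
        try:
            np.add.reduce(arr)       # inf + (-inf) -> invalid value
        except FloatingPointError:
            pass
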
diff --git a/numpy/core/tests/test_umath.py b/numpy/core/tests/test_umath.py
index 4f57c0088..8f5a85824 100644
--- a/numpy/core/tests/test_umath.py
+++ b/numpy/core/tests/test_umath.py
@@ -15,7 +15,7 @@ from numpy.testing import (
assert_, assert_equal, assert_raises, assert_raises_regex,
assert_array_equal, assert_almost_equal, assert_array_almost_equal,
assert_array_max_ulp, assert_allclose, assert_no_warnings, suppress_warnings,
- _gen_alignment_data, assert_array_almost_equal_nulp, assert_warns
+ _gen_alignment_data, assert_array_almost_equal_nulp
)
def get_glibc_version():
@@ -973,6 +973,12 @@ class TestLog:
xf = np.log(x)
assert_almost_equal(np.log(x, out=x), xf)
+ # test log() of max for dtype does not raise
+ for dt in ['f', 'd', 'g']:
+ with np.errstate(all='raise'):
+ x = np.finfo(dt).max
+ np.log(x)
+
def test_log_strides(self):
np.random.seed(42)
strides = np.array([-4,-3,-2,-1,1,2,3,4])
@@ -3852,3 +3858,39 @@ def test_outer_exceeds_maxdims():
with assert_raises(ValueError):
np.add.outer(deep, deep)
+def test_bad_legacy_ufunc_silent_errors():
+    # Legacy ufunc loops can't report errors, and NumPy can't tell whether
+    # such a loop released the GIL. So NumPy has to check for an error after
+    # the loop runs, just to cover all bases. `np.power` uses (or used to
+    # use) this.
+ arr = np.arange(3).astype(np.float64)
+
+ with pytest.raises(RuntimeError, match=r"How unexpected :\)!"):
+ ncu_tests.always_error(arr, arr)
+
+ with pytest.raises(RuntimeError, match=r"How unexpected :\)!"):
+ # not contiguous means the fast-path cannot be taken
+ non_contig = arr.repeat(20).reshape(-1, 6)[:, ::2]
+ ncu_tests.always_error(non_contig, arr)
+
+ with pytest.raises(RuntimeError, match=r"How unexpected :\)!"):
+ ncu_tests.always_error.outer(arr, arr)
+
+ with pytest.raises(RuntimeError, match=r"How unexpected :\)!"):
+ ncu_tests.always_error.reduce(arr)
+
+ with pytest.raises(RuntimeError, match=r"How unexpected :\)!"):
+ ncu_tests.always_error.reduceat(arr, [0, 1])
+
+ with pytest.raises(RuntimeError, match=r"How unexpected :\)!"):
+ ncu_tests.always_error.accumulate(arr)
+
+ with pytest.raises(RuntimeError, match=r"How unexpected :\)!"):
+ ncu_tests.always_error.at(arr, [0, 1, 2], arr)
+
+
+@pytest.mark.parametrize('x1', [np.arange(3.0), [0.0, 1.0, 2.0]])
+def test_bad_legacy_gufunc_silent_errors(x1):
+ # Verify that an exception raised in a gufunc loop propagates correctly.
+ # The signature of always_error_gufunc is '(i),()->()'.
+ with pytest.raises(RuntimeError, match=r"How unexpected :\)!"):
+ ncu_tests.always_error_gufunc(x1, 0.0)
diff --git a/numpy/core/tests/test_umath_accuracy.py b/numpy/core/tests/test_umath_accuracy.py
index a703c697a..32e2dca66 100644
--- a/numpy/core/tests/test_umath_accuracy.py
+++ b/numpy/core/tests/test_umath_accuracy.py
@@ -1,5 +1,4 @@
import numpy as np
-import platform
import os
from os import path
import sys
diff --git a/numpy/ctypeslib.py b/numpy/ctypeslib.py
index 8ba6f15e5..8d105a248 100644
--- a/numpy/ctypeslib.py
+++ b/numpy/ctypeslib.py
@@ -90,18 +90,23 @@ else:
def load_library(libname, loader_path):
"""
It is possible to load a library using
+
>>> lib = ctypes.cdll[<full_path_name>] # doctest: +SKIP
But there are cross-platform considerations, such as library file extensions,
plus the fact Windows will just load the first library it finds with that name.
NumPy supplies the load_library function as a convenience.
+ .. versionchanged:: 1.20.0
+ Allow libname and loader_path to take any
+ :term:`python:path-like object`.
+
Parameters
----------
- libname : str
+ libname : path-like
Name of the library, which can have 'lib' as a prefix,
but without an extension.
- loader_path : str
+ loader_path : path-like
Where the library can be found.
Returns
@@ -120,6 +125,10 @@ else:
warnings.warn("All features of ctypes interface may not work "
"with ctypes < 1.0.1", stacklevel=2)
+ # Convert path-like objects into strings
+ libname = os.fsdecode(libname)
+ loader_path = os.fsdecode(loader_path)
+
ext = os.path.splitext(libname)[1]
if not ext:
# Try to load library with platform-specific name, otherwise
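A minimal usage sketch of the path-like support; the library name and directory below are hypothetical placeholders:

    import pathlib
    import numpy as np

    # both arguments may now be any path-like object (os.fsdecode is
    # applied internally); "libexample" must exist for this to succeed.
    lib = np.ctypeslib.load_library("libexample", pathlib.Path("/usr/local/lib"))
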
diff --git a/numpy/distutils/ccompiler.py b/numpy/distutils/ccompiler.py
index 6d063ee4e..9c85d28b9 100644
--- a/numpy/distutils/ccompiler.py
+++ b/numpy/distutils/ccompiler.py
@@ -23,7 +23,8 @@ from numpy.distutils.exec_command import (
)
from numpy.distutils.misc_util import cyg2win32, is_sequence, mingw32, \
get_num_build_jobs, \
- _commandline_dep_string
+ _commandline_dep_string, \
+ sanitize_cxx_flags
# globals for parallel build management
import threading
@@ -258,9 +259,6 @@ def CCompiler_compile(self, sources, output_dir=None, macros=None,
If compilation fails.
"""
- # This method is effective only with Python >=2.3 distutils.
- # Any changes here should be applied also to fcompiler.compile
- # method to support pre Python 2.3 distutils.
global _job_semaphore
jobs = get_num_build_jobs()
@@ -677,7 +675,9 @@ def CCompiler_cxx_compiler(self):
return self
cxx = copy(self)
- cxx.compiler_so = [cxx.compiler_cxx[0]] + cxx.compiler_so[1:]
+ cxx.compiler_cxx = cxx.compiler_cxx
+ cxx.compiler_so = [cxx.compiler_cxx[0]] + \
+ sanitize_cxx_flags(cxx.compiler_so[1:])
if sys.platform.startswith('aix') and 'ld_so_aix' in cxx.linker_so[0]:
# AIX needs the ld_so_aix script included with Python
cxx.linker_so = [cxx.linker_so[0], cxx.compiler_cxx[0]] \
diff --git a/numpy/distutils/ccompiler_opt.py b/numpy/distutils/ccompiler_opt.py
index e7fd494d3..8f1768f77 100644
--- a/numpy/distutils/ccompiler_opt.py
+++ b/numpy/distutils/ccompiler_opt.py
@@ -8,7 +8,14 @@ the sources with proper compiler's flags.
instead only focuses on the compiler side, but it creates abstract C headers
that can be used later for the final runtime dispatching process."""
-import sys, io, os, re, textwrap, pprint, inspect, atexit, subprocess
+import atexit
+import inspect
+import os
+import pprint
+import re
+import subprocess
+import textwrap
+
class _Config:
"""An abstract class holds all configurable attributes of `CCompilerOpt`,
@@ -188,7 +195,8 @@ class _Config:
# native usually works only with x86
native = '-march=native',
opt = '-O3',
- werror = '-Werror'
+ werror = '-Werror',
+ cxx = '-std=c++11',
),
clang = dict(
native = '-march=native',
@@ -198,22 +206,26 @@ class _Config:
# cases `-Werror` gets skipped during the availability test due to
# "unused arguments" warnings.
# see https://github.com/numpy/numpy/issues/19624
- werror = '-Werror-implicit-function-declaration -Werror'
+ werror = '-Werror=switch -Werror',
+ cxx = '-std=c++11',
),
icc = dict(
native = '-xHost',
opt = '-O3',
- werror = '-Werror'
+ werror = '-Werror',
+ cxx = '-std=c++11',
),
iccw = dict(
native = '/QxHost',
opt = '/O3',
- werror = '/Werror'
+ werror = '/Werror',
+ cxx = '-std=c++11',
),
msvc = dict(
native = None,
opt = '/O2',
- werror = '/WX'
+ werror = '/WX',
+ cxx = '-std=c++11',
)
)
conf_min_features = dict(
@@ -516,7 +528,8 @@ class _Config:
def __init__(self):
if self.conf_tmp_path is None:
- import tempfile, shutil
+ import shutil
+ import tempfile
tmp = tempfile.mkdtemp()
def rm_temp():
try:
@@ -555,6 +568,7 @@ class _Distutils:
flags = kwargs.pop("extra_postargs", []) + flags
if not ccompiler:
ccompiler = self._ccompiler
+
return ccompiler.compile(sources, extra_postargs=flags, **kwargs)
def dist_test(self, source, flags, macros=[]):
@@ -696,7 +710,6 @@ class _Distutils:
)
@staticmethod
def _dist_test_spawn(cmd, display=None):
- from distutils.errors import CompileError
try:
o = subprocess.check_output(cmd, stderr=subprocess.STDOUT,
universal_newlines=True)
diff --git a/numpy/distutils/command/build_clib.py b/numpy/distutils/command/build_clib.py
index 0e31a7dee..a481758c1 100644
--- a/numpy/distutils/command/build_clib.py
+++ b/numpy/distutils/command/build_clib.py
@@ -264,6 +264,8 @@ class build_clib(old_build_clib):
if include_dirs is None:
include_dirs = []
extra_postargs = build_info.get('extra_compiler_args') or []
+ extra_cflags = build_info.get('extra_cflags') or []
+ extra_cxxflags = build_info.get('extra_cxxflags') or []
include_dirs.extend(get_numpy_include_dirs())
# where compiled F90 module files are:
@@ -315,38 +317,45 @@ class build_clib(old_build_clib):
macros=macros + copt_macros,
include_dirs=include_dirs,
debug=self.debug,
- extra_postargs=extra_postargs,
+ extra_postargs=extra_postargs + extra_cxxflags,
ccompiler=cxx_compiler
)
if copt_c_sources:
log.info("compiling C dispatch-able sources")
- objects += self.compiler_opt.try_dispatch(copt_c_sources,
- output_dir=self.build_temp,
- src_dir=copt_build_src,
- macros=macros + copt_macros,
- include_dirs=include_dirs,
- debug=self.debug,
- extra_postargs=extra_postargs)
+ objects += self.compiler_opt.try_dispatch(
+ copt_c_sources,
+ output_dir=self.build_temp,
+ src_dir=copt_build_src,
+ macros=macros + copt_macros,
+ include_dirs=include_dirs,
+ debug=self.debug,
+ extra_postargs=extra_postargs + extra_cflags)
if c_sources:
log.info("compiling C sources")
- objects += compiler.compile(c_sources,
- output_dir=self.build_temp,
- macros=macros + copt_macros,
- include_dirs=include_dirs,
- debug=self.debug,
- extra_postargs=extra_postargs + copt_baseline_flags)
+ objects += compiler.compile(
+ c_sources,
+ output_dir=self.build_temp,
+ macros=macros + copt_macros,
+ include_dirs=include_dirs,
+ debug=self.debug,
+ extra_postargs=(extra_postargs +
+ copt_baseline_flags +
+ extra_cflags))
if cxx_sources:
log.info("compiling C++ sources")
cxx_compiler = compiler.cxx_compiler()
- cxx_objects = cxx_compiler.compile(cxx_sources,
- output_dir=self.build_temp,
- macros=macros + copt_macros,
- include_dirs=include_dirs,
- debug=self.debug,
- extra_postargs=extra_postargs + copt_baseline_flags)
+ cxx_objects = cxx_compiler.compile(
+ cxx_sources,
+ output_dir=self.build_temp,
+ macros=macros + copt_macros,
+ include_dirs=include_dirs,
+ debug=self.debug,
+ extra_postargs=(extra_postargs +
+ copt_baseline_flags +
+ extra_cxxflags))
objects.extend(cxx_objects)
if f_sources or fmodule_sources:
diff --git a/numpy/distutils/command/build_ext.py b/numpy/distutils/command/build_ext.py
index b8378d473..7040a2411 100644
--- a/numpy/distutils/command/build_ext.py
+++ b/numpy/distutils/command/build_ext.py
@@ -243,7 +243,8 @@ class build_ext (old_build_ext):
if l and l != ext_language and ext.language:
log.warn('resetting extension %r language from %r to %r.' %
(ext.name, l, ext_language))
- ext.language = ext_language
+ if not ext.language:
+ ext.language = ext_language
# global language
all_languages.update(ext_languages)
@@ -376,6 +377,9 @@ class build_ext (old_build_ext):
log.info("building '%s' extension", ext.name)
extra_args = ext.extra_compile_args or []
+ extra_cflags = ext.extra_c_compile_args or []
+ extra_cxxflags = ext.extra_cxx_compile_args or []
+
macros = ext.define_macros[:]
for undef in ext.undef_macros:
macros.append((undef,))
@@ -462,38 +466,43 @@ class build_ext (old_build_ext):
macros=macros + copt_macros,
include_dirs=include_dirs,
debug=self.debug,
- extra_postargs=extra_args,
+ extra_postargs=extra_args + extra_cxxflags,
ccompiler=cxx_compiler,
**kws
)
if copt_c_sources:
log.info("compiling C dispatch-able sources")
- c_objects += self.compiler_opt.try_dispatch(copt_c_sources,
- output_dir=output_dir,
- src_dir=copt_build_src,
- macros=macros + copt_macros,
- include_dirs=include_dirs,
- debug=self.debug,
- extra_postargs=extra_args,
- **kws)
+ c_objects += self.compiler_opt.try_dispatch(
+ copt_c_sources,
+ output_dir=output_dir,
+ src_dir=copt_build_src,
+ macros=macros + copt_macros,
+ include_dirs=include_dirs,
+ debug=self.debug,
+ extra_postargs=extra_args + extra_cflags,
+ **kws)
if c_sources:
log.info("compiling C sources")
- c_objects += self.compiler.compile(c_sources,
- output_dir=output_dir,
- macros=macros + copt_macros,
- include_dirs=include_dirs,
- debug=self.debug,
- extra_postargs=extra_args + copt_baseline_flags,
- **kws)
+ c_objects += self.compiler.compile(
+ c_sources,
+ output_dir=output_dir,
+ macros=macros + copt_macros,
+ include_dirs=include_dirs,
+ debug=self.debug,
+ extra_postargs=(extra_args + copt_baseline_flags +
+ extra_cflags),
+ **kws)
if cxx_sources:
log.info("compiling C++ sources")
- c_objects += cxx_compiler.compile(cxx_sources,
- output_dir=output_dir,
- macros=macros + copt_macros,
- include_dirs=include_dirs,
- debug=self.debug,
- extra_postargs=extra_args + copt_baseline_flags,
- **kws)
+ c_objects += cxx_compiler.compile(
+ cxx_sources,
+ output_dir=output_dir,
+ macros=macros + copt_macros,
+ include_dirs=include_dirs,
+ debug=self.debug,
+ extra_postargs=(extra_args + copt_baseline_flags +
+ extra_cxxflags),
+ **kws)
extra_postargs = []
f_objects = []
diff --git a/numpy/distutils/core.py b/numpy/distutils/core.py
index d5551f349..c4a14e599 100644
--- a/numpy/distutils/core.py
+++ b/numpy/distutils/core.py
@@ -19,7 +19,7 @@ import warnings
import distutils.core
import distutils.dist
-from numpy.distutils.extension import Extension
+from numpy.distutils.extension import Extension # noqa: F401
from numpy.distutils.numpy_distribution import NumpyDistribution
from numpy.distutils.command import config, config_compiler, \
build, build_py, build_ext, build_clib, build_src, build_scripts, \
diff --git a/numpy/distutils/extension.py b/numpy/distutils/extension.py
index c90b5d725..3ede013e0 100644
--- a/numpy/distutils/extension.py
+++ b/numpy/distutils/extension.py
@@ -47,6 +47,8 @@ class Extension(old_Extension):
language=None,
f2py_options=None,
module_dirs=None,
+ extra_c_compile_args=None,
+ extra_cxx_compile_args=None,
extra_f77_compile_args=None,
extra_f90_compile_args=None,):
@@ -83,6 +85,8 @@ class Extension(old_Extension):
# numpy_distutils features
self.f2py_options = f2py_options or []
self.module_dirs = module_dirs or []
+ self.extra_c_compile_args = extra_c_compile_args or []
+ self.extra_cxx_compile_args = extra_cxx_compile_args or []
self.extra_f77_compile_args = extra_f77_compile_args or []
self.extra_f90_compile_args = extra_f90_compile_args or []
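A sketch of a setup.py using the new per-language keywords (module and file names are hypothetical):

    from numpy.distutils.core import setup
    from numpy.distutils.extension import Extension

    ext = Extension(
        "example._mixed",                     # placeholder name
        sources=["example/impl.c", "example/impl.cpp"],
        # C-only and C++-only flags no longer have to share
        # extra_compile_args:
        extra_c_compile_args=["-Werror=implicit-function-declaration"],
        extra_cxx_compile_args=["-std=c++11"],
    )
    setup(name="example", ext_modules=[ext])
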
diff --git a/numpy/distutils/mingw32ccompiler.py b/numpy/distutils/mingw32ccompiler.py
index 4681d403b..82d296434 100644
--- a/numpy/distutils/mingw32ccompiler.py
+++ b/numpy/distutils/mingw32ccompiler.py
@@ -547,12 +547,12 @@ if sys.platform == 'win32':
# Value from msvcrt.CRT_ASSEMBLY_VERSION under Python 3.3.0
# on Windows XP:
_MSVCRVER_TO_FULLVER['100'] = "10.0.30319.460"
- # Python 3.7 uses 1415, but get_build_version returns 140 ??
- _MSVCRVER_TO_FULLVER['140'] = "14.15.26726.0"
- if hasattr(msvcrt, "CRT_ASSEMBLY_VERSION"):
- major, minor, rest = msvcrt.CRT_ASSEMBLY_VERSION.split(".", 2)
- _MSVCRVER_TO_FULLVER[major + minor] = msvcrt.CRT_ASSEMBLY_VERSION
- del major, minor, rest
+ crt_ver = getattr(msvcrt, 'CRT_ASSEMBLY_VERSION', None)
+ if crt_ver is not None: # Available at least back to Python 3.3
+ maj, min = re.match(r'(\d+)\.(\d)', crt_ver).groups()
+ _MSVCRVER_TO_FULLVER[maj + min] = crt_ver
+ del maj, min
+ del crt_ver
except ImportError:
# If we are here, means python was not built with MSVC. Not sure what
# to do in that case: manifest building will fail, but it should not be
@@ -647,11 +647,9 @@ def generate_manifest(config):
if msver is not None:
if msver >= 8:
check_embedded_msvcr_match_linked(msver)
- ma = int(msver)
- mi = int((msver - ma) * 10)
+ ma_str, mi_str = str(msver).split('.')
# Write the manifest file
- manxml = msvc_manifest_xml(ma, mi)
- man = open(manifest_name(config), "w")
- config.temp_files.append(manifest_name(config))
- man.write(manxml)
- man.close()
+ manxml = msvc_manifest_xml(int(ma_str), int(mi_str))
+ with open(manifest_name(config), "w") as man:
+ config.temp_files.append(manifest_name(config))
+ man.write(manxml)
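For reference, what the new regex-based parsing yields for the assembly version string mentioned in the removed comment:

    import re

    crt_ver = "14.15.26726.0"           # CRT_ASSEMBLY_VERSION on Python 3.7
    maj, minor = re.match(r"(\d+)\.(\d)", crt_ver).groups()
    assert (maj, minor) == ("14", "1")  # mapping key becomes "141"
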
diff --git a/numpy/distutils/misc_util.py b/numpy/distutils/misc_util.py
index c9e051237..f0f9b4bd7 100644
--- a/numpy/distutils/misc_util.py
+++ b/numpy/distutils/misc_util.py
@@ -11,6 +11,7 @@ import multiprocessing
import textwrap
import importlib.util
from threading import local as tlocal
+from functools import reduce
import distutils
from distutils.errors import DistutilsError
@@ -43,7 +44,7 @@ __all__ = ['Configuration', 'get_numpy_include_dirs', 'default_config_dict',
'dot_join', 'get_frame', 'minrelpath', 'njoin',
'is_sequence', 'is_string', 'as_list', 'gpaths', 'get_language',
'get_build_architecture', 'get_info', 'get_pkg_info',
- 'get_num_build_jobs']
+ 'get_num_build_jobs', 'sanitize_cxx_flags']
class InstallableLib:
"""
@@ -2478,3 +2479,15 @@ def get_build_architecture():
# systems, so delay the import to here.
from distutils.msvccompiler import get_build_architecture
return get_build_architecture()
+
+
+_cxx_ignore_flags = {'-Werror=implicit-function-declaration'}
+
+
+def sanitize_cxx_flags(cxxflags):
+ '''
+ Some flags are valid for C but not C++. Prune them.
+ '''
+ return [flag for flag in cxxflags if flag not in _cxx_ignore_flags]
+
+
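Usage is straightforward; only the flags listed in `_cxx_ignore_flags` are dropped:

    from numpy.distutils.misc_util import sanitize_cxx_flags

    flags = ["-O3", "-Werror=implicit-function-declaration", "-Werror"]
    assert sanitize_cxx_flags(flags) == ["-O3", "-Werror"]
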
diff --git a/numpy/distutils/tests/test_system_info.py b/numpy/distutils/tests/test_system_info.py
index b722281ad..8c26271af 100644
--- a/numpy/distutils/tests/test_system_info.py
+++ b/numpy/distutils/tests/test_system_info.py
@@ -254,6 +254,10 @@ class TestSystemInfoReading:
finally:
os.chdir(previousDir)
+ HAS_MKL = "mkl_rt" in mkl_info().calc_libraries_info().get("libraries", [])
+
+ @pytest.mark.xfail(HAS_MKL, reason=("`[DEFAULT]` override doesn't work if "
+ "numpy is built with MKL support"))
def test_overrides(self):
previousDir = os.getcwd()
cfg = os.path.join(self._dir1, 'site.cfg')
diff --git a/numpy/doc/constants.py b/numpy/doc/constants.py
index 128493d90..4db5c6390 100644
--- a/numpy/doc/constants.py
+++ b/numpy/doc/constants.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
=========
Constants
diff --git a/numpy/f2py/cb_rules.py b/numpy/f2py/cb_rules.py
index 5c9ddb00a..4848233d4 100644
--- a/numpy/f2py/cb_rules.py
+++ b/numpy/f2py/cb_rules.py
@@ -191,7 +191,7 @@ capi_return_pt:
'maxnofargs': '#maxnofargs#',
'nofoptargs': '#nofoptargs#',
'docstr': """\
-\tdef #argname#(#docsignature#): return #docreturn#\\n\\
+ def #argname#(#docsignature#): return #docreturn#\\n\\
#docstrsigns#""",
'latexdocstr': """
{{}\\verb@def #argname#(#latexdocsignature#): return #docreturn#@{}}
@@ -219,10 +219,10 @@ cb_rout_rules = [
'noargs': '',
'setdims': '/*setdims*/',
'docstrsigns': '', 'latexdocstrsigns': '',
- 'docstrreq': '\tRequired arguments:',
- 'docstropt': '\tOptional arguments:',
- 'docstrout': '\tReturn objects:',
- 'docstrcbs': '\tCall-back functions:',
+ 'docstrreq': ' Required arguments:',
+ 'docstropt': ' Optional arguments:',
+ 'docstrout': ' Return objects:',
+ 'docstrcbs': ' Call-back functions:',
'docreturn': '', 'docsign': '', 'docsignopt': '',
'latexdocstrreq': '\\noindent Required arguments:',
'latexdocstropt': '\\noindent Optional arguments:',
@@ -306,7 +306,7 @@ return_value
'string.h', 'GETSCALARFROMPYTUPLE', '#ctype#'],
'_check': iscomplexfunction
},
- {'docstrout': '\t\t#pydocsignout#',
+ {'docstrout': ' #pydocsignout#',
'latexdocstrout': ['\\item[]{{}\\verb@#pydocsignout#@{}}',
{hasnote: '--- #note#'}],
'docreturn': '#rname#,',
@@ -316,9 +316,9 @@ return_value
cb_arg_rules = [
{ # Doc
- 'docstropt': {l_and(isoptional, isintent_nothide): '\t\t#pydocsign#'},
- 'docstrreq': {l_and(isrequired, isintent_nothide): '\t\t#pydocsign#'},
- 'docstrout': {isintent_out: '\t\t#pydocsignout#'},
+ 'docstropt': {l_and(isoptional, isintent_nothide): ' #pydocsign#'},
+ 'docstrreq': {l_and(isrequired, isintent_nothide): ' #pydocsign#'},
+ 'docstrout': {isintent_out: ' #pydocsignout#'},
'latexdocstropt': {l_and(isoptional, isintent_nothide): ['\\item[]{{}\\verb@#pydocsign#@{}}',
{hasnote: '--- #note#'}]},
'latexdocstrreq': {l_and(isrequired, isintent_nothide): ['\\item[]{{}\\verb@#pydocsign#@{}}',
@@ -492,7 +492,7 @@ def buildcallbacks(m):
def buildcallback(rout, um):
from . import capi_maps
- outmess('\tConstructing call-back function "cb_%s_in_%s"\n' %
+ outmess(' Constructing call-back function "cb_%s_in_%s"\n' %
(rout['name'], um))
args, depargs = getargs(rout)
capi_maps.depargs = depargs
@@ -612,6 +612,6 @@ def buildcallback(rout, um):
'latexdocstr': ar['latexdocstr'],
'argname': rd['argname']
}
- outmess('\t %s\n' % (ar['docstrshort']))
+ outmess(' %s\n' % (ar['docstrshort']))
return
################## Build call-back function #############
diff --git a/numpy/f2py/rules.py b/numpy/f2py/rules.py
index 66f11f6b5..78810a0a7 100755
--- a/numpy/f2py/rules.py
+++ b/numpy/f2py/rules.py
@@ -174,67 +174,67 @@ static PyObject *#modulename#_module;
static FortranDataDef f2py_routine_defs[] = {
#routine_defs#
-\t{NULL}
+ {NULL}
};
static PyMethodDef f2py_module_methods[] = {
#pymethoddef#
-\t{NULL,NULL}
+ {NULL,NULL}
};
static struct PyModuleDef moduledef = {
-\tPyModuleDef_HEAD_INIT,
-\t"#modulename#",
-\tNULL,
-\t-1,
-\tf2py_module_methods,
-\tNULL,
-\tNULL,
-\tNULL,
-\tNULL
+ PyModuleDef_HEAD_INIT,
+ "#modulename#",
+ NULL,
+ -1,
+ f2py_module_methods,
+ NULL,
+ NULL,
+ NULL,
+ NULL
};
PyMODINIT_FUNC PyInit_#modulename#(void) {
-\tint i;
-\tPyObject *m,*d, *s, *tmp;
-\tm = #modulename#_module = PyModule_Create(&moduledef);
-\tPy_SET_TYPE(&PyFortran_Type, &PyType_Type);
-\timport_array();
-\tif (PyErr_Occurred())
-\t\t{PyErr_SetString(PyExc_ImportError, \"can't initialize module #modulename# (failed to import numpy)\"); return m;}
-\td = PyModule_GetDict(m);
-\ts = PyUnicode_FromString(\"#f2py_version#\");
-\tPyDict_SetItemString(d, \"__version__\", s);
-\tPy_DECREF(s);
-\ts = PyUnicode_FromString(
-\t\t\"This module '#modulename#' is auto-generated with f2py (version:#f2py_version#).\\nFunctions:\\n\"\n#docs#\".\");
-\tPyDict_SetItemString(d, \"__doc__\", s);
-\tPy_DECREF(s);
-\ts = PyUnicode_FromString(\"""" + numpy_version + """\");
-\tPyDict_SetItemString(d, \"__f2py_numpy_version__\", s);
-\tPy_DECREF(s);
-\t#modulename#_error = PyErr_NewException (\"#modulename#.error\", NULL, NULL);
-\t/*
-\t * Store the error object inside the dict, so that it could get deallocated.
-\t * (in practice, this is a module, so it likely will not and cannot.)
-\t */
-\tPyDict_SetItemString(d, \"_#modulename#_error\", #modulename#_error);
-\tPy_DECREF(#modulename#_error);
-\tfor(i=0;f2py_routine_defs[i].name!=NULL;i++) {
-\t\ttmp = PyFortranObject_NewAsAttr(&f2py_routine_defs[i]);
-\t\tPyDict_SetItemString(d, f2py_routine_defs[i].name, tmp);
-\t\tPy_DECREF(tmp);
-\t}
+ int i;
+ PyObject *m,*d, *s, *tmp;
+ m = #modulename#_module = PyModule_Create(&moduledef);
+ Py_SET_TYPE(&PyFortran_Type, &PyType_Type);
+ import_array();
+ if (PyErr_Occurred())
+ {PyErr_SetString(PyExc_ImportError, \"can't initialize module #modulename# (failed to import numpy)\"); return m;}
+ d = PyModule_GetDict(m);
+ s = PyUnicode_FromString(\"#f2py_version#\");
+ PyDict_SetItemString(d, \"__version__\", s);
+ Py_DECREF(s);
+ s = PyUnicode_FromString(
+ \"This module '#modulename#' is auto-generated with f2py (version:#f2py_version#).\\nFunctions:\\n\"\n#docs#\".\");
+ PyDict_SetItemString(d, \"__doc__\", s);
+ Py_DECREF(s);
+ s = PyUnicode_FromString(\"""" + numpy_version + """\");
+ PyDict_SetItemString(d, \"__f2py_numpy_version__\", s);
+ Py_DECREF(s);
+ #modulename#_error = PyErr_NewException (\"#modulename#.error\", NULL, NULL);
+ /*
+ * Store the error object inside the dict, so that it could get deallocated.
+ * (in practice, this is a module, so it likely will not and cannot.)
+ */
+ PyDict_SetItemString(d, \"_#modulename#_error\", #modulename#_error);
+ Py_DECREF(#modulename#_error);
+ for(i=0;f2py_routine_defs[i].name!=NULL;i++) {
+ tmp = PyFortranObject_NewAsAttr(&f2py_routine_defs[i]);
+ PyDict_SetItemString(d, f2py_routine_defs[i].name, tmp);
+ Py_DECREF(tmp);
+ }
#initf2pywraphooks#
#initf90modhooks#
#initcommonhooks#
#interface_usercode#
#ifdef F2PY_REPORT_ATEXIT
-\tif (! PyErr_Occurred())
-\t\ton_exit(f2py_report_on_exit,(void*)\"#modulename#\");
+ if (! PyErr_Occurred())
+ on_exit(f2py_report_on_exit,(void*)\"#modulename#\");
#endif
-\treturn m;
+ return m;
}
#ifdef __cplusplus
}
@@ -326,7 +326,7 @@ f2py_stop_clock();
'externroutines': '#declfortranroutine#',
'doc': '#docreturn##name#(#docsignature#)',
'docshort': '#docreturn##name#(#docsignatureshort#)',
- 'docs': '"\t#docreturn##name#(#docsignature#)\\n"\n',
+ 'docs': '" #docreturn##name#(#docsignature#)\\n"\n',
'need': ['arrayobject.h', 'CFUNCSMESS', 'MINMAX'],
'cppmacros': {debugcapi: '#define DEBUGCFUNCS'},
'latexdoc': ['\\subsection{Wrapper function \\texttt{#texname#}}\n',
@@ -400,25 +400,25 @@ rout_rules = [
ismoduleroutine: '',
isdummyroutine: ''
},
- 'routine_def': {l_not(l_or(ismoduleroutine, isintent_c, isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},',
- l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},',
- l_and(l_not(ismoduleroutine), isdummyroutine): '\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},',
+ 'routine_def': {l_not(l_or(ismoduleroutine, isintent_c, isdummyroutine)): ' {\"#name#\",-1,{{-1}},0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},',
+ l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)): ' {\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},',
+ l_and(l_not(ismoduleroutine), isdummyroutine): ' {\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},',
},
'need': {l_and(l_not(l_or(ismoduleroutine, isintent_c)), l_not(isdummyroutine)): 'F_FUNC'},
'callfortranroutine': [
{debugcapi: [
- """\tfprintf(stderr,\"debug-capi:Fortran subroutine `#fortranname#(#callfortran#)\'\\n\");"""]},
+ """ fprintf(stderr,\"debug-capi:Fortran subroutine `#fortranname#(#callfortran#)\'\\n\");"""]},
{hasexternals: """\
-\t\tif (#setjmpbuf#) {
-\t\t\tf2py_success = 0;
-\t\t} else {"""},
- {isthreadsafe: '\t\t\tPy_BEGIN_ALLOW_THREADS'},
- {hascallstatement: '''\t\t\t\t#callstatement#;
-\t\t\t\t/*(*f2py_func)(#callfortran#);*/'''},
+ if (#setjmpbuf#) {
+ f2py_success = 0;
+ } else {"""},
+ {isthreadsafe: ' Py_BEGIN_ALLOW_THREADS'},
+ {hascallstatement: ''' #callstatement#;
+ /*(*f2py_func)(#callfortran#);*/'''},
{l_not(l_or(hascallstatement, isdummyroutine))
- : '\t\t\t\t(*f2py_func)(#callfortran#);'},
- {isthreadsafe: '\t\t\tPy_END_ALLOW_THREADS'},
- {hasexternals: """\t\t}"""}
+ : ' (*f2py_func)(#callfortran#);'},
+ {isthreadsafe: ' Py_END_ALLOW_THREADS'},
+ {hasexternals: """ }"""}
],
'_check': l_and(issubroutine, l_not(issubroutine_wrap)),
}, { # Wrapped function
@@ -427,8 +427,8 @@ rout_rules = [
isdummyroutine: '',
},
- 'routine_def': {l_not(l_or(ismoduleroutine, isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#F_WRAPPEDFUNC#(#name_lower#,#NAME#),(f2py_init_func)#apiname#,doc_#apiname#},',
- isdummyroutine: '\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},',
+ 'routine_def': {l_not(l_or(ismoduleroutine, isdummyroutine)): ' {\"#name#\",-1,{{-1}},0,(char *)#F_WRAPPEDFUNC#(#name_lower#,#NAME#),(f2py_init_func)#apiname#,doc_#apiname#},',
+ isdummyroutine: ' {\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},',
},
'initf2pywraphook': {l_not(l_or(ismoduleroutine, isdummyroutine)): '''
{
@@ -445,18 +445,18 @@ rout_rules = [
'need': {l_not(l_or(ismoduleroutine, isdummyroutine)): ['F_WRAPPEDFUNC', 'F_FUNC']},
'callfortranroutine': [
{debugcapi: [
- """\tfprintf(stderr,\"debug-capi:Fortran subroutine `f2pywrap#name_lower#(#callfortran#)\'\\n\");"""]},
+ """ fprintf(stderr,\"debug-capi:Fortran subroutine `f2pywrap#name_lower#(#callfortran#)\'\\n\");"""]},
{hasexternals: """\
-\tif (#setjmpbuf#) {
-\t\tf2py_success = 0;
-\t} else {"""},
- {isthreadsafe: '\tPy_BEGIN_ALLOW_THREADS'},
+ if (#setjmpbuf#) {
+ f2py_success = 0;
+ } else {"""},
+ {isthreadsafe: ' Py_BEGIN_ALLOW_THREADS'},
{l_not(l_or(hascallstatement, isdummyroutine))
- : '\t(*f2py_func)(#callfortran#);'},
+ : ' (*f2py_func)(#callfortran#);'},
{hascallstatement:
- '\t#callstatement#;\n\t/*(*f2py_func)(#callfortran#);*/'},
- {isthreadsafe: '\tPy_END_ALLOW_THREADS'},
- {hasexternals: '\t}'}
+ ' #callstatement#;\n /*(*f2py_func)(#callfortran#);*/'},
+ {isthreadsafe: ' Py_END_ALLOW_THREADS'},
+ {hasexternals: ' }'}
],
'_check': isfunction_wrap,
}, { # Wrapped subroutine
@@ -465,8 +465,8 @@ rout_rules = [
isdummyroutine: '',
},
- 'routine_def': {l_not(l_or(ismoduleroutine, isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#F_WRAPPEDFUNC#(#name_lower#,#NAME#),(f2py_init_func)#apiname#,doc_#apiname#},',
- isdummyroutine: '\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},',
+ 'routine_def': {l_not(l_or(ismoduleroutine, isdummyroutine)): ' {\"#name#\",-1,{{-1}},0,(char *)#F_WRAPPEDFUNC#(#name_lower#,#NAME#),(f2py_init_func)#apiname#,doc_#apiname#},',
+ isdummyroutine: ' {\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},',
},
'initf2pywraphook': {l_not(l_or(ismoduleroutine, isdummyroutine)): '''
{
@@ -483,18 +483,18 @@ rout_rules = [
'need': {l_not(l_or(ismoduleroutine, isdummyroutine)): ['F_WRAPPEDFUNC', 'F_FUNC']},
'callfortranroutine': [
{debugcapi: [
- """\tfprintf(stderr,\"debug-capi:Fortran subroutine `f2pywrap#name_lower#(#callfortran#)\'\\n\");"""]},
+ """ fprintf(stderr,\"debug-capi:Fortran subroutine `f2pywrap#name_lower#(#callfortran#)\'\\n\");"""]},
{hasexternals: """\
-\tif (#setjmpbuf#) {
-\t\tf2py_success = 0;
-\t} else {"""},
- {isthreadsafe: '\tPy_BEGIN_ALLOW_THREADS'},
+ if (#setjmpbuf#) {
+ f2py_success = 0;
+ } else {"""},
+ {isthreadsafe: ' Py_BEGIN_ALLOW_THREADS'},
{l_not(l_or(hascallstatement, isdummyroutine))
- : '\t(*f2py_func)(#callfortran#);'},
+ : ' (*f2py_func)(#callfortran#);'},
{hascallstatement:
- '\t#callstatement#;\n\t/*(*f2py_func)(#callfortran#);*/'},
- {isthreadsafe: '\tPy_END_ALLOW_THREADS'},
- {hasexternals: '\t}'}
+ ' #callstatement#;\n /*(*f2py_func)(#callfortran#);*/'},
+ {isthreadsafe: ' Py_END_ALLOW_THREADS'},
+ {hasexternals: ' }'}
],
'_check': issubroutine_wrap,
}, { # Function
@@ -505,13 +505,13 @@ rout_rules = [
{hasresultnote: '--- #resultnote#'}],
'callfortranroutine': [{l_and(debugcapi, isstringfunction): """\
#ifdef USESCOMPAQFORTRAN
-\tfprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callcompaqfortran#)\\n\");
+ fprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callcompaqfortran#)\\n\");
#else
-\tfprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callfortran#)\\n\");
+ fprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callfortran#)\\n\");
#endif
"""},
{l_and(debugcapi, l_not(isstringfunction)): """\
-\tfprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callfortran#)\\n\");
+ fprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callfortran#)\\n\");
"""}
],
'_check': l_and(isfunction, l_not(isfunction_wrap))
@@ -520,32 +520,32 @@ rout_rules = [
l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)): 'extern #ctype# #fortranname#(#callprotoargument#);',
isdummyroutine: ''
},
- 'routine_def': {l_and(l_not(l_or(ismoduleroutine, isintent_c)), l_not(isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},',
- l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},',
- isdummyroutine: '\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},',
+ 'routine_def': {l_and(l_not(l_or(ismoduleroutine, isintent_c)), l_not(isdummyroutine)): ' {\"#name#\",-1,{{-1}},0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},',
+ l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)): ' {\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},',
+ isdummyroutine: ' {\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},',
},
- 'decl': [{iscomplexfunction_warn: '\t#ctype# #name#_return_value={0,0};',
- l_not(iscomplexfunction): '\t#ctype# #name#_return_value=0;'},
+ 'decl': [{iscomplexfunction_warn: ' #ctype# #name#_return_value={0,0};',
+ l_not(iscomplexfunction): ' #ctype# #name#_return_value=0;'},
{iscomplexfunction:
- '\tPyObject *#name#_return_value_capi = Py_None;'}
+ ' PyObject *#name#_return_value_capi = Py_None;'}
],
'callfortranroutine': [
{hasexternals: """\
-\tif (#setjmpbuf#) {
-\t\tf2py_success = 0;
-\t} else {"""},
- {isthreadsafe: '\tPy_BEGIN_ALLOW_THREADS'},
- {hascallstatement: '''\t#callstatement#;
-/*\t#name#_return_value = (*f2py_func)(#callfortran#);*/
+ if (#setjmpbuf#) {
+ f2py_success = 0;
+ } else {"""},
+ {isthreadsafe: ' Py_BEGIN_ALLOW_THREADS'},
+ {hascallstatement: ''' #callstatement#;
+/* #name#_return_value = (*f2py_func)(#callfortran#);*/
'''},
{l_not(l_or(hascallstatement, isdummyroutine))
- : '\t#name#_return_value = (*f2py_func)(#callfortran#);'},
- {isthreadsafe: '\tPy_END_ALLOW_THREADS'},
- {hasexternals: '\t}'},
+ : ' #name#_return_value = (*f2py_func)(#callfortran#);'},
+ {isthreadsafe: ' Py_END_ALLOW_THREADS'},
+ {hasexternals: ' }'},
{l_and(debugcapi, iscomplexfunction)
- : '\tfprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value.r,#name#_return_value.i);'},
- {l_and(debugcapi, l_not(iscomplexfunction)): '\tfprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value);'}],
- 'pyobjfrom': {iscomplexfunction: '\t#name#_return_value_capi = pyobj_from_#ctype#1(#name#_return_value);'},
+ : ' fprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value.r,#name#_return_value.i);'},
+ {l_and(debugcapi, l_not(iscomplexfunction)): ' fprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value);'}],
+ 'pyobjfrom': {iscomplexfunction: ' #name#_return_value_capi = pyobj_from_#ctype#1(#name#_return_value);'},
'need': [{l_not(isdummyroutine): 'F_FUNC'},
{iscomplexfunction: 'pyobj_from_#ctype#1'},
{islong_longfunction: 'long_long'},
@@ -557,50 +557,50 @@ rout_rules = [
}, { # String function # in use for --no-wrap
'declfortranroutine': 'extern void #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);',
'routine_def': {l_not(l_or(ismoduleroutine, isintent_c)):
- '\t{\"#name#\",-1,{{-1}},0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},',
+ ' {\"#name#\",-1,{{-1}},0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},',
l_and(l_not(ismoduleroutine), isintent_c):
- '\t{\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},'
+ ' {\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},'
},
- 'decl': ['\t#ctype# #name#_return_value = NULL;',
- '\tint #name#_return_value_len = 0;'],
+ 'decl': [' #ctype# #name#_return_value = NULL;',
+ ' int #name#_return_value_len = 0;'],
'callfortran':'#name#_return_value,#name#_return_value_len,',
- 'callfortranroutine':['\t#name#_return_value_len = #rlength#;',
- '\tif ((#name#_return_value = (string)malloc('
- '#name#_return_value_len+1) == NULL) {',
- '\t\tPyErr_SetString(PyExc_MemoryError, \"out of memory\");',
- '\t\tf2py_success = 0;',
- '\t} else {',
- "\t\t(#name#_return_value)[#name#_return_value_len] = '\\0';",
- '\t}',
- '\tif (f2py_success) {',
+ 'callfortranroutine':[' #name#_return_value_len = #rlength#;',
+ ' if ((#name#_return_value = (string)malloc('
+ + '#name#_return_value_len+1) == NULL) {',
+ ' PyErr_SetString(PyExc_MemoryError, \"out of memory\");',
+ ' f2py_success = 0;',
+ ' } else {',
+ " (#name#_return_value)[#name#_return_value_len] = '\\0';",
+ ' }',
+ ' if (f2py_success) {',
{hasexternals: """\
-\t\tif (#setjmpbuf#) {
-\t\t\tf2py_success = 0;
-\t\t} else {"""},
- {isthreadsafe: '\t\tPy_BEGIN_ALLOW_THREADS'},
+ if (#setjmpbuf#) {
+ f2py_success = 0;
+ } else {"""},
+ {isthreadsafe: ' Py_BEGIN_ALLOW_THREADS'},
"""\
#ifdef USESCOMPAQFORTRAN
-\t\t(*f2py_func)(#callcompaqfortran#);
+ (*f2py_func)(#callcompaqfortran#);
#else
-\t\t(*f2py_func)(#callfortran#);
+ (*f2py_func)(#callfortran#);
#endif
""",
- {isthreadsafe: '\t\tPy_END_ALLOW_THREADS'},
- {hasexternals: '\t\t}'},
+ {isthreadsafe: ' Py_END_ALLOW_THREADS'},
+ {hasexternals: ' }'},
{debugcapi:
- '\t\tfprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value_len,#name#_return_value);'},
- '\t} /* if (f2py_success) after (string)malloc */',
+ ' fprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value_len,#name#_return_value);'},
+ ' } /* if (f2py_success) after (string)malloc */',
],
'returnformat': '#rformat#',
'return': ',#name#_return_value',
- 'freemem': '\tSTRINGFREE(#name#_return_value);',
+ 'freemem': ' STRINGFREE(#name#_return_value);',
'need': ['F_FUNC', '#ctype#', 'STRINGFREE'],
'_check':l_and(isstringfunction, l_not(isfunction_wrap)) # ???obsolete
},
{ # Debugging
- 'routdebugenter': '\tfprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#(#docsignature#)\\n");',
- 'routdebugleave': '\tfprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#: successful.\\n");',
- 'routdebugfailure': '\tfprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#: failure.\\n");',
+ 'routdebugenter': ' fprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#(#docsignature#)\\n");',
+ 'routdebugleave': ' fprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#: successful.\\n");',
+ 'routdebugfailure': ' fprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#: failure.\\n");',
'_check': debugcapi
}
]
@@ -625,16 +625,16 @@ aux_rules = [
'separatorsfor': sepdict
},
{ # Common
- 'frompyobj': ['\t/* Processing auxiliary variable #varname# */',
- {debugcapi: '\tfprintf(stderr,"#vardebuginfo#\\n");'}, ],
- 'cleanupfrompyobj': '\t/* End of cleaning variable #varname# */',
+ 'frompyobj': [' /* Processing auxiliary variable #varname# */',
+ {debugcapi: ' fprintf(stderr,"#vardebuginfo#\\n");'}, ],
+ 'cleanupfrompyobj': ' /* End of cleaning variable #varname# */',
'need': typedef_need_dict,
},
# Scalars (not complex)
{ # Common
- 'decl': '\t#ctype# #varname# = 0;',
+ 'decl': ' #ctype# #varname# = 0;',
'need': {hasinitvalue: 'math.h'},
- 'frompyobj': {hasinitvalue: '\t#varname# = #init#;'},
+ 'frompyobj': {hasinitvalue: ' #varname# = #init#;'},
'_check': l_and(isscalar, l_not(iscomplex)),
},
{
@@ -646,23 +646,23 @@ aux_rules = [
},
# Complex scalars
{ # Common
- 'decl': '\t#ctype# #varname#;',
- 'frompyobj': {hasinitvalue: '\t#varname#.r = #init.r#, #varname#.i = #init.i#;'},
+ 'decl': ' #ctype# #varname#;',
+ 'frompyobj': {hasinitvalue: ' #varname#.r = #init.r#, #varname#.i = #init.i#;'},
'_check': iscomplex
},
# String
{ # Common
- 'decl': ['\t#ctype# #varname# = NULL;',
- '\tint slen(#varname#);',
+ 'decl': [' #ctype# #varname# = NULL;',
+ ' int slen(#varname#);',
],
'need':['len..'],
'_check':isstring
},
# Array
{ # Common
- 'decl': ['\t#ctype# *#varname# = NULL;',
- '\tnpy_intp #varname#_Dims[#rank#] = {#rank*[-1]#};',
- '\tconst int #varname#_Rank = #rank#;',
+ 'decl': [' #ctype# *#varname# = NULL;',
+ ' npy_intp #varname#_Dims[#rank#] = {#rank*[-1]#};',
+ ' const int #varname#_Rank = #rank#;',
],
'need':['len..', {hasinitvalue: 'forcomb'}, {hasinitvalue: 'CFUNCSMESS'}],
'_check': isarray
@@ -711,9 +711,9 @@ arg_rules = [
'separatorsfor': sepdict
},
{ # Common
- 'frompyobj': ['\t/* Processing variable #varname# */',
- {debugcapi: '\tfprintf(stderr,"#vardebuginfo#\\n");'}, ],
- 'cleanupfrompyobj': '\t/* End of cleaning variable #varname# */',
+ 'frompyobj': [' /* Processing variable #varname# */',
+ {debugcapi: ' fprintf(stderr,"#vardebuginfo#\\n");'}, ],
+ 'cleanupfrompyobj': ' /* End of cleaning variable #varname# */',
'_depend': '',
'need': typedef_need_dict,
},
@@ -832,8 +832,8 @@ if (#varname#_cb.capi==Py_None) {
},
# Scalars (not complex)
{ # Common
- 'decl': '\t#ctype# #varname# = 0;',
- 'pyobjfrom': {debugcapi: '\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#);'},
+ 'decl': ' #ctype# #varname# = 0;',
+ 'pyobjfrom': {debugcapi: ' fprintf(stderr,"#vardebugshowvalue#\\n",#varname#);'},
'callfortran': {isintent_c: '#varname#,', l_not(isintent_c): '&#varname#,'},
'return': {isintent_out: ',#varname#'},
'_check': l_and(isscalar, l_not(iscomplex))
@@ -841,15 +841,15 @@ if (#varname#_cb.capi==Py_None) {
'need': {hasinitvalue: 'math.h'},
'_check': l_and(isscalar, l_not(iscomplex)),
}, { # Not hidden
- 'decl': '\tPyObject *#varname#_capi = Py_None;',
+ 'decl': ' PyObject *#varname#_capi = Py_None;',
'argformat': {isrequired: 'O'},
'keyformat': {isoptional: 'O'},
'args_capi': {isrequired: ',&#varname#_capi'},
'keys_capi': {isoptional: ',&#varname#_capi'},
'pyobjfrom': {isintent_inout: """\
-\tf2py_success = try_pyarr_from_#ctype#(#varname#_capi,&#varname#);
-\tif (f2py_success) {"""},
- 'closepyobjfrom': {isintent_inout: "\t} /*if (f2py_success) of #varname# pyobjfrom*/"},
+ f2py_success = try_pyarr_from_#ctype#(#varname#_capi,&#varname#);
+ if (f2py_success) {"""},
+ 'closepyobjfrom': {isintent_inout: " } /*if (f2py_success) of #varname# pyobjfrom*/"},
'need': {isintent_inout: 'try_pyarr_from_#ctype#'},
'_check': l_and(isscalar, l_not(iscomplex), isintent_nothide)
}, {
@@ -869,91 +869,91 @@ if (#varname#_cb.capi==Py_None) {
# ...
# from_pyobj(varname)
#
- {hasinitvalue: '\tif (#varname#_capi == Py_None) #varname# = #init#; else',
+ {hasinitvalue: ' if (#varname#_capi == Py_None) #varname# = #init#; else',
'_depend': ''},
- {l_and(isoptional, l_not(hasinitvalue)): '\tif (#varname#_capi != Py_None)',
+ {l_and(isoptional, l_not(hasinitvalue)): ' if (#varname#_capi != Py_None)',
'_depend': ''},
{l_not(islogical): '''\
-\t\tf2py_success = #ctype#_from_pyobj(&#varname#,#varname#_capi,"#pyname#() #nth# (#varname#) can\'t be converted to #ctype#");
-\tif (f2py_success) {'''},
+ f2py_success = #ctype#_from_pyobj(&#varname#,#varname#_capi,"#pyname#() #nth# (#varname#) can\'t be converted to #ctype#");
+ if (f2py_success) {'''},
{islogical: '''\
-\t\t#varname# = (#ctype#)PyObject_IsTrue(#varname#_capi);
-\t\tf2py_success = 1;
-\tif (f2py_success) {'''},
+ #varname# = (#ctype#)PyObject_IsTrue(#varname#_capi);
+ f2py_success = 1;
+ if (f2py_success) {'''},
],
- 'cleanupfrompyobj': '\t} /*if (f2py_success) of #varname#*/',
+ 'cleanupfrompyobj': ' } /*if (f2py_success) of #varname#*/',
'need': {l_not(islogical): '#ctype#_from_pyobj'},
'_check': l_and(isscalar, l_not(iscomplex), isintent_nothide),
'_depend': ''
}, { # Hidden
- 'frompyobj': {hasinitvalue: '\t#varname# = #init#;'},
+ 'frompyobj': {hasinitvalue: ' #varname# = #init#;'},
'need': typedef_need_dict,
'_check': l_and(isscalar, l_not(iscomplex), isintent_hide),
'_depend': ''
}, { # Common
- 'frompyobj': {debugcapi: '\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#);'},
+ 'frompyobj': {debugcapi: ' fprintf(stderr,"#vardebugshowvalue#\\n",#varname#);'},
'_check': l_and(isscalar, l_not(iscomplex)),
'_depend': ''
},
# Complex scalars
{ # Common
- 'decl': '\t#ctype# #varname#;',
+ 'decl': ' #ctype# #varname#;',
'callfortran': {isintent_c: '#varname#,', l_not(isintent_c): '&#varname#,'},
- 'pyobjfrom': {debugcapi: '\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#.r,#varname#.i);'},
+ 'pyobjfrom': {debugcapi: ' fprintf(stderr,"#vardebugshowvalue#\\n",#varname#.r,#varname#.i);'},
'return': {isintent_out: ',#varname#_capi'},
'_check': iscomplex
}, { # Not hidden
- 'decl': '\tPyObject *#varname#_capi = Py_None;',
+ 'decl': ' PyObject *#varname#_capi = Py_None;',
'argformat': {isrequired: 'O'},
'keyformat': {isoptional: 'O'},
'args_capi': {isrequired: ',&#varname#_capi'},
'keys_capi': {isoptional: ',&#varname#_capi'},
'need': {isintent_inout: 'try_pyarr_from_#ctype#'},
'pyobjfrom': {isintent_inout: """\
-\t\tf2py_success = try_pyarr_from_#ctype#(#varname#_capi,&#varname#);
-\t\tif (f2py_success) {"""},
- 'closepyobjfrom': {isintent_inout: "\t\t} /*if (f2py_success) of #varname# pyobjfrom*/"},
+ f2py_success = try_pyarr_from_#ctype#(#varname#_capi,&#varname#);
+ if (f2py_success) {"""},
+ 'closepyobjfrom': {isintent_inout: " } /*if (f2py_success) of #varname# pyobjfrom*/"},
'_check': l_and(iscomplex, isintent_nothide)
}, {
- 'frompyobj': [{hasinitvalue: '\tif (#varname#_capi==Py_None) {#varname#.r = #init.r#, #varname#.i = #init.i#;} else'},
+ 'frompyobj': [{hasinitvalue: ' if (#varname#_capi==Py_None) {#varname#.r = #init.r#, #varname#.i = #init.i#;} else'},
{l_and(isoptional, l_not(hasinitvalue))
- : '\tif (#varname#_capi != Py_None)'},
- '\t\tf2py_success = #ctype#_from_pyobj(&#varname#,#varname#_capi,"#pyname#() #nth# (#varname#) can\'t be converted to #ctype#");'
- '\n\tif (f2py_success) {'],
- 'cleanupfrompyobj': '\t} /*if (f2py_success) of #varname# frompyobj*/',
+ : ' if (#varname#_capi != Py_None)'},
+ ' f2py_success = #ctype#_from_pyobj(&#varname#,#varname#_capi,"#pyname#() #nth# (#varname#) can\'t be converted to #ctype#");'
+ '\n if (f2py_success) {'],
+ 'cleanupfrompyobj': ' } /*if (f2py_success) of #varname# frompyobj*/',
'need': ['#ctype#_from_pyobj'],
'_check': l_and(iscomplex, isintent_nothide),
'_depend': ''
}, { # Hidden
- 'decl': {isintent_out: '\tPyObject *#varname#_capi = Py_None;'},
+ 'decl': {isintent_out: ' PyObject *#varname#_capi = Py_None;'},
'_check': l_and(iscomplex, isintent_hide)
}, {
- 'frompyobj': {hasinitvalue: '\t#varname#.r = #init.r#, #varname#.i = #init.i#;'},
+ 'frompyobj': {hasinitvalue: ' #varname#.r = #init.r#, #varname#.i = #init.i#;'},
'_check': l_and(iscomplex, isintent_hide),
'_depend': ''
}, { # Common
- 'pyobjfrom': {isintent_out: '\t#varname#_capi = pyobj_from_#ctype#1(#varname#);'},
+ 'pyobjfrom': {isintent_out: ' #varname#_capi = pyobj_from_#ctype#1(#varname#);'},
'need': ['pyobj_from_#ctype#1'],
'_check': iscomplex
}, {
- 'frompyobj': {debugcapi: '\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#.r,#varname#.i);'},
+ 'frompyobj': {debugcapi: ' fprintf(stderr,"#vardebugshowvalue#\\n",#varname#.r,#varname#.i);'},
'_check': iscomplex,
'_depend': ''
},
# String
{ # Common
- 'decl': ['\t#ctype# #varname# = NULL;',
- '\tint slen(#varname#);',
- '\tPyObject *#varname#_capi = Py_None;'],
+ 'decl': [' #ctype# #varname# = NULL;',
+ ' int slen(#varname#);',
+ ' PyObject *#varname#_capi = Py_None;'],
'callfortran':'#varname#,',
'callfortranappend':'slen(#varname#),',
'pyobjfrom':[
{debugcapi:
- '\tfprintf(stderr,'
+ ' fprintf(stderr,'
'"#vardebugshowvalue#\\n",slen(#varname#),#varname#);'},
# The trailing null value for Fortran is blank.
{l_and(isintent_out, l_not(isintent_c)):
- "\t\tSTRINGPADN(#varname#, slen(#varname#), ' ', '\\0');"},
+ " STRINGPADN(#varname#, slen(#varname#), ' ', '\\0');"},
],
'return': {isintent_out: ',#varname#'},
'need': ['len..',
@@ -962,18 +962,18 @@ if (#varname#_cb.capi==Py_None) {
}, { # Common
'frompyobj': [
"""\
-\tslen(#varname#) = #length#;
-\tf2py_success = #ctype#_from_pyobj(&#varname#,&slen(#varname#),#init#,"""
+ slen(#varname#) = #length#;
+ f2py_success = #ctype#_from_pyobj(&#varname#,&slen(#varname#),#init#,"""
"""#varname#_capi,\"#ctype#_from_pyobj failed in converting #nth#"""
"""`#varname#\' of #pyname# to C #ctype#\");
-\tif (f2py_success) {""",
+ if (f2py_success) {""",
# The trailing null value for Fortran is blank.
{l_not(isintent_c):
- "\t\tSTRINGPADN(#varname#, slen(#varname#), '\\0', ' ');"},
+ " STRINGPADN(#varname#, slen(#varname#), '\\0', ' ');"},
],
'cleanupfrompyobj': """\
-\t\tSTRINGFREE(#varname#);
-\t} /*if (f2py_success) of #varname#*/""",
+ STRINGFREE(#varname#);
+ } /*if (f2py_success) of #varname#*/""",
'need': ['#ctype#_from_pyobj', 'len..', 'STRINGFREE',
{l_not(isintent_c): 'STRINGPADN'}],
'_check':isstring,
@@ -985,36 +985,36 @@ if (#varname#_cb.capi==Py_None) {
'keys_capi': {isoptional: ',&#varname#_capi'},
'pyobjfrom': [
{l_and(isintent_inout, l_not(isintent_c)):
- "\t\tSTRINGPADN(#varname#, slen(#varname#), ' ', '\\0');"},
+ " STRINGPADN(#varname#, slen(#varname#), ' ', '\\0');"},
{isintent_inout: '''\
-\tf2py_success = try_pyarr_from_#ctype#(#varname#_capi, #varname#,
-\t slen(#varname#));
-\tif (f2py_success) {'''}],
- 'closepyobjfrom': {isintent_inout: '\t} /*if (f2py_success) of #varname# pyobjfrom*/'},
+ f2py_success = try_pyarr_from_#ctype#(#varname#_capi, #varname#,
+ slen(#varname#));
+ if (f2py_success) {'''}],
+ 'closepyobjfrom': {isintent_inout: ' } /*if (f2py_success) of #varname# pyobjfrom*/'},
'need': {isintent_inout: 'try_pyarr_from_#ctype#',
l_and(isintent_inout, l_not(isintent_c)): 'STRINGPADN'},
'_check': l_and(isstring, isintent_nothide)
}, { # Hidden
'_check': l_and(isstring, isintent_hide)
}, {
- 'frompyobj': {debugcapi: '\tfprintf(stderr,"#vardebugshowvalue#\\n",slen(#varname#),#varname#);'},
+ 'frompyobj': {debugcapi: ' fprintf(stderr,"#vardebugshowvalue#\\n",slen(#varname#),#varname#);'},
'_check': isstring,
'_depend': ''
},
# Array
{ # Common
- 'decl': ['\t#ctype# *#varname# = NULL;',
- '\tnpy_intp #varname#_Dims[#rank#] = {#rank*[-1]#};',
- '\tconst int #varname#_Rank = #rank#;',
- '\tPyArrayObject *capi_#varname#_tmp = NULL;',
- '\tint capi_#varname#_intent = 0;',
+ 'decl': [' #ctype# *#varname# = NULL;',
+ ' npy_intp #varname#_Dims[#rank#] = {#rank*[-1]#};',
+ ' const int #varname#_Rank = #rank#;',
+ ' PyArrayObject *capi_#varname#_tmp = NULL;',
+ ' int capi_#varname#_intent = 0;',
],
'callfortran':'#varname#,',
'return':{isintent_out: ',capi_#varname#_tmp'},
'need': 'len..',
'_check': isarray
}, { # intent(overwrite) array
- 'decl': '\tint capi_overwrite_#varname# = 1;',
+ 'decl': ' int capi_overwrite_#varname# = 1;',
'kwlistxa': '"overwrite_#varname#",',
'xaformat': 'i',
'keys_xa': ',&capi_overwrite_#varname#',
@@ -1023,12 +1023,12 @@ if (#varname#_cb.capi==Py_None) {
'docstropt': 'overwrite_#varname# : input int, optional\\n Default: 1',
'_check': l_and(isarray, isintent_overwrite),
}, {
- 'frompyobj': '\tcapi_#varname#_intent |= (capi_overwrite_#varname#?0:F2PY_INTENT_COPY);',
+ 'frompyobj': ' capi_#varname#_intent |= (capi_overwrite_#varname#?0:F2PY_INTENT_COPY);',
'_check': l_and(isarray, isintent_overwrite),
'_depend': '',
},
{ # intent(copy) array
- 'decl': '\tint capi_overwrite_#varname# = 0;',
+ 'decl': ' int capi_overwrite_#varname# = 0;',
'kwlistxa': '"overwrite_#varname#",',
'xaformat': 'i',
'keys_xa': ',&capi_overwrite_#varname#',
@@ -1037,7 +1037,7 @@ if (#varname#_cb.capi==Py_None) {
'docstropt': 'overwrite_#varname# : input int, optional\\n Default: 0',
'_check': l_and(isarray, isintent_copy),
}, {
- 'frompyobj': '\tcapi_#varname#_intent |= (capi_overwrite_#varname#?0:F2PY_INTENT_COPY);',
+ 'frompyobj': ' capi_#varname#_intent |= (capi_overwrite_#varname#?0:F2PY_INTENT_COPY);',
'_check': l_and(isarray, isintent_copy),
'_depend': '',
}, {
@@ -1045,57 +1045,57 @@ if (#varname#_cb.capi==Py_None) {
'_check': isarray,
'_depend': ''
}, { # Not hidden
- 'decl': '\tPyObject *#varname#_capi = Py_None;',
+ 'decl': ' PyObject *#varname#_capi = Py_None;',
'argformat': {isrequired: 'O'},
'keyformat': {isoptional: 'O'},
'args_capi': {isrequired: ',&#varname#_capi'},
'keys_capi': {isoptional: ',&#varname#_capi'},
'_check': l_and(isarray, isintent_nothide)
}, {
- 'frompyobj': ['\t#setdims#;',
- '\tcapi_#varname#_intent |= #intent#;',
+ 'frompyobj': [' #setdims#;',
+ ' capi_#varname#_intent |= #intent#;',
{isintent_hide:
- '\tcapi_#varname#_tmp = array_from_pyobj(#atype#,#varname#_Dims,#varname#_Rank,capi_#varname#_intent,Py_None);'},
+ ' capi_#varname#_tmp = array_from_pyobj(#atype#,#varname#_Dims,#varname#_Rank,capi_#varname#_intent,Py_None);'},
{isintent_nothide:
- '\tcapi_#varname#_tmp = array_from_pyobj(#atype#,#varname#_Dims,#varname#_Rank,capi_#varname#_intent,#varname#_capi);'},
+ ' capi_#varname#_tmp = array_from_pyobj(#atype#,#varname#_Dims,#varname#_Rank,capi_#varname#_intent,#varname#_capi);'},
"""\
-\tif (capi_#varname#_tmp == NULL) {
-\t\tPyObject *exc, *val, *tb;
-\t\tPyErr_Fetch(&exc, &val, &tb);
-\t\tPyErr_SetString(exc ? exc : #modulename#_error,\"failed in converting #nth# `#varname#\' of #pyname# to C/Fortran array\" );
-\t\tnpy_PyErr_ChainExceptionsCause(exc, val, tb);
-\t} else {
-\t\t#varname# = (#ctype# *)(PyArray_DATA(capi_#varname#_tmp));
+ if (capi_#varname#_tmp == NULL) {
+ PyObject *exc, *val, *tb;
+ PyErr_Fetch(&exc, &val, &tb);
+ PyErr_SetString(exc ? exc : #modulename#_error,\"failed in converting #nth# `#varname#\' of #pyname# to C/Fortran array\" );
+ npy_PyErr_ChainExceptionsCause(exc, val, tb);
+ } else {
+ #varname# = (#ctype# *)(PyArray_DATA(capi_#varname#_tmp));
""",
{hasinitvalue: [
{isintent_nothide:
- '\tif (#varname#_capi == Py_None) {'},
- {isintent_hide: '\t{'},
- {iscomplexarray: '\t\t#ctype# capi_c;'},
+ ' if (#varname#_capi == Py_None) {'},
+ {isintent_hide: ' {'},
+ {iscomplexarray: ' #ctype# capi_c;'},
"""\
-\t\tint *_i,capi_i=0;
-\t\tCFUNCSMESS(\"#name#: Initializing #varname#=#init#\\n\");
-\t\tif (initforcomb(PyArray_DIMS(capi_#varname#_tmp),PyArray_NDIM(capi_#varname#_tmp),1)) {
-\t\t\twhile ((_i = nextforcomb()))
-\t\t\t\t#varname#[capi_i++] = #init#; /* fortran way */
-\t\t} else {
-\t\t\tPyObject *exc, *val, *tb;
-\t\t\tPyErr_Fetch(&exc, &val, &tb);
-\t\t\tPyErr_SetString(exc ? exc : #modulename#_error,\"Initialization of #nth# #varname# failed (initforcomb).\");
-\t\t\tnpy_PyErr_ChainExceptionsCause(exc, val, tb);
-\t\t\tf2py_success = 0;
-\t\t}
-\t}
-\tif (f2py_success) {"""]},
+ int *_i,capi_i=0;
+ CFUNCSMESS(\"#name#: Initializing #varname#=#init#\\n\");
+ if (initforcomb(PyArray_DIMS(capi_#varname#_tmp),PyArray_NDIM(capi_#varname#_tmp),1)) {
+ while ((_i = nextforcomb()))
+ #varname#[capi_i++] = #init#; /* fortran way */
+ } else {
+ PyObject *exc, *val, *tb;
+ PyErr_Fetch(&exc, &val, &tb);
+ PyErr_SetString(exc ? exc : #modulename#_error,\"Initialization of #nth# #varname# failed (initforcomb).\");
+ npy_PyErr_ChainExceptionsCause(exc, val, tb);
+ f2py_success = 0;
+ }
+ }
+ if (f2py_success) {"""]},
],
'cleanupfrompyobj': [ # note that this list will be reversed
- '\t} /*if (capi_#varname#_tmp == NULL) ... else of #varname#*/',
+ ' } /*if (capi_#varname#_tmp == NULL) ... else of #varname#*/',
{l_not(l_or(isintent_out, isintent_hide)): """\
-\tif((PyObject *)capi_#varname#_tmp!=#varname#_capi) {
-\t\tPy_XDECREF(capi_#varname#_tmp); }"""},
+ if((PyObject *)capi_#varname#_tmp!=#varname#_capi) {
+ Py_XDECREF(capi_#varname#_tmp); }"""},
{l_and(isintent_hide, l_not(isintent_out))
- : """\t\tPy_XDECREF(capi_#varname#_tmp);"""},
- {hasinitvalue: '\t} /*if (f2py_success) of #varname# init*/'},
+ : """ Py_XDECREF(capi_#varname#_tmp);"""},
+ {hasinitvalue: ' } /*if (f2py_success) of #varname# init*/'},
],
'_check': isarray,
'_depend': ''
@@ -1143,30 +1143,30 @@ if (#varname#_cb.capi==Py_None) {
check_rules = [
{
- 'frompyobj': {debugcapi: '\tfprintf(stderr,\"debug-capi:Checking `#check#\'\\n\");'},
+ 'frompyobj': {debugcapi: ' fprintf(stderr,\"debug-capi:Checking `#check#\'\\n\");'},
'need': 'len..'
}, {
- 'frompyobj': '\tCHECKSCALAR(#check#,\"#check#\",\"#nth# #varname#\",\"#varshowvalue#\",#varname#) {',
- 'cleanupfrompyobj': '\t} /*CHECKSCALAR(#check#)*/',
+ 'frompyobj': ' CHECKSCALAR(#check#,\"#check#\",\"#nth# #varname#\",\"#varshowvalue#\",#varname#) {',
+ 'cleanupfrompyobj': ' } /*CHECKSCALAR(#check#)*/',
'need': 'CHECKSCALAR',
'_check': l_and(isscalar, l_not(iscomplex)),
'_break': ''
}, {
- 'frompyobj': '\tCHECKSTRING(#check#,\"#check#\",\"#nth# #varname#\",\"#varshowvalue#\",#varname#) {',
- 'cleanupfrompyobj': '\t} /*CHECKSTRING(#check#)*/',
+ 'frompyobj': ' CHECKSTRING(#check#,\"#check#\",\"#nth# #varname#\",\"#varshowvalue#\",#varname#) {',
+ 'cleanupfrompyobj': ' } /*CHECKSTRING(#check#)*/',
'need': 'CHECKSTRING',
'_check': isstring,
'_break': ''
}, {
'need': 'CHECKARRAY',
- 'frompyobj': '\tCHECKARRAY(#check#,\"#check#\",\"#nth# #varname#\") {',
- 'cleanupfrompyobj': '\t} /*CHECKARRAY(#check#)*/',
+ 'frompyobj': ' CHECKARRAY(#check#,\"#check#\",\"#nth# #varname#\") {',
+ 'cleanupfrompyobj': ' } /*CHECKARRAY(#check#)*/',
'_check': isarray,
'_break': ''
}, {
'need': 'CHECKGENERIC',
- 'frompyobj': '\tCHECKGENERIC(#check#,\"#check#\",\"#nth# #varname#\") {',
- 'cleanupfrompyobj': '\t} /*CHECKGENERIC(#check#)*/',
+ 'frompyobj': ' CHECKGENERIC(#check#,\"#check#\",\"#nth# #varname#\") {',
+ 'cleanupfrompyobj': ' } /*CHECKGENERIC(#check#)*/',
}
]
@@ -1179,7 +1179,7 @@ def buildmodule(m, um):
"""
Return
"""
- outmess('\tBuilding module "%s"...\n' % (m['name']))
+ outmess(' Building module "%s"...\n' % (m['name']))
ret = {}
mod_rules = defmod_rules[:]
vrd = capi_maps.modsign2map(m)
@@ -1281,7 +1281,7 @@ def buildmodule(m, um):
ret['csrc'] = fn
with open(fn, 'w') as f:
f.write(ar['modulebody'].replace('\t', 2 * ' '))
- outmess('\tWrote C/API module "%s" to file "%s"\n' % (m['name'], fn))
+ outmess(' Wrote C/API module "%s" to file "%s"\n' % (m['name'], fn))
if options['dorestdoc']:
fn = os.path.join(
@@ -1289,7 +1289,7 @@ def buildmodule(m, um):
with open(fn, 'w') as f:
f.write('.. -*- rest -*-\n')
f.write('\n'.join(ar['restdoc']))
- outmess('\tReST Documentation is saved to file "%s/%smodule.rest"\n' %
+ outmess(' ReST Documentation is saved to file "%s/%smodule.rest"\n' %
(options['buildpath'], vrd['modulename']))
if options['dolatexdoc']:
fn = os.path.join(
@@ -1304,7 +1304,7 @@ def buildmodule(m, um):
f.write('\n'.join(ar['latexdoc']))
if 'shortlatex' not in options:
f.write('\\end{document}')
- outmess('\tDocumentation is saved to file "%s/%smodule.tex"\n' %
+ outmess(' Documentation is saved to file "%s/%smodule.tex"\n' %
(options['buildpath'], vrd['modulename']))
if funcwrappers:
wn = os.path.join(options['buildpath'], vrd['f2py_wrapper_output'])
@@ -1329,7 +1329,7 @@ def buildmodule(m, um):
lines.append(l + '\n')
lines = ''.join(lines).replace('\n &\n', '\n')
f.write(lines)
- outmess('\tFortran 77 wrappers are saved to "%s"\n' % (wn))
+ outmess(' Fortran 77 wrappers are saved to "%s"\n' % (wn))
if funcwrappers2:
wn = os.path.join(
options['buildpath'], '%s-f2pywrappers2.f90' % (vrd['modulename']))
@@ -1356,7 +1356,7 @@ def buildmodule(m, um):
lines.append(l + '\n')
lines = ''.join(lines).replace('\n &\n', '\n')
f.write(lines)
- outmess('\tFortran 90 wrappers are saved to "%s"\n' % (wn))
+ outmess(' Fortran 90 wrappers are saved to "%s"\n' % (wn))
return ret
################## Build C/API function #############
@@ -1372,10 +1372,10 @@ def buildapi(rout):
var = rout['vars']
if ismoduleroutine(rout):
- outmess('\t\t\tConstructing wrapper function "%s.%s"...\n' %
+ outmess(' Constructing wrapper function "%s.%s"...\n' %
(rout['modulename'], rout['name']))
else:
- outmess('\t\tConstructing wrapper function "%s"...\n' % (rout['name']))
+ outmess(' Constructing wrapper function "%s"...\n' % (rout['name']))
# Routine
vrd = capi_maps.routsign2map(rout)
rd = dictappend({}, vrd)
@@ -1477,9 +1477,9 @@ def buildapi(rout):
ar = applyrules(routine_rules, rd)
if ismoduleroutine(rout):
- outmess('\t\t\t %s\n' % (ar['docshort']))
+ outmess(' %s\n' % (ar['docshort']))
else:
- outmess('\t\t %s\n' % (ar['docshort']))
+ outmess(' %s\n' % (ar['docshort']))
return ar, wrap
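Note that buildmodule() (unchanged by this diff -- see the f.write(ar['modulebody'].replace('\t', 2 * ' ')) call kept above) already expanded any tab left in the module body to two spaces before writing the C source. A short sketch of that write path, assuming nothing else transforms the body:

    # Sketch: tabs in the assembled module body never reached the .c file
    # verbatim; the write path rewrote them to two spaces. Spelling the
    # indentation out in the rule strings makes them WYSIWYG instead.
    modulebody = "\tint i;\n\treturn m;"
    print(modulebody.replace("\t", 2 * " "))
    # prints "  int i;" and "  return m;" (two-space indent)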
diff --git a/numpy/f2py/setup.py b/numpy/f2py/setup.py
index daefc02ed..499609f96 100644
--- a/numpy/f2py/setup.py
+++ b/numpy/f2py/setup.py
@@ -66,7 +66,6 @@ command line tool (f2py) for generating Python C/API modules for
wrapping Fortran 77/90/95 subroutines, accessing common blocks from
Python, and calling Python functions from Fortran (call-backs).
Interfacing subroutines/data from Fortran 90/95 modules is supported.""",
- url="https://web.archive.org/web/20140822061353/"\
- "http://cens.ioc.ee/projects/f2py2e/",
+ url="https://numpy.org/doc/stable/f2py/",
keywords=['Fortran', 'f2py'],
**config)
diff --git a/numpy/f2py/tests/test_callback.py b/numpy/f2py/tests/test_callback.py
index 2cb429ec2..5d2aab94d 100644
--- a/numpy/f2py/tests/test_callback.py
+++ b/numpy/f2py/tests/test_callback.py
@@ -5,7 +5,6 @@ import pytest
import threading
import traceback
import time
-import random
import numpy as np
from numpy.testing import assert_, assert_equal, IS_PYPY
@@ -107,9 +106,9 @@ cf2py intent(out) r
-----
Call-back functions::
- def fun(): return a
- Return objects:
- a : int
+ def fun(): return a
+ Return objects:
+ a : int
""")
assert_equal(self.module.t.__doc__, expected)
diff --git a/numpy/f2py/tests/util.py b/numpy/f2py/tests/util.py
index eace3c9fc..1a6805e75 100644
--- a/numpy/f2py/tests/util.py
+++ b/numpy/f2py/tests/util.py
@@ -242,9 +242,6 @@ def build_module_distutils(source_files, config_code, module_name, **kw):
Build a module via distutils and import it.
"""
- from numpy.distutils.misc_util import Configuration
- from numpy.distutils.core import setup
-
d = get_module_dir()
# Copy files
diff --git a/numpy/lib/_datasource.py b/numpy/lib/_datasource.py
index 56b94853d..8201d3772 100644
--- a/numpy/lib/_datasource.py
+++ b/numpy/lib/_datasource.py
@@ -324,7 +324,6 @@ class DataSource:
# a significant fraction of numpy's total import time.
import shutil
from urllib.request import urlopen
- from urllib.error import URLError
upath = self.abspath(path)
diff --git a/numpy/lib/_iotools.py b/numpy/lib/_iotools.py
index a576925d6..4a5ac1285 100644
--- a/numpy/lib/_iotools.py
+++ b/numpy/lib/_iotools.py
@@ -23,8 +23,7 @@ def _decode_line(line, encoding=None):
Returns
-------
- decoded_line : unicode
- Unicode in Python 2, a str (unicode) in Python 3.
+ decoded_line : str
"""
if type(line) is bytes:
diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index e0c056d88..20e32a78d 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -33,7 +33,7 @@ from numpy.core.umath import _add_newdoc_ufunc as add_newdoc_ufunc
import builtins
# needed in this module for compatibility
-from numpy.lib.histograms import histogram, histogramdd
+from numpy.lib.histograms import histogram, histogramdd # noqa: F401
array_function_dispatch = functools.partial(
@@ -268,6 +268,19 @@ def iterable(y):
>>> np.iterable(2)
False
+ Notes
+ -----
+ In most cases, the results of ``np.iterable(obj)`` are consistent with
+ ``isinstance(obj, collections.abc.Iterable)``. One notable exception is
+ the treatment of 0-dimensional arrays::
+
+ >>> from collections.abc import Iterable
+ >>> a = np.array(1.0) # 0-dimensional numpy array
+ >>> isinstance(a, Iterable)
+ True
+ >>> np.iterable(a)
+ False
+
"""
try:
iter(y)
@@ -784,6 +797,17 @@ def copy(a, order='K', subok=False):
>>> x[0] == z[0]
False
+ Note that np.copy clears a previously set WRITEABLE=False flag.
+
+ >>> a = np.array([1, 2, 3])
+ >>> a.flags["WRITEABLE"] = False
+ >>> b = np.copy(a)
+ >>> b.flags["WRITEABLE"]
+ True
+ >>> b[0] = 3
+ >>> b
+ array([3, 2, 3])
+
Note that np.copy is a shallow copy and will not copy object
elements within arrays. This is mainly important for arrays
containing Python objects. The new array will contain the
@@ -2809,9 +2833,9 @@ def blackman(M):
"""
if M < 1:
- return array([])
+ return array([], dtype=np.result_type(M, 0.0))
if M == 1:
- return ones(1, float)
+ return ones(1, dtype=np.result_type(M, 0.0))
n = arange(1-M, M, 2)
return 0.42 + 0.5*cos(pi*n/(M-1)) + 0.08*cos(2.0*pi*n/(M-1))
@@ -2918,9 +2942,9 @@ def bartlett(M):
"""
if M < 1:
- return array([])
+ return array([], dtype=np.result_type(M, 0.0))
if M == 1:
- return ones(1, float)
+ return ones(1, dtype=np.result_type(M, 0.0))
n = arange(1-M, M, 2)
return where(less_equal(n, 0), 1 + n/(M-1), 1 - n/(M-1))
@@ -3022,9 +3046,9 @@ def hanning(M):
"""
if M < 1:
- return array([])
+ return array([], dtype=np.result_type(M, 0.0))
if M == 1:
- return ones(1, float)
+ return ones(1, dtype=np.result_type(M, 0.0))
n = arange(1-M, M, 2)
return 0.5 + 0.5*cos(pi*n/(M-1))
@@ -3122,9 +3146,9 @@ def hamming(M):
"""
if M < 1:
- return array([])
+ return array([], dtype=np.result_type(M, 0.0))
if M == 1:
- return ones(1, float)
+ return ones(1, dtype=np.result_type(M, 0.0))
n = arange(1-M, M, 2)
return 0.54 + 0.46*cos(pi*n/(M-1))
@@ -3401,7 +3425,7 @@ def kaiser(M, beta):
"""
if M == 1:
- return np.array([1.])
+ return np.ones(1, dtype=np.result_type(M, 0.0))
n = arange(0, M)
alpha = (M-1)/2.0
return i0(beta * sqrt(1-((n-alpha)/alpha)**2.0))/i0(float(beta))
@@ -4734,9 +4758,8 @@ def insert(arr, obj, values, axis=None):
if indices.size == 1:
index = indices.item()
if index < -N or index > N:
- raise IndexError(
- "index %i is out of bounds for axis %i with "
- "size %i" % (obj, axis, N))
+ raise IndexError(f"index {obj} is out of bounds for axis {axis} "
+ f"with size {N}")
if (index < 0):
index += N
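Each of the window-function hunks above (blackman, bartlett, hanning, hamming, kaiser) applies the same fix: the degenerate M < 1 and M == 1 branches now derive their dtype from np.result_type(M, 0.0) instead of hard-coding float64, matching what the general arange-based branch produces. A self-contained sketch of the pattern (not the numpy source; the general branch is elided):

    import numpy as np

    def window_edge_cases(M):
        # Degenerate sizes follow the dtype implied by M, as in the patch.
        out_dtype = np.result_type(M, 0.0)
        if M < 1:
            return np.array([], dtype=out_dtype)
        if M == 1:
            return np.ones(1, dtype=out_dtype)
        raise NotImplementedError("general branch elided in this sketch")

    print(np.result_type(5, 0.0))           # float64 for a plain int M
    print(np.result_type(np.float32, 0.0))  # float32 is preserved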
diff --git a/numpy/lib/function_base.pyi b/numpy/lib/function_base.pyi
index 69c615c9c..9a53b24f2 100644
--- a/numpy/lib/function_base.pyi
+++ b/numpy/lib/function_base.pyi
@@ -1,7 +1,60 @@
-from typing import List
+import sys
+from typing import (
+ Literal as L,
+ List,
+ Type,
+ Sequence,
+ Tuple,
+ Union,
+ Any,
+ TypeVar,
+ Iterator,
+ overload,
+ Callable,
+ Protocol,
+ SupportsIndex,
+ Iterable,
+ SupportsInt,
+)
+
+if sys.version_info >= (3, 10):
+ from typing import TypeGuard
+else:
+ from typing_extensions import TypeGuard
from numpy import (
vectorize as vectorize,
+ ufunc,
+ dtype,
+ generic,
+ floating,
+ complexfloating,
+ intp,
+ float64,
+ complex128,
+ timedelta64,
+ datetime64,
+ object_,
+ _OrderKACF,
+)
+
+from numpy.typing import (
+ NDArray,
+ ArrayLike,
+ DTypeLike,
+ _ShapeLike,
+ _ScalarLike_co,
+ _SupportsDType,
+ _FiniteNestedSequence,
+ _SupportsArray,
+ _ArrayLikeInt_co,
+ _ArrayLikeFloat_co,
+ _ArrayLikeComplex_co,
+ _ArrayLikeTD64_co,
+ _ArrayLikeDT64_co,
+ _ArrayLikeObject_co,
+ _FloatLike_co,
+ _ComplexLike_co,
)
from numpy.core.function_base import (
@@ -12,46 +65,632 @@ from numpy.core.multiarray import (
add_docstring as add_docstring,
bincount as bincount,
)
+
from numpy.core.umath import _add_newdoc_ufunc
+_T = TypeVar("_T")
+_T_co = TypeVar("_T_co", covariant=True)
+_SCT = TypeVar("_SCT", bound=generic)
+_ArrayType = TypeVar("_ArrayType", bound=NDArray[Any])
+
+_2Tuple = Tuple[_T, _T]
+_ArrayLike = _FiniteNestedSequence[_SupportsArray[dtype[_SCT]]]
+_DTypeLike = Union[
+ dtype[_SCT],
+ Type[_SCT],
+ _SupportsDType[dtype[_SCT]],
+]
+
+class _TrimZerosSequence(Protocol[_T_co]):
+ def __len__(self) -> int: ...
+ def __getitem__(self, key: slice, /) -> _T_co: ...
+ def __iter__(self) -> Iterator[Any]: ...
+
+class _SupportsWriteFlush(Protocol):
+ def write(self, s: str, /) -> object: ...
+ def flush(self) -> object: ...
+
__all__: List[str]
-add_newdoc_ufunc = _add_newdoc_ufunc
-
-def rot90(m, k=..., axes = ...): ...
-def flip(m, axis=...): ...
-def iterable(y): ...
-def average(a, axis=..., weights=..., returned=...): ...
-def asarray_chkfinite(a, dtype=..., order=...): ...
-def piecewise(x, condlist, funclist, *args, **kw): ...
-def select(condlist, choicelist, default=...): ...
-def copy(a, order=..., subok=...): ...
-def gradient(f, *varargs, axis=..., edge_order=...): ...
-def diff(a, n=..., axis=..., prepend = ..., append = ...): ...
-def interp(x, xp, fp, left=..., right=..., period=...): ...
-def angle(z, deg=...): ...
-def unwrap(p, discont = ..., axis=..., *, period=...): ...
-def sort_complex(a): ...
-def trim_zeros(filt, trim=...): ...
-def extract(condition, arr): ...
-def place(arr, mask, vals): ...
-def disp(mesg, device=..., linefeed=...): ...
-def cov(m, y=..., rowvar=..., bias=..., ddof=..., fweights=..., aweights=..., *, dtype=...): ...
-def corrcoef(x, y=..., rowvar=..., bias = ..., ddof = ..., *, dtype=...): ...
-def blackman(M): ...
-def bartlett(M): ...
-def hanning(M): ...
-def hamming(M): ...
-def i0(x): ...
-def kaiser(M, beta): ...
-def sinc(x): ...
-def msort(a): ...
-def median(a, axis=..., out=..., overwrite_input=..., keepdims=...): ...
-def percentile(a, q, axis=..., out=..., overwrite_input=..., interpolation=..., keepdims=...): ...
-def quantile(a, q, axis=..., out=..., overwrite_input=..., interpolation=..., keepdims=...): ...
-def trapz(y, x=..., dx=..., axis=...): ...
-def meshgrid(*xi, copy=..., sparse=..., indexing=...): ...
-def delete(arr, obj, axis=...): ...
-def insert(arr, obj, values, axis=...): ...
-def append(arr, values, axis=...): ...
-def digitize(x, bins, right=...): ...
+# NOTE: This is in reality a re-export of `np.core.umath._add_newdoc_ufunc`
+def add_newdoc_ufunc(ufunc: ufunc, new_docstring: str, /) -> None: ...
+
+@overload
+def rot90(
+ m: _ArrayLike[_SCT],
+ k: int = ...,
+ axes: Tuple[int, int] = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def rot90(
+ m: ArrayLike,
+ k: int = ...,
+ axes: Tuple[int, int] = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def flip(m: _SCT, axis: None = ...) -> _SCT: ...
+@overload
+def flip(m: _ScalarLike_co, axis: None = ...) -> Any: ...
+@overload
+def flip(m: _ArrayLike[_SCT], axis: None | _ShapeLike = ...) -> NDArray[_SCT]: ...
+@overload
+def flip(m: ArrayLike, axis: None | _ShapeLike = ...) -> NDArray[Any]: ...
+
+def iterable(y: object) -> TypeGuard[Iterable[Any]]: ...
+
+@overload
+def average(
+ a: _ArrayLikeFloat_co,
+ axis: None = ...,
+ weights: None | _ArrayLikeFloat_co = ...,
+ returned: L[False] = ...,
+) -> floating[Any]: ...
+@overload
+def average(
+ a: _ArrayLikeComplex_co,
+ axis: None = ...,
+ weights: None | _ArrayLikeComplex_co = ...,
+ returned: L[False] = ...,
+) -> complexfloating[Any, Any]: ...
+@overload
+def average(
+ a: _ArrayLikeObject_co,
+ axis: None = ...,
+ weights: None | Any = ...,
+ returned: L[False] = ...,
+) -> Any: ...
+@overload
+def average(
+ a: _ArrayLikeFloat_co,
+ axis: None = ...,
+ weights: None | _ArrayLikeFloat_co = ...,
+ returned: L[True] = ...,
+) -> _2Tuple[floating[Any]]: ...
+@overload
+def average(
+ a: _ArrayLikeComplex_co,
+ axis: None = ...,
+ weights: None | _ArrayLikeComplex_co = ...,
+ returned: L[True] = ...,
+) -> _2Tuple[complexfloating[Any, Any]]: ...
+@overload
+def average(
+ a: _ArrayLikeObject_co,
+ axis: None = ...,
+ weights: None | Any = ...,
+ returned: L[True] = ...,
+) -> _2Tuple[Any]: ...
+@overload
+def average(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ axis: None | _ShapeLike = ...,
+ weights: None | Any = ...,
+ returned: L[False] = ...,
+) -> Any: ...
+@overload
+def average(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ axis: None | _ShapeLike = ...,
+ weights: None | Any = ...,
+ returned: L[True] = ...,
+) -> _2Tuple[Any]: ...
+
+@overload
+def asarray_chkfinite(
+ a: _ArrayLike[_SCT],
+ dtype: None = ...,
+ order: _OrderKACF = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def asarray_chkfinite(
+ a: object,
+ dtype: None = ...,
+ order: _OrderKACF = ...,
+) -> NDArray[Any]: ...
+@overload
+def asarray_chkfinite(
+ a: Any,
+ dtype: _DTypeLike[_SCT],
+ order: _OrderKACF = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def asarray_chkfinite(
+ a: Any,
+ dtype: DTypeLike,
+ order: _OrderKACF = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def piecewise(
+ x: _ArrayLike[_SCT],
+ condlist: ArrayLike,
+ funclist: Sequence[Any | Callable[..., Any]],
+ *args: Any,
+ **kw: Any,
+) -> NDArray[_SCT]: ...
+@overload
+def piecewise(
+ x: ArrayLike,
+ condlist: ArrayLike,
+ funclist: Sequence[Any | Callable[..., Any]],
+ *args: Any,
+ **kw: Any,
+) -> NDArray[Any]: ...
+
+def select(
+ condlist: Sequence[ArrayLike],
+ choicelist: Sequence[ArrayLike],
+ default: ArrayLike = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def copy(
+ a: _ArrayType,
+ order: _OrderKACF,
+ subok: L[True],
+) -> _ArrayType: ...
+@overload
+def copy(
+ a: _ArrayType,
+ order: _OrderKACF = ...,
+ *,
+ subok: L[True],
+) -> _ArrayType: ...
+@overload
+def copy(
+ a: _ArrayLike[_SCT],
+ order: _OrderKACF = ...,
+ subok: L[False] = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def copy(
+ a: ArrayLike,
+ order: _OrderKACF = ...,
+ subok: L[False] = ...,
+) -> NDArray[Any]: ...
+
+def gradient(
+ f: ArrayLike,
+ *varargs: ArrayLike,
+ axis: None | _ShapeLike = ...,
+ edge_order: L[1, 2] = ...,
+) -> Any: ...
+
+@overload
+def diff(
+ a: _T,
+ n: L[0],
+ axis: SupportsIndex = ...,
+ prepend: ArrayLike = ...,
+ append: ArrayLike = ...,
+) -> _T: ...
+@overload
+def diff(
+ a: ArrayLike,
+ n: int = ...,
+ axis: SupportsIndex = ...,
+ prepend: ArrayLike = ...,
+ append: ArrayLike = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def interp(
+ x: _ArrayLikeFloat_co,
+ xp: _ArrayLikeFloat_co,
+ fp: _ArrayLikeFloat_co,
+ left: None | _FloatLike_co = ...,
+ right: None | _FloatLike_co = ...,
+ period: None | _FloatLike_co = ...,
+) -> NDArray[float64]: ...
+@overload
+def interp(
+ x: _ArrayLikeFloat_co,
+ xp: _ArrayLikeFloat_co,
+ fp: _ArrayLikeComplex_co,
+ left: None | _ComplexLike_co = ...,
+ right: None | _ComplexLike_co = ...,
+ period: None | _FloatLike_co = ...,
+) -> NDArray[complex128]: ...
+
+@overload
+def angle(z: _ArrayLikeFloat_co, deg: bool = ...) -> floating[Any]: ...
+@overload
+def angle(z: _ArrayLikeComplex_co, deg: bool = ...) -> complexfloating[Any, Any]: ...
+@overload
+def angle(z: _ArrayLikeObject_co, deg: bool = ...) -> Any: ...
+
+@overload
+def unwrap(
+ p: _ArrayLikeFloat_co,
+ discont: None | float = ...,
+ axis: int = ...,
+ *,
+ period: float = ...,
+) -> NDArray[floating[Any]]: ...
+@overload
+def unwrap(
+ p: _ArrayLikeObject_co,
+ discont: None | float = ...,
+ axis: int = ...,
+ *,
+ period: float = ...,
+) -> NDArray[object_]: ...
+
+def sort_complex(a: ArrayLike) -> NDArray[complexfloating[Any, Any]]: ...
+
+def trim_zeros(
+ filt: _TrimZerosSequence[_T],
+ trim: L["f", "b", "fb", "bf"] = ...,
+) -> _T: ...
+
+@overload
+def extract(condition: ArrayLike, arr: _ArrayLike[_SCT]) -> NDArray[_SCT]: ...
+@overload
+def extract(condition: ArrayLike, arr: ArrayLike) -> NDArray[Any]: ...
+
+def place(arr: NDArray[Any], mask: ArrayLike, vals: Any) -> None: ...
+
+def disp(
+ mesg: object,
+ device: None | _SupportsWriteFlush = ...,
+ linefeed: bool = ...,
+) -> None: ...
+
+@overload
+def cov(
+ m: _ArrayLikeFloat_co,
+ y: None | _ArrayLikeFloat_co = ...,
+ rowvar: bool = ...,
+ bias: bool = ...,
+ ddof: None | SupportsIndex | SupportsInt = ...,
+ fweights: None | ArrayLike = ...,
+ aweights: None | ArrayLike = ...,
+ *,
+ dtype: None = ...,
+) -> NDArray[floating[Any]]: ...
+@overload
+def cov(
+ m: _ArrayLikeComplex_co,
+ y: None | _ArrayLikeComplex_co = ...,
+ rowvar: bool = ...,
+ bias: bool = ...,
+ ddof: None | SupportsIndex | SupportsInt = ...,
+ fweights: None | ArrayLike = ...,
+ aweights: None | ArrayLike = ...,
+ *,
+ dtype: None = ...,
+) -> NDArray[complexfloating[Any, Any]]: ...
+@overload
+def cov(
+ m: _ArrayLikeComplex_co,
+ y: None | _ArrayLikeComplex_co = ...,
+ rowvar: bool = ...,
+ bias: bool = ...,
+ ddof: None | SupportsIndex | SupportsInt = ...,
+ fweights: None | ArrayLike = ...,
+ aweights: None | ArrayLike = ...,
+ *,
+ dtype: _DTypeLike[_SCT],
+) -> NDArray[_SCT]: ...
+@overload
+def cov(
+ m: _ArrayLikeComplex_co,
+ y: None | _ArrayLikeComplex_co = ...,
+ rowvar: bool = ...,
+ bias: bool = ...,
+ ddof: None | SupportsIndex | SupportsInt = ...,
+ fweights: None | ArrayLike = ...,
+ aweights: None | ArrayLike = ...,
+ *,
+ dtype: DTypeLike,
+) -> NDArray[Any]: ...
+
+# NOTE: `bias` and `ddof` have been deprecated
+@overload
+def corrcoef(
+ m: _ArrayLikeFloat_co,
+ y: None | _ArrayLikeFloat_co = ...,
+ rowvar: bool = ...,
+ *,
+ dtype: None = ...,
+) -> NDArray[floating[Any]]: ...
+@overload
+def corrcoef(
+ m: _ArrayLikeComplex_co,
+ y: None | _ArrayLikeComplex_co = ...,
+ rowvar: bool = ...,
+ *,
+ dtype: None = ...,
+) -> NDArray[complexfloating[Any, Any]]: ...
+@overload
+def corrcoef(
+ m: _ArrayLikeComplex_co,
+ y: None | _ArrayLikeComplex_co = ...,
+ rowvar: bool = ...,
+ *,
+ dtype: _DTypeLike[_SCT],
+) -> NDArray[_SCT]: ...
+@overload
+def corrcoef(
+ m: _ArrayLikeComplex_co,
+ y: None | _ArrayLikeComplex_co = ...,
+ rowvar: bool = ...,
+ *,
+ dtype: DTypeLike,
+) -> NDArray[Any]: ...
+
+def blackman(M: _FloatLike_co) -> NDArray[floating[Any]]: ...
+
+def bartlett(M: _FloatLike_co) -> NDArray[floating[Any]]: ...
+
+def hanning(M: _FloatLike_co) -> NDArray[floating[Any]]: ...
+
+def hamming(M: _FloatLike_co) -> NDArray[floating[Any]]: ...
+
+def i0(x: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ...
+
+def kaiser(
+ M: _FloatLike_co,
+ beta: _FloatLike_co,
+) -> NDArray[floating[Any]]: ...
+
+@overload
+def sinc(x: _FloatLike_co) -> floating[Any]: ...
+@overload
+def sinc(x: _ComplexLike_co) -> complexfloating[Any, Any]: ...
+@overload
+def sinc(x: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ...
+@overload
+def sinc(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
+
+@overload
+def msort(a: _ArrayType) -> _ArrayType: ...
+@overload
+def msort(a: _ArrayLike[_SCT]) -> NDArray[_SCT]: ...
+@overload
+def msort(a: ArrayLike) -> NDArray[Any]: ...
+
+@overload
+def median(
+ a: _ArrayLikeFloat_co,
+ axis: None = ...,
+ out: None = ...,
+ overwrite_input: bool = ...,
+ keepdims: L[False] = ...,
+) -> floating[Any]: ...
+@overload
+def median(
+ a: _ArrayLikeComplex_co,
+ axis: None = ...,
+ out: None = ...,
+ overwrite_input: bool = ...,
+ keepdims: L[False] = ...,
+) -> complexfloating[Any, Any]: ...
+@overload
+def median(
+ a: _ArrayLikeTD64_co,
+ axis: None = ...,
+ out: None = ...,
+ overwrite_input: bool = ...,
+ keepdims: L[False] = ...,
+) -> timedelta64: ...
+@overload
+def median(
+ a: _ArrayLikeObject_co,
+ axis: None = ...,
+ out: None = ...,
+ overwrite_input: bool = ...,
+ keepdims: L[False] = ...,
+) -> Any: ...
+@overload
+def median(
+ a: _ArrayLikeFloat_co | _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co,
+ axis: None | _ShapeLike = ...,
+ out: None = ...,
+ overwrite_input: bool = ...,
+ keepdims: bool = ...,
+) -> Any: ...
+@overload
+def median(
+ a: _ArrayLikeFloat_co | _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co,
+ axis: None | _ShapeLike = ...,
+ out: _ArrayType = ...,
+ overwrite_input: bool = ...,
+ keepdims: bool = ...,
+) -> _ArrayType: ...
+
+_InterpolationKind = L[
+ "lower",
+ "higher",
+ "midpoint",
+ "nearest",
+ "linear",
+]
+
+@overload
+def percentile(
+ a: _ArrayLikeFloat_co,
+ q: _FloatLike_co,
+ axis: None = ...,
+ out: None = ...,
+ overwrite_input: bool = ...,
+ interpolation: _InterpolationKind = ...,
+ keepdims: L[False] = ...,
+) -> floating[Any]: ...
+@overload
+def percentile(
+ a: _ArrayLikeComplex_co,
+ q: _FloatLike_co,
+ axis: None = ...,
+ out: None = ...,
+ overwrite_input: bool = ...,
+ interpolation: _InterpolationKind = ...,
+ keepdims: L[False] = ...,
+) -> complexfloating[Any, Any]: ...
+@overload
+def percentile(
+ a: _ArrayLikeTD64_co,
+ q: _FloatLike_co,
+ axis: None = ...,
+ out: None = ...,
+ overwrite_input: bool = ...,
+ interpolation: _InterpolationKind = ...,
+ keepdims: L[False] = ...,
+) -> timedelta64: ...
+@overload
+def percentile(
+ a: _ArrayLikeDT64_co,
+ q: _FloatLike_co,
+ axis: None = ...,
+ out: None = ...,
+ overwrite_input: bool = ...,
+ interpolation: _InterpolationKind = ...,
+ keepdims: L[False] = ...,
+) -> datetime64: ...
+@overload
+def percentile(
+ a: _ArrayLikeObject_co,
+ q: _FloatLike_co,
+ axis: None = ...,
+ out: None = ...,
+ overwrite_input: bool = ...,
+ interpolation: _InterpolationKind = ...,
+ keepdims: L[False] = ...,
+) -> Any: ...
+@overload
+def percentile(
+ a: _ArrayLikeFloat_co,
+ q: _ArrayLikeFloat_co,
+ axis: None = ...,
+ out: None = ...,
+ overwrite_input: bool = ...,
+ interpolation: _InterpolationKind = ...,
+ keepdims: L[False] = ...,
+) -> NDArray[floating[Any]]: ...
+@overload
+def percentile(
+ a: _ArrayLikeComplex_co,
+ q: _ArrayLikeFloat_co,
+ axis: None = ...,
+ out: None = ...,
+ overwrite_input: bool = ...,
+ interpolation: _InterpolationKind = ...,
+ keepdims: L[False] = ...,
+) -> NDArray[complexfloating[Any, Any]]: ...
+@overload
+def percentile(
+ a: _ArrayLikeTD64_co,
+ q: _ArrayLikeFloat_co,
+ axis: None = ...,
+ out: None = ...,
+ overwrite_input: bool = ...,
+ interpolation: _InterpolationKind = ...,
+ keepdims: L[False] = ...,
+) -> NDArray[timedelta64]: ...
+@overload
+def percentile(
+ a: _ArrayLikeDT64_co,
+ q: _ArrayLikeFloat_co,
+ axis: None = ...,
+ out: None = ...,
+ overwrite_input: bool = ...,
+ interpolation: _InterpolationKind = ...,
+ keepdims: L[False] = ...,
+) -> NDArray[datetime64]: ...
+@overload
+def percentile(
+ a: _ArrayLikeObject_co,
+ q: _ArrayLikeFloat_co,
+ axis: None = ...,
+ out: None = ...,
+ overwrite_input: bool = ...,
+ interpolation: _InterpolationKind = ...,
+ keepdims: L[False] = ...,
+) -> NDArray[object_]: ...
+@overload
+def percentile(
+ a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeDT64_co | _ArrayLikeObject_co,
+ q: _ArrayLikeFloat_co,
+ axis: None | _ShapeLike = ...,
+ out: None = ...,
+ overwrite_input: bool = ...,
+ interpolation: _InterpolationKind = ...,
+ keepdims: bool = ...,
+) -> Any: ...
+@overload
+def percentile(
+ a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeDT64_co | _ArrayLikeObject_co,
+ q: _ArrayLikeFloat_co,
+ axis: None | _ShapeLike = ...,
+ out: _ArrayType = ...,
+ overwrite_input: bool = ...,
+ interpolation: _InterpolationKind = ...,
+ keepdims: bool = ...,
+) -> _ArrayType: ...
+
+# NOTE: Not an alias, but they do have identical signatures
+# (that we can reuse)
+quantile = percentile
+
+# TODO: Returns a scalar for <= 1D array-likes; returns an ndarray otherwise
+def trapz(
+ y: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co,
+ x: None | _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co = ...,
+ dx: float = ...,
+ axis: SupportsIndex = ...,
+) -> Any: ...
+
+def meshgrid(
+ *xi: ArrayLike,
+ copy: bool = ...,
+ sparse: bool = ...,
+ indexing: L["xy", "ij"] = ...,
+) -> List[NDArray[Any]]: ...
+
+@overload
+def delete(
+ arr: _ArrayLike[_SCT],
+ obj: slice | _ArrayLikeInt_co,
+ axis: None | SupportsIndex = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def delete(
+ arr: ArrayLike,
+ obj: slice | _ArrayLikeInt_co,
+ axis: None | SupportsIndex = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def insert(
+ arr: _ArrayLike[_SCT],
+ obj: slice | _ArrayLikeInt_co,
+ values: ArrayLike,
+ axis: None | SupportsIndex = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def insert(
+ arr: ArrayLike,
+ obj: slice | _ArrayLikeInt_co,
+ values: ArrayLike,
+ axis: None | SupportsIndex = ...,
+) -> NDArray[Any]: ...
+
+def append(
+ arr: ArrayLike,
+ values: ArrayLike,
+ axis: None | SupportsIndex = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def digitize(
+ x: _FloatLike_co,
+ bins: _ArrayLikeFloat_co,
+ right: bool = ...,
+) -> intp: ...
+@overload
+def digitize(
+ x: _ArrayLikeFloat_co,
+ bins: _ArrayLikeFloat_co,
+ right: bool = ...,
+) -> NDArray[intp]: ...
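For reference, a rough sketch of the inference behaviour these overloads are meant to encode. The comments describe the intended static types; the calls themselves run as shown, and the annotation on `AR_f8` is illustrative:

    import numpy as np
    import numpy.typing as npt

    AR_f8: npt.NDArray[np.float64] = np.linspace(0.0, 1.0, 8)

    np.cov(AR_f8)                        # floating[Any] result
    np.cov(AR_f8, dtype=np.float32)      # keyword-only dtype picks the _DTypeLike[_SCT] overload
    np.percentile(AR_f8, 50)             # scalar q -> floating[Any]
    np.percentile(AR_f8, [25, 50, 75])   # array q  -> NDArray[floating[Any]]
    np.digitize(1.5, [0.0, 1.0, 2.0])    # scalar x -> intp
    np.digitize(AR_f8, [0.0, 0.5, 1.0])  # array x  -> NDArray[intp]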
diff --git a/numpy/lib/histograms.pyi b/numpy/lib/histograms.pyi
index 25a33e3ae..2ceb60793 100644
--- a/numpy/lib/histograms.pyi
+++ b/numpy/lib/histograms.pyi
@@ -1,7 +1,51 @@
-from typing import List
+from typing import (
+ Literal as L,
+ List,
+ Tuple,
+ Any,
+ SupportsIndex,
+ Sequence,
+)
+
+from numpy.typing import (
+ NDArray,
+ ArrayLike,
+)
+
+_BinKind = L[
+ "stone",
+ "auto",
+ "doane",
+ "fd",
+ "rice",
+ "scott",
+ "sqrt",
+ "sturges",
+]
__all__: List[str]
-def histogram_bin_edges(a, bins=..., range=..., weights=...): ...
-def histogram(a, bins=..., range=..., normed=..., weights=..., density=...): ...
-def histogramdd(sample, bins=..., range=..., normed=..., weights=..., density=...): ...
+def histogram_bin_edges(
+ a: ArrayLike,
+ bins: _BinKind | SupportsIndex | ArrayLike = ...,
+ range: None | Tuple[float, float] = ...,
+ weights: None | ArrayLike = ...,
+) -> NDArray[Any]: ...
+
+def histogram(
+ a: ArrayLike,
+ bins: _BinKind | SupportsIndex | ArrayLike = ...,
+ range: None | Tuple[float, float] = ...,
+ normed: None = ...,
+ weights: None | ArrayLike = ...,
+ density: bool = ...,
+) -> Tuple[NDArray[Any], NDArray[Any]]: ...
+
+def histogramdd(
+ sample: ArrayLike,
+ bins: SupportsIndex | ArrayLike = ...,
+ range: Sequence[Tuple[float, float]] = ...,
+ normed: None | bool = ...,
+ weights: None | ArrayLike = ...,
+ density: None | bool = ...,
+) -> Tuple[NDArray[Any], List[NDArray[Any]]]: ...
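As a usage sketch of the newly typed signatures (data values are arbitrary and only illustrative):

    import numpy as np

    rng = np.random.default_rng(0)
    a = rng.normal(size=100)
    edges = np.histogram_bin_edges(a, bins="auto")   # one of the _BinKind literals
    hist, edges = np.histogram(a, bins=10, range=(-3.0, 3.0), density=True)
    hist_dd, edges_list = np.histogramdd(a.reshape(50, 2), bins=(4, 4))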
diff --git a/numpy/lib/nanfunctions.py b/numpy/lib/nanfunctions.py
index 4e77f0d92..08d9b42bb 100644
--- a/numpy/lib/nanfunctions.py
+++ b/numpy/lib/nanfunctions.py
@@ -229,12 +229,14 @@ def _divide_by_count(a, b, out=None):
return np.divide(a, b, out=out, casting='unsafe')
-def _nanmin_dispatcher(a, axis=None, out=None, keepdims=None):
+def _nanmin_dispatcher(a, axis=None, out=None, keepdims=None,
+ initial=None, where=None):
return (a, out)
@array_function_dispatch(_nanmin_dispatcher)
-def nanmin(a, axis=None, out=None, keepdims=np._NoValue):
+def nanmin(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue,
+ where=np._NoValue):
"""
Return minimum of an array or minimum along an axis, ignoring any NaNs.
When all-NaN slices are encountered a ``RuntimeWarning`` is raised and
@@ -266,6 +268,16 @@ def nanmin(a, axis=None, out=None, keepdims=np._NoValue):
does not implement `keepdims` any exceptions will be raised.
.. versionadded:: 1.8.0
+ initial : scalar, optional
+ The maximum value of an output element. Must be present to allow
+ computation on an empty slice. See `~numpy.ufunc.reduce` for details.
+
+ .. versionadded:: 1.22.0
+ where : array_like of bool, optional
+ Elements to compare for the minimum. See `~numpy.ufunc.reduce`
+ for details.
+
+ .. versionadded:: 1.22.0
Returns
-------
@@ -321,6 +333,11 @@ def nanmin(a, axis=None, out=None, keepdims=np._NoValue):
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
+ if initial is not np._NoValue:
+ kwargs['initial'] = initial
+ if where is not np._NoValue:
+ kwargs['where'] = where
+
if type(a) is np.ndarray and a.dtype != np.object_:
# Fast, but not safe for subclasses of ndarray, or object arrays,
# which do not implement isnan (gh-9009), or fmin correctly (gh-8975)
@@ -336,6 +353,7 @@ def nanmin(a, axis=None, out=None, keepdims=np._NoValue):
return res
# Check for all-NaN axis
+ kwargs.pop("initial", None)
mask = np.all(mask, axis=axis, **kwargs)
if np.any(mask):
res = _copyto(res, np.nan, mask)
@@ -344,12 +362,14 @@ def nanmin(a, axis=None, out=None, keepdims=np._NoValue):
return res
-def _nanmax_dispatcher(a, axis=None, out=None, keepdims=None):
+def _nanmax_dispatcher(a, axis=None, out=None, keepdims=None,
+ initial=None, where=None):
return (a, out)
@array_function_dispatch(_nanmax_dispatcher)
-def nanmax(a, axis=None, out=None, keepdims=np._NoValue):
+def nanmax(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue,
+ where=np._NoValue):
"""
Return the maximum of an array or maximum along an axis, ignoring any
NaNs. When all-NaN slices are encountered a ``RuntimeWarning`` is
@@ -381,6 +401,16 @@ def nanmax(a, axis=None, out=None, keepdims=np._NoValue):
does not implement `keepdims` any exceptions will be raised.
.. versionadded:: 1.8.0
+ initial : scalar, optional
+ The minimum value of an output element. Must be present to allow
+ computation on an empty slice. See `~numpy.ufunc.reduce` for details.
+
+ .. versionadded:: 1.22.0
+ where : array_like of bool, optional
+ Elements to compare for the maximum. See `~numpy.ufunc.reduce`
+ for details.
+
+ .. versionadded:: 1.22.0
Returns
-------
@@ -436,6 +466,11 @@ def nanmax(a, axis=None, out=None, keepdims=np._NoValue):
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
+ if initial is not np._NoValue:
+ kwargs['initial'] = initial
+ if where is not np._NoValue:
+ kwargs['where'] = where
+
if type(a) is np.ndarray and a.dtype != np.object_:
# Fast, but not safe for subclasses of ndarray, or object arrays,
# which do not implement isnan (gh-9009), or fmax correctly (gh-8975)
@@ -451,6 +486,7 @@ def nanmax(a, axis=None, out=None, keepdims=np._NoValue):
return res
# Check for all-NaN axis
+ kwargs.pop("initial", None)
mask = np.all(mask, axis=axis, **kwargs)
if np.any(mask):
res = _copyto(res, np.nan, mask)
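A minimal sketch of the forwarded keywords, mirroring the values used in the new tests further down:

    import numpy as np

    ar = np.arange(9.0)
    ar[:5] = np.nan
    np.nanmin(ar, initial=0)     # 0.0  -- initial joins the reduction like amin's
    np.nanmax(ar, initial=100)   # 100.0
    np.nanmin(ar, where=~np.isnan(ar), initial=np.inf)   # 5.0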
@@ -459,12 +495,12 @@ def nanmax(a, axis=None, out=None, keepdims=np._NoValue):
return res
-def _nanargmin_dispatcher(a, axis=None):
+def _nanargmin_dispatcher(a, axis=None, out=None, *, keepdims=None):
return (a,)
@array_function_dispatch(_nanargmin_dispatcher)
-def nanargmin(a, axis=None):
+def nanargmin(a, axis=None, out=None, *, keepdims=np._NoValue):
"""
Return the indices of the minimum values in the specified axis ignoring
NaNs. For all-NaN slices ``ValueError`` is raised. Warning: the results
@@ -476,6 +512,17 @@ def nanargmin(a, axis=None):
Input data.
axis : int, optional
Axis along which to operate. By default flattened input is used.
+ out : array, optional
+ If provided, the result will be inserted into this array. It should
+ be of the appropriate shape and dtype.
+
+ .. versionadded:: 1.22.0
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the array.
+
+ .. versionadded:: 1.22.0
Returns
-------
@@ -500,20 +547,20 @@ def nanargmin(a, axis=None):
"""
a, mask = _replace_nan(a, np.inf)
- res = np.argmin(a, axis=axis)
if mask is not None:
mask = np.all(mask, axis=axis)
if np.any(mask):
raise ValueError("All-NaN slice encountered")
+ res = np.argmin(a, axis=axis, out=out, keepdims=keepdims)
return res
-def _nanargmax_dispatcher(a, axis=None):
+def _nanargmax_dispatcher(a, axis=None, out=None, *, keepdims=None):
return (a,)
@array_function_dispatch(_nanargmax_dispatcher)
-def nanargmax(a, axis=None):
+def nanargmax(a, axis=None, out=None, *, keepdims=np._NoValue):
"""
Return the indices of the maximum values in the specified axis ignoring
NaNs. For all-NaN slices ``ValueError`` is raised. Warning: the
@@ -526,6 +573,17 @@ def nanargmax(a, axis=None):
Input data.
axis : int, optional
Axis along which to operate. By default flattened input is used.
+ out : array, optional
+ If provided, the result will be inserted into this array. It should
+ be of the appropriate shape and dtype.
+
+ .. versionadded:: 1.22.0
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the array.
+
+ .. versionadded:: 1.22.0
Returns
-------
@@ -550,20 +608,22 @@ def nanargmax(a, axis=None):
"""
a, mask = _replace_nan(a, -np.inf)
- res = np.argmax(a, axis=axis)
if mask is not None:
mask = np.all(mask, axis=axis)
if np.any(mask):
raise ValueError("All-NaN slice encountered")
+ res = np.argmax(a, axis=axis, out=out, keepdims=keepdims)
return res
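A short sketch of the new `out` and `keepdims` pass-through for the arg-reductions (values mirror the new tests):

    import numpy as np

    ar = np.arange(9.0)
    ar[:5] = np.nan
    np.nanargmin(ar)                   # 5, the first non-NaN minimum
    np.nanargmin(ar, keepdims=True)    # array([5]) -- reduced axis kept with size 1
    out = np.zeros((), dtype=np.intp)
    res = np.nanargmax(ar, out=out)    # res is out; both hold 8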
-def _nansum_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None):
+def _nansum_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None,
+ initial=None, where=None):
return (a, out)
@array_function_dispatch(_nansum_dispatcher)
-def nansum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
+def nansum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue,
+ initial=np._NoValue, where=np._NoValue):
"""
Return the sum of array elements over a given axis treating Not a
Numbers (NaNs) as zero.
@@ -608,6 +668,14 @@ def nansum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
does not implement `keepdims` any exceptions will be raised.
.. versionadded:: 1.8.0
+ initial : scalar, optional
+ Starting value for the sum. See `~numpy.ufunc.reduce` for details.
+
+ .. versionadded:: 1.22.0
+ where : array_like of bool, optional
+ Elements to include in the sum. See `~numpy.ufunc.reduce` for details.
+
+ .. versionadded:: 1.22.0
Returns
-------
@@ -653,15 +721,18 @@ def nansum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
"""
a, mask = _replace_nan(a, 0)
- return np.sum(a, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
+ return np.sum(a, axis=axis, dtype=dtype, out=out, keepdims=keepdims,
+ initial=initial, where=where)
-def _nanprod_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None):
+def _nanprod_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None,
+ initial=None, where=None):
return (a, out)
@array_function_dispatch(_nanprod_dispatcher)
-def nanprod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
+def nanprod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue,
+ initial=np._NoValue, where=np._NoValue):
"""
Return the product of array elements over a given axis treating Not a
Numbers (NaNs) as ones.
@@ -695,6 +766,16 @@ def nanprod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
If True, the axes which are reduced are left in the result as
dimensions with size one. With this option, the result will
broadcast correctly against the original `arr`.
+ initial : scalar, optional
+ The starting value for this product. See `~numpy.ufunc.reduce`
+ for details.
+
+ .. versionadded:: 1.22.0
+ where : array_like of bool, optional
+ Elements to include in the product. See `~numpy.ufunc.reduce`
+ for details.
+
+ .. versionadded:: 1.22.0
Returns
-------
@@ -723,7 +804,8 @@ def nanprod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
"""
a, mask = _replace_nan(a, 1)
- return np.prod(a, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
+ return np.prod(a, axis=axis, dtype=dtype, out=out, keepdims=keepdims,
+ initial=initial, where=where)
def _nancumsum_dispatcher(a, axis=None, dtype=None, out=None):
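The `initial`/`where` forwarding for the sum and product variants, sketched with the same values the new tests assert:

    import numpy as np

    ar = np.arange(9.0)
    ar[:5] = np.nan
    np.nansum(ar, initial=2)    # 28.0   -- 5+6+7+8 plus the starting value 2
    np.nanprod(ar, initial=2)   # 3360.0 -- 5*6*7*8 times 2; NaNs count as 1
    where = np.ones_like(ar, dtype=bool)
    where[0] = False            # masking a NaN position changes nothing here
    np.nansum(ar, where=where, initial=2)   # still 28.0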
@@ -863,12 +945,14 @@ def nancumprod(a, axis=None, dtype=None, out=None):
return np.cumprod(a, axis=axis, dtype=dtype, out=out)
-def _nanmean_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None):
+def _nanmean_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None,
+ *, where=None):
return (a, out)
@array_function_dispatch(_nanmean_dispatcher)
-def nanmean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
+def nanmean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue,
+ *, where=np._NoValue):
"""
Compute the arithmetic mean along the specified axis, ignoring NaNs.
@@ -906,6 +990,10 @@ def nanmean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
`keepdims` will be passed through to the `mean` or `sum` methods
of sub-classes of `ndarray`. If the sub-classes methods
does not implement `keepdims` any exceptions will be raised.
+ where : array_like of bool, optional
+ Elements to include in the mean. See `~numpy.ufunc.reduce` for details.
+
+ .. versionadded:: 1.22.0
Returns
-------
@@ -944,7 +1032,8 @@ def nanmean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
"""
arr, mask = _replace_nan(a, 0)
if mask is None:
- return np.mean(arr, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
+ return np.mean(arr, axis=axis, dtype=dtype, out=out, keepdims=keepdims,
+ where=where)
if dtype is not None:
dtype = np.dtype(dtype)
@@ -953,8 +1042,10 @@ def nanmean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
if out is not None and not issubclass(out.dtype.type, np.inexact):
raise TypeError("If a is inexact, then out must be inexact")
- cnt = np.sum(~mask, axis=axis, dtype=np.intp, keepdims=keepdims)
- tot = np.sum(arr, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
+ cnt = np.sum(~mask, axis=axis, dtype=np.intp, keepdims=keepdims,
+ where=where)
+ tot = np.sum(arr, axis=axis, dtype=dtype, out=out, keepdims=keepdims,
+ where=where)
avg = _divide_by_count(tot, cnt, out=out)
isbad = (cnt == 0)
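A sketch of `where` on `nanmean`; note that both the count and the total above are computed under the same mask, so the average stays consistent:

    import numpy as np

    ar = np.arange(9.0).reshape(3, 3)
    ar[0, :] = np.nan
    where = np.ones_like(ar, dtype=bool)
    where[:, 0] = False
    np.nanmean(ar, where=where)   # mean of {4, 5, 7, 8} == 6.0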
@@ -1428,13 +1519,14 @@ def _nanquantile_1d(arr1d, q, overwrite_input=False, interpolation='linear'):
arr1d, q, overwrite_input=overwrite_input, interpolation=interpolation)
-def _nanvar_dispatcher(
- a, axis=None, dtype=None, out=None, ddof=None, keepdims=None):
+def _nanvar_dispatcher(a, axis=None, dtype=None, out=None, ddof=None,
+ keepdims=None, *, where=None):
return (a, out)
@array_function_dispatch(_nanvar_dispatcher)
-def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
+def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue,
+ *, where=np._NoValue):
"""
Compute the variance along the specified axis, while ignoring NaNs.
@@ -1471,7 +1563,11 @@ def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `a`.
+ where : array_like of bool, optional
+ Elements to include in the variance. See `~numpy.ufunc.reduce` for
+ details.
+ .. versionadded:: 1.22.0
Returns
-------
@@ -1527,7 +1623,7 @@ def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
arr, mask = _replace_nan(a, 0)
if mask is None:
return np.var(arr, axis=axis, dtype=dtype, out=out, ddof=ddof,
- keepdims=keepdims)
+ keepdims=keepdims, where=where)
if dtype is not None:
dtype = np.dtype(dtype)
@@ -1546,20 +1642,22 @@ def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
# keepdims=True, however matrix now raises an error in this case, but
# the reason that it drops the keepdims kwarg is to force keepdims=True
# so this used to work by serendipity.
- cnt = np.sum(~mask, axis=axis, dtype=np.intp, keepdims=_keepdims)
- avg = np.sum(arr, axis=axis, dtype=dtype, keepdims=_keepdims)
+ cnt = np.sum(~mask, axis=axis, dtype=np.intp, keepdims=_keepdims,
+ where=where)
+ avg = np.sum(arr, axis=axis, dtype=dtype, keepdims=_keepdims, where=where)
avg = _divide_by_count(avg, cnt)
# Compute squared deviation from mean.
- np.subtract(arr, avg, out=arr, casting='unsafe')
+ np.subtract(arr, avg, out=arr, casting='unsafe', where=where)
arr = _copyto(arr, 0, mask)
if issubclass(arr.dtype.type, np.complexfloating):
- sqr = np.multiply(arr, arr.conj(), out=arr).real
+ sqr = np.multiply(arr, arr.conj(), out=arr, where=where).real
else:
- sqr = np.multiply(arr, arr, out=arr)
+ sqr = np.multiply(arr, arr, out=arr, where=where)
# Compute variance.
- var = np.sum(sqr, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
+ var = np.sum(sqr, axis=axis, dtype=dtype, out=out, keepdims=keepdims,
+ where=where)
# Precaution against reduced object arrays
try:
@@ -1582,13 +1680,14 @@ def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
return var
-def _nanstd_dispatcher(
- a, axis=None, dtype=None, out=None, ddof=None, keepdims=None):
+def _nanstd_dispatcher(a, axis=None, dtype=None, out=None, ddof=None,
+ keepdims=None, *, where=None):
return (a, out)
@array_function_dispatch(_nanstd_dispatcher)
-def nanstd(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
+def nanstd(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue,
+ *, where=np._NoValue):
"""
Compute the standard deviation along the specified axis, while
ignoring NaNs.
@@ -1632,6 +1731,11 @@ def nanstd(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
as-is to the relevant functions of the sub-classes. If these
functions do not have a `keepdims` kwarg, a RuntimeError will
be raised.
+ where : array_like of bool, optional
+ Elements to include in the standard deviation.
+ See `~numpy.ufunc.reduce` for details.
+
+ .. versionadded:: 1.22.0
Returns
-------
@@ -1683,7 +1787,7 @@ def nanstd(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
"""
var = nanvar(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
- keepdims=keepdims)
+ keepdims=keepdims, where=where)
if isinstance(var, np.ndarray):
std = np.sqrt(var, out=var)
elif hasattr(var, 'dtype'):
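And the corresponding `where` support for the spread estimators, as a small sketch:

    import numpy as np

    ar = np.arange(9.0).reshape(3, 3)
    ar[0, :] = np.nan
    where = np.ones_like(ar, dtype=bool)
    where[:, 0] = False
    np.nanvar(ar, where=where)    # variance of {4, 5, 7, 8} == 2.5
    np.nanstd(ar, where=where)    # sqrt(2.5) ~= 1.5811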
diff --git a/numpy/lib/nanfunctions.pyi b/numpy/lib/nanfunctions.pyi
index 447770a54..54b4a7e26 100644
--- a/numpy/lib/nanfunctions.pyi
+++ b/numpy/lib/nanfunctions.pyi
@@ -1,54 +1,40 @@
from typing import List
+from numpy.core.fromnumeric import (
+ amin,
+ amax,
+ argmin,
+ argmax,
+ sum,
+ prod,
+ cumsum,
+ cumprod,
+ mean,
+ var,
+ std
+)
+
+from numpy.lib.function_base import (
+ median,
+ percentile,
+ quantile,
+)
+
__all__: List[str]
-def nanmin(a, axis=..., out=..., keepdims=...): ...
-def nanmax(a, axis=..., out=..., keepdims=...): ...
-def nanargmin(a, axis=...): ...
-def nanargmax(a, axis=...): ...
-def nansum(a, axis=..., dtype=..., out=..., keepdims=...): ...
-def nanprod(a, axis=..., dtype=..., out=..., keepdims=...): ...
-def nancumsum(a, axis=..., dtype=..., out=...): ...
-def nancumprod(a, axis=..., dtype=..., out=...): ...
-def nanmean(a, axis=..., dtype=..., out=..., keepdims=...): ...
-def nanmedian(
- a,
- axis=...,
- out=...,
- overwrite_input=...,
- keepdims=...,
-): ...
-def nanpercentile(
- a,
- q,
- axis=...,
- out=...,
- overwrite_input=...,
- interpolation=...,
- keepdims=...,
-): ...
-def nanquantile(
- a,
- q,
- axis=...,
- out=...,
- overwrite_input=...,
- interpolation=...,
- keepdims=...,
-): ...
-def nanvar(
- a,
- axis=...,
- dtype=...,
- out=...,
- ddof=...,
- keepdims=...,
-): ...
-def nanstd(
- a,
- axis=...,
- dtype=...,
- out=...,
- ddof=...,
- keepdims=...,
-): ...
+# NOTE: In reality these functions are not aliases but distinct functions
+# with identical signatures.
+nanmin = amin
+nanmax = amax
+nanargmin = argmin
+nanargmax = argmax
+nansum = sum
+nanprod = prod
+nancumsum = cumsum
+nancumprod = cumprod
+nanmean = mean
+nanvar = var
+nanstd = std
+nanmedian = median
+nanpercentile = percentile
+nanquantile = quantile
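The aliasing trick works because, to a type checker, `nansum = sum` simply re-exports `sum`'s annotated signature under the new name, while at runtime the distinct implementations in `nanfunctions.py` still execute. A hedged sketch of what now type-checks:

    import numpy as np

    # Resolved through the `sum` stub, so the new keywords are typed too:
    np.nansum(np.array([1.0, np.nan, 2.0]), initial=0.0,
              where=np.array([True, False, True]))   # 3.0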
diff --git a/numpy/lib/polynomial.pyi b/numpy/lib/polynomial.pyi
index 7d38658d0..00065f53b 100644
--- a/numpy/lib/polynomial.pyi
+++ b/numpy/lib/polynomial.pyi
@@ -1,19 +1,305 @@
-from typing import List
+from typing import (
+ Literal as L,
+ List,
+ overload,
+ Any,
+ SupportsInt,
+ SupportsIndex,
+ TypeVar,
+ Tuple,
+ NoReturn,
+)
from numpy import (
RankWarning as RankWarning,
poly1d as poly1d,
+ unsignedinteger,
+ signedinteger,
+ floating,
+ complexfloating,
+ bool_,
+ int32,
+ int64,
+ float64,
+ complex128,
+ object_,
+)
+
+from numpy.typing import (
+ NDArray,
+ ArrayLike,
+ _ArrayLikeBool_co,
+ _ArrayLikeUInt_co,
+ _ArrayLikeInt_co,
+ _ArrayLikeFloat_co,
+ _ArrayLikeComplex_co,
+ _ArrayLikeObject_co,
)
+_T = TypeVar("_T")
+
+_2Tup = Tuple[_T, _T]
+_5Tup = Tuple[
+ _T,
+ NDArray[float64],
+ NDArray[int32],
+ NDArray[float64],
+ NDArray[float64],
+]
+
__all__: List[str]
-def poly(seq_of_zeros): ...
-def roots(p): ...
-def polyint(p, m=..., k=...): ...
-def polyder(p, m=...): ...
-def polyfit(x, y, deg, rcond=..., full=..., w=..., cov=...): ...
-def polyval(p, x): ...
-def polyadd(a1, a2): ...
-def polysub(a1, a2): ...
-def polymul(a1, a2): ...
-def polydiv(u, v): ...
+def poly(seq_of_zeros: ArrayLike) -> NDArray[floating[Any]]: ...
+
+# Returns either a float or complex array depending on the input values.
+# See `np.linalg.eigvals`.
+def roots(p: ArrayLike) -> NDArray[complexfloating[Any, Any]] | NDArray[floating[Any]]: ...
+
+@overload
+def polyint(
+ p: poly1d,
+ m: SupportsInt | SupportsIndex = ...,
+ k: None | _ArrayLikeComplex_co | _ArrayLikeObject_co = ...,
+) -> poly1d: ...
+@overload
+def polyint(
+ p: _ArrayLikeFloat_co,
+ m: SupportsInt | SupportsIndex = ...,
+ k: None | _ArrayLikeFloat_co = ...,
+) -> NDArray[floating[Any]]: ...
+@overload
+def polyint(
+ p: _ArrayLikeComplex_co,
+ m: SupportsInt | SupportsIndex = ...,
+ k: None | _ArrayLikeComplex_co = ...,
+) -> NDArray[complexfloating[Any, Any]]: ...
+@overload
+def polyint(
+ p: _ArrayLikeObject_co,
+ m: SupportsInt | SupportsIndex = ...,
+ k: None | _ArrayLikeObject_co = ...,
+) -> NDArray[object_]: ...
+
+@overload
+def polyder(
+ p: poly1d,
+ m: SupportsInt | SupportsIndex = ...,
+) -> poly1d: ...
+@overload
+def polyder(
+ p: _ArrayLikeFloat_co,
+ m: SupportsInt | SupportsIndex = ...,
+) -> NDArray[floating[Any]]: ...
+@overload
+def polyder(
+ p: _ArrayLikeComplex_co,
+ m: SupportsInt | SupportsIndex = ...,
+) -> NDArray[complexfloating[Any, Any]]: ...
+@overload
+def polyder(
+ p: _ArrayLikeObject_co,
+ m: SupportsInt | SupportsIndex = ...,
+) -> NDArray[object_]: ...
+
+@overload
+def polyfit(
+ x: _ArrayLikeFloat_co,
+ y: _ArrayLikeFloat_co,
+ deg: SupportsIndex | SupportsInt,
+ rcond: None | float = ...,
+ full: L[False] = ...,
+ w: None | _ArrayLikeFloat_co = ...,
+ cov: L[False] = ...,
+) -> NDArray[float64]: ...
+@overload
+def polyfit(
+ x: _ArrayLikeComplex_co,
+ y: _ArrayLikeComplex_co,
+ deg: SupportsIndex | SupportsInt,
+ rcond: None | float = ...,
+ full: L[False] = ...,
+ w: None | _ArrayLikeFloat_co = ...,
+ cov: L[False] = ...,
+) -> NDArray[complex128]: ...
+@overload
+def polyfit(
+ x: _ArrayLikeFloat_co,
+ y: _ArrayLikeFloat_co,
+ deg: SupportsIndex | SupportsInt,
+ rcond: None | float = ...,
+ full: L[False] = ...,
+ w: None | _ArrayLikeFloat_co = ...,
+ cov: L[True, "unscaled"] = ...,
+) -> _2Tup[NDArray[float64]]: ...
+@overload
+def polyfit(
+ x: _ArrayLikeComplex_co,
+ y: _ArrayLikeComplex_co,
+ deg: SupportsIndex | SupportsInt,
+ rcond: None | float = ...,
+ full: L[False] = ...,
+ w: None | _ArrayLikeFloat_co = ...,
+ cov: L[True, "unscaled"] = ...,
+) -> _2Tup[NDArray[complex128]]: ...
+@overload
+def polyfit(
+ x: _ArrayLikeFloat_co,
+ y: _ArrayLikeFloat_co,
+ deg: SupportsIndex | SupportsInt,
+ rcond: None | float = ...,
+ full: L[True] = ...,
+ w: None | _ArrayLikeFloat_co = ...,
+ cov: bool | L["unscaled"] = ...,
+) -> _5Tup[NDArray[float64]]: ...
+@overload
+def polyfit(
+ x: _ArrayLikeComplex_co,
+ y: _ArrayLikeComplex_co,
+ deg: SupportsIndex | SupportsInt,
+ rcond: None | float = ...,
+ full: L[True] = ...,
+ w: None | _ArrayLikeFloat_co = ...,
+ cov: bool | L["unscaled"] = ...,
+) -> _5Tup[NDArray[complex128]]: ...
+
+@overload
+def polyval(
+ p: _ArrayLikeBool_co,
+ x: _ArrayLikeBool_co,
+) -> NDArray[int64]: ...
+@overload
+def polyval(
+ p: _ArrayLikeUInt_co,
+ x: _ArrayLikeUInt_co,
+) -> NDArray[unsignedinteger[Any]]: ...
+@overload
+def polyval(
+ p: _ArrayLikeInt_co,
+ x: _ArrayLikeInt_co,
+) -> NDArray[signedinteger[Any]]: ...
+@overload
+def polyval(
+ p: _ArrayLikeFloat_co,
+ x: _ArrayLikeFloat_co,
+) -> NDArray[floating[Any]]: ...
+@overload
+def polyval(
+ p: _ArrayLikeComplex_co,
+ x: _ArrayLikeComplex_co,
+) -> NDArray[complexfloating[Any, Any]]: ...
+@overload
+def polyval(
+ p: _ArrayLikeObject_co,
+ x: _ArrayLikeObject_co,
+) -> NDArray[object_]: ...
+
+@overload
+def polyadd(
+ a1: poly1d,
+ a2: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+) -> poly1d: ...
+@overload
+def polyadd(
+ a1: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ a2: poly1d,
+) -> poly1d: ...
+@overload
+def polyadd(
+ a1: _ArrayLikeBool_co,
+ a2: _ArrayLikeBool_co,
+) -> NDArray[bool_]: ...
+@overload
+def polyadd(
+ a1: _ArrayLikeUInt_co,
+ a2: _ArrayLikeUInt_co,
+) -> NDArray[unsignedinteger[Any]]: ...
+@overload
+def polyadd(
+ a1: _ArrayLikeInt_co,
+ a2: _ArrayLikeInt_co,
+) -> NDArray[signedinteger[Any]]: ...
+@overload
+def polyadd(
+ a1: _ArrayLikeFloat_co,
+ a2: _ArrayLikeFloat_co,
+) -> NDArray[floating[Any]]: ...
+@overload
+def polyadd(
+ a1: _ArrayLikeComplex_co,
+ a2: _ArrayLikeComplex_co,
+) -> NDArray[complexfloating[Any, Any]]: ...
+@overload
+def polyadd(
+ a1: _ArrayLikeObject_co,
+ a2: _ArrayLikeObject_co,
+) -> NDArray[object_]: ...
+
+@overload
+def polysub(
+ a1: poly1d,
+ a2: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+) -> poly1d: ...
+@overload
+def polysub(
+ a1: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ a2: poly1d,
+) -> poly1d: ...
+@overload
+def polysub(
+ a1: _ArrayLikeBool_co,
+ a2: _ArrayLikeBool_co,
+) -> NoReturn: ...
+@overload
+def polysub(
+ a1: _ArrayLikeUInt_co,
+ a2: _ArrayLikeUInt_co,
+) -> NDArray[unsignedinteger[Any]]: ...
+@overload
+def polysub(
+ a1: _ArrayLikeInt_co,
+ a2: _ArrayLikeInt_co,
+) -> NDArray[signedinteger[Any]]: ...
+@overload
+def polysub(
+ a1: _ArrayLikeFloat_co,
+ a2: _ArrayLikeFloat_co,
+) -> NDArray[floating[Any]]: ...
+@overload
+def polysub(
+ a1: _ArrayLikeComplex_co,
+ a2: _ArrayLikeComplex_co,
+) -> NDArray[complexfloating[Any, Any]]: ...
+@overload
+def polysub(
+ a1: _ArrayLikeObject_co,
+ a2: _ArrayLikeObject_co,
+) -> NDArray[object_]: ...
+
+# NOTE: Not an alias, but they do have the same signature (that we can reuse)
+polymul = polyadd
+
+@overload
+def polydiv(
+ u: poly1d,
+ v: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+) -> _2Tup[poly1d]: ...
+@overload
+def polydiv(
+ u: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ v: poly1d,
+) -> _2Tup[poly1d]: ...
+@overload
+def polydiv(
+ u: _ArrayLikeFloat_co,
+ v: _ArrayLikeFloat_co,
+) -> _2Tup[NDArray[floating[Any]]]: ...
+@overload
+def polydiv(
+ u: _ArrayLikeComplex_co,
+ v: _ArrayLikeComplex_co,
+) -> _2Tup[NDArray[complexfloating[Any, Any]]]: ...
+@overload
+def polydiv(
+ u: _ArrayLikeObject_co,
+ v: _ArrayLikeObject_co,
+) -> _2Tup[NDArray[Any]]: ...
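A sketch of the return shapes these `polyfit` overloads distinguish (the data values are arbitrary):

    import numpy as np

    x = np.linspace(0.0, 1.0, 20)
    y = 3.0 * x**2 - x + 0.5
    c = np.polyfit(x, y, 2)                        # NDArray[float64]
    c, V = np.polyfit(x, y, 2, cov=True)           # the _2Tup overload: coefficients + covariance
    c, res, rank, sv, rcond = np.polyfit(x, y, 2, full=True)   # the _5Tup overload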
diff --git a/numpy/lib/tests/test_format.py b/numpy/lib/tests/test_format.py
index 10656a233..78e67a89b 100644
--- a/numpy/lib/tests/test_format.py
+++ b/numpy/lib/tests/test_format.py
@@ -276,8 +276,6 @@ Test the header writing.
'''
import sys
import os
-import shutil
-import tempfile
import warnings
import pytest
from io import BytesIO
diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py
index 5f27ea655..c7dfe5673 100644
--- a/numpy/lib/tests/test_function_base.py
+++ b/numpy/lib/tests/test_function_base.py
@@ -553,6 +553,11 @@ class TestInsert:
with pytest.raises(IndexError):
np.insert([0, 1, 2], np.array([], dtype=float), [])
+ @pytest.mark.parametrize('idx', [4, -4])
+ def test_index_out_of_bounds(self, idx):
+ with pytest.raises(IndexError, match='out of bounds'):
+ np.insert([0, 1, 2], [idx], [3, 4])
+
class TestAmax:
@@ -1528,7 +1533,7 @@ class TestVectorize:
([('x',)], [('y',), ()]))
assert_equal(nfb._parse_gufunc_signature('(),(a,b,c),(d)->(d,e)'),
([(), ('a', 'b', 'c'), ('d',)], [('d', 'e')]))
-
+
# Tests to check if whitespaces are ignored
assert_equal(nfb._parse_gufunc_signature('(x )->()'), ([('x',)], [()]))
assert_equal(nfb._parse_gufunc_signature('( x , y )->( )'),
@@ -1853,35 +1858,116 @@ class TestUnwrap:
assert sm_discont.dtype == wrap_uneven.dtype
+@pytest.mark.parametrize(
+ "dtype", "O" + np.typecodes["AllInteger"] + np.typecodes["Float"]
+)
+@pytest.mark.parametrize("M", [0, 1, 10])
class TestFilterwindows:
- def test_hanning(self):
+ def test_hanning(self, dtype: str, M: int) -> None:
+ scalar = np.array(M, dtype=dtype)[()]
+
+ w = hanning(scalar)
+ if dtype == "O":
+ ref_dtype = np.float64
+ else:
+ ref_dtype = np.result_type(scalar.dtype, np.float64)
+ assert w.dtype == ref_dtype
+
# check symmetry
- w = hanning(10)
assert_equal(w, flipud(w))
+
# check known value
- assert_almost_equal(np.sum(w, axis=0), 4.500, 4)
+ if scalar < 1:
+ assert_array_equal(w, np.array([]))
+ elif scalar == 1:
+ assert_array_equal(w, np.ones(1))
+ else:
+ assert_almost_equal(np.sum(w, axis=0), 4.500, 4)
+
+ def test_hamming(self, dtype: str, M: int) -> None:
+ scalar = np.array(M, dtype=dtype)[()]
+
+ w = hamming(scalar)
+ if dtype == "O":
+ ref_dtype = np.float64
+ else:
+ ref_dtype = np.result_type(scalar.dtype, np.float64)
+ assert w.dtype == ref_dtype
- def test_hamming(self):
# check symmetry
- w = hamming(10)
assert_equal(w, flipud(w))
+
# check known value
- assert_almost_equal(np.sum(w, axis=0), 4.9400, 4)
+ if scalar < 1:
+ assert_array_equal(w, np.array([]))
+ elif scalar == 1:
+ assert_array_equal(w, np.ones(1))
+ else:
+ assert_almost_equal(np.sum(w, axis=0), 4.9400, 4)
+
+ def test_bartlett(self, dtype: str, M: int) -> None:
+ scalar = np.array(M, dtype=dtype)[()]
+
+ w = bartlett(scalar)
+ if dtype == "O":
+ ref_dtype = np.float64
+ else:
+ ref_dtype = np.result_type(scalar.dtype, np.float64)
+ assert w.dtype == ref_dtype
- def test_bartlett(self):
# check symmetry
- w = bartlett(10)
assert_equal(w, flipud(w))
+
# check known value
- assert_almost_equal(np.sum(w, axis=0), 4.4444, 4)
+ if scalar < 1:
+ assert_array_equal(w, np.array([]))
+ elif scalar == 1:
+ assert_array_equal(w, np.ones(1))
+ else:
+ assert_almost_equal(np.sum(w, axis=0), 4.4444, 4)
+
+ def test_blackman(self, dtype: str, M: int) -> None:
+ scalar = np.array(M, dtype=dtype)[()]
+
+ w = blackman(scalar)
+ if dtype == "O":
+ ref_dtype = np.float64
+ else:
+ ref_dtype = np.result_type(scalar.dtype, np.float64)
+ assert w.dtype == ref_dtype
- def test_blackman(self):
# check symmetry
- w = blackman(10)
assert_equal(w, flipud(w))
+
# check known value
- assert_almost_equal(np.sum(w, axis=0), 3.7800, 4)
+ if scalar < 1:
+ assert_array_equal(w, np.array([]))
+ elif scalar == 1:
+ assert_array_equal(w, np.ones(1))
+ else:
+ assert_almost_equal(np.sum(w, axis=0), 3.7800, 4)
+
+ def test_kaiser(self, dtype: str, M: int) -> None:
+ scalar = np.array(M, dtype=dtype)[()]
+
+ w = kaiser(scalar, 0)
+ if dtype == "O":
+ ref_dtype = np.float64
+ else:
+ ref_dtype = np.result_type(scalar.dtype, np.float64)
+ assert w.dtype == ref_dtype
+
+ # check symmetry
+ assert_equal(w, flipud(w))
+
+ # check known value
+ if scalar < 1:
+ assert_array_equal(w, np.array([]))
+ elif scalar == 1:
+ assert_array_equal(w, np.ones(1))
+ else:
+ assert_almost_equal(np.sum(w, axis=0), 10, 15)
class TestTrapz:
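The edge cases the new parametrization covers (`M` of 0 and 1) behave the same way for every window; restating the known results asserted above:

    import numpy as np

    np.hanning(0)     # array([], dtype=float64) -- M < 1 yields an empty window
    np.hanning(1)     # array([1.])              -- a single-point window
    w = np.hanning(10)
    bool(np.all(w == w[::-1]))   # True: the window is symmetric
    round(float(w.sum()), 3)     # 4.5, the known value asserted above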
diff --git a/numpy/lib/tests/test_index_tricks.py b/numpy/lib/tests/test_index_tricks.py
index c21aefd1a..26a34be7e 100644
--- a/numpy/lib/tests/test_index_tricks.py
+++ b/numpy/lib/tests/test_index_tricks.py
@@ -4,7 +4,6 @@ import numpy as np
from numpy.testing import (
assert_, assert_equal, assert_array_equal, assert_almost_equal,
assert_array_almost_equal, assert_raises, assert_raises_regex,
- assert_warns
)
from numpy.lib.index_tricks import (
mgrid, ogrid, ndenumerate, fill_diagonal, diag_indices, diag_indices_from,
diff --git a/numpy/lib/tests/test_nanfunctions.py b/numpy/lib/tests/test_nanfunctions.py
index 3fdeec41c..126dba495 100644
--- a/numpy/lib/tests/test_nanfunctions.py
+++ b/numpy/lib/tests/test_nanfunctions.py
@@ -1,11 +1,12 @@
import warnings
import pytest
+import inspect
import numpy as np
from numpy.lib.nanfunctions import _nan_mask, _replace_nan
from numpy.testing import (
- assert_, assert_equal, assert_almost_equal, assert_no_warnings,
- assert_raises, assert_array_equal, suppress_warnings
+ assert_, assert_equal, assert_almost_equal, assert_raises,
+ assert_array_equal, suppress_warnings
)
@@ -35,6 +36,53 @@ _ndat_zeros = np.array([[0.6244, 0.0, 0.2692, 0.0116, 0.0, 0.1170],
[0.1610, 0.0, 0.0, 0.1859, 0.3146, 0.0]])
+class TestSignatureMatch:
+ NANFUNCS = {
+ np.nanmin: np.amin,
+ np.nanmax: np.amax,
+ np.nanargmin: np.argmin,
+ np.nanargmax: np.argmax,
+ np.nansum: np.sum,
+ np.nanprod: np.prod,
+ np.nancumsum: np.cumsum,
+ np.nancumprod: np.cumprod,
+ np.nanmean: np.mean,
+ np.nanmedian: np.median,
+ np.nanpercentile: np.percentile,
+ np.nanquantile: np.quantile,
+ np.nanvar: np.var,
+ np.nanstd: np.std,
+ }
+ IDS = [k.__name__ for k in NANFUNCS]
+
+ @staticmethod
+ def get_signature(func, default="..."):
+ """Construct a signature and replace all default parameter-values."""
+ prm_list = []
+ signature = inspect.signature(func)
+ for prm in signature.parameters.values():
+ if prm.default is inspect.Parameter.empty:
+ prm_list.append(prm)
+ else:
+ prm_list.append(prm.replace(default=default))
+ return inspect.Signature(prm_list)
+
+ @pytest.mark.parametrize("nan_func,func", NANFUNCS.items(), ids=IDS)
+ def test_signature_match(self, nan_func, func):
+ # Ignore the default parameter-values as they can sometimes differ
+ # between the two functions (*e.g.* one has `False` while the other
+ # has `np._NoValue`)
+ signature = self.get_signature(func)
+ nan_signature = self.get_signature(nan_func)
+ np.testing.assert_equal(signature, nan_signature)
+
+ def test_exhaustiveness(self):
+ """Validate that all nan functions are actually tested."""
+ np.testing.assert_equal(
+ set(self.IDS), set(np.lib.nanfunctions.__all__)
+ )
+
+
class TestNanFunctions_MinMax:
nanfuncs = [np.nanmin, np.nanmax]
@@ -170,6 +218,46 @@ class TestNanFunctions_MinMax:
assert_(len(w) == 1, 'no warning raised')
assert_(issubclass(w[0].category, RuntimeWarning))
+ @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])
+ def test_initial(self, dtype):
+ class MyNDArray(np.ndarray):
+ pass
+
+ ar = np.arange(9).astype(dtype)
+ ar[:5] = np.nan
+
+ for f in self.nanfuncs:
+ initial = 100 if f is np.nanmax else 0
+
+ ret1 = f(ar, initial=initial)
+ assert ret1.dtype == dtype
+ assert ret1 == initial
+
+ ret2 = f(ar.view(MyNDArray), initial=initial)
+ assert ret2.dtype == dtype
+ assert ret2 == initial
+
+ @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])
+ def test_where(self, dtype):
+ class MyNDArray(np.ndarray):
+ pass
+
+ ar = np.arange(9).reshape(3, 3).astype(dtype)
+ ar[0, :] = np.nan
+ where = np.ones_like(ar, dtype=np.bool_)
+ where[:, 0] = False
+
+ for f in self.nanfuncs:
+ reference = 4 if f is np.nanmin else 8
+
+ ret1 = f(ar, where=where, initial=5)
+ assert ret1.dtype == dtype
+ assert ret1 == reference
+
+ ret2 = f(ar.view(MyNDArray), where=where, initial=5)
+ assert ret2.dtype == dtype
+ assert ret2 == reference
+
class TestNanFunctions_ArgminArgmax:
@@ -240,6 +328,30 @@ class TestNanFunctions_ArgminArgmax:
res = f(mine)
assert_(res.shape == ())
+ @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])
+ def test_keepdims(self, dtype):
+ ar = np.arange(9).astype(dtype)
+ ar[:5] = np.nan
+
+ for f in self.nanfuncs:
+ reference = 5 if f is np.nanargmin else 8
+ ret = f(ar, keepdims=True)
+ assert ret.ndim == ar.ndim
+ assert ret == reference
+
+ @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])
+ def test_out(self, dtype):
+ ar = np.arange(9).astype(dtype)
+ ar[:5] = np.nan
+
+ for f in self.nanfuncs:
+ out = np.zeros((), dtype=np.intp)
+ reference = 5 if f is np.nanargmin else 8
+ ret = f(ar, out=out)
+ assert ret is out
+ assert ret == reference
+
+
_TEST_ARRAYS = {
"0d": np.array(5),
@@ -456,6 +568,30 @@ class TestNanFunctions_SumProd(SharedNanFunctionsTestsMixin):
res = f(mat, axis=None)
assert_equal(res, tgt)
+ @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])
+ def test_initial(self, dtype):
+ ar = np.arange(9).astype(dtype)
+ ar[:5] = np.nan
+
+ for f in self.nanfuncs:
+ reference = 28 if f is np.nansum else 3360
+ ret = f(ar, initial=2)
+ assert ret.dtype == dtype
+ assert ret == reference
+
+ @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])
+ def test_where(self, dtype):
+ ar = np.arange(9).reshape(3, 3).astype(dtype)
+ ar[0, :] = np.nan
+ where = np.ones_like(ar, dtype=np.bool_)
+ where[:, 0] = False
+
+ for f in self.nanfuncs:
+ reference = 26 if f is np.nansum else 2240
+ ret = f(ar, where=where, initial=2)
+ assert ret.dtype == dtype
+ assert ret == reference
+
class TestNanFunctions_CumSumProd(SharedNanFunctionsTestsMixin):
@@ -611,6 +747,21 @@ class TestNanFunctions_MeanVarStd(SharedNanFunctionsTestsMixin):
assert_equal(f(mat, axis=axis), np.zeros([]))
assert_(len(w) == 0)
+ @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])
+ def test_where(self, dtype):
+ ar = np.arange(9).reshape(3, 3).astype(dtype)
+ ar[0, :] = np.nan
+ where = np.ones_like(ar, dtype=np.bool_)
+ where[:, 0] = False
+
+ for f, f_std in zip(self.nanfuncs, self.stdfuncs):
+ reference = f_std(ar[where][2:])
+ dtype_reference = dtype if f is np.nanmean else ar.real.dtype
+
+ ret = f(ar, where=where)
+ assert ret.dtype == dtype_reference
+ np.testing.assert_allclose(ret, reference)
+
_TIME_UNITS = (
"Y", "M", "W", "D", "h", "m", "s", "ms", "us", "ns", "ps", "fs", "as"
diff --git a/numpy/lib/tests/test_regression.py b/numpy/lib/tests/test_regression.py
index 373226277..55df2a675 100644
--- a/numpy/lib/tests/test_regression.py
+++ b/numpy/lib/tests/test_regression.py
@@ -1,5 +1,3 @@
-import pytest
-
import os
import numpy as np
diff --git a/numpy/linalg/lapack_lite/make_lite.py b/numpy/linalg/lapack_lite/make_lite.py
index 398c27e94..ca8d4c62c 100755
--- a/numpy/linalg/lapack_lite/make_lite.py
+++ b/numpy/linalg/lapack_lite/make_lite.py
@@ -341,10 +341,7 @@ def main():
lapack_src_dir = sys.argv[2]
output_dir = os.path.join(os.path.dirname(__file__), 'build')
- try:
- shutil.rmtree(output_dir)
- except:
- pass
+ shutil.rmtree(output_dir, ignore_errors=True)
os.makedirs(output_dir)
wrapped_routines, ignores = getWrappedRoutineNames(wrapped_routines_file)
diff --git a/numpy/linalg/linalg.py b/numpy/linalg/linalg.py
index 95780d19d..d002a34d4 100644
--- a/numpy/linalg/linalg.py
+++ b/numpy/linalg/linalg.py
@@ -30,7 +30,7 @@ from numpy.core.multiarray import normalize_axis_index
from numpy.core.overrides import set_module
from numpy.core import overrides
from numpy.lib.twodim_base import triu, eye
-from numpy.linalg import lapack_lite, _umath_linalg
+from numpy.linalg import _umath_linalg
array_function_dispatch = functools.partial(
diff --git a/numpy/linalg/setup.py b/numpy/linalg/setup.py
index e2944f38c..94536bb2c 100644
--- a/numpy/linalg/setup.py
+++ b/numpy/linalg/setup.py
@@ -3,8 +3,7 @@ import sys
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
- from numpy.distutils.system_info import (
- get_info, system_info, lapack_opt_info, blas_opt_info)
+ from numpy.distutils.system_info import get_info, system_info
config = Configuration('linalg', parent_package, top_path)
config.add_subpackage('tests')
diff --git a/numpy/linalg/tests/test_linalg.py b/numpy/linalg/tests/test_linalg.py
index a45323bb3..c1ba84a8e 100644
--- a/numpy/linalg/tests/test_linalg.py
+++ b/numpy/linalg/tests/test_linalg.py
@@ -1,7 +1,6 @@
""" Test functions for linalg module
"""
-from numpy.core.fromnumeric import shape
import os
import sys
import itertools
@@ -22,7 +21,6 @@ from numpy.testing import (
assert_almost_equal, assert_allclose, suppress_warnings,
assert_raises_regex, HAS_LAPACK64,
)
-from numpy.testing._private.utils import requires_memory
def consistent_subclass(out, in_):
@@ -1072,7 +1070,6 @@ class TestMatrixPower:
assert_raises(LinAlgError, matrix_power, mat, -1)
-
class TestEigvalshCases(HermitianTestCase, HermitianGeneralizedTestCase):
def do(self, a, b, tags):
diff --git a/numpy/ma/bench.py b/numpy/ma/bench.py
index e29d54365..56865683d 100644
--- a/numpy/ma/bench.py
+++ b/numpy/ma/bench.py
@@ -1,5 +1,4 @@
#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
import timeit
import numpy
diff --git a/numpy/ma/core.py b/numpy/ma/core.py
index 2ff1667ba..036d6312c 100644
--- a/numpy/ma/core.py
+++ b/numpy/ma/core.py
@@ -1065,7 +1065,7 @@ class _MaskedBinaryOperation(_MaskedUFunc):
tr = self.f.reduce(t, axis)
mr = nomask
else:
- tr = self.f.reduce(t, axis, dtype=dtype or t.dtype)
+ tr = self.f.reduce(t, axis, dtype=dtype)
mr = umath.logical_and.reduce(m, axis)
if not tr.shape:
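The one-line fix matters because `dtype or t.dtype` silently forced the input dtype whenever `dtype` was None, whereas `reduce` should be left to promote on its own. A plain-ufunc illustration of the difference:

    import numpy as np

    t = np.ones(5, dtype=bool)
    np.add.reduce(t)                  # 5 -- with dtype=None the ufunc promotes
    np.add.reduce(t, dtype=t.dtype)   # True -- what `dtype or t.dtype` used to force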
diff --git a/numpy/matrixlib/__init__.pyi b/numpy/matrixlib/__init__.pyi
index 26453f000..c1b82d2ec 100644
--- a/numpy/matrixlib/__init__.pyi
+++ b/numpy/matrixlib/__init__.pyi
@@ -1,4 +1,4 @@
-from typing import Any, List
+from typing import List
from numpy._pytesttester import PytestTester
@@ -6,10 +6,12 @@ from numpy import (
matrix as matrix,
)
+from numpy.matrixlib.defmatrix import (
+ bmat as bmat,
+ mat as mat,
+ asmatrix as asmatrix,
+)
+
__all__: List[str]
__path__: List[str]
test: PytestTester
-
-def bmat(obj, ldict=..., gdict=...): ...
-def asmatrix(data, dtype=...): ...
-mat = asmatrix
diff --git a/numpy/matrixlib/defmatrix.pyi b/numpy/matrixlib/defmatrix.pyi
new file mode 100644
index 000000000..6c86ea1ef
--- /dev/null
+++ b/numpy/matrixlib/defmatrix.pyi
@@ -0,0 +1,15 @@
+from typing import List, Any, Sequence, Mapping
+from numpy import matrix as matrix
+from numpy.typing import ArrayLike, DTypeLike, NDArray
+
+__all__: List[str]
+
+def bmat(
+ obj: str | Sequence[ArrayLike] | NDArray[Any],
+ ldict: None | Mapping[str, Any] = ...,
+ gdict: None | Mapping[str, Any] = ...,
+) -> matrix[Any, Any]: ...
+
+def asmatrix(data: ArrayLike, dtype: DTypeLike = ...) -> matrix[Any, Any]: ...
+
+mat = asmatrix
diff --git a/numpy/polynomial/tests/test_classes.py b/numpy/polynomial/tests/test_classes.py
index 8e71a1945..6322062f2 100644
--- a/numpy/polynomial/tests/test_classes.py
+++ b/numpy/polynomial/tests/test_classes.py
@@ -597,4 +597,4 @@ class TestInterpolate:
for deg in range(0, 10):
for t in range(0, deg + 1):
p = Chebyshev.interpolate(powx, deg, domain=[0, 2], args=(t,))
- assert_almost_equal(p(x), powx(x, t), decimal=12)
+ assert_almost_equal(p(x), powx(x, t), decimal=11)
diff --git a/numpy/random/src/pcg64/pcg64.c b/numpy/random/src/pcg64/pcg64.c
index c623c809b..b9be1e39d 100644
--- a/numpy/random/src/pcg64/pcg64.c
+++ b/numpy/random/src/pcg64/pcg64.c
@@ -109,8 +109,7 @@ pcg128_t pcg_advance_lcg_128(pcg128_t state, pcg128_t delta, pcg128_t cur_mult,
cur_plus = pcg128_mult(pcg128_add(cur_mult, PCG_128BIT_CONSTANT(0u, 1u)),
cur_plus);
cur_mult = pcg128_mult(cur_mult, cur_mult);
- delta.low >>= 1;
- delta.low += delta.high & 1;
+ delta.low = (delta.low >> 1) | (delta.high << 63);
delta.high >>= 1;
}
return pcg128_add(pcg128_mult(acc_mult, state), acc_plus);
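The corrected line performs a proper 128-bit right shift on a (high, low) pair of 64-bit words; the old code added the carried bit at bit 0 instead of placing it at bit 63. Modelled in Python (the helper names are ours):

    MASK64 = (1 << 64) - 1

    def shift_right_1(high, low):
        # carry bit 0 of `high` into bit 63 of `low`
        return high >> 1, ((low >> 1) | (high << 63)) & MASK64

    def buggy_shift_right_1(high, low):
        # the old code *added* the carried bit at bit 0 instead of bit 63
        return high >> 1, ((low >> 1) + (high & 1)) & MASK64

    assert shift_right_1(1, 0) == (0, 1 << 63)   # 2**64 >> 1 == 2**63
    assert buggy_shift_right_1(1, 0) == (0, 1)   # off by a factor of 2**63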
diff --git a/numpy/random/tests/test_direct.py b/numpy/random/tests/test_direct.py
index 29054b70b..ea1ebacb6 100644
--- a/numpy/random/tests/test_direct.py
+++ b/numpy/random/tests/test_direct.py
@@ -358,6 +358,17 @@ class TestPCG64(Base):
assert val_neg == val_pos
assert val_big == val_pos
+ def test_advance_large(self):
+ rs = Generator(self.bit_generator(38219308213743))
+ pcg = rs.bit_generator
+ state = pcg.state["state"]
+ initial_state = 287608843259529770491897792873167516365
+ assert state["state"] == initial_state
+ pcg.advance(sum(2**i for i in (96, 64, 32, 16, 8, 4, 2, 1)))
+ state = pcg.state["state"]
+ advanced_state = 135275564607035429730177404003164635391
+ assert state["state"] == advanced_state
+
class TestPCG64DXSM(Base):
@classmethod
@@ -386,6 +397,17 @@ class TestPCG64DXSM(Base):
assert val_neg == val_pos
assert val_big == val_pos
+ def test_advance_large(self):
+ rs = Generator(self.bit_generator(38219308213743))
+ pcg = rs.bit_generator
+ state = pcg.state["state"]
+ initial_state = 287608843259529770491897792873167516365
+ assert state["state"] == initial_state
+ pcg.advance(sum(2**i for i in (96, 64, 32, 16, 8, 4, 2, 1)))
+ state = pcg.state["state"]
+ advanced_state = 277778083536782149546677086420637664879
+ assert state["state"] == advanced_state
+
class TestMT19937(Base):
@classmethod
diff --git a/numpy/random/tests/test_generator_mt19937_regressions.py b/numpy/random/tests/test_generator_mt19937_regressions.py
index 88d2792a6..0227d6502 100644
--- a/numpy/random/tests/test_generator_mt19937_regressions.py
+++ b/numpy/random/tests/test_generator_mt19937_regressions.py
@@ -1,7 +1,7 @@
from numpy.testing import (assert_, assert_array_equal)
import numpy as np
import pytest
-from numpy.random import Generator, MT19937, RandomState
+from numpy.random import Generator, MT19937
mt19937 = Generator(MT19937())
diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py
index 77ca4ef85..3d52f74b2 100644
--- a/numpy/testing/_private/utils.py
+++ b/numpy/testing/_private/utils.py
@@ -2402,9 +2402,9 @@ def break_cycles():
gc.collect()
if IS_PYPY:
- # interpreter runs now, to call deleted objects' __del__ methods
+ # a few more, just to make sure all the finalizers are called
+ gc.collect()
gc.collect()
- # two more, just to make sure
gc.collect()
gc.collect()
diff --git a/numpy/tests/test_ctypeslib.py b/numpy/tests/test_ctypeslib.py
index af3730df1..1ea083700 100644
--- a/numpy/tests/test_ctypeslib.py
+++ b/numpy/tests/test_ctypeslib.py
@@ -1,6 +1,7 @@
import sys
import pytest
import weakref
+from pathlib import Path
import numpy as np
from numpy.ctypeslib import ndpointer, load_library, as_array
@@ -37,13 +38,15 @@ else:
reason="Known to fail on cygwin")
class TestLoadLibrary:
def test_basic(self):
- try:
- # Should succeed
- load_library('_multiarray_umath', np.core._multiarray_umath.__file__)
- except ImportError as e:
- msg = ("ctypes is not available on this python: skipping the test"
- " (import error was: %s)" % str(e))
- print(msg)
+ loader_path = np.core._multiarray_umath.__file__
+
+ out1 = load_library('_multiarray_umath', loader_path)
+ out2 = load_library(Path('_multiarray_umath'), loader_path)
+ out3 = load_library('_multiarray_umath', Path(loader_path))
+ out4 = load_library(b'_multiarray_umath', loader_path)
+
+ assert isinstance(out1, ctypes.CDLL)
+ assert out1 is out2 is out3 is out4
def test_basic2(self):
# Regression for #801: load_library with a full library name
diff --git a/numpy/typing/_nested_sequence.py b/numpy/typing/_nested_sequence.py
index e3b8fc33f..a853303ca 100644
--- a/numpy/typing/_nested_sequence.py
+++ b/numpy/typing/_nested_sequence.py
@@ -2,7 +2,6 @@
from __future__ import annotations
-import sys
from typing import (
Any,
Iterator,
@@ -86,8 +85,6 @@ class _NestedSequence(Protocol[_T_co]):
"""Return the number of occurrences of `value`."""
raise NotImplementedError
- def index(
- self, value: Any, start: int = 0, stop: int = sys.maxsize, /
- ) -> int:
+ def index(self, value: Any, /) -> int:
"""Return the first index of `value`."""
raise NotImplementedError
diff --git a/numpy/typing/mypy_plugin.py b/numpy/typing/mypy_plugin.py
index 5421d6bfa..5ac75f94d 100644
--- a/numpy/typing/mypy_plugin.py
+++ b/numpy/typing/mypy_plugin.py
@@ -131,9 +131,8 @@ if TYPE_CHECKING or MYPY_EX is None:
for i, value in enumerate(iterable):
if getattr(value, "id", None) == id:
return i
- else:
- raise ValueError("Failed to identify a `ImportFrom` instance "
- f"with the following id: {id!r}")
+ raise ValueError("Failed to identify a `ImportFrom` instance "
+ f"with the following id: {id!r}")
def _override_imports(
file: MypyFile,
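The dedented `raise` is equivalent to the removed `for ... else:` clause because the loop body only ever exits via `return`; a condensed model of the control flow:

    def _index(iterable, id):
        for i, value in enumerate(iterable):
            if getattr(value, "id", None) == id:
                return i
        # Reaching here means the loop never returned, so an unconditional
        # raise behaves exactly like the former `else:` block.
        raise ValueError(f"no ImportFrom with id {id!r}")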
diff --git a/numpy/typing/tests/data/fail/chararray.pyi b/numpy/typing/tests/data/fail/chararray.pyi
new file mode 100644
index 000000000..ebc182ec2
--- /dev/null
+++ b/numpy/typing/tests/data/fail/chararray.pyi
@@ -0,0 +1,62 @@
+import numpy as np
+from typing import Any
+
+AR_U: np.chararray[Any, np.dtype[np.str_]]
+AR_S: np.chararray[Any, np.dtype[np.bytes_]]
+
+AR_S.encode() # E: Invalid self argument
+AR_U.decode() # E: Invalid self argument
+
+AR_U.join(b"_") # E: incompatible type
+AR_S.join("_") # E: incompatible type
+
+AR_U.ljust(5, fillchar=b"a") # E: incompatible type
+AR_S.ljust(5, fillchar="a") # E: incompatible type
+AR_U.rjust(5, fillchar=b"a") # E: incompatible type
+AR_S.rjust(5, fillchar="a") # E: incompatible type
+
+AR_U.lstrip(chars=b"a") # E: incompatible type
+AR_S.lstrip(chars="a") # E: incompatible type
+AR_U.strip(chars=b"a") # E: incompatible type
+AR_S.strip(chars="a") # E: incompatible type
+AR_U.rstrip(chars=b"a") # E: incompatible type
+AR_S.rstrip(chars="a") # E: incompatible type
+
+AR_U.partition(b"a") # E: incompatible type
+AR_S.partition("a") # E: incompatible type
+AR_U.rpartition(b"a") # E: incompatible type
+AR_S.rpartition("a") # E: incompatible type
+
+AR_U.replace(b"_", b"-") # E: incompatible type
+AR_S.replace("_", "-") # E: incompatible type
+
+AR_U.split(b"_") # E: incompatible type
+AR_S.split("_") # E: incompatible type
+AR_S.split(1) # E: incompatible type
+AR_U.rsplit(b"_") # E: incompatible type
+AR_S.rsplit("_") # E: incompatible type
+
+AR_U.count(b"a", start=[1, 2, 3]) # E: incompatible type
+AR_S.count("a", end=9) # E: incompatible type
+
+AR_U.endswith(b"a", start=[1, 2, 3]) # E: incompatible type
+AR_S.endswith("a", end=9) # E: incompatible type
+AR_U.startswith(b"a", start=[1, 2, 3]) # E: incompatible type
+AR_S.startswith("a", end=9) # E: incompatible type
+
+AR_U.find(b"a", start=[1, 2, 3]) # E: incompatible type
+AR_S.find("a", end=9) # E: incompatible type
+AR_U.rfind(b"a", start=[1, 2, 3]) # E: incompatible type
+AR_S.rfind("a", end=9) # E: incompatible type
+
+AR_U.index(b"a", start=[1, 2, 3]) # E: incompatible type
+AR_S.index("a", end=9) # E: incompatible type
+AR_U.rindex(b"a", start=[1, 2, 3]) # E: incompatible type
+AR_S.rindex("a", end=9) # E: incompatible type
+
+AR_U == AR_S # E: Unsupported operand types
+AR_U != AR_S # E: Unsupported operand types
+AR_U >= AR_S # E: Unsupported operand types
+AR_U <= AR_S # E: Unsupported operand types
+AR_U > AR_S # E: Unsupported operand types
+AR_U < AR_S # E: Unsupported operand types
diff --git a/numpy/typing/tests/data/fail/histograms.pyi b/numpy/typing/tests/data/fail/histograms.pyi
new file mode 100644
index 000000000..ad151488d
--- /dev/null
+++ b/numpy/typing/tests/data/fail/histograms.pyi
@@ -0,0 +1,13 @@
+import numpy as np
+import numpy.typing as npt
+
+AR_i8: npt.NDArray[np.int64]
+AR_f8: npt.NDArray[np.float64]
+
+np.histogram_bin_edges(AR_i8, range=(0, 1, 2)) # E: incompatible type
+
+np.histogram(AR_i8, range=(0, 1, 2)) # E: incompatible type
+np.histogram(AR_i8, normed=True) # E: incompatible type
+
+np.histogramdd(AR_i8, range=(0, 1)) # E: incompatible type
+np.histogramdd(AR_i8, range=[(0, 1, 2)]) # E: incompatible type
diff --git a/numpy/typing/tests/data/fail/lib_function_base.pyi b/numpy/typing/tests/data/fail/lib_function_base.pyi
new file mode 100644
index 000000000..9cad2da03
--- /dev/null
+++ b/numpy/typing/tests/data/fail/lib_function_base.pyi
@@ -0,0 +1,53 @@
+from typing import Any
+
+import numpy as np
+import numpy.typing as npt
+
+AR_f8: npt.NDArray[np.float64]
+AR_c16: npt.NDArray[np.complex128]
+AR_m: npt.NDArray[np.timedelta64]
+AR_M: npt.NDArray[np.datetime64]
+AR_O: npt.NDArray[np.object_]
+
+def func(a: int) -> None: ...
+
+np.average(AR_m) # E: incompatible type
+np.select(1, [AR_f8]) # E: incompatible type
+np.angle(AR_m) # E: incompatible type
+np.unwrap(AR_m) # E: incompatible type
+np.unwrap(AR_c16) # E: incompatible type
+np.trim_zeros(1) # E: incompatible type
+np.place(1, [True], 1.5) # E: incompatible type
+np.vectorize(1) # E: incompatible type
+np.add_newdoc("__main__", 1.5, "docstring") # E: incompatible type
+np.place(AR_f8, slice(None), 5) # E: incompatible type
+
+np.interp(AR_f8, AR_c16, AR_f8) # E: incompatible type
+np.interp(AR_c16, AR_f8, AR_f8) # E: incompatible type
+np.interp(AR_f8, AR_f8, AR_f8, period=AR_c16) # E: No overload variant
+np.interp(AR_f8, AR_f8, AR_O) # E: incompatible type
+
+np.cov(AR_m) # E: incompatible type
+np.cov(AR_O) # E: incompatible type
+np.corrcoef(AR_m) # E: incompatible type
+np.corrcoef(AR_O) # E: incompatible type
+np.corrcoef(AR_f8, bias=True) # E: No overload variant
+np.corrcoef(AR_f8, ddof=2) # E: No overload variant
+np.blackman(1j) # E: incompatible type
+np.bartlett(1j) # E: incompatible type
+np.hanning(1j) # E: incompatible type
+np.hamming(1j) # E: incompatible type
+np.hamming(AR_c16) # E: incompatible type
+np.kaiser(1j, 1) # E: incompatible type
+np.sinc(AR_O) # E: incompatible type
+np.median(AR_M) # E: incompatible type
+
+np.add_newdoc_ufunc(func, "docstring") # E: incompatible type
+np.percentile(AR_f8, 50j) # E: No overload variant
+np.percentile(AR_f8, 50, interpolation="bob") # E: No overload variant
+np.quantile(AR_f8, 0.5j) # E: No overload variant
+np.quantile(AR_f8, 0.5, interpolation="bob") # E: No overload variant
+np.meshgrid(AR_f8, AR_f8, indexing="bob") # E: incompatible type
+np.delete(AR_f8, AR_f8) # E: incompatible type
+np.insert(AR_f8, AR_f8, 1.5) # E: incompatible type
+np.digitize(AR_f8, 1j) # E: No overload variant
diff --git a/numpy/typing/tests/data/fail/lib_polynomial.pyi b/numpy/typing/tests/data/fail/lib_polynomial.pyi
new file mode 100644
index 000000000..ca02d7bde
--- /dev/null
+++ b/numpy/typing/tests/data/fail/lib_polynomial.pyi
@@ -0,0 +1,29 @@
+import numpy as np
+import numpy.typing as npt
+
+AR_f8: npt.NDArray[np.float64]
+AR_c16: npt.NDArray[np.complex128]
+AR_O: npt.NDArray[np.object_]
+AR_U: npt.NDArray[np.str_]
+
+poly_obj: np.poly1d
+
+np.polyint(AR_U) # E: incompatible type
+np.polyint(AR_f8, m=1j) # E: No overload variant
+
+np.polyder(AR_U) # E: incompatible type
+np.polyder(AR_f8, m=1j) # E: No overload variant
+
+np.polyfit(AR_O, AR_f8, 1) # E: incompatible type
+np.polyfit(AR_f8, AR_f8, 1, rcond=1j) # E: No overload variant
+np.polyfit(AR_f8, AR_f8, 1, w=AR_c16) # E: incompatible type
+np.polyfit(AR_f8, AR_f8, 1, cov="bob") # E: No overload variant
+
+np.polyval(AR_f8, AR_U) # E: incompatible type
+np.polyadd(AR_f8, AR_U) # E: incompatible type
+np.polysub(AR_f8, AR_U) # E: incompatible type
+np.polymul(AR_f8, AR_U) # E: incompatible type
+np.polydiv(AR_f8, AR_U) # E: incompatible type
+
+5**poly_obj # E: No overload variant
+hash(poly_obj)
diff --git a/numpy/typing/tests/data/fail/ndarray_misc.pyi b/numpy/typing/tests/data/fail/ndarray_misc.pyi
index cf3fedc45..8320a44f3 100644
--- a/numpy/typing/tests/data/fail/ndarray_misc.pyi
+++ b/numpy/typing/tests/data/fail/ndarray_misc.pyi
@@ -35,3 +35,7 @@ AR_M.__int__() # E: Invalid self argument
AR_M.__float__() # E: Invalid self argument
AR_M.__complex__() # E: Invalid self argument
AR_b.__index__() # E: Invalid self argument
+
+AR_f8[1.5] # E: No overload variant
+AR_f8["field_a"] # E: No overload variant
+AR_f8[["field_a", "field_b"]] # E: Invalid index type
diff --git a/numpy/typing/tests/data/pass/array_constructors.py b/numpy/typing/tests/data/pass/array_constructors.py
index 206f70a15..2763d9c92 100644
--- a/numpy/typing/tests/data/pass/array_constructors.py
+++ b/numpy/typing/tests/data/pass/array_constructors.py
@@ -1,5 +1,5 @@
import sys
-from typing import List, Any
+from typing import Any
import numpy as np
diff --git a/numpy/typing/tests/data/pass/array_like.py b/numpy/typing/tests/data/pass/array_like.py
index e16d196b6..5bd2fda20 100644
--- a/numpy/typing/tests/data/pass/array_like.py
+++ b/numpy/typing/tests/data/pass/array_like.py
@@ -1,4 +1,4 @@
-from typing import Any, List, Optional
+from typing import Any, Optional
import numpy as np
from numpy.typing import ArrayLike, _SupportsArray
diff --git a/numpy/typing/tests/data/pass/einsumfunc.py b/numpy/typing/tests/data/pass/einsumfunc.py
index a2a39fb1c..429764e67 100644
--- a/numpy/typing/tests/data/pass/einsumfunc.py
+++ b/numpy/typing/tests/data/pass/einsumfunc.py
@@ -1,6 +1,6 @@
from __future__ import annotations
-from typing import List, Any
+from typing import Any
import numpy as np
diff --git a/numpy/typing/tests/data/pass/lib_utils.py b/numpy/typing/tests/data/pass/lib_utils.py
index 0a15dad22..65640c288 100644
--- a/numpy/typing/tests/data/pass/lib_utils.py
+++ b/numpy/typing/tests/data/pass/lib_utils.py
@@ -1,7 +1,6 @@
from __future__ import annotations
from io import StringIO
-from typing import Any
import numpy as np
diff --git a/numpy/typing/tests/data/pass/multiarray.py b/numpy/typing/tests/data/pass/multiarray.py
index e5d33c673..26cedfd77 100644
--- a/numpy/typing/tests/data/pass/multiarray.py
+++ b/numpy/typing/tests/data/pass/multiarray.py
@@ -1,4 +1,3 @@
-from typing import Any
import numpy as np
import numpy.typing as npt
diff --git a/numpy/typing/tests/data/reveal/chararray.pyi b/numpy/typing/tests/data/reveal/chararray.pyi
new file mode 100644
index 000000000..c0a39c92b
--- /dev/null
+++ b/numpy/typing/tests/data/reveal/chararray.pyi
@@ -0,0 +1,129 @@
+import numpy as np
+from typing import Any
+
+AR_U: np.chararray[Any, np.dtype[np.str_]]
+AR_S: np.chararray[Any, np.dtype[np.bytes_]]
+
+reveal_type(AR_U == AR_U) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
+reveal_type(AR_S == AR_S) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
+
+reveal_type(AR_U != AR_U) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
+reveal_type(AR_S != AR_S) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
+
+reveal_type(AR_U >= AR_U) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
+reveal_type(AR_S >= AR_S) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
+
+reveal_type(AR_U <= AR_U) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
+reveal_type(AR_S <= AR_S) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
+
+reveal_type(AR_U > AR_U) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
+reveal_type(AR_S > AR_S) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
+
+reveal_type(AR_U < AR_U) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
+reveal_type(AR_S < AR_S) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
+
+reveal_type(AR_U * 5) # E: numpy.chararray[Any, numpy.dtype[numpy.str_]]
+reveal_type(AR_S * [5]) # E: numpy.chararray[Any, numpy.dtype[numpy.bytes_]]
+
+reveal_type(AR_U % "test") # E: numpy.chararray[Any, numpy.dtype[numpy.str_]]
+reveal_type(AR_S % b"test") # E: numpy.chararray[Any, numpy.dtype[numpy.bytes_]]
+
+reveal_type(AR_U.capitalize()) # E: numpy.chararray[Any, numpy.dtype[numpy.str_]]
+reveal_type(AR_S.capitalize()) # E: numpy.chararray[Any, numpy.dtype[numpy.bytes_]]
+
+reveal_type(AR_U.center(5)) # E: numpy.chararray[Any, numpy.dtype[numpy.str_]]
+reveal_type(AR_S.center([2, 3, 4], b"a")) # E: numpy.chararray[Any, numpy.dtype[numpy.bytes_]]
+
+reveal_type(AR_U.encode()) # E: numpy.chararray[Any, numpy.dtype[numpy.bytes_]]
+reveal_type(AR_S.decode()) # E: numpy.chararray[Any, numpy.dtype[numpy.str_]]
+
+reveal_type(AR_U.expandtabs()) # E: numpy.chararray[Any, numpy.dtype[numpy.str_]]
+reveal_type(AR_S.expandtabs(tabsize=4)) # E: numpy.chararray[Any, numpy.dtype[numpy.bytes_]]
+
+reveal_type(AR_U.join("_")) # E: numpy.chararray[Any, numpy.dtype[numpy.str_]]
+reveal_type(AR_S.join([b"_", b""])) # E: numpy.chararray[Any, numpy.dtype[numpy.bytes_]]
+
+reveal_type(AR_U.ljust(5)) # E: numpy.chararray[Any, numpy.dtype[numpy.str_]]
+reveal_type(AR_S.ljust([4, 3, 1], fillchar=[b"a", b"b", b"c"])) # E: numpy.chararray[Any, numpy.dtype[numpy.bytes_]]
+reveal_type(AR_U.rjust(5)) # E: numpy.chararray[Any, numpy.dtype[numpy.str_]]
+reveal_type(AR_S.rjust([4, 3, 1], fillchar=[b"a", b"b", b"c"])) # E: numpy.chararray[Any, numpy.dtype[numpy.bytes_]]
+
+reveal_type(AR_U.lstrip()) # E: numpy.chararray[Any, numpy.dtype[numpy.str_]]
+reveal_type(AR_S.lstrip(chars=b"_")) # E: numpy.chararray[Any, numpy.dtype[numpy.bytes_]]
+reveal_type(AR_U.rstrip()) # E: numpy.chararray[Any, numpy.dtype[numpy.str_]]
+reveal_type(AR_S.rstrip(chars=b"_")) # E: numpy.chararray[Any, numpy.dtype[numpy.bytes_]]
+reveal_type(AR_U.strip()) # E: numpy.chararray[Any, numpy.dtype[numpy.str_]]
+reveal_type(AR_S.strip(chars=b"_")) # E: numpy.chararray[Any, numpy.dtype[numpy.bytes_]]
+
+reveal_type(AR_U.partition("\n")) # E: numpy.chararray[Any, numpy.dtype[numpy.str_]]
+reveal_type(AR_S.partition([b"a", b"b", b"c"])) # E: numpy.chararray[Any, numpy.dtype[numpy.bytes_]]
+reveal_type(AR_U.rpartition("\n")) # E: numpy.chararray[Any, numpy.dtype[numpy.str_]]
+reveal_type(AR_S.rpartition([b"a", b"b", b"c"])) # E: numpy.chararray[Any, numpy.dtype[numpy.bytes_]]
+
+reveal_type(AR_U.replace("_", "-")) # E: numpy.chararray[Any, numpy.dtype[numpy.str_]]
+reveal_type(AR_S.replace([b"_", b""], [b"a", b"b"])) # E: numpy.chararray[Any, numpy.dtype[numpy.bytes_]]
+
+reveal_type(AR_U.split("_")) # E: numpy.ndarray[Any, numpy.dtype[numpy.object_]]
+reveal_type(AR_S.split(maxsplit=[1, 2, 3])) # E: numpy.ndarray[Any, numpy.dtype[numpy.object_]]
+reveal_type(AR_U.rsplit("_")) # E: numpy.ndarray[Any, numpy.dtype[numpy.object_]]
+reveal_type(AR_S.rsplit(maxsplit=[1, 2, 3])) # E: numpy.ndarray[Any, numpy.dtype[numpy.object_]]
+
+reveal_type(AR_U.splitlines()) # E: numpy.ndarray[Any, numpy.dtype[numpy.object_]]
+reveal_type(AR_S.splitlines(keepends=[True, True, False])) # E: numpy.ndarray[Any, numpy.dtype[numpy.object_]]
+
+reveal_type(AR_U.swapcase()) # E: numpy.chararray[Any, numpy.dtype[numpy.str_]]
+reveal_type(AR_S.swapcase()) # E: numpy.chararray[Any, numpy.dtype[numpy.bytes_]]
+
+reveal_type(AR_U.title()) # E: numpy.chararray[Any, numpy.dtype[numpy.str_]]
+reveal_type(AR_S.title()) # E: numpy.chararray[Any, numpy.dtype[numpy.bytes_]]
+
+reveal_type(AR_U.upper()) # E: numpy.chararray[Any, numpy.dtype[numpy.str_]]
+reveal_type(AR_S.upper()) # E: numpy.chararray[Any, numpy.dtype[numpy.bytes_]]
+
+reveal_type(AR_U.zfill(5)) # E: numpy.chararray[Any, numpy.dtype[numpy.str_]]
+reveal_type(AR_S.zfill([2, 3, 4])) # E: numpy.chararray[Any, numpy.dtype[numpy.bytes_]]
+
+reveal_type(AR_U.count("a", start=[1, 2, 3])) # E: numpy.ndarray[Any, numpy.dtype[{int_}]]
+reveal_type(AR_S.count([b"a", b"b", b"c"], end=9)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]]
+
+reveal_type(AR_U.endswith("a", start=[1, 2, 3])) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
+reveal_type(AR_S.endswith([b"a", b"b", b"c"], end=9)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
+reveal_type(AR_U.startswith("a", start=[1, 2, 3])) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
+reveal_type(AR_S.startswith([b"a", b"b", b"c"], end=9)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
+
+reveal_type(AR_U.find("a", start=[1, 2, 3])) # E: numpy.ndarray[Any, numpy.dtype[{int_}]]
+reveal_type(AR_S.find([b"a", b"b", b"c"], end=9)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]]
+reveal_type(AR_U.rfind("a", start=[1, 2, 3])) # E: numpy.ndarray[Any, numpy.dtype[{int_}]]
+reveal_type(AR_S.rfind([b"a", b"b", b"c"], end=9)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]]
+
+reveal_type(AR_U.index("a", start=[1, 2, 3])) # E: numpy.ndarray[Any, numpy.dtype[{int_}]]
+reveal_type(AR_S.index([b"a", b"b", b"c"], end=9)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]]
+reveal_type(AR_U.rindex("a", start=[1, 2, 3])) # E: numpy.ndarray[Any, numpy.dtype[{int_}]]
+reveal_type(AR_S.rindex([b"a", b"b", b"c"], end=9)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]]
+
+reveal_type(AR_U.isalpha()) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
+reveal_type(AR_S.isalpha()) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
+
+reveal_type(AR_U.isalnum()) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
+reveal_type(AR_S.isalnum()) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
+
+reveal_type(AR_U.isdecimal()) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
+reveal_type(AR_S.isdecimal()) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
+
+reveal_type(AR_U.isdigit()) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
+reveal_type(AR_S.isdigit()) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
+
+reveal_type(AR_U.islower()) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
+reveal_type(AR_S.islower()) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
+
+reveal_type(AR_U.isnumeric()) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
+reveal_type(AR_S.isnumeric()) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
+
+reveal_type(AR_U.isspace()) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
+reveal_type(AR_S.isspace()) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
+
+reveal_type(AR_U.istitle()) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
+reveal_type(AR_S.istitle()) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
+
+reveal_type(AR_U.isupper()) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
+reveal_type(AR_S.isupper()) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
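
As a runtime counterpart to the chararray reveal-tests above: the string methods return new chararrays (or plain bool_/int ndarrays for predicates and searches), and most arguments broadcast elementwise, which is why the stubs accept list-valued widths and fill characters. A small sketch:

import numpy as np

AR_U = np.char.array(["spam", "eggs"])    # unicode chararray
AR_S = np.char.array([b"spam", b"eggs"])  # bytes chararray

print(AR_U.capitalize())          # chararray(['Spam', 'Eggs'], dtype='<U4')
print(AR_S.center([6, 8], b"-"))  # per-element widths broadcast
print(AR_U.isupper())             # plain ndarray of bool_
print(AR_U.encode())              # unicode -> bytes chararray
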
diff --git a/numpy/typing/tests/data/reveal/histograms.pyi b/numpy/typing/tests/data/reveal/histograms.pyi
new file mode 100644
index 000000000..55fa9518f
--- /dev/null
+++ b/numpy/typing/tests/data/reveal/histograms.pyi
@@ -0,0 +1,19 @@
+import numpy as np
+import numpy.typing as npt
+
+AR_i8: npt.NDArray[np.int64]
+AR_f8: npt.NDArray[np.float64]
+
+reveal_type(np.histogram_bin_edges(AR_i8, bins="auto")) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+reveal_type(np.histogram_bin_edges(AR_i8, bins="rice", range=(0, 3))) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+reveal_type(np.histogram_bin_edges(AR_i8, bins="scott", weights=AR_f8)) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+
+reveal_type(np.histogram(AR_i8, bins="auto")) # E: Tuple[numpy.ndarray[Any, numpy.dtype[Any]], numpy.ndarray[Any, numpy.dtype[Any]]]
+reveal_type(np.histogram(AR_i8, bins="rice", range=(0, 3))) # E: Tuple[numpy.ndarray[Any, numpy.dtype[Any]], numpy.ndarray[Any, numpy.dtype[Any]]]
+reveal_type(np.histogram(AR_i8, bins="scott", weights=AR_f8)) # E: Tuple[numpy.ndarray[Any, numpy.dtype[Any]], numpy.ndarray[Any, numpy.dtype[Any]]]
+reveal_type(np.histogram(AR_f8, bins=1, density=True)) # E: Tuple[numpy.ndarray[Any, numpy.dtype[Any]], numpy.ndarray[Any, numpy.dtype[Any]]]
+
+reveal_type(np.histogramdd(AR_i8, bins=[1])) # E: Tuple[numpy.ndarray[Any, numpy.dtype[Any]], builtins.list[numpy.ndarray[Any, numpy.dtype[Any]]]]
+reveal_type(np.histogramdd(AR_i8, range=[(0, 3)])) # E: Tuple[numpy.ndarray[Any, numpy.dtype[Any]], builtins.list[numpy.ndarray[Any, numpy.dtype[Any]]]]
+reveal_type(np.histogramdd(AR_i8, weights=AR_f8)) # E: Tuple[numpy.ndarray[Any, numpy.dtype[Any]], builtins.list[numpy.ndarray[Any, numpy.dtype[Any]]]]
+reveal_type(np.histogramdd(AR_f8, density=True)) # E: Tuple[numpy.ndarray[Any, numpy.dtype[Any]], builtins.list[numpy.ndarray[Any, numpy.dtype[Any]]]]
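
The revealed tuple types above mirror the runtime return values: np.histogram returns a (counts, bin_edges) pair and np.histogramdd a (counts, list_of_edge_arrays) pair. For instance:

import numpy as np

hist, edges = np.histogram([1, 2, 2, 3], bins="auto")
print(hist, edges)              # counts array and bin-edge array

H, edge_list = np.histogramdd(np.random.rand(10, 2), bins=(3, 4))
print(H.shape, len(edge_list))  # (3, 4) and a 2-element list of edge arrays
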
diff --git a/numpy/typing/tests/data/reveal/lib_function_base.pyi b/numpy/typing/tests/data/reveal/lib_function_base.pyi
new file mode 100644
index 000000000..bced08894
--- /dev/null
+++ b/numpy/typing/tests/data/reveal/lib_function_base.pyi
@@ -0,0 +1,180 @@
+from typing import Any
+
+import numpy as np
+import numpy.typing as npt
+
+vectorized_func: np.vectorize
+
+f8: np.float64
+AR_LIKE_f8: list[float]
+
+AR_i8: npt.NDArray[np.int64]
+AR_f8: npt.NDArray[np.float64]
+AR_c16: npt.NDArray[np.complex128]
+AR_m: npt.NDArray[np.timedelta64]
+AR_M: npt.NDArray[np.datetime64]
+AR_O: npt.NDArray[np.object_]
+AR_b: npt.NDArray[np.bool_]
+AR_U: npt.NDArray[np.str_]
+CHAR_AR_U: np.chararray[Any, np.dtype[np.str_]]
+
+def func(*args: Any, **kwargs: Any) -> Any: ...
+
+reveal_type(vectorized_func.pyfunc) # E: def (*Any, **Any) -> Any
+reveal_type(vectorized_func.cache) # E: bool
+reveal_type(vectorized_func.signature) # E: Union[None, builtins.str]
+reveal_type(vectorized_func.otypes) # E: Union[None, builtins.str]
+reveal_type(vectorized_func.excluded) # E: set[Union[builtins.int, builtins.str]]
+reveal_type(vectorized_func.__doc__) # E: Union[None, builtins.str]
+reveal_type(vectorized_func([1])) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+reveal_type(np.vectorize(int)) # E: numpy.vectorize
+reveal_type(np.vectorize( # E: numpy.vectorize
+ int, otypes="i", doc="doc", excluded=(), cache=True, signature=None
+))
+
+reveal_type(np.add_newdoc("__main__", "blabla", doc="test doc")) # E: None
+reveal_type(np.add_newdoc("__main__", "blabla", doc=("meth", "test doc"))) # E: None
+reveal_type(np.add_newdoc("__main__", "blabla", doc=[("meth", "test doc")])) # E: None
+
+reveal_type(np.rot90(AR_f8, k=2)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.rot90(AR_LIKE_f8, axes=(0, 1))) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+
+reveal_type(np.flip(f8)) # E: {float64}
+reveal_type(np.flip(1.0)) # E: Any
+reveal_type(np.flip(AR_f8, axis=(0, 1))) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.flip(AR_LIKE_f8, axis=0)) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+
+reveal_type(np.iterable(1)) # E: bool
+reveal_type(np.iterable([1])) # E: bool
+
+reveal_type(np.average(AR_f8)) # E: numpy.floating[Any]
+reveal_type(np.average(AR_f8, weights=AR_c16)) # E: numpy.complexfloating[Any, Any]
+reveal_type(np.average(AR_O)) # E: Any
+reveal_type(np.average(AR_f8, returned=True)) # E: Tuple[numpy.floating[Any], numpy.floating[Any]]
+reveal_type(np.average(AR_f8, weights=AR_c16, returned=True)) # E: Tuple[numpy.complexfloating[Any, Any], numpy.complexfloating[Any, Any]]
+reveal_type(np.average(AR_O, returned=True)) # E: Tuple[Any, Any]
+reveal_type(np.average(AR_f8, axis=0)) # E: Any
+reveal_type(np.average(AR_f8, axis=0, returned=True)) # E: Tuple[Any, Any]
+
+reveal_type(np.asarray_chkfinite(AR_f8)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.asarray_chkfinite(AR_LIKE_f8)) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+reveal_type(np.asarray_chkfinite(AR_f8, dtype=np.float64)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.asarray_chkfinite(AR_f8, dtype=float)) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+
+reveal_type(np.piecewise(AR_f8, AR_b, [func])) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.piecewise(AR_LIKE_f8, AR_b, [func])) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+
+reveal_type(np.select([AR_f8], [AR_f8])) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+
+reveal_type(np.copy(AR_LIKE_f8)) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+reveal_type(np.copy(AR_U)) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]]
+reveal_type(np.copy(CHAR_AR_U)) # E: numpy.ndarray[Any, Any]
+reveal_type(np.copy(CHAR_AR_U, "K", subok=True)) # E: numpy.chararray[Any, numpy.dtype[numpy.str_]]
+reveal_type(np.copy(CHAR_AR_U, subok=True)) # E: numpy.chararray[Any, numpy.dtype[numpy.str_]]
+
+reveal_type(np.gradient(AR_f8, axis=None)) # E: Any
+reveal_type(np.gradient(AR_LIKE_f8, edge_order=2)) # E: Any
+
+reveal_type(np.diff("bob", n=0)) # E: str
+reveal_type(np.diff(AR_f8, axis=0)) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+reveal_type(np.diff(AR_LIKE_f8, prepend=1.5)) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+
+reveal_type(np.angle(AR_f8)) # E: numpy.floating[Any]
+reveal_type(np.angle(AR_c16, deg=True)) # E: numpy.complexfloating[Any, Any]
+reveal_type(np.angle(AR_O)) # E: Any
+
+reveal_type(np.unwrap(AR_f8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
+reveal_type(np.unwrap(AR_O)) # E: numpy.ndarray[Any, numpy.dtype[numpy.object_]]
+
+reveal_type(np.sort_complex(AR_f8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]
+
+reveal_type(np.trim_zeros(AR_f8)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.trim_zeros(AR_LIKE_f8)) # E: list[builtins.float]
+
+reveal_type(np.extract(AR_i8, AR_f8)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.extract(AR_i8, AR_LIKE_f8)) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+
+reveal_type(np.place(AR_f8, mask=AR_i8, vals=5.0)) # E: None
+
+reveal_type(np.disp(1, linefeed=True)) # E: None
+with open("test", "w") as f:
+ reveal_type(np.disp("message", device=f)) # E: None
+
+reveal_type(np.cov(AR_f8, bias=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
+reveal_type(np.cov(AR_f8, AR_c16, ddof=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]
+reveal_type(np.cov(AR_f8, aweights=AR_f8, dtype=np.float32)) # E: numpy.ndarray[Any, numpy.dtype[{float32}]]
+reveal_type(np.cov(AR_f8, fweights=AR_f8, dtype=float)) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+
+reveal_type(np.corrcoef(AR_f8, rowvar=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
+reveal_type(np.corrcoef(AR_f8, AR_c16)) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]
+reveal_type(np.corrcoef(AR_f8, dtype=np.float32)) # E: numpy.ndarray[Any, numpy.dtype[{float32}]]
+reveal_type(np.corrcoef(AR_f8, dtype=float)) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+
+reveal_type(np.blackman(5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
+reveal_type(np.bartlett(6)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
+reveal_type(np.hanning(4.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
+reveal_type(np.hamming(0)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
+reveal_type(np.i0(AR_i8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
+reveal_type(np.kaiser(4, 5.9)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
+
+reveal_type(np.sinc(1.0)) # E: numpy.floating[Any]
+reveal_type(np.sinc(1j)) # E: numpy.complexfloating[Any, Any]
+reveal_type(np.sinc(AR_f8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
+reveal_type(np.sinc(AR_c16)) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]
+
+reveal_type(np.msort(CHAR_AR_U)) # E: Any
+reveal_type(np.msort(AR_U)) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]]
+reveal_type(np.msort(AR_LIKE_f8)) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+
+reveal_type(np.median(AR_f8, keepdims=False)) # E: numpy.floating[Any]
+reveal_type(np.median(AR_c16, overwrite_input=True)) # E: numpy.complexfloating[Any, Any]
+reveal_type(np.median(AR_m)) # E: numpy.timedelta64
+reveal_type(np.median(AR_O)) # E: Any
+reveal_type(np.median(AR_f8, keepdims=True)) # E: Any
+reveal_type(np.median(AR_c16, axis=0)) # E: Any
+reveal_type(np.median(AR_LIKE_f8, out=AR_c16)) # E: numpy.ndarray[Any, numpy.dtype[{complex128}]]
+
+reveal_type(np.add_newdoc_ufunc(np.add, "docstring")) # E: None
+
+reveal_type(np.percentile(AR_f8, 50)) # E: numpy.floating[Any]
+reveal_type(np.percentile(AR_c16, 50)) # E: numpy.complexfloating[Any, Any]
+reveal_type(np.percentile(AR_m, 50)) # E: numpy.timedelta64
+reveal_type(np.percentile(AR_M, 50, overwrite_input=True)) # E: numpy.datetime64
+reveal_type(np.percentile(AR_O, 50)) # E: Any
+reveal_type(np.percentile(AR_f8, [50])) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
+reveal_type(np.percentile(AR_c16, [50])) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]
+reveal_type(np.percentile(AR_m, [50])) # E: numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]
+reveal_type(np.percentile(AR_M, [50], interpolation="nearest")) # E: numpy.ndarray[Any, numpy.dtype[numpy.datetime64]]
+reveal_type(np.percentile(AR_O, [50])) # E: numpy.ndarray[Any, numpy.dtype[numpy.object_]]
+reveal_type(np.percentile(AR_f8, [50], keepdims=True)) # E: Any
+reveal_type(np.percentile(AR_f8, [50], axis=[1])) # E: Any
+reveal_type(np.percentile(AR_f8, [50], out=AR_c16)) # E: numpy.ndarray[Any, numpy.dtype[{complex128}]]
+
+reveal_type(np.quantile(AR_f8, 0.5)) # E: numpy.floating[Any]
+reveal_type(np.quantile(AR_c16, 0.5)) # E: numpy.complexfloating[Any, Any]
+reveal_type(np.quantile(AR_m, 0.5)) # E: numpy.timedelta64
+reveal_type(np.quantile(AR_M, 0.5, overwrite_input=True)) # E: numpy.datetime64
+reveal_type(np.quantile(AR_O, 0.5)) # E: Any
+reveal_type(np.quantile(AR_f8, [0.5])) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
+reveal_type(np.quantile(AR_c16, [0.5])) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]
+reveal_type(np.quantile(AR_m, [0.5])) # E: numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]
+reveal_type(np.quantile(AR_M, [0.5], interpolation="nearest")) # E: numpy.ndarray[Any, numpy.dtype[numpy.datetime64]]
+reveal_type(np.quantile(AR_O, [0.5])) # E: numpy.ndarray[Any, numpy.dtype[numpy.object_]]
+reveal_type(np.quantile(AR_f8, [0.5], keepdims=True)) # E: Any
+reveal_type(np.quantile(AR_f8, [0.5], axis=[1])) # E: Any
+reveal_type(np.quantile(AR_f8, [0.5], out=AR_c16)) # E: numpy.ndarray[Any, numpy.dtype[{complex128}]]
+
+reveal_type(np.meshgrid(AR_f8, AR_i8, copy=False)) # E: list[numpy.ndarray[Any, numpy.dtype[Any]]]
+reveal_type(np.meshgrid(AR_f8, AR_i8, AR_c16, indexing="ij")) # E: list[numpy.ndarray[Any, numpy.dtype[Any]]]
+
+reveal_type(np.delete(AR_f8, np.s_[:5])) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.delete(AR_LIKE_f8, [0, 4, 9], axis=0)) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+
+reveal_type(np.insert(AR_f8, np.s_[:5], 5)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.insert(AR_LIKE_f8, [0, 4, 9], [0.5, 9.2, 7], axis=0)) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+
+reveal_type(np.append(AR_f8, 5)) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+reveal_type(np.append(AR_LIKE_f8, 1j, axis=0)) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+
+reveal_type(np.digitize(4.5, [1])) # E: {intp}
+reveal_type(np.digitize(AR_f8, [1, 2, 3])) # E: numpy.ndarray[Any, numpy.dtype[{intp}]]
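
Among the cases above, note how the percentile/quantile overloads track the q argument: a scalar q yields a scalar of the array's promoted type, while a sequence of q values yields an array. At runtime:

import numpy as np

a = np.arange(10, dtype=np.float64)
print(np.percentile(a, 50))        # a float64 scalar
print(np.percentile(a, [25, 75]))  # a 1-d float64 array
print(np.quantile(a, 0.5))         # same split, with q in [0, 1]
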
diff --git a/numpy/typing/tests/data/reveal/lib_polynomial.pyi b/numpy/typing/tests/data/reveal/lib_polynomial.pyi
new file mode 100644
index 000000000..5a4a3c424
--- /dev/null
+++ b/numpy/typing/tests/data/reveal/lib_polynomial.pyi
@@ -0,0 +1,111 @@
+import numpy as np
+import numpy.typing as npt
+
+AR_b: npt.NDArray[np.bool_]
+AR_u4: npt.NDArray[np.uint32]
+AR_i8: npt.NDArray[np.int64]
+AR_f8: npt.NDArray[np.float64]
+AR_c16: npt.NDArray[np.complex128]
+AR_O: npt.NDArray[np.object_]
+
+poly_obj: np.poly1d
+
+reveal_type(poly_obj.variable) # E: str
+reveal_type(poly_obj.order) # E: int
+reveal_type(poly_obj.o) # E: int
+reveal_type(poly_obj.roots) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+reveal_type(poly_obj.r) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+reveal_type(poly_obj.coeffs) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+reveal_type(poly_obj.c) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+reveal_type(poly_obj.coef) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+reveal_type(poly_obj.coefficients) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+reveal_type(poly_obj.__hash__) # E: None
+
+reveal_type(poly_obj(1)) # E: Any
+reveal_type(poly_obj([1])) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+reveal_type(poly_obj(poly_obj)) # E: numpy.poly1d
+
+reveal_type(len(poly_obj)) # E: int
+reveal_type(-poly_obj) # E: numpy.poly1d
+reveal_type(+poly_obj) # E: numpy.poly1d
+
+reveal_type(poly_obj * 5) # E: numpy.poly1d
+reveal_type(5 * poly_obj) # E: numpy.poly1d
+reveal_type(poly_obj + 5) # E: numpy.poly1d
+reveal_type(5 + poly_obj) # E: numpy.poly1d
+reveal_type(poly_obj - 5) # E: numpy.poly1d
+reveal_type(5 - poly_obj) # E: numpy.poly1d
+reveal_type(poly_obj**1) # E: numpy.poly1d
+reveal_type(poly_obj**1.0) # E: numpy.poly1d
+reveal_type(poly_obj / 5) # E: numpy.poly1d
+reveal_type(5 / poly_obj) # E: numpy.poly1d
+
+reveal_type(poly_obj[0]) # E: Any
+poly_obj[0] = 5
+reveal_type(iter(poly_obj)) # E: Iterator[Any]
+reveal_type(poly_obj.deriv()) # E: numpy.poly1d
+reveal_type(poly_obj.integ()) # E: numpy.poly1d
+
+reveal_type(np.poly(poly_obj)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
+reveal_type(np.poly(AR_f8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
+reveal_type(np.poly(AR_c16)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
+
+reveal_type(np.polyint(poly_obj)) # E: numpy.poly1d
+reveal_type(np.polyint(AR_f8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
+reveal_type(np.polyint(AR_f8, k=AR_c16)) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]
+reveal_type(np.polyint(AR_O, m=2)) # E: numpy.ndarray[Any, numpy.dtype[numpy.object_]]
+
+reveal_type(np.polyder(poly_obj)) # E: numpy.poly1d
+reveal_type(np.polyder(AR_f8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
+reveal_type(np.polyder(AR_c16)) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]
+reveal_type(np.polyder(AR_O, m=2)) # E: numpy.ndarray[Any, numpy.dtype[numpy.object_]]
+
+reveal_type(np.polyfit(AR_f8, AR_f8, 2)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.polyfit(AR_f8, AR_i8, 1, full=True)) # E: Tuple[numpy.ndarray[Any, numpy.dtype[{float64}]], numpy.ndarray[Any, numpy.dtype[{float64}]], numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]]], numpy.ndarray[Any, numpy.dtype[{float64}]], numpy.ndarray[Any, numpy.dtype[{float64}]]]
+reveal_type(np.polyfit(AR_u4, AR_f8, 1.0, cov="unscaled")) # E: Tuple[numpy.ndarray[Any, numpy.dtype[{float64}]], numpy.ndarray[Any, numpy.dtype[{float64}]]]
+reveal_type(np.polyfit(AR_c16, AR_f8, 2)) # E: numpy.ndarray[Any, numpy.dtype[{complex128}]]
+reveal_type(np.polyfit(AR_f8, AR_c16, 1, full=True)) # E: Tuple[numpy.ndarray[Any, numpy.dtype[{complex128}]], numpy.ndarray[Any, numpy.dtype[{float64}]], numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]]], numpy.ndarray[Any, numpy.dtype[{float64}]], numpy.ndarray[Any, numpy.dtype[{float64}]]]
+reveal_type(np.polyfit(AR_u4, AR_c16, 1.0, cov=True)) # E: Tuple[numpy.ndarray[Any, numpy.dtype[{complex128}]], numpy.ndarray[Any, numpy.dtype[{complex128}]]]
+
+reveal_type(np.polyval(AR_b, AR_b)) # E: numpy.ndarray[Any, numpy.dtype[{int64}]]
+reveal_type(np.polyval(AR_u4, AR_b)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[Any]]]
+reveal_type(np.polyval(AR_i8, AR_i8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]
+reveal_type(np.polyval(AR_f8, AR_i8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
+reveal_type(np.polyval(AR_i8, AR_c16)) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]
+reveal_type(np.polyval(AR_O, AR_O)) # E: numpy.ndarray[Any, numpy.dtype[numpy.object_]]
+
+reveal_type(np.polyadd(poly_obj, AR_i8)) # E: numpy.poly1d
+reveal_type(np.polyadd(AR_f8, poly_obj)) # E: numpy.poly1d
+reveal_type(np.polyadd(AR_b, AR_b)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
+reveal_type(np.polyadd(AR_u4, AR_b)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[Any]]]
+reveal_type(np.polyadd(AR_i8, AR_i8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]
+reveal_type(np.polyadd(AR_f8, AR_i8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
+reveal_type(np.polyadd(AR_i8, AR_c16)) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]
+reveal_type(np.polyadd(AR_O, AR_O)) # E: numpy.ndarray[Any, numpy.dtype[numpy.object_]]
+
+reveal_type(np.polysub(poly_obj, AR_i8)) # E: numpy.poly1d
+reveal_type(np.polysub(AR_f8, poly_obj)) # E: numpy.poly1d
+reveal_type(np.polysub(AR_b, AR_b)) # E: <nothing>
+reveal_type(np.polysub(AR_u4, AR_b)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[Any]]]
+reveal_type(np.polysub(AR_i8, AR_i8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]
+reveal_type(np.polysub(AR_f8, AR_i8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
+reveal_type(np.polysub(AR_i8, AR_c16)) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]
+reveal_type(np.polysub(AR_O, AR_O)) # E: numpy.ndarray[Any, numpy.dtype[numpy.object_]]
+
+reveal_type(np.polymul(poly_obj, AR_i8)) # E: numpy.poly1d
+reveal_type(np.polymul(AR_f8, poly_obj)) # E: numpy.poly1d
+reveal_type(np.polymul(AR_b, AR_b)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
+reveal_type(np.polymul(AR_u4, AR_b)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[Any]]]
+reveal_type(np.polymul(AR_i8, AR_i8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]
+reveal_type(np.polymul(AR_f8, AR_i8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
+reveal_type(np.polymul(AR_i8, AR_c16)) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]
+reveal_type(np.polymul(AR_O, AR_O)) # E: numpy.ndarray[Any, numpy.dtype[numpy.object_]]
+
+reveal_type(np.polydiv(poly_obj, AR_i8)) # E: numpy.poly1d
+reveal_type(np.polydiv(AR_f8, poly_obj)) # E: numpy.poly1d
+reveal_type(np.polydiv(AR_b, AR_b)) # E: Tuple[numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]]
+reveal_type(np.polydiv(AR_u4, AR_b)) # E: Tuple[numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]]
+reveal_type(np.polydiv(AR_i8, AR_i8)) # E: Tuple[numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]]
+reveal_type(np.polydiv(AR_f8, AR_i8)) # E: Tuple[numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]]
+reveal_type(np.polydiv(AR_i8, AR_c16)) # E: Tuple[numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]]
+reveal_type(np.polydiv(AR_O, AR_O)) # E: Tuple[numpy.ndarray[Any, numpy.dtype[Any]], numpy.ndarray[Any, numpy.dtype[Any]]]
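
The polyadd/polysub/polymul/polydiv cases above encode the runtime promotion rule: if either operand is a poly1d the result is a poly1d, otherwise a coefficient ndarray (a pair of them for polydiv). For example:

import numpy as np

p = np.poly1d([1.0, -3.0, 2.0])     # x**2 - 3x + 2
print(p(1))                         # 0.0: scalar evaluation
print(p.deriv())                    # derivative, again a poly1d
print(np.polyadd(p, [0.0, 1.0]))    # poly1d operand -> poly1d result
print(np.polymul([1, 1], [1, -1]))  # arrays only -> ndarray [1, 0, -1]
q, r = np.polydiv([1, 0, -1], [1, 1])
print(q, r)                         # quotient and remainder arrays
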
diff --git a/numpy/typing/tests/data/reveal/matrix.pyi b/numpy/typing/tests/data/reveal/matrix.pyi
new file mode 100644
index 000000000..def33f458
--- /dev/null
+++ b/numpy/typing/tests/data/reveal/matrix.pyi
@@ -0,0 +1,69 @@
+from typing import Any
+import numpy as np
+import numpy.typing as npt
+
+mat: np.matrix[Any, np.dtype[np.int64]]
+ar_f8: npt.NDArray[np.float64]
+
+reveal_type(mat * 5) # E: numpy.matrix[Any, Any]
+reveal_type(5 * mat) # E: numpy.matrix[Any, Any]
+mat *= 5
+
+reveal_type(mat**5) # E: numpy.matrix[Any, Any]
+mat **= 5
+
+reveal_type(mat.sum()) # E: Any
+reveal_type(mat.mean()) # E: Any
+reveal_type(mat.std()) # E: Any
+reveal_type(mat.var()) # E: Any
+reveal_type(mat.prod()) # E: Any
+reveal_type(mat.any()) # E: numpy.bool_
+reveal_type(mat.all()) # E: numpy.bool_
+reveal_type(mat.max()) # E: {int64}
+reveal_type(mat.min()) # E: {int64}
+reveal_type(mat.argmax()) # E: {intp}
+reveal_type(mat.argmin()) # E: {intp}
+reveal_type(mat.ptp()) # E: {int64}
+
+reveal_type(mat.sum(axis=0)) # E: numpy.matrix[Any, Any]
+reveal_type(mat.mean(axis=0)) # E: numpy.matrix[Any, Any]
+reveal_type(mat.std(axis=0)) # E: numpy.matrix[Any, Any]
+reveal_type(mat.var(axis=0)) # E: numpy.matrix[Any, Any]
+reveal_type(mat.prod(axis=0)) # E: numpy.matrix[Any, Any]
+reveal_type(mat.any(axis=0)) # E: numpy.matrix[Any, numpy.dtype[numpy.bool_]]
+reveal_type(mat.all(axis=0)) # E: numpy.matrix[Any, numpy.dtype[numpy.bool_]]
+reveal_type(mat.max(axis=0)) # E: numpy.matrix[Any, numpy.dtype[{int64}]]
+reveal_type(mat.min(axis=0)) # E: numpy.matrix[Any, numpy.dtype[{int64}]]
+reveal_type(mat.argmax(axis=0)) # E: numpy.matrix[Any, numpy.dtype[{intp}]]
+reveal_type(mat.argmin(axis=0)) # E: numpy.matrix[Any, numpy.dtype[{intp}]]
+reveal_type(mat.ptp(axis=0)) # E: numpy.matrix[Any, numpy.dtype[{int64}]]
+
+reveal_type(mat.sum(out=ar_f8)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(mat.mean(out=ar_f8)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(mat.std(out=ar_f8)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(mat.var(out=ar_f8)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(mat.prod(out=ar_f8)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(mat.any(out=ar_f8)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(mat.all(out=ar_f8)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(mat.max(out=ar_f8)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(mat.min(out=ar_f8)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(mat.argmax(out=ar_f8)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(mat.argmin(out=ar_f8)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(mat.ptp(out=ar_f8)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+
+reveal_type(mat.T) # E: numpy.matrix[Any, numpy.dtype[{int64}]]
+reveal_type(mat.I) # E: numpy.matrix[Any, Any]
+reveal_type(mat.A) # E: numpy.ndarray[Any, numpy.dtype[{int64}]]
+reveal_type(mat.A1) # E: numpy.ndarray[Any, numpy.dtype[{int64}]]
+reveal_type(mat.H) # E: numpy.matrix[Any, numpy.dtype[{int64}]]
+reveal_type(mat.getT()) # E: numpy.matrix[Any, numpy.dtype[{int64}]]
+reveal_type(mat.getI()) # E: numpy.matrix[Any, Any]
+reveal_type(mat.getA()) # E: numpy.ndarray[Any, numpy.dtype[{int64}]]
+reveal_type(mat.getA1()) # E: numpy.ndarray[Any, numpy.dtype[{int64}]]
+reveal_type(mat.getH()) # E: numpy.matrix[Any, numpy.dtype[{int64}]]
+
+reveal_type(np.bmat(ar_f8)) # E: numpy.matrix[Any, Any]
+reveal_type(np.bmat([[0, 1, 2]])) # E: numpy.matrix[Any, Any]
+reveal_type(np.bmat("mat")) # E: numpy.matrix[Any, Any]
+
+reveal_type(np.asmatrix(ar_f8, dtype=np.int64)) # E: numpy.matrix[Any, Any]
diff --git a/numpy/typing/tests/data/reveal/ndarray_misc.pyi b/numpy/typing/tests/data/reveal/ndarray_misc.pyi
index 050b82cdc..e384b5388 100644
--- a/numpy/typing/tests/data/reveal/ndarray_misc.pyi
+++ b/numpy/typing/tests/data/reveal/ndarray_misc.pyi
@@ -20,6 +20,7 @@ B: SubClass
AR_f8: NDArray[np.float64]
AR_i8: NDArray[np.int64]
AR_U: NDArray[np.str_]
+AR_V: NDArray[np.void]
ctypes_obj = AR_f8.ctypes
@@ -193,3 +194,13 @@ reveal_type(operator.index(AR_i8)) # E: int
reveal_type(AR_f8.__array_prepare__(B)) # E: numpy.ndarray[Any, numpy.dtype[numpy.object_]]
reveal_type(AR_f8.__array_wrap__(B)) # E: numpy.ndarray[Any, numpy.dtype[numpy.object_]]
+
+reveal_type(AR_V[0]) # E: Any
+reveal_type(AR_V[0, 0]) # E: Any
+reveal_type(AR_V[AR_i8]) # E: Any
+reveal_type(AR_V[AR_i8, AR_i8]) # E: Any
+reveal_type(AR_V[AR_i8, None]) # E: numpy.ndarray[Any, numpy.dtype[numpy.void]]
+reveal_type(AR_V[0, ...]) # E: numpy.ndarray[Any, numpy.dtype[numpy.void]]
+reveal_type(AR_V[:]) # E: numpy.ndarray[Any, numpy.dtype[numpy.void]]
+reveal_type(AR_V["a"]) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+reveal_type(AR_V[["a", "b"]]) # E: numpy.ndarray[Any, numpy.dtype[numpy.void]]
diff --git a/numpy/typing/tests/data/reveal/nested_sequence.pyi b/numpy/typing/tests/data/reveal/nested_sequence.pyi
index 07e24e357..4d3aad467 100644
--- a/numpy/typing/tests/data/reveal/nested_sequence.pyi
+++ b/numpy/typing/tests/data/reveal/nested_sequence.pyi
@@ -21,3 +21,4 @@ reveal_type(func(e)) # E: None
reveal_type(func(f)) # E: None
reveal_type(func(g)) # E: None
reveal_type(func(h)) # E: None
+reveal_type(func(range(15))) # E: None
diff --git a/numpy/typing/tests/test_typing.py b/numpy/typing/tests/test_typing.py
index 0f3e10b7b..f303ebea3 100644
--- a/numpy/typing/tests/test_typing.py
+++ b/numpy/typing/tests/test_typing.py
@@ -160,7 +160,9 @@ def test_fail(path: str) -> None:
expected_error = errors.get(lineno)
_test_fail(path, marker, expected_error, lineno)
else:
- pytest.fail(f"Unexpected mypy output\n\n{errors[lineno]}")
+ pytest.fail(
+ f"Unexpected mypy output at line {lineno}\n\n{errors[lineno]}"
+ )
_FAIL_MSG1 = """Extra error at line {}
diff --git a/setup.py b/setup.py
index 245c23676..6cb37d291 100755
--- a/setup.py
+++ b/setup.py
@@ -55,11 +55,14 @@ FULLVERSION = versioneer.get_version()
# 1.22.0 ... -> ISRELEASED == True, VERSION == 1.22.0
# 1.22.0rc1 ... -> ISRELEASED == True, VERSION == 1.22.0
ISRELEASED = re.search(r'(dev|\+)', FULLVERSION) is None
-MAJOR, MINOR, MICRO = re.match(r'(\d+)\.(\d+)\.(\d+)', FULLVERSION).groups()
+_V_MATCH = re.match(r'(\d+)\.(\d+)\.(\d+)', FULLVERSION)
+if _V_MATCH is None:
+ raise RuntimeError(f'Cannot parse version {FULLVERSION}')
+MAJOR, MINOR, MICRO = _V_MATCH.groups()
VERSION = '{}.{}.{}'.format(MAJOR, MINOR, MICRO)
# The first version not in the `Programming Language :: Python :: ...` classifiers above
-if sys.version_info >= (3, 10):
+if sys.version_info >= (3, 11):
fmt = "NumPy {} may not yet support Python {}.{}."
warnings.warn(
fmt.format(VERSION, *sys.version_info[:2]),
@@ -210,9 +213,8 @@ def get_build_overrides():
class new_build_clib(build_clib):
def build_a_library(self, build_info, lib_name, libraries):
if _needs_gcc_c99_flag(self):
- args = build_info.get('extra_compiler_args') or []
- args.append('-std=c99')
- build_info['extra_compiler_args'] = args
+ build_info['extra_cflags'] = ['-std=c99']
+ build_info['extra_cxxflags'] = ['-std=c++11']
build_clib.build_a_library(self, build_info, lib_name, libraries)
class new_build_ext(build_ext):
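
The version-parsing change above swaps a chained .groups() call for an explicit None check, since re.match returns None when the pattern does not match and the old code would die with an unhelpful AttributeError. The same pattern in isolation, as a standalone sketch (parse_version is a hypothetical name):

import re

def parse_version(fullversion: str) -> tuple[str, ...]:
    match = re.match(r'(\d+)\.(\d+)\.(\d+)', fullversion)
    if match is None:  # e.g. a malformed version string
        raise RuntimeError(f'Cannot parse version {fullversion}')
    return match.groups()  # major, minor, micro as strings

print(parse_version('1.22.0rc1'))  # ('1', '22', '0')
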
diff --git a/test_requirements.txt b/test_requirements.txt
index 6b6211872..31bea70ca 100644
--- a/test_requirements.txt
+++ b/test_requirements.txt
@@ -1,10 +1,10 @@
cython==0.29.24
wheel<0.37.1
setuptools<49.2.0
-hypothesis==6.23.0
+hypothesis==6.23.3
pytest==6.2.5
-pytz==2021.1
-pytest-cov==2.12.1
+pytz==2021.3
+pytest-cov==3.0.0
pickle5; python_version == '3.7' and platform_python_implementation != 'PyPy'
# for numpy.random.test.test_extending
cffi
diff --git a/tools/changelog.py b/tools/changelog.py
index 2bd7cde08..444d96882 100755
--- a/tools/changelog.py
+++ b/tools/changelog.py
@@ -1,5 +1,4 @@
#!/usr/bin/env python3
-# -*- encoding:utf-8 -*-
"""
Script to generate contributor and pull request lists
diff --git a/tools/download-wheels.py b/tools/download-wheels.py
index 28b3fc7ad..dd066d9ad 100644
--- a/tools/download-wheels.py
+++ b/tools/download-wheels.py
@@ -1,5 +1,4 @@
#!/usr/bin/env python3
-# -*- encoding:utf-8 -*-
"""
Script to download NumPy wheels from the Anaconda staging area.
diff --git a/tools/gitpod/gitpod.Dockerfile b/tools/gitpod/gitpod.Dockerfile
index 538963bc0..7894be5bc 100644
--- a/tools/gitpod/gitpod.Dockerfile
+++ b/tools/gitpod/gitpod.Dockerfile
@@ -34,6 +34,7 @@ COPY --from=clone --chown=gitpod /tmp/numpy ${WORKSPACE}
WORKDIR ${WORKSPACE}
# Build numpy to populate the cache used by ccache
+RUN git submodule update --init --depth=1 -- numpy/core/src/umath/svml
RUN conda activate ${CONDA_ENV} && \
python setup.py build_ext --inplace && \
ccache -s
diff --git a/tools/openblas_support.py b/tools/openblas_support.py
index 9ab964e6f..4eb72dbc9 100644
--- a/tools/openblas_support.py
+++ b/tools/openblas_support.py
@@ -13,8 +13,8 @@ from tempfile import mkstemp, gettempdir
from urllib.request import urlopen, Request
from urllib.error import HTTPError
-OPENBLAS_V = '0.3.17'
-OPENBLAS_LONG = 'v0.3.17'
+OPENBLAS_V = '0.3.18'
+OPENBLAS_LONG = 'v0.3.18'
BASE_LOC = 'https://anaconda.org/multibuild-wheels-staging/openblas-libs'
BASEURL = f'{BASE_LOC}/{OPENBLAS_LONG}/download'
SUPPORTED_PLATFORMS = [
diff --git a/tools/travis-before-install.sh b/tools/travis-before-install.sh
index 65aa4ad13..056e97472 100755
--- a/tools/travis-before-install.sh
+++ b/tools/travis-before-install.sh
@@ -22,13 +22,12 @@ pushd builds
# Build into own virtualenv
# We therefore control our own environment, avoid travis' numpy
-pip install -U virtualenv
if [ -n "$USE_DEBUG" ]
then
- virtualenv --python=$(which python3-dbg) venv
+ python3-dbg -m venv venv
else
- virtualenv --python=python venv
+ python -m venv venv
fi
source venv/bin/activate
diff --git a/tools/travis-test.sh b/tools/travis-test.sh
index 4667db991..b395942fb 100755
--- a/tools/travis-test.sh
+++ b/tools/travis-test.sh
@@ -165,7 +165,7 @@ if [ -n "$USE_WHEEL" ] && [ $# -eq 0 ]; then
fi
$PYTHON setup.py build --warn-error build_src --verbose-cfg bdist_wheel
# Make another virtualenv to install into
- virtualenv --python=`which $PYTHON` venv-for-wheel
+ $PYTHON -m venv venv-for-wheel
. venv-for-wheel/bin/activate
# Move out of source directory to avoid finding local numpy
pushd dist
@@ -181,7 +181,7 @@ elif [ -n "$USE_SDIST" ] && [ $# -eq 0 ]; then
export CFLAGS=$CFLAGS" -Wno-sign-compare -Wno-unused-result"
$PYTHON setup.py sdist
# Make another virtualenv to install into
- virtualenv --python=`which $PYTHON` venv-for-wheel
+ $PYTHON -m venv venv-for-wheel
. venv-for-wheel/bin/activate
# Move out of source directory to avoid finding local numpy
pushd dist
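
Both travis scripts above drop the third-party virtualenv tool in favour of the stdlib venv module ($PYTHON -m venv ...); for reference, the same environment can also be created from Python itself:

import venv

# Equivalent of "$PYTHON -m venv venv-for-wheel" via the stdlib API.
venv.create("venv-for-wheel", with_pip=True)
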