diff options
731 files changed, 23194 insertions, 7779 deletions
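The summary above is git's diffstat for the release range. To reproduce a similar count from a local NumPy checkout, a minimal sketch (the tag range below is illustrative only; it is not taken from this page)::

    $ git fetch upstream --tags
    $ git diff --shortstat v1.21.0..v1.22.0    # files changed, insertions, deletions
    $ git diff --stat v1.21.0..v1.22.0 -- .circleci .github    # per-file counts for one subtree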
diff --git a/.circleci/config.yml b/.circleci/config.yml index fdb85be98..de7f52f81 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -8,7 +8,11 @@ jobs: docker: # CircleCI maintains a library of pre-built images # documented at https://circleci.com/docs/2.0/circleci-images/ - - image: cimg/python:3.8 + # circleci/python3.8 images come with old versions of Doxygen (1.6.x), + # so a newer base image is used instead to guarantee a + # Doxygen version >= 1.8.10 and avoid warnings related + # to default behaviors or non-existent config options + - image: cimg/base:2021.05 working_directory: ~/repo @@ -20,10 +24,17 @@ jobs: if [[ -v CI_PULL_REQUEST ]] ; then git pull --ff-only origin "refs/pull/${CI_PULL_REQUEST//*pull\//}/merge" ; fi - run: + name: update submodules + command: | + git submodule init + git submodule update + + - run: name: create virtual environment, install dependencies command: | sudo apt-get update - sudo apt-get install -y graphviz texlive-fonts-recommended texlive-latex-recommended texlive-latex-extra latexmk texlive-xetex + sudo apt-get install -y python3.8 python3.8-dev python3-venv graphviz texlive-fonts-recommended texlive-latex-recommended \ + texlive-latex-extra latexmk texlive-xetex doxygen python3.8 -m venv venv . venv/bin/activate @@ -58,7 +69,7 @@ jobs: . venv/bin/activate cd doc # Don't use -q, show warning summary - SPHINXOPTS="-n" make -e html || echo "ignoring errors for now, see gh-13114" + SPHINXOPTS="-j4 -n" make -e html || echo "ignoring errors for now, see gh-13114" - run: name: build devdocs command: | . venv/bin/activate cd doc make clean - SPHINXOPTS=-q make -e html + SPHINXOPTS="-j4 -q" make -e html - run: name: build neps command: | . venv/bin/activate cd doc/neps - SPHINXOPTS=-q make -e html + SPHINXOPTS="-j4 -q" make -e html - store_artifacts: path: doc/build/html/ diff --git a/.clang-format b/.clang-format new file mode 100644 index 000000000..60b1066bc --- /dev/null +++ b/.clang-format @@ -0,0 +1,37 @@ +# A clang-format style that approximates Python's PEP 7 +# Useful for IDE integration +# +# Based on Paul Ganssle's version at +# https://gist.github.com/pganssle/0e3a5f828b4d07d79447f6ced8e7e4db +# and modified for NumPy +BasedOnStyle: Google +AlignAfterOpenBracket: Align +AllowShortEnumsOnASingleLine: false +AllowShortIfStatementsOnASingleLine: false +AlwaysBreakAfterReturnType: TopLevel +BreakBeforeBraces: Stroustrup +ColumnLimit: 79 +ContinuationIndentWidth: 8 +DerivePointerAlignment: false +IndentWidth: 4 +IncludeBlocks: Regroup +IncludeCategories: + - Regex: '^[<"](Python|structmember|pymem)\.h' + Priority: -3 + CaseSensitive: true + - Regex: '^"numpy/' + Priority: -2 + - Regex: '^"(npy_pycompat|npy_config)' + Priority: -1 + - Regex: '^"[[:alnum:]_.]+"' + Priority: 1 + - Regex: '^<[[:alnum:]_.]+"' + Priority: 2 +Language: Cpp +PointerAlignment: Right +ReflowComments: true +SpaceBeforeParens: ControlStatements +SpacesInParentheses: false +StatementMacros: [PyObject_HEAD, PyObject_VAR_HEAD, PyObject_HEAD_EXTRA] +TabWidth: 4 +UseTab: Never diff --git a/.gitattributes b/.gitattributes index a0676bee4..911db2b72 100644 --- a/.gitattributes +++ b/.gitattributes @@ -11,6 +11,7 @@ numpy/linalg/lapack_lite/f2c.c linguist-vendored numpy/linalg/lapack_lite/f2c.h linguist-vendored tools/npy_tempita/* linguist-vendored numpy/core/include/numpy/libdivide/* linguist-vendored +numpy/core/src/umath/svml/* linguist-vendored # Mark some files as generated numpy/linalg/lapack_lite/f2c_*.c linguist-generated diff --git
a/.github/FUNDING.yml b/.github/FUNDING.yml index 8283a20f7..8c3502443 100644 --- a/.github/FUNDING.yml +++ b/.github/FUNDING.yml @@ -1,3 +1,3 @@ github: [numfocus] tidelift: pypi/numpy -custom: https://numpy.org/about/ +custom: https://numpy.org/about#donate diff --git a/.github/ISSUE_TEMPLATE/bug-report.md b/.github/ISSUE_TEMPLATE/bug-report.md deleted file mode 100644 index 6da1f7370..000000000 --- a/.github/ISSUE_TEMPLATE/bug-report.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -name: "Bug report" -about: Report a bug. Not for security vulnerabilities -- see below. - ---- - -<!-- Please describe the issue in detail here, and fill in the fields below --> - -### Reproducing code example: - -<!-- A short code example that reproduces the problem/missing feature. It should be -self-contained, i.e., possible to run as-is via 'python myproblem.py' --> - -```python -import numpy as np -<< your code here >> -``` - -### Error message: - -<!-- If you are reporting a segfault please include a GDB traceback, which you -can generate by following -https://github.com/numpy/numpy/blob/main/doc/source/dev/development_environment.rst#debugging --> - -<!-- Full error message, if any (starting from line Traceback: ...) --> - -### NumPy/Python version information: - -<!-- Output from 'import sys, numpy; print(numpy.__version__, sys.version)' --> - diff --git a/.github/ISSUE_TEMPLATE/bug-report.yml b/.github/ISSUE_TEMPLATE/bug-report.yml new file mode 100644 index 000000000..b46225968 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug-report.yml @@ -0,0 +1,49 @@ +name: Bug report +description: Report a bug. For security vulnerabilities see Report a security vulnerability in the templates. +title: "BUG: " +labels: [00 - Bug] + +body: +- type: markdown + attributes: + value: > + Thank you for taking the time to file a bug report. Before creating a new + issue, please make sure to take a few minutes to check the issue tracker + for existing issues about the bug. + +- type: textarea + attributes: + label: "Describe the issue:" + validations: + required: true + +- type: textarea + attributes: + label: "Reproduce the code example:" + description: > + A short code example that reproduces the problem/missing feature. It + should be self-contained, i.e., can be copy-pasted into the Python + interpreter or run as-is via `python myproblem.py`. + placeholder: | + import numpy as np + << your code here >> + render: python + validations: + required: true + +- type: textarea + attributes: + label: "Error message:" + description: > + Please include full error message, if any (starting from `Traceback: ...`). + If you are reporting a segfault please include a GDB traceback, + which you can generate by following + [these instructions](https://github.com/numpy/numpy/blob/main/doc/source/dev/development_environment.rst#debugging). + render: shell + +- type: textarea + attributes: + label: "NumPy/Python version information:" + description: Output from `import sys, numpy; print(numpy.__version__, sys.version)`. + validations: + required: true
\ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/documentation.md b/.github/ISSUE_TEMPLATE/documentation.md deleted file mode 100644 index cdb7cde2e..000000000 --- a/.github/ISSUE_TEMPLATE/documentation.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -name: "Documentation" -about: Report an issue related to the NumPy documentation -labels: 04 - Documentation - ---- - -## Documentation - -<!-- If this is an issue with the current documentation for NumPy (e.g. -incomplete/inaccurate docstring, unclear explanation in any part of the -documentation), make sure to leave a reference to the document/code you're -referring to. You can also check the development version of the documentation -and see if this issue has already been addressed: https://numpy.org/devdocs/ ---> - -<!-- If this is an idea or a request for content, please describe as clearly as -possible what topics you think are missing from the current documentation. Make -sure to check https://github.com/numpy/numpy-tutorials and see if this issue -might be more appropriate there. --> diff --git a/.github/ISSUE_TEMPLATE/documentation.yml b/.github/ISSUE_TEMPLATE/documentation.yml new file mode 100644 index 000000000..1005d3ade --- /dev/null +++ b/.github/ISSUE_TEMPLATE/documentation.yml @@ -0,0 +1,23 @@ +name: Documentation +description: Report an issue related to the NumPy documentation. +title: "DOC: " +labels: [04 - Documentation] + +body: +- type: textarea + attributes: + label: "Issue with current documentation:" + description: > + Please make sure to leave a reference to the document/code you're + referring to. You can also check the development version of the + documentation and see if this issue has already been addressed at + https://numpy.org/devdocs. + +- type: textarea + attributes: + label: "Idea or request for content:" + description: > + Please describe as clearly as possible what topics you think are missing + from the current documentation. Make sure to check + https://github.com/numpy/numpy-tutorials and see if this issue might be + more appropriate there.
\ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/feature-request.md b/.github/ISSUE_TEMPLATE/feature-request.md deleted file mode 100644 index 68872ec06..000000000 --- a/.github/ISSUE_TEMPLATE/feature-request.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -name: "Feature request" -about: Check instructions for submitting your idea on the mailing list first. - ---- - -## Feature - -<!-- If you're looking to request a new feature or change in functionality, including -adding or changing the meaning of arguments to an existing function, please -post your idea on the [numpy-discussion mailing list] -(https://mail.python.org/mailman/listinfo/numpy-discussion) to explain your -reasoning in addition to opening an issue or pull request. You can also check -out our [Contributor Guide] -(https://github.com/numpy/numpy/blob/main/doc/source/dev/index.rst) if you -need more information. --> diff --git a/.github/ISSUE_TEMPLATE/feature-request.yml b/.github/ISSUE_TEMPLATE/feature-request.yml new file mode 100644 index 000000000..5e2af4015 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature-request.yml @@ -0,0 +1,22 @@ +name: Feature request +description: Check instructions for submitting your idea on the mailing list first. +title: "ENH: " + +body: +- type: markdown + attributes: + value: > + If you're looking to request a new feature or change in functionality, + including adding or changing the meaning of arguments to an existing + function, please post your idea on the + [numpy-discussion mailing list](https://mail.python.org/mailman/listinfo/numpy-discussion) + to explain your reasoning in addition to opening an issue or pull request. + You can also check out our + [Contributor Guide](https://github.com/numpy/numpy/blob/main/doc/source/dev/index.rst) + if you need more information. + +- type: textarea + attributes: + label: "Proposed new feature or change:" + validations: + required: true
\ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/post-install.md b/.github/ISSUE_TEMPLATE/post-install.md deleted file mode 100644 index 11b91384c..000000000 --- a/.github/ISSUE_TEMPLATE/post-install.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -name: "Post-install/importing issue" -about: If you have trouble importing or using NumPy after installation -labels: 32 - Installation - ---- - -<!-- Please describe the issue in detail here, and fill in the fields below. Also, check our Troubleshooting ImportError document to see if your issue is listed there: https://numpy.org/devdocs/user/troubleshooting-importerror.html --> - -### Steps to reproduce: - -<!-- Please describe the installation method (e.g. building from source, Anaconda, pip), your OS and NumPy/Python version information --> - -### Error message: - -<!-- If you are reporting a segfault please include a GDB traceback, which you -can generate by following -https://github.com/numpy/numpy/blob/main/doc/source/dev/development_environment.rst#debugging --> - -<!-- Full error message, if any (starting from line Traceback: ...) --> - diff --git a/.github/ISSUE_TEMPLATE/post-install.yml b/.github/ISSUE_TEMPLATE/post-install.yml new file mode 100644 index 000000000..5831994d1 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/post-install.yml @@ -0,0 +1,28 @@ +name: Post-install/importing issue +description: Report an issue if you have trouble importing or using NumPy after installation. +labels: [32 - Installation] + +body: +- type: textarea + attributes: + label: "Steps to reproduce:" + description: > + Please describe the installation method (e.g. building from source, + Anaconda, pip), your OS and NumPy/Python version information. + validations: + required: true + +- type: textarea + attributes: + label: "Error message:" + description: > + Please include full error message, if any (starting from `Traceback: ...`). + If you are reporting a segfault please include a GDB traceback, + which you can generate by following + [these instructions](https://github.com/numpy/numpy/blob/main/doc/source/dev/development_environment.rst#debugging). + render: shell + +- type: textarea + attributes: + label: "Additional information:" + description: Please add any additional information that could help us diagnose the problem better.
\ No newline at end of file diff --git a/.github/dependabot.yml b/.github/dependabot.yml deleted file mode 100644 index 16ce0846c..000000000 --- a/.github/dependabot.yml +++ /dev/null @@ -1,22 +0,0 @@ -version: 2 -updates: -- package-ecosystem: pip - directory: "/" - schedule: - interval: weekly - open-pull-requests-limit: 10 - labels: - - 03 - Maintenance - ignore: - - dependency-name: gitpython - versions: - - "> 3.1.13, < 3.2" - - dependency-name: pydata-sphinx-theme - versions: - - 0.6.0 - - 0.6.1 - - dependency-name: hypothesis - versions: - - 6.3.0 - commit-message: - prefix: MAINT diff --git a/.github/workflows/build_test.yml b/.github/workflows/build_test.yml index 950294fe8..239a18602 100644 --- a/.github/workflows/build_test.yml +++ b/.github/workflows/build_test.yml @@ -56,7 +56,7 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - python-version: [3.9, 3.10.0-rc.1] + python-version: ["3.9", "3.10"] steps: - uses: actions/checkout@v2 with: @@ -202,18 +202,18 @@ jobs: python-version: ${{ env.PYTHON_VERSION }} - uses: ./.github/actions - #pypy37: - #needs: [smoke_test] - #runs-on: ubuntu-latest - #steps: - #- uses: actions/checkout@v2 - #with: - #submodules: recursive - #fetch-depth: 0 - #- uses: actions/setup-python@v2 - #with: - #python-version: pypy-3.7-v7.3.4 - #- uses: ./.github/actions + pypy38: + needs: [smoke_test] + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + with: + submodules: recursive + fetch-depth: 0 + - uses: actions/setup-python@v2 + with: + python-version: pypy-3.8-v7.3.6rc1 + - uses: ./.github/actions sdist: needs: [smoke_test] @@ -230,3 +230,50 @@ jobs: python-version: ${{ env.PYTHON_VERSION }} - uses: ./.github/actions + armv7_simd_test: + needs: [smoke_test] + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + with: + submodules: recursive + fetch-depth: 0 + - name: Initialize binfmt_misc for qemu-user-static + run: | + docker run --rm --privileged multiarch/qemu-user-static --reset -p yes + - name: Creates new container + run: | + # use x86_64 cross-compiler to speed up the build + sudo apt update + sudo apt install -y gcc-arm-linux-gnueabihf g++-arm-linux-gnueabihf + docker run --name the_container --interactive -v /:/host arm32v7/ubuntu:latest /bin/bash -c " + apt update && + apt install -y git python3 python3-dev python3-pip && + pip3 install cython==0.29.24 setuptools\<49.2.0 hypothesis==6.23.3 pytest==6.2.5 && + ln -s /host/lib64 /lib64 && + ln -s /host/lib/x86_64-linux-gnu /lib/x86_64-linux-gnu && + ln -s /host/usr/arm-linux-gnueabihf /usr/arm-linux-gnueabihf && + rm -rf /usr/lib/gcc/arm-linux-gnueabihf && ln -s /host/usr/lib/gcc-cross/arm-linux-gnueabihf /usr/lib/gcc/arm-linux-gnueabihf && + rm -f /usr/bin/arm-linux-gnueabihf-gcc && ln -s /host/usr/bin/arm-linux-gnueabihf-gcc /usr/bin/arm-linux-gnueabihf-gcc && + rm -f /usr/bin/arm-linux-gnueabihf-g++ && ln -s /host/usr/bin/arm-linux-gnueabihf-g++ /usr/bin/arm-linux-gnueabihf-g++ && + rm -f /usr/bin/arm-linux-gnueabihf-ar && ln -s /host/usr/bin/arm-linux-gnueabihf-ar /usr/bin/arm-linux-gnueabihf-ar && + rm -f /usr/bin/arm-linux-gnueabihf-as && ln -s /host/usr/bin/arm-linux-gnueabihf-as /usr/bin/arm-linux-gnueabihf-as && + rm -f /usr/bin/arm-linux-gnueabihf-ld && ln -s /host/usr/bin/arm-linux-gnueabihf-ld /usr/bin/arm-linux-gnueabihf-ld && + rm -f /usr/bin/arm-linux-gnueabihf-ld.bfd && ln -s /host/usr/bin/arm-linux-gnueabihf-ld.bfd /usr/bin/arm-linux-gnueabihf-ld.bfd + " + docker commit the_container the_container + - name: Build + run: | + sudo docker run --name 
the_build --interactive -v $(pwd):/numpy -v /:/host the_container /bin/bash -c " + uname -a && + gcc --version && + g++ --version && + python3 --version && + cd /numpy && python3 setup.py install + " + docker commit the_build the_build + - name: Run SIMD Tests + run: | + docker run --rm --interactive -v $(pwd):/numpy the_build /bin/bash -c " + cd /numpy && python3 runtests.py -n -v -- -k test_simd + " diff --git a/.github/workflows/cygwin.yml b/.github/workflows/cygwin.yml index 930ce39ff..78fa25995 100644 --- a/.github/workflows/cygwin.yml +++ b/.github/workflows/cygwin.yml @@ -24,7 +24,7 @@ jobs: python38-cython python38-pip python38-wheel python38-cffi python38-pytz python38-setuptools python38-pytest python38-hypothesis liblapack-devel libopenblas - gcc-fortran git dash + gcc-fortran gcc-g++ git dash - name: Set Windows PATH uses: egor-tensin/cleanup-path@v1 with: @@ -49,6 +49,9 @@ jobs: - name: Install new NumPy run: | bash -c "/usr/bin/python3.8 -m pip install dist/numpy-*cp38*.whl" + - name: Rebase NumPy compiled extensions + run: | + dash "tools/rebase_installed_dlls_cygwin.sh" 3.8 - name: Run NumPy test suite run: >- dash -c "/usr/bin/python3.8 runtests.py -n -vv" @@ -64,4 +67,4 @@ jobs: dash -c "/usr/bin/python3.8 -m pip show numpy" dash -c "/usr/bin/python3.8 -m pip show -f numpy | grep .dll" dash -c "/bin/tr -d '\r' <tools/list_installed_dll_dependencies_cygwin.sh >list_dlls_unix.sh" - dash "list_dlls_unix.sh" + dash "list_dlls_unix.sh" 3.8 diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml new file mode 100644 index 000000000..3da4fdfa9 --- /dev/null +++ b/.github/workflows/wheels.yml @@ -0,0 +1,104 @@ +# Workflow to build and test wheels. +# To work on the wheel building infrastructure on a fork, comment out: +# +# if: github.repository == 'numpy/numpy' +# +# in the get_commit_message job. Be sure to include [cd build] in your commit +# message to trigger the build. 
All files related to wheel building are located +# at tools/wheels/ +name: Wheel builder + +on: + schedule: + # Nightly build at 1:42 UTC + - cron: "42 1 * * *" + push: + pull_request: + workflow_dispatch: + +jobs: + get_commit_message: + name: Get commit message + runs-on: ubuntu-latest + if: github.repository == 'numpy/numpy' + outputs: + message: ${{ steps.commit_message.outputs.message }} + steps: + - name: Checkout numpy + uses: actions/checkout@v2 + # Gets the correct commit message for pull request + with: + ref: ${{ github.event.pull_request.head.sha }} + - name: Get commit message + id: commit_message + run: | + set -xe + COMMIT_MSG=$(git log --no-merges -1 --oneline) + echo "::set-output name=message::$COMMIT_MSG" + + build_wheels: + name: Build wheel for cp${{ matrix.python }}-${{ matrix.platform }} + needs: get_commit_message + if: >- + contains(needs.get_commit_message.outputs.message, '[cd build]') || + github.event.name == 'schedule' || + github.event.name == 'workflow_dispatch' + runs-on: ${{ matrix.os }} + strategy: + # Ensure that a wheel builder finishes even if another fails + fail-fast: false + matrix: + include: + # manylinux builds + - os: ubuntu-latest + python: "38" + platform: manylinux_x86_64 + - os: ubuntu-latest + python: "39" + platform: manylinux_x86_64 + - os: ubuntu-latest + python: "310" + platform: manylinux_x86_64 + + # macos builds + - os: macos-latest + python: "38" + platform: macosx_x86_64 + - os: macos-latest + python: "39" + platform: macosx_x86_64 + - os: macos-latest + python: "310" + platform: macosx_x86_64 + + steps: + - name: Checkout numpy + uses: actions/checkout@v2 + with: + submodules: true + # versioneer.py requires the latest tag to be reachable. Here we + # fetch the complete history to get access to the tags. 
+ # A shallow clone can work when the following issue is resolved: + # https://github.com/actions/checkout/issues/338 + fetch-depth: 0 + + - name: Build wheels + uses: pypa/cibuildwheel@v2.1.3 + env: + NPY_USE_BLAS_ILP64: 1 + CIBW_BUILD: cp${{ matrix.python }}-${{ matrix.platform }} + CIBW_MANYLINUX_X86_64_IMAGE: manylinux2014 + CIBW_ENVIRONMENT_LINUX: CFLAGS='-std=c99 -fno-strict-aliasing' + LDFLAGS='-Wl,--strip-debug' + OPENBLAS64_=/usr/local + # MACOS linker doesn't support stripping symbols + CIBW_ENVIRONMENT_MACOS: CFLAGS='-std=c99 -fno-strict-aliasing' + OPENBLAS64_=/usr/local + CIBW_BUILD_VERBOSITY: 3 + CIBW_BEFORE_BUILD: bash {project}/tools/wheels/cibw_before_build.sh {project} + CIBW_BEFORE_TEST: pip install -r {project}/test_requirements.txt + CIBW_TEST_COMMAND: bash {project}/tools/wheels/cibw_test_command.sh {project} + + - uses: actions/upload-artifact@v2 + with: + path: ./wheelhouse/*.whl diff --git a/.gitignore b/.gitignore index d85676249..52997523c 100644 --- a/.gitignore +++ b/.gitignore @@ -220,3 +220,4 @@ numpy/core/src/umath/loops_arithm_fp.dispatch.c numpy/core/src/umath/loops_arithmetic.dispatch.c numpy/core/src/umath/loops_trigonometric.dispatch.c numpy/core/src/umath/loops_exponent_log.dispatch.c +numpy/core/src/umath/loops_umath_fp.dispatch.c diff --git a/.gitmodules b/.gitmodules index 0d6857868..1ea274daf 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,3 +1,6 @@ [submodule "doc/source/_static/scipy-mathjax"] path = doc/source/_static/scipy-mathjax url = https://github.com/scipy/scipy-mathjax.git +[submodule "numpy/core/src/umath/svml"] + path = numpy/core/src/umath/svml + url = https://github.com/numpy/SVML.git diff --git a/.gitpod.yml b/.gitpod.yml index dfbee831a..f9c35fd9b 100644 --- a/.gitpod.yml +++ b/.gitpod.yml @@ -14,7 +14,6 @@ tasks: python setup.py build_ext --inplace echo "🛠Completed rebuilding NumPy!! 🛠" echo "📖 Building docs 📖 " - git submodule update --init cd doc make html echo "✨ Pre-build complete! You can close this terminal ✨ " @@ -60,4 +59,4 @@ github: # add a "Review in Gitpod" button to the pull request's description (defaults to false) addBadge: false # add a label once the prebuild is ready to pull requests (defaults to false) - addLabel: false
\ No newline at end of file + addLabel: false diff --git a/.travis.yml b/.travis.yml index 839f52ed1..5652e2dbd 100644 --- a/.travis.yml +++ b/.travis.yml @@ -53,6 +53,14 @@ jobs: - DOWNLOAD_OPENBLAS=1 - ATLAS=None + - python: 3.10-dev + os: linux + arch: arm64 + virt: vm + env: + # use OpenBLAS build, not system ATLAS + - DOWNLOAD_OPENBLAS=1 + - ATLAS=None before_install: - ./tools/travis-before-install.sh diff --git a/README.md b/README.md --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -# <img alt="NumPy" src="/branding/logo/primary/numpylogo.svg" height="60"> +# <a href="https://numpy.org/"><img alt="NumPy" src="/branding/logo/primary/numpylogo.svg" height="60"></a> <!--[](--> <!--https://dev.azure.com/numpy/numpy/_build/latest?definitionId=1?branchName=main)--> diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 714f62912..7c21087e1 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -24,6 +24,9 @@ stages: vmImage: 'ubuntu-20.04' steps: - script: | + git submodule update --init + displayName: 'Fetch submodules' + - script: | if ! `gcc 2>/dev/null`; then sudo apt install gcc fi @@ -72,6 +75,9 @@ stages: vmImage: 'ubuntu-20.04' steps: - script: | + git submodule update --init + displayName: 'Fetch submodules' + - script: | docker run -v $(pwd):/numpy -e CFLAGS="-msse2 -std=c99 -UNDEBUG" \ -e F77=gfortran-5 -e F90=gfortran-5 quay.io/pypa/manylinux2014_i686 \ /bin/bash -xc "cd numpy && \ @@ -103,7 +109,7 @@ stages: # the docs even though i.e., numba uses another in their # azure config for mac os -- Microsoft has indicated # they will patch this issue - vmImage: macOS-10.14 + vmImage: 'macOS-10.14' strategy: maxParallel: 3 matrix: @@ -224,7 +230,7 @@ stages: - job: Windows pool: - vmImage: 'VS2017-Win2016' + vmImage: 'windows-latest' strategy: maxParallel: 6 matrix: @@ -249,6 +255,12 @@ stages: TEST_MODE: full BITS: 64 NPY_USE_BLAS_ILP64: '1' + PyPy38-64bit-fast: + PYTHON_VERSION: 'PyPy' + PYTHON_ARCH: 'x64' + TEST_MODE: fast + BITS: 64 + NPY_USE_BLAS_ILP64: '1' steps: - template: azure-steps-windows.yml @@ -259,6 +271,9 @@ stages: vmImage: 'ubuntu-20.04' steps: - script: | + git submodule update --init + displayName: 'Fetch submodules' + - script: | # create and activate conda environment conda env create -f environment.yml displayName: 'Create conda environment.' diff --git a/azure-steps-windows.yml b/azure-steps-windows.yml index 9a5f9bb70..34f9797de 100644 --- a/azure-steps-windows.yml +++ b/azure-steps-windows.yml @@ -6,7 +6,7 @@ steps: architecture: $(PYTHON_ARCH) condition: not(contains(variables['PYTHON_VERSION'], 'PyPy')) - powershell: | - $url = "http://buildbot.pypy.org/nightly/py3.7/pypy-c-jit-latest-win64.zip" + $url = "http://buildbot.pypy.org/nightly/py3.8/pypy-c-jit-latest-win64.zip" $output = "pypy.zip" $wc = New-Object System.Net.WebClient $wc.DownloadFile($url, $output) diff --git a/doc/BRANCH_WALKTHROUGH.rst b/doc/BRANCH_WALKTHROUGH.rst new file mode 100644 index 000000000..95de5464b --- /dev/null +++ b/doc/BRANCH_WALKTHROUGH.rst @@ -0,0 +1,77 @@ +This file contains a walkthrough of branching NumPy 1.21.x on Linux. The +commands can be copied into the command line, but be sure to replace 1.21 and +1.22 by the correct versions. It is good practice to make ``.mailmap`` as +current as possible before making the branch; that may take several weeks. + +This should be read together with the general directions in `releasing`. + +Branching +========= + +Make the branch +--------------- + +This is only needed when starting a new maintenance branch.
Because +NumPy now depends on tags to determine the version, the start of a new +development cycle in the main branch needs an annotated tag. That is done +as follows:: + + $ git checkout main + $ git pull upstream main + $ git commit --allow-empty -m'REL: Begin NumPy 1.22.0 development' + $ git push upstream HEAD + +If the push fails because new PRs have been merged, do:: + + $ git pull --rebase upstream + +and repeat the push. Once the push succeeds, tag it:: + + $ git tag -a -s v1.22.0.dev0 -m'Begin NumPy 1.22.0 development' + $ git push upstream v1.22.0.dev0 + +then make the new branch and push it:: + + $ git branch maintenance/1.21.x HEAD^ + $ git push upstream maintenance/1.21.x + +Prepare the main branch for further development +----------------------------------------------- + +Make a PR branch to prepare main for further development:: + + $ git checkout -b 'prepare-main-for-1.22.0-development' v1.22.0.dev0 + +Delete the release note fragments:: + + $ git rm doc/release/upcoming_changes/[0-9]*.*.rst + +Create the new release notes skeleton and add to index:: + + $ cp doc/source/release/template.rst doc/source/release/1.22.0-notes.rst + $ gvim doc/source/release/1.22.0-notes.rst # put the correct version + $ git add doc/source/release/1.22.0-notes.rst + $ gvim doc/source/release.rst # add new notes to notes index + $ git add doc/source/release.rst + +Update ``pavement.py`` and update the ``RELEASE_NOTES`` variable to point to +the new notes:: + + $ gvim pavement.py + $ git add pavement.py + +Update ``cversions.txt`` to add current release. There should be no new hash +to worry about at this early point, just add a comment following previous +practice:: + + $ gvim numpy/core/code_generators/cversions.txt + $ git add numpy/core/code_generators/cversions.txt + +Check your work, commit it, and push:: + + $ git status # check work + $ git commit -m'REL: Prepare main for NumPy 1.22.0 development' + $ git push origin HEAD + +Now make a pull request. + diff --git a/doc/Makefile b/doc/Makefile index 68d496389..16fc3229d 100644 --- a/doc/Makefile +++ b/doc/Makefile @@ -14,6 +14,7 @@ PYTHON = python$(PYVER) SPHINXOPTS ?= SPHINXBUILD ?= LANG=C sphinx-build PAPER ?= +DOXYGEN ?= doxygen # For merging a documentation archive into a git checkout of numpy/doc # Turn a tag like v1.18.0 into 1.18 # Use sed -n -e 's/patttern/match/p' to return a blank value if no match @@ -77,7 +78,7 @@ INSTALL_DIR = $(CURDIR)/build/inst-dist INSTALL_PPH = $(INSTALL_DIR)/lib/python$(PYVER)/site-packages:$(INSTALL_DIR)/local/lib/python$(PYVER)/site-packages:$(INSTALL_DIR)/lib/python$(PYVER)/dist-packages:$(INSTALL_DIR)/local/lib/python$(PYVER)/dist-packages UPLOAD_DIR=/srv/docs_scipy_org/doc/numpy-$(RELEASE) -DIST_VARS=SPHINXBUILD="LANG=C PYTHONPATH=$(INSTALL_PPH) python$(PYVER) `which sphinx-build`" PYTHON="PYTHONPATH=$(INSTALL_PPH) python$(PYVER)" +DIST_VARS=SPHINXBUILD="LANG=C PYTHONPATH=$(INSTALL_PPH) python$(PYVER) `which sphinx-build`" PYTHON="PYTHONPATH=$(INSTALL_PPH) python$(PYVER)" NUMPYVER:=$(shell $(PYTHON) -c "import numpy; print(numpy.version.git_revision[:10])" 2>/dev/null) GITVER ?= $(shell cd ..; $(PYTHON) -c "import versioneer as v; print(v.get_versions()['full-revisionid'][:10])") @@ -176,6 +177,12 @@ build/generate-stamp: $(wildcard source/reference/*.rst) html: version-check html-build html-build: generate mkdir -p build/html build/doctrees + $(PYTHON) preprocess.py +ifeq (, $(shell which $(DOXYGEN))) + @echo "Unable to find 'Doxygen:$(DOXYGEN)', skip generating C/C++ API from comment blocks." 
+else + $(DOXYGEN) build/doxygen/Doxyfile +endif $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) build/html $(FILES) $(PYTHON) postprocess.py html build/html/*.html @echo diff --git a/doc/RELEASE_WALKTHROUGH.rst.txt b/doc/RELEASE_WALKTHROUGH.rst.txt index 6febd554f..42d84e04b 100644 --- a/doc/RELEASE_WALKTHROUGH.rst.txt +++ b/doc/RELEASE_WALKTHROUGH.rst.txt @@ -1,7 +1,7 @@ -This file contains a walkthrough of the NumPy 1.19.0 release on Linux, modified +This file contains a walkthrough of the NumPy 1.21.0 release on Linux, modified for building on azure and uploading to anaconda.org. The commands can be copied into the command line, but be sure to -replace 1.19.0 by the correct version. +replace 1.21.0 by the correct version. This should be read together with the general directions in `releasing`. @@ -13,46 +13,72 @@ Backport Pull Requests ---------------------- Changes that have been marked for this release must be backported to the -maintenance/1.19.x branch. +maintenance/1.21.x branch. Update Release documentation ---------------------------- -The file ``doc/changelog/1.19.0-changelog.rst`` should be updated to reflect -the final list of changes and contributors. This text can be generated by:: +Four documents usually need to be updated or created before making a release: - $ python tools/changelog.py $GITHUB v1.18.0..maintenance/1.19.x > doc/changelog/1.19.0-changelog.rst +- The changelog +- The release-notes +- The ``.mailmap`` file +- The ``doc/source/release.rst`` file -where ``GITHUB`` contains your github access token. This text may also be -appended to ``doc/release/1.19.0-notes.rst`` for patch release, though not for -new releases like ``1.19.0``, as the changelogs for ``*.0`` releases tend to be -excessively long. The ``doc/source/release.rst`` file should also be updated -with a link to the new release notes. These changes should be committed to the -maintenance branch, and later will be forward ported to main. The changelog -should be reviewed for name duplicates or short names and the ``.mailmap`` file -updated if needed. +These changes should be made as an ordinary PR against the maintenance branch. +After release all files except ``doc/source/release.rst`` will need to be +forward ported to the main branch. +Generate the changelog +~~~~~~~~~~~~~~~~~~~~~~ -Finish the Release Note ----------------------- +The changelog is generated using the changelog tool:: -.. note: + $ python tools/changelog.py $GITHUB v1.20.0..maintenance/1.21.x > doc/changelog/1.21.0-changelog.rst - This has changed now that we use ``towncrier``. See the instructions for - creating the release note in ``doc/release/upcoming_changes/README.rst``. +where ``GITHUB`` contains your GitHub access token. The text will need to be +checked for non-standard contributor names and dependabot entries removed. It +is also a good idea to remove any links that may be present in the PR titles +as they don't translate well to markdown; replace them with monospaced text. The +non-standard contributor names should be fixed by updating the ``.mailmap`` +file, which is a lot of work. It is best to make several trial runs before +reaching this point and ping the malefactors using a GitHub issue to get the +needed information. -Fill out the release note ``doc/release/1.19.0-notes.rst`` calling out -significant changes. +Finish the release notes +~~~~~~~~~~~~~~~~~~~~~~~~ + +If this is the first release in a series the release note is generated; see +``doc/release/upcoming_changes/README.rst`` for how to +do this.
Generating the release notes will also delete all the news +fragment files in ``doc/release/upcoming_changes/``. + +The generated release note will always need some fixups: the introduction will +need to be written, and significant changes should be called out. For patch +releases the changelog text may also be appended, but not for the initial +release as it is too long. Check previous release notes to see how this is +done. Note that the ``:orphan:`` markup at the top, if present, will need +changing to ``.. currentmodule:: numpy`` and the ``doc/source/release.rst`` +index file will need updating. + +Check the pavement.py file +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Check that the pavement.py file points to the correct release notes. It should +have been updated after the last release, but if not, fix it now:: + + $ gvim pavement.py Release Walkthrough ==================== Note that in the code snippets below, ``upstream`` refers to the root repository on -github and ``origin`` to a fork in your personal account. You may need to make adjustments -if you have not forked the repository but simply cloned it locally. You can -also edit ``.git/config`` and add ``upstream`` if it isn't already present. +GitHub and ``origin`` to its fork in your personal GitHub repositories. You may +need to make adjustments if you have not forked the repository but simply +cloned it locally. You can also edit ``.git/config`` and add ``upstream`` if it +isn't already present. Prepare the release commit -------------------------- @@ -60,24 +86,20 @@ Prepare the release commit Checkout the branch for the release, make sure it is up to date, and clean the repository:: - $ git checkout maintenance/1.19.x - $ git pull upstream maintenance/1.19.x + $ git checkout maintenance/1.21.x + $ git pull upstream maintenance/1.21.x $ git submodule update $ git clean -xdfq -Edit pavement.py and setup.py as detailed in HOWTO_RELEASE:: - - $ gvim pavement.py setup.py # Generally only setup.py needs updating - $ git commit -a -m"REL: NumPy 1.19.0 release." - Sanity check:: $ python3 runtests.py -m "full" -Push this release directly onto the end of the maintenance branch. This -requires write permission to the numpy repository:: +Tag the release and push the tag. This requires write permission for the numpy +repository:: - $ git push upstream HEAD + $ git tag -a -s v1.21.0 -m"NumPy 1.21.0 release" + $ git push upstream v1.21.0 Build source releases --------------------- Paver is used to build the source releases. It will create the ``release`` and ``release/installers`` directories and put the ``*.zip`` and ``*.tar.gz`` source releases in the latter. :: - $ python3 -m cython --version # check for correct cython version $ paver sdist # sdist will do a git clean -xdfq, so we omit that @@ -104,17 +125,18 @@ exists skip this:: $ cd ../numpy-wheels $ git checkout main $ git pull upstream main - $ git branch v1.19.x + $ git branch v1.21.x Checkout the new branch and edit the ``azure-pipelines.yml`` and ``.travis.yml`` files to make sure they have the correct version, and put in -the commit hash for the ``REL`` commit created above for ``BUILD_COMMIT``. The -``azure/posix.yml`` and ``.travis.yml`` files may also need the Cython versions -updated to keep up with Python releases, but generally just do:: - - $ git checkout v1.19.x - $ gvim azure-pipelines .travis.yml - $ git commit -a -m"NumPy 1.19.0 release." +the commit hash for the ``REL`` commit created above for the ``BUILD_COMMIT`` +variable.
The ``azure/posix.yml`` and ``.travis.yml`` files may also need the +Cython versions updated to keep up with Python releases, but generally just +do:: + + $ git checkout v1.21.x + $ gvim azure-pipelines.yml .travis.yml + $ git commit -a -m"NumPy 1.21.0 release." $ git push upstream HEAD Now wait. If you get nervous at the amount of time taken -- the builds can take @@ -134,7 +156,7 @@ When the wheels have all been successfully built and staged, download them from Anaconda staging directory using the ``tools/download-wheels.py`` script:: $ cd ../numpy - $ python3 tools/download-wheels.py 1.19.0 + $ python3 tools/download-wheels.py 1.21.0 Generate the README files @@ -146,47 +168,24 @@ file is updated for continued development:: $ paver write_release -Tag the release ---------------- - -Once the wheels have been built and downloaded without errors tag the ``REL`` commit, signing -it with your gpg key:: - - $ git tag -s -m"NumPy 1.19.0 release" v1.19.0 - -You should upload your public gpg key to github, so that the tag will appear -"verified" there. - -Check that the files in ``release/installers`` have the correct versions, then -push the tag upstream:: - - $ git push upstream v1.19.0 - -We wait until this point to push the tag because it is public and should not -be changed after it has been pushed. - - Reset the maintenance branch into a development state ----------------------------------------------------- -Add another ``REL`` commit to the numpy maintenance branch, which resets the -``ISREALEASED`` flag to ``False`` and increments the version counter:: - - $ gvim pavement.py setup.py - -Create release notes for next release and edit them to set the version:: +Create release notes for next release and edit them to set the version. These +notes will be a skeleton and have little content:: - $ cp doc/source/release/template.rst doc/source/release/1.19.1-notes.rst - $ gvim doc/source/release/1.19.1-notes.rst - $ git add doc/source/release/1.19.1-notes.rst + $ cp doc/source/release/template.rst doc/source/release/1.21.1-notes.rst + $ gvim doc/source/release/1.21.1-notes.rst + $ git add doc/source/release/1.21.1-notes.rst -Add new release notes to the documentation release list:: +Add new release notes to the documentation release list and update the +``RELEASE_NOTES`` variable in ``pavement.py``:: - $ gvim doc/source/release.rst + $ gvim doc/source/release.rst pavement.py Commit the result:: - $ git commit -a -m"REL: prepare 1.19.x for further development" + $ git commit -a -m"REL: prepare 1.21.x for further development" + $ git push upstream HEAD @@ -194,33 +193,37 @@ Upload to PyPI -------------- Upload to PyPI using ``twine``. A recent version of ``twine`` is needed -after recent PyPI changes, version ``3.1.1`` was used here:: +after recent PyPI changes, version ``3.4.1`` was used here:: $ cd ../numpy $ twine upload release/installers/*.whl - $ twine upload release/installers/numpy-1.19.0.zip # Upload last. + $ twine upload release/installers/numpy-1.21.0.zip # Upload last. If one of the commands breaks in the middle, you may need to selectively upload the remaining files because PyPI does not allow the same file to be uploaded twice. The source file should be uploaded last to avoid synchronization problems that might occur if pip users access the files while this is in -process. Note that PyPI only allows a single source distribution, here we have +process, causing pip to build from source rather than downloading a binary +wheel.
PyPI only allows a single source distribution; here we have +chosen the zip archive. Upload files to github ---------------------- Go to `<https://github.com/numpy/numpy/releases>`_, there should be a ``v1.21.0 +tag``, click on it and hit the edit button for that tag. There are two ways to -add files, using an editable text window and as binary uploads. Cut and paste -the ``release/README.md`` file contents into the text window. You will probably -need to make some edits to get it to look right. Then - -- Upload ``release/installers/numpy-1.19.0.tar.gz`` as a binary file. -- Upload ``release/installers/numpy-1.19.0.zip`` as a binary file. +add files, using an editable text window and as binary uploads. Start by +editing the ``release/README.md`` that is translated from the rst version using +pandoc. Things that will need fixing: PR lines from the changelog, if included, +are wrapped and need unwrapping, and links should be changed to monospaced text. +Then copy the contents to the clipboard and paste them into the text window. It +may take several tries to get it to look right. Then + +- Upload ``release/installers/numpy-1.21.0.tar.gz`` as a binary file. +- Upload ``release/installers/numpy-1.21.0.zip`` as a binary file. - Upload ``release/README.rst`` as a binary file. -- Upload ``doc/changelog/1.19.0-changelog.rst`` as a binary file. +- Upload ``doc/changelog/1.21.0-changelog.rst`` as a binary file. - Check the pre-release button if this is a pre-release. - Hit the ``{Publish,Update} release`` button at the bottom. @@ -228,9 +231,11 @@ need to make some edits to get it to look right. Then Upload documents to numpy.org ----------------------------- -This step is only needed for final releases and can be skipped for -pre-releases. ``make merge-doc`` clones the ``numpy/doc`` repo into +This step is only needed for final releases and can be skipped for pre-releases +and most patch releases. ``make merge-doc`` clones the ``numpy/doc`` repo into ``doc/build/merge`` and updates it with the new documentation:: +Note that if you have a `.local` numpy install, you should either remove it or +install the current version for the docs to pick up the correct NumPy version. $ pushd doc $ make dist $ make merge-doc $ popd @@ -245,7 +250,7 @@ If the release series is a new one, you will need to add a new section to the Otherwise, only the ``zip`` and ``pdf`` links should be updated with the new tag name:: - $ gvim doc/build/merge/index.html +/'tag v1.19' + $ gvim doc/build/merge/index.html +/'tag v1.21' You can "test run" the new documentation in a browser to make sure the links work:: @@ -254,12 +259,12 @@ work:: Update the stable link:: - $ ln -sfn 1.19 stable + $ ln -sfn 1.21 stable Once everything seems satisfactory, commit and upload the changes:: $ pushd doc/build/merge - $ git commit -am"Add documentation for v1.19.0" + $ git commit -a -m"Add documentation for v1.21.0" $ git push $ popd @@ -271,14 +276,13 @@ This assumes that you have forked `<https://github.com/scipy/scipy.org>`_:: $ cd ../scipy.org $ git checkout master $ git pull upstream master - $ git checkout -b numpy-1.19.0 + $ git checkout -b numpy-1.21.0 $ gvim www/index.rst # edit the News section $ git commit -a $ git push origin HEAD Now go to your fork and make a pull request for the branch.
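A side note on the ``release/README.md`` step in the walkthrough above: the rst-to-markdown translation it mentions is done with pandoc. A minimal sketch of that conversion, assuming a reasonably recent pandoc with GitHub-flavored markdown output (the exact flags are illustrative; the walkthrough does not prescribe them)::

    $ pandoc -f rst -t gfm -o release/README.md release/README.rst
    $ gvim release/README.md    # unwrap wrapped PR lines, change links to monospaced text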
- Announce to mailing lists ------------------------- @@ -294,14 +298,14 @@ Post-Release Tasks Checkout main and forward port the documentation changes:: - $ git checkout -b post-1.19.0-release-update - $ git checkout maintenance/1.19.x doc/source/release/1.19.0-notes.rst - $ git checkout maintenance/1.19.x doc/changelog/1.19.0-changelog.rst - $ git checkout maintenance/1.19.x .mailmap # only if updated for release. + $ git checkout -b post-1.21.0-release-update + $ git checkout maintenance/1.21.x doc/source/release/1.21.0-notes.rst + $ git checkout maintenance/1.21.x doc/changelog/1.21.0-changelog.rst + $ git checkout maintenance/1.21.x .mailmap # only if updated for release. $ gvim doc/source/release.rst # Add link to new notes - $ git add doc/changelog/1.19.0-changelog.rst doc/source/release/1.19.0-notes.rst + $ git add doc/changelog/1.21.0-changelog.rst doc/source/release/1.21.0-notes.rst $ git status # check status before commit - $ git commit -a -m"REL: Update main after 1.19.0 release." + $ git commit -a -m"REL: Update main after 1.21.0 release." $ git push origin HEAD -Go to github and make a PR. +Go to GitHub and make a PR. diff --git a/doc/TESTS.rst.txt b/doc/TESTS.rst.txt index ba09aa800..0d8137f4a 100644 --- a/doc/TESTS.rst.txt +++ b/doc/TESTS.rst.txt @@ -16,13 +16,7 @@ Our goal is that every module and package in NumPy should have a thorough set of unit tests. These tests should exercise the full functionality of a given routine as well as its robustness to erroneous or unexpected input -arguments. Long experience has shown that by far the best time to -write the tests is before you write or change the code - this is -`test-driven development -<https://en.wikipedia.org/wiki/Test-driven_development>`__. The -arguments for this can sound rather abstract, but we can assure you -that you will find that writing the tests first leads to more robust -and better designed code. Well-designed tests with good coverage make +arguments. Well-designed tests with good coverage make an enormous difference to the ease of refactoring. Whenever a new bug is found in a routine, you should write a new test for that specific case and add it to the test suite to prevent that bug from creeping @@ -145,6 +139,21 @@ originally written without unit tests, there are still several modules that don't have tests yet. Please feel free to choose one of these modules and develop tests for it. +Using C code in tests +--------------------- + +NumPy exposes a rich :ref:`C-API<c-api>` . These are tested using c-extension +modules written "as-if" they know nothing about the internals of NumPy, rather +using the official C-API interfaces only. Examples of such modules are tests +for a user-defined ``rational`` dtype in ``_rational_tests`` or the ufunc +machinery tests in ``_umath_tests`` which are part of the binary distribution. +Starting from version 1.21, you can also write snippets of C code in tests that +will be compiled locally into c-extension modules and loaded into python. + +.. currentmodule:: numpy.testing.extbuild + +.. 
autofunction:: build_and_import_extension + Labeling tests -------------- diff --git a/doc/cdoc/Doxyfile b/doc/cdoc/Doxyfile deleted file mode 100644 index c9c386e4e..000000000 --- a/doc/cdoc/Doxyfile +++ /dev/null @@ -1,29 +0,0 @@ -# Doxyfile for NumPy C API -# See http://www.doxygen.nl/manual/config.html -PROJECT_NAME = numpy -PROJECT_NUMBER = 2.0.0 -OUTPUT_DIRECTORY = build -STRIP_FROM_PATH = ../../numpy/core -INHERIT_DOCS = YES -TAB_SIZE = 8 -OPTIMIZE_OUTPUT_FOR_C = YES -EXTRACT_ALL = YES -EXTRACT_PRIVATE = YES -EXTRACT_STATIC = YES -CASE_SENSE_NAMES = NO -INPUT = ../../numpy/core/src \ - ../../numpy/core/include -FILE_PATTERNS = *.h *.c *.src -RECURSIVE = YES -INPUT_FILTER = ./numpyfilter.py -REFERENCED_BY_RELATION = YES -REFERENCES_RELATION = YES -ALPHABETICAL_INDEX = NO -GENERATE_HTML = YES -HTML_TIMESTAMP = YES -GENERATE_TREEVIEW = YES -SEARCHENGINE = NO -GENERATE_LATEX = NO -PAPER_TYPE = a4wide -GENERATE_XML = NO -HAVE_DOT = NO diff --git a/doc/cdoc/Makefile b/doc/cdoc/Makefile deleted file mode 100644 index 8b9deada8..000000000 --- a/doc/cdoc/Makefile +++ /dev/null @@ -1,10 +0,0 @@ -all: build - -build: - doxygen - -clean: - rm -rf build - -.PHONY: all build clean - diff --git a/doc/cdoc/README b/doc/cdoc/README deleted file mode 100644 index a5363cfa1..000000000 --- a/doc/cdoc/README +++ /dev/null @@ -1,31 +0,0 @@ -cdoc -==== - -This is a simple Doxygen project for building NumPy C code documentation, -with docstrings extracted from the C sources themselves. - -The understood syntax for documentation in the C source is - - /* - * Some text in reStructuredText format - */ - int function_to_which_the_text_applies() - { - ... - } - - /* - * More text in reStructuredText format - */ - struct - { - int variable_1; /* Documentation for variable_1 */ - - /* - * Documentation for variable_2 - */ - int variable_2; - } struct_name_t; - -Please do not use JavaDoc or Doxygen-specific formatting at the moment. - diff --git a/doc/cdoc/numpyfilter.py b/doc/cdoc/numpyfilter.py deleted file mode 100755 index d3cfe18f0..000000000 --- a/doc/cdoc/numpyfilter.py +++ /dev/null @@ -1,104 +0,0 @@ -#!/usr/bin/env python3 -""" -numpyfilter.py [-h] inputfile - -Interpret C comments as ReStructuredText, and replace them by the HTML output. -Also, add Doxygen /** and /**< syntax automatically where appropriate. 
- -""" -import sys -import re -import os -import textwrap - -from numpy.compat import pickle - -CACHE_FILE = 'build/rst-cache.pck' - -def main(): - import argparse - - parser = argparse.ArgumentParser(usage=__doc__.strip()) - parser.add_argument('input_file', help='input file') - args = parser.parse_args() - - comment_re = re.compile(r'(\n.*?)/\*(.*?)\*/', re.S) - - cache = load_cache() - - try: - with open(args.input_file, 'r') as f: - text = f.read() - text = comment_re.sub(lambda m: process_match(m, cache), text) - sys.stdout.write(text) - finally: - save_cache(cache) - -def filter_comment(text): - if text.startswith('NUMPY_API'): - text = text[9:].strip() - if text.startswith('UFUNC_API'): - text = text[9:].strip() - - html = render_html(text) - return html - -def process_match(m, cache=None): - pre, rawtext = m.groups() - - preline = pre.split("\n")[-1] - - if cache is not None and rawtext in cache: - text = cache[rawtext] - else: - text = re.compile(r'^\s*\*', re.M).sub('', rawtext) - text = textwrap.dedent(text) - text = filter_comment(text) - - if cache is not None: - cache[rawtext] = text - - if preline.strip(): - return pre + "/**< " + text + " */" - else: - return pre + "/** " + text + " */" - -def load_cache(): - if os.path.exists(CACHE_FILE): - with open(CACHE_FILE, 'rb') as f: - try: - cache = pickle.load(f) - except Exception: - cache = {} - else: - cache = {} - return cache - -def save_cache(cache): - with open(CACHE_FILE + '.new', 'wb') as f: - pickle.dump(cache, f) - os.rename(CACHE_FILE + '.new', CACHE_FILE) - -def render_html(text): - import docutils.parsers.rst - import docutils.writers.html4css1 - import docutils.core - - docutils.parsers.rst.roles.DEFAULT_INTERPRETED_ROLE = 'title-reference' - writer = docutils.writers.html4css1.Writer() - parts = docutils.core.publish_parts( - text, - writer=writer, - settings_overrides = dict(halt_level=5, - traceback=True, - default_reference_context='title-reference', - stylesheet_path='', - # security settings: - raw_enabled=0, - file_insertion_enabled=0, - _disable_config=1, - ) - ) - return parts['html_body'] - -if __name__ == "__main__": main() diff --git a/doc/changelog/1.21.3-changelog.rst b/doc/changelog/1.21.3-changelog.rst new file mode 100644 index 000000000..767794721 --- /dev/null +++ b/doc/changelog/1.21.3-changelog.rst @@ -0,0 +1,28 @@ + +Contributors +============ + +A total of 7 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Aaron Meurer +* Bas van Beek +* Charles Harris +* Developer-Ecosystem-Engineering + +* Kevin Sheppard +* Sebastian Berg +* Warren Weckesser + +Pull requests merged +==================== + +A total of 8 pull requests were merged for this release. + +* `#19745 <https://github.com/numpy/numpy/pull/19745>`__: ENH: Add dtype-support to 3 `generic`/`ndarray` methods +* `#19955 <https://github.com/numpy/numpy/pull/19955>`__: BUG: Resolve Divide by Zero on Apple silicon + test failures... +* `#19958 <https://github.com/numpy/numpy/pull/19958>`__: MAINT: Mark type-check-only ufunc subclasses as ufunc aliases... +* `#19994 <https://github.com/numpy/numpy/pull/19994>`__: BUG: np.tan(np.inf) test failure +* `#20080 <https://github.com/numpy/numpy/pull/20080>`__: BUG: Correct incorrect advance in PCG with emulated int128 +* `#20081 <https://github.com/numpy/numpy/pull/20081>`__: BUG: Fix NaT handling in the PyArray_CompareFunc for datetime... 
+* `#20082 <https://github.com/numpy/numpy/pull/20082>`__: DOC: Ensure that we add documentation also as to the dict for... +* `#20106 <https://github.com/numpy/numpy/pull/20106>`__: BUG: core: result_type(0, np.timedelta64(4)) would seg. fault. diff --git a/doc/neps/conf.py b/doc/neps/conf.py index f01ee8a51..68805e50f 100644 --- a/doc/neps/conf.py +++ b/doc/neps/conf.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # NumPy Enhancement Proposals documentation build configuration file, created by # sphinx-quickstart on Mon Dec 11 12:45:09 2017. diff --git a/doc/neps/nep-0000.rst b/doc/neps/nep-0000.rst index 7f841b7e2..044edebc7 100644 --- a/doc/neps/nep-0000.rst +++ b/doc/neps/nep-0000.rst @@ -1,7 +1,7 @@ .. _NEP00: =========================== -NEP 0 — Purpose and Process +NEP 0 — Purpose and process =========================== :Author: Jarrod Millman <millman@berkeley.edu> diff --git a/doc/neps/nep-0001-npy-format.rst b/doc/neps/nep-0001-npy-format.rst index fdf4ae47a..fb6754f5c 100644 --- a/doc/neps/nep-0001-npy-format.rst +++ b/doc/neps/nep-0001-npy-format.rst @@ -1,7 +1,7 @@ .. _NEP01: ============================================= -NEP 1 — A Simple File Format for NumPy Arrays +NEP 1 — A simple file format for NumPy arrays ============================================= :Author: Robert Kern <robert.kern@gmail.com> diff --git a/doc/neps/nep-0010-new-iterator-ufunc.rst b/doc/neps/nep-0010-new-iterator-ufunc.rst index 4e7fdfdf5..67177d30b 100644 --- a/doc/neps/nep-0010-new-iterator-ufunc.rst +++ b/doc/neps/nep-0010-new-iterator-ufunc.rst @@ -1,7 +1,7 @@ .. _NEP10: ============================================== -NEP 10 — Optimizing Iterator/UFunc Performance +NEP 10 — Optimizing Iterator/UFunc performance ============================================== :Author: Mark Wiebe <mwwiebe@gmail.com> @@ -10,7 +10,7 @@ NEP 10 — Optimizing Iterator/UFunc Performance :Status: Final ***************** -Table of Contents +Table of contents ***************** .. contents:: @@ -1545,7 +1545,7 @@ Functions For Iteration ``npy_intp *NpyIter_GetIndexPtr(NpyIter *iter)`` This gives back a pointer to the index being tracked, or NULL - if no index is being tracked. It is only useable if one of + if no index is being tracked. It is only usable if one of the flags ``NPY_ITER_C_INDEX`` or ``NPY_ITER_F_INDEX`` were specified during construction. diff --git a/doc/neps/nep-0011-deferred-ufunc-evaluation.rst b/doc/neps/nep-0011-deferred-ufunc-evaluation.rst index 866a774d1..fde034378 100644 --- a/doc/neps/nep-0011-deferred-ufunc-evaluation.rst +++ b/doc/neps/nep-0011-deferred-ufunc-evaluation.rst @@ -1,7 +1,7 @@ .. _NEP11: ================================== -NEP 11 — Deferred UFunc Evaluation +NEP 11 — Deferred UFunc evaluation ================================== :Author: Mark Wiebe <mwwiebe@gmail.com> diff --git a/doc/neps/nep-0012-missing-data.rst b/doc/neps/nep-0012-missing-data.rst index f47feadbd..4775ea18b 100644 --- a/doc/neps/nep-0012-missing-data.rst +++ b/doc/neps/nep-0012-missing-data.rst @@ -1,7 +1,7 @@ .. _NEP12: ============================================ -NEP 12 — Missing Data Functionality in NumPy +NEP 12 — Missing data functionality in NumPy ============================================ :Author: Mark Wiebe <mwwiebe@gmail.com> @@ -903,7 +903,7 @@ before it will allow NA-masked arrays to flow through. 
https://docs.scipy.org/doc/numpy/reference/c-api.array.html#NPY_ARRAY_ALLOWNA Code which does not follow this advice, and instead just calls PyArray_Check() to verify -its an ndarray and checks some flags, will silently produce incorrect results. This style +it is an ndarray and checks some flags, will silently produce incorrect results. This style of code does not provide any opportunity for numpy to say "hey, this array is special", so also is not compatible with future ideas of lazy evaluation, derived dtypes, etc. @@ -963,7 +963,7 @@ The first version to implement is the array masks, because it is the more general approach. The mask itself is an array, but since it is intended to never be directly accessible from Python, it won't be a full ndarray itself. The mask always has the same shape as -the array it's attached to, so it doesn't need its own shape. For +the array it is attached to, so it doesn't need its own shape. For an array with a struct dtype, however, the mask will have a different dtype than just a straight bool, so it does need its own dtype. This gives us the following additions to the PyArrayObject:: diff --git a/doc/neps/nep-0013-ufunc-overrides.rst b/doc/neps/nep-0013-ufunc-overrides.rst index ceb8b23e9..2f455e9b4 100644 --- a/doc/neps/nep-0013-ufunc-overrides.rst +++ b/doc/neps/nep-0013-ufunc-overrides.rst @@ -1,7 +1,7 @@ .. _NEP13: ========================================== -NEP 13 — A Mechanism for Overriding Ufuncs +NEP 13 — A mechanism for overriding Ufuncs ========================================== .. currentmodule:: numpy diff --git a/doc/neps/nep-0017-split-out-maskedarray.rst b/doc/neps/nep-0017-split-out-maskedarray.rst index 151c5ad1a..5cb1c0c39 100644 --- a/doc/neps/nep-0017-split-out-maskedarray.rst +++ b/doc/neps/nep-0017-split-out-maskedarray.rst @@ -1,7 +1,7 @@ .. _NEP17: ================================ -NEP 17 — Split Out Masked Arrays +NEP 17 — Split out masked arrays ================================ :Author: Stéfan van der Walt <stefanv@berkeley.edu> diff --git a/doc/neps/nep-0018-array-function-protocol.rst b/doc/neps/nep-0018-array-function-protocol.rst index 0dcb0ff7e..f4c21446b 100644 --- a/doc/neps/nep-0018-array-function-protocol.rst +++ b/doc/neps/nep-0018-array-function-protocol.rst @@ -15,8 +15,8 @@ NEP 18 — A dispatch mechanism for NumPy's high level array functions :Updated: 2019-05-25 :Resolution: https://mail.python.org/pipermail/numpy-discussion/2018-August/078493.html -Abstact -------- +Abstract +-------- We propose the ``__array_function__`` protocol, to allow arguments of NumPy functions to define how that function operates on them. This will allow diff --git a/doc/neps/nep-0019-rng-policy.rst b/doc/neps/nep-0019-rng-policy.rst index 077997f43..c5c46603b 100644 --- a/doc/neps/nep-0019-rng-policy.rst +++ b/doc/neps/nep-0019-rng-policy.rst @@ -1,7 +1,7 @@ .. _NEP19: ======================================= -NEP 19 — Random Number Generator Policy +NEP 19 — Random number generator policy ======================================= :Author: Robert Kern <robert.kern@gmail.com> diff --git a/doc/neps/nep-0020-gufunc-signature-enhancement.rst b/doc/neps/nep-0020-gufunc-signature-enhancement.rst index 90ed930b4..80ee75f5f 100644 --- a/doc/neps/nep-0020-gufunc-signature-enhancement.rst +++ b/doc/neps/nep-0020-gufunc-signature-enhancement.rst @@ -1,7 +1,7 @@ .. 
_NEP20: =============================================================== -NEP 20 — Expansion of Generalized Universal Function Signatures +NEP 20 — Expansion of generalized universal function signatures =============================================================== :Author: Marten van Kerkwijk <mhvk@astro.utoronto.ca> @@ -112,7 +112,7 @@ have a summary of all flags. This could possibly be stored in ``core_enabled`` but specific flags indicating whether or not a gufunc uses fixed, flexible, or broadcastable dimensions. -With the above, the formal defition of the syntax would become [4]_:: +With the above, the formal definition of the syntax would become [4]_:: <Signature> ::= <Input arguments> "->" <Output arguments> <Input arguments> ::= <Argument list> diff --git a/doc/neps/nep-0024-missing-data-2.rst b/doc/neps/nep-0024-missing-data-2.rst index 903ece1ba..c0e2d2ce7 100644 --- a/doc/neps/nep-0024-missing-data-2.rst +++ b/doc/neps/nep-0024-missing-data-2.rst @@ -1,7 +1,7 @@ .. _NEP24: ============================================================= -NEP 24 — Missing Data Functionality - Alternative 1 to NEP 12 +NEP 24 — Missing data functionality - Alternative 1 to NEP 12 ============================================================= :Author: Nathaniel J. Smith <njs@pobox.com>, Matthew Brett <matthew.brett@gmail.com> diff --git a/doc/neps/nep-0026-missing-data-summary.rst b/doc/neps/nep-0026-missing-data-summary.rst index 49d89d828..08dbf36d4 100644 --- a/doc/neps/nep-0026-missing-data-summary.rst +++ b/doc/neps/nep-0026-missing-data-summary.rst @@ -1,7 +1,7 @@ .. _NEP26: ==================================================== -NEP 26 — Summary of Missing Data NEPs and discussion +NEP 26 — Summary of missing data NEPs and discussion ==================================================== :Author: Mark Wiebe <mwwiebe@gmail.com>, Nathaniel J. Smith <njs@pobox.com> diff --git a/doc/neps/nep-0027-zero-rank-arrarys.rst b/doc/neps/nep-0027-zero-rank-arrarys.rst index cb3972675..4515cf96f 100644 --- a/doc/neps/nep-0027-zero-rank-arrarys.rst +++ b/doc/neps/nep-0027-zero-rank-arrarys.rst @@ -1,7 +1,7 @@ .. _NEP27: ========================= -NEP 27 — Zero Rank Arrays +NEP 27 — Zero rank arrays ========================= :Author: Alexander Belopolsky (sasha), transcribed Matt Picus <matti.picus@gmail.com> diff --git a/doc/neps/nep-0030-duck-array-protocol.rst b/doc/neps/nep-0030-duck-array-protocol.rst index 11a297132..1e12b546a 100644 --- a/doc/neps/nep-0030-duck-array-protocol.rst +++ b/doc/neps/nep-0030-duck-array-protocol.rst @@ -1,7 +1,7 @@ .. _NEP30: ====================================================== -NEP 30 — Duck Typing for NumPy Arrays - Implementation +NEP 30 — Duck typing for NumPy arrays - Implementation ====================================================== :Author: Peter Andreas Entschev <pentschev@nvidia.com> diff --git a/doc/neps/nep-0031-uarray.rst b/doc/neps/nep-0031-uarray.rst index 47d4bdd37..b4ec94077 100644 --- a/doc/neps/nep-0031-uarray.rst +++ b/doc/neps/nep-0031-uarray.rst @@ -359,7 +359,7 @@ NEP 18 notes that this may require maintenance of two separate APIs. However, this burden may be lessened by, for example, parametrizing all tests over ``numpy.overridable`` separately via a fixture. This also has the side-effect of thoroughly testing it, unlike ``__array_function__``. We also feel that it -provides an oppurtunity to separate the NumPy API contract properly from the +provides an opportunity to separate the NumPy API contract properly from the implementation. 
Benefits to end-users and mixing backends diff --git a/doc/neps/nep-0035-array-creation-dispatch-with-array-function.rst b/doc/neps/nep-0035-array-creation-dispatch-with-array-function.rst index 3a689a4dc..f6a77f754 100644 --- a/doc/neps/nep-0035-array-creation-dispatch-with-array-function.rst +++ b/doc/neps/nep-0035-array-creation-dispatch-with-array-function.rst @@ -1,7 +1,7 @@ .. _NEP35: =========================================================== -NEP 35 — Array Creation Dispatching With __array_function__ +NEP 35 — Array creation dispatching with __array_function__ =========================================================== :Author: Peter Andreas Entschev <pentschev@nvidia.com> @@ -209,7 +209,7 @@ libraries, preventing those libraries from using such important functionality in that context. The purpose of this NEP is to address that shortcoming in a simple and -straighforward way: introduce a new ``like=`` keyword argument, similar to how +straightforward way: introduce a new ``like=`` keyword argument, similar to how the ``empty_like`` family of functions work. When array creation functions receive such an argument, they will trigger the ``__array_function__`` protocol, and call the downstream library's own array creation function implementation. diff --git a/doc/neps/nep-0036-fair-play.rst b/doc/neps/nep-0036-fair-play.rst index 906247fd3..2acdcc704 100644 --- a/doc/neps/nep-0036-fair-play.rst +++ b/doc/neps/nep-0036-fair-play.rst @@ -126,7 +126,7 @@ Fair play rules were designed to help external packages interact more easily with NumPy. E.g., the latter allows objects from foreign libraries to pass through NumPy. We actively encourage using any of - these "officialy sanctioned" mechanisms for overriding or + these "officially sanctioned" mechanisms for overriding or interacting with NumPy. If these mechanisms are deemed insufficient, please start a diff --git a/doc/neps/nep-0038-SIMD-optimizations.rst b/doc/neps/nep-0038-SIMD-optimizations.rst index 396ba1371..927228447 100644 --- a/doc/neps/nep-0038-SIMD-optimizations.rst +++ b/doc/neps/nep-0038-SIMD-optimizations.rst @@ -64,7 +64,7 @@ mechanism for NumPy. There are three stages to using the mechanism: - Infrastructure is provided in the code for abstract intrinsics. The ufunc machinery will be extended using sets of these abstract intrinsics, so that a single ufunc will be expressed as a set of loops, going from a minimal to - a maximal set of possibly availabe intrinsics. + a maximal set of possibly available intrinsics. - At compile time, compiler macros and CPU detection are used to turn the abstract intrinsics into concrete intrinsic calls. Any intrinsics not available on the platform, either because the CPU does not support them @@ -183,7 +183,7 @@ yet supported as a universal intrinsic, then: 1. It should be added as a universal intrinsic for all platforms 2. If it does not have an equivalent instruction on other platforms (e.g. ``_mm512_mask_i32gather_ps`` in ``AVX512``), then no universal intrinsic - should be added and a platform-specific ``ufunc`` or a short helper fuction + should be added and a platform-specific ``ufunc`` or a short helper function should be written instead. If such a helper function is used, it must be wrapped with the feature macros, and a reasonable non-intrinsic fallback to be used by default. @@ -289,7 +289,7 @@ implementing and maintaining that platform's loop code. Discussion ---------- -Most of the discussion took place on the PR `gh-15228`_ to accecpt this NEP. 
+Most of the discussion took place on the PR `gh-15228`_ to accept this NEP. Discussion on the mailing list mentioned `VOLK`_ which was added to the section on related work. The question of maintainability also was raised both on the mailing list and in `gh-15228`_ and resolved as follows: diff --git a/doc/neps/nep-0040-legacy-datatype-impl.rst b/doc/neps/nep-0040-legacy-datatype-impl.rst index 7ea7f6df3..a6e74d7a0 100644 --- a/doc/neps/nep-0040-legacy-datatype-impl.rst +++ b/doc/neps/nep-0040-legacy-datatype-impl.rst @@ -1,7 +1,7 @@ .. _NEP40: ================================================ -NEP 40 — Legacy Datatype Implementation in NumPy +NEP 40 — Legacy datatype implementation in NumPy ================================================ :title: Legacy Datatype Implementation in NumPy @@ -82,7 +82,7 @@ Thus we have data types (mainly strings) with the properties that: 2. Array coercion should be able to discover the exact dtype, such as for ``np.array(["str1", 12.34], dtype="S")`` where NumPy discovers the resulting dtype as ``"S5"``. - (If the dtype argument is ommitted the behaviour is currently ill defined [gh-15327]_.) + (If the dtype argument is omitted the behaviour is currently ill defined [gh-15327]_.) A form similar to ``dtype="S"`` is ``dtype="datetime64"`` which can discover the unit: ``np.array(["2017-02"], dtype="datetime64")``. @@ -197,7 +197,7 @@ Currently ``np.dtype`` is a Python class with its instances being the To set the actual behaviour of these instances, a prototype instance is stored globally and looked up based on the ``dtype.typenum``. The singleton is used where possible. Where required it is copied and modified, for instance to change -endianess. +endianness. Parametric datatypes (strings, void, datetime, and timedelta) must store additional information such as string lengths, fields, or datetime units -- diff --git a/doc/neps/nep-0041-improved-dtype-support.rst b/doc/neps/nep-0041-improved-dtype-support.rst index d7a08562d..2fb907073 100644 --- a/doc/neps/nep-0041-improved-dtype-support.rst +++ b/doc/neps/nep-0041-improved-dtype-support.rst @@ -1,7 +1,7 @@ .. _NEP41: ================================================= -NEP 41 — First step towards a new Datatype System +NEP 41 — First step towards a new datatype system ================================================= :title: First step towards a new Datatype System @@ -284,7 +284,7 @@ in general, it is not safe:: >>> np.can_cast(np.float64, np.dtype[mp.mpf](dps=4), casting="safe") False -since a float64 has a higer precision than the ``mpf`` datatype with +since a float64 has a higher precision than the ``mpf`` datatype with ``dps=4``. Alternatively, we can say that:: @@ -765,7 +765,7 @@ Discussion See :ref:`NEP 40 <NEP40>` for a list of previous meetings and discussions. -Additional discussion around this specific NEP has occured on both +Additional discussion around this specific NEP has occurred on both the mailing list and the pull request: * `Mailing list discussion <https://mail.python.org/pipermail/numpy-discussion/2020-March/080481.html>`_ diff --git a/doc/neps/nep-0042-new-dtypes.rst b/doc/neps/nep-0042-new-dtypes.rst index bb85f1d10..c29172a28 100644 --- a/doc/neps/nep-0042-new-dtypes.rst +++ b/doc/neps/nep-0042-new-dtypes.rst @@ -214,7 +214,7 @@ which describes the casting from one DType to another. In :ref:`NEP 43 <NEP43>` this ``CastingImpl`` object is used unchanged to support universal functions. 
Note that the name ``CastingImpl`` here will be generically called -``ArrayMethod`` to accomodate both casting and universal functions. +``ArrayMethod`` to accommodate both casting and universal functions. ****************************************************************************** diff --git a/doc/neps/nep-0043-extensible-ufuncs.rst b/doc/neps/nep-0043-extensible-ufuncs.rst index cd73108e4..3312eb12c 100644 --- a/doc/neps/nep-0043-extensible-ufuncs.rst +++ b/doc/neps/nep-0043-extensible-ufuncs.rst @@ -1,7 +1,7 @@ .. _NEP43: ============================================================================== -NEP 43 — Enhancing the Extensibility of UFuncs +NEP 43 — Enhancing the extensibility of UFuncs ============================================================================== :title: Enhancing the Extensibility of UFuncs @@ -571,7 +571,7 @@ This stores all of the constant information that is part of the ``Context``, such as: * the ``DTypes`` -* the number of input and ouput arguments +* the number of input and output arguments * the ufunc signature (specific to generalized ufuncs, compare :ref:`NEP20`). Fortunately, most users and even ufunc implementers will not have to worry @@ -1233,7 +1233,7 @@ are the best solution: logic fails or is incorrect for a newly-added loop, the loop can add a new promoter to refine the logic. -The option of having each loop verify that no upcast occured is probably +The option of having each loop verify that no upcast occurred is probably the best alternative, but does not include the ability to dynamically adding new loops. diff --git a/doc/neps/nep-0044-restructuring-numpy-docs.rst b/doc/neps/nep-0044-restructuring-numpy-docs.rst index 229856547..fd41e0c2a 100644 --- a/doc/neps/nep-0044-restructuring-numpy-docs.rst +++ b/doc/neps/nep-0044-restructuring-numpy-docs.rst @@ -1,7 +1,7 @@ .. _NEP44: =================================================== -NEP 44 — Restructuring the NumPy Documentation +NEP 44 — Restructuring the NumPy documentation =================================================== :Author: Ralf Gommers diff --git a/doc/neps/nep-0045-c_style_guide.rst b/doc/neps/nep-0045-c_style_guide.rst index 5a2fcf946..9a6323873 100644 --- a/doc/neps/nep-0045-c_style_guide.rst +++ b/doc/neps/nep-0045-c_style_guide.rst @@ -1,7 +1,7 @@ .. _NEP45: ================================= -NEP 45 — C Style Guide +NEP 45 — C style guide ================================= :Author: Charles Harris <charlesr.harris@gmail.com> diff --git a/doc/neps/nep-0046-sponsorship-guidelines.rst b/doc/neps/nep-0046-sponsorship-guidelines.rst index b8b312aa5..8535cb554 100644 --- a/doc/neps/nep-0046-sponsorship-guidelines.rst +++ b/doc/neps/nep-0046-sponsorship-guidelines.rst @@ -1,7 +1,7 @@ .. _NEP46: ===================================== -NEP 46 — NumPy Sponsorship Guidelines +NEP 46 — NumPy sponsorship guidelines ===================================== :Author: Ralf Gommers <ralf.gommers@gmail.com> diff --git a/doc/neps/nep-0048-spending-project-funds.rst b/doc/neps/nep-0048-spending-project-funds.rst index 3571eef2d..d2924d4a9 100644 --- a/doc/neps/nep-0048-spending-project-funds.rst +++ b/doc/neps/nep-0048-spending-project-funds.rst @@ -1,7 +1,7 @@ .. 
_NEP48: ===================================== -NEP 48 — Spending NumPy Project Funds +NEP 48 — Spending NumPy project funds ===================================== :Author: Ralf Gommers <ralf.gommers@gmail.com>
diff --git a/doc/neps/nep-0049.rst b/doc/neps/nep-0049.rst index 6a17f33b5..4758edb35 100644 --- a/doc/neps/nep-0049.rst +++ b/doc/neps/nep-0049.rst @@ -3,10 +3,10 @@ NEP 49 — Data allocation strategies =================================== :Author: Matti Picus -:Status: Draft +:Status: Final :Type: Standards Track :Created: 2021-04-18 -:Resolution: http://numpy-discussion.10968.n7.nabble.com/NEP-49-Data-allocation-strategies-tt49185.html +:Resolution: https://mail.python.org/archives/list/numpy-discussion@python.org/thread/YZ3PNTXZUT27B6ITFAD3WRSM3T3SRVK4/#PKYXCTG4R5Q6LIRZC4SEWLNBM6GLRF26 Abstract @@ -43,10 +43,10 @@ override ``malloc``. The long CPython discussion of `BPO 18835`_ began with discussing the need for ``PyMem_Alloc32`` and ``PyMem_Alloc64``. The early conclusion was that the -cost (of wasted padding) vs. the benifit of aligned memory is best left to the +cost (of wasted padding) vs. the benefit of aligned memory is best left to the user, but then evolves into a discussion of various proposals to deal with memory allocations, including `PEP 445`_ `memory interfaces`_ to -``PyTraceMalloc_Track`` which apparently was explictly added for NumPy. +``PyTraceMalloc_Track`` which apparently was explicitly added for NumPy. Allowing users to implement different strategies via the NumPy C-API will enable exploration of this rich area of possible optimizations. The intention @@ -93,19 +93,21 @@ High level design Users who wish to change the NumPy data memory management routines will use :c:func:`PyDataMem_SetHandler`, which uses a :c:type:`PyDataMem_Handler` -structure to hold pointers to functions used to manage the data memory. +structure to hold pointers to functions used to manage the data memory. In +order to allow lifetime management of the ``context``, the structure is wrapped +in a ``PyCapsule``. Since a call to ``PyDataMem_SetHandler`` will change the default functions, but that function may be called during the lifetime of an ``ndarray`` object, each -``ndarray`` will carry with it the ``PyDataMem_Handler`` struct used at the -time of its instantiation, and these will be used to reallocate or free the -data memory of the instance. Internally NumPy may use ``memcpy`` or ``memset`` -on the pointer to the data memory. +``ndarray`` will carry with it the ``PyDataMem_Handler``-wrapped PyCapsule used +at the time of its instantiation, and these will be used to reallocate or free +the data memory of the instance. Internally NumPy may use ``memcpy`` or +``memset`` on the pointer to the data memory. The name of the handler will be exposed on the python level via a ``numpy.core.multiarray.get_handler_name(arr)`` function. If called as ``numpy.core.multiarray.get_handler_name()`` it will return the name of the -global handler that will be used to allocate data for the next new `ndarrray`. +handler that will be used to allocate data for the next new `ndarray`. NumPy C-API functions ===================== @@ -150,20 +152,19 @@ NumPy C-API functions 15780_ and 15788_ but has not yet been resolved. When it is this NEP should be revisited. -.. c:function:: const PyDataMem_Handler * PyDataMem_SetHandler(PyDataMem_Handler *handler) +.. c:function:: PyObject * PyDataMem_SetHandler(PyObject *handler) Sets a new allocation policy.
If the input value is ``NULL``, will reset - the policy to the default. Returns the previous policy, ``NULL`` if the - previous policy was the default. We wrap the user-provided functions + the policy to the default. Return the previous policy, or + return NULL if an error has occurred. We wrap the user-provided functions so they will still call the Python and NumPy memory management callback hooks. All the function pointers must be filled in, ``NULL`` is not accepted. -.. c:function:: const PyDataMem_Handler * PyDataMem_GetHandler(PyArrayObject *obj) +.. c:function:: const PyObject * PyDataMem_GetHandler() - Return the ``PyDataMem_Handler`` used by the - ``PyArrayObject``. If ``NULL``, return the handler - that will be used to allocate data for the next ``PyArrayObject``. + Return the current policy that will be used to allocate data for the + next ``PyArrayObject``. On failure, return ``NULL``. ``PyDataMem_Handler`` thread safety and lifetime ================================================
diff --git a/doc/neps/nep-template.rst b/doc/neps/nep-template.rst index 42f717c7a..bbb48eaae 100644 --- a/doc/neps/nep-template.rst +++ b/doc/neps/nep-template.rst @@ -1,5 +1,5 @@ ================================= -NEP X — Template and Instructions +NEP X — Template and instructions ================================= :Author: <list of authors' real names and optionally, email addresses>
diff --git a/doc/preprocess.py b/doc/preprocess.py new file mode 100755 index 000000000..870d3e123 --- /dev/null +++ b/doc/preprocess.py @@ -0,0 +1,51 @@ +#!/usr/bin/env python3 +import subprocess +import os +import sys +from string import Template + +def main(): + doxy_gen(os.path.abspath(os.path.join('..'))) + +def doxy_gen(root_path): + """ + Generate Doxygen configuration file. + """ + confs = doxy_config(root_path) + build_path = os.path.join(root_path, "doc", "build", "doxygen") + gen_path = os.path.join(build_path, "Doxyfile") + if not os.path.exists(build_path): + os.makedirs(build_path) + with open(gen_path, 'w') as fd: + fd.write("# Please don't edit! This config file was autogenerated by ") + fd.write(f"doxy_gen({root_path}) in doc/preprocess.py.\n") + for c in confs: + fd.write(c) + +class DoxyTpl(Template): + delimiter = '@' + +def doxy_config(root_path): + """ + Fetch all Doxygen sub-config files and gather them with the main config file. + """ + confs = [] + dsrc_path = os.path.join(root_path, "doc", "source") + sub = dict(ROOT_DIR=root_path) + with open(os.path.join(dsrc_path, "doxyfile"), "r") as fd: + conf = DoxyTpl(fd.read()) + confs.append(conf.substitute(CUR_DIR=dsrc_path, **sub)) + + for dpath, _, files in os.walk(root_path): + if ".doxyfile" not in files: + continue + conf_path = os.path.join(dpath, ".doxyfile") + with open(conf_path, "r") as fd: + conf = DoxyTpl(fd.read()) + confs.append(conf.substitute(CUR_DIR=dpath, **sub)) + return confs + + +if __name__ == "__main__": + main() +
diff --git a/doc/release/upcoming_changes/17530.improvement.rst b/doc/release/upcoming_changes/17530.improvement.rst new file mode 100644 index 000000000..07a23f0e5 --- /dev/null +++ b/doc/release/upcoming_changes/17530.improvement.rst @@ -0,0 +1,5 @@ +`ctypeslib.load_library` can now take any path-like object +----------------------------------------------------------------------- +All parameters can now take any :term:`python:path-like object`. +This includes the likes of strings, bytes and objects implementing the +:meth:`__fspath__<os.PathLike.__fspath__>` protocol.
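A minimal sketch of the new behaviour (the library name and directory below are hypothetical; any path-like object works the same way):

.. code-block:: python

    import numpy as np
    from pathlib import Path

    # Both the library name and the loader path may now be path-like
    # objects instead of plain strings.
    lib = np.ctypeslib.load_library(Path("libmymath"), Path("."))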
diff --git a/doc/release/upcoming_changes/17582.new_feature.rst b/doc/release/upcoming_changes/17582.new_feature.rst new file mode 100644 index 000000000..c2426330c --- /dev/null +++ b/doc/release/upcoming_changes/17582.new_feature.rst @@ -0,0 +1,10 @@ +NEP 49 configurable allocators +------------------------------ +As detailed in `NEP 49`_, the function used for allocation of the data segment +of an ndarray can be changed. The policy can be set globally or in a context. +For more information see the NEP and the :ref:`data_memory` reference docs. +Also added a ``NUMPY_WARN_IF_NO_MEM_POLICY`` override to warn on dangerous +transfers of ownership by setting ``NPY_ARRAY_OWNDATA``. + +.. _`NEP 49`: https://numpy.org/neps/nep-0049.html +
diff --git a/doc/release/upcoming_changes/18884.new_feature.rst b/doc/release/upcoming_changes/18884.new_feature.rst new file mode 100644 index 000000000..41503b00e --- /dev/null +++ b/doc/release/upcoming_changes/18884.new_feature.rst @@ -0,0 +1,7 @@ +Generating C/C++ API reference documentation from comment blocks is now possible +--------------------------------------------------------------------------------- +This feature depends on Doxygen_ in the generation process and on Breathe_ +to integrate it with Sphinx. + +.. _`Doxygen`: https://www.doxygen.nl/index.html +.. _`Breathe`: https://breathe.readthedocs.io/en/latest/
diff --git a/doc/release/upcoming_changes/19355.new_feature.rst b/doc/release/upcoming_changes/19355.new_feature.rst new file mode 100644 index 000000000..cfa50b7a1 --- /dev/null +++ b/doc/release/upcoming_changes/19355.new_feature.rst @@ -0,0 +1,13 @@ +``bit_count`` to compute the number of 1-bits in an integer +----------------------------------------------------------- + +Computes the number of 1-bits in the absolute value of the input. +This works on all NumPy integer types. Analogous to the builtin +``int.bit_count`` or ``popcount`` in C++. + +.. code-block:: python + + >>> np.uint32(1023).bit_count() + 10 + >>> np.int32(-127).bit_count() + 7
diff --git a/doc/release/upcoming_changes/19478.performance.rst b/doc/release/upcoming_changes/19478.performance.rst new file mode 100644 index 000000000..6a389c20e --- /dev/null +++ b/doc/release/upcoming_changes/19478.performance.rst @@ -0,0 +1,11 @@ +Vectorize umath module using AVX-512 +------------------------------------- + +By leveraging the Intel Short Vector Math Library (SVML), 18 umath functions +(``exp2``, ``log2``, ``log10``, ``expm1``, ``log1p``, ``cbrt``, ``sin``, +``cos``, ``tan``, ``arcsin``, ``arccos``, ``arctan``, ``sinh``, ``cosh``, +``tanh``, ``arcsinh``, ``arccosh``, ``arctanh``) are vectorized using the AVX-512 +instruction set for both single and double precision implementations. This +change is currently enabled only for Linux users and on processors with +the AVX-512 instruction set. It provides an average speedup of 32x and 14x for +single and double precision functions respectively.
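As a rough illustration of where the new loops apply (whether the fast path is taken depends on the platform and CPU, as noted above):

.. code-block:: python

    import numpy as np

    x = np.linspace(0.0, 1.0, 1_000_000, dtype=np.float32)

    # On Linux with an AVX-512 capable CPU these calls dispatch to the
    # SVML-based loops; elsewhere they fall back to the generic loops.
    y = np.cos(x)
    z = np.log1p(x)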
diff --git a/doc/release/upcoming_changes/19655.change.rst b/doc/release/upcoming_changes/19665.change.rst index 2c2315dd2..2c2315dd2 100644 --- a/doc/release/upcoming_changes/19655.change.rst +++ b/doc/release/upcoming_changes/19665.change.rst
diff --git a/doc/release/upcoming_changes/19687.change.rst b/doc/release/upcoming_changes/19687.change.rst new file mode 100644 index 000000000..c7f7512b6 --- /dev/null +++ b/doc/release/upcoming_changes/19687.change.rst @@ -0,0 +1,8 @@ +str/repr of complex dtypes now include space after punctuation +-------------------------------------------------------------- + +The repr of ``np.dtype({"names": ["a"], "formats": [int], "offsets": [2]})`` is +now ``dtype({'names': ['a'], 'formats': ['<i8'], 'offsets': [2], 'itemsize': 10})``, +whereas spaces were previously omitted after colons and between fields. + +The old behavior can be restored via ``np.set_printoptions(legacy="1.21")``.
diff --git a/doc/release/upcoming_changes/19754.new_feature.rst b/doc/release/upcoming_changes/19754.new_feature.rst new file mode 100644 index 000000000..4e91e4cb3 --- /dev/null +++ b/doc/release/upcoming_changes/19754.new_feature.rst @@ -0,0 +1,7 @@ +A ``.clang-format`` file has been added +--------------------------------------- +Clang-format is a C/C++ code formatter. Together with the added +``.clang-format`` file, it produces code close enough to the NumPy +C_STYLE_GUIDE for general use. Clang-format version 12+ is required +due to the use of several new features; it is available in +Fedora 34 and Ubuntu Focal, among other distributions.
diff --git a/doc/release/upcoming_changes/19805.new_feature.rst b/doc/release/upcoming_changes/19805.new_feature.rst new file mode 100644 index 000000000..f59409254 --- /dev/null +++ b/doc/release/upcoming_changes/19805.new_feature.rst @@ -0,0 +1,5 @@ +Symbolic parser for Fortran dimension specifications +---------------------------------------------------- +A new symbolic parser has been added to f2py in order to correctly parse +dimension specifications. The parser is the basis for future improvements +and provides compatibility with Draft Fortran 202x.
diff --git a/doc/release/upcoming_changes/19879.new_feature.rst b/doc/release/upcoming_changes/19879.new_feature.rst new file mode 100644 index 000000000..c6624138b --- /dev/null +++ b/doc/release/upcoming_changes/19879.new_feature.rst @@ -0,0 +1,15 @@ +``ndarray``, ``dtype`` and ``number`` are now runtime-subscriptable +------------------------------------------------------------------- +Mimicking :pep:`585`, the `~numpy.ndarray`, `~numpy.dtype` and `~numpy.number` +classes are now subscriptable for Python 3.9 and later. +Consequently, expressions that were previously only allowed in .pyi stub files +or with the help of ``from __future__ import annotations`` are now also legal +at runtime. + +.. code-block:: python + + >>> import numpy as np + >>> from typing import Any + + >>> np.ndarray[Any, np.dtype[np.float64]] + numpy.ndarray[typing.Any, numpy.dtype[numpy.float64]]
diff --git a/doc/release/upcoming_changes/19921.deprecation.rst b/doc/release/upcoming_changes/19921.deprecation.rst new file mode 100644 index 000000000..17fa0f605 --- /dev/null +++ b/doc/release/upcoming_changes/19921.deprecation.rst @@ -0,0 +1,3 @@ +* the misspelled keyword argument ``delimitor`` of + ``numpy.ma.mrecords.fromtextfile()`` has been changed to + ``delimiter``; using the misspelled name will emit a deprecation warning.
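A sketch of the corrected spelling (``data.txt`` is a hypothetical input file):

.. code-block:: python

    import numpy as np

    # The keyword is now spelled ``delimiter``; the old ``delimitor``
    # still works but emits a DeprecationWarning.
    rec = np.ma.mrecords.fromtextfile("data.txt", delimiter=",")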
diff --git a/doc/release/upcoming_changes/20000.deprecation.rst b/doc/release/upcoming_changes/20000.deprecation.rst new file mode 100644 index 000000000..e0a56cd47 --- /dev/null +++ b/doc/release/upcoming_changes/20000.deprecation.rst @@ -0,0 +1,5 @@ +Passing boolean ``kth`` values to (arg-)partition has been deprecated +--------------------------------------------------------------------- +`~numpy.partition` and `~numpy.argpartition` would previously accept boolean +values for the ``kth`` parameter, which would subsequently be converted into +integers. This behavior has now been deprecated.
diff --git a/doc/release/upcoming_changes/20027.improvement.rst b/doc/release/upcoming_changes/20027.improvement.rst new file mode 100644 index 000000000..86b3bed74 --- /dev/null +++ b/doc/release/upcoming_changes/20027.improvement.rst @@ -0,0 +1,17 @@ +Missing parameters have been added to the ``nan<x>`` functions +-------------------------------------------------------------- +A number of the ``nan<x>`` functions previously lacked parameters that were +present in their ``<x>``-based counterpart, *e.g.* the ``where`` parameter was +present in `~numpy.mean` but absent from `~numpy.nanmean`. + +The following parameters have now been added to the ``nan<x>`` functions: + +* nanmin: ``initial`` & ``where`` +* nanmax: ``initial`` & ``where`` +* nanargmin: ``keepdims`` & ``out`` +* nanargmax: ``keepdims`` & ``out`` +* nansum: ``initial`` & ``where`` +* nanprod: ``initial`` & ``where`` +* nanmean: ``where`` +* nanvar: ``where`` +* nanstd: ``where``
diff --git a/doc/release/upcoming_changes/20049.change.rst b/doc/release/upcoming_changes/20049.change.rst new file mode 100644 index 000000000..e1f08b343 --- /dev/null +++ b/doc/release/upcoming_changes/20049.change.rst @@ -0,0 +1,5 @@ +Corrected ``advance`` in ``PCG64DXSM`` and ``PCG64`` +---------------------------------------------------- +Fixed a bug in the ``advance`` method of ``PCG64DXSM`` and ``PCG64``. The bug only +affects results when the step was larger than :math:`2^{64}` on platforms +that do not support 128-bit integers (e.g., Windows and 32-bit Linux).
diff --git a/doc/release/upcoming_changes/20201.deprecation.rst b/doc/release/upcoming_changes/20201.deprecation.rst new file mode 100644 index 000000000..db8cda21f --- /dev/null +++ b/doc/release/upcoming_changes/20201.deprecation.rst @@ -0,0 +1,5 @@ +The ``np.MachAr`` class has been deprecated +------------------------------------------- +The `~numpy.MachAr` class and `finfo.machar <numpy.finfo>` attribute have +been deprecated. Users are encouraged to access the property of interest +directly from the corresponding `~numpy.finfo` attribute.
diff --git a/doc/release/upcoming_changes/20217.improvement.rst b/doc/release/upcoming_changes/20217.improvement.rst new file mode 100644 index 000000000..28e5c8ff7 --- /dev/null +++ b/doc/release/upcoming_changes/20217.improvement.rst @@ -0,0 +1,10 @@ +Annotating the main NumPy namespace +-------------------------------------- +Starting from the 1.20 release, PEP 484 type annotations have been included +for parts of the NumPy library; annotating the remaining functions is a +work in progress. With the release of 1.22 this process has been completed for +the main NumPy namespace, which is now fully annotated. + +Besides the main namespace, a limited number of sub-packages contain +annotations as well. This includes, among others, `numpy.testing`, +`numpy.linalg` and `numpy.random` (available since 1.21).
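For example, with the namespace annotated, a static type checker such as mypy can verify code like the following (a sketch; ``npt.NDArray`` requires NumPy >= 1.21):

.. code-block:: python

    import numpy as np
    import numpy.typing as npt

    def normalize(a: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]:
        # mypy checks this body against NumPy's bundled annotations.
        return (a - a.mean()) / a.std()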
diff --git a/doc/source/conf.py b/doc/source/conf.py index 41b5cee25..a7a885c34 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -1,7 +1,7 @@ -# -*- coding: utf-8 -*- import os import re import sys +import importlib # Minimum version, enforced by sphinx needs_sphinx = '3.2.0' @@ -86,6 +86,16 @@ extensions = [ 'sphinx.ext.mathjax', ] +skippable_extensions = [ + ('breathe', 'skip generating C/C++ API from comment blocks'), +] +for ext, warn in skippable_extensions: + ext_exist = importlib.util.find_spec(ext) is not None + if ext_exist: + extensions.append(ext) + else: + print(f"Unable to find Sphinx extension '{ext}', {warn}.") + # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] @@ -477,3 +487,11 @@ class NumPyLexer(CLexer): inherit, ], } + + +# ----------------------------------------------------------------------------- +# Breathe & Doxygen +# ----------------------------------------------------------------------------- +breathe_projects = dict(numpy=os.path.join("..", "build", "doxygen", "xml")) +breathe_default_project = "numpy" +breathe_default_members = ("members", "undoc-members", "protected-members")
diff --git a/doc/source/dev/alignment.rst b/doc/source/dev/alignment.rst new file mode 100644 index 000000000..bb1198ebf --- /dev/null +++ b/doc/source/dev/alignment.rst @@ -0,0 +1,113 @@ +.. currentmodule:: numpy + +.. _alignment: + +**************** +Memory Alignment +**************** + +NumPy alignment goals +===================== + +There are three use-cases related to memory alignment in NumPy (as of 1.14): + + 1. Creating :term:`structured datatypes <structured data type>` with + :term:`fields <field>` aligned like in a C-struct. + 2. Speeding up copy operations by using :class:`uint` assignment instead of + ``memcpy``. + 3. Guaranteeing safe aligned access for ufuncs/setitem/casting code. + +NumPy uses two different forms of alignment to achieve these goals: +"True alignment" and "Uint alignment". + +"True" alignment refers to the architecture-dependent alignment of an +equivalent C-type in C. For example, on x64 systems :attr:`float64` is +equivalent to ``double`` in C. On most systems, this has either an alignment of +4 or 8 bytes (and this can be controlled in GCC by the option +``-malign-double``). A variable is aligned in memory if its memory offset is a +multiple of its alignment. On some systems (e.g. sparc) memory alignment is +required; on others, it gives a speedup. + +"Uint" alignment depends on the size of a datatype. It is defined to be the +"True alignment" of the uint used by NumPy's copy-code to copy the datatype, or +undefined/unaligned if there is no equivalent uint. Currently, NumPy uses +``uint8``, ``uint16``, ``uint32``, ``uint64``, and two ``uint64`` assignments to copy data of +size 1, 2, 4, 8, and 16 bytes respectively, and all other sized datatypes cannot +be uint-aligned. + +For example, on a (typical Linux x64 GCC) system, the NumPy :attr:`complex64` +datatype is implemented as ``struct { float real, imag; }``. This has "true" +alignment of 4 and "uint" alignment of 8 (equal to the true alignment of +``uint64``).
+ +Some cases where uint and true alignment are different (default GCC Linux): + ====== ========= ======== ======== + arch type true-aln uint-aln + ====== ========= ======== ======== + x86_64 complex64 4 8 + x86_64 float128 16 8 + x86 float96 4 \- + ====== ========= ======== ======== + + +Variables in NumPy which control and describe alignment +======================================================= + +There are four relevant uses of the word ``align`` in NumPy: + + * The :attr:`dtype.alignment` attribute (``descr->alignment`` in C). This is + meant to reflect the "true alignment" of the type. It has arch-dependent + default values for all datatypes, except for the structured types created + with ``align=True`` as described below. + * The ``ALIGNED`` flag of an ndarray, computed in ``IsAligned`` and checked + by :c:func:`PyArray_ISALIGNED`. This is computed from + :attr:`dtype.alignment`. + It is set to ``True`` if every item in the array is at a memory location + consistent with :attr:`dtype.alignment`, which is the case if the + ``data ptr`` and all strides of the array are multiples of that alignment. + * The ``align`` keyword of the dtype constructor, which only affects + :ref:`structured_arrays`. If the structure's field offsets are not manually + provided, NumPy determines offsets automatically. In that case, + ``align=True`` pads the structure so that each field is "true" aligned in + memory and sets :attr:`dtype.alignment` to be the largest of the field + "true" alignments. This is like what C-structs usually do. Otherwise, if + offsets or itemsize were manually provided, ``align=True`` simply checks that + all the fields are "true" aligned and that the total itemsize is a multiple + of the largest field alignment. In either case :attr:`dtype.isalignedstruct` + is also set to True. + * ``IsUintAligned`` is used to determine if an ndarray is "uint aligned" in + an analogous way to how ``IsAligned`` checks for true alignment. + +Consequences of alignment +========================= + +Here is how the variables above are used: + + 1. Creating aligned structs: To know how to offset a field when + ``align=True``, NumPy looks up ``field.dtype.alignment``. This includes + fields that are nested structured arrays. + 2. Ufuncs: If the ``ALIGNED`` flag of an array is False, ufuncs will + buffer/cast the array before evaluation. This is needed since ufunc inner + loops access raw elements directly, which might fail on some archs if the + elements are not true-aligned. + 3. Getitem/setitem/copyswap functions: Similar to ufuncs, these functions + generally have two code paths. If ``ALIGNED`` is False they will + use a code path that buffers the arguments so they are true-aligned. + 4. Strided copy code: Here, "uint alignment" is used instead. If the itemsize + of an array is equal to 1, 2, 4, 8 or 16 bytes and the array is uint + aligned then NumPy will instead do ``*(uintN*)dst = *(uintN*)src`` for + appropriate N. Otherwise, NumPy copies by doing ``memcpy(dst, src, N)``. + 5. Nditer code: Since this often calls the strided copy code, it must + check for "uint alignment". + 6. Cast code: This checks for "true" alignment, as it does + ``*dst = CASTFUNC(*src)`` if aligned. Otherwise, it does + ``memmove(srcval, src); dstval = CASTFUNC(srcval); memmove(dst, dstval)`` + where dstval/srcval are aligned.
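The effect of these variables can be inspected from Python; a small sketch (the values shown are for a typical x86_64 Linux build):

.. code-block:: python

    import numpy as np

    # align=True pads the struct so each field is "true" aligned,
    # as a C compiler would.
    dt = np.dtype({'names': ['a', 'b'],
                   'formats': [np.uint8, np.float64]}, align=True)
    print(dt.alignment)          # 8: the largest field alignment
    print(dt.isalignedstruct)    # True
    print(dt.itemsize)           # 16: field 'b' starts at offset 8

    arr = np.zeros(3, dtype=dt)
    print(arr.flags['ALIGNED'])  # True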
+ +Note that the strided-copy and strided-cast code are deeply intertwined and so +any arrays being processed by them must be both uint and true aligned, even +though the copy-code only needs uint alignment and the cast code only true +alignment. If there is ever a big rewrite of this code it would be good to +allow them to use different alignments. + +
diff --git a/doc/source/dev/development_advanced_debugging.rst b/doc/source/dev/development_advanced_debugging.rst index fa4014fdb..18a7f6ae9 100644 --- a/doc/source/dev/development_advanced_debugging.rst +++ b/doc/source/dev/development_advanced_debugging.rst @@ -3,8 +3,8 @@ Advanced debugging tools ======================== If you reached here, you want to dive into, or use, more advanced tooling. -This is usually not necessary for first time contributers and most -day-to-day developement. +This is usually not necessary for first time contributors and most +day-to-day development. These are used more rarely, for example close to a new NumPy release, or when a large or particular complex change was made. @@ -25,7 +25,7 @@ narrow down. We do not expect any of these tools to be run by most contributors. However, you can ensure that we can track down such issues more easily: -* Tests should cover all code paths, incluing error paths. +* Tests should cover all code paths, including error paths. * Try to write short and simple tests. If you have a very complicated test consider creating an additional simpler test as well. This can be helpful, because often it is only easy to find which test @@ -112,7 +112,7 @@ where ``PYTHONMALLOC=malloc`` is necessary to avoid false positives from python itself. Depending on the system and valgrind version, you may see more false positives. ``valgrind`` supports "suppressions" to ignore some of these, and Python does -have a supression file (and even a compile time option) which may help if you +have a suppression file (and even a compile time option) which may help if you find it necessary. Valgrind helps:
diff --git a/doc/source/dev/development_environment.rst b/doc/source/dev/development_environment.rst index 665198c69..37cf6f7af 100644 --- a/doc/source/dev/development_environment.rst +++ b/doc/source/dev/development_environment.rst @@ -122,7 +122,7 @@ source tree is to use:: NumPy uses a series of tests to probe the compiler and libc libraries for -funtions. The results are stored in ``_numpyconfig.h`` and ``config.h`` files +functions. The results are stored in ``_numpyconfig.h`` and ``config.h`` files using ``HAVE_XXX`` definitions. These tests are run during the ``build_src`` phase of the ``_multiarray_umath`` module in the ``generate_config_h`` and ``generate_numpyconfig_h`` functions. Since the output of these calls includes
diff --git a/doc/source/dev/examples/.doxyfile b/doc/source/dev/examples/.doxyfile new file mode 100644 index 000000000..966c1b636 --- /dev/null +++ b/doc/source/dev/examples/.doxyfile @@ -0,0 +1,2 @@ +INPUT += @CUR_DIR +INCLUDE_PATH += @CUR_DIR
diff --git a/doc/source/dev/examples/doxy_class.hpp b/doc/source/dev/examples/doxy_class.hpp new file mode 100644 index 000000000..ceba63487 --- /dev/null +++ b/doc/source/dev/examples/doxy_class.hpp @@ -0,0 +1,21 @@ +/** + * Template to represent limbo numbers. + * + * Specializations for integer types that are part of nowhere. + * It doesn't support any real types. + * + * @param Tp Type of the integer. Required to be an integer type. + * @param N Number of elements.
+*/ +template<typename Tp, std::size_t N> +class DoxyLimbo { + public: + /// Default constructor. Initialize nothing. + DoxyLimbo(); + /// Set default behavior for copying the limbo. + DoxyLimbo(const DoxyLimbo<Tp, N> &l); + /// Returns the raw data for the limbo. + const Tp *data(); + protected: + Tp p_data[N]; ///< Example for inline comment. +};
diff --git a/doc/source/dev/examples/doxy_func.h b/doc/source/dev/examples/doxy_func.h new file mode 100644 index 000000000..792a9d1b7 --- /dev/null +++ b/doc/source/dev/examples/doxy_func.h @@ -0,0 +1,11 @@ +/** + * This is a simple brief. + * + * And the details go here. + * Multiple lines are welcome. + * + * @param num leave a comment for parameter num. + * @param str leave a comment for the second parameter. + * @return leave a comment for the returned value. + */ +int doxy_javadoc_example(int num, const char *str);
diff --git a/doc/source/dev/examples/doxy_rst.h b/doc/source/dev/examples/doxy_rst.h new file mode 100644 index 000000000..6ab4a0775 --- /dev/null +++ b/doc/source/dev/examples/doxy_rst.h @@ -0,0 +1,15 @@ +/** + * A comment block contains reST markup. + * @rst + * .. note:: + * + * Thanks to Breathe_, we were able to bring it to Doxygen_ + * + * Some example code:: + * + * int example(int x) { + * return x * 2; + * } + * @endrst + */ +void doxy_reST_example(void);
diff --git a/doc/source/dev/howto-docs.rst b/doc/source/dev/howto-docs.rst index 3156d3452..93fec509c 100644 --- a/doc/source/dev/howto-docs.rst +++ b/doc/source/dev/howto-docs.rst @@ -59,8 +59,8 @@ Obvious **wording** mistakes (like leaving out a "not") fall into the typo category, but other rewordings -- even for grammar -- require a judgment call, which raises the bar. Test the waters by first presenting the fix as an issue. -Some functions/objects like numpy.ndarray.transpose, numpy.array etc. defined in -C-extension modules have their docstrings defined seperately in `_add_newdocs.py +Some functions/objects like numpy.ndarray.transpose, numpy.array etc. defined in +C-extension modules have their docstrings defined separately in `_add_newdocs.py <https://github.com/numpy/numpy/blob/main/numpy/core/_add_newdocs.py>`__ ********************** @@ -72,7 +72,7 @@ Your frustrations using our documents are our best guide to what needs fixing. If you write a missing doc you join the front line of open source, but it's a meaningful contribution just to let us know what's missing. If you want to compose a doc, run your thoughts by the `mailing list -<https://mail.python.org/mailman/listinfo/numpy-discussion>`__ for futher +<https://mail.python.org/mailman/listinfo/numpy-discussion>`__ for further ideas and feedback. If you want to alert us to a gap, `open an issue <https://github.com/numpy/numpy/issues>`__. See `this issue <https://github.com/numpy/numpy/issues/15760>`__ for an example. @@ -215,6 +215,219 @@ Note that for documentation within NumPy, it is not necessary to do Please use the ``numpydoc`` :ref:`formatting standard <numpydoc:format>` as shown in their :ref:`example <numpydoc:example>`. +.. _doc_c_code: + +Documenting C/C++ Code +====================== + +NumPy uses Doxygen_ to parse specially-formatted C/C++ comment blocks. This generates +XML files, which are converted by Breathe_ into RST, which is used by Sphinx. + +**It takes three steps to complete the documentation process**: + +1.
Writing the comment blocks +----------------------------- + +Although no single commenting style has been settled on yet, the Javadoc style +is preferred over the others due to its similarity to the +existing non-indexed comment blocks. + +.. note:: + Please see `"Documenting the code" <https://www.doxygen.nl/manual/docblocks.html>`__. + +**This is what Javadoc style looks like**: + +.. literalinclude:: examples/doxy_func.h + +**And here is how it is rendered**: + +.. doxygenfunction:: doxy_javadoc_example + +**For line comments, you can use a triple forward slash. For example**: + +.. literalinclude:: examples/doxy_class.hpp + +**And here is how it is rendered**: + +.. doxygenclass:: DoxyLimbo + +Common Doxygen Tags: +++++++++++++++++++++ + +.. note:: + For more tags/commands, please take a look at https://www.doxygen.nl/manual/commands.html + +``@brief`` + +Starts a paragraph that serves as a brief description. By default the first sentence +of the documentation block is automatically treated as a brief description, since +the option `JAVADOC_AUTOBRIEF <https://www.doxygen.nl/manual/config.html#cfg_javadoc_autobrief>`__ +is enabled in the Doxygen configuration. + +``@details`` + +Just like ``@brief`` starts a brief description, ``@details`` starts the detailed description. +You can also just start a new paragraph (blank line), in which case the ``@details`` command is not needed. + +``@param`` + +Starts a parameter description for a function parameter with name <parameter-name>, +followed by a description of the parameter. The existence of the parameter is checked +and a warning is given if the documentation of this (or any other) parameter is missing +or not present in the function declaration or definition. + +``@return`` + +Starts a return value description for a function. +Multiple adjacent ``@return`` commands will be joined into a single paragraph. +The ``@return`` description ends when a blank line or some other sectioning command is encountered. + +``@code/@endcode`` + +Starts/Ends a block of code. A code block is treated differently from ordinary text. +It is interpreted as source code. + +``@rst/@endrst`` + +Starts/Ends a block of reST markup. + +Example +~~~~~~~ +**Take a look at the following example**: + +.. literalinclude:: examples/doxy_rst.h + +**And here is how it is rendered**: + +.. doxygenfunction:: doxy_reST_example + +2. Feeding Doxygen +------------------ + +Not all header files are collected automatically. You have to add the desired +C/C++ header paths within the sub-config files of Doxygen. + +Sub-config files have the unique name ``.doxyfile``, which you can usually find near +directories that contain documented headers. You need to create a new config file if +there is not one located in a path close (within two directory levels) to the headers you want to add. + +Sub-config files can accept any of the Doxygen_ `configuration options <https://www.doxygen.nl/manual/config.html>`__, +but do not override or re-initialize any configuration option; +rather, only use the concatenation operator "+=". For example:: + + # to specify certain headers + INPUT += @CUR_DIR/header1.h \ + @CUR_DIR/header2.h + # to add all headers in a certain path + INPUT += @CUR_DIR/to/headers + # to define certain macros + PREDEFINED += C_MACRO(X)=X + # to enable certain branches + PREDEFINED += NPY_HAVE_FEATURE \ + NPY_HAVE_FEATURE2 + +.. note:: + @CUR_DIR is a template constant that returns the directory + path of the sub-config file. + +3.
Inclusion directives +----------------------- + +Breathe_ provides a wide range of custom directives to allow +converting the documents generated by Doxygen_ into reST files. + +.. note:: + For more information, please check out "`Directives & Config Variables <https://breathe.readthedocs.io/en/latest/directives.html>`__" + +Common directives: +++++++++++++++++++ + +``doxygenfunction`` + +This directive generates the appropriate output for a single function. +The function name is required to be unique in the project. + +.. code:: + + .. doxygenfunction:: <function name> + :outline: + :no-link: + +Check out the `example <https://breathe.readthedocs.io/en/latest/function.html#function-example>`__ +to see it in action. + + +``doxygenclass`` + +This directive generates the appropriate output for a single class. +It takes the standard project, path, outline and no-link options and +additionally the members, protected-members, private-members, undoc-members, +membergroups and members-only options: + +.. code:: + + .. doxygenclass:: <class name> + :members: [...] + :protected-members: + :private-members: + :undoc-members: + :membergroups: ... + :members-only: + :outline: + :no-link: + +Check out the `doxygenclass documentation <https://breathe.readthedocs.io/en/latest/class.html#class-example>`__ +for more details and to see it in action. + +``doxygennamespace`` + +This directive generates the appropriate output for the contents of a namespace. +It takes the standard project, path, outline and no-link options and additionally the content-only, +members, protected-members, private-members and undoc-members options. +To reference a nested namespace, the full namespaced path must be provided, +e.g. foo::bar for the bar namespace inside the foo namespace. + +.. code:: + + .. doxygennamespace:: <namespace> + :content-only: + :outline: + :members: + :protected-members: + :private-members: + :undoc-members: + :no-link: + +Check out the `doxygennamespace documentation <https://breathe.readthedocs.io/en/latest/namespace.html#namespace-example>`__ +for more details and to see it in action. + +``doxygengroup`` + +This directive generates the appropriate output for the contents of a Doxygen group. +A Doxygen group can be declared with specific Doxygen markup in the source comments +as covered in the Doxygen `grouping documentation <https://www.doxygen.nl/manual/grouping.html>`__. + +It takes the standard project, path, outline and no-link options and additionally the +content-only, members, protected-members, private-members and undoc-members options. + +.. code:: + + .. doxygengroup:: <group name> + :content-only: + :outline: + :members: + :protected-members: + :private-members: + :undoc-members: + :no-link: + :inner: + +Check out the `doxygengroup documentation <https://breathe.readthedocs.io/en/latest/group.html#group-example>`__ +for more details and to see it in action. + +.. _`Doxygen`: https://www.doxygen.nl/index.html +.. _`Breathe`: https://breathe.readthedocs.io/en/latest/ + ********************* Documentation reading
diff --git a/doc/source/dev/howto_build_docs.rst b/doc/source/dev/howto_build_docs.rst index 884cf7935..b175926da 100644 --- a/doc/source/dev/howto_build_docs.rst +++ b/doc/source/dev/howto_build_docs.rst @@ -58,18 +58,28 @@ new virtual environment is recommended.
Dependencies ^^^^^^^^^^^^ -All of the necessary dependencies for building the NumPy docs can be installed -with:: +All of the necessary dependencies for building the NumPy docs except for +Doxygen_ can be installed with:: pip install -r doc_requirements.txt -We currently use Sphinx_ for generating the API and reference -documentation for NumPy. In addition, building the documentation requires -the Sphinx extension `plot_directive`, which is shipped with +We currently use Sphinx_ along with Doxygen_ for generating the API and +reference documentation for NumPy. In addition, building the documentation +requires the Sphinx extension `plot_directive`, which is shipped with :doc:`Matplotlib <matplotlib:index>`. We also use numpydoc_ to render docstrings in the generated API documentation. :doc:`SciPy <scipy:index>` is installed since some parts of the documentation require SciPy functions. +For installing Doxygen_, please check the official +`download <https://www.doxygen.nl/download.html#srcbin>`_ and +`installation <https://www.doxygen.nl/manual/install.html>`_ pages, or, if you +are using Linux, you can install it through your distribution's package manager. + +.. note:: + + Try to install a newer version of Doxygen_ (> 1.8.10), otherwise you may get some + warnings during the build. + Submodules ^^^^^^^^^^ @@ -80,6 +90,7 @@ additional parts required for building the documentation:: .. _Sphinx: http://www.sphinx-doc.org/ .. _numpydoc: https://numpydoc.readthedocs.io/en/latest/index.html +.. _Doxygen: https://www.doxygen.nl/index.html Instructions ------------
diff --git a/doc/source/dev/internals.code-explanations.rst b/doc/source/dev/internals.code-explanations.rst new file mode 100644 index 000000000..b6edd61b1 --- /dev/null +++ b/doc/source/dev/internals.code-explanations.rst @@ -0,0 +1,646 @@ +.. currentmodule:: numpy + +.. _c-code-explanations: + +************************* +NumPy C code explanations +************************* + + Fanaticism consists of redoubling your efforts when you have forgotten + your aim. + --- *George Santayana* + + An authority is a person who can tell you more about something than + you really care to know. + --- *Unknown* + +This page attempts to explain the logic behind some of the new +pieces of code. The purpose behind these explanations is to enable +somebody to understand the ideas behind the implementation +more easily than by just staring at the code. Perhaps in this +way, the algorithms can be improved on, borrowed from, and/or +optimized by more people. + + +Memory model +============ + +.. index:: + pair: ndarray; memory model + +One fundamental aspect of the :class:`ndarray` is that an array is seen as a +"chunk" of memory starting at some location. The interpretation of +this memory depends on the :term:`stride` information. For each dimension in +an :math:`N`-dimensional array, an integer (:term:`stride`) dictates how many +bytes must be skipped to get to the next element in that dimension. +Unless you have a single-segment array, this :term:`stride` information must +be consulted when traversing through an array. It is not difficult to +write code that accepts strides; you just have to use ``char*`` +pointers, because strides are in units of bytes. Keep in mind also that +strides do not have to be unit-multiples of the element size. Also, +remember that if the number of dimensions of the array is 0 (sometimes +called a ``rank-0`` array), then the :term:`strides <stride>` and +:term:`dimensions <dimension>` variables are ``NULL``.
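A short sketch of how strides describe traversal (the shapes and strides shown are for a C-contiguous array on a typical build):

.. code-block:: python

    import numpy as np

    a = np.arange(12, dtype=np.int32).reshape(3, 4)
    print(a.strides)    # (16, 4): bytes to skip per dimension

    # A view only changes the stride information, not the data.
    v = a[::2, ::2]
    print(v.strides)    # (32, 8)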
+ +Besides the structural information contained in the strides and +dimensions members of the :c:type:`PyArrayObject`, the flags contain +important information about how the data may be accessed. In particular, +the :c:data:`NPY_ARRAY_ALIGNED` flag is set when the memory is on a +suitable boundary according to the array's datatype. Even if you have +a :term:`contiguous` chunk of memory, you cannot just assume it is safe to +dereference a datatype-specific pointer to an element. Only if the +:c:data:`NPY_ARRAY_ALIGNED` flag is set is this a safe operation. On +some platforms it will work but on others, like Solaris, it will cause +a bus error. The :c:data:`NPY_ARRAY_WRITEABLE` flag should also be checked +if you plan on writing to the memory area of the array. It is also +possible to obtain a pointer to an unwritable memory area. Sometimes, +writing to the memory area when the :c:data:`NPY_ARRAY_WRITEABLE` flag is not +set will just be rude. Other times it can cause program crashes (*e.g.* +a data-area that is a read-only memory-mapped file). + + +Data-type encapsulation +======================= + +.. seealso:: :ref:`arrays.dtypes` + +.. index:: + single: dtype + +The :ref:`datatype <arrays.dtypes>` is an important abstraction of the +:class:`ndarray`. Operations +will look to the datatype to provide the key functionality that is +needed to operate on the array. This functionality is provided in the +list of function pointers pointed to by the ``f`` member of the +:c:type:`PyArray_Descr` structure. In this way, the number of datatypes can be +extended simply by providing a :c:type:`PyArray_Descr` structure with suitable +function pointers in the ``f`` member. For built-in types, there are some +optimizations that bypass this mechanism, but the point of the datatype +abstraction is to allow new datatypes to be added. + +One of the built-in datatypes, the :class:`void` datatype, allows for +arbitrary :term:`structured types <structured data type>` containing 1 or more +fields as elements of the array. A :term:`field` is simply another datatype +object along with an offset into the current structured type. In order to +support arbitrarily nested fields, several recursive implementations of +datatype access are implemented for the void type. A common idiom is to cycle +through the elements of the dictionary and perform a specific operation based on +the datatype object stored at the given offset. These offsets can be +arbitrary numbers. Therefore, the possibility of encountering misaligned +data must be recognized and taken into account if necessary. + + +N-D Iterators +============= + +.. seealso:: :ref:`arrays.nditer` + +.. index:: + single: array iterator + +A very common operation in much of NumPy code is the need to iterate +over all the elements of a general, strided, N-dimensional array. This +operation of a general-purpose N-dimensional loop is abstracted in the +notion of an iterator object. To write an N-dimensional loop, you only +have to create an iterator object from an ndarray, work with the +:c:member:`dataptr <PyArrayIterObject.dataptr>` member of the iterator object +structure and call the macro :c:func:`PyArray_ITER_NEXT` on the iterator +object to move to the next element. The ``next`` element is always in +C-contiguous order. The macro works by first special-casing the C-contiguous, +1-D, and 2-D cases, which work very simply. + +For the general case, the iteration works by keeping track of a list +of coordinate counters in the iterator object.
+
+For the general case, the iteration works by keeping track of a list of
+coordinate counters in the iterator object. At each iteration, the macro
+tries to increment the last coordinate counter (starting from 0). If this
+counter is smaller than one less than the size of the array in that dimension
+(a pre-computed and stored value), then the counter is incremented, the
+:c:member:`dataptr <PyArrayIterObject.dataptr>` member is advanced by the
+stride in that dimension, and the macro ends. If the end of a dimension is
+reached, the counter for the last dimension is reset to zero and the
+:c:member:`dataptr <PyArrayIterObject.dataptr>` is moved back to the
+beginning of that dimension by subtracting the strides value times one less
+than the number of elements in that dimension (this is also pre-computed and
+stored in the :c:member:`backstrides <PyArrayIterObject.backstrides>` member
+of the iterator object). In this case, the macro does not end, but a local
+dimension counter is decremented so that the next-to-last dimension replaces
+the role that the last dimension played and the previously-described tests
+are executed again on the next-to-last dimension. In this way, the
+:c:member:`dataptr <PyArrayIterObject.dataptr>` is adjusted appropriately for
+arbitrary striding.
+
+The :c:member:`coordinates <PyArrayIterObject.coordinates>` member of the
+:c:type:`PyArrayIterObject` structure maintains the current N-d counter
+unless the underlying array is C-contiguous, in which case the coordinate
+counting is bypassed. The :c:member:`index <PyArrayIterObject.index>` member
+of the :c:type:`PyArrayIterObject` keeps track of the current flat index of
+the iterator. It is updated by the :c:func:`PyArray_ITER_NEXT` macro.
+
+
+Broadcasting
+============
+
+.. seealso:: :ref:`basics.broadcasting`
+
+.. index::
+   single: broadcasting
+
+In Numeric, the ancestor of NumPy, broadcasting was implemented in several
+lines of code buried deep in ``ufuncobject.c``. In NumPy, the notion of
+broadcasting has been abstracted so that it can be performed in multiple
+places. Broadcasting is handled by the function :c:func:`PyArray_Broadcast`.
+This function requires a :c:type:`PyArrayMultiIterObject` (or something that
+is a binary equivalent) to be passed in. The :c:type:`PyArrayMultiIterObject`
+keeps track of the broadcast number of dimensions and size in each dimension
+along with the total size of the broadcast result. It also keeps track of the
+number of arrays being broadcast and a pointer to an iterator for each of the
+arrays being broadcast.
+
+The :c:func:`PyArray_Broadcast` function takes the iterators that have
+already been defined and uses them to determine the broadcast shape in each
+dimension (to create the iterators at the same time that broadcasting occurs,
+use the :c:func:`PyArray_MultiIterNew` function). Then, the iterators are
+adjusted so that each iterator thinks it is iterating over an array with the
+broadcast size. This is done by adjusting each iterator's number of
+dimensions and the :term:`shape` in each dimension. This works because the
+iterator strides are also adjusted. Broadcasting only adjusts (or adds)
+length-1 dimensions. For these dimensions, the strides variable is simply set
+to 0 so that the data-pointer for the iterator over that array doesn't move
+as the broadcasting operation operates over the extended dimension.
+
+Broadcasting was always implemented in Numeric using 0-valued strides for the
+extended dimensions. It is done in exactly the same way in NumPy. The big
+difference is that now the array of strides is kept track of in a
+:c:type:`PyArrayIterObject`, the iterators involved in a broadcast result are
+kept track of in a :c:type:`PyArrayMultiIterObject`, and the
+:c:func:`PyArray_Broadcast` call implements the
+:ref:`general-broadcasting-rules`.
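+
+The 0-valued strides are easy to observe from Python (an illustrative
+session; the byte counts assume 8-byte integers):
+
+>>> import numpy as np
+>>> b = np.arange(4)
+>>> np.broadcast_to(b, (3, 4)).strides    # the length-1 dim gets stride 0
+(0, 8)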
+
+
+Array Scalars
+=============
+
+.. seealso:: :ref:`arrays.scalars`
+
+.. index::
+   single: array scalars
+
+The array scalars offer a hierarchy of Python types that allow a one-to-one
+correspondence between the datatype stored in an array and the Python type
+that is returned when an element is extracted from the array. An exception to
+this rule was made with object arrays. Object arrays are heterogeneous
+collections of arbitrary Python objects. When you select an item from an
+object array, you get back the original Python object (and not an object
+array scalar, which does exist but is rarely used for practical purposes).
+
+The array scalars also offer the same methods and attributes as arrays with
+the intent that the same code can be used to support arbitrary dimensions
+(including 0-dimensions). The array scalars are read-only (immutable) with
+the exception of the void scalar, which can also be written to so that
+structured array field setting works more naturally
+(``a[0]['f1'] = value``).
+
+
+Indexing
+========
+
+.. seealso:: :ref:`basics.indexing`, :ref:`arrays.indexing`
+
+.. index::
+   single: indexing
+
+All Python indexing operations ``arr[index]`` are organized by first
+preparing the index and finding the index type. The supported index types
+are:
+
+* integer
+* :const:`newaxis`
+* :term:`python:slice`
+* :py:data:`Ellipsis`
+* integer arrays/array-likes (advanced)
+* boolean (single boolean array); if there is more than one boolean array as
+  the index or the shape does not match exactly, the boolean array will be
+  converted to an integer array instead.
+* 0-d boolean (and also integer); 0-d boolean arrays are a special case that
+  has to be handled in the advanced indexing code. They signal that a 0-d
+  boolean array had to be interpreted as an integer array.
+
+There is also the scalar array special case, which signals that an integer
+array was interpreted as an integer index. This is important because an
+integer array index forces a copy, but is ignored if a scalar is returned
+(full integer index). The prepared index is guaranteed to be valid with the
+exception of out-of-bound values and broadcasting errors for advanced
+indexing. This includes that an :py:data:`Ellipsis` is added for incomplete
+indices, for example, when a two-dimensional array is indexed with a single
+integer.
+
+The next step depends on the type of index which was found. If all dimensions
+are indexed with an integer, a scalar is returned or set. A single boolean
+indexing array will call specialized boolean functions. Indices containing an
+:py:data:`Ellipsis` or :term:`python:slice` but no advanced indexing will
+always create a view into the old array by calculating the new strides and
+memory offset. This view can then either be returned or, for assignments,
+filled using ``PyArray_CopyObject``. Note that ``PyArray_CopyObject`` may
+also be called on temporary arrays in other branches to support complicated
+assignments when the array is of object :class:`dtype`.
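+
+The view/copy distinction is observable from Python (a small illustrative
+session):
+
+>>> import numpy as np
+>>> a = np.arange(10)
+>>> a[2:5].base is a          # slice indexing returns a view
+True
+>>> a[[2, 3, 4]].base is a    # advanced indexing returns a new array
+False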
+
+Advanced indexing
+-----------------
+
+By far the most complex case is advanced indexing, which may or may not be
+combined with typical view-based indexing. Here integer indices are
+interpreted as view-based. Before trying to understand this, you may want to
+make yourself familiar with its subtleties. The advanced indexing code has
+three different branches and one special case:
+
+* There is one indexing array and it, as well as the assignment array, can be
+  iterated trivially. For example, they may be contiguous. Also, the indexing
+  array must be of :class:`intp` type and the value array in assignments
+  should be of the correct type. This is purely a fast path.
+* There are only integer array indices so that no subarray exists.
+* View-based and advanced indexing are mixed. In this case, the view-based
+  indexing defines a collection of subarrays that are combined by the
+  advanced indexing. For example, ``arr[[1, 2, 3], :]`` is created by
+  vertically stacking the subarrays ``arr[1, :]``, ``arr[2, :]``, and
+  ``arr[3, :]``.
+* There is a subarray but it has exactly one element. This case can be
+  handled as if there is no subarray but needs some care during setup.
+
+Deciding what case applies, checking broadcasting, and determining the kind
+of transposition needed are all done in :c:func:`PyArray_MapIterNew`. After
+setting up, there are two cases. If there is no subarray or it only has one
+element, no subarray iteration is necessary and an iterator is prepared which
+iterates all indexing arrays *as well as* the result or value array. If there
+is a subarray, there are three iterators prepared. One for the indexing
+arrays, one for the result or value array (minus its subarray), and one for
+the subarrays of the original and the result/assignment array. The first two
+iterators give (or allow calculation of) the pointers to the start of the
+subarray, which then allows restarting the subarray iteration.
+
+When advanced indices are next to each other, transposing may be necessary.
+All necessary transposing is handled by :c:func:`PyArray_MapIterSwapAxes` and
+has to be handled by the caller unless :c:func:`PyArray_MapIterNew` is asked
+to allocate the result.
+
+After preparation, getting and setting are relatively straightforward,
+although the different modes of iteration need to be considered. Unless there
+is only a single indexing array during item getting, the validity of the
+indices is checked beforehand. Otherwise, it is handled in the inner loop
+itself for optimization.
+
+.. _ufuncs-internals:
+
+Universal functions
+===================
+
+.. seealso:: :ref:`ufuncs`, :ref:`ufuncs-basics`
+
+.. index::
+   single: ufunc
+
+Universal functions are callable objects that take :math:`N` inputs and
+produce :math:`M` outputs by wrapping basic 1-D loops that work
+element-by-element into full easy-to-use functions that seamlessly implement
+:ref:`broadcasting <basics.broadcasting>`,
+:ref:`type-checking <ufuncs.casting>`,
+:ref:`buffered coercion <use-of-internal-buffers>`, and
+:ref:`output-argument handling <ufuncs-output-type>`. New universal functions
+are normally created in C, although there is a mechanism for creating ufuncs
+from Python functions (:func:`frompyfunc`). The user must supply a 1-D loop
+that implements the basic function taking the input scalar values and placing
+the resulting scalars into the appropriate output slots, as explained in the
+sections below.
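+
+As a quick illustration of the Python-side mechanism (a small illustrative
+session; :func:`frompyfunc` always produces ``object`` arrays):
+
+>>> import numpy as np
+>>> logit = np.frompyfunc(lambda p: p / (1 - p), 1, 1)
+>>> logit(np.array([0.25, 0.5, 0.75]))
+array([0.3333333333333333, 1.0, 3.0], dtype=object)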
+
+
+Setup
+-----
+
+Every :class:`ufunc` calculation involves some overhead related to setting up
+the calculation. The practical significance of this overhead is that even
+though the actual calculation of the ufunc is very fast, you will be able to
+write array and type-specific code that will work faster for small arrays
+than the ufunc. In particular, using ufuncs to perform many calculations on
+0-D arrays will be slower than other Python-based solutions (the
+silently-imported ``scalarmath`` module exists precisely to give array
+scalars the look-and-feel of ufunc-based calculations with significantly
+reduced overhead).
+
+When a :class:`ufunc` is called, many things must be done. The information
+collected from these setup operations is stored in a loop object. This loop
+object is a C-structure (that could become a Python object but is not
+initialized as such because it is only used internally). This loop object has
+the layout needed to be used with :c:func:`PyArray_Broadcast` so that the
+broadcasting can be handled in the same way as it is handled in other
+sections of code.
+
+The first thing done is to look up in the thread-specific global dictionary
+the current values for the buffer-size, the error mask, and the associated
+error object. The state of the error mask controls what happens when an error
+condition is found. It should be noted that checking of the hardware error
+flags is only performed after each 1-D loop is executed. This means that if
+the input and output arrays are contiguous and of the correct type so that a
+single 1-D loop is performed, then the flags may not be checked until all
+elements of the array have been calculated. Looking up these values in a
+thread-specific dictionary takes time that is negligible for all but very
+small arrays.
+
+After checking the thread-specific global variables, the inputs are evaluated
+to determine how the ufunc should proceed and the input and output arrays are
+constructed if necessary. Any inputs which are not arrays are converted to
+arrays (using context if necessary). Which of the inputs are scalars (and
+therefore converted to 0-D arrays) is noted.
+
+Next, an appropriate 1-D loop is selected from the 1-D loops available to the
+:class:`ufunc` based on the input array types. This 1-D loop is selected by
+trying to match the signature of the datatypes of the inputs against the
+available signatures. The signatures corresponding to built-in types are
+stored in the :attr:`ufunc.types` member of the ufunc structure. The
+signatures corresponding to user-defined types are stored in a linked list of
+function information with the head element stored as a ``CObject`` in the
+``userloops`` dictionary keyed by the datatype number (the first user-defined
+type in the argument list is used as the key). The signatures are searched
+until a signature is found to which the input arrays can all be cast safely
+(ignoring any scalar arguments which are not allowed to determine the type of
+the result). The implication of this search procedure is that "lesser types"
+should be placed below "larger types" when the signatures are stored. If no
+1-D loop is found, then an error is reported. Otherwise, the
+``argument_list`` is updated with the stored signature --- in case casting is
+necessary and to fix the output types assumed by the 1-D loop.
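+
+The built-in signatures can be inspected from Python (an illustrative
+session; the exact list depends on the NumPy build):
+
+>>> import numpy as np
+>>> np.add.types[:4]
+['??->?', 'bb->b', 'BB->B', 'hh->h']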
+
+If the ufunc has 2 inputs and 1 output and the second input is an ``Object``
+array, then a special-case check is performed so that ``NotImplemented`` is
+returned if the second input is not an ndarray, has the
+:obj:`~numpy.class.__array_priority__` attribute, and has an ``__r{op}__``
+special method. In this way, Python is signaled to give the other object a
+chance to complete the operation instead of using generic object-array
+calculations. This allows (for example) sparse matrices to override the
+multiplication operator 1-D loop.
+
+For input arrays that are smaller than the specified buffer size, copies are
+made of all non-contiguous, misaligned, or out-of-byteorder arrays to ensure
+that for small arrays, a single loop is used. Then, array iterators are
+created for all the input arrays and the resulting collection of iterators is
+broadcast to a single shape.
+
+The output arguments (if any) are then processed and any missing return
+arrays are constructed. If any provided output array doesn't have the correct
+type (or is misaligned) and is smaller than the buffer size, then a new
+output array is constructed with the special
+:c:data:`NPY_ARRAY_WRITEBACKIFCOPY` flag set. At the end of the function,
+:c:func:`PyArray_ResolveWritebackIfCopy` is called so that its contents will
+be copied back into the output array. Iterators for the output arguments are
+then processed.
+
+Finally, the decision is made about how to execute the looping mechanism to
+ensure that all elements of the input arrays are combined to produce the
+output arrays of the correct type. The options for loop execution are
+one-loop (for :term:`contiguous`, aligned, and correct data type),
+strided-loop (for non-contiguous but still aligned and correct data type),
+and a buffered loop (for misaligned or incorrect data type situations).
+Depending on which execution method is called for, the loop is then set up
+and computed.
+
+
+Function call
+-------------
+
+This section describes how the basic universal function computation loop is
+set up and executed for each of the three different kinds of execution. If
+:c:data:`NPY_ALLOW_THREADS` is defined during compilation, then as long as no
+object arrays are involved, the Python Global Interpreter Lock (GIL) is
+released prior to calling the loops. It is re-acquired if necessary to handle
+error conditions. The hardware error flags are checked only after the 1-D
+loop is completed.
+
+
+One loop
+^^^^^^^^
+
+This is the simplest case of all. The ufunc is executed by calling the
+underlying 1-D loop exactly once. This is possible only when we have aligned
+data of the correct type (including byteorder) for both input and output and
+all arrays have uniform strides (either :term:`contiguous`, 0-D, or 1-D). In
+this case, the 1-D computational loop is called once to compute the
+calculation for the entire array. Note that the hardware error flags are only
+checked after the entire calculation is complete.
+
+
+Strided loop
+^^^^^^^^^^^^
+
+When the input and output arrays are aligned and of the correct type, but the
+striding is not uniform (non-contiguous and 2-D or larger), then a second
+looping structure is employed for the calculation. This approach converts all
+of the iterators for the input and output arguments to iterate over all but
+the largest dimension. The inner loop is then handled by the underlying 1-D
+computational loop. The outer loop is a standard iterator loop on the
+converted iterators. The hardware error flags are checked after each 1-D loop
+is completed.
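+
+All of this is transparent at the Python level (an illustrative session;
+which internal loop is chosen is an implementation detail):
+
+>>> import numpy as np
+>>> a = np.arange(12).reshape(3, 4)
+>>> b = a[:, ::2]             # aligned, correct type, but non-contiguous
+>>> b.flags['C_CONTIGUOUS']
+False
+>>> np.add(b, 1)
+array([[ 1,  3],
+       [ 5,  7],
+       [ 9, 11]])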
+
+
+Buffered loop
+^^^^^^^^^^^^^
+
+This is the code that handles the situation whenever the input and/or output
+arrays are either misaligned or of the wrong datatype (including being
+byteswapped) from what the underlying 1-D loop expects. The arrays are also
+assumed to be non-contiguous. The code works very much like the strided loop,
+except that the inner 1-D loop is modified so that pre-processing is
+performed on the inputs and post-processing is performed on the outputs in
+``bufsize`` chunks (where ``bufsize`` is a user-settable parameter). The
+underlying 1-D computational loop is called on data that is copied over (if
+it needs to be). The setup code and the loop code are considerably more
+complicated in this case because they have to handle:
+
+- memory allocation of the temporary buffers;
+
+- deciding whether or not to use buffers on the input and output data
+  (misaligned and/or wrong datatype);
+
+- copying and possibly casting data for any inputs or outputs for which
+  buffers are necessary;
+
+- special-casing ``Object`` arrays so that reference counts are properly
+  handled when copies and/or casts are necessary;
+
+- breaking up the inner 1-D loop into ``bufsize`` chunks (with a possible
+  remainder).
+
+Again, the hardware error flags are checked at the end of each 1-D loop.
+
+
+Final output manipulation
+-------------------------
+
+Ufuncs allow other array-like classes to be passed seamlessly through the
+interface in that inputs of a particular class will induce the outputs to be
+of that same class. The mechanism by which this works is the following. If
+any of the inputs are not ndarrays and define the
+:obj:`~numpy.class.__array_wrap__` method, then the class with the largest
+:obj:`~numpy.class.__array_priority__` attribute determines the type of all
+the outputs (with the exception of any output arrays passed in). The
+:obj:`~numpy.class.__array_wrap__` method of the input array will be called
+with the ndarray being returned from the ufunc as its input. Two calling
+styles of the :obj:`~numpy.class.__array_wrap__` function are supported. The
+first takes the ndarray as the first argument and a tuple of "context" as the
+second argument. The context is (ufunc, arguments, output argument number).
+This is the first call tried. If a ``TypeError`` occurs, then the function is
+called with just the ndarray as the first argument.
+
+
+Methods
+-------
+
+There are three methods of ufuncs that require calculation similar to the
+general-purpose ufuncs. These are :meth:`ufunc.reduce`,
+:meth:`ufunc.accumulate`, and :meth:`ufunc.reduceat`. Each of these methods
+requires a setup command followed by a loop. There are four loop styles
+possible for the methods, corresponding to no-elements, one-element,
+strided-loop, and buffered-loop. These are the same basic loop styles as
+implemented for the general-purpose function call, except for the no-element
+and one-element cases, which are special cases occurring when the input array
+objects have 0 and 1 elements, respectively.
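+
+The three methods, from Python (a small illustrative session):
+
+>>> import numpy as np
+>>> a = np.arange(1, 6)
+>>> np.add.reduce(a)
+15
+>>> np.add.accumulate(a)
+array([ 1,  3,  6, 10, 15])
+>>> np.add.reduceat(a, [0, 2, 4])
+array([3, 7, 5])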
+
+
+Setup
+^^^^^
+
+The setup function for all three methods is ``construct_reduce``. This
+function creates a reducing loop object and fills it with the parameters
+needed to complete the loop. All of the methods only work on ufuncs that take
+two inputs and return one output. Therefore, the underlying 1-D loop is
+selected assuming a signature of ``[otype, otype, otype]`` where ``otype`` is
+the requested reduction datatype. The buffer size and error handling are then
+retrieved from (per-thread) global storage. For small arrays that are
+misaligned or have incorrect datatype, a copy is made so that the un-buffered
+section of code is used. Then, the looping strategy is selected. If there is
+1 element or 0 elements in the array, then a simple looping method is
+selected. If the array is not misaligned and has the correct datatype, then
+strided looping is selected. Otherwise, buffered looping must be performed.
+Looping parameters are then established, and the return array is constructed.
+The output array is of a different :term:`shape` depending on whether the
+method is :meth:`reduce <ufunc.reduce>`,
+:meth:`accumulate <ufunc.accumulate>`, or :meth:`reduceat <ufunc.reduceat>`.
+If an output array is already provided, then its shape is checked. If the
+output array is not C-contiguous, aligned, and of the correct data type, then
+a temporary copy is made with the :c:data:`NPY_ARRAY_WRITEBACKIFCOPY` flag
+set. In this way, the methods will be able to work with a well-behaved output
+array, but the result will be copied back into the true output array when
+:c:func:`PyArray_ResolveWritebackIfCopy` is called at function completion.
+Finally, iterators are set up to loop over the correct :term:`axis`
+(depending on the value of ``axis`` provided to the method) and the setup
+routine returns to the actual computation routine.
+
+
+:meth:`Reduce <ufunc.reduce>`
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. index::
+   triple: ufunc; methods; reduce
+
+All of the ufunc methods use the same underlying 1-D computational loops with
+input and output arguments adjusted so that the appropriate reduction takes
+place. For example, the key to the functioning of
+:meth:`reduce <ufunc.reduce>` is that the 1-D loop is called with the output
+and the second input pointing to the same position in memory and both having
+a step-size of 0. The first input is pointing to the input array with a
+step-size given by the appropriate stride for the selected axis. In this way,
+the operation performed is
+
+.. math::
+   :nowrap:
+
+   \begin{align*}
+   o & = & i[0] \\
+   o & = & i[k]\textrm{<op>}o\quad k=1\ldots N
+   \end{align*}
+
+where :math:`N+1` is the number of elements in the input :math:`i`,
+:math:`o` is the output, and :math:`i[k]` is the :math:`k^{\textrm{th}}`
+element of :math:`i` along the selected axis. This basic operation is
+repeated for arrays with greater than 1 dimension so that the reduction takes
+place for every 1-D sub-array along the selected axis. An iterator with the
+selected dimension removed handles this looping.
+
+For buffered loops, care must be taken to copy and cast data before the loop
+function is called because the underlying loop expects aligned data of the
+correct datatype (including byteorder). The buffered loop must handle this
+copying and casting prior to calling the loop function on chunks no greater
+than the user-specified ``bufsize``.
+
+
+:meth:`Accumulate <ufunc.accumulate>`
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. index::
+   triple: ufunc; methods; accumulate
+
+The :meth:`accumulate <ufunc.accumulate>` method is very similar to the
+:meth:`reduce <ufunc.reduce>` method in that the output and the second input
+both point to the output. The difference is that the second input points to
+memory one stride behind the current output pointer. Thus, the operation
+performed is
+
+.. math::
+   :nowrap:
+
+   \begin{align*}
+   o[0] & = & i[0] \\
+   o[k] & = & i[k]\textrm{<op>}o[k-1]\quad k=1\ldots N.
+   \end{align*}
+
+The output has the same shape as the input, and each 1-D loop operates over
+:math:`N` elements when the shape in the selected axis is :math:`N+1`. Again,
+buffered loops take care to copy and cast the data before calling the
+underlying 1-D computational loop.
+
+
+:meth:`Reduceat <ufunc.reduceat>`
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. index::
+   triple: ufunc; methods; reduceat
+   single: ufunc
+
+The :meth:`reduceat <ufunc.reduceat>` function is a generalization of both
+the :meth:`reduce <ufunc.reduce>` and :meth:`accumulate <ufunc.accumulate>`
+functions. It implements a :meth:`reduce <ufunc.reduce>` over ranges of the
+input array specified by indices. The extra indices argument is checked to be
+sure that every index is not too large for the input array along the selected
+dimension before the loop calculations take place. The loop implementation is
+handled using code that is very similar to the :meth:`reduce <ufunc.reduce>`
+code repeated as many times as there are elements in the indices input. In
+particular, the first input pointer passed to the underlying 1-D
+computational loop points to the input array at the correct location
+indicated by the index array. In addition, the output pointer and the second
+input pointer passed to the underlying 1-D loop point to the same position in
+memory. The size of the 1-D computational loop is fixed to be the difference
+between the current index and the next index (when the current index is the
+last index, then the next index is assumed to be the length of the array
+along the selected dimension). In this way, the 1-D loop will implement a
+:meth:`reduce <ufunc.reduce>` over the specified indices.
+
+Misaligned data, or a loop datatype that does not match the input and/or
+output datatype, is handled using buffered code wherein data is copied to a
+temporary buffer and cast to the correct datatype if necessary prior to
+calling the underlying 1-D function. The temporary buffers are created in
+(element) sizes no bigger than the user-settable buffer-size value. Thus, the
+loop must be flexible enough to call the underlying 1-D computational loop
+enough times to complete the total calculation in chunks no bigger than the
+buffer-size.
diff --git a/doc/source/dev/internals.rst b/doc/source/dev/internals.rst new file mode 100644 index 000000000..14e5f3141 --- /dev/null +++ b/doc/source/dev/internals.rst @@ -0,0 +1,175 @@
+.. currentmodule:: numpy
+
+.. _numpy-internals:
+
+*************************************
+Internal organization of NumPy arrays
+*************************************
+
+Understanding a bit about how NumPy arrays are handled under the covers helps
+in understanding NumPy better. This section will not go into great detail.
+Those wishing to understand the full details should refer to Travis
+Oliphant's book
+`Guide to NumPy <http://web.mit.edu/dvp/Public/numpybook.pdf>`_.
+
+NumPy arrays consist of two major components: the raw array data (from now
+on, referred to as the data buffer), and the information about the raw array
+data. The data buffer is typically what people think of as arrays in C or
+Fortran, a :term:`contiguous` (and fixed) block of memory containing
+fixed-sized data items. NumPy also contains a significant set of data that
+describes how to interpret the data in the data buffer. This extra
+information contains (among other things):
+
+ 1) The basic data element's size in bytes.
+ 2) The start of the data within the data buffer (an offset relative to the
+    beginning of the data buffer).
+ 3) The number of :term:`dimensions <dimension>` and the size of each
+    dimension.
+ 4) The separation between elements for each dimension (the :term:`stride`).
+    This does not have to be a multiple of the element size.
+ 5) The byte order of the data (which may not be the native byte order).
+ 6) Whether the buffer is read-only.
+ 7) Information (via the :class:`dtype` object) about the interpretation of
+    the basic data element. The basic data element may be as simple as an int
+    or a float, or it may be a compound object (e.g.,
+    :term:`struct-like <structured data type>`), a fixed character field, or
+    Python object pointers.
+ 8) Whether the array is to be interpreted as :term:`C-order <C order>` or
+    :term:`Fortran-order <Fortran order>`.
+
+This arrangement allows for the very flexible use of arrays. One thing that
+it allows is simple changes to the metadata to change the interpretation of
+the array buffer. Changing the byteorder of the array is a simple change
+involving no rearrangement of the data. The :term:`shape` of the array can be
+changed very easily without changing anything in the data buffer or any data
+copying at all.
+
+Among other things, this makes it possible to create a new array metadata
+object that uses the same data buffer: a new :term:`view` of that data buffer
+that has a different interpretation of the buffer (e.g., different shape,
+offset, byte order, strides, etc.) but shares the same data bytes. Many
+operations in NumPy do just this, such as :term:`slicing <python:slice>`.
+Other operations, such as transpose, don't move data elements around in the
+array, but rather change the information about the shape and strides so that
+the indexing of the array changes, but the data in the buffer doesn't move.
+
+Typically these new arrays, with new metadata but the same data buffer, are
+new views into the data buffer: there is a different :class:`ndarray` object,
+but it uses the same data buffer. This is why it is necessary to force copies
+through the use of the :func:`copy` method if one really wants to make a new
+and independent copy of the data buffer.
+
+New views into arrays mean the object reference counts for the data buffer
+increase. Simply doing away with the original array object will not remove
+the data buffer if other views of it still exist.
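+
+A short illustrative session:
+
+>>> import numpy as np
+>>> a = np.arange(6)
+>>> v = a.reshape(2, 3)       # new metadata, same data buffer
+>>> v.base is a
+True
+>>> v[0, 0] = 99
+>>> a[0]
+99
+>>> a.copy().base is None     # a true copy owns its data
+True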
+
+Multidimensional array indexing order issues
+============================================
+
+.. seealso:: :ref:`basics.indexing`
+
+What is the right way to index multi-dimensional arrays? Before you jump to
+conclusions about the one and true way to index multi-dimensional arrays, it
+pays to understand why this is a confusing issue. This section will try to
+explain in detail how NumPy indexing works and why we adopt the convention we
+do for images, and when it may be appropriate to adopt other conventions.
+
+The first thing to understand is that there are two conflicting conventions
+for indexing 2-dimensional arrays. Matrix notation uses the first index to
+indicate which row is being selected and the second index to indicate which
+column is selected. This is opposite the geometrically-oriented convention
+for images, where people generally think the first index represents x
+position (i.e., column) and the second represents y position (i.e., row).
+This alone is the source of much confusion; matrix-oriented users and
+image-oriented users expect two different things with regard to indexing.
+
+The second issue to understand is how indices correspond to the order in
+which the array is stored in memory. In Fortran, the first index is the most
+rapidly varying index when moving through the elements of a two-dimensional
+array as it is stored in memory. If you adopt the matrix convention for
+indexing, then this means the matrix is stored one column at a time (since
+the first index moves to the next row as it changes). Thus Fortran is
+considered a column-major language. C has just the opposite convention. In C,
+the last index changes most rapidly as one moves through the array as stored
+in memory. Thus C is a row-major language. The matrix is stored by rows. Note
+that in both cases it presumes that the matrix convention for indexing is
+being used, i.e., for both Fortran and C, the first index is the row. Note
+also that this convention implies that the indexing convention is invariant
+and that the data order changes to keep that so.
+
+But that's not the only way to look at it. Suppose one has large
+two-dimensional arrays (images or matrices) stored in data files. Suppose the
+data are stored by rows rather than by columns. If we are to preserve our
+index convention (whether matrix or image), that means that depending on the
+language we use, we may be forced to reorder the data if it is read into
+memory to preserve our indexing convention. For example, if we read
+row-ordered data into memory without reordering, it will match the matrix
+indexing convention for C, but not for Fortran. Conversely, it will match the
+image indexing convention for Fortran, but not for C. For C, if one is using
+data stored in row order, and one wants to preserve the image index
+convention, the data must be reordered when reading into memory.
+
+In the end, what you do for Fortran or C depends on which is more important,
+not reordering data or preserving the indexing convention. For large images,
+reordering data is potentially expensive, and often the indexing convention
+is inverted to avoid that.
+
+The situation with NumPy makes this issue yet more complicated. The internal
+machinery of NumPy arrays is flexible enough to accept any ordering of
+indices. One can simply reorder indices by manipulating the internal
+:term:`stride` information for arrays without reordering the data at all.
+NumPy will know how to map the new index order to the data without moving the
+data.
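+
+The index-to-memory mapping is visible in the :attr:`~ndarray.strides`
+attribute (an illustrative session; the byte counts assume 8-byte floats):
+
+>>> import numpy as np
+>>> np.zeros((2, 3), order='C').strides   # last index varies fastest
+(24, 8)
+>>> np.zeros((2, 3), order='F').strides   # first index varies fastest
+(8, 16)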
+
+So if this is true, why not choose the index order that matches what you most
+expect? In particular, why not define row-ordered images to use the image
+convention? (This is sometimes referred to as the Fortran convention vs the C
+convention, thus the 'C' and 'FORTRAN' order options for array ordering in
+NumPy.) The drawback of doing this is potential performance penalties. It's
+common to access the data sequentially, either implicitly in array operations
+or explicitly by looping over rows of an image. When that is done, then the
+data will be accessed in non-optimal order. As the first index is
+incremented, what is actually happening is that elements spaced far apart in
+memory are being sequentially accessed, with usually poor memory access
+speeds. For example, consider a two-dimensional image ``im`` defined so that
+``im[0, 10]`` represents the value at ``x = 0``, ``y = 10``. To be consistent
+with usual Python behavior, ``im[0]`` would then represent a column at
+``x = 0``. Yet that data would be spread over the whole array since the data
+are stored in row order. Despite the flexibility of NumPy's indexing, it
+can't really paper over the fact that basic operations are rendered
+inefficient because of data order or that getting contiguous subarrays is
+still awkward (e.g., ``im[:, 0]`` for the first row, vs ``im[0]``). Thus one
+can't use an idiom such as ``for row in im``; ``for col in im`` does work,
+but doesn't yield contiguous column data.
+
+As it turns out, NumPy is smart enough when dealing with
+:ref:`ufuncs <ufuncs-internals>` to determine which index is the most rapidly
+varying one in memory and uses that for the innermost loop. Thus for ufuncs,
+there is no large intrinsic advantage to either approach in most cases. On
+the other hand, use of :attr:`ndarray.flat` with a FORTRAN ordered array will
+lead to non-optimal memory access as adjacent elements in the flattened array
+(iterator, actually) are not contiguous in memory.
+
+Indeed, the fact is that Python indexing on lists and other sequences
+naturally leads to an outside-to-inside ordering (the first index gets the
+largest grouping, the next largest, and the last gets the smallest element).
+Since image data are normally stored in rows, this corresponds to the
+position within rows being the last item indexed.
+
+If you do want to use Fortran ordering, realize that there are two approaches
+to consider: 1) accept that the first index is just not the most rapidly
+changing in memory and have all your I/O routines reorder your data when
+going from memory to disk or vice versa, or 2) use NumPy's mechanism for
+mapping the first index to the most rapidly varying data. We recommend the
+former if possible. The disadvantage of the latter is that many of NumPy's
+functions will yield arrays without Fortran ordering unless you are careful
+to use the ``order`` keyword. Doing this would be highly inconvenient.
+
+Otherwise, we recommend simply learning to reverse the usual order of indices
+when accessing elements of an array. Granted, it goes against the grain, but
+it is more in line with Python semantics and the natural order of the data.
+
+
diff --git a/doc/source/dev/underthehood.rst b/doc/source/dev/underthehood.rst index 4dae48689..c0f37fd5b 100644 --- a/doc/source/dev/underthehood.rst +++ b/doc/source/dev/underthehood.rst @@ -4,4 +4,12 @@ Under-the-hood Documentation for developers ===========================================
-To be completed.
+These documents are intended as a low-level look into NumPy, aimed at
+developers.
+
+..
toctree:: + :maxdepth: 1 + + internals + internals.code-explanations + alignment diff --git a/doc/source/doxyfile b/doc/source/doxyfile new file mode 100644 index 000000000..ea45b9578 --- /dev/null +++ b/doc/source/doxyfile @@ -0,0 +1,340 @@ +# Doxyfile 1.8.18 +#--------------------------------------------------------------------------- +# Project related configuration options +#--------------------------------------------------------------------------- +DOXYFILE_ENCODING = UTF-8 +PROJECT_NAME = NumPy +PROJECT_NUMBER = +PROJECT_BRIEF = "NumPy is the fundamental package for scientific computing in Python" +PROJECT_LOGO = +OUTPUT_DIRECTORY = @ROOT_DIR/doc/build/doxygen +CREATE_SUBDIRS = NO +ALLOW_UNICODE_NAMES = NO +OUTPUT_LANGUAGE = English +OUTPUT_TEXT_DIRECTION = None +BRIEF_MEMBER_DESC = YES +REPEAT_BRIEF = YES +ABBREVIATE_BRIEF = "The $name class" \ + "The $name widget" \ + "The $name file" \ + is \ + provides \ + specifies \ + contains \ + represents \ + a \ + an \ + the +ALWAYS_DETAILED_SEC = NO +INLINE_INHERITED_MEMB = NO +FULL_PATH_NAMES = YES +STRIP_FROM_PATH = @ROOT_DIR +STRIP_FROM_INC_PATH = +SHORT_NAMES = NO +JAVADOC_AUTOBRIEF = YES +JAVADOC_BANNER = NO +QT_AUTOBRIEF = NO +MULTILINE_CPP_IS_BRIEF = NO +INHERIT_DOCS = YES +SEPARATE_MEMBER_PAGES = NO +TAB_SIZE = 4 +ALIASES = +ALIASES += "rst=\verbatim embed:rst:leading-asterisk" +ALIASES += "endrst=\endverbatim" +OPTIMIZE_OUTPUT_FOR_C = NO +OPTIMIZE_OUTPUT_JAVA = NO +OPTIMIZE_FOR_FORTRAN = NO +OPTIMIZE_OUTPUT_VHDL = NO +OPTIMIZE_OUTPUT_SLICE = NO +EXTENSION_MAPPING = +MARKDOWN_SUPPORT = YES +TOC_INCLUDE_HEADINGS = 5 +AUTOLINK_SUPPORT = YES +BUILTIN_STL_SUPPORT = NO +CPP_CLI_SUPPORT = NO +SIP_SUPPORT = NO +IDL_PROPERTY_SUPPORT = YES +DISTRIBUTE_GROUP_DOC = NO +GROUP_NESTED_COMPOUNDS = NO +SUBGROUPING = YES +INLINE_GROUPED_CLASSES = NO +INLINE_SIMPLE_STRUCTS = NO +TYPEDEF_HIDES_STRUCT = NO +LOOKUP_CACHE_SIZE = 0 +#--------------------------------------------------------------------------- +# Build related configuration options +#--------------------------------------------------------------------------- +EXTRACT_ALL = NO +EXTRACT_PRIVATE = NO +EXTRACT_PRIV_VIRTUAL = NO +EXTRACT_PACKAGE = NO +EXTRACT_STATIC = NO +EXTRACT_LOCAL_CLASSES = YES +EXTRACT_LOCAL_METHODS = NO +EXTRACT_ANON_NSPACES = NO +HIDE_UNDOC_MEMBERS = NO +HIDE_UNDOC_CLASSES = NO +HIDE_FRIEND_COMPOUNDS = NO +HIDE_IN_BODY_DOCS = NO +INTERNAL_DOCS = NO +CASE_SENSE_NAMES = YES +HIDE_SCOPE_NAMES = NO +HIDE_COMPOUND_REFERENCE= NO +SHOW_INCLUDE_FILES = YES +SHOW_GROUPED_MEMB_INC = NO +FORCE_LOCAL_INCLUDES = NO +INLINE_INFO = YES +SORT_MEMBER_DOCS = YES +SORT_BRIEF_DOCS = NO +SORT_MEMBERS_CTORS_1ST = NO +SORT_GROUP_NAMES = NO +SORT_BY_SCOPE_NAME = NO +STRICT_PROTO_MATCHING = NO +GENERATE_TODOLIST = YES +GENERATE_TESTLIST = YES +GENERATE_BUGLIST = YES +GENERATE_DEPRECATEDLIST= YES +ENABLED_SECTIONS = +MAX_INITIALIZER_LINES = 30 +SHOW_USED_FILES = YES +SHOW_FILES = YES +SHOW_NAMESPACES = YES +FILE_VERSION_FILTER = +LAYOUT_FILE = +CITE_BIB_FILES = +#--------------------------------------------------------------------------- +# Configuration options related to warning and progress messages +#--------------------------------------------------------------------------- +QUIET = no +WARNINGS = YES +WARN_IF_UNDOCUMENTED = YES +WARN_IF_DOC_ERROR = YES +WARN_NO_PARAMDOC = NO +WARN_AS_ERROR = NO +WARN_FORMAT = "$file:$line: $text" +WARN_LOGFILE = +#--------------------------------------------------------------------------- +# Configuration options related to the input files 
+#--------------------------------------------------------------------------- +INPUT = +INPUT_ENCODING = UTF-8 +FILE_PATTERNS = *.h, *.hpp +RECURSIVE = YES +EXCLUDE = +EXCLUDE_SYMLINKS = NO +EXCLUDE_PATTERNS = +EXCLUDE_SYMBOLS = +EXAMPLE_PATH = +EXAMPLE_PATTERNS = +EXAMPLE_RECURSIVE = NO +IMAGE_PATH = +INPUT_FILTER = +FILTER_PATTERNS = +FILTER_SOURCE_FILES = NO +FILTER_SOURCE_PATTERNS = +USE_MDFILE_AS_MAINPAGE = +#--------------------------------------------------------------------------- +# Configuration options related to source browsing +#--------------------------------------------------------------------------- +SOURCE_BROWSER = NO +INLINE_SOURCES = NO +STRIP_CODE_COMMENTS = YES +REFERENCED_BY_RELATION = NO +REFERENCES_RELATION = NO +REFERENCES_LINK_SOURCE = YES +SOURCE_TOOLTIPS = YES +USE_HTAGS = NO +VERBATIM_HEADERS = YES +CLANG_ASSISTED_PARSING = NO +CLANG_OPTIONS = +CLANG_DATABASE_PATH = +#--------------------------------------------------------------------------- +# Configuration options related to the alphabetical class index +#--------------------------------------------------------------------------- +ALPHABETICAL_INDEX = YES +COLS_IN_ALPHA_INDEX = 5 +IGNORE_PREFIX = +#--------------------------------------------------------------------------- +# Configuration options related to the HTML output +#--------------------------------------------------------------------------- +GENERATE_HTML = NO +HTML_OUTPUT = html +HTML_FILE_EXTENSION = .html +HTML_HEADER = +HTML_FOOTER = +HTML_STYLESHEET = +HTML_EXTRA_STYLESHEET = +HTML_EXTRA_FILES = +HTML_COLORSTYLE_HUE = 220 +HTML_COLORSTYLE_SAT = 100 +HTML_COLORSTYLE_GAMMA = 80 +HTML_TIMESTAMP = NO +HTML_DYNAMIC_MENUS = YES +HTML_DYNAMIC_SECTIONS = NO +HTML_INDEX_NUM_ENTRIES = 100 +GENERATE_DOCSET = NO +DOCSET_FEEDNAME = "Doxygen generated docs" +DOCSET_BUNDLE_ID = org.doxygen.Project +DOCSET_PUBLISHER_ID = org.doxygen.Publisher +DOCSET_PUBLISHER_NAME = Publisher +GENERATE_HTMLHELP = NO +CHM_FILE = +HHC_LOCATION = +GENERATE_CHI = NO +CHM_INDEX_ENCODING = +BINARY_TOC = NO +TOC_EXPAND = NO +GENERATE_QHP = NO +QCH_FILE = +QHP_NAMESPACE = org.doxygen.Project +QHP_VIRTUAL_FOLDER = doc +QHP_CUST_FILTER_NAME = +QHP_CUST_FILTER_ATTRS = +QHP_SECT_FILTER_ATTRS = +QHG_LOCATION = +GENERATE_ECLIPSEHELP = NO +ECLIPSE_DOC_ID = org.doxygen.Project +DISABLE_INDEX = NO +GENERATE_TREEVIEW = NO +ENUM_VALUES_PER_LINE = 4 +TREEVIEW_WIDTH = 250 +EXT_LINKS_IN_WINDOW = NO +HTML_FORMULA_FORMAT = png +FORMULA_FONTSIZE = 10 +FORMULA_TRANSPARENT = YES +FORMULA_MACROFILE = +USE_MATHJAX = NO +MATHJAX_FORMAT = HTML-CSS +MATHJAX_RELPATH = https://cdn.jsdelivr.net/npm/mathjax@@2 +MATHJAX_EXTENSIONS = +MATHJAX_CODEFILE = +SEARCHENGINE = YES +SERVER_BASED_SEARCH = NO +EXTERNAL_SEARCH = NO +SEARCHENGINE_URL = +SEARCHDATA_FILE = searchdata.xml +EXTERNAL_SEARCH_ID = +EXTRA_SEARCH_MAPPINGS = +#--------------------------------------------------------------------------- +# Configuration options related to the LaTeX output +#--------------------------------------------------------------------------- +GENERATE_LATEX = NO +LATEX_OUTPUT = latex +LATEX_CMD_NAME = +MAKEINDEX_CMD_NAME = makeindex +LATEX_MAKEINDEX_CMD = makeindex +COMPACT_LATEX = NO +PAPER_TYPE = a4 +EXTRA_PACKAGES = +LATEX_HEADER = +LATEX_FOOTER = +LATEX_EXTRA_STYLESHEET = +LATEX_EXTRA_FILES = +PDF_HYPERLINKS = YES +USE_PDFLATEX = YES +LATEX_BATCHMODE = NO +LATEX_HIDE_INDICES = NO +LATEX_SOURCE_CODE = NO +LATEX_BIB_STYLE = plain +LATEX_TIMESTAMP = NO +LATEX_EMOJI_DIRECTORY = 
+#--------------------------------------------------------------------------- +# Configuration options related to the RTF output +#--------------------------------------------------------------------------- +GENERATE_RTF = NO +RTF_OUTPUT = rtf +COMPACT_RTF = NO +RTF_HYPERLINKS = NO +RTF_STYLESHEET_FILE = +RTF_EXTENSIONS_FILE = +RTF_SOURCE_CODE = NO +#--------------------------------------------------------------------------- +# Configuration options related to the man page output +#--------------------------------------------------------------------------- +GENERATE_MAN = NO +MAN_OUTPUT = man +MAN_EXTENSION = .3 +MAN_SUBDIR = +MAN_LINKS = NO +#--------------------------------------------------------------------------- +# Configuration options related to the XML output +#--------------------------------------------------------------------------- +GENERATE_XML = YES +XML_OUTPUT = xml +XML_PROGRAMLISTING = YES +XML_NS_MEMB_FILE_SCOPE = NO +#--------------------------------------------------------------------------- +# Configuration options related to the DOCBOOK output +#--------------------------------------------------------------------------- +GENERATE_DOCBOOK = NO +DOCBOOK_OUTPUT = docbook +DOCBOOK_PROGRAMLISTING = NO +#--------------------------------------------------------------------------- +# Configuration options for the AutoGen Definitions output +#--------------------------------------------------------------------------- +GENERATE_AUTOGEN_DEF = NO +#--------------------------------------------------------------------------- +# Configuration options related to the Perl module output +#--------------------------------------------------------------------------- +GENERATE_PERLMOD = NO +PERLMOD_LATEX = NO +PERLMOD_PRETTY = YES +PERLMOD_MAKEVAR_PREFIX = +#--------------------------------------------------------------------------- +# Configuration options related to the preprocessor +#--------------------------------------------------------------------------- +ENABLE_PREPROCESSING = YES +MACRO_EXPANSION = YES +EXPAND_ONLY_PREDEF = NO +SEARCH_INCLUDES = YES +INCLUDE_PATH = +INCLUDE_FILE_PATTERNS = +PREDEFINED = +EXPAND_AS_DEFINED = +SKIP_FUNCTION_MACROS = YES +#--------------------------------------------------------------------------- +# Configuration options related to external references +#--------------------------------------------------------------------------- +TAGFILES = +GENERATE_TAGFILE = +ALLEXTERNALS = NO +EXTERNAL_GROUPS = YES +EXTERNAL_PAGES = YES +#--------------------------------------------------------------------------- +# Configuration options related to the dot tool +#--------------------------------------------------------------------------- +CLASS_DIAGRAMS = YES +DIA_PATH = +HIDE_UNDOC_RELATIONS = YES +HAVE_DOT = NO +DOT_NUM_THREADS = 0 +DOT_FONTNAME = Helvetica +DOT_FONTSIZE = 10 +DOT_FONTPATH = +CLASS_GRAPH = YES +COLLABORATION_GRAPH = YES +GROUP_GRAPHS = YES +UML_LOOK = NO +UML_LIMIT_NUM_FIELDS = 10 +TEMPLATE_RELATIONS = NO +INCLUDE_GRAPH = YES +INCLUDED_BY_GRAPH = YES +CALL_GRAPH = NO +CALLER_GRAPH = NO +GRAPHICAL_HIERARCHY = YES +DIRECTORY_GRAPH = YES +DOT_IMAGE_FORMAT = png +INTERACTIVE_SVG = NO +DOT_PATH = +DOTFILE_DIRS = +MSCFILE_DIRS = +DIAFILE_DIRS = +PLANTUML_JAR_PATH = +PLANTUML_CFG_FILE = +PLANTUML_INCLUDE_PATH = +DOT_GRAPH_MAX_NODES = 50 +MAX_DOT_GRAPH_DEPTH = 0 +DOT_TRANSPARENT = NO +DOT_MULTI_TARGETS = NO +GENERATE_LEGEND = YES +DOT_CLEANUP = YES diff --git a/doc/source/f2py/advanced.rst b/doc/source/f2py/advanced.rst index 1b4625dde..c8efbaadb 
100644 --- a/doc/source/f2py/advanced.rst +++ b/doc/source/f2py/advanced.rst @@ -1,48 +1,46 @@ -====================== -Advanced F2PY usages -====================== +======================== +Advanced F2PY use cases +======================== -Adding self-written functions to F2PY generated modules -======================================================= +Adding user-defined functions to F2PY generated modules +========================================================= -Self-written Python C/API functions can be defined inside +User-defined Python C/API functions can be defined inside signature files using ``usercode`` and ``pymethoddef`` statements (they must be used inside the ``python module`` block). For example, the following signature file ``spam.pyf`` -.. include:: spam.pyf +.. include:: ./code/spam.pyf :literal: wraps the C library function ``system()``:: f2py -c spam.pyf -In Python: +In Python this can then be used as: -.. include:: spam_session.dat - :literal: - -Modifying the dictionary of a F2PY generated module -=================================================== +.. literalinclude:: ./code/results/spam_session.dat + :language: python -The following example illustrates how to add user-defined -variables to a F2PY generated extension module. Given the following -signature file +Adding user-defined variables +============================== -.. include:: var.pyf - :literal: +The following example illustrates how to add user-defined variables to a F2PY +generated extension module by modifying the dictionary of a F2PY generated +module. Consider the following signature file (compiled with ``f2py -c var.pyf``): -compile it as ``f2py -c var.pyf``. +.. literalinclude:: ./code/var.pyf + :language: fortran Notice that the second ``usercode`` statement must be defined inside -an ``interface`` block and where the module dictionary is available through -the variable ``d`` (see ``f2py var.pyf``-generated ``varmodule.c`` for +an ``interface`` block and the module dictionary is available through +the variable ``d`` (see ``varmodule.c`` generated by ``f2py var.pyf`` for additional details). -In Python: +Usage in Python: -.. include:: var_session.dat - :literal: +.. literalinclude:: ./code/results/var_session.dat + :language: python Dealing with KIND specifiers @@ -70,7 +68,7 @@ Use the ``--f2cmap`` command-line option to pass the file name to F2PY. By default, F2PY assumes file name is ``.f2py_f2cmap`` in the current working directory. -Or more generally, the f2cmap file must contain a dictionary +More generally, the f2cmap file must contain a dictionary with items:: <Fortran typespec> : {<selector_expr>:<C type>} @@ -79,7 +77,7 @@ that defines mapping between Fortran type:: <Fortran typespec>([kind=]<selector_expr>) -and the corresponding <C type>. <C type> can be one of the following:: +and the corresponding <C type>. The <C type> can be one of the following:: char signed_char @@ -94,4 +92,4 @@ and the corresponding <C type>. <C type> can be one of the following:: complex_long_double string -For more information, see F2Py source code ``numpy/f2py/capi_maps.py``. +For more information, see the F2Py source code ``numpy/f2py/capi_maps.py``. 
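+
+For example, if the Fortran code declares ``real(kind=rk)`` (the ``rk`` kind
+name here is purely illustrative), a minimal ``.f2py_f2cmap`` file (a Python
+dictionary literal) could map it to the C type ``double``::
+
+    {'real': {'rk': 'double'}}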
diff --git a/doc/source/f2py/allocarr.f90 b/doc/source/f2py/code/allocarr.f90 index e0d6c2ec8..e0d6c2ec8 100644 --- a/doc/source/f2py/allocarr.f90 +++ b/doc/source/f2py/code/allocarr.f90 diff --git a/doc/source/f2py/array.f b/doc/source/f2py/code/array.f index ef20c9c20..ef20c9c20 100644 --- a/doc/source/f2py/array.f +++ b/doc/source/f2py/code/array.f diff --git a/doc/source/f2py/calculate.f b/doc/source/f2py/code/calculate.f index 4ff570d28..4ff570d28 100644 --- a/doc/source/f2py/calculate.f +++ b/doc/source/f2py/code/calculate.f diff --git a/doc/source/f2py/callback.f b/doc/source/f2py/code/callback.f index d5cfc7574..d5cfc7574 100644 --- a/doc/source/f2py/callback.f +++ b/doc/source/f2py/code/callback.f diff --git a/doc/source/f2py/callback2.pyf b/doc/source/f2py/code/callback2.pyf index 3d77eed24..3d77eed24 100644 --- a/doc/source/f2py/callback2.pyf +++ b/doc/source/f2py/code/callback2.pyf diff --git a/doc/source/f2py/common.f b/doc/source/f2py/code/common.f index b098ab20c..b098ab20c 100644 --- a/doc/source/f2py/common.f +++ b/doc/source/f2py/code/common.f diff --git a/doc/source/f2py/extcallback.f b/doc/source/f2py/code/extcallback.f index 9a800628e..9a800628e 100644 --- a/doc/source/f2py/extcallback.f +++ b/doc/source/f2py/code/extcallback.f diff --git a/doc/source/f2py/fib1.f b/doc/source/f2py/code/fib1.f index cfbb1eea0..cfbb1eea0 100644 --- a/doc/source/f2py/fib1.f +++ b/doc/source/f2py/code/fib1.f diff --git a/doc/source/f2py/fib1.pyf b/doc/source/f2py/code/fib1.pyf index 3d6cc0a54..3d6cc0a54 100644 --- a/doc/source/f2py/fib1.pyf +++ b/doc/source/f2py/code/fib1.pyf diff --git a/doc/source/f2py/fib2.pyf b/doc/source/f2py/code/fib2.pyf index 4a5ae29f1..4a5ae29f1 100644 --- a/doc/source/f2py/fib2.pyf +++ b/doc/source/f2py/code/fib2.pyf diff --git a/doc/source/f2py/fib3.f b/doc/source/f2py/code/fib3.f index 08b050cd2..08b050cd2 100644 --- a/doc/source/f2py/fib3.f +++ b/doc/source/f2py/code/fib3.f diff --git a/doc/source/f2py/ftype.f b/doc/source/f2py/code/ftype.f index cabbb9e2d..cabbb9e2d 100644 --- a/doc/source/f2py/ftype.f +++ b/doc/source/f2py/code/ftype.f diff --git a/doc/source/f2py/moddata.f90 b/doc/source/f2py/code/moddata.f90 index 0e98f0467..0e98f0467 100644 --- a/doc/source/f2py/moddata.f90 +++ b/doc/source/f2py/code/moddata.f90 diff --git a/doc/source/f2py/allocarr_session.dat b/doc/source/f2py/code/results/allocarr_session.dat index ba168c22a..ba168c22a 100644 --- a/doc/source/f2py/allocarr_session.dat +++ b/doc/source/f2py/code/results/allocarr_session.dat diff --git a/doc/source/f2py/array_session.dat b/doc/source/f2py/code/results/array_session.dat index 714c03651..714c03651 100644 --- a/doc/source/f2py/array_session.dat +++ b/doc/source/f2py/code/results/array_session.dat diff --git a/doc/source/f2py/calculate_session.dat b/doc/source/f2py/code/results/calculate_session.dat index c4c380700..c4c380700 100644 --- a/doc/source/f2py/calculate_session.dat +++ b/doc/source/f2py/code/results/calculate_session.dat diff --git a/doc/source/f2py/callback_session.dat b/doc/source/f2py/code/results/callback_session.dat index 460c9ce28..460c9ce28 100644 --- a/doc/source/f2py/callback_session.dat +++ b/doc/source/f2py/code/results/callback_session.dat diff --git a/doc/source/f2py/common_session.dat b/doc/source/f2py/code/results/common_session.dat index 2595bfbd5..2595bfbd5 100644 --- a/doc/source/f2py/common_session.dat +++ b/doc/source/f2py/code/results/common_session.dat diff --git a/doc/source/f2py/compile_session.dat b/doc/source/f2py/code/results/compile_session.dat index 
5c42742be..5c42742be 100644 --- a/doc/source/f2py/compile_session.dat +++ b/doc/source/f2py/code/results/compile_session.dat diff --git a/doc/source/f2py/extcallback_session.dat b/doc/source/f2py/code/results/extcallback_session.dat index 5b97ab7cf..5b97ab7cf 100644 --- a/doc/source/f2py/extcallback_session.dat +++ b/doc/source/f2py/code/results/extcallback_session.dat diff --git a/doc/source/f2py/ftype_session.dat b/doc/source/f2py/code/results/ftype_session.dat index e39cc128d..e39cc128d 100644 --- a/doc/source/f2py/ftype_session.dat +++ b/doc/source/f2py/code/results/ftype_session.dat diff --git a/doc/source/f2py/moddata_session.dat b/doc/source/f2py/code/results/moddata_session.dat index 824bd86fc..824bd86fc 100644 --- a/doc/source/f2py/moddata_session.dat +++ b/doc/source/f2py/code/results/moddata_session.dat diff --git a/doc/source/f2py/run_main_session.dat b/doc/source/f2py/code/results/run_main_session.dat index be6cacd22..be6cacd22 100644 --- a/doc/source/f2py/run_main_session.dat +++ b/doc/source/f2py/code/results/run_main_session.dat diff --git a/doc/source/f2py/scalar_session.dat b/doc/source/f2py/code/results/scalar_session.dat index 3bb45ed68..3bb45ed68 100644 --- a/doc/source/f2py/scalar_session.dat +++ b/doc/source/f2py/code/results/scalar_session.dat diff --git a/doc/source/f2py/spam_session.dat b/doc/source/f2py/code/results/spam_session.dat index bd5832d88..bd5832d88 100644 --- a/doc/source/f2py/spam_session.dat +++ b/doc/source/f2py/code/results/spam_session.dat diff --git a/doc/source/f2py/string_session.dat b/doc/source/f2py/code/results/string_session.dat index e8f7854d9..e8f7854d9 100644 --- a/doc/source/f2py/string_session.dat +++ b/doc/source/f2py/code/results/string_session.dat diff --git a/doc/source/f2py/var_session.dat b/doc/source/f2py/code/results/var_session.dat index fb0f798bf..fb0f798bf 100644 --- a/doc/source/f2py/var_session.dat +++ b/doc/source/f2py/code/results/var_session.dat diff --git a/doc/source/f2py/scalar.f b/doc/source/f2py/code/scalar.f index c22f639ed..c22f639ed 100644 --- a/doc/source/f2py/scalar.f +++ b/doc/source/f2py/code/scalar.f diff --git a/doc/source/f2py/setup_example.py b/doc/source/f2py/code/setup_example.py index 479acc004..479acc004 100644 --- a/doc/source/f2py/setup_example.py +++ b/doc/source/f2py/code/setup_example.py diff --git a/doc/source/f2py/spam.pyf b/doc/source/f2py/code/spam.pyf index 21ea18b77..21ea18b77 100644 --- a/doc/source/f2py/spam.pyf +++ b/doc/source/f2py/code/spam.pyf diff --git a/doc/source/f2py/string.f b/doc/source/f2py/code/string.f index 9246f02e7..9246f02e7 100644 --- a/doc/source/f2py/string.f +++ b/doc/source/f2py/code/string.f diff --git a/doc/source/f2py/var.pyf b/doc/source/f2py/code/var.pyf index 8275ff3af..8275ff3af 100644 --- a/doc/source/f2py/var.pyf +++ b/doc/source/f2py/code/var.pyf diff --git a/doc/source/f2py/distutils.rst b/doc/source/f2py/distutils.rst index 4cf30045e..575dacdff 100644 --- a/doc/source/f2py/distutils.rst +++ b/doc/source/f2py/distutils.rst @@ -4,16 +4,17 @@ Using via `numpy.distutils` .. currentmodule:: numpy.distutils.core -:mod:`numpy.distutils` is part of NumPy extending standard Python ``distutils`` -to deal with Fortran sources and F2PY signature files, e.g. compile Fortran -sources, call F2PY to construct extension modules, etc. +:mod:`numpy.distutils` is part of NumPy, and extends the standard Python +``distutils`` module to deal with Fortran sources and F2PY signature files, e.g. +compile Fortran sources, call F2PY to construct extension modules, etc. .. 
topic:: Example - Consider the following `setup file`__: + Consider the following `setup file`__ for the ``fib`` examples in the previous + section: - .. include:: setup_example.py - :literal: + .. literalinclude:: ./code/setup_example.py + :language: python Running @@ -26,30 +27,32 @@ sources, call F2PY to construct extension modules, etc. __ setup_example.py +Extensions to ``distutils`` +=========================== + :mod:`numpy.distutils` extends ``distutils`` with the following features: * :class:`Extension` class argument ``sources`` may contain Fortran source files. In addition, the list ``sources`` may contain at most one - F2PY signature file, and then the name of an Extension module must - match with the ``<modulename>`` used in signature file. It is + F2PY signature file, and in this case, the name of an Extension module must + match with the ``<modulename>`` used in signature file. It is assumed that an F2PY signature file contains exactly one ``python module`` block. - If ``sources`` does not contain a signature files, then F2PY is used - to scan Fortran source files for routine signatures to construct the - wrappers to Fortran codes. + If ``sources`` do not contain a signature file, then F2PY is used to scan + Fortran source files to construct wrappers to the Fortran codes. - Additional options to F2PY process can be given using :class:`Extension` - class argument ``f2py_options``. + Additional options to the F2PY executable can be given using the + :class:`Extension` class argument ``f2py_options``. * The following new ``distutils`` commands are defined: ``build_src`` to construct Fortran wrapper extension modules, among many other things. ``config_fc`` - to change Fortran compiler options + to change Fortran compiler options. - as well as ``build_ext`` and ``build_clib`` commands are enhanced + Additionally, the ``build_ext`` and ``build_clib`` commands are also enhanced to support Fortran sources. Run @@ -60,15 +63,15 @@ sources, call F2PY to construct extension modules, etc. to see available options for these commands. -* When building Python packages containing Fortran sources, then one - can choose different Fortran compilers by using ``build_ext`` +* When building Python packages containing Fortran sources, one + can choose different Fortran compilers by using the ``build_ext`` command option ``--fcompiler=<Vendor>``. Here ``<Vendor>`` can be one of the - following names:: + following names (on ``linux`` systems):: - absoft sun mips intel intelv intele intelev nag compaq compaqv gnu vast pg hpux + absoft compaq fujitsu g95 gnu gnu95 intel intele intelem lahey nag nagfor nv pathf95 pg vast - See ``numpy_distutils/fcompiler.py`` for up-to-date list of - supported compilers or run + See ``numpy_distutils/fcompiler.py`` for an up-to-date list of + supported compilers for different platforms, or run :: diff --git a/doc/source/f2py/f2py.getting-started.rst b/doc/source/f2py/f2py.getting-started.rst index 27ddbb005..1709aad61 100644 --- a/doc/source/f2py/f2py.getting-started.rst +++ b/doc/source/f2py/f2py.getting-started.rst @@ -6,52 +6,55 @@ Wrapping Fortran or C functions to Python using F2PY consists of the following steps: * Creating the so-called signature file that contains descriptions of - wrappers to Fortran or C functions, also called as signatures of the - functions. In the case of Fortran routines, F2PY can create initial + wrappers to Fortran or C functions, also called the signatures of the + functions. 
For Fortran routines, F2PY can create an initial signature file by scanning Fortran source codes and - catching all relevant information needed to create wrapper + tracking all relevant information needed to create wrapper functions. -* Optionally, F2PY created signature files can be edited to optimize - wrappers functions, make them "smarter" and more "Pythonic". + * Optionally, F2PY created signature files can be edited to optimize + wrapper functions, to make them "smarter" and more "Pythonic". * F2PY reads a signature file and writes a Python C/API module containing Fortran/C/Python bindings. - * F2PY compiles all sources and builds an extension module containing - the wrappers. In building extension modules, F2PY uses - ``numpy_distutils`` that supports a number of Fortran 77/90/95 - compilers, including Gnu, Intel, - Sun Fortre, SGI MIPSpro, Absoft, NAG, Compaq etc. compilers. + the wrappers. + + * In building the extension modules, F2PY uses ``numpy_distutils`` which + supports a number of Fortran 77/90/95 compilers, including Gnu, Intel, Sun + Fortran, SGI MIPSpro, Absoft, NAG, Compaq etc. + +Depending on the situation, these steps can be carried out in a single composite +command or step-by-step; in which case some steps can be omitted or combined +with others. -Depending on a particular situation, these steps can be carried out -either by just in one command or step-by-step, some steps can be -omitted or combined with others. +Below, we describe three typical approaches of using F2PY. These can be read in +order of increasing effort, but also cater to different access levels depending +on whether the Fortran code can be freely modified. -Below I'll describe three typical approaches of using F2PY. The following example Fortran 77 code will be used for -illustration, save it as fib1.f: +illustration, save it as ``fib1.f``: -.. include:: fib1.f - :literal: +.. literalinclude:: ./code/fib1.f + :language: fortran The quick way ============== -The quickest way to wrap the Fortran subroutine ``FIB`` to Python is -to run +The quickest way to wrap the Fortran subroutine ``FIB`` for use in Python is to +run :: python -m numpy.f2py -c fib1.f -m fib1 -This command builds (see ``-c`` flag, execute ``python -m numpy.f2py`` without -arguments to see the explanation of command line options) an extension -module ``fib1.so`` (see ``-m`` flag) to the current directory. Now, in -Python the Fortran subroutine ``FIB`` is accessible via ``fib1.fib``:: +This command compiles and wraps ``fib1.f`` (``-c``) to create the extension +module ``fib1.so`` (``-m``) in the current directory. A list of command line +options can be seen by executing ``python -m numpy.f2py``. Now, in Python the +Fortran subroutine ``FIB`` is accessible via ``fib1.fib``:: - >>> import numpy + >>> import numpy as np >>> import fib1 >>> print(fib1.fib.__doc__) fib(a,[n]) @@ -67,21 +70,21 @@ Python the Fortran subroutine ``FIB`` is accessible via ``fib1.fib``:: n : input int, optional Default: len(a) - >>> a = numpy.zeros(8, 'd') + >>> a = np.zeros(8, 'd') >>> fib1.fib(a) >>> print(a) [ 0. 1. 1. 2. 3. 5. 8. 13.] .. note:: - * Note that F2PY found that the second argument ``n`` is the + * Note that F2PY recognized that the second argument ``n`` is the dimension of the first array argument ``a``. Since by default all arguments are input-only arguments, F2PY concludes that ``n`` can be optional with the default value ``len(a)``. 
* One can use different values for optional ``n``:: - >>> a1 = numpy.zeros(8, 'd') + >>> a1 = np.zeros(8, 'd') >>> fib1.fib(a1, 6) >>> print(a1) [ 0. 1. 1. 2. 3. 5. 0. 0.] @@ -96,98 +99,94 @@ Python the Fortran subroutine ``FIB`` is accessible via ``fib1.fib``:: >>> F2PY implements basic compatibility checks between related - arguments in order to avoid any unexpected crashes. + arguments in order to avoid unexpected crashes. - * When a NumPy array, that is Fortran contiguous and has a dtype - corresponding to presumed Fortran type, is used as an input array + * When a NumPy array, that is Fortran contiguous and has a ``dtype`` + corresponding to a presumed Fortran type, is used as an input array argument, then its C pointer is directly passed to Fortran. - Otherwise F2PY makes a contiguous copy (with a proper dtype) of - the input array and passes C pointer of the copy to Fortran + Otherwise F2PY makes a contiguous copy (with the proper ``dtype``) of + the input array and passes a C pointer of the copy to the Fortran subroutine. As a result, any possible changes to the (copy of) input array have no effect to the original argument, as demonstrated below:: - >>> a = numpy.ones(8, 'i') + >>> a = np.ones(8, 'i') >>> fib1.fib(a) >>> print(a) [1 1 1 1 1 1 1 1] - Clearly, this is not an expected behaviour. The fact that the - above example worked with ``dtype=float`` is considered - accidental. + Clearly, this is unexpected, as Fortran typically passes by reference. That + the above example worked with ``dtype=float`` is considered accidental. - F2PY provides ``intent(inplace)`` attribute that would modify + F2PY provides an ``intent(inplace)`` attribute that modifies the attributes of an input array so that any changes made by - Fortran routine will be effective also in input argument. For example, - if one specifies ``intent(inplace) a`` (see below, how), then - the example above would read:: + Fortran routine will be reflected in the input argument. For example, + if one specifies the ``intent(inplace) a`` directive (see subsequent + sections on how), then the example above would read:: - >>> a = numpy.ones(8, 'i') + >>> a = np.ones(8, 'i') >>> fib1.fib(a) >>> print(a) [ 0. 1. 1. 2. 3. 5. 8. 13.] - However, the recommended way to get changes made by Fortran - subroutine back to Python is to use ``intent(out)`` attribute. It - is more efficient and a cleaner solution. - - * The usage of ``fib1.fib`` in Python is very similar to using - ``FIB`` in Fortran. However, using *in situ* output arguments in - Python indicates a poor style as there is no safety mechanism - in Python with respect to wrong argument types. When using Fortran - or C, compilers naturally discover any type mismatches during - compile time but in Python the types must be checked in - runtime. So, using *in situ* output arguments in Python may cause - difficult to find bugs, not to mention that the codes will be less - readable when all required type checks are implemented. - - Though the demonstrated way of wrapping Fortran routines to Python - is very straightforward, it has several drawbacks (see the comments - above). These drawbacks are due to the fact that there is no way - that F2PY can determine what is the actual intention of one or the - other argument, is it input or output argument, or both, or - something else. So, F2PY conservatively assumes that all arguments - are input arguments by default. 
- - However, there are ways (see below) how to "teach" F2PY about the - true intentions (among other things) of function arguments; and then - F2PY is able to generate more Pythonic (more explicit, easier to - use, and less error prone) wrappers to Fortran functions. + However, the recommended way to have changes made by Fortran subroutine + propagate to Python is to use the ``intent(out)`` attribute. That approach is + more efficient and also cleaner. + + * The usage of ``fib1.fib`` in Python is very similar to using ``FIB`` in + Fortran. However, using *in situ* output arguments in Python is poor style, + as there are no safety mechanisms in Python to protect against wrong + argument types. When using Fortran or C, compilers discover any type + mismatches during the compilation process, but in Python the types must be + checked at runtime. Consequently, using *in situ* output arguments in Python + may lead to difficult to find bugs, not to mention the fact that the + codes will be less readable when all required type checks are implemented. + + Though the approach to wrapping Fortran routines for Python discussed so far is + very straightforward, it has several drawbacks (see the comments above). + The drawbacks are due to the fact that there is no way for F2PY to determine + the actual intention of the arguments; that is there is ambiguity in + distinguishing between input and output arguments. Consequently, F2PY assumes + that all arguments are input arguments by default. + + However, there are ways (see below) to remove this ambiguity by "teaching" + F2PY about the true intentions of function arguments, and F2PY is then able to + generate more explicit, easier to use, and less error prone wrappers for + Fortran functions. The smart way ============== -Let's apply the steps of wrapping Fortran functions to Python one by +Let us apply the steps for wrapping Fortran functions to Python one by one. -* First, we create a signature file from ``fib1.f`` by running +* First, we create a signature file from ``fib1.f`` by running: :: python -m numpy.f2py fib1.f -m fib2 -h fib1.pyf - The signature file is saved to ``fib1.pyf`` (see ``-h`` flag) and - its contents is shown below. + The signature file is saved to ``fib1.pyf`` (see the ``-h`` flag) and + its contents are shown below. - .. include:: fib1.pyf - :literal: + .. literalinclude:: ./code/fib1.pyf + :language: fortran -* Next, we'll teach F2PY that the argument ``n`` is an input argument - (use ``intent(in)`` attribute) and that the result, i.e. the - contents of ``a`` after calling Fortran function ``FIB``, should be - returned to Python (use ``intent(out)`` attribute). In addition, an - array ``a`` should be created dynamically using the size given by - the input argument ``n`` (use ``depend(n)`` attribute to indicate - dependence relation). +* Next, we'll teach F2PY that the argument ``n`` is an input argument (using the + ``intent(in)`` attribute) and that the result, i.e., the contents of ``a`` + after calling the Fortran function ``FIB``, should be returned to Python (using + the ``intent(out)`` attribute). In addition, an array ``a`` should be created + dynamically using the size determined by the input argument ``n`` (using the + ``depend(n)`` attribute to indicate this dependence relation). - The content of a modified version of ``fib1.pyf`` (saved as + The contents of a suitably modified version of ``fib1.pyf`` (saved as ``fib2.pyf``) is as follows: - .. include:: fib2.pyf - :literal: + .. 
literalinclude:: ./code/fib2.pyf + :language: fortran -* And finally, we build the extension module by running +* Finally, we build the extension module with ``numpy.distutils`` by running: :: @@ -214,16 +213,14 @@ In Python:: .. note:: - * Clearly, the signature of ``fib2.fib`` now corresponds to the - intention of Fortran subroutine ``FIB`` more closely: given the - number ``n``, ``fib2.fib`` returns the first ``n`` Fibonacci numbers - as a NumPy array. Also, the new Python signature ``fib2.fib`` - rules out any surprises that we experienced with ``fib1.fib``. + * The signature of ``fib2.fib`` now more closely corresponds to the + intention of Fortran subroutine ``FIB``: given the number ``n``, + ``fib2.fib`` returns the first ``n`` Fibonacci numbers as a NumPy array. + The new Python signature ``fib2.fib`` also rules out the unexpected behaviour in ``fib1.fib``. - * Note that by default using single ``intent(out)`` also implies + * Note that by default, using a single ``intent(out)`` also implies ``intent(hide)``. Arguments that have the ``intent(hide)`` attribute - specified will not be listed in the argument list of a wrapper - function. + specified will not be listed in the argument list of a wrapper function. The quick and smart way ======================== @@ -233,26 +230,25 @@ suitable for wrapping (e.g. third party) Fortran codes for which modifications to their source codes are not desirable nor even possible. -However, if editing Fortran codes is acceptable, then the generation -of an intermediate signature file can be skipped in most -cases. Namely, F2PY specific attributes can be inserted directly to -Fortran source codes using the so-called F2PY directive. A F2PY -directive defines special comment lines (starting with ``Cf2py``, for -example) which are ignored by Fortran compilers but F2PY interprets -them as normal lines. +However, if editing Fortran codes is acceptable, then the generation of an +intermediate signature file can be skipped in most cases. F2PY specific +attributes can be inserted directly into Fortran source codes using F2PY +directives. A F2PY directive consists of special comment lines (starting with +``Cf2py`` or ``!f2py``, for example) which are ignored by Fortran compilers but +interpreted by F2PY as normal lines. -Here is shown a modified version of the previous Fortran code, save it -as ``fib3.f``: +Consider a modified version of the previous Fortran code with F2PY directives, +saved as ``fib3.f``: -.. include:: fib3.f - :literal: +.. literalinclude:: ./code/fib3.f + :language: fortran Building the extension module can be now carried out in one command:: python -m numpy.f2py -c -m fib3 fib3.f -Notice that the resulting wrapper to ``FIB`` is as "smart" as in -previous case:: +Notice that the resulting wrapper to ``FIB`` is as "smart" (unambiguous) as in +the previous case:: >>> import fib3 >>> print(fib3.fib.__doc__) diff --git a/doc/source/f2py/index.rst b/doc/source/f2py/index.rst index 492139651..c774a0df6 100644 --- a/doc/source/f2py/index.rst +++ b/doc/source/f2py/index.rst @@ -1,8 +1,10 @@ -##################################### -F2PY Users Guide and Reference Manual -##################################### +.. _f2py: -The purpose of the ``F2PY`` --*Fortran to Python interface generator*-- +===================================== +F2PY user guide and reference manual +===================================== + +The purpose of the ``F2PY`` --*Fortran to Python interface generator*-- utility is to provide a connection between Python and Fortran languages. 
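The one-command builds shown in the getting-started examples above can also be driven from within Python. A minimal sketch, assuming the ``fib3.f`` source from that section and NumPy's documented ``numpy.f2py.compile`` helper; the module name and file path are simply the ones used in the walk-through:

.. code-block:: python

    import numpy.f2py

    # Read the Fortran source that carries the F2PY directives.
    with open("fib3.f") as f:
        source = f.read()

    # Roughly equivalent to: python -m numpy.f2py -c -m fib3 fib3.f
    # compile() returns the build's exit status (0 on success).
    status = numpy.f2py.compile(source, modulename="fib3",
                                extension=".f", verbose=False)
    assert status == 0

    import fib3
    print(fib3.fib(8))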
F2PY is a part of NumPy_ (``numpy.f2py``) and also available as a standalone command line tool ``f2py`` when ``numpy`` is installed that @@ -19,11 +21,11 @@ from Python. .. toctree:: :maxdepth: 2 - f2py.getting-started - signature-file - python-usage usage + f2py.getting-started distutils + python-usage + signature-file advanced .. _Python: https://www.python.org/ diff --git a/doc/source/f2py/python-usage.rst b/doc/source/f2py/python-usage.rst index 65c0cec64..ef8ccd7dd 100644 --- a/doc/source/f2py/python-usage.rst +++ b/doc/source/f2py/python-usage.rst @@ -4,74 +4,76 @@ Using F2PY bindings in Python All wrappers for Fortran/C routines, common blocks, or for Fortran 90 module data generated by F2PY are exposed to Python as ``fortran`` -type objects. Routine wrappers are callable ``fortran`` type objects +type objects. Routine wrappers are callable ``fortran`` type objects while wrappers to Fortran data have attributes referring to data objects. -All ``fortran`` type objects have attribute ``_cpointer`` that contains -CObject referring to the C pointer of the corresponding Fortran/C -function or variable in C level. Such CObjects can be used as a -callback argument of F2PY generated functions to bypass Python C/API -layer of calling Python functions from Fortran or C when the -computational part of such functions is implemented in C or Fortran -and wrapped with F2PY (or any other tool capable of providing CObject -of a function). +All ``fortran`` type objects have an attribute ``_cpointer`` that contains a +``CObject`` referring to the C pointer of the corresponding Fortran/C function +or variable at the C level. Such ``CObjects`` can be used as a callback argument +for F2PY generated functions to bypass the Python C/API layer for calling Python +functions from Fortran or C when the computational aspects of such functions are +implemented in C or Fortran and wrapped with F2PY (or any other tool capable of +providing the ``CObject`` of a function). -Consider a Fortran 77 file ``ftype.f``: +Consider a Fortran 77 file ``ftype.f``: - .. include:: ftype.f - :literal: + .. literalinclude:: ./code/ftype.f + :language: fortran -and build a wrapper using ``f2py -c ftype.f -m ftype``. +and a wrapper built using ``f2py -c ftype.f -m ftype``. In Python: - .. include:: ftype_session.dat - :literal: + .. literalinclude:: ./code/results/ftype_session.dat + :language: python Scalar arguments ================= -In general, a scalar argument of a F2PY generated wrapper function can +In general, a scalar argument for a F2PY generated wrapper function can be an ordinary Python scalar (integer, float, complex number) as well as an arbitrary sequence object (list, tuple, array, string) of scalars. In the latter case, the first element of the sequence object is passed to Fortran routine as a scalar argument. -Note that when type-casting is required and there is possible loss of -information (e.g. when type-casting float to integer or complex to -float), F2PY does not raise any exception. In complex to real -type-casting only the real part of a complex number is used. +.. note:: + + * When type-casting is required and there is possible loss of information via + narrowing e.g. when type-casting float to integer or complex to float, F2PY + *does not* raise an exception. -``intent(inout)`` scalar arguments are assumed to be array objects in -order to have *in situ* changes be effective. It is recommended to use -arrays with proper type but also other types work.
+ * For complex to real type-casting only the real part of a complex number is used. + + * ``intent(inout)`` scalar arguments are assumed to be array objects in + order to have *in situ* changes be effective. It is recommended to use + arrays with proper type but also other types work. Consider the following Fortran 77 code: - .. include:: scalar.f - :literal: + .. literalinclude:: ./code/scalar.f + :language: fortran and wrap it using ``f2py -c -m scalar scalar.f``. In Python: - .. include:: scalar_session.dat - :literal: + .. literalinclude:: ./code/results/scalar_session.dat + :language: python String arguments ================= -F2PY generated wrapper functions accept (almost) any Python object as -a string argument, ``str`` is applied for non-string objects. +F2PY generated wrapper functions accept almost any Python object as +a string argument, since ``str`` is applied for non-string objects. Exceptions are NumPy arrays that must have type code ``'c'`` or ``'1'`` when used as string arguments. -A string can have arbitrary length when using it as a string argument -to F2PY generated wrapper function. If the length is greater than -expected, the string is truncated. If the length is smaller than +A string can have an arbitrary length when used as a string argument +for an F2PY generated wrapper function. If the length is greater than +expected, the string is truncated silently. If the length is smaller than expected, additional memory is allocated and filled with ``\0``. Because Python strings are immutable, an ``intent(inout)`` argument @@ -79,43 +81,43 @@ expects an array version of a string in order to have *in situ* changes be effec Consider the following Fortran 77 code: - .. include:: string.f - :literal: + .. literalinclude:: ./code/string.f + :language: fortran and wrap it using ``f2py -c -m mystring string.f``. Python session: - .. include:: string_session.dat - :literal: + .. literalinclude:: ./code/results/string_session.dat + :language: python Array arguments ================ -In general, array arguments of F2PY generated wrapper functions accept -arbitrary sequences that can be transformed to NumPy array objects. -An exception is ``intent(inout)`` array arguments that always must be -proper-contiguous and have proper type, otherwise an exception is -raised. Another exception is ``intent(inplace)`` array arguments that -attributes will be changed *in situ* if the argument has different type -than expected (see ``intent(inplace)`` attribute for more -information). - -In general, if a NumPy array is proper-contiguous and has a proper -type then it is directly passed to wrapped Fortran/C function. -Otherwise, an element-wise copy of an input array is made and the -copy, being proper-contiguous and with proper type, is used as an -array argument. +In general, array arguments for F2PY generated wrapper functions accept +arbitrary sequences that can be transformed to NumPy array objects. There are +two notable exceptions: + +* ``intent(inout)`` array arguments must always be proper-contiguous (defined below) and have a + compatible ``dtype``, otherwise an exception is raised. +* ``intent(inplace)`` array arguments will be changed *in situ* if the argument + has a different type than expected (see the ``intent(inplace)`` attribute for + more information). + +In general, if a NumPy array is proper-contiguous and has a proper type then it +is directly passed to the wrapped Fortran/C function. 
Otherwise, an element-wise +copy of the input array is made and the copy, being proper-contiguous and with +proper type, is used as the array argument. There are two types of proper-contiguous NumPy arrays: -* Fortran-contiguous arrays when data is stored column-wise, - i.e. indexing of data as stored in memory starts from the lowest +* Fortran-contiguous arrays refer to data that is stored columnwise, + i.e. the indexing of data as stored in memory starts from the lowest dimension; -* C-contiguous or simply contiguous arrays when data is stored - row-wise, i.e. indexing of data as stored in memory starts from the - highest dimension. +* C-contiguous, or simply contiguous arrays, refer to data that is stored + rowwise, i.e. the indexing of data as stored in memory starts from the highest + dimension. For one-dimensional arrays these notions coincide. @@ -132,30 +134,29 @@ To test whether an array is C-contiguous, use the ``.flags.c_contiguous`` attribute of NumPy arrays. To test for Fortran contiguity, use the ``.flags.f_contiguous`` attribute. -Usually there is no need to worry about how the arrays are stored in -memory and whether the wrapped functions, being either Fortran or C -functions, assume one or another storage order. F2PY automatically -ensures that wrapped functions get arguments with proper storage -order; the corresponding algorithm is designed to make copies of -arrays only when absolutely necessary. However, when dealing with very -large multidimensional input arrays with sizes close to the size of -the physical memory in your computer, then a care must be taken to use -always proper-contiguous and proper type arguments. +Usually there is no need to worry about how the arrays are stored in memory and +whether the wrapped functions, being either Fortran or C functions, assume one +or another storage order. F2PY automatically ensures that wrapped functions get +arguments with the proper storage order; the underlying algorithm is designed to +make copies of arrays only when absolutely necessary. However, when dealing with +very large multidimensional input arrays with sizes close to the size of the +physical memory in your computer, then care must be taken to ensure the usage of +proper-contiguous and proper type arguments. To transform input arrays to column major storage order before passing them to Fortran routines, use the function ``numpy.asfortranarray(<array>)``. Consider the following Fortran 77 code: - .. include:: array.f - :literal: + .. literalinclude:: ./code/array.f + :language: fortran and wrap it using ``f2py -c -m arr array.f -DF2PY_REPORT_ON_ARRAY_COPY=1``. In Python: - .. include:: array_session.dat - :literal: + .. literalinclude:: ./code/results/array_session.dat + :language: python .. _Call-back arguments: @@ -166,31 +167,32 @@ F2PY supports calling Python functions from Fortran or C codes. Consider the following Fortran 77 code: - .. include:: callback.f - :literal: + .. literalinclude:: ./code/callback.f + :language: fortran and wrap it using ``f2py -c -m callback callback.f``. In Python: - .. include:: callback_session.dat - :literal: + .. literalinclude:: ./code/results/callback_session.dat + :language: python In the above example F2PY was able to guess accurately the signature -of a call-back function. However, sometimes F2PY cannot establish the -signature as one would wish and then the signature of a call-back -function must be modified in the signature file manually. 
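As a brief aside on the array-argument machinery above: the contiguity flags and ``numpy.asfortranarray`` can be exercised directly with plain NumPy, as this self-contained snippet shows:

.. code-block:: python

    import numpy as np

    a = np.arange(6.0).reshape(2, 3)   # C-contiguous (row-major) by default
    print(a.flags.c_contiguous)        # True
    print(a.flags.f_contiguous)        # False

    # Column-major copy, suitable for passing to Fortran routines
    # without triggering an implicit copy inside the wrapper.
    b = np.asfortranarray(a)
    print(b.flags.f_contiguous)        # True

    # One-dimensional arrays are both C- and Fortran-contiguous.
    c = np.zeros(4)
    print(c.flags.c_contiguous and c.flags.f_contiguous)  # True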
Namely, -signature files may contain special modules (the names of such modules -contain a substring ``__user__``) that collect various signatures of -call-back functions. Callback arguments in routine signatures have -attribute ``external`` (see also ``intent(callback)`` attribute). To -relate a callback argument and its signature in ``__user__`` module -block, use ``use`` statement as illustrated below. The same signature -of a callback argument can be referred in different routine +of the call-back function. However, sometimes F2PY cannot establish the +appropriate signature; in these cases the signature of the call-back +function must be explicitly defined in the signature file. + +To facilitate this, signature files may contain special modules (the names of +these modules contain the special ``__user__`` sub-string) that define the +various signatures for call-back functions. Callback arguments in routine +signatures have the ``external`` attribute (see also the ``intent(callback)`` +attribute). To relate a callback argument with its signature in a ``__user__`` +module block, a ``use`` statement can be utilized as illustrated below. The same +signature for a callback argument can be referred to in different routine signatures. -We use the same Fortran 77 code as in previous example but now -we'll pretend that F2PY was not able to guess the signatures of +We use the same Fortran 77 code as in the previous example but now +we will pretend that F2PY was not able to guess the signatures of call-back arguments correctly. First, we create an initial signature file ``callback2.pyf`` using F2PY:: @@ -198,40 +200,40 @@ file ``callback2.pyf`` using F2PY:: Then modify it as follows - .. include:: callback2.pyf + .. include:: ./code/callback2.pyf :literal: -Finally, build the extension module using ``f2py -c callback2.pyf callback.f``. +Finally, we build the extension module using ``f2py -c callback2.pyf callback.f``. -An example Python session would be identical to the previous example -except that argument names would differ. +An example Python session for this snippet would be identical to the previous +example except that the argument names would differ. Sometimes a Fortran package may require that users provide routines that the package will use. F2PY can construct an interface to such -routines so that Python functions could be called from Fortran. +routines so that Python functions can be called from Fortran. -Consider the following Fortran 77 subroutine that takes an array +Consider the following Fortran 77 subroutine that takes an array as its input and applies a function ``func`` to its elements. - .. include:: calculate.f - :literal: + .. literalinclude:: ./code/calculate.f + :language: fortran -It is expected that function ``func`` has been defined -externally. In order to use a Python function as ``func``, it must -have an attribute ``intent(callback)`` (it must be specified before -the ``external`` statement). +The Fortran code expects that the function ``func`` has been defined externally. +In order to use a Python function for ``func``, it must have an attribute +``intent(callback)``, and it must be specified before the ``external`` statement. Finally, build an extension module using ``f2py -c -m foo calculate.f`` In Python: - .. include:: calculate_session.dat - :literal: + ..
literalinclude:: ./code/results/calculate_session.dat :language: python -The function is included as an argument to the python function call to -the Fortran subroutine even though it was *not* in the Fortran subroutine argument -list. The "external" refers to the C function generated by f2py, not the python -function itself. The python function must be supplied to the C function. +The function is included as an argument to the python function call to the +Fortran subroutine even though it was *not* in the Fortran subroutine argument +list. The "external" keyword refers to the C function generated by f2py, not the +python function itself. The python function is essentially being supplied to the +C function. The callback function may also be explicitly set in the module. Then it is not necessary to pass the function in the argument list to @@ -240,24 +242,24 @@ the python callback function is itself called by another Fortran function. Consider the following Fortran 77 subroutine: - .. include:: extcallback.f - :literal: + .. literalinclude:: ./code/extcallback.f + :language: fortran and wrap it using ``f2py -c -m pfromf extcallback.f``. In Python: - .. include:: extcallback_session.dat - :literal: + .. literalinclude:: ./code/results/extcallback_session.dat + :language: python Resolving arguments to call-back functions ------------------------------------------- +=========================================== -F2PY generated interface is very flexible with respect to call-back +F2PY generated interfaces are very flexible with respect to call-back arguments. For each call-back argument an additional optional argument ``<name>_extra_args`` is introduced by F2PY. This argument can be used to pass extra arguments to user provided call-back -arguments. +functions. If a F2PY generated wrapper function expects the following call-back argument:: @@ -281,7 +283,7 @@ is provided by a user, and in addition, fun_extra_args = (e_1,...,e_p) is used, then the following rules are applied when a Fortran or C -function calls the call-back argument ``gun``: +function evaluates the call-back argument ``gun``: * If ``p == 0`` then ``gun(a_1, ..., a_q)`` is called, here ``q = min(m, n)``. @@ -292,8 +294,8 @@ function calls the call-back argument ``gun``: * If ``n + p`` is less than the number of required arguments to ``gun`` then an exception is raised. -The function ``gun`` may return any number of objects as a tuple. Then -following rules are applied: +The function ``gun`` may return any number of objects as a tuple, in which case +the following rules are applied: * If ``k < l``, then ``y_{k + 1}, ..., y_l`` are ignored. * If ``k > l``, then only ``x_1, ..., x_l`` are set. @@ -303,62 +305,62 @@ Common blocks ============== F2PY generates wrappers to ``common`` blocks defined in a routine -signature block. Common blocks are visible by all Fortran codes linked -with the current extension module, but not to other extension modules -(this restriction is due to how Python imports shared libraries). In +signature block. Common blocks are visible to all Fortran codes linked +to the current extension module, but not to other extension modules +(this restriction is due to the way Python imports shared libraries). In
When accessed, these attributes return as NumPy array -objects (multidimensional arrays are Fortran-contiguous) that +objects that have (dynamic) attributes related to the data members of +the common blocks. When accessed, these attributes return as NumPy array +objects (multidimensional arrays are Fortran-contiguous) which directly link to data members in common blocks. Data members can be changed by direct assignment or by in-place changes to the corresponding array objects. Consider the following Fortran 77 code: - .. include:: common.f - :literal: + .. literalinclude:: ./code/common.f + :language: fortran and wrap it using ``f2py -c -m common common.f``. In Python: - .. include:: common_session.dat - :literal: + .. literalinclude:: ./code/results/common_session.dat + :language: python Fortran 90 module data ======================= -The F2PY interface to Fortran 90 module data is similar to Fortran 77 +The F2PY interface to Fortran 90 module data is similar to the handling of Fortran 77 common blocks. Consider the following Fortran 90 code: - .. include:: moddata.f90 - :literal: + .. literalinclude:: ./code/moddata.f90 + :language: fortran and wrap it using ``f2py -c -m moddata moddata.f90``. In Python: - .. include:: moddata_session.dat - :literal: + .. literalinclude:: ./code/results/moddata_session.dat + :language: python Allocatable arrays -------------------- +=================== F2PY has basic support for Fortran 90 module allocatable arrays. Consider the following Fortran 90 code: - .. include:: allocarr.f90 - :literal: + .. literalinclude:: ./code/allocarr.f90 + :language: fortran and wrap it using ``f2py -c -m allocarr allocarr.f90``. In Python: - .. include:: allocarr_session.dat - :literal: + .. literalinclude:: ./code/results/allocarr_session.dat + :language: python diff --git a/doc/source/f2py/signature-file.rst b/doc/source/f2py/signature-file.rst index 3a163ee23..b80b31509 100644 --- a/doc/source/f2py/signature-file.rst +++ b/doc/source/f2py/signature-file.rst @@ -2,23 +2,22 @@ Signature file ================== -The syntax specification for signature files (.pyf files) is borrowed -from the Fortran 90/95 language specification. Almost all Fortran -90/95 standard constructs are understood, both in free and fixed -format (recall that Fortran 77 is a subset of Fortran 90/95). F2PY -introduces also some extensions to Fortran 90/95 language -specification that help designing Fortran to Python interface, make it -more "Pythonic". - -Signature files may contain arbitrary Fortran code (so that Fortran -codes can be considered as signature files). F2PY silently ignores +The syntax specification for signature files (.pyf files) is modeled on the +Fortran 90/95 language specification. Almost all Fortran 90/95 standard +constructs are understood, both in free and fixed format (recall that Fortran 77 +is a subset of Fortran 90/95). F2PY introduces some extensions to the Fortran +90/95 language specification that help in the design of the Fortran to Python +interface, making it more "Pythonic". + +Signature files may contain arbitrary Fortran code so that any Fortran 90/95 +codes can be treated as signature files. F2PY silently ignores Fortran constructs that are irrelevant for creating the interface. -However, this includes also syntax errors. So, be careful not making -ones ;-). +However, this also means that syntax errors are not caught by F2PY and will only +be caught when the library is built. -In general, the contents of signature files is case-sensitive. 
When -scanning Fortran codes and writing a signature file, F2PY lowers all -cases automatically except in multiline blocks or when ``--no-lower`` +In general, the contents of the signature files are case-sensitive. When +scanning Fortran codes to generate a signature file, F2PY lowers all +cases automatically except in multi-line blocks or when the ``--no-lower`` option is used. The syntax of signature files is presented below. @@ -27,13 +26,15 @@ Python module block ===================== A signature file may contain one (recommended) or more ``python -module`` blocks. ``python module`` block describes the contents of +module`` blocks. The ``python module`` block describes the contents of a Python/C extension module ``<modulename>module.c`` that F2PY generates. -Exception: if ``<modulename>`` contains a substring ``__user__``, then -the corresponding ``python module`` block describes the signatures of -so-called call-back functions (see :ref:`Call-back arguments`). +.. warning:: + + Exception: if ``<modulename>`` contains a substring ``__user__``, then the + corresponding ``python module`` block describes the signatures of call-back + functions (see :ref:`Call-back arguments`). A ``python module`` block has the following structure:: @@ -56,9 +57,9 @@ A ``python module`` block has the following structure:: ]... end [python module [<modulename>]] -Here brackets ``[]`` indicate an optional part, dots ``...`` indicate -one or more of a previous part. So, ``[]...`` reads zero or more of a -previous part. +Here brackets ``[]`` indicate an optional section, dots ``...`` indicate one or +more of a previous section. So, ``[]...`` is to be read as zero or more of a +previous section. Fortran/C routine signatures @@ -93,7 +94,7 @@ The signature of a Fortran block data has the following structure:: end [ block data [<block data name>] ] Type declarations ------------------ +================= The definition of the ``<argument/variable type declaration>`` part is @@ -123,33 +124,36 @@ where and -+ ``<attrspec>`` is a comma separated list of attributes_; +* ``<attrspec>`` is a comma separated list of attributes_; -+ ``<arrayspec>`` is a comma separated list of dimension bounds; +* ``<arrayspec>`` is a comma separated list of dimension bounds; -+ ``<init_expr>`` is a `C expression`__. +* ``<init_expr>`` is a `C expression`__; -+ ``<intlen>`` may be negative integer for ``integer`` type +* ``<intlen>`` may be negative integer for ``integer`` type specifications. In such cases ``integer*<negintlen>`` represents - unsigned C integers. + unsigned C integers; __ `C expressions`_ If an argument has no ``<argument type declaration>``, its type is determined by applying ``implicit`` rules to its name. - Statements ----------- +========== + +Attribute statements +^^^^^^^^^^^^^^^^^^^^^ -Attribute statements: - The ``<argument/variable attribute statement>`` is +* The ``<argument/variable attribute statement>`` is ``<argument/variable type declaration>`` without ``<typespec>``. - In addition, in an attribute statement one cannot use other +* In addition, in an attribute statement one cannot use other attributes, also ``<entitydecl>`` can be only a list of names. 
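The signature files described in this chapter are usually generated on the command line (via the ``-h`` flag), but the same scan can be driven from Python. A minimal sketch using the documented ``numpy.f2py.run_main`` entry point, with the file names from the earlier ``fib`` examples:

.. code-block:: python

    import numpy.f2py

    # Equivalent to the command line:
    #     python -m numpy.f2py fib1.f -m fib2 -h fib1.pyf
    # i.e. scan fib1.f and write the signature file fib1.pyf.
    numpy.f2py.run_main(["fib1.f", "-m", "fib2", "-h", "fib1.pyf"])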
-Use statements: - The definition of the ``<use statement>`` part is +Use statements +^^^^^^^^^^^^^^^ + +* The definition of the ``<use statement>`` part is :: @@ -161,12 +165,14 @@ Use statements: <rename_list> := <local_name> => <use_name> [ , <rename_list> ] - Currently F2PY uses ``use`` statement only for linking call-back +* Currently F2PY uses ``use`` statement only for linking call-back modules and ``external`` arguments (call-back functions), see :ref:`Call-back arguments`. -Common block statements: - The definition of the ``<common block statement>`` part is +Common block statements +^^^^^^^^^^^^^^^^^^^^^^^ + +* The definition of the ``<common block statement>`` part is :: @@ -178,18 +184,19 @@ Common block statements: <shortentitydecl> := <name> [ ( <arrayspec> ) ] [ , <shortentitydecl> ] - If a ``python module`` block contains two or more ``common`` blocks +* If a ``python module`` block contains two or more ``common`` blocks with the same name, the variables from the additional declarations are appended. The types of variables in ``<shortentitydecl>`` are defined using ``<argument type declarations>``. Note that the corresponding ``<argument type declarations>`` may contain array - specifications; then you don't need to specify these in - ``<shortentitydecl>``. + specifications; then these need not be specified in ``<shortentitydecl>``. -Other statements: - The ``<other statement>`` part refers to any other Fortran language +Other statements +^^^^^^^^^^^^^^^^^ + +* The ``<other statement>`` part refers to any other Fortran language constructs that are not described above. F2PY ignores most of them - except + except the following: + ``call`` statements and function calls of ``external`` arguments (`more details`__?); @@ -223,7 +230,7 @@ Other statements: Implicit rules are used to determine the type specification of a variable (from the first-letter of its name) if the variable is not defined using ``<variable type declaration>``. Default - implicit rule is given by + implicit rules are given by: :: @@ -234,153 +241,170 @@ Other statements: entry <entry name> [([<arguments>])] - F2PY generates wrappers to all entry names using the signature + F2PY generates wrappers for all entry names using the signature of the routine block. - Tip: ``entry`` statement can be used to describe the signature - of an arbitrary routine allowing F2PY to generate a number of - wrappers from only one routine block signature. There are few - restrictions while doing this: ``fortranname`` cannot be used, - ``callstatement`` and ``callprotoargument`` can be used only if - they are valid for all entry routines, etc. + .. note:: + + The ``entry`` statement can be used to describe the signature of an + arbitrary subroutine or function allowing F2PY to generate a number of + wrappers from only one routine block signature. There are few + restrictions while doing this: ``fortranname`` cannot be used, + ``callstatement`` and ``callprotoargument`` can be used only if they are + valid for all entry routines, etc. + +F2PY statements +^^^^^^^^^^^^^^^^ In addition, F2PY introduces the following statements: - + ``threadsafe`` - Use ``Py_BEGIN_ALLOW_THREADS .. Py_END_ALLOW_THREADS`` block - around the call to Fortran/C function. - - + ``callstatement <C-expr|multi-line block>`` - Replace F2PY generated call statement to Fortran/C function with - ``<C-expr|multi-line block>``. The wrapped Fortran/C function - is available as ``(*f2py_func)``. 
To raise an exception, set - ``f2py_success = 0`` in ``<C-expr|multi-line block>``. - - + ``callprotoargument <C-typespecs>`` - When ``callstatement`` statement is used then F2PY may not - generate proper prototypes for Fortran/C functions (because - ``<C-expr>`` may contain any function calls and F2PY has no way - to determine what should be the proper prototype). With this - statement you can explicitly specify the arguments of the - corresponding prototype:: - - extern <return type> FUNC_F(<routine name>,<ROUTINE NAME>)(<callprotoargument>); - - + ``fortranname [<actual Fortran/C routine name>]`` - You can use arbitrary ``<routine name>`` for a given Fortran/C - function. Then you have to specify - ``<actual Fortran/C routine name>`` with this statement. - - If ``fortranname`` statement is used without - ``<actual Fortran/C routine name>`` then a dummy wrapper is - generated. - - + ``usercode <multi-line block>`` - When used inside ``python module`` block, then given C code - will be inserted to generated C/API source just before - wrapper function definitions. Here you can define arbitrary - C functions to be used in initialization of optional arguments, - for example. If ``usercode`` is used twice inside ``python - module`` block then the second multiline block is inserted - after the definition of external routines. - - When used inside ``<routine signature>``, then given C code will - be inserted to the corresponding wrapper function just after - declaring variables but before any C statements. So, ``usercode`` - follow-up can contain both declarations and C statements. - - When used inside the first ``interface`` block, then given C - code will be inserted at the end of the initialization - function of the extension module. Here you can modify extension - modules dictionary. For example, for defining additional - variables etc. - - + ``pymethoddef <multiline block>`` - Multiline block will be inserted to the definition of - module methods ``PyMethodDef``-array. It must be a - comma-separated list of C arrays (see `Extending and Embedding`__ - Python documentation for details). - ``pymethoddef`` statement can be used only inside - ``python module`` block. +``threadsafe`` + Uses a ``Py_BEGIN_ALLOW_THREADS .. Py_END_ALLOW_THREADS`` block + around the call to Fortran/C function. + +``callstatement <C-expr|multi-line block>`` + Replaces the F2PY generated call statement to Fortran/C function with + ``<C-expr|multi-line block>``. The wrapped Fortran/C function is available + as ``(*f2py_func)``. + + To raise an exception, set ``f2py_success = 0`` in ``<C-expr|multi-line + block>``. + +``callprotoargument <C-typespecs>`` + When the ``callstatement`` statement is used then F2PY may not + generate proper prototypes for Fortran/C functions (because + ``<C-expr>`` may contain any function calls and F2PY has no way + to determine what should be the proper prototype). + + With this statement you can explicitly specify the arguments of the + corresponding prototype:: + + extern <return type> FUNC_F(<routine name>,<ROUTINE NAME>)(<callprotoargument>); + +``fortranname [<actual Fortran/C routine name>]`` + F2PY allows for the use of an arbitrary ``<routine name>`` for a given + Fortran/C function. Then this statement is used for the ``<actual + Fortran/C routine name>``. + + If ``fortranname`` statement is used without + ``<actual Fortran/C routine name>`` then a dummy wrapper is + generated. 
+ +``usercode <multi-line block>`` + When this is used inside a ``python module`` block, the given C code will + be inserted to generated C/API source just before wrapper function + definitions. + + Here you can define arbitrary C functions to be used for the + initialization of optional arguments. + + For example, if ``usercode`` is used twice inside ``python module`` block + then the second multi-line block is inserted after the definition of + the external routines. + + When used inside ``<routine signature>``, then the given C code will be + inserted into the corresponding wrapper function just after the + declaration of variables but before any C statements. So, the + ``usercode`` follow-up can contain both declarations and C statements. + + When used inside the first ``interface`` block, then the given C code will + be inserted at the end of the initialization function of the extension + module. This is how the extension modules dictionary can be modified and + has many use-cases; for example, to define additional variables. + +``pymethoddef <multiline block>`` + This is a multi-line block which will be inserted into the definition of a + module methods ``PyMethodDef``-array. It must be a comma-separated list of + C arrays (see `Extending and Embedding`__ Python documentation for + details). ``pymethoddef`` statement can be used only inside ``python + module`` block. __ https://docs.python.org/extending/index.html Attributes ------------- +============ The following attributes are used by F2PY: ``optional`` The corresponding argument is moved to the end of ``<optional arguments>`` list. A default value for an optional argument can be - specified ``<init_expr>``, see ``entitydecl`` definition. Note that - the default value must be given as a valid C expression. + specified via ``<init_expr>``, see the ``entitydecl`` definition. + - Note that whenever ``<init_expr>`` is used, ``optional`` attribute - is set automatically by F2PY. + .. note:: - For an optional array argument, all its dimensions must be bounded. + * The default value must be given as a valid C expression. + * Whenever ``<init_expr>`` is used, ``optional`` attribute + is set automatically by F2PY. + * For an optional array argument, all its dimensions must be bounded. ``required`` - The corresponding argument is considered as a required one. This is - default. You need to specify ``required`` only if there is a need to - disable automatic ``optional`` setting when ``<init_expr>`` is used. + The corresponding argument with this attribute is considered mandatory. This is + the default. ``required`` should only be specified if there is a need to + disable the automatic ``optional`` setting when ``<init_expr>`` is used. - If Python ``None`` object is used as a required argument, the + If a Python ``None`` object is used as a required argument, the argument is treated as optional. That is, in the case of array - argument, the memory is allocated. And if ``<init_expr>`` is given, - the corresponding initialization is carried out. + argument, the memory is allocated. If ``<init_expr>`` is given, then the + corresponding initialization is carried out. ``dimension(<arrayspec>)`` - The corresponding variable is considered as an array with given - dimensions in ``<arrayspec>``. + The corresponding variable is considered as an array with dimensions given in + ``<arrayspec>``. ``intent(<intentspec>)`` This specifies the "intention" of the corresponding argument.
``<intentspec>`` is a comma separated list of the following keys: - + ``in`` - The argument is considered as an input-only argument. It means - that the value of the argument is passed to Fortran/C function and - that function is expected not to change the value of an argument. - - + ``inout`` - The argument is considered as an input/output or *in situ* - output argument. ``intent(inout)`` arguments can be only - "contiguous" NumPy arrays with proper type and size. Here - "contiguous" can be either in Fortran or C sense. The latter one - coincides with the contiguous concept used in NumPy and is - effective only if ``intent(c)`` is used. Fortran contiguity - is assumed by default. - - Using ``intent(inout)`` is generally not recommended, use - ``intent(in,out)`` instead. See also ``intent(inplace)`` attribute. - - + ``inplace`` - The argument is considered as an input/output or *in situ* - output argument. ``intent(inplace)`` arguments must be - NumPy arrays with proper size. If the type of an array is - not "proper" or the array is non-contiguous then the array - will be changed in-place to fix the type and make it contiguous. - - Using ``intent(inplace)`` is generally not recommended either. - For example, when slices have been taken from an - ``intent(inplace)`` argument then after in-place changes, - slices data pointers may point to unallocated memory area. - - + ``out`` - The argument is considered as a return variable. It is appended - to the ``<returned variables>`` list. Using ``intent(out)`` - sets ``intent(hide)`` automatically, unless also - ``intent(in)`` or ``intent(inout)`` were used. - - By default, returned multidimensional arrays are - Fortran-contiguous. If ``intent(c)`` is used, then returned - multidimensional arrays are C-contiguous. - - + ``hide`` - The argument is removed from the list of required or optional + * ``in`` + The corresponding argument is considered to be input-only. This means that the value of + the argument is passed to a Fortran/C function and that the function is + expected to not change the value of this argument. + + * ``inout`` + The corresponding argument is marked for input/output or as an *in situ* output + argument. ``intent(inout)`` arguments can be only "contiguous" NumPy + arrays with proper type and size. Here "contiguous" can be either in the + Fortran or C sense. The latter coincides with the default contiguous + concept used in NumPy and is effective only if ``intent(c)`` is used. F2PY + assumes Fortran contiguous arguments by default. + + .. note:: + + Using ``intent(inout)`` is generally not recommended, use ``intent(in,out)`` instead. + + See also the ``intent(inplace)`` attribute. + + * ``inplace`` + The corresponding argument is considered to be an input/output or *in situ* output + argument. ``intent(inplace)`` arguments must be NumPy arrays of a proper + size. If the type of an array is not "proper" or the array is + non-contiguous then the array will be modified in-place to fix the type and + make it contiguous. + + .. note:: + + Using ``intent(inplace)`` is generally not recommended either. + + For example, when slices have been taken from an ``intent(inplace)`` argument + then after in-place changes, the data pointers for the slices may point to + an unallocated memory area. + + + * ``out`` + The corresponding argument is considered to be a return variable. It is appended to the + ``<returned variables>`` list. 
Using ``intent(out)`` sets ``intent(hide)`` + automatically, unless ``intent(in)`` or ``intent(inout)`` are specified + as well. + + By default, returned multidimensional arrays are Fortran-contiguous. If + ``intent(c)`` attribute is used, then the returned multidimensional arrays + are C-contiguous. + + * ``hide`` + The corresponding argument is removed from the list of required or optional arguments. Typically ``intent(hide)`` is used with ``intent(out)`` or when ``<init_expr>`` completely determines the value of the argument like in the following example:: @@ -388,18 +412,17 @@ The following attributes are used by F2PY: integer intent(hide),depend(a) :: n = len(a) real intent(in),dimension(n) :: a - + ``c`` - The argument is treated as a C scalar or C array argument. In - the case of a scalar argument, its value is passed to C function - as a C scalar argument (recall that Fortran scalar arguments are - actually C pointer arguments). In the case of an array - argument, the wrapper function is assumed to treat + * ``c`` + The corresponding argument is treated as a C scalar or C array argument. For the case + of a scalar argument, its value is passed to a C function as a C scalar + argument (recall that Fortran scalar arguments are actually C pointer + arguments). For array arguments, the wrapper function is assumed to treat multidimensional arrays as C-contiguous arrays. There is no need to use ``intent(c)`` for one-dimensional - arrays, no matter if the wrapped function is either a Fortran or - a C function. This is because the concepts of Fortran- and - C contiguity overlap in one-dimensional cases. + arrays, irrespective of whether the wrapped function is in Fortran or C. + This is because the concepts of Fortran- and C contiguity overlap in + one-dimensional cases. If ``intent(c)`` is used as a statement but without an entity declaration list, then F2PY adds the ``intent(c)`` attribute to all @@ -409,110 +432,121 @@ The following attributes are used by F2PY: attribute for ``<routine name>`` in order to disable Fortran specific ``F_FUNC(..,..)`` macros. - + ``cache`` - The argument is treated as a junk of memory. No Fortran nor C - contiguity checks are carried out. Using ``intent(cache)`` - makes sense only for array arguments, also in connection with - ``intent(hide)`` or ``optional`` attributes. - - + ``copy`` - Ensure that the original contents of ``intent(in)`` argument is - preserved. Typically used in connection with ``intent(in,out)`` - attribute. F2PY creates an optional argument - ``overwrite_<argument name>`` with the default value ``0``. - - + ``overwrite`` - The original contents of the ``intent(in)`` argument may be - altered by the Fortran/C function. F2PY creates an optional - argument ``overwrite_<argument name>`` with the default value - ``1``. - - + ``out=<new name>`` - Replace the return name with ``<new name>`` in the ``__doc__`` - string of a wrapper function. - - + ``callback`` - Construct an external function suitable for calling Python function + * ``cache`` + The corresponding argument is treated as junk memory. No Fortran nor C contiguity + checks are carried out. Using ``intent(cache)`` makes sense only for array + arguments, also in conjunction with ``intent(hide)`` or ``optional`` + attributes. + + * ``copy`` + Ensures that the original contents of ``intent(in)`` argument is + preserved. Typically used with the ``intent(in,out)`` attribute. F2PY + creates an optional argument ``overwrite_<argument name>`` with the + default value ``0``. 
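A hypothetical session illustrating the ``copy`` behaviour just described, assuming a routine ``twice`` has been wrapped with ``real intent(in,out,copy) :: a``; the module name ``m`` and the routine are invented for this sketch:

.. code-block:: python

    >>> import numpy as np
    >>> import m                       # hypothetical f2py-built extension module
    >>> a = np.array([1., 2., 3.], dtype=np.float32)
    >>> b = m.twice(a)                 # overwrite_a defaults to 0: a is preserved
    >>> a
    array([1., 2., 3.], dtype=float32)
    >>> b = m.twice(a, overwrite_a=1)  # the copy is skipped; a may be clobbered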
+
+  * ``overwrite``
+      This indicates that the original contents of the ``intent(in)`` argument
+      may be altered by the Fortran/C function. F2PY creates an optional
+      argument ``overwrite_<argument name>`` with the default value ``1``.
+
+  * ``out=<new name>``
+      Replaces the returned name with ``<new name>`` in the ``__doc__`` string
+      of the wrapper function.
+
+  * ``callback``
+      Constructs an external function suitable for calling Python functions
       from Fortran. ``intent(callback)`` must be specified before the
-      corresponding ``external`` statement. If 'argument' is not in
-      argument list then it will be added to Python wrapper but only
-      initializing external function.
-
-      Use ``intent(callback)`` in situations where a Fortran/C code
-      assumes that a user implements a function with given prototype
-      and links it to an executable. Don't use ``intent(callback)``
-      if function appears in the argument list of a Fortran routine.
-
-      With ``intent(hide)`` or ``optional`` attributes specified and
-      using a wrapper function without specifying the callback argument
-      in argument list then call-back function is looked in the
-      namespace of F2PY generated extension module where it can be
-      set as a module attribute by a user.
-
-  + ``aux``
-      Define auxiliary C variable in F2PY generated wrapper function.
-      Useful to save parameter values so that they can be accessed
-      in initialization expression of other variables. Note that
-      ``intent(aux)`` silently implies ``intent(c)``.
+      corresponding ``external`` statement. If the argument is not in the
+      argument list, it is still added to the Python wrapper, but only for
+      initializing the external function.
+
+      .. note::
+
+        Use ``intent(callback)`` in situations where the Fortran/C code assumes
+        that the user implemented a function with a given prototype and linked
+        it to an executable. Don't use ``intent(callback)`` if the function
+        appears in the argument list of a Fortran routine.
+
+        If ``intent(hide)`` or ``optional`` is specified and the wrapper
+        function is called without the callback argument, then the call-back
+        function is looked up in the namespace of the F2PY generated extension
+        module, where it can be set as a module attribute by a user.
+
+  * ``aux``
+      Defines an auxiliary C variable in the F2PY generated wrapper function.
+      Useful to save parameter values so that they can be accessed in
+      initialization expressions for other variables.
+
+      .. note::
+
+        ``intent(aux)`` silently implies ``intent(c)``.

 The following rules apply:

-  + If no ``intent(in | inout | out | hide)`` is specified,
+  * If none of ``intent(in | inout | out | hide)`` are specified,
     ``intent(in)`` is assumed.
-  + ``intent(in,inout)`` is ``intent(in)``.
-  + ``intent(in,hide)`` or ``intent(inout,hide)`` is
-    ``intent(hide)``.
-  + ``intent(out)`` is ``intent(out,hide)`` unless ``intent(in)`` or
-    ``intent(inout)`` is specified.
-  + If ``intent(copy)`` or ``intent(overwrite)`` is used, then an
-    additional optional argument is introduced with a name
-    ``overwrite_<argument name>`` and a default value 0 or 1, respectively.
-  + ``intent(inout,inplace)`` is ``intent(inplace)``.
-  + ``intent(in,inplace)`` is ``intent(inplace)``.
-  + ``intent(hide)`` disables ``optional`` and ``required``.
+
+  * ``intent(in,inout)`` is ``intent(in)``;
+
+  * ``intent(in,hide)`` or ``intent(inout,hide)`` is ``intent(hide)``;
+
+  * ``intent(out)`` is ``intent(out,hide)`` unless ``intent(in)`` or
+    ``intent(inout)`` is specified.
+
+  * If ``intent(copy)`` or ``intent(overwrite)`` is used, then an additional
+    optional argument is introduced with a name ``overwrite_<argument name>``
+    and a default value 0 or 1, respectively.
+
+  * ``intent(inout,inplace)`` is ``intent(inplace)``;
+
+  * ``intent(in,inplace)`` is ``intent(inplace)``;
+
+  * ``intent(hide)`` disables ``optional`` and ``required``.

 ``check([<C-booleanexpr>])``
-  Perform consistency check of arguments by evaluating
-  ``<C-booleanexpr>``; if ``<C-booleanexpr>`` returns 0, an exception
-  is raised.
+  Performs a consistency check on the arguments by evaluating
+  ``<C-booleanexpr>``; if ``<C-booleanexpr>`` returns 0, an exception is raised.
+
+  .. note::

-  If ``check(..)`` is not used then F2PY generates few standard checks
-  (e.g. in a case of an array argument, check for the proper shape
-  and size) automatically. Use ``check()`` to disable checks generated
-  by F2PY.
+    If ``check(..)`` is not used then F2PY automatically generates a few
+    standard checks (e.g. in the case of an array argument, it checks for the
+    proper shape and size). Use ``check()`` to disable the checks
+    generated by F2PY.

 ``depend([<names>])``
   This declares that the corresponding argument depends on the values
-  of variables in the list ``<names>``. For example, ``<init_expr>``
+  of variables in the ``<names>`` list. For example, ``<init_expr>``
   may use the values of other arguments. Using information given by
   ``depend(..)`` attributes, F2PY ensures that arguments are
-  initialized in a proper order. If ``depend(..)`` attribute is not
+  initialized in a proper order. If the ``depend(..)`` attribute is not
   used then F2PY determines dependence relations automatically. Use
-  ``depend()`` to disable dependence relations generated by F2PY.
+  ``depend()`` to disable the dependence relations generated by F2PY.

   When you edit dependence relations that were initially generated by
   F2PY, be careful not to break the dependence relations of other
-  relevant variables. Another thing to watch out is cyclic
+  relevant variables. Another thing to watch out for is cyclic
   dependencies. F2PY is able to detect cyclic dependencies when
   constructing wrappers and it complains if any are found.

 ``allocatable``
-  The corresponding variable is Fortran 90 allocatable array defined
-  as Fortran 90 module data.
+  The corresponding variable is a Fortran 90 allocatable array defined as
+  Fortran 90 module data.

 .. _external:

 ``external``
   The corresponding argument is a function provided by user. The
-  signature of this so-called call-back function can be defined
+  signature of this call-back function can be defined

   - in ``__user__`` module block,
   - or by demonstrative (or real, if the signature file is a real
     Fortran code) call in the ``<other statements>`` block.

-  For example, F2PY generates from
+  For example, F2PY generates from:

-  ::
+  .. code-block:: fortran

     external cb_sub, cb_fun
     integer n
@@ -520,7 +554,9 @@ The following attributes are used by F2PY:
     call cb_sub(a,n)
     r = cb_fun(4)

-  the following call-back signatures::
+  the following call-back signatures:
+
+  .. code-block:: fortran

     subroutine cb_sub(a,n)
         real dimension(n) :: a
@@ -531,7 +567,9 @@ The following attributes are used by F2PY:
         real :: r
     end function cb_fun

-  The corresponding user-provided Python function are then::
+  The corresponding user-provided Python functions are then:
+
+  .. code-block:: python

     def cb_sub(a,[n]):
         ...
@@ -540,49 +578,50 @@ The following attributes are used by F2PY:
         ...
         return r

-  See also ``intent(callback)`` attribute.
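+
+  With these signatures, plain Python callables can be passed to the wrapped
+  routine. A minimal sketch of the Python side (``mymodule`` is a placeholder
+  for the F2PY generated extension module, not a name F2PY creates):
+
+  .. code-block:: python
+
+     import numpy as np
+
+     def cb_sub(a, n=None):
+         # ``a`` arrives as a rank-1 array; ``n`` may be hidden/optional
+         print(np.asarray(a).sum())
+
+     def cb_fun(e):
+         # the return value is converted to a Fortran ``real``
+         return 2.0 * e
+
+     # mymodule.some_routine(cb_sub, cb_fun)  # hypothetical wrapped call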
+ See also the ``intent(callback)`` attribute. ``parameter`` - The corresponding variable is a parameter and it must have a fixed - value. F2PY replaces all parameter occurrences by their - corresponding values. + This indicates that the corresponding variable is a parameter and it must have + a fixed value. F2PY replaces all parameter occurrences by their corresponding + values. Extensions ============ F2PY directives ------------------ +^^^^^^^^^^^^^^^^ -The so-called F2PY directives allow using F2PY signature file -constructs also in Fortran 77/90 source codes. With this feature you -can skip (almost) completely intermediate signature file generations -and apply F2PY directly to Fortran source codes. +The F2PY directives allow using F2PY signature file constructs in +Fortran 77/90 source codes. With this feature one can (almost) completely skip +the intermediate signature file generation and apply F2PY directly to Fortran +source codes. -F2PY directive has the following form:: +F2PY directives have the following form:: <comment char>f2py ... where allowed comment characters for fixed and free format Fortran codes are ``cC*!#`` and ``!``, respectively. Everything that follows ``<comment char>f2py`` is ignored by a compiler but read by F2PY as a -normal Fortran, non-comment line: +normal non-comment Fortran line: +.. note:: When F2PY finds a line with F2PY directive, the directive is first replaced by 5 spaces and then the line is reread. For fixed format Fortran codes, ``<comment char>`` must be at the first column of a file, of course. For free format Fortran codes, -F2PY directives can appear anywhere in a file. +the F2PY directives can appear anywhere in a file. C expressions --------------- +^^^^^^^^^^^^^^ C expressions are used in the following parts of signature files: -* ``<init_expr>`` of variable initialization; +* ``<init_expr>`` for variable initialization; * ``<C-booleanexpr>`` of the ``check`` attribute; -* ``<arrayspec> of the ``dimension`` attribute; -* ``callstatement`` statement, here also a C multiline block can be used. +* ``<arrayspec>`` of the ``dimension`` attribute; +* ``callstatement`` statement, here also a C multi-line block can be used. A C expression may contain: @@ -592,15 +631,19 @@ A C expression may contain: according to given dependence relations; * the following CPP macros: - ``rank(<name>)`` + * ``rank(<name>)`` Returns the rank of an array ``<name>``. - ``shape(<name>,<n>)`` + + * ``shape(<name>,<n>)`` Returns the ``<n>``-th dimension of an array ``<name>``. - ``len(<name>)`` + + * ``len(<name>)`` Returns the length of an array ``<name>``. - ``size(<name>)`` + + * ``size(<name>)`` Returns the size of an array ``<name>``. - ``slen(<name>)`` + + * ``slen(<name>)`` Returns the length of a string ``<name>``. For initializing an array ``<array name>``, F2PY generates a loop over @@ -615,7 +658,7 @@ from ``0`` to ``shape(<array name>,<i>)-1``. For example, a function ``myrange(n)`` generated from the following signature -:: +.. code-block:: subroutine myrange(a,n) fortranname ! myrange is a dummy wrapper @@ -630,23 +673,23 @@ is equivalent to ``numpy.arange(n,dtype=float)``. F2PY may lower cases also in C expressions when scanning Fortran codes (see ``--[no]-lower`` option). -Multiline blocks ------------------- +Multi-line blocks +^^^^^^^^^^^^^^^^^^ -A multiline block starts with ``'''`` (triple single-quotes) and ends -with ``'''`` in some *strictly* subsequent line. Multiline blocks can -be used only within .pyf files. 
The contents of a multiline block can
+A multi-line block starts with ``'''`` (triple single-quotes) and ends
+with ``'''`` in some *strictly* subsequent line. Multi-line blocks can
+be used only within .pyf files. The contents of a multi-line block can
 be arbitrary (except that it cannot contain ``'''``) and no
 transformations (e.g. lowering cases) are applied to it.

-Currently, multiline blocks can be used in the following constructs:
+Currently, multi-line blocks can be used in the following constructs:

-+ as a C expression of the ``callstatement`` statement;
+* as a C expression of the ``callstatement`` statement;

-+ as a C type specification of the ``callprotoargument`` statement;
+* as a C type specification of the ``callprotoargument`` statement;

-+ as a C code block of the ``usercode`` statement;
+* as a C code block of the ``usercode`` statement;

-+ as a list of C arrays of the ``pymethoddef`` statement;
+* as a list of C arrays of the ``pymethoddef`` statement;

-+ as documentation string.
+* as a documentation string.
diff --git a/doc/source/f2py/usage.rst b/doc/source/f2py/usage.rst
index 6c3b4b6ef..596148799 100644
--- a/doc/source/f2py/usage.rst
+++ b/doc/source/f2py/usage.rst
@@ -3,9 +3,9 @@ Using F2PY
 ===========

 F2PY can be used either as a command line tool ``f2py`` or as a Python
-module ``numpy.f2py``. While we try to install the command line tool as part
+module ``numpy.f2py``. While we try to provide the command line tool as part
 of the numpy setup, some platforms like Windows make it difficult to
-reliably put the executable on the ``PATH``. We will refer to ``f2py``
+reliably put the executables on the ``PATH``. We will refer to ``f2py``
 in this document but you may have to run it as a module::

    python -m numpy.f2py
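+
+The command line interface is also reachable programmatically through
+``numpy.f2py.run_main``, which takes the command line arguments as a list
+(``-c`` is not supported by this function; ``fib1.f`` below is a placeholder
+source file, reused from the examples later in this document):
+
+.. code-block:: python
+
+   import numpy.f2py
+
+   # equivalent to:  f2py -m fib1 fib1.f -h fib1.pyf
+   numpy.f2py.run_main(['-m', 'fib1', 'fib1.f', '-h', 'fib1.pyf'])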
@@ -21,32 +21,40 @@ Command ``f2py``

 When used as a command line tool, ``f2py`` has three major modes,
 distinguished by the usage of ``-c`` and ``-h`` switches:

+Signature file generation
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
 1. To scan Fortran sources and generate a signature file, use

-   ::
+   .. code-block:: sh

      f2py -h <filename.pyf> <options> <fortran files>   \
        [[ only: <fortran functions> : ]                 \
         [ skip: <fortran functions> : ]]...             \
        [<fortran files> ...]

-   Note that a Fortran source file can contain many routines, and not
-   necessarily all routines are needed to be used from Python. So, you
-   can either specify which routines should be wrapped (in ``only: .. :``
-   part) or which routines F2PY should ignored (in ``skip: .. :`` part).
+   .. note::
+
+     A Fortran source file can contain many routines, and it is often not
+     necessary for all of them to be usable from Python. In such cases,
+     either specify which routines should be wrapped (in the ``only: .. :``
+     part) or which routines F2PY should ignore (in the ``skip: .. :`` part).

    If ``<filename.pyf>`` is specified as ``stdout`` then signatures
-   are send to standard output instead of a file.
+   are written to standard output instead of a file.

-   Among other options (see below), the following options can be used
+   Among other options (see below), the following can be used
    in this mode:

    ``--overwrite-signature``
-     Overwrite existing signature file.
+     Overwrites an existing signature file.
+
+Extension module construction
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

 2. To construct an extension module, use

-   ::
+   .. code-block:: sh

      f2py -m <modulename> <options> <fortran files>   \
        [[ only: <fortran functions> : ]               \
@@ -61,17 +69,19 @@ distinguished by the usage of ``-c`` and ``-h`` switches:
    in this mode:

    ``--debug-capi``
-     Add debugging hooks to the extension module. When using this
-     extension module, various information about the wrapper is printed
-     to standard output, for example, the values of variables, the
-     steps taken, etc.
+     Adds debugging hooks to the extension module. When using this extension
+     module, various diagnostic information about the wrapper is written to
+     the standard output, for example, the values of variables, the steps
+     taken, etc.

    ``-include'<includefile>'``
      Add a CPP ``#include`` statement to the extension module source.
-     ``<includefile>`` should be given in one of the following forms::
+     ``<includefile>`` should be given in one of the following forms

-       "filename.ext"
-       <filename.ext>
+     .. code-block:: cpp
+
+        "filename.ext"
+        <filename.ext>

      The include statement is inserted just before the wrapper
      functions. This feature enables using arbitrary C functions
@@ -91,16 +101,19 @@ distinguished by the usage of ``-c`` and ``-h`` switches:
      List system resources found by ``numpy_distutils/system_info.py``.
      For example, try ``f2py --help-link lapack_opt``.

+Building a module
+^^^^^^^^^^^^^^^^^
+
 3. To build an extension module, use

-   ::
+   .. code-block:: sh

      f2py -c <options> <fortran files>       \
        [[ only: <fortran functions> : ]      \
         [ skip: <fortran functions> : ]]...  \
        [ <fortran/c source files> ] [ <.o, .a, .so files> ]

-   If ``<fortran files>`` contains a signature file, then a source for
+   If ``<fortran files>`` contains a signature file, then the source for
    an extension module is constructed, all Fortran and C sources are
    compiled, and finally all object and library files are linked to the
    extension module ``<modulename>.so`` which is saved into the current
@@ -108,26 +121,25 @@ distinguished by the usage of ``-c`` and ``-h`` switches:

    If ``<fortran files>`` does not contain a signature file, then an
    extension module is constructed by scanning all Fortran source codes
-   for routine signatures.
+   for routine signatures, before proceeding to build the extension module.

-   Among other options (see below) and options described in previous
-   mode, the following options can be used in this mode:
+   Among other options (see below) and options described for previous
+   modes, the following options can be used in this mode:

    ``--help-fcompiler``
-     List available Fortran compilers.
-   ``--help-compiler`` [depreciated]
-     List available Fortran compilers.
+     List the available Fortran compilers.
+   ``--help-compiler`` **[deprecated]**
+     List the available Fortran compilers.
    ``--fcompiler=<Vendor>``
-     Specify Fortran compiler type by vendor.
+     Specify a Fortran compiler type by vendor.
    ``--f77exec=<path>``
-     Specify the path to F77 compiler
-   ``--fcompiler-exec=<path>`` [depreciated]
-     Specify the path to F77 compiler
+     Specify the path to an F77 compiler
+   ``--fcompiler-exec=<path>`` **[deprecated]**
+     Specify the path to an F77 compiler
    ``--f90exec=<path>``
-     Specify the path to F90 compiler
-   ``--f90compiler-exec=<path>`` [depreciated]
-     Specify the path to F90 compiler
-
+     Specify the path to an F90 compiler
+   ``--f90compiler-exec=<path>`` **[deprecated]**
+     Specify the path to an F90 compiler
    ``--f77flags=<string>``
      Specify F77 compiler flags
    ``--f90flags=<string>``
@@ -137,12 +149,11 @@ distinguished by the usage of ``-c`` and ``-h`` switches:
    ``--arch=<string>``
      Specify architecture specific optimization flags
    ``--noopt``
-     Compile without optimization
+     Compile without optimization flags
    ``--noarch``
-     Compile without arch-dependent optimization
+     Compile without arch-dependent optimization flags
    ``--debug``
      Compile with debugging information
-
    ``-l<libname>``
      Use the library ``<libname>`` when linking.
    ``-D<macro>[=<defn=1>]``
@@ -155,34 +166,35 @@ distinguished by the usage of ``-c`` and ``-h`` switches:
    ``-L<dir>``
      Add directory ``<dir>`` to the list of directories to be
      searched for ``-l``.
-
    ``link-<resource>``
-     Link extension module with <resource> as defined by
+     Link the extension module with <resource> as defined by
      ``numpy_distutils/system_info.py``. E.g. to link with optimized
      LAPACK libraries (vecLib on MacOSX, ATLAS elsewhere), use
      ``--link-lapack_opt``. See also ``--help-link`` switch.

    .. note::

      The ``f2py -c`` option must be applied either to an existing ``.pyf``
      file (plus the source/object/library files) or one must specify the
      ``-m <modulename>`` option (plus the sources/object/library files).
      Use one of the following options:

-     ::
+     .. code-block:: sh

        f2py -c -m fib1 fib1.f

-     or
+     or

-     ::
+     .. code-block:: sh

        f2py -m fib1 fib1.f -h fib1.pyf
        f2py -c fib1.pyf fib1.f

-     For more information, see `Building C and C++ Extensions`__ Python documentation for details.
+     For more information, see the `Building C and C++ Extensions`__
+     Python documentation.

-     __ https://docs.python.org/3/extending/building.html
+     __ https://docs.python.org/3/extending/building.html
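+
+   The build step can also be driven from Python with the
+   ``numpy.f2py.compile`` helper. A minimal sketch, assuming a working
+   Fortran compiler is available (the routine below is a throwaway example,
+   not part of the ``fib1`` sources above):
+
+   .. code-block:: python
+
+      import numpy.f2py
+
+      fsource = """
+      subroutine dbl(a)
+        real(8) :: a
+        !f2py intent(in, out) :: a
+        a = 2*a
+      end subroutine dbl
+      """
+      # roughly equivalent to:  f2py -c -m dbl dbl.f90
+      numpy.f2py.compile(fsource, modulename='dbl',
+                         extension='.f90', verbose=False)
+
+      import dbl
+      print(dbl.dbl(21.0))  # -> 42.0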
    When building an extension module, a combination of the following
-   macros may be required for non-gcc Fortran compilers::
-
+   macros may be required for non-gcc Fortran compilers:
+
+   .. code-block:: sh
+
      -DPREPEND_FORTRAN
      -DNO_APPEND_FORTRAN
      -DUPPERCASE_FORTRAN
@@ -197,11 +209,13 @@ distinguished by the usage of ``-c`` and ``-h`` switches:
    of an array argument is larger than ``<int>``, a message about the
    copying is sent to ``stderr``.

-Other options:
+Other options
+^^^^^^^^^^^^^

 ``-m <modulename>``
-  Name of an extension module. Default is ``untitled``. Don't use this option
-  if a signature file (\*.pyf) is used.
+  Name of an extension module. Default is ``untitled``.
+
+  .. warning:: Don't use this option if a signature file (\*.pyf) is used.
 ``--[no-]lower``
   Do [not] lower the cases in ``<fortran files>``. By default,
   ``--lower`` is assumed with ``-h`` switch, and ``--no-lower``
@@ -214,7 +228,7 @@ Other options:
 ``--verbose``
   Run with extra verbosity.
 ``-v``
-  Print f2py version ID and exit.
+  Print the F2PY version and exit.

 Execute ``f2py`` without any options to get an up-to-date list of
 available options.
diff --git a/doc/source/index.rst b/doc/source/index.rst
index 21dec00fe..aac820a6f 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -1,7 +1,7 @@
..
_numpy_docs_mainpage: ################### -NumPy Documentation +NumPy documentation ################### .. toctree:: diff --git a/doc/source/reference/alignment.rst b/doc/source/reference/alignment.rst index 5e4315b38..70ded916a 100644 --- a/doc/source/reference/alignment.rst +++ b/doc/source/reference/alignment.rst @@ -1,104 +1,13 @@ -.. _alignment: +:orphan: +**************** Memory Alignment -================ +**************** -Numpy Alignment Goals ---------------------- +.. This document has been moved to ../dev/alignment.rst. -There are three use-cases related to memory alignment in numpy (as of 1.14): +This document has been moved to :ref:`alignment`. - 1. Creating structured datatypes with fields aligned like in a C-struct. - 2. Speeding up copy operations by using uint assignment in instead of memcpy - 3. Guaranteeing safe aligned access for ufuncs/setitem/casting code -Numpy uses two different forms of alignment to achieve these goals: -"True alignment" and "Uint alignment". - -"True" alignment refers to the architecture-dependent alignment of an -equivalent C-type in C. For example, in x64 systems ``numpy.float64`` is -equivalent to ``double`` in C. On most systems this has either an alignment of -4 or 8 bytes (and this can be controlled in gcc by the option -``malign-double``). A variable is aligned in memory if its memory offset is a -multiple of its alignment. On some systems (eg sparc) memory alignment is -required, on others it gives a speedup. - -"Uint" alignment depends on the size of a datatype. It is defined to be the -"True alignment" of the uint used by numpy's copy-code to copy the datatype, or -undefined/unaligned if there is no equivalent uint. Currently numpy uses uint8, -uint16, uint32, uint64 and uint64 to copy data of size 1,2,4,8,16 bytes -respectively, and all other sized datatypes cannot be uint-aligned. - -For example, on a (typical linux x64 gcc) system, the numpy ``complex64`` -datatype is implemented as ``struct { float real, imag; }``. This has "true" -alignment of 4 and "uint" alignment of 8 (equal to the true alignment of -``uint64``). - -Some cases where uint and true alignment are different (default gcc linux): - arch type true-aln uint-aln - ---- ---- -------- -------- - x86_64 complex64 4 8 - x86_64 float128 16 8 - x86 float96 4 - - - -Variables in Numpy which control and describe alignment -------------------------------------------------------- - -There are 4 relevant uses of the word ``align`` used in numpy: - - * The ``dtype.alignment`` attribute (``descr->alignment`` in C). This is meant - to reflect the "true alignment" of the type. It has arch-dependent default - values for all datatypes, with the exception of structured types created - with ``align=True`` as described below. - * The ``ALIGNED`` flag of an ndarray, computed in ``IsAligned`` and checked - by ``PyArray_ISALIGNED``. This is computed from ``dtype.alignment``. - It is set to ``True`` if every item in the array is at a memory location - consistent with ``dtype.alignment``, which is the case if the data ptr and - all strides of the array are multiples of that alignment. - * The ``align`` keyword of the dtype constructor, which only affects structured - arrays. If the structure's field offsets are not manually provided numpy - determines offsets automatically. In that case, ``align=True`` pads the - structure so that each field is "true" aligned in memory and sets - ``dtype.alignment`` to be the largest of the field "true" alignments. This - is like what C-structs usually do. 
Otherwise if offsets or itemsize were - manually provided ``align=True`` simply checks that all the fields are - "true" aligned and that the total itemsize is a multiple of the largest - field alignment. In either case ``dtype.isalignedstruct`` is also set to - True. - * ``IsUintAligned`` is used to determine if an ndarray is "uint aligned" in - an analogous way to how ``IsAligned`` checks for true-alignment. - -Consequences of alignment -------------------------- - -Here is how the variables above are used: - - 1. Creating aligned structs: In order to know how to offset a field when - ``align=True``, numpy looks up ``field.dtype.alignment``. This includes - fields which are nested structured arrays. - 2. Ufuncs: If the ``ALIGNED`` flag of an array is False, ufuncs will - buffer/cast the array before evaluation. This is needed since ufunc inner - loops access raw elements directly, which might fail on some archs if the - elements are not true-aligned. - 3. Getitem/setitem/copyswap function: Similar to ufuncs, these functions - generally have two code paths. If ``ALIGNED`` is False they will - use a code path that buffers the arguments so they are true-aligned. - 4. Strided copy code: Here, "uint alignment" is used instead. If the itemsize - of an array is equal to 1, 2, 4, 8 or 16 bytes and the array is uint - aligned then instead numpy will do ``*(uintN*)dst) = *(uintN*)src)`` for - appropriate N. Otherwise numpy copies by doing ``memcpy(dst, src, N)``. - 5. Nditer code: Since this often calls the strided copy code, it must - check for "uint alignment". - 6. Cast code: This checks for "true" alignment, as it does - ``*dst = CASTFUNC(*src)`` if aligned. Otherwise, it does - ``memmove(srcval, src); dstval = CASTFUNC(srcval); memmove(dst, dstval)`` - where dstval/srcval are aligned. - -Note that the strided-copy and strided-cast code are deeply intertwined and so -any arrays being processed by them must be both uint and true aligned, even -though the copy-code only needs uint alignment and the cast code only true -alignment. If there is ever a big rewrite of this code it would be good to -allow them to use different alignments. diff --git a/doc/source/reference/arrays.datetime.rst b/doc/source/reference/arrays.datetime.rst index e3b8d270d..63c93821b 100644 --- a/doc/source/reference/arrays.datetime.rst +++ b/doc/source/reference/arrays.datetime.rst @@ -25,7 +25,7 @@ form of the string, and can be either a :ref:`date unit <arrays.dtypes.dateunits :ref:`time unit <arrays.dtypes.timeunits>`. The date units are years ('Y'), months ('M'), weeks ('W'), and days ('D'), while the time units are hours ('h'), minutes ('m'), seconds ('s'), milliseconds ('ms'), and -some additional SI-prefix seconds-based units. The datetime64 data type +some additional SI-prefix seconds-based units. The datetime64 data type also accepts the string "NAT", in any combination of lowercase/uppercase letters, for a "Not A Time" value. @@ -74,6 +74,18 @@ datetime type with generic units. array(['2001-01-01T12:00:00.000', '2002-02-03T13:56:03.172'], dtype='datetime64[ms]') +An array of datetimes can be constructed from integers representing +POSIX timestamps with the given unit. + +.. 
admonition:: Example + + >>> np.array([0, 1577836800], dtype='datetime64[s]') + array(['1970-01-01T00:00:00', '2020-01-01T00:00:00'], + dtype='datetime64[s]') + + >>> np.array([0, 1577836800000]).astype('datetime64[ms]') + array(['1970-01-01T00:00:00.000', '2020-01-01T00:00:00.000'], + dtype='datetime64[ms]') The datetime type works with many common NumPy functions, for example :func:`arange` can be used to generate ranges of dates. @@ -120,9 +132,9 @@ Datetime and Timedelta Arithmetic NumPy allows the subtraction of two Datetime values, an operation which produces a number with a time unit. Because NumPy doesn't have a physical quantities system in its core, the timedelta64 data type was created -to complement datetime64. The arguments for timedelta64 are a number, +to complement datetime64. The arguments for timedelta64 are a number, to represent the number of units, and a date/time unit, such as -(D)ay, (M)onth, (Y)ear, (h)ours, (m)inutes, or (s)econds. The timedelta64 +(D)ay, (M)onth, (Y)ear, (h)ours, (m)inutes, or (s)econds. The timedelta64 data type also accepts the string "NAT" in place of the number for a "Not A Time" value. .. admonition:: Example diff --git a/doc/source/reference/arrays.dtypes.rst b/doc/source/reference/arrays.dtypes.rst index b5ffa1a8b..8606bc8f1 100644 --- a/doc/source/reference/arrays.dtypes.rst +++ b/doc/source/reference/arrays.dtypes.rst @@ -562,3 +562,20 @@ The following methods implement the pickle protocol: dtype.__reduce__ dtype.__setstate__ + +Utility method for typing: + +.. autosummary:: + :toctree: generated/ + + dtype.__class_getitem__ + +Comparison operations: + +.. autosummary:: + :toctree: generated/ + + dtype.__ge__ + dtype.__gt__ + dtype.__le__ + dtype.__lt__ diff --git a/doc/source/reference/arrays.ndarray.rst b/doc/source/reference/arrays.ndarray.rst index f2204752d..0f703b475 100644 --- a/doc/source/reference/arrays.ndarray.rst +++ b/doc/source/reference/arrays.ndarray.rst @@ -249,7 +249,6 @@ Other attributes ndarray.real ndarray.imag ndarray.flat - ndarray.ctypes .. _arrays.ndarray.array-interface: @@ -621,3 +620,10 @@ String representations: ndarray.__str__ ndarray.__repr__ + +Utility method for typing: + +.. autosummary:: + :toctree: generated/ + + ndarray.__class_getitem__ diff --git a/doc/source/reference/arrays.scalars.rst b/doc/source/reference/arrays.scalars.rst index abef66692..c691e802f 100644 --- a/doc/source/reference/arrays.scalars.rst +++ b/doc/source/reference/arrays.scalars.rst @@ -196,10 +196,10 @@ Inexact types ``f16`` prints as ``0.1`` because it is as close to that value as possible, whereas the other types do not as they have more precision and therefore have closer values. - + Conversely, floating-point scalars of different precisions which approximate the same decimal value may compare unequal despite printing identically: - + >>> f16 = np.float16("0.1") >>> f32 = np.float32("0.1") >>> f64 = np.float64("0.1") @@ -399,7 +399,7 @@ are also provided. complex256 Alias for `numpy.clongdouble`, named after its size in bits. - The existance of these aliases depends on the platform. + The existence of these aliases depends on the platform. Other aliases ~~~~~~~~~~~~~ @@ -498,6 +498,13 @@ The exceptions to the above rules are given below: generic.__setstate__ generic.setflags +Utility method for typing: + +.. 
autosummary:: + :toctree: generated/ + + number.__class_getitem__ + Defining new types ================== diff --git a/doc/source/reference/c-api/array.rst b/doc/source/reference/c-api/array.rst index 26a8f643d..bb4405825 100644 --- a/doc/source/reference/c-api/array.rst +++ b/doc/source/reference/c-api/array.rst @@ -325,8 +325,7 @@ From scratch should be increased after the pointer is passed in, and the base member of the returned ndarray should point to the Python object that owns the data. This will ensure that the provided memory is not - freed while the returned array is in existence. To free memory as soon - as the ndarray is deallocated, set the OWNDATA flag on the returned ndarray. + freed while the returned array is in existence. .. c:function:: PyObject* PyArray_SimpleNewFromDescr( \ int nd, npy_int const* dims, PyArray_Descr* descr) @@ -519,34 +518,40 @@ From other objects :c:data:`NPY_ARRAY_CARRAY` - .. c:macro:: NPY_ARRAY_IN_ARRAY +.. + dedented to allow internal linking, pending a refactoring - :c:data:`NPY_ARRAY_C_CONTIGUOUS` \| :c:data:`NPY_ARRAY_ALIGNED` +.. c:macro:: NPY_ARRAY_IN_ARRAY + + :c:data:`NPY_ARRAY_C_CONTIGUOUS` \| :c:data:`NPY_ARRAY_ALIGNED` .. c:macro:: NPY_ARRAY_IN_FARRAY :c:data:`NPY_ARRAY_F_CONTIGUOUS` \| :c:data:`NPY_ARRAY_ALIGNED` - .. c:macro:: NPY_OUT_ARRAY +.. c:macro:: NPY_OUT_ARRAY - :c:data:`NPY_ARRAY_C_CONTIGUOUS` \| :c:data:`NPY_ARRAY_WRITEABLE` \| - :c:data:`NPY_ARRAY_ALIGNED` + :c:data:`NPY_ARRAY_C_CONTIGUOUS` \| :c:data:`NPY_ARRAY_WRITEABLE` \| + :c:data:`NPY_ARRAY_ALIGNED` - .. c:macro:: NPY_ARRAY_OUT_ARRAY +.. c:macro:: NPY_ARRAY_OUT_ARRAY - :c:data:`NPY_ARRAY_C_CONTIGUOUS` \| :c:data:`NPY_ARRAY_ALIGNED` \| - :c:data:`NPY_ARRAY_WRITEABLE` + :c:data:`NPY_ARRAY_C_CONTIGUOUS` \| :c:data:`NPY_ARRAY_ALIGNED` \| + :c:data:`NPY_ARRAY_WRITEABLE` .. c:macro:: NPY_ARRAY_OUT_FARRAY :c:data:`NPY_ARRAY_F_CONTIGUOUS` \| :c:data:`NPY_ARRAY_WRITEABLE` \| :c:data:`NPY_ARRAY_ALIGNED` - .. c:macro:: NPY_ARRAY_INOUT_ARRAY +.. + dedented to allow internal linking, pending a refactoring - :c:data:`NPY_ARRAY_C_CONTIGUOUS` \| :c:data:`NPY_ARRAY_WRITEABLE` \| - :c:data:`NPY_ARRAY_ALIGNED` \| :c:data:`NPY_ARRAY_WRITEBACKIFCOPY` \| - :c:data:`NPY_ARRAY_UPDATEIFCOPY` +.. c:macro:: NPY_ARRAY_INOUT_ARRAY + + :c:data:`NPY_ARRAY_C_CONTIGUOUS` \| :c:data:`NPY_ARRAY_WRITEABLE` \| + :c:data:`NPY_ARRAY_ALIGNED` \| :c:data:`NPY_ARRAY_WRITEBACKIFCOPY` \| + :c:data:`NPY_ARRAY_UPDATEIFCOPY` .. c:macro:: NPY_ARRAY_INOUT_FARRAY @@ -584,6 +589,9 @@ From other objects did not have the _ARRAY_ macro namespace in them. That form of the constant names is deprecated in 1.7. +.. + dedented to allow internal linking, pending a refactoring + .. c:macro:: NPY_ARRAY_NOTSWAPPED Make sure the returned array has a data-type descriptor that is in @@ -595,9 +603,13 @@ From other objects not in machine byte- order), then a new data-type descriptor is created and used with its byte-order field set to native. -.. c:macro:: NPY_ARRAY_BEHAVED_NS + .. c:macro:: NPY_ARRAY_BEHAVED_NS - :c:data:`NPY_ARRAY_ALIGNED` \| :c:data:`NPY_ARRAY_WRITEABLE` \| :c:data:`NPY_ARRAY_NOTSWAPPED` + :c:data:`NPY_ARRAY_ALIGNED` \| :c:data:`NPY_ARRAY_WRITEABLE` \| + :c:data:`NPY_ARRAY_NOTSWAPPED` + +.. + dedented to allow internal linking, pending a refactoring .. c:macro:: NPY_ARRAY_ELEMENTSTRIDES @@ -723,6 +735,13 @@ From other objects broadcastable to the shape of ``dest``. The data areas of dest and src must not overlap. +.. 
c:function:: int PyArray_CopyObject(PyArrayObject* dest, PyObject* src)
+
+   Assign an object ``src`` to a NumPy array ``dest`` according to
+   array-coercion rules. This is basically identical to
+   :c:func:`PyArray_FromAny`, but assigns directly to the output array.
+   Returns 0 on success and -1 on failure.
+
 .. c:function:: int PyArray_MoveInto(PyArrayObject* dest, PyArrayObject* src)

    Move data from the source array, ``src``, into the destination
@@ -1303,7 +1322,7 @@ User-defined data types
    data-type object, *descr*, of the given *scalar* kind. Use *scalar* =
    :c:data:`NPY_NOSCALAR` to register that an array of data-type *descr*
    can be cast safely to a data-type whose type_number is
-   *totype*.
+   *totype*. The return value is 0 on success or -1 on failure.

 .. c:function:: int PyArray_TypeNumFromName( \
        char const *str)
@@ -1443,7 +1462,9 @@ of the constant names is deprecated in 1.7.

 .. c:macro:: NPY_ARRAY_OWNDATA

-    The data area is owned by this array.
+    The data area is owned by this array. It should never be set manually;
+    instead, create a ``PyObject`` wrapping the data and set the array's
+    base to that object. For an example, see the test in ``test_mem_policy``.

 .. c:macro:: NPY_ARRAY_ALIGNED
@@ -2707,6 +2728,45 @@ cost of a slight overhead.
    neighborhood. Calling this function after every point of the
    neighborhood has been visited is undefined.

+Array mapping
+-------------
+
+Array mapping is the machinery behind advanced indexing.
+
+.. c:function:: PyObject* PyArray_MapIterArray(PyArrayObject *a, \
+       PyObject *index)
+
+   Use advanced indexing to iterate an array.
+
+.. c:function:: void PyArray_MapIterSwapAxes(PyArrayMapIterObject *mit, \
+       PyArrayObject **ret, int getmap)
+
+   Swap the axes to or from their inserted form. ``MapIter`` always puts the
+   advanced (array) indices first in the iteration. But if they are
+   consecutive, it will insert/transpose them back before returning.
+   This is stored as ``mit->consec != 0`` (the place where they are inserted).
+   For assignments, the opposite happens: the values to be assigned are
+   transposed (``getmap=1`` instead of ``getmap=0``). ``getmap=0`` and
+   ``getmap=1`` undo the other operation.
+
+.. c:function:: void PyArray_MapIterNext(PyArrayMapIterObject *mit)
+
+   This function needs to update the state of the map iterator
+   and point ``mit->dataptr`` to the memory-location of the next object.
+
+   Note that this function never handles an extra operand but provides
+   compatibility for an old (exposed) API.
+
+.. c:function:: PyObject* PyArray_MapIterArrayCopyIfOverlap(PyArrayObject *a, \
+       PyObject *index, int copy_if_overlap, PyArrayObject *extra_op)
+
+   Similar to :c:func:`PyArray_MapIterArray` but with an additional
+   ``copy_if_overlap`` argument. If ``copy_if_overlap != 0``, it checks if
+   ``a`` has memory overlap with any of the arrays in ``index`` and with
+   ``extra_op``, and makes copies as appropriate to avoid problems if the
+   input is modified during the iteration. ``iter->array`` may contain a
+   copied array (UPDATEIFCOPY/WRITEBACKIFCOPY set).
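+
+At the Python level, the operations these functions implement are ordinary
+advanced indexing; a small sketch of the two directions (selection and
+assignment) for orientation:
+
+.. code-block:: python
+
+   import numpy as np
+
+   a = np.arange(12).reshape(3, 4)
+   idx = np.array([2, 0])
+
+   b = a[idx]     # selection: iterate ``a`` under the advanced index
+   a[idx] += 10   # assignment: values are mapped back (the ``getmap=1`` case)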
+
 Array Scalars
 -------------

@@ -2719,13 +2779,19 @@ Array Scalars
    whenever 0-dimensional arrays could be returned to Python.

 .. c:function:: PyObject* PyArray_Scalar( \
-        void* data, PyArray_Descr* dtype, PyObject* itemsize)
-
-   Return an array scalar object of the given enumerated *typenum*
-   and *itemsize* by **copying** from memory pointed to by *data*
-   . If *swap* is nonzero then this function will byteswap the data
-   if appropriate to the data-type because array scalars are always
-   in correct machine-byte order.
+        void* data, PyArray_Descr* dtype, PyObject* base)
+
+   Return an array scalar object of the given *dtype* by **copying**
+   from memory pointed to by *data*. *base* is expected to be the
+   array object that is the owner of the data. *base* is required
+   if `dtype` is a ``void`` scalar, or if the ``NPY_USE_GETITEM``
+   flag is set and it is known that the ``getitem`` method uses
+   the ``arr`` argument without checking if it is ``NULL``. Otherwise
+   `base` may be ``NULL``.
+
+   If the data is not in native byte order (as indicated by
+   ``dtype->byteorder``) then this function will byteswap the data,
+   because array scalars are always in correct machine-byte order.

 .. c:function:: PyObject* PyArray_ToScalar(void* data, PyArrayObject* arr)
diff --git a/doc/source/reference/c-api/data_memory.rst b/doc/source/reference/c-api/data_memory.rst
new file mode 100644
index 000000000..11a37adc4
--- /dev/null
+++ b/doc/source/reference/c-api/data_memory.rst
@@ -0,0 +1,158 @@
+.. _data_memory:
+
+Memory management in NumPy
+==========================
+
+The `numpy.ndarray` is a Python class. It requires additional memory allocations
+to hold `numpy.ndarray.strides`, `numpy.ndarray.shape` and
+`numpy.ndarray.data` attributes. These attributes are specially allocated
+after creating the Python object in `__new__`. The ``strides`` and
+``shape`` are stored in a piece of memory allocated internally.
+
+The ``data`` allocation used to store the actual array values (which could be
+pointers in the case of ``object`` arrays) can be very large, so NumPy has
+provided interfaces to manage its allocation and release. This document details
+how those interfaces work.
+
+Historical overview
+-------------------
+
+Since version 1.7.0, NumPy has exposed a set of ``PyDataMem_*`` functions
+(:c:func:`PyDataMem_NEW`, :c:func:`PyDataMem_FREE`, :c:func:`PyDataMem_RENEW`)
+which are backed by ``malloc``, ``free`` and ``realloc`` respectively. In that
+version NumPy also exposed the `PyDataMem_EventHook` function described below,
+which wraps the OS-level calls.
+
+Since those early days, Python also improved its memory management
+capabilities, and began providing
+various :ref:`management policies <memoryoverview>` beginning in version
+3.4. These routines are divided into a set of domains; each domain has a
+:c:type:`PyMemAllocatorEx` structure of routines for memory management. Python
+also added a `tracemalloc` module to trace calls to the various routines. These
+tracking hooks were added to the NumPy ``PyDataMem_*`` routines.
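+
+Because of those tracking hooks, allocations of array data can be observed
+from Python with the standard ``tracemalloc`` module; a small sketch (the
+exact numbers reported will vary by platform and NumPy version):
+
+.. code-block:: python
+
+   import tracemalloc
+   import numpy as np
+
+   tracemalloc.start()
+   a = np.ones((1024, 1024))  # ~8 MiB data buffer
+   current, peak = tracemalloc.get_traced_memory()
+   print(current)  # includes the array's data buffer, because the
+                   # PyDataMem_* routines call PyTraceMalloc_Track
+   tracemalloc.stop()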
+NumPy added a small cache of allocated memory in its internal
+``npy_alloc_cache``, ``npy_alloc_cache_zero``, and ``npy_free_cache``
+functions. These wrap ``alloc``, ``alloc-and-memset(0)`` and ``free``
+respectively, but when ``npy_free_cache`` is called, it adds the pointer to a
+short list of available blocks marked by size. These blocks can be re-used by
+subsequent calls to ``npy_alloc*``, avoiding memory thrashing.
+
+Configurable memory routines in NumPy (NEP 49)
+----------------------------------------------
+
+Users may wish to override the internal data memory routines with ones of
+their own. Since NumPy does not use the Python domain strategy to manage data
+memory, it provides an alternative set of C-APIs to change the memory
+routines. There are no Python domain-wide strategies for large chunks of
+object data, so those are less suited to NumPy's needs. Users who wish to
+change the NumPy data memory management routines can use
+:c:func:`PyDataMem_SetHandler`, which uses a :c:type:`PyDataMem_Handler`
+structure to hold pointers to functions used to manage the data memory. The
+calls are still wrapped by internal routines to call
+:c:func:`PyTraceMalloc_Track`, :c:func:`PyTraceMalloc_Untrack`, and will use
+the :c:func:`PyDataMem_EventHookFunc` mechanism. Since the functions may
+change during the lifetime of the process, each ``ndarray`` carries with it
+the functions used at the time of its instantiation, and these will be used
+to reallocate or free the data memory of the instance.
+
+.. c:type:: PyDataMem_Handler
+
+   A struct to hold function pointers used to manipulate memory
+
+   .. code-block:: c
+
+      typedef struct {
+          char name[128];  /* multiple of 64 to keep the struct aligned */
+          PyDataMemAllocator allocator;
+      } PyDataMem_Handler;
+
+   where the allocator structure is
+
+   .. code-block:: c
+
+      /* The declaration of free differs from PyMemAllocatorEx */
+      typedef struct {
+          void *ctx;
+          void* (*malloc) (void *ctx, size_t size);
+          void* (*calloc) (void *ctx, size_t nelem, size_t elsize);
+          void* (*realloc) (void *ctx, void *ptr, size_t new_size);
+          void (*free) (void *ctx, void *ptr, size_t size);
+      } PyDataMemAllocator;
+
+.. c:function:: PyObject * PyDataMem_SetHandler(PyObject *handler)
+
+   Set a new allocation policy. If the input value is ``NULL``, the policy is
+   reset to the default. Returns the previous policy, or ``NULL`` if an error
+   has occurred. We wrap the user-provided functions so they will still call
+   the Python and NumPy memory management callback hooks.
+
+.. c:function:: PyObject * PyDataMem_GetHandler()
+
+   Return the current policy that will be used to allocate data for the
+   next ``PyArrayObject``. On failure, return ``NULL``.
+
+For an example of setting up and using the PyDataMem_Handler, see the test in
+:file:`numpy/core/tests/test_mem_policy.py`.
+
+.. c:function:: void PyDataMem_EventHookFunc(void *inp, void *outp, size_t size, void *user_data);
+
+   This function will be called during data memory manipulation.
+
+.. c:function:: PyDataMem_EventHookFunc * PyDataMem_SetEventHook(PyDataMem_EventHookFunc *newhook, void *user_data, void **old_data)
+
+   Sets the allocation event hook for numpy array data.
+
+   Returns a pointer to the previous hook or ``NULL``. If old_data is
+   non-``NULL``, the previous user_data pointer will be copied to it.
+
+   If not ``NULL``, the hook will be called at the end of each
+   ``PyDataMem_NEW/FREE/RENEW``:
+
+   .. code-block:: c
+
+      result = PyDataMem_NEW(size)        -> (*hook)(NULL, result, size, user_data)
+      PyDataMem_FREE(ptr)                 -> (*hook)(ptr, NULL, 0, user_data)
+      result = PyDataMem_RENEW(ptr, size) -> (*hook)(ptr, result, size, user_data)
+
+   When the hook is called, the GIL will be held by the calling
+   thread. The hook should be written to be reentrant if it performs
+   operations that might cause new allocation events (such as the
+   creation/destruction of NumPy objects, or creating/destroying Python
+   objects which might cause a garbage collection).
+
+What happens when deallocating if there is no policy set
+--------------------------------------------------------
+
+A rare but useful technique is to allocate a buffer outside NumPy, use
+:c:func:`PyArray_NewFromDescr` to wrap the buffer in an ``ndarray``, then
+switch the ``OWNDATA`` flag to true.
+When the ``ndarray`` is released, the appropriate function from the
+``ndarray``'s ``PyDataMem_Handler`` should be called to free the buffer. But
+because the ``PyDataMem_Handler`` field was never set, it will be ``NULL``.
+For backward compatibility, NumPy will call ``free()`` to release the buffer.
+If ``NUMPY_WARN_IF_NO_MEM_POLICY`` is set to ``1``, a warning will be emitted.
+The current default is not to emit a warning; this may change in a future
+version of NumPy.
+
+A better technique would be to use a ``PyCapsule`` as a base object:
+
+.. code-block:: c
+
+    /* define a PyCapsule_Destructor, using the correct deallocator for buff */
+    void free_wrap(void *capsule){
+        void * obj = PyCapsule_GetPointer(capsule, PyCapsule_GetName(capsule));
+        free(obj);
+    };
+
+    /* then inside the function that creates arr from buff */
+    ...
+    arr = PyArray_NewFromDescr(... buf, ...);
+    if (arr == NULL) {
+        return NULL;
+    }
+    capsule = PyCapsule_New(buf, "my_wrapped_buffer",
+                            (PyCapsule_Destructor)&free_wrap);
+    if (PyArray_SetBaseObject(arr, capsule) == -1) {
+        Py_DECREF(arr);
+        return NULL;
+    }
+    ...
diff --git a/doc/source/reference/c-api/index.rst b/doc/source/reference/c-api/index.rst
index bb1ed154e..6288ff33b 100644
--- a/doc/source/reference/c-api/index.rst
+++ b/doc/source/reference/c-api/index.rst
@@ -49,3 +49,4 @@ code.
    generalized-ufuncs
    coremath
    deprecations
+   data_memory
diff --git a/doc/source/reference/c-api/iterator.rst b/doc/source/reference/c-api/iterator.rst
index 2208cdd2f..83644d8b2 100644
--- a/doc/source/reference/c-api/iterator.rst
+++ b/doc/source/reference/c-api/iterator.rst
@@ -1230,7 +1230,7 @@ Functions For Iteration
 .. c:function:: npy_intp* NpyIter_GetIndexPtr(NpyIter* iter)

     This gives back a pointer to the index being tracked, or NULL
-    if no index is being tracked. It is only useable if one of
+    if no index is being tracked. It is only usable if one of
     the flags :c:data:`NPY_ITER_C_INDEX` or
     :c:data:`NPY_ITER_F_INDEX` were specified during construction.
diff --git a/doc/source/reference/c-api/types-and-structures.rst b/doc/source/reference/c-api/types-and-structures.rst
index 39a17cc72..605a4ae71 100644
--- a/doc/source/reference/c-api/types-and-structures.rst
+++ b/doc/source/reference/c-api/types-and-structures.rst
@@ -94,7 +94,7 @@ PyArray_Type and PyArrayObject
         PyArray_Descr *descr;
         int flags;
         PyObject *weakreflist;
-        /* version dependend private members */
+        /* version dependent private members */
     } PyArrayObject;

 .. c:macro:: PyObject_HEAD
@@ -178,7 +178,7 @@ PyArray_Type and PyArrayObject
    .. note::

-      Further members are considered private and version dependend. If the size
+      Further members are considered private and version dependent. If the size
       of the struct is important for your code, special care must be taken.
       A possible use-case when this is relevant is subclassing in C.
       If your code relies on ``sizeof(PyArrayObject)`` to be constant,
@@ -286,48 +286,54 @@ PyArrayDescr_Type and PyArray_Descr
      array like behavior. Each bit in this member is a flag which are named
      as:

-     .. c:macro:: NPY_ITEM_REFCOUNT
+..
+  dedented to allow internal linking, pending a refactoring

-        Indicates that items of this data-type must be reference
-        counted (using :c:func:`Py_INCREF` and :c:func:`Py_DECREF` ).
+.. c:macro:: NPY_ITEM_REFCOUNT
+
+   Indicates that items of this data-type must be reference
+   counted (using :c:func:`Py_INCREF` and :c:func:`Py_DECREF` ).

      .. c:macro:: NPY_ITEM_HASOBJECT

         Same as :c:data:`NPY_ITEM_REFCOUNT`.

-     .. c:macro:: NPY_LIST_PICKLE
+..
+ dedented to allow internal linking, pending a refactoring + +.. c:macro:: NPY_LIST_PICKLE - Indicates arrays of this data-type must be converted to a list - before pickling. + Indicates arrays of this data-type must be converted to a list + before pickling. - .. c:macro:: NPY_ITEM_IS_POINTER +.. c:macro:: NPY_ITEM_IS_POINTER - Indicates the item is a pointer to some other data-type + Indicates the item is a pointer to some other data-type - .. c:macro:: NPY_NEEDS_INIT +.. c:macro:: NPY_NEEDS_INIT - Indicates memory for this data-type must be initialized (set - to 0) on creation. + Indicates memory for this data-type must be initialized (set + to 0) on creation. - .. c:macro:: NPY_NEEDS_PYAPI +.. c:macro:: NPY_NEEDS_PYAPI - Indicates this data-type requires the Python C-API during - access (so don't give up the GIL if array access is going to - be needed). + Indicates this data-type requires the Python C-API during + access (so don't give up the GIL if array access is going to + be needed). - .. c:macro:: NPY_USE_GETITEM +.. c:macro:: NPY_USE_GETITEM - On array access use the ``f->getitem`` function pointer - instead of the standard conversion to an array scalar. Must - use if you don't define an array scalar to go along with - the data-type. + On array access use the ``f->getitem`` function pointer + instead of the standard conversion to an array scalar. Must + use if you don't define an array scalar to go along with + the data-type. - .. c:macro:: NPY_USE_SETITEM +.. c:macro:: NPY_USE_SETITEM - When creating a 0-d array from an array scalar use - ``f->setitem`` instead of the standard copy from an array - scalar. Must use if you don't define an array scalar to go - along with the data-type. + When creating a 0-d array from an array scalar use + ``f->setitem`` instead of the standard copy from an array + scalar. Must use if you don't define an array scalar to go + along with the data-type. .. c:macro:: NPY_FROM_FIELDS @@ -961,8 +967,8 @@ PyUFunc_Type and PyUFuncObject .. deprecated:: 1.22 Some fallback support for this slot exists, but will be removed - eventually. A univiersal function which relied on this will have - eventually have to be ported. + eventually. A universal function that relied on this will + have to be ported eventually. See ref:`NEP 41 <NEP41>` and ref:`NEP 43 <NEP43>` .. c:member:: void *reserved2 @@ -989,14 +995,17 @@ PyUFunc_Type and PyUFuncObject For each distinct core dimension, a set of ``UFUNC_CORE_DIM*`` flags - .. c:macro:: UFUNC_CORE_DIM_CAN_IGNORE +.. + dedented to allow internal linking, pending a refactoring + +.. c:macro:: UFUNC_CORE_DIM_CAN_IGNORE - if the dim name ends in ``?`` + if the dim name ends in ``?`` - .. c:macro:: UFUNC_CORE_DIM_SIZE_INFERRED +.. c:macro:: UFUNC_CORE_DIM_SIZE_INFERRED - if the dim size will be determined from the operands - and not from a :ref:`frozen <frozen>` signature + if the dim size will be determined from the operands + and not from a :ref:`frozen <frozen>` signature .. c:member:: PyObject *identity_value diff --git a/doc/source/reference/global_state.rst b/doc/source/reference/global_state.rst index f18481235..20874ceaa 100644 --- a/doc/source/reference/global_state.rst +++ b/doc/source/reference/global_state.rst @@ -84,3 +84,13 @@ contiguous in memory. Most users will have no reason to change these; for details see the :ref:`memory layout <memory-layout>` documentation. 
+ +Warn if no memory allocation policy when deallocating data +---------------------------------------------------------- + +Some users might pass ownership of the data pointer to the ``ndarray`` by +setting the ``OWNDATA`` flag. If they do this without setting (manually) a +memory allocation policy, the default will be to call ``free``. If +``NUMPY_WARN_IF_NO_MEM_POLICY`` is set to ``"1"``, a ``RuntimeWarning`` will +be emitted. A better alternative is to use a ``PyCapsule`` with a deallocator +and set the ``ndarray.base``. diff --git a/doc/source/reference/index.rst b/doc/source/reference/index.rst index f12d923df..a18211cca 100644 --- a/doc/source/reference/index.rst +++ b/doc/source/reference/index.rst @@ -26,7 +26,6 @@ For learning how to use NumPy, see the :ref:`complete documentation <numpy_docs_ distutils distutils_guide c-api/index - internals simd/simd-optimizations swig diff --git a/doc/source/reference/internals.code-explanations.rst b/doc/source/reference/internals.code-explanations.rst index e8e428f2e..d34314610 100644 --- a/doc/source/reference/internals.code-explanations.rst +++ b/doc/source/reference/internals.code-explanations.rst @@ -1,618 +1,9 @@ -.. currentmodule:: numpy +:orphan: ************************* NumPy C Code Explanations ************************* - Fanaticism consists of redoubling your efforts when you have forgotten - your aim. - --- *George Santayana* +.. This document has been moved to ../dev/internals.code-explanations.rst. - An authority is a person who can tell you more about something than - you really care to know. - --- *Unknown* - -This Chapter attempts to explain the logic behind some of the new -pieces of code. The purpose behind these explanations is to enable -somebody to be able to understand the ideas behind the implementation -somewhat more easily than just staring at the code. Perhaps in this -way, the algorithms can be improved on, borrowed from, and/or -optimized by more people. - - -Memory model -============ - -.. index:: - pair: ndarray; memory model - -One fundamental aspect of the ndarray is that an array is seen as a -"chunk" of memory starting at some location. The interpretation of -this memory depends on the stride information. For each dimension in -an :math:`N` -dimensional array, an integer (stride) dictates how many -bytes must be skipped to get to the next element in that dimension. -Unless you have a single-segment array, this stride information must -be consulted when traversing through an array. It is not difficult to -write code that accepts strides, you just have to use (char \*) -pointers because strides are in units of bytes. Keep in mind also that -strides do not have to be unit-multiples of the element size. Also, -remember that if the number of dimensions of the array is 0 (sometimes -called a rank-0 array), then the strides and dimensions variables are -NULL. - -Besides the structural information contained in the strides and -dimensions members of the :c:type:`PyArrayObject`, the flags contain -important information about how the data may be accessed. In particular, -the :c:data:`NPY_ARRAY_ALIGNED` flag is set when the memory is on a -suitable boundary according to the data-type array. Even if you have -a contiguous chunk of memory, you cannot just assume it is safe to -dereference a data- type-specific pointer to an element. Only if the -:c:data:`NPY_ARRAY_ALIGNED` flag is set is this a safe operation (on -some platforms it will work but on others, like Solaris, it will cause -a bus error). 
The :c:data:`NPY_ARRAY_WRITEABLE` should also be ensured -if you plan on writing to the memory area of the array. It is also -possible to obtain a pointer to an unwritable memory area. Sometimes, -writing to the memory area when the :c:data:`NPY_ARRAY_WRITEABLE` flag is not -set will just be rude. Other times it can cause program crashes ( *e.g.* -a data-area that is a read-only memory-mapped file). - - -Data-type encapsulation -======================= - -.. index:: - single: dtype - -The data-type is an important abstraction of the ndarray. Operations -will look to the data-type to provide the key functionality that is -needed to operate on the array. This functionality is provided in the -list of function pointers pointed to by the 'f' member of the -:c:type:`PyArray_Descr` structure. In this way, the number of data-types can be -extended simply by providing a :c:type:`PyArray_Descr` structure with suitable -function pointers in the 'f' member. For built-in types there are some -optimizations that by-pass this mechanism, but the point of the data- -type abstraction is to allow new data-types to be added. - -One of the built-in data-types, the void data-type allows for -arbitrary structured types containing 1 or more fields as elements of the -array. A field is simply another data-type object along with an offset -into the current structured type. In order to support arbitrarily nested -fields, several recursive implementations of data-type access are -implemented for the void type. A common idiom is to cycle through the -elements of the dictionary and perform a specific operation based on -the data-type object stored at the given offset. These offsets can be -arbitrary numbers. Therefore, the possibility of encountering mis- -aligned data must be recognized and taken into account if necessary. - - -N-D Iterators -============= - -.. index:: - single: array iterator - -A very common operation in much of NumPy code is the need to iterate -over all the elements of a general, strided, N-dimensional array. This -operation of a general-purpose N-dimensional loop is abstracted in the -notion of an iterator object. To write an N-dimensional loop, you only -have to create an iterator object from an ndarray, work with the -dataptr member of the iterator object structure and call the macro -:c:func:`PyArray_ITER_NEXT` (it) on the iterator object to move to the next -element. The "next" element is always in C-contiguous order. The macro -works by first special casing the C-contiguous, 1-D, and 2-D cases -which work very simply. - -For the general case, the iteration works by keeping track of a list -of coordinate counters in the iterator object. At each iteration, the -last coordinate counter is increased (starting from 0). If this -counter is smaller than one less than the size of the array in that -dimension (a pre-computed and stored value), then the counter is -increased and the dataptr member is increased by the strides in that -dimension and the macro ends. If the end of a dimension is reached, -the counter for the last dimension is reset to zero and the dataptr is -moved back to the beginning of that dimension by subtracting the -strides value times one less than the number of elements in that -dimension (this is also pre-computed and stored in the backstrides -member of the iterator object). 
In this case, the macro does not end, -but a local dimension counter is decremented so that the next-to-last -dimension replaces the role that the last dimension played and the -previously-described tests are executed again on the next-to-last -dimension. In this way, the dataptr is adjusted appropriately for -arbitrary striding. - -The coordinates member of the :c:type:`PyArrayIterObject` structure maintains -the current N-d counter unless the underlying array is C-contiguous in -which case the coordinate counting is by-passed. The index member of -the :c:type:`PyArrayIterObject` keeps track of the current flat index of the -iterator. It is updated by the :c:func:`PyArray_ITER_NEXT` macro. - - -Broadcasting -============ - -.. index:: - single: broadcasting - -In Numeric, the ancestor of Numpy, broadcasting was implemented in several -lines of code buried deep in ufuncobject.c. In NumPy, the notion of broadcasting -has been abstracted so that it can be performed in multiple places. -Broadcasting is handled by the function :c:func:`PyArray_Broadcast`. This -function requires a :c:type:`PyArrayMultiIterObject` (or something that is a -binary equivalent) to be passed in. The :c:type:`PyArrayMultiIterObject` keeps -track of the broadcast number of dimensions and size in each -dimension along with the total size of the broadcast result. It also -keeps track of the number of arrays being broadcast and a pointer to -an iterator for each of the arrays being broadcast. - -The :c:func:`PyArray_Broadcast` function takes the iterators that have already -been defined and uses them to determine the broadcast shape in each -dimension (to create the iterators at the same time that broadcasting -occurs then use the :c:func:`PyArray_MultiIterNew` function). -Then, the iterators are -adjusted so that each iterator thinks it is iterating over an array -with the broadcast size. This is done by adjusting the iterators -number of dimensions, and the shape in each dimension. This works -because the iterator strides are also adjusted. Broadcasting only -adjusts (or adds) length-1 dimensions. For these dimensions, the -strides variable is simply set to 0 so that the data-pointer for the -iterator over that array doesn't move as the broadcasting operation -operates over the extended dimension. - -Broadcasting was always implemented in Numeric using 0-valued strides -for the extended dimensions. It is done in exactly the same way in -NumPy. The big difference is that now the array of strides is kept -track of in a :c:type:`PyArrayIterObject`, the iterators involved in a -broadcast result are kept track of in a :c:type:`PyArrayMultiIterObject`, -and the :c:func:`PyArray_Broadcast` call implements the broad-casting rules. - - -Array Scalars -============= - -.. index:: - single: array scalars - -The array scalars offer a hierarchy of Python types that allow a one- -to-one correspondence between the data-type stored in an array and the -Python-type that is returned when an element is extracted from the -array. An exception to this rule was made with object arrays. Object -arrays are heterogeneous collections of arbitrary Python objects. When -you select an item from an object array, you get back the original -Python object (and not an object array scalar which does exist but is -rarely used for practical purposes). - -The array scalars also offer the same methods and attributes as arrays -with the intent that the same code can be used to support arbitrary -dimensions (including 0-dimensions). 
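
This correspondence is easy to see from Python; a quick sketch::

    import numpy as np

    a = np.array([1.5, 2.5])
    x = a[0]                 # extracting an element yields an array scalar
    print(type(x))           # <class 'numpy.float64'>
    print(x.ndim, x.shape)   # 0 () -- the same attributes a 0-d array has
    print(x.mean())          # 1.5 -- array methods work on scalars too
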
The array scalars are read-only -(immutable) with the exception of the void scalar which can also be -written to so that structured array field setting works more naturally -(a[0]['f1'] = ``value`` ). - - -Indexing -======== - -.. index:: - single: indexing - -All python indexing operations ``arr[index]`` are organized by first preparing -the index and finding the index type. The supported index types are: - -* integer -* newaxis -* slice -* ellipsis -* integer arrays/array-likes (fancy) -* boolean (single boolean array); if there is more than one boolean array as - index or the shape does not match exactly, the boolean array will be - converted to an integer array instead. -* 0-d boolean (and also integer); 0-d boolean arrays are a special - case which has to be handled in the advanced indexing code. They signal - that a 0-d boolean array had to be interpreted as an integer array. - -As well as the scalar array special case signaling that an integer array -was interpreted as an integer index, which is important because an integer -array index forces a copy but is ignored if a scalar is returned (full integer -index). The prepared index is guaranteed to be valid with the exception of -out of bound values and broadcasting errors for advanced indexing. This -includes that an ellipsis is added for incomplete indices for example when -a two dimensional array is indexed with a single integer. - -The next step depends on the type of index which was found. If all -dimensions are indexed with an integer a scalar is returned or set. A -single boolean indexing array will call specialized boolean functions. -Indices containing an ellipsis or slice but no advanced indexing will -always create a view into the old array by calculating the new strides and -memory offset. This view can then either be returned or, for assignments, -filled using :c:func:`PyArray_CopyObject`. Note that `PyArray_CopyObject` -may also be called on temporary arrays in other branches to support -complicated assignments when the array is of object dtype. - -Advanced indexing ------------------ - -By far the most complex case is advanced indexing, which may or may not be -combined with typical view based indexing. Here integer indices are -interpreted as view based. Before trying to understand this, you may want -to make yourself familiar with its subtleties. The advanced indexing code -has three different branches and one special case: - -* There is one indexing array and it, as well as the assignment array, can - be iterated trivially. For example they may be contiguous. Also the - indexing array must be of `intp` type and the value array in assignments - should be of the correct type. This is purely a fast path. -* There are only integer array indices so that no subarray exists. -* View based and advanced indexing is mixed. In this case the view based - indexing defines a collection of subarrays that are combined by the - advanced indexing. For example, ``arr[[1, 2, 3], :]`` is created by - vertically stacking the subarrays ``arr[1, :]``, ``arr[2,:]``, and - ``arr[3, :]``. -* There is a subarray but it has exactly one element. This case can be handled - as if there is no subarray, but needs some care during setup. - -Deciding what case applies, checking broadcasting, and determining the kind -of transposition needed are all done in `PyArray_MapIterNew`. After setting -up, there are two cases. 
If there is no subarray or it only has one -element, no subarray iteration is necessary and an iterator is prepared -which iterates all indexing arrays *as well as* the result or value array. -If there is a subarray, there are three iterators prepared. One for the -indexing arrays, one for the result or value array (minus its subarray), -and one for the subarrays of the original and the result/assignment array. -The first two iterators give (or allow calculation) of the pointers into -the start of the subarray, which then allows to restart the subarray -iteration. - -When advanced indices are next to each other transposing may be necessary. -All necessary transposing is handled by :c:func:`PyArray_MapIterSwapAxes` and -has to be handled by the caller unless `PyArray_MapIterNew` is asked to -allocate the result. - -After preparation, getting and setting is relatively straight forward, -although the different modes of iteration need to be considered. Unless -there is only a single indexing array during item getting, the validity of -the indices is checked beforehand. Otherwise it is handled in the inner -loop itself for optimization. - - -Universal Functions -=================== - -.. index:: - single: ufunc - -Universal functions are callable objects that take :math:`N` inputs -and produce :math:`M` outputs by wrapping basic 1-D loops that work -element-by-element into full easy-to use functions that seamlessly -implement broadcasting, type-checking and buffered coercion, and -output-argument handling. New universal functions are normally created -in C, although there is a mechanism for creating ufuncs from Python -functions (:func:`frompyfunc`). The user must supply a 1-D loop that -implements the basic function taking the input scalar values and -placing the resulting scalars into the appropriate output slots as -explained in implementation. - - -Setup ------ - -Every ufunc calculation involves some overhead related to setting up -the calculation. The practical significance of this overhead is that -even though the actual calculation of the ufunc is very fast, you will -be able to write array and type-specific code that will work faster -for small arrays than the ufunc. In particular, using ufuncs to -perform many calculations on 0-D arrays will be slower than other -Python-based solutions (the silently-imported scalarmath module exists -precisely to give array scalars the look-and-feel of ufunc based -calculations with significantly reduced overhead). - -When a ufunc is called, many things must be done. The information -collected from these setup operations is stored in a loop-object. This -loop object is a C-structure (that could become a Python object but is -not initialized as such because it is only used internally). This loop -object has the layout needed to be used with PyArray_Broadcast so that -the broadcasting can be handled in the same way as it is handled in -other sections of code. - -The first thing done is to look-up in the thread-specific global -dictionary the current values for the buffer-size, the error mask, and -the associated error object. The state of the error mask controls what -happens when an error condition is found. It should be noted that -checking of the hardware error flags is only performed after each 1-D -loop is executed. This means that if the input and output arrays are -contiguous and of the correct type so that a single 1-D loop is -performed, then the flags may not be checked until all elements of the -array have been calculated. 
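
These thread-local settings are the same ones exposed in Python through
``np.seterr``, ``np.errstate`` and ``np.getbufsize``; a small sketch::

    import numpy as np

    print(np.getbufsize())              # the user-settable buffer size, 8192 by default
    old = np.seterr(divide='warn')      # the error mask consulted at setup time
    with np.errstate(divide='ignore'):  # a context-local override
        np.array([1.0]) / 0.0           # silent: flags are checked after the 1-D loop
    np.seterr(**old)                    # restore the previous mask
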
Looking up these values in a thread- -specific dictionary takes time which is easily ignored for all but -very small arrays. - -After checking, the thread-specific global variables, the inputs are -evaluated to determine how the ufunc should proceed and the input and -output arrays are constructed if necessary. Any inputs which are not -arrays are converted to arrays (using context if necessary). Which of -the inputs are scalars (and therefore converted to 0-D arrays) is -noted. - -Next, an appropriate 1-D loop is selected from the 1-D loops available -to the ufunc based on the input array types. This 1-D loop is selected -by trying to match the signature of the data-types of the inputs -against the available signatures. The signatures corresponding to -built-in types are stored in the types member of the ufunc structure. -The signatures corresponding to user-defined types are stored in a -linked-list of function-information with the head element stored as a -``CObject`` in the userloops dictionary keyed by the data-type number -(the first user-defined type in the argument list is used as the key). -The signatures are searched until a signature is found to which the -input arrays can all be cast safely (ignoring any scalar arguments -which are not allowed to determine the type of the result). The -implication of this search procedure is that "lesser types" should be -placed below "larger types" when the signatures are stored. If no 1-D -loop is found, then an error is reported. Otherwise, the argument_list -is updated with the stored signature --- in case casting is necessary -and to fix the output types assumed by the 1-D loop. - -If the ufunc has 2 inputs and 1 output and the second input is an -Object array then a special-case check is performed so that -NotImplemented is returned if the second input is not an ndarray, has -the __array_priority\__ attribute, and has an __r{op}\__ special -method. In this way, Python is signaled to give the other object a -chance to complete the operation instead of using generic object-array -calculations. This allows (for example) sparse matrices to override -the multiplication operator 1-D loop. - -For input arrays that are smaller than the specified buffer size, -copies are made of all non-contiguous, mis-aligned, or out-of- -byteorder arrays to ensure that for small arrays, a single loop is -used. Then, array iterators are created for all the input arrays and -the resulting collection of iterators is broadcast to a single shape. - -The output arguments (if any) are then processed and any missing -return arrays are constructed. If any provided output array doesn't -have the correct type (or is mis-aligned) and is smaller than the -buffer size, then a new output array is constructed with the special -:c:data:`NPY_ARRAY_WRITEBACKIFCOPY` flag set. At the end of the function, -:c:func:`PyArray_ResolveWritebackIfCopy` is called so that -its contents will be copied back into the output array. -Iterators for the output arguments are then processed. - -Finally, the decision is made about how to execute the looping -mechanism to ensure that all elements of the input arrays are combined -to produce the output arrays of the correct type. The options for loop -execution are one-loop (for contiguous, aligned, and correct data -type), strided-loop (for non-contiguous but still aligned and correct -data type), and a buffered loop (for mis-aligned or incorrect data -type situations). 
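
From Python, the buffered path can be provoked by handing a ufunc an output
array whose dtype differs from, but casts safely to, what the selected loop
produces; a small sketch::

    import numpy as np

    a = np.arange(6, dtype=np.float64)
    out = np.empty(6, dtype=np.float32)  # "wrong" dtype for the float64 loop
    np.add(a, 1.0, out=out)              # results are cast through buffers
    print(out)                           # [1. 2. 3. 4. 5. 6.]
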
Depending on which execution method is called for, -the loop is then setup and computed. - - -Function call -------------- - -This section describes how the basic universal function computation loop is -setup and executed for each of the three different kinds of execution. If -:c:data:`NPY_ALLOW_THREADS` is defined during compilation, then as long as -no object arrays are involved, the Python Global Interpreter Lock (GIL) is -released prior to calling the loops. It is re-acquired if necessary to -handle error conditions. The hardware error flags are checked only after -the 1-D loop is completed. - - -One Loop -^^^^^^^^ - -This is the simplest case of all. The ufunc is executed by calling the -underlying 1-D loop exactly once. This is possible only when we have -aligned data of the correct type (including byte-order) for both input -and output and all arrays have uniform strides (either contiguous, -0-D, or 1-D). In this case, the 1-D computational loop is called once -to compute the calculation for the entire array. Note that the -hardware error flags are only checked after the entire calculation is -complete. - - -Strided Loop -^^^^^^^^^^^^ - -When the input and output arrays are aligned and of the correct type, -but the striding is not uniform (non-contiguous and 2-D or larger), -then a second looping structure is employed for the calculation. This -approach converts all of the iterators for the input and output -arguments to iterate over all but the largest dimension. The inner -loop is then handled by the underlying 1-D computational loop. The -outer loop is a standard iterator loop on the converted iterators. The -hardware error flags are checked after each 1-D loop is completed. - - -Buffered Loop -^^^^^^^^^^^^^ - -This is the code that handles the situation whenever the input and/or -output arrays are either misaligned or of the wrong data-type -(including being byte-swapped) from what the underlying 1-D loop -expects. The arrays are also assumed to be non-contiguous. The code -works very much like the strided-loop except for the inner 1-D loop is -modified so that pre-processing is performed on the inputs and post- -processing is performed on the outputs in bufsize chunks (where -bufsize is a user-settable parameter). The underlying 1-D -computational loop is called on data that is copied over (if it needs -to be). The setup code and the loop code is considerably more -complicated in this case because it has to handle: - -- memory allocation of the temporary buffers - -- deciding whether or not to use buffers on the input and output data - (mis-aligned and/or wrong data-type) - -- copying and possibly casting data for any inputs or outputs for which - buffers are necessary. - -- special-casing Object arrays so that reference counts are properly - handled when copies and/or casts are necessary. - -- breaking up the inner 1-D loop into bufsize chunks (with a possible - remainder). - -Again, the hardware error flags are checked at the end of each 1-D -loop. - - -Final output manipulation -------------------------- - -Ufuncs allow other array-like classes to be passed seamlessly through -the interface in that inputs of a particular class will induce the -outputs to be of that same class. The mechanism by which this works is -the following. 
If any of the inputs are not ndarrays and define the -:obj:`~numpy.class.__array_wrap__` method, then the class with the largest -:obj:`~numpy.class.__array_priority__` attribute determines the type of all the -outputs (with the exception of any output arrays passed in). The -:obj:`~numpy.class.__array_wrap__` method of the input array will be called with the -ndarray being returned from the ufunc as it's input. There are two -calling styles of the :obj:`~numpy.class.__array_wrap__` function supported. The first -takes the ndarray as the first argument and a tuple of "context" as -the second argument. The context is (ufunc, arguments, output argument -number). This is the first call tried. If a TypeError occurs, then the -function is called with just the ndarray as the first argument. - - -Methods -------- - -There are three methods of ufuncs that require calculation similar to -the general-purpose ufuncs. These are reduce, accumulate, and -reduceat. Each of these methods requires a setup command followed by a -loop. There are four loop styles possible for the methods -corresponding to no-elements, one-element, strided-loop, and buffered- -loop. These are the same basic loop styles as implemented for the -general purpose function call except for the no-element and one- -element cases which are special-cases occurring when the input array -objects have 0 and 1 elements respectively. - - -Setup -^^^^^ - -The setup function for all three methods is ``construct_reduce``. -This function creates a reducing loop object and fills it with -parameters needed to complete the loop. All of the methods only work -on ufuncs that take 2-inputs and return 1 output. Therefore, the -underlying 1-D loop is selected assuming a signature of [ ``otype``, -``otype``, ``otype`` ] where ``otype`` is the requested reduction -data-type. The buffer size and error handling is then retrieved from -(per-thread) global storage. For small arrays that are mis-aligned or -have incorrect data-type, a copy is made so that the un-buffered -section of code is used. Then, the looping strategy is selected. If -there is 1 element or 0 elements in the array, then a simple looping -method is selected. If the array is not mis-aligned and has the -correct data-type, then strided looping is selected. Otherwise, -buffered looping must be performed. Looping parameters are then -established, and the return array is constructed. The output array is -of a different shape depending on whether the method is reduce, -accumulate, or reduceat. If an output array is already provided, then -it's shape is checked. If the output array is not C-contiguous, -aligned, and of the correct data type, then a temporary copy is made -with the WRITEBACKIFCOPY flag set. In this way, the methods will be able -to work with a well-behaved output array but the result will be copied -back into the true output array when :c:func:`PyArray_ResolveWritebackIfCopy` -is called at function completion. -Finally, iterators are set up to loop over the correct axis -(depending on the value of axis provided to the method) and the setup -routine returns to the actual computation routine. - - -Reduce -^^^^^^ - -.. index:: - triple: ufunc; methods; reduce - -All of the ufunc methods use the same underlying 1-D computational -loops with input and output arguments adjusted so that the appropriate -reduction takes place. 
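
The effect of this adjustment is easy to check from Python, where the three
methods are available on any binary ufunc::

    import numpy as np

    i = np.array([2.0, 3.0, 4.0, 5.0])
    print(np.add.reduce(i))             # 14.0, the same as i.sum()
    print(np.add.accumulate(i))         # [ 2.  5.  9. 14.]
    print(np.add.reduceat(i, [0, 2]))   # [5. 9.] -- reduce i[0:2], then i[2:]
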
For example, the key to the functioning of -reduce is that the 1-D loop is called with the output and the second -input pointing to the same position in memory and both having a step- -size of 0. The first input is pointing to the input array with a step- -size given by the appropriate stride for the selected axis. In this -way, the operation performed is - -.. math:: - :nowrap: - - \begin{align*} - o & = & i[0] \\ - o & = & i[k]\textrm{<op>}o\quad k=1\ldots N - \end{align*} - -where :math:`N+1` is the number of elements in the input, :math:`i`, -:math:`o` is the output, and :math:`i[k]` is the -:math:`k^{\textrm{th}}` element of :math:`i` along the selected axis. -This basic operations is repeated for arrays with greater than 1 -dimension so that the reduction takes place for every 1-D sub-array -along the selected axis. An iterator with the selected dimension -removed handles this looping. - -For buffered loops, care must be taken to copy and cast data before -the loop function is called because the underlying loop expects -aligned data of the correct data-type (including byte-order). The -buffered loop must handle this copying and casting prior to calling -the loop function on chunks no greater than the user-specified -bufsize. - - -Accumulate -^^^^^^^^^^ - -.. index:: - triple: ufunc; methods; accumulate - -The accumulate function is very similar to the reduce function in that -the output and the second input both point to the output. The -difference is that the second input points to memory one stride behind -the current output pointer. Thus, the operation performed is - -.. math:: - :nowrap: - - \begin{align*} - o[0] & = & i[0] \\ - o[k] & = & i[k]\textrm{<op>}o[k-1]\quad k=1\ldots N. - \end{align*} - -The output has the same shape as the input and each 1-D loop operates -over :math:`N` elements when the shape in the selected axis is :math:`N+1`. -Again, buffered loops take care to copy and cast the data before -calling the underlying 1-D computational loop. - - -Reduceat -^^^^^^^^ - -.. index:: - triple: ufunc; methods; reduceat - single: ufunc - -The reduceat function is a generalization of both the reduce and -accumulate functions. It implements a reduce over ranges of the input -array specified by indices. The extra indices argument is checked to -be sure that every input is not too large for the input array along -the selected dimension before the loop calculations take place. The -loop implementation is handled using code that is very similar to the -reduce code repeated as many times as there are elements in the -indices input. In particular: the first input pointer passed to the -underlying 1-D computational loop points to the input array at the -correct location indicated by the index array. In addition, the output -pointer and the second input pointer passed to the underlying 1-D loop -point to the same position in memory. The size of the 1-D -computational loop is fixed to be the difference between the current -index and the next index (when the current index is the last index, -then the next index is assumed to be the length of the array along the -selected dimension). In this way, the 1-D loop will implement a reduce -over the specified indices. - -Mis-aligned or a loop data-type that does not match the input and/or -output data-type is handled using buffered code where-in data is -copied to a temporary buffer and cast to the correct data-type if -necessary prior to calling the underlying 1-D function. 
The temporary -buffers are created in (element) sizes no bigger than the user -settable buffer-size value. Thus, the loop must be flexible enough to -call the underlying 1-D computational loop enough times to complete -the total calculation in chunks no bigger than the buffer-size. +This document has been moved to :ref:`c-code-explanations`.
\ No newline at end of file diff --git a/doc/source/reference/internals.rst b/doc/source/reference/internals.rst index ed8042c08..7a5e6374c 100644 --- a/doc/source/reference/internals.rst +++ b/doc/source/reference/internals.rst @@ -1,168 +1,10 @@ -.. _numpy-internals: +:orphan: *************** NumPy internals *************** -.. toctree:: - - internals.code-explanations - alignment - -Internal organization of numpy arrays -===================================== - -It helps to understand a bit about how numpy arrays are handled under the covers to help understand numpy better. This section will not go into great detail. Those wishing to understand the full details are referred to Travis Oliphant's book "Guide to NumPy". - -NumPy arrays consist of two major components, the raw array data (from now on, -referred to as the data buffer), and the information about the raw array data. -The data buffer is typically what people think of as arrays in C or Fortran, -a contiguous (and fixed) block of memory containing fixed sized data items. -NumPy also contains a significant set of data that describes how to interpret -the data in the data buffer. This extra information contains (among other things): - - 1) The basic data element's size in bytes - 2) The start of the data within the data buffer (an offset relative to the - beginning of the data buffer). - 3) The number of dimensions and the size of each dimension - 4) The separation between elements for each dimension (the 'stride'). This - does not have to be a multiple of the element size - 5) The byte order of the data (which may not be the native byte order) - 6) Whether the buffer is read-only - 7) Information (via the dtype object) about the interpretation of the basic - data element. The basic data element may be as simple as a int or a float, - or it may be a compound object (e.g., struct-like), a fixed character field, - or Python object pointers. - 8) Whether the array is to interpreted as C-order or Fortran-order. - -This arrangement allow for very flexible use of arrays. One thing that it allows -is simple changes of the metadata to change the interpretation of the array buffer. -Changing the byteorder of the array is a simple change involving no rearrangement -of the data. The shape of the array can be changed very easily without changing -anything in the data buffer or any data copying at all - -Among other things that are made possible is one can create a new array metadata -object that uses the same data buffer -to create a new view of that data buffer that has a different interpretation -of the buffer (e.g., different shape, offset, byte order, strides, etc) but -shares the same data bytes. Many operations in numpy do just this such as -slices. Other operations, such as transpose, don't move data elements -around in the array, but rather change the information about the shape and strides so that the indexing of the array changes, but the data in the doesn't move. - -Typically these new versions of the array metadata but the same data buffer are -new 'views' into the data buffer. There is a different ndarray object, but it -uses the same data buffer. This is why it is necessary to force copies through -use of the .copy() method if one really wants to make a new and independent -copy of the data buffer. - -New views into arrays mean the object reference counts for the data buffer -increase. Simply doing away with the original array object will not remove the -data buffer if other views of it still exist. 
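
A quick Python sketch of the lifetime rule just described::

    import numpy as np

    a = np.arange(10)
    v = a[::2]           # a new ndarray object sharing a's data buffer
    print(v.base is a)   # True
    del a                # the buffer survives; v still holds a reference
    print(v)             # [0 2 4 6 8]
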
- -Multidimensional Array Indexing Order Issues -============================================ - -What is the right way to index -multi-dimensional arrays? Before you jump to conclusions about the one and -true way to index multi-dimensional arrays, it pays to understand why this is -a confusing issue. This section will try to explain in detail how numpy -indexing works and why we adopt the convention we do for images, and when it -may be appropriate to adopt other conventions. - -The first thing to understand is -that there are two conflicting conventions for indexing 2-dimensional arrays. -Matrix notation uses the first index to indicate which row is being selected and -the second index to indicate which column is selected. This is opposite the -geometrically oriented-convention for images where people generally think the -first index represents x position (i.e., column) and the second represents y -position (i.e., row). This alone is the source of much confusion; -matrix-oriented users and image-oriented users expect two different things with -regard to indexing. - -The second issue to understand is how indices correspond -to the order the array is stored in memory. In Fortran the first index is the -most rapidly varying index when moving through the elements of a two -dimensional array as it is stored in memory. If you adopt the matrix -convention for indexing, then this means the matrix is stored one column at a -time (since the first index moves to the next row as it changes). Thus Fortran -is considered a Column-major language. C has just the opposite convention. In -C, the last index changes most rapidly as one moves through the array as -stored in memory. Thus C is a Row-major language. The matrix is stored by -rows. Note that in both cases it presumes that the matrix convention for -indexing is being used, i.e., for both Fortran and C, the first index is the -row. Note this convention implies that the indexing convention is invariant -and that the data order changes to keep that so. - -But that's not the only way -to look at it. Suppose one has large two-dimensional arrays (images or -matrices) stored in data files. Suppose the data are stored by rows rather than -by columns. If we are to preserve our index convention (whether matrix or -image) that means that depending on the language we use, we may be forced to -reorder the data if it is read into memory to preserve our indexing -convention. For example if we read row-ordered data into memory without -reordering, it will match the matrix indexing convention for C, but not for -Fortran. Conversely, it will match the image indexing convention for Fortran, -but not for C. For C, if one is using data stored in row order, and one wants -to preserve the image index convention, the data must be reordered when -reading into memory. - -In the end, which you do for Fortran or C depends on -which is more important, not reordering data or preserving the indexing -convention. For large images, reordering data is potentially expensive, and -often the indexing convention is inverted to avoid that. - -The situation with -numpy makes this issue yet more complicated. The internal machinery of numpy -arrays is flexible enough to accept any ordering of indices. One can simply -reorder indices by manipulating the internal stride information for arrays -without reordering the data at all. NumPy will know how to map the new index -order to the data without moving the data. 
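
These points are visible from Python; in the following sketch the stride
values assume the default 8-byte integers::

    import numpy as np

    c = np.arange(6).reshape(2, 3)             # C (row-major) layout
    f = np.asfortranarray(c)                   # same values, column-major buffer
    print(c.strides, f.strides)                # (24, 8) (8, 16)
    t = c.T                                    # transpose: strides change, data does not move
    print(t.strides, t.flags['F_CONTIGUOUS'])  # (8, 24) True
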
- -So if this is true, why not choose -the index order that matches what you most expect? In particular, why not define -row-ordered images to use the image convention? (This is sometimes referred -to as the Fortran convention vs the C convention, thus the 'C' and 'FORTRAN' -order options for array ordering in numpy.) The drawback of doing this is -potential performance penalties. It's common to access the data sequentially, -either implicitly in array operations or explicitly by looping over rows of an -image. When that is done, then the data will be accessed in non-optimal order. -As the first index is incremented, what is actually happening is that elements -spaced far apart in memory are being sequentially accessed, with usually poor -memory access speeds. For example, for a two dimensional image 'im' defined so -that im[0, 10] represents the value at x=0, y=10. To be consistent with usual -Python behavior then im[0] would represent a column at x=0. Yet that data -would be spread over the whole array since the data are stored in row order. -Despite the flexibility of numpy's indexing, it can't really paper over the fact -basic operations are rendered inefficient because of data order or that getting -contiguous subarrays is still awkward (e.g., im[:,0] for the first row, vs -im[0]), thus one can't use an idiom such as for row in im; for col in im does -work, but doesn't yield contiguous column data. - -As it turns out, numpy is -smart enough when dealing with ufuncs to determine which index is the most -rapidly varying one in memory and uses that for the innermost loop. Thus for -ufuncs there is no large intrinsic advantage to either approach in most cases. -On the other hand, use of .flat with an FORTRAN ordered array will lead to -non-optimal memory access as adjacent elements in the flattened array (iterator, -actually) are not contiguous in memory. - -Indeed, the fact is that Python -indexing on lists and other sequences naturally leads to an outside-to inside -ordering (the first index gets the largest grouping, the next the next largest, -and the last gets the smallest element). Since image data are normally stored -by rows, this corresponds to position within rows being the last item indexed. - -If you do want to use Fortran ordering realize that -there are two approaches to consider: 1) accept that the first index is just not -the most rapidly changing in memory and have all your I/O routines reorder -your data when going from memory to disk or visa versa, or use numpy's -mechanism for mapping the first index to the most rapidly varying data. We -recommend the former if possible. The disadvantage of the latter is that many -of numpy's functions will yield arrays without Fortran ordering unless you are -careful to use the 'order' keyword. Doing this would be highly inconvenient. - -Otherwise we recommend simply learning to reverse the usual order of indices -when accessing elements of an array. Granted, it goes against the grain, but -it is more in line with Python semantics and the natural order of the data. +.. This document has been moved to ../dev/internals.rst. +This document has been moved to :ref:`numpy-internals`. 
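
A short sketch of the row/column asymmetry discussed above, for a
row-ordered ('C') array of 8-byte floats::

    import numpy as np

    im = np.zeros((1000, 1000))  # 'C' order by default
    row = im[0]                  # one contiguous block of memory
    col = im[:, 0]               # elements spaced 8000 bytes apart
    print(row.flags['C_CONTIGUOUS'], col.flags['C_CONTIGUOUS'])  # True False
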
diff --git a/doc/source/reference/random/bit_generators/index.rst b/doc/source/reference/random/bit_generators/index.rst index c5c349806..211f0d60e 100644 --- a/doc/source/reference/random/bit_generators/index.rst +++ b/doc/source/reference/random/bit_generators/index.rst @@ -4,7 +4,7 @@ Bit Generators -------------- The random values produced by :class:`~Generator` -orignate in a BitGenerator. The BitGenerators do not directly provide +originate in a BitGenerator. The BitGenerators do not directly provide random numbers and only contains methods used for seeding, getting or setting the state, jumping or advancing the state, and for accessing low-level wrappers for consumption by code that can efficiently diff --git a/doc/source/reference/random/index.rst b/doc/source/reference/random/index.rst index 96cd47017..aaabc9b39 100644 --- a/doc/source/reference/random/index.rst +++ b/doc/source/reference/random/index.rst @@ -55,7 +55,7 @@ properties than the legacy `MT19937` used in `RandomState`. more_vals = random.standard_normal(10) `Generator` can be used as a replacement for `RandomState`. Both class -instances hold a internal `BitGenerator` instance to provide the bit +instances hold an internal `BitGenerator` instance to provide the bit stream, it is accessible as ``gen.bit_generator``. Some long-overdue API cleanup means that legacy and compatibility methods have been removed from `Generator` diff --git a/doc/source/reference/random/performance.rst b/doc/source/reference/random/performance.rst index 85855be59..cb9b94113 100644 --- a/doc/source/reference/random/performance.rst +++ b/doc/source/reference/random/performance.rst @@ -13,7 +13,7 @@ full-featured, and fast on most platforms, but somewhat slow when compiled for parallelism would indicate using `PCG64DXSM`. `Philox` is fairly slow, but its statistical properties have -very high quality, and it is easy to get assuredly-independent stream by using +very high quality, and it is easy to get an assuredly-independent stream by using unique keys. If that is the style you wish to use for parallel streams, or you are porting from another system that uses that style, then `Philox` is your choice. diff --git a/doc/source/reference/routines.ma.rst b/doc/source/reference/routines.ma.rst index d961cbf02..5404c43d8 100644 --- a/doc/source/reference/routines.ma.rst +++ b/doc/source/reference/routines.ma.rst @@ -44,7 +44,9 @@ Ones and zeros ma.masked_all ma.masked_all_like ma.ones + ma.ones_like ma.zeros + ma.zeros_like _____ @@ -287,11 +289,11 @@ Filling a masked array _____ -Masked arrays arithmetics -========================= +Masked arrays arithmetic +======================== -Arithmetics -~~~~~~~~~~~ +Arithmetic +~~~~~~~~~~ .. autosummary:: :toctree: generated/ @@ -331,6 +333,7 @@ Minimum/maximum ma.max ma.min ma.ptp + ma.diff ma.MaskedArray.argmax ma.MaskedArray.argmin diff --git a/doc/source/reference/routines.math.rst b/doc/source/reference/routines.math.rst index 3c2f96830..2a09b8d20 100644 --- a/doc/source/reference/routines.math.rst +++ b/doc/source/reference/routines.math.rst @@ -143,6 +143,21 @@ Handling complex numbers conj conjugate +Extrema Finding +--------------- +.. 
autosummary:: + :toctree: generated/ + + maximum + fmax + amax + nanmax + + minimum + fmin + amin + nanmin + Miscellaneous ------------- @@ -160,11 +175,7 @@ Miscellaneous fabs sign heaviside - maximum - minimum - fmax - fmin - + nan_to_num real_if_close diff --git a/doc/source/reference/routines.polynomials.rst b/doc/source/reference/routines.polynomials.rst index ecfb012f0..4aea963c0 100644 --- a/doc/source/reference/routines.polynomials.rst +++ b/doc/source/reference/routines.polynomials.rst @@ -22,7 +22,7 @@ Therefore :mod:`numpy.polynomial` is recommended for new coding. the polynomial functions prefixed with *poly* accessible from the `numpy` namespace (e.g. `numpy.polyadd`, `numpy.polyval`, `numpy.polyfit`, etc.). - The term *polynomial package* refers to the new API definied in + The term *polynomial package* refers to the new API defined in `numpy.polynomial`, which includes the convenience classes for the different kinds of polynomials (`numpy.polynomial.Polynomial`, `numpy.polynomial.Chebyshev`, etc.). @@ -110,7 +110,7 @@ See the documentation for the `convenience classes <routines.polynomials.classes>`_ for further details on the ``domain`` and ``window`` attributes. -Another major difference bewteen the legacy polynomial module and the +Another major difference between the legacy polynomial module and the polynomial package is polynomial fitting. In the old module, fitting was done via the `~numpy.polyfit` function. In the polynomial package, the `~numpy.polynomial.polynomial.Polynomial.fit` class method is preferred. For diff --git a/doc/source/reference/routines.statistics.rst b/doc/source/reference/routines.statistics.rst index c675b6090..cd93e6025 100644 --- a/doc/source/reference/routines.statistics.rst +++ b/doc/source/reference/routines.statistics.rst @@ -9,11 +9,7 @@ Order statistics .. autosummary:: :toctree: generated/ - - amin - amax - nanmin - nanmax + ptp percentile nanpercentile diff --git a/doc/source/reference/simd/simd-optimizations.rst b/doc/source/reference/simd/simd-optimizations.rst index 956824321..9de6d1734 100644 --- a/doc/source/reference/simd/simd-optimizations.rst +++ b/doc/source/reference/simd/simd-optimizations.rst @@ -14,7 +14,7 @@ written only once. There are three layers: written using the maximum set of intrinsics possible. - At *compile* time, a distutils command is used to define the minimum and maximum features to support, based on user choice and compiler support. The - appropriate macros are overlayed with the platform / architecture intrinsics, + appropriate macros are overlaid with the platform / architecture intrinsics, and the three loops are compiled. - At *runtime import*, the CPU is probed for the set of supported intrinsic features. A mechanism is used to grab the pointer to the most appropriate @@ -89,7 +89,7 @@ NOTES ~~~~~~~~~~~~~ - CPU features and other options are case-insensitive. -- The order of the requsted optimizations doesn't matter. +- The order of the requested optimizations doesn't matter. - Either commas or spaces can be used as a separator, e.g. ``--cpu-dispatch``\ = "avx2 avx512f" or ``--cpu-dispatch``\ = "avx2, avx512f" both work, but the @@ -113,7 +113,7 @@ NOTES compiler native flag ``-march=native`` or ``-xHost`` or ``QxHost`` is enabled through environment variable ``CFLAGS`` -- The validation process for the requsted optimizations when it comes to +- The validation process for the requested optimizations when it comes to ``--cpu-baseline`` isn't strict. 
For example, if the user requested ``AVX2`` but the compiler doesn't support it then we just skip it and return the maximum optimization that the compiler can handle depending on the @@ -379,15 +379,15 @@ through ``--cpu-dispatch``, but it can also represent other options such as: #include "numpy/utils.h" // NPY_CAT, NPY_TOSTR #ifndef NPY__CPU_TARGET_CURRENT - // wrapping the dispatch-able source only happens to the addtional optimizations - // but if the keyword 'baseline' provided within the configuration statments, + // wrapping the dispatch-able source only happens to the additional optimizations + // but if the keyword 'baseline' provided within the configuration statements, // the infrastructure will add extra compiling for the dispatch-able source by // passing it as-is to the compiler without any changes. #define CURRENT_TARGET(X) X #define NPY__CPU_TARGET_CURRENT baseline // for printing only #else // since we reach to this point, that's mean we're dealing with - // the addtional optimizations, so it could be SSE42 or AVX512F + // the additional optimizations, so it could be SSE42 or AVX512F #define CURRENT_TARGET(X) NPY_CAT(NPY_CAT(X, _), NPY__CPU_TARGET_CURRENT) #endif // Macro 'CURRENT_TARGET' adding the current target as suffux to the exported symbols, @@ -418,7 +418,7 @@ through ``--cpu-dispatch``, but it can also represent other options such as: #undef NPY__CPU_DISPATCH_BASELINE_CALL #undef NPY__CPU_DISPATCH_CALL // nothing strange here, just a normal preprocessor callback - // enabled only if 'baseline' spesfied withiin the configration statments + // enabled only if 'baseline' specified within the configuration statements #define NPY__CPU_DISPATCH_BASELINE_CALL(CB, ...) \ NPY__CPU_DISPATCH_EXPAND_(CB(__VA_ARGS__)) // 'NPY__CPU_DISPATCH_CALL' is an abstract macro is used for dispatching @@ -427,7 +427,7 @@ through ``--cpu-dispatch``, but it can also represent other options such as: // @param CHK, Expected a macro that can be used to detect CPU features // in runtime, which takes a CPU feature name without string quotes and // returns the testing result in a shape of boolean value. - // NumPy already has macro called "NPY_CPU_HAVE", which fit this requirment. + // NumPy already has macro called "NPY_CPU_HAVE", which fits this requirement. // // @param CB, a callback macro that expected to be called multiple times depending // on the required optimizations, the callback should receive the following arguments: diff --git a/doc/source/reference/ufuncs.rst b/doc/source/reference/ufuncs.rst index b832dad04..6ace5b233 100644 --- a/doc/source/reference/ufuncs.rst +++ b/doc/source/reference/ufuncs.rst @@ -185,7 +185,7 @@ attribute of the ufunc. (This list may be missing DTypes not defined by NumPy.) The ``signature`` only specifies the DType class/type. For example, it -can specifiy that the operation should be ``datetime64`` or ``float64`` +can specify that the operation should be ``datetime64`` or ``float64`` operation. It does not specify the ``datetime64`` time-unit or the ``float64`` byte-order. diff --git a/doc/source/release.rst b/doc/source/release.rst index e9057a531..aa490b5f5 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -1,11 +1,12 @@ ************* -Release Notes +Release notes ************* .. 
toctree::
    :maxdepth: 3

     1.22.0 <release/1.22.0-notes>
+    1.21.3 <release/1.21.3-notes>
     1.21.2 <release/1.21.2-notes>
     1.21.1 <release/1.21.1-notes>
     1.21.0 <release/1.21.0-notes>
diff --git a/doc/source/release/1.14.0-notes.rst b/doc/source/release/1.14.0-notes.rst
index 8ee876fd3..346b5af99 100644
--- a/doc/source/release/1.14.0-notes.rst
+++ b/doc/source/release/1.14.0-notes.rst
@@ -332,7 +332,7 @@ eliminating their use internally and two new C-API functions,
 * ``PyArray_SetWritebackIfCopyBase``
 * ``PyArray_ResolveWritebackIfCopy``,

-have been added together with a complimentary flag,
+have been added together with a complementary flag,
 ``NPY_ARRAY_WRITEBACKIFCOPY``. Using the new functionality also requires
 that some flags be changed when new arrays are created, to wit:
 ``NPY_ARRAY_INOUT_ARRAY`` should be replaced by ``NPY_ARRAY_INOUT_ARRAY2`` and
diff --git a/doc/source/release/1.15.0-notes.rst b/doc/source/release/1.15.0-notes.rst
index 7235ca915..2d9d068e5 100644
--- a/doc/source/release/1.15.0-notes.rst
+++ b/doc/source/release/1.15.0-notes.rst
@@ -326,8 +326,8 @@ passed explicitly, and are not yet computed automatically.
 No longer does an IQR of 0 result in ``n_bins=1``, rather the number of bins
 chosen is related to the data size in this situation.

-The edges retuned by `histogram`` and ``histogramdd`` now match the data float type
------------------------------------------------------------------------------------
+The edges returned by ``histogram`` and ``histogramdd`` now match the data float type
+-------------------------------------------------------------------------------------
 When passed ``np.float16``, ``np.float32``, or ``np.longdouble`` data, the
 returned edges are now of the same dtype. Previously, ``histogram`` would only
 return the same type if explicit bins were given, and ``histogram`` would
diff --git a/doc/source/release/1.16.0-notes.rst b/doc/source/release/1.16.0-notes.rst
index 17d24160a..122f20eba 100644
--- a/doc/source/release/1.16.0-notes.rst
+++ b/doc/source/release/1.16.0-notes.rst
@@ -119,7 +119,7 @@ NaT comparisons
 Consistent with the behavior of NaN, all comparisons other than inequality
 checks with datetime64 or timedelta64 NaT ("not-a-time") values now always
 return ``False``, and inequality checks with NaT now always return ``True``.
-This includes comparisons beteween NaT values. For compatibility with the
+This includes comparisons between NaT values. For compatibility with the
 old behavior, use ``np.isnat`` to explicitly check for NaT or convert
 datetime64/timedelta64 arrays with ``.astype(np.int64)`` before making
 comparisons.
@@ -365,7 +365,7 @@ Alpine Linux (and other musl c library distros) support
 We now default to use `fenv.h` for floating point status error reporting.
 Previously we had a broken default that sometimes would not report underflow,
 overflow, and invalid floating point operations. Now we can support non-glibc
-distrubutions like Alpine Linux as long as they ship `fenv.h`.
+distributions like Alpine Linux as long as they ship `fenv.h`.

 Speedup ``np.block`` for large arrays
 -------------------------------------
diff --git a/doc/source/release/1.19.0-notes.rst b/doc/source/release/1.19.0-notes.rst
index 8f5c2c0ce..410890697 100644
--- a/doc/source/release/1.19.0-notes.rst
+++ b/doc/source/release/1.19.0-notes.rst
@@ -402,7 +402,7 @@ Ability to disable madvise hugepages
 ------------------------------------
 On Linux NumPy has previously added support for madavise hugepages which can
 improve performance for very large arrays.
Unfortunately, on older Kernel
-versions this led to peformance regressions, thus by default the support has
+versions this led to performance regressions, thus by default the support has
 been disabled on kernels before version 4.6. To override the default, you can
 use the environment variable::

diff --git a/doc/source/release/1.20.0-notes.rst b/doc/source/release/1.20.0-notes.rst
index b8b7a0c79..494e4f19e 100644
--- a/doc/source/release/1.20.0-notes.rst
+++ b/doc/source/release/1.20.0-notes.rst
@@ -842,7 +842,7 @@ The compiler command selection for Fortran Portland Group Compiler is changed
 in `numpy.distutils.fcompiler`. This only affects the linking command. This
 forces the use of the executable provided by the command line option (if
 provided) instead of the pgfortran executable. If no executable is provided to
-the command line option it defaults to the pgf90 executable, wich is an alias
+the command line option it defaults to the pgf90 executable, which is an alias
 for pgfortran according to the PGI documentation.

 (`gh-16730 <https://github.com/numpy/numpy/pull/16730>`__)
diff --git a/doc/source/release/1.21.0-notes.rst b/doc/source/release/1.21.0-notes.rst
index 270cc32de..88a4503de 100644
--- a/doc/source/release/1.21.0-notes.rst
+++ b/doc/source/release/1.21.0-notes.rst
@@ -522,7 +522,7 @@ either of these distributions are produced.
 Placeholder annotations have been improved
 ------------------------------------------
 All placeholder annotations, that were previously annotated as ``typing.Any``,
-have been improved. Where appropiate they have been replaced with explicit
+have been improved. Where appropriate they have been replaced with explicit
 function definitions, classes or other miscellaneous objects.

 (`gh-18934 <https://github.com/numpy/numpy/pull/18934>`__)
diff --git a/doc/source/release/1.21.3-notes.rst b/doc/source/release/1.21.3-notes.rst
new file mode 100644
index 000000000..4058452ef
--- /dev/null
+++ b/doc/source/release/1.21.3-notes.rst
@@ -0,0 +1,44 @@
+.. currentmodule:: numpy
+
+==========================
+NumPy 1.21.3 Release Notes
+==========================
+
+NumPy 1.21.3 is a maintenance release that fixes a few bugs discovered after
+1.21.2. It also provides 64 bit Python 3.10.0 wheels. Note a few oddities about
+Python 3.10:
+
+* There are no 32 bit wheels for Windows, Mac, or Linux.
+* The Mac Intel builds are only available in universal2 wheels.
+
+The Python versions supported in this release are 3.7-3.10. If you want to
+compile your own version using gcc-11, you will need to use gcc-11.2+ to avoid
+problems.
+
+Contributors
+============
+
+A total of 7 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Aaron Meurer
+* Bas van Beek
+* Charles Harris
+* Developer-Ecosystem-Engineering +
+* Kevin Sheppard
+* Sebastian Berg
+* Warren Weckesser
+
+Pull requests merged
+====================
+
+A total of 8 pull requests were merged for this release.
+
+* `#19745 <https://github.com/numpy/numpy/pull/19745>`__: ENH: Add dtype-support to 3 ``generic``/``ndarray`` methods
+* `#19955 <https://github.com/numpy/numpy/pull/19955>`__: BUG: Resolve Divide by Zero on Apple silicon + test failures...
+* `#19958 <https://github.com/numpy/numpy/pull/19958>`__: MAINT: Mark type-check-only ufunc subclasses as ufunc aliases...
+* `#19994 <https://github.com/numpy/numpy/pull/19994>`__: BUG: np.tan(np.inf) test failure +* `#20080 <https://github.com/numpy/numpy/pull/20080>`__: BUG: Correct incorrect advance in PCG with emulated int128 +* `#20081 <https://github.com/numpy/numpy/pull/20081>`__: BUG: Fix NaT handling in the PyArray_CompareFunc for datetime... +* `#20082 <https://github.com/numpy/numpy/pull/20082>`__: DOC: Ensure that we add documentation also as to the dict for... +* `#20106 <https://github.com/numpy/numpy/pull/20106>`__: BUG: core: result_type(0, np.timedelta64(4)) would seg. fault. diff --git a/doc/source/release/1.8.0-notes.rst b/doc/source/release/1.8.0-notes.rst index 80c39f8bc..65a471b92 100644 --- a/doc/source/release/1.8.0-notes.rst +++ b/doc/source/release/1.8.0-notes.rst @@ -33,7 +33,7 @@ Future Changes The Datetime64 type remains experimental in this release. In 1.9 there will -probably be some changes to make it more useable. +probably be some changes to make it more usable. The diagonal method currently returns a new array and raises a FutureWarning. In 1.9 it will return a readonly view. @@ -315,8 +315,8 @@ If used with the `overwrite_input` option the array will now only be partially sorted instead of fully sorted. -Overrideable operand flags in ufunc C-API ------------------------------------------ +Overridable operand flags in ufunc C-API +---------------------------------------- When creating a ufunc, the default ufunc operand flags can be overridden via the new op_flags attribute of the ufunc object. For example, to set the operand flag for the first input to read/write: diff --git a/doc/source/release/1.9.0-notes.rst b/doc/source/release/1.9.0-notes.rst index 7ea29e354..a19a05cb7 100644 --- a/doc/source/release/1.9.0-notes.rst +++ b/doc/source/release/1.9.0-notes.rst @@ -389,7 +389,7 @@ uses a per-state lock instead of the GIL. MaskedArray support for more complicated base classes ----------------------------------------------------- Built-in assumptions that the baseclass behaved like a plain array are being -removed. In particalur, ``repr`` and ``str`` should now work more reliably. +removed. In particular, ``repr`` and ``str`` should now work more reliably. C-API diff --git a/doc/source/user/absolute_beginners.rst b/doc/source/user/absolute_beginners.rst index bb570f622..27e9e1f63 100644 --- a/doc/source/user/absolute_beginners.rst +++ b/doc/source/user/absolute_beginners.rst @@ -391,7 +391,7 @@ this array to an array with three rows and two columns:: With ``np.reshape``, you can specify a few optional parameters:: - >>> numpy.reshape(a, newshape=(1, 6), order='C') + >>> np.reshape(a, newshape=(1, 6), order='C') array([[0, 1, 2, 3, 4, 5]]) ``a`` is the array to be reshaped. @@ -613,7 +613,7 @@ How to create an array from existing data ----- -You can easily use create a new array from a section of an existing array. +You can easily create a new array from a section of an existing array. Let's say you have this array: @@ -899,12 +899,18 @@ You can aggregate matrices the same way you aggregated vectors:: .. image:: images/np_matrix_aggregation.png You can aggregate all the values in a matrix and you can aggregate them across -columns or rows using the ``axis`` parameter:: +columns or rows using the ``axis`` parameter. To illustrate this point, let's +look at a slightly modified dataset:: + >>> data = np.array([[1, 2], [5, 3], [4, 6]]) + >>> data + array([[1, 2], + [5, 3], + [4, 6]]) >>> data.max(axis=0) array([5, 6]) >>> data.max(axis=1) - array([2, 4, 6]) + array([2, 5, 6]) .. 
image:: images/np_matrix_aggregation_row.png
diff --git a/doc/source/user/basics.broadcasting.rst b/doc/source/user/basics.broadcasting.rst
index 5a252122f..ca299085a 100644
--- a/doc/source/user/basics.broadcasting.rst
+++ b/doc/source/user/basics.broadcasting.rst
@@ -170,6 +170,7 @@ An example of broadcasting when a 1-d array is added to a 2-d array::
         [ 31.,  32.,  33.]])
  >>> b = array([1.0, 2.0, 3.0, 4.0])
  >>> a + b
+ Traceback (most recent call last):
  ValueError: operands could not be broadcast together with shapes (4,3) (4,)

 As shown in :ref:`broadcasting.figure-2`, ``b`` is added to each row of ``a``.
diff --git a/doc/source/user/basics.copies.rst b/doc/source/user/basics.copies.rst
new file mode 100644
index 000000000..583a59b95
--- /dev/null
+++ b/doc/source/user/basics.copies.rst
@@ -0,0 +1,152 @@
+.. _basics.copies-and-views:
+
+****************
+Copies and views
+****************
+
+When operating on NumPy arrays, it is possible to access the internal data
+buffer directly using a :ref:`view <view>` without copying data around. This
+ensures good performance but can also cause unwanted problems if the user is
+not aware of how this works. Hence, it is important to know the difference
+between these two terms and to know which operations return copies and
+which return views.
+
+The NumPy array is a data structure consisting of two parts:
+the :term:`contiguous` data buffer with the actual data elements and the
+metadata that contains information about the data buffer. The metadata
+includes data type, strides, and other important information that helps
+manipulate the :class:`.ndarray` easily. See the :ref:`numpy-internals`
+section for a detailed look.
+
+.. _view:
+
+View
+====
+
+It is possible to access the array differently by just changing certain
+metadata like :term:`stride` and :term:`dtype` without changing the
+data buffer. This creates a new way of looking at the data and these new
+arrays are called views. The data buffer remains the same, so any changes
+made to a view are reflected in the original array. A view can be forced
+through the :meth:`.ndarray.view` method.
+
+Copy
+====
+
+When a new array is created by duplicating the data buffer as well as the
+metadata, it is called a copy. Changes made to the copy are not
+reflected in the original array. Making a copy is slower and
+memory-consuming but sometimes necessary. A copy can be forced by using
+:meth:`.ndarray.copy`.
+
+Indexing operations
+===================
+
+.. seealso:: :ref:`basics.indexing`
+
+Views are created when elements can be addressed with offsets and strides
+in the original array. Hence, basic indexing always creates views.
+For example::
+
+    >>> x = np.arange(10)
+    >>> x
+    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
+    >>> y = x[1:3]  # creates a view
+    >>> y
+    array([1, 2])
+    >>> x[1:3] = [10, 11]
+    >>> x
+    array([ 0, 10, 11,  3,  4,  5,  6,  7,  8,  9])
+    >>> y
+    array([10, 11])
+
+Here, ``y`` gets changed when ``x`` is changed because it is a view.
+
+:ref:`advanced-indexing`, on the other hand, always creates copies.
+For example::
+
+    >>> x = np.arange(9).reshape(3, 3)
+    >>> x
+    array([[0, 1, 2],
+           [3, 4, 5],
+           [6, 7, 8]])
+    >>> y = x[[1, 2]]
+    >>> y
+    array([[3, 4, 5],
+           [6, 7, 8]])
+    >>> y.base is None
+    True
+
+Here, ``y`` is a copy, as signified by the :attr:`base <.ndarray.base>`
+attribute.
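
For a direct check that does not involve walking the ``base`` chain,
:func:`numpy.shares_memory` can report whether two arrays overlap in memory::

    >>> np.shares_memory(x, y)      # y came from advanced indexing
    False
    >>> np.shares_memory(x, x[1:])  # basic indexing yields a view
    True
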
We can also confirm this by assigning new values to ``x[[1, 2]]``
+which in turn will not affect ``y`` at all::
+
+    >>> x[[1, 2]] = [[10, 11, 12], [13, 14, 15]]
+    >>> x
+    array([[ 0,  1,  2],
+           [10, 11, 12],
+           [13, 14, 15]])
+    >>> y
+    array([[3, 4, 5],
+           [6, 7, 8]])
+
+It must be noted here that during the assignment of ``x[[1, 2]]`` no view
+or copy is created as the assignment happens in-place.
+
+
+Other operations
+================
+
+The :func:`numpy.reshape` function creates a view where possible or a copy
+otherwise. In most cases, the strides can be modified to reshape the
+array with a view. However, in some cases where the array becomes
+non-contiguous (perhaps after a :meth:`.ndarray.transpose` operation),
+the reshaping cannot be done by modifying strides and requires a copy.
+In these cases, assigning the new shape to the ``shape`` attribute of the
+array raises an error rather than silently making a copy. For example::
+
+    >>> x = np.ones((2, 3))
+    >>> y = x.T  # makes the array non-contiguous
+    >>> y
+    array([[1., 1.],
+           [1., 1.],
+           [1., 1.]])
+    >>> z = y.view()
+    >>> z.shape = 6
+    Traceback (most recent call last):
+       ...
+    AttributeError: Incompatible shape for in-place modification. Use
+    `.reshape()` to make a copy with the desired shape.
+
+Taking the example of another operation, :func:`.ravel` returns a contiguous
+flattened view of the array wherever possible. On the other hand,
+:meth:`.ndarray.flatten` always returns a flattened copy of the array.
+However, to obtain a view in most cases, ``x.reshape(-1)`` may be preferable.
+
+How to tell if the array is a view or a copy
+============================================
+
+The :attr:`base <.ndarray.base>` attribute of the ndarray makes it easy
+to tell if an array is a view or a copy. The base attribute of a view returns
+the original array while it returns ``None`` for a copy.
+
+    >>> x = np.arange(9)
+    >>> x
+    array([0, 1, 2, 3, 4, 5, 6, 7, 8])
+    >>> y = x.reshape(3, 3)
+    >>> y
+    array([[0, 1, 2],
+           [3, 4, 5],
+           [6, 7, 8]])
+    >>> y.base  # .reshape() creates a view
+    array([0, 1, 2, 3, 4, 5, 6, 7, 8])
+    >>> z = y[[2, 1]]
+    >>> z
+    array([[6, 7, 8],
+           [3, 4, 5]])
+    >>> z.base is None  # advanced indexing creates a copy
+    True
+
+Note that the ``base`` attribute should not be used to determine
+if an ndarray object is *new*; only if it is a view or a copy
+of another ndarray.
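+
+As a final, minimal sketch (assuming ``import numpy as np``, as in the
+examples above), a view can be forced with :meth:`.ndarray.view` and
+recognized through ``base``::
+
+    >>> x = np.arange(9)
+    >>> y = x.view()  # forces a view; no data is copied
+    >>> y.base is x   # ``base`` points back to the original array
+    True
+    >>> y[0] = 99     # writing through the view...
+    >>> x[0]          # ...changes the original array
+    99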
\ No newline at end of file diff --git a/doc/source/user/basics.creation.rst b/doc/source/user/basics.creation.rst index a68def887..84ff1c30e 100644 --- a/doc/source/user/basics.creation.rst +++ b/doc/source/user/basics.creation.rst @@ -37,8 +37,7 @@ respectively. Lists and tuples can define ndarray creation: >>> a1D = np.array([1, 2, 3, 4]) >>> a2D = np.array([[1, 2], [3, 4]]) - >>> a3D = np.array([[[1, 2], [3, 4]], - [[5, 6], [7, 8]]]) + >>> a3D = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]) When you use :func:`numpy.array` to define a new array, you should consider the :doc:`dtype <basics.types>` of the elements in the array, @@ -116,7 +115,7 @@ examples are shown:: Note: best practice for :func:`numpy.arange` is to use integer start, end, and step values. There are some subtleties regarding ``dtype``. In the second example, the ``dtype`` is defined. In the third example, the array is -``dtype=float`` to accomodate the step size of ``0.1``. Due to roundoff error, +``dtype=float`` to accommodate the step size of ``0.1``. Due to roundoff error, the ``stop`` value is sometimes included. :func:`numpy.linspace` will create arrays with a specified number of elements, and @@ -173,11 +172,11 @@ list or tuple, routine is helpful in generating linear least squares models, as such:: >>> np.vander(np.linspace(0, 2, 5), 2) - array([[0. , 0. , 1. ], - [0.25, 0.5 , 1. ], - [1. , 1. , 1. ], - [2.25, 1.5 , 1. ], - [4. , 2. , 1. ]]) + array([[0. , 1. ], + [0.5, 1. ], + [1. , 1. ], + [1.5, 1. ], + [2. , 1. ]]) >>> np.vander([1, 2, 3, 4], 2) array([[1, 1], [2, 1], @@ -208,7 +207,7 @@ specified shape. The default dtype is ``float64``:: array([[[0., 0.], [0., 0.], [0., 0.]], - + <BLANKLINE> [[0., 0.], [0., 0.], [0., 0.]]]) @@ -223,7 +222,7 @@ specified shape. The default dtype is ``float64``:: array([[[1., 1.], [1., 1.], [1., 1.]], - + <BLANKLINE> [[1., 1.], [1., 1.], [1., 1.]]]) @@ -275,7 +274,7 @@ following example:: >>> b = a[:2] >>> b += 1 >>> print('a =', a, '; b =', b) - a = [2 3 3 4 5 6]; b = [2 3] + a = [2 3 3 4 5 6] ; b = [2 3] In this example, you did not create a new array. You created a variable, ``b`` that viewed the first 2 elements of ``a``. When you added 1 to ``b`` you @@ -286,7 +285,7 @@ would get the same result by adding 1 to ``a[:2]``. If you want to create a >>> b = a[:2].copy() >>> b += 1 >>> print('a = ', a, 'b = ', b) - a = [1 2 3 4 5 6] b = [2 3] + a = [1 2 3 4] b = [2 3] For more information and examples look at :ref:`Copies and Views <quickstart.copies-and-views>`. @@ -299,8 +298,7 @@ arrays into a 4-by-4 array using ``block``:: >>> B = np.eye(2, 2) >>> C = np.zeros((2, 2)) >>> D = np.diag((-3, -4)) - >>> np.block([[A, B], - [C, D]]) + >>> np.block([[A, B], [C, D]]) array([[ 1., 1., 1., 0. ], [ 1., 1., 0., 1. ], [ 0., 0., -3., 0. ], diff --git a/doc/source/user/basics.io.genfromtxt.rst b/doc/source/user/basics.io.genfromtxt.rst index 5364acbe9..8fe7565aa 100644 --- a/doc/source/user/basics.io.genfromtxt.rst +++ b/doc/source/user/basics.io.genfromtxt.rst @@ -437,7 +437,7 @@ process these missing data. By default, any empty string is marked as missing. We can also consider more complex strings, such as ``"N/A"`` or ``"???"`` to represent missing -or invalid data. The ``missing_values`` argument accepts three kind +or invalid data. 
The ``missing_values`` argument accepts three kinds
of values: a string or a comma-separated string diff --git a/doc/source/user/basics.rec.rst b/doc/source/user/basics.rec.rst index 0524fde8e..1e6f30506 100644 --- a/doc/source/user/basics.rec.rst +++ b/doc/source/user/basics.rec.rst @@ -128,7 +128,7 @@ summary they are: ... 'formats': ['i4', 'f4'], ... 'offsets': [0, 4], ... 'itemsize': 12})
- dtype({'names':['col1','col2'], 'formats':['<i4','<f4'], 'offsets':[0,4], 'itemsize':12})
+ dtype({'names': ['col1', 'col2'], 'formats': ['<i4', '<f4'], 'offsets': [0, 4], 'itemsize': 12})
Offsets may be chosen such that the fields overlap, though this will mean that assigning to one field may clobber any overlapping field's data. As diff --git a/doc/source/user/basics.rst b/doc/source/user/basics.rst index bcd51d983..affb85db2 100644 --- a/doc/source/user/basics.rst +++ b/doc/source/user/basics.rst @@ -19,3 +19,4 @@ fundamental NumPy ideas and philosophy. basics.dispatch basics.subclassing basics.ufuncs
+   basics.copies
diff --git a/doc/source/user/building.rst b/doc/source/user/building.rst index 10983ce8f..22efca4a6 100644 --- a/doc/source/user/building.rst +++ b/doc/source/user/building.rst @@ -45,6 +45,9 @@ Building NumPy requires the following software installed: 2) Compilers
+   Much of NumPy is written in C. You will need a C compiler that complies
+   with the C99 standard.
+
While a FORTRAN 77 compiler is not necessary for building NumPy, it is needed to run the ``numpy.f2py`` tests. These tests are skipped if the compiler is not auto-detected. diff --git a/doc/source/user/c-info.beyond-basics.rst b/doc/source/user/c-info.beyond-basics.rst index 121384d04..7dd22afbf 100644 --- a/doc/source/user/c-info.beyond-basics.rst +++ b/doc/source/user/c-info.beyond-basics.rst @@ -174,14 +174,13 @@ incrementing is automatically performed by :c:func:`PyArray_MultiIter_NEXT` ( ``obj`` ) macro (which can handle a multiterator ``obj`` as either a :c:expr:`PyArrayMultiIterObject *` or a :c:expr:`PyObject *`). The data from input number ``i`` is available using
-:c:func:`PyArray_MultiIter_DATA` ( ``obj``, ``i`` ) and the total (broadcasted)
-size as :c:func:`PyArray_MultiIter_SIZE` ( ``obj``). An example of using this
+:c:func:`PyArray_MultiIter_DATA` ( ``obj``, ``i`` ). An example of using this
feature follows.

.. code-block:: c

    mobj = PyArray_MultiIterNew(2, obj1, obj2);
-   size = PyArray_MultiIter_SIZE(obj);
+   size = mobj->size;
    while(size--) {
        ptr1 = PyArray_MultiIter_DATA(mobj, 0);
        ptr2 = PyArray_MultiIter_DATA(mobj, 1);
diff --git a/doc/source/user/c-info.how-to-extend.rst b/doc/source/user/c-info.how-to-extend.rst index ebb4b7518..96727a177 100644 --- a/doc/source/user/c-info.how-to-extend.rst +++ b/doc/source/user/c-info.how-to-extend.rst @@ -433,7 +433,7 @@ writeable). The syntax is The requirements flag allows specification of what kind of array is acceptable. If the object passed in does not satisfy
- this requirements then a copy is made so that thre returned
+ these requirements then a copy is made so that the returned
object will satisfy the requirements. These ndarrays can use a very generic pointer to memory. This flag allows specification of the desired properties of the returned array object.
All diff --git a/doc/source/user/c-info.python-as-glue.rst b/doc/source/user/c-info.python-as-glue.rst index 8643d0dd1..6d514f146 100644 --- a/doc/source/user/c-info.python-as-glue.rst +++ b/doc/source/user/c-info.python-as-glue.rst @@ -1,6 +1,6 @@
-********************
+====================
Using Python as glue
-********************
+====================
| There is no conversation more boring than the one where everybody | agrees. @@ -124,9 +124,9 @@ Creating source for a basic extension module Probably the easiest way to introduce f2py is to offer a simple example. Here is one of the subroutines contained in a file named
-:file:`add.f`:
+:file:`add.f`
-.. code-block:: none
+.. code-block:: fortran
C SUBROUTINE ZADD(A,B,C,N) @@ -149,14 +149,14 @@ routine can be automatically generated by f2py:: You should be able to run this command assuming your search-path is set up properly. This command will produce an extension module named
-addmodule.c in the current directory. This extension module can now be
+:file:`addmodule.c` in the current directory. This extension module can now be
compiled and used from Python just like any other extension module. Creating a compiled extension module ------------------------------------
-You can also get f2py to compile add.f and also compile its produced
+You can also get f2py to both compile :file:`add.f` and build the produced
extension module, leaving only a shared-library extension file that can be imported from Python:: @@ -211,7 +211,7 @@ interface file use the -h option:: This command leaves the file add.pyf in the current directory. The section of this file corresponding to zadd is:
-.. code-block:: none
+.. code-block:: fortran
subroutine zadd(a,b,c,n) ! in :add:add.f double complex dimension(*) :: a @@ -224,7 +224,7 @@ By placing intent directives and checking code, the interface can be cleaned up quite a bit until the Python module method is both easier to use and more robust.
-.. code-block:: none
+.. code-block:: fortran
subroutine zadd(a,b,c,n) ! in :add:add.f double complex dimension(n) :: a @@ -277,9 +277,9 @@ Inserting directives in Fortran source The nice interface can also be generated automatically by placing the variable directives as special comments in the original Fortran code.
-Thus, if I modify the source code to contain:
+Thus, if the source code is modified to contain:
-.. code-block:: none
+.. code-block:: fortran
C SUBROUTINE ZADD(A,B,C,N) @@ -298,14 +298,14 @@ Thus, if I modify the source code to contain: 20 CONTINUE END
-Then, I can compile the extension module using::
+Then, one can compile the extension module using::
f2py -c -m add add.f The resulting signature for the function add.zadd is exactly the same one that was created previously. If the original source code had contained ``A(N)`` instead of ``A(*)`` and so forth with ``B`` and ``C``,
-then I could obtain (nearly) the same interface simply by placing the
+then nearly the same interface can be obtained by placing the
``INTENT(OUT) :: C`` comment line in the source code. The only difference is that ``N`` would be an optional input that would default to the length of ``A``. @@ -320,7 +320,7 @@ precision floating-point numbers using a fixed averaging filter. The advantage of using Fortran to index into multi-dimensional arrays should be clear from this example.
-.. code-block:: none
+.. code-block:: fortran
SUBROUTINE DFILTER2D(A,B,M,N) C @@ -407,13 +407,12 @@ conversion of the .pyf file to a .c file is handled by `numpy.distutils`.
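+Before moving on, here is a short, hedged usage sketch. It assumes the module
+was built with ``f2py -c -m add add.f`` and that the cleaned-up interface with
+``intent(out) :: c`` described above is in effect; the array values are
+illustrative only::
+
+    >>> import numpy as np
+    >>> import add                    # the f2py-built extension module
+    >>> a = np.array([1+2j, 3+4j])
+    >>> b = np.array([5+6j, 7+8j])
+    >>> c = add.zadd(a, b)            # ``c`` is returned thanks to intent(out)
+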
Conclusion
----------
-The interface definition file (.pyf) is how you can fine-tune the
-interface between Python and Fortran. There is decent documentation
-for f2py found in the numpy/f2py/docs directory where-ever NumPy is
-installed on your system (usually under site-packages). There is also
-more information on using f2py (including how to use it to wrap C
-codes) at https://scipy-cookbook.readthedocs.io under the "Interfacing
-With Other Languages" heading.
+The interface definition file (.pyf) is how you can fine-tune the interface
+between Python and Fortran. There is decent documentation for f2py at
+:ref:`f2py`. There is also more information on using f2py (including how to use
+it to wrap C codes) at the `"Interfacing With Other Languages" heading of the
+SciPy Cookbook
+<https://scipy-cookbook.readthedocs.io/items/idx_interfacing_with_other_languages.html>`_.
The f2py method of linking compiled code is currently the most sophisticated and integrated approach. It allows clean separation of @@ -422,7 +421,7 @@ distribution of the extension module. The only drawback is that it requires the existence of a Fortran compiler in order for a user to install the code. However, with the existence of the free compilers g77, gfortran, and g95, as well as high-quality commercial compilers,
-this restriction is not particularly onerous. In my opinion, Fortran
+this restriction is not particularly onerous. In our opinion, Fortran
is still the easiest way to write fast and clear code for scientific computing. It handles complex numbers and multi-dimensional indexing in the most straightforward way. Be aware, however, that some Fortran @@ -493,7 +492,7 @@ Complex addition in Cython Here is part of a Cython module named ``add.pyx`` which implements the complex addition functions we previously implemented using f2py:
-.. code-block:: none
+.. code-block:: cython
cimport cython cimport numpy as np @@ -546,7 +545,7 @@ Image filter in Cython The two-dimensional example we created using Fortran is just as easy to write in Cython:
-.. code-block:: none
+.. code-block:: cython
cimport numpy as np import numpy as np @@ -809,7 +808,7 @@ Calling the function The function is accessed as an attribute of or an item from the loaded shared-library. Thus, if ``./mylib.so`` has a function named
-``cool_function1``, I could access this function either as:
+``cool_function1``, it may be accessed either as:
.. code-block:: python

@@ -859,7 +858,7 @@ kind of array from a given input. Complete example ----------------
-In this example, I will show how the addition function and the filter
+In this example, we will demonstrate how the addition function and the filter
function implemented previously using the other approaches can be implemented using ctypes. First, the C code which implements the algorithms contains the functions ``zadd``, ``dadd``, ``sadd``, ``cadd``, @@ -1073,7 +1072,7 @@ Its disadvantages include - It is difficult to distribute an extension module made using ctypes because of a lack of support for building shared libraries in
-  distutils (but I suspect this will change in time).
+  distutils.
- You must have shared-libraries of your code (no static libraries). @@ -1095,15 +1094,14 @@ Additional tools you may find useful These tools have been found useful by others using Python and so are included here. They are discussed separately because they are either older ways to do things now handled by f2py, Cython, or ctypes
-(SWIG, PyFort) or because I don't know much about them (SIP, Boost).
-I have not added links to these
-methods because my experience is that you can find the most relevant
-link faster using Google or some other search engine, and any links
-provided here would be quickly dated. Do not assume that just because
-it is included in this list, I don't think the package deserves your
-attention. I'm including information about these packages because many
-people have found them useful and I'd like to give you as many options
-as possible for tackling the problem of easily integrating your code.
+(SWIG, PyFort) or because of a lack of reasonable documentation (SIP, Boost).
+Links to these methods are not included since the most relevant ones
+can be found using Google or some other search engine, and any links provided
+here would be quickly dated. Do not assume that inclusion in this list means
+that the package deserves attention. Information about these packages is
+collected here because many people have found them useful and we'd like to give
+you as many options as possible for tackling the problem of easily integrating
+your code.

SWIG @@ -1115,7 +1113,7 @@ SWIG Simplified Wrapper and Interface Generator (SWIG) is an old and fairly stable method for wrapping C/C++-libraries to a large variety of other languages. It does not specifically understand NumPy arrays but can be
-made useable with NumPy through the use of typemaps. There are some
+made usable with NumPy through the use of typemaps. There are some
sample typemaps in the numpy/tools/swig directory under numpy.i together with an example module that makes use of them. SWIG excels at wrapping large C/C++ libraries because it can (almost) parse their headers and @@ -1132,12 +1130,12 @@ to the Python-specific typemaps, SWIG can be used to interface a library with other languages such as Perl, Tcl, and Ruby. My experience with SWIG has been generally positive in that it is
-relatively easy to use and quite powerful. I used to use it quite
+relatively easy to use and quite powerful. It has often been used by programmers
before becoming more proficient at writing C-extensions.
-However, I struggled writing custom interfaces with SWIG because it
+However, writing custom interfaces with SWIG is often troublesome because it
must be done using the concept of typemaps which are not Python
-specific and are written in a C-like syntax. Therefore, I tend to
-prefer other gluing strategies and would only attempt to use SWIG to
+specific and are written in a C-like syntax. Therefore, other gluing strategies
+are preferred and SWIG would probably be considered only to
wrap a very large C/C++ library. Nonetheless, there are others who use SWIG quite happily. @@ -1170,12 +1168,11 @@ those libraries which provides a concise interface for binding C++ classes and functions to Python. The amazing part of the Boost.Python approach is that it works entirely in pure C++ without introducing a new syntax. Many users of C++ report that Boost.Python makes it
-possible to combine the best of both worlds in a seamless fashion. I
-have not used Boost.Python because I am not a big user of C++ and
-using Boost to wrap simple C-subroutines is usually over-kill. It's
-primary purpose is to make C++ classes available in Python. So, if you
-have a set of C++ classes that need to be integrated cleanly into
-Python, consider learning about and using Boost.Python.
+possible to combine the best of both worlds in a seamless fashion. Using Boost
+to wrap simple C-subroutines is usually overkill. Its primary purpose is to
+make C++ classes available in Python.
So, if you have a set of C++ classes that +need to be integrated cleanly into Python, consider learning about and using +Boost.Python. PyFort diff --git a/doc/source/user/c-info.ufunc-tutorial.rst b/doc/source/user/c-info.ufunc-tutorial.rst index 8ff45a934..9bd01b963 100644 --- a/doc/source/user/c-info.ufunc-tutorial.rst +++ b/doc/source/user/c-info.ufunc-tutorial.rst @@ -80,6 +80,7 @@ the module. .. code-block:: c + #define PY_SSIZE_T_CLEAN #include <Python.h> #include <math.h> @@ -252,11 +253,12 @@ the primary thing that must be changed to create your own ufunc. .. code-block:: c - #include "Python.h" - #include "math.h" + #define PY_SSIZE_T_CLEAN + #include <Python.h> #include "numpy/ndarraytypes.h" #include "numpy/ufuncobject.h" #include "numpy/npy_3kcompat.h" + #include <math.h> /* * single_type_logit.c @@ -427,11 +429,12 @@ the primary thing that must be changed to create your own ufunc. .. code-block:: c - #include "Python.h" - #include "math.h" + #define PY_SSIZE_T_CLEAN + #include <Python.h> #include "numpy/ndarraytypes.h" #include "numpy/ufuncobject.h" #include "numpy/halffloat.h" + #include <math.h> /* * multi_type_logit.c @@ -696,11 +699,12 @@ as well as all other properties of a ufunc. .. code-block:: c - #include "Python.h" - #include "math.h" + #define PY_SSIZE_T_CLEAN + #include <Python.h> #include "numpy/ndarraytypes.h" #include "numpy/ufuncobject.h" #include "numpy/halffloat.h" + #include <math.h> /* * multi_arg_logit.c @@ -828,11 +832,12 @@ The C file is given below. .. code-block:: c - #include "Python.h" - #include "math.h" + #define PY_SSIZE_T_CLEAN + #include <Python.h> #include "numpy/ndarraytypes.h" #include "numpy/ufuncobject.h" #include "numpy/npy_3kcompat.h" + #include <math.h> /* diff --git a/doc/source/user/how-to-how-to.rst b/doc/source/user/how-to-how-to.rst index 13d2b405f..cdf1ad5c3 100644 --- a/doc/source/user/how-to-how-to.rst +++ b/doc/source/user/how-to-how-to.rst @@ -102,7 +102,7 @@ knowledge). We distinguish both tutorials and how-tos from `Explanations`, which are deep dives intended to give understanding rather than immediate assistance, -and `References`, which give complete, autoritative data on some concrete +and `References`, which give complete, authoritative data on some concrete part of NumPy (like its API) but aren't obligated to paint a broader picture. For more on tutorials, see :doc:`content/tutorial-style-guide` diff --git a/doc/source/user/misc.rst b/doc/source/user/misc.rst index f0a7f5e4c..316473151 100644 --- a/doc/source/user/misc.rst +++ b/doc/source/user/misc.rst @@ -143,7 +143,7 @@ Only a survey of the choices. Little detail on how each works. 
- Plusses: - part of Python standard library
-   - good for interfacing to existing sharable libraries, particularly
+   - good for interfacing to existing shareable libraries, particularly
Windows DLLs - avoids API/reference counting issues - good numpy support: arrays have all these in their ctypes diff --git a/doc/source/user/numpy-for-matlab-users.rst b/doc/source/user/numpy-for-matlab-users.rst index ed0be82a0..21e23482a 100644 --- a/doc/source/user/numpy-for-matlab-users.rst +++ b/doc/source/user/numpy-for-matlab-users.rst @@ -313,11 +313,11 @@ Linear algebra equivalents * - ``a(:,find(v > 0.5))`` - ``a[:,np.nonzero(v > 0.5)[0]]``
-     - extract the columms of ``a`` where vector v > 0.5
+     - extract the columns of ``a`` where vector v > 0.5
* - ``a(:,find(v>0.5))`` - ``a[:, v.T > 0.5]``
-     - extract the columms of ``a`` where column vector v > 0.5
+     - extract the columns of ``a`` where column vector v > 0.5
* - ``a(a<0.5)=0`` - ``a[a < 0.5]=0`` @@ -819,6 +819,6 @@ found in the `topical software page <https://scipy.org/topical-software.html>`__ See `List of Python software: scripting <https://en.wikipedia.org/wiki/List_of_Python_software#Embedded_as_a_scripting_language>`_
-for a list of softwares that use Python as a scripting language
+for a list of software that uses Python as a scripting language.
MATLAB® and SimuLink® are registered trademarks of The MathWorks, Inc. diff --git a/doc/source/user/quickstart.rst b/doc/source/user/quickstart.rst index dd5773878..a9cfeca31 100644 --- a/doc/source/user/quickstart.rst +++ b/doc/source/user/quickstart.rst @@ -45,10 +45,11 @@ NumPy's main object is the homogeneous multidimensional array. It is a table of elements (usually numbers), all of the same type, indexed by a tuple of non-negative integers. In NumPy dimensions are called *axes*.
-For example, the coordinates of a point in 3D space ``[1, 2, 1]`` has
-one axis. That axis has 3 elements in it, so we say it has a length
-of 3. In the example pictured below, the array has 2 axes. The first
-axis has a length of 2, the second axis has a length of 3.
+For example, the array for the coordinates of a point in 3D space,
+``[1, 2, 1]``, has one axis. That axis has 3 elements in it, so we say
+it has a length of 3. In the example pictured below, the array has 2
+axes. The first axis has a length of 2, the second axis has a length of
+3.

::

diff --git a/doc/source/user/whatisnumpy.rst b/doc/source/user/whatisnumpy.rst index 154f91c84..e152a4ae2 100644 --- a/doc/source/user/whatisnumpy.rst +++ b/doc/source/user/whatisnumpy.rst @@ -125,7 +125,7 @@ same shape, or a scalar and an array, or even two arrays with different shapes, provided that the smaller array is "expandable" to the shape of the larger in such a way that the resulting broadcast is unambiguous. For detailed "rules" of broadcasting see
-`basics.broadcasting`.
+:ref:`Broadcasting <basics.broadcasting>`.
Who Else Uses NumPy? -------------------- diff --git a/doc/ufuncs.rst.txt b/doc/ufuncs.rst.txt index d628b3f95..9257d3cb0 100644 --- a/doc/ufuncs.rst.txt +++ b/doc/ufuncs.rst.txt @@ -18,7 +18,7 @@ Some benchmarks show that this results in a significant slow-down The approach is therefore to loop over the largest-dimension (just like the NO_BUFFER) portion of the code. All arrays will either have N or
-1 in this last dimension (or their would be a mis-match error). The
+1 in this last dimension (or there would be a mismatch error). The
If N <= B (and only if needed), we copy the entire last-dimension into diff --git a/doc_requirements.txt b/doc_requirements.txt index 61ce7549b..5324d4d0c 100644 --- a/doc_requirements.txt +++ b/doc_requirements.txt @@ -1,7 +1,8 @@ -sphinx==4.1.2 +sphinx==4.2.0 numpydoc==1.1.0 ipython scipy matplotlib pandas pydata-sphinx-theme +breathe diff --git a/environment.yml b/environment.yml index 188e29a4f..6a13499e0 100644 --- a/environment.yml +++ b/environment.yml @@ -18,7 +18,7 @@ dependencies: - pytest-xdist - hypothesis # For type annotations - - mypy=0.902 + - mypy=0.910 # For building docs - sphinx=4.1.1 - numpydoc=1.1.0 @@ -27,6 +27,7 @@ dependencies: - pandas - matplotlib - pydata-sphinx-theme + - breathe # For linting - pycodestyle=2.7.0 - gitpython diff --git a/linter_requirements.txt b/linter_requirements.txt index 51a769ee0..6ed26c5c0 100644 --- a/linter_requirements.txt +++ b/linter_requirements.txt @@ -1,2 +1,2 @@ -pycodestyle==2.7.0 +pycodestyle==2.8.0 GitPython==3.1.13
\ No newline at end of file diff --git a/numpy/__init__.py b/numpy/__init__.py index ffef369e3..a1b1005cb 100644 --- a/numpy/__init__.py +++ b/numpy/__init__.py @@ -135,13 +135,6 @@ else: __all__ = ['ModuleDeprecationWarning', 'VisibleDeprecationWarning'] - # get the version using versioneer - from ._version import get_versions - vinfo = get_versions() - __version__ = vinfo.get("closest-tag", vinfo["version"]) - __git_version__ = vinfo.get("full-revisionid") - del get_versions, vinfo - # mapping of {name: (value, deprecation_msg)} __deprecated_attrs__ = {} @@ -196,12 +189,19 @@ else: n: (getattr(_builtins, n), _msg.format(n=n, extended_msg=extended_msg)) for n, extended_msg in _type_info }) + # Numpy 1.20.0, 2020-10-19 __deprecated_attrs__["typeDict"] = ( core.numerictypes.typeDict, "`np.typeDict` is a deprecated alias for `np.sctypeDict`." ) + # NumPy 1.22, 2021-10-20 + __deprecated_attrs__["MachAr"] = ( + core._machar.MachAr, + "`np.MachAr` is deprecated (NumPy 1.22)." + ) + _msg = ( "`np.{n}` is a deprecated alias for `np.compat.{n}`. " "To silence this warning, use `np.compat.{n}` by itself. " @@ -409,6 +409,6 @@ else: # it is tidier organized. core.multiarray._multiarray_umath._reload_guard() -from ._version import get_versions -__version__ = get_versions()['version'] -del get_versions + +# get the version using versioneer +from .version import __version__, git_revision as __git_version__ diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 30b0944fd..4e6969e32 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -10,10 +10,11 @@ from abc import abstractmethod from types import TracebackType, MappingProxyType from contextlib import ContextDecorator +if sys.version_info >= (3, 9): + from types import GenericAlias + from numpy._pytesttester import PytestTester -from numpy.core.multiarray import flagsobj from numpy.core._internal import _ctypes -from numpy.core.getlimits import MachArLike from numpy.typing import ( # Arrays @@ -21,7 +22,7 @@ from numpy.typing import ( NDArray, _SupportsArray, _NestedSequence, - _RecursiveSequence, + _FiniteNestedSequence, _SupportsArray, _ArrayLikeBool_co, _ArrayLikeUInt_co, @@ -32,6 +33,8 @@ from numpy.typing import ( _ArrayLikeTD64_co, _ArrayLikeDT64_co, _ArrayLikeObject_co, + _ArrayLikeStr_co, + _ArrayLikeBytes_co, # DTypes DTypeLike, @@ -194,11 +197,13 @@ from typing import ( Protocol, SupportsIndex, Final, + final, + ClassVar, + Set, ) # Ensures that the stubs are picked up from numpy import ( - char as char, ctypeslib as ctypeslib, fft as fft, lib as lib, @@ -207,11 +212,14 @@ from numpy import ( matrixlib as matrixlib, polynomial as polynomial, random as random, - rec as rec, testing as testing, version as version, ) +from numpy.core import defchararray, records +char = defchararray +rec = records + from numpy.core.function_base import ( linspace as linspace, logspace as logspace, @@ -275,7 +283,6 @@ from numpy.core._ufunc_config import ( getbufsize as getbufsize, seterrcall as seterrcall, geterrcall as geterrcall, - _SupportsWrite, _ErrKind, _ErrFunc, _ErrDictOptional, @@ -347,6 +354,8 @@ from numpy.core.multiarray import ( geterrobj as geterrobj, fromstring as fromstring, frompyfunc as frompyfunc, + nested_iters as nested_iters, + flagsobj, ) from numpy.core.numeric import ( @@ -622,6 +631,8 @@ from numpy.matrixlib import ( bmat as bmat, ) +_AnyStr_contra = TypeVar("_AnyStr_contra", str, bytes, contravariant=True) + # Protocol for representing file-like-objects accepted # by `ndarray.tofile` and `fromfile` class 
_IOProtocol(Protocol): @@ -630,6 +641,20 @@ class _IOProtocol(Protocol): def tell(self) -> SupportsIndex: ... def seek(self, offset: int, whence: int, /) -> object: ... +# NOTE: `seek`, `write` and `flush` are technically only required +# for `readwrite`/`write` modes +class _MemMapIOProtocol(Protocol): + def flush(self) -> object: ... + def fileno(self) -> SupportsIndex: ... + def tell(self) -> int: ... + def seek(self, offset: int, whence: int, /) -> object: ... + def write(self, s: bytes, /) -> object: ... + @property + def read(self) -> object: ... + +class _SupportsWrite(Protocol[_AnyStr_contra]): + def write(self, s: _AnyStr_contra, /) -> object: ... + __all__: List[str] __path__: List[str] __version__: str @@ -640,272 +665,6 @@ test: PytestTester # their annotations are properly implemented # # Placeholders for classes -# TODO: Remove `__getattr__` once the classes are stubbed out -class MachAr: - def __init__( - self, - float_conv: Any = ..., - int_conv: Any = ..., - float_to_float: Any = ..., - float_to_str: Any = ..., - title: Any = ..., - ) -> None: ... - def __getattr__(self, key: str) -> Any: ... - -class chararray(ndarray[_ShapeType, _DType_co]): - def __new__( - subtype, - shape: Any, - itemsize: Any = ..., - unicode: Any = ..., - buffer: Any = ..., - offset: Any = ..., - strides: Any = ..., - order: Any = ..., - ) -> Any: ... - def __array_finalize__(self, obj): ... - def argsort(self, axis=..., kind=..., order=...): ... - def capitalize(self): ... - def center(self, width, fillchar=...): ... - def count(self, sub, start=..., end=...): ... - def decode(self, encoding=..., errors=...): ... - def encode(self, encoding=..., errors=...): ... - def endswith(self, suffix, start=..., end=...): ... - def expandtabs(self, tabsize=...): ... - def find(self, sub, start=..., end=...): ... - def index(self, sub, start=..., end=...): ... - def isalnum(self): ... - def isalpha(self): ... - def isdigit(self): ... - def islower(self): ... - def isspace(self): ... - def istitle(self): ... - def isupper(self): ... - def join(self, seq): ... - def ljust(self, width, fillchar=...): ... - def lower(self): ... - def lstrip(self, chars=...): ... - def partition(self, sep): ... - def replace(self, old, new, count=...): ... - def rfind(self, sub, start=..., end=...): ... - def rindex(self, sub, start=..., end=...): ... - def rjust(self, width, fillchar=...): ... - def rpartition(self, sep): ... - def rsplit(self, sep=..., maxsplit=...): ... - def rstrip(self, chars=...): ... - def split(self, sep=..., maxsplit=...): ... - def splitlines(self, keepends=...): ... - def startswith(self, prefix, start=..., end=...): ... - def strip(self, chars=...): ... - def swapcase(self): ... - def title(self): ... - def translate(self, table, deletechars=...): ... - def upper(self): ... - def zfill(self, width): ... - def isnumeric(self): ... - def isdecimal(self): ... - -class format_parser: - def __init__( - self, - formats: Any, - names: Any, - titles: Any, - aligned: Any = ..., - byteorder: Any = ..., - ) -> None: ... - -class matrix(ndarray[_ShapeType, _DType_co]): - def __new__( - subtype, - data: Any, - dtype: Any = ..., - copy: Any = ..., - ) -> Any: ... - def __array_finalize__(self, obj): ... - def __getitem__(self, index): ... - def __mul__(self, other): ... - def __rmul__(self, other): ... - def __imul__(self, other): ... - def __pow__(self, other): ... - def __ipow__(self, other): ... - def __rpow__(self, other): ... - def tolist(self): ... - def sum(self, axis=..., dtype=..., out=...): ... 
- def squeeze(self, axis=...): ... - def flatten(self, order=...): ... - def mean(self, axis=..., dtype=..., out=...): ... - def std(self, axis=..., dtype=..., out=..., ddof=...): ... - def var(self, axis=..., dtype=..., out=..., ddof=...): ... - def prod(self, axis=..., dtype=..., out=...): ... - def any(self, axis=..., out=...): ... - def all(self, axis=..., out=...): ... - def max(self, axis=..., out=...): ... - def argmax(self, axis=..., out=...): ... - def min(self, axis=..., out=...): ... - def argmin(self, axis=..., out=...): ... - def ptp(self, axis=..., out=...): ... - def ravel(self, order=...): ... - @property - def T(self): ... - @property - def I(self): ... - @property - def A(self): ... - @property - def A1(self): ... - @property - def H(self): ... - def getT(self): ... - def getA(self): ... - def getA1(self): ... - def getH(self): ... - def getI(self): ... - -class memmap(ndarray[_ShapeType, _DType_co]): - def __new__( - subtype, - filename: Any, - dtype: Any = ..., - mode: Any = ..., - offset: Any = ..., - shape: Any = ..., - order: Any = ..., - ) -> Any: ... - def __getattr__(self, key: str) -> Any: ... - -class nditer: - def __new__( - cls, - op: Any, - flags: Any = ..., - op_flags: Any = ..., - op_dtypes: Any = ..., - order: Any = ..., - casting: Any = ..., - op_axes: Any = ..., - itershape: Any = ..., - buffersize: Any = ..., - ) -> Any: ... - def __getattr__(self, key: str) -> Any: ... - def __enter__(self) -> nditer: ... - def __exit__( - self, - exc_type: None | Type[BaseException], - exc_value: None | BaseException, - traceback: None | TracebackType, - ) -> None: ... - def __iter__(self) -> Iterator[Any]: ... - def __next__(self) -> Any: ... - def __len__(self) -> int: ... - def __copy__(self) -> nditer: ... - def __getitem__(self, index: SupportsIndex | slice) -> Any: ... - def __setitem__(self, index: SupportsIndex | slice, value: Any) -> None: ... - def __delitem__(self, key: SupportsIndex | slice) -> None: ... - - -class poly1d: - def __init__( - self, - c_or_r: Any, - r: Any = ..., - variable: Any = ..., - ) -> None: ... - def __call__(self, val: Any) -> Any: ... - __hash__: Any - @property - def coeffs(self): ... - @coeffs.setter - def coeffs(self, value): ... - @property - def c(self): ... - @c.setter - def c(self, value): ... - @property - def coef(self): ... - @coef.setter - def coef(self, value): ... - @property - def coefficients(self): ... - @coefficients.setter - def coefficients(self, value): ... - @property - def variable(self): ... - @property - def order(self): ... - @property - def o(self): ... - @property - def roots(self): ... - @property - def r(self): ... - def __array__(self, t=...): ... - def __len__(self): ... - def __neg__(self): ... - def __pos__(self): ... - def __mul__(self, other): ... - def __rmul__(self, other): ... - def __add__(self, other): ... - def __radd__(self, other): ... - def __pow__(self, val): ... - def __sub__(self, other): ... - def __rsub__(self, other): ... - def __div__(self, other): ... - def __truediv__(self, other): ... - def __rdiv__(self, other): ... - def __rtruediv__(self, other): ... - def __eq__(self, other): ... - def __ne__(self, other): ... - def __getitem__(self, val): ... - def __setitem__(self, key, val): ... - def __iter__(self): ... - def integ(self, m=..., k=...): ... - def deriv(self, m=...): ... 
- -class recarray(ndarray[_ShapeType, _DType_co]): - def __new__( - subtype, - shape: Any, - dtype: Any = ..., - buf: Any = ..., - offset: Any = ..., - strides: Any = ..., - formats: Any = ..., - names: Any = ..., - titles: Any = ..., - byteorder: Any = ..., - aligned: Any = ..., - order: Any = ..., - ) -> Any: ... - def __array_finalize__(self, obj): ... - def __getattribute__(self, attr): ... - def __setattr__(self, attr, val): ... - def __getitem__(self, indx): ... - def field(self, attr, val=...): ... - -class record(void): - def __getattribute__(self, attr): ... - def __setattr__(self, attr, val): ... - def __getitem__(self, indx): ... - def pprint(self): ... - -class vectorize: - pyfunc: Any - cache: Any - signature: Any - otypes: Any - excluded: Any - __doc__: Any - def __init__( - self, - pyfunc, - otypes: Any = ..., - doc: Any = ..., - excluded: Any = ..., - cache: Any = ..., - signature: Any = ..., - ) -> None: ... - def __call__(self, *args: Any, **kwargs: Any) -> Any: ... # Some of these are aliases; others are wrappers with an identical signature round = around @@ -919,9 +678,6 @@ alltrue = all def show_config() -> None: ... -# TODO: Sort out which parameters are positional-only -def nested_iters(*args, **kwargs): ... # TODO: Sort out parameters - _NdArraySubClass = TypeVar("_NdArraySubClass", bound=ndarray) _DTypeScalar_co = TypeVar("_DTypeScalar_co", covariant=True, bound=generic) _ByteOrder = L["S", "<", ">", "=", "|", "L", "B", "N", "I"] @@ -1089,6 +845,9 @@ class dtype(Generic[_DTypeScalar_co]): copy: bool = ..., ) -> dtype[object_]: ... + if sys.version_info >= (3, 9): + def __class_getitem__(self, item: Any) -> GenericAlias: ... + @overload def __getitem__(self: dtype[void], key: List[str]) -> dtype[void]: ... @overload @@ -1114,6 +873,13 @@ class dtype(Generic[_DTypeScalar_co]): def __ge__(self, other: DTypeLike) -> bool: ... def __lt__(self, other: DTypeLike) -> bool: ... def __le__(self, other: DTypeLike) -> bool: ... + + # Explicitly defined `__eq__` and `__ne__` to get around mypy's + # `strict_equality` option; even though their signatures are + # identical to their `object`-based counterpart + def __eq__(self, other: Any) -> bool: ... + def __ne__(self, other: Any) -> bool: ... + @property def alignment(self) -> int: ... @property @@ -1224,11 +990,14 @@ class _ArrayOrScalarCommon: def __str__(self) -> str: ... def __repr__(self) -> str: ... def __copy__(self: _ArraySelf) -> _ArraySelf: ... - def __deepcopy__(self: _ArraySelf, memo: None | dict = ..., /) -> _ArraySelf: ... - def __eq__(self, other): ... - def __ne__(self, other): ... + def __deepcopy__(self: _ArraySelf, memo: None | Dict[int, Any], /) -> _ArraySelf: ... + + # TODO: How to deal with the non-commutative nature of `==` and `!=`? + # xref numpy/numpy#17368 + def __eq__(self, other: Any) -> Any: ... + def __ne__(self, other: Any) -> Any: ... def copy(self: _ArraySelf, order: _OrderKACF = ...) -> _ArraySelf: ... - def dump(self, file: str) -> None: ... + def dump(self, file: str | bytes | os.PathLike[str] | os.PathLike[bytes] | _SupportsWrite[bytes]) -> None: ... def dumps(self) -> bytes: ... def tobytes(self, order: _OrderKACF = ...) -> bytes: ... # NOTE: `tostring()` is deprecated and therefore excluded @@ -1242,15 +1011,19 @@ class _ArrayOrScalarCommon: # generics and 0d arrays return builtin scalars def tolist(self) -> Any: ... - # TODO: Add proper signatures - def __getitem__(self, key) -> Any: ... @property - def __array_interface__(self): ... + def __array_interface__(self) -> Dict[str, Any]: ... 
@property
def __array_priority__(self) -> float: ...
@property
-    def __array_struct__(self): ...
+    def __array_struct__(self) -> Any: ...  # builtins.PyCapsule
-    def __setstate__(self, state, /): ...
+    def __setstate__(self, state: Tuple[
+        SupportsIndex,  # version
+        _ShapeLike,  # Shape
+        _DType_co,  # DType
+        bool,  # F-contiguous
+        bytes | List[Any],  # Data
+    ], /) -> None: ...
# a `bool_` is returned when `keepdims=True` and `self` is a 0d array
@overload
@@ -1693,16 +1466,39 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): cls: Type[_ArraySelf], shape: _ShapeLike, dtype: DTypeLike = ...,
-        buffer: _SupportsBuffer = ...,
-        offset: int = ...,
-        strides: _ShapeLike = ...,
+        buffer: None | _SupportsBuffer = ...,
+        offset: SupportsIndex = ...,
+        strides: None | _ShapeLike = ...,
order: _OrderKACF = ..., ) -> _ArraySelf: ...
+
+    if sys.version_info >= (3, 9):
+        def __class_getitem__(self, item: Any) -> GenericAlias: ...
+
@overload
def __array__(self, dtype: None = ..., /) -> ndarray[Any, _DType_co]: ...
@overload
def __array__(self, dtype: _DType, /) -> ndarray[Any, _DType]: ...
+    def __array_ufunc__(
+        self,
+        ufunc: ufunc,
+        method: L["__call__", "reduce", "reduceat", "accumulate", "outer", "inner"],
+        *inputs: Any,
+        **kwargs: Any,
+    ) -> Any: ...
+
+    def __array_function__(
+        self,
+        func: Callable[..., Any],
+        types: Iterable[type],
+        args: Iterable[Any],
+        kwargs: Mapping[str, Any],
+    ) -> Any: ...
+
+    @property
+    def __array_finalize__(self) -> None: ...
+
def __array_wrap__( self, array: ndarray[_ShapeType2, _DType], @@ -1717,6 +1513,26 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): /, ) -> ndarray[_ShapeType2, _DType]: ...
+    @overload
+    def __getitem__(self, key: Union[
+        SupportsIndex,
+        _ArrayLikeInt_co,
+        Tuple[SupportsIndex | _ArrayLikeInt_co, ...],
+    ]) -> Any: ...
+    @overload
+    def __getitem__(self, key: Union[
+        None,
+        slice,
+        ellipsis,
+        SupportsIndex,
+        _ArrayLikeInt_co,
+        Tuple[None | slice | ellipsis | _ArrayLikeInt_co | SupportsIndex, ...],
+    ]) -> ndarray[Any, _DType_co]: ...
+    @overload
+    def __getitem__(self: NDArray[void], key: str) -> NDArray[Any]: ...
+    @overload
+    def __getitem__(self: NDArray[void], key: list[str]) -> ndarray[_ShapeType, dtype[void]]: ...
+
@property
def ctypes(self) -> _ctypes[int]: ...
@property
@@ -1989,12 +1805,10 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): # The last overload is for catching recursive objects whose # nesting is too deep. # The first overload is for catching `bytes` (as they are a subtype of
-    # `Sequence[int]`) and `str`. As `str` is a recusive sequence of
+    # `Sequence[int]`) and `str`. As `str` is a recursive sequence of
# strings, it will pass through the final overload otherwise
@overload
-    def __lt__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ...
-    @overload
def __lt__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co) -> NDArray[bool_]: ...
@overload
def __lt__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> NDArray[bool_]: ...
@@ -2004,15 +1818,8 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): def __lt__(self: NDArray[object_], other: Any) -> NDArray[bool_]: ...
@overload
def __lt__(self: NDArray[Any], other: _ArrayLikeObject_co) -> NDArray[bool_]: ...
-    @overload
-    def __lt__(
-        self: NDArray[Union[number[Any], datetime64, timedelta64, bool_]],
-        other: _RecursiveSequence,
-    ) -> NDArray[bool_]: ...
@overload - def __le__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... - @overload def __le__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co) -> NDArray[bool_]: ... @overload def __le__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> NDArray[bool_]: ... @@ -2022,15 +1829,8 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): def __le__(self: NDArray[object_], other: Any) -> NDArray[bool_]: ... @overload def __le__(self: NDArray[Any], other: _ArrayLikeObject_co) -> NDArray[bool_]: ... - @overload - def __le__( - self: NDArray[Union[number[Any], datetime64, timedelta64, bool_]], - other: _RecursiveSequence, - ) -> NDArray[bool_]: ... @overload - def __gt__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... - @overload def __gt__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co) -> NDArray[bool_]: ... @overload def __gt__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> NDArray[bool_]: ... @@ -2040,15 +1840,8 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): def __gt__(self: NDArray[object_], other: Any) -> NDArray[bool_]: ... @overload def __gt__(self: NDArray[Any], other: _ArrayLikeObject_co) -> NDArray[bool_]: ... - @overload - def __gt__( - self: NDArray[Union[number[Any], datetime64, timedelta64, bool_]], - other: _RecursiveSequence, - ) -> NDArray[bool_]: ... @overload - def __ge__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... - @overload def __ge__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co) -> NDArray[bool_]: ... @overload def __ge__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> NDArray[bool_]: ... @@ -2058,11 +1851,6 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): def __ge__(self: NDArray[object_], other: Any) -> NDArray[bool_]: ... @overload def __ge__(self: NDArray[Any], other: _ArrayLikeObject_co) -> NDArray[bool_]: ... - @overload - def __ge__( - self: NDArray[Union[number[Any], datetime64, timedelta64, bool_]], - other: _RecursiveSequence, - ) -> NDArray[bool_]: ... # Unary ops @overload @@ -2100,8 +1888,6 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): # Binary ops # NOTE: `ndarray` does not implement `__imatmul__` @overload - def __matmul__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... - @overload def __matmul__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... # type: ignore[misc] @overload def __matmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] @@ -2115,15 +1901,8 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): def __matmul__(self: NDArray[object_], other: Any) -> Any: ... @overload def __matmul__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... - @overload - def __matmul__( - self: _ArrayNumber_co, - other: _RecursiveSequence, - ) -> Any: ... @overload - def __rmatmul__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... - @overload def __rmatmul__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... # type: ignore[misc] @overload def __rmatmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] @@ -2137,15 +1916,8 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): def __rmatmul__(self: NDArray[object_], other: Any) -> Any: ... 
@overload def __rmatmul__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... - @overload - def __rmatmul__( - self: _ArrayNumber_co, - other: _RecursiveSequence, - ) -> Any: ... @overload - def __mod__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... - @overload def __mod__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc] @overload def __mod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] @@ -2154,20 +1926,13 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): @overload def __mod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] @overload - def __mod__(self: _ArrayTD64_co, other: _NestedSequence[_SupportsArray[dtype[timedelta64]]]) -> NDArray[timedelta64]: ... + def __mod__(self: _ArrayTD64_co, other: _SupportsArray[dtype[timedelta64]] | _NestedSequence[_SupportsArray[dtype[timedelta64]]]) -> NDArray[timedelta64]: ... @overload def __mod__(self: NDArray[object_], other: Any) -> Any: ... @overload def __mod__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... - @overload - def __mod__( - self: NDArray[Union[bool_, integer[Any], floating[Any], timedelta64]], - other: _RecursiveSequence, - ) -> Any: ... @overload - def __rmod__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... - @overload def __rmod__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc] @overload def __rmod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] @@ -2176,20 +1941,13 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): @overload def __rmod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] @overload - def __rmod__(self: _ArrayTD64_co, other: _NestedSequence[_SupportsArray[dtype[timedelta64]]]) -> NDArray[timedelta64]: ... + def __rmod__(self: _ArrayTD64_co, other: _SupportsArray[dtype[timedelta64]] | _NestedSequence[_SupportsArray[dtype[timedelta64]]]) -> NDArray[timedelta64]: ... @overload def __rmod__(self: NDArray[object_], other: Any) -> Any: ... @overload def __rmod__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... - @overload - def __rmod__( - self: NDArray[Union[bool_, integer[Any], floating[Any], timedelta64]], - other: _RecursiveSequence, - ) -> Any: ... @overload - def __divmod__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... - @overload def __divmod__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> _2Tuple[NDArray[int8]]: ... # type: ignore[misc] @overload def __divmod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _2Tuple[NDArray[unsignedinteger[Any]]]: ... # type: ignore[misc] @@ -2198,16 +1956,9 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): @overload def __divmod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _2Tuple[NDArray[floating[Any]]]: ... # type: ignore[misc] @overload - def __divmod__(self: _ArrayTD64_co, other: _NestedSequence[_SupportsArray[dtype[timedelta64]]]) -> Tuple[NDArray[int64], NDArray[timedelta64]]: ... - @overload - def __divmod__( - self: NDArray[Union[bool_, integer[Any], floating[Any], timedelta64]], - other: _RecursiveSequence, - ) -> _2Tuple[Any]: ... 
+ def __divmod__(self: _ArrayTD64_co, other: _SupportsArray[dtype[timedelta64]] | _NestedSequence[_SupportsArray[dtype[timedelta64]]]) -> Tuple[NDArray[int64], NDArray[timedelta64]]: ... @overload - def __rdivmod__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... - @overload def __rdivmod__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> _2Tuple[NDArray[int8]]: ... # type: ignore[misc] @overload def __rdivmod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _2Tuple[NDArray[unsignedinteger[Any]]]: ... # type: ignore[misc] @@ -2216,16 +1967,9 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): @overload def __rdivmod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _2Tuple[NDArray[floating[Any]]]: ... # type: ignore[misc] @overload - def __rdivmod__(self: _ArrayTD64_co, other: _NestedSequence[_SupportsArray[dtype[timedelta64]]]) -> Tuple[NDArray[int64], NDArray[timedelta64]]: ... - @overload - def __rdivmod__( - self: NDArray[Union[bool_, integer[Any], floating[Any], timedelta64]], - other: _RecursiveSequence, - ) -> _2Tuple[Any]: ... + def __rdivmod__(self: _ArrayTD64_co, other: _SupportsArray[dtype[timedelta64]] | _NestedSequence[_SupportsArray[dtype[timedelta64]]]) -> Tuple[NDArray[int64], NDArray[timedelta64]]: ... @overload - def __add__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... - @overload def __add__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... # type: ignore[misc] @overload def __add__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] @@ -2245,15 +1989,8 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): def __add__(self: NDArray[object_], other: Any) -> Any: ... @overload def __add__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... - @overload - def __add__( - self: NDArray[Union[bool_, number[Any], timedelta64, datetime64]], - other: _RecursiveSequence, - ) -> Any: ... @overload - def __radd__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... - @overload def __radd__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... # type: ignore[misc] @overload def __radd__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] @@ -2273,15 +2010,8 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): def __radd__(self: NDArray[object_], other: Any) -> Any: ... @overload def __radd__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... - @overload - def __radd__( - self: NDArray[Union[bool_, number[Any], timedelta64, datetime64]], - other: _RecursiveSequence, - ) -> Any: ... @overload - def __sub__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... - @overload def __sub__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NoReturn: ... @overload def __sub__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] @@ -2301,15 +2031,8 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): def __sub__(self: NDArray[object_], other: Any) -> Any: ... @overload def __sub__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... - @overload - def __sub__( - self: NDArray[Union[bool_, number[Any], timedelta64, datetime64]], - other: _RecursiveSequence, - ) -> Any: ... @overload - def __rsub__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... 
- @overload def __rsub__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NoReturn: ... @overload def __rsub__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] @@ -2329,15 +2052,8 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): def __rsub__(self: NDArray[object_], other: Any) -> Any: ... @overload def __rsub__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... - @overload - def __rsub__( - self: NDArray[Union[bool_, number[Any], timedelta64, datetime64]], - other: _RecursiveSequence, - ) -> Any: ... @overload - def __mul__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... - @overload def __mul__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... # type: ignore[misc] @overload def __mul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] @@ -2355,15 +2071,8 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): def __mul__(self: NDArray[object_], other: Any) -> Any: ... @overload def __mul__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... - @overload - def __mul__( - self: NDArray[Union[bool_, number[Any], timedelta64]], - other: _RecursiveSequence, - ) -> Any: ... @overload - def __rmul__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... - @overload def __rmul__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... # type: ignore[misc] @overload def __rmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] @@ -2381,15 +2090,8 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): def __rmul__(self: NDArray[object_], other: Any) -> Any: ... @overload def __rmul__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... - @overload - def __rmul__( - self: NDArray[Union[bool_, number[Any], timedelta64]], - other: _RecursiveSequence, - ) -> Any: ... @overload - def __floordiv__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... - @overload def __floordiv__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc] @overload def __floordiv__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] @@ -2398,7 +2100,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): @overload def __floordiv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] @overload - def __floordiv__(self: NDArray[timedelta64], other: _NestedSequence[_SupportsArray[dtype[timedelta64]]]) -> NDArray[int64]: ... + def __floordiv__(self: NDArray[timedelta64], other: _SupportsArray[dtype[timedelta64]] | _NestedSequence[_SupportsArray[dtype[timedelta64]]]) -> NDArray[int64]: ... @overload def __floordiv__(self: NDArray[timedelta64], other: _ArrayLikeBool_co) -> NoReturn: ... @overload @@ -2407,15 +2109,8 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): def __floordiv__(self: NDArray[object_], other: Any) -> Any: ... @overload def __floordiv__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... - @overload - def __floordiv__( - self: NDArray[Union[bool_, number[Any], timedelta64]], - other: _RecursiveSequence, - ) -> Any: ... @overload - def __rfloordiv__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... 
- @overload def __rfloordiv__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc] @overload def __rfloordiv__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] @@ -2424,7 +2119,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): @overload def __rfloordiv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] @overload - def __rfloordiv__(self: NDArray[timedelta64], other: _NestedSequence[_SupportsArray[dtype[timedelta64]]]) -> NDArray[int64]: ... + def __rfloordiv__(self: NDArray[timedelta64], other: _SupportsArray[dtype[timedelta64]] | _NestedSequence[_SupportsArray[dtype[timedelta64]]]) -> NDArray[int64]: ... @overload def __rfloordiv__(self: NDArray[bool_], other: _ArrayLikeTD64_co) -> NoReturn: ... @overload @@ -2433,15 +2128,8 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): def __rfloordiv__(self: NDArray[object_], other: Any) -> Any: ... @overload def __rfloordiv__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... - @overload - def __rfloordiv__( - self: NDArray[Union[bool_, number[Any], timedelta64]], - other: _RecursiveSequence, - ) -> Any: ... @overload - def __pow__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... - @overload def __pow__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc] @overload def __pow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] @@ -2455,15 +2143,8 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): def __pow__(self: NDArray[object_], other: Any) -> Any: ... @overload def __pow__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... - @overload - def __pow__( - self: NDArray[Union[bool_, number[Any]]], - other: _RecursiveSequence, - ) -> Any: ... @overload - def __rpow__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... - @overload def __rpow__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc] @overload def __rpow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] @@ -2477,22 +2158,15 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): def __rpow__(self: NDArray[object_], other: Any) -> Any: ... @overload def __rpow__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... - @overload - def __rpow__( - self: NDArray[Union[bool_, number[Any]]], - other: _RecursiveSequence, - ) -> Any: ... @overload - def __truediv__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... - @overload def __truediv__(self: _ArrayInt_co, other: _ArrayInt_co) -> NDArray[float64]: ... # type: ignore[misc] @overload def __truediv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] @overload def __truediv__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc] @overload - def __truediv__(self: NDArray[timedelta64], other: _NestedSequence[_SupportsArray[dtype[timedelta64]]]) -> NDArray[float64]: ... + def __truediv__(self: NDArray[timedelta64], other: _SupportsArray[dtype[timedelta64]] | _NestedSequence[_SupportsArray[dtype[timedelta64]]]) -> NDArray[float64]: ... 
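The timedelta64 overloads above encode NumPy's datetime arithmetic rules: true division of two timedelta64 arrays yields float64, floor division yields int64, and divmod yields an (int64, timedelta64) pair. A minimal sketch of the runtime behavior these annotations describe (values are illustrative):

    import numpy as np

    a = np.array([10, 25], dtype="timedelta64[s]")
    b = np.array([4, 4], dtype="timedelta64[s]")

    print(a / b)            # [2.5  6.25] -> float64 ratio
    print(a // b)           # [2 6]       -> int64 quotient
    q, r = np.divmod(a, b)  # int64 quotient, timedelta64 remainder
    print(q, r)             # [2 6] [2 1]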
@overload def __truediv__(self: NDArray[timedelta64], other: _ArrayLikeBool_co) -> NoReturn: ... @overload @@ -2501,22 +2175,15 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): def __truediv__(self: NDArray[object_], other: Any) -> Any: ... @overload def __truediv__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... - @overload - def __truediv__( - self: NDArray[Union[bool_, number[Any], timedelta64]], - other: _RecursiveSequence, - ) -> Any: ... @overload - def __rtruediv__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... - @overload def __rtruediv__(self: _ArrayInt_co, other: _ArrayInt_co) -> NDArray[float64]: ... # type: ignore[misc] @overload def __rtruediv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] @overload def __rtruediv__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc] @overload - def __rtruediv__(self: NDArray[timedelta64], other: _NestedSequence[_SupportsArray[dtype[timedelta64]]]) -> NDArray[float64]: ... + def __rtruediv__(self: NDArray[timedelta64], other: _SupportsArray[dtype[timedelta64]] | _NestedSequence[_SupportsArray[dtype[timedelta64]]]) -> NDArray[float64]: ... @overload def __rtruediv__(self: NDArray[bool_], other: _ArrayLikeTD64_co) -> NoReturn: ... @overload @@ -2525,15 +2192,8 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): def __rtruediv__(self: NDArray[object_], other: Any) -> Any: ... @overload def __rtruediv__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... - @overload - def __rtruediv__( - self: NDArray[Union[bool_, number[Any], timedelta64]], - other: _RecursiveSequence, - ) -> Any: ... @overload - def __lshift__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... - @overload def __lshift__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc] @overload def __lshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] @@ -2543,15 +2203,8 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): def __lshift__(self: NDArray[object_], other: Any) -> Any: ... @overload def __lshift__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... - @overload - def __lshift__( - self: NDArray[Union[bool_, integer[Any]]], - other: _RecursiveSequence, - ) -> Any: ... @overload - def __rlshift__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... - @overload def __rlshift__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc] @overload def __rlshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] @@ -2561,15 +2214,8 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): def __rlshift__(self: NDArray[object_], other: Any) -> Any: ... @overload def __rlshift__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... - @overload - def __rlshift__( - self: NDArray[Union[bool_, integer[Any]]], - other: _RecursiveSequence, - ) -> Any: ... @overload - def __rshift__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... - @overload def __rshift__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc] @overload def __rshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... 
# type: ignore[misc] @@ -2579,15 +2225,8 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): def __rshift__(self: NDArray[object_], other: Any) -> Any: ... @overload def __rshift__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... - @overload - def __rshift__( - self: NDArray[Union[bool_, integer[Any]]], - other: _RecursiveSequence, - ) -> Any: ... @overload - def __rrshift__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... - @overload def __rrshift__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc] @overload def __rrshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] @@ -2597,15 +2236,8 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): def __rrshift__(self: NDArray[object_], other: Any) -> Any: ... @overload def __rrshift__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... - @overload - def __rrshift__( - self: NDArray[Union[bool_, integer[Any]]], - other: _RecursiveSequence, - ) -> Any: ... @overload - def __and__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... - @overload def __and__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... # type: ignore[misc] @overload def __and__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] @@ -2615,15 +2247,8 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): def __and__(self: NDArray[object_], other: Any) -> Any: ... @overload def __and__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... - @overload - def __and__( - self: NDArray[Union[bool_, integer[Any]]], - other: _RecursiveSequence, - ) -> Any: ... @overload - def __rand__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... - @overload def __rand__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... # type: ignore[misc] @overload def __rand__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] @@ -2633,15 +2258,8 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): def __rand__(self: NDArray[object_], other: Any) -> Any: ... @overload def __rand__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... - @overload - def __rand__( - self: NDArray[Union[bool_, integer[Any]]], - other: _RecursiveSequence, - ) -> Any: ... @overload - def __xor__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... - @overload def __xor__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... # type: ignore[misc] @overload def __xor__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] @@ -2651,15 +2269,8 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): def __xor__(self: NDArray[object_], other: Any) -> Any: ... @overload def __xor__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... - @overload - def __xor__( - self: NDArray[Union[bool_, integer[Any]]], - other: _RecursiveSequence, - ) -> Any: ... @overload - def __rxor__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... - @overload def __rxor__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... # type: ignore[misc] @overload def __rxor__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... 
# type: ignore[misc] @@ -2669,15 +2280,8 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): def __rxor__(self: NDArray[object_], other: Any) -> Any: ... @overload def __rxor__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... - @overload - def __rxor__( - self: NDArray[Union[bool_, integer[Any]]], - other: _RecursiveSequence, - ) -> Any: ... @overload - def __or__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... - @overload def __or__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... # type: ignore[misc] @overload def __or__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] @@ -2687,15 +2291,8 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): def __or__(self: NDArray[object_], other: Any) -> Any: ... @overload def __or__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... - @overload - def __or__( - self: NDArray[Union[bool_, integer[Any]]], - other: _RecursiveSequence, - ) -> Any: ... @overload - def __ror__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... - @overload def __ror__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... # type: ignore[misc] @overload def __ror__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] @@ -2705,15 +2302,8 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): def __ror__(self: NDArray[object_], other: Any) -> Any: ... @overload def __ror__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... - @overload - def __ror__( - self: NDArray[Union[bool_, integer[Any]]], - other: _RecursiveSequence, - ) -> Any: ... # `np.generic` does not support inplace operations - @overload # type: ignore[misc] - def __iadd__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload def __iadd__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... @overload @@ -2730,11 +2320,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): def __iadd__(self: NDArray[datetime64], other: _ArrayLikeTD64_co) -> NDArray[datetime64]: ... @overload def __iadd__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... - @overload - def __iadd__(self: NDArray[_ScalarType], other: _RecursiveSequence) -> NDArray[_ScalarType]: ... - @overload # type: ignore[misc] - def __isub__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload def __isub__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[_NBit1]]: ... @overload @@ -2749,11 +2335,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): def __isub__(self: NDArray[datetime64], other: _ArrayLikeTD64_co) -> NDArray[datetime64]: ... @overload def __isub__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... - @overload - def __isub__(self: NDArray[_ScalarType], other: _RecursiveSequence) -> NDArray[_ScalarType]: ... - @overload # type: ignore[misc] - def __imul__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload def __imul__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... @overload @@ -2768,11 +2350,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): def __imul__(self: NDArray[timedelta64], other: _ArrayLikeFloat_co) -> NDArray[timedelta64]: ... 
@overload def __imul__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... - @overload - def __imul__(self: NDArray[_ScalarType], other: _RecursiveSequence) -> NDArray[_ScalarType]: ... - @overload # type: ignore[misc] - def __itruediv__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload def __itruediv__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co) -> NDArray[floating[_NBit1]]: ... @overload @@ -2783,11 +2361,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): def __itruediv__(self: NDArray[timedelta64], other: _ArrayLikeInt_co) -> NDArray[timedelta64]: ... @overload def __itruediv__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... - @overload - def __itruediv__(self: NDArray[_ScalarType], other: _RecursiveSequence) -> NDArray[_ScalarType]: ... - @overload # type: ignore[misc] - def __ifloordiv__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload def __ifloordiv__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[_NBit1]]: ... @overload @@ -2802,11 +2376,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): def __ifloordiv__(self: NDArray[timedelta64], other: _ArrayLikeInt_co) -> NDArray[timedelta64]: ... @overload def __ifloordiv__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... - @overload - def __ifloordiv__(self: NDArray[_ScalarType], other: _RecursiveSequence) -> NDArray[_ScalarType]: ... - @overload # type: ignore[misc] - def __ipow__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload def __ipow__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[_NBit1]]: ... @overload @@ -2817,11 +2387,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): def __ipow__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co) -> NDArray[complexfloating[_NBit1, _NBit1]]: ... @overload def __ipow__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... - @overload - def __ipow__(self: NDArray[_ScalarType], other: _RecursiveSequence) -> NDArray[_ScalarType]: ... - @overload # type: ignore[misc] - def __imod__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload def __imod__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[_NBit1]]: ... @overload @@ -2829,36 +2395,24 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): @overload def __imod__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co) -> NDArray[floating[_NBit1]]: ... @overload - def __imod__(self: NDArray[timedelta64], other: _NestedSequence[_SupportsArray[dtype[timedelta64]]]) -> NDArray[timedelta64]: ... + def __imod__(self: NDArray[timedelta64], other: _SupportsArray[dtype[timedelta64]] | _NestedSequence[_SupportsArray[dtype[timedelta64]]]) -> NDArray[timedelta64]: ... @overload def __imod__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... - @overload - def __imod__(self: NDArray[_ScalarType], other: _RecursiveSequence) -> NDArray[_ScalarType]: ... - @overload # type: ignore[misc] - def __ilshift__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload def __ilshift__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[_NBit1]]: ... 
@overload def __ilshift__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ... @overload def __ilshift__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... - @overload - def __ilshift__(self: NDArray[_ScalarType], other: _RecursiveSequence) -> NDArray[_ScalarType]: ... - @overload # type: ignore[misc] - def __irshift__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload def __irshift__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[_NBit1]]: ... @overload def __irshift__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ... @overload def __irshift__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... - @overload - def __irshift__(self: NDArray[_ScalarType], other: _RecursiveSequence) -> NDArray[_ScalarType]: ... - @overload # type: ignore[misc] - def __iand__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload def __iand__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... @overload @@ -2867,11 +2421,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): def __iand__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ... @overload def __iand__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... - @overload - def __iand__(self: NDArray[_ScalarType], other: _RecursiveSequence) -> NDArray[_ScalarType]: ... - @overload # type: ignore[misc] - def __ixor__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload def __ixor__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... @overload @@ -2880,11 +2430,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): def __ixor__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ... @overload def __ixor__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... - @overload - def __ixor__(self: NDArray[_ScalarType], other: _RecursiveSequence) -> NDArray[_ScalarType]: ... - @overload # type: ignore[misc] - def __ior__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... @overload def __ior__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... @overload @@ -2893,8 +2439,6 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): def __ior__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ... @overload def __ior__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... - @overload - def __ior__(self: NDArray[_ScalarType], other: _RecursiveSequence) -> NDArray[_ScalarType]: ... # Keep `dtype` at the bottom to avoid name conflicts with `np.dtype` @property @@ -3052,6 +2596,8 @@ class number(generic, Generic[_NBit1]): # type: ignore def real(self: _ArraySelf) -> _ArraySelf: ... @property def imag(self: _ArraySelf) -> _ArraySelf: ... + if sys.version_info >= (3, 9): + def __class_getitem__(self, item: Any) -> GenericAlias: ... def __int__(self) -> int: ... def __float__(self) -> float: ... def __complex__(self) -> complex: ... @@ -3205,6 +2751,7 @@ class integer(number[_NBit1]): # type: ignore ) -> int: ... def tolist(self) -> int: ... def is_integer(self) -> L[True]: ... + def bit_count(self: _ScalarType) -> int: ... def __index__(self) -> int: ... 
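The bit_count stub added above annotates the popcount method on NumPy integer scalars, mirroring int.bit_count() from Python 3.10; a quick sketch, assuming a NumPy build recent enough to ship the method:

    import numpy as np

    x = np.int32(0b10110101)
    print(x.bit_count())  # 5 -- number of set bits in the scalar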
__truediv__: _IntTrueDiv[_NBit1] __rtruediv__: _IntTrueDiv[_NBit1] @@ -3456,8 +3003,15 @@ class void(flexible): def setfield( self, val: ArrayLike, dtype: DTypeLike, offset: int = ... ) -> None: ... - def __getitem__(self, key: SupportsIndex) -> Any: ... - def __setitem__(self, key: SupportsIndex, value: ArrayLike) -> None: ... + @overload + def __getitem__(self, key: str | SupportsIndex) -> Any: ... + @overload + def __getitem__(self, key: list[str]) -> void: ... + def __setitem__( + self, + key: str | List[str] | SupportsIndex, + value: ArrayLike, + ) -> None: ... void0 = void @@ -3710,7 +3264,7 @@ class AxisError(ValueError, IndexError): @overload def __init__(self, axis: int, ndim: int, msg_prefix: None | str = ...) -> None: ... -_CallType = TypeVar("_CallType", bound=Union[_ErrFunc, _SupportsWrite]) +_CallType = TypeVar("_CallType", bound=Union[_ErrFunc, _SupportsWrite[str]]) class errstate(Generic[_CallType], ContextDecorator): call: _CallType @@ -3740,22 +3294,20 @@ class ndenumerate(Generic[_ScalarType]): iter: flatiter[NDArray[_ScalarType]] @overload def __new__( - cls, arr: _NestedSequence[_SupportsArray[dtype[_ScalarType]]], + cls, arr: _FiniteNestedSequence[_SupportsArray[dtype[_ScalarType]]], ) -> ndenumerate[_ScalarType]: ... @overload - def __new__(cls, arr: _NestedSequence[str]) -> ndenumerate[str_]: ... - @overload - def __new__(cls, arr: _NestedSequence[bytes]) -> ndenumerate[bytes_]: ... + def __new__(cls, arr: str | _NestedSequence[str]) -> ndenumerate[str_]: ... @overload - def __new__(cls, arr: _NestedSequence[bool]) -> ndenumerate[bool_]: ... + def __new__(cls, arr: bytes | _NestedSequence[bytes]) -> ndenumerate[bytes_]: ... @overload - def __new__(cls, arr: _NestedSequence[int]) -> ndenumerate[int_]: ... + def __new__(cls, arr: bool | _NestedSequence[bool]) -> ndenumerate[bool_]: ... @overload - def __new__(cls, arr: _NestedSequence[float]) -> ndenumerate[float_]: ... + def __new__(cls, arr: int | _NestedSequence[int]) -> ndenumerate[int_]: ... @overload - def __new__(cls, arr: _NestedSequence[complex]) -> ndenumerate[complex_]: ... + def __new__(cls, arr: float | _NestedSequence[float]) -> ndenumerate[float_]: ... @overload - def __new__(cls, arr: _RecursiveSequence) -> ndenumerate[Any]: ... + def __new__(cls, arr: complex | _NestedSequence[complex]) -> ndenumerate[complex_]: ... def __next__(self: ndenumerate[_ScalarType]) -> Tuple[_Shape, _ScalarType]: ... def __iter__(self: _T) -> _T: ... @@ -3838,14 +3390,6 @@ class finfo(Generic[_FloatType]): def smallest_normal(self) -> _FloatType: ... @property def tiny(self) -> _FloatType: ... - - # NOTE: Not technically a property, but this is the only way we can - # access the precision of the underlying float - @property - def machar(self: finfo[floating[_NBit1]]) -> MachArLike[_NBit1]: ... - @machar.setter - def machar(self: finfo[floating[_NBit1]], value: MachArLike[_NBit1]) -> None: ... - @overload def __new__( cls, dtype: inexact[_NBit1] | _DTypeLike[inexact[_NBit1]] @@ -3875,3 +3419,909 @@ class iinfo(Generic[_IntType]): def __new__(cls, dtype: int | Type[int]) -> iinfo[int_]: ... @overload def __new__(cls, dtype: str) -> iinfo[Any]: ... + +class format_parser: + dtype: dtype[void] + def __init__( + self, + formats: DTypeLike, + names: None | str | Sequence[str], + titles: None | str | Sequence[str], + aligned: bool = ..., + byteorder: None | _ByteOrder = ..., + ) -> None: ... 
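format_parser bundles separate format, name, and title specifications into a single structured dtype[void], which is exactly what the stub above exposes through its dtype attribute. A minimal usage sketch:

    import numpy as np

    parser = np.format_parser(
        formats=["<i8", "<f8", "S5"],  # one format per field
        names=["id", "value", "tag"],  # field names
        titles=None,                   # no per-field titles
    )
    print(parser.dtype)
    # dtype([('id', '<i8'), ('value', '<f8'), ('tag', 'S5')])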
+ +class recarray(ndarray[_ShapeType, _DType_co]): + # NOTE: While not strictly mandatory, we're demanding here that arguments + # for the `format_parser`- and `dtype`-based dtype constructors are + # mutually exclusive + @overload + def __new__( + subtype, + shape: _ShapeLike, + dtype: None = ..., + buf: None | _SupportsBuffer = ..., + offset: SupportsIndex = ..., + strides: None | _ShapeLike = ..., + *, + formats: DTypeLike, + names: None | str | Sequence[str] = ..., + titles: None | str | Sequence[str] = ..., + byteorder: None | _ByteOrder = ..., + aligned: bool = ..., + order: _OrderKACF = ..., + ) -> recarray[Any, dtype[record]]: ... + @overload + def __new__( + subtype, + shape: _ShapeLike, + dtype: DTypeLike, + buf: None | _SupportsBuffer = ..., + offset: SupportsIndex = ..., + strides: None | _ShapeLike = ..., + formats: None = ..., + names: None = ..., + titles: None = ..., + byteorder: None = ..., + aligned: L[False] = ..., + order: _OrderKACF = ..., + ) -> recarray[Any, dtype[Any]]: ... + def __array_finalize__(self, obj: object) -> None: ... + def __getattribute__(self, attr: str) -> Any: ... + def __setattr__(self, attr: str, val: ArrayLike) -> None: ... + @overload + def __getitem__(self, indx: Union[ + SupportsIndex, + _ArrayLikeInt_co, + Tuple[SupportsIndex | _ArrayLikeInt_co, ...], + ]) -> Any: ... + @overload + def __getitem__(self: recarray[Any, dtype[void]], indx: Union[ + None, + slice, + ellipsis, + SupportsIndex, + _ArrayLikeInt_co, + Tuple[None | slice | ellipsis | _ArrayLikeInt_co | SupportsIndex, ...], + ]) -> recarray[Any, _DType_co]: ... + @overload + def __getitem__(self, indx: Union[ + None, + slice, + ellipsis, + SupportsIndex, + _ArrayLikeInt_co, + Tuple[None | slice | ellipsis | _ArrayLikeInt_co | SupportsIndex, ...], + ]) -> ndarray[Any, _DType_co]: ... + @overload + def __getitem__(self, indx: str) -> NDArray[Any]: ... + @overload + def __getitem__(self, indx: list[str]) -> recarray[_ShapeType, dtype[record]]: ... + @overload + def field(self, attr: int | str, val: None = ...) -> Any: ... + @overload + def field(self, attr: int | str, val: ArrayLike) -> None: ... + +class record(void): + def __getattribute__(self, attr: str) -> Any: ... + def __setattr__(self, attr: str, val: ArrayLike) -> None: ... + def pprint(self) -> str: ... + @overload + def __getitem__(self, key: str | SupportsIndex) -> Any: ... + @overload + def __getitem__(self, key: list[str]) -> record: ... + +_NDIterFlagsKind = L[ + "buffered", + "c_index", + "copy_if_overlap", + "common_dtype", + "delay_bufalloc", + "external_loop", + "f_index", + "grow_inner", "growinner", + "multi_index", + "ranged", + "refs_ok", + "reduce_ok", + "zerosize_ok", +] + +_NDIterOpFlagsKind = L[ + "aligned", + "allocate", + "arraymask", + "copy", + "config", + "nbo", + "no_subtype", + "no_broadcast", + "overlap_assume_elementwise", + "readonly", + "readwrite", + "updateifcopy", + "virtual", + "writeonly", + "writemasked" +] + +@final +class nditer: + def __new__( + cls, + op: ArrayLike | Sequence[ArrayLike], + flags: None | Sequence[_NDIterFlagsKind] = ..., + op_flags: None | Sequence[Sequence[_NDIterOpFlagsKind]] = ..., + op_dtypes: DTypeLike | Sequence[DTypeLike] = ..., + order: _OrderKACF = ..., + casting: _CastingKind = ..., + op_axes: None | Sequence[Sequence[SupportsIndex]] = ..., + itershape: None | _ShapeLike = ..., + buffersize: SupportsIndex = ..., + ) -> nditer: ... + def __enter__(self) -> nditer: ... 
+ def __exit__( + self, + exc_type: None | Type[BaseException], + exc_value: None | BaseException, + traceback: None | TracebackType, + ) -> None: ... + def __iter__(self) -> nditer: ... + def __next__(self) -> Tuple[NDArray[Any], ...]: ... + def __len__(self) -> int: ... + def __copy__(self) -> nditer: ... + @overload + def __getitem__(self, index: SupportsIndex) -> NDArray[Any]: ... + @overload + def __getitem__(self, index: slice) -> Tuple[NDArray[Any], ...]: ... + def __setitem__(self, index: slice | SupportsIndex, value: ArrayLike) -> None: ... + def close(self) -> None: ... + def copy(self) -> nditer: ... + def debug_print(self) -> None: ... + def enable_external_loop(self) -> None: ... + def iternext(self) -> bool: ... + def remove_axis(self, i: SupportsIndex, /) -> None: ... + def remove_multi_index(self) -> None: ... + def reset(self) -> None: ... + @property + def dtypes(self) -> Tuple[dtype[Any], ...]: ... + @property + def finished(self) -> bool: ... + @property + def has_delayed_bufalloc(self) -> bool: ... + @property + def has_index(self) -> bool: ... + @property + def has_multi_index(self) -> bool: ... + @property + def index(self) -> int: ... + @property + def iterationneedsapi(self) -> bool: ... + @property + def iterindex(self) -> int: ... + @property + def iterrange(self) -> Tuple[int, ...]: ... + @property + def itersize(self) -> int: ... + @property + def itviews(self) -> Tuple[NDArray[Any], ...]: ... + @property + def multi_index(self) -> Tuple[int, ...]: ... + @property + def ndim(self) -> int: ... + @property + def nop(self) -> int: ... + @property + def operands(self) -> Tuple[NDArray[Any], ...]: ... + @property + def shape(self) -> Tuple[int, ...]: ... + @property + def value(self) -> Tuple[NDArray[Any], ...]: ... + +_MemMapModeKind = L[ + "readonly", "r", + "copyonwrite", "c", + "readwrite", "r+", + "write", "w+", +] + +class memmap(ndarray[_ShapeType, _DType_co]): + __array_priority__: ClassVar[float] + filename: str | None + offset: int + mode: str + @overload + def __new__( + subtype, + filename: str | bytes | os.PathLike[str] | os.PathLike[bytes] | _MemMapIOProtocol, + dtype: Type[uint8] = ..., + mode: _MemMapModeKind = ..., + offset: int = ..., + shape: None | int | Tuple[int, ...] = ..., + order: _OrderKACF = ..., + ) -> memmap[Any, dtype[uint8]]: ... + @overload + def __new__( + subtype, + filename: str | bytes | os.PathLike[str] | os.PathLike[bytes] | _MemMapIOProtocol, + dtype: _DTypeLike[_ScalarType], + mode: _MemMapModeKind = ..., + offset: int = ..., + shape: None | int | Tuple[int, ...] = ..., + order: _OrderKACF = ..., + ) -> memmap[Any, dtype[_ScalarType]]: ... + @overload + def __new__( + subtype, + filename: str | bytes | os.PathLike[str] | os.PathLike[bytes] | _MemMapIOProtocol, + dtype: DTypeLike, + mode: _MemMapModeKind = ..., + offset: int = ..., + shape: None | int | Tuple[int, ...] = ..., + order: _OrderKACF = ..., + ) -> memmap[Any, dtype[Any]]: ... + def __array_finalize__(self, obj: memmap[Any, Any]) -> None: ... + def __array_wrap__( + self, + array: memmap[_ShapeType, _DType_co], + context: None | Tuple[ufunc, Tuple[Any, ...], int] = ..., + ) -> Any: ... + def flush(self) -> None: ... 
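The memmap overloads above dispatch on the dtype argument: when omitted it defaults to uint8, a known scalar type propagates into the result type, and anything else falls back to dtype[Any]. A small sketch of the annotated runtime class (the file name is illustrative):

    import numpy as np

    # Writable array backed by a file on disk; mode "w+" creates the file.
    mm = np.memmap("example.dat", dtype=np.float32, mode="w+", shape=(3, 4))
    mm[:] = 1.0
    mm.flush()                   # push pending changes to disk
    print(mm.filename, mm.mode)  # <path>/example.dat w+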
+ +class vectorize: + pyfunc: Callable[..., Any] + cache: bool + signature: None | str + otypes: None | str + excluded: Set[int | str] + __doc__: None | str + def __init__( + self, + pyfunc: Callable[..., Any], + otypes: None | str | Iterable[DTypeLike] = ..., + doc: None | str = ..., + excluded: None | Iterable[int | str] = ..., + cache: bool = ..., + signature: None | str = ..., + ) -> None: ... + def __call__(self, *args: Any, **kwargs: Any) -> NDArray[Any]: ... + +class poly1d: + @property + def variable(self) -> str: ... + @property + def order(self) -> int: ... + @property + def o(self) -> int: ... + @property + def roots(self) -> NDArray[Any]: ... + @property + def r(self) -> NDArray[Any]: ... + + @property + def coeffs(self) -> NDArray[Any]: ... + @coeffs.setter + def coeffs(self, value: NDArray[Any]) -> None: ... + + @property + def c(self) -> NDArray[Any]: ... + @c.setter + def c(self, value: NDArray[Any]) -> None: ... + + @property + def coef(self) -> NDArray[Any]: ... + @coef.setter + def coef(self, value: NDArray[Any]) -> None: ... + + @property + def coefficients(self) -> NDArray[Any]: ... + @coefficients.setter + def coefficients(self, value: NDArray[Any]) -> None: ... + + __hash__: None # type: ignore + + @overload + def __array__(self, t: None = ...) -> NDArray[Any]: ... + @overload + def __array__(self, t: _DType) -> ndarray[Any, _DType]: ... + + @overload + def __call__(self, val: _ScalarLike_co) -> Any: ... + @overload + def __call__(self, val: poly1d) -> poly1d: ... + @overload + def __call__(self, val: ArrayLike) -> NDArray[Any]: ... + + def __init__( + self, + c_or_r: ArrayLike, + r: bool = ..., + variable: None | str = ..., + ) -> None: ... + def __len__(self) -> int: ... + def __neg__(self) -> poly1d: ... + def __pos__(self) -> poly1d: ... + def __mul__(self, other: ArrayLike) -> poly1d: ... + def __rmul__(self, other: ArrayLike) -> poly1d: ... + def __add__(self, other: ArrayLike) -> poly1d: ... + def __radd__(self, other: ArrayLike) -> poly1d: ... + def __pow__(self, val: _FloatLike_co) -> poly1d: ... # Integral floats are accepted + def __sub__(self, other: ArrayLike) -> poly1d: ... + def __rsub__(self, other: ArrayLike) -> poly1d: ... + def __div__(self, other: ArrayLike) -> poly1d: ... + def __truediv__(self, other: ArrayLike) -> poly1d: ... + def __rdiv__(self, other: ArrayLike) -> poly1d: ... + def __rtruediv__(self, other: ArrayLike) -> poly1d: ... + def __getitem__(self, val: int) -> Any: ... + def __setitem__(self, key: int, val: Any) -> None: ... + def __iter__(self) -> Iterator[Any]: ... + def deriv(self, m: SupportsInt | SupportsIndex = ...) -> poly1d: ... + def integ( + self, + m: SupportsInt | SupportsIndex = ..., + k: None | _ArrayLikeComplex_co | _ArrayLikeObject_co = ..., + ) -> poly1d: ... + +class matrix(ndarray[_ShapeType, _DType_co]): + __array_priority__: ClassVar[float] + def __new__( + subtype, + data: ArrayLike, + dtype: DTypeLike = ..., + copy: bool = ..., + ) -> matrix[Any, Any]: ... + def __array_finalize__(self, obj: NDArray[Any]) -> None: ... + + @overload + def __getitem__(self, key: Union[ + SupportsIndex, + _ArrayLikeInt_co, + Tuple[SupportsIndex | _ArrayLikeInt_co, ...], + ]) -> Any: ... + @overload + def __getitem__(self, key: Union[ + None, + slice, + ellipsis, + SupportsIndex, + _ArrayLikeInt_co, + Tuple[None | slice | ellipsis | _ArrayLikeInt_co | SupportsIndex, ...], + ]) -> matrix[Any, _DType_co]: ... + @overload + def __getitem__(self: NDArray[void], key: str) -> matrix[Any, dtype[Any]]: ... 
+ @overload + def __getitem__(self: NDArray[void], key: list[str]) -> matrix[_ShapeType, dtype[void]]: ... + + def __mul__(self, other: ArrayLike) -> matrix[Any, Any]: ... + def __rmul__(self, other: ArrayLike) -> matrix[Any, Any]: ... + def __imul__(self, other: ArrayLike) -> matrix[_ShapeType, _DType_co]: ... + def __pow__(self, other: ArrayLike) -> matrix[Any, Any]: ... + def __ipow__(self, other: ArrayLike) -> matrix[_ShapeType, _DType_co]: ... + + @overload + def sum(self, axis: None = ..., dtype: DTypeLike = ..., out: None = ...) -> Any: ... + @overload + def sum(self, axis: _ShapeLike, dtype: DTypeLike = ..., out: None = ...) -> matrix[Any, Any]: ... + @overload + def sum(self, axis: None | _ShapeLike = ..., dtype: DTypeLike = ..., out: _NdArraySubClass = ...) -> _NdArraySubClass: ... + + @overload + def mean(self, axis: None = ..., dtype: DTypeLike = ..., out: None = ...) -> Any: ... + @overload + def mean(self, axis: _ShapeLike, dtype: DTypeLike = ..., out: None = ...) -> matrix[Any, Any]: ... + @overload + def mean(self, axis: None | _ShapeLike = ..., dtype: DTypeLike = ..., out: _NdArraySubClass = ...) -> _NdArraySubClass: ... + + @overload + def std(self, axis: None = ..., dtype: DTypeLike = ..., out: None = ..., ddof: float = ...) -> Any: ... + @overload + def std(self, axis: _ShapeLike, dtype: DTypeLike = ..., out: None = ..., ddof: float = ...) -> matrix[Any, Any]: ... + @overload + def std(self, axis: None | _ShapeLike = ..., dtype: DTypeLike = ..., out: _NdArraySubClass = ..., ddof: float = ...) -> _NdArraySubClass: ... + + @overload + def var(self, axis: None = ..., dtype: DTypeLike = ..., out: None = ..., ddof: float = ...) -> Any: ... + @overload + def var(self, axis: _ShapeLike, dtype: DTypeLike = ..., out: None = ..., ddof: float = ...) -> matrix[Any, Any]: ... + @overload + def var(self, axis: None | _ShapeLike = ..., dtype: DTypeLike = ..., out: _NdArraySubClass = ..., ddof: float = ...) -> _NdArraySubClass: ... + + @overload + def prod(self, axis: None = ..., dtype: DTypeLike = ..., out: None = ...) -> Any: ... + @overload + def prod(self, axis: _ShapeLike, dtype: DTypeLike = ..., out: None = ...) -> matrix[Any, Any]: ... + @overload + def prod(self, axis: None | _ShapeLike = ..., dtype: DTypeLike = ..., out: _NdArraySubClass = ...) -> _NdArraySubClass: ... + + @overload + def any(self, axis: None = ..., out: None = ...) -> bool_: ... + @overload + def any(self, axis: _ShapeLike, out: None = ...) -> matrix[Any, dtype[bool_]]: ... + @overload + def any(self, axis: None | _ShapeLike = ..., out: _NdArraySubClass = ...) -> _NdArraySubClass: ... + + @overload + def all(self, axis: None = ..., out: None = ...) -> bool_: ... + @overload + def all(self, axis: _ShapeLike, out: None = ...) -> matrix[Any, dtype[bool_]]: ... + @overload + def all(self, axis: None | _ShapeLike = ..., out: _NdArraySubClass = ...) -> _NdArraySubClass: ... + + @overload + def max(self: NDArray[_ScalarType], axis: None = ..., out: None = ...) -> _ScalarType: ... + @overload + def max(self, axis: _ShapeLike, out: None = ...) -> matrix[Any, _DType_co]: ... + @overload + def max(self, axis: None | _ShapeLike = ..., out: _NdArraySubClass = ...) -> _NdArraySubClass: ... + + @overload + def min(self: NDArray[_ScalarType], axis: None = ..., out: None = ...) -> _ScalarType: ... + @overload + def min(self, axis: _ShapeLike, out: None = ...) -> matrix[Any, _DType_co]: ... + @overload + def min(self, axis: None | _ShapeLike = ..., out: _NdArraySubClass = ...) -> _NdArraySubClass: ... 
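The reduction overloads for matrix capture its quirk that reductions never produce 1-D results: with an axis argument the result is again a row or column matrix, while axis=None collapses to a scalar. An illustrative sketch (np.matrix is discouraged for new code and appears here only to show the annotated behavior):

    import numpy as np

    m = np.matrix([[1.0, 2.0], [3.0, 4.0]])
    print(m.sum())        # 10.0       -> scalar for axis=None
    print(m.sum(axis=0))  # [[4. 6.]]  -> still a 2-D matrix
    print(m.min(axis=1))  # column matrix [[1.], [3.]]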
+ + @overload + def argmax(self: NDArray[_ScalarType], axis: None = ..., out: None = ...) -> intp: ... + @overload + def argmax(self, axis: _ShapeLike, out: None = ...) -> matrix[Any, dtype[intp]]: ... + @overload + def argmax(self, axis: None | _ShapeLike = ..., out: _NdArraySubClass = ...) -> _NdArraySubClass: ... + + @overload + def argmin(self: NDArray[_ScalarType], axis: None = ..., out: None = ...) -> intp: ... + @overload + def argmin(self, axis: _ShapeLike, out: None = ...) -> matrix[Any, dtype[intp]]: ... + @overload + def argmin(self, axis: None | _ShapeLike = ..., out: _NdArraySubClass = ...) -> _NdArraySubClass: ... + + @overload + def ptp(self: NDArray[_ScalarType], axis: None = ..., out: None = ...) -> _ScalarType: ... + @overload + def ptp(self, axis: _ShapeLike, out: None = ...) -> matrix[Any, _DType_co]: ... + @overload + def ptp(self, axis: None | _ShapeLike = ..., out: _NdArraySubClass = ...) -> _NdArraySubClass: ... + + def squeeze(self, axis: None | _ShapeLike = ...) -> matrix[Any, _DType_co]: ... + def tolist(self: matrix[Any, dtype[_SupportsItem[_T]]]) -> List[List[_T]]: ... # type: ignore[typevar] + def ravel(self, order: _OrderKACF = ...) -> matrix[Any, _DType_co]: ... + def flatten(self, order: _OrderKACF = ...) -> matrix[Any, _DType_co]: ... + + @property + def T(self) -> matrix[Any, _DType_co]: ... + @property + def I(self) -> matrix[Any, Any]: ... + @property + def A(self) -> ndarray[_ShapeType, _DType_co]: ... + @property + def A1(self) -> ndarray[Any, _DType_co]: ... + @property + def H(self) -> matrix[Any, _DType_co]: ... + def getT(self) -> matrix[Any, _DType_co]: ... + def getI(self) -> matrix[Any, Any]: ... + def getA(self) -> ndarray[_ShapeType, _DType_co]: ... + def getA1(self) -> ndarray[Any, _DType_co]: ... + def getH(self) -> matrix[Any, _DType_co]: ... + +_CharType = TypeVar("_CharType", str_, bytes_) +_CharDType = TypeVar("_CharDType", dtype[str_], dtype[bytes_]) +_CharArray = chararray[Any, dtype[_CharType]] + +class chararray(ndarray[_ShapeType, _CharDType]): + @overload + def __new__( + subtype, + shape: _ShapeLike, + itemsize: SupportsIndex | SupportsInt = ..., + unicode: L[False] = ..., + buffer: _SupportsBuffer = ..., + offset: SupportsIndex = ..., + strides: _ShapeLike = ..., + order: _OrderKACF = ..., + ) -> chararray[Any, dtype[bytes_]]: ... + @overload + def __new__( + subtype, + shape: _ShapeLike, + itemsize: SupportsIndex | SupportsInt = ..., + unicode: L[True] = ..., + buffer: _SupportsBuffer = ..., + offset: SupportsIndex = ..., + strides: _ShapeLike = ..., + order: _OrderKACF = ..., + ) -> chararray[Any, dtype[str_]]: ... + + def __array_finalize__(self, obj: NDArray[str_ | bytes_]) -> None: ... + def __mul__(self, other: _ArrayLikeInt_co) -> chararray[Any, _CharDType]: ... + def __rmul__(self, other: _ArrayLikeInt_co) -> chararray[Any, _CharDType]: ... + def __mod__(self, i: Any) -> chararray[Any, _CharDType]: ... + + @overload + def __eq__( + self: _CharArray[str_], + other: _ArrayLikeStr_co, + ) -> NDArray[bool_]: ... + @overload + def __eq__( + self: _CharArray[bytes_], + other: _ArrayLikeBytes_co, + ) -> NDArray[bool_]: ... + + @overload + def __ne__( + self: _CharArray[str_], + other: _ArrayLikeStr_co, + ) -> NDArray[bool_]: ... + @overload + def __ne__( + self: _CharArray[bytes_], + other: _ArrayLikeBytes_co, + ) -> NDArray[bool_]: ... + + @overload + def __ge__( + self: _CharArray[str_], + other: _ArrayLikeStr_co, + ) -> NDArray[bool_]: ... 
+ @overload + def __ge__( + self: _CharArray[bytes_], + other: _ArrayLikeBytes_co, + ) -> NDArray[bool_]: ... + + @overload + def __le__( + self: _CharArray[str_], + other: _ArrayLikeStr_co, + ) -> NDArray[bool_]: ... + @overload + def __le__( + self: _CharArray[bytes_], + other: _ArrayLikeBytes_co, + ) -> NDArray[bool_]: ... + + @overload + def __gt__( + self: _CharArray[str_], + other: _ArrayLikeStr_co, + ) -> NDArray[bool_]: ... + @overload + def __gt__( + self: _CharArray[bytes_], + other: _ArrayLikeBytes_co, + ) -> NDArray[bool_]: ... + + @overload + def __lt__( + self: _CharArray[str_], + other: _ArrayLikeStr_co, + ) -> NDArray[bool_]: ... + @overload + def __lt__( + self: _CharArray[bytes_], + other: _ArrayLikeBytes_co, + ) -> NDArray[bool_]: ... + + @overload + def __add__( + self: _CharArray[str_], + other: _ArrayLikeStr_co, + ) -> _CharArray[str_]: ... + @overload + def __add__( + self: _CharArray[bytes_], + other: _ArrayLikeBytes_co, + ) -> _CharArray[bytes_]: ... + + @overload + def __radd__( + self: _CharArray[str_], + other: _ArrayLikeStr_co, + ) -> _CharArray[str_]: ... + @overload + def __radd__( + self: _CharArray[bytes_], + other: _ArrayLikeBytes_co, + ) -> _CharArray[bytes_]: ... + + @overload + def center( + self: _CharArray[str_], + width: _ArrayLikeInt_co, + fillchar: _ArrayLikeStr_co = ..., + ) -> _CharArray[str_]: ... + @overload + def center( + self: _CharArray[bytes_], + width: _ArrayLikeInt_co, + fillchar: _ArrayLikeBytes_co = ..., + ) -> _CharArray[bytes_]: ... + + @overload + def count( + self: _CharArray[str_], + sub: _ArrayLikeStr_co, + start: _ArrayLikeInt_co = ..., + end: None | _ArrayLikeInt_co = ..., + ) -> NDArray[int_]: ... + @overload + def count( + self: _CharArray[bytes_], + sub: _ArrayLikeBytes_co, + start: _ArrayLikeInt_co = ..., + end: None | _ArrayLikeInt_co = ..., + ) -> NDArray[int_]: ... + + def decode( + self: _CharArray[bytes_], + encoding: None | str = ..., + errors: None | str = ..., + ) -> _CharArray[str_]: ... + + def encode( + self: _CharArray[str_], + encoding: None | str = ..., + errors: None | str = ..., + ) -> _CharArray[bytes_]: ... + + @overload + def endswith( + self: _CharArray[str_], + suffix: _ArrayLikeStr_co, + start: _ArrayLikeInt_co = ..., + end: None | _ArrayLikeInt_co = ..., + ) -> NDArray[bool_]: ... + @overload + def endswith( + self: _CharArray[bytes_], + suffix: _ArrayLikeBytes_co, + start: _ArrayLikeInt_co = ..., + end: None | _ArrayLikeInt_co = ..., + ) -> NDArray[bool_]: ... + + def expandtabs( + self, + tabsize: _ArrayLikeInt_co = ..., + ) -> chararray[Any, _CharDType]: ... + + @overload + def find( + self: _CharArray[str_], + sub: _ArrayLikeStr_co, + start: _ArrayLikeInt_co = ..., + end: None | _ArrayLikeInt_co = ..., + ) -> NDArray[int_]: ... + @overload + def find( + self: _CharArray[bytes_], + sub: _ArrayLikeBytes_co, + start: _ArrayLikeInt_co = ..., + end: None | _ArrayLikeInt_co = ..., + ) -> NDArray[int_]: ... + + @overload + def index( + self: _CharArray[str_], + sub: _ArrayLikeStr_co, + start: _ArrayLikeInt_co = ..., + end: None | _ArrayLikeInt_co = ..., + ) -> NDArray[int_]: ... + @overload + def index( + self: _CharArray[bytes_], + sub: _ArrayLikeBytes_co, + start: _ArrayLikeInt_co = ..., + end: None | _ArrayLikeInt_co = ..., + ) -> NDArray[int_]: ... + + @overload + def join( + self: _CharArray[str_], + seq: _ArrayLikeStr_co, + ) -> _CharArray[str_]: ... + @overload + def join( + self: _CharArray[bytes_], + seq: _ArrayLikeBytes_co, + ) -> _CharArray[bytes_]: ... 
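The paired overloads throughout chararray type each method per string kind: str_ arrays take str-like operands, bytes_ arrays take bytes-like ones, and comparisons broadcast element-wise to boolean arrays. A brief sketch using np.char.array, the conventional constructor:

    import numpy as np

    s = np.char.array(["numpy", "array", "api"])
    print(s.upper())     # ['NUMPY' 'ARRAY' 'API']
    print(s.count("a"))  # [0 2 1]
    print(s == "api")    # [False False  True]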
+ + @overload + def ljust( + self: _CharArray[str_], + width: _ArrayLikeInt_co, + fillchar: _ArrayLikeStr_co = ..., + ) -> _CharArray[str_]: ... + @overload + def ljust( + self: _CharArray[bytes_], + width: _ArrayLikeInt_co, + fillchar: _ArrayLikeBytes_co = ..., + ) -> _CharArray[bytes_]: ... + + @overload + def lstrip( + self: _CharArray[str_], + chars: None | _ArrayLikeStr_co = ..., + ) -> _CharArray[str_]: ... + @overload + def lstrip( + self: _CharArray[bytes_], + chars: None | _ArrayLikeBytes_co = ..., + ) -> _CharArray[bytes_]: ... + + @overload + def partition( + self: _CharArray[str_], + sep: _ArrayLikeStr_co, + ) -> _CharArray[str_]: ... + @overload + def partition( + self: _CharArray[bytes_], + sep: _ArrayLikeBytes_co, + ) -> _CharArray[bytes_]: ... + + @overload + def replace( + self: _CharArray[str_], + old: _ArrayLikeStr_co, + new: _ArrayLikeStr_co, + count: None | _ArrayLikeInt_co = ..., + ) -> _CharArray[str_]: ... + @overload + def replace( + self: _CharArray[bytes_], + old: _ArrayLikeBytes_co, + new: _ArrayLikeBytes_co, + count: None | _ArrayLikeInt_co = ..., + ) -> _CharArray[bytes_]: ... + + @overload + def rfind( + self: _CharArray[str_], + sub: _ArrayLikeStr_co, + start: _ArrayLikeInt_co = ..., + end: None | _ArrayLikeInt_co = ..., + ) -> NDArray[int_]: ... + @overload + def rfind( + self: _CharArray[bytes_], + sub: _ArrayLikeBytes_co, + start: _ArrayLikeInt_co = ..., + end: None | _ArrayLikeInt_co = ..., + ) -> NDArray[int_]: ... + + @overload + def rindex( + self: _CharArray[str_], + sub: _ArrayLikeStr_co, + start: _ArrayLikeInt_co = ..., + end: None | _ArrayLikeInt_co = ..., + ) -> NDArray[int_]: ... + @overload + def rindex( + self: _CharArray[bytes_], + sub: _ArrayLikeBytes_co, + start: _ArrayLikeInt_co = ..., + end: None | _ArrayLikeInt_co = ..., + ) -> NDArray[int_]: ... + + @overload + def rjust( + self: _CharArray[str_], + width: _ArrayLikeInt_co, + fillchar: _ArrayLikeStr_co = ..., + ) -> _CharArray[str_]: ... + @overload + def rjust( + self: _CharArray[bytes_], + width: _ArrayLikeInt_co, + fillchar: _ArrayLikeBytes_co = ..., + ) -> _CharArray[bytes_]: ... + + @overload + def rpartition( + self: _CharArray[str_], + sep: _ArrayLikeStr_co, + ) -> _CharArray[str_]: ... + @overload + def rpartition( + self: _CharArray[bytes_], + sep: _ArrayLikeBytes_co, + ) -> _CharArray[bytes_]: ... + + @overload + def rsplit( + self: _CharArray[str_], + sep: None | _ArrayLikeStr_co = ..., + maxsplit: None | _ArrayLikeInt_co = ..., + ) -> NDArray[object_]: ... + @overload + def rsplit( + self: _CharArray[bytes_], + sep: None | _ArrayLikeBytes_co = ..., + maxsplit: None | _ArrayLikeInt_co = ..., + ) -> NDArray[object_]: ... + + @overload + def rstrip( + self: _CharArray[str_], + chars: None | _ArrayLikeStr_co = ..., + ) -> _CharArray[str_]: ... + @overload + def rstrip( + self: _CharArray[bytes_], + chars: None | _ArrayLikeBytes_co = ..., + ) -> _CharArray[bytes_]: ... + + @overload + def split( + self: _CharArray[str_], + sep: None | _ArrayLikeStr_co = ..., + maxsplit: None | _ArrayLikeInt_co = ..., + ) -> NDArray[object_]: ... + @overload + def split( + self: _CharArray[bytes_], + sep: None | _ArrayLikeBytes_co = ..., + maxsplit: None | _ArrayLikeInt_co = ..., + ) -> NDArray[object_]: ... + + def splitlines(self, keepends: None | _ArrayLikeBool_co = ...) -> NDArray[object_]: ... + + @overload + def startswith( + self: _CharArray[str_], + prefix: _ArrayLikeStr_co, + start: _ArrayLikeInt_co = ..., + end: None | _ArrayLikeInt_co = ..., + ) -> NDArray[bool_]: ... 
+ @overload + def startswith( + self: _CharArray[bytes_], + prefix: _ArrayLikeBytes_co, + start: _ArrayLikeInt_co = ..., + end: None | _ArrayLikeInt_co = ..., + ) -> NDArray[bool_]: ... + + @overload + def strip( + self: _CharArray[str_], + chars: None | _ArrayLikeStr_co = ..., + ) -> _CharArray[str_]: ... + @overload + def strip( + self: _CharArray[bytes_], + chars: None | _ArrayLikeBytes_co = ..., + ) -> _CharArray[bytes_]: ... + + @overload + def translate( + self: _CharArray[str_], + table: _ArrayLikeStr_co, + deletechars: None | _ArrayLikeStr_co = ..., + ) -> _CharArray[str_]: ... + @overload + def translate( + self: _CharArray[bytes_], + table: _ArrayLikeBytes_co, + deletechars: None | _ArrayLikeBytes_co = ..., + ) -> _CharArray[bytes_]: ... + + def zfill(self, width: _ArrayLikeInt_co) -> chararray[Any, _CharDType]: ... + def capitalize(self) -> chararray[_ShapeType, _CharDType]: ... + def title(self) -> chararray[_ShapeType, _CharDType]: ... + def swapcase(self) -> chararray[_ShapeType, _CharDType]: ... + def lower(self) -> chararray[_ShapeType, _CharDType]: ... + def upper(self) -> chararray[_ShapeType, _CharDType]: ... + def isalnum(self) -> ndarray[_ShapeType, dtype[bool_]]: ... + def isalpha(self) -> ndarray[_ShapeType, dtype[bool_]]: ... + def isdigit(self) -> ndarray[_ShapeType, dtype[bool_]]: ... + def islower(self) -> ndarray[_ShapeType, dtype[bool_]]: ... + def isspace(self) -> ndarray[_ShapeType, dtype[bool_]]: ... + def istitle(self) -> ndarray[_ShapeType, dtype[bool_]]: ... + def isupper(self) -> ndarray[_ShapeType, dtype[bool_]]: ... + def isnumeric(self) -> ndarray[_ShapeType, dtype[bool_]]: ... + def isdecimal(self) -> ndarray[_ShapeType, dtype[bool_]]: ... + +# NOTE: Deprecated +# class MachAr: ... diff --git a/numpy/array_api/__init__.py b/numpy/array_api/__init__.py index 790157504..d8b29057e 100644 --- a/numpy/array_api/__init__.py +++ b/numpy/array_api/__init__.py @@ -143,6 +143,8 @@ from ._creation_functions import ( meshgrid, ones, ones_like, + tril, + triu, zeros, zeros_like, ) @@ -160,6 +162,8 @@ __all__ += [ "meshgrid", "ones", "ones_like", + "tril", + "triu", "zeros", "zeros_like", ] @@ -333,21 +337,22 @@ __all__ += [ # from ._linear_algebra_functions import einsum # __all__ += ['einsum'] -from ._linear_algebra_functions import matmul, tensordot, transpose, vecdot +from ._linear_algebra_functions import matmul, tensordot, matrix_transpose, vecdot -__all__ += ["matmul", "tensordot", "transpose", "vecdot"] +__all__ += ["matmul", "tensordot", "matrix_transpose", "vecdot"] from ._manipulation_functions import ( concat, expand_dims, flip, + permute_dims, reshape, roll, squeeze, stack, ) -__all__ += ["concat", "expand_dims", "flip", "reshape", "roll", "squeeze", "stack"] +__all__ += ["concat", "expand_dims", "flip", "permute_dims", "reshape", "roll", "squeeze", "stack"] from ._searching_functions import argmax, argmin, nonzero, where diff --git a/numpy/array_api/_array_object.py b/numpy/array_api/_array_object.py index 2d746e78b..ef66c5efd 100644 --- a/numpy/array_api/_array_object.py +++ b/numpy/array_api/_array_object.py @@ -29,7 +29,7 @@ from ._dtypes import ( _dtype_categories, ) -from typing import TYPE_CHECKING, Optional, Tuple, Union +from typing import TYPE_CHECKING, Optional, Tuple, Union, Any if TYPE_CHECKING: from ._typing import PyCapsule, Device, Dtype @@ -99,7 +99,10 @@ class Array: """ Performs the operation __repr__. 
""" - return f"Array({np.array2string(self._array, separator=', ')}, dtype={self.dtype.name})" + prefix = "Array(" + suffix = f", dtype={self.dtype.name})" + mid = np.array2string(self._array, separator=', ', prefix=prefix, suffix=suffix) + return prefix + mid + suffix # These are various helper functions to make the array behavior match the # spec in places where it either deviates from or is more strict than @@ -379,7 +382,7 @@ class Array: def __array_namespace__( self: Array, /, *, api_version: Optional[str] = None - ) -> object: + ) -> Any: if api_version is not None and not api_version.startswith("2021."): raise ValueError(f"Unrecognized array API version: {api_version!r}") return array_api @@ -391,6 +394,8 @@ class Array: # Note: This is an error here. if self._array.ndim != 0: raise TypeError("bool is only allowed on arrays with 0 dimensions") + if self.dtype not in _boolean_dtypes: + raise ValueError("bool is only allowed on boolean arrays") res = self._array.__bool__() return res @@ -429,6 +434,8 @@ class Array: # Note: This is an error here. if self._array.ndim != 0: raise TypeError("float is only allowed on arrays with 0 dimensions") + if self.dtype not in _floating_dtypes: + raise ValueError("float is only allowed on floating-point arrays") res = self._array.__float__() return res @@ -488,9 +495,18 @@ class Array: # Note: This is an error here. if self._array.ndim != 0: raise TypeError("int is only allowed on arrays with 0 dimensions") + if self.dtype not in _integer_dtypes: + raise ValueError("int is only allowed on integer arrays") res = self._array.__int__() return res + def __index__(self: Array, /) -> int: + """ + Performs the operation __index__. + """ + res = self._array.__index__() + return res + def __invert__(self: Array, /) -> Array: """ Performs the operation __invert__. @@ -979,6 +995,11 @@ class Array: res = self._array.__rxor__(other._array) return self.__class__._new(res) + def to_device(self: Array, device: Device, /) -> Array: + if device == 'cpu': + return self + raise ValueError(f"Unsupported device {device!r}") + @property def dtype(self) -> Dtype: """ @@ -992,6 +1013,12 @@ class Array: def device(self) -> Device: return "cpu" + # Note: mT is new in array API spec (see matrix_transpose) + @property + def mT(self) -> Array: + from ._linear_algebra_functions import matrix_transpose + return matrix_transpose(self) + @property def ndim(self) -> int: """ @@ -1026,4 +1053,9 @@ class Array: See its docstring for more information. """ + # Note: T only works on 2-dimensional arrays. See the corresponding + # note in the specification: + # https://data-apis.org/array-api/latest/API_specification/array_object.html#t + if self.ndim != 2: + raise ValueError("x.T requires x to have 2 dimensions. Use x.mT to transpose stacks of matrices and permute_dims() to permute dimensions.") return self._array.T diff --git a/numpy/array_api/_creation_functions.py b/numpy/array_api/_creation_functions.py index 2d6cf4414..d760bf2fc 100644 --- a/numpy/array_api/_creation_functions.py +++ b/numpy/array_api/_creation_functions.py @@ -22,7 +22,7 @@ def _check_valid_dtype(dtype): # Note: Only spelling dtypes as the dtype objects is supported. # We use this instead of "dtype in _all_dtypes" because the dtype objects - # define equality with the sorts of things we want to disallw. + # define equality with the sorts of things we want to disallow. 
for d in (None,) + _all_dtypes: if dtype is d: return @@ -134,7 +134,7 @@ def eye( n_cols: Optional[int] = None, /, *, - k: Optional[int] = 0, + k: int = 0, dtype: Optional[Dtype] = None, device: Optional[Device] = None, ) -> Array: @@ -232,7 +232,7 @@ def linspace( return Array._new(np.linspace(start, stop, num, dtype=dtype, endpoint=endpoint)) -def meshgrid(*arrays: Sequence[Array], indexing: str = "xy") -> List[Array, ...]: +def meshgrid(*arrays: Array, indexing: str = "xy") -> List[Array]: """ Array API compatible wrapper for :py:func:`np.meshgrid <numpy.meshgrid>`. @@ -281,6 +281,34 @@ def ones_like( return Array._new(np.ones_like(x._array, dtype=dtype)) +def tril(x: Array, /, *, k: int = 0) -> Array: + """ + Array API compatible wrapper for :py:func:`np.tril <numpy.tril>`. + + See its docstring for more information. + """ + from ._array_object import Array + + if x.ndim < 2: + # Note: Unlike np.tril, x must be at least 2-D + raise ValueError("x must be at least 2-dimensional for tril") + return Array._new(np.tril(x._array, k=k)) + + +def triu(x: Array, /, *, k: int = 0) -> Array: + """ + Array API compatible wrapper for :py:func:`np.triu <numpy.triu>`. + + See its docstring for more information. + """ + from ._array_object import Array + + if x.ndim < 2: + # Note: Unlike np.triu, x must be at least 2-D + raise ValueError("x must be at least 2-dimensional for triu") + return Array._new(np.triu(x._array, k=k)) + + def zeros( shape: Union[int, Tuple[int, ...]], *, diff --git a/numpy/array_api/_data_type_functions.py b/numpy/array_api/_data_type_functions.py index fd92aa250..7ccbe9469 100644 --- a/numpy/array_api/_data_type_functions.py +++ b/numpy/array_api/_data_type_functions.py @@ -13,7 +13,7 @@ if TYPE_CHECKING: import numpy as np -def broadcast_arrays(*arrays: Sequence[Array]) -> List[Array]: +def broadcast_arrays(*arrays: Array) -> List[Array]: """ Array API compatible wrapper for :py:func:`np.broadcast_arrays <numpy.broadcast_arrays>`. @@ -98,7 +98,7 @@ def iinfo(type: Union[Dtype, Array], /) -> iinfo_object: return iinfo_object(ii.bits, ii.max, ii.min) -def result_type(*arrays_and_dtypes: Sequence[Union[Array, Dtype]]) -> Dtype: +def result_type(*arrays_and_dtypes: Union[Array, Dtype]) -> Dtype: """ Array API compatible wrapper for :py:func:`np.result_type <numpy.result_type>`. diff --git a/numpy/array_api/_linear_algebra_functions.py b/numpy/array_api/_linear_algebra_functions.py index 089081725..7a6c9846c 100644 --- a/numpy/array_api/_linear_algebra_functions.py +++ b/numpy/array_api/_linear_algebra_functions.py @@ -52,13 +52,12 @@ def tensordot( return Array._new(np.tensordot(x1._array, x2._array, axes=axes)) -def transpose(x: Array, /, *, axes: Optional[Tuple[int, ...]] = None) -> Array: - """ - Array API compatible wrapper for :py:func:`np.transpose <numpy.transpose>`. - - See its docstring for more information. - """ - return Array._new(np.transpose(x._array, axes=axes)) +# Note: this function is new in the array API spec. Unlike transpose, it only +# transposes the last two axes. 
+def matrix_transpose(x: Array, /) -> Array: + if x.ndim < 2: + raise ValueError("x must be at least 2-dimensional for matrix_transpose") + return Array._new(np.swapaxes(x._array, -1, -2)) # Note: vecdot is not in NumPy diff --git a/numpy/array_api/_manipulation_functions.py b/numpy/array_api/_manipulation_functions.py index c11866261..4f2114ff5 100644 --- a/numpy/array_api/_manipulation_functions.py +++ b/numpy/array_api/_manipulation_functions.py @@ -41,6 +41,17 @@ def flip(x: Array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None) -> return Array._new(np.flip(x._array, axis=axis)) +# Note: The function name is different here (see also matrix_transpose). +# Unlike transpose(), the axes argument is required. +def permute_dims(x: Array, /, axes: Tuple[int, ...]) -> Array: + """ + Array API compatible wrapper for :py:func:`np.transpose <numpy.transpose>`. + + See its docstring for more information. + """ + return Array._new(np.transpose(x._array, axes)) + + def reshape(x: Array, /, shape: Tuple[int, ...]) -> Array: """ Array API compatible wrapper for :py:func:`np.reshape <numpy.reshape>`. diff --git a/numpy/array_api/_statistical_functions.py b/numpy/array_api/_statistical_functions.py index 63790b447..c5abf9468 100644 --- a/numpy/array_api/_statistical_functions.py +++ b/numpy/array_api/_statistical_functions.py @@ -1,8 +1,17 @@ from __future__ import annotations +from ._dtypes import ( + _floating_dtypes, + _numeric_dtypes, +) from ._array_object import Array +from ._creation_functions import asarray +from ._dtypes import float32, float64 -from typing import Optional, Tuple, Union +from typing import TYPE_CHECKING, Optional, Tuple, Union + +if TYPE_CHECKING: + from ._typing import Dtype import numpy as np @@ -14,6 +23,8 @@ def max( axis: Optional[Union[int, Tuple[int, ...]]] = None, keepdims: bool = False, ) -> Array: + if x.dtype not in _numeric_dtypes: + raise TypeError("Only numeric dtypes are allowed in max") return Array._new(np.max(x._array, axis=axis, keepdims=keepdims)) @@ -24,6 +35,8 @@ def mean( axis: Optional[Union[int, Tuple[int, ...]]] = None, keepdims: bool = False, ) -> Array: + if x.dtype not in _floating_dtypes: + raise TypeError("Only floating-point dtypes are allowed in mean") return Array._new(np.mean(x._array, axis=axis, keepdims=keepdims)) @@ -34,6 +47,8 @@ def min( axis: Optional[Union[int, Tuple[int, ...]]] = None, keepdims: bool = False, ) -> Array: + if x.dtype not in _numeric_dtypes: + raise TypeError("Only numeric dtypes are allowed in min") return Array._new(np.min(x._array, axis=axis, keepdims=keepdims)) @@ -42,8 +57,15 @@ def prod( /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, + dtype: Optional[Dtype] = None, keepdims: bool = False, ) -> Array: + if x.dtype not in _numeric_dtypes: + raise TypeError("Only numeric dtypes are allowed in prod") + # Note: sum() and prod() always upcast float32 to float64 for dtype=None + # We need to do so here before computing the product to avoid overflow + if dtype is None and x.dtype == float32: + x = asarray(x, dtype=float64) return Array._new(np.prod(x._array, axis=axis, keepdims=keepdims)) @@ -56,6 +78,8 @@ def std( keepdims: bool = False, ) -> Array: # Note: the keyword argument correction is different here + if x.dtype not in _floating_dtypes: + raise TypeError("Only floating-point dtypes are allowed in std") return Array._new(np.std(x._array, axis=axis, ddof=correction, keepdims=keepdims)) @@ -64,8 +88,15 @@ def sum( /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, + dtype: Optional[Dtype] 
= None, keepdims: bool = False, ) -> Array: + if x.dtype not in _numeric_dtypes: + raise TypeError("Only numeric dtypes are allowed in sum") + # Note: sum() and prod() always upcast float32 to float64 for dtype=None + # We need to do so here before summing to avoid overflow + if dtype is None and x.dtype == float32: + x = asarray(x, dtype=float64) return Array._new(np.sum(x._array, axis=axis, keepdims=keepdims)) @@ -78,4 +109,6 @@ def var( keepdims: bool = False, ) -> Array: # Note: the keyword argument correction is different here + if x.dtype not in _floating_dtypes: + raise TypeError("Only floating-point dtypes are allowed in var") return Array._new(np.var(x._array, axis=axis, ddof=correction, keepdims=keepdims)) diff --git a/numpy/array_api/_typing.py b/numpy/array_api/_typing.py index d530a91ae..dfa87b358 100644 --- a/numpy/array_api/_typing.py +++ b/numpy/array_api/_typing.py @@ -6,6 +6,8 @@ annotations in the function signatures. The functions in the module are only valid for inputs that match the given type annotations. """ +from __future__ import annotations + __all__ = [ "Array", "Device", @@ -15,10 +17,21 @@ __all__ = [ "PyCapsule", ] -from typing import Any, Sequence, Type, Union +import sys +from typing import ( + Any, + Literal, + Sequence, + Type, + Union, + TYPE_CHECKING, + TypeVar, + Protocol, +) -from . import ( - Array, +from ._array_object import Array +from numpy import ( + dtype, int8, int16, int32, @@ -31,14 +44,31 @@ from . import ( float64, ) -# This should really be recursive, but that isn't supported yet. See the -# similar comment in numpy/typing/_array_like.py -NestedSequence = Sequence[Sequence[Any]] +_T_co = TypeVar("_T_co", covariant=True) + +class NestedSequence(Protocol[_T_co]): + def __getitem__(self, key: int, /) -> _T_co | NestedSequence[_T_co]: ... + def __len__(self, /) -> int: ... + +Device = Literal["cpu"] +if TYPE_CHECKING or sys.version_info >= (3, 9): + Dtype = dtype[Union[ + int8, + int16, + int32, + int64, + uint8, + uint16, + uint32, + uint64, + float32, + float64, + ]] +else: + Dtype = dtype -Device = Any -Dtype = Type[ - Union[[int8, int16, int32, int64, uint8, uint16, uint32, uint64, float32, float64]] -] -SupportsDLPack = Any SupportsBufferProtocol = Any PyCapsule = Any + +class SupportsDLPack(Protocol): + def __dlpack__(self, /, *, stream: None = ...) -> PyCapsule: ... diff --git a/numpy/array_api/tests/test_array_object.py b/numpy/array_api/tests/test_array_object.py index 088e09b9f..7959f92b4 100644 --- a/numpy/array_api/tests/test_array_object.py +++ b/numpy/array_api/tests/test_array_object.py @@ -1,3 +1,5 @@ +import operator + from numpy.testing import assert_raises import numpy as np @@ -255,15 +257,31 @@ def test_operators(): def test_python_scalar_construtors(): - a = asarray(False) - b = asarray(0) - c = asarray(0.0) + b = asarray(False) + i = asarray(0) + f = asarray(0.0) - assert bool(a) == bool(b) == bool(c) == False - assert int(a) == int(b) == int(c) == 0 - assert float(a) == float(b) == float(c) == 0.0 + assert bool(b) == False + assert int(i) == 0 + assert float(f) == 0.0 + assert operator.index(i) == 0 # bool/int/float should only be allowed on 0-D arrays. 
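# (A non-0-D array raises TypeError, while a dtype mismatch on a 0-D array
# raises ValueError; operator.index is the exception, since NumPy's own
# __index__ already rejects non-integer 0-D arrays with TypeError.)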
assert_raises(TypeError, lambda: bool(asarray([False]))) assert_raises(TypeError, lambda: int(asarray([0]))) assert_raises(TypeError, lambda: float(asarray([0.0]))) + assert_raises(TypeError, lambda: operator.index(asarray([0]))) + + # bool/int/float should only be allowed on arrays of the corresponding + # dtype + assert_raises(ValueError, lambda: bool(i)) + assert_raises(ValueError, lambda: bool(f)) + + assert_raises(ValueError, lambda: int(b)) + assert_raises(ValueError, lambda: int(f)) + + assert_raises(ValueError, lambda: float(b)) + assert_raises(ValueError, lambda: float(i)) + + assert_raises(TypeError, lambda: operator.index(b)) + assert_raises(TypeError, lambda: operator.index(f)) diff --git a/numpy/array_api/tests/test_creation_functions.py b/numpy/array_api/tests/test_creation_functions.py index 2ee23a47b..c13bc4262 100644 --- a/numpy/array_api/tests/test_creation_functions.py +++ b/numpy/array_api/tests/test_creation_functions.py @@ -8,30 +8,15 @@ from .._creation_functions import ( empty, empty_like, eye, - from_dlpack, full, full_like, linspace, - meshgrid, ones, ones_like, zeros, zeros_like, ) from .._array_object import Array -from .._dtypes import ( - _all_dtypes, - _boolean_dtypes, - _floating_dtypes, - _integer_dtypes, - _integer_or_boolean_dtypes, - _numeric_dtypes, - int8, - int16, - int32, - int64, - uint64, -) def test_asarray_errors(): diff --git a/numpy/char.pyi b/numpy/char.pyi deleted file mode 100644 index 4904aa27a..000000000 --- a/numpy/char.pyi +++ /dev/null @@ -1,59 +0,0 @@ -from typing import Any, List - -from numpy import ( - chararray as chararray, -) - -__all__: List[str] - -def equal(x1, x2): ... -def not_equal(x1, x2): ... -def greater_equal(x1, x2): ... -def less_equal(x1, x2): ... -def greater(x1, x2): ... -def less(x1, x2): ... -def str_len(a): ... -def add(x1, x2): ... -def multiply(a, i): ... -def mod(a, values): ... -def capitalize(a): ... -def center(a, width, fillchar=...): ... -def count(a, sub, start=..., end=...): ... -def decode(a, encoding=..., errors=...): ... -def encode(a, encoding=..., errors=...): ... -def endswith(a, suffix, start=..., end=...): ... -def expandtabs(a, tabsize=...): ... -def find(a, sub, start=..., end=...): ... -def index(a, sub, start=..., end=...): ... -def isalnum(a): ... -def isalpha(a): ... -def isdigit(a): ... -def islower(a): ... -def isspace(a): ... -def istitle(a): ... -def isupper(a): ... -def join(sep, seq): ... -def ljust(a, width, fillchar=...): ... -def lower(a): ... -def lstrip(a, chars=...): ... -def partition(a, sep): ... -def replace(a, old, new, count=...): ... -def rfind(a, sub, start=..., end=...): ... -def rindex(a, sub, start=..., end=...): ... -def rjust(a, width, fillchar=...): ... -def rpartition(a, sep): ... -def rsplit(a, sep=..., maxsplit=...): ... -def rstrip(a, chars=...): ... -def split(a, sep=..., maxsplit=...): ... -def splitlines(a, keepends=...): ... -def startswith(a, prefix, start=..., end=...): ... -def strip(a, chars=...): ... -def swapcase(a): ... -def title(a): ... -def translate(a, table, deletechars=...): ... -def upper(a): ... -def zfill(a, width): ... -def isnumeric(a): ... -def isdecimal(a): ... -def array(obj, itemsize=..., copy=..., unicode=..., order=...): ... -def asarray(obj, itemsize=..., unicode=..., order=...): ... 
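The array_api changes above tighten Python-scalar conversion (bool/int/float now check the dtype as well as 0-dimensionality) and add `operator.index` support. A minimal sketch of the resulting behavior, assuming `numpy.array_api` imports as it does in NumPy 1.22; the session is illustrative only and not part of the diff:

    import operator
    import numpy.array_api as xp        # experimental module; emits a UserWarning on import
    from numpy.testing import assert_raises

    b = xp.asarray(False)               # 0-D boolean array
    i = xp.asarray(0)                   # 0-D integer array

    assert bool(b) is False             # matching dtype and 0-D: allowed
    assert operator.index(i) == 0       # via the new __index__
    assert_raises(ValueError, int, b)   # 0-D, but wrong dtype
    assert_raises(TypeError, bool, xp.asarray([False]))  # not 0-D
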
diff --git a/numpy/conftest.py b/numpy/conftest.py index e15ee0845..fd5fdd77d 100644 --- a/numpy/conftest.py +++ b/numpy/conftest.py @@ -33,7 +33,7 @@ hypothesis.settings.register_profile( suppress_health_check=hypothesis.HealthCheck.all(), ) # Note that the default profile is chosen based on the presence -# of pytest.ini, but can be overriden by passing the +# of pytest.ini, but can be overridden by passing the # --hypothesis-profile=NAME argument to pytest. _pytest_ini = os.path.join(os.path.dirname(__file__), "..", "pytest.ini") hypothesis.settings.load_profile( diff --git a/numpy/core/__init__.py b/numpy/core/__init__.py index dad9293e1..332f9940e 100644 --- a/numpy/core/__init__.py +++ b/numpy/core/__init__.py @@ -9,6 +9,7 @@ are available in the main ``numpy`` namespace - use that instead. from numpy.version import version as __version__ import os +import warnings # disables OpenBLAS affinity setting of the main thread that limits # python threads or processes to one core @@ -80,8 +81,8 @@ from .memmap import * from .defchararray import chararray from . import function_base from .function_base import * -from . import machar -from .machar import * +from . import _machar +from ._machar import * from . import getlimits from .getlimits import * from . import shape_base @@ -109,7 +110,6 @@ __all__ += fromnumeric.__all__ __all__ += ['record', 'recarray', 'format_parser'] __all__ += ['chararray'] __all__ += function_base.__all__ -__all__ += machar.__all__ __all__ += getlimits.__all__ __all__ += shape_base.__all__ __all__ += einsumfunc.__all__ @@ -151,6 +151,17 @@ def _DType_reduce(DType): return _DType_reconstruct, (scalar_type,) +def __getattr__(name): + # Deprecated 2021-10-20, NumPy 1.22 + if name == "machar": + warnings.warn( + "The `np.core.machar` module is deprecated (NumPy 1.22)", + DeprecationWarning, stacklevel=2, + ) + return _machar + raise AttributeError(f"Module {__name__!r} has no attribute {name!r}") + + import copyreg copyreg.pickle(ufunc, _ufunc_reduce) diff --git a/numpy/core/_add_newdocs.py b/numpy/core/_add_newdocs.py index 06f2a6376..c8a24db0c 100644 --- a/numpy/core/_add_newdocs.py +++ b/numpy/core/_add_newdocs.py @@ -328,7 +328,7 @@ add_newdoc('numpy.core', 'nditer', ... with it: ... for (a, b, c) in it: ... addop(a, b, out=c) - ... return it.operands[2] + ... return it.operands[2] Here is the same function, but following the C-style pattern: @@ -478,7 +478,7 @@ add_newdoc('numpy.core', 'nditer', ('iternext', add_newdoc('numpy.core', 'nditer', ('remove_axis', """ - remove_axis(i) + remove_axis(i, /) Removes axis `i` from the iterator. Requires that the flag "multi_index" be enabled. @@ -504,6 +504,9 @@ add_newdoc('numpy.core', 'nditer', ('reset', add_newdoc('numpy.core', 'nested_iters', """ + nested_iters(op, axes, flags=None, op_flags=None, op_dtypes=None, \ + order="K", casting="safe", buffersize=0) + Create nditers for use in nested loops Create a tuple of `nditer` objects which iterate in nested loops over @@ -796,7 +799,7 @@ add_newdoc('numpy.core.multiarray', 'array', object : array_like An array, any object exposing the array interface, an object whose __array__ method returns an array, or any (nested) sequence. - If object is a scalar, a 0-dimensional array containing object is + If object is a scalar, a 0-dimensional array containing object is returned. dtype : data-type, optional The desired data-type for the array. 
If not given, then the type will @@ -2201,8 +2204,8 @@ add_newdoc('numpy.core.multiarray', 'ndarray', empty : Create an array, but leave its allocated memory unchanged (i.e., it contains "garbage"). dtype : Create a data-type. - numpy.typing.NDArray : A :term:`generic <generic type>` version - of ndarray. + numpy.typing.NDArray : An ndarray alias :term:`generic <generic type>` + w.r.t. its `dtype.type <numpy.dtype.type>`. Notes ----- @@ -2798,6 +2801,39 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('__copy__', """)) +add_newdoc('numpy.core.multiarray', 'ndarray', ('__class_getitem__', + """a.__class_getitem__(item, /) + + Return a parametrized wrapper around the `~numpy.ndarray` type. + + .. versionadded:: 1.22 + + Returns + ------- + alias : types.GenericAlias + A parametrized `~numpy.ndarray` type. + + Examples + -------- + >>> from typing import Any + >>> import numpy as np + + >>> np.ndarray[Any, np.dtype[Any]] + numpy.ndarray[typing.Any, numpy.dtype[typing.Any]] + + Notes + ----- + This method is only available for python 3.9 and later. + + See Also + -------- + :pep:`585` : Type hinting generics in standard collections. + numpy.typing.NDArray : An ndarray alias :term:`generic <generic type>` + w.r.t. its `dtype.type <numpy.dtype.type>`. + + """)) + + add_newdoc('numpy.core.multiarray', 'ndarray', ('__deepcopy__', """a.__deepcopy__(memo, /) -> Deep copy of array. @@ -3541,7 +3577,7 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('newbyteorder', * 'S' - swap dtype from current to opposite endian * {'<', 'little'} - little endian * {'>', 'big'} - big endian - * '=' - native order, equivalent to `sys.byteorder` + * {'=', 'native'} - native order, equivalent to `sys.byteorder` * {'|', 'I'} - ignore (no change to byte order) The default value ('S') results in swapping the current @@ -4008,6 +4044,9 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('partition', The order of all elements in the partitions is undefined. If provided with a sequence of kth it will partition all elements indexed by kth of them into their sorted position at once. + + .. deprecated:: 1.22.0 + Passing booleans as index is deprecated. axis : int, optional Axis along which to sort. Default is -1, which means sort along the last axis. @@ -4688,6 +4727,16 @@ add_newdoc('numpy.core.umath', '_add_newdoc_ufunc', and then throwing away the ufunc. """) +add_newdoc('numpy.core.multiarray', 'get_handler_name', + """ + get_handler_name(a: ndarray) -> str,None + + Return the name of the memory handler used by `a`. If not provided, return + the name of the memory handler that will be used to allocate data for the + next `ndarray` in this context. May return None if `a` does not own its + memory, in which case you can traverse ``a.base`` for a memory handler. + """) + add_newdoc('numpy.core.multiarray', '_set_madvise_hugepage', """ _set_madvise_hugepage(enabled: bool) -> bool @@ -6001,7 +6050,7 @@ add_newdoc('numpy.core.multiarray', 'dtype', ('newbyteorder', * 'S' - swap dtype from current to opposite endian * {'<', 'little'} - little endian * {'>', 'big'} - big endian - * '=' - native order + * {'=', 'native'} - native order * {'|', 'I'} - ignore (no change to byte order) Returns @@ -6044,6 +6093,97 @@ add_newdoc('numpy.core.multiarray', 'dtype', ('newbyteorder', """)) +add_newdoc('numpy.core.multiarray', 'dtype', ('__class_getitem__', + """ + __class_getitem__(item, /) + + Return a parametrized wrapper around the `~numpy.dtype` type. + + .. 
versionadded:: 1.22 + + Returns + ------- + alias : types.GenericAlias + A parametrized `~numpy.dtype` type. + + Examples + -------- + >>> import numpy as np + + >>> np.dtype[np.int64] + numpy.dtype[numpy.int64] + + Notes + ----- + This method is only available for python 3.9 and later. + + See Also + -------- + :pep:`585` : Type hinting generics in standard collections. + + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('__ge__', + """ + __ge__(value, /) + + Return ``self >= value``. + + Equivalent to ``np.can_cast(value, self, casting="safe")``. + + See Also + -------- + can_cast : Returns True if cast between data types can occur according to + the casting rule. + + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('__le__', + """ + __le__(value, /) + + Return ``self <= value``. + + Equivalent to ``np.can_cast(self, value, casting="safe")``. + + See Also + -------- + can_cast : Returns True if cast between data types can occur according to + the casting rule. + + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('__gt__', + """ + __ge__(value, /) + + Return ``self > value``. + + Equivalent to + ``self != value and np.can_cast(value, self, casting="safe")``. + + See Also + -------- + can_cast : Returns True if cast between data types can occur according to + the casting rule. + + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('__lt__', + """ + __lt__(value, /) + + Return ``self < value``. + + Equivalent to + ``self != value and np.can_cast(self, value, casting="safe")``. + + See Also + -------- + can_cast : Returns True if cast between data types can occur according to + the casting rule. + + """)) ############################################################################## # @@ -6372,7 +6512,7 @@ add_newdoc('numpy.core.numerictypes', 'generic', ('newbyteorder', * 'S' - swap dtype from current to opposite endian * {'<', 'little'} - little endian * {'>', 'big'} - big endian - * '=' - native order + * {'=', 'native'} - native order * {'|', 'I'} - ignore (no change to byte order) Parameters @@ -6465,6 +6605,36 @@ add_newdoc('numpy.core.numerictypes', 'generic', add_newdoc('numpy.core.numerictypes', 'generic', refer_to_array_attribute('view')) +add_newdoc('numpy.core.numerictypes', 'number', ('__class_getitem__', + """ + __class_getitem__(item, /) + + Return a parametrized wrapper around the `~numpy.number` type. + + .. versionadded:: 1.22 + + Returns + ------- + alias : types.GenericAlias + A parametrized `~numpy.number` type. + + Examples + -------- + >>> from typing import Any + >>> import numpy as np + + >>> np.signedinteger[Any] + numpy.signedinteger[typing.Any] + + Notes + ----- + This method is only available for python 3.9 and later. + + See Also + -------- + :pep:`585` : Type hinting generics in standard collections. 
+ + """)) ############################################################################## # diff --git a/numpy/core/_add_newdocs_scalars.py b/numpy/core/_add_newdocs_scalars.py index 8773d6c96..94859a9d5 100644 --- a/numpy/core/_add_newdocs_scalars.py +++ b/numpy/core/_add_newdocs_scalars.py @@ -290,3 +290,22 @@ for float_name in ('half', 'single', 'double', 'longdouble'): >>> np.{float_name}(3.2).is_integer() False """)) + +for int_name in ('int8', 'uint8', 'int16', 'uint16', 'int32', 'uint32', + 'int64', 'uint64', 'int64', 'uint64', 'int64', 'uint64'): + # Add negative examples for signed cases by checking typecode + add_newdoc('numpy.core.numerictypes', int_name, ('bit_count', + f""" + {int_name}.bit_count() -> int + + Computes the number of 1-bits in the absolute value of the input. + Analogous to the builtin `int.bit_count` or ``popcount`` in C++. + + Examples + -------- + >>> np.{int_name}(127).bit_count() + 7""" + + (f""" + >>> np.{int_name}(-127).bit_count() + 7 + """ if dtype(int_name).char.islower() else ""))) diff --git a/numpy/core/_dtype.py b/numpy/core/_dtype.py index 4249071ff..c3a22b1c6 100644 --- a/numpy/core/_dtype.py +++ b/numpy/core/_dtype.py @@ -200,30 +200,37 @@ def _struct_dict_str(dtype, includealignedflag): # Build up a string to make the dictionary + if np.core.arrayprint._get_legacy_print_mode() <= 121: + colon = ":" + fieldsep = "," + else: + colon = ": " + fieldsep = ", " + # First, the names - ret = "{'names':[" - ret += ",".join(repr(name) for name in names) + ret = "{'names'%s[" % colon + ret += fieldsep.join(repr(name) for name in names) # Second, the formats - ret += "], 'formats':[" - ret += ",".join( + ret += "], 'formats'%s[" % colon + ret += fieldsep.join( _construction_repr(fld_dtype, short=True) for fld_dtype in fld_dtypes) # Third, the offsets - ret += "], 'offsets':[" - ret += ",".join("%d" % offset for offset in offsets) + ret += "], 'offsets'%s[" % colon + ret += fieldsep.join("%d" % offset for offset in offsets) # Fourth, the titles if any(title is not None for title in titles): - ret += "], 'titles':[" - ret += ",".join(repr(title) for title in titles) + ret += "], 'titles'%s[" % colon + ret += fieldsep.join(repr(title) for title in titles) # Fifth, the itemsize - ret += "], 'itemsize':%d" % dtype.itemsize + ret += "], 'itemsize'%s%d" % (colon, dtype.itemsize) if (includealignedflag and dtype.isalignedstruct): # Finally, the aligned flag - ret += ", 'aligned':True}" + ret += ", 'aligned'%sTrue}" % colon else: ret += "}" diff --git a/numpy/core/machar.py b/numpy/core/_machar.py index 04dad4d77..ace19a429 100644 --- a/numpy/core/machar.py +++ b/numpy/core/_machar.py @@ -1,5 +1,5 @@ """ -Machine arithmetics - determine the parameters of the +Machine arithmetic - determine the parameters of the floating-point arithmetic system Author: Pearu Peterson, September 2003 @@ -13,6 +13,7 @@ from numpy.core.overrides import set_module # Need to speed this up...especially for longfloat +# Deprecated 2021-10-20, NumPy 1.22 @set_module('numpy') class MachAr: """ diff --git a/numpy/core/_methods.py b/numpy/core/_methods.py index e475b94df..a239e2c87 100644 --- a/numpy/core/_methods.py +++ b/numpy/core/_methods.py @@ -221,8 +221,10 @@ def _var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, if isinstance(arrmean, mu.ndarray): arrmean = um.true_divide(arrmean, div, out=arrmean, casting='unsafe', subok=False) - else: + elif hasattr(arrmean, "dtype"): arrmean = arrmean.dtype.type(arrmean / rcount) + else: + arrmean = arrmean / rcount # Compute sum of 
squared deviations from mean # Note that x may not be inexact and that we need it to be an array, diff --git a/numpy/core/_ufunc_config.pyi b/numpy/core/_ufunc_config.pyi index 9c8cc8ab6..cd7129bcb 100644 --- a/numpy/core/_ufunc_config.pyi +++ b/numpy/core/_ufunc_config.pyi @@ -1,11 +1,10 @@ -from typing import Optional, Union, Callable, Any, Literal, Protocol, TypedDict +from typing import Optional, Union, Callable, Any, Literal, TypedDict + +from numpy import _SupportsWrite _ErrKind = Literal["ignore", "warn", "raise", "call", "print", "log"] _ErrFunc = Callable[[str, int], Any] -class _SupportsWrite(Protocol): - def write(self, msg: str, /) -> Any: ... - class _ErrDict(TypedDict): divide: _ErrKind over: _ErrKind @@ -30,8 +29,8 @@ def geterr() -> _ErrDict: ... def setbufsize(size: int) -> int: ... def getbufsize() -> int: ... def seterrcall( - func: Union[None, _ErrFunc, _SupportsWrite] -) -> Union[None, _ErrFunc, _SupportsWrite]: ... -def geterrcall() -> Union[None, _ErrFunc, _SupportsWrite]: ... + func: Union[None, _ErrFunc, _SupportsWrite[str]] +) -> Union[None, _ErrFunc, _SupportsWrite[str]]: ... +def geterrcall() -> Union[None, _ErrFunc, _SupportsWrite[str]]: ... # See `numpy/__init__.pyi` for the `errstate` class diff --git a/numpy/core/arrayprint.py b/numpy/core/arrayprint.py index 2a4bef669..d7e9bf795 100644 --- a/numpy/core/arrayprint.py +++ b/numpy/core/arrayprint.py @@ -24,6 +24,7 @@ __docformat__ = 'restructuredtext' import functools import numbers +import sys try: from _thread import get_ident except ImportError: @@ -56,12 +57,17 @@ _format_options = { 'infstr': 'inf', 'sign': '-', 'formatter': None, - 'legacy': False} + # Internally stored as an int to simplify comparisons; converted from/to + # str/False on the way in/out. + 'legacy': sys.maxsize} def _make_options_dict(precision=None, threshold=None, edgeitems=None, linewidth=None, suppress=None, nanstr=None, infstr=None, sign=None, formatter=None, floatmode=None, legacy=None): - """ make a dictionary out of the non-None arguments, plus sanity checks """ + """ + Make a dictionary out of the non-None arguments, plus conversion of + *legacy* and sanity checks. + """ options = {k: v for k, v in locals().items() if v is not None} @@ -76,9 +82,18 @@ def _make_options_dict(precision=None, threshold=None, edgeitems=None, if sign not in [None, '-', '+', ' ']: raise ValueError("sign option must be one of ' ', '+', or '-'") - if legacy not in [None, False, '1.13']: - warnings.warn("legacy printing option can currently only be '1.13' or " - "`False`", stacklevel=3) + if legacy == False: + options['legacy'] = sys.maxsize + elif legacy == '1.13': + options['legacy'] = 113 + elif legacy == '1.21': + options['legacy'] = 121 + elif legacy is None: + pass # OK, do nothing. + else: + warnings.warn( + "legacy printing option can currently only be '1.13', '1.21', or " + "`False`", stacklevel=3) if threshold is not None: # forbid the bad threshold arg suggested by stack overflow, gh-12351 @@ -186,11 +201,21 @@ def set_printoptions(precision=None, threshold=None, edgeitems=None, legacy : string or `False`, optional If set to the string `'1.13'` enables 1.13 legacy printing mode. This approximates numpy 1.13 print output by including a space in the sign - position of floats and different behavior for 0d arrays. If set to - `False`, disables legacy mode. Unrecognized strings will be ignored - with a warning for forward compatibility. + position of floats and different behavior for 0d arrays. 
This also + enables 1.21 legacy printing mode (described below). + + If set to the string `'1.21'` enables 1.21 legacy printing mode. This + approximates numpy 1.21 print output of complex structured dtypes + by not inserting spaces after commas that separate fields and after + colons. + + If set to `False`, disables legacy mode. + + Unrecognized strings will be ignored with a warning for forward + compatibility. .. versionadded:: 1.14.0 + .. versionchanged:: 1.22.0 See Also -------- @@ -257,11 +282,13 @@ def set_printoptions(precision=None, threshold=None, edgeitems=None, _format_options.update(opt) # set the C variable for legacy mode - if _format_options['legacy'] == '1.13': + if _format_options['legacy'] == 113: set_legacy_print_mode(113) # reset the sign option in legacy mode to avoid confusion _format_options['sign'] = '-' - elif _format_options['legacy'] is False: + elif _format_options['legacy'] == 121: + set_legacy_print_mode(121) + elif _format_options['legacy'] == sys.maxsize: set_legacy_print_mode(0) @@ -292,7 +319,16 @@ def get_printoptions(): set_printoptions, printoptions, set_string_function """ - return _format_options.copy() + opts = _format_options.copy() + opts['legacy'] = { + 113: '1.13', 121: '1.21', sys.maxsize: False, + }[opts['legacy']] + return opts + + +def _get_legacy_print_mode(): + """Return the legacy print mode as an int.""" + return _format_options['legacy'] @set_module('numpy') @@ -678,7 +714,7 @@ def array2string(a, max_line_width=None, precision=None, options = _format_options.copy() options.update(overrides) - if options['legacy'] == '1.13': + if options['legacy'] <= 113: if style is np._NoValue: style = repr @@ -690,7 +726,7 @@ def array2string(a, max_line_width=None, precision=None, " except in 1.13 'legacy' mode", DeprecationWarning, stacklevel=3) - if options['legacy'] != '1.13': + if options['legacy'] > 113: options['linewidth'] -= len(suffix) # treat as a null array if any of shape elements == 0 @@ -702,7 +738,7 @@ def array2string(a, max_line_width=None, precision=None, def _extendLine(s, line, word, line_width, next_line_prefix, legacy): needs_wrap = len(line) + len(word) > line_width - if legacy != '1.13': + if legacy > 113: # don't wrap lines if it won't help if len(line) <= len(next_line_prefix): needs_wrap = False @@ -719,7 +755,7 @@ def _extendLine_pretty(s, line, word, line_width, next_line_prefix, legacy): Extends line with nicely formatted (possibly multi-line) string ``word``. 
""" words = word.splitlines() - if len(words) == 1 or legacy == '1.13': + if len(words) == 1 or legacy <= 113: return _extendLine(s, line, word, line_width, next_line_prefix, legacy) max_word_length = max(len(word) for word in words) @@ -765,7 +801,7 @@ def _formatArray(a, format_function, line_width, next_line_prefix, # when recursing, add a space to align with the [ added, and reduce the # length of the line by 1 next_hanging_indent = hanging_indent + ' ' - if legacy == '1.13': + if legacy <= 113: next_width = curr_width else: next_width = curr_width - len(']') @@ -785,7 +821,7 @@ def _formatArray(a, format_function, line_width, next_line_prefix, # last axis (rows) - wrap elements if they would not fit on one line if axes_left == 1: # the length up until the beginning of the separator / bracket - if legacy == '1.13': + if legacy <= 113: elem_width = curr_width - len(separator.rstrip()) else: elem_width = curr_width - max(len(separator.rstrip()), len(']')) @@ -800,7 +836,7 @@ def _formatArray(a, format_function, line_width, next_line_prefix, if show_summary: s, line = _extendLine( s, line, summary_insert, elem_width, hanging_indent, legacy) - if legacy == '1.13': + if legacy <= 113: line += ", " else: line += separator @@ -811,7 +847,7 @@ def _formatArray(a, format_function, line_width, next_line_prefix, s, line, word, elem_width, hanging_indent, legacy) line += separator - if legacy == '1.13': + if legacy <= 113: # width of the separator is not considered on 1.13 elem_width = curr_width word = recurser(index + (-1,), next_hanging_indent, next_width) @@ -830,7 +866,7 @@ def _formatArray(a, format_function, line_width, next_line_prefix, s += hanging_indent + nested + line_sep if show_summary: - if legacy == '1.13': + if legacy <= 113: # trailing space, fixed nbr of newlines, and fixed separator s += hanging_indent + summary_insert + ", \n" else: @@ -875,7 +911,7 @@ class FloatingFormat: sign = '+' if sign else '-' self._legacy = legacy - if self._legacy == '1.13': + if self._legacy <= 113: # when not 0d, legacy does not support '-' if data.shape != () and sign == '-': sign = ' ' @@ -919,7 +955,7 @@ class FloatingFormat: self.min_digits = None elif self.exp_format: trim, unique = '.', True - if self.floatmode == 'fixed' or self._legacy == '1.13': + if self.floatmode == 'fixed' or self._legacy <= 113: trim, unique = 'k', False strs = (dragon4_scientific(x, precision=self.precision, unique=unique, trim=trim, sign=self.sign == '+') @@ -934,7 +970,7 @@ class FloatingFormat: self.unique = unique # for back-compat with np 1.13, use 2 spaces & sign and full prec - if self._legacy == '1.13': + if self._legacy <= 113: self.pad_left = 3 else: # this should be only 1 or 2. Can be calculated from sign. @@ -951,7 +987,7 @@ class FloatingFormat: sign=self.sign == '+') for x in finite_vals) int_part, frac_part = zip(*(s.split('.') for s in strs)) - if self._legacy == '1.13': + if self._legacy <= 113: self.pad_left = 1 + max(len(s.lstrip('-+')) for s in int_part) else: self.pad_left = max(len(s) for s in int_part) @@ -966,7 +1002,7 @@ class FloatingFormat: self.trim = '.' 
self.min_digits = 0 - if self._legacy != '1.13': + if self._legacy > 113: # account for sign = ' ' by adding one to pad_left if self.sign == ' ' and not any(np.signbit(finite_vals)): self.pad_left += 1 @@ -1215,7 +1251,7 @@ class ComplexFloatingFormat: sign = '+' if sign else '-' floatmode_real = floatmode_imag = floatmode - if legacy == '1.13': + if legacy <= 113: floatmode_real = 'maxprec_equal' floatmode_imag = 'maxprec' @@ -1286,7 +1322,7 @@ class DatetimeFormat(_TimelikeFormat): super().__init__(x) def __call__(self, x): - if self.legacy == '1.13': + if self.legacy <= 113: return self._format_non_nat(x) return super().__call__(x) @@ -1390,7 +1426,7 @@ def dtype_is_implied(dtype): array([1, 2, 3], dtype=int8) """ dtype = np.dtype(dtype) - if _format_options['legacy'] == '1.13' and dtype.type == bool_: + if _format_options['legacy'] <= 113 and dtype.type == bool_: return False # not just void types can be structured, and names are not part of the repr @@ -1445,7 +1481,7 @@ def _array_repr_implementation( prefix = class_name + "(" suffix = ")" if skipdtype else "," - if (_format_options['legacy'] == '1.13' and + if (_format_options['legacy'] <= 113 and arr.shape == () and not arr.dtype.names): lst = repr(arr.item()) elif arr.size > 0 or arr.shape == (0,): @@ -1466,7 +1502,7 @@ def _array_repr_implementation( # Note: This line gives the correct result even when rfind returns -1. last_line_len = len(arr_str) - (arr_str.rfind('\n') + 1) spacer = " " - if _format_options['legacy'] == '1.13': + if _format_options['legacy'] <= 113: if issubclass(arr.dtype.type, flexible): spacer = '\n' + ' '*len(class_name + "(") elif last_line_len + len(dtype_str) + 1 > max_line_width: @@ -1540,7 +1576,7 @@ def _array_str_implementation( a, max_line_width=None, precision=None, suppress_small=None, array2string=array2string): """Internal version of array_str() that allows overriding array2string.""" - if (_format_options['legacy'] == '1.13' and + if (_format_options['legacy'] <= 113 and a.shape == () and not a.dtype.names): return str(a.item()) diff --git a/numpy/core/arrayprint.pyi b/numpy/core/arrayprint.pyi index df22efed6..0d338206f 100644 --- a/numpy/core/arrayprint.pyi +++ b/numpy/core/arrayprint.pyi @@ -1,8 +1,8 @@ from types import TracebackType from typing import Any, Optional, Callable, Union, Type, Literal, TypedDict, SupportsIndex -# Using a private class is by no means ideal, but it is simply a consquence -# of a `contextlib.context` returning an instance of aformentioned class +# Using a private class is by no means ideal, but it is simply a consequence +# of a `contextlib.context` returning an instance of aforementioned class from contextlib import _GeneratorContextManager from numpy import ( @@ -53,7 +53,7 @@ class _FormatOptions(TypedDict): formatter: Optional[_FormatDict] sign: Literal["-", "+", " "] floatmode: _FloatMode - legacy: Literal[False, "1.13"] + legacy: Literal[False, "1.13", "1.21"] def set_printoptions( precision: Optional[SupportsIndex] = ..., @@ -67,7 +67,7 @@ def set_printoptions( sign: Optional[Literal["-", "+", " "]] = ..., floatmode: Optional[_FloatMode] = ..., *, - legacy: Optional[Literal[False, "1.13"]] = ... + legacy: Optional[Literal[False, "1.13", "1.21"]] = ... ) -> None: ... def get_printoptions() -> _FormatOptions: ... 
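# Note: get_printoptions() translates the internally stored int encoding of
# `legacy` (113, 121, or sys.maxsize) back to '1.13', '1.21', or False.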
def array2string( @@ -87,7 +87,7 @@ def array2string( sign: Optional[Literal["-", "+", " "]] = ..., floatmode: Optional[_FloatMode] = ..., suffix: str = ..., - legacy: Optional[Literal[False, "1.13"]] = ..., + legacy: Optional[Literal[False, "1.13", "1.21"]] = ..., ) -> str: ... def format_float_scientific( x: _FloatLike_co, @@ -137,5 +137,5 @@ def printoptions( sign: Optional[Literal["-", "+", " "]] = ..., floatmode: Optional[_FloatMode] = ..., *, - legacy: Optional[Literal[False, "1.13"]] = ... + legacy: Optional[Literal[False, "1.13", "1.21"]] = ... ) -> _GeneratorContextManager[_FormatOptions]: ... diff --git a/numpy/core/code_generators/cversions.txt b/numpy/core/code_generators/cversions.txt index a02c7153a..38ee4dac2 100644 --- a/numpy/core/code_generators/cversions.txt +++ b/numpy/core/code_generators/cversions.txt @@ -56,5 +56,7 @@ # DType related API additions. # A new field was added to the end of PyArrayObject_fields. # Version 14 (NumPy 1.21) No change. -# Version 14 (NumPy 1.22) No change. 0x0000000e = 17a0f366e55ec05e5c5c149123478452 + +# Version 15 (NumPy 1.22) Configurable memory allocations +0x0000000f = 0c420aed67010594eb81f23ddfb02a88 diff --git a/numpy/core/code_generators/generate_umath.py b/numpy/core/code_generators/generate_umath.py index 4891e8f23..3a27a34cd 100644 --- a/numpy/core/code_generators/generate_umath.py +++ b/numpy/core/code_generators/generate_umath.py @@ -359,7 +359,7 @@ defdict = { docstrings.get('numpy.core.umath.fmod'), None, TD(ints), - TD(flts, f='fmod', astype={'e':'f'}), + TD(flts, f='fmod', astype={'e': 'f'}), TD(P, f='fmod'), ), 'square': @@ -390,7 +390,7 @@ defdict = { docstrings.get('numpy.core.umath.power'), None, TD(ints), - TD(inexact, f='pow', astype={'e':'f'}), + TD(inexact, f='pow', astype={'e': 'f'}), TD(O, f='npy_ObjectPower'), ), 'float_power': @@ -551,13 +551,13 @@ defdict = { Ufunc(2, 1, MinusInfinity, docstrings.get('numpy.core.umath.logaddexp'), None, - TD(flts, f="logaddexp", astype={'e':'f'}) + TD(flts, f="logaddexp", astype={'e': 'f'}) ), 'logaddexp2': Ufunc(2, 1, MinusInfinity, docstrings.get('numpy.core.umath.logaddexp2'), None, - TD(flts, f="logaddexp2", astype={'e':'f'}) + TD(flts, f="logaddexp2", astype={'e': 'f'}) ), 'bitwise_and': Ufunc(2, 1, AllOnes, @@ -605,80 +605,93 @@ defdict = { Ufunc(2, 1, None, docstrings.get('numpy.core.umath.heaviside'), None, - TD(flts, f='heaviside', astype={'e':'f'}), + TD(flts, f='heaviside', astype={'e': 'f'}), ), 'degrees': Ufunc(1, 1, None, docstrings.get('numpy.core.umath.degrees'), None, - TD(fltsP, f='degrees', astype={'e':'f'}), + TD(fltsP, f='degrees', astype={'e': 'f'}), ), 'rad2deg': Ufunc(1, 1, None, docstrings.get('numpy.core.umath.rad2deg'), None, - TD(fltsP, f='rad2deg', astype={'e':'f'}), + TD(fltsP, f='rad2deg', astype={'e': 'f'}), ), 'radians': Ufunc(1, 1, None, docstrings.get('numpy.core.umath.radians'), None, - TD(fltsP, f='radians', astype={'e':'f'}), + TD(fltsP, f='radians', astype={'e': 'f'}), ), 'deg2rad': Ufunc(1, 1, None, docstrings.get('numpy.core.umath.deg2rad'), None, - TD(fltsP, f='deg2rad', astype={'e':'f'}), + TD(fltsP, f='deg2rad', astype={'e': 'f'}), ), 'arccos': Ufunc(1, 1, None, docstrings.get('numpy.core.umath.arccos'), None, - TD(inexact, f='acos', astype={'e':'f'}), + TD('e', f='acos', astype={'e': 'f'}), + TD('fd', dispatch=[('loops_umath_fp', 'fd')]), + TD(inexact, f='acos', astype={'e': 'f'}), TD(P, f='arccos'), ), 'arccosh': Ufunc(1, 1, None, docstrings.get('numpy.core.umath.arccosh'), None, - TD(inexact, f='acosh', astype={'e':'f'}), + TD('e', 
f='acosh', astype={'e': 'f'}), + TD('fd', dispatch=[('loops_umath_fp', 'fd')]), + TD(inexact, f='acosh', astype={'e': 'f'}), TD(P, f='arccosh'), ), 'arcsin': Ufunc(1, 1, None, docstrings.get('numpy.core.umath.arcsin'), None, - TD(inexact, f='asin', astype={'e':'f'}), + TD('e', f='asin', astype={'e': 'f'}), + TD('fd', dispatch=[('loops_umath_fp', 'fd')]), + TD(inexact, f='asin', astype={'e': 'f'}), TD(P, f='arcsin'), ), 'arcsinh': Ufunc(1, 1, None, docstrings.get('numpy.core.umath.arcsinh'), None, - TD(inexact, f='asinh', astype={'e':'f'}), + TD('e', f='asinh', astype={'e': 'f'}), + TD('fd', dispatch=[('loops_umath_fp', 'fd')]), + TD(inexact, f='asinh', astype={'e': 'f'}), TD(P, f='arcsinh'), ), 'arctan': Ufunc(1, 1, None, docstrings.get('numpy.core.umath.arctan'), None, - TD(inexact, f='atan', astype={'e':'f'}), + TD('e', f='atan', astype={'e': 'f'}), + TD('fd', dispatch=[('loops_umath_fp', 'fd')]), + TD(inexact, f='atan', astype={'e': 'f'}), TD(P, f='arctan'), ), 'arctanh': Ufunc(1, 1, None, docstrings.get('numpy.core.umath.arctanh'), None, - TD(inexact, f='atanh', astype={'e':'f'}), + TD('e', f='atanh', astype={'e': 'f'}), + TD('fd', dispatch=[('loops_umath_fp', 'fd')]), + TD(inexact, f='atanh', astype={'e': 'f'}), TD(P, f='arctanh'), ), 'cos': Ufunc(1, 1, None, docstrings.get('numpy.core.umath.cos'), None, - TD('e', f='cos', astype={'e':'f'}), + TD('e', f='cos', astype={'e': 'f'}), TD('f', dispatch=[('loops_trigonometric', 'f')]), + TD('d', dispatch=[('loops_umath_fp', 'd')]), TD('fdg' + cmplx, f='cos'), TD(P, f='cos'), ), @@ -686,8 +699,9 @@ defdict = { Ufunc(1, 1, None, docstrings.get('numpy.core.umath.sin'), None, - TD('e', f='sin', astype={'e':'f'}), + TD('e', f='sin', astype={'e': 'f'}), TD('f', dispatch=[('loops_trigonometric', 'f')]), + TD('d', dispatch=[('loops_umath_fp', 'd')]), TD('fdg' + cmplx, f='sin'), TD(P, f='sin'), ), @@ -695,35 +709,43 @@ defdict = { Ufunc(1, 1, None, docstrings.get('numpy.core.umath.tan'), None, - TD(inexact, f='tan', astype={'e':'f'}), + TD('e', f='tan', astype={'e': 'f'}), + TD('fd', dispatch=[('loops_umath_fp', 'fd')]), + TD(inexact, f='tan', astype={'e': 'f'}), TD(P, f='tan'), ), 'cosh': Ufunc(1, 1, None, docstrings.get('numpy.core.umath.cosh'), None, - TD(inexact, f='cosh', astype={'e':'f'}), + TD('e', f='cosh', astype={'e': 'f'}), + TD('fd', dispatch=[('loops_umath_fp', 'fd')]), + TD(inexact, f='cosh', astype={'e': 'f'}), TD(P, f='cosh'), ), 'sinh': Ufunc(1, 1, None, docstrings.get('numpy.core.umath.sinh'), None, - TD(inexact, f='sinh', astype={'e':'f'}), + TD('e', f='sinh', astype={'e': 'f'}), + TD('fd', dispatch=[('loops_umath_fp', 'fd')]), + TD(inexact, f='sinh', astype={'e': 'f'}), TD(P, f='sinh'), ), 'tanh': Ufunc(1, 1, None, docstrings.get('numpy.core.umath.tanh'), None, - TD(inexact, f='tanh', astype={'e':'f'}), + TD('e', f='tanh', astype={'e': 'f'}), + TD('fd', dispatch=[('loops_umath_fp', 'fd')]), + TD(inexact, f='tanh', astype={'e': 'f'}), TD(P, f='tanh'), ), 'exp': Ufunc(1, 1, None, docstrings.get('numpy.core.umath.exp'), None, - TD('e', f='exp', astype={'e':'f'}), + TD('e', f='exp', astype={'e': 'f'}), TD('fd', dispatch=[('loops_exponent_log', 'fd')]), TD('fdg' + cmplx, f='exp'), TD(P, f='exp'), @@ -732,21 +754,25 @@ defdict = { Ufunc(1, 1, None, docstrings.get('numpy.core.umath.exp2'), None, - TD(inexact, f='exp2', astype={'e':'f'}), + TD('e', f='exp2', astype={'e': 'f'}), + TD('fd', dispatch=[('loops_umath_fp', 'fd')]), + TD(inexact, f='exp2', astype={'e': 'f'}), TD(P, f='exp2'), ), 'expm1': Ufunc(1, 1, None, 
docstrings.get('numpy.core.umath.expm1'), None, - TD(inexact, f='expm1', astype={'e':'f'}), + TD('e', f='expm1', astype={'e': 'f'}), + TD('fd', dispatch=[('loops_umath_fp', 'fd')]), + TD(inexact, f='expm1', astype={'e': 'f'}), TD(P, f='expm1'), ), 'log': Ufunc(1, 1, None, docstrings.get('numpy.core.umath.log'), None, - TD('e', f='log', astype={'e':'f'}), + TD('e', f='log', astype={'e': 'f'}), TD('fd', dispatch=[('loops_exponent_log', 'fd')]), TD('fdg' + cmplx, f='log'), TD(P, f='log'), @@ -755,28 +781,34 @@ defdict = { Ufunc(1, 1, None, docstrings.get('numpy.core.umath.log2'), None, - TD(inexact, f='log2', astype={'e':'f'}), + TD('e', f='log2', astype={'e': 'f'}), + TD('fd', dispatch=[('loops_umath_fp', 'fd')]), + TD(inexact, f='log2', astype={'e': 'f'}), TD(P, f='log2'), ), 'log10': Ufunc(1, 1, None, docstrings.get('numpy.core.umath.log10'), None, - TD(inexact, f='log10', astype={'e':'f'}), + TD('e', f='log10', astype={'e': 'f'}), + TD('fd', dispatch=[('loops_umath_fp', 'fd')]), + TD(inexact, f='log10', astype={'e': 'f'}), TD(P, f='log10'), ), 'log1p': Ufunc(1, 1, None, docstrings.get('numpy.core.umath.log1p'), None, - TD(inexact, f='log1p', astype={'e':'f'}), + TD('e', f='log1p', astype={'e': 'f'}), + TD('fd', dispatch=[('loops_umath_fp', 'fd')]), + TD(inexact, f='log1p', astype={'e': 'f'}), TD(P, f='log1p'), ), 'sqrt': Ufunc(1, 1, None, docstrings.get('numpy.core.umath.sqrt'), None, - TD('e', f='sqrt', astype={'e':'f'}), + TD('e', f='sqrt', astype={'e': 'f'}), TD(inexactvec, dispatch=[('loops_unary_fp', 'fd')]), TD('fdg' + cmplx, f='sqrt'), TD(P, f='sqrt'), @@ -785,14 +817,16 @@ defdict = { Ufunc(1, 1, None, docstrings.get('numpy.core.umath.cbrt'), None, - TD(flts, f='cbrt', astype={'e':'f'}), + TD('e', f='cbrt', astype={'e': 'f'}), + TD('fd', dispatch=[('loops_umath_fp', 'fd')]), + TD(flts, f='cbrt', astype={'e': 'f'}), TD(P, f='cbrt'), ), 'ceil': Ufunc(1, 1, None, docstrings.get('numpy.core.umath.ceil'), None, - TD('e', f='ceil', astype={'e':'f'}), + TD('e', f='ceil', astype={'e': 'f'}), TD(inexactvec, simd=[('fma', 'fd'), ('avx512f', 'fd')]), TD('fdg', f='ceil'), TD(O, f='npy_ObjectCeil'), @@ -801,7 +835,7 @@ defdict = { Ufunc(1, 1, None, docstrings.get('numpy.core.umath.trunc'), None, - TD('e', f='trunc', astype={'e':'f'}), + TD('e', f='trunc', astype={'e': 'f'}), TD(inexactvec, simd=[('fma', 'fd'), ('avx512f', 'fd')]), TD('fdg', f='trunc'), TD(O, f='npy_ObjectTrunc'), @@ -810,14 +844,14 @@ defdict = { Ufunc(1, 1, None, docstrings.get('numpy.core.umath.fabs'), None, - TD(flts, f='fabs', astype={'e':'f'}), + TD(flts, f='fabs', astype={'e': 'f'}), TD(P, f='fabs'), ), 'floor': Ufunc(1, 1, None, docstrings.get('numpy.core.umath.floor'), None, - TD('e', f='floor', astype={'e':'f'}), + TD('e', f='floor', astype={'e': 'f'}), TD(inexactvec, simd=[('fma', 'fd'), ('avx512f', 'fd')]), TD('fdg', f='floor'), TD(O, f='npy_ObjectFloor'), @@ -826,7 +860,7 @@ defdict = { Ufunc(1, 1, None, docstrings.get('numpy.core.umath.rint'), None, - TD('e', f='rint', astype={'e':'f'}), + TD('e', f='rint', astype={'e': 'f'}), TD(inexactvec, simd=[('fma', 'fd'), ('avx512f', 'fd')]), TD('fdg' + cmplx, f='rint'), TD(P, f='rint'), @@ -835,7 +869,7 @@ defdict = { Ufunc(2, 1, None, docstrings.get('numpy.core.umath.arctan2'), None, - TD(flts, f='atan2', astype={'e':'f'}), + TD(flts, f='atan2', astype={'e': 'f'}), TD(P, f='arctan2'), ), 'remainder': @@ -858,7 +892,7 @@ defdict = { Ufunc(2, 1, Zero, docstrings.get('numpy.core.umath.hypot'), None, - TD(flts, f='hypot', astype={'e':'f'}), + TD(flts, f='hypot', astype={'e': 
'f'}), TD(P, f='hypot'), ), 'isnan': diff --git a/numpy/core/code_generators/numpy_api.py b/numpy/core/code_generators/numpy_api.py index fbd323368..3813c6ad7 100644 --- a/numpy/core/code_generators/numpy_api.py +++ b/numpy/core/code_generators/numpy_api.py @@ -76,9 +76,9 @@ multiarray_types_api = { # End 1.6 API } -#define NPY_NUMUSERTYPES (*(int *)PyArray_API[6]) -#define PyBoolArrType_Type (*(PyTypeObject *)PyArray_API[7]) -#define _PyArrayScalar_BoolValues ((PyBoolScalarObject *)PyArray_API[8]) +# define NPY_NUMUSERTYPES (*(int *)PyArray_API[6]) +# define PyBoolArrType_Type (*(PyTypeObject *)PyArray_API[7]) +# define _PyArrayScalar_BoolValues ((PyBoolScalarObject *)PyArray_API[8]) multiarray_funcs_api = { 'PyArray_GetNDArrayCVersion': (0,), @@ -350,6 +350,9 @@ multiarray_funcs_api = { 'PyArray_ResolveWritebackIfCopy': (302,), 'PyArray_SetWritebackIfCopyBase': (303,), # End 1.14 API + 'PyDataMem_SetHandler': (304,), + 'PyDataMem_GetHandler': (305,), + # End 1.21 API } ufunc_types_api = { diff --git a/numpy/core/code_generators/ufunc_docstrings.py b/numpy/core/code_generators/ufunc_docstrings.py index f19946be4..8d9316f2c 100644 --- a/numpy/core/code_generators/ufunc_docstrings.py +++ b/numpy/core/code_generators/ufunc_docstrings.py @@ -201,7 +201,8 @@ add_newdoc('numpy.core.umath', 'arccos', References ---------- M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions", - 10th printing, 1964, pp. 79. http://www.math.sfu.ca/~cbm/aands/ + 10th printing, 1964, pp. 79. + https://personal.math.ubc.ca/~cbm/aands/page_79.htm Examples -------- @@ -258,7 +259,8 @@ add_newdoc('numpy.core.umath', 'arccosh', References ---------- .. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions", - 10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/ + 10th printing, 1964, pp. 86. + https://personal.math.ubc.ca/~cbm/aands/page_86.htm .. [2] Wikipedia, "Inverse hyperbolic function", https://en.wikipedia.org/wiki/Arccosh @@ -312,7 +314,7 @@ add_newdoc('numpy.core.umath', 'arcsin', ---------- Abramowitz, M. and Stegun, I. A., *Handbook of Mathematical Functions*, 10th printing, New York: Dover, 1964, pp. 79ff. - http://www.math.sfu.ca/~cbm/aands/ + https://personal.math.ubc.ca/~cbm/aands/page_79.htm Examples -------- @@ -360,7 +362,8 @@ add_newdoc('numpy.core.umath', 'arcsinh', References ---------- .. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions", - 10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/ + 10th printing, 1964, pp. 86. + https://personal.math.ubc.ca/~cbm/aands/page_86.htm .. [2] Wikipedia, "Inverse hyperbolic function", https://en.wikipedia.org/wiki/Arcsinh @@ -415,7 +418,7 @@ add_newdoc('numpy.core.umath', 'arctan', ---------- Abramowitz, M. and Stegun, I. A., *Handbook of Mathematical Functions*, 10th printing, New York: Dover, 1964, pp. 79. - http://www.math.sfu.ca/~cbm/aands/ + https://personal.math.ubc.ca/~cbm/aands/page_79.htm Examples -------- @@ -560,7 +563,8 @@ add_newdoc('numpy.core.umath', 'arctanh', References ---------- .. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions", - 10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/ + 10th printing, 1964, pp. 86. + https://personal.math.ubc.ca/~cbm/aands/page_86.htm .. [2] Wikipedia, "Inverse hyperbolic function", https://en.wikipedia.org/wiki/Arctanh @@ -664,7 +668,7 @@ add_newdoc('numpy.core.umath', 'bitwise_or', Examples -------- - The number 13 has the binaray representation ``00001101``. 
Likewise, + The number 13 has the binary representation ``00001101``. Likewise, 16 is represented by ``00010000``. The bit-wise OR of 13 and 16 is then ``000111011``, or 29: @@ -1087,9 +1091,7 @@ add_newdoc('numpy.core.umath', 'divide', Behavior on division by zero can be changed using ``seterr``. - In Python 2, when both ``x1`` and ``x2`` are of an integer type, - ``divide`` will behave like ``floor_divide``. In Python 3, it behaves - like ``true_divide``. + Behaves like ``true_divide``. Examples -------- @@ -1102,27 +1104,6 @@ add_newdoc('numpy.core.umath', 'divide', [ Inf, 4. , 2.5], [ Inf, 7. , 4. ]]) - Note the behavior with integer types (Python 2 only): - - >>> np.divide(2, 4) - 0 - >>> np.divide(2, 4.) - 0.5 - - Division by zero always yields zero in integer arithmetic (again, - Python 2 only), and does not raise an exception or a warning: - - >>> np.divide(np.array([0, 1], dtype=int), np.array([0, 0], dtype=int)) - array([0, 0]) - - Division by zero can, however, be caught using ``seterr``: - - >>> old_err_state = np.seterr(divide='raise') - >>> np.divide(1, 0) - Traceback (most recent call last): - File "<stdin>", line 1, in <module> - FloatingPointError: divide by zero encountered in divide - >>> ignored_states = np.seterr(**old_err_state) >>> np.divide(1, 0) 0 @@ -1222,7 +1203,7 @@ add_newdoc('numpy.core.umath', 'exp', https://en.wikipedia.org/wiki/Exponential_function .. [2] M. Abramovitz and I. A. Stegun, "Handbook of Mathematical Functions with Formulas, Graphs, and Mathematical Tables," Dover, 1964, p. 69, - http://www.math.sfu.ca/~cbm/aands/page_69.htm + https://personal.math.ubc.ca/~cbm/aands/page_69.htm Examples -------- @@ -1439,7 +1420,7 @@ add_newdoc('numpy.core.umath', 'floor_divide', add_newdoc('numpy.core.umath', 'fmod', """ - Return the element-wise remainder of division. + Returns the element-wise remainder of division. This is the NumPy implementation of the C library function fmod, the remainder has the same sign as the dividend `x1`. It is equivalent to @@ -2052,7 +2033,8 @@ add_newdoc('numpy.core.umath', 'log', References ---------- .. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions", - 10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/ + 10th printing, 1964, pp. 67. + https://personal.math.ubc.ca/~cbm/aands/page_67.htm .. [2] Wikipedia, "Logarithm". https://en.wikipedia.org/wiki/Logarithm Examples @@ -2101,7 +2083,8 @@ add_newdoc('numpy.core.umath', 'log10', References ---------- .. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions", - 10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/ + 10th printing, 1964, pp. 67. + https://personal.math.ubc.ca/~cbm/aands/page_67.htm .. [2] Wikipedia, "Logarithm". https://en.wikipedia.org/wiki/Logarithm Examples @@ -2289,7 +2272,8 @@ add_newdoc('numpy.core.umath', 'log1p', References ---------- .. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions", - 10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/ + 10th printing, 1964, pp. 67. + https://personal.math.ubc.ca/~cbm/aands/page_67.htm .. [2] Wikipedia, "Logarithm". https://en.wikipedia.org/wiki/Logarithm Examples @@ -3081,8 +3065,14 @@ add_newdoc('numpy.core.umath', 'power', First array elements raised to powers from second array, element-wise. Raise each base in `x1` to the positionally-corresponding power in - `x2`. `x1` and `x2` must be broadcastable to the same shape. Note that an - integer type raised to a negative integer power will raise a ValueError. + `x2`. 
`x1` and `x2` must be broadcastable to the same shape. + + An integer type raised to a negative integer power will raise a + ``ValueError``. + + Negative values raised to a non-integral value will return ``nan``. + To get complex results, cast the input to complex, or specify the + ``dtype`` to be ``complex`` (see the example below). Parameters ---------- @@ -3137,6 +3127,21 @@ add_newdoc('numpy.core.umath', 'power', >>> x1 ** x2 array([ 0, 1, 8, 27, 16, 5]) + Negative values raised to a non-integral value will result in ``nan`` + (and a warning will be generated). + + >>> x3 = np.array([-1.0, -4.0]) + >>> with np.errstate(invalid='ignore'): + ... p = np.power(x3, 1.5) + ... + >>> p + array([nan, nan]) + + To get complex results, give the argument ``dtype=complex``. + + >>> np.power(x3, 1.5, dtype=complex) + array([-1.83697020e-16-1.j, -1.46957616e-15-8.j]) + """) add_newdoc('numpy.core.umath', 'float_power', @@ -3150,6 +3155,10 @@ add_newdoc('numpy.core.umath', 'float_power', inexact. The intent is that the function will return a usable result for negative powers and seldom overflow for positive powers. + Negative values raised to a non-integral value will return ``nan``. + To get complex results, cast the input to complex, or specify the + ``dtype`` to be ``complex`` (see the example below). + .. versionadded:: 1.12.0 Parameters @@ -3197,6 +3206,21 @@ add_newdoc('numpy.core.umath', 'float_power', array([[ 0., 1., 8., 27., 16., 5.], [ 0., 1., 8., 27., 16., 5.]]) + Negative values raised to a non-integral value will result in ``nan`` + (and a warning will be generated). + + >>> x3 = np.array([-1, -4]) + >>> with np.errstate(invalid='ignore'): + ... p = np.float_power(x3, 1.5) + ... + >>> p + array([nan, nan]) + + To get complex results, give the argument ``dtype=complex``. + + >>> np.float_power(x3, 1.5, dtype=complex) + array([-1.83697020e-16-1.j, -1.46957616e-15-8.j]) + """) add_newdoc('numpy.core.umath', 'radians', @@ -3308,7 +3332,7 @@ add_newdoc('numpy.core.umath', 'reciprocal', add_newdoc('numpy.core.umath', 'remainder', """ - Return element-wise remainder of division. + Returns the element-wise remainder of division. Computes the remainder complementary to the `floor_divide` function. It is equivalent to the Python modulus operator``x1 % x2`` and has the same sign @@ -4002,7 +4026,7 @@ add_newdoc('numpy.core.umath', 'tanh', ---------- .. [1] M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions. New York, NY: Dover, 1972, pg. 83. - http://www.math.sfu.ca/~cbm/aands/ + https://personal.math.ubc.ca/~cbm/aands/page_83.htm .. [2] Wikipedia, "Hyperbolic function", https://en.wikipedia.org/wiki/Hyperbolic_function @@ -4031,9 +4055,8 @@ add_newdoc('numpy.core.umath', 'true_divide', """ Returns a true division of the inputs, element-wise. - Instead of the Python traditional 'floor division', this returns a true - division. True division adjusts the output type to present the best - answer, regardless of input types. + Unlike 'floor division', true division adjusts the output type + to present the best answer, regardless of input types. Parameters ---------- diff --git a/numpy/core/defchararray.py b/numpy/core/defchararray.py index e264fa210..3521e778e 100644 --- a/numpy/core/defchararray.py +++ b/numpy/core/defchararray.py @@ -2609,6 +2609,7 @@ class chararray(ndarray): return isdecimal(self) +@set_module("numpy.char") def array(obj, itemsize=None, copy=True, unicode=None, order=None): """ Create a `chararray`. 
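The `set_module` decorator only adjusts introspection metadata on these two public helpers; roughly (hypothetical session, not part of the diff):

    >>> import numpy as np
    >>> np.char.array.__module__
    'numpy.char'

so reprs and generated docs attribute them to `numpy.char` rather than `numpy.core.defchararray`.
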
@@ -2742,6 +2743,7 @@ def array(obj, itemsize=None, copy=True, unicode=None, order=None): return val.view(chararray) +@set_module("numpy.char") def asarray(obj, itemsize=None, unicode=None, order=None): """ Convert the input to a `chararray`, copying the data only if diff --git a/numpy/core/defchararray.pyi b/numpy/core/defchararray.pyi new file mode 100644 index 000000000..28d247b05 --- /dev/null +++ b/numpy/core/defchararray.pyi @@ -0,0 +1,422 @@ +from typing import ( + Literal as L, + overload, + TypeVar, + Any, + List, +) + +from numpy import ( + chararray as chararray, + dtype, + str_, + bytes_, + int_, + bool_, + object_, + _OrderKACF, +) + +from numpy.typing import ( + NDArray, + _ArrayLikeStr_co as U_co, + _ArrayLikeBytes_co as S_co, + _ArrayLikeInt_co as i_co, + _ArrayLikeBool_co as b_co, +) + +from numpy.core.multiarray import compare_chararrays as compare_chararrays + +_SCT = TypeVar("_SCT", str_, bytes_) +_CharArray = chararray[Any, dtype[_SCT]] + +__all__: List[str] + +# Comparison +@overload +def equal(x1: U_co, x2: U_co) -> NDArray[bool_]: ... +@overload +def equal(x1: S_co, x2: S_co) -> NDArray[bool_]: ... + +@overload +def not_equal(x1: U_co, x2: U_co) -> NDArray[bool_]: ... +@overload +def not_equal(x1: S_co, x2: S_co) -> NDArray[bool_]: ... + +@overload +def greater_equal(x1: U_co, x2: U_co) -> NDArray[bool_]: ... +@overload +def greater_equal(x1: S_co, x2: S_co) -> NDArray[bool_]: ... + +@overload +def less_equal(x1: U_co, x2: U_co) -> NDArray[bool_]: ... +@overload +def less_equal(x1: S_co, x2: S_co) -> NDArray[bool_]: ... + +@overload +def greater(x1: U_co, x2: U_co) -> NDArray[bool_]: ... +@overload +def greater(x1: S_co, x2: S_co) -> NDArray[bool_]: ... + +@overload +def less(x1: U_co, x2: U_co) -> NDArray[bool_]: ... +@overload +def less(x1: S_co, x2: S_co) -> NDArray[bool_]: ... + +# String operations +@overload +def add(x1: U_co, x2: U_co) -> NDArray[str_]: ... +@overload +def add(x1: S_co, x2: S_co) -> NDArray[bytes_]: ... + +@overload +def multiply(a: U_co, i: i_co) -> NDArray[str_]: ... +@overload +def multiply(a: S_co, i: i_co) -> NDArray[bytes_]: ... + +@overload +def mod(a: U_co, value: Any) -> NDArray[str_]: ... +@overload +def mod(a: S_co, value: Any) -> NDArray[bytes_]: ... + +@overload +def capitalize(a: U_co) -> NDArray[str_]: ... +@overload +def capitalize(a: S_co) -> NDArray[bytes_]: ... + +@overload +def center(a: U_co, width: i_co, fillchar: U_co = ...) -> NDArray[str_]: ... +@overload +def center(a: S_co, width: i_co, fillchar: S_co = ...) -> NDArray[bytes_]: ... + +def decode( + a: S_co, + encoding: None | str = ..., + errors: None | str = ..., +) -> NDArray[str_]: ... + +def encode( + a: U_co, + encoding: None | str = ..., + errors: None | str = ..., +) -> NDArray[bytes_]: ... + +@overload +def expandtabs(a: U_co, tabsize: i_co = ...) -> NDArray[str_]: ... +@overload +def expandtabs(a: S_co, tabsize: i_co = ...) -> NDArray[bytes_]: ... + +@overload +def join(sep: U_co, seq: U_co) -> NDArray[str_]: ... +@overload +def join(sep: S_co, seq: S_co) -> NDArray[bytes_]: ... + +@overload +def ljust(a: U_co, width: i_co, fillchar: U_co = ...) -> NDArray[str_]: ... +@overload +def ljust(a: S_co, width: i_co, fillchar: S_co = ...) -> NDArray[bytes_]: ... + +@overload +def lower(a: U_co) -> NDArray[str_]: ... +@overload +def lower(a: S_co) -> NDArray[bytes_]: ... + +@overload +def lstrip(a: U_co, chars: None | U_co = ...) -> NDArray[str_]: ... +@overload +def lstrip(a: S_co, chars: None | S_co = ...) -> NDArray[bytes_]: ... 
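+# (Editorial note: this pattern repeats for each operation below -- one
+# overload for str_-based array-likes and one for bytes_-based ones, so the
+# result dtype mirrors the input dtype.)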
+ +@overload +def partition(a: U_co, sep: U_co) -> NDArray[str_]: ... +@overload +def partition(a: S_co, sep: S_co) -> NDArray[bytes_]: ... + +@overload +def replace( + a: U_co, + old: U_co, + new: U_co, + count: None | i_co = ..., +) -> NDArray[str_]: ... +@overload +def replace( + a: S_co, + old: S_co, + new: S_co, + count: None | i_co = ..., +) -> NDArray[bytes_]: ... + +@overload +def rjust( + a: U_co, + width: i_co, + fillchar: U_co = ..., +) -> NDArray[str_]: ... +@overload +def rjust( + a: S_co, + width: i_co, + fillchar: S_co = ..., +) -> NDArray[bytes_]: ... + +@overload +def rpartition(a: U_co, sep: U_co) -> NDArray[str_]: ... +@overload +def rpartition(a: S_co, sep: S_co) -> NDArray[bytes_]: ... + +@overload +def rsplit( + a: U_co, + sep: None | U_co = ..., + maxsplit: None | i_co = ..., +) -> NDArray[object_]: ... +@overload +def rsplit( + a: S_co, + sep: None | S_co = ..., + maxsplit: None | i_co = ..., +) -> NDArray[object_]: ... + +@overload +def rstrip(a: U_co, chars: None | U_co = ...) -> NDArray[str_]: ... +@overload +def rstrip(a: S_co, chars: None | S_co = ...) -> NDArray[bytes_]: ... + +@overload +def split( + a: U_co, + sep: None | U_co = ..., + maxsplit: None | i_co = ..., +) -> NDArray[object_]: ... +@overload +def split( + a: S_co, + sep: None | S_co = ..., + maxsplit: None | i_co = ..., +) -> NDArray[object_]: ... + +@overload +def splitlines(a: U_co, keepends: None | b_co = ...) -> NDArray[object_]: ... +@overload +def splitlines(a: S_co, keepends: None | b_co = ...) -> NDArray[object_]: ... + +@overload +def strip(a: U_co, chars: None | U_co = ...) -> NDArray[str_]: ... +@overload +def strip(a: S_co, chars: None | S_co = ...) -> NDArray[bytes_]: ... + +@overload +def swapcase(a: U_co) -> NDArray[str_]: ... +@overload +def swapcase(a: S_co) -> NDArray[bytes_]: ... + +@overload +def title(a: U_co) -> NDArray[str_]: ... +@overload +def title(a: S_co) -> NDArray[bytes_]: ... + +@overload +def translate( + a: U_co, + table: U_co, + deletechars: None | U_co = ..., +) -> NDArray[str_]: ... +@overload +def translate( + a: S_co, + table: S_co, + deletechars: None | S_co = ..., +) -> NDArray[bytes_]: ... + +@overload +def upper(a: U_co) -> NDArray[str_]: ... +@overload +def upper(a: S_co) -> NDArray[bytes_]: ... + +@overload +def zfill(a: U_co, width: i_co) -> NDArray[str_]: ... +@overload +def zfill(a: S_co, width: i_co) -> NDArray[bytes_]: ... + +# String information +@overload +def count( + a: U_co, + sub: U_co, + start: i_co = ..., + end: None | i_co = ..., +) -> NDArray[int_]: ... +@overload +def count( + a: S_co, + sub: S_co, + start: i_co = ..., + end: None | i_co = ..., +) -> NDArray[int_]: ... + +@overload +def endswith( + a: U_co, + suffix: U_co, + start: i_co = ..., + end: None | i_co = ..., +) -> NDArray[bool_]: ... +@overload +def endswith( + a: S_co, + suffix: S_co, + start: i_co = ..., + end: None | i_co = ..., +) -> NDArray[bool_]: ... + +@overload +def find( + a: U_co, + sub: U_co, + start: i_co = ..., + end: None | i_co = ..., +) -> NDArray[int_]: ... +@overload +def find( + a: S_co, + sub: S_co, + start: i_co = ..., + end: None | i_co = ..., +) -> NDArray[int_]: ... + +@overload +def index( + a: U_co, + sub: U_co, + start: i_co = ..., + end: None | i_co = ..., +) -> NDArray[int_]: ... +@overload +def index( + a: S_co, + sub: S_co, + start: i_co = ..., + end: None | i_co = ..., +) -> NDArray[int_]: ... + +def isalpha(a: U_co | S_co) -> NDArray[bool_]: ... +def isalnum(a: U_co | S_co) -> NDArray[bool_]: ... 
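Note that `split`, `rsplit`, and `splitlines` above are annotated as returning `NDArray[object_]` rather than a string dtype: the per-element results are ragged lists, so an object array is the only faithful container. For example:

```python
import numpy as np

parts = np.char.split(np.array(["a,b", "c,d,e"]), sep=",")
print(parts.dtype)  # object
print(parts[1])     # ['c', 'd', 'e'] -- a plain Python list per element
```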
+def isdecimal(a: U_co | S_co) -> NDArray[bool_]: ... +def isdigit(a: U_co | S_co) -> NDArray[bool_]: ... +def islower(a: U_co | S_co) -> NDArray[bool_]: ... +def isnumeric(a: U_co | S_co) -> NDArray[bool_]: ... +def isspace(a: U_co | S_co) -> NDArray[bool_]: ... +def istitle(a: U_co | S_co) -> NDArray[bool_]: ... +def isupper(a: U_co | S_co) -> NDArray[bool_]: ... + +@overload +def rfind( + a: U_co, + sub: U_co, + start: i_co = ..., + end: None | i_co = ..., +) -> NDArray[int_]: ... +@overload +def rfind( + a: S_co, + sub: S_co, + start: i_co = ..., + end: None | i_co = ..., +) -> NDArray[int_]: ... + +@overload +def rindex( + a: U_co, + sub: U_co, + start: i_co = ..., + end: None | i_co = ..., +) -> NDArray[int_]: ... +@overload +def rindex( + a: S_co, + sub: S_co, + start: i_co = ..., + end: None | i_co = ..., +) -> NDArray[int_]: ... + +@overload +def startswith( + a: U_co, + prefix: U_co, + start: i_co = ..., + end: None | i_co = ..., +) -> NDArray[bool_]: ... +@overload +def startswith( + a: S_co, + prefix: S_co, + start: i_co = ..., + end: None | i_co = ..., +) -> NDArray[bool_]: ... + +def str_len(A: U_co | S_co) -> NDArray[int_]: ... + +# Overload 1 and 2: str- or bytes-based array-likes +# overload 3: arbitrary object with unicode=False (-> bytes_) +# overload 4: arbitrary object with unicode=True (-> str_) +@overload +def array( + obj: U_co, + itemsize: None | int = ..., + copy: bool = ..., + unicode: L[False] = ..., + order: _OrderKACF = ..., +) -> _CharArray[str_]: ... +@overload +def array( + obj: S_co, + itemsize: None | int = ..., + copy: bool = ..., + unicode: L[False] = ..., + order: _OrderKACF = ..., +) -> _CharArray[bytes_]: ... +@overload +def array( + obj: object, + itemsize: None | int = ..., + copy: bool = ..., + unicode: L[False] = ..., + order: _OrderKACF = ..., +) -> _CharArray[bytes_]: ... +@overload +def array( + obj: object, + itemsize: None | int = ..., + copy: bool = ..., + unicode: L[True] = ..., + order: _OrderKACF = ..., +) -> _CharArray[str_]: ... + +@overload +def asarray( + obj: U_co, + itemsize: None | int = ..., + unicode: L[False] = ..., + order: _OrderKACF = ..., +) -> _CharArray[str_]: ... +@overload +def asarray( + obj: S_co, + itemsize: None | int = ..., + unicode: L[False] = ..., + order: _OrderKACF = ..., +) -> _CharArray[bytes_]: ... +@overload +def asarray( + obj: object, + itemsize: None | int = ..., + unicode: L[False] = ..., + order: _OrderKACF = ..., +) -> _CharArray[bytes_]: ... +@overload +def asarray( + obj: object, + itemsize: None | int = ..., + unicode: L[True] = ..., + order: _OrderKACF = ..., +) -> _CharArray[str_]: ... diff --git a/numpy/core/einsumfunc.py b/numpy/core/einsumfunc.py index 18157641a..c78d3db23 100644 --- a/numpy/core/einsumfunc.py +++ b/numpy/core/einsumfunc.py @@ -987,7 +987,7 @@ def einsum_path(*operands, optimize='greedy', einsum_call=False): def _einsum_dispatcher(*operands, out=None, optimize=None, **kwargs): - # Arguably we dispatch on more arguments that we really should; see note in + # Arguably we dispatch on more arguments than we really should; see note in # _einsum_path_dispatcher for why. yield from operands yield out diff --git a/numpy/core/einsumfunc.pyi b/numpy/core/einsumfunc.pyi index 52025d502..aabb04c47 100644 --- a/numpy/core/einsumfunc.pyi +++ b/numpy/core/einsumfunc.pyi @@ -41,7 +41,7 @@ __all__: List[str] # TODO: Properly handle the `casting`-based combinatorics # TODO: We need to evaluate the content `__subscripts` in order # to identify whether or an array or scalar is returned. 
At a cursory -# glance this seems like something that can quite easilly be done with +# glance this seems like something that can quite easily be done with # a mypy plugin. # Something like `is_scalar = bool(__subscripts.partition("->")[-1])` @overload diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py index 5ecb1e666..3242124ac 100644 --- a/numpy/core/fromnumeric.py +++ b/numpy/core/fromnumeric.py @@ -689,6 +689,9 @@ def partition(a, kth, axis=-1, kind='introselect', order=None): it. The order of all elements in the partitions is undefined. If provided with a sequence of k-th it will partition all elements indexed by k-th of them into their sorted position at once. + + .. deprecated:: 1.22.0 + Passing booleans as index is deprecated. axis : int or None, optional Axis along which to sort. If None, the array is flattened before sorting. The default is -1, which sorts along the last axis. @@ -781,6 +784,9 @@ def argpartition(a, kth, axis=-1, kind='introselect', order=None): elements in the partitions is undefined. If provided with a sequence of k-th it will partition all of them into their sorted position at once. + + .. deprecated:: 1.22.0 + Passing booleans as index is deprecated. axis : int or None, optional Axis along which to sort. The default is -1 (the last axis). If None, the flattened array is used. @@ -1138,6 +1144,8 @@ def argmax(a, axis=None, out=None, *, keepdims=np._NoValue): in the result as dimensions with size one. With this option, the result will broadcast correctly against the array. + .. versionadded:: 1.22.0 + Returns ------- index_array : ndarray of ints @@ -1232,6 +1240,8 @@ def argmin(a, axis=None, out=None, *, keepdims=np._NoValue): in the result as dimensions with size one. With this option, the result will broadcast correctly against the array. + .. versionadded:: 1.22.0 + Returns ------- index_array : ndarray of ints diff --git a/numpy/core/function_base.pyi b/numpy/core/function_base.pyi index c35629aa7..68d3b3a98 100644 --- a/numpy/core/function_base.pyi +++ b/numpy/core/function_base.pyi @@ -1,4 +1,4 @@ -from typing import overload, Tuple, Union, Sequence, Any, SupportsIndex, Literal +from typing import overload, Tuple, Union, Sequence, Any, SupportsIndex, Literal, List from numpy import ndarray from numpy.typing import ArrayLike, DTypeLike, _SupportsArray, _NumberLike_co @@ -8,6 +8,9 @@ _ArrayLikeNested = Sequence[Sequence[Any]] _ArrayLikeNumber = Union[ _NumberLike_co, Sequence[_NumberLike_co], ndarray, _SupportsArray, _ArrayLikeNested ] + +__all__: List[str] + @overload def linspace( start: _ArrayLikeNumber, @@ -47,3 +50,11 @@ def geomspace( dtype: DTypeLike = ..., axis: SupportsIndex = ..., ) -> ndarray: ... + +# Re-exported to `np.lib.function_base` +def add_newdoc( + place: str, + obj: str, + doc: str | Tuple[str, str] | List[Tuple[str, str]], + warn_on_python: bool = ..., +) -> None: ... diff --git a/numpy/core/getlimits.py b/numpy/core/getlimits.py index 0f7031bac..ab4a4d2be 100644 --- a/numpy/core/getlimits.py +++ b/numpy/core/getlimits.py @@ -5,13 +5,12 @@ __all__ = ['finfo', 'iinfo'] import warnings -from .machar import MachAr +from ._machar import MachAr from .overrides import set_module from . import numeric from . import numerictypes as ntypes from .numeric import array, inf, NaN from .umath import log10, exp2, nextafter, isnan -from . import umath def _fr0(a): @@ -386,6 +385,8 @@ class finfo: machar : MachAr The object which calculated these parameters and holds more detailed information. + + .. 
deprecated:: 1.22 machep : int The exponent that yields `eps`. max : floating point number of the appropriate type @@ -502,7 +503,7 @@ class finfo: self.eps = machar.eps.flat[0] self.nexp = machar.iexp self.nmant = machar.it - self.machar = machar + self._machar = machar self._str_tiny = machar._str_xmin.strip() self._str_max = machar._str_xmax.strip() self._str_epsneg = machar._str_epsneg.strip() @@ -552,11 +553,11 @@ class finfo: """ # This check is necessary because the value for smallest_normal is # platform dependent for longdouble types. - if isnan(self.machar.smallest_normal.flat[0]): + if isnan(self._machar.smallest_normal.flat[0]): warnings.warn( 'The value of smallest normal is undefined for double double', UserWarning, stacklevel=2) - return self.machar.smallest_normal.flat[0] + return self._machar.smallest_normal.flat[0] @property def tiny(self): @@ -575,6 +576,20 @@ class finfo: """ return self.smallest_normal + @property + def machar(self): + """The object which calculated these parameters and holds more + detailed information. + + .. deprecated:: 1.22 + """ + # Deprecated 2021-10-27, NumPy 1.22 + warnings.warn( + "`finfo.machar` is deprecated (NumPy 1.22)", + DeprecationWarning, stacklevel=2, + ) + return self._machar + @set_module('numpy') class iinfo: diff --git a/numpy/core/getlimits.pyi b/numpy/core/getlimits.pyi index ca22e18f7..66d062995 100644 --- a/numpy/core/getlimits.pyi +++ b/numpy/core/getlimits.pyi @@ -1,58 +1,8 @@ -from typing import Any, Generic, List, Type, TypeVar +from typing import List from numpy import ( finfo as finfo, iinfo as iinfo, - floating, - signedinteger, ) -from numpy.typing import NBitBase, NDArray - -_NBit = TypeVar("_NBit", bound=NBitBase) - __all__: List[str] - -class MachArLike(Generic[_NBit]): - def __init__( - self, - ftype: Type[floating[_NBit]], - *, - eps: floating[Any], - epsneg: floating[Any], - huge: floating[Any], - tiny: floating[Any], - ibeta: int, - smallest_subnormal: None | floating[Any] = ..., - # Expand `**kwargs` into keyword-only arguments - machep: int, - negep: int, - minexp: int, - maxexp: int, - it: int, - iexp: int, - irnd: int, - ngrd: int, - ) -> None: ... - @property - def smallest_subnormal(self) -> NDArray[floating[_NBit]]: ... 
- eps: NDArray[floating[_NBit]] - epsilon: NDArray[floating[_NBit]] - epsneg: NDArray[floating[_NBit]] - huge: NDArray[floating[_NBit]] - ibeta: signedinteger[_NBit] - iexp: int - irnd: int - it: int - machep: int - maxexp: int - minexp: int - negep: int - ngrd: int - precision: int - resolution: NDArray[floating[_NBit]] - smallest_normal: NDArray[floating[_NBit]] - tiny: NDArray[floating[_NBit]] - title: str - xmax: NDArray[floating[_NBit]] - xmin: NDArray[floating[_NBit]] diff --git a/numpy/core/include/numpy/.doxyfile b/numpy/core/include/numpy/.doxyfile new file mode 100644 index 000000000..ed2aefff7 --- /dev/null +++ b/numpy/core/include/numpy/.doxyfile @@ -0,0 +1,2 @@ +INCLUDE_PATH += @CUR_DIR +PREDEFINED += NPY_INTERNAL_BUILD diff --git a/numpy/core/include/numpy/_neighborhood_iterator_imp.h b/numpy/core/include/numpy/_neighborhood_iterator_imp.h index e8860cbc7..07e2363d0 100644 --- a/numpy/core/include/numpy/_neighborhood_iterator_imp.h +++ b/numpy/core/include/numpy/_neighborhood_iterator_imp.h @@ -1,4 +1,4 @@ -#ifndef _NPY_INCLUDE_NEIGHBORHOOD_IMP +#ifndef NUMPY_CORE_INCLUDE_NUMPY__NEIGHBORHOOD_IMP_H_ #error You should not include this header directly #endif /* diff --git a/numpy/core/include/numpy/arrayobject.h b/numpy/core/include/numpy/arrayobject.h index 4f46d6b1a..da47bb096 100644 --- a/numpy/core/include/numpy/arrayobject.h +++ b/numpy/core/include/numpy/arrayobject.h @@ -1,4 +1,5 @@ -#ifndef Py_ARRAYOBJECT_H +#ifndef NUMPY_CORE_INCLUDE_NUMPY_ARRAYOBJECT_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_ARRAYOBJECT_H_ #define Py_ARRAYOBJECT_H #include "ndarrayobject.h" @@ -8,4 +9,4 @@ #include "noprefix.h" #endif -#endif +#endif /* NUMPY_CORE_INCLUDE_NUMPY_ARRAYOBJECT_H_ */ diff --git a/numpy/core/include/numpy/arrayscalars.h b/numpy/core/include/numpy/arrayscalars.h index 14a31988f..a20a68016 100644 --- a/numpy/core/include/numpy/arrayscalars.h +++ b/numpy/core/include/numpy/arrayscalars.h @@ -1,5 +1,5 @@ -#ifndef _NPY_ARRAYSCALARS_H_ -#define _NPY_ARRAYSCALARS_H_ +#ifndef NUMPY_CORE_INCLUDE_NUMPY_ARRAYSCALARS_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_ARRAYSCALARS_H_ #ifndef _MULTIARRAYMODULE typedef struct { @@ -179,4 +179,4 @@ typedef struct { #define PyArrayScalar_ASSIGN(obj, cls, val) \ PyArrayScalar_VAL(obj, cls) = val -#endif +#endif /* NUMPY_CORE_INCLUDE_NUMPY_ARRAYSCALARS_H_ */ diff --git a/numpy/core/include/numpy/experimental_dtype_api.h b/numpy/core/include/numpy/experimental_dtype_api.h new file mode 100644 index 000000000..554c7fb6c --- /dev/null +++ b/numpy/core/include/numpy/experimental_dtype_api.h @@ -0,0 +1,380 @@ +/* + * This header exports the new experimental DType API as proposed in + * NEPs 41 to 43. For background, please check these NEPs. Otherwise, + * this header also serves as documentation for the time being. + * + * Please do not hesitate to contact @seberg with questions. This is + * developed together with https://github.com/seberg/experimental_user_dtypes + * and those interested in experimenting are encouraged to contribute there. + * + * To use the functions defined in the header, call:: + * + * if (import_experimental_dtype_api(version) < 0) { + * return NULL; + * } + * + * in your module init. (A version mismatch will be reported, just update + * to the correct one, this will alert you of possible changes.) + * + * The following lists the main symbols currently exported. Please do not + * hesitate to ask for help or clarification: + * + * - PyUFunc_AddLoopFromSpec: + * + * Register a new loop for a ufunc. 
This uses the `PyArrayMethod_Spec`
+ * which must be filled in (see in-line comments).
+ *
+ * - PyUFunc_AddPromoter:
+ *
+ * Register a new promoter for a ufunc. A promoter is a function stored
+ * in a PyCapsule (see in-line comments). It is passed the operation and
+ * requested DType signatures and can mutate them to attempt a new search
+ * for a matching loop/promoter.
+ * E.g. for Numba a promoter could even add the desired loop.
+ *
+ * - PyArrayInitDTypeMeta_FromSpec:
+ *
+ * Initialize a new DType. It must currently be a static Python C type
+ * that is declared as `PyArray_DTypeMeta` and not `PyTypeObject`.
+ * Further, it must subclass `np.dtype` and set its type to
+ * `PyArrayDTypeMeta_Type` (before calling `PyType_Ready()`).
+ *
+ * - PyArray_CommonDType:
+ *
+ * Find the common-dtype ("promotion") for two DType classes. Similar
+ * to `np.result_type`, but works on the classes and not instances.
+ *
+ * - PyArray_PromoteDTypeSequence:
+ *
+ * Same as CommonDType, but works with an arbitrary number of DTypes.
+ * This function is smarter and can often return successful and unambiguous
+ * results when `common_dtype(common_dtype(dt1, dt2), dt3)` would
+ * depend on the operation order or fail. Nevertheless, DTypes should
+ * aim to ensure that their common-dtype implementation is associative
+ * and commutative! (Mainly, unsigned and signed integers are not.)
+ *
+ * For guaranteed consistent results DTypes must implement common-DType
+ * "transitively". If A promotes B and B promotes C, then A must generally
+ * also promote C; where "promotes" means implements the promotion.
+ * (There are some exceptions for abstract DTypes.)
+ *
+ * WARNING
+ * =======
+ *
+ * By using this header, you understand that this is a fully experimental
+ * exposure. Details are expected to change, and some options may have no
+ * effect. (Please contact @seberg if you have questions!)
+ * If the exposure stops working, please file a bug report with NumPy.
+ * Further, a DType created using this API/header should still be expected
+ * to be incompatible with some functionality inside and outside of NumPy.
+ * In this case crashes must be expected. Please report any such problems
+ * so that they can be fixed before final exposure.
+ * Furthermore, expect missing checks for programming errors which the final
+ * API is expected to have.
+ *
+ * Symbols with a leading underscore are likely to not be included in the
+ * first public version; if these are central to your use-case, please let
+ * us know so that we can reconsider.
+ *
+ * "Array-like" consumer API not yet under consideration
+ * =====================================================
+ *
+ * The new DType API is designed in a way to make it potentially useful for
+ * alternative "array-like" implementations. This will require careful
+ * exposure of details and functions and is not part of this experimental API.
+ */
+
+#ifndef NUMPY_CORE_INCLUDE_NUMPY_EXPERIMENTAL_DTYPE_API_H_
+#define NUMPY_CORE_INCLUDE_NUMPY_EXPERIMENTAL_DTYPE_API_H_
+
+#include <Python.h>
+#include "ndarraytypes.h"
+
+
+/*
+ * Just a hack so I don't forget importing as much myself; I spent way too
+ * much time noticing it the first time around :).
+ */
+static void
+__not_imported(void)
+{
+    printf("*****\nCritical error, dtype API not imported\n*****\n");
+}
+static void *__uninitialized_table[] = {
+        &__not_imported, &__not_imported, &__not_imported, &__not_imported,
+        &__not_imported, &__not_imported, &__not_imported, &__not_imported};
+
+
+static void **__experimental_dtype_api_table = __uninitialized_table;
+
+
+/*
+ * DTypeMeta struct; the content may be made fully opaque (except the size).
+ * We may also move everything into a single `void *dt_slots`.
+ */
+typedef struct {
+    PyHeapTypeObject super;
+    PyArray_Descr *singleton;
+    int type_num;
+    PyTypeObject *scalar_type;
+    npy_uint64 flags;
+    void *dt_slots;
+    void *reserved[3];
+} PyArray_DTypeMeta;
+
+
+/*
+ * ******************************************************
+ *          ArrayMethod API (Casting and UFuncs)
+ * ******************************************************
+ */
+/*
+ * NOTE: Expected changes:
+ * * invert logic of floating point error flag
+ * * probably split runtime and general flags into two
+ * * should possibly not use an enum for the typedef for more stable ABI?
+ */
+typedef enum {
+    /* Flag for whether the GIL is required */
+    NPY_METH_REQUIRES_PYAPI = 1 << 1,
+    /*
+     * Some functions cannot set floating point error flags; this flag
+     * gives us the option (not requirement) to skip floating point error
+     * setup/check. No function should set error flags and ignore them
+     * since it would interfere with chaining operations (e.g. casting).
+     */
+    NPY_METH_NO_FLOATINGPOINT_ERRORS = 1 << 2,
+    /* Whether the method supports unaligned access (not runtime) */
+    NPY_METH_SUPPORTS_UNALIGNED = 1 << 3,
+
+    /* All flags which can change at runtime */
+    NPY_METH_RUNTIME_FLAGS = (
+            NPY_METH_REQUIRES_PYAPI |
+            NPY_METH_NO_FLOATINGPOINT_ERRORS),
+} NPY_ARRAYMETHOD_FLAGS;
+
+
+/*
+ * The main object for creating a new ArrayMethod. We use the typical `slots`
+ * mechanism used by the Python limited API (see below for the slot defs).
+ */
+typedef struct {
+    const char *name;
+    int nin, nout;
+    NPY_CASTING casting;
+    NPY_ARRAYMETHOD_FLAGS flags;
+    PyObject **dtypes;  /* array of DType class objects */
+    PyType_Slot *slots;
+} PyArrayMethod_Spec;
+
+
+typedef PyObject *_ufunc_addloop_fromspec_func(
+        PyObject *ufunc, PyArrayMethod_Spec *spec);
+/*
+ * The main ufunc registration function. This adds a new implementation/loop
+ * to a ufunc. It replaces `PyUFunc_RegisterLoopForType`.
+ */
+#define PyUFunc_AddLoopFromSpec \
+    (*(_ufunc_addloop_fromspec_func *)(__experimental_dtype_api_table[0]))
+
+
+/*
+ * Type of the C promoter function, which must be wrapped into a
+ * PyCapsule with name "numpy._ufunc_promoter".
+ */
+typedef int promoter_function(PyObject *ufunc,
+        PyArray_DTypeMeta *op_dtypes[], PyArray_DTypeMeta *signature[],
+        PyArray_DTypeMeta *new_op_dtypes[]);
+
+/*
+ * Function to register a promoter.
+ *
+ * @param ufunc The ufunc object to register the promoter with.
+ * @param DType_tuple A Python tuple containing DTypes or None matching the
+ *        number of inputs and outputs of the ufunc.
+ * @param promoter A PyCapsule with name "numpy._ufunc_promoter" containing
+ *        a pointer to a `promoter_function`.
+ */ +typedef int _ufunc_addpromoter_func( + PyObject *ufunc, PyObject *DType_tuple, PyObject *promoter); +#define PyUFunc_AddPromoter \ + (*(_ufunc_addpromoter_func *)(__experimental_dtype_api_table[1])) + +/* + * In addition to the normal casting levels, NPY_CAST_IS_VIEW indicates + * that no cast operation is necessary at all (although a copy usually will be) + * + * NOTE: The most likely modification here is to add an additional + * `view_offset` output to resolve_descriptors. If set, it would + * indicate both that it is a view and what offset to use. This means that + * e.g. `arr.imag` could be implemented by an ArrayMethod. + */ +#define NPY_CAST_IS_VIEW _NPY_CAST_IS_VIEW + +/* + * The resolve descriptors function, must be able to handle NULL values for + * all output (but not input) `given_descrs` and fill `loop_descrs`. + * Return -1 on error or 0 if the operation is not possible without an error + * set. (This may still be in flux.) + * Otherwise must return the "casting safety", for normal functions, this is + * almost always "safe" (or even "equivalent"?). + * + * `resolve_descriptors` is optional if all output DTypes are non-parametric. + */ +#define NPY_METH_resolve_descriptors 1 +typedef NPY_CASTING (resolve_descriptors_function)( + /* "method" is currently opaque (necessary e.g. to wrap Python) */ + PyObject *method, + /* DTypes the method was created for */ + PyObject **dtypes, + /* Input descriptors (instances). Outputs may be NULL. */ + PyArray_Descr **given_descrs, + /* Exact loop descriptors to use, must not hold references on error */ + PyArray_Descr **loop_descrs); + +/* NOT public yet: Signature needs adapting as external API. */ +#define _NPY_METH_get_loop 2 + +/* + * Current public API to define fast inner-loops. You must provide a + * strided loop. If this is a cast between two "versions" of the same dtype + * you must also provide an unaligned strided loop. + * Other loops are useful to optimize the very common contiguous case. + * + * NOTE: As of now, NumPy will NOT use unaligned loops in ufuncs! + */ +#define NPY_METH_strided_loop 3 +#define NPY_METH_contiguous_loop 4 +#define NPY_METH_unaligned_strided_loop 5 +#define NPY_METH_unaligned_contiguous_loop 6 + + +typedef struct { + PyObject *caller; /* E.g. the original ufunc, may be NULL */ + PyObject *method; /* The method "self". Currently an opaque object */ + + /* Operand descriptors, filled in by resolve_descriptors */ + PyArray_Descr **descriptors; + /* Structure may grow (this is harmless for DType authors) */ +} PyArrayMethod_Context; + +typedef int (PyArrayMethod_StridedLoop)(PyArrayMethod_Context *context, + char *const *data, const npy_intp *dimensions, const npy_intp *strides, + NpyAuxData *transferdata); + + + +/* + * **************************** + * DTYPE API + * **************************** + */ + +#define NPY_DT_ABSTRACT 1 << 1 +#define NPY_DT_PARAMETRIC 1 << 2 + +#define NPY_DT_discover_descr_from_pyobject 1 +#define _NPY_DT_is_known_scalar_type 2 +#define NPY_DT_default_descr 3 +#define NPY_DT_common_dtype 4 +#define NPY_DT_common_instance 5 +#define NPY_DT_setitem 6 +#define NPY_DT_getitem 7 + + +// TODO: These slots probably still need some thought, and/or a way to "grow"? +typedef struct{ + PyTypeObject *typeobj; /* type of python scalar or NULL */ + int flags; /* flags, including parametric and abstract */ + /* NULL terminated cast definitions. 
Use NULL for the newly created DType */ + PyArrayMethod_Spec **casts; + PyType_Slot *slots; + /* Baseclass or NULL (will always subclass `np.dtype`) */ + PyTypeObject *baseclass; +} PyArrayDTypeMeta_Spec; + + +#define PyArrayDTypeMeta_Type \ + (*(PyTypeObject *)__experimental_dtype_api_table[2]) +typedef int __dtypemeta_fromspec( + PyArray_DTypeMeta *DType, PyArrayDTypeMeta_Spec *dtype_spec); +/* + * Finalize creation of a DTypeMeta. You must ensure that the DTypeMeta is + * a proper subclass. The DTypeMeta object has additional fields compared to + * a normal PyTypeObject! + * The only (easy) creation of a new DType is to create a static Type which + * inherits `PyArray_DescrType`, sets its type to `PyArrayDTypeMeta_Type` and + * uses `PyArray_DTypeMeta` defined above as the C-structure. + */ +#define PyArrayInitDTypeMeta_FromSpec \ + ((__dtypemeta_fromspec *)(__experimental_dtype_api_table[3])) + + +/* + * ************************************* + * WORKING WITH DTYPES + * ************************************* + */ + +typedef PyArray_DTypeMeta *__common_dtype( + PyArray_DTypeMeta *DType1, PyArray_DTypeMeta *DType2); +#define PyArray_CommonDType \ + ((__common_dtype *)(__experimental_dtype_api_table[4])) + + +typedef PyArray_DTypeMeta *__promote_dtype_sequence( + npy_intp num, PyArray_DTypeMeta *DTypes[]); +#define PyArray_PromoteDTypeSequence \ + ((__promote_dtype_sequence *)(__experimental_dtype_api_table[5])) + + +/* + * ******************************** + * Initialization + * ******************************** + * + * Import the experimental API, the version must match the one defined in + * the header to ensure changes are taken into account. NumPy will further + * runtime-check this. + * You must call this function to use the symbols defined in this file. + */ +#define __EXPERIMENTAL_DTYPE_VERSION 2 + +static int +import_experimental_dtype_api(int version) +{ + if (version != __EXPERIMENTAL_DTYPE_VERSION) { + PyErr_Format(PyExc_RuntimeError, + "DType API version %d did not match header version %d. Please " + "update the import statement and check for API changes.", + version, __EXPERIMENTAL_DTYPE_VERSION); + return -1; + } + if (__experimental_dtype_api_table != __uninitialized_table) { + /* already imported. 
*/ + return 0; + } + + PyObject *multiarray = PyImport_ImportModule("numpy.core._multiarray_umath"); + if (multiarray == NULL) { + return -1; + } + + PyObject *api = PyObject_CallMethod(multiarray, + "_get_experimental_dtype_api", "i", version); + Py_DECREF(multiarray); + if (api == NULL) { + return -1; + } + __experimental_dtype_api_table = PyCapsule_GetPointer(api, + "experimental_dtype_api_table"); + Py_DECREF(api); + + if (__experimental_dtype_api_table == NULL) { + __experimental_dtype_api_table = __uninitialized_table; + return -1; + } + return 0; +} + +#endif /* NUMPY_CORE_INCLUDE_NUMPY_EXPERIMENTAL_DTYPE_API_H_ */ diff --git a/numpy/core/include/numpy/halffloat.h b/numpy/core/include/numpy/halffloat.h index ab0d221fb..950401664 100644 --- a/numpy/core/include/numpy/halffloat.h +++ b/numpy/core/include/numpy/halffloat.h @@ -1,5 +1,5 @@ -#ifndef __NPY_HALFFLOAT_H__ -#define __NPY_HALFFLOAT_H__ +#ifndef NUMPY_CORE_INCLUDE_NUMPY_HALFFLOAT_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_HALFFLOAT_H_ #include <Python.h> #include <numpy/npy_math.h> @@ -67,4 +67,4 @@ npy_uint64 npy_halfbits_to_doublebits(npy_uint16 h); } #endif -#endif +#endif /* NUMPY_CORE_INCLUDE_NUMPY_HALFFLOAT_H_ */ diff --git a/numpy/core/include/numpy/libdivide/libdivide.h b/numpy/core/include/numpy/libdivide/libdivide.h index 81057b7b4..f4eb8039b 100644 --- a/numpy/core/include/numpy/libdivide/libdivide.h +++ b/numpy/core/include/numpy/libdivide/libdivide.h @@ -8,8 +8,8 @@ // You may use libdivide under the terms of either of these. // See LICENSE.txt for more details. -#ifndef LIBDIVIDE_H -#define LIBDIVIDE_H +#ifndef NUMPY_CORE_INCLUDE_NUMPY_LIBDIVIDE_LIBDIVIDE_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_LIBDIVIDE_LIBDIVIDE_H_ #define LIBDIVIDE_VERSION "3.0" #define LIBDIVIDE_VERSION_MAJOR 3 @@ -2072,8 +2072,8 @@ T& operator/=(T& n, const divider<T, ALGO>& div) { template <typename T> using branchfree_divider = divider<T, BRANCHFREE>; -} // namespace libdivide +} // namespace libdivide -#endif // __cplusplus +#endif // __cplusplus -#endif // LIBDIVIDE_H +#endif // NUMPY_CORE_INCLUDE_NUMPY_LIBDIVIDE_LIBDIVIDE_H_ diff --git a/numpy/core/include/numpy/ndarrayobject.h b/numpy/core/include/numpy/ndarrayobject.h index 5ef1f10aa..2eb951486 100644 --- a/numpy/core/include/numpy/ndarrayobject.h +++ b/numpy/core/include/numpy/ndarrayobject.h @@ -1,9 +1,9 @@ /* * DON'T INCLUDE THIS DIRECTLY. */ +#ifndef NUMPY_CORE_INCLUDE_NUMPY_NDARRAYOBJECT_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_NDARRAYOBJECT_H_ -#ifndef NPY_NDARRAYOBJECT_H -#define NPY_NDARRAYOBJECT_H #ifdef __cplusplus extern "C" { #endif @@ -265,4 +265,4 @@ PyArray_XDECREF_ERR(PyArrayObject *arr) #endif -#endif /* NPY_NDARRAYOBJECT_H */ +#endif /* NUMPY_CORE_INCLUDE_NUMPY_NDARRAYOBJECT_H_ */ diff --git a/numpy/core/include/numpy/ndarraytypes.h b/numpy/core/include/numpy/ndarraytypes.h index 60f16094c..a1d1c01dc 100644 --- a/numpy/core/include/numpy/ndarraytypes.h +++ b/numpy/core/include/numpy/ndarraytypes.h @@ -1,5 +1,5 @@ -#ifndef NDARRAYTYPES_H -#define NDARRAYTYPES_H +#ifndef NUMPY_CORE_INCLUDE_NUMPY_NDARRAYTYPES_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_NDARRAYTYPES_H_ #include "npy_common.h" #include "npy_endian.h" @@ -355,12 +355,10 @@ struct NpyAuxData_tag { #define NPY_ERR(str) fprintf(stderr, #str); fflush(stderr); #define NPY_ERR2(str) fprintf(stderr, str); fflush(stderr); - /* - * Macros to define how array, and dimension/strides data is - * allocated. 
- */
-
- /* Data buffer - PyDataMem_NEW/FREE/RENEW are in multiarraymodule.c */
+/*
+ * Macros to define how array and dimension/strides data are
+ * allocated. These should be made private.
+ */

 #define NPY_USE_PYMEM 1

@@ -673,6 +671,24 @@ typedef struct _arr_descr {
 } PyArray_ArrayDescr;

 /*
+ * Memory handler structure for array data.
+ */
+/* The declaration of free differs from PyMemAllocatorEx */
+typedef struct {
+    void *ctx;
+    void* (*malloc) (void *ctx, size_t size);
+    void* (*calloc) (void *ctx, size_t nelem, size_t elsize);
+    void* (*realloc) (void *ctx, void *ptr, size_t new_size);
+    void (*free) (void *ctx, void *ptr, size_t size);
+} PyDataMemAllocator;
+
+typedef struct {
+    char name[128];  /* multiple of 64 to keep the struct aligned */
+    PyDataMemAllocator allocator;
+} PyDataMem_Handler;
+
+
+/*
  * The main array object structure.
  *
  * It has been recommended to use the inline functions defined below
@@ -722,6 +738,10 @@ typedef struct tagPyArrayObject_fields {
     /* For weak references */
     PyObject *weakreflist;
    void *_buffer_info;  /* private buffer info, tagged to allow warning */
+    /*
+     * For malloc/calloc/realloc/free per object
+     */
+    PyObject *mem_handler;
 } PyArrayObject_fields;

 /*
@@ -1472,9 +1492,11 @@ PyArrayNeighborhoodIter_Next2D(PyArrayNeighborhoodIterObject* iter);
  * Include inline implementations - functions defined there are not
  * considered public API
  */
-#define _NPY_INCLUDE_NEIGHBORHOOD_IMP
+#define NUMPY_CORE_INCLUDE_NUMPY__NEIGHBORHOOD_IMP_H_
 #include "_neighborhood_iterator_imp.h"
-#undef _NPY_INCLUDE_NEIGHBORHOOD_IMP
+#undef NUMPY_CORE_INCLUDE_NUMPY__NEIGHBORHOOD_IMP_H_
+
+

 /* The default array type */
 #define NPY_DEFAULT_TYPE NPY_DOUBLE
@@ -1665,6 +1687,12 @@ PyArray_CLEARFLAGS(PyArrayObject *arr, int flags)
     ((PyArrayObject_fields *)arr)->flags &= ~flags;
 }

+static NPY_INLINE NPY_RETURNS_BORROWED_REF PyObject *
+PyArray_HANDLER(PyArrayObject *arr)
+{
+    return ((PyArrayObject_fields *)arr)->mem_handler;
+}
+
 #define PyTypeNum_ISBOOL(type) ((type) == NPY_BOOL)

 #define PyTypeNum_ISUNSIGNED(type) (((type) == NPY_UBYTE) || \
@@ -1864,32 +1892,14 @@ typedef void (PyDataMem_EventHookFunc)(void *inp, void *outp, size_t size,
  */
 #if defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD
 /*
- * The Structures defined in this block are considered private API and
- * may change without warning!
+ * The structures defined in this block are currently considered
+ * private API and may change without warning!
+ * Part of this (at least the size) is expected to be public API without
+ * further modifications.
  */
 /* TODO: Make this definition public in the API, as soon as its settled */
 NPY_NO_EXPORT extern PyTypeObject PyArrayDTypeMeta_Type;

-    typedef struct PyArray_DTypeMeta_tag PyArray_DTypeMeta;
-
-    typedef PyArray_Descr *(discover_descr_from_pyobject_function)(
-            PyArray_DTypeMeta *cls, PyObject *obj);
-
-    /*
-     * Before making this public, we should decide whether it should pass
-     * the type, or allow looking at the object. A possible use-case:
-     * `np.array(np.array([0]), dtype=np.ndarray)`
-     * Could consider arrays that are not `dtype=ndarray` "scalars".
- */ - typedef int (is_known_scalar_type_function)( - PyArray_DTypeMeta *cls, PyTypeObject *obj); - - typedef PyArray_Descr *(default_descr_function)(PyArray_DTypeMeta *cls); - typedef PyArray_DTypeMeta *(common_dtype_function)( - PyArray_DTypeMeta *dtype1, PyArray_DTypeMeta *dtyep2); - typedef PyArray_Descr *(common_instance_function)( - PyArray_Descr *dtype1, PyArray_Descr *dtyep2); - /* * While NumPy DTypes would not need to be heap types the plan is to * make DTypes available in Python at which point they will be heap types. @@ -1900,7 +1910,7 @@ typedef void (PyDataMem_EventHookFunc)(void *inp, void *outp, size_t size, * it is a fairly complex construct which may be better to allow * refactoring of. */ - struct PyArray_DTypeMeta_tag { + typedef struct { PyHeapTypeObject super; /* @@ -1928,7 +1938,7 @@ typedef void (PyDataMem_EventHookFunc)(void *inp, void *outp, size_t size, */ void *dt_slots; void *reserved[3]; - }; + } PyArray_DTypeMeta; #endif /* NPY_INTERNAL_BUILD */ @@ -1959,4 +1969,4 @@ typedef void (PyDataMem_EventHookFunc)(void *inp, void *outp, size_t size, */ #undef NPY_DEPRECATED_INCLUDES -#endif /* NPY_ARRAYTYPES_H */ +#endif /* NUMPY_CORE_INCLUDE_NUMPY_NDARRAYTYPES_H_ */ diff --git a/numpy/core/include/numpy/noprefix.h b/numpy/core/include/numpy/noprefix.h index 041f30192..2c0ce1420 100644 --- a/numpy/core/include/numpy/noprefix.h +++ b/numpy/core/include/numpy/noprefix.h @@ -1,5 +1,5 @@ -#ifndef NPY_NOPREFIX_H -#define NPY_NOPREFIX_H +#ifndef NUMPY_CORE_INCLUDE_NUMPY_NOPREFIX_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_NOPREFIX_H_ /* * You can directly include noprefix.h as a backward @@ -209,4 +209,4 @@ #define MAX_ELSIZE NPY_MAX_ELSIZE #endif -#endif +#endif /* NUMPY_CORE_INCLUDE_NUMPY_NOPREFIX_H_ */ diff --git a/numpy/core/include/numpy/npy_1_7_deprecated_api.h b/numpy/core/include/numpy/npy_1_7_deprecated_api.h index a4f90e019..4fd4015a9 100644 --- a/numpy/core/include/numpy/npy_1_7_deprecated_api.h +++ b/numpy/core/include/numpy/npy_1_7_deprecated_api.h @@ -1,10 +1,10 @@ -#ifndef _NPY_1_7_DEPRECATED_API_H -#define _NPY_1_7_DEPRECATED_API_H - #ifndef NPY_DEPRECATED_INCLUDES #error "Should never include npy_*_*_deprecated_api directly." #endif +#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_1_7_DEPRECATED_API_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_NPY_1_7_DEPRECATED_API_H_ + /* Emit a warning if the user did not specifically request the old API */ #ifndef NPY_NO_DEPRECATED_API #if defined(_WIN32) @@ -122,4 +122,4 @@ */ #include "old_defines.h" -#endif +#endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_1_7_DEPRECATED_API_H_ */ diff --git a/numpy/core/include/numpy/npy_3kcompat.h b/numpy/core/include/numpy/npy_3kcompat.h index 551ec6be8..22c103e93 100644 --- a/numpy/core/include/numpy/npy_3kcompat.h +++ b/numpy/core/include/numpy/npy_3kcompat.h @@ -7,8 +7,8 @@ * strong backwards compatibility guarantees at the moment. 
*/ -#ifndef _NPY_3KCOMPAT_H_ -#define _NPY_3KCOMPAT_H_ +#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_3KCOMPAT_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_NPY_3KCOMPAT_H_ #include <Python.h> #include <stdio.h> @@ -592,4 +592,4 @@ NpyCapsule_Check(PyObject *ptr) #endif -#endif /* _NPY_3KCOMPAT_H_ */ +#endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_3KCOMPAT_H_ */ diff --git a/numpy/core/include/numpy/npy_common.h b/numpy/core/include/numpy/npy_common.h index d5f329b66..57cc592b9 100644 --- a/numpy/core/include/numpy/npy_common.h +++ b/numpy/core/include/numpy/npy_common.h @@ -1,5 +1,5 @@ -#ifndef _NPY_COMMON_H_ -#define _NPY_COMMON_H_ +#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_COMMON_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_NPY_COMMON_H_ /* need Python.h for npy_intp, npy_uintp */ #include <Python.h> @@ -359,12 +359,11 @@ typedef unsigned char npy_bool; #if NPY_SIZEOF_LONGDOUBLE == NPY_SIZEOF_DOUBLE - typedef double npy_longdouble; #define NPY_LONGDOUBLE_FMT "g" #else - typedef long double npy_longdouble; #define NPY_LONGDOUBLE_FMT "Lg" #endif +typedef long double npy_longdouble; #ifndef Py_USING_UNICODE #error Must use Python with unicode enabled. @@ -1107,4 +1106,4 @@ typedef npy_int64 npy_datetime; /* End of typedefs for numarray style bit-width names */ -#endif +#endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_COMMON_H_ */ diff --git a/numpy/core/include/numpy/npy_cpu.h b/numpy/core/include/numpy/npy_cpu.h index e975b0105..78d229e7d 100644 --- a/numpy/core/include/numpy/npy_cpu.h +++ b/numpy/core/include/numpy/npy_cpu.h @@ -21,8 +21,8 @@ * NPY_CPU_LOONGARCH * NPY_CPU_WASM */ -#ifndef _NPY_CPUARCH_H_ -#define _NPY_CPUARCH_H_ +#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_CPU_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_NPY_CPU_H_ #include "numpyconfig.h" @@ -114,7 +114,7 @@ information about your platform (OS, CPU and compiler) #endif -/* +/* * Except for the following architectures, memory access is limited to the natural * alignment of data types otherwise it may lead to bus error or performance regression. * For more details about unaligned access, see https://www.kernel.org/doc/Documentation/unaligned-memory-access.txt. @@ -126,4 +126,4 @@ #define NPY_ALIGNMENT_REQUIRED 1 #endif -#endif +#endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_CPU_H_ */ diff --git a/numpy/core/include/numpy/npy_endian.h b/numpy/core/include/numpy/npy_endian.h index 620595bec..5e58a7f52 100644 --- a/numpy/core/include/numpy/npy_endian.h +++ b/numpy/core/include/numpy/npy_endian.h @@ -1,5 +1,5 @@ -#ifndef _NPY_ENDIAN_H_ -#define _NPY_ENDIAN_H_ +#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_ENDIAN_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_NPY_ENDIAN_H_ /* * NPY_BYTE_ORDER is set to the same value as BYTE_ORDER set by glibc in @@ -52,6 +52,7 @@ || defined(NPY_CPU_LOONGARCH) \ || defined(NPY_CPU_WASM) #define NPY_BYTE_ORDER NPY_LITTLE_ENDIAN + #elif defined(NPY_CPU_PPC) \ || defined(NPY_CPU_SPARC) \ || defined(NPY_CPU_S390) \ @@ -66,9 +67,11 @@ || defined(NPY_CPU_M68K) \ || defined(NPY_CPU_ARCEB) #define NPY_BYTE_ORDER NPY_BIG_ENDIAN + #else #error Unknown CPU: can not set endianness #endif -#endif #endif + +#endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_ENDIAN_H_ */ diff --git a/numpy/core/include/numpy/npy_interrupt.h b/numpy/core/include/numpy/npy_interrupt.h index bcb539326..69a0374dd 100644 --- a/numpy/core/include/numpy/npy_interrupt.h +++ b/numpy/core/include/numpy/npy_interrupt.h @@ -14,8 +14,8 @@ * https://github.com/python/cpython/pull/20599). 
*/ -#ifndef NPY_INTERRUPT_H -#define NPY_INTERRUPT_H +#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_INTERRUPT_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_NPY_INTERRUPT_H_ #ifndef NPY_NO_SIGNAL @@ -46,11 +46,11 @@ PyOS_setsig(SIGINT, _npy_sig_save); \ } -#else /* NPY_NO_SIGNAL */ +#else /* NPY_NO_SIGNAL */ #define NPY_SIGINT_ON #define NPY_SIGINT_OFF -#endif /* HAVE_SIGSETJMP */ +#endif /* HAVE_SIGSETJMP */ -#endif /* NPY_INTERRUPT_H */ +#endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_INTERRUPT_H_ */ diff --git a/numpy/core/include/numpy/npy_math.h b/numpy/core/include/numpy/npy_math.h index e9a6a30d2..bead0dc14 100644 --- a/numpy/core/include/numpy/npy_math.h +++ b/numpy/core/include/numpy/npy_math.h @@ -1,5 +1,5 @@ -#ifndef __NPY_MATH_C99_H_ -#define __NPY_MATH_C99_H_ +#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_MATH_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_NPY_MATH_H_ #ifdef __cplusplus extern "C" { @@ -150,6 +150,17 @@ NPY_INPLACE npy_long npy_lshiftl(npy_long a, npy_long b); NPY_INPLACE npy_longlong npy_rshiftll(npy_longlong a, npy_longlong b); NPY_INPLACE npy_longlong npy_lshiftll(npy_longlong a, npy_longlong b); +NPY_INPLACE uint8_t npy_popcountuhh(npy_ubyte a); +NPY_INPLACE uint8_t npy_popcountuh(npy_ushort a); +NPY_INPLACE uint8_t npy_popcountu(npy_uint a); +NPY_INPLACE uint8_t npy_popcountul(npy_ulong a); +NPY_INPLACE uint8_t npy_popcountull(npy_ulonglong a); +NPY_INPLACE uint8_t npy_popcounthh(npy_byte a); +NPY_INPLACE uint8_t npy_popcounth(npy_short a); +NPY_INPLACE uint8_t npy_popcount(npy_int a); +NPY_INPLACE uint8_t npy_popcountl(npy_long a); +NPY_INPLACE uint8_t npy_popcountll(npy_longlong a); + /* * C99 double math funcs */ @@ -585,4 +596,4 @@ void npy_set_floatstatus_invalid(void); #include "npy_math_internal.h" #endif -#endif +#endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_MATH_H_ */ diff --git a/numpy/core/include/numpy/npy_no_deprecated_api.h b/numpy/core/include/numpy/npy_no_deprecated_api.h index 6183dc278..39658c0bd 100644 --- a/numpy/core/include/numpy/npy_no_deprecated_api.h +++ b/numpy/core/include/numpy/npy_no_deprecated_api.h @@ -9,11 +9,12 @@ #ifndef NPY_NO_DEPRECATED_API /* put this check here since there may be multiple includes in C extensions. */ -#if defined(NDARRAYTYPES_H) || defined(_NPY_DEPRECATED_API_H) || \ - defined(OLD_DEFINES_H) +#if defined(NUMPY_CORE_INCLUDE_NUMPY_NDARRAYTYPES_H_) || \ + defined(NUMPY_CORE_INCLUDE_NUMPY_NPY_DEPRECATED_API_H) || \ + defined(NUMPY_CORE_INCLUDE_NUMPY_OLD_DEFINES_H_) #error "npy_no_deprecated_api.h" must be first among numpy includes. 
#else #define NPY_NO_DEPRECATED_API NPY_API_VERSION #endif -#endif +#endif /* NPY_NO_DEPRECATED_API */ diff --git a/numpy/core/include/numpy/npy_os.h b/numpy/core/include/numpy/npy_os.h index 9228c3916..efa0e4012 100644 --- a/numpy/core/include/numpy/npy_os.h +++ b/numpy/core/include/numpy/npy_os.h @@ -1,5 +1,5 @@ -#ifndef _NPY_OS_H_ -#define _NPY_OS_H_ +#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_OS_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_NPY_OS_H_ #if defined(linux) || defined(__linux) || defined(__linux__) #define NPY_OS_LINUX @@ -27,4 +27,4 @@ #define NPY_OS_UNKNOWN #endif -#endif +#endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_OS_H_ */ diff --git a/numpy/core/include/numpy/numpyconfig.h b/numpy/core/include/numpy/numpyconfig.h index 726f1dfac..1c3686769 100644 --- a/numpy/core/include/numpy/numpyconfig.h +++ b/numpy/core/include/numpy/numpyconfig.h @@ -1,5 +1,5 @@ -#ifndef _NPY_NUMPYCONFIG_H_ -#define _NPY_NUMPYCONFIG_H_ +#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_NUMPYCONFIG_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_NPY_NUMPYCONFIG_H_ #include "_numpyconfig.h" @@ -19,6 +19,19 @@ #define NPY_SIZEOF_LONG 4 #define NPY_SIZEOF_PY_INTPTR_T 4 #endif + + #undef NPY_SIZEOF_LONGDOUBLE + #undef NPY_SIZEOF_COMPLEX_LONGDOUBLE + + #ifdef __x86_64 + #define NPY_SIZEOF_LONGDOUBLE 16 + #define NPY_SIZEOF_COMPLEX_LONGDOUBLE 32 + #elif defined(__arm64__) + #define NPY_SIZEOF_LONGDOUBLE 8 + #define NPY_SIZEOF_COMPLEX_LONGDOUBLE 16 + #else + #error "unknown architecture" + #endif #endif /** @@ -45,4 +58,4 @@ #define NPY_1_21_API_VERSION 0x0000000e #define NPY_1_22_API_VERSION 0x0000000e -#endif +#endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_NUMPYCONFIG_H_ */ diff --git a/numpy/core/include/numpy/old_defines.h b/numpy/core/include/numpy/old_defines.h index abf81595a..b3fa67751 100644 --- a/numpy/core/include/numpy/old_defines.h +++ b/numpy/core/include/numpy/old_defines.h @@ -1,6 +1,6 @@ /* This header is deprecated as of NumPy 1.7 */ -#ifndef OLD_DEFINES_H -#define OLD_DEFINES_H +#ifndef NUMPY_CORE_INCLUDE_NUMPY_OLD_DEFINES_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_OLD_DEFINES_H_ #if defined(NPY_NO_DEPRECATED_API) && NPY_NO_DEPRECATED_API >= NPY_1_7_API_VERSION #error The header "old_defines.h" is deprecated as of NumPy 1.7. @@ -184,4 +184,4 @@ #define PyArray_UCS4 npy_ucs4 -#endif +#endif /* NUMPY_CORE_INCLUDE_NUMPY_OLD_DEFINES_H_ */ diff --git a/numpy/core/include/numpy/oldnumeric.h b/numpy/core/include/numpy/oldnumeric.h index 38530faf0..6604e8d17 100644 --- a/numpy/core/include/numpy/oldnumeric.h +++ b/numpy/core/include/numpy/oldnumeric.h @@ -1,3 +1,8 @@ +#ifndef NUMPY_CORE_INCLUDE_NUMPY_OLDNUMERIC_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_OLDNUMERIC_H_ + +/* FIXME -- this file can be deleted? 
*/ + #include "arrayobject.h" #ifndef PYPY_VERSION @@ -23,3 +28,5 @@ #undef import_array #define import_array() { if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); } } + +#endif /* NUMPY_CORE_INCLUDE_NUMPY_OLDNUMERIC_H_ */ diff --git a/numpy/core/include/numpy/random/bitgen.h b/numpy/core/include/numpy/random/bitgen.h index 83c2858dd..162dd5c57 100644 --- a/numpy/core/include/numpy/random/bitgen.h +++ b/numpy/core/include/numpy/random/bitgen.h @@ -1,5 +1,5 @@ -#ifndef _RANDOM_BITGEN_H -#define _RANDOM_BITGEN_H +#ifndef NUMPY_CORE_INCLUDE_NUMPY_RANDOM_BITGEN_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_RANDOM_BITGEN_H_ #pragma once #include <stddef.h> @@ -17,4 +17,4 @@ typedef struct bitgen { } bitgen_t; -#endif +#endif /* NUMPY_CORE_INCLUDE_NUMPY_RANDOM_BITGEN_H_ */ diff --git a/numpy/core/include/numpy/random/distributions.h b/numpy/core/include/numpy/random/distributions.h index 554198174..dacf77829 100644 --- a/numpy/core/include/numpy/random/distributions.h +++ b/numpy/core/include/numpy/random/distributions.h @@ -1,11 +1,11 @@ -#ifndef _RANDOMDGEN__DISTRIBUTIONS_H_ -#define _RANDOMDGEN__DISTRIBUTIONS_H_ +#ifndef NUMPY_CORE_INCLUDE_NUMPY_RANDOM_DISTRIBUTIONS_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_RANDOM_DISTRIBUTIONS_H_ #ifdef __cplusplus extern "C" { #endif -#include "Python.h" +#include <Python.h> #include "numpy/npy_common.h" #include <stddef.h> #include <stdbool.h> @@ -206,4 +206,4 @@ static NPY_INLINE double next_double(bitgen_t *bitgen_state) { } #endif -#endif +#endif /* NUMPY_CORE_INCLUDE_NUMPY_RANDOM_DISTRIBUTIONS_H_ */ diff --git a/numpy/core/include/numpy/ufuncobject.h b/numpy/core/include/numpy/ufuncobject.h index fd7307703..3f184bd45 100644 --- a/numpy/core/include/numpy/ufuncobject.h +++ b/numpy/core/include/numpy/ufuncobject.h @@ -1,5 +1,5 @@ -#ifndef Py_UFUNCOBJECT_H -#define Py_UFUNCOBJECT_H +#ifndef NUMPY_CORE_INCLUDE_NUMPY_UFUNCOBJECT_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_UFUNCOBJECT_H_ #include <numpy/npy_math.h> #include <numpy/npy_common.h> @@ -349,8 +349,8 @@ typedef struct _loop1d_info { #endif #endif - #ifdef __cplusplus } #endif -#endif /* !Py_UFUNCOBJECT_H */ + +#endif /* NUMPY_CORE_INCLUDE_NUMPY_UFUNCOBJECT_H_ */ diff --git a/numpy/core/include/numpy/utils.h b/numpy/core/include/numpy/utils.h index e251a5201..e2b57f9e5 100644 --- a/numpy/core/include/numpy/utils.h +++ b/numpy/core/include/numpy/utils.h @@ -1,5 +1,5 @@ -#ifndef __NUMPY_UTILS_HEADER__ -#define __NUMPY_UTILS_HEADER__ +#ifndef NUMPY_CORE_INCLUDE_NUMPY_UTILS_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_UTILS_H_ #ifndef __COMP_NPY_UNUSED #if defined(__GNUC__) @@ -34,4 +34,4 @@ #define NPY_CAT_(a, b) NPY_CAT__(a, b) #define NPY_CAT(a, b) NPY_CAT_(a, b) -#endif +#endif /* NUMPY_CORE_INCLUDE_NUMPY_UTILS_H_ */ diff --git a/numpy/core/memmap.pyi b/numpy/core/memmap.pyi new file mode 100644 index 000000000..ba595bf1e --- /dev/null +++ b/numpy/core/memmap.pyi @@ -0,0 +1,5 @@ +from typing import List + +from numpy import memmap as memmap + +__all__: List[str] diff --git a/numpy/core/multiarray.py b/numpy/core/multiarray.py index 154df6f4d..351cd3a1b 100644 --- a/numpy/core/multiarray.py +++ b/numpy/core/multiarray.py @@ -31,8 +31,8 @@ __all__ = [ 'count_nonzero', 'c_einsum', 'datetime_as_string', 'datetime_data', 'dot', 'dragon4_positional', 'dragon4_scientific', 'dtype', 'empty', 'empty_like', 'error', 'flagsobj', 'flatiter', 'format_longfloat', - 'frombuffer', 'fromfile', 'fromiter', 'fromstring', 'inner', - 'interp', 'interp_complex', 'is_busday', 
'lexsort', + 'frombuffer', 'fromfile', 'fromiter', 'fromstring', 'get_handler_name', + 'inner', 'interp', 'interp_complex', 'is_busday', 'lexsort', 'matmul', 'may_share_memory', 'min_scalar_type', 'ndarray', 'nditer', 'nested_iters', 'normalize_axis_index', 'packbits', 'promote_types', 'putmask', 'ravel_multi_index', 'result_type', 'scalar', diff --git a/numpy/core/multiarray.pyi b/numpy/core/multiarray.pyi index 501e55634..a9f68e181 100644 --- a/numpy/core/multiarray.pyi +++ b/numpy/core/multiarray.pyi @@ -30,7 +30,6 @@ from numpy import ( nditer as nditer, # The rest - nditer, ufunc, str_, bool_, @@ -51,7 +50,9 @@ from numpy import ( _ModeKind, _SupportsBuffer, _IOProtocol, - _CopyMode + _CopyMode, + _NDIterFlagsKind, + _NDIterOpFlagsKind, ) from numpy.typing import ( @@ -66,7 +67,7 @@ from numpy.typing import ( NDArray, ArrayLike, _SupportsArray, - _NestedSequence, + _FiniteNestedSequence, _ArrayLikeBool_co, _ArrayLikeUInt_co, _ArrayLikeInt_co, @@ -92,7 +93,7 @@ _DTypeLike = Union[ Type[_SCT], _SupportsDType[dtype[_SCT]], ] -_ArrayLike = _NestedSequence[_SupportsArray[dtype[_SCT]]] +_ArrayLike = _FiniteNestedSequence[_SupportsArray[dtype[_SCT]]] # Valid time units _UnitKind = L[ @@ -1013,3 +1014,14 @@ class flagsobj: def owndata(self) -> bool: ... def __getitem__(self, key: _GetItemKeys) -> bool: ... def __setitem__(self, key: _SetItemKeys, value: bool) -> None: ... + +def nested_iters( + op: ArrayLike | Sequence[ArrayLike], + axes: Sequence[Sequence[SupportsIndex]], + flags: None | Sequence[_NDIterFlagsKind] = ..., + op_flags: None | Sequence[Sequence[_NDIterOpFlagsKind]] = ..., + op_dtypes: DTypeLike | Sequence[DTypeLike] = ..., + order: _OrderKACF = ..., + casting: _CastingKind = ..., + buffersize: SupportsIndex = ..., +) -> Tuple[nditer, ...]: ... 
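The `nested_iters` stub added above mirrors the long-standing runtime function, which splits one iteration space across several `nditer`s. A minimal sketch following the `nditer` documentation (the axis grouping here is chosen purely for illustration):

```python
import numpy as np

a = np.arange(12).reshape(2, 3, 2)
# The outer iterator walks axis 1; the inner iterator walks axes 0 and 2.
i, j = np.nested_iters(a, [[1], [0, 2]], flags=["multi_index"])
for x in i:
    print(i.multi_index, [int(y) for y in j])
# (0,) [0, 1, 6, 7]
# (1,) [2, 3, 8, 9]
# (2,) [4, 5, 10, 11]
```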
diff --git a/numpy/core/numeric.py b/numpy/core/numeric.py index d8a0cf9a6..1654e8364 100644 --- a/numpy/core/numeric.py +++ b/numpy/core/numeric.py @@ -1184,7 +1184,7 @@ def roll(a, shift, axis=None): >>> np.roll(x, -2) array([2, 3, 4, 5, 6, 7, 8, 9, 0, 1]) - >>> x2 = np.reshape(x, (2,5)) + >>> x2 = np.reshape(x, (2, 5)) >>> x2 array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]) @@ -1206,6 +1206,12 @@ def roll(a, shift, axis=None): >>> np.roll(x2, -1, axis=1) array([[1, 2, 3, 4, 0], [6, 7, 8, 9, 5]]) + >>> np.roll(x2, (1, 1), axis=(1, 0)) + array([[9, 5, 6, 7, 8], + [4, 0, 1, 2, 3]]) + >>> np.roll(x2, (2, 1), axis=(1, 0)) + array([[8, 9, 5, 6, 7], + [3, 4, 0, 1, 2]]) """ a = asanyarray(a) diff --git a/numpy/core/numeric.pyi b/numpy/core/numeric.pyi index 54ab4b7c8..d7ec30351 100644 --- a/numpy/core/numeric.pyi +++ b/numpy/core/numeric.pyi @@ -1,6 +1,5 @@ from typing import ( Any, - Optional, Union, Sequence, Tuple, @@ -8,18 +7,64 @@ from typing import ( List, overload, TypeVar, - Iterable, Literal, + Type, + SupportsAbs, + SupportsIndex, + NoReturn, ) +from typing_extensions import TypeGuard -from numpy import ndarray, generic, dtype, bool_, signedinteger, _OrderKACF, _OrderCF -from numpy.typing import ArrayLike, DTypeLike, _ShapeLike +from numpy import ( + ComplexWarning as ComplexWarning, + dtype, + generic, + unsignedinteger, + signedinteger, + floating, + complexfloating, + bool_, + int_, + intp, + float64, + timedelta64, + object_, + _OrderKACF, + _OrderCF, +) + +from numpy.typing import ( + ArrayLike, + NDArray, + DTypeLike, + _ShapeLike, + _SupportsDType, + _FiniteNestedSequence, + _SupportsArray, + _ScalarLike_co, + _ArrayLikeBool_co, + _ArrayLikeUInt_co, + _ArrayLikeInt_co, + _ArrayLikeFloat_co, + _ArrayLikeComplex_co, + _ArrayLikeTD64_co, + _ArrayLikeObject_co, +) _T = TypeVar("_T") -_ArrayType = TypeVar("_ArrayType", bound=ndarray) +_SCT = TypeVar("_SCT", bound=generic) +_ArrayType = TypeVar("_ArrayType", bound=NDArray[Any]) +_DTypeLike = Union[ + dtype[_SCT], + Type[_SCT], + _SupportsDType[dtype[_SCT]], +] +_ArrayLike = _FiniteNestedSequence[_SupportsArray[dtype[_SCT]]] _CorrelateMode = Literal["valid", "same", "full"] +__all__: List[str] + @overload def zeros_like( a: _ArrayType, @@ -30,20 +75,61 @@ def zeros_like( ) -> _ArrayType: ... @overload def zeros_like( - a: ArrayLike, - dtype: DTypeLike = ..., + a: _ArrayLike[_SCT], + dtype: None = ..., order: _OrderKACF = ..., subok: bool = ..., - shape: Optional[_ShapeLike] = ..., -) -> ndarray: ... + shape: None | _ShapeLike = ..., +) -> NDArray[_SCT]: ... +@overload +def zeros_like( + a: object, + dtype: None = ..., + order: _OrderKACF = ..., + subok: bool = ..., + shape: None | _ShapeLike= ..., +) -> NDArray[Any]: ... +@overload +def zeros_like( + a: Any, + dtype: _DTypeLike[_SCT], + order: _OrderKACF = ..., + subok: bool = ..., + shape: None | _ShapeLike= ..., +) -> NDArray[_SCT]: ... +@overload +def zeros_like( + a: Any, + dtype: DTypeLike, + order: _OrderKACF = ..., + subok: bool = ..., + shape: None | _ShapeLike= ..., +) -> NDArray[Any]: ... +@overload def ones( shape: _ShapeLike, - dtype: DTypeLike = ..., + dtype: None = ..., + order: _OrderCF = ..., + *, + like: ArrayLike = ..., +) -> NDArray[float64]: ... +@overload +def ones( + shape: _ShapeLike, + dtype: _DTypeLike[_SCT], order: _OrderCF = ..., *, like: ArrayLike = ..., -) -> ndarray: ... +) -> NDArray[_SCT]: ... +@overload +def ones( + shape: _ShapeLike, + dtype: DTypeLike, + order: _OrderCF = ..., + *, + like: ArrayLike = ..., +) -> NDArray[Any]: ... 
@overload def ones_like( @@ -55,21 +141,64 @@ def ones_like( ) -> _ArrayType: ... @overload def ones_like( - a: ArrayLike, - dtype: DTypeLike = ..., + a: _ArrayLike[_SCT], + dtype: None = ..., order: _OrderKACF = ..., subok: bool = ..., - shape: Optional[_ShapeLike] = ..., -) -> ndarray: ... + shape: None | _ShapeLike = ..., +) -> NDArray[_SCT]: ... +@overload +def ones_like( + a: object, + dtype: None = ..., + order: _OrderKACF = ..., + subok: bool = ..., + shape: None | _ShapeLike= ..., +) -> NDArray[Any]: ... +@overload +def ones_like( + a: Any, + dtype: _DTypeLike[_SCT], + order: _OrderKACF = ..., + subok: bool = ..., + shape: None | _ShapeLike= ..., +) -> NDArray[_SCT]: ... +@overload +def ones_like( + a: Any, + dtype: DTypeLike, + order: _OrderKACF = ..., + subok: bool = ..., + shape: None | _ShapeLike= ..., +) -> NDArray[Any]: ... +@overload def full( shape: _ShapeLike, fill_value: Any, - dtype: DTypeLike = ..., + dtype: None = ..., + order: _OrderCF = ..., + *, + like: ArrayLike = ..., +) -> NDArray[Any]: ... +@overload +def full( + shape: _ShapeLike, + fill_value: Any, + dtype: _DTypeLike[_SCT], order: _OrderCF = ..., *, like: ArrayLike = ..., -) -> ndarray: ... +) -> NDArray[_SCT]: ... +@overload +def full( + shape: _ShapeLike, + fill_value: Any, + dtype: DTypeLike, + order: _OrderCF = ..., + *, + like: ArrayLike = ..., +) -> NDArray[Any]: ... @overload def full_like( @@ -82,13 +211,40 @@ def full_like( ) -> _ArrayType: ... @overload def full_like( - a: ArrayLike, + a: _ArrayLike[_SCT], fill_value: Any, - dtype: DTypeLike = ..., + dtype: None = ..., + order: _OrderKACF = ..., + subok: bool = ..., + shape: None | _ShapeLike = ..., +) -> NDArray[_SCT]: ... +@overload +def full_like( + a: object, + fill_value: Any, + dtype: None = ..., + order: _OrderKACF = ..., + subok: bool = ..., + shape: None | _ShapeLike= ..., +) -> NDArray[Any]: ... +@overload +def full_like( + a: Any, + fill_value: Any, + dtype: _DTypeLike[_SCT], + order: _OrderKACF = ..., + subok: bool = ..., + shape: None | _ShapeLike= ..., +) -> NDArray[_SCT]: ... +@overload +def full_like( + a: Any, + fill_value: Any, + dtype: DTypeLike, order: _OrderKACF = ..., subok: bool = ..., - shape: Optional[_ShapeLike] = ..., -) -> ndarray: ... + shape: None | _ShapeLike= ..., +) -> NDArray[Any]: ... @overload def count_nonzero( @@ -105,78 +261,306 @@ def count_nonzero( keepdims: bool = ..., ) -> Any: ... # TODO: np.intp or ndarray[np.intp] -def isfortran(a: Union[ndarray, generic]) -> bool: ... +def isfortran(a: NDArray[Any] | generic) -> bool: ... -def argwhere(a: ArrayLike) -> ndarray: ... +def argwhere(a: ArrayLike) -> NDArray[intp]: ... -def flatnonzero(a: ArrayLike) -> ndarray: ... +def flatnonzero(a: ArrayLike) -> NDArray[intp]: ... +@overload def correlate( - a: ArrayLike, - v: ArrayLike, + a: _ArrayLikeBool_co, + v: _ArrayLikeBool_co, + mode: _CorrelateMode = ..., +) -> NDArray[bool_]: ... +@overload +def correlate( + a: _ArrayLikeUInt_co, + v: _ArrayLikeUInt_co, + mode: _CorrelateMode = ..., +) -> NDArray[unsignedinteger[Any]]: ... +@overload +def correlate( + a: _ArrayLikeInt_co, + v: _ArrayLikeInt_co, mode: _CorrelateMode = ..., -) -> ndarray: ... +) -> NDArray[signedinteger[Any]]: ... +@overload +def correlate( + a: _ArrayLikeFloat_co, + v: _ArrayLikeFloat_co, + mode: _CorrelateMode = ..., +) -> NDArray[floating[Any]]: ... +@overload +def correlate( + a: _ArrayLikeComplex_co, + v: _ArrayLikeComplex_co, + mode: _CorrelateMode = ..., +) -> NDArray[complexfloating[Any, Any]]: ... 
+@overload +def correlate( + a: _ArrayLikeTD64_co, + v: _ArrayLikeTD64_co, + mode: _CorrelateMode = ..., +) -> NDArray[timedelta64]: ... +@overload +def correlate( + a: _ArrayLikeObject_co, + v: _ArrayLikeObject_co, + mode: _CorrelateMode = ..., +) -> NDArray[object_]: ... +@overload def convolve( - a: ArrayLike, - v: ArrayLike, + a: _ArrayLikeBool_co, + v: _ArrayLikeBool_co, mode: _CorrelateMode = ..., -) -> ndarray: ... +) -> NDArray[bool_]: ... +@overload +def convolve( + a: _ArrayLikeUInt_co, + v: _ArrayLikeUInt_co, + mode: _CorrelateMode = ..., +) -> NDArray[unsignedinteger[Any]]: ... +@overload +def convolve( + a: _ArrayLikeInt_co, + v: _ArrayLikeInt_co, + mode: _CorrelateMode = ..., +) -> NDArray[signedinteger[Any]]: ... +@overload +def convolve( + a: _ArrayLikeFloat_co, + v: _ArrayLikeFloat_co, + mode: _CorrelateMode = ..., +) -> NDArray[floating[Any]]: ... +@overload +def convolve( + a: _ArrayLikeComplex_co, + v: _ArrayLikeComplex_co, + mode: _CorrelateMode = ..., +) -> NDArray[complexfloating[Any, Any]]: ... +@overload +def convolve( + a: _ArrayLikeTD64_co, + v: _ArrayLikeTD64_co, + mode: _CorrelateMode = ..., +) -> NDArray[timedelta64]: ... +@overload +def convolve( + a: _ArrayLikeObject_co, + v: _ArrayLikeObject_co, + mode: _CorrelateMode = ..., +) -> NDArray[object_]: ... @overload def outer( - a: ArrayLike, - b: ArrayLike, + a: _ArrayLikeBool_co, + b: _ArrayLikeBool_co, out: None = ..., -) -> ndarray: ... +) -> NDArray[bool_]: ... @overload def outer( - a: ArrayLike, - b: ArrayLike, - out: _ArrayType = ..., + a: _ArrayLikeUInt_co, + b: _ArrayLikeUInt_co, + out: None = ..., +) -> NDArray[unsignedinteger[Any]]: ... +@overload +def outer( + a: _ArrayLikeInt_co, + b: _ArrayLikeInt_co, + out: None = ..., +) -> NDArray[signedinteger[Any]]: ... +@overload +def outer( + a: _ArrayLikeFloat_co, + b: _ArrayLikeFloat_co, + out: None = ..., +) -> NDArray[floating[Any]]: ... +@overload +def outer( + a: _ArrayLikeComplex_co, + b: _ArrayLikeComplex_co, + out: None = ..., +) -> NDArray[complexfloating[Any, Any]]: ... +@overload +def outer( + a: _ArrayLikeTD64_co, + b: _ArrayLikeTD64_co, + out: None = ..., +) -> NDArray[timedelta64]: ... +@overload +def outer( + a: _ArrayLikeObject_co, + b: _ArrayLikeObject_co, + out: None = ..., +) -> NDArray[object_]: ... +@overload +def outer( + a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, + b: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, + out: _ArrayType, ) -> _ArrayType: ... +@overload def tensordot( - a: ArrayLike, - b: ArrayLike, - axes: Union[int, Tuple[_ShapeLike, _ShapeLike]] = ..., -) -> ndarray: ... + a: _ArrayLikeBool_co, + b: _ArrayLikeBool_co, + axes: int | Tuple[_ShapeLike, _ShapeLike] = ..., +) -> NDArray[bool_]: ... +@overload +def tensordot( + a: _ArrayLikeUInt_co, + b: _ArrayLikeUInt_co, + axes: int | Tuple[_ShapeLike, _ShapeLike] = ..., +) -> NDArray[unsignedinteger[Any]]: ... +@overload +def tensordot( + a: _ArrayLikeInt_co, + b: _ArrayLikeInt_co, + axes: int | Tuple[_ShapeLike, _ShapeLike] = ..., +) -> NDArray[signedinteger[Any]]: ... +@overload +def tensordot( + a: _ArrayLikeFloat_co, + b: _ArrayLikeFloat_co, + axes: int | Tuple[_ShapeLike, _ShapeLike] = ..., +) -> NDArray[floating[Any]]: ... +@overload +def tensordot( + a: _ArrayLikeComplex_co, + b: _ArrayLikeComplex_co, + axes: int | Tuple[_ShapeLike, _ShapeLike] = ..., +) -> NDArray[complexfloating[Any, Any]]: ... 
+@overload +def tensordot( + a: _ArrayLikeTD64_co, + b: _ArrayLikeTD64_co, + axes: int | Tuple[_ShapeLike, _ShapeLike] = ..., +) -> NDArray[timedelta64]: ... +@overload +def tensordot( + a: _ArrayLikeObject_co, + b: _ArrayLikeObject_co, + axes: int | Tuple[_ShapeLike, _ShapeLike] = ..., +) -> NDArray[object_]: ... +@overload +def roll( + a: _ArrayLike[_SCT], + shift: _ShapeLike, + axis: None | _ShapeLike = ..., +) -> NDArray[_SCT]: ... +@overload def roll( a: ArrayLike, shift: _ShapeLike, - axis: Optional[_ShapeLike] = ..., -) -> ndarray: ... + axis: None | _ShapeLike = ..., +) -> NDArray[Any]: ... -def rollaxis(a: ndarray, axis: int, start: int = ...) -> ndarray: ... +def rollaxis( + a: NDArray[_SCT], + axis: int, + start: int = ..., +) -> NDArray[_SCT]: ... def moveaxis( - a: ndarray, + a: NDArray[_SCT], source: _ShapeLike, destination: _ShapeLike, -) -> ndarray: ... +) -> NDArray[_SCT]: ... +@overload def cross( - a: ArrayLike, - b: ArrayLike, + a: _ArrayLikeBool_co, + b: _ArrayLikeBool_co, axisa: int = ..., axisb: int = ..., axisc: int = ..., - axis: Optional[int] = ..., -) -> ndarray: ... + axis: None | int = ..., +) -> NoReturn: ... +@overload +def cross( + a: _ArrayLikeUInt_co, + b: _ArrayLikeUInt_co, + axisa: int = ..., + axisb: int = ..., + axisc: int = ..., + axis: None | int = ..., +) -> NDArray[unsignedinteger[Any]]: ... +@overload +def cross( + a: _ArrayLikeInt_co, + b: _ArrayLikeInt_co, + axisa: int = ..., + axisb: int = ..., + axisc: int = ..., + axis: None | int = ..., +) -> NDArray[signedinteger[Any]]: ... +@overload +def cross( + a: _ArrayLikeFloat_co, + b: _ArrayLikeFloat_co, + axisa: int = ..., + axisb: int = ..., + axisc: int = ..., + axis: None | int = ..., +) -> NDArray[floating[Any]]: ... +@overload +def cross( + a: _ArrayLikeComplex_co, + b: _ArrayLikeComplex_co, + axisa: int = ..., + axisb: int = ..., + axisc: int = ..., + axis: None | int = ..., +) -> NDArray[complexfloating[Any, Any]]: ... +@overload +def cross( + a: _ArrayLikeObject_co, + b: _ArrayLikeObject_co, + axisa: int = ..., + axisb: int = ..., + axisc: int = ..., + axis: None | int = ..., +) -> NDArray[object_]: ... @overload def indices( dimensions: Sequence[int], - dtype: DTypeLike = ..., + dtype: Type[int] = ..., sparse: Literal[False] = ..., -) -> ndarray: ... +) -> NDArray[int_]: ... @overload def indices( dimensions: Sequence[int], - dtype: DTypeLike = ..., + dtype: Type[int] = ..., sparse: Literal[True] = ..., -) -> Tuple[ndarray, ...]: ... +) -> Tuple[NDArray[int_], ...]: ... +@overload +def indices( + dimensions: Sequence[int], + dtype: _DTypeLike[_SCT], + sparse: Literal[False] = ..., +) -> NDArray[_SCT]: ... +@overload +def indices( + dimensions: Sequence[int], + dtype: _DTypeLike[_SCT], + sparse: Literal[True], +) -> Tuple[NDArray[_SCT], ...]: ... +@overload +def indices( + dimensions: Sequence[int], + dtype: DTypeLike, + sparse: Literal[False] = ..., +) -> NDArray[Any]: ... +@overload +def indices( + dimensions: Sequence[int], + dtype: DTypeLike, + sparse: Literal[True], +) -> Tuple[NDArray[Any], ...]: ... def fromfunction( function: Callable[..., _T], @@ -187,18 +571,39 @@ def fromfunction( **kwargs: Any, ) -> _T: ... -def isscalar(element: Any) -> bool: ... +def isscalar(element: object) -> TypeGuard[ + generic | bool | int | float | complex | str | bytes | memoryview +]: ... -def binary_repr(num: int, width: Optional[int] = ...) -> str: ... +def binary_repr(num: int, width: None | int = ...) -> str: ... -def base_repr(number: int, base: int = ..., padding: int = ...) -> str: ... 
+def base_repr( + number: SupportsAbs[float], + base: float = ..., + padding: SupportsIndex = ..., +) -> str: ... +@overload def identity( n: int, - dtype: DTypeLike = ..., + dtype: None = ..., + *, + like: ArrayLike = ..., +) -> NDArray[float64]: ... +@overload +def identity( + n: int, + dtype: _DTypeLike[_SCT], + *, + like: ArrayLike = ..., +) -> NDArray[_SCT]: ... +@overload +def identity( + n: int, + dtype: DTypeLike, *, like: ArrayLike = ..., -) -> ndarray: ... +) -> NDArray[Any]: ... def allclose( a: ArrayLike, @@ -208,13 +613,22 @@ def allclose( equal_nan: bool = ..., ) -> bool: ... +@overload +def isclose( + a: _ScalarLike_co, + b: _ScalarLike_co, + rtol: float = ..., + atol: float = ..., + equal_nan: bool = ..., +) -> bool_: ... +@overload def isclose( a: ArrayLike, b: ArrayLike, rtol: float = ..., atol: float = ..., equal_nan: bool = ..., -) -> Any: ... +) -> NDArray[bool_]: ... def array_equal(a1: ArrayLike, a2: ArrayLike, equal_nan: bool = ...) -> bool: ... diff --git a/numpy/core/numerictypes.py b/numpy/core/numerictypes.py index 12f424fd4..8e5de852b 100644 --- a/numpy/core/numerictypes.py +++ b/numpy/core/numerictypes.py @@ -80,12 +80,10 @@ Exported symbols include: """ import numbers -import warnings from numpy.core.multiarray import ( - typeinfo, ndarray, array, empty, dtype, datetime_data, - datetime_as_string, busday_offset, busday_count, is_busday, - busdaycalendar + ndarray, array, dtype, datetime_data, datetime_as_string, + busday_offset, busday_count, is_busday, busdaycalendar ) from numpy.core.overrides import set_module diff --git a/numpy/core/overrides.py b/numpy/core/overrides.py index e1fdd06f2..840cf38c9 100644 --- a/numpy/core/overrides.py +++ b/numpy/core/overrides.py @@ -2,7 +2,6 @@ import collections import functools import os -import textwrap from numpy.core._multiarray_umath import ( add_docstring, implement_array_function, _get_implementing_args) diff --git a/numpy/core/records.py b/numpy/core/records.py index fd5f1ab39..c014bc97c 100644 --- a/numpy/core/records.py +++ b/numpy/core/records.py @@ -41,7 +41,7 @@ from . import numeric as sb from . 
import numerictypes as nt from numpy.compat import os_fspath from numpy.core.overrides import set_module -from .arrayprint import get_printoptions +from .arrayprint import _get_legacy_print_mode # All of the functions allow formats to be a dtype __all__ = [ @@ -68,7 +68,7 @@ _byteorderconv = {'b':'>', 'i':'|'} # formats regular expression -# allows multidimension spec with a tuple syntax in front +# allows multidimensional spec with a tuple syntax in front # of the letter code '(2,3)f4' and ' ( 2 , 3 ) f4 ' # are equally allowed @@ -230,12 +230,12 @@ class record(nt.void): __module__ = 'numpy' def __repr__(self): - if get_printoptions()['legacy'] == '1.13': + if _get_legacy_print_mode() <= 113: return self.__str__() return super().__repr__() def __str__(self): - if get_printoptions()['legacy'] == '1.13': + if _get_legacy_print_mode() <= 113: return str(self.item()) return super().__str__() @@ -551,7 +551,7 @@ class recarray(ndarray): lst = "[], shape=%s" % (repr(self.shape),) lf = '\n'+' '*len(prefix) - if get_printoptions()['legacy'] == '1.13': + if _get_legacy_print_mode() <= 113: lf = ' ' + lf # trailing space return fmt % (lst, lf, repr_dtype) @@ -585,6 +585,7 @@ def _deprecate_shape_0_as_None(shape): return shape +@set_module("numpy.rec") def fromarrays(arrayList, dtype=None, shape=None, formats=None, names=None, titles=None, aligned=False, byteorder=None): """Create a record array from a (flat) list of arrays @@ -678,6 +679,8 @@ def fromarrays(arrayList, dtype=None, shape=None, formats=None, return _array + +@set_module("numpy.rec") def fromrecords(recList, dtype=None, shape=None, formats=None, names=None, titles=None, aligned=False, byteorder=None): """Create a recarray from a list of records in text form. @@ -762,6 +765,7 @@ def fromrecords(recList, dtype=None, shape=None, formats=None, names=None, return res +@set_module("numpy.rec") def fromstring(datastring, dtype=None, shape=None, offset=0, formats=None, names=None, titles=None, aligned=False, byteorder=None): r"""Create a record array from binary data @@ -844,6 +848,8 @@ def get_remaining_size(fd): finally: fd.seek(pos, 0) + +@set_module("numpy.rec") def fromfile(fd, dtype=None, shape=None, offset=0, formats=None, names=None, titles=None, aligned=False, byteorder=None): """Create an array from binary file data @@ -943,6 +949,8 @@ def fromfile(fd, dtype=None, shape=None, offset=0, formats=None, return _array + +@set_module("numpy.rec") def array(obj, dtype=None, shape=None, offset=0, strides=None, formats=None, names=None, titles=None, aligned=False, byteorder=None, copy=True): """ diff --git a/numpy/core/records.pyi b/numpy/core/records.pyi new file mode 100644 index 000000000..fda118276 --- /dev/null +++ b/numpy/core/records.pyi @@ -0,0 +1,183 @@ +import os +from typing import ( + List, + Sequence, + Any, + TypeVar, + Iterable, + overload, + Tuple, + Protocol, +) + +from numpy import ( + format_parser as format_parser, + record as record, + recarray as recarray, + dtype, + generic, + void, + _ByteOrder, + _SupportsBuffer, +) + +from numpy.typing import ( + ArrayLike, + DTypeLike, + NDArray, + _ShapeLike, + _ArrayLikeVoid_co, + _NestedSequence, +) + +_SCT = TypeVar("_SCT", bound=generic) + +_RecArray = recarray[Any, dtype[_SCT]] + +class _SupportsReadInto(Protocol): + def seek(self, offset: int, whence: int, /) -> object: ... + def tell(self, /) -> int: ... + def readinto(self, buffer: memoryview, /) -> int: ... 
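The _SupportsReadInto protocol above spells out the minimal stream interface that rec.fromfile actually touches: seek, tell, and readinto. Any object with those three methods should type-check, not only real file handles; a rough usage sketch with an in-memory io.BytesIO stream, which provides all three:

    import io
    import numpy as np

    rec = np.rec.fromrecords([(1, 2.0), (3, 4.0)], formats="i4,f8", names="a,b")
    buf = io.BytesIO(rec.tobytes())   # BytesIO satisfies seek/tell/readinto

    # the record count is inferred from the bytes remaining in the stream
    out = np.rec.fromfile(buf, formats="i4,f8", names="a,b")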
+ +__all__: List[str] + +@overload +def fromarrays( + arrayList: Iterable[ArrayLike], + dtype: DTypeLike = ..., + shape: None | _ShapeLike = ..., + formats: None = ..., + names: None = ..., + titles: None = ..., + aligned: bool = ..., + byteorder: None = ..., +) -> _RecArray[Any]: ... +@overload +def fromarrays( + arrayList: Iterable[ArrayLike], + dtype: None = ..., + shape: None | _ShapeLike = ..., + *, + formats: DTypeLike, + names: None | str | Sequence[str] = ..., + titles: None | str | Sequence[str] = ..., + aligned: bool = ..., + byteorder: None | _ByteOrder = ..., +) -> _RecArray[record]: ... + +@overload +def fromrecords( + recList: _ArrayLikeVoid_co | Tuple[Any, ...] | _NestedSequence[Tuple[Any, ...]], + dtype: DTypeLike = ..., + shape: None | _ShapeLike = ..., + formats: None = ..., + names: None = ..., + titles: None = ..., + aligned: bool = ..., + byteorder: None = ..., +) -> _RecArray[record]: ... +@overload +def fromrecords( + recList: _ArrayLikeVoid_co | Tuple[Any, ...] | _NestedSequence[Tuple[Any, ...]], + dtype: None = ..., + shape: None | _ShapeLike = ..., + *, + formats: DTypeLike, + names: None | str | Sequence[str] = ..., + titles: None | str | Sequence[str] = ..., + aligned: bool = ..., + byteorder: None | _ByteOrder = ..., +) -> _RecArray[record]: ... + +@overload +def fromstring( + datastring: _SupportsBuffer, + dtype: DTypeLike, + shape: None | _ShapeLike = ..., + offset: int = ..., + formats: None = ..., + names: None = ..., + titles: None = ..., + aligned: bool = ..., + byteorder: None = ..., +) -> _RecArray[record]: ... +@overload +def fromstring( + datastring: _SupportsBuffer, + dtype: None = ..., + shape: None | _ShapeLike = ..., + offset: int = ..., + *, + formats: DTypeLike, + names: None | str | Sequence[str] = ..., + titles: None | str | Sequence[str] = ..., + aligned: bool = ..., + byteorder: None | _ByteOrder = ..., +) -> _RecArray[record]: ... + +@overload +def fromfile( + fd: str | bytes | os.PathLike[str] | os.PathLike[bytes] | _SupportsReadInto, + dtype: DTypeLike, + shape: None | _ShapeLike = ..., + offset: int = ..., + formats: None = ..., + names: None = ..., + titles: None = ..., + aligned: bool = ..., + byteorder: None = ..., +) -> _RecArray[Any]: ... +@overload +def fromfile( + fd: str | bytes | os.PathLike[str] | os.PathLike[bytes] | _SupportsReadInto, + dtype: None = ..., + shape: None | _ShapeLike = ..., + offset: int = ..., + *, + formats: DTypeLike, + names: None | str | Sequence[str] = ..., + titles: None | str | Sequence[str] = ..., + aligned: bool = ..., + byteorder: None | _ByteOrder = ..., +) -> _RecArray[record]: ... + +@overload +def array( + obj: _SCT | NDArray[_SCT], + dtype: None = ..., + shape: None | _ShapeLike = ..., + offset: int = ..., + formats: None = ..., + names: None = ..., + titles: None = ..., + aligned: bool = ..., + byteorder: None = ..., + copy: bool = ..., +) -> _RecArray[_SCT]: ... +@overload +def array( + obj: ArrayLike, + dtype: DTypeLike, + shape: None | _ShapeLike = ..., + offset: int = ..., + formats: None = ..., + names: None = ..., + titles: None = ..., + aligned: bool = ..., + byteorder: None = ..., + copy: bool = ..., +) -> _RecArray[Any]: ... +@overload +def array( + obj: ArrayLike, + dtype: None = ..., + shape: None | _ShapeLike = ..., + offset: int = ..., + *, + formats: DTypeLike, + names: None | str | Sequence[str] = ..., + titles: None | str | Sequence[str] = ..., + aligned: bool = ..., + byteorder: None | _ByteOrder = ..., + copy: bool = ..., +) -> _RecArray[record]: ... 
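The array overloads that close out these stubs mirror the runtime dispatch in records.py: an ndarray with a known scalar type keeps that type, an explicit dtype yields an untyped record array, and keyword formats produces records. A short sketch of the first and last cases, assuming a plain structured input:

    import numpy as np

    s = np.array([(1, "x"), (2, "y")], dtype=[("n", "i4"), ("s", "U1")])
    r = np.rec.array(s)       # structured ndarray in, recarray view out
    print(r.n, r.s)           # fields are readable as attributes

    q = np.rec.array([(3, "z")], formats="i4,U1", names="n,s")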
diff --git a/numpy/core/setup.py b/numpy/core/setup.py index ba7d83787..3e1ed4c9b 100644 --- a/numpy/core/setup.py +++ b/numpy/core/setup.py @@ -5,6 +5,7 @@ import copy import warnings import platform import textwrap +import glob from os.path import join from numpy.distutils import log @@ -63,6 +64,20 @@ class CallOnceOnly: out = copy.deepcopy(pickle.loads(self._check_complex)) return out +def can_link_svml(): +    """The SVML library is supported only on the x86_64 architecture, and +    currently only on Linux. +    """ +    machine = platform.machine() +    system = platform.system() +    return "x86_64" in machine and system == "Linux" + +def check_svml_submodule(svmlpath): +    if not os.path.exists(svmlpath + "/README.md"): +        raise RuntimeError("Missing `SVML` submodule! Run `git submodule " +                           "update --init` to fix this.") +    return True + def pythonlib_dir(): """return path where libpython* is.""" if sys.platform == 'win32': @@ -455,6 +470,9 @@ def configuration(parent_package='',top_path=None): # Inline check inline = config_cmd.check_inline() + if can_link_svml(): + moredefs.append(('NPY_CAN_LINK_SVML', 1)) + # Use relaxed stride checking if NPY_RELAXED_STRIDES_CHECKING: moredefs.append(('NPY_RELAXED_STRIDES_CHECKING', 1)) @@ -496,7 +514,7 @@ def configuration(parent_package='',top_path=None): # add the guard to make sure config.h is never included directly, # but always through npy_config.h target_f.write(textwrap.dedent(""" - #ifndef _NPY_NPY_CONFIG_H_ + #ifndef NUMPY_CORE_SRC_COMMON_NPY_CONFIG_H_ #error config.h should never be included directly, include npy_config.h instead #endif """)) @@ -678,16 +696,24 @@ def configuration(parent_package='',top_path=None): join('src', 'npymath', 'halffloat.c') ] - # Must be true for CRT compilers but not MinGW/cygwin. See gh-9977. - # Intel and Clang also don't seem happy with /GL - is_msvc = (platform.platform().startswith('Windows') and - platform.python_compiler().startswith('MS')) + def gl_if_msvc(build_cmd): + """ Add the flag if we are using the MSVC compiler + + We can't determine this in the current scope, because the distutils + build command has not been initialized yet, so defer the calculation + to run while the library is being built.
+        """ +        if build_cmd.compiler.compiler_type == 'msvc': +            # explicitly disable whole-program optimization +            return ['/GL-'] +        return [] + config.add_installed_library('npymath', sources=npymath_sources + [get_mathlib_info], install_dir='lib', build_info={ 'include_dirs' : [],  # empty list required for creating npy_math_internal.h -                'extra_compiler_args' : (['/GL-'] if is_msvc else []), +                'extra_compiler_args': [gl_if_msvc], }) config.add_npy_pkg_config("npymath.ini.in", "lib/npy-pkg-config", subst_dict) @@ -727,6 +753,7 @@ def configuration(parent_package='',top_path=None): join('src', 'common', 'npy_import.h'), join('src', 'common', 'npy_hashtable.h'), join('src', 'common', 'npy_longdouble.h'), + join('src', 'common', 'npy_svml.h'), join('src', 'common', 'templ_common.h.src'), join('src', 'common', 'ucsnarrow.h'), join('src', 'common', 'ufunc_override.h'), @@ -791,6 +818,7 @@ join('src', 'multiarray', 'dragon4.h'), join('src', 'multiarray', 'einsum_debug.h'), join('src', 'multiarray', 'einsum_sumprod.h'), + join('src', 'multiarray', 'experimental_public_dtype_api.h'), join('src', 'multiarray', 'getset.h'), join('src', 'multiarray', 'hashdescr.h'), join('src', 'multiarray', 'iterators.h'), @@ -858,6 +886,7 @@ join('src', 'multiarray', 'dtype_transfer.c'), join('src', 'multiarray', 'einsum.c.src'), join('src', 'multiarray', 'einsum_sumprod.c.src'), + join('src', 'multiarray', 'experimental_public_dtype_api.c'), join('src', 'multiarray', 'flagsobject.c'), join('src', 'multiarray', 'getset.c'), join('src', 'multiarray', 'hashdescr.c'), @@ -888,7 +917,7 @@ join('src', 'npysort', 'mergesort.c.src'), join('src', 'npysort', 'timsort.c.src'), join('src', 'npysort', 'heapsort.c.src'), - join('src', 'npysort', 'radixsort.c.src'), + join('src', 'npysort', 'radixsort.cpp'), join('src', 'common', 'npy_partition.h.src'), join('src', 'npysort', 'selection.c.src'), join('src', 'common', 'npy_binsearch.h.src'), @@ -923,11 +952,12 @@ join('src', 'umath', 'loops_arithm_fp.dispatch.c.src'), join('src', 'umath', 'loops_arithmetic.dispatch.c.src'), join('src', 'umath', 'loops_trigonometric.dispatch.c.src'), + join('src', 'umath', 'loops_umath_fp.dispatch.c.src'), join('src', 'umath', 'loops_exponent_log.dispatch.c.src'), join('src', 'umath', 'matmul.h.src'), join('src', 'umath', 'matmul.c.src'), - join('src', 'umath', 'clip.h.src'), - join('src', 'umath', 'clip.c.src'), + join('src', 'umath', 'clip.h'), + join('src', 'umath', 'clip.cpp'), join('src', 'umath', 'dispatching.c'), join('src', 'umath', 'legacy_array_method.c'), join('src', 'umath', 'ufunc_object.c'), @@ -951,7 +981,15 @@ join(codegen_dir, 'generate_ufunc_api.py'), ] + svml_path = join('numpy', 'core', 'src', 'umath', 'svml') + svml_objs = [] + if can_link_svml() and check_svml_submodule(svml_path): + svml_objs = glob.glob(svml_path + '/**/*.s', recursive=True) + config.add_extension('_multiarray_umath', + # Forcing the C language even though we have C++ sources. + # It forces the C linker and doesn't link the C++ runtime.
+ language = 'c', sources=multiarray_src + umath_src + common_src + [generate_config_h, @@ -965,7 +1003,12 @@ def configuration(parent_package='',top_path=None): depends=deps + multiarray_deps + umath_deps + common_deps, libraries=['npymath'], - extra_info=extra_info) + extra_objects=svml_objs, + extra_info=extra_info, + extra_cxx_compile_args=['-std=c++11', + '-D__STDC_VERSION__=0', + '-fno-exceptions', + '-fno-rtti']) ####################################################################### # umath_tests module # diff --git a/numpy/core/setup_common.py b/numpy/core/setup_common.py index 85c8f16d1..70e8fc897 100644 --- a/numpy/core/setup_common.py +++ b/numpy/core/setup_common.py @@ -43,8 +43,8 @@ C_ABI_VERSION = 0x01000009 # 0x0000000d - 1.19.x # 0x0000000e - 1.20.x # 0x0000000e - 1.21.x -# 0x0000000e - 1.22.x -C_API_VERSION = 0x0000000e +# 0x0000000f - 1.22.x +C_API_VERSION = 0x0000000f class MismatchCAPIWarning(Warning): pass diff --git a/numpy/core/shape_base.pyi b/numpy/core/shape_base.pyi index d7914697d..159ad2781 100644 --- a/numpy/core/shape_base.pyi +++ b/numpy/core/shape_base.pyi @@ -1,12 +1,12 @@ from typing import TypeVar, overload, List, Sequence, Any, SupportsIndex from numpy import generic, dtype -from numpy.typing import ArrayLike, NDArray, _NestedSequence, _SupportsArray +from numpy.typing import ArrayLike, NDArray, _FiniteNestedSequence, _SupportsArray _SCT = TypeVar("_SCT", bound=generic) _ArrayType = TypeVar("_ArrayType", bound=NDArray[Any]) -_ArrayLike = _NestedSequence[_SupportsArray[dtype[_SCT]]] +_ArrayLike = _FiniteNestedSequence[_SupportsArray[dtype[_SCT]]] __all__: List[str] diff --git a/numpy/core/src/_simd/_simd_inc.h.src b/numpy/core/src/_simd/_simd_inc.h.src index 9858fc0dc..fbdf982c2 100644 --- a/numpy/core/src/_simd/_simd_inc.h.src +++ b/numpy/core/src/_simd/_simd_inc.h.src @@ -113,7 +113,7 @@ typedef struct int is_scalar:1; // returns '1' if the type represent a vector int is_vector:1; - // returns the len of multi-vector if the type reprsent x2 or x3 vector + // returns the len of multi-vector if the type represent x2 or x3 vector // otherwise returns 0, e.g. returns 2 if data type is simd_data_vu8x2 int is_vectorx; // returns the equivalent scalar data type e.g. simd_data_vu8 -> simd_data_u8 diff --git a/numpy/core/src/common/.doxyfile b/numpy/core/src/common/.doxyfile new file mode 100644 index 000000000..462cbbcfa --- /dev/null +++ b/numpy/core/src/common/.doxyfile @@ -0,0 +1 @@ +INCLUDE_PATH += @CUR_DIR diff --git a/numpy/core/src/common/array_assign.c b/numpy/core/src/common/array_assign.c index c55f6bdb4..b7495fc09 100644 --- a/numpy/core/src/common/array_assign.c +++ b/numpy/core/src/common/array_assign.c @@ -7,12 +7,12 @@ * * See LICENSE.txt for the license. 
*/ +#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#define _MULTIARRAYMODULE #define PY_SSIZE_T_CLEAN #include <Python.h> -#define NPY_NO_DEPRECATED_API NPY_API_VERSION -#define _MULTIARRAYMODULE #include <numpy/ndarraytypes.h> #include "npy_config.h" #include "npy_pycompat.h" diff --git a/numpy/core/src/common/array_assign.h b/numpy/core/src/common/array_assign.h index f5d884dd9..8a28ed1d3 100644 --- a/numpy/core/src/common/array_assign.h +++ b/numpy/core/src/common/array_assign.h @@ -1,5 +1,5 @@ -#ifndef _NPY_PRIVATE__ARRAY_ASSIGN_H_ -#define _NPY_PRIVATE__ARRAY_ASSIGN_H_ +#ifndef NUMPY_CORE_SRC_COMMON_ARRAY_ASSIGN_H_ +#define NUMPY_CORE_SRC_COMMON_ARRAY_ASSIGN_H_ /* * An array assignment function for copying arrays, treating the @@ -115,4 +115,4 @@ NPY_NO_EXPORT int arrays_overlap(PyArrayObject *arr1, PyArrayObject *arr2); -#endif +#endif /* NUMPY_CORE_SRC_COMMON_ARRAY_ASSIGN_H_ */ diff --git a/numpy/core/src/common/binop_override.h b/numpy/core/src/common/binop_override.h index c5e7ab808..61bc05ef3 100644 --- a/numpy/core/src/common/binop_override.h +++ b/numpy/core/src/common/binop_override.h @@ -1,5 +1,5 @@ -#ifndef __BINOP_OVERRIDE_H -#define __BINOP_OVERRIDE_H +#ifndef NUMPY_CORE_SRC_COMMON_BINOP_OVERRIDE_H_ +#define NUMPY_CORE_SRC_COMMON_BINOP_OVERRIDE_H_ #include <string.h> #include <Python.h> @@ -212,4 +212,4 @@ binop_should_defer(PyObject *self, PyObject *other, int inplace) } \ } while (0) -#endif +#endif /* NUMPY_CORE_SRC_COMMON_BINOP_OVERRIDE_H_ */ diff --git a/numpy/core/src/common/cblasfuncs.c b/numpy/core/src/common/cblasfuncs.c index e78587de0..714636782 100644 --- a/numpy/core/src/common/cblasfuncs.c +++ b/numpy/core/src/common/cblasfuncs.c @@ -2,17 +2,19 @@ * This module provides a BLAS optimized matrix multiply, * inner product and dot for numpy arrays */ - #define NPY_NO_DEPRECATED_API NPY_API_VERSION #define _MULTIARRAYMODULE +#define PY_SSIZE_T_CLEAN #include <Python.h> -#include <assert.h> -#include <numpy/arrayobject.h> + +#include "numpy/arrayobject.h" #include "npy_cblas.h" #include "arraytypes.h" #include "common.h" +#include <assert.h> + static const double oneD[2] = {1.0, 0.0}, zeroD[2] = {0.0, 0.0}; static const float oneF[2] = {1.0, 0.0}, zeroF[2] = {0.0, 0.0}; diff --git a/numpy/core/src/common/cblasfuncs.h b/numpy/core/src/common/cblasfuncs.h index 66ce4ca5b..71c533f36 100644 --- a/numpy/core/src/common/cblasfuncs.h +++ b/numpy/core/src/common/cblasfuncs.h @@ -1,7 +1,7 @@ -#ifndef _NPY_CBLASFUNCS_H_ -#define _NPY_CBLASFUNCS_H_ +#ifndef NUMPY_CORE_SRC_COMMON_CBLASFUNCS_H_ +#define NUMPY_CORE_SRC_COMMON_CBLASFUNCS_H_ NPY_NO_EXPORT PyObject * cblas_matrixproduct(int, PyArrayObject *, PyArrayObject *, PyArrayObject *); -#endif +#endif /* NUMPY_CORE_SRC_COMMON_CBLASFUNCS_H_ */ diff --git a/numpy/core/src/common/get_attr_string.h b/numpy/core/src/common/get_attr_string.h index 8b7cf1c5b..3b23b2e66 100644 --- a/numpy/core/src/common/get_attr_string.h +++ b/numpy/core/src/common/get_attr_string.h @@ -1,5 +1,5 @@ -#ifndef __GET_ATTR_STRING_H -#define __GET_ATTR_STRING_H +#ifndef NUMPY_CORE_SRC_COMMON_GET_ATTR_STRING_H_ +#define NUMPY_CORE_SRC_COMMON_GET_ATTR_STRING_H_ static NPY_INLINE npy_bool _is_basic_python_type(PyTypeObject *tp) @@ -113,4 +113,4 @@ PyArray_LookupSpecial_OnInstance(PyObject *obj, char const *name) return maybe_get_attr(obj, name); } -#endif +#endif /* NUMPY_CORE_SRC_COMMON_GET_ATTR_STRING_H_ */ diff --git a/numpy/core/src/common/lowlevel_strided_loops.h b/numpy/core/src/common/lowlevel_strided_loops.h index 3df054b40..ad86c0489 100644 --- 
a/numpy/core/src/common/lowlevel_strided_loops.h +++ b/numpy/core/src/common/lowlevel_strided_loops.h @@ -1,8 +1,8 @@ -#ifndef __LOWLEVEL_STRIDED_LOOPS_H -#define __LOWLEVEL_STRIDED_LOOPS_H +#ifndef NUMPY_CORE_SRC_COMMON_LOWLEVEL_STRIDED_LOOPS_H_ +#define NUMPY_CORE_SRC_COMMON_LOWLEVEL_STRIDED_LOOPS_H_ #include "common.h" -#include <npy_config.h> -#include <array_method.h> +#include "npy_config.h" +#include "array_method.h" #include "dtype_transfer.h" #include "mem_overlap.h" @@ -770,4 +770,4 @@ PyArray_EQUIVALENTLY_ITERABLE_OVERLAP_OK(PyArrayObject *arr1, PyArrayObject *arr stride2 = PyArray_TRIVIAL_PAIR_ITERATION_STRIDE(size2, arr2); \ } -#endif +#endif /* NUMPY_CORE_SRC_COMMON_LOWLEVEL_STRIDED_LOOPS_H_ */ diff --git a/numpy/core/src/common/mem_overlap.c b/numpy/core/src/common/mem_overlap.c index 9da33bfc1..2632e1413 100644 --- a/numpy/core/src/common/mem_overlap.c +++ b/numpy/core/src/common/mem_overlap.c @@ -181,9 +181,11 @@ All rights reserved. Licensed under 3-clause BSD license, see LICENSE.txt. */ +#define NPY_NO_DEPRECATED_API NPY_API_VERSION + +#define PY_SSIZE_T_CLEAN #include <Python.h> -#define NPY_NO_DEPRECATED_API NPY_API_VERSION #include "numpy/ndarraytypes.h" #include "mem_overlap.h" #include "npy_extint128.h" diff --git a/numpy/core/src/common/mem_overlap.h b/numpy/core/src/common/mem_overlap.h index 8044f1663..3aa4f798b 100644 --- a/numpy/core/src/common/mem_overlap.h +++ b/numpy/core/src/common/mem_overlap.h @@ -1,5 +1,5 @@ -#ifndef MEM_OVERLAP_H_ -#define MEM_OVERLAP_H_ +#ifndef NUMPY_CORE_SRC_COMMON_MEM_OVERLAP_H_ +#define NUMPY_CORE_SRC_COMMON_MEM_OVERLAP_H_ #include "npy_config.h" #include "numpy/ndarraytypes.h" @@ -46,5 +46,4 @@ offset_bounds_from_strides(const int itemsize, const int nd, const npy_intp *dims, const npy_intp *strides, npy_intp *lower_offset, npy_intp *upper_offset); -#endif - +#endif /* NUMPY_CORE_SRC_COMMON_MEM_OVERLAP_H_ */ diff --git a/numpy/core/src/common/npy_argparse.c b/numpy/core/src/common/npy_argparse.c index 8460a38e6..76123c1ed 100644 --- a/numpy/core/src/common/npy_argparse.c +++ b/numpy/core/src/common/npy_argparse.c @@ -1,8 +1,9 @@ -#include "Python.h" - #define NPY_NO_DEPRECATED_API NPY_API_VERSION #define _MULTIARRAYMODULE +#define PY_SSIZE_T_CLEAN +#include <Python.h> + #include "numpy/ndarraytypes.h" #include "npy_argparse.h" #include "npy_pycompat.h" diff --git a/numpy/core/src/common/npy_argparse.h b/numpy/core/src/common/npy_argparse.h index 5da535c91..f4122103d 100644 --- a/numpy/core/src/common/npy_argparse.h +++ b/numpy/core/src/common/npy_argparse.h @@ -1,7 +1,7 @@ -#ifndef _NPY_ARGPARSE_H -#define _NPY_ARGPARSE_H +#ifndef NUMPY_CORE_SRC_COMMON_NPY_ARGPARSE_H +#define NUMPY_CORE_SRC_COMMON_NPY_ARGPARSE_H -#include "Python.h" +#include <Python.h> #include "numpy/ndarraytypes.h" /* @@ -93,4 +93,4 @@ _npy_parse_arguments(const char *funcname, _npy_parse_arguments(funcname, &__argparse_cache, \ args, len_args, kwnames, __VA_ARGS__) -#endif /* _NPY_ARGPARSE_H */ +#endif /* NUMPY_CORE_SRC_COMMON_NPY_ARGPARSE_H */ diff --git a/numpy/core/src/common/npy_cblas.h b/numpy/core/src/common/npy_cblas.h index 072993ec2..30fec1a65 100644 --- a/numpy/core/src/common/npy_cblas.h +++ b/numpy/core/src/common/npy_cblas.h @@ -3,8 +3,8 @@ * because not all providers of cblas provide cblas.h. For instance, MKL provides * mkl_cblas.h and also typedefs the CBLAS_XXX enums. 
*/ -#ifndef _NPY_CBLAS_H_ -#define _NPY_CBLAS_H_ +#ifndef NUMPY_CORE_SRC_COMMON_NPY_CBLAS_H_ +#define NUMPY_CORE_SRC_COMMON_NPY_CBLAS_H_ #include <stddef.h> @@ -98,4 +98,4 @@ blas_stride(npy_intp stride, unsigned itemsize) } #endif -#endif +#endif /* NUMPY_CORE_SRC_COMMON_NPY_CBLAS_H_ */ diff --git a/numpy/core/src/common/npy_cblas_base.h b/numpy/core/src/common/npy_cblas_base.h index 792b6f09e..12dfb2e78 100644 --- a/numpy/core/src/common/npy_cblas_base.h +++ b/numpy/core/src/common/npy_cblas_base.h @@ -9,6 +9,9 @@ * Prototypes for level 1 BLAS functions (complex are recast as routines) * =========================================================================== */ +#ifndef NUMPY_CORE_SRC_COMMON_NPY_CBLAS_BASE_H_ +#define NUMPY_CORE_SRC_COMMON_NPY_CBLAS_BASE_H_ + float BLASNAME(cblas_sdsdot)(const BLASINT N, const float alpha, const float *X, const BLASINT incX, const float *Y, const BLASINT incY); double BLASNAME(cblas_dsdot)(const BLASINT N, const float *X, const BLASINT incX, const float *Y, @@ -555,3 +558,5 @@ void BLASNAME(cblas_zher2k)(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO void *C, const BLASINT ldc); void BLASNAME(cblas_xerbla)(BLASINT p, const char *rout, const char *form, ...); + +#endif /* NUMPY_CORE_SRC_COMMON_NPY_CBLAS_BASE_H_ */ diff --git a/numpy/core/src/common/npy_config.h b/numpy/core/src/common/npy_config.h index c6de0cd30..fd0f1855c 100644 --- a/numpy/core/src/common/npy_config.h +++ b/numpy/core/src/common/npy_config.h @@ -1,5 +1,5 @@ -#ifndef _NPY_NPY_CONFIG_H_ -#define _NPY_NPY_CONFIG_H_ +#ifndef NUMPY_CORE_SRC_COMMON_NPY_CONFIG_H_ +#define NUMPY_CORE_SRC_COMMON_NPY_CONFIG_H_ #include "config.h" #include "npy_cpu_features.h" @@ -167,9 +167,9 @@ #undef HAVE_CACOSHF #undef HAVE_CACOSHL -#endif /* __GLIBC_PREREQ(2, 18) */ -#endif /* defined(__GLIBC_PREREQ) */ +#endif /* __GLIBC_PREREQ(2, 18) */ +#endif /* defined(__GLIBC_PREREQ) */ -#endif /* defined(HAVE_FEATURES_H) */ +#endif /* defined(HAVE_FEATURES_H) */ -#endif +#endif /* NUMPY_CORE_SRC_COMMON_NPY_CONFIG_H_ */ diff --git a/numpy/core/src/common/npy_cpu_dispatch.h b/numpy/core/src/common/npy_cpu_dispatch.h index 09e00badf..e814cd425 100644 --- a/numpy/core/src/common/npy_cpu_dispatch.h +++ b/numpy/core/src/common/npy_cpu_dispatch.h @@ -1,5 +1,5 @@ -#ifndef NPY_CPU_DISPATCH_H_ -#define NPY_CPU_DISPATCH_H_ +#ifndef NUMPY_CORE_SRC_COMMON_NPY_CPU_DISPATCH_H_ +#define NUMPY_CORE_SRC_COMMON_NPY_CPU_DISPATCH_H_ /** * This file is part of the NumPy CPU dispatcher. Please have a look at doc/reference/simd-optimizations.html * To get a better understanding of the mechanism behind it. @@ -196,7 +196,7 @@ * Example: * Assume we have a dispatch-able source exporting the following function: * - * @targets baseline avx2 avx512_skx // configration statements + * @targets baseline avx2 avx512_skx // configuration statements * * void NPY_CPU_DISPATCH_CURFX(dispatch_me)(const int *src, int *dst) * { @@ -262,4 +262,4 @@ #define NPY_CPU_DISPATCH_CALL_ALL_BASE_CB_(LEFT, ...) 
\ ( LEFT __VA_ARGS__ ) -#endif // NPY_CPU_DISPATCH_H_ +#endif // NUMPY_CORE_SRC_COMMON_NPY_CPU_DISPATCH_H_ diff --git a/numpy/core/src/common/npy_cpu_features.c.src b/numpy/core/src/common/npy_cpu_features.c.src index 1e0f4a571..a2383c45f 100644 --- a/numpy/core/src/common/npy_cpu_features.c.src +++ b/numpy/core/src/common/npy_cpu_features.c.src @@ -230,7 +230,7 @@ npy__cpu_try_disable_env(void) notsupp_cur[flen] = ' '; notsupp_cur += flen + 1; goto next; } - // Finaly we can disable it + // Finally we can disable it npy__cpu_have[feature_id] = 0; next: feature = strtok(NULL, delim); diff --git a/numpy/core/src/common/npy_cpu_features.h b/numpy/core/src/common/npy_cpu_features.h index 28dd00032..ce1fc822a 100644 --- a/numpy/core/src/common/npy_cpu_features.h +++ b/numpy/core/src/common/npy_cpu_features.h @@ -1,5 +1,5 @@ -#ifndef _NPY_CPU_FEATURES_H_ -#define _NPY_CPU_FEATURES_H_ +#ifndef NUMPY_CORE_SRC_COMMON_NPY_CPU_FEATURES_H_ +#define NUMPY_CORE_SRC_COMMON_NPY_CPU_FEATURES_H_ #include <Python.h> // for PyObject #include "numpy/numpyconfig.h" // for NPY_VISIBILITY_HIDDEN @@ -168,4 +168,4 @@ npy_cpu_dispatch_list(void); } #endif -#endif // _NPY_CPU_FEATURES_H_ +#endif // NUMPY_CORE_SRC_COMMON_NPY_CPU_FEATURES_H_ diff --git a/numpy/core/src/common/npy_cpuinfo_parser.h b/numpy/core/src/common/npy_cpuinfo_parser.h index f4540f6ab..364873a23 100644 --- a/numpy/core/src/common/npy_cpuinfo_parser.h +++ b/numpy/core/src/common/npy_cpuinfo_parser.h @@ -25,8 +25,8 @@ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ -#ifndef __NPY_CPUINFO_PARSER_H__ -#define __NPY_CPUINFO_PARSER_H__ +#ifndef NUMPY_CORE_SRC_COMMON_NPY_CPUINFO_PARSER_H_ +#define NUMPY_CORE_SRC_COMMON_NPY_CPUINFO_PARSER_H_ #include <errno.h> #include <stdio.h> #include <fcntl.h> @@ -123,7 +123,7 @@ read_file(const char* pathname, char* buffer, size_t buffsize) } /* - * Extract the content of a the first occurence of a given field in + * Extract the content of the first occurrence of a given field in * the content of /proc/cpuinfo and return it as a heap-allocated * string that must be freed by the caller. * @@ -138,7 +138,7 @@ extract_cpuinfo_field(const char* buffer, int buflen, const char* field) int len; const char *p, *q; - /* Look for first field occurence, and ensures it starts the line. */ + /* Look for the first field occurrence, and ensure it starts the line. */ p = buffer; for (;;) { p = memmem(p, bufend-p, field, fieldlen); @@ -259,4 +259,4 @@ get_feature_from_proc_cpuinfo(unsigned long *hwcap, unsigned long *hwcap2) { *hwcap2 |= has_list_item(cpuFeatures, "crc32") ? 
NPY__HWCAP2_CRC32 : 0; return 1; } -#endif +#endif /* NUMPY_CORE_SRC_COMMON_NPY_CPUINFO_PARSER_H_ */ diff --git a/numpy/core/src/common/npy_ctypes.h b/numpy/core/src/common/npy_ctypes.h index c0cc4f1a1..05761cad3 100644 --- a/numpy/core/src/common/npy_ctypes.h +++ b/numpy/core/src/common/npy_ctypes.h @@ -1,5 +1,5 @@ -#ifndef NPY_CTYPES_H -#define NPY_CTYPES_H +#ifndef NUMPY_CORE_SRC_COMMON_NPY_CTYPES_H_ +#define NUMPY_CORE_SRC_COMMON_NPY_CTYPES_H_ #include <Python.h> @@ -47,4 +47,4 @@ fail: return 0; } -#endif +#endif /* NUMPY_CORE_SRC_COMMON_NPY_CTYPES_H_ */ diff --git a/numpy/core/src/common/npy_extint128.h b/numpy/core/src/common/npy_extint128.h index a887ff317..d563c2ac8 100644 --- a/numpy/core/src/common/npy_extint128.h +++ b/numpy/core/src/common/npy_extint128.h @@ -1,5 +1,5 @@ -#ifndef NPY_EXTINT128_H_ -#define NPY_EXTINT128_H_ +#ifndef NUMPY_CORE_SRC_COMMON_NPY_EXTINT128_H_ +#define NUMPY_CORE_SRC_COMMON_NPY_EXTINT128_H_ typedef struct { @@ -314,4 +314,4 @@ ceildiv_128_64(npy_extint128_t a, npy_int64 b) return result; } -#endif +#endif /* NUMPY_CORE_SRC_COMMON_NPY_EXTINT128_H_ */ diff --git a/numpy/core/src/common/npy_fpmath.h b/numpy/core/src/common/npy_fpmath.h index dbb3fb23d..27e9ea3f4 100644 --- a/numpy/core/src/common/npy_fpmath.h +++ b/numpy/core/src/common/npy_fpmath.h @@ -1,5 +1,5 @@ -#ifndef _NPY_NPY_FPMATH_H_ -#define _NPY_NPY_FPMATH_H_ +#ifndef NUMPY_CORE_SRC_COMMON_NPY_NPY_FPMATH_H_ +#define NUMPY_CORE_SRC_COMMON_NPY_NPY_FPMATH_H_ #include "npy_config.h" @@ -27,4 +27,4 @@ #define HAVE_LDOUBLE_DOUBLE_DOUBLE_BE #endif -#endif +#endif /* NUMPY_CORE_SRC_COMMON_NPY_NPY_FPMATH_H_ */ diff --git a/numpy/core/src/common/npy_hashtable.h b/numpy/core/src/common/npy_hashtable.h index 5f11d2c1d..a0bf81967 100644 --- a/numpy/core/src/common/npy_hashtable.h +++ b/numpy/core/src/common/npy_hashtable.h @@ -1,5 +1,5 @@ -#ifndef _NPY_NPY_HASHTABLE_H -#define _NPY_NPY_HASHTABLE_H +#ifndef NUMPY_CORE_SRC_COMMON_NPY_NPY_HASHTABLE_H_ +#define NUMPY_CORE_SRC_COMMON_NPY_NPY_HASHTABLE_H_ #include <Python.h> @@ -29,4 +29,4 @@ PyArrayIdentityHash_New(int key_len); NPY_NO_EXPORT void PyArrayIdentityHash_Dealloc(PyArrayIdentityHash *tb); -#endif /* _NPY_NPY_HASHTABLE_H */ +#endif /* NUMPY_CORE_SRC_COMMON_NPY_NPY_HASHTABLE_H_ */ diff --git a/numpy/core/src/common/npy_import.h b/numpy/core/src/common/npy_import.h index f485514d1..f36b6924a 100644 --- a/numpy/core/src/common/npy_import.h +++ b/numpy/core/src/common/npy_import.h @@ -1,5 +1,5 @@ -#ifndef NPY_IMPORT_H -#define NPY_IMPORT_H +#ifndef NUMPY_CORE_SRC_COMMON_NPY_IMPORT_H_ +#define NUMPY_CORE_SRC_COMMON_NPY_IMPORT_H_ #include <Python.h> @@ -29,4 +29,4 @@ npy_cache_import(const char *module, const char *attr, PyObject **cache) } } -#endif +#endif /* NUMPY_CORE_SRC_COMMON_NPY_IMPORT_H_ */ diff --git a/numpy/core/src/common/npy_longdouble.c b/numpy/core/src/common/npy_longdouble.c index 260e02a64..38dfd325c 100644 --- a/numpy/core/src/common/npy_longdouble.c +++ b/numpy/core/src/common/npy_longdouble.c @@ -1,8 +1,9 @@ -#include <Python.h> - #define NPY_NO_DEPRECATED_API NPY_API_VERSION #define _MULTIARRAYMODULE +#define PY_SSIZE_T_CLEAN +#include <Python.h> + #include "numpy/ndarraytypes.h" #include "numpy/npy_math.h" #include "npy_pycompat.h" diff --git a/numpy/core/src/common/npy_longdouble.h b/numpy/core/src/common/npy_longdouble.h index 01db06de7..cf8b37bc9 100644 --- a/numpy/core/src/common/npy_longdouble.h +++ b/numpy/core/src/common/npy_longdouble.h @@ -1,5 +1,5 @@ -#ifndef __NPY_LONGDOUBLE_H -#define __NPY_LONGDOUBLE_H +#ifndef 
NUMPY_CORE_SRC_COMMON_NPY_LONGDOUBLE_H_ +#define NUMPY_CORE_SRC_COMMON_NPY_LONGDOUBLE_H_ #include "npy_config.h" #include "numpy/ndarraytypes.h" @@ -24,4 +24,4 @@ npy_longdouble_to_PyLong(npy_longdouble ldval); NPY_VISIBILITY_HIDDEN npy_longdouble npy_longdouble_from_PyLong(PyObject *long_obj); -#endif +#endif /* NUMPY_CORE_SRC_COMMON_NPY_LONGDOUBLE_H_ */ diff --git a/numpy/core/src/common/npy_pycompat.h b/numpy/core/src/common/npy_pycompat.h index 9e94a9710..6641cd591 100644 --- a/numpy/core/src/common/npy_pycompat.h +++ b/numpy/core/src/common/npy_pycompat.h @@ -1,5 +1,5 @@ -#ifndef _NPY_PYCOMPAT_H_ -#define _NPY_PYCOMPAT_H_ +#ifndef NUMPY_CORE_SRC_COMMON_NPY_PYCOMPAT_H_ +#define NUMPY_CORE_SRC_COMMON_NPY_PYCOMPAT_H_ #include "numpy/npy_3kcompat.h" @@ -19,4 +19,4 @@ Npy_HashDouble(PyObject *NPY_UNUSED(identity), double val) #endif -#endif /* _NPY_COMPAT_H_ */ +#endif /* NUMPY_CORE_SRC_COMMON_NPY_PYCOMPAT_H_ */ diff --git a/numpy/core/src/common/npy_sort.h.src b/numpy/core/src/common/npy_sort.h.src index ddbde0c9b..b4a1e9b0c 100644 --- a/numpy/core/src/common/npy_sort.h.src +++ b/numpy/core/src/common/npy_sort.h.src @@ -49,9 +49,14 @@ NPY_NO_EXPORT int atimsort_@suff@(void *vec, npy_intp *ind, npy_intp cnt, void * * #suff = bool, byte, ubyte, short, ushort, int, uint, long, ulong, * longlong, ulonglong# */ - +#ifdef __cplusplus +extern "C" { +#endif NPY_NO_EXPORT int radixsort_@suff@(void *vec, npy_intp cnt, void *null); NPY_NO_EXPORT int aradixsort_@suff@(void *vec, npy_intp *ind, npy_intp cnt, void *null); +#ifdef __cplusplus +} +#endif /**end repeat**/ diff --git a/numpy/core/src/common/npy_svml.h b/numpy/core/src/common/npy_svml.h new file mode 100644 index 000000000..4292f7090 --- /dev/null +++ b/numpy/core/src/common/npy_svml.h @@ -0,0 +1,41 @@ +#if NPY_SIMD && defined(NPY_HAVE_AVX512_SKX) && defined(NPY_CAN_LINK_SVML) +extern __m512 __svml_exp2f16(__m512 x); +extern __m512 __svml_log2f16(__m512 x); +extern __m512 __svml_log10f16(__m512 x); +extern __m512 __svml_expm1f16(__m512 x); +extern __m512 __svml_log1pf16(__m512 x); +extern __m512 __svml_cbrtf16(__m512 x); +extern __m512 __svml_sinf16(__m512 x); +extern __m512 __svml_cosf16(__m512 x); +extern __m512 __svml_tanf16(__m512 x); +extern __m512 __svml_asinf16(__m512 x); +extern __m512 __svml_acosf16(__m512 x); +extern __m512 __svml_atanf16(__m512 x); +extern __m512 __svml_atan2f16(__m512 x); +extern __m512 __svml_sinhf16(__m512 x); +extern __m512 __svml_coshf16(__m512 x); +extern __m512 __svml_tanhf16(__m512 x); +extern __m512 __svml_asinhf16(__m512 x); +extern __m512 __svml_acoshf16(__m512 x); +extern __m512 __svml_atanhf16(__m512 x); + +extern __m512d __svml_exp28(__m512d x); +extern __m512d __svml_log28(__m512d x); +extern __m512d __svml_log108(__m512d x); +extern __m512d __svml_expm18(__m512d x); +extern __m512d __svml_log1p8(__m512d x); +extern __m512d __svml_cbrt8(__m512d x); +extern __m512d __svml_sin8(__m512d x); +extern __m512d __svml_cos8(__m512d x); +extern __m512d __svml_tan8(__m512d x); +extern __m512d __svml_asin8(__m512d x); +extern __m512d __svml_acos8(__m512d x); +extern __m512d __svml_atan8(__m512d x); +extern __m512d __svml_atan28(__m512d x); +extern __m512d __svml_sinh8(__m512d x); +extern __m512d __svml_cosh8(__m512d x); +extern __m512d __svml_tanh8(__m512d x); +extern __m512d __svml_asinh8(__m512d x); +extern __m512d __svml_acosh8(__m512d x); +extern __m512d __svml_atanh8(__m512d x); +#endif diff --git a/numpy/core/src/common/numpy_tag.h b/numpy/core/src/common/numpy_tag.h new file mode 100644 index 
000000000..dc8d5286b --- /dev/null +++ b/numpy/core/src/common/numpy_tag.h @@ -0,0 +1,78 @@ +#ifndef _NPY_COMMON_TAG_H_ +#define _NPY_COMMON_TAG_H_ + +namespace npy { + +struct integral_tag { +}; +struct floating_point_tag { +}; +struct complex_tag { +}; +struct date_tag { +}; + +struct bool_tag : integral_tag { + using type = npy_bool; +}; +struct byte_tag : integral_tag { + using type = npy_byte; +}; +struct ubyte_tag : integral_tag { + using type = npy_ubyte; +}; +struct short_tag : integral_tag { + using type = npy_short; +}; +struct ushort_tag : integral_tag { + using type = npy_ushort; +}; +struct int_tag : integral_tag { + using type = npy_int; +}; +struct uint_tag : integral_tag { + using type = npy_uint; +}; +struct long_tag : integral_tag { + using type = npy_long; +}; +struct ulong_tag : integral_tag { + using type = npy_ulong; +}; +struct longlong_tag : integral_tag { + using type = npy_longlong; +}; +struct ulonglong_tag : integral_tag { + using type = npy_ulonglong; +}; +struct half_tag { + using type = npy_half; +}; +struct float_tag : floating_point_tag { + using type = npy_float; +}; +struct double_tag : floating_point_tag { + using type = npy_double; +}; +struct longdouble_tag : floating_point_tag { + using type = npy_longdouble; +}; +struct cfloat_tag : complex_tag { + using type = npy_cfloat; +}; +struct cdouble_tag : complex_tag { + using type = npy_cdouble; +}; +struct clongdouble_tag : complex_tag { + using type = npy_clongdouble; +}; +struct datetime_tag : date_tag { + using type = npy_datetime; +}; +struct timedelta_tag : date_tag { + using type = npy_timedelta; +}; + +} // namespace npy + +#endif diff --git a/numpy/core/src/common/numpyos.c b/numpy/core/src/common/numpyos.c index 42a71777b..4551a06a2 100644 --- a/numpy/core/src/common/numpyos.c +++ b/numpy/core/src/common/numpyos.c @@ -1,11 +1,9 @@ +#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#define _MULTIARRAYMODULE + #define PY_SSIZE_T_CLEAN #include <Python.h> -#include <locale.h> -#include <stdio.h> - -#define NPY_NO_DEPRECATED_API NPY_API_VERSION -#define _MULTIARRAYMODULE #include "numpy/arrayobject.h" #include "numpy/npy_math.h" @@ -13,14 +11,13 @@ #include "npy_pycompat.h" +#include <locale.h> +#include <stdio.h> + #ifdef HAVE_STRTOLD_L #include <stdlib.h> #ifdef HAVE_XLOCALE_H - /* - * the defines from xlocale.h are included in locale.h on some systems; - * see gh-8367 - */ - #include <xlocale.h> +#include <xlocale.h> // xlocale was removed in glibc 2.26, see gh-8367 #endif #endif diff --git a/numpy/core/src/common/numpyos.h b/numpy/core/src/common/numpyos.h index 4deed8400..ce49cbea7 100644 --- a/numpy/core/src/common/numpyos.h +++ b/numpy/core/src/common/numpyos.h @@ -1,5 +1,5 @@ -#ifndef _NPY_NUMPYOS_H_ -#define _NPY_NUMPYOS_H_ +#ifndef NUMPY_CORE_SRC_COMMON_NPY_NUMPYOS_H_ +#define NUMPY_CORE_SRC_COMMON_NPY_NUMPYOS_H_ NPY_NO_EXPORT char* NumPyOS_ascii_formatd(char *buffer, size_t buf_size, @@ -38,4 +38,5 @@ NumPyOS_strtoll(const char *str, char **endptr, int base); /* Convert a string to an int in an arbitrary base */ NPY_NO_EXPORT npy_ulonglong NumPyOS_strtoull(const char *str, char **endptr, int base); -#endif + +#endif /* NUMPY_CORE_SRC_COMMON_NPY_NUMPYOS_H_ */ diff --git a/numpy/core/src/common/python_xerbla.c b/numpy/core/src/common/python_xerbla.c index fe2f718b2..37a41408b 100644 --- a/numpy/core/src/common/python_xerbla.c +++ b/numpy/core/src/common/python_xerbla.c @@ -1,4 +1,6 @@ -#include "Python.h" +#define PY_SSIZE_T_CLEAN +#include <Python.h> + #include "numpy/npy_common.h" #include 
"npy_cblas.h" diff --git a/numpy/core/src/common/simd/avx2/memory.h b/numpy/core/src/common/simd/avx2/memory.h index e27bf15fe..5891a270a 100644 --- a/numpy/core/src/common/simd/avx2/memory.h +++ b/numpy/core/src/common/simd/avx2/memory.h @@ -87,7 +87,7 @@ NPY_FINLINE npyv_f32 npyv_loadn_f32(const float *ptr, npy_intp stride) #if 0 // slower NPY_FINLINE npyv_u64 npyv_loadn_u64(const npy_uint64 *ptr, npy_intp stride) { - const __m256i idx = _mm256_setr_epi64x(0, 1*stride, 2*stride, 3*stride); + const __m256i idx = npyv_set_s64(0, 1*stride, 2*stride, 3*stride); return _mm256_i64gather_epi64((const void*)ptr, idx, 8); } NPY_FINLINE npyv_s64 npyv_loadn_s64(const npy_int64 *ptr, npy_intp stride) @@ -170,9 +170,9 @@ NPY_FINLINE npyv_s32 npyv_load_tillz_s32(const npy_int32 *ptr, npy_uintp nlane) NPY_FINLINE npyv_s64 npyv_load_till_s64(const npy_int64 *ptr, npy_uintp nlane, npy_int64 fill) { assert(nlane > 0); - const __m256i vfill = _mm256_set1_epi64x(fill); - const __m256i steps = _mm256_setr_epi64x(0, 1, 2, 3); - __m256i vnlane = _mm256_set1_epi64x(nlane > 4 ? 4 : (int)nlane); + const __m256i vfill = npyv_setall_s64(fill); + const __m256i steps = npyv_set_s64(0, 1, 2, 3); + __m256i vnlane = npyv_setall_s64(nlane > 4 ? 4 : (int)nlane); __m256i mask = _mm256_cmpgt_epi64(vnlane, steps); __m256i payload = _mm256_maskload_epi64((const void*)ptr, mask); return _mm256_blendv_epi8(vfill, payload, mask); @@ -181,8 +181,8 @@ NPY_FINLINE npyv_s64 npyv_load_till_s64(const npy_int64 *ptr, npy_uintp nlane, n NPY_FINLINE npyv_s64 npyv_load_tillz_s64(const npy_int64 *ptr, npy_uintp nlane) { assert(nlane > 0); - const __m256i steps = _mm256_setr_epi64x(0, 1, 2, 3); - __m256i vnlane = _mm256_set1_epi64x(nlane > 4 ? 4 : (int)nlane); + const __m256i steps = npyv_set_s64(0, 1, 2, 3); + __m256i vnlane = npyv_setall_s64(nlane > 4 ? 4 : (int)nlane); __m256i mask = _mm256_cmpgt_epi64(vnlane, steps); return _mm256_maskload_epi64((const void*)ptr, mask); } @@ -211,10 +211,10 @@ NPY_FINLINE npyv_s64 npyv_loadn_till_s64(const npy_int64 *ptr, npy_intp stride, npy_uintp nlane, npy_int64 fill) { assert(nlane > 0); - const __m256i vfill = _mm256_set1_epi64x(fill); - const __m256i idx = _mm256_setr_epi64x(0, 1*stride, 2*stride, 3*stride); - const __m256i steps = _mm256_setr_epi64x(0, 1, 2, 3); - __m256i vnlane = _mm256_set1_epi64x(nlane > 4 ? 4 : (int)nlane); + const __m256i vfill = npyv_setall_s64(fill); + const __m256i idx = npyv_set_s64(0, 1*stride, 2*stride, 3*stride); + const __m256i steps = npyv_set_s64(0, 1, 2, 3); + __m256i vnlane = npyv_setall_s64(nlane > 4 ? 4 : (int)nlane); __m256i mask = _mm256_cmpgt_epi64(vnlane, steps); return _mm256_mask_i64gather_epi64(vfill, (const void*)ptr, idx, mask, 8); } @@ -238,8 +238,8 @@ NPY_FINLINE void npyv_store_till_s32(npy_int32 *ptr, npy_uintp nlane, npyv_s32 a NPY_FINLINE void npyv_store_till_s64(npy_int64 *ptr, npy_uintp nlane, npyv_s64 a) { assert(nlane > 0); - const __m256i steps = _mm256_setr_epi64x(0, 1, 2, 3); - __m256i vnlane = _mm256_set1_epi64x(nlane > 8 ? 8 : (int)nlane); + const __m256i steps = npyv_set_s64(0, 1, 2, 3); + __m256i vnlane = npyv_setall_s64(nlane > 8 ? 
8 : (int)nlane); __m256i mask = _mm256_cmpgt_epi64(vnlane, steps); _mm256_maskstore_epi64((void*)ptr, mask, a); } diff --git a/numpy/core/src/common/simd/avx2/misc.h b/numpy/core/src/common/simd/avx2/misc.h index e96696dc9..5e91e91b3 100644 --- a/numpy/core/src/common/simd/avx2/misc.h +++ b/numpy/core/src/common/simd/avx2/misc.h @@ -24,11 +24,27 @@ #define npyv_setall_s16(VAL) _mm256_set1_epi16((short)VAL) #define npyv_setall_u32(VAL) _mm256_set1_epi32((int)VAL) #define npyv_setall_s32(VAL) _mm256_set1_epi32(VAL) -#define npyv_setall_u64(VAL) _mm256_set1_epi64x(VAL) -#define npyv_setall_s64(VAL) _mm256_set1_epi64x(VAL) #define npyv_setall_f32(VAL) _mm256_set1_ps(VAL) #define npyv_setall_f64(VAL) _mm256_set1_pd(VAL) +NPY_FINLINE __m256i npyv__setr_epi64(npy_int64, npy_int64, npy_int64, npy_int64); +NPY_FINLINE npyv_u64 npyv_setall_u64(npy_uint64 a) +{ + npy_int64 ai = (npy_int64)a; +#if defined(_MSC_VER) && defined(_M_IX86) + return npyv__setr_epi64(ai, ai, ai, ai); +#else + return _mm256_set1_epi64x(ai); +#endif +} +NPY_FINLINE npyv_s64 npyv_setall_s64(npy_int64 a) +{ +#if defined(_MSC_VER) && defined(_M_IX86) + return npyv__setr_epi64(a, a, a, a); +#else + return _mm256_set1_epi64x(a); +#endif +} /* * vector with specific values set to each lane and * set a specific value to all remained lanes @@ -59,7 +75,14 @@ NPY_FINLINE __m256i npyv__setr_epi32(int i0, int i1, int i2, int i3, int i4, int } NPY_FINLINE __m256i npyv__setr_epi64(npy_int64 i0, npy_int64 i1, npy_int64 i2, npy_int64 i3) { +#if defined(_MSC_VER) && defined(_M_IX86) + return _mm256_setr_epi32( + (int)i0, (int)(i0 >> 32), (int)i1, (int)(i1 >> 32), + (int)i2, (int)(i2 >> 32), (int)i3, (int)(i3 >> 32) + ); +#else return _mm256_setr_epi64x(i0, i1, i2, i3); +#endif } NPY_FINLINE __m256 npyv__setr_ps(float i0, float i1, float i2, float i3, float i4, float i5, diff --git a/numpy/core/src/common/simd/avx512/math.h b/numpy/core/src/common/simd/avx512/math.h index 0141396d0..0949b2b06 100644 --- a/numpy/core/src/common/simd/avx512/math.h +++ b/numpy/core/src/common/simd/avx512/math.h @@ -35,7 +35,7 @@ NPY_FINLINE npyv_f64 npyv_abs_f64(npyv_f64 a) return _mm512_range_pd(a, a, 8); #else return npyv_and_f64( - a, _mm512_castsi512_pd(_mm512_set1_epi64(0x7fffffffffffffffLL)) + a, _mm512_castsi512_pd(npyv_setall_s64(0x7fffffffffffffffLL)) ); #endif } diff --git a/numpy/core/src/common/simd/avx512/memory.h b/numpy/core/src/common/simd/avx512/memory.h index bffd6e907..47095bf72 100644 --- a/numpy/core/src/common/simd/avx512/memory.h +++ b/numpy/core/src/common/simd/avx512/memory.h @@ -110,7 +110,7 @@ NPY_FINLINE npyv_f32 npyv_loadn_f32(const float *ptr, npy_intp stride) //// 64 NPY_FINLINE npyv_u64 npyv_loadn_u64(const npy_uint64 *ptr, npy_intp stride) { - const __m512i idx = _mm512_setr_epi64( + const __m512i idx = npyv_set_s64( 0*stride, 1*stride, 2*stride, 3*stride, 4*stride, 5*stride, 6*stride, 7*stride ); @@ -140,7 +140,7 @@ NPY_FINLINE void npyv_storen_f32(float *ptr, npy_intp stride, npyv_f32 a) //// 64 NPY_FINLINE void npyv_storen_u64(npy_uint64 *ptr, npy_intp stride, npyv_u64 a) { - const __m512i idx = _mm512_setr_epi64( + const __m512i idx = npyv_set_s64( 0*stride, 1*stride, 2*stride, 3*stride, 4*stride, 5*stride, 6*stride, 7*stride ); @@ -173,7 +173,7 @@ NPY_FINLINE npyv_s32 npyv_load_tillz_s32(const npy_int32 *ptr, npy_uintp nlane) NPY_FINLINE npyv_s64 npyv_load_till_s64(const npy_int64 *ptr, npy_uintp nlane, npy_int64 fill) { assert(nlane > 0); - const __m512i vfill = _mm512_set1_epi64(fill); + const __m512i vfill = 
npyv_setall_s64(fill); const __mmask8 mask = nlane > 31 ? -1 : (1 << nlane) - 1; return _mm512_mask_loadu_epi64(vfill, mask, (const __m512i*)ptr); } @@ -210,11 +210,11 @@ NPY_FINLINE npyv_s64 npyv_loadn_till_s64(const npy_int64 *ptr, npy_intp stride, npy_uintp nlane, npy_int64 fill) { assert(nlane > 0); - const __m512i idx = _mm512_setr_epi64( + const __m512i idx = npyv_set_s64( 0*stride, 1*stride, 2*stride, 3*stride, 4*stride, 5*stride, 6*stride, 7*stride ); - const __m512i vfill = _mm512_set1_epi64(fill); + const __m512i vfill = npyv_setall_s64(fill); const __mmask8 mask = nlane > 31 ? -1 : (1 << nlane) - 1; return _mm512_mask_i64gather_epi64(vfill, mask, idx, (const __m512i*)ptr, 8); } @@ -258,7 +258,7 @@ NPY_FINLINE void npyv_storen_till_s32(npy_int32 *ptr, npy_intp stride, npy_uintp NPY_FINLINE void npyv_storen_till_s64(npy_int64 *ptr, npy_intp stride, npy_uintp nlane, npyv_s64 a) { assert(nlane > 0); - const __m512i idx = _mm512_setr_epi64( + const __m512i idx = npyv_set_s64( 0*stride, 1*stride, 2*stride, 3*stride, 4*stride, 5*stride, 6*stride, 7*stride ); diff --git a/numpy/core/src/common/simd/avx512/misc.h b/numpy/core/src/common/simd/avx512/misc.h index 4b6729b05..c3039ecfe 100644 --- a/numpy/core/src/common/simd/avx512/misc.h +++ b/numpy/core/src/common/simd/avx512/misc.h @@ -24,11 +24,30 @@ #define npyv_setall_s16(VAL) _mm512_set1_epi16((short)VAL) #define npyv_setall_u32(VAL) _mm512_set1_epi32((int)VAL) #define npyv_setall_s32(VAL) _mm512_set1_epi32(VAL) -#define npyv_setall_u64(VAL) _mm512_set1_epi64(VAL) -#define npyv_setall_s64(VAL) _mm512_set1_epi64(VAL) #define npyv_setall_f32(VAL) _mm512_set1_ps(VAL) #define npyv_setall_f64(VAL) _mm512_set1_pd(VAL) +NPY_FINLINE __m512i npyv__setr_epi64( + npy_int64, npy_int64, npy_int64, npy_int64, + npy_int64, npy_int64, npy_int64, npy_int64 +); +NPY_FINLINE npyv_u64 npyv_setall_u64(npy_uint64 a) +{ + npy_int64 ai = (npy_int64)a; +#if defined(_MSC_VER) && defined(_M_IX86) + return npyv__setr_epi64(ai, ai, ai, ai, ai, ai, ai, ai); +#else + return _mm512_set1_epi64(ai); +#endif +} +NPY_FINLINE npyv_s64 npyv_setall_s64(npy_int64 a) +{ +#if defined(_MSC_VER) && defined(_M_IX86) + return npyv__setr_epi64(a, a, a, a, a, a, a, a); +#else + return _mm512_set1_epi64(a); +#endif +} /** * vector with specific values set to each lane and * set a specific value to all remained lanes @@ -76,7 +95,16 @@ NPY_FINLINE __m512i npyv__setr_epi32( NPY_FINLINE __m512i npyv__setr_epi64(npy_int64 i0, npy_int64 i1, npy_int64 i2, npy_int64 i3, npy_int64 i4, npy_int64 i5, npy_int64 i6, npy_int64 i7) { +#if defined(_MSC_VER) && defined(_M_IX86) + return _mm512_setr_epi32( + (int)i0, (int)(i0 >> 32), (int)i1, (int)(i1 >> 32), + (int)i2, (int)(i2 >> 32), (int)i3, (int)(i3 >> 32), + (int)i4, (int)(i4 >> 32), (int)i5, (int)(i5 >> 32), + (int)i6, (int)(i6 >> 32), (int)i7, (int)(i7 >> 32) + ); +#else return _mm512_setr_epi64(i0, i1, i2, i3, i4, i5, i6, i7); +#endif } NPY_FINLINE __m512 npyv__setr_ps( diff --git a/numpy/core/src/common/simd/emulate_maskop.h b/numpy/core/src/common/simd/emulate_maskop.h index 7e7446bc5..41e397c2d 100644 --- a/numpy/core/src/common/simd/emulate_maskop.h +++ b/numpy/core/src/common/simd/emulate_maskop.h @@ -1,5 +1,5 @@ /** - * This header is used internaly by all current supported SIMD extention, + * This header is used internally by all current supported SIMD extensions, * execpt for AVX512. 
*/ #ifndef NPY_SIMD diff --git a/numpy/core/src/common/simd/intdiv.h b/numpy/core/src/common/simd/intdiv.h index f6ea9abf2..a7a461721 100644 --- a/numpy/core/src/common/simd/intdiv.h +++ b/numpy/core/src/common/simd/intdiv.h @@ -39,7 +39,7 @@ * for (; len >= vstep; src += vstep, dst += vstep, len -= vstep) { * npyv_s32 a = npyv_load_s32(*src); // load s32 vector from memory * a = npyv_divc_s32(a, divisor); // divide all elements by x - * npyv_store_s32(dst, a); // store s32 vector into memroy + * npyv_store_s32(dst, a); // store s32 vector into memory * } * ** NOTES: @@ -162,11 +162,12 @@ NPY_FINLINE npy_uint64 npyv__divh128_u64(npy_uint64 high, npy_uint64 divisor) npy_uint32 divisor_hi = divisor >> 32; npy_uint32 divisor_lo = divisor & 0xFFFFFFFF; // compute high quotient digit - npy_uint32 quotient_hi = (npy_uint32)(high / divisor_hi); + npy_uint64 quotient_hi = high / divisor_hi; npy_uint64 remainder = high - divisor_hi * quotient_hi; npy_uint64 base32 = 1ULL << 32; while (quotient_hi >= base32 || quotient_hi*divisor_lo > base32*remainder) { - remainder += --divisor_hi; + --quotient_hi; + remainder += divisor_hi; if (remainder >= base32) { break; } @@ -200,7 +201,7 @@ NPY_FINLINE npyv_u8x3 npyv_divisor_u8(npy_uint8 d) default: l = npyv__bitscan_revnz_u32(d - 1) + 1; // ceil(log2(d)) l2 = (npy_uint8)(1 << l); // 2^l, overflow to 0 if l = 8 - m = ((l2 - d) << 8) / d + 1; // multiplier + m = ((npy_uint16)((l2 - d) << 8)) / d + 1; // multiplier sh1 = 1; sh2 = l - 1; // shift counts } npyv_u8x3 divisor; diff --git a/numpy/core/src/common/simd/neon/math.h b/numpy/core/src/common/simd/neon/math.h index ced82d1de..19ea6f22f 100644 --- a/numpy/core/src/common/simd/neon/math.h +++ b/numpy/core/src/common/simd/neon/math.h @@ -31,7 +31,7 @@ NPY_FINLINE npyv_f32 npyv_square_f32(npyv_f32 a) const npyv_f32 zero = vdupq_n_f32(0.0f); const npyv_u32 pinf = vdupq_n_u32(0x7f800000); npyv_u32 is_zero = vceqq_f32(a, zero), is_inf = vceqq_u32(vreinterpretq_u32_f32(a), pinf); - // guard agianst floating-point division-by-zero error + // guard against floating-point division-by-zero error npyv_f32 guard_byz = vbslq_f32(is_zero, vreinterpretq_f32_u32(pinf), a); // estimate to (1/√a) npyv_f32 rsqrte = vrsqrteq_f32(guard_byz); diff --git a/numpy/core/src/common/simd/simd.h b/numpy/core/src/common/simd/simd.h index a3e2b95de..08b2a7d00 100644 --- a/numpy/core/src/common/simd/simd.h +++ b/numpy/core/src/common/simd/simd.h @@ -27,6 +27,25 @@ typedef npy_int64 npyv_lanetype_s64; typedef float npyv_lanetype_f32; typedef double npyv_lanetype_f64; +#if defined(_MSC_VER) && defined(_M_IX86) +/* + * Avoid using any of the following intrinsics with MSVC 32-bit, + * even if they appear to work on newer versions. + * They have a bad impact on the generated instructions: sometimes the + * compiler handles them without respecting 32-bit mode, which leads to + * crashes from executing 64-bit instructions, and other times it + * generates badly emulated instructions.
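The `npyv_divisor_u8` hunk above computes a round-up multiplier `m` together with shift counts `sh1 = 1` and `sh2 = l - 1`; the new `npy_uint16` cast keeps the `(l2 - d) << 8` product from being truncated. Assuming the standard round-up multiply-and-shift reconstruction (a sketch of the general technique, not NumPy's exact SIMD kernel), the parameters can be verified exhaustively for every 8-bit dividend:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    for (uint32_t d = 3; d < 256; d++) {
        if ((d & (d - 1)) == 0) {
            continue;  /* powers of two take the plain shift path */
        }
        uint32_t l = 0;
        while ((1u << l) < d) {
            l++;  /* l = ceil(log2(d)) */
        }
        uint32_t m = (((1u << l) - d) << 8) / d + 1;
        for (uint32_t x = 0; x < 256; x++) {
            uint32_t t = (m * x) >> 8;  /* "high half" of m*x */
            /* q = (t + ((x - t) >> sh1)) >> sh2, with sh1=1, sh2=l-1 */
            uint32_t q = (t + ((x - t) >> 1)) >> (l - 1);
            assert(q == x / d);
        }
    }
    printf("all u8 divisors verified\n");
    return 0;
}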
+ */ + #undef _mm512_set1_epi64 + #undef _mm256_set1_epi64x + #undef _mm_set1_epi64x + #undef _mm512_setr_epi64x + #undef _mm256_setr_epi64x + #undef _mm_setr_epi64x + #undef _mm512_set_epi64x + #undef _mm256_set_epi64x + #undef _mm_set_epi64x +#endif #if defined(NPY_HAVE_AVX512F) && !defined(NPY_SIMD_FORCE_256) && !defined(NPY_SIMD_FORCE_128) #include "avx512/avx512.h" #elif defined(NPY_HAVE_AVX2) && !defined(NPY_SIMD_FORCE_128) diff --git a/numpy/core/src/common/simd/sse/misc.h b/numpy/core/src/common/simd/sse/misc.h index 1099c491d..7d13fbf55 100644 --- a/numpy/core/src/common/simd/sse/misc.h +++ b/numpy/core/src/common/simd/sse/misc.h @@ -24,11 +24,28 @@ #define npyv_setall_s16(VAL) _mm_set1_epi16((short)(VAL)) #define npyv_setall_u32(VAL) _mm_set1_epi32((int)(VAL)) #define npyv_setall_s32(VAL) _mm_set1_epi32((int)(VAL)) -#define npyv_setall_u64(VAL) _mm_set1_epi64x((npy_int64)(VAL)) -#define npyv_setall_s64(VAL) _mm_set1_epi64x((npy_int64)(VAL)) #define npyv_setall_f32 _mm_set1_ps #define npyv_setall_f64 _mm_set1_pd +NPY_FINLINE __m128i npyv__setr_epi64(npy_int64 i0, npy_int64 i1); + +NPY_FINLINE npyv_u64 npyv_setall_u64(npy_uint64 a) +{ +#if defined(_MSC_VER) && defined(_M_IX86) + return npyv__setr_epi64((npy_int64)a, (npy_int64)a); +#else + return _mm_set1_epi64x((npy_int64)a); +#endif +} +NPY_FINLINE npyv_s64 npyv_setall_s64(npy_int64 a) +{ +#if defined(_MSC_VER) && defined(_M_IX86) + return npyv__setr_epi64(a, a); +#else + return _mm_set1_epi64x((npy_int64)a); +#endif +} + /** * vector with specific values set to each lane and * set a specific value to all remained lanes @@ -53,7 +70,11 @@ NPY_FINLINE __m128i npyv__setr_epi32(int i0, int i1, int i2, int i3) } NPY_FINLINE __m128i npyv__setr_epi64(npy_int64 i0, npy_int64 i1) { +#if defined(_MSC_VER) && defined(_M_IX86) + return _mm_setr_epi32((int)i0, (int)(i0 >> 32), (int)i1, (int)(i1 >> 32)); +#else return _mm_set_epi64x(i1, i0); +#endif } NPY_FINLINE __m128 npyv__setr_ps(float i0, float i1, float i2, float i3) { diff --git a/numpy/core/src/common/simd/vsx/operators.h b/numpy/core/src/common/simd/vsx/operators.h index 23c5d0dbe..d34057ff3 100644 --- a/numpy/core/src/common/simd/vsx/operators.h +++ b/numpy/core/src/common/simd/vsx/operators.h @@ -103,7 +103,7 @@ NPYV_IMPL_VSX_BIN_B64(or) NPYV_IMPL_VSX_BIN_B64(xor) // NOT -// note: we implement npyv_not_b*(boolen types) for internal use*/ +// note: we implement npyv_not_b*(boolean types) for internal use*/ #define NPYV_IMPL_VSX_NOT_INT(VEC_LEN) \ NPY_FINLINE npyv_u##VEC_LEN npyv_not_u##VEC_LEN(npyv_u##VEC_LEN a) \ { return vec_nor(a, a); } \ diff --git a/numpy/core/src/common/ucsnarrow.c b/numpy/core/src/common/ucsnarrow.c index 3ef5d6878..4bea4beee 100644 --- a/numpy/core/src/common/ucsnarrow.c +++ b/numpy/core/src/common/ucsnarrow.c @@ -1,12 +1,9 @@ #define NPY_NO_DEPRECATED_API NPY_API_VERSION +#define _MULTIARRAYMODULE #define PY_SSIZE_T_CLEAN #include <Python.h> -#include <locale.h> -#include <stdio.h> - -#define _MULTIARRAYMODULE #include "numpy/arrayobject.h" #include "numpy/npy_math.h" diff --git a/numpy/core/src/common/ucsnarrow.h b/numpy/core/src/common/ucsnarrow.h index c811e1f2c..6fe157199 100644 --- a/numpy/core/src/common/ucsnarrow.h +++ b/numpy/core/src/common/ucsnarrow.h @@ -1,7 +1,7 @@ -#ifndef _NPY_UCSNARROW_H_ -#define _NPY_UCSNARROW_H_ +#ifndef NUMPY_CORE_SRC_COMMON_NPY_UCSNARROW_H_ +#define NUMPY_CORE_SRC_COMMON_NPY_UCSNARROW_H_ NPY_NO_EXPORT PyUnicodeObject * PyUnicode_FromUCS4(char *src, Py_ssize_t size, int swap, int align); -#endif +#endif /* 
NUMPY_CORE_SRC_COMMON_NPY_UCSNARROW_H_ */ diff --git a/numpy/core/src/common/ufunc_override.h b/numpy/core/src/common/ufunc_override.h index bf86865c9..5da95fb29 100644 --- a/numpy/core/src/common/ufunc_override.h +++ b/numpy/core/src/common/ufunc_override.h @@ -1,5 +1,5 @@ -#ifndef __UFUNC_OVERRIDE_H -#define __UFUNC_OVERRIDE_H +#ifndef NUMPY_CORE_SRC_COMMON_UFUNC_OVERRIDE_H_ +#define NUMPY_CORE_SRC_COMMON_UFUNC_OVERRIDE_H_ #include "npy_config.h" @@ -34,4 +34,5 @@ PyUFunc_HasOverride(PyObject *obj); */ NPY_NO_EXPORT int PyUFuncOverride_GetOutObjects(PyObject *kwds, PyObject **out_kwd_obj, PyObject ***out_objs); -#endif + +#endif /* NUMPY_CORE_SRC_COMMON_UFUNC_OVERRIDE_H_ */ diff --git a/numpy/core/src/common/umathmodule.h b/numpy/core/src/common/umathmodule.h index 5c718a841..6d4169ad5 100644 --- a/numpy/core/src/common/umathmodule.h +++ b/numpy/core/src/common/umathmodule.h @@ -1,3 +1,6 @@ +#ifndef NUMPY_CORE_SRC_COMMON_UMATHMODULE_H_ +#define NUMPY_CORE_SRC_COMMON_UMATHMODULE_H_ + #include "__umath_generated.c" #include "__ufunc_api.c" @@ -8,4 +11,4 @@ PyObject * add_newdoc_ufunc(PyObject *NPY_UNUSED(dummy), PyObject *args); PyObject * ufunc_frompyfunc(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *NPY_UNUSED(kwds)); int initumath(PyObject *m); - +#endif /* NUMPY_CORE_SRC_COMMON_UMATHMODULE_H_ */ diff --git a/numpy/core/src/dummymodule.c b/numpy/core/src/dummymodule.c index e26875736..7284ffd68 100644 --- a/numpy/core/src/dummymodule.c +++ b/numpy/core/src/dummymodule.c @@ -4,12 +4,13 @@ * This is a dummy module whose purpose is to get distutils to generate the * configuration files before the libraries are made. */ - #define NPY_NO_DEPRECATED_API NPY_API_VERSION #define NO_IMPORT_ARRAY +#define PY_SSIZE_T_CLEAN #include <Python.h> -#include <npy_pycompat.h> + +#include "npy_pycompat.h" static struct PyMethodDef methods[] = { {NULL, NULL, 0, NULL} diff --git a/numpy/core/src/multiarray/_datetime.h b/numpy/core/src/multiarray/_datetime.h index c0d2f1967..2ebeb1dff 100644 --- a/numpy/core/src/multiarray/_datetime.h +++ b/numpy/core/src/multiarray/_datetime.h @@ -1,5 +1,5 @@ -#ifndef _NPY_PRIVATE__DATETIME_H_ -#define _NPY_PRIVATE__DATETIME_H_ +#ifndef NUMPY_CORE_SRC_MULTIARRAY__DATETIME_H_ +#define NUMPY_CORE_SRC_MULTIARRAY__DATETIME_H_ extern NPY_NO_EXPORT char const *_datetime_strings[NPY_DATETIME_NUMUNITS]; extern NPY_NO_EXPORT int _days_per_month_table[2][12]; @@ -376,4 +376,4 @@ find_object_datetime_type(PyObject *obj, int type_num); NPY_NO_EXPORT int PyArray_InitializeDatetimeCasts(void); -#endif +#endif /* NUMPY_CORE_SRC_MULTIARRAY__DATETIME_H_ */ diff --git a/numpy/core/src/multiarray/_multiarray_tests.c.src b/numpy/core/src/multiarray/_multiarray_tests.c.src index f4764b371..e945d0771 100644 --- a/numpy/core/src/multiarray/_multiarray_tests.c.src +++ b/numpy/core/src/multiarray/_multiarray_tests.c.src @@ -1,8 +1,8 @@ /* -*-c-*- */ #define PY_SSIZE_T_CLEAN +#include <Python.h> #define NPY_NO_DEPRECATED_API NPY_API_VERSION -#include <Python.h> #define _NPY_NO_DEPRECATIONS /* for NPY_CHAR */ #include "numpy/arrayobject.h" #include "numpy/arrayscalars.h" diff --git a/numpy/core/src/multiarray/abstractdtypes.c b/numpy/core/src/multiarray/abstractdtypes.c index 3fa354ddc..cc1d7fad8 100644 --- a/numpy/core/src/multiarray/abstractdtypes.c +++ b/numpy/core/src/multiarray/abstractdtypes.c @@ -1,10 +1,10 @@ +#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#define _MULTIARRAYMODULE + #define PY_SSIZE_T_CLEAN #include <Python.h> -#include "structmember.h" +#include <structmember.h> - 
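The header-guard renames that run through these files follow one rule: guards now spell out the full file path, and they drop the old underscore-plus-uppercase form (identifiers such as `_NPY_UCSNARROW_H_` are reserved for the implementation by the C standard, which is the likely motivation). The pattern, on a hypothetical header:

/*
 * example.h -- illustrative header showing the new guard convention;
 * the file and guard names here are made up for the example
 */
#ifndef NUMPY_CORE_SRC_COMMON_EXAMPLE_H_
#define NUMPY_CORE_SRC_COMMON_EXAMPLE_H_

/* declarations go here */

#endif  /* NUMPY_CORE_SRC_COMMON_EXAMPLE_H_ */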
-#define NPY_NO_DEPRECATED_API NPY_API_VERSION -#define _MULTIARRAYMODULE #include "numpy/ndarraytypes.h" #include "numpy/arrayobject.h" @@ -157,7 +157,7 @@ int_common_dtype(PyArray_DTypeMeta *NPY_UNUSED(cls), PyArray_DTypeMeta *other) } else if (PyTypeNum_ISNUMBER(other->type_num) || other->type_num == NPY_TIMEDELTA) { - /* All other numeric types (ant timdelta) are preserved: */ + /* All other numeric types (and timedelta) are preserved: */ Py_INCREF(other); return other; } diff --git a/numpy/core/src/multiarray/abstractdtypes.h b/numpy/core/src/multiarray/abstractdtypes.h index a6c526717..42c192cac 100644 --- a/numpy/core/src/multiarray/abstractdtypes.h +++ b/numpy/core/src/multiarray/abstractdtypes.h @@ -1,5 +1,5 @@ -#ifndef _NPY_ABSTRACTDTYPES_H -#define _NPY_ABSTRACTDTYPES_H +#ifndef NUMPY_CORE_SRC_MULTIARRAY_ABSTRACTDTYPES_H_ +#define NUMPY_CORE_SRC_MULTIARRAY_ABSTRACTDTYPES_H_ #include "dtypemeta.h" @@ -16,4 +16,4 @@ NPY_NO_EXPORT extern PyArray_DTypeMeta PyArray_PyComplexAbstractDType; NPY_NO_EXPORT int initialize_and_map_pytypes_to_dtypes(void); -#endif /*_NPY_ABSTRACTDTYPES_H */ +#endif /* NUMPY_CORE_SRC_MULTIARRAY_ABSTRACTDTYPES_H_ */ diff --git a/numpy/core/src/multiarray/alloc.c b/numpy/core/src/multiarray/alloc.c index e74056736..e4756264d 100644 --- a/numpy/core/src/multiarray/alloc.c +++ b/numpy/core/src/multiarray/alloc.c @@ -1,20 +1,18 @@ +#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#define _MULTIARRAYMODULE + #define PY_SSIZE_T_CLEAN #include <Python.h> -#include "structmember.h" - +#include <structmember.h> #include <pymem.h> -#define NPY_NO_DEPRECATED_API NPY_API_VERSION -#define _MULTIARRAYMODULE -#include <numpy/ndarraytypes.h> +#include "numpy/ndarraytypes.h" #include "numpy/arrayobject.h" -#include <numpy/npy_common.h> +#include "numpy/npy_common.h" #include "npy_config.h" #include "alloc.h" - #include <assert.h> - #ifdef NPY_OS_LINUX #include <sys/mman.h> #ifndef MADV_HUGEPAGE @@ -135,9 +133,10 @@ npy_alloc_cache(npy_uintp sz) /* zero initialized data, sz is number of bytes to allocate */ NPY_NO_EXPORT void * -npy_alloc_cache_zero(npy_uintp sz) +npy_alloc_cache_zero(size_t nmemb, size_t size) { void * p; + size_t sz = nmemb * size; NPY_BEGIN_THREADS_DEF; if (sz < NBUCKETS) { p = _npy_alloc_cache(sz, 1, NBUCKETS, datacache, &PyDataMem_NEW); @@ -147,7 +146,7 @@ npy_alloc_cache_zero(npy_uintp sz) return p; } NPY_BEGIN_THREADS; - p = PyDataMem_NEW_ZEROED(sz, 1); + p = PyDataMem_NEW_ZEROED(nmemb, size); NPY_END_THREADS; return p; } @@ -189,8 +188,8 @@ npy_free_cache_dim(void * p, npy_uintp sz) /* malloc/free/realloc hook */ -NPY_NO_EXPORT PyDataMem_EventHookFunc *_PyDataMem_eventhook; -NPY_NO_EXPORT void *_PyDataMem_eventhook_user_data; +NPY_NO_EXPORT PyDataMem_EventHookFunc *_PyDataMem_eventhook = NULL; +NPY_NO_EXPORT void *_PyDataMem_eventhook_user_data = NULL; /*NUMPY_API * Sets the allocation event hook for numpy array data. @@ -256,21 +255,21 @@ PyDataMem_NEW(size_t size) * Allocates zeroed memory for array data.
*/ NPY_NO_EXPORT void * -PyDataMem_NEW_ZEROED(size_t size, size_t elsize) +PyDataMem_NEW_ZEROED(size_t nmemb, size_t size) { void *result; - result = calloc(size, elsize); + result = calloc(nmemb, size); if (_PyDataMem_eventhook != NULL) { NPY_ALLOW_C_API_DEF NPY_ALLOW_C_API if (_PyDataMem_eventhook != NULL) { - (*_PyDataMem_eventhook)(NULL, result, size * elsize, + (*_PyDataMem_eventhook)(NULL, result, nmemb * size, _PyDataMem_eventhook_user_data); } NPY_DISABLE_C_API } - PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, size); + PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, nmemb * size); return result; } @@ -318,3 +317,325 @@ PyDataMem_RENEW(void *ptr, size_t size) } return result; } + +// The default data mem allocator malloc routine does not make use of a ctx. +// It should be called only through PyDataMem_UserNEW +// since it does not itself handle the eventhook and tracemalloc logic. +static NPY_INLINE void * +default_malloc(void *NPY_UNUSED(ctx), size_t size) +{ + return _npy_alloc_cache(size, 1, NBUCKETS, datacache, &malloc); +} + +// The default data mem allocator calloc routine does not make use of a ctx. +// It should be called only through PyDataMem_UserNEW_ZEROED +// since it does not itself handle the eventhook and tracemalloc logic. +static NPY_INLINE void * +default_calloc(void *NPY_UNUSED(ctx), size_t nelem, size_t elsize) +{ + void * p; + size_t sz = nelem * elsize; + NPY_BEGIN_THREADS_DEF; + if (sz < NBUCKETS) { + p = _npy_alloc_cache(sz, 1, NBUCKETS, datacache, &malloc); + if (p) { + memset(p, 0, sz); + } + return p; + } + NPY_BEGIN_THREADS; + p = calloc(nelem, elsize); + NPY_END_THREADS; + return p; +} + +// The default data mem allocator realloc routine does not make use of a ctx. +// It should be called only through PyDataMem_UserRENEW +// since it does not itself handle the eventhook and tracemalloc logic. +static NPY_INLINE void * +default_realloc(void *NPY_UNUSED(ctx), void *ptr, size_t new_size) +{ + return realloc(ptr, new_size); +} + +// The default data mem allocator free routine does not make use of a ctx. +// It should be called only through PyDataMem_UserFREE +// since it does not itself handle the eventhook and tracemalloc logic.
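Switching `PyDataMem_NEW_ZEROED` (and the handler's `calloc` slot) to calloc-style `(nmemb, size)` arguments lets the C library perform the element-count multiplication with an overflow check, which a single pre-multiplied byte count cannot provide. A small standalone illustration of the failure mode this avoids (not NumPy code):

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

int main(void)
{
    size_t nmemb = (SIZE_MAX / 8) + 2;  /* too many 8-byte elements */
    size_t size = 8;
    /* a naive product wraps around to a tiny byte count */
    size_t wrapped = nmemb * size;
    printf("wrapped product: %zu\n", wrapped);
    /* calloc detects the overflow and fails instead of under-allocating */
    void *p = calloc(nmemb, size);
    printf("calloc result: %p\n", p);  /* expected: NULL */
    free(p);
    return 0;
}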
+static NPY_INLINE void +default_free(void *NPY_UNUSED(ctx), void *ptr, size_t size) +{ + _npy_free_cache(ptr, size, NBUCKETS, datacache, &free); +} + +/* Memory handler global default */ +PyDataMem_Handler default_handler = { + "default_allocator", + { + NULL, /* ctx */ + default_malloc, /* malloc */ + default_calloc, /* calloc */ + default_realloc, /* realloc */ + default_free /* free */ + } +}; + +#if (!defined(PYPY_VERSION_NUM) || PYPY_VERSION_NUM >= 0x07030600) +PyObject *current_handler; +#endif + +int uo_index=0; /* user_override index */ + +/* Wrappers for the default or any user-assigned PyDataMem_Handler */ + +NPY_NO_EXPORT void * +PyDataMem_UserNEW(size_t size, PyObject *mem_handler) +{ + void *result; + PyDataMem_Handler *handler = (PyDataMem_Handler *) PyCapsule_GetPointer(mem_handler, "mem_handler"); + if (handler == NULL) { + return NULL; + } + + assert(size != 0); + result = handler->allocator.malloc(handler->allocator.ctx, size); + if (_PyDataMem_eventhook != NULL) { + NPY_ALLOW_C_API_DEF + NPY_ALLOW_C_API + if (_PyDataMem_eventhook != NULL) { + (*_PyDataMem_eventhook)(NULL, result, size, + _PyDataMem_eventhook_user_data); + } + NPY_DISABLE_C_API + } + PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, size); + return result; +} + +NPY_NO_EXPORT void * +PyDataMem_UserNEW_ZEROED(size_t nmemb, size_t size, PyObject *mem_handler) +{ + void *result; + PyDataMem_Handler *handler = (PyDataMem_Handler *) PyCapsule_GetPointer(mem_handler, "mem_handler"); + if (handler == NULL) { + return NULL; + } + result = handler->allocator.calloc(handler->allocator.ctx, nmemb, size); + if (_PyDataMem_eventhook != NULL) { + NPY_ALLOW_C_API_DEF + NPY_ALLOW_C_API + if (_PyDataMem_eventhook != NULL) { + (*_PyDataMem_eventhook)(NULL, result, nmemb * size, + _PyDataMem_eventhook_user_data); + } + NPY_DISABLE_C_API + } + PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, nmemb * size); + return result; +} + +/* Similar to array_dealloc in arrayobject.c */ +static NPY_INLINE void +WARN_IN_FREE(PyObject* warning, const char * msg) { + if (PyErr_WarnEx(warning, msg, 1) < 0) { + PyObject * s; + + s = PyUnicode_FromString("PyDataMem_UserFREE"); + if (s) { + PyErr_WriteUnraisable(s); + Py_DECREF(s); + } + else { + PyErr_WriteUnraisable(Py_None); + } + } +} + + + +NPY_NO_EXPORT void +PyDataMem_UserFREE(void *ptr, size_t size, PyObject *mem_handler) +{ + PyDataMem_Handler *handler = (PyDataMem_Handler *) PyCapsule_GetPointer(mem_handler, "mem_handler"); + if (handler == NULL) { + WARN_IN_FREE(PyExc_RuntimeWarning, + "Could not get pointer to 'mem_handler' from PyCapsule"); + PyErr_Clear(); + return; + } + PyTraceMalloc_Untrack(NPY_TRACE_DOMAIN, (npy_uintp)ptr); + handler->allocator.free(handler->allocator.ctx, ptr, size); + if (_PyDataMem_eventhook != NULL) { + NPY_ALLOW_C_API_DEF + NPY_ALLOW_C_API + if (_PyDataMem_eventhook != NULL) { + (*_PyDataMem_eventhook)(ptr, NULL, 0, + _PyDataMem_eventhook_user_data); + } + NPY_DISABLE_C_API + } +} + +NPY_NO_EXPORT void * +PyDataMem_UserRENEW(void *ptr, size_t size, PyObject *mem_handler) +{ + void *result; + PyDataMem_Handler *handler = (PyDataMem_Handler *) PyCapsule_GetPointer(mem_handler, "mem_handler"); + if (handler == NULL) { + return NULL; + } + + assert(size != 0); + result = handler->allocator.realloc(handler->allocator.ctx, ptr, size); + if (result != ptr) { + PyTraceMalloc_Untrack(NPY_TRACE_DOMAIN, (npy_uintp)ptr); + } + PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, size); + if (_PyDataMem_eventhook != NULL) { + NPY_ALLOW_C_API_DEF 
+ NPY_ALLOW_C_API + if (_PyDataMem_eventhook != NULL) { + (*_PyDataMem_eventhook)(ptr, result, size, + _PyDataMem_eventhook_user_data); + } + NPY_DISABLE_C_API + } + return result; +} + +/*NUMPY_API + * Set a new allocation policy. If the input value is NULL, will reset + * the policy to the default. Return the previous policy, or + * return NULL if an error has occurred. We wrap the user-provided + * functions so they will still call the python and numpy + * memory management callback hooks. + */ +NPY_NO_EXPORT PyObject * +PyDataMem_SetHandler(PyObject *handler) +{ + PyObject *old_handler; +#if (!defined(PYPY_VERSION_NUM) || PYPY_VERSION_NUM >= 0x07030600) + PyObject *token; + if (PyContextVar_Get(current_handler, NULL, &old_handler)) { + return NULL; + } + if (handler == NULL) { + handler = PyCapsule_New(&default_handler, "mem_handler", NULL); + if (handler == NULL) { + return NULL; + } + } + else { + Py_INCREF(handler); + } + token = PyContextVar_Set(current_handler, handler); + Py_DECREF(handler); + if (token == NULL) { + Py_DECREF(old_handler); + return NULL; + } + Py_DECREF(token); + return old_handler; +#else + PyObject *p; + p = PyThreadState_GetDict(); + if (p == NULL) { + return NULL; + } + old_handler = PyDict_GetItemString(p, "current_allocator"); + if (old_handler == NULL) { + old_handler = PyCapsule_New(&default_handler, "mem_handler", NULL); + if (old_handler == NULL) { + return NULL; + } + } + else { + Py_INCREF(old_handler); + } + if (handler == NULL) { + handler = PyCapsule_New(&default_handler, "mem_handler", NULL); + if (handler == NULL) { + Py_DECREF(old_handler); + return NULL; + } + } + else { + Py_INCREF(handler); + } + const int error = PyDict_SetItemString(p, "current_allocator", handler); + Py_DECREF(handler); + if (error) { + Py_DECREF(old_handler); + return NULL; + } + return old_handler; +#endif +} + +/*NUMPY_API + * Return the policy that will be used to allocate data + * for the next PyArrayObject. On failure, return NULL. 
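Together, `PyDataMem_UserNEW`/`UserFREE`/`UserRENEW` and `PyDataMem_SetHandler` make the array-data allocator swappable per context. A sketch of what a user-supplied handler could look like against the struct layout shown above (the "mem_handler" capsule tag and field order come from this diff; the counting logic and all names are illustrative, and NumPy headers are assumed):

#define PY_SSIZE_T_CLEAN
#include <Python.h>
#include <numpy/arrayobject.h>
#include <stdlib.h>

static size_t allocated_bytes = 0;  /* demo only; not thread-safe */

static void *
counting_malloc(void *ctx, size_t size)
{
    (void)ctx;
    allocated_bytes += size;
    return malloc(size);
}

static void *
counting_calloc(void *ctx, size_t nelem, size_t elsize)
{
    (void)ctx;
    allocated_bytes += nelem * elsize;
    return calloc(nelem, elsize);
}

static void *
counting_realloc(void *ctx, void *ptr, size_t new_size)
{
    (void)ctx;
    return realloc(ptr, new_size);
}

static void
counting_free(void *ctx, void *ptr, size_t size)
{
    (void)ctx;
    allocated_bytes -= size;
    free(ptr);
}

static PyDataMem_Handler counting_handler = {
    "counting_allocator",
    {
        NULL,             /* ctx */
        counting_malloc,
        counting_calloc,
        counting_realloc,
        counting_free
    }
};

/* Install it by wrapping the struct in a capsule, keeping the old
 * handler so it can be restored later. */
static int
install_counting_handler(void)
{
    PyObject *capsule = PyCapsule_New(&counting_handler, "mem_handler", NULL);
    if (capsule == NULL) {
        return -1;
    }
    PyObject *old = PyDataMem_SetHandler(capsule);
    Py_DECREF(capsule);
    if (old == NULL) {
        return -1;
    }
    Py_DECREF(old);  /* or stash it and pass it back to SetHandler */
    return 0;
}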
+ */ +NPY_NO_EXPORT PyObject * +PyDataMem_GetHandler() +{ + PyObject *handler; +#if (!defined(PYPY_VERSION_NUM) || PYPY_VERSION_NUM >= 0x07030600) + if (PyContextVar_Get(current_handler, NULL, &handler)) { + return NULL; + } + return handler; +#else + PyObject *p = PyThreadState_GetDict(); + if (p == NULL) { + return NULL; + } + handler = PyDict_GetItemString(p, "current_allocator"); + if (handler == NULL) { + handler = PyCapsule_New(&default_handler, "mem_handler", NULL); + if (handler == NULL) { + return NULL; + } + } + else { + Py_INCREF(handler); + } + return handler; +#endif +} + +NPY_NO_EXPORT PyObject * +get_handler_name(PyObject *NPY_UNUSED(self), PyObject *args) +{ + PyObject *arr=NULL; + if (!PyArg_ParseTuple(args, "|O:get_handler_name", &arr)) { + return NULL; + } + if (arr != NULL && !PyArray_Check(arr)) { + PyErr_SetString(PyExc_ValueError, "if supplied, argument must be an ndarray"); + return NULL; + } + PyObject *mem_handler; + PyDataMem_Handler *handler; + PyObject *name; + if (arr != NULL) { + mem_handler = PyArray_HANDLER((PyArrayObject *) arr); + if (mem_handler == NULL) { + Py_RETURN_NONE; + } + Py_INCREF(mem_handler); + } + else { + mem_handler = PyDataMem_GetHandler(); + if (mem_handler == NULL) { + return NULL; + } + } + handler = (PyDataMem_Handler *) PyCapsule_GetPointer(mem_handler, "mem_handler"); + if (handler == NULL) { + Py_DECREF(mem_handler); + return NULL; + } + name = PyUnicode_FromString(handler->name); + Py_DECREF(mem_handler); + return name; +} diff --git a/numpy/core/src/multiarray/alloc.h b/numpy/core/src/multiarray/alloc.h index 15e31ebb5..4f7df1f84 100644 --- a/numpy/core/src/multiarray/alloc.h +++ b/numpy/core/src/multiarray/alloc.h @@ -1,8 +1,9 @@ -#ifndef _NPY_ARRAY_ALLOC_H_ -#define _NPY_ARRAY_ALLOC_H_ +#ifndef NUMPY_CORE_SRC_MULTIARRAY_ALLOC_H_ +#define NUMPY_CORE_SRC_MULTIARRAY_ALLOC_H_ + #define NPY_NO_DEPRECATED_API NPY_API_VERSION #define _MULTIARRAYMODULE -#include <numpy/ndarraytypes.h> +#include "numpy/ndarraytypes.h" #define NPY_TRACE_DOMAIN 389047 @@ -10,13 +11,16 @@ NPY_NO_EXPORT PyObject * _set_madvise_hugepage(PyObject *NPY_UNUSED(self), PyObject *enabled_obj); NPY_NO_EXPORT void * -npy_alloc_cache(npy_uintp sz); +PyDataMem_UserNEW(npy_uintp sz, PyObject *mem_handler); NPY_NO_EXPORT void * -npy_alloc_cache_zero(npy_uintp sz); +PyDataMem_UserNEW_ZEROED(size_t nmemb, size_t size, PyObject *mem_handler); NPY_NO_EXPORT void -npy_free_cache(void * p, npy_uintp sd); +PyDataMem_UserFREE(void * p, npy_uintp sd, PyObject *mem_handler); + +NPY_NO_EXPORT void * +PyDataMem_UserRENEW(void *ptr, size_t size, PyObject *mem_handler); NPY_NO_EXPORT void * npy_alloc_cache_dim(npy_uintp sz); @@ -36,4 +40,12 @@ npy_free_cache_dim_array(PyArrayObject * arr) npy_free_cache_dim(PyArray_DIMS(arr), PyArray_NDIM(arr)); } +#if (!defined(PYPY_VERSION_NUM) || PYPY_VERSION_NUM >= 0x07030600) +extern PyObject *current_handler; /* PyContextVar/PyCapsule */ +extern PyDataMem_Handler default_handler; #endif + +NPY_NO_EXPORT PyObject * +get_handler_name(PyObject *NPY_UNUSED(self), PyObject *obj); + +#endif /* NUMPY_CORE_SRC_MULTIARRAY_ALLOC_H_ */ diff --git a/numpy/core/src/multiarray/array_assign_array.c b/numpy/core/src/multiarray/array_assign_array.c index 665dadfbf..020a7f29a 100644 --- a/numpy/core/src/multiarray/array_assign_array.c +++ b/numpy/core/src/multiarray/array_assign_array.c @@ -6,13 +6,13 @@ * * See LICENSE.txt for the license. 
*/ +#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#define _MULTIARRAYMODULE #define PY_SSIZE_T_CLEAN #include <Python.h> -#define NPY_NO_DEPRECATED_API NPY_API_VERSION -#define _MULTIARRAYMODULE -#include <numpy/ndarraytypes.h> +#include "numpy/ndarraytypes.h" #include "npy_config.h" #include "npy_pycompat.h" diff --git a/numpy/core/src/multiarray/array_assign_scalar.c b/numpy/core/src/multiarray/array_assign_scalar.c index 6cd5f4ad9..4ffef7ecc 100644 --- a/numpy/core/src/multiarray/array_assign_scalar.c +++ b/numpy/core/src/multiarray/array_assign_scalar.c @@ -6,12 +6,12 @@ * * See LICENSE.txt for the license. */ +#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#define _MULTIARRAYMODULE #define PY_SSIZE_T_CLEAN #include <Python.h> -#define NPY_NO_DEPRECATED_API NPY_API_VERSION -#define _MULTIARRAYMODULE #include <numpy/ndarraytypes.h> #include "npy_config.h" diff --git a/numpy/core/src/multiarray/array_coercion.c b/numpy/core/src/multiarray/array_coercion.c index 713bf7505..b335b64a0 100644 --- a/numpy/core/src/multiarray/array_coercion.c +++ b/numpy/core/src/multiarray/array_coercion.c @@ -1,8 +1,9 @@ #define NPY_NO_DEPRECATED_API NPY_API_VERSION -#define _UMATHMODULE #define _MULTIARRAYMODULE +#define _UMATHMODULE -#include "Python.h" +#define PY_SSIZE_T_CLEAN +#include <Python.h> #include "numpy/npy_3kcompat.h" @@ -135,7 +136,7 @@ _prime_global_pytype_to_type_dict(void) * * This assumes that the DType class is guaranteed to hold on the * python type (this assumption is guaranteed). - * This functionality supercedes ``_typenum_fromtypeobj``. + * This functionality supersedes ``_typenum_fromtypeobj``. * * @param DType DType to map the python type to * @param pytype Python type to map from @@ -1400,7 +1401,7 @@ PyArray_DiscoverDTypeAndShape( * These should be largely deprecated, and represent only the DType class * for most `dtype` parameters. * - * TODO: This function should eventually recieve a deprecation warning and + * TODO: This function should eventually receive a deprecation warning and * be removed. * * @param descr diff --git a/numpy/core/src/multiarray/array_coercion.h b/numpy/core/src/multiarray/array_coercion.h index c5ccad225..db0e479fe 100644 --- a/numpy/core/src/multiarray/array_coercion.h +++ b/numpy/core/src/multiarray/array_coercion.h @@ -1,5 +1,5 @@ -#ifndef _NPY_ARRAY_COERCION_H -#define _NPY_ARRAY_COERCION_H +#ifndef NUMPY_CORE_SRC_MULTIARRAY_ARRAY_COERCION_H_ +#define NUMPY_CORE_SRC_MULTIARRAY_ARRAY_COERCION_H_ /* @@ -54,4 +54,4 @@ npy_unlink_coercion_cache(coercion_cache_obj *current); NPY_NO_EXPORT int PyArray_AssignFromCache(PyArrayObject *self, coercion_cache_obj *cache); -#endif /* _NPY_ARRAY_COERCION_H */ +#endif /* NUMPY_CORE_SRC_MULTIARRAY_ARRAY_COERCION_H_ */ diff --git a/numpy/core/src/multiarray/array_method.c b/numpy/core/src/multiarray/array_method.c index 44ba8c733..d93dac506 100644 --- a/numpy/core/src/multiarray/array_method.c +++ b/numpy/core/src/multiarray/array_method.c @@ -26,10 +26,9 @@ * It is then sufficient for a ufunc (or other owner) to only hold a * weak reference to the input DTypes. 
*/ - - #define NPY_NO_DEPRECATED_API NPY_API_VERSION #define _MULTIARRAYMODULE + #include <npy_pycompat.h> #include "arrayobject.h" #include "array_method.h" @@ -59,16 +58,10 @@ default_resolve_descriptors( { int nin = method->nin; int nout = method->nout; - int all_defined = 1; for (int i = 0; i < nin + nout; i++) { PyArray_DTypeMeta *dtype = dtypes[i]; - if (dtype == NULL) { - output_descrs[i] = NULL; - all_defined = 0; - continue; - } - if (NPY_DTYPE(input_descrs[i]) == dtype) { + if (input_descrs[i] != NULL) { output_descrs[i] = ensure_dtype_nbo(input_descrs[i]); } else { @@ -78,41 +71,11 @@ default_resolve_descriptors( goto fail; } } - if (all_defined) { - return method->casting; - } - - if (NPY_UNLIKELY(nin == 0 || dtypes[0] == NULL)) { - /* Registration should reject this, so this would be indicates a bug */ - PyErr_SetString(PyExc_RuntimeError, - "Invalid use of default resolver without inputs or with " - "input or output DType incorrectly missing."); - goto fail; - } - /* We find the common dtype of all inputs, and use it for the unknowns */ - PyArray_DTypeMeta *common_dtype = dtypes[0]; - assert(common_dtype != NULL); - for (int i = 1; i < nin; i++) { - Py_SETREF(common_dtype, PyArray_CommonDType(common_dtype, dtypes[i])); - if (common_dtype == NULL) { - goto fail; - } - } - for (int i = nin; i < nin + nout; i++) { - if (output_descrs[i] != NULL) { - continue; - } - if (NPY_DTYPE(input_descrs[i]) == common_dtype) { - output_descrs[i] = ensure_dtype_nbo(input_descrs[i]); - } - else { - output_descrs[i] = NPY_DT_CALL_default_descr(common_dtype); - } - if (NPY_UNLIKELY(output_descrs[i] == NULL)) { - goto fail; - } - } - + /* + * If we relax the requirement for specifying all `dtypes` (e.g. allow + * abstract ones or unspecified outputs). We can use the common-dtype + * operation to provide a default here. + */ return method->casting; fail: @@ -220,9 +183,18 @@ validate_spec(PyArrayMethod_Spec *spec) } for (int i = 0; i < nargs; i++) { - if (spec->dtypes[i] == NULL && i < spec->nin) { + /* + * Note that we could allow for output dtypes to not be specified + * (the array-method would have to make sure to support this). + * We could even allow for some dtypes to be abstract. + * For now, assume that this is better handled in a promotion step. + * One problem with providing all DTypes is the definite need to + * hold references. We probably, eventually, have to implement + * traversal and trust the GC to deal with it. + */ + if (spec->dtypes[i] == NULL) { PyErr_Format(PyExc_TypeError, - "ArrayMethod must have well defined input DTypes. " + "ArrayMethod must provide all input and output DTypes. " "(method: %s)", spec->name); return -1; } @@ -232,10 +204,10 @@ validate_spec(PyArrayMethod_Spec *spec) "(method: %s)", spec->dtypes[i], spec->name); return -1; } - if (NPY_DT_is_abstract(spec->dtypes[i]) && i < spec->nin) { + if (NPY_DT_is_abstract(spec->dtypes[i])) { PyErr_Format(PyExc_TypeError, - "abstract DType %S are currently not allowed for inputs." - "(method: %s defined at %s)", spec->dtypes[i], spec->name); + "abstract DType %S are currently not supported." + "(method: %s)", spec->dtypes[i], spec->name); return -1; } } @@ -324,7 +296,7 @@ fill_arraymethod_from_slots( PyErr_Format(PyExc_TypeError, "Must specify output DTypes or use custom " "`resolve_descriptors` when there are no inputs. 
" - "(method: %s defined at %s)", spec->name); + "(method: %s)", spec->name); return -1; } } @@ -371,6 +343,26 @@ fill_arraymethod_from_slots( } +/* + * Public version of `PyArrayMethod_FromSpec_int` (see below). + * + * TODO: Error paths will probably need to be improved before a release into + * the non-experimental public API. + */ +NPY_NO_EXPORT PyObject * +PyArrayMethod_FromSpec(PyArrayMethod_Spec *spec) +{ + for (int i = 0; i < spec->nin + spec->nout; i++) { + if (!PyObject_TypeCheck(spec->dtypes[i], &PyArrayDTypeMeta_Type)) { + PyErr_SetString(PyExc_RuntimeError, + "ArrayMethod spec contained a non DType."); + return NULL; + } + } + return (PyObject *)PyArrayMethod_FromSpec_int(spec, 0); +} + + /** * Create a new ArrayMethod (internal version). * @@ -467,7 +459,6 @@ NPY_NO_EXPORT PyTypeObject PyArrayMethod_Type = { }; - static PyObject * boundarraymethod_repr(PyBoundArrayMethodObject *self) { @@ -477,9 +468,11 @@ boundarraymethod_repr(PyBoundArrayMethodObject *self) if (dtypes == NULL) { return NULL; } - return PyUnicode_FromFormat( - "<np._BoundArrayMethod `%s` for dtypes %S>", - self->method->name, dtypes); + PyObject *repr = PyUnicode_FromFormat( + "<np._BoundArrayMethod `%s` for dtypes %S>", + self->method->name, dtypes); + Py_DECREF(dtypes); + return repr; } @@ -683,7 +676,7 @@ boundarraymethod__simple_strided_call( "All arrays must have the same length."); return NULL; } - if (i >= nout) { + if (i >= nin) { if (PyArray_FailUnlessWriteable( arrays[i], "_simple_strided_call() output") < 0) { return NULL; @@ -787,6 +780,13 @@ _masked_stridedloop_data_free(NpyAuxData *auxdata) * This function wraps a regular unmasked strided-loop as a * masked strided-loop, only calling the function for elements * where the mask is True. + * + * TODO: Reductions also use this code to implement masked reductions. + * Before consolidating them, reductions had a special case for + * broadcasts: when the mask stride was 0 the code does not check all + * elements as `npy_memchr` currently does. + * It may be worthwhile to add such an optimization again if broadcasted + * masks are common enough. */ static int generic_masked_strided_loop(PyArrayMethod_Context *context, @@ -806,7 +806,7 @@ generic_masked_strided_loop(PyArrayMethod_Context *context, npy_intp N = dimensions[0]; /* Process the data as runs of unmasked values */ do { - ssize_t subloopsize; + Py_ssize_t subloopsize; /* Skip masked values */ mask = npy_memchr(mask, 0, mask_stride, N, &subloopsize, 1); diff --git a/numpy/core/src/multiarray/array_method.h b/numpy/core/src/multiarray/array_method.h index fc2304889..7b7372bd0 100644 --- a/numpy/core/src/multiarray/array_method.h +++ b/numpy/core/src/multiarray/array_method.h @@ -1,5 +1,5 @@ -#ifndef _NPY_ARRAY_METHOD_H -#define _NPY_ARRAY_METHOD_H +#ifndef NUMPY_CORE_SRC_MULTIARRAY_ARRAY_METHOD_H_ +#define NUMPY_CORE_SRC_MULTIARRAY_ARRAY_METHOD_H_ #define NPY_NO_DEPRECATED_API NPY_API_VERSION #define _MULTIARRAYMODULE @@ -21,6 +21,17 @@ typedef enum { NPY_METH_NO_FLOATINGPOINT_ERRORS = 1 << 2, /* Whether the method supports unaligned access (not runtime) */ NPY_METH_SUPPORTS_UNALIGNED = 1 << 3, + /* + * Private flag for now for *logic* functions. The logical functions + * `logical_or` and `logical_and` can always cast the inputs to booleans + * "safely" (because that is how the cast to bool is defined). + * @seberg: I am not sure this is the best way to handle this, so its + * private for now (also it is very limited anyway). + * There is one "exception". 
NA aware dtypes cannot cast to bool + * (hopefully), so the `??->?` loop should error even with this flag. + * But a second NA fallback loop will be necessary. + */ + _NPY_METH_FORCE_CAST_INPUTS = 1 << 17, /* All flags which can change at runtime */ NPY_METH_RUNTIME_FLAGS = ( @@ -170,6 +181,11 @@ PyArrayMethod_GetMaskedStridedLoop( NPY_ARRAYMETHOD_FLAGS *flags); + +NPY_NO_EXPORT PyObject * +PyArrayMethod_FromSpec(PyArrayMethod_Spec *spec); + + /* * TODO: This function is the internal version, and its error paths may * need better tests when a public version is exposed. @@ -177,4 +193,4 @@ PyArrayMethod_GetMaskedStridedLoop( NPY_NO_EXPORT PyBoundArrayMethodObject * PyArrayMethod_FromSpec_int(PyArrayMethod_Spec *spec, int private); -#endif /*_NPY_ARRAY_METHOD_H*/ +#endif /* NUMPY_CORE_SRC_MULTIARRAY_ARRAY_METHOD_H_ */ diff --git a/numpy/core/src/multiarray/arrayfunction_override.h b/numpy/core/src/multiarray/arrayfunction_override.h index fdf0dfcaf..09f7ee548 100644 --- a/numpy/core/src/multiarray/arrayfunction_override.h +++ b/numpy/core/src/multiarray/arrayfunction_override.h @@ -1,5 +1,5 @@ -#ifndef _NPY_PRIVATE__ARRAYFUNCTION_OVERRIDE_H -#define _NPY_PRIVATE__ARRAYFUNCTION_OVERRIDE_H +#ifndef NUMPY_CORE_SRC_MULTIARRAY_ARRAYFUNCTION_OVERRIDE_H_ +#define NUMPY_CORE_SRC_MULTIARRAY_ARRAYFUNCTION_OVERRIDE_H_ NPY_NO_EXPORT PyObject * array_implement_array_function( @@ -19,4 +19,4 @@ NPY_NO_EXPORT PyObject * array_function_method_impl(PyObject *func, PyObject *types, PyObject *args, PyObject *kwargs); -#endif +#endif /* NUMPY_CORE_SRC_MULTIARRAY_ARRAYFUNCTION_OVERRIDE_H_ */ diff --git a/numpy/core/src/multiarray/arrayobject.c b/numpy/core/src/multiarray/arrayobject.c index 55ba5601b..c8aaced4e 100644 --- a/numpy/core/src/multiarray/arrayobject.c +++ b/numpy/core/src/multiarray/arrayobject.c @@ -20,13 +20,13 @@ maintainer email: oliphant.travis@ieee.org Space Science Telescope Institute (J. Todd Miller, Perry Greenfield, Rick White) */ +#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#define _MULTIARRAYMODULE + #define PY_SSIZE_T_CLEAN #include <Python.h> -#include "structmember.h" +#include <structmember.h> -/*#include <stdio.h>*/ -#define NPY_NO_DEPRECATED_API NPY_API_VERSION -#define _MULTIARRAYMODULE #include "numpy/arrayobject.h" #include "numpy/arrayscalars.h" @@ -493,7 +493,28 @@ array_dealloc(PyArrayObject *self) if (PyDataType_FLAGCHK(fa->descr, NPY_ITEM_REFCOUNT)) { PyArray_XDECREF(self); } - npy_free_cache(fa->data, PyArray_NBYTES(self)); + /* + * Allocation will never be 0, see comment in ctors.c + * line 820 + */ + size_t nbytes = PyArray_NBYTES(self); + if (nbytes == 0) { + nbytes = fa->descr->elsize ? fa->descr->elsize : 1; + } + if (fa->mem_handler == NULL) { + char *env = getenv("NUMPY_WARN_IF_NO_MEM_POLICY"); + if ((env != NULL) && (strncmp(env, "1", 1) == 0)) { + char const * msg = "Trying to dealloc data, but a memory policy " + "is not set. If you take ownership of the data, you must " + "set a base owning the data (e.g. a PyCapsule)."; + WARN_IN_DEALLOC(PyExc_RuntimeWarning, msg); + } + // Guess at malloc/free ??? 
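`generic_masked_strided_loop` above processes data as runs: skip a stretch of masked elements, then hand the following unmasked stretch to the inner loop in a single call. A simplified contiguous-mask version of that control flow (`npy_memchr` is NumPy-internal, so a plain scanner stands in for it; `masked_add1` is a made-up inner operation):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static size_t
find_run(const uint8_t *mask, size_t n, uint8_t value)
{
    size_t i = 0;
    while (i < n && mask[i] == value) {
        i++;
    }
    return i;  /* length of the leading run of `value` */
}

static void
masked_add1(double *data, const uint8_t *mask, size_t n)
{
    while (n > 0) {
        size_t skip = find_run(mask, n, 0);   /* skip masked values */
        data += skip; mask += skip; n -= skip;
        size_t run = find_run(mask, n, 1);    /* process unmasked run */
        for (size_t i = 0; i < run; i++) {
            data[i] += 1.0;                   /* stand-in inner loop */
        }
        data += run; mask += run; n -= run;
    }
}

int main(void)
{
    double x[6] = {0, 0, 0, 0, 0, 0};
    uint8_t m[6] = {1, 1, 0, 0, 1, 0};
    masked_add1(x, m, 6);
    for (int i = 0; i < 6; i++) {
        printf("%g ", x[i]);                  /* prints: 1 1 0 0 1 0 */
    }
    printf("\n");
    return 0;
}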
+ free(fa->data); + } else { + PyDataMem_UserFREE(fa->data, nbytes, fa->mem_handler); + Py_DECREF(fa->mem_handler); + } } /* must match allocation in PyArray_NewFromDescr */ @@ -858,7 +879,7 @@ _uni_release(char *ptr, int nc) relfunc(aptr, N1); \ return -1; \ } \ - val = compfunc(aptr, bptr, N1, N2); \ + val = compfunc(aptr, bptr, N1, N2); \ *dptr = (val CMP 0); \ PyArray_ITER_NEXT(iself); \ PyArray_ITER_NEXT(iother); \ @@ -870,7 +891,7 @@ _uni_release(char *ptr, int nc) #define _reg_loop(CMP) { \ while(size--) { \ - val = compfunc((void *)iself->dataptr, \ + val = compfunc((void *)iself->dataptr, \ (void *)iother->dataptr, \ N1, N2); \ *dptr = (val CMP 0); \ @@ -1705,22 +1726,6 @@ array_iter(PyArrayObject *arr) return PySeqIter_New((PyObject *)arr); } -static PyObject * -array_alloc(PyTypeObject *type, Py_ssize_t NPY_UNUSED(nitems)) -{ - /* nitems will always be 0 */ - PyObject *obj = PyObject_Malloc(type->tp_basicsize); - PyObject_Init(obj, type); - return obj; -} - -static void -array_free(PyObject * v) -{ - /* avoid same deallocator as PyBaseObject, see gentype_free */ - PyObject_Free(v); -} - NPY_NO_EXPORT PyTypeObject PyArray_Type = { PyVarObject_HEAD_INIT(NULL, 0) @@ -1741,7 +1746,5 @@ NPY_NO_EXPORT PyTypeObject PyArray_Type = { .tp_iter = (getiterfunc)array_iter, .tp_methods = array_methods, .tp_getset = array_getsetlist, - .tp_alloc = (allocfunc)array_alloc, .tp_new = (newfunc)array_new, - .tp_free = (freefunc)array_free, }; diff --git a/numpy/core/src/multiarray/arrayobject.h b/numpy/core/src/multiarray/arrayobject.h index 9b74944ff..fb9b0bd81 100644 --- a/numpy/core/src/multiarray/arrayobject.h +++ b/numpy/core/src/multiarray/arrayobject.h @@ -1,10 +1,10 @@ -#ifndef _NPY_INTERNAL_ARRAYOBJECT_H_ -#define _NPY_INTERNAL_ARRAYOBJECT_H_ - #ifndef _MULTIARRAYMODULE #error You should not include this #endif +#ifndef NUMPY_CORE_SRC_MULTIARRAY_ARRAYOBJECT_H_ +#define NUMPY_CORE_SRC_MULTIARRAY_ARRAYOBJECT_H_ + NPY_NO_EXPORT PyObject * _strings_richcompare(PyArrayObject *self, PyArrayObject *other, int cmp_op, int rstrip); @@ -26,4 +26,4 @@ array_might_be_written(PyArrayObject *obj); */ static const int NPY_ARRAY_WARN_ON_WRITE = (1 << 31); -#endif +#endif /* NUMPY_CORE_SRC_MULTIARRAY_ARRAYOBJECT_H_ */ diff --git a/numpy/core/src/multiarray/arraytypes.c.src b/numpy/core/src/multiarray/arraytypes.c.src index b3ea7544d..71808cc48 100644 --- a/numpy/core/src/multiarray/arraytypes.c.src +++ b/numpy/core/src/multiarray/arraytypes.c.src @@ -1,7 +1,7 @@ /* -*- c -*- */ #define PY_SSIZE_T_CLEAN -#include "Python.h" -#include "structmember.h" +#include <Python.h> +#include <structmember.h> #include <limits.h> #include <assert.h> @@ -62,7 +62,7 @@ get_dummy_stack_array(PyArrayObject *orig) PyArrayObject_fields new_fields; new_fields.flags = PyArray_FLAGS(orig); /* Set to NULL so the dummy object can be distinguished from the real one */ - Py_TYPE(&new_fields) = NULL; + Py_SET_TYPE(&new_fields, NULL); new_fields.base = (PyObject *)orig; return new_fields; } @@ -2759,10 +2759,10 @@ VOID_nonzero (char *ip, PyArrayObject *ap) dummy_fields.descr = new; if ((new->alignment > 1) && !__ALIGNED(ip + offset, new->alignment)) { - PyArray_CLEARFLAGS(ap, NPY_ARRAY_ALIGNED); + PyArray_CLEARFLAGS(dummy_arr, NPY_ARRAY_ALIGNED); } else { - PyArray_ENABLEFLAGS(ap, NPY_ARRAY_ALIGNED); + PyArray_ENABLEFLAGS(dummy_arr, NPY_ARRAY_ALIGNED); } if (new->f->nonzero(ip+offset, dummy_arr)) { nonz = NPY_TRUE; @@ -3093,6 +3093,10 @@ VOID_compare(char *ip1, char *ip2, PyArrayObject *ap) if (!PyArray_HASFIELDS(ap)) { return 
STRING_compare(ip1, ip2, ap); } + PyObject *mem_handler = PyDataMem_GetHandler(); + if (mem_handler == NULL) { + goto finish; + } descr = PyArray_DESCR(ap); /* * Compare on the first-field. If equal, then @@ -3107,15 +3111,19 @@ VOID_compare(char *ip1, char *ip2, PyArrayObject *ap) if (_unpack_field(tup, &new, &offset) < 0) { goto finish; } - /* descr is the only field checked by compare or copyswap */ + /* Set the fields needed by compare or copyswap */ dummy_struct.descr = new; + swap = PyArray_ISBYTESWAPPED(dummy); nip1 = ip1 + offset; nip2 = ip2 + offset; if (swap || new->alignment > 1) { if (swap || !npy_is_aligned(nip1, new->alignment)) { - /* create buffer and copy */ - nip1 = npy_alloc_cache(new->elsize); + /* + * create temporary buffer and copy, + * always use the current handler for internal allocations + */ + nip1 = PyDataMem_UserNEW(new->elsize, mem_handler); if (nip1 == NULL) { goto finish; } @@ -3124,11 +3132,15 @@ VOID_compare(char *ip1, char *ip2, PyArrayObject *ap) new->f->copyswap(nip1, NULL, swap, dummy); } if (swap || !npy_is_aligned(nip2, new->alignment)) { - /* create buffer and copy */ - nip2 = npy_alloc_cache(new->elsize); + /* + * create temporary buffer and copy, + * always use the current handler for internal allocations + */ + nip2 = PyDataMem_UserNEW(new->elsize, mem_handler); if (nip2 == NULL) { if (nip1 != ip1 + offset) { - npy_free_cache(nip1, new->elsize); + /* destroy temporary buffer */ + PyDataMem_UserFREE(nip1, new->elsize, mem_handler); } goto finish; } @@ -3140,10 +3152,12 @@ VOID_compare(char *ip1, char *ip2, PyArrayObject *ap) res = new->f->compare(nip1, nip2, dummy); if (swap || new->alignment > 1) { if (nip1 != ip1 + offset) { - npy_free_cache(nip1, new->elsize); + /* destroy temporary buffer */ + PyDataMem_UserFREE(nip1, new->elsize, mem_handler); } if (nip2 != ip2 + offset) { - npy_free_cache(nip2, new->elsize); + /* destroy temporary buffer */ + PyDataMem_UserFREE(nip2, new->elsize, mem_handler); } } if (res != 0) { @@ -3152,6 +3166,7 @@ VOID_compare(char *ip1, char *ip2, PyArrayObject *ap) } finish: + Py_XDECREF(mem_handler); return res; } diff --git a/numpy/core/src/multiarray/arraytypes.h b/numpy/core/src/multiarray/arraytypes.h index a9469aef7..b3a13b297 100644 --- a/numpy/core/src/multiarray/arraytypes.h +++ b/numpy/core/src/multiarray/arraytypes.h @@ -1,5 +1,5 @@ -#ifndef _NPY_ARRAYTYPES_H_ -#define _NPY_ARRAYTYPES_H_ +#ifndef NUMPY_CORE_SRC_MULTIARRAY_ARRAYTYPES_H_ +#define NUMPY_CORE_SRC_MULTIARRAY_ARRAYTYPES_H_ #include "common.h" @@ -28,4 +28,4 @@ small_correlate(const char * d_, npy_intp dstride, npy_intp nk, enum NPY_TYPES ktype, char * out_, npy_intp ostride); -#endif +#endif /* NUMPY_CORE_SRC_MULTIARRAY_ARRAYTYPES_H_ */ diff --git a/numpy/core/src/multiarray/buffer.c b/numpy/core/src/multiarray/buffer.c index 5458c81cc..d10122c4f 100644 --- a/numpy/core/src/multiarray/buffer.c +++ b/numpy/core/src/multiarray/buffer.c @@ -1,9 +1,10 @@ +#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#define _MULTIARRAYMODULE + #define PY_SSIZE_T_CLEAN #include <Python.h> -#include "structmember.h" +#include <structmember.h> -#define NPY_NO_DEPRECATED_API NPY_API_VERSION -#define _MULTIARRAYMODULE #include "numpy/arrayobject.h" #include "numpy/arrayscalars.h" diff --git a/numpy/core/src/multiarray/calculation.c b/numpy/core/src/multiarray/calculation.c index 21e52c32b..327f685d4 100644 --- a/numpy/core/src/multiarray/calculation.c +++ b/numpy/core/src/multiarray/calculation.c @@ -1,9 +1,10 @@ +#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#define 
_MULTIARRAYMODULE + #define PY_SSIZE_T_CLEAN #include <Python.h> -#include "structmember.h" +#include <structmember.h> -#define NPY_NO_DEPRECATED_API NPY_API_VERSION -#define _MULTIARRAYMODULE #include "numpy/arrayobject.h" #include "lowlevel_strided_loops.h" diff --git a/numpy/core/src/multiarray/calculation.h b/numpy/core/src/multiarray/calculation.h index 49105a138..6a9c3c7c9 100644 --- a/numpy/core/src/multiarray/calculation.h +++ b/numpy/core/src/multiarray/calculation.h @@ -1,5 +1,5 @@ -#ifndef _NPY_CALCULATION_H_ -#define _NPY_CALCULATION_H_ +#ifndef NUMPY_CORE_SRC_MULTIARRAY_CALCULATION_H_ +#define NUMPY_CORE_SRC_MULTIARRAY_CALCULATION_H_ NPY_NO_EXPORT PyObject* PyArray_ArgMax(PyArrayObject* self, int axis, PyArrayObject *out); @@ -67,4 +67,4 @@ PyArray_All(PyArrayObject* self, int axis, PyArrayObject* out); NPY_NO_EXPORT PyObject* PyArray_Any(PyArrayObject* self, int axis, PyArrayObject* out); -#endif +#endif /* NUMPY_CORE_SRC_MULTIARRAY_CALCULATION_H_ */ diff --git a/numpy/core/src/multiarray/common.c b/numpy/core/src/multiarray/common.c index 1fd9ab1a3..82d34193d 100644 --- a/numpy/core/src/multiarray/common.c +++ b/numpy/core/src/multiarray/common.c @@ -1,8 +1,9 @@ +#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#define _MULTIARRAYMODULE + #define PY_SSIZE_T_CLEAN #include <Python.h> -#define NPY_NO_DEPRECATED_API NPY_API_VERSION -#define _MULTIARRAYMODULE #include "numpy/arrayobject.h" #include "npy_config.h" diff --git a/numpy/core/src/multiarray/common.h b/numpy/core/src/multiarray/common.h index 203decaa0..b3526c4c1 100644 --- a/numpy/core/src/multiarray/common.h +++ b/numpy/core/src/multiarray/common.h @@ -1,10 +1,11 @@ -#ifndef _NPY_PRIVATE_COMMON_H_ -#define _NPY_PRIVATE_COMMON_H_ -#include "structmember.h" -#include <numpy/npy_common.h> -#include <numpy/ndarraytypes.h> -#include <limits.h> +#ifndef NUMPY_CORE_SRC_MULTIARRAY_COMMON_H_ +#define NUMPY_CORE_SRC_MULTIARRAY_COMMON_H_ + +#include <structmember.h> +#include "numpy/npy_common.h" +#include "numpy/ndarraytypes.h" #include "npy_import.h" +#include <limits.h> #define error_converting(x) (((x) == -1) && PyErr_Occurred()) @@ -343,5 +344,4 @@ new_array_for_sum(PyArrayObject *ap1, PyArrayObject *ap2, PyArrayObject* out, */ #define NPY_ITER_REDUCTION_AXIS(axis) (axis + (1 << (NPY_BITSOF_INT - 2))) -#endif - +#endif /* NUMPY_CORE_SRC_MULTIARRAY_COMMON_H_ */ diff --git a/numpy/core/src/multiarray/common_dtype.c b/numpy/core/src/multiarray/common_dtype.c index 659580c98..ca80b1ed7 100644 --- a/numpy/core/src/multiarray/common_dtype.c +++ b/numpy/core/src/multiarray/common_dtype.c @@ -1,9 +1,10 @@ +#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#define _MULTIARRAYMODULE + #define PY_SSIZE_T_CLEAN #include <Python.h> -#define NPY_NO_DEPRECATED_API NPY_API_VERSION -#define _MULTIARRAYMODULE -#include <numpy/npy_common.h> +#include "numpy/npy_common.h" #include "numpy/arrayobject.h" #include "common_dtype.h" diff --git a/numpy/core/src/multiarray/common_dtype.h b/numpy/core/src/multiarray/common_dtype.h index b3666531a..13d38ddf8 100644 --- a/numpy/core/src/multiarray/common_dtype.h +++ b/numpy/core/src/multiarray/common_dtype.h @@ -1,5 +1,5 @@ -#ifndef _NPY_COMMON_DTYPE_H_ -#define _NPY_COMMON_DTYPE_H_ +#ifndef NUMPY_CORE_SRC_MULTIARRAY_COMMON_DTYPE_H_ +#define NUMPY_CORE_SRC_MULTIARRAY_COMMON_DTYPE_H_ #define NPY_NO_DEPRECATED_API NPY_API_VERSION #define _MULTIARRAYMODULE @@ -14,4 +14,4 @@ NPY_NO_EXPORT PyArray_DTypeMeta * PyArray_PromoteDTypeSequence( npy_intp length, PyArray_DTypeMeta **dtypes_in); -#endif /* 
_NPY_COMMON_DTYPE_H_ */ +#endif /* NUMPY_CORE_SRC_MULTIARRAY_COMMON_DTYPE_H_ */ diff --git a/numpy/core/src/multiarray/compiled_base.c b/numpy/core/src/multiarray/compiled_base.c index de793f87c..9910fffe6 100644 --- a/numpy/core/src/multiarray/compiled_base.c +++ b/numpy/core/src/multiarray/compiled_base.c @@ -1,9 +1,10 @@ #define NPY_NO_DEPRECATED_API NPY_API_VERSION +#define _MULTIARRAYMODULE + +#define PY_SSIZE_T_CLEAN #include <Python.h> #include <structmember.h> -#include <string.h> -#define _MULTIARRAYMODULE #include "numpy/arrayobject.h" #include "numpy/npy_3kcompat.h" #include "numpy/npy_math.h" @@ -15,6 +16,8 @@ #include "common.h" #include "simd/simd.h" +#include <string.h> + typedef enum { PACK_ORDER_LITTLE = 0, PACK_ORDER_BIG @@ -1425,9 +1428,26 @@ arr_add_docstring(PyObject *NPY_UNUSED(dummy), PyObject *args) PyCFunctionObject *new = (PyCFunctionObject *)obj; _ADDDOC(new->m_ml->ml_doc, new->m_ml->ml_name); } - else if (Py_TYPE(obj) == &PyType_Type) { + else if (PyObject_TypeCheck(obj, &PyType_Type)) { + /* + * We add it to both `tp_doc` and `__doc__` here. Note that in theory + * `tp_doc` extracts the signature line, but we currently do not use + * it. It may make sense to only add it as `__doc__` and + * `__text_signature__` to the dict in the future. + * The dictionary path is only necessary for heaptypes (currently not + * used) and metaclasses. + * If `__doc__` as stored in `tp_dict` is None, we assume this was + * filled in by `PyType_Ready()` and should also be replaced. + */ PyTypeObject *new = (PyTypeObject *)obj; _ADDDOC(new->tp_doc, new->tp_name); + if (new->tp_dict != NULL && PyDict_CheckExact(new->tp_dict) && + PyDict_GetItemString(new->tp_dict, "__doc__") == Py_None) { + /* Warning: Modifying `tp_dict` is not generally safe! 
*/ + if (PyDict_SetItemString(new->tp_dict, "__doc__", str) < 0) { + return NULL; + } + } } else if (Py_TYPE(obj) == &PyMemberDescr_Type) { PyMemberDescrObject *new = (PyMemberDescrObject *)obj; diff --git a/numpy/core/src/multiarray/compiled_base.h b/numpy/core/src/multiarray/compiled_base.h index 082139910..d3bc08cb2 100644 --- a/numpy/core/src/multiarray/compiled_base.h +++ b/numpy/core/src/multiarray/compiled_base.h @@ -1,6 +1,7 @@ -#ifndef _NPY_PRIVATE__COMPILED_BASE_H_ -#define _NPY_PRIVATE__COMPILED_BASE_H_ -#include <numpy/ndarraytypes.h> +#ifndef NUMPY_CORE_SRC_MULTIARRAY_COMPILED_BASE_H_ +#define NUMPY_CORE_SRC_MULTIARRAY_COMPILED_BASE_H_ + +#include "numpy/ndarraytypes.h" NPY_NO_EXPORT PyObject * arr_insert(PyObject *, PyObject *, PyObject *); @@ -23,4 +24,4 @@ io_pack(PyObject *, PyObject *, PyObject *); NPY_NO_EXPORT PyObject * io_unpack(PyObject *, PyObject *, PyObject *); -#endif +#endif /* NUMPY_CORE_SRC_MULTIARRAY_COMPILED_BASE_H_ */ diff --git a/numpy/core/src/multiarray/conversion_utils.c b/numpy/core/src/multiarray/conversion_utils.c index 15fe4bde2..59e3b4922 100644 --- a/numpy/core/src/multiarray/conversion_utils.c +++ b/numpy/core/src/multiarray/conversion_utils.c @@ -1,9 +1,10 @@ +#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#define _MULTIARRAYMODULE + #define PY_SSIZE_T_CLEAN #include <Python.h> -#include "structmember.h" +#include <structmember.h> -#define NPY_NO_DEPRECATED_API NPY_API_VERSION -#define _MULTIARRAYMODULE #include "numpy/arrayobject.h" #include "numpy/arrayscalars.h" diff --git a/numpy/core/src/multiarray/conversion_utils.h b/numpy/core/src/multiarray/conversion_utils.h index 35525b7fe..4662c6a8b 100644 --- a/numpy/core/src/multiarray/conversion_utils.h +++ b/numpy/core/src/multiarray/conversion_utils.h @@ -1,7 +1,7 @@ -#ifndef _NPY_PRIVATE_CONVERSION_UTILS_H_ -#define _NPY_PRIVATE_CONVERSION_UTILS_H_ +#ifndef NUMPY_CORE_SRC_MULTIARRAY_CONVERSION_UTILS_H_ +#define NUMPY_CORE_SRC_MULTIARRAY_CONVERSION_UTILS_H_ -#include <numpy/ndarraytypes.h> +#include "numpy/ndarraytypes.h" NPY_NO_EXPORT int PyArray_IntpConverter(PyObject *obj, PyArray_Dims *seq); @@ -85,4 +85,4 @@ PyArray_ConvertMultiAxis(PyObject *axis_in, int ndim, npy_bool *out_axis_flags); */ extern NPY_NO_EXPORT int evil_global_disable_warn_O4O8_flag; -#endif +#endif /* NUMPY_CORE_SRC_MULTIARRAY_CONVERSION_UTILS_H_ */ diff --git a/numpy/core/src/multiarray/convert.c b/numpy/core/src/multiarray/convert.c index 2ad8d6d0e..2f68db07c 100644 --- a/numpy/core/src/multiarray/convert.c +++ b/numpy/core/src/multiarray/convert.c @@ -1,11 +1,12 @@ +#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#define _MULTIARRAYMODULE + #define PY_SSIZE_T_CLEAN #include <Python.h> -#include "structmember.h" +#include <structmember.h> -#include <npy_config.h> +#include "npy_config.h" -#define NPY_NO_DEPRECATED_API NPY_API_VERSION -#define _MULTIARRAYMODULE #include "numpy/arrayobject.h" #include "numpy/arrayscalars.h" #include "npy_pycompat.h" diff --git a/numpy/core/src/multiarray/convert.h b/numpy/core/src/multiarray/convert.h index 96df19711..d64d9be3f 100644 --- a/numpy/core/src/multiarray/convert.h +++ b/numpy/core/src/multiarray/convert.h @@ -1,8 +1,8 @@ -#ifndef _NPY_ARRAYOBJECT_CONVERT_H_ -#define _NPY_ARRAYOBJECT_CONVERT_H_ +#ifndef NUMPY_CORE_SRC_MULTIARRAY_CONVERT_H_ +#define NUMPY_CORE_SRC_MULTIARRAY_CONVERT_H_ NPY_NO_EXPORT int PyArray_AssignZero(PyArrayObject *dst, PyArrayObject *wheremask); -#endif +#endif /* NUMPY_CORE_SRC_MULTIARRAY_CONVERT_H_ */ diff --git 
a/numpy/core/src/multiarray/convert_datatype.c b/numpy/core/src/multiarray/convert_datatype.c index 45b03a6f3..3135d6989 100644 --- a/numpy/core/src/multiarray/convert_datatype.c +++ b/numpy/core/src/multiarray/convert_datatype.c @@ -1,9 +1,10 @@ +#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#define _MULTIARRAYMODULE + #define PY_SSIZE_T_CLEAN #include <Python.h> -#include "structmember.h" +#include <structmember.h> -#define NPY_NO_DEPRECATED_API NPY_API_VERSION -#define _MULTIARRAYMODULE #include "numpy/arrayobject.h" #include "numpy/arrayscalars.h" @@ -1547,6 +1548,40 @@ should_use_min_scalar(npy_intp narrs, PyArrayObject **arr, } +/* + * Utility function used only in PyArray_ResultType for value-based logic. + * See that function for the meaning and contents of the parameters. + */ +static PyArray_Descr * +get_descr_from_cast_or_value( + npy_intp i, + PyArrayObject *arrs[], + npy_intp ndtypes, + PyArray_Descr *descriptor, + PyArray_DTypeMeta *common_dtype) +{ + PyArray_Descr *curr; + if (NPY_LIKELY(i < ndtypes || + !(PyArray_FLAGS(arrs[i-ndtypes]) & _NPY_ARRAY_WAS_PYSCALAR))) { + curr = PyArray_CastDescrToDType(descriptor, common_dtype); + } + else { + /* + * Unlike `PyArray_CastToDTypeAndPromoteDescriptors`, deal with + * plain Python values "graciously". This recovers the original + * value the long route, but it should almost never happen... + */ + PyObject *tmp = PyArray_GETITEM(arrs[i-ndtypes], + PyArray_BYTES(arrs[i-ndtypes])); + if (tmp == NULL) { + return NULL; + } + curr = NPY_DT_CALL_discover_descr_from_pyobject(common_dtype, tmp); + Py_DECREF(tmp); + } + return curr; +} + /*NUMPY_API * * Produces the result type of a bunch of inputs, using the same rules @@ -1648,7 +1683,7 @@ PyArray_ResultType( } Py_INCREF(all_DTypes[i_all]); /* - * Leave the decriptor empty, if we need it, we will have to go + * Leave the descriptor empty, if we need it, we will have to go * to more extreme lengths unfortunately. */ all_descriptors[i_all] = NULL; @@ -1683,28 +1718,15 @@ PyArray_ResultType( result = NPY_DT_CALL_default_descr(common_dtype); } else { - result = PyArray_CastDescrToDType(all_descriptors[0], common_dtype); + result = get_descr_from_cast_or_value( + 0, arrs, ndtypes, all_descriptors[0], common_dtype); + if (result == NULL) { + goto error; + } for (npy_intp i = 1; i < ndtypes+narrs; i++) { - PyArray_Descr *curr; - if (NPY_LIKELY(i < ndtypes || - !(PyArray_FLAGS(arrs[i-ndtypes]) & _NPY_ARRAY_WAS_PYSCALAR))) { - curr = PyArray_CastDescrToDType(all_descriptors[i], common_dtype); - } - else { - /* - * Unlike `PyArray_CastToDTypeAndPromoteDescriptors` deal with - * plain Python values "graciously". This recovers the original - * value the long route, but it should almost never happen... - */ - PyObject *tmp = PyArray_GETITEM( - arrs[i-ndtypes], PyArray_BYTES(arrs[i-ndtypes])); - if (tmp == NULL) { - goto error; - } - curr = NPY_DT_CALL_discover_descr_from_pyobject(common_dtype, tmp); - Py_DECREF(tmp); - } + PyArray_Descr *curr = get_descr_from_cast_or_value( + i, arrs, ndtypes, all_descriptors[i], common_dtype); if (curr == NULL) { goto error; } @@ -2097,7 +2119,7 @@ PyArray_ObjectType(PyObject *op, int minimum_type) * This function is only used in one place within NumPy and should * generally be avoided. It is provided mainly for backward compatibility. * - * The user of the function has to free the returned array. + * The user of the function has to free the returned array with PyDataMem_FREE. 
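The refactored `PyArray_ResultType` keeps the same public contract: inputs are reduced to a common DType, then each descriptor is cast to it (or, for Python scalars, re-discovered by value via `get_descr_from_cast_or_value`). A minimal embedded-interpreter sketch of the public API (assumes NumPy headers and libraries are available; error handling trimmed, and the expected promotion of float32 with int64 to float64 reflects the usual rules):

#define PY_SSIZE_T_CLEAN
#include <Python.h>
#include <numpy/arrayobject.h>
#include <stdio.h>

int main(void)
{
    Py_Initialize();
    if (_import_array() < 0) {  /* int-returning variant of import_array() */
        PyErr_Print();
        return 1;
    }
    npy_intp dims[1] = {3};
    PyObject *arr = PyArray_SimpleNew(1, dims, NPY_FLOAT32);
    PyArray_Descr *d_i8 = PyArray_DescrFromType(NPY_INT64);
    PyArrayObject *arrs[1] = {(PyArrayObject *)arr};
    PyArray_Descr *dtypes[1] = {d_i8};
    /* one float32 array combined with an int64 dtype */
    PyArray_Descr *res = PyArray_ResultType(1, arrs, 1, dtypes);
    if (res != NULL) {
        printf("promoted to type number %d (NPY_DOUBLE is %d)\n",
               res->type_num, NPY_DOUBLE);
        Py_DECREF(res);
    }
    Py_DECREF(arr);
    Py_DECREF(d_i8);
    Py_Finalize();
    return 0;
}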
*/ NPY_NO_EXPORT PyArrayObject ** PyArray_ConvertToCommonType(PyObject *op, int *retn) @@ -2242,7 +2264,7 @@ PyArray_AddCastingImplementation(PyBoundArrayMethodObject *meth) * Add a new casting implementation using a PyArrayMethod_Spec. * * @param spec - * @param private If private, allow slots not publically exposed. + * @param private If private, allow slots not publicly exposed. * @return 0 on success -1 on failure */ NPY_NO_EXPORT int diff --git a/numpy/core/src/multiarray/convert_datatype.h b/numpy/core/src/multiarray/convert_datatype.h index 22b3859d2..5e0682f22 100644 --- a/numpy/core/src/multiarray/convert_datatype.h +++ b/numpy/core/src/multiarray/convert_datatype.h @@ -1,5 +1,5 @@ -#ifndef _NPY_ARRAY_CONVERT_DATATYPE_H_ -#define _NPY_ARRAY_CONVERT_DATATYPE_H_ +#ifndef NUMPY_CORE_SRC_MULTIARRAY_CONVERT_DATATYPE_H_ +#define NUMPY_CORE_SRC_MULTIARRAY_CONVERT_DATATYPE_H_ #include "array_method.h" @@ -78,9 +78,9 @@ PyArray_CheckCastSafety(NPY_CASTING casting, NPY_NO_EXPORT NPY_CASTING legacy_same_dtype_resolve_descriptors( PyArrayMethodObject *self, - PyArray_DTypeMeta **dtypes, - PyArray_Descr **given_descrs, - PyArray_Descr **loop_descrs); + PyArray_DTypeMeta *dtypes[2], + PyArray_Descr *given_descrs[2], + PyArray_Descr *loop_descrs[2]); NPY_NO_EXPORT int legacy_cast_get_strided_loop( @@ -92,11 +92,11 @@ legacy_cast_get_strided_loop( NPY_NO_EXPORT NPY_CASTING simple_cast_resolve_descriptors( PyArrayMethodObject *self, - PyArray_DTypeMeta **dtypes, - PyArray_Descr **input_descrs, - PyArray_Descr **loop_descrs); + PyArray_DTypeMeta *dtypes[2], + PyArray_Descr *input_descrs[2], + PyArray_Descr *loop_descrs[2]); NPY_NO_EXPORT int PyArray_InitializeCasts(void); -#endif +#endif /* NUMPY_CORE_SRC_MULTIARRAY_CONVERT_DATATYPE_H_ */ diff --git a/numpy/core/src/multiarray/ctors.c b/numpy/core/src/multiarray/ctors.c index deab7d2a1..27fd3a057 100644 --- a/numpy/core/src/multiarray/ctors.c +++ b/numpy/core/src/multiarray/ctors.c @@ -1,9 +1,10 @@ +#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#define _MULTIARRAYMODULE + #define PY_SSIZE_T_CLEAN #include <Python.h> -#include "structmember.h" +#include <structmember.h> -#define NPY_NO_DEPRECATED_API NPY_API_VERSION -#define _MULTIARRAYMODULE #include "numpy/arrayobject.h" #include "numpy/arrayscalars.h" @@ -725,6 +726,7 @@ PyArray_NewFromDescr_int( fa->nd = nd; fa->dimensions = NULL; fa->data = NULL; + fa->mem_handler = NULL; if (data == NULL) { fa->flags = NPY_ARRAY_DEFAULT; @@ -804,12 +806,19 @@ PyArray_NewFromDescr_int( fa->flags |= NPY_ARRAY_C_CONTIGUOUS|NPY_ARRAY_F_CONTIGUOUS; } + if (data == NULL) { + /* Store the handler in case the default is modified */ + fa->mem_handler = PyDataMem_GetHandler(); + if (fa->mem_handler == NULL) { + goto fail; + } /* * Allocate something even for zero-space arrays * e.g. shape=(0,) -- otherwise buffer exposure * (a.data) doesn't work as it should. * Could probably just allocate a few bytes here. -- Chuck + * Note: always sync this with calls to PyDataMem_UserFREE */ if (nbytes == 0) { nbytes = descr->elsize ? 
descr->elsize : 1; @@ -819,21 +828,23 @@ PyArray_NewFromDescr_int( * which could also be sub-fields of a VOID array */ if (zeroed || PyDataType_FLAGCHK(descr, NPY_NEEDS_INIT)) { - data = npy_alloc_cache_zero(nbytes); + data = PyDataMem_UserNEW_ZEROED(nbytes, 1, fa->mem_handler); } else { - data = npy_alloc_cache(nbytes); + data = PyDataMem_UserNEW(nbytes, fa->mem_handler); } if (data == NULL) { raise_memory_error(fa->nd, fa->dimensions, descr); goto fail; } + fa->flags |= NPY_ARRAY_OWNDATA; } else { + /* The handlers should never be called in this case */ + fa->mem_handler = NULL; /* - * If data is passed in, this object won't own it by default. - * Caller must arrange for this to be reset if truly desired + * If data is passed in, this object won't own it. */ fa->flags &= ~NPY_ARRAY_OWNDATA; } @@ -901,6 +912,7 @@ PyArray_NewFromDescr_int( return (PyObject *)fa; fail: + Py_XDECREF(fa->mem_handler); Py_DECREF(fa); return NULL; } @@ -1019,6 +1031,17 @@ PyArray_NewLikeArrayWithShape(PyArrayObject *prototype, NPY_ORDER order, /* Build the new strides */ stride = dtype->elsize; + if (stride == 0 && PyDataType_ISSTRING(dtype)) { + /* Special case for dtype=str or dtype=bytes. */ + if (dtype->type_num == NPY_STRING) { + /* dtype is bytes */ + stride = 1; + } + else { + /* dtype is str (type_num is NPY_UNICODE) */ + stride = 4; + } + } for (idim = ndim-1; idim >= 0; --idim) { npy_intp i_perm = strideperm[idim].perm; strides[i_perm] = stride; @@ -2724,7 +2747,7 @@ PyArray_CopyAsFlat(PyArrayObject *dst, PyArrayObject *src, NPY_ORDER order) /* If we exhausted the dst block, refresh it */ if (dst_count == count) { res = dst_iternext(dst_iter); - if (!res) { + if (res == 0) { break; } dst_count = *dst_countptr; @@ -2738,7 +2761,7 @@ PyArray_CopyAsFlat(PyArrayObject *dst, PyArrayObject *src, NPY_ORDER order) /* If we exhausted the src block, refresh it */ if (src_count == count) { res = src_iternext(src_iter); - if (!res) { + if (res == 0) { break; } src_count = *src_countptr; @@ -2755,10 +2778,6 @@ PyArray_CopyAsFlat(PyArrayObject *dst, PyArrayObject *src, NPY_ORDER order) NPY_cast_info_xfree(&cast_info); NpyIter_Deallocate(dst_iter); NpyIter_Deallocate(src_iter); - if (res > 0) { - /* The iteration stopped successfully, do not report an error */ - return 0; - } return res; } @@ -3420,7 +3439,9 @@ array_from_text(PyArray_Descr *dtype, npy_intp num, char const *sep, size_t *nre dptr += dtype->elsize; if (num < 0 && thisbuf == size) { totalbytes += bytes; - tmp = PyDataMem_RENEW(PyArray_DATA(r), totalbytes); + /* The handler is always valid */ + tmp = PyDataMem_UserRENEW(PyArray_DATA(r), totalbytes, + PyArray_HANDLER(r)); if (tmp == NULL) { err = 1; break; @@ -3442,7 +3463,9 @@ array_from_text(PyArray_Descr *dtype, npy_intp num, char const *sep, size_t *nre const size_t nsize = PyArray_MAX(*nread,1)*dtype->elsize; if (nsize != 0) { - tmp = PyDataMem_RENEW(PyArray_DATA(r), nsize); + /* The handler is always valid */ + tmp = PyDataMem_UserRENEW(PyArray_DATA(r), nsize, + PyArray_HANDLER(r)); if (tmp == NULL) { err = 1; } @@ -3547,7 +3570,9 @@ PyArray_FromFile(FILE *fp, PyArray_Descr *dtype, npy_intp num, char *sep) const size_t nsize = PyArray_MAX(nread,1) * dtype->elsize; char *tmp; - if ((tmp = PyDataMem_RENEW(PyArray_DATA(ret), nsize)) == NULL) { + /* The handler is always valid */ + if((tmp = PyDataMem_UserRENEW(PyArray_DATA(ret), nsize, + PyArray_HANDLER(ret))) == NULL) { Py_DECREF(dtype); Py_DECREF(ret); return PyErr_NoMemory(); @@ -3831,7 +3856,9 @@ PyArray_FromIter(PyObject *obj, PyArray_Descr 
*dtype, npy_intp count) */ elcount = (i >> 1) + (i < 4 ? 4 : 2) + i; if (!npy_mul_with_overflow_intp(&nbytes, elcount, elsize)) { - new_data = PyDataMem_RENEW(PyArray_DATA(ret), nbytes); + /* The handler is always valid */ + new_data = PyDataMem_UserRENEW(PyArray_DATA(ret), nbytes, + PyArray_HANDLER(ret)); } else { new_data = NULL; @@ -3869,10 +3896,12 @@ PyArray_FromIter(PyObject *obj, PyArray_Descr *dtype, npy_intp count) * (assuming realloc is reasonably good about reusing space...) */ if (i == 0 || elsize == 0) { - /* The size cannot be zero for PyDataMem_RENEW. */ + /* The size cannot be zero for realloc. */ goto done; } - new_data = PyDataMem_RENEW(PyArray_DATA(ret), i * elsize); + /* The handler is always valid */ + new_data = PyDataMem_UserRENEW(PyArray_DATA(ret), i * elsize, + PyArray_HANDLER(ret)); if (new_data == NULL) { PyErr_SetString(PyExc_MemoryError, "cannot allocate array memory"); diff --git a/numpy/core/src/multiarray/ctors.h b/numpy/core/src/multiarray/ctors.h index 8db1412c7..e59e86e8b 100644 --- a/numpy/core/src/multiarray/ctors.h +++ b/numpy/core/src/multiarray/ctors.h @@ -1,5 +1,5 @@ -#ifndef _NPY_ARRAY_CTORS_H_ -#define _NPY_ARRAY_CTORS_H_ +#ifndef NUMPY_CORE_SRC_MULTIARRAY_CTORS_H_ +#define NUMPY_CORE_SRC_MULTIARRAY_CTORS_H_ NPY_NO_EXPORT PyObject * PyArray_NewFromDescr( @@ -102,4 +102,4 @@ NPY_NO_EXPORT PyArrayObject * PyArray_SubclassWrap(PyArrayObject *arr_of_subclass, PyArrayObject *towrap); -#endif +#endif /* NUMPY_CORE_SRC_MULTIARRAY_CTORS_H_ */ diff --git a/numpy/core/src/multiarray/datetime.c b/numpy/core/src/multiarray/datetime.c index 182eb12f9..e0064c017 100644 --- a/numpy/core/src/multiarray/datetime.c +++ b/numpy/core/src/multiarray/datetime.c @@ -6,16 +6,14 @@ * * See LICENSE.txt for the license. */ +#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#define _MULTIARRAYMODULE #define PY_SSIZE_T_CLEAN #include <Python.h> -#include <datetime.h> -#include <time.h> - -#define NPY_NO_DEPRECATED_API NPY_API_VERSION -#define _MULTIARRAYMODULE -#include <numpy/arrayobject.h> +#include "numpy/arrayobject.h" +#include "numpyos.h" #include "npy_config.h" #include "npy_pycompat.h" @@ -30,7 +28,11 @@ #include "usertypes.h" #include "dtype_transfer.h" -#include <lowlevel_strided_loops.h> +#include "lowlevel_strided_loops.h" + +#include <datetime.h> +#include <time.h> + /* * Computes the python `ret, d = divmod(d, unit)`. @@ -426,7 +428,7 @@ PyArray_DatetimeStructToDatetime( } /*NUMPY_API - * Create a timdelta value from a filled timedelta struct and resolution unit. + * Create a timedelta value from a filled timedelta struct and resolution unit. * * TO BE REMOVED - NOT USED INTERNALLY. 
*/ @@ -722,12 +724,21 @@ parse_datetime_extended_unit_from_string(char const *str, Py_ssize_t len, { char const *substr = str, *substrend = NULL; int den = 1; + npy_longlong true_meta_val; /* First comes an optional integer multiplier */ out_meta->num = (int)strtol_const(substr, &substrend, 10); if (substr == substrend) { out_meta->num = 1; } + else { + // check for 32-bit integer overflow + char *endptr = NULL; + true_meta_val = NumPyOS_strtoll(substr, &endptr, 10); + if (true_meta_val > INT_MAX || true_meta_val < 0) { + goto bad_input; + } + } substr = substrend; /* Next comes the unit itself, followed by either '/' or the string end */ @@ -1159,7 +1170,7 @@ get_datetime_conversion_factor(PyArray_DatetimeMetaData *src_meta, } /* If something overflowed, make both num and denom 0 */ - if (denom == 0 || num == 0) { + if (num == 0) { PyErr_Format(PyExc_OverflowError, "Integer overflow while computing the conversion " "factor between NumPy datetime units %s and %s", @@ -3775,7 +3786,17 @@ time_to_time_resolve_descriptors( meta2 = get_datetime_metadata_from_dtype(loop_descrs[1]); assert(meta2 != NULL); - if (meta1->base == meta2->base && meta1->num == meta2->num) { + if ((meta1->base == meta2->base && meta1->num == meta2->num) || + // handle some common metric prefix conversions + // 1000 fold conversions + ((meta2->base >= 7) && (meta1->base - meta2->base == 1) + && ((meta1->num / meta2->num) == 1000)) || + // 10^6 fold conversions + ((meta2->base >= 7) && (meta1->base - meta2->base == 2) + && ((meta1->num / meta2->num) == 1000000)) || + // 10^9 fold conversions + ((meta2->base >= 7) && (meta1->base - meta2->base == 3) + && ((meta1->num / meta2->num) == 1000000000))) { if (byteorder_may_allow_view) { return NPY_NO_CASTING | byteorder_may_allow_view; } diff --git a/numpy/core/src/multiarray/datetime_busday.c b/numpy/core/src/multiarray/datetime_busday.c index f0564146d..d3e9e1451 100644 --- a/numpy/core/src/multiarray/datetime_busday.c +++ b/numpy/core/src/multiarray/datetime_busday.c @@ -6,12 +6,12 @@ * * See LICENSE.txt for the license. */ +#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#define _MULTIARRAYMODULE #define PY_SSIZE_T_CLEAN #include <Python.h> -#define NPY_NO_DEPRECATED_API NPY_API_VERSION -#define _MULTIARRAYMODULE #include <numpy/arrayobject.h> #include "npy_config.h" diff --git a/numpy/core/src/multiarray/datetime_busday.h b/numpy/core/src/multiarray/datetime_busday.h index 483151122..b53a25010 100644 --- a/numpy/core/src/multiarray/datetime_busday.h +++ b/numpy/core/src/multiarray/datetime_busday.h @@ -1,5 +1,5 @@ -#ifndef _NPY_PRIVATE__DATETIME_BUSDAY_H_ -#define _NPY_PRIVATE__DATETIME_BUSDAY_H_ +#ifndef NUMPY_CORE_SRC_MULTIARRAY_DATETIME_BUSDAY_H_ +#define NUMPY_CORE_SRC_MULTIARRAY_DATETIME_BUSDAY_H_ /* * This is the 'busday_offset' function exposed for calling @@ -25,4 +25,4 @@ NPY_NO_EXPORT PyObject * array_is_busday(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwds); -#endif +#endif /* NUMPY_CORE_SRC_MULTIARRAY_DATETIME_BUSDAY_H_ */ diff --git a/numpy/core/src/multiarray/datetime_busdaycal.c b/numpy/core/src/multiarray/datetime_busdaycal.c index e3e729d3c..880efe934 100644 --- a/numpy/core/src/multiarray/datetime_busdaycal.c +++ b/numpy/core/src/multiarray/datetime_busdaycal.c @@ -7,19 +7,19 @@ * * See LICENSE.txt for the license. 
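 *
 * (Editor's note on the include reshuffle this diff applies file by
 * file, shown here and in the sibling datetime files: the module/API
 * macros are hoisted above every include so that any NumPy header
 * pulled in, directly or indirectly, sees them, and system headers
 * such as <time.h> move below the NumPy ones. The resulting canonical
 * preamble is:
 *
 *     #define NPY_NO_DEPRECATED_API NPY_API_VERSION
 *     #define _MULTIARRAYMODULE
 *
 *     #define PY_SSIZE_T_CLEAN
 *     #include <Python.h>
 *
 *     #include "numpy/arrayobject.h"
 * )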
*/ +#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#define _MULTIARRAYMODULE #define PY_SSIZE_T_CLEAN #include <Python.h> -#define NPY_NO_DEPRECATED_API NPY_API_VERSION -#define _MULTIARRAYMODULE -#include <numpy/arrayobject.h> +#include "numpy/arrayobject.h" +#include "numpy/arrayscalars.h" #include "npy_config.h" #include "npy_pycompat.h" #include "common.h" -#include "numpy/arrayscalars.h" #include "lowlevel_strided_loops.h" #include "_datetime.h" #include "datetime_busday.h" diff --git a/numpy/core/src/multiarray/datetime_busdaycal.h b/numpy/core/src/multiarray/datetime_busdaycal.h index 02903e3d2..20efebe0a 100644 --- a/numpy/core/src/multiarray/datetime_busdaycal.h +++ b/numpy/core/src/multiarray/datetime_busdaycal.h @@ -1,5 +1,5 @@ -#ifndef _NPY_PRIVATE__DATETIME_BUSDAYDEF_H_ -#define _NPY_PRIVATE__DATETIME_BUSDAYDEF_H_ +#ifndef NUMPY_CORE_SRC_MULTIARRAY_DATETIME_BUSDAYCAL_H_ +#define NUMPY_CORE_SRC_MULTIARRAY_DATETIME_BUSDAYCAL_H_ /* * A list of holidays, which should be sorted, not contain any @@ -59,4 +59,4 @@ PyArray_HolidaysConverter(PyObject *dates_in, npy_holidayslist *holidays); -#endif +#endif /* NUMPY_CORE_SRC_MULTIARRAY_DATETIME_BUSDAYCAL_H_ */ diff --git a/numpy/core/src/multiarray/datetime_strings.c b/numpy/core/src/multiarray/datetime_strings.c index 360868568..5080647cb 100644 --- a/numpy/core/src/multiarray/datetime_strings.c +++ b/numpy/core/src/multiarray/datetime_strings.c @@ -6,15 +6,14 @@ * * See LICENSE.txt for the license. */ +#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#define _MULTIARRAYMODULE #define PY_SSIZE_T_CLEAN #include <Python.h> -#include <time.h> -#define NPY_NO_DEPRECATED_API NPY_API_VERSION -#define _MULTIARRAYMODULE -#include <numpy/arrayobject.h> +#include "numpy/arrayobject.h" #include "npy_config.h" #include "npy_pycompat.h" @@ -24,6 +23,8 @@ #include "_datetime.h" #include "datetime_strings.h" +#include <time.h> + /* * Platform-specific time_t typedef. Some platforms use 32 bit, some use 64 bit * and we just use the default with the exception of mingw, where we must use diff --git a/numpy/core/src/multiarray/datetime_strings.h b/numpy/core/src/multiarray/datetime_strings.h index 148369595..ca35d29c8 100644 --- a/numpy/core/src/multiarray/datetime_strings.h +++ b/numpy/core/src/multiarray/datetime_strings.h @@ -1,5 +1,5 @@ -#ifndef _NPY_PRIVATE__DATETIME_STRINGS_H_ -#define _NPY_PRIVATE__DATETIME_STRINGS_H_ +#ifndef NUMPY_CORE_SRC_MULTIARRAY_DATETIME_STRINGS_H_ +#define NUMPY_CORE_SRC_MULTIARRAY_DATETIME_STRINGS_H_ /* * Parses (almost) standard ISO 8601 date strings. 
The differences are: @@ -81,4 +81,4 @@ NPY_NO_EXPORT PyObject * array_datetime_as_string(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwds); -#endif +#endif /* NUMPY_CORE_SRC_MULTIARRAY_DATETIME_STRINGS_H_ */ diff --git a/numpy/core/src/multiarray/descriptor.c b/numpy/core/src/multiarray/descriptor.c index 90453e38f..6a09f92ac 100644 --- a/numpy/core/src/multiarray/descriptor.c +++ b/numpy/core/src/multiarray/descriptor.c @@ -1,11 +1,11 @@ /* Array Descr Object */ +#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#define _MULTIARRAYMODULE #define PY_SSIZE_T_CLEAN #include <Python.h> -#include "structmember.h" +#include <structmember.h> -#define NPY_NO_DEPRECATED_API NPY_API_VERSION -#define _MULTIARRAYMODULE #include "numpy/arrayobject.h" #include "numpy/arrayscalars.h" @@ -257,7 +257,7 @@ static PyArray_Descr * _convert_from_tuple(PyObject *obj, int align) { if (PyTuple_GET_SIZE(obj) != 2) { - PyErr_Format(PyExc_TypeError, + PyErr_Format(PyExc_TypeError, "Tuple must have size 2, but has size %zd", PyTuple_GET_SIZE(obj)); return NULL; @@ -449,8 +449,8 @@ _convert_from_array_descr(PyObject *obj, int align) for (int i = 0; i < n; i++) { PyObject *item = PyList_GET_ITEM(obj, i); if (!PyTuple_Check(item) || (PyTuple_GET_SIZE(item) < 2)) { - PyErr_Format(PyExc_TypeError, - "Field elements must be 2- or 3-tuples, got '%R'", + PyErr_Format(PyExc_TypeError, + "Field elements must be 2- or 3-tuples, got '%R'", item); goto fail; } @@ -461,7 +461,7 @@ _convert_from_array_descr(PyObject *obj, int align) } else if (PyTuple_Check(name)) { if (PyTuple_GET_SIZE(name) != 2) { - PyErr_Format(PyExc_TypeError, + PyErr_Format(PyExc_TypeError, "If a tuple, the first element of a field tuple must have " "two elements, not %zd", PyTuple_GET_SIZE(name)); @@ -475,7 +475,7 @@ _convert_from_array_descr(PyObject *obj, int align) } } else { - PyErr_SetString(PyExc_TypeError, + PyErr_SetString(PyExc_TypeError, "First element of field tuple is " "neither a tuple nor str"); goto fail; @@ -2304,6 +2304,33 @@ arraydescr_new(PyTypeObject *subtype, PyObject *args, PyObject *kwds) { if (subtype != &PyArrayDescr_Type) { + if (Py_TYPE(subtype) == &PyArrayDTypeMeta_Type && + !(PyType_GetFlags(Py_TYPE(subtype)) & Py_TPFLAGS_HEAPTYPE) && + (NPY_DT_SLOTS((PyArray_DTypeMeta *)subtype)) != NULL) { + /* + * Appears to be a properly initialized user DType. Allocate + * it and initialize the main part as best we can. + * TODO: This should probably be a user function, and enforce + * things like the `elsize` being correctly set. + * TODO: This is EXPERIMENTAL API! + */ + PyArray_DTypeMeta *DType = (PyArray_DTypeMeta *)subtype; + PyArray_Descr *descr = (PyArray_Descr *)subtype->tp_alloc(subtype, 0); + if (descr == 0) { + PyErr_NoMemory(); + return NULL; + } + PyObject_Init((PyObject *)descr, subtype); + descr->f = &NPY_DT_SLOTS(DType)->f; + Py_XINCREF(DType->scalar_type); + descr->typeobj = DType->scalar_type; + descr->type_num = DType->type_num; + descr->flags = NPY_USE_GETITEM|NPY_USE_SETITEM; + descr->byteorder = '|'; /* If DType uses it, let it override */ + descr->elsize = -1; /* Initialize to invalid value */ + descr->hash = -1; + return (PyObject *)descr; + } /* The DTypeMeta class should prevent this from happening. 
*/ PyErr_Format(PyExc_SystemError, "'%S' must not inherit np.dtype.__new__().", subtype); @@ -3101,6 +3128,30 @@ arraydescr_newbyteorder(PyArray_Descr *self, PyObject *args) return (PyObject *)PyArray_DescrNewByteorder(self, endian); } +static PyObject * +arraydescr_class_getitem(PyObject *cls, PyObject *args) +{ + PyObject *generic_alias; + +#ifdef Py_GENERICALIASOBJECT_H + Py_ssize_t args_len; + + args_len = PyTuple_Check(args) ? PyTuple_Size(args) : 1; + if (args_len != 1) { + return PyErr_Format(PyExc_TypeError, + "Too %s arguments for %s", + args_len > 1 ? "many" : "few", + ((PyTypeObject *)cls)->tp_name); + } + generic_alias = Py_GenericAlias(cls, args); +#else + PyErr_SetString(PyExc_TypeError, + "Type subscription requires python >= 3.9"); + generic_alias = NULL; +#endif + return generic_alias; +} + static PyMethodDef arraydescr_methods[] = { /* for pickling */ {"__reduce__", @@ -3112,6 +3163,10 @@ static PyMethodDef arraydescr_methods[] = { {"newbyteorder", (PyCFunction)arraydescr_newbyteorder, METH_VARARGS, NULL}, + /* for typing; requires python >= 3.9 */ + {"__class_getitem__", + (PyCFunction)arraydescr_class_getitem, + METH_CLASS | METH_O, NULL}, {NULL, NULL, 0, NULL} /* sentinel */ }; diff --git a/numpy/core/src/multiarray/descriptor.h b/numpy/core/src/multiarray/descriptor.h index e1316acbd..f832958da 100644 --- a/numpy/core/src/multiarray/descriptor.h +++ b/numpy/core/src/multiarray/descriptor.h @@ -1,5 +1,5 @@ -#ifndef _NPY_ARRAYDESCR_H_ -#define _NPY_ARRAYDESCR_H_ +#ifndef NUMPY_CORE_SRC_MULTIARRAY_DESCRIPTOR_H_ +#define NUMPY_CORE_SRC_MULTIARRAY_DESCRIPTOR_H_ NPY_NO_EXPORT PyObject *arraydescr_protocol_typestr_get( PyArray_Descr *, void *); @@ -30,4 +30,4 @@ arraydescr_field_subset_view(PyArray_Descr *self, PyObject *ind); extern NPY_NO_EXPORT char const *_datetime_strings[]; -#endif +#endif /* NUMPY_CORE_SRC_MULTIARRAY_DESCRIPTOR_H_ */ diff --git a/numpy/core/src/multiarray/dragon4.c b/numpy/core/src/multiarray/dragon4.c index 1d8c27570..ce0293615 100644 --- a/numpy/core/src/multiarray/dragon4.c +++ b/numpy/core/src/multiarray/dragon4.c @@ -2206,7 +2206,7 @@ Dragon4_PrintFloat_IEEE_binary16( Dragon4_Scratch *scratch, npy_half *value, Dragon4_Options *opt) { char *buffer = scratch->repr; - npy_uint32 bufferSize = sizeof(scratch->repr); + const npy_uint32 bufferSize = sizeof(scratch->repr); BigInt *bigints = scratch->bigints; npy_uint16 val = *value; @@ -2218,15 +2218,6 @@ Dragon4_PrintFloat_IEEE_binary16( npy_bool hasUnequalMargins; char signbit = '\0'; - if (bufferSize == 0) { - return 0; - } - - if (bufferSize == 1) { - buffer[0] = '\0'; - return 0; - } - /* deconstruct the floating point value */ floatMantissa = val & bitmask_u32(10); floatExponent = (val >> 10) & bitmask_u32(5); @@ -2303,7 +2294,7 @@ Dragon4_PrintFloat_IEEE_binary32( Dragon4_Options *opt) { char *buffer = scratch->repr; - npy_uint32 bufferSize = sizeof(scratch->repr); + const npy_uint32 bufferSize = sizeof(scratch->repr); BigInt *bigints = scratch->bigints; union @@ -2319,15 +2310,6 @@ Dragon4_PrintFloat_IEEE_binary32( npy_bool hasUnequalMargins; char signbit = '\0'; - if (bufferSize == 0) { - return 0; - } - - if (bufferSize == 1) { - buffer[0] = '\0'; - return 0; - } - /* deconstruct the floating point value */ floatUnion.floatingPoint = *value; floatMantissa = floatUnion.integer & bitmask_u32(23); @@ -2404,7 +2386,7 @@ Dragon4_PrintFloat_IEEE_binary64( Dragon4_Scratch *scratch, npy_float64 *value, Dragon4_Options *opt) { char *buffer = scratch->repr; - npy_uint32 bufferSize = 
sizeof(scratch->repr); + const npy_uint32 bufferSize = sizeof(scratch->repr); BigInt *bigints = scratch->bigints; union @@ -2421,14 +2403,6 @@ Dragon4_PrintFloat_IEEE_binary64( npy_bool hasUnequalMargins; char signbit = '\0'; - if (bufferSize == 0) { - return 0; - } - - if (bufferSize == 1) { - buffer[0] = '\0'; - return 0; - } /* deconstruct the floating point value */ floatUnion.floatingPoint = *value; @@ -2527,7 +2501,7 @@ Dragon4_PrintFloat_Intel_extended( Dragon4_Scratch *scratch, FloatVal128 value, Dragon4_Options *opt) { char *buffer = scratch->repr; - npy_uint32 bufferSize = sizeof(scratch->repr); + const npy_uint32 bufferSize = sizeof(scratch->repr); BigInt *bigints = scratch->bigints; npy_uint32 floatExponent, floatSign; @@ -2539,15 +2513,6 @@ Dragon4_PrintFloat_Intel_extended( npy_bool hasUnequalMargins; char signbit = '\0'; - if (bufferSize == 0) { - return 0; - } - - if (bufferSize == 1) { - buffer[0] = '\0'; - return 0; - } - /* deconstruct the floating point value (we ignore the intbit) */ floatMantissa = value.lo & bitmask_u64(63); floatExponent = value.hi & bitmask_u32(15); @@ -2748,7 +2713,7 @@ Dragon4_PrintFloat_IEEE_binary128( Dragon4_Scratch *scratch, FloatVal128 val128, Dragon4_Options *opt) { char *buffer = scratch->repr; - npy_uint32 bufferSize = sizeof(scratch->repr); + const npy_uint32 bufferSize = sizeof(scratch->repr); BigInt *bigints = scratch->bigints; npy_uint32 floatExponent, floatSign; @@ -2759,15 +2724,6 @@ Dragon4_PrintFloat_IEEE_binary128( npy_bool hasUnequalMargins; char signbit = '\0'; - if (bufferSize == 0) { - return 0; - } - - if (bufferSize == 1) { - buffer[0] = '\0'; - return 0; - } - mantissa_hi = val128.hi & bitmask_u64(48); mantissa_lo = val128.lo; floatExponent = (val128.hi >> 48) & bitmask_u32(15); @@ -2917,7 +2873,7 @@ Dragon4_PrintFloat_IBM_double_double( Dragon4_Scratch *scratch, npy_float128 *value, Dragon4_Options *opt) { char *buffer = scratch->repr; - npy_uint32 bufferSize = sizeof(scratch->repr); + const npy_uint32 bufferSize = sizeof(scratch->repr); BigInt *bigints = scratch->bigints; FloatVal128 val128; @@ -2934,15 +2890,6 @@ Dragon4_PrintFloat_IBM_double_double( npy_bool hasUnequalMargins; char signbit = '\0'; - if (bufferSize == 0) { - return 0; - } - - if (bufferSize == 1) { - buffer[0] = '\0'; - return 0; - } - /* The high part always comes before the low part, regardless of the * endianness of the system. */ buf128.floatingPoint = *value; diff --git a/numpy/core/src/multiarray/dragon4.h b/numpy/core/src/multiarray/dragon4.h index 4b76bf9e5..e3325bfa2 100644 --- a/numpy/core/src/multiarray/dragon4.h +++ b/numpy/core/src/multiarray/dragon4.h @@ -29,12 +29,11 @@ * Ryan Juckett's original code was under the Zlib license; he gave numpy * permission to include it under the MIT license instead. 
*/ +#ifndef NUMPY_CORE_SRC_MULTIARRAY_DRAGON4_H_ +#define NUMPY_CORE_SRC_MULTIARRAY_DRAGON4_H_ -#ifndef _NPY_DRAGON4_H_ -#define _NPY_DRAGON4_H_ - -#include "Python.h" -#include "structmember.h" +#include <Python.h> +#include <structmember.h> #define NPY_NO_DEPRECATED_API NPY_API_VERSION #define _MULTIARRAYMODULE #include "numpy/arrayobject.h" @@ -136,5 +135,4 @@ Dragon4_Scientific(PyObject *obj, DigitMode digit_mode, int precision, int min_digits, int sign, TrimMode trim, int pad_left, int exp_digits); -#endif - +#endif /* NUMPY_CORE_SRC_MULTIARRAY_DRAGON4_H_ */ diff --git a/numpy/core/src/multiarray/dtype_transfer.c b/numpy/core/src/multiarray/dtype_transfer.c index 50db627ea..8fb44c4f6 100644 --- a/numpy/core/src/multiarray/dtype_transfer.c +++ b/numpy/core/src/multiarray/dtype_transfer.c @@ -7,16 +7,16 @@ * The University of British Columbia * * See LICENSE.txt for the license. - + * */ +#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#define _MULTIARRAYMODULE #define PY_SSIZE_T_CLEAN -#include "Python.h" -#include "structmember.h" +#include <Python.h> +#include <structmember.h> -#define NPY_NO_DEPRECATED_API NPY_API_VERSION -#define _MULTIARRAYMODULE -#include <numpy/arrayobject.h> +#include "numpy/arrayobject.h" #include "lowlevel_strided_loops.h" #include "npy_pycompat.h" diff --git a/numpy/core/src/multiarray/dtype_transfer.h b/numpy/core/src/multiarray/dtype_transfer.h index e29ac40b8..c7e0a029f 100644 --- a/numpy/core/src/multiarray/dtype_transfer.h +++ b/numpy/core/src/multiarray/dtype_transfer.h @@ -1,5 +1,5 @@ -#ifndef _NPY_DTYPE_TRANSFER_H -#define _NPY_DTYPE_TRANSFER_H +#ifndef NUMPY_CORE_SRC_MULTIARRAY_DTYPE_TRANSFER_H_ +#define NUMPY_CORE_SRC_MULTIARRAY_DTYPE_TRANSFER_H_ #include "array_method.h" @@ -202,4 +202,4 @@ get_wrapped_legacy_cast_function(int aligned, int *out_needs_api, int allow_wrapped); -#endif /* _NPY_DTYPE_TRANSFER_H */ +#endif /* NUMPY_CORE_SRC_MULTIARRAY_DTYPE_TRANSFER_H_ */ diff --git a/numpy/core/src/multiarray/dtypemeta.c b/numpy/core/src/multiarray/dtypemeta.c index 597468c50..cd489d5e7 100644 --- a/numpy/core/src/multiarray/dtypemeta.c +++ b/numpy/core/src/multiarray/dtypemeta.c @@ -1,12 +1,11 @@ /* Array Descr Object */ +#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#define _MULTIARRAYMODULE #define PY_SSIZE_T_CLEAN #include <Python.h> -#include "structmember.h" -#include "assert.h" +#include <structmember.h> -#define NPY_NO_DEPRECATED_API NPY_API_VERSION -#define _MULTIARRAYMODULE #include <numpy/ndarraytypes.h> #include <numpy/arrayscalars.h> #include "npy_pycompat.h" @@ -19,6 +18,7 @@ #include "convert_datatype.h" #include "usertypes.h" +#include <assert.h> static void dtypemeta_dealloc(PyArray_DTypeMeta *self) { @@ -101,7 +101,7 @@ static PyObject * legacy_dtype_default_new(PyArray_DTypeMeta *self, PyObject *args, PyObject *kwargs) { - /* TODO: This should allow endianess and possibly metadata */ + /* TODO: This should allow endianness and possibly metadata */ if (NPY_DT_is_parametric(self)) { /* reject parametric ones since we would need to get unit, etc. 
info */ PyErr_Format(PyExc_TypeError, @@ -290,7 +290,7 @@ void_common_instance(PyArray_Descr *descr1, PyArray_Descr *descr2) return descr1; } -static int +NPY_NO_EXPORT int python_builtins_are_known_scalar_types( PyArray_DTypeMeta *NPY_UNUSED(cls), PyTypeObject *pytype) { diff --git a/numpy/core/src/multiarray/dtypemeta.h b/numpy/core/src/multiarray/dtypemeta.h index 200111ac2..2a61fe39d 100644 --- a/numpy/core/src/multiarray/dtypemeta.h +++ b/numpy/core/src/multiarray/dtypemeta.h @@ -1,5 +1,5 @@ -#ifndef _NPY_DTYPEMETA_H -#define _NPY_DTYPEMETA_H +#ifndef NUMPY_CORE_SRC_MULTIARRAY_DTYPEMETA_H_ +#define NUMPY_CORE_SRC_MULTIARRAY_DTYPEMETA_H_ /* DType flags, currently private, since we may just expose functions */ @@ -8,6 +8,35 @@ #define NPY_DT_PARAMETRIC 1 << 2 +typedef PyArray_Descr *(discover_descr_from_pyobject_function)( + PyArray_DTypeMeta *cls, PyObject *obj); + +/* + * Before making this public, we should decide whether it should pass + * the type, or allow looking at the object. A possible use-case: + * `np.array(np.array([0]), dtype=np.ndarray)` + * Could consider arrays that are not `dtype=ndarray` "scalars". + */ +typedef int (is_known_scalar_type_function)( + PyArray_DTypeMeta *cls, PyTypeObject *obj); + +typedef PyArray_Descr *(default_descr_function)(PyArray_DTypeMeta *cls); +typedef PyArray_DTypeMeta *(common_dtype_function)( + PyArray_DTypeMeta *dtype1, PyArray_DTypeMeta *dtype2); +typedef PyArray_Descr *(common_instance_function)( + PyArray_Descr *dtype1, PyArray_Descr *dtype2); + +/* + * TODO: These two functions are currently only used for experimental DType + * API support. Their relation should be "reversed": NumPy should + * always use them internally. + * There are open points about "casting safety" though, e.g. setting + * elements is currently always unsafe. + */ +typedef int(setitemfunction)(PyArray_Descr *, PyObject *, char *); +typedef PyObject *(getitemfunction)(PyArray_Descr *, char *); + + typedef struct { /* DType methods, these could be moved into its own struct */ discover_descr_from_pyobject_function *discover_descr_from_pyobject; @@ -16,6 +45,12 @@ typedef struct { common_dtype_function *common_dtype; common_instance_function *common_instance; /* + * Currently only used for experimental user DTypes. + * Typing as `void *` until NumPy itself uses these (directly). 
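 *
 * For orientation (editor's sketch; the `unit_*` names are
 * hypothetical), a user DType storing one C double per element would
 * satisfy the two typedefs above with:
 *
 *     static int
 *     unit_setitem(PyArray_Descr *descr, PyObject *obj, char *dataptr)
 *     {
 *         double value = PyFloat_AsDouble(obj);
 *         if (value == -1.0 && PyErr_Occurred()) {
 *             return -1;
 *         }
 *         memcpy(dataptr, &value, sizeof(double));
 *         return 0;
 *     }
 *
 *     static PyObject *
 *     unit_getitem(PyArray_Descr *descr, char *dataptr)
 *     {
 *         double value;
 *         memcpy(&value, dataptr, sizeof(double));
 *         return PyFloat_FromDouble(value);
 *     }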
+ */ + setitemfunction *setitem; + getitemfunction *getitem; + /* * The casting implementation (ArrayMethod) to convert between two * instances of this DType, stored explicitly for fast access: */ @@ -39,9 +74,9 @@ typedef struct { #define NPY_DTYPE(descr) ((PyArray_DTypeMeta *)Py_TYPE(descr)) #define NPY_DT_SLOTS(dtype) ((NPY_DType_Slots *)(dtype)->dt_slots) -#define NPY_DT_is_legacy(dtype) ((dtype)->flags & NPY_DT_LEGACY) -#define NPY_DT_is_abstract(dtype) ((dtype)->flags & NPY_DT_ABSTRACT) -#define NPY_DT_is_parametric(dtype) ((dtype)->flags & NPY_DT_PARAMETRIC) +#define NPY_DT_is_legacy(dtype) (((dtype)->flags & NPY_DT_LEGACY) != 0) +#define NPY_DT_is_abstract(dtype) (((dtype)->flags & NPY_DT_ABSTRACT) != 0) +#define NPY_DT_is_parametric(dtype) (((dtype)->flags & NPY_DT_PARAMETRIC) != 0) /* * Macros for convenient classmethod calls, since these require @@ -58,7 +93,10 @@ typedef struct { NPY_DT_SLOTS(dtype)->default_descr(dtype) #define NPY_DT_CALL_common_dtype(dtype, other) \ NPY_DT_SLOTS(dtype)->common_dtype(dtype, other) - +#define NPY_DT_CALL_getitem(descr, data_ptr) \ + NPY_DT_SLOTS(NPY_DTYPE(descr))->getitem(descr, data_ptr) +#define NPY_DT_CALL_setitem(descr, value, data_ptr) \ + NPY_DT_SLOTS(NPY_DTYPE(descr))->setitem(descr, value, data_ptr) /* * This function will hopefully be phased out or replaced, but was convenient @@ -78,6 +116,10 @@ PyArray_DTypeFromTypeNum(int typenum) NPY_NO_EXPORT int +python_builtins_are_known_scalar_types( + PyArray_DTypeMeta *cls, PyTypeObject *pytype); + +NPY_NO_EXPORT int dtypemeta_wrap_legacy_descriptor(PyArray_Descr *dtypem); -#endif /*_NPY_DTYPEMETA_H */ +#endif /* NUMPY_CORE_SRC_MULTIARRAY_DTYPEMETA_H_ */ diff --git a/numpy/core/src/multiarray/einsum.c.src b/numpy/core/src/multiarray/einsum.c.src index 85806fab3..cd1a58982 100644 --- a/numpy/core/src/multiarray/einsum.c.src +++ b/numpy/core/src/multiarray/einsum.c.src @@ -9,8 +9,8 @@ */ #define PY_SSIZE_T_CLEAN -#include "Python.h" -#include "structmember.h" +#include <Python.h> +#include <structmember.h> #define NPY_NO_DEPRECATED_API NPY_API_VERSION #define _MULTIARRAYMODULE diff --git a/numpy/core/src/multiarray/einsum_debug.h b/numpy/core/src/multiarray/einsum_debug.h index 9aa81fcbd..964964743 100644 --- a/numpy/core/src/multiarray/einsum_debug.h +++ b/numpy/core/src/multiarray/einsum_debug.h @@ -6,8 +6,8 @@ * * See LICENSE.txt for the license. 
*/ -#ifndef _NPY_MULTIARRAY_EINSUM_DEBUG_H -#define _NPY_MULTIARRAY_EINSUM_DEBUG_H +#ifndef NUMPY_CORE_SRC_MULTIARRAY_EINSUM_DEBUG_H_ +#define NUMPY_CORE_SRC_MULTIARRAY_EINSUM_DEBUG_H_ /********** PRINTF DEBUG TRACING **************/ #define NPY_EINSUM_DBG_TRACING 0 @@ -25,4 +25,4 @@ #define NPY_EINSUM_DBG_PRINT3(s, p1, p2, p3) #endif -#endif +#endif /* NUMPY_CORE_SRC_MULTIARRAY_EINSUM_DEBUG_H_ */ diff --git a/numpy/core/src/multiarray/einsum_sumprod.c.src b/numpy/core/src/multiarray/einsum_sumprod.c.src index 333b8e188..29ceabd71 100644 --- a/numpy/core/src/multiarray/einsum_sumprod.c.src +++ b/numpy/core/src/multiarray/einsum_sumprod.c.src @@ -80,7 +80,7 @@ static NPY_GCC_OPT_3 @temptype@ @name@_sum_of_arr(@type@ *data, npy_intp count) /* Use aligned instructions if possible */ const int is_aligned = EINSUM_IS_ALIGNED(data); const int vstep = npyv_nlanes_@sfx@; - npyv_@sfx@ vaccum = npyv_zero_@sfx@(); + npyv_@sfx@ v_accum = npyv_zero_@sfx@(); const npy_intp vstepx4 = vstep * 4; /**begin repeat1 @@ -98,15 +98,15 @@ static NPY_GCC_OPT_3 @temptype@ @name@_sum_of_arr(@type@ *data, npy_intp count) npyv_@sfx@ a01 = npyv_add_@sfx@(a0, a1); npyv_@sfx@ a23 = npyv_add_@sfx@(a2, a3); npyv_@sfx@ a0123 = npyv_add_@sfx@(a01, a23); - vaccum = npyv_add_@sfx@(a0123, vaccum); + v_accum = npyv_add_@sfx@(a0123, v_accum); } } /**end repeat1**/ for (; count > 0; count -= vstep, data += vstep) { npyv_@sfx@ a = npyv_load_tillz_@sfx@(data, count); - vaccum = npyv_add_@sfx@(a, vaccum); + v_accum = npyv_add_@sfx@(a, v_accum); } - accum = npyv_sum_@sfx@(vaccum); + accum = npyv_sum_@sfx@(v_accum); npyv_cleanup(); #else #ifndef NPY_DISABLE_OPTIMIZATION @@ -485,7 +485,7 @@ static NPY_GCC_OPT_3 void /* Use aligned instructions if possible */ const int is_aligned = EINSUM_IS_ALIGNED(data0) && EINSUM_IS_ALIGNED(data1); const int vstep = npyv_nlanes_@sfx@; - npyv_@sfx@ vaccum = npyv_zero_@sfx@(); + npyv_@sfx@ v_accum = npyv_zero_@sfx@(); /**begin repeat2 * #cond = if(is_aligned), else# @@ -501,19 +501,19 @@ static NPY_GCC_OPT_3 void npyv_@sfx@ a@i@ = npyv_@ld@_@sfx@(data0 + vstep * @i@); npyv_@sfx@ b@i@ = npyv_@ld@_@sfx@(data1 + vstep * @i@); /**end repeat3**/ - npyv_@sfx@ ab3 = npyv_muladd_@sfx@(a3, b3, vaccum); + npyv_@sfx@ ab3 = npyv_muladd_@sfx@(a3, b3, v_accum); npyv_@sfx@ ab2 = npyv_muladd_@sfx@(a2, b2, ab3); npyv_@sfx@ ab1 = npyv_muladd_@sfx@(a1, b1, ab2); - vaccum = npyv_muladd_@sfx@(a0, b0, ab1); + v_accum = npyv_muladd_@sfx@(a0, b0, ab1); } } /**end repeat2**/ for (; count > 0; count -= vstep, data0 += vstep, data1 += vstep) { npyv_@sfx@ a = npyv_load_tillz_@sfx@(data0, count); npyv_@sfx@ b = npyv_load_tillz_@sfx@(data1, count); - vaccum = npyv_muladd_@sfx@(a, b, vaccum); + v_accum = npyv_muladd_@sfx@(a, b, v_accum); } - accum = npyv_sum_@sfx@(vaccum); + accum = npyv_sum_@sfx@(v_accum); npyv_cleanup(); #else #ifndef NPY_DISABLE_OPTIMIZATION diff --git a/numpy/core/src/multiarray/einsum_sumprod.h b/numpy/core/src/multiarray/einsum_sumprod.h index c6cf18ec6..29ddaea14 100644 --- a/numpy/core/src/multiarray/einsum_sumprod.h +++ b/numpy/core/src/multiarray/einsum_sumprod.h @@ -1,5 +1,5 @@ -#ifndef _NPY_MULTIARRAY_EINSUM_SUMPROD_H -#define _NPY_MULTIARRAY_EINSUM_SUMPROD_H +#ifndef NUMPY_CORE_SRC_MULTIARRAY_EINSUM_SUMPROD_H_ +#define NUMPY_CORE_SRC_MULTIARRAY_EINSUM_SUMPROD_H_ #include <numpy/npy_common.h> @@ -9,4 +9,4 @@ NPY_VISIBILITY_HIDDEN sum_of_products_fn get_sum_of_products_function(int nop, int type_num, npy_intp itemsize, npy_intp const *fixed_strides); -#endif +#endif /* 
NUMPY_CORE_SRC_MULTIARRAY_EINSUM_SUMPROD_H_ */ diff --git a/numpy/core/src/multiarray/experimental_public_dtype_api.c b/numpy/core/src/multiarray/experimental_public_dtype_api.c new file mode 100644 index 000000000..ef5030471 --- /dev/null +++ b/numpy/core/src/multiarray/experimental_public_dtype_api.c @@ -0,0 +1,392 @@ +#include <Python.h> + +#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#define _UMATHMODULE +#define _MULTIARRAYMODULE +#include <numpy/npy_common.h> +#include "numpy/arrayobject.h" +#include "numpy/ufuncobject.h" +#include "common.h" + +#include "experimental_public_dtype_api.h" +#include "array_method.h" +#include "dtypemeta.h" +#include "array_coercion.h" +#include "convert_datatype.h" +#include "common_dtype.h" + + +#define EXPERIMENTAL_DTYPE_API_VERSION 2 + + +typedef struct{ + PyTypeObject *typeobj; /* type of python scalar or NULL */ + int flags; /* flags, including parametric and abstract */ + /* NULL terminated cast definitions. Use NULL for the newly created DType */ + PyArrayMethod_Spec **casts; + PyType_Slot *slots; +} PyArrayDTypeMeta_Spec; + + + +static PyArray_DTypeMeta * +dtype_does_not_promote( + PyArray_DTypeMeta *NPY_UNUSED(self), PyArray_DTypeMeta *NPY_UNUSED(other)) +{ + /* `other` is guaranteed not to be `self`, so we don't have to do much... */ + Py_INCREF(Py_NotImplemented); + return (PyArray_DTypeMeta *)Py_NotImplemented; +} + + +static PyArray_Descr * +discover_as_default(PyArray_DTypeMeta *cls, PyObject *NPY_UNUSED(obj)) +{ + return NPY_DT_CALL_default_descr(cls); +} + + +static PyArray_Descr * +use_new_as_default(PyArray_DTypeMeta *self) +{ + PyObject *res = PyObject_CallObject((PyObject *)self, NULL); + if (res == NULL) { + return NULL; + } + /* + * Lets not trust that the DType is implemented correctly + * TODO: Should probably do an exact type-check (at least unless this is + * an abstract DType). + */ + if (!PyArray_DescrCheck(res)) { + PyErr_Format(PyExc_RuntimeError, + "Instantiating %S did not return a dtype instance, this is " + "invalid (especially without a custom `default_descr()`).", + self); + Py_DECREF(res); + return NULL; + } + PyArray_Descr *descr = (PyArray_Descr *)res; + /* + * Should probably do some more sanity checks here on the descriptor + * to ensure the user is not being naughty. But in the end, we have + * only limited control anyway. + */ + return descr; +} + + +static int +legacy_setitem_using_DType(PyObject *obj, void *data, void *arr) +{ + if (arr == NULL) { + PyErr_SetString(PyExc_RuntimeError, + "Using legacy SETITEM with NULL array object is only " + "supported for basic NumPy DTypes."); + return -1; + } + setitemfunction *setitem; + setitem = NPY_DT_SLOTS(NPY_DTYPE(PyArray_DESCR(arr)))->setitem; + return setitem(PyArray_DESCR(arr), obj, data); +} + + +static PyObject * +legacy_getitem_using_DType(void *data, void *arr) +{ + if (arr == NULL) { + PyErr_SetString(PyExc_RuntimeError, + "Using legacy SETITEM with NULL array object is only " + "supported for basic NumPy DTypes."); + return NULL; + } + getitemfunction *getitem; + getitem = NPY_DT_SLOTS(NPY_DTYPE(PyArray_DESCR(arr)))->getitem; + return getitem(PyArray_DESCR(arr), data); +} + + +/* + * The descr->f structure used user-DTypes. Some functions may be filled + * from the user in the future and more could get defaults for compatibility. 
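 *
 * (Editor's sketch of the resulting dispatch chain for an array `arr`
 * whose descriptor belongs to such a DType:
 *
 *     PyArray_DESCR(arr)->f->setitem(obj, dataptr, arr)
 *         == legacy_setitem_using_DType(obj, dataptr, arr)
 *         -> NPY_DT_SLOTS(NPY_DTYPE(PyArray_DESCR(arr)))->setitem(
 *                PyArray_DESCR(arr), obj, dataptr);
 *
 * which is why the NULL-array case is rejected above: without the
 * array there is no descriptor to route through.)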
+ */ +PyArray_ArrFuncs default_funcs = { + .setitem = &legacy_setitem_using_DType, + .getitem = &legacy_getitem_using_DType +}; + + +/* other slots are in order, so keep only last around: */ +#define NUM_DTYPE_SLOTS 7 + + +int +PyArrayInitDTypeMeta_FromSpec( + PyArray_DTypeMeta *DType, PyArrayDTypeMeta_Spec *spec) +{ + if (!PyObject_TypeCheck(DType, &PyArrayDTypeMeta_Type)) { + PyErr_SetString(PyExc_RuntimeError, + "Passed in DType must be a valid (initialized) DTypeMeta " + "instance!"); + return -1; + } + + if (spec->typeobj == NULL || !PyType_Check(spec->typeobj)) { + PyErr_SetString(PyExc_TypeError, + "Not giving a type object is currently not supported, but " + "is expected to be supported eventually. This would mean " + "that e.g. indexing a NumPy array will return a 0-D array " + "and not a scalar."); + return -1; + } + + if (DType->dt_slots != NULL) { + PyErr_Format(PyExc_RuntimeError, + "DType %R appears already registered?", DType); + return -1; + } + + /* Check and handle flags: */ + if (spec->flags & ~(NPY_DT_PARAMETRIC|NPY_DT_ABSTRACT)) { + PyErr_SetString(PyExc_RuntimeError, + "invalid DType flags specified, only parametric and abstract " + "are valid flags for user DTypes."); + return -1; + } + + DType->flags = spec->flags; + DType->dt_slots = PyMem_Calloc(1, sizeof(NPY_DType_Slots)); + if (DType->dt_slots == NULL) { + return -1; + } + + /* Set default values (where applicable) */ + NPY_DT_SLOTS(DType)->discover_descr_from_pyobject = &discover_as_default; + NPY_DT_SLOTS(DType)->is_known_scalar_type = ( + &python_builtins_are_known_scalar_types); + NPY_DT_SLOTS(DType)->default_descr = use_new_as_default; + NPY_DT_SLOTS(DType)->common_dtype = dtype_does_not_promote; + /* May need a default for non-parametric? */ + NPY_DT_SLOTS(DType)->common_instance = NULL; + NPY_DT_SLOTS(DType)->setitem = NULL; + NPY_DT_SLOTS(DType)->getitem = NULL; + + PyType_Slot *spec_slot = spec->slots; + while (1) { + int slot = spec_slot->slot; + void *pfunc = spec_slot->pfunc; + spec_slot++; + if (slot == 0) { + break; + } + if (slot > NUM_DTYPE_SLOTS || slot < 0) { + PyErr_Format(PyExc_RuntimeError, + "Invalid slot with value %d passed in.", slot); + return -1; + } + /* + * It is up to the user to get this right, and slots are sorted + * exactly like they are stored right now: + */ + void **current = (void **)(&( + NPY_DT_SLOTS(DType)->discover_descr_from_pyobject)); + current += slot - 1; + *current = pfunc; + } + if (NPY_DT_SLOTS(DType)->setitem == NULL + || NPY_DT_SLOTS(DType)->getitem == NULL) { + PyErr_SetString(PyExc_RuntimeError, + "A DType must provide a getitem/setitem (there may be an " + "exception here in the future if no scalar type is provided)"); + return -1; + } + + /* + * Now that the spec is read we can check that all required functions were + * defined by the user. + */ + if (spec->flags & NPY_DT_PARAMETRIC) { + if (NPY_DT_SLOTS(DType)->common_instance == NULL || + NPY_DT_SLOTS(DType)->discover_descr_from_pyobject + == &discover_as_default) { + PyErr_SetString(PyExc_RuntimeError, + "Parametric DType must define a common-instance and " + "descriptor discovery function!"); + return -1; + } + } + NPY_DT_SLOTS(DType)->f = default_funcs; + /* invalid type num. Ideally, we get away with it! */ + DType->type_num = -1; + + /* + * Handle the scalar type mapping. 
+ */ + Py_INCREF(spec->typeobj); + DType->scalar_type = spec->typeobj; + if (PyType_GetFlags(spec->typeobj) & Py_TPFLAGS_HEAPTYPE) { + if (PyObject_SetAttrString((PyObject *)DType->scalar_type, + "__associated_array_dtype__", (PyObject *)DType) < 0) { + Py_DECREF(DType); + return -1; + } + } + if (_PyArray_MapPyTypeToDType(DType, DType->scalar_type, 0) < 0) { + Py_DECREF(DType); + return -1; + } + + /* Ensure cast dict is defined (not sure we have to do it here) */ + NPY_DT_SLOTS(DType)->castingimpls = PyDict_New(); + if (NPY_DT_SLOTS(DType)->castingimpls == NULL) { + return -1; + } + /* + * And now, register all the casts that are currently defined! + */ + PyArrayMethod_Spec **next_meth_spec = spec->casts; + while (1) { + PyArrayMethod_Spec *meth_spec = *next_meth_spec; + next_meth_spec++; + if (meth_spec == NULL) { + break; + } + /* + * The user doesn't know the name of DType yet, so we have to fill it + * in for them! + */ + for (int i=0; i < meth_spec->nin + meth_spec->nout; i++) { + if (meth_spec->dtypes[i] == NULL) { + meth_spec->dtypes[i] = DType; + } + } + /* Register the cast! */ + int res = PyArray_AddCastingImplementation_FromSpec(meth_spec, 0); + + /* Also clean up again, so nobody can get bad ideas... */ + for (int i=0; i < meth_spec->nin + meth_spec->nout; i++) { + if (meth_spec->dtypes[i] == DType) { + meth_spec->dtypes[i] = NULL; + } + } + + if (res < 0) { + return -1; + } + } + + if (NPY_DT_SLOTS(DType)->within_dtype_castingimpl == NULL) { + /* + * We expect this for now. We should have a default for DType that + * only supports simple copy (and possibly byte-order assuming that + * they swap the full itemsize). + */ + PyErr_SetString(PyExc_RuntimeError, + "DType must provide a function to cast (or just copy) between " + "its own instances!"); + return -1; + } + + /* And finally, we have to register all the casts! 
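 *
 * (Editor's sketch of the caller's side, all `unit_*` names
 * hypothetical: because the loop above substitutes the new DType for
 * every NULL dtype entry, the mandatory within-DType cast can be
 * declared before the DType itself exists:
 *
 *     static PyArray_DTypeMeta *unit_cast_dtypes[2] = {NULL, NULL};
 *     static PyArrayMethod_Spec unit_within_cast = {
 *         .name = "unit_to_unit_cast",
 *         .nin = 1,
 *         .nout = 1,
 *         .casting = NPY_NO_CASTING,
 *         .dtypes = unit_cast_dtypes,
 *         .slots = unit_cast_slots,  /* hypothetical: resolve_descriptors
 *                                       and strided-loop slots */
 *     };
 *
 * assuming PyArrayMethod_Spec keeps its current name/nin/nout/casting/
 * dtypes/slots fields.)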
*/ + return 0; +} + + +/* Function is defined in umath/dispatching.c (same/one compilation unit) */ +NPY_NO_EXPORT int +PyUFunc_AddLoop(PyUFuncObject *ufunc, PyObject *info, int ignore_duplicate); + +static int +PyUFunc_AddLoopFromSpec(PyObject *ufunc, PyArrayMethod_Spec *spec) +{ + if (!PyObject_TypeCheck(ufunc, &PyUFunc_Type)) { + PyErr_SetString(PyExc_TypeError, + "ufunc object passed is not a ufunc!"); + return -1; + } + PyBoundArrayMethodObject *bmeth = + (PyBoundArrayMethodObject *)PyArrayMethod_FromSpec(spec); + if (bmeth == NULL) { + return -1; + } + int nargs = bmeth->method->nin + bmeth->method->nout; + PyObject *dtypes = PyArray_TupleFromItems( + nargs, (PyObject **)bmeth->dtypes, 1); + if (dtypes == NULL) { + return -1; + } + PyObject *info = PyTuple_Pack(2, dtypes, bmeth->method); + Py_DECREF(bmeth); + Py_DECREF(dtypes); + if (info == NULL) { + return -1; + } + return PyUFunc_AddLoop((PyUFuncObject *)ufunc, info, 0); +} + + +static int +PyUFunc_AddPromoter( + PyObject *ufunc, PyObject *DType_tuple, PyObject *promoter) +{ + if (!PyObject_TypeCheck(ufunc, &PyUFunc_Type)) { + PyErr_SetString(PyExc_TypeError, + "ufunc object passed is not a ufunc!"); + return -1; + } + if (!PyCapsule_CheckExact(promoter)) { + PyErr_SetString(PyExc_TypeError, + "promoter must (currently) be a PyCapsule."); + return -1; + } + if (PyCapsule_GetPointer(promoter, "numpy._ufunc_promoter") == NULL) { + return -1; + } + PyObject *info = PyTuple_Pack(2, DType_tuple, promoter); + if (info == NULL) { + return -1; + } + return PyUFunc_AddLoop((PyUFuncObject *)ufunc, info, 0); +} + + +NPY_NO_EXPORT PyObject * +_get_experimental_dtype_api(PyObject *NPY_UNUSED(mod), PyObject *arg) +{ + static void *experimental_api_table[] = { + &PyUFunc_AddLoopFromSpec, + &PyUFunc_AddPromoter, + &PyArrayDTypeMeta_Type, + &PyArrayInitDTypeMeta_FromSpec, + &PyArray_CommonDType, + &PyArray_PromoteDTypeSequence, + NULL, + }; + + char *env = getenv("NUMPY_EXPERIMENTAL_DTYPE_API"); + if (env == NULL || strcmp(env, "1") != 0) { + PyErr_Format(PyExc_RuntimeError, + "The new DType API is currently in an exploratory phase and " + "should NOT be used for production code. " + "Expect modifications and crashes! " + "To experiment with the new API you must set " + "`NUMPY_EXPERIMENTAL_DTYPE_API=1` as an environment variable."); + return NULL; + } + + long version = PyLong_AsLong(arg); + if (error_converting(version)) { + return NULL; + } + if (version != EXPERIMENTAL_DTYPE_API_VERSION) { + PyErr_Format(PyExc_RuntimeError, + "Experimental DType API version %d requested, but NumPy " + "is exporting version %d. Recompile your DType and/or upgrade " + "NumPy to match.", + version, EXPERIMENTAL_DTYPE_API_VERSION); + return NULL; + } + + return PyCapsule_New(&experimental_api_table, + "experimental_dtype_api_table", NULL); +} diff --git a/numpy/core/src/multiarray/experimental_public_dtype_api.h b/numpy/core/src/multiarray/experimental_public_dtype_api.h new file mode 100644 index 000000000..270cb82bf --- /dev/null +++ b/numpy/core/src/multiarray/experimental_public_dtype_api.h @@ -0,0 +1,18 @@ +/* + * This file exports the experimental dtype API as exposed via the + * `numpy/core/include/numpy/experimental_dtype_api.h` + * header file. + * + * This file is a stub, all important definitions are in the code file. + * + * NOTE: This file is considered in-flux, exploratory and transitional. 
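 *
 * For orientation, an editor's sketch of a registration as the spec in
 * the code file expects it (all `unit_*` and `Unit*` names are
 * hypothetical; slot ids follow the NPY_DType_Slots field order, with
 * setitem sixth and getitem seventh, matching the `slot - 1` offset
 * computed during registration):
 *
 *     static PyType_Slot unit_slots[] = {
 *         {6, &unit_setitem},   /* setitem is required */
 *         {7, &unit_getitem},   /* getitem is required */
 *         {0, NULL},
 *     };
 *     static PyArrayMethod_Spec *unit_casts[] = {&unit_within_cast, NULL};
 *     static PyArrayDTypeMeta_Spec unit_spec = {
 *         .flags = 0,           /* neither parametric nor abstract */
 *         .casts = unit_casts,  /* NULL-terminated; must include the
 *                                  within-DType cast */
 *         .slots = unit_slots,
 *     };
 *     /* at import time, once the scalar type exists:
 *      *     unit_spec.typeobj = UnitScalar_Type;
 *      *     PyArrayInitDTypeMeta_FromSpec(&UnitDType, &unit_spec);
 *      */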
+ */ + +#ifndef NUMPY_CORE_SRC_MULTIARRAY_EXPERIMENTAL_PUBLIC_DTYPE_API_H_ +#define NUMPY_CORE_SRC_MULTIARRAY_EXPERIMENTAL_PUBLIC_DTYPE_API_H_ + +NPY_NO_EXPORT PyObject * +_get_experimental_dtype_api(PyObject *mod, PyObject *arg); + + +#endif /* NUMPY_CORE_SRC_MULTIARRAY_EXPERIMENTAL_PUBLIC_DTYPE_API_H_ */ diff --git a/numpy/core/src/multiarray/flagsobject.c b/numpy/core/src/multiarray/flagsobject.c index fe73c18ee..3b1b4f406 100644 --- a/numpy/core/src/multiarray/flagsobject.c +++ b/numpy/core/src/multiarray/flagsobject.c @@ -1,11 +1,11 @@ /* Array Flags Object */ +#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#define _MULTIARRAYMODULE #define PY_SSIZE_T_CLEAN #include <Python.h> -#include "structmember.h" +#include <structmember.h> -#define NPY_NO_DEPRECATED_API NPY_API_VERSION -#define _MULTIARRAYMODULE #include "numpy/arrayobject.h" #include "arrayobject.h" #include "numpy/arrayscalars.h" diff --git a/numpy/core/src/multiarray/getset.c b/numpy/core/src/multiarray/getset.c index de2a8c14e..e81ca2947 100644 --- a/numpy/core/src/multiarray/getset.c +++ b/numpy/core/src/multiarray/getset.c @@ -1,11 +1,11 @@ /* Array Descr Object */ +#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#define _MULTIARRAYMODULE #define PY_SSIZE_T_CLEAN #include <Python.h> -#include "structmember.h" +#include <structmember.h> -#define NPY_NO_DEPRECATED_API NPY_API_VERSION -#define _MULTIARRAYMODULE #include "numpy/arrayobject.h" #include "npy_config.h" @@ -384,7 +384,23 @@ array_data_set(PyArrayObject *self, PyObject *op, void *NPY_UNUSED(ignored)) } if (PyArray_FLAGS(self) & NPY_ARRAY_OWNDATA) { PyArray_XDECREF(self); - PyDataMem_FREE(PyArray_DATA(self)); + size_t nbytes = PyArray_NBYTES(self); + /* + * Allocation will never be 0, see comment in ctors.c + * line 820 + */ + if (nbytes == 0) { + PyArray_Descr *dtype = PyArray_DESCR(self); + nbytes = dtype->elsize ? 
dtype->elsize : 1; + } + PyObject *handler = PyArray_HANDLER(self); + if (handler == NULL) { + /* This can happen if someone arbitrarily sets NPY_ARRAY_OWNDATA */ + PyErr_SetString(PyExc_RuntimeError, + "no memory handler found but OWNDATA flag set"); + return -1; + } + PyDataMem_UserFREE(PyArray_DATA(self), nbytes, handler); } if (PyArray_BASE(self)) { if ((PyArray_FLAGS(self) & NPY_ARRAY_WRITEBACKIFCOPY) || diff --git a/numpy/core/src/multiarray/getset.h b/numpy/core/src/multiarray/getset.h index 4f1209de5..a95c98020 100644 --- a/numpy/core/src/multiarray/getset.h +++ b/numpy/core/src/multiarray/getset.h @@ -1,6 +1,6 @@ -#ifndef _NPY_ARRAY_GETSET_H_ -#define _NPY_ARRAY_GETSET_H_ +#ifndef NUMPY_CORE_SRC_MULTIARRAY_GETSET_H_ +#define NUMPY_CORE_SRC_MULTIARRAY_GETSET_H_ extern NPY_NO_EXPORT PyGetSetDef array_getsetlist[]; -#endif +#endif /* NUMPY_CORE_SRC_MULTIARRAY_GETSET_H_ */ diff --git a/numpy/core/src/multiarray/hashdescr.c b/numpy/core/src/multiarray/hashdescr.c index e9a99cc8f..a3c9e986b 100644 --- a/numpy/core/src/multiarray/hashdescr.c +++ b/numpy/core/src/multiarray/hashdescr.c @@ -1,7 +1,9 @@ -#define PY_SSIZE_T_CLEAN -#include <Python.h> #define NPY_NO_DEPRECATED_API NPY_API_VERSION #define _MULTIARRAYMODULE + +#define PY_SSIZE_T_CLEAN +#include <Python.h> + #include <numpy/arrayobject.h> #include "npy_config.h" diff --git a/numpy/core/src/multiarray/hashdescr.h b/numpy/core/src/multiarray/hashdescr.h index 8d577e7b0..97375b4af 100644 --- a/numpy/core/src/multiarray/hashdescr.h +++ b/numpy/core/src/multiarray/hashdescr.h @@ -1,7 +1,7 @@ -#ifndef _NPY_HASHDESCR_H_ -#define _NPY_HASHDESCR_H_ +#ifndef NUMPY_CORE_SRC_MULTIARRAY_HASHDESCR_H_ +#define NUMPY_CORE_SRC_MULTIARRAY_HASHDESCR_H_ NPY_NO_EXPORT npy_hash_t PyArray_DescrHash(PyObject* odescr); -#endif +#endif /* NUMPY_CORE_SRC_MULTIARRAY_HASHDESCR_H_ */ diff --git a/numpy/core/src/multiarray/item_selection.c b/numpy/core/src/multiarray/item_selection.c index 2b8ea9e79..086b674c8 100644 --- a/numpy/core/src/multiarray/item_selection.c +++ b/numpy/core/src/multiarray/item_selection.c @@ -1,10 +1,10 @@ -#define PY_SSIZE_T_CLEAN -#include <Python.h> -#include "structmember.h" - #define NPY_NO_DEPRECATED_API NPY_API_VERSION #define _MULTIARRAYMODULE +#define PY_SSIZE_T_CLEAN +#include <Python.h> +#include <structmember.h> + #include "numpy/arrayobject.h" #include "numpy/arrayscalars.h" @@ -776,6 +776,7 @@ PyArray_Repeat(PyArrayObject *aop, PyObject *op, int axis) return NULL; } + /*NUMPY_API */ NPY_NO_EXPORT PyObject * @@ -907,7 +908,7 @@ PyArray_Choose(PyArrayObject *ip, PyObject *op, PyArrayObject *out, Py_XDECREF(mps[i]); } Py_DECREF(ap); - npy_free_cache(mps, n * sizeof(mps[0])); + PyDataMem_FREE(mps); if (out != NULL && out != obj) { Py_INCREF(out); PyArray_ResolveWritebackIfCopy(obj); @@ -922,7 +923,7 @@ PyArray_Choose(PyArrayObject *ip, PyObject *op, PyArrayObject *out, Py_XDECREF(mps[i]); } Py_XDECREF(ap); - npy_free_cache(mps, n * sizeof(mps[0])); + PyDataMem_FREE(mps); PyArray_DiscardWritebackIfCopy(obj); Py_XDECREF(obj); return NULL; @@ -962,14 +963,19 @@ _new_sortlike(PyArrayObject *op, int axis, PyArray_SortFunc *sort, return 0; } + PyObject *mem_handler = PyDataMem_GetHandler(); + if (mem_handler == NULL) { + return -1; + } it = (PyArrayIterObject *)PyArray_IterAllButAxis((PyObject *)op, &axis); if (it == NULL) { + Py_DECREF(mem_handler); return -1; } size = it->size; if (needcopy) { - buffer = npy_alloc_cache(N * elsize); + buffer = PyDataMem_UserNEW(N * elsize, mem_handler); if (buffer == NULL) { ret = -1; goto fail; 
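/*
 * Editor's aside on the buffer changes above and below: they follow the
 * new handler-paired allocation API (PyDataMem_GetHandler /
 * PyDataMem_UserNEW / PyDataMem_UserFREE), where every allocation is
 * freed through the same handler that made it. A minimal sketch of a
 * custom handler, assuming the NEP 49 PyDataMem_Handler layout; all
 * `my_*` names are hypothetical:
 */
#include <stdlib.h>

static void *
my_malloc(void *NPY_UNUSED(ctx), size_t size)
{
    return malloc(size);
}

static void *
my_calloc(void *NPY_UNUSED(ctx), size_t nelem, size_t elsize)
{
    return calloc(nelem, elsize);
}

static void *
my_realloc(void *NPY_UNUSED(ctx), void *ptr, size_t new_size)
{
    return realloc(ptr, new_size);
}

static void
my_free(void *NPY_UNUSED(ctx), void *ptr, size_t NPY_UNUSED(size))
{
    free(ptr);
}

static PyDataMem_Handler my_handler = {
    "my_handler",    /* name, visible through the array's capsule */
    1,               /* version */
    {
        NULL,        /* ctx */
        my_malloc,
        my_calloc,
        my_realloc,
        my_free,
    }
};

/*
 * Installed via a capsule; arrays allocated while it is current keep a
 * reference to it (fa->mem_handler above), so PyDataMem_UserFREE always
 * routes back to the matching `free` with the original size:
 *
 *     PyObject *capsule = PyCapsule_New(&my_handler, "mem_handler", NULL);
 *     PyObject *old = PyDataMem_SetHandler(capsule);
 */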
@@ -1053,12 +1059,14 @@ _new_sortlike(PyArrayObject *op, int axis, PyArray_SortFunc *sort, fail: NPY_END_THREADS_DESCR(PyArray_DESCR(op)); - npy_free_cache(buffer, N * elsize); + /* cleanup internal buffer */ + PyDataMem_UserFREE(buffer, N * elsize, mem_handler); if (ret < 0 && !PyErr_Occurred()) { /* Out of memory during sorting or buffer creation */ PyErr_NoMemory(); } Py_DECREF(it); + Py_DECREF(mem_handler); return ret; } @@ -1090,11 +1098,16 @@ _new_argsortlike(PyArrayObject *op, int axis, PyArray_ArgSortFunc *argsort, NPY_BEGIN_THREADS_DEF; + PyObject *mem_handler = PyDataMem_GetHandler(); + if (mem_handler == NULL) { + return NULL; + } rop = (PyArrayObject *)PyArray_NewFromDescr( Py_TYPE(op), PyArray_DescrFromType(NPY_INTP), PyArray_NDIM(op), PyArray_DIMS(op), NULL, NULL, 0, (PyObject *)op); if (rop == NULL) { + Py_DECREF(mem_handler); return NULL; } rstride = PyArray_STRIDE(rop, axis); @@ -1102,6 +1115,7 @@ _new_argsortlike(PyArrayObject *op, int axis, PyArray_ArgSortFunc *argsort, /* Check if there is any argsorting to do */ if (N <= 1 || PyArray_SIZE(op) == 0) { + Py_DECREF(mem_handler); memset(PyArray_DATA(rop), 0, PyArray_NBYTES(rop)); return (PyObject *)rop; } @@ -1115,7 +1129,7 @@ _new_argsortlike(PyArrayObject *op, int axis, PyArray_ArgSortFunc *argsort, size = it->size; if (needcopy) { - valbuffer = npy_alloc_cache(N * elsize); + valbuffer = PyDataMem_UserNEW(N * elsize, mem_handler); if (valbuffer == NULL) { ret = -1; goto fail; @@ -1123,7 +1137,8 @@ _new_argsortlike(PyArrayObject *op, int axis, PyArray_ArgSortFunc *argsort, } if (needidxbuffer) { - idxbuffer = (npy_intp *)npy_alloc_cache(N * sizeof(npy_intp)); + idxbuffer = (npy_intp *)PyDataMem_UserNEW(N * sizeof(npy_intp), + mem_handler); if (idxbuffer == NULL) { ret = -1; goto fail; @@ -1212,8 +1227,9 @@ _new_argsortlike(PyArrayObject *op, int axis, PyArray_ArgSortFunc *argsort, fail: NPY_END_THREADS_DESCR(PyArray_DESCR(op)); - npy_free_cache(valbuffer, N * elsize); - npy_free_cache(idxbuffer, N * sizeof(npy_intp)); + /* cleanup internal buffers */ + PyDataMem_UserFREE(valbuffer, N * elsize, mem_handler); + PyDataMem_UserFREE(idxbuffer, N * sizeof(npy_intp), mem_handler); if (ret < 0) { if (!PyErr_Occurred()) { /* Out of memory during sorting or buffer creation */ @@ -1224,6 +1240,7 @@ fail: } Py_XDECREF(it); Py_XDECREF(rit); + Py_DECREF(mem_handler); return (PyObject *)rop; } @@ -1292,7 +1309,15 @@ partition_prep_kth_array(PyArrayObject * ktharray, npy_intp * kth; npy_intp nkth, i; - if (!PyArray_CanCastSafely(PyArray_TYPE(ktharray), NPY_INTP)) { + if (PyArray_ISBOOL(ktharray)) { + /* 2021-09-29, NumPy 1.22 */ + if (DEPRECATE( + "Passing booleans as partition index is deprecated" + " (warning added in NumPy 1.22)") < 0) { + return NULL; + } + } + else if (!PyArray_ISINTEGER(ktharray)) { PyErr_Format(PyExc_TypeError, "Partition index must be integer"); return NULL; } @@ -2390,19 +2415,14 @@ PyArray_CountNonzero(PyArrayObject *self) npy_intp *strideptr, *innersizeptr; NPY_BEGIN_THREADS_DEF; - // Special low-overhead version specific to the boolean/int types dtype = PyArray_DESCR(self); - switch(dtype->kind) { - case 'u': - case 'i': - case 'b': - if (dtype->elsize > 8) { - break; - } - return count_nonzero_int( - PyArray_NDIM(self), PyArray_BYTES(self), PyArray_DIMS(self), - PyArray_STRIDES(self), dtype->elsize - ); + /* Special low-overhead version specific to the boolean/int types */ + if (PyArray_ISALIGNED(self) && ( + PyDataType_ISBOOL(dtype) || PyDataType_ISINTEGER(dtype))) { + return count_nonzero_int( + 
PyArray_NDIM(self), PyArray_BYTES(self), PyArray_DIMS(self), + PyArray_STRIDES(self), dtype->elsize + ); } nonzero = PyArray_DESCR(self)->f->nonzero; diff --git a/numpy/core/src/multiarray/item_selection.h b/numpy/core/src/multiarray/item_selection.h index c1c8b5567..40d9eb298 100644 --- a/numpy/core/src/multiarray/item_selection.h +++ b/numpy/core/src/multiarray/item_selection.h @@ -1,5 +1,5 @@ -#ifndef _NPY_PRIVATE__ITEM_SELECTION_H_ -#define _NPY_PRIVATE__ITEM_SELECTION_H_ +#ifndef NUMPY_CORE_SRC_MULTIARRAY_ITEM_SELECTION_H_ +#define NUMPY_CORE_SRC_MULTIARRAY_ITEM_SELECTION_H_ /* * Counts the number of True values in a raw boolean array. This @@ -27,4 +27,4 @@ NPY_NO_EXPORT int PyArray_MultiIndexSetItem(PyArrayObject *self, const npy_intp *multi_index, PyObject *obj); -#endif +#endif /* NUMPY_CORE_SRC_MULTIARRAY_ITEM_SELECTION_H_ */ diff --git a/numpy/core/src/multiarray/iterators.c b/numpy/core/src/multiarray/iterators.c index f724837ce..f959162fd 100644 --- a/numpy/core/src/multiarray/iterators.c +++ b/numpy/core/src/multiarray/iterators.c @@ -1,9 +1,10 @@ +#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#define _MULTIARRAYMODULE + #define PY_SSIZE_T_CLEAN #include <Python.h> -#include "structmember.h" +#include <structmember.h> -#define NPY_NO_DEPRECATED_API NPY_API_VERSION -#define _MULTIARRAYMODULE #include "numpy/arrayobject.h" #include "numpy/arrayscalars.h" @@ -1123,6 +1124,35 @@ NPY_NO_EXPORT PyTypeObject PyArrayIter_Type = { /** END of Array Iterator **/ + +static int +set_shape_mismatch_exception(PyArrayMultiIterObject *mit, int i1, int i2) +{ + PyObject *shape1, *shape2, *msg; + + shape1 = PyObject_GetAttrString((PyObject *) mit->iters[i1]->ao, "shape"); + if (shape1 == NULL) { + return -1; + } + shape2 = PyObject_GetAttrString((PyObject *) mit->iters[i2]->ao, "shape"); + if (shape2 == NULL) { + Py_DECREF(shape1); + return -1; + } + msg = PyUnicode_FromFormat("shape mismatch: objects cannot be broadcast " + "to a single shape. Mismatch is between arg %d " + "with shape %S and arg %d with shape %S.", + i1, shape1, i2, shape2); + Py_DECREF(shape1); + Py_DECREF(shape2); + if (msg == NULL) { + return -1; + } + PyErr_SetObject(PyExc_ValueError, msg); + Py_DECREF(msg); + return 0; +} + /* Adjust dimensionality and strides for index object iterators --- i.e. broadcast */ @@ -1131,6 +1161,7 @@ NPY_NO_EXPORT int PyArray_Broadcast(PyArrayMultiIterObject *mit) { int i, nd, k, j; + int src_iter = -1; /* Initializing avoids a compiler warning. 
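 *
 * With set_shape_mismatch_exception above, an impossible broadcast now
 * names the offending operands, e.g. (editor's illustration with
 * made-up shapes):
 *
 *     ValueError: shape mismatch: objects cannot be broadcast to a
 *     single shape. Mismatch is between arg 0 with shape (2, 3) and
 *     arg 1 with shape (4,).
 *
 * where the old message carried no shape information at all.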
*/ npy_intp tmp; PyArrayIterObject *it; @@ -1154,12 +1185,10 @@ PyArray_Broadcast(PyArrayMultiIterObject *mit) } if (mit->dimensions[i] == 1) { mit->dimensions[i] = tmp; + src_iter = j; } else if (mit->dimensions[i] != tmp) { - PyErr_SetString(PyExc_ValueError, - "shape mismatch: objects" \ - " cannot be broadcast" \ - " to a single shape"); + set_shape_mismatch_exception(mit, src_iter, j); return -1; } } diff --git a/numpy/core/src/multiarray/iterators.h b/numpy/core/src/multiarray/iterators.h index d942f45b8..883615cc9 100644 --- a/numpy/core/src/multiarray/iterators.h +++ b/numpy/core/src/multiarray/iterators.h @@ -1,5 +1,5 @@ -#ifndef _NPY_ARRAYITERATORS_H_ -#define _NPY_ARRAYITERATORS_H_ +#ifndef NUMPY_CORE_SRC_MULTIARRAY_ITERATORS_H_ +#define NUMPY_CORE_SRC_MULTIARRAY_ITERATORS_H_ NPY_NO_EXPORT PyObject *iter_subscript(PyArrayIterObject *, PyObject *); @@ -10,4 +10,4 @@ iter_ass_subscript(PyArrayIterObject *, PyObject *, PyObject *); NPY_NO_EXPORT void PyArray_RawIterBaseInit(PyArrayIterObject *it, PyArrayObject *ao); -#endif +#endif /* NUMPY_CORE_SRC_MULTIARRAY_ITERATORS_H_ */ diff --git a/numpy/core/src/multiarray/legacy_dtype_implementation.c b/numpy/core/src/multiarray/legacy_dtype_implementation.c index 9b4946da3..72a52d7a8 100644 --- a/numpy/core/src/multiarray/legacy_dtype_implementation.c +++ b/numpy/core/src/multiarray/legacy_dtype_implementation.c @@ -6,9 +6,9 @@ * until such a time where legay user dtypes are deprecated and removed * entirely. */ - #define NPY_NO_DEPRECATED_API NPY_API_VERSION #define _MULTIARRAYMODULE + #include "numpy/arrayobject.h" #include "scalartypes.h" #include "_datetime.h" diff --git a/numpy/core/src/multiarray/legacy_dtype_implementation.h b/numpy/core/src/multiarray/legacy_dtype_implementation.h index b36eb019a..04f455cde 100644 --- a/numpy/core/src/multiarray/legacy_dtype_implementation.h +++ b/numpy/core/src/multiarray/legacy_dtype_implementation.h @@ -1,8 +1,8 @@ -#ifndef _NPY_LEGACY_DTYPE_IMPLEMENTATION_H -#define _NPY_LEGACY_DTYPE_IMPLEMENTATION_H +#ifndef NUMPY_CORE_SRC_MULTIARRAY_LEGACY_DTYPE_IMPLEMENTATION_H_ +#define NUMPY_CORE_SRC_MULTIARRAY_LEGACY_DTYPE_IMPLEMENTATION_H_ NPY_NO_EXPORT npy_bool PyArray_LegacyCanCastTypeTo(PyArray_Descr *from, PyArray_Descr *to, NPY_CASTING casting); -#endif /*_NPY_LEGACY_DTYPE_IMPLEMENTATION_H*/ +#endif /* NUMPY_CORE_SRC_MULTIARRAY_LEGACY_DTYPE_IMPLEMENTATION_H_ */ diff --git a/numpy/core/src/multiarray/lowlevel_strided_loops.c.src b/numpy/core/src/multiarray/lowlevel_strided_loops.c.src index e38873746..e313d2447 100644 --- a/numpy/core/src/multiarray/lowlevel_strided_loops.c.src +++ b/numpy/core/src/multiarray/lowlevel_strided_loops.c.src @@ -9,7 +9,7 @@ */ #define PY_SSIZE_T_CLEAN -#include "Python.h" +#include <Python.h> #define NPY_NO_DEPRECATED_API NPY_API_VERSION #define _MULTIARRAYMODULE @@ -1849,7 +1849,7 @@ mapiter_@name@(PyArrayMapIterObject *mit) return -1; } #else - /* The operand order is reveresed here */ + /* The operand order is reversed here */ char *args[2] = {subspace_ptrs[1], subspace_ptrs[0]}; npy_intp strides[2] = {subspace_strides[1], subspace_strides[0]}; if (NPY_UNLIKELY(cast_info.func(&cast_info.context, diff --git a/numpy/core/src/multiarray/mapping.c b/numpy/core/src/multiarray/mapping.c index 41311b03f..014a863d5 100644 --- a/numpy/core/src/multiarray/mapping.c +++ b/numpy/core/src/multiarray/mapping.c @@ -1,10 +1,10 @@ +#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#define _MULTIARRAYMODULE + #define PY_SSIZE_T_CLEAN #include <Python.h> -#include "structmember.h" 
+#include <structmember.h> -/*#include <stdio.h>*/ -#define NPY_NO_DEPRECATED_API NPY_API_VERSION -#define _MULTIARRAYMODULE #include "numpy/arrayobject.h" #include "arrayobject.h" diff --git a/numpy/core/src/multiarray/mapping.h b/numpy/core/src/multiarray/mapping.h index 4e22f79df..e929b8b3f 100644 --- a/numpy/core/src/multiarray/mapping.h +++ b/numpy/core/src/multiarray/mapping.h @@ -1,5 +1,5 @@ -#ifndef _NPY_ARRAYMAPPING_H_ -#define _NPY_ARRAYMAPPING_H_ +#ifndef NUMPY_CORE_SRC_MULTIARRAY_MAPPING_H_ +#define NUMPY_CORE_SRC_MULTIARRAY_MAPPING_H_ extern NPY_NO_EXPORT PyMappingMethods array_as_mapping; @@ -70,4 +70,4 @@ PyArray_MapIterNew(npy_index_info *indices , int index_num, int index_type, npy_uint32 subspace_iter_flags, npy_uint32 subspace_flags, npy_uint32 extra_op_flags, PyArrayObject *extra_op, PyArray_Descr *extra_op_dtype); -#endif +#endif /* NUMPY_CORE_SRC_MULTIARRAY_MAPPING_H_ */ diff --git a/numpy/core/src/multiarray/methods.c b/numpy/core/src/multiarray/methods.c index ffa735b38..bb0006e32 100644 --- a/numpy/core/src/multiarray/methods.c +++ b/numpy/core/src/multiarray/methods.c @@ -1,14 +1,14 @@ +#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#define _MULTIARRAYMODULE + #define PY_SSIZE_T_CLEAN -#include <stdarg.h> #include <Python.h> -#include "structmember.h" +#include <structmember.h> -#define NPY_NO_DEPRECATED_API NPY_API_VERSION -#define _MULTIARRAYMODULE #include "numpy/arrayobject.h" -#include "arrayobject.h" #include "numpy/arrayscalars.h" +#include "arrayobject.h" #include "arrayfunction_override.h" #include "npy_argparse.h" #include "npy_config.h" @@ -30,6 +30,8 @@ #include "methods.h" #include "alloc.h" +#include <stdarg.h> + /* NpyArg_ParseKeywords * @@ -1983,6 +1985,16 @@ array_setstate(PyArrayObject *self, PyObject *args) return NULL; } + /* + * Reassigning fa->descr messes with the reallocation strategy, + * since fa could be a 0-d or scalar, and then + * PyDataMem_UserFREE will be confused + */ + size_t n_tofree = PyArray_NBYTES(self); + if (n_tofree == 0) { + PyArray_Descr *dtype = PyArray_DESCR(self); + n_tofree = dtype->elsize ? dtype->elsize : 1; + } Py_XDECREF(PyArray_DESCR(self)); fa->descr = typecode; Py_INCREF(typecode); @@ -2049,7 +2061,18 @@ array_setstate(PyArrayObject *self, PyObject *args) } if ((PyArray_FLAGS(self) & NPY_ARRAY_OWNDATA)) { - PyDataMem_FREE(PyArray_DATA(self)); + /* + * Allocation will never be 0, see comment in ctors.c + * line 820 + */ + PyObject *handler = PyArray_HANDLER(self); + if (handler == NULL) { + /* This can happen if someone arbitrarily sets NPY_ARRAY_OWNDATA */ + PyErr_SetString(PyExc_RuntimeError, + "no memory handler found but OWNDATA flag set"); + return NULL; + } + PyDataMem_UserFREE(PyArray_DATA(self), n_tofree, handler); PyArray_CLEARFLAGS(self, NPY_ARRAY_OWNDATA); } Py_XDECREF(PyArray_BASE(self)); @@ -2085,7 +2108,6 @@ array_setstate(PyArrayObject *self, PyObject *args) if (!PyDataType_FLAGCHK(typecode, NPY_LIST_PICKLE)) { int swap = PyArray_ISBYTESWAPPED(self); - fa->data = datastr; /* Bytes should always be considered immutable, but we just grab the * pointer if they are large, to save memory. 
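* When the data is copied instead (the branch below), the current PyDataMem handler is captured in fa->mem_handler first, so the allocator that hands out the buffer is also the one that later frees it, even if the context-local default changes in between.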
*/ if (!IsAligned(self) || swap || (len <= 1000)) { @@ -2094,8 +2116,16 @@ array_setstate(PyArrayObject *self, PyObject *args) Py_DECREF(rawdata); Py_RETURN_NONE; } - fa->data = PyDataMem_NEW(num); + /* Store the handler in case the default is modified */ + Py_XDECREF(fa->mem_handler); + fa->mem_handler = PyDataMem_GetHandler(); + if (fa->mem_handler == NULL) { + Py_DECREF(rawdata); + return NULL; + } + fa->data = PyDataMem_UserNEW(num, PyArray_HANDLER(self)); if (PyArray_DATA(self) == NULL) { + Py_DECREF(fa->mem_handler); Py_DECREF(rawdata); return PyErr_NoMemory(); } @@ -2131,7 +2161,12 @@ array_setstate(PyArrayObject *self, PyObject *args) Py_DECREF(rawdata); } else { + /* The handlers should never be called in this case */ + Py_XDECREF(fa->mem_handler); + fa->mem_handler = NULL; + fa->data = datastr; if (PyArray_SetBaseObject(self, rawdata) < 0) { + Py_DECREF(rawdata); return NULL; } } @@ -2142,8 +2177,15 @@ array_setstate(PyArrayObject *self, PyObject *args) if (num == 0 || elsize == 0) { Py_RETURN_NONE; } - fa->data = PyDataMem_NEW(num); + /* Store the functions in case the default handler is modified */ + Py_XDECREF(fa->mem_handler); + fa->mem_handler = PyDataMem_GetHandler(); + if (fa->mem_handler == NULL) { + return NULL; + } + fa->data = PyDataMem_UserNEW(num, PyArray_HANDLER(self)); if (PyArray_DATA(self) == NULL) { + Py_DECREF(fa->mem_handler); return PyErr_NoMemory(); } if (PyDataType_FLAGCHK(PyArray_DESCR(self), NPY_NEEDS_INIT)) { @@ -2152,6 +2194,7 @@ array_setstate(PyArrayObject *self, PyObject *args) PyArray_ENABLEFLAGS(self, NPY_ARRAY_OWNDATA); fa->base = NULL; if (_setlist_pkl(self, rawdata) < 0) { + Py_DECREF(fa->mem_handler); return NULL; } } @@ -2707,6 +2750,30 @@ array_complex(PyArrayObject *self, PyObject *NPY_UNUSED(args)) return c; } +static PyObject * +array_class_getitem(PyObject *cls, PyObject *args) +{ + PyObject *generic_alias; + +#ifdef Py_GENERICALIASOBJECT_H + Py_ssize_t args_len; + + args_len = PyTuple_Check(args) ? PyTuple_Size(args) : 1; + if (args_len != 2) { + return PyErr_Format(PyExc_TypeError, + "Too %s arguments for %s", + args_len > 2 ? 
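/* choose the word by whether the caller overshot or undershot */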
"many" : "few", + ((PyTypeObject *)cls)->tp_name); + } + generic_alias = Py_GenericAlias(cls, args); +#else + PyErr_SetString(PyExc_TypeError, + "Type subscription requires python >= 3.9"); + generic_alias = NULL; +#endif + return generic_alias; +} + NPY_NO_EXPORT PyMethodDef array_methods[] = { /* for subtypes */ @@ -2764,6 +2831,11 @@ NPY_NO_EXPORT PyMethodDef array_methods[] = { (PyCFunction) array_format, METH_VARARGS, NULL}, + /* for typing; requires python >= 3.9 */ + {"__class_getitem__", + (PyCFunction)array_class_getitem, + METH_CLASS | METH_O, NULL}, + /* Original and Extended methods added 2005 */ {"all", (PyCFunction)array_all, diff --git a/numpy/core/src/multiarray/methods.h b/numpy/core/src/multiarray/methods.h index c0de23c35..bcada0fea 100644 --- a/numpy/core/src/multiarray/methods.h +++ b/numpy/core/src/multiarray/methods.h @@ -1,5 +1,5 @@ -#ifndef _NPY_ARRAY_METHODS_H_ -#define _NPY_ARRAY_METHODS_H_ +#ifndef NUMPY_CORE_SRC_MULTIARRAY_METHODS_H_ +#define NUMPY_CORE_SRC_MULTIARRAY_METHODS_H_ #include "npy_import.h" @@ -31,4 +31,4 @@ NpyPath_PathlikeToFspath(PyObject *file) return PyObject_CallFunctionObjArgs(os_fspath, file, NULL); } -#endif +#endif /* NUMPY_CORE_SRC_MULTIARRAY_METHODS_H_ */ diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c index d33c7060b..c00f14045 100644 --- a/numpy/core/src/multiarray/multiarraymodule.c +++ b/numpy/core/src/multiarray/multiarraymodule.c @@ -11,16 +11,14 @@ oliphant@ee.byu.edu Brigham Young University */ - -/* $Id: multiarraymodule.c,v 1.36 2005/09/14 00:14:00 teoliphant Exp $ */ - -#define PY_SSIZE_T_CLEAN -#include "Python.h" -#include "structmember.h" - #define NPY_NO_DEPRECATED_API NPY_API_VERSION #define _UMATHMODULE #define _MULTIARRAYMODULE + +#define PY_SSIZE_T_CLEAN +#include <Python.h> +#include <structmember.h> + #include <numpy/npy_common.h> #include "numpy/arrayobject.h" #include "numpy/arrayscalars.h" @@ -70,6 +68,7 @@ NPY_NO_EXPORT int NPY_NUMUSERTYPES = 0; #include "typeinfo.h" #include "get_attr_string.h" +#include "experimental_public_dtype_api.h" /* _get_experimental_dtype_api */ /* ***************************************************************************** @@ -84,11 +83,12 @@ NPY_NO_EXPORT int set_matmul_flags(PyObject *d); /* in ufunc_object.c */ /* * global variable to determine if legacy printing is enabled, accessible from - * C. For simplicity the mode is encoded as an integer where '0' means no - * legacy mode, and '113' means 1.13 legacy mode. We can upgrade this if we - * have more complex requirements in the future. + * C. For simplicity the mode is encoded as an integer where INT_MAX means no + * legacy mode, and '113'/'121' means 1.13/1.21 legacy mode; and 0 maps to + * INT_MAX. We can upgrade this if we have more complex requirements in the + * future. 
*/ -int npy_legacy_print_mode = 0; +int npy_legacy_print_mode = INT_MAX; static PyObject * set_legacy_print_mode(PyObject *NPY_UNUSED(self), PyObject *args) @@ -96,6 +96,9 @@ set_legacy_print_mode(PyObject *NPY_UNUSED(self), PyObject *args) if (!PyArg_ParseTuple(args, "i", &npy_legacy_print_mode)) { return NULL; } + if (!npy_legacy_print_mode) { + npy_legacy_print_mode = INT_MAX; + } Py_RETURN_NONE; } @@ -4431,7 +4434,9 @@ static struct PyMethodDef array_module_methods[] = { {"_discover_array_parameters", (PyCFunction)_discover_array_parameters, METH_VARARGS | METH_KEYWORDS, NULL}, {"_get_castingimpl", (PyCFunction)_get_castingimpl, - METH_VARARGS | METH_KEYWORDS, NULL}, + METH_VARARGS | METH_KEYWORDS, NULL}, + {"_get_experimental_dtype_api", (PyCFunction)_get_experimental_dtype_api, + METH_O, NULL}, /* from umath */ {"frompyfunc", (PyCFunction) ufunc_frompyfunc, @@ -4442,6 +4447,9 @@ static struct PyMethodDef array_module_methods[] = { {"geterrobj", (PyCFunction) ufunc_geterr, METH_VARARGS, NULL}, + {"get_handler_name", + (PyCFunction) get_handler_name, + METH_VARARGS, NULL}, {"_add_newdoc_ufunc", (PyCFunction)add_newdoc_ufunc, METH_VARARGS, NULL}, {"_get_sfloat_dtype", @@ -4919,6 +4927,20 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { if (initumath(m) != 0) { goto err; } +#if (!defined(PYPY_VERSION_NUM) || PYPY_VERSION_NUM >= 0x07030600) + /* + * Initialize the context-local PyDataMem_Handler capsule. + */ + c_api = PyCapsule_New(&default_handler, "mem_handler", NULL); + if (c_api == NULL) { + goto err; + } + current_handler = PyContextVar_New("current_allocator", c_api); + Py_DECREF(c_api); + if (current_handler == NULL) { + goto err; + } +#endif return m; err: diff --git a/numpy/core/src/multiarray/multiarraymodule.h b/numpy/core/src/multiarray/multiarraymodule.h index 4cdb6ef72..640940d2a 100644 --- a/numpy/core/src/multiarray/multiarraymodule.h +++ b/numpy/core/src/multiarray/multiarraymodule.h @@ -1,5 +1,5 @@ -#ifndef _NPY_MULTIARRAY_H_ -#define _NPY_MULTIARRAY_H_ +#ifndef NUMPY_CORE_SRC_MULTIARRAY_MULTIARRAYMODULE_H_ +#define NUMPY_CORE_SRC_MULTIARRAY_MULTIARRAYMODULE_H_ NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_array_wrap; NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_array_finalize; @@ -9,4 +9,4 @@ NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_axis2; NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_like; NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_numpy; -#endif +#endif /* NUMPY_CORE_SRC_MULTIARRAY_MULTIARRAYMODULE_H_ */ diff --git a/numpy/core/src/multiarray/nditer_api.c b/numpy/core/src/multiarray/nditer_api.c index a1ca5bff5..860c8c1f6 100644 --- a/numpy/core/src/multiarray/nditer_api.c +++ b/numpy/core/src/multiarray/nditer_api.c @@ -11,8 +11,9 @@ */ #define NPY_NO_DEPRECATED_API NPY_API_VERSION -/* Indicate that this .c file is allowed to include the header */ +/* Allow this .c file to include nditer_impl.h */ #define NPY_ITERATOR_IMPLEMENTATION_CODE + #include "nditer_impl.h" #include "templ_common.h" #include "ctors.h" @@ -115,7 +116,7 @@ NpyIter_RemoveAxis(NpyIter *iter, int axis) --p; } } - else if (p <= 0) { + else { if (p < -1-axis) { ++p; } @@ -2129,7 +2130,7 @@ npyiter_copy_to_buffers(NpyIter *iter, char **prev_dataptrs) /* * Try to do make the outersize as big as possible. This allows * it to shrink when processing the last bit of the outer reduce loop, - * then grow again at the beginnning of the next outer reduce loop. + * then grow again at the beginning of the next outer reduce loop. 
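* (It is recomputed below as the remaining extent of the outer reduce axis, NAD_SHAPE - NAD_INDEX.)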
*/ NBF_REDUCE_OUTERSIZE(bufferdata) = (NAD_SHAPE(reduce_outeraxisdata)- NAD_INDEX(reduce_outeraxisdata)); @@ -2803,9 +2804,9 @@ npyiter_checkreducesize(NpyIter *iter, npy_intp count, if (coord != 0) { /* * In this case, it is only safe to reuse the buffer if the amount - * of data copied is not more then the current axes, as is the + * of data copied is not more than the current axes, as is the * case when reuse_reduce_loops was active already. - * It should be in principle OK when the idim loop returns immidiatly. + * It should be in principle OK when the idim loop returns immediately. */ NIT_ITFLAGS(iter) &= ~NPY_ITFLAG_REUSE_REDUCE_LOOPS; } diff --git a/numpy/core/src/multiarray/nditer_constr.c b/numpy/core/src/multiarray/nditer_constr.c index 98d4f5a75..bf32e1f6b 100644 --- a/numpy/core/src/multiarray/nditer_constr.c +++ b/numpy/core/src/multiarray/nditer_constr.c @@ -11,10 +11,10 @@ */ #define NPY_NO_DEPRECATED_API NPY_API_VERSION -/* Indicate that this .c file is allowed to include the header */ +/* Allow this .c file to include nditer_impl.h */ #define NPY_ITERATOR_IMPLEMENTATION_CODE -#include "nditer_impl.h" +#include "nditer_impl.h" #include "arrayobject.h" #include "array_coercion.h" #include "templ_common.h" @@ -1405,7 +1405,7 @@ check_mask_for_writemasked_reduction(NpyIter *iter, int iop) /* * Check whether a reduction is OK based on the flags and the operand being * readwrite. This path is deprecated, since usually only specific axes - * should be reduced. If axes are specified explicitely, the flag is + * should be reduced. If axes are specified explicitly, the flag is * unnecessary. */ static int diff --git a/numpy/core/src/multiarray/nditer_impl.h b/numpy/core/src/multiarray/nditer_impl.h index a5a9177e5..2a82b7e54 100644 --- a/numpy/core/src/multiarray/nditer_impl.h +++ b/numpy/core/src/multiarray/nditer_impl.h @@ -4,20 +4,21 @@ * should use the exposed iterator API. */ #ifndef NPY_ITERATOR_IMPLEMENTATION_CODE -#error "This header is intended for use ONLY by iterator implementation code." +#error This header is intended for use ONLY by iterator implementation code. #endif -#ifndef _NPY_PRIVATE__NDITER_IMPL_H_ -#define _NPY_PRIVATE__NDITER_IMPL_H_ - -#define PY_SSIZE_T_CLEAN -#include "Python.h" -#include "structmember.h" +#ifndef NUMPY_CORE_SRC_MULTIARRAY_NDITER_IMPL_H_ +#define NUMPY_CORE_SRC_MULTIARRAY_NDITER_IMPL_H_ #define NPY_NO_DEPRECATED_API NPY_API_VERSION #define _MULTIARRAYMODULE -#include <numpy/arrayobject.h> -#include <npy_pycompat.h> + +#define PY_SSIZE_T_CLEAN +#include <Python.h> +#include <structmember.h> + +#include "numpy/arrayobject.h" +#include "npy_pycompat.h" #include "convert_datatype.h" #include "lowlevel_strided_loops.h" @@ -288,7 +289,7 @@ struct NpyIter_AxisData_tag { 1 + \ /* intp stride[nop+1] AND char* ptr[nop+1] */ \ 2*((nop)+1) \ - )*NPY_SIZEOF_INTP ) + )*(size_t)NPY_SIZEOF_INTP) /* * Macro to advance an AXISDATA pointer by a specified count. @@ -355,4 +356,4 @@ npyiter_copy_to_buffers(NpyIter *iter, char **prev_dataptrs); NPY_NO_EXPORT void npyiter_clear_buffers(NpyIter *iter); -#endif +#endif /* NUMPY_CORE_SRC_MULTIARRAY_NDITER_IMPL_H_ */ diff --git a/numpy/core/src/multiarray/nditer_pywrap.c b/numpy/core/src/multiarray/nditer_pywrap.c index 8acc7f87f..8e072d5f4 100644 --- a/numpy/core/src/multiarray/nditer_pywrap.c +++ b/numpy/core/src/multiarray/nditer_pywrap.c @@ -6,13 +6,14 @@ * * See LICENSE.txt for the license. 
*/ -#define PY_SSIZE_T_CLEAN -#include "Python.h" -#include "structmember.h" - #define NPY_NO_DEPRECATED_API NPY_API_VERSION #define _MULTIARRAYMODULE -#include <numpy/arrayobject.h> + +#define PY_SSIZE_T_CLEAN +#include <Python.h> +#include <structmember.h> + +#include "numpy/arrayobject.h" #include "npy_config.h" #include "npy_pycompat.h" #include "alloc.h" diff --git a/numpy/core/src/multiarray/nditer_pywrap.h b/numpy/core/src/multiarray/nditer_pywrap.h index 49eb5d89d..d2fcafebd 100644 --- a/numpy/core/src/multiarray/nditer_pywrap.h +++ b/numpy/core/src/multiarray/nditer_pywrap.h @@ -1,8 +1,8 @@ -#ifndef __NDITER_PYWRAP_H -#define __NDITER_PYWRAP_H +#ifndef NUMPY_CORE_SRC_MULTIARRAY_NDITER_PYWRAP_H_ +#define NUMPY_CORE_SRC_MULTIARRAY_NDITER_PYWRAP_H_ NPY_NO_EXPORT PyObject * NpyIter_NestedIters(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwds); -#endif +#endif /* NUMPY_CORE_SRC_MULTIARRAY_NDITER_PYWRAP_H_ */ diff --git a/numpy/core/src/multiarray/nditer_templ.c.src b/numpy/core/src/multiarray/nditer_templ.c.src index 05ce6ae75..3f91a482b 100644 --- a/numpy/core/src/multiarray/nditer_templ.c.src +++ b/numpy/core/src/multiarray/nditer_templ.c.src @@ -132,7 +132,7 @@ npyiter_iternext_itflags@tag_itflags@_dims@tag_ndim@_iters@tag_nop@( /* Reset the 1st and 2nd indices to 0 */ NAD_INDEX(axisdata0) = 0; NAD_INDEX(axisdata1) = 0; - /* Reset the 1st and 2nd pointers to the value of the 3nd */ + /* Reset the 1st and 2nd pointers to the value of the 3rd */ for (istrides = 0; istrides < nstrides; ++istrides) { NAD_PTRS(axisdata0)[istrides] = NAD_PTRS(axisdata2)[istrides]; NAD_PTRS(axisdata1)[istrides] = NAD_PTRS(axisdata2)[istrides]; diff --git a/numpy/core/src/multiarray/npy_buffer.h b/numpy/core/src/multiarray/npy_buffer.h index d10f1a020..62e08573c 100644 --- a/numpy/core/src/multiarray/npy_buffer.h +++ b/numpy/core/src/multiarray/npy_buffer.h @@ -1,5 +1,5 @@ -#ifndef _NPY_PRIVATE_BUFFER_H_ -#define _NPY_PRIVATE_BUFFER_H_ +#ifndef NUMPY_CORE_SRC_MULTIARRAY_NPY_BUFFER_H_ +#define NUMPY_CORE_SRC_MULTIARRAY_NPY_BUFFER_H_ extern NPY_NO_EXPORT PyBufferProcs array_as_buffer; @@ -12,4 +12,4 @@ _descriptor_from_pep3118_format(char const *s); NPY_NO_EXPORT int void_getbuffer(PyObject *obj, Py_buffer *view, int flags); -#endif +#endif /* NUMPY_CORE_SRC_MULTIARRAY_NPY_BUFFER_H_ */ diff --git a/numpy/core/src/multiarray/number.c b/numpy/core/src/multiarray/number.c index 9ed7cde47..292ef55a6 100644 --- a/numpy/core/src/multiarray/number.c +++ b/numpy/core/src/multiarray/number.c @@ -1,10 +1,10 @@ +#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#define _MULTIARRAYMODULE + #define PY_SSIZE_T_CLEAN #include <Python.h> -#include "structmember.h" +#include <structmember.h> -/*#include <stdio.h>*/ -#define NPY_NO_DEPRECATED_API NPY_API_VERSION -#define _MULTIARRAYMODULE #include "numpy/arrayobject.h" #include "npy_config.h" diff --git a/numpy/core/src/multiarray/number.h b/numpy/core/src/multiarray/number.h index 4f426f964..054840305 100644 --- a/numpy/core/src/multiarray/number.h +++ b/numpy/core/src/multiarray/number.h @@ -1,5 +1,5 @@ -#ifndef _NPY_ARRAY_NUMBER_H_ -#define _NPY_ARRAY_NUMBER_H_ +#ifndef NUMPY_CORE_SRC_MULTIARRAY_NUMBER_H_ +#define NUMPY_CORE_SRC_MULTIARRAY_NUMBER_H_ typedef struct { PyObject *add; @@ -69,4 +69,4 @@ NPY_NO_EXPORT PyObject * PyArray_GenericAccumulateFunction(PyArrayObject *m1, PyObject *op, int axis, int rtype, PyArrayObject *out); -#endif +#endif /* NUMPY_CORE_SRC_MULTIARRAY_NUMBER_H_ */ diff --git a/numpy/core/src/multiarray/refcount.c 
b/numpy/core/src/multiarray/refcount.c index 41dd059b0..a1c310700 100644 --- a/numpy/core/src/multiarray/refcount.c +++ b/numpy/core/src/multiarray/refcount.c @@ -2,13 +2,13 @@ * This module corresponds to the `Special functions for NPY_OBJECT` * section in the numpy reference for C-API. */ +#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#define _MULTIARRAYMODULE #define PY_SSIZE_T_CLEAN #include <Python.h> -#include "structmember.h" +#include <structmember.h> -#define NPY_NO_DEPRECATED_API NPY_API_VERSION -#define _MULTIARRAYMODULE #include "numpy/arrayobject.h" #include "numpy/arrayscalars.h" #include "iterators.h" diff --git a/numpy/core/src/multiarray/refcount.h b/numpy/core/src/multiarray/refcount.h index 761d53dd0..959eef5ba 100644 --- a/numpy/core/src/multiarray/refcount.h +++ b/numpy/core/src/multiarray/refcount.h @@ -1,5 +1,5 @@ -#ifndef _NPY_PRIVATE_REFCOUNT_H_ -#define _NPY_PRIVATE_REFCOUNT_H_ +#ifndef NUMPY_CORE_SRC_MULTIARRAY_REFCOUNT_H_ +#define NUMPY_CORE_SRC_MULTIARRAY_REFCOUNT_H_ NPY_NO_EXPORT void PyArray_Item_INCREF(char *data, PyArray_Descr *descr); @@ -16,4 +16,4 @@ PyArray_XDECREF(PyArrayObject *mp); NPY_NO_EXPORT void PyArray_FillObjectArray(PyArrayObject *arr, PyObject *obj); -#endif +#endif /* NUMPY_CORE_SRC_MULTIARRAY_REFCOUNT_H_ */ diff --git a/numpy/core/src/multiarray/scalarapi.c b/numpy/core/src/multiarray/scalarapi.c index 0e93cbbe9..e409e9874 100644 --- a/numpy/core/src/multiarray/scalarapi.c +++ b/numpy/core/src/multiarray/scalarapi.c @@ -1,9 +1,10 @@ +#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#define _MULTIARRAYMODULE + #define PY_SSIZE_T_CLEAN #include <Python.h> -#include "structmember.h" +#include <structmember.h> -#define NPY_NO_DEPRECATED_API NPY_API_VERSION -#define _MULTIARRAYMODULE #include "numpy/arrayobject.h" #include "numpy/arrayscalars.h" diff --git a/numpy/core/src/multiarray/scalartypes.c.src b/numpy/core/src/multiarray/scalartypes.c.src index 740ec8cc2..bbbc5bfa2 100644 --- a/numpy/core/src/multiarray/scalartypes.c.src +++ b/numpy/core/src/multiarray/scalartypes.c.src @@ -1,7 +1,7 @@ /* -*- c -*- */ #define PY_SSIZE_T_CLEAN -#include "Python.h" -#include "structmember.h" +#include <Python.h> +#include <structmember.h> #define NPY_NO_DEPRECATED_API NPY_API_VERSION #ifndef _MULTIARRAYMODULE @@ -34,6 +34,16 @@ #include "binop_override.h" +/* + * used for allocating a single scalar, so use the default numpy + * memory allocators instead of the (maybe) user overrides + */ +NPY_NO_EXPORT void * +npy_alloc_cache_zero(size_t nmemb, size_t size); + +NPY_NO_EXPORT void +npy_free_cache(void * p, npy_uintp sz); + NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[] = { {PyObject_HEAD_INIT(&PyBoolArrType_Type) 0}, {PyObject_HEAD_INIT(&PyBoolArrType_Type) 1}, @@ -209,6 +219,27 @@ gentype_multiply(PyObject *m1, PyObject *m2) } /**begin repeat + * #TYPE = BYTE, UBYTE, SHORT, USHORT, INT, UINT, + * LONG, ULONG, LONGLONG, ULONGLONG# + * #type = npy_byte, npy_ubyte, npy_short, npy_ushort, npy_int, npy_uint, + * npy_long, npy_ulong, npy_longlong, npy_ulonglong# + * #c = hh, uhh, h, uh,, u, l, ul, ll, ull# + * #Name = Byte, UByte, Short, UShort, Int, UInt, + * Long, ULong, LongLong, ULongLong# + * #convert = Long*8, LongLong*2# + */ +static PyObject * +@type@_bit_count(PyObject *self) +{ + @type@ scalar = PyArrayScalar_VAL(self, @Name@); + uint8_t count = npy_popcount@c@(scalar); + PyObject *result = PyLong_From@convert@(count); + + return result; +} +/**end repeat**/ + +/**begin repeat * * #name = positive, negative, absolute, invert, int, float# 
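* (see the *_bit_count helpers just above: they are wired up below as the scalar bit_count methods, so e.g. np.uint8(255).bit_count() should return 8, and signed scalars count the bits of their absolute value)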
*/ @@ -866,7 +897,7 @@ static PyObject * { npy_@name@ absval; - if (npy_legacy_print_mode == 113) { + if (npy_legacy_print_mode <= 113) { return legacy_@name@_format@kind@(val); } @@ -892,7 +923,7 @@ c@name@type_@kind@(PyObject *self) npy_c@name@ val = PyArrayScalar_VAL(self, C@Name@); TrimMode trim = TrimMode_DptZeros; - if (npy_legacy_print_mode == 113) { + if (npy_legacy_print_mode <= 113) { return legacy_c@name@_format@kind@(val); } @@ -957,7 +988,7 @@ halftype_@kind@(PyObject *self) float floatval = npy_half_to_float(val); float absval; - if (npy_legacy_print_mode == 113) { + if (npy_legacy_print_mode <= 113) { return legacy_float_format@kind@(floatval); } @@ -1321,7 +1352,7 @@ gentype_imag_get(PyObject *self, void *NPY_UNUSED(ignored)) int elsize; typecode = PyArray_DescrFromScalar(self); elsize = typecode->elsize; - temp = npy_alloc_cache_zero(elsize); + temp = npy_alloc_cache_zero(1, elsize); ret = PyArray_Scalar(temp, typecode, NULL); npy_free_cache(temp, elsize); } @@ -1805,6 +1836,59 @@ gentype_setflags(PyObject *NPY_UNUSED(self), PyObject *NPY_UNUSED(args), Py_RETURN_NONE; } +static PyObject * +numbertype_class_getitem_abc(PyObject *cls, PyObject *args) +{ + PyObject *generic_alias; + +#ifdef Py_GENERICALIASOBJECT_H + Py_ssize_t args_len; + int args_len_expected; + + /* complexfloating should take 2 parameters, all others take 1 */ + if (PyType_IsSubtype((PyTypeObject *)cls, + &PyComplexFloatingArrType_Type)) { + args_len_expected = 2; + } + else { + args_len_expected = 1; + } + + args_len = PyTuple_Check(args) ? PyTuple_Size(args) : 1; + if (args_len != args_len_expected) { + return PyErr_Format(PyExc_TypeError, + "Too %s arguments for %s", + args_len > args_len_expected ? "many" : "few", + ((PyTypeObject *)cls)->tp_name); + } + generic_alias = Py_GenericAlias(cls, args); +#else + PyErr_SetString(PyExc_TypeError, + "Type subscription requires python >= 3.9"); + generic_alias = NULL; +#endif + return generic_alias; +} + +/* + * Use for concrete np.number subclasses, making them act as if they + * were subtyped from e.g. np.signedinteger[object], thus lacking any + * free subscription parameters. Requires python >= 3.9. 
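+ * Concretely, np.floating[Any] remains valid via the _abc variant above,
+ * while np.float64[Any] reaches this function and raises TypeError.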
+ */ +static PyObject * +numbertype_class_getitem(PyObject *cls, PyObject *args) +{ +#ifdef Py_GENERICALIASOBJECT_H + PyErr_Format(PyExc_TypeError, + "There are no type variables left in %s", + ((PyTypeObject *)cls)->tp_name); +#else + PyErr_SetString(PyExc_TypeError, + "Type subscription requires python >= 3.9"); +#endif + return NULL; +} + /* * casting complex numbers (that don't inherit from Python complex) * to Python complex @@ -2188,6 +2272,14 @@ static PyGetSetDef inttype_getsets[] = { {NULL, NULL, NULL, NULL, NULL} }; +static PyMethodDef numbertype_methods[] = { + /* for typing; requires python >= 3.9 */ + {"__class_getitem__", + (PyCFunction)numbertype_class_getitem_abc, + METH_CLASS | METH_O, NULL}, + {NULL, NULL, 0, NULL} /* sentinel */ +}; + /**begin repeat * #name = cfloat,clongdouble# */ @@ -2195,6 +2287,10 @@ static PyMethodDef @name@type_methods[] = { {"__complex__", (PyCFunction)@name@_complex, METH_VARARGS | METH_KEYWORDS, NULL}, + /* for typing; requires python >= 3.9 */ + {"__class_getitem__", + (PyCFunction)numbertype_class_getitem, + METH_CLASS | METH_O, NULL}, {NULL, NULL, 0, NULL} }; /**end repeat**/ @@ -2232,10 +2328,43 @@ static PyMethodDef @name@type_methods[] = { {"is_integer", (PyCFunction)@name@_is_integer, METH_NOARGS, NULL}, + /* for typing; requires python >= 3.9 */ + {"__class_getitem__", + (PyCFunction)numbertype_class_getitem, + METH_CLASS | METH_O, NULL}, + {NULL, NULL, 0, NULL} +}; +/**end repeat**/ + +/**begin repeat + * #name = timedelta, cdouble# + */ +static PyMethodDef @name@type_methods[] = { + /* for typing; requires python >= 3.9 */ + {"__class_getitem__", + (PyCFunction)numbertype_class_getitem, + METH_CLASS | METH_O, NULL}, {NULL, NULL, 0, NULL} }; /**end repeat**/ +/**begin repeat + * #name = byte, ubyte, short, ushort, int, uint, + * long, ulong, longlong, ulonglong# + */ +static PyMethodDef @name@type_methods[] = { + /* for typing; requires python >= 3.9 */ + {"__class_getitem__", + (PyCFunction)numbertype_class_getitem, + METH_CLASS | METH_O, NULL}, + {"bit_count", + (PyCFunction)npy_@name@_bit_count, + METH_NOARGS, NULL}, + {NULL, NULL, 0, NULL} /* sentinel */ +}; +/**end repeat**/ + + /************* As_mapping functions for void array scalar ************/ static Py_ssize_t @@ -3069,7 +3198,10 @@ void_arrtype_new(PyTypeObject *type, PyObject *args, PyObject *kwds) (int) NPY_MAX_INT); return NULL; } - destptr = npy_alloc_cache_zero(memu); + if (memu == 0) { + memu = 1; + } + destptr = npy_alloc_cache_zero(memu, 1); if (destptr == NULL) { return PyErr_NoMemory(); } @@ -3951,6 +4083,8 @@ initialize_numeric_types(void) PyIntegerArrType_Type.tp_getset = inttype_getsets; + PyNumberArrType_Type.tp_methods = numbertype_methods; + /**begin repeat * #NAME= Number, Integer, SignedInteger, UnsignedInteger, Inexact, * Floating, ComplexFloating, Flexible, Character# @@ -4008,6 +4142,17 @@ initialize_numeric_types(void) /**end repeat**/ /**begin repeat + * #name = byte, short, int, long, longlong, + * ubyte, ushort, uint, ulong, ulonglong# + * #Name = Byte, Short, Int, Long, LongLong, + * UByte, UShort, UInt, ULong, ULongLong# + */ + + Py@Name@ArrType_Type.tp_methods = @name@type_methods; + + /**end repeat**/ + + /**begin repeat * #name = half, float, double, longdouble# * #Name = Half, Float, Double, LongDouble# */ @@ -4016,6 +4161,17 @@ initialize_numeric_types(void) /**end repeat**/ + /**begin repeat + * #name = byte, short, int, long, longlong, ubyte, ushort, + * uint, ulong, ulonglong, timedelta, cdouble# + * #Name = Byte, Short, Int, Long, 
LongLong, UByte, UShort, + * UInt, ULong, ULongLong, Timedelta, CDouble# + */ + + Py@Name@ArrType_Type.tp_methods = @name@type_methods; + + /**end repeat**/ + /* We won't be inheriting from Python Int type. */ PyIntArrType_Type.tp_hash = int_arrtype_hash; diff --git a/numpy/core/src/multiarray/scalartypes.h b/numpy/core/src/multiarray/scalartypes.h index 861f2c943..95a2f66c6 100644 --- a/numpy/core/src/multiarray/scalartypes.h +++ b/numpy/core/src/multiarray/scalartypes.h @@ -1,5 +1,5 @@ -#ifndef _NPY_SCALARTYPES_H_ -#define _NPY_SCALARTYPES_H_ +#ifndef NUMPY_CORE_SRC_MULTIARRAY_SCALARTYPES_H_ +#define NUMPY_CORE_SRC_MULTIARRAY_SCALARTYPES_H_ /* Internal look-up tables */ extern NPY_NO_EXPORT unsigned char @@ -31,4 +31,4 @@ _typenum_fromtypeobj(PyObject *type, int user); NPY_NO_EXPORT void * scalar_value(PyObject *scalar, PyArray_Descr *descr); -#endif +#endif /* NUMPY_CORE_SRC_MULTIARRAY_SCALARTYPES_H_ */ diff --git a/numpy/core/src/multiarray/sequence.c b/numpy/core/src/multiarray/sequence.c index 1c74f1719..8db0690a1 100644 --- a/numpy/core/src/multiarray/sequence.c +++ b/numpy/core/src/multiarray/sequence.c @@ -1,9 +1,10 @@ +#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#define _MULTIARRAYMODULE + #define PY_SSIZE_T_CLEAN #include <Python.h> -#include "structmember.h" +#include <structmember.h> -#define NPY_NO_DEPRECATED_API NPY_API_VERSION -#define _MULTIARRAYMODULE #include "numpy/arrayobject.h" #include "numpy/arrayscalars.h" diff --git a/numpy/core/src/multiarray/sequence.h b/numpy/core/src/multiarray/sequence.h index b28c50d97..aff6aeb7e 100644 --- a/numpy/core/src/multiarray/sequence.h +++ b/numpy/core/src/multiarray/sequence.h @@ -1,6 +1,6 @@ -#ifndef _NPY_ARRAY_SEQUENCE_H_ -#define _NPY_ARRAY_SEQUENCE_H_ +#ifndef NUMPY_CORE_SRC_MULTIARRAY_SEQUENCE_H_ +#define NUMPY_CORE_SRC_MULTIARRAY_SEQUENCE_H_ extern NPY_NO_EXPORT PySequenceMethods array_as_sequence; -#endif +#endif /* NUMPY_CORE_SRC_MULTIARRAY_SEQUENCE_H_ */ diff --git a/numpy/core/src/multiarray/shape.c b/numpy/core/src/multiarray/shape.c index 02c349759..162abd6a4 100644 --- a/numpy/core/src/multiarray/shape.c +++ b/numpy/core/src/multiarray/shape.c @@ -1,9 +1,10 @@ +#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#define _MULTIARRAYMODULE + #define PY_SSIZE_T_CLEAN #include <Python.h> -#include "structmember.h" +#include <structmember.h> -#define NPY_NO_DEPRECATED_API NPY_API_VERSION -#define _MULTIARRAYMODULE #include "numpy/arrayobject.h" #include "numpy/arrayscalars.h" @@ -120,8 +121,16 @@ PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int refcheck, } /* Reallocate space if needed - allocating 0 is forbidden */ - new_data = PyDataMem_RENEW( - PyArray_DATA(self), newnbytes == 0 ? elsize : newnbytes); + PyObject *handler = PyArray_HANDLER(self); + if (handler == NULL) { + /* This can happen if someone arbitrarily sets NPY_ARRAY_OWNDATA */ + PyErr_SetString(PyExc_RuntimeError, + "no memory handler found but OWNDATA flag set"); + return NULL; + } + new_data = PyDataMem_UserRENEW(PyArray_DATA(self), + newnbytes == 0 ? 
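/* never shrink the buffer below one element */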
elsize : newnbytes, + handler); if (new_data == NULL) { PyErr_SetString(PyExc_MemoryError, "cannot allocate memory for array"); diff --git a/numpy/core/src/multiarray/shape.h b/numpy/core/src/multiarray/shape.h index 875b5430f..bef386ed1 100644 --- a/numpy/core/src/multiarray/shape.h +++ b/numpy/core/src/multiarray/shape.h @@ -1,5 +1,5 @@ -#ifndef _NPY_ARRAY_SHAPE_H_ -#define _NPY_ARRAY_SHAPE_H_ +#ifndef NUMPY_CORE_SRC_MULTIARRAY_SHAPE_H_ +#define NUMPY_CORE_SRC_MULTIARRAY_SHAPE_H_ /* * Creates a sorted stride perm matching the KEEPORDER behavior @@ -21,4 +21,4 @@ PyArray_CreateMultiSortedStridePerm(int narrays, PyArrayObject **arrays, NPY_NO_EXPORT PyObject * PyArray_SqueezeSelected(PyArrayObject *self, npy_bool *axis_flags); -#endif +#endif /* NUMPY_CORE_SRC_MULTIARRAY_SHAPE_H_ */ diff --git a/numpy/core/src/multiarray/strfuncs.c b/numpy/core/src/multiarray/strfuncs.c index d9d9b7c0a..ba457f4f4 100644 --- a/numpy/core/src/multiarray/strfuncs.c +++ b/numpy/core/src/multiarray/strfuncs.c @@ -1,8 +1,10 @@ #define NPY_NO_DEPRECATED_API NPY_API_VERSION #define _MULTIARRAYMODULE +#define PY_SSIZE_T_CLEAN #include <Python.h> -#include <numpy/arrayobject.h> + +#include "numpy/arrayobject.h" #include "npy_pycompat.h" #include "npy_import.h" #include "strfuncs.h" diff --git a/numpy/core/src/multiarray/strfuncs.h b/numpy/core/src/multiarray/strfuncs.h index 5dd661a20..134b56ed3 100644 --- a/numpy/core/src/multiarray/strfuncs.h +++ b/numpy/core/src/multiarray/strfuncs.h @@ -1,5 +1,5 @@ -#ifndef _NPY_ARRAY_STRFUNCS_H_ -#define _NPY_ARRAY_STRFUNCS_H_ +#ifndef NUMPY_CORE_SRC_MULTIARRAY_STRFUNCS_H_ +#define NUMPY_CORE_SRC_MULTIARRAY_STRFUNCS_H_ NPY_NO_EXPORT void PyArray_SetStringFunction(PyObject *op, int repr); @@ -13,4 +13,4 @@ array_str(PyArrayObject *self); NPY_NO_EXPORT PyObject * array_format(PyArrayObject *self, PyObject *args); -#endif +#endif /* NUMPY_CORE_SRC_MULTIARRAY_STRFUNCS_H_ */ diff --git a/numpy/core/src/multiarray/temp_elide.c b/numpy/core/src/multiarray/temp_elide.c index 2b4621744..f615aa336 100644 --- a/numpy/core/src/multiarray/temp_elide.c +++ b/numpy/core/src/multiarray/temp_elide.c @@ -1,8 +1,9 @@ +#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#define _MULTIARRAYMODULE + #define PY_SSIZE_T_CLEAN #include <Python.h> -#define NPY_NO_DEPRECATED_API NPY_API_VERSION -#define _MULTIARRAYMODULE #include "npy_config.h" #include "numpy/arrayobject.h" diff --git a/numpy/core/src/multiarray/temp_elide.h b/numpy/core/src/multiarray/temp_elide.h index 206bb0253..a1fec98d5 100644 --- a/numpy/core/src/multiarray/temp_elide.h +++ b/numpy/core/src/multiarray/temp_elide.h @@ -1,5 +1,6 @@ -#ifndef _NPY_ARRAY_TEMP_AVOID_H_ -#define _NPY_ARRAY_TEMP_AVOID_H_ +#ifndef NUMPY_CORE_SRC_MULTIARRAY_TEMP_ELIDE_H_ +#define NUMPY_CORE_SRC_MULTIARRAY_TEMP_ELIDE_H_ + #define NPY_NO_DEPRECATED_API NPY_API_VERSION #define _MULTIARRAYMODULE #include <numpy/ndarraytypes.h> @@ -12,4 +13,4 @@ try_binary_elide(PyObject * m1, PyObject * m2, PyObject * (inplace_op)(PyArrayObject * m1, PyObject * m2), PyObject ** res, int commutative); -#endif +#endif /* NUMPY_CORE_SRC_MULTIARRAY_TEMP_ELIDE_H_ */ diff --git a/numpy/core/src/multiarray/typeinfo.c b/numpy/core/src/multiarray/typeinfo.c index b0563b3c0..8cf6bc1e0 100644 --- a/numpy/core/src/multiarray/typeinfo.c +++ b/numpy/core/src/multiarray/typeinfo.c @@ -3,6 +3,10 @@ * Unfortunately, we need two different types to cover the cases where min/max * do and do not appear in the tuple. 
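* (hence the plain variant and the "ranged" one carrying min/max, cf. PyArray_typeinforanged in typeinfo.h)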
*/ +#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#define _MULTIARRAYMODULE + +#include "npy_pycompat.h" #include "typeinfo.h" #if (defined(PYPY_VERSION_NUM) && (PYPY_VERSION_NUM <= 0x07030000)) @@ -10,9 +14,6 @@ #include <structseq.h> #endif -#define NPY_NO_DEPRECATED_API NPY_API_VERSION -#define _MULTIARRAYMODULE -#include "npy_pycompat.h" static PyTypeObject PyArray_typeinfoType; diff --git a/numpy/core/src/multiarray/typeinfo.h b/numpy/core/src/multiarray/typeinfo.h index 28afa4120..af4637fc9 100644 --- a/numpy/core/src/multiarray/typeinfo.h +++ b/numpy/core/src/multiarray/typeinfo.h @@ -1,5 +1,5 @@ -#ifndef _NPY_PRIVATE_TYPEINFO_H_ -#define _NPY_PRIVATE_TYPEINFO_H_ +#ifndef NUMPY_CORE_SRC_MULTIARRAY_TYPEINFO_H_ +#define NUMPY_CORE_SRC_MULTIARRAY_TYPEINFO_H_ #define PY_SSIZE_T_CLEAN #include <Python.h> @@ -18,4 +18,4 @@ PyArray_typeinforanged( char typechar, int typenum, int nbits, int align, PyObject *max, PyObject *min, PyTypeObject *type_obj); -#endif +#endif /* NUMPY_CORE_SRC_MULTIARRAY_TYPEINFO_H_ */ diff --git a/numpy/core/src/multiarray/usertypes.c b/numpy/core/src/multiarray/usertypes.c index 5602304e9..a338d712d 100644 --- a/numpy/core/src/multiarray/usertypes.c +++ b/numpy/core/src/multiarray/usertypes.c @@ -20,13 +20,13 @@ maintainer email: oliphant.travis@ieee.org Space Science Telescope Institute (J. Todd Miller, Perry Greenfield, Rick White) */ +#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#define _MULTIARRAYMODULE + #define PY_SSIZE_T_CLEAN #include <Python.h> -#include "structmember.h" +#include <structmember.h> -/*#include <stdio.h>*/ -#define NPY_NO_DEPRECATED_API NPY_API_VERSION -#define _MULTIARRAYMODULE #include "numpy/arrayobject.h" #include "numpy/arrayscalars.h" @@ -268,6 +268,56 @@ PyArray_RegisterDataType(PyArray_Descr *descr) return typenum; } + +/* + * Checks that there is no cast already cached using the new casting-impl + * mechanism. + * In that case, we do not clear out the cache (but otherwise silently + * continue). Users should not modify casts after they have been used, + * but this may also happen accidentally during setup (and may never have + * mattered). See https://github.com/numpy/numpy/issues/20009 + */ +static int _warn_if_cast_exists_already( + PyArray_Descr *descr, int totype, char *funcname) +{ + PyArray_DTypeMeta *to_DType = PyArray_DTypeFromTypeNum(totype); + if (to_DType == NULL) { + return -1; + } + PyObject *cast_impl = PyDict_GetItemWithError( + NPY_DT_SLOTS(NPY_DTYPE(descr))->castingimpls, (PyObject *)to_DType); + Py_DECREF(to_DType); + if (cast_impl == NULL) { + if (PyErr_Occurred()) { + return -1; + } + } + else { + char *extra_msg; + if (cast_impl == Py_None) { + extra_msg = "the cast will continue to be considered impossible."; + } + else { + extra_msg = "the previous definition will continue to be used."; + } + Py_DECREF(cast_impl); + PyArray_Descr *to_descr = PyArray_DescrFromType(totype); + int ret = PyErr_WarnFormat(PyExc_RuntimeWarning, 1, + "A cast from %R to %R was registered/modified using `%s` " + "after the cast had been used. " + "This registration will have (mostly) no effect: %s\n" + "The most likely fix is to ensure that casts are the first " + "thing initialized after dtype registration. " + "Please contact the NumPy developers with any questions!", + descr, to_descr, funcname, extra_msg); + Py_DECREF(to_descr); + if (ret < 0) { + return -1; + } + } + return 0; +} + /*NUMPY_API Register Casting Function Replaces any function currently stored. 
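If the cast was already used, _warn_if_cast_exists_already above emits a RuntimeWarning first and the re-registration has (mostly) no effect.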
@@ -279,14 +329,19 @@ PyArray_RegisterCastFunc(PyArray_Descr *descr, int totype, PyObject *cobj, *key; int ret; - if (totype < NPY_NTYPES_ABI_COMPATIBLE) { - descr->f->cast[totype] = castfunc; - return 0; - } if (totype >= NPY_NTYPES && !PyTypeNum_ISUSERDEF(totype)) { PyErr_SetString(PyExc_TypeError, "invalid type number."); return -1; } + if (_warn_if_cast_exists_already( + descr, totype, "PyArray_RegisterCastFunc") < 0) { + return -1; + } + + if (totype < NPY_NTYPES_ABI_COMPATIBLE) { + descr->f->cast[totype] = castfunc; + return 0; + } if (descr->f->castdict == NULL) { descr->f->castdict = PyDict_New(); if (descr->f->castdict == NULL) { @@ -328,6 +383,10 @@ PyArray_RegisterCanCast(PyArray_Descr *descr, int totype, "RegisterCanCast must be user-defined."); return -1; } + if (_warn_if_cast_exists_already( + descr, totype, "PyArray_RegisterCanCast") < 0) { + return -1; + } if (scalar == NPY_NOSCALAR) { /* diff --git a/numpy/core/src/multiarray/usertypes.h b/numpy/core/src/multiarray/usertypes.h index 8b2fc80e6..6768e2c42 100644 --- a/numpy/core/src/multiarray/usertypes.h +++ b/numpy/core/src/multiarray/usertypes.h @@ -1,5 +1,5 @@ -#ifndef _NPY_PRIVATE_USERTYPES_H_ -#define _NPY_PRIVATE_USERTYPES_H_ +#ifndef NUMPY_CORE_SRC_MULTIARRAY_USERTYPES_H_ +#define NUMPY_CORE_SRC_MULTIARRAY_USERTYPES_H_ #include "array_method.h" @@ -27,4 +27,4 @@ NPY_NO_EXPORT int PyArray_AddLegacyWrapping_CastingImpl( PyArray_DTypeMeta *from, PyArray_DTypeMeta *to, NPY_CASTING casting); -#endif +#endif /* NUMPY_CORE_SRC_MULTIARRAY_USERTYPES_H_ */ diff --git a/numpy/core/src/multiarray/vdot.c b/numpy/core/src/multiarray/vdot.c index 9b5d19522..ff08ed2d4 100644 --- a/numpy/core/src/multiarray/vdot.c +++ b/numpy/core/src/multiarray/vdot.c @@ -1,7 +1,9 @@ #define NPY_NO_DEPRECATED_API NPY_API_VERSION #define _MULTIARRAYMODULE +#define PY_SSIZE_T_CLEAN #include <Python.h> + #include "common.h" #include "vdot.h" #include "npy_cblas.h" diff --git a/numpy/core/src/multiarray/vdot.h b/numpy/core/src/multiarray/vdot.h index 0f60ca6d1..f6da5ddea 100644 --- a/numpy/core/src/multiarray/vdot.h +++ b/numpy/core/src/multiarray/vdot.h @@ -1,5 +1,5 @@ -#ifndef _NPY_VDOT_H_ -#define _NPY_VDOT_H_ +#ifndef NUMPY_CORE_SRC_MULTIARRAY_VDOT_H_ +#define NUMPY_CORE_SRC_MULTIARRAY_VDOT_H_ #include "common.h" @@ -15,4 +15,4 @@ CLONGDOUBLE_vdot(char *, npy_intp, char *, npy_intp, char *, npy_intp, void *); NPY_NO_EXPORT void OBJECT_vdot(char *, npy_intp, char *, npy_intp, char *, npy_intp, void *); -#endif +#endif /* NUMPY_CORE_SRC_MULTIARRAY_VDOT_H_ */ diff --git a/numpy/core/src/npymath/halffloat.c b/numpy/core/src/npymath/halffloat.c index cbaa11e43..51948c736 100644 --- a/numpy/core/src/npymath/halffloat.c +++ b/numpy/core/src/npymath/halffloat.c @@ -1,4 +1,5 @@ #define NPY_NO_DEPRECATED_API NPY_API_VERSION + #include "numpy/halffloat.h" /* diff --git a/numpy/core/src/npymath/npy_math_internal.h.src b/numpy/core/src/npymath/npy_math_internal.h.src index 1e46a2303..dd2424db8 100644 --- a/numpy/core/src/npymath/npy_math_internal.h.src +++ b/numpy/core/src/npymath/npy_math_internal.h.src @@ -55,6 +55,29 @@ */ #include "npy_math_private.h" +/* Magic binary numbers used by bit_count + * For type T, the magic numbers are computed as follows: + * Magic[0]: 01 01 01 01 01 01... = (T)~(T)0/3 + * Magic[1]: 0011 0011 0011... = (T)~(T)0/15 * 3 + * Magic[2]: 00001111 00001111... = (T)~(T)0/255 * 15 + * Magic[3]: 00000001 00000001... 
= (T)~(T)0/255 + * + * Counting bits set, in parallel + * Based on: http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel + * + * Generic Algorithm for type T: + * a = a - ((a >> 1) & (T)~(T)0/3); + * a = (a & (T)~(T)0/15*3) + ((a >> 2) & (T)~(T)0/15*3); + * a = (a + (a >> 4)) & (T)~(T)0/255*15; + * c = (T)(a * ((T)~(T)0/255)) >> (sizeof(T) - 1) * CHAR_BIT; +*/ + +static const npy_uint8 MAGIC8[] = {0x55u, 0x33u, 0x0Fu, 0x01u}; +static const npy_uint16 MAGIC16[] = {0x5555u, 0x3333u, 0x0F0Fu, 0x0101u}; +static const npy_uint32 MAGIC32[] = {0x55555555ul, 0x33333333ul, 0x0F0F0F0Ful, 0x01010101ul}; +static const npy_uint64 MAGIC64[] = {0x5555555555555555ull, 0x3333333333333333ull, 0x0F0F0F0F0F0F0F0Full, 0x0101010101010101ull}; + + /* ***************************************************************************** ** BASIC MATH FUNCTIONS ** @@ -457,21 +480,40 @@ NPY_INPLACE @type@ npy_frexp@c@(@type@ x, int* exp) * #c = l,,f# * #C = L,,F# */ + +/* + * On arm64 macOS, there's a bug with sin, cos, and tan where they don't + * raise "invalid" when given INFINITY as input. + */ +#if defined(__APPLE__) && defined(__arm64__) +#define WORKAROUND_APPLE_TRIG_BUG 1 +#else +#define WORKAROUND_APPLE_TRIG_BUG 0 +#endif + /**begin repeat1 * #kind = sin,cos,tan,sinh,cosh,tanh,fabs,floor,ceil,rint,trunc,sqrt,log10, * log,exp,expm1,asin,acos,atan,asinh,acosh,atanh,log1p,exp2,log2# * #KIND = SIN,COS,TAN,SINH,COSH,TANH,FABS,FLOOR,CEIL,RINT,TRUNC,SQRT,LOG10, * LOG,EXP,EXPM1,ASIN,ACOS,ATAN,ASINH,ACOSH,ATANH,LOG1P,EXP2,LOG2# + * #TRIG_WORKAROUND = WORKAROUND_APPLE_TRIG_BUG*3, 0*22# */ #ifdef HAVE_@KIND@@C@ NPY_INPLACE @type@ npy_@kind@@c@(@type@ x) { +#if @TRIG_WORKAROUND@ + if (!npy_isfinite(x)) { + return (x - x); + } +#endif return @kind@@c@(x); } #endif /**end repeat1**/ +#undef WORKAROUND_APPLE_TRIG_BUG + /**begin repeat1 * #kind = atan2,hypot,pow,fmod,copysign# * #KIND = ATAN2,HYPOT,POW,FMOD,COPYSIGN# @@ -795,3 +837,66 @@ npy_rshift@u@@c@(npy_@u@@type@ a, npy_@u@@type@ b) } /**end repeat1**/ /**end repeat**/ + + +#define __popcnt32 __popcnt +/**begin repeat + * + * #type = ubyte, ushort, uint, ulong, ulonglong# + * #STYPE = BYTE, SHORT, INT, LONG, LONGLONG# + * #c = hh, h, , l, ll# + */ +#undef TO_BITS_LEN +#if 0 +/**begin repeat1 + * #len = 8, 16, 32, 64# + */ +#elif NPY_BITSOF_@STYPE@ == @len@ + #define TO_BITS_LEN(X) X##@len@ +/**end repeat1**/ +#endif + + +NPY_INPLACE uint8_t +npy_popcount_parallel@c@(npy_@type@ a) +{ + a = a - ((a >> 1) & (npy_@type@) TO_BITS_LEN(MAGIC)[0]); + a = ((a & (npy_@type@) TO_BITS_LEN(MAGIC)[1])) + ((a >> 2) & (npy_@type@) TO_BITS_LEN(MAGIC)[1]); + a = (a + (a >> 4)) & (npy_@type@) TO_BITS_LEN(MAGIC)[2]; + return (npy_@type@) (a * (npy_@type@) TO_BITS_LEN(MAGIC)[3]) >> ((NPY_SIZEOF_@STYPE@ - 1) * CHAR_BIT); +} + +NPY_INPLACE uint8_t +npy_popcountu@c@(npy_@type@ a) +{ +/* use built-in popcount if present, else use our implementation */ +#if (defined(__clang__) || defined(__GNUC__)) && NPY_BITSOF_@STYPE@ >= 32 + return __builtin_popcount@c@(a); +#elif defined(_MSC_VER) && NPY_BITSOF_@STYPE@ >= 16 + /* no builtin __popcnt64 for 32 bits */ + #if defined(_WIN64) || (defined(_WIN32) && NPY_BITSOF_@STYPE@ != 64) + return TO_BITS_LEN(__popcnt)(a); + /* split 64 bit number into two 32 bit ints and return sum of counts */ + #elif (defined(_WIN32) && NPY_BITSOF_@STYPE@ == 64) + npy_uint32 left = (npy_uint32) (a>>32); + npy_uint32 right = (npy_uint32) a; + return __popcnt32(left) + __popcnt32(right); + #endif +#else + return npy_popcount_parallel@c@(a); +#endif +} +/**end 
repeat**/ + +/**begin repeat + * + * #type = byte, short, int, long, longlong# + * #c = hh, h, , l, ll# + */ +NPY_INPLACE uint8_t +npy_popcount@c@(npy_@type@ a) +{ + /* Return popcount of abs(a) */ + return npy_popcountu@c@(a < 0 ? -a : a); +} +/**end repeat**/ diff --git a/numpy/core/src/npymath/npy_math_private.h b/numpy/core/src/npymath/npy_math_private.h index 212d11a0b..7ca0c5ba0 100644 --- a/numpy/core/src/npymath/npy_math_private.h +++ b/numpy/core/src/npymath/npy_math_private.h @@ -19,7 +19,13 @@ #define _NPY_MATH_PRIVATE_H_ #include <Python.h> +#ifdef __cplusplus +#include <cmath> +using std::isgreater; +using std::isless; +#else #include <math.h> +#endif #include "npy_config.h" #include "npy_fpmath.h" @@ -507,17 +513,29 @@ typedef union { #else /* !_MSC_VER */ typedef union { npy_cdouble npy_z; +#ifdef __cplusplus + std::complex<double> c99z; +#else complex double c99_z; +#endif } __npy_cdouble_to_c99_cast; typedef union { npy_cfloat npy_z; +#ifdef __cplusplus + std::complex<float> c99z; +#else complex float c99_z; +#endif } __npy_cfloat_to_c99_cast; typedef union { npy_clongdouble npy_z; +#ifdef __cplusplus + std::complex<long double> c99_z; +#else complex long double c99_z; +#endif } __npy_clongdouble_to_c99_cast; #endif /* !_MSC_VER */ diff --git a/numpy/core/src/npysort/radixsort.c.src b/numpy/core/src/npysort/radixsort.c.src deleted file mode 100644 index 99d8ed42a..000000000 --- a/numpy/core/src/npysort/radixsort.c.src +++ /dev/null @@ -1,231 +0,0 @@ -#define NPY_NO_DEPRECATED_API NPY_API_VERSION - -#include "npy_sort.h" -#include "npysort_common.h" -#include <stdlib.h> - -/* - ***************************************************************************** - ** INTEGER SORTS ** - ***************************************************************************** - */ - - -/**begin repeat - * - * #TYPE = BOOL, BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG, - * LONGLONG, ULONGLONG# - * #suff = bool, byte, ubyte, short, ushort, int, uint, long, ulong, - * longlong, ulonglong# - * #type = npy_ubyte, npy_ubyte, npy_ubyte, npy_ushort, npy_ushort, npy_uint, - * npy_uint, npy_ulong, npy_ulong, npy_ulonglong, npy_ulonglong# - * #sign = 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0# - * #floating = 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0# - */ - -// Reference: https://github.com/eloj/radix-sorting#-key-derivation -#if @sign@ - // Floating-point is currently disabled. - // Floating-point tests succeed for double and float on macOS but not on Windows/Linux. - // Basic sorting tests succeed but others relying on sort fail. - // Possibly related to floating-point normalisation or multiple NaN reprs? Not sure. - #if @floating@ - // For floats, we invert the key if the sign bit is set, else we invert the sign bit. - #define KEY_OF(x) ((x) ^ (-((x) >> (sizeof(@type@) * 8 - 1)) | ((@type@)1 << (sizeof(@type@) * 8 - 1)))) - #else - // For signed ints, we flip the sign bit so the negatives are below the positives. 
- #define KEY_OF(x) ((x) ^ ((@type@)1 << (sizeof(@type@) * 8 - 1))) - #endif -#else - // For unsigned ints, the key is as-is - #define KEY_OF(x) (x) -#endif - -static inline npy_ubyte -nth_byte_@suff@(@type@ key, npy_intp l) { - return (key >> (l << 3)) & 0xFF; -} - -static @type@* -radixsort0_@suff@(@type@ *arr, @type@ *aux, npy_intp num) -{ - npy_intp cnt[sizeof(@type@)][1 << 8] = { { 0 } }; - npy_intp i; - size_t l; - @type@ key0 = KEY_OF(arr[0]); - size_t ncols = 0; - npy_ubyte cols[sizeof(@type@)]; - - for (i = 0; i < num; i++) { - @type@ k = KEY_OF(arr[i]); - - for (l = 0; l < sizeof(@type@); l++) { - cnt[l][nth_byte_@suff@(k, l)]++; - } - } - - for (l = 0; l < sizeof(@type@); l++) { - if (cnt[l][nth_byte_@suff@(key0, l)] != num) { - cols[ncols++] = l; - } - } - - for (l = 0; l < ncols; l++) { - npy_intp a = 0; - for (i = 0; i < 256; i++) { - npy_intp b = cnt[cols[l]][i]; - cnt[cols[l]][i] = a; - a += b; - } - } - - for (l = 0; l < ncols; l++) { - @type@* temp; - for (i = 0; i < num; i++) { - @type@ k = KEY_OF(arr[i]); - npy_intp dst = cnt[cols[l]][nth_byte_@suff@(k, cols[l])]++; - aux[dst] = arr[i]; - } - - temp = aux; - aux = arr; - arr = temp; - } - - return arr; -} - -NPY_NO_EXPORT int -radixsort_@suff@(void *start, npy_intp num, void *NPY_UNUSED(varr)) -{ - void *sorted; - @type@ *aux; - @type@ *arr = start; - @type@ k1, k2; - npy_bool all_sorted = 1; - - if (num < 2) { - return 0; - } - - k1 = KEY_OF(arr[0]); - for (npy_intp i = 1; i < num; i++) { - k2 = KEY_OF(arr[i]); - if (k1 > k2) { - all_sorted = 0; - break; - } - k1 = k2; - } - - if (all_sorted) { - return 0; - } - - aux = malloc(num * sizeof(@type@)); - if (aux == NULL) { - return -NPY_ENOMEM; - } - - sorted = radixsort0_@suff@(start, aux, num); - if (sorted != start) { - memcpy(start, sorted, num * sizeof(@type@)); - } - - free(aux); - return 0; -} - -static npy_intp* -aradixsort0_@suff@(@type@ *arr, npy_intp *aux, npy_intp *tosort, npy_intp num) -{ - npy_intp cnt[sizeof(@type@)][1 << 8] = { { 0 } }; - npy_intp i; - size_t l; - @type@ key0 = KEY_OF(arr[0]); - size_t ncols = 0; - npy_ubyte cols[sizeof(@type@)]; - - for (i = 0; i < num; i++) { - @type@ k = KEY_OF(arr[i]); - - for (l = 0; l < sizeof(@type@); l++) { - cnt[l][nth_byte_@suff@(k, l)]++; - } - } - - for (l = 0; l < sizeof(@type@); l++) { - if (cnt[l][nth_byte_@suff@(key0, l)] != num) { - cols[ncols++] = l; - } - } - - for (l = 0; l < ncols; l++) { - npy_intp a = 0; - for (i = 0; i < 256; i++) { - npy_intp b = cnt[cols[l]][i]; - cnt[cols[l]][i] = a; - a += b; - } - } - - for (l = 0; l < ncols; l++) { - npy_intp* temp; - for (i = 0; i < num; i++) { - @type@ k = KEY_OF(arr[tosort[i]]); - npy_intp dst = cnt[cols[l]][nth_byte_@suff@(k, cols[l])]++; - aux[dst] = tosort[i]; - } - - temp = aux; - aux = tosort; - tosort = temp; - } - - return tosort; -} - -NPY_NO_EXPORT int -aradixsort_@suff@(void *start, npy_intp* tosort, npy_intp num, void *NPY_UNUSED(varr)) -{ - npy_intp *sorted; - npy_intp *aux; - @type@ *arr = start; - @type@ k1, k2; - npy_bool all_sorted = 1; - - if (num < 2) { - return 0; - } - - k1 = KEY_OF(arr[tosort[0]]); - for (npy_intp i = 1; i < num; i++) { - k2 = KEY_OF(arr[tosort[i]]); - if (k1 > k2) { - all_sorted = 0; - break; - } - k1 = k2; - } - - if (all_sorted) { - return 0; - } - - aux = malloc(num * sizeof(npy_intp)); - if (aux == NULL) { - return -NPY_ENOMEM; - } - - sorted = aradixsort0_@suff@(start, aux, tosort, num); - if (sorted != tosort) { - memcpy(tosort, sorted, num * sizeof(npy_intp)); - } - - free(aux); - return 0; -} - -#undef KEY_OF - 
-/**end repeat**/ diff --git a/numpy/core/src/npysort/radixsort.cpp b/numpy/core/src/npysort/radixsort.cpp new file mode 100644 index 000000000..017ea43b6 --- /dev/null +++ b/numpy/core/src/npysort/radixsort.cpp @@ -0,0 +1,354 @@ +#define NPY_NO_DEPRECATED_API NPY_API_VERSION + +#include "npy_sort.h" +#include "npysort_common.h" + +#include "../common/numpy_tag.h" +#include <stdlib.h> +#include <type_traits> + +/* + ***************************************************************************** + ** INTEGER SORTS ** + ***************************************************************************** + */ + +// Reference: https://github.com/eloj/radix-sorting#-key-derivation +template <class T> +T +KEY_OF(T x) +{ + // Floating-point is currently disabled. + // Floating-point tests succeed for double and float on macOS but not on + // Windows/Linux. Basic sorting tests succeed but others relying on sort + // fail. Possibly related to floating-point normalisation or multiple NaN + // reprs? Not sure. + if (std::is_floating_point<T>::value) { + // For floats, we invert the key if the sign bit is set, else we invert + // the sign bit. + return ((x) ^ (-((x) >> (sizeof(T) * 8 - 1)) | + ((T)1 << (sizeof(T) * 8 - 1)))); + } + else if (std::is_signed<T>::value) { + // For signed ints, we flip the sign bit so the negatives are below the + // positives. + return ((x) ^ ((T)1 << (sizeof(T) * 8 - 1))); + } + else { + return x; + } +} + +template <class T> +static inline npy_ubyte +nth_byte(T key, npy_intp l) +{ + return (key >> (l << 3)) & 0xFF; +} + +template <class T> +static T * +radixsort0(T *start, T *aux, npy_intp num) +{ + npy_intp cnt[sizeof(T)][1 << 8] = {{0}}; + T key0 = KEY_OF(start[0]); + + for (npy_intp i = 0; i < num; i++) { + T k = KEY_OF(start[i]); + + for (size_t l = 0; l < sizeof(T); l++) { + cnt[l][nth_byte(k, l)]++; + } + } + + size_t ncols = 0; + npy_ubyte cols[sizeof(T)]; + for (size_t l = 0; l < sizeof(T); l++) { + if (cnt[l][nth_byte(key0, l)] != num) { + cols[ncols++] = l; + } + } + + for (size_t l = 0; l < ncols; l++) { + npy_intp a = 0; + for (npy_intp i = 0; i < 256; i++) { + npy_intp b = cnt[cols[l]][i]; + cnt[cols[l]][i] = a; + a += b; + } + } + + for (size_t l = 0; l < ncols; l++) { + T *temp; + for (npy_intp i = 0; i < num; i++) { + T k = KEY_OF(start[i]); + npy_intp dst = cnt[cols[l]][nth_byte(k, cols[l])]++; + aux[dst] = start[i]; + } + + temp = aux; + aux = start; + start = temp; + } + + return start; +} + +template <class T> +static int +radixsort_(T *start, npy_intp num) +{ + if (num < 2) { + return 0; + } + + npy_bool all_sorted = 1; + T k1 = KEY_OF(start[0]), k2; + for (npy_intp i = 1; i < num; i++) { + k2 = KEY_OF(start[i]); + if (k1 > k2) { + all_sorted = 0; + break; + } + k1 = k2; + } + + if (all_sorted) { + return 0; + } + + T *aux = (T *)malloc(num * sizeof(T)); + if (aux == nullptr) { + return -NPY_ENOMEM; + } + + T *sorted = radixsort0(start, aux, num); + if (sorted != start) { + memcpy(start, sorted, num * sizeof(T)); + } + + free(aux); + return 0; +} + +template <class T> +static int +radixsort(void *start, npy_intp num) +{ + return radixsort_((T *)start, num); +} + +template <class T> +static npy_intp * +aradixsort0(T *start, npy_intp *aux, npy_intp *tosort, npy_intp num) +{ + npy_intp cnt[sizeof(T)][1 << 8] = {{0}}; + T key0 = KEY_OF(start[0]); + + for (npy_intp i = 0; i < num; i++) { + T k = KEY_OF(start[i]); + + for (size_t l = 0; l < sizeof(T); l++) { + cnt[l][nth_byte(k, l)]++; + } + } + + size_t ncols = 0; + npy_ubyte cols[sizeof(T)]; + for (size_t l = 0; 
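/* keep only byte columns that actually differ between keys */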
l < sizeof(T); l++) { + if (cnt[l][nth_byte(key0, l)] != num) { + cols[ncols++] = l; + } + } + + for (size_t l = 0; l < ncols; l++) { + npy_intp a = 0; + for (npy_intp i = 0; i < 256; i++) { + npy_intp b = cnt[cols[l]][i]; + cnt[cols[l]][i] = a; + a += b; + } + } + + for (size_t l = 0; l < ncols; l++) { + npy_intp *temp; + for (npy_intp i = 0; i < num; i++) { + T k = KEY_OF(start[tosort[i]]); + npy_intp dst = cnt[cols[l]][nth_byte(k, cols[l])]++; + aux[dst] = tosort[i]; + } + + temp = aux; + aux = tosort; + tosort = temp; + } + + return tosort; +} + +template <class T> +static int +aradixsort_(T *start, npy_intp *tosort, npy_intp num) +{ + npy_intp *sorted; + npy_intp *aux; + T k1, k2; + npy_bool all_sorted = 1; + + if (num < 2) { + return 0; + } + + k1 = KEY_OF(start[tosort[0]]); + for (npy_intp i = 1; i < num; i++) { + k2 = KEY_OF(start[tosort[i]]); + if (k1 > k2) { + all_sorted = 0; + break; + } + k1 = k2; + } + + if (all_sorted) { + return 0; + } + + aux = (npy_intp *)malloc(num * sizeof(npy_intp)); + if (aux == NULL) { + return -NPY_ENOMEM; + } + + sorted = aradixsort0(start, aux, tosort, num); + if (sorted != tosort) { + memcpy(tosort, sorted, num * sizeof(npy_intp)); + } + + free(aux); + return 0; +} + +template <class T> +static int +aradixsort(void *start, npy_intp *tosort, npy_intp num) +{ + return aradixsort_((T *)start, tosort, num); +} + +extern "C" { +NPY_NO_EXPORT int +radixsort_bool(void *vec, npy_intp cnt, void *NPY_UNUSED(null)) +{ + return radixsort<npy_bool>(vec, cnt); +} +NPY_NO_EXPORT int +radixsort_byte(void *vec, npy_intp cnt, void *NPY_UNUSED(null)) +{ + return radixsort<npy_byte>(vec, cnt); +} +NPY_NO_EXPORT int +radixsort_ubyte(void *vec, npy_intp cnt, void *NPY_UNUSED(null)) +{ + return radixsort<npy_ubyte>(vec, cnt); +} +NPY_NO_EXPORT int +radixsort_short(void *vec, npy_intp cnt, void *NPY_UNUSED(null)) +{ + return radixsort<npy_short>(vec, cnt); +} +NPY_NO_EXPORT int +radixsort_ushort(void *vec, npy_intp cnt, void *NPY_UNUSED(null)) +{ + return radixsort<npy_ushort>(vec, cnt); +} +NPY_NO_EXPORT int +radixsort_int(void *vec, npy_intp cnt, void *NPY_UNUSED(null)) +{ + return radixsort<npy_int>(vec, cnt); +} +NPY_NO_EXPORT int +radixsort_uint(void *vec, npy_intp cnt, void *NPY_UNUSED(null)) +{ + return radixsort<npy_uint>(vec, cnt); +} +NPY_NO_EXPORT int +radixsort_long(void *vec, npy_intp cnt, void *NPY_UNUSED(null)) +{ + return radixsort<npy_long>(vec, cnt); +} +NPY_NO_EXPORT int +radixsort_ulong(void *vec, npy_intp cnt, void *NPY_UNUSED(null)) +{ + return radixsort<npy_ulong>(vec, cnt); +} +NPY_NO_EXPORT int +radixsort_longlong(void *vec, npy_intp cnt, void *NPY_UNUSED(null)) +{ + return radixsort<npy_longlong>(vec, cnt); +} +NPY_NO_EXPORT int +radixsort_ulonglong(void *vec, npy_intp cnt, void *NPY_UNUSED(null)) +{ + return radixsort<npy_ulonglong>(vec, cnt); +} +NPY_NO_EXPORT int +aradixsort_bool(void *vec, npy_intp *ind, npy_intp cnt, void *NPY_UNUSED(null)) +{ + return aradixsort<npy_bool>(vec, ind, cnt); +} +NPY_NO_EXPORT int +aradixsort_byte(void *vec, npy_intp *ind, npy_intp cnt, void *NPY_UNUSED(null)) +{ + return aradixsort<npy_byte>(vec, ind, cnt); +} +NPY_NO_EXPORT int +aradixsort_ubyte(void *vec, npy_intp *ind, npy_intp cnt, + void *NPY_UNUSED(null)) +{ + return aradixsort<npy_ubyte>(vec, ind, cnt); +} +NPY_NO_EXPORT int +aradixsort_short(void *vec, npy_intp *ind, npy_intp cnt, + void *NPY_UNUSED(null)) +{ + return aradixsort<npy_short>(vec, ind, cnt); +} +NPY_NO_EXPORT int +aradixsort_ushort(void *vec, npy_intp *ind, npy_intp cnt, + void 
*NPY_UNUSED(null)) +{ + return aradixsort<npy_ushort>(vec, ind, cnt); +} +NPY_NO_EXPORT int +aradixsort_int(void *vec, npy_intp *ind, npy_intp cnt, void *NPY_UNUSED(null)) +{ + return aradixsort<npy_int>(vec, ind, cnt); +} +NPY_NO_EXPORT int +aradixsort_uint(void *vec, npy_intp *ind, npy_intp cnt, void *NPY_UNUSED(null)) +{ + return aradixsort<npy_uint>(vec, ind, cnt); +} +NPY_NO_EXPORT int +aradixsort_long(void *vec, npy_intp *ind, npy_intp cnt, void *NPY_UNUSED(null)) +{ + return aradixsort<npy_long>(vec, ind, cnt); +} +NPY_NO_EXPORT int +aradixsort_ulong(void *vec, npy_intp *ind, npy_intp cnt, + void *NPY_UNUSED(null)) +{ + return aradixsort<npy_ulong>(vec, ind, cnt); +} +NPY_NO_EXPORT int +aradixsort_longlong(void *vec, npy_intp *ind, npy_intp cnt, + void *NPY_UNUSED(null)) +{ + return aradixsort<npy_longlong>(vec, ind, cnt); +} +NPY_NO_EXPORT int +aradixsort_ulonglong(void *vec, npy_intp *ind, npy_intp cnt, + void *NPY_UNUSED(null)) +{ + return aradixsort<npy_ulonglong>(vec, ind, cnt); +} +} diff --git a/numpy/core/src/umath/_operand_flag_tests.c.src b/numpy/core/src/umath/_operand_flag_tests.c.src index d22a5c507..c59e13baf 100644 --- a/numpy/core/src/umath/_operand_flag_tests.c.src +++ b/numpy/core/src/umath/_operand_flag_tests.c.src @@ -1,6 +1,7 @@ -#define NPY_NO_DEPRECATED_API NPY_API_VERSION - +#define PY_SSIZE_T_CLEAN #include <Python.h> + +#define NPY_NO_DEPRECATED_API NPY_API_VERSION #include <numpy/arrayobject.h> #include <numpy/ufuncobject.h> #include "numpy/npy_3kcompat.h" diff --git a/numpy/core/src/umath/_rational_tests.c.src b/numpy/core/src/umath/_rational_tests.c.src index 7b1e5627a..bf50a2226 100644 --- a/numpy/core/src/umath/_rational_tests.c.src +++ b/numpy/core/src/umath/_rational_tests.c.src @@ -1,16 +1,16 @@ /* Fixed size rational numbers exposed to Python */ - -#define NPY_NO_DEPRECATED_API NPY_API_VERSION - +#define PY_SSIZE_T_CLEAN #include <Python.h> #include <structmember.h> -#include <numpy/arrayobject.h> -#include <numpy/ufuncobject.h> -#include <numpy/npy_3kcompat.h> -#include <math.h> +#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#include "numpy/arrayobject.h" +#include "numpy/ufuncobject.h" +#include "numpy/npy_3kcompat.h" #include "common.h" /* for error_converting */ +#include <math.h> + /* Relevant arithmetic exceptions */ diff --git a/numpy/core/src/umath/_scaled_float_dtype.c b/numpy/core/src/umath/_scaled_float_dtype.c index cbea378f0..b6c19362a 100644 --- a/numpy/core/src/umath/_scaled_float_dtype.c +++ b/numpy/core/src/umath/_scaled_float_dtype.c @@ -11,10 +11,10 @@ * NOTE: The tests were initially written using private API and ABI, ideally * they should be replaced/modified with versions using public API. */ - -#define _UMATHMODULE -#define _MULTIARRAYMODULE #define NPY_NO_DEPRECATED_API NPY_API_VERSION +#define _MULTIARRAYMODULE +#define _UMATHMODULE + #include "numpy/ndarrayobject.h" #include "numpy/ufuncobject.h" @@ -398,6 +398,42 @@ float_to_from_sfloat_resolve_descriptors( } +/* + * Cast to boolean (for testing the logical functions a bit better). 
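+ *
+ * The strided loop below only tests the stored double against zero, so
+ * the dtype's scale factor never changes the outcome: a scaled float is
+ * zero exactly when its raw storage is zero (e.g. a stored 0.0 casts to
+ * False, a stored -3.5 to True).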
+ */ +static int +cast_sfloat_to_bool(PyArrayMethod_Context *NPY_UNUSED(context), + char *const data[], npy_intp const dimensions[], + npy_intp const strides[], NpyAuxData *NPY_UNUSED(auxdata)) +{ + npy_intp N = dimensions[0]; + char *in = data[0]; + char *out = data[1]; + for (npy_intp i = 0; i < N; i++) { + *(npy_bool *)out = *(double *)in != 0; + in += strides[0]; + out += strides[1]; + } + return 0; +} + +static NPY_CASTING +sfloat_to_bool_resolve_descriptors( + PyArrayMethodObject *NPY_UNUSED(self), + PyArray_DTypeMeta *NPY_UNUSED(dtypes[2]), + PyArray_Descr *given_descrs[2], + PyArray_Descr *loop_descrs[2]) +{ + Py_INCREF(given_descrs[0]); + loop_descrs[0] = given_descrs[0]; + if (loop_descrs[0] == NULL) { + return -1; + } + loop_descrs[1] = PyArray_DescrFromType(NPY_BOOL); /* cannot fail */ + return NPY_UNSAFE_CASTING; +} + + static int init_casts(void) { @@ -453,6 +489,22 @@ init_casts(void) return -1; } + slots[0].slot = NPY_METH_resolve_descriptors; + slots[0].pfunc = &sfloat_to_bool_resolve_descriptors; + slots[1].slot = NPY_METH_strided_loop; + slots[1].pfunc = &cast_sfloat_to_bool; + slots[2].slot = 0; + slots[2].pfunc = NULL; + + spec.name = "sfloat_to_bool_cast"; + dtypes[0] = &PyArray_SFloatDType; + dtypes[1] = PyArray_DTypeFromTypeNum(NPY_BOOL); + Py_DECREF(dtypes[1]); /* immortal anyway */ + + if (PyArray_AddCastingImplementation_FromSpec(&spec, 0)) { + return -1; + } + return 0; } @@ -733,9 +785,9 @@ NPY_NO_EXPORT PyObject * get_sfloat_dtype(PyObject *NPY_UNUSED(mod), PyObject *NPY_UNUSED(args)) { /* Allow calling the function multiple times. */ - static npy_bool initalized = NPY_FALSE; + static npy_bool initialized = NPY_FALSE; - if (initalized) { + if (initialized) { Py_INCREF(&PyArray_SFloatDType); return (PyObject *)&PyArray_SFloatDType; } @@ -764,6 +816,6 @@ get_sfloat_dtype(PyObject *NPY_UNUSED(mod), PyObject *NPY_UNUSED(args)) return NULL; } - initalized = NPY_TRUE; + initialized = NPY_TRUE; return (PyObject *)&PyArray_SFloatDType; } diff --git a/numpy/core/src/umath/_struct_ufunc_tests.c.src b/numpy/core/src/umath/_struct_ufunc_tests.c.src index d602656c8..ee71c4698 100644 --- a/numpy/core/src/umath/_struct_ufunc_tests.c.src +++ b/numpy/core/src/umath/_struct_ufunc_tests.c.src @@ -1,11 +1,13 @@ -#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#define PY_SSIZE_T_CLEAN +#include <Python.h> -#include "Python.h" -#include "math.h" +#define NPY_NO_DEPRECATED_API NPY_API_VERSION #include "numpy/ndarraytypes.h" #include "numpy/ufuncobject.h" #include "numpy/npy_3kcompat.h" +#include <math.h> + /* * struct_ufunc_test.c diff --git a/numpy/core/src/umath/_umath_tests.c.src b/numpy/core/src/umath/_umath_tests.c.src index 2e79d377e..ce42fc271 100644 --- a/numpy/core/src/umath/_umath_tests.c.src +++ b/numpy/core/src/umath/_umath_tests.c.src @@ -5,9 +5,10 @@ ** INCLUDES ** ***************************************************************************** */ -#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#define PY_SSIZE_T_CLEAN +#include <Python.h> -#include "Python.h" +#define NPY_NO_DEPRECATED_API NPY_API_VERSION #include "numpy/arrayobject.h" #include "numpy/ufuncobject.h" #include "numpy/npy_math.h" @@ -57,6 +58,19 @@ ***************************************************************************** */ +static void +always_error_loop( + char **NPY_UNUSED(args), npy_intp const *NPY_UNUSED(dimensions), + npy_intp const *NPY_UNUSED(steps), void *NPY_UNUSED(func)) +{ + NPY_ALLOW_C_API_DEF + NPY_ALLOW_C_API; + PyErr_SetString(PyExc_RuntimeError, "How unexpected :)!"); + 
NPY_DISABLE_C_API; + return; +} + + char *inner1d_signature = "(i),(i)->()"; /**begin repeat @@ -347,6 +361,9 @@ defdict = { */ +static PyUFuncGenericFunction always_error_functions[] = { always_error_loop }; +static void *always_error_data[] = { (void *)NULL }; +static char always_error_signatures[] = { NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE }; static PyUFuncGenericFunction inner1d_functions[] = { LONG_inner1d, DOUBLE_inner1d }; static void *inner1d_data[] = { (void *)NULL, (void *)NULL }; static char inner1d_signatures[] = { NPY_LONG, NPY_LONG, NPY_LONG, NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE }; @@ -374,6 +391,25 @@ static int addUfuncs(PyObject *dictionary) { PyObject *f; + f = PyUFunc_FromFuncAndData(always_error_functions, always_error_data, + always_error_signatures, 1, 2, 1, PyUFunc_None, "always_error", + "simply, broken, ufunc that sets an error (but releases the GIL).", + 0); + if (f == NULL) { + return -1; + } + PyDict_SetItemString(dictionary, "always_error", f); + Py_DECREF(f); + f = PyUFunc_FromFuncAndDataAndSignature(always_error_functions, + always_error_data, always_error_signatures, 1, 2, 1, PyUFunc_None, + "always_error_gufunc", + "simply, broken, gufunc that sets an error (but releases the GIL).", + 0, "(i),()->()"); + if (f == NULL) { + return -1; + } + PyDict_SetItemString(dictionary, "always_error_gufunc", f); + Py_DECREF(f); f = PyUFunc_FromFuncAndDataAndSignature(inner1d_functions, inner1d_data, inner1d_signatures, 2, 2, 1, PyUFunc_None, "inner1d", "inner on the last dimension and broadcast on the rest \n" @@ -585,7 +621,7 @@ fail: return NULL; } -// Testing the utilites of the CPU dispatcher +// Testing the utilities of the CPU dispatcher #ifndef NPY_DISABLE_OPTIMIZATION #include "_umath_tests.dispatch.h" #endif diff --git a/numpy/core/src/umath/_umath_tests.dispatch.c b/numpy/core/src/umath/_umath_tests.dispatch.c index 85f365010..9d8df4c86 100644 --- a/numpy/core/src/umath/_umath_tests.dispatch.c +++ b/numpy/core/src/umath/_umath_tests.dispatch.c @@ -1,12 +1,14 @@ /** - * Testing the utilites of the CPU dispatcher + * Testing the utilities of the CPU dispatcher * * @targets $werror baseline * SSE2 SSE41 AVX2 * VSX VSX2 VSX3 * NEON ASIMD ASIMDHP */ +#define PY_SSIZE_T_CLEAN #include <Python.h> + #include "npy_cpu_dispatch.h" #ifndef NPY_DISABLE_OPTIMIZATION diff --git a/numpy/core/src/umath/clip.c.src b/numpy/core/src/umath/clip.c.src deleted file mode 100644 index 9c4bac2d1..000000000 --- a/numpy/core/src/umath/clip.c.src +++ /dev/null @@ -1,119 +0,0 @@ -/** - * This module provides the inner loops for the clip ufunc - */ -#define _UMATHMODULE -#define _MULTIARRAYMODULE -#define NPY_NO_DEPRECATED_API NPY_API_VERSION - -#include "Python.h" - -#include "numpy/halffloat.h" -#include "numpy/npy_math.h" -#include "numpy/ndarraytypes.h" -#include "numpy/npy_common.h" -#include "numpy/utils.h" -#include "fast_loop_macros.h" - -/* - * Produce macros that perform nan/nat-propagating min and max - */ - -/**begin repeat - * #name = BOOL, - * BYTE, UBYTE, SHORT, USHORT, INT, UINT, - * LONG, ULONG, LONGLONG, ULONGLONG# - */ -#define _NPY_@name@_MIN(a, b) PyArray_MIN(a, b) -#define _NPY_@name@_MAX(a, b) PyArray_MAX(a, b) -/**end repeat**/ - -#define _NPY_HALF_MIN(a, b) (npy_half_isnan(a) || npy_half_le(a, b) ? (a) : (b)) -#define _NPY_HALF_MAX(a, b) (npy_half_isnan(a) || npy_half_ge(a, b) ? (a) : (b)) - -/**begin repeat - * #name = FLOAT, DOUBLE, LONGDOUBLE# - */ -#define _NPY_@name@_MIN(a, b) (npy_isnan(a) ? (a) : PyArray_MIN(a, b)) -#define _NPY_@name@_MAX(a, b) (npy_isnan(a) ? 
(a) : PyArray_MAX(a, b)) -/**end repeat**/ - -/**begin repeat - * #name = CFLOAT, CDOUBLE, CLONGDOUBLE# - */ -#define _NPY_@name@_MIN(a, b) (npy_isnan((a).real) || npy_isnan((a).imag) || PyArray_CLT(a, b) ? (a) : (b)) -#define _NPY_@name@_MAX(a, b) (npy_isnan((a).real) || npy_isnan((a).imag) || PyArray_CGT(a, b) ? (a) : (b)) -/**end repeat**/ - -/**begin repeat - * #name = DATETIME, TIMEDELTA# - */ -#define _NPY_@name@_MIN(a, b) ( \ - (a) == NPY_DATETIME_NAT ? (a) : \ - (b) == NPY_DATETIME_NAT ? (b) : \ - (a) < (b) ? (a) : (b) \ -) -#define _NPY_@name@_MAX(a, b) ( \ - (a) == NPY_DATETIME_NAT ? (a) : \ - (b) == NPY_DATETIME_NAT ? (b) : \ - (a) > (b) ? (a) : (b) \ -) -/**end repeat**/ - -/**begin repeat - * - * #name = BOOL, - * BYTE, UBYTE, SHORT, USHORT, INT, UINT, - * LONG, ULONG, LONGLONG, ULONGLONG, - * HALF, FLOAT, DOUBLE, LONGDOUBLE, - * CFLOAT, CDOUBLE, CLONGDOUBLE, - * DATETIME, TIMEDELTA# - * #type = npy_bool, - * npy_byte, npy_ubyte, npy_short, npy_ushort, npy_int, npy_uint, - * npy_long, npy_ulong, npy_longlong, npy_ulonglong, - * npy_half, npy_float, npy_double, npy_longdouble, - * npy_cfloat, npy_cdouble, npy_clongdouble, - * npy_datetime, npy_timedelta# - */ - -#define _NPY_CLIP(x, min, max) \ - _NPY_@name@_MIN(_NPY_@name@_MAX((x), (min)), (max)) - -NPY_NO_EXPORT void -@name@_clip(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) -{ - if (steps[1] == 0 && steps[2] == 0) { - /* min and max are constant throughout the loop, the most common case */ - /* NOTE: it may be possible to optimize these checks for nan */ - @type@ min_val = *(@type@ *)args[1]; - @type@ max_val = *(@type@ *)args[2]; - - char *ip1 = args[0], *op1 = args[3]; - npy_intp is1 = steps[0], os1 = steps[3]; - npy_intp n = dimensions[0]; - - /* contiguous, branch to let the compiler optimize */ - if (is1 == sizeof(@type@) && os1 == sizeof(@type@)) { - for(npy_intp i = 0; i < n; i++, ip1 += is1, op1 += os1) { - *(@type@ *)op1 = _NPY_CLIP(*(@type@ *)ip1, min_val, max_val); - } - } - else { - for(npy_intp i = 0; i < n; i++, ip1 += is1, op1 += os1) { - *(@type@ *)op1 = _NPY_CLIP(*(@type@ *)ip1, min_val, max_val); - } - } - } - else { - TERNARY_LOOP { - *(@type@ *)op1 = _NPY_CLIP(*(@type@ *)ip1, *(@type@ *)ip2, *(@type@ *)ip3); - } - } - npy_clear_floatstatus_barrier((char*)dimensions); -} - -// clean up the macros we defined above -#undef _NPY_CLIP -#undef _NPY_@name@_MAX -#undef _NPY_@name@_MIN - -/**end repeat**/ diff --git a/numpy/core/src/umath/clip.cpp b/numpy/core/src/umath/clip.cpp new file mode 100644 index 000000000..19d05c848 --- /dev/null +++ b/numpy/core/src/umath/clip.cpp @@ -0,0 +1,282 @@ +/** + * This module provides the inner loops for the clip ufunc + */ +#define _UMATHMODULE +#define _MULTIARRAYMODULE +#define NPY_NO_DEPRECATED_API NPY_API_VERSION + +#define PY_SSIZE_T_CLEAN +#include <Python.h> + +#include "numpy/halffloat.h" +#include "numpy/ndarraytypes.h" +#include "numpy/npy_common.h" +#include "numpy/npy_math.h" +#include "numpy/utils.h" + +#include "fast_loop_macros.h" + +#include "../common/numpy_tag.h" + +template <class T> +T +_NPY_MIN(T a, T b, npy::integral_tag const &) +{ + return PyArray_MIN(a, b); +} +template <class T> +T +_NPY_MAX(T a, T b, npy::integral_tag const &) +{ + return PyArray_MAX(a, b); +} + +npy_half +_NPY_MIN(npy_half a, npy_half b, npy::half_tag const &) +{ + return npy_half_isnan(a) || npy_half_le(a, b) ? 
(a) : (b); +} +npy_half +_NPY_MAX(npy_half a, npy_half b, npy::half_tag const &) +{ + return npy_half_isnan(a) || npy_half_ge(a, b) ? (a) : (b); +} + +template <class T> +T +_NPY_MIN(T a, T b, npy::floating_point_tag const &) +{ + return npy_isnan(a) ? (a) : PyArray_MIN(a, b); +} +template <class T> +T +_NPY_MAX(T a, T b, npy::floating_point_tag const &) +{ + return npy_isnan(a) ? (a) : PyArray_MAX(a, b); +} + +template <class T> +T +_NPY_MIN(T a, T b, npy::complex_tag const &) +{ + return npy_isnan((a).real) || npy_isnan((a).imag) || PyArray_CLT(a, b) + ? (a) + : (b); +} +template <class T> +T +_NPY_MAX(T a, T b, npy::complex_tag const &) +{ + return npy_isnan((a).real) || npy_isnan((a).imag) || PyArray_CGT(a, b) + ? (a) + : (b); +} + +template <class T> +T +_NPY_MIN(T a, T b, npy::date_tag const &) +{ + return (a) == NPY_DATETIME_NAT ? (a) + : (b) == NPY_DATETIME_NAT ? (b) + : (a) < (b) ? (a) + : (b); +} +template <class T> +T +_NPY_MAX(T a, T b, npy::date_tag const &) +{ + return (a) == NPY_DATETIME_NAT ? (a) + : (b) == NPY_DATETIME_NAT ? (b) + : (a) > (b) ? (a) + : (b); +} + +/* generic dispatcher */ +template <class Tag, class T = typename Tag::type> +T +_NPY_MIN(T const &a, T const &b) +{ + return _NPY_MIN(a, b, Tag{}); +} +template <class Tag, class T = typename Tag::type> +T +_NPY_MAX(T const &a, T const &b) +{ + return _NPY_MAX(a, b, Tag{}); +} + +template <class Tag, class T> +T +_NPY_CLIP(T x, T min, T max) +{ + return _NPY_MIN<Tag>(_NPY_MAX<Tag>((x), (min)), (max)); +} + +template <class Tag, class T = typename Tag::type> +static void +_npy_clip_(T **args, npy_intp const *dimensions, npy_intp const *steps) +{ + npy_intp n = dimensions[0]; + if (steps[1] == 0 && steps[2] == 0) { + /* min and max are constant throughout the loop, the most common case + */ + /* NOTE: it may be possible to optimize these checks for nan */ + T min_val = *args[1]; + T max_val = *args[2]; + + T *ip1 = args[0], *op1 = args[3]; + npy_intp is1 = steps[0] / sizeof(T), os1 = steps[3] / sizeof(T); + + /* contiguous, branch to let the compiler optimize */ + if (is1 == 1 && os1 == 1) { + for (npy_intp i = 0; i < n; i++, ip1++, op1++) { + *op1 = _NPY_CLIP<Tag>(*ip1, min_val, max_val); + } + } + else { + for (npy_intp i = 0; i < n; i++, ip1 += is1, op1 += os1) { + *op1 = _NPY_CLIP<Tag>(*ip1, min_val, max_val); + } + } + } + else { + T *ip1 = args[0], *ip2 = args[1], *ip3 = args[2], *op1 = args[3]; + npy_intp is1 = steps[0] / sizeof(T), is2 = steps[1] / sizeof(T), + is3 = steps[2] / sizeof(T), os1 = steps[3] / sizeof(T); + for (npy_intp i = 0; i < n; + i++, ip1 += is1, ip2 += is2, ip3 += is3, op1 += os1) + *op1 = _NPY_CLIP<Tag>(*ip1, *ip2, *ip3); + } + npy_clear_floatstatus_barrier((char *)dimensions); +} + +template <class Tag> +static void +_npy_clip(char **args, npy_intp const *dimensions, npy_intp const *steps) +{ + using T = typename Tag::type; + return _npy_clip_<Tag>((T **)args, dimensions, steps); +} + +extern "C" { +NPY_NO_EXPORT void +BOOL_clip(char **args, npy_intp const *dimensions, npy_intp const *steps, + void *NPY_UNUSED(func)) +{ + return _npy_clip<npy::bool_tag>(args, dimensions, steps); +} +NPY_NO_EXPORT void +BYTE_clip(char **args, npy_intp const *dimensions, npy_intp const *steps, + void *NPY_UNUSED(func)) +{ + return _npy_clip<npy::byte_tag>(args, dimensions, steps); +} +NPY_NO_EXPORT void +UBYTE_clip(char **args, npy_intp const *dimensions, npy_intp const *steps, + void *NPY_UNUSED(func)) +{ + return _npy_clip<npy::ubyte_tag>(args, dimensions, steps); +} +NPY_NO_EXPORT void +SHORT_clip(char 
**args, npy_intp const *dimensions, npy_intp const *steps, + void *NPY_UNUSED(func)) +{ + return _npy_clip<npy::short_tag>(args, dimensions, steps); +} +NPY_NO_EXPORT void +USHORT_clip(char **args, npy_intp const *dimensions, npy_intp const *steps, + void *NPY_UNUSED(func)) +{ + return _npy_clip<npy::ushort_tag>(args, dimensions, steps); +} +NPY_NO_EXPORT void +INT_clip(char **args, npy_intp const *dimensions, npy_intp const *steps, + void *NPY_UNUSED(func)) +{ + return _npy_clip<npy::int_tag>(args, dimensions, steps); +} +NPY_NO_EXPORT void +UINT_clip(char **args, npy_intp const *dimensions, npy_intp const *steps, + void *NPY_UNUSED(func)) +{ + return _npy_clip<npy::uint_tag>(args, dimensions, steps); +} +NPY_NO_EXPORT void +LONG_clip(char **args, npy_intp const *dimensions, npy_intp const *steps, + void *NPY_UNUSED(func)) +{ + return _npy_clip<npy::long_tag>(args, dimensions, steps); +} +NPY_NO_EXPORT void +ULONG_clip(char **args, npy_intp const *dimensions, npy_intp const *steps, + void *NPY_UNUSED(func)) +{ + return _npy_clip<npy::ulong_tag>(args, dimensions, steps); +} +NPY_NO_EXPORT void +LONGLONG_clip(char **args, npy_intp const *dimensions, npy_intp const *steps, + void *NPY_UNUSED(func)) +{ + return _npy_clip<npy::longlong_tag>(args, dimensions, steps); +} +NPY_NO_EXPORT void +ULONGLONG_clip(char **args, npy_intp const *dimensions, npy_intp const *steps, + void *NPY_UNUSED(func)) +{ + return _npy_clip<npy::ulonglong_tag>(args, dimensions, steps); +} +NPY_NO_EXPORT void +HALF_clip(char **args, npy_intp const *dimensions, npy_intp const *steps, + void *NPY_UNUSED(func)) +{ + return _npy_clip<npy::half_tag>(args, dimensions, steps); +} +NPY_NO_EXPORT void +FLOAT_clip(char **args, npy_intp const *dimensions, npy_intp const *steps, + void *NPY_UNUSED(func)) +{ + return _npy_clip<npy::float_tag>(args, dimensions, steps); +} +NPY_NO_EXPORT void +DOUBLE_clip(char **args, npy_intp const *dimensions, npy_intp const *steps, + void *NPY_UNUSED(func)) +{ + return _npy_clip<npy::double_tag>(args, dimensions, steps); +} +NPY_NO_EXPORT void +LONGDOUBLE_clip(char **args, npy_intp const *dimensions, npy_intp const *steps, + void *NPY_UNUSED(func)) +{ + return _npy_clip<npy::longdouble_tag>(args, dimensions, steps); +} +NPY_NO_EXPORT void +CFLOAT_clip(char **args, npy_intp const *dimensions, npy_intp const *steps, + void *NPY_UNUSED(func)) +{ + return _npy_clip<npy::cfloat_tag>(args, dimensions, steps); +} +NPY_NO_EXPORT void +CDOUBLE_clip(char **args, npy_intp const *dimensions, npy_intp const *steps, + void *NPY_UNUSED(func)) +{ + return _npy_clip<npy::cdouble_tag>(args, dimensions, steps); +} +NPY_NO_EXPORT void +CLONGDOUBLE_clip(char **args, npy_intp const *dimensions, + npy_intp const *steps, void *NPY_UNUSED(func)) +{ + return _npy_clip<npy::clongdouble_tag>(args, dimensions, steps); +} +NPY_NO_EXPORT void +DATETIME_clip(char **args, npy_intp const *dimensions, npy_intp const *steps, + void *NPY_UNUSED(func)) +{ + return _npy_clip<npy::datetime_tag>(args, dimensions, steps); +} +NPY_NO_EXPORT void +TIMEDELTA_clip(char **args, npy_intp const *dimensions, npy_intp const *steps, + void *NPY_UNUSED(func)) +{ + return _npy_clip<npy::timedelta_tag>(args, dimensions, steps); +} +} diff --git a/numpy/core/src/umath/clip.h b/numpy/core/src/umath/clip.h new file mode 100644 index 000000000..f69ebd1e3 --- /dev/null +++ b/numpy/core/src/umath/clip.h @@ -0,0 +1,73 @@ +#ifndef _NPY_UMATH_CLIP_H_ +#define _NPY_UMATH_CLIP_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +NPY_NO_EXPORT void +BOOL_clip(char 
**args, npy_intp const *dimensions, npy_intp const *steps, + void *NPY_UNUSED(func)); +NPY_NO_EXPORT void +BYTE_clip(char **args, npy_intp const *dimensions, npy_intp const *steps, + void *NPY_UNUSED(func)); +NPY_NO_EXPORT void +UBYTE_clip(char **args, npy_intp const *dimensions, npy_intp const *steps, + void *NPY_UNUSED(func)); +NPY_NO_EXPORT void +SHORT_clip(char **args, npy_intp const *dimensions, npy_intp const *steps, + void *NPY_UNUSED(func)); +NPY_NO_EXPORT void +USHORT_clip(char **args, npy_intp const *dimensions, npy_intp const *steps, + void *NPY_UNUSED(func)); +NPY_NO_EXPORT void +INT_clip(char **args, npy_intp const *dimensions, npy_intp const *steps, + void *NPY_UNUSED(func)); +NPY_NO_EXPORT void +UINT_clip(char **args, npy_intp const *dimensions, npy_intp const *steps, + void *NPY_UNUSED(func)); +NPY_NO_EXPORT void +LONG_clip(char **args, npy_intp const *dimensions, npy_intp const *steps, + void *NPY_UNUSED(func)); +NPY_NO_EXPORT void +ULONG_clip(char **args, npy_intp const *dimensions, npy_intp const *steps, + void *NPY_UNUSED(func)); +NPY_NO_EXPORT void +LONGLONG_clip(char **args, npy_intp const *dimensions, npy_intp const *steps, + void *NPY_UNUSED(func)); +NPY_NO_EXPORT void +ULONGLONG_clip(char **args, npy_intp const *dimensions, npy_intp const *steps, + void *NPY_UNUSED(func)); +NPY_NO_EXPORT void +HALF_clip(char **args, npy_intp const *dimensions, npy_intp const *steps, + void *NPY_UNUSED(func)); +NPY_NO_EXPORT void +FLOAT_clip(char **args, npy_intp const *dimensions, npy_intp const *steps, + void *NPY_UNUSED(func)); +NPY_NO_EXPORT void +DOUBLE_clip(char **args, npy_intp const *dimensions, npy_intp const *steps, + void *NPY_UNUSED(func)); +NPY_NO_EXPORT void +LONGDOUBLE_clip(char **args, npy_intp const *dimensions, npy_intp const *steps, + void *NPY_UNUSED(func)); +NPY_NO_EXPORT void +CFLOAT_clip(char **args, npy_intp const *dimensions, npy_intp const *steps, + void *NPY_UNUSED(func)); +NPY_NO_EXPORT void +CDOUBLE_clip(char **args, npy_intp const *dimensions, npy_intp const *steps, + void *NPY_UNUSED(func)); +NPY_NO_EXPORT void +CLONGDOUBLE_clip(char **args, npy_intp const *dimensions, + npy_intp const *steps, void *NPY_UNUSED(func)); +NPY_NO_EXPORT void +DATETIME_clip(char **args, npy_intp const *dimensions, npy_intp const *steps, + void *NPY_UNUSED(func)); +NPY_NO_EXPORT void +TIMEDELTA_clip(char **args, npy_intp const *dimensions, npy_intp const *steps, + void *NPY_UNUSED(func)); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/numpy/core/src/umath/clip.h.src b/numpy/core/src/umath/clip.h.src deleted file mode 100644 index f16856cdf..000000000 --- a/numpy/core/src/umath/clip.h.src +++ /dev/null @@ -1,18 +0,0 @@ -#ifndef _NPY_UMATH_CLIP_H_ -#define _NPY_UMATH_CLIP_H_ - - -/**begin repeat - * - * #name = BOOL, - * BYTE, UBYTE, SHORT, USHORT, INT, UINT, - * LONG, ULONG, LONGLONG, ULONGLONG, - * HALF, FLOAT, DOUBLE, LONGDOUBLE, - * CFLOAT, CDOUBLE, CLONGDOUBLE, - * DATETIME, TIMEDELTA# - */ -NPY_NO_EXPORT void -@name@_clip(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)); -/**end repeat**/ - -#endif diff --git a/numpy/core/src/umath/dispatching.c b/numpy/core/src/umath/dispatching.c index b97441b13..8e99c0420 100644 --- a/numpy/core/src/umath/dispatching.c +++ b/numpy/core/src/umath/dispatching.c @@ -34,11 +34,12 @@ * into the `signature` so that it is available to the ufunc loop. 
* */ -#include <Python.h> - -#define _UMATHMODULE -#define _MULTIARRAYMODULE #define NPY_NO_DEPRECATED_API NPY_API_VERSION +#define _MULTIARRAYMODULE +#define _UMATHMODULE + +#define PY_SSIZE_T_CLEAN +#include <Python.h> #include "numpy/ndarraytypes.h" #include "common.h" @@ -192,6 +193,10 @@ resolve_implementation_info(PyUFuncObject *ufunc, /* Unspecified out always matches (see below for inputs) */ continue; } + if (resolver_dtype == (PyArray_DTypeMeta *)Py_None) { + /* always matches */ + continue; + } if (given_dtype == resolver_dtype) { continue; } @@ -266,8 +271,39 @@ resolve_implementation_info(PyUFuncObject *ufunc, * the subclass should be considered a better match * (subclasses are always more specific). */ + /* Whether this (normally output) dtype was specified at all */ + if (op_dtypes[i] == NULL) { + /* + * When DType is completely unspecified, prefer abstract + * over concrete, assuming it will resolve. + * Furthermore, we cannot decide which abstract/None + * is "better", only concrete ones which are subclasses + * of Abstract ones are defined as worse. + */ + npy_bool prev_is_concrete = NPY_FALSE; + npy_bool new_is_concrete = NPY_FALSE; + if ((prev_dtype != Py_None) && + !NPY_DT_is_abstract((PyArray_DTypeMeta *)prev_dtype)) { + prev_is_concrete = NPY_TRUE; + } + if ((new_dtype != Py_None) && + !NPY_DT_is_abstract((PyArray_DTypeMeta *)new_dtype)) { + new_is_concrete = NPY_TRUE; + } + if (prev_is_concrete == new_is_concrete) { + best = -1; + } + else if (prev_is_concrete) { + unambiguously_equally_good = 0; + best = 1; + } + else { + unambiguously_equally_good = 0; + best = 0; + } + } /* If either is None, the other is strictly more specific */ - if (prev_dtype == Py_None) { + else if (prev_dtype == Py_None) { unambiguously_equally_good = 0; best = 1; } @@ -288,13 +324,29 @@ resolve_implementation_info(PyUFuncObject *ufunc, */ best = -1; } + else if (!NPY_DT_is_abstract((PyArray_DTypeMeta *)prev_dtype)) { + /* old is not abstract, so better (both not possible) */ + unambiguously_equally_good = 0; + best = 0; + } + else if (!NPY_DT_is_abstract((PyArray_DTypeMeta *)new_dtype)) { + /* new is not abstract, so better (both not possible) */ + unambiguously_equally_good = 0; + best = 1; + } /* - * TODO: Unreachable, but we will need logic for abstract - * DTypes to decide if one is a subclass of the other - * (And their subclass relation is well defined.) + * TODO: This will need logic for abstract DTypes to decide if + * one is a subclass of the other (And their subclass + * relation is well defined). For now, we bail out + * in case someone manages to get here. */ else { - assert(0); + PyErr_SetString(PyExc_NotImplementedError, + "deciding which one of two abstract dtypes is " + "a better match is not yet implemented. This " + "will pick the better (or bail) in the future."); + *out_info = NULL; + return -1; } if ((current_best != -1) && (current_best != best)) { @@ -611,6 +663,35 @@ promote_and_get_info_and_ufuncimpl(PyUFuncObject *ufunc, } return info; } + else if (info == NULL && op_dtypes[0] == NULL) { + /* + * If we have a reduction, fill in the unspecified input/array + * assuming it should have the same dtype as the operand input + * (or the output one if given). + * Then, try again. In some cases, this will choose different + * paths, such as `ll->?` instead of an `??->?` loop for `np.equal` + * when the input is `.l->.` (`.` meaning undefined). This will + * then cause an error. 
But cast to `?` would always lose + * information, and in many cases important information: + * + * ```python + * from operator import eq + * from functools import reduce + * + * reduce(eq, [1, 2, 3]) != reduce(eq, [True, True, True]) + * ``` + * + * The special cases being `logical_(and|or|xor)` which can always + * cast to boolean ahead of time and still give the right answer + * (unsafe cast to bool is fine here). We special case these at + * the time of this comment (NumPy 1.21). + */ + assert(ufunc->nin == 2 && ufunc->nout == 1); + op_dtypes[0] = op_dtypes[2] != NULL ? op_dtypes[2] : op_dtypes[1]; + Py_INCREF(op_dtypes[0]); + return promote_and_get_info_and_ufuncimpl(ufunc, + ops, signature, op_dtypes, allow_legacy_promotion, 1); + } } /* @@ -742,3 +823,94 @@ promote_and_get_ufuncimpl(PyUFuncObject *ufunc, return method; } + + +/* + * Special promoter for the logical ufuncs. The logical ufuncs can always + * use the ??->? and still get the correct output (as long as the output + * is not supposed to be `object`). + */ +static int +logical_ufunc_promoter(PyUFuncObject *NPY_UNUSED(ufunc), + PyArray_DTypeMeta *op_dtypes[], PyArray_DTypeMeta *signature[], + PyArray_DTypeMeta *new_op_dtypes[]) +{ + /* + * If we find any object DType at all, we currently force to object. + * However, if the output is specified and not object, there is no point, + * it should be just as well to cast the input rather than doing the + * unsafe out cast. + */ + int force_object = 0; + + for (int i = 0; i < 3; i++) { + PyArray_DTypeMeta *item; + if (signature[i] != NULL) { + item = signature[i]; + Py_INCREF(item); + if (item->type_num == NPY_OBJECT) { + force_object = 1; + } + } + else { + /* Always override to boolean */ + item = PyArray_DTypeFromTypeNum(NPY_BOOL); + if (op_dtypes[i] != NULL && op_dtypes[i]->type_num == NPY_OBJECT) { + force_object = 1; + } + } + new_op_dtypes[i] = item; + } + + if (!force_object || (op_dtypes[2] != NULL + && op_dtypes[2]->type_num != NPY_OBJECT)) { + return 0; + } + /* + * Actually, we have to use the OBJECT loop after all, set all we can + * to object (that might not work out, but try). + * + * NOTE: Change this to check for `op_dtypes[0] == NULL` to STOP + * returning `object` for `np.logical_and.reduce(obj_arr)` + * which will also affect `np.all` and `np.any`! 
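+ *
+ * Illustration (not exercised in this file): with this promoter
+ * installed, a call like `np.logical_and(int_arr, float_arr)` can
+ * resolve to the `??->?` loop by casting both inputs to bool, while
+ * a call on object arrays with no signature/out given still selects
+ * the OBJECT loop through the `force_object` path here.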
+ */ + for (int i = 0; i < 3; i++) { + if (signature[i] != NULL) { + continue; + } + Py_SETREF(new_op_dtypes[i], PyArray_DTypeFromTypeNum(NPY_OBJECT)); + } + return 0; +} + + +NPY_NO_EXPORT int +install_logical_ufunc_promoter(PyObject *ufunc) +{ + if (PyObject_Type(ufunc) != (PyObject *)&PyUFunc_Type) { + PyErr_SetString(PyExc_RuntimeError, + "internal numpy array, logical ufunc was not a ufunc?!"); + return -1; + } + PyObject *dtype_tuple = PyTuple_Pack(3, + &PyArrayDescr_Type, &PyArrayDescr_Type, &PyArrayDescr_Type, NULL); + if (dtype_tuple == NULL) { + return -1; + } + PyObject *promoter = PyCapsule_New(&logical_ufunc_promoter, + "numpy._ufunc_promoter", NULL); + if (promoter == NULL) { + Py_DECREF(dtype_tuple); + return -1; + } + + PyObject *info = PyTuple_Pack(2, dtype_tuple, promoter); + Py_DECREF(dtype_tuple); + Py_DECREF(promoter); + if (info == NULL) { + return -1; + } + + return PyUFunc_AddLoop((PyUFuncObject *)ufunc, info, 0); +} + diff --git a/numpy/core/src/umath/dispatching.h b/numpy/core/src/umath/dispatching.h index 8d116873c..2f314615d 100644 --- a/numpy/core/src/umath/dispatching.h +++ b/numpy/core/src/umath/dispatching.h @@ -26,4 +26,8 @@ NPY_NO_EXPORT PyObject * add_and_return_legacy_wrapping_ufunc_loop(PyUFuncObject *ufunc, PyArray_DTypeMeta *operation_dtypes[], int ignore_duplicate); +NPY_NO_EXPORT int +install_logical_ufunc_promoter(PyObject *ufunc); + + #endif /*_NPY_DISPATCHING_H */ diff --git a/numpy/core/src/umath/extobj.c b/numpy/core/src/umath/extobj.c index cd81f7734..6b9a27e26 100644 --- a/numpy/core/src/umath/extobj.c +++ b/numpy/core/src/umath/extobj.c @@ -1,7 +1,8 @@ -#define _UMATHMODULE -#define _MULTIARRAYMODULE #define NPY_NO_DEPRECATED_API NPY_API_VERSION +#define _MULTIARRAYMODULE +#define _UMATHMODULE +#define PY_SSIZE_T_CLEAN #include <Python.h> #include "npy_config.h" diff --git a/numpy/core/src/umath/legacy_array_method.c b/numpy/core/src/umath/legacy_array_method.c index 4351f1d25..a423823d4 100644 --- a/numpy/core/src/umath/legacy_array_method.c +++ b/numpy/core/src/umath/legacy_array_method.c @@ -2,12 +2,13 @@ * This file defines most of the machinery in order to wrap legacy style * ufunc loops into new style arraymethods. */ +#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#define _MULTIARRAYMODULE +#define _UMATHMODULE +#define PY_SSIZE_T_CLEAN #include <Python.h> -#define _UMATHMODULE -#define _MULTIARRAYMODULE -#define NPY_NO_DEPRECATED_API NPY_API_VERSION #include "numpy/ndarraytypes.h" #include "convert_datatype.h" @@ -216,6 +217,25 @@ PyArray_NewLegacyWrappingArrayMethod(PyUFuncObject *ufunc, */ int any_output_flexible = 0; NPY_ARRAYMETHOD_FLAGS flags = 0; + if (ufunc->nargs == 3 && + signature[0]->type_num == NPY_BOOL && + signature[1]->type_num == NPY_BOOL && + signature[2]->type_num == NPY_BOOL && ( + strcmp(ufunc->name, "logical_or") == 0 || + strcmp(ufunc->name, "logical_and") == 0 || + strcmp(ufunc->name, "logical_xor") == 0)) { + /* + * This is a logical ufunc, and the `??->?` loop. It is always OK + * to cast any input to bool, because that cast is defined by + * truthiness. + * This allows us to ensure two things: + * 1. `np.all`/`np.any` know that force casting the input is OK + * (they must do this since there are no `?l->?`, etc. loops) + * 2. The logical functions automatically work for any DType + * implementing a cast to boolean. 
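+ *
+ *    E.g. `np.all(np.array([1, 2, 0]))` relies on point 1: there is no
+ *    `?l->?` (or similar) loop, so the integer input must be force-cast
+ *    to bool before the `??->?` loop runs.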
+ */ + flags = _NPY_METH_FORCE_CAST_INPUTS; + } for (int i = 0; i < ufunc->nin+ufunc->nout; i++) { if (signature[i]->singleton->flags & ( diff --git a/numpy/core/src/umath/loops.c.src b/numpy/core/src/umath/loops.c.src index 8df439aca..7c0710819 100644 --- a/numpy/core/src/umath/loops.c.src +++ b/numpy/core/src/umath/loops.c.src @@ -1,11 +1,11 @@ /* -*- c -*- */ +#define PY_SSIZE_T_CLEAN +#include <Python.h> #define _UMATHMODULE #define _MULTIARRAYMODULE #define NPY_NO_DEPRECATED_API NPY_API_VERSION -#include "Python.h" - #include "npy_config.h" #include "numpy/npy_common.h" #include "numpy/arrayobject.h" diff --git a/numpy/core/src/umath/loops.h.src b/numpy/core/src/umath/loops.h.src index 02d749a5e..0938cd050 100644 --- a/numpy/core/src/umath/loops.h.src +++ b/numpy/core/src/umath/loops.h.src @@ -210,6 +210,32 @@ NPY_CPU_DISPATCH_DECLARE(NPY_NO_EXPORT void @TYPE@_@kind@, /**end repeat1**/ /**end repeat**/ +#ifndef NPY_DISABLE_OPTIMIZATION + #include "loops_umath_fp.dispatch.h" +#endif + +/**begin repeat + * #TYPE = FLOAT, DOUBLE# + */ +/**begin repeat1 + * #func = tanh, exp2, log2, log10, expm1, log1p, cbrt, tan, arcsin, arccos, arctan, sinh, cosh, arcsinh, arccosh, arctanh# + */ + +NPY_CPU_DISPATCH_DECLARE(NPY_NO_EXPORT void @TYPE@_@func@, + (char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))) + +/**end repeat1**/ +/**end repeat**/ + +/**begin repeat + * #func = sin, cos# + */ + +NPY_CPU_DISPATCH_DECLARE(NPY_NO_EXPORT void DOUBLE_@func@, + (char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))) + +/**end repeat**/ + /**begin repeat * #TYPE = FLOAT, DOUBLE# */ diff --git a/numpy/core/src/umath/loops_exponent_log.dispatch.c.src b/numpy/core/src/umath/loops_exponent_log.dispatch.c.src index cc0fd19bb..95cce553a 100644 --- a/numpy/core/src/umath/loops_exponent_log.dispatch.c.src +++ b/numpy/core/src/umath/loops_exponent_log.dispatch.c.src @@ -868,6 +868,32 @@ AVX512F_exp_DOUBLE(npy_double * op, * = p(r) * = 2((r/2) + 1/3*(r/2)^3 + 1/5*(r/2)^5 + ...) */ + +/* LLVM has a bug where AVX-512F intrinsic `_mm512_mask_mul_pd` emits an + * unmasked operation with a masked store. This can cause FP exceptions to + * occur for the lanes that are supposed to have been masked. + * + * See https://bugs.llvm.org/show_bug.cgi?id=51988 + * + * Note, this affects LLVM-based compilers like Apple Clang, Clang, and Intel's + * ICX. 
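+ *
+ * In miniature, the workaround below pre-masks the input so that even
+ * an unmasked multiply cannot raise an exception (sketch; `m`, `y` and
+ * `zeros` are placeholder names):
+ *
+ *     r = _mm512_mask_mul_pd(x, m, x, y);   // buggy codegen may multiply
+ *                                           // all lanes
+ *     xm = avx512_set_masked_lanes_pd(x, zeros, ~m);
+ *     r = _mm512_mask_mul_pd(x, m, xm, y);  // stray lanes compute 0 * y,
+ *                                           // harmless for the finite
+ *                                           // constants used here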
+ */ +#if defined(__clang__) + #if defined(__apple_build_version__) + // Apple Clang + #if __apple_build_version__ > 11000000 + // Apple Clang after v11 + #define WORKAROUND_LLVM__mm512_mask_mul_pd + #endif + #else + // Clang, not Apple Clang + #if __clang_major__ > 9 + // Clang v9+ + #define WORKAROUND_LLVM__mm512_mask_mul_pd + #endif + #endif +#endif + static void AVX512F_log_DOUBLE(npy_double * op, npy_double * ip, @@ -954,8 +980,12 @@ AVX512F_log_DOUBLE(npy_double * op, denormal_mask = _mm512_cmp_epi64_mask(top12, _mm512_set1_epi64(0), _CMP_EQ_OQ); denormal_mask = (~zero_mask) & denormal_mask; + __m512d masked_x = x; + #ifdef WORKAROUND_LLVM__mm512_mask_mul_pd + masked_x = avx512_set_masked_lanes_pd(masked_x, zeros_d, (~denormal_mask)); + #endif ix = _mm512_castpd_si512(_mm512_mask_mul_pd(x, denormal_mask, - x, _mm512_set1_pd(0x1p52))); + masked_x, _mm512_set1_pd(0x1p52))); ix = _mm512_mask_sub_epi64(ix, denormal_mask, ix, _mm512_set1_epi64(52ULL << 52)); @@ -1039,6 +1069,9 @@ AVX512F_log_DOUBLE(npy_double * op, npy_set_floatstatus_divbyzero(); } } + +#undef WORKAROUND_LLVM__mm512_mask_mul_pd + #endif // AVX512F_NOCLANG_BUG #ifdef SIMD_AVX512_SKX diff --git a/numpy/core/src/umath/loops_umath_fp.dispatch.c.src b/numpy/core/src/umath/loops_umath_fp.dispatch.c.src new file mode 100644 index 000000000..852604655 --- /dev/null +++ b/numpy/core/src/umath/loops_umath_fp.dispatch.c.src @@ -0,0 +1,141 @@ +/*@targets + ** $maxopt baseline avx512_skx + */ +#include "numpy/npy_math.h" +#include "simd/simd.h" +#include "loops_utils.h" +#include "loops.h" +#include "npy_svml.h" +#include "fast_loop_macros.h" + +#if NPY_SIMD && defined(NPY_HAVE_AVX512_SKX) && defined(NPY_CAN_LINK_SVML) +/**begin repeat + * #sfx = f32, f64# + * #func_suffix = f16, 8# + */ +/**begin repeat1 + * #func = tanh, exp2, log2, log10, expm1, log1p, cbrt, tan, asin, acos, atan, sinh, cosh, asinh, acosh, atanh# + * #default_val = 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0# + */ +static void +simd_@func@_@sfx@(const npyv_lanetype_@sfx@ *src, npy_intp ssrc, + npyv_lanetype_@sfx@ *dst, npy_intp sdst, npy_intp len) +{ + const int vstep = npyv_nlanes_@sfx@; + for (; len > 0; len -= vstep, src += ssrc*vstep, dst += sdst*vstep) { + npyv_@sfx@ x; + #if @default_val@ + if (ssrc == 1) { + x = npyv_load_till_@sfx@(src, len, @default_val@); + } else { + x = npyv_loadn_till_@sfx@(src, ssrc, len, @default_val@); + } + #else + if (ssrc == 1) { + x = npyv_load_tillz_@sfx@(src, len); + } else { + x = npyv_loadn_tillz_@sfx@(src, ssrc, len); + } + #endif + npyv_@sfx@ out = __svml_@func@@func_suffix@(x); + if (sdst == 1) { + npyv_store_till_@sfx@(dst, len, out); + } else { + npyv_storen_till_@sfx@(dst, sdst, len, out); + } + } + npyv_cleanup(); +} +/**end repeat1**/ +/**end repeat**/ + +/**begin repeat + * #func = sin, cos# + */ +static void +simd_@func@_f64(const double *src, npy_intp ssrc, + double *dst, npy_intp sdst, npy_intp len) +{ + const int vstep = npyv_nlanes_f64; + for (; len > 0; len -= vstep, src += ssrc*vstep, dst += sdst*vstep) { + npyv_f64 x; + if (ssrc == 1) { + x = npyv_load_tillz_f64(src, len); + } else { + x = npyv_loadn_tillz_f64(src, ssrc, len); + } + npyv_f64 out = __svml_@func@8(x); + if (sdst == 1) { + npyv_store_till_f64(dst, len, out); + } else { + npyv_storen_till_f64(dst, sdst, len, out); + } + } + npyv_cleanup(); +} +/**end repeat**/ +#endif + +/**begin repeat + * #TYPE = DOUBLE, FLOAT# + * #type = npy_double, npy_float# + * #vsub = , f# + * #sfx = f64, f32# + */ +/**begin repeat1 + * #func = tanh, exp2, log2, 
log10, expm1, log1p, cbrt, tan, arcsin, arccos, arctan, sinh, cosh, arcsinh, arccosh, arctanh# + * #intrin = tanh, exp2, log2, log10, expm1, log1p, cbrt, tan, asin, acos, atan, sinh, cosh, asinh, acosh, atanh# + */ +NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@func@) +(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data)) +{ +#if NPY_SIMD && defined(NPY_HAVE_AVX512_SKX) && defined(NPY_CAN_LINK_SVML) + const @type@ *src = (@type@*)args[0]; + @type@ *dst = (@type@*)args[1]; + const int lsize = sizeof(src[0]); + const npy_intp ssrc = steps[0] / lsize; + const npy_intp sdst = steps[1] / lsize; + const npy_intp len = dimensions[0]; + assert(steps[0] % lsize == 0 && steps[1] % lsize == 0); + if (!is_mem_overlap(src, steps[0], dst, steps[1], len) && + npyv_loadable_stride_@sfx@(ssrc) && + npyv_storable_stride_@sfx@(sdst)) { + simd_@intrin@_@sfx@(src, ssrc, dst, sdst, len); + return; + } +#endif + UNARY_LOOP { + const @type@ in1 = *(@type@ *)ip1; + *(@type@ *)op1 = npy_@intrin@@vsub@(in1); + } +} +/**end repeat1**/ +/**end repeat**/ + +/**begin repeat + * #func = sin, cos# + */ +NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(DOUBLE_@func@) +(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data)) +{ +#if NPY_SIMD && defined(NPY_HAVE_AVX512_SKX) && defined(NPY_CAN_LINK_SVML) + const double *src = (double*)args[0]; + double *dst = (double*)args[1]; + const int lsize = sizeof(src[0]); + const npy_intp ssrc = steps[0] / lsize; + const npy_intp sdst = steps[1] / lsize; + const npy_intp len = dimensions[0]; + assert(steps[0] % lsize == 0 && steps[1] % lsize == 0); + if (!is_mem_overlap(src, steps[0], dst, steps[1], len) && + npyv_loadable_stride_f64(ssrc) && + npyv_storable_stride_f64(sdst)) { + simd_@func@_f64(src, ssrc, dst, sdst, len); + return; + } +#endif + UNARY_LOOP { + const npy_double in1 = *(npy_double *)ip1; + *(npy_double *)op1 = npy_@func@(in1); + } +} +/**end repeat**/ diff --git a/numpy/core/src/umath/loops_unary_fp.dispatch.c.src b/numpy/core/src/umath/loops_unary_fp.dispatch.c.src index 3a1ea82f9..2d5917282 100644 --- a/numpy/core/src/umath/loops_unary_fp.dispatch.c.src +++ b/numpy/core/src/umath/loops_unary_fp.dispatch.c.src @@ -77,6 +77,56 @@ NPY_FINLINE double c_square_f64(double a) */ #define CONTIG 0 #define NCONTIG 1 + +/* + * clang has a bug that's present at -O1 or greater. When partially loading a + * vector register for a reciprocal operation, the remaining elements are set + * to 1 to avoid divide-by-zero. The partial load is paired with a partial + * store after the reciprocal operation. clang notices that the entire register + * is not needed for the store and optimizes out the fill of 1 to the remaining + * elements. This causes either a divide-by-zero or 0/0 with invalid exception + * that we were trying to avoid by filling. + * + * Using a dummy variable marked 'volatile' convinces clang not to ignore + * the explicit fill of remaining elements. If `-ftrapping-math` is + * supported, then it'll also avoid the bug. `-ftrapping-math` is supported + * on Apple clang v12+ for x86_64. It is not currently supported for arm64. + * `-ftrapping-math` is set by default of Numpy builds in + * numpy/distutils/ccompiler.py. 
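+ *
+ * In miniature, the guarded partial iteration below amounts to (sketch
+ * for the f32 reciprocal case; `keep` is the dummy):
+ *
+ *     npyv_f32 v = npyv_load_till_f32(src, len, 1); // tail lanes = 1
+ *     volatile npyv_f32 keep = v;                   // keep the fill
+ *     npyv_store_till_f32(dst, len, npyv_recip_f32(v));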
+ * + * Note: Apple clang and clang upstream have different versions that overlap + */ +#if defined(__clang__) + #if defined(__apple_build_version__) + // Apple Clang + #if __apple_build_version__ < 12000000 + // Apple Clang before v12 + #define WORKAROUND_CLANG_RECIPROCAL_BUG 1 + #elif defined(NPY_CPU_X86) || defined(NPY_CPU_AMD64) + // Apple Clang after v12, targeting i386 or x86_64 + #define WORKAROUND_CLANG_RECIPROCAL_BUG 0 + #else + // Apple Clang after v12, not targeting i386 or x86_64 + #define WORKAROUND_CLANG_RECIPROCAL_BUG 1 + #endif + #else + // Clang, not Apple Clang + #if __clang_major__ < 10 + // Clang before v10 + #define WORKAROUND_CLANG_RECIPROCAL_BUG 1 + #elif defined(NPY_CPU_X86) || defined(NPY_CPU_AMD64) + // Clang v10+, targeting i386 or x86_64 + #define WORKAROUND_CLANG_RECIPROCAL_BUG 0 + #else + // Clang v10+, not targeting i386 or x86_64 + #define WORKAROUND_CLANG_RECIPROCAL_BUG 1 + #endif + #endif +#else +// Not a Clang compiler +#define WORKAROUND_CLANG_RECIPROCAL_BUG 0 +#endif + /**begin repeat * #TYPE = FLOAT, DOUBLE# * #sfx = f32, f64# @@ -87,6 +137,7 @@ NPY_FINLINE double c_square_f64(double a) * #kind = sqrt, absolute, square, reciprocal# * #intr = sqrt, abs, square, recip# * #repl_0w1 = 0, 0, 0, 1# + * #RECIP_WORKAROUND = 0, 0, 0, WORKAROUND_CLANG_RECIPROCAL_BUG# */ /**begin repeat2 * #STYPE = CONTIG, NCONTIG, CONTIG, NCONTIG# @@ -101,6 +152,8 @@ static void simd_@TYPE@_@kind@_@STYPE@_@DTYPE@ const int vstep = npyv_nlanes_@sfx@; const int wstep = vstep * @unroll@; + + // unrolled iterations for (; len >= wstep; len -= wstep, src += ssrc*wstep, dst += sdst*wstep) { /**begin repeat3 * #N = 0, 1, 2, 3# @@ -126,7 +179,24 @@ static void simd_@TYPE@_@kind@_@STYPE@_@DTYPE@ #endif /**end repeat3**/ } - for (; len > 0; len -= vstep, src += ssrc*vstep, dst += sdst*vstep) { + + // vector-sized iterations + for (; len >= vstep; len -= vstep, src += ssrc*vstep, dst += sdst*vstep) { + #if @STYPE@ == CONTIG + npyv_@sfx@ v_src0 = npyv_load_@sfx@(src); + #else + npyv_@sfx@ v_src0 = npyv_loadn_@sfx@(src, ssrc); + #endif + npyv_@sfx@ v_unary0 = npyv_@intr@_@sfx@(v_src0); + #if @DTYPE@ == CONTIG + npyv_store_@sfx@(dst, v_unary0); + #else + npyv_storen_@sfx@(dst, sdst, v_unary0); + #endif + } + + // last partial iteration, if needed + if(len > 0){ #if @STYPE@ == CONTIG #if @repl_0w1@ npyv_@sfx@ v_src0 = npyv_load_till_@sfx@(src, len, 1); @@ -140,6 +210,15 @@ static void simd_@TYPE@_@kind@_@STYPE@_@DTYPE@ npyv_@sfx@ v_src0 = npyv_loadn_tillz_@sfx@(src, ssrc, len); #endif #endif + #if @RECIP_WORKAROUND@ + /* + * Workaround clang bug. We use a dummy variable marked 'volatile' + * to convince clang that the entire vector is needed. We only + * want to do this for the last iteration / partial load-store of + * the loop since 'volatile' forces a refresh of the contents. 
+ */ + volatile npyv_@sfx@ unused_but_workaround_bug = v_src0; + #endif // @RECIP_WORKAROUND@ npyv_@sfx@ v_unary0 = npyv_@intr@_@sfx@(v_src0); #if @DTYPE@ == CONTIG npyv_store_till_@sfx@(dst, len, v_unary0); @@ -147,6 +226,7 @@ static void simd_@TYPE@_@kind@_@STYPE@_@DTYPE@ npyv_storen_till_@sfx@(dst, sdst, len, v_unary0); #endif } + npyv_cleanup(); } /**end repeat2**/ @@ -154,6 +234,8 @@ static void simd_@TYPE@_@kind@_@STYPE@_@DTYPE@ #endif // @VCHK@ /**end repeat**/ +#undef WORKAROUND_CLANG_RECIPROCAL_BUG + /******************************************************************************** ** Defining ufunc inner functions ********************************************************************************/ diff --git a/numpy/core/src/umath/loops_utils.h.src b/numpy/core/src/umath/loops_utils.h.src index 1a2a5a32b..762e9ee59 100644 --- a/numpy/core/src/umath/loops_utils.h.src +++ b/numpy/core/src/umath/loops_utils.h.src @@ -6,7 +6,7 @@ /** * Old versions of MSVC causes ambiguous link errors when we deal with large SIMD kernels - * which lead to break the build, probably releated to the following bug: + * which lead to break the build, probably related to the following bug: * https://developercommunity.visualstudio.com/content/problem/415095/internal-compiler-error-with-perfectly-forwarded-r.html */ #if defined(_MSC_VER) && _MSC_VER < 1916 diff --git a/numpy/core/src/umath/matmul.c.src b/numpy/core/src/umath/matmul.c.src index 0e47d1ab5..4dd0c4759 100644 --- a/numpy/core/src/umath/matmul.c.src +++ b/numpy/core/src/umath/matmul.c.src @@ -1,11 +1,11 @@ /* -*- c -*- */ +#define PY_SSIZE_T_CLEAN +#include <Python.h> #define _UMATHMODULE #define _MULTIARRAYMODULE #define NPY_NO_DEPRECATED_API NPY_API_VERSION -#include "Python.h" - #include "npy_config.h" #include "numpy/npy_common.h" #include "numpy/arrayobject.h" diff --git a/numpy/core/src/umath/reduction.c b/numpy/core/src/umath/reduction.c index 86cc20eb1..c28c8abd8 100644 --- a/numpy/core/src/umath/reduction.c +++ b/numpy/core/src/umath/reduction.c @@ -6,15 +6,15 @@ * * See LICENSE.txt for the license. */ -#define _UMATHMODULE -#define _MULTIARRAYMODULE #define NPY_NO_DEPRECATED_API NPY_API_VERSION +#define _MULTIARRAYMODULE +#define _UMATHMODULE #define PY_SSIZE_T_CLEAN #include <Python.h> #include "npy_config.h" -#include <numpy/arrayobject.h> +#include "numpy/arrayobject.h" #include "npy_pycompat.h" #include "ctors.h" @@ -145,14 +145,12 @@ PyArray_CopyInitialReduceValues( * boilerplate code, just calling the appropriate inner loop function where * necessary. * + * context : The ArrayMethod context (with ufunc, method, and descriptors). * operand : The array to be reduced. * out : NULL, or the array into which to place the result. * wheremask : NOT YET SUPPORTED, but this parameter is placed here * so that support can be added in the future without breaking * API compatibility. Pass in NULL. - * operand_dtype : The dtype the inner loop expects for the operand. - * result_dtype : The dtype the inner loop expects for the result. - * casting : The casting rule to apply to the operands. * axis_flags : Flags indicating the reduction axes of 'operand'. * reorderable : If True, the reduction being done is reorderable, which * means specifying multiple axes of reduction at once is ok, @@ -182,10 +180,8 @@ PyArray_CopyInitialReduceValues( * generalized ufuncs!) 
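+ *
+ * The caller is expected to resolve the reduction up front and hand the
+ * result in via `context`, roughly (sketch, with names as used elsewhere
+ * in this patch):
+ *
+ *     PyArrayMethod_Context context = {
+ *         .caller = (PyObject *)ufunc,
+ *         .method = ufuncimpl,
+ *         .descriptors = out_descrs,
+ *     };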
*/ NPY_NO_EXPORT PyArrayObject * -PyUFunc_ReduceWrapper( +PyUFunc_ReduceWrapper(PyArrayMethod_Context *context, PyArrayObject *operand, PyArrayObject *out, PyArrayObject *wheremask, - PyArray_Descr *operand_dtype, PyArray_Descr *result_dtype, - NPY_CASTING casting, npy_bool *axis_flags, int reorderable, int keepdims, PyObject *identity, PyArray_ReduceLoopFunc *loop, void *data, npy_intp buffersize, const char *funcname, int errormask) @@ -199,6 +195,8 @@ PyUFunc_ReduceWrapper( PyArrayObject *op[3]; PyArray_Descr *op_dtypes[3]; npy_uint32 it_flags, op_flags[3]; + /* Loop auxdata (must be freed on error) */ + NpyAuxData *auxdata = NULL; /* More than one axis means multiple orders are possible */ if (!reorderable && count_axes(PyArray_NDIM(operand), axis_flags) > 1) { @@ -221,8 +219,8 @@ PyUFunc_ReduceWrapper( /* Set up the iterator */ op[0] = out; op[1] = operand; - op_dtypes[0] = result_dtype; - op_dtypes[1] = operand_dtype; + op_dtypes[0] = context->descriptors[0]; + op_dtypes[1] = context->descriptors[1]; it_flags = NPY_ITER_BUFFERED | NPY_ITER_EXTERNAL_LOOP | @@ -291,7 +289,7 @@ PyUFunc_ReduceWrapper( } iter = NpyIter_AdvancedNew(wheremask == NULL ? 2 : 3, op, it_flags, - NPY_KEEPORDER, casting, + NPY_KEEPORDER, NPY_UNSAFE_CASTING, op_flags, op_dtypes, PyArray_NDIM(operand), op_axes, NULL, buffersize); @@ -301,9 +299,29 @@ PyUFunc_ReduceWrapper( result = NpyIter_GetOperandArray(iter)[0]; - int needs_api = NpyIter_IterationNeedsAPI(iter); - /* Start with the floating-point exception flags cleared */ - npy_clear_floatstatus_barrier((char*)&iter); + PyArrayMethod_StridedLoop *strided_loop; + NPY_ARRAYMETHOD_FLAGS flags = 0; + npy_intp fixed_strides[3]; + NpyIter_GetInnerFixedStrideArray(iter, fixed_strides); + if (wheremask != NULL) { + if (PyArrayMethod_GetMaskedStridedLoop(context, + 1, fixed_strides, &strided_loop, &auxdata, &flags) < 0) { + goto fail; + } + } + else { + if (context->method->get_strided_loop(context, + 1, 0, fixed_strides, &strided_loop, &auxdata, &flags) < 0) { + goto fail; + } + } + + int needs_api = (flags & NPY_METH_REQUIRES_PYAPI) != 0; + needs_api |= NpyIter_IterationNeedsAPI(iter); + if (!(flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) { + /* Start with the floating-point exception flags cleared */ + npy_clear_floatstatus_barrier((char*)&iter); + } /* * Initialize the result to the reduction unit if possible, @@ -345,16 +363,18 @@ PyUFunc_ReduceWrapper( strideptr = NpyIter_GetInnerStrideArray(iter); countptr = NpyIter_GetInnerLoopSizePtr(iter); - if (loop(iter, dataptr, strideptr, countptr, - iternext, needs_api, skip_first_count, data) < 0) { + if (loop(context, strided_loop, auxdata, + iter, dataptr, strideptr, countptr, iternext, + needs_api, skip_first_count) < 0) { goto fail; } } - /* Check whether any errors occurred during the loop */ - if (PyErr_Occurred() || - _check_ufunc_fperr(errormask, NULL, "reduce") < 0) { - goto fail; + if (!(flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) { + /* NOTE: We could check float errors even on error */ + if (_check_ufunc_fperr(errormask, NULL, "reduce") < 0) { + goto fail; + } } if (out != NULL) { @@ -369,6 +389,7 @@ PyUFunc_ReduceWrapper( return result; fail: + NPY_AUXDATA_FREE(auxdata); if (iter != NULL) { NpyIter_Deallocate(iter); } diff --git a/numpy/core/src/umath/reduction.h b/numpy/core/src/umath/reduction.h index 372605dba..2170e27a7 100644 --- a/numpy/core/src/umath/reduction.h +++ b/numpy/core/src/umath/reduction.h @@ -19,93 +19,17 @@ typedef int (PyArray_AssignReduceIdentityFunc)(PyArrayObject *result, void *data); /* 
- * This is a function for the reduce loop. + * Inner definition of the reduce loop, only used for a static function. + * At some point around NumPy 1.6, there was probably an intention to make + * the reduce loop customizable at this level (per ufunc?). * - * The needs_api parameter indicates whether it's ok to release the GIL during - * the loop, such as when the iternext() function never calls - * a function which could raise a Python exception. - * - * The skip_first_count parameter indicates how many elements need to be - * skipped based on NpyIter_IsFirstVisit checks. This can only be positive - * when the 'assign_identity' parameter was NULL when calling - * PyArray_ReduceWrapper. - * - * The loop gets two data pointers and two strides, and should - * look roughly like this: - * { - * NPY_BEGIN_THREADS_DEF; - * if (!needs_api) { - * NPY_BEGIN_THREADS; - * } - * // This first-visit loop can be skipped if 'assign_identity' was non-NULL - * if (skip_first_count > 0) { - * do { - * char *data0 = dataptr[0], *data1 = dataptr[1]; - * npy_intp stride0 = strideptr[0], stride1 = strideptr[1]; - * npy_intp count = *countptr; - * - * // Skip any first-visit elements - * if (NpyIter_IsFirstVisit(iter, 0)) { - * if (stride0 == 0) { - * --count; - * --skip_first_count; - * data1 += stride1; - * } - * else { - * skip_first_count -= count; - * count = 0; - * } - * } - * - * while (count--) { - * *(result_t *)data0 = my_reduce_op(*(result_t *)data0, - * *(operand_t *)data1); - * data0 += stride0; - * data1 += stride1; - * } - * - * // Jump to the faster loop when skipping is done - * if (skip_first_count == 0) { - * if (iternext(iter)) { - * break; - * } - * else { - * goto finish_loop; - * } - * } - * } while (iternext(iter)); - * } - * do { - * char *data0 = dataptr[0], *data1 = dataptr[1]; - * npy_intp stride0 = strideptr[0], stride1 = strideptr[1]; - * npy_intp count = *countptr; - * - * while (count--) { - * *(result_t *)data0 = my_reduce_op(*(result_t *)data0, - * *(operand_t *)data1); - * data0 += stride0; - * data1 += stride1; - * } - * } while (iternext(iter)); - * finish_loop: - * if (!needs_api) { - * NPY_END_THREADS; - * } - * return (needs_api && PyErr_Occurred()) ? -1 : 0; - * } - * - * If needs_api is True, this function should call PyErr_Occurred() - * to check if an error occurred during processing, and return -1 for - * error, 0 for success. + * TODO: This should be refactored/removed. 
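+ *
+ * With this signature, an implementation is little more than a driver
+ * around the already-resolved inner loop; compressed sketch of the
+ * shape used in reduction.c:
+ *
+ *     do {
+ *         // ... first-visit / skip_first_count handling elided ...
+ *         if (strided_loop(context, dataptrs, countptr, strides,
+ *                          auxdata) < 0) {
+ *             return -1;
+ *         }
+ *     } while (iternext(iter));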
*/ -typedef int (PyArray_ReduceLoopFunc)(NpyIter *iter, - char **dataptr, - npy_intp const *strideptr, - npy_intp const *countptr, - NpyIter_IterNextFunc *iternext, - int needs_api, - npy_intp skip_first_count, - void *data); +typedef int (PyArray_ReduceLoopFunc)(PyArrayMethod_Context *context, + PyArrayMethod_StridedLoop *strided_loop, NpyAuxData *auxdata, + NpyIter *iter, char **dataptrs, npy_intp const *strides, + npy_intp const *countptr, NpyIter_IterNextFunc *iternext, + int needs_api, npy_intp skip_first_count); /* * This function executes all the standard NumPy reduction function @@ -138,16 +62,10 @@ typedef int (PyArray_ReduceLoopFunc)(NpyIter *iter, * errormask : forwarded from _get_bufsize_errmask */ NPY_NO_EXPORT PyArrayObject * -PyUFunc_ReduceWrapper(PyArrayObject *operand, PyArrayObject *out, - PyArrayObject *wheremask, - PyArray_Descr *operand_dtype, - PyArray_Descr *result_dtype, - NPY_CASTING casting, - npy_bool *axis_flags, int reorderable, - int keepdims, - PyObject *identity, - PyArray_ReduceLoopFunc *loop, - void *data, npy_intp buffersize, const char *funcname, - int errormask); +PyUFunc_ReduceWrapper(PyArrayMethod_Context *context, + PyArrayObject *operand, PyArrayObject *out, PyArrayObject *wheremask, + npy_bool *axis_flags, int reorderable, int keepdims, + PyObject *identity, PyArray_ReduceLoopFunc *loop, + void *data, npy_intp buffersize, const char *funcname, int errormask); #endif diff --git a/numpy/core/src/umath/scalarmath.c.src b/numpy/core/src/umath/scalarmath.c.src index 5836545f8..402e6b561 100644 --- a/numpy/core/src/umath/scalarmath.c.src +++ b/numpy/core/src/umath/scalarmath.c.src @@ -5,12 +5,13 @@ but still supports error-modes. */ +#define PY_SSIZE_T_CLEAN +#include <Python.h> #define _UMATHMODULE #define _MULTIARRAYMODULE #define NPY_NO_DEPRECATED_API NPY_API_VERSION -#include "Python.h" #include "npy_config.h" #include "numpy/arrayobject.h" #include "numpy/ufuncobject.h" diff --git a/numpy/core/src/umath/simd.inc.src b/numpy/core/src/umath/simd.inc.src index 654ab81cc..d47be9a30 100644 --- a/numpy/core/src/umath/simd.inc.src +++ b/numpy/core/src/umath/simd.inc.src @@ -116,9 +116,8 @@ run_binary_avx512f_@func@_@TYPE@(char **args, npy_intp const *dimensions, npy_in #endif return 0; } - - /**end repeat1**/ + /**end repeat**/ /**begin repeat @@ -1152,6 +1151,7 @@ NPY_FINLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ @vtype@d * #is_finite = 0, 1, 0, 0# * #is_signbit = 0, 0, 0, 1# */ + #if defined HAVE_ATTRIBUTE_TARGET_AVX512_SKX_WITH_INTRINSICS && defined NPY_HAVE_SSE2_INTRINSICS static NPY_INLINE NPY_GCC_TARGET_AVX512_SKX void AVX512_SKX_@func@_@TYPE@(npy_bool* op, @type@* ip, const npy_intp array_size, const npy_intp steps) diff --git a/numpy/core/src/umath/svml b/numpy/core/src/umath/svml new file mode 160000 +Subproject 9f8af767ed6c75455d9a382af829048f8dd1806 diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c index ebc6bf02a..237af81b2 100644 --- a/numpy/core/src/umath/ufunc_object.c +++ b/numpy/core/src/umath/ufunc_object.c @@ -23,12 +23,14 @@ * Rick White * */ -#define _UMATHMODULE -#define _MULTIARRAYMODULE #define NPY_NO_DEPRECATED_API NPY_API_VERSION +#define _MULTIARRAYMODULE +#define _UMATHMODULE + +#define PY_SSIZE_T_CLEAN +#include <Python.h> -#include "Python.h" -#include "stddef.h" +#include <stddef.h> #include "npy_config.h" #include "npy_pycompat.h" @@ -614,9 +616,24 @@ _is_same_name(const char* s1, const char* s2) } /* - * Sets core_num_dim_ix, core_num_dims, core_dim_ixs, core_offsets, - * and core_signature 
in PyUFuncObject "ufunc".  Returns 0 unless an
- * error occurred.
+ * Sets the following fields in the PyUFuncObject 'ufunc':
+ *
+ * Field             Type                     Array Length
+ * core_enabled      int (effectively bool)   N/A
+ * core_num_dim_ix   int                      N/A
+ * core_dim_flags    npy_uint32 *             core_num_dim_ix
+ * core_dim_sizes    npy_intp *               core_num_dim_ix
+ * core_num_dims     int *                    nargs (i.e. nin+nout)
+ * core_offsets      int *                    nargs
+ * core_dim_ixs      int *                    sum(core_num_dims)
+ * core_signature    char *                   strlen(signature) + 1
+ *
+ * The function assumes that the values that are arrays have not
+ * been set already, and sets these pointers to memory allocated
+ * with PyArray_malloc.  These are freed when the ufunc dealloc
+ * method is called.
+ *
+ * Returns 0 unless an error occurred.
 */
 static int
 _parse_signature(PyUFuncObject *ufunc, const char *signature)
@@ -988,6 +1005,7 @@ convert_ufunc_arguments(PyUFuncObject *ufunc,
     }
 
     /* Convert and fill in output arguments */
+    memset(out_op_DTypes + nin, 0, nout * sizeof(*out_op_DTypes));
     if (full_args.out != NULL) {
         for (int i = 0; i < nout; i++) {
             obj = PyTuple_GET_ITEM(full_args.out, i);
@@ -1045,6 +1063,7 @@ check_for_trivial_loop(PyArrayMethodObject *ufuncimpl,
         PyArrayObject **op, PyArray_Descr **dtypes,
         NPY_CASTING casting, npy_intp buffersize)
 {
+    int force_cast_input = ufuncimpl->flags & _NPY_METH_FORCE_CAST_INPUTS;
     int i, nin = ufuncimpl->nin, nop = nin + ufuncimpl->nout;
 
     for (i = 0; i < nop; ++i) {
@@ -1060,15 +1079,21 @@ check_for_trivial_loop(PyArrayMethodObject *ufuncimpl,
         if (dtypes[i] != PyArray_DESCR(op[i])) {
             NPY_CASTING safety = PyArray_GetCastSafety(
                     PyArray_DESCR(op[i]), dtypes[i], NULL);
-            if (safety < 0) {
-                /* A proper error during a cast check should be rare */
+            if (safety < 0 && PyErr_Occurred()) {
+                /* A proper error during a cast check; this should be rare */
                 return -1;
             }
             if (!(safety & _NPY_CAST_IS_VIEW)) {
                 must_copy = 1;
             }
-            if (PyArray_MinCastSafety(safety, casting) != casting) {
+            if (force_cast_input && i < nin) {
+                /*
+                 * ArrayMethod flagged to ignore casting (logical funcs
+                 * can force cast to bool)
+                 */
+            }
+            else if (PyArray_MinCastSafety(safety, casting) != casting) {
                 return 0;  /* the cast is not safe enough */
             }
         }
@@ -1323,6 +1348,14 @@ try_trivial_single_output_loop(PyArrayMethod_Context *context,
     NPY_END_THREADS;
     NPY_AUXDATA_FREE(auxdata);
 
+    /*
+     * An error should only be possible if `res != 0` is already set.
+     * But this is not strictly correct for old-style ufuncs (e.g. `power`
+     * released the GIL but manually set an Exception).
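+     *
+     * A Python-level illustration of that legacy case (behaviour of this
+     * era of NumPy; the error is set while `res` stays 0):
+     *
+     *     >>> np.power(np.arange(3), -1)
+     *     Traceback (most recent call last):
+     *         ...
+     *     ValueError: Integers to negative integer powers are not allowed.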
+     */
+    if (PyErr_Occurred()) {
+        res = -1;
+    }
 
     if (res == 0 && !(flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) {
         /* NOTE: We could check float errors even when `res < 0` */
@@ -1350,8 +1383,15 @@ validate_casting(PyArrayMethodObject *method, PyUFuncObject *ufunc,
          */
         return 0;
     }
-    if (PyUFunc_ValidateCasting(ufunc, casting, ops, descriptors) < 0) {
-        return -1;
+    if (method->flags & _NPY_METH_FORCE_CAST_INPUTS) {
+        if (PyUFunc_ValidateOutCasting(ufunc, casting, ops, descriptors) < 0) {
+            return -1;
+        }
+    }
+    else {
+        if (PyUFunc_ValidateCasting(ufunc, casting, ops, descriptors) < 0) {
+            return -1;
+        }
     }
     return 0;
 }
@@ -2460,9 +2500,9 @@ PyUFunc_GeneralizedFunctionInternal(PyUFuncObject *ufunc,
 
     /* Final preparation of the arraymethod call */
     PyArrayMethod_Context context = {
-        .caller = (PyObject *)ufunc,
-        .method = ufuncimpl,
-        .descriptors = operation_descrs,
+            .caller = (PyObject *)ufunc,
+            .method = ufuncimpl,
+            .descriptors = operation_descrs,
     };
     PyArrayMethod_StridedLoop *strided_loop;
     NPY_ARRAYMETHOD_FLAGS flags = 0;
@@ -2517,7 +2557,7 @@ PyUFunc_GeneralizedFunctionInternal(PyUFuncObject *ufunc,
     PyArray_free(inner_strides);
     NPY_AUXDATA_FREE(auxdata);
 
-    if (NpyIter_Deallocate(iter) < 0) {
+    if (!NpyIter_Deallocate(iter)) {
         retval = -1;
     }
 
@@ -2582,9 +2622,9 @@ PyUFunc_GenericFunctionInternal(PyUFuncObject *ufunc,
 
     /* Final preparation of the arraymethod call */
     PyArrayMethod_Context context = {
-        .caller = (PyObject *)ufunc,
-        .method = ufuncimpl,
-        .descriptors = operation_descrs,
+            .caller = (PyObject *)ufunc,
+            .method = ufuncimpl,
+            .descriptors = operation_descrs,
     };
 
     /* Do the ufunc loop */
@@ -2651,195 +2691,129 @@ PyUFunc_GenericFunction(PyUFuncObject *NPY_UNUSED(ufunc),
 
 /*
- * Given the output type, finds the specified binary op. The
- * ufunc must have nin==2 and nout==1. The function may modify
- * otype if the given type isn't found.
+ * Promote and resolve a reduction-like operation.
  *
- * Returns 0 on success, -1 on failure.
+ * @param ufunc
+ * @param arr The operation array
+ * @param out The output array or NULL if not provided. Note that NumPy always
+ *        used out to mean the same as `dtype=out.dtype` and never passed
+ *        the array itself to the type-resolution.
+ * @param signature The DType signature, which may already be set due to the
+ *        dtype passed in by the user, or the special cases (add, multiply).
+ *        (Contains strong references and may be modified.)
+ * @param enforce_uniform_args If `NPY_TRUE` fully uniform dtypes/descriptors
+ *        are enforced as required for accumulate and (currently) reduceat.
+ * @param out_descrs New references to the resolved descriptors (on success).
+ * @param method The ufunc method, "reduce", "reduceat", or "accumulate".
+ *
+ * @returns ufuncimpl The `ArrayMethod` implementation to use. Or NULL if an
+ *          error occurred.
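+ *
+ * Illustration (Python level, assuming default promotion rules): passing
+ * `dtype=` pre-fills the signature that this function then resolves, e.g.
+ *
+ *     >>> np.add.reduce(np.arange(5, dtype=np.uint8), dtype=np.float64)
+ *     10.0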
 */
-static int
-get_binary_op_function(PyUFuncObject *ufunc, int *otype,
-                        PyUFuncGenericFunction *out_innerloop,
-                        void **out_innerloopdata)
+static PyArrayMethodObject *
+reducelike_promote_and_resolve(PyUFuncObject *ufunc,
+        PyArrayObject *arr, PyArrayObject *out,
+        PyArray_DTypeMeta *signature[3],
+        npy_bool enforce_uniform_args, PyArray_Descr *out_descrs[3],
+        char *method)
 {
-    int i;
-
-    NPY_UF_DBG_PRINT1("Getting binary op function for type number %d\n",
-                        *otype);
-
-    /* If the type is custom and there are userloops, search for it here */
-    if (ufunc->userloops != NULL && PyTypeNum_ISUSERDEF(*otype)) {
-        PyObject *key, *obj;
-        key = PyLong_FromLong(*otype);
-        if (key == NULL) {
-            return -1;
-        }
-        obj = PyDict_GetItemWithError(ufunc->userloops, key);
-        Py_DECREF(key);
-        if (obj == NULL && PyErr_Occurred()) {
-            return -1;
-        }
-        else if (obj != NULL) {
-            PyUFunc_Loop1d *funcdata = PyCapsule_GetPointer(obj, NULL);
-            if (funcdata == NULL) {
-                return -1;
-            }
-            while (funcdata != NULL) {
-                int *types = funcdata->arg_types;
-
-                if (types[0] == *otype && types[1] == *otype &&
-                                types[2] == *otype) {
-                    *out_innerloop = funcdata->func;
-                    *out_innerloopdata = funcdata->data;
-                    return 0;
-                }
+    /*
+     * Note that the `ops` is not really correct.  But legacy resolution
+     * cannot quite handle the correct ops (e.g. a NULL first item if `out`
+     * is NULL), and it should only matter in very strange cases.
+     */
+    PyArrayObject *ops[3] = {arr, arr, NULL};
+    /*
+     * TODO: If `out` is not provided, arguably `initial` could define
+     *       the first DType (and maybe also the out one), that way
+     *       `np.add.reduce([1, 2, 3], initial=3.4)` would return a float
+     *       value.  As of 1.20, it returned an integer, so that should
+     *       probably go to an error/warning first.
+     */
+    PyArray_DTypeMeta *operation_DTypes[3] = {
+            NULL, NPY_DTYPE(PyArray_DESCR(arr)), NULL};
+    Py_INCREF(operation_DTypes[1]);
 
-                funcdata = funcdata->next;
-            }
-        }
+    if (out != NULL) {
+        operation_DTypes[0] = NPY_DTYPE(PyArray_DESCR(out));
+        Py_INCREF(operation_DTypes[0]);
+        operation_DTypes[2] = operation_DTypes[0];
+        Py_INCREF(operation_DTypes[2]);
     }
 
-    /* Search for a function with compatible inputs */
-    for (i = 0; i < ufunc->ntypes; ++i) {
-        char *types = ufunc->types + i*ufunc->nargs;
-
-        NPY_UF_DBG_PRINT3("Trying loop with signature %d %d -> %d\n",
-                                types[0], types[1], types[2]);
-
-        if (PyArray_CanCastSafely(*otype, types[0]) &&
-                    types[0] == types[1] &&
-                    (*otype == NPY_OBJECT || types[0] != NPY_OBJECT)) {
-            /* If the signature is "xx->x", we found the loop */
-            if (types[2] == types[0]) {
-                *out_innerloop = ufunc->functions[i];
-                *out_innerloopdata = ufunc->data[i];
-                *otype = types[0];
-                return 0;
-            }
-            /*
-             * Otherwise, we found the natural type of the reduction,
-             * replace otype and search again
-             */
-            else {
-                *otype = types[2];
-                break;
-            }
-        }
+    PyArrayMethodObject *ufuncimpl = promote_and_get_ufuncimpl(ufunc,
+            ops, signature, operation_DTypes, NPY_FALSE, NPY_TRUE);
+    Py_DECREF(operation_DTypes[1]);
+    if (out != NULL) {
+        Py_DECREF(operation_DTypes[0]);
+        Py_DECREF(operation_DTypes[2]);
     }
 
-    /* Search for the exact function */
-    for (i = 0; i < ufunc->ntypes; ++i) {
-        char *types = ufunc->types + i*ufunc->nargs;
-
-        if (PyArray_CanCastSafely(*otype, types[0]) &&
-                    types[0] == types[1] &&
-                    types[1] == types[2] &&
-                    (*otype == NPY_OBJECT || types[0] != NPY_OBJECT)) {
-            /* Since the signature is "xx->x", we found the loop */
-            *out_innerloop = ufunc->functions[i];
-            *out_innerloopdata = ufunc->data[i];
-            *otype = types[0];
-            return 0;
-        }
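+    /*
+     * (Python-level sketch of the TODO above; behaviour around NumPy
+     *  1.20/1.21, where the float `initial` is simply cast to the integer
+     *  result DType:
+     *
+     *      >>> np.add.reduce([1, 2, 3], initial=3.4)
+     *      9
+     * )
+     */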
+ if (ufuncimpl == NULL) { + return NULL; } - return -1; -} - -static int -reduce_type_resolver(PyUFuncObject *ufunc, PyArrayObject *arr, - PyArray_Descr *odtype, PyArray_Descr **out_dtype) -{ - int i, retcode; - PyArrayObject *op[3] = {arr, arr, NULL}; - PyArray_Descr *dtypes[3] = {NULL, NULL, NULL}; - const char *ufunc_name = ufunc_get_name_cstr(ufunc); - PyObject *type_tup = NULL; - - *out_dtype = NULL; - /* - * If odtype is specified, make a type tuple for the type - * resolution. + * Find the correct descriptors for the operation. We use unsafe casting + * for historic reasons: The logic ufuncs required it to cast everything to + * boolean. However, we now special case the logical ufuncs, so that the + * casting safety could in principle be set to the default same-kind. + * (although this should possibly happen through a deprecation) */ - if (odtype != NULL) { - type_tup = PyTuple_Pack(3, odtype, odtype, Py_None); - if (type_tup == NULL) { - return -1; - } - } - - /* Use the type resolution function to find our loop */ - retcode = ufunc->type_resolver( - ufunc, NPY_UNSAFE_CASTING, - op, type_tup, dtypes); - Py_DECREF(type_tup); - if (retcode == -1) { - return -1; - } - else if (retcode == -2) { - PyErr_Format(PyExc_RuntimeError, - "type resolution returned NotImplemented to " - "reduce ufunc %s", ufunc_name); - return -1; + if (resolve_descriptors(3, ufunc, ufuncimpl, + ops, out_descrs, signature, NPY_UNSAFE_CASTING) < 0) { + return NULL; } /* - * The first two type should be equivalent. Because of how - * reduce has historically behaved in NumPy, the return type - * could be different, and it is the return type on which the - * reduction occurs. + * The first operand and output should be the same array, so they should + * be identical. The second argument can be different for reductions, + * but is checked to be identical for accumulate and reduceat. */ - if (!PyArray_EquivTypes(dtypes[0], dtypes[1])) { - for (i = 0; i < 3; ++i) { - Py_DECREF(dtypes[i]); - } - PyErr_Format(PyExc_RuntimeError, - "could not find a type resolution appropriate for " - "reduce ufunc %s", ufunc_name); - return -1; + if (out_descrs[0] != out_descrs[2] || ( + enforce_uniform_args && out_descrs[0] != out_descrs[1])) { + PyErr_Format(PyExc_TypeError, + "the resolved dtypes are not compatible with %s.%s", + ufunc_get_name_cstr(ufunc), method); + goto fail; + } + /* TODO: This really should _not_ be unsafe casting (same above)! 
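+     * A hedged example of what unsafe casting currently permits here
+     * (Python level; a plain `np.add` call with these dtypes would be
+     * rejected under the default same-kind casting rule):
+     *
+     *     >>> np.add.reduce(np.array([1.5, 2.5]), dtype=np.int64)
+     *     3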
*/ + if (validate_casting(ufuncimpl, + ufunc, ops, out_descrs, NPY_UNSAFE_CASTING) < 0) { + goto fail; } - Py_DECREF(dtypes[0]); - Py_DECREF(dtypes[1]); - *out_dtype = dtypes[2]; + return ufuncimpl; - return 0; + fail: + for (int i = 0; i < 3; ++i) { + Py_DECREF(out_descrs[i]); + } + return NULL; } + static int -reduce_loop(NpyIter *iter, char **dataptrs, npy_intp const *strides, - npy_intp const *countptr, NpyIter_IterNextFunc *iternext, - int needs_api, npy_intp skip_first_count, void *data) +reduce_loop(PyArrayMethod_Context *context, + PyArrayMethod_StridedLoop *strided_loop, NpyAuxData *auxdata, + NpyIter *iter, char **dataptrs, npy_intp const *strides, + npy_intp const *countptr, NpyIter_IterNextFunc *iternext, + int needs_api, npy_intp skip_first_count) { - PyArray_Descr *dtypes[3], **iter_dtypes; - PyUFuncObject *ufunc = (PyUFuncObject *)data; - char *dataptrs_copy[3]; - npy_intp strides_copy[3]; + int retval; + char *dataptrs_copy[4]; + npy_intp strides_copy[4]; npy_bool masked; - /* The normal selected inner loop */ - PyUFuncGenericFunction innerloop = NULL; - void *innerloopdata = NULL; - NPY_BEGIN_THREADS_DEF; /* Get the number of operands, to determine whether "where" is used */ masked = (NpyIter_GetNOp(iter) == 3); - /* Get the inner loop */ - iter_dtypes = NpyIter_GetDescrArray(iter); - dtypes[0] = iter_dtypes[0]; - dtypes[1] = iter_dtypes[1]; - dtypes[2] = iter_dtypes[0]; - if (ufunc->legacy_inner_loop_selector(ufunc, dtypes, - &innerloop, &innerloopdata, &needs_api) < 0) { - return -1; + if (!needs_api) { + NPY_BEGIN_THREADS_THRESHOLDED(NpyIter_GetIterSize(iter)); } - NPY_BEGIN_THREADS_NDITER(iter); - if (skip_first_count > 0) { - do { + assert(!masked); /* Path currently not available for masked */ + while (1) { npy_intp count = *countptr; /* Skip any first-visit elements */ @@ -2862,27 +2836,23 @@ reduce_loop(NpyIter *iter, char **dataptrs, npy_intp const *strides, strides_copy[0] = strides[0]; strides_copy[1] = strides[1]; strides_copy[2] = strides[0]; - innerloop(dataptrs_copy, &count, - strides_copy, innerloopdata); - if (needs_api && PyErr_Occurred()) { + retval = strided_loop(context, + dataptrs_copy, &count, strides_copy, auxdata); + if (retval < 0) { goto finish_loop; } - /* Jump to the faster loop when skipping is done */ - if (skip_first_count == 0) { - if (iternext(iter)) { - break; - } - else { - goto finish_loop; - } + /* Advance loop, and abort on error (or finish) */ + if (!iternext(iter)) { + goto finish_loop; } - } while (iternext(iter)); - } - if (needs_api && PyErr_Occurred()) { - goto finish_loop; + /* When skipping is done break and continue with faster loop */ + if (skip_first_count == 0) { + break; + } + } } do { @@ -2893,42 +2863,23 @@ reduce_loop(NpyIter *iter, char **dataptrs, npy_intp const *strides, strides_copy[0] = strides[0]; strides_copy[1] = strides[1]; strides_copy[2] = strides[0]; - - if (!masked) { - innerloop(dataptrs_copy, countptr, - strides_copy, innerloopdata); + if (masked) { + dataptrs_copy[3] = dataptrs[2]; + strides_copy[3] = strides[2]; } - else { - npy_intp count = *countptr; - char *maskptr = dataptrs[2]; - npy_intp mask_stride = strides[2]; - /* Optimization for when the mask is broadcast */ - npy_intp n = mask_stride == 0 ? 
count : 1; - while (count) { - char mask = *maskptr; - maskptr += mask_stride; - while (n < count && mask == *maskptr) { - n++; - maskptr += mask_stride; - } - /* If mask set, apply inner loop on this contiguous region */ - if (mask) { - innerloop(dataptrs_copy, &n, - strides_copy, innerloopdata); - } - dataptrs_copy[0] += n * strides[0]; - dataptrs_copy[1] += n * strides[1]; - dataptrs_copy[2] = dataptrs_copy[0]; - count -= n; - n = 1; - } + + retval = strided_loop(context, + dataptrs_copy, countptr, strides_copy, auxdata); + if (retval < 0) { + goto finish_loop; } - } while (!(needs_api && PyErr_Occurred()) && iternext(iter)); + + } while (iternext(iter)); finish_loop: NPY_END_THREADS; - return (needs_api && PyErr_Occurred()) ? -1 : 0; + return retval; } /* @@ -2949,15 +2900,14 @@ finish_loop: * this function does not validate them. */ static PyArrayObject * -PyUFunc_Reduce(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out, - int naxes, int *axes, PyArray_Descr *odtype, int keepdims, +PyUFunc_Reduce(PyUFuncObject *ufunc, + PyArrayObject *arr, PyArrayObject *out, + int naxes, int *axes, PyArray_DTypeMeta *signature[3], int keepdims, PyObject *initial, PyArrayObject *wheremask) { int iaxes, ndim; npy_bool reorderable; npy_bool axis_flags[NPY_MAXDIMS]; - PyArray_Descr *dtype; - PyArrayObject *result; PyObject *identity; const char *ufunc_name = ufunc_get_name_cstr(ufunc); /* These parameters come from a TLS global */ @@ -2984,6 +2934,7 @@ PyUFunc_Reduce(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out, } /* Get the identity */ + /* TODO: Both of these should be provided by the ArrayMethod! */ identity = _get_identity(ufunc, &reorderable); if (identity == NULL) { return NULL; @@ -3007,21 +2958,27 @@ PyUFunc_Reduce(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out, Py_INCREF(initial); /* match the reference count in the if above */ } - /* Get the reduction dtype */ - if (reduce_type_resolver(ufunc, arr, odtype, &dtype) < 0) { + PyArray_Descr *descrs[3]; + PyArrayMethodObject *ufuncimpl = reducelike_promote_and_resolve(ufunc, + arr, out, signature, NPY_FALSE, descrs, "reduce"); + if (ufuncimpl == NULL) { Py_DECREF(initial); return NULL; } - result = PyUFunc_ReduceWrapper(arr, out, wheremask, dtype, dtype, - NPY_UNSAFE_CASTING, - axis_flags, reorderable, - keepdims, - initial, - reduce_loop, - ufunc, buffersize, ufunc_name, errormask); + PyArrayMethod_Context context = { + .caller = (PyObject *)ufunc, + .method = ufuncimpl, + .descriptors = descrs, + }; - Py_DECREF(dtype); + PyArrayObject *result = PyUFunc_ReduceWrapper(&context, + arr, out, wheremask, axis_flags, reorderable, keepdims, + initial, reduce_loop, ufunc, buffersize, ufunc_name, errormask); + + for (int i = 0; i < 3; i++) { + Py_DECREF(descrs[i]); + } Py_DECREF(initial); return result; } @@ -3029,23 +2986,21 @@ PyUFunc_Reduce(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out, static PyObject * PyUFunc_Accumulate(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out, - int axis, int otype) + int axis, PyArray_DTypeMeta *signature[3]) { PyArrayObject *op[2]; - PyArray_Descr *op_dtypes[2] = {NULL, NULL}; int op_axes_arrays[2][NPY_MAXDIMS]; int *op_axes[2] = {op_axes_arrays[0], op_axes_arrays[1]}; npy_uint32 op_flags[2]; - int idim, ndim, otype_final; + int idim, ndim; int needs_api, need_outer_iterator; - NpyIter *iter = NULL; + int res = 0; - /* The selected inner loop */ - PyUFuncGenericFunction innerloop = NULL; - void *innerloopdata = NULL; + PyArrayMethod_StridedLoop 
*strided_loop; + NpyAuxData *auxdata = NULL; - const char *ufunc_name = ufunc_get_name_cstr(ufunc); + NpyIter *iter = NULL; /* These parameters come from extobj= or from a TLS global */ int buffersize = 0, errormask = 0; @@ -3067,42 +3022,32 @@ PyUFunc_Accumulate(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out, /* Take a reference to out for later returning */ Py_XINCREF(out); - otype_final = otype; - if (get_binary_op_function(ufunc, &otype_final, - &innerloop, &innerloopdata) < 0) { - PyArray_Descr *dtype = PyArray_DescrFromType(otype); - PyErr_Format(PyExc_ValueError, - "could not find a matching type for %s.accumulate, " - "requested type has type code '%c'", - ufunc_name, dtype ? dtype->type : '-'); - Py_XDECREF(dtype); - goto fail; + PyArray_Descr *descrs[3]; + PyArrayMethodObject *ufuncimpl = reducelike_promote_and_resolve(ufunc, + arr, out, signature, NPY_TRUE, descrs, "accumulate"); + if (ufuncimpl == NULL) { + return NULL; } - ndim = PyArray_NDIM(arr); + /* The below code assumes that all descriptors are identical: */ + assert(descrs[0] == descrs[1] && descrs[0] == descrs[2]); - /* - * Set up the output data type, using the input's exact - * data type if the type number didn't change to preserve - * metadata - */ - if (PyArray_DESCR(arr)->type_num == otype_final) { - if (PyArray_ISNBO(PyArray_DESCR(arr)->byteorder)) { - op_dtypes[0] = PyArray_DESCR(arr); - Py_INCREF(op_dtypes[0]); - } - else { - op_dtypes[0] = PyArray_DescrNewByteorder(PyArray_DESCR(arr), - NPY_NATIVE); - } - } - else { - op_dtypes[0] = PyArray_DescrFromType(otype_final); - } - if (op_dtypes[0] == NULL) { + if (PyDataType_REFCHK(descrs[2]) && descrs[2]->type_num != NPY_OBJECT) { + /* This can be removed, but the initial element copy needs fixing */ + PyErr_SetString(PyExc_TypeError, + "accumulation currently only supports `object` dtype with " + "references"); goto fail; } + PyArrayMethod_Context context = { + .caller = (PyObject *)ufunc, + .method = ufuncimpl, + .descriptors = descrs, + }; + + ndim = PyArray_NDIM(arr); + #if NPY_UF_DBG_TRACING printf("Found %s.accumulate inner loop with dtype : ", ufunc_name); PyObject_Print((PyObject *)op_dtypes[0], stdout, 0); @@ -3128,9 +3073,9 @@ PyUFunc_Accumulate(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out, need_outer_iterator = (ndim > 1); /* We can't buffer, so must do UPDATEIFCOPY */ if (!PyArray_ISALIGNED(arr) || (out && !PyArray_ISALIGNED(out)) || - !PyArray_EquivTypes(op_dtypes[0], PyArray_DESCR(arr)) || + !PyArray_EquivTypes(descrs[1], PyArray_DESCR(arr)) || (out && - !PyArray_EquivTypes(op_dtypes[0], PyArray_DESCR(out)))) { + !PyArray_EquivTypes(descrs[0], PyArray_DESCR(out)))) { need_outer_iterator = 1; } /* If input and output overlap in memory, use iterator to figure it out */ @@ -3143,7 +3088,6 @@ PyUFunc_Accumulate(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out, npy_uint32 flags = NPY_ITER_ZEROSIZE_OK| NPY_ITER_REFS_OK| NPY_ITER_COPY_IF_OVERLAP; - PyArray_Descr **op_dtypes_param = NULL; /* * The way accumulate is set up, we can't do buffering, @@ -3160,13 +3104,11 @@ PyUFunc_Accumulate(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out, */ op_flags[0] |= NPY_ITER_UPDATEIFCOPY|NPY_ITER_ALIGNED|NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE; op_flags[1] |= NPY_ITER_COPY|NPY_ITER_ALIGNED|NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE; - op_dtypes_param = op_dtypes; - op_dtypes[1] = op_dtypes[0]; + NPY_UF_DBG_PRINT("Allocating outer iterator\n"); iter = NpyIter_AdvancedNew(2, op, flags, NPY_KEEPORDER, NPY_UNSAFE_CASTING, - op_flags, 
- op_dtypes_param, + op_flags, descrs, ndim_iter, op_axes, NULL, 0); if (iter == NULL) { goto fail; @@ -3184,14 +3126,14 @@ PyUFunc_Accumulate(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out, } } - /* Get the output */ + /* Get the output from the iterator if it was allocated */ if (out == NULL) { if (iter) { op[0] = out = NpyIter_GetOperandArray(iter)[0]; Py_INCREF(out); } else { - PyArray_Descr *dtype = op_dtypes[0]; + PyArray_Descr *dtype = descrs[0]; Py_INCREF(dtype); op[0] = out = (PyArrayObject *)PyArray_NewFromDescr( &PyArray_Type, dtype, @@ -3200,10 +3142,31 @@ PyUFunc_Accumulate(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out, if (out == NULL) { goto fail; } - } } + npy_intp fixed_strides[3]; + if (need_outer_iterator) { + NpyIter_GetInnerFixedStrideArray(iter, fixed_strides); + } + else { + fixed_strides[0] = PyArray_STRIDES(op[0])[axis]; + fixed_strides[1] = PyArray_STRIDES(op[1])[axis]; + fixed_strides[2] = fixed_strides[0]; + } + + + NPY_ARRAYMETHOD_FLAGS flags = 0; + if (ufuncimpl->get_strided_loop(&context, + 1, 0, fixed_strides, &strided_loop, &auxdata, &flags) < 0) { + goto fail; + } + needs_api = (flags & NPY_METH_REQUIRES_PYAPI) != 0; + if (!(flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) { + /* Start with the floating-point exception flags cleared */ + npy_clear_floatstatus_barrier((char*)&iter); + } + /* * If the reduction axis has size zero, either return the reduction * unit for UFUNC_REDUCE, or return the zero-sized output array @@ -3224,7 +3187,7 @@ PyUFunc_Accumulate(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out, NpyIter_IterNextFunc *iternext; char **dataptr; - int itemsize = op_dtypes[0]->elsize; + int itemsize = descrs[0]->elsize; /* Get the variables needed for the loop */ iternext = NpyIter_GetIterNext(iter, NULL); @@ -3232,8 +3195,7 @@ PyUFunc_Accumulate(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out, goto fail; } dataptr = NpyIter_GetDataPtrArray(iter); - needs_api = NpyIter_IterationNeedsAPI(iter); - + needs_api |= NpyIter_IterationNeedsAPI(iter); /* Execute the loop with just the outer iterator */ count_m1 = PyArray_DIM(op[1], axis)-1; @@ -3247,7 +3209,9 @@ PyUFunc_Accumulate(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out, stride_copy[1] = stride1; stride_copy[2] = stride0; - NPY_BEGIN_THREADS_NDITER(iter); + if (!needs_api) { + NPY_BEGIN_THREADS_THRESHOLDED(NpyIter_GetIterSize(iter)); + } do { dataptr_copy[0] = dataptr[0]; @@ -3260,7 +3224,7 @@ PyUFunc_Accumulate(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out, * Output (dataptr[0]) and input (dataptr[1]) may point to * the same memory, e.g. np.add.accumulate(a, out=a). */ - if (otype == NPY_OBJECT) { + if (descrs[2]->type_num == NPY_OBJECT) { /* * Incref before decref to avoid the possibility of the * reference count being zero temporarily. 
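+ *
+ * (Python-level illustration of the aliasing mentioned above; output
+ *  and input may share memory:
+ *
+ *      >>> a = np.arange(5)
+ *      >>> np.add.accumulate(a, out=a)
+ *      array([ 0,  1,  3,  6, 10])
+ * )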
@@ -3280,18 +3244,17 @@ PyUFunc_Accumulate(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out, dataptr_copy[2] += stride0; NPY_UF_DBG_PRINT1("iterator loop count %d\n", (int)count_m1); - innerloop(dataptr_copy, &count_m1, - stride_copy, innerloopdata); + res = strided_loop(&context, + dataptr_copy, &count_m1, stride_copy, auxdata); } - } while (!(needs_api && PyErr_Occurred()) && iternext(iter)); + } while (res == 0 && iternext(iter)); NPY_END_THREADS; } else if (iter == NULL) { char *dataptr_copy[3]; - npy_intp stride_copy[3]; - int itemsize = op_dtypes[0]->elsize; + int itemsize = descrs[0]->elsize; /* Execute the loop with no iterators */ npy_intp count = PyArray_DIM(op[1], axis); @@ -3305,15 +3268,11 @@ PyUFunc_Accumulate(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out, PyArray_NDIM(op[0]))) { PyErr_SetString(PyExc_ValueError, "provided out is the wrong size " - "for the reduction"); + "for the accumulation."); goto fail; } stride0 = PyArray_STRIDE(op[0], axis); - stride_copy[0] = stride0; - stride_copy[1] = stride1; - stride_copy[2] = stride0; - /* Turn the two items into three for the inner loop */ dataptr_copy[0] = PyArray_BYTES(op[0]); dataptr_copy[1] = PyArray_BYTES(op[1]); @@ -3325,7 +3284,7 @@ PyUFunc_Accumulate(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out, * Output (dataptr[0]) and input (dataptr[1]) may point to the * same memory, e.g. np.add.accumulate(a, out=a). */ - if (otype == NPY_OBJECT) { + if (descrs[2]->type_num == NPY_OBJECT) { /* * Incref before decref to avoid the possibility of the * reference count being zero temporarily. @@ -3346,25 +3305,34 @@ PyUFunc_Accumulate(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out, NPY_UF_DBG_PRINT1("iterator loop count %d\n", (int)count); - needs_api = PyDataType_REFCHK(op_dtypes[0]); + needs_api = PyDataType_REFCHK(descrs[0]); if (!needs_api) { NPY_BEGIN_THREADS_THRESHOLDED(count); } - innerloop(dataptr_copy, &count, - stride_copy, innerloopdata); + res = strided_loop(&context, + dataptr_copy, &count, fixed_strides, auxdata); NPY_END_THREADS; } } finish: - Py_XDECREF(op_dtypes[0]); - int res = 0; + NPY_AUXDATA_FREE(auxdata); + Py_DECREF(descrs[0]); + Py_DECREF(descrs[1]); + Py_DECREF(descrs[2]); + if (!NpyIter_Deallocate(iter)) { res = -1; } + + if (res == 0 && !(flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) { + /* NOTE: We could check float errors even when `res < 0` */ + res = _check_ufunc_fperr(errormask, NULL, "accumulate"); + } + if (res < 0) { Py_DECREF(out); return NULL; @@ -3374,7 +3342,11 @@ finish: fail: Py_XDECREF(out); - Py_XDECREF(op_dtypes[0]); + + NPY_AUXDATA_FREE(auxdata); + Py_XDECREF(descrs[0]); + Py_XDECREF(descrs[1]); + Py_XDECREF(descrs[2]); NpyIter_Deallocate(iter); @@ -3399,28 +3371,31 @@ fail: * indices[1::2] = range(1,len(array)) * * output shape is based on the size of indices + * + * TODO: Reduceat duplicates too much code from accumulate! 
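+ *
+ * Illustrative Python-level example (not part of this change):
+ *
+ *     >>> a = np.arange(8)
+ *     >>> np.add.reduceat(a, [0, 4, 6])   # reduces a[0:4], a[4:6], a[6:]
+ *     array([ 6,  9, 13])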
*/ static PyObject * PyUFunc_Reduceat(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *ind, - PyArrayObject *out, int axis, int otype) + PyArrayObject *out, int axis, PyArray_DTypeMeta *signature[3]) { PyArrayObject *op[3]; - PyArray_Descr *op_dtypes[3] = {NULL, NULL, NULL}; int op_axes_arrays[3][NPY_MAXDIMS]; int *op_axes[3] = {op_axes_arrays[0], op_axes_arrays[1], op_axes_arrays[2]}; npy_uint32 op_flags[3]; - int idim, ndim, otype_final; - int need_outer_iterator = 0; + int idim, ndim; + int needs_api, need_outer_iterator = 0; + + int res = 0; NpyIter *iter = NULL; + PyArrayMethod_StridedLoop *strided_loop; + NpyAuxData *auxdata = NULL; + /* The reduceat indices - ind must be validated outside this call */ npy_intp *reduceat_ind; npy_intp i, ind_size, red_axis_size; - /* The selected inner loop */ - PyUFuncGenericFunction innerloop = NULL; - void *innerloopdata = NULL; const char *ufunc_name = ufunc_get_name_cstr(ufunc); char *opname = "reduceat"; @@ -3460,42 +3435,32 @@ PyUFunc_Reduceat(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *ind, /* Take a reference to out for later returning */ Py_XINCREF(out); - otype_final = otype; - if (get_binary_op_function(ufunc, &otype_final, - &innerloop, &innerloopdata) < 0) { - PyArray_Descr *dtype = PyArray_DescrFromType(otype); - PyErr_Format(PyExc_ValueError, - "could not find a matching type for %s.%s, " - "requested type has type code '%c'", - ufunc_name, opname, dtype ? dtype->type : '-'); - Py_XDECREF(dtype); - goto fail; + PyArray_Descr *descrs[3]; + PyArrayMethodObject *ufuncimpl = reducelike_promote_and_resolve(ufunc, + arr, out, signature, NPY_TRUE, descrs, "reduceat"); + if (ufuncimpl == NULL) { + return NULL; } - ndim = PyArray_NDIM(arr); + /* The below code assumes that all descriptors are identical: */ + assert(descrs[0] == descrs[1] && descrs[0] == descrs[2]); - /* - * Set up the output data type, using the input's exact - * data type if the type number didn't change to preserve - * metadata - */ - if (PyArray_DESCR(arr)->type_num == otype_final) { - if (PyArray_ISNBO(PyArray_DESCR(arr)->byteorder)) { - op_dtypes[0] = PyArray_DESCR(arr); - Py_INCREF(op_dtypes[0]); - } - else { - op_dtypes[0] = PyArray_DescrNewByteorder(PyArray_DESCR(arr), - NPY_NATIVE); - } - } - else { - op_dtypes[0] = PyArray_DescrFromType(otype_final); - } - if (op_dtypes[0] == NULL) { + if (PyDataType_REFCHK(descrs[2]) && descrs[2]->type_num != NPY_OBJECT) { + /* This can be removed, but the initial element copy needs fixing */ + PyErr_SetString(PyExc_TypeError, + "reduceat currently only supports `object` dtype with " + "references"); goto fail; } + PyArrayMethod_Context context = { + .caller = (PyObject *)ufunc, + .method = ufuncimpl, + .descriptors = descrs, + }; + + ndim = PyArray_NDIM(arr); + #if NPY_UF_DBG_TRACING printf("Found %s.%s inner loop with dtype : ", ufunc_name, opname); PyObject_Print((PyObject *)op_dtypes[0], stdout, 0); @@ -3522,11 +3487,13 @@ PyUFunc_Reduceat(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *ind, op[2] = ind; if (out != NULL || ndim > 1 || !PyArray_ISALIGNED(arr) || - !PyArray_EquivTypes(op_dtypes[0], PyArray_DESCR(arr))) { + !PyArray_EquivTypes(descrs[0], PyArray_DESCR(arr))) { need_outer_iterator = 1; } if (need_outer_iterator) { + PyArray_Descr *op_dtypes[3] = {descrs[0], descrs[1], NULL}; + npy_uint32 flags = NPY_ITER_ZEROSIZE_OK| NPY_ITER_REFS_OK| NPY_ITER_MULTI_INDEX| @@ -3555,8 +3522,7 @@ PyUFunc_Reduceat(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *ind, NPY_UF_DBG_PRINT("Allocating 
outer iterator\n"); iter = NpyIter_AdvancedNew(3, op, flags, NPY_KEEPORDER, NPY_UNSAFE_CASTING, - op_flags, - op_dtypes, + op_flags, op_dtypes, ndim, op_axes, NULL, 0); if (iter == NULL) { goto fail; @@ -3580,11 +3546,15 @@ PyUFunc_Reduceat(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *ind, Py_INCREF(out); } } - /* Allocate the output for when there's no outer iterator */ - else if (out == NULL) { - Py_INCREF(op_dtypes[0]); + else { + /* + * Allocate the output for when there's no outer iterator, we always + * use the outer_iteration path when `out` is passed. + */ + assert(out == NULL); + Py_INCREF(descrs[0]); op[0] = out = (PyArrayObject *)PyArray_NewFromDescr( - &PyArray_Type, op_dtypes[0], + &PyArray_Type, descrs[0], 1, &ind_size, NULL, NULL, 0, NULL); if (out == NULL) { @@ -3592,6 +3562,28 @@ PyUFunc_Reduceat(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *ind, } } + npy_intp fixed_strides[3]; + if (need_outer_iterator) { + NpyIter_GetInnerFixedStrideArray(iter, fixed_strides); + } + else { + fixed_strides[1] = PyArray_STRIDES(op[1])[axis]; + } + /* The reduce axis does not advance here in the strided-loop */ + fixed_strides[0] = 0; + fixed_strides[2] = 0; + + NPY_ARRAYMETHOD_FLAGS flags = 0; + if (ufuncimpl->get_strided_loop(&context, + 1, 0, fixed_strides, &strided_loop, &auxdata, &flags) < 0) { + goto fail; + } + needs_api = (flags & NPY_METH_REQUIRES_PYAPI) != 0; + if (!(flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) { + /* Start with the floating-point exception flags cleared */ + npy_clear_floatstatus_barrier((char*)&iter); + } + /* * If the output has zero elements, return now. */ @@ -3609,8 +3601,8 @@ PyUFunc_Reduceat(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *ind, npy_intp stride0, stride1; npy_intp stride0_ind = PyArray_STRIDE(op[0], axis); - int itemsize = op_dtypes[0]->elsize; - int needs_api = NpyIter_IterationNeedsAPI(iter); + int itemsize = descrs[0]->elsize; + needs_api |= NpyIter_IterationNeedsAPI(iter); /* Get the variables needed for the loop */ iternext = NpyIter_GetIterNext(iter, NULL); @@ -3630,10 +3622,11 @@ PyUFunc_Reduceat(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *ind, stride_copy[1] = stride1; stride_copy[2] = stride0; - NPY_BEGIN_THREADS_NDITER(iter); + if (!needs_api) { + NPY_BEGIN_THREADS_THRESHOLDED(NpyIter_GetIterSize(iter)); + } do { - for (i = 0; i < ind_size; ++i) { npy_intp start = reduceat_ind[i], end = (i == ind_size-1) ? count_m1+1 : @@ -3651,7 +3644,7 @@ PyUFunc_Reduceat(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *ind, * to the same memory, e.g. * np.add.reduceat(a, np.arange(len(a)), out=a). */ - if (otype == NPY_OBJECT) { + if (descrs[2]->type_num == NPY_OBJECT) { /* * Incref before decref to avoid the possibility of * the reference count being zero temporarily. 
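+ *
+ * (Python-level illustration of that aliasing case, where every slice
+ *  reduces to a single element written back into `a`:
+ *
+ *      >>> a = np.arange(4)
+ *      >>> np.add.reduceat(a, np.arange(4), out=a)
+ *      array([0, 1, 2, 3])
+ * )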
@@ -3671,33 +3664,24 @@ PyUFunc_Reduceat(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *ind, dataptr_copy[1] += stride1; NPY_UF_DBG_PRINT1("iterator loop count %d\n", (int)count); - innerloop(dataptr_copy, &count, - stride_copy, innerloopdata); + res = strided_loop(&context, + dataptr_copy, &count, stride_copy, auxdata); } } - } while (!(needs_api && PyErr_Occurred()) && iternext(iter)); + } while (res == 0 && iternext(iter)); NPY_END_THREADS; } else if (iter == NULL) { char *dataptr_copy[3]; - npy_intp stride_copy[3]; - int itemsize = op_dtypes[0]->elsize; + int itemsize = descrs[0]->elsize; npy_intp stride0_ind = PyArray_STRIDE(op[0], axis); - - /* Execute the loop with no iterators */ - npy_intp stride0 = 0, stride1 = PyArray_STRIDE(op[1], axis); - - int needs_api = PyDataType_REFCHK(op_dtypes[0]); + npy_intp stride1 = PyArray_STRIDE(op[1], axis); NPY_UF_DBG_PRINT("UFunc: Reduce loop with no iterators\n"); - stride_copy[0] = stride0; - stride_copy[1] = stride1; - stride_copy[2] = stride0; - if (!needs_api) { NPY_BEGIN_THREADS; } @@ -3719,7 +3703,7 @@ PyUFunc_Reduceat(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *ind, * the same memory, e.g. * np.add.reduceat(a, np.arange(len(a)), out=a). */ - if (otype == NPY_OBJECT) { + if (descrs[2]->type_num == NPY_OBJECT) { /* * Incref before decref to avoid the possibility of the * reference count being zero temporarily. @@ -3739,8 +3723,11 @@ PyUFunc_Reduceat(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *ind, dataptr_copy[1] += stride1; NPY_UF_DBG_PRINT1("iterator loop count %d\n", (int)count); - innerloop(dataptr_copy, &count, - stride_copy, innerloopdata); + res = strided_loop(&context, + dataptr_copy, &count, fixed_strides, auxdata); + if (res != 0) { + break; + } } } @@ -3748,8 +3735,21 @@ PyUFunc_Reduceat(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *ind, } finish: - Py_XDECREF(op_dtypes[0]); + NPY_AUXDATA_FREE(auxdata); + Py_DECREF(descrs[0]); + Py_DECREF(descrs[1]); + Py_DECREF(descrs[2]); + if (!NpyIter_Deallocate(iter)) { + res = -1; + } + + if (res == 0 && !(flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) { + /* NOTE: We could check float errors even when `res < 0` */ + res = _check_ufunc_fperr(errormask, NULL, "reduceat"); + } + + if (res < 0) { Py_DECREF(out); return NULL; } @@ -3758,9 +3758,14 @@ finish: fail: Py_XDECREF(out); - Py_XDECREF(op_dtypes[0]); + + NPY_AUXDATA_FREE(auxdata); + Py_XDECREF(descrs[0]); + Py_XDECREF(descrs[1]); + Py_XDECREF(descrs[2]); NpyIter_Deallocate(iter); + return NULL; } @@ -3858,7 +3863,7 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, PyArrayObject *mp = NULL, *wheremask = NULL, *ret = NULL; PyObject *op = NULL; PyArrayObject *indices = NULL; - PyArray_Descr *otype = NULL; + PyArray_DTypeMeta *signature[3] = {NULL, NULL, NULL}; PyArrayObject *out = NULL; int keepdims = 0; PyObject *initial = NULL; @@ -4002,13 +4007,10 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, } if (otype_obj && otype_obj != Py_None) { /* Use `_get_dtype` because `dtype` is a DType and not the instance */ - PyArray_DTypeMeta *dtype = _get_dtype(otype_obj); - if (dtype == NULL) { + signature[0] = _get_dtype(otype_obj); + if (signature[0] == NULL) { goto fail; } - otype = dtype->singleton; - Py_INCREF(otype); - Py_DECREF(dtype); } if (out_obj && !PyArray_OutputConverter(out_obj, &out)) { goto fail; @@ -4028,15 +4030,6 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, ndim = PyArray_NDIM(mp); - /* Check to see that type (and otype) is not FLEXIBLE */ - if (PyArray_ISFLEXIBLE(mp) || - (otype && 
PyTypeNum_ISFLEXIBLE(otype->type_num))) { - PyErr_Format(PyExc_TypeError, - "cannot perform %s with flexible type", - _reduce_type[operation]); - goto fail; - } - /* Convert the 'axis' parameter into a list of axes */ if (axes_obj == NULL) { /* apply defaults */ @@ -4099,14 +4092,12 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, } /* - * If out is specified it determines otype - * unless otype already specified. + * If no dtype is specified and out is not specified, we override the + * integer and bool dtype used for add and multiply. + * + * TODO: The following should be handled by a promoter! */ - if (otype == NULL && out != NULL) { - otype = PyArray_DESCR(out); - Py_INCREF(otype); - } - if (otype == NULL) { + if (signature[0] == NULL && out == NULL) { /* * For integer types --- make sure at least a long * is used for add and multiply reduction to avoid overflow @@ -4126,16 +4117,17 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, typenum = NPY_LONG; } } + signature[0] = PyArray_DTypeFromTypeNum(typenum); } - otype = PyArray_DescrFromType(typenum); } - + Py_XINCREF(signature[0]); + signature[2] = signature[0]; switch(operation) { case UFUNC_REDUCE: - ret = PyUFunc_Reduce(ufunc, mp, out, naxes, axes, - otype, keepdims, initial, wheremask); - Py_XDECREF(wheremask); + ret = PyUFunc_Reduce(ufunc, + mp, out, naxes, axes, signature, keepdims, initial, wheremask); + Py_XSETREF(wheremask, NULL); break; case UFUNC_ACCUMULATE: if (ndim == 0) { @@ -4147,8 +4139,8 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, "accumulate does not allow multiple axes"); goto fail; } - ret = (PyArrayObject *)PyUFunc_Accumulate(ufunc, mp, out, axes[0], - otype->type_num); + ret = (PyArrayObject *)PyUFunc_Accumulate(ufunc, + mp, out, axes[0], signature); break; case UFUNC_REDUCEAT: if (ndim == 0) { @@ -4161,19 +4153,22 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, goto fail; } ret = (PyArrayObject *)PyUFunc_Reduceat(ufunc, - mp, indices, out, axes[0], otype->type_num); + mp, indices, out, axes[0], signature); Py_SETREF(indices, NULL); break; } + if (ret == NULL) { + goto fail; + } + + Py_DECREF(signature[0]); + Py_DECREF(signature[1]); + Py_DECREF(signature[2]); + Py_DECREF(mp); - Py_DECREF(otype); Py_XDECREF(full_args.in); Py_XDECREF(full_args.out); - if (ret == NULL) { - return NULL; - } - /* Wrap and return the output */ { /* Find __array_wrap__ - note that these rules are different to the @@ -4201,7 +4196,10 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, } fail: - Py_XDECREF(otype); + Py_XDECREF(signature[0]); + Py_XDECREF(signature[1]); + Py_XDECREF(signature[2]); + Py_XDECREF(mp); Py_XDECREF(wheremask); Py_XDECREF(indices); @@ -5576,8 +5574,7 @@ PyUFunc_RegisterLoopForType(PyUFuncObject *ufunc, /* Get entry for this user-defined type*/ cobj = PyDict_GetItemWithError(ufunc->userloops, key); if (cobj == NULL && PyErr_Occurred()) { - Py_DECREF(key); - return 0; + goto fail; } /* If it's not there, then make one and return. 
*/ else if (cobj == NULL) { @@ -5883,15 +5880,13 @@ ufunc_at(PyUFuncObject *ufunc, PyObject *args) PyArrayObject *op2_array = NULL; PyArrayMapIterObject *iter = NULL; PyArrayIterObject *iter2 = NULL; - PyArray_Descr *dtypes[3] = {NULL, NULL, NULL}; PyArrayObject *operands[3] = {NULL, NULL, NULL}; PyArrayObject *array_operands[3] = {NULL, NULL, NULL}; - int needs_api = 0; + PyArray_DTypeMeta *signature[3] = {NULL, NULL, NULL}; + PyArray_DTypeMeta *operand_DTypes[3] = {NULL, NULL, NULL}; + PyArray_Descr *operation_descrs[3] = {NULL, NULL, NULL}; - PyUFuncGenericFunction innerloop; - void *innerloopdata; - npy_intp i; int nop; /* override vars */ @@ -5904,6 +5899,10 @@ ufunc_at(PyUFuncObject *ufunc, PyObject *args) int buffersize; int errormask = 0; char * err_msg = NULL; + + PyArrayMethod_StridedLoop *strided_loop; + NpyAuxData *auxdata = NULL; + NPY_BEGIN_THREADS_DEF; if (ufunc->nin > 2) { @@ -5991,26 +5990,51 @@ ufunc_at(PyUFuncObject *ufunc, PyObject *args) /* * Create dtypes array for either one or two input operands. - * The output operand is set to the first input operand + * Compare to the logic in `convert_ufunc_arguments`. + * TODO: It may be good to review some of this behaviour, since the + * operand array is special (it is written to) similar to reductions. + * Using unsafe-casting as done here, is likely not desirable. */ operands[0] = op1_array; + operand_DTypes[0] = NPY_DTYPE(PyArray_DESCR(op1_array)); + Py_INCREF(operand_DTypes[0]); + int force_legacy_promotion = 0; + int allow_legacy_promotion = NPY_DT_is_legacy(operand_DTypes[0]); + if (op2_array != NULL) { operands[1] = op2_array; - operands[2] = op1_array; + operand_DTypes[1] = NPY_DTYPE(PyArray_DESCR(op2_array)); + Py_INCREF(operand_DTypes[1]); + allow_legacy_promotion &= NPY_DT_is_legacy(operand_DTypes[1]); + operands[2] = operands[0]; + operand_DTypes[2] = operand_DTypes[0]; + Py_INCREF(operand_DTypes[2]); + nop = 3; + if (allow_legacy_promotion && ((PyArray_NDIM(op1_array) == 0) + != (PyArray_NDIM(op2_array) == 0))) { + /* both are legacy and only one is 0-D: force legacy */ + force_legacy_promotion = should_use_min_scalar(2, operands, 0, NULL); + } } else { - operands[1] = op1_array; + operands[1] = operands[0]; + operand_DTypes[1] = operand_DTypes[0]; + Py_INCREF(operand_DTypes[1]); operands[2] = NULL; nop = 2; } - if (ufunc->type_resolver(ufunc, NPY_UNSAFE_CASTING, - operands, NULL, dtypes) < 0) { + PyArrayMethodObject *ufuncimpl = promote_and_get_ufuncimpl(ufunc, + operands, signature, operand_DTypes, + force_legacy_promotion, allow_legacy_promotion); + if (ufuncimpl == NULL) { goto fail; } - if (ufunc->legacy_inner_loop_selector(ufunc, dtypes, - &innerloop, &innerloopdata, &needs_api) < 0) { + + /* Find the correct descriptors for the operation */ + if (resolve_descriptors(nop, ufunc, ufuncimpl, + operands, operation_descrs, signature, NPY_UNSAFE_CASTING) < 0) { goto fail; } @@ -6071,21 +6095,44 @@ ufunc_at(PyUFuncObject *ufunc, PyObject *args) NPY_ITER_GROWINNER| NPY_ITER_DELAY_BUFALLOC, NPY_KEEPORDER, NPY_UNSAFE_CASTING, - op_flags, dtypes, + op_flags, operation_descrs, -1, NULL, NULL, buffersize); if (iter_buffer == NULL) { goto fail; } - needs_api = needs_api | NpyIter_IterationNeedsAPI(iter_buffer); - iternext = NpyIter_GetIterNext(iter_buffer, NULL); if (iternext == NULL) { NpyIter_Deallocate(iter_buffer); goto fail; } + PyArrayMethod_Context context = { + .caller = (PyObject *)ufunc, + .method = ufuncimpl, + .descriptors = operation_descrs, + }; + + NPY_ARRAYMETHOD_FLAGS flags; + /* Use contiguous strides; 
if there is such a loop it may be faster */ + npy_intp strides[3] = { + operation_descrs[0]->elsize, operation_descrs[1]->elsize, 0}; + if (nop == 3) { + strides[2] = operation_descrs[2]->elsize; + } + + if (ufuncimpl->get_strided_loop(&context, 1, 0, strides, + &strided_loop, &auxdata, &flags) < 0) { + goto fail; + } + int needs_api = (flags & NPY_METH_REQUIRES_PYAPI) != 0; + needs_api |= NpyIter_IterationNeedsAPI(iter_buffer); + if (!(flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) { + /* Start with the floating-point exception flags cleared */ + npy_clear_floatstatus_barrier((char*)&iter); + } + if (!needs_api) { NPY_BEGIN_THREADS; } @@ -6094,14 +6141,13 @@ ufunc_at(PyUFuncObject *ufunc, PyObject *args) * Iterate over first and second operands and call ufunc * for each pair of inputs */ - i = iter->size; - while (i > 0) + int res = 0; + for (npy_intp i = iter->size; i > 0; i--) { char *dataptr[3]; char **buffer_dataptr; /* one element at a time, no stride required but read by innerloop */ - npy_intp count[3] = {1, 0xDEADBEEF, 0xDEADBEEF}; - npy_intp stride[3] = {0xDEADBEEF, 0xDEADBEEF, 0xDEADBEEF}; + npy_intp count = 1; /* * Set up data pointers for either one or two input operands. @@ -6120,14 +6166,14 @@ ufunc_at(PyUFuncObject *ufunc, PyObject *args) /* Reset NpyIter data pointers which will trigger a buffer copy */ NpyIter_ResetBasePointers(iter_buffer, dataptr, &err_msg); if (err_msg) { + res = -1; break; } buffer_dataptr = NpyIter_GetDataPtrArray(iter_buffer); - innerloop(buffer_dataptr, count, stride, innerloopdata); - - if (needs_api && PyErr_Occurred()) { + res = strided_loop(&context, buffer_dataptr, &count, strides, auxdata); + if (res != 0) { break; } @@ -6141,27 +6187,35 @@ ufunc_at(PyUFuncObject *ufunc, PyObject *args) if (iter2 != NULL) { PyArray_ITER_NEXT(iter2); } - - i--; } NPY_END_THREADS; - if (err_msg) { + if (res != 0 && err_msg) { PyErr_SetString(PyExc_ValueError, err_msg); } + if (res == 0 && !(flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) { + /* NOTE: We could check float errors even when `res < 0` */ + res = _check_ufunc_fperr(errormask, NULL, "at"); + } + NPY_AUXDATA_FREE(auxdata); NpyIter_Deallocate(iter_buffer); Py_XDECREF(op2_array); Py_XDECREF(iter); Py_XDECREF(iter2); - for (i = 0; i < 3; i++) { - Py_XDECREF(dtypes[i]); + for (int i = 0; i < 3; i++) { + Py_XDECREF(operation_descrs[i]); Py_XDECREF(array_operands[i]); } - if (needs_api && PyErr_Occurred()) { + /* + * An error should only be possible if needs_api is true or `res != 0`, + * but this is not strictly correct for old-style ufuncs + * (e.g. `power` released the GIL but manually set an Exception). + */ + if (res != 0 || PyErr_Occurred()) { return NULL; } else { @@ -6176,10 +6230,11 @@ fail: Py_XDECREF(op2_array); Py_XDECREF(iter); Py_XDECREF(iter2); - for (i = 0; i < 3; i++) { - Py_XDECREF(dtypes[i]); + for (int i = 0; i < 3; i++) { + Py_XDECREF(operation_descrs[i]); Py_XDECREF(array_operands[i]); } + NPY_AUXDATA_FREE(auxdata); return NULL; } diff --git a/numpy/core/src/umath/ufunc_type_resolution.c b/numpy/core/src/umath/ufunc_type_resolution.c index a7d536656..9ed923cf5 100644 --- a/numpy/core/src/umath/ufunc_type_resolution.c +++ b/numpy/core/src/umath/ufunc_type_resolution.c @@ -20,19 +20,18 @@ * * See LICENSE.txt for the license. 
 */
-#define _UMATHMODULE
-#define _MULTIARRAYMODULE
 #define NPY_NO_DEPRECATED_API NPY_API_VERSION
+#define _MULTIARRAYMODULE
+#define _UMATHMODULE
+
+#define PY_SSIZE_T_CLEAN
+#include <Python.h>
 
 // printf debug tracing
 #ifndef NPY_UF_DBG_TRACING
     #define NPY_UF_DBG_TRACING 0
 #endif
 
-#include <stdbool.h>
-
-#include "Python.h"
-
 #include "npy_config.h"
 #include "npy_pycompat.h"
 #include "npy_import.h"
@@ -48,6 +47,8 @@
 #include "cblasfuncs.h"
 #endif
 
+#include <stdbool.h>
+
 static PyObject *
 npy_casting_to_py_object(NPY_CASTING casting)
 {
@@ -246,6 +247,28 @@ PyUFunc_ValidateCasting(PyUFuncObject *ufunc,
 }
 
+/*
+ * Same as `PyUFunc_ValidateCasting` but only checks output casting.
+ */
+NPY_NO_EXPORT int
+PyUFunc_ValidateOutCasting(PyUFuncObject *ufunc,
+        NPY_CASTING casting, PyArrayObject **operands, PyArray_Descr **dtypes)
+{
+    int i, nin = ufunc->nin, nop = nin + ufunc->nout;
+
+    for (i = nin; i < nop; ++i) {
+        if (operands[i] == NULL) {
+            continue;
+        }
+        if (!PyArray_CanCastTypeTo(dtypes[i],
+                PyArray_DESCR(operands[i]), casting)) {
+            return raise_output_casting_error(
+                    ufunc, casting, dtypes[i], PyArray_DESCR(operands[i]), i);
+        }
+    }
+    return 0;
+}
+
 /*UFUNC_API
  *
  * This function applies the default type resolution rules
@@ -2141,6 +2164,10 @@ type_tuple_type_resolver(PyUFuncObject *self,
 * `signature=(None,)*nin + (dtype,)*nout`. If the signature matches that
 * exactly (could be relaxed but that is not necessary for backcompat),
 * we also try `signature=(dtype,)*(nin+nout)`.
+ * Since reductions pass in `(dtype, None, dtype)`, we broaden this to
+ * replacing all unspecified dtypes with the homogeneous output one.
+ * Note that this can (and often will) lead to unsafe casting.  This is
+ * normally rejected (but not currently for reductions!).
 * This used to be the main meaning for `dtype=dtype`, but some calls broke
 * the expectation, and changing it allows for `dtype=dtype` to be useful
 * for ufuncs like `np.ldexp` in the future while also normalizing it to
@@ -2159,13 +2186,12 @@ type_tuple_type_resolver(PyUFuncObject *self,
     if (homogeneous_type != NPY_NOTYPE) {
         for (int i = 0; i < nin; i++) {
             if (specified_types[i] != NPY_NOTYPE) {
-                homogeneous_type = NPY_NOTYPE;
-                break;
+                /* Never replace a specified type! */
+                continue;
             }
             specified_types[i] = homogeneous_type;
         }
-    }
-    if (homogeneous_type != NPY_NOTYPE) {
+        /* Try again with the homogeneous specified types. */
         res = type_tuple_type_resolver_core(self,
                 op, input_casting, casting, specified_types, any_object,
diff --git a/numpy/core/src/umath/ufunc_type_resolution.h b/numpy/core/src/umath/ufunc_type_resolution.h
index dd88a081a..84a2593f4 100644
--- a/numpy/core/src/umath/ufunc_type_resolution.h
+++ b/numpy/core/src/umath/ufunc_type_resolution.h
@@ -99,6 +99,10 @@ PyUFunc_DivmodTypeResolver(PyUFuncObject *ufunc,
                            PyObject *type_tup,
                            PyArray_Descr **out_dtypes);
 
+NPY_NO_EXPORT int
+PyUFunc_ValidateOutCasting(PyUFuncObject *ufunc,
+        NPY_CASTING casting, PyArrayObject **operands, PyArray_Descr **dtypes);
+
 /*
 * Does a linear search for the best inner loop of the ufunc.
* diff --git a/numpy/core/src/umath/umathmodule.c b/numpy/core/src/umath/umathmodule.c index 6a718889b..272555704 100644 --- a/numpy/core/src/umath/umathmodule.c +++ b/numpy/core/src/umath/umathmodule.c @@ -1,25 +1,17 @@ /* -*- c -*- */ - -/* - * vim:syntax=c - */ - -/* - ***************************************************************************** - ** INCLUDES ** - ***************************************************************************** - */ +/* vim:syntax=c */ /* * _UMATHMODULE IS needed in __ufunc_api.h, included from numpy/ufuncobject.h. * This is a mess and it would be nice to fix it. It has nothing to do with * __ufunc_api.c */ -#define _UMATHMODULE -#define _MULTIARRAYMODULE #define NPY_NO_DEPRECATED_API NPY_API_VERSION +#define _MULTIARRAYMODULE +#define _UMATHMODULE -#include "Python.h" +#define PY_SSIZE_T_CLEAN +#include <Python.h> #include "npy_config.h" @@ -30,6 +22,7 @@ #include "numpy/npy_math.h" #include "number.h" +#include "dispatching.h" static PyUFuncGenericFunction pyfunc_functions[] = {PyUFunc_On_Om}; @@ -313,5 +306,33 @@ int initumath(PyObject *m) return -1; } + /* + * Set up promoters for logical functions + * TODO: This should probably be done at a better place, or even in the + * code generator directly. + */ + s = _PyDict_GetItemStringWithError(d, "logical_and"); + if (s == NULL) { + return -1; + } + if (install_logical_ufunc_promoter(s) < 0) { + return -1; + } + + s = _PyDict_GetItemStringWithError(d, "logical_or"); + if (s == NULL) { + return -1; + } + if (install_logical_ufunc_promoter(s) < 0) { + return -1; + } + + s = _PyDict_GetItemStringWithError(d, "logical_xor"); + if (s == NULL) { + return -1; + } + if (install_logical_ufunc_promoter(s) < 0) { + return -1; + } return 0; } diff --git a/numpy/core/tests/data/generate_umath_validation_data.cpp b/numpy/core/tests/data/generate_umath_validation_data.cpp index 9d97ff4ab..418eae670 100644 --- a/numpy/core/tests/data/generate_umath_validation_data.cpp +++ b/numpy/core/tests/data/generate_umath_validation_data.cpp @@ -1,41 +1,46 @@ -#include<math.h> -#include<stdio.h> -#include<iostream> -#include<algorithm> -#include<vector> -#include<random> -#include<fstream> -#include<time.h> +#include <algorithm> +#include <fstream> +#include <iostream> +#include <math.h> +#include <random> +#include <stdio.h> +#include <time.h> +#include <vector> struct ufunc { std::string name; - double (*f32func) (double); - long double (*f64func) (long double); + double (*f32func)(double); + long double (*f64func)(long double); float f32ulp; float f64ulp; }; -template<typename T> -T RandomFloat(T a, T b) { - T random = ((T) rand()) / (T) RAND_MAX; +template <typename T> +T +RandomFloat(T a, T b) +{ + T random = ((T)rand()) / (T)RAND_MAX; T diff = b - a; T r = random * diff; return a + r; } -template<typename T> -void append_random_array(std::vector<T>& arr, T min, T max, size_t N) +template <typename T> +void +append_random_array(std::vector<T> &arr, T min, T max, size_t N) { for (size_t ii = 0; ii < N; ++ii) arr.emplace_back(RandomFloat<T>(min, max)); } -template<typename T1, typename T2> -std::vector<T1> computeTrueVal(const std::vector<T1>& in, T2(*mathfunc)(T2)) { +template <typename T1, typename T2> +std::vector<T1> +computeTrueVal(const std::vector<T1> &in, T2 (*mathfunc)(T2)) +{ std::vector<T1> out; for (T1 elem : in) { - T2 elem_d = (T2) elem; - T1 out_elem = (T1) mathfunc(elem_d); + T2 elem_d = (T2)elem; + T1 out_elem = (T1)mathfunc(elem_d); out.emplace_back(out_elem); } return out; @@ -49,17 +54,20 @@ std::vector<T1> 
computeTrueVal(const std::vector<T1>& in, T2(*mathfunc)(T2)) { #define MINDEN std::numeric_limits<T>::denorm_min() #define MINFLT std::numeric_limits<T>::min() #define MAXFLT std::numeric_limits<T>::max() -#define INF std::numeric_limits<T>::infinity() -#define qNAN std::numeric_limits<T>::quiet_NaN() -#define sNAN std::numeric_limits<T>::signaling_NaN() +#define INF std::numeric_limits<T>::infinity() +#define qNAN std::numeric_limits<T>::quiet_NaN() +#define sNAN std::numeric_limits<T>::signaling_NaN() -template<typename T> -std::vector<T> generate_input_vector(std::string func) { - std::vector<T> input = {MINDEN, -MINDEN, MINFLT, -MINFLT, MAXFLT, -MAXFLT, - INF, -INF, qNAN, sNAN, -1.0, 1.0, 0.0, -0.0}; +template <typename T> +std::vector<T> +generate_input_vector(std::string func) +{ + std::vector<T> input = {MINDEN, -MINDEN, MINFLT, -MINFLT, MAXFLT, + -MAXFLT, INF, -INF, qNAN, sNAN, + -1.0, 1.0, 0.0, -0.0}; // [-1.0, 1.0] - if ((func == "arcsin") || (func == "arccos") || (func == "arctanh")){ + if ((func == "arcsin") || (func == "arccos") || (func == "arctanh")) { append_random_array<T>(input, -1.0, 1.0, 700); } // (0.0, INF] @@ -98,57 +106,62 @@ std::vector<T> generate_input_vector(std::string func) { return input; } -int main() { - srand (42); +int +main() +{ + srand(42); std::vector<struct ufunc> umathfunc = { - {"sin",sin,sin,2.37,3.3}, - {"cos",cos,cos,2.36,3.38}, - {"tan",tan,tan,3.91,3.93}, - {"arcsin",asin,asin,3.12,2.55}, - {"arccos",acos,acos,2.1,1.67}, - {"arctan",atan,atan,2.3,2.52}, - {"sinh",sinh,sinh,1.55,1.89}, - {"cosh",cosh,cosh,2.48,1.97}, - {"tanh",tanh,tanh,1.38,1.19}, - {"arcsinh",asinh,asinh,1.01,1.48}, - {"arccosh",acosh,acosh,1.16,1.05}, - {"arctanh",atanh,atanh,1.45,1.46}, - {"cbrt",cbrt,cbrt,1.94,1.82}, - //{"exp",exp,exp,3.76,1.53}, - {"exp2",exp2,exp2,1.01,1.04}, - {"expm1",expm1,expm1,2.62,2.1}, - //{"log",log,log,1.84,1.67}, - {"log10",log10,log10,3.5,1.92}, - {"log1p",log1p,log1p,1.96,1.93}, - {"log2",log2,log2,2.12,1.84}, + {"sin", sin, sin, 2.37, 3.3}, + {"cos", cos, cos, 2.36, 3.38}, + {"tan", tan, tan, 3.91, 3.93}, + {"arcsin", asin, asin, 3.12, 2.55}, + {"arccos", acos, acos, 2.1, 1.67}, + {"arctan", atan, atan, 2.3, 2.52}, + {"sinh", sinh, sinh, 1.55, 1.89}, + {"cosh", cosh, cosh, 2.48, 1.97}, + {"tanh", tanh, tanh, 1.38, 1.19}, + {"arcsinh", asinh, asinh, 1.01, 1.48}, + {"arccosh", acosh, acosh, 1.16, 1.05}, + {"arctanh", atanh, atanh, 1.45, 1.46}, + {"cbrt", cbrt, cbrt, 1.94, 1.82}, + //{"exp",exp,exp,3.76,1.53}, + {"exp2", exp2, exp2, 1.01, 1.04}, + {"expm1", expm1, expm1, 2.62, 2.1}, + //{"log",log,log,1.84,1.67}, + {"log10", log10, log10, 3.5, 1.92}, + {"log1p", log1p, log1p, 1.96, 1.93}, + {"log2", log2, log2, 2.12, 1.84}, }; for (int ii = 0; ii < umathfunc.size(); ++ii) { - // ignore sin/cos + // ignore sin/cos if ((umathfunc[ii].name != "sin") && (umathfunc[ii].name != "cos")) { - std::string fileName = "umath-validation-set-" + umathfunc[ii].name + ".csv"; + std::string fileName = + "umath-validation-set-" + umathfunc[ii].name + ".csv"; std::ofstream txtOut; - txtOut.open (fileName, std::ofstream::trunc); + txtOut.open(fileName, std::ofstream::trunc); txtOut << "dtype,input,output,ulperrortol" << std::endl; // Single Precision auto f32in = generate_input_vector<float>(umathfunc[ii].name); - auto f32out = computeTrueVal<float, double>(f32in, umathfunc[ii].f32func); + auto f32out = computeTrueVal<float, double>(f32in, + umathfunc[ii].f32func); for (int jj = 0; jj < f32in.size(); ++jj) { - txtOut << "np.float32" << std::hex << - ",0x" << 
*reinterpret_cast<uint32_t*>(&f32in[jj]) << - ",0x" << *reinterpret_cast<uint32_t*>(&f32out[jj]) << - "," << ceil(umathfunc[ii].f32ulp) << std::endl; + txtOut << "np.float32" << std::hex << ",0x" + << *reinterpret_cast<uint32_t *>(&f32in[jj]) << ",0x" + << *reinterpret_cast<uint32_t *>(&f32out[jj]) << "," + << ceil(umathfunc[ii].f32ulp) << std::endl; } // Double Precision auto f64in = generate_input_vector<double>(umathfunc[ii].name); - auto f64out = computeTrueVal<double, long double>(f64in, umathfunc[ii].f64func); + auto f64out = computeTrueVal<double, long double>( + f64in, umathfunc[ii].f64func); for (int jj = 0; jj < f64in.size(); ++jj) { - txtOut << "np.float64" << std::hex << - ",0x" << *reinterpret_cast<uint64_t*>(&f64in[jj]) << - ",0x" << *reinterpret_cast<uint64_t*>(&f64out[jj]) << - "," << ceil(umathfunc[ii].f64ulp) << std::endl; + txtOut << "np.float64" << std::hex << ",0x" + << *reinterpret_cast<uint64_t *>(&f64in[jj]) << ",0x" + << *reinterpret_cast<uint64_t *>(&f64out[jj]) << "," + << ceil(umathfunc[ii].f64ulp) << std::endl; } txtOut.close(); } diff --git a/numpy/core/tests/test__exceptions.py b/numpy/core/tests/test__exceptions.py index c87412aa4..10b87e052 100644 --- a/numpy/core/tests/test__exceptions.py +++ b/numpy/core/tests/test__exceptions.py @@ -40,7 +40,7 @@ class TestArrayMemoryError: # 1023.9999 Mib should round to 1 GiB assert f(int(Ki*Ki*Ki*0.9999)) == '1.00 GiB' assert f(Ki*Ki*Ki*Ki*Ki*Ki) == '1.00 EiB' - # larger than sys.maxsize, adding larger prefices isn't going to help + # larger than sys.maxsize, adding larger prefixes isn't going to help # anyway. assert f(Ki*Ki*Ki*Ki*Ki*Ki*123456) == '123456. EiB' diff --git a/numpy/core/tests/test_array_coercion.py b/numpy/core/tests/test_array_coercion.py index 076d8e43f..293f5a68f 100644 --- a/numpy/core/tests/test_array_coercion.py +++ b/numpy/core/tests/test_array_coercion.py @@ -376,7 +376,7 @@ class TestScalarDiscovery: def test_scalar_to_int_coerce_does_not_cast(self, dtype): """ Signed integers are currently different in that they do not cast other - NumPy scalar, but instead use scalar.__int__(). The harcoded + NumPy scalar, but instead use scalar.__int__(). The hardcoded exception to this rule is `np.array(scalar, dtype=integer)`. """ dtype = np.dtype(dtype) @@ -444,7 +444,7 @@ class TestTimeScalars: # never use casting. This is because casting will error in this # case, and traditionally in most cases the behaviour is maintained # like this. (`np.array(scalar, dtype="U6")` would have failed before) - # TODO: This discrepency _should_ be resolved, either by relaxing the + # TODO: This discrepancy _should_ be resolved, either by relaxing the # cast, or by deprecating the first part. scalar = np.datetime64(val, unit) dtype = np.dtype(dtype) diff --git a/numpy/core/tests/test_arraymethod.py b/numpy/core/tests/test_arraymethod.py index b1bc79b80..49aa9f6df 100644 --- a/numpy/core/tests/test_arraymethod.py +++ b/numpy/core/tests/test_arraymethod.py @@ -3,6 +3,10 @@ This file tests the generic aspects of ArrayMethod. At the time of writing this is private API, but when added, public API may be added here. 
""" +import sys +import types +from typing import Any, Type + import pytest import numpy as np @@ -56,3 +60,35 @@ class TestSimpleStridedCall: # This is private API, which may be modified freely with pytest.raises(error): self.method._simple_strided_call(*args) + + +@pytest.mark.skipif(sys.version_info < (3, 9), reason="Requires python 3.9") +class TestClassGetItem: + @pytest.mark.parametrize( + "cls", [np.ndarray, np.recarray, np.chararray, np.matrix, np.memmap] + ) + def test_class_getitem(self, cls: Type[np.ndarray]) -> None: + """Test `ndarray.__class_getitem__`.""" + alias = cls[Any, Any] + assert isinstance(alias, types.GenericAlias) + assert alias.__origin__ is cls + + @pytest.mark.parametrize("arg_len", range(4)) + def test_subscript_tuple(self, arg_len: int) -> None: + arg_tup = (Any,) * arg_len + if arg_len == 2: + assert np.ndarray[arg_tup] + else: + with pytest.raises(TypeError): + np.ndarray[arg_tup] + + def test_subscript_scalar(self) -> None: + with pytest.raises(TypeError): + np.ndarray[Any] + + +@pytest.mark.skipif(sys.version_info >= (3, 9), reason="Requires python 3.8") +def test_class_getitem_38() -> None: + match = "Type subscription requires python >= 3.9" + with pytest.raises(TypeError, match=match): + np.ndarray[Any, Any] diff --git a/numpy/core/tests/test_arrayprint.py b/numpy/core/tests/test_arrayprint.py index 09cc79f72..25826d8ed 100644 --- a/numpy/core/tests/test_arrayprint.py +++ b/numpy/core/tests/test_arrayprint.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- import sys import gc from hypothesis import given diff --git a/numpy/core/tests/test_casting_unittests.py b/numpy/core/tests/test_casting_unittests.py index a13e807e2..cb4792090 100644 --- a/numpy/core/tests/test_casting_unittests.py +++ b/numpy/core/tests/test_casting_unittests.py @@ -9,7 +9,6 @@ than integration tests. import pytest import textwrap import enum -import itertools import random import numpy as np @@ -127,7 +126,7 @@ CAST_TABLE = _get_cancast_table() class TestChanges: """ - These test cases excercise some behaviour changes + These test cases exercise some behaviour changes """ @pytest.mark.parametrize("string", ["S", "U"]) @pytest.mark.parametrize("floating", ["e", "f", "d", "g"]) @@ -699,9 +698,14 @@ class TestCasting: else: assert_array_equal(expected, arr_NULLs.astype(dtype)) - def test_float_to_bool(self): - # test case corresponding to gh-19514 - # simple test for casting bool_ to float16 - res = np.array([0, 3, -7], dtype=np.int8).view(bool) + @pytest.mark.parametrize("dtype", + np.typecodes["AllInteger"] + np.typecodes["AllFloat"]) + def test_nonstandard_bool_to_other(self, dtype): + # simple test for casting bool_ to numeric types, which should not + # expose the detail that NumPy bools can sometimes take values other + # than 0 and 1. See also gh-19514. 
+ nonstandard_bools = np.array([0, 3, -7], dtype=np.int8).view(bool) + res = nonstandard_bools.astype(dtype) expected = [0, 1, 1] assert_array_equal(res, expected) + diff --git a/numpy/core/tests/test_cpu_dispatcher.py b/numpy/core/tests/test_cpu_dispatcher.py index 8712dee1a..2f7eac7e8 100644 --- a/numpy/core/tests/test_cpu_dispatcher.py +++ b/numpy/core/tests/test_cpu_dispatcher.py @@ -4,7 +4,7 @@ from numpy.testing import assert_equal def test_dispatcher(): """ - Testing the utilites of the CPU dispatcher + Testing the utilities of the CPU dispatcher """ targets = ( "SSE2", "SSE41", "AVX2", @@ -16,7 +16,7 @@ def test_dispatcher(): for feature in reversed(targets): # skip baseline features, by the default `CCompilerOpt` do not generate separated objects # for the baseline, just one object combined all of them via 'baseline' option - # within the configuration statments. + # within the configuration statements. if feature in __cpu_baseline__: continue # check compiler and running machine support diff --git a/numpy/core/tests/test_custom_dtypes.py b/numpy/core/tests/test_custom_dtypes.py index 5eb82bc93..6bcc45d6b 100644 --- a/numpy/core/tests/test_custom_dtypes.py +++ b/numpy/core/tests/test_custom_dtypes.py @@ -101,18 +101,52 @@ class TestSFloat: expected_view = a.view(np.float64) * b.view(np.float64) assert_array_equal(res.view(np.float64), expected_view) + def test_possible_and_impossible_reduce(self): + # For reductions to work, the first and last operand must have the + # same dtype. For this parametric DType that is not necessarily true. + a = self._get_array(2.) + # Addition reduction works (as of writing it requires passing initial + # because setting a scaled-float from the default `0` fails). + res = np.add.reduce(a, initial=0.) + assert res == a.astype(np.float64).sum() + + # But each multiplication changes the factor, so a reduction is not + # possible (the relaxed version of the old refusal to handle any + # flexible dtype). + with pytest.raises(TypeError, + match="the resolved dtypes are not compatible"): + np.multiply.reduce(a) + + def test_basic_ufunc_at(self): + float_a = np.array([1., 2., 3.]) + b = self._get_array(2.) + + float_b = b.view(np.float64).copy() + np.multiply.at(float_b, [1, 1, 1], float_a) + np.multiply.at(b, [1, 1, 1], float_a) + + assert_array_equal(b.view(np.float64), float_b) + def test_basic_multiply_promotion(self): float_a = np.array([1., 2., 3.]) b = self._get_array(2.) res1 = float_a * b res2 = b * float_a + # one factor is one, so we get the factor of b: assert res1.dtype == res2.dtype == b.dtype expected_view = float_a * b.view(np.float64) assert_array_equal(res1.view(np.float64), expected_view) assert_array_equal(res2.view(np.float64), expected_view) + # Check that promotion works when `out` is used: + np.multiply(b, float_a, out=res2) + with pytest.raises(TypeError): + # The promoter accepts this (maybe it should not), but the SFloat + # result cannot be cast to integer: + np.multiply(b, float_a, out=np.arange(3)) + def test_basic_addition(self): a = self._get_array(2.) b = self._get_array(4.) @@ -145,3 +179,23 @@ class TestSFloat: # Check that casting the output fails also (done by the ufunc here) with pytest.raises(TypeError): np.add(a, a, out=c, casting="safe") + + @pytest.mark.parametrize("ufunc", + [np.logical_and, np.logical_or, np.logical_xor]) + def test_logical_ufuncs_casts_to_bool(self, ufunc): + a = self._get_array(2.) + a[0] = 0. # make sure first element is considered False.
+ + float_equiv = a.astype(float) + expected = ufunc(float_equiv, float_equiv) + res = ufunc(a, a) + assert_array_equal(res, expected) + + # also check that the same works for reductions: + expected = ufunc.reduce(float_equiv) + res = ufunc.reduce(a) + assert_array_equal(res, expected) + + # The output casting does not match the bool, bool -> bool loop: + with pytest.raises(TypeError): + ufunc(a, a, out=np.empty(a.shape, dtype=int), casting="equiv") diff --git a/numpy/core/tests/test_datetime.py b/numpy/core/tests/test_datetime.py index 5a490646e..b95d669a8 100644 --- a/numpy/core/tests/test_datetime.py +++ b/numpy/core/tests/test_datetime.py @@ -63,6 +63,7 @@ class TestDateTime: assert_raises(TypeError, np.dtype, 'm7') assert_raises(TypeError, np.dtype, 'M16') assert_raises(TypeError, np.dtype, 'm16') + assert_raises(TypeError, np.dtype, 'M8[3000000000ps]') def test_datetime_casting_rules(self): # Cannot cast safely/same_kind between timedelta and datetime @@ -137,6 +138,42 @@ class TestDateTime: assert_(not np.can_cast('M8[h]', 'M8', casting='same_kind')) assert_(not np.can_cast('M8[h]', 'M8', casting='safe')) + def test_datetime_prefix_conversions(self): + # regression tests related to gh-19631; + # test metric prefixes from seconds down to + # attoseconds for bidirectional conversions + smaller_units = ['M8[7000ms]', + 'M8[2000us]', + 'M8[1000ns]', + 'M8[5000ns]', + 'M8[2000ps]', + 'M8[9000fs]', + 'M8[1000as]', + 'M8[2000000ps]', + 'M8[1000000as]', + 'M8[2000000000ps]', + 'M8[1000000000as]'] + larger_units = ['M8[7s]', + 'M8[2ms]', + 'M8[us]', + 'M8[5us]', + 'M8[2ns]', + 'M8[9ps]', + 'M8[1fs]', + 'M8[2us]', + 'M8[1ps]', + 'M8[2ms]', + 'M8[1ns]'] + for larger_unit, smaller_unit in zip(larger_units, smaller_units): + assert np.can_cast(larger_unit, smaller_unit, casting='safe') + assert np.can_cast(smaller_unit, larger_unit, casting='safe') + + @pytest.mark.parametrize("unit", [ + "s", "ms", "us", "ns", "ps", "fs", "as"]) + def test_prohibit_negative_datetime(self, unit): + with assert_raises(TypeError): + np.array([1], dtype=f"M8[-1{unit}]") + def test_compare_generic_nat(self): # regression tests for gh-6452 assert_(np.datetime64('NaT') != @@ -1992,6 +2029,21 @@ class TestDateTime: assert_equal(np.maximum.reduce(a), np.timedelta64(7, 's')) + def test_datetime_no_subtract_reducelike(self): + # subtracting two datetime64 works, but we cannot reduce it, since + # the result of that subtraction will have a different dtype. + arr = np.array(["2021-12-02", "2019-05-12"], dtype="M8[ms]") + msg = r"the resolved dtypes are not compatible with subtract\." + + with pytest.raises(TypeError, match=msg + "reduce"): + np.subtract.reduce(arr) + + with pytest.raises(TypeError, match=msg + "accumulate"): + np.subtract.accumulate(arr) + + with pytest.raises(TypeError, match=msg + "reduceat"): + np.subtract.reduceat(arr, [0]) + def test_datetime_busday_offset(self): # First Monday in June assert_equal( diff --git a/numpy/core/tests/test_deprecations.py b/numpy/core/tests/test_deprecations.py index 44c76e0b8..a1b379d92 100644 --- a/numpy/core/tests/test_deprecations.py +++ b/numpy/core/tests/test_deprecations.py @@ -791,7 +791,7 @@ class TestFutureWarningArrayLikeNotIterable(_DeprecationTestCase): *not* define the sequence protocol. NOTE: Tests for the versions including __len__ and __getitem__ exist - in `test_array_coercion.py` and they can be modified or ammended + in `test_array_coercion.py` and they can be modified or amended when this deprecation expired. 
""" blueprint = np.arange(10) @@ -1192,3 +1192,41 @@ class TestUFuncForcedDTypeWarning(_DeprecationTestCase): np.maximum(arr, arr, dtype="m8[ns]") # previously used the "ns" with pytest.warns(DeprecationWarning, match=self.message): np.maximum.reduce(arr, dtype="m8[ns]") # never preserved the "ns" + + +PARTITION_DICT = { + "partition method": np.arange(10).partition, + "argpartition method": np.arange(10).argpartition, + "partition function": lambda kth: np.partition(np.arange(10), kth), + "argpartition function": lambda kth: np.argpartition(np.arange(10), kth), +} + + +@pytest.mark.parametrize("func", PARTITION_DICT.values(), ids=PARTITION_DICT) +class TestPartitionBoolIndex(_DeprecationTestCase): + # Deprecated 2021-09-29, NumPy 1.22 + warning_cls = DeprecationWarning + message = "Passing booleans as partition index is deprecated" + + def test_deprecated(self, func): + self.assert_deprecated(lambda: func(True)) + self.assert_deprecated(lambda: func([False, True])) + + def test_not_deprecated(self, func): + self.assert_not_deprecated(lambda: func(1)) + self.assert_not_deprecated(lambda: func([0, 1])) + + +class TestMachAr(_DeprecationTestCase): + # Deprecated 2021-10-19, NumPy 1.22 + warning_cls = DeprecationWarning + + def test_deprecated(self): + self.assert_deprecated(lambda: np.MachAr) + + def test_deprecated_module(self): + self.assert_deprecated(lambda: getattr(np.core, "machar")) + + def test_deprecated_attr(self): + finfo = np.finfo(float) + self.assert_deprecated(lambda: getattr(finfo, "machar")) diff --git a/numpy/core/tests/test_dtype.py b/numpy/core/tests/test_dtype.py index 23269f01b..8fe859919 100644 --- a/numpy/core/tests/test_dtype.py +++ b/numpy/core/tests/test_dtype.py @@ -3,7 +3,8 @@ import operator import pytest import ctypes import gc -import warnings +import types +from typing import Any import numpy as np from numpy.core._rational_tests import rational @@ -111,9 +112,9 @@ class TestBuiltin: @pytest.mark.parametrize("dtype", ['Bool', 'Bytes0', 'Complex32', 'Complex64', 'Datetime64', 'Float16', 'Float32', 'Float64', - 'Int8', 'Int16', 'Int32', 'Int64', + 'Int8', 'Int16', 'Int32', 'Int64', 'Object0', 'Str0', 'Timedelta64', - 'UInt8', 'UInt16', 'Uint32', 'UInt32', + 'UInt8', 'UInt16', 'Uint32', 'UInt32', 'Uint64', 'UInt64', 'Void0', "Float128", "Complex128"]) def test_numeric_style_types_are_invalid(self, dtype): @@ -876,14 +877,24 @@ class TestString: ('bright', '>f4', (8, 36))])], align=True) assert_equal(str(dt), - "{'names':['top','bottom'], " - "'formats':[([('tiles', ('>f4', (64, 64)), (1,)), " - "('rtile', '>f4', (64, 36))], (3,))," - "[('bleft', ('>f4', (8, 64)), (1,)), " - "('bright', '>f4', (8, 36))]], " - "'offsets':[0,76800], " - "'itemsize':80000, " - "'aligned':True}") + "{'names': ['top', 'bottom']," + " 'formats': [([('tiles', ('>f4', (64, 64)), (1,)), " + "('rtile', '>f4', (64, 36))], (3,)), " + "[('bleft', ('>f4', (8, 64)), (1,)), " + "('bright', '>f4', (8, 36))]]," + " 'offsets': [0, 76800]," + " 'itemsize': 80000," + " 'aligned': True}") + with np.printoptions(legacy='1.21'): + assert_equal(str(dt), + "{'names':['top','bottom'], " + "'formats':[([('tiles', ('>f4', (64, 64)), (1,)), " + "('rtile', '>f4', (64, 36))], (3,))," + "[('bleft', ('>f4', (8, 64)), (1,)), " + "('bright', '>f4', (8, 36))]], " + "'offsets':[0,76800], " + "'itemsize':80000, " + "'aligned':True}") assert_equal(np.dtype(eval(str(dt))), dt) dt = np.dtype({'names': ['r', 'g', 'b'], 'formats': ['u1', 'u1', 'u1'], @@ -900,22 +911,22 @@ class TestString: 'titles': ['Color', 'Red pixel', 
'Green pixel', 'Blue pixel']}) assert_equal(str(dt), - "{'names':['rgba','r','g','b']," - " 'formats':['<u4','u1','u1','u1']," - " 'offsets':[0,0,1,2]," - " 'titles':['Color','Red pixel'," - "'Green pixel','Blue pixel']," - " 'itemsize':4}") + "{'names': ['rgba', 'r', 'g', 'b']," + " 'formats': ['<u4', 'u1', 'u1', 'u1']," + " 'offsets': [0, 0, 1, 2]," + " 'titles': ['Color', 'Red pixel', " + "'Green pixel', 'Blue pixel']," + " 'itemsize': 4}") dt = np.dtype({'names': ['r', 'b'], 'formats': ['u1', 'u1'], 'offsets': [0, 2], 'titles': ['Red pixel', 'Blue pixel']}) assert_equal(str(dt), - "{'names':['r','b']," - " 'formats':['u1','u1']," - " 'offsets':[0,2]," - " 'titles':['Red pixel','Blue pixel']," - " 'itemsize':3}") + "{'names': ['r', 'b']," + " 'formats': ['u1', 'u1']," + " 'offsets': [0, 2]," + " 'titles': ['Red pixel', 'Blue pixel']," + " 'itemsize': 3}") dt = np.dtype([('a', '<m8[D]'), ('b', '<M8[us]')]) assert_equal(str(dt), @@ -948,23 +959,23 @@ class TestString: 'titles': ['Color', 'Red pixel', 'Green pixel', 'Blue pixel']}, align=True) assert_equal(repr(dt), - "dtype({'names':['rgba','r','g','b']," - " 'formats':['<u4','u1','u1','u1']," - " 'offsets':[0,0,1,2]," - " 'titles':['Color','Red pixel'," - "'Green pixel','Blue pixel']," - " 'itemsize':4}, align=True)") + "dtype({'names': ['rgba', 'r', 'g', 'b']," + " 'formats': ['<u4', 'u1', 'u1', 'u1']," + " 'offsets': [0, 0, 1, 2]," + " 'titles': ['Color', 'Red pixel', " + "'Green pixel', 'Blue pixel']," + " 'itemsize': 4}, align=True)") dt = np.dtype({'names': ['r', 'b'], 'formats': ['u1', 'u1'], 'offsets': [0, 2], 'titles': ['Red pixel', 'Blue pixel'], 'itemsize': 4}) assert_equal(repr(dt), - "dtype({'names':['r','b'], " - "'formats':['u1','u1'], " - "'offsets':[0,2], " - "'titles':['Red pixel','Blue pixel'], " - "'itemsize':4})") + "dtype({'names': ['r', 'b'], " + "'formats': ['u1', 'u1'], " + "'offsets': [0, 2], " + "'titles': ['Red pixel', 'Blue pixel'], " + "'itemsize': 4})") def test_repr_structured_datetime(self): dt = np.dtype([('a', '<M8[D]'), ('b', '<m8[us]')]) @@ -1549,3 +1560,45 @@ class TestUserDType: # Tests that a dtype must have its type field set up to np.dtype # or in this case a builtin instance. create_custom_field_dtype(blueprint, mytype, 2) + + +@pytest.mark.skipif(sys.version_info < (3, 9), reason="Requires python 3.9") +class TestClassGetItem: + def test_dtype(self) -> None: + alias = np.dtype[Any] + assert isinstance(alias, types.GenericAlias) + assert alias.__origin__ is np.dtype + + @pytest.mark.parametrize("code", np.typecodes["All"]) + def test_dtype_subclass(self, code: str) -> None: + cls = type(np.dtype(code)) + alias = cls[Any] + assert isinstance(alias, types.GenericAlias) + assert alias.__origin__ is cls + + @pytest.mark.parametrize("arg_len", range(4)) + def test_subscript_tuple(self, arg_len: int) -> None: + arg_tup = (Any,) * arg_len + if arg_len == 1: + assert np.dtype[arg_tup] + else: + with pytest.raises(TypeError): + np.dtype[arg_tup] + + def test_subscript_scalar(self) -> None: + assert np.dtype[Any] + + +def test_result_type_integers_and_unitless_timedelta64(): + # Regression test for gh-20077. The following call of `result_type` + # would cause a seg. fault. 
+ td = np.timedelta64(4) + result = np.result_type(0, td) + assert_dtype_equal(result, td.dtype) + + +@pytest.mark.skipif(sys.version_info >= (3, 9), reason="Requires python 3.8") +def test_class_getitem_38() -> None: + match = "Type subscription requires python >= 3.9" + with pytest.raises(TypeError, match=match): + np.dtype[Any] diff --git a/numpy/core/tests/test_einsum.py b/numpy/core/tests/test_einsum.py index c697d0c2d..78c5e527b 100644 --- a/numpy/core/tests/test_einsum.py +++ b/numpy/core/tests/test_einsum.py @@ -1025,7 +1025,7 @@ class TestEinsumPath: self.assert_path_equal(path, ['einsum_path', (0, 1), (0, 1, 2, 3, 4, 5)]) def test_path_type_input(self): - # Test explicit path handeling + # Test explicit path handling path_test = self.build_operands('dcc,fce,ea,dbf->ab') path, path_str = np.einsum_path(*path_test, optimize=False) diff --git a/numpy/core/tests/test_getlimits.py b/numpy/core/tests/test_getlimits.py index de7b3e769..c5148db2c 100644 --- a/numpy/core/tests/test_getlimits.py +++ b/numpy/core/tests/test_getlimits.py @@ -46,7 +46,7 @@ class TestFinfo: [np.float16, np.float32, np.float64, np.complex64, np.complex128])) for dt1, dt2 in dts: - for attr in ('bits', 'eps', 'epsneg', 'iexp', 'machar', 'machep', + for attr in ('bits', 'eps', 'epsneg', 'iexp', 'machep', 'max', 'maxexp', 'min', 'minexp', 'negep', 'nexp', 'nmant', 'precision', 'resolution', 'tiny', 'smallest_normal', 'smallest_subnormal'): diff --git a/numpy/core/tests/test_machar.py b/numpy/core/tests/test_machar.py index 673f309f1..3a66ec51f 100644 --- a/numpy/core/tests/test_machar.py +++ b/numpy/core/tests/test_machar.py @@ -3,7 +3,7 @@ Test machar. Given recent changes to hardcode type data, we might want to get rid of both MachAr and this test at some point. """ -from numpy.core.machar import MachAr +from numpy.core._machar import MachAr import numpy.core.numerictypes as ntypes from numpy import errstate, array diff --git a/numpy/core/tests/test_mem_policy.py b/numpy/core/tests/test_mem_policy.py new file mode 100644 index 000000000..7fec8897f --- /dev/null +++ b/numpy/core/tests/test_mem_policy.py @@ -0,0 +1,396 @@ +import asyncio +import gc +import os +import pytest +import numpy as np +import threading +import warnings +from numpy.testing import extbuild, assert_warns +import sys + + +@pytest.fixture +def get_module(tmp_path): + """ Add a memory policy that returns a false pointer 64 bytes into the + actual allocation, and fill the prefix with some text. Then check at each + memory manipulation that the prefix exists, to make sure all alloc/realloc/ + free/calloc go via the functions here. 
+ """ + if sys.platform.startswith('cygwin'): + pytest.skip('link fails on cygwin') + functions = [ + ("set_secret_data_policy", "METH_NOARGS", """ + PyObject *secret_data = + PyCapsule_New(&secret_data_handler, "mem_handler", NULL); + if (secret_data == NULL) { + return NULL; + } + PyObject *old = PyDataMem_SetHandler(secret_data); + Py_DECREF(secret_data); + return old; + """), + ("set_old_policy", "METH_O", """ + PyObject *old; + if (args != NULL && PyCapsule_CheckExact(args)) { + old = PyDataMem_SetHandler(args); + } + else { + old = PyDataMem_SetHandler(NULL); + } + if (old == NULL) { + return NULL; + } + Py_DECREF(old); + Py_RETURN_NONE; + """), + ("get_array", "METH_NOARGS", """ + char *buf = (char *)malloc(20); + npy_intp dims[1]; + dims[0] = 20; + PyArray_Descr *descr = PyArray_DescrNewFromType(NPY_UINT8); + return PyArray_NewFromDescr(&PyArray_Type, descr, 1, dims, NULL, + buf, NPY_ARRAY_WRITEABLE, NULL); + """), + ("set_own", "METH_O", """ + if (!PyArray_Check(args)) { + PyErr_SetString(PyExc_ValueError, + "need an ndarray"); + return NULL; + } + PyArray_ENABLEFLAGS((PyArrayObject*)args, NPY_ARRAY_OWNDATA); + // Maybe try this too? + // PyArray_BASE(PyArrayObject *)args) = NULL; + Py_RETURN_NONE; + """), + ("get_array_with_base", "METH_NOARGS", """ + char *buf = (char *)malloc(20); + npy_intp dims[1]; + dims[0] = 20; + PyArray_Descr *descr = PyArray_DescrNewFromType(NPY_UINT8); + PyObject *arr = PyArray_NewFromDescr(&PyArray_Type, descr, 1, dims, + NULL, buf, + NPY_ARRAY_WRITEABLE, NULL); + if (arr == NULL) return NULL; + PyObject *obj = PyCapsule_New(buf, "buf capsule", + (PyCapsule_Destructor)&warn_on_free); + if (obj == NULL) { + Py_DECREF(arr); + return NULL; + } + if (PyArray_SetBaseObject((PyArrayObject *)arr, obj) < 0) { + Py_DECREF(arr); + Py_DECREF(obj); + return NULL; + } + return arr; + + """), + ] + prologue = ''' + #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION + #include <numpy/arrayobject.h> + /* + * This struct allows the dynamic configuration of the allocator funcs + * of the `secret_data_allocator`. It is provided here for + * demonstration purposes, as a valid `ctx` use-case scenario. 
+ */ + typedef struct { + void *(*malloc)(size_t); + void *(*calloc)(size_t, size_t); + void *(*realloc)(void *, size_t); + void (*free)(void *); + } SecretDataAllocatorFuncs; + + NPY_NO_EXPORT void * + shift_alloc(void *ctx, size_t sz) { + SecretDataAllocatorFuncs *funcs = (SecretDataAllocatorFuncs *)ctx; + char *real = (char *)funcs->malloc(sz + 64); + if (real == NULL) { + return NULL; + } + snprintf(real, 64, "originally allocated %ld", (unsigned long)sz); + return (void *)(real + 64); + } + NPY_NO_EXPORT void * + shift_zero(void *ctx, size_t sz, size_t cnt) { + SecretDataAllocatorFuncs *funcs = (SecretDataAllocatorFuncs *)ctx; + char *real = (char *)funcs->calloc(sz + 64, cnt); + if (real == NULL) { + return NULL; + } + snprintf(real, 64, "originally allocated %ld via zero", + (unsigned long)sz); + return (void *)(real + 64); + } + NPY_NO_EXPORT void + shift_free(void *ctx, void * p, npy_uintp sz) { + SecretDataAllocatorFuncs *funcs = (SecretDataAllocatorFuncs *)ctx; + if (p == NULL) { + return ; + } + char *real = (char *)p - 64; + if (strncmp(real, "originally allocated", 20) != 0) { + fprintf(stdout, "uh-oh, unmatched shift_free, " + "no appropriate prefix\\n"); + /* Make C runtime crash by calling free on the wrong address */ + funcs->free((char *)p + 10); + /* funcs->free(real); */ + } + else { + npy_uintp i = (npy_uintp)atoi(real +20); + if (i != sz) { + fprintf(stderr, "uh-oh, unmatched shift_free" + "(ptr, %ld) but allocated %ld\\n", sz, i); + /* This happens in some places, only print */ + funcs->free(real); + } + else { + funcs->free(real); + } + } + } + NPY_NO_EXPORT void * + shift_realloc(void *ctx, void * p, npy_uintp sz) { + SecretDataAllocatorFuncs *funcs = (SecretDataAllocatorFuncs *)ctx; + if (p != NULL) { + char *real = (char *)p - 64; + if (strncmp(real, "originally allocated", 20) != 0) { + fprintf(stdout, "uh-oh, unmatched shift_realloc\\n"); + return realloc(p, sz); + } + return (void *)((char *)funcs->realloc(real, sz + 64) + 64); + } + else { + char *real = (char *)funcs->realloc(p, sz + 64); + if (real == NULL) { + return NULL; + } + snprintf(real, 64, "originally allocated " + "%ld via realloc", (unsigned long)sz); + return (void *)(real + 64); + } + } + /* As an example, we use the standard {m|c|re}alloc/free funcs. 
*/ + static SecretDataAllocatorFuncs secret_data_handler_ctx = { + malloc, + calloc, + realloc, + free + }; + static PyDataMem_Handler secret_data_handler = { + "secret_data_allocator", + { + &secret_data_handler_ctx, /* ctx */ + shift_alloc, /* malloc */ + shift_zero, /* calloc */ + shift_realloc, /* realloc */ + shift_free /* free */ + } + }; + void warn_on_free(void *capsule) { + PyErr_WarnEx(PyExc_UserWarning, "in warn_on_free", 1); + void * obj = PyCapsule_GetPointer(capsule, + PyCapsule_GetName(capsule)); + free(obj); + }; + ''' + more_init = "import_array();" + try: + import mem_policy + return mem_policy + except ImportError: + pass + # if it does not exist, build and load it + return extbuild.build_and_import_extension('mem_policy', + functions, + prologue=prologue, + include_dirs=[np.get_include()], + build_dir=tmp_path, + more_init=more_init) + + +def test_set_policy(get_module): + + get_handler_name = np.core.multiarray.get_handler_name + orig_policy_name = get_handler_name() + + a = np.arange(10).reshape((2, 5)) # a doesn't own its own data + assert get_handler_name(a) is None + assert get_handler_name(a.base) == orig_policy_name + + orig_policy = get_module.set_secret_data_policy() + + b = np.arange(10).reshape((2, 5)) # b doesn't own its own data + assert get_handler_name(b) is None + assert get_handler_name(b.base) == 'secret_data_allocator' + + if orig_policy_name == 'default_allocator': + get_module.set_old_policy(None) # tests PyDataMem_SetHandler(NULL) + assert get_handler_name() == 'default_allocator' + else: + get_module.set_old_policy(orig_policy) + assert get_handler_name() == orig_policy_name + + +def test_policy_propagation(get_module): + # The memory policy goes hand-in-hand with flags.owndata + + class MyArr(np.ndarray): + pass + + get_handler_name = np.core.multiarray.get_handler_name + orig_policy_name = get_handler_name() + a = np.arange(10).view(MyArr).reshape((2, 5)) + assert get_handler_name(a) is None + assert a.flags.owndata is False + + assert get_handler_name(a.base) is None + assert a.base.flags.owndata is False + + assert get_handler_name(a.base.base) == orig_policy_name + assert a.base.base.flags.owndata is True + + +async def concurrent_context1(get_module, orig_policy_name, event): + if orig_policy_name == 'default_allocator': + get_module.set_secret_data_policy() + assert np.core.multiarray.get_handler_name() == 'secret_data_allocator' + else: + get_module.set_old_policy(None) + assert np.core.multiarray.get_handler_name() == 'default_allocator' + event.set() + + +async def concurrent_context2(get_module, orig_policy_name, event): + await event.wait() + # the policy is not affected by changes in parallel contexts + assert np.core.multiarray.get_handler_name() == orig_policy_name + # change policy in the child context + if orig_policy_name == 'default_allocator': + get_module.set_secret_data_policy() + assert np.core.multiarray.get_handler_name() == 'secret_data_allocator' + else: + get_module.set_old_policy(None) + assert np.core.multiarray.get_handler_name() == 'default_allocator' + + +async def async_test_context_locality(get_module): + orig_policy_name = np.core.multiarray.get_handler_name() + + event = asyncio.Event() + # the child contexts inherit the parent policy + concurrent_task1 = asyncio.create_task( + concurrent_context1(get_module, orig_policy_name, event)) + concurrent_task2 = asyncio.create_task( + concurrent_context2(get_module, orig_policy_name, event)) + await concurrent_task1 + await concurrent_task2 + + # the parent context is 
not affected by child policy changes + assert np.core.multiarray.get_handler_name() == orig_policy_name + + +def test_context_locality(get_module): + if (sys.implementation.name == 'pypy' + and sys.pypy_version_info[:3] < (7, 3, 6)): + pytest.skip('no context-locality support in PyPy < 7.3.6') + asyncio.run(async_test_context_locality(get_module)) + + +def concurrent_thread1(get_module, event): + get_module.set_secret_data_policy() + assert np.core.multiarray.get_handler_name() == 'secret_data_allocator' + event.set() + + +def concurrent_thread2(get_module, event): + event.wait() + # the policy is not affected by changes in parallel threads + assert np.core.multiarray.get_handler_name() == 'default_allocator' + # change policy in the child thread + get_module.set_secret_data_policy() + + +def test_thread_locality(get_module): + orig_policy_name = np.core.multiarray.get_handler_name() + + event = threading.Event() + # the child threads do not inherit the parent policy + concurrent_task1 = threading.Thread(target=concurrent_thread1, + args=(get_module, event)) + concurrent_task2 = threading.Thread(target=concurrent_thread2, + args=(get_module, event)) + concurrent_task1.start() + concurrent_task2.start() + concurrent_task1.join() + concurrent_task2.join() + + # the parent thread is not affected by child policy changes + assert np.core.multiarray.get_handler_name() == orig_policy_name + + +@pytest.mark.slow +def test_new_policy(get_module): + a = np.arange(10) + orig_policy_name = np.core.multiarray.get_handler_name(a) + + orig_policy = get_module.set_secret_data_policy() + + b = np.arange(10) + assert np.core.multiarray.get_handler_name(b) == 'secret_data_allocator' + + # test array manipulation. This is slow + if orig_policy_name == 'default_allocator': + # when the np.core.test tests recurse into this test, the + # policy will be set so this "if" will be false, preventing + # infinite recursion + # + # if needed, debug this by + # - running tests with -- -s (to not capture stdout/stderr) + # - setting extra_argv=['-vv'] here + assert np.core.test('full', verbose=2, extra_argv=['-vv']) + # also try the ma tests, the pickling test is quite tricky + assert np.ma.test('full', verbose=2, extra_argv=['-vv']) + + get_module.set_old_policy(orig_policy) + + c = np.arange(10) + assert np.core.multiarray.get_handler_name(c) == orig_policy_name + +@pytest.mark.xfail(sys.implementation.name == "pypy", + reason=("bad interaction between getenv and " + "os.environ inside pytest")) +@pytest.mark.parametrize("policy", ["0", "1", None]) +def test_switch_owner(get_module, policy): + a = get_module.get_array() + assert np.core.multiarray.get_handler_name(a) is None + get_module.set_own(a) + oldval = os.environ.get('NUMPY_WARN_IF_NO_MEM_POLICY', None) + if policy is None: + if 'NUMPY_WARN_IF_NO_MEM_POLICY' in os.environ: + os.environ.pop('NUMPY_WARN_IF_NO_MEM_POLICY') + else: + os.environ['NUMPY_WARN_IF_NO_MEM_POLICY'] = policy + try: + # The policy should be NULL, so we have to assume we can call + # "free".
A warning is given if the policy == "1" + if policy == "1": + with assert_warns(RuntimeWarning) as w: + del a + gc.collect() + else: + del a + gc.collect() + + finally: + if oldval is None: + if 'NUMPY_WARN_IF_NO_MEM_POLICY' in os.environ: + os.environ.pop('NUMPY_WARN_IF_NO_MEM_POLICY') + else: + os.environ['NUMPY_WARN_IF_NO_MEM_POLICY'] = oldval + +def test_owner_is_base(get_module): + a = get_module.get_array_with_base() + with pytest.warns(UserWarning, match='warn_on_free'): + del a + gc.collect() diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py index 8f8043c30..fa7f254a6 100644 --- a/numpy/core/tests/test_multiarray.py +++ b/numpy/core/tests/test_multiarray.py @@ -2511,27 +2511,19 @@ class TestMethods: assert_(not isinstance(a.searchsorted(b, 'left', s), A)) assert_(not isinstance(a.searchsorted(b, 'right', s), A)) - def test_argpartition_out_of_range(self): + @pytest.mark.parametrize("dtype", np.typecodes["All"]) + def test_argpartition_out_of_range(self, dtype): # Test out of range values in kth raise an error, gh-5469 - d = np.arange(10) + d = np.arange(10).astype(dtype=dtype) assert_raises(ValueError, d.argpartition, 10) assert_raises(ValueError, d.argpartition, -11) - # Test also for generic type argpartition, which uses sorting - # and used to not bound check kth - d_obj = np.arange(10, dtype=object) - assert_raises(ValueError, d_obj.argpartition, 10) - assert_raises(ValueError, d_obj.argpartition, -11) - def test_partition_out_of_range(self): + @pytest.mark.parametrize("dtype", np.typecodes["All"]) + def test_partition_out_of_range(self, dtype): # Test out of range values in kth raise an error, gh-5469 - d = np.arange(10) + d = np.arange(10).astype(dtype=dtype) assert_raises(ValueError, d.partition, 10) assert_raises(ValueError, d.partition, -11) - # Test also for generic type partition, which uses sorting - # and used to not bound check kth - d_obj = np.arange(10, dtype=object) - assert_raises(ValueError, d_obj.partition, 10) - assert_raises(ValueError, d_obj.partition, -11) def test_argpartition_integer(self): # Test non-integer values in kth raise an error/ @@ -2551,26 +2543,30 @@ class TestMethods: d_obj = np.arange(10, dtype=object) assert_raises(TypeError, d_obj.partition, 9.) 
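(Note on the partition/argpartition changes that follow: they fold the old object-dtype special cases into dtype-parametrized tests and exercise `kth` values of every integer dtype. A small illustrative sketch of the behaviour being pinned down, with example values only; out-of-range `kth` raises for every dtype, see gh-5469:)

import numpy as np

d = np.arange(10)
np.partition(d, np.int8(3))   # kth may be an integer scalar of any dtype
try:
    np.partition(d, 10)       # out of bounds for a length-10 array
except ValueError:
    pass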
- def test_partition_empty_array(self): + @pytest.mark.parametrize("kth_dtype", np.typecodes["AllInteger"]) + def test_partition_empty_array(self, kth_dtype): # check axis handling for multidimensional empty arrays + kth = np.array(0, dtype=kth_dtype)[()] a = np.array([]) a.shape = (3, 2, 1, 0) for axis in range(-a.ndim, a.ndim): msg = 'test empty array partition with axis={0}'.format(axis) - assert_equal(np.partition(a, 0, axis=axis), a, msg) + assert_equal(np.partition(a, kth, axis=axis), a, msg) msg = 'test empty array partition with axis=None' - assert_equal(np.partition(a, 0, axis=None), a.ravel(), msg) + assert_equal(np.partition(a, kth, axis=None), a.ravel(), msg) - def test_argpartition_empty_array(self): + @pytest.mark.parametrize("kth_dtype", np.typecodes["AllInteger"]) + def test_argpartition_empty_array(self, kth_dtype): # check axis handling for multidimensional empty arrays + kth = np.array(0, dtype=kth_dtype)[()] a = np.array([]) a.shape = (3, 2, 1, 0) for axis in range(-a.ndim, a.ndim): msg = 'test empty array argpartition with axis={0}'.format(axis) - assert_equal(np.partition(a, 0, axis=axis), + assert_equal(np.partition(a, kth, axis=axis), np.zeros_like(a, dtype=np.intp), msg) msg = 'test empty array argpartition with axis=None' - assert_equal(np.partition(a, 0, axis=None), + assert_equal(np.partition(a, kth, axis=None), np.zeros_like(a.ravel(), dtype=np.intp), msg) def test_partition(self): @@ -2901,10 +2897,12 @@ class TestMethods: assert_array_equal(np.partition(d, kth)[kth], tgt, err_msg="data: %r\n kth: %r" % (d, kth)) - def test_argpartition_gh5524(self): + @pytest.mark.parametrize("kth_dtype", np.typecodes["AllInteger"]) + def test_argpartition_gh5524(self, kth_dtype): # A test for functionality of argpartition on lists. - d = [6,7,3,2,9,0] - p = np.argpartition(d,1) + kth = np.array(1, dtype=kth_dtype)[()] + d = [6, 7, 3, 2, 9, 0] + p = np.argpartition(d, kth) self.assert_partitioned(np.array(d)[p],[1]) def test_flatten(self): @@ -4200,7 +4198,7 @@ class TestArgmaxArgminCommon: (3, 4, 1, 2), (4, 1, 2, 3)] @pytest.mark.parametrize("size, axis", itertools.chain(*[[(size, axis) - for axis in list(range(-len(size), len(size))) + [None]] + for axis in list(range(-len(size), len(size))) + [None]] for size in sizes])) @pytest.mark.parametrize('method', [np.argmax, np.argmin]) def test_np_argmin_argmax_keepdims(self, size, axis, method): @@ -4221,7 +4219,7 @@ class TestArgmaxArgminCommon: assert_equal(res, res_orig) assert_(res.shape == new_shape) outarray = np.empty(res.shape, dtype=res.dtype) - res1 = method(arr, axis=axis, out=outarray, + res1 = method(arr, axis=axis, out=outarray, keepdims=True) assert_(res1 is outarray) assert_equal(res, outarray) @@ -4234,7 +4232,7 @@ class TestArgmaxArgminCommon: wrong_shape[0] = 2 wrong_outarray = np.empty(wrong_shape, dtype=res.dtype) with pytest.raises(ValueError): - method(arr.T, axis=axis, + method(arr.T, axis=axis, out=wrong_outarray, keepdims=True) # non-contiguous arrays @@ -4252,18 +4250,18 @@ class TestArgmaxArgminCommon: assert_(res.shape == new_shape) outarray = np.empty(new_shape[::-1], dtype=res.dtype) outarray = outarray.T - res1 = method(arr.T, axis=axis, out=outarray, + res1 = method(arr.T, axis=axis, out=outarray, keepdims=True) assert_(res1 is outarray) assert_equal(res, outarray) if len(size) > 0: - # one dimension lesser for non-zero sized + # one dimension lesser for non-zero sized # array should raise an error with pytest.raises(ValueError): - method(arr[0], axis=axis, + method(arr[0], axis=axis, 
out=outarray, keepdims=True) - + if len(size) > 0: wrong_shape = list(new_shape) if axis is not None: @@ -4272,7 +4270,7 @@ class TestArgmaxArgminCommon: wrong_shape[0] = 2 wrong_outarray = np.empty(wrong_shape, dtype=res.dtype) with pytest.raises(ValueError): - method(arr.T, axis=axis, + method(arr.T, axis=axis, out=wrong_outarray, keepdims=True) @pytest.mark.parametrize('method', ['max', 'min']) @@ -4287,7 +4285,7 @@ class TestArgmaxArgminCommon: axes.remove(i) assert_(np.all(a_maxmin == aarg_maxmin.choose( *a.transpose(i, *axes)))) - + @pytest.mark.parametrize('method', ['argmax', 'argmin']) def test_output_shape(self, method): # see also gh-616 @@ -4330,7 +4328,7 @@ class TestArgmaxArgminCommon: [('argmax', np.argmax), ('argmin', np.argmin)]) def test_np_vs_ndarray(self, arr_method, np_method): - # make sure both ndarray.argmax/argmin and + # make sure both ndarray.argmax/argmin and # numpy.argmax/argmin support out/axis args a = np.random.normal(size=(2, 3)) arg_method = getattr(a, arr_method) @@ -4344,7 +4342,7 @@ class TestArgmaxArgminCommon: # check keyword args out1 = np.zeros(3, dtype=int) out2 = np.zeros(3, dtype=int) - assert_equal(arg_method(out=out1, axis=0), + assert_equal(arg_method(out=out1, axis=0), np_method(a, out=out2, axis=0)) assert_equal(out1, out2) @@ -4438,7 +4436,7 @@ class TestArgmax: assert_equal(np.argmax(arr), pos, err_msg="%r" % arr) assert_equal(arr[np.argmax(arr)], val, err_msg="%r" % arr) - + def test_maximum_signed_integers(self): a = np.array([1, 2**7 - 1, -2**7], dtype=np.int8) diff --git a/numpy/core/tests/test_nditer.py b/numpy/core/tests/test_nditer.py index 6b743ab27..ed775cac6 100644 --- a/numpy/core/tests/test_nditer.py +++ b/numpy/core/tests/test_nditer.py @@ -9,7 +9,7 @@ import numpy.core._multiarray_tests as _multiarray_tests from numpy import array, arange, nditer, all from numpy.testing import ( assert_, assert_equal, assert_array_equal, assert_raises, - HAS_REFCOUNT, suppress_warnings + HAS_REFCOUNT, suppress_warnings, break_cycles ) @@ -2819,7 +2819,7 @@ def test_iter_writemasked_decref(): for buf, mask_buf in it: buf[...] = (3, singleton) - del buf, mask_buf, it # delete everything to ensure corrrect cleanup + del buf, mask_buf, it # delete everything to ensure correct cleanup if HAS_REFCOUNT: # The buffer would have included additional items, they must be @@ -3128,6 +3128,8 @@ def test_warn_noclose(): assert len(sup.log) == 1 +@pytest.mark.skipif(sys.version_info[:2] == (3, 9) and sys.platform == "win32", + reason="Errors with Python 3.9 on Windows") @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") @pytest.mark.parametrize(["in_dtype", "buf_dtype"], [("i", "O"), ("O", "i"), # most simple cases @@ -3148,6 +3150,8 @@ def test_partial_iteration_cleanup(in_dtype, buf_dtype, steps): # Note that resetting does not free references del it + break_cycles() + break_cycles() assert count == sys.getrefcount(value) # Repeat the test with `iternext` @@ -3157,6 +3161,8 @@ def test_partial_iteration_cleanup(in_dtype, buf_dtype, steps): it.iternext() del it # should ensure cleanup + break_cycles() + break_cycles() assert count == sys.getrefcount(value) @@ -3202,7 +3208,7 @@ def test_debug_print(capfd): Currently uses a subprocess to avoid dealing with the C level `printf`s. """ # the expected output with all addresses and sizes stripped (they vary - # and/or are platform dependend). + # and/or are platform dependent). 
expected = """ ------ BEGIN ITERATOR DUMP ------ | Iterator Address: diff --git a/numpy/core/tests/test_numeric.py b/numpy/core/tests/test_numeric.py index 19de0a8aa..ad9437911 100644 --- a/numpy/core/tests/test_numeric.py +++ b/numpy/core/tests/test_numeric.py @@ -16,7 +16,7 @@ from numpy.testing import ( ) from numpy.core._rational_tests import rational -from hypothesis import assume, given, strategies as st +from hypothesis import given, strategies as st from hypothesis.extra import numpy as hynp @@ -646,7 +646,7 @@ class TestFloatExceptions: if np.dtype(ftype).kind == 'f': # Get some extreme values for the type fi = np.finfo(ftype) - ft_tiny = fi.machar.tiny + ft_tiny = fi._machar.tiny ft_max = fi.max ft_eps = fi.eps underflow = 'underflow' @@ -655,7 +655,7 @@ class TestFloatExceptions: # 'c', complex, corresponding real dtype rtype = type(ftype(0).real) fi = np.finfo(rtype) - ft_tiny = ftype(fi.machar.tiny) + ft_tiny = ftype(fi._machar.tiny) ft_max = ftype(fi.max) ft_eps = ftype(fi.eps) # The complex types raise different exceptions @@ -932,25 +932,6 @@ class TestTypes: # Promote with object: assert_equal(promote_types('O', S+'30'), np.dtype('O')) - @pytest.mark.parametrize(["dtype1", "dtype2"], - [[np.dtype("V6"), np.dtype("V10")], - [np.dtype([("name1", "i8")]), np.dtype([("name2", "i8")])], - [np.dtype("i8,i8"), np.dtype("i4,i4")], - ]) - def test_invalid_void_promotion(self, dtype1, dtype2): - # Mainly test structured void promotion, which currently allows - # byte-swapping, but nothing else: - with pytest.raises(TypeError): - np.promote_types(dtype1, dtype2) - - @pytest.mark.parametrize(["dtype1", "dtype2"], - [[np.dtype("V10"), np.dtype("V10")], - [np.dtype([("name1", "<i8")]), np.dtype([("name1", ">i8")])], - [np.dtype("i8,i8"), np.dtype("i8,>i8")], - ]) - def test_valid_void_promotion(self, dtype1, dtype2): - assert np.promote_types(dtype1, dtype2) is dtype1 - @pytest.mark.parametrize("dtype", list(np.typecodes["All"]) + ["i,i", "S3", "S100", "U3", "U100", rational]) @@ -1503,6 +1484,18 @@ class TestNonzero: a = np.array([[False], [TrueThenFalse()]]) assert_raises(RuntimeError, np.nonzero, a) + def test_nonzero_sideffects_structured_void(self): + # Checks that structured void does not mutate alignment flag of + # original array. + arr = np.zeros(5, dtype="i1,i8,i8") # `ones` may short-circuit + assert arr.flags.aligned # structs are considered "aligned" + assert not arr["f2"].flags.aligned + # make sure that nonzero/count_nonzero do not flip the flag: + np.nonzero(arr) + assert arr.flags.aligned + np.count_nonzero(arr) + assert arr.flags.aligned + def test_nonzero_exception_safe(self): # gh-13930 @@ -2893,6 +2886,21 @@ class TestLikeFuncs: self.check_like_function(np.full_like, 123.456, True) self.check_like_function(np.full_like, np.inf, True) + @pytest.mark.parametrize('likefunc', [np.empty_like, np.full_like, + np.zeros_like, np.ones_like]) + @pytest.mark.parametrize('dtype', [str, bytes]) + def test_dtype_str_bytes(self, likefunc, dtype): + # Regression test for gh-19860 + a = np.arange(16).reshape(2, 8) + b = a[:, ::2] # Ensure b is not contiguous. 
+ kwargs = {'fill_value': ''} if likefunc == np.full_like else {} + result = likefunc(b, dtype=dtype, **kwargs) + if dtype == str: + assert result.strides == (16, 4) + else: + # dtype is bytes + assert result.strides == (4, 1) + class TestCorrelate: def _setup(self, dt): @@ -3496,6 +3504,12 @@ class TestBroadcast: assert_raises(ValueError, np.broadcast, 1, **{'x': 1}) + def test_shape_mismatch_error_message(self): + with pytest.raises(ValueError, match=r"arg 0 with shape \(1, 3\) and " + r"arg 2 with shape \(2,\)"): + np.broadcast([[1, 2, 3]], [[4], [5]], [6, 7]) + + class TestKeepdims: class sub_array(np.ndarray): diff --git a/numpy/core/tests/test_scalar_methods.py b/numpy/core/tests/test_scalar_methods.py index 94b2dd3c9..eef4c1433 100644 --- a/numpy/core/tests/test_scalar_methods.py +++ b/numpy/core/tests/test_scalar_methods.py @@ -1,8 +1,11 @@ """ Test the scalar constructors, which also do type-coercion """ +import sys import fractions import platform +import types +from typing import Any, Type import pytest import numpy as np @@ -128,3 +131,73 @@ class TestIsInteger: if value == 0: continue assert not value.is_integer() + + +@pytest.mark.skipif(sys.version_info < (3, 9), reason="Requires python 3.9") +class TestClassGetItem: + @pytest.mark.parametrize("cls", [ + np.number, + np.integer, + np.inexact, + np.unsignedinteger, + np.signedinteger, + np.floating, + ]) + def test_abc(self, cls: Type[np.number]) -> None: + alias = cls[Any] + assert isinstance(alias, types.GenericAlias) + assert alias.__origin__ is cls + + def test_abc_complexfloating(self) -> None: + alias = np.complexfloating[Any, Any] + assert isinstance(alias, types.GenericAlias) + assert alias.__origin__ is np.complexfloating + + @pytest.mark.parametrize("cls", [np.generic, np.flexible, np.character]) + def test_abc_non_numeric(self, cls: Type[np.generic]) -> None: + with pytest.raises(TypeError): + cls[Any] + + @pytest.mark.parametrize("code", np.typecodes["All"]) + def test_concrete(self, code: str) -> None: + cls = np.dtype(code).type + with pytest.raises(TypeError): + cls[Any] + + @pytest.mark.parametrize("arg_len", range(4)) + def test_subscript_tuple(self, arg_len: int) -> None: + arg_tup = (Any,) * arg_len + if arg_len == 1: + assert np.number[arg_tup] + else: + with pytest.raises(TypeError): + np.number[arg_tup] + + def test_subscript_scalar(self) -> None: + assert np.number[Any] + + +@pytest.mark.skipif(sys.version_info >= (3, 9), reason="Requires python 3.8") +@pytest.mark.parametrize("cls", [np.number, np.complexfloating, np.int64]) +def test_class_getitem_38(cls: Type[np.number]) -> None: + match = "Type subscription requires python >= 3.9" + with pytest.raises(TypeError, match=match): + cls[Any] + + +class TestBitCount: + # derived in part from the cpython test "test_bit_count" + + @pytest.mark.parametrize("itype", np.sctypes['int']+np.sctypes['uint']) + def test_small(self, itype): + for a in range(max(np.iinfo(itype).min, 0), 128): + msg = f"Smoke test for {itype}({a}).bit_count()" + assert itype(a).bit_count() == bin(a).count("1"), msg + + def test_bit_count(self): + for exp in [10, 17, 63]: + a = 2**exp + assert np.uint64(a).bit_count() == 1 + assert np.uint64(a - 1).bit_count() == exp + assert np.uint64(a ^ 63).bit_count() == 7 + assert np.uint64((a - 1) ^ 510).bit_count() == exp - 8 diff --git a/numpy/core/tests/test_scalarinherit.py b/numpy/core/tests/test_scalarinherit.py index cc53eb244..98d7f7cde 100644 --- a/numpy/core/tests/test_scalarinherit.py +++ b/numpy/core/tests/test_scalarinherit.py @@ 
-1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ Test printing of scalar types. """ diff --git a/numpy/core/tests/test_scalarmath.py b/numpy/core/tests/test_scalarmath.py index becd65b11..90078a2ea 100644 --- a/numpy/core/tests/test_scalarmath.py +++ b/numpy/core/tests/test_scalarmath.py @@ -5,14 +5,14 @@ import itertools import operator import platform import pytest -from hypothesis import given, settings, Verbosity, assume +from hypothesis import given, settings, Verbosity from hypothesis.strategies import sampled_from import numpy as np from numpy.testing import ( assert_, assert_equal, assert_raises, assert_almost_equal, assert_array_equal, IS_PYPY, suppress_warnings, _gen_alignment_data, - assert_warns, assert_raises_regex, + assert_warns, ) types = [np.bool_, np.byte, np.ubyte, np.short, np.ushort, np.intc, np.uintc, diff --git a/numpy/core/tests/test_scalarprint.py b/numpy/core/tests/test_scalarprint.py index 2f1c3bc5e..ee21d4aa5 100644 --- a/numpy/core/tests/test_scalarprint.py +++ b/numpy/core/tests/test_scalarprint.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ Test printing of scalar types. """ diff --git a/numpy/core/tests/test_simd.py b/numpy/core/tests/test_simd.py index f0c60953b..0270ad901 100644 --- a/numpy/core/tests/test_simd.py +++ b/numpy/core/tests/test_simd.py @@ -329,7 +329,7 @@ class _SIMD_FP(_Test_Utility): data_square = [x*x for x in data] square = self.square(vdata) assert square == data_square - + def test_max(self): """ Test intrinsics: @@ -818,6 +818,7 @@ class _SIMD_ALL(_Test_Utility): if self._is_fp(): return + int_min = self._int_min() def trunc_div(a, d): """ Divide towards zero works with large integers > 2^53, @@ -830,57 +831,31 @@ class _SIMD_ALL(_Test_Utility): return a // d return (a + sign_d - sign_a) // d + 1 - int_min = self._int_min() if self._is_signed() else 1 - int_max = self._int_max() - rdata = ( - 0, 1, self.nlanes, int_max-self.nlanes, - int_min, int_min//2 + 1 - ) - divisors = (1, 2, 9, 13, self.nlanes, int_min, int_max, int_max//2) - - for x, d in itertools.product(rdata, divisors): - data = self._data(x) - vdata = self.load(data) - data_divc = [trunc_div(a, d) for a in data] - divisor = self.divisor(d) - divc = self.divc(vdata, divisor) - assert divc == data_divc - - if not self._is_signed(): - return - - safe_neg = lambda x: -x-1 if -x > int_max else -x - # test round division for signed integers - for x, d in itertools.product(rdata, divisors): - d_neg = safe_neg(d) - data = self._data(x) - data_neg = [safe_neg(a) for a in data] - vdata = self.load(data) - vdata_neg = self.load(data_neg) - divisor = self.divisor(d) - divisor_neg = self.divisor(d_neg) - - # round towards zero - data_divc = [trunc_div(a, d_neg) for a in data] - divc = self.divc(vdata, divisor_neg) - assert divc == data_divc - data_divc = [trunc_div(a, d) for a in data_neg] - divc = self.divc(vdata_neg, divisor) + data = [1, -int_min] # to test overflow + data += range(0, 2**8, 2**5) + data += range(0, 2**8, 2**5-1) + bsize = self._scalar_size() + if bsize > 8: + data += range(2**8, 2**16, 2**13) + data += range(2**8, 2**16, 2**13-1) + if bsize > 16: + data += range(2**16, 2**32, 2**29) + data += range(2**16, 2**32, 2**29-1) + if bsize > 32: + data += range(2**32, 2**64, 2**61) + data += range(2**32, 2**64, 2**61-1) + # negate + data += [-x for x in data] + for dividend, divisor in itertools.product(data, data): + divisor = self.setall(divisor)[0] # cast + if divisor == 0: + continue + dividend = self.load(self._data(dividend)) + data_divc = [trunc_div(a, divisor) for a in 
dividend] + divisor_parms = self.divisor(divisor) + divc = self.divc(dividend, divisor_parms) assert divc == data_divc - # test truncate sign if the dividend is zero - vzero = self.zero() - for d in (-1, -10, -100, int_min//2, int_min): - divisor = self.divisor(d) - divc = self.divc(vzero, divisor) - assert divc == vzero - - # test overflow - vmin = self.setall(int_min) - divisor = self.divisor(-1) - divc = self.divc(vmin, divisor) - assert divc == vmin - def test_arithmetic_reduce_sum(self): """ Test reduce sum intrinsics: diff --git a/numpy/core/tests/test_ufunc.py b/numpy/core/tests/test_ufunc.py index c3ea10d93..ef0bac957 100644 --- a/numpy/core/tests/test_ufunc.py +++ b/numpy/core/tests/test_ufunc.py @@ -1362,6 +1362,14 @@ class TestUfunc: np.array([[2]*i for i in [1, 3, 6, 10]], dtype=object), ) + def test_object_array_accumulate_failure(self): + # Typical accumulation on object works as expected: + res = np.add.accumulate(np.array([1, 0, 2], dtype=object)) + assert_array_equal(res, np.array([1, 1, 3], dtype=object)) + # But errors are propagated from the inner-loop if they occur: + with pytest.raises(TypeError): + np.add.accumulate([1, None, 2]) + def test_object_array_reduceat_inplace(self): # Checks that in-place reduceats work, see also gh-7465 arr = np.empty(4, dtype=object) @@ -1381,6 +1389,15 @@ class TestUfunc: np.add.reduceat(arr, np.arange(4), out=arr, axis=-1) assert_array_equal(arr, out) + def test_object_array_reduceat_failure(self): + # Reduceat works as expected when no invalid operation occurs (None is + # not involved in an operation here) + res = np.add.reduceat(np.array([1, None, 2], dtype=object), [1, 2]) + assert_array_equal(res, np.array([None, 2], dtype=object)) + # But errors when None would be involved in an operation: + with pytest.raises(TypeError): + np.add.reduceat([1, None, 2], [0, 2]) + def test_zerosize_reduction(self): # Test with default dtype and object dtype for a in [[], np.array([], dtype=object)]: @@ -2098,6 +2115,25 @@ class TestUfunc: with pytest.raises(TypeError): ufunc(a, a, signature=signature) + @pytest.mark.parametrize("ufunc", + [np.logical_and, np.logical_or, np.logical_xor]) + def test_logical_ufuncs_support_anything(self, ufunc): + # The logical ufuncs support even input that can't be promoted: + a = np.array('1') + c = np.array([1., 2.]) + assert_array_equal(ufunc(a, c), ufunc([True, True], True)) + assert ufunc.reduce(a) == True + + @pytest.mark.parametrize("ufunc", + [np.logical_and, np.logical_or, np.logical_xor]) + def test_logical_ufuncs_out_cast_check(self, ufunc): + a = np.array('1') + c = np.array([1., 2.]) + out = a.copy() + with pytest.raises(TypeError): + # It would be safe, but not equiv casting: + ufunc(a, c, out=out, casting="equiv") + def test_reduce_noncontig_output(self): # Check that reduction deals with non-contiguous output arrays # appropriately. @@ -2119,6 +2155,22 @@ class TestUfunc: assert_equal(y_base[1,:], y_base_copy[1,:]) assert_equal(y_base[3,:], y_base_copy[3,:]) + @pytest.mark.parametrize("with_cast", [True, False]) + def test_reduceat_and_accumulate_out_shape_mismatch(self, with_cast): + # Should raise an error mentioning "shape" or "size" + arr = np.arange(5) + out = np.arange(3) # definitely wrong shape + if with_cast: + # If a cast is necessary on the output, we can be sure to use + # the generic NpyIter (non-fast) path. 
+ out = out.astype(np.float64) + + with pytest.raises(ValueError, match="(shape|size)"): + np.add.reduceat(arr, [0, 3], out=out) + + with pytest.raises(ValueError, match="(shape|size)"): + np.add.accumulate(arr, out=out) + @pytest.mark.parametrize('out_shape', [(), (1,), (3,), (1, 1), (1, 3), (4, 3)]) @pytest.mark.parametrize('keepdims', [True, False]) @@ -2308,6 +2360,14 @@ def test_ufunc_casterrors(): assert out[-1] == 1 +def test_trivial_loop_invalid_cast(): + # This tests the fast-path "invalid cast", see gh-19904. + with pytest.raises(TypeError, + match="cast ufunc 'add' input 0"): + # the void dtype definitely cannot cast to double: + np.add(np.array(1, "i,i"), 3, signature="dd->d") + + @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") @pytest.mark.parametrize("offset", [0, np.BUFSIZE//2, int(1.5*np.BUFSIZE)]) @@ -2323,8 +2383,9 @@ def test_reduce_casterrors(offset): out = np.array(-1, dtype=np.intp) count = sys.getrefcount(value) - with pytest.raises(ValueError): - # This is an unsafe cast, but we currently always allow that: + with pytest.raises(ValueError, match="invalid literal"): + # This is an unsafe cast, but we currently always allow that. + # Note that the double loop is picked, but the cast fails. np.add.reduce(arr, dtype=np.intp, out=out) assert count == sys.getrefcount(value) # If an error occurred during casting, the operation is done at most until @@ -2332,3 +2393,20 @@ def test_reduce_casterrors(offset): # if the error happened immediately. # This does not define behaviour, the output is invalid and thus undefined assert out[()] < value * offset + + +@pytest.mark.parametrize("method", + [np.add.accumulate, np.add.reduce, + pytest.param(lambda x: np.add.reduceat(x, [0]), id="reduceat"), + pytest.param(lambda x: np.log.at(x, [2]), id="at")]) +def test_ufunc_methods_floaterrors(method): + # adding inf and -inf (or taking log(-inf)) creates an invalid float + # and warns + arr = np.array([np.inf, 0, -np.inf]) + with np.errstate(all="warn"): + with pytest.warns(RuntimeWarning, match="invalid value"): + method(arr) + + arr = np.array([np.inf, 0, -np.inf]) + with np.errstate(all="raise"): + with pytest.raises(FloatingPointError): + method(arr) diff --git a/numpy/core/tests/test_umath.py b/numpy/core/tests/test_umath.py index 4f57c0088..8f5a85824 100644 --- a/numpy/core/tests/test_umath.py +++ b/numpy/core/tests/test_umath.py @@ -15,7 +15,7 @@ from numpy.testing import ( assert_, assert_equal, assert_raises, assert_raises_regex, assert_array_equal, assert_almost_equal, assert_array_almost_equal, assert_array_max_ulp, assert_allclose, assert_no_warnings, suppress_warnings, - _gen_alignment_data, assert_array_almost_equal_nulp, assert_warns + _gen_alignment_data, assert_array_almost_equal_nulp ) def get_glibc_version(): @@ -973,6 +973,12 @@ class TestLog: xf = np.log(x) assert_almost_equal(np.log(x, out=x), xf) + # test log() of max for dtype does not raise + for dt in ['f', 'd', 'g']: + with np.errstate(all='raise'): + x = np.finfo(dt).max + np.log(x) + def test_log_strides(self): np.random.seed(42) strides = np.array([-4,-3,-2,-1,1,2,3,4]) @@ -3852,3 +3858,39 @@ def test_outer_exceeds_maxdims(): with assert_raises(ValueError): np.add.outer(deep, deep) +def test_bad_legacy_ufunc_silent_errors(): + # legacy ufuncs can't report errors and NumPy can't check if the GIL + # is released. So NumPy has to check after the GIL is released just to + # cover all bases. `np.power` uses (or used to use) this.
+ arr = np.arange(3).astype(np.float64) + + with pytest.raises(RuntimeError, match=r"How unexpected :\)!"): + ncu_tests.always_error(arr, arr) + + with pytest.raises(RuntimeError, match=r"How unexpected :\)!"): + # not contiguous means the fast-path cannot be taken + non_contig = arr.repeat(20).reshape(-1, 6)[:, ::2] + ncu_tests.always_error(non_contig, arr) + + with pytest.raises(RuntimeError, match=r"How unexpected :\)!"): + ncu_tests.always_error.outer(arr, arr) + + with pytest.raises(RuntimeError, match=r"How unexpected :\)!"): + ncu_tests.always_error.reduce(arr) + + with pytest.raises(RuntimeError, match=r"How unexpected :\)!"): + ncu_tests.always_error.reduceat(arr, [0, 1]) + + with pytest.raises(RuntimeError, match=r"How unexpected :\)!"): + ncu_tests.always_error.accumulate(arr) + + with pytest.raises(RuntimeError, match=r"How unexpected :\)!"): + ncu_tests.always_error.at(arr, [0, 1, 2], arr) + + +@pytest.mark.parametrize('x1', [np.arange(3.0), [0.0, 1.0, 2.0]]) +def test_bad_legacy_gufunc_silent_errors(x1): + # Verify that an exception raised in a gufunc loop propagates correctly. + # The signature of always_error_gufunc is '(i),()->()'. + with pytest.raises(RuntimeError, match=r"How unexpected :\)!"): + ncu_tests.always_error_gufunc(x1, 0.0) diff --git a/numpy/core/tests/test_umath_accuracy.py b/numpy/core/tests/test_umath_accuracy.py index a703c697a..32e2dca66 100644 --- a/numpy/core/tests/test_umath_accuracy.py +++ b/numpy/core/tests/test_umath_accuracy.py @@ -1,5 +1,4 @@ import numpy as np -import platform import os from os import path import sys diff --git a/numpy/ctypeslib.py b/numpy/ctypeslib.py index 8ba6f15e5..8d105a248 100644 --- a/numpy/ctypeslib.py +++ b/numpy/ctypeslib.py @@ -90,18 +90,23 @@ else: def load_library(libname, loader_path): """ It is possible to load a library using + >>> lib = ctypes.cdll[<full_path_name>] # doctest: +SKIP But there are cross-platform considerations, such as library file extensions, plus the fact Windows will just load the first library it finds with that name. NumPy supplies the load_library function as a convenience. + .. versionchanged:: 1.20.0 + Allow libname and loader_path to take any + :term:`python:path-like object`. + Parameters ---------- - libname : str + libname : path-like Name of the library, which can have 'lib' as a prefix, but without an extension. - loader_path : str + loader_path : path-like Where the library can be found. 
Returns @@ -120,6 +125,10 @@ else: warnings.warn("All features of ctypes interface may not work " "with ctypes < 1.0.1", stacklevel=2) + # Convert path-like objects into strings + libname = os.fsdecode(libname) + loader_path = os.fsdecode(loader_path) + ext = os.path.splitext(libname)[1] if not ext: # Try to load library with platform-specific name, otherwise diff --git a/numpy/ctypeslib.pyi b/numpy/ctypeslib.pyi index 642017ba7..1c396d240 100644 --- a/numpy/ctypeslib.pyi +++ b/numpy/ctypeslib.pyi @@ -1,15 +1,268 @@ -from typing import List, Type - # NOTE: Numpy's mypy plugin is used for importing the correct # platform-specific `ctypes._SimpleCData[int]` sub-type from ctypes import c_int64 as _c_intp +import os +import sys +import ctypes +from typing import ( + Literal as L, + Any, + List, + Union, + TypeVar, + Type, + Generic, + Optional, + overload, + Iterable, + ClassVar, + Tuple, + Sequence, + Dict, +) + +from numpy import ( + ndarray, + dtype, + generic, + bool_, + byte, + short, + intc, + int_, + longlong, + ubyte, + ushort, + uintc, + uint, + ulonglong, + single, + double, + float_, + longdouble, + void, +) +from numpy.core._internal import _ctypes +from numpy.core.multiarray import flagsobj +from numpy.typing import ( + # Arrays + ArrayLike, + NDArray, + _FiniteNestedSequence, + _SupportsArray, + + # Shapes + _ShapeLike, + + # DTypes + DTypeLike, + _SupportsDType, + _VoidDTypeLike, + _BoolCodes, + _UByteCodes, + _UShortCodes, + _UIntCCodes, + _UIntCodes, + _ULongLongCodes, + _ByteCodes, + _ShortCodes, + _IntCCodes, + _IntCodes, + _LongLongCodes, + _SingleCodes, + _DoubleCodes, + _LongDoubleCodes, +) + +# TODO: Add a proper `_Shape` bound once we've got variadic typevars +_DType = TypeVar("_DType", bound=dtype[Any]) +_DTypeOptional = TypeVar("_DTypeOptional", bound=Optional[dtype[Any]]) +_SCT = TypeVar("_SCT", bound=generic) + +_DTypeLike = Union[ + dtype[_SCT], + Type[_SCT], + _SupportsDType[dtype[_SCT]], +] +_ArrayLike = _FiniteNestedSequence[_SupportsArray[dtype[_SCT]]] + +_FlagsKind = L[ + 'C_CONTIGUOUS', 'CONTIGUOUS', 'C', + 'F_CONTIGUOUS', 'FORTRAN', 'F', + 'ALIGNED', 'A', + 'WRITEABLE', 'W', + 'OWNDATA', 'O', + 'UPDATEIFCOPY', 'U', + 'WRITEBACKIFCOPY', 'X', +] + +# TODO: Add a shape typevar once we have variadic typevars (PEP 646) +class _ndptr(ctypes.c_void_p, Generic[_DTypeOptional]): + # In practice these 4 classvars are defined in the dynamic class + # returned by `ndpointer` + _dtype_: ClassVar[_DTypeOptional] + _shape_: ClassVar[None] + _ndim_: ClassVar[None | int] + _flags_: ClassVar[None | List[_FlagsKind]] + + @overload + @classmethod + def from_param(cls: Type[_ndptr[None]], obj: ndarray[Any, Any]) -> _ctypes: ... + @overload + @classmethod + def from_param(cls: Type[_ndptr[_DType]], obj: ndarray[Any, _DType]) -> _ctypes: ... + +class _concrete_ndptr(_ndptr[_DType]): + _dtype_: ClassVar[_DType] + _shape_: ClassVar[Tuple[int, ...]] + @property + def contents(self) -> ndarray[Any, _DType]: ... + +def load_library( + libname: str | bytes | os.PathLike[str] | os.PathLike[bytes], + loader_path: str | bytes | os.PathLike[str] | os.PathLike[bytes], +) -> ctypes.CDLL: ... + __all__: List[str] c_intp = _c_intp -def load_library(libname, loader_path): ... -def ndpointer(dtype=..., ndim=..., shape=..., flags=...): ... -def as_ctypes(obj): ... -def as_array(obj, shape=...): ... -def as_ctypes_type(dtype): ... 
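The `os.fsdecode` calls added above are what make the new path-like signatures work; a standalone sketch of the idea (not NumPy's exact code):

import os
from pathlib import Path

def as_str_path(p):
    # os.fsdecode accepts str, bytes, and os.PathLike objects and
    # always returns str, so downstream os.path calls only ever see
    # a plain string.
    return os.fsdecode(p)

assert as_str_path("libfoo") == "libfoo"
assert as_str_path(b"libfoo") == "libfoo"
assert as_str_path(Path("libfoo")) == "libfoo"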
+@overload +def ndpointer( + dtype: None = ..., + ndim: int = ..., + shape: None | _ShapeLike = ..., + flags: None | _FlagsKind | Iterable[_FlagsKind] | int | flagsobj = ..., +) -> Type[_ndptr[None]]: ... +@overload +def ndpointer( + dtype: _DTypeLike[_SCT], + ndim: int = ..., + *, + shape: _ShapeLike, + flags: None | _FlagsKind | Iterable[_FlagsKind] | int | flagsobj = ..., +) -> Type[_concrete_ndptr[dtype[_SCT]]]: ... +@overload +def ndpointer( + dtype: DTypeLike, + ndim: int = ..., + *, + shape: _ShapeLike, + flags: None | _FlagsKind | Iterable[_FlagsKind] | int | flagsobj = ..., +) -> Type[_concrete_ndptr[dtype[Any]]]: ... +@overload +def ndpointer( + dtype: _DTypeLike[_SCT], + ndim: int = ..., + shape: None = ..., + flags: None | _FlagsKind | Iterable[_FlagsKind] | int | flagsobj = ..., +) -> Type[_ndptr[dtype[_SCT]]]: ... +@overload +def ndpointer( + dtype: DTypeLike, + ndim: int = ..., + shape: None = ..., + flags: None | _FlagsKind | Iterable[_FlagsKind] | int | flagsobj = ..., +) -> Type[_ndptr[dtype[Any]]]: ... + +@overload +def as_ctypes_type(dtype: _BoolCodes | _DTypeLike[bool_] | Type[ctypes.c_bool]) -> Type[ctypes.c_bool]: ... +@overload +def as_ctypes_type(dtype: _ByteCodes | _DTypeLike[byte] | Type[ctypes.c_byte]) -> Type[ctypes.c_byte]: ... +@overload +def as_ctypes_type(dtype: _ShortCodes | _DTypeLike[short] | Type[ctypes.c_short]) -> Type[ctypes.c_short]: ... +@overload +def as_ctypes_type(dtype: _IntCCodes | _DTypeLike[intc] | Type[ctypes.c_int]) -> Type[ctypes.c_int]: ... +@overload +def as_ctypes_type(dtype: _IntCodes | _DTypeLike[int_] | Type[int | ctypes.c_long]) -> Type[ctypes.c_long]: ... +@overload +def as_ctypes_type(dtype: _LongLongCodes | _DTypeLike[longlong] | Type[ctypes.c_longlong]) -> Type[ctypes.c_longlong]: ... +@overload +def as_ctypes_type(dtype: _UByteCodes | _DTypeLike[ubyte] | Type[ctypes.c_ubyte]) -> Type[ctypes.c_ubyte]: ... +@overload +def as_ctypes_type(dtype: _UShortCodes | _DTypeLike[ushort] | Type[ctypes.c_ushort]) -> Type[ctypes.c_ushort]: ... +@overload +def as_ctypes_type(dtype: _UIntCCodes | _DTypeLike[uintc] | Type[ctypes.c_uint]) -> Type[ctypes.c_uint]: ... +@overload +def as_ctypes_type(dtype: _UIntCodes | _DTypeLike[uint] | Type[ctypes.c_ulong]) -> Type[ctypes.c_ulong]: ... +@overload +def as_ctypes_type(dtype: _ULongLongCodes | _DTypeLike[ulonglong] | Type[ctypes.c_ulonglong]) -> Type[ctypes.c_ulonglong]: ... +@overload +def as_ctypes_type(dtype: _SingleCodes | _DTypeLike[single] | Type[ctypes.c_float]) -> Type[ctypes.c_float]: ... +@overload +def as_ctypes_type(dtype: _DoubleCodes | _DTypeLike[double] | Type[float | ctypes.c_double]) -> Type[ctypes.c_double]: ... +@overload +def as_ctypes_type(dtype: _LongDoubleCodes | _DTypeLike[longdouble] | Type[ctypes.c_longdouble]) -> Type[ctypes.c_longdouble]: ... +@overload +def as_ctypes_type(dtype: _VoidDTypeLike) -> Type[Any]: ... # `ctypes.Union` or `ctypes.Structure` +@overload +def as_ctypes_type(dtype: str) -> Type[Any]: ... + +@overload +def as_array(obj: ctypes._PointerLike, shape: Sequence[int]) -> NDArray[Any]: ... +@overload +def as_array(obj: _ArrayLike[_SCT], shape: None | _ShapeLike = ...) -> NDArray[_SCT]: ... +@overload +def as_array(obj: object, shape: None | _ShapeLike = ...) -> NDArray[Any]: ... + +@overload +def as_ctypes(obj: bool_) -> ctypes.c_bool: ... +@overload +def as_ctypes(obj: byte) -> ctypes.c_byte: ... +@overload +def as_ctypes(obj: short) -> ctypes.c_short: ... +@overload +def as_ctypes(obj: intc) -> ctypes.c_int: ... 
+@overload +def as_ctypes(obj: int_) -> ctypes.c_long: ... +@overload +def as_ctypes(obj: longlong) -> ctypes.c_longlong: ... +@overload +def as_ctypes(obj: ubyte) -> ctypes.c_ubyte: ... +@overload +def as_ctypes(obj: ushort) -> ctypes.c_ushort: ... +@overload +def as_ctypes(obj: uintc) -> ctypes.c_uint: ... +@overload +def as_ctypes(obj: uint) -> ctypes.c_ulong: ... +@overload +def as_ctypes(obj: ulonglong) -> ctypes.c_ulonglong: ... +@overload +def as_ctypes(obj: single) -> ctypes.c_float: ... +@overload +def as_ctypes(obj: double) -> ctypes.c_double: ... +@overload +def as_ctypes(obj: longdouble) -> ctypes.c_longdouble: ... +@overload +def as_ctypes(obj: void) -> Any: ... # `ctypes.Union` or `ctypes.Structure` +@overload +def as_ctypes(obj: NDArray[bool_]) -> ctypes.Array[ctypes.c_bool]: ... +@overload +def as_ctypes(obj: NDArray[byte]) -> ctypes.Array[ctypes.c_byte]: ... +@overload +def as_ctypes(obj: NDArray[short]) -> ctypes.Array[ctypes.c_short]: ... +@overload +def as_ctypes(obj: NDArray[intc]) -> ctypes.Array[ctypes.c_int]: ... +@overload +def as_ctypes(obj: NDArray[int_]) -> ctypes.Array[ctypes.c_long]: ... +@overload +def as_ctypes(obj: NDArray[longlong]) -> ctypes.Array[ctypes.c_longlong]: ... +@overload +def as_ctypes(obj: NDArray[ubyte]) -> ctypes.Array[ctypes.c_ubyte]: ... +@overload +def as_ctypes(obj: NDArray[ushort]) -> ctypes.Array[ctypes.c_ushort]: ... +@overload +def as_ctypes(obj: NDArray[uintc]) -> ctypes.Array[ctypes.c_uint]: ... +@overload +def as_ctypes(obj: NDArray[uint]) -> ctypes.Array[ctypes.c_ulong]: ... +@overload +def as_ctypes(obj: NDArray[ulonglong]) -> ctypes.Array[ctypes.c_ulonglong]: ... +@overload +def as_ctypes(obj: NDArray[single]) -> ctypes.Array[ctypes.c_float]: ... +@overload +def as_ctypes(obj: NDArray[double]) -> ctypes.Array[ctypes.c_double]: ... +@overload +def as_ctypes(obj: NDArray[longdouble]) -> ctypes.Array[ctypes.c_longdouble]: ... +@overload +def as_ctypes(obj: NDArray[void]) -> ctypes.Array[Any]: ... # `ctypes.Union` or `ctypes.Structure` diff --git a/numpy/distutils/ccompiler.py b/numpy/distutils/ccompiler.py index 6d063ee4e..9c85d28b9 100644 --- a/numpy/distutils/ccompiler.py +++ b/numpy/distutils/ccompiler.py @@ -23,7 +23,8 @@ from numpy.distutils.exec_command import ( ) from numpy.distutils.misc_util import cyg2win32, is_sequence, mingw32, \ get_num_build_jobs, \ - _commandline_dep_string + _commandline_dep_string, \ + sanitize_cxx_flags # globals for parallel build management import threading @@ -258,9 +259,6 @@ def CCompiler_compile(self, sources, output_dir=None, macros=None, If compilation fails. """ - # This method is effective only with Python >=2.3 distutils. - # Any changes here should be applied also to fcompiler.compile - # method to support pre Python 2.3 distutils. global _job_semaphore jobs = get_num_build_jobs() @@ -677,7 +675,9 @@ def CCompiler_cxx_compiler(self): return self cxx = copy(self) - cxx.compiler_so = [cxx.compiler_cxx[0]] + cxx.compiler_so[1:] + cxx.compiler_cxx = cxx.compiler_cxx + cxx.compiler_so = [cxx.compiler_cxx[0]] + \ + sanitize_cxx_flags(cxx.compiler_so[1:]) if sys.platform.startswith('aix') and 'ld_so_aix' in cxx.linker_so[0]: # AIX needs the ld_so_aix script included with Python cxx.linker_so = [cxx.linker_so[0], cxx.compiler_cxx[0]] \ diff --git a/numpy/distutils/ccompiler_opt.py b/numpy/distutils/ccompiler_opt.py index e7fd494d3..d7df386fe 100644 --- a/numpy/distutils/ccompiler_opt.py +++ b/numpy/distutils/ccompiler_opt.py @@ -8,7 +8,14 @@ the sources with proper compiler's flags. 
instead only focuses on the compiler side, but it creates abstract C headers that can be used later for the final runtime dispatching process.""" -import sys, io, os, re, textwrap, pprint, inspect, atexit, subprocess +import atexit +import inspect +import os +import pprint +import re +import subprocess +import textwrap + class _Config: """An abstract class holds all configurable attributes of `CCompilerOpt`, @@ -188,7 +195,8 @@ class _Config: # native usually works only with x86 native = '-march=native', opt = '-O3', - werror = '-Werror' + werror = '-Werror', + cxx = '-std=c++11', ), clang = dict( native = '-march=native', @@ -198,22 +206,26 @@ class _Config: # cases `-Werror` gets skipped during the availability test due to # "unused arguments" warnings. # see https://github.com/numpy/numpy/issues/19624 - werror = '-Werror-implicit-function-declaration -Werror' + werror = '-Werror=switch -Werror', + cxx = '-std=c++11', ), icc = dict( native = '-xHost', opt = '-O3', - werror = '-Werror' + werror = '-Werror', + cxx = '-std=c++11', ), iccw = dict( native = '/QxHost', opt = '/O3', - werror = '/Werror' + werror = '/Werror', + cxx = '-std=c++11', ), msvc = dict( native = None, opt = '/O2', - werror = '/WX' + werror = '/WX', + cxx = '-std=c++11', ) ) conf_min_features = dict( @@ -406,8 +418,8 @@ class _Config: AVX512_ICL = dict(flags="/Qx:ICELAKE-CLIENT") ) if on_x86 and self.cc_is_msvc: return dict( - SSE = dict(flags="/arch:SSE"), - SSE2 = dict(flags="/arch:SSE2"), + SSE = dict(flags="/arch:SSE") if self.cc_on_x86 else {}, + SSE2 = dict(flags="/arch:SSE2") if self.cc_on_x86 else {}, SSE3 = {}, SSSE3 = {}, SSE41 = {}, @@ -516,7 +528,8 @@ class _Config: def __init__(self): if self.conf_tmp_path is None: - import tempfile, shutil + import shutil + import tempfile tmp = tempfile.mkdtemp() def rm_temp(): try: @@ -555,6 +568,7 @@ class _Distutils: flags = kwargs.pop("extra_postargs", []) + flags if not ccompiler: ccompiler = self._ccompiler + return ccompiler.compile(sources, extra_postargs=flags, **kwargs) def dist_test(self, source, flags, macros=[]): @@ -696,7 +710,6 @@ class _Distutils: ) @staticmethod def _dist_test_spawn(cmd, display=None): - from distutils.errors import CompileError try: o = subprocess.check_output(cmd, stderr=subprocess.STDOUT, universal_newlines=True) diff --git a/numpy/distutils/command/build_clib.py b/numpy/distutils/command/build_clib.py index 0e31a7dee..45201f98f 100644 --- a/numpy/distutils/command/build_clib.py +++ b/numpy/distutils/command/build_clib.py @@ -185,6 +185,30 @@ class build_clib(old_build_clib): for (lib_name, build_info) in libraries: self.build_a_library(build_info, lib_name, libraries) + def assemble_flags(self, in_flags): + """ Assemble flags from flag list + + Parameters + ---------- + in_flags : None or sequence + None corresponds to empty list. Sequence elements can be strings + or callables that return lists of strings. Callable takes `self` as + single parameter. 
+ + Returns + ------- + out_flags : list + """ + if in_flags is None: + return [] + out_flags = [] + for in_flag in in_flags: + if callable(in_flag): + out_flags += in_flag(self) + else: + out_flags.append(in_flag) + return out_flags + def build_a_library(self, build_info, lib_name, libraries): # default compilers compiler = self.compiler @@ -263,7 +287,13 @@ class build_clib(old_build_clib): include_dirs = build_info.get('include_dirs') if include_dirs is None: include_dirs = [] - extra_postargs = build_info.get('extra_compiler_args') or [] + # Flags can be strings, or callables that return a list of strings. + extra_postargs = self.assemble_flags( + build_info.get('extra_compiler_args')) + extra_cflags = self.assemble_flags( + build_info.get('extra_cflags')) + extra_cxxflags = self.assemble_flags( + build_info.get('extra_cxxflags')) include_dirs.extend(get_numpy_include_dirs()) # where compiled F90 module files are: @@ -315,38 +345,45 @@ class build_clib(old_build_clib): macros=macros + copt_macros, include_dirs=include_dirs, debug=self.debug, - extra_postargs=extra_postargs, + extra_postargs=extra_postargs + extra_cxxflags, ccompiler=cxx_compiler ) if copt_c_sources: log.info("compiling C dispatch-able sources") - objects += self.compiler_opt.try_dispatch(copt_c_sources, - output_dir=self.build_temp, - src_dir=copt_build_src, - macros=macros + copt_macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=extra_postargs) + objects += self.compiler_opt.try_dispatch( + copt_c_sources, + output_dir=self.build_temp, + src_dir=copt_build_src, + macros=macros + copt_macros, + include_dirs=include_dirs, + debug=self.debug, + extra_postargs=extra_postargs + extra_cflags) if c_sources: log.info("compiling C sources") - objects += compiler.compile(c_sources, - output_dir=self.build_temp, - macros=macros + copt_macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=extra_postargs + copt_baseline_flags) + objects += compiler.compile( + c_sources, + output_dir=self.build_temp, + macros=macros + copt_macros, + include_dirs=include_dirs, + debug=self.debug, + extra_postargs=(extra_postargs + + copt_baseline_flags + + extra_cflags)) if cxx_sources: log.info("compiling C++ sources") cxx_compiler = compiler.cxx_compiler() - cxx_objects = cxx_compiler.compile(cxx_sources, - output_dir=self.build_temp, - macros=macros + copt_macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=extra_postargs + copt_baseline_flags) + cxx_objects = cxx_compiler.compile( + cxx_sources, + output_dir=self.build_temp, + macros=macros + copt_macros, + include_dirs=include_dirs, + debug=self.debug, + extra_postargs=(extra_postargs + + copt_baseline_flags + + extra_cxxflags)) objects.extend(cxx_objects) if f_sources or fmodule_sources: diff --git a/numpy/distutils/command/build_ext.py b/numpy/distutils/command/build_ext.py index 84ec8aa2c..7040a2411 100644 --- a/numpy/distutils/command/build_ext.py +++ b/numpy/distutils/command/build_ext.py @@ -243,7 +243,8 @@ class build_ext (old_build_ext): if l and l != ext_language and ext.language: log.warn('resetting extension %r language from %r to %r.' 
% (ext.name, l, ext_language)) - ext.language = ext_language + if not ext.language: + ext.language = ext_language # global language all_languages.update(ext_languages) @@ -376,6 +377,9 @@ class build_ext (old_build_ext): log.info("building '%s' extension", ext.name) extra_args = ext.extra_compile_args or [] + extra_cflags = ext.extra_c_compile_args or [] + extra_cxxflags = ext.extra_cxx_compile_args or [] + macros = ext.define_macros[:] for undef in ext.undef_macros: macros.append((undef,)) @@ -462,38 +466,43 @@ class build_ext (old_build_ext): macros=macros + copt_macros, include_dirs=include_dirs, debug=self.debug, - extra_postargs=extra_args, + extra_postargs=extra_args + extra_cxxflags, ccompiler=cxx_compiler, **kws ) if copt_c_sources: log.info("compiling C dispatch-able sources") - c_objects += self.compiler_opt.try_dispatch(copt_c_sources, - output_dir=output_dir, - src_dir=copt_build_src, - macros=macros + copt_macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=extra_args, - **kws) + c_objects += self.compiler_opt.try_dispatch( + copt_c_sources, + output_dir=output_dir, + src_dir=copt_build_src, + macros=macros + copt_macros, + include_dirs=include_dirs, + debug=self.debug, + extra_postargs=extra_args + extra_cflags, + **kws) if c_sources: log.info("compiling C sources") - c_objects += self.compiler.compile(c_sources, - output_dir=output_dir, - macros=macros + copt_macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=extra_args + copt_baseline_flags, - **kws) + c_objects += self.compiler.compile( + c_sources, + output_dir=output_dir, + macros=macros + copt_macros, + include_dirs=include_dirs, + debug=self.debug, + extra_postargs=(extra_args + copt_baseline_flags + + extra_cflags), + **kws) if cxx_sources: log.info("compiling C++ sources") - c_objects += cxx_compiler.compile(cxx_sources, - output_dir=output_dir, - macros=macros + copt_macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=extra_args + copt_baseline_flags, - **kws) + c_objects += cxx_compiler.compile( + cxx_sources, + output_dir=output_dir, + macros=macros + copt_macros, + include_dirs=include_dirs, + debug=self.debug, + extra_postargs=(extra_args + copt_baseline_flags + + extra_cxxflags), + **kws) extra_postargs = [] f_objects = [] @@ -602,7 +611,7 @@ class build_ext (old_build_ext): # Expand possible fake static libraries to objects; # make sure to iterate over a copy of the list as # "fake" libraries will be removed as they are - # enountered + # encountered for lib in libraries[:]: for libdir in library_dirs: fake_lib = os.path.join(libdir, lib + '.fobjects') diff --git a/numpy/distutils/core.py b/numpy/distutils/core.py index d5551f349..c4a14e599 100644 --- a/numpy/distutils/core.py +++ b/numpy/distutils/core.py @@ -19,7 +19,7 @@ import warnings import distutils.core import distutils.dist -from numpy.distutils.extension import Extension +from numpy.distutils.extension import Extension # noqa: F401 from numpy.distutils.numpy_distribution import NumpyDistribution from numpy.distutils.command import config, config_compiler, \ build, build_py, build_ext, build_clib, build_src, build_scripts, \ diff --git a/numpy/distutils/extension.py b/numpy/distutils/extension.py index c90b5d725..3ede013e0 100644 --- a/numpy/distutils/extension.py +++ b/numpy/distutils/extension.py @@ -47,6 +47,8 @@ class Extension(old_Extension): language=None, f2py_options=None, module_dirs=None, + extra_c_compile_args=None, + extra_cxx_compile_args=None, extra_f77_compile_args=None, 
extra_f90_compile_args=None,): @@ -83,6 +85,8 @@ class Extension(old_Extension): # numpy_distutils features self.f2py_options = f2py_options or [] self.module_dirs = module_dirs or [] + self.extra_c_compile_args = extra_c_compile_args or [] + self.extra_cxx_compile_args = extra_cxx_compile_args or [] self.extra_f77_compile_args = extra_f77_compile_args or [] self.extra_f90_compile_args = extra_f90_compile_args or [] diff --git a/numpy/distutils/fcompiler/__init__.py b/numpy/distutils/fcompiler/__init__.py index d7579e976..c333517c0 100644 --- a/numpy/distutils/fcompiler/__init__.py +++ b/numpy/distutils/fcompiler/__init__.py @@ -745,7 +745,8 @@ _default_compilers = ( ('cygwin.*', ('gnu', 'intelv', 'absoft', 'compaqv', 'intelev', 'gnu95', 'g95')), ('linux.*', ('gnu95', 'intel', 'lahey', 'pg', 'nv', 'absoft', 'nag', 'vast', 'compaq', 'intele', 'intelem', 'gnu', 'g95', 'pathf95', 'nagfor', 'fujitsu')), - ('darwin.*', ('gnu95', 'nag', 'absoft', 'ibm', 'intel', 'gnu', 'g95', 'pg')), + ('darwin.*', ('gnu95', 'nag', 'nagfor', 'absoft', 'ibm', 'intel', 'gnu', + 'g95', 'pg')), ('sunos.*', ('sun', 'gnu', 'gnu95', 'g95')), ('irix.*', ('mips', 'gnu', 'gnu95',)), ('aix.*', ('ibm', 'gnu', 'gnu95',)), diff --git a/numpy/distutils/fcompiler/gnu.py b/numpy/distutils/fcompiler/gnu.py index 02372f5e6..39178071d 100644 --- a/numpy/distutils/fcompiler/gnu.py +++ b/numpy/distutils/fcompiler/gnu.py @@ -113,7 +113,7 @@ class GnuFCompiler(FCompiler): # If MACOSX_DEPLOYMENT_TARGET is set, we simply trust the value # and leave it alone. But, distutils will complain if the # environment's value is different from the one in the Python - # Makefile used to build Python. We let disutils handle this + # Makefile used to build Python. We let distutils handle this # error checking. if not target: # If MACOSX_DEPLOYMENT_TARGET is not set in the environment, @@ -535,7 +535,6 @@ def _can_target(cmd, arch): os.remove(output) finally: os.remove(filename) - return False if __name__ == '__main__': diff --git a/numpy/distutils/fcompiler/nag.py b/numpy/distutils/fcompiler/nag.py index 7df8ffe2c..939201f44 100644 --- a/numpy/distutils/fcompiler/nag.py +++ b/numpy/distutils/fcompiler/nag.py @@ -64,6 +64,11 @@ class NAGFORCompiler(BaseNAGFCompiler): 'ranlib' : ["ranlib"] } + def get_flags_linker_so(self): + if sys.platform == 'darwin': + return ['-unsharedrts', + '-Wl,-bundle,-flat_namespace,-undefined,suppress'] + return BaseNAGFCompiler.get_flags_linker_so(self) def get_flags_debug(self): version = self.get_version() if version and version > '6.1': diff --git a/numpy/distutils/mingw32ccompiler.py b/numpy/distutils/mingw32ccompiler.py index 4681d403b..82d296434 100644 --- a/numpy/distutils/mingw32ccompiler.py +++ b/numpy/distutils/mingw32ccompiler.py @@ -547,12 +547,12 @@ if sys.platform == 'win32': # Value from msvcrt.CRT_ASSEMBLY_VERSION under Python 3.3.0 # on Windows XP: _MSVCRVER_TO_FULLVER['100'] = "10.0.30319.460" - # Python 3.7 uses 1415, but get_build_version returns 140 ?? 
- _MSVCRVER_TO_FULLVER['140'] = "14.15.26726.0" - if hasattr(msvcrt, "CRT_ASSEMBLY_VERSION"): - major, minor, rest = msvcrt.CRT_ASSEMBLY_VERSION.split(".", 2) - _MSVCRVER_TO_FULLVER[major + minor] = msvcrt.CRT_ASSEMBLY_VERSION - del major, minor, rest + crt_ver = getattr(msvcrt, 'CRT_ASSEMBLY_VERSION', None) + if crt_ver is not None: # Available at least back to Python 3.3 + maj, min = re.match(r'(\d+)\.(\d)', crt_ver).groups() + _MSVCRVER_TO_FULLVER[maj + min] = crt_ver + del maj, min + del crt_ver except ImportError: # If we are here, means python was not built with MSVC. Not sure what # to do in that case: manifest building will fail, but it should not be @@ -647,11 +647,9 @@ def generate_manifest(config): if msver is not None: if msver >= 8: check_embedded_msvcr_match_linked(msver) - ma = int(msver) - mi = int((msver - ma) * 10) + ma_str, mi_str = str(msver).split('.') # Write the manifest file - manxml = msvc_manifest_xml(ma, mi) - man = open(manifest_name(config), "w") - config.temp_files.append(manifest_name(config)) - man.write(manxml) - man.close() + manxml = msvc_manifest_xml(int(ma_str), int(mi_str)) + with open(manifest_name(config), "w") as man: + config.temp_files.append(manifest_name(config)) + man.write(manxml) diff --git a/numpy/distutils/misc_util.py b/numpy/distutils/misc_util.py index a903f3ea3..f0f9b4bd7 100644 --- a/numpy/distutils/misc_util.py +++ b/numpy/distutils/misc_util.py @@ -11,6 +11,7 @@ import multiprocessing import textwrap import importlib.util from threading import local as tlocal +from functools import reduce import distutils from distutils.errors import DistutilsError @@ -43,7 +44,7 @@ __all__ = ['Configuration', 'get_numpy_include_dirs', 'default_config_dict', 'dot_join', 'get_frame', 'minrelpath', 'njoin', 'is_sequence', 'is_string', 'as_list', 'gpaths', 'get_language', 'get_build_architecture', 'get_info', 'get_pkg_info', - 'get_num_build_jobs'] + 'get_num_build_jobs', 'sanitize_cxx_flags'] class InstallableLib: """ @@ -128,8 +129,8 @@ def quote_args(args): def allpath(name): "Convert a /-separated pathname to one using the OS's path separator." - splitted = name.split('/') - return os.path.join(*splitted) + split = name.split('/') + return os.path.join(*split) def rel_path(path, parent_path): """Return path relative to parent_path.""" @@ -2478,3 +2479,15 @@ def get_build_architecture(): # systems, so delay the import to here. from distutils.msvccompiler import get_build_architecture return get_build_architecture() + + +_cxx_ignore_flags = {'-Werror=implicit-function-declaration'} + + +def sanitize_cxx_flags(cxxflags): + ''' + Some flags are valid for C but not C++. Prune them. 
+ ''' + return [flag for flag in cxxflags if flag not in _cxx_ignore_flags] + + diff --git a/numpy/distutils/system_info.py b/numpy/distutils/system_info.py index 8467e1c19..7f41bb07e 100644 --- a/numpy/distutils/system_info.py +++ b/numpy/distutils/system_info.py @@ -387,11 +387,10 @@ def get_standard_file(fname): f = __file__ except NameError: f = sys.argv[0] - else: - sysfile = os.path.join(os.path.split(os.path.abspath(f))[0], - fname) - if os.path.isfile(sysfile): - filenames.append(sysfile) + sysfile = os.path.join(os.path.split(os.path.abspath(f))[0], + fname) + if os.path.isfile(sysfile): + filenames.append(sysfile) # Home directory # And look for the user config file @@ -414,7 +413,8 @@ def get_standard_file(fname): def _parse_env_order(base_order, env): """ Parse an environment variable `env` by splitting with "," and only returning elements from `base_order` - This method will sequence the environment variable and check for their invidual elements in `base_order`. + This method will sequence the environment variable and check for their + individual elements in `base_order`. The items in the environment variable may be negated via '^item' or '!itema,itemb'. It must start with ^/! to negate all options. diff --git a/numpy/distutils/tests/test_ccompiler_opt.py b/numpy/distutils/tests/test_ccompiler_opt.py index 9c54ed66b..1b27ab07c 100644 --- a/numpy/distutils/tests/test_ccompiler_opt.py +++ b/numpy/distutils/tests/test_ccompiler_opt.py @@ -434,7 +434,8 @@ class _Test_CCompilerOpt: self.expect_flags( "sse sse2 vsx vsx2 neon neon_fp16", x86_gcc="-msse -msse2", x86_icc="-msse -msse2", - x86_iccw="/arch:SSE2", x86_msvc="/arch:SSE2", + x86_iccw="/arch:SSE2", + x86_msvc="/arch:SSE2" if self.march() == "x86" else "", ppc64_gcc= "-mcpu=power8", ppc64_clang="-maltivec -mvsx -mpower8-vector", armhf_gcc="-mfpu=neon-fp16 -mfp16-format=ieee", @@ -636,7 +637,8 @@ class _Test_CCompilerOpt: x86_gcc="avx512f avx2 sse42 sse41 sse2", x86_icc="avx512f avx2 sse42 sse41 sse2", x86_iccw="avx512f avx2 sse42 sse41 sse2", - x86_msvc="avx512f avx2 sse2", + x86_msvc="avx512f avx2 sse2" + if self.march() == 'x86' else "avx512f avx2", ppc64="vsx3 vsx2", armhf="asimddp asimd neon_vfpv4 neon", # neon, neon_vfpv4, asimd implies each other diff --git a/numpy/distutils/tests/test_system_info.py b/numpy/distutils/tests/test_system_info.py index b722281ad..8c26271af 100644 --- a/numpy/distutils/tests/test_system_info.py +++ b/numpy/distutils/tests/test_system_info.py @@ -254,6 +254,10 @@ class TestSystemInfoReading: finally: os.chdir(previousDir) + HAS_MKL = "mkl_rt" in mkl_info().calc_libraries_info().get("libraries", []) + + @pytest.mark.xfail(HAS_MKL, reason=("`[DEFAULT]` override doesn't work if " + "numpy is built with MKL support")) def test_overrides(self): previousDir = os.getcwd() cfg = os.path.join(self._dir1, 'site.cfg') diff --git a/numpy/doc/constants.py b/numpy/doc/constants.py index 128493d90..4db5c6390 100644 --- a/numpy/doc/constants.py +++ b/numpy/doc/constants.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ ========= Constants diff --git a/numpy/f2py/__init__.py b/numpy/f2py/__init__.py index 47354cd9d..a0fb73619 100644 --- a/numpy/f2py/__init__.py +++ b/numpy/f2py/__init__.py @@ -71,8 +71,8 @@ def compile(source, Examples -------- - .. include:: compile_session.dat - :literal: + .. 
literalinclude:: code/results/compile_session.dat + :language: python """ import tempfile diff --git a/numpy/f2py/__main__.py b/numpy/f2py/__main__.py index c6115070e..936a753a2 100644 --- a/numpy/f2py/__main__.py +++ b/numpy/f2py/__main__.py @@ -1,4 +1,5 @@ -# See http://cens.ioc.ee/projects/f2py2e/ +# See: +# https://web.archive.org/web/20140822061353/http://cens.ioc.ee/projects/f2py2e from numpy.f2py.f2py2e import main main() diff --git a/numpy/f2py/auxfuncs.py b/numpy/f2py/auxfuncs.py index 5250fea84..c8f2067c9 100644 --- a/numpy/f2py/auxfuncs.py +++ b/numpy/f2py/auxfuncs.py @@ -347,9 +347,9 @@ def iscomplexfunction_warn(rout): ************************************************************** Warning: code with a function returning complex value may not work correctly with your Fortran compiler. - Run the following test before using it in your applications: - $(f2py install dir)/test-site/{b/runme_scalar,e/runme} - When using GNU gcc/g77 compilers, codes should work correctly. + When using GNU gcc/g77 compilers, codes should work + correctly for callbacks with: + f2py -c -DF2PY_CB_RETURNCOMPLEX **************************************************************\n""") return 1 return 0 diff --git a/numpy/f2py/capi_maps.py b/numpy/f2py/capi_maps.py index fe0d4a52b..655cfd768 100644 --- a/numpy/f2py/capi_maps.py +++ b/numpy/f2py/capi_maps.py @@ -48,7 +48,7 @@ c2py_map = {'double': 'float', 'unsigned_char': 'int', # forced casting 'short': 'int', # forced casting 'unsigned_short': 'int', # forced casting - 'int': 'int', # (forced casting) + 'int': 'int', # forced casting 'long': 'int', 'long_long': 'long', 'unsigned': 'int', # forced casting @@ -95,8 +95,8 @@ if using_newcore: 'complex_double': 'NPY_CDOUBLE', 'complex_long_double': 'NPY_CDOUBLE', 'string':'NPY_STRING' - } + c2pycode_map = {'double': 'd', 'float': 'f', 'long_double': 'd', # forced casting @@ -114,6 +114,7 @@ c2pycode_map = {'double': 'd', 'complex_long_double': 'D', # forced casting 'string': 'c' } + if using_newcore: c2pycode_map = {'double': 'd', 'float': 'f', @@ -133,6 +134,7 @@ if using_newcore: 'complex_double': 'D', 'complex_long_double': 'G', 'string': 'S'} + c2buildvalue_map = {'double': 'd', 'float': 'f', 'char': 'b', @@ -146,10 +148,6 @@ c2buildvalue_map = {'double': 'd', 'complex_long_double': 'N', 'string': 'y'} -if using_newcore: - # c2buildvalue_map=??? - pass - f2cmap_all = {'real': {'': 'float', '4': 'float', '8': 'double', '12': 'long_double', '16': 'long_double'}, 'integer': {'': 'int', '1': 'signed_char', '2': 'short', @@ -185,22 +183,22 @@ def load_f2cmap_file(f2cmap_file): return # User defined additions to f2cmap_all. - # f2cmap_file must contain a dictionary of dictionaries, only. For + # f2cmap_file must contain a dictionary of dictionaries, only. For # example, {'real':{'low':'float'}} means that Fortran 'real(low)' is - # interpreted as C 'float'. This feature is useful for F90/95 users if - # they use PARAMETERSs in type specifications. + # interpreted as C 'float'. This feature is useful for F90/95 users if + # they use PARAMETERS in type specifications. 
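For concreteness, a hypothetical `.f2py_f2cmap` file in the format the loader below expects (a plain Python dict-of-dicts literal, read back with `eval`):

# Contents of a hypothetical .f2py_f2cmap file: maps Fortran
# real(low) to C float and integer(int64) to C long_long.
{'real': {'low': 'float'},
 'integer': {'int64': 'long_long'}}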
try: outmess('Reading f2cmap from {!r} ...\n'.format(f2cmap_file)) with open(f2cmap_file, 'r') as f: d = eval(f.read(), {}, {}) - for k, d1 in list(d.items()): - for k1 in list(d1.keys()): + for k, d1 in d.items(): + for k1 in d1.keys(): d1[k1.lower()] = d1[k1] d[k.lower()] = d[k] - for k in list(d.keys()): + for k in d.keys(): if k not in f2cmap_all: f2cmap_all[k] = {} - for k1 in list(d[k].keys()): + for k1 in d[k].keys(): if d[k][k1] in c2py_map: if k1 in f2cmap_all[k]: outmess( @@ -279,11 +277,9 @@ def getctype(var): errmess('getctype: "%s(kind=%s)" is mapped to C "%s" (to override define dict(%s = dict(%s="<C typespec>")) in %s/.f2py_f2cmap file).\n' % (typespec, var['kindselector']['kind'], ctype, typespec, var['kindselector']['kind'], os.getcwd())) - else: if not isexternal(var): - errmess( - 'getctype: No C-type found in "%s", assuming void.\n' % var) + errmess('getctype: No C-type found in "%s", assuming void.\n' % var) return ctype @@ -523,7 +519,7 @@ def sign2map(a, var): if f(var): intent_flags.append('F2PY_%s' % s) if intent_flags: - # XXX: Evaluate intent_flags here. + # TODO: Evaluate intent_flags here. ret['intent'] = '|'.join(intent_flags) else: ret['intent'] = 'F2PY_INTENT_IN' diff --git a/numpy/f2py/cb_rules.py b/numpy/f2py/cb_rules.py index 5c9ddb00a..4848233d4 100644 --- a/numpy/f2py/cb_rules.py +++ b/numpy/f2py/cb_rules.py @@ -191,7 +191,7 @@ capi_return_pt: 'maxnofargs': '#maxnofargs#', 'nofoptargs': '#nofoptargs#', 'docstr': """\ -\tdef #argname#(#docsignature#): return #docreturn#\\n\\ + def #argname#(#docsignature#): return #docreturn#\\n\\ #docstrsigns#""", 'latexdocstr': """ {{}\\verb@def #argname#(#latexdocsignature#): return #docreturn#@{}} @@ -219,10 +219,10 @@ cb_rout_rules = [ 'noargs': '', 'setdims': '/*setdims*/', 'docstrsigns': '', 'latexdocstrsigns': '', - 'docstrreq': '\tRequired arguments:', - 'docstropt': '\tOptional arguments:', - 'docstrout': '\tReturn objects:', - 'docstrcbs': '\tCall-back functions:', + 'docstrreq': ' Required arguments:', + 'docstropt': ' Optional arguments:', + 'docstrout': ' Return objects:', + 'docstrcbs': ' Call-back functions:', 'docreturn': '', 'docsign': '', 'docsignopt': '', 'latexdocstrreq': '\\noindent Required arguments:', 'latexdocstropt': '\\noindent Optional arguments:', @@ -306,7 +306,7 @@ return_value 'string.h', 'GETSCALARFROMPYTUPLE', '#ctype#'], '_check': iscomplexfunction }, - {'docstrout': '\t\t#pydocsignout#', + {'docstrout': ' #pydocsignout#', 'latexdocstrout': ['\\item[]{{}\\verb@#pydocsignout#@{}}', {hasnote: '--- #note#'}], 'docreturn': '#rname#,', @@ -316,9 +316,9 @@ return_value cb_arg_rules = [ { # Doc - 'docstropt': {l_and(isoptional, isintent_nothide): '\t\t#pydocsign#'}, - 'docstrreq': {l_and(isrequired, isintent_nothide): '\t\t#pydocsign#'}, - 'docstrout': {isintent_out: '\t\t#pydocsignout#'}, + 'docstropt': {l_and(isoptional, isintent_nothide): ' #pydocsign#'}, + 'docstrreq': {l_and(isrequired, isintent_nothide): ' #pydocsign#'}, + 'docstrout': {isintent_out: ' #pydocsignout#'}, 'latexdocstropt': {l_and(isoptional, isintent_nothide): ['\\item[]{{}\\verb@#pydocsign#@{}}', {hasnote: '--- #note#'}]}, 'latexdocstrreq': {l_and(isrequired, isintent_nothide): ['\\item[]{{}\\verb@#pydocsign#@{}}', @@ -492,7 +492,7 @@ def buildcallbacks(m): def buildcallback(rout, um): from . 
import capi_maps - outmess('\tConstructing call-back function "cb_%s_in_%s"\n' % + outmess(' Constructing call-back function "cb_%s_in_%s"\n' % (rout['name'], um)) args, depargs = getargs(rout) capi_maps.depargs = depargs @@ -612,6 +612,6 @@ def buildcallback(rout, um): 'latexdocstr': ar['latexdocstr'], 'argname': rd['argname'] } - outmess('\t %s\n' % (ar['docstrshort'])) + outmess(' %s\n' % (ar['docstrshort'])) return ################## Build call-back function ############# diff --git a/numpy/f2py/cfuncs.py b/numpy/f2py/cfuncs.py index 714f9a932..1d9236dcd 100644 --- a/numpy/f2py/cfuncs.py +++ b/numpy/f2py/cfuncs.py @@ -51,7 +51,7 @@ includes0['math.h'] = '#include <math.h>' includes0['string.h'] = '#include <string.h>' includes0['setjmp.h'] = '#include <setjmp.h>' -includes['Python.h'] = '#include "Python.h"' +includes['Python.h'] = '#include <Python.h>' needs['arrayobject.h'] = ['Python.h'] includes['arrayobject.h'] = '''#define PY_ARRAY_UNIQUE_SYMBOL PyArray_API #include "arrayobject.h"''' @@ -338,16 +338,16 @@ cppmacros['TRYPYARRAYTEMPLATE'] = """\ if (!(arr=(PyArrayObject *)obj)) {fprintf(stderr,\"TRYPYARRAYTEMPLATE:\");PRINTPYOBJERR(obj);return 0;}\\ if (PyArray_DESCR(arr)->type==typecode) {*(ctype *)(PyArray_DATA(arr))=*v; return 1;}\\ switch (PyArray_TYPE(arr)) {\\ - case NPY_DOUBLE: *(double *)(PyArray_DATA(arr))=*v; break;\\ - case NPY_INT: *(int *)(PyArray_DATA(arr))=*v; break;\\ - case NPY_LONG: *(long *)(PyArray_DATA(arr))=*v; break;\\ - case NPY_FLOAT: *(float *)(PyArray_DATA(arr))=*v; break;\\ - case NPY_CDOUBLE: *(double *)(PyArray_DATA(arr))=*v; break;\\ - case NPY_CFLOAT: *(float *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_DOUBLE: *(npy_double *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_INT: *(npy_int *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_LONG: *(npy_long *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_FLOAT: *(npy_float *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_CDOUBLE: *(npy_double *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_CFLOAT: *(npy_float *)(PyArray_DATA(arr))=*v; break;\\ case NPY_BOOL: *(npy_bool *)(PyArray_DATA(arr))=(*v!=0); break;\\ - case NPY_UBYTE: *(unsigned char *)(PyArray_DATA(arr))=*v; break;\\ - case NPY_BYTE: *(signed char *)(PyArray_DATA(arr))=*v; break;\\ - case NPY_SHORT: *(short *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_UBYTE: *(npy_ubyte *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_BYTE: *(npy_byte *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_SHORT: *(npy_short *)(PyArray_DATA(arr))=*v; break;\\ case NPY_USHORT: *(npy_ushort *)(PyArray_DATA(arr))=*v; break;\\ case NPY_UINT: *(npy_uint *)(PyArray_DATA(arr))=*v; break;\\ case NPY_ULONG: *(npy_ulong *)(PyArray_DATA(arr))=*v; break;\\ @@ -375,15 +375,19 @@ cppmacros['TRYCOMPLEXPYARRAYTEMPLATE'] = """\ return 1;\\ }\\ switch (PyArray_TYPE(arr)) {\\ - case NPY_CDOUBLE: *(double *)(PyArray_DATA(arr))=(*v).r;*(double *)(PyArray_DATA(arr)+sizeof(double))=(*v).i;break;\\ - case NPY_CFLOAT: *(float *)(PyArray_DATA(arr))=(*v).r;*(float *)(PyArray_DATA(arr)+sizeof(float))=(*v).i;break;\\ - case NPY_DOUBLE: *(double *)(PyArray_DATA(arr))=(*v).r; break;\\ - case NPY_LONG: *(long *)(PyArray_DATA(arr))=(*v).r; break;\\ - case NPY_FLOAT: *(float *)(PyArray_DATA(arr))=(*v).r; break;\\ - case NPY_INT: *(int *)(PyArray_DATA(arr))=(*v).r; break;\\ - case NPY_SHORT: *(short *)(PyArray_DATA(arr))=(*v).r; break;\\ - case NPY_UBYTE: *(unsigned char *)(PyArray_DATA(arr))=(*v).r; break;\\ - case NPY_BYTE: *(signed char *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_CDOUBLE: *(npy_double 
*)(PyArray_DATA(arr))=(*v).r;\\ + *(npy_double *)(PyArray_DATA(arr)+sizeof(npy_double))=(*v).i;\\ + break;\\ + case NPY_CFLOAT: *(npy_float *)(PyArray_DATA(arr))=(*v).r;\\ + *(npy_float *)(PyArray_DATA(arr)+sizeof(npy_float))=(*v).i;\\ + break;\\ + case NPY_DOUBLE: *(npy_double *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_LONG: *(npy_long *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_FLOAT: *(npy_float *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_INT: *(npy_int *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_SHORT: *(npy_short *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_UBYTE: *(npy_ubyte *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_BYTE: *(npy_byte *)(PyArray_DATA(arr))=(*v).r; break;\\ case NPY_BOOL: *(npy_bool *)(PyArray_DATA(arr))=((*v).r!=0 && (*v).i!=0); break;\\ case NPY_USHORT: *(npy_ushort *)(PyArray_DATA(arr))=(*v).r; break;\\ case NPY_UINT: *(npy_uint *)(PyArray_DATA(arr))=(*v).r; break;\\ @@ -391,7 +395,9 @@ cppmacros['TRYCOMPLEXPYARRAYTEMPLATE'] = """\ case NPY_LONGLONG: *(npy_longlong *)(PyArray_DATA(arr))=(*v).r; break;\\ case NPY_ULONGLONG: *(npy_ulonglong *)(PyArray_DATA(arr))=(*v).r; break;\\ case NPY_LONGDOUBLE: *(npy_longdouble *)(PyArray_DATA(arr))=(*v).r; break;\\ - case NPY_CLONGDOUBLE: *(npy_longdouble *)(PyArray_DATA(arr))=(*v).r;*(npy_longdouble *)(PyArray_DATA(arr)+sizeof(npy_longdouble))=(*v).i;break;\\ + case NPY_CLONGDOUBLE: *(npy_longdouble *)(PyArray_DATA(arr))=(*v).r;\\ + *(npy_longdouble *)(PyArray_DATA(arr)+sizeof(npy_longdouble))=(*v).i;\\ + break;\\ case NPY_OBJECT: PyArray_SETITEM(arr, PyArray_DATA(arr), pyobj_from_complex_ ## ctype ## 1((*v))); break;\\ default: return -2;\\ };\\ @@ -487,7 +493,7 @@ STRINGPADN replaces null values with padding values from the right. `to` must have size of at least N bytes. If the `to[N-1]` has null value, then replace it and all the -preceeding nulls with the given padding. +preceding, nulls with the given padding. STRINGPADN(to, N, PADDING, NULLVALUE) is an inverse operation. */ diff --git a/numpy/f2py/crackfortran.py b/numpy/f2py/crackfortran.py index c3ec792e3..67675af45 100755 --- a/numpy/f2py/crackfortran.py +++ b/numpy/f2py/crackfortran.py @@ -153,7 +153,7 @@ from . import __version__ # As the needed functions cannot be determined by static inspection of the # code, it is safest to use import * pending a major refactoring of f2py. from .auxfuncs import * - +from . 
import symbolic f2py_version = __version__.version @@ -245,7 +245,6 @@ for c in "abcdefghopqrstuvwxyz$_": defaultimplicitrules[c] = {'typespec': 'real'} for c in "ijklmn": defaultimplicitrules[c] = {'typespec': 'integer'} -del c badnames = {} invbadnames = {} for n in ['int', 'double', 'float', 'char', 'short', 'long', 'void', 'case', 'while', @@ -875,10 +874,11 @@ def appenddecl(decl, decl2, force=1): decl[k] = decl2[k] elif k == 'note': pass - elif k in ['intent', 'check', 'dimension', 'optional', 'required']: + elif k in ['intent', 'check', 'dimension', 'optional', + 'required', 'depend']: errmess('appenddecl: "%s" not implemented.\n' % k) else: - raise Exception('appenddecl: Unknown variable definition key:' + + raise Exception('appenddecl: Unknown variable definition key: ' + str(k)) return decl @@ -2217,188 +2217,6 @@ def getlincoef(e, xset): # e = a*x+b ; x in xset break return None, None, None -_varname_match = re.compile(r'\A[a-z]\w*\Z').match - - -def getarrlen(dl, args, star='*'): - """ - Parameters - ---------- - dl : sequence of two str objects - dimensions of the array - args : Iterable[str] - symbols used in the expression - star : Any - unused - - Returns - ------- - expr : str - Some numeric expression as a string - arg : Optional[str] - If understood, the argument from `args` present in `expr` - expr2 : Optional[str] - If understood, an expression fragment that should be used as - ``"(%s%s".format(something, expr2)``. - - Examples - -------- - >>> getarrlen(['10*x + 20', '40*x'], {'x'}) - ('30 * x - 19', 'x', '+19)/(30)') - >>> getarrlen(['1', '10*x + 20'], {'x'}) - ('10 * x + 20', 'x', '-20)/(10)') - >>> getarrlen(['10*x + 20', '1'], {'x'}) - ('-10 * x - 18', 'x', '+18)/(-10)') - >>> getarrlen(['20', '1'], {'x'}) - ('-18', None, None) - """ - edl = [] - try: - edl.append(myeval(dl[0], {}, {})) - except Exception: - edl.append(dl[0]) - try: - edl.append(myeval(dl[1], {}, {})) - except Exception: - edl.append(dl[1]) - if isinstance(edl[0], int): - p1 = 1 - edl[0] - if p1 == 0: - d = str(dl[1]) - elif p1 < 0: - d = '%s-%s' % (dl[1], -p1) - else: - d = '%s+%s' % (dl[1], p1) - elif isinstance(edl[1], int): - p1 = 1 + edl[1] - if p1 == 0: - d = '-(%s)' % (dl[0]) - else: - d = '%s-(%s)' % (p1, dl[0]) - else: - d = '%s-(%s)+1' % (dl[1], dl[0]) - try: - return repr(myeval(d, {}, {})), None, None - except Exception: - pass - d1, d2 = getlincoef(dl[0], args), getlincoef(dl[1], args) - if None not in [d1[0], d2[0]]: - if (d1[0], d2[0]) == (0, 0): - return repr(d2[1] - d1[1] + 1), None, None - b = d2[1] - d1[1] + 1 - d1 = (d1[0], 0, d1[2]) - d2 = (d2[0], b, d2[2]) - if d1[0] == 0 and d2[2] in args: - if b < 0: - return '%s * %s - %s' % (d2[0], d2[2], -b), d2[2], '+%s)/(%s)' % (-b, d2[0]) - elif b: - return '%s * %s + %s' % (d2[0], d2[2], b), d2[2], '-%s)/(%s)' % (b, d2[0]) - else: - return '%s * %s' % (d2[0], d2[2]), d2[2], ')/(%s)' % (d2[0]) - if d2[0] == 0 and d1[2] in args: - - if b < 0: - return '%s * %s - %s' % (-d1[0], d1[2], -b), d1[2], '+%s)/(%s)' % (-b, -d1[0]) - elif b: - return '%s * %s + %s' % (-d1[0], d1[2], b), d1[2], '-%s)/(%s)' % (b, -d1[0]) - else: - return '%s * %s' % (-d1[0], d1[2]), d1[2], ')/(%s)' % (-d1[0]) - if d1[2] == d2[2] and d1[2] in args: - a = d2[0] - d1[0] - if not a: - return repr(b), None, None - if b < 0: - return '%s * %s - %s' % (a, d1[2], -b), d2[2], '+%s)/(%s)' % (-b, a) - elif b: - return '%s * %s + %s' % (a, d1[2], b), d2[2], '-%s)/(%s)' % (b, a) - else: - return '%s * %s' % (a, d1[2]), d2[2], ')/(%s)' % (a) - if d1[0] == d2[0] == 1: - c = 
str(d1[2]) - if c not in args: - if _varname_match(c): - outmess('\tgetarrlen:variable "%s" undefined\n' % (c)) - c = '(%s)' % c - if b == 0: - d = '%s-%s' % (d2[2], c) - elif b < 0: - d = '%s-%s-%s' % (d2[2], c, -b) - else: - d = '%s-%s+%s' % (d2[2], c, b) - elif d1[0] == 0: - c2 = str(d2[2]) - if c2 not in args: - if _varname_match(c2): - outmess('\tgetarrlen:variable "%s" undefined\n' % (c2)) - c2 = '(%s)' % c2 - if d2[0] == 1: - pass - elif d2[0] == -1: - c2 = '-%s' % c2 - else: - c2 = '%s*%s' % (d2[0], c2) - - if b == 0: - d = c2 - elif b < 0: - d = '%s-%s' % (c2, -b) - else: - d = '%s+%s' % (c2, b) - elif d2[0] == 0: - c1 = str(d1[2]) - if c1 not in args: - if _varname_match(c1): - outmess('\tgetarrlen:variable "%s" undefined\n' % (c1)) - c1 = '(%s)' % c1 - if d1[0] == 1: - c1 = '-%s' % c1 - elif d1[0] == -1: - c1 = '+%s' % c1 - elif d1[0] < 0: - c1 = '+%s*%s' % (-d1[0], c1) - else: - c1 = '-%s*%s' % (d1[0], c1) - - if b == 0: - d = c1 - elif b < 0: - d = '%s-%s' % (c1, -b) - else: - d = '%s+%s' % (c1, b) - else: - c1 = str(d1[2]) - if c1 not in args: - if _varname_match(c1): - outmess('\tgetarrlen:variable "%s" undefined\n' % (c1)) - c1 = '(%s)' % c1 - if d1[0] == 1: - c1 = '-%s' % c1 - elif d1[0] == -1: - c1 = '+%s' % c1 - elif d1[0] < 0: - c1 = '+%s*%s' % (-d1[0], c1) - else: - c1 = '-%s*%s' % (d1[0], c1) - - c2 = str(d2[2]) - if c2 not in args: - if _varname_match(c2): - outmess('\tgetarrlen:variable "%s" undefined\n' % (c2)) - c2 = '(%s)' % c2 - if d2[0] == 1: - pass - elif d2[0] == -1: - c2 = '-%s' % c2 - else: - c2 = '%s*%s' % (d2[0], c2) - - if b == 0: - d = '%s%s' % (c2, c1) - elif b < 0: - d = '%s%s-%s' % (c2, c1, -b) - else: - d = '%s%s+%s' % (c2, c1, b) - return d, None, None word_pattern = re.compile(r'\b[a-z][\w$]*\b', re.I) @@ -2409,7 +2227,9 @@ def _get_depend_dict(name, vars, deps): if '=' in vars[name] and not isstring(vars[name]): for word in word_pattern.findall(vars[name]['=']): - if word not in words and word in vars: + # The word_pattern may return values that are not + # only variables, they can be string content for instance + if word not in words and word in vars and word != name: words.append(word) for word in words[:]: for w in deps.get(word, []) \ @@ -2596,7 +2416,8 @@ def _eval_scalar(value, params): if _is_kind_number(value): value = value.split('_')[0] try: - value = str(eval(value, {}, params)) + value = eval(value, {}, params) + value = (repr if isinstance(value, str) else str)(value) except (NameError, SyntaxError, TypeError): return value except Exception as msg: @@ -2683,7 +2504,7 @@ def analyzevars(block): pass vars[n]['kindselector']['kind'] = l - savelindims = {} + dimension_exprs = {} if 'attrspec' in vars[n]: attr = vars[n]['attrspec'] attr.reverse() @@ -2736,18 +2557,18 @@ def analyzevars(block): if dim and 'dimension' not in vars[n]: vars[n]['dimension'] = [] for d in rmbadname([x.strip() for x in markoutercomma(dim).split('@,@')]): - star = '*' - if d == ':': - star = ':' + star = ':' if d == ':' else '*' + # Evaluate `d` with respect to params if d in params: d = str(params[d]) - for p in list(params.keys()): + for p in params: re_1 = re.compile(r'(?P<before>.*?)\b' + p + r'\b(?P<after>.*)', re.I) m = re_1.match(d) while m: d = m.group('before') + \ str(params[p]) + m.group('after') m = re_1.match(d) + if d == star: dl = [star] else: @@ -2755,22 +2576,46 @@ def analyzevars(block): if len(dl) == 2 and '*' in dl: # e.g. 
dimension(5:*) dl = ['*'] d = '*' - if len(dl) == 1 and not dl[0] == star: + if len(dl) == 1 and dl[0] != star: dl = ['1', dl[0]] if len(dl) == 2: - d, v, di = getarrlen(dl, list(block['vars'].keys())) - if d[:4] == '1 * ': - d = d[4:] - if di and di[-4:] == '/(1)': - di = di[:-4] - if v: - savelindims[d] = v, di + d1, d2 = map(symbolic.Expr.parse, dl) + dsize = d2 - d1 + 1 + d = dsize.tostring(language=symbolic.Language.C) + # find variables v that define d as a linear + # function, `d == a * v + b`, and store + # coefficients a and b for further analysis. + solver_and_deps = {} + for v in block['vars']: + s = symbolic.as_symbol(v) + if dsize.contains(s): + try: + a, b = dsize.linear_solve(s) + solve_v = lambda s: (s - b) / a + all_symbols = set(a.symbols()) + all_symbols.update(b.symbols()) + except RuntimeError as msg: + # d is not a linear function of v, + # however, if v can be determined + # from d using other means, + # implement the corresponding + # solve_v function here. + solve_v = None + all_symbols = set(dsize.symbols()) + v_deps = set( + s.data for s in all_symbols + if s.data in vars) + solver_and_deps[v] = solve_v, list(v_deps) + # Note that dsize may contain symbols that are + # not defined in block['vars']. Here we assume + # these correspond to Fortran/C intrinsic + # functions or that are defined by other + # means. We'll let the compiler validate the + # definiteness of such symbols. + dimension_exprs[d] = solver_and_deps vars[n]['dimension'].append(d) + if 'dimension' in vars[n]: - if isintent_c(vars[n]): - shape_macro = 'shape' - else: - shape_macro = 'shape' # 'fshape' if isstringarray(vars[n]): if 'charselector' in vars[n]: d = vars[n]['charselector'] @@ -2789,69 +2634,87 @@ def analyzevars(block): else: errmess( "analyzevars: charselector=%r unhandled." % (d)) + if 'check' not in vars[n] and 'args' in block and n in block['args']: - flag = 'depend' not in vars[n] - if flag: - vars[n]['depend'] = [] - vars[n]['check'] = [] - if 'dimension' in vars[n]: - #/----< no check - i = -1 - ni = len(vars[n]['dimension']) - for d in vars[n]['dimension']: - ddeps = [] # dependencies of 'd' - ad = '' - pd = '' - if d not in vars: - if d in savelindims: - pd, ad = '(', savelindims[d][1] - d = savelindims[d][0] - else: - for r in block['args']: - if r not in vars: - continue - if re.match(r'.*?\b' + r + r'\b', d, re.I): - ddeps.append(r) - if d in vars: - if 'attrspec' in vars[d]: - for aa in vars[d]['attrspec']: - if aa[:6] == 'depend': - ddeps += aa[6:].strip()[1:-1].split(',') - if 'depend' in vars[d]: - ddeps = ddeps + vars[d]['depend'] - i = i + 1 - if d in vars and ('depend' not in vars[d]) \ - and ('=' not in vars[d]) and (d not in vars[n]['depend']) \ - and l_or(isintent_in, isintent_inout, isintent_inplace)(vars[n]): - vars[d]['depend'] = [n] - if ni > 1: - vars[d]['='] = '%s%s(%s,%s)%s' % ( - pd, shape_macro, n, i, ad) - else: - vars[d]['='] = '%slen(%s)%s' % (pd, n, ad) - # /---< no check - if 1 and 'check' not in vars[d]: - if ni > 1: - vars[d]['check'] = ['%s%s(%s,%i)%s==%s' - % (pd, shape_macro, n, i, ad, d)] - else: - vars[d]['check'] = [ - '%slen(%s)%s>=%s' % (pd, n, ad, d)] - if 'attrspec' not in vars[d]: - vars[d]['attrspec'] = ['optional'] - if ('optional' not in vars[d]['attrspec']) and\ - ('required' not in vars[d]['attrspec']): - vars[d]['attrspec'].append('optional') - elif d not in ['*', ':']: - #/----< no check - if flag: - if d in vars: - if n not in ddeps: - vars[n]['depend'].append(d) + # n is an argument that has no checks defined. 
Here we + # generate some consistency checks for n, and when n is an + # array, generate checks for its dimensions and construct + # initialization expressions. + n_deps = vars[n].get('depend', []) + n_checks = [] + n_is_input = l_or(isintent_in, isintent_inout, + isintent_inplace)(vars[n]) + if 'dimension' in vars[n]: # n is array + for i, d in enumerate(vars[n]['dimension']): + coeffs_and_deps = dimension_exprs.get(d) + if coeffs_and_deps is None: + # d is `:` or `*` or a constant expression + pass + elif n_is_input: + # n is an input array argument and its shape + # may define variables used in dimension + # specifications. + for v, (solver, deps) in coeffs_and_deps.items(): + if ((v in n_deps + or '=' in vars[v] + or 'depend' in vars[v])): + # Skip a variable that + # - n depends on + # - has user-defined initialization expression + # - has user-defined dependecies + continue + if solver is not None: + # v can be solved from d, hence, we + # make it an optional argument with + # initialization expression: + is_required = False + init = solver(symbolic.as_symbol( + f'shape({n}, {i})')) + init = init.tostring( + language=symbolic.Language.C) + vars[v]['='] = init + # n needs to be initialized before v. So, + # making v dependent on n and on any + # variables in solver or d. + vars[v]['depend'] = [n] + deps + if 'check' not in vars[v]: + # add check only when no + # user-specified checks exist + vars[v]['check'] = [ + f'shape({n}, {i}) == {d}'] else: - vars[n]['depend'] = vars[n]['depend'] + ddeps + # d is a non-linear function on v, + # hence, v must be a required input + # argument that n will depend on + is_required = True + if 'intent' not in vars[v]: + vars[v]['intent'] = [] + if 'in' not in vars[v]['intent']: + vars[v]['intent'].append('in') + # v needs to be initialized before n + n_deps.append(v) + n_checks.append( + f'shape({n}, {i}) == {d}') + v_attr = vars[v].get('attrspec', []) + if not ('optional' in v_attr + or 'required' in v_attr): + v_attr.append( + 'required' if is_required else 'optional') + if v_attr: + vars[v]['attrspec'] = v_attr + if coeffs_and_deps is not None: + # extend v dependencies with ones specified in attrspec + for v, (solver, deps) in coeffs_and_deps.items(): + v_deps = vars[v].get('depend', []) + for aa in vars[v].get('attrspec', []): + if aa.startswith('depend'): + aa = ''.join(aa.split()) + v_deps.extend(aa[7:-1].split(',')) + if v_deps: + vars[v]['depend'] = list(set(v_deps)) + if n not in v_deps: + n_deps.append(v) elif isstring(vars[n]): - length = '1' if 'charselector' in vars[n]: if '*' in vars[n]['charselector']: length = _eval_length(vars[n]['charselector']['*'], @@ -2862,11 +2725,11 @@ def analyzevars(block): params) del vars[n]['charselector']['len'] vars[n]['charselector']['*'] = length + if n_checks: + vars[n]['check'] = n_checks + if n_deps: + vars[n]['depend'] = list(set(n_deps)) - if not vars[n]['check']: - del vars[n]['check'] - if flag and not vars[n]['depend']: - del vars[n]['depend'] if '=' in vars[n]: if 'attrspec' not in vars[n]: vars[n]['attrspec'] = [] @@ -2892,8 +2755,6 @@ def analyzevars(block): vars[n] = appenddecl(vars[n], vars[block['result']]) if 'prefix' in block: pr = block['prefix'] - ispure = 0 - isrec = 1 pr1 = pr.replace('pure', '') ispure = (not pr == pr1) pr = pr1.replace('recursive', '') @@ -3357,7 +3218,8 @@ def crack2fortran(block): """ footer = """ ! This file was auto-generated with f2py (version:%s). -! See http://cens.ioc.ee/projects/f2py2e/ +! See: +! 
https://web.archive.org/web/20140822061353/http://cens.ioc.ee/projects/f2py2e """ % (f2py_version) return header + pyf + footer diff --git a/numpy/f2py/f2py2e.py b/numpy/f2py/f2py2e.py index f45374be6..605495574 100755 --- a/numpy/f2py/f2py2e.py +++ b/numpy/f2py/f2py2e.py @@ -168,7 +168,7 @@ numpy Version: {numpy_version} Requires: Python 3.5 or higher. License: NumPy license (see LICENSE.txt in the NumPy source code) Copyright 1999 - 2011 Pearu Peterson all rights reserved. -http://cens.ioc.ee/projects/f2py2e/""" +https://web.archive.org/web/20140822061353/http://cens.ioc.ee/projects/f2py2e""" def scaninputline(inputline): @@ -416,8 +416,8 @@ def run_main(comline_list): Examples -------- - .. include:: run_main_session.dat - :literal: + .. literalinclude:: code/results/run_main_session.dat + :language: python """ crackfortran.reset_global_f2py_vars() @@ -546,30 +546,29 @@ def run_compile(): fc_flags = [_m for _m in sys.argv[1:] if _reg4.match(_m)] sys.argv = [_m for _m in sys.argv if _m not in fc_flags] - if 1: - del_list = [] - for s in flib_flags: - v = '--fcompiler=' - if s[:len(v)] == v: - from numpy.distutils import fcompiler - fcompiler.load_all_fcompiler_classes() - allowed_keys = list(fcompiler.fcompiler_class.keys()) - nv = ov = s[len(v):].lower() - if ov not in allowed_keys: - vmap = {} # XXX - try: - nv = vmap[ov] - except KeyError: - if ov not in vmap.values(): - print('Unknown vendor: "%s"' % (s[len(v):])) - nv = ov - i = flib_flags.index(s) - flib_flags[i] = '--fcompiler=' + nv - continue - for s in del_list: + del_list = [] + for s in flib_flags: + v = '--fcompiler=' + if s[:len(v)] == v: + from numpy.distutils import fcompiler + fcompiler.load_all_fcompiler_classes() + allowed_keys = list(fcompiler.fcompiler_class.keys()) + nv = ov = s[len(v):].lower() + if ov not in allowed_keys: + vmap = {} # XXX + try: + nv = vmap[ov] + except KeyError: + if ov not in vmap.values(): + print('Unknown vendor: "%s"' % (s[len(v):])) + nv = ov i = flib_flags.index(s) - del flib_flags[i] - assert len(flib_flags) <= 2, repr(flib_flags) + flib_flags[i] = '--fcompiler=' + nv + continue + for s in del_list: + i = flib_flags.index(s) + del flib_flags[i] + assert len(flib_flags) <= 2, repr(flib_flags) _reg5 = re.compile(r'--(verbose)') setup_flags = [_m for _m in sys.argv[1:] if _reg5.match(_m)] diff --git a/numpy/f2py/rules.py b/numpy/f2py/rules.py index 587ae2e5f..78810a0a7 100755 --- a/numpy/f2py/rules.py +++ b/numpy/f2py/rules.py @@ -120,6 +120,10 @@ module_rules = { extern \"C\" { #endif +#ifndef PY_SSIZE_T_CLEAN +#define PY_SSIZE_T_CLEAN +#endif /* PY_SSIZE_T_CLEAN */ + """ + gentitle("See f2py2e/cfuncs.py: includes") + """ #includes# #includes0# @@ -170,67 +174,67 @@ static PyObject *#modulename#_module; static FortranDataDef f2py_routine_defs[] = { #routine_defs# -\t{NULL} + {NULL} }; static PyMethodDef f2py_module_methods[] = { #pymethoddef# -\t{NULL,NULL} + {NULL,NULL} }; static struct PyModuleDef moduledef = { -\tPyModuleDef_HEAD_INIT, -\t"#modulename#", -\tNULL, -\t-1, -\tf2py_module_methods, -\tNULL, -\tNULL, -\tNULL, -\tNULL + PyModuleDef_HEAD_INIT, + "#modulename#", + NULL, + -1, + f2py_module_methods, + NULL, + NULL, + NULL, + NULL }; PyMODINIT_FUNC PyInit_#modulename#(void) { -\tint i; -\tPyObject *m,*d, *s, *tmp; -\tm = #modulename#_module = PyModule_Create(&moduledef); -\tPy_SET_TYPE(&PyFortran_Type, &PyType_Type); -\timport_array(); -\tif (PyErr_Occurred()) -\t\t{PyErr_SetString(PyExc_ImportError, \"can't initialize module #modulename# (failed to import numpy)\"); return m;} 
-\td = PyModule_GetDict(m); -\ts = PyUnicode_FromString(\"#f2py_version#\"); -\tPyDict_SetItemString(d, \"__version__\", s); -\tPy_DECREF(s); -\ts = PyUnicode_FromString( -\t\t\"This module '#modulename#' is auto-generated with f2py (version:#f2py_version#).\\nFunctions:\\n\"\n#docs#\".\"); -\tPyDict_SetItemString(d, \"__doc__\", s); -\tPy_DECREF(s); -\ts = PyUnicode_FromString(\"""" + numpy_version + """\"); -\tPyDict_SetItemString(d, \"__f2py_numpy_version__\", s); -\tPy_DECREF(s); -\t#modulename#_error = PyErr_NewException (\"#modulename#.error\", NULL, NULL); -\t/* -\t * Store the error object inside the dict, so that it could get deallocated. -\t * (in practice, this is a module, so it likely will not and cannot.) -\t */ -\tPyDict_SetItemString(d, \"_#modulename#_error\", #modulename#_error); -\tPy_DECREF(#modulename#_error); -\tfor(i=0;f2py_routine_defs[i].name!=NULL;i++) { -\t\ttmp = PyFortranObject_NewAsAttr(&f2py_routine_defs[i]); -\t\tPyDict_SetItemString(d, f2py_routine_defs[i].name, tmp); -\t\tPy_DECREF(tmp); -\t} + int i; + PyObject *m,*d, *s, *tmp; + m = #modulename#_module = PyModule_Create(&moduledef); + Py_SET_TYPE(&PyFortran_Type, &PyType_Type); + import_array(); + if (PyErr_Occurred()) + {PyErr_SetString(PyExc_ImportError, \"can't initialize module #modulename# (failed to import numpy)\"); return m;} + d = PyModule_GetDict(m); + s = PyUnicode_FromString(\"#f2py_version#\"); + PyDict_SetItemString(d, \"__version__\", s); + Py_DECREF(s); + s = PyUnicode_FromString( + \"This module '#modulename#' is auto-generated with f2py (version:#f2py_version#).\\nFunctions:\\n\"\n#docs#\".\"); + PyDict_SetItemString(d, \"__doc__\", s); + Py_DECREF(s); + s = PyUnicode_FromString(\"""" + numpy_version + """\"); + PyDict_SetItemString(d, \"__f2py_numpy_version__\", s); + Py_DECREF(s); + #modulename#_error = PyErr_NewException (\"#modulename#.error\", NULL, NULL); + /* + * Store the error object inside the dict, so that it could get deallocated. + * (in practice, this is a module, so it likely will not and cannot.) + */ + PyDict_SetItemString(d, \"_#modulename#_error\", #modulename#_error); + Py_DECREF(#modulename#_error); + for(i=0;f2py_routine_defs[i].name!=NULL;i++) { + tmp = PyFortranObject_NewAsAttr(&f2py_routine_defs[i]); + PyDict_SetItemString(d, f2py_routine_defs[i].name, tmp); + Py_DECREF(tmp); + } #initf2pywraphooks# #initf90modhooks# #initcommonhooks# #interface_usercode# #ifdef F2PY_REPORT_ATEXIT -\tif (! PyErr_Occurred()) -\t\ton_exit(f2py_report_on_exit,(void*)\"#modulename#\"); + if (! 
PyErr_Occurred()) + on_exit(f2py_report_on_exit,(void*)\"#modulename#\"); #endif -\treturn m; + return m; } #ifdef __cplusplus } @@ -322,7 +326,7 @@ f2py_stop_clock(); 'externroutines': '#declfortranroutine#', 'doc': '#docreturn##name#(#docsignature#)', 'docshort': '#docreturn##name#(#docsignatureshort#)', - 'docs': '"\t#docreturn##name#(#docsignature#)\\n"\n', + 'docs': '" #docreturn##name#(#docsignature#)\\n"\n', 'need': ['arrayobject.h', 'CFUNCSMESS', 'MINMAX'], 'cppmacros': {debugcapi: '#define DEBUGCFUNCS'}, 'latexdoc': ['\\subsection{Wrapper function \\texttt{#texname#}}\n', @@ -396,25 +400,25 @@ rout_rules = [ ismoduleroutine: '', isdummyroutine: '' }, - 'routine_def': {l_not(l_or(ismoduleroutine, isintent_c, isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},', - l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},', - l_and(l_not(ismoduleroutine), isdummyroutine): '\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},', + 'routine_def': {l_not(l_or(ismoduleroutine, isintent_c, isdummyroutine)): ' {\"#name#\",-1,{{-1}},0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},', + l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)): ' {\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},', + l_and(l_not(ismoduleroutine), isdummyroutine): ' {\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},', }, 'need': {l_and(l_not(l_or(ismoduleroutine, isintent_c)), l_not(isdummyroutine)): 'F_FUNC'}, 'callfortranroutine': [ {debugcapi: [ - """\tfprintf(stderr,\"debug-capi:Fortran subroutine `#fortranname#(#callfortran#)\'\\n\");"""]}, + """ fprintf(stderr,\"debug-capi:Fortran subroutine `#fortranname#(#callfortran#)\'\\n\");"""]}, {hasexternals: """\ -\t\tif (#setjmpbuf#) { -\t\t\tf2py_success = 0; -\t\t} else {"""}, - {isthreadsafe: '\t\t\tPy_BEGIN_ALLOW_THREADS'}, - {hascallstatement: '''\t\t\t\t#callstatement#; -\t\t\t\t/*(*f2py_func)(#callfortran#);*/'''}, + if (#setjmpbuf#) { + f2py_success = 0; + } else {"""}, + {isthreadsafe: ' Py_BEGIN_ALLOW_THREADS'}, + {hascallstatement: ''' #callstatement#; + /*(*f2py_func)(#callfortran#);*/'''}, {l_not(l_or(hascallstatement, isdummyroutine)) - : '\t\t\t\t(*f2py_func)(#callfortran#);'}, - {isthreadsafe: '\t\t\tPy_END_ALLOW_THREADS'}, - {hasexternals: """\t\t}"""} + : ' (*f2py_func)(#callfortran#);'}, + {isthreadsafe: ' Py_END_ALLOW_THREADS'}, + {hasexternals: """ }"""} ], '_check': l_and(issubroutine, l_not(issubroutine_wrap)), }, { # Wrapped function @@ -423,8 +427,8 @@ rout_rules = [ isdummyroutine: '', }, - 'routine_def': {l_not(l_or(ismoduleroutine, isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#F_WRAPPEDFUNC#(#name_lower#,#NAME#),(f2py_init_func)#apiname#,doc_#apiname#},', - isdummyroutine: '\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},', + 'routine_def': {l_not(l_or(ismoduleroutine, isdummyroutine)): ' {\"#name#\",-1,{{-1}},0,(char *)#F_WRAPPEDFUNC#(#name_lower#,#NAME#),(f2py_init_func)#apiname#,doc_#apiname#},', + isdummyroutine: ' {\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},', }, 'initf2pywraphook': {l_not(l_or(ismoduleroutine, isdummyroutine)): ''' { @@ -441,18 +445,18 @@ rout_rules = [ 'need': {l_not(l_or(ismoduleroutine, isdummyroutine)): ['F_WRAPPEDFUNC', 'F_FUNC']}, 
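# (Illustrative aside: each slot in these rule dictionaries, such as
# 'routine_def' or 'callfortranroutine', maps either to a plain
# template string or to a {predicate: template} dict. Predicates such
# as isthreadsafe or hasexternals test the routine description, a
# template is kept only when its predicate holds, and '#name#'-style
# markers are substituted afterwards. A hypothetical, simplified
# stand-in for that selection step:
#
#     def apply_rout_rule(templates, rout):
#         if isinstance(templates, str):
#             return templates
#         return '\n'.join(tmpl for pred, tmpl in templates.items()
#                          if pred(rout))
#
# so apply_rout_rule({isthreadsafe: ' Py_BEGIN_ALLOW_THREADS'}, rout)
# would contribute the GIL-release line only for thread-safe routines.)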
'callfortranroutine': [ {debugcapi: [ - """\tfprintf(stderr,\"debug-capi:Fortran subroutine `f2pywrap#name_lower#(#callfortran#)\'\\n\");"""]}, + """ fprintf(stderr,\"debug-capi:Fortran subroutine `f2pywrap#name_lower#(#callfortran#)\'\\n\");"""]}, {hasexternals: """\ -\tif (#setjmpbuf#) { -\t\tf2py_success = 0; -\t} else {"""}, - {isthreadsafe: '\tPy_BEGIN_ALLOW_THREADS'}, + if (#setjmpbuf#) { + f2py_success = 0; + } else {"""}, + {isthreadsafe: ' Py_BEGIN_ALLOW_THREADS'}, {l_not(l_or(hascallstatement, isdummyroutine)) - : '\t(*f2py_func)(#callfortran#);'}, + : ' (*f2py_func)(#callfortran#);'}, {hascallstatement: - '\t#callstatement#;\n\t/*(*f2py_func)(#callfortran#);*/'}, - {isthreadsafe: '\tPy_END_ALLOW_THREADS'}, - {hasexternals: '\t}'} + ' #callstatement#;\n /*(*f2py_func)(#callfortran#);*/'}, + {isthreadsafe: ' Py_END_ALLOW_THREADS'}, + {hasexternals: ' }'} ], '_check': isfunction_wrap, }, { # Wrapped subroutine @@ -461,8 +465,8 @@ rout_rules = [ isdummyroutine: '', }, - 'routine_def': {l_not(l_or(ismoduleroutine, isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#F_WRAPPEDFUNC#(#name_lower#,#NAME#),(f2py_init_func)#apiname#,doc_#apiname#},', - isdummyroutine: '\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},', + 'routine_def': {l_not(l_or(ismoduleroutine, isdummyroutine)): ' {\"#name#\",-1,{{-1}},0,(char *)#F_WRAPPEDFUNC#(#name_lower#,#NAME#),(f2py_init_func)#apiname#,doc_#apiname#},', + isdummyroutine: ' {\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},', }, 'initf2pywraphook': {l_not(l_or(ismoduleroutine, isdummyroutine)): ''' { @@ -479,18 +483,18 @@ rout_rules = [ 'need': {l_not(l_or(ismoduleroutine, isdummyroutine)): ['F_WRAPPEDFUNC', 'F_FUNC']}, 'callfortranroutine': [ {debugcapi: [ - """\tfprintf(stderr,\"debug-capi:Fortran subroutine `f2pywrap#name_lower#(#callfortran#)\'\\n\");"""]}, + """ fprintf(stderr,\"debug-capi:Fortran subroutine `f2pywrap#name_lower#(#callfortran#)\'\\n\");"""]}, {hasexternals: """\ -\tif (#setjmpbuf#) { -\t\tf2py_success = 0; -\t} else {"""}, - {isthreadsafe: '\tPy_BEGIN_ALLOW_THREADS'}, + if (#setjmpbuf#) { + f2py_success = 0; + } else {"""}, + {isthreadsafe: ' Py_BEGIN_ALLOW_THREADS'}, {l_not(l_or(hascallstatement, isdummyroutine)) - : '\t(*f2py_func)(#callfortran#);'}, + : ' (*f2py_func)(#callfortran#);'}, {hascallstatement: - '\t#callstatement#;\n\t/*(*f2py_func)(#callfortran#);*/'}, - {isthreadsafe: '\tPy_END_ALLOW_THREADS'}, - {hasexternals: '\t}'} + ' #callstatement#;\n /*(*f2py_func)(#callfortran#);*/'}, + {isthreadsafe: ' Py_END_ALLOW_THREADS'}, + {hasexternals: ' }'} ], '_check': issubroutine_wrap, }, { # Function @@ -501,13 +505,13 @@ rout_rules = [ {hasresultnote: '--- #resultnote#'}], 'callfortranroutine': [{l_and(debugcapi, isstringfunction): """\ #ifdef USESCOMPAQFORTRAN -\tfprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callcompaqfortran#)\\n\"); + fprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callcompaqfortran#)\\n\"); #else -\tfprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callfortran#)\\n\"); + fprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callfortran#)\\n\"); #endif """}, {l_and(debugcapi, l_not(isstringfunction)): """\ -\tfprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callfortran#)\\n\"); + fprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callfortran#)\\n\"); """} ], '_check': l_and(isfunction, l_not(isfunction_wrap)) @@ -516,32 +520,32 @@ rout_rules = [ 
l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)): 'extern #ctype# #fortranname#(#callprotoargument#);', isdummyroutine: '' }, - 'routine_def': {l_and(l_not(l_or(ismoduleroutine, isintent_c)), l_not(isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},', - l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},', - isdummyroutine: '\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},', + 'routine_def': {l_and(l_not(l_or(ismoduleroutine, isintent_c)), l_not(isdummyroutine)): ' {\"#name#\",-1,{{-1}},0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},', + l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)): ' {\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},', + isdummyroutine: ' {\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},', }, - 'decl': [{iscomplexfunction_warn: '\t#ctype# #name#_return_value={0,0};', - l_not(iscomplexfunction): '\t#ctype# #name#_return_value=0;'}, + 'decl': [{iscomplexfunction_warn: ' #ctype# #name#_return_value={0,0};', + l_not(iscomplexfunction): ' #ctype# #name#_return_value=0;'}, {iscomplexfunction: - '\tPyObject *#name#_return_value_capi = Py_None;'} + ' PyObject *#name#_return_value_capi = Py_None;'} ], 'callfortranroutine': [ {hasexternals: """\ -\tif (#setjmpbuf#) { -\t\tf2py_success = 0; -\t} else {"""}, - {isthreadsafe: '\tPy_BEGIN_ALLOW_THREADS'}, - {hascallstatement: '''\t#callstatement#; -/*\t#name#_return_value = (*f2py_func)(#callfortran#);*/ + if (#setjmpbuf#) { + f2py_success = 0; + } else {"""}, + {isthreadsafe: ' Py_BEGIN_ALLOW_THREADS'}, + {hascallstatement: ''' #callstatement#; +/* #name#_return_value = (*f2py_func)(#callfortran#);*/ '''}, {l_not(l_or(hascallstatement, isdummyroutine)) - : '\t#name#_return_value = (*f2py_func)(#callfortran#);'}, - {isthreadsafe: '\tPy_END_ALLOW_THREADS'}, - {hasexternals: '\t}'}, + : ' #name#_return_value = (*f2py_func)(#callfortran#);'}, + {isthreadsafe: ' Py_END_ALLOW_THREADS'}, + {hasexternals: ' }'}, {l_and(debugcapi, iscomplexfunction) - : '\tfprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value.r,#name#_return_value.i);'}, - {l_and(debugcapi, l_not(iscomplexfunction)): '\tfprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value);'}], - 'pyobjfrom': {iscomplexfunction: '\t#name#_return_value_capi = pyobj_from_#ctype#1(#name#_return_value);'}, + : ' fprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value.r,#name#_return_value.i);'}, + {l_and(debugcapi, l_not(iscomplexfunction)): ' fprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value);'}], + 'pyobjfrom': {iscomplexfunction: ' #name#_return_value_capi = pyobj_from_#ctype#1(#name#_return_value);'}, 'need': [{l_not(isdummyroutine): 'F_FUNC'}, {iscomplexfunction: 'pyobj_from_#ctype#1'}, {islong_longfunction: 'long_long'}, @@ -553,50 +557,50 @@ rout_rules = [ }, { # String function # in use for --no-wrap 'declfortranroutine': 'extern void #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);', 'routine_def': {l_not(l_or(ismoduleroutine, isintent_c)): - '\t{\"#name#\",-1,{{-1}},0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},', + ' {\"#name#\",-1,{{-1}},0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},', l_and(l_not(ismoduleroutine), 
isintent_c): - '\t{\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},' + ' {\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},' }, - 'decl': ['\t#ctype# #name#_return_value = NULL;', - '\tint #name#_return_value_len = 0;'], + 'decl': [' #ctype# #name#_return_value = NULL;', + ' int #name#_return_value_len = 0;'], 'callfortran':'#name#_return_value,#name#_return_value_len,', - 'callfortranroutine':['\t#name#_return_value_len = #rlength#;', - '\tif ((#name#_return_value = (string)malloc(' - '#name#_return_value_len+1) == NULL) {', - '\t\tPyErr_SetString(PyExc_MemoryError, \"out of memory\");', - '\t\tf2py_success = 0;', - '\t} else {', - "\t\t(#name#_return_value)[#name#_return_value_len] = '\\0';", - '\t}', - '\tif (f2py_success) {', + 'callfortranroutine':[' #name#_return_value_len = #rlength#;', + ' if ((#name#_return_value = (string)malloc(' + + '#name#_return_value_len+1) == NULL) {', + ' PyErr_SetString(PyExc_MemoryError, \"out of memory\");', + ' f2py_success = 0;', + ' } else {', + " (#name#_return_value)[#name#_return_value_len] = '\\0';", + ' }', + ' if (f2py_success) {', {hasexternals: """\ -\t\tif (#setjmpbuf#) { -\t\t\tf2py_success = 0; -\t\t} else {"""}, - {isthreadsafe: '\t\tPy_BEGIN_ALLOW_THREADS'}, + if (#setjmpbuf#) { + f2py_success = 0; + } else {"""}, + {isthreadsafe: ' Py_BEGIN_ALLOW_THREADS'}, """\ #ifdef USESCOMPAQFORTRAN -\t\t(*f2py_func)(#callcompaqfortran#); + (*f2py_func)(#callcompaqfortran#); #else -\t\t(*f2py_func)(#callfortran#); + (*f2py_func)(#callfortran#); #endif """, - {isthreadsafe: '\t\tPy_END_ALLOW_THREADS'}, - {hasexternals: '\t\t}'}, + {isthreadsafe: ' Py_END_ALLOW_THREADS'}, + {hasexternals: ' }'}, {debugcapi: - '\t\tfprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value_len,#name#_return_value);'}, - '\t} /* if (f2py_success) after (string)malloc */', + ' fprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value_len,#name#_return_value);'}, + ' } /* if (f2py_success) after (string)malloc */', ], 'returnformat': '#rformat#', 'return': ',#name#_return_value', - 'freemem': '\tSTRINGFREE(#name#_return_value);', + 'freemem': ' STRINGFREE(#name#_return_value);', 'need': ['F_FUNC', '#ctype#', 'STRINGFREE'], '_check':l_and(isstringfunction, l_not(isfunction_wrap)) # ???obsolete }, { # Debugging - 'routdebugenter': '\tfprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#(#docsignature#)\\n");', - 'routdebugleave': '\tfprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#: successful.\\n");', - 'routdebugfailure': '\tfprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#: failure.\\n");', + 'routdebugenter': ' fprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#(#docsignature#)\\n");', + 'routdebugleave': ' fprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#: successful.\\n");', + 'routdebugfailure': ' fprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#: failure.\\n");', '_check': debugcapi } ] @@ -621,16 +625,16 @@ aux_rules = [ 'separatorsfor': sepdict }, { # Common - 'frompyobj': ['\t/* Processing auxiliary variable #varname# */', - {debugcapi: '\tfprintf(stderr,"#vardebuginfo#\\n");'}, ], - 'cleanupfrompyobj': '\t/* End of cleaning variable #varname# */', + 'frompyobj': [' /* Processing auxiliary variable #varname# */', + {debugcapi: ' fprintf(stderr,"#vardebuginfo#\\n");'}, ], + 'cleanupfrompyobj': ' /* End of cleaning variable #varname# */', 'need': typedef_need_dict, }, # Scalars 
(not complex) { # Common - 'decl': '\t#ctype# #varname# = 0;', + 'decl': ' #ctype# #varname# = 0;', 'need': {hasinitvalue: 'math.h'}, - 'frompyobj': {hasinitvalue: '\t#varname# = #init#;'}, + 'frompyobj': {hasinitvalue: ' #varname# = #init#;'}, '_check': l_and(isscalar, l_not(iscomplex)), }, { @@ -642,23 +646,23 @@ aux_rules = [ }, # Complex scalars { # Common - 'decl': '\t#ctype# #varname#;', - 'frompyobj': {hasinitvalue: '\t#varname#.r = #init.r#, #varname#.i = #init.i#;'}, + 'decl': ' #ctype# #varname#;', + 'frompyobj': {hasinitvalue: ' #varname#.r = #init.r#, #varname#.i = #init.i#;'}, '_check': iscomplex }, # String { # Common - 'decl': ['\t#ctype# #varname# = NULL;', - '\tint slen(#varname#);', + 'decl': [' #ctype# #varname# = NULL;', + ' int slen(#varname#);', ], 'need':['len..'], '_check':isstring }, # Array { # Common - 'decl': ['\t#ctype# *#varname# = NULL;', - '\tnpy_intp #varname#_Dims[#rank#] = {#rank*[-1]#};', - '\tconst int #varname#_Rank = #rank#;', + 'decl': [' #ctype# *#varname# = NULL;', + ' npy_intp #varname#_Dims[#rank#] = {#rank*[-1]#};', + ' const int #varname#_Rank = #rank#;', ], 'need':['len..', {hasinitvalue: 'forcomb'}, {hasinitvalue: 'CFUNCSMESS'}], '_check': isarray @@ -707,9 +711,9 @@ arg_rules = [ 'separatorsfor': sepdict }, { # Common - 'frompyobj': ['\t/* Processing variable #varname# */', - {debugcapi: '\tfprintf(stderr,"#vardebuginfo#\\n");'}, ], - 'cleanupfrompyobj': '\t/* End of cleaning variable #varname# */', + 'frompyobj': [' /* Processing variable #varname# */', + {debugcapi: ' fprintf(stderr,"#vardebuginfo#\\n");'}, ], + 'cleanupfrompyobj': ' /* End of cleaning variable #varname# */', '_depend': '', 'need': typedef_need_dict, }, @@ -828,8 +832,8 @@ if (#varname#_cb.capi==Py_None) { }, # Scalars (not complex) { # Common - 'decl': '\t#ctype# #varname# = 0;', - 'pyobjfrom': {debugcapi: '\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#);'}, + 'decl': ' #ctype# #varname# = 0;', + 'pyobjfrom': {debugcapi: ' fprintf(stderr,"#vardebugshowvalue#\\n",#varname#);'}, 'callfortran': {isintent_c: '#varname#,', l_not(isintent_c): '&#varname#,'}, 'return': {isintent_out: ',#varname#'}, '_check': l_and(isscalar, l_not(iscomplex)) @@ -837,15 +841,15 @@ if (#varname#_cb.capi==Py_None) { 'need': {hasinitvalue: 'math.h'}, '_check': l_and(isscalar, l_not(iscomplex)), }, { # Not hidden - 'decl': '\tPyObject *#varname#_capi = Py_None;', + 'decl': ' PyObject *#varname#_capi = Py_None;', 'argformat': {isrequired: 'O'}, 'keyformat': {isoptional: 'O'}, 'args_capi': {isrequired: ',&#varname#_capi'}, 'keys_capi': {isoptional: ',&#varname#_capi'}, 'pyobjfrom': {isintent_inout: """\ -\tf2py_success = try_pyarr_from_#ctype#(#varname#_capi,&#varname#); -\tif (f2py_success) {"""}, - 'closepyobjfrom': {isintent_inout: "\t} /*if (f2py_success) of #varname# pyobjfrom*/"}, + f2py_success = try_pyarr_from_#ctype#(#varname#_capi,&#varname#); + if (f2py_success) {"""}, + 'closepyobjfrom': {isintent_inout: " } /*if (f2py_success) of #varname# pyobjfrom*/"}, 'need': {isintent_inout: 'try_pyarr_from_#ctype#'}, '_check': l_and(isscalar, l_not(iscomplex), isintent_nothide) }, { @@ -865,91 +869,91 @@ if (#varname#_cb.capi==Py_None) { # ... 
# from_pyobj(varname) # - {hasinitvalue: '\tif (#varname#_capi == Py_None) #varname# = #init#; else', + {hasinitvalue: ' if (#varname#_capi == Py_None) #varname# = #init#; else', '_depend': ''}, - {l_and(isoptional, l_not(hasinitvalue)): '\tif (#varname#_capi != Py_None)', + {l_and(isoptional, l_not(hasinitvalue)): ' if (#varname#_capi != Py_None)', '_depend': ''}, {l_not(islogical): '''\ -\t\tf2py_success = #ctype#_from_pyobj(&#varname#,#varname#_capi,"#pyname#() #nth# (#varname#) can\'t be converted to #ctype#"); -\tif (f2py_success) {'''}, + f2py_success = #ctype#_from_pyobj(&#varname#,#varname#_capi,"#pyname#() #nth# (#varname#) can\'t be converted to #ctype#"); + if (f2py_success) {'''}, {islogical: '''\ -\t\t#varname# = (#ctype#)PyObject_IsTrue(#varname#_capi); -\t\tf2py_success = 1; -\tif (f2py_success) {'''}, + #varname# = (#ctype#)PyObject_IsTrue(#varname#_capi); + f2py_success = 1; + if (f2py_success) {'''}, ], - 'cleanupfrompyobj': '\t} /*if (f2py_success) of #varname#*/', + 'cleanupfrompyobj': ' } /*if (f2py_success) of #varname#*/', 'need': {l_not(islogical): '#ctype#_from_pyobj'}, '_check': l_and(isscalar, l_not(iscomplex), isintent_nothide), '_depend': '' }, { # Hidden - 'frompyobj': {hasinitvalue: '\t#varname# = #init#;'}, + 'frompyobj': {hasinitvalue: ' #varname# = #init#;'}, 'need': typedef_need_dict, '_check': l_and(isscalar, l_not(iscomplex), isintent_hide), '_depend': '' }, { # Common - 'frompyobj': {debugcapi: '\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#);'}, + 'frompyobj': {debugcapi: ' fprintf(stderr,"#vardebugshowvalue#\\n",#varname#);'}, '_check': l_and(isscalar, l_not(iscomplex)), '_depend': '' }, # Complex scalars { # Common - 'decl': '\t#ctype# #varname#;', + 'decl': ' #ctype# #varname#;', 'callfortran': {isintent_c: '#varname#,', l_not(isintent_c): '&#varname#,'}, - 'pyobjfrom': {debugcapi: '\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#.r,#varname#.i);'}, + 'pyobjfrom': {debugcapi: ' fprintf(stderr,"#vardebugshowvalue#\\n",#varname#.r,#varname#.i);'}, 'return': {isintent_out: ',#varname#_capi'}, '_check': iscomplex }, { # Not hidden - 'decl': '\tPyObject *#varname#_capi = Py_None;', + 'decl': ' PyObject *#varname#_capi = Py_None;', 'argformat': {isrequired: 'O'}, 'keyformat': {isoptional: 'O'}, 'args_capi': {isrequired: ',&#varname#_capi'}, 'keys_capi': {isoptional: ',&#varname#_capi'}, 'need': {isintent_inout: 'try_pyarr_from_#ctype#'}, 'pyobjfrom': {isintent_inout: """\ -\t\tf2py_success = try_pyarr_from_#ctype#(#varname#_capi,&#varname#); -\t\tif (f2py_success) {"""}, - 'closepyobjfrom': {isintent_inout: "\t\t} /*if (f2py_success) of #varname# pyobjfrom*/"}, + f2py_success = try_pyarr_from_#ctype#(#varname#_capi,&#varname#); + if (f2py_success) {"""}, + 'closepyobjfrom': {isintent_inout: " } /*if (f2py_success) of #varname# pyobjfrom*/"}, '_check': l_and(iscomplex, isintent_nothide) }, { - 'frompyobj': [{hasinitvalue: '\tif (#varname#_capi==Py_None) {#varname#.r = #init.r#, #varname#.i = #init.i#;} else'}, + 'frompyobj': [{hasinitvalue: ' if (#varname#_capi==Py_None) {#varname#.r = #init.r#, #varname#.i = #init.i#;} else'}, {l_and(isoptional, l_not(hasinitvalue)) - : '\tif (#varname#_capi != Py_None)'}, - '\t\tf2py_success = #ctype#_from_pyobj(&#varname#,#varname#_capi,"#pyname#() #nth# (#varname#) can\'t be converted to #ctype#");' - '\n\tif (f2py_success) {'], - 'cleanupfrompyobj': '\t} /*if (f2py_success) of #varname# frompyobj*/', + : ' if (#varname#_capi != Py_None)'}, + ' f2py_success = 
#ctype#_from_pyobj(&#varname#,#varname#_capi,"#pyname#() #nth# (#varname#) can\'t be converted to #ctype#");' + '\n if (f2py_success) {'], + 'cleanupfrompyobj': ' } /*if (f2py_success) of #varname# frompyobj*/', 'need': ['#ctype#_from_pyobj'], '_check': l_and(iscomplex, isintent_nothide), '_depend': '' }, { # Hidden - 'decl': {isintent_out: '\tPyObject *#varname#_capi = Py_None;'}, + 'decl': {isintent_out: ' PyObject *#varname#_capi = Py_None;'}, '_check': l_and(iscomplex, isintent_hide) }, { - 'frompyobj': {hasinitvalue: '\t#varname#.r = #init.r#, #varname#.i = #init.i#;'}, + 'frompyobj': {hasinitvalue: ' #varname#.r = #init.r#, #varname#.i = #init.i#;'}, '_check': l_and(iscomplex, isintent_hide), '_depend': '' }, { # Common - 'pyobjfrom': {isintent_out: '\t#varname#_capi = pyobj_from_#ctype#1(#varname#);'}, + 'pyobjfrom': {isintent_out: ' #varname#_capi = pyobj_from_#ctype#1(#varname#);'}, 'need': ['pyobj_from_#ctype#1'], '_check': iscomplex }, { - 'frompyobj': {debugcapi: '\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#.r,#varname#.i);'}, + 'frompyobj': {debugcapi: ' fprintf(stderr,"#vardebugshowvalue#\\n",#varname#.r,#varname#.i);'}, '_check': iscomplex, '_depend': '' }, # String { # Common - 'decl': ['\t#ctype# #varname# = NULL;', - '\tint slen(#varname#);', - '\tPyObject *#varname#_capi = Py_None;'], + 'decl': [' #ctype# #varname# = NULL;', + ' int slen(#varname#);', + ' PyObject *#varname#_capi = Py_None;'], 'callfortran':'#varname#,', 'callfortranappend':'slen(#varname#),', 'pyobjfrom':[ {debugcapi: - '\tfprintf(stderr,' + ' fprintf(stderr,' '"#vardebugshowvalue#\\n",slen(#varname#),#varname#);'}, # The trailing null value for Fortran is blank. {l_and(isintent_out, l_not(isintent_c)): - "\t\tSTRINGPADN(#varname#, slen(#varname#), ' ', '\\0');"}, + " STRINGPADN(#varname#, slen(#varname#), ' ', '\\0');"}, ], 'return': {isintent_out: ',#varname#'}, 'need': ['len..', @@ -958,18 +962,18 @@ if (#varname#_cb.capi==Py_None) { }, { # Common 'frompyobj': [ """\ -\tslen(#varname#) = #length#; -\tf2py_success = #ctype#_from_pyobj(&#varname#,&slen(#varname#),#init#,""" + slen(#varname#) = #length#; + f2py_success = #ctype#_from_pyobj(&#varname#,&slen(#varname#),#init#,""" """#varname#_capi,\"#ctype#_from_pyobj failed in converting #nth#""" """`#varname#\' of #pyname# to C #ctype#\"); -\tif (f2py_success) {""", + if (f2py_success) {""", # The trailing null value for Fortran is blank. 
{l_not(isintent_c): - "\t\tSTRINGPADN(#varname#, slen(#varname#), '\\0', ' ');"}, + " STRINGPADN(#varname#, slen(#varname#), '\\0', ' ');"}, ], 'cleanupfrompyobj': """\ -\t\tSTRINGFREE(#varname#); -\t} /*if (f2py_success) of #varname#*/""", + STRINGFREE(#varname#); + } /*if (f2py_success) of #varname#*/""", 'need': ['#ctype#_from_pyobj', 'len..', 'STRINGFREE', {l_not(isintent_c): 'STRINGPADN'}], '_check':isstring, @@ -981,36 +985,36 @@ if (#varname#_cb.capi==Py_None) { 'keys_capi': {isoptional: ',&#varname#_capi'}, 'pyobjfrom': [ {l_and(isintent_inout, l_not(isintent_c)): - "\t\tSTRINGPADN(#varname#, slen(#varname#), ' ', '\\0');"}, + " STRINGPADN(#varname#, slen(#varname#), ' ', '\\0');"}, {isintent_inout: '''\ -\tf2py_success = try_pyarr_from_#ctype#(#varname#_capi, #varname#, -\t slen(#varname#)); -\tif (f2py_success) {'''}], - 'closepyobjfrom': {isintent_inout: '\t} /*if (f2py_success) of #varname# pyobjfrom*/'}, + f2py_success = try_pyarr_from_#ctype#(#varname#_capi, #varname#, + slen(#varname#)); + if (f2py_success) {'''}], + 'closepyobjfrom': {isintent_inout: ' } /*if (f2py_success) of #varname# pyobjfrom*/'}, 'need': {isintent_inout: 'try_pyarr_from_#ctype#', l_and(isintent_inout, l_not(isintent_c)): 'STRINGPADN'}, '_check': l_and(isstring, isintent_nothide) }, { # Hidden '_check': l_and(isstring, isintent_hide) }, { - 'frompyobj': {debugcapi: '\tfprintf(stderr,"#vardebugshowvalue#\\n",slen(#varname#),#varname#);'}, + 'frompyobj': {debugcapi: ' fprintf(stderr,"#vardebugshowvalue#\\n",slen(#varname#),#varname#);'}, '_check': isstring, '_depend': '' }, # Array { # Common - 'decl': ['\t#ctype# *#varname# = NULL;', - '\tnpy_intp #varname#_Dims[#rank#] = {#rank*[-1]#};', - '\tconst int #varname#_Rank = #rank#;', - '\tPyArrayObject *capi_#varname#_tmp = NULL;', - '\tint capi_#varname#_intent = 0;', + 'decl': [' #ctype# *#varname# = NULL;', + ' npy_intp #varname#_Dims[#rank#] = {#rank*[-1]#};', + ' const int #varname#_Rank = #rank#;', + ' PyArrayObject *capi_#varname#_tmp = NULL;', + ' int capi_#varname#_intent = 0;', ], 'callfortran':'#varname#,', 'return':{isintent_out: ',capi_#varname#_tmp'}, 'need': 'len..', '_check': isarray }, { # intent(overwrite) array - 'decl': '\tint capi_overwrite_#varname# = 1;', + 'decl': ' int capi_overwrite_#varname# = 1;', 'kwlistxa': '"overwrite_#varname#",', 'xaformat': 'i', 'keys_xa': ',&capi_overwrite_#varname#', @@ -1019,12 +1023,12 @@ if (#varname#_cb.capi==Py_None) { 'docstropt': 'overwrite_#varname# : input int, optional\\n Default: 1', '_check': l_and(isarray, isintent_overwrite), }, { - 'frompyobj': '\tcapi_#varname#_intent |= (capi_overwrite_#varname#?0:F2PY_INTENT_COPY);', + 'frompyobj': ' capi_#varname#_intent |= (capi_overwrite_#varname#?0:F2PY_INTENT_COPY);', '_check': l_and(isarray, isintent_overwrite), '_depend': '', }, { # intent(copy) array - 'decl': '\tint capi_overwrite_#varname# = 0;', + 'decl': ' int capi_overwrite_#varname# = 0;', 'kwlistxa': '"overwrite_#varname#",', 'xaformat': 'i', 'keys_xa': ',&capi_overwrite_#varname#', @@ -1033,7 +1037,7 @@ if (#varname#_cb.capi==Py_None) { 'docstropt': 'overwrite_#varname# : input int, optional\\n Default: 0', '_check': l_and(isarray, isintent_copy), }, { - 'frompyobj': '\tcapi_#varname#_intent |= (capi_overwrite_#varname#?0:F2PY_INTENT_COPY);', + 'frompyobj': ' capi_#varname#_intent |= (capi_overwrite_#varname#?0:F2PY_INTENT_COPY);', '_check': l_and(isarray, isintent_copy), '_depend': '', }, { @@ -1041,57 +1045,57 @@ if (#varname#_cb.capi==Py_None) { '_check': isarray, '_depend': '' }, { # 
Not hidden - 'decl': '\tPyObject *#varname#_capi = Py_None;', + 'decl': ' PyObject *#varname#_capi = Py_None;', 'argformat': {isrequired: 'O'}, 'keyformat': {isoptional: 'O'}, 'args_capi': {isrequired: ',&#varname#_capi'}, 'keys_capi': {isoptional: ',&#varname#_capi'}, '_check': l_and(isarray, isintent_nothide) }, { - 'frompyobj': ['\t#setdims#;', - '\tcapi_#varname#_intent |= #intent#;', + 'frompyobj': [' #setdims#;', + ' capi_#varname#_intent |= #intent#;', {isintent_hide: - '\tcapi_#varname#_tmp = array_from_pyobj(#atype#,#varname#_Dims,#varname#_Rank,capi_#varname#_intent,Py_None);'}, + ' capi_#varname#_tmp = array_from_pyobj(#atype#,#varname#_Dims,#varname#_Rank,capi_#varname#_intent,Py_None);'}, {isintent_nothide: - '\tcapi_#varname#_tmp = array_from_pyobj(#atype#,#varname#_Dims,#varname#_Rank,capi_#varname#_intent,#varname#_capi);'}, + ' capi_#varname#_tmp = array_from_pyobj(#atype#,#varname#_Dims,#varname#_Rank,capi_#varname#_intent,#varname#_capi);'}, """\ -\tif (capi_#varname#_tmp == NULL) { -\t\tPyObject *exc, *val, *tb; -\t\tPyErr_Fetch(&exc, &val, &tb); -\t\tPyErr_SetString(exc ? exc : #modulename#_error,\"failed in converting #nth# `#varname#\' of #pyname# to C/Fortran array\" ); -\t\tnpy_PyErr_ChainExceptionsCause(exc, val, tb); -\t} else { -\t\t#varname# = (#ctype# *)(PyArray_DATA(capi_#varname#_tmp)); + if (capi_#varname#_tmp == NULL) { + PyObject *exc, *val, *tb; + PyErr_Fetch(&exc, &val, &tb); + PyErr_SetString(exc ? exc : #modulename#_error,\"failed in converting #nth# `#varname#\' of #pyname# to C/Fortran array\" ); + npy_PyErr_ChainExceptionsCause(exc, val, tb); + } else { + #varname# = (#ctype# *)(PyArray_DATA(capi_#varname#_tmp)); """, {hasinitvalue: [ {isintent_nothide: - '\tif (#varname#_capi == Py_None) {'}, - {isintent_hide: '\t{'}, - {iscomplexarray: '\t\t#ctype# capi_c;'}, + ' if (#varname#_capi == Py_None) {'}, + {isintent_hide: ' {'}, + {iscomplexarray: ' #ctype# capi_c;'}, """\ -\t\tint *_i,capi_i=0; -\t\tCFUNCSMESS(\"#name#: Initializing #varname#=#init#\\n\"); -\t\tif (initforcomb(PyArray_DIMS(capi_#varname#_tmp),PyArray_NDIM(capi_#varname#_tmp),1)) { -\t\t\twhile ((_i = nextforcomb())) -\t\t\t\t#varname#[capi_i++] = #init#; /* fortran way */ -\t\t} else { -\t\t\tPyObject *exc, *val, *tb; -\t\t\tPyErr_Fetch(&exc, &val, &tb); -\t\t\tPyErr_SetString(exc ? exc : #modulename#_error,\"Initialization of #nth# #varname# failed (initforcomb).\"); -\t\t\tnpy_PyErr_ChainExceptionsCause(exc, val, tb); -\t\t\tf2py_success = 0; -\t\t} -\t} -\tif (f2py_success) {"""]}, + int *_i,capi_i=0; + CFUNCSMESS(\"#name#: Initializing #varname#=#init#\\n\"); + if (initforcomb(PyArray_DIMS(capi_#varname#_tmp),PyArray_NDIM(capi_#varname#_tmp),1)) { + while ((_i = nextforcomb())) + #varname#[capi_i++] = #init#; /* fortran way */ + } else { + PyObject *exc, *val, *tb; + PyErr_Fetch(&exc, &val, &tb); + PyErr_SetString(exc ? exc : #modulename#_error,\"Initialization of #nth# #varname# failed (initforcomb).\"); + npy_PyErr_ChainExceptionsCause(exc, val, tb); + f2py_success = 0; + } + } + if (f2py_success) {"""]}, ], 'cleanupfrompyobj': [ # note that this list will be reversed - '\t} /*if (capi_#varname#_tmp == NULL) ... else of #varname#*/', + ' } /*if (capi_#varname#_tmp == NULL) ... 
else of #varname#*/', {l_not(l_or(isintent_out, isintent_hide)): """\ -\tif((PyObject *)capi_#varname#_tmp!=#varname#_capi) { -\t\tPy_XDECREF(capi_#varname#_tmp); }"""}, + if((PyObject *)capi_#varname#_tmp!=#varname#_capi) { + Py_XDECREF(capi_#varname#_tmp); }"""}, {l_and(isintent_hide, l_not(isintent_out)) - : """\t\tPy_XDECREF(capi_#varname#_tmp);"""}, - {hasinitvalue: '\t} /*if (f2py_success) of #varname# init*/'}, + : """ Py_XDECREF(capi_#varname#_tmp);"""}, + {hasinitvalue: ' } /*if (f2py_success) of #varname# init*/'}, ], '_check': isarray, '_depend': '' @@ -1139,30 +1143,30 @@ if (#varname#_cb.capi==Py_None) { check_rules = [ { - 'frompyobj': {debugcapi: '\tfprintf(stderr,\"debug-capi:Checking `#check#\'\\n\");'}, + 'frompyobj': {debugcapi: ' fprintf(stderr,\"debug-capi:Checking `#check#\'\\n\");'}, 'need': 'len..' }, { - 'frompyobj': '\tCHECKSCALAR(#check#,\"#check#\",\"#nth# #varname#\",\"#varshowvalue#\",#varname#) {', - 'cleanupfrompyobj': '\t} /*CHECKSCALAR(#check#)*/', + 'frompyobj': ' CHECKSCALAR(#check#,\"#check#\",\"#nth# #varname#\",\"#varshowvalue#\",#varname#) {', + 'cleanupfrompyobj': ' } /*CHECKSCALAR(#check#)*/', 'need': 'CHECKSCALAR', '_check': l_and(isscalar, l_not(iscomplex)), '_break': '' }, { - 'frompyobj': '\tCHECKSTRING(#check#,\"#check#\",\"#nth# #varname#\",\"#varshowvalue#\",#varname#) {', - 'cleanupfrompyobj': '\t} /*CHECKSTRING(#check#)*/', + 'frompyobj': ' CHECKSTRING(#check#,\"#check#\",\"#nth# #varname#\",\"#varshowvalue#\",#varname#) {', + 'cleanupfrompyobj': ' } /*CHECKSTRING(#check#)*/', 'need': 'CHECKSTRING', '_check': isstring, '_break': '' }, { 'need': 'CHECKARRAY', - 'frompyobj': '\tCHECKARRAY(#check#,\"#check#\",\"#nth# #varname#\") {', - 'cleanupfrompyobj': '\t} /*CHECKARRAY(#check#)*/', + 'frompyobj': ' CHECKARRAY(#check#,\"#check#\",\"#nth# #varname#\") {', + 'cleanupfrompyobj': ' } /*CHECKARRAY(#check#)*/', '_check': isarray, '_break': '' }, { 'need': 'CHECKGENERIC', - 'frompyobj': '\tCHECKGENERIC(#check#,\"#check#\",\"#nth# #varname#\") {', - 'cleanupfrompyobj': '\t} /*CHECKGENERIC(#check#)*/', + 'frompyobj': ' CHECKGENERIC(#check#,\"#check#\",\"#nth# #varname#\") {', + 'cleanupfrompyobj': ' } /*CHECKGENERIC(#check#)*/', } ] @@ -1175,7 +1179,7 @@ def buildmodule(m, um): """ Return """ - outmess('\tBuilding module "%s"...\n' % (m['name'])) + outmess(' Building module "%s"...\n' % (m['name'])) ret = {} mod_rules = defmod_rules[:] vrd = capi_maps.modsign2map(m) @@ -1277,7 +1281,7 @@ def buildmodule(m, um): ret['csrc'] = fn with open(fn, 'w') as f: f.write(ar['modulebody'].replace('\t', 2 * ' ')) - outmess('\tWrote C/API module "%s" to file "%s"\n' % (m['name'], fn)) + outmess(' Wrote C/API module "%s" to file "%s"\n' % (m['name'], fn)) if options['dorestdoc']: fn = os.path.join( @@ -1285,7 +1289,7 @@ def buildmodule(m, um): with open(fn, 'w') as f: f.write('.. 
-*- rest -*-\n') f.write('\n'.join(ar['restdoc'])) - outmess('\tReST Documentation is saved to file "%s/%smodule.rest"\n' % + outmess(' ReST Documentation is saved to file "%s/%smodule.rest"\n' % (options['buildpath'], vrd['modulename'])) if options['dolatexdoc']: fn = os.path.join( @@ -1300,7 +1304,7 @@ def buildmodule(m, um): f.write('\n'.join(ar['latexdoc'])) if 'shortlatex' not in options: f.write('\\end{document}') - outmess('\tDocumentation is saved to file "%s/%smodule.tex"\n' % + outmess(' Documentation is saved to file "%s/%smodule.tex"\n' % (options['buildpath'], vrd['modulename'])) if funcwrappers: wn = os.path.join(options['buildpath'], vrd['f2py_wrapper_output']) @@ -1325,7 +1329,7 @@ def buildmodule(m, um): lines.append(l + '\n') lines = ''.join(lines).replace('\n &\n', '\n') f.write(lines) - outmess('\tFortran 77 wrappers are saved to "%s"\n' % (wn)) + outmess(' Fortran 77 wrappers are saved to "%s"\n' % (wn)) if funcwrappers2: wn = os.path.join( options['buildpath'], '%s-f2pywrappers2.f90' % (vrd['modulename'])) @@ -1352,7 +1356,7 @@ def buildmodule(m, um): lines.append(l + '\n') lines = ''.join(lines).replace('\n &\n', '\n') f.write(lines) - outmess('\tFortran 90 wrappers are saved to "%s"\n' % (wn)) + outmess(' Fortran 90 wrappers are saved to "%s"\n' % (wn)) return ret ################## Build C/API function ############# @@ -1368,10 +1372,10 @@ def buildapi(rout): var = rout['vars'] if ismoduleroutine(rout): - outmess('\t\t\tConstructing wrapper function "%s.%s"...\n' % + outmess(' Constructing wrapper function "%s.%s"...\n' % (rout['modulename'], rout['name'])) else: - outmess('\t\tConstructing wrapper function "%s"...\n' % (rout['name'])) + outmess(' Constructing wrapper function "%s"...\n' % (rout['name'])) # Routine vrd = capi_maps.routsign2map(rout) rd = dictappend({}, vrd) @@ -1473,9 +1477,9 @@ def buildapi(rout): ar = applyrules(routine_rules, rd) if ismoduleroutine(rout): - outmess('\t\t\t %s\n' % (ar['docshort'])) + outmess(' %s\n' % (ar['docshort'])) else: - outmess('\t\t %s\n' % (ar['docshort'])) + outmess(' %s\n' % (ar['docshort'])) return ar, wrap diff --git a/numpy/f2py/setup.py b/numpy/f2py/setup.py index 0a35db477..499609f96 100644 --- a/numpy/f2py/setup.py +++ b/numpy/f2py/setup.py @@ -39,8 +39,6 @@ if __name__ == "__main__": config = configuration(top_path='') config = config.todict() - config['download_url'] = "http://cens.ioc.ee/projects/f2py2e/2.x"\ - "/F2PY-2-latest.tar.gz" config['classifiers'] = [ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', @@ -68,6 +66,6 @@ command line tool (f2py) for generating Python C/API modules for wrapping Fortran 77/90/95 subroutines, accessing common blocks from Python, and calling Python functions from Fortran (call-backs). 
Interfacing subroutines/data from Fortran 90/95 modules is supported.""", - url="http://cens.ioc.ee/projects/f2py2e/", + url="https://numpy.org/doc/stable/f2py/", keywords=['Fortran', 'f2py'], **config) diff --git a/numpy/f2py/src/fortranobject.c b/numpy/f2py/src/fortranobject.c index b9ef18701..0b32137ef 100644 --- a/numpy/f2py/src/fortranobject.c +++ b/numpy/f2py/src/fortranobject.c @@ -19,7 +19,7 @@ extern "C" { int F2PyDict_SetItemString(PyObject *dict, char *name, PyObject *obj) { - if (obj==NULL) { + if (obj == NULL) { fprintf(stderr, "Error loading %s\n", name); if (PyErr_Occurred()) { PyErr_Print(); @@ -33,21 +33,25 @@ F2PyDict_SetItemString(PyObject *dict, char *name, PyObject *obj) /* * Python-only fallback for thread-local callback pointers */ -void *F2PySwapThreadLocalCallbackPtr(char *key, void *ptr) +void * +F2PySwapThreadLocalCallbackPtr(char *key, void *ptr) { PyObject *local_dict, *value; void *prev; local_dict = PyThreadState_GetDict(); if (local_dict == NULL) { - Py_FatalError("F2PySwapThreadLocalCallbackPtr: PyThreadState_GetDict failed"); + Py_FatalError( + "F2PySwapThreadLocalCallbackPtr: PyThreadState_GetDict " + "failed"); } value = PyDict_GetItemString(local_dict, key); if (value != NULL) { prev = PyLong_AsVoidPtr(value); if (PyErr_Occurred()) { - Py_FatalError("F2PySwapThreadLocalCallbackPtr: PyLong_AsVoidPtr failed"); + Py_FatalError( + "F2PySwapThreadLocalCallbackPtr: PyLong_AsVoidPtr failed"); } } else { @@ -56,11 +60,13 @@ void *F2PySwapThreadLocalCallbackPtr(char *key, void *ptr) value = PyLong_FromVoidPtr((void *)ptr); if (value == NULL) { - Py_FatalError("F2PySwapThreadLocalCallbackPtr: PyLong_FromVoidPtr failed"); + Py_FatalError( + "F2PySwapThreadLocalCallbackPtr: PyLong_FromVoidPtr failed"); } if (PyDict_SetItemString(local_dict, key, value) != 0) { - Py_FatalError("F2PySwapThreadLocalCallbackPtr: PyDict_SetItemString failed"); + Py_FatalError( + "F2PySwapThreadLocalCallbackPtr: PyDict_SetItemString failed"); } Py_DECREF(value); @@ -68,21 +74,24 @@ void *F2PySwapThreadLocalCallbackPtr(char *key, void *ptr) return prev; } -void *F2PyGetThreadLocalCallbackPtr(char *key) +void * +F2PyGetThreadLocalCallbackPtr(char *key) { PyObject *local_dict, *value; void *prev; local_dict = PyThreadState_GetDict(); if (local_dict == NULL) { - Py_FatalError("F2PyGetThreadLocalCallbackPtr: PyThreadState_GetDict failed"); + Py_FatalError( + "F2PyGetThreadLocalCallbackPtr: PyThreadState_GetDict failed"); } value = PyDict_GetItemString(local_dict, key); if (value != NULL) { prev = PyLong_AsVoidPtr(value); if (PyErr_Occurred()) { - Py_FatalError("F2PyGetThreadLocalCallbackPtr: PyLong_AsVoidPtr failed"); + Py_FatalError( + "F2PyGetThreadLocalCallbackPtr: PyLong_AsVoidPtr failed"); } } else { @@ -94,14 +103,15 @@ void *F2PyGetThreadLocalCallbackPtr(char *key) /************************* FortranObject *******************************/ -typedef PyObject *(*fortranfunc)(PyObject *,PyObject *,PyObject *,void *); +typedef PyObject *(*fortranfunc)(PyObject *, PyObject *, PyObject *, void *); PyObject * -PyFortranObject_New(FortranDataDef* defs, f2py_void_func init) { +PyFortranObject_New(FortranDataDef *defs, f2py_void_func init) +{ int i; PyFortranObject *fp = NULL; PyObject *v = NULL; - if (init!=NULL) { /* Initialize F90 module objects */ + if (init != NULL) { /* Initialize F90 module objects */ (*(init))(); } fp = PyObject_New(PyFortranObject, &PyFortran_Type); @@ -120,46 +130,49 @@ PyFortranObject_New(FortranDataDef* defs, f2py_void_func init) { goto fail; } fp->defs = defs; - for 
(i=0;i<fp->len;i++) { - if (fp->defs[i].rank == -1) { /* Is Fortran routine */ + for (i = 0; i < fp->len; i++) { + if (fp->defs[i].rank == -1) { /* Is Fortran routine */ v = PyFortranObject_NewAsAttr(&(fp->defs[i])); - if (v==NULL) { + if (v == NULL) { goto fail; } - PyDict_SetItemString(fp->dict,fp->defs[i].name,v); + PyDict_SetItemString(fp->dict, fp->defs[i].name, v); Py_XDECREF(v); - } else - if ((fp->defs[i].data)!=NULL) { /* Is Fortran variable or array (not allocatable) */ - if (fp->defs[i].type == NPY_STRING) { - int n = fp->defs[i].rank-1; - v = PyArray_New(&PyArray_Type, n, fp->defs[i].dims.d, - NPY_STRING, NULL, fp->defs[i].data, fp->defs[i].dims.d[n], - NPY_ARRAY_FARRAY, NULL); - } - else { - v = PyArray_New(&PyArray_Type, fp->defs[i].rank, fp->defs[i].dims.d, - fp->defs[i].type, NULL, fp->defs[i].data, 0, NPY_ARRAY_FARRAY, - NULL); - } - if (v==NULL) { - goto fail; - } - PyDict_SetItemString(fp->dict,fp->defs[i].name,v); - Py_XDECREF(v); + } + else if ((fp->defs[i].data) != + NULL) { /* Is Fortran variable or array (not allocatable) */ + if (fp->defs[i].type == NPY_STRING) { + npy_intp n = fp->defs[i].rank - 1; + v = PyArray_New(&PyArray_Type, n, fp->defs[i].dims.d, + NPY_STRING, NULL, fp->defs[i].data, + fp->defs[i].dims.d[n], NPY_ARRAY_FARRAY, NULL); + } + else { + v = PyArray_New(&PyArray_Type, fp->defs[i].rank, + fp->defs[i].dims.d, fp->defs[i].type, NULL, + fp->defs[i].data, 0, NPY_ARRAY_FARRAY, NULL); + } + if (v == NULL) { + goto fail; } + PyDict_SetItemString(fp->dict, fp->defs[i].name, v); + Py_XDECREF(v); + } } return (PyObject *)fp; - fail: +fail: Py_XDECREF(fp); return NULL; } PyObject * -PyFortranObject_NewAsAttr(FortranDataDef* defs) { /* used for calling F90 module routines */ +PyFortranObject_NewAsAttr(FortranDataDef *defs) +{ /* used for calling F90 module routines */ PyFortranObject *fp = NULL; fp = PyObject_New(PyFortranObject, &PyFortran_Type); - if (fp == NULL) return NULL; - if ((fp->dict = PyDict_New())==NULL) { + if (fp == NULL) + return NULL; + if ((fp->dict = PyDict_New()) == NULL) { PyObject_Del(fp); return NULL; } @@ -171,18 +184,19 @@ PyFortranObject_NewAsAttr(FortranDataDef* defs) { /* used for calling F90 module /* Fortran methods */ static void -fortran_dealloc(PyFortranObject *fp) { +fortran_dealloc(PyFortranObject *fp) +{ Py_XDECREF(fp->dict); PyObject_Del(fp); } - /* Returns number of bytes consumed from buf, or -1 on error. 
*/ static Py_ssize_t format_def(char *buf, Py_ssize_t size, FortranDataDef def) { char *p = buf; - int i, n; + int i; + npy_intp n; n = PyOS_snprintf(p, size, "array(%" NPY_INTP_FMT, def.dims.d[0]); if (n < 0 || n >= size) { @@ -209,7 +223,7 @@ format_def(char *buf, Py_ssize_t size, FortranDataDef def) if (def.data == NULL) { static const char notalloc[] = ", not allocated"; - if ((size_t) size < sizeof(notalloc)) { + if ((size_t)size < sizeof(notalloc)) { return -1; } memcpy(p, notalloc, sizeof(notalloc)); @@ -290,7 +304,6 @@ fortran_doc(FortranDataDef def) p += n; size -= n; } - } if (size <= 1) { goto fail; @@ -304,17 +317,20 @@ fortran_doc(FortranDataDef def) PyMem_Free(buf); return s; - fail: - fprintf(stderr, "fortranobject.c: fortran_doc: len(p)=%zd>%zd=size:" - " too long docstring required, increase size\n", +fail: + fprintf(stderr, + "fortranobject.c: fortran_doc: len(p)=%zd>%zd=size:" + " too long docstring required, increase size\n", p - buf, origsize); PyMem_Free(buf); return NULL; } static FortranDataDef *save_def; /* save pointer of an allocatable array */ -static void set_data(char *d,npy_intp *f) { /* callback from Fortran */ - if (*f) /* In fortran f=allocated(d) */ +static void +set_data(char *d, npy_intp *f) +{ /* callback from Fortran */ + if (*f) /* In fortran f=allocated(d) */ save_def->data = d; else save_def->data = NULL; @@ -322,8 +338,9 @@ static void set_data(char *d,npy_intp *f) { /* callback from Fortran */ } static PyObject * -fortran_getattr(PyFortranObject *fp, char *name) { - int i,j,k,flag; +fortran_getattr(PyFortranObject *fp, char *name) +{ + int i, j, k, flag; if (fp->dict != NULL) { PyObject *v = _PyDict_GetItemStringWithError(fp->dict, name); if (v == NULL && PyErr_Occurred()) { @@ -334,36 +351,41 @@ fortran_getattr(PyFortranObject *fp, char *name) { return v; } } - for (i=0,j=1;i<fp->len && (j=strcmp(name,fp->defs[i].name));i++); - if (j==0) - if (fp->defs[i].rank!=-1) { /* F90 allocatable array */ - if (fp->defs[i].func==NULL) return NULL; - for(k=0;k<fp->defs[i].rank;++k) - fp->defs[i].dims.d[k]=-1; + for (i = 0, j = 1; i < fp->len && (j = strcmp(name, fp->defs[i].name)); + i++) + ; + if (j == 0) + if (fp->defs[i].rank != -1) { /* F90 allocatable array */ + if (fp->defs[i].func == NULL) + return NULL; + for (k = 0; k < fp->defs[i].rank; ++k) fp->defs[i].dims.d[k] = -1; save_def = &fp->defs[i]; - (*(fp->defs[i].func))(&fp->defs[i].rank,fp->defs[i].dims.d,set_data,&flag); - if (flag==2) + (*(fp->defs[i].func))(&fp->defs[i].rank, fp->defs[i].dims.d, + set_data, &flag); + if (flag == 2) k = fp->defs[i].rank + 1; else k = fp->defs[i].rank; - if (fp->defs[i].data !=NULL) { /* array is allocated */ - PyObject *v = PyArray_New(&PyArray_Type, k, fp->defs[i].dims.d, - fp->defs[i].type, NULL, fp->defs[i].data, 0, NPY_ARRAY_FARRAY, - NULL); - if (v==NULL) return NULL; + if (fp->defs[i].data != NULL) { /* array is allocated */ + PyObject *v = PyArray_New( + &PyArray_Type, k, fp->defs[i].dims.d, fp->defs[i].type, + NULL, fp->defs[i].data, 0, NPY_ARRAY_FARRAY, NULL); + if (v == NULL) + return NULL; /* Py_INCREF(v); */ return v; - } else { /* array is not allocated */ + } + else { /* array is not allocated */ Py_RETURN_NONE; } } - if (strcmp(name,"__dict__")==0) { + if (strcmp(name, "__dict__") == 0) { Py_INCREF(fp->dict); return fp->dict; } - if (strcmp(name,"__doc__")==0) { + if (strcmp(name, "__doc__") == 0) { PyObject *s = PyUnicode_FromString(""), *s2, *s3; - for (i=0;i<fp->len;i++) { + for (i = 0; i < fp->len; i++) { s2 = fortran_doc(fp->defs[i]); s3 = 
PyUnicode_Concat(s, s2); Py_DECREF(s2); @@ -374,8 +396,9 @@ fortran_getattr(PyFortranObject *fp, char *name) { return NULL; return s; } - if ((strcmp(name,"_cpointer")==0) && (fp->len==1)) { - PyObject *cobj = F2PyCapsule_FromVoidPtr((void *)(fp->defs[0].data),NULL); + if ((strcmp(name, "_cpointer") == 0) && (fp->len == 1)) { + PyObject *cobj = + F2PyCapsule_FromVoidPtr((void *)(fp->defs[0].data), NULL); if (PyDict_SetItemString(fp->dict, name, cobj)) return NULL; return cobj; @@ -388,51 +411,68 @@ fortran_getattr(PyFortranObject *fp, char *name) { } static int -fortran_setattr(PyFortranObject *fp, char *name, PyObject *v) { - int i,j,flag; +fortran_setattr(PyFortranObject *fp, char *name, PyObject *v) +{ + int i, j, flag; PyArrayObject *arr = NULL; - for (i=0,j=1;i<fp->len && (j=strcmp(name,fp->defs[i].name));i++); - if (j==0) { - if (fp->defs[i].rank==-1) { - PyErr_SetString(PyExc_AttributeError,"over-writing fortran routine"); + for (i = 0, j = 1; i < fp->len && (j = strcmp(name, fp->defs[i].name)); + i++) + ; + if (j == 0) { + if (fp->defs[i].rank == -1) { + PyErr_SetString(PyExc_AttributeError, + "over-writing fortran routine"); return -1; } - if (fp->defs[i].func!=NULL) { /* is allocatable array */ + if (fp->defs[i].func != NULL) { /* is allocatable array */ npy_intp dims[F2PY_MAX_DIMS]; int k; save_def = &fp->defs[i]; - if (v!=Py_None) { /* set new value (reallocate if needed -- - see f2py generated code for more - details ) */ - for(k=0;k<fp->defs[i].rank;k++) dims[k]=-1; - if ((arr = array_from_pyobj(fp->defs[i].type,dims,fp->defs[i].rank,F2PY_INTENT_IN,v))==NULL) + if (v != Py_None) { /* set new value (reallocate if needed -- + see f2py generated code for more + details ) */ + for (k = 0; k < fp->defs[i].rank; k++) dims[k] = -1; + if ((arr = array_from_pyobj(fp->defs[i].type, dims, + fp->defs[i].rank, F2PY_INTENT_IN, + v)) == NULL) return -1; - (*(fp->defs[i].func))(&fp->defs[i].rank,PyArray_DIMS(arr),set_data,&flag); - } else { /* deallocate */ - for(k=0;k<fp->defs[i].rank;k++) dims[k]=0; - (*(fp->defs[i].func))(&fp->defs[i].rank,dims,set_data,&flag); - for(k=0;k<fp->defs[i].rank;k++) dims[k]=-1; + (*(fp->defs[i].func))(&fp->defs[i].rank, PyArray_DIMS(arr), + set_data, &flag); + } + else { /* deallocate */ + for (k = 0; k < fp->defs[i].rank; k++) dims[k] = 0; + (*(fp->defs[i].func))(&fp->defs[i].rank, dims, set_data, + &flag); + for (k = 0; k < fp->defs[i].rank; k++) dims[k] = -1; } - memcpy(fp->defs[i].dims.d,dims,fp->defs[i].rank*sizeof(npy_intp)); - } else { /* not allocatable array */ - if ((arr = array_from_pyobj(fp->defs[i].type,fp->defs[i].dims.d,fp->defs[i].rank,F2PY_INTENT_IN,v))==NULL) + memcpy(fp->defs[i].dims.d, dims, + fp->defs[i].rank * sizeof(npy_intp)); + } + else { /* not allocatable array */ + if ((arr = array_from_pyobj(fp->defs[i].type, fp->defs[i].dims.d, + fp->defs[i].rank, F2PY_INTENT_IN, + v)) == NULL) return -1; } - if (fp->defs[i].data!=NULL) { /* copy Python object to Fortran array */ - npy_intp s = PyArray_MultiplyList(fp->defs[i].dims.d,PyArray_NDIM(arr)); - if (s==-1) - s = PyArray_MultiplyList(PyArray_DIMS(arr),PyArray_NDIM(arr)); - if (s<0 || - (memcpy(fp->defs[i].data,PyArray_DATA(arr),s*PyArray_ITEMSIZE(arr)))==NULL) { - if ((PyObject*)arr!=v) { + if (fp->defs[i].data != + NULL) { /* copy Python object to Fortran array */ + npy_intp s = PyArray_MultiplyList(fp->defs[i].dims.d, + PyArray_NDIM(arr)); + if (s == -1) + s = PyArray_MultiplyList(PyArray_DIMS(arr), PyArray_NDIM(arr)); + if (s < 0 || (memcpy(fp->defs[i].data, PyArray_DATA(arr), + s * 
PyArray_ITEMSIZE(arr))) == NULL) { + if ((PyObject *)arr != v) { Py_DECREF(arr); } return -1; } - if ((PyObject*)arr!=v) { + if ((PyObject *)arr != v) { Py_DECREF(arr); } - } else return (fp->defs[i].func==NULL?-1:0); + } + else + return (fp->defs[i].func == NULL ? -1 : 0); return 0; /* successful */ } if (fp->dict == NULL) { @@ -443,30 +483,33 @@ fortran_setattr(PyFortranObject *fp, char *name, PyObject *v) { if (v == NULL) { int rv = PyDict_DelItemString(fp->dict, name); if (rv < 0) - PyErr_SetString(PyExc_AttributeError,"delete non-existing fortran attribute"); + PyErr_SetString(PyExc_AttributeError, + "delete non-existing fortran attribute"); return rv; } else return PyDict_SetItemString(fp->dict, name, v); } -static PyObject* -fortran_call(PyFortranObject *fp, PyObject *arg, PyObject *kw) { +static PyObject * +fortran_call(PyFortranObject *fp, PyObject *arg, PyObject *kw) +{ int i = 0; /* printf("fortran call name=%s,func=%p,data=%p,%p\n",fp->defs[i].name, fp->defs[i].func,fp->defs[i].data,&fp->defs[i].data); */ - if (fp->defs[i].rank==-1) {/* is Fortran routine */ - if (fp->defs[i].func==NULL) { + if (fp->defs[i].rank == -1) { /* is Fortran routine */ + if (fp->defs[i].func == NULL) { PyErr_Format(PyExc_RuntimeError, "no function to call"); return NULL; } - else if (fp->defs[i].data==NULL) + else if (fp->defs[i].data == NULL) /* dummy routine */ - return (*((fortranfunc)(fp->defs[i].func)))((PyObject *)fp,arg,kw,NULL); + return (*((fortranfunc)(fp->defs[i].func)))((PyObject *)fp, arg, + kw, NULL); else - return (*((fortranfunc)(fp->defs[i].func)))((PyObject *)fp,arg,kw, - (void *)fp->defs[i].data); + return (*((fortranfunc)(fp->defs[i].func)))( + (PyObject *)fp, arg, kw, (void *)fp->defs[i].data); } PyErr_Format(PyExc_TypeError, "this fortran object is not callable"); return NULL; @@ -488,16 +531,14 @@ fortran_repr(PyFortranObject *fp) return repr; } - PyTypeObject PyFortran_Type = { - PyVarObject_HEAD_INIT(NULL, 0) - .tp_name ="fortran", - .tp_basicsize = sizeof(PyFortranObject), - .tp_dealloc = (destructor)fortran_dealloc, - .tp_getattr = (getattrfunc)fortran_getattr, - .tp_setattr = (setattrfunc)fortran_setattr, - .tp_repr = (reprfunc)fortran_repr, - .tp_call = (ternaryfunc)fortran_call, + PyVarObject_HEAD_INIT(NULL, 0).tp_name = "fortran", + .tp_basicsize = sizeof(PyFortranObject), + .tp_dealloc = (destructor)fortran_dealloc, + .tp_getattr = (getattrfunc)fortran_getattr, + .tp_setattr = (setattrfunc)fortran_setattr, + .tp_repr = (reprfunc)fortran_repr, + .tp_call = (ternaryfunc)fortran_call, }; /************************* f2py_report_atexit *******************************/ @@ -518,99 +559,123 @@ static struct timeb cb_stop_time; static struct timeb cb_start_call_time; static struct timeb cb_stop_call_time; -extern void f2py_start_clock(void) { ftime(&start_time); } -extern -void f2py_start_call_clock(void) { +extern void +f2py_start_clock(void) +{ + ftime(&start_time); +} +extern void +f2py_start_call_clock(void) +{ f2py_stop_clock(); ftime(&start_call_time); } -extern -void f2py_stop_clock(void) { +extern void +f2py_stop_clock(void) +{ ftime(&stop_time); - passed_time += 1000*(stop_time.time - start_time.time); + passed_time += 1000 * (stop_time.time - start_time.time); passed_time += stop_time.millitm - start_time.millitm; } -extern -void f2py_stop_call_clock(void) { +extern void +f2py_stop_call_clock(void) +{ ftime(&stop_call_time); - passed_call_time += 1000*(stop_call_time.time - start_call_time.time); + passed_call_time += 1000 * (stop_call_time.time - start_call_time.time); 
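/* The two accumulations here express elapsed wall time in
 * milliseconds from a pair of struct timeb samples: whole seconds
 * are scaled by 1000 and the sub-second residue is added on top,
 *     elapsed_ms = 1000*(stop.time - start.time)
 *                  + (stop.millitm - start.millitm);
 * e.g. start = {time:100, millitm:900} and stop = {time:101,
 * millitm:50} give 1000*1 + (50 - 900) = 150 msec. */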
passed_call_time += stop_call_time.millitm - start_call_time.millitm; passed_counter += 1; f2py_start_clock(); } -extern void f2py_cb_start_clock(void) { ftime(&cb_start_time); } -extern -void f2py_cb_start_call_clock(void) { +extern void +f2py_cb_start_clock(void) +{ + ftime(&cb_start_time); +} +extern void +f2py_cb_start_call_clock(void) +{ f2py_cb_stop_clock(); ftime(&cb_start_call_time); } -extern -void f2py_cb_stop_clock(void) { +extern void +f2py_cb_stop_clock(void) +{ ftime(&cb_stop_time); - cb_passed_time += 1000*(cb_stop_time.time - cb_start_time.time); + cb_passed_time += 1000 * (cb_stop_time.time - cb_start_time.time); cb_passed_time += cb_stop_time.millitm - cb_start_time.millitm; } -extern -void f2py_cb_stop_call_clock(void) { +extern void +f2py_cb_stop_call_clock(void) +{ ftime(&cb_stop_call_time); - cb_passed_call_time += 1000*(cb_stop_call_time.time - cb_start_call_time.time); - cb_passed_call_time += cb_stop_call_time.millitm - cb_start_call_time.millitm; + cb_passed_call_time += + 1000 * (cb_stop_call_time.time - cb_start_call_time.time); + cb_passed_call_time += + cb_stop_call_time.millitm - cb_start_call_time.millitm; cb_passed_counter += 1; f2py_cb_start_clock(); } static int f2py_report_on_exit_been_here = 0; -extern -void f2py_report_on_exit(int exit_flag,void *name) { +extern void +f2py_report_on_exit(int exit_flag, void *name) +{ if (f2py_report_on_exit_been_here) { - fprintf(stderr," %s\n",(char*)name); + fprintf(stderr, " %s\n", (char *)name); return; } f2py_report_on_exit_been_here = 1; - fprintf(stderr," /-----------------------\\\n"); - fprintf(stderr," < F2PY performance report >\n"); - fprintf(stderr," \\-----------------------/\n"); - fprintf(stderr,"Overall time spent in ...\n"); - fprintf(stderr,"(a) wrapped (Fortran/C) functions : %8d msec\n", + fprintf(stderr, " /-----------------------\\\n"); + fprintf(stderr, " < F2PY performance report >\n"); + fprintf(stderr, " \\-----------------------/\n"); + fprintf(stderr, "Overall time spent in ...\n"); + fprintf(stderr, "(a) wrapped (Fortran/C) functions : %8d msec\n", passed_call_time); - fprintf(stderr,"(b) f2py interface, %6d calls : %8d msec\n", - passed_counter,passed_time); - fprintf(stderr,"(c) call-back (Python) functions : %8d msec\n", + fprintf(stderr, "(b) f2py interface, %6d calls : %8d msec\n", + passed_counter, passed_time); + fprintf(stderr, "(c) call-back (Python) functions : %8d msec\n", cb_passed_call_time); - fprintf(stderr,"(d) f2py call-back interface, %6d calls : %8d msec\n", - cb_passed_counter,cb_passed_time); - - fprintf(stderr,"(e) wrapped (Fortran/C) functions (actual) : %8d msec\n\n", - passed_call_time-cb_passed_call_time-cb_passed_time); - fprintf(stderr,"Use -DF2PY_REPORT_ATEXIT_DISABLE to disable this message.\n"); - fprintf(stderr,"Exit status: %d\n",exit_flag); - fprintf(stderr,"Modules : %s\n",(char*)name); + fprintf(stderr, "(d) f2py call-back interface, %6d calls : %8d msec\n", + cb_passed_counter, cb_passed_time); + + fprintf(stderr, + "(e) wrapped (Fortran/C) functions (actual) : %8d msec\n\n", + passed_call_time - cb_passed_call_time - cb_passed_time); + fprintf(stderr, + "Use -DF2PY_REPORT_ATEXIT_DISABLE to disable this message.\n"); + fprintf(stderr, "Exit status: %d\n", exit_flag); + fprintf(stderr, "Modules : %s\n", (char *)name); } #endif /********************** report on array copy ****************************/ #ifdef F2PY_REPORT_ON_ARRAY_COPY -static void f2py_report_on_array_copy(PyArrayObject* arr) { +static void +f2py_report_on_array_copy(PyArrayObject *arr) +{ 
const npy_intp arr_size = PyArray_Size((PyObject *)arr); - if (arr_size>F2PY_REPORT_ON_ARRAY_COPY) { - fprintf(stderr,"copied an array: size=%ld, elsize=%"NPY_INTP_FMT"\n", + if (arr_size > F2PY_REPORT_ON_ARRAY_COPY) { + fprintf(stderr, + "copied an array: size=%ld, elsize=%" NPY_INTP_FMT "\n", arr_size, (npy_intp)PyArray_ITEMSIZE(arr)); } } -static void f2py_report_on_array_copy_fromany(void) { - fprintf(stderr,"created an array from object\n"); +static void +f2py_report_on_array_copy_fromany(void) +{ + fprintf(stderr, "created an array from object\n"); } -#define F2PY_REPORT_ON_ARRAY_COPY_FROMARR f2py_report_on_array_copy((PyArrayObject *)arr) +#define F2PY_REPORT_ON_ARRAY_COPY_FROMARR \ + f2py_report_on_array_copy((PyArrayObject *)arr) #define F2PY_REPORT_ON_ARRAY_COPY_FROMANY f2py_report_on_array_copy_fromany() #else #define F2PY_REPORT_ON_ARRAY_COPY_FROMARR #define F2PY_REPORT_ON_ARRAY_COPY_FROMANY #endif - /************************* array_from_obj *******************************/ /* @@ -632,72 +697,82 @@ static void f2py_report_on_array_copy_fromany(void) { * $Id: fortranobject.c,v 1.52 2005/07/11 07:44:20 pearu Exp $ */ -static int check_and_fix_dimensions(const PyArrayObject* arr, - const int rank, - npy_intp *dims); +static int +check_and_fix_dimensions(const PyArrayObject *arr, const int rank, + npy_intp *dims); static int -count_negative_dimensions(const int rank, - const npy_intp *dims) { - int i=0,r=0; - while (i<rank) { - if (dims[i] < 0) ++r; +count_negative_dimensions(const int rank, const npy_intp *dims) +{ + int i = 0, r = 0; + while (i < rank) { + if (dims[i] < 0) + ++r; ++i; } return r; } #ifdef DEBUG_COPY_ND_ARRAY -void dump_dims(int rank, npy_intp const* dims) { +void +dump_dims(int rank, npy_intp const *dims) +{ int i; printf("["); - for(i=0;i<rank;++i) { + for (i = 0; i < rank; ++i) { printf("%3" NPY_INTP_FMT, dims[i]); } printf("]\n"); } -void dump_attrs(const PyArrayObject* obj) { - const PyArrayObject_fields *arr = (const PyArrayObject_fields*) obj; +void +dump_attrs(const PyArrayObject *obj) +{ + const PyArrayObject_fields *arr = (const PyArrayObject_fields *)obj; int rank = PyArray_NDIM(arr); npy_intp size = PyArray_Size((PyObject *)arr); - printf("\trank = %d, flags = %d, size = %" NPY_INTP_FMT "\n", - rank,arr->flags,size); + printf("\trank = %d, flags = %d, size = %" NPY_INTP_FMT "\n", rank, + arr->flags, size); printf("\tstrides = "); - dump_dims(rank,arr->strides); + dump_dims(rank, arr->strides); printf("\tdimensions = "); - dump_dims(rank,arr->dimensions); + dump_dims(rank, arr->dimensions); } #endif -#define SWAPTYPE(a,b,t) {t c; c = (a); (a) = (b); (b) = c; } - -static int swap_arrays(PyArrayObject* obj1, PyArrayObject* obj2) { - PyArrayObject_fields *arr1 = (PyArrayObject_fields*) obj1, - *arr2 = (PyArrayObject_fields*) obj2; - SWAPTYPE(arr1->data,arr2->data,char*); - SWAPTYPE(arr1->nd,arr2->nd,int); - SWAPTYPE(arr1->dimensions,arr2->dimensions,npy_intp*); - SWAPTYPE(arr1->strides,arr2->strides,npy_intp*); - SWAPTYPE(arr1->base,arr2->base,PyObject*); - SWAPTYPE(arr1->descr,arr2->descr,PyArray_Descr*); - SWAPTYPE(arr1->flags,arr2->flags,int); +#define SWAPTYPE(a, b, t) \ + { \ + t c; \ + c = (a); \ + (a) = (b); \ + (b) = c; \ + } + +static int +swap_arrays(PyArrayObject *obj1, PyArrayObject *obj2) +{ + PyArrayObject_fields *arr1 = (PyArrayObject_fields *)obj1, + *arr2 = (PyArrayObject_fields *)obj2; + SWAPTYPE(arr1->data, arr2->data, char *); + SWAPTYPE(arr1->nd, arr2->nd, int); + SWAPTYPE(arr1->dimensions, arr2->dimensions, npy_intp *); + 
SWAPTYPE(arr1->strides, arr2->strides, npy_intp *); + SWAPTYPE(arr1->base, arr2->base, PyObject *); + SWAPTYPE(arr1->descr, arr2->descr, PyArray_Descr *); + SWAPTYPE(arr1->flags, arr2->flags, int); /* SWAPTYPE(arr1->weakreflist,arr2->weakreflist,PyObject*); */ return 0; } -#define ARRAY_ISCOMPATIBLE(arr,type_num) \ - ( (PyArray_ISINTEGER(arr) && PyTypeNum_ISINTEGER(type_num)) \ - ||(PyArray_ISFLOAT(arr) && PyTypeNum_ISFLOAT(type_num)) \ - ||(PyArray_ISCOMPLEX(arr) && PyTypeNum_ISCOMPLEX(type_num)) \ - ||(PyArray_ISBOOL(arr) && PyTypeNum_ISBOOL(type_num)) \ - ) - -extern -PyArrayObject* array_from_pyobj(const int type_num, - npy_intp *dims, - const int rank, - const int intent, - PyObject *obj) { +#define ARRAY_ISCOMPATIBLE(arr, type_num) \ + ((PyArray_ISINTEGER(arr) && PyTypeNum_ISINTEGER(type_num)) || \ + (PyArray_ISFLOAT(arr) && PyTypeNum_ISFLOAT(type_num)) || \ + (PyArray_ISCOMPLEX(arr) && PyTypeNum_ISCOMPLEX(type_num)) || \ + (PyArray_ISBOOL(arr) && PyTypeNum_ISBOOL(type_num))) + +extern PyArrayObject * +array_from_pyobj(const int type_num, npy_intp *dims, const int rank, + const int intent, PyObject *obj) +{ /* * Note about reference counting * ----------------------------- @@ -716,27 +791,26 @@ PyArrayObject* array_from_pyobj(const int type_num, char typechar; int elsize; - if ((intent & F2PY_INTENT_HIDE) - || ((intent & F2PY_INTENT_CACHE) && (obj==Py_None)) - || ((intent & F2PY_OPTIONAL) && (obj==Py_None)) - ) { + if ((intent & F2PY_INTENT_HIDE) || + ((intent & F2PY_INTENT_CACHE) && (obj == Py_None)) || + ((intent & F2PY_OPTIONAL) && (obj == Py_None))) { /* intent(cache), optional, intent(hide) */ - if (count_negative_dimensions(rank,dims) > 0) { + if (count_negative_dimensions(rank, dims) > 0) { int i; - strcpy(mess, "failed to create intent(cache|hide)|optional array" + strcpy(mess, + "failed to create intent(cache|hide)|optional array" "-- must have defined dimensions but got ("); - for(i=0;i<rank;++i) - sprintf(mess+strlen(mess),"%" NPY_INTP_FMT ",",dims[i]); + for (i = 0; i < rank; ++i) + sprintf(mess + strlen(mess), "%" NPY_INTP_FMT ",", dims[i]); strcat(mess, ")"); - PyErr_SetString(PyExc_ValueError,mess); + PyErr_SetString(PyExc_ValueError, mess); return NULL; } - arr = (PyArrayObject *) - PyArray_New(&PyArray_Type, rank, dims, type_num, - NULL,NULL,1, - !(intent&F2PY_INTENT_C), - NULL); - if (arr==NULL) return NULL; + arr = (PyArrayObject *)PyArray_New(&PyArray_Type, rank, dims, type_num, + NULL, NULL, 1, + !(intent & F2PY_INTENT_C), NULL); + if (arr == NULL) + return NULL; if (!(intent & F2PY_INTENT_CACHE)) PyArray_FILLWBYTE(arr, 0); return arr; @@ -760,8 +834,7 @@ PyArrayObject* array_from_pyobj(const int type_num, if (intent & F2PY_INTENT_CACHE) { /* intent(cache) */ - if (PyArray_ISONESEGMENT(arr) - && PyArray_ITEMSIZE(arr)>=elsize) { + if (PyArray_ISONESEGMENT(arr) && PyArray_ITEMSIZE(arr) >= elsize) { if (check_and_fix_dimensions(arr, rank, dims)) { return NULL; } @@ -772,17 +845,17 @@ PyArrayObject* array_from_pyobj(const int type_num, strcpy(mess, "failed to initialize intent(cache) array"); if (!PyArray_ISONESEGMENT(arr)) strcat(mess, " -- input must be in one segment"); - if (PyArray_ITEMSIZE(arr)<elsize) - sprintf(mess+strlen(mess), - " -- expected at least elsize=%d but got %" NPY_INTP_FMT, - elsize, - (npy_intp)PyArray_ITEMSIZE(arr) - ); - PyErr_SetString(PyExc_ValueError,mess); + if (PyArray_ITEMSIZE(arr) < elsize) + sprintf(mess + strlen(mess), + " -- expected at least elsize=%d but got " + "%" NPY_INTP_FMT, + elsize, (npy_intp)PyArray_ITEMSIZE(arr)); + 
PyErr_SetString(PyExc_ValueError, mess); return NULL; } - /* here we have always intent(in) or intent(inout) or intent(inplace) */ + /* here we have always intent(in) or intent(inout) or intent(inplace) + */ if (check_and_fix_dimensions(arr, rank, dims)) { return NULL; @@ -794,12 +867,12 @@ PyArrayObject* array_from_pyobj(const int type_num, for (i=1;i<=16;i++) printf("i=%d isaligned=%d\n", i, ARRAY_ISALIGNED(arr, i)); */ - if ((! (intent & F2PY_INTENT_COPY)) - && PyArray_ITEMSIZE(arr)==elsize - && ARRAY_ISCOMPATIBLE(arr,type_num) - && F2PY_CHECK_ALIGNMENT(arr, intent) - ) { - if ((intent & F2PY_INTENT_C)?PyArray_ISCARRAY_RO(arr):PyArray_ISFARRAY_RO(arr)) { + if ((!(intent & F2PY_INTENT_COPY)) && + PyArray_ITEMSIZE(arr) == elsize && + ARRAY_ISCOMPATIBLE(arr, type_num) && + F2PY_CHECK_ALIGNMENT(arr, intent)) { + if ((intent & F2PY_INTENT_C) ? PyArray_ISCARRAY_RO(arr) + : PyArray_ISFARRAY_RO(arr)) { if ((intent & F2PY_INTENT_OUT)) { Py_INCREF(arr); } @@ -809,36 +882,35 @@ PyArrayObject* array_from_pyobj(const int type_num, } if (intent & F2PY_INTENT_INOUT) { strcpy(mess, "failed to initialize intent(inout) array"); - /* Must use PyArray_IS*ARRAY because intent(inout) requires writable input */ + /* Must use PyArray_IS*ARRAY because intent(inout) requires + * writable input */ if ((intent & F2PY_INTENT_C) && !PyArray_ISCARRAY(arr)) strcat(mess, " -- input not contiguous"); if (!(intent & F2PY_INTENT_C) && !PyArray_ISFARRAY(arr)) strcat(mess, " -- input not fortran contiguous"); - if (PyArray_ITEMSIZE(arr)!=elsize) - sprintf(mess+strlen(mess), + if (PyArray_ITEMSIZE(arr) != elsize) + sprintf(mess + strlen(mess), " -- expected elsize=%d but got %" NPY_INTP_FMT, - elsize, - (npy_intp)PyArray_ITEMSIZE(arr) - ); - if (!(ARRAY_ISCOMPATIBLE(arr,type_num))) - sprintf(mess+strlen(mess)," -- input '%c' not compatible to '%c'", - PyArray_DESCR(arr)->type,typechar); + elsize, (npy_intp)PyArray_ITEMSIZE(arr)); + if (!(ARRAY_ISCOMPATIBLE(arr, type_num))) + sprintf(mess + strlen(mess), + " -- input '%c' not compatible to '%c'", + PyArray_DESCR(arr)->type, typechar); if (!(F2PY_CHECK_ALIGNMENT(arr, intent))) - sprintf(mess+strlen(mess)," -- input not %d-aligned", F2PY_GET_ALIGNMENT(intent)); - PyErr_SetString(PyExc_ValueError,mess); + sprintf(mess + strlen(mess), " -- input not %d-aligned", + F2PY_GET_ALIGNMENT(intent)); + PyErr_SetString(PyExc_ValueError, mess); return NULL; } /* here we have always intent(in) or intent(inplace) */ { - PyArrayObject * retarr; - retarr = (PyArrayObject *) \ - PyArray_New(&PyArray_Type, PyArray_NDIM(arr), PyArray_DIMS(arr), type_num, - NULL,NULL,1, - !(intent&F2PY_INTENT_C), - NULL); - if (retarr==NULL) + PyArrayObject *retarr; + retarr = (PyArrayObject *)PyArray_New( + &PyArray_Type, PyArray_NDIM(arr), PyArray_DIMS(arr), + type_num, NULL, NULL, 1, !(intent & F2PY_INTENT_C), NULL); + if (retarr == NULL) return NULL; F2PY_REPORT_ON_ARRAY_COPY_FROMARR; if (PyArray_CopyInto(retarr, arr)) { @@ -846,21 +918,21 @@ PyArrayObject* array_from_pyobj(const int type_num, return NULL; } if (intent & F2PY_INTENT_INPLACE) { - if (swap_arrays(arr,retarr)) + if (swap_arrays(arr, retarr)) return NULL; /* XXX: set exception */ Py_XDECREF(retarr); if (intent & F2PY_INTENT_OUT) Py_INCREF(arr); - } else { + } + else { arr = retarr; } } return arr; } - if ((intent & F2PY_INTENT_INOUT) || - (intent & F2PY_INTENT_INPLACE) || - (intent & F2PY_INTENT_CACHE)) { + if ((intent & F2PY_INTENT_INOUT) || (intent & F2PY_INTENT_INPLACE) || + (intent & F2PY_INTENT_CACHE)) { PyErr_Format(PyExc_TypeError, "failed 
to initialize intent(inout|inplace|cache) " "array, input '%s' object is not an array", @@ -869,7 +941,7 @@ PyArrayObject* array_from_pyobj(const int type_num, } { - PyArray_Descr * descr = PyArray_DescrFromType(type_num); + PyArray_Descr *descr = PyArray_DescrFromType(type_num); /* compatibility with NPY_CHAR */ if (type_num == NPY_STRING) { PyArray_DESCR_REPLACE(descr); @@ -880,26 +952,28 @@ PyArrayObject* array_from_pyobj(const int type_num, descr->type = NPY_CHARLTR; } F2PY_REPORT_ON_ARRAY_COPY_FROMANY; - arr = (PyArrayObject *) \ - PyArray_FromAny(obj, descr, 0,0, - ((intent & F2PY_INTENT_C)?NPY_ARRAY_CARRAY:NPY_ARRAY_FARRAY) \ - | NPY_ARRAY_FORCECAST, NULL); - if (arr==NULL) + arr = (PyArrayObject *)PyArray_FromAny( + obj, descr, 0, 0, + ((intent & F2PY_INTENT_C) ? NPY_ARRAY_CARRAY + : NPY_ARRAY_FARRAY) | + NPY_ARRAY_FORCECAST, + NULL); + if (arr == NULL) return NULL; if (check_and_fix_dimensions(arr, rank, dims)) { return NULL; } return arr; } - } /*****************************************/ /* Helper functions for array_from_pyobj */ /*****************************************/ -static -int check_and_fix_dimensions(const PyArrayObject* arr, const int rank, npy_intp *dims) +static int +check_and_fix_dimensions(const PyArrayObject *arr, const int rank, + npy_intp *dims) { /* * This function fills in blanks (that are -1's) in dims list using @@ -908,13 +982,15 @@ int check_and_fix_dimensions(const PyArrayObject* arr, const int rank, npy_intp * * Returns 0 if the function is successful. * - * If an error condition is detected, an exception is set and 1 is returned. + * If an error condition is detected, an exception is set and 1 is + * returned. */ - const npy_intp arr_size = (PyArray_NDIM(arr))?PyArray_Size((PyObject *)arr):1; + const npy_intp arr_size = + (PyArray_NDIM(arr)) ? PyArray_Size((PyObject *)arr) : 1; #ifdef DEBUG_COPY_ND_ARRAY dump_attrs(arr); printf("check_and_fix_dimensions:init: dims="); - dump_dims(rank,dims); + dump_dims(rank, dims); #endif if (rank > PyArray_NDIM(arr)) { /* [1,2] -> [[1],[2]]; 1 -> [[1]] */ npy_intp new_size = 1; @@ -922,35 +998,39 @@ int check_and_fix_dimensions(const PyArrayObject* arr, const int rank, npy_intp int i; npy_intp d; /* Fill dims where -1 or 0; check dimensions; calc new_size; */ - for(i=0;i<PyArray_NDIM(arr);++i) { - d = PyArray_DIM(arr,i); + for (i = 0; i < PyArray_NDIM(arr); ++i) { + d = PyArray_DIM(arr, i); if (dims[i] >= 0) { - if (d>1 && dims[i]!=d) { - PyErr_Format(PyExc_ValueError, - "%d-th dimension must be fixed to %" - NPY_INTP_FMT " but got %" NPY_INTP_FMT "\n", - i, dims[i], d); + if (d > 1 && dims[i] != d) { + PyErr_Format( + PyExc_ValueError, + "%d-th dimension must be fixed to %" NPY_INTP_FMT + " but got %" NPY_INTP_FMT "\n", + i, dims[i], d); return 1; } - if (!dims[i]) dims[i] = 1; - } else { + if (!dims[i]) + dims[i] = 1; + } + else { dims[i] = d ? 
d : 1; } new_size *= dims[i]; } - for(i=PyArray_NDIM(arr);i<rank;++i) - if (dims[i]>1) { + for (i = PyArray_NDIM(arr); i < rank; ++i) + if (dims[i] > 1) { PyErr_Format(PyExc_ValueError, "%d-th dimension must be %" NPY_INTP_FMT " but got 0 (not defined).\n", i, dims[i]); return 1; - } else if (free_axe<0) + } + else if (free_axe < 0) free_axe = i; else dims[i] = 1; - if (free_axe>=0) { - dims[free_axe] = arr_size/new_size; + if (free_axe >= 0) { + dims[free_axe] = arr_size / new_size; new_size *= dims[free_axe]; } if (new_size != arr_size) { @@ -961,22 +1041,27 @@ int check_and_fix_dimensions(const PyArrayObject* arr, const int rank, npy_intp new_size, arr_size); return 1; } - } else if (rank==PyArray_NDIM(arr)) { + } + else if (rank == PyArray_NDIM(arr)) { npy_intp new_size = 1; int i; npy_intp d; - for (i=0; i<rank; ++i) { - d = PyArray_DIM(arr,i); - if (dims[i]>=0) { - if (d > 1 && d!=dims[i]) { - PyErr_Format(PyExc_ValueError, - "%d-th dimension must be fixed to %" - NPY_INTP_FMT " but got %" NPY_INTP_FMT "\n", - i, dims[i], d); + for (i = 0; i < rank; ++i) { + d = PyArray_DIM(arr, i); + if (dims[i] >= 0) { + if (d > 1 && d != dims[i]) { + PyErr_Format( + PyExc_ValueError, + "%d-th dimension must be fixed to %" NPY_INTP_FMT + " but got %" NPY_INTP_FMT "\n", + i, dims[i], d); return 1; } - if (!dims[i]) dims[i] = 1; - } else dims[i] = d; + if (!dims[i]) + dims[i] = 1; + } + else + dims[i] = d; new_size *= dims[i]; } if (new_size != arr_size) { @@ -986,15 +1071,17 @@ int check_and_fix_dimensions(const PyArrayObject* arr, const int rank, npy_intp new_size, arr_size); return 1; } - } else { /* [[1,2]] -> [[1],[2]] */ - int i,j; + } + else { /* [[1,2]] -> [[1],[2]] */ + int i, j; npy_intp d; int effrank; npy_intp size; - for (i=0,effrank=0;i<PyArray_NDIM(arr);++i) - if (PyArray_DIM(arr,i)>1) ++effrank; - if (dims[rank-1]>=0) - if (effrank>rank) { + for (i = 0, effrank = 0; i < PyArray_NDIM(arr); ++i) + if (PyArray_DIM(arr, i) > 1) + ++effrank; + if (dims[rank - 1] >= 0) + if (effrank > rank) { PyErr_Format(PyExc_ValueError, "too many axes: %d (effrank=%d), " "expected rank=%d\n", @@ -1002,31 +1089,38 @@ int check_and_fix_dimensions(const PyArrayObject* arr, const int rank, npy_intp return 1; } - for (i=0,j=0;i<rank;++i) { - while (j<PyArray_NDIM(arr) && PyArray_DIM(arr,j)<2) ++j; - if (j>=PyArray_NDIM(arr)) d = 1; - else d = PyArray_DIM(arr,j++); - if (dims[i]>=0) { - if (d>1 && d!=dims[i]) { - PyErr_Format(PyExc_ValueError, - "%d-th dimension must be fixed to %" - NPY_INTP_FMT " but got %" NPY_INTP_FMT - " (real index=%d)\n", - i, dims[i], d, j-1); + for (i = 0, j = 0; i < rank; ++i) { + while (j < PyArray_NDIM(arr) && PyArray_DIM(arr, j) < 2) ++j; + if (j >= PyArray_NDIM(arr)) + d = 1; + else + d = PyArray_DIM(arr, j++); + if (dims[i] >= 0) { + if (d > 1 && d != dims[i]) { + PyErr_Format( + PyExc_ValueError, + "%d-th dimension must be fixed to %" NPY_INTP_FMT + " but got %" NPY_INTP_FMT " (real index=%d)\n", + i, dims[i], d, j - 1); return 1; } - if (!dims[i]) dims[i] = 1; - } else + if (!dims[i]) + dims[i] = 1; + } + else dims[i] = d; } - for (i=rank;i<PyArray_NDIM(arr);++i) { /* [[1,2],[3,4]] -> [1,2,3,4] */ - while (j<PyArray_NDIM(arr) && PyArray_DIM(arr,j)<2) ++j; - if (j>=PyArray_NDIM(arr)) d = 1; - else d = PyArray_DIM(arr,j++); - dims[rank-1] *= d; + for (i = rank; i < PyArray_NDIM(arr); + ++i) { /* [[1,2],[3,4]] -> [1,2,3,4] */ + while (j < PyArray_NDIM(arr) && PyArray_DIM(arr, j) < 2) ++j; + if (j >= PyArray_NDIM(arr)) + d = 1; + else + d = PyArray_DIM(arr, j++); + dims[rank - 1] 
*= d; } - for (i=0,size=1;i<rank;++i) size *= dims[i]; + for (i = 0, size = 1; i < rank; ++i) size *= dims[i]; if (size != arr_size) { char msg[200]; int len; @@ -1037,15 +1131,15 @@ int check_and_fix_dimensions(const PyArrayObject* arr, const int rank, npy_intp size, arr_size, rank, effrank, PyArray_NDIM(arr)); for (i = 0; i < rank; ++i) { len = strlen(msg); - snprintf(msg + len, sizeof(msg) - len, - " %" NPY_INTP_FMT, dims[i]); + snprintf(msg + len, sizeof(msg) - len, " %" NPY_INTP_FMT, + dims[i]); } len = strlen(msg); snprintf(msg + len, sizeof(msg) - len, " ], arr.dims=["); for (i = 0; i < PyArray_NDIM(arr); ++i) { len = strlen(msg); - snprintf(msg + len, sizeof(msg) - len, - " %" NPY_INTP_FMT, PyArray_DIM(arr, i)); + snprintf(msg + len, sizeof(msg) - len, " %" NPY_INTP_FMT, + PyArray_DIM(arr, i)); } len = strlen(msg); snprintf(msg + len, sizeof(msg) - len, " ]\n"); @@ -1055,7 +1149,7 @@ int check_and_fix_dimensions(const PyArrayObject* arr, const int rank, npy_intp } #ifdef DEBUG_COPY_ND_ARRAY printf("check_and_fix_dimensions:end: dims="); - dump_dims(rank,dims); + dump_dims(rank, dims); #endif return 0; } @@ -1064,8 +1158,8 @@ int check_and_fix_dimensions(const PyArrayObject* arr, const int rank, npy_intp /************************* copy_ND_array *******************************/ -extern -int copy_ND_array(const PyArrayObject *arr, PyArrayObject *out) +extern int +copy_ND_array(const PyArrayObject *arr, PyArrayObject *out) { F2PY_REPORT_ON_ARRAY_COPY_FROMARR; return PyArray_CopyInto(out, (PyArrayObject *)arr); diff --git a/numpy/f2py/src/fortranobject.h b/numpy/f2py/src/fortranobject.h index d4cc10243..a1e9fdbdf 100644 --- a/numpy/f2py/src/fortranobject.h +++ b/numpy/f2py/src/fortranobject.h @@ -4,7 +4,7 @@ extern "C" { #endif -#include "Python.h" +#include <Python.h> #ifdef FORTRANOBJECT_C #define NO_IMPORT_ARRAY @@ -13,18 +13,19 @@ extern "C" { #include "numpy/arrayobject.h" #include "numpy/npy_3kcompat.h" - #ifdef F2PY_REPORT_ATEXIT #include <sys/timeb.h> - extern void f2py_start_clock(void); - extern void f2py_stop_clock(void); - extern void f2py_start_call_clock(void); - extern void f2py_stop_call_clock(void); - extern void f2py_cb_start_clock(void); - extern void f2py_cb_stop_clock(void); - extern void f2py_cb_start_call_clock(void); - extern void f2py_cb_stop_call_clock(void); - extern void f2py_report_on_exit(int,void*); +// clang-format off +extern void f2py_start_clock(void); +extern void f2py_stop_clock(void); +extern void f2py_start_call_clock(void); +extern void f2py_stop_call_clock(void); +extern void f2py_cb_start_clock(void); +extern void f2py_cb_stop_clock(void); +extern void f2py_cb_start_call_clock(void); +extern void f2py_cb_stop_call_clock(void); +extern void f2py_report_on_exit(int, void *); +// clang-format on #endif #ifdef DMALLOC @@ -44,50 +45,60 @@ Author: Pearu Peterson <pearu@cens.ioc.ee> #define F2PY_MAX_DIMS 40 -typedef void (*f2py_set_data_func)(char*,npy_intp*); +typedef void (*f2py_set_data_func)(char *, npy_intp *); typedef void (*f2py_void_func)(void); -typedef void (*f2py_init_func)(int*,npy_intp*,f2py_set_data_func,int*); +typedef void (*f2py_init_func)(int *, npy_intp *, f2py_set_data_func, int *); - /*typedef void* (*f2py_c_func)(void*,...);*/ +/*typedef void* (*f2py_c_func)(void*,...);*/ typedef void *(*f2pycfunc)(void); typedef struct { - char *name; /* attribute (array||routine) name */ - int rank; /* array rank, 0 for scalar, max is F2PY_MAX_DIMS, - || rank=-1 for Fortran routine */ - struct {npy_intp d[F2PY_MAX_DIMS];} dims; /* dimensions of 
the array, || not used */ - int type; /* PyArray_<type> || not used */ - char *data; /* pointer to array || Fortran routine */ - f2py_init_func func; /* initialization function for - allocatable arrays: - func(&rank,dims,set_ptr_func,name,len(name)) - || C/API wrapper for Fortran routine */ - char *doc; /* documentation string; only recommended - for routines. */ + char *name; /* attribute (array||routine) name */ + int rank; /* array rank, 0 for scalar, max is F2PY_MAX_DIMS, + || rank=-1 for Fortran routine */ + struct { + npy_intp d[F2PY_MAX_DIMS]; + } dims; /* dimensions of the array, || not used */ + int type; /* PyArray_<type> || not used */ + char *data; /* pointer to array || Fortran routine */ + f2py_init_func func; /* initialization function for + allocatable arrays: + func(&rank,dims,set_ptr_func,name,len(name)) + || C/API wrapper for Fortran routine */ + char *doc; /* documentation string; only recommended + for routines. */ } FortranDataDef; typedef struct { - PyObject_HEAD - int len; /* Number of attributes */ - FortranDataDef *defs; /* An array of FortranDataDef's */ - PyObject *dict; /* Fortran object attribute dictionary */ + PyObject_HEAD + int len; /* Number of attributes */ + FortranDataDef *defs; /* An array of FortranDataDef's */ + PyObject *dict; /* Fortran object attribute dictionary */ } PyFortranObject; #define PyFortran_Check(op) (Py_TYPE(op) == &PyFortran_Type) -#define PyFortran_Check1(op) (0==strcmp(Py_TYPE(op)->tp_name,"fortran")) - - extern PyTypeObject PyFortran_Type; - extern int F2PyDict_SetItemString(PyObject* dict, char *name, PyObject *obj); - extern PyObject * PyFortranObject_New(FortranDataDef* defs, f2py_void_func init); - extern PyObject * PyFortranObject_NewAsAttr(FortranDataDef* defs); - -PyObject * F2PyCapsule_FromVoidPtr(void *ptr, void (*dtor)(PyObject *)); -void * F2PyCapsule_AsVoidPtr(PyObject *obj); -int F2PyCapsule_Check(PyObject *ptr); - -extern void *F2PySwapThreadLocalCallbackPtr(char *key, void *ptr); -extern void *F2PyGetThreadLocalCallbackPtr(char *key); +#define PyFortran_Check1(op) (0 == strcmp(Py_TYPE(op)->tp_name, "fortran")) + +extern PyTypeObject PyFortran_Type; +extern int +F2PyDict_SetItemString(PyObject *dict, char *name, PyObject *obj); +extern PyObject * +PyFortranObject_New(FortranDataDef *defs, f2py_void_func init); +extern PyObject * +PyFortranObject_NewAsAttr(FortranDataDef *defs); + +PyObject * +F2PyCapsule_FromVoidPtr(void *ptr, void (*dtor)(PyObject *)); +void * +F2PyCapsule_AsVoidPtr(PyObject *obj); +int +F2PyCapsule_Check(PyObject *ptr); + +extern void * +F2PySwapThreadLocalCallbackPtr(char *key, void *ptr); +extern void * +F2PyGetThreadLocalCallbackPtr(char *key); #define ISCONTIGUOUS(m) (PyArray_FLAGS(m) & NPY_ARRAY_C_CONTIGUOUS) #define F2PY_INTENT_IN 1 @@ -109,23 +120,23 @@ extern void *F2PyGetThreadLocalCallbackPtr(char *key); #define F2PY_ALIGN16(intent) (intent & F2PY_INTENT_ALIGNED16) #define F2PY_GET_ALIGNMENT(intent) \ - (F2PY_ALIGN4(intent) ? 4 : \ - (F2PY_ALIGN8(intent) ? 8 : \ - (F2PY_ALIGN16(intent) ? 16 : 1) )) -#define F2PY_CHECK_ALIGNMENT(arr, intent) ARRAY_ISALIGNED(arr, F2PY_GET_ALIGNMENT(intent)) - - extern PyArrayObject* array_from_pyobj(const int type_num, - npy_intp *dims, - const int rank, - const int intent, - PyObject *obj); - extern int copy_ND_array(const PyArrayObject *in, PyArrayObject *out); + (F2PY_ALIGN4(intent) \ + ? 4 \ + : (F2PY_ALIGN8(intent) ? 8 : (F2PY_ALIGN16(intent) ? 
16 : 1))) +#define F2PY_CHECK_ALIGNMENT(arr, intent) \ + ARRAY_ISALIGNED(arr, F2PY_GET_ALIGNMENT(intent)) + +extern PyArrayObject * +array_from_pyobj(const int type_num, npy_intp *dims, const int rank, + const int intent, PyObject *obj); +extern int +copy_ND_array(const PyArrayObject *in, PyArrayObject *out); #ifdef DEBUG_COPY_ND_ARRAY - extern void dump_attrs(const PyArrayObject* arr); +extern void +dump_attrs(const PyArrayObject *arr); #endif - #ifdef __cplusplus } #endif diff --git a/numpy/f2py/src/test/Makefile b/numpy/f2py/src/test/Makefile deleted file mode 100644 index 0f8869f72..000000000 --- a/numpy/f2py/src/test/Makefile +++ /dev/null @@ -1,96 +0,0 @@ -# -*- makefile -*- -# File: Makefile-foo -# Usage: -# make -f Makefile-foo [MODE=opt|debug] -# Notes: -# 1) You must use GNU make; try `gmake ..' if `make' fails. -# 2) This file is auto-generated with f2py (version 2.264). -# f2py is a Fortran to Python Interface Generator (FPIG), Second Edition, -# written by Pearu Peterson <pearu@ioc.ee>. -# See http://cens.ioc.ee/projects/f2py2e/ -# Generation date: Wed Sep 13 16:22:55 2000 -# $Revision: 1.2 $ -# $Date: 2000/09/17 16:10:27 $ - -# Recommendation notes produced by f2py2e/buildmakefile.py: -# *** - -PYINC = -I/numeric/include/python1.5/Numeric -I/numeric/include/python1.5 -INCLUDES = -I.. -LIBS = -L$(shell gcc -v 2>&1 | grep specs | sed -e 's/Reading specs from //g' | sed -e 's/\/specs//g') -lg2c -LIBS=-L$$ABSOFT/lib -lfio -lf77math -lf90math -LIBS=-L/numeric/bin -lvast90 -L/usr/lib/gcc-lib/i586-mandrake-linux/2.95.2 -lg2c - -# Wrapper generator: -F2PY = /home/pearu/bin/f2py-cvs - -# Fortran compiler: Absoft f95 -FC = f95 -FC = f90 -FOPT = -FDEBUG = -FFLAGS = -B108 -YCFRL=1 -YCOM_NAMES=LCS -YCOM_PFX -YCOM_SFX=_ -YEXT_PFX -YEXT_NAMES=LCS -FFLAGS = -# C compiler: cc ('gcc 2.x.x' 2.95.2) -CC = cc -COPT = -CDEBUG = -CFLAGS = -fpic - -# Linker: ld ('GNU ld' 2.9.5) -LD = ld -LDFLAGS = -shared -s -SO = .so - -ifeq '$(MODE)' 'debug' -FFLAGS += $(FDEBUG) -CFLAGS += $(CDEBUG) -endif -ifeq '$(MODE)' 'opt' -FFLAGS += $(FOPT) -CFLAGS += $(COPT) -endif -FFLAGS += $(INCLUDES) -CFLAGS += $(PYINC) $(INCLUDES) - -SRCC = ../fortranobject.c -SRCF = mod.f90 bar.f foo90.f90 wrap.f -SRCS = $(SRCC) $(SRCF) -OBJC = $(filter %.o,$(SRCC:.c=.o) $(SRCC:.cc=.o) $(SRCC:.C=.o)) -OBJF = $(filter %.o,$(SRCF:.f90=.o) $(SRCF:.f=.o) $(SRCF:.F=.o) $(SRCF:.for=.o)) -OBJS = $(OBJC) $(OBJF) - -INSTALLNAME = f2py2e-apps -INSTALLDIRECTORY = /numeric/lib/python1.5/site-packages/$(INSTALLNAME) -INSTALLDIR = install -d -c -INSTALLEXEC = install -m 755 -c - -all: foo - -foo: foomodule$(SO) -foomodule$(SO) : foomodule.o $(OBJS) - $(LD) $(LDFLAGS) -o $@ $< $(OBJS) $(LIBS) - -foomodule.o: foomodule.c - - -$(OBJS) : $(SRCS) -%.o : %.f ; $(FC) -c $(FFLAGS) $< -%.o : %.f90 ; $(FC) -c $(FFLAGS) $< - -test: foomodule$(SO) - python -c 'import foo;print foo.__doc__' -install: foomodule$(SO) - $(INSTALLDIR) $(INSTALLDIRECTORY) - $(INSTALLEXEC) foomodule$(SO) $(INSTALLDIRECTORY) - cd $(INSTALLDIRECTORY) && echo "$(INSTALLNAME)" > ../$(INSTALLNAME).pth - -.PHONY: clean distclean debug test install foo -debug: - echo "OBJS=$(OBJS)" - echo "SRCS=$(SRCS)" -clean: - $(RM) *.o *.mod core foomodule.{dvi,log} $(OBJS) -distclean: clean - $(RM) *.so *.sl foomodule.{tex,so} - $(RM) .f2py_get_compiler_* diff --git a/numpy/f2py/src/test/bar.f b/numpy/f2py/src/test/bar.f deleted file mode 100644 index 5354ceaf9..000000000 --- a/numpy/f2py/src/test/bar.f +++ /dev/null @@ -1,11 +0,0 @@ - subroutine bar() - integer a - real*8 b,c(3) - common /foodata/ 
a,b,c - a = 4 - b = 6.7 - c(2) = 3.0 - write(*,*) "bar:a=",a - write(*,*) "bar:b=",b - write(*,*) "bar:c=",c - end diff --git a/numpy/f2py/src/test/foo.f b/numpy/f2py/src/test/foo.f deleted file mode 100644 index 5354ceaf9..000000000 --- a/numpy/f2py/src/test/foo.f +++ /dev/null @@ -1,11 +0,0 @@ - subroutine bar() - integer a - real*8 b,c(3) - common /foodata/ a,b,c - a = 4 - b = 6.7 - c(2) = 3.0 - write(*,*) "bar:a=",a - write(*,*) "bar:b=",b - write(*,*) "bar:c=",c - end diff --git a/numpy/f2py/src/test/foo90.f90 b/numpy/f2py/src/test/foo90.f90 deleted file mode 100644 index dbca7e95b..000000000 --- a/numpy/f2py/src/test/foo90.f90 +++ /dev/null @@ -1,13 +0,0 @@ -subroutine foo() - integer a - real*8 b,c(3) - common /foodata/ a,b,c - print*, " F: in foo" - a = 5 - b = 6.3 - c(2) = 9.1 -end subroutine foo - - - - diff --git a/numpy/f2py/src/test/foomodule.c b/numpy/f2py/src/test/foomodule.c deleted file mode 100644 index 88ec62440..000000000 --- a/numpy/f2py/src/test/foomodule.c +++ /dev/null @@ -1,148 +0,0 @@ -/* File: foomodule.c - * Example of FortranObject usage. See also wrap.f foo.f foo90.f90. - * Author: Pearu Peterson <pearu@ioc.ee>. - * http://cens.ioc.ee/projects/f2py2e/ - * $Revision: 1.2 $ - * $Date: 2000/09/17 16:10:27 $ - */ -#ifdef __cplusplus -extern "C" { -#endif - -#include "Python.h" -#include "fortranobject.h" - -static PyObject *foo_error; - -#if defined(NO_APPEND_FORTRAN) -#if defined(UPPERCASE_FORTRAN) -#define F_FUNC(f,F) F -#else -#define F_FUNC(f,F) f -#endif -#else -#if defined(UPPERCASE_FORTRAN) -#define F_FUNC(f,F) F##_ -#else -#define F_FUNC(f,F) f##_ -#endif -#endif - -/************* foo_bar *************/ -static char doc_foo_bar[] = "\ -Function signature:\n\ - bar()\n\ -"; -static PyObject *foo_bar(PyObject *capi_self, PyObject *capi_args, - PyObject *capi_keywds, void (*f2py_func)()) { - PyObject *capi_buildvalue = NULL; - static char *capi_kwlist[] = {NULL}; - if (!PyArg_ParseTupleAndKeywords(capi_args,capi_keywds,\ - "|:foo.bar",\ - capi_kwlist)) - goto capi_fail; - (*f2py_func)(); - capi_buildvalue = Py_BuildValue(""); - capi_fail: - return capi_buildvalue; -} -/************ mod_init **************/ -static PyObject *mod_init(PyObject *capi_self, PyObject *capi_args, - PyObject *capi_keywds, void (*f2py_func)()) { - PyObject *capi_buildvalue = NULL; - static char *capi_kwlist[] = {NULL}; - if (!PyArg_ParseTupleAndKeywords(capi_args,capi_keywds,\ - "|:mod.init",\ - capi_kwlist)) - goto capi_fail; - (*f2py_func)(); - capi_buildvalue = Py_BuildValue(""); - capi_fail: - return capi_buildvalue; -} - -/* F90 module */ -static FortranDataDef f2py_mod_def[] = { - {"a",0, {}, NPY_INT}, - {"b",0, {}, NPY_DOUBLE}, - {"c",1, {3}, NPY_DOUBLE}, - {"d",1, {-1}, NPY_DOUBLE}, - {"init",-1,{},0,NULL,(void *)mod_init}, - {NULL} -}; -static void f2py_setup_mod(char *a,char *b,char *c,void (*d)(),char *init) { - f2py_mod_def[0].data = a; - f2py_mod_def[1].data = b; - f2py_mod_def[2].data = c; - f2py_mod_def[3].func = d; - f2py_mod_def[4].data = init; -} -extern void F_FUNC(f2pyinitmod,F2PYINITMOD)(); - static void f2py_init_mod() { - F_FUNC(f2pyinitmod,F2PYINITMOD)(f2py_setup_mod); - } - -/* COMMON block */ -static FortranDataDef f2py_foodata_def[] = { - {"a",0, {}, NPY_INT}, - {"b",0, {}, NPY_DOUBLE}, - {"c",1, {3}, NPY_DOUBLE}, - {NULL} -}; -static void f2py_setup_foodata(char *a,char *b,char *c) { - f2py_foodata_def[0].data = a; - f2py_foodata_def[1].data = b; - f2py_foodata_def[2].data = c; -} -extern void F_FUNC(f2pyinitfoodata,F2PYINITFOODATA)(); - static void 
f2py_init_foodata() { - F_FUNC(f2pyinitfoodata,F2PYINITFOODATA)(f2py_setup_foodata); - } - -/* Fortran routines (needs no initialization/setup function) */ -extern void F_FUNC(bar,BAR)(); - extern void F_FUNC(foo,FOO)(); - static FortranDataDef f2py_routines_def[] = { - {"bar",-1, {}, 0, (char *)F_FUNC(bar,BAR),(void *)foo_bar,doc_foo_bar}, - {"foo",-1, {}, 0, (char *)F_FUNC(foo,FOO),(void *)foo_bar,doc_foo_bar}, - {NULL} - }; - -static PyMethodDef foo_module_methods[] = { - /*eof method*/ - {NULL,NULL} -}; - -void initfoo() { - int i; - PyObject *m, *d, *s, *tmp; - import_array(); - - m = Py_InitModule("foo", foo_module_methods); - - d = PyModule_GetDict(m); - s = PyUnicode_FromString("This module 'foo' demonstrates the usage of fortranobject."); - PyDict_SetItemString(d, "__doc__", s); - - /* Fortran objects: */ - tmp = PyFortranObject_New(f2py_mod_def,f2py_init_mod); - PyDict_SetItemString(d, "mod", tmp); - Py_DECREF(tmp); - tmp = PyFortranObject_New(f2py_foodata_def,f2py_init_foodata); - PyDict_SetItemString(d, "foodata", tmp); - Py_DECREF(tmp); - for(i=0;f2py_routines_def[i].name!=NULL;i++) { - tmp = PyFortranObject_NewAsAttr(&f2py_routines_def[i]); - PyDict_SetItemString(d, f2py_routines_def[i].name, tmp); - Py_DECREF(tmp); - } - - Py_DECREF(s); - - if (PyErr_Occurred()) - Py_FatalError("can't initialize module foo"); -} - -#ifdef __cplusplus -} -#endif diff --git a/numpy/f2py/src/test/wrap.f b/numpy/f2py/src/test/wrap.f deleted file mode 100644 index 9414eb9f6..000000000 --- a/numpy/f2py/src/test/wrap.f +++ /dev/null @@ -1,70 +0,0 @@ - subroutine f2py_mod_get_dims(f2py_r,f2py_s,f2py_set,f2py_n) - use mod - external f2py_set - logical f2py_ns - integer f2py_s(*),f2py_r,f2py_i,f2py_j - character*(*) f2py_n - if ("d".eq.f2py_n) then - f2py_ns = .FALSE. - if (allocated(d)) then - do f2py_i=1,f2py_r - if ((size(d,f2py_r-f2py_i+1).ne.f2py_s(f2py_i)).and. - c (f2py_s(f2py_i).ge.0)) then - f2py_ns = .TRUE. - end if - end do - if (f2py_ns) then - deallocate(d) - end if - end if - if (.not.allocated(d)) then - allocate(d(f2py_s(1))) - end if - if (allocated(d)) then - do f2py_i=1,f2py_r - f2py_s(f2py_i) = size(d,f2py_r-f2py_i+1) - end do - call f2py_set(d) - end if - end if - end subroutine f2py_mod_get_dims - subroutine f2py_mod_get_dims_d(r,s,set_data) - use mod, only: d => d - external set_data - logical ns - integer s(*),r,i,j - ns = .FALSE. - if (allocated(d)) then - do i=1,r - if ((size(d,r-i+1).ne.s(i)).and.(s(i).ge.0)) then - ns = .TRUE. - end if - end do - if (ns) then - deallocate(d) - end if - end if - if (.not.allocated(d).and.(s(1).ge.1)) then - allocate(d(s(1))) - end if - if (allocated(d)) then - do i=1,r - s(i) = size(d,r-i+1) - end do - end if - call set_data(d,allocated(d)) - end subroutine f2py_mod_get_dims_d - - subroutine f2pyinitmod(setupfunc) - use mod - external setupfunc,f2py_mod_get_dims_d,init - call setupfunc(a,b,c,f2py_mod_get_dims_d,init) - end subroutine f2pyinitmod - - subroutine f2pyinitfoodata(setupfunc) - external setupfunc - integer a - real*8 b,c(3) - common /foodata/ a,b,c - call setupfunc(a,b,c) - end subroutine f2pyinitfoodata diff --git a/numpy/f2py/symbolic.py b/numpy/f2py/symbolic.py new file mode 100644 index 000000000..1b7b35458 --- /dev/null +++ b/numpy/f2py/symbolic.py @@ -0,0 +1,1510 @@ +"""Fortran/C symbolic expressions + +References: +- J3/21-007: Draft Fortran 202x. 
https://j3-fortran.org/doc/year/21/21-007.pdf
+"""
+
+# To analyze Fortran expressions (for instance, to solve dimension
+# specifications), we implement a minimal symbolic engine that parses
+# expressions into a tree of expression instances. As a first step,
+# we care only about arithmetic expressions involving integers and
+# operations like addition (+), subtraction (-), multiplication (*),
+# division (Fortran / is Python //, Fortran // is concatenation), and
+# exponentiation (**). In addition, .pyf files may contain C
+# expressions; support for these is implemented here as well.
+#
+# TODO: support logical constants (Op.BOOLEAN)
+# TODO: support logical operators (.AND., ...)
+# TODO: support defined operators (.MYOP., ...)
+#
+__all__ = ['Expr']
+
+
+import re
+import warnings
+from enum import Enum
+from math import gcd
+
+
+class Language(Enum):
+    """
+    Used as Expr.tostring language argument.
+    """
+    Python = 0
+    Fortran = 1
+    C = 2
+
+
+class Op(Enum):
+    """
+    Used as Expr op attribute.
+    """
+    INTEGER = 10
+    REAL = 12
+    COMPLEX = 15
+    STRING = 20
+    ARRAY = 30
+    SYMBOL = 40
+    TERNARY = 100
+    APPLY = 200
+    INDEXING = 210
+    CONCAT = 220
+    RELATIONAL = 300
+    TERMS = 1000
+    FACTORS = 2000
+    REF = 3000
+    DEREF = 3001
+
+
+class RelOp(Enum):
+    """
+    Used in Op.RELATIONAL expressions to specify the function part.
+    """
+    EQ = 1
+    NE = 2
+    LT = 3
+    LE = 4
+    GT = 5
+    GE = 6
+
+    @classmethod
+    def fromstring(cls, s, language=Language.C):
+        if language is Language.Fortran:
+            return {'.eq.': RelOp.EQ, '.ne.': RelOp.NE,
+                    '.lt.': RelOp.LT, '.le.': RelOp.LE,
+                    '.gt.': RelOp.GT, '.ge.': RelOp.GE}[s.lower()]
+        return {'==': RelOp.EQ, '!=': RelOp.NE, '<': RelOp.LT,
+                '<=': RelOp.LE, '>': RelOp.GT, '>=': RelOp.GE}[s]
+
+    def tostring(self, language=Language.C):
+        if language is Language.Fortran:
+            return {RelOp.EQ: '.eq.', RelOp.NE: '.ne.',
+                    RelOp.LT: '.lt.', RelOp.LE: '.le.',
+                    RelOp.GT: '.gt.', RelOp.GE: '.ge.'}[self]
+        return {RelOp.EQ: '==', RelOp.NE: '!=',
+                RelOp.LT: '<', RelOp.LE: '<=',
+                RelOp.GT: '>', RelOp.GE: '>='}[self]
+
+
+class ArithOp(Enum):
+    """
+    Used in Op.APPLY expressions to specify the function part.
+    """
+    POS = 1
+    NEG = 2
+    ADD = 3
+    SUB = 4
+    MUL = 5
+    DIV = 6
+    POW = 7
+
+
+class OpError(Exception):
+    pass
+
+
+class Precedence(Enum):
+    """
+    Used as Expr.tostring precedence argument.
+    """
+    ATOM = 0
+    POWER = 1
+    UNARY = 2
+    PRODUCT = 3
+    SUM = 4
+    LT = 6
+    EQ = 7
+    LAND = 11
+    LOR = 12
+    TERNARY = 13
+    ASSIGN = 14
+    TUPLE = 15
+    NONE = 100
+
+
+integer_types = (int,)
+number_types = (int, float)
+
+
+def _pairs_add(d, k, v):
+    # Internal utility for updating terms and factors data.
+    c = d.get(k)
+    if c is None:
+        d[k] = v
+    else:
+        c = c + v
+        if c:
+            d[k] = c
+        else:
+            del d[k]
+
+
+class ExprWarning(UserWarning):
+    pass
+
+
+def ewarn(message):
+    warnings.warn(message, ExprWarning, stacklevel=2)
+
+
+class Expr:
+    """Represents a Fortran expression as an op-data pair.
+
+    Expr instances are hashable and sortable.
+    """
+
+    @staticmethod
+    def parse(s, language=Language.C):
+        """Parse a Fortran expression to an Expr.
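+
+        A hedged usage sketch (it relies only on names defined in this
+        module; the printed form is indicative, not exact):
+
+            e = Expr.parse('a + 2 * a')       # dispatches to fromstring
+            e.tostring(language=Language.Fortran)   # roughly '3 * a'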
+        """
+        return fromstring(s, language=language)
+
+    def __init__(self, op, data):
+        assert isinstance(op, Op)
+
+        # sanity checks
+        if op is Op.INTEGER:
+            # data is a 2-tuple of numeric object and a kind value
+            # (default is 4)
+            assert isinstance(data, tuple) and len(data) == 2
+            assert isinstance(data[0], int)
+            assert isinstance(data[1], (int, str)), data
+        elif op is Op.REAL:
+            # data is a 2-tuple of numeric object and a kind value
+            # (default is 4)
+            assert isinstance(data, tuple) and len(data) == 2
+            assert isinstance(data[0], float)
+            assert isinstance(data[1], (int, str)), data
+        elif op is Op.COMPLEX:
+            # data is a 2-tuple of constant expressions
+            assert isinstance(data, tuple) and len(data) == 2
+        elif op is Op.STRING:
+            # data is a 2-tuple of quoted string and a kind value
+            # (default is 1)
+            assert isinstance(data, tuple) and len(data) == 2
+            assert (isinstance(data[0], str)
+                    and data[0][::len(data[0])-1] in ('""', "''", '@@'))
+            assert isinstance(data[1], (int, str)), data
+        elif op is Op.SYMBOL:
+            # data is any hashable object
+            assert hash(data) is not None
+        elif op in (Op.ARRAY, Op.CONCAT):
+            # data is a tuple of expressions
+            assert isinstance(data, tuple)
+            assert all(isinstance(item, Expr) for item in data), data
+        elif op in (Op.TERMS, Op.FACTORS):
+            # data is {<term|base>:<coeff|exponent>} where dict values
+            # are nonzero Python integers
+            assert isinstance(data, dict)
+        elif op is Op.APPLY:
+            # data is (<function>, <operands>, <kwoperands>) where
+            # operands are Expr instances
+            assert isinstance(data, tuple) and len(data) == 3
+            # function is any hashable object
+            assert hash(data[0]) is not None
+            assert isinstance(data[1], tuple)
+            assert isinstance(data[2], dict)
+        elif op is Op.INDEXING:
+            # data is (<object>, <indices>)
+            assert isinstance(data, tuple) and len(data) == 2
+            # the indexed object is any hashable object
+            assert hash(data[0]) is not None
+        elif op is Op.TERNARY:
+            # data is (<cond>, <expr1>, <expr2>)
+            assert isinstance(data, tuple) and len(data) == 3
+        elif op in (Op.REF, Op.DEREF):
+            # data is Expr instance
+            assert isinstance(data, Expr)
+        elif op is Op.RELATIONAL:
+            # data is (<relop>, <left>, <right>)
+            assert isinstance(data, tuple) and len(data) == 3
+        else:
+            raise NotImplementedError(
+                f'unknown op or missing sanity check: {op}')
+
+        self.op = op
+        self.data = data
+
+    def __eq__(self, other):
+        return (isinstance(other, Expr)
+                and self.op is other.op
+                and self.data == other.data)
+
+    def __hash__(self):
+        if self.op in (Op.TERMS, Op.FACTORS):
+            data = tuple(sorted(self.data.items()))
+        elif self.op is Op.APPLY:
+            data = self.data[:2] + tuple(sorted(self.data[2].items()))
+        else:
+            data = self.data
+        return hash((self.op, data))
+
+    def __lt__(self, other):
+        if isinstance(other, Expr):
+            if self.op is not other.op:
+                return self.op.value < other.op.value
+            if self.op in (Op.TERMS, Op.FACTORS):
+                return (tuple(sorted(self.data.items()))
+                        < tuple(sorted(other.data.items())))
+            if self.op is Op.APPLY:
+                if self.data[:2] != other.data[:2]:
+                    return self.data[:2] < other.data[:2]
+                return tuple(sorted(self.data[2].items())) < tuple(
+                    sorted(other.data[2].items()))
+            return self.data < other.data
+        return NotImplemented
+
+    def __le__(self, other): return self == other or self < other
+
+    def __gt__(self, other): return not (self <= other)
+
+    def __ge__(self, other): return not (self < other)
+
+    def __repr__(self):
+        return f'{type(self).__name__}({self.op}, {self.data!r})'
+
+    def __str__(self):
+        return self.tostring()
+
+    def tostring(self, parent_precedence=Precedence.NONE,
+                 language=Language.Fortran):
+        """Return a string representation of Expr.
+        """
+        if self.op in (Op.INTEGER, Op.REAL):
+            precedence = (Precedence.SUM if self.data[0] < 0
+                          else Precedence.ATOM)
+            r = str(self.data[0]) + (f'_{self.data[1]}'
+                                     if self.data[1] != 4 else '')
+        elif self.op is Op.COMPLEX:
+            r = ', '.join(item.tostring(Precedence.TUPLE, language=language)
+                          for item in self.data)
+            r = '(' + r + ')'
+            precedence = Precedence.ATOM
+        elif self.op is Op.SYMBOL:
+            precedence = Precedence.ATOM
+            r = str(self.data)
+        elif self.op is Op.STRING:
+            r = self.data[0]
+            if self.data[1] != 1:
+                r = self.data[1] + '_' + r
+            precedence = Precedence.ATOM
+        elif self.op is Op.ARRAY:
+            r = ', '.join(item.tostring(Precedence.TUPLE, language=language)
+                          for item in self.data)
+            r = '[' + r + ']'
+            precedence = Precedence.ATOM
+        elif self.op is Op.TERMS:
+            terms = []
+            for term, coeff in sorted(self.data.items()):
+                if coeff < 0:
+                    op = ' - '
+                    coeff = -coeff
+                else:
+                    op = ' + '
+                if coeff == 1:
+                    term = term.tostring(Precedence.SUM, language=language)
+                else:
+                    if term == as_number(1):
+                        term = str(coeff)
+                    else:
+                        term = f'{coeff} * ' + term.tostring(
+                            Precedence.PRODUCT, language=language)
+                if terms:
+                    terms.append(op)
+                elif op == ' - ':
+                    terms.append('-')
+                terms.append(term)
+            r = ''.join(terms) or '0'
+            precedence = Precedence.SUM if terms else Precedence.ATOM
+        elif self.op is Op.FACTORS:
+            factors = []
+            tail = []
+            for base, exp in sorted(self.data.items()):
+                op = ' * '
+                if exp == 1:
+                    factor = base.tostring(Precedence.PRODUCT,
+                                           language=language)
+                elif language is Language.C:
+                    if exp in range(2, 10):
+                        factor = base.tostring(Precedence.PRODUCT,
+                                               language=language)
+                        factor = ' * '.join([factor] * exp)
+                    elif exp in range(-10, 0):
+                        factor = base.tostring(Precedence.PRODUCT,
+                                               language=language)
+                        tail += [factor] * -exp
+                        continue
+                    else:
+                        factor = base.tostring(Precedence.TUPLE,
+                                               language=language)
+                        factor = f'pow({factor}, {exp})'
+                else:
+                    factor = base.tostring(Precedence.POWER,
+                                           language=language) + f' ** {exp}'
+                if factors:
+                    factors.append(op)
+                factors.append(factor)
+            if tail:
+                if not factors:
+                    factors += ['1']
+                factors += ['/', '(', ' * '.join(tail), ')']
+            r = ''.join(factors) or '1'
+            precedence = Precedence.PRODUCT if factors else Precedence.ATOM
+        elif self.op is Op.APPLY:
+            name, args, kwargs = self.data
+            if name is ArithOp.DIV and language is Language.C:
+                numer, denom = [arg.tostring(Precedence.PRODUCT,
+                                             language=language)
+                                for arg in args]
+                r = f'{numer} / {denom}'
+                precedence = Precedence.PRODUCT
+            else:
+                args = [arg.tostring(Precedence.TUPLE, language=language)
+                        for arg in args]
+                args += [k + '=' + v.tostring(Precedence.NONE)
+                         for k, v in kwargs.items()]
+                r = f'{name}({", ".join(args)})'
+                precedence = Precedence.ATOM
+        elif self.op is Op.INDEXING:
+            name = self.data[0]
+            args = [arg.tostring(Precedence.TUPLE, language=language)
+                    for arg in self.data[1:]]
+            r = f'{name}[{", ".join(args)}]'
+            precedence = Precedence.ATOM
+        elif self.op is Op.CONCAT:
+            args = [arg.tostring(Precedence.PRODUCT, language=language)
+                    for arg in self.data]
+            r = " // ".join(args)
+            precedence = Precedence.PRODUCT
+        elif self.op is Op.TERNARY:
+            cond, expr1, expr2 = [a.tostring(Precedence.TUPLE,
+                                             language=language)
+                                  for a in self.data]
+            if language is Language.C:
+                r = f'({cond} ? {expr1} : {expr2})'
+            elif language is Language.Python:
+                r = f'({expr1} if {cond} else {expr2})'
+            elif language is Language.Fortran:
+                r = f'merge({expr1}, {expr2}, {cond})'
+            else:
+                raise NotImplementedError(
+                    f'tostring for {self.op} and {language}')
+            precedence = Precedence.ATOM
+        elif self.op is Op.REF:
+            r = '&' + self.data.tostring(Precedence.UNARY, language=language)
+            precedence = Precedence.UNARY
+        elif self.op is Op.DEREF:
+            r = '*' + self.data.tostring(Precedence.UNARY, language=language)
+            precedence = Precedence.UNARY
+        elif self.op is Op.RELATIONAL:
+            rop, left, right = self.data
+            precedence = (Precedence.EQ if rop in (RelOp.EQ, RelOp.NE)
+                          else Precedence.LT)
+            left = left.tostring(precedence, language=language)
+            right = right.tostring(precedence, language=language)
+            rop = rop.tostring(language=language)
+            r = f'{left} {rop} {right}'
+        else:
+            raise NotImplementedError(f'tostring for op {self.op}')
+        if parent_precedence.value < precedence.value:
+            # If the parent precedence is higher than the operand
+            # precedence, the operand will be enclosed in parentheses.
+            return '(' + r + ')'
+        return r
+
+    def __pos__(self):
+        return self
+
+    def __neg__(self):
+        return self * -1
+
+    def __add__(self, other):
+        other = as_expr(other)
+        if isinstance(other, Expr):
+            if self.op is other.op:
+                if self.op in (Op.INTEGER, Op.REAL):
+                    return as_number(
+                        self.data[0] + other.data[0],
+                        max(self.data[1], other.data[1]))
+                if self.op is Op.COMPLEX:
+                    r1, i1 = self.data
+                    r2, i2 = other.data
+                    return as_complex(r1 + r2, i1 + i2)
+                if self.op is Op.TERMS:
+                    r = Expr(self.op, dict(self.data))
+                    for k, v in other.data.items():
+                        _pairs_add(r.data, k, v)
+                    return normalize(r)
+            if self.op is Op.COMPLEX and other.op in (Op.INTEGER, Op.REAL):
+                return self + as_complex(other)
+            elif self.op in (Op.INTEGER, Op.REAL) and other.op is Op.COMPLEX:
+                return as_complex(self) + other
+            elif self.op is Op.REAL and other.op is Op.INTEGER:
+                return self + as_real(other, kind=self.data[1])
+            elif self.op is Op.INTEGER and other.op is Op.REAL:
+                return as_real(self, kind=other.data[1]) + other
+            return as_terms(self) + as_terms(other)
+        return NotImplemented
+
+    def __radd__(self, other):
+        if isinstance(other, number_types):
+            return as_number(other) + self
+        return NotImplemented
+
+    def __sub__(self, other):
+        return self + (-other)
+
+    def __rsub__(self, other):
+        if isinstance(other, number_types):
+            return as_number(other) - self
+        return NotImplemented
+
+    def __mul__(self, other):
+        other = as_expr(other)
+        if isinstance(other, Expr):
+            if self.op is other.op:
+                if self.op in (Op.INTEGER, Op.REAL):
+                    return as_number(self.data[0] * other.data[0],
+                                     max(self.data[1], other.data[1]))
+                elif self.op is Op.COMPLEX:
+                    r1, i1 = self.data
+                    r2, i2 = other.data
+                    return as_complex(r1 * r2 - i1 * i2, r1 * i2 + r2 * i1)
+
+                if self.op is Op.FACTORS:
+                    r = Expr(self.op, dict(self.data))
+                    for k, v in other.data.items():
+                        _pairs_add(r.data, k, v)
+                    return normalize(r)
+                elif self.op is Op.TERMS:
+                    r = Expr(self.op, {})
+                    for t1, c1 in self.data.items():
+                        for t2, c2 in other.data.items():
+                            _pairs_add(r.data, t1 * t2, c1 * c2)
+                    return normalize(r)
+
+            if self.op is Op.COMPLEX and other.op in (Op.INTEGER, Op.REAL):
+                return self * as_complex(other)
+            elif other.op is Op.COMPLEX and self.op in (Op.INTEGER, Op.REAL):
+                return as_complex(self) * other
+            elif self.op is Op.REAL and other.op is Op.INTEGER:
+                return self * as_real(other, kind=self.data[1])
+            elif self.op is Op.INTEGER and other.op is Op.REAL:
+                return as_real(self, kind=other.data[1]) * other
+
+            if self.op is Op.TERMS:
+                return self * as_terms(other)
+            elif other.op is Op.TERMS:
+                return as_terms(self) * other
+
+            return as_factors(self) * as_factors(other)
+        return NotImplemented
+
+    def __rmul__(self, other):
+        if isinstance(other, number_types):
+            return as_number(other) * self
+        return NotImplemented
+
+    def __pow__(self, other):
+        other = as_expr(other)
+        if isinstance(other, Expr):
+            if other.op is Op.INTEGER:
+                exponent = other.data[0]
+                # TODO: other kind not used
+                if exponent == 0:
+                    return as_number(1)
+                if exponent == 1:
+                    return self
+                if exponent > 0:
+                    if self.op is Op.FACTORS:
+                        r = Expr(self.op, {})
+                        for k, v in self.data.items():
+                            r.data[k] = v * exponent
+                        return normalize(r)
+                    return self * (self ** (exponent - 1))
+                elif exponent != -1:
+                    return (self ** (-exponent)) ** -1
+                return Expr(Op.FACTORS, {self: exponent})
+            return as_apply(ArithOp.POW, self, other)
+        return NotImplemented
+
+    def __truediv__(self, other):
+        other = as_expr(other)
+        if isinstance(other, Expr):
+            # Fortran / is different from Python /:
+            # - `/` is a truncating operation for integer operands
+            return normalize(as_apply(ArithOp.DIV, self, other))
+        return NotImplemented
+
+    def __rtruediv__(self, other):
+        other = as_expr(other)
+        if isinstance(other, Expr):
+            return other / self
+        return NotImplemented
+
+    def __floordiv__(self, other):
+        other = as_expr(other)
+        if isinstance(other, Expr):
+            # Fortran // is different from Python //:
+            # - `//` is a concatenation operation for string operands
+            return normalize(Expr(Op.CONCAT, (self, other)))
+        return NotImplemented
+
+    def __rfloordiv__(self, other):
+        other = as_expr(other)
+        if isinstance(other, Expr):
+            return other // self
+        return NotImplemented
+
+    def __call__(self, *args, **kwargs):
+        # In Fortran, parentheses () are used both for function calls
+        # and for indexing operations.
+        #
+        # TODO: implement a method for deciding when __call__ should
+        # return an INDEXING expression.
+        return as_apply(self, *map(as_expr, args),
+                        **dict((k, as_expr(v)) for k, v in kwargs.items()))
+
+    def __getitem__(self, index):
+        # Provided to support C indexing operations that .pyf files
+        # may contain.
+        index = as_expr(index)
+        if not isinstance(index, tuple):
+            index = index,
+        if len(index) > 1:
+            ewarn(f'C-index should be a single expression but got `{index}`')
+        return Expr(Op.INDEXING, (self,) + index)
+
+    def substitute(self, symbols_map):
+        """Recursively substitute symbols with values from the symbols map.
+
+        The symbols map is a dictionary of symbol-expression pairs.
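+
+        An illustrative sketch, built only from constructors shown in
+        this module (the normalized result form is indicative):
+
+            x = Expr(Op.SYMBOL, 'x')
+            y = Expr(Op.SYMBOL, 'y')
+            (x + as_number(1)).substitute({x: y})   # roughly 1 + y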
+        """
+        if self.op is Op.SYMBOL:
+            value = symbols_map.get(self)
+            if value is None:
+                return self
+            m = re.match(r'\A(@__f2py_PARENTHESIS_(\w+)_\d+@)\Z', self.data)
+            if m:
+                # complement to fromstring method
+                items, paren = m.groups()
+                if paren in ['ROUNDDIV', 'SQUARE']:
+                    return as_array(value)
+                assert paren == 'ROUND', (paren, value)
+            return value
+        if self.op in (Op.INTEGER, Op.REAL, Op.STRING):
+            return self
+        if self.op in (Op.ARRAY, Op.COMPLEX):
+            return Expr(self.op, tuple(item.substitute(symbols_map)
+                                       for item in self.data))
+        if self.op is Op.CONCAT:
+            return normalize(Expr(self.op, tuple(item.substitute(symbols_map)
+                                                 for item in self.data)))
+        if self.op is Op.TERMS:
+            r = None
+            for term, coeff in self.data.items():
+                if r is None:
+                    r = term.substitute(symbols_map) * coeff
+                else:
+                    r += term.substitute(symbols_map) * coeff
+            if r is None:
+                ewarn('substitute: empty TERMS expression interpreted as'
+                      ' int-literal 0')
+                return as_number(0)
+            return r
+        if self.op is Op.FACTORS:
+            r = None
+            for base, exponent in self.data.items():
+                if r is None:
+                    r = base.substitute(symbols_map) ** exponent
+                else:
+                    r *= base.substitute(symbols_map) ** exponent
+            if r is None:
+                ewarn('substitute: empty FACTORS expression interpreted'
+                      ' as int-literal 1')
+                return as_number(1)
+            return r
+        if self.op is Op.APPLY:
+            target, args, kwargs = self.data
+            if isinstance(target, Expr):
+                target = target.substitute(symbols_map)
+            args = tuple(a.substitute(symbols_map) for a in args)
+            kwargs = dict((k, v.substitute(symbols_map))
+                          for k, v in kwargs.items())
+            return normalize(Expr(self.op, (target, args, kwargs)))
+        if self.op is Op.INDEXING:
+            func = self.data[0]
+            if isinstance(func, Expr):
+                func = func.substitute(symbols_map)
+            args = tuple(a.substitute(symbols_map) for a in self.data[1:])
+            return normalize(Expr(self.op, (func,) + args))
+        if self.op is Op.TERNARY:
+            operands = tuple(a.substitute(symbols_map) for a in self.data)
+            return normalize(Expr(self.op, operands))
+        if self.op in (Op.REF, Op.DEREF):
+            return normalize(Expr(self.op, self.data.substitute(symbols_map)))
+        if self.op is Op.RELATIONAL:
+            rop, left, right = self.data
+            left = left.substitute(symbols_map)
+            right = right.substitute(symbols_map)
+            return normalize(Expr(self.op, (rop, left, right)))
+        raise NotImplementedError(f'substitute method for {self.op}: {self!r}')
+
+    def traverse(self, visit, *args, **kwargs):
+        """Traverse expression tree with visit function.
+
+        The visit function is applied to an expression with given args
+        and kwargs.
+
+        A traverse call returns the expression returned by visit when
+        that is not None; otherwise, it returns a new normalized
+        expression whose sub-expressions have been traversed with the
+        same visit function.
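+
+        A minimal sketch of a visit function (the names here are
+        hypothetical): returning an expression replaces the visited
+        node, while returning None continues into sub-expressions:
+
+            def replace_n_by_m(expr):
+                if expr.op is Op.SYMBOL and expr.data == 'n':
+                    return Expr(Op.SYMBOL, 'm')
+                return None   # keep traversing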
+ """ + result = visit(self, *args, **kwargs) + if result is not None: + return result + + if self.op in (Op.INTEGER, Op.REAL, Op.STRING, Op.SYMBOL): + return self + elif self.op in (Op.COMPLEX, Op.ARRAY, Op.CONCAT, Op.TERNARY): + return normalize(Expr(self.op, tuple( + item.traverse(visit, *args, **kwargs) + for item in self.data))) + elif self.op in (Op.TERMS, Op.FACTORS): + data = {} + for k, v in self.data.items(): + k = k.traverse(visit, *args, **kwargs) + v = (v.traverse(visit, *args, **kwargs) + if isinstance(v, Expr) else v) + if k in data: + v = data[k] + v + data[k] = v + return normalize(Expr(self.op, data)) + elif self.op is Op.APPLY: + obj = self.data[0] + func = (obj.traverse(visit, *args, **kwargs) + if isinstance(obj, Expr) else obj) + operands = tuple(operand.traverse(visit, *args, **kwargs) + for operand in self.data[1]) + kwoperands = dict((k, v.traverse(visit, *args, **kwargs)) + for k, v in self.data[2].items()) + return normalize(Expr(self.op, (func, operands, kwoperands))) + elif self.op is Op.INDEXING: + obj = self.data[0] + obj = (obj.traverse(visit, *args, **kwargs) + if isinstance(obj, Expr) else obj) + indices = tuple(index.traverse(visit, *args, **kwargs) + for index in self.data[1:]) + return normalize(Expr(self.op, (obj,) + indices)) + elif self.op in (Op.REF, Op.DEREF): + return normalize(Expr(self.op, + self.data.traverse(visit, *args, **kwargs))) + elif self.op is Op.RELATIONAL: + rop, left, right = self.data + left = left.traverse(visit, *args, **kwargs) + right = right.traverse(visit, *args, **kwargs) + return normalize(Expr(self.op, (rop, left, right))) + raise NotImplementedError(f'traverse method for {self.op}') + + def contains(self, other): + """Check if self contains other. + """ + found = [] + + def visit(expr, found=found): + if found: + return expr + elif expr == other: + found.append(1) + return expr + + self.traverse(visit) + + return len(found) != 0 + + def symbols(self): + """Return a set of symbols contained in self. + """ + found = set() + + def visit(expr, found=found): + if expr.op is Op.SYMBOL: + found.add(expr) + + self.traverse(visit) + + return found + + def polynomial_atoms(self): + """Return a set of expressions used as atoms in polynomial self. + """ + found = set() + + def visit(expr, found=found): + if expr.op is Op.FACTORS: + for b in expr.data: + b.traverse(visit) + return expr + if expr.op in (Op.TERMS, Op.COMPLEX): + return + if expr.op is Op.APPLY and isinstance(expr.data[0], ArithOp): + if expr.data[0] is ArithOp.POW: + expr.data[1][0].traverse(visit) + return expr + return + if expr.op in (Op.INTEGER, Op.REAL): + return expr + + found.add(expr) + + if expr.op in (Op.INDEXING, Op.APPLY): + return expr + + self.traverse(visit) + + return found + + def linear_solve(self, symbol): + """Return a, b such that a * symbol + b == self. + + If self is not linear with respect to symbol, raise RuntimeError. + """ + b = self.substitute({symbol: as_number(0)}) + ax = self - b + a = ax.substitute({symbol: as_number(1)}) + + zero, _ = as_numer_denom(a * symbol - ax) + + if zero != as_number(0): + raise RuntimeError(f'not a {symbol}-linear equation:' + f' {a} * {symbol} + {b} == {self}') + return a, b + + +def normalize(obj): + """Normalize Expr and apply basic evaluation methods. 
+ """ + if not isinstance(obj, Expr): + return obj + + if obj.op is Op.TERMS: + d = {} + for t, c in obj.data.items(): + if c == 0: + continue + if t.op is Op.COMPLEX and c != 1: + t = t * c + c = 1 + if t.op is Op.TERMS: + for t1, c1 in t.data.items(): + _pairs_add(d, t1, c1 * c) + else: + _pairs_add(d, t, c) + if len(d) == 0: + # TODO: deterimine correct kind + return as_number(0) + elif len(d) == 1: + (t, c), = d.items() + if c == 1: + return t + return Expr(Op.TERMS, d) + + if obj.op is Op.FACTORS: + coeff = 1 + d = {} + for b, e in obj.data.items(): + if e == 0: + continue + if b.op is Op.TERMS and isinstance(e, integer_types) and e > 1: + # expand integer powers of sums + b = b * (b ** (e - 1)) + e = 1 + + if b.op in (Op.INTEGER, Op.REAL): + if e == 1: + coeff *= b.data[0] + elif e > 0: + coeff *= b.data[0] ** e + else: + _pairs_add(d, b, e) + elif b.op is Op.FACTORS: + if e > 0 and isinstance(e, integer_types): + for b1, e1 in b.data.items(): + _pairs_add(d, b1, e1 * e) + else: + _pairs_add(d, b, e) + else: + _pairs_add(d, b, e) + if len(d) == 0 or coeff == 0: + # TODO: deterimine correct kind + assert isinstance(coeff, number_types) + return as_number(coeff) + elif len(d) == 1: + (b, e), = d.items() + if e == 1: + t = b + else: + t = Expr(Op.FACTORS, d) + if coeff == 1: + return t + return Expr(Op.TERMS, {t: coeff}) + elif coeff == 1: + return Expr(Op.FACTORS, d) + else: + return Expr(Op.TERMS, {Expr(Op.FACTORS, d): coeff}) + + if obj.op is Op.APPLY and obj.data[0] is ArithOp.DIV: + dividend, divisor = obj.data[1] + t1, c1 = as_term_coeff(dividend) + t2, c2 = as_term_coeff(divisor) + if isinstance(c1, integer_types) and isinstance(c2, integer_types): + g = gcd(c1, c2) + c1, c2 = c1//g, c2//g + else: + c1, c2 = c1/c2, 1 + + if t1.op is Op.APPLY and t1.data[0] is ArithOp.DIV: + numer = t1.data[1][0] * c1 + denom = t1.data[1][1] * t2 * c2 + return as_apply(ArithOp.DIV, numer, denom) + + if t2.op is Op.APPLY and t2.data[0] is ArithOp.DIV: + numer = t2.data[1][1] * t1 * c1 + denom = t2.data[1][0] * c2 + return as_apply(ArithOp.DIV, numer, denom) + + d = dict(as_factors(t1).data) + for b, e in as_factors(t2).data.items(): + _pairs_add(d, b, -e) + numer, denom = {}, {} + for b, e in d.items(): + if e > 0: + numer[b] = e + else: + denom[b] = -e + numer = normalize(Expr(Op.FACTORS, numer)) * c1 + denom = normalize(Expr(Op.FACTORS, denom)) * c2 + + if denom.op in (Op.INTEGER, Op.REAL) and denom.data[0] == 1: + # TODO: denom kind not used + return numer + return as_apply(ArithOp.DIV, numer, denom) + + if obj.op is Op.CONCAT: + lst = [obj.data[0]] + for s in obj.data[1:]: + last = lst[-1] + if ( + last.op is Op.STRING + and s.op is Op.STRING + and last.data[0][0] in '"\'' + and s.data[0][0] == last.data[0][-1] + ): + new_last = as_string(last.data[0][:-1] + s.data[0][1:], + max(last.data[1], s.data[1])) + lst[-1] = new_last + else: + lst.append(s) + if len(lst) == 1: + return lst[0] + return Expr(Op.CONCAT, tuple(lst)) + + if obj.op is Op.TERNARY: + cond, expr1, expr2 = map(normalize, obj.data) + if cond.op is Op.INTEGER: + return expr1 if cond.data[0] else expr2 + return Expr(Op.TERNARY, (cond, expr1, expr2)) + + return obj + + +def as_expr(obj): + """Convert non-Expr objects to Expr objects. 
+ """ + if isinstance(obj, complex): + return as_complex(obj.real, obj.imag) + if isinstance(obj, number_types): + return as_number(obj) + if isinstance(obj, str): + # STRING expression holds string with boundary quotes, hence + # applying repr: + return as_string(repr(obj)) + if isinstance(obj, tuple): + return tuple(map(as_expr, obj)) + return obj + + +def as_symbol(obj): + """Return object as SYMBOL expression (variable or unparsed expression). + """ + return Expr(Op.SYMBOL, obj) + + +def as_number(obj, kind=4): + """Return object as INTEGER or REAL constant. + """ + if isinstance(obj, int): + return Expr(Op.INTEGER, (obj, kind)) + if isinstance(obj, float): + return Expr(Op.REAL, (obj, kind)) + if isinstance(obj, Expr): + if obj.op in (Op.INTEGER, Op.REAL): + return obj + raise OpError(f'cannot convert {obj} to INTEGER or REAL constant') + + +def as_integer(obj, kind=4): + """Return object as INTEGER constant. + """ + if isinstance(obj, int): + return Expr(Op.INTEGER, (obj, kind)) + if isinstance(obj, Expr): + if obj.op is Op.INTEGER: + return obj + raise OpError(f'cannot convert {obj} to INTEGER constant') + + +def as_real(obj, kind=4): + """Return object as REAL constant. + """ + if isinstance(obj, int): + return Expr(Op.REAL, (float(obj), kind)) + if isinstance(obj, float): + return Expr(Op.REAL, (obj, kind)) + if isinstance(obj, Expr): + if obj.op is Op.REAL: + return obj + elif obj.op is Op.INTEGER: + return Expr(Op.REAL, (float(obj.data[0]), kind)) + raise OpError(f'cannot convert {obj} to REAL constant') + + +def as_string(obj, kind=1): + """Return object as STRING expression (string literal constant). + """ + return Expr(Op.STRING, (obj, kind)) + + +def as_array(obj): + """Return object as ARRAY expression (array constant). + """ + if isinstance(obj, Expr): + obj = obj, + return Expr(Op.ARRAY, obj) + + +def as_complex(real, imag=0): + """Return object as COMPLEX expression (complex literal constant). + """ + return Expr(Op.COMPLEX, (as_expr(real), as_expr(imag))) + + +def as_apply(func, *args, **kwargs): + """Return object as APPLY expression (function call, constructor, etc.) + """ + return Expr(Op.APPLY, + (func, tuple(map(as_expr, args)), + dict((k, as_expr(v)) for k, v in kwargs.items()))) + + +def as_ternary(cond, expr1, expr2): + """Return object as TERNARY expression (cond?expr1:expr2). + """ + return Expr(Op.TERNARY, (cond, expr1, expr2)) + + +def as_ref(expr): + """Return object as referencing expression. + """ + return Expr(Op.REF, expr) + + +def as_deref(expr): + """Return object as dereferencing expression. + """ + return Expr(Op.DEREF, expr) + + +def as_eq(left, right): + return Expr(Op.RELATIONAL, (RelOp.EQ, left, right)) + + +def as_ne(left, right): + return Expr(Op.RELATIONAL, (RelOp.NE, left, right)) + + +def as_lt(left, right): + return Expr(Op.RELATIONAL, (RelOp.LT, left, right)) + + +def as_le(left, right): + return Expr(Op.RELATIONAL, (RelOp.LE, left, right)) + + +def as_gt(left, right): + return Expr(Op.RELATIONAL, (RelOp.GT, left, right)) + + +def as_ge(left, right): + return Expr(Op.RELATIONAL, (RelOp.GE, left, right)) + + +def as_terms(obj): + """Return expression as TERMS expression. 
+ """ + if isinstance(obj, Expr): + obj = normalize(obj) + if obj.op is Op.TERMS: + return obj + if obj.op is Op.INTEGER: + return Expr(Op.TERMS, {as_integer(1, obj.data[1]): obj.data[0]}) + if obj.op is Op.REAL: + return Expr(Op.TERMS, {as_real(1, obj.data[1]): obj.data[0]}) + return Expr(Op.TERMS, {obj: 1}) + raise OpError(f'cannot convert {type(obj)} to terms Expr') + + +def as_factors(obj): + """Return expression as FACTORS expression. + """ + if isinstance(obj, Expr): + obj = normalize(obj) + if obj.op is Op.FACTORS: + return obj + if obj.op is Op.TERMS: + if len(obj.data) == 1: + (term, coeff), = obj.data.items() + if coeff == 1: + return Expr(Op.FACTORS, {term: 1}) + return Expr(Op.FACTORS, {term: 1, Expr.number(coeff): 1}) + if ((obj.op is Op.APPLY + and obj.data[0] is ArithOp.DIV + and not obj.data[2])): + return Expr(Op.FACTORS, {obj.data[1][0]: 1, obj.data[1][1]: -1}) + return Expr(Op.FACTORS, {obj: 1}) + raise OpError(f'cannot convert {type(obj)} to terms Expr') + + +def as_term_coeff(obj): + """Return expression as term-coefficient pair. + """ + if isinstance(obj, Expr): + obj = normalize(obj) + if obj.op is Op.INTEGER: + return as_integer(1, obj.data[1]), obj.data[0] + if obj.op is Op.REAL: + return as_real(1, obj.data[1]), obj.data[0] + if obj.op is Op.TERMS: + if len(obj.data) == 1: + (term, coeff), = obj.data.items() + return term, coeff + # TODO: find common divisor of coefficients + if obj.op is Op.APPLY and obj.data[0] is ArithOp.DIV: + t, c = as_term_coeff(obj.data[1][0]) + return as_apply(ArithOp.DIV, t, obj.data[1][1]), c + return obj, 1 + raise OpError(f'cannot convert {type(obj)} to term and coeff') + + +def as_numer_denom(obj): + """Return expression as numer-denom pair. + """ + if isinstance(obj, Expr): + obj = normalize(obj) + if obj.op in (Op.INTEGER, Op.REAL, Op.COMPLEX, Op.SYMBOL, + Op.INDEXING, Op.TERNARY): + return obj, as_number(1) + elif obj.op is Op.APPLY: + if obj.data[0] is ArithOp.DIV and not obj.data[2]: + numers, denoms = map(as_numer_denom, obj.data[1]) + return numers[0] * denoms[1], numers[1] * denoms[0] + return obj, as_number(1) + elif obj.op is Op.TERMS: + numers, denoms = [], [] + for term, coeff in obj.data.items(): + n, d = as_numer_denom(term) + n = n * coeff + numers.append(n) + denoms.append(d) + numer, denom = as_number(0), as_number(1) + for i in range(len(numers)): + n = numers[i] + for j in range(len(numers)): + if i != j: + n *= denoms[j] + numer += n + denom *= denoms[i] + if denom.op in (Op.INTEGER, Op.REAL) and denom.data[0] < 0: + numer, denom = -numer, -denom + return numer, denom + elif obj.op is Op.FACTORS: + numer, denom = as_number(1), as_number(1) + for b, e in obj.data.items(): + bnumer, bdenom = as_numer_denom(b) + if e > 0: + numer *= bnumer ** e + denom *= bdenom ** e + elif e < 0: + numer *= bdenom ** (-e) + denom *= bnumer ** (-e) + return numer, denom + raise OpError(f'cannot convert {type(obj)} to numer and denom') + + +def _counter(): + # Used internally to generate unique dummy symbols + counter = 0 + while True: + counter += 1 + yield counter + + +COUNTER = _counter() + + +def eliminate_quotes(s): + """Replace quoted substrings of input string. + + Return a new string and a mapping of replacements. 
+ """ + d = {} + + def repl(m): + kind, value = m.groups()[:2] + if kind: + # remove trailing underscore + kind = kind[:-1] + p = {"'": "SINGLE", '"': "DOUBLE"}[value[0]] + k = f'{kind}@__f2py_QUOTES_{p}_{COUNTER.__next__()}@' + d[k] = value + return k + + new_s = re.sub(r'({kind}_|)({single_quoted}|{double_quoted})'.format( + kind=r'\w[\w\d_]*', + single_quoted=r"('([^'\\]|(\\.))*')", + double_quoted=r'("([^"\\]|(\\.))*")'), + repl, s) + + assert '"' not in new_s + assert "'" not in new_s + + return new_s, d + + +def insert_quotes(s, d): + """Inverse of eliminate_quotes. + """ + for k, v in d.items(): + kind = k[:k.find('@')] + if kind: + kind += '_' + s = s.replace(k, kind + v) + return s + + +def replace_parenthesis(s): + """Replace substrings of input that are enclosed in parenthesis. + + Return a new string and a mapping of replacements. + """ + # Find a parenthesis pair that appears first. + + # Fortran deliminator are `(`, `)`, `[`, `]`, `(/', '/)`, `/`. + # We don't handle `/` deliminator because it is not a part of an + # expression. + left, right = None, None + mn_i = len(s) + for left_, right_ in (('(/', '/)'), + '()', + '{}', # to support C literal structs + '[]'): + i = s.find(left_) + if i == -1: + continue + if i < mn_i: + mn_i = i + left, right = left_, right_ + + if left is None: + return s, {} + + i = mn_i + j = s.find(right, i) + + while s.count(left, i + 1, j) != s.count(right, i + 1, j): + j = s.find(right, j + 1) + if j == -1: + raise ValueError(f'Mismatch of {left+right} parenthesis in {s!r}') + + p = {'(': 'ROUND', '[': 'SQUARE', '{': 'CURLY', '(/': 'ROUNDDIV'}[left] + + k = f'@__f2py_PARENTHESIS_{p}_{COUNTER.__next__()}@' + v = s[i+len(left):j] + r, d = replace_parenthesis(s[j+len(right):]) + d[k] = v + return s[:i] + k + r, d + + +def _get_parenthesis_kind(s): + assert s.startswith('@__f2py_PARENTHESIS_'), s + return s.split('_')[4] + + +def unreplace_parenthesis(s, d): + """Inverse of replace_parenthesis. + """ + for k, v in d.items(): + p = _get_parenthesis_kind(k) + left = dict(ROUND='(', SQUARE='[', CURLY='{', ROUNDDIV='(/')[p] + right = dict(ROUND=')', SQUARE=']', CURLY='}', ROUNDDIV='/)')[p] + s = s.replace(k, left + v + right) + return s + + +def fromstring(s, language=Language.C): + """Create an expression from a string. + + This is a "lazy" parser, that is, only arithmetic operations are + resolved, non-arithmetic operations are treated as symbols. + """ + r = _FromStringWorker(language=language).parse(s) + if isinstance(r, Expr): + return r + raise ValueError(f'failed to parse `{s}` to Expr instance: got `{r}`') + + +class _Pair: + # Internal class to represent a pair of expressions + + def __init__(self, left, right): + self.left = left + self.right = right + + def substitute(self, symbols_map): + left, right = self.left, self.right + if isinstance(left, Expr): + left = left.substitute(symbols_map) + if isinstance(right, Expr): + right = right.substitute(symbols_map) + return _Pair(left, right) + + def __repr__(self): + return f'{type(self).__name__}({self.left}, {self.right})' + + +class _FromStringWorker: + + def __init__(self, language=Language.C): + self.original = None + self.quotes_map = None + self.language = language + + def finalize_string(self, s): + return insert_quotes(s, self.quotes_map) + + def parse(self, inp): + self.original = inp + unquoted, self.quotes_map = eliminate_quotes(inp) + return self.process(unquoted) + + def process(self, s, context='expr'): + """Parse string within the given context. 
+
+        The context may define the result in case of ambiguous
+        expressions. For instance, consider expressions `f(x, y)` and
+        `(x, y) + (a, b)` where `f` is a function and the pair `(x, y)`
+        denotes a complex number. Specifying the context as "args" or
+        "expr", the subexpression `(x, y)` will be parsed as an
+        argument list or as a complex number, respectively.
+        """
+        if isinstance(s, (list, tuple)):
+            return type(s)(self.process(s_, context) for s_ in s)
+
+        assert isinstance(s, str), (type(s), s)
+
+        # replace subexpressions in parenthesis with f2py @-names
+        r, raw_symbols_map = replace_parenthesis(s)
+        r = r.strip()
+
+        def restore(r):
+            # restores subexpressions marked with f2py @-names
+            if isinstance(r, (list, tuple)):
+                return type(r)(map(restore, r))
+            return unreplace_parenthesis(r, raw_symbols_map)
+
+        # comma-separated tuple
+        if ',' in r:
+            operands = restore(r.split(','))
+            if context == 'args':
+                return tuple(self.process(operands))
+            if context == 'expr':
+                if len(operands) == 2:
+                    # complex number literal
+                    return as_complex(*self.process(operands))
+            raise NotImplementedError(
+                f'parsing comma-separated list (context={context}): {r}')
+
+        # ternary operation
+        m = re.match(r'\A([^?]+)[?]([^:]+)[:](.+)\Z', r)
+        if m:
+            assert context == 'expr', context
+            oper, expr1, expr2 = restore(m.groups())
+            oper = self.process(oper)
+            expr1 = self.process(expr1)
+            expr2 = self.process(expr2)
+            return as_ternary(oper, expr1, expr2)
+
+        # relational expression
+        if self.language is Language.Fortran:
+            m = re.match(
+                r'\A(.+)\s*[.](eq|ne|lt|le|gt|ge)[.]\s*(.+)\Z', r, re.I)
+        else:
+            m = re.match(
+                r'\A(.+)\s*([=][=]|[!][=]|[<][=]|[<]|[>][=]|[>])\s*(.+)\Z', r)
+        if m:
+            left, rop, right = m.groups()
+            if self.language is Language.Fortran:
+                rop = '.' + rop + '.'
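[Editor's note] The `context` argument is what disambiguates a parenthesized pair: in an expression context `(1, 2)` is a complex literal, while the same pair following a name is an argument list. A sketch (both assertions also appear in `test_fromstring` below):

```python
from numpy.f2py.symbolic import fromstring, as_symbol, as_complex

x, y, f = map(as_symbol, 'xyf')

assert fromstring('(1, 2)') == as_complex(1, 2)   # context='expr'
assert fromstring('f(x, y)') == f(x, y)           # context='args'
```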
+ left, right = self.process(restore((left, right))) + rop = RelOp.fromstring(rop, language=self.language) + return Expr(Op.RELATIONAL, (rop, left, right)) + + # keyword argument + m = re.match(r'\A(\w[\w\d_]*)\s*[=](.*)\Z', r) + if m: + keyname, value = m.groups() + value = restore(value) + return _Pair(keyname, self.process(value)) + + # addition/subtraction operations + operands = re.split(r'((?<!\d[edED])[+-])', r) + if len(operands) > 1: + result = self.process(restore(operands[0] or '0')) + for op, operand in zip(operands[1::2], operands[2::2]): + operand = self.process(restore(operand)) + op = op.strip() + if op == '+': + result += operand + else: + assert op == '-' + result -= operand + return result + + # string concatenate operation + if self.language is Language.Fortran and '//' in r: + operands = restore(r.split('//')) + return Expr(Op.CONCAT, + tuple(self.process(operands))) + + # multiplication/division operations + operands = re.split(r'(?<=[@\w\d_])\s*([*]|/)', + (r if self.language is Language.C + else r.replace('**', '@__f2py_DOUBLE_STAR@'))) + if len(operands) > 1: + operands = restore(operands) + if self.language is not Language.C: + operands = [operand.replace('@__f2py_DOUBLE_STAR@', '**') + for operand in operands] + # Expression is an arithmetic product + result = self.process(operands[0]) + for op, operand in zip(operands[1::2], operands[2::2]): + operand = self.process(operand) + op = op.strip() + if op == '*': + result *= operand + else: + assert op == '/' + result /= operand + return result + + # referencing/dereferencing + if r.startswith('*') or r.startswith('&'): + op = {'*': Op.DEREF, '&': Op.REF}[r[0]] + operand = self.process(restore(r[1:])) + return Expr(op, operand) + + # exponentiation operations + if self.language is not Language.C and '**' in r: + operands = list(reversed(restore(r.split('**')))) + result = self.process(operands[0]) + for operand in operands[1:]: + operand = self.process(operand) + result = operand ** result + return result + + # int-literal-constant + m = re.match(r'\A({digit_string})({kind}|)\Z'.format( + digit_string=r'\d+', + kind=r'_(\d+|\w[\w\d_]*)'), r) + if m: + value, _, kind = m.groups() + if kind and kind.isdigit(): + kind = int(kind) + return as_integer(int(value), kind or 4) + + # real-literal-constant + m = re.match(r'\A({significant}({exponent}|)|\d+{exponent})({kind}|)\Z' + .format( + significant=r'[.]\d+|\d+[.]\d*', + exponent=r'[edED][+-]?\d+', + kind=r'_(\d+|\w[\w\d_]*)'), r) + if m: + value, _, _, kind = m.groups() + if kind and kind.isdigit(): + kind = int(kind) + value = value.lower() + if 'd' in value: + return as_real(float(value.replace('d', 'e')), kind or 8) + return as_real(float(value), kind or 4) + + # string-literal-constant with kind parameter specification + if r in self.quotes_map: + kind = r[:r.find('@')] + return as_string(self.quotes_map[r], kind or 1) + + # array constructor or literal complex constant or + # parenthesized expression + if r in raw_symbols_map: + paren = _get_parenthesis_kind(r) + items = self.process(restore(raw_symbols_map[r]), + 'expr' if paren == 'ROUND' else 'args') + if paren == 'ROUND': + if isinstance(items, Expr): + return items + if paren in ['ROUNDDIV', 'SQUARE']: + # Expression is a array constructor + if isinstance(items, Expr): + items = (items,) + return as_array(items) + + # function call/indexing + m = re.match(r'\A(.+)\s*(@__f2py_PARENTHESIS_(ROUND|SQUARE)_\d+@)\Z', + r) + if m: + target, args, paren = m.groups() + target = self.process(restore(target)) + args = 
self.process(restore(args)[1:-1], 'args')
+            if not isinstance(args, tuple):
+                args = args,
+            if paren == 'ROUND':
+                kwargs = dict((a.left, a.right) for a in args
+                              if isinstance(a, _Pair))
+                args = tuple(a for a in args if not isinstance(a, _Pair))
+                # Warning: this could also be a Fortran indexing operation.
+                return as_apply(target, *args, **kwargs)
+            else:
+                # Expression is a C/Python indexing operation
+                # (e.g. used in .pyf files)
+                assert paren == 'SQUARE'
+                return target[args]
+
+        # Fortran standard conforming identifier
+        m = re.match(r'\A\w[\w\d_]*\Z', r)
+        if m:
+            return as_symbol(r)
+
+        # fall-back to symbol
+        r = self.finalize_string(restore(r))
+        ewarn(
+            f'fromstring: treating {r!r} as symbol (original={self.original})')
+        return as_symbol(r)
diff --git a/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c b/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c
index fe21d4b9b..ea47e0555 100644
--- a/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c
+++ b/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c
@@ -9,7 +9,9 @@ extern "C" {
 #endif
 
 /*********************** See f2py2e/cfuncs.py: includes ***********************/
-#include "Python.h"
+
+#define PY_SSIZE_T_CLEAN
+#include <Python.h>
 #include "fortranobject.h"
 #include <math.h>
diff --git a/numpy/f2py/tests/test_callback.py b/numpy/f2py/tests/test_callback.py
index 2cb429ec2..5d2aab94d 100644
--- a/numpy/f2py/tests/test_callback.py
+++ b/numpy/f2py/tests/test_callback.py
@@ -5,7 +5,6 @@ import pytest
 import threading
 import traceback
 import time
-import random
 
 import numpy as np
 from numpy.testing import assert_, assert_equal, IS_PYPY
@@ -107,9 +106,9 @@ cf2py        intent(out) r
         -----
         Call-back functions::
 
-        def fun(): return a
-        Return objects:
-        a : int
+            def fun(): return a
+        Return objects:
+            a : int
         """)
         assert_equal(self.module.t.__doc__, expected)
diff --git a/numpy/f2py/tests/test_crackfortran.py b/numpy/f2py/tests/test_crackfortran.py
index 140f42cbc..039e085b4 100644
--- a/numpy/f2py/tests/test_crackfortran.py
+++ b/numpy/f2py/tests/test_crackfortran.py
@@ -1,3 +1,4 @@
+import pytest
 import numpy as np
 from numpy.testing import assert_array_equal, assert_equal
 from numpy.f2py.crackfortran import markinnerspaces
@@ -39,6 +40,7 @@ class TestNoSpace(util.F2PyTest):
 
 class TestPublicPrivate():
+
     def test_defaultPrivate(self, tmp_path):
         f_path = tmp_path / "mod.f90"
         with f_path.open('w') as ff:
@@ -165,3 +167,117 @@ class TestMarkinnerspaces():
     def test_multiple_relevant_spaces(self):
         assert_equal(markinnerspaces("a 'b c' 'd e'"), "a 'b@_@c' 'd@_@e'")
         assert_equal(markinnerspaces(r'a "b c" "d e"'), r'a "b@_@c" "d@_@e"')
+
+
+class TestDimSpec(util.F2PyTest):
+    """This test suite tests various expressions that are used as dimension
+    specifications.
+
+    There are two use cases where analyzing dimension specifications
+    is important.
+
+    In the first case, the size of output arrays must be defined based
+    on the inputs to a Fortran function. Because Fortran supports
+    arbitrary bases for indexing, for instance, `arr(lower:upper)`,
+    f2py has to evaluate an expression `upper - lower + 1` where
+    `lower` and `upper` are arbitrary expressions of input parameters.
+    The evaluation is performed in C, so f2py has to translate Fortran
+    expressions to valid C expressions (an alternative approach is
+    that a developer specifies the corresponding C expressions in a
+    .pyf file).
+
+    In the second case, the user provides an input array with a given
+    size, but some hidden parameters used in the dimension specifications
+    need to be determined based on the input array size. This is a
+    harder problem because f2py has to solve the inverse problem: find
+    a parameter `p` such that `upper(p) - lower(p) + 1` equals the
+    size of the input array. When this equation cannot be solved
+    (e.g. because the input array size is wrong), f2py raises an
+    error before calling the Fortran function (which otherwise would
+    likely crash the Python process when the size of the input arrays
+    is wrong). f2py currently supports this case only when the
+    equation is linear with respect to the unknown parameter.
+
+    """
+
+    suffix = '.f90'
+
+    code_template = textwrap.dedent("""
+      function get_arr_size_{count}(a, n) result (length)
+        integer, intent(in) :: n
+        integer, dimension({dimspec}), intent(out) :: a
+        integer length
+        length = size(a)
+      end function
+
+      subroutine get_inv_arr_size_{count}(a, n)
+        integer :: n
+        ! the value of n is computed in f2py wrapper
+        !f2py intent(out) n
+        integer, dimension({dimspec}), intent(in) :: a
+        if (a({first}).gt.0) then
+          print*, "a=", a
+        endif
+      end subroutine
+    """)
+
+    linear_dimspecs = ['n', '2*n', '2:n', 'n/2', '5 - n/2', '3*n:20',
+                       'n*(n+1):n*(n+5)']
+    nonlinear_dimspecs = ['2*n:3*n*n+2*n']
+    all_dimspecs = linear_dimspecs + nonlinear_dimspecs
+
+    code = ''
+    for count, dimspec in enumerate(all_dimspecs):
+        code += code_template.format(
+            count=count, dimspec=dimspec,
+            first=dimspec.split(':')[0] if ':' in dimspec else '1')
+
+    @pytest.mark.parametrize('dimspec', all_dimspecs)
+    def test_array_size(self, dimspec):
+
+        count = self.all_dimspecs.index(dimspec)
+        get_arr_size = getattr(self.module, f'get_arr_size_{count}')
+
+        for n in [1, 2, 3, 4, 5]:
+            sz, a = get_arr_size(n)
+            assert len(a) == sz
+
+    @pytest.mark.parametrize('dimspec', all_dimspecs)
+    def test_inv_array_size(self, dimspec):
+
+        count = self.all_dimspecs.index(dimspec)
+        get_arr_size = getattr(self.module, f'get_arr_size_{count}')
+        get_inv_arr_size = getattr(self.module, f'get_inv_arr_size_{count}')
+
+        for n in [1, 2, 3, 4, 5]:
+            sz, a = get_arr_size(n)
+            if dimspec in self.nonlinear_dimspecs:
+                # one must specify n as input; the call will ensure
+                # that a and n are compatible:
+                n1 = get_inv_arr_size(a, n)
+            else:
+                # in the case of a linear dependence, n can be
+                # determined from the shape of a:
+                n1 = get_inv_arr_size(a)
+            # n1 may be different from n (for instance, when the size
+            # of `a` depends on a fraction of `n`) but it must produce
+            # an array of the same size
+            sz1, _ = get_arr_size(n1)
+            assert sz == sz1, (n, n1, sz, sz1)
+
+
+class TestModuleDeclaration():
+    def test_dependencies(self, tmp_path):
+        f_path = tmp_path / "mod.f90"
+        with f_path.open('w') as ff:
+            ff.write(textwrap.dedent("""\
+            module foo
+              type bar
+                character(len = 4) :: text
+              end type bar
+              type(bar), parameter :: abar = bar('abar')
+            end module foo
+            """))
+        mod = crackfortran.crackfortran([str(f_path)])
+        assert len(mod) == 1
+        assert mod[0]['vars']['abar']['='] == "bar('abar')"
diff --git a/numpy/f2py/tests/test_return_character.py b/numpy/f2py/tests/test_return_character.py
index 7d4ced914..2c999ed0b 100644
--- a/numpy/f2py/tests/test_return_character.py
+++ b/numpy/f2py/tests/test_return_character.py
@@ -80,7 +80,7 @@ cf2py intent(out) ts
       end
 """
 
-    @pytest.mark.xfail(IS_S390X, reason="calback returns ' '")
+    @pytest.mark.xfail(IS_S390X, reason="callback returns ' '")
     @pytest.mark.parametrize('name',
't0,t1,t5,s0,s1,s5,ss'.split(',')) def test_all(self, name): self.check_function(getattr(self.module, name), name) @@ -139,7 +139,7 @@ module f90_return_char end module f90_return_char """ - @pytest.mark.xfail(IS_S390X, reason="calback returns ' '") + @pytest.mark.xfail(IS_S390X, reason="callback returns ' '") @pytest.mark.parametrize('name', 't0,t1,t5,ts,s0,s1,s5,ss'.split(',')) def test_all(self, name): self.check_function(getattr(self.module.f90_return_char, name), name) diff --git a/numpy/f2py/tests/test_symbolic.py b/numpy/f2py/tests/test_symbolic.py new file mode 100644 index 000000000..52cabac53 --- /dev/null +++ b/numpy/f2py/tests/test_symbolic.py @@ -0,0 +1,462 @@ +from numpy.testing import assert_raises +from numpy.f2py.symbolic import ( + Expr, Op, ArithOp, Language, + as_symbol, as_number, as_string, as_array, as_complex, + as_terms, as_factors, eliminate_quotes, insert_quotes, + fromstring, as_expr, as_apply, + as_numer_denom, as_ternary, as_ref, as_deref, + normalize, as_eq, as_ne, as_lt, as_gt, as_le, as_ge + ) +from . import util + + +class TestSymbolic(util.F2PyTest): + + def test_eliminate_quotes(self): + def worker(s): + r, d = eliminate_quotes(s) + s1 = insert_quotes(r, d) + assert s1 == s + + for kind in ['', 'mykind_']: + worker(kind + '"1234" // "ABCD"') + worker(kind + '"1234" // ' + kind + '"ABCD"') + worker(kind + '"1234" // \'ABCD\'') + worker(kind + '"1234" // ' + kind + '\'ABCD\'') + worker(kind + '"1\\"2\'AB\'34"') + worker('a = ' + kind + "'1\\'2\"AB\"34'") + + def test_sanity(self): + x = as_symbol('x') + y = as_symbol('y') + z = as_symbol('z') + + assert x.op == Op.SYMBOL + assert repr(x) == "Expr(Op.SYMBOL, 'x')" + assert x == x + assert x != y + assert hash(x) is not None + + n = as_number(123) + m = as_number(456) + assert n.op == Op.INTEGER + assert repr(n) == "Expr(Op.INTEGER, (123, 4))" + assert n == n + assert n != m + assert hash(n) is not None + + fn = as_number(12.3) + fm = as_number(45.6) + assert fn.op == Op.REAL + assert repr(fn) == "Expr(Op.REAL, (12.3, 4))" + assert fn == fn + assert fn != fm + assert hash(fn) is not None + + c = as_complex(1, 2) + c2 = as_complex(3, 4) + assert c.op == Op.COMPLEX + assert repr(c) == ("Expr(Op.COMPLEX, (Expr(Op.INTEGER, (1, 4))," + " Expr(Op.INTEGER, (2, 4))))") + assert c == c + assert c != c2 + assert hash(c) is not None + + s = as_string("'123'") + s2 = as_string('"ABC"') + assert s.op == Op.STRING + assert repr(s) == "Expr(Op.STRING, (\"'123'\", 1))", repr(s) + assert s == s + assert s != s2 + + a = as_array((n, m)) + b = as_array((n,)) + assert a.op == Op.ARRAY + assert repr(a) == ("Expr(Op.ARRAY, (Expr(Op.INTEGER, (123, 4))," + " Expr(Op.INTEGER, (456, 4))))") + assert a == a + assert a != b + + t = as_terms(x) + u = as_terms(y) + assert t.op == Op.TERMS + assert repr(t) == "Expr(Op.TERMS, {Expr(Op.SYMBOL, 'x'): 1})" + assert t == t + assert t != u + assert hash(t) is not None + + v = as_factors(x) + w = as_factors(y) + assert v.op == Op.FACTORS + assert repr(v) == "Expr(Op.FACTORS, {Expr(Op.SYMBOL, 'x'): 1})" + assert v == v + assert w != v + assert hash(v) is not None + + t = as_ternary(x, y, z) + u = as_ternary(x, z, y) + assert t.op == Op.TERNARY + assert t == t + assert t != u + assert hash(t) is not None + + e = as_eq(x, y) + f = as_lt(x, y) + assert e.op == Op.RELATIONAL + assert e == e + assert e != f + assert hash(e) is not None + + def test_tostring_fortran(self): + x = as_symbol('x') + y = as_symbol('y') + z = as_symbol('z') + n = as_number(123) + m = as_number(456) + a = as_array((n, m)) + c 
= as_complex(n, m) + + assert str(x) == 'x' + assert str(n) == '123' + assert str(a) == '[123, 456]' + assert str(c) == '(123, 456)' + + assert str(Expr(Op.TERMS, {x: 1})) == 'x' + assert str(Expr(Op.TERMS, {x: 2})) == '2 * x' + assert str(Expr(Op.TERMS, {x: -1})) == '-x' + assert str(Expr(Op.TERMS, {x: -2})) == '-2 * x' + assert str(Expr(Op.TERMS, {x: 1, y: 1})) == 'x + y' + assert str(Expr(Op.TERMS, {x: -1, y: -1})) == '-x - y' + assert str(Expr(Op.TERMS, {x: 2, y: 3})) == '2 * x + 3 * y' + assert str(Expr(Op.TERMS, {x: -2, y: 3})) == '-2 * x + 3 * y' + assert str(Expr(Op.TERMS, {x: 2, y: -3})) == '2 * x - 3 * y' + + assert str(Expr(Op.FACTORS, {x: 1})) == 'x' + assert str(Expr(Op.FACTORS, {x: 2})) == 'x ** 2' + assert str(Expr(Op.FACTORS, {x: -1})) == 'x ** -1' + assert str(Expr(Op.FACTORS, {x: -2})) == 'x ** -2' + assert str(Expr(Op.FACTORS, {x: 1, y: 1})) == 'x * y' + assert str(Expr(Op.FACTORS, {x: 2, y: 3})) == 'x ** 2 * y ** 3' + + v = Expr(Op.FACTORS, {x: 2, Expr(Op.TERMS, {x: 1, y: 1}): 3}) + assert str(v) == 'x ** 2 * (x + y) ** 3', str(v) + v = Expr(Op.FACTORS, {x: 2, Expr(Op.FACTORS, {x: 1, y: 1}): 3}) + assert str(v) == 'x ** 2 * (x * y) ** 3', str(v) + + assert str(Expr(Op.APPLY, ('f', (), {}))) == 'f()' + assert str(Expr(Op.APPLY, ('f', (x,), {}))) == 'f(x)' + assert str(Expr(Op.APPLY, ('f', (x, y), {}))) == 'f(x, y)' + assert str(Expr(Op.INDEXING, ('f', x))) == 'f[x]' + + assert str(as_ternary(x, y, z)) == 'merge(y, z, x)' + assert str(as_eq(x, y)) == 'x .eq. y' + assert str(as_ne(x, y)) == 'x .ne. y' + assert str(as_lt(x, y)) == 'x .lt. y' + assert str(as_le(x, y)) == 'x .le. y' + assert str(as_gt(x, y)) == 'x .gt. y' + assert str(as_ge(x, y)) == 'x .ge. y' + + def test_tostring_c(self): + language = Language.C + x = as_symbol('x') + y = as_symbol('y') + z = as_symbol('z') + n = as_number(123) + + assert Expr(Op.FACTORS, {x: 2}).tostring(language=language) == 'x * x' + assert Expr(Op.FACTORS, {x + y: 2}).tostring( + language=language) == '(x + y) * (x + y)' + assert Expr(Op.FACTORS, {x: 12}).tostring( + language=language) == 'pow(x, 12)' + + assert as_apply(ArithOp.DIV, x, y).tostring( + language=language) == 'x / y' + assert as_apply(ArithOp.DIV, x, x + y).tostring( + language=language) == 'x / (x + y)' + assert as_apply(ArithOp.DIV, x - y, x + y).tostring( + language=language) == '(x - y) / (x + y)' + assert (x + (x - y) / (x + y) + n).tostring( + language=language) == '123 + x + (x - y) / (x + y)' + + assert as_ternary(x, y, z).tostring(language=language) == '(x ? 
y : z)' + assert as_eq(x, y).tostring(language=language) == 'x == y' + assert as_ne(x, y).tostring(language=language) == 'x != y' + assert as_lt(x, y).tostring(language=language) == 'x < y' + assert as_le(x, y).tostring(language=language) == 'x <= y' + assert as_gt(x, y).tostring(language=language) == 'x > y' + assert as_ge(x, y).tostring(language=language) == 'x >= y' + + def test_operations(self): + x = as_symbol('x') + y = as_symbol('y') + z = as_symbol('z') + + assert x + x == Expr(Op.TERMS, {x: 2}) + assert x - x == Expr(Op.INTEGER, (0, 4)) + assert x + y == Expr(Op.TERMS, {x: 1, y: 1}) + assert x - y == Expr(Op.TERMS, {x: 1, y: -1}) + assert x * x == Expr(Op.FACTORS, {x: 2}) + assert x * y == Expr(Op.FACTORS, {x: 1, y: 1}) + + assert +x == x + assert -x == Expr(Op.TERMS, {x: -1}), repr(-x) + assert 2 * x == Expr(Op.TERMS, {x: 2}) + assert 2 + x == Expr(Op.TERMS, {x: 1, as_number(1): 2}) + assert 2 * x + 3 * y == Expr(Op.TERMS, {x: 2, y: 3}) + assert (x + y) * 2 == Expr(Op.TERMS, {x: 2, y: 2}) + + assert x ** 2 == Expr(Op.FACTORS, {x: 2}) + assert (x + y) ** 2 == Expr(Op.TERMS, + {Expr(Op.FACTORS, {x: 2}): 1, + Expr(Op.FACTORS, {y: 2}): 1, + Expr(Op.FACTORS, {x: 1, y: 1}): 2}) + assert (x + y) * x == x ** 2 + x * y + assert (x + y) ** 2 == x ** 2 + 2 * x * y + y ** 2 + assert (x + y) ** 2 + (x - y) ** 2 == 2 * x ** 2 + 2 * y ** 2 + assert (x + y) * z == x * z + y * z + assert z * (x + y) == x * z + y * z + + assert (x / 2) == as_apply(ArithOp.DIV, x, as_number(2)) + assert (2 * x / 2) == x + assert (3 * x / 2) == as_apply(ArithOp.DIV, 3*x, as_number(2)) + assert (4 * x / 2) == 2 * x + assert (5 * x / 2) == as_apply(ArithOp.DIV, 5*x, as_number(2)) + assert (6 * x / 2) == 3 * x + assert ((3*5) * x / 6) == as_apply(ArithOp.DIV, 5*x, as_number(2)) + assert (30*x**2*y**4 / (24*x**3*y**3)) == as_apply(ArithOp.DIV, + 5*y, 4*x) + assert ((15 * x / 6) / 5) == as_apply( + ArithOp.DIV, x, as_number(2)), ((15 * x / 6) / 5) + assert (x / (5 / x)) == as_apply(ArithOp.DIV, x**2, as_number(5)) + + assert (x / 2.0) == Expr(Op.TERMS, {x: 0.5}) + + s = as_string('"ABC"') + t = as_string('"123"') + + assert s // t == Expr(Op.STRING, ('"ABC123"', 1)) + assert s // x == Expr(Op.CONCAT, (s, x)) + assert x // s == Expr(Op.CONCAT, (x, s)) + + c = as_complex(1., 2.) + assert -c == as_complex(-1., -2.) 
+ assert c + c == as_expr((1+2j)*2) + assert c * c == as_expr((1+2j)**2) + + def test_substitute(self): + x = as_symbol('x') + y = as_symbol('y') + z = as_symbol('z') + a = as_array((x, y)) + + assert x.substitute({x: y}) == y + assert (x + y).substitute({x: z}) == y + z + assert (x * y).substitute({x: z}) == y * z + assert (x ** 4).substitute({x: z}) == z ** 4 + assert (x / y).substitute({x: z}) == z / y + assert x.substitute({x: y + z}) == y + z + assert a.substitute({x: y + z}) == as_array((y + z, y)) + + assert as_ternary(x, y, z).substitute( + {x: y + z}) == as_ternary(y + z, y, z) + assert as_eq(x, y).substitute( + {x: y + z}) == as_eq(y + z, y) + + def test_fromstring(self): + + x = as_symbol('x') + y = as_symbol('y') + z = as_symbol('z') + f = as_symbol('f') + s = as_string('"ABC"') + t = as_string('"123"') + a = as_array((x, y)) + + assert fromstring('x') == x + assert fromstring('+ x') == x + assert fromstring('- x') == -x + assert fromstring('x + y') == x + y + assert fromstring('x + 1') == x + 1 + assert fromstring('x * y') == x * y + assert fromstring('x * 2') == x * 2 + assert fromstring('x / y') == x / y + assert fromstring('x ** 2', + language=Language.Python) == x ** 2 + assert fromstring('x ** 2 ** 3', + language=Language.Python) == x ** 2 ** 3 + assert fromstring('(x + y) * z') == (x + y) * z + + assert fromstring('f(x)') == f(x) + assert fromstring('f(x,y)') == f(x, y) + assert fromstring('f[x]') == f[x] + assert fromstring('f[x][y]') == f[x][y] + + assert fromstring('"ABC"') == s + assert normalize(fromstring('"ABC" // "123" ', + language=Language.Fortran)) == s // t + assert fromstring('f("ABC")') == f(s) + assert fromstring('MYSTRKIND_"ABC"') == as_string('"ABC"', 'MYSTRKIND') + + assert fromstring('(/x, y/)') == a, fromstring('(/x, y/)') + assert fromstring('f((/x, y/))') == f(a) + assert fromstring('(/(x+y)*z/)') == as_array(((x+y)*z,)) + + assert fromstring('123') == as_number(123) + assert fromstring('123_2') == as_number(123, 2) + assert fromstring('123_myintkind') == as_number(123, 'myintkind') + + assert fromstring('123.0') == as_number(123.0, 4) + assert fromstring('123.0_4') == as_number(123.0, 4) + assert fromstring('123.0_8') == as_number(123.0, 8) + assert fromstring('123.0e0') == as_number(123.0, 4) + assert fromstring('123.0d0') == as_number(123.0, 8) + assert fromstring('123d0') == as_number(123.0, 8) + assert fromstring('123e-0') == as_number(123.0, 4) + assert fromstring('123d+0') == as_number(123.0, 8) + assert fromstring('123.0_myrealkind') == as_number(123.0, 'myrealkind') + assert fromstring('3E4') == as_number(30000.0, 4) + + assert fromstring('(1, 2)') == as_complex(1, 2) + assert fromstring('(1e2, PI)') == as_complex( + as_number(100.0), as_symbol('PI')) + + assert fromstring('[1, 2]') == as_array((as_number(1), as_number(2))) + + assert fromstring('POINT(x, y=1)') == as_apply( + as_symbol('POINT'), x, y=as_number(1)) + assert (fromstring('PERSON(name="John", age=50, shape=(/34, 23/))') + == as_apply(as_symbol('PERSON'), + name=as_string('"John"'), + age=as_number(50), + shape=as_array((as_number(34), as_number(23))))) + + assert fromstring('x?y:z') == as_ternary(x, y, z) + + assert fromstring('*x') == as_deref(x) + assert fromstring('**x') == as_deref(as_deref(x)) + assert fromstring('&x') == as_ref(x) + assert fromstring('(*x) * (*y)') == as_deref(x) * as_deref(y) + assert fromstring('(*x) * *y') == as_deref(x) * as_deref(y) + assert fromstring('*x * *y') == as_deref(x) * as_deref(y) + assert fromstring('*x**y') == as_deref(x) * as_deref(y) 
+ + assert fromstring('x == y') == as_eq(x, y) + assert fromstring('x != y') == as_ne(x, y) + assert fromstring('x < y') == as_lt(x, y) + assert fromstring('x > y') == as_gt(x, y) + assert fromstring('x <= y') == as_le(x, y) + assert fromstring('x >= y') == as_ge(x, y) + + assert fromstring('x .eq. y', language=Language.Fortran) == as_eq(x, y) + assert fromstring('x .ne. y', language=Language.Fortran) == as_ne(x, y) + assert fromstring('x .lt. y', language=Language.Fortran) == as_lt(x, y) + assert fromstring('x .gt. y', language=Language.Fortran) == as_gt(x, y) + assert fromstring('x .le. y', language=Language.Fortran) == as_le(x, y) + assert fromstring('x .ge. y', language=Language.Fortran) == as_ge(x, y) + + def test_traverse(self): + x = as_symbol('x') + y = as_symbol('y') + z = as_symbol('z') + f = as_symbol('f') + + # Use traverse to substitute a symbol + def replace_visit(s, r=z): + if s == x: + return r + + assert x.traverse(replace_visit) == z + assert y.traverse(replace_visit) == y + assert z.traverse(replace_visit) == z + assert (f(y)).traverse(replace_visit) == f(y) + assert (f(x)).traverse(replace_visit) == f(z) + assert (f[y]).traverse(replace_visit) == f[y] + assert (f[z]).traverse(replace_visit) == f[z] + assert (x + y + z).traverse(replace_visit) == (2 * z + y) + assert (x + f(y, x - z)).traverse( + replace_visit) == (z + f(y, as_number(0))) + assert as_eq(x, y).traverse(replace_visit) == as_eq(z, y) + + # Use traverse to collect symbols, method 1 + function_symbols = set() + symbols = set() + + def collect_symbols(s): + if s.op is Op.APPLY: + oper = s.data[0] + function_symbols.add(oper) + if oper in symbols: + symbols.remove(oper) + elif s.op is Op.SYMBOL and s not in function_symbols: + symbols.add(s) + + (x + f(y, x - z)).traverse(collect_symbols) + assert function_symbols == {f} + assert symbols == {x, y, z} + + # Use traverse to collect symbols, method 2 + def collect_symbols2(expr, symbols): + if expr.op is Op.SYMBOL: + symbols.add(expr) + + symbols = set() + (x + f(y, x - z)).traverse(collect_symbols2, symbols) + assert symbols == {x, y, z, f} + + # Use traverse to partially collect symbols + def collect_symbols3(expr, symbols): + if expr.op is Op.APPLY: + # skip traversing function calls + return expr + if expr.op is Op.SYMBOL: + symbols.add(expr) + + symbols = set() + (x + f(y, x - z)).traverse(collect_symbols3, symbols) + assert symbols == {x} + + def test_linear_solve(self): + x = as_symbol('x') + y = as_symbol('y') + z = as_symbol('z') + + assert x.linear_solve(x) == (as_number(1), as_number(0)) + assert (x+1).linear_solve(x) == (as_number(1), as_number(1)) + assert (2*x).linear_solve(x) == (as_number(2), as_number(0)) + assert (2*x+3).linear_solve(x) == (as_number(2), as_number(3)) + assert as_number(3).linear_solve(x) == (as_number(0), as_number(3)) + assert y.linear_solve(x) == (as_number(0), y) + assert (y*z).linear_solve(x) == (as_number(0), y * z) + + assert (x+y).linear_solve(x) == (as_number(1), y) + assert (z*x+y).linear_solve(x) == (z, y) + assert ((z+y)*x+y).linear_solve(x) == (z + y, y) + assert (z*y*x+y).linear_solve(x) == (z * y, y) + + assert_raises(RuntimeError, lambda: (x*x).linear_solve(x)) + + def test_as_numer_denom(self): + x = as_symbol('x') + y = as_symbol('y') + n = as_number(123) + + assert as_numer_denom(x) == (x, as_number(1)) + assert as_numer_denom(x / n) == (x, n) + assert as_numer_denom(n / x) == (n, x) + assert as_numer_denom(x / y) == (x, y) + assert as_numer_denom(x * y) == (x * y, as_number(1)) + assert as_numer_denom(n + x / 
y) == (x + n * y, y) + assert as_numer_denom(n + x / (y - x / n)) == (y * n ** 2, y * n - x) + + def test_polynomial_atoms(self): + x = as_symbol('x') + y = as_symbol('y') + n = as_number(123) + + assert x.polynomial_atoms() == {x} + assert n.polynomial_atoms() == set() + assert (y[x]).polynomial_atoms() == {y[x]} + assert (y(x)).polynomial_atoms() == {y(x)} + assert (y(x) + x).polynomial_atoms() == {y(x), x} + assert (y(x) * x[y]).polynomial_atoms() == {y(x), x[y]} + assert (y(x) ** x).polynomial_atoms() == {y(x)} diff --git a/numpy/f2py/tests/util.py b/numpy/f2py/tests/util.py index eace3c9fc..1a6805e75 100644 --- a/numpy/f2py/tests/util.py +++ b/numpy/f2py/tests/util.py @@ -242,9 +242,6 @@ def build_module_distutils(source_files, config_code, module_name, **kw): Build a module via distutils and import it. """ - from numpy.distutils.misc_util import Configuration - from numpy.distutils.core import setup - d = get_module_dir() # Copy files diff --git a/numpy/fft/_pocketfft.c b/numpy/fft/_pocketfft.c index ba9995f97..1eb2eba18 100644 --- a/numpy/fft/_pocketfft.c +++ b/numpy/fft/_pocketfft.c @@ -9,17 +9,19 @@ * Copyright (C) 2004-2018 Max-Planck-Society * \author Martin Reinecke */ - #define NPY_NO_DEPRECATED_API NPY_API_VERSION -#include "Python.h" +#define PY_SSIZE_T_CLEAN +#include <Python.h> + #include "numpy/arrayobject.h" +#include "npy_config.h" + #include <math.h> #include <string.h> #include <stdlib.h> -#include "npy_config.h" #define restrict NPY_RESTRICT #define RALLOC(type,num) \ diff --git a/numpy/lib/_datasource.py b/numpy/lib/_datasource.py index 56b94853d..8201d3772 100644 --- a/numpy/lib/_datasource.py +++ b/numpy/lib/_datasource.py @@ -324,7 +324,6 @@ class DataSource: # a significant fraction of numpy's total import time. import shutil from urllib.request import urlopen - from urllib.error import URLError upath = self.abspath(path) diff --git a/numpy/lib/_iotools.py b/numpy/lib/_iotools.py index a576925d6..4a5ac1285 100644 --- a/numpy/lib/_iotools.py +++ b/numpy/lib/_iotools.py @@ -23,8 +23,7 @@ def _decode_line(line, encoding=None): Returns ------- - decoded_line : unicode - Unicode in Python 2, a str (unicode) in Python 3. 
+ decoded_line : str """ if type(line) is bytes: diff --git a/numpy/lib/arraypad.pyi b/numpy/lib/arraypad.pyi index 49ce8e683..d7c5f4844 100644 --- a/numpy/lib/arraypad.pyi +++ b/numpy/lib/arraypad.pyi @@ -15,7 +15,7 @@ from numpy.typing import ( ArrayLike, NDArray, _ArrayLikeInt, - _NestedSequence, + _FiniteNestedSequence, _SupportsArray, ) @@ -45,7 +45,7 @@ _ModeKind = L[ "empty", ] -_ArrayLike = _NestedSequence[_SupportsArray[dtype[_SCT]]] +_ArrayLike = _FiniteNestedSequence[_SupportsArray[dtype[_SCT]]] __all__: List[str] diff --git a/numpy/lib/arraysetops.pyi b/numpy/lib/arraysetops.pyi index 029aa1474..6f13ec74b 100644 --- a/numpy/lib/arraysetops.pyi +++ b/numpy/lib/arraysetops.pyi @@ -1,12 +1,335 @@ -from typing import List +from typing import ( + Literal as L, + Any, + List, + Union, + TypeVar, + Tuple, + overload, + SupportsIndex, +) + +from numpy import ( + dtype, + generic, + number, + bool_, + ushort, + ubyte, + uintc, + uint, + ulonglong, + short, + int8, + byte, + intc, + int_, + intp, + longlong, + half, + single, + double, + longdouble, + csingle, + cdouble, + clongdouble, + timedelta64, + datetime64, + object_, + str_, + bytes_, + void, +) + +from numpy.typing import ( + ArrayLike, + NDArray, + _FiniteNestedSequence, + _SupportsArray, + _ArrayLikeBool_co, + _ArrayLikeDT64_co, + _ArrayLikeTD64_co, + _ArrayLikeObject_co, + _ArrayLikeNumber_co, +) + +_SCT = TypeVar("_SCT", bound=generic) +_NumberType = TypeVar("_NumberType", bound=number[Any]) + +# Explicitly set all allowed values to prevent accidental castings to +# abstract dtypes (their common super-type). +# +# Only relevant if two or more arguments are parametrized, (e.g. `setdiff1d`) +# which could result in, for example, `int64` and `float64`producing a +# `number[_64Bit]` array +_SCTNoCast = TypeVar( + "_SCTNoCast", + bool_, + ushort, + ubyte, + uintc, + uint, + ulonglong, + short, + byte, + intc, + int_, + longlong, + half, + single, + double, + longdouble, + csingle, + cdouble, + clongdouble, + timedelta64, + datetime64, + object_, + str_, + bytes_, + void, +) + +_ArrayLike = _FiniteNestedSequence[_SupportsArray[dtype[_SCT]]] __all__: List[str] -def ediff1d(ary, to_end=..., to_begin=...): ... -def unique(ar, return_index=..., return_inverse=..., return_counts=..., axis=...): ... -def intersect1d(ar1, ar2, assume_unique=..., return_indices=...): ... -def setxor1d(ar1, ar2, assume_unique=...): ... -def in1d(ar1, ar2, assume_unique=..., invert=...): ... -def isin(element, test_elements, assume_unique=..., invert=...): ... -def union1d(ar1, ar2): ... -def setdiff1d(ar1, ar2, assume_unique=...): ... +@overload +def ediff1d( + ary: _ArrayLikeBool_co, + to_end: None | ArrayLike = ..., + to_begin: None | ArrayLike = ..., +) -> NDArray[int8]: ... +@overload +def ediff1d( + ary: _ArrayLike[_NumberType], + to_end: None | ArrayLike = ..., + to_begin: None | ArrayLike = ..., +) -> NDArray[_NumberType]: ... +@overload +def ediff1d( + ary: _ArrayLikeNumber_co, + to_end: None | ArrayLike = ..., + to_begin: None | ArrayLike = ..., +) -> NDArray[Any]: ... +@overload +def ediff1d( + ary: _ArrayLikeDT64_co | _ArrayLikeTD64_co, + to_end: None | ArrayLike = ..., + to_begin: None | ArrayLike = ..., +) -> NDArray[timedelta64]: ... +@overload +def ediff1d( + ary: _ArrayLikeObject_co, + to_end: None | ArrayLike = ..., + to_begin: None | ArrayLike = ..., +) -> NDArray[object_]: ... 
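[Editor's note] The `ediff1d` ladder above shows the stub pattern used throughout this file: the most specific input types come first (note how boolean input is mapped to `int8` rather than `bool_`), and plain `ArrayLike` acts as the catch-all yielding `NDArray[Any]`. A reduced, runnable model of the same overload pattern; the `head2` function is hypothetical, purely for illustration:

```python
from typing import Any, TypeVar, overload
import numpy as np
from numpy.typing import ArrayLike, NDArray

_SCT = TypeVar("_SCT", bound=np.generic)

@overload
def head2(a: NDArray[_SCT]) -> NDArray[_SCT]: ...  # dtype known: keep it
@overload
def head2(a: ArrayLike) -> NDArray[Any]: ...       # fallback: Any

def head2(a):
    # toy runtime implementation so the sketch is self-contained
    return np.asarray(a)[:2]
```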
+ +@overload +def unique( + ar: _ArrayLike[_SCT], + return_index: L[False] = ..., + return_inverse: L[False] = ..., + return_counts: L[False] = ..., + axis: None | SupportsIndex = ..., +) -> NDArray[_SCT]: ... +@overload +def unique( + ar: ArrayLike, + return_index: L[False] = ..., + return_inverse: L[False] = ..., + return_counts: L[False] = ..., + axis: None | SupportsIndex = ..., +) -> NDArray[Any]: ... +@overload +def unique( + ar: _ArrayLike[_SCT], + return_index: L[True] = ..., + return_inverse: L[False] = ..., + return_counts: L[False] = ..., + axis: None | SupportsIndex = ..., +) -> Tuple[NDArray[_SCT], NDArray[intp]]: ... +@overload +def unique( + ar: ArrayLike, + return_index: L[True] = ..., + return_inverse: L[False] = ..., + return_counts: L[False] = ..., + axis: None | SupportsIndex = ..., +) -> Tuple[NDArray[Any], NDArray[intp]]: ... +@overload +def unique( + ar: _ArrayLike[_SCT], + return_index: L[False] = ..., + return_inverse: L[True] = ..., + return_counts: L[False] = ..., + axis: None | SupportsIndex = ..., +) -> Tuple[NDArray[_SCT], NDArray[intp]]: ... +@overload +def unique( + ar: ArrayLike, + return_index: L[False] = ..., + return_inverse: L[True] = ..., + return_counts: L[False] = ..., + axis: None | SupportsIndex = ..., +) -> Tuple[NDArray[Any], NDArray[intp]]: ... +@overload +def unique( + ar: _ArrayLike[_SCT], + return_index: L[False] = ..., + return_inverse: L[False] = ..., + return_counts: L[True] = ..., + axis: None | SupportsIndex = ..., +) -> Tuple[NDArray[_SCT], NDArray[intp]]: ... +@overload +def unique( + ar: ArrayLike, + return_index: L[False] = ..., + return_inverse: L[False] = ..., + return_counts: L[True] = ..., + axis: None | SupportsIndex = ..., +) -> Tuple[NDArray[Any], NDArray[intp]]: ... +@overload +def unique( + ar: _ArrayLike[_SCT], + return_index: L[True] = ..., + return_inverse: L[True] = ..., + return_counts: L[False] = ..., + axis: None | SupportsIndex = ..., +) -> Tuple[NDArray[_SCT], NDArray[intp], NDArray[intp]]: ... +@overload +def unique( + ar: ArrayLike, + return_index: L[True] = ..., + return_inverse: L[True] = ..., + return_counts: L[False] = ..., + axis: None | SupportsIndex = ..., +) -> Tuple[NDArray[Any], NDArray[intp], NDArray[intp]]: ... +@overload +def unique( + ar: _ArrayLike[_SCT], + return_index: L[True] = ..., + return_inverse: L[False] = ..., + return_counts: L[True] = ..., + axis: None | SupportsIndex = ..., +) -> Tuple[NDArray[_SCT], NDArray[intp], NDArray[intp]]: ... +@overload +def unique( + ar: ArrayLike, + return_index: L[True] = ..., + return_inverse: L[False] = ..., + return_counts: L[True] = ..., + axis: None | SupportsIndex = ..., +) -> Tuple[NDArray[Any], NDArray[intp], NDArray[intp]]: ... +@overload +def unique( + ar: _ArrayLike[_SCT], + return_index: L[False] = ..., + return_inverse: L[True] = ..., + return_counts: L[True] = ..., + axis: None | SupportsIndex = ..., +) -> Tuple[NDArray[_SCT], NDArray[intp], NDArray[intp]]: ... +@overload +def unique( + ar: ArrayLike, + return_index: L[False] = ..., + return_inverse: L[True] = ..., + return_counts: L[True] = ..., + axis: None | SupportsIndex = ..., +) -> Tuple[NDArray[Any], NDArray[intp], NDArray[intp]]: ... +@overload +def unique( + ar: _ArrayLike[_SCT], + return_index: L[True] = ..., + return_inverse: L[True] = ..., + return_counts: L[True] = ..., + axis: None | SupportsIndex = ..., +) -> Tuple[NDArray[_SCT], NDArray[intp], NDArray[intp], NDArray[intp]]: ... 
+@overload +def unique( + ar: ArrayLike, + return_index: L[True] = ..., + return_inverse: L[True] = ..., + return_counts: L[True] = ..., + axis: None | SupportsIndex = ..., +) -> Tuple[NDArray[Any], NDArray[intp], NDArray[intp], NDArray[intp]]: ... + +@overload +def intersect1d( + ar1: _ArrayLike[_SCTNoCast], + ar2: _ArrayLike[_SCTNoCast], + assume_unique: bool = ..., + return_indices: L[False] = ..., +) -> NDArray[_SCTNoCast]: ... +@overload +def intersect1d( + ar1: ArrayLike, + ar2: ArrayLike, + assume_unique: bool = ..., + return_indices: L[False] = ..., +) -> NDArray[Any]: ... +@overload +def intersect1d( + ar1: _ArrayLike[_SCTNoCast], + ar2: _ArrayLike[_SCTNoCast], + assume_unique: bool = ..., + return_indices: L[True] = ..., +) -> Tuple[NDArray[_SCTNoCast], NDArray[intp], NDArray[intp]]: ... +@overload +def intersect1d( + ar1: ArrayLike, + ar2: ArrayLike, + assume_unique: bool = ..., + return_indices: L[True] = ..., +) -> Tuple[NDArray[Any], NDArray[intp], NDArray[intp]]: ... + +@overload +def setxor1d( + ar1: _ArrayLike[_SCTNoCast], + ar2: _ArrayLike[_SCTNoCast], + assume_unique: bool = ..., +) -> NDArray[_SCTNoCast]: ... +@overload +def setxor1d( + ar1: ArrayLike, + ar2: ArrayLike, + assume_unique: bool = ..., +) -> NDArray[Any]: ... + +def in1d( + ar1: ArrayLike, + ar2: ArrayLike, + assume_unique: bool = ..., + invert: bool = ..., +) -> NDArray[bool_]: ... + +def isin( + element: ArrayLike, + test_elements: ArrayLike, + assume_unique: bool = ..., + invert: bool = ..., +) -> NDArray[bool_]: ... + +@overload +def union1d( + ar1: _ArrayLike[_SCTNoCast], + ar2: _ArrayLike[_SCTNoCast], +) -> NDArray[_SCTNoCast]: ... +@overload +def union1d( + ar1: ArrayLike, + ar2: ArrayLike, +) -> NDArray[Any]: ... + +@overload +def setdiff1d( + ar1: _ArrayLike[_SCTNoCast], + ar2: _ArrayLike[_SCTNoCast], + assume_unique: bool = ..., +) -> NDArray[_SCTNoCast]: ... +@overload +def setdiff1d( + ar1: ArrayLike, + ar2: ArrayLike, + assume_unique: bool = ..., +) -> NDArray[Any]: ... diff --git a/numpy/lib/format.py b/numpy/lib/format.py index e566e253d..3967b43ee 100644 --- a/numpy/lib/format.py +++ b/numpy/lib/format.py @@ -291,7 +291,7 @@ def descr_to_dtype(descr): Parameters ---------- descr : object - The object retreived by dtype.descr. Can be passed to + The object retrieved by dtype.descr. Can be passed to `numpy.dtype()` in order to replicate the input dtype. Returns diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index d875a00ae..20e32a78d 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -33,7 +33,7 @@ from numpy.core.umath import _add_newdoc_ufunc as add_newdoc_ufunc import builtins # needed in this module for compatibility -from numpy.lib.histograms import histogram, histogramdd +from numpy.lib.histograms import histogram, histogramdd # noqa: F401 array_function_dispatch = functools.partial( @@ -268,6 +268,19 @@ def iterable(y): >>> np.iterable(2) False + Notes + ----- + In most cases, the results of ``np.iterable(obj)`` are consistent with + ``isinstance(obj, collections.abc.Iterable)``. 
One notable exception is + the treatment of 0-dimensional arrays:: + + >>> from collections.abc import Iterable + >>> a = np.array(1.0) # 0-dimensional numpy array + >>> isinstance(a, Iterable) + True + >>> np.iterable(a) + False + """ try: iter(y) @@ -657,11 +670,16 @@ def select(condlist, choicelist, default=0): Examples -------- - >>> x = np.arange(10) - >>> condlist = [x<3, x>5] + >>> x = np.arange(6) + >>> condlist = [x<3, x>3] >>> choicelist = [x, x**2] - >>> np.select(condlist, choicelist) - array([ 0, 1, 2, ..., 49, 64, 81]) + >>> np.select(condlist, choicelist, 42) + array([ 0, 1, 2, 42, 16, 25]) + + >>> condlist = [x<=4, x>3] + >>> choicelist = [x, x**2] + >>> np.select(condlist, choicelist, 55) + array([ 0, 1, 2, 3, 4, 25]) """ # Check the size of condlist and choicelist are the same, or abort. @@ -779,6 +797,17 @@ def copy(a, order='K', subok=False): >>> x[0] == z[0] False + Note that, np.copy clears previously set WRITEABLE=False flag. + + >>> a = np.array([1, 2, 3]) + >>> a.flags["WRITEABLE"] = False + >>> b = np.copy(a) + >>> b.flags["WRITEABLE"] + True + >>> b[0] = 3 + >>> b + array([3, 2, 3]) + Note that np.copy is a shallow copy and will not copy object elements within arrays. This is mainly important for arrays containing Python objects. The new array will contain the @@ -2804,9 +2833,9 @@ def blackman(M): """ if M < 1: - return array([]) + return array([], dtype=np.result_type(M, 0.0)) if M == 1: - return ones(1, float) + return ones(1, dtype=np.result_type(M, 0.0)) n = arange(1-M, M, 2) return 0.42 + 0.5*cos(pi*n/(M-1)) + 0.08*cos(2.0*pi*n/(M-1)) @@ -2913,9 +2942,9 @@ def bartlett(M): """ if M < 1: - return array([]) + return array([], dtype=np.result_type(M, 0.0)) if M == 1: - return ones(1, float) + return ones(1, dtype=np.result_type(M, 0.0)) n = arange(1-M, M, 2) return where(less_equal(n, 0), 1 + n/(M-1), 1 - n/(M-1)) @@ -3017,9 +3046,9 @@ def hanning(M): """ if M < 1: - return array([]) + return array([], dtype=np.result_type(M, 0.0)) if M == 1: - return ones(1, float) + return ones(1, dtype=np.result_type(M, 0.0)) n = arange(1-M, M, 2) return 0.5 + 0.5*cos(pi*n/(M-1)) @@ -3117,9 +3146,9 @@ def hamming(M): """ if M < 1: - return array([]) + return array([], dtype=np.result_type(M, 0.0)) if M == 1: - return ones(1, float) + return ones(1, dtype=np.result_type(M, 0.0)) n = arange(1-M, M, 2) return 0.54 + 0.46*cos(pi*n/(M-1)) @@ -3252,7 +3281,7 @@ def i0(x): Her Majesty's Stationery Office, 1962. .. [2] M. Abramowitz and I. A. Stegun, *Handbook of Mathematical Functions*, 10th printing, New York: Dover, 1964, pp. 379. - http://www.math.sfu.ca/~cbm/aands/page_379.htm + https://personal.math.ubc.ca/~cbm/aands/page_379.htm .. [3] https://metacpan.org/pod/distribution/Math-Cephes/lib/Math/Cephes.pod#i0:-Modified-Bessel-function-of-order-zero Examples @@ -3396,7 +3425,7 @@ def kaiser(M, beta): """ if M == 1: - return np.array([1.]) + return np.ones(1, dtype=np.result_type(M, 0.0)) n = arange(0, M) alpha = (M-1)/2.0 return i0(beta * sqrt(1-((n-alpha)/alpha)**2.0))/i0(float(beta)) @@ -3714,16 +3743,15 @@ def _median(a, axis=None, out=None, overwrite_input=False): indexer[axis] = slice(index-1, index+1) indexer = tuple(indexer) + # Use mean in both odd and even case to coerce data type, + # using out array if needed. 
+    rout = mean(part[indexer], axis=axis, out=out)
     # Check if the array contains any nan's
     if np.issubdtype(a.dtype, np.inexact) and sz > 0:
-        # warn and return nans like mean would
-        rout = mean(part[indexer], axis=axis, out=out)
-        return np.lib.utils._median_nancheck(part, rout, axis, out)
-    else:
-        # if there are no nans
-        # Use mean in odd and even case to coerce data type
-        # and check, use out array.
-        return mean(part[indexer], axis=axis, out=out)
+        # If nans are possible, warn and replace by nans like mean would.
+        rout = np.lib.utils._median_nancheck(part, rout, axis)
+
+    return rout
 
 
 def _percentile_dispatcher(a, q, axis=None, out=None, overwrite_input=None,
@@ -4277,7 +4305,13 @@ def meshgrid(*xi, copy=True, sparse=False, indexing='xy'):
 
         .. versionadded:: 1.7.0
     sparse : bool, optional
-        If True a sparse grid is returned in order to conserve memory.
+        If True the shape of the returned coordinate array for dimension *i*
+        is reduced from ``(N1, ..., Ni, ... Nn)`` to
+        ``(1, ..., 1, Ni, 1, ..., 1)``. These sparse coordinate grids are
+        intended to be used with :ref:`basics.broadcasting`. When all
+        coordinates are used in an expression, broadcasting still leads to a
+        fully-dimensional result array.
+
        Default is False.
 
         .. versionadded:: 1.7.0
@@ -4348,17 +4382,30 @@ def meshgrid(*xi, copy=True, sparse=False, indexing='xy'):
     array([[0.],
            [1.]])
 
-    `meshgrid` is very useful to evaluate functions on a grid.
+    `meshgrid` is very useful to evaluate functions on a grid. If the
+    function depends on all coordinates, you can use the parameter
+    ``sparse=True`` to save memory and computation time.
+
+    >>> x = np.linspace(-5, 5, 101)
+    >>> y = np.linspace(-5, 5, 101)
+    >>> # full coordinate arrays
+    >>> xx, yy = np.meshgrid(x, y)
+    >>> zz = np.sqrt(xx**2 + yy**2)
+    >>> xx.shape, yy.shape, zz.shape
+    ((101, 101), (101, 101), (101, 101))
+    >>> # sparse coordinate arrays
+    >>> xs, ys = np.meshgrid(x, y, sparse=True)
+    >>> zs = np.sqrt(xs**2 + ys**2)
+    >>> xs.shape, ys.shape, zs.shape
+    ((1, 101), (101, 1), (101, 101))
+    >>> np.array_equal(zz, zs)
+    True
 
     >>> import matplotlib.pyplot as plt
-    >>> x = np.arange(-5, 5, 0.1)
-    >>> y = np.arange(-5, 5, 0.1)
-    >>> xx, yy = np.meshgrid(x, y, sparse=True)
-    >>> z = np.sin(xx**2 + yy**2) / (xx**2 + yy**2)
-    >>> h = plt.contourf(x, y, z)
+    >>> h = plt.contourf(x, y, zs)
     >>> plt.axis('scaled')
+    >>> plt.colorbar()
     >>> plt.show()
-
     """
     ndim = len(xi)
@@ -4711,9 +4758,8 @@ def insert(arr, obj, values, axis=None):
         if indices.size == 1:
             index = indices.item()
             if index < -N or index > N:
-                raise IndexError(
-                    "index %i is out of bounds for axis %i with "
-                    "size %i" % (obj, axis, N))
+                raise IndexError(f"index {obj} is out of bounds for axis {axis} "
+                                 f"with size {N}")
             if (index < 0):
                 index += N
diff --git a/numpy/lib/function_base.pyi b/numpy/lib/function_base.pyi
index 69c615c9c..9a53b24f2 100644
--- a/numpy/lib/function_base.pyi
+++ b/numpy/lib/function_base.pyi
@@ -1,7 +1,60 @@
-from typing import List
+import sys
+from typing import (
+    Literal as L,
+    List,
+    Type,
+    Sequence,
+    Tuple,
+    Union,
+    Any,
+    TypeVar,
+    Iterator,
+    overload,
+    Callable,
+    Protocol,
+    SupportsIndex,
+    Iterable,
+    SupportsInt,
+)
+
+if sys.version_info >= (3, 10):
+    from typing import TypeGuard
+else:
+    from typing_extensions import TypeGuard
 
 from numpy import (
     vectorize as vectorize,
+    ufunc,
+    dtype,
+    generic,
+    floating,
+    complexfloating,
+    intp,
+    float64,
+    complex128,
+    timedelta64,
+    datetime64,
+    object_,
+    _OrderKACF,
+)
+
+from numpy.typing import (
+    NDArray,
+
ArrayLike, + DTypeLike, + _ShapeLike, + _ScalarLike_co, + _SupportsDType, + _FiniteNestedSequence, + _SupportsArray, + _ArrayLikeInt_co, + _ArrayLikeFloat_co, + _ArrayLikeComplex_co, + _ArrayLikeTD64_co, + _ArrayLikeDT64_co, + _ArrayLikeObject_co, + _FloatLike_co, + _ComplexLike_co, ) from numpy.core.function_base import ( @@ -12,46 +65,632 @@ from numpy.core.multiarray import ( add_docstring as add_docstring, bincount as bincount, ) + from numpy.core.umath import _add_newdoc_ufunc +_T = TypeVar("_T") +_T_co = TypeVar("_T_co", covariant=True) +_SCT = TypeVar("_SCT", bound=generic) +_ArrayType = TypeVar("_ArrayType", bound=NDArray[Any]) + +_2Tuple = Tuple[_T, _T] +_ArrayLike = _FiniteNestedSequence[_SupportsArray[dtype[_SCT]]] +_DTypeLike = Union[ + dtype[_SCT], + Type[_SCT], + _SupportsDType[dtype[_SCT]], +] + +class _TrimZerosSequence(Protocol[_T_co]): + def __len__(self) -> int: ... + def __getitem__(self, key: slice, /) -> _T_co: ... + def __iter__(self) -> Iterator[Any]: ... + +class _SupportsWriteFlush(Protocol): + def write(self, s: str, /) -> object: ... + def flush(self) -> object: ... + __all__: List[str] -add_newdoc_ufunc = _add_newdoc_ufunc - -def rot90(m, k=..., axes = ...): ... -def flip(m, axis=...): ... -def iterable(y): ... -def average(a, axis=..., weights=..., returned=...): ... -def asarray_chkfinite(a, dtype=..., order=...): ... -def piecewise(x, condlist, funclist, *args, **kw): ... -def select(condlist, choicelist, default=...): ... -def copy(a, order=..., subok=...): ... -def gradient(f, *varargs, axis=..., edge_order=...): ... -def diff(a, n=..., axis=..., prepend = ..., append = ...): ... -def interp(x, xp, fp, left=..., right=..., period=...): ... -def angle(z, deg=...): ... -def unwrap(p, discont = ..., axis=..., *, period=...): ... -def sort_complex(a): ... -def trim_zeros(filt, trim=...): ... -def extract(condition, arr): ... -def place(arr, mask, vals): ... -def disp(mesg, device=..., linefeed=...): ... -def cov(m, y=..., rowvar=..., bias=..., ddof=..., fweights=..., aweights=..., *, dtype=...): ... -def corrcoef(x, y=..., rowvar=..., bias = ..., ddof = ..., *, dtype=...): ... -def blackman(M): ... -def bartlett(M): ... -def hanning(M): ... -def hamming(M): ... -def i0(x): ... -def kaiser(M, beta): ... -def sinc(x): ... -def msort(a): ... -def median(a, axis=..., out=..., overwrite_input=..., keepdims=...): ... -def percentile(a, q, axis=..., out=..., overwrite_input=..., interpolation=..., keepdims=...): ... -def quantile(a, q, axis=..., out=..., overwrite_input=..., interpolation=..., keepdims=...): ... -def trapz(y, x=..., dx=..., axis=...): ... -def meshgrid(*xi, copy=..., sparse=..., indexing=...): ... -def delete(arr, obj, axis=...): ... -def insert(arr, obj, values, axis=...): ... -def append(arr, values, axis=...): ... -def digitize(x, bins, right=...): ... +# NOTE: This is in reality a re-export of `np.core.umath._add_newdoc_ufunc` +def add_newdoc_ufunc(ufunc: ufunc, new_docstring: str, /) -> None: ... + +@overload +def rot90( + m: _ArrayLike[_SCT], + k: int = ..., + axes: Tuple[int, int] = ..., +) -> NDArray[_SCT]: ... +@overload +def rot90( + m: ArrayLike, + k: int = ..., + axes: Tuple[int, int] = ..., +) -> NDArray[Any]: ... + +@overload +def flip(m: _SCT, axis: None = ...) -> _SCT: ... +@overload +def flip(m: _ScalarLike_co, axis: None = ...) -> Any: ... +@overload +def flip(m: _ArrayLike[_SCT], axis: None | _ShapeLike = ...) -> NDArray[_SCT]: ... +@overload +def flip(m: ArrayLike, axis: None | _ShapeLike = ...) -> NDArray[Any]: ... 
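
The stubs above give `flip` a scalar-passthrough overload and thread the
input's scalar type through both `flip` and `rot90`. A minimal sketch of how
these are expected to resolve under mypy, in the style of the reveal tests
under `numpy/typing/tests/data/reveal` (the `AR_f8` annotation is purely
illustrative, and `reveal_type` is a checker construct, not runtime code)::

    import numpy as np
    import numpy.typing as npt

    AR_f8: npt.NDArray[np.float64]

    reveal_type(np.flip(np.float64(1.0)))  # expected roughly: float64
    reveal_type(np.flip(AR_f8))            # expected roughly: ndarray[Any, dtype[float64]]
    reveal_type(np.rot90(AR_f8))           # expected roughly: ndarray[Any, dtype[float64]]
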
+ +def iterable(y: object) -> TypeGuard[Iterable[Any]]: ... + +@overload +def average( + a: _ArrayLikeFloat_co, + axis: None = ..., + weights: None | _ArrayLikeFloat_co= ..., + returned: L[False] = ..., +) -> floating[Any]: ... +@overload +def average( + a: _ArrayLikeComplex_co, + axis: None = ..., + weights: None | _ArrayLikeComplex_co = ..., + returned: L[False] = ..., +) -> complexfloating[Any, Any]: ... +@overload +def average( + a: _ArrayLikeObject_co, + axis: None = ..., + weights: None | Any = ..., + returned: L[False] = ..., +) -> Any: ... +@overload +def average( + a: _ArrayLikeFloat_co, + axis: None = ..., + weights: None | _ArrayLikeFloat_co= ..., + returned: L[True] = ..., +) -> _2Tuple[floating[Any]]: ... +@overload +def average( + a: _ArrayLikeComplex_co, + axis: None = ..., + weights: None | _ArrayLikeComplex_co = ..., + returned: L[True] = ..., +) -> _2Tuple[complexfloating[Any, Any]]: ... +@overload +def average( + a: _ArrayLikeObject_co, + axis: None = ..., + weights: None | Any = ..., + returned: L[True] = ..., +) -> _2Tuple[Any]: ... +@overload +def average( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: None | _ShapeLike = ..., + weights: None | Any = ..., + returned: L[False] = ..., +) -> Any: ... +@overload +def average( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: None | _ShapeLike = ..., + weights: None | Any = ..., + returned: L[True] = ..., +) -> _2Tuple[Any]: ... + +@overload +def asarray_chkfinite( + a: _ArrayLike[_SCT], + dtype: None = ..., + order: _OrderKACF = ..., +) -> NDArray[_SCT]: ... +@overload +def asarray_chkfinite( + a: object, + dtype: None = ..., + order: _OrderKACF = ..., +) -> NDArray[Any]: ... +@overload +def asarray_chkfinite( + a: Any, + dtype: _DTypeLike[_SCT], + order: _OrderKACF = ..., +) -> NDArray[_SCT]: ... +@overload +def asarray_chkfinite( + a: Any, + dtype: DTypeLike, + order: _OrderKACF = ..., +) -> NDArray[Any]: ... + +@overload +def piecewise( + x: _ArrayLike[_SCT], + condlist: ArrayLike, + funclist: Sequence[Any | Callable[..., Any]], + *args: Any, + **kw: Any, +) -> NDArray[_SCT]: ... +@overload +def piecewise( + x: ArrayLike, + condlist: ArrayLike, + funclist: Sequence[Any | Callable[..., Any]], + *args: Any, + **kw: Any, +) -> NDArray[Any]: ... + +def select( + condlist: Sequence[ArrayLike], + choicelist: Sequence[ArrayLike], + default: ArrayLike = ..., +) -> NDArray[Any]: ... + +@overload +def copy( + a: _ArrayType, + order: _OrderKACF, + subok: L[True], +) -> _ArrayType: ... +@overload +def copy( + a: _ArrayType, + order: _OrderKACF = ..., + *, + subok: L[True], +) -> _ArrayType: ... +@overload +def copy( + a: _ArrayLike[_SCT], + order: _OrderKACF = ..., + subok: L[False] = ..., +) -> NDArray[_SCT]: ... +@overload +def copy( + a: ArrayLike, + order: _OrderKACF = ..., + subok: L[False] = ..., +) -> NDArray[Any]: ... + +def gradient( + f: ArrayLike, + *varargs: ArrayLike, + axis: None | _ShapeLike = ..., + edge_order: L[1, 2] = ..., +) -> Any: ... + +@overload +def diff( + a: _T, + n: L[0], + axis: SupportsIndex = ..., + prepend: ArrayLike = ..., + append: ArrayLike = ..., +) -> _T: ... +@overload +def diff( + a: ArrayLike, + n: int = ..., + axis: SupportsIndex = ..., + prepend: ArrayLike = ..., + append: ArrayLike = ..., +) -> NDArray[Any]: ... + +@overload +def interp( + x: _ArrayLikeFloat_co, + xp: _ArrayLikeFloat_co, + fp: _ArrayLikeFloat_co, + left: None | _FloatLike_co = ..., + right: None | _FloatLike_co = ..., + period: None | _FloatLike_co = ..., +) -> NDArray[float64]: ... 
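
The `average` overloads above encode that `returned=True` switches the result
to an ``(average, sum_of_weights)`` 2-tuple, and that a non-``None`` `axis`
falls through to `Any` (the complex `interp` overload continues below). A
sketch of the expected mypy resolution, with an illustrative annotation::

    import numpy as np
    import numpy.typing as npt

    AR_f8: npt.NDArray[np.float64]

    reveal_type(np.average(AR_f8))                 # expected roughly: floating[Any]
    reveal_type(np.average(AR_f8, returned=True))  # expected roughly: Tuple[floating[Any], floating[Any]]
    reveal_type(np.average(AR_f8, axis=0))         # expected roughly: Any
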
+@overload +def interp( + x: _ArrayLikeFloat_co, + xp: _ArrayLikeFloat_co, + fp: _ArrayLikeComplex_co, + left: None | _ComplexLike_co = ..., + right: None | _ComplexLike_co = ..., + period: None | _FloatLike_co = ..., +) -> NDArray[complex128]: ... + +@overload +def angle(z: _ArrayLikeFloat_co, deg: bool = ...) -> floating[Any]: ... +@overload +def angle(z: _ArrayLikeComplex_co, deg: bool = ...) -> complexfloating[Any, Any]: ... +@overload +def angle(z: _ArrayLikeObject_co, deg: bool = ...) -> Any: ... + +@overload +def unwrap( + p: _ArrayLikeFloat_co, + discont: None | float = ..., + axis: int = ..., + *, + period: float = ..., +) -> NDArray[floating[Any]]: ... +@overload +def unwrap( + p: _ArrayLikeObject_co, + discont: None | float = ..., + axis: int = ..., + *, + period: float = ..., +) -> NDArray[object_]: ... + +def sort_complex(a: ArrayLike) -> NDArray[complexfloating[Any, Any]]: ... + +def trim_zeros( + filt: _TrimZerosSequence[_T], + trim: L["f", "b", "fb", "bf"] = ..., +) -> _T: ... + +@overload +def extract(condition: ArrayLike, arr: _ArrayLike[_SCT]) -> NDArray[_SCT]: ... +@overload +def extract(condition: ArrayLike, arr: ArrayLike) -> NDArray[Any]: ... + +def place(arr: NDArray[Any], mask: ArrayLike, vals: Any) -> None: ... + +def disp( + mesg: object, + device: None | _SupportsWriteFlush = ..., + linefeed: bool = ..., +) -> None: ... + +@overload +def cov( + m: _ArrayLikeFloat_co, + y: None | _ArrayLikeFloat_co = ..., + rowvar: bool = ..., + bias: bool = ..., + ddof: None | SupportsIndex | SupportsInt = ..., + fweights: None | ArrayLike = ..., + aweights: None | ArrayLike = ..., + *, + dtype: None = ..., +) -> NDArray[floating[Any]]: ... +@overload +def cov( + m: _ArrayLikeComplex_co, + y: None | _ArrayLikeComplex_co = ..., + rowvar: bool = ..., + bias: bool = ..., + ddof: None | SupportsIndex | SupportsInt = ..., + fweights: None | ArrayLike = ..., + aweights: None | ArrayLike = ..., + *, + dtype: None = ..., +) -> NDArray[complexfloating[Any, Any]]: ... +@overload +def cov( + m: _ArrayLikeComplex_co, + y: None | _ArrayLikeComplex_co = ..., + rowvar: bool = ..., + bias: bool = ..., + ddof: None | SupportsIndex | SupportsInt = ..., + fweights: None | ArrayLike = ..., + aweights: None | ArrayLike = ..., + *, + dtype: _DTypeLike[_SCT], +) -> NDArray[_SCT]: ... +@overload +def cov( + m: _ArrayLikeComplex_co, + y: None | _ArrayLikeComplex_co = ..., + rowvar: bool = ..., + bias: bool = ..., + ddof: None | SupportsIndex | SupportsInt = ..., + fweights: None | ArrayLike = ..., + aweights: None | ArrayLike = ..., + *, + dtype: DTypeLike, +) -> NDArray[Any]: ... + +# NOTE `bias` and `ddof` have been deprecated +@overload +def corrcoef( + m: _ArrayLikeFloat_co, + y: None | _ArrayLikeFloat_co = ..., + rowvar: bool = ..., + *, + dtype: None = ..., +) -> NDArray[floating[Any]]: ... +@overload +def corrcoef( + m: _ArrayLikeComplex_co, + y: None | _ArrayLikeComplex_co = ..., + rowvar: bool = ..., + *, + dtype: None = ..., +) -> NDArray[complexfloating[Any, Any]]: ... +@overload +def corrcoef( + m: _ArrayLikeComplex_co, + y: None | _ArrayLikeComplex_co = ..., + rowvar: bool = ..., + *, + dtype: _DTypeLike[_SCT], +) -> NDArray[_SCT]: ... +@overload +def corrcoef( + m: _ArrayLikeComplex_co, + y: None | _ArrayLikeComplex_co = ..., + rowvar: bool = ..., + *, + dtype: DTypeLike, +) -> NDArray[Any]: ... + +def blackman(M: _FloatLike_co) -> NDArray[floating[Any]]: ... + +def bartlett(M: _FloatLike_co) -> NDArray[floating[Any]]: ... + +def hanning(M: _FloatLike_co) -> NDArray[floating[Any]]: ... 
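
The `cov` and `corrcoef` stubs above let the new `dtype` keyword narrow the
result array's scalar type, defaulting to `floating`/`complexfloating`
according to the input kind. A sketch of the expected mypy resolution
(annotation illustrative)::

    import numpy as np
    import numpy.typing as npt

    AR_f8: npt.NDArray[np.float64]

    reveal_type(np.cov(AR_f8))                    # expected roughly: ndarray[Any, dtype[floating[Any]]]
    reveal_type(np.cov(AR_f8, dtype=np.float32))  # expected roughly: ndarray[Any, dtype[float32]]
    reveal_type(np.corrcoef(AR_f8))               # expected roughly: ndarray[Any, dtype[floating[Any]]]
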
+ +def hamming(M: _FloatLike_co) -> NDArray[floating[Any]]: ... + +def i0(x: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... + +def kaiser( + M: _FloatLike_co, + beta: _FloatLike_co, +) -> NDArray[floating[Any]]: ... + +@overload +def sinc(x: _FloatLike_co) -> floating[Any]: ... +@overload +def sinc(x: _ComplexLike_co) -> complexfloating[Any, Any]: ... +@overload +def sinc(x: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... +@overload +def sinc(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... + +@overload +def msort(a: _ArrayType) -> _ArrayType: ... +@overload +def msort(a: _ArrayLike[_SCT]) -> NDArray[_SCT]: ... +@overload +def msort(a: ArrayLike) -> NDArray[Any]: ... + +@overload +def median( + a: _ArrayLikeFloat_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + keepdims: L[False] = ..., +) -> floating[Any]: ... +@overload +def median( + a: _ArrayLikeComplex_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + keepdims: L[False] = ..., +) -> complexfloating[Any, Any]: ... +@overload +def median( + a: _ArrayLikeTD64_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + keepdims: L[False] = ..., +) -> timedelta64: ... +@overload +def median( + a: _ArrayLikeObject_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + keepdims: L[False] = ..., +) -> Any: ... +@overload +def median( + a: _ArrayLikeFloat_co | _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, + axis: None | _ShapeLike = ..., + out: None = ..., + overwrite_input: bool = ..., + keepdims: bool = ..., +) -> Any: ... +@overload +def median( + a: _ArrayLikeFloat_co | _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, + axis: None | _ShapeLike = ..., + out: _ArrayType = ..., + overwrite_input: bool = ..., + keepdims: bool = ..., +) -> _ArrayType: ... + +_InterpolationKind = L[ + "lower", + "higher", + "midpoint", + "nearest", + "linear", +] + +@overload +def percentile( + a: _ArrayLikeFloat_co, + q: _FloatLike_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + interpolation: _InterpolationKind = ..., + keepdims: L[False] = ..., +) -> floating[Any]: ... +@overload +def percentile( + a: _ArrayLikeComplex_co, + q: _FloatLike_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + interpolation: _InterpolationKind = ..., + keepdims: L[False] = ..., +) -> complexfloating[Any, Any]: ... +@overload +def percentile( + a: _ArrayLikeTD64_co, + q: _FloatLike_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + interpolation: _InterpolationKind = ..., + keepdims: L[False] = ..., +) -> timedelta64: ... +@overload +def percentile( + a: _ArrayLikeDT64_co, + q: _FloatLike_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + interpolation: _InterpolationKind = ..., + keepdims: L[False] = ..., +) -> datetime64: ... +@overload +def percentile( + a: _ArrayLikeObject_co, + q: _FloatLike_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + interpolation: _InterpolationKind = ..., + keepdims: L[False] = ..., +) -> Any: ... +@overload +def percentile( + a: _ArrayLikeFloat_co, + q: _ArrayLikeFloat_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + interpolation: _InterpolationKind = ..., + keepdims: L[False] = ..., +) -> NDArray[floating[Any]]: ... 
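
As the `median` and `percentile` overloads above (and continuing below)
encode, a scalar `q` with ``axis=None`` maps to a scalar result while an
array-like `q` maps to an array. A sketch of the expected mypy resolution
(annotation illustrative)::

    import numpy as np
    import numpy.typing as npt

    AR_f8: npt.NDArray[np.float64]

    reveal_type(np.median(AR_f8))                    # expected roughly: floating[Any]
    reveal_type(np.percentile(AR_f8, 50))            # expected roughly: floating[Any]
    reveal_type(np.percentile(AR_f8, [25, 50, 75]))  # expected roughly: ndarray[Any, dtype[floating[Any]]]
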
+@overload
+def percentile(
+    a: _ArrayLikeComplex_co,
+    q: _ArrayLikeFloat_co,
+    axis: None = ...,
+    out: None = ...,
+    overwrite_input: bool = ...,
+    interpolation: _InterpolationKind = ...,
+    keepdims: L[False] = ...,
+) -> NDArray[complexfloating[Any, Any]]: ...
+@overload
+def percentile(
+    a: _ArrayLikeTD64_co,
+    q: _ArrayLikeFloat_co,
+    axis: None = ...,
+    out: None = ...,
+    overwrite_input: bool = ...,
+    interpolation: _InterpolationKind = ...,
+    keepdims: L[False] = ...,
+) -> NDArray[timedelta64]: ...
+@overload
+def percentile(
+    a: _ArrayLikeDT64_co,
+    q: _ArrayLikeFloat_co,
+    axis: None = ...,
+    out: None = ...,
+    overwrite_input: bool = ...,
+    interpolation: _InterpolationKind = ...,
+    keepdims: L[False] = ...,
+) -> NDArray[datetime64]: ...
+@overload
+def percentile(
+    a: _ArrayLikeObject_co,
+    q: _ArrayLikeFloat_co,
+    axis: None = ...,
+    out: None = ...,
+    overwrite_input: bool = ...,
+    interpolation: _InterpolationKind = ...,
+    keepdims: L[False] = ...,
+) -> NDArray[object_]: ...
+@overload
+def percentile(
+    a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeDT64_co | _ArrayLikeObject_co,
+    q: _ArrayLikeFloat_co,
+    axis: None | _ShapeLike = ...,
+    out: None = ...,
+    overwrite_input: bool = ...,
+    interpolation: _InterpolationKind = ...,
+    keepdims: bool = ...,
+) -> Any: ...
+@overload
+def percentile(
+    a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeDT64_co | _ArrayLikeObject_co,
+    q: _ArrayLikeFloat_co,
+    axis: None | _ShapeLike = ...,
+    out: _ArrayType = ...,
+    overwrite_input: bool = ...,
+    interpolation: _InterpolationKind = ...,
+    keepdims: bool = ...,
+) -> _ArrayType: ...
+
+# NOTE: Not an alias, but they do have identical signatures
+# (that we can reuse)
+quantile = percentile
+
+# TODO: Returns a scalar for <= 1D array-likes; returns an ndarray otherwise
+def trapz(
+    y: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co,
+    x: None | _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co = ...,
+    dx: float = ...,
+    axis: SupportsIndex = ...,
+) -> Any: ...
+
+def meshgrid(
+    *xi: ArrayLike,
+    copy: bool = ...,
+    sparse: bool = ...,
+    indexing: L["xy", "ij"] = ...,
+) -> List[NDArray[Any]]: ...
+
+@overload
+def delete(
+    arr: _ArrayLike[_SCT],
+    obj: slice | _ArrayLikeInt_co,
+    axis: None | SupportsIndex = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def delete(
+    arr: ArrayLike,
+    obj: slice | _ArrayLikeInt_co,
+    axis: None | SupportsIndex = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def insert(
+    arr: _ArrayLike[_SCT],
+    obj: slice | _ArrayLikeInt_co,
+    values: ArrayLike,
+    axis: None | SupportsIndex = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def insert(
+    arr: ArrayLike,
+    obj: slice | _ArrayLikeInt_co,
+    values: ArrayLike,
+    axis: None | SupportsIndex = ...,
+) -> NDArray[Any]: ...
+
+def append(
+    arr: ArrayLike,
+    values: ArrayLike,
+    axis: None | SupportsIndex = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def digitize(
+    x: _FloatLike_co,
+    bins: _ArrayLikeFloat_co,
+    right: bool = ...,
+) -> intp: ...
+@overload
+def digitize(
+    x: _ArrayLikeFloat_co,
+    bins: _ArrayLikeFloat_co,
+    right: bool = ...,
+) -> NDArray[intp]: ...
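
The closing `digitize` overloads mirror the runtime rule that a scalar `x`
yields a single `intp` index while an array-like `x` yields an array of
indices. A doctest-style sketch of the behaviour being annotated (bin values
chosen for illustration)::

    >>> bins = np.array([0.0, 1.0, 2.5, 4.0])
    >>> np.digitize(2.4, bins)
    2
    >>> np.digitize([0.5, 2.4], bins)
    array([1, 2])
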
diff --git a/numpy/lib/histograms.pyi b/numpy/lib/histograms.pyi index 25a33e3ae..2ceb60793 100644 --- a/numpy/lib/histograms.pyi +++ b/numpy/lib/histograms.pyi @@ -1,7 +1,51 @@ -from typing import List +from typing import ( + Literal as L, + List, + Tuple, + Any, + SupportsIndex, + Sequence, +) + +from numpy.typing import ( + NDArray, + ArrayLike, +) + +_BinKind = L[ + "stone", + "auto", + "doane", + "fd", + "rice", + "scott", + "sqrt", + "sturges", +] __all__: List[str] -def histogram_bin_edges(a, bins=..., range=..., weights=...): ... -def histogram(a, bins=..., range=..., normed=..., weights=..., density=...): ... -def histogramdd(sample, bins=..., range=..., normed=..., weights=..., density=...): ... +def histogram_bin_edges( + a: ArrayLike, + bins: _BinKind | SupportsIndex | ArrayLike = ..., + range: None | Tuple[float, float] = ..., + weights: None | ArrayLike = ..., +) -> NDArray[Any]: ... + +def histogram( + a: ArrayLike, + bins: _BinKind | SupportsIndex | ArrayLike = ..., + range: None | Tuple[float, float] = ..., + normed: None = ..., + weights: None | ArrayLike = ..., + density: bool = ..., +) -> Tuple[NDArray[Any], NDArray[Any]]: ... + +def histogramdd( + sample: ArrayLike, + bins: SupportsIndex | ArrayLike = ..., + range: Sequence[Tuple[float, float]] = ..., + normed: None | bool = ..., + weights: None | ArrayLike = ..., + density: None | bool = ..., +) -> Tuple[NDArray[Any], List[NDArray[Any]]]: ... diff --git a/numpy/lib/index_tricks.pyi b/numpy/lib/index_tricks.pyi index 530be3cae..d16faf81a 100644 --- a/numpy/lib/index_tricks.pyi +++ b/numpy/lib/index_tricks.pyi @@ -33,7 +33,7 @@ from numpy.typing import ( # Arrays ArrayLike, _NestedSequence, - _RecursiveSequence, + _FiniteNestedSequence, NDArray, _ArrayLikeInt, @@ -59,21 +59,19 @@ _ArrayType = TypeVar("_ArrayType", bound=ndarray[Any, Any]) __all__: List[str] @overload -def ix_(*args: _NestedSequence[_SupportsDType[_DType]]) -> Tuple[ndarray[Any, _DType], ...]: ... +def ix_(*args: _FiniteNestedSequence[_SupportsDType[_DType]]) -> Tuple[ndarray[Any, _DType], ...]: ... @overload -def ix_(*args: _NestedSequence[str]) -> Tuple[NDArray[str_], ...]: ... +def ix_(*args: str | _NestedSequence[str]) -> Tuple[NDArray[str_], ...]: ... @overload -def ix_(*args: _NestedSequence[bytes]) -> Tuple[NDArray[bytes_], ...]: ... +def ix_(*args: bytes | _NestedSequence[bytes]) -> Tuple[NDArray[bytes_], ...]: ... @overload -def ix_(*args: _NestedSequence[bool]) -> Tuple[NDArray[bool_], ...]: ... +def ix_(*args: bool | _NestedSequence[bool]) -> Tuple[NDArray[bool_], ...]: ... @overload -def ix_(*args: _NestedSequence[int]) -> Tuple[NDArray[int_], ...]: ... +def ix_(*args: int | _NestedSequence[int]) -> Tuple[NDArray[int_], ...]: ... @overload -def ix_(*args: _NestedSequence[float]) -> Tuple[NDArray[float_], ...]: ... +def ix_(*args: float | _NestedSequence[float]) -> Tuple[NDArray[float_], ...]: ... @overload -def ix_(*args: _NestedSequence[complex]) -> Tuple[NDArray[complex_], ...]: ... -@overload -def ix_(*args: _RecursiveSequence) -> Tuple[NDArray[Any], ...]: ... +def ix_(*args: complex | _NestedSequence[complex]) -> Tuple[NDArray[complex_], ...]: ... 
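
The `_BinKind` literal above pins the string `bins` estimators to the names
the histogram functions actually accept, and the return annotations spell out
the ``(hist, bin_edges)`` pairs. A sketch of the expected mypy resolution
(annotation illustrative)::

    import numpy as np
    import numpy.typing as npt

    AR_f8: npt.NDArray[np.float64]

    reveal_type(np.histogram(AR_f8, bins="auto"))
    # expected roughly: Tuple[ndarray[Any, dtype[Any]], ndarray[Any, dtype[Any]]]
    reveal_type(np.histogramdd(AR_f8, bins=10))
    # expected roughly: Tuple[ndarray[Any, dtype[Any]], list[ndarray[Any, dtype[Any]]]]
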
 class nd_grid(Generic[_BoolType]):
     sparse: _BoolType
diff --git a/numpy/lib/nanfunctions.py b/numpy/lib/nanfunctions.py
index 2c2c3435b..08d9b42bb 100644
--- a/numpy/lib/nanfunctions.py
+++ b/numpy/lib/nanfunctions.py
@@ -160,8 +160,12 @@ def _remove_nan_1d(arr1d, overwrite_input=False):
         True if `res` can be modified in place, given the constraint on the
         input
     """
+    if arr1d.dtype == object:
+        # object arrays do not support `isnan` (gh-9009), so make a guess
+        c = np.not_equal(arr1d, arr1d, dtype=bool)
+    else:
+        c = np.isnan(arr1d)
 
-    c = np.isnan(arr1d)
     s = np.nonzero(c)[0]
     if s.size == arr1d.size:
         warnings.warn("All-NaN slice encountered", RuntimeWarning,
@@ -214,19 +218,25 @@ def _divide_by_count(a, b, out=None):
             return np.divide(a, b, out=out, casting='unsafe')
     else:
         if out is None:
-            return a.dtype.type(a / b)
+            # Precaution against reduced object arrays
+            try:
+                return a.dtype.type(a / b)
+            except AttributeError:
+                return a / b
         else:
             # This is questionable, but currently a numpy scalar can
             # be output to a zero dimensional array.
             return np.divide(a, b, out=out, casting='unsafe')
 
 
-def _nanmin_dispatcher(a, axis=None, out=None, keepdims=None):
+def _nanmin_dispatcher(a, axis=None, out=None, keepdims=None,
+                       initial=None, where=None):
     return (a, out)
 
 
 @array_function_dispatch(_nanmin_dispatcher)
-def nanmin(a, axis=None, out=None, keepdims=np._NoValue):
+def nanmin(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue,
+           where=np._NoValue):
     """
     Return minimum of an array or minimum along an axis, ignoring any NaNs.
     When all-NaN slices are encountered a ``RuntimeWarning`` is raised and
@@ -258,6 +268,16 @@ def nanmin(a, axis=None, out=None, keepdims=np._NoValue):
         does not implement `keepdims` any exceptions will be raised.
 
         .. versionadded:: 1.8.0
+    initial : scalar, optional
+        The maximum value of an output element. Must be present to allow
+        computation on an empty slice. See `~numpy.ufunc.reduce` for details.
+
+        .. versionadded:: 1.22.0
+    where : array_like of bool, optional
+        Elements to compare for the minimum. See `~numpy.ufunc.reduce`
+        for details.
+
+        .. versionadded:: 1.22.0
 
     Returns
     -------
@@ -313,6 +333,11 @@ def nanmin(a, axis=None, out=None, keepdims=np._NoValue):
     kwargs = {}
     if keepdims is not np._NoValue:
         kwargs['keepdims'] = keepdims
+    if initial is not np._NoValue:
+        kwargs['initial'] = initial
+    if where is not np._NoValue:
+        kwargs['where'] = where
+
     if type(a) is np.ndarray and a.dtype != np.object_:
         # Fast, but not safe for subclasses of ndarray, or object arrays,
         # which do not implement isnan (gh-9009), or fmin correctly (gh-8975)
@@ -328,6 +353,7 @@ def nanmin(a, axis=None, out=None, keepdims=np._NoValue):
             return res
 
         # Check for all-NaN axis
+        kwargs.pop("initial", None)
         mask = np.all(mask, axis=axis, **kwargs)
         if np.any(mask):
             res = _copyto(res, np.nan, mask)
@@ -336,12 +362,14 @@ def nanmin(a, axis=None, out=None, keepdims=np._NoValue):
     return res
 
 
-def _nanmax_dispatcher(a, axis=None, out=None, keepdims=None):
+def _nanmax_dispatcher(a, axis=None, out=None, keepdims=None,
+                       initial=None, where=None):
     return (a, out)
 
 
 @array_function_dispatch(_nanmax_dispatcher)
-def nanmax(a, axis=None, out=None, keepdims=np._NoValue):
+def nanmax(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue,
+           where=np._NoValue):
     """
     Return the maximum of an array or maximum along an axis, ignoring any
     NaNs.
     When all-NaN slices are encountered a ``RuntimeWarning`` is
@@ -373,6 +401,16 @@ def nanmax(a, axis=None, out=None, keepdims=np._NoValue):
         does not implement `keepdims` any exceptions will be raised.
 
         .. versionadded:: 1.8.0
+    initial : scalar, optional
+        The minimum value of an output element. Must be present to allow
+        computation on an empty slice. See `~numpy.ufunc.reduce` for details.
+
+        .. versionadded:: 1.22.0
+    where : array_like of bool, optional
+        Elements to compare for the maximum. See `~numpy.ufunc.reduce`
+        for details.
+
+        .. versionadded:: 1.22.0
 
     Returns
     -------
@@ -428,6 +466,11 @@ def nanmax(a, axis=None, out=None, keepdims=np._NoValue):
     kwargs = {}
     if keepdims is not np._NoValue:
         kwargs['keepdims'] = keepdims
+    if initial is not np._NoValue:
+        kwargs['initial'] = initial
+    if where is not np._NoValue:
+        kwargs['where'] = where
+
     if type(a) is np.ndarray and a.dtype != np.object_:
         # Fast, but not safe for subclasses of ndarray, or object arrays,
         # which do not implement isnan (gh-9009), or fmax correctly (gh-8975)
@@ -443,6 +486,7 @@ def nanmax(a, axis=None, out=None, keepdims=np._NoValue):
             return res
 
         # Check for all-NaN axis
+        kwargs.pop("initial", None)
         mask = np.all(mask, axis=axis, **kwargs)
         if np.any(mask):
             res = _copyto(res, np.nan, mask)
@@ -451,12 +495,12 @@ def nanmax(a, axis=None, out=None, keepdims=np._NoValue):
     return res
 
 
-def _nanargmin_dispatcher(a, axis=None):
+def _nanargmin_dispatcher(a, axis=None, out=None, *, keepdims=None):
     return (a,)
 
 
 @array_function_dispatch(_nanargmin_dispatcher)
-def nanargmin(a, axis=None):
+def nanargmin(a, axis=None, out=None, *, keepdims=np._NoValue):
     """
     Return the indices of the minimum values in the specified axis ignoring
     NaNs. For all-NaN slices ``ValueError`` is raised. Warning: the results
@@ -468,6 +512,17 @@ def nanargmin(a, axis=None):
         Input data.
     axis : int, optional
         Axis along which to operate.  By default flattened input is used.
+    out : array, optional
+        If provided, the result will be inserted into this array. It should
+        be of the appropriate shape and dtype.
+
+        .. versionadded:: 1.22.0
+    keepdims : bool, optional
+        If this is set to True, the axes which are reduced are left
+        in the result as dimensions with size one.
With this option, + the result will broadcast correctly against the array. + + .. versionadded:: 1.22.0 Returns ------- @@ -542,20 +608,22 @@ def nanargmax(a, axis=None): """ a, mask = _replace_nan(a, -np.inf) - res = np.argmax(a, axis=axis) if mask is not None: mask = np.all(mask, axis=axis) if np.any(mask): raise ValueError("All-NaN slice encountered") + res = np.argmax(a, axis=axis, out=out, keepdims=keepdims) return res -def _nansum_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None): +def _nansum_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None, + initial=None, where=None): return (a, out) @array_function_dispatch(_nansum_dispatcher) -def nansum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue): +def nansum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, + initial=np._NoValue, where=np._NoValue): """ Return the sum of array elements over a given axis treating Not a Numbers (NaNs) as zero. @@ -600,6 +668,14 @@ def nansum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue): does not implement `keepdims` any exceptions will be raised. .. versionadded:: 1.8.0 + initial : scalar, optional + Starting value for the sum. See `~numpy.ufunc.reduce` for details. + + .. versionadded:: 1.22.0 + where : array_like of bool, optional + Elements to include in the sum. See `~numpy.ufunc.reduce` for details. + + .. versionadded:: 1.22.0 Returns ------- @@ -645,15 +721,18 @@ def nansum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue): """ a, mask = _replace_nan(a, 0) - return np.sum(a, axis=axis, dtype=dtype, out=out, keepdims=keepdims) + return np.sum(a, axis=axis, dtype=dtype, out=out, keepdims=keepdims, + initial=initial, where=where) -def _nanprod_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None): +def _nanprod_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None, + initial=None, where=None): return (a, out) @array_function_dispatch(_nanprod_dispatcher) -def nanprod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue): +def nanprod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, + initial=np._NoValue, where=np._NoValue): """ Return the product of array elements over a given axis treating Not a Numbers (NaNs) as ones. @@ -687,6 +766,16 @@ def nanprod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue): If True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the original `arr`. + initial : scalar, optional + The starting value for this product. See `~numpy.ufunc.reduce` + for details. + + .. versionadded:: 1.22.0 + where : array_like of bool, optional + Elements to include in the product. See `~numpy.ufunc.reduce` + for details. + + .. 
versionadded:: 1.22.0 Returns ------- @@ -715,7 +804,8 @@ def nanprod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue): """ a, mask = _replace_nan(a, 1) - return np.prod(a, axis=axis, dtype=dtype, out=out, keepdims=keepdims) + return np.prod(a, axis=axis, dtype=dtype, out=out, keepdims=keepdims, + initial=initial, where=where) def _nancumsum_dispatcher(a, axis=None, dtype=None, out=None): @@ -855,12 +945,14 @@ def nancumprod(a, axis=None, dtype=None, out=None): return np.cumprod(a, axis=axis, dtype=dtype, out=out) -def _nanmean_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None): +def _nanmean_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None, + *, where=None): return (a, out) @array_function_dispatch(_nanmean_dispatcher) -def nanmean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue): +def nanmean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, + *, where=np._NoValue): """ Compute the arithmetic mean along the specified axis, ignoring NaNs. @@ -898,6 +990,10 @@ def nanmean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue): `keepdims` will be passed through to the `mean` or `sum` methods of sub-classes of `ndarray`. If the sub-classes methods does not implement `keepdims` any exceptions will be raised. + where : array_like of bool, optional + Elements to include in the mean. See `~numpy.ufunc.reduce` for details. + + .. versionadded:: 1.22.0 Returns ------- @@ -936,7 +1032,8 @@ def nanmean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue): """ arr, mask = _replace_nan(a, 0) if mask is None: - return np.mean(arr, axis=axis, dtype=dtype, out=out, keepdims=keepdims) + return np.mean(arr, axis=axis, dtype=dtype, out=out, keepdims=keepdims, + where=where) if dtype is not None: dtype = np.dtype(dtype) @@ -945,8 +1042,10 @@ def nanmean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue): if out is not None and not issubclass(out.dtype.type, np.inexact): raise TypeError("If a is inexact, then out must be inexact") - cnt = np.sum(~mask, axis=axis, dtype=np.intp, keepdims=keepdims) - tot = np.sum(arr, axis=axis, dtype=dtype, out=out, keepdims=keepdims) + cnt = np.sum(~mask, axis=axis, dtype=np.intp, keepdims=keepdims, + where=where) + tot = np.sum(arr, axis=axis, dtype=dtype, out=out, keepdims=keepdims, + where=where) avg = _divide_by_count(tot, cnt, out=out) isbad = (cnt == 0) @@ -967,7 +1066,7 @@ def _nanmedian1d(arr1d, overwrite_input=False): ) if arr1d_parsed.size == 0: - # Ensure that a nan-esque scalar of the appropiate type (and unit) + # Ensure that a nan-esque scalar of the appropriate type (and unit) # is returned for `timedelta64` and `complexfloating` return arr1d[-1] @@ -1413,19 +1512,21 @@ def _nanquantile_1d(arr1d, q, overwrite_input=False, interpolation='linear'): arr1d, overwrite_input = _remove_nan_1d(arr1d, overwrite_input=overwrite_input) if arr1d.size == 0: - return np.full(q.shape, np.nan)[()] # convert to scalar + # convert to scalar + return np.full(q.shape, np.nan, dtype=arr1d.dtype)[()] return function_base._quantile_unchecked( arr1d, q, overwrite_input=overwrite_input, interpolation=interpolation) -def _nanvar_dispatcher( - a, axis=None, dtype=None, out=None, ddof=None, keepdims=None): +def _nanvar_dispatcher(a, axis=None, dtype=None, out=None, ddof=None, + keepdims=None, *, where=None): return (a, out) @array_function_dispatch(_nanvar_dispatcher) -def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue): +def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, + 
*, where=np._NoValue): """ Compute the variance along the specified axis, while ignoring NaNs. @@ -1462,7 +1563,11 @@ def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue): If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the original `a`. + where : array_like of bool, optional + Elements to include in the variance. See `~numpy.ufunc.reduce` for + details. + .. versionadded:: 1.22.0 Returns ------- @@ -1518,7 +1623,7 @@ def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue): arr, mask = _replace_nan(a, 0) if mask is None: return np.var(arr, axis=axis, dtype=dtype, out=out, ddof=ddof, - keepdims=keepdims) + keepdims=keepdims, where=where) if dtype is not None: dtype = np.dtype(dtype) @@ -1537,21 +1642,29 @@ def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue): # keepdims=True, however matrix now raises an error in this case, but # the reason that it drops the keepdims kwarg is to force keepdims=True # so this used to work by serendipity. - cnt = np.sum(~mask, axis=axis, dtype=np.intp, keepdims=_keepdims) - avg = np.sum(arr, axis=axis, dtype=dtype, keepdims=_keepdims) + cnt = np.sum(~mask, axis=axis, dtype=np.intp, keepdims=_keepdims, + where=where) + avg = np.sum(arr, axis=axis, dtype=dtype, keepdims=_keepdims, where=where) avg = _divide_by_count(avg, cnt) # Compute squared deviation from mean. - np.subtract(arr, avg, out=arr, casting='unsafe') + np.subtract(arr, avg, out=arr, casting='unsafe', where=where) arr = _copyto(arr, 0, mask) if issubclass(arr.dtype.type, np.complexfloating): - sqr = np.multiply(arr, arr.conj(), out=arr).real + sqr = np.multiply(arr, arr.conj(), out=arr, where=where).real else: - sqr = np.multiply(arr, arr, out=arr) + sqr = np.multiply(arr, arr, out=arr, where=where) # Compute variance. - var = np.sum(sqr, axis=axis, dtype=dtype, out=out, keepdims=keepdims) - if var.ndim < cnt.ndim: + var = np.sum(sqr, axis=axis, dtype=dtype, out=out, keepdims=keepdims, + where=where) + + # Precaution against reduced object arrays + try: + var_ndim = var.ndim + except AttributeError: + var_ndim = np.ndim(var) + if var_ndim < cnt.ndim: # Subclasses of ndarray may ignore keepdims, so check here. cnt = cnt.squeeze(axis) dof = cnt - ddof @@ -1567,13 +1680,14 @@ def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue): return var -def _nanstd_dispatcher( - a, axis=None, dtype=None, out=None, ddof=None, keepdims=None): +def _nanstd_dispatcher(a, axis=None, dtype=None, out=None, ddof=None, + keepdims=None, *, where=None): return (a, out) @array_function_dispatch(_nanstd_dispatcher) -def nanstd(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue): +def nanstd(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, + *, where=np._NoValue): """ Compute the standard deviation along the specified axis, while ignoring NaNs. @@ -1617,6 +1731,11 @@ def nanstd(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue): as-is to the relevant functions of the sub-classes. If these functions do not have a `keepdims` kwarg, a RuntimeError will be raised. + where : array_like of bool, optional + Elements to include in the standard deviation. + See `~numpy.ufunc.reduce` for details. + + .. 
versionadded:: 1.22.0
 
     Returns
     -------
@@ -1668,9 +1787,11 @@ def nanstd(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
 
     """
     var = nanvar(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
-                 keepdims=keepdims)
+                 keepdims=keepdims, where=where)
     if isinstance(var, np.ndarray):
         std = np.sqrt(var, out=var)
-    else:
+    elif hasattr(var, 'dtype'):
         std = var.dtype.type(np.sqrt(var))
+    else:
+        std = np.sqrt(var)
     return std
diff --git a/numpy/lib/nanfunctions.pyi b/numpy/lib/nanfunctions.pyi
index 447770a54..54b4a7e26 100644
--- a/numpy/lib/nanfunctions.pyi
+++ b/numpy/lib/nanfunctions.pyi
@@ -1,54 +1,40 @@
 from typing import List
 
+from numpy.core.fromnumeric import (
+    amin,
+    amax,
+    argmin,
+    argmax,
+    sum,
+    prod,
+    cumsum,
+    cumprod,
+    mean,
+    var,
+    std
+)
+
+from numpy.lib.function_base import (
+    median,
+    percentile,
+    quantile,
+)
+
 __all__: List[str]
 
-def nanmin(a, axis=..., out=..., keepdims=...): ...
-def nanmax(a, axis=..., out=..., keepdims=...): ...
-def nanargmin(a, axis=...): ...
-def nanargmax(a, axis=...): ...
-def nansum(a, axis=..., dtype=..., out=..., keepdims=...): ...
-def nanprod(a, axis=..., dtype=..., out=..., keepdims=...): ...
-def nancumsum(a, axis=..., dtype=..., out=...): ...
-def nancumprod(a, axis=..., dtype=..., out=...): ...
-def nanmean(a, axis=..., dtype=..., out=..., keepdims=...): ...
-def nanmedian(
-    a,
-    axis=...,
-    out=...,
-    overwrite_input=...,
-    keepdims=...,
-): ...
-def nanpercentile(
-    a,
-    q,
-    axis=...,
-    out=...,
-    overwrite_input=...,
-    interpolation=...,
-    keepdims=...,
-): ...
-def nanquantile(
-    a,
-    q,
-    axis=...,
-    out=...,
-    overwrite_input=...,
-    interpolation=...,
-    keepdims=...,
-): ...
-def nanvar(
-    a,
-    axis=...,
-    dtype=...,
-    out=...,
-    ddof=...,
-    keepdims=...,
-): ...
-def nanstd(
-    a,
-    axis=...,
-    dtype=...,
-    out=...,
-    ddof=...,
-    keepdims=...,
-): ...
+# NOTE: In reality these functions are not aliases but distinct functions
+# with identical signatures.
+nanmin = amin
+nanmax = amax
+nanargmin = argmin
+nanargmax = argmax
+nansum = sum
+nanprod = prod
+nancumsum = cumsum
+nancumprod = cumprod
+nanmean = mean
+nanvar = var
+nanstd = std
+nanmedian = median
+nanpercentile = percentile
+nanquantile = quantile
diff --git a/numpy/lib/npyio.py b/numpy/lib/npyio.py
index b91bf440f..6c34e95fe 100644
--- a/numpy/lib/npyio.py
+++ b/numpy/lib/npyio.py
@@ -248,7 +248,6 @@ class NpzFile(Mapping):
         else:
             raise KeyError("%s is not a file in the archive" % key)
 
-
     # deprecate the python 2 dict apis that we supported by accident in
     # python 3. We forgot to implement itervalues() at all in earlier
     # versions of numpy, so no need to deprecated it here.
@@ -1465,7 +1464,7 @@ def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
 
 @set_module('numpy')
 def fromregex(file, regexp, dtype, encoding=None):
-    """
+    r"""
     Construct an array from a text file, using regular expression parsing.
 
     The returned array is always a structured array, and is constructed from
@@ -1483,7 +1482,7 @@ def fromregex(file, regexp, dtype, encoding=None):
         Regular expression used to parse the file.
         Groups in the regular expression correspond to fields in the dtype.
     dtype : dtype or list of dtypes
-        Dtype for the structured array.
+        Dtype for the structured array; must be a structured datatype.
     encoding : str, optional
         Encoding used to decode the inputfile. Does not apply to input streams.
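
Before the remaining `fromregex` hunks, a doctest-style sketch of the keywords
the `nanfunctions.py` hunks above introduce -- `initial`/`where` on the
reductions and `out`/`keepdims` on the arg-reductions (outputs as expected for
NumPy 1.22; note that `where` on `nanmin` requires `initial` so reduced slices
stay non-empty)::

    >>> a = np.array([[np.nan, 4.0], [2.0, 3.0]])
    >>> np.nanmin(a, axis=1, initial=10, where=[False, True])
    array([4., 3.])
    >>> np.nansum(a, axis=1, where=[True, False])
    array([0., 2.])
    >>> np.nanargmin(a, axis=1, keepdims=True)
    array([[1],
           [0]])
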
@@ -1512,12 +1511,11 @@ def fromregex(file, regexp, dtype, encoding=None): Examples -------- - >>> f = open('test.dat', 'w') - >>> _ = f.write("1312 foo\\n1534 bar\\n444 qux") - >>> f.close() + >>> from io import StringIO + >>> text = StringIO("1312 foo\n1534 bar\n444 qux") - >>> regexp = r"(\\d+)\\s+(...)" # match [digits, whitespace, anything] - >>> output = np.fromregex('test.dat', regexp, + >>> regexp = r"(\d+)\s+(...)" # match [digits, whitespace, anything] + >>> output = np.fromregex(text, regexp, ... [('num', np.int64), ('key', 'S3')]) >>> output array([(1312, b'foo'), (1534, b'bar'), ( 444, b'qux')], @@ -1535,6 +1533,8 @@ def fromregex(file, regexp, dtype, encoding=None): try: if not isinstance(dtype, np.dtype): dtype = np.dtype(dtype) + if dtype.names is None: + raise TypeError('dtype must be a structured datatype.') content = file.read() if isinstance(content, bytes) and isinstance(regexp, str): @@ -1634,7 +1634,7 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None, ``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns. names : {None, True, str, sequence}, optional If `names` is True, the field names are read from the first line after - the first `skip_header` lines. This line can optionally be preceeded + the first `skip_header` lines. This line can optionally be preceded by a comment delimiter. If `names` is a sequence or a single-string of comma-separated names, the names will be used to define the field names in a structured dtype. If `names` is None, the names of the dtype diff --git a/numpy/lib/npyio.pyi b/numpy/lib/npyio.pyi index 4841e9e71..8fd36ca6f 100644 --- a/numpy/lib/npyio.pyi +++ b/numpy/lib/npyio.pyi @@ -29,6 +29,7 @@ from numpy import ( generic, float64, void, + record, ) from numpy.ma.mrecords import MaskedRecords @@ -240,7 +241,7 @@ def recfromtxt( *, usemask: L[False] = ..., **kwargs: Any, -) -> recarray[Any, dtype[void]]: ... +) -> recarray[Any, dtype[record]]: ... @overload def recfromtxt( fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes], @@ -255,7 +256,7 @@ def recfromcsv( *, usemask: L[False] = ..., **kwargs: Any, -) -> recarray[Any, dtype[void]]: ... +) -> recarray[Any, dtype[record]]: ... @overload def recfromcsv( fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes], diff --git a/numpy/lib/polynomial.py b/numpy/lib/polynomial.py index c40e50a57..1cbb3cd88 100644 --- a/numpy/lib/polynomial.py +++ b/numpy/lib/polynomial.py @@ -510,13 +510,19 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False): coefficients for `k`-th data set are in ``p[:,k]``. residuals, rank, singular_values, rcond - Present only if `full` = True. Residuals is sum of squared residuals - of the least-squares fit, the effective rank of the scaled Vandermonde - coefficient matrix, its singular values, and the specified value of - `rcond`. For more details, see `linalg.lstsq`. + These values are only returned if ``full == True`` + + - residuals -- sum of squared residuals of the least squares fit + - rank -- the effective rank of the scaled Vandermonde + coefficient matrix + - singular_values -- singular values of the scaled Vandermonde + coefficient matrix + - rcond -- value of `rcond`. + + For more details, see `numpy.linalg.lstsq`. V : ndarray, shape (M,M) or (M,M,K) - Present only if `full` = False and `cov`=True. The covariance + Present only if ``full == False`` and ``cov == True``. The covariance matrix of the polynomial coefficient estimates. 
The diagonal of this matrix are the variance estimates for each coefficient. If y is a 2-D array, then the covariance matrix for the `k`-th data set @@ -527,7 +533,7 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False): ----- RankWarning The rank of the coefficient matrix in the least-squares fit is - deficient. The warning is only raised if `full` = False. + deficient. The warning is only raised if ``full == False``. The warnings can be turned off by diff --git a/numpy/lib/polynomial.pyi b/numpy/lib/polynomial.pyi index 7d38658d0..00065f53b 100644 --- a/numpy/lib/polynomial.pyi +++ b/numpy/lib/polynomial.pyi @@ -1,19 +1,305 @@ -from typing import List +from typing import ( + Literal as L, + List, + overload, + Any, + SupportsInt, + SupportsIndex, + TypeVar, + Tuple, + NoReturn, +) from numpy import ( RankWarning as RankWarning, poly1d as poly1d, + unsignedinteger, + signedinteger, + floating, + complexfloating, + bool_, + int32, + int64, + float64, + complex128, + object_, +) + +from numpy.typing import ( + NDArray, + ArrayLike, + _ArrayLikeBool_co, + _ArrayLikeUInt_co, + _ArrayLikeInt_co, + _ArrayLikeFloat_co, + _ArrayLikeComplex_co, + _ArrayLikeObject_co, ) +_T = TypeVar("_T") + +_2Tup = Tuple[_T, _T] +_5Tup = Tuple[ + _T, + NDArray[float64], + NDArray[int32], + NDArray[float64], + NDArray[float64], +] + __all__: List[str] -def poly(seq_of_zeros): ... -def roots(p): ... -def polyint(p, m=..., k=...): ... -def polyder(p, m=...): ... -def polyfit(x, y, deg, rcond=..., full=..., w=..., cov=...): ... -def polyval(p, x): ... -def polyadd(a1, a2): ... -def polysub(a1, a2): ... -def polymul(a1, a2): ... -def polydiv(u, v): ... +def poly(seq_of_zeros: ArrayLike) -> NDArray[floating[Any]]: ... + +# Returns either a float or complex array depending on the input values. +# See `np.linalg.eigvals`. +def roots(p: ArrayLike) -> NDArray[complexfloating[Any, Any]] | NDArray[floating[Any]]: ... + +@overload +def polyint( + p: poly1d, + m: SupportsInt | SupportsIndex = ..., + k: None | _ArrayLikeComplex_co | _ArrayLikeObject_co = ..., +) -> poly1d: ... +@overload +def polyint( + p: _ArrayLikeFloat_co, + m: SupportsInt | SupportsIndex = ..., + k: None | _ArrayLikeFloat_co = ..., +) -> NDArray[floating[Any]]: ... +@overload +def polyint( + p: _ArrayLikeComplex_co, + m: SupportsInt | SupportsIndex = ..., + k: None | _ArrayLikeComplex_co = ..., +) -> NDArray[complexfloating[Any, Any]]: ... +@overload +def polyint( + p: _ArrayLikeObject_co, + m: SupportsInt | SupportsIndex = ..., + k: None | _ArrayLikeObject_co = ..., +) -> NDArray[object_]: ... + +@overload +def polyder( + p: poly1d, + m: SupportsInt | SupportsIndex = ..., +) -> poly1d: ... +@overload +def polyder( + p: _ArrayLikeFloat_co, + m: SupportsInt | SupportsIndex = ..., +) -> NDArray[floating[Any]]: ... +@overload +def polyder( + p: _ArrayLikeComplex_co, + m: SupportsInt | SupportsIndex = ..., +) -> NDArray[complexfloating[Any, Any]]: ... +@overload +def polyder( + p: _ArrayLikeObject_co, + m: SupportsInt | SupportsIndex = ..., +) -> NDArray[object_]: ... + +@overload +def polyfit( + x: _ArrayLikeFloat_co, + y: _ArrayLikeFloat_co, + deg: SupportsIndex | SupportsInt, + rcond: None | float = ..., + full: L[False] = ..., + w: None | _ArrayLikeFloat_co = ..., + cov: L[False] = ..., +) -> NDArray[float64]: ... 
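
The `polyfit` overloads above (continuing below) switch on `full` and `cov`:
real inputs produce `float64` coefficient arrays, and ``full=True`` swaps the
return for the `_5Tup` of fit diagnostics. A sketch of the expected mypy
resolution (annotation illustrative)::

    import numpy as np
    import numpy.typing as npt

    AR_f8: npt.NDArray[np.float64]

    reveal_type(np.polyfit(AR_f8, AR_f8, 2))
    # expected roughly: ndarray[Any, dtype[float64]]
    reveal_type(np.polyfit(AR_f8, AR_f8, 2, full=True))
    # expected roughly: Tuple[ndarray[Any, dtype[float64]], ndarray[Any, dtype[float64]],
    #                         ndarray[Any, dtype[int32]], ndarray[Any, dtype[float64]],
    #                         ndarray[Any, dtype[float64]]]
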
+@overload +def polyfit( + x: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co, + deg: SupportsIndex | SupportsInt, + rcond: None | float = ..., + full: L[False] = ..., + w: None | _ArrayLikeFloat_co = ..., + cov: L[False] = ..., +) -> NDArray[complex128]: ... +@overload +def polyfit( + x: _ArrayLikeFloat_co, + y: _ArrayLikeFloat_co, + deg: SupportsIndex | SupportsInt, + rcond: None | float = ..., + full: L[False] = ..., + w: None | _ArrayLikeFloat_co = ..., + cov: L[True, "unscaled"] = ..., +) -> _2Tup[NDArray[float64]]: ... +@overload +def polyfit( + x: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co, + deg: SupportsIndex | SupportsInt, + rcond: None | float = ..., + full: L[False] = ..., + w: None | _ArrayLikeFloat_co = ..., + cov: L[True, "unscaled"] = ..., +) -> _2Tup[NDArray[complex128]]: ... +@overload +def polyfit( + x: _ArrayLikeFloat_co, + y: _ArrayLikeFloat_co, + deg: SupportsIndex | SupportsInt, + rcond: None | float = ..., + full: L[True] = ..., + w: None | _ArrayLikeFloat_co = ..., + cov: bool | L["unscaled"] = ..., +) -> _5Tup[NDArray[float64]]: ... +@overload +def polyfit( + x: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co, + deg: SupportsIndex | SupportsInt, + rcond: None | float = ..., + full: L[True] = ..., + w: None | _ArrayLikeFloat_co = ..., + cov: bool | L["unscaled"] = ..., +) -> _5Tup[NDArray[complex128]]: ... + +@overload +def polyval( + p: _ArrayLikeBool_co, + x: _ArrayLikeBool_co, +) -> NDArray[int64]: ... +@overload +def polyval( + p: _ArrayLikeUInt_co, + x: _ArrayLikeUInt_co, +) -> NDArray[unsignedinteger[Any]]: ... +@overload +def polyval( + p: _ArrayLikeInt_co, + x: _ArrayLikeInt_co, +) -> NDArray[signedinteger[Any]]: ... +@overload +def polyval( + p: _ArrayLikeFloat_co, + x: _ArrayLikeFloat_co, +) -> NDArray[floating[Any]]: ... +@overload +def polyval( + p: _ArrayLikeComplex_co, + x: _ArrayLikeComplex_co, +) -> NDArray[complexfloating[Any, Any]]: ... +@overload +def polyval( + p: _ArrayLikeObject_co, + x: _ArrayLikeObject_co, +) -> NDArray[object_]: ... + +@overload +def polyadd( + a1: poly1d, + a2: _ArrayLikeComplex_co | _ArrayLikeObject_co, +) -> poly1d: ... +@overload +def polyadd( + a1: _ArrayLikeComplex_co | _ArrayLikeObject_co, + a2: poly1d, +) -> poly1d: ... +@overload +def polyadd( + a1: _ArrayLikeBool_co, + a2: _ArrayLikeBool_co, +) -> NDArray[bool_]: ... +@overload +def polyadd( + a1: _ArrayLikeUInt_co, + a2: _ArrayLikeUInt_co, +) -> NDArray[unsignedinteger[Any]]: ... +@overload +def polyadd( + a1: _ArrayLikeInt_co, + a2: _ArrayLikeInt_co, +) -> NDArray[signedinteger[Any]]: ... +@overload +def polyadd( + a1: _ArrayLikeFloat_co, + a2: _ArrayLikeFloat_co, +) -> NDArray[floating[Any]]: ... +@overload +def polyadd( + a1: _ArrayLikeComplex_co, + a2: _ArrayLikeComplex_co, +) -> NDArray[complexfloating[Any, Any]]: ... +@overload +def polyadd( + a1: _ArrayLikeObject_co, + a2: _ArrayLikeObject_co, +) -> NDArray[object_]: ... + +@overload +def polysub( + a1: poly1d, + a2: _ArrayLikeComplex_co | _ArrayLikeObject_co, +) -> poly1d: ... +@overload +def polysub( + a1: _ArrayLikeComplex_co | _ArrayLikeObject_co, + a2: poly1d, +) -> poly1d: ... +@overload +def polysub( + a1: _ArrayLikeBool_co, + a2: _ArrayLikeBool_co, +) -> NoReturn: ... +@overload +def polysub( + a1: _ArrayLikeUInt_co, + a2: _ArrayLikeUInt_co, +) -> NDArray[unsignedinteger[Any]]: ... +@overload +def polysub( + a1: _ArrayLikeInt_co, + a2: _ArrayLikeInt_co, +) -> NDArray[signedinteger[Any]]: ... 
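
The arithmetic helpers promote through the usual numeric tower, as the
`polyval`/`polyadd`/`polysub` overloads above encode (the remaining `polysub`
overloads continue below). A sketch of the expected mypy resolution
(annotations illustrative)::

    import numpy as np
    import numpy.typing as npt

    AR_i8: npt.NDArray[np.int64]
    AR_f8: npt.NDArray[np.float64]

    reveal_type(np.polyval(AR_i8, AR_i8))  # expected roughly: ndarray[Any, dtype[signedinteger[Any]]]
    reveal_type(np.polyadd(AR_i8, AR_f8))  # expected roughly: ndarray[Any, dtype[floating[Any]]]
    reveal_type(np.polysub(AR_i8, AR_i8))  # expected roughly: ndarray[Any, dtype[signedinteger[Any]]]
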
+@overload +def polysub( + a1: _ArrayLikeFloat_co, + a2: _ArrayLikeFloat_co, +) -> NDArray[floating[Any]]: ... +@overload +def polysub( + a1: _ArrayLikeComplex_co, + a2: _ArrayLikeComplex_co, +) -> NDArray[complexfloating[Any, Any]]: ... +@overload +def polysub( + a1: _ArrayLikeObject_co, + a2: _ArrayLikeObject_co, +) -> NDArray[object_]: ... + +# NOTE: Not an alias, but they do have the same signature (that we can reuse) +polymul = polyadd + +@overload +def polydiv( + u: poly1d, + v: _ArrayLikeComplex_co | _ArrayLikeObject_co, +) -> _2Tup[poly1d]: ... +@overload +def polydiv( + u: _ArrayLikeComplex_co | _ArrayLikeObject_co, + v: poly1d, +) -> _2Tup[poly1d]: ... +@overload +def polydiv( + u: _ArrayLikeFloat_co, + v: _ArrayLikeFloat_co, +) -> _2Tup[NDArray[floating[Any]]]: ... +@overload +def polydiv( + u: _ArrayLikeComplex_co, + v: _ArrayLikeComplex_co, +) -> _2Tup[NDArray[complexfloating[Any, Any]]]: ... +@overload +def polydiv( + u: _ArrayLikeObject_co, + v: _ArrayLikeObject_co, +) -> _2Tup[NDArray[Any]]: ... diff --git a/numpy/lib/recfunctions.py b/numpy/lib/recfunctions.py index fbfbca73d..a491f612e 100644 --- a/numpy/lib/recfunctions.py +++ b/numpy/lib/recfunctions.py @@ -819,7 +819,8 @@ def repack_fields(a, align=False, recurse=False): ... >>> dt = np.dtype('u1, <i8, <f8', align=True) >>> dt - dtype({'names':['f0','f1','f2'], 'formats':['u1','<i8','<f8'], 'offsets':[0,8,16], 'itemsize':24}, align=True) + dtype({'names': ['f0', 'f1', 'f2'], 'formats': ['u1', '<i8', '<f8'], \ +'offsets': [0, 8, 16], 'itemsize': 24}, align=True) >>> print_offsets(dt) offsets: [0, 8, 16] itemsize: 24 diff --git a/numpy/lib/scimath.py b/numpy/lib/scimath.py index ed9ffd295..308f1328b 100644 --- a/numpy/lib/scimath.py +++ b/numpy/lib/scimath.py @@ -7,8 +7,7 @@ For example, for functions like `log` with branch cuts, the versions in this module provide the mathematically valid answers in the complex plane:: >>> import math - >>> from numpy.lib import scimath - >>> scimath.log(-math.exp(1)) == (1+1j*math.pi) + >>> np.emath.log(-math.exp(1)) == (1+1j*math.pi) True Similarly, `sqrt`, other base logarithms, `power` and trig functions are @@ -223,16 +222,16 @@ def sqrt(x): -------- For real, non-negative inputs this works just like `numpy.sqrt`: - >>> np.lib.scimath.sqrt(1) + >>> np.emath.sqrt(1) 1.0 - >>> np.lib.scimath.sqrt([1, 4]) + >>> np.emath.sqrt([1, 4]) array([1., 2.]) But it automatically handles negative inputs: - >>> np.lib.scimath.sqrt(-1) + >>> np.emath.sqrt(-1) 1j - >>> np.lib.scimath.sqrt([-1,4]) + >>> np.emath.sqrt([-1,4]) array([0.+1.j, 2.+0.j]) """ @@ -367,9 +366,9 @@ def logn(n, x): -------- >>> np.set_printoptions(precision=4) - >>> np.lib.scimath.logn(2, [4, 8]) + >>> np.emath.logn(2, [4, 8]) array([2., 3.]) - >>> np.lib.scimath.logn(2, [-4, -8, 8]) + >>> np.emath.logn(2, [-4, -8, 8]) array([2.+4.5324j, 3.+4.5324j, 3.+0.j ]) """ @@ -462,11 +461,11 @@ def power(x, p): -------- >>> np.set_printoptions(precision=4) - >>> np.lib.scimath.power([2, 4], 2) + >>> np.emath.power([2, 4], 2) array([ 4, 16]) - >>> np.lib.scimath.power([2, 4], -2) + >>> np.emath.power([2, 4], -2) array([0.25 , 0.0625]) - >>> np.lib.scimath.power([-2, 4], 2) + >>> np.emath.power([-2, 4], 2) array([ 4.-0.j, 16.+0.j]) """ diff --git a/numpy/lib/shape_base.pyi b/numpy/lib/shape_base.pyi index 1598dc36c..8aa283d02 100644 --- a/numpy/lib/shape_base.pyi +++ b/numpy/lib/shape_base.pyi @@ -17,7 +17,7 @@ from numpy.typing import ( ArrayLike, NDArray, _ShapeLike, - _NestedSequence, + _FiniteNestedSequence, _SupportsDType, 
_ArrayLikeBool_co, _ArrayLikeUInt_co, @@ -31,7 +31,7 @@ from numpy.core.shape_base import vstack _SCT = TypeVar("_SCT", bound=generic) -_ArrayLike = _NestedSequence[_SupportsDType[dtype[_SCT]]] +_ArrayLike = _FiniteNestedSequence[_SupportsDType[dtype[_SCT]]] # The signatures of `__array_wrap__` and `__array_prepare__` are the same; # give them unique names for the sake of clarity diff --git a/numpy/lib/stride_tricks.pyi b/numpy/lib/stride_tricks.pyi index bafc46e9c..aad404107 100644 --- a/numpy/lib/stride_tricks.pyi +++ b/numpy/lib/stride_tricks.pyi @@ -6,12 +6,12 @@ from numpy.typing import ( ArrayLike, _ShapeLike, _Shape, - _NestedSequence, + _FiniteNestedSequence, _SupportsArray, ) _SCT = TypeVar("_SCT", bound=generic) -_ArrayLike = _NestedSequence[_SupportsArray[dtype[_SCT]]] +_ArrayLike = _FiniteNestedSequence[_SupportsArray[dtype[_SCT]]] __all__: List[str] diff --git a/numpy/lib/tests/test_format.py b/numpy/lib/tests/test_format.py index 10656a233..78e67a89b 100644 --- a/numpy/lib/tests/test_format.py +++ b/numpy/lib/tests/test_format.py @@ -276,8 +276,6 @@ Test the header writing. ''' import sys import os -import shutil -import tempfile import warnings import pytest from io import BytesIO diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index 829691b1c..c7dfe5673 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -553,6 +553,11 @@ class TestInsert: with pytest.raises(IndexError): np.insert([0, 1, 2], np.array([], dtype=float), []) + @pytest.mark.parametrize('idx', [4, -4]) + def test_index_out_of_bounds(self, idx): + with pytest.raises(IndexError, match='out of bounds'): + np.insert([0, 1, 2], [idx], [3, 4]) + class TestAmax: @@ -1528,7 +1533,7 @@ class TestVectorize: ([('x',)], [('y',), ()])) assert_equal(nfb._parse_gufunc_signature('(),(a,b,c),(d)->(d,e)'), ([(), ('a', 'b', 'c'), ('d',)], [('d', 'e')])) - + # Tests to check if whitespaces are ignored assert_equal(nfb._parse_gufunc_signature('(x )->()'), ([('x',)], [()])) assert_equal(nfb._parse_gufunc_signature('( x , y )->( )'), @@ -1853,35 +1858,116 @@ class TestUnwrap: assert sm_discont.dtype == wrap_uneven.dtype +@pytest.mark.parametrize( + "dtype", "O" + np.typecodes["AllInteger"] + np.typecodes["Float"] +) +@pytest.mark.parametrize("M", [0, 1, 10]) class TestFilterwindows: - def test_hanning(self): + def test_hanning(self, dtype: str, M: int) -> None: + scalar = np.array(M, dtype=dtype)[()] + + w = hanning(scalar) + if dtype == "O": + ref_dtype = np.float64 + else: + ref_dtype = np.result_type(scalar.dtype, np.float64) + assert w.dtype == ref_dtype + # check symmetry - w = hanning(10) assert_equal(w, flipud(w)) + # check known value - assert_almost_equal(np.sum(w, axis=0), 4.500, 4) + if scalar < 1: + assert_array_equal(w, np.array([])) + elif scalar == 1: + assert_array_equal(w, np.ones(1)) + else: + assert_almost_equal(np.sum(w, axis=0), 4.500, 4) + + def test_hamming(self, dtype: str, M: int) -> None: + scalar = np.array(M, dtype=dtype)[()] + + w = hamming(scalar) + if dtype == "O": + ref_dtype = np.float64 + else: + ref_dtype = np.result_type(scalar.dtype, np.float64) + assert w.dtype == ref_dtype + + # check symmetry + assert_equal(w, flipud(w)) + + # check known value + if scalar < 1: + assert_array_equal(w, np.array([])) + elif scalar == 1: + assert_array_equal(w, np.ones(1)) + else: + assert_almost_equal(np.sum(w, axis=0), 4.9400, 4) + + def test_bartlett(self, dtype: str, M: int) -> None: + scalar = np.array(M, dtype=dtype)[()] 
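# Editor's note (illustrative, not part of the diff): indexing a 0-d
# array with an empty tuple, as on the line above, unwraps it into a
# scalar -- an array scalar for the numeric dtypes, the stored Python
# object for dtype "O". For example:
#
#     np.array(10, dtype=np.int64)[()]   # np.int64 scalar, not a 0-d array
#
# This is how these parametrized window tests obtain `M` in each dtype
# under test.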
+ + w = bartlett(scalar) + if dtype == "O": + ref_dtype = np.float64 + else: + ref_dtype = np.result_type(scalar.dtype, np.float64) + assert w.dtype == ref_dtype - def test_hamming(self): # check symmetry - w = hamming(10) assert_equal(w, flipud(w)) + # check known value - assert_almost_equal(np.sum(w, axis=0), 4.9400, 4) + if scalar < 1: + assert_array_equal(w, np.array([])) + elif scalar == 1: + assert_array_equal(w, np.ones(1)) + else: + assert_almost_equal(np.sum(w, axis=0), 4.4444, 4) + + def test_blackman(self, dtype: str, M: int) -> None: + scalar = np.array(M, dtype=dtype)[()] + + w = blackman(scalar) + if dtype == "O": + ref_dtype = np.float64 + else: + ref_dtype = np.result_type(scalar.dtype, np.float64) + assert w.dtype == ref_dtype - def test_bartlett(self): # check symmetry - w = bartlett(10) assert_equal(w, flipud(w)) + # check known value - assert_almost_equal(np.sum(w, axis=0), 4.4444, 4) + if scalar < 1: + assert_array_equal(w, np.array([])) + elif scalar == 1: + assert_array_equal(w, np.ones(1)) + else: + assert_almost_equal(np.sum(w, axis=0), 3.7800, 4) + + def test_kaiser(self, dtype: str, M: int) -> None: + scalar = np.array(M, dtype=dtype)[()] + + w = kaiser(scalar, 0) + if dtype == "O": + ref_dtype = np.float64 + else: + ref_dtype = np.result_type(scalar.dtype, np.float64) + assert w.dtype == ref_dtype - def test_blackman(self): # check symmetry - w = blackman(10) assert_equal(w, flipud(w)) + # check known value - assert_almost_equal(np.sum(w, axis=0), 3.7800, 4) + if scalar < 1: + assert_array_equal(w, np.array([])) + elif scalar == 1: + assert_array_equal(w, np.ones(1)) + else: + assert_almost_equal(np.sum(w, axis=0), 10, 15) class TestTrapz: @@ -3432,6 +3518,16 @@ class TestMedian: a = MySubClass([1, 2, 3]) assert_equal(np.median(a), -7) + @pytest.mark.parametrize('arr', + ([1., 2., 3.], [1., np.nan, 3.], np.nan, 0.)) + def test_subclass2(self, arr): + """Check that we return subclasses, even if a NaN scalar.""" + class MySubclass(np.ndarray): + pass + + m = np.median(np.array(arr).view(MySubclass)) + assert isinstance(m, MySubclass) + def test_out(self): o = np.zeros((4,)) d = np.ones((3, 4)) diff --git a/numpy/lib/tests/test_index_tricks.py b/numpy/lib/tests/test_index_tricks.py index c21aefd1a..26a34be7e 100644 --- a/numpy/lib/tests/test_index_tricks.py +++ b/numpy/lib/tests/test_index_tricks.py @@ -4,7 +4,6 @@ import numpy as np from numpy.testing import ( assert_, assert_equal, assert_array_equal, assert_almost_equal, assert_array_almost_equal, assert_raises, assert_raises_regex, - assert_warns ) from numpy.lib.index_tricks import ( mgrid, ogrid, ndenumerate, fill_diagonal, diag_indices, diag_indices_from, diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py index 11f2b7d4d..5201b8e6e 100644 --- a/numpy/lib/tests/test_io.py +++ b/numpy/lib/tests/test_io.py @@ -1196,6 +1196,7 @@ class TestLoadTxt(LoadTxtBase): a = np.array([[1, 2, 3, 5], [4, 5, 7, 8], [2, 1, 4, 5]], int) assert_array_equal(x, a) + class Testfromregex: def test_record(self): c = TextIO() @@ -1255,6 +1256,13 @@ class Testfromregex: x = np.fromregex(c, regexp, dt) assert_array_equal(x, a) + def test_bad_dtype_not_structured(self): + regexp = re.compile(b'(\\d)') + c = BytesIO(b'123') + with pytest.raises(TypeError, match='structured datatype'): + np.fromregex(c, regexp, dtype=np.float64) + + #####-------------------------------------------------------------------------- diff --git a/numpy/lib/tests/test_nanfunctions.py b/numpy/lib/tests/test_nanfunctions.py index 
1f1f5601b..126dba495 100644 --- a/numpy/lib/tests/test_nanfunctions.py +++ b/numpy/lib/tests/test_nanfunctions.py @@ -1,11 +1,12 @@ import warnings import pytest +import inspect import numpy as np from numpy.lib.nanfunctions import _nan_mask, _replace_nan from numpy.testing import ( - assert_, assert_equal, assert_almost_equal, assert_no_warnings, - assert_raises, assert_array_equal, suppress_warnings + assert_, assert_equal, assert_almost_equal, assert_raises, + assert_array_equal, suppress_warnings ) @@ -35,6 +36,53 @@ _ndat_zeros = np.array([[0.6244, 0.0, 0.2692, 0.0116, 0.0, 0.1170], [0.1610, 0.0, 0.0, 0.1859, 0.3146, 0.0]]) +class TestSignatureMatch: + NANFUNCS = { + np.nanmin: np.amin, + np.nanmax: np.amax, + np.nanargmin: np.argmin, + np.nanargmax: np.argmax, + np.nansum: np.sum, + np.nanprod: np.prod, + np.nancumsum: np.cumsum, + np.nancumprod: np.cumprod, + np.nanmean: np.mean, + np.nanmedian: np.median, + np.nanpercentile: np.percentile, + np.nanquantile: np.quantile, + np.nanvar: np.var, + np.nanstd: np.std, + } + IDS = [k.__name__ for k in NANFUNCS] + + @staticmethod + def get_signature(func, default="..."): + """Construct a signature and replace all default parameter-values.""" + prm_list = [] + signature = inspect.signature(func) + for prm in signature.parameters.values(): + if prm.default is inspect.Parameter.empty: + prm_list.append(prm) + else: + prm_list.append(prm.replace(default=default)) + return inspect.Signature(prm_list) + + @pytest.mark.parametrize("nan_func,func", NANFUNCS.items(), ids=IDS) + def test_signature_match(self, nan_func, func): + # Ignore the default parameter-values as they can sometimes differ + # between the two functions (*e.g.* one has `False` while the other + # has `np._NoValue`) + signature = self.get_signature(func) + nan_signature = self.get_signature(nan_func) + np.testing.assert_equal(signature, nan_signature) + + def test_exhaustiveness(self): + """Validate that all nan functions are actually tested.""" + np.testing.assert_equal( + set(self.IDS), set(np.lib.nanfunctions.__all__) + ) + + class TestNanFunctions_MinMax: nanfuncs = [np.nanmin, np.nanmax] @@ -83,21 +131,23 @@ class TestNanFunctions_MinMax: res = nf(_ndat, axis=1) assert_almost_equal(res, tgt) - def test_allnans(self): - mat = np.array([np.nan]*9).reshape(3, 3) - for f in self.nanfuncs: - for axis in [None, 0, 1]: - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter('always') - assert_(np.isnan(f(mat, axis=axis)).all()) - assert_(len(w) == 1, 'no warning raised') - assert_(issubclass(w[0].category, RuntimeWarning)) - # Check scalars - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter('always') - assert_(np.isnan(f(np.nan))) - assert_(len(w) == 1, 'no warning raised') - assert_(issubclass(w[0].category, RuntimeWarning)) + @pytest.mark.parametrize("axis", [None, 0, 1]) + @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) + @pytest.mark.parametrize("array", [ + np.array(np.nan), + np.full((3, 3), np.nan), + ], ids=["0d", "2d"]) + def test_allnans(self, axis, dtype, array): + if axis is not None and array.ndim == 0: + pytest.skip(f"`axis != None` not supported for 0d arrays") + + array = array.astype(dtype) + match = "All-NaN slice encountered" + for func in self.nanfuncs: + with pytest.warns(RuntimeWarning, match=match): + out = func(array, axis=axis) + assert np.isnan(out).all() + assert out.dtype == array.dtype def test_masked(self): mat = np.ma.fix_invalid(_ndat) @@ -168,6 +218,46 @@ class TestNanFunctions_MinMax: 
assert_(len(w) == 1, 'no warning raised') assert_(issubclass(w[0].category, RuntimeWarning)) + @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) + def test_initial(self, dtype): + class MyNDArray(np.ndarray): + pass + + ar = np.arange(9).astype(dtype) + ar[:5] = np.nan + + for f in self.nanfuncs: + initial = 100 if f is np.nanmax else 0 + + ret1 = f(ar, initial=initial) + assert ret1.dtype == dtype + assert ret1 == initial + + ret2 = f(ar.view(MyNDArray), initial=initial) + assert ret2.dtype == dtype + assert ret2 == initial + + @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) + def test_where(self, dtype): + class MyNDArray(np.ndarray): + pass + + ar = np.arange(9).reshape(3, 3).astype(dtype) + ar[0, :] = np.nan + where = np.ones_like(ar, dtype=np.bool_) + where[:, 0] = False + + for f in self.nanfuncs: + reference = 4 if f is np.nanmin else 8 + + ret1 = f(ar, where=where, initial=5) + assert ret1.dtype == dtype + assert ret1 == reference + + ret2 = f(ar.view(MyNDArray), where=where, initial=5) + assert ret2.dtype == dtype + assert ret2 == reference + class TestNanFunctions_ArgminArgmax: @@ -193,12 +283,20 @@ class TestNanFunctions_ArgminArgmax: assert_(not fcmp(val, row).any()) assert_(not np.equal(val, row[:ind]).any()) - def test_allnans(self): - mat = np.array([np.nan]*9).reshape(3, 3) - for f in self.nanfuncs: - for axis in [None, 0, 1]: - assert_raises(ValueError, f, mat, axis=axis) - assert_raises(ValueError, f, np.nan) + @pytest.mark.parametrize("axis", [None, 0, 1]) + @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) + @pytest.mark.parametrize("array", [ + np.array(np.nan), + np.full((3, 3), np.nan), + ], ids=["0d", "2d"]) + def test_allnans(self, axis, dtype, array): + if axis is not None and array.ndim == 0: + pytest.skip(f"`axis != None` not supported for 0d arrays") + + array = array.astype(dtype) + for func in self.nanfuncs: + with pytest.raises(ValueError, match="All-NaN slice encountered"): + func(array, axis=axis) def test_empty(self): mat = np.zeros((0, 3)) @@ -230,80 +328,105 @@ class TestNanFunctions_ArgminArgmax: res = f(mine) assert_(res.shape == ()) + @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) + def test_keepdims(self, dtype): + ar = np.arange(9).astype(dtype) + ar[:5] = np.nan -class TestNanFunctions_IntTypes: - - int_types = (np.int8, np.int16, np.int32, np.int64, np.uint8, - np.uint16, np.uint32, np.uint64) - - mat = np.array([127, 39, 93, 87, 46]) - - def integer_arrays(self): - for dtype in self.int_types: - yield self.mat.astype(dtype) - - def test_nanmin(self): - tgt = np.min(self.mat) - for mat in self.integer_arrays(): - assert_equal(np.nanmin(mat), tgt) - - def test_nanmax(self): - tgt = np.max(self.mat) - for mat in self.integer_arrays(): - assert_equal(np.nanmax(mat), tgt) - - def test_nanargmin(self): - tgt = np.argmin(self.mat) - for mat in self.integer_arrays(): - assert_equal(np.nanargmin(mat), tgt) - - def test_nanargmax(self): - tgt = np.argmax(self.mat) - for mat in self.integer_arrays(): - assert_equal(np.nanargmax(mat), tgt) - - def test_nansum(self): - tgt = np.sum(self.mat) - for mat in self.integer_arrays(): - assert_equal(np.nansum(mat), tgt) + for f in self.nanfuncs: + reference = 5 if f is np.nanargmin else 8 + ret = f(ar, keepdims=True) + assert ret.ndim == ar.ndim + assert ret == reference - def test_nanprod(self): - tgt = np.prod(self.mat) - for mat in self.integer_arrays(): - assert_equal(np.nanprod(mat), tgt) + @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) + def 
test_out(self, dtype): + ar = np.arange(9).astype(dtype) + ar[:5] = np.nan - def test_nancumsum(self): - tgt = np.cumsum(self.mat) - for mat in self.integer_arrays(): - assert_equal(np.nancumsum(mat), tgt) + for f in self.nanfuncs: + out = np.zeros((), dtype=np.intp) + reference = 5 if f is np.nanargmin else 8 + ret = f(ar, out=out) + assert ret is out + assert ret == reference - def test_nancumprod(self): - tgt = np.cumprod(self.mat) - for mat in self.integer_arrays(): - assert_equal(np.nancumprod(mat), tgt) - def test_nanmean(self): - tgt = np.mean(self.mat) - for mat in self.integer_arrays(): - assert_equal(np.nanmean(mat), tgt) - def test_nanvar(self): - tgt = np.var(self.mat) - for mat in self.integer_arrays(): - assert_equal(np.nanvar(mat), tgt) +_TEST_ARRAYS = { + "0d": np.array(5), + "1d": np.array([127, 39, 93, 87, 46]) +} +for _v in _TEST_ARRAYS.values(): + _v.setflags(write=False) - tgt = np.var(mat, ddof=1) - for mat in self.integer_arrays(): - assert_equal(np.nanvar(mat, ddof=1), tgt) - def test_nanstd(self): - tgt = np.std(self.mat) - for mat in self.integer_arrays(): - assert_equal(np.nanstd(mat), tgt) +@pytest.mark.parametrize( + "dtype", + np.typecodes["AllInteger"] + np.typecodes["AllFloat"] + "O", +) +@pytest.mark.parametrize("mat", _TEST_ARRAYS.values(), ids=_TEST_ARRAYS.keys()) +class TestNanFunctions_NumberTypes: + nanfuncs = { + np.nanmin: np.min, + np.nanmax: np.max, + np.nanargmin: np.argmin, + np.nanargmax: np.argmax, + np.nansum: np.sum, + np.nanprod: np.prod, + np.nancumsum: np.cumsum, + np.nancumprod: np.cumprod, + np.nanmean: np.mean, + np.nanmedian: np.median, + np.nanvar: np.var, + np.nanstd: np.std, + } + nanfunc_ids = [i.__name__ for i in nanfuncs] + + @pytest.mark.parametrize("nanfunc,func", nanfuncs.items(), ids=nanfunc_ids) + @np.errstate(over="ignore") + def test_nanfunc(self, mat, dtype, nanfunc, func): + mat = mat.astype(dtype) + tgt = func(mat) + out = nanfunc(mat) + + assert_almost_equal(out, tgt) + if dtype == "O": + assert type(out) is type(tgt) + else: + assert out.dtype == tgt.dtype + + @pytest.mark.parametrize( + "nanfunc,func", + [(np.nanquantile, np.quantile), (np.nanpercentile, np.percentile)], + ids=["nanquantile", "nanpercentile"], + ) + def test_nanfunc_q(self, mat, dtype, nanfunc, func): + mat = mat.astype(dtype) + tgt = func(mat, q=1) + out = nanfunc(mat, q=1) + + assert_almost_equal(out, tgt) + if dtype == "O": + assert type(out) is type(tgt) + else: + assert out.dtype == tgt.dtype + + @pytest.mark.parametrize( + "nanfunc,func", + [(np.nanvar, np.var), (np.nanstd, np.std)], + ids=["nanvar", "nanstd"], + ) + def test_nanfunc_ddof(self, mat, dtype, nanfunc, func): + mat = mat.astype(dtype) + tgt = func(mat, ddof=0.5) + out = nanfunc(mat, ddof=0.5) - tgt = np.std(self.mat, ddof=1) - for mat in self.integer_arrays(): - assert_equal(np.nanstd(mat, ddof=1), tgt) + assert_almost_equal(out, tgt) + if dtype == "O": + assert type(out) is type(tgt) + else: + assert out.dtype == tgt.dtype class SharedNanFunctionsTestsMixin: @@ -416,20 +539,21 @@ class TestNanFunctions_SumProd(SharedNanFunctionsTestsMixin): nanfuncs = [np.nansum, np.nanprod] stdfuncs = [np.sum, np.prod] - def test_allnans(self): - # Check for FutureWarning - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter('always') - res = np.nansum([np.nan]*3, axis=None) - assert_(res == 0, 'result is not 0') - assert_(len(w) == 0, 'warning raised') - # Check scalar - res = np.nansum(np.nan) - assert_(res == 0, 'result is not 0') - assert_(len(w) == 0, 'warning raised') - 
# Check there is no warning for not all-nan - np.nansum([0]*3, axis=None) - assert_(len(w) == 0, 'unwanted warning raised') + @pytest.mark.parametrize("axis", [None, 0, 1]) + @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) + @pytest.mark.parametrize("array", [ + np.array(np.nan), + np.full((3, 3), np.nan), + ], ids=["0d", "2d"]) + def test_allnans(self, axis, dtype, array): + if axis is not None and array.ndim == 0: + pytest.skip(f"`axis != None` not supported for 0d arrays") + + array = array.astype(dtype) + for func, identity in zip(self.nanfuncs, [0, 1]): + out = func(array, axis=axis) + assert np.all(out == identity) + assert out.dtype == array.dtype def test_empty(self): for f, tgt_value in zip([np.nansum, np.nanprod], [0, 1]): @@ -444,25 +568,51 @@ class TestNanFunctions_SumProd(SharedNanFunctionsTestsMixin): res = f(mat, axis=None) assert_equal(res, tgt) + @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) + def test_initial(self, dtype): + ar = np.arange(9).astype(dtype) + ar[:5] = np.nan + + for f in self.nanfuncs: + reference = 28 if f is np.nansum else 3360 + ret = f(ar, initial=2) + assert ret.dtype == dtype + assert ret == reference + + @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) + def test_where(self, dtype): + ar = np.arange(9).reshape(3, 3).astype(dtype) + ar[0, :] = np.nan + where = np.ones_like(ar, dtype=np.bool_) + where[:, 0] = False + + for f in self.nanfuncs: + reference = 26 if f is np.nansum else 2240 + ret = f(ar, where=where, initial=2) + assert ret.dtype == dtype + assert ret == reference + class TestNanFunctions_CumSumProd(SharedNanFunctionsTestsMixin): nanfuncs = [np.nancumsum, np.nancumprod] stdfuncs = [np.cumsum, np.cumprod] - def test_allnans(self): - for f, tgt_value in zip(self.nanfuncs, [0, 1]): - # Unlike other nan-functions, sum/prod/cumsum/cumprod don't warn on all nan input - with assert_no_warnings(): - res = f([np.nan]*3, axis=None) - tgt = tgt_value*np.ones((3)) - assert_(np.array_equal(res, tgt), 'result is not %s * np.ones((3))' % (tgt_value)) - # Check scalar - res = f(np.nan) - tgt = tgt_value*np.ones((1)) - assert_(np.array_equal(res, tgt), 'result is not %s * np.ones((1))' % (tgt_value)) - # Check there is no warning for not all-nan - f([0]*3, axis=None) + @pytest.mark.parametrize("axis", [None, 0, 1]) + @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) + @pytest.mark.parametrize("array", [ + np.array(np.nan), + np.full((3, 3), np.nan) + ], ids=["0d", "2d"]) + def test_allnans(self, axis, dtype, array): + if axis is not None and array.ndim == 0: + pytest.skip(f"`axis != None` not supported for 0d arrays") + + array = array.astype(dtype) + for func, identity in zip(self.nanfuncs, [0, 1]): + out = func(array) + assert np.all(out == identity) + assert out.dtype == array.dtype def test_empty(self): for f, tgt_value in zip(self.nanfuncs, [0, 1]): @@ -558,19 +708,29 @@ class TestNanFunctions_MeanVarStd(SharedNanFunctionsTestsMixin): else: assert_(len(sup.log) == 0) - def test_allnans(self): - mat = np.array([np.nan]*9).reshape(3, 3) - for f in self.nanfuncs: - for axis in [None, 0, 1]: - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter('always') - assert_(np.isnan(f(mat, axis=axis)).all()) - assert_(len(w) == 1) - assert_(issubclass(w[0].category, RuntimeWarning)) - # Check scalar - assert_(np.isnan(f(np.nan))) - assert_(len(w) == 2) - assert_(issubclass(w[0].category, RuntimeWarning)) + @pytest.mark.parametrize("axis", [None, 0, 1]) + @pytest.mark.parametrize("dtype", 
np.typecodes["AllFloat"]) + @pytest.mark.parametrize("array", [ + np.array(np.nan), + np.full((3, 3), np.nan), + ], ids=["0d", "2d"]) + def test_allnans(self, axis, dtype, array): + if axis is not None and array.ndim == 0: + pytest.skip(f"`axis != None` not supported for 0d arrays") + + array = array.astype(dtype) + match = "(Degrees of freedom <= 0 for slice.)|(Mean of empty slice)" + for func in self.nanfuncs: + with pytest.warns(RuntimeWarning, match=match): + out = func(array, axis=axis) + assert np.isnan(out).all() + + # `nanvar` and `nanstd` convert complex inputs to their + # corresponding floating dtype + if func is np.nanmean: + assert out.dtype == array.dtype + else: + assert out.dtype == np.abs(array).dtype def test_empty(self): mat = np.zeros((0, 3)) @@ -587,6 +747,21 @@ class TestNanFunctions_MeanVarStd(SharedNanFunctionsTestsMixin): assert_equal(f(mat, axis=axis), np.zeros([])) assert_(len(w) == 0) + @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) + def test_where(self, dtype): + ar = np.arange(9).reshape(3, 3).astype(dtype) + ar[0, :] = np.nan + where = np.ones_like(ar, dtype=np.bool_) + where[:, 0] = False + + for f, f_std in zip(self.nanfuncs, self.stdfuncs): + reference = f_std(ar[where][2:]) + dtype_reference = dtype if f is np.nanmean else ar.real.dtype + + ret = f(ar, where=where) + assert ret.dtype == dtype_reference + np.testing.assert_allclose(ret, reference) + _TIME_UNITS = ( "Y", "M", "W", "D", "h", "m", "s", "ms", "us", "ns", "ps", "fs", "as" @@ -834,24 +1009,21 @@ class TestNanFunctions_Percentile: res = np.nanpercentile(_ndat, (28, 98), axis=1) assert_almost_equal(res, tgt) - def test_allnans(self): - mat = np.array([np.nan]*9).reshape(3, 3) - for axis in [None, 0, 1]: - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter('always') - assert_(np.isnan(np.nanpercentile(mat, 60, axis=axis)).all()) - if axis is None: - assert_(len(w) == 1) - else: - assert_(len(w) == 3) - assert_(issubclass(w[0].category, RuntimeWarning)) - # Check scalar - assert_(np.isnan(np.nanpercentile(np.nan, 60))) - if axis is None: - assert_(len(w) == 2) - else: - assert_(len(w) == 4) - assert_(issubclass(w[0].category, RuntimeWarning)) + @pytest.mark.parametrize("axis", [None, 0, 1]) + @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) + @pytest.mark.parametrize("array", [ + np.array(np.nan), + np.full((3, 3), np.nan), + ], ids=["0d", "2d"]) + def test_allnans(self, axis, dtype, array): + if axis is not None and array.ndim == 0: + pytest.skip(f"`axis != None` not supported for 0d arrays") + + array = array.astype(dtype) + with pytest.warns(RuntimeWarning, match="All-NaN slice encountered"): + out = np.nanpercentile(array, 60, axis=axis) + assert np.isnan(out).all() + assert out.dtype == array.dtype def test_empty(self): mat = np.zeros((0, 3)) @@ -944,6 +1116,22 @@ class TestNanFunctions_Quantile: np.nanquantile(np.arange(100.), p, interpolation="midpoint") assert_array_equal(p, p0) + @pytest.mark.parametrize("axis", [None, 0, 1]) + @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) + @pytest.mark.parametrize("array", [ + np.array(np.nan), + np.full((3, 3), np.nan), + ], ids=["0d", "2d"]) + def test_allnans(self, axis, dtype, array): + if axis is not None and array.ndim == 0: + pytest.skip(f"`axis != None` not supported for 0d arrays") + + array = array.astype(dtype) + with pytest.warns(RuntimeWarning, match="All-NaN slice encountered"): + out = np.nanquantile(array, 1, axis=axis) + assert np.isnan(out).all() + assert out.dtype == 
array.dtype + @pytest.mark.parametrize("arr, expected", [ # array of floats with some nans (np.array([np.nan, 5.0, np.nan, np.inf]), diff --git a/numpy/lib/tests/test_regression.py b/numpy/lib/tests/test_regression.py index 373226277..55df2a675 100644 --- a/numpy/lib/tests/test_regression.py +++ b/numpy/lib/tests/test_regression.py @@ -1,5 +1,3 @@ -import pytest - import os import numpy as np diff --git a/numpy/lib/twodim_base.py b/numpy/lib/twodim_base.py index 83c028061..811faff79 100644 --- a/numpy/lib/twodim_base.py +++ b/numpy/lib/twodim_base.py @@ -67,7 +67,7 @@ def fliplr(m): See Also -------- flipud : Flip array in the up/down direction. - flip : Flip array in one or more dimesions. + flip : Flip array in one or more dimensions. rot90 : Rotate array counterclockwise. Notes @@ -120,7 +120,7 @@ def flipud(m): See Also -------- fliplr : Flip array in the left/right direction. - flip : Flip array in one or more dimesions. + flip : Flip array in one or more dimensions. rot90 : Rotate array counterclockwise. Notes diff --git a/numpy/lib/twodim_base.pyi b/numpy/lib/twodim_base.pyi index 007338d77..cba503ca3 100644 --- a/numpy/lib/twodim_base.pyi +++ b/numpy/lib/twodim_base.pyi @@ -33,7 +33,7 @@ from numpy.typing import ( _SupportsDType, ArrayLike, NDArray, - _NestedSequence, + _FiniteNestedSequence, _SupportsArray, _ArrayLikeInt_co, _ArrayLikeFloat_co, @@ -55,7 +55,7 @@ _DTypeLike = Union[ dtype[_SCT], _SupportsDType[dtype[_SCT]], ] -_ArrayLike = _NestedSequence[_SupportsArray[dtype[_SCT]]] +_ArrayLike = _FiniteNestedSequence[_SupportsArray[dtype[_SCT]]] __all__: List[str] diff --git a/numpy/lib/type_check.pyi b/numpy/lib/type_check.pyi index 5eb0e62d2..0a55dbf21 100644 --- a/numpy/lib/type_check.pyi +++ b/numpy/lib/type_check.pyi @@ -28,7 +28,7 @@ from numpy.typing import ( _64Bit, _SupportsDType, _ScalarLike_co, - _NestedSequence, + _FiniteNestedSequence, _SupportsArray, _DTypeLikeComplex, ) @@ -39,7 +39,7 @@ _SCT = TypeVar("_SCT", bound=generic) _NBit1 = TypeVar("_NBit1", bound=NBitBase) _NBit2 = TypeVar("_NBit2", bound=NBitBase) -_ArrayLike = _NestedSequence[_SupportsArray[dtype[_SCT]]] +_ArrayLike = _FiniteNestedSequence[_SupportsArray[dtype[_SCT]]] class _SupportsReal(Protocol[_T_co]): @property diff --git a/numpy/lib/utils.py b/numpy/lib/utils.py index 1f2cb66fa..1df2ab09b 100644 --- a/numpy/lib/utils.py +++ b/numpy/lib/utils.py @@ -1002,7 +1002,7 @@ def safe_eval(source): return ast.literal_eval(source) -def _median_nancheck(data, result, axis, out): +def _median_nancheck(data, result, axis): """ Utility function to check median result from data for NaN values at the end and return NaN in that case. Input result can also be a MaskedArray. @@ -1010,18 +1010,18 @@ def _median_nancheck(data, result, axis, out): Parameters ---------- data : array - Input data to median function + Sorted input data to median function result : Array or MaskedArray - Result of median function + Result of median function. axis : int Axis along which the median was computed. - out : ndarray, optional - Output array in which to place the result. Returns ------- - median : scalar or ndarray - Median or NaN in axes which contained NaN in the input. + result : scalar or ndarray + Median or NaN in axes which contained NaN in the input. If the input + was an array, NaN will be inserted in-place. If a scalar, either the + input itself or a scalar NaN. 
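    As an illustration (editor's addition, not part of the original
    docstring), this helper is what gives `median` its NaN-propagating
    behaviour::

        >>> np.median([1.0, 2.0, np.nan])
        nan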
""" if data.size == 0: return result @@ -1029,14 +1029,12 @@ def _median_nancheck(data, result, axis, out): # masked NaN values are ok if np.ma.isMaskedArray(n): n = n.filled(False) - if result.ndim == 0: - if n == True: - if out is not None: - out[...] = data.dtype.type(np.nan) - result = out - else: - result = data.dtype.type(np.nan) - elif np.count_nonzero(n.ravel()) > 0: + if np.count_nonzero(n.ravel()) > 0: + # Without given output, it is possible that the current result is a + # numpy scalar, which is not writeable. If so, just return nan. + if isinstance(result, np.generic): + return data.dtype.type(np.nan) + result[n] = np.nan return result diff --git a/numpy/linalg/__init__.pyi b/numpy/linalg/__init__.pyi index 7237d865d..d457f153a 100644 --- a/numpy/linalg/__init__.pyi +++ b/numpy/linalg/__init__.pyi @@ -1,5 +1,28 @@ from typing import Any, List +from numpy.linalg.linalg import ( + matrix_power as matrix_power, + solve as solve, + tensorsolve as tensorsolve, + tensorinv as tensorinv, + inv as inv, + cholesky as cholesky, + eigvals as eigvals, + eigvalsh as eigvalsh, + pinv as pinv, + slogdet as slogdet, + det as det, + svd as svd, + eig as eig, + eigh as eigh, + lstsq as lstsq, + norm as norm, + qr as qr, + cond as cond, + matrix_rank as matrix_rank, + multi_dot as multi_dot, +) + from numpy._pytesttester import PytestTester __all__: List[str] @@ -7,24 +30,3 @@ __path__: List[str] test: PytestTester class LinAlgError(Exception): ... - -def tensorsolve(a, b, axes=...): ... -def solve(a, b): ... -def tensorinv(a, ind=...): ... -def inv(a): ... -def matrix_power(a, n): ... -def cholesky(a): ... -def qr(a, mode=...): ... -def eigvals(a): ... -def eigvalsh(a, UPLO=...): ... -def eig(a): ... -def eigh(a, UPLO=...): ... -def svd(a, full_matrices=..., compute_uv=..., hermitian=...): ... -def cond(x, p=...): ... -def matrix_rank(A, tol=..., hermitian=...): ... -def pinv(a, rcond=..., hermitian=...): ... -def slogdet(a): ... -def det(a): ... -def lstsq(a, b, rcond=...): ... -def norm(x, ord=..., axis=..., keepdims=...): ... -def multi_dot(arrays, *, out=...): ... diff --git a/numpy/linalg/lapack_lite/README.rst b/numpy/linalg/lapack_lite/README.rst index ed738ab86..8baa1d8ff 100644 --- a/numpy/linalg/lapack_lite/README.rst +++ b/numpy/linalg/lapack_lite/README.rst @@ -12,15 +12,18 @@ automatically from a directory of LAPACK source files. You'll need `plex 2.0.0dev`_, available from PyPI, installed to do the appropriate scrubbing. As of writing, **this is only available for python 2.7**, and is unlikely to ever be ported to python 3. +As a result, all the Python scripts in this directory must remain compatible +with Python 2.7, even though NumPy itself no longer supports this version, +until these scripts are rewritten to use something other than ``plex``. .. _plex 2.0.0dev: https://pypi.python.org/pypi/plex/ The routines that ``lapack_litemodule.c`` wraps are listed in ``wrapped_routines``, along with a few exceptions that aren't picked up properly. 
Assuming that you have an unpacked LAPACK source tree in -``~/LAPACK``, you generate the new routines in this directory with:: -$ python ./make_lite.py wrapped_routines ~/LAPACK +$ ./make_lite.py wrapped_routines /tmp/lapack-3.x.x This will grab the right routines, with dependencies, put them into the appropriate ``f2c_*.f`` files, run ``f2c`` over them, then do some scrubbing diff --git a/numpy/linalg/lapack_lite/clapack_scrub.py b/numpy/linalg/lapack_lite/clapack_scrub.py index 738fad7fe..fffd70910 100644 --- a/numpy/linalg/lapack_lite/clapack_scrub.py +++ b/numpy/linalg/lapack_lite/clapack_scrub.py @@ -1,12 +1,17 @@ -#!/usr/bin/env python3 +#!/usr/bin/env python2.7 +# WARNING! This is a Python 2 script. Read README.rst for rationale. import os import re import sys -from io import StringIO from plex import Scanner, Str, Lexicon, Opt, Bol, State, AnyChar, TEXT, IGNORE from plex.traditional import re as Re +try: + from io import BytesIO as UStringIO # Python 2 +except ImportError: + from io import StringIO as UStringIO # Python 3 + class MyScanner(Scanner): def __init__(self, info, name='<default>'): @@ -22,8 +27,8 @@ def sep_seq(sequence, sep): return pat def runScanner(data, scanner_class, lexicon=None): - info = StringIO(data) - outfo = StringIO() + info = UStringIO(data) + outfo = UStringIO() if lexicon is not None: scanner = scanner_class(lexicon, info) else: @@ -190,7 +195,7 @@ def cleanComments(source): return SourceLines state = SourceLines - for line in StringIO(source): + for line in UStringIO(source): state = state(line) comments.flushTo(lines) return lines.getValue() @@ -218,20 +223,23 @@ def removeHeader(source): return OutOfHeader state = LookingForHeader - for line in StringIO(source): + for line in UStringIO(source): state = state(line) return lines.getValue() def removeSubroutinePrototypes(source): - expression = re.compile( - r'/\* Subroutine \*/^\s*(?:(?:inline|static)\s+){0,2}(?!else|typedef|return)\w+\s+\*?\s*(\w+)\s*\([^0]+\)\s*;?' - ) - lines = LineQueue() - for line in StringIO(source): - if not expression.match(line): - lines.add(line) - - return lines.getValue() + # This function has never worked as advertised by its name: + # - "/* Subroutine */" declarations may span multiple lines and + # cannot be matched by a line by line approach. + # - The caret in the initial regex would prevent any match, even + # of single line "/* Subroutine */" declarations. + # + # While we could "fix" this function to do what the name implies + # it should do, we have no hint of what it should really do. + # + # Therefore we keep the existing (non-)functionality, documenting + # this function as doing nothing at all. + return source def removeBuiltinFunctions(source): lines = LineQueue() @@ -249,7 +257,7 @@ def removeBuiltinFunctions(source): return InBuiltInFunctions state = LookingForBuiltinFunctions - for line in StringIO(source): + for line in UStringIO(source): state = state(line) return lines.getValue() @@ -299,6 +307,5 @@ if __name__ == '__main__': source = scrub_source(source, nsteps, verbose=True) - writefo = open(outfilename, 'w') - writefo.write(source) - writefo.close() + with open(outfilename, 'w') as writefo: + writefo.write(source) diff --git a/numpy/linalg/lapack_lite/fortran.py b/numpy/linalg/lapack_lite/fortran.py index fc09f0808..2a5c9c05e 100644 --- a/numpy/linalg/lapack_lite/fortran.py +++ b/numpy/linalg/lapack_lite/fortran.py @@ -1,3 +1,4 @@ +# WARNING! This is a Python 2 script.
Read README.rst for rationale. import re import itertools @@ -44,6 +45,8 @@ class LineIterator: line = line.rstrip() return line + next = __next__ + class PushbackIterator: """PushbackIterator(iterable) @@ -69,6 +72,8 @@ class PushbackIterator: def pushback(self, item): self.buffer.append(item) + next = __next__ + def fortranSourceLines(fo): """Return an iterator over statement lines of a Fortran source file. diff --git a/numpy/linalg/lapack_lite/make_lite.py b/numpy/linalg/lapack_lite/make_lite.py index 09ffaf840..ca8d4c62c 100755 --- a/numpy/linalg/lapack_lite/make_lite.py +++ b/numpy/linalg/lapack_lite/make_lite.py @@ -1,4 +1,5 @@ -#!/usr/bin/env python3 +#!/usr/bin/env python2.7 +# WARNING! This is a Python 2 script. Read README.rst for rationale. """ Usage: make_lite.py <wrapped_routines_file> <lapack_dir> @@ -20,7 +21,10 @@ import shutil import fortran import clapack_scrub -from shutil import which +try: + from distutils.spawn import find_executable as which # Python 2 +except ImportError: + from shutil import which # Python 3 # Arguments to pass to f2c. You'll always want -A for ANSI C prototypes # Others of interest: -a to not make variables static by default @@ -81,7 +85,8 @@ class FortranRoutine: return self._dependencies def __repr__(self): - return f'FortranRoutine({self.name!r}, filename={self.filename!r})' + return "FortranRoutine({!r}, filename={!r})".format(self.name, + self.filename) class UnknownFortranRoutine(FortranRoutine): """Wrapper for a Fortran routine for which the corresponding file @@ -193,7 +198,7 @@ class LapackLibrary(FortranLibrary): def printRoutineNames(desc, routines): print(desc) for r in routines: - print(f'\t{r.name}') + print('\t%s' % r.name) def getLapackRoutines(wrapped_routines, ignores, lapack_dir): blas_src_dir = os.path.join(lapack_dir, 'BLAS', 'SRC') @@ -243,7 +248,7 @@ def dumpRoutineNames(library, output_dir): with open(filename, 'w') as fo: for r in routines: deps = r.dependencies() - fo.write(f"{r.name}: {' '.join(deps)}\n") + fo.write('%s: %s\n' % (r.name, ' '.join(deps))) def concatenateRoutines(routines, output_file): with open(output_file, 'w') as output_fo: @@ -261,8 +266,8 @@ def runF2C(fortran_filename, output_dir): subprocess.check_call( ["f2c"] + F2C_ARGS + ['-d', output_dir, fortran_filename] ) - except subprocess.CalledProcessError as e: - raise F2CError from e + except subprocess.CalledProcessError: + raise F2CError def scrubF2CSource(c_file): with open(c_file) as fo: @@ -275,8 +280,8 @@ def scrubF2CSource(c_file): def ensure_executable(name): try: which(name) - except Exception as ex: - raise SystemExit(name + ' not found') from ex + except Exception: + raise SystemExit(name + ' not found') def create_name_header(output_dir): routine_re = re.compile(r'^ (subroutine|.* function)\s+(\w+)\(.*$', @@ -316,13 +321,13 @@ def create_name_header(output_dir): # Rename BLAS/LAPACK symbols for name in sorted(symbols): - f.write(f'#define {name}_ BLAS_FUNC({name})\n') + f.write("#define %s_ BLAS_FUNC(%s)\n" % (name, name)) # Rename also symbols that f2c exports itself f.write("\n" "/* Symbols exported by f2c.c */\n") for name in sorted(f2c_symbols): - f.write(f'#define {name} numpy_lapack_lite_{name}\n') + f.write("#define %s numpy_lapack_lite_%s\n" % (name, name)) def main(): if len(sys.argv) != 3: @@ -336,10 +341,7 @@ def main(): lapack_src_dir = sys.argv[2] output_dir = os.path.join(os.path.dirname(__file__), 'build') - try: - shutil.rmtree(output_dir) - except: - pass + shutil.rmtree(output_dir, ignore_errors=True) os.makedirs(output_dir)
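# Editor's note (illustrative, not part of the diff): unlike the bare
# `try: ... except: pass` it replaces, `shutil.rmtree(output_dir,
# ignore_errors=True)` above suppresses only the OS errors raised while
# removing the tree (e.g. the directory not existing yet); the old bare
# except also swallowed unrelated exceptions such as KeyboardInterrupt.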
wrapped_routines, ignores = getWrappedRoutineNames(wrapped_routines_file) @@ -348,9 +350,9 @@ def main(): dumpRoutineNames(library, output_dir) for typename in types: - fortran_file = os.path.join(output_dir, f'f2c_{typename}.f') + fortran_file = os.path.join(output_dir, 'f2c_%s.f' % typename) c_file = fortran_file[:-2] + '.c' - print(f'creating {c_file} ...') + print('creating %s ...' % c_file) routines = library.allRoutinesByType(typename) concatenateRoutines(routines, fortran_file) @@ -358,11 +360,11 @@ def main(): patch_file = os.path.basename(fortran_file) + '.patch' if os.path.exists(patch_file): subprocess.check_call(['patch', '-u', fortran_file, patch_file]) - print(f'Patched {fortran_file}') + print("Patched {}".format(fortran_file)) try: runF2C(fortran_file, output_dir) except F2CError: - print(f'f2c failed on {fortran_file}') + print('f2c failed on %s' % fortran_file) break scrubF2CSource(c_file) diff --git a/numpy/linalg/lapack_lite/python_xerbla.c b/numpy/linalg/lapack_lite/python_xerbla.c index fe2f718b2..37a41408b 100644 --- a/numpy/linalg/lapack_lite/python_xerbla.c +++ b/numpy/linalg/lapack_lite/python_xerbla.c @@ -1,4 +1,6 @@ -#include "Python.h" +#define PY_SSIZE_T_CLEAN +#include <Python.h> + #include "numpy/npy_common.h" #include "npy_cblas.h" diff --git a/numpy/linalg/lapack_litemodule.c b/numpy/linalg/lapack_litemodule.c index 362a593a6..2fed0f2b0 100644 --- a/numpy/linalg/lapack_litemodule.c +++ b/numpy/linalg/lapack_litemodule.c @@ -4,11 +4,12 @@ More modifications by Jeff Whitaker */ #define NPY_NO_DEPRECATED_API NPY_API_VERSION -#include "Python.h" +#define PY_SSIZE_T_CLEAN +#include <Python.h> + #include "numpy/arrayobject.h" #include "npy_cblas.h" - #define FNAME(name) BLAS_FUNC(name) typedef CBLAS_INT fortran_int; diff --git a/numpy/linalg/linalg.py b/numpy/linalg/linalg.py index 2b686839a..0c27e0631 100644 --- a/numpy/linalg/linalg.py +++ b/numpy/linalg/linalg.py @@ -30,7 +30,7 @@ from numpy.core.multiarray import normalize_axis_index from numpy.core.overrides import set_module from numpy.core import overrides from numpy.lib.twodim_base import triu, eye -from numpy.linalg import lapack_lite, _umath_linalg +from numpy.linalg import _umath_linalg array_function_dispatch = functools.partial( @@ -1680,7 +1680,7 @@ def cond(x, p=None): x : (..., M, N) array_like The matrix whose condition number is sought. p : {None, 1, -1, 2, -2, inf, -inf, 'fro'}, optional - Order of the norm: + Order of the norm used in the condition number computation: ===== ============================ p norm for matrices @@ -1695,7 +1695,7 @@ def cond(x, p=None): -2 smallest singular value ===== ============================ - inf means the numpy.inf object, and the Frobenius norm is + inf means the `numpy.inf` object, and the Frobenius norm is the root-of-sum-of-squares norm. Returns @@ -1864,7 +1864,7 @@ def matrix_rank(A, tol=None, hermitian=False): References ---------- - .. [1] MATLAB reference documention, "Rank" + .. [1] MATLAB reference documentation, "Rank" https://www.mathworks.com/help/techdoc/ref/rank.html .. [2] W. H. Press, S. A. Teukolsky, W. T. Vetterling and B. P. Flannery, "Numerical Recipes (3rd edition)", Cambridge University Press, 2007, @@ -2159,7 +2159,7 @@ def lstsq(a, b, rcond="warn"): r""" Return the least-squares solution to a linear matrix equation. - Computes the vector `x` that approximatively solves the equation + Computes the vector `x` that approximately solves the equation ``a @ x = b``. 
The equation may be under-, well-, or over-determined (i.e., the number of linearly independent rows of `a` can be less than, equal to, or greater than its number of linearly independent columns). diff --git a/numpy/linalg/linalg.pyi b/numpy/linalg/linalg.pyi new file mode 100644 index 000000000..a60b9539e --- /dev/null +++ b/numpy/linalg/linalg.pyi @@ -0,0 +1,284 @@ +from typing import ( + Literal as L, + List, + Iterable, + overload, + TypeVar, + Any, + SupportsIndex, + SupportsInt, + Tuple, +) + +from numpy import ( + generic, + floating, + complexfloating, + int32, + float64, + complex128, +) + +from numpy.linalg import LinAlgError as LinAlgError + +from numpy.typing import ( + NDArray, + ArrayLike, + _ArrayLikeInt_co, + _ArrayLikeFloat_co, + _ArrayLikeComplex_co, + _ArrayLikeTD64_co, + _ArrayLikeObject_co, +) + +_T = TypeVar("_T") +_ArrayType = TypeVar("_ArrayType", bound=NDArray[Any]) + +_2Tuple = Tuple[_T, _T] +_ModeKind = L["reduced", "complete", "r", "raw"] + +__all__: List[str] + +@overload +def tensorsolve( + a: _ArrayLikeInt_co, + b: _ArrayLikeInt_co, + axes: None | Iterable[int] =..., +) -> NDArray[float64]: ... +@overload +def tensorsolve( + a: _ArrayLikeFloat_co, + b: _ArrayLikeFloat_co, + axes: None | Iterable[int] =..., +) -> NDArray[floating[Any]]: ... +@overload +def tensorsolve( + a: _ArrayLikeComplex_co, + b: _ArrayLikeComplex_co, + axes: None | Iterable[int] =..., +) -> NDArray[complexfloating[Any, Any]]: ... + +@overload +def solve( + a: _ArrayLikeInt_co, + b: _ArrayLikeInt_co, +) -> NDArray[float64]: ... +@overload +def solve( + a: _ArrayLikeFloat_co, + b: _ArrayLikeFloat_co, +) -> NDArray[floating[Any]]: ... +@overload +def solve( + a: _ArrayLikeComplex_co, + b: _ArrayLikeComplex_co, +) -> NDArray[complexfloating[Any, Any]]: ... + +@overload +def tensorinv( + a: _ArrayLikeInt_co, + ind: int = ..., +) -> NDArray[float64]: ... +@overload +def tensorinv( + a: _ArrayLikeFloat_co, + ind: int = ..., +) -> NDArray[floating[Any]]: ... +@overload +def tensorinv( + a: _ArrayLikeComplex_co, + ind: int = ..., +) -> NDArray[complexfloating[Any, Any]]: ... + +@overload +def inv(a: _ArrayLikeInt_co) -> NDArray[float64]: ... +@overload +def inv(a: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... +@overload +def inv(a: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... + +# TODO: The supported input and output dtypes are dependent on the value of `n`. +# For example: `n < 0` always casts integer types to float64 +def matrix_power( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + n: SupportsIndex, +) -> NDArray[Any]: ... + +@overload +def cholesky(a: _ArrayLikeInt_co) -> NDArray[float64]: ... +@overload +def cholesky(a: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... +@overload +def cholesky(a: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... + +@overload +def qr(a: _ArrayLikeInt_co, mode: _ModeKind = ...) -> _2Tuple[NDArray[float64]]: ... +@overload +def qr(a: _ArrayLikeFloat_co, mode: _ModeKind = ...) -> _2Tuple[NDArray[floating[Any]]]: ... +@overload +def qr(a: _ArrayLikeComplex_co, mode: _ModeKind = ...) -> _2Tuple[NDArray[complexfloating[Any, Any]]]: ... + +@overload +def eigvals(a: _ArrayLikeInt_co) -> NDArray[float64] | NDArray[complex128]: ... +@overload +def eigvals(a: _ArrayLikeFloat_co) -> NDArray[floating[Any]] | NDArray[complexfloating[Any, Any]]: ... +@overload +def eigvals(a: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... + +@overload +def eigvalsh(a: _ArrayLikeInt_co, UPLO: L["L", "U", "l", "u"] = ...) 
-> NDArray[float64]: ... +@overload +def eigvalsh(a: _ArrayLikeComplex_co, UPLO: L["L", "U", "l", "u"] = ...) -> NDArray[floating[Any]]: ... + +@overload +def eig(a: _ArrayLikeInt_co) -> _2Tuple[NDArray[float64]] | _2Tuple[NDArray[complex128]]: ... +@overload +def eig(a: _ArrayLikeFloat_co) -> _2Tuple[NDArray[floating[Any]]] | _2Tuple[NDArray[complexfloating[Any, Any]]]: ... +@overload +def eig(a: _ArrayLikeComplex_co) -> _2Tuple[NDArray[complexfloating[Any, Any]]]: ... + +@overload +def eigh( + a: _ArrayLikeInt_co, + UPLO: L["L", "U", "l", "u"] = ..., +) -> Tuple[NDArray[float64], NDArray[float64]]: ... +@overload +def eigh( + a: _ArrayLikeFloat_co, + UPLO: L["L", "U", "l", "u"] = ..., +) -> Tuple[NDArray[floating[Any]], NDArray[floating[Any]]]: ... +@overload +def eigh( + a: _ArrayLikeComplex_co, + UPLO: L["L", "U", "l", "u"] = ..., +) -> Tuple[NDArray[floating[Any]], NDArray[complexfloating[Any, Any]]]: ... + +@overload +def svd( + a: _ArrayLikeInt_co, + full_matrices: bool = ..., + compute_uv: L[True] = ..., + hermitian: bool = ..., +) -> Tuple[ + NDArray[float64], + NDArray[float64], + NDArray[float64], +]: ... +@overload +def svd( + a: _ArrayLikeFloat_co, + full_matrices: bool = ..., + compute_uv: L[True] = ..., + hermitian: bool = ..., +) -> Tuple[ + NDArray[floating[Any]], + NDArray[floating[Any]], + NDArray[floating[Any]], +]: ... +@overload +def svd( + a: _ArrayLikeComplex_co, + full_matrices: bool = ..., + compute_uv: L[True] = ..., + hermitian: bool = ..., +) -> Tuple[ + NDArray[complexfloating[Any, Any]], + NDArray[floating[Any]], + NDArray[complexfloating[Any, Any]], +]: ... +@overload +def svd( + a: _ArrayLikeInt_co, + full_matrices: bool = ..., + compute_uv: L[False] = ..., + hermitian: bool = ..., +) -> NDArray[float64]: ... +@overload +def svd( + a: _ArrayLikeComplex_co, + full_matrices: bool = ..., + compute_uv: L[False] = ..., + hermitian: bool = ..., +) -> NDArray[floating[Any]]: ... + +# TODO: Returns a scalar for 2D arrays and +# a ``(x.ndim - 2)``-dimensional array otherwise +def cond(x: _ArrayLikeComplex_co, p: None | float | L["fro", "nuc"] = ...) -> Any: ... + +# TODO: Returns `int` for <2D arrays and `intp` otherwise +def matrix_rank( + A: _ArrayLikeComplex_co, + tol: None | _ArrayLikeFloat_co = ..., + hermitian: bool = ..., +) -> Any: ... + +@overload +def pinv( + a: _ArrayLikeInt_co, + rcond: _ArrayLikeFloat_co = ..., + hermitian: bool = ..., +) -> NDArray[float64]: ... +@overload +def pinv( + a: _ArrayLikeFloat_co, + rcond: _ArrayLikeFloat_co = ..., + hermitian: bool = ..., +) -> NDArray[floating[Any]]: ... +@overload +def pinv( + a: _ArrayLikeComplex_co, + rcond: _ArrayLikeFloat_co = ..., + hermitian: bool = ..., +) -> NDArray[complexfloating[Any, Any]]: ... + +# TODO: Returns a 2-tuple of scalars for 2D arrays and +# a 2-tuple of ``(a.ndim - 2)``-dimensional arrays otherwise +def slogdet(a: _ArrayLikeComplex_co) -> _2Tuple[Any]: ... + +# TODO: Returns a 2-tuple of scalars for 2D arrays and +# a 2-tuple of ``(a.ndim - 2)``-dimensional arrays otherwise +def det(a: _ArrayLikeComplex_co) -> Any: ... + +@overload +def lstsq(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, rcond: None | float = ...) -> Tuple[ + NDArray[float64], + NDArray[float64], + int32, + NDArray[float64], +]: ... +@overload +def lstsq(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, rcond: None | float = ...) -> Tuple[ + NDArray[floating[Any]], + NDArray[floating[Any]], + int32, + NDArray[floating[Any]], +]: ...
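# Editor's note (illustrative, not part of the diff): the runtime
# signature shown earlier in this patch is `def lstsq(a, b, rcond="warn")`,
# but the overloads here type `rcond` as `None | float` only, so
# explicitly passing the sentinel, e.g.
#
#     np.linalg.lstsq(a, b, rcond="warn")
#
# is accepted at runtime yet flagged by a type checker under these stubs.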
+@overload +def lstsq(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, rcond: None | float = ...) -> Tuple[ + NDArray[complexfloating[Any, Any]], + NDArray[floating[Any]], + int32, + NDArray[floating[Any]], +]: ... + +@overload +def norm( + x: ArrayLike, + ord: None | float | L["fro", "nuc"] = ..., + axis: None = ..., + keepdims: bool = ..., +) -> floating[Any]: ... +@overload +def norm( + x: ArrayLike, + ord: None | float | L["fro", "nuc"] = ..., + axis: SupportsInt | SupportsIndex | Tuple[int, ...] = ..., + keepdims: bool = ..., +) -> Any: ... + +# TODO: Returns a scalar or array +def multi_dot( + arrays: Iterable[_ArrayLikeComplex_co | _ArrayLikeObject_co | _ArrayLikeTD64_co], + *, + out: None | NDArray[Any] = ..., +) -> Any: ... diff --git a/numpy/linalg/setup.py b/numpy/linalg/setup.py index e2944f38c..94536bb2c 100644 --- a/numpy/linalg/setup.py +++ b/numpy/linalg/setup.py @@ -3,8 +3,7 @@ import sys def configuration(parent_package='', top_path=None): from numpy.distutils.misc_util import Configuration - from numpy.distutils.system_info import ( - get_info, system_info, lapack_opt_info, blas_opt_info) + from numpy.distutils.system_info import get_info, system_info config = Configuration('linalg', parent_package, top_path) config.add_subpackage('tests') diff --git a/numpy/linalg/tests/test_linalg.py b/numpy/linalg/tests/test_linalg.py index dd059fb63..c1ba84a8e 100644 --- a/numpy/linalg/tests/test_linalg.py +++ b/numpy/linalg/tests/test_linalg.py @@ -1,7 +1,6 @@ """ Test functions for linalg module """ -from numpy.core.fromnumeric import shape import os import sys import itertools @@ -22,7 +21,6 @@ from numpy.testing import ( assert_almost_equal, assert_allclose, suppress_warnings, assert_raises_regex, HAS_LAPACK64, ) -from numpy.testing._private.utils import requires_memory def consistent_subclass(out, in_): @@ -1072,7 +1070,6 @@ class TestMatrixPower: assert_raises(LinAlgError, matrix_power, mat, -1) - class TestEigvalshCases(HermitianTestCase, HermitianGeneralizedTestCase): def do(self, a, b, tags): @@ -1956,8 +1953,8 @@ class TestMultiDot: assert_almost_equal(multi_dot([A, B]), A.dot(B)) assert_almost_equal(multi_dot([A, B]), np.dot(A, B)) - def test_basic_function_with_dynamic_programing_optimization(self): - # multi_dot with four or more arguments uses the dynamic programing + def test_basic_function_with_dynamic_programming_optimization(self): + # multi_dot with four or more arguments uses the dynamic programming # optimization and therefore deserve a separate A = np.random.random((6, 2)) B = np.random.random((2, 6)) @@ -2018,8 +2015,8 @@ class TestMultiDot: assert_almost_equal(out, A.dot(B)) assert_almost_equal(out, np.dot(A, B)) - def test_dynamic_programing_optimization_and_out(self): - # multi_dot with four or more arguments uses the dynamic programing + def test_dynamic_programming_optimization_and_out(self): + # multi_dot with four or more arguments uses the dynamic programming # optimization and therefore deserve a separate test A = np.random.random((6, 2)) B = np.random.random((2, 6)) diff --git a/numpy/linalg/umath_linalg.c.src b/numpy/linalg/umath_linalg.c.src index a486e9e5b..ff63ea218 100644 --- a/numpy/linalg/umath_linalg.c.src +++ b/numpy/linalg/umath_linalg.c.src @@ -5,9 +5,10 @@ ** INCLUDES ** ***************************************************************************** */ -#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#define PY_SSIZE_T_CLEAN +#include <Python.h> -#include "Python.h" +#define NPY_NO_DEPRECATED_API NPY_API_VERSION #include 
"numpy/arrayobject.h" #include "numpy/ufuncobject.h" diff --git a/numpy/ma/bench.py b/numpy/ma/bench.py index e29d54365..56865683d 100644 --- a/numpy/ma/bench.py +++ b/numpy/ma/bench.py @@ -1,5 +1,4 @@ #!/usr/bin/env python3 -# -*- coding: utf-8 -*- import timeit import numpy diff --git a/numpy/ma/core.py b/numpy/ma/core.py index b2ac383a2..036d6312c 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -68,13 +68,13 @@ __all__ = [ 'masked_singleton', 'masked_values', 'masked_where', 'max', 'maximum', 'maximum_fill_value', 'mean', 'min', 'minimum', 'minimum_fill_value', 'mod', 'multiply', 'mvoid', 'ndim', 'negative', 'nomask', 'nonzero', - 'not_equal', 'ones', 'outer', 'outerproduct', 'power', 'prod', + 'not_equal', 'ones', 'ones_like', 'outer', 'outerproduct', 'power', 'prod', 'product', 'ptp', 'put', 'putmask', 'ravel', 'remainder', 'repeat', 'reshape', 'resize', 'right_shift', 'round', 'round_', 'set_fill_value', 'shape', 'sin', 'sinh', 'size', 'soften_mask', 'sometrue', 'sort', 'sqrt', 'squeeze', 'std', 'subtract', 'sum', 'swapaxes', 'take', 'tan', 'tanh', 'trace', 'transpose', 'true_divide', - 'var', 'where', 'zeros', + 'var', 'where', 'zeros', 'zeros_like', ] MaskType = np.bool_ @@ -1065,7 +1065,7 @@ class _MaskedBinaryOperation(_MaskedUFunc): tr = self.f.reduce(t, axis) mr = nomask else: - tr = self.f.reduce(t, axis, dtype=dtype or t.dtype) + tr = self.f.reduce(t, axis, dtype=dtype) mr = umath.logical_and.reduce(m, axis) if not tr.shape: @@ -3949,7 +3949,7 @@ class MaskedArray(ndarray): # 2016-11-19: Demoted to legacy format - if np.get_printoptions()['legacy'] == '1.13': + if np.core.arrayprint._get_legacy_print_mode() <= 113: is_long = self.ndim > 1 parameters = dict( name=name, @@ -8159,8 +8159,18 @@ arange = _convert2ma( np_ret='arange : ndarray', np_ma_ret='arange : MaskedArray', ) -clip = np.clip -diff = np.diff +clip = _convert2ma( + 'clip', + params=dict(fill_value=None, hardmask=False), + np_ret='clipped_array : ndarray', + np_ma_ret='clipped_array : MaskedArray', +) +diff = _convert2ma( + 'diff', + params=dict(fill_value=None, hardmask=False), + np_ret='diff : ndarray', + np_ma_ret='diff : MaskedArray', +) empty = _convert2ma( 'empty', params=dict(fill_value=None, hardmask=False), @@ -8188,22 +8198,40 @@ identity = _convert2ma( np_ret='out : ndarray', np_ma_ret='out : MaskedArray', ) -indices = np.indices +indices = _convert2ma( + 'indices', + params=dict(fill_value=None, hardmask=False), + np_ret='grid : one ndarray or tuple of ndarrays', + np_ma_ret='grid : one MaskedArray or tuple of MaskedArrays', +) ones = _convert2ma( 'ones', params=dict(fill_value=None, hardmask=False), np_ret='out : ndarray', np_ma_ret='out : MaskedArray', ) -ones_like = np.ones_like -squeeze = np.squeeze +ones_like = _convert2ma( + 'ones_like', + np_ret='out : ndarray', + np_ma_ret='out : MaskedArray', +) +squeeze = _convert2ma( + 'squeeze', + params=dict(fill_value=None, hardmask=False), + np_ret='squeezed : ndarray', + np_ma_ret='squeezed : MaskedArray', +) zeros = _convert2ma( 'zeros', params=dict(fill_value=None, hardmask=False), np_ret='out : ndarray', np_ma_ret='out : MaskedArray', ) -zeros_like = np.zeros_like +zeros_like = _convert2ma( + 'zeros_like', + np_ret='out : ndarray', + np_ma_ret='out : MaskedArray', +) def append(a, b, axis=None): diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py index 73abfc296..38bf1f0e8 100644 --- a/numpy/ma/extras.py +++ b/numpy/ma/extras.py @@ -750,7 +750,7 @@ def _median(a, axis=None, out=None, overwrite_input=False): s = mid.sum(out=out) if not odd: s 
= np.true_divide(s, 2., casting='safe', out=out) - s = np.lib.utils._median_nancheck(asorted, s, axis, out) + s = np.lib.utils._median_nancheck(asorted, s, axis) else: s = mid.mean(out=out) @@ -790,7 +790,7 @@ def _median(a, axis=None, out=None, overwrite_input=False): s = np.ma.sum(low_high, axis=axis, out=out) np.true_divide(s.data, 2., casting='unsafe', out=s.data) - s = np.lib.utils._median_nancheck(asorted, s, axis, out) + s = np.lib.utils._median_nancheck(asorted, s, axis) else: s = np.ma.mean(low_high, axis=axis, out=out) diff --git a/numpy/ma/mrecords.py b/numpy/ma/mrecords.py index 10b1b209c..1e8103bcf 100644 --- a/numpy/ma/mrecords.py +++ b/numpy/ma/mrecords.py @@ -493,7 +493,6 @@ def _mrreconstruct(subtype, baseclass, baseshape, basetype,): _mask = ndarray.__new__(ndarray, baseshape, 'b1') return subtype.__new__(subtype, _data, mask=_mask, dtype=basetype,) - mrecarray = MaskedRecords @@ -667,8 +666,9 @@ def openfile(fname): raise NotImplementedError("Wow, binary file") -def fromtextfile(fname, delimitor=None, commentchar='#', missingchar='', - varnames=None, vartypes=None): +def fromtextfile(fname, delimiter=None, commentchar='#', missingchar='', + varnames=None, vartypes=None, + *, delimitor=np._NoValue): # backwards compatibility """ Creates a mrecarray from data stored in the file `filename`. @@ -676,7 +676,7 @@ def fromtextfile(fname, delimitor=None, commentchar='#', missingchar='', ---------- fname : {file name/handle} Handle of an opened file. - delimitor : {None, string}, optional + delimiter : {None, string}, optional Alphanumeric character used to separate columns in the file. If None, any (group of) white spacestring(s) will be used. commentchar : {'#', string}, optional @@ -692,6 +692,17 @@ def fromtextfile(fname, delimitor=None, commentchar='#', missingchar='', Ultra simple: the varnames are in the header, one line""" + if delimitor is not np._NoValue: + if delimiter is not None: + raise TypeError("fromtextfile() got multiple values for argument " + "'delimiter'") + # NumPy 1.22.0, 2021-09-23 + warnings.warn("The 'delimitor' keyword argument of " + "numpy.ma.mrecords.fromtextfile() is deprecated " + "since NumPy 1.22.0, use 'delimiter' instead.", + DeprecationWarning, stacklevel=2) + delimiter = delimitor + # Try to open the file. ftext = openfile(fname) @@ -699,14 +710,14 @@ def fromtextfile(fname, delimitor=None, commentchar='#', missingchar='', while True: line = ftext.readline() firstline = line[:line.find(commentchar)].strip() - _varnames = firstline.split(delimitor) + _varnames = firstline.split(delimiter) if len(_varnames) > 1: break if varnames is None: varnames = _varnames # Get the data. - _variables = masked_array([line.strip().split(delimitor) for line in ftext + _variables = masked_array([line.strip().split(delimiter) for line in ftext if line[0] != commentchar and len(line) > 1]) (_, nfields) = _variables.shape ftext.close() diff --git a/numpy/ma/mrecords.pyi b/numpy/ma/mrecords.pyi index 92d5afb89..7bd8678cf 100644 --- a/numpy/ma/mrecords.pyi +++ b/numpy/ma/mrecords.pyi @@ -78,11 +78,13 @@ def fromrecords( def fromtextfile( fname, - delimitor=..., + delimiter=..., commentchar=..., missingchar=..., varnames=..., vartypes=..., + # NOTE: deprecated: NumPy 1.22.0, 2021-09-23 + # delimitor=..., ): ... def addfield(mrecord, newfield, newfieldname=...): ... 
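[Editor's note] The ``fromtextfile`` change above is NumPy's usual recipe for renaming a keyword argument: keep the old spelling as a keyword-only parameter defaulting to the ``np._NoValue`` sentinel, warn when it is supplied, and forward it to the new name. A minimal standalone sketch of the same pattern (the function ``load`` and its body are hypothetical, not part of the diff)::

    import warnings
    import numpy as np

    def load(path, delimiter=None, *, delimitor=np._NoValue):
        # Hypothetical example; mirrors the shim in mrecords.fromtextfile.
        if delimitor is not np._NoValue:
            if delimiter is not None:
                raise TypeError(
                    "load() got multiple values for argument 'delimiter'")
            warnings.warn("The 'delimitor' keyword argument is deprecated, "
                          "use 'delimiter' instead.",
                          DeprecationWarning, stacklevel=2)
            delimiter = delimitor
        ...  # proceed using `delimiter` only

Using ``np._NoValue`` rather than ``None`` lets the shim distinguish "not passed" from an explicit ``None``, which is itself a meaningful value for ``delimiter`` here.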
diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py index 2fd353d23..bf95c999a 100644 --- a/numpy/ma/tests/test_core.py +++ b/numpy/ma/tests/test_core.py @@ -43,9 +43,9 @@ from numpy.ma.core import ( masked_less, masked_less_equal, masked_not_equal, masked_outside, masked_print_option, masked_values, masked_where, max, maximum, maximum_fill_value, min, minimum, minimum_fill_value, mod, multiply, - mvoid, nomask, not_equal, ones, outer, power, product, put, putmask, - ravel, repeat, reshape, resize, shape, sin, sinh, sometrue, sort, sqrt, - subtract, sum, take, tan, tanh, transpose, where, zeros, + mvoid, nomask, not_equal, ones, ones_like, outer, power, product, put, + putmask, ravel, repeat, reshape, resize, shape, sin, sinh, sometrue, sort, + sqrt, subtract, sum, take, tan, tanh, transpose, where, zeros, zeros_like, ) from numpy.compat import pickle @@ -1071,7 +1071,7 @@ class TestMaskedArrayArithmetic: assert_equal(z.mask, [[1, 1, 1], [0, 0, 0]]) def test_mixed_arithmetic(self): - # Tests mixed arithmetics. + # Tests mixed arithmetic. na = np.array([1]) ma = array([1]) assert_(isinstance(na + ma, MaskedArray)) @@ -1084,7 +1084,7 @@ class TestMaskedArrayArithmetic: assert_equal(getmaskarray(2 / a), [1, 0, 1]) def test_masked_singleton_arithmetic(self): - # Tests some scalar arithmetics on MaskedArrays. + # Tests some scalar arithmetic on MaskedArrays. # Masked singleton should remain masked no matter what xm = array(0, mask=1) assert_((1 / array(0)).mask) @@ -1804,7 +1804,7 @@ class TestMaskedArrayArithmetic: assert_equal(test.mask, [[False, True], [False, True]]) - def test_numpyarithmetics(self): + def test_numpyarithmetic(self): # Check that the mask is not back-propagated when using numpy functions a = masked_array([-1, 0, 1, 2, 3], mask=[0, 0, 0, 0, 1]) control = masked_array([np.nan, np.nan, 0, np.log(2), -1], @@ -2479,8 +2479,8 @@ class TestUfuncs: # also check that allclose uses ma ufuncs, to avoid warning allclose(m, 0.5) -class TestMaskedArrayInPlaceArithmetics: - # Test MaskedArray Arithmetics +class TestMaskedArrayInPlaceArithmetic: + # Test MaskedArray Arithmetic def setup(self): x = arange(10) @@ -3229,6 +3229,50 @@ class TestMaskedArrayMethods: b = a.view(masked_array) assert_(np.may_share_memory(a.mask, b.mask)) + def test_zeros(self): + # Tests zeros/like + datatype = [('a', int), ('b', float), ('c', '|S8')] + a = masked_array([(1, 1.1, '1.1'), (2, 2.2, '2.2'), (3, 3.3, '3.3')], + dtype=datatype) + assert_equal(len(a.fill_value.item()), len(datatype)) + + b = zeros(len(a), dtype=datatype) + assert_equal(b.shape, a.shape) + assert_equal(b.fill_value, a.fill_value) + + b = zeros_like(a) + assert_equal(b.shape, a.shape) + assert_equal(b.fill_value, a.fill_value) + + # check zeros_like mask handling + a = masked_array([1, 2, 3], mask=[False, True, False]) + b = zeros_like(a) + assert_(not np.may_share_memory(a.mask, b.mask)) + b = a.view() + assert_(np.may_share_memory(a.mask, b.mask)) + + def test_ones(self): + # Tests ones/like + datatype = [('a', int), ('b', float), ('c', '|S8')] + a = masked_array([(1, 1.1, '1.1'), (2, 2.2, '2.2'), (3, 3.3, '3.3')], + dtype=datatype) + assert_equal(len(a.fill_value.item()), len(datatype)) + + b = ones(len(a), dtype=datatype) + assert_equal(b.shape, a.shape) + assert_equal(b.fill_value, a.fill_value) + + b = ones_like(a) + assert_equal(b.shape, a.shape) + assert_equal(b.fill_value, a.fill_value) + + # check ones_like mask handling + a = masked_array([1, 2, 3], mask=[False, True, False]) + b = ones_like(a) + assert_(not 
np.may_share_memory(a.mask, b.mask)) + b = a.view() + assert_(np.may_share_memory(a.mask, b.mask)) + @suppress_copy_mask_on_assignment def test_put(self): # Tests put. @@ -3464,7 +3508,7 @@ class TestMaskedArrayMethods: # Test sort on dtype with subarray (gh-8069) # Just check that the sort does not error, structured array subarrays # are treated as byte strings and that leads to differing behavior - # depending on endianess and `endwith`. + # depending on endianness and `endwith`. dt = np.dtype([('v', int, 2)]) a = a.view(dt) test = sort(a) diff --git a/numpy/ma/tests/test_deprecations.py b/numpy/ma/tests/test_deprecations.py index 14f697375..3e0e09fdd 100644 --- a/numpy/ma/tests/test_deprecations.py +++ b/numpy/ma/tests/test_deprecations.py @@ -1,10 +1,13 @@ """Test deprecation and future warnings. """ +import pytest import numpy as np from numpy.testing import assert_warns from numpy.ma.testutils import assert_equal from numpy.ma.core import MaskedArrayFutureWarning +import io +import textwrap class TestArgsort: """ gh-8701 """ @@ -66,3 +69,21 @@ class TestMinimumMaximum: result = ma_max(data1d) assert_equal(result, ma_max(data1d, axis=None)) assert_equal(result, ma_max(data1d, axis=0)) + + +class TestFromtextfile: + def test_fromtextfile_delimitor(self): + # NumPy 1.22.0, 2021-09-23 + + textfile = io.StringIO(textwrap.dedent( + """ + A,B,C,D + 'string 1';1;1.0;'mixed column' + 'string 2';2;2.0; + 'string 3';3;3.0;123 + 'string 4';4;4.0;3.14 + """ + )) + + with pytest.warns(DeprecationWarning): + result = np.ma.mrecords.fromtextfile(textfile, delimitor=';') diff --git a/numpy/ma/tests/test_mrecords.py b/numpy/ma/tests/test_mrecords.py index 27df519d2..4b2c01df9 100644 --- a/numpy/ma/tests/test_mrecords.py +++ b/numpy/ma/tests/test_mrecords.py @@ -468,7 +468,7 @@ class TestMRecordsImport: with temppath() as path: with open(path, 'w') as f: f.write(fcontent) - mrectxt = fromtextfile(path, delimitor=',', varnames='ABCDEFG') + mrectxt = fromtextfile(path, delimiter=',', varnames='ABCDEFG') assert_(isinstance(mrectxt, MaskedRecords)) assert_equal(mrectxt.F, [1, 1, 1, 1]) assert_equal(mrectxt.E._mask, [1, 1, 1, 1]) diff --git a/numpy/matrixlib/__init__.pyi b/numpy/matrixlib/__init__.pyi index 26453f000..c1b82d2ec 100644 --- a/numpy/matrixlib/__init__.pyi +++ b/numpy/matrixlib/__init__.pyi @@ -1,4 +1,4 @@ -from typing import Any, List +from typing import List from numpy._pytesttester import PytestTester @@ -6,10 +6,12 @@ from numpy import ( matrix as matrix, ) +from numpy.matrixlib.defmatrix import ( + bmat as bmat, + mat as mat, + asmatrix as asmatrix, +) + __all__: List[str] __path__: List[str] test: PytestTester - -def bmat(obj, ldict=..., gdict=...): ... -def asmatrix(data, dtype=...): ... -mat = asmatrix diff --git a/numpy/matrixlib/defmatrix.pyi b/numpy/matrixlib/defmatrix.pyi new file mode 100644 index 000000000..6c86ea1ef --- /dev/null +++ b/numpy/matrixlib/defmatrix.pyi @@ -0,0 +1,15 @@ +from typing import List, Any, Sequence, Mapping +from numpy import matrix as matrix +from numpy.typing import ArrayLike, DTypeLike, NDArray + +__all__: List[str] + +def bmat( + obj: str | Sequence[ArrayLike] | NDArray[Any], + ldict: None | Mapping[str, Any] = ..., + gdict: None | Mapping[str, Any] = ..., +) -> matrix[Any, Any]: ... + +def asmatrix(data: ArrayLike, dtype: DTypeLike = ...) -> matrix[Any, Any]: ... 
+ +mat = asmatrix diff --git a/numpy/polynomial/__init__.py b/numpy/polynomial/__init__.py index 4b4361163..5a3addf4c 100644 --- a/numpy/polynomial/__init__.py +++ b/numpy/polynomial/__init__.py @@ -164,7 +164,7 @@ def set_default_printstyle(style): 1.0 + 2.0 x**1 + 3.0 x**2 >>> print(c) 1.0 + 2.0 T_1(x) + 3.0 T_2(x) - >>> # Formatting supercedes all class/package-level defaults + >>> # Formatting supersedes all class/package-level defaults >>> print(f"{p:unicode}") 1.0 + 2.0·x¹ + 3.0·x² """ diff --git a/numpy/polynomial/_polybase.py b/numpy/polynomial/_polybase.py index 5525b232b..155d72805 100644 --- a/numpy/polynomial/_polybase.py +++ b/numpy/polynomial/_polybase.py @@ -344,7 +344,7 @@ class ABCPolyBase(abc.ABC): # Polynomial coefficient # The coefficient array can be an object array with elements that # will raise a TypeError with >= 0 (e.g. strings or Python - # complex). In this case, represent the coeficient as-is. + # complex). In this case, represent the coefficient as-is. try: if coef >= 0: next_term = f"+ {coef}" @@ -958,12 +958,12 @@ class ABCPolyBase(abc.ABC): of interest, do ``new_series.convert().coef``. [resid, rank, sv, rcond] : list - These values are only returned if `full` = True + These values are only returned if ``full == True`` - resid -- sum of squared residuals of the least squares fit - rank -- the numerical rank of the scaled Vandermonde matrix - sv -- singular values of the scaled Vandermonde matrix - rcond -- value of `rcond`. + - resid -- sum of squared residuals of the least squares fit + - rank -- the numerical rank of the scaled Vandermonde matrix + - sv -- singular values of the scaled Vandermonde matrix + - rcond -- value of `rcond`. For more details, see `linalg.lstsq`. diff --git a/numpy/polynomial/chebyshev.py b/numpy/polynomial/chebyshev.py index 210000ec4..2b3268aeb 100644 --- a/numpy/polynomial/chebyshev.py +++ b/numpy/polynomial/chebyshev.py @@ -131,9 +131,9 @@ chebtrim = pu.trimcoef # def _cseries_to_zseries(c): - """Covert Chebyshev series to z-series. + """Convert Chebyshev series to z-series. - Covert a Chebyshev series to the equivalent z-series. The result is + Convert a Chebyshev series to the equivalent z-series. The result is never an empty array. The dtype of the return is the same as that of the input. No checks are run on the arguments as this routine is for internal use. @@ -156,9 +156,9 @@ def _cseries_to_zseries(c): def _zseries_to_cseries(zs): - """Covert z-series to a Chebyshev series. + """Convert z-series to a Chebyshev series. - Covert a z series to the equivalent Chebyshev series. The result is + Convert a z series to the equivalent Chebyshev series. The result is never an empty array. The dtype of the return is the same as that of the input. No checks are run on the arguments as this routine is for internal use. @@ -1598,12 +1598,12 @@ def chebfit(x, y, deg, rcond=None, full=False, w=None): `k`. [residuals, rank, singular_values, rcond] : list - These values are only returned if `full` = True + These values are only returned if ``full == True`` - resid -- sum of squared residuals of the least squares fit - rank -- the numerical rank of the scaled Vandermonde matrix - sv -- singular values of the scaled Vandermonde matrix - rcond -- value of `rcond`. + - residuals -- sum of squared residuals of the least squares fit + - rank -- the numerical rank of the scaled Vandermonde matrix + - singular_values -- singular values of the scaled Vandermonde matrix + - rcond -- value of `rcond`. For more details, see `numpy.linalg.lstsq`. 
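The docstring fixes above (and the analogous hunks in the other polynomial modules below) all describe the same two return conventions of the fit functions. A short sketch of both call forms using the public ``chebfit`` API; the data here is made up:

    import numpy as np
    from numpy.polynomial import chebyshev as C

    x = np.linspace(-1, 1, 51)
    y = x**3 - x + 0.1

    coef = C.chebfit(x, y, 3)  # full=False (default): coefficients only
    coef, (resid, rank, sv, rcond) = C.chebfit(x, y, 3, full=True)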
@@ -1611,7 +1611,7 @@ def chebfit(x, y, deg, rcond=None, full=False, w=None):
     -----
     RankWarning
         The rank of the coefficient matrix in the least-squares fit is
-        deficient. The warning is only raised if `full` = False. The
+        deficient. The warning is only raised if ``full == False``. The
         warnings can be turned off by

         >>> import warnings
diff --git a/numpy/polynomial/hermite.py b/numpy/polynomial/hermite.py
index c1b9f71c0..9b0735a9a 100644
--- a/numpy/polynomial/hermite.py
+++ b/numpy/polynomial/hermite.py
@@ -419,7 +419,7 @@ def hermmulx(c):

     .. math::

-        xP_i(x) = (P_{i + 1}(x)/2 + i*P_{i - 1}(x))
+        xP_i(x) = (P_{i + 1}(x)/2 + i*P_{i - 1}(x))

     Examples
     --------
@@ -1324,12 +1324,12 @@ def hermfit(x, y, deg, rcond=None, full=False, w=None):
         `k`.

     [residuals, rank, singular_values, rcond] : list
-        These values are only returned if `full` = True
+        These values are only returned if ``full == True``

-        resid -- sum of squared residuals of the least squares fit
-        rank -- the numerical rank of the scaled Vandermonde matrix
-        sv -- singular values of the scaled Vandermonde matrix
-        rcond -- value of `rcond`.
+        - residuals -- sum of squared residuals of the least squares fit
+        - rank -- the numerical rank of the scaled Vandermonde matrix
+        - singular_values -- singular values of the scaled Vandermonde matrix
+        - rcond -- value of `rcond`.

         For more details, see `numpy.linalg.lstsq`.

@@ -1337,7 +1337,7 @@ def hermfit(x, y, deg, rcond=None, full=False, w=None):
     -----
     RankWarning
         The rank of the coefficient matrix in the least-squares fit is
-        deficient. The warning is only raised if `full` = False. The
+        deficient. The warning is only raised if ``full == False``. The
         warnings can be turned off by

         >>> import warnings
diff --git a/numpy/polynomial/hermite_e.py b/numpy/polynomial/hermite_e.py
index b7095c910..182c562c2 100644
--- a/numpy/polynomial/hermite_e.py
+++ b/numpy/polynomial/hermite_e.py
@@ -414,7 +414,7 @@ def hermemulx(c):

     .. math::

-        xP_i(x) = (P_{i + 1}(x) + iP_{i - 1}(x)))
+        xP_i(x) = (P_{i + 1}(x) + iP_{i - 1}(x))

     Examples
     --------
@@ -1315,12 +1315,12 @@ def hermefit(x, y, deg, rcond=None, full=False, w=None):
         `k`.

     [residuals, rank, singular_values, rcond] : list
-        These values are only returned if `full` = True
+        These values are only returned if ``full == True``

-        resid -- sum of squared residuals of the least squares fit
-        rank -- the numerical rank of the scaled Vandermonde matrix
-        sv -- singular values of the scaled Vandermonde matrix
-        rcond -- value of `rcond`.
+        - residuals -- sum of squared residuals of the least squares fit
+        - rank -- the numerical rank of the scaled Vandermonde matrix
+        - singular_values -- singular values of the scaled Vandermonde matrix
+        - rcond -- value of `rcond`.

         For more details, see `numpy.linalg.lstsq`.

@@ -1328,7 +1328,7 @@ def hermefit(x, y, deg, rcond=None, full=False, w=None):
     -----
     RankWarning
         The rank of the coefficient matrix in the least-squares fit is
-        deficient. The warning is only raised if `full` = False. The
+        deficient. The warning is only raised if ``full == False``. The
         warnings can be turned off by

         >>> import warnings
diff --git a/numpy/polynomial/laguerre.py b/numpy/polynomial/laguerre.py
index d3b6432dc..d9ca373dd 100644
--- a/numpy/polynomial/laguerre.py
+++ b/numpy/polynomial/laguerre.py
@@ -414,7 +414,7 @@ def lagmulx(c):

    ..
math:: - xP_i(x) = (-(i + 1)*P_{i + 1}(x) + (2i + 1)P_{i}(x) - iP_{i - 1}(x)) + xP_i(x) = (-(i + 1)*P_{i + 1}(x) + (2i + 1)P_{i}(x) - iP_{i - 1}(x)) Examples -------- @@ -1030,7 +1030,7 @@ def lagval3d(x, y, z, c): Returns ------- values : ndarray, compatible object - The values of the multidimension polynomial on points formed with + The values of the multidimensional polynomial on points formed with triples of corresponding values from `x`, `y`, and `z`. See Also @@ -1321,12 +1321,12 @@ def lagfit(x, y, deg, rcond=None, full=False, w=None): `k`. [residuals, rank, singular_values, rcond] : list - These values are only returned if `full` = True + These values are only returned if ``full == True`` - resid -- sum of squared residuals of the least squares fit - rank -- the numerical rank of the scaled Vandermonde matrix - sv -- singular values of the scaled Vandermonde matrix - rcond -- value of `rcond`. + - residuals -- sum of squared residuals of the least squares fit + - rank -- the numerical rank of the scaled Vandermonde matrix + - singular_values -- singular values of the scaled Vandermonde matrix + - rcond -- value of `rcond`. For more details, see `numpy.linalg.lstsq`. @@ -1334,7 +1334,7 @@ def lagfit(x, y, deg, rcond=None, full=False, w=None): ----- RankWarning The rank of the coefficient matrix in the least-squares fit is - deficient. The warning is only raised if `full` = False. The + deficient. The warning is only raised if ``full == False``. The warnings can be turned off by >>> import warnings diff --git a/numpy/polynomial/legendre.py b/numpy/polynomial/legendre.py index d4cf4accf..2e8052e7c 100644 --- a/numpy/polynomial/legendre.py +++ b/numpy/polynomial/legendre.py @@ -425,7 +425,7 @@ def legmulx(c): See Also -------- - legadd, legmul, legmul, legdiv, legpow + legadd, legmul, legdiv, legpow Notes ----- @@ -1339,12 +1339,12 @@ def legfit(x, y, deg, rcond=None, full=False, w=None): returned `coef`. [residuals, rank, singular_values, rcond] : list - These values are only returned if `full` = True + These values are only returned if ``full == True`` - resid -- sum of squared residuals of the least squares fit - rank -- the numerical rank of the scaled Vandermonde matrix - sv -- singular values of the scaled Vandermonde matrix - rcond -- value of `rcond`. + - residuals -- sum of squared residuals of the least squares fit + - rank -- the numerical rank of the scaled Vandermonde matrix + - singular_values -- singular values of the scaled Vandermonde matrix + - rcond -- value of `rcond`. For more details, see `numpy.linalg.lstsq`. @@ -1352,7 +1352,7 @@ def legfit(x, y, deg, rcond=None, full=False, w=None): ----- RankWarning The rank of the coefficient matrix in the least-squares fit is - deficient. The warning is only raised if `full` = False. The + deficient. The warning is only raised if ``full == False``. The warnings can be turned off by >>> import warnings diff --git a/numpy/polynomial/polynomial.py b/numpy/polynomial/polynomial.py index d8a032068..2fead88ab 100644 --- a/numpy/polynomial/polynomial.py +++ b/numpy/polynomial/polynomial.py @@ -1268,12 +1268,12 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None): fit to the data in `y`'s `k`-th column. 
[residuals, rank, singular_values, rcond] : list - These values are only returned if `full` = True + These values are only returned if ``full == True`` - resid -- sum of squared residuals of the least squares fit - rank -- the numerical rank of the scaled Vandermonde matrix - sv -- singular values of the scaled Vandermonde matrix - rcond -- value of `rcond`. + - residuals -- sum of squared residuals of the least squares fit + - rank -- the numerical rank of the scaled Vandermonde matrix + - singular_values -- singular values of the scaled Vandermonde matrix + - rcond -- value of `rcond`. For more details, see `numpy.linalg.lstsq`. @@ -1281,7 +1281,7 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None): ------ RankWarning Raised if the matrix in the least-squares fit is rank deficient. - The warning is only raised if `full` == False. The warnings can + The warning is only raised if ``full == False``. The warnings can be turned off by: >>> import warnings diff --git a/numpy/polynomial/tests/test_classes.py b/numpy/polynomial/tests/test_classes.py index 8e71a1945..6322062f2 100644 --- a/numpy/polynomial/tests/test_classes.py +++ b/numpy/polynomial/tests/test_classes.py @@ -597,4 +597,4 @@ class TestInterpolate: for deg in range(0, 10): for t in range(0, deg + 1): p = Chebyshev.interpolate(powx, deg, domain=[0, 2], args=(t,)) - assert_almost_equal(p(x), powx(x, t), decimal=12) + assert_almost_equal(p(x), powx(x, t), decimal=11) diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx index 60b6bfc72..5347ea125 100644 --- a/numpy/random/_generator.pyx +++ b/numpy/random/_generator.pyx @@ -876,8 +876,10 @@ cdef class Generator: greater than or equal to low. The default value is 0. high : float or array_like of floats Upper boundary of the output interval. All values generated will be - less than high. high - low must be non-negative. The default value - is 1.0. + less than high. The high limit may be included in the returned array of + floats due to floating-point rounding in the equation + ``low + (high-low) * random_sample()``. high - low must be + non-negative. The default value is 1.0. size : int or tuple of ints, optional Output shape. If the given shape is, e.g., ``(m, n, k)``, then ``m * n * k`` samples are drawn. If size is ``None`` (default), @@ -2095,7 +2097,7 @@ cdef class Generator: Raises ------ ValueError - If a < 1. + If a <= 0. Notes ----- @@ -3624,7 +3626,7 @@ cdef class Generator: from numpy.linalg import cholesky l = cholesky(cov) - # make sure check_valid is ignored whe method == 'cholesky' + # make sure check_valid is ignored when method == 'cholesky' # since the decomposition will have failed if cov is not valid. if check_valid != 'ignore' and method != 'cholesky': if check_valid != 'warn' and check_valid != 'raise': @@ -4441,7 +4443,7 @@ cdef class Generator: # Fast, statically typed path: shuffle the underlying buffer. # Only for non-empty, 1d objects of class ndarray (subclasses such # as MaskedArrays may not support this approach). - x_ptr = <char*><size_t>np.PyArray_DATA(x) + x_ptr = np.PyArray_BYTES(x) stride = x.strides[0] itemsize = x.dtype.itemsize # As the array x could contain python objects we use a buffer @@ -4449,7 +4451,7 @@ cdef class Generator: # within the buffer and erroneously decrementing it's refcount # when the function exits. 
buf = np.empty(itemsize, dtype=np.int8) # GC'd at function exit - buf_ptr = <char*><size_t>np.PyArray_DATA(buf) + buf_ptr = np.PyArray_BYTES(buf) if x.dtype.hasobject: with self.lock: _shuffle_raw_wrap(&self._bitgen, n, 1, itemsize, stride, diff --git a/numpy/random/include/aligned_malloc.h b/numpy/random/include/aligned_malloc.h index ea24f6d23..43f68253d 100644 --- a/numpy/random/include/aligned_malloc.h +++ b/numpy/random/include/aligned_malloc.h @@ -1,7 +1,7 @@ #ifndef _RANDOMDGEN__ALIGNED_MALLOC_H_ #define _RANDOMDGEN__ALIGNED_MALLOC_H_ -#include "Python.h" +#include <Python.h> #include "numpy/npy_common.h" #define NPY_MEMALIGN 16 /* 16 for SSE2, 32 for AVX, 64 for Xeon Phi */ diff --git a/numpy/random/mtrand.pyx b/numpy/random/mtrand.pyx index c9d8ee8e3..81a526ab4 100644 --- a/numpy/random/mtrand.pyx +++ b/numpy/random/mtrand.pyx @@ -1033,7 +1033,10 @@ cdef class RandomState: greater than or equal to low. The default value is 0. high : float or array_like of floats Upper boundary of the output interval. All values generated will be - less than or equal to high. The default value is 1.0. + less than or equal to high. The high limit may be included in the + returned array of floats due to floating-point rounding in the + equation ``low + (high-low) * random_sample()``. The default value + is 1.0. size : int or tuple of ints, optional Output shape. If the given shape is, e.g., ``(m, n, k)``, then ``m * n * k`` samples are drawn. If size is ``None`` (default), @@ -2524,7 +2527,7 @@ cdef class RandomState: Raises ------ ValueError - If a < 1. + If a <= 0. See Also -------- @@ -4472,7 +4475,7 @@ cdef class RandomState: # Fast, statically typed path: shuffle the underlying buffer. # Only for non-empty, 1d objects of class ndarray (subclasses such # as MaskedArrays may not support this approach). - x_ptr = <char*><size_t>np.PyArray_DATA(x) + x_ptr = np.PyArray_BYTES(x) stride = x.strides[0] itemsize = x.dtype.itemsize # As the array x could contain python objects we use a buffer @@ -4480,7 +4483,7 @@ cdef class RandomState: # within the buffer and erroneously decrementing it's refcount # when the function exits. buf = np.empty(itemsize, dtype=np.int8) # GC'd at function exit - buf_ptr = <char*><size_t>np.PyArray_DATA(buf) + buf_ptr = np.PyArray_BYTES(buf) with self.lock: # We trick gcc into providing a specialized implementation for # the most common case, yielding a ~33% performance improvement. diff --git a/numpy/random/setup.py b/numpy/random/setup.py index dce9a101e..866c0cb2f 100644 --- a/numpy/random/setup.py +++ b/numpy/random/setup.py @@ -65,12 +65,26 @@ def configuration(parent_package='', top_path=None): 'src/distributions/random_mvhg_marginals.c', 'src/distributions/random_hypergeometric.c', ] + + def gl_if_msvc(build_cmd): + """ Add flag if we are using MSVC compiler + + We can't see this in our scope, because we have not initialized the + distutils build command, so use this deferred calculation to run when + we are building the library. 
+ """ + # Keep in sync with numpy/core/setup.py + if build_cmd.compiler.compiler_type == 'msvc': + # explicitly disable whole-program optimization + return ['/GL-'] + return [] + config.add_installed_library('npyrandom', sources=npyrandom_sources, install_dir='lib', build_info={ 'include_dirs' : [], # empty list required for creating npyrandom.h - 'extra_compiler_args' : (['/GL-'] if is_msvc else []), + 'extra_compiler_args': [gl_if_msvc], }) for gen in ['mt19937']: diff --git a/numpy/random/src/distributions/distributions.c b/numpy/random/src/distributions/distributions.c index 9bdfa9bea..adf4db4a7 100644 --- a/numpy/random/src/distributions/distributions.c +++ b/numpy/random/src/distributions/distributions.c @@ -452,7 +452,7 @@ double random_standard_cauchy(bitgen_t *bitgen_state) { } double random_pareto(bitgen_t *bitgen_state, double a) { - return exp(random_standard_exponential(bitgen_state) / a) - 1; + return expm1(random_standard_exponential(bitgen_state) / a); } double random_weibull(bitgen_t *bitgen_state, double a) { @@ -463,7 +463,7 @@ double random_weibull(bitgen_t *bitgen_state, double a) { } double random_power(bitgen_t *bitgen_state, double a) { - return pow(1 - exp(-random_standard_exponential(bitgen_state)), 1. / a); + return pow(-expm1(-random_standard_exponential(bitgen_state)), 1. / a); } double random_laplace(bitgen_t *bitgen_state, double loc, double scale) { @@ -918,7 +918,7 @@ int64_t random_logseries(bitgen_t *bitgen_state, double p) { return 1; } U = next_double(bitgen_state); - q = 1.0 - exp(r * U); + q = -expm1(r * U); if (V <= q * q) { result = (int64_t)floor(1 + log(V) / log(q)); if ((result < 1) || (V == 0.0)) { diff --git a/numpy/random/src/distributions/random_hypergeometric.c b/numpy/random/src/distributions/random_hypergeometric.c index 0da49bd62..d8510bfca 100644 --- a/numpy/random/src/distributions/random_hypergeometric.c +++ b/numpy/random/src/distributions/random_hypergeometric.c @@ -155,7 +155,7 @@ static int64_t hypergeometric_hrua(bitgen_t *bitgen_state, c = sqrt(var + 0.5); /* - * h is 2*s_hat (See Stadlober's theses (1989), Eq. (5.17); or + * h is 2*s_hat (See Stadlober's thesis (1989), Eq. (5.17); or * Stadlober (1990), Eq. 8). s_hat is the scale of the "table mountain" * function that dominates the scaled hypergeometric PMF ("scaled" means * normalized to have a maximum value of 1). 
diff --git a/numpy/random/src/pcg64/pcg64.c b/numpy/random/src/pcg64/pcg64.c
index c623c809b..b9be1e39d 100644
--- a/numpy/random/src/pcg64/pcg64.c
+++ b/numpy/random/src/pcg64/pcg64.c
@@ -109,8 +109,7 @@ pcg128_t pcg_advance_lcg_128(pcg128_t state, pcg128_t delta, pcg128_t cur_mult,
     cur_plus = pcg128_mult(pcg128_add(cur_mult, PCG_128BIT_CONSTANT(0u, 1u)),
                            cur_plus);
     cur_mult = pcg128_mult(cur_mult, cur_mult);
-    delta.low >>= 1;
-    delta.low += delta.high & 1;
+    delta.low = (delta.low >> 1) | (delta.high << 63);
     delta.high >>= 1;
   }
   return pcg128_add(pcg128_mult(acc_mult, state), acc_plus);
 }
diff --git a/numpy/random/tests/test_direct.py b/numpy/random/tests/test_direct.py
index 29054b70b..ea1ebacb6 100644
--- a/numpy/random/tests/test_direct.py
+++ b/numpy/random/tests/test_direct.py
@@ -358,6 +358,17 @@ class TestPCG64(Base):
         assert val_neg == val_pos
         assert val_big == val_pos

+    def test_advance_large(self):
+        rs = Generator(self.bit_generator(38219308213743))
+        pcg = rs.bit_generator
+        state = pcg.state["state"]
+        initial_state = 287608843259529770491897792873167516365
+        assert state["state"] == initial_state
+        pcg.advance(sum(2**i for i in (96, 64, 32, 16, 8, 4, 2, 1)))
+        state = pcg.state["state"]
+        advanced_state = 135275564607035429730177404003164635391
+        assert state["state"] == advanced_state
+

 class TestPCG64DXSM(Base):
     @classmethod
@@ -386,6 +397,17 @@ class TestPCG64DXSM(Base):
         assert val_neg == val_pos
         assert val_big == val_pos

+    def test_advance_large(self):
+        rs = Generator(self.bit_generator(38219308213743))
+        pcg = rs.bit_generator
+        state = pcg.state
+        initial_state = 287608843259529770491897792873167516365
+        assert state["state"]["state"] == initial_state
+        pcg.advance(sum(2**i for i in (96, 64, 32, 16, 8, 4, 2, 1)))
+        state = pcg.state["state"]
+        advanced_state = 277778083536782149546677086420637664879
+        assert state["state"] == advanced_state
+

 class TestMT19937(Base):
     @classmethod
diff --git a/numpy/random/tests/test_generator_mt19937_regressions.py b/numpy/random/tests/test_generator_mt19937_regressions.py
index 88d2792a6..0227d6502 100644
--- a/numpy/random/tests/test_generator_mt19937_regressions.py
+++ b/numpy/random/tests/test_generator_mt19937_regressions.py
@@ -1,7 +1,7 @@
 from numpy.testing import (assert_, assert_array_equal)
 import numpy as np
 import pytest
-from numpy.random import Generator, MT19937, RandomState
+from numpy.random import Generator, MT19937

 mt19937 = Generator(MT19937())
diff --git a/numpy/rec.pyi b/numpy/rec.pyi
deleted file mode 100644
index 198636058..000000000
--- a/numpy/rec.pyi
+++ /dev/null
@@ -1,65 +0,0 @@
-from typing import List
-
-from numpy import (
-    format_parser as format_parser,
-    record as record,
-    recarray as recarray,
-)
-
-__all__: List[str]
-
-def fromarrays(
-    arrayList,
-    dtype=...,
-    shape=...,
-    formats=...,
-    names=...,
-    titles=...,
-    aligned=...,
-    byteorder=...,
-): ...
-def fromrecords(
-    recList,
-    dtype=...,
-    shape=...,
-    formats=...,
-    names=...,
-    titles=...,
-    aligned=...,
-    byteorder=...,
-): ...
-def fromstring(
-    datastring,
-    dtype=...,
-    shape=...,
-    offset=...,
-    formats=...,
-    names=...,
-    titles=...,
-    aligned=...,
-    byteorder=...,
-): ...
-def fromfile(
-    fd,
-    dtype=...,
-    shape=...,
-    offset=...,
-    formats=...,
-    names=...,
-    titles=...,
-    aligned=...,
-    byteorder=...,
-): ...
-def array(
-    obj,
-    dtype=...,
-    shape=...,
-    offset=...,
-    strides=...,
-    formats=...,
-    names=...,
-    titles=...,
-    aligned=...,
-    byteorder=...,
-    copy=...,
-): ...
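The pcg64.c hunk at the top of this block corrects the emulated 128-bit right shift that ``advance`` relies on (and that the new ``test_advance_large`` cases pin down): the bit shifted out of ``high`` must be OR-ed into the top bit of ``low``, whereas the old code added it to the bottom bit. A Python model of the corrected two-word shift, written only to illustrate the arithmetic:

    MASK64 = (1 << 64) - 1

    def shr128_by_1(high, low):
        # The low bit of `high` becomes the top bit of `low`.
        new_low = ((low >> 1) | ((high & 1) << 63)) & MASK64
        return high >> 1, new_low

    # Sanity check against exact 128-bit integer arithmetic.
    h, l = 0x0123456789abcdef, 0xfedcba9876543210
    v = ((h << 64) | l) >> 1
    assert (v >> 64, v & MASK64) == shr128_by_1(h, l)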
diff --git a/numpy/testing/__init__.py b/numpy/testing/__init__.py
index bca1d3670..a008f5828 100644
--- a/numpy/testing/__init__.py
+++ b/numpy/testing/__init__.py
@@ -10,7 +10,7 @@ from unittest import TestCase
 from ._private.utils import *
 from ._private.utils import (_assert_valid_refcount, _gen_alignment_data,
                              IS_PYSTON)
-from ._private import decorators as dec
+from ._private import extbuild, decorators as dec
 from ._private.nosetester import (
     run_module_suite, NoseTester as Tester
     )
diff --git a/numpy/testing/_private/extbuild.py b/numpy/testing/_private/extbuild.py
new file mode 100644
index 000000000..20bf3dcea
--- /dev/null
+++ b/numpy/testing/_private/extbuild.py
@@ -0,0 +1,251 @@
+"""
+Build a c-extension module on-the-fly in tests.
+See build_and_import_extension for usage hints
+
+"""
+
+import os
+import pathlib
+import sys
+import sysconfig
+from numpy.distutils.ccompiler import new_compiler
+from distutils.errors import CompileError
+
+__all__ = ['build_and_import_extension', 'compile_extension_module']
+
+
+def build_and_import_extension(
+        modname, functions, *, prologue="", build_dir=None,
+        include_dirs=[], more_init=""):
+    """
+    Build and import a c-extension module `modname` from a list of function
+    fragments `functions`.
+
+    Parameters
+    ----------
+    functions : list of fragments
+        Each fragment is a sequence of func_name, calling convention, snippet.
+    prologue : string
+        Code to precede the rest, usually extra ``#include`` or ``#define``
+        macros.
+    build_dir : pathlib.Path
+        Where to build the module, usually a temporary directory
+    include_dirs : list
+        Extra directories to find include files when compiling
+    more_init : string
+        Code to appear in the module PyMODINIT_FUNC
+
+    Returns
+    -------
+    out: module
+        The module will have been loaded and is ready for use
+
+    Examples
+    --------
+    >>> functions = [("test_bytes", "METH_O", \"\"\"
+        if ( !PyBytes_Check(args)) {
+            Py_RETURN_FALSE;
+        }
+        Py_RETURN_TRUE;
+    \"\"\")]
+    >>> mod = build_and_import_extension("testme", functions)
+    >>> assert not mod.test_bytes(u'abc')
+    >>> assert mod.test_bytes(b'abc')
+    """
+
+    body = prologue + _make_methods(functions, modname)
+    init = """PyObject *mod = PyModule_Create(&moduledef);
+           """
+    if not build_dir:
+        build_dir = pathlib.Path('.')
+    if more_init:
+        init += """#define INITERROR return NULL
+                """
+        init += more_init
+    init += "\nreturn mod;"
+    source_string = _make_source(modname, init, body)
+    try:
+        mod_so = compile_extension_module(
+            modname, build_dir, include_dirs, source_string)
+    except CompileError as e:
+        # shorten the exception chain
+        raise RuntimeError(f"could not compile in {build_dir}:") from e
+    import importlib.util
+    spec = importlib.util.spec_from_file_location(modname, mod_so)
+    foo = importlib.util.module_from_spec(spec)
+    spec.loader.exec_module(foo)
+    return foo
+
+
+def compile_extension_module(
+        name, builddir, include_dirs,
+        source_string, libraries=[], library_dirs=[]):
+    """
+    Build an extension module and return the filename of the resulting
+    native code file.
+
+    Parameters
+    ----------
+    name : string
+        name of the module, possibly including dots if it is a module inside a
+        package.
+    builddir : pathlib.Path
+        Where to build the module, usually a temporary directory
+    include_dirs : list
+        Extra directories to find include files when compiling
+    libraries : list
+        Libraries to link into the extension module
+    library_dirs: list
+        Where to find the libraries, ``-L`` passed to the linker
+    """
+    modname = name.split('.')[-1]
+    dirname = builddir / name
+    dirname.mkdir(exist_ok=True)
+    cfile = _convert_str_to_file(source_string, dirname)
+    include_dirs = [sysconfig.get_config_var('INCLUDEPY')] + include_dirs
+
+    return _c_compile(
+        cfile, outputfilename=dirname / modname,
+        include_dirs=include_dirs, libraries=[], library_dirs=[],
+        )
+
+
+def _convert_str_to_file(source, dirname):
+    """Helper function to create a file ``source.c`` in `dirname` that
+    contains the string in `source`. Returns the file name
+    """
+    filename = dirname / 'source.c'
+    with filename.open('w') as f:
+        f.write(str(source))
+    return filename
+
+
+def _make_methods(functions, modname):
+    """ Turns the name, signature, code in functions into complete functions
+    and lists them in a methods_table. Then turns the methods_table into a
+    ``PyMethodDef`` structure and returns the resulting code fragment ready
+    for compilation
+    """
+    methods_table = []
+    codes = []
+    for funcname, flags, code in functions:
+        cfuncname = "%s_%s" % (modname, funcname)
+        if 'METH_KEYWORDS' in flags:
+            signature = '(PyObject *self, PyObject *args, PyObject *kwargs)'
+        else:
+            signature = '(PyObject *self, PyObject *args)'
+        methods_table.append(
+            "{\"%s\", (PyCFunction)%s, %s}," % (funcname, cfuncname, flags))
+        func_code = """
+        static PyObject* {cfuncname}{signature}
+        {{
+        {code}
+        }}
+        """.format(cfuncname=cfuncname, signature=signature, code=code)
+        codes.append(func_code)
+
+    body = "\n".join(codes) + """
+    static PyMethodDef methods[] = {
+    %(methods)s
+    { NULL }
+    };
+    static struct PyModuleDef moduledef = {
+        PyModuleDef_HEAD_INIT,
+        "%(modname)s",  /* m_name */
+        NULL,           /* m_doc */
+        -1,             /* m_size */
+        methods,        /* m_methods */
+    };
+    """ % dict(methods='\n'.join(methods_table), modname=modname)
+    return body
+
+
+def _make_source(name, init, body):
+    """ Combines the code fragments into source code ready to be compiled
+    """
+    code = """
+    #include <Python.h>
+
+    %(body)s
+
+    PyMODINIT_FUNC
+    PyInit_%(name)s(void) {
+    %(init)s
+    }
+    """ % dict(
+        name=name, init=init, body=body,
+    )
+    return code
+
+
+def _c_compile(cfile, outputfilename, include_dirs=[], libraries=[],
+               library_dirs=[]):
+    if sys.platform == 'win32':
+        compile_extra = ["/we4013"]
+        link_extra = ["/LIBPATH:" + os.path.join(sys.base_prefix, 'libs')]
+    elif sys.platform.startswith('linux'):
+        compile_extra = [
+            "-O0", "-g", "-Werror=implicit-function-declaration", "-fPIC"]
+        link_extra = None
+    else:
+        compile_extra = link_extra = None
+    if sys.platform == 'win32':
+        link_extra = link_extra + ['/DEBUG']  # generate .pdb file
+    if sys.platform == 'darwin':
+        # support Fink & Darwinports
+        for s in ('/sw/', '/opt/local/'):
+            if (s + 'include' not in include_dirs
+                    and os.path.exists(s + 'include')):
+                include_dirs.append(s + 'include')
+            if s + 'lib' not in library_dirs and os.path.exists(s + 'lib'):
+                library_dirs.append(s + 'lib')
+
+    outputfilename = outputfilename.with_suffix(get_so_suffix())
+    saved_environ = os.environ.copy()
+    try:
+        build(
+            cfile, outputfilename,
+            compile_extra, link_extra,
+            include_dirs, libraries, library_dirs)
+    finally:
+        # workaround for a distutils bug where some env vars can
+        # become longer and longer
every time it is used + for key, value in saved_environ.items(): + if os.environ.get(key) != value: + os.environ[key] = value + return outputfilename + + +def build(cfile, outputfilename, compile_extra, link_extra, + include_dirs, libraries, library_dirs): + "cd into the directory where the cfile is, use distutils to build" + + compiler = new_compiler(force=1, verbose=2) + compiler.customize('') + objects = [] + + old = os.getcwd() + os.chdir(cfile.parent) + try: + res = compiler.compile( + [str(cfile.name)], + include_dirs=include_dirs, + extra_preargs=compile_extra + ) + objects += [str(cfile.parent / r) for r in res] + finally: + os.chdir(old) + + compiler.link_shared_object( + objects, str(outputfilename), + libraries=libraries, + extra_preargs=link_extra, + library_dirs=library_dirs) + + +def get_so_suffix(): + ret = sysconfig.get_config_var('EXT_SUFFIX') + assert ret + return ret diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index 77ca4ef85..3d52f74b2 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -2402,9 +2402,9 @@ def break_cycles(): gc.collect() if IS_PYPY: - # interpreter runs now, to call deleted objects' __del__ methods + # a few more, just to make sure all the finalizers are called + gc.collect() gc.collect() - # two more, just to make sure gc.collect() gc.collect() diff --git a/numpy/tests/test_ctypeslib.py b/numpy/tests/test_ctypeslib.py index af3730df1..1ea083700 100644 --- a/numpy/tests/test_ctypeslib.py +++ b/numpy/tests/test_ctypeslib.py @@ -1,6 +1,7 @@ import sys import pytest import weakref +from pathlib import Path import numpy as np from numpy.ctypeslib import ndpointer, load_library, as_array @@ -37,13 +38,15 @@ else: reason="Known to fail on cygwin") class TestLoadLibrary: def test_basic(self): - try: - # Should succeed - load_library('_multiarray_umath', np.core._multiarray_umath.__file__) - except ImportError as e: - msg = ("ctypes is not available on this python: skipping the test" - " (import error was: %s)" % str(e)) - print(msg) + loader_path = np.core._multiarray_umath.__file__ + + out1 = load_library('_multiarray_umath', loader_path) + out2 = load_library(Path('_multiarray_umath'), loader_path) + out3 = load_library('_multiarray_umath', Path(loader_path)) + out4 = load_library(b'_multiarray_umath', loader_path) + + assert isinstance(out1, ctypes.CDLL) + assert out1 is out2 is out3 is out4 def test_basic2(self): # Regression for #801: load_library with a full library name diff --git a/numpy/tests/test_public_api.py b/numpy/tests/test_public_api.py index 5b1578500..1e7d389d9 100644 --- a/numpy/tests/test_public_api.py +++ b/numpy/tests/test_public_api.py @@ -178,7 +178,6 @@ PRIVATE_BUT_PRESENT_MODULES = ['numpy.' + s for s in [ "core.fromnumeric", "core.function_base", "core.getlimits", - "core.machar", "core.memmap", "core.multiarray", "core.numeric", @@ -253,6 +252,7 @@ PRIVATE_BUT_PRESENT_MODULES = ['numpy.' + s for s in [ "f2py.f90mod_rules", "f2py.func2subr", "f2py.rules", + "f2py.symbolic", "f2py.use_rules", "fft.helper", "lib.arraypad", diff --git a/numpy/typing/__init__.py b/numpy/typing/__init__.py index d60ddb5bb..d5cfbf5ac 100644 --- a/numpy/typing/__init__.py +++ b/numpy/typing/__init__.py @@ -21,6 +21,8 @@ Mypy plugin .. automodule:: numpy.typing.mypy_plugin +.. currentmodule:: numpy.typing + Differences from the runtime NumPy API -------------------------------------- @@ -112,8 +114,9 @@ runtime, they're not necessarily considered as sub-classes. 
Timedelta64
~~~~~~~~~~~

-The `~numpy.timedelta64` class is not considered a subclass of `~numpy.signedinteger`,
-the former only inheriting from `~numpy.generic` while static type checking.
+The `~numpy.timedelta64` class is not considered a subclass of
+`~numpy.signedinteger`, the former only inheriting from `~numpy.generic`
+during static type checking.

0D arrays
~~~~~~~~~

@@ -129,6 +132,22 @@
 If it is known in advance that an operation _will_ perform a
 0D-array -> scalar cast, then one can consider manually remedying the
 situation with either `typing.cast` or a ``# type: ignore`` comment.

+Record array dtypes
+~~~~~~~~~~~~~~~~~~~
+
+The dtype of `numpy.recarray`, and the `numpy.rec` functions in general,
+can be specified in one of two ways:
+
+* Directly via the ``dtype`` argument.
+* With up to five helper arguments that operate via `numpy.format_parser`:
+  ``formats``, ``names``, ``titles``, ``aligned`` and ``byteorder``.
+
+These two approaches are currently typed as being mutually exclusive,
+*i.e.* if ``dtype`` is specified then one may not specify ``formats``.
+While this mutual exclusivity is not (strictly) enforced during runtime,
+combining both dtype specifiers can lead to unexpected or even downright
+buggy behavior.
+
 API
 ---

@@ -136,7 +155,10 @@ API
 # NOTE: The API section will be appended with additional entries
 # further down in this file

-from typing import TYPE_CHECKING, List, Any, final
+from __future__ import annotations
+
+from numpy import ufunc
+from typing import TYPE_CHECKING, final

 if not TYPE_CHECKING:
     __all__ = ["ArrayLike", "DTypeLike", "NBitBase", "NDArray"]
@@ -147,14 +169,14 @@ else:
     #
     # Declare to mypy that `__all__` is a list of strings without assigning
     # an explicit value
-    __all__: List[str]
-    __path__: List[str]
+    __all__: list[str]
+    __path__: list[str]


-@final  # Dissallow the creation of arbitrary `NBitBase` subclasses
+@final  # Disallow the creation of arbitrary `NBitBase` subclasses
 class NBitBase:
     """
-    An object representing `numpy.number` precision during static type checking.
+    A type representing `numpy.number` precision during static type checking.

     Used exclusively for the purpose of static type checking, `NBitBase`
     represents the base of a hierarchical set of subclasses.

@@ -165,9 +187,9 @@ class NBitBase:
     Examples
     --------
-    Below is a typical usage example: `NBitBase` is herein used for annotating a
-    function that takes a float and integer of arbitrary precision as arguments
-    and returns a new float of whichever precision is largest
+    Below is a typical usage example: `NBitBase` is herein used for annotating
+    a function that takes a float and integer of arbitrary precision
+    as arguments and returns a new float of whichever precision is largest
     (*e.g.* ``np.float16 + np.int64 -> np.float64``).

     .. code-block:: python

@@ -207,15 +229,32 @@ class NBitBase:

 # Silence errors about subclassing a `@final`-decorated class
-class _256Bit(NBitBase): ...  # type: ignore[misc]
-class _128Bit(_256Bit): ...  # type: ignore[misc]
-class _96Bit(_128Bit): ...  # type: ignore[misc]
-class _80Bit(_96Bit): ...  # type: ignore[misc]
-class _64Bit(_80Bit): ...  # type: ignore[misc]
-class _32Bit(_64Bit): ...  # type: ignore[misc]
-class _16Bit(_32Bit): ...  # type: ignore[misc]
-class _8Bit(_16Bit): ...
# type: ignore[misc] +class _256Bit(NBitBase): # type: ignore[misc] + pass + +class _128Bit(_256Bit): # type: ignore[misc] + pass + +class _96Bit(_128Bit): # type: ignore[misc] + pass + +class _80Bit(_96Bit): # type: ignore[misc] + pass + +class _64Bit(_80Bit): # type: ignore[misc] + pass + +class _32Bit(_64Bit): # type: ignore[misc] + pass + +class _16Bit(_32Bit): # type: ignore[misc] + pass + +class _8Bit(_16Bit): # type: ignore[misc] + pass + +from ._nested_sequence import _NestedSequence from ._nbit import ( _NBitByte, _NBitShort, @@ -302,8 +341,7 @@ from ._dtype_like import ( from ._array_like import ( ArrayLike as ArrayLike, _ArrayLike, - _NestedSequence, - _RecursiveSequence, + _FiniteNestedSequence, _SupportsArray, _ArrayLikeInt, _ArrayLikeBool_co, @@ -334,14 +372,16 @@ if TYPE_CHECKING: _GUFunc_Nin2_Nout1, ) else: - _UFunc_Nin1_Nout1 = Any - _UFunc_Nin2_Nout1 = Any - _UFunc_Nin1_Nout2 = Any - _UFunc_Nin2_Nout2 = Any - _GUFunc_Nin2_Nout1 = Any + # Declare the (type-check-only) ufunc subclasses as ufunc aliases during + # runtime; this helps autocompletion tools such as Jedi (numpy/numpy#19834) + _UFunc_Nin1_Nout1 = ufunc + _UFunc_Nin2_Nout1 = ufunc + _UFunc_Nin1_Nout2 = ufunc + _UFunc_Nin2_Nout2 = ufunc + _GUFunc_Nin2_Nout1 = ufunc # Clean up the namespace -del TYPE_CHECKING, final, List, Any +del TYPE_CHECKING, final, ufunc if __doc__ is not None: from ._add_docstring import _docstrings diff --git a/numpy/typing/_add_docstring.py b/numpy/typing/_add_docstring.py index 846b67042..10d77f516 100644 --- a/numpy/typing/_add_docstring.py +++ b/numpy/typing/_add_docstring.py @@ -50,16 +50,17 @@ def _parse_docstrings() -> str: new_lines.append("") else: new_lines.append(f"{indent}{line}") - s = "\n".join(new_lines) - # Done. - type_list_ret.append(f""".. data:: {name}\n :value: {value}\n {s}""") + s = "\n".join(new_lines) + s_block = f""".. data:: {name}\n :value: {value}\n {s}""" + type_list_ret.append(s_block) return "\n".join(type_list_ret) add_newdoc('ArrayLike', 'typing.Union[...]', """ - A `~typing.Union` representing objects that can be coerced into an `~numpy.ndarray`. + A `~typing.Union` representing objects that can be coerced + into an `~numpy.ndarray`. Among others this includes the likes of: @@ -88,7 +89,8 @@ add_newdoc('ArrayLike', 'typing.Union[...]', add_newdoc('DTypeLike', 'typing.Union[...]', """ - A `~typing.Union` representing objects that can be coerced into a `~numpy.dtype`. + A `~typing.Union` representing objects that can be coerced + into a `~numpy.dtype`. Among others this includes the likes of: @@ -101,7 +103,8 @@ add_newdoc('DTypeLike', 'typing.Union[...]', See Also -------- :ref:`Specifying and constructing data types <arrays.dtypes.constructing>` - A comprehensive overview of all objects that can be coerced into data types. + A comprehensive overview of all objects that can be coerced + into data types. Examples -------- diff --git a/numpy/typing/_array_like.py b/numpy/typing/_array_like.py index 6ea0eb662..02e5ee573 100644 --- a/numpy/typing/_array_like.py +++ b/numpy/typing/_array_like.py @@ -18,6 +18,7 @@ from numpy import ( str_, bytes_, ) +from ._nested_sequence import _NestedSequence _T = TypeVar("_T") _ScalarType = TypeVar("_ScalarType", bound=generic) @@ -32,21 +33,23 @@ _DType_co = TypeVar("_DType_co", covariant=True, bound="dtype[Any]") class _SupportsArray(Protocol[_DType_co]): def __array__(self) -> ndarray[Any, _DType_co]: ... 
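For context on the ``_SupportsArray`` protocol above: at runtime, any object implementing ``__array__`` is accepted wherever an array-like is expected. A tiny sketch (the ``Ramp`` class is invented for illustration):

    import numpy as np

    class Ramp:
        def __array__(self):
            # np.asarray() and friends call this hook.
            return np.arange(4.0)

    print(np.asarray(Ramp()))  # [0. 1. 2. 3.]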
-# TODO: Wait for support for recursive types -_NestedSequence = Union[ + +# TODO: Wait until mypy supports recursive objects in combination with typevars +_FiniteNestedSequence = Union[ _T, Sequence[_T], Sequence[Sequence[_T]], Sequence[Sequence[Sequence[_T]]], Sequence[Sequence[Sequence[Sequence[_T]]]], ] -_RecursiveSequence = Sequence[Sequence[Sequence[Sequence[Sequence[Any]]]]] # A union representing array-like objects; consists of two typevars: # One representing types that can be parametrized w.r.t. `np.dtype` # and another one for the rest _ArrayLike = Union[ + _SupportsArray[_DType], _NestedSequence[_SupportsArray[_DType]], + _T, _NestedSequence[_T], ] @@ -57,12 +60,9 @@ _ArrayLike = Union[ # is resolved. See also the mypy issue: # # https://github.com/python/typing/issues/593 -ArrayLike = Union[ - _RecursiveSequence, - _ArrayLike[ - dtype, - Union[bool, int, float, complex, str, bytes] - ], +ArrayLike = _ArrayLike[ + dtype, + Union[bool, int, float, complex, str, bytes], ] # `ArrayLike<X>_co`: array-like objects that can be coerced into `X` @@ -95,10 +95,19 @@ _ArrayLikeTD64_co = _ArrayLike[ "dtype[Union[bool_, integer[Any], timedelta64]]", Union[bool, int], ] -_ArrayLikeDT64_co = _NestedSequence[_SupportsArray["dtype[datetime64]"]] -_ArrayLikeObject_co = _NestedSequence[_SupportsArray["dtype[object_]"]] +_ArrayLikeDT64_co = Union[ + _SupportsArray["dtype[datetime64]"], + _NestedSequence[_SupportsArray["dtype[datetime64]"]], +] +_ArrayLikeObject_co = Union[ + _SupportsArray["dtype[object_]"], + _NestedSequence[_SupportsArray["dtype[object_]"]], +] -_ArrayLikeVoid_co = _NestedSequence[_SupportsArray["dtype[void]"]] +_ArrayLikeVoid_co = Union[ + _SupportsArray["dtype[void]"], + _NestedSequence[_SupportsArray["dtype[void]"]], +] _ArrayLikeStr_co = _ArrayLike[ "dtype[str_]", str, diff --git a/numpy/typing/_callable.py b/numpy/typing/_callable.pyi index 44ad5c291..e1149f26a 100644 --- a/numpy/typing/_callable.py +++ b/numpy/typing/_callable.pyi @@ -49,6 +49,8 @@ from ._generic_alias import NDArray _T1 = TypeVar("_T1") _T2 = TypeVar("_T2") +_T1_contra = TypeVar("_T1_contra", contravariant=True) +_T2_contra = TypeVar("_T2_contra", contravariant=True) _2Tuple = Tuple[_T1, _T1] _NBit1 = TypeVar("_NBit1", bound=NBitBase) @@ -318,8 +320,8 @@ class _ComplexOp(Protocol[_NBit1]): class _NumberOp(Protocol): def __call__(self, other: _NumberLike_co, /) -> Any: ... -class _ComparisonOp(Protocol[_T1, _T2]): +class _ComparisonOp(Protocol[_T1_contra, _T2_contra]): @overload - def __call__(self, other: _T1, /) -> bool_: ... + def __call__(self, other: _T1_contra, /) -> bool_: ... @overload - def __call__(self, other: _T2, /) -> NDArray[bool_]: ... + def __call__(self, other: _T2_contra, /) -> NDArray[bool_]: ... 
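The ``_callable.pyi`` hunk above switches the comparison protocol's parameters to contravariant typevars. Function arguments are contravariant positions, so a parametrized callback protocol needs contravariant typevars for assignability to work out; a minimal self-contained sketch with invented names:

    from typing import Protocol, TypeVar

    T_contra = TypeVar("T_contra", contravariant=True)

    class Consumer(Protocol[T_contra]):
        def __call__(self, value: T_contra, /) -> bool: ...

    def accepts_float(value: float, /) -> bool:
        return value > 0

    # A consumer of the wider type (float) is a valid Consumer[int];
    # contravariance makes this assignment type-check, the reverse would not.
    c: Consumer[int] = accepts_float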
diff --git a/numpy/typing/_dtype_like.py b/numpy/typing/_dtype_like.py index 0955f5b18..c9bf1a137 100644 --- a/numpy/typing/_dtype_like.py +++ b/numpy/typing/_dtype_like.py @@ -1,4 +1,14 @@ -from typing import Any, List, Sequence, Tuple, Union, Type, TypeVar, Protocol, TypedDict +from typing import ( + Any, + List, + Sequence, + Tuple, + Union, + Type, + TypeVar, + Protocol, + TypedDict, +) import numpy as np @@ -55,18 +65,23 @@ class _DTypeDictBase(TypedDict): names: Sequence[str] formats: Sequence[_DTypeLikeNested] + # Mandatory + optional keys class _DTypeDict(_DTypeDictBase, total=False): + # Only `str` elements are usable as indexing aliases, + # but `titles` can in principle accept any object offsets: Sequence[int] - titles: Sequence[Any] # Only `str` elements are usable as indexing aliases, but all objects are legal + titles: Sequence[Any] itemsize: int aligned: bool + # A protocol for anything with the dtype attribute class _SupportsDType(Protocol[_DType_co]): @property def dtype(self) -> _DType_co: ... + # Would create a dtype[np.void] _VoidDTypeLike = Union[ # (flexible_dtype, itemsize) @@ -93,7 +108,7 @@ DTypeLike = Union[ # default data type (float64) None, # array-scalar types and generic types - Type[Any], # TODO: enumerate these when we add type hints for numpy scalars + Type[Any], # NOTE: We're stuck with `Type[Any]` due to object dtypes # anything with a dtype attribute _SupportsDType[DType[Any]], # character codes, type strings or comma-separated fields, e.g., 'float64' diff --git a/numpy/typing/_extended_precision.py b/numpy/typing/_extended_precision.py index 0900bc659..edc1778ce 100644 --- a/numpy/typing/_extended_precision.py +++ b/numpy/typing/_extended_precision.py @@ -1,4 +1,5 @@ -"""A module with platform-specific extended precision `numpy.number` subclasses. +"""A module with platform-specific extended precision +`numpy.number` subclasses. The subclasses are defined here (instead of ``__init__.pyi``) such that they can be imported conditionally via the numpy's mypy plugin. diff --git a/numpy/typing/_generic_alias.py b/numpy/typing/_generic_alias.py index 8d65ef855..932f12dd0 100644 --- a/numpy/typing/_generic_alias.py +++ b/numpy/typing/_generic_alias.py @@ -51,7 +51,7 @@ def _parse_parameters(args: Iterable[Any]) -> Generator[TypeVar, None, None]: def _reconstruct_alias(alias: _T, parameters: Iterator[TypeVar]) -> _T: - """Recursivelly replace all typevars with those from `parameters`. + """Recursively replace all typevars with those from `parameters`. Helper function for `_GenericAlias.__getitem__`. 
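Earlier in this block, the ``_dtype_like.py`` hunk documents the ``_SupportsDType`` protocol: anything exposing a ``dtype`` attribute can serve as a dtype specifier. A runtime illustration (the ``HasDType`` class is hypothetical):

    import numpy as np

    class HasDType:
        # np.dtype() consults this attribute directly.
        dtype = np.dtype(np.float32)

    print(np.dtype(HasDType()))  # float32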
@@ -205,12 +205,9 @@
 ScalarType = TypeVar("ScalarType", bound=np.generic, covariant=True)

-if TYPE_CHECKING:
+if TYPE_CHECKING or sys.version_info >= (3, 9):
     _DType = np.dtype[ScalarType]
     NDArray = np.ndarray[Any, np.dtype[ScalarType]]
-elif sys.version_info >= (3, 9):
-    _DType = types.GenericAlias(np.dtype, (ScalarType,))
-    NDArray = types.GenericAlias(np.ndarray, (Any, _DType))
 else:
     _DType = _GenericAlias(np.dtype, (ScalarType,))
     NDArray = _GenericAlias(np.ndarray, (Any, _DType))
diff --git a/numpy/typing/_nested_sequence.py b/numpy/typing/_nested_sequence.py
new file mode 100644
index 000000000..a853303ca
--- /dev/null
+++ b/numpy/typing/_nested_sequence.py
@@ -0,0 +1,90 @@
+"""A module containing the `_NestedSequence` protocol."""
+
+from __future__ import annotations
+
+from typing import (
+    Any,
+    Iterator,
+    overload,
+    TypeVar,
+    Protocol,
+)
+
+__all__ = ["_NestedSequence"]
+
+_T_co = TypeVar("_T_co", covariant=True)
+
+
+class _NestedSequence(Protocol[_T_co]):
+    """A protocol for representing nested sequences.
+
+    Warning
+    -------
+    `_NestedSequence` currently does not work in combination with typevars,
+    *e.g.* ``def func(a: _NestedSequence[T]) -> T: ...``.
+
+    See Also
+    --------
+    `collections.abc.Sequence`
+        ABCs for read-only and mutable :term:`sequences`.
+
+    Examples
+    --------
+    .. code-block:: python
+
+        >>> from __future__ import annotations
+
+        >>> from typing import TYPE_CHECKING
+        >>> import numpy as np
+        >>> from numpy.typing import _NestedSequence
+
+        >>> def get_dtype(seq: _NestedSequence[float]) -> np.dtype[np.float64]:
+        ...     return np.asarray(seq).dtype
+
+        >>> a = get_dtype([1.0])
+        >>> b = get_dtype([[1.0]])
+        >>> c = get_dtype([[[1.0]]])
+        >>> d = get_dtype([[[[1.0]]]])
+
+        >>> if TYPE_CHECKING:
+        ...     reveal_locals()
+        ...     # note: Revealed local types are:
+        ...     # note:     a: numpy.dtype[numpy.floating[numpy.typing._64Bit]]
+        ...     # note:     b: numpy.dtype[numpy.floating[numpy.typing._64Bit]]
+        ...     # note:     c: numpy.dtype[numpy.floating[numpy.typing._64Bit]]
+        ...     # note:     d: numpy.dtype[numpy.floating[numpy.typing._64Bit]]
+
+    """
+
+    def __len__(self, /) -> int:
+        """Implement ``len(self)``."""
+        raise NotImplementedError
+
+    @overload
+    def __getitem__(self, index: int, /) -> _T_co | _NestedSequence[_T_co]: ...
+    @overload
+    def __getitem__(self, index: slice, /) -> _NestedSequence[_T_co]: ...
+ + def __getitem__(self, index, /): + """Implement ``self[x]``.""" + raise NotImplementedError + + def __contains__(self, x: object, /) -> bool: + """Implement ``x in self``.""" + raise NotImplementedError + + def __iter__(self, /) -> Iterator[_T_co | _NestedSequence[_T_co]]: + """Implement ``iter(self)``.""" + raise NotImplementedError + + def __reversed__(self, /) -> Iterator[_T_co | _NestedSequence[_T_co]]: + """Implement ``reversed(self)``.""" + raise NotImplementedError + + def count(self, value: Any, /) -> int: + """Return the number of occurrences of `value`.""" + raise NotImplementedError + + def index(self, value: Any, /) -> int: + """Return the first index of `value`.""" + raise NotImplementedError diff --git a/numpy/typing/mypy_plugin.py b/numpy/typing/mypy_plugin.py index 091980d65..5ac75f94d 100644 --- a/numpy/typing/mypy_plugin.py +++ b/numpy/typing/mypy_plugin.py @@ -33,7 +33,8 @@ To enable the plugin, one must add it to their mypy `configuration file`_: from __future__ import annotations -import typing as t +from collections.abc import Iterable +from typing import Final, TYPE_CHECKING, Callable import numpy as np @@ -44,15 +45,15 @@ try: from mypy.nodes import MypyFile, ImportFrom, Statement from mypy.build import PRI_MED - _HookFunc = t.Callable[[AnalyzeTypeContext], Type] + _HookFunc = Callable[[AnalyzeTypeContext], Type] MYPY_EX: None | ModuleNotFoundError = None except ModuleNotFoundError as ex: MYPY_EX = ex -__all__: t.List[str] = [] +__all__: list[str] = [] -def _get_precision_dict() -> t.Dict[str, str]: +def _get_precision_dict() -> dict[str, str]: names = [ ("_NBitByte", np.byte), ("_NBitShort", np.short), @@ -73,7 +74,7 @@ def _get_precision_dict() -> t.Dict[str, str]: return ret -def _get_extended_precision_list() -> t.List[str]: +def _get_extended_precision_list() -> list[str]: extended_types = [np.ulonglong, np.longlong, np.longdouble, np.clongdouble] extended_names = { "uint128", @@ -107,13 +108,13 @@ def _get_c_intp_name() -> str: #: A dictionary mapping type-aliases in `numpy.typing._nbit` to #: concrete `numpy.typing.NBitBase` subclasses. -_PRECISION_DICT: t.Final = _get_precision_dict() +_PRECISION_DICT: Final = _get_precision_dict() #: A list with the names of all extended precision `np.number` subclasses. 
-_EXTENDED_PRECISION_LIST: t.Final = _get_extended_precision_list() +_EXTENDED_PRECISION_LIST: Final = _get_extended_precision_list() #: The name of the ctypes quivalent of `np.intp` -_C_INTP: t.Final = _get_c_intp_name() +_C_INTP: Final = _get_c_intp_name() def _hook(ctx: AnalyzeTypeContext) -> Type: @@ -124,20 +125,19 @@ def _hook(ctx: AnalyzeTypeContext) -> Type: return api.named_type(name_new) -if t.TYPE_CHECKING or MYPY_EX is None: - def _index(iterable: t.Iterable[Statement], id: str) -> int: +if TYPE_CHECKING or MYPY_EX is None: + def _index(iterable: Iterable[Statement], id: str) -> int: """Identify the first ``ImportFrom`` instance the specified `id`.""" for i, value in enumerate(iterable): if getattr(value, "id", None) == id: return i - else: - raise ValueError("Failed to identify a `ImportFrom` instance " - f"with the following id: {id!r}") + raise ValueError("Failed to identify a `ImportFrom` instance " + f"with the following id: {id!r}") def _override_imports( file: MypyFile, module: str, - imports: t.List[t.Tuple[str, t.Optional[str]]], + imports: list[tuple[str, None | str]], ) -> None: """Override the first `module`-based import with new `imports`.""" # Construct a new `from module import y` statement @@ -145,7 +145,7 @@ if t.TYPE_CHECKING or MYPY_EX is None: import_obj.is_top_level = True # Replace the first `module`-based import statement with `import_obj` - for lst in [file.defs, file.imports]: # type: t.List[Statement] + for lst in [file.defs, file.imports]: # type: list[Statement] i = _index(lst, module) lst[i] = import_obj @@ -153,7 +153,8 @@ if t.TYPE_CHECKING or MYPY_EX is None: """A mypy plugin for handling versus numpy-specific typing tasks.""" def get_type_analyze_hook(self, fullname: str) -> None | _HookFunc: - """Set the precision of platform-specific `numpy.number` subclasses. + """Set the precision of platform-specific `numpy.number` + subclasses. For example: `numpy.int_`, `numpy.longlong` and `numpy.longdouble`. """ @@ -161,7 +162,9 @@ if t.TYPE_CHECKING or MYPY_EX is None: return _hook return None - def get_additional_deps(self, file: MypyFile) -> t.List[t.Tuple[int, str, int]]: + def get_additional_deps( + self, file: MypyFile + ) -> list[tuple[int, str, int]]: """Handle all import-based overrides. 
* Import platform-specific extended-precision `numpy.number` @@ -184,11 +187,11 @@ if t.TYPE_CHECKING or MYPY_EX is None: ) return ret - def plugin(version: str) -> t.Type[_NumpyPlugin]: + def plugin(version: str) -> type[_NumpyPlugin]: """An entry-point for mypy.""" return _NumpyPlugin else: - def plugin(version: str) -> t.Type[_NumpyPlugin]: + def plugin(version: str) -> type[_NumpyPlugin]: """An entry-point for mypy.""" raise MYPY_EX diff --git a/numpy/typing/tests/data/fail/arithmetic.py b/numpy/typing/tests/data/fail/arithmetic.pyi index 02bbffa53..b99b24c1f 100644 --- a/numpy/typing/tests/data/fail/arithmetic.py +++ b/numpy/typing/tests/data/fail/arithmetic.pyi @@ -28,6 +28,7 @@ AR_LIKE_M: List[np.datetime64] # NOTE: mypys `NoReturn` errors are, unfortunately, not that great _1 = AR_b - AR_LIKE_b # E: Need type annotation _2 = AR_LIKE_b - AR_b # E: Need type annotation +AR_i - bytes() # E: No overload variant AR_f - AR_LIKE_m # E: Unsupported operand types AR_f - AR_LIKE_M # E: Unsupported operand types @@ -91,11 +92,11 @@ AR_f *= AR_LIKE_m # E: incompatible type # Array power -AR_b **= AR_LIKE_b # E: incompatible type -AR_b **= AR_LIKE_u # E: incompatible type -AR_b **= AR_LIKE_i # E: incompatible type -AR_b **= AR_LIKE_f # E: incompatible type -AR_b **= AR_LIKE_c # E: incompatible type +AR_b **= AR_LIKE_b # E: Invalid self argument +AR_b **= AR_LIKE_u # E: Invalid self argument +AR_b **= AR_LIKE_i # E: Invalid self argument +AR_b **= AR_LIKE_f # E: Invalid self argument +AR_b **= AR_LIKE_c # E: Invalid self argument AR_u **= AR_LIKE_i # E: incompatible type AR_u **= AR_LIKE_f # E: incompatible type diff --git a/numpy/typing/tests/data/fail/array_constructors.py b/numpy/typing/tests/data/fail/array_constructors.pyi index 0e2250513..4f0a60b5b 100644 --- a/numpy/typing/tests/data/fail/array_constructors.py +++ b/numpy/typing/tests/data/fail/array_constructors.pyi @@ -10,7 +10,7 @@ np.zeros("test") # E: incompatible type np.zeros() # E: require at least one argument np.ones("test") # E: incompatible type -np.ones() # E: Missing positional argument +np.ones() # E: require at least one argument np.array(0, float, True) # E: No overload variant diff --git a/numpy/typing/tests/data/fail/array_like.py b/numpy/typing/tests/data/fail/array_like.pyi index 3bbd29061..3bbd29061 100644 --- a/numpy/typing/tests/data/fail/array_like.py +++ b/numpy/typing/tests/data/fail/array_like.pyi diff --git a/numpy/typing/tests/data/fail/array_pad.py b/numpy/typing/tests/data/fail/array_pad.pyi index 2be51a871..2be51a871 100644 --- a/numpy/typing/tests/data/fail/array_pad.py +++ b/numpy/typing/tests/data/fail/array_pad.pyi diff --git a/numpy/typing/tests/data/fail/arrayprint.py b/numpy/typing/tests/data/fail/arrayprint.pyi index 86297a0b2..86297a0b2 100644 --- a/numpy/typing/tests/data/fail/arrayprint.py +++ b/numpy/typing/tests/data/fail/arrayprint.pyi diff --git a/numpy/typing/tests/data/fail/arrayterator.py b/numpy/typing/tests/data/fail/arrayterator.pyi index c50fb2ec4..c50fb2ec4 100644 --- a/numpy/typing/tests/data/fail/arrayterator.py +++ b/numpy/typing/tests/data/fail/arrayterator.pyi diff --git a/numpy/typing/tests/data/fail/bitwise_ops.py b/numpy/typing/tests/data/fail/bitwise_ops.pyi index 8a8f89755..ee9090007 100644 --- a/numpy/typing/tests/data/fail/bitwise_ops.py +++ b/numpy/typing/tests/data/fail/bitwise_ops.pyi @@ -16,5 +16,5 @@ u8 & f8 # E: No overload variant ~f8 # E: Unsupported operand type # mypys' error message for `NoReturn` is unfortunately pretty bad -# TODO: Reenable this once we add 
support for numerical precision for `number`s +# TODO: Re-enable this once we add support for numerical precision for `number`s # a = u8 | 0 # E: Need type annotation diff --git a/numpy/typing/tests/data/fail/char.pyi b/numpy/typing/tests/data/fail/char.pyi new file mode 100644 index 000000000..320f05df5 --- /dev/null +++ b/numpy/typing/tests/data/fail/char.pyi @@ -0,0 +1,66 @@ +import numpy as np +import numpy.typing as npt + +AR_U: npt.NDArray[np.str_] +AR_S: npt.NDArray[np.bytes_] + +np.char.equal(AR_U, AR_S) # E: incompatible type + +np.char.not_equal(AR_U, AR_S) # E: incompatible type + +np.char.greater_equal(AR_U, AR_S) # E: incompatible type + +np.char.less_equal(AR_U, AR_S) # E: incompatible type + +np.char.greater(AR_U, AR_S) # E: incompatible type + +np.char.less(AR_U, AR_S) # E: incompatible type + +np.char.encode(AR_S) # E: incompatible type +np.char.decode(AR_U) # E: incompatible type + +np.char.join(AR_U, b"_") # E: incompatible type +np.char.join(AR_S, "_") # E: incompatible type + +np.char.ljust(AR_U, 5, fillchar=b"a") # E: incompatible type +np.char.ljust(AR_S, 5, fillchar="a") # E: incompatible type +np.char.rjust(AR_U, 5, fillchar=b"a") # E: incompatible type +np.char.rjust(AR_S, 5, fillchar="a") # E: incompatible type + +np.char.lstrip(AR_U, chars=b"a") # E: incompatible type +np.char.lstrip(AR_S, chars="a") # E: incompatible type +np.char.strip(AR_U, chars=b"a") # E: incompatible type +np.char.strip(AR_S, chars="a") # E: incompatible type +np.char.rstrip(AR_U, chars=b"a") # E: incompatible type +np.char.rstrip(AR_S, chars="a") # E: incompatible type + +np.char.partition(AR_U, b"a") # E: incompatible type +np.char.partition(AR_S, "a") # E: incompatible type +np.char.rpartition(AR_U, b"a") # E: incompatible type +np.char.rpartition(AR_S, "a") # E: incompatible type + +np.char.replace(AR_U, b"_", b"-") # E: incompatible type +np.char.replace(AR_S, "_", "-") # E: incompatible type + +np.char.split(AR_U, b"_") # E: incompatible type +np.char.split(AR_S, "_") # E: incompatible type +np.char.rsplit(AR_U, b"_") # E: incompatible type +np.char.rsplit(AR_S, "_") # E: incompatible type + +np.char.count(AR_U, b"a", start=[1, 2, 3]) # E: incompatible type +np.char.count(AR_S, "a", end=9) # E: incompatible type + +np.char.endswith(AR_U, b"a", start=[1, 2, 3]) # E: incompatible type +np.char.endswith(AR_S, "a", end=9) # E: incompatible type +np.char.startswith(AR_U, b"a", start=[1, 2, 3]) # E: incompatible type +np.char.startswith(AR_S, "a", end=9) # E: incompatible type + +np.char.find(AR_U, b"a", start=[1, 2, 3]) # E: incompatible type +np.char.find(AR_S, "a", end=9) # E: incompatible type +np.char.rfind(AR_U, b"a", start=[1, 2, 3]) # E: incompatible type +np.char.rfind(AR_S, "a", end=9) # E: incompatible type + +np.char.index(AR_U, b"a", start=[1, 2, 3]) # E: incompatible type +np.char.index(AR_S, "a", end=9) # E: incompatible type +np.char.rindex(AR_U, b"a", start=[1, 2, 3]) # E: incompatible type +np.char.rindex(AR_S, "a", end=9) # E: incompatible type diff --git a/numpy/typing/tests/data/fail/chararray.pyi b/numpy/typing/tests/data/fail/chararray.pyi new file mode 100644 index 000000000..ebc182ec2 --- /dev/null +++ b/numpy/typing/tests/data/fail/chararray.pyi @@ -0,0 +1,62 @@ +import numpy as np +from typing import Any + +AR_U: np.chararray[Any, np.dtype[np.str_]] +AR_S: np.chararray[Any, np.dtype[np.bytes_]] + +AR_S.encode() # E: Invalid self argument +AR_U.decode() # E: Invalid self argument + +AR_U.join(b"_") # E: incompatible type +AR_S.join("_") # E: incompatible type + 
+AR_U.ljust(5, fillchar=b"a") # E: incompatible type +AR_S.ljust(5, fillchar="a") # E: incompatible type +AR_U.rjust(5, fillchar=b"a") # E: incompatible type +AR_S.rjust(5, fillchar="a") # E: incompatible type + +AR_U.lstrip(chars=b"a") # E: incompatible type +AR_S.lstrip(chars="a") # E: incompatible type +AR_U.strip(chars=b"a") # E: incompatible type +AR_S.strip(chars="a") # E: incompatible type +AR_U.rstrip(chars=b"a") # E: incompatible type +AR_S.rstrip(chars="a") # E: incompatible type + +AR_U.partition(b"a") # E: incompatible type +AR_S.partition("a") # E: incompatible type +AR_U.rpartition(b"a") # E: incompatible type +AR_S.rpartition("a") # E: incompatible type + +AR_U.replace(b"_", b"-") # E: incompatible type +AR_S.replace("_", "-") # E: incompatible type + +AR_U.split(b"_") # E: incompatible type +AR_S.split("_") # E: incompatible type +AR_S.split(1) # E: incompatible type +AR_U.rsplit(b"_") # E: incompatible type +AR_S.rsplit("_") # E: incompatible type + +AR_U.count(b"a", start=[1, 2, 3]) # E: incompatible type +AR_S.count("a", end=9) # E: incompatible type + +AR_U.endswith(b"a", start=[1, 2, 3]) # E: incompatible type +AR_S.endswith("a", end=9) # E: incompatible type +AR_U.startswith(b"a", start=[1, 2, 3]) # E: incompatible type +AR_S.startswith("a", end=9) # E: incompatible type + +AR_U.find(b"a", start=[1, 2, 3]) # E: incompatible type +AR_S.find("a", end=9) # E: incompatible type +AR_U.rfind(b"a", start=[1, 2, 3]) # E: incompatible type +AR_S.rfind("a", end=9) # E: incompatible type + +AR_U.index(b"a", start=[1, 2, 3]) # E: incompatible type +AR_S.index("a", end=9) # E: incompatible type +AR_U.rindex(b"a", start=[1, 2, 3]) # E: incompatible type +AR_S.rindex("a", end=9) # E: incompatible type + +AR_U == AR_S # E: Unsupported operand types +AR_U != AR_S # E: Unsupported operand types +AR_U >= AR_S # E: Unsupported operand types +AR_U <= AR_S # E: Unsupported operand types +AR_U > AR_S # E: Unsupported operand types +AR_U < AR_S # E: Unsupported operand types diff --git a/numpy/typing/tests/data/fail/comparisons.py b/numpy/typing/tests/data/fail/comparisons.pyi index cad1c6555..febd0a18c 100644 --- a/numpy/typing/tests/data/fail/comparisons.py +++ b/numpy/typing/tests/data/fail/comparisons.pyi @@ -21,8 +21,7 @@ AR_M > AR_i # E: Unsupported operand types AR_M > AR_f # E: Unsupported operand types AR_M > AR_m # E: Unsupported operand types -# Unfortunately `NoReturn` errors are not the most descriptive -_1 = AR_i > str() # E: Need type annotation -_2 = AR_i > bytes() # E: Need type annotation -_3 = str() > AR_M # E: Need type annotation -_4 = bytes() > AR_M # E: Need type annotation +AR_i > str() # E: No overload variant +AR_i > bytes() # E: No overload variant +str() > AR_M # E: Unsupported operand types +bytes() > AR_M # E: Unsupported operand types diff --git a/numpy/typing/tests/data/fail/constants.py b/numpy/typing/tests/data/fail/constants.pyi index 324cbe9fa..324cbe9fa 100644 --- a/numpy/typing/tests/data/fail/constants.py +++ b/numpy/typing/tests/data/fail/constants.pyi diff --git a/numpy/typing/tests/data/fail/datasource.py b/numpy/typing/tests/data/fail/datasource.pyi index 345277d45..345277d45 100644 --- a/numpy/typing/tests/data/fail/datasource.py +++ b/numpy/typing/tests/data/fail/datasource.pyi diff --git a/numpy/typing/tests/data/fail/dtype.py b/numpy/typing/tests/data/fail/dtype.pyi index 0f3810f3c..0f3810f3c 100644 --- a/numpy/typing/tests/data/fail/dtype.py +++ b/numpy/typing/tests/data/fail/dtype.pyi diff --git a/numpy/typing/tests/data/fail/einsumfunc.py 
b/numpy/typing/tests/data/fail/einsumfunc.pyi index 33722f861..33722f861 100644 --- a/numpy/typing/tests/data/fail/einsumfunc.py +++ b/numpy/typing/tests/data/fail/einsumfunc.pyi diff --git a/numpy/typing/tests/data/fail/flatiter.py b/numpy/typing/tests/data/fail/flatiter.pyi index 544ffbe4a..544ffbe4a 100644 --- a/numpy/typing/tests/data/fail/flatiter.py +++ b/numpy/typing/tests/data/fail/flatiter.pyi diff --git a/numpy/typing/tests/data/fail/fromnumeric.py b/numpy/typing/tests/data/fail/fromnumeric.pyi index 8fafed1b7..8fafed1b7 100644 --- a/numpy/typing/tests/data/fail/fromnumeric.py +++ b/numpy/typing/tests/data/fail/fromnumeric.pyi diff --git a/numpy/typing/tests/data/fail/histograms.pyi b/numpy/typing/tests/data/fail/histograms.pyi new file mode 100644 index 000000000..ad151488d --- /dev/null +++ b/numpy/typing/tests/data/fail/histograms.pyi @@ -0,0 +1,13 @@ +import numpy as np +import numpy.typing as npt + +AR_i8: npt.NDArray[np.int64] +AR_f8: npt.NDArray[np.float64] + +np.histogram_bin_edges(AR_i8, range=(0, 1, 2)) # E: incompatible type + +np.histogram(AR_i8, range=(0, 1, 2)) # E: incompatible type +np.histogram(AR_i8, normed=True) # E: incompatible type + +np.histogramdd(AR_i8, range=(0, 1)) # E: incompatible type +np.histogramdd(AR_i8, range=[(0, 1, 2)]) # E: incompatible type diff --git a/numpy/typing/tests/data/fail/index_tricks.py b/numpy/typing/tests/data/fail/index_tricks.pyi index c508bf3ae..c508bf3ae 100644 --- a/numpy/typing/tests/data/fail/index_tricks.py +++ b/numpy/typing/tests/data/fail/index_tricks.pyi diff --git a/numpy/typing/tests/data/fail/lib_function_base.pyi b/numpy/typing/tests/data/fail/lib_function_base.pyi new file mode 100644 index 000000000..9cad2da03 --- /dev/null +++ b/numpy/typing/tests/data/fail/lib_function_base.pyi @@ -0,0 +1,53 @@ +from typing import Any + +import numpy as np +import numpy.typing as npt + +AR_f8: npt.NDArray[np.float64] +AR_c16: npt.NDArray[np.complex128] +AR_m: npt.NDArray[np.timedelta64] +AR_M: npt.NDArray[np.datetime64] +AR_O: npt.NDArray[np.object_] + +def func(a: int) -> None: ... 
+ +np.average(AR_m) # E: incompatible type +np.select(1, [AR_f8]) # E: incompatible type +np.angle(AR_m) # E: incompatible type +np.unwrap(AR_m) # E: incompatible type +np.unwrap(AR_c16) # E: incompatible type +np.trim_zeros(1) # E: incompatible type +np.place(1, [True], 1.5) # E: incompatible type +np.vectorize(1) # E: incompatible type +np.add_newdoc("__main__", 1.5, "docstring") # E: incompatible type +np.place(AR_f8, slice(None), 5) # E: incompatible type + +np.interp(AR_f8, AR_c16, AR_f8) # E: incompatible type +np.interp(AR_c16, AR_f8, AR_f8) # E: incompatible type +np.interp(AR_f8, AR_f8, AR_f8, period=AR_c16) # E: No overload variant +np.interp(AR_f8, AR_f8, AR_O) # E: incompatible type + +np.cov(AR_m) # E: incompatible type +np.cov(AR_O) # E: incompatible type +np.corrcoef(AR_m) # E: incompatible type +np.corrcoef(AR_O) # E: incompatible type +np.corrcoef(AR_f8, bias=True) # E: No overload variant +np.corrcoef(AR_f8, ddof=2) # E: No overload variant +np.blackman(1j) # E: incompatible type +np.bartlett(1j) # E: incompatible type +np.hanning(1j) # E: incompatible type +np.hamming(1j) # E: incompatible type +np.hamming(AR_c16) # E: incompatible type +np.kaiser(1j, 1) # E: incompatible type +np.sinc(AR_O) # E: incompatible type +np.median(AR_M) # E: incompatible type + +np.add_newdoc_ufunc(func, "docstring") # E: incompatible type +np.percentile(AR_f8, 50j) # E: No overload variant +np.percentile(AR_f8, 50, interpolation="bob") # E: No overload variant +np.quantile(AR_f8, 0.5j) # E: No overload variant +np.quantile(AR_f8, 0.5, interpolation="bob") # E: No overload variant +np.meshgrid(AR_f8, AR_f8, indexing="bob") # E: incompatible type +np.delete(AR_f8, AR_f8) # E: incompatible type +np.insert(AR_f8, AR_f8, 1.5) # E: incompatible type +np.digitize(AR_f8, 1j) # E: No overload variant diff --git a/numpy/typing/tests/data/fail/lib_polynomial.pyi b/numpy/typing/tests/data/fail/lib_polynomial.pyi new file mode 100644 index 000000000..ca02d7bde --- /dev/null +++ b/numpy/typing/tests/data/fail/lib_polynomial.pyi @@ -0,0 +1,29 @@ +import numpy as np +import numpy.typing as npt + +AR_f8: npt.NDArray[np.float64] +AR_c16: npt.NDArray[np.complex128] +AR_O: npt.NDArray[np.object_] +AR_U: npt.NDArray[np.str_] + +poly_obj: np.poly1d + +np.polyint(AR_U) # E: incompatible type +np.polyint(AR_f8, m=1j) # E: No overload variant + +np.polyder(AR_U) # E: incompatible type +np.polyder(AR_f8, m=1j) # E: No overload variant + +np.polyfit(AR_O, AR_f8, 1) # E: incompatible type +np.polyfit(AR_f8, AR_f8, 1, rcond=1j) # E: No overload variant +np.polyfit(AR_f8, AR_f8, 1, w=AR_c16) # E: incompatible type +np.polyfit(AR_f8, AR_f8, 1, cov="bob") # E: No overload variant + +np.polyval(AR_f8, AR_U) # E: incompatible type +np.polyadd(AR_f8, AR_U) # E: incompatible type +np.polysub(AR_f8, AR_U) # E: incompatible type +np.polymul(AR_f8, AR_U) # E: incompatible type +np.polydiv(AR_f8, AR_U) # E: incompatible type + +5**poly_obj # E: No overload variant +hash(poly_obj) diff --git a/numpy/typing/tests/data/fail/lib_utils.py b/numpy/typing/tests/data/fail/lib_utils.pyi index e16c926aa..e16c926aa 100644 --- a/numpy/typing/tests/data/fail/lib_utils.py +++ b/numpy/typing/tests/data/fail/lib_utils.pyi diff --git a/numpy/typing/tests/data/fail/lib_version.py b/numpy/typing/tests/data/fail/lib_version.pyi index 2758cfe40..2758cfe40 100644 --- a/numpy/typing/tests/data/fail/lib_version.py +++ b/numpy/typing/tests/data/fail/lib_version.pyi diff --git a/numpy/typing/tests/data/fail/linalg.pyi 
b/numpy/typing/tests/data/fail/linalg.pyi new file mode 100644 index 000000000..da9390328 --- /dev/null +++ b/numpy/typing/tests/data/fail/linalg.pyi @@ -0,0 +1,48 @@ +import numpy as np +import numpy.typing as npt + +AR_f8: npt.NDArray[np.float64] +AR_O: npt.NDArray[np.object_] +AR_M: npt.NDArray[np.datetime64] + +np.linalg.tensorsolve(AR_O, AR_O) # E: incompatible type + +np.linalg.solve(AR_O, AR_O) # E: incompatible type + +np.linalg.tensorinv(AR_O) # E: incompatible type + +np.linalg.inv(AR_O) # E: incompatible type + +np.linalg.matrix_power(AR_M, 5) # E: incompatible type + +np.linalg.cholesky(AR_O) # E: incompatible type + +np.linalg.qr(AR_O) # E: incompatible type +np.linalg.qr(AR_f8, mode="bob") # E: No overload variant + +np.linalg.eigvals(AR_O) # E: incompatible type + +np.linalg.eigvalsh(AR_O) # E: incompatible type +np.linalg.eigvalsh(AR_O, UPLO="bob") # E: No overload variant + +np.linalg.eig(AR_O) # E: incompatible type + +np.linalg.eigh(AR_O) # E: incompatible type +np.linalg.eigh(AR_O, UPLO="bob") # E: No overload variant + +np.linalg.svd(AR_O) # E: incompatible type + +np.linalg.cond(AR_O) # E: incompatible type +np.linalg.cond(AR_f8, p="bob") # E: incompatible type + +np.linalg.matrix_rank(AR_O) # E: incompatible type + +np.linalg.pinv(AR_O) # E: incompatible type + +np.linalg.slogdet(AR_O) # E: incompatible type + +np.linalg.det(AR_O) # E: incompatible type + +np.linalg.norm(AR_f8, ord="bob") # E: No overload variant + +np.linalg.multi_dot([AR_M]) # E: incompatible type diff --git a/numpy/typing/tests/data/fail/memmap.pyi b/numpy/typing/tests/data/fail/memmap.pyi new file mode 100644 index 000000000..434870b60 --- /dev/null +++ b/numpy/typing/tests/data/fail/memmap.pyi @@ -0,0 +1,5 @@ +import numpy as np + +with open("file.txt", "r") as f: + np.memmap(f) # E: No overload variant +np.memmap("test.txt", shape=[10, 5]) # E: No overload variant diff --git a/numpy/typing/tests/data/fail/modules.py b/numpy/typing/tests/data/fail/modules.pyi index 59e724f22..59e724f22 100644 --- a/numpy/typing/tests/data/fail/modules.py +++ b/numpy/typing/tests/data/fail/modules.pyi diff --git a/numpy/typing/tests/data/fail/multiarray.py b/numpy/typing/tests/data/fail/multiarray.pyi index 50361ec43..22bcf8c92 100644 --- a/numpy/typing/tests/data/fail/multiarray.py +++ b/numpy/typing/tests/data/fail/multiarray.pyi @@ -42,8 +42,15 @@ np.datetime_data(int) # E: incompatible type np.busday_offset("2012", 10) # E: incompatible type -np.datetime_as_string("2012") # E: incompatible type +np.datetime_as_string("2012") # E: No overload variant np.compare_chararrays("a", b"a", "==", False) # E: No overload variant np.add_docstring(func, None) # E: incompatible type + +np.nested_iters([AR_i8, AR_i8]) # E: Missing positional argument +np.nested_iters([AR_i8, AR_i8], 0) # E: incompatible type +np.nested_iters([AR_i8, AR_i8], [0]) # E: incompatible type +np.nested_iters([AR_i8, AR_i8], [[0], [1]], flags=["test"]) # E: incompatible type +np.nested_iters([AR_i8, AR_i8], [[0], [1]], op_flags=[["test"]]) # E: incompatible type +np.nested_iters([AR_i8, AR_i8], [[0], [1]], buffersize=1.0) # E: incompatible type diff --git a/numpy/typing/tests/data/fail/ndarray.py b/numpy/typing/tests/data/fail/ndarray.pyi index 5a5130d40..5a5130d40 100644 --- a/numpy/typing/tests/data/fail/ndarray.py +++ b/numpy/typing/tests/data/fail/ndarray.pyi diff --git a/numpy/typing/tests/data/fail/ndarray_misc.py b/numpy/typing/tests/data/fail/ndarray_misc.pyi index cf3fedc45..8320a44f3 100644 --- 
a/numpy/typing/tests/data/fail/ndarray_misc.py +++ b/numpy/typing/tests/data/fail/ndarray_misc.pyi @@ -35,3 +35,7 @@ AR_M.__int__() # E: Invalid self argument AR_M.__float__() # E: Invalid self argument AR_M.__complex__() # E: Invalid self argument AR_b.__index__() # E: Invalid self argument + +AR_f8[1.5] # E: No overload variant +AR_f8["field_a"] # E: No overload variant +AR_f8[["field_a", "field_b"]] # E: Invalid index type diff --git a/numpy/typing/tests/data/fail/nditer.pyi b/numpy/typing/tests/data/fail/nditer.pyi new file mode 100644 index 000000000..1e8e37ee5 --- /dev/null +++ b/numpy/typing/tests/data/fail/nditer.pyi @@ -0,0 +1,8 @@ +import numpy as np + +class Test(np.nditer): ... # E: Cannot inherit from final class + +np.nditer([0, 1], flags=["test"]) # E: incompatible type +np.nditer([0, 1], op_flags=[["test"]]) # E: incompatible type +np.nditer([0, 1], itershape=(1.0,)) # E: incompatible type +np.nditer([0, 1], buffersize=1.0) # E: incompatible type diff --git a/numpy/typing/tests/data/fail/nested_sequence.pyi b/numpy/typing/tests/data/fail/nested_sequence.pyi new file mode 100644 index 000000000..e28661a05 --- /dev/null +++ b/numpy/typing/tests/data/fail/nested_sequence.pyi @@ -0,0 +1,17 @@ +from typing import Sequence, Tuple, List +import numpy.typing as npt + +a: Sequence[float] +b: List[complex] +c: Tuple[str, ...] +d: int +e: str + +def func(a: npt._NestedSequence[int]) -> None: + ... + +reveal_type(func(a)) # E: incompatible type +reveal_type(func(b)) # E: incompatible type +reveal_type(func(c)) # E: incompatible type +reveal_type(func(d)) # E: incompatible type +reveal_type(func(e)) # E: incompatible type diff --git a/numpy/typing/tests/data/fail/npyio.py b/numpy/typing/tests/data/fail/npyio.pyi index c91b4c9cb..c91b4c9cb 100644 --- a/numpy/typing/tests/data/fail/npyio.py +++ b/numpy/typing/tests/data/fail/npyio.pyi diff --git a/numpy/typing/tests/data/fail/numerictypes.py b/numpy/typing/tests/data/fail/numerictypes.pyi index 9a81cd9dc..a5c2814ef 100644 --- a/numpy/typing/tests/data/fail/numerictypes.py +++ b/numpy/typing/tests/data/fail/numerictypes.pyi @@ -1,6 +1,6 @@ import numpy as np -# Techincally this works, but probably shouldn't. See +# Technically this works, but probably shouldn't. 
See # # https://github.com/numpy/numpy/issues/16366 # diff --git a/numpy/typing/tests/data/fail/random.py b/numpy/typing/tests/data/fail/random.pyi index c4d1e3e3e..c4d1e3e3e 100644 --- a/numpy/typing/tests/data/fail/random.py +++ b/numpy/typing/tests/data/fail/random.pyi diff --git a/numpy/typing/tests/data/fail/rec.pyi b/numpy/typing/tests/data/fail/rec.pyi new file mode 100644 index 000000000..a57f1ba27 --- /dev/null +++ b/numpy/typing/tests/data/fail/rec.pyi @@ -0,0 +1,17 @@ +import numpy as np +import numpy.typing as npt + +AR_i8: npt.NDArray[np.int64] + +np.rec.fromarrays(1) # E: No overload variant +np.rec.fromarrays([1, 2, 3], dtype=[("f8", "f8")], formats=["f8", "f8"]) # E: No overload variant + +np.rec.fromrecords(AR_i8) # E: incompatible type +np.rec.fromrecords([(1.5,)], dtype=[("f8", "f8")], formats=["f8", "f8"]) # E: No overload variant + +np.rec.fromstring("string", dtype=[("f8", "f8")]) # E: No overload variant +np.rec.fromstring(b"bytes") # E: No overload variant +np.rec.fromstring(b"(1.5,)", dtype=[("f8", "f8")], formats=["f8", "f8"]) # E: No overload variant + +with open("test", "r") as f: + np.rec.fromfile(f, dtype=[("f8", "f8")]) # E: No overload variant diff --git a/numpy/typing/tests/data/fail/scalars.py b/numpy/typing/tests/data/fail/scalars.pyi index 94fe3f71e..94fe3f71e 100644 --- a/numpy/typing/tests/data/fail/scalars.py +++ b/numpy/typing/tests/data/fail/scalars.pyi diff --git a/numpy/typing/tests/data/fail/stride_tricks.py b/numpy/typing/tests/data/fail/stride_tricks.pyi index f2bfba743..f2bfba743 100644 --- a/numpy/typing/tests/data/fail/stride_tricks.py +++ b/numpy/typing/tests/data/fail/stride_tricks.pyi diff --git a/numpy/typing/tests/data/fail/testing.py b/numpy/typing/tests/data/fail/testing.pyi index e753a9810..e753a9810 100644 --- a/numpy/typing/tests/data/fail/testing.py +++ b/numpy/typing/tests/data/fail/testing.pyi diff --git a/numpy/typing/tests/data/fail/twodim_base.py b/numpy/typing/tests/data/fail/twodim_base.pyi index ab34a374c..ab34a374c 100644 --- a/numpy/typing/tests/data/fail/twodim_base.py +++ b/numpy/typing/tests/data/fail/twodim_base.pyi diff --git a/numpy/typing/tests/data/fail/type_check.py b/numpy/typing/tests/data/fail/type_check.pyi index 95f52bfbd..95f52bfbd 100644 --- a/numpy/typing/tests/data/fail/type_check.py +++ b/numpy/typing/tests/data/fail/type_check.pyi diff --git a/numpy/typing/tests/data/fail/ufunc_config.py b/numpy/typing/tests/data/fail/ufunc_config.pyi index f547fbb46..f547fbb46 100644 --- a/numpy/typing/tests/data/fail/ufunc_config.py +++ b/numpy/typing/tests/data/fail/ufunc_config.pyi diff --git a/numpy/typing/tests/data/fail/ufunclike.py b/numpy/typing/tests/data/fail/ufunclike.pyi index 82a5f3a1d..82a5f3a1d 100644 --- a/numpy/typing/tests/data/fail/ufunclike.py +++ b/numpy/typing/tests/data/fail/ufunclike.pyi diff --git a/numpy/typing/tests/data/fail/ufuncs.py b/numpy/typing/tests/data/fail/ufuncs.pyi index e827267c6..e827267c6 100644 --- a/numpy/typing/tests/data/fail/ufuncs.py +++ b/numpy/typing/tests/data/fail/ufuncs.pyi diff --git a/numpy/typing/tests/data/fail/warnings_and_errors.py b/numpy/typing/tests/data/fail/warnings_and_errors.pyi index f4fa38293..f4fa38293 100644 --- a/numpy/typing/tests/data/fail/warnings_and_errors.py +++ b/numpy/typing/tests/data/fail/warnings_and_errors.pyi diff --git a/numpy/typing/tests/data/misc/extended_precision.py b/numpy/typing/tests/data/misc/extended_precision.pyi index 1e495e4f3..1e495e4f3 100644 --- a/numpy/typing/tests/data/misc/extended_precision.py +++ 
b/numpy/typing/tests/data/misc/extended_precision.pyi diff --git a/numpy/typing/tests/data/pass/array_constructors.py b/numpy/typing/tests/data/pass/array_constructors.py index 206f70a15..2763d9c92 100644 --- a/numpy/typing/tests/data/pass/array_constructors.py +++ b/numpy/typing/tests/data/pass/array_constructors.py @@ -1,5 +1,5 @@ import sys -from typing import List, Any +from typing import Any import numpy as np diff --git a/numpy/typing/tests/data/pass/array_like.py b/numpy/typing/tests/data/pass/array_like.py index e16d196b6..5bd2fda20 100644 --- a/numpy/typing/tests/data/pass/array_like.py +++ b/numpy/typing/tests/data/pass/array_like.py @@ -1,4 +1,4 @@ -from typing import Any, List, Optional +from typing import Any, Optional import numpy as np from numpy.typing import ArrayLike, _SupportsArray diff --git a/numpy/typing/tests/data/pass/einsumfunc.py b/numpy/typing/tests/data/pass/einsumfunc.py index a2a39fb1c..429764e67 100644 --- a/numpy/typing/tests/data/pass/einsumfunc.py +++ b/numpy/typing/tests/data/pass/einsumfunc.py @@ -1,6 +1,6 @@ from __future__ import annotations -from typing import List, Any +from typing import Any import numpy as np diff --git a/numpy/typing/tests/data/pass/lib_utils.py b/numpy/typing/tests/data/pass/lib_utils.py index 0a15dad22..65640c288 100644 --- a/numpy/typing/tests/data/pass/lib_utils.py +++ b/numpy/typing/tests/data/pass/lib_utils.py @@ -1,7 +1,6 @@ from __future__ import annotations from io import StringIO -from typing import Any import numpy as np diff --git a/numpy/typing/tests/data/pass/multiarray.py b/numpy/typing/tests/data/pass/multiarray.py index e5d33c673..26cedfd77 100644 --- a/numpy/typing/tests/data/pass/multiarray.py +++ b/numpy/typing/tests/data/pass/multiarray.py @@ -1,4 +1,3 @@ -from typing import Any import numpy as np import numpy.typing as npt diff --git a/numpy/typing/tests/data/pass/simple.py b/numpy/typing/tests/data/pass/simple.py index 243caf229..85965e0de 100644 --- a/numpy/typing/tests/data/pass/simple.py +++ b/numpy/typing/tests/data/pass/simple.py @@ -47,7 +47,7 @@ np.dtype(object_dtype) np.dtype((np.int32, (np.int8, 4))) -# Dtype comparision +# Dtype comparison np.dtype(float) == float np.dtype(float) != np.float64 np.dtype(float) < None diff --git a/numpy/typing/tests/data/reveal/arithmetic.py b/numpy/typing/tests/data/reveal/arithmetic.pyi index 0d9132e5b..0d9132e5b 100644 --- a/numpy/typing/tests/data/reveal/arithmetic.py +++ b/numpy/typing/tests/data/reveal/arithmetic.pyi diff --git a/numpy/typing/tests/data/reveal/array_constructors.py b/numpy/typing/tests/data/reveal/array_constructors.pyi index 44c85e988..0aea4ea96 100644 --- a/numpy/typing/tests/data/reveal/array_constructors.py +++ b/numpy/typing/tests/data/reveal/array_constructors.pyi @@ -120,32 +120,41 @@ reveal_type(np.logspace(0, 10)) # E: numpy.ndarray[Any, Any] reveal_type(np.geomspace(1, 10)) # E: numpy.ndarray[Any, Any] reveal_type(np.zeros_like(A)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]] -reveal_type(np.zeros_like(C)) # E: numpy.ndarray[Any, Any] -reveal_type(np.zeros_like(B)) # E: SubClass -reveal_type(np.zeros_like(B, dtype=np.int64)) # E: numpy.ndarray[Any, Any] +reveal_type(np.zeros_like(C)) # E: numpy.ndarray[Any, numpy.dtype[Any]] +reveal_type(np.zeros_like(A, dtype=float)) # E: numpy.ndarray[Any, numpy.dtype[Any]] +reveal_type(np.zeros_like(B)) # E: SubClass[{float64}] +reveal_type(np.zeros_like(B, dtype=np.int64)) # E: numpy.ndarray[Any, numpy.dtype[{int64}]] reveal_type(np.ones_like(A)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]] 
-reveal_type(np.ones_like(C)) # E: numpy.ndarray[Any, Any] -reveal_type(np.ones_like(B)) # E: SubClass -reveal_type(np.ones_like(B, dtype=np.int64)) # E: numpy.ndarray[Any, Any] +reveal_type(np.ones_like(C)) # E: numpy.ndarray[Any, numpy.dtype[Any]] +reveal_type(np.ones_like(A, dtype=float)) # E: numpy.ndarray[Any, numpy.dtype[Any]] +reveal_type(np.ones_like(B)) # E: SubClass[{float64}] +reveal_type(np.ones_like(B, dtype=np.int64)) # E: numpy.ndarray[Any, numpy.dtype[{int64}]] reveal_type(np.full_like(A, i8)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]] -reveal_type(np.full_like(C, i8)) # E: numpy.ndarray[Any, Any] +reveal_type(np.full_like(C, i8)) # E: numpy.ndarray[Any, numpy.dtype[Any]] +reveal_type(np.full_like(A, i8, dtype=int)) # E: numpy.ndarray[Any, numpy.dtype[Any]] reveal_type(np.full_like(B, i8)) # E: SubClass[{float64}] -reveal_type(np.full_like(B, i8, dtype=np.int64)) # E: numpy.ndarray[Any, Any] +reveal_type(np.full_like(B, i8, dtype=np.int64)) # E: numpy.ndarray[Any, numpy.dtype[{int64}]] -reveal_type(np.ones(1)) # E: numpy.ndarray[Any, Any] -reveal_type(np.ones([1, 1, 1])) # E: numpy.ndarray[Any, Any] +reveal_type(np.ones(1)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]] +reveal_type(np.ones([1, 1, 1])) # E: numpy.ndarray[Any, numpy.dtype[{float64}]] +reveal_type(np.ones(5, dtype=np.int64)) # E: numpy.ndarray[Any, numpy.dtype[{int64}]] +reveal_type(np.ones(5, dtype=int)) # E: numpy.ndarray[Any, numpy.dtype[Any]] -reveal_type(np.full(1, i8)) # E: numpy.ndarray[Any, Any] -reveal_type(np.full([1, 1, 1], i8)) # E: numpy.ndarray[Any, Any] +reveal_type(np.full(1, i8)) # E: numpy.ndarray[Any, numpy.dtype[Any]] +reveal_type(np.full([1, 1, 1], i8)) # E: numpy.ndarray[Any, numpy.dtype[Any]] +reveal_type(np.full(1, i8, dtype=np.float64)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]] +reveal_type(np.full(1, i8, dtype=float)) # E: numpy.ndarray[Any, numpy.dtype[Any]] -reveal_type(np.indices([1, 2, 3])) # E: numpy.ndarray[Any, Any] -reveal_type(np.indices([1, 2, 3], sparse=True)) # E: tuple[numpy.ndarray[Any, Any]] +reveal_type(np.indices([1, 2, 3])) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(np.indices([1, 2, 3], sparse=True)) # E: tuple[numpy.ndarray[Any, numpy.dtype[{int_}]]] reveal_type(np.fromfunction(func, (3, 5))) # E: SubClass[{float64}] -reveal_type(np.identity(10)) # E: numpy.ndarray[Any, Any] +reveal_type(np.identity(10)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]] +reveal_type(np.identity(10, dtype=np.int64)) # E: numpy.ndarray[Any, numpy.dtype[{int64}]] +reveal_type(np.identity(10, dtype=int)) # E: numpy.ndarray[Any, numpy.dtype[Any]] reveal_type(np.atleast_1d(A)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]] reveal_type(np.atleast_1d(C)) # E: numpy.ndarray[Any, numpy.dtype[Any]] diff --git a/numpy/typing/tests/data/reveal/arraypad.py b/numpy/typing/tests/data/reveal/arraypad.pyi index 03c03fb4e..03c03fb4e 100644 --- a/numpy/typing/tests/data/reveal/arraypad.py +++ b/numpy/typing/tests/data/reveal/arraypad.pyi diff --git a/numpy/typing/tests/data/reveal/arrayprint.py b/numpy/typing/tests/data/reveal/arrayprint.pyi index e797097eb..e797097eb 100644 --- a/numpy/typing/tests/data/reveal/arrayprint.py +++ b/numpy/typing/tests/data/reveal/arrayprint.pyi diff --git a/numpy/typing/tests/data/reveal/arraysetops.pyi b/numpy/typing/tests/data/reveal/arraysetops.pyi new file mode 100644 index 000000000..c8aeb03ab --- /dev/null +++ b/numpy/typing/tests/data/reveal/arraysetops.pyi @@ -0,0 +1,60 @@ +import numpy as np +import numpy.typing as npt + +AR_b: 
npt.NDArray[np.bool_] +AR_i8: npt.NDArray[np.int64] +AR_f8: npt.NDArray[np.float64] +AR_M: npt.NDArray[np.datetime64] +AR_O: npt.NDArray[np.object_] + +AR_LIKE_f8: list[float] + +reveal_type(np.ediff1d(AR_b)) # E: numpy.ndarray[Any, numpy.dtype[{int8}]] +reveal_type(np.ediff1d(AR_i8, to_end=[1, 2, 3])) # E: numpy.ndarray[Any, numpy.dtype[{int64}]] +reveal_type(np.ediff1d(AR_M)) # E: numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]] +reveal_type(np.ediff1d(AR_O)) # E: numpy.ndarray[Any, numpy.dtype[numpy.object_]] +reveal_type(np.ediff1d(AR_LIKE_f8, to_begin=[1, 1.5])) # E: numpy.ndarray[Any, numpy.dtype[Any]] + +reveal_type(np.intersect1d(AR_i8, AR_i8)) # E: numpy.ndarray[Any, numpy.dtype[{int64}]] +reveal_type(np.intersect1d(AR_M, AR_M, assume_unique=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.datetime64]] +reveal_type(np.intersect1d(AR_f8, AR_i8)) # E: numpy.ndarray[Any, numpy.dtype[Any]] +reveal_type(np.intersect1d(AR_f8, AR_f8, return_indices=True)) # E: Tuple[numpy.ndarray[Any, numpy.dtype[{float64}]], numpy.ndarray[Any, numpy.dtype[{intp}]], numpy.ndarray[Any, numpy.dtype[{intp}]]] + +reveal_type(np.setxor1d(AR_i8, AR_i8)) # E: numpy.ndarray[Any, numpy.dtype[{int64}]] +reveal_type(np.setxor1d(AR_M, AR_M, assume_unique=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.datetime64]] +reveal_type(np.setxor1d(AR_f8, AR_i8)) # E: numpy.ndarray[Any, numpy.dtype[Any]] + +reveal_type(np.in1d(AR_i8, AR_i8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(np.in1d(AR_M, AR_M, assume_unique=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(np.in1d(AR_f8, AR_i8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(np.in1d(AR_f8, AR_LIKE_f8, invert=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] + +reveal_type(np.isin(AR_i8, AR_i8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(np.isin(AR_M, AR_M, assume_unique=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(np.isin(AR_f8, AR_i8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(np.isin(AR_f8, AR_LIKE_f8, invert=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] + +reveal_type(np.union1d(AR_i8, AR_i8)) # E: numpy.ndarray[Any, numpy.dtype[{int64}]] +reveal_type(np.union1d(AR_M, AR_M)) # E: numpy.ndarray[Any, numpy.dtype[numpy.datetime64]] +reveal_type(np.union1d(AR_f8, AR_i8)) # E: numpy.ndarray[Any, numpy.dtype[Any]] + +reveal_type(np.setdiff1d(AR_i8, AR_i8)) # E: numpy.ndarray[Any, numpy.dtype[{int64}]] +reveal_type(np.setdiff1d(AR_M, AR_M, assume_unique=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.datetime64]] +reveal_type(np.setdiff1d(AR_f8, AR_i8)) # E: numpy.ndarray[Any, numpy.dtype[Any]] + +reveal_type(np.unique(AR_f8)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]] +reveal_type(np.unique(AR_LIKE_f8, axis=0)) # E: numpy.ndarray[Any, numpy.dtype[Any]] +reveal_type(np.unique(AR_f8, return_index=True)) # E: Tuple[numpy.ndarray[Any, numpy.dtype[{float64}]], numpy.ndarray[Any, numpy.dtype[{intp}]]] +reveal_type(np.unique(AR_LIKE_f8, return_index=True)) # E: Tuple[numpy.ndarray[Any, numpy.dtype[Any]], numpy.ndarray[Any, numpy.dtype[{intp}]]] +reveal_type(np.unique(AR_f8, return_inverse=True)) # E: Tuple[numpy.ndarray[Any, numpy.dtype[{float64}]], numpy.ndarray[Any, numpy.dtype[{intp}]]] +reveal_type(np.unique(AR_LIKE_f8, return_inverse=True)) # E: Tuple[numpy.ndarray[Any, numpy.dtype[Any]], numpy.ndarray[Any, numpy.dtype[{intp}]]] +reveal_type(np.unique(AR_f8, return_counts=True)) # E: Tuple[numpy.ndarray[Any, 
numpy.dtype[{float64}]], numpy.ndarray[Any, numpy.dtype[{intp}]]] +reveal_type(np.unique(AR_LIKE_f8, return_counts=True)) # E: Tuple[numpy.ndarray[Any, numpy.dtype[Any]], numpy.ndarray[Any, numpy.dtype[{intp}]]] +reveal_type(np.unique(AR_f8, return_index=True, return_inverse=True)) # E: Tuple[numpy.ndarray[Any, numpy.dtype[{float64}]], numpy.ndarray[Any, numpy.dtype[{intp}]], numpy.ndarray[Any, numpy.dtype[{intp}]]] +reveal_type(np.unique(AR_LIKE_f8, return_index=True, return_inverse=True)) # E: Tuple[numpy.ndarray[Any, numpy.dtype[Any]], numpy.ndarray[Any, numpy.dtype[{intp}]], numpy.ndarray[Any, numpy.dtype[{intp}]]] +reveal_type(np.unique(AR_f8, return_index=True, return_counts=True)) # E: Tuple[numpy.ndarray[Any, numpy.dtype[{float64}]], numpy.ndarray[Any, numpy.dtype[{intp}]], numpy.ndarray[Any, numpy.dtype[{intp}]]] +reveal_type(np.unique(AR_LIKE_f8, return_index=True, return_counts=True)) # E: Tuple[numpy.ndarray[Any, numpy.dtype[Any]], numpy.ndarray[Any, numpy.dtype[{intp}]], numpy.ndarray[Any, numpy.dtype[{intp}]]] +reveal_type(np.unique(AR_f8, return_inverse=True, return_counts=True)) # E: Tuple[numpy.ndarray[Any, numpy.dtype[{float64}]], numpy.ndarray[Any, numpy.dtype[{intp}]], numpy.ndarray[Any, numpy.dtype[{intp}]]] +reveal_type(np.unique(AR_LIKE_f8, return_inverse=True, return_counts=True)) # E: Tuple[numpy.ndarray[Any, numpy.dtype[Any]], numpy.ndarray[Any, numpy.dtype[{intp}]], numpy.ndarray[Any, numpy.dtype[{intp}]]] +reveal_type(np.unique(AR_f8, return_index=True, return_inverse=True, return_counts=True)) # E: Tuple[numpy.ndarray[Any, numpy.dtype[{float64}]], numpy.ndarray[Any, numpy.dtype[{intp}]], numpy.ndarray[Any, numpy.dtype[{intp}]], numpy.ndarray[Any, numpy.dtype[{intp}]]] +reveal_type(np.unique(AR_LIKE_f8, return_index=True, return_inverse=True, return_counts=True)) # E: Tuple[numpy.ndarray[Any, numpy.dtype[Any]], numpy.ndarray[Any, numpy.dtype[{intp}]], numpy.ndarray[Any, numpy.dtype[{intp}]], numpy.ndarray[Any, numpy.dtype[{intp}]]] diff --git a/numpy/typing/tests/data/reveal/arrayterator.py b/numpy/typing/tests/data/reveal/arrayterator.pyi index ea4e75612..ea4e75612 100644 --- a/numpy/typing/tests/data/reveal/arrayterator.py +++ b/numpy/typing/tests/data/reveal/arrayterator.pyi diff --git a/numpy/typing/tests/data/reveal/bitwise_ops.py b/numpy/typing/tests/data/reveal/bitwise_ops.pyi index 6b9969568..6b9969568 100644 --- a/numpy/typing/tests/data/reveal/bitwise_ops.py +++ b/numpy/typing/tests/data/reveal/bitwise_ops.pyi diff --git a/numpy/typing/tests/data/reveal/char.pyi b/numpy/typing/tests/data/reveal/char.pyi new file mode 100644 index 000000000..dd2e76a2d --- /dev/null +++ b/numpy/typing/tests/data/reveal/char.pyi @@ -0,0 +1,147 @@ +import numpy as np +import numpy.typing as npt +from typing import Sequence + +AR_U: npt.NDArray[np.str_] +AR_S: npt.NDArray[np.bytes_] + +reveal_type(np.char.equal(AR_U, AR_U)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(np.char.equal(AR_S, AR_S)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] + +reveal_type(np.char.not_equal(AR_U, AR_U)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(np.char.not_equal(AR_S, AR_S)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] + +reveal_type(np.char.greater_equal(AR_U, AR_U)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(np.char.greater_equal(AR_S, AR_S)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] + +reveal_type(np.char.less_equal(AR_U, AR_U)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(np.char.less_equal(AR_S, 
AR_S)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] + +reveal_type(np.char.greater(AR_U, AR_U)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(np.char.greater(AR_S, AR_S)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] + +reveal_type(np.char.less(AR_U, AR_U)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(np.char.less(AR_S, AR_S)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] + +reveal_type(np.char.multiply(AR_U, 5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]] +reveal_type(np.char.multiply(AR_S, [5, 4, 3])) # E: numpy.ndarray[Any, numpy.dtype[numpy.bytes_]] + +reveal_type(np.char.mod(AR_U, "test")) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]] +reveal_type(np.char.mod(AR_S, "test")) # E: numpy.ndarray[Any, numpy.dtype[numpy.bytes_]] + +reveal_type(np.char.capitalize(AR_U)) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]] +reveal_type(np.char.capitalize(AR_S)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bytes_]] + +reveal_type(np.char.center(AR_U, 5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]] +reveal_type(np.char.center(AR_S, [2, 3, 4], b"a")) # E: numpy.ndarray[Any, numpy.dtype[numpy.bytes_]] + +reveal_type(np.char.encode(AR_U)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bytes_]] +reveal_type(np.char.decode(AR_S)) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]] + +reveal_type(np.char.expandtabs(AR_U)) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]] +reveal_type(np.char.expandtabs(AR_S, tabsize=4)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bytes_]] + +reveal_type(np.char.join(AR_U, "_")) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]] +reveal_type(np.char.join(AR_S, [b"_", b""])) # E: numpy.ndarray[Any, numpy.dtype[numpy.bytes_]] + +reveal_type(np.char.ljust(AR_U, 5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]] +reveal_type(np.char.ljust(AR_S, [4, 3, 1], fillchar=[b"a", b"b", b"c"])) # E: numpy.ndarray[Any, numpy.dtype[numpy.bytes_]] +reveal_type(np.char.rjust(AR_U, 5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]] +reveal_type(np.char.rjust(AR_S, [4, 3, 1], fillchar=[b"a", b"b", b"c"])) # E: numpy.ndarray[Any, numpy.dtype[numpy.bytes_]] + +reveal_type(np.char.lstrip(AR_U)) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]] +reveal_type(np.char.lstrip(AR_S, chars=b"_")) # E: numpy.ndarray[Any, numpy.dtype[numpy.bytes_]] +reveal_type(np.char.rstrip(AR_U)) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]] +reveal_type(np.char.rstrip(AR_S, chars=b"_")) # E: numpy.ndarray[Any, numpy.dtype[numpy.bytes_]] +reveal_type(np.char.strip(AR_U)) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]] +reveal_type(np.char.strip(AR_S, chars=b"_")) # E: numpy.ndarray[Any, numpy.dtype[numpy.bytes_]] + +reveal_type(np.char.partition(AR_U, "\n")) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]] +reveal_type(np.char.partition(AR_S, [b"a", b"b", b"c"])) # E: numpy.ndarray[Any, numpy.dtype[numpy.bytes_]] +reveal_type(np.char.rpartition(AR_U, "\n")) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]] +reveal_type(np.char.rpartition(AR_S, [b"a", b"b", b"c"])) # E: numpy.ndarray[Any, numpy.dtype[numpy.bytes_]] + +reveal_type(np.char.replace(AR_U, "_", "-")) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]] +reveal_type(np.char.replace(AR_S, [b"_", b""], [b"a", b"b"])) # E: numpy.ndarray[Any, numpy.dtype[numpy.bytes_]] + +reveal_type(np.char.split(AR_U, "_")) # E: numpy.ndarray[Any, numpy.dtype[numpy.object_]] +reveal_type(np.char.split(AR_S, maxsplit=[1, 2, 3])) # E: numpy.ndarray[Any, numpy.dtype[numpy.object_]] +reveal_type(np.char.rsplit(AR_U, "_")) # E: 
numpy.ndarray[Any, numpy.dtype[numpy.object_]] +reveal_type(np.char.rsplit(AR_S, maxsplit=[1, 2, 3])) # E: numpy.ndarray[Any, numpy.dtype[numpy.object_]] + +reveal_type(np.char.splitlines(AR_U)) # E: numpy.ndarray[Any, numpy.dtype[numpy.object_]] +reveal_type(np.char.splitlines(AR_S, keepends=[True, True, False])) # E: numpy.ndarray[Any, numpy.dtype[numpy.object_]] + +reveal_type(np.char.swapcase(AR_U)) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]] +reveal_type(np.char.swapcase(AR_S)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bytes_]] + +reveal_type(np.char.title(AR_U)) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]] +reveal_type(np.char.title(AR_S)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bytes_]] + +reveal_type(np.char.upper(AR_U)) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]] +reveal_type(np.char.upper(AR_S)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bytes_]] + +reveal_type(np.char.zfill(AR_U, 5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]] +reveal_type(np.char.zfill(AR_S, [2, 3, 4])) # E: numpy.ndarray[Any, numpy.dtype[numpy.bytes_]] + +reveal_type(np.char.count(AR_U, "a", start=[1, 2, 3])) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(np.char.count(AR_S, [b"a", b"b", b"c"], end=9)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] + +reveal_type(np.char.endswith(AR_U, "a", start=[1, 2, 3])) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(np.char.endswith(AR_S, [b"a", b"b", b"c"], end=9)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(np.char.startswith(AR_U, "a", start=[1, 2, 3])) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(np.char.startswith(AR_S, [b"a", b"b", b"c"], end=9)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] + +reveal_type(np.char.find(AR_U, "a", start=[1, 2, 3])) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(np.char.find(AR_S, [b"a", b"b", b"c"], end=9)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(np.char.rfind(AR_U, "a", start=[1, 2, 3])) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(np.char.rfind(AR_S, [b"a", b"b", b"c"], end=9)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] + +reveal_type(np.char.index(AR_U, "a", start=[1, 2, 3])) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(np.char.index(AR_S, [b"a", b"b", b"c"], end=9)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(np.char.rindex(AR_U, "a", start=[1, 2, 3])) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(np.char.rindex(AR_S, [b"a", b"b", b"c"], end=9)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] + +reveal_type(np.char.isalpha(AR_U)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(np.char.isalpha(AR_S)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] + +reveal_type(np.char.isalnum(AR_U)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(np.char.isalnum(AR_S)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] + +reveal_type(np.char.isdecimal(AR_U)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(np.char.isdecimal(AR_S)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] + +reveal_type(np.char.isdigit(AR_U)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(np.char.isdigit(AR_S)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] + +reveal_type(np.char.islower(AR_U)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(np.char.islower(AR_S)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] + +reveal_type(np.char.isnumeric(AR_U)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] 
+reveal_type(np.char.isnumeric(AR_S)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] + +reveal_type(np.char.isspace(AR_U)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(np.char.isspace(AR_S)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] + +reveal_type(np.char.istitle(AR_U)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(np.char.istitle(AR_S)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] + +reveal_type(np.char.isupper(AR_U)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(np.char.isupper(AR_S)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] + +reveal_type(np.char.str_len(AR_U)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(np.char.str_len(AR_S)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] + +reveal_type(np.char.array(AR_U)) # E: numpy.chararray[Any, numpy.dtype[numpy.str_]] +reveal_type(np.char.array(AR_S, order="K")) # E: numpy.chararray[Any, numpy.dtype[numpy.bytes_]] +reveal_type(np.char.array("bob", copy=True)) # E: numpy.chararray[Any, numpy.dtype[numpy.str_]] +reveal_type(np.char.array(b"bob", itemsize=5)) # E: numpy.chararray[Any, numpy.dtype[numpy.bytes_]] +reveal_type(np.char.array(1, unicode=False)) # E: numpy.chararray[Any, numpy.dtype[numpy.bytes_]] +reveal_type(np.char.array(1, unicode=True)) # E: numpy.chararray[Any, numpy.dtype[numpy.str_]] + +reveal_type(np.char.asarray(AR_U)) # E: numpy.chararray[Any, numpy.dtype[numpy.str_]] +reveal_type(np.char.asarray(AR_S, order="K")) # E: numpy.chararray[Any, numpy.dtype[numpy.bytes_]] +reveal_type(np.char.asarray("bob")) # E: numpy.chararray[Any, numpy.dtype[numpy.str_]] +reveal_type(np.char.asarray(b"bob", itemsize=5)) # E: numpy.chararray[Any, numpy.dtype[numpy.bytes_]] +reveal_type(np.char.asarray(1, unicode=False)) # E: numpy.chararray[Any, numpy.dtype[numpy.bytes_]] +reveal_type(np.char.asarray(1, unicode=True)) # E: numpy.chararray[Any, numpy.dtype[numpy.str_]] diff --git a/numpy/typing/tests/data/reveal/chararray.pyi b/numpy/typing/tests/data/reveal/chararray.pyi new file mode 100644 index 000000000..c0a39c92b --- /dev/null +++ b/numpy/typing/tests/data/reveal/chararray.pyi @@ -0,0 +1,129 @@ +import numpy as np +from typing import Any + +AR_U: np.chararray[Any, np.dtype[np.str_]] +AR_S: np.chararray[Any, np.dtype[np.bytes_]] + +reveal_type(AR_U == AR_U) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(AR_S == AR_S) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] + +reveal_type(AR_U != AR_U) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(AR_S != AR_S) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] + +reveal_type(AR_U >= AR_U) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(AR_S >= AR_S) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] + +reveal_type(AR_U <= AR_U) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(AR_S <= AR_S) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] + +reveal_type(AR_U > AR_U) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(AR_S > AR_S) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] + +reveal_type(AR_U < AR_U) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(AR_S < AR_S) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] + +reveal_type(AR_U * 5) # E: numpy.chararray[Any, numpy.dtype[numpy.str_]] +reveal_type(AR_S * [5]) # E: numpy.chararray[Any, numpy.dtype[numpy.bytes_]] + +reveal_type(AR_U % "test") # E: numpy.chararray[Any, numpy.dtype[numpy.str_]] +reveal_type(AR_S % b"test") # E: numpy.chararray[Any, numpy.dtype[numpy.bytes_]] + 
+reveal_type(AR_U.capitalize()) # E: numpy.chararray[Any, numpy.dtype[numpy.str_]] +reveal_type(AR_S.capitalize()) # E: numpy.chararray[Any, numpy.dtype[numpy.bytes_]] + +reveal_type(AR_U.center(5)) # E: numpy.chararray[Any, numpy.dtype[numpy.str_]] +reveal_type(AR_S.center([2, 3, 4], b"a")) # E: numpy.chararray[Any, numpy.dtype[numpy.bytes_]] + +reveal_type(AR_U.encode()) # E: numpy.chararray[Any, numpy.dtype[numpy.bytes_]] +reveal_type(AR_S.decode()) # E: numpy.chararray[Any, numpy.dtype[numpy.str_]] + +reveal_type(AR_U.expandtabs()) # E: numpy.chararray[Any, numpy.dtype[numpy.str_]] +reveal_type(AR_S.expandtabs(tabsize=4)) # E: numpy.chararray[Any, numpy.dtype[numpy.bytes_]] + +reveal_type(AR_U.join("_")) # E: numpy.chararray[Any, numpy.dtype[numpy.str_]] +reveal_type(AR_S.join([b"_", b""])) # E: numpy.chararray[Any, numpy.dtype[numpy.bytes_]] + +reveal_type(AR_U.ljust(5)) # E: numpy.chararray[Any, numpy.dtype[numpy.str_]] +reveal_type(AR_S.ljust([4, 3, 1], fillchar=[b"a", b"b", b"c"])) # E: numpy.chararray[Any, numpy.dtype[numpy.bytes_]] +reveal_type(AR_U.rjust(5)) # E: numpy.chararray[Any, numpy.dtype[numpy.str_]] +reveal_type(AR_S.rjust([4, 3, 1], fillchar=[b"a", b"b", b"c"])) # E: numpy.chararray[Any, numpy.dtype[numpy.bytes_]] + +reveal_type(AR_U.lstrip()) # E: numpy.chararray[Any, numpy.dtype[numpy.str_]] +reveal_type(AR_S.lstrip(chars=b"_")) # E: numpy.chararray[Any, numpy.dtype[numpy.bytes_]] +reveal_type(AR_U.rstrip()) # E: numpy.chararray[Any, numpy.dtype[numpy.str_]] +reveal_type(AR_S.rstrip(chars=b"_")) # E: numpy.chararray[Any, numpy.dtype[numpy.bytes_]] +reveal_type(AR_U.strip()) # E: numpy.chararray[Any, numpy.dtype[numpy.str_]] +reveal_type(AR_S.strip(chars=b"_")) # E: numpy.chararray[Any, numpy.dtype[numpy.bytes_]] + +reveal_type(AR_U.partition("\n")) # E: numpy.chararray[Any, numpy.dtype[numpy.str_]] +reveal_type(AR_S.partition([b"a", b"b", b"c"])) # E: numpy.chararray[Any, numpy.dtype[numpy.bytes_]] +reveal_type(AR_U.rpartition("\n")) # E: numpy.chararray[Any, numpy.dtype[numpy.str_]] +reveal_type(AR_S.rpartition([b"a", b"b", b"c"])) # E: numpy.chararray[Any, numpy.dtype[numpy.bytes_]] + +reveal_type(AR_U.replace("_", "-")) # E: numpy.chararray[Any, numpy.dtype[numpy.str_]] +reveal_type(AR_S.replace([b"_", b""], [b"a", b"b"])) # E: numpy.chararray[Any, numpy.dtype[numpy.bytes_]] + +reveal_type(AR_U.split("_")) # E: numpy.ndarray[Any, numpy.dtype[numpy.object_]] +reveal_type(AR_S.split(maxsplit=[1, 2, 3])) # E: numpy.ndarray[Any, numpy.dtype[numpy.object_]] +reveal_type(AR_U.rsplit("_")) # E: numpy.ndarray[Any, numpy.dtype[numpy.object_]] +reveal_type(AR_S.rsplit(maxsplit=[1, 2, 3])) # E: numpy.ndarray[Any, numpy.dtype[numpy.object_]] + +reveal_type(AR_U.splitlines()) # E: numpy.ndarray[Any, numpy.dtype[numpy.object_]] +reveal_type(AR_S.splitlines(keepends=[True, True, False])) # E: numpy.ndarray[Any, numpy.dtype[numpy.object_]] + +reveal_type(AR_U.swapcase()) # E: numpy.chararray[Any, numpy.dtype[numpy.str_]] +reveal_type(AR_S.swapcase()) # E: numpy.chararray[Any, numpy.dtype[numpy.bytes_]] + +reveal_type(AR_U.title()) # E: numpy.chararray[Any, numpy.dtype[numpy.str_]] +reveal_type(AR_S.title()) # E: numpy.chararray[Any, numpy.dtype[numpy.bytes_]] + +reveal_type(AR_U.upper()) # E: numpy.chararray[Any, numpy.dtype[numpy.str_]] +reveal_type(AR_S.upper()) # E: numpy.chararray[Any, numpy.dtype[numpy.bytes_]] + +reveal_type(AR_U.zfill(5)) # E: numpy.chararray[Any, numpy.dtype[numpy.str_]] +reveal_type(AR_S.zfill([2, 3, 4])) # E: numpy.chararray[Any, 
numpy.dtype[numpy.bytes_]]
+
+reveal_type(AR_U.count("a", start=[1, 2, 3]))  # E: numpy.ndarray[Any, numpy.dtype[{int_}]]
+reveal_type(AR_S.count([b"a", b"b", b"c"], end=9))  # E: numpy.ndarray[Any, numpy.dtype[{int_}]]
+
+reveal_type(AR_U.endswith("a", start=[1, 2, 3]))  # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
+reveal_type(AR_S.endswith([b"a", b"b", b"c"], end=9))  # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
+reveal_type(AR_U.startswith("a", start=[1, 2, 3]))  # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
+reveal_type(AR_S.startswith([b"a", b"b", b"c"], end=9))  # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
+
+reveal_type(AR_U.find("a", start=[1, 2, 3]))  # E: numpy.ndarray[Any, numpy.dtype[{int_}]]
+reveal_type(AR_S.find([b"a", b"b", b"c"], end=9))  # E: numpy.ndarray[Any, numpy.dtype[{int_}]]
+reveal_type(AR_U.rfind("a", start=[1, 2, 3]))  # E: numpy.ndarray[Any, numpy.dtype[{int_}]]
+reveal_type(AR_S.rfind([b"a", b"b", b"c"], end=9))  # E: numpy.ndarray[Any, numpy.dtype[{int_}]]
+
+reveal_type(AR_U.index("a", start=[1, 2, 3]))  # E: numpy.ndarray[Any, numpy.dtype[{int_}]]
+reveal_type(AR_S.index([b"a", b"b", b"c"], end=9))  # E: numpy.ndarray[Any, numpy.dtype[{int_}]]
+reveal_type(AR_U.rindex("a", start=[1, 2, 3]))  # E: numpy.ndarray[Any, numpy.dtype[{int_}]]
+reveal_type(AR_S.rindex([b"a", b"b", b"c"], end=9))  # E: numpy.ndarray[Any, numpy.dtype[{int_}]]
+
+reveal_type(AR_U.isalpha())  # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
+reveal_type(AR_S.isalpha())  # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
+
+reveal_type(AR_U.isalnum())  # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
+reveal_type(AR_S.isalnum())  # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
+
+reveal_type(AR_U.isdecimal())  # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
+reveal_type(AR_S.isdecimal())  # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
+
+reveal_type(AR_U.isdigit())  # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
+reveal_type(AR_S.isdigit())  # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
+
+reveal_type(AR_U.islower())  # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
+reveal_type(AR_S.islower())  # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
+
+reveal_type(AR_U.isnumeric())  # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
+reveal_type(AR_S.isnumeric())  # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
+
+reveal_type(AR_U.isspace())  # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
+reveal_type(AR_S.isspace())  # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
+
+reveal_type(AR_U.istitle())  # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
+reveal_type(AR_S.istitle())  # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
+
+reveal_type(AR_U.isupper())  # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
+reveal_type(AR_S.isupper())  # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
diff --git a/numpy/typing/tests/data/reveal/comparisons.py b/numpy/typing/tests/data/reveal/comparisons.pyi
index 16f21cc39..16f21cc39 100644
--- a/numpy/typing/tests/data/reveal/comparisons.py
+++ b/numpy/typing/tests/data/reveal/comparisons.pyi
diff --git a/numpy/typing/tests/data/reveal/constants.py b/numpy/typing/tests/data/reveal/constants.pyi
index 9a46bfded..9a46bfded 100644
--- a/numpy/typing/tests/data/reveal/constants.py
+++ b/numpy/typing/tests/data/reveal/constants.pyi
diff --git a/numpy/typing/tests/data/reveal/ctypeslib.py b/numpy/typing/tests/data/reveal/ctypeslib.py
deleted file mode 100644
index 0c32d70ed..000000000
--- a/numpy/typing/tests/data/reveal/ctypeslib.py
+++ /dev/null
@@ -1,3 +0,0 @@
-import numpy as np
-
-reveal_type(np.ctypeslib.c_intp())  # E: {c_intp}
diff --git a/numpy/typing/tests/data/reveal/ctypeslib.pyi b/numpy/typing/tests/data/reveal/ctypeslib.pyi
new file mode 100644
index 000000000..461a447d9
--- /dev/null
+++ b/numpy/typing/tests/data/reveal/ctypeslib.pyi
@@ -0,0 +1,87 @@
+import ctypes
+from typing import Any
+
+import numpy as np
+import numpy.typing as npt
+
+AR_bool: npt.NDArray[np.bool_]
+AR_ubyte: npt.NDArray[np.ubyte]
+AR_ushort: npt.NDArray[np.ushort]
+AR_uintc: npt.NDArray[np.uintc]
+AR_uint: npt.NDArray[np.uint]
+AR_ulonglong: npt.NDArray[np.ulonglong]
+AR_byte: npt.NDArray[np.byte]
+AR_short: npt.NDArray[np.short]
+AR_intc: npt.NDArray[np.intc]
+AR_int: npt.NDArray[np.int_]
+AR_longlong: npt.NDArray[np.longlong]
+AR_single: npt.NDArray[np.single]
+AR_double: npt.NDArray[np.double]
+AR_longdouble: npt.NDArray[np.longdouble]
+AR_void: npt.NDArray[np.void]
+
+pointer: ctypes.pointer[Any]
+
+reveal_type(np.ctypeslib.c_intp())  # E: {c_intp}
+
+reveal_type(np.ctypeslib.ndpointer())  # E: Type[numpy.ctypeslib._ndptr[None]]
+reveal_type(np.ctypeslib.ndpointer(dtype=np.float64))  # E: Type[numpy.ctypeslib._ndptr[numpy.dtype[{float64}]]]
+reveal_type(np.ctypeslib.ndpointer(dtype=float))  # E: Type[numpy.ctypeslib._ndptr[numpy.dtype[Any]]]
+reveal_type(np.ctypeslib.ndpointer(shape=(10, 3)))  # E: Type[numpy.ctypeslib._ndptr[None]]
+reveal_type(np.ctypeslib.ndpointer(np.int64, shape=(10, 3)))  # E: Type[numpy.ctypeslib._concrete_ndptr[numpy.dtype[{int64}]]]
+reveal_type(np.ctypeslib.ndpointer(int, shape=(1,)))  # E: Type[numpy.ctypeslib._concrete_ndptr[numpy.dtype[Any]]]
+
+reveal_type(np.ctypeslib.as_ctypes_type(np.bool_))  # E: Type[ctypes.c_bool]
+reveal_type(np.ctypeslib.as_ctypes_type(np.ubyte))  # E: Type[{c_ubyte}]
+reveal_type(np.ctypeslib.as_ctypes_type(np.ushort))  # E: Type[{c_ushort}]
+reveal_type(np.ctypeslib.as_ctypes_type(np.uintc))  # E: Type[{c_uint}]
+reveal_type(np.ctypeslib.as_ctypes_type(np.uint))  # E: Type[{c_ulong}]
+reveal_type(np.ctypeslib.as_ctypes_type(np.ulonglong))  # E: Type[{c_ulonglong}]
+reveal_type(np.ctypeslib.as_ctypes_type(np.byte))  # E: Type[{c_byte}]
+reveal_type(np.ctypeslib.as_ctypes_type(np.short))  # E: Type[{c_short}]
+reveal_type(np.ctypeslib.as_ctypes_type(np.intc))  # E: Type[{c_int}]
+reveal_type(np.ctypeslib.as_ctypes_type(np.int_))  # E: Type[{c_long}]
+reveal_type(np.ctypeslib.as_ctypes_type(np.longlong))  # E: Type[{c_longlong}]
+reveal_type(np.ctypeslib.as_ctypes_type(np.single))  # E: Type[{c_float}]
+reveal_type(np.ctypeslib.as_ctypes_type(np.double))  # E: Type[{c_double}]
+reveal_type(np.ctypeslib.as_ctypes_type(np.longdouble))  # E: Type[{c_longdouble}]
+reveal_type(np.ctypeslib.as_ctypes_type(ctypes.c_double))  # E: Type[{c_double}]
+reveal_type(np.ctypeslib.as_ctypes_type("q"))  # E: Type[ctypes.c_longlong]
+reveal_type(np.ctypeslib.as_ctypes_type([("i8", np.int64), ("f8", np.float64)]))  # E: Type[Any]
+reveal_type(np.ctypeslib.as_ctypes_type("i8"))  # E: Type[Any]
+reveal_type(np.ctypeslib.as_ctypes_type("f8"))  # E: Type[Any]
+
+reveal_type(np.ctypeslib.as_ctypes(AR_bool.take(0)))  # E: ctypes.c_bool
+reveal_type(np.ctypeslib.as_ctypes(AR_ubyte.take(0)))  # E: {c_ubyte}
+reveal_type(np.ctypeslib.as_ctypes(AR_ushort.take(0)))  # E: {c_ushort}
+reveal_type(np.ctypeslib.as_ctypes(AR_uintc.take(0)))  # E: {c_uint}
+reveal_type(np.ctypeslib.as_ctypes(AR_uint.take(0)))  # E: {c_ulong}
+reveal_type(np.ctypeslib.as_ctypes(AR_ulonglong.take(0)))  # E: {c_ulonglong}
+reveal_type(np.ctypeslib.as_ctypes(AR_byte.take(0)))  # E: {c_byte}
+reveal_type(np.ctypeslib.as_ctypes(AR_short.take(0)))  # E: {c_short}
+reveal_type(np.ctypeslib.as_ctypes(AR_intc.take(0)))  # E: {c_int}
+reveal_type(np.ctypeslib.as_ctypes(AR_int.take(0)))  # E: {c_long}
+reveal_type(np.ctypeslib.as_ctypes(AR_longlong.take(0)))  # E: {c_longlong}
+reveal_type(np.ctypeslib.as_ctypes(AR_single.take(0)))  # E: {c_float}
+reveal_type(np.ctypeslib.as_ctypes(AR_double.take(0)))  # E: {c_double}
+reveal_type(np.ctypeslib.as_ctypes(AR_longdouble.take(0)))  # E: {c_longdouble}
+reveal_type(np.ctypeslib.as_ctypes(AR_void.take(0)))  # E: Any
+reveal_type(np.ctypeslib.as_ctypes(AR_bool))  # E: ctypes.Array[ctypes.c_bool]
+reveal_type(np.ctypeslib.as_ctypes(AR_ubyte))  # E: ctypes.Array[{c_ubyte}]
+reveal_type(np.ctypeslib.as_ctypes(AR_ushort))  # E: ctypes.Array[{c_ushort}]
+reveal_type(np.ctypeslib.as_ctypes(AR_uintc))  # E: ctypes.Array[{c_uint}]
+reveal_type(np.ctypeslib.as_ctypes(AR_uint))  # E: ctypes.Array[{c_ulong}]
+reveal_type(np.ctypeslib.as_ctypes(AR_ulonglong))  # E: ctypes.Array[{c_ulonglong}]
+reveal_type(np.ctypeslib.as_ctypes(AR_byte))  # E: ctypes.Array[{c_byte}]
+reveal_type(np.ctypeslib.as_ctypes(AR_short))  # E: ctypes.Array[{c_short}]
+reveal_type(np.ctypeslib.as_ctypes(AR_intc))  # E: ctypes.Array[{c_int}]
+reveal_type(np.ctypeslib.as_ctypes(AR_int))  # E: ctypes.Array[{c_long}]
+reveal_type(np.ctypeslib.as_ctypes(AR_longlong))  # E: ctypes.Array[{c_longlong}]
+reveal_type(np.ctypeslib.as_ctypes(AR_single))  # E: ctypes.Array[{c_float}]
+reveal_type(np.ctypeslib.as_ctypes(AR_double))  # E: ctypes.Array[{c_double}]
+reveal_type(np.ctypeslib.as_ctypes(AR_longdouble))  # E: ctypes.Array[{c_longdouble}]
+reveal_type(np.ctypeslib.as_ctypes(AR_void))  # E: ctypes.Array[Any]
+
+reveal_type(np.ctypeslib.as_array(AR_ubyte))  # E: numpy.ndarray[Any, numpy.dtype[{ubyte}]]
+reveal_type(np.ctypeslib.as_array(1))  # E: numpy.ndarray[Any, numpy.dtype[Any]]
+reveal_type(np.ctypeslib.as_array(pointer))  # E: numpy.ndarray[Any, numpy.dtype[Any]]
diff --git a/numpy/typing/tests/data/reveal/datasource.py b/numpy/typing/tests/data/reveal/datasource.pyi
index 245ac7649..245ac7649 100644
--- a/numpy/typing/tests/data/reveal/datasource.py
+++ b/numpy/typing/tests/data/reveal/datasource.pyi
diff --git a/numpy/typing/tests/data/reveal/dtype.py b/numpy/typing/tests/data/reveal/dtype.pyi
index 364d1dcab..364d1dcab 100644
--- a/numpy/typing/tests/data/reveal/dtype.py
+++ b/numpy/typing/tests/data/reveal/dtype.pyi
diff --git a/numpy/typing/tests/data/reveal/einsumfunc.py b/numpy/typing/tests/data/reveal/einsumfunc.pyi
index f1a90428d..f1a90428d 100644
--- a/numpy/typing/tests/data/reveal/einsumfunc.py
+++ b/numpy/typing/tests/data/reveal/einsumfunc.pyi
diff --git a/numpy/typing/tests/data/reveal/flatiter.py b/numpy/typing/tests/data/reveal/flatiter.pyi
index 97776dd9f..97776dd9f 100644
--- a/numpy/typing/tests/data/reveal/flatiter.py
+++ b/numpy/typing/tests/data/reveal/flatiter.pyi
diff --git a/numpy/typing/tests/data/reveal/fromnumeric.py b/numpy/typing/tests/data/reveal/fromnumeric.pyi
index bbcfbb85a..bbcfbb85a 100644
--- a/numpy/typing/tests/data/reveal/fromnumeric.py
+++ b/numpy/typing/tests/data/reveal/fromnumeric.pyi
diff --git a/numpy/typing/tests/data/reveal/getlimits.py b/numpy/typing/tests/data/reveal/getlimits.pyi
index e12723bfe..90bcb06c8 100644
--- a/numpy/typing/tests/data/reveal/getlimits.py
+++ b/numpy/typing/tests/data/reveal/getlimits.pyi
@@ -1,6 +1,4 @@
 import numpy as np
-from numpy.typing import _32Bit
-
 f: float
 f8: np.float64
 c8: np.complex64
@@ -11,7 +9,6 @@ u4: np.uint32
 
 finfo_f8: np.finfo[np.float64]
 iinfo_i8: np.iinfo[np.int64]
-machar_f4: np.core.getlimits.MachArLike[_32Bit]
 
 reveal_type(np.finfo(f))  # E: numpy.finfo[{double}]
 reveal_type(np.finfo(f8))  # E: numpy.finfo[{float64}]
@@ -36,7 +33,6 @@ reveal_type(finfo_f8.resolution)  # E: {float64}
 reveal_type(finfo_f8.tiny)  # E: {float64}
 reveal_type(finfo_f8.smallest_normal)  # E: {float64}
 reveal_type(finfo_f8.smallest_subnormal)  # E: {float64}
-reveal_type(finfo_f8.machar)  # E: MachArLike[numpy.typing._64Bit]
 
 reveal_type(np.iinfo(i))  # E: iinfo[{int_}]
 reveal_type(np.iinfo(i8))  # E: iinfo[{int64}]
@@ -49,25 +45,3 @@ reveal_type(iinfo_i8.bits)  # E: int
 reveal_type(iinfo_i8.key)  # E: str
 reveal_type(iinfo_i8.min)  # E: int
 reveal_type(iinfo_i8.max)  # E: int
-
-reveal_type(machar_f4.eps)  # E: numpy.ndarray[Any, numpy.dtype[{float32}]]
-reveal_type(machar_f4.epsilon)  # E: numpy.ndarray[Any, numpy.dtype[{float32}]]
-reveal_type(machar_f4.epsneg)  # E: numpy.ndarray[Any, numpy.dtype[{float32}]]
-reveal_type(machar_f4.huge)  # E: numpy.ndarray[Any, numpy.dtype[{float32}]]
-reveal_type(machar_f4.resolution)  # E: numpy.ndarray[Any, numpy.dtype[{float32}]]
-reveal_type(machar_f4.tiny)  # E: numpy.ndarray[Any, numpy.dtype[{float32}]]
-reveal_type(machar_f4.xmax)  # E: numpy.ndarray[Any, numpy.dtype[{float32}]]
-reveal_type(machar_f4.xmin)  # E: numpy.ndarray[Any, numpy.dtype[{float32}]]
-reveal_type(machar_f4.smallest_subnormal)  # E: numpy.ndarray[Any, numpy.dtype[{float32}]]
-reveal_type(machar_f4.smallest_normal)  # E: numpy.ndarray[Any, numpy.dtype[{float32}]]
-reveal_type(machar_f4.iexp)  # E: int
-reveal_type(machar_f4.irnd)  # E: int
-reveal_type(machar_f4.it)  # E: int
-reveal_type(machar_f4.machep)  # E: int
-reveal_type(machar_f4.maxexp)  # E: int
-reveal_type(machar_f4.minexp)  # E: int
-reveal_type(machar_f4.negep)  # E: int
-reveal_type(machar_f4.ngrd)  # E: int
-reveal_type(machar_f4.precision)  # E: int
-reveal_type(machar_f4.ibeta)  # E: {int32}
-reveal_type(machar_f4.title)  # E: str
diff --git a/numpy/typing/tests/data/reveal/histograms.pyi b/numpy/typing/tests/data/reveal/histograms.pyi
new file mode 100644
index 000000000..55fa9518f
--- /dev/null
+++ b/numpy/typing/tests/data/reveal/histograms.pyi
@@ -0,0 +1,19 @@
+import numpy as np
+import numpy.typing as npt
+
+AR_i8: npt.NDArray[np.int64]
+AR_f8: npt.NDArray[np.float64]
+
+reveal_type(np.histogram_bin_edges(AR_i8, bins="auto"))  # E: numpy.ndarray[Any, numpy.dtype[Any]]
+reveal_type(np.histogram_bin_edges(AR_i8, bins="rice", range=(0, 3)))  # E: numpy.ndarray[Any, numpy.dtype[Any]]
+reveal_type(np.histogram_bin_edges(AR_i8, bins="scott", weights=AR_f8))  # E: numpy.ndarray[Any, numpy.dtype[Any]]
+
+reveal_type(np.histogram(AR_i8, bins="auto"))  # E: Tuple[numpy.ndarray[Any, numpy.dtype[Any]], numpy.ndarray[Any, numpy.dtype[Any]]]
+reveal_type(np.histogram(AR_i8, bins="rice", range=(0, 3)))  # E: Tuple[numpy.ndarray[Any, numpy.dtype[Any]], numpy.ndarray[Any, numpy.dtype[Any]]]
+reveal_type(np.histogram(AR_i8, bins="scott", weights=AR_f8))  # E: Tuple[numpy.ndarray[Any, numpy.dtype[Any]], numpy.ndarray[Any, numpy.dtype[Any]]]
+reveal_type(np.histogram(AR_f8, bins=1, density=True))  # E: Tuple[numpy.ndarray[Any, numpy.dtype[Any]], numpy.ndarray[Any, numpy.dtype[Any]]]
+
+reveal_type(np.histogramdd(AR_i8, bins=[1]))  # E: Tuple[numpy.ndarray[Any, numpy.dtype[Any]], builtins.list[numpy.ndarray[Any, numpy.dtype[Any]]]]
+reveal_type(np.histogramdd(AR_i8, range=[(0, 3)]))  # E: Tuple[numpy.ndarray[Any, numpy.dtype[Any]], builtins.list[numpy.ndarray[Any, numpy.dtype[Any]]]]
+reveal_type(np.histogramdd(AR_i8, weights=AR_f8))  # E: Tuple[numpy.ndarray[Any, numpy.dtype[Any]], builtins.list[numpy.ndarray[Any, numpy.dtype[Any]]]]
+reveal_type(np.histogramdd(AR_f8, density=True))  # E: Tuple[numpy.ndarray[Any, numpy.dtype[Any]], builtins.list[numpy.ndarray[Any, numpy.dtype[Any]]]]
diff --git a/numpy/typing/tests/data/reveal/index_tricks.py b/numpy/typing/tests/data/reveal/index_tricks.pyi
index 863d60220..863d60220 100644
--- a/numpy/typing/tests/data/reveal/index_tricks.py
+++ b/numpy/typing/tests/data/reveal/index_tricks.pyi
diff --git a/numpy/typing/tests/data/reveal/lib_function_base.pyi b/numpy/typing/tests/data/reveal/lib_function_base.pyi
new file mode 100644
index 000000000..bced08894
--- /dev/null
+++ b/numpy/typing/tests/data/reveal/lib_function_base.pyi
@@ -0,0 +1,180 @@
+from typing import Any
+
+import numpy as np
+import numpy.typing as npt
+
+vectorized_func: np.vectorize
+
+f8: np.float64
+AR_LIKE_f8: list[float]
+
+AR_i8: npt.NDArray[np.int64]
+AR_f8: npt.NDArray[np.float64]
+AR_c16: npt.NDArray[np.complex128]
+AR_m: npt.NDArray[np.timedelta64]
+AR_M: npt.NDArray[np.datetime64]
+AR_O: npt.NDArray[np.object_]
+AR_b: npt.NDArray[np.bool_]
+AR_U: npt.NDArray[np.str_]
+CHAR_AR_U: np.chararray[Any, np.dtype[np.str_]]
+
+def func(*args: Any, **kwargs: Any) -> Any: ...
+
+reveal_type(vectorized_func.pyfunc)  # E: def (*Any, **Any) -> Any
+reveal_type(vectorized_func.cache)  # E: bool
+reveal_type(vectorized_func.signature)  # E: Union[None, builtins.str]
+reveal_type(vectorized_func.otypes)  # E: Union[None, builtins.str]
+reveal_type(vectorized_func.excluded)  # E: set[Union[builtins.int, builtins.str]]
+reveal_type(vectorized_func.__doc__)  # E: Union[None, builtins.str]
+reveal_type(vectorized_func([1]))  # E: numpy.ndarray[Any, numpy.dtype[Any]]
+reveal_type(np.vectorize(int))  # E: numpy.vectorize
+reveal_type(np.vectorize(  # E: numpy.vectorize
+    int, otypes="i", doc="doc", excluded=(), cache=True, signature=None
+))
+
+reveal_type(np.add_newdoc("__main__", "blabla", doc="test doc"))  # E: None
+reveal_type(np.add_newdoc("__main__", "blabla", doc=("meth", "test doc")))  # E: None
+reveal_type(np.add_newdoc("__main__", "blabla", doc=[("meth", "test doc")]))  # E: None
+
+reveal_type(np.rot90(AR_f8, k=2))  # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.rot90(AR_LIKE_f8, axes=(0, 1)))  # E: numpy.ndarray[Any, numpy.dtype[Any]]
+
+reveal_type(np.flip(f8))  # E: {float64}
+reveal_type(np.flip(1.0))  # E: Any
+reveal_type(np.flip(AR_f8, axis=(0, 1)))  # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.flip(AR_LIKE_f8, axis=0))  # E: numpy.ndarray[Any, numpy.dtype[Any]]
+
+reveal_type(np.iterable(1))  # E: bool
+reveal_type(np.iterable([1]))  # E: bool
+
+reveal_type(np.average(AR_f8))  # E: numpy.floating[Any]
+reveal_type(np.average(AR_f8, weights=AR_c16))  # E: numpy.complexfloating[Any, Any]
+reveal_type(np.average(AR_O))  # E: Any
+reveal_type(np.average(AR_f8, returned=True))  # E: Tuple[numpy.floating[Any], numpy.floating[Any]]
+reveal_type(np.average(AR_f8, weights=AR_c16, returned=True))  # E: Tuple[numpy.complexfloating[Any, Any], numpy.complexfloating[Any, Any]]
+reveal_type(np.average(AR_O, returned=True))  # E: Tuple[Any, Any]
+reveal_type(np.average(AR_f8, axis=0))  # E: Any
+reveal_type(np.average(AR_f8, axis=0, returned=True))  # E: Tuple[Any, Any]
+
+reveal_type(np.asarray_chkfinite(AR_f8))  # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.asarray_chkfinite(AR_LIKE_f8))  # E: numpy.ndarray[Any, numpy.dtype[Any]]
+reveal_type(np.asarray_chkfinite(AR_f8, dtype=np.float64))  # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.asarray_chkfinite(AR_f8, dtype=float))  # E: numpy.ndarray[Any, numpy.dtype[Any]]
+
+reveal_type(np.piecewise(AR_f8, AR_b, [func]))  # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.piecewise(AR_LIKE_f8, AR_b, [func]))  # E: numpy.ndarray[Any, numpy.dtype[Any]]
+
+reveal_type(np.select([AR_f8], [AR_f8]))  # E: numpy.ndarray[Any, numpy.dtype[Any]]
+
+reveal_type(np.copy(AR_LIKE_f8))  # E: numpy.ndarray[Any, numpy.dtype[Any]]
+reveal_type(np.copy(AR_U))  # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]]
+reveal_type(np.copy(CHAR_AR_U))  # E: numpy.ndarray[Any, Any]
+reveal_type(np.copy(CHAR_AR_U, "K", subok=True))  # E: numpy.chararray[Any, numpy.dtype[numpy.str_]]
+reveal_type(np.copy(CHAR_AR_U, subok=True))  # E: numpy.chararray[Any, numpy.dtype[numpy.str_]]
+
+reveal_type(np.gradient(AR_f8, axis=None))  # E: Any
+reveal_type(np.gradient(AR_LIKE_f8, edge_order=2))  # E: Any
+
+reveal_type(np.diff("bob", n=0))  # E: str
+reveal_type(np.diff(AR_f8, axis=0))  # E: numpy.ndarray[Any, numpy.dtype[Any]]
+reveal_type(np.diff(AR_LIKE_f8, prepend=1.5))  # E: numpy.ndarray[Any, numpy.dtype[Any]]
+
+reveal_type(np.angle(AR_f8))  # E: numpy.floating[Any]
+reveal_type(np.angle(AR_c16, deg=True))  # E: numpy.complexfloating[Any, Any]
+reveal_type(np.angle(AR_O))  # E: Any
+
+reveal_type(np.unwrap(AR_f8))  # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
+reveal_type(np.unwrap(AR_O))  # E: numpy.ndarray[Any, numpy.dtype[numpy.object_]]
+
+reveal_type(np.sort_complex(AR_f8))  # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]
+
+reveal_type(np.trim_zeros(AR_f8))  # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.trim_zeros(AR_LIKE_f8))  # E: list[builtins.float]
+
+reveal_type(np.extract(AR_i8, AR_f8))  # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.extract(AR_i8, AR_LIKE_f8))  # E: numpy.ndarray[Any, numpy.dtype[Any]]
+
+reveal_type(np.place(AR_f8, mask=AR_i8, vals=5.0))  # E: None
+
+reveal_type(np.disp(1, linefeed=True))  # E: None
+with open("test", "w") as f:
+    reveal_type(np.disp("message", device=f))  # E: None
+
+reveal_type(np.cov(AR_f8, bias=True))  # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
+reveal_type(np.cov(AR_f8, AR_c16, ddof=1))  # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]
+reveal_type(np.cov(AR_f8, aweights=AR_f8, dtype=np.float32))  # E: numpy.ndarray[Any, numpy.dtype[{float32}]]
+reveal_type(np.cov(AR_f8, fweights=AR_f8, dtype=float))  # E: numpy.ndarray[Any, numpy.dtype[Any]]
+
+reveal_type(np.corrcoef(AR_f8, rowvar=True))  # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
+reveal_type(np.corrcoef(AR_f8, AR_c16))  # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]
+reveal_type(np.corrcoef(AR_f8, dtype=np.float32))  # E: numpy.ndarray[Any, numpy.dtype[{float32}]]
+reveal_type(np.corrcoef(AR_f8, dtype=float))  # E: numpy.ndarray[Any, numpy.dtype[Any]]
+
+reveal_type(np.blackman(5))  # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
+reveal_type(np.bartlett(6))  # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
+reveal_type(np.hanning(4.5))  # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
+reveal_type(np.hamming(0))  # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
+reveal_type(np.i0(AR_i8))  # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
+reveal_type(np.kaiser(4, 5.9))  # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
+
+reveal_type(np.sinc(1.0))  # E: numpy.floating[Any]
+reveal_type(np.sinc(1j))  # E: numpy.complexfloating[Any, Any]
+reveal_type(np.sinc(AR_f8))  # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
+reveal_type(np.sinc(AR_c16))  # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]
+
+reveal_type(np.msort(CHAR_AR_U))  # E: Any
+reveal_type(np.msort(AR_U))  # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]]
+reveal_type(np.msort(AR_LIKE_f8))  # E: numpy.ndarray[Any, numpy.dtype[Any]]
+
+reveal_type(np.median(AR_f8, keepdims=False))  # E: numpy.floating[Any]
+reveal_type(np.median(AR_c16, overwrite_input=True))  # E: numpy.complexfloating[Any, Any]
+reveal_type(np.median(AR_m))  # E: numpy.timedelta64
+reveal_type(np.median(AR_O))  # E: Any
+reveal_type(np.median(AR_f8, keepdims=True))  # E: Any
+reveal_type(np.median(AR_c16, axis=0))  # E: Any
+reveal_type(np.median(AR_LIKE_f8, out=AR_c16))  # E: numpy.ndarray[Any, numpy.dtype[{complex128}]]
+
+reveal_type(np.add_newdoc_ufunc(np.add, "docstring"))  # E: None
+
+reveal_type(np.percentile(AR_f8, 50))  # E: numpy.floating[Any]
+reveal_type(np.percentile(AR_c16, 50))  # E: numpy.complexfloating[Any, Any]
+reveal_type(np.percentile(AR_m, 50))  # E: numpy.timedelta64
+reveal_type(np.percentile(AR_M, 50, overwrite_input=True))  # E: numpy.datetime64
+reveal_type(np.percentile(AR_O, 50))  # E: Any
+reveal_type(np.percentile(AR_f8, [50]))  # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
+reveal_type(np.percentile(AR_c16, [50]))  # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]
+reveal_type(np.percentile(AR_m, [50]))  # E: numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]
+reveal_type(np.percentile(AR_M, [50], interpolation="nearest"))  # E: numpy.ndarray[Any, numpy.dtype[numpy.datetime64]]
+reveal_type(np.percentile(AR_O, [50]))  # E: numpy.ndarray[Any, numpy.dtype[numpy.object_]]
+reveal_type(np.percentile(AR_f8, [50], keepdims=True))  # E: Any
+reveal_type(np.percentile(AR_f8, [50], axis=[1]))  # E: Any
+reveal_type(np.percentile(AR_f8, [50], out=AR_c16))  # E: numpy.ndarray[Any, numpy.dtype[{complex128}]]
+
+reveal_type(np.quantile(AR_f8, 0.5))  # E: numpy.floating[Any]
+reveal_type(np.quantile(AR_c16, 0.5))  # E: numpy.complexfloating[Any, Any]
+reveal_type(np.quantile(AR_m, 0.5))  # E: numpy.timedelta64
+reveal_type(np.quantile(AR_M, 0.5, overwrite_input=True))  # E: numpy.datetime64
+reveal_type(np.quantile(AR_O, 0.5))  # E: Any
+reveal_type(np.quantile(AR_f8, [0.5]))  # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
+reveal_type(np.quantile(AR_c16, [0.5]))  # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]
+reveal_type(np.quantile(AR_m, [0.5]))  # E: numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]
+reveal_type(np.quantile(AR_M, [0.5], interpolation="nearest"))  # E: numpy.ndarray[Any, numpy.dtype[numpy.datetime64]]
+reveal_type(np.quantile(AR_O, [0.5]))  # E: numpy.ndarray[Any, numpy.dtype[numpy.object_]]
+reveal_type(np.quantile(AR_f8, [0.5], keepdims=True))  # E: Any
+reveal_type(np.quantile(AR_f8, [0.5], axis=[1]))  # E: Any
+reveal_type(np.quantile(AR_f8, [0.5], out=AR_c16))  # E: numpy.ndarray[Any, numpy.dtype[{complex128}]]
+
+reveal_type(np.meshgrid(AR_f8, AR_i8, copy=False))  # E: list[numpy.ndarray[Any, numpy.dtype[Any]]]
+reveal_type(np.meshgrid(AR_f8, AR_i8, AR_c16, indexing="ij"))  # E: list[numpy.ndarray[Any, numpy.dtype[Any]]]
+
+reveal_type(np.delete(AR_f8, np.s_[:5]))  # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.delete(AR_LIKE_f8, [0, 4, 9], axis=0))  # E: numpy.ndarray[Any, numpy.dtype[Any]]
+
+reveal_type(np.insert(AR_f8, np.s_[:5], 5))  # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.insert(AR_LIKE_f8, [0, 4, 9], [0.5, 9.2, 7], axis=0))  # E: numpy.ndarray[Any, numpy.dtype[Any]]
+
+reveal_type(np.append(AR_f8, 5))  # E: numpy.ndarray[Any, numpy.dtype[Any]]
+reveal_type(np.append(AR_LIKE_f8, 1j, axis=0))  # E: numpy.ndarray[Any, numpy.dtype[Any]]
+
+reveal_type(np.digitize(4.5, [1]))  # E: {intp}
+reveal_type(np.digitize(AR_f8, [1, 2, 3]))  # E: numpy.ndarray[Any, numpy.dtype[{intp}]]
diff --git a/numpy/typing/tests/data/reveal/lib_polynomial.pyi b/numpy/typing/tests/data/reveal/lib_polynomial.pyi
new file mode 100644
index 000000000..5a4a3c424
--- /dev/null
+++ b/numpy/typing/tests/data/reveal/lib_polynomial.pyi
@@ -0,0 +1,111 @@
+import numpy as np
+import numpy.typing as npt
+
+AR_b: npt.NDArray[np.bool_]
+AR_u4: npt.NDArray[np.uint32]
+AR_i8: npt.NDArray[np.int64]
+AR_f8: npt.NDArray[np.float64]
+AR_c16: npt.NDArray[np.complex128]
+AR_O: npt.NDArray[np.object_]
+
+poly_obj: np.poly1d
+
+reveal_type(poly_obj.variable)  # E: str
+reveal_type(poly_obj.order)  # E: int
+reveal_type(poly_obj.o)  # E: int
+reveal_type(poly_obj.roots)  # E: numpy.ndarray[Any, numpy.dtype[Any]]
+reveal_type(poly_obj.r)  # E: numpy.ndarray[Any, numpy.dtype[Any]]
+reveal_type(poly_obj.coeffs)  # E: numpy.ndarray[Any, numpy.dtype[Any]]
+reveal_type(poly_obj.c)  # E: numpy.ndarray[Any, numpy.dtype[Any]]
+reveal_type(poly_obj.coef)  # E: numpy.ndarray[Any, numpy.dtype[Any]]
+reveal_type(poly_obj.coefficients)  # E: numpy.ndarray[Any, numpy.dtype[Any]]
+reveal_type(poly_obj.__hash__)  # E: None
+
+reveal_type(poly_obj(1))  # E: Any
+reveal_type(poly_obj([1]))  # E: numpy.ndarray[Any, numpy.dtype[Any]]
+reveal_type(poly_obj(poly_obj))  # E: numpy.poly1d
+
+reveal_type(len(poly_obj))  # E: int
+reveal_type(-poly_obj)  # E: numpy.poly1d
+reveal_type(+poly_obj)  # E: numpy.poly1d
+
+reveal_type(poly_obj * 5)  # E: numpy.poly1d
+reveal_type(5 * poly_obj)  # E: numpy.poly1d
+reveal_type(poly_obj + 5)  # E: numpy.poly1d
+reveal_type(5 + poly_obj)  # E: numpy.poly1d
+reveal_type(poly_obj - 5)  # E: numpy.poly1d
+reveal_type(5 - poly_obj)  # E: numpy.poly1d
+reveal_type(poly_obj**1)  # E: numpy.poly1d
+reveal_type(poly_obj**1.0)  # E: numpy.poly1d
+reveal_type(poly_obj / 5)  # E: numpy.poly1d
+reveal_type(5 / poly_obj)  # E: numpy.poly1d
+
+reveal_type(poly_obj[0])  # E: Any
+poly_obj[0] = 5
+reveal_type(iter(poly_obj))  # E: Iterator[Any]
+reveal_type(poly_obj.deriv())  # E: numpy.poly1d
+reveal_type(poly_obj.integ())  # E: numpy.poly1d
+
+reveal_type(np.poly(poly_obj))  # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
+reveal_type(np.poly(AR_f8))  # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
+reveal_type(np.poly(AR_c16))  # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
+
+reveal_type(np.polyint(poly_obj))  # E: numpy.poly1d
+reveal_type(np.polyint(AR_f8))  # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
+reveal_type(np.polyint(AR_f8, k=AR_c16))  # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]
+reveal_type(np.polyint(AR_O, m=2))  # E: numpy.ndarray[Any, numpy.dtype[numpy.object_]]
+
+reveal_type(np.polyder(poly_obj))  # E: numpy.poly1d
+reveal_type(np.polyder(AR_f8))  # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
+reveal_type(np.polyder(AR_c16))  # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]
+reveal_type(np.polyder(AR_O, m=2))  # E: numpy.ndarray[Any, numpy.dtype[numpy.object_]]
+
+reveal_type(np.polyfit(AR_f8, AR_f8, 2))  # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.polyfit(AR_f8, AR_i8, 1, full=True))  # E: Tuple[numpy.ndarray[Any, numpy.dtype[{float64}]], numpy.ndarray[Any, numpy.dtype[{float64}]], numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]]], numpy.ndarray[Any, numpy.dtype[{float64}]], numpy.ndarray[Any, numpy.dtype[{float64}]]]
+reveal_type(np.polyfit(AR_u4, AR_f8, 1.0, cov="unscaled"))  # E: Tuple[numpy.ndarray[Any, numpy.dtype[{float64}]], numpy.ndarray[Any, numpy.dtype[{float64}]]]
+reveal_type(np.polyfit(AR_c16, AR_f8, 2))  # E: numpy.ndarray[Any, numpy.dtype[{complex128}]]
+reveal_type(np.polyfit(AR_f8, AR_c16, 1, full=True))  # E: Tuple[numpy.ndarray[Any, numpy.dtype[{complex128}]], numpy.ndarray[Any, numpy.dtype[{float64}]], numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]]], numpy.ndarray[Any, numpy.dtype[{float64}]], numpy.ndarray[Any, numpy.dtype[{float64}]]]
+reveal_type(np.polyfit(AR_u4, AR_c16, 1.0, cov=True))  # E: Tuple[numpy.ndarray[Any, numpy.dtype[{complex128}]], numpy.ndarray[Any, numpy.dtype[{complex128}]]]
+
+reveal_type(np.polyval(AR_b, AR_b))  # E: numpy.ndarray[Any, numpy.dtype[{int64}]]
+reveal_type(np.polyval(AR_u4, AR_b))  # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[Any]]]
+reveal_type(np.polyval(AR_i8, AR_i8))  # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]
+reveal_type(np.polyval(AR_f8, AR_i8))  # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
+reveal_type(np.polyval(AR_i8, AR_c16))  # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]
+reveal_type(np.polyval(AR_O, AR_O))  # E: numpy.ndarray[Any, numpy.dtype[numpy.object_]]
+
+reveal_type(np.polyadd(poly_obj, AR_i8))  # E: numpy.poly1d
+reveal_type(np.polyadd(AR_f8, poly_obj))  # E: numpy.poly1d
+reveal_type(np.polyadd(AR_b, AR_b))  # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
+reveal_type(np.polyadd(AR_u4, AR_b))  # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[Any]]]
+reveal_type(np.polyadd(AR_i8, AR_i8))  # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]
+reveal_type(np.polyadd(AR_f8, AR_i8))  # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
+reveal_type(np.polyadd(AR_i8, AR_c16))  # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]
+reveal_type(np.polyadd(AR_O, AR_O))  # E: numpy.ndarray[Any, numpy.dtype[numpy.object_]]
+
+reveal_type(np.polysub(poly_obj, AR_i8))  # E: numpy.poly1d
+reveal_type(np.polysub(AR_f8, poly_obj))  # E: numpy.poly1d
+reveal_type(np.polysub(AR_b, AR_b))  # E: <nothing>
+reveal_type(np.polysub(AR_u4, AR_b))  # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[Any]]]
+reveal_type(np.polysub(AR_i8, AR_i8))  # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]
+reveal_type(np.polysub(AR_f8, AR_i8))  # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
+reveal_type(np.polysub(AR_i8, AR_c16))  # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]
+reveal_type(np.polysub(AR_O, AR_O))  # E: numpy.ndarray[Any, numpy.dtype[numpy.object_]]
+
+reveal_type(np.polymul(poly_obj, AR_i8))  # E: numpy.poly1d
+reveal_type(np.polymul(AR_f8, poly_obj))  # E: numpy.poly1d
+reveal_type(np.polymul(AR_b, AR_b))  # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
+reveal_type(np.polymul(AR_u4, AR_b))  # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[Any]]]
+reveal_type(np.polymul(AR_i8, AR_i8))  # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]
+reveal_type(np.polymul(AR_f8, AR_i8))  # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
+reveal_type(np.polymul(AR_i8, AR_c16))  # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]
+reveal_type(np.polymul(AR_O, AR_O))  # E: numpy.ndarray[Any, numpy.dtype[numpy.object_]]
+
+reveal_type(np.polydiv(poly_obj, AR_i8))  # E: numpy.poly1d
+reveal_type(np.polydiv(AR_f8, poly_obj))  # E: numpy.poly1d
+reveal_type(np.polydiv(AR_b, AR_b))  # E: Tuple[numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]]
+reveal_type(np.polydiv(AR_u4, AR_b))  # E: Tuple[numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]]
+reveal_type(np.polydiv(AR_i8, AR_i8))  # E: Tuple[numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]]
+reveal_type(np.polydiv(AR_f8, AR_i8))  # E: Tuple[numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]]
+reveal_type(np.polydiv(AR_i8, AR_c16))  # E: Tuple[numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]]
+reveal_type(np.polydiv(AR_O, AR_O))  # E: Tuple[numpy.ndarray[Any, numpy.dtype[Any]], numpy.ndarray[Any, numpy.dtype[Any]]]
diff --git a/numpy/typing/tests/data/reveal/lib_utils.py b/numpy/typing/tests/data/reveal/lib_utils.pyi
index d82012707..d82012707 100644
--- a/numpy/typing/tests/data/reveal/lib_utils.py
+++ b/numpy/typing/tests/data/reveal/lib_utils.pyi
diff --git a/numpy/typing/tests/data/reveal/lib_version.py b/numpy/typing/tests/data/reveal/lib_version.pyi
index e6f695558..e6f695558 100644
--- a/numpy/typing/tests/data/reveal/lib_version.py
+++ b/numpy/typing/tests/data/reveal/lib_version.pyi
diff --git a/numpy/typing/tests/data/reveal/linalg.pyi b/numpy/typing/tests/data/reveal/linalg.pyi
new file mode 100644
index 000000000..fecdc0d37
--- /dev/null
+++ b/numpy/typing/tests/data/reveal/linalg.pyi
@@ -0,0 +1,97 @@
+import numpy as np
+import numpy.typing as npt
+
+AR_i8: npt.NDArray[np.int64]
+AR_f8: npt.NDArray[np.float64]
+AR_c16: npt.NDArray[np.complex128]
+AR_O: npt.NDArray[np.object_]
+AR_m: npt.NDArray[np.timedelta64]
+AR_S: npt.NDArray[np.str_]
+
+reveal_type(np.linalg.tensorsolve(AR_i8, AR_i8))  # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.linalg.tensorsolve(AR_i8, AR_f8))  # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
+reveal_type(np.linalg.tensorsolve(AR_c16, AR_f8))  # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]
+
+reveal_type(np.linalg.solve(AR_i8, AR_i8))  # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.linalg.solve(AR_i8, AR_f8))  # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
+reveal_type(np.linalg.solve(AR_c16, AR_f8))  # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]
+
+reveal_type(np.linalg.tensorinv(AR_i8))  # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.linalg.tensorinv(AR_f8))  # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
+reveal_type(np.linalg.tensorinv(AR_c16))  # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]
+
+reveal_type(np.linalg.inv(AR_i8))  # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.linalg.inv(AR_f8))  # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
+reveal_type(np.linalg.inv(AR_c16))  # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]
+
+reveal_type(np.linalg.matrix_power(AR_i8, -1))  # E: numpy.ndarray[Any, numpy.dtype[Any]]
+reveal_type(np.linalg.matrix_power(AR_f8, 0))  # E: numpy.ndarray[Any, numpy.dtype[Any]]
+reveal_type(np.linalg.matrix_power(AR_c16, 1))  # E: numpy.ndarray[Any, numpy.dtype[Any]]
+reveal_type(np.linalg.matrix_power(AR_O, 2))  # E: numpy.ndarray[Any, numpy.dtype[Any]]
+
+reveal_type(np.linalg.cholesky(AR_i8))  # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.linalg.cholesky(AR_f8))  # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
+reveal_type(np.linalg.cholesky(AR_c16))  # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]
+
+reveal_type(np.linalg.qr(AR_i8))  # E: Tuple[numpy.ndarray[Any, numpy.dtype[{float64}]], numpy.ndarray[Any, numpy.dtype[{float64}]]]
+reveal_type(np.linalg.qr(AR_f8))  # E: Tuple[numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]]
+reveal_type(np.linalg.qr(AR_c16))  # E: Tuple[numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]]
+
+reveal_type(np.linalg.eigvals(AR_i8))  # E: Union[numpy.ndarray[Any, numpy.dtype[{float64}]], numpy.ndarray[Any, numpy.dtype[{complex128}]]]
+reveal_type(np.linalg.eigvals(AR_f8))  # E: Union[numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]]
+reveal_type(np.linalg.eigvals(AR_c16))  # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]
+
+reveal_type(np.linalg.eigvalsh(AR_i8))  # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.linalg.eigvalsh(AR_f8))  # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
+reveal_type(np.linalg.eigvalsh(AR_c16))  # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
+
+reveal_type(np.linalg.eig(AR_i8))  # E: Union[Tuple[numpy.ndarray[Any, numpy.dtype[{float64}]], numpy.ndarray[Any, numpy.dtype[{float64}]]], Tuple[numpy.ndarray[Any, numpy.dtype[{complex128}]], numpy.ndarray[Any, numpy.dtype[{complex128}]]]]
+reveal_type(np.linalg.eig(AR_f8))  # E: Union[Tuple[numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]], Tuple[numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]]]
+reveal_type(np.linalg.eig(AR_c16))  # E: Tuple[numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]]
+
+reveal_type(np.linalg.eigh(AR_i8))  # E: Tuple[numpy.ndarray[Any, numpy.dtype[{float64}]], numpy.ndarray[Any, numpy.dtype[{float64}]]]
+reveal_type(np.linalg.eigh(AR_f8))  # E: Tuple[numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]]
+reveal_type(np.linalg.eigh(AR_c16))  # E: Tuple[numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]]
+
+reveal_type(np.linalg.svd(AR_i8))  # E: Tuple[numpy.ndarray[Any, numpy.dtype[{float64}]], numpy.ndarray[Any, numpy.dtype[{float64}]], numpy.ndarray[Any, numpy.dtype[{float64}]]]
+reveal_type(np.linalg.svd(AR_f8))  # E: Tuple[numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]]
+reveal_type(np.linalg.svd(AR_c16))  # E: Tuple[numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]]
+reveal_type(np.linalg.svd(AR_i8, compute_uv=False))  # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.linalg.svd(AR_f8, compute_uv=False))  # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
+reveal_type(np.linalg.svd(AR_c16, compute_uv=False))  # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
+
+reveal_type(np.linalg.cond(AR_i8))  # E: Any
+reveal_type(np.linalg.cond(AR_f8))  # E: Any
+reveal_type(np.linalg.cond(AR_c16))  # E: Any
+
+reveal_type(np.linalg.matrix_rank(AR_i8))  # E: Any
+reveal_type(np.linalg.matrix_rank(AR_f8))  # E: Any
+reveal_type(np.linalg.matrix_rank(AR_c16))  # E: Any
+
+reveal_type(np.linalg.pinv(AR_i8))  # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.linalg.pinv(AR_f8))  # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
+reveal_type(np.linalg.pinv(AR_c16))  # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]
+
+reveal_type(np.linalg.slogdet(AR_i8))  # E: Tuple[Any, Any]
+reveal_type(np.linalg.slogdet(AR_f8))  # E: Tuple[Any, Any]
+reveal_type(np.linalg.slogdet(AR_c16))  # E: Tuple[Any, Any]
+
+reveal_type(np.linalg.det(AR_i8))  # E: Any
+reveal_type(np.linalg.det(AR_f8))  # E: Any
+reveal_type(np.linalg.det(AR_c16))  # E: Any
+
+reveal_type(np.linalg.lstsq(AR_i8, AR_i8))  # E: Tuple[numpy.ndarray[Any, numpy.dtype[{float64}]], numpy.ndarray[Any, numpy.dtype[{float64}]], {int32}, numpy.ndarray[Any, numpy.dtype[{float64}]]]
+reveal_type(np.linalg.lstsq(AR_i8, AR_f8))  # E: Tuple[numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]], {int32}, numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]]
+reveal_type(np.linalg.lstsq(AR_f8, AR_c16))  # E: Tuple[numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]], {int32}, numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]]
+
+reveal_type(np.linalg.norm(AR_i8))  # E: numpy.floating[Any]
+reveal_type(np.linalg.norm(AR_f8))  # E: numpy.floating[Any]
+reveal_type(np.linalg.norm(AR_c16))  # E: numpy.floating[Any]
+reveal_type(np.linalg.norm(AR_S))  # E: numpy.floating[Any]
+reveal_type(np.linalg.norm(AR_f8, axis=0))  # E: Any
+
+reveal_type(np.linalg.multi_dot([AR_i8, AR_i8]))  # E: Any
+reveal_type(np.linalg.multi_dot([AR_i8, AR_f8]))  # E: Any
+reveal_type(np.linalg.multi_dot([AR_f8, AR_c16]))  # E: Any
+reveal_type(np.linalg.multi_dot([AR_O, AR_O]))  # E: Any
+reveal_type(np.linalg.multi_dot([AR_m, AR_m]))  # E: Any
diff --git a/numpy/typing/tests/data/reveal/matrix.pyi b/numpy/typing/tests/data/reveal/matrix.pyi
new file mode 100644
index 000000000..def33f458
--- /dev/null
+++ b/numpy/typing/tests/data/reveal/matrix.pyi
@@ -0,0 +1,69 @@
+from typing import Any
+import numpy as np
+import numpy.typing as npt
+
+mat: np.matrix[Any, np.dtype[np.int64]]
+ar_f8: npt.NDArray[np.float64]
+
+reveal_type(mat * 5)  # E: numpy.matrix[Any, Any]
+reveal_type(5 * mat)  # E: numpy.matrix[Any, Any]
+mat *= 5
+
+reveal_type(mat**5)  # E: numpy.matrix[Any, Any]
+mat **= 5
+
+reveal_type(mat.sum())  # E: Any
+reveal_type(mat.mean())  # E: Any
+reveal_type(mat.std())  # E: Any
+reveal_type(mat.var())  # E: Any
+reveal_type(mat.prod())  # E: Any
+reveal_type(mat.any())  # E: numpy.bool_
+reveal_type(mat.all())  # E: numpy.bool_
+reveal_type(mat.max())  # E: {int64}
+reveal_type(mat.min())  # E: {int64}
+reveal_type(mat.argmax())  # E: {intp}
+reveal_type(mat.argmin())  # E: {intp}
+reveal_type(mat.ptp())  # E: {int64}
+
+reveal_type(mat.sum(axis=0))  # E: numpy.matrix[Any, Any]
+reveal_type(mat.mean(axis=0))  # E: numpy.matrix[Any, Any]
+reveal_type(mat.std(axis=0))  # E: numpy.matrix[Any, Any]
+reveal_type(mat.var(axis=0))  # E: numpy.matrix[Any, Any]
+reveal_type(mat.prod(axis=0))  # E: numpy.matrix[Any, Any]
+reveal_type(mat.any(axis=0))  # E: numpy.matrix[Any, numpy.dtype[numpy.bool_]]
+reveal_type(mat.all(axis=0))  # E: numpy.matrix[Any, numpy.dtype[numpy.bool_]]
+reveal_type(mat.max(axis=0))  # E: numpy.matrix[Any, numpy.dtype[{int64}]]
+reveal_type(mat.min(axis=0))  # E: numpy.matrix[Any, numpy.dtype[{int64}]]
+reveal_type(mat.argmax(axis=0))  # E: numpy.matrix[Any, numpy.dtype[{intp}]]
+reveal_type(mat.argmin(axis=0))  # E: numpy.matrix[Any, numpy.dtype[{intp}]]
+reveal_type(mat.ptp(axis=0))  # E: numpy.matrix[Any, numpy.dtype[{int64}]]
+
+reveal_type(mat.sum(out=ar_f8))  # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(mat.mean(out=ar_f8))  # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(mat.std(out=ar_f8))  # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(mat.var(out=ar_f8))  # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(mat.prod(out=ar_f8))  # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(mat.any(out=ar_f8))  # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(mat.all(out=ar_f8))  # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(mat.max(out=ar_f8))  # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(mat.min(out=ar_f8))  # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(mat.argmax(out=ar_f8))  # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(mat.argmin(out=ar_f8))  # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(mat.ptp(out=ar_f8))  # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+
+reveal_type(mat.T)  # E: numpy.matrix[Any, numpy.dtype[{int64}]]
+reveal_type(mat.I)  # E: numpy.matrix[Any, Any]
+reveal_type(mat.A)  # E: numpy.ndarray[Any, numpy.dtype[{int64}]]
+reveal_type(mat.A1)  # E: numpy.ndarray[Any, numpy.dtype[{int64}]]
+reveal_type(mat.H)  # E: numpy.matrix[Any, numpy.dtype[{int64}]]
+reveal_type(mat.getT())  # E: numpy.matrix[Any, numpy.dtype[{int64}]]
+reveal_type(mat.getI())  # E: numpy.matrix[Any, Any]
+reveal_type(mat.getA())  # E: numpy.ndarray[Any, numpy.dtype[{int64}]]
+reveal_type(mat.getA1())  # E: numpy.ndarray[Any, numpy.dtype[{int64}]]
+reveal_type(mat.getH())  # E: numpy.matrix[Any, numpy.dtype[{int64}]]
+
+reveal_type(np.bmat(ar_f8))  # E: numpy.matrix[Any, Any]
+reveal_type(np.bmat([[0, 1, 2]]))  # E: numpy.matrix[Any, Any]
+reveal_type(np.bmat("mat"))  # E: numpy.matrix[Any, Any]
+
+reveal_type(np.asmatrix(ar_f8, dtype=np.int64))  # E: numpy.matrix[Any, Any]
diff --git a/numpy/typing/tests/data/reveal/memmap.pyi b/numpy/typing/tests/data/reveal/memmap.pyi
new file mode 100644
index 000000000..c1d8edc67
--- /dev/null
+++ b/numpy/typing/tests/data/reveal/memmap.pyi
@@ -0,0 +1,16 @@
+import numpy as np
+from typing import Any
+
+memmap_obj: np.memmap[Any, np.dtype[np.str_]]
+
+reveal_type(np.memmap.__array_priority__)  # E: float
+reveal_type(memmap_obj.__array_priority__)  # E: float
+reveal_type(memmap_obj.filename)  # E: Union[builtins.str, None]
+reveal_type(memmap_obj.offset)  # E: int
+reveal_type(memmap_obj.mode)  # E: str
+reveal_type(memmap_obj.flush())  # E: None
+
+reveal_type(np.memmap("file.txt", offset=5))  # E: numpy.memmap[Any, numpy.dtype[{uint8}]]
+reveal_type(np.memmap(b"file.txt", dtype=np.float64, shape=(10, 3)))  # E: numpy.memmap[Any, numpy.dtype[{float64}]]
+with open("file.txt", "rb") as f:
+    reveal_type(np.memmap(f, dtype=float, order="K"))  # E: numpy.memmap[Any, numpy.dtype[Any]]
diff --git a/numpy/typing/tests/data/reveal/mod.py b/numpy/typing/tests/data/reveal/mod.pyi
index bf45b8c58..bf45b8c58 100644
--- a/numpy/typing/tests/data/reveal/mod.py
+++ b/numpy/typing/tests/data/reveal/mod.pyi
diff --git a/numpy/typing/tests/data/reveal/modules.py b/numpy/typing/tests/data/reveal/modules.pyi
index 7e695433e..7e695433e 100644
--- a/numpy/typing/tests/data/reveal/modules.py
+++ b/numpy/typing/tests/data/reveal/modules.pyi
diff --git a/numpy/typing/tests/data/reveal/multiarray.py b/numpy/typing/tests/data/reveal/multiarray.pyi
index cee51975e..ee818c08a 100644
--- a/numpy/typing/tests/data/reveal/multiarray.py
+++ b/numpy/typing/tests/data/reveal/multiarray.pyi
@@ -25,6 +25,8 @@ M: np.datetime64
 b_f8 = np.broadcast(AR_f8)
 b_i8_f8_f8 = np.broadcast(AR_i8, AR_f8, AR_f8)
 
+nditer_obj: np.nditer
+
 def func(a: int) -> bool: ...
 
 reveal_type(next(b_f8))  # E: tuple[Any]
@@ -123,3 +125,8 @@ reveal_type(np.compare_chararrays("a", "b", "!=", rstrip=False))  # E: numpy.nda
 reveal_type(np.compare_chararrays(b"a", b"a", "==", True))  # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
 
 reveal_type(np.add_docstring(func, "test"))  # E: None
+
+reveal_type(np.nested_iters([AR_i8, AR_i8], [[0], [1]], flags=["c_index"]))  # E: tuple[numpy.nditer]
+reveal_type(np.nested_iters([AR_i8, AR_i8], [[0], [1]], op_flags=[["readonly", "readonly"]]))  # E: tuple[numpy.nditer]
+reveal_type(np.nested_iters([AR_i8, AR_i8], [[0], [1]], op_dtypes=np.int_))  # E: tuple[numpy.nditer]
+reveal_type(np.nested_iters([AR_i8, AR_i8], [[0], [1]], order="C", casting="no"))  # E: tuple[numpy.nditer]
diff --git a/numpy/typing/tests/data/reveal/nbit_base_example.py b/numpy/typing/tests/data/reveal/nbit_base_example.pyi
index d34f6f69a..d34f6f69a 100644
--- a/numpy/typing/tests/data/reveal/nbit_base_example.py
+++ b/numpy/typing/tests/data/reveal/nbit_base_example.pyi
diff --git a/numpy/typing/tests/data/reveal/ndarray_conversion.py b/numpy/typing/tests/data/reveal/ndarray_conversion.pyi
index 03f2faf43..03f2faf43 100644
--- a/numpy/typing/tests/data/reveal/ndarray_conversion.py
+++ b/numpy/typing/tests/data/reveal/ndarray_conversion.pyi
diff --git a/numpy/typing/tests/data/reveal/ndarray_misc.py b/numpy/typing/tests/data/reveal/ndarray_misc.pyi
index 050b82cdc..2d900c53d 100644
--- a/numpy/typing/tests/data/reveal/ndarray_misc.py
+++ b/numpy/typing/tests/data/reveal/ndarray_misc.pyi
@@ -20,6 +20,7 @@ B: SubClass
 AR_f8: NDArray[np.float64]
 AR_i8: NDArray[np.int64]
 AR_U: NDArray[np.str_]
+AR_V: NDArray[np.void]
 
 ctypes_obj = AR_f8.ctypes
 
@@ -193,3 +194,18 @@ reveal_type(operator.index(AR_i8))  # E: int
 
 reveal_type(AR_f8.__array_prepare__(B))  # E: numpy.ndarray[Any, numpy.dtype[numpy.object_]]
 reveal_type(AR_f8.__array_wrap__(B))  # E: numpy.ndarray[Any, numpy.dtype[numpy.object_]]
+
+reveal_type(AR_V[0])  # E: Any
+reveal_type(AR_V[0, 0])  # E: Any
+reveal_type(AR_V[AR_i8])  # E: Any
+reveal_type(AR_V[AR_i8, AR_i8])  # E: Any
+reveal_type(AR_V[AR_i8, None])  # E: numpy.ndarray[Any, numpy.dtype[numpy.void]]
+reveal_type(AR_V[0, ...])  # E: numpy.ndarray[Any, numpy.dtype[numpy.void]]
+reveal_type(AR_V[:])  # E: numpy.ndarray[Any, numpy.dtype[numpy.void]]
+reveal_type(AR_V["a"])  # E: numpy.ndarray[Any, numpy.dtype[Any]]
+reveal_type(AR_V[["a", "b"]])  # E: numpy.ndarray[Any, numpy.dtype[numpy.void]]
+
+reveal_type(AR_f8.dump("test_file"))  # E: None
+reveal_type(AR_f8.dump(b"test_file"))  # E: None
+with open("test_file", "wb") as f:
+    reveal_type(AR_f8.dump(f))  # E: None
diff --git a/numpy/typing/tests/data/reveal/ndarray_shape_manipulation.py b/numpy/typing/tests/data/reveal/ndarray_shape_manipulation.pyi
index a44e1cfa1..a44e1cfa1 100644
--- a/numpy/typing/tests/data/reveal/ndarray_shape_manipulation.py
+++ b/numpy/typing/tests/data/reveal/ndarray_shape_manipulation.pyi
diff --git a/numpy/typing/tests/data/reveal/nditer.py b/numpy/typing/tests/data/reveal/nditer.py
deleted file mode 100644
index de2a4b5ed..000000000
--- a/numpy/typing/tests/data/reveal/nditer.py
+++ /dev/null
@@ -1,19 +0,0 @@
-import copy
-import numpy as np
-
-nditer_obj: np.nditer
-
-with nditer_obj as context:
-    reveal_type(context)  # E: numpy.nditer
-
-reveal_type(len(nditer_obj))  # E: builtins.int
-reveal_type(copy.copy(nditer_obj))  # E: numpy.nditer
-reveal_type(next(nditer_obj))  # E: Any
-reveal_type(iter(nditer_obj))  # E: typing.Iterator[Any]
-reveal_type(nditer_obj[1])  # E: Any
-reveal_type(nditer_obj[1:5])  # E: Any
-
-nditer_obj[1] = 1
-nditer_obj[1:5] = 1
-del nditer_obj[1]
-del nditer_obj[1:5]
diff --git a/numpy/typing/tests/data/reveal/nditer.pyi b/numpy/typing/tests/data/reveal/nditer.pyi
new file mode 100644
index 000000000..473e922a2
--- /dev/null
+++ b/numpy/typing/tests/data/reveal/nditer.pyi
@@ -0,0 +1,46 @@
+import numpy as np
+
+nditer_obj: np.nditer
+
+reveal_type(np.nditer([0, 1], flags=["c_index"]))  # E: numpy.nditer
+reveal_type(np.nditer([0, 1], op_flags=[["readonly", "readonly"]]))  # E: numpy.nditer
+reveal_type(np.nditer([0, 1], op_dtypes=np.int_))  # E: numpy.nditer
+reveal_type(np.nditer([0, 1], order="C", casting="no"))  # E: numpy.nditer
+
+reveal_type(nditer_obj.dtypes)  # E: tuple[numpy.dtype[Any]]
+reveal_type(nditer_obj.finished)  # E: bool
+reveal_type(nditer_obj.has_delayed_bufalloc)  # E: bool
+reveal_type(nditer_obj.has_index)  # E: bool
+reveal_type(nditer_obj.has_multi_index)  # E: bool
+reveal_type(nditer_obj.index)  # E: int
+reveal_type(nditer_obj.iterationneedsapi)  # E: bool
+reveal_type(nditer_obj.iterindex)  # E: int
+reveal_type(nditer_obj.iterrange)  # E: tuple[builtins.int]
+reveal_type(nditer_obj.itersize)  # E: int
+reveal_type(nditer_obj.itviews)  # E: tuple[numpy.ndarray[Any, numpy.dtype[Any]]]
+reveal_type(nditer_obj.multi_index)  # E: tuple[builtins.int]
+reveal_type(nditer_obj.ndim)  # E: int
+reveal_type(nditer_obj.nop)  # E: int
+reveal_type(nditer_obj.operands)  # E: tuple[numpy.ndarray[Any, numpy.dtype[Any]]]
+reveal_type(nditer_obj.shape)  # E: tuple[builtins.int]
+reveal_type(nditer_obj.value)  # E: tuple[numpy.ndarray[Any, numpy.dtype[Any]]]
+
+reveal_type(nditer_obj.close())  # E: None
+reveal_type(nditer_obj.copy())  # E: numpy.nditer
+reveal_type(nditer_obj.debug_print())  # E: None
+reveal_type(nditer_obj.enable_external_loop())  # E: None
+reveal_type(nditer_obj.iternext())  # E: bool
+reveal_type(nditer_obj.remove_axis(0))  # E: None
+reveal_type(nditer_obj.remove_multi_index())  # E: None
+reveal_type(nditer_obj.reset())  # E: None
+
+reveal_type(len(nditer_obj))  # E: int
+reveal_type(iter(nditer_obj))  # E: Iterator[builtins.tuple[numpy.ndarray[Any, numpy.dtype[Any]]]]
+reveal_type(next(nditer_obj))  # E: tuple[numpy.ndarray[Any, numpy.dtype[Any]]]
+reveal_type(nditer_obj.__copy__())  # E: numpy.nditer
+with nditer_obj as f:
+    reveal_type(f)  # E: numpy.nditer
+reveal_type(nditer_obj[0])  # E: numpy.ndarray[Any, numpy.dtype[Any]]
+reveal_type(nditer_obj[:])  # E: tuple[numpy.ndarray[Any, numpy.dtype[Any]]]
+nditer_obj[0] = 0
+nditer_obj[:] = [0, 1]
diff --git a/numpy/typing/tests/data/reveal/nested_sequence.pyi b/numpy/typing/tests/data/reveal/nested_sequence.pyi
new file mode 100644
index 000000000..4d3aad467
--- /dev/null
+++ b/numpy/typing/tests/data/reveal/nested_sequence.pyi
@@ -0,0 +1,24 @@
+from typing import Sequence, Tuple, List, Any
+import numpy.typing as npt
+
+a: Sequence[int]
+b: Sequence[Sequence[int]]
+c: Sequence[Sequence[Sequence[int]]]
+d: Sequence[Sequence[Sequence[Sequence[int]]]]
+e: Sequence[bool]
+f: Tuple[int, ...]
+g: List[int]
+h: Sequence[Any]
+
+def func(a: npt._NestedSequence[int]) -> None:
+    ...
+
+reveal_type(func(a))  # E: None
+reveal_type(func(b))  # E: None
+reveal_type(func(c))  # E: None
+reveal_type(func(d))  # E: None
+reveal_type(func(e))  # E: None
+reveal_type(func(f))  # E: None
+reveal_type(func(g))  # E: None
+reveal_type(func(h))  # E: None
+reveal_type(func(range(15)))  # E: None
diff --git a/numpy/typing/tests/data/reveal/npyio.py b/numpy/typing/tests/data/reveal/npyio.pyi
index d66201dd3..bee97a8e1 100644
--- a/numpy/typing/tests/data/reveal/npyio.py
+++ b/numpy/typing/tests/data/reveal/npyio.pyi
@@ -82,10 +82,10 @@ reveal_type(np.genfromtxt(str_path, delimiter="\n"))  # E: numpy.ndarray[Any, nu
 reveal_type(np.genfromtxt(str_path, ndmin=2))  # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
 reveal_type(np.genfromtxt(["1", "2", "3"], ndmin=2))  # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
 
-reveal_type(np.recfromtxt(bytes_file))  # E: numpy.recarray[Any, numpy.dtype[numpy.void]]
+reveal_type(np.recfromtxt(bytes_file))  # E: numpy.recarray[Any, numpy.dtype[numpy.record]]
 reveal_type(np.recfromtxt(pathlib_path, usemask=True))  # E: numpy.ma.mrecords.MaskedRecords[Any, numpy.dtype[numpy.void]]
-reveal_type(np.recfromtxt(["1", "2", "3"]))  # E: numpy.recarray[Any, numpy.dtype[numpy.void]]
+reveal_type(np.recfromtxt(["1", "2", "3"]))  # E: numpy.recarray[Any, numpy.dtype[numpy.record]]
 
-reveal_type(np.recfromcsv(bytes_file))  # E: numpy.recarray[Any, numpy.dtype[numpy.void]]
+reveal_type(np.recfromcsv(bytes_file))  # E: numpy.recarray[Any, numpy.dtype[numpy.record]]
 reveal_type(np.recfromcsv(pathlib_path, usemask=True))  # E: numpy.ma.mrecords.MaskedRecords[Any, numpy.dtype[numpy.void]]
-reveal_type(np.recfromcsv(["1", "2", "3"]))  # E: numpy.recarray[Any, numpy.dtype[numpy.void]]
+reveal_type(np.recfromcsv(["1", "2", "3"]))  # E: numpy.recarray[Any, numpy.dtype[numpy.record]]
diff --git a/numpy/typing/tests/data/reveal/numeric.py b/numpy/typing/tests/data/reveal/numeric.py
deleted file mode 100644
index ec6e47ca0..000000000
--- a/numpy/typing/tests/data/reveal/numeric.py
+++ /dev/null
@@ -1,89 +0,0 @@
-"""
-Tests for :mod:`numpy.core.numeric`.
-
-Does not include tests which fall under ``array_constructors``.
-
-"""
-
-from typing import List
-import numpy as np
-
-class SubClass(np.ndarray):
-    ...
-
-i8: np.int64
-
-A: np.ndarray
-B: List[int]
-C: SubClass
-
-reveal_type(np.count_nonzero(i8))  # E: int
-reveal_type(np.count_nonzero(A))  # E: int
-reveal_type(np.count_nonzero(B))  # E: int
-reveal_type(np.count_nonzero(A, keepdims=True))  # E: Any
-reveal_type(np.count_nonzero(A, axis=0))  # E: Any
-
-reveal_type(np.isfortran(i8))  # E: bool
-reveal_type(np.isfortran(A))  # E: bool
-
-reveal_type(np.argwhere(i8))  # E: numpy.ndarray[Any, Any]
-reveal_type(np.argwhere(A))  # E: numpy.ndarray[Any, Any]
-
-reveal_type(np.flatnonzero(i8))  # E: numpy.ndarray[Any, Any]
-reveal_type(np.flatnonzero(A))  # E: numpy.ndarray[Any, Any]
-
-reveal_type(np.correlate(B, A, mode="valid"))  # E: numpy.ndarray[Any, Any]
-reveal_type(np.correlate(A, A, mode="same"))  # E: numpy.ndarray[Any, Any]
-
-reveal_type(np.convolve(B, A, mode="valid"))  # E: numpy.ndarray[Any, Any]
-reveal_type(np.convolve(A, A, mode="same"))  # E: numpy.ndarray[Any, Any]
-
-reveal_type(np.outer(i8, A))  # E: numpy.ndarray[Any, Any]
-reveal_type(np.outer(B, A))  # E: numpy.ndarray[Any, Any]
-reveal_type(np.outer(A, A))  # E: numpy.ndarray[Any, Any]
-reveal_type(np.outer(A, A, out=C))  # E: SubClass
-
-reveal_type(np.tensordot(B, A))  # E: numpy.ndarray[Any, Any]
-reveal_type(np.tensordot(A, A))  # E: numpy.ndarray[Any, Any]
-reveal_type(np.tensordot(A, A, axes=0))  # E: numpy.ndarray[Any, Any]
-reveal_type(np.tensordot(A, A, axes=(0, 1)))  # E: numpy.ndarray[Any, Any]
-
-reveal_type(np.isscalar(i8))  # E: bool
-reveal_type(np.isscalar(A))  # E: bool
-reveal_type(np.isscalar(B))  # E: bool
-
-reveal_type(np.roll(A, 1))  # E: numpy.ndarray[Any, Any]
-reveal_type(np.roll(A, (1, 2)))  # E: numpy.ndarray[Any, Any]
-reveal_type(np.roll(B, 1))  # E: numpy.ndarray[Any, Any]
-
-reveal_type(np.rollaxis(A, 0, 1))  # E: numpy.ndarray[Any, Any]
-
-reveal_type(np.moveaxis(A, 0, 1))  # E: numpy.ndarray[Any, Any]
-reveal_type(np.moveaxis(A, (0, 1), (1, 2)))  # E: numpy.ndarray[Any, Any]
-
-reveal_type(np.cross(B, A))  # E: numpy.ndarray[Any, Any]
-reveal_type(np.cross(A, A))  # E: numpy.ndarray[Any, Any]
-
-reveal_type(np.indices([0, 1, 2]))  # E: numpy.ndarray[Any, Any]
-reveal_type(np.indices([0, 1, 2], sparse=False))  # E: numpy.ndarray[Any, Any]
-reveal_type(np.indices([0, 1, 2], sparse=True))  # E: tuple[numpy.ndarray[Any, Any]]
-
-reveal_type(np.binary_repr(1))  # E: str
-
-reveal_type(np.base_repr(1))  # E: str
-
-reveal_type(np.allclose(i8, A))  # E: bool
-reveal_type(np.allclose(B, A))  # E: bool
-reveal_type(np.allclose(A, A))  # E: bool
-
-reveal_type(np.isclose(i8, A))  # E: Any
-reveal_type(np.isclose(B, A))  # E: Any
-reveal_type(np.isclose(A, A))  # E: Any
-
-reveal_type(np.array_equal(i8, A))  # E: bool
-reveal_type(np.array_equal(B, A))  # E: bool
-reveal_type(np.array_equal(A, A))  # E: bool
-
-reveal_type(np.array_equiv(i8, A))  # E: bool
-reveal_type(np.array_equiv(B, A))  # E: bool
-reveal_type(np.array_equiv(A, A))  # E: bool
diff --git a/numpy/typing/tests/data/reveal/numeric.pyi b/numpy/typing/tests/data/reveal/numeric.pyi
new file mode 100644
index 000000000..9b3b1419d
--- /dev/null
+++ b/numpy/typing/tests/data/reveal/numeric.pyi
@@ -0,0 +1,134 @@
+"""
+Tests for :mod:`numpy.core.numeric`.
+
+Does not include tests which fall under ``array_constructors``.
+
+"""
+
+from typing import List
+import numpy as np
+import numpy.typing as npt
+
+class SubClass(npt.NDArray[np.int64]):
+    ...
+ +i8: np.int64 + +AR_b: npt.NDArray[np.bool_] +AR_u8: npt.NDArray[np.uint64] +AR_i8: npt.NDArray[np.int64] +AR_f8: npt.NDArray[np.float64] +AR_c16: npt.NDArray[np.complex128] +AR_m: npt.NDArray[np.timedelta64] +AR_O: npt.NDArray[np.object_] + +B: List[int] +C: SubClass + +reveal_type(np.count_nonzero(i8)) # E: int +reveal_type(np.count_nonzero(AR_i8)) # E: int +reveal_type(np.count_nonzero(B)) # E: int +reveal_type(np.count_nonzero(AR_i8, keepdims=True)) # E: Any +reveal_type(np.count_nonzero(AR_i8, axis=0)) # E: Any + +reveal_type(np.isfortran(i8)) # E: bool +reveal_type(np.isfortran(AR_i8)) # E: bool + +reveal_type(np.argwhere(i8)) # E: numpy.ndarray[Any, numpy.dtype[{intp}]] +reveal_type(np.argwhere(AR_i8)) # E: numpy.ndarray[Any, numpy.dtype[{intp}]] + +reveal_type(np.flatnonzero(i8)) # E: numpy.ndarray[Any, numpy.dtype[{intp}]] +reveal_type(np.flatnonzero(AR_i8)) # E: numpy.ndarray[Any, numpy.dtype[{intp}]] + +reveal_type(np.correlate(B, AR_i8, mode="valid")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]] +reveal_type(np.correlate(AR_i8, AR_i8, mode="same")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]] +reveal_type(np.correlate(AR_b, AR_b)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(np.correlate(AR_b, AR_u8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[Any]]] +reveal_type(np.correlate(AR_i8, AR_b)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]] +reveal_type(np.correlate(AR_i8, AR_f8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]] +reveal_type(np.correlate(AR_i8, AR_c16)) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]] +reveal_type(np.correlate(AR_i8, AR_m)) # E: numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]] +reveal_type(np.correlate(AR_O, AR_O)) # E: numpy.ndarray[Any, numpy.dtype[numpy.object_]] + +reveal_type(np.convolve(B, AR_i8, mode="valid")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]] +reveal_type(np.convolve(AR_i8, AR_i8, mode="same")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]] +reveal_type(np.convolve(AR_b, AR_b)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(np.convolve(AR_b, AR_u8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[Any]]] +reveal_type(np.convolve(AR_i8, AR_b)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]] +reveal_type(np.convolve(AR_i8, AR_f8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]] +reveal_type(np.convolve(AR_i8, AR_c16)) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]] +reveal_type(np.convolve(AR_i8, AR_m)) # E: numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]] +reveal_type(np.convolve(AR_O, AR_O)) # E: numpy.ndarray[Any, numpy.dtype[numpy.object_]] + +reveal_type(np.outer(i8, AR_i8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]] +reveal_type(np.outer(B, AR_i8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]] +reveal_type(np.outer(AR_i8, AR_i8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]] +reveal_type(np.outer(AR_i8, AR_i8, out=C)) # E: SubClass +reveal_type(np.outer(AR_b, AR_b)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(np.outer(AR_b, AR_u8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[Any]]] +reveal_type(np.outer(AR_i8, AR_b)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]] +reveal_type(np.convolve(AR_i8, AR_f8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]] +reveal_type(np.outer(AR_i8, AR_c16)) # E: 
numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]] +reveal_type(np.outer(AR_i8, AR_m)) # E: numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]] +reveal_type(np.outer(AR_O, AR_O)) # E: numpy.ndarray[Any, numpy.dtype[numpy.object_]] + +reveal_type(np.tensordot(B, AR_i8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]] +reveal_type(np.tensordot(AR_i8, AR_i8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]] +reveal_type(np.tensordot(AR_i8, AR_i8, axes=0)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]] +reveal_type(np.tensordot(AR_i8, AR_i8, axes=(0, 1))) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]] +reveal_type(np.tensordot(AR_b, AR_b)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(np.tensordot(AR_b, AR_u8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[Any]]] +reveal_type(np.tensordot(AR_i8, AR_b)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]] +reveal_type(np.tensordot(AR_i8, AR_f8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]] +reveal_type(np.tensordot(AR_i8, AR_c16)) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]] +reveal_type(np.tensordot(AR_i8, AR_m)) # E: numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]] +reveal_type(np.tensordot(AR_O, AR_O)) # E: numpy.ndarray[Any, numpy.dtype[numpy.object_]] + +reveal_type(np.isscalar(i8)) # E: bool +reveal_type(np.isscalar(AR_i8)) # E: bool +reveal_type(np.isscalar(B)) # E: bool + +reveal_type(np.roll(AR_i8, 1)) # E: numpy.ndarray[Any, numpy.dtype[{int64}]] +reveal_type(np.roll(AR_i8, (1, 2))) # E: numpy.ndarray[Any, numpy.dtype[{int64}]] +reveal_type(np.roll(B, 1)) # E: numpy.ndarray[Any, numpy.dtype[Any]] + +reveal_type(np.rollaxis(AR_i8, 0, 1)) # E: numpy.ndarray[Any, numpy.dtype[{int64}]] + +reveal_type(np.moveaxis(AR_i8, 0, 1)) # E: numpy.ndarray[Any, numpy.dtype[{int64}]] +reveal_type(np.moveaxis(AR_i8, (0, 1), (1, 2))) # E: numpy.ndarray[Any, numpy.dtype[{int64}]] + +reveal_type(np.cross(B, AR_i8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]] +reveal_type(np.cross(AR_i8, AR_i8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]] +reveal_type(np.cross(AR_b, AR_u8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[Any]]] +reveal_type(np.cross(AR_i8, AR_b)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]] +reveal_type(np.cross(AR_i8, AR_f8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]] +reveal_type(np.cross(AR_i8, AR_c16)) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]] +reveal_type(np.cross(AR_O, AR_O)) # E: numpy.ndarray[Any, numpy.dtype[numpy.object_]] + +reveal_type(np.indices([0, 1, 2])) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(np.indices([0, 1, 2], sparse=True)) # E: tuple[numpy.ndarray[Any, numpy.dtype[{int_}]]] +reveal_type(np.indices([0, 1, 2], dtype=np.float64)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]] +reveal_type(np.indices([0, 1, 2], sparse=True, dtype=np.float64)) # E: tuple[numpy.ndarray[Any, numpy.dtype[{float64}]]] +reveal_type(np.indices([0, 1, 2], dtype=float)) # E: numpy.ndarray[Any, numpy.dtype[Any]] +reveal_type(np.indices([0, 1, 2], sparse=True, dtype=float)) # E: tuple[numpy.ndarray[Any, numpy.dtype[Any]]] + +reveal_type(np.binary_repr(1)) # E: str + +reveal_type(np.base_repr(1)) # E: str + +reveal_type(np.allclose(i8, AR_i8)) # E: bool +reveal_type(np.allclose(B, AR_i8)) # E: bool +reveal_type(np.allclose(AR_i8, AR_i8)) # E: bool + 
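The allclose group above and the isclose group below pin down a genuine runtime difference: np.allclose reduces the comparison to a single Python bool, while np.isclose stays elementwise (scalar only when both inputs are scalars). A short runnable sketch, assuming any recent NumPy:

    import numpy as np

    a = np.array([1.0, 2.0])
    b = np.array([1.0, 2.0 + 1e-9])
    print(np.allclose(a, b))  # True -- one bool for the whole array
    print(np.isclose(a, b))   # [ True  True] -- one boolean per element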
+reveal_type(np.isclose(i8, i8)) # E: numpy.bool_ +reveal_type(np.isclose(i8, AR_i8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(np.isclose(B, AR_i8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(np.isclose(AR_i8, AR_i8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] + +reveal_type(np.array_equal(i8, AR_i8)) # E: bool +reveal_type(np.array_equal(B, AR_i8)) # E: bool +reveal_type(np.array_equal(AR_i8, AR_i8)) # E: bool + +reveal_type(np.array_equiv(i8, AR_i8)) # E: bool +reveal_type(np.array_equiv(B, AR_i8)) # E: bool +reveal_type(np.array_equiv(AR_i8, AR_i8)) # E: bool diff --git a/numpy/typing/tests/data/reveal/numerictypes.py b/numpy/typing/tests/data/reveal/numerictypes.pyi index c50a3a3d6..c50a3a3d6 100644 --- a/numpy/typing/tests/data/reveal/numerictypes.py +++ b/numpy/typing/tests/data/reveal/numerictypes.pyi diff --git a/numpy/typing/tests/data/reveal/random.py b/numpy/typing/tests/data/reveal/random.pyi index 6fc35aced..6fc35aced 100644 --- a/numpy/typing/tests/data/reveal/random.py +++ b/numpy/typing/tests/data/reveal/random.pyi diff --git a/numpy/typing/tests/data/reveal/rec.pyi b/numpy/typing/tests/data/reveal/rec.pyi new file mode 100644 index 000000000..2fa8cc7b9 --- /dev/null +++ b/numpy/typing/tests/data/reveal/rec.pyi @@ -0,0 +1,106 @@ +import io +from typing import Any, List + +import numpy as np +import numpy.typing as npt + +AR_i8: npt.NDArray[np.int64] +REC_AR_V: np.recarray[Any, np.dtype[np.record]] +AR_LIST: List[npt.NDArray[np.int64]] + +format_parser: np.format_parser +record: np.record +file_obj: io.BufferedIOBase + +reveal_type(np.format_parser( # E: numpy.format_parser + formats=[np.float64, np.int64, np.bool_], + names=["f8", "i8", "?"], + titles=None, + aligned=True, +)) +reveal_type(format_parser.dtype) # E: numpy.dtype[numpy.void] + +reveal_type(record.field_a) # E: Any +reveal_type(record.field_b) # E: Any +reveal_type(record["field_a"]) # E: Any +reveal_type(record["field_b"]) # E: Any +reveal_type(record.pprint()) # E: str +record.field_c = 5 + +reveal_type(REC_AR_V.field(0)) # E: Any +reveal_type(REC_AR_V.field("field_a")) # E: Any +reveal_type(REC_AR_V.field(0, AR_i8)) # E: None +reveal_type(REC_AR_V.field("field_a", AR_i8)) # E: None +reveal_type(REC_AR_V["field_a"]) # E: Any +reveal_type(REC_AR_V.field_a) # E: Any + +reveal_type(np.recarray( # numpy.recarray[Any, numpy.dtype[numpy.record]] + shape=(10, 5), + formats=[np.float64, np.int64, np.bool_], + order="K", + byteorder="|", +)) +reveal_type(np.recarray( # numpy.recarray[Any, numpy.dtype[Any]] + shape=(10, 5), + dtype=[("f8", np.float64), ("i8", np.int64)], + strides=(5, 5), +)) + +reveal_type(np.rec.fromarrays( # numpy.recarray[Any, numpy.dtype[numpy.record]] + AR_LIST, +)) +reveal_type(np.rec.fromarrays( # numpy.recarray[Any, numpy.dtype[Any]] + AR_LIST, + dtype=np.int64, +)) +reveal_type(np.rec.fromarrays( # numpy.recarray[Any, numpy.dtype[Any]] + AR_LIST, + formats=[np.int64, np.float64], + names=["i8", "f8"] +)) + +reveal_type(np.rec.fromrecords( # numpy.recarray[Any, numpy.dtype[numpy.record]] + (1, 1.5), +)) +reveal_type(np.rec.fromrecords( # numpy.recarray[Any, numpy.dtype[numpy.record]] + [(1, 1.5)], + dtype=[("i8", np.int64), ("f8", np.float64)], +)) +reveal_type(np.rec.fromrecords( # numpy.recarray[Any, numpy.dtype[numpy.record]] + REC_AR_V, + formats=[np.int64, np.float64], + names=["i8", "f8"] +)) + +reveal_type(np.rec.fromstring( # numpy.recarray[Any, numpy.dtype[numpy.record]] + b"(1, 1.5)", + dtype=[("i8", np.int64), ("f8", np.float64)], +)) 
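For context, the runtime behaviour these record-array annotations describe (a sketch assuming any recent NumPy; the field names here are arbitrary):

    import numpy as np

    # np.rec.fromrecords infers a record dtype from the tuples when only
    # field names are given; fields are then reachable as attributes.
    r = np.rec.fromrecords([(1, 1.5)], names=["i8", "f8"])
    print(r.i8, r.f8)  # [1] [1.5]
    print(r.dtype)     # e.g. (numpy.record, [('i8', '<i8'), ('f8', '<f8')])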
+reveal_type(np.rec.fromstring( # numpy.recarray[Any, numpy.dtype[numpy.record]] + REC_AR_V, + formats=[np.int64, np.float64], + names=["i8", "f8"] +)) + +reveal_type(np.rec.fromfile( # numpy.recarray[Any, numpy.dtype[Any]] + "test_file.txt", + dtype=[("i8", np.int64), ("f8", np.float64)], +)) +reveal_type(np.rec.fromfile( # numpy.recarray[Any, numpy.dtype[numpy.record]] + file_obj, + formats=[np.int64, np.float64], + names=["i8", "f8"] +)) + +reveal_type(np.rec.array( # numpy.recarray[Any, numpy.dtype[{int64}]] + AR_i8, +)) +reveal_type(np.rec.array( # numpy.recarray[Any, numpy.dtype[Any]] + [(1, 1.5)], + dtype=[("i8", np.int64), ("f8", np.float64)], +)) +reveal_type(np.rec.array( # numpy.recarray[Any, numpy.dtype[numpy.record]] + [(1, 1.5)], + formats=[np.int64, np.float64], + names=["i8", "f8"] +)) diff --git a/numpy/typing/tests/data/reveal/scalars.py b/numpy/typing/tests/data/reveal/scalars.pyi index e83d579e9..a95f8f6f2 100644 --- a/numpy/typing/tests/data/reveal/scalars.py +++ b/numpy/typing/tests/data/reveal/scalars.pyi @@ -10,6 +10,7 @@ c16: np.complex128 m: np.timedelta64 U: np.str_ S: np.bytes_ +V: np.void reveal_type(c8.real) # E: {float32} reveal_type(c8.imag) # E: {float32} @@ -36,6 +37,11 @@ reveal_type(c16.imag) # E: {float64} reveal_type(np.unicode_('foo')) # E: numpy.str_ reveal_type(np.str0('foo')) # E: numpy.str_ +reveal_type(V[0]) # E: Any +reveal_type(V["field1"]) # E: Any +reveal_type(V[["field1", "field2"]]) # E: numpy.void +V[0] = 5 + # Aliases reveal_type(np.unicode_()) # E: numpy.str_ reveal_type(np.str0()) # E: numpy.str_ diff --git a/numpy/typing/tests/data/reveal/shape_base.py b/numpy/typing/tests/data/reveal/shape_base.pyi index 57633defb..57633defb 100644 --- a/numpy/typing/tests/data/reveal/shape_base.py +++ b/numpy/typing/tests/data/reveal/shape_base.pyi diff --git a/numpy/typing/tests/data/reveal/stride_tricks.py b/numpy/typing/tests/data/reveal/stride_tricks.pyi index 152d9cea6..152d9cea6 100644 --- a/numpy/typing/tests/data/reveal/stride_tricks.py +++ b/numpy/typing/tests/data/reveal/stride_tricks.pyi diff --git a/numpy/typing/tests/data/reveal/testing.py b/numpy/typing/tests/data/reveal/testing.pyi index 2b040ff60..2b040ff60 100644 --- a/numpy/typing/tests/data/reveal/testing.py +++ b/numpy/typing/tests/data/reveal/testing.pyi diff --git a/numpy/typing/tests/data/reveal/twodim_base.py b/numpy/typing/tests/data/reveal/twodim_base.pyi index b95fbc71e..b95fbc71e 100644 --- a/numpy/typing/tests/data/reveal/twodim_base.py +++ b/numpy/typing/tests/data/reveal/twodim_base.pyi diff --git a/numpy/typing/tests/data/reveal/type_check.py b/numpy/typing/tests/data/reveal/type_check.pyi index 416dd42a8..416dd42a8 100644 --- a/numpy/typing/tests/data/reveal/type_check.py +++ b/numpy/typing/tests/data/reveal/type_check.pyi diff --git a/numpy/typing/tests/data/reveal/ufunc_config.py b/numpy/typing/tests/data/reveal/ufunc_config.pyi index 26be80314..6848a3cb5 100644 --- a/numpy/typing/tests/data/reveal/ufunc_config.py +++ b/numpy/typing/tests/data/reveal/ufunc_config.pyi @@ -17,9 +17,9 @@ reveal_type(np.geterr()) # E: TypedDict('numpy.core._ufunc_config._ErrDict' reveal_type(np.setbufsize(4096)) # E: int reveal_type(np.getbufsize()) # E: int -reveal_type(np.seterrcall(func)) # E: Union[None, def (builtins.str, builtins.int) -> Any, numpy.core._ufunc_config._SupportsWrite] -reveal_type(np.seterrcall(Write())) # E: Union[None, def (builtins.str, builtins.int) -> Any, numpy.core._ufunc_config._SupportsWrite] -reveal_type(np.geterrcall()) # E: Union[None, def (builtins.str, 
builtins.int) -> Any, numpy.core._ufunc_config._SupportsWrite] +reveal_type(np.seterrcall(func)) # E: Union[None, def (builtins.str, builtins.int) -> Any, numpy._SupportsWrite[builtins.str]] +reveal_type(np.seterrcall(Write())) # E: Union[None, def (builtins.str, builtins.int) -> Any, numpy._SupportsWrite[builtins.str]] +reveal_type(np.geterrcall()) # E: Union[None, def (builtins.str, builtins.int) -> Any, numpy._SupportsWrite[builtins.str]] reveal_type(np.errstate(call=func, all="call")) # E: numpy.errstate[def (a: builtins.str, b: builtins.int)] reveal_type(np.errstate(call=Write(), divide="log", over="log")) # E: numpy.errstate[ufunc_config.Write] diff --git a/numpy/typing/tests/data/reveal/ufunclike.py b/numpy/typing/tests/data/reveal/ufunclike.pyi index 8b3aea7ce..8b3aea7ce 100644 --- a/numpy/typing/tests/data/reveal/ufunclike.py +++ b/numpy/typing/tests/data/reveal/ufunclike.pyi diff --git a/numpy/typing/tests/data/reveal/ufuncs.py b/numpy/typing/tests/data/reveal/ufuncs.pyi index ade45577c..ade45577c 100644 --- a/numpy/typing/tests/data/reveal/ufuncs.py +++ b/numpy/typing/tests/data/reveal/ufuncs.pyi diff --git a/numpy/typing/tests/data/reveal/version.pyi b/numpy/typing/tests/data/reveal/version.pyi new file mode 100644 index 000000000..e53837647 --- /dev/null +++ b/numpy/typing/tests/data/reveal/version.pyi @@ -0,0 +1,8 @@ +import numpy.version + +reveal_type(numpy.version.version) # E: str +reveal_type(numpy.version.__version__) # E: str +reveal_type(numpy.version.full_version) # E: str +reveal_type(numpy.version.git_revision) # E: str +reveal_type(numpy.version.release) # E: bool +reveal_type(numpy.version.short_version) # E: str diff --git a/numpy/typing/tests/data/reveal/warnings_and_errors.py b/numpy/typing/tests/data/reveal/warnings_and_errors.pyi index 3f20a0135..3f20a0135 100644 --- a/numpy/typing/tests/data/reveal/warnings_and_errors.py +++ b/numpy/typing/tests/data/reveal/warnings_and_errors.pyi diff --git a/numpy/typing/tests/test_isfile.py b/numpy/typing/tests/test_isfile.py index b617b3873..a898b3e28 100644 --- a/numpy/typing/tests/test_isfile.py +++ b/numpy/typing/tests/test_isfile.py @@ -8,9 +8,7 @@ ROOT = Path(np.__file__).parents[0] FILES = [ ROOT / "py.typed", ROOT / "__init__.pyi", - ROOT / "char.pyi", ROOT / "ctypeslib.pyi", - ROOT / "rec.pyi", ROOT / "core" / "__init__.pyi", ROOT / "distutils" / "__init__.pyi", ROOT / "f2py" / "__init__.pyi", diff --git a/numpy/typing/tests/test_runtime.py b/numpy/typing/tests/test_runtime.py index 151b06bed..5b5df49dc 100644 --- a/numpy/typing/tests/test_runtime.py +++ b/numpy/typing/tests/test_runtime.py @@ -3,7 +3,7 @@ from __future__ import annotations import sys -from typing import get_type_hints, Union, Tuple, NamedTuple, get_args, get_origin +from typing import get_type_hints, Union, NamedTuple, get_args, get_origin import pytest import numpy as np @@ -12,7 +12,7 @@ import numpy.typing as npt class TypeTup(NamedTuple): typ: type - args: Tuple[type, ...] + args: tuple[type, ...] 
origin: None | type diff --git a/numpy/typing/tests/test_typing.py b/numpy/typing/tests/test_typing.py index 35558c880..2dcfd6082 100644 --- a/numpy/typing/tests/test_typing.py +++ b/numpy/typing/tests/test_typing.py @@ -1,13 +1,17 @@ +from __future__ import annotations + import importlib.util import itertools import os import re import shutil from collections import defaultdict -from typing import Optional, IO, Dict, List +from collections.abc import Iterator +from typing import IO, TYPE_CHECKING import pytest import numpy as np +import numpy.typing as npt from numpy.typing.mypy_plugin import ( _PRECISION_DICT, _EXTENDED_PRECISION_LIST, @@ -21,6 +25,10 @@ except ImportError: else: NO_MYPY = False +if TYPE_CHECKING: + # We need this as annotation, but it's located in a private namespace. + # As a compromise, do *not* import it during runtime + from _pytest.mark.structures import ParameterSet DATA_DIR = os.path.join(os.path.dirname(__file__), "data") PASS_DIR = os.path.join(DATA_DIR, "pass") @@ -32,11 +40,11 @@ CACHE_DIR = os.path.join(DATA_DIR, ".mypy_cache") #: A dictionary with file names as keys and lists of the mypy stdout as values. #: To-be populated by `run_mypy`. -OUTPUT_MYPY: Dict[str, List[str]] = {} +OUTPUT_MYPY: dict[str, list[str]] = {} def _key_func(key: str) -> str: - """Split at the first occurance of the ``:`` character. + """Split at the first occurrence of the ``:`` character. Windows drive-letters (*e.g.* ``C:``) are ignored herein. """ @@ -62,7 +70,10 @@ def run_mypy() -> None: NUMPY_TYPING_TEST_CLEAR_CACHE=0 pytest numpy/typing/tests """ - if os.path.isdir(CACHE_DIR) and bool(os.environ.get("NUMPY_TYPING_TEST_CLEAR_CACHE", True)): + if ( + os.path.isdir(CACHE_DIR) + and bool(os.environ.get("NUMPY_TYPING_TEST_CLEAR_CACHE", True)) + ): shutil.rmtree(CACHE_DIR) for directory in (PASS_DIR, REVEAL_DIR, FAIL_DIR, MISC_DIR): @@ -85,25 +96,19 @@ def run_mypy() -> None: OUTPUT_MYPY.update((k, list(v)) for k, v in iterator if k) -def get_test_cases(directory): +def get_test_cases(directory: str) -> Iterator[ParameterSet]: for root, _, files in os.walk(directory): for fname in files: - if os.path.splitext(fname)[-1] == ".py": + short_fname, ext = os.path.splitext(fname) + if ext in (".pyi", ".py"): fullpath = os.path.join(root, fname) - # Use relative path for nice py.test name - relpath = os.path.relpath(fullpath, start=directory) - - yield pytest.param( - fullpath, - # Manually specify a name for the test - id=relpath, - ) + yield pytest.param(fullpath, id=short_fname) @pytest.mark.slow @pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed") @pytest.mark.parametrize("path", get_test_cases(PASS_DIR)) -def test_success(path): +def test_success(path) -> None: # Alias `OUTPUT_MYPY` so that it appears in the local namespace output_mypy = OUTPUT_MYPY if path in output_mypy: @@ -115,7 +120,7 @@ def test_success(path): @pytest.mark.slow @pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed") @pytest.mark.parametrize("path", get_test_cases(FAIL_DIR)) -def test_fail(path): +def test_fail(path: str) -> None: __tracebackhide__ = True with open(path) as fin: @@ -138,38 +143,86 @@ def test_fail(path): for i, line in enumerate(lines): lineno = i + 1 - if line.startswith('#') or (" E:" not in line and lineno not in errors): + if ( + line.startswith('#') + or (" E:" not in line and lineno not in errors) + ): continue target_line = lines[lineno - 1] if "# E:" in target_line: - marker = target_line.split("# E:")[-1].strip() - expected_error = errors.get(lineno) - _test_fail(path, 
marker, expected_error, lineno) + expression, _, marker = target_line.partition(" # E: ") + expected_error = errors[lineno].strip() + marker = marker.strip() + _test_fail(path, expression, marker, expected_error, lineno) else: - pytest.fail(f"Unexpected mypy output\n\n{errors[lineno]}") + pytest.fail( + f"Unexpected mypy output at line {lineno}\n\n{errors[lineno]}" + ) _FAIL_MSG1 = """Extra error at line {} +Expression: {} Extra error: {!r} """ _FAIL_MSG2 = """Error mismatch at line {} +Expression: {} Expected error: {!r} Observed error: {!r} """ -def _test_fail(path: str, error: str, expected_error: Optional[str], lineno: int) -> None: +def _test_fail( + path: str, + expression: str, + error: str, + expected_error: None | str, + lineno: int, +) -> None: if expected_error is None: - raise AssertionError(_FAIL_MSG1.format(lineno, error)) + raise AssertionError(_FAIL_MSG1.format(lineno, expression, error)) elif error not in expected_error: - raise AssertionError(_FAIL_MSG2.format(lineno, expected_error, error)) + raise AssertionError(_FAIL_MSG2.format( + lineno, expression, expected_error, error + )) + + +def _construct_ctypes_dict() -> dict[str, str]: + dct = { + "ubyte": "c_ubyte", + "ushort": "c_ushort", + "uintc": "c_uint", + "uint": "c_ulong", + "ulonglong": "c_ulonglong", + "byte": "c_byte", + "short": "c_short", + "intc": "c_int", + "int_": "c_long", + "longlong": "c_longlong", + "single": "c_float", + "double": "c_double", + "longdouble": "c_longdouble", + } + + # Match `ctypes` names to the first ctypes type with a given kind and + # precision, e.g. {"c_double": "c_double", "c_longdouble": "c_double"} + # if both types represent 64-bit floats. + # In this context "first" is defined by the order of `dct` + ret = {} + visited: dict[tuple[str, int], str] = {} + for np_name, ct_name in dct.items(): + np_scalar = getattr(np, np_name)() + + # Find the first `ctypes` type for a given `kind`/`itemsize` combo + key = (np_scalar.dtype.kind, np_scalar.dtype.itemsize) + ret[ct_name] = visited.setdefault(key, f"ctypes.{ct_name}") + return ret -def _construct_format_dict(): +def _construct_format_dict() -> dict[str, str]: dct = {k.split(".")[-1]: v.replace("numpy", "numpy.typing") for k, v in _PRECISION_DICT.items()} @@ -193,12 +246,18 @@ def _construct_format_dict(): "float96": "numpy.floating[numpy.typing._96Bit]", "float128": "numpy.floating[numpy.typing._128Bit]", "float256": "numpy.floating[numpy.typing._256Bit]", - "complex64": "numpy.complexfloating[numpy.typing._32Bit, numpy.typing._32Bit]", - "complex128": "numpy.complexfloating[numpy.typing._64Bit, numpy.typing._64Bit]", - "complex160": "numpy.complexfloating[numpy.typing._80Bit, numpy.typing._80Bit]", - "complex192": "numpy.complexfloating[numpy.typing._96Bit, numpy.typing._96Bit]", - "complex256": "numpy.complexfloating[numpy.typing._128Bit, numpy.typing._128Bit]", - "complex512": "numpy.complexfloating[numpy.typing._256Bit, numpy.typing._256Bit]", + "complex64": ("numpy.complexfloating" + "[numpy.typing._32Bit, numpy.typing._32Bit]"), + "complex128": ("numpy.complexfloating" + "[numpy.typing._64Bit, numpy.typing._64Bit]"), + "complex160": ("numpy.complexfloating" + "[numpy.typing._80Bit, numpy.typing._80Bit]"), + "complex192": ("numpy.complexfloating" + "[numpy.typing._96Bit, numpy.typing._96Bit]"), + "complex256": ("numpy.complexfloating" + "[numpy.typing._128Bit, numpy.typing._128Bit]"), + "complex512": ("numpy.complexfloating" + "[numpy.typing._256Bit, numpy.typing._256Bit]"), "ubyte": f"numpy.unsignedinteger[{dct['_NBitByte']}]", 
"ushort": f"numpy.unsignedinteger[{dct['_NBitShort']}]", @@ -217,9 +276,14 @@ def _construct_format_dict(): "single": f"numpy.floating[{dct['_NBitSingle']}]", "double": f"numpy.floating[{dct['_NBitDouble']}]", "longdouble": f"numpy.floating[{dct['_NBitLongDouble']}]", - "csingle": f"numpy.complexfloating[{dct['_NBitSingle']}, {dct['_NBitSingle']}]", - "cdouble": f"numpy.complexfloating[{dct['_NBitDouble']}, {dct['_NBitDouble']}]", - "clongdouble": f"numpy.complexfloating[{dct['_NBitLongDouble']}, {dct['_NBitLongDouble']}]", + "csingle": ("numpy.complexfloating" + f"[{dct['_NBitSingle']}, {dct['_NBitSingle']}]"), + "cdouble": ("numpy.complexfloating" + f"[{dct['_NBitDouble']}, {dct['_NBitDouble']}]"), + "clongdouble": ( + "numpy.complexfloating" + f"[{dct['_NBitLongDouble']}, {dct['_NBitLongDouble']}]" + ), # numpy.typing "_NBitInt": dct['_NBitInt'], @@ -231,40 +295,49 @@ def _construct_format_dict(): #: A dictionary with all supported format keys (as keys) #: and matching values -FORMAT_DICT: Dict[str, str] = _construct_format_dict() +FORMAT_DICT: dict[str, str] = _construct_format_dict() +FORMAT_DICT.update(_construct_ctypes_dict()) -def _parse_reveals(file: IO[str]) -> List[str]: - """Extract and parse all ``" # E: "`` comments from the passed file-like object. +def _parse_reveals(file: IO[str]) -> tuple[npt.NDArray[np.str_], list[str]]: + """Extract and parse all ``" # E: "`` comments from the passed + file-like object. - All format keys will be substituted for their respective value from `FORMAT_DICT`, - *e.g.* ``"{float64}"`` becomes ``"numpy.floating[numpy.typing._64Bit]"``. + All format keys will be substituted for their respective value + from `FORMAT_DICT`, *e.g.* ``"{float64}"`` becomes + ``"numpy.floating[numpy.typing._64Bit]"``. """ string = file.read().replace("*", "") - # Grab all `# E:`-based comments - comments_array = np.char.partition(string.split("\n"), sep=" # E: ")[:, 2] + # Grab all `# E:`-based comments and matching expressions + expression_array, _, comments_array = np.char.partition( + string.split("\n"), sep=" # E: " + ).T comments = "/n".join(comments_array) - # Only search for the `{*}` pattern within comments, - # otherwise there is the risk of accidently grabbing dictionaries and sets + # Only search for the `{*}` pattern within comments, otherwise + # there is the risk of accidentally grabbing dictionaries and sets key_set = set(re.findall(r"\{(.*?)\}", comments)) kwargs = { - k: FORMAT_DICT.get(k, f"<UNRECOGNIZED FORMAT KEY {k!r}>") for k in key_set + k: FORMAT_DICT.get(k, f"<UNRECOGNIZED FORMAT KEY {k!r}>") for + k in key_set } fmt_str = comments.format(**kwargs) - return fmt_str.split("/n") + return expression_array, fmt_str.split("/n") @pytest.mark.slow @pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed") @pytest.mark.parametrize("path", get_test_cases(REVEAL_DIR)) -def test_reveal(path): +def test_reveal(path: str) -> None: + """Validate that mypy correctly infers the return-types of + the expressions in `path`. 
+ """ __tracebackhide__ = True with open(path) as fin: - lines = _parse_reveals(fin) + expression_array, reveal_list = _parse_reveals(fin) output_mypy = OUTPUT_MYPY assert path in output_mypy @@ -279,29 +352,47 @@ def test_reveal(path): lineno = int(match.group('lineno')) - 1 assert "Revealed type is" in error_line - marker = lines[lineno] - _test_reveal(path, marker, error_line, 1 + lineno) + marker = reveal_list[lineno] + expression = expression_array[lineno] + _test_reveal(path, expression, marker, error_line, 1 + lineno) _REVEAL_MSG = """Reveal mismatch at line {} +Expression: {} Expected reveal: {!r} Observed reveal: {!r} """ -def _test_reveal(path: str, reveal: str, expected_reveal: str, lineno: int) -> None: +def _test_reveal( + path: str, + expression: str, + reveal: str, + expected_reveal: str, + lineno: int, +) -> None: + """Error-reporting helper function for `test_reveal`.""" if reveal not in expected_reveal: - raise AssertionError(_REVEAL_MSG.format(lineno, expected_reveal, reveal)) + raise AssertionError( + _REVEAL_MSG.format(lineno, expression, expected_reveal, reveal) + ) @pytest.mark.slow @pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed") @pytest.mark.parametrize("path", get_test_cases(PASS_DIR)) -def test_code_runs(path): +def test_code_runs(path: str) -> None: + """Validate that the code in `path` properly during runtime.""" path_without_extension, _ = os.path.splitext(path) dirname, filename = path.split(os.sep)[-2:] - spec = importlib.util.spec_from_file_location(f"{dirname}.{filename}", path) + + spec = importlib.util.spec_from_file_location( + f"{dirname}.{filename}", path + ) + assert spec is not None + assert spec.loader is not None + test_module = importlib.util.module_from_spec(spec) spec.loader.exec_module(test_module) @@ -325,15 +416,19 @@ LINENO_MAPPING = { @pytest.mark.slow @pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed") def test_extended_precision() -> None: - path = os.path.join(MISC_DIR, "extended_precision.py") + path = os.path.join(MISC_DIR, "extended_precision.pyi") output_mypy = OUTPUT_MYPY assert path in output_mypy + with open(path, "r") as f: + expression_list = f.readlines() + for _msg in output_mypy[path]: *_, _lineno, msg_typ, msg = _msg.split(":") msg = _strip_filename(msg) lineno = int(_lineno) + expression = expression_list[lineno - 1].rstrip("\n") msg_typ = msg_typ.strip() assert msg_typ in {"error", "note"} @@ -342,8 +437,8 @@ def test_extended_precision() -> None: raise ValueError(f"Unexpected reveal line format: {lineno}") else: marker = FORMAT_DICT[LINENO_MAPPING[lineno]] - _test_reveal(path, marker, msg, lineno) + _test_reveal(path, expression, marker, msg, lineno) else: if msg_typ == "error": marker = "Module has no attribute" - _test_fail(path, marker, msg, lineno) + _test_fail(path, expression, marker, msg, lineno) diff --git a/numpy/version.py b/numpy/version.py index 4159a1c0e..d5657d0d0 100644 --- a/numpy/version.py +++ b/numpy/version.py @@ -1,12 +1,15 @@ +from __future__ import annotations + from ._version import get_versions -__ALL__ = ['version', 'full_version', 'git_revision', 'release'] +__ALL__ = ['version', '__version__', 'full_version', 'git_revision', 'release'] -vinfo = get_versions() -version: str = vinfo["version"] -full_version: str = vinfo['version'] -git_revision: str = vinfo['full-revisionid'] +vinfo: dict[str, str] = get_versions() +version = vinfo["version"] +__version__ = vinfo.get("closest-tag", vinfo["version"]) +full_version = vinfo['version'] +git_revision = vinfo['full-revisionid'] 
release = 'dev0' not in version and '+' not in version -short_version: str = vinfo['version'].split("+")[0] +short_version = vinfo['version'].split("+")[0] del get_versions, vinfo diff --git a/pavement.py b/pavement.py index 43ed14a51..6fdaae975 100644 --- a/pavement.py +++ b/pavement.py @@ -168,7 +168,7 @@ def compute_sha256(idirs): def write_release_task(options, filename='README'): """Append hashes of release files to release notes. - This appends file hashes to the release notes ane creates + This appends file hashes to the release notes and creates four README files of the result in various formats: - README.rst diff --git a/pytest.ini b/pytest.ini index dfad538c2..92ce6d6e2 100644 --- a/pytest.ini +++ b/pytest.ini @@ -16,3 +16,5 @@ filterwarnings = ignore:Importing from numpy.matlib is # pytest warning when using PYTHONOPTIMIZE ignore:assertions not in test modules or plugins:pytest.PytestConfigWarning +# TODO: remove below when array_api user warning is removed + ignore:The numpy.array_api submodule is still experimental. See NEP 47. diff --git a/runtests.py b/runtests.py index 8ce9a639c..621d8b421 100755 --- a/runtests.py +++ b/runtests.py @@ -474,23 +474,18 @@ def build_project(args): '--single-version-externally-managed', '--record=' + dst_dir + 'tmp_install_log.txt'] - py_v_s = sysconfig.get_config_var('py_version_short') - platlibdir = getattr(sys, 'platlibdir', '') # Python3.9+ + config_vars = dict(sysconfig.get_config_vars()) + config_vars["platbase"] = dst_dir + config_vars["base"] = dst_dir + site_dir_template = os.path.normpath(sysconfig.get_path( 'platlib', expand=False )) - site_dir = site_dir_template.format(platbase=dst_dir, - py_version_short=py_v_s, - platlibdir=platlibdir, - base=dst_dir, - ) + site_dir = site_dir_template.format(**config_vars) noarch_template = os.path.normpath(sysconfig.get_path( 'purelib', expand=False )) - site_dir_noarch = noarch_template.format(base=dst_dir, - py_version_short=py_v_s, - platlibdir=platlibdir, - ) + site_dir_noarch = noarch_template.format(**config_vars) # easy_install won't install to a path that Python by default cannot see # and isn't on the PYTHONPATH. Plus, it has to exist. @@ -1,3 +1,6 @@ +[codespell] +skip = *-changelog.rst,*-notes.rst,f2c_blas.c,f2c_c_lapack.c,f2c_d_lapack.c,f2c_s_lapack.c,f2c_z_lapack.c + # See the docstring in versioneer.py for instructions. Note that you must # re-run 'versioneer.py setup' after changing this section, and commit the # resulting files. @@ -55,11 +55,14 @@ FULLVERSION = versioneer.get_version() # 1.22.0 ... -> ISRELEASED == True, VERSION == 1.22.0 # 1.22.0rc1 ... -> ISRELEASED == True, VERSION == 1.22.0 ISRELEASED = re.search(r'(dev|\+)', FULLVERSION) is None -MAJOR, MINOR, MICRO = re.match(r'(\d+)\.(\d+)\.(\d+)', FULLVERSION).groups() +_V_MATCH = re.match(r'(\d+)\.(\d+)\.(\d+)', FULLVERSION) +if _V_MATCH is None: + raise RuntimeError(f'Cannot parse version {FULLVERSION}') +MAJOR, MINOR, MICRO = _V_MATCH.groups() VERSION = '{}.{}.{}'.format(MAJOR, MINOR, MICRO) # The first version not in the `Programming Language :: Python :: ...` classifiers above -if sys.version_info >= (3, 10): +if sys.version_info >= (3, 11): fmt = "NumPy {} may not yet support Python {}.{}." 
warnings.warn( fmt.format(VERSION, *sys.version_info[:2]), @@ -210,9 +213,8 @@ def get_build_overrides(): class new_build_clib(build_clib): def build_a_library(self, build_info, lib_name, libraries): if _needs_gcc_c99_flag(self): - args = build_info.get('extra_compiler_args') or [] - args.append('-std=c99') - build_info['extra_compiler_args'] = args + build_info['extra_cflags'] = ['-std=c99'] + build_info['extra_cxxflags'] = ['-std=c++11'] build_clib.build_a_library(self, build_info, lib_name, libraries) class new_build_ext(build_ext): diff --git a/test_requirements.txt b/test_requirements.txt index ee9bc9a84..256b26d9b 100644 --- a/test_requirements.txt +++ b/test_requirements.txt @@ -1,13 +1,13 @@ cython==0.29.24 wheel<0.37.1 setuptools<49.2.0 -hypothesis==6.17.3 -pytest==6.2.4 -pytz==2021.1 -pytest-cov==2.12.1 +hypothesis==6.24.1 +pytest==6.2.5 +pytz==2021.3 +pytest-cov==3.0.0 pickle5; python_version == '3.7' and platform_python_implementation != 'PyPy' # for numpy.random.test.test_extending -cffi +cffi; python_version < '3.10' # For testing types. Notes on the restrictions: # - Mypy relies on C API features not present in PyPy mypy==0.910; platform_python_implementation != "PyPy" diff --git a/tools/changelog.py b/tools/changelog.py index 2bd7cde08..444d96882 100755 --- a/tools/changelog.py +++ b/tools/changelog.py @@ -1,5 +1,4 @@ #!/usr/bin/env python3 -# -*- encoding:utf-8 -*- """ Script to generate contributor and pull request lists diff --git a/tools/cythonize.py b/tools/cythonize.py index c4c25ae26..c06962cf9 100755 --- a/tools/cythonize.py +++ b/tools/cythonize.py @@ -135,7 +135,7 @@ rules = { # Hash db # def load_hashes(filename): - # Return { filename : (sha1 of input, sha1 of output) } + # Return { filename : (sha256 of input, sha256 of output) } if os.path.isfile(filename): hashes = {} with open(filename, 'r') as f: @@ -151,8 +151,8 @@ def save_hashes(hash_db, filename): for key, value in sorted(hash_db.items()): f.write("%s %s %s\n" % (key, value[0], value[1])) -def sha1_of_file(filename): - h = hashlib.sha1() +def sha256_of_file(filename): + h = hashlib.sha256() with open(filename, "rb") as f: h.update(f.read()) return h.hexdigest() @@ -168,8 +168,8 @@ def normpath(path): return path def get_hash(frompath, topath): - from_hash = sha1_of_file(frompath) - to_hash = sha1_of_file(topath) if os.path.exists(topath) else None + from_hash = sha256_of_file(frompath) + to_hash = sha256_of_file(topath) if os.path.exists(topath) else None return (from_hash, to_hash) def process(path, fromfile, tofile, processor_function, hash_db): diff --git a/tools/download-wheels.py b/tools/download-wheels.py index 28b3fc7ad..dd066d9ad 100644 --- a/tools/download-wheels.py +++ b/tools/download-wheels.py @@ -1,5 +1,4 @@ #!/usr/bin/env python3 -# -*- encoding:utf-8 -*- """ Script to download NumPy wheels from the Anaconda staging area. diff --git a/tools/gitpod/gitpod.Dockerfile b/tools/gitpod/gitpod.Dockerfile index 7791df191..7894be5bc 100644 --- a/tools/gitpod/gitpod.Dockerfile +++ b/tools/gitpod/gitpod.Dockerfile @@ -34,12 +34,13 @@ COPY --from=clone --chown=gitpod /tmp/numpy ${WORKSPACE} WORKDIR ${WORKSPACE} # Build numpy to populate the cache used by ccache +RUN git submodule update --init --depth=1 -- numpy/core/src/umath/svml RUN conda activate ${CONDA_ENV} && \ python setup.py build_ext --inplace && \ ccache -s # Gitpod will load the repository into /workspace/numpy. 
We remove the -# directoy from the image to prevent conflicts +# directory from the image to prevent conflicts RUN rm -rf ${WORKSPACE} # ----------------------------------------------------------------------------- diff --git a/tools/lint_diff.ini b/tools/lint_diff.ini index 3b66d3c3e..9e31050b7 100644 --- a/tools/lint_diff.ini +++ b/tools/lint_diff.ini @@ -1,4 +1,5 @@ [pycodestyle] max_line_length = 79 statistics = True -ignore = E121,E122,E123,E125,E126,E127,E128,E226,E251,E265,E266,E302,E402,E704,E712,E721,E731,E741,W291,W293,W391,W503,W504 +ignore = E121,E122,E123,E125,E126,E127,E128,E226,E241,E251,E265,E266,E302,E402,E704,E712,E721,E731,E741,W291,W293,W391,W503,W504 +exclude = numpy/__config__.py,numpy/typing/tests/data diff --git a/tools/linter.py b/tools/linter.py index fd229dbef..0031ff83a 100644 --- a/tools/linter.py +++ b/tools/linter.py @@ -14,7 +14,9 @@ CONFIG = os.path.join( # computing the diff itself. EXCLUDE = ( "numpy/typing/tests/data/", + "numpy/typing/_char_codes.py", "numpy/__config__.py", + "numpy/f2py", ) diff --git a/tools/list_installed_dll_dependencies_cygwin.sh b/tools/list_installed_dll_dependencies_cygwin.sh index 5b81998db..ee06ae0d0 100644 --- a/tools/list_installed_dll_dependencies_cygwin.sh +++ b/tools/list_installed_dll_dependencies_cygwin.sh @@ -11,11 +11,8 @@ # Cygwin-specific, but the rest should work on most platforms with # /bin/sh -py_ver=3.7 -site_packages=$(python${py_ver} -m pip show numpy | \ - grep Location | cut -d " " -f 2 -); -dll_list=$(for name in $(python${py_ver} -m pip show -f numpy | \ - grep -F .dll); do echo ${site_packages}/${name}; done) +py_ver=${1} +dll_list=`/bin/dash tools/list_numpy_dlls.sh ${py_ver}` echo "Checks for existence, permissions and file type" ls -l ${dll_list} file ${dll_list} @@ -29,10 +26,10 @@ cd dist/ for name in ${dll_list}; do echo ${name} - ext_module=$(echo ${name} | \ + ext_module=`echo ${name} | \ sed -E \ -e "s/^\/+(home|usr).*?site-packages\/+//" \ -e "s/.cpython-3.m?-x86(_64)?-cygwin.dll$//" \ - -e "s/\//./g") + -e "s/\//./g"` python${py_ver} -c "import ${ext_module}" done diff --git a/tools/list_numpy_dlls.sh b/tools/list_numpy_dlls.sh new file mode 100644 index 000000000..fedd2097b --- /dev/null +++ b/tools/list_numpy_dlls.sh @@ -0,0 +1,9 @@ +#!/bin/dash +# Print the list of dlls installed by NumPy + +py_ver=${1} +site_packages=`python${py_ver} -m pip show numpy | \ + grep Location | cut -d " " -f 2 -`; +dll_list=`for name in $(python${py_ver} -m pip show -f numpy | \ + grep -F .dll); do echo ${site_packages}/${name}; done` +echo ${dll_list} diff --git a/tools/openblas_support.py b/tools/openblas_support.py index 9ab964e6f..4eb72dbc9 100644 --- a/tools/openblas_support.py +++ b/tools/openblas_support.py @@ -13,8 +13,8 @@ from tempfile import mkstemp, gettempdir from urllib.request import urlopen, Request from urllib.error import HTTPError -OPENBLAS_V = '0.3.17' -OPENBLAS_LONG = 'v0.3.17' +OPENBLAS_V = '0.3.18' +OPENBLAS_LONG = 'v0.3.18' BASE_LOC = 'https://anaconda.org/multibuild-wheels-staging/openblas-libs' BASEURL = f'{BASE_LOC}/{OPENBLAS_LONG}/download' SUPPORTED_PLATFORMS = [ diff --git a/tools/rebase_installed_dlls_cygwin.sh b/tools/rebase_installed_dlls_cygwin.sh new file mode 100644 index 000000000..f772879d9 --- /dev/null +++ b/tools/rebase_installed_dlls_cygwin.sh @@ -0,0 +1,5 @@ +#!/bin/dash +# Rebase the dlls installed by NumPy + +py_ver=${1} +/usr/bin/rebase --database --oblivious `/bin/dash tools/list_numpy_dlls.sh ${py_ver}` diff --git a/tools/refguide_check.py 
b/tools/refguide_check.py index 9a6d1c9f8..21ba5a448 100644 --- a/tools/refguide_check.py +++ b/tools/refguide_check.py @@ -93,18 +93,27 @@ OTHER_MODULE_DOCS = { # these names are known to fail doctesting and we like to keep it that way # e.g. sometimes pseudocode is acceptable etc -DOCTEST_SKIPLIST = set([ +# +# Optionally, a subset of methods can be skipped by setting dict-values +# to a container of method-names +DOCTEST_SKIPDICT = { # cases where NumPy docstrings import things from SciPy: - 'numpy.lib.vectorize', - 'numpy.random.standard_gamma', - 'numpy.random.gamma', - 'numpy.random.vonmises', - 'numpy.random.power', - 'numpy.random.zipf', + 'numpy.lib.vectorize': None, + 'numpy.random.standard_gamma': None, + 'numpy.random.gamma': None, + 'numpy.random.vonmises': None, + 'numpy.random.power': None, + 'numpy.random.zipf': None, # remote / local file IO with DataSource is problematic in doctest: - 'numpy.lib.DataSource', - 'numpy.lib.Repository', -]) + 'numpy.lib.DataSource': None, + 'numpy.lib.Repository': None, +} +if sys.version_info < (3, 9): + DOCTEST_SKIPDICT.update({ + "numpy.core.ndarray": {"__class_getitem__"}, + "numpy.core.dtype": {"__class_getitem__"}, + "numpy.core.number": {"__class_getitem__"}, + }) # Skip non-numpy RST files, historical release notes # Any single-directory exact match will skip the directory and all subdirs. @@ -118,14 +127,13 @@ RST_SKIPLIST = [ 'changelog', 'doc/release', 'doc/source/release', + 'doc/release/upcoming_changes', 'c-info.ufunc-tutorial.rst', 'c-info.python-as-glue.rst', 'f2py.getting-started.rst', 'arrays.nditer.cython.rst', # See PR 17222, these should be fixed - 'basics.broadcasting.rst', 'basics.byteswapping.rst', - 'basics.creation.rst', 'basics.dispatch.rst', 'basics.indexing.rst', 'basics.subclassing.rst', @@ -870,8 +878,12 @@ def check_doctests(module, verbose, ns=None, for name in get_all_dict(module)[0]: full_name = module.__name__ + '.' 
+ name - if full_name in DOCTEST_SKIPLIST: - continue + if full_name in DOCTEST_SKIPDICT: + skip_methods = DOCTEST_SKIPDICT[full_name] + if skip_methods is None: + continue + else: + skip_methods = None try: obj = getattr(module, name) @@ -892,6 +904,10 @@ def check_doctests(module, verbose, ns=None, traceback.format_exc())) continue + if skip_methods is not None: + tests = [i for i in tests if + i.name.partition(".")[2] not in skip_methods] + success, output = _run_doctests(tests, full_name, verbose, doctest_warnings) @@ -972,7 +988,7 @@ def check_doctests_testfile(fname, verbose, ns=None, results = [] _, short_name = os.path.split(fname) - if short_name in DOCTEST_SKIPLIST: + if short_name in DOCTEST_SKIPDICT: return results full_name = fname diff --git a/tools/travis-before-install.sh b/tools/travis-before-install.sh index 65aa4ad13..056e97472 100755 --- a/tools/travis-before-install.sh +++ b/tools/travis-before-install.sh @@ -22,13 +22,12 @@ pushd builds # Build into own virtualenv # We therefore control our own environment, avoid travis' numpy -pip install -U virtualenv if [ -n "$USE_DEBUG" ] then - virtualenv --python=$(which python3-dbg) venv + python3-dbg -m venv venv else - virtualenv --python=python venv + python -m venv venv fi source venv/bin/activate diff --git a/tools/travis-test.sh b/tools/travis-test.sh index 4667db991..b395942fb 100755 --- a/tools/travis-test.sh +++ b/tools/travis-test.sh @@ -165,7 +165,7 @@ if [ -n "$USE_WHEEL" ] && [ $# -eq 0 ]; then fi $PYTHON setup.py build --warn-error build_src --verbose-cfg bdist_wheel # Make another virtualenv to install into - virtualenv --python=`which $PYTHON` venv-for-wheel + $PYTHON -m venv venv-for-wheel . venv-for-wheel/bin/activate # Move out of source directory to avoid finding local numpy pushd dist @@ -181,7 +181,7 @@ elif [ -n "$USE_SDIST" ] && [ $# -eq 0 ]; then export CFLAGS=$CFLAGS" -Wno-sign-compare -Wno-unused-result" $PYTHON setup.py sdist # Make another virtualenv to install into - virtualenv --python=`which $PYTHON` venv-for-wheel + $PYTHON -m venv venv-for-wheel . venv-for-wheel/bin/activate # Move out of source directory to avoid finding local numpy pushd dist diff --git a/tools/wheels/LICENSE_linux.txt b/tools/wheels/LICENSE_linux.txt new file mode 100644 index 000000000..9ea808afc --- /dev/null +++ b/tools/wheels/LICENSE_linux.txt @@ -0,0 +1,880 @@ + +---- + +This binary distribution of NumPy also bundles the following software: + + +Name: OpenBLAS +Files: .libs/libopenb*.so +Description: bundled as a dynamically linked library +Availability: https://github.com/xianyi/OpenBLAS/ +License: 3-clause BSD + Copyright (c) 2011-2014, The OpenBLAS Project + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + 3. Neither the name of the OpenBLAS project nor the names of + its contributors may be used to endorse or promote products + derived from this software without specific prior written + permission. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +Name: LAPACK +Files: .libs/libopenb*.so +Description: bundled in OpenBLAS +Availability: https://github.com/xianyi/OpenBLAS/ +License 3-clause BSD + Copyright (c) 1992-2013 The University of Tennessee and The University + of Tennessee Research Foundation. All rights + reserved. + Copyright (c) 2000-2013 The University of California Berkeley. All + rights reserved. + Copyright (c) 2006-2013 The University of Colorado Denver. All rights + reserved. + + $COPYRIGHT$ + + Additional copyrights may follow + + $HEADER$ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + - Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + - Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer listed + in this license in the documentation and/or other materials + provided with the distribution. + + - Neither the name of the copyright holders nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + The copyright holders provide no reassurances that the source code + provided does not infringe any patent, copyright, or any other + intellectual property rights of third parties. The copyright holders + disclaim any liability to any recipient for claims brought against + recipient by any third party for infringement of that parties + intellectual property rights. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +Name: GCC runtime library +Files: .libs/libgfortran*.so +Description: dynamically linked to files compiled with gcc +Availability: https://gcc.gnu.org/viewcvs/gcc/ +License: GPLv3 + runtime exception + Copyright (C) 2002-2017 Free Software Foundation, Inc. 
+ + Libgfortran is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + Libgfortran is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + <http://www.gnu.org/licenses/>. + +---- + +Full text of license texts referred to above follows (that they are +listed below does not necessarily imply the conditions apply to the +present binary release): + +---- + +GCC RUNTIME LIBRARY EXCEPTION + +Version 3.1, 31 March 2009 + +Copyright (C) 2009 Free Software Foundation, Inc. <http://fsf.org/> + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + +This GCC Runtime Library Exception ("Exception") is an additional +permission under section 7 of the GNU General Public License, version +3 ("GPLv3"). It applies to a given file (the "Runtime Library") that +bears a notice placed by the copyright holder of the file stating that +the file is governed by GPLv3 along with this Exception. + +When you use GCC to compile a program, GCC may combine portions of +certain GCC header files and runtime libraries with the compiled +program. The purpose of this Exception is to allow compilation of +non-GPL (including proprietary) programs to use, in this way, the +header files and runtime libraries covered by this Exception. + +0. Definitions. + +A file is an "Independent Module" if it either requires the Runtime +Library for execution after a Compilation Process, or makes use of an +interface provided by the Runtime Library, but is not otherwise based +on the Runtime Library. + +"GCC" means a version of the GNU Compiler Collection, with or without +modifications, governed by version 3 (or a specified later version) of +the GNU General Public License (GPL) with the option of using any +subsequent versions published by the FSF. + +"GPL-compatible Software" is software whose conditions of propagation, +modification and use would permit combination with GCC in accord with +the license of GCC. + +"Target Code" refers to output from any compiler for a real or virtual +target processor architecture, in executable form or suitable for +input to an assembler, loader, linker and/or execution +phase. Notwithstanding that, Target Code does not include data in any +format that is used as a compiler intermediate representation, or used +for producing a compiler intermediate representation. + +The "Compilation Process" transforms code entirely represented in +non-intermediate languages designed for human-written code, and/or in +Java Virtual Machine byte code, into Target Code. Thus, for example, +use of source code generators and preprocessors need not be considered +part of the Compilation Process, since the Compilation Process can be +understood as starting with the output of the generators or +preprocessors. 
+ +A Compilation Process is "Eligible" if it is done using GCC, alone or +with other GPL-compatible software, or if it is done without using any +work based on GCC. For example, using non-GPL-compatible Software to +optimize any GCC intermediate representations would not qualify as an +Eligible Compilation Process. + +1. Grant of Additional Permission. + +You have permission to propagate a work of Target Code formed by +combining the Runtime Library with Independent Modules, even if such +propagation would otherwise violate the terms of GPLv3, provided that +all Target Code was generated by Eligible Compilation Processes. You +may then convey such a combination under terms of your choice, +consistent with the licensing of the Independent Modules. + +2. No Weakening of GCC Copyleft. + +The availability of this Exception does not imply any general +presumption that third-party software is unaffected by the copyleft +requirements of the license of GCC. + +---- + + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/> + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. 
This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. 
+ + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. 
+ + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. 
+ + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. 
+ + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + <one line to give the program's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <http://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + <program> Copyright (C) <year> <name of author> + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<http://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<http://www.gnu.org/philosophy/why-not-lgpl.html>. diff --git a/tools/wheels/LICENSE_osx.txt b/tools/wheels/LICENSE_osx.txt new file mode 100644 index 000000000..9a687c3b6 --- /dev/null +++ b/tools/wheels/LICENSE_osx.txt @@ -0,0 +1,789 @@ + +---- + +This binary distribution of NumPy also bundles the following software: + + +Name: GCC runtime library +Files: .dylibs/* +Description: dynamically linked to files compiled with gcc +Availability: https://gcc.gnu.org/viewcvs/gcc/ +License: GPLv3 + runtime exception + Copyright (C) 2002-2017 Free Software Foundation, Inc. + + Libgfortran is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. 
+ + Libgfortran is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + <http://www.gnu.org/licenses/>. + +---- + +Full text of license texts referred to above follows (that they are +listed below does not necessarily imply the conditions apply to the +present binary release): + +---- + +GCC RUNTIME LIBRARY EXCEPTION + +Version 3.1, 31 March 2009 + +Copyright (C) 2009 Free Software Foundation, Inc. <http://fsf.org/> + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + +This GCC Runtime Library Exception ("Exception") is an additional +permission under section 7 of the GNU General Public License, version +3 ("GPLv3"). It applies to a given file (the "Runtime Library") that +bears a notice placed by the copyright holder of the file stating that +the file is governed by GPLv3 along with this Exception. + +When you use GCC to compile a program, GCC may combine portions of +certain GCC header files and runtime libraries with the compiled +program. The purpose of this Exception is to allow compilation of +non-GPL (including proprietary) programs to use, in this way, the +header files and runtime libraries covered by this Exception. + +0. Definitions. + +A file is an "Independent Module" if it either requires the Runtime +Library for execution after a Compilation Process, or makes use of an +interface provided by the Runtime Library, but is not otherwise based +on the Runtime Library. + +"GCC" means a version of the GNU Compiler Collection, with or without +modifications, governed by version 3 (or a specified later version) of +the GNU General Public License (GPL) with the option of using any +subsequent versions published by the FSF. + +"GPL-compatible Software" is software whose conditions of propagation, +modification and use would permit combination with GCC in accord with +the license of GCC. + +"Target Code" refers to output from any compiler for a real or virtual +target processor architecture, in executable form or suitable for +input to an assembler, loader, linker and/or execution +phase. Notwithstanding that, Target Code does not include data in any +format that is used as a compiler intermediate representation, or used +for producing a compiler intermediate representation. + +The "Compilation Process" transforms code entirely represented in +non-intermediate languages designed for human-written code, and/or in +Java Virtual Machine byte code, into Target Code. Thus, for example, +use of source code generators and preprocessors need not be considered +part of the Compilation Process, since the Compilation Process can be +understood as starting with the output of the generators or +preprocessors. + +A Compilation Process is "Eligible" if it is done using GCC, alone or +with other GPL-compatible software, or if it is done without using any +work based on GCC. 
For example, using non-GPL-compatible Software to +optimize any GCC intermediate representations would not qualify as an +Eligible Compilation Process. + +1. Grant of Additional Permission. + +You have permission to propagate a work of Target Code formed by +combining the Runtime Library with Independent Modules, even if such +propagation would otherwise violate the terms of GPLv3, provided that +all Target Code was generated by Eligible Compilation Processes. You +may then convey such a combination under terms of your choice, +consistent with the licensing of the Independent Modules. + +2. No Weakening of GCC Copyleft. + +The availability of this Exception does not imply any general +presumption that third-party software is unaffected by the copyleft +requirements of the license of GCC. + +---- + + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/> + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. 
The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. 
+ + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. 
+ + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+  9. Acceptance Not Required for Having Copies.
+
+  You are not required to accept this License in order to receive or
+run a copy of the Program.  Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance.  However,
+nothing other than this License grants you permission to propagate or
+modify any covered work.  These actions infringe copyright if you do
+not accept this License.  Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+  10. Automatic Licensing of Downstream Recipients.
+
+  Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License.  You are not responsible
+for enforcing compliance by third parties with this License.
+
+  An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations.  If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+  You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License.  For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+  11. Patents.
+
+  A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based.  The
+work thus licensed is called the contributor's "contributor version".
+
+  A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version.  For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+  Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+  In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement).  To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+  If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients.  "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+  If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+  A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License.  You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+  Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+  12. No Surrender of Others' Freedom.
+
+  If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all.  For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+  13. Use with the GNU Affero General Public License.
+
+  Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work.  The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+  14. Revised Versions of this License.
+
+  The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time.  Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+  Each version is given a distinguishing version number.  If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation.  If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+  If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+  Later license versions may give you additional or different
+permissions.  However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+  15. Disclaimer of Warranty.
+
+  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+  16. Limitation of Liability.
+
+  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+  17. Interpretation of Sections 15 and 16.
+
+  If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    <program>  Copyright (C) <year>  <name of author>
+    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<http://www.gnu.org/licenses/>.
+
+  The GNU General Public License does not permit incorporating your program
+into proprietary programs.  If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library.  If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.  But first, please read
+<http://www.gnu.org/philosophy/why-not-lgpl.html>.
diff --git a/tools/wheels/check_license.py b/tools/wheels/check_license.py
new file mode 100644
index 000000000..0fe7356c0
--- /dev/null
+++ b/tools/wheels/check_license.py
@@ -0,0 +1,55 @@
+#!/usr/bin/env python
+"""
+check_license.py [MODULE]
+
+Check the presence of a LICENSE.txt in the installed module directory,
+and that it appears to contain text prevalent for a NumPy binary
+distribution.
+
+"""
+import os
+import sys
+import io
+import re
+import argparse
+
+
+def check_text(text):
+    ok = "Copyright (c)" in text and re.search(
+        r"This binary distribution of \w+ also bundles the following software",
+        text,
+    )
+    return ok
+
+
+def main():
+    p = argparse.ArgumentParser(usage=__doc__.rstrip())
+    p.add_argument("module", nargs="?", default="numpy")
+    args = p.parse_args()
+
+    # Drop '' from sys.path
+    sys.path.pop(0)
+
+    # Find module path
+    __import__(args.module)
+    mod = sys.modules[args.module]
+
+    # Check license text
+    license_txt = os.path.join(os.path.dirname(mod.__file__), "LICENSE.txt")
+    with io.open(license_txt, "r", encoding="utf-8") as f:
+        text = f.read()
+
+    ok = check_text(text)
+    if not ok:
+        print(
+            "ERROR: License text {} does not contain expected "
+            "text fragments\n".format(license_txt)
+        )
+        print(text)
+        sys.exit(1)
+
+    sys.exit(0)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/tools/wheels/cibw_before_build.sh b/tools/wheels/cibw_before_build.sh
new file mode 100644
index 000000000..36410ba1f
--- /dev/null
+++ b/tools/wheels/cibw_before_build.sh
@@ -0,0 +1,37 @@
+set -xe
+
+PROJECT_DIR="$1"
+UNAME="$(uname)"
+
+# Update license
+if [[ $UNAME == "Linux" ]] ; then
+    cat $PROJECT_DIR/tools/wheels/LICENSE_linux.txt >> $PROJECT_DIR/LICENSE.txt
+elif [[ $UNAME == "Darwin" ]]; then
+    cat $PROJECT_DIR/tools/wheels/LICENSE_osx.txt >> $PROJECT_DIR/LICENSE.txt
+fi
+
+# Install OpenBLAS
+if [[ $UNAME == "Linux" || $UNAME == "Darwin" ]] ; then
+    basedir=$(python tools/openblas_support.py)
+    cp -r $basedir/lib/* /usr/local/lib
+    cp $basedir/include/* /usr/local/include
+fi
+
+# Install GFortran
+if [[ $UNAME == "Darwin" ]]; then
+    # same version of gfortran as the openblas-libs and numpy-wheel builds
+    curl -L https://github.com/MacPython/gfortran-install/raw/master/archives/gfortran-4.9.0-Mavericks.dmg -o gfortran.dmg
+    GFORTRAN_SHA256=$(shasum -a 256 gfortran.dmg)
+    KNOWN_SHA256="d2d5ca5ba8332d63bbe23a07201c4a0a5d7e09ee56f0298a96775f928c3c4b30  gfortran.dmg"
+    if [ "$GFORTRAN_SHA256" != "$KNOWN_SHA256" ]; then
+        echo sha256 mismatch
+        exit 1
+    fi
+    hdiutil attach -mountpoint /Volumes/gfortran gfortran.dmg
+    sudo installer -pkg /Volumes/gfortran/gfortran.pkg -target /
+    otool -L /usr/local/gfortran/lib/libgfortran.3.dylib
+    # Manually symlink gfortran-4.9 to plain gfortran for f2py.
+    # No longer needed after Feb 13 2020, as gfortran is already present
+    # and the attempted link errors out. Kept here for future reference.
+    # ln -s /usr/local/bin/gfortran-4.9 /usr/local/bin/gfortran
+fi
diff --git a/tools/wheels/cibw_test_command.sh b/tools/wheels/cibw_test_command.sh
new file mode 100644
index 000000000..f09395e84
--- /dev/null
+++ b/tools/wheels/cibw_test_command.sh
@@ -0,0 +1,15 @@
+# This script is used by .github/workflows/wheels.yml to build wheels with
+# cibuildwheel. It runs the full test suite, checks for license inclusion,
+# and that the OpenBLAS version is correct.
+set -xe
+
+PROJECT_DIR="$1"
+UNAME="$(uname)"
+
+python -c "import numpy; numpy.show_config()"
+python -c "import sys; import numpy; sys.exit(not numpy.test('full', extra_argv=['-vv']))"
+
+python $PROJECT_DIR/tools/wheels/check_license.py
+if [[ $UNAME == "Linux" || $UNAME == "Darwin" ]] ; then
+    python $PROJECT_DIR/tools/openblas_support.py --check_version
+fi
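The two cibw_*.sh hooks above are run by cibuildwheel, which per the header
comment is driven from .github/workflows/wheels.yml; that workflow file is
not part of this hunk. As orientation only, a minimal sketch of how such
wiring commonly looks, using cibuildwheel's CIBW_BEFORE_BUILD and
CIBW_TEST_COMMAND options and its {project} placeholder (the values in the
actual workflow may differ):

    # Hypothetical GitHub Actions excerpt, illustrative rather than taken
    # from this diff: cibuildwheel reads these environment variables and
    # expands {project} to the checked-out project directory, which the
    # scripts receive as $1 (PROJECT_DIR).
    env:
      CIBW_BEFORE_BUILD: bash {project}/tools/wheels/cibw_before_build.sh {project}
      CIBW_TEST_COMMAND: bash {project}/tools/wheels/cibw_test_command.sh {project}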