156 files changed, 2597 insertions, 1797 deletions
diff --git a/.cirrus.star b/.cirrus.star
index 20460b8b2..25c7b7dfd 100644
--- a/.cirrus.star
+++ b/.cirrus.star
@@ -16,8 +16,9 @@ def main(ctx):
     if env.get("CIRRUS_REPO_FULL_NAME") != "numpy/numpy":
         return []

-    # if env.get("CIRRUS_CRON", "") == "nightly":
-    #     return fs.read("ci/cirrus_wheels.yml")
+    # only run the wheels entry on a cron job
+    if env.get("CIRRUS_CRON", "") == "nightly":
+        return fs.read("tools/ci/cirrus_wheels.yml")

     # Obtain commit message for the event. Unfortunately CIRRUS_CHANGE_MESSAGE
     # only contains the actual commit message on a non-PR trigger event.
@@ -31,8 +32,8 @@ def main(ctx):
     if "[skip cirrus]" in dct["message"] or "[skip ci]" in dct["message"]:
         return []

-    config = fs.read("tools/ci/cirrus_general.yml")
-    # add extra jobs to the cirrus run by += adding to config
+    config = fs.read("tools/ci/cirrus_wheels.yml")
+    config += fs.read("tools/ci/cirrus_macosx_arm64.yml")

     return config
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
index 6bcbdbfcb..b59fe181d 100644
--- a/.github/dependabot.yml
+++ b/.github/dependabot.yml
@@ -4,8 +4,7 @@ updates:
     directory: /
     schedule:
       interval: daily
-
-  - package-ecosystem: docker
-    directory: /tools/gitpod
-    schedule:
-      interval: daily
+    commit-message:
+      prefix: "MAINT"
+    labels:
+      - "03 - Maintenance"
diff --git a/.github/workflows/build_test.yml b/.github/workflows/build_test.yml
index 23d6d8572..9548bed6b 100644
--- a/.github/workflows/build_test.yml
+++ b/.github/workflows/build_test.yml
@@ -1,6 +1,10 @@
 name: Build_Test

 on:
+  push:
+    branches:
+      # coverage comparison in the "full" step needs to run on main after merges
+      - main
   pull_request:
     branches:
       - main
@@ -23,15 +27,15 @@ permissions:
 jobs:
   lint:
-    if: "github.repository == 'numpy/numpy'"
+    if: github.repository == 'numpy/numpy' && github.event_name != 'push'
     runs-on: ubuntu-latest
     continue-on-error: true
     steps:
-    - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
+    - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2
       with:
         submodules: recursive
         fetch-depth: 0
-    - uses: actions/setup-python@d27e3f3d7c64b4bbf8e4abfb9b63b83e846e0435 # v4.5.0
+    - uses: actions/setup-python@57ded4d7d5e986d7296eab16560982c6dd7c923b # v4.6.0
       with:
         python-version: ${{ env.PYTHON_VERSION }}
     - name: Install linter requirements
@@ -47,11 +51,11 @@ jobs:
     env:
       WITHOUT_SIMD: 1
     steps:
-    - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
+    - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2
       with:
         submodules: recursive
         fetch-depth: 0
-    - uses: actions/setup-python@d27e3f3d7c64b4bbf8e4abfb9b63b83e846e0435 # v4.5.0
+    - uses: actions/setup-python@57ded4d7d5e986d7296eab16560982c6dd7c923b # v4.6.0
       with:
         python-version: ${{ env.PYTHON_VERSION }}
     - uses: ./.github/actions

   basic:
     needs: [smoke_test]
     runs-on: ubuntu-latest
+    if: github.event_name != 'push'
     strategy:
       matrix:
         python-version: ["3.9", "3.10", "3.11", "pypy3.9-v7.3.11"]
     env:
       EXPECT_CPU_FEATURES: "SSE SSE2 SSE3 SSSE3 SSE41 POPCNT SSE42 AVX F16C FMA3 AVX2 AVX512F AVX512CD AVX512_KNL AVX512_KNM AVX512_SKX AVX512_CLX AVX512_CNL AVX512_ICL"
     steps:
-    - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
+    - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2
       with:
         submodules: recursive
         fetch-depth: 0
-    - uses: actions/setup-python@d27e3f3d7c64b4bbf8e4abfb9b63b83e846e0435 # v4.5.0
+    - uses: actions/setup-python@57ded4d7d5e986d7296eab16560982c6dd7c923b # v4.6.0
       with:
         python-version: ${{ matrix.python-version }}
     - uses: ./.github/actions
@@ -78,8 +83,9 @@ jobs:
     needs: [smoke_test]
     # provides GCC 7, 8
     runs-on: ubuntu-20.04
+    if: github.event_name != 'push'
     steps:
-    - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
+    - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2
       with:
         submodules: recursive
         fetch-depth: 0
@@ -114,14 +120,15 @@ jobs:
   without_optimizations:
     needs: [smoke_test]
     runs-on: ubuntu-latest
+    if: github.event_name != 'push'
     env:
       WITHOUT_OPTIMIZATIONS: 1
     steps:
-    - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
+    - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2
       with:
         submodules: recursive
         fetch-depth: 0
-    - uses: actions/setup-python@d27e3f3d7c64b4bbf8e4abfb9b63b83e846e0435 # v4.5.0
+    - uses: actions/setup-python@57ded4d7d5e986d7296eab16560982c6dd7c923b # v4.6.0
       with:
         python-version: ${{ env.PYTHON_VERSION }}
     - uses: ./.github/actions
@@ -129,14 +136,15 @@ jobs:
   with_baseline_only:
     needs: [smoke_test]
     runs-on: ubuntu-latest
+    if: github.event_name != 'push'
     env:
       CPU_DISPATCH: "none"
     steps:
-    - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
+    - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2
       with:
         submodules: recursive
         fetch-depth: 0
-    - uses: actions/setup-python@d27e3f3d7c64b4bbf8e4abfb9b63b83e846e0435 # v4.5.0
+    - uses: actions/setup-python@57ded4d7d5e986d7296eab16560982c6dd7c923b # v4.6.0
       with:
         python-version: ${{ env.PYTHON_VERSION }}
     - uses: ./.github/actions
@@ -144,14 +152,15 @@ jobs:
   without_avx512:
     needs: [smoke_test]
     runs-on: ubuntu-latest
+    if: github.event_name != 'push'
     env:
       CPU_DISPATCH: "max -xop -fma4 -avx512f -avx512cd -avx512_knl -avx512_knm -avx512_skx -avx512_clx -avx512_cnl -avx512_icl"
     steps:
-    - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
+    - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2
       with:
         submodules: recursive
         fetch-depth: 0
-    - uses: actions/setup-python@d27e3f3d7c64b4bbf8e4abfb9b63b83e846e0435 # v4.5.0
+    - uses: actions/setup-python@57ded4d7d5e986d7296eab16560982c6dd7c923b # v4.6.0
       with:
         python-version: ${{ env.PYTHON_VERSION }}
     - uses: ./.github/actions
@@ -159,14 +168,15 @@ jobs:
   without_avx512_avx2_fma3:
     needs: [smoke_test]
     runs-on: ubuntu-latest
+    if: github.event_name != 'push'
     env:
       CPU_DISPATCH: "SSSE3 SSE41 POPCNT SSE42 AVX F16C"
     steps:
-    - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
+    - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2
       with:
         submodules: recursive
         fetch-depth: 0
-    - uses: actions/setup-python@d27e3f3d7c64b4bbf8e4abfb9b63b83e846e0435 # v4.5.0
+    - uses: actions/setup-python@57ded4d7d5e986d7296eab16560982c6dd7c923b # v4.6.0
       with:
         python-version: ${{ env.PYTHON_VERSION }}
     - uses: ./.github/actions
@@ -174,14 +184,15 @@ jobs:
   debug:
     needs: [smoke_test]
     runs-on: ubuntu-latest
+    if: github.event_name != 'push'
     env:
       USE_DEBUG: 1
     steps:
-    - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
+    - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2
       with:
         submodules: recursive
         fetch-depth: 0
-    - uses: actions/setup-python@d27e3f3d7c64b4bbf8e4abfb9b63b83e846e0435 # v4.5.0
+    - uses: actions/setup-python@57ded4d7d5e986d7296eab16560982c6dd7c923b # v4.6.0
       with:
         python-version: ${{ env.PYTHON_VERSION }}
     - uses: ./.github/actions
@@ -189,14 +200,15 @@ jobs:
   blas64:
     needs: [smoke_test]
     runs-on: ubuntu-latest
+    if: github.event_name != 'push'
     env:
       NPY_USE_BLAS_ILP64: 1
     steps:
-    - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
+    - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2
       with:
         submodules: recursive
         fetch-depth: 0
-    - uses: actions/setup-python@d27e3f3d7c64b4bbf8e4abfb9b63b83e846e0435 # v4.5.0
+    - uses: actions/setup-python@57ded4d7d5e986d7296eab16560982c6dd7c923b # v4.6.0
       with:
         python-version: ${{ env.PYTHON_VERSION }}
     - uses: ./.github/actions
@@ -210,11 +222,11 @@ jobs:
       RUN_COVERAGE: 1
       INSTALL_PICKLE5: 1
     steps:
-    - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
+    - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2
       with:
         submodules: recursive
         fetch-depth: 0
-    - uses: actions/setup-python@d27e3f3d7c64b4bbf8e4abfb9b63b83e846e0435 # v4.5.0
+    - uses: actions/setup-python@57ded4d7d5e986d7296eab16560982c6dd7c923b # v4.6.0
       with:
         python-version: ${{ env.PYTHON_VERSION }}
     - uses: ./.github/actions
@@ -222,6 +234,7 @@ jobs:
   benchmark:
     needs: [smoke_test]
     runs-on: ubuntu-latest
+    if: github.event_name != 'push'
     env:
       PYTHONOPTIMIZE: 2
       BLAS: None
@@ -231,11 +244,11 @@ jobs:
       NPY_LAPACK_ORDER: MKL,OPENBLAS,ATLAS,LAPACK
       USE_ASV: 1
     steps:
-    - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
+    - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2
       with:
         submodules: recursive
         fetch-depth: 0
-    - uses: actions/setup-python@d27e3f3d7c64b4bbf8e4abfb9b63b83e846e0435 # v4.5.0
+    - uses: actions/setup-python@57ded4d7d5e986d7296eab16560982c6dd7c923b # v4.6.0
       with:
         python-version: ${{ env.PYTHON_VERSION }}
     - uses: ./.github/actions
@@ -243,16 +256,17 @@ jobs:
   relaxed_strides_debug:
     needs: [smoke_test]
     runs-on: ubuntu-latest
+    if: github.event_name != 'push'
     env:
       CHECK_BLAS: 1
       NPY_USE_BLAS_ILP64: 1
       NPY_RELAXED_STRIDES_DEBUG: 1
     steps:
-    - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
+    - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2
       with:
         submodules: recursive
         fetch-depth: 0
-    - uses: actions/setup-python@d27e3f3d7c64b4bbf8e4abfb9b63b83e846e0435 # v4.5.0
+    - uses: actions/setup-python@57ded4d7d5e986d7296eab16560982c6dd7c923b # v4.6.0
       with:
         python-version: ${{ env.PYTHON_VERSION }}
     - uses: ./.github/actions
@@ -260,14 +274,15 @@ jobs:
   use_wheel:
     needs: [smoke_test]
     runs-on: ubuntu-latest
+    if: github.event_name != 'push'
     env:
       USE_WHEEL: 1
     steps:
-    - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
+    - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2
       with:
         submodules: recursive
         fetch-depth: 0
-    - uses: actions/setup-python@d27e3f3d7c64b4bbf8e4abfb9b63b83e846e0435 # v4.5.0
+    - uses: actions/setup-python@57ded4d7d5e986d7296eab16560982c6dd7c923b # v4.6.0
       with:
         python-version: ${{ env.PYTHON_VERSION }}
     - uses: ./.github/actions
@@ -275,6 +290,7 @@ jobs:
   numpy2_flag:
     needs: [smoke_test]
     runs-on: ubuntu-latest
+    if: github.event_name != 'push'
     env:
       # Test for numpy-2.0 feature-flagged behavior.
       NPY_NUMPY_2_BEHAVIOR: 1
@@ -282,11 +298,11 @@ jobs:
       # currently unfortunately
       NPY_PROMOTION_STATE: legacy
     steps:
-    - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
+    - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2
       with:
         submodules: recursive
         fetch-depth: 0
-    - uses: actions/setup-python@d27e3f3d7c64b4bbf8e4abfb9b63b83e846e0435 # v4.5.0
+    - uses: actions/setup-python@57ded4d7d5e986d7296eab16560982c6dd7c923b # v4.6.0
       with:
         python-version: ${{ env.PYTHON_VERSION }}
     - uses: ./.github/actions
@@ -294,17 +310,18 @@ jobs:
   no_openblas:
     needs: [smoke_test]
     runs-on: ubuntu-latest
+    if: github.event_name != 'push'
     env:
       BLAS: None
       LAPACK: None
       ATLAS: None
       DOWNLOAD_OPENBLAS: ''
     steps:
-    - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
+    - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2
       with:
         submodules: recursive
         fetch-depth: 0
-    - uses: actions/setup-python@d27e3f3d7c64b4bbf8e4abfb9b63b83e846e0435 # v4.5.0
+    - uses: actions/setup-python@57ded4d7d5e986d7296eab16560982c6dd7c923b # v4.6.0
       with:
         python-version: ${{ env.PYTHON_VERSION }}
     - uses: ./.github/actions
@@ -312,14 +329,15 @@ jobs:
   sdist:
     needs: [smoke_test]
     runs-on: ubuntu-latest
+    if: github.event_name != 'push'
     env:
       USE_SDIST: 1
     steps:
-    - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
+    - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2
      with:
        submodules: recursive
        fetch-depth: 0
-    - uses: actions/setup-python@d27e3f3d7c64b4bbf8e4abfb9b63b83e846e0435 # v4.5.0
+    - uses: actions/setup-python@57ded4d7d5e986d7296eab16560982c6dd7c923b # v4.6.0
       with:
         python-version: ${{ env.PYTHON_VERSION }}
     - uses: ./.github/actions
@@ -328,8 +346,9 @@ jobs:
     needs: [smoke_test]
     # make sure this matches the base docker image below
     runs-on: ubuntu-22.04
+    if: github.event_name != 'push'
     steps:
-    - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
+    - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2
       with:
         submodules: recursive
         fetch-depth: 0
@@ -340,7 +359,7 @@ jobs:
       run: |
         # use x86_64 cross-compiler to speed up the build
         sudo apt update
-        sudo apt install -y gcc-arm-linux-gnueabihf g++-arm-linux-gnueabihf
+        sudo apt install -y gcc-arm-linux-gnueabihf g++-arm-linux-gnueabihf gfortran-arm-linux-gnueabihf

         # Keep the `test_requirements.txt` dependency-subset synced
         docker run --name the_container --interactive -v /:/host arm32v7/ubuntu:22.04 /bin/bash -c "
@@ -353,6 +372,7 @@ jobs:
           rm -rf /usr/lib/gcc/arm-linux-gnueabihf && ln -s /host/usr/lib/gcc-cross/arm-linux-gnueabihf /usr/lib/gcc/arm-linux-gnueabihf &&
           rm -f /usr/bin/arm-linux-gnueabihf-gcc && ln -s /host/usr/bin/arm-linux-gnueabihf-gcc /usr/bin/arm-linux-gnueabihf-gcc &&
           rm -f /usr/bin/arm-linux-gnueabihf-g++ && ln -s /host/usr/bin/arm-linux-gnueabihf-g++ /usr/bin/arm-linux-gnueabihf-g++ &&
+          rm -f /usr/bin/arm-linux-gnueabihf-gfortran && ln -s /host/usr/bin/arm-linux-gnueabihf-gfortran /usr/bin/arm-linux-gnueabihf-gfortran &&
           rm -f /usr/bin/arm-linux-gnueabihf-ar && ln -s /host/usr/bin/arm-linux-gnueabihf-ar /usr/bin/arm-linux-gnueabihf-ar &&
           rm -f /usr/bin/arm-linux-gnueabihf-as && ln -s /host/usr/bin/arm-linux-gnueabihf-as /usr/bin/arm-linux-gnueabihf-as &&
           rm -f /usr/bin/arm-linux-gnueabihf-ld && ln -s /host/usr/bin/arm-linux-gnueabihf-ld /usr/bin/arm-linux-gnueabihf-ld &&
@@ -365,6 +385,7 @@ jobs:
           uname -a &&
           gcc --version &&
           g++ --version &&
+          arm-linux-gnueabihf-gfortran --version &&
           python3 --version &&
           git config --global --add safe.directory /numpy
           cd /numpy &&
@@ -374,7 +395,7 @@ jobs:
     - name: Run SIMD Tests
       run: |
         docker run --rm --interactive -v $(pwd):/numpy the_build /bin/bash -c "
-          cd /numpy && python3 runtests.py -n -v -- -k test_simd
+          cd /numpy && F90=arm-linux-gnueabihf-gfortran python3 runtests.py -n -v -- -k 'test_simd or test_kind'
         "

   sde_simd_avx512_test:
@@ -384,11 +405,11 @@
     needs: [smoke_test]
     runs-on: ubuntu-latest
     steps:
-    - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
+    - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2
       with:
         submodules: recursive
         fetch-depth: 0
-    - uses: actions/setup-python@d27e3f3d7c64b4bbf8e4abfb9b63b83e846e0435 # v4.5.0
+    - uses: actions/setup-python@57ded4d7d5e986d7296eab16560982c6dd7c923b # v4.6.0
       with:
         python-version: ${{ env.PYTHON_VERSION }}
     - name: Install Intel SDE
@@ -408,3 +429,35 @@
     # ICL implies SKX, CLX and CNL
     - name: Run SIMD tests (Ice Lake)
       run: sde -icl -- python runtests.py -n -v -- -k test_simd
+
+  intel_spr_sde_test:
+    if: ${{ false }}  # disable for now
+    needs: [smoke_test]
+    runs-on: ubuntu-latest
+    steps:
+    - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2
+      with:
+        submodules: recursive
+        fetch-depth: 0
+    - uses: actions/setup-python@57ded4d7d5e986d7296eab16560982c6dd7c923b # v4.6.0
+      with:
+        python-version: ${{ env.PYTHON_VERSION }}
+    - name: Install Intel SDE
+      run: |
+        curl -o /tmp/sde.tar.xz https://downloadmirror.intel.com/751535/sde-external-9.14.0-2022-10-25-lin.tar.xz
+        mkdir /tmp/sde && tar -xvf /tmp/sde.tar.xz -C /tmp/sde/
+        sudo mv /tmp/sde/* /opt/sde && sudo ln -s /opt/sde/sde64 /usr/bin/sde
+    - name: Install dependencies
+      run: |
+        python -m pip install -r test_requirements.txt
+        sudo apt install gcc-12 g++-12
+    - name: Build and install NumPy
+      run: |
+        export CC=/usr/bin/gcc-12
+        export CXX=/usr/bin/g++-12
+        python -m pip install -e .
+    # Run only a few tests, running everything in an SDE takes a long time
+    # Using pytest directly, unable to use python runtests.py -n -t ...
+    - name: Run linalg/ufunc/umath tests
+      run: |
+        sde -spr -- python -m pytest numpy/core/tests/test_umath* numpy/core/tests/test_ufunc.py numpy/linalg/tests/test_*
diff --git a/.github/workflows/circleci.yml b/.github/workflows/circleci.yml
index 55ee74fbc..75638f6b6 100644
--- a/.github/workflows/circleci.yml
+++ b/.github/workflows/circleci.yml
@@ -13,13 +13,13 @@ jobs:
     runs-on: ubuntu-latest
     if: "github.repository == 'numpy/numpy' && !contains(github.event.head_commit.message, '[circle skip]') && !contains(github.event.head_commit.message, '[skip circle]') && github.event.context == 'ci/circleci: build'"
     name: Run CircleCI artifacts redirector
-    # if: github.repository == 'numpy/numpy'
     permissions:
       statuses: write
     steps:
       - name: GitHub Action step
-        uses: larsoner/circleci-artifacts-redirector-action@1e28a97d7b1e273a8f78ed4692bfd10f84706a45 # master
+        uses: larsoner/circleci-artifacts-redirector-action@0a7552bf8cf99cbd40a8928fa48e858e205b98c8 # master
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
+          api-token: ${{ secrets.CIRCLE_TOKEN }}
           artifact-path: 0/doc/build/html/index.html
           circleci-jobs: build
diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml
index 925d6a1f5..dbfe5173e 100644
--- a/.github/workflows/codeql.yml
+++ b/.github/workflows/codeql.yml
@@ -41,11 +41,11 @@ jobs:
     steps:
       - name: Checkout repository
-        uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
+        uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2

       # Initializes the CodeQL tools for scanning.
       - name: Initialize CodeQL
-        uses: github/codeql-action/init@168b99b3c22180941ae7dbdd5f5c9678ede476ba # v2.2.7
+        uses: github/codeql-action/init@7df0ce34898d659f95c0c4a09eaa8d4e32ee64db # v2.2.12
         with:
           languages: ${{ matrix.language }}
           # If you wish to specify custom queries, you can do so here or in a config file.
@@ -55,7 +55,7 @@ jobs:
       # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
       # If this step fails, then you should remove it and run the build manually (see below)
       - name: Autobuild
-        uses: github/codeql-action/autobuild@168b99b3c22180941ae7dbdd5f5c9678ede476ba # v2.2.7
+        uses: github/codeql-action/autobuild@7df0ce34898d659f95c0c4a09eaa8d4e32ee64db # v2.2.12

       # ℹ️ Command-line programs to run using the OS shell.
       # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
@@ -68,6 +68,6 @@ jobs:
       #     ./location_of_script_within_repo/buildscript.sh

       - name: Perform CodeQL Analysis
-        uses: github/codeql-action/analyze@168b99b3c22180941ae7dbdd5f5c9678ede476ba # v2.2.7
+        uses: github/codeql-action/analyze@7df0ce34898d659f95c0c4a09eaa8d4e32ee64db # v2.2.12
         with:
           category: "/language:${{matrix.language}}"
diff --git a/.github/workflows/cygwin.yml b/.github/workflows/cygwin.yml
index a600447e7..1865825c4 100644
--- a/.github/workflows/cygwin.yml
+++ b/.github/workflows/cygwin.yml
@@ -20,12 +20,12 @@ jobs:
     runs-on: windows-latest
     if: "github.repository == 'numpy/numpy'"
     steps:
-      - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
+      - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2
        with:
          submodules: recursive
          fetch-depth: 0
      - name: Install Cygwin
-        uses: cygwin/cygwin-install-action@f5e0f048310c425e84bc789f493a828c6dc80a25 # v3
+        uses: cygwin/cygwin-install-action@006ad0b0946ca6d0a3ea2d4437677fa767392401 # v4
         with:
           platform: x86_64
           install-dir: 'C:\tools\cygwin'
diff --git a/.github/workflows/dependency-review.yml b/.github/workflows/dependency-review.yml
index 152196a86..cc5530fd6 100644
--- a/.github/workflows/dependency-review.yml
+++ b/.github/workflows/dependency-review.yml
@@ -15,6 +15,6 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: 'Checkout Repository'
-        uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
+        uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2
       - name: 'Dependency Review'
         uses: actions/dependency-review-action@f46c48ed6d4f1227fb2d9ea62bf6bcbed315589e # v3.0.4
diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml
deleted file mode 100644
index c8f95d676..000000000
--- a/.github/workflows/docker.yml
+++ /dev/null
@@ -1,61 +0,0 @@
-name: Build Base Docker Image
-
-on:
-  push:
-    branches:
-      - main
-    paths:
-      - "environment.yml"
-
-permissions:
-  contents: read  # to fetch code (actions/checkout)
-
-jobs:
-  build_docker:
-    name: Build base Docker image
-    runs-on: ubuntu-latest
-    environment: numpy-dev
-    if: "github.repository_owner == 'numpy'"
-    steps:
-      - name: Clone repository
-        uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
-      - name: Lint Docker
-        uses: hadolint/hadolint-action@54c9adbab1582c2ef04b2016b760714a4bfde3cf # v3.1.0
-        with:
-          dockerfile: ./tools/gitpod/Dockerfile
-          ignore: DL3059
-      - name: Get refs
-        shell: bash
-        run: |
-          export raw_branch=${GITHUB_REF#refs/heads/}
-          echo "branch=${raw_branch//\//-}" >> $GITHUB_OUTPUT
-          echo "date=$(date +'%Y%m%d')" >> $GITHUB_OUTPUT
-          echo "sha8=$(echo ${GITHUB_SHA} | cut -c1-8)" >> $GITHUB_OUTPUT
-        id: getrefs
-      - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@4b4e9c3e2d4531116a6f8ba8e71fc6e2cb6e6c8c # v2.5.0
-      - name: Cache Docker layers
-        uses: actions/cache@88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8 # v3.3.1
-        with:
-          path: /tmp/.buildx-cache
-          key: ${{ runner.os }}-buildx-${{ github.sha }}
-          restore-keys: ${{ runner.os }}-buildx-
-      - name: Login to Docker Hub
-        uses: docker/login-action@f4ef78c080cd8ba55a85445d5b36e214a81df20a # v2.1.0
-        with:
-          username: ${{ secrets.DOCKERHUB_USERNAME }}
-          password: ${{ secrets.DOCKERHUB_TOKEN }}
-      - name: Build and push
-        id: docker_build
-        uses: docker/build-push-action@3b5e8027fcad23fda98b2e3ac259d8d67585f671 # v4.0.0
-        with:
-          context: "."
- file: "./tools/gitpod/Dockerfile" - push: ${{ github.event_name != 'pull_request' }} - cache-from: type=local,src=/tmp/.buildx-cache - cache-to: type=local,dest=/tmp/.buildx-cache - tags: | - numpy/numpy-dev:${{ steps.getrefs.outputs.date }}-${{ steps.getrefs.outputs.branch}}-${{ steps.getrefs.outputs.sha8 }}, numpy/numpy-dev:latest - - name: Image digest - # Return details of the image build: sha and shell - run: echo ${{ steps.docker_build.outputs.digest }} diff --git a/.github/workflows/emscripten.yml b/.github/workflows/emscripten.yml index 7ad7903a3..56b08612e 100644 --- a/.github/workflows/emscripten.yml +++ b/.github/workflows/emscripten.yml @@ -31,7 +31,7 @@ jobs: NODE_VERSION: 18 steps: - name: Checkout numpy - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0 + uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 with: submodules: true # versioneer.py requires the latest tag to be reachable. Here we @@ -42,7 +42,7 @@ jobs: - name: set up python id: setup-python - uses: actions/setup-python@d27e3f3d7c64b4bbf8e4abfb9b63b83e846e0435 # v4.5.0 + uses: actions/setup-python@57ded4d7d5e986d7296eab16560982c6dd7c923b # v4.6.0 with: python-version: ${{ env.PYTHON_VERSION }} diff --git a/.github/workflows/gitpod.yml b/.github/workflows/gitpod.yml deleted file mode 100644 index 737c3c07b..000000000 --- a/.github/workflows/gitpod.yml +++ /dev/null @@ -1,61 +0,0 @@ -name: Build Gitpod Docker image - -on: - push: - branches: - - main - -permissions: - contents: read # to fetch code (actions/checkout) - -jobs: - build_gitpod: - name: Build Gitpod Docker image - runs-on: ubuntu-latest - environment: numpy-dev - if: "github.repository_owner == 'numpy'" - steps: - - name: Clone repository - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0 - with: - fetch-depth: 0 - - name: Lint Docker - uses: hadolint/hadolint-action@54c9adbab1582c2ef04b2016b760714a4bfde3cf # v3.1.0 - with: - dockerfile: ./tools/gitpod/gitpod.Dockerfile - ignore: DL3059 - - name: Get refs - shell: bash - run: | - export raw_branch=${GITHUB_REF#refs/heads/} - echo "branch=${raw_branch//\//-}" >> $GITHUB_OUTPUT - echo "date=$(date +'%Y%m%d')" >> $GITHUB_OUTPUT - echo "sha8=$(echo ${GITHUB_SHA} | cut -c1-8)" >> $GITHUB_OUTPUT - id: getrefs - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@4b4e9c3e2d4531116a6f8ba8e71fc6e2cb6e6c8c # v2.5.0 - - name: Cache Docker layers - uses: actions/cache@88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8 # v3.3.1 - with: - path: /tmp/.buildx-cache - key: ${{ runner.os }}-buildx-${{ github.sha }} - restore-keys: ${{ runner.os }}-buildx- - - name: Login to Docker Hub - uses: docker/login-action@f4ef78c080cd8ba55a85445d5b36e214a81df20a # v2.1.0 - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} - - name: Build and push - id: docker_build - uses: docker/build-push-action@3b5e8027fcad23fda98b2e3ac259d8d67585f671 # v4.0.0 - with: - context: "." 
- file: "./tools/gitpod/gitpod.Dockerfile" - push: ${{ github.event_name != 'pull_request' }} - cache-from: type=local,src=/tmp/.buildx-cache - cache-to: type=local,dest=/tmp/.buildx-cache - tags: | - numpy/numpy-gitpod:${{ steps.getrefs.outputs.date }}-${{ steps.getrefs.outputs.branch}}-${{ steps.getrefs.outputs.sha8 }}, numpy/numpy-gitpod:latest - - name: Image digest - # Return details of the image build: sha and shell - run: echo ${{ steps.docker_build.outputs.digest }} diff --git a/.github/workflows/linux_meson.yml b/.github/workflows/linux_meson.yml index 902c06997..126eb8f29 100644 --- a/.github/workflows/linux_meson.yml +++ b/.github/workflows/linux_meson.yml @@ -25,11 +25,11 @@ jobs: if: "github.repository == 'numpy/numpy'" runs-on: ubuntu-latest steps: - - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0 + - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 with: submodules: recursive fetch-depth: 0 - - uses: actions/setup-python@d27e3f3d7c64b4bbf8e4abfb9b63b83e846e0435 # v4.5.0 + - uses: actions/setup-python@57ded4d7d5e986d7296eab16560982c6dd7c923b # v4.6.0 with: python-version: ${{ env.PYTHON_VERSION }} - name: Install dependencies @@ -41,7 +41,7 @@ jobs: env: TERM: xterm-256color run: - ./spin build -- --werror + spin build -- --werror - name: Check build-internal dependencies run: ninja -C build -t missingdeps @@ -54,4 +54,4 @@ jobs: TERM: xterm-256color run: | pip install pytest hypothesis typing_extensions - ./spin test + spin test diff --git a/.github/workflows/linux_musl.yml b/.github/workflows/linux_musl.yml index 54c8b7c2d..7d90c20ed 100644 --- a/.github/workflows/linux_musl.yml +++ b/.github/workflows/linux_musl.yml @@ -62,5 +62,5 @@ jobs: pip install pytest hypothesis typing_extensions # use meson to build and test - ./spin build - ./spin test + spin build + spin test diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 6924048be..398da49b1 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -25,12 +25,12 @@ jobs: steps: - name: "Checkout code" - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.1.0 + uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.1.0 with: persist-credentials: false - name: "Run analysis" - uses: ossf/scorecard-action@e38b1902ae4f44df626f11ba0734b14fb91f8f86 # v2.1.2 + uses: ossf/scorecard-action@80e868c13c90f172d68d1f4501dee99e2479f7af # v2.1.3 with: results_file: results.sarif results_format: sarif @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. 
- name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@168b99b3c22180941ae7dbdd5f5c9678ede476ba # v2.1.27 + uses: github/codeql-action/upload-sarif@7df0ce34898d659f95c0c4a09eaa8d4e32ee64db # v2.1.27 with: sarif_file: results.sarif diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index df31be80e..001c4b8b0 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -43,7 +43,7 @@ jobs: message: ${{ steps.commit_message.outputs.message }} steps: - name: Checkout numpy - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0 + uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 # Gets the correct commit message for pull request with: ref: ${{ github.event.pull_request.head.sha }} @@ -76,7 +76,7 @@ jobs: buildplat: - [ubuntu-20.04, manylinux_x86_64] - [ubuntu-20.04, musllinux_x86_64] - - [macos-12, macosx_*] + - [macos-12, macosx_x86_64] - [windows-2019, win_amd64] - [windows-2019, win32] python: ["cp39", "cp310", "cp311", "pp39"] @@ -92,7 +92,7 @@ jobs: IS_SCHEDULE_DISPATCH: ${{ github.event_name == 'schedule' || github.event_name == 'workflow_dispatch' }} steps: - name: Checkout numpy - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0 + uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 with: submodules: true # versioneer.py requires the latest tag to be reachable. Here we @@ -102,7 +102,7 @@ jobs: fetch-depth: 0 # Used to push the built wheels - - uses: actions/setup-python@d27e3f3d7c64b4bbf8e4abfb9b63b83e846e0435 # v4.5.0 + - uses: actions/setup-python@57ded4d7d5e986d7296eab16560982c6dd7c923b # v4.6.0 with: python-version: "3.x" @@ -114,7 +114,7 @@ jobs: if: ${{ matrix.buildplat[1] == 'win32' }} - name: Build wheels - uses: pypa/cibuildwheel@02ad79a31bf7aa0eee07f690509048d2fb9fd445 # v2.12.1 + uses: pypa/cibuildwheel@5e15bb25b428e1bf2daf2215f173d2b40135f56f # v2.12.3 env: CIBW_BUILD: ${{ matrix.python }}-${{ matrix.buildplat[1] }} @@ -171,7 +171,7 @@ jobs: # IS_SCHEDULE_DISPATCH: ${{ github.event_name == 'schedule' || github.event_name == 'workflow_dispatch' }} steps: - name: Checkout numpy - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0 + uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 with: submodules: true # versioneer.py requires the latest tag to be reachable. 
Here we @@ -180,7 +180,7 @@ jobs: # https://github.com/actions/checkout/issues/338 fetch-depth: 0 # Used to push the built wheels - - uses: actions/setup-python@d27e3f3d7c64b4bbf8e4abfb9b63b83e846e0435 # v4.5.0 + - uses: actions/setup-python@57ded4d7d5e986d7296eab16560982c6dd7c923b # v4.6.0 with: # Build sdist on lowest supported Python python-version: "3.9" diff --git a/.github/workflows/windows_meson.yml b/.github/workflows/windows_meson.yml index e0064dc19..d4372bd3c 100644 --- a/.github/workflows/windows_meson.yml +++ b/.github/workflows/windows_meson.yml @@ -23,12 +23,12 @@ jobs: # if: "github.repository == 'numpy/numpy'" steps: - name: Checkout - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0 + uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 with: submodules: recursive fetch-depth: 0 - name: Setup Python - uses: actions/setup-python@d27e3f3d7c64b4bbf8e4abfb9b63b83e846e0435 # v4.5.0 + uses: actions/setup-python@57ded4d7d5e986d7296eab16560982c6dd7c923b # v4.6.0 with: python-version: ${{ env.PYTHON_VERSION }} diff --git a/.gitpod.yml b/.gitpod.yml deleted file mode 100644 index 42513de8d..000000000 --- a/.gitpod.yml +++ /dev/null @@ -1,66 +0,0 @@ -# Rebuilding NumPy on init - rather than on prebuild: this ensures -# that even forks do have a usable freshly built NumPy -# Might delegate this later to prebuild with Q2 improvements on gitpod -# https://www.gitpod.io/docs/config-start-tasks/#configuring-the-terminal -# ------------------------------------------------------------------------- - -image: numpy/numpy-gitpod:latest -tasks: - - name: Prepare development environment - init: | - mkdir -p .vscode - cp tools/gitpod/settings.json .vscode/settings.json - rm -f /workspace/numpy/.git/shallow.lock - conda activate numpy-dev - git pull --unshallow # need to force this else the prebuild fails - git fetch --tags - git submodule update --init - python setup.py build_ext --inplace - echo "🛠 Completed rebuilding NumPy!! 🛠 " - echo "📖 Building docs 📖 " - cd doc - make html - echo "✨ Pre-build complete! You can close this terminal ✨ " - -# -------------------------------------------------------- -# exposing ports for liveserve -ports: - - port: 5500 - onOpen: notify - -# -------------------------------------------------------- -# some useful extensions to have -vscode: - extensions: - - eamodio.gitlens - - njpwerner.autodocstring - - lextudio.restructuredtext - - ritwickdey.liveserver - - ms-python.python - - yzhang.markdown-all-in-one - - bungcip.better-toml - - mhutchie.git-graph - -# -------------------------------------------------------- -# using prebuilds for the container - note: atm this only -# works for the NumPy repo -# With this configuration the prebuild will happen on push to master -github: - prebuilds: - # enable for main/default branch - master: true - # enable for other branches (defaults to false) - branches: false - # enable for pull requests coming from this repo (defaults to true) - pullRequests: false - # enable for pull requests coming from forks (defaults to false) - pullRequestsFromForks: false - # add a check to pull requests (defaults to true) - addCheck: false - # add a "Review in Gitpod" button as a comment to pull requests (defaults to false) - addComment: false - # add a "Review in Gitpod" button to the pull request's description (defaults to false) - addBadge: false - # add a label once the prebuild is ready to pull requests (defaults to false) - addLabel: false -
\ No newline at end of file
diff --git a/.hadolint.yaml b/.hadolint.yaml
deleted file mode 100644
index 0188ba2cf..000000000
--- a/.hadolint.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-ignored:
-  - DL3006
-  - DL3008
-  - SC2016
-  - DL3004
-  - DL3007
\ No newline at end of file
diff --git a/benchmarks/README.rst b/benchmarks/README.rst
index 135527e4f..ef841a818 100644
--- a/benchmarks/README.rst
+++ b/benchmarks/README.rst
@@ -31,7 +31,7 @@ the command line and execute::
     python runtests.py --bench

 where ``--bench`` activates the benchmark suite instead of the
-test suite. This builds NumPy and runs all available benchmarks
+test suite. This builds NumPy and runs all available benchmarks
 defined in ``benchmarks/``. (Note: this could take a while. Each
 benchmark is run multiple times to measure the distribution in
 execution times.)
diff --git a/benchmarks/asv.conf.json b/benchmarks/asv.conf.json
index b60135524..267450448 100644
--- a/benchmarks/asv.conf.json
+++ b/benchmarks/asv.conf.json
@@ -43,7 +43,8 @@
     // version.
     "matrix": {
         "Cython": [],
-        "setuptools": ["59.2.0"]
+        "setuptools": ["59.2.0"],
+        "packaging": []
     },

     // The directory (relative to the current directory) that benchmarks are
diff --git a/benchmarks/asv_compare.conf.json.tpl b/benchmarks/asv_compare.conf.json.tpl
index 01f4e41de..f0ef0bf49 100644
--- a/benchmarks/asv_compare.conf.json.tpl
+++ b/benchmarks/asv_compare.conf.json.tpl
@@ -47,7 +47,8 @@
     // version.
     "matrix": {
         "Cython": [],
-        "setuptools": ["59.2.0"]
+        "setuptools": ["59.2.0"],
+        "packaging": []
     },

     // The directory (relative to the current directory) that benchmarks are
diff --git a/benchmarks/benchmarks/__init__.py b/benchmarks/benchmarks/__init__.py
index 7b9f1d3e6..35fc87eac 100644
--- a/benchmarks/benchmarks/__init__.py
+++ b/benchmarks/benchmarks/__init__.py
@@ -26,7 +26,7 @@ def dirty_lock(lock_name, lock_on_count=1):
     lock_path = os.path.abspath(os.path.join(
         os.path.dirname(__file__), "..", "env", lock_name)
     )
-    # ASV load the 'benchmark_dir' to discovering the available benchmarks
+    # ASV loads the 'benchmark_dir' to discover the available benchmarks
     # the issue here is ASV doesn't capture any strings from stdout or stderr
     # during this stage so we escape it and lock on the second increment
     try:
diff --git a/benchmarks/benchmarks/bench_core.py b/benchmarks/benchmarks/bench_core.py
index 2e35f5bb9..fe1cd37b6 100644
--- a/benchmarks/benchmarks/bench_core.py
+++ b/benchmarks/benchmarks/bench_core.py
@@ -215,13 +215,41 @@ class Indices(Benchmark):
     def time_indices(self):
         np.indices((1000, 500))

-class VarComplex(Benchmark):
-    params = [10**n for n in range(0, 9)]
-    def setup(self, n):
-        self.arr = np.random.randn(n) + 1j * np.random.randn(n)

-    def teardown(self, n):
-        del self.arr
+class StatsMethods(Benchmark):
+    # Not testing, but in array_api (redundant)
+    # 8, 16, 32 bit variants, and 128 complexes
+    params = [['int64', 'uint64', 'float64', 'intp',
+               'complex64', 'bool', 'float', 'int',
+               'complex', 'complex256'],
+              [100**n for n in range(0, 2)]]
+    param_names = ['dtype', 'size']

-    def time_var(self, n):
-        self.arr.var()
+    def setup(self, dtype, size):
+        try:
+            self.data = np.ones(size, dtype=getattr(np, dtype))
+        except AttributeError:  # builtins throw AttributeError after 1.20
+            self.data = np.ones(size, dtype=dtype)
+        if dtype.startswith('complex'):
+            self.data = np.random.randn(size) + 1j * np.random.randn(size)
+
+    def time_min(self, dtype, size):
+        self.data.min()
+
+    def time_max(self, dtype, size):
+        self.data.max()
+
+    def time_mean(self, dtype, size):
+        self.data.mean()
+
+    def time_std(self, dtype, size):
+        self.data.std()
+
+    def time_prod(self, dtype, size):
+        self.data.prod()
+
+    def time_var(self, dtype, size):
+        self.data.var()
+
+    def time_sum(self, dtype, size):
+        self.data.sum()
diff --git a/benchmarks/benchmarks/bench_creation.py b/benchmarks/benchmarks/bench_creation.py
new file mode 100644
index 000000000..3a577df7a
--- /dev/null
+++ b/benchmarks/benchmarks/bench_creation.py
@@ -0,0 +1,81 @@
+from .common import Benchmark, TYPES1, get_squares_
+
+import numpy as np
+
+
+class MeshGrid(Benchmark):
+    """ Benchmark meshgrid generation
+    """
+    params = [[16, 32],
+              [2, 3, 4],
+              ['ij', 'xy'], TYPES1]
+    param_names = ['size', 'ndims', 'ind', 'ndtype']
+    timeout = 10
+
+    def setup(self, size, ndims, ind, ndtype):
+        self.grid_dims = [(np.random.ranf(size)).astype(ndtype) for
+                          x in range(ndims)]
+
+    def time_meshgrid(self, size, ndims, ind, ndtype):
+        np.meshgrid(*self.grid_dims, indexing=ind)
+
+
+class Create(Benchmark):
+    """ Benchmark for creation functions
+    """
+    # (64, 64), (128, 128), (256, 256)
+    # , (512, 512), (1024, 1024)
+    params = [[16, 32, 128, 256, 512,
+               (16, 16), (32, 32)],
+              ['C', 'F'],
+              TYPES1]
+    param_names = ['shape', 'order', 'npdtypes']
+    timeout = 10
+
+    def setup(self, shape, order, npdtypes):
+        values = get_squares_()
+        self.xarg = values.get(npdtypes)[0]
+
+    def time_full(self, shape, order, npdtypes):
+        np.full(shape, self.xarg[1], dtype=npdtypes, order=order)
+
+    def time_full_like(self, shape, order, npdtypes):
+        np.full_like(self.xarg, self.xarg[0], order=order)
+
+    def time_ones(self, shape, order, npdtypes):
+        np.ones(shape, dtype=npdtypes, order=order)
+
+    def time_ones_like(self, shape, order, npdtypes):
+        np.ones_like(self.xarg, order=order)
+
+    def time_zeros(self, shape, order, npdtypes):
+        np.zeros(shape, dtype=npdtypes, order=order)
+
+    def time_zeros_like(self, shape, order, npdtypes):
+        np.zeros_like(self.xarg, order=order)
+
+    def time_empty(self, shape, order, npdtypes):
+        np.empty(shape, dtype=npdtypes, order=order)
+
+    def time_empty_like(self, shape, order, npdtypes):
+        np.empty_like(self.xarg, order=order)
+
+
+class UfuncsFromDLP(Benchmark):
+    """ Benchmark for creation functions
+    """
+    params = [[16, 32, (16, 16),
+               (32, 32), (64, 64)],
+              TYPES1]
+    param_names = ['shape', 'npdtypes']
+    timeout = 10
+
+    def setup(self, shape, npdtypes):
+        if npdtypes in ['longdouble', 'clongdouble']:
+            raise NotImplementedError(
+                'Only IEEE dtypes are supported')
+        values = get_squares_()
+        self.xarg = values.get(npdtypes)[0]
+
+    def time_from_dlpack(self, shape, npdtypes):
+        np.from_dlpack(self.xarg)
diff --git a/benchmarks/benchmarks/bench_lib.py b/benchmarks/benchmarks/bench_lib.py
index b64f8ab17..f792116a6 100644
--- a/benchmarks/benchmarks/bench_lib.py
+++ b/benchmarks/benchmarks/bench_lib.py
@@ -132,11 +132,26 @@ class Unique(Benchmark):
         # produce a randomly shuffled array with the
         # approximate desired percentage np.nan content
         base_array = np.random.uniform(size=array_size)
-        base_array[base_array < percent_nans / 100.] = np.nan
+        n_nan = int(percent_nans * array_size)
+        nan_indices = np.random.choice(np.arange(array_size), size=n_nan)
+        base_array[nan_indices] = np.nan
         self.arr = base_array

-    def time_unique(self, array_size, percent_nans):
-        np.unique(self.arr)
+    def time_unique_values(self, array_size, percent_nans):
+        np.unique(self.arr, return_index=False,
+                  return_inverse=False, return_counts=False)
+
+    def time_unique_counts(self, array_size, percent_nans):
+        np.unique(self.arr, return_index=False,
+                  return_inverse=False, return_counts=True)
+
+    def time_unique_inverse(self, array_size, percent_nans):
+        np.unique(self.arr, return_index=False,
+                  return_inverse=True, return_counts=False)
+
+    def time_unique_all(self, array_size, percent_nans):
+        np.unique(self.arr, return_index=True,
+                  return_inverse=True, return_counts=True)

 class Isin(Benchmark):
diff --git a/benchmarks/benchmarks/bench_linalg.py b/benchmarks/benchmarks/bench_linalg.py
index a94ba1139..b4e39b084 100644
--- a/benchmarks/benchmarks/bench_linalg.py
+++ b/benchmarks/benchmarks/bench_linalg.py
@@ -190,3 +190,27 @@ class Einsum(Benchmark):
     # sum_of_products_contig_outstride0_one:non_contiguous arrays
     def time_einsum_noncon_contig_outstride0(self, dtype):
         np.einsum("i->", self.non_contiguous_dim1, optimize=True)
+
+
+class LinAlgTransposeVdot(Benchmark):
+    # Smaller for speed
+    # , (128, 128), (256, 256), (512, 512),
+    # (1024, 1024)
+    params = [[(16, 16), (32, 32),
+               (64, 64)], TYPES1]
+    param_names = ['shape', 'npdtypes']
+
+    def setup(self, shape, npdtypes):
+        self.xarg = np.random.uniform(-1, 1, np.dot(*shape)).reshape(shape)
+        self.xarg = self.xarg.astype(npdtypes)
+        self.x2arg = np.random.uniform(-1, 1, np.dot(*shape)).reshape(shape)
+        self.x2arg = self.x2arg.astype(npdtypes)
+        if npdtypes.startswith('complex'):
+            self.xarg += self.xarg.T*1j
+            self.x2arg += self.x2arg.T*1j
+
+    def time_transpose(self, shape, npdtypes):
+        np.transpose(self.xarg)
+
+    def time_vdot(self, shape, npdtypes):
+        np.vdot(self.xarg, self.x2arg)
diff --git a/benchmarks/benchmarks/bench_manipulate.py b/benchmarks/benchmarks/bench_manipulate.py
new file mode 100644
index 000000000..0a312479c
--- /dev/null
+++ b/benchmarks/benchmarks/bench_manipulate.py
@@ -0,0 +1,107 @@
+from .common import Benchmark, get_squares_, TYPES1, DLPACK_TYPES
+
+import numpy as np
+from collections import deque
+
+class BroadcastArrays(Benchmark):
+    params = [[(16, 32), (32, 64),
+               (64, 128), (128, 256),
+               (256, 512), (512, 1024)],
+              TYPES1]
+    param_names = ['shape', 'ndtype']
+    timeout = 10
+
+    def setup(self, shape, ndtype):
+        self.xarg = np.random.ranf(shape[0]*shape[1]).reshape(shape)
+        self.xarg = self.xarg.astype(ndtype)
+        if ndtype.startswith('complex'):
+            self.xarg += np.random.ranf(1)*1j
+
+    def time_broadcast_arrays(self, shape, ndtype):
+        np.broadcast_arrays(self.xarg, np.ones(1))
+
+
+class BroadcastArraysTo(Benchmark):
+    params = [[16, 32, 64, 128, 256, 512],
+              TYPES1]
+    param_names = ['size', 'ndtype']
+    timeout = 10
+
+    def setup(self, size, ndtype):
+        self.rng = np.random.default_rng()
+        self.xarg = self.rng.random(size)
+        self.xarg = self.xarg.astype(ndtype)
+        if ndtype.startswith('complex'):
+            self.xarg += self.rng.random(1)*1j
+
+    def time_broadcast_to(self, size, ndtype):
+        np.broadcast_to(self.xarg, (size, size))
+
+
+class ConcatenateStackArrays(Benchmark):
+    # (64, 128), (128, 256), (256, 512)
+    params = [[(16, 32), (32, 64)],
+              [2, 3, 4, 5],
+              TYPES1]
+    param_names = ['shape', 'narrays', 'ndtype']
+    timeout = 10
+
+    def setup(self, shape, narrays, ndtype):
+        self.xarg = [np.random.ranf(shape[0]*shape[1]).reshape(shape)
+                     for x in range(narrays)]
+        self.xarg = [x.astype(ndtype) for x in self.xarg]
+        if ndtype.startswith('complex'):
+            [x + np.random.ranf(1)*1j for x in self.xarg]
+
+    def time_concatenate_ax0(self, size, narrays, ndtype):
+        np.concatenate(self.xarg, axis=0)
+
+    def time_concatenate_ax1(self, size, narrays, ndtype):
+        np.concatenate(self.xarg, axis=1)
+
+    def time_stack_ax0(self, size, narrays, ndtype):
+        np.stack(self.xarg, axis=0)
+
+    def time_stack_ax1(self, size, narrays, ndtype):
+        np.stack(self.xarg, axis=1)
+
+
+class DimsManipulations(Benchmark):
+    params = [
+        [(2, 1, 4), (2, 1), (5, 2, 3, 1)],
+    ]
+    param_names = ['shape']
+    timeout = 10
+
+    def setup(self, shape):
+        self.xarg = np.ones(shape=shape)
+        self.reshaped = deque(shape)
+        self.reshaped.rotate(1)
+        self.reshaped = tuple(self.reshaped)
+
+    def time_expand_dims(self, shape):
+        np.expand_dims(self.xarg, axis=1)
+
+    def time_expand_dims_neg(self, shape):
+        np.expand_dims(self.xarg, axis=-1)
+
+    def time_squeeze_dims(self, shape):
+        np.squeeze(self.xarg)
+
+    def time_flip_all(self, shape):
+        np.flip(self.xarg, axis=None)
+
+    def time_flip_one(self, shape):
+        np.flip(self.xarg, axis=1)
+
+    def time_flip_neg(self, shape):
+        np.flip(self.xarg, axis=-1)
+
+    def time_moveaxis(self, shape):
+        np.moveaxis(self.xarg, [0, 1], [-1, -2])
+
+    def time_roll(self, shape):
+        np.roll(self.xarg, 3)
+
+    def time_reshape(self, shape):
+        np.reshape(self.xarg, self.reshaped)
diff --git a/benchmarks/benchmarks/bench_reduce.py b/benchmarks/benchmarks/bench_reduce.py
index ca07bd180..040b5ca73 100644
--- a/benchmarks/benchmarks/bench_reduce.py
+++ b/benchmarks/benchmarks/bench_reduce.py
@@ -45,19 +45,40 @@ class AnyAll(Benchmark):
         self.zeros.any()


-class MinMax(Benchmark):
-    params = [np.int8, np.uint8, np.int16, np.uint16, np.int32, np.uint32,
-              np.int64, np.uint64, np.float32, np.float64, np.intp]
+class StatsReductions(Benchmark):
+    # Not testing, but in array_api (redundant)
+    # 8, 16, 32 bit variants, and 128 complexes
+    params = ['int64', 'uint64', 'float64', 'intp',
+              'complex64', 'bool', 'float', 'int',
+              'complex', 'complex256'],
     param_names = ['dtype']

     def setup(self, dtype):
-        self.d = np.ones(20000, dtype=dtype)
+        try:
+            self.data = np.ones(200, dtype=getattr(np, dtype))
+        except AttributeError:  # builtins throw AttributeError after 1.20
+            self.data = np.ones(200, dtype=dtype)
+        if dtype.startswith('complex'):
+            self.data = self.data * self.data.T*1j

     def time_min(self, dtype):
-        np.min(self.d)
+        np.min(self.data)

     def time_max(self, dtype):
-        np.max(self.d)
+        np.max(self.data)
+
+    def time_mean(self, dtype):
+        np.mean(self.data)
+
+    def time_std(self, dtype):
+        np.std(self.data)
+
+    def time_prod(self, dtype):
+        np.prod(self.data)
+
+    def time_var(self, dtype):
+        np.var(self.data)
+

 class FMinMax(Benchmark):
     params = [np.float32, np.float64]
@@ -72,6 +93,7 @@ class FMinMax(Benchmark):
     def time_max(self, dtype):
         np.fmax.reduce(self.d)

+
 class ArgMax(Benchmark):
     params = [np.int8, np.uint8, np.int16, np.uint16, np.int32, np.uint32,
               np.int64, np.uint64, np.float32, np.float64, bool]
@@ -83,6 +105,7 @@ class ArgMax(Benchmark):
     def time_argmax(self, dtype):
         np.argmax(self.d)

+
 class ArgMin(Benchmark):
     params = [np.int8, np.uint8, np.int16, np.uint16, np.int32, np.uint32,
               np.int64, np.uint64, np.float32, np.float64, bool]
@@ -94,6 +117,7 @@ class ArgMin(Benchmark):
     def time_argmin(self, dtype):
         np.argmin(self.d)

+
 class SmallReduction(Benchmark):
     def setup(self):
         self.d = np.ones(100, dtype=np.float32)
diff --git a/benchmarks/benchmarks/bench_ufunc.py b/benchmarks/benchmarks/bench_ufunc.py
index 138f2e9a0..f7c77d90c 100644
--- a/benchmarks/benchmarks/bench_ufunc.py
+++ b/benchmarks/benchmarks/bench_ufunc.py
@@ -1,6 +1,9 @@
-from .common import Benchmark, get_squares_
+from .common import Benchmark, get_squares_, TYPES1, DLPACK_TYPES

 import numpy as np
+import itertools
+from packaging import version
+import operator

 ufuncs = ['abs', 'absolute', 'add', 'arccos', 'arccosh', 'arcsin', 'arcsinh',
@@ -13,11 +16,13 @@ ufuncs = ['abs', 'absolute', 'add', 'arccos', 'arccosh', 'arcsin', 'arcsinh',
           'isinf', 'isnan', 'isnat', 'lcm', 'ldexp', 'left_shift', 'less',
           'less_equal', 'log', 'log10', 'log1p', 'log2', 'logaddexp',
           'logaddexp2', 'logical_and', 'logical_not', 'logical_or',
-          'logical_xor', 'matmul', 'maximum', 'minimum', 'mod', 'modf', 'multiply',
-          'negative', 'nextafter', 'not_equal', 'positive', 'power',
-          'rad2deg', 'radians', 'reciprocal', 'remainder', 'right_shift',
-          'rint', 'sign', 'signbit', 'sin', 'sinh', 'spacing', 'sqrt',
-          'square', 'subtract', 'tan', 'tanh', 'true_divide', 'trunc']
+          'logical_xor', 'matmul', 'maximum', 'minimum', 'mod', 'modf',
+          'multiply', 'negative', 'nextafter', 'not_equal', 'positive',
+          'power', 'rad2deg', 'radians', 'reciprocal', 'remainder',
+          'right_shift', 'rint', 'sign', 'signbit', 'sin',
+          'sinh', 'spacing', 'sqrt', 'square', 'subtract', 'tan', 'tanh',
+          'true_divide', 'trunc']
+arrayfuncdisp = ['real', 'round']

 for name in dir(np):
@@ -25,6 +30,30 @@ for name in dir(np):
         print("Missing ufunc %r" % (name,))


+class ArrayFunctionDispatcher(Benchmark):
+    params = [arrayfuncdisp]
+    param_names = ['func']
+    timeout = 10
+
+    def setup(self, ufuncname):
+        np.seterr(all='ignore')
+        try:
+            self.afdn = getattr(np, ufuncname)
+        except AttributeError:
+            raise NotImplementedError()
+        self.args = []
+        for _, aarg in get_squares_().items():
+            arg = (aarg,) * 1  # no nin
+            try:
+                self.afdn(*arg)
+            except TypeError:
+                continue
+            self.args.append(arg)
+
+    def time_afdn_types(self, ufuncname):
+        [self.afdn(*arg) for arg in self.args]
+
+
 class Broadcast(Benchmark):
     def setup(self):
         self.d = np.ones((50000, 100), dtype=np.float64)
@@ -56,23 +85,179 @@ class UFunc(Benchmark):
     def setup(self, ufuncname):
         np.seterr(all='ignore')
         try:
-            self.f = getattr(np, ufuncname)
+            self.ufn = getattr(np, ufuncname)
         except AttributeError:
             raise NotImplementedError()
         self.args = []
-        for t, a in get_squares_().items():
-            arg = (a,) * self.f.nin
+        for _, aarg in get_squares_().items():
+            arg = (aarg,) * self.ufn.nin
             try:
-                self.f(*arg)
+                self.ufn(*arg)
             except TypeError:
                 continue
             self.args.append(arg)

     def time_ufunc_types(self, ufuncname):
-        [self.f(*arg) for arg in self.args]
+        [self.ufn(*arg) for arg in self.args]
+
+
+class MethodsV0(Benchmark):
+    """ Benchmark for the methods which do not take any arguments
+    """
+    params = [['__abs__', '__neg__', '__pos__'], TYPES1]
+    param_names = ['methods', 'npdtypes']
+    timeout = 10
+
+    def setup(self, methname, npdtypes):
+        values = get_squares_()
+        self.xarg = values.get(npdtypes)[0]
+
+    def time_ndarray_meth(self, methname, npdtypes):
+        getattr(operator, methname)(self.xarg)
+
+
+class NDArrayLRShifts(Benchmark):
+    """ Benchmark for the shift methods
+    """
+    params = [['__lshift__', '__rshift__'],
+              ['intp', 'int8', 'int16',
+               'int32', 'int64', 'uint8',
+               'uint16', 'uint32', 'uint64']]
+    param_names = ['methods', 'npdtypes']
+    timeout = 10
+
+    def setup(self, methname, npdtypes):
+        self.vals = np.ones(1000,
+                            dtype=getattr(np, npdtypes)) * \
+                            np.random.randint(9)
+
+    def time_ndarray_meth(self, methname, npdtypes):
+        getattr(operator, methname)(*[self.vals, 2])
+
+
+class Methods0D(Benchmark):
+    """Zero dimension array methods
+    """
+    params = [['__bool__', '__complex__', '__invert__',
+               '__float__', '__int__'], TYPES1]
+    param_names = ['methods', 'npdtypes']
+    timeout = 10
+
+    def setup(self, methname, npdtypes):
+        self.xarg = np.array(3, dtype=npdtypes)
+        if (npdtypes.startswith('complex') and
+                methname in ['__float__', '__int__']) or \
+           (npdtypes.startswith('int') and methname == '__invert__'):
+            # Skip
+            raise NotImplementedError
+
+    def time_ndarray__0d__(self, methname, npdtypes):
+        meth = getattr(self.xarg, methname)
+        meth()
+
+
+class MethodsV1(Benchmark):
+    """ Benchmark for the methods which take an argument
+    """
+    params = [['__and__', '__add__', '__eq__', '__floordiv__', '__ge__',
+               '__gt__', '__le__', '__lt__', '__matmul__',
+               '__mod__', '__mul__', '__ne__', '__or__',
+               '__pow__', '__sub__', '__truediv__', '__xor__'],
+              TYPES1]
+    param_names = ['methods', 'npdtypes']
+    timeout = 10
+
+    def setup(self, methname, npdtypes):
+        if (
+            npdtypes.startswith("complex")
+            and methname in ["__floordiv__", "__mod__"]
+        ) or (
+            not npdtypes.startswith("int")
+            and methname in ["__and__", "__or__", "__xor__"]
+        ):
+            raise NotImplementedError  # skip
+        values = get_squares_().get(npdtypes)
+        self.xargs = [values[0], values[1]]
+
+    def time_ndarray_meth(self, methname, npdtypes):
+        getattr(operator, methname)(*self.xargs)
+
+
+class NDArrayGetItem(Benchmark):
+    param_names = ['margs', 'msize']
+    params = [[0, (0, 0), (-1, 0), [0, -1]],
+              ['small', 'big']]
+
+    def setup(self, margs, msize):
+        self.xs = np.random.uniform(-1, 1, 6).reshape(2, 3)
+        self.xl = np.random.uniform(-1, 1, 50*50).reshape(50, 50)
+
+    def time_methods_getitem(self, margs, msize):
+        if msize == 'small':
+            mdat = self.xs
+        elif msize == 'big':
+            mdat = self.xl
+        getattr(mdat, '__getitem__')(margs)
+
+
+class NDArraySetItem(Benchmark):
+    param_names = ['margs', 'msize']
+    params = [[0, (0, 0), (-1, 0), [0, -1]],
+              ['small', 'big']]
+
+    def setup(self, margs, msize):
+        self.xs = np.random.uniform(-1, 1, 6).reshape(2, 3)
+        self.xl = np.random.uniform(-1, 1, 100*100).reshape(100, 100)
+
+    def time_methods_setitem(self, margs, msize):
+        if msize == 'small':
+            mdat = self.xs
+        elif msize == 'big':
+            mdat = self.xl
+        mdat[margs] = 17
+
+
+class DLPMethods(Benchmark):
+    """ Benchmark for DLPACK helpers
+    """
+    params = [['__dlpack__', '__dlpack_device__'], DLPACK_TYPES]
+    param_names = ['methods', 'npdtypes']
+    timeout = 10
+
+    def setup(self, methname, npdtypes):
+        values = get_squares_()
+        if npdtypes == 'bool':
+            if version.parse(np.__version__) > version.parse("1.25"):
+                self.xarg = values.get('int16')[0].astype('bool')
+            else:
+                raise NotImplementedError("Not supported before v1.25")
+        else:
+            self.xarg = values.get('int16')[0]
+
+    def time_ndarray_dlp(self, methname, npdtypes):
+        meth = getattr(self.xarg, methname)
+        meth()
+
+
+class NDArrayAsType(Benchmark):
+    """ Benchmark for type conversion
+    """
+    params = [list(itertools.combinations(TYPES1, 2))]
+    param_names = ['typeconv']
+    timeout = 10
+
+    def setup(self, typeconv):
+        if typeconv[0] == typeconv[1]:
+            raise NotImplementedError(
+                "Skipping test for converting to the same dtype")
+        self.xarg = get_squares_().get(typeconv[0])
+
+    def time_astype(self, typeconv):
+        self.xarg.astype(typeconv[1])
+

 class UFuncSmall(Benchmark):
-    """ Benchmark for a selection of ufuncs on a small arrays and scalars
+    """ Benchmark for a selection of ufuncs on a small arrays and scalars

     Since the arrays and scalars are small, we are benchmarking the overhead
     of the numpy ufunc functionality
diff --git a/benchmarks/benchmarks/common.py b/benchmarks/benchmarks/common.py
index 587fab343..d10fe999d 100644
--- a/benchmarks/benchmarks/common.py
+++ b/benchmarks/benchmarks/common.py
@@ -1,7 +1,8 @@
-import numpy
+import numpy as np
 import random
 import os
-import functools
+from functools import lru_cache
+from pathlib import Path

 # Various pre-crafted datasets/variables for testing
 # !!! Must not be changed -- only appended !!!
@@ -9,7 +10,7 @@ import functools
 # sequences
 random.seed(1)
 # but will seed it nevertheless
-numpy.random.seed(1)
+np.random.seed(1)

 nx, ny = 1000, 1000
 # reduced squares based on indexes_rand, primarily for testing more
@@ -21,37 +22,37 @@ TYPES1 = [
     'int16', 'float16',
     'int32', 'float32',
     'int64', 'float64', 'complex64',
-    'longfloat', 'complex128',
+    'longdouble', 'complex128',
 ]
-if 'complex256' in numpy.sctypeDict:
-    TYPES1.append('complex256')
+if 'complex256' in np.sctypeDict:
+    TYPES1.append('clongdouble')

+DLPACK_TYPES = [
+    'int16', 'float16',
+    'int32', 'float32',
+    'int64', 'float64', 'complex64',
+    'complex128', 'bool',
+]

-def memoize(func):
-    result = []
-    def wrapper():
-        if not result:
-            result.append(func())
-        return result[0]
-    return wrapper
-
+# Path for caching
+CACHE_ROOT = Path(__file__).resolve().parent.parent / 'env' / 'numpy_benchdata'

 # values which will be used to construct our sample data matrices
 # replicate 10 times to speed up initial imports of this helper
 # and generate some redundancy

-@memoize
+@lru_cache(typed=True)
 def get_values():
-    rnd = numpy.random.RandomState(1)
-    values = numpy.tile(rnd.uniform(0, 100, size=nx*ny//10), 10)
+    rnd = np.random.RandomState(1)
+    values = np.tile(rnd.uniform(0, 100, size=nx*ny//10), 10)
     return values


-@memoize
+@lru_cache(typed=True)
 def get_squares():
     values = get_values()
-    squares = {t: numpy.array(values,
-                              dtype=getattr(numpy, t)).reshape((nx, ny))
+    squares = {t: np.array(values,
+                           dtype=getattr(np, t)).reshape((nx, ny))
                for t in TYPES1}

     # adjust complex ones to have non-degenerated imagery part -- use
@@ -62,42 +63,42 @@ def get_squares():
     return squares


-@memoize
+@lru_cache(typed=True)
 def get_squares_():
     # smaller squares
     squares_ = {t: s[:nxs, :nys] for t, s in get_squares().items()}
     return squares_


-@memoize
+@lru_cache(typed=True)
 def get_vectors():
     # vectors
     vectors = {t: s[0] for t, s in get_squares().items()}
     return vectors


-@memoize
+@lru_cache(typed=True)
 def get_indexes():
     indexes = list(range(nx))
     # so we do not have all items
     indexes.pop(5)
     indexes.pop(95)
-    indexes = numpy.array(indexes)
+    indexes = np.array(indexes)
     return indexes


-@memoize
+@lru_cache(typed=True)
 def get_indexes_rand():
     rnd = random.Random(1)

     indexes_rand = get_indexes().tolist()  # copy
     rnd.shuffle(indexes_rand)  # in-place shuffle
-    indexes_rand = numpy.array(indexes_rand)
+    indexes_rand = np.array(indexes_rand)
     return indexes_rand


-@memoize
+@lru_cache(typed=True)
 def get_indexes_():
     # smaller versions
     indexes = get_indexes()
@@ -105,20 +106,14 @@ def get_indexes_():
     return indexes_


-@memoize
+@lru_cache(typed=True)
 def get_indexes_rand_():
     indexes_rand = get_indexes_rand()
     indexes_rand_ = indexes_rand[indexes_rand < nxs]
     return indexes_rand_


-CACHE_ROOT = os.path.dirname(__file__)
-CACHE_ROOT = os.path.abspath(
-    os.path.join(CACHE_ROOT, '..', 'env', 'numpy_benchdata')
-)
-
-
-@functools.cache
+@lru_cache(typed=True) def get_data(size, dtype, ip_num=0, zeros=False, finite=True, denormal=False): """ Generates a cached random array that covers several scenarios that @@ -144,15 +139,14 @@ def get_data(size, dtype, ip_num=0, zeros=False, finite=True, denormal=False): denormal: Spreading subnormal numbers along with generated data. """ - np = numpy dtype = np.dtype(dtype) dname = dtype.name cache_name = f'{dname}_{size}_{ip_num}_{int(zeros)}' if dtype.kind in 'fc': cache_name += f'{int(finite)}{int(denormal)}' cache_name += '.bin' - cache_path = os.path.join(CACHE_ROOT, cache_name) - if os.path.exists(cache_path): + cache_path = CACHE_ROOT / cache_name + if cache_path.exists(): return np.fromfile(cache_path, dtype) array = np.ones(size, dtype) @@ -214,8 +208,8 @@ def get_data(size, dtype, ip_num=0, zeros=False, finite=True, denormal=False): for start, r in enumerate(rands): array[start:len(r)*stride:stride] = r - if not os.path.exists(CACHE_ROOT): - os.mkdir(CACHE_ROOT) + if not CACHE_ROOT.exists(): + CACHE_ROOT.mkdir(parents=True) array.tofile(cache_path) return array diff --git a/build_requirements.txt b/build_requirements.txt index 3699320c8..075d454f2 100644 --- a/build_requirements.txt +++ b/build_requirements.txt @@ -2,4 +2,4 @@ meson-python>=0.10.0 Cython>=0.29.30,<3.0 wheel==0.38.1 ninja -spin>=0.2 +spin==0.3 diff --git a/building_with_meson.md b/building_with_meson.md index cf198e7d9..259498998 100644 --- a/building_with_meson.md +++ b/building_with_meson.md @@ -18,14 +18,14 @@ into a problem._ Then install spin: - `python -m pip install spin` -**Compile and install:** `./spin build` +**Compile and install:** `spin build` This builds in the `build/` directory, and installs into the `build-install` directory. Then run the test suite or a shell via `spin`: ``` -./spin test -./spin ipython +spin test +spin ipython ``` Alternatively, to use the package, add it to your `PYTHONPATH`: @@ -67,5 +67,5 @@ Libs: -L${libdir} -lopenblas Then build with: ``` -./spin build -- -Dpkg_config_path=${HOME}/lib/pkgconfig +spin build -- -Dpkg_config_path=${HOME}/lib/pkgconfig ``` diff --git a/doc/records.rst b/doc/records.rst index 3c0d55216..2e9b1251a 100644 --- a/doc/records.rst +++ b/doc/records.rst @@ -74,7 +74,7 @@ New possibilities for the "data-type" Reference counting for ``PyArray_Descr *`` objects. ``````````````````````````````````````````````````` -Most functions that take ``PyArary_Descr *`` as arguments and return a +Most functions that take ``PyArray_Descr *`` as arguments and return a ``PyObject *`` steal the reference unless otherwise noted in the code: Functions that return ``PyArray_Descr *`` objects return a new diff --git a/doc/release/upcoming_changes/10615.deprecation.rst b/doc/release/upcoming_changes/10615.deprecation.rst new file mode 100644 index 000000000..7fa948ea8 --- /dev/null +++ b/doc/release/upcoming_changes/10615.deprecation.rst @@ -0,0 +1,14 @@ +Only ndim-0 arrays are treated as scalars +----------------------------------------- +NumPy used to treat all arrays of size 1 (e.g., ``np.array([3.14])``) as scalars. +In the future, this will be limited to arrays of ndim 0 (e.g., ``np.array(3.14)``). +The following expressions will report a deprecation warning: + +.. 
code-block:: python + + a = np.array([3.14]) + float(a) # better: a[0] to get the numpy.float64 or a.item() + + b = np.array([[3.14]]) + c = np.random.rand(10) + c[0] = b # better: c[0] = b[0, 0] diff --git a/doc/release/upcoming_changes/21120.new_feature.rst b/doc/release/upcoming_changes/21120.new_feature.rst new file mode 100644 index 000000000..7d4dbf743 --- /dev/null +++ b/doc/release/upcoming_changes/21120.new_feature.rst @@ -0,0 +1,21 @@ +Add support for inplace matrix multiplication +---------------------------------------------- +It is now possible to perform inplace matrix multiplication +via the ``@=`` operator. + +.. code-block:: python + + >>> import numpy as np + + >>> a = np.arange(6).reshape(3, 2) + >>> print(a) + [[0 1] + [2 3] + [4 5]] + + >>> b = np.ones((2, 2), dtype=int) + >>> a @= b + >>> print(a) + [[1 1] + [5 5] + [9 9]] diff --git a/doc/release/upcoming_changes/23275.improvement.rst b/doc/release/upcoming_changes/23275.improvement.rst new file mode 100644 index 000000000..14ed5d9ad --- /dev/null +++ b/doc/release/upcoming_changes/23275.improvement.rst @@ -0,0 +1,4 @@ +``numpy.logspace`` now supports a non-scalar ``base`` argument +-------------------------------------------------------------- +The ``base`` argument of ``numpy.logspace`` can now be array-like if it's +broadcastable against the ``start`` and ``stop`` arguments.
\ No newline at end of file diff --git a/doc/release/upcoming_changes/23322.improvement.rst b/doc/release/upcoming_changes/23322.improvement.rst new file mode 100644 index 000000000..ce5ab8cf5 --- /dev/null +++ b/doc/release/upcoming_changes/23322.improvement.rst @@ -0,0 +1,4 @@ +`np.ma.dot()` now supports non-2d arrays +-------------------------------------------- +Previously `np.ma.dot()` only worked if `a` and `b` were both 2d. +It now works for non-2d arrays, just as `np.dot()` does. diff --git a/doc/release/upcoming_changes/23357.improvement.rst b/doc/release/upcoming_changes/23357.improvement.rst new file mode 100644 index 000000000..3b474146b --- /dev/null +++ b/doc/release/upcoming_changes/23357.improvement.rst @@ -0,0 +1,9 @@ +Explicitly show keys of .npz file in repr +----------------------------------------- +``NpzFile`` now shows the keys of the loaded .npz file when printed. + +.. code-block:: python + + >>> npzfile = np.load('arr.npz') + >>> npzfile + NpzFile 'arr.npz' with keys arr_0, arr_1, arr_2, arr_3, arr_4... diff --git a/doc/release/upcoming_changes/23480.expired.rst b/doc/release/upcoming_changes/23480.expired.rst new file mode 100644 index 000000000..164677b98 --- /dev/null +++ b/doc/release/upcoming_changes/23480.expired.rst @@ -0,0 +1 @@ +* The ``np.dual`` submodule has been removed. diff --git a/doc/release/upcoming_changes/23601.change.rst b/doc/release/upcoming_changes/23601.change.rst new file mode 100644 index 000000000..e09bd50fe --- /dev/null +++ b/doc/release/upcoming_changes/23601.change.rst @@ -0,0 +1,6 @@ +C++ standard library usage +-------------------------- + +NumPy builds now depend on the C++ standard library, because +the ``numpy.core._multiarray_umath`` extension is linked with +the C++ linker. diff --git a/doc/source/dev/development_environment.rst b/doc/source/dev/development_environment.rst index b5ee915c4..aedc489d6 100644 --- a/doc/source/dev/development_environment.rst +++ b/doc/source/dev/development_environment.rst @@ -18,9 +18,26 @@ sources needs some additional steps, which are explained below. For the rest of this chapter we assume that you have set up your git repo as described in :ref:`using-git`. -.. note:: If you are having trouble building NumPy from source or setting up - your local development environment, you can try - to :ref:`build NumPy with Gitpod <development-gitpod>`. +.. note:: + + If you are having trouble building NumPy from source or setting up your + local development environment, you can try to build NumPy with GitHub + Codespaces. It allows you to create the correct development environment + right in your browser, reducing the need to install local development + environments and deal with incompatible dependencies. + + If you have good internet connectivity and want a temporary set-up, it is + often faster to work on NumPy in a Codespaces environment. For documentation + on how to get started with Codespaces, see + `the Codespaces docs <https://docs.github.com/en/codespaces>`__. + When creating a codespace for the ``numpy/numpy`` repository, the default + 2-core machine type works; 4-core will build and work a bit faster (but of + course at a cost of halving your number of free usage hours). Once your + codespace has started, you can run ``conda activate numpy-dev`` and your + development environment is completely set up - you can then follow the + relevant parts of the NumPy documentation to build, test, develop, write + docs, and contribute to NumPy. + ..
_testing-builds: diff --git a/doc/source/dev/development_gitpod.rst b/doc/source/dev/development_gitpod.rst deleted file mode 100644 index 4e386867d..000000000 --- a/doc/source/dev/development_gitpod.rst +++ /dev/null @@ -1,271 +0,0 @@ -.. _development-gitpod: - - -Using Gitpod for NumPy development -================================== - -This section of the documentation will guide you through: - -* using GitPod for your NumPy development environment -* creating a personal fork of the NumPy repository on GitHub -* a quick tour of Gitpod and VSCode -* working on the NumPy documentation in Gitpod - -Gitpod ------- - -`Gitpod`_ is an open-source platform for automated and ready-to-code -development environments. It enables developers to describe their dev -environment as code and start instant and fresh development environments for -each new task directly from your browser. This reduces the need to install local -development environments and deal with incompatible dependencies. - -Gitpod GitHub integration -------------------------- - -To be able to use Gitpod, you will need to have the Gitpod app installed on your -GitHub account, so if -you do not have an account yet, you will need to create one first. - -Head over to the `Gitpod`_ website and click on the **Continue with GitHub** -button. You will be redirected to the GitHub authentication page. -You will then be asked to install the `Gitpod GitHub app <https://github.com/marketplace/gitpod-io>`_. - -Make sure to select **All repositories** access option to avoid issues with -permissions later on. Click on the green **Install** button - -.. image:: ./gitpod-imgs/installing-gitpod-io.png - :alt: Gitpod repository access and installation screenshot - -This will install the necessary hooks for the integration. - -Forking the NumPy repository ----------------------------- - -The best way to work on NumPy as a contributor is by making a fork of the -repository first. - -#. Browse to the `NumPy repository on GitHub`_ and `create your own fork`_. -#. Browse to your fork. Your fork will have a URL like - https://github.com/melissawm/NumPy, except with your GitHub username in place of ``melissawm``. - -Starting Gitpod ---------------- -Once you have authenticated to Gitpod through GitHub, you can install the -`Gitpod browser extension <https://www.gitpod.io/docs/browser-extension>`_ -which will add a **Gitpod** button next to the **Code** button in the -repository: - -.. image:: ./gitpod-imgs/NumPy-github.png - :alt: NumPy repository with Gitpod button screenshot - -#. If you install the extension - you can click the **Gitpod** button to start - a new workspace. - -#. Alternatively, if you do not want to install the browser extension, you can - visit https://gitpod.io/#https://github.com/USERNAME/NumPy replacing - ``USERNAME`` with your GitHub username. - -#. In both cases, this will open a new tab on your web browser and start - building your development environment. Please note this can take a few - minutes. - -#. Once the build is complete, you will be directed to your workspace, - including the VSCode editor and all the dependencies you need to work on - NumPy. The first time you start your workspace, you will notice that there - might be some actions running. This will ensure that you have a development - version of NumPy installed and that the docs are being pre-built for you. - -#. 
When your workspace is ready, you can :ref:`test the build<testing-builds>` by - entering:: - - $ python runtests.py -v - -``runtests.py`` is another script in the NumPy root directory. It runs a suite -of tests that make sure NumPy is working as it should, and ``-v`` activates the -``--verbose`` option to show all the test output. - -Quick workspace tour --------------------- -Gitpod uses VSCode as the editor. If you have not used this editor before, you -can check the Getting started `VSCode docs`_ to familiarize yourself with it. - -Your workspace will look similar to the image below: - -.. image:: ./gitpod-imgs/gitpod-workspace.png - :alt: Gitpod workspace screenshot - -.. note:: By default, VSCode initializes with a light theme. You can change to - a dark theme by with the keyboard shortcut :kbd:`Cmd-K Cmd-T` in Mac or - :kbd:`Ctrl-K Ctrl-T` in Linux and Windows. - -We have marked some important sections in the editor: - -#. Your current Python interpreter - by default, this is ``numpy-dev`` and - should be displayed in the status bar and on your terminal. You do not need - to activate the conda environment as this will always be activated for you. -#. Your current branch is always displayed in the status bar. You can also use - this button to change or create branches. -#. GitHub Pull Requests extension - you can use this to work with Pull Requests - from your workspace. -#. Marketplace extensions - we have added some essential extensions to the NumPy - Gitpod. Still, you can also install other extensions or syntax highlighting - themes for your user, and these will be preserved for you. -#. Your workspace directory - by default, it is ``/workspace/numpy``. **Do not - change this** as this is the only directory preserved in Gitpod. - -We have also pre-installed a few tools and VSCode extensions to help with the -development experience: - -* `GitHub CLI <https://cli.github.com/>`_ -* `VSCode rst extension <https://marketplace.visualstudio.com/items?itemName=lextudio.restructuredtext>`_ -* `VSCode Live server extension <https://marketplace.visualstudio.com/items?itemName=ritwickdey.LiveServer>`_ -* `VSCode Gitlens extension <https://marketplace.visualstudio.com/items?itemName=eamodio.gitlens>`_ -* `VSCode autodocstrings extension <https://marketplace.visualstudio.com/items?itemName=njpwerner.autodocstring>`_ -* `VSCode Git Graph extension <https://marketplace.visualstudio.com/items?itemName=mhutchie.git-graph>`_ - -Development workflow with Gitpod --------------------------------- -The :ref:`development-workflow` section of this documentation contains -information regarding the NumPy development workflow. Make sure to check this -before working on your contributions. - -When using Gitpod, git is pre configured for you: - -#. You do not need to configure your git username, and email as this should be - done for you as you authenticated through GitHub. You can check the git - configuration with the command ``git config --list`` in your terminal. -#. As you started your workspace from your own NumPy fork, you will by default - have both ``upstream`` and ``origin`` added as remotes. You can verify this by - typing ``git remote`` on your terminal or by clicking on the **branch name** - on the status bar (see image below). - - .. 
image:: ./gitpod-imgs/NumPy-gitpod-branches.png - :alt: Gitpod workspace branches plugin screenshot - -Rendering the NumPy documentation ---------------------------------- -You can find the detailed documentation on how rendering the documentation with -Sphinx works in the :ref:`howto-build-docs` section. - -The documentation is pre-built during your workspace initialization. So once -this task is completed, you have two main options to render the documentation -in Gitpod. - -Option 1: Using Liveserve -~~~~~~~~~~~~~~~~~~~~~~~~~ - -#. View the documentation in ``NumPy/doc/build/html``. You can start with - ``index.html`` and browse, or you can jump straight to the file you're - interested in. -#. To see the rendered version of a page, you can right-click on the ``.html`` - file and click on **Open with Live Serve**. Alternatively, you can open the - file in the editor and click on the **Go live** button on the status bar. - - .. image:: ./gitpod-imgs/vscode-statusbar.png - :alt: Gitpod workspace VSCode start live serve screenshot - -#. A simple browser will open to the right-hand side of the editor. We recommend - closing it and click on the **Open in browser** button in the pop-up. -#. To stop the server click on the **Port: 5500** button on the status bar. - -Option 2: Using the rst extension -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -A quick and easy way to see live changes in a ``.rst`` file as you work on it -uses the rst extension with docutils. - -.. note:: This will generate a simple live preview of the document without the - ``html`` theme, and some backlinks might not be added correctly. But it is an - easy and lightweight way to get instant feedback on your work. - -#. Open any of the source documentation files located in ``doc/source`` in the - editor. -#. Open VSCode Command Palette with :kbd:`Cmd-Shift-P` in Mac or - :kbd:`Ctrl-Shift-P` in Linux and Windows. Start typing "restructured" - and choose either "Open preview" or "Open preview to the Side". - - .. image:: ./gitpod-imgs/vscode-rst.png - :alt: Gitpod workspace VSCode open rst screenshot - -#. As you work on the document, you will see a live rendering of it on the editor. - - .. image:: ./gitpod-imgs/rst-rendering.png - :alt: Gitpod workspace VSCode rst rendering screenshot - -If you want to see the final output with the ``html`` theme you will need to -rebuild the docs with ``make html`` and use Live Serve as described in option 1. - -FAQ's and troubleshooting -------------------------- - -How long is my Gitpod workspace kept for? -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Your stopped workspace will be kept for 14 days and deleted afterwards if you do -not use them. - -Can I come back to a previous workspace? -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Yes, let's say you stepped away for a while and you want to carry on working on -your NumPy contributions. You need to visit https://gitpod.io/workspaces and -click on the workspace you want to spin up again. All your changes will be there -as you last left them. - -Can I install additional VSCode extensions? -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Absolutely! Any extensions you installed will be installed in your own workspace -and preserved. - -I registered on Gitpod but I still cannot see a ``Gitpod`` button in my repositories. -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Head to https://gitpod.io/integrations and make sure you are logged in. 
-Hover over GitHub and click on the three buttons that appear on the right. -Click on edit permissions and make sure you have ``user:email``, -``read:user``, and ``public_repo`` checked. Click on **Update Permissions** -and confirm the changes in the GitHub application page. - -.. image:: ./gitpod-imgs/gitpod-edit-permissions-gh.png - :alt: Gitpod integrations - edit GH permissions screenshot - -How long does my workspace stay active if I'm not using it? -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -If you keep your workspace open in a browser tab but don't interact with it, -it will shut down after 30 minutes. If you close the browser tab, it will -shut down after 3 minutes. - -My terminal is blank - there is no cursor and it's completely unresponsive -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Unfortunately this is a known-issue on Gitpod's side. You can sort this -issue in two ways: - -#. Create a new Gitpod workspace altogether. -#. Head to your `Gitpod dashboard <https://gitpod.io/workspaces>`_ and locate - the running workspace. Hover on it and click on the **three dots menu** - and then click on **Stop**. When the workspace is completely stopped you - can click on its name to restart it again. - -.. image:: ./gitpod-imgs/gitpod-dashboard-stop.png - :alt: Gitpod dashboard and workspace menu screenshot - -I authenticated through GitHub but I still cannot commit to the repository through Gitpod. -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Head to https://gitpod.io/integrations and make sure you are logged in. -Hover over GitHub and click on the three buttons that appear on the right. -Click on edit permissions and make sure you have ``public_repo`` checked. -Click on **Update Permissions** and confirm the changes in the -GitHub application page. - -.. image:: ./gitpod-imgs/gitpod-edit-permissions-repo.png - :alt: Gitpod integrations - edit GH repository permissions screenshot - -.. _Gitpod: https://www.gitpod.io/ -.. _NumPy repository on GitHub: https://github.com/NumPy/NumPy -.. _create your own fork: https://help.github.com/en/articles/fork-a-repo -.. 
_VSCode docs: https://code.visualstudio.com/docs/getstarted/tips-and-tricks diff --git a/doc/source/dev/gitpod-imgs/NumPy-github.png b/doc/source/dev/gitpod-imgs/NumPy-github.png Binary files differ deleted file mode 100644 index 010b0fc5e..000000000 --- a/doc/source/dev/gitpod-imgs/NumPy-github.png +++ /dev/null diff --git a/doc/source/dev/gitpod-imgs/NumPy-gitpod-branches.png b/doc/source/dev/gitpod-imgs/NumPy-gitpod-branches.png Binary files differ deleted file mode 100644 index 3ee6c5f20..000000000 --- a/doc/source/dev/gitpod-imgs/NumPy-gitpod-branches.png +++ /dev/null diff --git a/doc/source/dev/gitpod-imgs/gitpod-dashboard-stop.png b/doc/source/dev/gitpod-imgs/gitpod-dashboard-stop.png Binary files differ deleted file mode 100644 index 40f137745..000000000 --- a/doc/source/dev/gitpod-imgs/gitpod-dashboard-stop.png +++ /dev/null diff --git a/doc/source/dev/gitpod-imgs/gitpod-edit-permissions-gh.png b/doc/source/dev/gitpod-imgs/gitpod-edit-permissions-gh.png Binary files differ deleted file mode 100644 index 8955e907a..000000000 --- a/doc/source/dev/gitpod-imgs/gitpod-edit-permissions-gh.png +++ /dev/null diff --git a/doc/source/dev/gitpod-imgs/gitpod-edit-permissions-repo.png b/doc/source/dev/gitpod-imgs/gitpod-edit-permissions-repo.png Binary files differ deleted file mode 100644 index 8bfaff81c..000000000 --- a/doc/source/dev/gitpod-imgs/gitpod-edit-permissions-repo.png +++ /dev/null diff --git a/doc/source/dev/gitpod-imgs/gitpod-workspace.png b/doc/source/dev/gitpod-imgs/gitpod-workspace.png Binary files differ deleted file mode 100644 index a65c9bd7e..000000000 --- a/doc/source/dev/gitpod-imgs/gitpod-workspace.png +++ /dev/null diff --git a/doc/source/dev/gitpod-imgs/installing-gitpod-io.png b/doc/source/dev/gitpod-imgs/installing-gitpod-io.png Binary files differ deleted file mode 100644 index 97319a729..000000000 --- a/doc/source/dev/gitpod-imgs/installing-gitpod-io.png +++ /dev/null diff --git a/doc/source/dev/gitpod-imgs/rst-rendering.png b/doc/source/dev/gitpod-imgs/rst-rendering.png Binary files differ deleted file mode 100644 index 41cc305f3..000000000 --- a/doc/source/dev/gitpod-imgs/rst-rendering.png +++ /dev/null diff --git a/doc/source/dev/gitpod-imgs/vscode-rst.png b/doc/source/dev/gitpod-imgs/vscode-rst.png Binary files differ deleted file mode 100644 index 5b574c115..000000000 --- a/doc/source/dev/gitpod-imgs/vscode-rst.png +++ /dev/null diff --git a/doc/source/dev/gitpod-imgs/vscode-statusbar.png b/doc/source/dev/gitpod-imgs/vscode-statusbar.png Binary files differ deleted file mode 100644 index 3febbcee0..000000000 --- a/doc/source/dev/gitpod-imgs/vscode-statusbar.png +++ /dev/null diff --git a/doc/source/dev/howto_build_docs.rst b/doc/source/dev/howto_build_docs.rst index a46688d7f..b3d2e3055 100644 --- a/doc/source/dev/howto_build_docs.rst +++ b/doc/source/dev/howto_build_docs.rst @@ -14,22 +14,13 @@ in several different formats. Development environments ======================== -Before proceeding further it should be noted that the documentation is built with the ``make`` tool, -which is not natively available on Windows. MacOS or Linux users can jump -to :ref:`how-todoc.prerequisites`. It is recommended for Windows users to set up their development -environment on :ref:`Gitpod <development-gitpod>` or `Windows Subsystem -for Linux (WSL) <https://docs.microsoft.com/en-us/windows/wsl/install-win10>`_. WSL is a good option -for a persistent local set-up.
- -Gitpod -~~~~~~ -Gitpod is an open-source platform that automatically creates the correct development environment right -in your browser, reducing the need to install local development environments and deal with -incompatible dependencies. - -If you have good internet connectivity and want a temporary set-up, -it is often faster to build with Gitpod. Here are the in-depth instructions for -:ref:`building NumPy with Gitpod <development-gitpod>`. +Before proceeding further it should be noted that the documentation is built +with the ``make`` tool, which is not natively available on Windows. MacOS or +Linux users can jump to :ref:`how-todoc.prerequisites`. It is recommended for +Windows users to set up their development environment on +GitHub Codespaces (see :ref:`recommended-development-setup`) or +`Windows Subsystem for Linux (WSL) <https://learn.microsoft.com/en-us/windows/wsl/install>`_. +WSL is a good option for a persistent local set-up. .. _how-todoc.prerequisites: diff --git a/doc/source/dev/index.rst b/doc/source/dev/index.rst index bd3595741..b4479fa0d 100644 --- a/doc/source/dev/index.rst +++ b/doc/source/dev/index.rst @@ -60,12 +60,16 @@ Here's the short summary, complete TOC links are below: - ``upstream``, which refers to the ``numpy`` repository - ``origin``, which refers to your personal fork -2. Develop your contribution: - - * Pull the latest changes from upstream:: + * Pull the latest changes from upstream, including tags:: git checkout main - git pull upstream main + git pull upstream main --tags + + * Initialize numpy's submodules:: + + git submodule update --init + +2. Develop your contribution: * Create a branch for the feature you want to work on. Since the branch name will appear in the merge message, use a sensible name @@ -258,7 +262,6 @@ The rest of the story Git Basics <gitwash/index> development_environment - development_gitpod howto_build_docs development_workflow development_advanced_debugging diff --git a/doc/source/f2py/windows/index.rst b/doc/source/f2py/windows/index.rst index c1e6b4128..980346667 100644 --- a/doc/source/f2py/windows/index.rst +++ b/doc/source/f2py/windows/index.rst @@ -56,7 +56,7 @@ PGI Compilers (commercial) Windows support`_. Cygwin (FOSS) - Can also be used for ``gfortran``. Howeve, the POSIX API compatibility layer provided by + Can also be used for ``gfortran``. However, the POSIX API compatibility layer provided by Cygwin is meant to compile UNIX software on Windows, instead of building native Windows programs. This means cross compilation is required. diff --git a/doc/source/f2py/windows/pgi.rst b/doc/source/f2py/windows/pgi.rst index 644259abe..28e25f016 100644 --- a/doc/source/f2py/windows/pgi.rst +++ b/doc/source/f2py/windows/pgi.rst @@ -22,7 +22,5 @@ as classic Flang requires a custom LLVM and compilation from sources. As of 29-01-2022, `PGI compiler toolchains`_ have been superseded by the Nvidia HPC SDK, with no `native Windows support`_. -However, - .. _PGI compiler toolchains: https://www.pgroup.com/index.html .. _native Windows support: https://developer.nvidia.com/nvidia-hpc-sdk-downloads#collapseFour diff --git a/doc/source/reference/arrays.scalars.rst b/doc/source/reference/arrays.scalars.rst index 30d7be9f9..9a592984c 100644 --- a/doc/source/reference/arrays.scalars.rst +++ b/doc/source/reference/arrays.scalars.rst @@ -293,6 +293,10 @@ elements the data type consists of.) :members: __init__ :exclude-members: __init__ +.. autoclass:: numpy.character + :members: __init__ + :exclude-members: __init__ + .. 
autoclass:: numpy.bytes_ :members: __init__ :exclude-members: __init__ diff --git a/doc/source/reference/c-api/array.rst b/doc/source/reference/c-api/array.rst index 6651a4760..c135fc6eb 100644 --- a/doc/source/reference/c-api/array.rst +++ b/doc/source/reference/c-api/array.rst @@ -423,7 +423,7 @@ From other objects :c:data:`NPY_ARRAY_FORCECAST` is present in ``flags``, this call will generate an error if the data type cannot be safely obtained from the object. If you want to use - ``NULL`` for the *dtype* and ensure the array is notswapped then + ``NULL`` for the *dtype* and ensure the array is not swapped then use :c:func:`PyArray_CheckFromAny`. A value of 0 for either of the depth parameters causes the parameter to be ignored. Any of the following array flags can be added (*e.g.* using \|) to get the @@ -548,22 +548,6 @@ From other objects :c:data:`NPY_ARRAY_F_CONTIGUOUS` \| :c:data:`NPY_ARRAY_WRITEABLE` \| :c:data:`NPY_ARRAY_ALIGNED` \| :c:data:`NPY_ARRAY_WRITEBACKIFCOPY` -.. c:function:: int PyArray_GetArrayParamsFromObject( \ - PyObject* op, PyArray_Descr* requested_dtype, npy_bool writeable, \ - PyArray_Descr** out_dtype, int* out_ndim, npy_intp* out_dims, \ - PyArrayObject** out_arr, PyObject* context) - - .. deprecated:: NumPy 1.19 - - Unless NumPy is made aware of an issue with this, this function - is scheduled for rapid removal without replacement. - - .. versionchanged:: NumPy 1.19 - - `context` is never used. Its use results in an error. - - .. versionadded:: 1.6 - .. c:function:: PyObject* PyArray_CheckFromAny( \ PyObject* op, PyArray_Descr* dtype, int min_depth, int max_depth, \ int requirements, PyObject* context) @@ -1202,17 +1186,6 @@ Converting data types return value is the enumerated typenumber that represents the data-type that *op* should have. -.. c:function:: void PyArray_ArrayType( \ - PyObject* op, PyArray_Descr* mintype, PyArray_Descr* outtype) - - This function is superseded by :c:func:`PyArray_ResultType`. - - This function works similarly to :c:func:`PyArray_ObjectType` (...) - except it handles flexible arrays. The *mintype* argument can have - an itemsize member and the *outtype* argument will have an - itemsize member at least as big but perhaps bigger depending on - the object *op*. - .. c:function:: PyArrayObject** PyArray_ConvertToCommonType( \ PyObject* op, int* n) @@ -1490,7 +1463,7 @@ of the constant names is deprecated in 1.7. :c:func:`PyArray_FromAny` and a copy had to be made of some other array (and the user asked for this flag to be set in such a situation). The base attribute then points to the "misbehaved" - array (which is set read_only). :c:func`PyArray_ResolveWritebackIfCopy` + array (which is set read_only). :c:func:`PyArray_ResolveWritebackIfCopy` will copy its contents back to the "misbehaved" array (casting if necessary) and will reset the "misbehaved" array to :c:data:`NPY_ARRAY_WRITEABLE`. If the "misbehaved" array was not @@ -3378,7 +3351,7 @@ Memory management .. c:function:: int PyArray_ResolveWritebackIfCopy(PyArrayObject* obj) - If ``obj.flags`` has :c:data:`NPY_ARRAY_WRITEBACKIFCOPY`, this function + If ``obj->flags`` has :c:data:`NPY_ARRAY_WRITEBACKIFCOPY`, this function clears the flags, `DECREF` s `obj->base` and makes it writeable, and sets ``obj->base`` to NULL. It then copies ``obj->data`` to `obj->base->data`, and returns the error state of @@ -3608,29 +3581,17 @@ Miscellaneous Macros Returns the reference count of any Python object. -.. c:function:: void PyArray_DiscardWritebackIfCopy(PyObject* obj) +.. 
c:function:: void PyArray_DiscardWritebackIfCopy(PyArrayObject* obj) - If ``obj.flags`` has :c:data:`NPY_ARRAY_WRITEBACKIFCOPY`, this function + If ``obj->flags`` has :c:data:`NPY_ARRAY_WRITEBACKIFCOPY`, this function clears the flags, `DECREF` s `obj->base` and makes it writeable, and sets ``obj->base`` to NULL. In - contrast to :c:func:`PyArray_DiscardWritebackIfCopy` it makes no attempt - to copy the data from `obj->base` This undoes + contrast to :c:func:`PyArray_ResolveWritebackIfCopy` it makes no attempt + to copy the data from `obj->base`. This undoes :c:func:`PyArray_SetWritebackIfCopyBase`. Usually this is called after an error when you are finished with ``obj``, just before ``Py_DECREF(obj)``. It may be called multiple times, or with ``NULL`` input. -.. c:function:: void PyArray_XDECREF_ERR(PyObject* obj) - - Deprecated in 1.14, use :c:func:`PyArray_DiscardWritebackIfCopy` - followed by ``Py_XDECREF`` - - DECREF's an array object which may have the - :c:data:`NPY_ARRAY_WRITEBACKIFCOPY` - flag set without causing the contents to be copied back into the - original array. Resets the :c:data:`NPY_ARRAY_WRITEABLE` flag on the base - object. This is useful for recovering from an error condition when - writeback semantics are used, but will lead to wrong results. - Enumerated Types ~~~~~~~~~~~~~~~~ diff --git a/doc/source/reference/c-api/dtype.rst b/doc/source/reference/c-api/dtype.rst index 642f62749..bdd5a6f55 100644 --- a/doc/source/reference/c-api/dtype.rst +++ b/doc/source/reference/c-api/dtype.rst @@ -25,161 +25,161 @@ select the precision desired. Enumerated Types ---------------- -.. c:enumerator:: NPY_TYPES +.. c:enum:: NPY_TYPES -There is a list of enumerated types defined providing the basic 24 -data types plus some useful generic names. Whenever the code requires -a type number, one of these enumerated types is requested. The types -are all called ``NPY_{NAME}``: + There is a list of enumerated types defined providing the basic 24 + data types plus some useful generic names. Whenever the code requires + a type number, one of these enumerated types is requested. The types + are all called ``NPY_{NAME}``: -.. c:enumerator:: NPY_BOOL + .. c:enumerator:: NPY_BOOL - The enumeration value for the boolean type, stored as one byte. - It may only be set to the values 0 and 1. + The enumeration value for the boolean type, stored as one byte. + It may only be set to the values 0 and 1. -.. c:enumerator:: NPY_BYTE -.. c:enumerator:: NPY_INT8 + .. c:enumerator:: NPY_BYTE + .. c:enumerator:: NPY_INT8 - The enumeration value for an 8-bit/1-byte signed integer. + The enumeration value for an 8-bit/1-byte signed integer. -.. c:enumerator:: NPY_SHORT -.. c:enumerator:: NPY_INT16 + .. c:enumerator:: NPY_SHORT + .. c:enumerator:: NPY_INT16 - The enumeration value for a 16-bit/2-byte signed integer. + The enumeration value for a 16-bit/2-byte signed integer. -.. c:enumerator:: NPY_INT -.. c:enumerator:: NPY_INT32 + .. c:enumerator:: NPY_INT + .. c:enumerator:: NPY_INT32 - The enumeration value for a 32-bit/4-byte signed integer. + The enumeration value for a 32-bit/4-byte signed integer. -.. c:enumerator:: NPY_LONG + .. c:enumerator:: NPY_LONG - Equivalent to either NPY_INT or NPY_LONGLONG, depending on the - platform. + Equivalent to either NPY_INT or NPY_LONGLONG, depending on the + platform. -.. c:enumerator:: NPY_LONGLONG -.. c:enumerator:: NPY_INT64 + .. c:enumerator:: NPY_LONGLONG + .. c:enumerator:: NPY_INT64 - The enumeration value for a 64-bit/8-byte signed integer. 
+ The enumeration value for a 64-bit/8-byte signed integer. -.. c:enumerator:: NPY_UBYTE -.. c:enumerator:: NPY_UINT8 + .. c:enumerator:: NPY_UBYTE + .. c:enumerator:: NPY_UINT8 - The enumeration value for an 8-bit/1-byte unsigned integer. + The enumeration value for an 8-bit/1-byte unsigned integer. -.. c:enumerator:: NPY_USHORT -.. c:enumerator:: NPY_UINT16 + .. c:enumerator:: NPY_USHORT + .. c:enumerator:: NPY_UINT16 - The enumeration value for a 16-bit/2-byte unsigned integer. + The enumeration value for a 16-bit/2-byte unsigned integer. -.. c:enumerator:: NPY_UINT -.. c:enumerator:: NPY_UINT32 + .. c:enumerator:: NPY_UINT + .. c:enumerator:: NPY_UINT32 - The enumeration value for a 32-bit/4-byte unsigned integer. + The enumeration value for a 32-bit/4-byte unsigned integer. -.. c:enumerator:: NPY_ULONG + .. c:enumerator:: NPY_ULONG - Equivalent to either NPY_UINT or NPY_ULONGLONG, depending on the - platform. + Equivalent to either NPY_UINT or NPY_ULONGLONG, depending on the + platform. -.. c:enumerator:: NPY_ULONGLONG -.. c:enumerator:: NPY_UINT64 + .. c:enumerator:: NPY_ULONGLONG + .. c:enumerator:: NPY_UINT64 - The enumeration value for a 64-bit/8-byte unsigned integer. + The enumeration value for a 64-bit/8-byte unsigned integer. -.. c:enumerator:: NPY_HALF -.. c:enumerator:: NPY_FLOAT16 + .. c:enumerator:: NPY_HALF + .. c:enumerator:: NPY_FLOAT16 - The enumeration value for a 16-bit/2-byte IEEE 754-2008 compatible floating - point type. + The enumeration value for a 16-bit/2-byte IEEE 754-2008 compatible floating + point type. -.. c:enumerator:: NPY_FLOAT -.. c:enumerator:: NPY_FLOAT32 + .. c:enumerator:: NPY_FLOAT + .. c:enumerator:: NPY_FLOAT32 - The enumeration value for a 32-bit/4-byte IEEE 754 compatible floating - point type. + The enumeration value for a 32-bit/4-byte IEEE 754 compatible floating + point type. -.. c:enumerator:: NPY_DOUBLE -.. c:enumerator:: NPY_FLOAT64 + .. c:enumerator:: NPY_DOUBLE + .. c:enumerator:: NPY_FLOAT64 - The enumeration value for a 64-bit/8-byte IEEE 754 compatible floating - point type. + The enumeration value for a 64-bit/8-byte IEEE 754 compatible floating + point type. -.. c:enumerator:: NPY_LONGDOUBLE + .. c:enumerator:: NPY_LONGDOUBLE - The enumeration value for a platform-specific floating point type which is - at least as large as NPY_DOUBLE, but larger on many platforms. + The enumeration value for a platform-specific floating point type which is + at least as large as NPY_DOUBLE, but larger on many platforms. -.. c:enumerator:: NPY_CFLOAT -.. c:enumerator:: NPY_COMPLEX64 + .. c:enumerator:: NPY_CFLOAT + .. c:enumerator:: NPY_COMPLEX64 - The enumeration value for a 64-bit/8-byte complex type made up of - two NPY_FLOAT values. + The enumeration value for a 64-bit/8-byte complex type made up of + two NPY_FLOAT values. -.. c:enumerator:: NPY_CDOUBLE -.. c:enumerator:: NPY_COMPLEX128 + .. c:enumerator:: NPY_CDOUBLE + .. c:enumerator:: NPY_COMPLEX128 - The enumeration value for a 128-bit/16-byte complex type made up of - two NPY_DOUBLE values. + The enumeration value for a 128-bit/16-byte complex type made up of + two NPY_DOUBLE values. -.. c:enumerator:: NPY_CLONGDOUBLE + .. c:enumerator:: NPY_CLONGDOUBLE - The enumeration value for a platform-specific complex floating point - type which is made up of two NPY_LONGDOUBLE values. + The enumeration value for a platform-specific complex floating point + type which is made up of two NPY_LONGDOUBLE values. -.. c:enumerator:: NPY_DATETIME + .. 
c:enumerator:: NPY_DATETIME - The enumeration value for a data type which holds dates or datetimes with - a precision based on selectable date or time units. + The enumeration value for a data type which holds dates or datetimes with + a precision based on selectable date or time units. -.. c:enumerator:: NPY_TIMEDELTA + .. c:enumerator:: NPY_TIMEDELTA - The enumeration value for a data type which holds lengths of times in - integers of selectable date or time units. + The enumeration value for a data type which holds lengths of times in + integers of selectable date or time units. -.. c:enumerator:: NPY_STRING + .. c:enumerator:: NPY_STRING - The enumeration value for ASCII strings of a selectable size. The - strings have a fixed maximum size within a given array. + The enumeration value for ASCII strings of a selectable size. The + strings have a fixed maximum size within a given array. -.. c:enumerator:: NPY_UNICODE + .. c:enumerator:: NPY_UNICODE - The enumeration value for UCS4 strings of a selectable size. The - strings have a fixed maximum size within a given array. + The enumeration value for UCS4 strings of a selectable size. The + strings have a fixed maximum size within a given array. -.. c:enumerator:: NPY_OBJECT + .. c:enumerator:: NPY_OBJECT - The enumeration value for references to arbitrary Python objects. + The enumeration value for references to arbitrary Python objects. -.. c:enumerator:: NPY_VOID + .. c:enumerator:: NPY_VOID - Primarily used to hold struct dtypes, but can contain arbitrary - binary data. + Primarily used to hold struct dtypes, but can contain arbitrary + binary data. -Some useful aliases of the above types are + Some useful aliases of the above types are -.. c:enumerator:: NPY_INTP + .. c:enumerator:: NPY_INTP - The enumeration value for a signed integer type which is the same - size as a (void \*) pointer. This is the type used by all - arrays of indices. + The enumeration value for a signed integer type which is the same + size as a (void \*) pointer. This is the type used by all + arrays of indices. -.. c:enumerator:: NPY_UINTP + .. c:enumerator:: NPY_UINTP - The enumeration value for an unsigned integer type which is the - same size as a (void \*) pointer. + The enumeration value for an unsigned integer type which is the + same size as a (void \*) pointer. -.. c:enumerator:: NPY_MASK + .. c:enumerator:: NPY_MASK - The enumeration value of the type used for masks, such as with - the :c:data:`NPY_ITER_ARRAYMASK` iterator flag. This is equivalent - to :c:data:`NPY_UINT8`. + The enumeration value of the type used for masks, such as with + the :c:data:`NPY_ITER_ARRAYMASK` iterator flag. This is equivalent + to :c:data:`NPY_UINT8`. -.. c:enumerator:: NPY_DEFAULT_TYPE + .. c:enumerator:: NPY_DEFAULT_TYPE - The default type to use when no dtype is explicitly specified, for - example when calling np.zero(shape). This is equivalent to - :c:data:`NPY_DOUBLE`. + The default type to use when no dtype is explicitly specified, for + example when calling np.zeros(shape). This is equivalent to + :c:data:`NPY_DOUBLE`. Other useful related constants are diff --git a/doc/source/reference/c-api/types-and-structures.rst b/doc/source/reference/c-api/types-and-structures.rst index 66bc04f05..95aa6dd17 100644 --- a/doc/source/reference/c-api/types-and-structures.rst +++ b/doc/source/reference/c-api/types-and-structures.rst @@ -122,7 +122,7 @@ PyArray_Type and PyArrayObject ``ndarraytypes.h`` points to this data member.
:c:data:`NPY_MAXDIMS` is the largest number of dimensions for any array. - .. c:member:: npy_intp dimensions + .. c:member:: npy_intp *dimensions An array of integers providing the shape in each dimension as long as nd :math:`\geq` 1. The integer is always large enough @@ -285,83 +285,16 @@ PyArrayDescr_Type and PyArray_Descr array like behavior. Each bit in this member is a flag which are named as: - .. c:member:: int alignment - - Non-NULL if this type is an array (C-contiguous) of some other type - - -.. - dedented to allow internal linking, pending a refactoring - -.. c:macro:: NPY_ITEM_REFCOUNT - - Indicates that items of this data-type must be reference - counted (using :c:func:`Py_INCREF` and :c:func:`Py_DECREF` ). - - .. c:macro:: NPY_ITEM_HASOBJECT - - Same as :c:data:`NPY_ITEM_REFCOUNT`. - -.. - dedented to allow internal linking, pending a refactoring - -.. c:macro:: NPY_LIST_PICKLE - - Indicates arrays of this data-type must be converted to a list - before pickling. - -.. c:macro:: NPY_ITEM_IS_POINTER - - Indicates the item is a pointer to some other data-type - -.. c:macro:: NPY_NEEDS_INIT - - Indicates memory for this data-type must be initialized (set - to 0) on creation. - -.. c:macro:: NPY_NEEDS_PYAPI - - Indicates this data-type requires the Python C-API during - access (so don't give up the GIL if array access is going to - be needed). - -.. c:macro:: NPY_USE_GETITEM - - On array access use the ``f->getitem`` function pointer - instead of the standard conversion to an array scalar. Must - use if you don't define an array scalar to go along with - the data-type. - -.. c:macro:: NPY_USE_SETITEM - - When creating a 0-d array from an array scalar use - ``f->setitem`` instead of the standard copy from an array - scalar. Must use if you don't define an array scalar to go - along with the data-type. - - .. c:macro:: NPY_FROM_FIELDS - - The bits that are inherited for the parent data-type if these - bits are set in any field of the data-type. Currently ( - :c:data:`NPY_NEEDS_INIT` \| :c:data:`NPY_LIST_PICKLE` \| - :c:data:`NPY_ITEM_REFCOUNT` \| :c:data:`NPY_NEEDS_PYAPI` ). - - .. c:macro:: NPY_OBJECT_DTYPE_FLAGS - - Bits set for the object data-type: ( :c:data:`NPY_LIST_PICKLE` - \| :c:data:`NPY_USE_GETITEM` \| :c:data:`NPY_ITEM_IS_POINTER` \| - :c:data:`NPY_ITEM_REFCOUNT` \| :c:data:`NPY_NEEDS_INIT` \| - :c:data:`NPY_NEEDS_PYAPI`). - - .. c:function:: int PyDataType_FLAGCHK(PyArray_Descr *dtype, int flags) - - Return true if all the given flags are set for the data-type - object. - - .. c:function:: int PyDataType_REFCHK(PyArray_Descr *dtype) - - Equivalent to :c:func:`PyDataType_FLAGCHK` (*dtype*, - :c:data:`NPY_ITEM_REFCOUNT`). + * :c:macro:`NPY_ITEM_REFCOUNT` + * :c:macro:`NPY_ITEM_HASOBJECT` + * :c:macro:`NPY_LIST_PICKLE` + * :c:macro:`NPY_ITEM_IS_POINTER` + * :c:macro:`NPY_NEEDS_INIT` + * :c:macro:`NPY_NEEDS_PYAPI` + * :c:macro:`NPY_USE_GETITEM` + * :c:macro:`NPY_USE_SETITEM` + * :c:macro:`NPY_FROM_FIELDS` + * :c:macro:`NPY_OBJECT_DTYPE_FLAGS` .. c:member:: int type_num @@ -452,6 +385,73 @@ PyArrayDescr_Type and PyArray_Descr Currently unused. Reserved for future use in caching hash values. +.. c:macro:: NPY_ITEM_REFCOUNT + + Indicates that items of this data-type must be reference + counted (using :c:func:`Py_INCREF` and :c:func:`Py_DECREF` ). + +.. c:macro:: NPY_ITEM_HASOBJECT + + Same as :c:data:`NPY_ITEM_REFCOUNT`. + +.. c:macro:: NPY_LIST_PICKLE + + Indicates arrays of this data-type must be converted to a list + before pickling. + +.. 
c:macro:: NPY_ITEM_IS_POINTER + + Indicates the item is a pointer to some other data-type + +.. c:macro:: NPY_NEEDS_INIT + + Indicates memory for this data-type must be initialized (set + to 0) on creation. + +.. c:macro:: NPY_NEEDS_PYAPI + + Indicates this data-type requires the Python C-API during + access (so don't give up the GIL if array access is going to + be needed). + +.. c:macro:: NPY_USE_GETITEM + + On array access use the ``f->getitem`` function pointer + instead of the standard conversion to an array scalar. Must + use if you don't define an array scalar to go along with + the data-type. + +.. c:macro:: NPY_USE_SETITEM + + When creating a 0-d array from an array scalar use + ``f->setitem`` instead of the standard copy from an array + scalar. Must use if you don't define an array scalar to go + along with the data-type. + +.. c:macro:: NPY_FROM_FIELDS + + The bits that are inherited for the parent data-type if these + bits are set in any field of the data-type. Currently ( + :c:data:`NPY_NEEDS_INIT` \| :c:data:`NPY_LIST_PICKLE` \| + :c:data:`NPY_ITEM_REFCOUNT` \| :c:data:`NPY_NEEDS_PYAPI` ). + +.. c:macro:: NPY_OBJECT_DTYPE_FLAGS + + Bits set for the object data-type: ( :c:data:`NPY_LIST_PICKLE` + \| :c:data:`NPY_USE_GETITEM` \| :c:data:`NPY_ITEM_IS_POINTER` \| + :c:data:`NPY_ITEM_REFCOUNT` \| :c:data:`NPY_NEEDS_INIT` \| + :c:data:`NPY_NEEDS_PYAPI`). + +.. c:function:: int PyDataType_FLAGCHK(PyArray_Descr *dtype, int flags) + + Return true if all the given flags are set for the data-type + object. + +.. c:function:: int PyDataType_REFCHK(PyArray_Descr *dtype) + + Equivalent to :c:func:`PyDataType_FLAGCHK` (*dtype*, + :c:data:`NPY_ITEM_REFCOUNT`). + .. c:type:: PyArray_ArrFuncs Functions implementing internal features. Not all of these @@ -997,10 +997,14 @@ PyUFunc_Type and PyUFuncObject .. c:member:: npy_uint32 *core_dim_flags - For each distinct core dimension, a set of ``UFUNC_CORE_DIM*`` flags + For each distinct core dimension, a set of flags ( + :c:macro:`UFUNC_CORE_DIM_CAN_IGNORE` and + :c:macro:`UFUNC_CORE_DIM_SIZE_INFERRED`) + + .. c:member:: PyObject *identity_value -.. - dedented to allow internal linking, pending a refactoring + Identity for reduction, when :c:member:`PyUFuncObject.identity` + is equal to :c:data:`PyUFunc_IdentityValue`. .. c:macro:: UFUNC_CORE_DIM_CAN_IGNORE @@ -1011,11 +1015,6 @@ PyUFunc_Type and PyUFuncObject if the dim size will be determined from the operands and not from a :ref:`frozen <frozen>` signature - .. c:member:: PyObject *identity_value - - Identity for reduction, when :c:member:`PyUFuncObject.identity` - is equal to :c:data:`PyUFunc_IdentityValue`. - PyArrayIter_Type and PyArrayIterObject -------------------------------------- @@ -1253,7 +1252,7 @@ ScalarArrayTypes ---------------- There is a Python type for each of the different built-in data types -that can be present in the array Most of these are simple wrappers +that can be present in the array. Most of these are simple wrappers around the corresponding data type in C. The C-names for these types are ``Py{TYPE}ArrType_Type`` where ``{TYPE}`` can be @@ -1457,22 +1456,6 @@ memory management. These types are not accessible directly from Python, and are not exposed to the C-API. They are included here only for completeness and assistance in understanding the code. - -.. c:type:: PyUFuncLoopObject - - A loose wrapper for a C-structure that contains the information - needed for looping. This is useful if you are trying to understand - the ufunc looping code. 
The :c:type:`PyUFuncLoopObject` is the associated - C-structure. It is defined in the ``ufuncobject.h`` header. - -.. c:type:: PyUFuncReduceObject - - A loose wrapper for the C-structure that contains the information - needed for reduce-like methods of ufuncs. This is useful if you are - trying to understand the reduce, accumulate, and reduce-at - code. The :c:type:`PyUFuncReduceObject` is the associated C-structure. It - is defined in the ``ufuncobject.h`` header. - .. c:type:: PyUFunc_Loop1d A simple linked-list of C-structures containing the information needed @@ -1483,8 +1466,12 @@ for completeness and assistance in understanding the code. Advanced indexing is handled with this Python type. It is simply a loose wrapper around the C-structure containing the variables - needed for advanced array indexing. The associated C-structure, - ``PyArrayMapIterObject``, is useful if you are trying to + needed for advanced array indexing. + +.. c:type:: PyArrayMapIterObject + + The C-structure associated with :c:var:`PyArrayMapIter_Type`. + This structure is useful if you are trying to understand the advanced-index mapping code. It is defined in the ``arrayobject.h`` header. This type is not exposed to Python and could be replaced with a C-structure. As a Python type it takes diff --git a/doc/source/reference/maskedarray.baseclass.rst b/doc/source/reference/maskedarray.baseclass.rst index fcd310faa..7121914b9 100644 --- a/doc/source/reference/maskedarray.baseclass.rst +++ b/doc/source/reference/maskedarray.baseclass.rst @@ -72,19 +72,19 @@ Attributes and properties of masked arrays .. seealso:: :ref:`Array Attributes <arrays.ndarray.attributes>` -.. autoattribute:: MaskedArray.data +.. autoattribute:: numpy::ma.MaskedArray.data -.. autoattribute:: MaskedArray.mask +.. autoattribute:: numpy::ma.MaskedArray.mask -.. autoattribute:: MaskedArray.recordmask +.. autoattribute:: numpy::ma.MaskedArray.recordmask -.. autoattribute:: MaskedArray.fill_value +.. autoattribute:: numpy::ma.MaskedArray.fill_value -.. autoattribute:: MaskedArray.baseclass +.. autoattribute:: numpy::ma.MaskedArray.baseclass -.. autoattribute:: MaskedArray.sharedmask +.. autoattribute:: numpy::ma.MaskedArray.sharedmask -.. autoattribute:: MaskedArray.hardmask +.. autoattribute:: numpy::ma.MaskedArray.hardmask As :class:`MaskedArray` is a subclass of :class:`~numpy.ndarray`, a masked array also inherits all the attributes and properties of a :class:`~numpy.ndarray` instance. diff --git a/doc/source/reference/random/extending.rst b/doc/source/reference/random/extending.rst index 6bb941496..998faf80a 100644 --- a/doc/source/reference/random/extending.rst +++ b/doc/source/reference/random/extending.rst @@ -25,7 +25,7 @@ provided by ``ctypes.next_double``. Both CTypes and CFFI allow the more complicated distributions to be used directly in Numba after compiling the file distributions.c into a ``DLL`` or ``so``. An example showing the use of a more complicated distribution is in -the `examples` section below. +the `Examples`_ section below. .. _random_cython: @@ -113,6 +113,6 @@ Examples .. 
toctree:: Numba <examples/numba> - CFFI + Numba <examples/numba_cffi> + CFFI + Numba <examples/numba_cffi> Cython <examples/cython/index> CFFI <examples/cffi> diff --git a/doc/source/reference/routines.dual.rst b/doc/source/reference/routines.dual.rst deleted file mode 100644 index 18c7791d0..000000000 --- a/doc/source/reference/routines.dual.rst +++ /dev/null @@ -1,47 +0,0 @@ -Optionally SciPy-accelerated routines (:mod:`numpy.dual`) -========================================================= - -.. automodule:: numpy.dual - -Linear algebra --------------- - -.. currentmodule:: numpy.linalg - -.. autosummary:: - - cholesky - det - eig - eigh - eigvals - eigvalsh - inv - lstsq - norm - pinv - solve - svd - -FFT ---- - -.. currentmodule:: numpy.fft - -.. autosummary:: - - fft - fft2 - fftn - ifft - ifft2 - ifftn - -Other ------ - -.. currentmodule:: numpy - -.. autosummary:: - - i0 diff --git a/doc/source/reference/routines.ma.rst b/doc/source/reference/routines.ma.rst index d503cc243..fd22a74aa 100644 --- a/doc/source/reference/routines.ma.rst +++ b/doc/source/reference/routines.ma.rst @@ -31,6 +31,7 @@ From existing data ma.fromfunction ma.MaskedArray.copy + ma.diagflat Ones and zeros @@ -72,6 +73,9 @@ Inspecting the array ma.isMaskedArray ma.isMA ma.isarray + ma.isin + ma.in1d + ma.unique ma.MaskedArray.all @@ -394,6 +398,17 @@ Clipping and rounding ma.MaskedArray.clip ma.MaskedArray.round +Set operations +~~~~~~~~~~~~~~ +.. autosummary:: + :toctree: generated/ + + + ma.intersect1d + ma.setdiff1d + ma.setxor1d + ma.union1d + Miscellanea ~~~~~~~~~~~ diff --git a/doc/source/reference/routines.rst b/doc/source/reference/routines.rst index 24117895b..72ed80972 100644 --- a/doc/source/reference/routines.rst +++ b/doc/source/reference/routines.rst @@ -24,7 +24,6 @@ indentation. routines.ctypeslib routines.datetime routines.dtype - routines.dual routines.emath routines.err routines.fft diff --git a/doc/source/release/1.24.0-notes.rst b/doc/source/release/1.24.0-notes.rst index bcccd8c57..1c9e719b3 100644 --- a/doc/source/release/1.24.0-notes.rst +++ b/doc/source/release/1.24.0-notes.rst @@ -389,7 +389,7 @@ Users can modify the behavior of these warnings using ``np.errstate``. Note that for float to int casts, the exact warnings that are given may be platform dependent. For example:: - arr = np.full(100, value=1000, dtype=np.float64) + arr = np.full(100, fill_value=1000, dtype=np.float64) arr.astype(np.int8) May give a result equivalent to (the intermediate cast means no warning is diff --git a/doc/source/user/basics.creation.rst b/doc/source/user/basics.creation.rst index 40b4e378d..3ee501889 100644 --- a/doc/source/user/basics.creation.rst +++ b/doc/source/user/basics.creation.rst @@ -75,8 +75,8 @@ the computation, here ``uint32`` and ``int32`` can both be represented in as ``int64``. The default NumPy behavior is to create arrays in either 32 or 64-bit signed -integers (platform dependent and matches C int size) or double precision -floating point numbers, int32/int64 and float, respectively. If you expect your +integers (platform dependent and matches C ``long`` size) or double precision +floating point numbers. If you expect your integer arrays to be a specific type, then you need to specify the dtype while you create the array. 
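As a minimal sketch of the default-dtype behavior described in the hunk above (outputs assume a 64-bit Linux platform, where the C ``long`` is 64 bits; on 64-bit Windows the default integer is typically ``int32``):

.. code-block:: python

    >>> import numpy as np
    >>> np.array([1, 2, 3]).dtype                    # default integer matches C long
    dtype('int64')
    >>> np.array([1.0, 2.0, 3.0]).dtype              # default float is double precision
    dtype('float64')
    >>> np.array([1, 2, 3], dtype=np.uint8).dtype    # explicit dtype at creation
    dtype('uint8')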
diff --git a/doc/source/user/quickstart.rst b/doc/source/user/quickstart.rst index d138242d7..783d5a447 100644 --- a/doc/source/user/quickstart.rst +++ b/doc/source/user/quickstart.rst @@ -1482,7 +1482,7 @@ Further reading - The `Python tutorial <https://docs.python.org/tutorial/>`__ - :ref:`reference` -- `SciPy Tutorial <https://docs.scipy.org/doc/scipy/reference/tutorial/index.html>`__ +- `SciPy Tutorial <https://docs.scipy.org/doc/scipy/tutorial/index.html>`__ - `SciPy Lecture Notes <https://scipy-lectures.org>`__ - A `matlab, R, IDL, NumPy/SciPy dictionary <http://mathesaurus.sf.net/>`__ - :doc:`tutorial-svd <numpy-tutorials:content/tutorial-svd>` diff --git a/meson.build b/meson.build index c1fc1dad8..47e71efc0 100644 --- a/meson.build +++ b/meson.build @@ -11,7 +11,7 @@ project( 'buildtype=debugoptimized', 'b_ndebug=if-release', 'c_std=c99', - 'cpp_std=c++14', + 'cpp_std=c++17', 'blas=openblas', 'lapack=openblas', 'pkgconfig.relocatable=true', diff --git a/numpy/__init__.cython-30.pxd b/numpy/__init__.cython-30.pxd index ec4c563ad..0dd2fff2b 100644 --- a/numpy/__init__.cython-30.pxd +++ b/numpy/__init__.cython-30.pxd @@ -515,7 +515,6 @@ cdef extern from "numpy/arrayobject.h": void* PyArray_GETPTR3(ndarray m, npy_intp i, npy_intp j, npy_intp k) nogil void* PyArray_GETPTR4(ndarray m, npy_intp i, npy_intp j, npy_intp k, npy_intp l) nogil - void PyArray_XDECREF_ERR(ndarray) # Cannot be supported due to out arg # void PyArray_DESCR_REPLACE(descr) diff --git a/numpy/__init__.pxd b/numpy/__init__.pxd index a6990cc68..47d9294c1 100644 --- a/numpy/__init__.pxd +++ b/numpy/__init__.pxd @@ -473,7 +473,6 @@ cdef extern from "numpy/arrayobject.h": void* PyArray_GETPTR3(ndarray m, npy_intp i, npy_intp j, npy_intp k) nogil void* PyArray_GETPTR4(ndarray m, npy_intp i, npy_intp j, npy_intp k, npy_intp l) nogil - void PyArray_XDECREF_ERR(ndarray) # Cannot be supported due to out arg # void PyArray_DESCR_REPLACE(descr) diff --git a/numpy/__init__.py b/numpy/__init__.py index e41f7eae6..83b42092f 100644 --- a/numpy/__init__.py +++ b/numpy/__init__.py @@ -74,10 +74,6 @@ test Run numpy unittests show_config Show numpy build configuration -dual - Overwrite certain functions with high-performance SciPy tools. - Note: `numpy.dual` is deprecated. Use the functions from NumPy or Scipy - directly instead of importing them from `numpy.dual`. matlib Make everything matrices. __version__ @@ -219,7 +215,14 @@ else: __deprecated_attrs__.update({ n: (alias, _msg.format(n=n, an=an)) for n, alias, an in _type_info}) - del _msg, _type_info + import math + + __deprecated_attrs__['math'] = (math, + "`np.math` is a deprecated alias for the standard library `math` " + "module (Deprecated NumPy 1.25). Replace usages of `np.math` with " + "`math`") + + del math, _msg, _type_info from .core import abs # now that numpy modules are imported, can initialize limits @@ -276,6 +279,7 @@ else: # Warn for expired attributes, and return a dummy function # that always raises an exception. import warnings + import math try: msg = __expired_functions__[attr] except KeyError: diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index ee5fbb601..8627f6c60 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -1921,7 +1921,6 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): def __neg__(self: NDArray[object_]) -> Any: ... # Binary ops - # NOTE: `ndarray` does not implement `__imatmul__` @overload def __matmul__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ...
# type: ignore[misc] @overload @@ -2508,6 +2507,19 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): @overload def __ior__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... + @overload + def __imatmul__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... + @overload + def __imatmul__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[_NBit1]]: ... + @overload + def __imatmul__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ... + @overload + def __imatmul__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co) -> NDArray[floating[_NBit1]]: ... + @overload + def __imatmul__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co) -> NDArray[complexfloating[_NBit1, _NBit1]]: ... + @overload + def __imatmul__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... + def __dlpack__(self: NDArray[number[Any]], *, stream: None = ...) -> _PyCapsule: ... def __dlpack_device__(self) -> tuple[int, L[0]]: ... diff --git a/numpy/array_api/_array_object.py b/numpy/array_api/_array_object.py index eee117be6..a949b5977 100644 --- a/numpy/array_api/_array_object.py +++ b/numpy/array_api/_array_object.py @@ -850,23 +850,13 @@ class Array: """ Performs the operation __imatmul__. """ - # Note: NumPy does not implement __imatmul__. - # matmul is not defined for scalars, but without this, we may get # the wrong error message from asarray. other = self._check_allowed_dtypes(other, "numeric", "__imatmul__") if other is NotImplemented: return other - - # __imatmul__ can only be allowed when it would not change the shape - # of self. - other_shape = other.shape - if self.shape == () or other_shape == (): - raise ValueError("@= requires at least one dimension") - if len(other_shape) == 1 or other_shape[-1] != other_shape[-2]: - raise ValueError("@= cannot change the shape of the input array") - self._array[:] = self._array.__matmul__(other._array) - return self + res = self._array.__imatmul__(other._array) + return self.__class__._new(res) def __rmatmul__(self: Array, other: Array, /) -> Array: """ diff --git a/numpy/conftest.py b/numpy/conftest.py index d450cc99b..f1a3eda98 100644 --- a/numpy/conftest.py +++ b/numpy/conftest.py @@ -30,7 +30,7 @@ hypothesis.settings.register_profile( hypothesis.settings.register_profile( name="np.test() profile", deadline=None, print_blob=True, database=None, derandomize=True, - suppress_health_check=hypothesis.HealthCheck.all(), + suppress_health_check=list(hypothesis.HealthCheck), ) # Note that the default profile is chosen based on the presence # of pytest.ini, but can be overridden by passing the diff --git a/numpy/core/_add_newdocs.py b/numpy/core/_add_newdocs.py index f2a2216c3..bd7c4f519 100644 --- a/numpy/core/_add_newdocs.py +++ b/numpy/core/_add_newdocs.py @@ -2935,7 +2935,7 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('T', add_newdoc('numpy.core.multiarray', 'ndarray', ('__array__', - """ a.__array__([dtype], /) -> reference if type unchanged, copy otherwise. + """ a.__array__([dtype], /) Returns either a new reference to self if dtype is not given or a new array of provided data type if dtype is different from the current dtype of the @@ -3008,7 +3008,7 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('__class_getitem__', add_newdoc('numpy.core.multiarray', 'ndarray', ('__deepcopy__', - """a.__deepcopy__(memo, /) -> Deep copy of array. 
+ """a.__deepcopy__(memo, /) Used if :func:`copy.deepcopy` is called on an array. diff --git a/numpy/core/arrayprint.py b/numpy/core/arrayprint.py index dcfb6e6a8..62cd52707 100644 --- a/numpy/core/arrayprint.py +++ b/numpy/core/arrayprint.py @@ -1096,7 +1096,7 @@ def format_float_scientific(x, precision=None, unique=True, trim='k', identify the value may be printed and rounded unbiased. -- versionadded:: 1.21.0 - + Returns ------- rep : string @@ -1181,7 +1181,7 @@ def format_float_positional(x, precision=None, unique=True, Minimum number of digits to print. Only has an effect if `unique=True` in which case additional digits past those necessary to uniquely identify the value may be printed, rounding the last additional digit. - + -- versionadded:: 1.21.0 Returns @@ -1339,13 +1339,29 @@ class TimedeltaFormat(_TimelikeFormat): class SubArrayFormat: - def __init__(self, format_function): + def __init__(self, format_function, **options): self.format_function = format_function + self.threshold = options['threshold'] + self.edge_items = options['edgeitems'] + + def __call__(self, a): + self.summary_insert = "..." if a.size > self.threshold else "" + return self.format_array(a) + + def format_array(self, a): + if np.ndim(a) == 0: + return self.format_function(a) + + if self.summary_insert and a.shape[0] > 2*self.edge_items: + formatted = ( + [self.format_array(a_) for a_ in a[:self.edge_items]] + + [self.summary_insert] + + [self.format_array(a_) for a_ in a[-self.edge_items:]] + ) + else: + formatted = [self.format_array(a_) for a_ in a] - def __call__(self, arr): - if arr.ndim <= 1: - return "[" + ", ".join(self.format_function(a) for a in arr) + "]" - return "[" + ", ".join(self.__call__(a) for a in arr) + "]" + return "[" + ", ".join(formatted) + "]" class StructuredVoidFormat: @@ -1369,7 +1385,7 @@ class StructuredVoidFormat: for field_name in data.dtype.names: format_function = _get_format_function(data[field_name], **options) if data.dtype[field_name].shape != (): - format_function = SubArrayFormat(format_function) + format_function = SubArrayFormat(format_function, **options) format_functions.append(format_function) return cls(format_functions) @@ -1428,7 +1444,7 @@ def dtype_is_implied(dtype): # not just void types can be structured, and names are not part of the repr if dtype.names is not None: return False - + # should care about endianness *unless size is 1* (e.g., int8, bool) if not dtype.isnative: return False diff --git a/numpy/core/code_generators/ufunc_docstrings.py b/numpy/core/code_generators/ufunc_docstrings.py index 05f947c15..1695b4d27 100644 --- a/numpy/core/code_generators/ufunc_docstrings.py +++ b/numpy/core/code_generators/ufunc_docstrings.py @@ -3833,7 +3833,7 @@ add_newdoc('numpy.core.umath', 'sqrt', -------- emath.sqrt A version which returns complex numbers when given negative reals. - Note: 0.0 and -0.0 are handled differently for complex inputs. + Note that 0.0 and -0.0 are handled differently for complex inputs. 
Notes ----- diff --git a/numpy/core/defchararray.py b/numpy/core/defchararray.py index b1ed67690..11c5a30bf 100644 --- a/numpy/core/defchararray.py +++ b/numpy/core/defchararray.py @@ -278,7 +278,7 @@ def str_len(a): See Also -------- - builtins.len + len Examples -------- diff --git a/numpy/core/einsumfunc.py b/numpy/core/einsumfunc.py index d6c5885b8..01966f0fe 100644 --- a/numpy/core/einsumfunc.py +++ b/numpy/core/einsumfunc.py @@ -728,7 +728,7 @@ def einsum_path(*operands, optimize='greedy', einsum_call=False): * if False no optimization is taken * if True defaults to the 'greedy' algorithm * 'optimal' An algorithm that combinatorially explores all possible - ways of contracting the listed tensors and choosest the least costly + ways of contracting the listed tensors and chooses the least costly path. Scales exponentially with the number of terms in the contraction. * 'greedy' An algorithm that chooses the best pair contraction diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py index b36667427..4608bc6de 100644 --- a/numpy/core/fromnumeric.py +++ b/numpy/core/fromnumeric.py @@ -238,24 +238,9 @@ def reshape(a, newshape, order='C'): Notes ----- - It is not always possible to change the shape of an array without - copying the data. If you want an error to be raised when the data is copied, - you should assign the new shape to the shape attribute of the array:: - - >>> a = np.zeros((10, 2)) - - # A transpose makes the array non-contiguous - >>> b = a.T - - # Taking a view makes it possible to modify the shape without modifying - # the initial object. - >>> c = b.view() - >>> c.shape = (20) - Traceback (most recent call last): - ... - AttributeError: Incompatible shape for in-place modification. Use - `.reshape()` to make a copy with the desired shape. - + It is not always possible to change the shape of an array without copying + the data. + The `order` keyword gives the index ordering both for *fetching* the values from `a`, and then *placing* the values into the output array. For example, let's say you have an array: @@ -3802,7 +3787,7 @@ def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, *, # Aliases of other functions. Provided unique docstrings -# are reference purposes only. Wherever possible, +# are for reference purposes only. Wherever possible, # avoid using them. diff --git a/numpy/core/function_base.py b/numpy/core/function_base.py index 3dc51a81b..00e4e6b0e 100644 --- a/numpy/core/function_base.py +++ b/numpy/core/function_base.py @@ -3,6 +3,7 @@ import warnings import operator import types +import numpy as np from . import numeric as _nx from .numeric import result_type, NaN, asanyarray, ndim from numpy.core.multiarray import add_docstring @@ -167,7 +168,7 @@ def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, y += start if endpoint and num > 1: - y[-1] = stop + y[-1, ...] = stop if axis != 0: y = _nx.moveaxis(y, 0, axis) @@ -183,7 +184,7 @@ def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, def _logspace_dispatcher(start, stop, num=None, endpoint=None, base=None, dtype=None, axis=None): - return (start, stop) + return (start, stop, base) @array_function_dispatch(_logspace_dispatcher) @@ -199,6 +200,9 @@ def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None, .. versionchanged:: 1.16.0 Non-scalar `start` and `stop` are now supported. + .. 
versionchanged:: 1.25.0
+        Non-scalar `base` is now supported.
+
     Parameters
     ----------
     start : array_like
@@ -223,9 +227,10 @@ def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None,
         an integer; `float` is chosen even if the arguments would produce an
         array of integers.
     axis : int, optional
-        The axis in the result to store the samples. Relevant only if start
-        or stop are array-like. By default (0), the samples will be along a
-        new axis inserted at the beginning. Use -1 to get an axis at the end.
+        The axis in the result to store the samples. Relevant only if start,
+        stop, or base are array-like. By default (0), the samples will be
+        along a new axis inserted at the beginning. Use -1 to get an axis at
+        the end.
 
         .. versionadded:: 1.16.0
 
@@ -247,7 +252,7 @@ def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None,
 
     Notes
     -----
-    Logspace is equivalent to the code
+    If base is a scalar, logspace is equivalent to the code
 
     >>> y = np.linspace(start, stop, num=num, endpoint=endpoint)
     ... # doctest: +SKIP
@@ -262,6 +267,9 @@ def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None,
     array([100.        ,  177.827941  ,  316.22776602,  562.34132519])
     >>> np.logspace(2.0, 3.0, num=4, base=2.0)
     array([4.        ,  5.0396842 ,  6.34960421,  8.        ])
+    >>> np.logspace(2.0, 3.0, num=4, base=[2.0, 3.0], axis=-1)
+    array([[ 4.        ,  5.0396842 ,  6.34960421,  8.        ],
+           [ 9.        , 12.98024613, 18.72075441, 27.        ]])
 
     Graphical illustration:
 
@@ -279,7 +287,13 @@ def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None,
     >>> plt.show()
 
     """
+    ndmax = np.broadcast(start, stop, base).ndim
+    start, stop, base = (
+        np.array(a, copy=False, subok=True, ndmin=ndmax)
+        for a in (start, stop, base)
+    )
     y = linspace(start, stop, num=num, endpoint=endpoint, axis=axis)
+    base = np.expand_dims(base, axis=axis)
     if dtype is None:
         return _nx.power(base, y)
     return _nx.power(base, y).astype(dtype, copy=False)
diff --git a/numpy/core/include/numpy/_dtype_api.h b/numpy/core/include/numpy/_dtype_api.h
index 2f801eace..c397699c7 100644
--- a/numpy/core/include/numpy/_dtype_api.h
+++ b/numpy/core/include/numpy/_dtype_api.h
@@ -5,14 +5,14 @@
 #ifndef NUMPY_CORE_INCLUDE_NUMPY___DTYPE_API_H_
 #define NUMPY_CORE_INCLUDE_NUMPY___DTYPE_API_H_
 
-#define __EXPERIMENTAL_DTYPE_API_VERSION 9
+#define __EXPERIMENTAL_DTYPE_API_VERSION 10
 
 struct PyArrayMethodObject_tag;
 
 /*
  * Largely opaque struct for DType classes (i.e. metaclass instances).
  * The internal definition is currently in `ndarraytypes.h` (export is a bit
- * more complex because `PyArray_Descr` is a DTypeMeta internall but not
+ * more complex because `PyArray_Descr` is a DTypeMeta internally but not
  * externally).
  */
 #if !(defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD)
@@ -199,9 +199,8 @@ typedef int (get_reduction_initial_function)(
         PyArrayMethod_Context *context, npy_bool reduction_is_empty,
         char *initial);
 
-
 /*
- * The following functions are only used be the wrapping array method defined
+ * The following functions are only used by the wrapping array method defined
  * in umath/wrapping_array_method.c
  */
 
@@ -257,14 +256,13 @@ typedef int translate_loop_descrs_func(int nin, int nout,
 * strided-loop function. This is designed for loops that need to visit every
 * element of a single array.
 *
- * Currently this is only used for array clearing, via the
- * NPY_DT_get_clear_loop, api hook, particularly for arrays storing embedded
- * references to python objects or heap-allocated data. If you define a dtype
- * that uses embedded references, the NPY_ITEM_REFCOUNT flag must be set on the
- * dtype instance.
+ * Currently this is used for array clearing, via the NPY_DT_get_clear_loop
+ * API hook, and zero-filling, via the NPY_DT_get_fill_zero_loop API hook.
+ * These are most useful for handling arrays storing embedded references to
+ * Python objects or heap-allocated data.
 *
 * The `void *traverse_context` is passed in because we may need to pass in
- * Intepreter state or similar in the futurem, but we don't want to pass in
+ * Interpreter state or similar in the future, but we don't want to pass in
 * a full context (with pointers to dtypes, method, caller which all make
 * no sense for a traverse function).
 *
@@ -280,9 +278,10 @@ typedef int (traverse_loop_function)(
 
 /*
 * Simplified get_loop function specific to dtype traversal
 *
- * Currently this is only used for clearing arrays. It should set the flags
- * needed for the traversal loop and set out_loop to the loop function, which
- * must be a valid traverse_loop_function pointer.
+ * It should set the flags needed for the traversal loop and set out_loop to
+ * the loop function, which must be a valid traverse_loop_function pointer.
+ * Currently this is used for zero-filling and clearing arrays storing
+ * embedded references.
 *
 */
 typedef int (get_traverse_loop_function)(
@@ -319,6 +318,7 @@ typedef int (get_traverse_loop_function)(
 #define NPY_DT_setitem 7
 #define NPY_DT_getitem 8
 #define NPY_DT_get_clear_loop 9
+#define NPY_DT_get_fill_zero_loop 10
 
 // These PyArray_ArrFunc slots will be deprecated and replaced eventually
 // getitem and setitem can be defined as a performance optimization;
diff --git a/numpy/core/meson.build b/numpy/core/meson.build
index 9aaa5ed87..e968fb336 100644
--- a/numpy/core/meson.build
+++ b/numpy/core/meson.build
@@ -112,7 +112,7 @@ cdata.set('NPY_SIZEOF_PY_LONG_LONG',
 if cc.has_header('complex.h')
   cdata.set10('HAVE_COMPLEX_H', true)
   cdata.set10('NPY_USE_C99_COMPLEX', true)
-  if cc.get_id() == 'msvc'
+  if cc.get_argument_syntax() == 'msvc'
     complex_types_to_check = [
       ['NPY_HAVE_COMPLEX_FLOAT', 'NPY_SIZEOF_COMPLEX_FLOAT', '_Fcomplex', 'float'],
       ['NPY_HAVE_COMPLEX_DOUBLE', 'NPY_SIZEOF_COMPLEX_DOUBLE', '_Dcomplex', 'double'],
@@ -261,7 +261,7 @@ else
   # function is not available in CI. For the latter there is a fallback path,
   # but that is broken because we don't have the exact long double
   # representation checks.
-  if cc.get_id() != 'msvc'
+  if cc.get_argument_syntax() != 'msvc'
     cdata.set10('HAVE_STRTOLD_L', false)
   endif
 endif
@@ -568,7 +568,7 @@ c_args_common = [
 cpp_args_common = c_args_common + [
   '-D__STDC_VERSION__=0',  # for compatibility with C headers
 ]
-if cc.get_id() != 'msvc'
+if cc.get_argument_syntax() != 'msvc'
   cpp_args_common += [
     '-fno-exceptions',  # no exception support
     '-fno-rtti',  # no runtime type information
diff --git a/numpy/core/setup.py b/numpy/core/setup.py
index 52b17bfc8..b3ca1557b 100644
--- a/numpy/core/setup.py
+++ b/numpy/core/setup.py
@@ -20,7 +20,7 @@ from setup_common import *  # noqa: F403
 NPY_RELAXED_STRIDES_CHECKING = (os.environ.get('NPY_RELAXED_STRIDES_CHECKING', "1") != "0")
 if not NPY_RELAXED_STRIDES_CHECKING:
     raise SystemError(
-        "Support for NPY_RELAXED_STRIDES_CHECKING=0 has been remove as of "
+        "Support for NPY_RELAXED_STRIDES_CHECKING=0 has been removed as of "
         "NumPy 1.23. 
This error will eventually be removed entirely.") # Put NPY_RELAXED_STRIDES_DEBUG=1 in the environment if you want numpy to use a @@ -405,7 +405,6 @@ def configuration(parent_package='',top_path=None): exec_mod_from_location) from numpy.distutils.system_info import (get_info, blas_opt_info, lapack_opt_info) - from numpy.distutils.ccompiler_opt import NPY_CXX_FLAGS from numpy.version import release as is_released config = Configuration('core', parent_package, top_path) @@ -658,44 +657,6 @@ def configuration(parent_package='',top_path=None): # but we cannot use add_installed_pkg_config here either, so we only # update the substitution dictionary during npymath build config_cmd = config.get_config_cmd() - # Check that the toolchain works, to fail early if it doesn't - # (avoid late errors with MATHLIB which are confusing if the - # compiler does not work). - for lang, test_code, note in ( - ('c', 'int main(void) { return 0;}', ''), - ('c++', ( - 'int main(void)' - '{ auto x = 0.0; return static_cast<int>(x); }' - ), ( - 'note: A compiler with support for C++11 language ' - 'features is required.' - ) - ), - ): - is_cpp = lang == 'c++' - if is_cpp: - # this a workaround to get rid of invalid c++ flags - # without doing big changes to config. - # c tested first, compiler should be here - bk_c = config_cmd.compiler - config_cmd.compiler = bk_c.cxx_compiler() - - # Check that Linux compiler actually support the default flags - if hasattr(config_cmd.compiler, 'compiler'): - config_cmd.compiler.compiler.extend(NPY_CXX_FLAGS) - config_cmd.compiler.compiler_so.extend(NPY_CXX_FLAGS) - - st = config_cmd.try_link(test_code, lang=lang) - if not st: - # rerun the failing command in verbose mode - config_cmd.compiler.verbose = True - config_cmd.try_link(test_code, lang=lang) - raise RuntimeError( - f"Broken toolchain: cannot link a simple {lang.upper()} " - f"program. {note}" - ) - if is_cpp: - config_cmd.compiler = bk_c mlibs = check_mathlib(config_cmd) posix_mlib = ' '.join(['-l%s' % l for l in mlibs]) @@ -1049,9 +1010,6 @@ def configuration(parent_package='',top_path=None): svml_objs.sort() config.add_extension('_multiarray_umath', - # Forcing C language even though we have C++ sources. - # It forces the C linker and don't link C++ runtime. - language = 'c', sources=multiarray_src + umath_src + common_src + [generate_config_h, @@ -1067,8 +1025,7 @@ def configuration(parent_package='',top_path=None): common_deps, libraries=['npymath'], extra_objects=svml_objs, - extra_info=extra_info, - extra_cxx_compile_args=NPY_CXX_FLAGS) + extra_info=extra_info) ####################################################################### # umath_tests module # diff --git a/numpy/core/src/common/simd/vec/memory.h b/numpy/core/src/common/simd/vec/memory.h index de78d02e3..1ad39cead 100644 --- a/numpy/core/src/common/simd/vec/memory.h +++ b/numpy/core/src/common/simd/vec/memory.h @@ -205,9 +205,9 @@ NPY_FINLINE npyv_s32 npyv_load_till_s32(const npy_int32 *ptr, npy_uintp nlane, n assert(nlane > 0); npyv_s32 vfill = npyv_setall_s32(fill); #ifdef NPY_HAVE_VX - const unsigned blane = (unsigned short)nlane; + const unsigned blane = (nlane > 4) ? 
4 : nlane; const npyv_u32 steps = npyv_set_u32(0, 1, 2, 3); - const npyv_u32 vlane = npyv_setall_u32((unsigned)blane); + const npyv_u32 vlane = npyv_setall_u32(blane); const npyv_b32 mask = vec_cmpgt(vlane, steps); npyv_s32 a = vec_load_len(ptr, blane*4-1); return vec_sel(vfill, a, mask); @@ -233,8 +233,8 @@ NPY_FINLINE npyv_s32 npyv_load_till_s32(const npy_int32 *ptr, npy_uintp nlane, n NPY_FINLINE npyv_s32 npyv_load_tillz_s32(const npy_int32 *ptr, npy_uintp nlane) { #ifdef NPY_HAVE_VX - unsigned blane = ((unsigned short)nlane)*4 - 1; - return vec_load_len(ptr, blane); + unsigned blane = (nlane > 4) ? 4 : nlane; + return vec_load_len(ptr, blane*4-1); #else return npyv_load_till_s32(ptr, nlane, 0); #endif @@ -252,7 +252,7 @@ NPY_FINLINE npyv_s64 npyv_load_till_s64(const npy_int64 *ptr, npy_uintp nlane, n NPY_FINLINE npyv_s64 npyv_load_tillz_s64(const npy_int64 *ptr, npy_uintp nlane) { #ifdef NPY_HAVE_VX - unsigned blane = (unsigned short)nlane; + unsigned blane = (nlane > 2) ? 2 : nlane; return vec_load_len((const signed long long*)ptr, blane*8-1); #else return npyv_load_till_s64(ptr, nlane, 0); @@ -354,7 +354,7 @@ NPY_FINLINE void npyv_store_till_s32(npy_int32 *ptr, npy_uintp nlane, npyv_s32 a { assert(nlane > 0); #ifdef NPY_HAVE_VX - unsigned blane = (unsigned short)nlane; + unsigned blane = (nlane > 4) ? 4 : nlane; vec_store_len(a, ptr, blane*4-1); #else switch(nlane) { @@ -378,7 +378,7 @@ NPY_FINLINE void npyv_store_till_s64(npy_int64 *ptr, npy_uintp nlane, npyv_s64 a { assert(nlane > 0); #ifdef NPY_HAVE_VX - unsigned blane = (unsigned short)nlane; + unsigned blane = (nlane > 2) ? 2 : nlane; vec_store_len(a, (signed long long*)ptr, blane*8-1); #else if (nlane == 1) { diff --git a/numpy/core/src/common/ucsnarrow.h b/numpy/core/src/common/ucsnarrow.h index 6fe157199..4b17a2809 100644 --- a/numpy/core/src/common/ucsnarrow.h +++ b/numpy/core/src/common/ucsnarrow.h @@ -2,6 +2,6 @@ #define NUMPY_CORE_SRC_COMMON_NPY_UCSNARROW_H_ NPY_NO_EXPORT PyUnicodeObject * -PyUnicode_FromUCS4(char *src, Py_ssize_t size, int swap, int align); +PyUnicode_FromUCS4(char const *src, Py_ssize_t size, int swap, int align); #endif /* NUMPY_CORE_SRC_COMMON_NPY_UCSNARROW_H_ */ diff --git a/numpy/core/src/multiarray/array_coercion.c b/numpy/core/src/multiarray/array_coercion.c index d6bee1a7b..d55a5752b 100644 --- a/numpy/core/src/multiarray/array_coercion.c +++ b/numpy/core/src/multiarray/array_coercion.c @@ -878,7 +878,8 @@ find_descriptor_from_array( * means that legacy behavior is used: The dtype instances "S0", "U0", and * "V0" are converted to mean the DType classes instead. * When dtype != NULL, this path is ignored, and the function does nothing - * unless descr == NULL. + * unless descr == NULL. If both descr and dtype are null, it returns the + * descriptor for the array. * * This function is identical to normal casting using only the dtype, however, * it supports inspecting the elements when the array has object dtype @@ -927,7 +928,7 @@ PyArray_AdaptDescriptorToArray( /* This is an object array but contained no elements, use default */ new_descr = NPY_DT_CALL_default_descr(dtype); } - Py_DECREF(dtype); + Py_XDECREF(dtype); return new_descr; } @@ -1280,7 +1281,9 @@ PyArray_DiscoverDTypeAndShape( } if (requested_descr != NULL) { - assert(fixed_DType == NPY_DTYPE(requested_descr)); + if (fixed_DType != NULL) { + assert(fixed_DType == NPY_DTYPE(requested_descr)); + } /* The output descriptor must be the input. 
*/ Py_INCREF(requested_descr); *out_descr = requested_descr; diff --git a/numpy/core/src/multiarray/buffer.c b/numpy/core/src/multiarray/buffer.c index 76652550a..c9d881e16 100644 --- a/numpy/core/src/multiarray/buffer.c +++ b/numpy/core/src/multiarray/buffer.c @@ -17,6 +17,7 @@ #include "numpyos.h" #include "arrayobject.h" #include "scalartypes.h" +#include "dtypemeta.h" /************************************************************************* **************** Implement Buffer Protocol **************************** @@ -417,9 +418,16 @@ _buffer_format_string(PyArray_Descr *descr, _tmp_string_t *str, break; } default: - PyErr_Format(PyExc_ValueError, - "cannot include dtype '%c' in a buffer", - descr->type); + if (NPY_DT_is_legacy(NPY_DTYPE(descr))) { + PyErr_Format(PyExc_ValueError, + "cannot include dtype '%c' in a buffer", + descr->type); + } + else { + PyErr_Format(PyExc_ValueError, + "cannot include dtype '%s' in a buffer", + ((PyTypeObject*)NPY_DTYPE(descr))->tp_name); + } return -1; } } diff --git a/numpy/core/src/multiarray/common.c b/numpy/core/src/multiarray/common.c index da8d23a26..573d0d606 100644 --- a/numpy/core/src/multiarray/common.c +++ b/numpy/core/src/multiarray/common.c @@ -128,24 +128,6 @@ PyArray_DTypeFromObject(PyObject *obj, int maxdims, PyArray_Descr **out_dtype) } -NPY_NO_EXPORT int -_zerofill(PyArrayObject *ret) -{ - if (PyDataType_REFCHK(PyArray_DESCR(ret))) { - PyObject *zero = PyLong_FromLong(0); - PyArray_FillObjectArray(ret, zero); - Py_DECREF(zero); - if (PyErr_Occurred()) { - return -1; - } - } - else { - npy_intp n = PyArray_NBYTES(ret); - memset(PyArray_DATA(ret), 0, n); - } - return 0; -} - NPY_NO_EXPORT npy_bool _IsWriteable(PyArrayObject *ap) { @@ -459,3 +441,32 @@ new_array_for_sum(PyArrayObject *ap1, PyArrayObject *ap2, PyArrayObject* out, } } +NPY_NO_EXPORT int +check_is_convertible_to_scalar(PyArrayObject *v) +{ + if (PyArray_NDIM(v) == 0) { + return 0; + } + + /* Remove this if-else block when the deprecation expires */ + if (PyArray_SIZE(v) == 1) { + /* Numpy 1.25.0, 2023-01-02 */ + if (DEPRECATE( + "Conversion of an array with ndim > 0 to a scalar " + "is deprecated, and will error in future. " + "Ensure you extract a single element from your array " + "before performing this operation. " + "(Deprecated NumPy 1.25.)") < 0) { + return -1; + } + return 0; + } else { + PyErr_SetString(PyExc_TypeError, + "only length-1 arrays can be converted to Python scalars"); + return -1; + } + + PyErr_SetString(PyExc_TypeError, + "only 0-dimensional arrays can be converted to Python scalars"); + return -1; +} diff --git a/numpy/core/src/multiarray/common.h b/numpy/core/src/multiarray/common.h index 4e067b22c..cb9fadc4e 100644 --- a/numpy/core/src/multiarray/common.h +++ b/numpy/core/src/multiarray/common.h @@ -55,9 +55,6 @@ PyArray_DTypeFromObject(PyObject *obj, int maxdims, NPY_NO_EXPORT PyArray_Descr * _array_find_python_scalar_type(PyObject *op); -NPY_NO_EXPORT int -_zerofill(PyArrayObject *ret); - NPY_NO_EXPORT npy_bool _IsWriteable(PyArrayObject *ap); @@ -335,6 +332,13 @@ PyArray_TupleFromItems(int n, PyObject *const *items, int make_null_none) return tuple; } +/* + * Returns 0 if the array has rank 0, -1 otherwise. Prints a deprecation + * warning for arrays of _size_ 1. 
+ */ +NPY_NO_EXPORT int +check_is_convertible_to_scalar(PyArrayObject *v); + #include "ucsnarrow.h" diff --git a/numpy/core/src/multiarray/convert.c b/numpy/core/src/multiarray/convert.c index e99bc3fe4..9e0c9fb60 100644 --- a/numpy/core/src/multiarray/convert.c +++ b/numpy/core/src/multiarray/convert.c @@ -21,6 +21,7 @@ #include "convert.h" #include "array_coercion.h" +#include "refcount.h" int fallocate(int fd, int mode, off_t offset, off_t len); @@ -416,7 +417,7 @@ PyArray_FillWithScalar(PyArrayObject *arr, PyObject *obj) descr, value); if (PyDataType_REFCHK(descr)) { - PyArray_Item_XDECREF(value, descr); + PyArray_ClearBuffer(descr, value, 0, 1, 1); } PyMem_FREE(value_buffer_heap); return retcode; diff --git a/numpy/core/src/multiarray/convert_datatype.c b/numpy/core/src/multiarray/convert_datatype.c index 53db5c577..de1cd075d 100644 --- a/numpy/core/src/multiarray/convert_datatype.c +++ b/numpy/core/src/multiarray/convert_datatype.c @@ -3913,21 +3913,6 @@ PyArray_InitializeObjectToObjectCast(void) } -static int -PyArray_SetClearFunctions(void) -{ - PyArray_DTypeMeta *Object = PyArray_DTypeFromTypeNum(NPY_OBJECT); - NPY_DT_SLOTS(Object)->get_clear_loop = &npy_get_clear_object_strided_loop; - Py_DECREF(Object); /* use borrowed */ - - PyArray_DTypeMeta *Void = PyArray_DTypeFromTypeNum(NPY_VOID); - NPY_DT_SLOTS(Void)->get_clear_loop = &npy_get_clear_void_and_legacy_user_dtype_loop; - Py_DECREF(Void); /* use borrowed */ - return 0; -} - - - NPY_NO_EXPORT int PyArray_InitializeCasts() { @@ -3947,8 +3932,5 @@ PyArray_InitializeCasts() if (PyArray_InitializeDatetimeCasts() < 0) { return -1; } - if (PyArray_SetClearFunctions() < 0) { - return -1; - } return 0; } diff --git a/numpy/core/src/multiarray/ctors.c b/numpy/core/src/multiarray/ctors.c index 38af60427..79a1905a7 100644 --- a/numpy/core/src/multiarray/ctors.c +++ b/numpy/core/src/multiarray/ctors.c @@ -715,6 +715,11 @@ PyArray_NewFromDescr_int( fa->base = (PyObject *)NULL; fa->weakreflist = (PyObject *)NULL; + /* needed for zero-filling logic below, defined and initialized up here + so cleanup logic can go in the fail block */ + NPY_traverse_info fill_zero_info; + NPY_traverse_info_init(&fill_zero_info); + if (nd > 0) { fa->dimensions = npy_alloc_cache_dim(2 * nd); if (fa->dimensions == NULL) { @@ -784,6 +789,31 @@ PyArray_NewFromDescr_int( if (data == NULL) { + /* float errors do not matter and we do not release GIL */ + NPY_ARRAYMETHOD_FLAGS zero_flags; + get_traverse_loop_function *get_fill_zero_loop = + NPY_DT_SLOTS(NPY_DTYPE(descr))->get_fill_zero_loop; + if (get_fill_zero_loop != NULL) { + if (get_fill_zero_loop( + NULL, descr, 1, descr->elsize, &(fill_zero_info.func), + &(fill_zero_info.auxdata), &zero_flags) < 0) { + goto fail; + } + } + + /* + * We always want a zero-filled array allocated with calloc if + * NPY_NEEDS_INIT is set on the dtype, for safety. We also want a + * zero-filled array if zeroed is set and the zero-filling loop isn't + * defined, for better performance. + * + * If the zero-filling loop is defined and zeroed is set, allocate + * with malloc and let the zero-filling loop fill the array buffer + * with valid zero values for the dtype. 
+ */ + int use_calloc = (PyDataType_FLAGCHK(descr, NPY_NEEDS_INIT) || + (zeroed && (fill_zero_info.func == NULL))); + /* Store the handler in case the default is modified */ fa->mem_handler = PyDataMem_GetHandler(); if (fa->mem_handler == NULL) { @@ -801,11 +831,8 @@ PyArray_NewFromDescr_int( fa->strides[i] = 0; } } - /* - * It is bad to have uninitialized OBJECT pointers - * which could also be sub-fields of a VOID array - */ - if (zeroed || PyDataType_FLAGCHK(descr, NPY_NEEDS_INIT)) { + + if (use_calloc) { data = PyDataMem_UserNEW_ZEROED(nbytes, 1, fa->mem_handler); } else { @@ -816,6 +843,18 @@ PyArray_NewFromDescr_int( goto fail; } + /* + * If the array needs special dtype-specific zero-filling logic, do that + */ + if (NPY_UNLIKELY(zeroed && (fill_zero_info.func != NULL))) { + npy_intp size = PyArray_MultiplyList(fa->dimensions, fa->nd); + if (fill_zero_info.func( + NULL, descr, data, size, descr->elsize, + fill_zero_info.auxdata) < 0) { + goto fail; + } + } + fa->flags |= NPY_ARRAY_OWNDATA; } else { @@ -910,9 +949,11 @@ PyArray_NewFromDescr_int( } } } + NPY_traverse_info_xfree(&fill_zero_info); return (PyObject *)fa; fail: + NPY_traverse_info_xfree(&fill_zero_info); Py_XDECREF(fa->mem_handler); Py_DECREF(fa); return NULL; @@ -1605,6 +1646,37 @@ NPY_NO_EXPORT PyObject * PyArray_FromAny(PyObject *op, PyArray_Descr *newtype, int min_depth, int max_depth, int flags, PyObject *context) { + npy_dtype_info dt_info = {NULL, NULL}; + + int res = PyArray_ExtractDTypeAndDescriptor( + newtype, &dt_info.descr, &dt_info.dtype); + + Py_XDECREF(newtype); + + if (res < 0) { + Py_XDECREF(dt_info.descr); + Py_XDECREF(dt_info.dtype); + return NULL; + } + + PyObject* ret = PyArray_FromAny_int(op, dt_info.descr, dt_info.dtype, + min_depth, max_depth, flags, context); + + Py_XDECREF(dt_info.descr); + Py_XDECREF(dt_info.dtype); + return ret; +} + +/* + * Internal version of PyArray_FromAny that accepts a dtypemeta. Borrows + * references to the descriptor and dtype. + */ + +NPY_NO_EXPORT PyObject * +PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, + PyArray_DTypeMeta *in_DType, int min_depth, int max_depth, + int flags, PyObject *context) +{ /* * This is the main code to make a NumPy array from a Python * Object. It is called from many different places. @@ -1620,26 +1692,15 @@ PyArray_FromAny(PyObject *op, PyArray_Descr *newtype, int min_depth, return NULL; } - PyArray_Descr *fixed_descriptor; - PyArray_DTypeMeta *fixed_DType; - if (PyArray_ExtractDTypeAndDescriptor(newtype, - &fixed_descriptor, &fixed_DType) < 0) { - Py_XDECREF(newtype); - return NULL; - } - Py_XDECREF(newtype); - ndim = PyArray_DiscoverDTypeAndShape(op, - NPY_MAXDIMS, dims, &cache, fixed_DType, fixed_descriptor, &dtype, + NPY_MAXDIMS, dims, &cache, in_DType, in_descr, &dtype, flags & NPY_ARRAY_ENSURENOCOPY); - Py_XDECREF(fixed_descriptor); - Py_XDECREF(fixed_DType); if (ndim < 0) { return NULL; } - if (NPY_UNLIKELY(fixed_descriptor != NULL && PyDataType_HASSUBARRAY(dtype))) { + if (NPY_UNLIKELY(in_descr != NULL && PyDataType_HASSUBARRAY(dtype))) { /* * When a subarray dtype was passed in, its dimensions are appended * to the array dimension (causing a dimension mismatch). 
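The subarray-dtype behavior described in the comment above is visible from Python. A minimal doctest-style sketch (the subarray dimensions of the requested descriptor are appended to the array's own shape, with scalar elements broadcast into them):

    >>> import numpy as np
    >>> np.zeros(3, dtype="(2,)f4").shape     # (3,) plus the subarray's (2,)
    (3, 2)
    >>> np.array([1, 2, 3], dtype="(2,)f4")   # each element broadcast to shape (2,)
    array([[1., 1.],
           [2., 2.],
           [3., 3.]], dtype=float32)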
@@ -1737,7 +1798,7 @@ PyArray_FromAny(PyObject *op, PyArray_Descr *newtype, int min_depth, } else if (cache == NULL && PyArray_IsScalar(op, Void) && !(((PyVoidScalarObject *)op)->flags & NPY_ARRAY_OWNDATA) && - newtype == NULL) { + ((in_descr == NULL) && (in_DType == NULL))) { /* * Special case, we return a *view* into void scalars, mainly to * allow things similar to the "reversed" assignment: @@ -1768,7 +1829,7 @@ PyArray_FromAny(PyObject *op, PyArray_Descr *newtype, int min_depth, return NULL; } - if (cache == NULL && newtype != NULL && + if (cache == NULL && in_descr != NULL && PyDataType_ISSIGNED(dtype) && PyArray_IsScalar(op, Generic)) { assert(ndim == 0); @@ -1899,7 +1960,6 @@ PyArray_FromAny(PyObject *op, PyArray_Descr *newtype, int min_depth, * NPY_ARRAY_FORCECAST will cause a cast to occur regardless of whether or not * it is safe. * - * context is passed through to PyArray_GetArrayParamsFromObject */ /*NUMPY_API @@ -1909,24 +1969,57 @@ NPY_NO_EXPORT PyObject * PyArray_CheckFromAny(PyObject *op, PyArray_Descr *descr, int min_depth, int max_depth, int requires, PyObject *context) { + npy_dtype_info dt_info = {NULL, NULL}; + + int res = PyArray_ExtractDTypeAndDescriptor( + descr, &dt_info.descr, &dt_info.dtype); + + Py_XDECREF(descr); + + if (res < 0) { + Py_XDECREF(dt_info.descr); + Py_XDECREF(dt_info.dtype); + return NULL; + } + + PyObject* ret = PyArray_CheckFromAny_int( + op, dt_info.descr, dt_info.dtype, min_depth, max_depth, requires, + context); + + Py_XDECREF(dt_info.descr); + Py_XDECREF(dt_info.dtype); + return ret; +} + +/* + * Internal version of PyArray_CheckFromAny that accepts a dtypemeta. Borrows + * references to the descriptor and dtype. + */ + +NPY_NO_EXPORT PyObject * +PyArray_CheckFromAny_int(PyObject *op, PyArray_Descr *in_descr, + PyArray_DTypeMeta *in_DType, int min_depth, + int max_depth, int requires, PyObject *context) +{ PyObject *obj; if (requires & NPY_ARRAY_NOTSWAPPED) { - if (!descr && PyArray_Check(op) && + if (!in_descr && PyArray_Check(op) && PyArray_ISBYTESWAPPED((PyArrayObject* )op)) { - descr = PyArray_DescrNew(PyArray_DESCR((PyArrayObject *)op)); - if (descr == NULL) { + in_descr = PyArray_DescrNew(PyArray_DESCR((PyArrayObject *)op)); + if (in_descr == NULL) { return NULL; } } - else if (descr && !PyArray_ISNBO(descr->byteorder)) { - PyArray_DESCR_REPLACE(descr); + else if (in_descr && !PyArray_ISNBO(in_descr->byteorder)) { + PyArray_DESCR_REPLACE(in_descr); } - if (descr && descr->byteorder != NPY_IGNORE) { - descr->byteorder = NPY_NATIVE; + if (in_descr && in_descr->byteorder != NPY_IGNORE) { + in_descr->byteorder = NPY_NATIVE; } } - obj = PyArray_FromAny(op, descr, min_depth, max_depth, requires, context); + obj = PyArray_FromAny_int(op, in_descr, in_DType, min_depth, + max_depth, requires, context); if (obj == NULL) { return NULL; } @@ -2965,17 +3058,7 @@ PyArray_Zeros(int nd, npy_intp const *dims, PyArray_Descr *type, int is_f_order) return NULL; } - /* handle objects */ - if (PyDataType_REFCHK(PyArray_DESCR(ret))) { - if (_zerofill(ret) < 0) { - Py_DECREF(ret); - return NULL; - } - } - - return (PyObject *)ret; - } /*NUMPY_API diff --git a/numpy/core/src/multiarray/ctors.h b/numpy/core/src/multiarray/ctors.h index 98160b1cc..22020e26a 100644 --- a/numpy/core/src/multiarray/ctors.h +++ b/numpy/core/src/multiarray/ctors.h @@ -36,10 +36,20 @@ _array_from_array_like(PyObject *op, int never_copy); NPY_NO_EXPORT PyObject * +PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, + PyArray_DTypeMeta *in_DType, int min_depth, int max_depth, + 
int flags, PyObject *context); + +NPY_NO_EXPORT PyObject * PyArray_FromAny(PyObject *op, PyArray_Descr *newtype, int min_depth, int max_depth, int flags, PyObject *context); NPY_NO_EXPORT PyObject * +PyArray_CheckFromAny_int(PyObject *op, PyArray_Descr *in_descr, + PyArray_DTypeMeta *in_DType, int min_depth, + int max_depth, int requires, PyObject *context); + +NPY_NO_EXPORT PyObject * PyArray_CheckFromAny(PyObject *op, PyArray_Descr *descr, int min_depth, int max_depth, int requires, PyObject *context); diff --git a/numpy/core/src/multiarray/dtype_traversal.c b/numpy/core/src/multiarray/dtype_traversal.c index cefa7d6e1..769c2e015 100644 --- a/numpy/core/src/multiarray/dtype_traversal.c +++ b/numpy/core/src/multiarray/dtype_traversal.c @@ -24,7 +24,7 @@ #include "alloc.h" #include "array_method.h" #include "dtypemeta.h" - +#include "refcount.h" #include "dtype_traversal.h" @@ -124,6 +124,39 @@ npy_get_clear_object_strided_loop( } +/**************** Python Object zero fill *********************/ + +static int +fill_zero_object_strided_loop( + void *NPY_UNUSED(traverse_context), PyArray_Descr *NPY_UNUSED(descr), + char *data, npy_intp size, npy_intp stride, + NpyAuxData *NPY_UNUSED(auxdata)) +{ + PyObject *zero = PyLong_FromLong(0); + while (size--) { + Py_INCREF(zero); + // assumes `data` doesn't have a pre-existing object inside it + memcpy(data, &zero, sizeof(zero)); + data += stride; + } + Py_DECREF(zero); + return 0; +} + +NPY_NO_EXPORT int +npy_object_get_fill_zero_loop(void *NPY_UNUSED(traverse_context), + PyArray_Descr *NPY_UNUSED(descr), + int NPY_UNUSED(aligned), + npy_intp NPY_UNUSED(fixed_stride), + traverse_loop_function **out_loop, + NpyAuxData **NPY_UNUSED(out_auxdata), + NPY_ARRAYMETHOD_FLAGS *flags) +{ + *flags = NPY_METH_REQUIRES_PYAPI | NPY_METH_NO_FLOATINGPOINT_ERRORS; + *out_loop = &fill_zero_object_strided_loop; + return 0; +} + /**************** Structured DType clear funcationality ***************/ /* @@ -408,7 +441,6 @@ clear_no_op( return 0; } - NPY_NO_EXPORT int npy_get_clear_void_and_legacy_user_dtype_loop( void *traverse_context, PyArray_Descr *dtype, int aligned, @@ -472,3 +504,41 @@ npy_get_clear_void_and_legacy_user_dtype_loop( dtype); return -1; } + +/**************** Structured DType zero fill ***************/ + +static int +fill_zero_void_with_objects_strided_loop( + void *NPY_UNUSED(traverse_context), PyArray_Descr *descr, + char *data, npy_intp size, npy_intp stride, + NpyAuxData *NPY_UNUSED(auxdata)) +{ + PyObject *zero = PyLong_FromLong(0); + while (size--) { + _fillobject(data, zero, descr); + data += stride; + } + Py_DECREF(zero); + return 0; +} + + +NPY_NO_EXPORT int +npy_void_get_fill_zero_loop(void *NPY_UNUSED(traverse_context), + PyArray_Descr *descr, + int NPY_UNUSED(aligned), + npy_intp NPY_UNUSED(fixed_stride), + traverse_loop_function **out_loop, + NpyAuxData **NPY_UNUSED(out_auxdata), + NPY_ARRAYMETHOD_FLAGS *flags) +{ + *flags = NPY_METH_NO_FLOATINGPOINT_ERRORS; + if (PyDataType_REFCHK(descr)) { + *flags |= NPY_METH_REQUIRES_PYAPI; + *out_loop = &fill_zero_void_with_objects_strided_loop; + } + else { + *out_loop = NULL; + } + return 0; +} diff --git a/numpy/core/src/multiarray/dtype_traversal.h b/numpy/core/src/multiarray/dtype_traversal.h index fd060a0f0..a9c185382 100644 --- a/numpy/core/src/multiarray/dtype_traversal.h +++ b/numpy/core/src/multiarray/dtype_traversal.h @@ -19,6 +19,22 @@ npy_get_clear_void_and_legacy_user_dtype_loop( traverse_loop_function **out_loop, NpyAuxData **out_traversedata, NPY_ARRAYMETHOD_FLAGS *flags); +/* NumPy 
DType zero-filling implementations */ + +NPY_NO_EXPORT int +npy_object_get_fill_zero_loop( + void *NPY_UNUSED(traverse_context), PyArray_Descr *NPY_UNUSED(descr), + int NPY_UNUSED(aligned), npy_intp NPY_UNUSED(fixed_stride), + traverse_loop_function **out_loop, NpyAuxData **NPY_UNUSED(out_auxdata), + NPY_ARRAYMETHOD_FLAGS *flags); + +NPY_NO_EXPORT int +npy_void_get_fill_zero_loop( + void *NPY_UNUSED(traverse_context), PyArray_Descr *descr, + int NPY_UNUSED(aligned), npy_intp NPY_UNUSED(fixed_stride), + traverse_loop_function **out_loop, NpyAuxData **NPY_UNUSED(out_auxdata), + NPY_ARRAYMETHOD_FLAGS *flags); + /* Helper to deal with calling or nesting simple strided loops */ @@ -34,6 +50,7 @@ NPY_traverse_info_init(NPY_traverse_info *cast_info) { cast_info->func = NULL; /* mark as uninitialized. */ cast_info->auxdata = NULL; /* allow leaving auxdata untouched */ + cast_info->descr = NULL; /* mark as uninitialized. */ } @@ -45,7 +62,7 @@ NPY_traverse_info_xfree(NPY_traverse_info *traverse_info) } traverse_info->func = NULL; NPY_AUXDATA_FREE(traverse_info->auxdata); - Py_DECREF(traverse_info->descr); + Py_XDECREF(traverse_info->descr); } @@ -79,4 +96,4 @@ PyArray_GetClearFunction( NPY_traverse_info *clear_info, NPY_ARRAYMETHOD_FLAGS *flags); -#endif /* NUMPY_CORE_SRC_MULTIARRAY_DTYPE_TRAVERSAL_H_ */
\ No newline at end of file +#endif /* NUMPY_CORE_SRC_MULTIARRAY_DTYPE_TRAVERSAL_H_ */ diff --git a/numpy/core/src/multiarray/dtypemeta.c b/numpy/core/src/multiarray/dtypemeta.c index f268ba2cb..f8c1b6617 100644 --- a/numpy/core/src/multiarray/dtypemeta.c +++ b/numpy/core/src/multiarray/dtypemeta.c @@ -20,6 +20,8 @@ #include "usertypes.h" #include "conversion_utils.h" #include "templ_common.h" +#include "refcount.h" +#include "dtype_traversal.h" #include <assert.h> @@ -524,6 +526,7 @@ void_common_instance(PyArray_Descr *descr1, PyArray_Descr *descr2) return NULL; } + NPY_NO_EXPORT int python_builtins_are_known_scalar_types( PyArray_DTypeMeta *NPY_UNUSED(cls), PyTypeObject *pytype) @@ -855,6 +858,7 @@ dtypemeta_wrap_legacy_descriptor(PyArray_Descr *descr) dt_slots->common_dtype = default_builtin_common_dtype; dt_slots->common_instance = NULL; dt_slots->ensure_canonical = ensure_native_byteorder; + dt_slots->get_fill_zero_loop = NULL; if (PyTypeNum_ISSIGNED(dtype_class->type_num)) { /* Convert our scalars (raise on too large unsigned and NaN, etc.) */ @@ -866,6 +870,8 @@ dtypemeta_wrap_legacy_descriptor(PyArray_Descr *descr) } else if (descr->type_num == NPY_OBJECT) { dt_slots->common_dtype = object_common_dtype; + dt_slots->get_fill_zero_loop = npy_object_get_fill_zero_loop; + dt_slots->get_clear_loop = npy_get_clear_object_strided_loop; } else if (PyTypeNum_ISDATETIME(descr->type_num)) { /* Datetimes are flexible, but were not considered previously */ @@ -887,6 +893,9 @@ dtypemeta_wrap_legacy_descriptor(PyArray_Descr *descr) void_discover_descr_from_pyobject); dt_slots->common_instance = void_common_instance; dt_slots->ensure_canonical = void_ensure_canonical; + dt_slots->get_fill_zero_loop = npy_void_get_fill_zero_loop; + dt_slots->get_clear_loop = + npy_get_clear_void_and_legacy_user_dtype_loop; } else { dt_slots->default_descr = string_and_unicode_default_descr; diff --git a/numpy/core/src/multiarray/dtypemeta.h b/numpy/core/src/multiarray/dtypemeta.h index 3b4dbad24..6dbfd1549 100644 --- a/numpy/core/src/multiarray/dtypemeta.h +++ b/numpy/core/src/multiarray/dtypemeta.h @@ -42,6 +42,23 @@ typedef struct { */ get_traverse_loop_function *get_clear_loop; /* + Either NULL or a function that sets a function pointer to a traversal + loop that fills an array with zero values appropriate for the dtype. If + get_fill_zero_loop is undefined or the function pointer set by it is + NULL, the array buffer is allocated with calloc. If this function is + defined and it sets a non-NULL function pointer, the array buffer is + allocated with malloc and the zero-filling loop function pointer is + called to fill the buffer. For the best performance, avoid using this + function if a zero-filled array buffer allocated with calloc makes sense + for the dtype. + + Note that this is currently used only for zero-filling a newly allocated + array. While it can be used to zero-fill an already-filled buffer, that + will not work correctly for arrays holding references. If you need to do + that, clear the array first. 
+ */ + get_traverse_loop_function *get_fill_zero_loop; + /* * The casting implementation (ArrayMethod) to convert between two * instances of this DType, stored explicitly for fast access: */ @@ -63,7 +80,7 @@ typedef struct { // This must be updated if new slots before within_dtype_castingimpl // are added -#define NPY_NUM_DTYPE_SLOTS 9 +#define NPY_NUM_DTYPE_SLOTS 10 #define NPY_NUM_DTYPE_PYARRAY_ARRFUNCS_SLOTS 22 #define NPY_DT_MAX_ARRFUNCS_SLOT \ NPY_NUM_DTYPE_PYARRAY_ARRFUNCS_SLOTS + _NPY_DT_ARRFUNCS_OFFSET diff --git a/numpy/core/src/multiarray/getset.c b/numpy/core/src/multiarray/getset.c index ab35548ed..d019acbb5 100644 --- a/numpy/core/src/multiarray/getset.c +++ b/numpy/core/src/multiarray/getset.c @@ -797,20 +797,17 @@ array_imag_get(PyArrayObject *self, void *NPY_UNUSED(ignored)) } else { Py_INCREF(PyArray_DESCR(self)); - ret = (PyArrayObject *)PyArray_NewFromDescr(Py_TYPE(self), - PyArray_DESCR(self), - PyArray_NDIM(self), - PyArray_DIMS(self), - NULL, NULL, - PyArray_ISFORTRAN(self), - (PyObject *)self); + ret = (PyArrayObject *)PyArray_NewFromDescr_int( + Py_TYPE(self), + PyArray_DESCR(self), + PyArray_NDIM(self), + PyArray_DIMS(self), + NULL, NULL, + PyArray_ISFORTRAN(self), + (PyObject *)self, NULL, 1, 0); if (ret == NULL) { return NULL; } - if (_zerofill(ret) < 0) { - Py_DECREF(ret); - return NULL; - } PyArray_CLEARFLAGS(ret, NPY_ARRAY_WRITEABLE); } return (PyObject *) ret; diff --git a/numpy/core/src/multiarray/methods.c b/numpy/core/src/multiarray/methods.c index 93b290020..3b2e72820 100644 --- a/numpy/core/src/multiarray/methods.c +++ b/numpy/core/src/multiarray/methods.c @@ -2804,9 +2804,7 @@ array_complex(PyArrayObject *self, PyObject *NPY_UNUSED(args)) PyArray_Descr *dtype; PyObject *c; - if (PyArray_SIZE(self) != 1) { - PyErr_SetString(PyExc_TypeError, - "only length-1 arrays can be converted to Python scalars"); + if (check_is_convertible_to_scalar(self) < 0) { return NULL; } diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c index ac8e641b7..98ca15ac4 100644 --- a/numpy/core/src/multiarray/multiarraymodule.c +++ b/numpy/core/src/multiarray/multiarraymodule.c @@ -1042,12 +1042,11 @@ PyArray_MatrixProduct2(PyObject *op1, PyObject *op2, PyArrayObject* out) #endif if (PyArray_NDIM(ap1) == 0 || PyArray_NDIM(ap2) == 0) { - result = (PyArray_NDIM(ap1) == 0 ? ap1 : ap2); - result = (PyArrayObject *)Py_TYPE(result)->tp_as_number->nb_multiply( - (PyObject *)ap1, (PyObject *)ap2); + PyObject *mul_res = PyObject_CallFunctionObjArgs( + n_ops.multiply, ap1, ap2, out, NULL); Py_DECREF(ap1); Py_DECREF(ap2); - return (PyObject *)result; + return mul_res; } l = PyArray_DIMS(ap1)[PyArray_NDIM(ap1) - 1]; if (PyArray_NDIM(ap2) > 1) { @@ -1635,25 +1634,35 @@ _prepend_ones(PyArrayObject *arr, int nd, int ndmin, NPY_ORDER order) ((order) == NPY_CORDER && PyArray_IS_C_CONTIGUOUS(op)) || \ ((order) == NPY_FORTRANORDER && PyArray_IS_F_CONTIGUOUS(op))) + static inline PyObject * _array_fromobject_generic( - PyObject *op, PyArray_Descr *type, _PyArray_CopyMode copy, NPY_ORDER order, - npy_bool subok, int ndmin) + PyObject *op, PyArray_Descr *in_descr, PyArray_DTypeMeta *in_DType, + _PyArray_CopyMode copy, NPY_ORDER order, npy_bool subok, int ndmin) { PyArrayObject *oparr = NULL, *ret = NULL; PyArray_Descr *oldtype = NULL; int nd, flags = 0; + /* Hold on to `in_descr` as `dtype`, since we may also set it below. 
*/ + Py_XINCREF(in_descr); + PyArray_Descr *dtype = in_descr; + if (ndmin > NPY_MAXDIMS) { PyErr_Format(PyExc_ValueError, "ndmin bigger than allowable number of dimensions " "NPY_MAXDIMS (=%d)", NPY_MAXDIMS); - return NULL; + goto finish; } /* fast exit if simple call */ if (PyArray_CheckExact(op) || (subok && PyArray_Check(op))) { oparr = (PyArrayObject *)op; - if (type == NULL) { + + if (dtype == NULL && in_DType == NULL) { + /* + * User did not ask for a specific dtype instance or class. So + * we can return either self or a copy. + */ if (copy != NPY_COPY_ALWAYS && STRIDING_OK(oparr, order)) { ret = oparr; Py_INCREF(ret); @@ -1663,17 +1672,30 @@ _array_fromobject_generic( if (copy == NPY_COPY_NEVER) { PyErr_SetString(PyExc_ValueError, "Unable to avoid copy while creating a new array."); - return NULL; + goto finish; } ret = (PyArrayObject *)PyArray_NewCopy(oparr, order); goto finish; } } + else if (dtype == NULL) { + /* + * If the user passed a DType class but not a dtype instance, + * we must use `PyArray_AdaptDescriptorToArray` to find the + * correct dtype instance. + * Even if the fast-path doesn't work we will use this. + */ + dtype = PyArray_AdaptDescriptorToArray(oparr, in_DType, NULL); + if (dtype == NULL) { + goto finish; + } + } + /* One more chance for faster exit if user specified the dtype. */ oldtype = PyArray_DESCR(oparr); - if (PyArray_EquivTypes(oldtype, type)) { + if (PyArray_EquivTypes(oldtype, dtype)) { if (copy != NPY_COPY_ALWAYS && STRIDING_OK(oparr, order)) { - if (oldtype == type) { + if (oldtype == dtype) { Py_INCREF(op); ret = oparr; } @@ -1681,10 +1703,10 @@ _array_fromobject_generic( /* Create a new PyArrayObject from the caller's * PyArray_Descr. Use the reference `op` as the base * object. */ - Py_INCREF(type); + Py_INCREF(dtype); ret = (PyArrayObject *)PyArray_NewFromDescrAndBase( Py_TYPE(op), - type, + dtype, PyArray_NDIM(oparr), PyArray_DIMS(oparr), PyArray_STRIDES(oparr), @@ -1700,10 +1722,10 @@ _array_fromobject_generic( if (copy == NPY_COPY_NEVER) { PyErr_SetString(PyExc_ValueError, "Unable to avoid copy while creating a new array."); - return NULL; + goto finish; } ret = (PyArrayObject *)PyArray_NewCopy(oparr, order); - if (oldtype == type || ret == NULL) { + if (oldtype == dtype || ret == NULL) { goto finish; } Py_INCREF(oldtype); @@ -1734,11 +1756,13 @@ _array_fromobject_generic( } flags |= NPY_ARRAY_FORCECAST; - Py_XINCREF(type); - ret = (PyArrayObject *)PyArray_CheckFromAny(op, type, - 0, 0, flags, NULL); + + ret = (PyArrayObject *)PyArray_CheckFromAny_int( + op, dtype, in_DType, 0, 0, flags, NULL); finish: + Py_XDECREF(dtype); + if (ret == NULL) { return NULL; } @@ -1765,7 +1789,7 @@ array_array(PyObject *NPY_UNUSED(ignored), npy_bool subok = NPY_FALSE; _PyArray_CopyMode copy = NPY_COPY_ALWAYS; int ndmin = 0; - PyArray_Descr *type = NULL; + npy_dtype_info dt_info = {NULL, NULL}; NPY_ORDER order = NPY_KEEPORDER; PyObject *like = Py_None; NPY_PREPARE_ARGPARSER; @@ -1773,21 +1797,23 @@ array_array(PyObject *NPY_UNUSED(ignored), if (len_args != 1 || (kwnames != NULL)) { if (npy_parse_arguments("array", args, len_args, kwnames, "object", NULL, &op, - "|dtype", &PyArray_DescrConverter2, &type, + "|dtype", &PyArray_DTypeOrDescrConverterOptional, &dt_info, "$copy", &PyArray_CopyConverter, ©, "$order", &PyArray_OrderConverter, &order, "$subok", &PyArray_BoolConverter, &subok, "$ndmin", &PyArray_PythonPyIntFromInt, &ndmin, "$like", NULL, &like, NULL, NULL, NULL) < 0) { - Py_XDECREF(type); + Py_XDECREF(dt_info.descr); + Py_XDECREF(dt_info.dtype); return 
NULL; } if (like != Py_None) { PyObject *deferred = array_implement_c_array_function_creation( "array", like, NULL, NULL, args, len_args, kwnames); if (deferred != Py_NotImplemented) { - Py_XDECREF(type); + Py_XDECREF(dt_info.descr); + Py_XDECREF(dt_info.dtype); return deferred; } } @@ -1798,8 +1824,9 @@ array_array(PyObject *NPY_UNUSED(ignored), } PyObject *res = _array_fromobject_generic( - op, type, copy, order, subok, ndmin); - Py_XDECREF(type); + op, dt_info.descr, dt_info.dtype, copy, order, subok, ndmin); + Py_XDECREF(dt_info.descr); + Py_XDECREF(dt_info.dtype); return res; } @@ -1808,7 +1835,7 @@ array_asarray(PyObject *NPY_UNUSED(ignored), PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { PyObject *op; - PyArray_Descr *type = NULL; + npy_dtype_info dt_info = {NULL, NULL}; NPY_ORDER order = NPY_KEEPORDER; PyObject *like = Py_None; NPY_PREPARE_ARGPARSER; @@ -1816,18 +1843,20 @@ array_asarray(PyObject *NPY_UNUSED(ignored), if (len_args != 1 || (kwnames != NULL)) { if (npy_parse_arguments("asarray", args, len_args, kwnames, "a", NULL, &op, - "|dtype", &PyArray_DescrConverter2, &type, + "|dtype", &PyArray_DTypeOrDescrConverterOptional, &dt_info, "|order", &PyArray_OrderConverter, &order, "$like", NULL, &like, NULL, NULL, NULL) < 0) { - Py_XDECREF(type); + Py_XDECREF(dt_info.descr); + Py_XDECREF(dt_info.dtype); return NULL; } if (like != Py_None) { PyObject *deferred = array_implement_c_array_function_creation( "asarray", like, NULL, NULL, args, len_args, kwnames); if (deferred != Py_NotImplemented) { - Py_XDECREF(type); + Py_XDECREF(dt_info.descr); + Py_XDECREF(dt_info.dtype); return deferred; } } @@ -1837,8 +1866,9 @@ array_asarray(PyObject *NPY_UNUSED(ignored), } PyObject *res = _array_fromobject_generic( - op, type, NPY_FALSE, order, NPY_FALSE, 0); - Py_XDECREF(type); + op, dt_info.descr, dt_info.dtype, NPY_FALSE, order, NPY_FALSE, 0); + Py_XDECREF(dt_info.descr); + Py_XDECREF(dt_info.dtype); return res; } @@ -1847,7 +1877,7 @@ array_asanyarray(PyObject *NPY_UNUSED(ignored), PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { PyObject *op; - PyArray_Descr *type = NULL; + npy_dtype_info dt_info = {NULL, NULL}; NPY_ORDER order = NPY_KEEPORDER; PyObject *like = Py_None; NPY_PREPARE_ARGPARSER; @@ -1855,18 +1885,20 @@ array_asanyarray(PyObject *NPY_UNUSED(ignored), if (len_args != 1 || (kwnames != NULL)) { if (npy_parse_arguments("asanyarray", args, len_args, kwnames, "a", NULL, &op, - "|dtype", &PyArray_DescrConverter2, &type, + "|dtype", &PyArray_DTypeOrDescrConverterOptional, &dt_info, "|order", &PyArray_OrderConverter, &order, "$like", NULL, &like, NULL, NULL, NULL) < 0) { - Py_XDECREF(type); + Py_XDECREF(dt_info.descr); + Py_XDECREF(dt_info.dtype); return NULL; } if (like != Py_None) { PyObject *deferred = array_implement_c_array_function_creation( "asanyarray", like, NULL, NULL, args, len_args, kwnames); if (deferred != Py_NotImplemented) { - Py_XDECREF(type); + Py_XDECREF(dt_info.descr); + Py_XDECREF(dt_info.dtype); return deferred; } } @@ -1876,8 +1908,9 @@ array_asanyarray(PyObject *NPY_UNUSED(ignored), } PyObject *res = _array_fromobject_generic( - op, type, NPY_FALSE, order, NPY_TRUE, 0); - Py_XDECREF(type); + op, dt_info.descr, dt_info.dtype, NPY_FALSE, order, NPY_TRUE, 0); + Py_XDECREF(dt_info.descr); + Py_XDECREF(dt_info.dtype); return res; } @@ -1887,24 +1920,26 @@ array_ascontiguousarray(PyObject *NPY_UNUSED(ignored), PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { PyObject *op; - PyArray_Descr *type = NULL; + npy_dtype_info 
dt_info = {NULL, NULL}; PyObject *like = Py_None; NPY_PREPARE_ARGPARSER; if (len_args != 1 || (kwnames != NULL)) { if (npy_parse_arguments("ascontiguousarray", args, len_args, kwnames, "a", NULL, &op, - "|dtype", &PyArray_DescrConverter2, &type, + "|dtype", &PyArray_DTypeOrDescrConverterOptional, &dt_info, "$like", NULL, &like, NULL, NULL, NULL) < 0) { - Py_XDECREF(type); + Py_XDECREF(dt_info.descr); + Py_XDECREF(dt_info.dtype); return NULL; } if (like != Py_None) { PyObject *deferred = array_implement_c_array_function_creation( "ascontiguousarray", like, NULL, NULL, args, len_args, kwnames); if (deferred != Py_NotImplemented) { - Py_XDECREF(type); + Py_XDECREF(dt_info.descr); + Py_XDECREF(dt_info.dtype); return deferred; } } @@ -1914,8 +1949,10 @@ array_ascontiguousarray(PyObject *NPY_UNUSED(ignored), } PyObject *res = _array_fromobject_generic( - op, type, NPY_FALSE, NPY_CORDER, NPY_FALSE, 1); - Py_XDECREF(type); + op, dt_info.descr, dt_info.dtype, NPY_FALSE, NPY_CORDER, NPY_FALSE, + 1); + Py_XDECREF(dt_info.descr); + Py_XDECREF(dt_info.dtype); return res; } @@ -1925,24 +1962,26 @@ array_asfortranarray(PyObject *NPY_UNUSED(ignored), PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { PyObject *op; - PyArray_Descr *type = NULL; + npy_dtype_info dt_info = {NULL, NULL}; PyObject *like = Py_None; NPY_PREPARE_ARGPARSER; if (len_args != 1 || (kwnames != NULL)) { if (npy_parse_arguments("asfortranarray", args, len_args, kwnames, "a", NULL, &op, - "|dtype", &PyArray_DescrConverter2, &type, + "|dtype", &PyArray_DTypeOrDescrConverterOptional, &dt_info, "$like", NULL, &like, NULL, NULL, NULL) < 0) { - Py_XDECREF(type); + Py_XDECREF(dt_info.descr); + Py_XDECREF(dt_info.dtype); return NULL; } if (like != Py_None) { PyObject *deferred = array_implement_c_array_function_creation( "asfortranarray", like, NULL, NULL, args, len_args, kwnames); if (deferred != Py_NotImplemented) { - Py_XDECREF(type); + Py_XDECREF(dt_info.descr); + Py_XDECREF(dt_info.dtype); return deferred; } } @@ -1952,8 +1991,10 @@ array_asfortranarray(PyObject *NPY_UNUSED(ignored), } PyObject *res = _array_fromobject_generic( - op, type, NPY_FALSE, NPY_FORTRANORDER, NPY_FALSE, 1); - Py_XDECREF(type); + op, dt_info.descr, dt_info.dtype, NPY_FALSE, NPY_FORTRANORDER, + NPY_FALSE, 1); + Py_XDECREF(dt_info.descr); + Py_XDECREF(dt_info.dtype); return res; } diff --git a/numpy/core/src/multiarray/number.c b/numpy/core/src/multiarray/number.c index 2e25152d5..35e4af79c 100644 --- a/numpy/core/src/multiarray/number.c +++ b/numpy/core/src/multiarray/number.c @@ -53,6 +53,8 @@ static PyObject * array_inplace_remainder(PyArrayObject *m1, PyObject *m2); static PyObject * array_inplace_power(PyArrayObject *a1, PyObject *o2, PyObject *NPY_UNUSED(modulo)); +static PyObject * +array_inplace_matrix_multiply(PyArrayObject *m1, PyObject *m2); /* * Dictionary can contain any of the numeric operations, by name. @@ -339,7 +341,6 @@ array_divmod(PyObject *m1, PyObject *m2) return PyArray_GenericBinaryFunction(m1, m2, n_ops.divmod); } -/* Need this to be version dependent on account of the slot check */ static PyObject * array_matrix_multiply(PyObject *m1, PyObject *m2) { @@ -348,13 +349,70 @@ array_matrix_multiply(PyObject *m1, PyObject *m2) } static PyObject * -array_inplace_matrix_multiply( - PyArrayObject *NPY_UNUSED(m1), PyObject *NPY_UNUSED(m2)) +array_inplace_matrix_multiply(PyArrayObject *self, PyObject *other) { - PyErr_SetString(PyExc_TypeError, - "In-place matrix multiplication is not (yet) supported. 
" - "Use 'a = a @ b' instead of 'a @= b'."); - return NULL; + static PyObject *AxisError_cls = NULL; + npy_cache_import("numpy.exceptions", "AxisError", &AxisError_cls); + if (AxisError_cls == NULL) { + return NULL; + } + + INPLACE_GIVE_UP_IF_NEEDED(self, other, + nb_inplace_matrix_multiply, array_inplace_matrix_multiply); + + /* + * Unlike `matmul(a, b, out=a)` we ensure that the result is not broadcast + * if the result without `out` would have less dimensions than `a`. + * Since the signature of matmul is '(n?,k),(k,m?)->(n?,m?)' this is the + * case exactly when the second operand has both core dimensions. + * + * The error here will be confusing, but for now, we enforce this by + * passing the correct `axes=`. + */ + static PyObject *axes_1d_obj_kwargs = NULL; + static PyObject *axes_2d_obj_kwargs = NULL; + if (NPY_UNLIKELY(axes_1d_obj_kwargs == NULL)) { + axes_1d_obj_kwargs = Py_BuildValue( + "{s, [(i), (i, i), (i)]}", "axes", -1, -2, -1, -1); + if (axes_1d_obj_kwargs == NULL) { + return NULL; + } + } + if (NPY_UNLIKELY(axes_2d_obj_kwargs == NULL)) { + axes_2d_obj_kwargs = Py_BuildValue( + "{s, [(i, i), (i, i), (i, i)]}", "axes", -2, -1, -2, -1, -2, -1); + if (axes_2d_obj_kwargs == NULL) { + return NULL; + } + } + + PyObject *args = PyTuple_Pack(3, self, other, self); + if (args == NULL) { + return NULL; + } + PyObject *kwargs; + if (PyArray_NDIM(self) == 1) { + kwargs = axes_1d_obj_kwargs; + } + else { + kwargs = axes_2d_obj_kwargs; + } + PyObject *res = PyObject_Call(n_ops.matmul, args, kwargs); + Py_DECREF(args); + + if (res == NULL) { + /* + * AxisError should indicate that the axes argument didn't work out + * which should mean the second operand not being 2 dimensional. + */ + if (PyErr_ExceptionMatches(AxisError_cls)) { + PyErr_SetString(PyExc_ValueError, + "inplace matrix multiplication requires the first operand to " + "have at least one and the second at least two dimensions."); + } + } + + return res; } /* @@ -811,13 +869,11 @@ array_scalar_forward(PyArrayObject *v, PyObject *(*builtin_func)(PyObject *), const char *where) { - PyObject *scalar; - if (PyArray_SIZE(v) != 1) { - PyErr_SetString(PyExc_TypeError, "only size-1 arrays can be"\ - " converted to Python scalars"); + if (check_is_convertible_to_scalar(v) < 0) { return NULL; } + PyObject *scalar; scalar = PyArray_GETITEM(v, PyArray_DATA(v)); if (scalar == NULL) { return NULL; diff --git a/numpy/core/src/multiarray/refcount.c b/numpy/core/src/multiarray/refcount.c index 20527f7af..d200957c3 100644 --- a/numpy/core/src/multiarray/refcount.c +++ b/numpy/core/src/multiarray/refcount.c @@ -17,15 +17,12 @@ #include "numpy/arrayscalars.h" #include "iterators.h" #include "dtypemeta.h" +#include "refcount.h" #include "npy_config.h" #include "npy_pycompat.h" -static void -_fillobject(char *optr, PyObject *obj, PyArray_Descr *dtype); - - /* * Helper function to clear a strided memory (normally or always contiguous) * from all Python (or other) references. 
The function does nothing if the @@ -395,7 +392,7 @@ PyArray_FillObjectArray(PyArrayObject *arr, PyObject *obj) } } -static void +NPY_NO_EXPORT void _fillobject(char *optr, PyObject *obj, PyArray_Descr *dtype) { if (!PyDataType_FLAGCHK(dtype, NPY_ITEM_REFCOUNT)) { diff --git a/numpy/core/src/multiarray/refcount.h b/numpy/core/src/multiarray/refcount.h index 16d34e292..7f39b9ca4 100644 --- a/numpy/core/src/multiarray/refcount.h +++ b/numpy/core/src/multiarray/refcount.h @@ -24,4 +24,7 @@ PyArray_XDECREF(PyArrayObject *mp); NPY_NO_EXPORT void PyArray_FillObjectArray(PyArrayObject *arr, PyObject *obj); +NPY_NO_EXPORT void +_fillobject(char *optr, PyObject *obj, PyArray_Descr *dtype); + #endif /* NUMPY_CORE_SRC_MULTIARRAY_REFCOUNT_H_ */ diff --git a/numpy/core/src/multiarray/textreading/tokenize.cpp b/numpy/core/src/multiarray/textreading/tokenize.cpp index 210428813..e0ddc393d 100644 --- a/numpy/core/src/multiarray/textreading/tokenize.cpp +++ b/numpy/core/src/multiarray/textreading/tokenize.cpp @@ -449,6 +449,8 @@ npy_tokenizer_init(tokenizer_state *ts, parser_config *config) ts->fields = (field_info *)PyMem_Malloc(4 * sizeof(*ts->fields)); if (ts->fields == nullptr) { + PyMem_Free(ts->field_buffer); + ts->field_buffer = nullptr; PyErr_NoMemory(); return -1; } diff --git a/numpy/core/src/npymath/npy_math_private.h b/numpy/core/src/npymath/npy_math_private.h index a474b3de3..20c94f98a 100644 --- a/numpy/core/src/npymath/npy_math_private.h +++ b/numpy/core/src/npymath/npy_math_private.h @@ -21,6 +21,7 @@ #include <Python.h> #ifdef __cplusplus #include <cmath> +#include <complex> using std::isgreater; using std::isless; #else @@ -494,8 +495,9 @@ do { \ * Microsoft C defines _MSC_VER * Intel compiler does not use MSVC complex types, but defines _MSC_VER by * default. + * Since C++17, MSVC no longer supports them.
*/ -#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) +#if !defined(__cplusplus) && defined(_MSC_VER) && !defined(__INTEL_COMPILER) typedef union { npy_cdouble npy_z; _Dcomplex c99_z; diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c index a5e8f4cbe..d4aced05f 100644 --- a/numpy/core/src/umath/ufunc_object.c +++ b/numpy/core/src/umath/ufunc_object.c @@ -5935,6 +5935,9 @@ trivial_at_loop(PyArrayMethodObject *ufuncimpl, NPY_ARRAYMETHOD_FLAGS flags, res = ufuncimpl->contiguous_indexed_loop( context, args, inner_size, steps, NULL); + if (args[2] != NULL) { + args[2] += (*inner_size) * steps[2]; + } } while (res == 0 && iter->outer_next(iter->outer)); if (res == 0 && !(flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) { diff --git a/numpy/core/tests/test_array_coercion.py b/numpy/core/tests/test_array_coercion.py index 838c61bc2..aeddac585 100644 --- a/numpy/core/tests/test_array_coercion.py +++ b/numpy/core/tests/test_array_coercion.py @@ -161,6 +161,8 @@ class TestStringDiscovery: # A nested array is also discovered correctly arr = np.array(obj, dtype="O") assert np.array(arr, dtype="S").dtype == expected + # Also if we use the dtype class + assert np.array(arr, dtype=type(expected)).dtype == expected # Check that .astype() behaves identical assert arr.astype("S").dtype == expected # The DType class is accepted by `.astype()` diff --git a/numpy/core/tests/test_arrayprint.py b/numpy/core/tests/test_arrayprint.py index 372cb8d41..b92c8ae8c 100644 --- a/numpy/core/tests/test_arrayprint.py +++ b/numpy/core/tests/test_arrayprint.py @@ -353,6 +353,33 @@ class TestArray2String: ' [ 501, 502, 503, ..., 999, 1000, 1001]])' assert_equal(repr(A), reprA) + def test_summarize_structure(self): + A = (np.arange(2002, dtype="<i8").reshape(2, 1001) + .view([('i', "<i8", (1001,))])) + strA = ("[[([ 0, 1, 2, ..., 998, 999, 1000],)]\n" + " [([1001, 1002, 1003, ..., 1999, 2000, 2001],)]]") + assert_equal(str(A), strA) + + reprA = ("array([[([ 0, 1, 2, ..., 998, 999, 1000],)],\n" + " [([1001, 1002, 1003, ..., 1999, 2000, 2001],)]],\n" + " dtype=[('i', '<i8', (1001,))])") + assert_equal(repr(A), reprA) + + B = np.ones(2002, dtype=">i8").view([('i', ">i8", (2, 1001))]) + strB = "[([[1, 1, 1, ..., 1, 1, 1], [1, 1, 1, ..., 1, 1, 1]],)]" + assert_equal(str(B), strB) + + reprB = ( + "array([([[1, 1, 1, ..., 1, 1, 1], [1, 1, 1, ..., 1, 1, 1]],)],\n" + " dtype=[('i', '>i8', (2, 1001))])" + ) + assert_equal(repr(B), reprB) + + C = (np.arange(22, dtype="<i8").reshape(2, 11) + .view([('i1', "<i8"), ('i10', "<i8", (10,))])) + strC = "[[( 0, [ 1, ..., 10])]\n [(11, [12, ..., 21])]]" + assert_equal(np.array2string(C, threshold=1, edgeitems=1), strC) + def test_linewidth(self): a = np.full(6, 1) @@ -817,7 +844,7 @@ class TestPrintOptions: ) def test_dtype_endianness_repr(self, native): ''' - there was an issue where + there was an issue where repr(array([0], dtype='<u2')) and repr(array([0], dtype='>u2')) both returned the same thing: array([0], dtype=uint16) @@ -963,6 +990,16 @@ class TestPrintOptions: [[ 0.]]]])""") ) + def test_edgeitems_structured(self): + np.set_printoptions(edgeitems=1, threshold=1) + A = np.arange(5*2*3, dtype="<i8").view([('i', "<i8", (5, 2, 3))]) + reprA = ( + "array([([[[ 0, ..., 2], [ 3, ..., 5]], ..., " + "[[24, ..., 26], [27, ..., 29]]],)],\n" + " dtype=[('i', '<i8', (5, 2, 3))])" + ) + assert_equal(repr(A), reprA) + def test_bad_args(self): assert_raises(ValueError, np.set_printoptions, threshold=float('nan')) assert_raises(TypeError, np.set_printoptions, 
threshold='1') diff --git a/numpy/core/tests/test_custom_dtypes.py b/numpy/core/tests/test_custom_dtypes.py index 1a34c6fa3..da6a4bd50 100644 --- a/numpy/core/tests/test_custom_dtypes.py +++ b/numpy/core/tests/test_custom_dtypes.py @@ -229,6 +229,12 @@ class TestSFloat: expected = arr.astype(SF(1.)) # above will have discovered 1. scaling assert_array_equal(res.view(np.float64), expected.view(np.float64)) + def test_creation_class(self): + arr1 = np.array([1., 2., 3.], dtype=SF) + assert arr1.dtype == SF(1.) + arr2 = np.array([1., 2., 3.], dtype=SF(1.)) + assert_array_equal(arr1.view(np.float64), arr2.view(np.float64)) + def test_type_pickle(): # can't actually unpickle, but we can pickle (if in namespace) diff --git a/numpy/core/tests/test_deprecations.py b/numpy/core/tests/test_deprecations.py index 96ae4b2a8..e47a24995 100644 --- a/numpy/core/tests/test_deprecations.py +++ b/numpy/core/tests/test_deprecations.py @@ -822,6 +822,18 @@ class TestLoadtxtParseIntsViaFloat(_DeprecationTestCase): assert isinstance(e.__cause__, DeprecationWarning) +class TestScalarConversion(_DeprecationTestCase): + # 2023-01-02, 1.25.0 + def test_float_conversion(self): + self.assert_deprecated(float, args=(np.array([3.14]),)) + + def test_behaviour(self): + b = np.array([[3.14]]) + c = np.zeros(5) + with pytest.warns(DeprecationWarning): + c[0] = b + + class TestPyIntConversion(_DeprecationTestCase): message = r".*stop allowing conversion of out-of-bound.*" @@ -922,3 +934,12 @@ class TestFromnumeric(_DeprecationTestCase): # 2023-03-02, 1.25.0 def test_alltrue(self): self.assert_deprecated(lambda: np.alltrue(np.array([True, False]))) + + +class TestMathAlias(_DeprecationTestCase): + # Deprecated in Numpy 1.25, 2023-04-06 + def test_deprecated_np_math(self): + self.assert_deprecated(lambda: np.math) + + def test_deprecated_np_lib_math(self): + self.assert_deprecated(lambda: np.lib.math) diff --git a/numpy/core/tests/test_function_base.py b/numpy/core/tests/test_function_base.py index 21583dd44..79f1ecfc9 100644 --- a/numpy/core/tests/test_function_base.py +++ b/numpy/core/tests/test_function_base.py @@ -1,3 +1,4 @@ +import pytest from numpy import ( logspace, linspace, geomspace, dtype, array, sctypes, arange, isnan, ndarray, sqrt, nextafter, stack, errstate @@ -65,6 +66,33 @@ class TestLogspace: t5 = logspace(start, stop, 6, axis=-1) assert_equal(t5, t2.T) + @pytest.mark.parametrize("axis", [0, 1, -1]) + def test_base_array(self, axis: int): + start = 1 + stop = 2 + num = 6 + base = array([1, 2]) + t1 = logspace(start, stop, num=num, base=base, axis=axis) + t2 = stack( + [logspace(start, stop, num=num, base=_base) for _base in base], + axis=(axis + 1) % t1.ndim, + ) + assert_equal(t1, t2) + + @pytest.mark.parametrize("axis", [0, 1, -1]) + def test_stop_base_array(self, axis: int): + start = 1 + stop = array([2, 3]) + num = 6 + base = array([1, 2]) + t1 = logspace(start, stop, num=num, base=base, axis=axis) + t2 = stack( + [logspace(start, _stop, num=num, base=_base) + for _stop, _base in zip(stop, base)], + axis=(axis + 1) % t1.ndim, + ) + assert_equal(t1, t2) + def test_dtype(self): y = logspace(0, 6, dtype='float32') assert_equal(y.dtype, dtype('float32')) diff --git a/numpy/core/tests/test_mem_policy.py b/numpy/core/tests/test_mem_policy.py index 79abdbf1e..d5dfbc38b 100644 --- a/numpy/core/tests/test_mem_policy.py +++ b/numpy/core/tests/test_mem_policy.py @@ -5,7 +5,7 @@ import pytest import numpy as np import threading import warnings -from numpy.testing import extbuild, assert_warns, IS_WASM, IS_MUSL 
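Two of the behavior changes exercised by the tests above can be illustrated directly. First, the converter swap to PyArray_DTypeOrDescrConverterOptional in the creation functions means a dtype *class*, not only a concrete descriptor, is now accepted for `dtype=` (compare test_creation_class above and test_creation_from_dtypemeta below). A small sketch:

```python
import numpy as np

dt = np.dtype('int64')
a1 = np.array([1, 2, 3], dtype=dt)           # concrete descriptor, as before
a2 = np.asarray([1, 2, 3], dtype=type(dt))   # the DType class is now accepted too
assert a2.dtype == a1.dtype == dt
```

Second, TestScalarConversion above pins down the new deprecation of converting size-1 (but not 0-d) arrays to Python scalars; a minimal sketch of the user-visible behavior, assuming NumPy 1.25 semantics:

```python
import warnings
import numpy as np

float(np.array(3.14))                 # 0-d array: still converts silently

with warnings.catch_warnings(record=True) as w:
    warnings.simplefilter("always")
    float(np.array([3.14]))           # size-1, ndim > 0: now deprecated
assert issubclass(w[0].category, DeprecationWarning)

try:
    float(np.array([1.0, 2.0]))       # size > 1: still a TypeError
except TypeError:
    pass
```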
+from numpy.testing import extbuild, assert_warns, IS_WASM import sys @@ -358,7 +358,6 @@ def test_thread_locality(get_module): assert np.core.multiarray.get_handler_name() == orig_policy_name -@pytest.mark.xfail(IS_MUSL, reason="gh23050") @pytest.mark.slow def test_new_policy(get_module): a = np.arange(10) diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py index ac4bd42d3..984047c87 100644 --- a/numpy/core/tests/test_multiarray.py +++ b/numpy/core/tests/test_multiarray.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import collections.abc import tempfile import sys @@ -1194,6 +1196,17 @@ class TestCreation: expected = expected * (arr.nbytes // len(expected)) assert arr.tobytes() == expected + @pytest.mark.parametrize("func", [ + np.array, np.asarray, np.asanyarray, np.ascontiguousarray, + np.asfortranarray]) + def test_creation_from_dtypemeta(self, func): + dtype = np.dtype('i') + arr1 = func([1, 2, 3], dtype=dtype) + arr2 = func([1, 2, 3], dtype=type(dtype)) + assert_array_equal(arr1, arr2) + assert arr2.dtype == dtype + + class TestStructured: def test_subarray_field_access(self): a = np.zeros((3, 5), dtype=[('a', ('i4', (2, 2)))]) @@ -3631,9 +3644,13 @@ class TestMethods: msg = 'dtype: {0}'.format(dt) ap = complex(a) assert_equal(ap, a, msg) - bp = complex(b) + + with assert_warns(DeprecationWarning): + bp = complex(b) assert_equal(bp, b, msg) - cp = complex(c) + + with assert_warns(DeprecationWarning): + cp = complex(c) assert_equal(cp, c, msg) def test__complex__should_not_work(self): @@ -3656,7 +3673,8 @@ class TestMethods: assert_raises(TypeError, complex, d) e = np.array(['1+1j'], 'U') - assert_raises(TypeError, complex, e) + with assert_warns(DeprecationWarning): + assert_raises(TypeError, complex, e) class TestCequenceMethods: def test_array_contains(self): @@ -3718,7 +3736,7 @@ class TestBinop: 'and': (np.bitwise_and, True, int), 'xor': (np.bitwise_xor, True, int), 'or': (np.bitwise_or, True, int), - 'matmul': (np.matmul, False, float), + 'matmul': (np.matmul, True, float), # 'ge': (np.less_equal, False), # 'gt': (np.less, False), # 'le': (np.greater_equal, False), @@ -6662,6 +6680,22 @@ class TestDot: r = np.empty((1024, 32), dtype=int) assert_raises(ValueError, dot, f, v, r) + def test_dot_out_result(self): + x = np.ones((), dtype=np.float16) + y = np.ones((5,), dtype=np.float16) + z = np.zeros((5,), dtype=np.float16) + res = x.dot(y, out=z) + assert np.array_equal(res, y) + assert np.array_equal(z, y) + + def test_dot_out_aliasing(self): + x = np.ones((), dtype=np.float16) + y = np.ones((5,), dtype=np.float16) + z = np.zeros((5,), dtype=np.float16) + res = x.dot(y, out=z) + z[0] = 2 + assert np.array_equal(res, z) + def test_dot_array_order(self): a = np.array([[1, 2], [3, 4]], order='C') b = np.array([[1, 2], [3, 4]], order='F') @@ -7169,16 +7203,69 @@ class TestMatmulOperator(MatmulCommon): assert_raises(TypeError, self.matmul, np.void(b'abc'), np.void(b'abc')) assert_raises(TypeError, self.matmul, np.arange(10), np.void(b'abc')) -def test_matmul_inplace(): - # It would be nice to support in-place matmul eventually, but for now - # we don't have a working implementation, so better just to error out - # and nudge people to writing "a = a @ b". 
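The removed test above enforced the old TypeError; with the nb_inplace_matrix_multiply slot filled in (see the number.c hunk earlier), `a @= b` now works whenever the result keeps `a`'s shape and dtype. A rough sketch of the semantics that the replacement TestMatmulInplace below pins down:

```python
import numpy as np

a = np.arange(10.0).reshape(5, 2)
b = np.ones((2, 2))
a @= b                        # supported now; a keeps shape (5, 2) and float64
assert a.shape == (5, 2)

a = np.ones((3, 3))
try:
    a @= np.ones((3, 1))      # result shape (3, 1) != a.shape -> rejected
except ValueError:
    pass
```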
- a = np.eye(3) - b = np.eye(3) - assert_raises(TypeError, a.__imatmul__, b) - import operator - assert_raises(TypeError, operator.imatmul, a, b) - assert_raises(TypeError, exec, "a @= b", globals(), locals()) + +class TestMatmulInplace: + DTYPES = {} + for i in MatmulCommon.types: + for j in MatmulCommon.types: + if np.can_cast(j, i): + DTYPES[f"{i}-{j}"] = (np.dtype(i), np.dtype(j)) + + @pytest.mark.parametrize("dtype1,dtype2", DTYPES.values(), ids=DTYPES) + def test_basic(self, dtype1: np.dtype, dtype2: np.dtype) -> None: + a = np.arange(10).reshape(5, 2).astype(dtype1) + a_id = id(a) + b = np.ones((2, 2), dtype=dtype2) + + ref = a @ b + a @= b + + assert id(a) == a_id + assert a.dtype == dtype1 + assert a.shape == (5, 2) + if dtype1.kind in "fc": + np.testing.assert_allclose(a, ref) + else: + np.testing.assert_array_equal(a, ref) + + SHAPES = { + "2d_large": ((10**5, 10), (10, 10)), + "3d_large": ((10**4, 10, 10), (1, 10, 10)), + "1d": ((3,), (3,)), + "2d_1d": ((3, 3), (3,)), + "1d_2d": ((3,), (3, 3)), + "2d_broadcast": ((3, 3), (3, 1)), + "2d_broadcast_reverse": ((1, 3), (3, 3)), + "3d_broadcast1": ((3, 3, 3), (1, 3, 1)), + "3d_broadcast2": ((3, 3, 3), (1, 3, 3)), + "3d_broadcast3": ((3, 3, 3), (3, 3, 1)), + "3d_broadcast_reverse1": ((1, 3, 3), (3, 3, 3)), + "3d_broadcast_reverse2": ((3, 1, 3), (3, 3, 3)), + "3d_broadcast_reverse3": ((1, 1, 3), (3, 3, 3)), + } + + @pytest.mark.parametrize("a_shape,b_shape", SHAPES.values(), ids=SHAPES) + def test_shapes(self, a_shape: tuple[int, ...], b_shape: tuple[int, ...]): + a_size = np.prod(a_shape) + a = np.arange(a_size).reshape(a_shape).astype(np.float64) + a_id = id(a) + + b_size = np.prod(b_shape) + b = np.arange(b_size).reshape(b_shape) + + ref = a @ b + if ref.shape != a_shape: + with pytest.raises(ValueError): + a @= b + return + else: + a @= b + + assert id(a) == a_id + assert a.dtype.type == np.float64 + assert a.shape == a_shape + np.testing.assert_allclose(a, ref) + def test_matmul_axes(): a = np.arange(3*4*5).reshape(3, 4, 5) @@ -8674,8 +8761,10 @@ class TestConversion: int_funcs = (int, lambda x: x.__int__()) for int_func in int_funcs: assert_equal(int_func(np.array(0)), 0) - assert_equal(int_func(np.array([1])), 1) - assert_equal(int_func(np.array([[42]])), 42) + with assert_warns(DeprecationWarning): + assert_equal(int_func(np.array([1])), 1) + with assert_warns(DeprecationWarning): + assert_equal(int_func(np.array([[42]])), 42) assert_raises(TypeError, int_func, np.array([1, 2])) # gh-9972 @@ -8690,7 +8779,8 @@ class TestConversion: def __trunc__(self): return 3 assert_equal(3, int_func(np.array(HasTrunc()))) - assert_equal(3, int_func(np.array([HasTrunc()]))) + with assert_warns(DeprecationWarning): + assert_equal(3, int_func(np.array([HasTrunc()]))) else: pass @@ -8699,8 +8789,9 @@ class TestConversion: raise NotImplementedError assert_raises(NotImplementedError, int_func, np.array(NotConvertible())) - assert_raises(NotImplementedError, - int_func, np.array([NotConvertible()])) + with assert_warns(DeprecationWarning): + assert_raises(NotImplementedError, + int_func, np.array([NotConvertible()])) class TestWhere: diff --git a/numpy/core/tests/test_ufunc.py b/numpy/core/tests/test_ufunc.py index 498a654c8..71af2ccb7 100644 --- a/numpy/core/tests/test_ufunc.py +++ b/numpy/core/tests/test_ufunc.py @@ -2054,6 +2054,17 @@ class TestUfunc: # If it is [-1, -1, -1, -100, 0] then the regular strided loop was used assert np.all(arr == [-1, -1, -1, -200, -1]) + def test_ufunc_at_large(self): + # issue gh-23457 + indices = np.zeros(8195, 
dtype=np.int16) + b = np.zeros(8195, dtype=float) + b[0] = 10 + b[1] = 5 + b[8192:] = 100 + a = np.zeros(1, dtype=float) + np.add.at(a, indices, b) + assert a[0] == b.sum() + def test_cast_index_fastpath(self): arr = np.zeros(10) values = np.ones(100000) @@ -2956,3 +2967,10 @@ class TestLowlevelAPIAccess: with pytest.raises(TypeError): # cannot call it a second time: np.negative._get_strided_loop(call_info) + + def test_long_arrays(self): + t = np.zeros((1029, 917), dtype=np.single) + t[0][0] = 1 + t[28][414] = 1 + tc = np.cos(t) + assert_equal(tc[0][0], tc[28][414]) diff --git a/numpy/distutils/ccompiler.py b/numpy/distutils/ccompiler.py index 0e018a268..66124df94 100644 --- a/numpy/distutils/ccompiler.py +++ b/numpy/distutils/ccompiler.py @@ -1,6 +1,7 @@ import os import re import sys +import platform import shlex import time import subprocess @@ -394,8 +395,14 @@ def CCompiler_customize_cmd(self, cmd, ignore=()): log.info('customize %s using %s' % (self.__class__.__name__, cmd.__class__.__name__)) - if hasattr(self, 'compiler') and 'clang' in self.compiler[0]: + if ( + hasattr(self, 'compiler') and + 'clang' in self.compiler[0] and + not (platform.machine() == 'arm64' and sys.platform == 'darwin') + ): # clang defaults to a non-strict floating error point model. + # However, '-ftrapping-math' is not currently supported (2023-04-08) + # for macosx_arm64. # Since NumPy and most Python libs give warnings for these, override: self.compiler.append('-ftrapping-math') self.compiler_so.append('-ftrapping-math') diff --git a/numpy/distutils/ccompiler_opt.py b/numpy/distutils/ccompiler_opt.py index 4904dd3dd..6ba4cd816 100644 --- a/numpy/distutils/ccompiler_opt.py +++ b/numpy/distutils/ccompiler_opt.py @@ -16,15 +16,6 @@ import re import subprocess import textwrap -# These flags are used to compile any C++ source within Numpy. -# They are chosen to have very few runtime dependencies. -NPY_CXX_FLAGS = [ - '-std=c++11', # Minimal standard version - '-D__STDC_VERSION__=0', # for compatibility with C headers - '-fno-exceptions', # no exception support - '-fno-rtti'] # no runtime type information - - class _Config: """An abstract class holds all configurable attributes of `CCompilerOpt`, these class attributes can be used to change the default behavior @@ -1000,7 +991,7 @@ class _CCompiler: ) detect_args = ( ("cc_has_debug", ".*(O0|Od|ggdb|coverage|debug:full).*", ""), - ("cc_has_native", + ("cc_has_native", ".*(-march=native|-xHost|/QxHost|-mcpu=a64fx).*", ""), # in case if the class run with -DNPY_DISABLE_OPTIMIZATION ("cc_noopt", ".*DISABLE_OPT.*", ""), diff --git a/numpy/distutils/command/build_clib.py b/numpy/distutils/command/build_clib.py index 45201f98f..11999dae2 100644 --- a/numpy/distutils/command/build_clib.py +++ b/numpy/distutils/command/build_clib.py @@ -307,6 +307,7 @@ class build_clib(old_build_clib): # problem, msvc uses its own convention :( c_sources += cxx_sources cxx_sources = [] + extra_cflags += extra_cxxflags # filtering C dispatch-table sources when optimization is not disabled, # otherwise treated as normal sources. diff --git a/numpy/distutils/command/build_ext.py b/numpy/distutils/command/build_ext.py index 6dc6b4265..871aa1099 100644 --- a/numpy/distutils/command/build_ext.py +++ b/numpy/distutils/command/build_ext.py @@ -259,7 +259,7 @@ class build_ext (old_build_ext): log.warn('resetting extension %r language from %r to %r.' 
% (ext.name, l, ext_language)) - ext.language = ext_language + ext.language = ext_language # global language all_languages.update(ext_languages) @@ -407,6 +407,7 @@ class build_ext (old_build_ext): if cxx_sources: # Needed to compile kiva.agg._agg extension. extra_args.append('/Zm1000') + extra_cflags += extra_cxxflags # this hack works around the msvc compiler attributes # problem, msvc uses its own convention :( c_sources += cxx_sources diff --git a/numpy/distutils/fcompiler/absoft.py b/numpy/distutils/fcompiler/absoft.py index efe3a4cb5..68f516b92 100644 --- a/numpy/distutils/fcompiler/absoft.py +++ b/numpy/distutils/fcompiler/absoft.py @@ -1,6 +1,6 @@ -# http://www.absoft.com/literature/osxuserguide.pdf -# http://www.absoft.com/documentation.html +# Absoft Corporation ceased operations on 12/31/2022. +# Thus, all links to <http://www.absoft.com> are invalid. # Notes: # - when using -g77 then use -DUNDERSCORE_G77 to compile f2py diff --git a/numpy/dual.py b/numpy/dual.py deleted file mode 100644 index eb7e61aac..000000000 --- a/numpy/dual.py +++ /dev/null @@ -1,83 +0,0 @@ -""" -.. deprecated:: 1.20 - -*This module is deprecated. Instead of importing functions from* -``numpy.dual``, *the functions should be imported directly from NumPy -or SciPy*. - -Aliases for functions which may be accelerated by SciPy. - -SciPy_ can be built to use accelerated or otherwise improved libraries -for FFTs, linear algebra, and special functions. This module allows -developers to transparently support these accelerated functions when -SciPy is available but still support users who have only installed -NumPy. - -.. _SciPy : https://www.scipy.org - -""" -import warnings - - -warnings.warn('The module numpy.dual is deprecated. Instead of using dual, ' - 'use the functions directly from numpy or scipy.', - category=DeprecationWarning, - stacklevel=2) - -# This module should be used for functions both in numpy and scipy if -# you want to use the numpy version if available but the scipy version -# otherwise. -# Usage --- from numpy.dual import fft, inv - -__all__ = ['fft', 'ifft', 'fftn', 'ifftn', 'fft2', 'ifft2', - 'norm', 'inv', 'svd', 'solve', 'det', 'eig', 'eigvals', - 'eigh', 'eigvalsh', 'lstsq', 'pinv', 'cholesky', 'i0'] - -import numpy.linalg as linpkg -import numpy.fft as fftpkg -from numpy.lib import i0 -import sys - - -fft = fftpkg.fft -ifft = fftpkg.ifft -fftn = fftpkg.fftn -ifftn = fftpkg.ifftn -fft2 = fftpkg.fft2 -ifft2 = fftpkg.ifft2 - -norm = linpkg.norm -inv = linpkg.inv -svd = linpkg.svd -solve = linpkg.solve -det = linpkg.det -eig = linpkg.eig -eigvals = linpkg.eigvals -eigh = linpkg.eigh -eigvalsh = linpkg.eigvalsh -lstsq = linpkg.lstsq -pinv = linpkg.pinv -cholesky = linpkg.cholesky - -_restore_dict = {} - -def register_func(name, func): - if name not in __all__: - raise ValueError("{} not a dual function.".format(name)) - f = sys._getframe(0).f_globals - _restore_dict[name] = f[name] - f[name] = func - -def restore_func(name): - if name not in __all__: - raise ValueError("{} not a dual function.".format(name)) - try: - val = _restore_dict[name] - except KeyError: - return - else: - sys._getframe(0).f_globals[name] = val - -def restore_all(): - for name in _restore_dict.keys(): - restore_func(name) diff --git a/numpy/exceptions.py b/numpy/exceptions.py index 73ad5ea6d..2f8438101 100644 --- a/numpy/exceptions.py +++ b/numpy/exceptions.py @@ -60,7 +60,7 @@ class ModuleDeprecationWarning(DeprecationWarning): .. 
warning:: - This warning should not be used, since nose testing is not relvant + This warning should not be used, since nose testing is not relevant anymore. The nose tester turns ordinary Deprecation warnings into test failures. diff --git a/numpy/f2py/crackfortran.py b/numpy/f2py/crackfortran.py index 99ee2ae8e..61e83d555 100755 --- a/numpy/f2py/crackfortran.py +++ b/numpy/f2py/crackfortran.py @@ -935,7 +935,7 @@ typedefpattern = re.compile( r'(?:,(?P<attributes>[\w(),]+))?(::)?(?P<name>\b[a-z$_][\w$]*\b)' r'(?:\((?P<params>[\w,]*)\))?\Z', re.I) nameargspattern = re.compile( - r'\s*(?P<name>\b[\w$]+\b)\s*(@\(@\s*(?P<args>[\w\s,]*)\s*@\)@|)\s*((result(\s*@\(@\s*(?P<result>\b[\w$]+\b)\s*@\)@|))|(bind\s*@\(@\s*(?P<bind>.*)\s*@\)@))*\s*\Z', re.I) + r'\s*(?P<name>\b[\w$]+\b)\s*(@\(@\s*(?P<args>[\w\s,]*)\s*@\)@|)\s*((result(\s*@\(@\s*(?P<result>\b[\w$]+\b)\s*@\)@|))|(bind\s*@\(@\s*(?P<bind>(?:(?!@\)@).)*)\s*@\)@))*\s*\Z', re.I) operatorpattern = re.compile( r'\s*(?P<scheme>(operator|assignment))' r'@\(@\s*(?P<name>[^)]+)\s*@\)@\s*\Z', re.I) @@ -2393,19 +2393,19 @@ def _selected_int_kind_func(r): def _selected_real_kind_func(p, r=0, radix=0): # XXX: This should be processor dependent - # This is only good for 0 <= p <= 20 + # This is only verified for 0 <= p <= 20, possibly good for p <= 33 and above if p < 7: return 4 if p < 16: return 8 machine = platform.machine().lower() - if machine.startswith(('aarch64', 'power', 'ppc', 'riscv', 's390x', 'sparc')): - if p <= 20: + if machine.startswith(('aarch64', 'arm64', 'power', 'ppc', 'riscv', 's390x', 'sparc')): + if p <= 33: return 16 else: if p < 19: return 10 - elif p <= 20: + elif p <= 33: return 16 return -1 diff --git a/numpy/f2py/diagnose.py b/numpy/f2py/diagnose.py index 21ee399f0..86d7004ab 100644 --- a/numpy/f2py/diagnose.py +++ b/numpy/f2py/diagnose.py @@ -30,15 +30,15 @@ def run(): try: import numpy has_newnumpy = 1 - except ImportError: - print('Failed to import new numpy:', sys.exc_info()[1]) + except ImportError as e: + print('Failed to import new numpy:', e) has_newnumpy = 0 try: from numpy.f2py import f2py2e has_f2py2e = 1 - except ImportError: - print('Failed to import f2py2e:', sys.exc_info()[1]) + except ImportError as e: + print('Failed to import f2py2e:', e) has_f2py2e = 0 try: @@ -48,8 +48,8 @@ def run(): try: import numpy_distutils has_numpy_distutils = 1 - except ImportError: - print('Failed to import numpy_distutils:', sys.exc_info()[1]) + except ImportError as e: + print('Failed to import numpy_distutils:', e) has_numpy_distutils = 0 if has_newnumpy: diff --git a/numpy/f2py/tests/test_crackfortran.py b/numpy/f2py/tests/test_crackfortran.py index 73ac4e276..23965087d 100644 --- a/numpy/f2py/tests/test_crackfortran.py +++ b/numpy/f2py/tests/test_crackfortran.py @@ -1,9 +1,10 @@ import importlib import codecs +import time import unicodedata import pytest import numpy as np -from numpy.f2py.crackfortran import markinnerspaces +from numpy.f2py.crackfortran import markinnerspaces, nameargspattern from . import util from numpy.f2py import crackfortran import textwrap @@ -276,3 +277,49 @@ class TestUnicodeComment(util.F2PyTest): ) def test_encoding_comment(self): self.module.foo(3) + +class TestNameArgsPatternBacktracking: + @pytest.mark.parametrize( + ['adversary'], + [ + ('@)@bind@(@',), + ('@)@bind @(@',), + ('@)@bind foo bar baz@(@',) + ] + ) + def test_nameargspattern_backtracking(self, adversary): + '''address ReDOS vulnerability: + https://github.com/numpy/numpy/issues/23338''' + last_median = 0. 
+ trials_per_count = 128 + start_reps, end_reps = 15, 25 + times_median_doubled = 0 + for ii in range(start_reps, end_reps): + repeated_adversary = adversary * ii + times = [] + for _ in range(trials_per_count): + t0 = time.perf_counter() + mtch = nameargspattern.search(repeated_adversary) + times.append(time.perf_counter() - t0) + # We should use a measure of time that's resilient to outliers. + # Times jump around a lot due to the CPU's scheduler. + median = np.median(times) + assert not mtch + # if the adversary is capped with @)@, it becomes acceptable + # according to the old version of the regex. + # that should still be true. + good_version_of_adversary = repeated_adversary + '@)@' + assert nameargspattern.search(good_version_of_adversary) + if ii > start_reps: + # the hallmark of exponentially catastrophic backtracking + # is that runtime doubles for every added instance of + # the problematic pattern. + times_median_doubled += median > 2 * last_median + # also try to rule out non-exponential but still bad cases + # arbitrarily, we should set a hard limit of 10ms as too slow + assert median < trials_per_count * 0.01 + last_median = median + # we accept that maybe the median might double once, due to + # the CPU scheduler acting weird or whatever. More than that + # seems suspicious. + assert times_median_doubled < 2
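For context on what "catastrophic backtracking" means in the test above: with nested quantifiers, a failing match can take time exponential in the input length. A classic toy pattern (deliberately not the f2py regex) shows the shape of the problem:

```python
import re
import time

pat = re.compile(r'^(a+)+b$')          # nested quantifiers: classic ReDOS shape
for n in (16, 18, 20):
    t0 = time.perf_counter()
    pat.match('a' * n + 'c')           # forced mismatch -> full backtracking
    print(n, f"{time.perf_counter() - t0:.3f}s")  # roughly 4x slower per +2 chars
```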
\ No newline at end of file diff --git a/numpy/f2py/tests/test_kind.py b/numpy/f2py/tests/test_kind.py index f0cb61fb6..69b85aaad 100644 --- a/numpy/f2py/tests/test_kind.py +++ b/numpy/f2py/tests/test_kind.py @@ -1,5 +1,6 @@ import os import pytest +import platform from numpy.f2py.crackfortran import ( _selected_int_kind_func as selected_int_kind, @@ -11,8 +12,8 @@ from . import util class TestKind(util.F2PyTest): sources = [util.getpath("tests", "src", "kind", "foo.f90")] - def test_all(self): - selectedrealkind = self.module.selectedrealkind + def test_int(self): + """Test `int` kind_func for integers up to 10**40.""" selectedintkind = self.module.selectedintkind for i in range(40): @@ -20,7 +21,27 @@ class TestKind(util.F2PyTest): i ), f"selectedintkind({i}): expected {selected_int_kind(i)!r} but got {selectedintkind(i)!r}" - for i in range(20): + def test_real(self): + """ + Test (processor-dependent) `real` kind_func for real numbers + of up to 31 digits precision (extended/quadruple). + """ + selectedrealkind = self.module.selectedrealkind + + for i in range(32): + assert selectedrealkind(i) == selected_real_kind( + i + ), f"selectedrealkind({i}): expected {selected_real_kind(i)!r} but got {selectedrealkind(i)!r}" + + @pytest.mark.xfail(platform.machine().lower().startswith("ppc"), + reason="Some PowerPC may not support full IEEE 754 precision") + def test_quad_precision(self): + """ + Test kind_func for quadruple precision [`real(16)`] of 32+ digits. + """ + selectedrealkind = self.module.selectedrealkind + + for i in range(32, 40): assert selectedrealkind(i) == selected_real_kind( i ), f"selectedrealkind({i}): expected {selected_real_kind(i)!r} but got {selectedrealkind(i)!r}" diff --git a/numpy/lib/__init__.py b/numpy/lib/__init__.py index 58166d4b1..d3cc9fee4 100644 --- a/numpy/lib/__init__.py +++ b/numpy/lib/__init__.py @@ -11,7 +11,6 @@ Most contains basic functions that are used by several submodules and are useful to have in the main name-space. """ -import math from numpy.version import version as __version__ @@ -58,7 +57,7 @@ from .arraypad import * from ._version import * from numpy.core._multiarray_umath import tracemalloc_domain -__all__ = ['emath', 'math', 'tracemalloc_domain', 'Arrayterator'] +__all__ = ['emath', 'tracemalloc_domain', 'Arrayterator'] __all__ += type_check.__all__ __all__ += index_tricks.__all__ __all__ += function_base.__all__ @@ -77,3 +76,19 @@ __all__ += histograms.__all__ from numpy._pytesttester import PytestTester test = PytestTester(__name__) del PytestTester + +def __getattr__(attr): + # Warn for deprecated attributes + import math + import warnings + + if attr == 'math': + warnings.warn( + "`np.lib.math` is a deprecated alias for the standard library " + "`math` module (Deprecated Numpy 1.25). Replace usages of " + "`numpy.lib.math` with `math`", DeprecationWarning, stacklevel=2) + return math + else: + raise AttributeError("module {!r} has no attribute " + "{!r}".format(__name__, attr)) + diff --git a/numpy/lib/format.py b/numpy/lib/format.py index 8f3fd694d..ef50fb19d 100644 --- a/numpy/lib/format.py +++ b/numpy/lib/format.py @@ -437,15 +437,15 @@ def _write_array_header(fp, d, version=None): header.append("'%s': %s, " % (key, repr(value))) header.append("}") header = "".join(header) - + # Add some spare space so that the array header can be modified in-place # when changing the array size, e.g. when growing it by appending data at - # the end.
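To make the module-level __getattr__ above concrete: accessing the deprecated alias still returns the stdlib module, but warns. A quick sketch:

```python
import math
import warnings
import numpy as np

with warnings.catch_warnings(record=True) as w:
    warnings.simplefilter("always")
    m = np.lib.math                  # deprecated alias for the stdlib module
assert m is math
assert issubclass(w[0].category, DeprecationWarning)
```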
shape = d['shape'] header += " " * ((GROWTH_AXIS_MAX_DIGITS - len(repr( shape[-1 if d['fortran_order'] else 0] ))) if len(shape) > 0 else 0) - + if version is None: header = _wrap_header_guess_version(header) else: @@ -505,7 +505,7 @@ def read_array_header_1_0(fp, max_header_size=_MAX_HEADER_SIZE): max_header_size : int, optional Maximum allowed size of the header. Large headers may not be safe to load securely and thus require explicitly passing a larger value. - See :py:meth:`ast.literal_eval()` for details. + See :py:func:`ast.literal_eval()` for details. Raises ------ @@ -532,7 +532,7 @@ def read_array_header_2_0(fp, max_header_size=_MAX_HEADER_SIZE): max_header_size : int, optional Maximum allowed size of the header. Large headers may not be safe to load securely and thus require explicitly passing a larger value. - See :py:meth:`ast.literal_eval()` for details. + See :py:func:`ast.literal_eval()` for details. Returns ------- @@ -764,7 +764,7 @@ def read_array(fp, allow_pickle=False, pickle_kwargs=None, *, max_header_size : int, optional Maximum allowed size of the header. Large headers may not be safe to load securely and thus require explicitly passing a larger value. - See :py:meth:`ast.literal_eval()` for details. + See :py:func:`ast.literal_eval()` for details. This option is ignored when `allow_pickle` is passed. In that case the file is by definition trusted and the limit is unnecessary. @@ -883,7 +883,7 @@ def open_memmap(filename, mode='r+', dtype=None, shape=None, max_header_size : int, optional Maximum allowed size of the header. Large headers may not be safe to load securely and thus require explicitly passing a larger value. - See :py:meth:`ast.literal_eval()` for details. + See :py:func:`ast.literal_eval()` for details. Returns ------- diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index f0f374f97..277ae3dc4 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -2117,10 +2117,10 @@ def _create_arrays(broadcast_shape, dim_sizes, list_of_core_dims, dtypes, @set_module('numpy') class vectorize: """ - vectorize(pyfunc, otypes=None, doc=None, excluded=None, cache=False, - signature=None) + vectorize(pyfunc=np._NoValue, otypes=None, doc=None, excluded=None, + cache=False, signature=None) - Generalized function class. + Returns an object that acts like pyfunc, but takes arrays as input. Define a vectorized function which takes a nested sequence of objects or numpy arrays as inputs and returns a single numpy array or a tuple of numpy @@ -2134,8 +2134,9 @@ class vectorize: Parameters ---------- - pyfunc : callable + pyfunc : callable, optional A python function or method. + Can be omitted to produce a decorator with keyword arguments. otypes : str or list of dtypes, optional The output data type. It must be specified as either a string of typecode characters or a list of data type specifiers. There should @@ -2167,8 +2168,9 @@ class vectorize: Returns ------- - vectorized : callable - Vectorized function. + out : callable + A vectorized function if ``pyfunc`` was provided, + a decorator otherwise. See Also -------- @@ -2265,18 +2267,44 @@ class vectorize: [0., 0., 1., 2., 1., 0.], [0., 0., 0., 1., 2., 1.]]) + Decorator syntax is supported. The decorator can be called as + a function to provide keyword arguments. + >>> @np.vectorize + ... def identity(x): + ... return x + ... + >>> identity([0, 1, 2]) + array([0, 1, 2]) + >>> @np.vectorize(otypes=[float]) + ... def as_float(x): + ... return x + ...
+ >>> as_float([0, 1, 2]) + array([0., 1., 2.]) """ - def __init__(self, pyfunc, otypes=None, doc=None, excluded=None, - cache=False, signature=None): + def __init__(self, pyfunc=np._NoValue, otypes=None, doc=None, + excluded=None, cache=False, signature=None): + + if (pyfunc != np._NoValue) and (not callable(pyfunc)): + # Splitting the error message to keep + # the length below 79 characters. + part1 = "When used as a decorator, " + part2 = "only accepts keyword arguments." + raise TypeError(part1 + part2) + self.pyfunc = pyfunc self.cache = cache self.signature = signature - self._ufunc = {} # Caching to improve default performance + if pyfunc != np._NoValue and hasattr(pyfunc, '__name__'): + self.__name__ = pyfunc.__name__ - if doc is None: + self._ufunc = {} # Caching to improve default performance + self._doc = None + self.__doc__ = doc + if doc is None and hasattr(pyfunc, '__doc__'): self.__doc__ = pyfunc.__doc__ else: - self.__doc__ = doc + self._doc = doc if isinstance(otypes, str): for char in otypes: @@ -2298,7 +2326,15 @@ class vectorize: else: self._in_and_out_core_dims = None - def __call__(self, *args, **kwargs): + def _init_stage_2(self, pyfunc, *args, **kwargs): + self.__name__ = pyfunc.__name__ + self.pyfunc = pyfunc + if self._doc is None: + self.__doc__ = pyfunc.__doc__ + else: + self.__doc__ = self._doc + + def _call_as_normal(self, *args, **kwargs): """ Return arrays with the results of `pyfunc` broadcast (vectorized) over `args` and `kwargs` not in `excluded`. @@ -2328,6 +2364,13 @@ class vectorize: return self._vectorize_call(func=func, args=vargs) + def __call__(self, *args, **kwargs): + if self.pyfunc is np._NoValue: + self._init_stage_2(*args, **kwargs) + return self + + return self._call_as_normal(*args, **kwargs) + def _get_ufunc_and_otypes(self, func, args): """Return (ufunc, otypes).""" # frompyfunc will fail if args is empty diff --git a/numpy/lib/npyio.py b/numpy/lib/npyio.py index b1f85f709..f8f2ab7a2 100644 --- a/numpy/lib/npyio.py +++ b/numpy/lib/npyio.py @@ -142,7 +142,7 @@ class NpzFile(Mapping): max_header_size : int, optional Maximum allowed size of the header. Large headers may not be safe to load securely and thus require explicitly passing a larger value. - See :py:meth:`ast.literal_eval()` for details. + See :py:func:`ast.literal_eval()` for details. This option is ignored when `allow_pickle` is passed. In that case the file is by definition trusted and the limit is unnecessary. @@ -167,6 +167,8 @@ class NpzFile(Mapping): >>> npz = np.load(outfile) >>> isinstance(npz, np.lib.npyio.NpzFile) True + >>> npz + NpzFile 'object' with keys: x, y >>> sorted(npz.files) ['x', 'y'] >>> npz['x'] # getitem access @@ -178,6 +180,7 @@ # Make __exit__ safe if zipfile_factory raises an exception zip = None fid = None + _MAX_REPR_ARRAY_COUNT = 5 def __init__(self, fid, own_fid=False, allow_pickle=False, pickle_kwargs=None, *, @@ -259,6 +262,19 @@ class NpzFile(Mapping): else: raise KeyError("%s is not a file in the archive" % key) + def __repr__(self): + # Get filename or default to `object` + if isinstance(self.fid, str): + filename = self.fid + else: + filename = getattr(self.fid, "name", "object") + + # Get the name of arrays + array_names = ', '.join(self.files[:self._MAX_REPR_ARRAY_COUNT]) + if len(self.files) > self._MAX_REPR_ARRAY_COUNT: + array_names += "..."
+ return f"NpzFile {filename!r} with keys: {array_names}" + @set_module('numpy') def load(file, mmap_mode=None, allow_pickle=False, fix_imports=True, @@ -309,7 +325,7 @@ def load(file, mmap_mode=None, allow_pickle=False, fix_imports=True, max_header_size : int, optional Maximum allowed size of the header. Large headers may not be safe to load securely and thus require explicitly passing a larger value. - See :py:meth:`ast.literal_eval()` for details. + See :py:func:`ast.literal_eval()` for details. This option is ignored when `allow_pickle` is passed. In that case the file is by definition trusted and the limit is unnecessary. @@ -1159,10 +1175,10 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None, while such lines are counted in `skiprows`. .. versionadded:: 1.16.0 - + .. versionchanged:: 1.23.0 - Lines containing no data, including comment lines (e.g., lines - starting with '#' or as specified via `comments`) are not counted + Lines containing no data, including comment lines (e.g., lines + starting with '#' or as specified via `comments`) are not counted towards `max_rows`. quotechar : unicode character or None, optional The character used to denote the start and end of a quoted item. diff --git a/numpy/lib/npyio.pyi b/numpy/lib/npyio.pyi index 8007b2dc7..9dd3d6809 100644 --- a/numpy/lib/npyio.pyi +++ b/numpy/lib/npyio.pyi @@ -72,6 +72,7 @@ class NpzFile(Mapping[str, NDArray[Any]]): files: list[str] allow_pickle: bool pickle_kwargs: None | Mapping[str, Any] + _MAX_REPR_ARRAY_COUNT: int # Represent `f` as a mutable property so we can access the type of `self` @property def f(self: _T) -> BagObj[_T]: ... @@ -97,6 +98,7 @@ class NpzFile(Mapping[str, NDArray[Any]]): def __iter__(self) -> Iterator[str]: ... def __len__(self) -> int: ... def __getitem__(self, key: str) -> NDArray[Any]: ... + def __repr__(self) -> str: ... 
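The new NpzFile.__repr__ in action, mirroring the docstring example above (a BytesIO has no name attribute, so the filename falls back to 'object'):

```python
from io import BytesIO
import numpy as np

buf = BytesIO()
np.savez(buf, x=np.arange(3), y=np.zeros(2))
buf.seek(0)
with np.load(buf) as npz:
    print(repr(npz))                 # NpzFile 'object' with keys: x, y
```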
# NOTE: Returns a `NpzFile` if file is a zip file; # returns an `ndarray`/`memmap` otherwise diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index 3ec46735c..09d1195ad 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -8,7 +8,7 @@ import pytest import hypothesis from hypothesis.extra.numpy import arrays import hypothesis.strategies as st - +from functools import partial import numpy as np from numpy import ma @@ -1787,6 +1787,70 @@ class TestVectorize: assert_equal(type(r), subclass) assert_equal(r, m * v) + def test_name(self): + #See gh-23021 + @np.vectorize + def f2(a, b): + return a + b + + assert f2.__name__ == 'f2' + + def test_decorator(self): + @vectorize + def addsubtract(a, b): + if a > b: + return a - b + else: + return a + b + + r = addsubtract([0, 3, 6, 9], [1, 3, 5, 7]) + assert_array_equal(r, [1, 6, 1, 2]) + + def test_docstring(self): + @vectorize + def f(x): + """Docstring""" + return x + + if sys.flags.optimize < 2: + assert f.__doc__ == "Docstring" + + def test_partial(self): + def foo(x, y): + return x + y + + bar = partial(foo, 3) + vbar = np.vectorize(bar) + assert vbar(1) == 4 + + def test_signature_otypes_decorator(self): + @vectorize(signature='(n)->(n)', otypes=['float64']) + def f(x): + return x + + r = f([1, 2, 3]) + assert_equal(r.dtype, np.dtype('float64')) + assert_array_equal(r, [1, 2, 3]) + assert f.__name__ == 'f' + + def test_bad_input(self): + with assert_raises(TypeError): + A = np.vectorize(pyfunc = 3) + + def test_no_keywords(self): + with assert_raises(TypeError): + @np.vectorize("string") + def foo(): + return "bar" + + def test_positional_regression_9477(self): + # This supplies the first keyword argument as a positional, + # to ensure that they are still properly forwarded after the + # enhancement for #9477 + f = vectorize((lambda x: x), ['float64']) + r = f([2]) + assert_equal(r.dtype, np.dtype('float64')) + class TestLeaks: class A: diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py index 06d6dbf8d..5a68fbc97 100644 --- a/numpy/lib/tests/test_io.py +++ b/numpy/lib/tests/test_io.py @@ -321,6 +321,21 @@ class TestSavezLoad(RoundtripTest): data.close() assert_(fp.closed) + @pytest.mark.parametrize("count, expected_repr", [ + (1, "NpzFile {fname!r} with keys: arr_0"), + (5, "NpzFile {fname!r} with keys: arr_0, arr_1, arr_2, arr_3, arr_4"), + # _MAX_REPR_ARRAY_COUNT is 5, so files with more than 5 keys are + # expected to end in '...' 
+ (6, "NpzFile {fname!r} with keys: arr_0, arr_1, arr_2, arr_3, arr_4..."), + ]) + def test_repr_lists_keys(self, count, expected_repr): + a = np.array([[1, 2], [3, 4]], float) + with temppath(suffix='.npz') as tmp: + np.savez(tmp, *[a]*count) + l = np.load(tmp) + assert repr(l) == expected_repr.format(fname=tmp) + l.close() + class TestSaveTxt: def test_array(self): @@ -597,8 +612,8 @@ class TestSaveTxt: # in our process if needed, see gh-16889 memoryerror_raised = Value(c_bool) - # Since Python 3.8, the default start method for multiprocessing has - # been changed from 'fork' to 'spawn' on macOS, causing inconsistency + # Since Python 3.8, the default start method for multiprocessing has + # been changed from 'fork' to 'spawn' on macOS, causing inconsistency # on memory sharing model, lead to failed test for check_large_zip ctx = get_context('fork') p = ctx.Process(target=check_large_zip, args=(memoryerror_raised,)) diff --git a/numpy/linalg/setup.py b/numpy/linalg/setup.py index 1c4e1295e..6f72635ab 100644 --- a/numpy/linalg/setup.py +++ b/numpy/linalg/setup.py @@ -4,7 +4,6 @@ import sysconfig def configuration(parent_package='', top_path=None): from numpy.distutils.misc_util import Configuration - from numpy.distutils.ccompiler_opt import NPY_CXX_FLAGS from numpy.distutils.system_info import get_info, system_info config = Configuration('linalg', parent_package, top_path) @@ -81,7 +80,6 @@ def configuration(parent_package='', top_path=None): sources=['umath_linalg.cpp', get_lapack_lite_sources], depends=['lapack_lite/f2c.h'], extra_info=lapack_info, - extra_cxx_compile_args=NPY_CXX_FLAGS, libraries=['npymath'], ) config.add_data_files('*.pyi') diff --git a/numpy/ma/core.py b/numpy/ma/core.py index fcc321a73..0cf748dfd 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -3340,6 +3340,10 @@ class MaskedArray(ndarray): # Note: Don't try to check for m.any(), that'll take too long return dout + # setitem may put NaNs into integer arrays or occasionally overflow a + # float. But this may happen for masked values, so avoid otherwise + # correct warnings (as is typical also in masked calculations). + @np.errstate(over='ignore', invalid='ignore') def __setitem__(self, indx, value): """ x.__setitem__(i, y) <==> x[i]=y @@ -4631,6 +4635,7 @@ class MaskedArray(ndarray): otherwise. 'K' means to read the elements in the order they occur in memory, except for reversing the data when strides are negative. By default, 'C' index order is used. + (Masked arrays currently use 'A' on the data when 'K' is passed.) Returns ------- @@ -4657,6 +4662,13 @@ class MaskedArray(ndarray): fill_value=999999) """ + # The order of _data and _mask could be different (it shouldn't be + # normally). Passing order `K` or `A` would be incorrect. + # So we ignore the mask memory order. + # TODO: We don't actually support K, so use A instead. We could + # try to guess this correctly by sorting strides or deprecate.
+ if order in "kKaA": + order = "C" if self._data.flags.fnc else "F" r = ndarray.ravel(self._data, order=order).view(type(self)) r._update_from(self) if self._mask is not nomask: @@ -7008,6 +7020,21 @@ def sort(a, axis=-1, kind=None, order=None, endwith=True, fill_value=None): See Also -------- MaskedArray.sort : equivalent method + + Examples + -------- + >>> import numpy.ma as ma + >>> x = [11.2, -3.973, 0.801, -1.41] + >>> mask = [0, 0, 0, 1] + >>> masked_x = ma.masked_array(x, mask) + >>> masked_x + masked_array(data=[11.2, -3.973, 0.801, --], + mask=[False, False, False, True], + fill_value=1e+20) + >>> ma.sort(masked_x) + masked_array(data=[-3.973, 0.801, 11.2, --], + mask=[False, False, False, True], + fill_value=1e+20) """ a = np.array(a, copy=True, subok=True) if axis is None: @@ -7033,6 +7060,29 @@ def compressed(x): -------- ma.MaskedArray.compressed : Equivalent method. + Examples + -------- + + Create an array with negative values masked: + + >>> import numpy as np + >>> x = np.array([[1, -1, 0], [2, -1, 3], [7, 4, -1]]) + >>> masked_x = np.ma.masked_array(x, mask=x < 0) + >>> masked_x + masked_array( + data=[[1, --, 0], + [2, --, 3], + [7, 4, --]], + mask=[[False, True, False], + [False, True, False], + [False, False, True]], + fill_value=999999) + + Compress the masked array into a 1-D array of non-masked values: + + >>> np.ma.compressed(masked_x) + array([1, 0, 2, 3, 7, 4]) + """ return asanyarray(x).compressed() @@ -7110,7 +7160,7 @@ def diag(v, k=0): Examples -------- - + Create an array with negative values masked: >>> import numpy as np @@ -7179,6 +7229,21 @@ def right_shift(a, n): -------- numpy.right_shift + Examples + -------- + >>> import numpy.ma as ma + >>> x = [11, 3, 8, 1] + >>> mask = [0, 0, 0, 1] + >>> masked_x = ma.masked_array(x, mask) + >>> masked_x + masked_array(data=[11, 3, 8, --], + mask=[False, False, False, True], + fill_value=999999) + >>> ma.right_shift(masked_x,1) + masked_array(data=[5, 1, 4, --], + mask=[False, False, False, True], + fill_value=999999) + """ m = getmask(a) if m is nomask: @@ -7521,7 +7586,7 @@ def diff(a, /, n=1, axis=-1, prepend=np._NoValue, append=np._NoValue): if len(combined) > 1: a = np.ma.concatenate(combined, axis) - # GH 22465 np.diff without prepend/append preserves the mask + # GH 22465 np.diff without prepend/append preserves the mask return np.diff(a, n, axis) @@ -7752,94 +7817,18 @@ def round_(a, decimals=0, out=None): round = round_ -# Needed by dot, so move here from extras.py. It will still be exported -# from extras.py for compatibility. -def mask_rowcols(a, axis=None): +def _mask_propagate(a, axis): """ - Mask rows and/or columns of a 2D array that contain masked values. - - Mask whole rows and/or columns of a 2D array that contain - masked values. The masking behavior is selected using the - `axis` parameter. - - - If `axis` is None, rows *and* columns are masked. - - If `axis` is 0, only rows are masked. - - If `axis` is 1 or -1, only columns are masked. - - Parameters - ---------- - a : array_like, MaskedArray - The array to mask. If not a MaskedArray instance (or if no array - elements are masked). The result is a MaskedArray with `mask` set - to `nomask` (False). Must be a 2D array. - axis : int, optional - Axis along which to perform the operation. If None, applies to a - flattened version of the array. - - Returns - ------- - a : MaskedArray - A modified version of the input array, masked depending on the value - of the `axis` parameter. 
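The ravel fix above keeps the mask aligned with the data even when the two disagree on memory order. Mirroring the new regression test, a sketch of the invariant being preserved:

```python
import numpy as np
import numpy.ma as ma

data = np.ones((5, 10), order='C')
data[0, :] = 0
mask = np.ones((10, 5), dtype=bool, order='C').T   # Fortran-contiguous view
mask[0, :] = False                                 # only the zero row is unmasked
x = ma.array(data, mask=mask)

r = x.ravel('K')            # mask is now raveled in the same order as the data
assert (r.filled(0) == 0).all()
```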
- - Raises - ------ - NotImplementedError - If input array `a` is not 2D. - - See Also - -------- - mask_rows : Mask rows of a 2D array that contain masked values. - mask_cols : Mask cols of a 2D array that contain masked values. - masked_where : Mask where a condition is met. - - Notes - ----- - The input array's mask is modified by this function. - - Examples - -------- - >>> import numpy.ma as ma - >>> a = np.zeros((3, 3), dtype=int) - >>> a[1, 1] = 1 - >>> a - array([[0, 0, 0], - [0, 1, 0], - [0, 0, 0]]) - >>> a = ma.masked_equal(a, 1) - >>> a - masked_array( - data=[[0, 0, 0], - [0, --, 0], - [0, 0, 0]], - mask=[[False, False, False], - [False, True, False], - [False, False, False]], - fill_value=1) - >>> ma.mask_rowcols(a) - masked_array( - data=[[0, --, 0], - [--, --, --], - [0, --, 0]], - mask=[[False, True, False], - [ True, True, True], - [False, True, False]], - fill_value=1) - + Mask whole 1-d vectors of an array that contain masked values. """ a = array(a, subok=False) - if a.ndim != 2: - raise NotImplementedError("mask_rowcols works for 2D arrays only.") m = getmask(a) - # Nothing is masked: return a - if m is nomask or not m.any(): + if m is nomask or not m.any() or axis is None: return a - maskedval = m.nonzero() a._mask = a._mask.copy() - if not axis: - a[np.unique(maskedval[0])] = masked - if axis in [None, 1, -1]: - a[:, np.unique(maskedval[1])] = masked + axes = normalize_axis_tuple(axis, a.ndim) + for ax in axes: + a._mask |= m.any(axis=ax, keepdims=True) return a @@ -7856,10 +7845,6 @@ def dot(a, b, strict=False, out=None): corresponding method, it is recommended that the optional arguments be treated as keyword only. At some point that may be mandatory. - .. note:: - Works only with 2-D arrays at the moment. - - Parameters ---------- a, b : masked_array_like @@ -7903,18 +7888,22 @@ def dot(a, b, strict=False, out=None): fill_value=999999) """ - # !!!: Works only with 2D arrays. There should be a way to get it to run - # with higher dimension - if strict and (a.ndim == 2) and (b.ndim == 2): - a = mask_rowcols(a, 0) - b = mask_rowcols(b, 1) + if strict is True: + if np.ndim(a) == 0 or np.ndim(b) == 0: + pass + elif b.ndim == 1: + a = _mask_propagate(a, a.ndim - 1) + b = _mask_propagate(b, b.ndim - 1) + else: + a = _mask_propagate(a, a.ndim - 1) + b = _mask_propagate(b, b.ndim - 2) am = ~getmaskarray(a) bm = ~getmaskarray(b) if out is None: d = np.dot(filled(a, 0), filled(b, 0)) m = ~np.dot(am, bm) - if d.ndim == 0: + if np.ndim(d) == 0: d = np.asarray(d) r = d.view(get_masked_subclass(a, b)) r.__setmask__(m) diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py index 4abe2107a..8a6246c36 100644 --- a/numpy/ma/extras.py +++ b/numpy/ma/extras.py @@ -27,8 +27,7 @@ from . import core as ma from .core import ( MaskedArray, MAError, add, array, asarray, concatenate, filled, count, getmask, getmaskarray, make_mask_descr, masked, masked_array, mask_or, - nomask, ones, sort, zeros, getdata, get_masked_subclass, dot, - mask_rowcols + nomask, ones, sort, zeros, getdata, get_masked_subclass, dot ) import numpy as np @@ -955,6 +954,95 @@ def compress_cols(a): return compress_rowcols(a, 1) +def mask_rowcols(a, axis=None): + """ + Mask rows and/or columns of a 2D array that contain masked values. + + Mask whole rows and/or columns of a 2D array that contain + masked values. The masking behavior is selected using the + `axis` parameter. + + - If `axis` is None, rows *and* columns are masked. + - If `axis` is 0, only rows are masked. + - If `axis` is 1 or -1, only columns are masked. 
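With mask_rowcols replaced by _mask_propagate, dot's strict mode is no longer limited to 2-D operands: any 1-D vector along the summed axis that contains a masked value becomes fully masked. A sketch matching the new test cases added in test_extras further below:

```python
import numpy as np
import numpy.ma as ma

a = ma.masked_array(np.arange(8.).reshape(2, 2, 2),
                    mask=[[[1, 0], [0, 0]], [[0, 0], [0, 0]]])
b = ma.masked_array(np.arange(2.), mask=[0, 1])

print(ma.dot(a, b, strict=True).mask)    # [[ True  True] [ True  True]]
print(ma.dot(a, b, strict=False).mask)   # [[ True False] [False False]]
```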
+ + Parameters + ---------- + a : array_like, MaskedArray + The array to mask. If not a MaskedArray instance (or if no array + elements are masked), the result is a MaskedArray with `mask` set + to `nomask` (False). Must be a 2D array. + axis : int, optional + Axis along which to perform the operation. If None, applies to a + flattened version of the array. + + Returns + ------- + a : MaskedArray + A modified version of the input array, masked depending on the value + of the `axis` parameter. + + Raises + ------ + NotImplementedError + If input array `a` is not 2D. + + See Also + -------- + mask_rows : Mask rows of a 2D array that contain masked values. + mask_cols : Mask cols of a 2D array that contain masked values. + masked_where : Mask where a condition is met. + + Notes + ----- + The input array's mask is modified by this function. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = np.zeros((3, 3), dtype=int) + >>> a[1, 1] = 1 + >>> a + array([[0, 0, 0], + [0, 1, 0], + [0, 0, 0]]) + >>> a = ma.masked_equal(a, 1) + >>> a + masked_array( + data=[[0, 0, 0], + [0, --, 0], + [0, 0, 0]], + mask=[[False, False, False], + [False, True, False], + [False, False, False]], + fill_value=1) + >>> ma.mask_rowcols(a) + masked_array( + data=[[0, --, 0], + [--, --, --], + [0, --, 0]], + mask=[[False, True, False], + [ True, True, True], + [False, True, False]], + fill_value=1) + + """ + a = array(a, subok=False) + if a.ndim != 2: + raise NotImplementedError("mask_rowcols works for 2D arrays only.") + m = getmask(a) + # Nothing is masked: return a + if m is nomask or not m.any(): + return a + maskedval = m.nonzero() + a._mask = a._mask.copy() + if not axis: + a[np.unique(maskedval[0])] = masked + if axis in [None, 1, -1]: + a[:, np.unique(maskedval[1])] = masked + return a + + def mask_rows(a, axis=np._NoValue): """ Mask rows of a 2D array that contain masked values. diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py index 5db01b74a..9a4b74997 100644 --- a/numpy/ma/tests/test_core.py +++ b/numpy/ma/tests/test_core.py @@ -377,6 +377,24 @@ class TestMaskedArray: assert_equal(s1, s2) assert_(x1[1:1].shape == (0,)) + def test_setitem_no_warning(self): + # Setitem shouldn't warn, because the assignment might be masked + # and warning for a masked assignment is weird (see gh-23000) + # (When the value is masked, otherwise a warning would be acceptable + # but is not given currently.) + x = np.ma.arange(60).reshape((6, 10)) + index = (slice(1, 5, 2), [7, 5]) + value = np.ma.masked_all((2, 2)) + value._data[...] = np.inf # not a valid integer... + x[index] = value + # The masked scalar is special cased, but test anyway (it's NaN): + x[...] = np.ma.masked + # Finally, a large value that cannot be cast to the float32 `x` + x = np.ma.arange(3., dtype=np.float32) + value = np.ma.array([2e234, 1, 1], mask=[True, False, False]) + x[...] = value + x[[0, 1, 2]] = value + @suppress_copy_mask_on_assignment def test_copy(self): # Tests of some subtle points of copying and sizing. @@ -3411,6 +3429,22 @@ class TestMaskedArrayMethods: assert_equal(a.ravel(order='C'), [1, 2, 3, 4]) assert_equal(a.ravel(order='F'), [1, 3, 2, 4]) + @pytest.mark.parametrize("order", "AKCF") + @pytest.mark.parametrize("data_order", "CF") + def test_ravel_order(self, order, data_order): + # Ravelling must ravel mask and data in the same order always to avoid + # misaligning the two in the ravel result. 
+ arr = np.ones((5, 10), order=data_order) + arr[0, :] = 0 + mask = np.ones((10, 5), dtype=bool, order=data_order).T + mask[0, :] = False + x = array(arr, mask=mask) + assert x._data.flags.fnc != x._mask.flags.fnc + assert (x.filled(0) == 0).all() + raveled = x.ravel(order) + assert (raveled.filled(0) == 0).all() + + def test_reshape(self): # Tests reshape x = arange(4) diff --git a/numpy/ma/tests/test_extras.py b/numpy/ma/tests/test_extras.py index e59ba3656..d09a50fec 100644 --- a/numpy/ma/tests/test_extras.py +++ b/numpy/ma/tests/test_extras.py @@ -730,6 +730,47 @@ class TestCompressFunctions: assert_equal(c.mask, [[0, 0, 1], [1, 1, 1], [0, 0, 1]]) c = dot(b, a, strict=False) assert_equal(c, np.dot(b.filled(0), a.filled(0))) + # + a = masked_array(np.arange(8).reshape(2, 2, 2), + mask=[[[1, 0], [0, 0]], [[0, 0], [0, 0]]]) + b = masked_array(np.arange(8).reshape(2, 2, 2), + mask=[[[0, 0], [0, 0]], [[0, 0], [0, 1]]]) + c = dot(a, b, strict=True) + assert_equal(c.mask, + [[[[1, 1], [1, 1]], [[0, 0], [0, 1]]], + [[[0, 0], [0, 1]], [[0, 0], [0, 1]]]]) + c = dot(a, b, strict=False) + assert_equal(c.mask, + [[[[0, 0], [0, 1]], [[0, 0], [0, 0]]], + [[[0, 0], [0, 0]], [[0, 0], [0, 0]]]]) + c = dot(b, a, strict=True) + assert_equal(c.mask, + [[[[1, 0], [0, 0]], [[1, 0], [0, 0]]], + [[[1, 0], [0, 0]], [[1, 1], [1, 1]]]]) + c = dot(b, a, strict=False) + assert_equal(c.mask, + [[[[0, 0], [0, 0]], [[0, 0], [0, 0]]], + [[[0, 0], [0, 0]], [[1, 0], [0, 0]]]]) + # + a = masked_array(np.arange(8).reshape(2, 2, 2), + mask=[[[1, 0], [0, 0]], [[0, 0], [0, 0]]]) + b = 5. + c = dot(a, b, strict=True) + assert_equal(c.mask, [[[1, 0], [0, 0]], [[0, 0], [0, 0]]]) + c = dot(a, b, strict=False) + assert_equal(c.mask, [[[1, 0], [0, 0]], [[0, 0], [0, 0]]]) + c = dot(b, a, strict=True) + assert_equal(c.mask, [[[1, 0], [0, 0]], [[0, 0], [0, 0]]]) + c = dot(b, a, strict=False) + assert_equal(c.mask, [[[1, 0], [0, 0]], [[0, 0], [0, 0]]]) + # + a = masked_array(np.arange(8).reshape(2, 2, 2), + mask=[[[1, 0], [0, 0]], [[0, 0], [0, 0]]]) + b = masked_array(np.arange(2), mask=[0, 1]) + c = dot(a, b, strict=True) + assert_equal(c.mask, [[1, 1], [1, 1]]) + c = dot(a, b, strict=False) + assert_equal(c.mask, [[1, 0], [0, 0]]) def test_dot_returns_maskedarray(self): # See gh-6611 diff --git a/numpy/meson.build b/numpy/meson.build index b366b7b05..3c0adf6d0 100644 --- a/numpy/meson.build +++ b/numpy/meson.build @@ -10,7 +10,6 @@ endif # Platform detection is_windows = host_machine.system() == 'windows' is_mingw = is_windows and cc.get_id() == 'gcc' -is_msvc = is_windows and cc.get_id() == 'msvc' if is_windows # For mingw-w64, link statically against the UCRT. @@ -103,7 +102,6 @@ python_sources = [ 'conftest.py', 'ctypeslib.py', 'ctypeslib.pyi', - 'dual.py', 'exceptions.py', 'exceptions.pyi', 'matlib.py', diff --git a/numpy/polynomial/_polybase.py b/numpy/polynomial/_polybase.py index 3bea91dd2..9730574cf 100644 --- a/numpy/polynomial/_polybase.py +++ b/numpy/polynomial/_polybase.py @@ -682,6 +682,28 @@ class ABCPolyBase(abc.ABC): degree : int Degree of the series, one less than the number of coefficients. + Examples + -------- + + Create a polynomial object for ``1 + 7*x + 4*x**2``: + + >>> poly = np.polynomial.Polynomial([1, 7, 4]) + >>> print(poly) + 1.0 + 7.0·x + 4.0·x² + >>> poly.degree() + 2 + + Note that this method does not check for non-zero coefficients. 
+ You must trim the polynomial to remove any trailing zeroes: + + >>> poly = np.polynomial.Polynomial([1, 7, 0]) + >>> print(poly) + 1.0 + 7.0·x + 0.0·x² + >>> poly.degree() + 2 + >>> poly.trim().degree() + 1 + """ return len(self) - 1 @@ -887,7 +909,7 @@ class ABCPolyBase(abc.ABC): """Return the roots of the series polynomial. Compute the roots for the series. Note that the accuracy of the - roots decrease the further outside the domain they lie. + roots decreases the further outside the `domain` they lie. Returns ------- diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx index 1b19d00d9..a30d116c2 100644 --- a/numpy/random/_generator.pyx +++ b/numpy/random/_generator.pyx @@ -4327,7 +4327,7 @@ cdef class Generator: Raises ------ ValueError - If any value in ``alpha`` is less than or equal to zero + If any value in ``alpha`` is less than zero Notes ----- @@ -4406,8 +4406,8 @@ cdef class Generator: alpha_arr = <np.ndarray>np.PyArray_FROMANY( alpha, np.NPY_DOUBLE, 1, 1, np.NPY_ARRAY_ALIGNED | np.NPY_ARRAY_C_CONTIGUOUS) - if np.any(np.less_equal(alpha_arr, 0)): - raise ValueError('alpha <= 0') + if np.any(np.less(alpha_arr, 0)): + raise ValueError('alpha < 0') alpha_data = <double*>np.PyArray_DATA(alpha_arr) if size is None: diff --git a/numpy/random/tests/test_randomstate.py b/numpy/random/tests/test_randomstate.py index 8b911cb3a..3a2961098 100644 --- a/numpy/random/tests/test_randomstate.py +++ b/numpy/random/tests/test_randomstate.py @@ -812,6 +812,10 @@ class TestRandomDist: alpha = np.array([5.4e-01, -1.0e-16]) assert_raises(ValueError, random.dirichlet, alpha) + def test_dirichlet_zero_alpha(self): + y = random.default_rng().dirichlet([5, 9, 0, 8]) + assert_equal(y[2], 0) + def test_dirichlet_alpha_non_contiguous(self): a = np.array([51.72840233779265162, -1.0, 39.74494232180943953]) alpha = a[::2] diff --git a/numpy/tests/test_public_api.py b/numpy/tests/test_public_api.py index 108ba0b74..e0681426e 100644 --- a/numpy/tests/test_public_api.py +++ b/numpy/tests/test_public_api.py @@ -248,7 +248,6 @@ PRIVATE_BUT_PRESENT_MODULES = ['numpy.' 
+ s for s in [
     "distutils.numpy_distribution",
     "distutils.pathccompiler",
     "distutils.unixccompiler",
-    "dual",
     "f2py.auxfuncs",
     "f2py.capi_maps",
     "f2py.cb_rules",
diff --git a/numpy/typing/tests/data/pass/ndarray_misc.py b/numpy/typing/tests/data/pass/ndarray_misc.py
index 19a1af9e2..6beacc5d7 100644
--- a/numpy/typing/tests/data/pass/ndarray_misc.py
+++ b/numpy/typing/tests/data/pass/ndarray_misc.py
@@ -150,7 +150,7 @@ A.argpartition([0])
 A.diagonal()
 
 A.dot(1)
-A.dot(1, out=B0)
+A.dot(1, out=B2)
 
 A.nonzero()
diff --git a/pyproject.toml b/pyproject.toml
index 1b0e86023..903a99bca 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -163,9 +163,9 @@ environment = { CFLAGS="-std=c99 -fno-strict-aliasing", LDFLAGS="-Wl,--strip-deb
 # https://github.com/multi-build/multibuild/blame/devel/README.rst#L541-L565
 # for more info
 archs = "x86_64 arm64"
-test-skip = "*_arm64 *_universal2:arm64"
+test-skip = "*_universal2:arm64"
 # MACOS linker doesn't support stripping symbols
-environment = { CFLAGS="-std=c99 -fno-strict-aliasing", OPENBLAS64_="/usr/local", NPY_USE_BLAS_ILP64="1", CC="clang", CXX = "clang++" }
+environment = { CFLAGS="-std=c99 -fno-strict-aliasing", OPENBLAS64_="/usr/local", NPY_USE_BLAS_ILP64="1", CC="clang", CXX = "clang++", RUNNER_OS="macOS" }
 
 [tool.cibuildwheel.windows]
 environment = { OPENBLAS64_="openblas", OPENBLAS="", NPY_USE_BLAS_ILP64="1", CFLAGS="", LDFLAGS="" }
@@ -12,7 +12,9 @@ import textwrap
 import warnings
 import builtins
 import re
+import tempfile
 
+from distutils.errors import CompileError
 
 # Python supported version checks. Keep right after stdlib imports to ensure we
 # get a sensible error for older Python versions
@@ -184,45 +186,135 @@ class sdist_checked(cmdclass['sdist']):
 
 def get_build_overrides():
     """
-    Custom build commands to add `-std=c99` to compilation
+    Custom build commands that add the required C/C++ standard flags to compilation
     """
     from numpy.distutils.command.build_clib import build_clib
     from numpy.distutils.command.build_ext import build_ext
     from numpy._utils import _pep440
 
-    def _needs_gcc_c99_flag(obj):
-        if obj.compiler.compiler_type != 'unix':
-            return False
+    def try_compile(compiler, file, flags=[], verbose=False):
+        # Treat warnings as errors so that unsupported flags fail the probe
+        # instead of merely warning (e.g. on Travis CI)
+        if getattr(compiler, 'compiler_type', '') == 'unix':
+            flags = ['-Werror'] + flags
+        bk_ver = getattr(compiler, 'verbose', False)
+        compiler.verbose = verbose
+        try:
+            compiler.compile([file], extra_postargs=flags)
+            return True, ''
+        except CompileError as e:
+            return False, str(e)
+        finally:
+            compiler.verbose = bk_ver
+
+    def flags_is_required(compiler, is_cpp, flags, code):
+        if is_cpp:
+            compiler = compiler.cxx_compiler()
+            suf = '.cpp'
+        else:
+            suf = '.c'
+        with tempfile.TemporaryDirectory() as temp_dir:
+            tmp_file = os.path.join(temp_dir, "test" + suf)
+            with open(tmp_file, "w+") as f:
+                f.write(code)
+            # First try without any flags: if the compiler already supports
+            # the required standard by default, there is no need to pass
+            # the flags.
+            comp = try_compile(compiler, tmp_file)
+            if not comp[0]:
+                comp = try_compile(compiler, tmp_file, flags)
+                if not comp[0]:
+                    # rerun verbosely to report the error
+                    try_compile(compiler, tmp_file, flags, True)
+                    if is_cpp:
+                        raise RuntimeError(
+                            "Broken toolchain while testing the C++ compiler.\n"
+                            "A compiler with support for C++17 language "
+                            "features is required.\n"
+                            f"Triggered the following error: {comp[1]}."
+                        )
+                    else:
+                        raise RuntimeError(
+                            "Broken toolchain while testing the C compiler.\n"
+                            "A compiler with support for C99 language "
+                            "features is required.\n"
+                            f"Triggered the following error: {comp[1]}."
+                        )
+                return True
+        return False
 
-        cc = obj.compiler.compiler[0]
-        if "gcc" not in cc:
-            return False
-
-        # will print something like '4.2.1\n'
-        out = subprocess.run([cc, '-dumpversion'],
-                             capture_output=True, text=True)
-        # -std=c99 is default from this version on
-        if _pep440.parse(out.stdout) >= _pep440.Version('5.0'):
-            return False
-        return True
+    def std_cxx_flags(cmd):
+        compiler = cmd.compiler
+        flags = getattr(compiler, '__np_cache_cpp_flags', None)
+        if flags is not None:
+            return flags
+        flags = dict(
+            msvc = ['/std:c++17']
+        ).get(compiler.compiler_type, ['-std=c++17'])
+        # These flags are used to compile any C++ source within NumPy.
+        # They are chosen to have very few runtime dependencies.
+        extra_flags = dict(
+            # make the __cplusplus macro report the enabled C++ version
+            msvc = ['/Zc:__cplusplus']
+        ).get(compiler.compiler_type, [
+            # The following flags avoid emitting any extra code from the
+            # STL, since extensions are built by the C linker and carry
+            # no C++ runtime dependencies.
+            '-fno-threadsafe-statics',
+            '-D__STDC_VERSION__=0',  # for compatibility with C headers
+            '-fno-exceptions',  # no exception support
+            '-fno-rtti'  # no runtime type information
+        ])
+        if not flags_is_required(compiler, True, flags, textwrap.dedent('''
+            #include <type_traits>
+            template<typename ...T>
+            constexpr bool test_fold = (... && std::is_const_v<T>);
+            int main()
+            {
+                if constexpr (test_fold<int, const int>) {
+                    return 0;
+                }
+                else {
+                    return -1;
+                }
+            }
+        ''')):
+            flags.clear()
+        flags += extra_flags
+        setattr(compiler, '__np_cache_cpp_flags', flags)
+        return flags
+
+    def std_c_flags(cmd):
+        compiler = cmd.compiler
+        flags = getattr(compiler, '__np_cache_c_flags', None)
+        if flags is not None:
+            return flags
+        flags = dict(
+            msvc = []
+        ).get(compiler.compiler_type, ['-std=c99'])
+
+        if not flags_is_required(compiler, False, flags, textwrap.dedent('''
+            inline static int test_inline() { return 0; }
+            int main(void)
+            { return test_inline(); }
+        ''')):
+            flags.clear()
+
+        setattr(compiler, '__np_cache_c_flags', flags)
+        return flags
 
     class new_build_clib(build_clib):
         def build_a_library(self, build_info, lib_name, libraries):
-            from numpy.distutils.ccompiler_opt import NPY_CXX_FLAGS
-            if _needs_gcc_c99_flag(self):
-                build_info['extra_cflags'] = ['-std=c99']
-            build_info['extra_cxxflags'] = NPY_CXX_FLAGS
+            build_info['extra_cflags'] = std_c_flags(self)
+            build_info['extra_cxxflags'] = std_cxx_flags(self)
             build_clib.build_a_library(self, build_info, lib_name, libraries)
 
     class new_build_ext(build_ext):
         def build_extension(self, ext):
-            if _needs_gcc_c99_flag(self):
-                if '-std=c99' not in ext.extra_compile_args:
-                    ext.extra_compile_args.append('-std=c99')
+            ext.extra_c_compile_args += std_c_flags(self)
+            ext.extra_cxx_compile_args += std_cxx_flags(self)
             build_ext.build_extension(self, ext)
     return new_build_clib, new_build_ext
 
-
 def generate_cython():
     # Check Cython version
     from numpy._utils import _pep440
diff --git a/site.cfg.example b/site.cfg.example
index 941917867..5d36922ea 100644
--- a/site.cfg.example
+++ b/site.cfg.example
@@ -225,9 +225,10 @@
 #
 # UMFPACK is not used by NumPy.
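The `flags_is_required` probe above simply test-compiles a snippet, first without and then with the candidate flag. The same idea can be sketched outside distutils; this is a minimal illustration assuming a `gcc` on the PATH (the helper name and snippet are hypothetical, not from the patch):

    import os
    import subprocess
    import tempfile

    # Illustrative sketch (assumes `gcc` is available): compile a snippet
    # with -Werror, first without the candidate flag and then with it,
    # mirroring the probe used by setup.py above.
    def flag_is_required(flag, code, suffix=".c"):
        with tempfile.TemporaryDirectory() as tmp:
            src = os.path.join(tmp, "probe" + suffix)
            with open(src, "w") as f:
                f.write(code)
            out = os.path.join(tmp, "probe.o")
            base = ["gcc", "-Werror", "-c", src, "-o", out]
            if subprocess.run(base, capture_output=True).returncode == 0:
                return False  # supported by default; no flag needed
            ok = subprocess.run(base + [flag], capture_output=True).returncode == 0
            if not ok:
                raise RuntimeError(f"snippet does not compile even with {flag}")
            return True

    code = "inline static int t(void) { return 0; }\nint main(void) { return t(); }\n"
    print(flag_is_required("-std=c99", code))

Probing rather than version-sniffing (the old `-dumpversion` check) is what lets the same logic work for gcc, clang, and MSVC alike.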
# -# https://www.cise.ufl.edu/research/sparse/umfpack/ -# https://www.cise.ufl.edu/research/sparse/amd/ +# https://github.com/DrTimothyAldenDavis/SuiteSparse/tree/dev/UMFPACK +# https://github.com/DrTimothyAldenDavis/SuiteSparse/tree/dev/AMD # https://scikit-umfpack.github.io/scikit-umfpack/ +# https://people.engr.tamu.edu/davis/suitesparse.html # #[amd] #libraries = amd @@ -1,19 +0,0 @@ -#!/usr/bin/env python -# -# Example stub for running `python -m spin` -# -# Copy this into your project root. - -import os -import sys -import runpy - -sys.path.remove(os.path.abspath(os.path.dirname(sys.argv[0]))) -try: - runpy.run_module("spin", run_name="__main__") -except ImportError: - print("Cannot import spin; please install it using") - print() - print(" pip install spin") - print() - sys.exit(1) diff --git a/tools/ci/cirrus_macosx_arm64.yml b/tools/ci/cirrus_macosx_arm64.yml new file mode 100644 index 000000000..2343b807f --- /dev/null +++ b/tools/ci/cirrus_macosx_arm64.yml @@ -0,0 +1,55 @@ +modified_clone: &MODIFIED_CLONE + # makes sure that for a PR the CI runs against a merged main + clone_script: | + if [ -z "$CIRRUS_PR" ]; then + # if you're not in a PR then clone against the branch name that was pushed to. + git clone --recursive --branch=$CIRRUS_BRANCH https://x-access-token:${CIRRUS_REPO_CLONE_TOKEN}@github.com/${CIRRUS_REPO_FULL_NAME}.git $CIRRUS_WORKING_DIR + git reset --hard $CIRRUS_CHANGE_IN_REPO + else + # it's a PR so clone the main branch then merge the changes from the PR + git clone https://x-access-token:${CIRRUS_REPO_CLONE_TOKEN}@github.com/${CIRRUS_REPO_FULL_NAME}.git $CIRRUS_WORKING_DIR + git fetch origin pull/$CIRRUS_PR/head:pull/$CIRRUS_PR + + # CIRRUS_BASE_BRANCH will probably be `main` for the majority of the time + # However, if you do a PR against a maintenance branch we will want to + # merge the PR into the maintenance branch, not main + git checkout $CIRRUS_BASE_BRANCH + + # alpine git package needs default user.name and user.email to be set before a merge + git -c user.email="you@example.com" merge --no-commit pull/$CIRRUS_PR + git submodule update --init --recursive + fi + + +macos_arm64_test_task: + macos_instance: + image: ghcr.io/cirruslabs/macos-monterey-xcode:14 + + <<: *MODIFIED_CLONE + + pip_cache: + folder: ~/.cache/pip + + test_script: | + brew install python@3.10 + + export PATH=/opt/homebrew/opt/python@3.10/libexec/bin:$PATH + python --version + + RUNNER_OS="macOS" + CFLAGS="-std=c99 -fno-strict-aliasing" + SDKROOT=/Applications/Xcode-14.0.0.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX12.3.sdk + + # used for installing OpenBLAS/gfortran + bash tools/wheels/cibw_before_build.sh $PWD + + pushd ~/ + python -m venv numpy-dev + source numpy-dev/bin/activate + popd + + pip install -r build_requirements.txt + pip install pytest hypothesis typing_extensions + + spin build + spin test diff --git a/tools/ci/cirrus_general.yml b/tools/ci/cirrus_wheels.yml index c21bfd615..60512afab 100644 --- a/tools/ci/cirrus_general.yml +++ b/tools/ci/cirrus_wheels.yml @@ -1,6 +1,6 @@ build_and_store_wheels: &BUILD_AND_STORE_WHEELS install_cibuildwheel_script: - - python -m pip install cibuildwheel==2.12.0 + - python -m pip install cibuildwheel==2.12.1 cibuildwheel_script: - cibuildwheel wheels_artifacts: @@ -43,6 +43,50 @@ linux_aarch64_task: ###################################################################### +# Build macosx_arm64 natively +###################################################################### + +macosx_arm64_task: + macos_instance: 
+ image: ghcr.io/cirruslabs/macos-monterey-xcode:14 + matrix: + - env: + CIRRUS_CLONE_SUBMODULES: true + CIBW_BUILD: cp39-* + - env: + CIRRUS_CLONE_SUBMODULES: true + CIBW_BUILD: cp310-* cp311-* + env: + PATH: /opt/homebrew/opt/python@3.10/bin:/usr/local/lib:/usr/local/include:$PATH + CIBW_ARCHS: arm64 + # Specifying CIBW_ENVIRONMENT_MACOS overrides pyproject.toml, so include + # all the settings from there, otherwise they're lost. + # SDKROOT needs to be set for repackaged conda-forge gfortran compilers + # supplied by isuruf. + # Find out SDKROOT via `xcrun --sdk macosx --show-sdk-path` + CIBW_ENVIRONMENT_MACOS: > + RUNNER_OS=macOS + SDKROOT=/Applications/Xcode-14.0.0.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX12.3.sdk + LD_LIBRARY_PATH=/usr/local/lib:$LD_LIBRARY_PATH + CFLAGS="-std=c99 -fno-strict-aliasing" + OPENBLAS64_="/usr/local" + NPY_USE_BLAS_ILP64="1" + + build_script: + - brew install python@3.10 + - ln -s python3 /opt/homebrew/opt/python@3.10/bin/python + - which python + # needed for submodules + - git submodule update --init + # need to obtain all the tags so setup.py can determine FULLVERSION + - git fetch origin + - uname -m + - python -c "import platform;print(platform.python_version());print(platform.system());print(platform.machine())" + - clang --version + <<: *BUILD_AND_STORE_WHEELS + + +###################################################################### # Upload all wheels ###################################################################### @@ -53,6 +97,7 @@ wheels_upload_task: # which bash, etc, may not be present. depends_on: - linux_aarch64 + - macosx_arm64 compute_engine_instance: image_project: cirrus-images image: family/docker-builder diff --git a/tools/gitpod/Dockerfile b/tools/gitpod/Dockerfile deleted file mode 100644 index 1ff9076cd..000000000 --- a/tools/gitpod/Dockerfile +++ /dev/null @@ -1,101 +0,0 @@ -# -# Dockerfile for NumPy development -# -# Usage: -# ------- -# -# To make a local build of the container, from the 'Docker-dev' directory: -# docker build --rm -f "Dockerfile" -t <build-tag> "." -# -# To use the container use the following command. It assumes that you are in -# the root folder of the NumPy git repository, making it available as -# /home/numpy in the container. Whatever changes you make to that directory -# are visible in the host and container. 
-# The docker image is retrieved from the NumPy dockerhub repository -# -# docker run --rm -it -v $(pwd):/home/numpy numpy/numpy-dev:<image-tag> -# -# By default the container will activate the conda environment numpy-dev -# which contains all the dependencies needed for NumPy development -# -# To build NumPy run: python setup.py build_ext --inplace -# -# To run the tests use: python runtests.py -# -# This image is based on: Ubuntu 20.04 (focal) -# https://hub.docker.com/_/ubuntu/?tab=tags&name=focal -# OS/ARCH: linux/amd64 -FROM gitpod/workspace-base:latest@sha256:770d3022db71512bdd1b7fdc06983f17cfc956342853e315d2d1c0ab39216a36 - -ARG MAMBAFORGE_VERSION="4.11.0-0" -ARG CONDA_ENV=numpy-dev - - -# ---- Configure environment ---- -ENV CONDA_DIR=/home/gitpod/mambaforge3 \ - SHELL=/bin/bash -ENV PATH=${CONDA_DIR}/bin:$PATH \ - WORKSPACE=/workspace/numpy - - -# ----------------------------------------------------------------------------- -# ---- Creating as root - note: make sure to change to gitpod in the end ---- -USER root - -# hadolint ignore=DL3008 -RUN apt-get update && \ - apt-get install -yq --no-install-recommends \ - ca-certificates \ - dirmngr \ - dvisvgm \ - gnupg \ - gpg-agent \ - texlive-latex-extra \ - vim && \ - # this needs to be done after installing dirmngr - apt-key adv --keyserver keyserver.ubuntu.com --recv-key 23F3D4EA75716059 && \ - apt-add-repository https://cli.github.com/packages && \ - apt-get install -yq --no-install-recommends \ - gh && \ - locale-gen en_US.UTF-8 && \ - apt-get clean && \ - rm -rf /var/cache/apt/* &&\ - rm -rf /var/lib/apt/lists/* &&\ - rm -rf /tmp/* - -# Allows this Dockerfile to activate conda environments -SHELL ["/bin/bash", "--login", "-o", "pipefail", "-c"] - -# ----------------------------------------------------------------------------- -# ---- Installing mamba ---- -RUN wget -q -O mambaforge3.sh \ - "https://github.com/conda-forge/miniforge/releases/download/$MAMBAFORGE_VERSION/Mambaforge-$MAMBAFORGE_VERSION-Linux-x86_64.sh" && \ - bash mambaforge3.sh -p ${CONDA_DIR} -b && \ - rm mambaforge3.sh - -# ----------------------------------------------------------------------------- -# ---- Copy needed files ---- -# basic workspace configurations -COPY ./tools/gitpod/workspace_config /usr/local/bin/workspace_config - -RUN chmod a+rx /usr/local/bin/workspace_config && \ - workspace_config - -# Copy conda environment file into the container - this needs to exists inside -# the container to create a conda environment from it -COPY environment.yml /tmp/environment.yml - -# ----------------------------------------------------------------------------- -# ---- Create conda environment ---- -# Install NumPy dependencies -RUN mamba env create -f /tmp/environment.yml && \ - conda activate ${CONDA_ENV} && \ - mamba install ccache -y && \ - # needed for docs rendering later on - python -m pip install --no-cache-dir sphinx-autobuild && \ - conda clean --all -f -y && \ - rm -rf /tmp/* - -# ----------------------------------------------------------------------------- -# Always make sure we are not root -USER gitpod
\ No newline at end of file diff --git a/tools/gitpod/gitpod.Dockerfile b/tools/gitpod/gitpod.Dockerfile deleted file mode 100644 index 7c369ac49..000000000 --- a/tools/gitpod/gitpod.Dockerfile +++ /dev/null @@ -1,51 +0,0 @@ -# Doing a local shallow clone - keeps the container secure -# and much slimmer than using COPY directly or making a -# remote clone -ARG BASE_CONTAINER="numpy/numpy-dev:latest" -FROM gitpod/workspace-base:latest as clone - -COPY --chown=gitpod . /tmp/numpy_repo - -# the clone should be deep enough for versioneer to work -RUN git clone --shallow-since=2021-05-22 file:////tmp/numpy_repo /tmp/numpy - -# ----------------------------------------------------------------------------- -# Using the numpy-dev Docker image as a base -# This way, we ensure we have all the needed compilers and dependencies -# while reducing the build time -FROM ${BASE_CONTAINER} as build - -# ----------------------------------------------------------------------------- -USER root - -# ----------------------------------------------------------------------------- -# ---- ENV variables ---- -# ---- Directories needed ---- -ENV WORKSPACE=/workspace/numpy/ \ - CONDA_ENV=numpy-dev - -# Allows this Dockerfile to activate conda environments -SHELL ["/bin/bash", "--login", "-o", "pipefail", "-c"] - -# Copy over the shallow clone -COPY --from=clone --chown=gitpod /tmp/numpy ${WORKSPACE} - -# Everything happens in the /workspace/numpy directory -WORKDIR ${WORKSPACE} - -# Build numpy to populate the cache used by ccache -# Note, hadolint suggests consolidating the RUN commands. That info -# level complaint (DL3059) is currently ignored to avoid errors. -RUN git config --global --add safe.directory /workspace/numpy -RUN git submodule update --init --depth=1 -- numpy/core/src/umath/svml numpy/core/src/npysort/x86-simd-sort -RUN conda activate ${CONDA_ENV} && \ - python setup.py build_ext --inplace && \ - ccache -s - -# Gitpod will load the repository into /workspace/numpy. We remove the -# directory from the image to prevent conflicts -RUN rm -rf ${WORKSPACE} - -# ----------------------------------------------------------------------------- -# Always return to non privileged user -USER gitpod diff --git a/tools/gitpod/settings.json b/tools/gitpod/settings.json deleted file mode 100644 index 50296336d..000000000 --- a/tools/gitpod/settings.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "restructuredtext.updateOnTextChanged": "true", - "restructuredtext.updateDelay": 300, - "restructuredtext.linter.disabledLinters": ["doc8","rst-lint", "rstcheck"], - "python.defaultInterpreterPath": "/home/gitpod/mambaforge3/envs/numpy-dev/bin/python", - "esbonio.sphinx.buildDir": "${workspaceRoot}/doc/build/html", - "esbonio.sphinx.confDir": "" -}
\ No newline at end of file diff --git a/tools/gitpod/workspace_config b/tools/gitpod/workspace_config deleted file mode 100644 index aa859c9be..000000000 --- a/tools/gitpod/workspace_config +++ /dev/null @@ -1,58 +0,0 @@ -#!/bin/bash -# Basic configurations for the workspace - -set -e - -# gitpod/workspace-base needs at least one file here -touch /home/gitpod/.bashrc.d/empty - -# Add git aliases -git config --global alias.co checkout -git config --global alias.ci commit -git config --global alias.st status -git config --global alias.br branch -git config --global alias.hist "log --pretty=format:'%h %ad | %s%d [%an]' --graph --date=short" -git config --global alias.type 'cat-file -t' -git config --global alias.dump 'cat-file -p' - -# Enable basic vim defaults in ~/.vimrc -echo "filetype plugin indent on" >>~/.vimrc -echo "set colorcolumn=80" >>~/.vimrc -echo "set number" >>~/.vimrc -echo "syntax enable" >>~/.vimrc - -# Vanity custom bash prompt - makes it more legible -echo "PS1='\[\e]0;\u \w\a\]\[\033[01;36m\]\u\[\033[m\] > \[\033[38;5;141m\]\w\[\033[m\] \\$ '" >>~/.bashrc - -# Enable prompt color in the skeleton .bashrc -# hadolint ignore=SC2016 -sed -i 's/^#force_color_prompt=yes/force_color_prompt=yes/' /etc/skel/.bashrc - -# .gitpod.yml is configured to install NumPy from /workspace/numpy -echo "export PYTHONPATH=${WORKSPACE}" >>~/.bashrc - -# make conda activate command available from /bin/bash (login and interactive) -if [[ ! -f "/etc/profile.d/conda.sh" ]]; then - ln -s ${CONDA_DIR}/etc/profile.d/conda.sh /etc/profile.d/conda.sh -fi -echo ". ${CONDA_DIR}/etc/profile.d/conda.sh" >>~/.bashrc -echo "conda activate numpy-dev" >>~/.bashrc - -# Enable prompt color in the skeleton .bashrc -# hadolint ignore=SC2016 -sed -i 's/^#force_color_prompt=yes/force_color_prompt=yes/' /etc/skel/.bashrc - -# .gitpod.yml is configured to install numpy from /workspace/numpy -echo "export PYTHONPATH=/workspace/numpy" >>~/.bashrc - -# Set up ccache for compilers for this Dockerfile -# REF: https://github.com/conda-forge/compilers-feedstock/issues/31 -echo "conda activate numpy-dev" >>~/.startuprc -echo "export CC=\"ccache \$CC\"" >>~/.startuprc -echo "export CXX=\"ccache \$CXX\"" >>~/.startuprc -echo "export F77=\"ccache \$F77\"" >>~/.startuprc -echo "export F90=\"ccache \$F90\"" >>~/.startuprc -echo "export GFORTRAN=\"ccache \$GFORTRAN\"" >>~/.startuprc -echo "export FC=\"ccache \$FC\"" >>~/.startuprc -echo "source ~/.startuprc" >>~/.profile -echo "source ~/.startuprc" >>~/.bashrc diff --git a/tools/wheels/cibw_before_build.sh b/tools/wheels/cibw_before_build.sh index 62d4e3f7e..493cceeae 100644 --- a/tools/wheels/cibw_before_build.sh +++ b/tools/wheels/cibw_before_build.sh @@ -15,13 +15,18 @@ fi # Install Openblas if [[ $RUNNER_OS == "Linux" || $RUNNER_OS == "macOS" ]] ; then basedir=$(python tools/openblas_support.py) - cp -r $basedir/lib/* /usr/local/lib - cp $basedir/include/* /usr/local/include if [[ $RUNNER_OS == "macOS" && $PLATFORM == "macosx-arm64" ]]; then + # /usr/local/lib doesn't exist on cirrus-ci runners + sudo mkdir -p /usr/local/lib /usr/local/include /usr/local/lib/cmake/openblas sudo mkdir -p /opt/arm64-builds/lib /opt/arm64-builds/include sudo chown -R $USER /opt/arm64-builds cp -r $basedir/lib/* /opt/arm64-builds/lib cp $basedir/include/* /opt/arm64-builds/include + sudo cp -r $basedir/lib/* /usr/local/lib + sudo cp $basedir/include/* /usr/local/include + else + cp -r $basedir/lib/* /usr/local/lib + cp $basedir/include/* /usr/local/include fi elif [[ $RUNNER_OS == "Windows" ]]; then 
PYTHONPATH=tools python -c "import openblas_support; openblas_support.make_init('numpy')" diff --git a/tools/wheels/gfortran_utils.sh b/tools/wheels/gfortran_utils.sh index 39357b3fe..9102ba127 100644 --- a/tools/wheels/gfortran_utils.sh +++ b/tools/wheels/gfortran_utils.sh @@ -123,7 +123,7 @@ if [ "$(uname)" == "Darwin" ]; then curl -L -O https://github.com/isuruf/gcc/releases/download/gcc-11.3.0-2/gfortran-darwin-${arch}-${type}.tar.gz case ${arch}-${type} in arm64-native) - export GFORTRAN_SHA=142290685240f4f86cdf359cb2030d586105d7e4 + export GFORTRAN_SHA=0d5c118e5966d0fb9e7ddb49321f63cac1397ce8 ;; arm64-cross) export GFORTRAN_SHA=527232845abc5af21f21ceacc46fb19c190fe804 @@ -148,10 +148,10 @@ if [ "$(uname)" == "Darwin" ]; then if [[ "${type}" == "native" ]]; then # Link these into /usr/local so that there's no need to add rpath or -L for f in libgfortran.dylib libgfortran.5.dylib libgcc_s.1.dylib libgcc_s.1.1.dylib libquadmath.dylib libquadmath.0.dylib; do - ln -sf /opt/gfortran-darwin-${arch}-${type}/lib/$f /usr/local/lib/$f + sudo ln -sf /opt/gfortran-darwin-${arch}-${type}/lib/$f /usr/local/lib/$f done # Add it to PATH - ln -sf /opt/gfortran-darwin-${arch}-${type}/bin/gfortran /usr/local/bin/gfortran + sudo ln -sf /opt/gfortran-darwin-${arch}-${type}/bin/gfortran /usr/local/bin/gfortran fi } |
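The `GFORTRAN_SHA` values above pin the expected digest of each downloaded gfortran archive. A sketch of how such a pin can be checked before unpacking; the 40-hex-digit width of the pin matches SHA-1, but the algorithm, helper name, and file name below are assumptions for illustration, not taken from the build scripts:

    import hashlib

    # Illustrative sketch: verify a downloaded archive against a pinned
    # digest before unpacking. A 40-hex-digit pin is SHA-1 sized; adjust
    # the algorithm if the build scripts actually use a different one.
    def verify_download(path, expected_hex, algo="sha1"):
        h = hashlib.new(algo)
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                h.update(chunk)
        if h.hexdigest() != expected_hex:
            raise RuntimeError(f"digest mismatch: got {h.hexdigest()}")

    # Hypothetical usage with the arm64-native pin updated above:
    # verify_download("gfortran-darwin-arm64-native.tar.gz",
    #                 "0d5c118e5966d0fb9e7ddb49321f63cac1397ce8")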