-rw-r--r--  .github/dependabot.yml  9
-rw-r--r--  .github/workflows/build_test.yml  59
-rw-r--r--  .github/workflows/codeql.yml  8
-rw-r--r--  .github/workflows/cygwin.yml  2
-rw-r--r--  .github/workflows/dependency-review.yml  2
-rw-r--r--  .github/workflows/docker.yml  61
-rw-r--r--  .github/workflows/emscripten.yml  2
-rw-r--r--  .github/workflows/gitpod.yml  61
-rw-r--r--  .github/workflows/linux_meson.yml  6
-rw-r--r--  .github/workflows/linux_musl.yml  4
-rw-r--r--  .github/workflows/scorecards.yml  6
-rw-r--r--  .github/workflows/wheels.yml  6
-rw-r--r--  .github/workflows/windows_meson.yml  2
-rw-r--r--  .gitpod.yml  66
-rw-r--r--  .hadolint.yaml  7
-rw-r--r--  benchmarks/README.rst  2
-rw-r--r--  benchmarks/benchmarks/__init__.py  2
-rw-r--r--  build_requirements.txt  2
-rw-r--r--  building_with_meson.md  8
-rw-r--r--  doc/neps/nep-0013-ufunc-overrides.rst  11
-rw-r--r--  doc/records.rst  2
-rw-r--r--  doc/release/upcoming_changes/21120.new_feature.rst  21
-rw-r--r--  doc/release/upcoming_changes/23240.compatibility.rst  10
-rw-r--r--  doc/release/upcoming_changes/23322.improvement.rst  4
-rw-r--r--  doc/release/upcoming_changes/23480.expired.rst  1
-rw-r--r--  doc/source/dev/development_environment.rst  23
-rw-r--r--  doc/source/dev/development_gitpod.rst  271
-rw-r--r--  doc/source/dev/gitpod-imgs/NumPy-github.png  bin 5368 -> 0 bytes
-rw-r--r--  doc/source/dev/gitpod-imgs/NumPy-gitpod-branches.png  bin 138394 -> 0 bytes
-rw-r--r--  doc/source/dev/gitpod-imgs/gitpod-dashboard-stop.png  bin 14185 -> 0 bytes
-rw-r--r--  doc/source/dev/gitpod-imgs/gitpod-edit-permissions-gh.png  bin 23308 -> 0 bytes
-rw-r--r--  doc/source/dev/gitpod-imgs/gitpod-edit-permissions-repo.png  bin 5505 -> 0 bytes
-rw-r--r--  doc/source/dev/gitpod-imgs/gitpod-workspace.png  bin 201069 -> 0 bytes
-rw-r--r--  doc/source/dev/gitpod-imgs/installing-gitpod-io.png  bin 33186 -> 0 bytes
-rw-r--r--  doc/source/dev/gitpod-imgs/rst-rendering.png  bin 228437 -> 0 bytes
-rw-r--r--  doc/source/dev/gitpod-imgs/vscode-rst.png  bin 16443 -> 0 bytes
-rw-r--r--  doc/source/dev/gitpod-imgs/vscode-statusbar.png  bin 4492 -> 0 bytes
-rw-r--r--  doc/source/dev/howto_build_docs.rst  23
-rw-r--r--  doc/source/dev/index.rst  1
-rw-r--r--  doc/source/f2py/windows/index.rst  2
-rw-r--r--  doc/source/f2py/windows/pgi.rst  2
-rw-r--r--  doc/source/reference/arrays.classes.rst  5
-rw-r--r--  doc/source/reference/arrays.scalars.rst  4
-rw-r--r--  doc/source/reference/c-api/array.rst  43
-rw-r--r--  doc/source/reference/c-api/dtype.rst  194
-rw-r--r--  doc/source/reference/c-api/types-and-structures.rst  4
-rw-r--r--  doc/source/reference/random/extending.rst  4
-rw-r--r--  doc/source/reference/routines.dual.rst  47
-rw-r--r--  doc/source/reference/routines.ma.rst  15
-rw-r--r--  doc/source/reference/routines.rst  1
-rw-r--r--  doc/source/user/basics.creation.rst  4
-rw-r--r--  doc/source/user/quickstart.rst  2
-rw-r--r--  meson.build  2
-rw-r--r--  numpy/__init__.cython-30.pxd  1
-rw-r--r--  numpy/__init__.pxd  1
-rw-r--r--  numpy/__init__.py  4
-rw-r--r--  numpy/__init__.pyi  14
-rw-r--r--  numpy/array_api/_array_object.py  14
-rw-r--r--  numpy/core/_add_newdocs.py  4
-rw-r--r--  numpy/core/einsumfunc.py  2
-rw-r--r--  numpy/core/fromnumeric.py  23
-rw-r--r--  numpy/core/setup.py  44
-rw-r--r--  numpy/core/src/common/ucsnarrow.h  2
-rw-r--r--  numpy/core/src/multiarray/array_coercion.c  9
-rw-r--r--  numpy/core/src/multiarray/buffer.c  14
-rw-r--r--  numpy/core/src/multiarray/convert.c  3
-rw-r--r--  numpy/core/src/multiarray/ctors.c  100
-rw-r--r--  numpy/core/src/multiarray/ctors.h  10
-rw-r--r--  numpy/core/src/multiarray/methods.c  14
-rw-r--r--  numpy/core/src/multiarray/multiarraymodule.c  142
-rw-r--r--  numpy/core/src/multiarray/multiarraymodule.h  1
-rw-r--r--  numpy/core/src/multiarray/number.c  72
-rw-r--r--  numpy/core/src/npymath/npy_math_private.h  4
-rw-r--r--  numpy/core/src/umath/override.c  16
-rw-r--r--  numpy/core/src/umath/override.h  2
-rw-r--r--  numpy/core/src/umath/ufunc_object.c  9
-rw-r--r--  numpy/core/tests/test_array_coercion.py  18
-rw-r--r--  numpy/core/tests/test_custom_dtypes.py  6
-rw-r--r--  numpy/core/tests/test_multiarray.py  104
-rw-r--r--  numpy/core/tests/test_overrides.py  13
-rw-r--r--  numpy/core/tests/test_ufunc.py  11
-rw-r--r--  numpy/core/tests/test_umath.py  73
-rw-r--r--  numpy/distutils/ccompiler_opt.py  11
-rw-r--r--  numpy/distutils/command/build_clib.py  1
-rw-r--r--  numpy/distutils/command/build_ext.py  1
-rw-r--r--  numpy/distutils/fcompiler/absoft.py  4
-rw-r--r--  numpy/dual.py  83
-rw-r--r--  numpy/exceptions.py  2
-rwxr-xr-x  numpy/f2py/crackfortran.py  2
-rw-r--r--  numpy/f2py/tests/test_crackfortran.py  49
-rw-r--r--  numpy/lib/format.py  14
-rw-r--r--  numpy/lib/function_base.py  67
-rw-r--r--  numpy/lib/npyio.py  10
-rw-r--r--  numpy/lib/tests/test_function_base.py  66
-rw-r--r--  numpy/linalg/setup.py  2
-rw-r--r--  numpy/ma/core.py  135
-rw-r--r--  numpy/ma/extras.py  92
-rw-r--r--  numpy/ma/tests/test_extras.py  41
-rw-r--r--  numpy/meson.build  1
-rw-r--r--  numpy/tests/test_public_api.py  1
-rw-r--r--  numpy/typing/tests/data/pass/ndarray_misc.py  2
-rwxr-xr-x  setup.py  138
-rwxr-xr-x  spin  19
-rw-r--r--  tools/gitpod/Dockerfile  101
-rw-r--r--  tools/gitpod/gitpod.Dockerfile  51
-rw-r--r--  tools/gitpod/settings.json  8
-rw-r--r--  tools/gitpod/workspace_config  58
107 files changed, 1275 insertions, 1425 deletions
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
index 6bcbdbfcb..b59fe181d 100644
--- a/.github/dependabot.yml
+++ b/.github/dependabot.yml
@@ -4,8 +4,7 @@ updates:
directory: /
schedule:
interval: daily
-
- - package-ecosystem: docker
- directory: /tools/gitpod
- schedule:
- interval: daily
+ commit-message:
+ prefix: "MAINT"
+ labels:
+ - "03 - Maintenance"
diff --git a/.github/workflows/build_test.yml b/.github/workflows/build_test.yml
index 26563ae3e..d43786250 100644
--- a/.github/workflows/build_test.yml
+++ b/.github/workflows/build_test.yml
@@ -1,6 +1,10 @@
name: Build_Test
on:
+ push:
+ branches:
+ # coverage comparison in the "full" step needs to run on main after merges
+ - main
pull_request:
branches:
- main
@@ -23,11 +27,11 @@ permissions:
jobs:
lint:
- if: "github.repository == 'numpy/numpy'"
+ if: github.repository == 'numpy/numpy' && github.event_name != 'push'
runs-on: ubuntu-latest
continue-on-error: true
steps:
- - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
+ - uses: actions/checkout@8f4b7f84864484a7bf31766abe9204da3cbe65b3 # v3.5.0
with:
submodules: recursive
fetch-depth: 0
@@ -47,7 +51,7 @@ jobs:
env:
WITHOUT_SIMD: 1
steps:
- - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
+ - uses: actions/checkout@8f4b7f84864484a7bf31766abe9204da3cbe65b3 # v3.5.0
with:
submodules: recursive
fetch-depth: 0
@@ -59,13 +63,14 @@ jobs:
basic:
needs: [smoke_test]
runs-on: ubuntu-latest
+ if: github.event_name != 'push'
strategy:
matrix:
python-version: ["3.9", "3.10", "3.11", "pypy3.9-v7.3.11"]
env:
EXPECT_CPU_FEATURES: "SSE SSE2 SSE3 SSSE3 SSE41 POPCNT SSE42 AVX F16C FMA3 AVX2 AVX512F AVX512CD AVX512_KNL AVX512_KNM AVX512_SKX AVX512_CLX AVX512_CNL AVX512_ICL"
steps:
- - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
+ - uses: actions/checkout@8f4b7f84864484a7bf31766abe9204da3cbe65b3 # v3.5.0
with:
submodules: recursive
fetch-depth: 0
@@ -78,8 +83,9 @@ jobs:
needs: [smoke_test]
# provides GCC 7, 8
runs-on: ubuntu-20.04
+ if: github.event_name != 'push'
steps:
- - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
+ - uses: actions/checkout@8f4b7f84864484a7bf31766abe9204da3cbe65b3 # v3.5.0
with:
submodules: recursive
fetch-depth: 0
@@ -114,10 +120,11 @@ jobs:
without_optimizations:
needs: [smoke_test]
runs-on: ubuntu-latest
+ if: github.event_name != 'push'
env:
WITHOUT_OPTIMIZATIONS: 1
steps:
- - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
+ - uses: actions/checkout@8f4b7f84864484a7bf31766abe9204da3cbe65b3 # v3.5.0
with:
submodules: recursive
fetch-depth: 0
@@ -129,10 +136,11 @@ jobs:
with_baseline_only:
needs: [smoke_test]
runs-on: ubuntu-latest
+ if: github.event_name != 'push'
env:
CPU_DISPATCH: "none"
steps:
- - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
+ - uses: actions/checkout@8f4b7f84864484a7bf31766abe9204da3cbe65b3 # v3.5.0
with:
submodules: recursive
fetch-depth: 0
@@ -144,10 +152,11 @@ jobs:
without_avx512:
needs: [smoke_test]
runs-on: ubuntu-latest
+ if: github.event_name != 'push'
env:
CPU_DISPATCH: "max -xop -fma4 -avx512f -avx512cd -avx512_knl -avx512_knm -avx512_skx -avx512_clx -avx512_cnl -avx512_icl"
steps:
- - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
+ - uses: actions/checkout@8f4b7f84864484a7bf31766abe9204da3cbe65b3 # v3.5.0
with:
submodules: recursive
fetch-depth: 0
@@ -159,10 +168,11 @@ jobs:
without_avx512_avx2_fma3:
needs: [smoke_test]
runs-on: ubuntu-latest
+ if: github.event_name != 'push'
env:
CPU_DISPATCH: "SSSE3 SSE41 POPCNT SSE42 AVX F16C"
steps:
- - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
+ - uses: actions/checkout@8f4b7f84864484a7bf31766abe9204da3cbe65b3 # v3.5.0
with:
submodules: recursive
fetch-depth: 0
@@ -174,10 +184,11 @@ jobs:
debug:
needs: [smoke_test]
runs-on: ubuntu-latest
+ if: github.event_name != 'push'
env:
USE_DEBUG: 1
steps:
- - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
+ - uses: actions/checkout@8f4b7f84864484a7bf31766abe9204da3cbe65b3 # v3.5.0
with:
submodules: recursive
fetch-depth: 0
@@ -189,10 +200,11 @@ jobs:
blas64:
needs: [smoke_test]
runs-on: ubuntu-latest
+ if: github.event_name != 'push'
env:
NPY_USE_BLAS_ILP64: 1
steps:
- - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
+ - uses: actions/checkout@8f4b7f84864484a7bf31766abe9204da3cbe65b3 # v3.5.0
with:
submodules: recursive
fetch-depth: 0
@@ -210,7 +222,7 @@ jobs:
RUN_COVERAGE: 1
INSTALL_PICKLE5: 1
steps:
- - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
+ - uses: actions/checkout@8f4b7f84864484a7bf31766abe9204da3cbe65b3 # v3.5.0
with:
submodules: recursive
fetch-depth: 0
@@ -222,6 +234,7 @@ jobs:
benchmark:
needs: [smoke_test]
runs-on: ubuntu-latest
+ if: github.event_name != 'push'
env:
PYTHONOPTIMIZE: 2
BLAS: None
@@ -231,7 +244,7 @@ jobs:
NPY_LAPACK_ORDER: MKL,OPENBLAS,ATLAS,LAPACK
USE_ASV: 1
steps:
- - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
+ - uses: actions/checkout@8f4b7f84864484a7bf31766abe9204da3cbe65b3 # v3.5.0
with:
submodules: recursive
fetch-depth: 0
@@ -243,12 +256,13 @@ jobs:
relaxed_strides_debug:
needs: [smoke_test]
runs-on: ubuntu-latest
+ if: github.event_name != 'push'
env:
CHECK_BLAS: 1
NPY_USE_BLAS_ILP64: 1
NPY_RELAXED_STRIDES_DEBUG: 1
steps:
- - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
+ - uses: actions/checkout@8f4b7f84864484a7bf31766abe9204da3cbe65b3 # v3.5.0
with:
submodules: recursive
fetch-depth: 0
@@ -260,10 +274,11 @@ jobs:
use_wheel:
needs: [smoke_test]
runs-on: ubuntu-latest
+ if: github.event_name != 'push'
env:
USE_WHEEL: 1
steps:
- - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
+ - uses: actions/checkout@8f4b7f84864484a7bf31766abe9204da3cbe65b3 # v3.5.0
with:
submodules: recursive
fetch-depth: 0
@@ -275,6 +290,7 @@ jobs:
numpy2_flag:
needs: [smoke_test]
runs-on: ubuntu-latest
+ if: github.event_name != 'push'
env:
# Test for numpy-2.0 feature-flagged behavior.
NPY_NUMPY_2_BEHAVIOR: 1
@@ -282,7 +298,7 @@ jobs:
# currently unfortunately
NPY_PROMOTION_STATE: legacy
steps:
- - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
+ - uses: actions/checkout@8f4b7f84864484a7bf31766abe9204da3cbe65b3 # v3.5.0
with:
submodules: recursive
fetch-depth: 0
@@ -294,13 +310,14 @@ jobs:
no_openblas:
needs: [smoke_test]
runs-on: ubuntu-latest
+ if: github.event_name != 'push'
env:
BLAS: None
LAPACK: None
ATLAS: None
DOWNLOAD_OPENBLAS: ''
steps:
- - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
+ - uses: actions/checkout@8f4b7f84864484a7bf31766abe9204da3cbe65b3 # v3.5.0
with:
submodules: recursive
fetch-depth: 0
@@ -312,10 +329,11 @@ jobs:
sdist:
needs: [smoke_test]
runs-on: ubuntu-latest
+ if: github.event_name != 'push'
env:
USE_SDIST: 1
steps:
- - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
+ - uses: actions/checkout@8f4b7f84864484a7bf31766abe9204da3cbe65b3 # v3.5.0
with:
submodules: recursive
fetch-depth: 0
@@ -328,8 +346,9 @@ jobs:
needs: [smoke_test]
# make sure this matches the base docker image below
runs-on: ubuntu-22.04
+ if: github.event_name != 'push'
steps:
- - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
+ - uses: actions/checkout@8f4b7f84864484a7bf31766abe9204da3cbe65b3 # v3.5.0
with:
submodules: recursive
fetch-depth: 0
@@ -384,7 +403,7 @@ jobs:
needs: [smoke_test]
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
+ - uses: actions/checkout@8f4b7f84864484a7bf31766abe9204da3cbe65b3 # v3.5.0
with:
submodules: recursive
fetch-depth: 0
diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml
index 925d6a1f5..7860716d6 100644
--- a/.github/workflows/codeql.yml
+++ b/.github/workflows/codeql.yml
@@ -41,11 +41,11 @@ jobs:
steps:
- name: Checkout repository
- uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
+ uses: actions/checkout@8f4b7f84864484a7bf31766abe9204da3cbe65b3 # v3.5.0
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
- uses: github/codeql-action/init@168b99b3c22180941ae7dbdd5f5c9678ede476ba # v2.2.7
+ uses: github/codeql-action/init@04df1262e6247151b5ac09cd2c303ac36ad3f62b # v2.2.9
with:
languages: ${{ matrix.language }}
# If you wish to specify custom queries, you can do so here or in a config file.
@@ -55,7 +55,7 @@ jobs:
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
# If this step fails, then you should remove it and run the build manually (see below)
- name: Autobuild
- uses: github/codeql-action/autobuild@168b99b3c22180941ae7dbdd5f5c9678ede476ba # v2.2.7
+ uses: github/codeql-action/autobuild@04df1262e6247151b5ac09cd2c303ac36ad3f62b # v2.2.9
# ℹ️ Command-line programs to run using the OS shell.
# 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
@@ -68,6 +68,6 @@ jobs:
# ./location_of_script_within_repo/buildscript.sh
- name: Perform CodeQL Analysis
- uses: github/codeql-action/analyze@168b99b3c22180941ae7dbdd5f5c9678ede476ba # v2.2.7
+ uses: github/codeql-action/analyze@04df1262e6247151b5ac09cd2c303ac36ad3f62b # v2.2.9
with:
category: "/language:${{matrix.language}}"
diff --git a/.github/workflows/cygwin.yml b/.github/workflows/cygwin.yml
index a600447e7..397d457f9 100644
--- a/.github/workflows/cygwin.yml
+++ b/.github/workflows/cygwin.yml
@@ -20,7 +20,7 @@ jobs:
runs-on: windows-latest
if: "github.repository == 'numpy/numpy'"
steps:
- - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
+ - uses: actions/checkout@8f4b7f84864484a7bf31766abe9204da3cbe65b3 # v3.5.0
with:
submodules: recursive
fetch-depth: 0
diff --git a/.github/workflows/dependency-review.yml b/.github/workflows/dependency-review.yml
index 152196a86..c1aa5c0cb 100644
--- a/.github/workflows/dependency-review.yml
+++ b/.github/workflows/dependency-review.yml
@@ -15,6 +15,6 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: 'Checkout Repository'
- uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
+ uses: actions/checkout@8f4b7f84864484a7bf31766abe9204da3cbe65b3 # v3.5.0
- name: 'Dependency Review'
uses: actions/dependency-review-action@f46c48ed6d4f1227fb2d9ea62bf6bcbed315589e # v3.0.4
diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml
deleted file mode 100644
index c8f95d676..000000000
--- a/.github/workflows/docker.yml
+++ /dev/null
@@ -1,61 +0,0 @@
-name: Build Base Docker Image
-
-on:
- push:
- branches:
- - main
- paths:
- - "environment.yml"
-
-permissions:
- contents: read # to fetch code (actions/checkout)
-
-jobs:
- build_docker:
- name: Build base Docker image
- runs-on: ubuntu-latest
- environment: numpy-dev
- if: "github.repository_owner == 'numpy'"
- steps:
- - name: Clone repository
- uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
- - name: Lint Docker
- uses: hadolint/hadolint-action@54c9adbab1582c2ef04b2016b760714a4bfde3cf # v3.1.0
- with:
- dockerfile: ./tools/gitpod/Dockerfile
- ignore: DL3059
- - name: Get refs
- shell: bash
- run: |
- export raw_branch=${GITHUB_REF#refs/heads/}
- echo "branch=${raw_branch//\//-}" >> $GITHUB_OUTPUT
- echo "date=$(date +'%Y%m%d')" >> $GITHUB_OUTPUT
- echo "sha8=$(echo ${GITHUB_SHA} | cut -c1-8)" >> $GITHUB_OUTPUT
- id: getrefs
- - name: Set up Docker Buildx
- uses: docker/setup-buildx-action@4b4e9c3e2d4531116a6f8ba8e71fc6e2cb6e6c8c # v2.5.0
- - name: Cache Docker layers
- uses: actions/cache@88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8 # v3.3.1
- with:
- path: /tmp/.buildx-cache
- key: ${{ runner.os }}-buildx-${{ github.sha }}
- restore-keys: ${{ runner.os }}-buildx-
- - name: Login to Docker Hub
- uses: docker/login-action@f4ef78c080cd8ba55a85445d5b36e214a81df20a # v2.1.0
- with:
- username: ${{ secrets.DOCKERHUB_USERNAME }}
- password: ${{ secrets.DOCKERHUB_TOKEN }}
- - name: Build and push
- id: docker_build
- uses: docker/build-push-action@3b5e8027fcad23fda98b2e3ac259d8d67585f671 # v4.0.0
- with:
- context: "."
- file: "./tools/gitpod/Dockerfile"
- push: ${{ github.event_name != 'pull_request' }}
- cache-from: type=local,src=/tmp/.buildx-cache
- cache-to: type=local,dest=/tmp/.buildx-cache
- tags: |
- numpy/numpy-dev:${{ steps.getrefs.outputs.date }}-${{ steps.getrefs.outputs.branch}}-${{ steps.getrefs.outputs.sha8 }}, numpy/numpy-dev:latest
- - name: Image digest
- # Return details of the image build: sha and shell
- run: echo ${{ steps.docker_build.outputs.digest }}
diff --git a/.github/workflows/emscripten.yml b/.github/workflows/emscripten.yml
index 7ad7903a3..f4d565392 100644
--- a/.github/workflows/emscripten.yml
+++ b/.github/workflows/emscripten.yml
@@ -31,7 +31,7 @@ jobs:
NODE_VERSION: 18
steps:
- name: Checkout numpy
- uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
+ uses: actions/checkout@8f4b7f84864484a7bf31766abe9204da3cbe65b3 # v3.5.0
with:
submodules: true
# versioneer.py requires the latest tag to be reachable. Here we
diff --git a/.github/workflows/gitpod.yml b/.github/workflows/gitpod.yml
deleted file mode 100644
index 737c3c07b..000000000
--- a/.github/workflows/gitpod.yml
+++ /dev/null
@@ -1,61 +0,0 @@
-name: Build Gitpod Docker image
-
-on:
- push:
- branches:
- - main
-
-permissions:
- contents: read # to fetch code (actions/checkout)
-
-jobs:
- build_gitpod:
- name: Build Gitpod Docker image
- runs-on: ubuntu-latest
- environment: numpy-dev
- if: "github.repository_owner == 'numpy'"
- steps:
- - name: Clone repository
- uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
- with:
- fetch-depth: 0
- - name: Lint Docker
- uses: hadolint/hadolint-action@54c9adbab1582c2ef04b2016b760714a4bfde3cf # v3.1.0
- with:
- dockerfile: ./tools/gitpod/gitpod.Dockerfile
- ignore: DL3059
- - name: Get refs
- shell: bash
- run: |
- export raw_branch=${GITHUB_REF#refs/heads/}
- echo "branch=${raw_branch//\//-}" >> $GITHUB_OUTPUT
- echo "date=$(date +'%Y%m%d')" >> $GITHUB_OUTPUT
- echo "sha8=$(echo ${GITHUB_SHA} | cut -c1-8)" >> $GITHUB_OUTPUT
- id: getrefs
- - name: Set up Docker Buildx
- uses: docker/setup-buildx-action@4b4e9c3e2d4531116a6f8ba8e71fc6e2cb6e6c8c # v2.5.0
- - name: Cache Docker layers
- uses: actions/cache@88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8 # v3.3.1
- with:
- path: /tmp/.buildx-cache
- key: ${{ runner.os }}-buildx-${{ github.sha }}
- restore-keys: ${{ runner.os }}-buildx-
- - name: Login to Docker Hub
- uses: docker/login-action@f4ef78c080cd8ba55a85445d5b36e214a81df20a # v2.1.0
- with:
- username: ${{ secrets.DOCKERHUB_USERNAME }}
- password: ${{ secrets.DOCKERHUB_TOKEN }}
- - name: Build and push
- id: docker_build
- uses: docker/build-push-action@3b5e8027fcad23fda98b2e3ac259d8d67585f671 # v4.0.0
- with:
- context: "."
- file: "./tools/gitpod/gitpod.Dockerfile"
- push: ${{ github.event_name != 'pull_request' }}
- cache-from: type=local,src=/tmp/.buildx-cache
- cache-to: type=local,dest=/tmp/.buildx-cache
- tags: |
- numpy/numpy-gitpod:${{ steps.getrefs.outputs.date }}-${{ steps.getrefs.outputs.branch}}-${{ steps.getrefs.outputs.sha8 }}, numpy/numpy-gitpod:latest
- - name: Image digest
- # Return details of the image build: sha and shell
- run: echo ${{ steps.docker_build.outputs.digest }}
diff --git a/.github/workflows/linux_meson.yml b/.github/workflows/linux_meson.yml
index 902c06997..16e2bc90c 100644
--- a/.github/workflows/linux_meson.yml
+++ b/.github/workflows/linux_meson.yml
@@ -25,7 +25,7 @@ jobs:
if: "github.repository == 'numpy/numpy'"
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
+ - uses: actions/checkout@8f4b7f84864484a7bf31766abe9204da3cbe65b3 # v3.5.0
with:
submodules: recursive
fetch-depth: 0
@@ -41,7 +41,7 @@ jobs:
env:
TERM: xterm-256color
run:
- ./spin build -- --werror
+ spin build -- --werror
- name: Check build-internal dependencies
run:
ninja -C build -t missingdeps
@@ -54,4 +54,4 @@ jobs:
TERM: xterm-256color
run: |
pip install pytest hypothesis typing_extensions
- ./spin test
+ spin test
diff --git a/.github/workflows/linux_musl.yml b/.github/workflows/linux_musl.yml
index 54c8b7c2d..7d90c20ed 100644
--- a/.github/workflows/linux_musl.yml
+++ b/.github/workflows/linux_musl.yml
@@ -62,5 +62,5 @@ jobs:
pip install pytest hypothesis typing_extensions
# use meson to build and test
- ./spin build
- ./spin test
+ spin build
+ spin test
diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml
index 6924048be..03eabc365 100644
--- a/.github/workflows/scorecards.yml
+++ b/.github/workflows/scorecards.yml
@@ -25,12 +25,12 @@ jobs:
steps:
- name: "Checkout code"
- uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.1.0
+ uses: actions/checkout@8f4b7f84864484a7bf31766abe9204da3cbe65b3 # v3.1.0
with:
persist-credentials: false
- name: "Run analysis"
- uses: ossf/scorecard-action@e38b1902ae4f44df626f11ba0734b14fb91f8f86 # v2.1.2
+ uses: ossf/scorecard-action@80e868c13c90f172d68d1f4501dee99e2479f7af # v2.1.3
with:
results_file: results.sarif
results_format: sarif
@@ -50,6 +50,6 @@ jobs:
# Upload the results to GitHub's code scanning dashboard.
- name: "Upload to code-scanning"
- uses: github/codeql-action/upload-sarif@168b99b3c22180941ae7dbdd5f5c9678ede476ba # v2.1.27
+ uses: github/codeql-action/upload-sarif@04df1262e6247151b5ac09cd2c303ac36ad3f62b # v2.1.27
with:
sarif_file: results.sarif
diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml
index df31be80e..71e0c68fd 100644
--- a/.github/workflows/wheels.yml
+++ b/.github/workflows/wheels.yml
@@ -43,7 +43,7 @@ jobs:
message: ${{ steps.commit_message.outputs.message }}
steps:
- name: Checkout numpy
- uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
+ uses: actions/checkout@8f4b7f84864484a7bf31766abe9204da3cbe65b3 # v3.5.0
# Gets the correct commit message for pull request
with:
ref: ${{ github.event.pull_request.head.sha }}
@@ -92,7 +92,7 @@ jobs:
IS_SCHEDULE_DISPATCH: ${{ github.event_name == 'schedule' || github.event_name == 'workflow_dispatch' }}
steps:
- name: Checkout numpy
- uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
+ uses: actions/checkout@8f4b7f84864484a7bf31766abe9204da3cbe65b3 # v3.5.0
with:
submodules: true
# versioneer.py requires the latest tag to be reachable. Here we
@@ -171,7 +171,7 @@ jobs:
# IS_SCHEDULE_DISPATCH: ${{ github.event_name == 'schedule' || github.event_name == 'workflow_dispatch' }}
steps:
- name: Checkout numpy
- uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
+ uses: actions/checkout@8f4b7f84864484a7bf31766abe9204da3cbe65b3 # v3.5.0
with:
submodules: true
# versioneer.py requires the latest tag to be reachable. Here we
diff --git a/.github/workflows/windows_meson.yml b/.github/workflows/windows_meson.yml
index e0064dc19..aed6cd852 100644
--- a/.github/workflows/windows_meson.yml
+++ b/.github/workflows/windows_meson.yml
@@ -23,7 +23,7 @@ jobs:
# if: "github.repository == 'numpy/numpy'"
steps:
- name: Checkout
- uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
+ uses: actions/checkout@8f4b7f84864484a7bf31766abe9204da3cbe65b3 # v3.5.0
with:
submodules: recursive
fetch-depth: 0
diff --git a/.gitpod.yml b/.gitpod.yml
deleted file mode 100644
index 42513de8d..000000000
--- a/.gitpod.yml
+++ /dev/null
@@ -1,66 +0,0 @@
-# Rebuilding NumPy on init - rather than on prebuild: this ensures
-# that even forks do have a usable freshly built NumPy
-# Might delegate this later to prebuild with Q2 improvements on gitpod
-# https://www.gitpod.io/docs/config-start-tasks/#configuring-the-terminal
-# -------------------------------------------------------------------------
-
-image: numpy/numpy-gitpod:latest
-tasks:
- - name: Prepare development environment
- init: |
- mkdir -p .vscode
- cp tools/gitpod/settings.json .vscode/settings.json
- rm -f /workspace/numpy/.git/shallow.lock
- conda activate numpy-dev
- git pull --unshallow # need to force this else the prebuild fails
- git fetch --tags
- git submodule update --init
- python setup.py build_ext --inplace
- echo "🛠 Completed rebuilding NumPy!! 🛠 "
- echo "📖 Building docs 📖 "
- cd doc
- make html
- echo "✨ Pre-build complete! You can close this terminal ✨ "
-
-# --------------------------------------------------------
-# exposing ports for liveserve
-ports:
- - port: 5500
- onOpen: notify
-
-# --------------------------------------------------------
-# some useful extensions to have
-vscode:
- extensions:
- - eamodio.gitlens
- - njpwerner.autodocstring
- - lextudio.restructuredtext
- - ritwickdey.liveserver
- - ms-python.python
- - yzhang.markdown-all-in-one
- - bungcip.better-toml
- - mhutchie.git-graph
-
-# --------------------------------------------------------
-# using prebuilds for the container - note: atm this only
-# works for the NumPy repo
-# With this configuration the prebuild will happen on push to master
-github:
- prebuilds:
- # enable for main/default branch
- master: true
- # enable for other branches (defaults to false)
- branches: false
- # enable for pull requests coming from this repo (defaults to true)
- pullRequests: false
- # enable for pull requests coming from forks (defaults to false)
- pullRequestsFromForks: false
- # add a check to pull requests (defaults to true)
- addCheck: false
- # add a "Review in Gitpod" button as a comment to pull requests (defaults to false)
- addComment: false
- # add a "Review in Gitpod" button to the pull request's description (defaults to false)
- addBadge: false
- # add a label once the prebuild is ready to pull requests (defaults to false)
- addLabel: false
-
\ No newline at end of file
diff --git a/.hadolint.yaml b/.hadolint.yaml
deleted file mode 100644
index 0188ba2cf..000000000
--- a/.hadolint.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-ignored:
- - DL3006
- - DL3008
- - SC2016
- - DL3004
- - DL3007
\ No newline at end of file
diff --git a/benchmarks/README.rst b/benchmarks/README.rst
index 135527e4f..ef841a818 100644
--- a/benchmarks/README.rst
+++ b/benchmarks/README.rst
@@ -31,7 +31,7 @@ the command line and execute::
python runtests.py --bench
where ``--bench`` activates the benchmark suite instead of the
-test suite. This builds NumPy and runs all available benchmarks
+test suite. This builds NumPy and runs all available benchmarks
defined in ``benchmarks/``. (Note: this could take a while. Each
benchmark is run multiple times to measure the distribution in
execution times.)
diff --git a/benchmarks/benchmarks/__init__.py b/benchmarks/benchmarks/__init__.py
index 7b9f1d3e6..35fc87eac 100644
--- a/benchmarks/benchmarks/__init__.py
+++ b/benchmarks/benchmarks/__init__.py
@@ -26,7 +26,7 @@ def dirty_lock(lock_name, lock_on_count=1):
lock_path = os.path.abspath(os.path.join(
os.path.dirname(__file__), "..", "env", lock_name)
)
- # ASV load the 'benchmark_dir' to discovering the available benchmarks
+ # ASV loads the 'benchmark_dir' to discover the available benchmarks
# the issue here is ASV doesn't capture any strings from stdout or stderr
# during this stage so we escape it and lock on the second increment
try:
diff --git a/build_requirements.txt b/build_requirements.txt
index 3699320c8..075d454f2 100644
--- a/build_requirements.txt
+++ b/build_requirements.txt
@@ -2,4 +2,4 @@ meson-python>=0.10.0
Cython>=0.29.30,<3.0
wheel==0.38.1
ninja
-spin>=0.2
+spin==0.3
diff --git a/building_with_meson.md b/building_with_meson.md
index cf198e7d9..259498998 100644
--- a/building_with_meson.md
+++ b/building_with_meson.md
@@ -18,14 +18,14 @@ into a problem._
Then install spin:
- `python -m pip install spin`
-**Compile and install:** `./spin build`
+**Compile and install:** `spin build`
This builds in the `build/` directory, and installs into the `build-install` directory.
Then run the test suite or a shell via `spin`:
```
-./spin test
-./spin ipython
+spin test
+spin ipython
```
Alternatively, to use the package, add it to your `PYTHONPATH`:
@@ -67,5 +67,5 @@ Libs: -L${libdir} -lopenblas
Then build with:
```
-./spin build -- -Dpkg_config_path=${HOME}/lib/pkgconfig
+spin build -- -Dpkg_config_path=${HOME}/lib/pkgconfig
```
diff --git a/doc/neps/nep-0013-ufunc-overrides.rst b/doc/neps/nep-0013-ufunc-overrides.rst
index c132113db..d69af6924 100644
--- a/doc/neps/nep-0013-ufunc-overrides.rst
+++ b/doc/neps/nep-0013-ufunc-overrides.rst
@@ -20,6 +20,8 @@ NEP 13 — A mechanism for overriding Ufuncs
:Date: 2017-03-31
:Status: Final
+:Updated: 2023-02-19
+:Author: Roy Smart
Executive summary
=================
@@ -173,12 +175,12 @@ where in all current cases only a single output makes sense).
The function dispatch proceeds as follows:
-- If one of the input or output arguments implements
+- If one of the input, output, or ``where`` arguments implements
``__array_ufunc__``, it is executed instead of the ufunc.
- If more than one of the arguments implements ``__array_ufunc__``,
they are tried in the following order: subclasses before superclasses,
- inputs before outputs, otherwise left to right.
+ inputs before outputs, outputs before ``where``, otherwise left to right.
- The first ``__array_ufunc__`` method returning something else than
:obj:`NotImplemented` determines the return value of the Ufunc.
@@ -326,7 +328,10 @@ equivalent to::
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
# Cannot handle items that have __array_ufunc__ (other than our own).
outputs = kwargs.get('out', ())
- for item in inputs + outputs:
+ objs = inputs + outputs
+ if "where" in kwargs:
+ objs = objs + (kwargs["where"], )
+ for item in objs:
if (hasattr(item, '__array_ufunc__') and
type(item).__array_ufunc__ is not ndarray.__array_ufunc__):
return NotImplemented
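
As a concrete illustration of the dispatch order described above (inputs before
outputs, outputs before ``where``), the sketch below uses an invented
``Tracer`` duck type that always defers; it assumes a NumPy version that
includes the ``where`` dispatch change introduced in this commit::

    import numpy as np

    class Tracer:
        def __init__(self, label):
            self.label = label

        def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
            # Log the dispatch, then defer so the next candidate is tried.
            print("dispatched to", self.label)
            return NotImplemented

    # The input is tried first, then the output, then ``where``; since every
    # candidate returns NotImplemented, NumPy finally raises TypeError.
    try:
        np.add(Tracer("input"), 1,
               out=(Tracer("output"),), where=Tracer("where"))
    except TypeError:
        pass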
diff --git a/doc/records.rst b/doc/records.rst
index 3c0d55216..2e9b1251a 100644
--- a/doc/records.rst
+++ b/doc/records.rst
@@ -74,7 +74,7 @@ New possibilities for the "data-type"
Reference counting for ``PyArray_Descr *`` objects.
```````````````````````````````````````````````````
-Most functions that take ``PyArary_Descr *`` as arguments and return a
+Most functions that take ``PyArray_Descr *`` as arguments and return a
``PyObject *`` steal the reference unless otherwise noted in the code:
Functions that return ``PyArray_Descr *`` objects return a new
diff --git a/doc/release/upcoming_changes/21120.new_feature.rst b/doc/release/upcoming_changes/21120.new_feature.rst
new file mode 100644
index 000000000..7d4dbf743
--- /dev/null
+++ b/doc/release/upcoming_changes/21120.new_feature.rst
@@ -0,0 +1,21 @@
+Add support for inplace matrix multiplication
+----------------------------------------------
+It is now possible to perform inplace matrix multiplication
+via the ``@=`` operator.
+
+.. code-block:: python
+
+ >>> import numpy as np
+
+ >>> a = np.arange(6).reshape(3, 2)
+ >>> print(a)
+ [[0 1]
+ [2 3]
+ [4 5]]
+
+ >>> b = np.ones((2, 2), dtype=int)
+ >>> a @= b
+ >>> print(a)
+ [[1 1]
+ [5 5]
+ [9 9]]
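
For context, ``a @= b`` above is a matrix product written back into the left
operand; a roughly equivalent spelling (a hedged reading of the behavior, not
a statement of the implementation) is an explicit ``out`` argument::

    >>> a = np.arange(6).reshape(3, 2)   # reset, since `a @= b` mutated a
    >>> np.matmul(a, b, out=a)
    array([[1, 1],
           [5, 5],
           [9, 9]])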
diff --git a/doc/release/upcoming_changes/23240.compatibility.rst b/doc/release/upcoming_changes/23240.compatibility.rst
new file mode 100644
index 000000000..28536a020
--- /dev/null
+++ b/doc/release/upcoming_changes/23240.compatibility.rst
@@ -0,0 +1,10 @@
+Array-likes that define ``__array_ufunc__`` can now override ufuncs if used as ``where``
+----------------------------------------------------------------------------------------
+If the ``where`` keyword argument of a :class:`numpy.ufunc` is a subclass of
+:class:`numpy.ndarray` or is a duck type that defines
+:func:`numpy.class.__array_ufunc__`, it can override the behavior of the ufunc
+using the same mechanism as the input and output arguments.
+Note that for this to work properly, the ``where.__array_ufunc__``
+implementation will have to unwrap the ``where`` argument to pass it into the
+default implementation of the ``ufunc`` or, for :class:`numpy.ndarray`
+subclasses, before using ``super().__array_ufunc__``.
\ No newline at end of file
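
To make the note above concrete, here is a minimal sketch of an ndarray
subclass that unwraps the ``where`` argument before deferring to
``super().__array_ufunc__``; the ``WhereMask`` name is invented for
illustration, and the example assumes a NumPy version with this change::

    import numpy as np

    class WhereMask(np.ndarray):
        def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
            # Unwrap WhereMask instances so ndarray's default implementation
            # sees only plain arrays and actually executes the ufunc.
            if isinstance(kwargs.get("where"), WhereMask):
                kwargs["where"] = kwargs["where"].view(np.ndarray)
            inputs = tuple(x.view(np.ndarray) if isinstance(x, WhereMask)
                           else x for x in inputs)
            return super().__array_ufunc__(ufunc, method, *inputs, **kwargs)

    a = np.ones(3)
    w = np.array([True, False, True]).view(WhereMask)
    out = np.add(a, a, out=np.zeros(3), where=w)  # dispatches to WhereMask
    print(out)  # [2. 0. 2.]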
diff --git a/doc/release/upcoming_changes/23322.improvement.rst b/doc/release/upcoming_changes/23322.improvement.rst
new file mode 100644
index 000000000..ce5ab8cf5
--- /dev/null
+++ b/doc/release/upcoming_changes/23322.improvement.rst
@@ -0,0 +1,4 @@
+`np.ma.dot()` now supports non-2d arrays
+----------------------------------------
+Previously `np.ma.dot()` only worked if `a` and `b` were both 2d.
+Now it also works for non-2d arrays, just as `np.dot()` does.
diff --git a/doc/release/upcoming_changes/23480.expired.rst b/doc/release/upcoming_changes/23480.expired.rst
new file mode 100644
index 000000000..164677b98
--- /dev/null
+++ b/doc/release/upcoming_changes/23480.expired.rst
@@ -0,0 +1 @@
+* The ``np.dual`` submodule has been removed.
diff --git a/doc/source/dev/development_environment.rst b/doc/source/dev/development_environment.rst
index b5ee915c4..aedc489d6 100644
--- a/doc/source/dev/development_environment.rst
+++ b/doc/source/dev/development_environment.rst
@@ -18,9 +18,26 @@ sources needs some additional steps, which are explained below. For the rest
of this chapter we assume that you have set up your git repo as described in
:ref:`using-git`.
-.. note:: If you are having trouble building NumPy from source or setting up
- your local development environment, you can try
- to :ref:`build NumPy with Gitpod <development-gitpod>`.
+.. note::
+
+ If you are having trouble building NumPy from source or setting up your
+ local development environment, you can try to build NumPy with GitHub
+ Codespaces. It allows you to create the correct development environment
+ right in your browser, reducing the need to install local development
+ environments and deal with incompatible dependencies.
+
+ If you have good internet connectivity and want a temporary set-up, it is
+ often faster to work on NumPy in a Codespaces environment. For documentation
+ on how to get started with Codespaces, see
+ `the Codespaces docs <https://docs.github.com/en/codespaces>`__.
+ When creating a codespace for the ``numpy/numpy`` repository, the default
+ 2-core machine type works; 4-core will build and work a bit faster (but of
+ course at a cost of halving your number of free usage hours). Once your
+ codespace has started, you can run ``conda activate numpy-dev`` and your
+ development environment is completely set up - you can then follow the
+ relevant parts of the NumPy documentation to build, test, develop, write
+ docs, and contribute to NumPy.
+
.. _testing-builds:
diff --git a/doc/source/dev/development_gitpod.rst b/doc/source/dev/development_gitpod.rst
deleted file mode 100644
index 4e386867d..000000000
--- a/doc/source/dev/development_gitpod.rst
+++ /dev/null
@@ -1,271 +0,0 @@
-.. _development-gitpod:
-
-
-Using Gitpod for NumPy development
-==================================
-
-This section of the documentation will guide you through:
-
-* using GitPod for your NumPy development environment
-* creating a personal fork of the NumPy repository on GitHub
-* a quick tour of Gitpod and VSCode
-* working on the NumPy documentation in Gitpod
-
-Gitpod
-------
-
-`Gitpod`_ is an open-source platform for automated and ready-to-code
-development environments. It enables developers to describe their dev
-environment as code and start instant and fresh development environments for
-each new task directly from your browser. This reduces the need to install local
-development environments and deal with incompatible dependencies.
-
-Gitpod GitHub integration
--------------------------
-
-To be able to use Gitpod, you will need to have the Gitpod app installed on your
-GitHub account, so if
-you do not have an account yet, you will need to create one first.
-
-Head over to the `Gitpod`_ website and click on the **Continue with GitHub**
-button. You will be redirected to the GitHub authentication page.
-You will then be asked to install the `Gitpod GitHub app <https://github.com/marketplace/gitpod-io>`_.
-
-Make sure to select **All repositories** access option to avoid issues with
-permissions later on. Click on the green **Install** button
-
-.. image:: ./gitpod-imgs/installing-gitpod-io.png
- :alt: Gitpod repository access and installation screenshot
-
-This will install the necessary hooks for the integration.
-
-Forking the NumPy repository
-----------------------------
-
-The best way to work on NumPy as a contributor is by making a fork of the
-repository first.
-
-#. Browse to the `NumPy repository on GitHub`_ and `create your own fork`_.
-#. Browse to your fork. Your fork will have a URL like
- https://github.com/melissawm/NumPy, except with your GitHub username in place of ``melissawm``.
-
-Starting Gitpod
----------------
-Once you have authenticated to Gitpod through GitHub, you can install the
-`Gitpod browser extension <https://www.gitpod.io/docs/browser-extension>`_
-which will add a **Gitpod** button next to the **Code** button in the
-repository:
-
-.. image:: ./gitpod-imgs/NumPy-github.png
- :alt: NumPy repository with Gitpod button screenshot
-
-#. If you install the extension - you can click the **Gitpod** button to start
- a new workspace.
-
-#. Alternatively, if you do not want to install the browser extension, you can
- visit https://gitpod.io/#https://github.com/USERNAME/NumPy replacing
- ``USERNAME`` with your GitHub username.
-
-#. In both cases, this will open a new tab on your web browser and start
- building your development environment. Please note this can take a few
- minutes.
-
-#. Once the build is complete, you will be directed to your workspace,
- including the VSCode editor and all the dependencies you need to work on
- NumPy. The first time you start your workspace, you will notice that there
- might be some actions running. This will ensure that you have a development
- version of NumPy installed and that the docs are being pre-built for you.
-
-#. When your workspace is ready, you can :ref:`test the build<testing-builds>` by
- entering::
-
- $ python runtests.py -v
-
-``runtests.py`` is another script in the NumPy root directory. It runs a suite
-of tests that make sure NumPy is working as it should, and ``-v`` activates the
-``--verbose`` option to show all the test output.
-
-Quick workspace tour
---------------------
-Gitpod uses VSCode as the editor. If you have not used this editor before, you
-can check the Getting started `VSCode docs`_ to familiarize yourself with it.
-
-Your workspace will look similar to the image below:
-
-.. image:: ./gitpod-imgs/gitpod-workspace.png
- :alt: Gitpod workspace screenshot
-
-.. note:: By default, VSCode initializes with a light theme. You can change to
- a dark theme by with the keyboard shortcut :kbd:`Cmd-K Cmd-T` in Mac or
- :kbd:`Ctrl-K Ctrl-T` in Linux and Windows.
-
-We have marked some important sections in the editor:
-
-#. Your current Python interpreter - by default, this is ``numpy-dev`` and
- should be displayed in the status bar and on your terminal. You do not need
- to activate the conda environment as this will always be activated for you.
-#. Your current branch is always displayed in the status bar. You can also use
- this button to change or create branches.
-#. GitHub Pull Requests extension - you can use this to work with Pull Requests
- from your workspace.
-#. Marketplace extensions - we have added some essential extensions to the NumPy
- Gitpod. Still, you can also install other extensions or syntax highlighting
- themes for your user, and these will be preserved for you.
-#. Your workspace directory - by default, it is ``/workspace/numpy``. **Do not
- change this** as this is the only directory preserved in Gitpod.
-
-We have also pre-installed a few tools and VSCode extensions to help with the
-development experience:
-
-* `GitHub CLI <https://cli.github.com/>`_
-* `VSCode rst extension <https://marketplace.visualstudio.com/items?itemName=lextudio.restructuredtext>`_
-* `VSCode Live server extension <https://marketplace.visualstudio.com/items?itemName=ritwickdey.LiveServer>`_
-* `VSCode Gitlens extension <https://marketplace.visualstudio.com/items?itemName=eamodio.gitlens>`_
-* `VSCode autodocstrings extension <https://marketplace.visualstudio.com/items?itemName=njpwerner.autodocstring>`_
-* `VSCode Git Graph extension <https://marketplace.visualstudio.com/items?itemName=mhutchie.git-graph>`_
-
-Development workflow with Gitpod
---------------------------------
-The :ref:`development-workflow` section of this documentation contains
-information regarding the NumPy development workflow. Make sure to check this
-before working on your contributions.
-
-When using Gitpod, git is pre configured for you:
-
-#. You do not need to configure your git username, and email as this should be
- done for you as you authenticated through GitHub. You can check the git
- configuration with the command ``git config --list`` in your terminal.
-#. As you started your workspace from your own NumPy fork, you will by default
- have both ``upstream`` and ``origin`` added as remotes. You can verify this by
- typing ``git remote`` on your terminal or by clicking on the **branch name**
- on the status bar (see image below).
-
- .. image:: ./gitpod-imgs/NumPy-gitpod-branches.png
- :alt: Gitpod workspace branches plugin screenshot
-
-Rendering the NumPy documentation
----------------------------------
-You can find the detailed documentation on how rendering the documentation with
-Sphinx works in the :ref:`howto-build-docs` section.
-
-The documentation is pre-built during your workspace initialization. So once
-this task is completed, you have two main options to render the documentation
-in Gitpod.
-
-Option 1: Using Liveserve
-~~~~~~~~~~~~~~~~~~~~~~~~~
-
-#. View the documentation in ``NumPy/doc/build/html``. You can start with
- ``index.html`` and browse, or you can jump straight to the file you're
- interested in.
-#. To see the rendered version of a page, you can right-click on the ``.html``
- file and click on **Open with Live Serve**. Alternatively, you can open the
- file in the editor and click on the **Go live** button on the status bar.
-
- .. image:: ./gitpod-imgs/vscode-statusbar.png
- :alt: Gitpod workspace VSCode start live serve screenshot
-
-#. A simple browser will open to the right-hand side of the editor. We recommend
- closing it and click on the **Open in browser** button in the pop-up.
-#. To stop the server click on the **Port: 5500** button on the status bar.
-
-Option 2: Using the rst extension
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-A quick and easy way to see live changes in a ``.rst`` file as you work on it
-uses the rst extension with docutils.
-
-.. note:: This will generate a simple live preview of the document without the
- ``html`` theme, and some backlinks might not be added correctly. But it is an
- easy and lightweight way to get instant feedback on your work.
-
-#. Open any of the source documentation files located in ``doc/source`` in the
- editor.
-#. Open VSCode Command Palette with :kbd:`Cmd-Shift-P` in Mac or
- :kbd:`Ctrl-Shift-P` in Linux and Windows. Start typing "restructured"
- and choose either "Open preview" or "Open preview to the Side".
-
- .. image:: ./gitpod-imgs/vscode-rst.png
- :alt: Gitpod workspace VSCode open rst screenshot
-
-#. As you work on the document, you will see a live rendering of it on the editor.
-
- .. image:: ./gitpod-imgs/rst-rendering.png
- :alt: Gitpod workspace VSCode rst rendering screenshot
-
-If you want to see the final output with the ``html`` theme you will need to
-rebuild the docs with ``make html`` and use Live Serve as described in option 1.
-
-FAQ's and troubleshooting
--------------------------
-
-How long is my Gitpod workspace kept for?
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Your stopped workspace will be kept for 14 days and deleted afterwards if you do
-not use them.
-
-Can I come back to a previous workspace?
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Yes, let's say you stepped away for a while and you want to carry on working on
-your NumPy contributions. You need to visit https://gitpod.io/workspaces and
-click on the workspace you want to spin up again. All your changes will be there
-as you last left them.
-
-Can I install additional VSCode extensions?
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Absolutely! Any extensions you installed will be installed in your own workspace
-and preserved.
-
-I registered on Gitpod but I still cannot see a ``Gitpod`` button in my repositories.
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Head to https://gitpod.io/integrations and make sure you are logged in.
-Hover over GitHub and click on the three buttons that appear on the right.
-Click on edit permissions and make sure you have ``user:email``,
-``read:user``, and ``public_repo`` checked. Click on **Update Permissions**
-and confirm the changes in the GitHub application page.
-
-.. image:: ./gitpod-imgs/gitpod-edit-permissions-gh.png
- :alt: Gitpod integrations - edit GH permissions screenshot
-
-How long does my workspace stay active if I'm not using it?
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-If you keep your workspace open in a browser tab but don't interact with it,
-it will shut down after 30 minutes. If you close the browser tab, it will
-shut down after 3 minutes.
-
-My terminal is blank - there is no cursor and it's completely unresponsive
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Unfortunately this is a known-issue on Gitpod's side. You can sort this
-issue in two ways:
-
-#. Create a new Gitpod workspace altogether.
-#. Head to your `Gitpod dashboard <https://gitpod.io/workspaces>`_ and locate
- the running workspace. Hover on it and click on the **three dots menu**
- and then click on **Stop**. When the workspace is completely stopped you
- can click on its name to restart it again.
-
-.. image:: ./gitpod-imgs/gitpod-dashboard-stop.png
- :alt: Gitpod dashboard and workspace menu screenshot
-
-I authenticated through GitHub but I still cannot commit to the repository through Gitpod.
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Head to https://gitpod.io/integrations and make sure you are logged in.
-Hover over GitHub and click on the three buttons that appear on the right.
-Click on edit permissions and make sure you have ``public_repo`` checked.
-Click on **Update Permissions** and confirm the changes in the
-GitHub application page.
-
-.. image:: ./gitpod-imgs/gitpod-edit-permissions-repo.png
- :alt: Gitpod integrations - edit GH repository permissions screenshot
-
-.. _Gitpod: https://www.gitpod.io/
-.. _NumPy repository on GitHub: https://github.com/NumPy/NumPy
-.. _create your own fork: https://help.github.com/en/articles/fork-a-repo
-.. _VSCode docs: https://code.visualstudio.com/docs/getstarted/tips-and-tricks
diff --git a/doc/source/dev/gitpod-imgs/NumPy-github.png b/doc/source/dev/gitpod-imgs/NumPy-github.png
deleted file mode 100644
index 010b0fc5e..000000000
--- a/doc/source/dev/gitpod-imgs/NumPy-github.png
+++ /dev/null
Binary files differ
diff --git a/doc/source/dev/gitpod-imgs/NumPy-gitpod-branches.png b/doc/source/dev/gitpod-imgs/NumPy-gitpod-branches.png
deleted file mode 100644
index 3ee6c5f20..000000000
--- a/doc/source/dev/gitpod-imgs/NumPy-gitpod-branches.png
+++ /dev/null
Binary files differ
diff --git a/doc/source/dev/gitpod-imgs/gitpod-dashboard-stop.png b/doc/source/dev/gitpod-imgs/gitpod-dashboard-stop.png
deleted file mode 100644
index 40f137745..000000000
--- a/doc/source/dev/gitpod-imgs/gitpod-dashboard-stop.png
+++ /dev/null
Binary files differ
diff --git a/doc/source/dev/gitpod-imgs/gitpod-edit-permissions-gh.png b/doc/source/dev/gitpod-imgs/gitpod-edit-permissions-gh.png
deleted file mode 100644
index 8955e907a..000000000
--- a/doc/source/dev/gitpod-imgs/gitpod-edit-permissions-gh.png
+++ /dev/null
Binary files differ
diff --git a/doc/source/dev/gitpod-imgs/gitpod-edit-permissions-repo.png b/doc/source/dev/gitpod-imgs/gitpod-edit-permissions-repo.png
deleted file mode 100644
index 8bfaff81c..000000000
--- a/doc/source/dev/gitpod-imgs/gitpod-edit-permissions-repo.png
+++ /dev/null
Binary files differ
diff --git a/doc/source/dev/gitpod-imgs/gitpod-workspace.png b/doc/source/dev/gitpod-imgs/gitpod-workspace.png
deleted file mode 100644
index a65c9bd7e..000000000
--- a/doc/source/dev/gitpod-imgs/gitpod-workspace.png
+++ /dev/null
Binary files differ
diff --git a/doc/source/dev/gitpod-imgs/installing-gitpod-io.png b/doc/source/dev/gitpod-imgs/installing-gitpod-io.png
deleted file mode 100644
index 97319a729..000000000
--- a/doc/source/dev/gitpod-imgs/installing-gitpod-io.png
+++ /dev/null
Binary files differ
diff --git a/doc/source/dev/gitpod-imgs/rst-rendering.png b/doc/source/dev/gitpod-imgs/rst-rendering.png
deleted file mode 100644
index 41cc305f3..000000000
--- a/doc/source/dev/gitpod-imgs/rst-rendering.png
+++ /dev/null
Binary files differ
diff --git a/doc/source/dev/gitpod-imgs/vscode-rst.png b/doc/source/dev/gitpod-imgs/vscode-rst.png
deleted file mode 100644
index 5b574c115..000000000
--- a/doc/source/dev/gitpod-imgs/vscode-rst.png
+++ /dev/null
Binary files differ
diff --git a/doc/source/dev/gitpod-imgs/vscode-statusbar.png b/doc/source/dev/gitpod-imgs/vscode-statusbar.png
deleted file mode 100644
index 3febbcee0..000000000
--- a/doc/source/dev/gitpod-imgs/vscode-statusbar.png
+++ /dev/null
Binary files differ
diff --git a/doc/source/dev/howto_build_docs.rst b/doc/source/dev/howto_build_docs.rst
index a46688d7f..b3d2e3055 100644
--- a/doc/source/dev/howto_build_docs.rst
+++ b/doc/source/dev/howto_build_docs.rst
@@ -14,22 +14,13 @@ in several different formats.
Development environments
========================
-Before proceeding further it should be noted that the documentation is built with the ``make`` tool,
-which is not natively available on Windows. MacOS or Linux users can jump
-to :ref:`how-todoc.prerequisites`. It is recommended for Windows users to set up their development
-environment on :ref:`Gitpod <development-gitpod>` or `Windows Subsystem
-for Linux (WSL) <https://docs.microsoft.com/en-us/windows/wsl/install-win10>`_. WSL is a good option
-for a persistent local set-up.
-
-Gitpod
-~~~~~~
-Gitpod is an open-source platform that automatically creates the correct development environment right
-in your browser, reducing the need to install local development environments and deal with
-incompatible dependencies.
-
-If you have good internet connectivity and want a temporary set-up,
-it is often faster to build with Gitpod. Here are the in-depth instructions for
-:ref:`building NumPy with Gitpod <development-gitpod>`.
+Before proceeding further it should be noted that the documentation is built
+with the ``make`` tool, which is not natively available on Windows. MacOS or
+Linux users can jump to :ref:`how-todoc.prerequisites`. It is recommended for
+Windows users to set up their development environment on
+GitHub Codespaces (see :ref:`recommended-development-setup`) or
+`Windows Subsystem for Linux (WSL) <https://learn.microsoft.com/en-us/windows/wsl/install>`_.
+WSL is a good option for a persistent local set-up.
.. _how-todoc.prerequisites:
diff --git a/doc/source/dev/index.rst b/doc/source/dev/index.rst
index bd3595741..22ab55b31 100644
--- a/doc/source/dev/index.rst
+++ b/doc/source/dev/index.rst
@@ -258,7 +258,6 @@ The rest of the story
Git Basics <gitwash/index>
development_environment
- development_gitpod
howto_build_docs
development_workflow
development_advanced_debugging
diff --git a/doc/source/f2py/windows/index.rst b/doc/source/f2py/windows/index.rst
index c1e6b4128..980346667 100644
--- a/doc/source/f2py/windows/index.rst
+++ b/doc/source/f2py/windows/index.rst
@@ -56,7 +56,7 @@ PGI Compilers (commercial)
Windows support`_.
Cygwin (FOSS)
- Can also be used for ``gfortran``. Howeve, the POSIX API compatibility layer provided by
+ Can also be used for ``gfortran``. However, the POSIX API compatibility layer provided by
Cygwin is meant to compile UNIX software on Windows, instead of building
native Windows programs. This means cross compilation is required.
diff --git a/doc/source/f2py/windows/pgi.rst b/doc/source/f2py/windows/pgi.rst
index 644259abe..28e25f016 100644
--- a/doc/source/f2py/windows/pgi.rst
+++ b/doc/source/f2py/windows/pgi.rst
@@ -22,7 +22,5 @@ as classic Flang requires a custom LLVM and compilation from sources.
As of 29-01-2022, `PGI compiler toolchains`_ have been superseded by the Nvidia
HPC SDK, with no `native Windows support`_.
-However,
-
.. _PGI compiler toolchains: https://www.pgroup.com/index.html
.. _native Windows support: https://developer.nvidia.com/nvidia-hpc-sdk-downloads#collapseFour
diff --git a/doc/source/reference/arrays.classes.rst b/doc/source/reference/arrays.classes.rst
index 2cce595e0..34da83670 100644
--- a/doc/source/reference/arrays.classes.rst
+++ b/doc/source/reference/arrays.classes.rst
@@ -71,10 +71,11 @@ NumPy provides several hooks that classes can customize:
The method should return either the result of the operation, or
:obj:`NotImplemented` if the operation requested is not implemented.
- If one of the input or output arguments has a :func:`__array_ufunc__`
+ If one of the input, output, or ``where`` arguments has a :func:`__array_ufunc__`
method, it is executed *instead* of the ufunc. If more than one of the
arguments implements :func:`__array_ufunc__`, they are tried in the
- order: subclasses before superclasses, inputs before outputs, otherwise
+ order: subclasses before superclasses, inputs before outputs,
+ outputs before ``where``, otherwise
left to right. The first routine returning something other than
:obj:`NotImplemented` determines the result. If all of the
:func:`__array_ufunc__` operations return :obj:`NotImplemented`, a
diff --git a/doc/source/reference/arrays.scalars.rst b/doc/source/reference/arrays.scalars.rst
index 30d7be9f9..9a592984c 100644
--- a/doc/source/reference/arrays.scalars.rst
+++ b/doc/source/reference/arrays.scalars.rst
@@ -293,6 +293,10 @@ elements the data type consists of.)
:members: __init__
:exclude-members: __init__
+.. autoclass:: numpy.character
+ :members: __init__
+ :exclude-members: __init__
+
.. autoclass:: numpy.bytes_
:members: __init__
:exclude-members: __init__
diff --git a/doc/source/reference/c-api/array.rst b/doc/source/reference/c-api/array.rst
index 6651a4760..ed6d6fb6b 100644
--- a/doc/source/reference/c-api/array.rst
+++ b/doc/source/reference/c-api/array.rst
@@ -423,7 +423,7 @@ From other objects
:c:data:`NPY_ARRAY_FORCECAST` is present in ``flags``,
this call will generate an error if the data
type cannot be safely obtained from the object. If you want to use
- ``NULL`` for the *dtype* and ensure the array is notswapped then
+ ``NULL`` for the *dtype* and ensure the array is not swapped then
use :c:func:`PyArray_CheckFromAny`. A value of 0 for either of the
depth parameters causes the parameter to be ignored. Any of the
following array flags can be added (*e.g.* using \|) to get the
@@ -548,22 +548,6 @@ From other objects
:c:data:`NPY_ARRAY_F_CONTIGUOUS` \| :c:data:`NPY_ARRAY_WRITEABLE` \|
:c:data:`NPY_ARRAY_ALIGNED` \| :c:data:`NPY_ARRAY_WRITEBACKIFCOPY`
-.. c:function:: int PyArray_GetArrayParamsFromObject( \
- PyObject* op, PyArray_Descr* requested_dtype, npy_bool writeable, \
- PyArray_Descr** out_dtype, int* out_ndim, npy_intp* out_dims, \
- PyArrayObject** out_arr, PyObject* context)
-
- .. deprecated:: NumPy 1.19
-
- Unless NumPy is made aware of an issue with this, this function
- is scheduled for rapid removal without replacement.
-
- .. versionchanged:: NumPy 1.19
-
- `context` is never used. Its use results in an error.
-
- .. versionadded:: 1.6
-
.. c:function:: PyObject* PyArray_CheckFromAny( \
PyObject* op, PyArray_Descr* dtype, int min_depth, int max_depth, \
int requirements, PyObject* context)
@@ -1202,17 +1186,6 @@ Converting data types
return value is the enumerated typenumber that represents the
data-type that *op* should have.
-.. c:function:: void PyArray_ArrayType( \
- PyObject* op, PyArray_Descr* mintype, PyArray_Descr* outtype)
-
- This function is superseded by :c:func:`PyArray_ResultType`.
-
- This function works similarly to :c:func:`PyArray_ObjectType` (...)
- except it handles flexible arrays. The *mintype* argument can have
- an itemsize member and the *outtype* argument will have an
- itemsize member at least as big but perhaps bigger depending on
- the object *op*.
-
.. c:function:: PyArrayObject** PyArray_ConvertToCommonType( \
PyObject* op, int* n)
@@ -1490,7 +1463,7 @@ of the constant names is deprecated in 1.7.
:c:func:`PyArray_FromAny` and a copy had to be made of some other
array (and the user asked for this flag to be set in such a
situation). The base attribute then points to the "misbehaved"
- array (which is set read_only). :c:func`PyArray_ResolveWritebackIfCopy`
+ array (which is set read_only). :c:func:`PyArray_ResolveWritebackIfCopy`
will copy its contents back to the "misbehaved"
array (casting if necessary) and will reset the "misbehaved" array
to :c:data:`NPY_ARRAY_WRITEABLE`. If the "misbehaved" array was not
@@ -3619,18 +3592,6 @@ Miscellaneous Macros
error when you are finished with ``obj``, just before ``Py_DECREF(obj)``.
It may be called multiple times, or with ``NULL`` input.
-.. c:function:: void PyArray_XDECREF_ERR(PyObject* obj)
-
- Deprecated in 1.14, use :c:func:`PyArray_DiscardWritebackIfCopy`
- followed by ``Py_XDECREF``
-
- DECREF's an array object which may have the
- :c:data:`NPY_ARRAY_WRITEBACKIFCOPY`
- flag set without causing the contents to be copied back into the
- original array. Resets the :c:data:`NPY_ARRAY_WRITEABLE` flag on the base
- object. This is useful for recovering from an error condition when
- writeback semantics are used, but will lead to wrong results.
-
Enumerated Types
~~~~~~~~~~~~~~~~
diff --git a/doc/source/reference/c-api/dtype.rst b/doc/source/reference/c-api/dtype.rst
index 642f62749..bdd5a6f55 100644
--- a/doc/source/reference/c-api/dtype.rst
+++ b/doc/source/reference/c-api/dtype.rst
@@ -25,161 +25,161 @@ select the precision desired.
Enumerated Types
----------------
-.. c:enumerator:: NPY_TYPES
+.. c:enum:: NPY_TYPES
-There is a list of enumerated types defined providing the basic 24
-data types plus some useful generic names. Whenever the code requires
-a type number, one of these enumerated types is requested. The types
-are all called ``NPY_{NAME}``:
+ There is a list of enumerated types defined providing the basic 24
+ data types plus some useful generic names. Whenever the code requires
+ a type number, one of these enumerated types is requested. The types
+ are all called ``NPY_{NAME}``:
-.. c:enumerator:: NPY_BOOL
+ .. c:enumerator:: NPY_BOOL
- The enumeration value for the boolean type, stored as one byte.
- It may only be set to the values 0 and 1.
+ The enumeration value for the boolean type, stored as one byte.
+ It may only be set to the values 0 and 1.
-.. c:enumerator:: NPY_BYTE
-.. c:enumerator:: NPY_INT8
+ .. c:enumerator:: NPY_BYTE
+ .. c:enumerator:: NPY_INT8
- The enumeration value for an 8-bit/1-byte signed integer.
+ The enumeration value for an 8-bit/1-byte signed integer.
-.. c:enumerator:: NPY_SHORT
-.. c:enumerator:: NPY_INT16
+ .. c:enumerator:: NPY_SHORT
+ .. c:enumerator:: NPY_INT16
- The enumeration value for a 16-bit/2-byte signed integer.
+ The enumeration value for a 16-bit/2-byte signed integer.
-.. c:enumerator:: NPY_INT
-.. c:enumerator:: NPY_INT32
+ .. c:enumerator:: NPY_INT
+ .. c:enumerator:: NPY_INT32
- The enumeration value for a 32-bit/4-byte signed integer.
+ The enumeration value for a 32-bit/4-byte signed integer.
-.. c:enumerator:: NPY_LONG
+ .. c:enumerator:: NPY_LONG
- Equivalent to either NPY_INT or NPY_LONGLONG, depending on the
- platform.
+ Equivalent to either NPY_INT or NPY_LONGLONG, depending on the
+ platform.
-.. c:enumerator:: NPY_LONGLONG
-.. c:enumerator:: NPY_INT64
+ .. c:enumerator:: NPY_LONGLONG
+ .. c:enumerator:: NPY_INT64
- The enumeration value for a 64-bit/8-byte signed integer.
+ The enumeration value for a 64-bit/8-byte signed integer.
-.. c:enumerator:: NPY_UBYTE
-.. c:enumerator:: NPY_UINT8
+ .. c:enumerator:: NPY_UBYTE
+ .. c:enumerator:: NPY_UINT8
- The enumeration value for an 8-bit/1-byte unsigned integer.
+ The enumeration value for an 8-bit/1-byte unsigned integer.
-.. c:enumerator:: NPY_USHORT
-.. c:enumerator:: NPY_UINT16
+ .. c:enumerator:: NPY_USHORT
+ .. c:enumerator:: NPY_UINT16
- The enumeration value for a 16-bit/2-byte unsigned integer.
+ The enumeration value for a 16-bit/2-byte unsigned integer.
-.. c:enumerator:: NPY_UINT
-.. c:enumerator:: NPY_UINT32
+ .. c:enumerator:: NPY_UINT
+ .. c:enumerator:: NPY_UINT32
- The enumeration value for a 32-bit/4-byte unsigned integer.
+ The enumeration value for a 32-bit/4-byte unsigned integer.
-.. c:enumerator:: NPY_ULONG
+ .. c:enumerator:: NPY_ULONG
- Equivalent to either NPY_UINT or NPY_ULONGLONG, depending on the
- platform.
+ Equivalent to either NPY_UINT or NPY_ULONGLONG, depending on the
+ platform.
-.. c:enumerator:: NPY_ULONGLONG
-.. c:enumerator:: NPY_UINT64
+ .. c:enumerator:: NPY_ULONGLONG
+ .. c:enumerator:: NPY_UINT64
- The enumeration value for a 64-bit/8-byte unsigned integer.
+ The enumeration value for a 64-bit/8-byte unsigned integer.
-.. c:enumerator:: NPY_HALF
-.. c:enumerator:: NPY_FLOAT16
+ .. c:enumerator:: NPY_HALF
+ .. c:enumerator:: NPY_FLOAT16
- The enumeration value for a 16-bit/2-byte IEEE 754-2008 compatible floating
- point type.
+ The enumeration value for a 16-bit/2-byte IEEE 754-2008 compatible floating
+ point type.
-.. c:enumerator:: NPY_FLOAT
-.. c:enumerator:: NPY_FLOAT32
+ .. c:enumerator:: NPY_FLOAT
+ .. c:enumerator:: NPY_FLOAT32
- The enumeration value for a 32-bit/4-byte IEEE 754 compatible floating
- point type.
+ The enumeration value for a 32-bit/4-byte IEEE 754 compatible floating
+ point type.
-.. c:enumerator:: NPY_DOUBLE
-.. c:enumerator:: NPY_FLOAT64
+ .. c:enumerator:: NPY_DOUBLE
+ .. c:enumerator:: NPY_FLOAT64
- The enumeration value for a 64-bit/8-byte IEEE 754 compatible floating
- point type.
+ The enumeration value for a 64-bit/8-byte IEEE 754 compatible floating
+ point type.
-.. c:enumerator:: NPY_LONGDOUBLE
+ .. c:enumerator:: NPY_LONGDOUBLE
- The enumeration value for a platform-specific floating point type which is
- at least as large as NPY_DOUBLE, but larger on many platforms.
+ The enumeration value for a platform-specific floating point type which is
+ at least as large as NPY_DOUBLE, but larger on many platforms.
-.. c:enumerator:: NPY_CFLOAT
-.. c:enumerator:: NPY_COMPLEX64
+ .. c:enumerator:: NPY_CFLOAT
+ .. c:enumerator:: NPY_COMPLEX64
- The enumeration value for a 64-bit/8-byte complex type made up of
- two NPY_FLOAT values.
+ The enumeration value for a 64-bit/8-byte complex type made up of
+ two NPY_FLOAT values.
-.. c:enumerator:: NPY_CDOUBLE
-.. c:enumerator:: NPY_COMPLEX128
+ .. c:enumerator:: NPY_CDOUBLE
+ .. c:enumerator:: NPY_COMPLEX128
- The enumeration value for a 128-bit/16-byte complex type made up of
- two NPY_DOUBLE values.
+ The enumeration value for a 128-bit/16-byte complex type made up of
+ two NPY_DOUBLE values.
-.. c:enumerator:: NPY_CLONGDOUBLE
+ .. c:enumerator:: NPY_CLONGDOUBLE
- The enumeration value for a platform-specific complex floating point
- type which is made up of two NPY_LONGDOUBLE values.
+ The enumeration value for a platform-specific complex floating point
+ type which is made up of two NPY_LONGDOUBLE values.
-.. c:enumerator:: NPY_DATETIME
+ .. c:enumerator:: NPY_DATETIME
- The enumeration value for a data type which holds dates or datetimes with
- a precision based on selectable date or time units.
+ The enumeration value for a data type which holds dates or datetimes with
+ a precision based on selectable date or time units.
-.. c:enumerator:: NPY_TIMEDELTA
+ .. c:enumerator:: NPY_TIMEDELTA
- The enumeration value for a data type which holds lengths of times in
- integers of selectable date or time units.
+ The enumeration value for a data type which holds lengths of times in
+ integers of selectable date or time units.
-.. c:enumerator:: NPY_STRING
+ .. c:enumerator:: NPY_STRING
- The enumeration value for ASCII strings of a selectable size. The
- strings have a fixed maximum size within a given array.
+ The enumeration value for ASCII strings of a selectable size. The
+ strings have a fixed maximum size within a given array.
-.. c:enumerator:: NPY_UNICODE
+ .. c:enumerator:: NPY_UNICODE
- The enumeration value for UCS4 strings of a selectable size. The
- strings have a fixed maximum size within a given array.
+ The enumeration value for UCS4 strings of a selectable size. The
+ strings have a fixed maximum size within a given array.
-.. c:enumerator:: NPY_OBJECT
+ .. c:enumerator:: NPY_OBJECT
- The enumeration value for references to arbitrary Python objects.
+ The enumeration value for references to arbitrary Python objects.
-.. c:enumerator:: NPY_VOID
+ .. c:enumerator:: NPY_VOID
- Primarily used to hold struct dtypes, but can contain arbitrary
- binary data.
+ Primarily used to hold struct dtypes, but can contain arbitrary
+ binary data.
-Some useful aliases of the above types are
+ Some useful aliases of the above types are
-.. c:enumerator:: NPY_INTP
+ .. c:enumerator:: NPY_INTP
- The enumeration value for a signed integer type which is the same
- size as a (void \*) pointer. This is the type used by all
- arrays of indices.
+ The enumeration value for a signed integer type which is the same
+ size as a (void \*) pointer. This is the type used by all
+ arrays of indices.
-.. c:enumerator:: NPY_UINTP
+ .. c:enumerator:: NPY_UINTP
- The enumeration value for an unsigned integer type which is the
- same size as a (void \*) pointer.
+ The enumeration value for an unsigned integer type which is the
+ same size as a (void \*) pointer.
-.. c:enumerator:: NPY_MASK
+ .. c:enumerator:: NPY_MASK
- The enumeration value of the type used for masks, such as with
- the :c:data:`NPY_ITER_ARRAYMASK` iterator flag. This is equivalent
- to :c:data:`NPY_UINT8`.
+ The enumeration value of the type used for masks, such as with
+ the :c:data:`NPY_ITER_ARRAYMASK` iterator flag. This is equivalent
+ to :c:data:`NPY_UINT8`.
-.. c:enumerator:: NPY_DEFAULT_TYPE
+ .. c:enumerator:: NPY_DEFAULT_TYPE
- The default type to use when no dtype is explicitly specified, for
- example when calling np.zero(shape). This is equivalent to
- :c:data:`NPY_DOUBLE`.
+ The default type to use when no dtype is explicitly specified, for
+ example when calling np.zeros(shape). This is equivalent to
+ :c:data:`NPY_DOUBLE`.
Other useful related constants are
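As a Python-level sketch, each ``NPY_{NAME}`` value is exposed as ``dtype.num``, and ``NPY_DEFAULT_TYPE`` is what array creation falls back to::

    import numpy as np

    print(np.dtype(np.bool_).num)    # NPY_BOOL == 0
    print(np.dtype(np.float64).num)  # NPY_DOUBLE
    assert np.zeros(3).dtype == np.float64  # NPY_DEFAULT_TYPE is double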
diff --git a/doc/source/reference/c-api/types-and-structures.rst b/doc/source/reference/c-api/types-and-structures.rst
index 66bc04f05..2bcc61108 100644
--- a/doc/source/reference/c-api/types-and-structures.rst
+++ b/doc/source/reference/c-api/types-and-structures.rst
@@ -122,7 +122,7 @@ PyArray_Type and PyArrayObject
``ndarraytypes.h`` points to this data member. :c:data:`NPY_MAXDIMS`
is the largest number of dimensions for any array.
- .. c:member:: npy_intp dimensions
+ .. c:member:: npy_intp *dimensions
An array of integers providing the shape in each dimension as
long as nd :math:`\geq` 1. The integer is always large enough
@@ -1253,7 +1253,7 @@ ScalarArrayTypes
----------------
There is a Python type for each of the different built-in data types
-that can be present in the array Most of these are simple wrappers
+that can be present in the array. Most of these are simple wrappers
around the corresponding data type in C. The C-names for these types
are ``Py{TYPE}ArrType_Type`` where ``{TYPE}`` can be
diff --git a/doc/source/reference/random/extending.rst b/doc/source/reference/random/extending.rst
index 6bb941496..998faf80a 100644
--- a/doc/source/reference/random/extending.rst
+++ b/doc/source/reference/random/extending.rst
@@ -25,7 +25,7 @@ provided by ``ctypes.next_double``.
Both CTypes and CFFI allow the more complicated distributions to be used
directly in Numba after compiling the file distributions.c into a ``DLL`` or
``so``. An example showing the use of a more complicated distribution is in
-the `examples` section below.
+the `Examples`_ section below.
.. _random_cython:
@@ -113,6 +113,6 @@ Examples
.. toctree::
Numba <examples/numba>
- CFFI + Numba <examples/numba_cffi>
+ CFFI + Numba <examples/numba_cffi>
Cython <examples/cython/index>
CFFI <examples/cffi>
diff --git a/doc/source/reference/routines.dual.rst b/doc/source/reference/routines.dual.rst
deleted file mode 100644
index 18c7791d0..000000000
--- a/doc/source/reference/routines.dual.rst
+++ /dev/null
@@ -1,47 +0,0 @@
-Optionally SciPy-accelerated routines (:mod:`numpy.dual`)
-=========================================================
-
-.. automodule:: numpy.dual
-
-Linear algebra
---------------
-
-.. currentmodule:: numpy.linalg
-
-.. autosummary::
-
- cholesky
- det
- eig
- eigh
- eigvals
- eigvalsh
- inv
- lstsq
- norm
- pinv
- solve
- svd
-
-FFT
----
-
-.. currentmodule:: numpy.fft
-
-.. autosummary::
-
- fft
- fft2
- fftn
- ifft
- ifft2
- ifftn
-
-Other
------
-
-.. currentmodule:: numpy
-
-.. autosummary::
-
- i0
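With ``numpy.dual`` removed, imports migrate to the concrete submodules (or their SciPy counterparts); for example::

    # Before (removed):  from numpy.dual import inv, fft
    from numpy.linalg import inv
    from numpy.fft import fft
    # or, when SciPy's implementations are preferred:
    #     from scipy.linalg import inv
    #     from scipy.fft import fft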
diff --git a/doc/source/reference/routines.ma.rst b/doc/source/reference/routines.ma.rst
index d503cc243..fd22a74aa 100644
--- a/doc/source/reference/routines.ma.rst
+++ b/doc/source/reference/routines.ma.rst
@@ -31,6 +31,7 @@ From existing data
ma.fromfunction
ma.MaskedArray.copy
+ ma.diagflat
Ones and zeros
@@ -72,6 +73,9 @@ Inspecting the array
ma.isMaskedArray
ma.isMA
ma.isarray
+ ma.isin
+ ma.in1d
+ ma.unique
ma.MaskedArray.all
@@ -394,6 +398,17 @@ Clipping and rounding
ma.MaskedArray.clip
ma.MaskedArray.round
+Set operations
+~~~~~~~~~~~~~~
+.. autosummary::
+ :toctree: generated/
+
+ ma.intersect1d
+ ma.setdiff1d
+ ma.setxor1d
+ ma.union1d
+
Miscellanea
~~~~~~~~~~~
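A short sketch of the newly documented set routines; the exact placement of masked entries is left to the routines, so no outputs are asserted here::

    import numpy as np

    a = np.ma.array([1, 2, 2, 3], mask=[0, 0, 0, 1])
    b = np.ma.array([2, 3, 4])
    print(np.ma.union1d(a, b))      # masked entries are treated as equal
    print(np.ma.intersect1d(a, b))
    print(np.ma.setdiff1d(a, b))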
diff --git a/doc/source/reference/routines.rst b/doc/source/reference/routines.rst
index 24117895b..72ed80972 100644
--- a/doc/source/reference/routines.rst
+++ b/doc/source/reference/routines.rst
@@ -24,7 +24,6 @@ indentation.
routines.ctypeslib
routines.datetime
routines.dtype
- routines.dual
routines.emath
routines.err
routines.fft
diff --git a/doc/source/user/basics.creation.rst b/doc/source/user/basics.creation.rst
index 40b4e378d..3ee501889 100644
--- a/doc/source/user/basics.creation.rst
+++ b/doc/source/user/basics.creation.rst
@@ -75,8 +75,8 @@ the computation, here ``uint32`` and ``int32`` can both be represented in
as ``int64``.
The default NumPy behavior is to create arrays in either 32 or 64-bit signed
-integers (platform dependent and matches C int size) or double precision
-floating point numbers, int32/int64 and float, respectively. If you expect your
+integers (platform dependent and matches C ``long`` size) or double precision
+floating point numbers. If you expect your
integer arrays to be a specific type, then you need to specify the dtype while
you create the array.
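For example::

    import numpy as np

    print(np.array([1, 2, 3]).dtype)   # platform dependent: int32 or int64
    print(np.array([1.0, 2.0]).dtype)  # float64 everywhere
    arr = np.array([1, 2, 3], dtype=np.uint8)  # request the type explicitly
    print(arr.dtype)                   # uint8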
diff --git a/doc/source/user/quickstart.rst b/doc/source/user/quickstart.rst
index d138242d7..783d5a447 100644
--- a/doc/source/user/quickstart.rst
+++ b/doc/source/user/quickstart.rst
@@ -1482,7 +1482,7 @@ Further reading
- The `Python tutorial <https://docs.python.org/tutorial/>`__
- :ref:`reference`
-- `SciPy Tutorial <https://docs.scipy.org/doc/scipy/reference/tutorial/index.html>`__
+- `SciPy Tutorial <https://docs.scipy.org/doc/scipy/tutorial/index.html>`__
- `SciPy Lecture Notes <https://scipy-lectures.org>`__
- A `matlab, R, IDL, NumPy/SciPy dictionary <http://mathesaurus.sf.net/>`__
- :doc:`tutorial-svd <numpy-tutorials:content/tutorial-svd>`
diff --git a/meson.build b/meson.build
index c1fc1dad8..47e71efc0 100644
--- a/meson.build
+++ b/meson.build
@@ -11,7 +11,7 @@ project(
'buildtype=debugoptimized',
'b_ndebug=if-release',
'c_std=c99',
- 'cpp_std=c++14',
+ 'cpp_std=c++17',
'blas=openblas',
'lapack=openblas',
'pkgconfig.relocatable=true',
diff --git a/numpy/__init__.cython-30.pxd b/numpy/__init__.cython-30.pxd
index ec4c563ad..0dd2fff2b 100644
--- a/numpy/__init__.cython-30.pxd
+++ b/numpy/__init__.cython-30.pxd
@@ -515,7 +515,6 @@ cdef extern from "numpy/arrayobject.h":
void* PyArray_GETPTR3(ndarray m, npy_intp i, npy_intp j, npy_intp k) nogil
void* PyArray_GETPTR4(ndarray m, npy_intp i, npy_intp j, npy_intp k, npy_intp l) nogil
- void PyArray_XDECREF_ERR(ndarray)
# Cannot be supported due to out arg
# void PyArray_DESCR_REPLACE(descr)
diff --git a/numpy/__init__.pxd b/numpy/__init__.pxd
index a6990cc68..47d9294c1 100644
--- a/numpy/__init__.pxd
+++ b/numpy/__init__.pxd
@@ -473,7 +473,6 @@ cdef extern from "numpy/arrayobject.h":
void* PyArray_GETPTR3(ndarray m, npy_intp i, npy_intp j, npy_intp k) nogil
void* PyArray_GETPTR4(ndarray m, npy_intp i, npy_intp j, npy_intp k, npy_intp l) nogil
- void PyArray_XDECREF_ERR(ndarray)
# Cannot be supported due to out arg
# void PyArray_DESCR_REPLACE(descr)
diff --git a/numpy/__init__.py b/numpy/__init__.py
index e41f7eae6..1880ceace 100644
--- a/numpy/__init__.py
+++ b/numpy/__init__.py
@@ -74,10 +74,6 @@ test
Run numpy unittests
show_config
Show numpy build configuration
-dual
- Overwrite certain functions with high-performance SciPy tools.
- Note: `numpy.dual` is deprecated. Use the functions from NumPy or Scipy
- directly instead of importing them from `numpy.dual`.
matlib
Make everything matrices.
__version__
diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi
index ee5fbb601..8627f6c60 100644
--- a/numpy/__init__.pyi
+++ b/numpy/__init__.pyi
@@ -1921,7 +1921,6 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]):
def __neg__(self: NDArray[object_]) -> Any: ...
# Binary ops
- # NOTE: `ndarray` does not implement `__imatmul__`
@overload
def __matmul__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... # type: ignore[misc]
@overload
@@ -2508,6 +2507,19 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]):
@overload
def __ior__(self: NDArray[object_], other: Any) -> NDArray[object_]: ...
+ @overload
+ def __imatmul__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ...
+ @overload
+ def __imatmul__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[_NBit1]]: ...
+ @overload
+ def __imatmul__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ...
+ @overload
+ def __imatmul__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co) -> NDArray[floating[_NBit1]]: ...
+ @overload
+ def __imatmul__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co) -> NDArray[complexfloating[_NBit1, _NBit1]]: ...
+ @overload
+ def __imatmul__(self: NDArray[object_], other: Any) -> NDArray[object_]: ...
+
def __dlpack__(self: NDArray[number[Any]], *, stream: None = ...) -> _PyCapsule: ...
def __dlpack_device__(self) -> tuple[int, L[0]]: ...
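A minimal sketch of the kind of code these new overloads are meant to accept::

    import numpy as np
    from numpy.typing import NDArray

    a: NDArray[np.float64] = np.ones((2, 2))
    b: NDArray[np.float64] = np.eye(2)
    a @= b  # __imatmul__ preserves the NDArray[float64] type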
diff --git a/numpy/array_api/_array_object.py b/numpy/array_api/_array_object.py
index eee117be6..a949b5977 100644
--- a/numpy/array_api/_array_object.py
+++ b/numpy/array_api/_array_object.py
@@ -850,23 +850,13 @@ class Array:
"""
Performs the operation __imatmul__.
"""
- # Note: NumPy does not implement __imatmul__.
-
# matmul is not defined for scalars, but without this, we may get
# the wrong error message from asarray.
other = self._check_allowed_dtypes(other, "numeric", "__imatmul__")
if other is NotImplemented:
return other
-
- # __imatmul__ can only be allowed when it would not change the shape
- # of self.
- other_shape = other.shape
- if self.shape == () or other_shape == ():
- raise ValueError("@= requires at least one dimension")
- if len(other_shape) == 1 or other_shape[-1] != other_shape[-2]:
- raise ValueError("@= cannot change the shape of the input array")
- self._array[:] = self._array.__matmul__(other._array)
- return self
+ res = self._array.__imatmul__(other._array)
+ return self.__class__._new(res)
def __rmatmul__(self: Array, other: Array, /) -> Array:
"""
diff --git a/numpy/core/_add_newdocs.py b/numpy/core/_add_newdocs.py
index f2a2216c3..bd7c4f519 100644
--- a/numpy/core/_add_newdocs.py
+++ b/numpy/core/_add_newdocs.py
@@ -2935,7 +2935,7 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('T',
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array__',
- """ a.__array__([dtype], /) -> reference if type unchanged, copy otherwise.
+ """ a.__array__([dtype], /)
Returns either a new reference to self if dtype is not given or a new array
of provided data type if dtype is different from the current dtype of the
@@ -3008,7 +3008,7 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('__class_getitem__',
add_newdoc('numpy.core.multiarray', 'ndarray', ('__deepcopy__',
- """a.__deepcopy__(memo, /) -> Deep copy of array.
+ """a.__deepcopy__(memo, /)
Used if :func:`copy.deepcopy` is called on an array.
diff --git a/numpy/core/einsumfunc.py b/numpy/core/einsumfunc.py
index d6c5885b8..01966f0fe 100644
--- a/numpy/core/einsumfunc.py
+++ b/numpy/core/einsumfunc.py
@@ -728,7 +728,7 @@ def einsum_path(*operands, optimize='greedy', einsum_call=False):
* if False no optimization is taken
* if True defaults to the 'greedy' algorithm
* 'optimal' An algorithm that combinatorially explores all possible
- ways of contracting the listed tensors and choosest the least costly
+ ways of contracting the listed tensors and chooses the least costly
path. Scales exponentially with the number of terms in the
contraction.
* 'greedy' An algorithm that chooses the best pair contraction
diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py
index b36667427..4608bc6de 100644
--- a/numpy/core/fromnumeric.py
+++ b/numpy/core/fromnumeric.py
@@ -238,24 +238,9 @@ def reshape(a, newshape, order='C'):
Notes
-----
- It is not always possible to change the shape of an array without
- copying the data. If you want an error to be raised when the data is copied,
- you should assign the new shape to the shape attribute of the array::
-
- >>> a = np.zeros((10, 2))
-
- # A transpose makes the array non-contiguous
- >>> b = a.T
-
- # Taking a view makes it possible to modify the shape without modifying
- # the initial object.
- >>> c = b.view()
- >>> c.shape = (20)
- Traceback (most recent call last):
- ...
- AttributeError: Incompatible shape for in-place modification. Use
- `.reshape()` to make a copy with the desired shape.
-
+ It is not always possible to change the shape of an array without copying
+ the data.
+
The `order` keyword gives the index ordering both for *fetching* the values
from `a`, and then *placing* the values into the output array.
For example, let's say you have an array:
@@ -3802,7 +3787,7 @@ def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, *,
# Aliases of other functions. Provided unique docstrings
-# are reference purposes only. Wherever possible,
+# are for reference purposes only. Wherever possible,
# avoid using them.
diff --git a/numpy/core/setup.py b/numpy/core/setup.py
index 52b17bfc8..680c2a5f6 100644
--- a/numpy/core/setup.py
+++ b/numpy/core/setup.py
@@ -20,7 +20,7 @@ from setup_common import * # noqa: F403
NPY_RELAXED_STRIDES_CHECKING = (os.environ.get('NPY_RELAXED_STRIDES_CHECKING', "1") != "0")
if not NPY_RELAXED_STRIDES_CHECKING:
raise SystemError(
- "Support for NPY_RELAXED_STRIDES_CHECKING=0 has been remove as of "
+ "Support for NPY_RELAXED_STRIDES_CHECKING=0 has been removed as of "
"NumPy 1.23. This error will eventually be removed entirely.")
# Put NPY_RELAXED_STRIDES_DEBUG=1 in the environment if you want numpy to use a
@@ -405,7 +405,6 @@ def configuration(parent_package='',top_path=None):
exec_mod_from_location)
from numpy.distutils.system_info import (get_info, blas_opt_info,
lapack_opt_info)
- from numpy.distutils.ccompiler_opt import NPY_CXX_FLAGS
from numpy.version import release as is_released
config = Configuration('core', parent_package, top_path)
@@ -658,44 +657,6 @@ def configuration(parent_package='',top_path=None):
# but we cannot use add_installed_pkg_config here either, so we only
# update the substitution dictionary during npymath build
config_cmd = config.get_config_cmd()
- # Check that the toolchain works, to fail early if it doesn't
- # (avoid late errors with MATHLIB which are confusing if the
- # compiler does not work).
- for lang, test_code, note in (
- ('c', 'int main(void) { return 0;}', ''),
- ('c++', (
- 'int main(void)'
- '{ auto x = 0.0; return static_cast<int>(x); }'
- ), (
- 'note: A compiler with support for C++11 language '
- 'features is required.'
- )
- ),
- ):
- is_cpp = lang == 'c++'
- if is_cpp:
- # this a workaround to get rid of invalid c++ flags
- # without doing big changes to config.
- # c tested first, compiler should be here
- bk_c = config_cmd.compiler
- config_cmd.compiler = bk_c.cxx_compiler()
-
- # Check that Linux compiler actually support the default flags
- if hasattr(config_cmd.compiler, 'compiler'):
- config_cmd.compiler.compiler.extend(NPY_CXX_FLAGS)
- config_cmd.compiler.compiler_so.extend(NPY_CXX_FLAGS)
-
- st = config_cmd.try_link(test_code, lang=lang)
- if not st:
- # rerun the failing command in verbose mode
- config_cmd.compiler.verbose = True
- config_cmd.try_link(test_code, lang=lang)
- raise RuntimeError(
- f"Broken toolchain: cannot link a simple {lang.upper()} "
- f"program. {note}"
- )
- if is_cpp:
- config_cmd.compiler = bk_c
mlibs = check_mathlib(config_cmd)
posix_mlib = ' '.join(['-l%s' % l for l in mlibs])
@@ -1067,8 +1028,7 @@ def configuration(parent_package='',top_path=None):
common_deps,
libraries=['npymath'],
extra_objects=svml_objs,
- extra_info=extra_info,
- extra_cxx_compile_args=NPY_CXX_FLAGS)
+ extra_info=extra_info)
#######################################################################
# umath_tests module #
diff --git a/numpy/core/src/common/ucsnarrow.h b/numpy/core/src/common/ucsnarrow.h
index 6fe157199..4b17a2809 100644
--- a/numpy/core/src/common/ucsnarrow.h
+++ b/numpy/core/src/common/ucsnarrow.h
@@ -2,6 +2,6 @@
#define NUMPY_CORE_SRC_COMMON_NPY_UCSNARROW_H_
NPY_NO_EXPORT PyUnicodeObject *
-PyUnicode_FromUCS4(char *src, Py_ssize_t size, int swap, int align);
+PyUnicode_FromUCS4(char const *src, Py_ssize_t size, int swap, int align);
#endif /* NUMPY_CORE_SRC_COMMON_NPY_UCSNARROW_H_ */
diff --git a/numpy/core/src/multiarray/array_coercion.c b/numpy/core/src/multiarray/array_coercion.c
index d6bee1a7b..d55a5752b 100644
--- a/numpy/core/src/multiarray/array_coercion.c
+++ b/numpy/core/src/multiarray/array_coercion.c
@@ -878,7 +878,8 @@ find_descriptor_from_array(
* means that legacy behavior is used: The dtype instances "S0", "U0", and
* "V0" are converted to mean the DType classes instead.
* When dtype != NULL, this path is ignored, and the function does nothing
- * unless descr == NULL.
+ * unless descr == NULL. If both descr and dtype are NULL, it returns the
+ * descriptor for the array.
*
* This function is identical to normal casting using only the dtype, however,
* it supports inspecting the elements when the array has object dtype
@@ -927,7 +928,7 @@ PyArray_AdaptDescriptorToArray(
/* This is an object array but contained no elements, use default */
new_descr = NPY_DT_CALL_default_descr(dtype);
}
- Py_DECREF(dtype);
+ Py_XDECREF(dtype);
return new_descr;
}
@@ -1280,7 +1281,9 @@ PyArray_DiscoverDTypeAndShape(
}
if (requested_descr != NULL) {
- assert(fixed_DType == NPY_DTYPE(requested_descr));
+ if (fixed_DType != NULL) {
+ assert(fixed_DType == NPY_DTYPE(requested_descr));
+ }
/* The output descriptor must be the input. */
Py_INCREF(requested_descr);
*out_descr = requested_descr;
diff --git a/numpy/core/src/multiarray/buffer.c b/numpy/core/src/multiarray/buffer.c
index 76652550a..c9d881e16 100644
--- a/numpy/core/src/multiarray/buffer.c
+++ b/numpy/core/src/multiarray/buffer.c
@@ -17,6 +17,7 @@
#include "numpyos.h"
#include "arrayobject.h"
#include "scalartypes.h"
+#include "dtypemeta.h"
/*************************************************************************
**************** Implement Buffer Protocol ****************************
@@ -417,9 +418,16 @@ _buffer_format_string(PyArray_Descr *descr, _tmp_string_t *str,
break;
}
default:
- PyErr_Format(PyExc_ValueError,
- "cannot include dtype '%c' in a buffer",
- descr->type);
+ if (NPY_DT_is_legacy(NPY_DTYPE(descr))) {
+ PyErr_Format(PyExc_ValueError,
+ "cannot include dtype '%c' in a buffer",
+ descr->type);
+ }
+ else {
+ PyErr_Format(PyExc_ValueError,
+ "cannot include dtype '%s' in a buffer",
+ ((PyTypeObject*)NPY_DTYPE(descr))->tp_name);
+ }
return -1;
}
}
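From Python, the legacy branch is observable with any built-in dtype that cannot be exported through the buffer protocol; the new branch would fire for a (hypothetical) third-party dtype and name its class instead. A sketch::

    import numpy as np

    try:
        memoryview(np.array([1], dtype="datetime64[s]"))
    except ValueError as e:
        print(e)  # cannot include dtype 'M' in a buffer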
diff --git a/numpy/core/src/multiarray/convert.c b/numpy/core/src/multiarray/convert.c
index e99bc3fe4..9e0c9fb60 100644
--- a/numpy/core/src/multiarray/convert.c
+++ b/numpy/core/src/multiarray/convert.c
@@ -21,6 +21,7 @@
#include "convert.h"
#include "array_coercion.h"
+#include "refcount.h"
int
fallocate(int fd, int mode, off_t offset, off_t len);
@@ -416,7 +417,7 @@ PyArray_FillWithScalar(PyArrayObject *arr, PyObject *obj)
descr, value);
if (PyDataType_REFCHK(descr)) {
- PyArray_Item_XDECREF(value, descr);
+ PyArray_ClearBuffer(descr, value, 0, 1, 1);
}
PyMem_FREE(value_buffer_heap);
return retcode;
diff --git a/numpy/core/src/multiarray/ctors.c b/numpy/core/src/multiarray/ctors.c
index 38af60427..a6335c783 100644
--- a/numpy/core/src/multiarray/ctors.c
+++ b/numpy/core/src/multiarray/ctors.c
@@ -1605,6 +1605,37 @@ NPY_NO_EXPORT PyObject *
PyArray_FromAny(PyObject *op, PyArray_Descr *newtype, int min_depth,
int max_depth, int flags, PyObject *context)
{
+ npy_dtype_info dt_info = {NULL, NULL};
+
+ int res = PyArray_ExtractDTypeAndDescriptor(
+ newtype, &dt_info.descr, &dt_info.dtype);
+
+ Py_XDECREF(newtype);
+
+ if (res < 0) {
+ Py_XDECREF(dt_info.descr);
+ Py_XDECREF(dt_info.dtype);
+ return NULL;
+ }
+
+ PyObject* ret = PyArray_FromAny_int(op, dt_info.descr, dt_info.dtype,
+ min_depth, max_depth, flags, context);
+
+ Py_XDECREF(dt_info.descr);
+ Py_XDECREF(dt_info.dtype);
+ return ret;
+}
+
+/*
+ * Internal version of PyArray_FromAny that accepts a dtypemeta. Borrows
+ * references to the descriptor and dtype.
+ */
+
+NPY_NO_EXPORT PyObject *
+PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr,
+ PyArray_DTypeMeta *in_DType, int min_depth, int max_depth,
+ int flags, PyObject *context)
+{
/*
* This is the main code to make a NumPy array from a Python
* Object. It is called from many different places.
@@ -1620,26 +1651,15 @@ PyArray_FromAny(PyObject *op, PyArray_Descr *newtype, int min_depth,
return NULL;
}
- PyArray_Descr *fixed_descriptor;
- PyArray_DTypeMeta *fixed_DType;
- if (PyArray_ExtractDTypeAndDescriptor(newtype,
- &fixed_descriptor, &fixed_DType) < 0) {
- Py_XDECREF(newtype);
- return NULL;
- }
- Py_XDECREF(newtype);
-
ndim = PyArray_DiscoverDTypeAndShape(op,
- NPY_MAXDIMS, dims, &cache, fixed_DType, fixed_descriptor, &dtype,
+ NPY_MAXDIMS, dims, &cache, in_DType, in_descr, &dtype,
flags & NPY_ARRAY_ENSURENOCOPY);
- Py_XDECREF(fixed_descriptor);
- Py_XDECREF(fixed_DType);
if (ndim < 0) {
return NULL;
}
- if (NPY_UNLIKELY(fixed_descriptor != NULL && PyDataType_HASSUBARRAY(dtype))) {
+ if (NPY_UNLIKELY(in_descr != NULL && PyDataType_HASSUBARRAY(dtype))) {
/*
* When a subarray dtype was passed in, its dimensions are appended
* to the array dimension (causing a dimension mismatch).
@@ -1737,7 +1757,7 @@ PyArray_FromAny(PyObject *op, PyArray_Descr *newtype, int min_depth,
}
else if (cache == NULL && PyArray_IsScalar(op, Void) &&
!(((PyVoidScalarObject *)op)->flags & NPY_ARRAY_OWNDATA) &&
- newtype == NULL) {
+ ((in_descr == NULL) && (in_DType == NULL))) {
/*
* Special case, we return a *view* into void scalars, mainly to
* allow things similar to the "reversed" assignment:
@@ -1768,7 +1788,7 @@ PyArray_FromAny(PyObject *op, PyArray_Descr *newtype, int min_depth,
return NULL;
}
- if (cache == NULL && newtype != NULL &&
+ if (cache == NULL && in_descr != NULL &&
PyDataType_ISSIGNED(dtype) &&
PyArray_IsScalar(op, Generic)) {
assert(ndim == 0);
@@ -1899,7 +1919,6 @@ PyArray_FromAny(PyObject *op, PyArray_Descr *newtype, int min_depth,
* NPY_ARRAY_FORCECAST will cause a cast to occur regardless of whether or not
* it is safe.
*
- * context is passed through to PyArray_GetArrayParamsFromObject
*/
/*NUMPY_API
@@ -1909,24 +1928,57 @@ NPY_NO_EXPORT PyObject *
PyArray_CheckFromAny(PyObject *op, PyArray_Descr *descr, int min_depth,
int max_depth, int requires, PyObject *context)
{
+ npy_dtype_info dt_info = {NULL, NULL};
+
+ int res = PyArray_ExtractDTypeAndDescriptor(
+ descr, &dt_info.descr, &dt_info.dtype);
+
+ Py_XDECREF(descr);
+
+ if (res < 0) {
+ Py_XDECREF(dt_info.descr);
+ Py_XDECREF(dt_info.dtype);
+ return NULL;
+ }
+
+ PyObject* ret = PyArray_CheckFromAny_int(
+ op, dt_info.descr, dt_info.dtype, min_depth, max_depth, requires,
+ context);
+
+ Py_XDECREF(dt_info.descr);
+ Py_XDECREF(dt_info.dtype);
+ return ret;
+}
+
+/*
+ * Internal version of PyArray_CheckFromAny that accepts a dtypemeta. Borrows
+ * references to the descriptor and dtype.
+ */
+
+NPY_NO_EXPORT PyObject *
+PyArray_CheckFromAny_int(PyObject *op, PyArray_Descr *in_descr,
+ PyArray_DTypeMeta *in_DType, int min_depth,
+ int max_depth, int requires, PyObject *context)
+{
PyObject *obj;
if (requires & NPY_ARRAY_NOTSWAPPED) {
- if (!descr && PyArray_Check(op) &&
+ if (!in_descr && PyArray_Check(op) &&
PyArray_ISBYTESWAPPED((PyArrayObject* )op)) {
- descr = PyArray_DescrNew(PyArray_DESCR((PyArrayObject *)op));
- if (descr == NULL) {
+ in_descr = PyArray_DescrNew(PyArray_DESCR((PyArrayObject *)op));
+ if (in_descr == NULL) {
return NULL;
}
}
- else if (descr && !PyArray_ISNBO(descr->byteorder)) {
- PyArray_DESCR_REPLACE(descr);
+ else if (in_descr && !PyArray_ISNBO(in_descr->byteorder)) {
+ PyArray_DESCR_REPLACE(in_descr);
}
- if (descr && descr->byteorder != NPY_IGNORE) {
- descr->byteorder = NPY_NATIVE;
+ if (in_descr && in_descr->byteorder != NPY_IGNORE) {
+ in_descr->byteorder = NPY_NATIVE;
}
}
- obj = PyArray_FromAny(op, descr, min_depth, max_depth, requires, context);
+ obj = PyArray_FromAny_int(op, in_descr, in_DType, min_depth,
+ max_depth, requires, context);
if (obj == NULL) {
return NULL;
}
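At the Python level, this split is what lets ``dtype=`` accept a DType class rather than only a dtype instance; a sketch::

    import numpy as np

    Float64DType = type(np.dtype("float64"))  # the DType class, not an instance
    arr = np.array([1, 2, 3], dtype=Float64DType)
    print(arr.dtype)  # float64, adapted from the class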
diff --git a/numpy/core/src/multiarray/ctors.h b/numpy/core/src/multiarray/ctors.h
index 98160b1cc..22020e26a 100644
--- a/numpy/core/src/multiarray/ctors.h
+++ b/numpy/core/src/multiarray/ctors.h
@@ -36,10 +36,20 @@ _array_from_array_like(PyObject *op,
int never_copy);
NPY_NO_EXPORT PyObject *
+PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr,
+ PyArray_DTypeMeta *in_DType, int min_depth, int max_depth,
+ int flags, PyObject *context);
+
+NPY_NO_EXPORT PyObject *
PyArray_FromAny(PyObject *op, PyArray_Descr *newtype, int min_depth,
int max_depth, int flags, PyObject *context);
NPY_NO_EXPORT PyObject *
+PyArray_CheckFromAny_int(PyObject *op, PyArray_Descr *in_descr,
+ PyArray_DTypeMeta *in_DType, int min_depth,
+ int max_depth, int requires, PyObject *context);
+
+NPY_NO_EXPORT PyObject *
PyArray_CheckFromAny(PyObject *op, PyArray_Descr *descr, int min_depth,
int max_depth, int requires, PyObject *context);
diff --git a/numpy/core/src/multiarray/methods.c b/numpy/core/src/multiarray/methods.c
index f518f3a02..93b290020 100644
--- a/numpy/core/src/multiarray/methods.c
+++ b/numpy/core/src/multiarray/methods.c
@@ -28,6 +28,7 @@
#include "strfuncs.h"
#include "array_assign.h"
#include "npy_dlpack.h"
+#include "multiarraymodule.h"
#include "methods.h"
#include "alloc.h"
@@ -1102,7 +1103,7 @@ any_array_ufunc_overrides(PyObject *args, PyObject *kwds)
int nin, nout;
PyObject *out_kwd_obj;
PyObject *fast;
- PyObject **in_objs, **out_objs;
+ PyObject **in_objs, **out_objs, *where_obj;
/* check inputs */
nin = PyTuple_Size(args);
@@ -1133,6 +1134,17 @@ any_array_ufunc_overrides(PyObject *args, PyObject *kwds)
}
}
Py_DECREF(out_kwd_obj);
+ /* check where if it exists */
+ where_obj = PyDict_GetItemWithError(kwds, npy_ma_str_where);
+ if (where_obj == NULL) {
+ if (PyErr_Occurred()) {
+ return -1;
+ }
+ } else {
+ if (PyUFunc_HasOverride(where_obj)) {
+ return 1;
+ }
+ }
return 0;
}
diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c
index e85f8affa..98ca15ac4 100644
--- a/numpy/core/src/multiarray/multiarraymodule.c
+++ b/numpy/core/src/multiarray/multiarraymodule.c
@@ -1042,12 +1042,11 @@ PyArray_MatrixProduct2(PyObject *op1, PyObject *op2, PyArrayObject* out)
#endif
if (PyArray_NDIM(ap1) == 0 || PyArray_NDIM(ap2) == 0) {
- result = (PyArray_NDIM(ap1) == 0 ? ap1 : ap2);
- result = (PyArrayObject *)Py_TYPE(result)->tp_as_number->nb_multiply(
- (PyObject *)ap1, (PyObject *)ap2);
+ PyObject *mul_res = PyObject_CallFunctionObjArgs(
+ n_ops.multiply, ap1, ap2, out, NULL);
Py_DECREF(ap1);
Py_DECREF(ap2);
- return (PyObject *)result;
+ return mul_res;
}
l = PyArray_DIMS(ap1)[PyArray_NDIM(ap1) - 1];
if (PyArray_NDIM(ap2) > 1) {
@@ -1635,25 +1634,35 @@ _prepend_ones(PyArrayObject *arr, int nd, int ndmin, NPY_ORDER order)
((order) == NPY_CORDER && PyArray_IS_C_CONTIGUOUS(op)) || \
((order) == NPY_FORTRANORDER && PyArray_IS_F_CONTIGUOUS(op)))
+
static inline PyObject *
_array_fromobject_generic(
- PyObject *op, PyArray_Descr *type, _PyArray_CopyMode copy, NPY_ORDER order,
- npy_bool subok, int ndmin)
+ PyObject *op, PyArray_Descr *in_descr, PyArray_DTypeMeta *in_DType,
+ _PyArray_CopyMode copy, NPY_ORDER order, npy_bool subok, int ndmin)
{
PyArrayObject *oparr = NULL, *ret = NULL;
PyArray_Descr *oldtype = NULL;
int nd, flags = 0;
+ /* Hold on to `in_descr` as `dtype`, since we may also set it below. */
+ Py_XINCREF(in_descr);
+ PyArray_Descr *dtype = in_descr;
+
if (ndmin > NPY_MAXDIMS) {
PyErr_Format(PyExc_ValueError,
"ndmin bigger than allowable number of dimensions "
"NPY_MAXDIMS (=%d)", NPY_MAXDIMS);
- return NULL;
+ goto finish;
}
/* fast exit if simple call */
if (PyArray_CheckExact(op) || (subok && PyArray_Check(op))) {
oparr = (PyArrayObject *)op;
- if (type == NULL) {
+
+ if (dtype == NULL && in_DType == NULL) {
+ /*
+ * User did not ask for a specific dtype instance or class. So
+ * we can return either self or a copy.
+ */
if (copy != NPY_COPY_ALWAYS && STRIDING_OK(oparr, order)) {
ret = oparr;
Py_INCREF(ret);
@@ -1663,17 +1672,30 @@ _array_fromobject_generic(
if (copy == NPY_COPY_NEVER) {
PyErr_SetString(PyExc_ValueError,
"Unable to avoid copy while creating a new array.");
- return NULL;
+ goto finish;
}
ret = (PyArrayObject *)PyArray_NewCopy(oparr, order);
goto finish;
}
}
+ else if (dtype == NULL) {
+ /*
+ * If the user passed a DType class but not a dtype instance,
+ * we must use `PyArray_AdaptDescriptorToArray` to find the
+ * correct dtype instance.
+ * Even if the fast path doesn't work, we will use this.
+ */
+ dtype = PyArray_AdaptDescriptorToArray(oparr, in_DType, NULL);
+ if (dtype == NULL) {
+ goto finish;
+ }
+ }
+
/* One more chance for faster exit if user specified the dtype. */
oldtype = PyArray_DESCR(oparr);
- if (PyArray_EquivTypes(oldtype, type)) {
+ if (PyArray_EquivTypes(oldtype, dtype)) {
if (copy != NPY_COPY_ALWAYS && STRIDING_OK(oparr, order)) {
- if (oldtype == type) {
+ if (oldtype == dtype) {
Py_INCREF(op);
ret = oparr;
}
@@ -1681,10 +1703,10 @@ _array_fromobject_generic(
/* Create a new PyArrayObject from the caller's
* PyArray_Descr. Use the reference `op` as the base
* object. */
- Py_INCREF(type);
+ Py_INCREF(dtype);
ret = (PyArrayObject *)PyArray_NewFromDescrAndBase(
Py_TYPE(op),
- type,
+ dtype,
PyArray_NDIM(oparr),
PyArray_DIMS(oparr),
PyArray_STRIDES(oparr),
@@ -1700,10 +1722,10 @@ _array_fromobject_generic(
if (copy == NPY_COPY_NEVER) {
PyErr_SetString(PyExc_ValueError,
"Unable to avoid copy while creating a new array.");
- return NULL;
+ goto finish;
}
ret = (PyArrayObject *)PyArray_NewCopy(oparr, order);
- if (oldtype == type || ret == NULL) {
+ if (oldtype == dtype || ret == NULL) {
goto finish;
}
Py_INCREF(oldtype);
@@ -1734,11 +1756,13 @@ _array_fromobject_generic(
}
flags |= NPY_ARRAY_FORCECAST;
- Py_XINCREF(type);
- ret = (PyArrayObject *)PyArray_CheckFromAny(op, type,
- 0, 0, flags, NULL);
+
+ ret = (PyArrayObject *)PyArray_CheckFromAny_int(
+ op, dtype, in_DType, 0, 0, flags, NULL);
finish:
+ Py_XDECREF(dtype);
+
if (ret == NULL) {
return NULL;
}
@@ -1765,7 +1789,7 @@ array_array(PyObject *NPY_UNUSED(ignored),
npy_bool subok = NPY_FALSE;
_PyArray_CopyMode copy = NPY_COPY_ALWAYS;
int ndmin = 0;
- PyArray_Descr *type = NULL;
+ npy_dtype_info dt_info = {NULL, NULL};
NPY_ORDER order = NPY_KEEPORDER;
PyObject *like = Py_None;
NPY_PREPARE_ARGPARSER;
@@ -1773,21 +1797,23 @@ array_array(PyObject *NPY_UNUSED(ignored),
if (len_args != 1 || (kwnames != NULL)) {
if (npy_parse_arguments("array", args, len_args, kwnames,
"object", NULL, &op,
- "|dtype", &PyArray_DescrConverter2, &type,
+ "|dtype", &PyArray_DTypeOrDescrConverterOptional, &dt_info,
"$copy", &PyArray_CopyConverter, &copy,
"$order", &PyArray_OrderConverter, &order,
"$subok", &PyArray_BoolConverter, &subok,
"$ndmin", &PyArray_PythonPyIntFromInt, &ndmin,
"$like", NULL, &like,
NULL, NULL, NULL) < 0) {
- Py_XDECREF(type);
+ Py_XDECREF(dt_info.descr);
+ Py_XDECREF(dt_info.dtype);
return NULL;
}
if (like != Py_None) {
PyObject *deferred = array_implement_c_array_function_creation(
"array", like, NULL, NULL, args, len_args, kwnames);
if (deferred != Py_NotImplemented) {
- Py_XDECREF(type);
+ Py_XDECREF(dt_info.descr);
+ Py_XDECREF(dt_info.dtype);
return deferred;
}
}
@@ -1798,8 +1824,9 @@ array_array(PyObject *NPY_UNUSED(ignored),
}
PyObject *res = _array_fromobject_generic(
- op, type, copy, order, subok, ndmin);
- Py_XDECREF(type);
+ op, dt_info.descr, dt_info.dtype, copy, order, subok, ndmin);
+ Py_XDECREF(dt_info.descr);
+ Py_XDECREF(dt_info.dtype);
return res;
}
@@ -1808,7 +1835,7 @@ array_asarray(PyObject *NPY_UNUSED(ignored),
PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames)
{
PyObject *op;
- PyArray_Descr *type = NULL;
+ npy_dtype_info dt_info = {NULL, NULL};
NPY_ORDER order = NPY_KEEPORDER;
PyObject *like = Py_None;
NPY_PREPARE_ARGPARSER;
@@ -1816,18 +1843,20 @@ array_asarray(PyObject *NPY_UNUSED(ignored),
if (len_args != 1 || (kwnames != NULL)) {
if (npy_parse_arguments("asarray", args, len_args, kwnames,
"a", NULL, &op,
- "|dtype", &PyArray_DescrConverter2, &type,
+ "|dtype", &PyArray_DTypeOrDescrConverterOptional, &dt_info,
"|order", &PyArray_OrderConverter, &order,
"$like", NULL, &like,
NULL, NULL, NULL) < 0) {
- Py_XDECREF(type);
+ Py_XDECREF(dt_info.descr);
+ Py_XDECREF(dt_info.dtype);
return NULL;
}
if (like != Py_None) {
PyObject *deferred = array_implement_c_array_function_creation(
"asarray", like, NULL, NULL, args, len_args, kwnames);
if (deferred != Py_NotImplemented) {
- Py_XDECREF(type);
+ Py_XDECREF(dt_info.descr);
+ Py_XDECREF(dt_info.dtype);
return deferred;
}
}
@@ -1837,8 +1866,9 @@ array_asarray(PyObject *NPY_UNUSED(ignored),
}
PyObject *res = _array_fromobject_generic(
- op, type, NPY_FALSE, order, NPY_FALSE, 0);
- Py_XDECREF(type);
+ op, dt_info.descr, dt_info.dtype, NPY_FALSE, order, NPY_FALSE, 0);
+ Py_XDECREF(dt_info.descr);
+ Py_XDECREF(dt_info.dtype);
return res;
}
@@ -1847,7 +1877,7 @@ array_asanyarray(PyObject *NPY_UNUSED(ignored),
PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames)
{
PyObject *op;
- PyArray_Descr *type = NULL;
+ npy_dtype_info dt_info = {NULL, NULL};
NPY_ORDER order = NPY_KEEPORDER;
PyObject *like = Py_None;
NPY_PREPARE_ARGPARSER;
@@ -1855,18 +1885,20 @@ array_asanyarray(PyObject *NPY_UNUSED(ignored),
if (len_args != 1 || (kwnames != NULL)) {
if (npy_parse_arguments("asanyarray", args, len_args, kwnames,
"a", NULL, &op,
- "|dtype", &PyArray_DescrConverter2, &type,
+ "|dtype", &PyArray_DTypeOrDescrConverterOptional, &dt_info,
"|order", &PyArray_OrderConverter, &order,
"$like", NULL, &like,
NULL, NULL, NULL) < 0) {
- Py_XDECREF(type);
+ Py_XDECREF(dt_info.descr);
+ Py_XDECREF(dt_info.dtype);
return NULL;
}
if (like != Py_None) {
PyObject *deferred = array_implement_c_array_function_creation(
"asanyarray", like, NULL, NULL, args, len_args, kwnames);
if (deferred != Py_NotImplemented) {
- Py_XDECREF(type);
+ Py_XDECREF(dt_info.descr);
+ Py_XDECREF(dt_info.dtype);
return deferred;
}
}
@@ -1876,8 +1908,9 @@ array_asanyarray(PyObject *NPY_UNUSED(ignored),
}
PyObject *res = _array_fromobject_generic(
- op, type, NPY_FALSE, order, NPY_TRUE, 0);
- Py_XDECREF(type);
+ op, dt_info.descr, dt_info.dtype, NPY_FALSE, order, NPY_TRUE, 0);
+ Py_XDECREF(dt_info.descr);
+ Py_XDECREF(dt_info.dtype);
return res;
}
@@ -1887,24 +1920,26 @@ array_ascontiguousarray(PyObject *NPY_UNUSED(ignored),
PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames)
{
PyObject *op;
- PyArray_Descr *type = NULL;
+ npy_dtype_info dt_info = {NULL, NULL};
PyObject *like = Py_None;
NPY_PREPARE_ARGPARSER;
if (len_args != 1 || (kwnames != NULL)) {
if (npy_parse_arguments("ascontiguousarray", args, len_args, kwnames,
"a", NULL, &op,
- "|dtype", &PyArray_DescrConverter2, &type,
+ "|dtype", &PyArray_DTypeOrDescrConverterOptional, &dt_info,
"$like", NULL, &like,
NULL, NULL, NULL) < 0) {
- Py_XDECREF(type);
+ Py_XDECREF(dt_info.descr);
+ Py_XDECREF(dt_info.dtype);
return NULL;
}
if (like != Py_None) {
PyObject *deferred = array_implement_c_array_function_creation(
"ascontiguousarray", like, NULL, NULL, args, len_args, kwnames);
if (deferred != Py_NotImplemented) {
- Py_XDECREF(type);
+ Py_XDECREF(dt_info.descr);
+ Py_XDECREF(dt_info.dtype);
return deferred;
}
}
@@ -1914,8 +1949,10 @@ array_ascontiguousarray(PyObject *NPY_UNUSED(ignored),
}
PyObject *res = _array_fromobject_generic(
- op, type, NPY_FALSE, NPY_CORDER, NPY_FALSE, 1);
- Py_XDECREF(type);
+ op, dt_info.descr, dt_info.dtype, NPY_FALSE, NPY_CORDER, NPY_FALSE,
+ 1);
+ Py_XDECREF(dt_info.descr);
+ Py_XDECREF(dt_info.dtype);
return res;
}
@@ -1925,24 +1962,26 @@ array_asfortranarray(PyObject *NPY_UNUSED(ignored),
PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames)
{
PyObject *op;
- PyArray_Descr *type = NULL;
+ npy_dtype_info dt_info = {NULL, NULL};
PyObject *like = Py_None;
NPY_PREPARE_ARGPARSER;
if (len_args != 1 || (kwnames != NULL)) {
if (npy_parse_arguments("asfortranarray", args, len_args, kwnames,
"a", NULL, &op,
- "|dtype", &PyArray_DescrConverter2, &type,
+ "|dtype", &PyArray_DTypeOrDescrConverterOptional, &dt_info,
"$like", NULL, &like,
NULL, NULL, NULL) < 0) {
- Py_XDECREF(type);
+ Py_XDECREF(dt_info.descr);
+ Py_XDECREF(dt_info.dtype);
return NULL;
}
if (like != Py_None) {
PyObject *deferred = array_implement_c_array_function_creation(
"asfortranarray", like, NULL, NULL, args, len_args, kwnames);
if (deferred != Py_NotImplemented) {
- Py_XDECREF(type);
+ Py_XDECREF(dt_info.descr);
+ Py_XDECREF(dt_info.dtype);
return deferred;
}
}
@@ -1952,8 +1991,10 @@ array_asfortranarray(PyObject *NPY_UNUSED(ignored),
}
PyObject *res = _array_fromobject_generic(
- op, type, NPY_FALSE, NPY_FORTRANORDER, NPY_FALSE, 1);
- Py_XDECREF(type);
+ op, dt_info.descr, dt_info.dtype, NPY_FALSE, NPY_FORTRANORDER,
+ NPY_FALSE, 1);
+ Py_XDECREF(dt_info.descr);
+ Py_XDECREF(dt_info.dtype);
return res;
}
@@ -4843,6 +4884,7 @@ NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_axis1 = NULL;
NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_axis2 = NULL;
NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_like = NULL;
NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_numpy = NULL;
+NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_where = NULL;
static int
intern_strings(void)
@@ -4899,6 +4941,10 @@ intern_strings(void)
if (npy_ma_str_numpy == NULL) {
return -1;
}
+ npy_ma_str_where = PyUnicode_InternFromString("where");
+ if (npy_ma_str_where == NULL) {
+ return -1;
+ }
return 0;
}
diff --git a/numpy/core/src/multiarray/multiarraymodule.h b/numpy/core/src/multiarray/multiarraymodule.h
index 992acd09f..9ba2a1831 100644
--- a/numpy/core/src/multiarray/multiarraymodule.h
+++ b/numpy/core/src/multiarray/multiarraymodule.h
@@ -16,5 +16,6 @@ NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_axis1;
NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_axis2;
NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_like;
NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_numpy;
+NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_where;
#endif /* NUMPY_CORE_SRC_MULTIARRAY_MULTIARRAYMODULE_H_ */
diff --git a/numpy/core/src/multiarray/number.c b/numpy/core/src/multiarray/number.c
index 2e25152d5..c208fb203 100644
--- a/numpy/core/src/multiarray/number.c
+++ b/numpy/core/src/multiarray/number.c
@@ -53,6 +53,8 @@ static PyObject *
array_inplace_remainder(PyArrayObject *m1, PyObject *m2);
static PyObject *
array_inplace_power(PyArrayObject *a1, PyObject *o2, PyObject *NPY_UNUSED(modulo));
+static PyObject *
+array_inplace_matrix_multiply(PyArrayObject *m1, PyObject *m2);
/*
* Dictionary can contain any of the numeric operations, by name.
@@ -339,7 +341,6 @@ array_divmod(PyObject *m1, PyObject *m2)
return PyArray_GenericBinaryFunction(m1, m2, n_ops.divmod);
}
-/* Need this to be version dependent on account of the slot check */
static PyObject *
array_matrix_multiply(PyObject *m1, PyObject *m2)
{
@@ -348,13 +349,70 @@ array_matrix_multiply(PyObject *m1, PyObject *m2)
}
static PyObject *
-array_inplace_matrix_multiply(
- PyArrayObject *NPY_UNUSED(m1), PyObject *NPY_UNUSED(m2))
+array_inplace_matrix_multiply(PyArrayObject *self, PyObject *other)
{
- PyErr_SetString(PyExc_TypeError,
- "In-place matrix multiplication is not (yet) supported. "
- "Use 'a = a @ b' instead of 'a @= b'.");
- return NULL;
+ static PyObject *AxisError_cls = NULL;
+ npy_cache_import("numpy.exceptions", "AxisError", &AxisError_cls);
+ if (AxisError_cls == NULL) {
+ return NULL;
+ }
+
+ INPLACE_GIVE_UP_IF_NEEDED(self, other,
+ nb_inplace_matrix_multiply, array_inplace_matrix_multiply);
+
+ /*
+ * Unlike `matmul(a, b, out=a)` we ensure that the result is not broadcast
+ * if the result without `out` would have fewer dimensions than `a`.
+ * Since the signature of matmul is '(n?,k),(k,m?)->(n?,m?)' this is the
+ * case exactly when the second operand has both core dimensions.
+ *
+ * The error here will be confusing, but for now, we enforce this by
+ * passing the correct `axes=`.
+ */
+ static PyObject *axes_1d_obj_kwargs = NULL;
+ static PyObject *axes_2d_obj_kwargs = NULL;
+ if (NPY_UNLIKELY(axes_1d_obj_kwargs == NULL)) {
+ axes_1d_obj_kwargs = Py_BuildValue(
+ "{s, [(i), (i, i), (i)]}", "axes", -1, -2, -1, -1);
+ if (axes_1d_obj_kwargs == NULL) {
+ return NULL;
+ }
+ }
+ if (NPY_UNLIKELY(axes_2d_obj_kwargs == NULL)) {
+ axes_2d_obj_kwargs = Py_BuildValue(
+ "{s, [(i, i), (i, i), (i, i)]}", "axes", -2, -1, -2, -1, -2, -1);
+ if (axes_2d_obj_kwargs == NULL) {
+ return NULL;
+ }
+ }
+
+ PyObject *args = PyTuple_Pack(3, self, other, self);
+ if (args == NULL) {
+ return NULL;
+ }
+ PyObject *kwargs;
+ if (PyArray_NDIM(self) == 1) {
+ kwargs = axes_1d_obj_kwargs;
+ }
+ else {
+ kwargs = axes_2d_obj_kwargs;
+ }
+ PyObject *res = PyObject_Call(n_ops.matmul, args, kwargs);
+ Py_DECREF(args);
+
+ if (res == NULL) {
+ /*
+ * AxisError should indicate that the axes argument didn't work out,
+ * which should mean that the second operand is not two-dimensional.
+ */
+ if (PyErr_ExceptionMatches(AxisError_cls)) {
+ PyErr_SetString(PyExc_ValueError,
+ "inplace matrix multiplication requires the first operand to "
+ "have at least one and the second at least two dimensions.");
+ }
+ }
+
+ return res;
}
/*
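Behaviorally, the new in-place operator looks like this from Python (a sketch; the last example hits the AxisError-to-ValueError translation above)::

    import numpy as np

    a = np.arange(6.0).reshape(2, 3)
    a @= np.ones((3, 3))   # result (2, 3) fits `a`, written back in place

    c = np.ones(3)
    c @= np.ones((3, 3))   # 1-d left operand is fine: (3,) @ (3, 3) -> (3,)
    try:
        c @= np.ones(3)    # 1-d right operand has too few core dimensions
    except ValueError as e:
        print(e)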
diff --git a/numpy/core/src/npymath/npy_math_private.h b/numpy/core/src/npymath/npy_math_private.h
index a474b3de3..20c94f98a 100644
--- a/numpy/core/src/npymath/npy_math_private.h
+++ b/numpy/core/src/npymath/npy_math_private.h
@@ -21,6 +21,7 @@
#include <Python.h>
#ifdef __cplusplus
#include <cmath>
+#include <complex>
using std::isgreater;
using std::isless;
#else
@@ -494,8 +495,9 @@ do { \
* Microsoft C defines _MSC_VER
* Intel compiler does not use MSVC complex types, but defines _MSC_VER by
* default.
+ * Since C++17, MSVC no longer supports them.
*/
-#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
+#if !defined(__cplusplus) && defined(_MSC_VER) && !defined(__INTEL_COMPILER)
typedef union {
npy_cdouble npy_z;
_Dcomplex c99_z;
diff --git a/numpy/core/src/umath/override.c b/numpy/core/src/umath/override.c
index d247c2639..167164163 100644
--- a/numpy/core/src/umath/override.c
+++ b/numpy/core/src/umath/override.c
@@ -23,18 +23,19 @@
* Returns -1 on failure.
*/
static int
-get_array_ufunc_overrides(PyObject *in_args, PyObject *out_args,
+get_array_ufunc_overrides(PyObject *in_args, PyObject *out_args, PyObject *wheremask_obj,
PyObject **with_override, PyObject **methods)
{
int i;
int num_override_args = 0;
- int narg, nout;
+ int narg, nout, nwhere;
narg = (int)PyTuple_GET_SIZE(in_args);
/* It is valid for out_args to be NULL: */
nout = (out_args != NULL) ? (int)PyTuple_GET_SIZE(out_args) : 0;
+ nwhere = (wheremask_obj != NULL) ? 1 : 0;
- for (i = 0; i < narg + nout; ++i) {
+ for (i = 0; i < narg + nout + nwhere; ++i) {
PyObject *obj;
int j;
int new_class = 1;
@@ -42,9 +43,12 @@ get_array_ufunc_overrides(PyObject *in_args, PyObject *out_args,
if (i < narg) {
obj = PyTuple_GET_ITEM(in_args, i);
}
- else {
+ else if (i < narg + nout) {
obj = PyTuple_GET_ITEM(out_args, i - narg);
}
+ else {
+ obj = wheremask_obj;
+ }
/*
* Have we seen this class before? If so, ignore.
*/
@@ -208,7 +212,7 @@ copy_positional_args_to_kwargs(const char **keywords,
*/
NPY_NO_EXPORT int
PyUFunc_CheckOverride(PyUFuncObject *ufunc, char *method,
- PyObject *in_args, PyObject *out_args,
+ PyObject *in_args, PyObject *out_args, PyObject *wheremask_obj,
PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames,
PyObject **result)
{
@@ -227,7 +231,7 @@ PyUFunc_CheckOverride(PyUFuncObject *ufunc, char *method,
* Check inputs for overrides
*/
num_override_args = get_array_ufunc_overrides(
- in_args, out_args, with_override, array_ufunc_methods);
+ in_args, out_args, wheremask_obj, with_override, array_ufunc_methods);
if (num_override_args == -1) {
goto fail;
}
diff --git a/numpy/core/src/umath/override.h b/numpy/core/src/umath/override.h
index 4e9a323ca..20621bb19 100644
--- a/numpy/core/src/umath/override.h
+++ b/numpy/core/src/umath/override.h
@@ -6,7 +6,7 @@
NPY_NO_EXPORT int
PyUFunc_CheckOverride(PyUFuncObject *ufunc, char *method,
- PyObject *in_args, PyObject *out_args,
+ PyObject *in_args, PyObject *out_args, PyObject *wheremask_obj,
PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames,
PyObject **result);
diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c
index a159003de..d4aced05f 100644
--- a/numpy/core/src/umath/ufunc_object.c
+++ b/numpy/core/src/umath/ufunc_object.c
@@ -4071,7 +4071,7 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc,
/* We now have all the information required to check for Overrides */
PyObject *override = NULL;
int errval = PyUFunc_CheckOverride(ufunc, _reduce_type[operation],
- full_args.in, full_args.out, args, len_args, kwnames, &override);
+ full_args.in, full_args.out, wheremask_obj, args, len_args, kwnames, &override);
if (errval) {
return NULL;
}
@@ -4843,7 +4843,7 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc,
/* We now have all the information required to check for Overrides */
PyObject *override = NULL;
errval = PyUFunc_CheckOverride(ufunc, method,
- full_args.in, full_args.out,
+ full_args.in, full_args.out, where_obj,
args, len_args, kwnames, &override);
if (errval) {
goto fail;
@@ -5935,6 +5935,9 @@ trivial_at_loop(PyArrayMethodObject *ufuncimpl, NPY_ARRAYMETHOD_FLAGS flags,
res = ufuncimpl->contiguous_indexed_loop(
context, args, inner_size, steps, NULL);
+ if (args[2] != NULL) {
+ args[2] += (*inner_size) * steps[2];
+ }
} while (res == 0 && iter->outer_next(iter->outer));
if (res == 0 && !(flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) {
@@ -6261,7 +6264,7 @@ ufunc_at(PyUFuncObject *ufunc, PyObject *args)
return NULL;
}
errval = PyUFunc_CheckOverride(ufunc, "at",
- args, NULL, NULL, 0, NULL, &override);
+ args, NULL, NULL, NULL, 0, NULL, &override);
if (errval) {
return NULL;
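The ``args[2]`` adjustment above keeps the values pointer in step across outer iterations of the indexed fast path; the user-visible contract it protects is plain ``ufunc.at`` accumulation::

    import numpy as np

    a = np.zeros(5)
    np.add.at(a, [0, 0, 2], [1.0, 2.0, 3.0])
    print(a)  # [3. 0. 3. 0. 0.] -- repeated indices accumulate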
diff --git a/numpy/core/tests/test_array_coercion.py b/numpy/core/tests/test_array_coercion.py
index 0ba736c05..aeddac585 100644
--- a/numpy/core/tests/test_array_coercion.py
+++ b/numpy/core/tests/test_array_coercion.py
@@ -1,6 +1,6 @@
"""
Tests for array coercion, mainly through testing `np.array` results directly.
-Note that other such tests exist e.g. in `test_api.py` and many corner-cases
+Note that other such tests exist, e.g., in `test_api.py` and many corner-cases
are tested (sometimes indirectly) elsewhere.
"""
@@ -20,8 +20,8 @@ from numpy.testing import (
def arraylikes():
"""
Generator for functions converting an array into various array-likes.
- If full is True (default) includes array-likes not capable of handling
- all dtypes
+ If full is True (default) it includes array-likes not capable of handling
+ all dtypes.
"""
# base array:
def ndarray(a):
@@ -40,8 +40,8 @@ def arraylikes():
class _SequenceLike():
# We are giving a warning that array-like's were also expected to be
- # sequence-like in `np.array([array_like])`, this can be removed
- # when the deprecation exired (started NumPy 1.20)
+ # sequence-like in `np.array([array_like])`. This can be removed
+ # when the deprecation expires (started NumPy 1.20).
def __len__(self):
raise TypeError
@@ -161,6 +161,8 @@ class TestStringDiscovery:
# A nested array is also discovered correctly
arr = np.array(obj, dtype="O")
assert np.array(arr, dtype="S").dtype == expected
+ # Also if we use the dtype class
+ assert np.array(arr, dtype=type(expected)).dtype == expected
# Check that .astype() behaves identical
assert arr.astype("S").dtype == expected
# The DType class is accepted by `.astype()`
@@ -259,7 +261,7 @@ class TestScalarDiscovery:
@pytest.mark.parametrize("scalar", scalar_instances())
def test_scalar_coercion(self, scalar):
# This tests various scalar coercion paths, mainly for the numerical
- # types. It includes some paths not directly related to `np.array`
+ # types. It includes some paths not directly related to `np.array`.
if isinstance(scalar, np.inexact):
# Ensure we have a full-precision number if available
scalar = type(scalar)((scalar * 2)**0.5)
@@ -294,7 +296,7 @@ class TestScalarDiscovery:
* `np.array(scalar, dtype=dtype)`
* `np.empty((), dtype=dtype)[()] = scalar`
* `np.array(scalar).astype(dtype)`
- should behave the same. The only exceptions are paramteric dtypes
+ should behave the same. The only exceptions are parametric dtypes
(mainly datetime/timedelta without unit) and void without fields.
"""
dtype = cast_to.dtype # use to parametrize only the target dtype
@@ -386,7 +388,7 @@ class TestScalarDiscovery:
"""
dtype = np.dtype(dtype)
- # This is a special case using casting logic. It warns for the NaN
+ # This is a special case using casting logic. It warns for the NaN
# but allows the cast (giving undefined behaviour).
with np.errstate(invalid="ignore"):
coerced = np.array(scalar, dtype=dtype)
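A usage sketch of the behaviour these tests pin down: ``np.array`` and the ``asarray`` family now accept a DType class, ``type(np.dtype(...))``, wherever a dtype instance is accepted.

    import numpy as np

    dt = np.dtype('i')
    arr = np.array([1, 2, 3], dtype=type(dt))  # DType class, not an instance
    assert arr.dtype == dt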
diff --git a/numpy/core/tests/test_custom_dtypes.py b/numpy/core/tests/test_custom_dtypes.py
index 1a34c6fa3..da6a4bd50 100644
--- a/numpy/core/tests/test_custom_dtypes.py
+++ b/numpy/core/tests/test_custom_dtypes.py
@@ -229,6 +229,12 @@ class TestSFloat:
expected = arr.astype(SF(1.)) # above will have discovered 1. scaling
assert_array_equal(res.view(np.float64), expected.view(np.float64))
+ def test_creation_class(self):
+ arr1 = np.array([1., 2., 3.], dtype=SF)
+ assert arr1.dtype == SF(1.)
+ arr2 = np.array([1., 2., 3.], dtype=SF(1.))
+ assert_array_equal(arr1.view(np.float64), arr2.view(np.float64))
+
def test_type_pickle():
# can't actually unpickle, but we can pickle (if in namespace)
diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py
index ac4bd42d3..4a064827d 100644
--- a/numpy/core/tests/test_multiarray.py
+++ b/numpy/core/tests/test_multiarray.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
import collections.abc
import tempfile
import sys
@@ -1194,6 +1196,17 @@ class TestCreation:
expected = expected * (arr.nbytes // len(expected))
assert arr.tobytes() == expected
+ @pytest.mark.parametrize("func", [
+ np.array, np.asarray, np.asanyarray, np.ascontiguousarray,
+ np.asfortranarray])
+ def test_creation_from_dtypemeta(self, func):
+ dtype = np.dtype('i')
+ arr1 = func([1, 2, 3], dtype=dtype)
+ arr2 = func([1, 2, 3], dtype=type(dtype))
+ assert_array_equal(arr1, arr2)
+ assert arr2.dtype == dtype
+
+
class TestStructured:
def test_subarray_field_access(self):
a = np.zeros((3, 5), dtype=[('a', ('i4', (2, 2)))])
@@ -3718,7 +3731,7 @@ class TestBinop:
'and': (np.bitwise_and, True, int),
'xor': (np.bitwise_xor, True, int),
'or': (np.bitwise_or, True, int),
- 'matmul': (np.matmul, False, float),
+ 'matmul': (np.matmul, True, float),
# 'ge': (np.less_equal, False),
# 'gt': (np.less, False),
# 'le': (np.greater_equal, False),
@@ -6662,6 +6675,22 @@ class TestDot:
r = np.empty((1024, 32), dtype=int)
assert_raises(ValueError, dot, f, v, r)
+ def test_dot_out_result(self):
+ x = np.ones((), dtype=np.float16)
+ y = np.ones((5,), dtype=np.float16)
+ z = np.zeros((5,), dtype=np.float16)
+ res = x.dot(y, out=z)
+ assert np.array_equal(res, y)
+ assert np.array_equal(z, y)
+
+ def test_dot_out_aliasing(self):
+ x = np.ones((), dtype=np.float16)
+ y = np.ones((5,), dtype=np.float16)
+ z = np.zeros((5,), dtype=np.float16)
+ res = x.dot(y, out=z)
+ z[0] = 2
+ assert np.array_equal(res, z)
+
def test_dot_array_order(self):
a = np.array([[1, 2], [3, 4]], order='C')
b = np.array([[1, 2], [3, 4]], order='F')
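What the two tests above assert, as a usage sketch: a 0-d ``dot`` with ``out=`` now both fills the provided array and returns an array aliasing it.

    import numpy as np

    x = np.ones((), dtype=np.float16)
    y = np.ones(5, dtype=np.float16)
    z = np.zeros(5, dtype=np.float16)
    res = x.dot(y, out=z)
    z[0] = 2
    assert res[0] == 2   # res aliases the out array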
@@ -7169,16 +7198,69 @@ class TestMatmulOperator(MatmulCommon):
assert_raises(TypeError, self.matmul, np.void(b'abc'), np.void(b'abc'))
assert_raises(TypeError, self.matmul, np.arange(10), np.void(b'abc'))
-def test_matmul_inplace():
- # It would be nice to support in-place matmul eventually, but for now
- # we don't have a working implementation, so better just to error out
- # and nudge people to writing "a = a @ b".
- a = np.eye(3)
- b = np.eye(3)
- assert_raises(TypeError, a.__imatmul__, b)
- import operator
- assert_raises(TypeError, operator.imatmul, a, b)
- assert_raises(TypeError, exec, "a @= b", globals(), locals())
+
+class TestMatmulInplace:
+ DTYPES = {}
+ for i in MatmulCommon.types:
+ for j in MatmulCommon.types:
+ if np.can_cast(j, i):
+ DTYPES[f"{i}-{j}"] = (np.dtype(i), np.dtype(j))
+
+ @pytest.mark.parametrize("dtype1,dtype2", DTYPES.values(), ids=DTYPES)
+ def test_basic(self, dtype1: np.dtype, dtype2: np.dtype) -> None:
+ a = np.arange(10).reshape(5, 2).astype(dtype1)
+ a_id = id(a)
+ b = np.ones((2, 2), dtype=dtype2)
+
+ ref = a @ b
+ a @= b
+
+ assert id(a) == a_id
+ assert a.dtype == dtype1
+ assert a.shape == (5, 2)
+ if dtype1.kind in "fc":
+ np.testing.assert_allclose(a, ref)
+ else:
+ np.testing.assert_array_equal(a, ref)
+
+ SHAPES = {
+ "2d_large": ((10**5, 10), (10, 10)),
+ "3d_large": ((10**4, 10, 10), (1, 10, 10)),
+ "1d": ((3,), (3,)),
+ "2d_1d": ((3, 3), (3,)),
+ "1d_2d": ((3,), (3, 3)),
+ "2d_broadcast": ((3, 3), (3, 1)),
+ "2d_broadcast_reverse": ((1, 3), (3, 3)),
+ "3d_broadcast1": ((3, 3, 3), (1, 3, 1)),
+ "3d_broadcast2": ((3, 3, 3), (1, 3, 3)),
+ "3d_broadcast3": ((3, 3, 3), (3, 3, 1)),
+ "3d_broadcast_reverse1": ((1, 3, 3), (3, 3, 3)),
+ "3d_broadcast_reverse2": ((3, 1, 3), (3, 3, 3)),
+ "3d_broadcast_reverse3": ((1, 1, 3), (3, 3, 3)),
+ }
+
+ @pytest.mark.parametrize("a_shape,b_shape", SHAPES.values(), ids=SHAPES)
+ def test_shapes(self, a_shape: tuple[int, ...], b_shape: tuple[int, ...]):
+ a_size = np.prod(a_shape)
+ a = np.arange(a_size).reshape(a_shape).astype(np.float64)
+ a_id = id(a)
+
+ b_size = np.prod(b_shape)
+ b = np.arange(b_size).reshape(b_shape)
+
+ ref = a @ b
+ if ref.shape != a_shape:
+ with pytest.raises(ValueError):
+ a @= b
+ return
+ else:
+ a @= b
+
+ assert id(a) == a_id
+ assert a.dtype.type == np.float64
+ assert a.shape == a_shape
+ np.testing.assert_allclose(a, ref)
+
def test_matmul_axes():
a = np.arange(3*4*5).reshape(3, 4, 5)
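A quick sketch of the behaviour the new test class covers: in-place matrix multiplication, ``a @= b``, now works whenever the result broadcasts back to the left operand's shape and casts to its dtype; otherwise it raises ValueError.

    import numpy as np

    a = np.arange(10, dtype=np.float64).reshape(5, 2)
    a @= np.ones((2, 2))      # raised TypeError before this change
    assert a.shape == (5, 2)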
diff --git a/numpy/core/tests/test_overrides.py b/numpy/core/tests/test_overrides.py
index ae4cddb0e..25f551f6f 100644
--- a/numpy/core/tests/test_overrides.py
+++ b/numpy/core/tests/test_overrides.py
@@ -241,6 +241,19 @@ class TestArrayFunctionDispatch:
with assert_raises_regex(TypeError, 'no implementation found'):
dispatched_one_arg(array)
+ def test_where_dispatch(self):
+
+ class DuckArray:
+ def __array_function__(self, ufunc, method, *inputs, **kwargs):
+ return "overridden"
+
+ array = np.array(1)
+ duck_array = DuckArray()
+
+ result = np.std(array, where=duck_array)
+
+ assert_equal(result, "overridden")
+
class TestVerifyMatchingSignatures:
diff --git a/numpy/core/tests/test_ufunc.py b/numpy/core/tests/test_ufunc.py
index 498a654c8..04add9fa7 100644
--- a/numpy/core/tests/test_ufunc.py
+++ b/numpy/core/tests/test_ufunc.py
@@ -2054,6 +2054,17 @@ class TestUfunc:
# If it is [-1, -1, -1, -100, 0] then the regular strided loop was used
assert np.all(arr == [-1, -1, -1, -200, -1])
+ def test_ufunc_at_large(self):
+ # issue gh-23457
+ indices = np.zeros(8195, dtype=np.int16)
+ b = np.zeros(8195, dtype=float)
+ b[0] = 10
+ b[1] = 5
+ b[8192:] = 100
+ a = np.zeros(1, dtype=float)
+ np.add.at(a, indices, b)
+ assert a[0] == b.sum()
+
def test_cast_index_fastpath(self):
arr = np.zeros(10)
values = np.ones(100000)
diff --git a/numpy/core/tests/test_umath.py b/numpy/core/tests/test_umath.py
index 13f7375c2..0ed64d72a 100644
--- a/numpy/core/tests/test_umath.py
+++ b/numpy/core/tests/test_umath.py
@@ -3497,6 +3497,79 @@ class TestSpecialMethods:
assert_raises(ValueError, np.modf, a, out=('one', 'two', 'three'))
assert_raises(ValueError, np.modf, a, out=('one',))
+ def test_ufunc_override_where(self):
+
+ class OverriddenArrayOld(np.ndarray):
+
+ def _unwrap(self, objs):
+ cls = type(self)
+ result = []
+ for obj in objs:
+ if isinstance(obj, cls):
+ obj = np.array(obj)
+ elif type(obj) != np.ndarray:
+ return NotImplemented
+ result.append(obj)
+ return result
+
+ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+
+ inputs = self._unwrap(inputs)
+ if inputs is NotImplemented:
+ return NotImplemented
+
+ kwargs = kwargs.copy()
+ if "out" in kwargs:
+ kwargs["out"] = self._unwrap(kwargs["out"])
+ if kwargs["out"] is NotImplemented:
+ return NotImplemented
+
+ r = super().__array_ufunc__(ufunc, method, *inputs, **kwargs)
+ if r is not NotImplemented:
+ r = r.view(type(self))
+
+ return r
+
+ class OverriddenArrayNew(OverriddenArrayOld):
+ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+
+ kwargs = kwargs.copy()
+ if "where" in kwargs:
+ kwargs["where"] = self._unwrap((kwargs["where"], ))
+ if kwargs["where"] is NotImplemented:
+ return NotImplemented
+ else:
+ kwargs["where"] = kwargs["where"][0]
+
+ r = super().__array_ufunc__(ufunc, method, *inputs, **kwargs)
+ if r is not NotImplemented:
+ r = r.view(type(self))
+
+ return r
+
+ ufunc = np.negative
+
+ array = np.array([1, 2, 3])
+ where = np.array([True, False, True])
+ expected = ufunc(array, where=where)
+
+ with pytest.raises(TypeError):
+ ufunc(array, where=where.view(OverriddenArrayOld))
+
+ result_1 = ufunc(
+ array,
+ where=where.view(OverriddenArrayNew)
+ )
+ assert isinstance(result_1, OverriddenArrayNew)
+ assert np.all(np.array(result_1) == expected, where=where)
+
+ result_2 = ufunc(
+ array.view(OverriddenArrayNew),
+ where=where.view(OverriddenArrayNew)
+ )
+ assert isinstance(result_2, OverriddenArrayNew)
+ assert np.all(np.array(result_2) == expected, where=where)
+
def test_ufunc_override_exception(self):
class A:
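A condensed sketch of the compatibility point the two classes above exercise (``MyArray`` is an illustrative name): an ``__array_ufunc__`` implementation that delegates to ``super()`` must now unwrap a ``where=`` operand of its own type, otherwise the delegated call sees an overriding object again and fails.

    import numpy as np

    class MyArray(np.ndarray):
        def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
            if isinstance(kwargs.get("where"), MyArray):
                kwargs["where"] = np.asarray(kwargs["where"])
            inputs = tuple(np.asarray(x) if isinstance(x, MyArray) else x
                           for x in inputs)
            return super().__array_ufunc__(ufunc, method, *inputs, **kwargs)

    w = np.array([True, False, True]).view(MyArray)
    out = np.zeros(3)
    print(np.negative(np.array([1., 2., 3.]), where=w, out=out))  # [-1.  0. -3.]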
diff --git a/numpy/distutils/ccompiler_opt.py b/numpy/distutils/ccompiler_opt.py
index 4904dd3dd..6ba4cd816 100644
--- a/numpy/distutils/ccompiler_opt.py
+++ b/numpy/distutils/ccompiler_opt.py
@@ -16,15 +16,6 @@ import re
import subprocess
import textwrap
-# These flags are used to compile any C++ source within Numpy.
-# They are chosen to have very few runtime dependencies.
-NPY_CXX_FLAGS = [
- '-std=c++11', # Minimal standard version
- '-D__STDC_VERSION__=0', # for compatibility with C headers
- '-fno-exceptions', # no exception support
- '-fno-rtti'] # no runtime type information
-
-
class _Config:
"""An abstract class holds all configurable attributes of `CCompilerOpt`,
these class attributes can be used to change the default behavior
@@ -1000,7 +991,7 @@ class _CCompiler:
)
detect_args = (
("cc_has_debug", ".*(O0|Od|ggdb|coverage|debug:full).*", ""),
- ("cc_has_native",
+ ("cc_has_native",
".*(-march=native|-xHost|/QxHost|-mcpu=a64fx).*", ""),
# in case if the class run with -DNPY_DISABLE_OPTIMIZATION
("cc_noopt", ".*DISABLE_OPT.*", ""),
diff --git a/numpy/distutils/command/build_clib.py b/numpy/distutils/command/build_clib.py
index 45201f98f..11999dae2 100644
--- a/numpy/distutils/command/build_clib.py
+++ b/numpy/distutils/command/build_clib.py
@@ -307,6 +307,7 @@ class build_clib(old_build_clib):
# problem, msvc uses its own convention :(
c_sources += cxx_sources
cxx_sources = []
+ extra_cflags += extra_cxxflags
# filtering C dispatch-table sources when optimization is not disabled,
# otherwise treated as normal sources.
diff --git a/numpy/distutils/command/build_ext.py b/numpy/distutils/command/build_ext.py
index 6dc6b4265..d24162a42 100644
--- a/numpy/distutils/command/build_ext.py
+++ b/numpy/distutils/command/build_ext.py
@@ -407,6 +407,7 @@ class build_ext (old_build_ext):
if cxx_sources:
# Needed to compile kiva.agg._agg extension.
extra_args.append('/Zm1000')
+ extra_cflags += extra_cxxflags
# this hack works around the msvc compiler attributes
# problem, msvc uses its own convention :(
c_sources += cxx_sources
diff --git a/numpy/distutils/fcompiler/absoft.py b/numpy/distutils/fcompiler/absoft.py
index efe3a4cb5..68f516b92 100644
--- a/numpy/distutils/fcompiler/absoft.py
+++ b/numpy/distutils/fcompiler/absoft.py
@@ -1,6 +1,6 @@
-# http://www.absoft.com/literature/osxuserguide.pdf
-# http://www.absoft.com/documentation.html
+# Absoft Corporation ceased operations on 12/31/2022.
+# Thus, all links to <http://www.absoft.com> are invalid.
# Notes:
# - when using -g77 then use -DUNDERSCORE_G77 to compile f2py
diff --git a/numpy/dual.py b/numpy/dual.py
deleted file mode 100644
index eb7e61aac..000000000
--- a/numpy/dual.py
+++ /dev/null
@@ -1,83 +0,0 @@
-"""
-.. deprecated:: 1.20
-
-*This module is deprecated. Instead of importing functions from*
-``numpy.dual``, *the functions should be imported directly from NumPy
-or SciPy*.
-
-Aliases for functions which may be accelerated by SciPy.
-
-SciPy_ can be built to use accelerated or otherwise improved libraries
-for FFTs, linear algebra, and special functions. This module allows
-developers to transparently support these accelerated functions when
-SciPy is available but still support users who have only installed
-NumPy.
-
-.. _SciPy : https://www.scipy.org
-
-"""
-import warnings
-
-
-warnings.warn('The module numpy.dual is deprecated. Instead of using dual, '
- 'use the functions directly from numpy or scipy.',
- category=DeprecationWarning,
- stacklevel=2)
-
-# This module should be used for functions both in numpy and scipy if
-# you want to use the numpy version if available but the scipy version
-# otherwise.
-# Usage --- from numpy.dual import fft, inv
-
-__all__ = ['fft', 'ifft', 'fftn', 'ifftn', 'fft2', 'ifft2',
- 'norm', 'inv', 'svd', 'solve', 'det', 'eig', 'eigvals',
- 'eigh', 'eigvalsh', 'lstsq', 'pinv', 'cholesky', 'i0']
-
-import numpy.linalg as linpkg
-import numpy.fft as fftpkg
-from numpy.lib import i0
-import sys
-
-
-fft = fftpkg.fft
-ifft = fftpkg.ifft
-fftn = fftpkg.fftn
-ifftn = fftpkg.ifftn
-fft2 = fftpkg.fft2
-ifft2 = fftpkg.ifft2
-
-norm = linpkg.norm
-inv = linpkg.inv
-svd = linpkg.svd
-solve = linpkg.solve
-det = linpkg.det
-eig = linpkg.eig
-eigvals = linpkg.eigvals
-eigh = linpkg.eigh
-eigvalsh = linpkg.eigvalsh
-lstsq = linpkg.lstsq
-pinv = linpkg.pinv
-cholesky = linpkg.cholesky
-
-_restore_dict = {}
-
-def register_func(name, func):
- if name not in __all__:
- raise ValueError("{} not a dual function.".format(name))
- f = sys._getframe(0).f_globals
- _restore_dict[name] = f[name]
- f[name] = func
-
-def restore_func(name):
- if name not in __all__:
- raise ValueError("{} not a dual function.".format(name))
- try:
- val = _restore_dict[name]
- except KeyError:
- return
- else:
- sys._getframe(0).f_globals[name] = val
-
-def restore_all():
- for name in _restore_dict.keys():
- restore_func(name)
diff --git a/numpy/exceptions.py b/numpy/exceptions.py
index 73ad5ea6d..2f8438101 100644
--- a/numpy/exceptions.py
+++ b/numpy/exceptions.py
@@ -60,7 +60,7 @@ class ModuleDeprecationWarning(DeprecationWarning):
.. warning::
- This warning should not be used, since nose testing is not relvant
+ This warning should not be used, since nose testing is not relevant
anymore.
The nose tester turns ordinary Deprecation warnings into test failures.
diff --git a/numpy/f2py/crackfortran.py b/numpy/f2py/crackfortran.py
index b831697d8..36a913047 100755
--- a/numpy/f2py/crackfortran.py
+++ b/numpy/f2py/crackfortran.py
@@ -935,7 +935,7 @@ typedefpattern = re.compile(
r'(?:,(?P<attributes>[\w(),]+))?(::)?(?P<name>\b[a-z$_][\w$]*\b)'
r'(?:\((?P<params>[\w,]*)\))?\Z', re.I)
nameargspattern = re.compile(
- r'\s*(?P<name>\b[\w$]+\b)\s*(@\(@\s*(?P<args>[\w\s,]*)\s*@\)@|)\s*((result(\s*@\(@\s*(?P<result>\b[\w$]+\b)\s*@\)@|))|(bind\s*@\(@\s*(?P<bind>.*)\s*@\)@))*\s*\Z', re.I)
+ r'\s*(?P<name>\b[\w$]+\b)\s*(@\(@\s*(?P<args>[\w\s,]*)\s*@\)@|)\s*((result(\s*@\(@\s*(?P<result>\b[\w$]+\b)\s*@\)@|))|(bind\s*@\(@\s*(?P<bind>(?:(?!@\)@).)*)\s*@\)@))*\s*\Z', re.I)
operatorpattern = re.compile(
r'\s*(?P<scheme>(operator|assignment))'
r'@\(@\s*(?P<name>[^)]+)\s*@\)@\s*\Z', re.I)
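The fix replaces the greedy ``.*`` inside the ``bind(...)`` group with a tempered pattern that refuses to run across a closing ``@)@`` delimiter, which removes the catastrophic backtracking. A standalone sketch of the idiom (simplified, not the full nameargspattern):

    import re

    # (?:(?!@\)@).)* consumes any character that does not begin '@)@', so
    # the engine can never re-partition the same text while backtracking.
    tempered = re.compile(r'@\(@((?:(?!@\)@).)*)@\)@')
    m = tempered.search('bind @(@ c, name="f" @)@ trailing')
    print(m.group(1))   # ' c, name="f" '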
diff --git a/numpy/f2py/tests/test_crackfortran.py b/numpy/f2py/tests/test_crackfortran.py
index 73ac4e276..23965087d 100644
--- a/numpy/f2py/tests/test_crackfortran.py
+++ b/numpy/f2py/tests/test_crackfortran.py
@@ -1,9 +1,10 @@
import importlib
import codecs
+import time
import unicodedata
import pytest
import numpy as np
-from numpy.f2py.crackfortran import markinnerspaces
+from numpy.f2py.crackfortran import markinnerspaces, nameargspattern
from . import util
from numpy.f2py import crackfortran
import textwrap
@@ -276,3 +277,49 @@ class TestUnicodeComment(util.F2PyTest):
)
def test_encoding_comment(self):
self.module.foo(3)
+
+class TestNameArgsPatternBacktracking:
+ @pytest.mark.parametrize(
+ ['adversary'],
+ [
+ ('@)@bind@(@',),
+ ('@)@bind @(@',),
+ ('@)@bind foo bar baz@(@',)
+ ]
+ )
+ def test_nameargspattern_backtracking(self, adversary):
+ '''address ReDoS vulnerability:
+ https://github.com/numpy/numpy/issues/23338'''
+ last_median = 0.
+ trials_per_count = 128
+ start_reps, end_reps = 15, 25
+ times_median_doubled = 0
+ for ii in range(start_reps, end_reps):
+ repeated_adversary = adversary * ii
+ times = []
+ for _ in range(trials_per_count):
+ t0 = time.perf_counter()
+ mtch = nameargspattern.search(repeated_adversary)
+ times.append(time.perf_counter() - t0)
+ # We should use a measure of time that's resilient to outliers.
+ # Times jump around a lot due to the CPU's scheduler.
+ median = np.median(times)
+ assert not mtch
+ # if the adversary is capped with @)@, it becomes acceptable
+ # according to the old version of the regex.
+ # that should still be true.
+ good_version_of_adversary = repeated_adversary + '@)@'
+ assert nameargspattern.search(good_version_of_adversary)
+ if ii > start_reps:
+ # the hallmark of exponentially catastrophic backtracking
+ # is that runtime doubles for every added instance of
+ # the problematic pattern.
+ times_median_doubled += median > 2 * last_median
+ # also try to rule out non-exponential but still bad cases
+ # arbitrarily, we should set a hard limit of 10ms as too slow
+ assert median < trials_per_count * 0.01
+ last_median = median
+ # we accept that maybe the median might double once, due to
+ # the CPU scheduler acting weird or whatever. More than that
+ # seems suspicious.
+ assert times_median_doubled < 2
\ No newline at end of file
diff --git a/numpy/lib/format.py b/numpy/lib/format.py
index 8f3fd694d..ef50fb19d 100644
--- a/numpy/lib/format.py
+++ b/numpy/lib/format.py
@@ -437,15 +437,15 @@ def _write_array_header(fp, d, version=None):
header.append("'%s': %s, " % (key, repr(value)))
header.append("}")
header = "".join(header)
-
+
# Add some spare space so that the array header can be modified in-place
# when changing the array size, e.g. when growing it by appending data at
- # the end.
+ # the end.
shape = d['shape']
header += " " * ((GROWTH_AXIS_MAX_DIGITS - len(repr(
shape[-1 if d['fortran_order'] else 0]
))) if len(shape) > 0 else 0)
-
+
if version is None:
header = _wrap_header_guess_version(header)
else:
@@ -505,7 +505,7 @@ def read_array_header_1_0(fp, max_header_size=_MAX_HEADER_SIZE):
max_header_size : int, optional
Maximum allowed size of the header. Large headers may not be safe
to load securely and thus require explicitly passing a larger value.
- See :py:meth:`ast.literal_eval()` for details.
+ See :py:func:`ast.literal_eval()` for details.
Raises
------
@@ -532,7 +532,7 @@ def read_array_header_2_0(fp, max_header_size=_MAX_HEADER_SIZE):
max_header_size : int, optional
Maximum allowed size of the header. Large headers may not be safe
to load securely and thus require explicitly passing a larger value.
- See :py:meth:`ast.literal_eval()` for details.
+ See :py:func:`ast.literal_eval()` for details.
Returns
-------
@@ -764,7 +764,7 @@ def read_array(fp, allow_pickle=False, pickle_kwargs=None, *,
max_header_size : int, optional
Maximum allowed size of the header. Large headers may not be safe
to load securely and thus require explicitly passing a larger value.
- See :py:meth:`ast.literal_eval()` for details.
+ See :py:func:`ast.literal_eval()` for details.
This option is ignored when `allow_pickle` is passed. In that case
the file is by definition trusted and the limit is unnecessary.
@@ -883,7 +883,7 @@ def open_memmap(filename, mode='r+', dtype=None, shape=None,
max_header_size : int, optional
Maximum allowed size of the header. Large headers may not be safe
to load securely and thus require explicitly passing a larger value.
- See :py:meth:`ast.literal_eval()` for details.
+ See :py:func:`ast.literal_eval()` for details.
Returns
-------
diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index f0f374f97..277ae3dc4 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -2117,10 +2117,10 @@ def _create_arrays(broadcast_shape, dim_sizes, list_of_core_dims, dtypes,
@set_module('numpy')
class vectorize:
"""
- vectorize(pyfunc, otypes=None, doc=None, excluded=None, cache=False,
- signature=None)
+ vectorize(pyfunc=np._NoValue, otypes=None, doc=None, excluded=None,
+ cache=False, signature=None)
- Generalized function class.
+ Returns an object that acts like pyfunc, but takes arrays as input.
Define a vectorized function which takes a nested sequence of objects or
numpy arrays as inputs and returns a single numpy array or a tuple of numpy
@@ -2134,8 +2134,9 @@ class vectorize:
Parameters
----------
- pyfunc : callable
+ pyfunc : callable, optional
A python function or method.
+ Can be omitted to produce a decorator with keyword arguments.
otypes : str or list of dtypes, optional
The output data type. It must be specified as either a string of
typecode characters or a list of data type specifiers. There should
@@ -2167,8 +2168,9 @@ class vectorize:
Returns
-------
- vectorized : callable
- Vectorized function.
+ out : callable
+ A vectorized function if ``pyfunc`` was provided,
+ a decorator otherwise.
See Also
--------
@@ -2265,18 +2267,44 @@ class vectorize:
[0., 0., 1., 2., 1., 0.],
[0., 0., 0., 1., 2., 1.]])
+ Decorator syntax is supported. The decorator can be called as
+ a function to provide keyword arguments.
+
+ >>> @np.vectorize
+ ... def identity(x):
+ ...     return x
+ ...
+ >>> identity([0, 1, 2])
+ array([0, 1, 2])
+
+ >>> @np.vectorize(otypes=[float])
+ ... def as_float(x):
+ ...     return x
+ ...
+ >>> as_float([0, 1, 2])
+ array([0., 1., 2.])
"""
- def __init__(self, pyfunc, otypes=None, doc=None, excluded=None,
- cache=False, signature=None):
+ def __init__(self, pyfunc=np._NoValue, otypes=None, doc=None,
+ excluded=None, cache=False, signature=None):
+
+ if (pyfunc != np._NoValue) and (not callable(pyfunc)):
+ # Splitting the error message to keep
+ # the length below 79 characters.
+ part1 = "When used as a decorator, "
+ part2 = "only accepts keyword arguments."
+ raise TypeError(part1 + part2)
+
self.pyfunc = pyfunc
self.cache = cache
self.signature = signature
- self._ufunc = {} # Caching to improve default performance
+ if pyfunc != np._NoValue and hasattr(pyfunc, '__name__'):
+ self.__name__ = pyfunc.__name__
- if doc is None:
+ self._ufunc = {} # Caching to improve default performance
+ self._doc = None
+ self.__doc__ = doc
+ if doc is None and hasattr(pyfunc, '__doc__'):
self.__doc__ = pyfunc.__doc__
else:
- self.__doc__ = doc
+ self._doc = doc
if isinstance(otypes, str):
for char in otypes:
@@ -2298,7 +2326,15 @@ class vectorize:
else:
self._in_and_out_core_dims = None
- def __call__(self, *args, **kwargs):
+ def _init_stage_2(self, pyfunc, *args, **kwargs):
+ self.__name__ = pyfunc.__name__
+ self.pyfunc = pyfunc
+ if self._doc is None:
+ self.__doc__ = pyfunc.__doc__
+ else:
+ self.__doc__ = self._doc
+
+ def _call_as_normal(self, *args, **kwargs):
"""
Return arrays with the results of `pyfunc` broadcast (vectorized) over
`args` and `kwargs` not in `excluded`.
@@ -2328,6 +2364,13 @@ class vectorize:
return self._vectorize_call(func=func, args=vargs)
+ def __call__(self, *args, **kwargs):
+ if self.pyfunc is np._NoValue:
+ self._init_stage_2(*args, **kwargs)
+ return self
+
+ return self._call_as_normal(*args, **kwargs)
+
def _get_ufunc_and_otypes(self, func, args):
"""Return (ufunc, otypes)."""
# frompyfunc will fail if args is empty
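In short, the two-stage initialisation makes both decorator spellings work; a sketch mirroring the new docstring examples, including the ``__name__`` preservation requested in gh-23021:

    import numpy as np

    @np.vectorize                   # bare form: pyfunc is passed directly
    def identity(x):
        return x

    @np.vectorize(otypes=[float])   # keyword form: the first call binds pyfunc
    def as_float(x):
        return x

    assert identity.__name__ == 'identity'
    assert as_float([0, 1, 2]).dtype == np.float64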
diff --git a/numpy/lib/npyio.py b/numpy/lib/npyio.py
index b1f85f709..bfda6804a 100644
--- a/numpy/lib/npyio.py
+++ b/numpy/lib/npyio.py
@@ -142,7 +142,7 @@ class NpzFile(Mapping):
max_header_size : int, optional
Maximum allowed size of the header. Large headers may not be safe
to load securely and thus require explicitly passing a larger value.
- See :py:meth:`ast.literal_eval()` for details.
+ See :py:func:`ast.literal_eval()` for details.
This option is ignored when `allow_pickle` is passed. In that case
the file is by definition trusted and the limit is unnecessary.
@@ -309,7 +309,7 @@ def load(file, mmap_mode=None, allow_pickle=False, fix_imports=True,
max_header_size : int, optional
Maximum allowed size of the header. Large headers may not be safe
to load securely and thus require explicitly passing a larger value.
- See :py:meth:`ast.literal_eval()` for details.
+ See :py:func:`ast.literal_eval()` for details.
This option is ignored when `allow_pickle` is passed. In that case
the file is by definition trusted and the limit is unnecessary.
@@ -1159,10 +1159,10 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
while such lines are counted in `skiprows`.
.. versionadded:: 1.16.0
-
+
.. versionchanged:: 1.23.0
- Lines containing no data, including comment lines (e.g., lines
- starting with '#' or as specified via `comments`) are not counted
+ Lines containing no data, including comment lines (e.g., lines
+ starting with '#' or as specified via `comments`) are not counted
towards `max_rows`.
quotechar : unicode character or None, optional
The character used to denote the start and end of a quoted item.
diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py
index 3ec46735c..09d1195ad 100644
--- a/numpy/lib/tests/test_function_base.py
+++ b/numpy/lib/tests/test_function_base.py
@@ -8,7 +8,7 @@ import pytest
import hypothesis
from hypothesis.extra.numpy import arrays
import hypothesis.strategies as st
-
+from functools import partial
import numpy as np
from numpy import ma
@@ -1787,6 +1787,70 @@ class TestVectorize:
assert_equal(type(r), subclass)
assert_equal(r, m * v)
+ def test_name(self):
+ # See gh-23021
+ @np.vectorize
+ def f2(a, b):
+ return a + b
+
+ assert f2.__name__ == 'f2'
+
+ def test_decorator(self):
+ @vectorize
+ def addsubtract(a, b):
+ if a > b:
+ return a - b
+ else:
+ return a + b
+
+ r = addsubtract([0, 3, 6, 9], [1, 3, 5, 7])
+ assert_array_equal(r, [1, 6, 1, 2])
+
+ def test_docstring(self):
+ @vectorize
+ def f(x):
+ """Docstring"""
+ return x
+
+ if sys.flags.optimize < 2:
+ assert f.__doc__ == "Docstring"
+
+ def test_partial(self):
+ def foo(x, y):
+ return x + y
+
+ bar = partial(foo, 3)
+ vbar = np.vectorize(bar)
+ assert vbar(1) == 4
+
+ def test_signature_otypes_decorator(self):
+ @vectorize(signature='(n)->(n)', otypes=['float64'])
+ def f(x):
+ return x
+
+ r = f([1, 2, 3])
+ assert_equal(r.dtype, np.dtype('float64'))
+ assert_array_equal(r, [1, 2, 3])
+ assert f.__name__ == 'f'
+
+ def test_bad_input(self):
+ with assert_raises(TypeError):
+ A = np.vectorize(pyfunc = 3)
+
+ def test_no_keywords(self):
+ with assert_raises(TypeError):
+ @np.vectorize("string")
+ def foo():
+ return "bar"
+
+ def test_positional_regression_9477(self):
+ # This supplies the first keyword argument as a positional,
+ # to ensure that they are still properly forwarded after the
+ # enhancement for #9477
+ f = vectorize((lambda x: x), ['float64'])
+ r = f([2])
+ assert_equal(r.dtype, np.dtype('float64'))
+
class TestLeaks:
class A:
diff --git a/numpy/linalg/setup.py b/numpy/linalg/setup.py
index 1c4e1295e..6f72635ab 100644
--- a/numpy/linalg/setup.py
+++ b/numpy/linalg/setup.py
@@ -4,7 +4,6 @@ import sysconfig
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
- from numpy.distutils.ccompiler_opt import NPY_CXX_FLAGS
from numpy.distutils.system_info import get_info, system_info
config = Configuration('linalg', parent_package, top_path)
@@ -81,7 +80,6 @@ def configuration(parent_package='', top_path=None):
sources=['umath_linalg.cpp', get_lapack_lite_sources],
depends=['lapack_lite/f2c.h'],
extra_info=lapack_info,
- extra_cxx_compile_args=NPY_CXX_FLAGS,
libraries=['npymath'],
)
config.add_data_files('*.pyi')
diff --git a/numpy/ma/core.py b/numpy/ma/core.py
index fcc321a73..7f57985a9 100644
--- a/numpy/ma/core.py
+++ b/numpy/ma/core.py
@@ -7033,6 +7033,29 @@ def compressed(x):
--------
ma.MaskedArray.compressed : Equivalent method.
+ Examples
+ --------
+
+ Create an array with negative values masked:
+
+ >>> import numpy as np
+ >>> x = np.array([[1, -1, 0], [2, -1, 3], [7, 4, -1]])
+ >>> masked_x = np.ma.masked_array(x, mask=x < 0)
+ >>> masked_x
+ masked_array(
+ data=[[1, --, 0],
+ [2, --, 3],
+ [7, 4, --]],
+ mask=[[False, True, False],
+ [False, True, False],
+ [False, False, True]],
+ fill_value=999999)
+
+ Compress the masked array into a 1-D array of non-masked values:
+
+ >>> np.ma.compressed(masked_x)
+ array([1, 0, 2, 3, 7, 4])
+
"""
return asanyarray(x).compressed()
@@ -7110,7 +7133,7 @@ def diag(v, k=0):
Examples
--------
-
+
Create an array with negative values masked:
>>> import numpy as np
@@ -7521,7 +7544,7 @@ def diff(a, /, n=1, axis=-1, prepend=np._NoValue, append=np._NoValue):
if len(combined) > 1:
a = np.ma.concatenate(combined, axis)
- # GH 22465 np.diff without prepend/append preserves the mask
+ # GH 22465 np.diff without prepend/append preserves the mask
return np.diff(a, n, axis)
@@ -7752,94 +7775,18 @@ def round_(a, decimals=0, out=None):
round = round_
-# Needed by dot, so move here from extras.py. It will still be exported
-# from extras.py for compatibility.
-def mask_rowcols(a, axis=None):
+def _mask_propagate(a, axis):
"""
- Mask rows and/or columns of a 2D array that contain masked values.
-
- Mask whole rows and/or columns of a 2D array that contain
- masked values. The masking behavior is selected using the
- `axis` parameter.
-
- - If `axis` is None, rows *and* columns are masked.
- - If `axis` is 0, only rows are masked.
- - If `axis` is 1 or -1, only columns are masked.
-
- Parameters
- ----------
- a : array_like, MaskedArray
- The array to mask. If not a MaskedArray instance (or if no array
- elements are masked). The result is a MaskedArray with `mask` set
- to `nomask` (False). Must be a 2D array.
- axis : int, optional
- Axis along which to perform the operation. If None, applies to a
- flattened version of the array.
-
- Returns
- -------
- a : MaskedArray
- A modified version of the input array, masked depending on the value
- of the `axis` parameter.
-
- Raises
- ------
- NotImplementedError
- If input array `a` is not 2D.
-
- See Also
- --------
- mask_rows : Mask rows of a 2D array that contain masked values.
- mask_cols : Mask cols of a 2D array that contain masked values.
- masked_where : Mask where a condition is met.
-
- Notes
- -----
- The input array's mask is modified by this function.
-
- Examples
- --------
- >>> import numpy.ma as ma
- >>> a = np.zeros((3, 3), dtype=int)
- >>> a[1, 1] = 1
- >>> a
- array([[0, 0, 0],
- [0, 1, 0],
- [0, 0, 0]])
- >>> a = ma.masked_equal(a, 1)
- >>> a
- masked_array(
- data=[[0, 0, 0],
- [0, --, 0],
- [0, 0, 0]],
- mask=[[False, False, False],
- [False, True, False],
- [False, False, False]],
- fill_value=1)
- >>> ma.mask_rowcols(a)
- masked_array(
- data=[[0, --, 0],
- [--, --, --],
- [0, --, 0]],
- mask=[[False, True, False],
- [ True, True, True],
- [False, True, False]],
- fill_value=1)
-
+ Mask whole 1-d vectors of an array that contain masked values.
"""
a = array(a, subok=False)
- if a.ndim != 2:
- raise NotImplementedError("mask_rowcols works for 2D arrays only.")
m = getmask(a)
- # Nothing is masked: return a
- if m is nomask or not m.any():
+ if m is nomask or not m.any() or axis is None:
return a
- maskedval = m.nonzero()
a._mask = a._mask.copy()
- if not axis:
- a[np.unique(maskedval[0])] = masked
- if axis in [None, 1, -1]:
- a[:, np.unique(maskedval[1])] = masked
+ axes = normalize_axis_tuple(axis, a.ndim)
+ for ax in axes:
+ a._mask |= m.any(axis=ax, keepdims=True)
return a
@@ -7856,10 +7803,6 @@ def dot(a, b, strict=False, out=None):
corresponding method, it is recommended that the optional arguments be
treated as keyword only. At some point that may be mandatory.
- .. note::
- Works only with 2-D arrays at the moment.
-
-
Parameters
----------
a, b : masked_array_like
@@ -7903,18 +7846,22 @@ def dot(a, b, strict=False, out=None):
fill_value=999999)
"""
- # !!!: Works only with 2D arrays. There should be a way to get it to run
- # with higher dimension
- if strict and (a.ndim == 2) and (b.ndim == 2):
- a = mask_rowcols(a, 0)
- b = mask_rowcols(b, 1)
+ if strict is True:
+ if np.ndim(a) == 0 or np.ndim(b) == 0:
+ pass
+ elif b.ndim == 1:
+ a = _mask_propagate(a, a.ndim - 1)
+ b = _mask_propagate(b, b.ndim - 1)
+ else:
+ a = _mask_propagate(a, a.ndim - 1)
+ b = _mask_propagate(b, b.ndim - 2)
am = ~getmaskarray(a)
bm = ~getmaskarray(b)
if out is None:
d = np.dot(filled(a, 0), filled(b, 0))
m = ~np.dot(am, bm)
- if d.ndim == 0:
+ if np.ndim(d) == 0:
d = np.asarray(d)
r = d.view(get_masked_subclass(a, b))
r.__setmask__(m)
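A sketch of the strict-mode propagation ``np.ma.dot`` now applies in any number of dimensions (it mirrors the new tests in test_extras.py below): a masked element along a summed-over axis masks the whole corresponding output entry.

    import numpy as np

    a = np.ma.masked_array(np.arange(8).reshape(2, 2, 2),
                           mask=[[[1, 0], [0, 0]], [[0, 0], [0, 0]]])
    b = np.ma.masked_array(np.arange(2), mask=[0, 1])
    print(np.ma.dot(a, b, strict=True).mask)    # every entry masked
    print(np.ma.dot(a, b, strict=False).mask)   # [[True, False], [False, False]]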
diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py
index 4abe2107a..8a6246c36 100644
--- a/numpy/ma/extras.py
+++ b/numpy/ma/extras.py
@@ -27,8 +27,7 @@ from . import core as ma
from .core import (
MaskedArray, MAError, add, array, asarray, concatenate, filled, count,
getmask, getmaskarray, make_mask_descr, masked, masked_array, mask_or,
- nomask, ones, sort, zeros, getdata, get_masked_subclass, dot,
- mask_rowcols
+ nomask, ones, sort, zeros, getdata, get_masked_subclass, dot
)
import numpy as np
@@ -955,6 +954,95 @@ def compress_cols(a):
return compress_rowcols(a, 1)
+def mask_rowcols(a, axis=None):
+ """
+ Mask rows and/or columns of a 2D array that contain masked values.
+
+ Mask whole rows and/or columns of a 2D array that contain
+ masked values. The masking behavior is selected using the
+ `axis` parameter.
+
+ - If `axis` is None, rows *and* columns are masked.
+ - If `axis` is 0, only rows are masked.
+ - If `axis` is 1 or -1, only columns are masked.
+
+ Parameters
+ ----------
+ a : array_like, MaskedArray
+ The array to mask. If not a MaskedArray instance (or if no array
+ elements are masked), the result is a MaskedArray with `mask` set
+ to `nomask` (False). Must be a 2D array.
+ axis : int, optional
+ Axis along which to perform the operation. If None, applies to a
+ flattened version of the array.
+
+ Returns
+ -------
+ a : MaskedArray
+ A modified version of the input array, masked depending on the value
+ of the `axis` parameter.
+
+ Raises
+ ------
+ NotImplementedError
+ If input array `a` is not 2D.
+
+ See Also
+ --------
+ mask_rows : Mask rows of a 2D array that contain masked values.
+ mask_cols : Mask cols of a 2D array that contain masked values.
+ masked_where : Mask where a condition is met.
+
+ Notes
+ -----
+ The input array's mask is modified by this function.
+
+ Examples
+ --------
+ >>> import numpy.ma as ma
+ >>> a = np.zeros((3, 3), dtype=int)
+ >>> a[1, 1] = 1
+ >>> a
+ array([[0, 0, 0],
+ [0, 1, 0],
+ [0, 0, 0]])
+ >>> a = ma.masked_equal(a, 1)
+ >>> a
+ masked_array(
+ data=[[0, 0, 0],
+ [0, --, 0],
+ [0, 0, 0]],
+ mask=[[False, False, False],
+ [False, True, False],
+ [False, False, False]],
+ fill_value=1)
+ >>> ma.mask_rowcols(a)
+ masked_array(
+ data=[[0, --, 0],
+ [--, --, --],
+ [0, --, 0]],
+ mask=[[False, True, False],
+ [ True, True, True],
+ [False, True, False]],
+ fill_value=1)
+
+ """
+ a = array(a, subok=False)
+ if a.ndim != 2:
+ raise NotImplementedError("mask_rowcols works for 2D arrays only.")
+ m = getmask(a)
+ # Nothing is masked: return a
+ if m is nomask or not m.any():
+ return a
+ maskedval = m.nonzero()
+ a._mask = a._mask.copy()
+ if not axis:
+ a[np.unique(maskedval[0])] = masked
+ if axis in [None, 1, -1]:
+ a[:, np.unique(maskedval[1])] = masked
+ return a
+
+
def mask_rows(a, axis=np._NoValue):
"""
Mask rows of a 2D array that contain masked values.
diff --git a/numpy/ma/tests/test_extras.py b/numpy/ma/tests/test_extras.py
index e59ba3656..d09a50fec 100644
--- a/numpy/ma/tests/test_extras.py
+++ b/numpy/ma/tests/test_extras.py
@@ -730,6 +730,47 @@ class TestCompressFunctions:
assert_equal(c.mask, [[0, 0, 1], [1, 1, 1], [0, 0, 1]])
c = dot(b, a, strict=False)
assert_equal(c, np.dot(b.filled(0), a.filled(0)))
+ #
+ a = masked_array(np.arange(8).reshape(2, 2, 2),
+ mask=[[[1, 0], [0, 0]], [[0, 0], [0, 0]]])
+ b = masked_array(np.arange(8).reshape(2, 2, 2),
+ mask=[[[0, 0], [0, 0]], [[0, 0], [0, 1]]])
+ c = dot(a, b, strict=True)
+ assert_equal(c.mask,
+ [[[[1, 1], [1, 1]], [[0, 0], [0, 1]]],
+ [[[0, 0], [0, 1]], [[0, 0], [0, 1]]]])
+ c = dot(a, b, strict=False)
+ assert_equal(c.mask,
+ [[[[0, 0], [0, 1]], [[0, 0], [0, 0]]],
+ [[[0, 0], [0, 0]], [[0, 0], [0, 0]]]])
+ c = dot(b, a, strict=True)
+ assert_equal(c.mask,
+ [[[[1, 0], [0, 0]], [[1, 0], [0, 0]]],
+ [[[1, 0], [0, 0]], [[1, 1], [1, 1]]]])
+ c = dot(b, a, strict=False)
+ assert_equal(c.mask,
+ [[[[0, 0], [0, 0]], [[0, 0], [0, 0]]],
+ [[[0, 0], [0, 0]], [[1, 0], [0, 0]]]])
+ #
+ a = masked_array(np.arange(8).reshape(2, 2, 2),
+ mask=[[[1, 0], [0, 0]], [[0, 0], [0, 0]]])
+ b = 5.
+ c = dot(a, b, strict=True)
+ assert_equal(c.mask, [[[1, 0], [0, 0]], [[0, 0], [0, 0]]])
+ c = dot(a, b, strict=False)
+ assert_equal(c.mask, [[[1, 0], [0, 0]], [[0, 0], [0, 0]]])
+ c = dot(b, a, strict=True)
+ assert_equal(c.mask, [[[1, 0], [0, 0]], [[0, 0], [0, 0]]])
+ c = dot(b, a, strict=False)
+ assert_equal(c.mask, [[[1, 0], [0, 0]], [[0, 0], [0, 0]]])
+ #
+ a = masked_array(np.arange(8).reshape(2, 2, 2),
+ mask=[[[1, 0], [0, 0]], [[0, 0], [0, 0]]])
+ b = masked_array(np.arange(2), mask=[0, 1])
+ c = dot(a, b, strict=True)
+ assert_equal(c.mask, [[1, 1], [1, 1]])
+ c = dot(a, b, strict=False)
+ assert_equal(c.mask, [[1, 0], [0, 0]])
def test_dot_returns_maskedarray(self):
# See gh-6611
diff --git a/numpy/meson.build b/numpy/meson.build
index b366b7b05..cc5a79bb2 100644
--- a/numpy/meson.build
+++ b/numpy/meson.build
@@ -103,7 +103,6 @@ python_sources = [
'conftest.py',
'ctypeslib.py',
'ctypeslib.pyi',
- 'dual.py',
'exceptions.py',
'exceptions.pyi',
'matlib.py',
diff --git a/numpy/tests/test_public_api.py b/numpy/tests/test_public_api.py
index 108ba0b74..e0681426e 100644
--- a/numpy/tests/test_public_api.py
+++ b/numpy/tests/test_public_api.py
@@ -248,7 +248,6 @@ PRIVATE_BUT_PRESENT_MODULES = ['numpy.' + s for s in [
"distutils.numpy_distribution",
"distutils.pathccompiler",
"distutils.unixccompiler",
- "dual",
"f2py.auxfuncs",
"f2py.capi_maps",
"f2py.cb_rules",
diff --git a/numpy/typing/tests/data/pass/ndarray_misc.py b/numpy/typing/tests/data/pass/ndarray_misc.py
index 19a1af9e2..6beacc5d7 100644
--- a/numpy/typing/tests/data/pass/ndarray_misc.py
+++ b/numpy/typing/tests/data/pass/ndarray_misc.py
@@ -150,7 +150,7 @@ A.argpartition([0])
A.diagonal()
A.dot(1)
-A.dot(1, out=B0)
+A.dot(1, out=B2)
A.nonzero()
diff --git a/setup.py b/setup.py
index 671df90fd..edd8c4d6d 100755
--- a/setup.py
+++ b/setup.py
@@ -12,7 +12,9 @@ import textwrap
import warnings
import builtins
import re
+import tempfile
+from distutils.errors import CompileError
# Python supported version checks. Keep right after stdlib imports to ensure we
# get a sensible error for older Python versions
@@ -184,45 +186,135 @@ class sdist_checked(cmdclass['sdist']):
def get_build_overrides():
"""
- Custom build commands to add `-std=c99` to compilation
+ Custom build commands that add the required C/C++ standard flags to compilation
"""
from numpy.distutils.command.build_clib import build_clib
from numpy.distutils.command.build_ext import build_ext
from numpy._utils import _pep440
- def _needs_gcc_c99_flag(obj):
- if obj.compiler.compiler_type != 'unix':
- return False
+ def try_compile(compiler, file, flags = [], verbose=False):
+ # Turn warnings into errors so unsupported flags are reliably detected
+ if getattr(compiler, 'compiler_type', '') == 'unix':
+ flags = ['-Werror'] + flags
+ bk_ver = getattr(compiler, 'verbose', False)
+ compiler.verbose = verbose
+ try:
+ compiler.compile([file], extra_postargs=flags)
+ return True, ''
+ except CompileError as e:
+ return False, str(e)
+ finally:
+ compiler.verbose = bk_ver
+
+ def flags_is_required(compiler, is_cpp, flags, code):
+ if is_cpp:
+ compiler = compiler.cxx_compiler()
+ suf = '.cpp'
+ else:
+ suf = '.c'
+ with tempfile.TemporaryDirectory() as temp_dir:
+ tmp_file = os.path.join(temp_dir, "test" + suf)
+ with open(tmp_file, "w+") as f:
+ f.write(code)
+ # First try without any flags: if the required standard is
+ # already supported by default, there is no need to pass them.
+ comp = try_compile(compiler, tmp_file)
+ if not comp[0]:
+ comp = try_compile(compiler, tmp_file, flags)
+ if not comp[0]:
+ # rerun verbosely to surface the error
+ try_compile(compiler, tmp_file, flags, True)
+ if is_cpp:
+ raise RuntimeError(
+ "Broken toolchain during testing C++ compiler. \n"
+ "A compiler with support for C++17 language "
+ "features is required.\n"
+ f"Triggered the following error: {comp[1]}."
+ )
+ else:
+ raise RuntimeError(
+ "Broken toolchain during testing C compiler. \n"
+ "A compiler with support for C99 language "
+ "features is required.\n"
+ f"Triggered the following error: {comp[1]}."
+ )
+ return True
+ return False
- cc = obj.compiler.compiler[0]
- if "gcc" not in cc:
- return False
-
- # will print something like '4.2.1\n'
- out = subprocess.run([cc, '-dumpversion'],
- capture_output=True, text=True)
- # -std=c99 is default from this version on
- if _pep440.parse(out.stdout) >= _pep440.Version('5.0'):
- return False
- return True
+ def std_cxx_flags(cmd):
+ compiler = cmd.compiler
+ flags = getattr(compiler, '__np_cache_cpp_flags', None)
+ if flags is not None:
+ return flags
+ flags = dict(
+ msvc = ['/std:c++17']
+ ).get(compiler.compiler_type, ['-std=c++17'])
+ # These flags are used to compile any C++ source within Numpy.
+ # They are chosen to have very few runtime dependencies.
+ extra_flags = dict(
+ # update the __cplusplus macro to reflect the enabled C++ version
+ msvc = ['/Zc:__cplusplus']
+ ).get(compiler.compiler_type, [
+ # The following flag avoids emitting any extra code from the
+ # STL, since extensions are built by the C linker and carry no
+ # C++ runtime dependencies.
+ '-fno-threadsafe-statics',
+ '-D__STDC_VERSION__=0', # for compatibility with C headers
+ '-fno-exceptions', # no exception support
+ '-fno-rtti' # no runtime type information
+ ])
+ if not flags_is_required(compiler, True, flags, textwrap.dedent('''
+ #include <type_traits>
+ template<typename ...T>
+ constexpr bool test_fold = (... && std::is_const_v<T>);
+ int main()
+ {
+ if constexpr (test_fold<int, const int>) {
+ return 0;
+ }
+ else {
+ return -1;
+ }
+ }
+ ''')):
+ flags.clear()
+ flags += extra_flags
+ setattr(compiler, '__np_cache_cpp_flags', flags)
+ return flags
+
+ def std_c_flags(cmd):
+ compiler = cmd.compiler
+ flags = getattr(compiler, '__np_cache_c_flags', None)
+ if flags is not None:
+ return flags
+ flags = dict(
+ msvc = []
+ ).get(compiler.compiler_type, ['-std=c99'])
+
+ if not flags_is_required(compiler, False, flags, textwrap.dedent('''
+ inline int test_inline() { return 0; }
+ int main(void)
+ { return test_inline(); }
+ ''')):
+ flags.clear()
+
+ setattr(compiler, '__np_cache_c_flags', flags)
+ return flags
class new_build_clib(build_clib):
def build_a_library(self, build_info, lib_name, libraries):
- from numpy.distutils.ccompiler_opt import NPY_CXX_FLAGS
- if _needs_gcc_c99_flag(self):
- build_info['extra_cflags'] = ['-std=c99']
- build_info['extra_cxxflags'] = NPY_CXX_FLAGS
+ build_info['extra_cflags'] = std_c_flags(self)
+ build_info['extra_cxxflags'] = std_cxx_flags(self)
build_clib.build_a_library(self, build_info, lib_name, libraries)
class new_build_ext(build_ext):
def build_extension(self, ext):
- if _needs_gcc_c99_flag(self):
- if '-std=c99' not in ext.extra_compile_args:
- ext.extra_compile_args.append('-std=c99')
+ ext.extra_c_compile_args += std_c_flags(self)
+ ext.extra_cxx_compile_args += std_cxx_flags(self)
build_ext.build_extension(self, ext)
return new_build_clib, new_build_ext
-
def generate_cython():
# Check Cython version
from numpy._utils import _pep440
diff --git a/spin b/spin
deleted file mode 100755
index 7a11042ab..000000000
--- a/spin
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/usr/bin/env python
-#
-# Example stub for running `python -m spin`
-#
-# Copy this into your project root.
-
-import os
-import sys
-import runpy
-
-sys.path.remove(os.path.abspath(os.path.dirname(sys.argv[0])))
-try:
- runpy.run_module("spin", run_name="__main__")
-except ImportError:
- print("Cannot import spin; please install it using")
- print()
- print(" pip install spin")
- print()
- sys.exit(1)
diff --git a/tools/gitpod/Dockerfile b/tools/gitpod/Dockerfile
deleted file mode 100644
index 1ff9076cd..000000000
--- a/tools/gitpod/Dockerfile
+++ /dev/null
@@ -1,101 +0,0 @@
-#
-# Dockerfile for NumPy development
-#
-# Usage:
-# -------
-#
-# To make a local build of the container, from the 'Docker-dev' directory:
-# docker build --rm -f "Dockerfile" -t <build-tag> "."
-#
-# To use the container use the following command. It assumes that you are in
-# the root folder of the NumPy git repository, making it available as
-# /home/numpy in the container. Whatever changes you make to that directory
-# are visible in the host and container.
-# The docker image is retrieved from the NumPy dockerhub repository
-#
-# docker run --rm -it -v $(pwd):/home/numpy numpy/numpy-dev:<image-tag>
-#
-# By default the container will activate the conda environment numpy-dev
-# which contains all the dependencies needed for NumPy development
-#
-# To build NumPy run: python setup.py build_ext --inplace
-#
-# To run the tests use: python runtests.py
-#
-# This image is based on: Ubuntu 20.04 (focal)
-# https://hub.docker.com/_/ubuntu/?tab=tags&name=focal
-# OS/ARCH: linux/amd64
-FROM gitpod/workspace-base:latest@sha256:770d3022db71512bdd1b7fdc06983f17cfc956342853e315d2d1c0ab39216a36
-
-ARG MAMBAFORGE_VERSION="4.11.0-0"
-ARG CONDA_ENV=numpy-dev
-
-
-# ---- Configure environment ----
-ENV CONDA_DIR=/home/gitpod/mambaforge3 \
- SHELL=/bin/bash
-ENV PATH=${CONDA_DIR}/bin:$PATH \
- WORKSPACE=/workspace/numpy
-
-
-# -----------------------------------------------------------------------------
-# ---- Creating as root - note: make sure to change to gitpod in the end ----
-USER root
-
-# hadolint ignore=DL3008
-RUN apt-get update && \
- apt-get install -yq --no-install-recommends \
- ca-certificates \
- dirmngr \
- dvisvgm \
- gnupg \
- gpg-agent \
- texlive-latex-extra \
- vim && \
- # this needs to be done after installing dirmngr
- apt-key adv --keyserver keyserver.ubuntu.com --recv-key 23F3D4EA75716059 && \
- apt-add-repository https://cli.github.com/packages && \
- apt-get install -yq --no-install-recommends \
- gh && \
- locale-gen en_US.UTF-8 && \
- apt-get clean && \
- rm -rf /var/cache/apt/* &&\
- rm -rf /var/lib/apt/lists/* &&\
- rm -rf /tmp/*
-
-# Allows this Dockerfile to activate conda environments
-SHELL ["/bin/bash", "--login", "-o", "pipefail", "-c"]
-
-# -----------------------------------------------------------------------------
-# ---- Installing mamba ----
-RUN wget -q -O mambaforge3.sh \
- "https://github.com/conda-forge/miniforge/releases/download/$MAMBAFORGE_VERSION/Mambaforge-$MAMBAFORGE_VERSION-Linux-x86_64.sh" && \
- bash mambaforge3.sh -p ${CONDA_DIR} -b && \
- rm mambaforge3.sh
-
-# -----------------------------------------------------------------------------
-# ---- Copy needed files ----
-# basic workspace configurations
-COPY ./tools/gitpod/workspace_config /usr/local/bin/workspace_config
-
-RUN chmod a+rx /usr/local/bin/workspace_config && \
- workspace_config
-
-# Copy conda environment file into the container - this needs to exists inside
-# the container to create a conda environment from it
-COPY environment.yml /tmp/environment.yml
-
-# -----------------------------------------------------------------------------
-# ---- Create conda environment ----
-# Install NumPy dependencies
-RUN mamba env create -f /tmp/environment.yml && \
- conda activate ${CONDA_ENV} && \
- mamba install ccache -y && \
- # needed for docs rendering later on
- python -m pip install --no-cache-dir sphinx-autobuild && \
- conda clean --all -f -y && \
- rm -rf /tmp/*
-
-# -----------------------------------------------------------------------------
-# Always make sure we are not root
-USER gitpod
\ No newline at end of file
diff --git a/tools/gitpod/gitpod.Dockerfile b/tools/gitpod/gitpod.Dockerfile
deleted file mode 100644
index 7c369ac49..000000000
--- a/tools/gitpod/gitpod.Dockerfile
+++ /dev/null
@@ -1,51 +0,0 @@
-# Doing a local shallow clone - keeps the container secure
-# and much slimmer than using COPY directly or making a
-# remote clone
-ARG BASE_CONTAINER="numpy/numpy-dev:latest"
-FROM gitpod/workspace-base:latest as clone
-
-COPY --chown=gitpod . /tmp/numpy_repo
-
-# the clone should be deep enough for versioneer to work
-RUN git clone --shallow-since=2021-05-22 file:////tmp/numpy_repo /tmp/numpy
-
-# -----------------------------------------------------------------------------
-# Using the numpy-dev Docker image as a base
-# This way, we ensure we have all the needed compilers and dependencies
-# while reducing the build time
-FROM ${BASE_CONTAINER} as build
-
-# -----------------------------------------------------------------------------
-USER root
-
-# -----------------------------------------------------------------------------
-# ---- ENV variables ----
-# ---- Directories needed ----
-ENV WORKSPACE=/workspace/numpy/ \
- CONDA_ENV=numpy-dev
-
-# Allows this Dockerfile to activate conda environments
-SHELL ["/bin/bash", "--login", "-o", "pipefail", "-c"]
-
-# Copy over the shallow clone
-COPY --from=clone --chown=gitpod /tmp/numpy ${WORKSPACE}
-
-# Everything happens in the /workspace/numpy directory
-WORKDIR ${WORKSPACE}
-
-# Build numpy to populate the cache used by ccache
-# Note, hadolint suggests consolidating the RUN commands. That info
-# level complaint (DL3059) is currently ignored to avoid errors.
-RUN git config --global --add safe.directory /workspace/numpy
-RUN git submodule update --init --depth=1 -- numpy/core/src/umath/svml numpy/core/src/npysort/x86-simd-sort
-RUN conda activate ${CONDA_ENV} && \
- python setup.py build_ext --inplace && \
- ccache -s
-
-# Gitpod will load the repository into /workspace/numpy. We remove the
-# directory from the image to prevent conflicts
-RUN rm -rf ${WORKSPACE}
-
-# -----------------------------------------------------------------------------
-# Always return to non privileged user
-USER gitpod
diff --git a/tools/gitpod/settings.json b/tools/gitpod/settings.json
deleted file mode 100644
index 50296336d..000000000
--- a/tools/gitpod/settings.json
+++ /dev/null
@@ -1,8 +0,0 @@
-{
- "restructuredtext.updateOnTextChanged": "true",
- "restructuredtext.updateDelay": 300,
- "restructuredtext.linter.disabledLinters": ["doc8","rst-lint", "rstcheck"],
- "python.defaultInterpreterPath": "/home/gitpod/mambaforge3/envs/numpy-dev/bin/python",
- "esbonio.sphinx.buildDir": "${workspaceRoot}/doc/build/html",
- "esbonio.sphinx.confDir": ""
-}
\ No newline at end of file
diff --git a/tools/gitpod/workspace_config b/tools/gitpod/workspace_config
deleted file mode 100644
index aa859c9be..000000000
--- a/tools/gitpod/workspace_config
+++ /dev/null
@@ -1,58 +0,0 @@
-#!/bin/bash
-# Basic configurations for the workspace
-
-set -e
-
-# gitpod/workspace-base needs at least one file here
-touch /home/gitpod/.bashrc.d/empty
-
-# Add git aliases
-git config --global alias.co checkout
-git config --global alias.ci commit
-git config --global alias.st status
-git config --global alias.br branch
-git config --global alias.hist "log --pretty=format:'%h %ad | %s%d [%an]' --graph --date=short"
-git config --global alias.type 'cat-file -t'
-git config --global alias.dump 'cat-file -p'
-
-# Enable basic vim defaults in ~/.vimrc
-echo "filetype plugin indent on" >>~/.vimrc
-echo "set colorcolumn=80" >>~/.vimrc
-echo "set number" >>~/.vimrc
-echo "syntax enable" >>~/.vimrc
-
-# Vanity custom bash prompt - makes it more legible
-echo "PS1='\[\e]0;\u \w\a\]\[\033[01;36m\]\u\[\033[m\] > \[\033[38;5;141m\]\w\[\033[m\] \\$ '" >>~/.bashrc
-
-# Enable prompt color in the skeleton .bashrc
-# hadolint ignore=SC2016
-sed -i 's/^#force_color_prompt=yes/force_color_prompt=yes/' /etc/skel/.bashrc
-
-# .gitpod.yml is configured to install NumPy from /workspace/numpy
-echo "export PYTHONPATH=${WORKSPACE}" >>~/.bashrc
-
-# make conda activate command available from /bin/bash (login and interactive)
-if [[ ! -f "/etc/profile.d/conda.sh" ]]; then
- ln -s ${CONDA_DIR}/etc/profile.d/conda.sh /etc/profile.d/conda.sh
-fi
-echo ". ${CONDA_DIR}/etc/profile.d/conda.sh" >>~/.bashrc
-echo "conda activate numpy-dev" >>~/.bashrc
-
-# Enable prompt color in the skeleton .bashrc
-# hadolint ignore=SC2016
-sed -i 's/^#force_color_prompt=yes/force_color_prompt=yes/' /etc/skel/.bashrc
-
-# .gitpod.yml is configured to install numpy from /workspace/numpy
-echo "export PYTHONPATH=/workspace/numpy" >>~/.bashrc
-
-# Set up ccache for compilers for this Dockerfile
-# REF: https://github.com/conda-forge/compilers-feedstock/issues/31
-echo "conda activate numpy-dev" >>~/.startuprc
-echo "export CC=\"ccache \$CC\"" >>~/.startuprc
-echo "export CXX=\"ccache \$CXX\"" >>~/.startuprc
-echo "export F77=\"ccache \$F77\"" >>~/.startuprc
-echo "export F90=\"ccache \$F90\"" >>~/.startuprc
-echo "export GFORTRAN=\"ccache \$GFORTRAN\"" >>~/.startuprc
-echo "export FC=\"ccache \$FC\"" >>~/.startuprc
-echo "source ~/.startuprc" >>~/.profile
-echo "source ~/.startuprc" >>~/.bashrc