summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authoryuki <drsuaimqjgar@gmail.com>2023-03-22 01:16:42 +0000
committeryuki <drsuaimqjgar@gmail.com>2023-03-22 01:16:42 +0000
commit6f1679c48d19198273c02379acd68cb8c601d21e (patch)
treea912b119658a714524f57fc704695ded1738d8bb
parent03edb7b8dddb12198509d2bf602ea8b382d27199 (diff)
parent294c7f2c893b7e5ef783fc1cb1912d06404b452b (diff)
downloadnumpy-6f1679c48d19198273c02379acd68cb8c601d21e.tar.gz
Merge branch 'main' into enh-ma-dot
-rw-r--r--.devcontainer/devcontainer.json5
-rwxr-xr-x.devcontainer/setup.sh7
-rw-r--r--.github/dependabot.yml11
-rw-r--r--.github/workflows/build_test.yml78
-rw-r--r--.github/workflows/circleci.yml2
-rw-r--r--.github/workflows/codeql.yml73
-rw-r--r--.github/workflows/cygwin.yml8
-rw-r--r--.github/workflows/dependency-review.yml20
-rw-r--r--.github/workflows/docker.yml13
-rw-r--r--.github/workflows/emscripten.yml8
-rw-r--r--.github/workflows/gitpod.yml13
-rw-r--r--.github/workflows/labeler.yml2
-rw-r--r--.github/workflows/linux_meson.yml10
-rw-r--r--.github/workflows/linux_musl.yml4
-rw-r--r--.github/workflows/scorecards.yml6
-rw-r--r--.github/workflows/wheels.yml20
-rw-r--r--.github/workflows/windows_meson.yml4
-rw-r--r--INSTALL.rst2
-rw-r--r--build_requirements.txt2
-rw-r--r--building_with_meson.md16
-rwxr-xr-xdev.py19
-rw-r--r--doc/release/upcoming_changes/22982.new_feature.rst13
-rw-r--r--doc/release/upcoming_changes/23229.compatibility.rst3
-rw-r--r--doc/release/upcoming_changes/23314.deprecation.rst4
-rw-r--r--doc/release/upcoming_changes/23376.expired.rst9
-rw-r--r--doc/release/upcoming_changes/23403.expired.rst4
-rw-r--r--doc/source/dev/index.rst4
-rw-r--r--doc/source/reference/arrays.classes.rst9
-rw-r--r--doc/source/reference/global_state.rst13
-rw-r--r--doc/source/user/absolute_beginners.rst4
-rw-r--r--doc/source/user/building.rst2
-rw-r--r--environment.yml2
-rw-r--r--numpy/__init__.pyi18
-rw-r--r--numpy/array_api/_typing.py31
-rw-r--r--numpy/core/_methods.py71
-rw-r--r--numpy/core/arrayprint.py10
-rw-r--r--numpy/core/code_generators/ufunc_docstrings.py10
-rw-r--r--numpy/core/fromnumeric.py74
-rw-r--r--numpy/core/include/numpy/npy_cpu.h2
-rw-r--r--numpy/core/meson.build3
-rw-r--r--numpy/core/overrides.py14
-rw-r--r--numpy/core/setup.py16
-rw-r--r--numpy/core/shape_base.py11
-rw-r--r--numpy/core/src/multiarray/calculation.c17
-rw-r--r--numpy/core/src/multiarray/datetime_busday.c5
-rw-r--r--numpy/core/src/multiarray/descriptor.c1
m---------numpy/core/src/npysort/x86-simd-sort0
-rw-r--r--numpy/core/tests/test_array_coercion.py16
-rw-r--r--numpy/core/tests/test_arrayprint.py42
-rw-r--r--numpy/core/tests/test_datetime.py24
-rw-r--r--numpy/core/tests/test_deprecations.py19
-rw-r--r--numpy/core/tests/test_dtype.py8
-rw-r--r--numpy/core/tests/test_indexing.py2
-rw-r--r--numpy/core/tests/test_mem_overlap.py2
-rw-r--r--numpy/core/tests/test_memmap.py4
-rw-r--r--numpy/core/tests/test_multiarray.py3
-rw-r--r--numpy/core/tests/test_numeric.py69
-rw-r--r--numpy/core/tests/test_overrides.py19
-rw-r--r--numpy/core/tests/test_regression.py23
-rw-r--r--numpy/core/tests/test_scalar_methods.py10
-rw-r--r--numpy/core/tests/test_scalarmath.py3
-rw-r--r--numpy/core/tests/test_umath.py4
-rw-r--r--numpy/distutils/ccompiler.py2
-rw-r--r--numpy/distutils/ccompiler_opt.py13
-rw-r--r--numpy/distutils/fujitsuccompiler.py28
-rw-r--r--numpy/distutils/mingw32ccompiler.py5
-rw-r--r--numpy/distutils/system_info.py81
-rw-r--r--numpy/distutils/tests/test_ccompiler_opt.py6
-rw-r--r--numpy/fft/helper.pyi8
-rw-r--r--numpy/lib/__init__.pyi1
-rw-r--r--numpy/lib/function_base.py33
-rw-r--r--numpy/lib/histograms.py2
-rw-r--r--numpy/lib/index_tricks.py19
-rw-r--r--numpy/lib/shape_base.py8
-rw-r--r--numpy/lib/tests/test_function_base.py8
-rw-r--r--numpy/lib/tests/test_twodim_base.py11
-rw-r--r--numpy/lib/tests/test_type_check.py2
-rw-r--r--numpy/lib/tests/test_ufunclike.py6
-rw-r--r--numpy/lib/ufunclike.py60
-rw-r--r--numpy/lib/utils.py27
-rw-r--r--numpy/linalg/linalg.py4
-rw-r--r--numpy/ma/core.py2
-rw-r--r--numpy/ma/core.pyi4
-rw-r--r--numpy/ma/extras.py4
-rw-r--r--numpy/ma/tests/test_core.py8
-rw-r--r--numpy/ma/tests/test_old_ma.py8
-rw-r--r--numpy/ma/tests/test_regression.py6
-rw-r--r--numpy/ma/testutils.py2
-rw-r--r--numpy/random/_generator.pyi98
-rw-r--r--numpy/random/_generator.pyx8
-rw-r--r--numpy/random/bit_generator.pyx6
-rw-r--r--numpy/random/mtrand.pyx2
-rw-r--r--numpy/testing/_private/utils.py19
-rw-r--r--numpy/testing/tests/test_utils.py3
-rw-r--r--numpy/tests/test_public_api.py1
-rw-r--r--numpy/typing/tests/data/reveal/scalars.pyi6
-rw-r--r--numpy/typing/tests/test_runtime.py6
-rw-r--r--pyproject.toml11
-rwxr-xr-xsetup.py3
-rw-r--r--site.cfg.example10
-rwxr-xr-xspin19
-rw-r--r--tools/ci/cirrus_general.yml4
-rw-r--r--tools/gitpod/Dockerfile2
-rw-r--r--tools/gitpod/gitpod.Dockerfile2
104 files changed, 859 insertions, 620 deletions
diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json
index 9b39524e8..a31be7931 100644
--- a/.devcontainer/devcontainer.json
+++ b/.devcontainer/devcontainer.json
@@ -1,9 +1,4 @@
{
- // Conda requires lots of memory to resolve our environment
- "hostRequirements": {
- "memory": "8gb"
- },
-
// More info about Features: https://containers.dev/features
"image": "mcr.microsoft.com/devcontainers/universal:2",
"features": {},
diff --git a/.devcontainer/setup.sh b/.devcontainer/setup.sh
index 7d05d9e8f..4ea718ec9 100755
--- a/.devcontainer/setup.sh
+++ b/.devcontainer/setup.sh
@@ -2,7 +2,12 @@
set -e
+curl micro.mamba.pm/install.sh | bash
+
conda init --all
-conda env create -f environment.yml
+micromamba shell init -s bash
+micromamba env create -f environment.yml --yes
+# Note that `micromamba activate numpy-dev` doesn't work, it must be run by the
+# user (same applies to `conda activate`)
git submodule update --init
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
new file mode 100644
index 000000000..6bcbdbfcb
--- /dev/null
+++ b/.github/dependabot.yml
@@ -0,0 +1,11 @@
+version: 2
+updates:
+ - package-ecosystem: github-actions
+ directory: /
+ schedule:
+ interval: daily
+
+ - package-ecosystem: docker
+ directory: /tools/gitpod
+ schedule:
+ interval: daily
diff --git a/.github/workflows/build_test.yml b/.github/workflows/build_test.yml
index ff74cdf61..23d6d8572 100644
--- a/.github/workflows/build_test.yml
+++ b/.github/workflows/build_test.yml
@@ -27,11 +27,11 @@ jobs:
runs-on: ubuntu-latest
continue-on-error: true
steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
with:
submodules: recursive
fetch-depth: 0
- - uses: actions/setup-python@v4
+ - uses: actions/setup-python@d27e3f3d7c64b4bbf8e4abfb9b63b83e846e0435 # v4.5.0
with:
python-version: ${{ env.PYTHON_VERSION }}
- name: Install linter requirements
@@ -47,11 +47,11 @@ jobs:
env:
WITHOUT_SIMD: 1
steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
with:
submodules: recursive
fetch-depth: 0
- - uses: actions/setup-python@v4
+ - uses: actions/setup-python@d27e3f3d7c64b4bbf8e4abfb9b63b83e846e0435 # v4.5.0
with:
python-version: ${{ env.PYTHON_VERSION }}
- uses: ./.github/actions
@@ -65,11 +65,11 @@ jobs:
env:
EXPECT_CPU_FEATURES: "SSE SSE2 SSE3 SSSE3 SSE41 POPCNT SSE42 AVX F16C FMA3 AVX2 AVX512F AVX512CD AVX512_KNL AVX512_KNM AVX512_SKX AVX512_CLX AVX512_CNL AVX512_ICL"
steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
with:
submodules: recursive
fetch-depth: 0
- - uses: actions/setup-python@v4
+ - uses: actions/setup-python@d27e3f3d7c64b4bbf8e4abfb9b63b83e846e0435 # v4.5.0
with:
python-version: ${{ matrix.python-version }}
- uses: ./.github/actions
@@ -79,7 +79,7 @@ jobs:
# provides GCC 7, 8
runs-on: ubuntu-20.04
steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
with:
submodules: recursive
fetch-depth: 0
@@ -117,11 +117,11 @@ jobs:
env:
WITHOUT_OPTIMIZATIONS: 1
steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
with:
submodules: recursive
fetch-depth: 0
- - uses: actions/setup-python@v4
+ - uses: actions/setup-python@d27e3f3d7c64b4bbf8e4abfb9b63b83e846e0435 # v4.5.0
with:
python-version: ${{ env.PYTHON_VERSION }}
- uses: ./.github/actions
@@ -132,11 +132,11 @@ jobs:
env:
CPU_DISPATCH: "none"
steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
with:
submodules: recursive
fetch-depth: 0
- - uses: actions/setup-python@v4
+ - uses: actions/setup-python@d27e3f3d7c64b4bbf8e4abfb9b63b83e846e0435 # v4.5.0
with:
python-version: ${{ env.PYTHON_VERSION }}
- uses: ./.github/actions
@@ -147,11 +147,11 @@ jobs:
env:
CPU_DISPATCH: "max -xop -fma4 -avx512f -avx512cd -avx512_knl -avx512_knm -avx512_skx -avx512_clx -avx512_cnl -avx512_icl"
steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
with:
submodules: recursive
fetch-depth: 0
- - uses: actions/setup-python@v4
+ - uses: actions/setup-python@d27e3f3d7c64b4bbf8e4abfb9b63b83e846e0435 # v4.5.0
with:
python-version: ${{ env.PYTHON_VERSION }}
- uses: ./.github/actions
@@ -162,11 +162,11 @@ jobs:
env:
CPU_DISPATCH: "SSSE3 SSE41 POPCNT SSE42 AVX F16C"
steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
with:
submodules: recursive
fetch-depth: 0
- - uses: actions/setup-python@v4
+ - uses: actions/setup-python@d27e3f3d7c64b4bbf8e4abfb9b63b83e846e0435 # v4.5.0
with:
python-version: ${{ env.PYTHON_VERSION }}
- uses: ./.github/actions
@@ -177,11 +177,11 @@ jobs:
env:
USE_DEBUG: 1
steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
with:
submodules: recursive
fetch-depth: 0
- - uses: actions/setup-python@v4
+ - uses: actions/setup-python@d27e3f3d7c64b4bbf8e4abfb9b63b83e846e0435 # v4.5.0
with:
python-version: ${{ env.PYTHON_VERSION }}
- uses: ./.github/actions
@@ -192,11 +192,11 @@ jobs:
env:
NPY_USE_BLAS_ILP64: 1
steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
with:
submodules: recursive
fetch-depth: 0
- - uses: actions/setup-python@v4
+ - uses: actions/setup-python@d27e3f3d7c64b4bbf8e4abfb9b63b83e846e0435 # v4.5.0
with:
python-version: ${{ env.PYTHON_VERSION }}
- uses: ./.github/actions
@@ -210,11 +210,11 @@ jobs:
RUN_COVERAGE: 1
INSTALL_PICKLE5: 1
steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
with:
submodules: recursive
fetch-depth: 0
- - uses: actions/setup-python@v4
+ - uses: actions/setup-python@d27e3f3d7c64b4bbf8e4abfb9b63b83e846e0435 # v4.5.0
with:
python-version: ${{ env.PYTHON_VERSION }}
- uses: ./.github/actions
@@ -231,11 +231,11 @@ jobs:
NPY_LAPACK_ORDER: MKL,OPENBLAS,ATLAS,LAPACK
USE_ASV: 1
steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
with:
submodules: recursive
fetch-depth: 0
- - uses: actions/setup-python@v4
+ - uses: actions/setup-python@d27e3f3d7c64b4bbf8e4abfb9b63b83e846e0435 # v4.5.0
with:
python-version: ${{ env.PYTHON_VERSION }}
- uses: ./.github/actions
@@ -248,11 +248,11 @@ jobs:
NPY_USE_BLAS_ILP64: 1
NPY_RELAXED_STRIDES_DEBUG: 1
steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
with:
submodules: recursive
fetch-depth: 0
- - uses: actions/setup-python@v4
+ - uses: actions/setup-python@d27e3f3d7c64b4bbf8e4abfb9b63b83e846e0435 # v4.5.0
with:
python-version: ${{ env.PYTHON_VERSION }}
- uses: ./.github/actions
@@ -263,32 +263,30 @@ jobs:
env:
USE_WHEEL: 1
steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
with:
submodules: recursive
fetch-depth: 0
- - uses: actions/setup-python@v4
+ - uses: actions/setup-python@d27e3f3d7c64b4bbf8e4abfb9b63b83e846e0435 # v4.5.0
with:
python-version: ${{ env.PYTHON_VERSION }}
- uses: ./.github/actions
- numpy2_flag_and_no_array_func:
+ numpy2_flag:
needs: [smoke_test]
runs-on: ubuntu-latest
env:
- NUMPY_EXPERIMENTAL_ARRAY_FUNCTION: 0
- # Array func and numpy-2 should be involved, so use this
- # to have a test for feature flagged behavior.
+ # Test for numpy-2.0 feature-flagged behavior.
NPY_NUMPY_2_BEHAVIOR: 1
# Using the future "weak" state doesn't pass tests
# currently unfortunately
NPY_PROMOTION_STATE: legacy
steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
with:
submodules: recursive
fetch-depth: 0
- - uses: actions/setup-python@v4
+ - uses: actions/setup-python@d27e3f3d7c64b4bbf8e4abfb9b63b83e846e0435 # v4.5.0
with:
python-version: ${{ env.PYTHON_VERSION }}
- uses: ./.github/actions
@@ -302,11 +300,11 @@ jobs:
ATLAS: None
DOWNLOAD_OPENBLAS: ''
steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
with:
submodules: recursive
fetch-depth: 0
- - uses: actions/setup-python@v4
+ - uses: actions/setup-python@d27e3f3d7c64b4bbf8e4abfb9b63b83e846e0435 # v4.5.0
with:
python-version: ${{ env.PYTHON_VERSION }}
- uses: ./.github/actions
@@ -317,11 +315,11 @@ jobs:
env:
USE_SDIST: 1
steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
with:
submodules: recursive
fetch-depth: 0
- - uses: actions/setup-python@v4
+ - uses: actions/setup-python@d27e3f3d7c64b4bbf8e4abfb9b63b83e846e0435 # v4.5.0
with:
python-version: ${{ env.PYTHON_VERSION }}
- uses: ./.github/actions
@@ -331,7 +329,7 @@ jobs:
# make sure this matches the base docker image below
runs-on: ubuntu-22.04
steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
with:
submodules: recursive
fetch-depth: 0
@@ -386,11 +384,11 @@ jobs:
needs: [smoke_test]
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
with:
submodules: recursive
fetch-depth: 0
- - uses: actions/setup-python@v4
+ - uses: actions/setup-python@d27e3f3d7c64b4bbf8e4abfb9b63b83e846e0435 # v4.5.0
with:
python-version: ${{ env.PYTHON_VERSION }}
- name: Install Intel SDE
diff --git a/.github/workflows/circleci.yml b/.github/workflows/circleci.yml
index c43dc4fdb..55ee74fbc 100644
--- a/.github/workflows/circleci.yml
+++ b/.github/workflows/circleci.yml
@@ -18,7 +18,7 @@ jobs:
statuses: write
steps:
- name: GitHub Action step
- uses: larsoner/circleci-artifacts-redirector-action@master
+ uses: larsoner/circleci-artifacts-redirector-action@1e28a97d7b1e273a8f78ed4692bfd10f84706a45 # master
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
artifact-path: 0/doc/build/html/index.html
diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml
new file mode 100644
index 000000000..925d6a1f5
--- /dev/null
+++ b/.github/workflows/codeql.yml
@@ -0,0 +1,73 @@
+# For most projects, this workflow file will not need changing; you simply need
+# to commit it to your repository.
+#
+# You may wish to alter this file to override the set of languages analyzed,
+# or to provide custom queries or build logic.
+#
+# ******** NOTE ********
+# We have attempted to detect the languages in your repository. Please check
+# the `language` matrix defined below to confirm you have the correct set of
+# supported CodeQL languages.
+#
+name: "CodeQL"
+
+on:
+ push:
+ branches: ["main"]
+ pull_request:
+ # The branches below must be a subset of the branches above
+ branches: ["main"]
+ schedule:
+ - cron: "0 0 * * 1"
+
+permissions:
+ contents: read
+
+jobs:
+ analyze:
+ name: Analyze
+ runs-on: ubuntu-latest
+ permissions:
+ actions: read
+ contents: read
+ security-events: write
+
+ strategy:
+ fail-fast: false
+ matrix:
+ language: ["python"]
+ # CodeQL supports [ $supported-codeql-languages ]
+ # Learn more about CodeQL language support at https://aka.ms/codeql-docs/language-support
+
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
+
+ # Initializes the CodeQL tools for scanning.
+ - name: Initialize CodeQL
+ uses: github/codeql-action/init@168b99b3c22180941ae7dbdd5f5c9678ede476ba # v2.2.7
+ with:
+ languages: ${{ matrix.language }}
+ # If you wish to specify custom queries, you can do so here or in a config file.
+ # By default, queries listed here will override any specified in a config file.
+ # Prefix the list here with "+" to use these queries and those in the config file.
+
+ # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
+ # If this step fails, then you should remove it and run the build manually (see below)
+ - name: Autobuild
+ uses: github/codeql-action/autobuild@168b99b3c22180941ae7dbdd5f5c9678ede476ba # v2.2.7
+
+ # ℹ️ Command-line programs to run using the OS shell.
+ # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
+
+ # If the Autobuild fails above, remove it and uncomment the following three lines.
+ # modify them (or add more) to build your code if your project, please refer to the EXAMPLE below for guidance.
+
+ # - run: |
+ # echo "Run, Build Application using script"
+ # ./location_of_script_within_repo/buildscript.sh
+
+ - name: Perform CodeQL Analysis
+ uses: github/codeql-action/analyze@168b99b3c22180941ae7dbdd5f5c9678ede476ba # v2.2.7
+ with:
+ category: "/language:${{matrix.language}}"
diff --git a/.github/workflows/cygwin.yml b/.github/workflows/cygwin.yml
index 1345883ff..a600447e7 100644
--- a/.github/workflows/cygwin.yml
+++ b/.github/workflows/cygwin.yml
@@ -20,12 +20,12 @@ jobs:
runs-on: windows-latest
if: "github.repository == 'numpy/numpy'"
steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
with:
submodules: recursive
fetch-depth: 0
- name: Install Cygwin
- uses: cygwin/cygwin-install-action@v2
+ uses: cygwin/cygwin-install-action@f5e0f048310c425e84bc789f493a828c6dc80a25 # v3
with:
platform: x86_64
install-dir: 'C:\tools\cygwin'
@@ -36,7 +36,7 @@ jobs:
python39-hypothesis liblapack-devel
gcc-fortran gcc-g++ git dash
- name: Set Windows PATH
- uses: egor-tensin/cleanup-path@v1
+ uses: egor-tensin/cleanup-path@8469525c8ee3eddabbd3487658621a6235b3c581 # v3
with:
dirs: 'C:\tools\cygwin\bin;C:\tools\cygwin\lib\lapack'
- name: Verify that bash is Cygwin bash
@@ -66,7 +66,7 @@ jobs:
run: |
/usr/bin/python3.9 runtests.py -n
- name: Upload wheel if tests fail
- uses: actions/upload-artifact@v3
+ uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2
if: failure()
with:
name: numpy-cygwin-wheel
diff --git a/.github/workflows/dependency-review.yml b/.github/workflows/dependency-review.yml
new file mode 100644
index 000000000..152196a86
--- /dev/null
+++ b/.github/workflows/dependency-review.yml
@@ -0,0 +1,20 @@
+# Dependency Review Action
+#
+# This Action will scan dependency manifest files that change as part of a Pull Request, surfacing known-vulnerable versions of the packages declared or updated in the PR. Once installed, if the workflow run is marked as required, PRs introducing known-vulnerable packages will be blocked from merging.
+#
+# Source repository: https://github.com/actions/dependency-review-action
+# Public documentation: https://docs.github.com/en/code-security/supply-chain-security/understanding-your-software-supply-chain/about-dependency-review#dependency-review-enforcement
+name: 'Dependency Review'
+on: [pull_request]
+
+permissions:
+ contents: read
+
+jobs:
+ dependency-review:
+ runs-on: ubuntu-latest
+ steps:
+ - name: 'Checkout Repository'
+ uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
+ - name: 'Dependency Review'
+ uses: actions/dependency-review-action@f46c48ed6d4f1227fb2d9ea62bf6bcbed315589e # v3.0.4
diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml
index de3128fc3..c8f95d676 100644
--- a/.github/workflows/docker.yml
+++ b/.github/workflows/docker.yml
@@ -18,11 +18,12 @@ jobs:
if: "github.repository_owner == 'numpy'"
steps:
- name: Clone repository
- uses: actions/checkout@v3
+ uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
- name: Lint Docker
- uses: brpaz/hadolint-action@v1.2.1
+ uses: hadolint/hadolint-action@54c9adbab1582c2ef04b2016b760714a4bfde3cf # v3.1.0
with:
dockerfile: ./tools/gitpod/Dockerfile
+ ignore: DL3059
- name: Get refs
shell: bash
run: |
@@ -32,21 +33,21 @@ jobs:
echo "sha8=$(echo ${GITHUB_SHA} | cut -c1-8)" >> $GITHUB_OUTPUT
id: getrefs
- name: Set up Docker Buildx
- uses: docker/setup-buildx-action@v1
+ uses: docker/setup-buildx-action@4b4e9c3e2d4531116a6f8ba8e71fc6e2cb6e6c8c # v2.5.0
- name: Cache Docker layers
- uses: actions/cache@v3
+ uses: actions/cache@88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8 # v3.3.1
with:
path: /tmp/.buildx-cache
key: ${{ runner.os }}-buildx-${{ github.sha }}
restore-keys: ${{ runner.os }}-buildx-
- name: Login to Docker Hub
- uses: docker/login-action@v1
+ uses: docker/login-action@f4ef78c080cd8ba55a85445d5b36e214a81df20a # v2.1.0
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push
id: docker_build
- uses: docker/build-push-action@v2
+ uses: docker/build-push-action@3b5e8027fcad23fda98b2e3ac259d8d67585f671 # v4.0.0
with:
context: "."
file: "./tools/gitpod/Dockerfile"
diff --git a/.github/workflows/emscripten.yml b/.github/workflows/emscripten.yml
index b60a77f1b..7ad7903a3 100644
--- a/.github/workflows/emscripten.yml
+++ b/.github/workflows/emscripten.yml
@@ -31,7 +31,7 @@ jobs:
NODE_VERSION: 18
steps:
- name: Checkout numpy
- uses: actions/checkout@v3
+ uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
with:
submodules: true
# versioneer.py requires the latest tag to be reachable. Here we
@@ -42,11 +42,11 @@ jobs:
- name: set up python
id: setup-python
- uses: actions/setup-python@v4
+ uses: actions/setup-python@d27e3f3d7c64b4bbf8e4abfb9b63b83e846e0435 # v4.5.0
with:
python-version: ${{ env.PYTHON_VERSION }}
- - uses: mymindstorm/setup-emsdk@v12
+ - uses: mymindstorm/setup-emsdk@ab889da2abbcbb280f91ec4c215d3bb4f3a8f775 # v12
with:
version: ${{ env.EMSCRIPTEN_VERSION }}
actions-cache-folder: emsdk-cache
@@ -58,7 +58,7 @@ jobs:
run: CFLAGS=-g2 LDFLAGS=-g2 pyodide build
- name: set up node
- uses: actions/setup-node@v3
+ uses: actions/setup-node@64ed1c7eab4cce3362f8c340dee64e5eaeef8f7c # v3.6.0
with:
node-version: ${{ env.NODE_VERSION }}
diff --git a/.github/workflows/gitpod.yml b/.github/workflows/gitpod.yml
index f48b506b4..737c3c07b 100644
--- a/.github/workflows/gitpod.yml
+++ b/.github/workflows/gitpod.yml
@@ -16,13 +16,14 @@ jobs:
if: "github.repository_owner == 'numpy'"
steps:
- name: Clone repository
- uses: actions/checkout@v3
+ uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
with:
fetch-depth: 0
- name: Lint Docker
- uses: brpaz/hadolint-action@v1.2.1
+ uses: hadolint/hadolint-action@54c9adbab1582c2ef04b2016b760714a4bfde3cf # v3.1.0
with:
dockerfile: ./tools/gitpod/gitpod.Dockerfile
+ ignore: DL3059
- name: Get refs
shell: bash
run: |
@@ -32,21 +33,21 @@ jobs:
echo "sha8=$(echo ${GITHUB_SHA} | cut -c1-8)" >> $GITHUB_OUTPUT
id: getrefs
- name: Set up Docker Buildx
- uses: docker/setup-buildx-action@v1
+ uses: docker/setup-buildx-action@4b4e9c3e2d4531116a6f8ba8e71fc6e2cb6e6c8c # v2.5.0
- name: Cache Docker layers
- uses: actions/cache@v3
+ uses: actions/cache@88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8 # v3.3.1
with:
path: /tmp/.buildx-cache
key: ${{ runner.os }}-buildx-${{ github.sha }}
restore-keys: ${{ runner.os }}-buildx-
- name: Login to Docker Hub
- uses: docker/login-action@v1
+ uses: docker/login-action@f4ef78c080cd8ba55a85445d5b36e214a81df20a # v2.1.0
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push
id: docker_build
- uses: docker/build-push-action@v2
+ uses: docker/build-push-action@3b5e8027fcad23fda98b2e3ac259d8d67585f671 # v4.0.0
with:
context: "."
file: "./tools/gitpod/gitpod.Dockerfile"
diff --git a/.github/workflows/labeler.yml b/.github/workflows/labeler.yml
index e2d47a0df..9ceafebb7 100644
--- a/.github/workflows/labeler.yml
+++ b/.github/workflows/labeler.yml
@@ -12,7 +12,7 @@ jobs:
pull-requests: write # to add labels
steps:
- name: Label the PR
- uses: gerrymanoim/pr-prefix-labeler@v3
+ uses: gerrymanoim/pr-prefix-labeler@c8062327f6de59a9ae1c19f7f07cacd0b976b6fa # v3
continue-on-error: true
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/linux_meson.yml b/.github/workflows/linux_meson.yml
index b03144a12..902c06997 100644
--- a/.github/workflows/linux_meson.yml
+++ b/.github/workflows/linux_meson.yml
@@ -21,15 +21,15 @@ permissions:
contents: read # to fetch code (actions/checkout)
jobs:
- meson_devpy:
+ meson_spin:
if: "github.repository == 'numpy/numpy'"
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
with:
submodules: recursive
fetch-depth: 0
- - uses: actions/setup-python@v4
+ - uses: actions/setup-python@d27e3f3d7c64b4bbf8e4abfb9b63b83e846e0435 # v4.5.0
with:
python-version: ${{ env.PYTHON_VERSION }}
- name: Install dependencies
@@ -41,7 +41,7 @@ jobs:
env:
TERM: xterm-256color
run:
- ./dev.py build -- --werror
+ ./spin build -- --werror
- name: Check build-internal dependencies
run:
ninja -C build -t missingdeps
@@ -54,4 +54,4 @@ jobs:
TERM: xterm-256color
run: |
pip install pytest hypothesis typing_extensions
- ./dev.py test
+ ./spin test
diff --git a/.github/workflows/linux_musl.yml b/.github/workflows/linux_musl.yml
index c25fbbaf5..54c8b7c2d 100644
--- a/.github/workflows/linux_musl.yml
+++ b/.github/workflows/linux_musl.yml
@@ -62,5 +62,5 @@ jobs:
pip install pytest hypothesis typing_extensions
# use meson to build and test
- ./dev.py build
- ./dev.py test
+ ./spin build
+ ./spin test
diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml
index 0cef4f7a9..6924048be 100644
--- a/.github/workflows/scorecards.yml
+++ b/.github/workflows/scorecards.yml
@@ -25,12 +25,12 @@ jobs:
steps:
- name: "Checkout code"
- uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # v3.1.0
+ uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.1.0
with:
persist-credentials: false
- name: "Run analysis"
- uses: ossf/scorecard-action@99c53751e09b9529366343771cc321ec74e9bd3d # v2.0.6
+ uses: ossf/scorecard-action@e38b1902ae4f44df626f11ba0734b14fb91f8f86 # v2.1.2
with:
results_file: results.sarif
results_format: sarif
@@ -50,6 +50,6 @@ jobs:
# Upload the results to GitHub's code scanning dashboard.
- name: "Upload to code-scanning"
- uses: github/codeql-action/upload-sarif@807578363a7869ca324a79039e6db9c843e0e100 # v2.1.27
+ uses: github/codeql-action/upload-sarif@168b99b3c22180941ae7dbdd5f5c9678ede476ba # v2.1.27
with:
sarif_file: results.sarif
diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml
index 681981e8e..df31be80e 100644
--- a/.github/workflows/wheels.yml
+++ b/.github/workflows/wheels.yml
@@ -43,7 +43,7 @@ jobs:
message: ${{ steps.commit_message.outputs.message }}
steps:
- name: Checkout numpy
- uses: actions/checkout@v3
+ uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
# Gets the correct commit message for pull request
with:
ref: ${{ github.event.pull_request.head.sha }}
@@ -92,7 +92,7 @@ jobs:
IS_SCHEDULE_DISPATCH: ${{ github.event_name == 'schedule' || github.event_name == 'workflow_dispatch' }}
steps:
- name: Checkout numpy
- uses: actions/checkout@v3
+ uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
with:
submodules: true
# versioneer.py requires the latest tag to be reachable. Here we
@@ -102,7 +102,7 @@ jobs:
fetch-depth: 0
# Used to push the built wheels
- - uses: actions/setup-python@v4
+ - uses: actions/setup-python@d27e3f3d7c64b4bbf8e4abfb9b63b83e846e0435 # v4.5.0
with:
python-version: "3.x"
@@ -114,16 +114,16 @@ jobs:
if: ${{ matrix.buildplat[1] == 'win32' }}
- name: Build wheels
- uses: pypa/cibuildwheel@v2.12.0
+ uses: pypa/cibuildwheel@02ad79a31bf7aa0eee07f690509048d2fb9fd445 # v2.12.1
env:
CIBW_BUILD: ${{ matrix.python }}-${{ matrix.buildplat[1] }}
- - uses: actions/upload-artifact@v3
+ - uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2
with:
name: ${{ matrix.python }}-${{ startsWith(matrix.buildplat[1], 'macosx') && 'macosx' || matrix.buildplat[1] }}
path: ./wheelhouse/*.whl
- - uses: conda-incubator/setup-miniconda@v2
+ - uses: conda-incubator/setup-miniconda@3b0f2504dd76ef23b6d31f291f4913fb60ab5ff3 # v2.2.0
with:
# for installation of anaconda-client, required for upload to
# anaconda.org
@@ -171,7 +171,7 @@ jobs:
# IS_SCHEDULE_DISPATCH: ${{ github.event_name == 'schedule' || github.event_name == 'workflow_dispatch' }}
steps:
- name: Checkout numpy
- uses: actions/checkout@v3
+ uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
with:
submodules: true
# versioneer.py requires the latest tag to be reachable. Here we
@@ -180,7 +180,7 @@ jobs:
# https://github.com/actions/checkout/issues/338
fetch-depth: 0
# Used to push the built wheels
- - uses: actions/setup-python@v4
+ - uses: actions/setup-python@d27e3f3d7c64b4bbf8e4abfb9b63b83e846e0435 # v4.5.0
with:
# Build sdist on lowest supported Python
python-version: "3.9"
@@ -201,12 +201,12 @@ jobs:
python -mpip install twine
twine check dist/*
- - uses: actions/upload-artifact@v3
+ - uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2
with:
name: sdist
path: ./dist/*
- - uses: conda-incubator/setup-miniconda@v2
+ - uses: conda-incubator/setup-miniconda@3b0f2504dd76ef23b6d31f291f4913fb60ab5ff3 # v2.2.0
with:
# for installation of anaconda-client, required for upload to
# anaconda.org
diff --git a/.github/workflows/windows_meson.yml b/.github/workflows/windows_meson.yml
index 9e82d8fce..e0064dc19 100644
--- a/.github/workflows/windows_meson.yml
+++ b/.github/workflows/windows_meson.yml
@@ -23,12 +23,12 @@ jobs:
# if: "github.repository == 'numpy/numpy'"
steps:
- name: Checkout
- uses: actions/checkout@v3
+ uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
with:
submodules: recursive
fetch-depth: 0
- name: Setup Python
- uses: actions/setup-python@v4
+ uses: actions/setup-python@d27e3f3d7c64b4bbf8e4abfb9b63b83e846e0435 # v4.5.0
with:
python-version: ${{ env.PYTHON_VERSION }}
diff --git a/INSTALL.rst b/INSTALL.rst
index f136b7cfc..9ac3aa526 100644
--- a/INSTALL.rst
+++ b/INSTALL.rst
@@ -14,7 +14,7 @@ Prerequisites
Building NumPy requires the following installed software:
-1) Python__ 3.8.x or newer.
+1) Python__ 3.9.x or newer.
Please note that the Python development headers also need to be installed,
e.g., on Debian/Ubuntu one needs to install both `python3` and
diff --git a/build_requirements.txt b/build_requirements.txt
index b2d55a73e..3699320c8 100644
--- a/build_requirements.txt
+++ b/build_requirements.txt
@@ -2,4 +2,4 @@ meson-python>=0.10.0
Cython>=0.29.30,<3.0
wheel==0.38.1
ninja
-git+https://github.com/scientific-python/devpy
+spin>=0.2
diff --git a/building_with_meson.md b/building_with_meson.md
index f9cbd003b..cf198e7d9 100644
--- a/building_with_meson.md
+++ b/building_with_meson.md
@@ -15,17 +15,17 @@ into a problem._
# also make sure you have pkg-config and the usual system dependencies for
# NumPy`
-Then install devpy:
-- `python -m pip install git+https://github.com/scientific-python/devpy`
+Then install spin:
+- `python -m pip install spin`
-**Compile and install:** `./dev.py build`
+**Compile and install:** `./spin build`
This builds in the `build/` directory, and installs into the `build-install` directory.
-Then run the test suite or a shell via `dev.py`:
+Then run the test suite or a shell via `spin`:
```
-./dev.py test
-./dev.py ipython
+./spin test
+./spin ipython
```
Alternatively, to use the package, add it to your `PYTHONPATH`:
@@ -44,7 +44,7 @@ Note that `pip` will use the default build system, which is (as of now) still
After that is done, `pip install .` or `pip install --no-build-isolation .`
will work as expected. As does building an sdist or wheel with `python -m build`.
Note, however, that `pip install -e .` (in-place developer install) does not!
-Use `dev.py` instead (see above).
+Use `spin` instead (see above).
@@ -67,5 +67,5 @@ Libs: -L${libdir} -lopenblas
Then build with:
```
-./dev.py build -- -Dpkg_config_path=${HOME}/lib/pkgconfig
+./spin build -- -Dpkg_config_path=${HOME}/lib/pkgconfig
```
diff --git a/dev.py b/dev.py
deleted file mode 100755
index 205014938..000000000
--- a/dev.py
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/usr/bin/env python
-#
-# Example stub for running `python -m dev.py`
-#
-# Copy this into your project root.
-
-import os
-import sys
-import runpy
-
-sys.path.remove(os.path.abspath(os.path.dirname(sys.argv[0])))
-try:
- runpy.run_module("devpy", run_name="__main__")
-except ImportError:
- print("Cannot import devpy; please install it using")
- print()
- print(" pip install git+https://github.com/scientific-python/devpy")
- print()
- sys.exit(1)
diff --git a/doc/release/upcoming_changes/22982.new_feature.rst b/doc/release/upcoming_changes/22982.new_feature.rst
new file mode 100644
index 000000000..c98f2791c
--- /dev/null
+++ b/doc/release/upcoming_changes/22982.new_feature.rst
@@ -0,0 +1,13 @@
+Fujitsu C/C++ compiler is now supported
+----------------------------------------------
+Support for the Fujitsu compiler has been added.
+To build with the Fujitsu compiler, run:
+
+ python setup.py build -c fujitsu
+
+
+SSL2 is now supported
+-----------------------------------
+Support for SSL2 has been added. SSL2 is a library that provides OpenBLAS compatible GEMM functions.
+To enable SSL2, you need to edit site.cfg and build with the Fujitsu compiler.
+See site.cfg.example.
diff --git a/doc/release/upcoming_changes/23229.compatibility.rst b/doc/release/upcoming_changes/23229.compatibility.rst
new file mode 100644
index 000000000..284cc06ab
--- /dev/null
+++ b/doc/release/upcoming_changes/23229.compatibility.rst
@@ -0,0 +1,3 @@
+- The ``busday_count`` method now correctly handles cases where the ``begindates`` is later in time
+ than the ``enddates``. Previously, the ``enddates`` was included, even though the documentation states
+ it is always excluded.
diff --git a/doc/release/upcoming_changes/23314.deprecation.rst b/doc/release/upcoming_changes/23314.deprecation.rst
new file mode 100644
index 000000000..8bed1aef8
--- /dev/null
+++ b/doc/release/upcoming_changes/23314.deprecation.rst
@@ -0,0 +1,4 @@
+* ``np.product`` is deprecated. Use `np.prod` instead.
+* ``np.cumproduct`` is deprecated. Use `np.cumprod` instead.
+* ``np.sometrue`` is deprecated. Use `np.any` instead.
+* ``np.alltrue`` is deprecated. Use `np.all` instead.
diff --git a/doc/release/upcoming_changes/23376.expired.rst b/doc/release/upcoming_changes/23376.expired.rst
new file mode 100644
index 000000000..e289b087c
--- /dev/null
+++ b/doc/release/upcoming_changes/23376.expired.rst
@@ -0,0 +1,9 @@
+Environment variable to disable dispatching removed
+---------------------------------------------------
+Support for the ``NUMPY_EXPERIMENTAL_ARRAY_FUNCTION`` environment variable has
+been removed. This variable disabled dispatching with ``__array_function__``.
+
+Support for ``y=`` as an alias of ``out=`` removed
+--------------------------------------------------
+The ``fix``, ``isposinf`` and ``isneginf`` functions allowed using ``y=`` as a
+(deprecated) alias for ``out=``. This is no longer supported.
diff --git a/doc/release/upcoming_changes/23403.expired.rst b/doc/release/upcoming_changes/23403.expired.rst
new file mode 100644
index 000000000..b099eb4e9
--- /dev/null
+++ b/doc/release/upcoming_changes/23403.expired.rst
@@ -0,0 +1,4 @@
+* ``np.clip`` now defaults to same-kind casting. Falling back to
+ unsafe casting was deprecated in NumPy 1.17.
+* ``np.clip`` will now propagate ``np.nan`` values passed as ``min`` or ``max``.
+ Previously, a scalar NaN was usually ignored. This was deprecated in NumPy 1.17.
diff --git a/doc/source/dev/index.rst b/doc/source/dev/index.rst
index 55f5ce99a..bd3595741 100644
--- a/doc/source/dev/index.rst
+++ b/doc/source/dev/index.rst
@@ -45,7 +45,7 @@ Here's the short summary, complete TOC links are below:
* Clone the project to your local computer::
- git clone https://github.com/your-username/numpy.git
+ git clone --recurse-submodules https://github.com/your-username/numpy.git
* Change the directory::
@@ -180,7 +180,7 @@ Guidelines
get no response to your pull request within a week.
.. _stylistic-guidelines:
-
+
Stylistic Guidelines
--------------------
diff --git a/doc/source/reference/arrays.classes.rst b/doc/source/reference/arrays.classes.rst
index 2f40423ee..2cce595e0 100644
--- a/doc/source/reference/arrays.classes.rst
+++ b/doc/source/reference/arrays.classes.rst
@@ -162,15 +162,6 @@ NumPy provides several hooks that classes can customize:
.. versionadded:: 1.16
- .. note::
-
- - In NumPy 1.17, the protocol is enabled by default, but can be disabled
- with ``NUMPY_EXPERIMENTAL_ARRAY_FUNCTION=0``.
- - In NumPy 1.16, you need to set the environment variable
- ``NUMPY_EXPERIMENTAL_ARRAY_FUNCTION=1`` before importing NumPy to use
- NumPy function overrides.
- - Eventually, expect to ``__array_function__`` to always be enabled.
-
- ``func`` is an arbitrary callable exposed by NumPy's public API,
which was called in the form ``func(*args, **kwargs)``.
- ``types`` is a collection :py:class:`collections.abc.Collection`
diff --git a/doc/source/reference/global_state.rst b/doc/source/reference/global_state.rst
index a110ce7c1..d73da4244 100644
--- a/doc/source/reference/global_state.rst
+++ b/doc/source/reference/global_state.rst
@@ -55,19 +55,6 @@ SIMD feature selection
Setting ``NPY_DISABLE_CPU_FEATURES`` will exclude simd features at runtime.
See :ref:`runtime-simd-dispatch`.
-Interoperability-Related Options
-================================
-
-The array function protocol which allows array-like objects to
-hook into the NumPy API is currently enabled by default.
-This option exists since NumPy 1.16 and is enabled by default since
-NumPy 1.17. It can be disabled using::
-
- NUMPY_EXPERIMENTAL_ARRAY_FUNCTION=0
-
-See also :py:meth:`numpy.class.__array_function__` for more information.
-This flag is checked at import time.
-
Debugging-Related Options
=========================
diff --git a/doc/source/user/absolute_beginners.rst b/doc/source/user/absolute_beginners.rst
index dae2c4f06..dfcdc669b 100644
--- a/doc/source/user/absolute_beginners.rst
+++ b/doc/source/user/absolute_beginners.rst
@@ -64,8 +64,8 @@ To access NumPy and its functions import it in your Python code like this::
import numpy as np
We shorten the imported name to ``np`` for better readability of code using
-NumPy. This is a widely adopted convention that you should follow so that
-anyone working with your code can easily understand it.
+NumPy. This is a widely adopted convention that makes your code more readable
+for everyone working on it. We recommend always using ``import numpy as np``.
Reading the example code
------------------------
diff --git a/doc/source/user/building.rst b/doc/source/user/building.rst
index de25dc13c..442bda4b3 100644
--- a/doc/source/user/building.rst
+++ b/doc/source/user/building.rst
@@ -20,7 +20,7 @@ Prerequisites
Building NumPy requires the following software installed:
-1) Python 3.8.x or newer
+1) Python 3.9.x or newer
Please note that the Python development headers also need to be installed,
e.g., on Debian/Ubuntu one needs to install both `python3` and
diff --git a/environment.yml b/environment.yml
index 37c94e7f1..7e5ee6cfd 100644
--- a/environment.yml
+++ b/environment.yml
@@ -17,7 +17,7 @@ dependencies:
- ninja
- pkg-config
- meson-python
- - pip # so you can use pip to install devpy
+ - pip # so you can use pip to install spin
# For testing
- pytest
- pytest-cov
diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi
index a42e6c0e4..ee5fbb601 100644
--- a/numpy/__init__.pyi
+++ b/numpy/__init__.pyi
@@ -1,19 +1,15 @@
import builtins
import os
-import sys
import mmap
import ctypes as ct
import array as _array
import datetime as dt
import enum
from abc import abstractmethod
-from types import TracebackType, MappingProxyType
+from types import TracebackType, MappingProxyType, GenericAlias
from contextlib import ContextDecorator
from contextlib import contextmanager
-if sys.version_info >= (3, 9):
- from types import GenericAlias
-
from numpy._pytesttester import PytestTester
from numpy.core._internal import _ctypes
@@ -461,7 +457,6 @@ from numpy.lib.function_base import (
digitize as digitize,
cov as cov,
corrcoef as corrcoef,
- msort as msort,
median as median,
sinc as sinc,
hamming as hamming,
@@ -2979,9 +2974,8 @@ class floating(inexact[_NBit1]):
@classmethod
def fromhex(cls: type[float64], string: str, /) -> float64: ...
def as_integer_ratio(self) -> tuple[int, int]: ...
- if sys.version_info >= (3, 9):
- def __ceil__(self: float64) -> int: ...
- def __floor__(self: float64) -> int: ...
+ def __ceil__(self: float64) -> int: ...
+ def __floor__(self: float64) -> int: ...
def __trunc__(self: float64) -> int: ...
def __getnewargs__(self: float64) -> tuple[float]: ...
def __getformat__(self: float64, typestr: L["double", "float"], /) -> str: ...
@@ -3139,10 +3133,6 @@ infty: Final[float]
nan: Final[float]
pi: Final[float]
-CLIP: L[0]
-WRAP: L[1]
-RAISE: L[2]
-
ERR_IGNORE: L[0]
ERR_WARN: L[1]
ERR_RAISE: L[2]
@@ -3211,7 +3201,7 @@ class ufunc:
# can't type them very precisely.
reduce: Any
accumulate: Any
- reduce: Any
+ reduceat: Any
outer: Any
# Similarly at won't be defined for ufuncs that return multiple
# outputs, so we can't type it very precisely.
diff --git a/numpy/array_api/_typing.py b/numpy/array_api/_typing.py
index dfa87b358..3f9b7186a 100644
--- a/numpy/array_api/_typing.py
+++ b/numpy/array_api/_typing.py
@@ -17,14 +17,12 @@ __all__ = [
"PyCapsule",
]
-import sys
from typing import (
Any,
Literal,
Sequence,
Type,
Union,
- TYPE_CHECKING,
TypeVar,
Protocol,
)
@@ -51,21 +49,20 @@ class NestedSequence(Protocol[_T_co]):
def __len__(self, /) -> int: ...
Device = Literal["cpu"]
-if TYPE_CHECKING or sys.version_info >= (3, 9):
- Dtype = dtype[Union[
- int8,
- int16,
- int32,
- int64,
- uint8,
- uint16,
- uint32,
- uint64,
- float32,
- float64,
- ]]
-else:
- Dtype = dtype
+
+Dtype = dtype[Union[
+ int8,
+ int16,
+ int32,
+ int64,
+ uint8,
+ uint16,
+ uint32,
+ uint64,
+ float32,
+ float64,
+]]
+
SupportsBufferProtocol = Any
PyCapsule = Any
diff --git a/numpy/core/_methods.py b/numpy/core/_methods.py
index 040f02a9d..0fc070b34 100644
--- a/numpy/core/_methods.py
+++ b/numpy/core/_methods.py
@@ -87,79 +87,16 @@ def _count_reduce_items(arr, axis, keepdims=False, where=True):
keepdims)
return items
-# Numpy 1.17.0, 2019-02-24
-# Various clip behavior deprecations, marked with _clip_dep as a prefix.
-
-def _clip_dep_is_scalar_nan(a):
- # guarded to protect circular imports
- from numpy.core.fromnumeric import ndim
- if ndim(a) != 0:
- return False
- try:
- return um.isnan(a)
- except TypeError:
- return False
-
-def _clip_dep_is_byte_swapped(a):
- if isinstance(a, mu.ndarray):
- return not a.dtype.isnative
- return False
-
-def _clip_dep_invoke_with_casting(ufunc, *args, out=None, casting=None, **kwargs):
- # normal path
- if casting is not None:
- return ufunc(*args, out=out, casting=casting, **kwargs)
-
- # try to deal with broken casting rules
- try:
- return ufunc(*args, out=out, **kwargs)
- except _exceptions._UFuncOutputCastingError as e:
- # Numpy 1.17.0, 2019-02-24
- warnings.warn(
- "Converting the output of clip from {!r} to {!r} is deprecated. "
- "Pass `casting=\"unsafe\"` explicitly to silence this warning, or "
- "correct the type of the variables.".format(e.from_, e.to),
- DeprecationWarning,
- stacklevel=2
- )
- return ufunc(*args, out=out, casting="unsafe", **kwargs)
-
-def _clip(a, min=None, max=None, out=None, *, casting=None, **kwargs):
+def _clip(a, min=None, max=None, out=None, **kwargs):
if min is None and max is None:
raise ValueError("One of max or min must be given")
- # Numpy 1.17.0, 2019-02-24
- # This deprecation probably incurs a substantial slowdown for small arrays,
- # it will be good to get rid of it.
- if not _clip_dep_is_byte_swapped(a) and not _clip_dep_is_byte_swapped(out):
- using_deprecated_nan = False
- if _clip_dep_is_scalar_nan(min):
- min = -float('inf')
- using_deprecated_nan = True
- if _clip_dep_is_scalar_nan(max):
- max = float('inf')
- using_deprecated_nan = True
- if using_deprecated_nan:
- warnings.warn(
- "Passing `np.nan` to mean no clipping in np.clip has always "
- "been unreliable, and is now deprecated. "
- "In future, this will always return nan, like it already does "
- "when min or max are arrays that contain nan. "
- "To skip a bound, pass either None or an np.inf of an "
- "appropriate sign.",
- DeprecationWarning,
- stacklevel=2
- )
-
if min is None:
- return _clip_dep_invoke_with_casting(
- um.minimum, a, max, out=out, casting=casting, **kwargs)
+ return um.minimum(a, max, out=out, **kwargs)
elif max is None:
- return _clip_dep_invoke_with_casting(
- um.maximum, a, min, out=out, casting=casting, **kwargs)
+ return um.maximum(a, min, out=out, **kwargs)
else:
- return _clip_dep_invoke_with_casting(
- um.clip, a, min, max, out=out, casting=casting, **kwargs)
+ return um.clip(a, min, max, out=out, **kwargs)
def _mean(a, axis=None, dtype=None, out=None, keepdims=False, *, where=True):
arr = asanyarray(a)
diff --git a/numpy/core/arrayprint.py b/numpy/core/arrayprint.py
index a243366b7..dcfb6e6a8 100644
--- a/numpy/core/arrayprint.py
+++ b/numpy/core/arrayprint.py
@@ -1428,6 +1428,10 @@ def dtype_is_implied(dtype):
# not just void types can be structured, and names are not part of the repr
if dtype.names is not None:
return False
+
+ # should care about endianness *unless size is 1* (e.g., int8, bool)
+ if not dtype.isnative:
+ return False
return dtype.type in _typelessdata
@@ -1453,10 +1457,14 @@ def dtype_short_repr(dtype):
return "'%s'" % str(dtype)
typename = dtype.name
+ if not dtype.isnative:
+ # deal with cases like dtype('<u2') that are identical to an
+ # established dtype (in this case uint16)
+ # except that they have a different endianness.
+ return "'%s'" % str(dtype)
# quote typenames which can't be represented as python variable names
if typename and not (typename[0].isalpha() and typename.isalnum()):
typename = repr(typename)
-
return typename
diff --git a/numpy/core/code_generators/ufunc_docstrings.py b/numpy/core/code_generators/ufunc_docstrings.py
index ecfc5affe..05f947c15 100644
--- a/numpy/core/code_generators/ufunc_docstrings.py
+++ b/numpy/core/code_generators/ufunc_docstrings.py
@@ -2021,7 +2021,7 @@ add_newdoc('numpy.core.umath', 'log',
has a branch cut `[-inf, 0]` and is continuous from above on it. `log`
handles the floating-point negative zero as an infinitesimal negative
number, conforming to the C99 standard.
-
+
In the cases where the input has a negative real part and a very small
negative complex part (approaching 0), the result is so close to `-pi`
that it evaluates to exactly `-pi`.
@@ -2457,7 +2457,7 @@ add_newdoc('numpy.core.umath', 'maximum',
"""
Element-wise maximum of array elements.
- Compare two arrays and returns a new array containing the element-wise
+ Compare two arrays and return a new array containing the element-wise
maxima. If one of the elements being compared is a NaN, then that
element is returned. If both elements are NaNs then the first is
returned. The latter distinction is important for complex NaNs, which
@@ -2516,7 +2516,7 @@ add_newdoc('numpy.core.umath', 'minimum',
"""
Element-wise minimum of array elements.
- Compare two arrays and returns a new array containing the element-wise
+ Compare two arrays and return a new array containing the element-wise
minima. If one of the elements being compared is a NaN, then that
element is returned. If both elements are NaNs then the first is
returned. The latter distinction is important for complex NaNs, which
@@ -2575,7 +2575,7 @@ add_newdoc('numpy.core.umath', 'fmax',
"""
Element-wise maximum of array elements.
- Compare two arrays and returns a new array containing the element-wise
+ Compare two arrays and return a new array containing the element-wise
maxima. If one of the elements being compared is a NaN, then the
non-nan element is returned. If both elements are NaNs then the first
is returned. The latter distinction is important for complex NaNs,
@@ -2633,7 +2633,7 @@ add_newdoc('numpy.core.umath', 'fmin',
"""
Element-wise minimum of array elements.
- Compare two arrays and returns a new array containing the element-wise
+ Compare two arrays and return a new array containing the element-wise
minima. If one of the elements being compared is a NaN, then the
non-nan element is returned. If both elements are NaNs then the first
is returned. The latter distinction is important for complex NaNs,
diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py
index ed8b68ecd..b36667427 100644
--- a/numpy/core/fromnumeric.py
+++ b/numpy/core/fromnumeric.py
@@ -438,7 +438,7 @@ def _repeat_dispatcher(a, repeats, axis=None):
@array_function_dispatch(_repeat_dispatcher)
def repeat(a, repeats, axis=None):
"""
- Repeat elements of an array.
+ Repeat each element of an array after themselves
Parameters
----------
@@ -3805,7 +3805,16 @@ def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, *,
# are reference purposes only. Wherever possible,
# avoid using them.
-@array_function_dispatch(_round_dispatcher)
+
+def _round__dispatcher(a, decimals=None, out=None):
+ # 2023-02-28, 1.25.0
+ warnings.warn("`round_` is deprecated as of NumPy 1.25.0, and will be "
+ "removed in NumPy 2.0. Please use `round` instead.",
+ DeprecationWarning, stacklevel=3)
+ return (a, out)
+
+
+@array_function_dispatch(_round__dispatcher)
def round_(a, decimals=0, out=None):
"""
Round an array to the given number of decimals.
@@ -3821,17 +3830,27 @@ def round_(a, decimals=0, out=None):
--------
around : equivalent function; see for details.
"""
- warnings.warn("`round_` is deprecated as of NumPy 1.25.0, and will be "
- "removed in NumPy 2.0. Please use `round` instead.",
- DeprecationWarning, stacklevel=2)
return around(a, decimals=decimals, out=out)
-@array_function_dispatch(_prod_dispatcher, verify=False)
+def _product_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None,
+ initial=None, where=None):
+ # 2023-03-02, 1.25.0
+ warnings.warn("`product` is deprecated as of NumPy 1.25.0, and will be "
+ "removed in NumPy 2.0. Please use `prod` instead.",
+ DeprecationWarning, stacklevel=3)
+ return (a, out)
+
+
+@array_function_dispatch(_product_dispatcher, verify=False)
def product(*args, **kwargs):
"""
Return the product of array elements over a given axis.
+ .. deprecated:: 1.25.0
+ ``product`` is deprecated as of NumPy 1.25.0, and will be
+ removed in NumPy 2.0. Please use `prod` instead.
+
See Also
--------
prod : equivalent function; see for details.
@@ -3839,11 +3858,23 @@ def product(*args, **kwargs):
return prod(*args, **kwargs)
-@array_function_dispatch(_cumprod_dispatcher, verify=False)
+def _cumproduct_dispatcher(a, axis=None, dtype=None, out=None):
+ # 2023-03-02, 1.25.0
+ warnings.warn("`cumproduct` is deprecated as of NumPy 1.25.0, and will be "
+ "removed in NumPy 2.0. Please use `cumprod` instead.",
+ DeprecationWarning, stacklevel=3)
+ return (a, out)
+
+
+@array_function_dispatch(_cumproduct_dispatcher, verify=False)
def cumproduct(*args, **kwargs):
"""
Return the cumulative product over the given axis.
+ .. deprecated:: 1.25.0
+ ``cumproduct`` is deprecated as of NumPy 1.25.0, and will be
+ removed in NumPy 2.0. Please use `cumprod` instead.
+
See Also
--------
cumprod : equivalent function; see for details.
@@ -3851,13 +3882,26 @@ def cumproduct(*args, **kwargs):
return cumprod(*args, **kwargs)
-@array_function_dispatch(_any_dispatcher, verify=False)
+def _sometrue_dispatcher(a, axis=None, out=None, keepdims=None, *,
+ where=np._NoValue):
+ # 2023-03-02, 1.25.0
+ warnings.warn("`sometrue` is deprecated as of NumPy 1.25.0, and will be "
+ "removed in NumPy 2.0. Please use `any` instead.",
+ DeprecationWarning, stacklevel=3)
+ return (a, where, out)
+
+
+@array_function_dispatch(_sometrue_dispatcher, verify=False)
def sometrue(*args, **kwargs):
"""
Check whether some values are true.
Refer to `any` for full documentation.
+ .. deprecated:: 1.25.0
+ ``sometrue`` is deprecated as of NumPy 1.25.0, and will be
+ removed in NumPy 2.0. Please use `any` instead.
+
See Also
--------
any : equivalent function; see for details.
@@ -3865,11 +3909,23 @@ def sometrue(*args, **kwargs):
return any(*args, **kwargs)
-@array_function_dispatch(_all_dispatcher, verify=False)
+def _alltrue_dispatcher(a, axis=None, out=None, keepdims=None, *, where=None):
+ # 2023-03-02, 1.25.0
+ warnings.warn("`alltrue` is deprecated as of NumPy 1.25.0, and will be "
+ "removed in NumPy 2.0. Please use `all` instead.",
+ DeprecationWarning, stacklevel=3)
+ return (a, where, out)
+
+
+@array_function_dispatch(_alltrue_dispatcher, verify=False)
def alltrue(*args, **kwargs):
"""
Check if all elements of input array are true.
+ .. deprecated:: 1.25.0
+ ``alltrue`` is deprecated as of NumPy 1.25.0, and will be
+ removed in NumPy 2.0. Please use `all` instead.
+
See Also
--------
numpy.all : Equivalent function; see for details.
diff --git a/numpy/core/include/numpy/npy_cpu.h b/numpy/core/include/numpy/npy_cpu.h
index 78d229e7d..a19f8e6bb 100644
--- a/numpy/core/include/numpy/npy_cpu.h
+++ b/numpy/core/include/numpy/npy_cpu.h
@@ -77,7 +77,7 @@
#elif defined(__ARMEL__) || defined(__AARCH64EL__) || defined(_M_ARM64)
#if defined(__ARM_32BIT_STATE)
#define NPY_CPU_ARMEL_AARCH32
- #elif defined(__ARM_64BIT_STATE) || defined(_M_ARM64)
+ #elif defined(__ARM_64BIT_STATE) || defined(_M_ARM64) || defined(__AARCH64EL__)
#define NPY_CPU_ARMEL_AARCH64
#else
#define NPY_CPU_ARMEL
diff --git a/numpy/core/meson.build b/numpy/core/meson.build
index 646dc0597..9aaa5ed87 100644
--- a/numpy/core/meson.build
+++ b/numpy/core/meson.build
@@ -83,6 +83,9 @@ if use_svml
error('Missing the `SVML` git submodule! Run `git submodule update --init` to fix this.')
endif
endif
+if not fs.exists('src/npysort/x86-simd-sort/README.md')
+ error('Missing the `x86-simd-sort` git submodule! Run `git submodule update --init` to fix this.')
+endif
# Check sizes of types. Note, some of these landed in config.h before, but were
# unused. So clean that up and only define the NPY_SIZEOF flavors rather than
diff --git a/numpy/core/overrides.py b/numpy/core/overrides.py
index e93ef5d88..6403e65b0 100644
--- a/numpy/core/overrides.py
+++ b/numpy/core/overrides.py
@@ -11,9 +11,6 @@ from numpy.core._multiarray_umath import (
ARRAY_FUNCTIONS = set()
-ARRAY_FUNCTION_ENABLED = bool(
- int(os.environ.get('NUMPY_EXPERIMENTAL_ARRAY_FUNCTION', 1)))
-
array_function_like_doc = (
"""like : array_like, optional
Reference object to allow the creation of arrays which are not
@@ -140,17 +137,8 @@ def array_function_dispatch(dispatcher=None, module=None, verify=True,
Returns
-------
Function suitable for decorating the implementation of a NumPy function.
- """
-
- if not ARRAY_FUNCTION_ENABLED:
- def decorator(implementation):
- if docs_from_dispatcher:
- add_docstring(implementation, dispatcher.__doc__)
- if module is not None:
- implementation.__module__ = module
- return implementation
- return decorator
+ """
def decorator(implementation):
if verify:
if dispatcher is not None:
diff --git a/numpy/core/setup.py b/numpy/core/setup.py
index 77e1ebf99..52b17bfc8 100644
--- a/numpy/core/setup.py
+++ b/numpy/core/setup.py
@@ -79,11 +79,13 @@ def can_link_svml():
and "linux" in platform
and sys.maxsize > 2**31)
-def check_svml_submodule(svmlpath):
- if not os.path.exists(svmlpath + "/README.md"):
- raise RuntimeError("Missing `SVML` submodule! Run `git submodule "
- "update --init` to fix this.")
- return True
+def check_git_submodules():
+ out = os.popen("git submodule status")
+ modules = out.readlines()
+ for submodule in modules:
+ if (submodule.strip()[0] == "-"):
+ raise RuntimeError("git submodules are not initialized."
+ "Please run `git submodule update --init` to fix this.")
def pythonlib_dir():
"""return path where libpython* is."""
@@ -414,6 +416,8 @@ def configuration(parent_package='',top_path=None):
# actual C API VERSION. Will raise a MismatchCAPIError if so.
check_api_version(C_API_VERSION, codegen_dir)
+ check_git_submodules()
+
generate_umath_py = join(codegen_dir, 'generate_umath.py')
n = dot_join(config.name, 'generate_umath')
generate_umath = exec_mod_from_location('_'.join(n.split('.')),
@@ -1036,7 +1040,7 @@ def configuration(parent_package='',top_path=None):
# after all maintainable code.
svml_filter = (
)
- if can_link_svml() and check_svml_submodule(svml_path):
+ if can_link_svml():
svml_objs = glob.glob(svml_path + '/**/*.s', recursive=True)
svml_objs = [o for o in svml_objs if not o.endswith(svml_filter)]
diff --git a/numpy/core/shape_base.py b/numpy/core/shape_base.py
index 56c5a70f2..250fffd42 100644
--- a/numpy/core/shape_base.py
+++ b/numpy/core/shape_base.py
@@ -283,9 +283,6 @@ def vstack(tup, *, dtype=None, casting="same_kind"):
[6]])
"""
- if not overrides.ARRAY_FUNCTION_ENABLED:
- # reject non-sequences (and make tuple)
- tup = _arrays_for_stack_dispatcher(tup)
arrs = atleast_2d(*tup)
if not isinstance(arrs, list):
arrs = [arrs]
@@ -352,10 +349,6 @@ def hstack(tup, *, dtype=None, casting="same_kind"):
[3, 6]])
"""
- if not overrides.ARRAY_FUNCTION_ENABLED:
- # reject non-sequences (and make tuple)
- tup = _arrays_for_stack_dispatcher(tup)
-
arrs = atleast_1d(*tup)
if not isinstance(arrs, list):
arrs = [arrs]
@@ -447,10 +440,6 @@ def stack(arrays, axis=0, out=None, *, dtype=None, casting="same_kind"):
[3, 6]])
"""
- if not overrides.ARRAY_FUNCTION_ENABLED:
- # reject non-sequences (and make tuple)
- arrays = _arrays_for_stack_dispatcher(arrays)
-
arrays = [asanyarray(arr) for arr in arrays]
if not arrays:
raise ValueError('need at least one array to stack')
diff --git a/numpy/core/src/multiarray/calculation.c b/numpy/core/src/multiarray/calculation.c
index a985a2308..62a994c64 100644
--- a/numpy/core/src/multiarray/calculation.c
+++ b/numpy/core/src/multiarray/calculation.c
@@ -7,6 +7,7 @@
#include "numpy/arrayobject.h"
#include "lowlevel_strided_loops.h"
+#include "dtypemeta.h"
#include "npy_config.h"
@@ -50,7 +51,7 @@ _PyArray_ArgMinMaxCommon(PyArrayObject *op,
int axis_copy = axis;
npy_intp _shape_buf[NPY_MAXDIMS];
npy_intp *out_shape;
- // Keep the number of dimensions and shape of
+ // Keep the number of dimensions and shape of
// original array. Helps when `keepdims` is True.
npy_intp* original_op_shape = PyArray_DIMS(op);
int out_ndim = PyArray_NDIM(op);
@@ -87,9 +88,13 @@ _PyArray_ArgMinMaxCommon(PyArrayObject *op,
op = ap;
}
- /* Will get native-byte order contiguous copy. */
- ap = (PyArrayObject *)PyArray_ContiguousFromAny((PyObject *)op,
- PyArray_DESCR(op)->type_num, 1, 0);
+ // Will get native-byte order contiguous copy.
+ PyArray_Descr *descr = NPY_DT_CALL_ensure_canonical(PyArray_DESCR(op));
+ if (descr == NULL) {
+ return NULL;
+ }
+ ap = (PyArrayObject *)PyArray_FromArray(op, descr, NPY_ARRAY_DEFAULT);
+
Py_DECREF(op);
if (ap == NULL) {
return NULL;
@@ -106,9 +111,9 @@ _PyArray_ArgMinMaxCommon(PyArrayObject *op,
for (int i = 0; i < out_ndim; i++) {
out_shape[i] = 1;
}
- }
+ }
else {
- /*
+ /*
* While `ap` may be transposed, we can ignore this for `out` because the
* transpose only reorders the size 1 `axis` (not changing memory layout).
*/
diff --git a/numpy/core/src/multiarray/datetime_busday.c b/numpy/core/src/multiarray/datetime_busday.c
index d3e9e1451..93ed0972e 100644
--- a/numpy/core/src/multiarray/datetime_busday.c
+++ b/numpy/core/src/multiarray/datetime_busday.c
@@ -365,6 +365,7 @@ apply_business_day_count(npy_datetime date_begin, npy_datetime date_end,
npy_datetime *holidays_begin, npy_datetime *holidays_end)
{
npy_int64 count, whole_weeks;
+
int day_of_week = 0;
int swapped = 0;
@@ -386,6 +387,10 @@ apply_business_day_count(npy_datetime date_begin, npy_datetime date_end,
date_begin = date_end;
date_end = tmp;
swapped = 1;
+ // we swapped date_begin and date_end, so we need to correct for the
+ // original date_end that should not be included. gh-23197
+ date_begin++;
+ date_end++;
}
/* Remove any earlier holidays */
diff --git a/numpy/core/src/multiarray/descriptor.c b/numpy/core/src/multiarray/descriptor.c
index c4f37ee18..68d398d64 100644
--- a/numpy/core/src/multiarray/descriptor.c
+++ b/numpy/core/src/multiarray/descriptor.c
@@ -2447,7 +2447,6 @@ arraydescr_new(PyTypeObject *subtype,
PyErr_NoMemory();
return NULL;
}
- PyObject_Init((PyObject *)descr, subtype);
descr->f = &NPY_DT_SLOTS(DType)->f;
Py_XINCREF(DType->scalar_type);
descr->typeobj = DType->scalar_type;
diff --git a/numpy/core/src/npysort/x86-simd-sort b/numpy/core/src/npysort/x86-simd-sort
-Subproject 7d7591cf5927e83e4a1e7c4b6f2c4dc91a97889
+Subproject 58501d026a390895f7fd7ebbe0fb7aea55055ad
diff --git a/numpy/core/tests/test_array_coercion.py b/numpy/core/tests/test_array_coercion.py
index 0ba736c05..838c61bc2 100644
--- a/numpy/core/tests/test_array_coercion.py
+++ b/numpy/core/tests/test_array_coercion.py
@@ -1,6 +1,6 @@
"""
Tests for array coercion, mainly through testing `np.array` results directly.
-Note that other such tests exist e.g. in `test_api.py` and many corner-cases
+Note that other such tests exist, e.g., in `test_api.py` and many corner-cases
are tested (sometimes indirectly) elsewhere.
"""
@@ -20,8 +20,8 @@ from numpy.testing import (
def arraylikes():
"""
Generator for functions converting an array into various array-likes.
- If full is True (default) includes array-likes not capable of handling
- all dtypes
+ If full is True (default) it includes array-likes not capable of handling
+ all dtypes.
"""
# base array:
def ndarray(a):
@@ -40,8 +40,8 @@ def arraylikes():
class _SequenceLike():
# We are giving a warning that array-like's were also expected to be
- # sequence-like in `np.array([array_like])`, this can be removed
- # when the deprecation exired (started NumPy 1.20)
+ # sequence-like in `np.array([array_like])`. This can be removed
+ # when the deprecation expired (started NumPy 1.20).
def __len__(self):
raise TypeError
@@ -259,7 +259,7 @@ class TestScalarDiscovery:
@pytest.mark.parametrize("scalar", scalar_instances())
def test_scalar_coercion(self, scalar):
# This tests various scalar coercion paths, mainly for the numerical
- # types. It includes some paths not directly related to `np.array`
+ # types. It includes some paths not directly related to `np.array`.
if isinstance(scalar, np.inexact):
# Ensure we have a full-precision number if available
scalar = type(scalar)((scalar * 2)**0.5)
@@ -294,7 +294,7 @@ class TestScalarDiscovery:
* `np.array(scalar, dtype=dtype)`
* `np.empty((), dtype=dtype)[()] = scalar`
* `np.array(scalar).astype(dtype)`
- should behave the same. The only exceptions are paramteric dtypes
+ should behave the same. The only exceptions are parametric dtypes
(mainly datetime/timedelta without unit) and void without fields.
"""
dtype = cast_to.dtype # use to parametrize only the target dtype
@@ -386,7 +386,7 @@ class TestScalarDiscovery:
"""
dtype = np.dtype(dtype)
- # This is a special case using casting logic. It warns for the NaN
+ # This is a special case using casting logic. It warns for the NaN
# but allows the cast (giving undefined behaviour).
with np.errstate(invalid="ignore"):
coerced = np.array(scalar, dtype=dtype)
diff --git a/numpy/core/tests/test_arrayprint.py b/numpy/core/tests/test_arrayprint.py
index f1883f703..372cb8d41 100644
--- a/numpy/core/tests/test_arrayprint.py
+++ b/numpy/core/tests/test_arrayprint.py
@@ -9,6 +9,7 @@ from numpy.testing import (
assert_, assert_equal, assert_raises, assert_warns, HAS_REFCOUNT,
assert_raises_regex,
)
+from numpy.core.arrayprint import _typelessdata
import textwrap
class TestArrayRepr:
@@ -796,6 +797,47 @@ class TestPrintOptions:
array(['1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1'],
dtype='{}')""".format(styp)))
+ @pytest.mark.parametrize(
+ ['native'],
+ [
+ ('bool',),
+ ('uint8',),
+ ('uint16',),
+ ('uint32',),
+ ('uint64',),
+ ('int8',),
+ ('int16',),
+ ('int32',),
+ ('int64',),
+ ('float16',),
+ ('float32',),
+ ('float64',),
+ ('U1',), # 4-byte width string
+ ],
+ )
+ def test_dtype_endianness_repr(self, native):
+ '''
+ there was an issue where
+ repr(array([0], dtype='<u2')) and repr(array([0], dtype='>u2'))
+ both returned the same thing:
+ array([0], dtype=uint16)
+ even though their dtypes have different endianness.
+ '''
+ native_dtype = np.dtype(native)
+ non_native_dtype = native_dtype.newbyteorder()
+ non_native_repr = repr(np.array([1], non_native_dtype))
+ native_repr = repr(np.array([1], native_dtype))
+ # preserve the sensible default of only showing dtype if nonstandard
+ assert ('dtype' in native_repr) ^ (native_dtype in _typelessdata),\
+ ("an array's repr should show dtype if and only if the type "
+ 'of the array is NOT one of the standard types '
+ '(e.g., int32, bool, float64).')
+ if non_native_dtype.itemsize > 1:
+ # if the type is >1 byte, the non-native endian version
+ # must show endianness.
+ assert non_native_repr != native_repr
+ assert f"dtype='{non_native_dtype.byteorder}" in non_native_repr
+
def test_linewidth_repr(self):
a = np.full(7, fill_value=2)
np.set_printoptions(linewidth=17)
diff --git a/numpy/core/tests/test_datetime.py b/numpy/core/tests/test_datetime.py
index 2b56d4824..547ebf9d6 100644
--- a/numpy/core/tests/test_datetime.py
+++ b/numpy/core/tests/test_datetime.py
@@ -2330,16 +2330,23 @@ class TestDateTime:
assert_equal(np.busday_count('2011-01-01', dates, busdaycal=bdd),
np.arange(366))
# Returns negative value when reversed
+ # -1 since '2011-01-01' is not a busday
assert_equal(np.busday_count(dates, '2011-01-01', busdaycal=bdd),
- -np.arange(366))
+ -np.arange(366) - 1)
+ # 2011-12-31 is a Saturday
dates = np.busday_offset('2011-12-31', -np.arange(366),
roll='forward', busdaycal=bdd)
+ # only the first generated date is in the future of 2011-12-31
+ expected = np.arange(366)
+ expected[0] = -1
assert_equal(np.busday_count(dates, '2011-12-31', busdaycal=bdd),
- np.arange(366))
+ expected)
# Returns negative value when reversed
+ expected = -np.arange(366)+1
+ expected[0] = 0
assert_equal(np.busday_count('2011-12-31', dates, busdaycal=bdd),
- -np.arange(366))
+ expected)
# Can't supply both a weekmask/holidays and busdaycal
assert_raises(ValueError, np.busday_offset, '2012-01-03', '2012-02-03',
@@ -2352,6 +2359,17 @@ class TestDateTime:
# Returns negative value when reversed
assert_equal(np.busday_count('2011-04', '2011-03', weekmask='Mon'), -4)
+ sunday = np.datetime64('2023-03-05')
+ monday = sunday + 1
+ friday = sunday + 5
+ saturday = sunday + 6
+ assert_equal(np.busday_count(sunday, monday), 0)
+ assert_equal(np.busday_count(monday, sunday), -1)
+
+ assert_equal(np.busday_count(friday, saturday), 1)
+ assert_equal(np.busday_count(saturday, friday), 0)
+
+
def test_datetime_is_busday(self):
holidays = ['2011-01-01', '2011-10-10', '2011-11-11', '2011-11-24',
'2011-12-25', '2011-05-30', '2011-02-21', '2011-01-17',
diff --git a/numpy/core/tests/test_deprecations.py b/numpy/core/tests/test_deprecations.py
index 8ff52c885..96ae4b2a8 100644
--- a/numpy/core/tests/test_deprecations.py
+++ b/numpy/core/tests/test_deprecations.py
@@ -902,8 +902,23 @@ class TestDeprecatedFinfo(_DeprecationTestCase):
def test_deprecated_none(self):
self.assert_deprecated(np.finfo, args=(None,))
-
-class TestRound_(_DeprecationTestCase):
+class TestFromnumeric(_DeprecationTestCase):
# 2023-02-28, 1.25.0
def test_round_(self):
self.assert_deprecated(lambda: np.round_(np.array([1.5, 2.5, 3.5])))
+
+ # 2023-03-02, 1.25.0
+ def test_cumproduct(self):
+ self.assert_deprecated(lambda: np.cumproduct(np.array([1, 2, 3])))
+
+ # 2023-03-02, 1.25.0
+ def test_product(self):
+ self.assert_deprecated(lambda: np.product(np.array([1, 2, 3])))
+
+ # 2023-03-02, 1.25.0
+ def test_sometrue(self):
+ self.assert_deprecated(lambda: np.sometrue(np.array([True, False])))
+
+ # 2023-03-02, 1.25.0
+ def test_alltrue(self):
+ self.assert_deprecated(lambda: np.alltrue(np.array([True, False])))
diff --git a/numpy/core/tests/test_dtype.py b/numpy/core/tests/test_dtype.py
index 9b44367e6..f764a4daa 100644
--- a/numpy/core/tests/test_dtype.py
+++ b/numpy/core/tests/test_dtype.py
@@ -1833,7 +1833,6 @@ class TestUserDType:
create_custom_field_dtype(blueprint, mytype, 2)
-@pytest.mark.skipif(sys.version_info < (3, 9), reason="Requires python 3.9")
class TestClassGetItem:
def test_dtype(self) -> None:
alias = np.dtype[Any]
@@ -1866,10 +1865,3 @@ def test_result_type_integers_and_unitless_timedelta64():
td = np.timedelta64(4)
result = np.result_type(0, td)
assert_dtype_equal(result, td.dtype)
-
-
-@pytest.mark.skipif(sys.version_info >= (3, 9), reason="Requires python 3.8")
-def test_class_getitem_38() -> None:
- match = "Type subscription requires python >= 3.9"
- with pytest.raises(TypeError, match=match):
- np.dtype[Any]
diff --git a/numpy/core/tests/test_indexing.py b/numpy/core/tests/test_indexing.py
index 74075639c..042936702 100644
--- a/numpy/core/tests/test_indexing.py
+++ b/numpy/core/tests/test_indexing.py
@@ -1062,7 +1062,7 @@ class TestMultiIndexingAutomated:
if np.any(_indx >= _size) or np.any(_indx < -_size):
raise IndexError
if len(indx[1:]) == len(orig_slice):
- if np.product(orig_slice) == 0:
+ if np.prod(orig_slice) == 0:
# Work around for a crash or IndexError with 'wrap'
# in some 0-sized cases.
try:
diff --git a/numpy/core/tests/test_mem_overlap.py b/numpy/core/tests/test_mem_overlap.py
index d66decfda..1fd4c4d41 100644
--- a/numpy/core/tests/test_mem_overlap.py
+++ b/numpy/core/tests/test_mem_overlap.py
@@ -55,7 +55,7 @@ def _indices(ndims):
def _check_assignment(srcidx, dstidx):
"""Check assignment arr[dstidx] = arr[srcidx] works."""
- arr = np.arange(np.product(shape)).reshape(shape)
+ arr = np.arange(np.prod(shape)).reshape(shape)
cpy = arr.copy()
diff --git a/numpy/core/tests/test_memmap.py b/numpy/core/tests/test_memmap.py
index 914f86f14..ad074b312 100644
--- a/numpy/core/tests/test_memmap.py
+++ b/numpy/core/tests/test_memmap.py
@@ -6,7 +6,7 @@ from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryFile
from numpy import (
- memmap, sum, average, product, ndarray, isscalar, add, subtract, multiply)
+ memmap, sum, average, prod, ndarray, isscalar, add, subtract, multiply)
from numpy import arange, allclose, asarray
from numpy.testing import (
@@ -153,7 +153,7 @@ class TestMemmap:
with suppress_warnings() as sup:
sup.filter(FutureWarning, "np.average currently does not preserve")
- for unary_op in [sum, average, product]:
+ for unary_op in [sum, average, prod]:
result = unary_op(fp)
assert_(isscalar(result))
assert_(result.__class__ is self.data[0, 0].__class__)
diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py
index 0799c0a5a..ac4bd42d3 100644
--- a/numpy/core/tests/test_multiarray.py
+++ b/numpy/core/tests/test_multiarray.py
@@ -28,7 +28,7 @@ from numpy.testing import (
assert_, assert_raises, assert_warns, assert_equal, assert_almost_equal,
assert_array_equal, assert_raises_regex, assert_array_almost_equal,
assert_allclose, IS_PYPY, IS_PYSTON, HAS_REFCOUNT, assert_array_less,
- runstring, temppath, suppress_warnings, break_cycles,
+ runstring, temppath, suppress_warnings, break_cycles, _SUPPORTS_SVE,
)
from numpy.testing._private.utils import requires_memory, _no_tracing
from numpy.core.tests._locales import CommaDecimalPointLocale
@@ -9837,6 +9837,7 @@ class TestViewDtype:
assert_array_equal(x.view('<i2'), expected)
+@pytest.mark.xfail(_SUPPORTS_SVE, reason="gh-22982")
# Test various array sizes that hit different code paths in quicksort-avx512
@pytest.mark.parametrize("N", np.arange(1, 512))
@pytest.mark.parametrize("dtype", ['e', 'f', 'd'])
diff --git a/numpy/core/tests/test_numeric.py b/numpy/core/tests/test_numeric.py
index 3cc168b34..832a47c92 100644
--- a/numpy/core/tests/test_numeric.py
+++ b/numpy/core/tests/test_numeric.py
@@ -114,7 +114,9 @@ class TestNonarrayArgs:
def test_cumproduct(self):
A = [[1, 2, 3], [4, 5, 6]]
- assert_(np.all(np.cumproduct(A) == np.array([1, 2, 6, 24, 120, 720])))
+ with assert_warns(DeprecationWarning):
+ expected = np.array([1, 2, 6, 24, 120, 720])
+ assert_(np.all(np.cumproduct(A) == expected))
def test_diagonal(self):
a = [[0, 1, 2, 3],
@@ -1193,8 +1195,8 @@ class TestFromiter:
expected = np.array(list(self.makegen()))
a = np.fromiter(self.makegen(), int)
a20 = np.fromiter(self.makegen(), int, 20)
- assert_(np.alltrue(a == expected, axis=0))
- assert_(np.alltrue(a20 == expected[:20], axis=0))
+ assert_(np.all(a == expected, axis=0))
+ assert_(np.all(a20 == expected[:20], axis=0))
def load_data(self, n, eindex):
# Utility method for the issue 2592 tests.
@@ -1822,20 +1824,11 @@ class TestClip:
self.nr = 5
self.nc = 3
- def fastclip(self, a, m, M, out=None, casting=None):
- if out is None:
- if casting is None:
- return a.clip(m, M)
- else:
- return a.clip(m, M, casting=casting)
- else:
- if casting is None:
- return a.clip(m, M, out)
- else:
- return a.clip(m, M, out, casting=casting)
+ def fastclip(self, a, m, M, out=None, **kwargs):
+ return a.clip(m, M, out=out, **kwargs)
def clip(self, a, m, M, out=None):
- # use slow-clip
+ # use a.choose to verify fastclip result
selector = np.less(a, m) + 2*np.greater(a, M)
return selector.choose((a, m, M), out=out)
@@ -1991,14 +1984,13 @@ class TestClip:
ac = np.zeros(a.shape, dtype=np.int32)
act = ac.copy()
if casting is None:
- with assert_warns(DeprecationWarning):
- # NumPy 1.17.0, 2018-02-24 - casting is unsafe
+ with pytest.raises(TypeError):
self.fastclip(a, m, M, ac, casting=casting)
else:
# explicitly passing "unsafe" will silence warning
self.fastclip(a, m, M, ac, casting=casting)
- self.clip(a, m, M, act)
- assert_array_strict_equal(ac, act)
+ self.clip(a, m, M, act)
+ assert_array_strict_equal(ac, act)
def test_simple_int64_out(self):
# Test native int32 input with int32 scalar min/max and int64 out.
@@ -2018,9 +2010,7 @@ class TestClip:
M = np.float64(1)
ac = np.zeros(a.shape, dtype=np.int32)
act = ac.copy()
- with assert_warns(DeprecationWarning):
- # NumPy 1.17.0, 2018-02-24 - casting is unsafe
- self.fastclip(a, m, M, ac)
+ self.fastclip(a, m, M, out=ac, casting="unsafe")
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
@@ -2031,9 +2021,7 @@ class TestClip:
M = 2.0
ac = np.zeros(a.shape, dtype=np.int32)
act = ac.copy()
- with assert_warns(DeprecationWarning):
- # NumPy 1.17.0, 2018-02-24 - casting is unsafe
- self.fastclip(a, m, M, ac)
+ self.fastclip(a, m, M, out=ac, casting="unsafe")
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
@@ -2209,9 +2197,7 @@ class TestClip:
M = np.float64(2)
ac = np.zeros(a.shape, dtype=np.int32)
act = ac.copy()
- with assert_warns(DeprecationWarning):
- # NumPy 1.17.0, 2018-02-24 - casting is unsafe
- self.fastclip(a, m, M, ac)
+ self.fastclip(a, m, M, out=ac, casting="unsafe")
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
@@ -2233,9 +2219,7 @@ class TestClip:
M = np.float64(1)
ac = np.zeros(a.shape, dtype=np.int32)
act = ac.copy()
- with assert_warns(DeprecationWarning):
- # NumPy 1.17.0, 2018-02-24 - casting is unsafe
- self.fastclip(a, m, M, ac)
+ self.fastclip(a, m, M, out=ac, casting="unsafe")
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
@@ -2246,9 +2230,7 @@ class TestClip:
M = 2.0
ac = np.zeros(a.shape, dtype=np.int32)
act = ac.copy()
- with assert_warns(DeprecationWarning):
- # NumPy 1.17.0, 2018-02-24 - casting is unsafe
- self.fastclip(a, m, M, ac)
+ self.fastclip(a, m, M, out=ac, casting="unsafe")
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
@@ -2301,16 +2283,11 @@ class TestClip:
def test_clip_nan(self):
d = np.arange(7.)
- with assert_warns(DeprecationWarning):
- assert_equal(d.clip(min=np.nan), d)
- with assert_warns(DeprecationWarning):
- assert_equal(d.clip(max=np.nan), d)
- with assert_warns(DeprecationWarning):
- assert_equal(d.clip(min=np.nan, max=np.nan), d)
- with assert_warns(DeprecationWarning):
- assert_equal(d.clip(min=-2, max=np.nan), d)
- with assert_warns(DeprecationWarning):
- assert_equal(d.clip(min=np.nan, max=10), d)
+ assert_equal(d.clip(min=np.nan), np.nan)
+ assert_equal(d.clip(max=np.nan), np.nan)
+ assert_equal(d.clip(min=np.nan, max=np.nan), np.nan)
+ assert_equal(d.clip(min=-2, max=np.nan), np.nan)
+ assert_equal(d.clip(min=np.nan, max=10), np.nan)
def test_object_clip(self):
a = np.arange(10, dtype=object)
@@ -2362,16 +2339,12 @@ class TestClip:
actual = np.clip(arr, amin, amax)
assert_equal(actual, exp)
- @pytest.mark.xfail(reason="no scalar nan propagation yet",
- raises=AssertionError,
- strict=True)
@pytest.mark.parametrize("arr, amin, amax", [
# problematic scalar nan case from hypothesis
(np.zeros(10, dtype=np.int64),
np.array(np.nan),
np.zeros(10, dtype=np.int32)),
])
- @pytest.mark.filterwarnings("ignore::DeprecationWarning")
def test_clip_scalar_nan_propagation(self, arr, amin, amax):
# enforcement of scalar nan propagation for comparisons
# called through clip()
diff --git a/numpy/core/tests/test_overrides.py b/numpy/core/tests/test_overrides.py
index 90d917701..ae4cddb0e 100644
--- a/numpy/core/tests/test_overrides.py
+++ b/numpy/core/tests/test_overrides.py
@@ -10,16 +10,11 @@ from numpy.testing import (
assert_, assert_equal, assert_raises, assert_raises_regex)
from numpy.core.overrides import (
_get_implementing_args, array_function_dispatch,
- verify_matching_signatures, ARRAY_FUNCTION_ENABLED)
+ verify_matching_signatures)
from numpy.compat import pickle
import pytest
-requires_array_function = pytest.mark.skipif(
- not ARRAY_FUNCTION_ENABLED,
- reason="__array_function__ dispatch not enabled.")
-
-
def _return_not_implemented(self, *args, **kwargs):
return NotImplemented
@@ -150,7 +145,6 @@ class TestGetImplementingArgs:
class TestNDArrayArrayFunction:
- @requires_array_function
def test_method(self):
class Other:
@@ -209,7 +203,6 @@ class TestNDArrayArrayFunction:
args=(array,), kwargs={})
-@requires_array_function
class TestArrayFunctionDispatch:
def test_pickle(self):
@@ -249,7 +242,6 @@ class TestArrayFunctionDispatch:
dispatched_one_arg(array)
-@requires_array_function
class TestVerifyMatchingSignatures:
def test_verify_matching_signatures(self):
@@ -302,7 +294,6 @@ def _new_duck_type_and_implements():
return (MyArray, implements)
-@requires_array_function
class TestArrayFunctionImplementation:
def test_one_arg(self):
@@ -472,7 +463,6 @@ class TestNumPyFunctions:
signature = inspect.signature(np.sum)
assert_('axis' in signature.parameters)
- @requires_array_function
def test_override_sum(self):
MyArray, implements = _new_duck_type_and_implements()
@@ -482,7 +472,6 @@ class TestNumPyFunctions:
assert_equal(np.sum(MyArray()), 'yes')
- @requires_array_function
def test_sum_on_mock_array(self):
# We need a proxy for mocks because __array_function__ is only looked
@@ -503,7 +492,6 @@ class TestNumPyFunctions:
np.sum, (ArrayProxy,), (proxy,), {})
proxy.value.__array__.assert_not_called()
- @requires_array_function
def test_sum_forwarding_implementation(self):
class MyArray(np.ndarray):
@@ -555,7 +543,6 @@ class TestArrayLike:
def func_args(*args, **kwargs):
return args, kwargs
- @requires_array_function
def test_array_like_not_implemented(self):
self.add_method('array', self.MyArray)
@@ -588,7 +575,6 @@ class TestArrayLike:
@pytest.mark.parametrize('function, args, kwargs', _array_tests)
@pytest.mark.parametrize('numpy_ref', [True, False])
- @requires_array_function
def test_array_like(self, function, args, kwargs, numpy_ref):
self.add_method('array', self.MyArray)
self.add_method(function, self.MyArray)
@@ -621,7 +607,6 @@ class TestArrayLike:
@pytest.mark.parametrize('function, args, kwargs', _array_tests)
@pytest.mark.parametrize('ref', [1, [1], "MyNoArrayFunctionArray"])
- @requires_array_function
def test_no_array_function_like(self, function, args, kwargs, ref):
self.add_method('array', self.MyNoArrayFunctionArray)
self.add_method(function, self.MyNoArrayFunctionArray)
@@ -663,7 +648,6 @@ class TestArrayLike:
assert type(array_like) is self.MyArray
assert array_like.function is self.MyArray.fromfile
- @requires_array_function
def test_exception_handling(self):
self.add_method('array', self.MyArray, enable_value_error=True)
@@ -692,7 +676,6 @@ class TestArrayLike:
assert_equal(array_like, expected)
-@requires_array_function
def test_function_like():
# We provide a `__get__` implementation, make sure it works
assert type(np.mean) is np.core._multiarray_umath._ArrayFunctionDispatcher
diff --git a/numpy/core/tests/test_regression.py b/numpy/core/tests/test_regression.py
index 413ece045..fdd536bb9 100644
--- a/numpy/core/tests/test_regression.py
+++ b/numpy/core/tests/test_regression.py
@@ -516,22 +516,15 @@ class TestRegression:
def test_method_args(self):
# Make sure methods and functions have same default axis
# keyword and arguments
- funcs1 = ['argmax', 'argmin', 'sum', ('product', 'prod'),
- ('sometrue', 'any'),
- ('alltrue', 'all'), 'cumsum', ('cumproduct', 'cumprod'),
- 'ptp', 'cumprod', 'prod', 'std', 'var', 'mean',
- 'round', 'min', 'max', 'argsort', 'sort']
+ funcs1 = ['argmax', 'argmin', 'sum', 'any', 'all', 'cumsum',
+ 'ptp', 'cumprod', 'prod', 'std', 'var', 'mean',
+ 'round', 'min', 'max', 'argsort', 'sort']
funcs2 = ['compress', 'take', 'repeat']
for func in funcs1:
arr = np.random.rand(8, 7)
arr2 = arr.copy()
- if isinstance(func, tuple):
- func_meth = func[1]
- func = func[0]
- else:
- func_meth = func
- res1 = getattr(arr, func_meth)()
+ res1 = getattr(arr, func)()
res2 = getattr(np, func)(arr2)
if res1 is None:
res1 = arr
@@ -1336,8 +1329,8 @@ class TestRegression:
# Ticket #1058
a = np.fromiter(list(range(10)), dtype='b')
b = np.fromiter(list(range(10)), dtype='B')
- assert_(np.alltrue(a == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))
- assert_(np.alltrue(b == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))
+ assert_(np.all(a == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))
+ assert_(np.all(b == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))
def test_array_from_sequence_scalar_array(self):
# Ticket #1078: segfaults when creating an array with a sequence of
@@ -1515,8 +1508,8 @@ class TestRegression:
def test_fromiter_comparison(self):
a = np.fromiter(list(range(10)), dtype='b')
b = np.fromiter(list(range(10)), dtype='B')
- assert_(np.alltrue(a == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))
- assert_(np.alltrue(b == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))
+ assert_(np.all(a == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))
+ assert_(np.all(b == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))
def test_fromstring_crash(self):
# Ticket #1345: the following should not cause a crash
diff --git a/numpy/core/tests/test_scalar_methods.py b/numpy/core/tests/test_scalar_methods.py
index 4f57c94c0..18a7bc828 100644
--- a/numpy/core/tests/test_scalar_methods.py
+++ b/numpy/core/tests/test_scalar_methods.py
@@ -1,7 +1,6 @@
"""
Test the scalar constructors, which also do type-coercion
"""
-import sys
import fractions
import platform
import types
@@ -134,7 +133,6 @@ class TestIsInteger:
assert not value.is_integer()
-@pytest.mark.skipif(sys.version_info < (3, 9), reason="Requires python 3.9")
class TestClassGetItem:
@pytest.mark.parametrize("cls", [
np.number,
@@ -188,14 +186,6 @@ class TestClassGetItem:
assert np.number[Any]
-@pytest.mark.skipif(sys.version_info >= (3, 9), reason="Requires python 3.8")
-@pytest.mark.parametrize("cls", [np.number, np.complexfloating, np.int64])
-def test_class_getitem_38(cls: Type[np.number]) -> None:
- match = "Type subscription requires python >= 3.9"
- with pytest.raises(TypeError, match=match):
- cls[Any]
-
-
class TestBitCount:
# derived in part from the cpython test "test_bit_count"
diff --git a/numpy/core/tests/test_scalarmath.py b/numpy/core/tests/test_scalarmath.py
index 79423cda0..c737099c1 100644
--- a/numpy/core/tests/test_scalarmath.py
+++ b/numpy/core/tests/test_scalarmath.py
@@ -14,7 +14,7 @@ import numpy as np
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_almost_equal,
assert_array_equal, IS_PYPY, suppress_warnings, _gen_alignment_data,
- assert_warns,
+ assert_warns, _SUPPORTS_SVE,
)
types = [np.bool_, np.byte, np.ubyte, np.short, np.ushort, np.intc, np.uintc,
@@ -146,6 +146,7 @@ def test_int_float_promotion_truediv(fscalar):
class TestBaseMath:
+ @pytest.mark.xfail(_SUPPORTS_SVE, reason="gh-22982")
def test_blocked(self):
# test alignments offsets for simd instructions
# alignments for vz + 2 * (vs - 1) + 1
diff --git a/numpy/core/tests/test_umath.py b/numpy/core/tests/test_umath.py
index e504ddd6e..13f7375c2 100644
--- a/numpy/core/tests/test_umath.py
+++ b/numpy/core/tests/test_umath.py
@@ -589,6 +589,8 @@ class TestDivision:
assert_equal(np.signbit(x//1), 0)
assert_equal(np.signbit((-x)//1), 1)
+ @pytest.mark.skipif(hasattr(np.__config__, "blas_ssl2_info"),
+ reason="gh-22982")
@pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
@pytest.mark.parametrize('dtype', np.typecodes['Float'])
def test_floor_division_errors(self, dtype):
@@ -731,6 +733,8 @@ class TestRemainder:
# inf / 0 does not set any flags, only the modulo creates a NaN
np.divmod(finf, fzero)
+ @pytest.mark.skipif(hasattr(np.__config__, "blas_ssl2_info"),
+ reason="gh-22982")
@pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
@pytest.mark.xfail(sys.platform.startswith("darwin"),
reason="MacOS seems to not give the correct 'invalid' warning for "
diff --git a/numpy/distutils/ccompiler.py b/numpy/distutils/ccompiler.py
index ee00511a8..0e018a268 100644
--- a/numpy/distutils/ccompiler.py
+++ b/numpy/distutils/ccompiler.py
@@ -720,6 +720,8 @@ compiler_class['pathcc'] = ('pathccompiler', 'PathScaleCCompiler',
"PathScale Compiler for SiCortex-based applications")
compiler_class['arm'] = ('armccompiler', 'ArmCCompiler',
"Arm C Compiler")
+compiler_class['fujitsu'] = ('fujitsuccompiler', 'FujitsuCCompiler',
+ "Fujitsu C Compiler")
ccompiler._default_compilers += (('linux.*', 'intel'),
('linux.*', 'intele'),
diff --git a/numpy/distutils/ccompiler_opt.py b/numpy/distutils/ccompiler_opt.py
index 781404446..4904dd3dd 100644
--- a/numpy/distutils/ccompiler_opt.py
+++ b/numpy/distutils/ccompiler_opt.py
@@ -229,6 +229,11 @@ class _Config:
native = None,
opt = '/O2',
werror = '/WX',
+ ),
+ fcc = dict(
+ native = '-mcpu=a64fx',
+ opt = None,
+ werror = None,
)
)
conf_min_features = dict(
@@ -342,7 +347,7 @@ class _Config:
return {}
on_x86 = self.cc_on_x86 or self.cc_on_x64
- is_unix = self.cc_is_gcc or self.cc_is_clang
+ is_unix = self.cc_is_gcc or self.cc_is_clang or self.cc_is_fcc
if on_x86 and is_unix: return dict(
SSE = dict(flags="-msse"),
@@ -989,12 +994,14 @@ class _CCompiler:
("cc_is_iccw", ".*(intelw|intelemw|iccw).*", ""),
("cc_is_icc", ".*(intel|icc).*", ""), # intel unix like
("cc_is_msvc", ".*msvc.*", ""),
+ ("cc_is_fcc", ".*fcc.*", ""),
# undefined compiler will be treat it as gcc
("cc_is_nocc", "", ""),
)
detect_args = (
("cc_has_debug", ".*(O0|Od|ggdb|coverage|debug:full).*", ""),
- ("cc_has_native", ".*(-march=native|-xHost|/QxHost).*", ""),
+ ("cc_has_native",
+ ".*(-march=native|-xHost|/QxHost|-mcpu=a64fx).*", ""),
# in case if the class run with -DNPY_DISABLE_OPTIMIZATION
("cc_noopt", ".*DISABLE_OPT.*", ""),
)
@@ -1055,7 +1062,7 @@ class _CCompiler:
break
self.cc_name = "unknown"
- for name in ("gcc", "clang", "iccw", "icc", "msvc"):
+ for name in ("gcc", "clang", "iccw", "icc", "msvc", "fcc"):
if getattr(self, "cc_is_" + name):
self.cc_name = name
break
diff --git a/numpy/distutils/fujitsuccompiler.py b/numpy/distutils/fujitsuccompiler.py
new file mode 100644
index 000000000..c25900b34
--- /dev/null
+++ b/numpy/distutils/fujitsuccompiler.py
@@ -0,0 +1,28 @@
+from distutils.unixccompiler import UnixCCompiler
+
+class FujitsuCCompiler(UnixCCompiler):
+
+ """
+ Fujitsu compiler.
+ """
+
+ compiler_type = 'fujitsu'
+ cc_exe = 'fcc'
+ cxx_exe = 'FCC'
+
+ def __init__(self, verbose=0, dry_run=0, force=0):
+ UnixCCompiler.__init__(self, verbose, dry_run, force)
+ cc_compiler = self.cc_exe
+ cxx_compiler = self.cxx_exe
+ self.set_executables(
+ compiler=cc_compiler +
+ ' -O3 -Nclang -fPIC',
+ compiler_so=cc_compiler +
+ ' -O3 -Nclang -fPIC',
+ compiler_cxx=cxx_compiler +
+ ' -O3 -Nclang -fPIC',
+ linker_exe=cc_compiler +
+ ' -lfj90i -lfj90f -lfjsrcinfo -lelf -shared',
+ linker_so=cc_compiler +
+ ' -lfj90i -lfj90f -lfjsrcinfo -lelf -shared'
+ )
diff --git a/numpy/distutils/mingw32ccompiler.py b/numpy/distutils/mingw32ccompiler.py
index f42dfdc9b..4763f41ad 100644
--- a/numpy/distutils/mingw32ccompiler.py
+++ b/numpy/distutils/mingw32ccompiler.py
@@ -206,10 +206,7 @@ def find_python_dll():
if implementation == 'cpython':
dllname = f'python{major_version}{minor_version}.dll'
elif implementation == 'pypy':
- if sys.version_info >= (3, 9):
- dllname = f'libpypy{major_version}.{minor_version}-c.dll'
- else:
- dllname = f'libpypy{major_version}-c.dll'
+ dllname = f'libpypy{major_version}.{minor_version}-c.dll'
else:
dllname = f'Unknown platform {implementation}'
print("Looking for %s" % dllname)
diff --git a/numpy/distutils/system_info.py b/numpy/distutils/system_info.py
index d5a1687da..434554915 100644
--- a/numpy/distutils/system_info.py
+++ b/numpy/distutils/system_info.py
@@ -62,6 +62,7 @@ Currently, the following classes are available, along with their section names:
blas_ilp64_plain_opt_info:ALL # usage recommended (general ILP64 BLAS, no symbol suffix)
blas_info:blas
blas_mkl_info:mkl
+ blas_ssl2_info:ssl2
blas_opt_info:ALL # usage recommended
blas_src_info:blas_src
blis_info:blis
@@ -93,9 +94,11 @@ Currently, the following classes are available, along with their section names:
lapack_ilp64_plain_opt_info:ALL # usage recommended (general ILP64 LAPACK, no symbol suffix)
lapack_info:lapack
lapack_mkl_info:mkl
+ lapack_ssl2_info:ssl2
lapack_opt_info:ALL # usage recommended
lapack_src_info:lapack_src
mkl_info:mkl
+ ssl2_info:ssl2
numarray_info:numarray
numerix_info:numerix
numpy_info:numpy
@@ -519,6 +522,7 @@ def get_info(name, notfound_action=0):
'lapack_atlas_3_10_threads': lapack_atlas_3_10_threads_info, # ditto
'flame': flame_info, # use lapack_opt instead
'mkl': mkl_info,
+ 'ssl2': ssl2_info,
# openblas which may or may not have embedded lapack
'openblas': openblas_info, # use blas_opt instead
# openblas with embedded lapack
@@ -527,6 +531,8 @@ def get_info(name, notfound_action=0):
'blis': blis_info, # use blas_opt instead
'lapack_mkl': lapack_mkl_info, # use lapack_opt instead
'blas_mkl': blas_mkl_info, # use blas_opt instead
+ 'lapack_ssl2': lapack_ssl2_info,
+ 'blas_ssl2': blas_ssl2_info,
'accelerate': accelerate_info, # use blas_opt instead
'openblas64_': openblas64__info,
'openblas64__lapack': openblas64__lapack_info,
@@ -1325,6 +1331,63 @@ class blas_mkl_info(mkl_info):
pass
+class ssl2_info(system_info):
+ section = 'ssl2'
+ dir_env_var = 'SSL2_DIR'
+ # Multi-threaded version. Python itself must be built by Fujitsu compiler.
+ _lib_ssl2 = ['fjlapackexsve']
+ # Single-threaded version
+ #_lib_ssl2 = ['fjlapacksve']
+
+ def get_tcsds_rootdir(self):
+ tcsdsroot = os.environ.get('TCSDS_PATH', None)
+ if tcsdsroot is not None:
+ return tcsdsroot
+ return None
+
+ def __init__(self):
+ tcsdsroot = self.get_tcsds_rootdir()
+ if tcsdsroot is None:
+ system_info.__init__(self)
+ else:
+ system_info.__init__(
+ self,
+ default_lib_dirs=[os.path.join(tcsdsroot, 'lib64')],
+ default_include_dirs=[os.path.join(tcsdsroot,
+ 'clang-comp/include')])
+
+ def calc_info(self):
+ tcsdsroot = self.get_tcsds_rootdir()
+
+ lib_dirs = self.get_lib_dirs()
+ if lib_dirs is None:
+ lib_dirs = os.path.join(tcsdsroot, 'lib64')
+
+ incl_dirs = self.get_include_dirs()
+ if incl_dirs is None:
+ incl_dirs = os.path.join(tcsdsroot, 'clang-comp/include')
+
+ ssl2_libs = self.get_libs('ssl2_libs', self._lib_ssl2)
+
+ info = self.check_libs2(lib_dirs, ssl2_libs)
+ if info is None:
+ return
+ dict_append(info,
+ define_macros=[('HAVE_CBLAS', None),
+ ('HAVE_SSL2', 1)],
+ include_dirs=incl_dirs,)
+ self.set_info(**info)
+
+
+class lapack_ssl2_info(ssl2_info):
+ pass
+
+
+class blas_ssl2_info(ssl2_info):
+ pass
+
+
+
class armpl_info(system_info):
section = 'armpl'
dir_env_var = 'ARMPL_DIR'
@@ -1787,7 +1850,7 @@ class lapack_opt_info(system_info):
notfounderror = LapackNotFoundError
# List of all known LAPACK libraries, in the default order
- lapack_order = ['armpl', 'mkl', 'openblas', 'flame',
+ lapack_order = ['armpl', 'mkl', 'ssl2', 'openblas', 'flame',
'accelerate', 'atlas', 'lapack']
order_env_var_name = 'NPY_LAPACK_ORDER'
@@ -1805,6 +1868,13 @@ class lapack_opt_info(system_info):
return True
return False
+ def _calc_info_ssl2(self):
+ info = get_info('lapack_ssl2')
+ if info:
+ self.set_info(**info)
+ return True
+ return False
+
def _calc_info_openblas(self):
info = get_info('openblas_lapack')
if info:
@@ -1971,7 +2041,7 @@ class blas_opt_info(system_info):
notfounderror = BlasNotFoundError
# List of all known BLAS libraries, in the default order
- blas_order = ['armpl', 'mkl', 'blis', 'openblas',
+ blas_order = ['armpl', 'mkl', 'ssl2', 'blis', 'openblas',
'accelerate', 'atlas', 'blas']
order_env_var_name = 'NPY_BLAS_ORDER'
@@ -1989,6 +2059,13 @@ class blas_opt_info(system_info):
return True
return False
+ def _calc_info_ssl2(self):
+ info = get_info('blas_ssl2')
+ if info:
+ self.set_info(**info)
+ return True
+ return False
+
def _calc_info_blis(self):
info = get_info('blis')
if info:
diff --git a/numpy/distutils/tests/test_ccompiler_opt.py b/numpy/distutils/tests/test_ccompiler_opt.py
index 657ebdb68..a1b780336 100644
--- a/numpy/distutils/tests/test_ccompiler_opt.py
+++ b/numpy/distutils/tests/test_ccompiler_opt.py
@@ -31,7 +31,7 @@ arch_compilers = dict(
ppc64 = ("gcc", "clang"),
ppc64le = ("gcc", "clang"),
armhf = ("gcc", "clang"),
- aarch64 = ("gcc", "clang"),
+ aarch64 = ("gcc", "clang", "fcc"),
s390x = ("gcc", "clang"),
noarch = ("gcc",)
)
@@ -422,8 +422,8 @@ class _Test_CCompilerOpt:
# when option "native" is activated through the args
try:
self.expect("native",
- trap_flags=".*(-march=native|-xHost|/QxHost).*",
- x86=".*", ppc64=".*", armhf=".*", s390x=".*"
+ trap_flags=".*(-march=native|-xHost|/QxHost|-mcpu=a64fx).*",
+ x86=".*", ppc64=".*", armhf=".*", s390x=".*", aarch64=".*",
)
if self.march() != "unknown":
raise AssertionError(
diff --git a/numpy/fft/helper.pyi b/numpy/fft/helper.pyi
index b49fc88f7..9b6525190 100644
--- a/numpy/fft/helper.pyi
+++ b/numpy/fft/helper.pyi
@@ -27,21 +27,21 @@ def ifftshift(x: ArrayLike, axes: None | _ShapeLike = ...) -> NDArray[Any]: ...
@overload
def fftfreq(
n: int | integer[Any],
- d: _ArrayLikeFloat_co,
+ d: _ArrayLikeFloat_co = ...,
) -> NDArray[floating[Any]]: ...
@overload
def fftfreq(
n: int | integer[Any],
- d: _ArrayLikeComplex_co,
+ d: _ArrayLikeComplex_co = ...,
) -> NDArray[complexfloating[Any, Any]]: ...
@overload
def rfftfreq(
n: int | integer[Any],
- d: _ArrayLikeFloat_co,
+ d: _ArrayLikeFloat_co = ...,
) -> NDArray[floating[Any]]: ...
@overload
def rfftfreq(
n: int | integer[Any],
- d: _ArrayLikeComplex_co,
+ d: _ArrayLikeComplex_co = ...,
) -> NDArray[complexfloating[Any, Any]]: ...
diff --git a/numpy/lib/__init__.pyi b/numpy/lib/__init__.pyi
index 1fa2d226e..d3553bbcc 100644
--- a/numpy/lib/__init__.pyi
+++ b/numpy/lib/__init__.pyi
@@ -64,7 +64,6 @@ from numpy.lib.function_base import (
digitize as digitize,
cov as cov,
corrcoef as corrcoef,
- msort as msort,
median as median,
sinc as sinc,
hamming as hamming,
diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index 405790025..f0f374f97 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -4910,23 +4910,22 @@ def trapz(y, x=None, dx=1.0, axis=-1):
return ret
-if overrides.ARRAY_FUNCTION_ENABLED:
- # If array-function is enabled (normal), we wrap everything into a C
- # callable, which has no __code__ or other attributes normal Python funcs
- # have. SciPy however, tries to "clone" `trapz` into a new Python function
- # which requires `__code__` and a few other attributes.
- # So we create a dummy clone and copy over its attributes allowing
- # SciPy <= 1.10 to work: https://github.com/scipy/scipy/issues/17811
- assert not hasattr(trapz, "__code__")
-
- def _fake_trapz(y, x=None, dx=1.0, axis=-1):
- return trapz(y, x=x, dx=dx, axis=axis)
-
- trapz.__code__ = _fake_trapz.__code__
- trapz.__globals__ = _fake_trapz.__globals__
- trapz.__defaults__ = _fake_trapz.__defaults__
- trapz.__closure__ = _fake_trapz.__closure__
- trapz.__kwdefaults__ = _fake_trapz.__kwdefaults__
+# Because we wrap everything into a C callable, `trapz` has no `__code__` or
+# other attributes that normal Python funcs have. SciPy, however, tries to
+# "clone" `trapz` into a new Python function, which requires `__code__` and a
+# few other attributes. So we create a dummy clone and copy over its
+# attributes, allowing SciPy <= 1.10 to work: https://github.com/scipy/scipy/issues/17811
+assert not hasattr(trapz, "__code__")
+
+def _fake_trapz(y, x=None, dx=1.0, axis=-1):
+ return trapz(y, x=x, dx=dx, axis=axis)
+
+
+trapz.__code__ = _fake_trapz.__code__
+trapz.__globals__ = _fake_trapz.__globals__
+trapz.__defaults__ = _fake_trapz.__defaults__
+trapz.__closure__ = _fake_trapz.__closure__
+trapz.__kwdefaults__ = _fake_trapz.__kwdefaults__
def _meshgrid_dispatcher(*xi, copy=None, sparse=None, indexing=None):
diff --git a/numpy/lib/histograms.py b/numpy/lib/histograms.py
index 0dfa7b4c1..35745e6dd 100644
--- a/numpy/lib/histograms.py
+++ b/numpy/lib/histograms.py
@@ -981,7 +981,7 @@ def histogramdd(sample, bins=10, range=None, density=None, weights=None):
if M != D:
raise ValueError(
'The dimension of bins must be equal to the dimension of the '
- ' sample x.')
+ 'sample x.')
except TypeError:
# bins is an integer
bins = D*[bins]
diff --git a/numpy/lib/index_tricks.py b/numpy/lib/index_tricks.py
index fa3a90e4f..abf9e1090 100644
--- a/numpy/lib/index_tricks.py
+++ b/numpy/lib/index_tricks.py
@@ -3,11 +3,10 @@ import sys
import math
import warnings
+import numpy as np
from .._utils import set_module
import numpy.core.numeric as _nx
-from numpy.core.numeric import (
- asarray, ScalarType, array, alltrue, cumprod, arange, ndim
-)
+from numpy.core.numeric import ScalarType, array
from numpy.core.numerictypes import find_common_type, issubdtype
import numpy.matrixlib as matrixlib
@@ -94,7 +93,7 @@ def ix_(*args):
nd = len(args)
for k, new in enumerate(args):
if not isinstance(new, _nx.ndarray):
- new = asarray(new)
+ new = np.asarray(new)
if new.size == 0:
# Explicitly type empty arrays to avoid float default
new = new.astype(_nx.intp)
@@ -396,7 +395,7 @@ class AxisConcatenator:
scalar = True
scalartypes.append(newobj.dtype)
else:
- item_ndim = ndim(item)
+ item_ndim = np.ndim(item)
newobj = array(item, copy=False, subok=True, ndmin=ndmin)
if trans1d != -1 and item_ndim < ndmin:
k2 = ndmin - item_ndim
@@ -596,7 +595,7 @@ class ndenumerate:
"""
def __init__(self, arr):
- self.iter = asarray(arr).flat
+ self.iter = np.asarray(arr).flat
def __next__(self):
"""
@@ -909,9 +908,9 @@ def fill_diagonal(a, val, wrap=False):
else:
# For more than d=2, the strided formula is only valid for arrays with
# all dimensions equal, so we check first.
- if not alltrue(diff(a.shape) == 0):
+ if not np.all(diff(a.shape) == 0):
raise ValueError("All dimensions of input must be of equal length")
- step = 1 + (cumprod(a.shape[:-1])).sum()
+ step = 1 + (np.cumprod(a.shape[:-1])).sum()
# Write the value out into the diagonal.
a.flat[:end:step] = val
@@ -982,7 +981,7 @@ def diag_indices(n, ndim=2):
[0, 1]]])
"""
- idx = arange(n)
+ idx = np.arange(n)
return (idx,) * ndim
@@ -1041,7 +1040,7 @@ def diag_indices_from(arr):
raise ValueError("input array must be at least 2-d")
# For more than d=2, the strided formula is only valid for arrays with
# all dimensions equal, so we check first.
- if not alltrue(diff(arr.shape) == 0):
+ if not np.all(diff(arr.shape) == 0):
raise ValueError("All dimensions of input must be of equal length")
return diag_indices(arr.shape[0], arr.ndim)
diff --git a/numpy/lib/shape_base.py b/numpy/lib/shape_base.py
index 154faa1dd..5d8a41bfe 100644
--- a/numpy/lib/shape_base.py
+++ b/numpy/lib/shape_base.py
@@ -643,10 +643,6 @@ def column_stack(tup):
[3, 4]])
"""
- if not overrides.ARRAY_FUNCTION_ENABLED:
- # reject non-sequences (and make tuple)
- tup = _arrays_for_stack_dispatcher(tup)
-
arrays = []
for v in tup:
arr = asanyarray(v)
@@ -713,10 +709,6 @@ def dstack(tup):
[[3, 4]]])
"""
- if not overrides.ARRAY_FUNCTION_ENABLED:
- # reject non-sequences (and make tuple)
- tup = _arrays_for_stack_dispatcher(tup)
-
arrs = atleast_3d(*tup)
if not isinstance(arrs, list):
arrs = [arrs]
diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py
index cc8003f61..3ec46735c 100644
--- a/numpy/lib/tests/test_function_base.py
+++ b/numpy/lib/tests/test_function_base.py
@@ -229,8 +229,8 @@ class TestAny:
def test_nd(self):
y1 = [[0, 0, 0], [0, 1, 0], [1, 1, 0]]
assert_(np.any(y1))
- assert_array_equal(np.sometrue(y1, axis=0), [1, 1, 0])
- assert_array_equal(np.sometrue(y1, axis=1), [0, 1, 1])
+ assert_array_equal(np.any(y1, axis=0), [1, 1, 0])
+ assert_array_equal(np.any(y1, axis=1), [0, 1, 1])
class TestAll:
@@ -247,8 +247,8 @@ class TestAll:
def test_nd(self):
y1 = [[0, 0, 1], [0, 1, 1], [1, 1, 1]]
assert_(not np.all(y1))
- assert_array_equal(np.alltrue(y1, axis=0), [0, 0, 1])
- assert_array_equal(np.alltrue(y1, axis=1), [0, 0, 1])
+ assert_array_equal(np.all(y1, axis=0), [0, 0, 1])
+ assert_array_equal(np.all(y1, axis=1), [0, 0, 1])
class TestCopy:
diff --git a/numpy/lib/tests/test_twodim_base.py b/numpy/lib/tests/test_twodim_base.py
index 141f508fd..eb008c600 100644
--- a/numpy/lib/tests/test_twodim_base.py
+++ b/numpy/lib/tests/test_twodim_base.py
@@ -4,20 +4,14 @@
from numpy.testing import (
assert_equal, assert_array_equal, assert_array_max_ulp,
assert_array_almost_equal, assert_raises, assert_
- )
-
+)
from numpy import (
arange, add, fliplr, flipud, zeros, ones, eye, array, diag, histogram2d,
tri, mask_indices, triu_indices, triu_indices_from, tril_indices,
tril_indices_from, vander,
- )
-
+)
import numpy as np
-
-from numpy.core.tests.test_overrides import requires_array_function
-
-
import pytest
@@ -283,7 +277,6 @@ class TestHistogram2d:
assert_array_equal(H, answer)
assert_array_equal(xe, array([0., 0.25, 0.5, 0.75, 1]))
- @requires_array_function
def test_dispatch(self):
class ShouldDispatch:
def __array_function__(self, function, types, args, kwargs):
diff --git a/numpy/lib/tests/test_type_check.py b/numpy/lib/tests/test_type_check.py
index 3f4ca6309..ea0326139 100644
--- a/numpy/lib/tests/test_type_check.py
+++ b/numpy/lib/tests/test_type_check.py
@@ -155,7 +155,7 @@ class TestIscomplex:
def test_fail(self):
z = np.array([-1, 0, 1])
res = iscomplex(z)
- assert_(not np.sometrue(res, axis=0))
+ assert_(not np.any(res, axis=0))
def test_pass(self):
z = np.array([-1j, 1, 0])
diff --git a/numpy/lib/tests/test_ufunclike.py b/numpy/lib/tests/test_ufunclike.py
index c280b6969..fac4f41d0 100644
--- a/numpy/lib/tests/test_ufunclike.py
+++ b/numpy/lib/tests/test_ufunclike.py
@@ -80,12 +80,6 @@ class TestUfunclike:
assert_(isinstance(f0d, MyArray))
assert_equal(f0d.metadata, 'bar')
- def test_deprecated(self):
- # NumPy 1.13.0, 2017-04-26
- assert_warns(DeprecationWarning, ufl.fix, [1, 2], y=nx.empty(2))
- assert_warns(DeprecationWarning, ufl.isposinf, [1, 2], y=nx.empty(2))
- assert_warns(DeprecationWarning, ufl.isneginf, [1, 2], y=nx.empty(2))
-
def test_scalar(self):
x = np.inf
actual = np.isposinf(x)
diff --git a/numpy/lib/ufunclike.py b/numpy/lib/ufunclike.py
index a93c4773b..05fe60c5b 100644
--- a/numpy/lib/ufunclike.py
+++ b/numpy/lib/ufunclike.py
@@ -6,72 +6,16 @@ storing results in an output array.
__all__ = ['fix', 'isneginf', 'isposinf']
import numpy.core.numeric as nx
-from numpy.core.overrides import (
- array_function_dispatch, ARRAY_FUNCTION_ENABLED,
-)
+from numpy.core.overrides import array_function_dispatch
import warnings
import functools
-def _deprecate_out_named_y(f):
- """
- Allow the out argument to be passed as the name `y` (deprecated)
-
- In future, this decorator should be removed.
- """
- @functools.wraps(f)
- def func(x, out=None, **kwargs):
- if 'y' in kwargs:
- if 'out' in kwargs:
- raise TypeError(
- "{} got multiple values for argument 'out'/'y'"
- .format(f.__name__)
- )
- out = kwargs.pop('y')
- # NumPy 1.13.0, 2017-04-26
- warnings.warn(
- "The name of the out argument to {} has changed from `y` to "
- "`out`, to match other ufuncs.".format(f.__name__),
- DeprecationWarning, stacklevel=3)
- return f(x, out=out, **kwargs)
-
- return func
-
-
-def _fix_out_named_y(f):
- """
- Allow the out argument to be passed as the name `y` (deprecated)
-
- This decorator should only be used if _deprecate_out_named_y is used on
- a corresponding dispatcher function.
- """
- @functools.wraps(f)
- def func(x, out=None, **kwargs):
- if 'y' in kwargs:
- # we already did error checking in _deprecate_out_named_y
- out = kwargs.pop('y')
- return f(x, out=out, **kwargs)
-
- return func
-
-
-def _fix_and_maybe_deprecate_out_named_y(f):
- """
- Use the appropriate decorator, depending upon if dispatching is being used.
- """
- if ARRAY_FUNCTION_ENABLED:
- return _fix_out_named_y(f)
- else:
- return _deprecate_out_named_y(f)
-
-
-@_deprecate_out_named_y
def _dispatcher(x, out=None):
return (x, out)
@array_function_dispatch(_dispatcher, verify=False, module='numpy')
-@_fix_and_maybe_deprecate_out_named_y
def fix(x, out=None):
"""
Round to nearest integer towards zero.
@@ -125,7 +69,6 @@ def fix(x, out=None):
@array_function_dispatch(_dispatcher, verify=False, module='numpy')
-@_fix_and_maybe_deprecate_out_named_y
def isposinf(x, out=None):
"""
Test element-wise for positive infinity, return result as bool array.
@@ -197,7 +140,6 @@ def isposinf(x, out=None):
@array_function_dispatch(_dispatcher, verify=False, module='numpy')
-@_fix_and_maybe_deprecate_out_named_y
def isneginf(x, out=None):
"""
Test element-wise for negative infinity, return result as bool array.
diff --git a/numpy/lib/utils.py b/numpy/lib/utils.py
index 8d2d2fe33..095c914db 100644
--- a/numpy/lib/utils.py
+++ b/numpy/lib/utils.py
@@ -528,15 +528,16 @@ def _info(obj, output=None):
@set_module('numpy')
def info(object=None, maxwidth=76, output=None, toplevel='numpy'):
"""
- Get help information for a function, class, or module.
+ Get help information for an array, function, class, or module.
Parameters
----------
object : object or str, optional
- Input object or name to get information about. If `object` is a
- numpy object, its docstring is given. If it is a string, available
- modules are searched for matching objects. If None, information
- about `info` itself is returned.
+ Input object or name to get information about. If `object` is
+ an `ndarray` instance, information about the array is printed.
+ If `object` is a numpy object, its docstring is given. If it is
+ a string, available modules are searched for matching objects.
+ If None, information about `info` itself is returned.
maxwidth : int, optional
Printing width.
output : file like object, optional
@@ -575,6 +576,22 @@ def info(object=None, maxwidth=76, output=None, toplevel='numpy'):
*** Repeat reference found in numpy.fft.fftpack ***
*** Total of 3 references found. ***
+ When the argument is an array, information about the array is printed.
+
+ >>> a = np.array([[1 + 2j, 3, -4], [-5j, 6, 0]], dtype=np.complex64)
+ >>> np.info(a)
+ class: ndarray
+ shape: (2, 3)
+ strides: (24, 8)
+ itemsize: 8
+ aligned: True
+ contiguous: True
+ fortran: False
+ data pointer: 0x562b6e0d2860 # may vary
+ byteorder: little
+ byteswap: False
+ type: complex64
+
"""
global _namedict, _dictlist
# Local import to speed up numpy's import time.
diff --git a/numpy/linalg/linalg.py b/numpy/linalg/linalg.py
index 255de94e5..78927d1ae 100644
--- a/numpy/linalg/linalg.py
+++ b/numpy/linalg/linalg.py
@@ -23,7 +23,7 @@ from numpy.core import (
array, asarray, zeros, empty, empty_like, intc, single, double,
csingle, cdouble, inexact, complexfloating, newaxis, all, Inf, dot,
add, multiply, sqrt, sum, isfinite,
- finfo, errstate, geterrobj, moveaxis, amin, amax, product, abs,
+ finfo, errstate, geterrobj, moveaxis, amin, amax, prod, abs,
atleast_2d, intp, asanyarray, object_, matmul,
swapaxes, divide, count_nonzero, isnan, sign, argsort, sort,
reciprocal
@@ -196,7 +196,7 @@ def _assert_finite(*arrays):
def _is_empty_2d(arr):
# check size first for efficiency
- return arr.size == 0 and product(arr.shape[-2:]) == 0
+ return arr.size == 0 and prod(arr.shape[-2:]) == 0
def transpose(a):
diff --git a/numpy/ma/core.py b/numpy/ma/core.py
index 4245aed49..e6d696288 100644
--- a/numpy/ma/core.py
+++ b/numpy/ma/core.py
@@ -2857,7 +2857,7 @@ class MaskedArray(ndarray):
mask = np.array(
[getmaskarray(np.asanyarray(m, dtype=_data.dtype))
for m in data], dtype=mdtype)
- except ValueError:
+ except (ValueError, TypeError):
# If data is nested
mask = nomask
# Force shrinking of the mask if needed (and possible)
diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi
index 58d73a231..15f37c422 100644
--- a/numpy/ma/core.pyi
+++ b/numpy/ma/core.pyi
@@ -219,6 +219,10 @@ class MaskedArray(ndarray[_ShapeType, _DType_co]):
def compress(self, condition, axis=..., out=...): ...
def __eq__(self, other): ...
def __ne__(self, other): ...
+ def __ge__(self, other): ...
+ def __gt__(self, other): ...
+ def __le__(self, other): ...
+ def __lt__(self, other): ...
def __add__(self, other): ...
def __radd__(self, other): ...
def __sub__(self, other): ...
diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py
index 41bce0f22..4abe2107a 100644
--- a/numpy/ma/extras.py
+++ b/numpy/ma/extras.py
@@ -398,7 +398,7 @@ def apply_along_axis(func1d, axis, arr, *args, **kwargs):
dtypes.append(np.asarray(res).dtype)
outarr = zeros(outshape, object)
outarr[tuple(ind)] = res
- Ntot = np.product(outshape)
+ Ntot = np.prod(outshape)
k = 1
while k < Ntot:
# increment the index
@@ -418,7 +418,7 @@ def apply_along_axis(func1d, axis, arr, *args, **kwargs):
j = i.copy()
j[axis] = ([slice(None, None)] * res.ndim)
j.put(indlist, ind)
- Ntot = np.product(outshape)
+ Ntot = np.prod(outshape)
holdshape = outshape
outshape = list(arr.shape)
outshape[axis] = res.shape
diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py
index 172a1ee8d..5db01b74a 100644
--- a/numpy/ma/tests/test_core.py
+++ b/numpy/ma/tests/test_core.py
@@ -1336,16 +1336,16 @@ class TestMaskedArrayArithmetic:
assert_equal(np.sum(x, axis=0), sum(x, axis=0))
assert_equal(np.sum(filled(xm, 0), axis=0), sum(xm, axis=0))
assert_equal(np.sum(x, 0), sum(x, 0))
- assert_equal(np.product(x, axis=0), product(x, axis=0))
- assert_equal(np.product(x, 0), product(x, 0))
- assert_equal(np.product(filled(xm, 1), axis=0), product(xm, axis=0))
+ assert_equal(np.prod(x, axis=0), product(x, axis=0))
+ assert_equal(np.prod(x, 0), product(x, 0))
+ assert_equal(np.prod(filled(xm, 1), axis=0), product(xm, axis=0))
s = (3, 4)
x.shape = y.shape = xm.shape = ym.shape = s
if len(s) > 1:
assert_equal(np.concatenate((x, y), 1), concatenate((xm, ym), 1))
assert_equal(np.add.reduce(x, 1), add.reduce(x, 1))
assert_equal(np.sum(x, 1), sum(x, 1))
- assert_equal(np.product(x, 1), product(x, 1))
+ assert_equal(np.prod(x, 1), product(x, 1))
def test_binops_d2D(self):
# Test binary operations on 2D data
diff --git a/numpy/ma/tests/test_old_ma.py b/numpy/ma/tests/test_old_ma.py
index 8465b1153..7b892ad23 100644
--- a/numpy/ma/tests/test_old_ma.py
+++ b/numpy/ma/tests/test_old_ma.py
@@ -194,16 +194,16 @@ class TestMa:
assert_(eq(np.sum(x, axis=0), sum(x, axis=0)))
assert_(eq(np.sum(filled(xm, 0), axis=0), sum(xm, axis=0)))
assert_(eq(np.sum(x, 0), sum(x, 0)))
- assert_(eq(np.product(x, axis=0), product(x, axis=0)))
- assert_(eq(np.product(x, 0), product(x, 0)))
- assert_(eq(np.product(filled(xm, 1), axis=0),
+ assert_(eq(np.prod(x, axis=0), product(x, axis=0)))
+ assert_(eq(np.prod(x, 0), product(x, 0)))
+ assert_(eq(np.prod(filled(xm, 1), axis=0),
product(xm, axis=0)))
if len(s) > 1:
assert_(eq(np.concatenate((x, y), 1),
concatenate((xm, ym), 1)))
assert_(eq(np.add.reduce(x, 1), add.reduce(x, 1)))
assert_(eq(np.sum(x, 1), sum(x, 1)))
- assert_(eq(np.product(x, 1), product(x, 1)))
+ assert_(eq(np.prod(x, 1), product(x, 1)))
def test_testCI(self):
# Test of conversions and indexing
diff --git a/numpy/ma/tests/test_regression.py b/numpy/ma/tests/test_regression.py
index cb3d0349f..f4f32cc7a 100644
--- a/numpy/ma/tests/test_regression.py
+++ b/numpy/ma/tests/test_regression.py
@@ -89,3 +89,9 @@ class TestRegression:
def test_masked_array_tobytes_fortran(self):
ma = np.ma.arange(4).reshape((2,2))
assert_array_equal(ma.tobytes(order='F'), ma.T.tobytes())
+
+ def test_structured_array(self):
+ # see gh-22041
+ np.ma.array((1, (b"", b"")),
+ dtype=[("x", np.int_),
+ ("y", [("i", np.void), ("j", np.void)])])
diff --git a/numpy/ma/testutils.py b/numpy/ma/testutils.py
index 2dd479abe..7a633906b 100644
--- a/numpy/ma/testutils.py
+++ b/numpy/ma/testutils.py
@@ -233,7 +233,7 @@ def fail_if_array_equal(x, y, err_msg='', verbose=True):
"""
def compare(x, y):
- return (not np.alltrue(approx(x, y)))
+ return (not np.all(approx(x, y)))
assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose,
header='Arrays are not equal')
diff --git a/numpy/random/_generator.pyi b/numpy/random/_generator.pyi
index 23c04e472..e1cdefb15 100644
--- a/numpy/random/_generator.pyi
+++ b/numpy/random/_generator.pyi
@@ -29,6 +29,7 @@ from numpy._typing import (
_DTypeLikeUInt,
_Float32Codes,
_Float64Codes,
+ _FloatLike_co,
_Int8Codes,
_Int16Codes,
_Int32Codes,
@@ -188,13 +189,18 @@ class Generator:
out: None | ndarray[Any, dtype[float64]] = ...,
) -> ndarray[Any, dtype[float64]]: ...
@overload
- def beta(self, a: float, b: float, size: None = ...) -> float: ... # type: ignore[misc]
+ def beta(
+ self,
+ a: _FloatLike_co,
+ b: _FloatLike_co,
+ size: None = ...,
+ ) -> float: ... # type: ignore[misc]
@overload
def beta(
self, a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
) -> ndarray[Any, dtype[float64]]: ...
@overload
- def exponential(self, scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
+ def exponential(self, scale: _FloatLike_co = ..., size: None = ...) -> float: ... # type: ignore[misc]
@overload
def exponential(
self, scale: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ...
@@ -371,7 +377,12 @@ class Generator:
shuffle: bool = ...,
) -> ndarray[Any, Any]: ...
@overload
- def uniform(self, low: float = ..., high: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
+ def uniform(
+ self,
+ low: _FloatLike_co = ...,
+ high: _FloatLike_co = ...,
+ size: None = ...,
+ ) -> float: ... # type: ignore[misc]
@overload
def uniform(
self,
@@ -380,7 +391,12 @@ class Generator:
size: None | _ShapeLike = ...,
) -> ndarray[Any, dtype[float64]]: ...
@overload
- def normal(self, loc: float = ..., scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
+ def normal(
+ self,
+ loc: _FloatLike_co = ...,
+ scale: _FloatLike_co = ...,
+ size: None = ...,
+ ) -> float: ... # type: ignore[misc]
@overload
def normal(
self,
@@ -391,7 +407,7 @@ class Generator:
@overload
def standard_gamma( # type: ignore[misc]
self,
- shape: float,
+ shape: _FloatLike_co,
size: None = ...,
dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ...,
out: None = ...,
@@ -426,7 +442,7 @@ class Generator:
out: None | ndarray[Any, dtype[float64]] = ...,
) -> ndarray[Any, dtype[float64]]: ...
@overload
- def gamma(self, shape: float, scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
+ def gamma(self, shape: _FloatLike_co, scale: _FloatLike_co = ..., size: None = ...) -> float: ... # type: ignore[misc]
@overload
def gamma(
self,
@@ -435,13 +451,13 @@ class Generator:
size: None | _ShapeLike = ...,
) -> ndarray[Any, dtype[float64]]: ...
@overload
- def f(self, dfnum: float, dfden: float, size: None = ...) -> float: ... # type: ignore[misc]
+ def f(self, dfnum: _FloatLike_co, dfden: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc]
@overload
def f(
self, dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
) -> ndarray[Any, dtype[float64]]: ...
@overload
- def noncentral_f(self, dfnum: float, dfden: float, nonc: float, size: None = ...) -> float: ... # type: ignore[misc]
+ def noncentral_f(self, dfnum: _FloatLike_co, dfden: _FloatLike_co, nonc: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc]
@overload
def noncentral_f(
self,
@@ -451,19 +467,19 @@ class Generator:
size: None | _ShapeLike = ...,
) -> ndarray[Any, dtype[float64]]: ...
@overload
- def chisquare(self, df: float, size: None = ...) -> float: ... # type: ignore[misc]
+ def chisquare(self, df: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc]
@overload
def chisquare(
self, df: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
) -> ndarray[Any, dtype[float64]]: ...
@overload
- def noncentral_chisquare(self, df: float, nonc: float, size: None = ...) -> float: ... # type: ignore[misc]
+ def noncentral_chisquare(self, df: _FloatLike_co, nonc: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc]
@overload
def noncentral_chisquare(
self, df: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
) -> ndarray[Any, dtype[float64]]: ...
@overload
- def standard_t(self, df: float, size: None = ...) -> float: ... # type: ignore[misc]
+ def standard_t(self, df: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc]
@overload
def standard_t(
self, df: _ArrayLikeFloat_co, size: None = ...
@@ -473,25 +489,25 @@ class Generator:
self, df: _ArrayLikeFloat_co, size: _ShapeLike = ...
) -> ndarray[Any, dtype[float64]]: ...
@overload
- def vonmises(self, mu: float, kappa: float, size: None = ...) -> float: ... # type: ignore[misc]
+ def vonmises(self, mu: _FloatLike_co, kappa: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc]
@overload
def vonmises(
self, mu: _ArrayLikeFloat_co, kappa: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
) -> ndarray[Any, dtype[float64]]: ...
@overload
- def pareto(self, a: float, size: None = ...) -> float: ... # type: ignore[misc]
+ def pareto(self, a: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc]
@overload
def pareto(
self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
) -> ndarray[Any, dtype[float64]]: ...
@overload
- def weibull(self, a: float, size: None = ...) -> float: ... # type: ignore[misc]
+ def weibull(self, a: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc]
@overload
def weibull(
self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
) -> ndarray[Any, dtype[float64]]: ...
@overload
- def power(self, a: float, size: None = ...) -> float: ... # type: ignore[misc]
+ def power(self, a: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc]
@overload
def power(
self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
@@ -501,7 +517,12 @@ class Generator:
@overload
def standard_cauchy(self, size: _ShapeLike = ...) -> ndarray[Any, dtype[float64]]: ...
@overload
- def laplace(self, loc: float = ..., scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
+ def laplace(
+ self,
+ loc: _FloatLike_co = ...,
+ scale: _FloatLike_co = ...,
+ size: None = ...,
+ ) -> float: ... # type: ignore[misc]
@overload
def laplace(
self,
@@ -510,7 +531,12 @@ class Generator:
size: None | _ShapeLike = ...,
) -> ndarray[Any, dtype[float64]]: ...
@overload
- def gumbel(self, loc: float = ..., scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
+ def gumbel(
+ self,
+ loc: _FloatLike_co = ...,
+ scale: _FloatLike_co = ...,
+ size: None = ...,
+ ) -> float: ... # type: ignore[misc]
@overload
def gumbel(
self,
@@ -519,7 +545,12 @@ class Generator:
size: None | _ShapeLike = ...,
) -> ndarray[Any, dtype[float64]]: ...
@overload
- def logistic(self, loc: float = ..., scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
+ def logistic(
+ self,
+ loc: _FloatLike_co = ...,
+ scale: _FloatLike_co = ...,
+ size: None = ...,
+ ) -> float: ... # type: ignore[misc]
@overload
def logistic(
self,
@@ -528,7 +559,12 @@ class Generator:
size: None | _ShapeLike = ...,
) -> ndarray[Any, dtype[float64]]: ...
@overload
- def lognormal(self, mean: float = ..., sigma: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
+ def lognormal(
+ self,
+ mean: _FloatLike_co = ...,
+ sigma: _FloatLike_co = ...,
+ size: None = ...,
+ ) -> float: ... # type: ignore[misc]
@overload
def lognormal(
self,
@@ -537,19 +573,25 @@ class Generator:
size: None | _ShapeLike = ...,
) -> ndarray[Any, dtype[float64]]: ...
@overload
- def rayleigh(self, scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
+ def rayleigh(self, scale: _FloatLike_co = ..., size: None = ...) -> float: ... # type: ignore[misc]
@overload
def rayleigh(
self, scale: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ...
) -> ndarray[Any, dtype[float64]]: ...
@overload
- def wald(self, mean: float, scale: float, size: None = ...) -> float: ... # type: ignore[misc]
+ def wald(self, mean: _FloatLike_co, scale: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc]
@overload
def wald(
self, mean: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
) -> ndarray[Any, dtype[float64]]: ...
@overload
- def triangular(self, left: float, mode: float, right: float, size: None = ...) -> float: ... # type: ignore[misc]
+ def triangular(
+ self,
+ left: _FloatLike_co,
+ mode: _FloatLike_co,
+ right: _FloatLike_co,
+ size: None = ...,
+ ) -> float: ... # type: ignore[misc]
@overload
def triangular(
self,
@@ -559,31 +601,31 @@ class Generator:
size: None | _ShapeLike = ...,
) -> ndarray[Any, dtype[float64]]: ...
@overload
- def binomial(self, n: int, p: float, size: None = ...) -> int: ... # type: ignore[misc]
+ def binomial(self, n: int, p: _FloatLike_co, size: None = ...) -> int: ... # type: ignore[misc]
@overload
def binomial(
self, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
) -> ndarray[Any, dtype[int64]]: ...
@overload
- def negative_binomial(self, n: float, p: float, size: None = ...) -> int: ... # type: ignore[misc]
+ def negative_binomial(self, n: _FloatLike_co, p: _FloatLike_co, size: None = ...) -> int: ... # type: ignore[misc]
@overload
def negative_binomial(
self, n: _ArrayLikeFloat_co, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
) -> ndarray[Any, dtype[int64]]: ...
@overload
- def poisson(self, lam: float = ..., size: None = ...) -> int: ... # type: ignore[misc]
+ def poisson(self, lam: _FloatLike_co = ..., size: None = ...) -> int: ... # type: ignore[misc]
@overload
def poisson(
self, lam: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ...
) -> ndarray[Any, dtype[int64]]: ...
@overload
- def zipf(self, a: float, size: None = ...) -> int: ... # type: ignore[misc]
+ def zipf(self, a: _FloatLike_co, size: None = ...) -> int: ... # type: ignore[misc]
@overload
def zipf(
self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
) -> ndarray[Any, dtype[int64]]: ...
@overload
- def geometric(self, p: float, size: None = ...) -> int: ... # type: ignore[misc]
+ def geometric(self, p: _FloatLike_co, size: None = ...) -> int: ... # type: ignore[misc]
@overload
def geometric(
self, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
@@ -599,7 +641,7 @@ class Generator:
size: None | _ShapeLike = ...,
) -> ndarray[Any, dtype[int64]]: ...
@overload
- def logseries(self, p: float, size: None = ...) -> int: ... # type: ignore[misc]
+ def logseries(self, p: _FloatLike_co, size: None = ...) -> int: ... # type: ignore[misc]
@overload
def logseries(
self, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx
index faf19eaf2..1b19d00d9 100644
--- a/numpy/random/_generator.pyx
+++ b/numpy/random/_generator.pyx
@@ -240,6 +240,8 @@ cdef class Generator:
def spawn(self, int n_children):
"""
+ spawn(n_children)
+
Create new independent child generators.
See :ref:`seedsequence-spawn` for additional notes on spawning
@@ -247,6 +249,10 @@ cdef class Generator:
.. versionadded:: 1.25.0
+ Parameters
+ ----------
+ n_children : int
+
Returns
-------
child_generators : list of Generators
@@ -2628,7 +2634,7 @@ cdef class Generator:
>>> b = []
>>> for i in range(1000):
... a = 10. + rng.standard_normal(100)
- ... b.append(np.product(a))
+ ... b.append(np.prod(a))
>>> b = np.array(b) / np.min(b) # scale values to be positive
>>> count, bins, ignored = plt.hist(b, 100, density=True, align='mid')
diff --git a/numpy/random/bit_generator.pyx b/numpy/random/bit_generator.pyx
index 06f8c9753..83441747a 100644
--- a/numpy/random/bit_generator.pyx
+++ b/numpy/random/bit_generator.pyx
@@ -581,6 +581,8 @@ cdef class BitGenerator():
def spawn(self, int n_children):
"""
+ spawn(n_children)
+
Create new independent child bit generators.
See :ref:`seedsequence-spawn` for additional notes on spawning
@@ -589,6 +591,10 @@ cdef class BitGenerator():
.. versionadded:: 1.25.0
+ Parameters
+ ----------
+ n_children : int
+
Returns
-------
child_bit_generators : list of BitGenerators
diff --git a/numpy/random/mtrand.pyx b/numpy/random/mtrand.pyx
index ca6ba9de8..dfa553ee4 100644
--- a/numpy/random/mtrand.pyx
+++ b/numpy/random/mtrand.pyx
@@ -3066,7 +3066,7 @@ cdef class RandomState:
>>> b = []
>>> for i in range(1000):
... a = 10. + np.random.standard_normal(100)
- ... b.append(np.product(a))
+ ... b.append(np.prod(a))
>>> b = np.array(b) / np.min(b) # scale values to be positive
>>> count, bins, ignored = plt.hist(b, 100, density=True, align='mid')
diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py
index 6ceea5771..b9ea703d0 100644
--- a/numpy/testing/_private/utils.py
+++ b/numpy/testing/_private/utils.py
@@ -37,7 +37,7 @@ __all__ = [
'SkipTest', 'KnownFailureException', 'temppath', 'tempdir', 'IS_PYPY',
'HAS_REFCOUNT', "IS_WASM", 'suppress_warnings', 'assert_array_compare',
'assert_no_gc_cycles', 'break_cycles', 'HAS_LAPACK64', 'IS_PYSTON',
- '_OLD_PROMOTION', 'IS_MUSL'
+ '_OLD_PROMOTION', 'IS_MUSL', '_SUPPORTS_SVE'
]
@@ -1295,6 +1295,22 @@ def rundocs(filename=None, raise_on_error=True):
raise AssertionError("Some doctests failed:\n%s" % "\n".join(msg))
+def check_support_sve():
+ """
+ gh-22982
+ """
+
+ import subprocess
+ cmd = 'lscpu'
+ try:
+ return "sve" in (subprocess.Popen(cmd, stdout=subprocess.PIPE,
+ shell=True).communicate()[0]).decode('utf-8')
+ except OSError:
+ return False
+
+
+_SUPPORTS_SVE = check_support_sve()
+
#
# assert_raises and assert_raises_regex are taken from unittest.
#
@@ -2549,3 +2565,4 @@ def _get_glibc_version():
_glibcver = _get_glibc_version()
_glibc_older_than = lambda x: (_glibcver != '0.0' and _glibcver < x)
+
diff --git a/numpy/testing/tests/test_utils.py b/numpy/testing/tests/test_utils.py
index 8f4912fab..0aaa508ee 100644
--- a/numpy/testing/tests/test_utils.py
+++ b/numpy/testing/tests/test_utils.py
@@ -14,7 +14,6 @@ from numpy.testing import (
clear_and_catch_warnings, suppress_warnings, assert_string_equal, assert_,
tempdir, temppath, assert_no_gc_cycles, HAS_REFCOUNT
)
-from numpy.core.overrides import ARRAY_FUNCTION_ENABLED
class _GenericTest:
@@ -191,8 +190,6 @@ class TestArrayEqual(_GenericTest):
self._test_not_equal(a, b)
self._test_not_equal(b, a)
- @pytest.mark.skipif(
- not ARRAY_FUNCTION_ENABLED, reason='requires __array_function__')
def test_subclass_that_does_not_implement_npall(self):
class MyArray(np.ndarray):
def __array_function__(self, *args, **kwargs):
diff --git a/numpy/tests/test_public_api.py b/numpy/tests/test_public_api.py
index a7bec6313..108ba0b74 100644
--- a/numpy/tests/test_public_api.py
+++ b/numpy/tests/test_public_api.py
@@ -194,6 +194,7 @@ PRIVATE_BUT_PRESENT_MODULES = ['numpy.' + s for s in [
"core.umath",
"core.umath_tests",
"distutils.armccompiler",
+ "distutils.fujitsuccompiler",
"distutils.ccompiler",
'distutils.ccompiler_opt',
"distutils.command",
diff --git a/numpy/typing/tests/data/reveal/scalars.pyi b/numpy/typing/tests/data/reveal/scalars.pyi
index b7fc75acc..965aa5ace 100644
--- a/numpy/typing/tests/data/reveal/scalars.pyi
+++ b/numpy/typing/tests/data/reveal/scalars.pyi
@@ -1,4 +1,3 @@
-import sys
import numpy as np
b: np.bool_
@@ -151,8 +150,7 @@ reveal_type(round(u8, 3)) # E: {uint64}
reveal_type(round(f8)) # E: int
reveal_type(round(f8, 3)) # E: {float64}
-if sys.version_info >= (3, 9):
- reveal_type(f8.__ceil__()) # E: int
- reveal_type(f8.__floor__()) # E: int
+reveal_type(f8.__ceil__()) # E: int
+reveal_type(f8.__floor__()) # E: int
reveal_type(i8.is_integer()) # E: Literal[True]
diff --git a/numpy/typing/tests/test_runtime.py b/numpy/typing/tests/test_runtime.py
index 44d069006..c32c5db32 100644
--- a/numpy/typing/tests/test_runtime.py
+++ b/numpy/typing/tests/test_runtime.py
@@ -2,7 +2,6 @@
from __future__ import annotations
-import sys
from typing import (
get_type_hints,
Union,
@@ -24,10 +23,7 @@ class TypeTup(NamedTuple):
origin: None | type
-if sys.version_info >= (3, 9):
- NDArrayTup = TypeTup(npt.NDArray, npt.NDArray.__args__, np.ndarray)
-else:
- NDArrayTup = TypeTup(npt.NDArray, (), None)
+NDArrayTup = TypeTup(npt.NDArray, npt.NDArray.__args__, np.ndarray)
TYPES = {
"ArrayLike": TypeTup(npt.ArrayLike, npt.ArrayLike.__args__, Union),
diff --git a/pyproject.toml b/pyproject.toml
index 1e443c507..1b0e86023 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -30,7 +30,7 @@ requires = [
#maintainers = [
# {name = "NumPy Developers", email="numpy-discussion@python.org"},
#]
-#requires-python = ">=3.8"
+#requires-python = ">=3.9"
#readme = "README.md"
#classifiers = [
# 'Development Status :: 5 - Production/Stable',
@@ -40,7 +40,6 @@ requires = [
# 'Programming Language :: C',
# 'Programming Language :: Python',
# 'Programming Language :: Python :: 3',
-# 'Programming Language :: Python :: 3.8',
# 'Programming Language :: Python :: 3.9',
# 'Programming Language :: Python :: 3.10',
# 'Programming Language :: Python :: 3.11',
@@ -175,9 +174,9 @@ environment = { OPENBLAS64_="openblas", OPENBLAS="", NPY_USE_BLAS_ILP64="1", CFL
select = "*-win32"
environment = { OPENBLAS64_="", OPENBLAS="openblas", NPY_USE_BLAS_ILP64="0", CFLAGS="-m32", LDFLAGS="-m32" }
-[tool.devpy]
+[tool.spin]
package = 'numpy'
-[tool.devpy.commands]
-"Build" = ["devpy.build", "devpy.test"]
-"Environments" = ["devpy.shell", "devpy.ipython", "devpy.python"]
+[tool.spin.commands]
+"Build" = ["spin.cmds.meson.build", "spin.cmds.meson.test"]
+"Environments" = ["spin.cmds.meson.shell", "spin.cmds.meson.ipython", "spin.cmds.meson.python"]
diff --git a/setup.py b/setup.py
index bc8c7e0db..671df90fd 100755
--- a/setup.py
+++ b/setup.py
@@ -90,7 +90,6 @@ License :: OSI Approved :: BSD License
Programming Language :: C
Programming Language :: Python
Programming Language :: Python :: 3
-Programming Language :: Python :: 3.8
Programming Language :: Python :: 3.9
Programming Language :: Python :: 3.10
Programming Language :: Python :: 3.11
@@ -432,7 +431,7 @@ def setup_package():
test_suite='pytest',
version=versioneer.get_version(),
cmdclass=cmdclass,
- python_requires='>=3.8',
+ python_requires='>=3.9',
zip_safe=False,
entry_points={
'console_scripts': f2py_cmds,
diff --git a/site.cfg.example b/site.cfg.example
index 4df01a210..941917867 100644
--- a/site.cfg.example
+++ b/site.cfg.example
@@ -252,3 +252,13 @@
#[djbfft]
#include_dirs = /usr/local/djbfft/include
#library_dirs = /usr/local/djbfft/lib
+
+# Fujitsu SSL2
+# ----
+#[ssl2]
+#library_dirs = /opt/FJSVstclanga/v1.1.0/lib64
+#include_dirs = /opt/FJSVstclanga/v1.1.0/clang-comp/include
+# Single-threaded version.
+#libraries = fjlapacksve
+# Multi-threaded version. Python itself must be built by Fujitsu compiler.
+#libraries = fjlapackexsve
diff --git a/spin b/spin
new file mode 100755
index 000000000..7a11042ab
--- /dev/null
+++ b/spin
@@ -0,0 +1,19 @@
+#!/usr/bin/env python
+#
+# Example stub for running `python -m spin`
+#
+# Copy this into your project root.
+
+import os
+import sys
+import runpy
+
+sys.path.remove(os.path.abspath(os.path.dirname(sys.argv[0])))
+try:
+ runpy.run_module("spin", run_name="__main__")
+except ImportError:
+ print("Cannot import spin; please install it using")
+ print()
+ print(" pip install spin")
+ print()
+ sys.exit(1)
diff --git a/tools/ci/cirrus_general.yml b/tools/ci/cirrus_general.yml
index 95e42d334..c21bfd615 100644
--- a/tools/ci/cirrus_general.yml
+++ b/tools/ci/cirrus_general.yml
@@ -6,7 +6,6 @@ build_and_store_wheels: &BUILD_AND_STORE_WHEELS
wheels_artifacts:
path: "wheelhouse/*"
-
######################################################################
# Build linux_aarch64 natively
######################################################################
@@ -24,11 +23,14 @@ linux_aarch64_task:
# single task takes longer than 60 mins (the default time limit for a
# cirrus-ci task).
- env:
+ CIRRUS_CLONE_SUBMODULES: true
CIBW_BUILD: cp39-*
EXPECT_CPU_FEATURES: NEON NEON_FP16 NEON_VFPV4 ASIMD ASIMDHP ASIMDDP ASIMDFHM
- env:
+ CIRRUS_CLONE_SUBMODULES: true
CIBW_BUILD: cp310-*
- env:
+ CIRRUS_CLONE_SUBMODULES: true
CIBW_BUILD: cp311-*
build_script: |
diff --git a/tools/gitpod/Dockerfile b/tools/gitpod/Dockerfile
index dd5561750..1ff9076cd 100644
--- a/tools/gitpod/Dockerfile
+++ b/tools/gitpod/Dockerfile
@@ -25,7 +25,7 @@
# This image is based on: Ubuntu 20.04 (focal)
# https://hub.docker.com/_/ubuntu/?tab=tags&name=focal
# OS/ARCH: linux/amd64
-FROM gitpod/workspace-base:latest
+FROM gitpod/workspace-base:latest@sha256:770d3022db71512bdd1b7fdc06983f17cfc956342853e315d2d1c0ab39216a36
ARG MAMBAFORGE_VERSION="4.11.0-0"
ARG CONDA_ENV=numpy-dev
diff --git a/tools/gitpod/gitpod.Dockerfile b/tools/gitpod/gitpod.Dockerfile
index 23e4f3728..7c369ac49 100644
--- a/tools/gitpod/gitpod.Dockerfile
+++ b/tools/gitpod/gitpod.Dockerfile
@@ -34,6 +34,8 @@ COPY --from=clone --chown=gitpod /tmp/numpy ${WORKSPACE}
WORKDIR ${WORKSPACE}
# Build numpy to populate the cache used by ccache
+# Note, hadolint suggests consolidating the RUN commands. That info
+# level complaint (DL3059) is currently ignored to avoid errors.
RUN git config --global --add safe.directory /workspace/numpy
RUN git submodule update --init --depth=1 -- numpy/core/src/umath/svml numpy/core/src/npysort/x86-simd-sort
RUN conda activate ${CONDA_ENV} && \