350 files changed, 10130 insertions, 4375 deletions
diff --git a/.appveyor.yml b/.appveyor.yml deleted file mode 100644 index 079496d93..000000000 --- a/.appveyor.yml +++ /dev/null @@ -1,154 +0,0 @@ -# As config was originally based on an example by Olivier Grisel. Thanks! -# https://github.com/ogrisel/python-appveyor-demo/blob/master/appveyor.yml -clone_depth: 50 - -# No reason for us to restrict the number concurrent jobs -max_jobs: 100 - -cache: - - '%LOCALAPPDATA%\pip\Cache' - -environment: - global: - MINGW_32: C:\mingw-w64\i686-6.3.0-posix-dwarf-rt_v5-rev1\mingw32\bin - MINGW_64: C:\mingw-w64\x86_64-6.3.0-posix-seh-rt_v5-rev1\mingw64\bin - OPENBLAS_32: https://3f23b170c54c2533c070-1c8a9b3114517dc5fe17b7c3f8c63a43.ssl.cf2.rackcdn.com/openblas-5f998ef_gcc7_1_0_win32.zip - OPENBLAS_64: https://3f23b170c54c2533c070-1c8a9b3114517dc5fe17b7c3f8c63a43.ssl.cf2.rackcdn.com/openblas-5f998ef_gcc7_1_0_win64.zip - APPVEYOR_SAVE_CACHE_ON_ERROR: true - APPVEYOR_SKIP_FINALIZE_ON_EXIT: true - TEST_TIMEOUT: 1000 - NPY_NUM_BUILD_JOBS: 4 - - matrix: - - PYTHON: C:\Python36 - PYTHON_VERSION: 3.6 - PYTHON_ARCH: 32 - TEST_MODE: fast - - - PYTHON: C:\Python37 - PYTHON_VERSION: 3.7 - PYTHON_ARCH: 32 - TEST_MODE: fast - - - PYTHON: C:\Python36-x64 - PYTHON_VERSION: 3.6 - PYTHON_ARCH: 64 - TEST_MODE: full - INSTALL_PICKLE5: 1 - - - PYTHON: C:\Python37-x64 - PYTHON_VERSION: 3.7 - PYTHON_ARCH: 64 - TEST_MODE: full - INSTALL_PICKLE5: 1 - -init: - - "ECHO %PYTHON% %PYTHON_VERSION% %PYTHON_ARCH%" - - "ECHO \"%APPVEYOR_SCHEDULED_BUILD%\"" - # If there is a newer build queued for the same PR, cancel this one. - # The AppVeyor 'rollout builds' option is supposed to serve the same - # purpose but it is problematic because it tends to cancel builds pushed - # directly to master instead of just PR builds (or the converse). - # credits: JuliaLang developers. - - ps: if ($env:APPVEYOR_PULL_REQUEST_NUMBER -and $env:APPVEYOR_BUILD_NUMBER -ne ((Invoke-RestMethod ` - https://ci.appveyor.com/api/projects/$env:APPVEYOR_ACCOUNT_NAME/$env:APPVEYOR_PROJECT_SLUG/history?recordsNumber=50).builds | ` - Where-Object pullRequestId -eq $env:APPVEYOR_PULL_REQUEST_NUMBER)[0].buildNumber) { ` - raise "There are newer queued builds for this pull request, skipping build." - } - -install: - # Prepend newly installed Python to the PATH of this build (this cannot be - # done from inside the powershell script as it would require to restart - # the parent CMD process). - - SET PATH=%PYTHON%;%PYTHON%\Scripts;%PATH% - - if [%PYTHON_ARCH%]==[32] SET PATH=%MINGW_32%;%PATH% & SET OPENBLAS=%OPENBLAS_32% - - if [%PYTHON_ARCH%]==[64] SET PATH=%MINGW_64%;%PATH% & SET OPENBLAS=%OPENBLAS_64% - - # Check that we have the expected version and architecture for Python - - python --version - - >- - %CMD_IN_ENV% - python -c "import sys,platform,struct; - print(sys.platform, platform.machine(), struct.calcsize('P') * 8, )" - - # Install "openblas.a" to PYTHON\lib - # Library provided by Matthew Brett at https://github.com/matthew-brett/build-openblas - - ps: | - $clnt = new-object System.Net.WebClient - $file = "$(New-TemporaryFile).zip" - $tmpdir = New-TemporaryFile | %{ rm $_; mkdir $_ } - $destination = "$env:PYTHON\lib\openblas.a" - - echo $file - echo $tmpdir - echo $env:OPENBLAS - - $clnt.DownloadFile($env:OPENBLAS, $file) - Get-FileHash $file | Format-List - - Expand-Archive $file $tmpdir - - rm $tmpdir\$env:PYTHON_ARCH\lib\*.dll.a - $lib = ls $tmpdir\$env:PYTHON_ARCH\lib\*.a | ForEach { ls $_ } | Select-Object -first 1 - echo $lib - - cp $lib $destination - ls $destination - - # Upgrade to the latest pip. 
- - 'python -m pip install -U pip setuptools wheel' - - - if [%INSTALL_PICKLE5%]==[1] echo pickle5 >> tools/ci/appveyor/requirements.txt - - # Install the numpy test dependencies. - - 'pip install -U --timeout 5 --retries 2 -r tools/ci/appveyor/requirements.txt' - -build_script: - # Here, we add MinGW to the path to be able to link an OpenBLAS.dll - # We then use the import library from the DLL to compile with MSVC - - ps: | - pip wheel -v -v -v --wheel-dir=dist . - - # For each wheel that pip has placed in the "dist" directory - # First, upload the wheel to the "artifacts" tab and then - # install the wheel. If we have only built numpy (as is the case here), - # then there will be one wheel to install. - - # This method is more representative of what will be distributed, - # because it actually tests what the built wheels will be rather than - # what 'setup.py install' will do and at it uploads the wheels so that - # they can be inspected. - - ls dist -r | Foreach-Object { - Push-AppveyorArtifact $_.FullName - pip install $_.FullName - } - -test_script: - python runtests.py -v -n -m %TEST_MODE% -- --junitxml=%cd%\junit-results.xml - -after_build: - # Remove old or huge cache files to hopefully not exceed the 1GB cache limit. - # - # If the cache limit is reached, the cache will not be updated (of not even - # created in the first run). So this is a trade of between keeping the cache - # current and having a cache at all. - # NB: This is done only `on_success` since the cache in uploaded only on - # success anyway. - - C:\cygwin\bin\find "%LOCALAPPDATA%\pip" -type f -mtime +360 -delete - - C:\cygwin\bin\find "%LOCALAPPDATA%\pip" -type f -size +10M -delete - - C:\cygwin\bin\find "%LOCALAPPDATA%\pip" -empty -delete - # Show size of cache - - C:\cygwin\bin\du -hs "%LOCALAPPDATA%\pip\Cache" - -on_finish: - # We can get a nice display of test results in the "test" tab with py.test - # For now, this does nothing. - - ps: | - If (Test-Path .\junit-results.xml) { - (new-object net.webclient).UploadFile( - "https://ci.appveyor.com/api/testresults/junit/$($env:APPVEYOR_JOB_ID)", - (Resolve-Path .\junit-results.xml) - ) - } - $LastExitCode = 0 diff --git a/.circleci/config.yml b/.circleci/config.yml index 6b4ab812f..772c3fbfd 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -21,7 +21,7 @@ jobs: python3 -m venv venv ln -s $(which python3) venv/bin/python3.6 . venv/bin/activate - pip install cython sphinx==1.8.5 matplotlib ipython + pip install cython sphinx==2.2.0 matplotlib ipython sudo apt-get update sudo apt-get install -y graphviz texlive-fonts-recommended texlive-latex-recommended texlive-latex-extra texlive-generic-extra latexmk texlive-xetex @@ -35,6 +35,14 @@ jobs: pip install scipy - run: + name: create release notes + command: | + . venv/bin/activate + pip install git+https://github.com/hawkowl/towncrier.git@master + VERSION=$(python -c "import setup; print(setup.VERSION)") + towncrier --version $VERSION --yes + ./tools/ci/test_all_newsfragments_used.py + - run: name: build devdocs command: | . 
venv/bin/activate
diff --git a/.codecov.yml b/.codecov.yml
index 35584a188..d92d54c9d 100644
--- a/.codecov.yml
+++ b/.codecov.yml
@@ -1,6 +1,4 @@
 codecov:
-  ci:
-    - !appveyor
   notify:
     require_ci_to_pass: no
     after_n_builds: 1
diff --git a/.dependabot/config.yml b/.dependabot/config.yml
new file mode 100644
index 000000000..160ec85cf
--- /dev/null
+++ b/.dependabot/config.yml
@@ -0,0 +1,9 @@
+version: 1
+update_configs:
+  - package_manager: "python"
+    directory: "/"
+    update_schedule: "weekly"
+    commit_message:
+      prefix: "MAINT"
+    default_labels:
+      - "03 - Maintenance"
diff --git a/.gitignore b/.gitignore
index 9283cb477..2ad02b560 100644
--- a/.gitignore
+++ b/.gitignore
@@ -146,10 +146,6 @@ numpy/core/src/npysort/radixsort.c
 numpy/core/src/npysort/selection.c
 numpy/core/src/npysort/timsort.c
 numpy/core/src/npysort/sort.c
-numpy/core/src/common/npy_binsearch.h
-numpy/core/src/common/npy_partition.h
-numpy/core/src/common/npy_sort.h
-numpy/core/src/common/templ_common.h
 numpy/core/src/private/npy_binsearch.h
 numpy/core/src/private/npy_partition.h
 numpy/core/src/private/templ_common.h
diff --git a/.lgtm.yml b/.lgtm.yml
--- a/.lgtm.yml
+++ b/.lgtm.yml
@@ -16,6 +16,9 @@ extraction:
   index:
     build_command:
       - python3 setup.py build
+  after_prepare:
+    - pip3 install --upgrade --user cython
+    - export PATH="$HOME/.local/bin:$PATH"
 
 queries:
   - include: py/file-not-closed
diff --git a/.mailmap b/.mailmap
--- a/.mailmap
+++ b/.mailmap
@@ -22,6 +22,7 @@ Alex Thomas <alexthomas93@users.noreply.github.com> alexthomas93 <alexthomas93@u
 Alexander Belopolsky <abalkin@enlnt.com> Alexander Belopolsky <a@enlnt.com>
 Alexander Belopolsky <abalkin@enlnt.com> Alexander Belopolsky <a@enlnt.com>
 Alexander Belopolsky <abalkin@enlnt.com> sasha <sasha@localhost>
+Alexander Jung <kontakt@ajung.name> aleju <kontakt@ajung.name>
 Alexander Shadchin <alexandr.shadchin@gmail.com> Alexandr Shadchin <alexandr.shadchin@gmail.com>
 Alexander Shadchin <alexandr.shadchin@gmail.com> shadchin <alexandr.shadchin@gmail.com>
 Allan Haldane <allan.haldane@gmail.com> ahaldane <ealloc@gmail.com>
@@ -142,6 +143,7 @@ Kiko Correoso <kachine@protonmail.com> kikocorreoso <kikocorreoso@gmail.com>
 Kiko Correoso <kachine@protonmail.com> kikocorreoso <kikocorreoso@users.noreply.github.com>
 Konrad Kapp <k_kapp@yahoo.com> k_kapp@yahoo.com <k_kapp@yahoo.com>
 Kriti Singh <kritisingh1.ks@gmail.com> kritisingh1 <kritisingh1.ks@gmail.com>
+Kmol Yuan <pyslvs@gmail.com> Yuan <pyslvs@gmail.com>
 Lars Buitinck <larsmans@gmail.com> Lars Buitinck <l.buitinck@esciencecenter.nl>
 Lars Buitinck <larsmans@gmail.com> Lars Buitinck <L.J.Buitinck@uva.nl>
 Lars Grüter <lagru@mailbox.org> Lars G <lagru@mailbox.org>
diff --git a/.travis.yml b/.travis.yml
index 714122957..68564d35b 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -18,6 +18,14 @@ cache:
   directories:
     - $HOME/.cache/pip
 
+stage: Comprehensive tests
+
+stages:
+  # Do the style check and a single test job, don't proceed if it fails
+  - name: Initial tests
+  # Do the rest of the tests
+  - name: Comprehensive tests
+
 env:
   global:
     - OpenBLAS_version=0.3.7
@@ -29,13 +37,14 @@ env:
       iFWt9Ka92CaqYdU7nqfWp9VImSndPmssjmCXJ1v1IjZPAM\
       ahp7Qnm0rWRmA0z9SomuRUQOJQ6s684vU="
 
-python:
-  - 3.5
-  - 3.6
-  - 3.7
-  - 3.8-dev
 matrix:
   include:
+    # Do all python versions without environment variables set
+    - python: 3.5
+    - stage: Initial tests
+      python: 3.6
+    - python: 3.7
+    - python: 3.8-dev
     - python: 3.7
       env: INSTALL_PICKLE5=1
     - python: 3.6
diff --git a/MANIFEST.in b/MANIFEST.in
index 7c8f3b6ef..b58f85d4d 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -5,19 +5,24 @@
 # Avoid using MANIFEST.in for that.
# include MANIFEST.in +include pyproject.toml include pytest.ini include *.txt include README.md include site.cfg.example +include runtests.py +include tox.ini +include .coveragerc +include test_requirements.txt recursive-include numpy/random *.pyx *.pxd *.pyx.in *.pxd.in +include numpy/random/include/* +include numpy/__init__.pxd # Add build support that should go in sdist, but not go in bdist/be installed # Note that sub-directories that don't have __init__ are apparently not # included by 'recursive-include', so list those separately recursive-include numpy * recursive-include numpy/_build_utils * recursive-include numpy/linalg/lapack_lite * -include runtests.py -include tox.ini pytest.ini .coveragerc recursive-include tools * # Add sdist files whose use depends on local configuration. include numpy/core/src/common/cblasfuncs.c @@ -2,8 +2,6 @@ []( https://travis-ci.org/numpy/numpy) -[]( - https://ci.appveyor.com/project/charris/numpy) []( https://dev.azure.com/numpy/numpy/_build/latest?definitionId=5) []( @@ -12,7 +10,7 @@ NumPy is the fundamental package needed for scientific computing with Python. - **Website:** https://www.numpy.org -- **Documentation:** http://docs.scipy.org/ +- **Documentation:** https://docs.scipy.org/ - **Mailing list:** https://mail.python.org/mailman/listinfo/numpy-discussion - **Source code:** https://github.com/numpy/numpy - **Contributing:** https://www.numpy.org/devdocs/dev/index.html diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 802912673..633808c0b 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -10,213 +10,173 @@ variables: # to match numpy-wheels repo OpenBLAS_version: 0.3.7 -jobs: -- job: Linux_Python_36_32bit_full_with_asserts - pool: - vmImage: 'ubuntu-16.04' - steps: - - script: | - docker pull i386/ubuntu:bionic - docker run -v $(pwd):/numpy i386/ubuntu:bionic /bin/bash -c "cd numpy && \ - apt-get -y update && \ - apt-get -y install python3.6-dev python3-pip locales python3-certifi && \ - locale-gen fr_FR && update-locale && \ - pip3 install setuptools nose cython==0.29.0 pytest pytz pickle5 && \ - apt-get -y install gfortran-5 wget && \ - target=\$(python3 tools/openblas_support.py) && \ - cp -r \$target/usr/local/lib/* /usr/lib && \ - cp \$target/usr/local/include/* /usr/include && \ - python3 -m pip install . 
&& \ - F77=gfortran-5 F90=gfortran-5 \ - CFLAGS='-UNDEBUG -std=c99' python3 runtests.py -n --mode=full -- -rsx --junitxml=junit/test-results.xml && \ - python3 tools/openblas_support.py --check_version $(OpenBLAS_version)" - displayName: 'Run 32-bit Ubuntu Docker Build / Tests' - - task: PublishTestResults@2 - condition: succeededOrFailed() - inputs: - testResultsFiles: '**/test-*.xml' - failTaskOnFailedTests: true - testRunTitle: 'Publish test results for Python 3.6-32 bit full Linux' -- job: macOS - pool: - # NOTE: at time of writing, there is a danger - # that using an invalid vmIMage string for macOS - # image silently redirects to a Windows build on Azure; - # for now, use the only image name officially present in - # the docs even though i.e., numba uses another in their - # azure config for mac os -- Microsoft has indicated - # they will patch this issue - vmImage: macOS-10.13 - steps: - # the @0 refers to the (major) version of the *task* on Microsoft's - # end, not the order in the build matrix nor anything to do - # with version of Python selected - - task: UsePythonVersion@0 - inputs: - versionSpec: '3.6' - addToPath: true - architecture: 'x64' - # NOTE: do we have a compelling reason to use older / newer - # versions of Xcode toolchain for testing? - - script: /bin/bash -c "sudo xcode-select -s /Applications/Xcode_10.app/Contents/Developer" - displayName: 'select Xcode version' - # NOTE: might be better if we could avoid installing - # two C compilers, but with homebrew looks like we're - # now stuck getting the full gcc toolchain instead of - # just pulling in gfortran - - script: | - # same version of gfortran as the wheel builds - brew install gcc49 - # manually link critical gfortran libraries - ln -s /usr/local/Cellar/gcc@4.9/4.9.4_1/lib/gcc/4.9/libgfortran.3.dylib /usr/local/lib/libgfortran.3.dylib - ln -s /usr/local/Cellar/gcc@4.9/4.9.4_1/lib/gcc/4.9/libquadmath.0.dylib /usr/local/lib/libquadmath.0.dylib - # manually symlink gfortran-4.9 to plain gfortran - # for f2py - ln -s /usr/local/bin/gfortran-4.9 /usr/local/bin/gfortran - displayName: 'make gfortran available on mac os vm' - # use the pre-built openblas binary that most closely - # matches our MacOS wheel builds -- currently based - # primarily on file size / name details - - script: | - target=$(python tools/openblas_support.py) - # manually link to appropriate system paths - cp $target/usr/local/lib/* /usr/local/lib/ - cp $target/usr/local/include/* /usr/local/include/ - displayName: 'install pre-built openblas' - - script: python -m pip install --upgrade pip setuptools wheel - displayName: 'Install tools' - - script: python -m pip install cython nose pytz pytest pickle5 vulture docutils sphinx==1.8.5 numpydoc - displayName: 'Install dependencies; some are optional to avoid test skips' - - script: /bin/bash -c "! vulture . 
--min-confidence 100 --exclude doc/,numpy/distutils/ | grep 'unreachable'" - displayName: 'Check for unreachable code paths in Python modules' - # prefer usage of clang over gcc proper - # to match likely scenario on many user mac machines - - script: python setup.py build -j 4 install - displayName: 'Build NumPy' - env: - BLAS: None - LAPACK: None - ATLAS: None - ACCELERATE: None - CC: /usr/bin/clang - # wait until after dev build of NumPy to pip - # install matplotlib to avoid pip install of older numpy - - script: python -m pip install matplotlib - displayName: 'Install matplotlib before refguide run' - - script: python runtests.py -g --refguide-check - displayName: 'Run Refuide Check' - - script: python runtests.py -n --mode=full -- -rsx --junitxml=junit/test-results.xml - displayName: 'Run Full NumPy Test Suite' - - bash: python tools/openblas_support.py --check_version $(OpenBLAS_version) - displayName: 'Verify OpenBLAS version' - - task: PublishTestResults@2 - condition: succeededOrFailed() - inputs: - testResultsFiles: '**/test-*.xml' - failTaskOnFailedTests: true - testRunTitle: 'Publish test results for Python 3.6 64-bit full Mac OS' -- job: Windows - pool: - vmImage: 'VS2017-Win2016' - strategy: - maxParallel: 6 - matrix: - Python36-32bit-fast: - PYTHON_VERSION: '3.6' - PYTHON_ARCH: 'x86' - TEST_MODE: fast - BITS: 32 - Python37-32bit-fast: - PYTHON_VERSION: '3.7' - PYTHON_ARCH: 'x86' - TEST_MODE: fast - BITS: 32 - Python35-64bit-full: - PYTHON_VERSION: '3.5' - PYTHON_ARCH: 'x64' - TEST_MODE: full - BITS: 64 - Python36-64bit-full: - PYTHON_VERSION: '3.6' - PYTHON_ARCH: 'x64' - TEST_MODE: full - INSTALL_PICKLE5: 1 - BITS: 64 - Python37-64bit-full: - PYTHON_VERSION: '3.7' - PYTHON_ARCH: 'x64' - TEST_MODE: full - INSTALL_PICKLE5: 1 - BITS: 64 - steps: - - task: UsePythonVersion@0 - inputs: - versionSpec: $(PYTHON_VERSION) - addToPath: true - architecture: $(PYTHON_ARCH) - - script: python -m pip install --upgrade pip setuptools wheel - displayName: 'Install tools' - - script: python -m pip install cython nose pytz pytest - displayName: 'Install dependencies; some are optional to avoid test skips' - - script: if [%INSTALL_PICKLE5%]==[1] python -m pip install pickle5 - displayName: 'Install optional pickle5 backport (only for python3.6 and 3.7)' +stages: +- stage: InitialTests + jobs: + - job: WindowsFast + pool: + vmImage: 'VS2017-Win2016' + strategy: + matrix: + Python36-64bit-fast: + PYTHON_VERSION: '3.6' + PYTHON_ARCH: 'x64' + TEST_MODE: fast + BITS: 64 + steps: + - template: azure-steps-windows.yml - - powershell: | - $pyversion = python -c "from __future__ import print_function; import sys; print(sys.version.split()[0])" - Write-Host "Python Version: $pyversion" - $target = "C:\\hostedtoolcache\\windows\\Python\\$pyversion\\$(PYTHON_ARCH)\\lib\\openblas.a" - Write-Host "target path: $target" - $openblas = python tools/openblas_support.py - cp $openblas $target - displayName: 'Download / Install OpenBLAS' - - - powershell: | - choco install -y mingw --forcex86 --force --version=5.3.0 - displayName: 'Install 32-bit mingw for 32-bit builds' - condition: eq(variables['BITS'], 32) - # NOTE: for Windows builds it seems much more tractable to use runtests.py - # vs. 
manual setup.py and then runtests.py for testing only - - powershell: | - If ($(BITS) -eq 32) { - $env:CFLAGS = "-m32" - $env:LDFLAGS = "-m32" - $env:PATH = "C:\\tools\\mingw32\\bin;" + $env:PATH - refreshenv - } - python -c "from tools import openblas_support; openblas_support.make_init('numpy')" - pip wheel -v -v -v --wheel-dir=dist . - - ls dist -r | Foreach-Object { - pip install $_.FullName - } - displayName: 'Build NumPy' - - bash: | - pushd . && cd .. && target=$(python -c "import numpy, os; print(os.path.abspath(os.path.join(os.path.dirname(numpy.__file__), '.libs')))") && popd - pip download -d destination --only-binary --no-deps numpy==1.14 - cd destination && unzip numpy*.whl && cp numpy/.libs/*.dll $target - ls $target - displayName: 'Add extraneous & older DLL to numpy/.libs to probe DLL handling robustness' - condition: eq(variables['PYTHON_VERSION'], '3.6') - - script: pushd . && cd .. && python -c "from ctypes import windll; windll.kernel32.SetDefaultDllDirectories(0x00000800); import numpy" && popd - displayName: 'For gh-12667; Windows DLL resolution' - - script: python runtests.py -n --show-build-log --mode=$(TEST_MODE) -- -rsx --junitxml=junit/test-results.xml - displayName: 'Run NumPy Test Suite' - - task: PublishTestResults@2 - condition: succeededOrFailed() - inputs: - testResultsFiles: '**/test-*.xml' - failTaskOnFailedTests: true - testRunTitle: 'Publish test results for Python $(PYTHON_VERSION) $(BITS)-bit $(TEST_MODE) Windows' - -- job: Linux_PyPy3 - pool: - vmIMage: 'ubuntu-16.04' - steps: - - script: source tools/pypy-test.sh - displayName: 'Run PyPy3 Build / Tests' - - task: PublishTestResults@2 - condition: succeededOrFailed() - inputs: - testResultsFiles: '**/test-*.xml' - testRunTitle: 'Publish test results for PyPy3' - failTaskOnFailedTests: true +- stage: ComprehensiveTests + jobs: + - job: Linux_Python_36_32bit_full_with_asserts + pool: + vmImage: 'ubuntu-16.04' + steps: + - script: | + docker pull i386/ubuntu:bionic + docker run -v $(pwd):/numpy i386/ubuntu:bionic /bin/bash -c "cd numpy && \ + apt-get -y update && \ + apt-get -y install python3.6-dev python3-pip locales python3-certifi && \ + locale-gen fr_FR && update-locale && \ + apt-get -y install gfortran-5 wget && \ + target=\$(python3 tools/openblas_support.py) && \ + cp -r \$target/usr/local/lib/* /usr/lib && \ + cp \$target/usr/local/include/* /usr/include && \ + python3 -m pip install --user --upgrade pip setuptools && \ + python3 -m pip install --user -r test_requirements.txt && \ + python3 -m pip install . 
&& \ + F77=gfortran-5 F90=gfortran-5 \ + CFLAGS='-UNDEBUG -std=c99' python3 runtests.py -n --debug-info --mode=full -- -rsx --junitxml=junit/test-results.xml && \ + python3 tools/openblas_support.py --check_version $(OpenBLAS_version)" + displayName: 'Run 32-bit Ubuntu Docker Build / Tests' + - task: PublishTestResults@2 + condition: succeededOrFailed() + inputs: + testResultsFiles: '**/test-*.xml' + failTaskOnFailedTests: true + testRunTitle: 'Publish test results for Python 3.6-32 bit full Linux' + - job: macOS + pool: + # NOTE: at time of writing, there is a danger + # that using an invalid vmIMage string for macOS + # image silently redirects to a Windows build on Azure; + # for now, use the only image name officially present in + # the docs even though i.e., numba uses another in their + # azure config for mac os -- Microsoft has indicated + # they will patch this issue + vmImage: macOS-10.13 + steps: + # the @0 refers to the (major) version of the *task* on Microsoft's + # end, not the order in the build matrix nor anything to do + # with version of Python selected + - task: UsePythonVersion@0 + inputs: + versionSpec: '3.6' + addToPath: true + architecture: 'x64' + # NOTE: do we have a compelling reason to use older / newer + # versions of Xcode toolchain for testing? + - script: /bin/bash -c "sudo xcode-select -s /Applications/Xcode_10.app/Contents/Developer" + displayName: 'select Xcode version' + # NOTE: might be better if we could avoid installing + # two C compilers, but with homebrew looks like we're + # now stuck getting the full gcc toolchain instead of + # just pulling in gfortran + - script: | + # same version of gfortran as the wheel builds + brew install gcc49 + # manually link critical gfortran libraries + ln -s /usr/local/Cellar/gcc@4.9/4.9.4_1/lib/gcc/4.9/libgfortran.3.dylib /usr/local/lib/libgfortran.3.dylib + ln -s /usr/local/Cellar/gcc@4.9/4.9.4_1/lib/gcc/4.9/libquadmath.0.dylib /usr/local/lib/libquadmath.0.dylib + # manually symlink gfortran-4.9 to plain gfortran + # for f2py + ln -s /usr/local/bin/gfortran-4.9 /usr/local/bin/gfortran + displayName: 'make gfortran available on mac os vm' + # use the pre-built openblas binary that most closely + # matches our MacOS wheel builds -- currently based + # primarily on file size / name details + - script: | + target=$(python tools/openblas_support.py) + # manually link to appropriate system paths + cp $target/usr/local/lib/* /usr/local/lib/ + cp $target/usr/local/include/* /usr/local/include/ + displayName: 'install pre-built openblas' + - script: python -m pip install --upgrade pip setuptools wheel + displayName: 'Install tools' + - script: | + python -m pip install -r test_requirements.txt + python -m pip install vulture docutils sphinx==2.2.0 numpydoc + displayName: 'Install dependencies; some are optional to avoid test skips' + - script: /bin/bash -c "! vulture . 
--min-confidence 100 --exclude doc/,numpy/distutils/ | grep 'unreachable'" + displayName: 'Check for unreachable code paths in Python modules' + # prefer usage of clang over gcc proper + # to match likely scenario on many user mac machines + - script: python setup.py build -j 4 build_src --verbose-cfg install + displayName: 'Build NumPy' + env: + BLAS: None + LAPACK: None + ATLAS: None + ACCELERATE: None + CC: /usr/bin/clang + # wait until after dev build of NumPy to pip + # install matplotlib to avoid pip install of older numpy + - script: python -m pip install matplotlib + displayName: 'Install matplotlib before refguide run' + - script: python runtests.py -g --refguide-check + displayName: 'Run Refuide Check' + - script: python runtests.py -n --mode=full -- -rsx --junitxml=junit/test-results.xml + displayName: 'Run Full NumPy Test Suite' + - bash: python tools/openblas_support.py --check_version $(OpenBLAS_version) + displayName: 'Verify OpenBLAS version' + - task: PublishTestResults@2 + condition: succeededOrFailed() + inputs: + testResultsFiles: '**/test-*.xml' + failTaskOnFailedTests: true + testRunTitle: 'Publish test results for Python 3.6 64-bit full Mac OS' + - job: Windows + pool: + vmImage: 'VS2017-Win2016' + strategy: + maxParallel: 6 + matrix: + Python36-32bit-fast: + PYTHON_VERSION: '3.6' + PYTHON_ARCH: 'x86' + TEST_MODE: fast + BITS: 32 + Python37-32bit-fast: + PYTHON_VERSION: '3.7' + PYTHON_ARCH: 'x86' + TEST_MODE: fast + BITS: 32 + Python35-64bit-full: + PYTHON_VERSION: '3.5' + PYTHON_ARCH: 'x64' + TEST_MODE: full + BITS: 64 + Python36-64bit-full: + PYTHON_VERSION: '3.6' + PYTHON_ARCH: 'x64' + TEST_MODE: full + BITS: 64 + Python37-64bit-full: + PYTHON_VERSION: '3.7' + PYTHON_ARCH: 'x64' + TEST_MODE: full + BITS: 64 + steps: + - template: azure-steps-windows.yml + - job: Linux_PyPy3 + pool: + vmIMage: 'ubuntu-16.04' + steps: + - script: source tools/pypy-test.sh + displayName: 'Run PyPy3 Build / Tests' + - task: PublishTestResults@2 + condition: succeededOrFailed() + inputs: + testResultsFiles: '**/test-*.xml' + testRunTitle: 'Publish test results for PyPy3' + failTaskOnFailedTests: true diff --git a/azure-steps-windows.yml b/azure-steps-windows.yml new file mode 100644 index 000000000..26d7a667d --- /dev/null +++ b/azure-steps-windows.yml @@ -0,0 +1,56 @@ +steps: +- task: UsePythonVersion@0 + inputs: + versionSpec: $(PYTHON_VERSION) + addToPath: true + architecture: $(PYTHON_ARCH) +- script: python -m pip install --upgrade pip setuptools wheel + displayName: 'Install tools' +- script: python -m pip install -r test_requirements.txt + displayName: 'Install dependencies; some are optional to avoid test skips' +- powershell: | + $pyversion = python -c "from __future__ import print_function; import sys; print(sys.version.split()[0])" + Write-Host "Python Version: $pyversion" + $target = "C:\\hostedtoolcache\\windows\\Python\\$pyversion\\$(PYTHON_ARCH)\\lib\\openblas.a" + Write-Host "target path: $target" + $openblas = python tools/openblas_support.py + cp $openblas $target + displayName: 'Download / Install OpenBLAS' + +- powershell: | + choco install -y mingw --forcex86 --force --version=5.3.0 + displayName: 'Install 32-bit mingw for 32-bit builds' + condition: eq(variables['BITS'], 32) +# NOTE: for Windows builds it seems much more tractable to use runtests.py +# vs. 
manual setup.py and then runtests.py for testing only +- powershell: | + If ($(BITS) -eq 32) { + $env:CFLAGS = "-m32" + $env:LDFLAGS = "-m32" + $env:PATH = "C:\\tools\\mingw32\\bin;" + $env:PATH + refreshenv + } + python -c "from tools import openblas_support; openblas_support.make_init('numpy')" + pip wheel -v -v -v --wheel-dir=dist . + + ls dist -r | Foreach-Object { + pip install $_.FullName + } + displayName: 'Build NumPy' +- bash: | + pushd . && cd .. && target=$(python -c "import numpy, os; print(os.path.abspath(os.path.join(os.path.dirname(numpy.__file__), '.libs')))") && popd + pip download -d destination --only-binary --no-deps numpy==1.14 + cd destination && unzip numpy*.whl && cp numpy/.libs/*.dll $target + ls $target + displayName: 'Add extraneous & older DLL to numpy/.libs to probe DLL handling robustness' + condition: eq(variables['PYTHON_VERSION'], '3.6') +- script: pushd . && cd .. && python -c "from ctypes import windll; windll.kernel32.SetDefaultDllDirectories(0x00000800); import numpy" && popd + displayName: 'For gh-12667; Windows DLL resolution' +- script: python runtests.py -n --show-build-log --mode=$(TEST_MODE) -- -rsx --junitxml=junit/test-results.xml + displayName: 'Run NumPy Test Suite' +- task: PublishTestResults@2 + condition: succeededOrFailed() + inputs: + testResultsFiles: '**/test-*.xml' + failTaskOnFailedTests: true + testRunTitle: 'Publish test results for Python $(PYTHON_VERSION) $(BITS)-bit $(TEST_MODE) Windows'
\ No newline at end of file diff --git a/benchmarks/benchmarks/bench_avx.py b/benchmarks/benchmarks/bench_avx.py new file mode 100644 index 000000000..f7b524e43 --- /dev/null +++ b/benchmarks/benchmarks/bench_avx.py @@ -0,0 +1,34 @@ +from __future__ import absolute_import, division, print_function + +from .common import Benchmark + +import numpy as np + +avx_ufuncs = ['sqrt', + 'absolute', + 'reciprocal', + 'square', + 'rint', + 'floor', + 'ceil' , + 'trunc'] +stride = [1, 2, 4] +dtype = ['f', 'd'] + +class AVX_UFunc(Benchmark): + params = [avx_ufuncs, stride, dtype] + param_names = ['avx_based_ufunc', 'stride', 'dtype'] + timeout = 10 + + def setup(self, ufuncname, stride, dtype): + np.seterr(all='ignore') + try: + self.f = getattr(np, ufuncname) + except AttributeError: + raise NotImplementedError() + N = 10000 + self.arr = np.ones(stride*N, dtype) + + def time_ufunc(self, ufuncname, stride, dtype): + self.f(self.arr[::stride]) + diff --git a/doc/DISTUTILS.rst.txt b/doc/DISTUTILS.rst.txt index eadde63f8..bcef82500 100644 --- a/doc/DISTUTILS.rst.txt +++ b/doc/DISTUTILS.rst.txt @@ -243,7 +243,7 @@ in writing setup scripts: after processing all source generators, no extension module will be built. This is the recommended way to conditionally define extension modules. Source generator functions are called by the - ``build_src`` command of ``numpy.distutils``. + ``build_src`` sub-command of ``numpy.distutils``. For example, here is a typical source generator function:: diff --git a/doc/RELEASE_WALKTHROUGH.rst.txt b/doc/RELEASE_WALKTHROUGH.rst.txt index bd3e3c124..0a761e350 100644 --- a/doc/RELEASE_WALKTHROUGH.rst.txt +++ b/doc/RELEASE_WALKTHROUGH.rst.txt @@ -56,7 +56,7 @@ repository:: $ git checkout maintenance/1.14.x $ git pull upstream maintenance/1.14.x $ git submodule update - $ git clean -xdf > /dev/null + $ git clean -xdfq Edit pavement.py and setup.py as detailed in HOWTO_RELEASE:: @@ -83,7 +83,7 @@ Paver is used to build the source releases. It will create the ``release`` and ``release/installers`` directories and put the ``*.zip`` and ``*.tar.gz`` source releases in the latter. :: - $ cython --version # check that you have the correct cython version + $ python3 -m cython --version # check for correct cython version $ paver sdist # sdist will do a git clean -xdf, so we omit that @@ -232,28 +232,39 @@ add files, using an editable text window and as binary uploads. - Hit the ``{Publish,Update} release`` button at the bottom. -Upload documents to docs.scipy.org ----------------------------------- +Upload documents to numpy.org +----------------------------- This step is only needed for final releases and can be skipped for -pre-releases. You will also need upload permission for the document server, if -you do not have permission ping Pauli Virtanen or Ralf Gommers to generate and -upload the documentation. Otherwise:: +pre-releases. ``make merge-doc`` clones the ``numpy/doc`` repo into +``doc/build/merge`` and updates it with the new documentation:: $ pushd doc $ make dist - $ make upload USERNAME=<yourname> RELEASE=v1.14.5 + $ make merge-doc $ popd -If the release series is a new one, you will need to rebuild and upload the -``docs.scipy.org`` front page:: +If the release series is a new one, you will need to add a new section to the +``doc/build/merge/index.html`` front page just after the "insert here" comment:: - $ cd ../docs.scipy.org - $ gvim index.rst + $ gvim doc/build/merge/index.html +/'insert here' -Note: there is discussion about moving the docs to github. 
This section will be -updated when/if that happens. +Otherwise, only the ``zip`` and ``pdf`` links should be updated with the +new tag name:: + $ gvim doc/build/merge/index.html +/'tag v1.14' + +You can "test run" the new documentation in a browser to make sure the links +work:: + + $ firefox doc/build/merge/index.html + +Once everything seems satisfactory, commit and upload the changes:: + + $ pushd doc/build/merge + $ git commit -am"Add documentation for v1.14.5" + $ git push + $ popd Announce the release on scipy.org --------------------------------- diff --git a/doc/changelog/1.16.5-changelog.rst b/doc/changelog/1.16.5-changelog.rst new file mode 100644 index 000000000..19374058d --- /dev/null +++ b/doc/changelog/1.16.5-changelog.rst @@ -0,0 +1,54 @@ + +Contributors +============ + +A total of 18 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Alexander Shadchin +* Allan Haldane +* Bruce Merry + +* Charles Harris +* Colin Snyder + +* Dan Allan + +* Emile + +* Eric Wieser +* Grey Baker + +* Maksim Shabunin + +* Marten van Kerkwijk +* Matti Picus +* Peter Andreas Entschev + +* Ralf Gommers +* Richard Harris + +* Sebastian Berg +* Sergei Lebedev + +* Stephan Hoyer + +Pull requests merged +==================== + +A total of 23 pull requests were merged for this release. + +* `#13742 <https://github.com/numpy/numpy/pull/13742>`__: ENH: Add project URLs to setup.py +* `#13823 <https://github.com/numpy/numpy/pull/13823>`__: TEST, ENH: fix tests and ctypes code for PyPy +* `#13845 <https://github.com/numpy/numpy/pull/13845>`__: BUG: use npy_intp instead of int for indexing array +* `#13867 <https://github.com/numpy/numpy/pull/13867>`__: TST: Ignore DeprecationWarning during nose imports +* `#13905 <https://github.com/numpy/numpy/pull/13905>`__: BUG: Fix use-after-free in boolean indexing +* `#13933 <https://github.com/numpy/numpy/pull/13933>`__: MAINT/BUG/DOC: Fix errors in _add_newdocs +* `#13984 <https://github.com/numpy/numpy/pull/13984>`__: BUG: fix byte order reversal for datetime64[ns] +* `#13994 <https://github.com/numpy/numpy/pull/13994>`__: MAINT,BUG: Use nbytes to also catch empty descr during allocation +* `#14042 <https://github.com/numpy/numpy/pull/14042>`__: BUG: np.array cleared errors occured in PyMemoryView_FromObject +* `#14043 <https://github.com/numpy/numpy/pull/14043>`__: BUG: Fixes for Undefined Behavior Sanitizer (UBSan) errors. +* `#14044 <https://github.com/numpy/numpy/pull/14044>`__: BUG: ensure that casting to/from structured is properly checked. +* `#14045 <https://github.com/numpy/numpy/pull/14045>`__: MAINT: fix histogram*d dispatchers +* `#14046 <https://github.com/numpy/numpy/pull/14046>`__: BUG: further fixup to histogram2d dispatcher. +* `#14052 <https://github.com/numpy/numpy/pull/14052>`__: BUG: Replace contextlib.suppress for Python 2.7 +* `#14056 <https://github.com/numpy/numpy/pull/14056>`__: BUG: fix compilation of 3rd party modules with Py_LIMITED_API... +* `#14057 <https://github.com/numpy/numpy/pull/14057>`__: BUG: Fix memory leak in dtype from dict contructor +* `#14058 <https://github.com/numpy/numpy/pull/14058>`__: DOC: Document array_function at a higher level. +* `#14084 <https://github.com/numpy/numpy/pull/14084>`__: BUG, DOC: add new recfunctions to `__all__` +* `#14162 <https://github.com/numpy/numpy/pull/14162>`__: BUG: Remove stray print that causes a SystemError on python 3.7 +* `#14297 <https://github.com/numpy/numpy/pull/14297>`__: TST: Pin pytest version to 5.0.1. 
+* `#14322 <https://github.com/numpy/numpy/pull/14322>`__: ENH: Enable huge pages in all Linux builds +* `#14346 <https://github.com/numpy/numpy/pull/14346>`__: BUG: fix behavior of structured_to_unstructured on non-trivial... +* `#14382 <https://github.com/numpy/numpy/pull/14382>`__: REL: Prepare for the NumPy 1.16.5 release. diff --git a/doc/changelog/1.17.1-changelog.rst b/doc/changelog/1.17.1-changelog.rst new file mode 100644 index 000000000..c7c8b6c8e --- /dev/null +++ b/doc/changelog/1.17.1-changelog.rst @@ -0,0 +1,55 @@ + +Contributors +============ + +A total of 17 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Alexander Jung + +* Allan Haldane +* Charles Harris +* Eric Wieser +* Giuseppe Cuccu + +* Hiroyuki V. Yamazaki +* Jérémie du Boisberranger +* Kmol Yuan + +* Matti Picus +* Max Bolingbroke + +* Maxwell Aladago + +* Oleksandr Pavlyk +* Peter Andreas Entschev +* Sergei Lebedev +* Seth Troisi + +* Vladimir Pershin + +* Warren Weckesser + +Pull requests merged +==================== + +A total of 24 pull requests were merged for this release. + +* `#14156 <https://github.com/numpy/numpy/pull/14156>`__: TST: Allow fuss in testing strided/non-strided exp/log loops +* `#14157 <https://github.com/numpy/numpy/pull/14157>`__: BUG: avx2_scalef_ps must be static +* `#14158 <https://github.com/numpy/numpy/pull/14158>`__: BUG: Remove stray print that causes a SystemError on python 3.7. +* `#14159 <https://github.com/numpy/numpy/pull/14159>`__: BUG: Fix DeprecationWarning in python 3.8. +* `#14160 <https://github.com/numpy/numpy/pull/14160>`__: BLD: Add missing gcd/lcm definitions to npy_math.h +* `#14161 <https://github.com/numpy/numpy/pull/14161>`__: DOC, BUILD: cleanups and fix (again) 'build dist' +* `#14166 <https://github.com/numpy/numpy/pull/14166>`__: TST: Add 3.8-dev to travisCI testing. +* `#14194 <https://github.com/numpy/numpy/pull/14194>`__: BUG: Remove the broken clip wrapper (Backport) +* `#14198 <https://github.com/numpy/numpy/pull/14198>`__: DOC: Fix hermitian argument docs in svd. +* `#14199 <https://github.com/numpy/numpy/pull/14199>`__: MAINT: Workaround for Intel compiler bug leading to failing test +* `#14200 <https://github.com/numpy/numpy/pull/14200>`__: TST: Clean up of test_pocketfft.py +* `#14201 <https://github.com/numpy/numpy/pull/14201>`__: BUG: Make advanced indexing result on read-only subclass writeable... +* `#14236 <https://github.com/numpy/numpy/pull/14236>`__: BUG: Fixed default BitGenerator name +* `#14237 <https://github.com/numpy/numpy/pull/14237>`__: ENH: add c-imported modules for freeze analysis in np.random +* `#14296 <https://github.com/numpy/numpy/pull/14296>`__: TST: Pin pytest version to 5.0.1 +* `#14301 <https://github.com/numpy/numpy/pull/14301>`__: BUG: Fix leak in the f2py-generated module init and `PyMem_Del`... +* `#14302 <https://github.com/numpy/numpy/pull/14302>`__: BUG: Fix formatting error in exception message +* `#14307 <https://github.com/numpy/numpy/pull/14307>`__: MAINT: random: Match type of SeedSequence.pool_size to DEFAULT_POOL_SIZE. +* `#14308 <https://github.com/numpy/numpy/pull/14308>`__: BUG: Fix numpy.random bug in platform detection +* `#14309 <https://github.com/numpy/numpy/pull/14309>`__: ENH: Enable huge pages in all Linux builds +* `#14330 <https://github.com/numpy/numpy/pull/14330>`__: BUG: Fix segfault in `random.permutation(x)` when x is a string. 
+* `#14338 <https://github.com/numpy/numpy/pull/14338>`__: BUG: don't fail when lexsorting some empty arrays (#14228) +* `#14339 <https://github.com/numpy/numpy/pull/14339>`__: BUG: Fix misuse of .names and .fields in various places (backport... +* `#14345 <https://github.com/numpy/numpy/pull/14345>`__: BUG: fix behavior of structured_to_unstructured on non-trivial... +* `#14350 <https://github.com/numpy/numpy/pull/14350>`__: REL: Prepare 1.17.1 release diff --git a/doc/changelog/1.17.2-changelog.rst b/doc/changelog/1.17.2-changelog.rst new file mode 100644 index 000000000..144f40038 --- /dev/null +++ b/doc/changelog/1.17.2-changelog.rst @@ -0,0 +1,28 @@ + +Contributors +============ + +A total of 7 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* CakeWithSteak + +* Charles Harris +* Dan Allan +* Hameer Abbasi +* Lars Grueter +* Matti Picus +* Sebastian Berg + +Pull requests merged +==================== + +A total of 8 pull requests were merged for this release. + +* `#14418 <https://github.com/numpy/numpy/pull/14418>`__: BUG: Fix aradixsort indirect indexing. +* `#14420 <https://github.com/numpy/numpy/pull/14420>`__: DOC: Fix a minor typo in dispatch documentation. +* `#14421 <https://github.com/numpy/numpy/pull/14421>`__: BUG: test, fix regression in converting to ctypes +* `#14430 <https://github.com/numpy/numpy/pull/14430>`__: BUG: Do not show Override module in private error classes. +* `#14432 <https://github.com/numpy/numpy/pull/14432>`__: BUG: Fixed maximum relative error reporting in assert_allclose. +* `#14433 <https://github.com/numpy/numpy/pull/14433>`__: BUG: Fix uint-overflow if padding with linear_ramp and negative... +* `#14436 <https://github.com/numpy/numpy/pull/14436>`__: BUG: Update 1.17.x with 1.18.0-dev pocketfft.py. +* `#14446 <https://github.com/numpy/numpy/pull/14446>`__: REL: Prepare for NumPy 1.17.2 release. diff --git a/doc/changelog/1.17.3-changelog.rst b/doc/changelog/1.17.3-changelog.rst new file mode 100644 index 000000000..f911c8465 --- /dev/null +++ b/doc/changelog/1.17.3-changelog.rst @@ -0,0 +1,32 @@ + +Contributors +============ + +A total of 7 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Allan Haldane +* Charles Harris +* Kevin Sheppard +* Matti Picus +* Ralf Gommers +* Sebastian Berg +* Warren Weckesser + +Pull requests merged +==================== + +A total of 12 pull requests were merged for this release. + +* `#14456 <https://github.com/numpy/numpy/pull/14456>`__: MAINT: clean up pocketfft modules inside numpy.fft namespace. +* `#14463 <https://github.com/numpy/numpy/pull/14463>`__: BUG: random.hypergeometic assumes npy_long is npy_int64, hung... +* `#14502 <https://github.com/numpy/numpy/pull/14502>`__: BUG: random: Revert gh-14458 and refix gh-14557. +* `#14504 <https://github.com/numpy/numpy/pull/14504>`__: BUG: add a specialized loop for boolean matmul. +* `#14506 <https://github.com/numpy/numpy/pull/14506>`__: MAINT: Update pytest version for Python 3.8 +* `#14512 <https://github.com/numpy/numpy/pull/14512>`__: DOC: random: fix doc linking, was referencing private submodules. 
+* `#14513 <https://github.com/numpy/numpy/pull/14513>`__: BUG,MAINT: Some fixes and minor cleanup based on clang analysis +* `#14515 <https://github.com/numpy/numpy/pull/14515>`__: BUG: Fix randint when range is 2**32 +* `#14519 <https://github.com/numpy/numpy/pull/14519>`__: MAINT: remove the entropy c-extension module +* `#14563 <https://github.com/numpy/numpy/pull/14563>`__: DOC: remove note about Pocketfft license file (non-existing here). +* `#14578 <https://github.com/numpy/numpy/pull/14578>`__: BUG: random: Create a legacy implementation of random.binomial. +* `#14687 <https://github.com/numpy/numpy/pull/14687>`__: BUG: properly define PyArray_DescrCheck diff --git a/doc/neps/index.rst.tmpl b/doc/neps/index.rst.tmpl index 0ad8e0f80..4c5b7766f 100644 --- a/doc/neps/index.rst.tmpl +++ b/doc/neps/index.rst.tmpl @@ -23,7 +23,7 @@ Meta-NEPs (NEPs about NEPs or Processes) .. toctree:: :maxdepth: 1 -{% for nep, tags in neps.items() if tags['Type'] == 'Process' %} +{% for nep, tags in neps.items() if tags['Status'] == 'Active' %} {{ tags['Title'] }} <{{ tags['Filename'] }}> {% endfor %} diff --git a/doc/neps/nep-0000.rst b/doc/neps/nep-0000.rst index 89ba177cb..97b69279b 100644 --- a/doc/neps/nep-0000.rst +++ b/doc/neps/nep-0000.rst @@ -138,7 +138,7 @@ accepted that a competing proposal is a better alternative. When a NEP is ``Accepted``, ``Rejected``, or ``Withdrawn``, the NEP should be updated accordingly. In addition to updating the status field, at the very least the ``Resolution`` header should be added with a link to the relevant -post in the mailing list archives. +thread in the mailing list archives. NEPs can also be ``Superseded`` by a different NEP, rendering the original obsolete. The ``Replaced-By`` and ``Replaces`` headers diff --git a/doc/neps/nep-0019-rng-policy.rst b/doc/neps/nep-0019-rng-policy.rst index aa5fdc653..9704b24ca 100644 --- a/doc/neps/nep-0019-rng-policy.rst +++ b/doc/neps/nep-0019-rng-policy.rst @@ -7,7 +7,7 @@ NEP 19 — Random Number Generator Policy :Type: Standards Track :Created: 2018-05-24 :Updated: 2019-05-21 -:Resolution: https://mail.python.org/pipermail/numpy-discussion/2018-June/078126.html +:Resolution: https://mail.python.org/pipermail/numpy-discussion/2018-July/078380.html Abstract -------- diff --git a/doc/neps/nep-0024-missing-data-2.rst b/doc/neps/nep-0024-missing-data-2.rst index c8b19561f..f4414e0a0 100644 --- a/doc/neps/nep-0024-missing-data-2.rst +++ b/doc/neps/nep-0024-missing-data-2.rst @@ -28,7 +28,7 @@ Detailed description Rationale ^^^^^^^^^ -The purpose of this aNEP is to define two interfaces -- one for handling +The purpose of this NEP is to define two interfaces -- one for handling 'missing values', and one for handling 'masked arrays'. An ordinary value is something like an integer or a floating point number. 
A diff --git a/doc/neps/nep-0029-deprecation_policy.rst b/doc/neps/nep-0029-deprecation_policy.rst index fef0dc995..2f5c8ecb5 100644 --- a/doc/neps/nep-0029-deprecation_policy.rst +++ b/doc/neps/nep-0029-deprecation_policy.rst @@ -3,17 +3,18 @@ NEP 29 — Recommend Python and Numpy version support as a community policy stan ================================================================================== -:Author: Thomas A Caswell <tcaswell@gmail.com>, Andreas Mueller, Brian Granger, Madicken Munk, Ralf Gommers, Matt Haberland <mhaberla@calpoly.edu>, Matthias Bussonnier <bussonniermatthias@gmail.com>, Stefan van der Walt -:Status: Draft -:Type: Informational Track +:Author: Thomas A Caswell <tcaswell@gmail.com>, Andreas Mueller, Brian Granger, Madicken Munk, Ralf Gommers, Matt Haberland <mhaberla@calpoly.edu>, Matthias Bussonnier <bussonniermatthias@gmail.com>, Stefan van der Walt <stefanv@berkeley.edu> +:Status: Final +:Type: Informational :Created: 2019-07-13 +:Resolution: https://mail.python.org/pipermail/numpy-discussion/2019-October/080128.html Abstract -------- -This NEP recommends and encourages all projects across the Scientific -Python ecosystem to adopt a common "time window-based" policy for +This NEP recommends that all projects across the Scientific +Python ecosystem adopt a common "time window-based" policy for support of Python and NumPy versions. Standardizing a recommendation for project support of minimum Python and NumPy versions will improve downstream project planning. @@ -36,25 +37,25 @@ Detailed description For the purposes of this NEP we assume semantic versioning and define: *major version* - A release that change the first number (e.g. X.0.0) + A release that changes the first number (e.g. X.0.0) *minor version* - A release that changes the second number (e.g x.Y.0) + A release that changes the second number (e.g 1.Y.0) *patch version* - A release that changes the third number (e.g. x.y.Z) + A release that changes the third number (e.g. 1.1.Z) -When a project creates a new major or minor version, we recommend that -the project should support at least all minor versions of Python -introduced and released in the prior 42 months ~~from their -anticipated release date~~ with a minimum of 2 minor versions of +When a project releases a new major or minor version, we recommend that +they support at least all minor versions of Python +introduced and released in the prior 42 months *from the +anticipated release date* with a minimum of 2 minor versions of Python, and all minor versions of NumPy released in the prior 24 -months ~~from their anticipated release date~~ with a minimum of 3 +months *from the anticipated release date* with a minimum of 3 minor versions of NumPy. -The diagram:: +Consider the following timeline:: Jan 16 Jan 17 Jan 18 Jan 19 Jan 20 | | | | | @@ -65,33 +66,33 @@ The diagram:: |-----------------------------------------> Dec19 |-----------------------------------------> Nov20 -shows the 42 month support windows for Python. A project with a -major or minor version release in Feb19 should support py35 and newer, -a project with a major or minor version release in Dec19 should -support py36 and newer, and a project with a major or minor version -release in Nov20 should support py37 and newer. +It shows the 42 month support windows for Python. 
A project with a
+major or minor version release in February 2019 should support Python 3.5 and newer,
+a project with a major or minor version released in December 2019 should
+support Python 3.6 and newer, and a project with a major or minor version
+release in November 2020 should support Python 3.7 and newer.
 
 The current Python release cadence is 18 months so a 42 month window
 ensures that there will always be at least two minor versions of Python
-in the window. By padding the window by 6 months from the anticipated
-Python cadence we avoid the edge cases where a project releases
-the month after Python and would effectively only support one
-minor version of Python that has an installed base.
-This six month buffer provides resilience to minor fluctuations /
-delays in the Python release schedule.
-
-Because Python minor version support is based on historical release
-dates, a 36 month time window, and a project's plans, a project can
-decide to drop a given minor version of Python very early in the release
-process.
-
-While there will be some unavoidable mismatch in supported versions of
-Python between projects if releases occurs immediately after a
-minor version of Python ages out. This should not last longer than one
-release cycle of each of the projects, and when a given project does a
-minor or major release, it is guaranteed that there will be a stable
-release of all other projects that support the set of Python the
-new release will support.
+in the window. The window is extended 6 months beyond the anticipated two-release
+interval for Python to provide resilience against small fluctuations /
+delays in its release schedule.
+
+Because Python minor version support is based only on historical
+release dates, a 42 month time window, and a planned project release
+date, one can predict with high confidence when a project will be able
+to drop any given minor version of Python. This, in turn, could save
+months of unnecessary maintenance burden.
+
+If a project releases immediately after a minor version of Python
+drops out of the support window, there will inevitably be some
+mismatch in supported versions, but this situation should only last
+until other projects in the ecosystem make releases.
+
+Otherwise, once a project does a minor or major release, it is
+guaranteed that there will be a stable release of all other projects
+that, at the source level, support the same set of Python versions
+supported by the new release.
 
 If there is a Python 4 or a NumPy 2 this policy will have to be
 reviewed in light of the community's and projects' best interests.
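The window rule above is mechanical enough to compute directly. A minimal sketch of that computation follows; it is illustrative only (not part of the NEP or of any official tooling), it approximates a month as 30 days, and the 3.5 and 3.6 release dates match the drop schedule below:

.. code:: python

    from datetime import datetime, timedelta

    def supported_versions(releases, window_months, minimum, today):
        # releases maps each minor version to its first release date
        cutoff = today - timedelta(days=window_months * 30)
        ordered = sorted(releases, key=releases.get)  # oldest to newest
        in_window = [v for v in ordered if releases[v] >= cutoff]
        # Enforce the floor: never fewer than `minimum` minor versions.
        return in_window if len(in_window) >= minimum else ordered[-minimum:]

    python_releases = {
        '3.5': datetime(2015, 9, 13),
        '3.6': datetime(2016, 12, 23),
        '3.7': datetime(2018, 6, 27),
    }
    # A release planned for Jan 07, 2020 supports Python 3.6+,
    # matching the support table below.
    print(supported_versions(python_releases, 42, 2, datetime(2020, 1, 7)))
    # -> ['3.6', '3.7']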
@@ -103,9 +104,6 @@ Support Table ============ ====== ===== Date Python NumPy ------------ ------ ----- -Jan 16, 2019 3.5+ 1.13+ -Mar 14, 2019 3.6+ 1.13+ -Jun 08, 2019 3.6+ 1.14+ Jan 07, 2020 3.6+ 1.15+ Jun 23, 2020 3.7+ 1.15+ Jul 23, 2020 3.7+ 1.16+ @@ -120,9 +118,7 @@ Drop Schedule :: - On Jan 16, 2019 drop support for Numpy 1.12 (initially released on Jan 15, 2017) - On Mar 14, 2019 drop support for Python 3.5 (initially released on Sep 13, 2015) - On Jun 08, 2019 drop support for Numpy 1.13 (initially released on Jun 07, 2017) + On next release, drop support for Python 3.5 (initially released on Sep 13, 2015) On Jan 07, 2020 drop support for Numpy 1.14 (initially released on Jan 06, 2018) On Jun 23, 2020 drop support for Python 3.6 (initially released on Dec 23, 2016) On Jul 23, 2020 drop support for Numpy 1.15 (initially released on Jul 23, 2018) @@ -137,27 +133,20 @@ Implementation We suggest that all projects adopt the following language into their development guidelines: + This project supports: - - This project supports at least the minor versions of Python - initially released 42 months prior to a planned project release - date. - - The project will always support at least the 2 latest minor - versions of Python. - - support minor versions of ``numpy`` initially released in the 24 - months prior to a planned project release date or the oldest - version that supports the minimum Python version (whichever is - higher). - - The project will always support at least the 3 latest minor - versions of NumPy. + - All minor versions of Python released 42 months prior to the + project, and at minimum the two latest minor versions. + - All minor versions of ``numpy`` released in the 24 months prior + to the project, and at minimum the last three minor versions. - The minimum supported version of Python will be set to - ``python_requires`` in ``setup``. All supported minor versions of - Python will be in the test matrix and have binary artifacts built - for releases. + In ``setup.py``, the ``python_requires`` variable should be set to + the minimum supported version of Python. All supported minor + versions of Python should be in the test matrix and have binary + artifacts built for the release. - The project should adjust upward the minimum Python and NumPy - version support on every minor and major release, but never on a - patch release. + Minimum Python and NumPy version support should be adjusted upward + on every major and minor release, but never on a patch release. Backward compatibility @@ -171,28 +160,28 @@ Alternatives Ad-Hoc version support ~~~~~~~~~~~~~~~~~~~~~~ -A project could on every release evaluate whether to increase +A project could, on every release, evaluate whether to increase the minimum version of Python supported. As a major downside, an ad-hoc approach makes it hard for downstream users to predict what the future minimum versions will be. As there is no objective threshold to when the minimum version should be dropped, it is easy for these -version support discussions to devolve into [bike shedding](https://en.wikipedia.org/wiki/Wikipedia:Avoid_Parkinson%27s_bicycle-shed_effect) and acrimony. +version support discussions to devolve into `bike shedding <https://en.wikipedia.org/wiki/Wikipedia:Avoid_Parkinson%27s_bicycle-shed_effect>`_ and acrimony. All CPython supported versions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The CPython supported versions of Python are listed in the Python -Developers Guide and the Python PEPs. 
Supporting these is a very -clear and conservative approach. However, it means that there is 4 -year lag between when new language features come into the language and -when the projects are able to use them. Additionally, for projects -that have a significant component of compiled extensions this requires -building many binary artifacts for each release. +Developers Guide and the Python PEPs. Supporting these is a very clear +and conservative approach. However, it means that there exists a four +year lag between when a new features is introduced into the language +and when a project is able to use it. Additionally, for projects with +compiled extensions this requires building many binary artifacts for +each release. For the case of NumPy, many projects carry workarounds to bugs that are fixed in subsequent versions of NumPy. Being proactive about -increasing the minimum version of NumPy will allow downstream +increasing the minimum version of NumPy allows downstream packages to carry fewer version-specific patches. @@ -203,13 +192,14 @@ Default version on Linux distribution The policy could be to support the version of Python that ships by default in the latest Ubuntu LTS or CentOS/RHEL release. However, we would still have to standardize across the community which -distribution we are following. +distribution to follow. By following the versions supported by major Linux distributions, we are giving up technical control of our projects to external organizations that may have different motivations and concerns than we do. + N minor versions of Python ~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -223,12 +213,10 @@ A more fundamental problem with a policy based on number of Python releases is that it is hard to predict when support for a given minor version of Python will be dropped as that requires correctly predicting the release schedule of Python for the next 3-4 years. A -time-based rule is only depends on things that have already happened +time-based rule, in contrast, only depends on past events and the length of the support window. - - Time window from the X.Y.1 Python release ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/neps/nep-0030-duck-array-protocol.rst b/doc/neps/nep-0030-duck-array-protocol.rst index 8eb2560d5..353c5df1e 100644 --- a/doc/neps/nep-0030-duck-array-protocol.rst +++ b/doc/neps/nep-0030-duck-array-protocol.rst @@ -16,7 +16,7 @@ Abstract We propose the ``__duckarray__`` protocol, following the high-level overview described in NEP 22, allowing downstream libraries to return arrays of their defined types, in contrast to ``np.asarray``, that coerces those ``array_like`` -to NumPy arrays. +objects to NumPy arrays. Detailed description -------------------- @@ -35,14 +35,38 @@ coercion of a NumPy-like array to a pure NumPy array where necessary, while still allowing that NumPy-like array libraries that do not wish to implement the protocol to coerce arrays to a pure Numpy array via ``np.asarray``. +Usage Guidance +~~~~~~~~~~~~~~ + +Code that uses np.duckarray is meant for supporting other ndarray-like objects +that "follow the NumPy API". That is an ill-defined concept at the moment -- +every known library implements the NumPy API only partly, and many deviate +intentionally in at least some minor ways. This cannot be easily remedied, so +for users of ``__duckarray__`` we recommend the following strategy: check if the +NumPy functionality used by the code that follows your use of ``__duckarray__`` +is present in Dask, CuPy and Sparse. 
If so, it's reasonable to expect any duck +array to work here. If not, we suggest you indicate in your docstring what kinds +of duck arrays are accepted, or what properties they need to have. + +To exemplify the usage of duck arrays, suppose one wants to take the ``mean()`` +of an array-like object ``arr``. Using NumPy, one could write +``np.asarray(arr).mean()`` to achieve the intended result. However, libraries +may expect ``arr`` to be a NumPy-like array, and at the same time, the array may +or may not be an object compliant with the NumPy API (either fully or partially), +such as a CuPy, Sparse or a Dask array. In the case where ``arr`` is already an +object compliant with the NumPy API, we would simply return it (and prevent it +from being coerced into a pure NumPy array); otherwise, it would be coerced +into a NumPy array. + Implementation -------------- The implementation idea is fairly straightforward, requiring a new function ``duckarray`` to be introduced in NumPy, and a new method ``__duckarray__`` in NumPy-like array classes. The new ``__duckarray__`` method shall return the -downstream array-like object itself, such as the ``self`` object, while the -``__array__`` method returns ``TypeError``. +downstream array-like object itself, such as the ``self`` object. If appropriate, +an ``__array__`` method may be implemented that returns a NumPy array or raises +a ``TypeError`` with a helpful message. The new NumPy ``duckarray`` function can be implemented as follows: @@ -67,15 +91,68 @@ a complete implementation would look like the following: return self def __array__(self): - return TypeError + raise TypeError("NumPyLikeArray cannot be converted to a numpy array. " + "You may want to use np.duckarray.") The implementation above exemplifies the simplest case, but the overall idea is that libraries will implement a ``__duckarray__`` method that returns the -original object, and ``__array__`` solely for the purpose of raising a -``TypeError``, thus preventing unintentional NumPy-coercion. In case of existing -libraries that don't already implement ``__array__`` but would like to use duck -array typing, it is advised that they they introduce both ``__array__`` and -``__duckarray__`` methods. +original object, and an ``__array__`` method that either creates and returns an +appropriate NumPy array, or raises a ``TypeError`` to prevent unintentional use +as an object in a NumPy array (if ``np.asarray`` is called on an arbitrary +object that does not implement ``__array__``, it will create a 0-d object +array). + +In case of existing libraries that don't already implement ``__array__`` but +would like to use duck array typing, it is advised that they introduce +both ``__array__`` and ``__duckarray__`` methods. + +Usage ----- + +An example of how the ``__duckarray__`` protocol could be used to write a +``stack`` function based on ``concatenate``, and the outcome it produces, can be +seen below. The example here was chosen not only to demonstrate the usage of +the ``duckarray`` function, but also its dependency on the NumPy +API, evidenced by the checks on the array's ``shape`` attribute. Note that the +example is merely a simplified version of NumPy's actual implementation of +``stack`` working on the first axis, and it is assumed that Dask has implemented +the ``__duckarray__`` method. + +..
code:: python + + def duckarray_stack(arrays): + arrays = [np.duckarray(arr) for arr in arrays] + + shapes = {arr.shape for arr in arrays} + if len(shapes) != 1: + raise ValueError('all input arrays must have the same shape') + + expanded_arrays = [arr[np.newaxis, ...] for arr in arrays] + return np.concatenate(expanded_arrays, axis=0) + + dask_arr = dask.array.arange(10) + np_arr = np.arange(10) + np_like = list(range(10)) + + duckarray_stack((dask_arr, dask_arr)) # Returns dask.array + duckarray_stack((dask_arr, np_arr)) # Returns dask.array + duckarray_stack((dask_arr, np_like)) # Returns dask.array + +In contrast, using only ``np.asarray`` (at the time of writing this NEP, this +is the usual method employed by library developers to ensure arrays are +NumPy-like) has a different outcome: + +.. code:: python + + def asarray_stack(arrays): + arrays = [np.asarray(arr) for arr in arrays] + + # The remaining implementation is the same as that of + # ``duckarray_stack`` above + + asarray_stack((dask_arr, dask_arr)) # Returns np.ndarray + asarray_stack((dask_arr, np_arr)) # Returns np.ndarray + asarray_stack((dask_arr, np_like)) # Returns np.ndarray Backward compatibility ---------------------- diff --git a/doc/neps/nep-0031-uarray.rst b/doc/neps/nep-0031-uarray.rst new file mode 100644 index 000000000..3519b6bc0 --- /dev/null +++ b/doc/neps/nep-0031-uarray.rst @@ -0,0 +1,637 @@ +============================================================ +NEP 31 — Context-local and global overrides of the NumPy API +============================================================ + +:Author: Hameer Abbasi <habbasi@quansight.com> +:Author: Ralf Gommers <rgommers@quansight.com> +:Author: Peter Bell <pbell@quansight.com> +:Status: Draft +:Type: Standards Track +:Created: 2019-08-22 + + Abstract -------- + +This NEP proposes to make all of NumPy's public API overridable via an +extensible backend mechanism. + +Acceptance of this NEP means NumPy would provide global and context-local +overrides, as well as a dispatch mechanism similar to NEP-18 [2]_. First +experiences with ``__array_function__`` show that it is necessary to be able +to override NumPy functions that *do not take an array-like argument*, and +hence aren't overridable via ``__array_function__``. The most pressing need is +array creation and coercion functions, such as ``numpy.zeros`` or +``numpy.asarray``; see e.g. NEP-30 [9]_. + +This NEP proposes to allow, in an opt-in fashion, overriding any part of the +NumPy API. It is intended as a comprehensive resolution to NEP-22 [3]_, and +obviates the need to add an ever-growing list of new protocols for each new +type of function or object that needs to become overridable. + +Motivation and Scope -------------------- + +The motivation behind ``uarray`` is many-fold: First, there have been several +attempts to allow dispatch of parts of the NumPy API, including (most +prominently) the ``__array_ufunc__`` protocol in NEP-13 [4]_, and the +``__array_function__`` protocol in NEP-18 [2]_, but this has shown the need +for further protocols to be developed, including a protocol for coercion (see +[5]_, [9]_). The reasons these overrides are needed have been extensively +discussed in the references, and this NEP will not attempt to go into the +details of why these are needed. In short: it is necessary for library +authors to be able to coerce arbitrary objects into arrays of their own types, +such as CuPy needing to coerce to a CuPy array, for example, instead of +a NumPy array.
In simpler words, one needs things like ``np.asarray(...)`` or +an alternative to "just work" and return duck-arrays. + +The primary end-goal of this NEP is to make the following possible: + +.. code:: python + + # On the library side + import numpy.overridable as unp + + def library_function(array): + array = unp.asarray(array) + # Code using unumpy as usual + return array + + # On the user side: + import numpy.overridable as unp + import uarray as ua + import dask.array as da + + ua.register_backend(da) # Can be done within Dask itself + + library_function(dask_array) # works and returns dask_array + + with unp.set_backend(da): + library_function([1, 2, 3, 4]) # actually returns a Dask array. + +Here, ``backend`` can be any compatible object defined either by NumPy or an +external library, such as Dask or CuPy. Ideally, it should be the module +``dask.array`` or ``cupy`` itself. + +These kinds of overrides are useful both for end-users and for library +authors. End-users may have written or wish to write code that they then later +speed up or move to a different implementation, say PyData/Sparse. They can do +this simply by setting a backend. Library authors may also wish to write code +that is portable across array implementations, for example ``sklearn`` may wish +to write a machine learning algorithm in such a portable fashion while also +using array creation functions. + +This NEP takes a holistic approach: It assumes that there are parts of +the API that need to be overridable, and that these will grow over time. It +provides a general framework and a mechanism to avoid designing a new +protocol each time one is required. This was the goal of ``uarray``: to +allow for overrides in an API without needing the design of a new protocol. + +This NEP proposes the following: That ``unumpy`` [8]_ becomes the +recommended override mechanism for the parts of the NumPy API not yet covered +by ``__array_function__`` or ``__array_ufunc__``, and that ``uarray`` is +vendored into a new namespace within NumPy to give users and downstream +dependencies access to these overrides. This vendoring mechanism is similar +to what SciPy decided to do for making ``scipy.fft`` overridable (see [10]_). + + Detailed description -------------------- Using overrides ~~~~~~~~~~~~~~~ Here are a few examples of how an end-user would use overrides. .. code:: python + + data = da.from_zarr('myfile.zarr') + # result should still be dask, all things being equal + result = library_function(data) + result.to_zarr('output.zarr') + +This would keep working, assuming the Dask backend was either set or +registered. Registration can also be done at import-time. + +Now consider another function, and what would need to happen in order to +make this work: + +.. code:: python + + from dask import array as da + from magic_library import pytorch_predict + + data = da.from_zarr('myfile.zarr') + # normally here one would use e.g. data.map_overlap + result = pytorch_predict(data) + result.to_zarr('output.zarr') + +This would work in two scenarios: The first is that ``pytorch_predict`` was a +multimethod, and implemented by the Dask backend. Dask could provide utility +functions to allow external libraries to register implementations. + +The second, and perhaps more useful, scenario is that ``pytorch_predict`` was +defined idiomatically in terms of other NumPy multimethods, and that Dask +implemented the required multimethods itself, e.g. ``np.convolve``.
If this +happened, then the above example would work without either ``magic_library`` +or Dask having to do anything specific to the other. + +Composing backends +~~~~~~~~~~~~~~~~~~ + +There are some backends which may depend on other backends, for example xarray +depending on `numpy.fft` to transform a time axis into a frequency axis, +or Dask/xarray holding an array other than a NumPy array inside it. This would +be handled in the following manner inside code:: + + with ua.set_backend(cupy), ua.set_backend(dask.array): + # Code that has distributed GPU arrays here + +Proposals +~~~~~~~~~ + +The only change this NEP proposes at its acceptance is to make ``unumpy`` the +officially recommended way to override NumPy, along with making some submodules +overridable by default via ``uarray``. ``unumpy`` will remain a separate +repository/package (which we propose to vendor to avoid a hard dependency, and +use the separate ``unumpy`` package only if it is installed, rather than depend +on it, for the time being). In concrete terms, ``numpy.overridable`` becomes an +alias for ``unumpy``, if available, with a fallback to the vendored version if +not. ``uarray`` and ``unumpy`` will be developed primarily with the input +of duck-array authors and, secondarily, custom dtype authors, via the usual +GitHub workflow. There are a few reasons for this: +* Faster iteration in the case of bugs or issues. +* Faster design changes, in the case of needed functionality. +* ``unumpy`` will work with older versions of NumPy as well. +* The user and library author opt in to the override process, + rather than breakages happening when it is least expected. + In simple terms, bugs in ``unumpy`` mean that ``numpy`` remains + unaffected. +* For ``numpy.fft``, ``numpy.linalg`` and ``numpy.random``, the functions in + the main namespace will mirror those in the ``numpy.overridable`` namespace. + The reason for this is that there may exist functions in these + submodules that need backends, even for ``numpy.ndarray`` inputs. + +Advantages of ``unumpy`` over other solutions ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +``unumpy`` offers a number of advantages over the approach of defining a new +protocol for every problem encountered: Whenever there is something requiring +an override, ``unumpy`` will be able to offer a unified API with very minor +changes. For example: + +* ``ufunc`` objects can be overridden via their ``__call__``, ``reduce`` and + other methods. +* Other functions can be overridden in a similar fashion. +* ``np.asduckarray`` goes away, and becomes ``np.overridable.asarray`` with a + backend set. +* The same holds for array creation functions such as ``np.zeros``, + ``np.empty`` and so on. + +This also holds for the future: Making something overridable would require only +minor changes to ``unumpy``. + +Another promise ``unumpy`` holds is one of default implementations. Default +implementations can be provided for any multimethod, in terms of others. This +allows one to override a large part of the NumPy API by defining only a small +part of it. This is to ease the creation of new duck-arrays, by providing +default implementations of many functions that can be easily expressed in +terms of others, as well as a repository of utility functions that most +duck-array implementations would require.
This would +allow us to avoid designing entire protocols, e.g., a protocol for stacking +and concatenating would be replaced by simply implementing ``stack`` and/or +``concatenate`` and then providing default implementations for everything else +in that class. The same applies to transposing, and many other functions for +which protocols haven't been proposed, such as ``isin`` in terms of ``in1d``, +``setdiff1d`` in terms of ``unique``, and so on. + +It also allows one to override functions in a manner which +``__array_function__`` simply cannot, such as overriding ``np.einsum`` with the +version from the ``opt_einsum`` package, or Intel MKL overriding FFT, BLAS +or ``ufunc`` objects. They would define a backend with the appropriate +multimethods, and the user would select them via a ``with`` statement, or +by registering them as a backend. + +The last benefit is a clear way to coerce to a given backend (via the +``coerce`` keyword in ``ua.set_backend``), and a protocol +for coercing not only arrays, but also ``dtype`` objects and ``ufunc`` objects +with similar ones from other libraries. This is due to the existence of actual, +third party dtype packages, and their desire to blend into the NumPy ecosystem +(see [6]_). This is a separate issue compared to the C-level dtype redesign +proposed in [7]_; it's about allowing third-party dtype implementations to +work with NumPy, much like third-party array implementations. These can provide +features such as units or jagged arrays that are outside the scope of NumPy. + +Mixing NumPy and ``unumpy`` in the same file +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Normally, one would want to import only one of ``unumpy`` or ``numpy``, +and import it as ``np`` for familiarity. However, there may be situations +where one wishes to mix NumPy and the overrides, and there are a few ways to do +this, depending on the user's style:: + + from numpy import overridable as unp + import numpy as np + +or:: + + import numpy as np + + # Use unumpy via np.overridable + +Duck-array coercion +~~~~~~~~~~~~~~~~~~~ + +There are inherent problems with returning objects that are not NumPy arrays +from ``numpy.array`` or ``numpy.asarray``, particularly in the context of C/C++ +or Cython code that may get an object with a different memory layout than the +one it expects. However, we believe this problem may apply not only to these +two functions but to all functions that return NumPy arrays. For this reason, +overrides are opt-in for the user, by using the submodule ``numpy.overridable`` +rather than ``numpy``. NumPy will continue to work unaffected by anything in +``numpy.overridable``. + +If the user wishes to obtain a NumPy array, there are two ways of doing it: + +1. Use ``numpy.asarray`` (the non-overridable version). +2. Use ``numpy.overridable.asarray`` with the NumPy backend set and coercion + enabled. + +Aliases outside of the ``numpy.overridable`` namespace +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +All functionality in ``numpy.random``, ``numpy.linalg`` and ``numpy.fft`` +will be aliased to their respective overridable versions inside +``numpy.overridable``. The reason for this is that there are alternative +implementations of RNGs (``mkl-random``), linear algebra routines (``eigen``, +``blis``) and FFT routines (``mkl-fft``, ``pyFFTW``) that need to operate on +``numpy.ndarray`` inputs, but still need the ability to switch behaviour.
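To make the intent of this section concrete, here is a minimal sketch of such a switch (illustrative only, not part of the proposed API surface; the ``pyfftw_backend`` module is hypothetical and stands in for any object implementing the ``uarray`` backend protocols):

.. code:: python

    import numpy as np
    import uarray as ua
    import numpy.overridable as unp

    import pyfftw_backend  # hypothetical module defining __ua_domain__ etc.

    x = np.random.rand(1024)

    # With no backend set, the default NumPy implementation runs.
    unp.fft.fft(x)

    with ua.set_backend(pyfftw_backend):
        # The same call on the same plain ndarray input, now
        # dispatched to the alternative FFT implementation.
        unp.fft.fft(x)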
+ +This is different from monkeypatching in a few ways: + +* The caller-facing signature of the function is always the same, + so there is at least the loose sense of an API contract. Monkeypatching + does not provide this guarantee. +* There is the ability to switch the backend locally. +* It has been `suggested <http://numpy-discussion.10968.n7.nabble.com/NEP-31-Context-local-and-global-overrides-of-the-NumPy-API-tp47452p47472.html>`_ + that the reason that 1.17 hasn't landed in the Anaconda defaults channel is + due to the incompatibility between monkeypatching and ``__array_function__``, + as monkeypatching would bypass the protocol completely. +* Statements of the form ``from numpy import x; x`` and ``np.x`` would have + different results depending on whether the import was made before or + after monkeypatching happened. + +None of this is possible with ``__array_function__`` or +``__array_ufunc__``. + +It has been formally realised (at least in part) that a backend system is +needed for this, in the `NumPy roadmap <https://numpy.org/neps/roadmap.html#other-functionality>`_. + +For ``numpy.random``, it's still necessary to make the C-API fit the one +proposed in `NEP-19 <https://numpy.org/neps/nep-0019-rng-policy.html>`_. +This is impossible for `mkl-random`, because then it would need to be +rewritten to fit that framework. The guarantees on stream +compatibility will be the same as before, but if a backend that affects +``numpy.random`` is set, we make no guarantees about stream compatibility, and it +is up to the backend author to provide their own guarantees. + +Providing a way for implicit dispatch +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +It has been suggested that the ability to dispatch methods which do not take +a dispatchable argument is needed, inferring the backend from another dispatchable. + +As a concrete example, consider the following: + +.. code:: python + + with unumpy.determine_backend(array_like, np.ndarray): + unumpy.arange(len(array_like)) + +While this does not exist yet in ``uarray``, it is trivial to add it. The need for +this kind of code exists because one might want to have an alternative for the +proposed ``*_like`` functions, or the ``like=`` keyword argument. The need for these +exists because there are functions in the NumPy API that do not take a dispatchable +argument, but there is still the need to select a backend based on a different +dispatchable. + +The need for an opt-in module +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The need for an opt-in module arises for a few reasons: + +* There are parts of the API (like `numpy.asarray`) that simply cannot be + overridden due to incompatibility concerns with C/Cython extensions, however, + one may want to coerce to a duck-array using ``asarray`` with a backend set. +* There are possible issues around an implicit option and monkeypatching, such + as those mentioned above. + +NEP 18 notes that this may require maintenance of two separate APIs. However, +this burden may be lessened by, for example, parametrizing all tests over +``numpy.overridable`` separately via a fixture. This also has the side-effect +of thoroughly testing it, unlike ``__array_function__``. We also feel that it +provides an opportunity to separate the NumPy API contract properly from the +implementation. + +Benefits to end-users and mixing backends +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Mixing backends is easy in ``uarray``; one only has to do: + ..
code:: python + + # Explicitly say which backends you want to mix + ua.register_backend(backend1) + ua.register_backend(backend2) + ua.register_backend(backend3) + + # Freely use code that mixes backends here. + +The benefits to end-users extend beyond just writing new code. Old code +(usually in the form of scripts) can be easily ported to different backends +by a simple import switch and a line adding the preferred backend. This way, +users may find it easier to port existing code to GPU or distributed computing. + Related Work ------------ Other override mechanisms ~~~~~~~~~~~~~~~~~~~~~~~~~ + +* NEP-18, the ``__array_function__`` protocol. [2]_ +* NEP-13, the ``__array_ufunc__`` protocol. [4]_ +* NEP-30, the ``__duckarray__`` protocol. [9]_ + +Existing NumPy-like array implementations +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +* Dask: https://dask.org/ +* CuPy: https://cupy.chainer.org/ +* PyData/Sparse: https://sparse.pydata.org/ +* Xnd: https://xnd.readthedocs.io/ +* Astropy's Quantity: https://docs.astropy.org/en/stable/units/ + +Existing and potential consumers of alternative arrays +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +* Dask: https://dask.org/ +* scikit-learn: https://scikit-learn.org/ +* xarray: https://xarray.pydata.org/ +* TensorLy: http://tensorly.org/ + +Existing alternate dtype implementations +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +* ``ndtypes``: https://ndtypes.readthedocs.io/en/latest/ +* Datashape: https://datashape.readthedocs.io +* Plum: https://plum-py.readthedocs.io/ + +Alternate implementations of parts of the NumPy API +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +* ``mkl_random``: https://github.com/IntelPython/mkl_random +* ``mkl_fft``: https://github.com/IntelPython/mkl_fft +* ``bottleneck``: https://github.com/pydata/bottleneck +* ``opt_einsum``: https://github.com/dgasmith/opt_einsum + +Implementation -------------- The implementation of this NEP will require the following steps: + +* Implementation of ``uarray`` multimethods corresponding to the + NumPy API, including classes for overriding ``dtype``, ``ufunc`` + and ``array`` objects, in the ``unumpy`` repository, which are usually + very easy to create. +* Moving backends from ``unumpy`` into the respective array libraries. + +Maintenance can be eased by testing over ``{numpy, unumpy}`` via parameterized +tests. If a new argument is added to a method, the corresponding argument +extractor and replacer will need to be updated within ``unumpy``. + +A lot of argument extractors can be re-used from the existing implementation +of the ``__array_function__`` protocol, and the replacers can usually be +re-used across many methods. + +For the parts of the namespace which are going to be overridable by default, +the main method will need to be renamed and hidden behind a ``uarray`` multimethod. + +Default implementations are usually seen in the documentation, introduced by the +words "equivalent to", and are thus readily available. + +``uarray`` Primer +~~~~~~~~~~~~~~~~~ + +**Note:** *This section will not attempt to go into too much detail about +uarray; that is the purpose of the uarray documentation.* [1]_ +*However, the NumPy community will have input into the design of +uarray, via the issue tracker.* + +``unumpy`` is the interface that defines a set of overridable functions +(multimethods) compatible with the NumPy API. To do this, it uses the +``uarray`` library.
``uarray`` is a general purpose tool for creating +multimethods that dispatch to one of multiple possible backend +implementations. In this sense, it is similar to the ``__array_function__`` +protocol but with the key difference that the backend is explicitly installed +by the end-user and not coupled into the array type. + +Decoupling the backend from the array type gives much more flexibility to +end-users and backend authors. For example, it is possible to: + +* override functions not taking arrays as arguments +* create backends out of source from the array type +* install multiple backends for the same array type + +This decoupling also means that ``uarray`` is not constrained to dispatching +over array-like types. The backend is free to inspect the entire set of +function arguments to determine if it can implement the function, e.g. ``dtype`` +parameter dispatching. + +Defining backends +^^^^^^^^^^^^^^^^^ + +``uarray`` consists of two main protocols: ``__ua_convert__`` and +``__ua_function__``, called in that order, along with ``__ua_domain__``. +``__ua_convert__`` is for conversion and coercion. It has the signature +``(dispatchables, coerce)``, where ``dispatchables`` is an iterable of +``ua.Dispatchable`` objects and ``coerce`` is a boolean indicating whether or +not to force the conversion. ``ua.Dispatchable`` is a simple class consisting +of three values: ``type``, ``value``, and ``coercible``. +``__ua_convert__`` returns an iterable of the converted values, or +``NotImplemented`` in the case of failure. + +``__ua_function__`` has the signature ``(func, args, kwargs)`` and defines +the actual implementation of the function. It receives the function and its +arguments. Returning ``NotImplemented`` will cause a move to the default +implementation of the function if one exists, and failing that, the next +backend. + +Here is what will happen assuming a ``uarray`` multimethod is called: + +1. We canonicalise the arguments so any arguments without a default + are placed in ``*args`` and those with one are placed in ``**kwargs``. +2. We check the list of backends. + + a. If it is empty, we try the default implementation. + +3. We check if the backend's ``__ua_convert__`` method exists. If it exists: + + a. We pass it the output of the dispatcher, + which is an iterable of ``ua.Dispatchable`` objects. + b. We feed this output, along with the arguments, + to the argument replacer. ``NotImplemented`` means we move to 3 + with the next backend. + c. We store the replaced arguments as the new arguments. + +4. We feed the arguments into ``__ua_function__``, and return the output, + exiting if it isn't ``NotImplemented``. +5. If the default implementation exists, we try it with the current backend. +6. On failure, we move to 3 with the next backend. If there are no more + backends, we move to 7. +7. We raise a ``ua.BackendNotImplementedError``. + +Defining overridable multimethods +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +To define an overridable function (a multimethod), one needs a few things: + +1. A dispatcher that returns an iterable of ``ua.Dispatchable`` objects. +2. A reverse dispatcher that replaces dispatchable values with the supplied + ones. +3. A domain. +4. Optionally, a default implementation, which can be provided in terms of + other multimethods.
+ +As an example, consider the following:: + + import uarray as ua + + def full_argreplacer(args, kwargs, dispatchables): + def full(shape, fill_value, dtype=None, order='C'): + return (shape, fill_value), dict( + dtype=dispatchables[0], + order=order + ) + + return full(*args, **kwargs) + + @ua.create_multimethod(full_argreplacer, domain="numpy") + def full(shape, fill_value, dtype=None, order='C'): + return (ua.Dispatchable(dtype, np.dtype),) + +A large set of examples can be found in the ``unumpy`` repository [8]_. +This simple act of overriding callables allows us to override: + +* Methods +* Properties, via ``fget`` and ``fset`` +* Entire objects, via ``__get__``. + +Examples for NumPy +^^^^^^^^^^^^^^^^^^ + +A library that implements a NumPy-like API will use it in the following +manner (as an example):: + + import numpy.overridable as unp + _ua_implementations = {} + + __ua_domain__ = "numpy" + + def __ua_function__(func, args, kwargs): + fn = _ua_implementations.get(func, None) + return fn(*args, **kwargs) if fn is not None else NotImplemented + + def implements(ua_func): + def inner(func): + _ua_implementations[ua_func] = func + return func + + return inner + + @implements(unp.asarray) + def asarray(a, dtype=None, order=None): + # Code here + # Either this method or __ua_convert__ must + # return NotImplemented for unsupported types, + # or they shouldn't be marked as dispatchable. + + # Implementing full() also provides default implementations of + # ones() and zeros(), which unumpy can express in terms of it. + @implements(unp.full) + def full(shape, fill_value, dtype=None, order='C'): + # Code here + +Backward compatibility +---------------------- + +There are no backward incompatible changes proposed in this NEP. + +Alternatives +------------ + +The current alternative to this problem is a combination of NEP-18 [2]_, +NEP-13 [4]_ and NEP-30 [9]_ plus adding more protocols (not yet specified). +Even then, some parts of the NumPy API will remain +non-overridable, so it's a partial alternative. + +The main alternative to vendoring ``unumpy`` is to simply move it into NumPy +completely and not distribute it as a separate package. This would also achieve +the proposed goals; however, we prefer to keep it a separate package for now, +for reasons already stated above. + +The third alternative is to move ``unumpy`` into the NumPy organisation and +develop it as a NumPy project. This would also achieve the stated goals, and is +also a possibility that can be considered by this NEP. However, the act of +doing an extra ``pip install`` or ``conda install`` may discourage some users +from adopting this method. + +An alternative to requiring opt-in is mainly to *not* override ``np.asarray`` +and ``np.array``, to make the rest of the NumPy API surface overridable, and +instead provide ``np.duckarray`` and ``np.asduckarray`` +as duck-array-friendly alternatives that use the respective overrides. However, +this has the downside of adding a minor overhead to NumPy calls.
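To round off the sketch under "Examples for NumPy" above, an end-user might select such a backend as follows. This is a hedged illustration only: ``mylib`` is a hypothetical module exposing the ``__ua_domain__`` and ``__ua_function__`` definitions shown there.

.. code:: python

    import uarray as ua
    import numpy.overridable as unp

    import mylib  # hypothetical module implementing the backend above

    with ua.set_backend(mylib):
        a = unp.asarray([1, 2, 3])   # dispatched to mylib's asarray
        b = unp.full((2, 2), 42)     # dispatched to mylib's full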
+ +Discussion +---------- + +* ``uarray`` blogpost: https://labs.quansight.org/blog/2019/07/uarray-update-api-changes-overhead-and-comparison-to-__array_function__/ +* The discussion section of NEP-18: https://numpy.org/neps/nep-0018-array-function-protocol.html#discussion +* NEP-22: https://numpy.org/neps/nep-0022-ndarray-duck-typing-overview.html +* Dask issue #4462: https://github.com/dask/dask/issues/4462 +* PR #13046: https://github.com/numpy/numpy/pull/13046 +* Dask issue #4883: https://github.com/dask/dask/issues/4883 +* Issue #13831: https://github.com/numpy/numpy/issues/13831 +* Discussion PR 1: https://github.com/hameerabbasi/numpy/pull/3 +* Discussion PR 2: https://github.com/hameerabbasi/numpy/pull/4 +* Discussion PR 3: https://github.com/numpy/numpy/pull/14389 + + +References and Footnotes +------------------------ + +.. [1] uarray, A general dispatch mechanism for Python: https://uarray.readthedocs.io + +.. [2] NEP 18 — A dispatch mechanism for NumPy’s high level array functions: https://numpy.org/neps/nep-0018-array-function-protocol.html + +.. [3] NEP 22 — Duck typing for NumPy arrays – high level overview: https://numpy.org/neps/nep-0022-ndarray-duck-typing-overview.html + +.. [4] NEP 13 — A Mechanism for Overriding Ufuncs: https://numpy.org/neps/nep-0013-ufunc-overrides.html + +.. [5] Reply to Adding to the non-dispatched implementation of NumPy methods: http://numpy-discussion.10968.n7.nabble.com/Adding-to-the-non-dispatched-implementation-of-NumPy-methods-tp46816p46874.html + +.. [6] Custom Dtype/Units discussion: http://numpy-discussion.10968.n7.nabble.com/Custom-Dtype-Units-discussion-td43262.html + +.. [7] The epic dtype cleanup plan: https://github.com/numpy/numpy/issues/2899 + +.. [8] unumpy: NumPy, but implementation-independent: https://unumpy.readthedocs.io + +.. [9] NEP 30 — Duck Typing for NumPy Arrays - Implementation: https://www.numpy.org/neps/nep-0030-duck-array-protocol.html + +.. [10] http://scipy.github.io/devdocs/fft.html#backend-control + + +Copyright +--------- + +This document has been placed in the public domain. diff --git a/doc/neps/nep-0032-remove-financial-functions.rst b/doc/neps/nep-0032-remove-financial-functions.rst new file mode 100644 index 000000000..a78b11fea --- /dev/null +++ b/doc/neps/nep-0032-remove-financial-functions.rst @@ -0,0 +1,214 @@ +================================================== +NEP 32 — Remove the financial functions from NumPy +================================================== + +:Author: Warren Weckesser <warren.weckesser@gmail.com> +:Status: Accepted +:Type: Standards Track +:Created: 2019-08-30 +:Resolution: https://mail.python.org/pipermail/numpy-discussion/2019-September/080074.html + + +Abstract +-------- + +We propose deprecating and ultimately removing the financial functions [1]_ +from NumPy. The functions will be moved to an independent repository, +and provided to the community as a separate package with the name +``numpy_financial``. + + +Motivation and scope +-------------------- + +The NumPy financial functions [1]_ are the 10 functions ``fv``, ``ipmt``, +``irr``, ``mirr``, ``nper``, ``npv``, ``pmt``, ``ppmt``, ``pv`` and ``rate``. +The functions provide elementary financial calculations such as future value, +net present value, etc. These functions were added to NumPy in 2008 [2]_. + +In May, 2009, a request by Joe Harrington to add a function called ``xirr`` to +the financial functions triggered a long thread about these functions [3]_. 
+One important point that came up in that thread is that a "real" financial +library must be able to handle real dates. The NumPy financial functions do +not work with actual dates or calendars. The preference for a more capable +library independent of NumPy was expressed several times in that thread. + +In June, 2009, D. L. Goldsmith expressed concerns about the correctness of the +implementations of some of the financial functions [4]_. It was suggested then +to move the financial functions out of NumPy to an independent package. + +In a GitHub issue in 2013 [5]_, Nathaniel Smith suggested moving the financial +functions from the top-level namespace to ``numpy.financial``. He also +suggested giving the functions better names. Responses at that time included +the suggestion to deprecate them and move them from NumPy to a separate +package. This issue is still open. + +Later in 2013 [6]_, it was suggested on the mailing list that these functions +be removed from NumPy. + +The arguments for the removal of these functions from NumPy: + +* They are too specialized for NumPy. +* They are not actually useful for "real world" financial calculations, because + they do not handle real dates and calendars. +* The definition of "correctness" for some of these functions seems to be a + matter of convention, and the current NumPy developers do not have the + background to judge their correctness. +* There has been little interest among past and present NumPy developers + in maintaining these functions. + +The main arguments for keeping the functions in NumPy are: + +* Removing these functions will be disruptive for some users. Current users + will have to add the new ``numpy_financial`` package to their dependencies, + and then modify their code to use the new package. +* The functions provided, while not "industrial strength", are apparently + similar to functions provided by spreadsheets and some calculators. Having + them available in NumPy makes it easier for some developers to migrate their + software to Python and NumPy. + +It is clear from comments in the mailing list discussions and in the GitHub +issues that many current NumPy developers believe the benefits of removing +the functions outweigh the costs. For example, from [5]_:: + + The financial functions should probably be part of a separate package + -- Charles Harris + + If there's a better package we can point people to we could just deprecate + them and then remove them entirely... I'd be fine with that too... + -- Nathaniel Smith + + +1 to deprecate them. If no other package exists, it can be created if + someone feels the need for that. + -- Ralf Gommers + + I feel pretty strongly that we should deprecate these. If nobody on numpy’s + core team is interested in maintaining them, then it is purely a drag on + development for NumPy. + -- Stephan Hoyer + +And from the 2013 mailing list discussion, about removing the functions from +NumPy:: + + I am +1 as well, I don't think they should have been included in the first + place. + -- David Cournapeau + +But not everyone was in favor of removal:: + + The fin routines are tiny and don't require much maintenance once + written. If we made an effort (putting up pages with examples of common + financial calculations and collecting those under a topical web page, + then linking to that page from various places and talking it up), I + would think they could attract users looking for a free way to play with + financial scenarios. [...] + So, I would say we keep them. 
If ours are not the best, we should bring + them up to snuff. + -- Joe Harrington + +For an idea of the maintenance burden of the financial functions, one can +look for all the GitHub issues [7]_ and pull requests [8]_ that have the tag +``component: numpy.lib.financial``. + +One method for measuring the effect of removing these functions is to find +all the packages on GitHub that use them. Such a search can be performed +with the ``python-api-inspect`` service [9]_. A search for all uses of the +NumPy financial functions finds just eight repositories. (See the comments +in [5]_ for the actual SQL query.) + + +Implementation +-------------- + +* Create a new Python package, ``numpy_financial``, to be maintained in the + top-level NumPy github organization. This repository will contain the + definitions and unit tests for the financial functions. The package will + be added to PyPI so it can be installed with ``pip``. +* Deprecate the financial functions in the ``numpy`` namespace, beginning in + NumPy version 1.18. Remove the financial functions from NumPy version 1.20. + + +Backward compatibility +---------------------- + +The removal of these functions breaks backward compatibility, as explained +earlier. The effects are mitigated by providing the ``numpy_financial`` +library. + + +Alternatives +------------ + +The following alternatives were mentioned in [5]_: + +* *Maintain the functions as they are (i.e. do nothing).* + A review of the history makes clear that this is not the preference of many + NumPy developers. A recurring comment is that the functions simply do not + belong in NumPy. When that sentiment is combined with the history of bug + reports and the ongoing questions about the correctness of the functions, the + conclusion is that the cleanest solution is deprecation and removal. +* *Move the functions from the ``numpy`` namespace to ``numpy.financial``.* + This was the initial suggestion in [5]_. Such a change does not address the + maintenance issues, and doesn't change the misfit that many developers see + between these functions and NumPy. It causes disruption for the current + users of these functions without addressing what many developers see as the + fundamental problem. + + +Discussion +---------- + +Links to past mailing list discussions, and to relevant GitHub issues and pull +requests, have already been given. The announcement of this NEP was made on +the NumPy-Discussion mailing list on 3 September 2019 [10]_, and on the +PyData mailing list on 8 September 2019 [11]_. The formal proposal to accept +the NEP was made on 19 September 2019 [12]_; a notification was also sent to +PyData (same thread as [11]_). There have been no substantive objections. + + +References and footnotes +------------------------ + +.. [1] Financial functions, + https://numpy.org/doc/1.17/reference/routines.financial.html + +.. [2] Numpy-discussion mailing list, "Simple financial functions for NumPy", + https://mail.python.org/pipermail/numpy-discussion/2008-April/032353.html + +.. [3] Numpy-discussion mailing list, "add xirr to numpy financial functions?", + https://mail.python.org/pipermail/numpy-discussion/2009-May/042645.html + +.. [4] Numpy-discussion mailing list, "Definitions of pv, fv, nper, pmt, and rate", + https://mail.python.org/pipermail/numpy-discussion/2009-June/043188.html + +.. [5] Get financial functions out of main namespace, + https://github.com/numpy/numpy/issues/2880 + +.. 
[6] Numpy-discussion mailing list, "Deprecation of financial routines", + https://mail.python.org/pipermail/numpy-discussion/2013-August/067409.html + +.. [7] ``component: numpy.lib.financial`` issues, + https://github.com/numpy/numpy/issues?utf8=%E2%9C%93&q=is%3Aissue+label%3A%22component%3A+numpy.lib.financial%22+ + +.. [8] ``component: numpy.lib.financial`` pull requests, + https://github.com/numpy/numpy/pulls?utf8=%E2%9C%93&q=is%3Apr+label%3A%22component%3A+numpy.lib.financial%22+ + +.. [9] Quansight-Labs/python-api-inspect, + https://github.com/Quansight-Labs/python-api-inspect/ + +.. [10] Numpy-discussion mailing list, "NEP 32: Remove the financial functions + from NumPy" + https://mail.python.org/pipermail/numpy-discussion/2019-September/079965.html + +.. [11] PyData mailing list (pydata@googlegroups.com), "NumPy proposal to + remove the financial functions. + https://mail.google.com/mail/u/0/h/1w0mjgixc4rpe/?&th=16d5c38be45f77c4&q=nep+32&v=c&s=q + +.. [12] Numpy-discussion mailing list, "Proposal to accept NEP 32: Remove the + financial functions from NumPy" + https://mail.python.org/pipermail/numpy-discussion/2019-September/080074.html + +Copyright +--------- + +This document has been placed in the public domain. diff --git a/doc/records.rst.txt b/doc/records.rst.txt index a608880d7..3c0d55216 100644 --- a/doc/records.rst.txt +++ b/doc/records.rst.txt @@ -50,7 +50,7 @@ New possibilities for the "data-type" **Dictionary (keys "names", "titles", and "formats")** - This will be converted to a ``PyArray_VOID`` type with corresponding + This will be converted to a ``NPY_VOID`` type with corresponding fields parameter (the formats list will be converted to actual ``PyArray_Descr *`` objects). @@ -58,10 +58,10 @@ New possibilities for the "data-type" **Objects (anything with an .itemsize and .fields attribute)** If its an instance of (a sub-class of) void type, then a new ``PyArray_Descr*`` structure is created corresponding to its - typeobject (and ``PyArray_VOID``) typenumber. If the type is + typeobject (and ``NPY_VOID``) typenumber. If the type is registered, then the registered type-number is used. - Otherwise a new ``PyArray_VOID PyArray_Descr*`` structure is created + Otherwise a new ``NPY_VOID PyArray_Descr*`` structure is created and filled ->elsize and ->fields filled in appropriately. The itemsize attribute must return a number > 0. The fields diff --git a/doc/release/time_based_proposal.rst b/doc/release/time_based_proposal.rst deleted file mode 100644 index 2eb13562d..000000000 --- a/doc/release/time_based_proposal.rst +++ /dev/null @@ -1,129 +0,0 @@ -.. vim:syntax=rst - -Introduction -============ - -This document proposes some enhancements for numpy and scipy releases. -Successive numpy and scipy releases are too far apart from a time point of -view - some people who are in the numpy release team feel that it cannot -improve without a bit more formal release process. The main proposal is to -follow a time-based release, with expected dates for code freeze, beta and rc. -The goal is two folds: make release more predictable, and move the code forward. - -Rationale -========= - -Right now, the release process of numpy is relatively organic. When some -features are there, we may decide to make a new release. Because there is not -fixed schedule, people don't really know when new features and bug fixes will -go into a release. 
More significantly, having an expected release schedule -helps to *coordinate* efforts: at the beginning of a cycle, everybody can jump -in and put new code, even break things if needed. But after some point, only -bug fixes are accepted: this makes beta and RC releases much easier; calming -things down toward the release date helps focusing on bugs and regressions - -Proposal -======== - -Time schedule -------------- - -The proposed schedule is to release numpy every 9 weeks - the exact period can -be tweaked if it ends up not working as expected. There will be several stages -for the cycle: - - * Development: anything can happen (by anything, we mean as currently - done). The focus is on new features, refactoring, etc... - - * Beta: no new features. No bug fixing which requires heavy changes. - regression fixes which appear on supported platforms and were not - caught earlier. - - * Polish/RC: only docstring changes and blocker regressions are allowed. - -The schedule would be as follows: - - +------+-----------------+-----------------+------------------+ - | Week | 1.3.0 | 1.4.0 | Release time | - +======+=================+=================+==================+ - | 1 | Development | | | - +------+-----------------+-----------------+------------------+ - | 2 | Development | | | - +------+-----------------+-----------------+------------------+ - | 3 | Development | | | - +------+-----------------+-----------------+------------------+ - | 4 | Development | | | - +------+-----------------+-----------------+------------------+ - | 5 | Development | | | - +------+-----------------+-----------------+------------------+ - | 6 | Development | | | - +------+-----------------+-----------------+------------------+ - | 7 | Beta | | | - +------+-----------------+-----------------+------------------+ - | 8 | Beta | | | - +------+-----------------+-----------------+------------------+ - | 9 | Beta | | 1.3.0 released | - +------+-----------------+-----------------+------------------+ - | 10 | Polish | Development | | - +------+-----------------+-----------------+------------------+ - | 11 | Polish | Development | | - +------+-----------------+-----------------+------------------+ - | 12 | Polish | Development | | - +------+-----------------+-----------------+------------------+ - | 13 | Polish | Development | | - +------+-----------------+-----------------+------------------+ - | 14 | | Development | | - +------+-----------------+-----------------+------------------+ - | 15 | | Development | | - +------+-----------------+-----------------+------------------+ - | 16 | | Beta | | - +------+-----------------+-----------------+------------------+ - | 17 | | Beta | | - +------+-----------------+-----------------+------------------+ - | 18 | | Beta | 1.4.0 released | - +------+-----------------+-----------------+------------------+ - -Each stage can be defined as follows: - - +------------------+-------------+----------------+----------------+ - | | Development | Beta | Polish | - +==================+=============+================+================+ - | Python Frozen | | slushy | Y | - +------------------+-------------+----------------+----------------+ - | Docstring Frozen | | slushy | thicker slush | - +------------------+-------------+----------------+----------------+ - | C code Frozen | | thicker slush | thicker slush | - +------------------+-------------+----------------+----------------+ - -Terminology: - - * slushy: you can change it if you beg the release team and it's really - important and you 
coordinate with docs/translations; no "big" - changes. - - * thicker slush: you can change it if it's an open bug marked - showstopper for the Polish release, you beg the release team, the - change is very very small yet very very important, and you feel - extremely guilty about your transgressions. - -The different frozen states are intended to be gradients. The exact meaning is -decided by the release manager: he has the last word on what's go in, what -doesn't. The proposed schedule means that there would be at most 12 weeks -between putting code into the source code repository and being released. - -Release team ------------- - -For every release, there would be at least one release manager. We propose to -rotate the release manager: rotation means it is not always the same person -doing the dirty job, and it should also keep the release manager honest. - -References -========== - - * Proposed schedule for Gnome from Havoc Pennington (one of the core - GTK and Gnome manager): - https://mail.gnome.org/archives/gnome-hackers/2002-June/msg00041.html - The proposed schedule is heavily based on this email - - * https://wiki.gnome.org/ReleasePlanning/Freezes diff --git a/doc/release/upcoming_changes/10151.improvement.rst b/doc/release/upcoming_changes/10151.improvement.rst new file mode 100644 index 000000000..3706a5132 --- /dev/null +++ b/doc/release/upcoming_changes/10151.improvement.rst @@ -0,0 +1,9 @@ +Different C numeric types of the same size have unique names +------------------------------------------------------------ +On any given platform, two of ``np.intc``, ``np.int_``, and ``np.longlong`` +would previously appear indistinguishable through their ``repr``, despite +their corresponding ``dtype`` having different properties. +A similar problem existed for the unsigned counterparts to these types, and on +some platforms for ``np.double`` and ``np.longdouble`` + +These types now always print with a unique ``__name__``. diff --git a/doc/release/upcoming_changes/12284.new_feature.rst b/doc/release/upcoming_changes/12284.new_feature.rst new file mode 100644 index 000000000..25321cd9b --- /dev/null +++ b/doc/release/upcoming_changes/12284.new_feature.rst @@ -0,0 +1,5 @@ + +Add our own ``*.pxd`` cython import file +-------------------------------------------- +Added a ``numpy/__init__.pxd`` file. It will be used for `cimport numpy` + diff --git a/doc/release/upcoming_changes/13605.deprecation.rst b/doc/release/upcoming_changes/13605.deprecation.rst new file mode 100644 index 000000000..bff12e965 --- /dev/null +++ b/doc/release/upcoming_changes/13605.deprecation.rst @@ -0,0 +1,9 @@ +`np.fromfile` and `np.fromstring` will error on bad data +-------------------------------------------------------- + +In future numpy releases, the functions `np.fromfile` and `np.fromstring` +will throw an error when parsing bad data. +This will now give a ``DeprecationWarning`` where previously partial or +even invalid data was silently returned. 
This deprecation also affects +the C-defined functions :c:func:`PyArray_FromString` and +:c:func:`PyArray_FromFile`. diff --git a/doc/release/upcoming_changes/13610.improvement.rst b/doc/release/upcoming_changes/13610.improvement.rst new file mode 100644 index 000000000..6f97b43ad --- /dev/null +++ b/doc/release/upcoming_changes/13610.improvement.rst @@ -0,0 +1,5 @@ +``argwhere`` now produces a consistent result on 0d arrays +---------------------------------------------------------- +On N-d arrays, `numpy.argwhere` now always produces an array of shape +``(n_non_zero, arr.ndim)``, even when ``arr.ndim == 0``. Previously, the +last axis would have a dimension of 1 in this case. diff --git a/doc/release/upcoming_changes/13794.new_function.rst b/doc/release/upcoming_changes/13794.new_function.rst new file mode 100644 index 000000000..cf8b38bb0 --- /dev/null +++ b/doc/release/upcoming_changes/13794.new_function.rst @@ -0,0 +1,5 @@ +Multivariate hypergeometric distribution added to `numpy.random` +---------------------------------------------------------------- +The method `multivariate_hypergeometric` has been added to the class +`numpy.random.Generator`. This method generates random variates from +the multivariate hypergeometric probability distribution. diff --git a/doc/release/upcoming_changes/13829.improvement.rst b/doc/release/upcoming_changes/13829.improvement.rst new file mode 100644 index 000000000..ede1b2a53 --- /dev/null +++ b/doc/release/upcoming_changes/13829.improvement.rst @@ -0,0 +1,6 @@ +Add ``axis`` argument for ``random.permutation`` and ``random.shuffle`` +----------------------------------------------------------------------- + +Previously the ``random.permutation`` and ``random.shuffle`` functions +could only shuffle an array along the first axis; they now have a +new argument ``axis`` which allows shuffling along a specified axis. diff --git a/doc/release/upcoming_changes/14036.deprecation.rst b/doc/release/upcoming_changes/14036.deprecation.rst new file mode 100644 index 000000000..3d997b9a2 --- /dev/null +++ b/doc/release/upcoming_changes/14036.deprecation.rst @@ -0,0 +1,4 @@ +Deprecate `PyArray_As1D`, `PyArray_As2D` +---------------------------------------- +`PyArray_As1D` and `PyArray_As2D` are deprecated; use +`PyArray_AsCArray` instead.
\ No newline at end of file diff --git a/doc/release/upcoming_changes/14100.expired.rst b/doc/release/upcoming_changes/14100.expired.rst index 953922c72..e9ea9eeb4 100644 --- a/doc/release/upcoming_changes/14100.expired.rst +++ b/doc/release/upcoming_changes/14100.expired.rst @@ -1,3 +1,3 @@ -* ``PyArray_FromDimsAndDataAndDescr`` has been removed, use - ``PyArray_NewFromDescr`` instead -* ``PyArray_FromDims`` has been removed, use ``PyArray_SimpleNew`` instead +* ``PyArray_FromDimsAndDataAndDescr`` and ``PyArray_FromDims`` have been + removed (they will always raise an error). Use ``PyArray_NewFromDescr`` + and ``PyArray_SimpleNew`` instead. diff --git a/doc/release/upcoming_changes/14248.change.rst b/doc/release/upcoming_changes/14248.change.rst new file mode 100644 index 000000000..9ae0f16bc --- /dev/null +++ b/doc/release/upcoming_changes/14248.change.rst @@ -0,0 +1,10 @@ +`numpy.distutils`: append behavior changed for LDFLAGS and similar +------------------------------------------------------------------ +`numpy.distutils` has always overridden rather than appended to ``LDFLAGS`` and +other similar environment variables for compiling Fortran extensions. Now +the default behavior has changed to appending, which is the expected behavior +in most situations. To preserve the old (overwriting) behavior, set the +``NPY_DISTUTILS_APPEND_FLAGS`` environment variable to 0. This applies to: +``LDFLAGS``, ``F77FLAGS``, ``F90FLAGS``, ``FREEFLAGS``, ``FOPT``, ``FDEBUG``, +and ``FFLAGS``. NumPy 1.16 and 1.17 gave build warnings in situations where this +change in behavior would have affected the compile flags used. diff --git a/doc/release/upcoming_changes/14248.changes.rst b/doc/release/upcoming_changes/14248.changes.rst deleted file mode 100644 index ff5f4acef..000000000 --- a/doc/release/upcoming_changes/14248.changes.rst +++ /dev/null @@ -1,10 +0,0 @@ -numpy.distutils: append behavior changed for LDFLAGS and similar ----------------------------------------------------------------- -`numpy.distutils` has always overridden rather than appended to `LDFLAGS` and -other similar such environment variables for compiling Fortran extensions. Now -the default behavior has changed to appending - which is the expected behavior -in most situations. To preserve the old (overwriting) behavior, set the -`NPY_DISTUTILS_APPEND_FLAGS` environment variable to 0. This applies to: -`LDFLAGS`, `F77FLAGS`, `F90FLAGS`, `FREEFLAGS`, `FOPT`, `FDEBUG`, and `FFLAGS`. -NumPy 1.16 and 1.17 gave build warnings in situations where this change in -behavior would have affected the compile flags used. diff --git a/doc/release/upcoming_changes/14255.improvement.rst b/doc/release/upcoming_changes/14255.improvement.rst new file mode 100644 index 000000000..e17835efd --- /dev/null +++ b/doc/release/upcoming_changes/14255.improvement.rst @@ -0,0 +1,4 @@ +`numpy.unique` has consistent axes order (except the chosen one) when ``axis`` is not None +------------------------------------------------------------------------------------------ +`numpy.unique` now uses ``moveaxis`` instead of ``swapaxes`` internally, so that the +ordering of the axes other than the one given via ``axis`` is preserved.
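As a brief illustration of the improvement (a sketch added for this note, assuming nothing beyond the public `numpy.unique` API):

.. code:: python

    import numpy as np

    a = np.arange(24).reshape(2, 3, 4) % 5

    # Unique sub-arrays along axis 1; the remaining axes (0 and 2)
    # now keep their relative order in the result.
    u = np.unique(a, axis=1)
    assert u.shape[0] == a.shape[0] and u.shape[2] == a.shape[2]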
diff --git a/doc/release/upcoming_changes/14393.c_api.rst b/doc/release/upcoming_changes/14393.c_api.rst new file mode 100644 index 000000000..0afd27584 --- /dev/null +++ b/doc/release/upcoming_changes/14393.c_api.rst @@ -0,0 +1,5 @@ +PyDataType_ISUNSIZED(descr) now returns False for structured datatypes +---------------------------------------------------------------------- +Previously this returned True for any datatype of itemsize 0, but now this +returns ``False`` for the non-flexible datatype with itemsize 0, ``np.dtype([])``. + diff --git a/doc/release/upcoming_changes/14464.improvement.rst b/doc/release/upcoming_changes/14464.improvement.rst new file mode 100644 index 000000000..36ee4090b --- /dev/null +++ b/doc/release/upcoming_changes/14464.improvement.rst @@ -0,0 +1,6 @@ +`numpy.matmul` with boolean output now converts to boolean values +----------------------------------------------------------------- +Previously, calling `numpy.matmul` where the output is a boolean array would fill the array +with uint8 equivalents of the result, rather than 0/1. Now it forces the output +to 0 or 1 (``NPY_TRUE`` or ``NPY_FALSE``). + diff --git a/doc/release/upcoming_changes/14498.change.rst b/doc/release/upcoming_changes/14498.change.rst new file mode 100644 index 000000000..fd784e289 --- /dev/null +++ b/doc/release/upcoming_changes/14498.change.rst @@ -0,0 +1,7 @@ +Remove ``numpy.random.entropy`` without a deprecation +----------------------------------------------------- + +``numpy.random.entropy`` was added to the `numpy.random` namespace in 1.17.0. +It was meant to be a private c-extension module, but was exposed as public. +It has been replaced by `numpy.random.SeedSequence`, so the module was +completely removed. diff --git a/doc/release/upcoming_changes/14501.improvement.rst b/doc/release/upcoming_changes/14501.improvement.rst new file mode 100644 index 000000000..f397ecccf --- /dev/null +++ b/doc/release/upcoming_changes/14501.improvement.rst @@ -0,0 +1,6 @@ +`numpy.random.randint` produced incorrect values when the range was ``2**32`` +------------------------------------------------------------------------------ +The implementation introduced in 1.17.0 had a faulty check when +determining whether to use the 32-bit path or the full 64-bit +path, which redirected random integer generation with a ``high - low`` +range of ``2**32`` to the 64-bit generator. diff --git a/doc/release/upcoming_changes/14510.compatibility.rst b/doc/release/upcoming_changes/14510.compatibility.rst new file mode 100644 index 000000000..fc5edbc39 --- /dev/null +++ b/doc/release/upcoming_changes/14510.compatibility.rst @@ -0,0 +1,12 @@ +`numpy.lib.recfunctions.drop_fields` can no longer return None +-------------------------------------------------------------- +If ``drop_fields`` is used to drop all fields, previously the array would +be completely discarded and ``None`` returned. Now it returns an array of the +same shape as the input, but with no fields.
The old behavior can be retained +with:: + + dropped_arr = drop_fields(arr, ['a', 'b']) + if dropped_arr.dtype.names == (): + dropped_arr = None + +which converts an array with no remaining fields back to None. diff --git a/doc/release/upcoming_changes/14518.change.rst b/doc/release/upcoming_changes/14518.change.rst new file mode 100644 index 000000000..ba3844c85 --- /dev/null +++ b/doc/release/upcoming_changes/14518.change.rst @@ -0,0 +1,18 @@ +Add options to quiet build configuration and build with ``-Werror`` +------------------------------------------------------------------- +Added two new configuration options. During the ``build_src`` subcommand, as +part of configuring NumPy, the files ``_numpyconfig.h`` and ``config.h`` are +created by probing support for various runtime functions and routines. +Previously, the very verbose compiler output during this stage clouded more +important information. By default the output is silenced. Running ``runtests.py +--debug-info`` will add ``--verbose-cfg`` to the ``build_src`` subcommand, +which will restore the previous behavior. + +Adding ``CFLAGS=-Werror`` to turn warnings into errors would trigger errors +during the configuration. Now ``runtests.py --warn-error`` will add +``--warn-error`` to the ``build`` subcommand, which will percolate to the +``build_ext`` and ``build_lib`` subcommands. This will add the compiler flag +to those stages and turn compiler warnings into errors while actually building +NumPy itself, avoiding the ``build_src`` subcommand compiler calls. + +(`gh-14527 <https://github.com/numpy/numpy/pull/14527>`__) diff --git a/doc/release/upcoming_changes/14567.expired.rst b/doc/release/upcoming_changes/14567.expired.rst new file mode 100644 index 000000000..59cb600fb --- /dev/null +++ b/doc/release/upcoming_changes/14567.expired.rst @@ -0,0 +1,5 @@ +The files ``numpy/testing/decorators.py``, ``numpy/testing/noseclasses.py`` +and ``numpy/testing/nosetester.py`` have been removed. They were never +meant to be public (all relevant objects are present in the +``numpy.testing`` namespace), and importing them has given a deprecation +warning since NumPy 1.15.0. diff --git a/doc/release/upcoming_changes/14583.expired.rst b/doc/release/upcoming_changes/14583.expired.rst new file mode 100644 index 000000000..1fad06309 --- /dev/null +++ b/doc/release/upcoming_changes/14583.expired.rst @@ -0,0 +1,2 @@ +* Remove deprecated support for boolean and empty condition lists in + `numpy.select` diff --git a/doc/release/upcoming_changes/14596.expired.rst b/doc/release/upcoming_changes/14596.expired.rst new file mode 100644 index 000000000..3831d5401 --- /dev/null +++ b/doc/release/upcoming_changes/14596.expired.rst @@ -0,0 +1,2 @@ +* Array order only accepts 'C', 'F', 'A', and 'K'. More permissive options + were deprecated in NumPy 1.11. diff --git a/doc/release/upcoming_changes/14620.expired.rst b/doc/release/upcoming_changes/14620.expired.rst new file mode 100644 index 000000000..e35589b53 --- /dev/null +++ b/doc/release/upcoming_changes/14620.expired.rst @@ -0,0 +1 @@ +* The ``num`` parameter of ``np.linspace`` must be an integer. This was deprecated in NumPy 1.12. diff --git a/doc/release/upcoming_changes/14682.expired.rst b/doc/release/upcoming_changes/14682.expired.rst new file mode 100644 index 000000000..e9a8107ec --- /dev/null +++ b/doc/release/upcoming_changes/14682.expired.rst @@ -0,0 +1,2 @@ +* UFuncs with multiple outputs must use a tuple for the `out` kwarg. This + finishes a deprecation started in NumPy 1.10.
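As a minimal sketch of the ``out``-tuple rule recorded in ``14682.expired.rst`` (illustrative, not part of the diff): a multi-output ufunc such as ``np.modf`` now requires preallocated outputs to be passed as a tuple::

    >>> import numpy as np
    >>> frac, whole = np.empty(3), np.empty(3)
    >>> np.modf(np.array([1.5, 2.25, 3.0]), out=(frac, whole))  # a bare array now raises
    (array([0.5 , 0.25, 0.  ]), array([1., 2., 3.]))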
diff --git a/doc/release/upcoming_changes/14717.compatibility.rst b/doc/release/upcoming_changes/14717.compatibility.rst new file mode 100644 index 000000000..f6f0ec8e5 --- /dev/null +++ b/doc/release/upcoming_changes/14717.compatibility.rst @@ -0,0 +1,4 @@ +``numpy.argmin/argmax/min/max`` returns ``NaT`` if it exists in the array +-------------------------------------------------------------------------- +``numpy.argmin``, ``numpy.argmax``, ``numpy.min``, and ``numpy.max`` will return +``NaT`` if it exists in the array. diff --git a/doc/release/upcoming_changes/14720.deprecation.rst b/doc/release/upcoming_changes/14720.deprecation.rst new file mode 100644 index 000000000..46ad6d8f7 --- /dev/null +++ b/doc/release/upcoming_changes/14720.deprecation.rst @@ -0,0 +1,8 @@ +Deprecate the financial functions +--------------------------------- +In accordance with +`NEP-32 <https://numpy.org/neps/nep-0032-remove-financial-functions.html>`_, +the functions `fv`, `ipmt`, `irr`, `mirr`, `nper`, `npv`, `pmt`, `ppmt`, +`pv` and `rate` are deprecated, and will be removed in NumPy 1.20. +The replacement for these functions is the Python package +`numpy-financial <https://pypi.org/project/numpy-financial>`_. diff --git a/doc/release/upcoming_changes/README.rst b/doc/release/upcoming_changes/README.rst index fd35850e4..7f6476bda 100644 --- a/doc/release/upcoming_changes/README.rst +++ b/doc/release/upcoming_changes/README.rst @@ -26,7 +26,7 @@ Each file should be named like ``<PULL REQUEST>.<TYPE>.rst``, where * ``highlight``: Adds a highlight bullet point to use as a possible highlight of the release. -Most categories shouldl be formatted as paragraphs with a heading. +Most categories should be formatted as paragraphs with a heading. So for example: ``123.new_feature.rst`` would have the content:: ``my_new_feature`` option for `my_favorite_function` diff --git a/doc/release/upcoming_changes/template.rst b/doc/release/upcoming_changes/template.rst index 21c4d19c6..9c8a3b5fc 100644 --- a/doc/release/upcoming_changes/template.rst +++ b/doc/release/upcoming_changes/template.rst @@ -1,19 +1,25 @@ +{% set title = "NumPy {} Release Notes".format(versiondata.version) %} +{{ "=" * title|length }} +{{ title }} +{{ "=" * title|length }} + {% for section, _ in sections.items() %} -{% set underline = underlines[0] %}{% if section %}{{section}} +{% set underline = underlines[0] %}{% if section %}{{ section }} {{ underline * section|length }}{% set underline = underlines[1] %} {% endif %} - {% if sections[section] %} -{% for category, val in definitions.items() if category in sections[section]%} +{% for category, val in definitions.items() if category in sections[section] %} + {{ definitions[category]['name'] }} {{ underline * definitions[category]['name']|length }} {% if definitions[category]['showcontent'] %} {% for text, values in sections[section][category].items() %} -{{ text }} ({{ values|join(', ') }}) -{% endfor %} +{{ text }} +{{ get_indent(text) }}({{values|join(', ') }}) +{% endfor %} {% else %} - {{ sections[section][category]['']|join(', ') }} @@ -23,7 +29,6 @@ No significant changes. {% else %} {% endif %} - {% endfor %} {% else %} No significant changes. diff --git a/doc/source/_templates/autosummary/base.rst b/doc/source/_templates/autosummary/base.rst new file mode 100644 index 000000000..0331154a7 --- /dev/null +++ b/doc/source/_templates/autosummary/base.rst @@ -0,0 +1,14 @@ +{% if objtype == 'property' %} +:orphan: +{% endif %} + +{{ fullname | escape | underline}} + +.. 
currentmodule:: {{ module }} + +{% if objtype == 'property' %} +property +{% endif %} + +.. auto{{ objtype }}:: {{ objname }} + diff --git a/doc/source/conf.py b/doc/source/conf.py index 4f312eff5..83cecc917 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -3,12 +3,8 @@ from __future__ import division, absolute_import, print_function import sys, os, re -# Check Sphinx version -import sphinx -if sphinx.__version__ < "1.2.1": - raise RuntimeError("Sphinx 1.2.1 or newer required") - -needs_sphinx = '1.0' +# Minimum version, enforced by sphinx +needs_sphinx = '2.2.0' # ----------------------------------------------------------------------------- # General configuration @@ -31,13 +27,10 @@ extensions = [ 'matplotlib.sphinxext.plot_directive', 'IPython.sphinxext.ipython_console_highlighting', 'IPython.sphinxext.ipython_directive', + 'sphinx.ext.imgmath', ] -if sphinx.__version__ >= "1.4": - extensions.append('sphinx.ext.imgmath') - imgmath_image_format = 'svg' -else: - extensions.append('sphinx.ext.pngmath') +imgmath_image_format = 'svg' # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] @@ -45,6 +38,8 @@ templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' +master_doc = 'contents' + # General substitutions. project = 'NumPy' copyright = '2008-2019, The SciPy community' @@ -93,6 +88,7 @@ pygments_style = 'sphinx' def setup(app): # add a config value for `ifconfig` directives app.add_config_value('python_version_major', str(sys.version_info.major), 'env') + app.add_lexer('NumPyC', NumPyLexer(stripnl=False)) # ----------------------------------------------------------------------------- # HTML output @@ -177,6 +173,10 @@ latex_documents = [ # not chapters. #latex_use_parts = False +latex_elements = { + 'fontenc': r'\usepackage[LGR,T1]{fontenc}' +} + # Additional stuff for the LaTeX preamble. latex_preamble = r''' \usepackage{amsmath} @@ -368,18 +368,15 @@ def linkcode_resolve(domain, info): from pygments.lexers import CLexer from pygments import token -from sphinx.highlighting import lexers import copy class NumPyLexer(CLexer): name = 'NUMPYLEXER' - tokens = copy.deepcopy(lexers['c'].tokens) + tokens = copy.deepcopy(CLexer.tokens) # Extend the regex for valid identifiers with @ for k, val in tokens.items(): for i, v in enumerate(val): if isinstance(v, tuple): if isinstance(v[0], str): val[i] = (v[0].replace('a-zA-Z', 'a-zA-Z@'),) + v[1:] - -lexers['NumPyC'] = NumPyLexer(stripnl=False) diff --git a/doc/source/dev/development_environment.rst b/doc/source/dev/development_environment.rst index ce571926e..9d618cc9f 100644 --- a/doc/source/dev/development_environment.rst +++ b/doc/source/dev/development_environment.rst @@ -96,6 +96,11 @@ installs a ``.egg-link`` file into your site-packages as well as adjusts the Other build options ------------------- +Build options can be discovered by running any of:: + + $ python setup.py --help + $ python setup.py --help-commands + It's possible to do a parallel build with ``numpy.distutils`` with the ``-j`` option; see :ref:`parallel-builds` for more details. @@ -106,6 +111,16 @@ source tree is to use:: $ export PYTHONPATH=/some/owned/folder/lib/python3.4/site-packages +NumPy uses a series of tests to probe the compiler and libc libraries for +functions. The results are stored in ``_numpyconfig.h`` and ``config.h`` files +using ``HAVE_XXX`` definitions. 
These tests are run during the ``build_src`` +phase of the ``_multiarray_umath`` module in the ``generate_config_h`` and +``generate_numpyconfig_h`` functions. Since the output of these calls includes +many compiler warnings and errors, by default it is run quietly. If you wish +to see this output, you can run the ``build_src`` stage verbosely:: + + $ python setup.py build_src -v + Using virtualenvs ----------------- diff --git a/doc/source/dev/index.rst b/doc/source/dev/index.rst index 3b409f5ca..306c15069 100644 --- a/doc/source/dev/index.rst +++ b/doc/source/dev/index.rst @@ -131,8 +131,11 @@ Here's the short summary, complete TOC links are below: Beyond changes to a function's docstring and possible description in the general documentation, if your change introduces any user-facing - modifications, update the current release notes under - ``doc/release/X.XX-notes.rst`` + modifications, they may need to be mentioned in the release notes. + To add your change to the release notes, you need to create a short file + with a summary and place it in ``doc/release/upcoming_changes``. + The file ``doc/release/upcoming_changes/README.rst`` details the format and + filename conventions. If your change introduces a deprecation, make sure to discuss this first on GitHub or the mailing list. If agreement on the deprecation is @@ -226,7 +229,7 @@ Requirements ~~~~~~~~~~~~ `Sphinx <http://www.sphinx-doc.org/en/stable/>`__ is needed to build -the documentation. Matplotlib and SciPy are also required. +the documentation. Matplotlib, SciPy, and IPython are also required. Fixing Warnings ~~~~~~~~~~~~~~~ diff --git a/doc/source/docs/howto_build_docs.rst b/doc/source/docs/howto_build_docs.rst index 4bb7628c1..6deacda5c 100644 --- a/doc/source/docs/howto_build_docs.rst +++ b/doc/source/docs/howto_build_docs.rst @@ -5,7 +5,7 @@ Building the NumPy API and reference docs ========================================= We currently use Sphinx_ for generating the API and reference -documentation for NumPy. You will need Sphinx 1.8.3 or newer. +documentation for NumPy. You will need Sphinx >= 1.8.3 and <= 1.8.5. If you only want to get the documentation, note that pre-built versions can be found at diff --git a/doc/source/f2py/distutils.rst b/doc/source/f2py/distutils.rst index fdcd38468..71f6eab5a 100644 --- a/doc/source/f2py/distutils.rst +++ b/doc/source/f2py/distutils.rst @@ -26,7 +26,7 @@ sources, call F2PY to construct extension modules, etc. :mod:`numpy.distutils` extends ``distutils`` with the following features: -* ``Extension`` class argument ``sources`` may contain Fortran source +* :class:`Extension` class argument ``sources`` may contain Fortran source files. In addition, the list ``sources`` may contain at most one F2PY signature file, and then the name of an Extension module must match with the ``<modulename>`` used in signature file. It is @@ -37,7 +37,7 @@ sources, call F2PY to construct extension modules, etc. to scan Fortran source files for routine signatures to construct the wrappers to Fortran codes. - Additional options to F2PY process can be given using ``Extension`` + Additional options to the F2PY process can be given using :class:`Extension` class argument ``f2py_options``. 
* The following new ``distutils`` commands are defined: diff --git a/doc/source/reference/arrays.classes.rst b/doc/source/reference/arrays.classes.rst index 39410b2a4..9dcbb6267 100644 --- a/doc/source/reference/arrays.classes.rst +++ b/doc/source/reference/arrays.classes.rst @@ -51,7 +51,7 @@ NumPy provides several hooks that classes can customize: .. versionadded:: 1.13 Any class, ndarray subclass or not, can define this method or set it to - :obj:`None` in order to override the behavior of NumPy's ufuncs. This works + None in order to override the behavior of NumPy's ufuncs. This works quite similarly to Python's ``__mul__`` and other binary operation routines. - *ufunc* is the ufunc object that was called. @@ -94,13 +94,13 @@ NumPy provides several hooks that classes can customize: :class:`ndarray` handles binary operations like ``arr + obj`` and ``arr < obj`` when ``arr`` is an :class:`ndarray` and ``obj`` is an instance of a custom class. There are two possibilities. If - ``obj.__array_ufunc__`` is present and not :obj:`None`, then + ``obj.__array_ufunc__`` is present and not None, then ``ndarray.__add__`` and friends will delegate to the ufunc machinery, meaning that ``arr + obj`` becomes ``np.add(arr, obj)``, and then :func:`~numpy.add` invokes ``obj.__array_ufunc__``. This is useful if you want to define an object that acts like an array. - Alternatively, if ``obj.__array_ufunc__`` is set to :obj:`None`, then as a + Alternatively, if ``obj.__array_ufunc__`` is set to None, then as a special case, special methods like ``ndarray.__add__`` will notice this and *unconditionally* raise :exc:`TypeError`. This is useful if you want to create objects that interact with arrays via binary operations, but @@ -135,7 +135,7 @@ NumPy provides several hooks that classes can customize: place rather than separately by the ufunc machinery and by the binary operation rules (which gives preference to special methods of subclasses; the alternative way to enforce a one-place only hierarchy, - of setting :func:`__array_ufunc__` to :obj:`None`, would seem very + of setting :func:`__array_ufunc__` to None, would seem very unexpected and thus confusing, as then the subclass would not work at all with ufuncs). - :class:`ndarray` defines its own :func:`__array_ufunc__`, which, @@ -280,7 +280,7 @@ NumPy provides several hooks that classes can customize: .. py:method:: class.__array_prepare__(array, context=None) - At the beginning of every :ref:`ufunc <ufuncs.output-type>`, this + At the beginning of every :ref:`ufunc <ufuncs-output-type>`, this method is called on the input object with the highest array priority, or the output object if one was specified. The output array is passed in and whatever is returned is passed to the ufunc. @@ -295,7 +295,7 @@ NumPy provides several hooks that classes can customize: .. py:method:: class.__array_wrap__(array, context=None) - At the end of every :ref:`ufunc <ufuncs.output-type>`, this method + At the end of every :ref:`ufunc <ufuncs-output-type>`, this method is called on the input object with the highest array priority, or the output object if one was specified. The ufunc-computed array is passed in and whatever is returned is passed to the user. 
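To make the ``__array_wrap__`` hook described above concrete, here is a minimal sketch (the ``LoggedArray`` subclass is hypothetical, not from the diff; it assumes the default ndarray ufunc machinery passes a ``context`` tuple of ``(ufunc, arguments, domain)``)::

    import numpy as np

    class LoggedArray(np.ndarray):
        # Called at the end of every ufunc on the input with the highest
        # array priority; whatever it returns is what the user receives.
        def __array_wrap__(self, out_arr, context=None):
            if context is not None:
                print('wrapping output of', context[0].__name__)
            return out_arr.view(type(self))  # re-wrap the raw result

    x = np.arange(3.0).view(LoggedArray)
    y = np.sqrt(x)           # triggers __array_wrap__; prints: wrapping output of sqrt
    print(type(y).__name__)  # LoggedArray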
@@ -322,7 +322,7 @@ NumPy provides several hooks that classes can customize: If a class (ndarray subclass or not) having the :func:`__array__` method is used as the output object of an :ref:`ufunc - <ufuncs.output-type>`, results will be written to the object + <ufuncs-output-type>`, results will be written to the object returned by :func:`__array__`. Similar conversion is done on input arrays. diff --git a/doc/source/reference/arrays.datetime.rst b/doc/source/reference/arrays.datetime.rst index 387515f59..2225eedb3 100644 --- a/doc/source/reference/arrays.datetime.rst +++ b/doc/source/reference/arrays.datetime.rst @@ -26,7 +26,9 @@ be either a :ref:`date unit <arrays.dtypes.dateunits>` or a :ref:`time unit <arrays.dtypes.timeunits>`. The date units are years ('Y'), months ('M'), weeks ('W'), and days ('D'), while the time units are hours ('h'), minutes ('m'), seconds ('s'), milliseconds ('ms'), and -some additional SI-prefix seconds-based units. +some additional SI-prefix seconds-based units. The datetime64 data type +also accepts the string "NAT", in any combination of lowercase/uppercase +letters, for a "Not A Time" value. .. admonition:: Example @@ -50,6 +52,11 @@ some additional SI-prefix seconds-based units. >>> np.datetime64('2005-02-25T03:30') numpy.datetime64('2005-02-25T03:30') + NAT (not a time): + + >>> np.datetime64('nat') + numpy.datetime64('NaT') + When creating an array of datetimes from a string, it is still possible to automatically select the unit from the inputs, by using the datetime type with generic units. @@ -100,7 +107,21 @@ Datetime and Timedelta Arithmetic NumPy allows the subtraction of two Datetime values, an operation which produces a number with a time unit. Because NumPy doesn't have a physical quantities system in its core, the timedelta64 data type was created -to complement datetime64. +to complement datetime64. The arguments for timedelta64 are a number, +to represent the number of units, and a date/time unit, such as +(D)ay, (M)onth, (Y)ear, (h)ours, (m)inutes, or (s)econds. The timedelta64 +data type also accepts the string "NAT" in place of the number for a "Not A Time" value. + +.. admonition:: Example + + >>> np.timedelta64(1, 'D') + numpy.timedelta64(1,'D') + + >>> np.timedelta64(4, 'h') + numpy.timedelta64(4,'h') + + >>> np.timedelta64('nAt') + numpy.timedelta64('NaT') + Datetimes and Timedeltas work together to provide ways for simple datetime calculations. @@ -122,6 +143,12 @@ simple datetime calculations. >>> np.timedelta64(1,'W') % np.timedelta64(10,'D') numpy.timedelta64(7,'D') + >>> np.datetime64('nat') - np.datetime64('2009-01-01') + numpy.timedelta64('NaT','D') + + >>> np.datetime64('2009-01-01') + np.timedelta64('nat') + numpy.datetime64('NaT') + There are two Timedelta units ('Y', years and 'M', months) which are treated specially, because how much time they represent changes depending on when they are used. While a timedelta day unit is equivalent to @@ -366,132 +393,4 @@ As a corollary to this change, we no longer prohibit casting between datetimes with date units and datetimes with timeunits. With timezone naive datetimes, the rule for casting from dates to times is no longer ambiguous. -.. _pandas: http://pandas.pydata.org - - -Differences Between 1.6 and 1.7 Datetimes -========================================= - -The NumPy 1.6 release includes a more primitive datetime data type -than 1.7. This section documents many of the changes that have taken -place. 
- -String Parsing -`````````````` - -The datetime string parser in NumPy 1.6 is very liberal in what it accepts, -and silently allows invalid input without raising errors. The parser in -NumPy 1.7 is quite strict about only accepting ISO 8601 dates, with a few -convenience extensions. 1.6 always creates microsecond (us) units by -default, whereas 1.7 detects a unit based on the format of the string. -Here is a comparison.:: - - # NumPy 1.6.1 - >>> np.datetime64('1979-03-22') - 1979-03-22 00:00:00 - # NumPy 1.7.0 - >>> np.datetime64('1979-03-22') - numpy.datetime64('1979-03-22') - - # NumPy 1.6.1, unit default microseconds - >>> np.datetime64('1979-03-22').dtype - dtype('datetime64[us]') - # NumPy 1.7.0, unit of days detected from string - >>> np.datetime64('1979-03-22').dtype - dtype('<M8[D]') - - # NumPy 1.6.1, ignores invalid part of string - >>> np.datetime64('1979-03-2corruptedstring') - 1979-03-02 00:00:00 - # NumPy 1.7.0, raises error for invalid input - >>> np.datetime64('1979-03-2corruptedstring') - Traceback (most recent call last): - File "<stdin>", line 1, in <module> - ValueError: Error parsing datetime string "1979-03-2corruptedstring" at position 8 - - # NumPy 1.6.1, 'nat' produces today's date - >>> np.datetime64('nat') - 2012-04-30 00:00:00 - # NumPy 1.7.0, 'nat' produces not-a-time - >>> np.datetime64('nat') - numpy.datetime64('NaT') - - # NumPy 1.6.1, 'garbage' produces today's date - >>> np.datetime64('garbage') - 2012-04-30 00:00:00 - # NumPy 1.7.0, 'garbage' raises an exception - >>> np.datetime64('garbage') - Traceback (most recent call last): - File "<stdin>", line 1, in <module> - ValueError: Error parsing datetime string "garbage" at position 0 - - # NumPy 1.6.1, can't specify unit in scalar constructor - >>> np.datetime64('1979-03-22T19:00', 'h') - Traceback (most recent call last): - File "<stdin>", line 1, in <module> - TypeError: function takes at most 1 argument (2 given) - # NumPy 1.7.0, unit in scalar constructor - >>> np.datetime64('1979-03-22T19:00', 'h') - numpy.datetime64('1979-03-22T19:00-0500','h') - - # NumPy 1.6.1, reads ISO 8601 strings w/o TZ as UTC - >>> np.array(['1979-03-22T19:00'], dtype='M8[h]') - array([1979-03-22 19:00:00], dtype=datetime64[h]) - # NumPy 1.7.0, reads ISO 8601 strings w/o TZ as local (ISO specifies this) - >>> np.array(['1979-03-22T19:00'], dtype='M8[h]') - array(['1979-03-22T19-0500'], dtype='datetime64[h]') - - # NumPy 1.6.1, doesn't parse all ISO 8601 strings correctly - >>> np.array(['1979-03-22T12'], dtype='M8[h]') - array([1979-03-22 00:00:00], dtype=datetime64[h]) - >>> np.array(['1979-03-22T12:00'], dtype='M8[h]') - array([1979-03-22 12:00:00], dtype=datetime64[h]) - # NumPy 1.7.0, handles this case correctly - >>> np.array(['1979-03-22T12'], dtype='M8[h]') - array(['1979-03-22T12-0500'], dtype='datetime64[h]') - >>> np.array(['1979-03-22T12:00'], dtype='M8[h]') - array(['1979-03-22T12-0500'], dtype='datetime64[h]') - -Unit Conversion -``````````````` - -The 1.6 implementation of datetime does not convert between units correctly.:: - - # NumPy 1.6.1, the representation value is untouched - >>> np.array(['1979-03-22'], dtype='M8[D]') - array([1979-03-22 00:00:00], dtype=datetime64[D]) - >>> np.array(['1979-03-22'], dtype='M8[D]').astype('M8[M]') - array([2250-08-01 00:00:00], dtype=datetime64[M]) - # NumPy 1.7.0, the representation is scaled accordingly - >>> np.array(['1979-03-22'], dtype='M8[D]') - array(['1979-03-22'], dtype='datetime64[D]') - >>> np.array(['1979-03-22'], dtype='M8[D]').astype('M8[M]') - 
array(['1979-03'], dtype='datetime64[M]') - -Datetime Arithmetic -``````````````````` - -The 1.6 implementation of datetime only works correctly for a small subset of -arithmetic operations. Here we show some simple cases.:: - - # NumPy 1.6.1, produces invalid results if units are incompatible - >>> a = np.array(['1979-03-22T12'], dtype='M8[h]') - >>> b = np.array([3*60], dtype='m8[m]') - >>> a + b - array([1970-01-01 00:00:00.080988], dtype=datetime64[us]) - # NumPy 1.7.0, promotes to higher-resolution unit - >>> a = np.array(['1979-03-22T12'], dtype='M8[h]') - >>> b = np.array([3*60], dtype='m8[m]') - >>> a + b - array(['1979-03-22T15:00-0500'], dtype='datetime64[m]') - - # NumPy 1.6.1, arithmetic works if everything is microseconds - >>> a = np.array(['1979-03-22T12:00'], dtype='M8[us]') - >>> b = np.array([3*60*60*1000000], dtype='m8[us]') - >>> a + b - array([1979-03-22 15:00:00], dtype=datetime64[us]) - # NumPy 1.7.0 - >>> a = np.array(['1979-03-22T12:00'], dtype='M8[us]') - >>> b = np.array([3*60*60*1000000], dtype='m8[us]') - >>> a + b - array(['1979-03-22T15:00:00.000000-0500'], dtype='datetime64[us]') +.. _pandas: http://pandas.pydata.org
\ No newline at end of file diff --git a/doc/source/reference/arrays.dtypes.rst b/doc/source/reference/arrays.dtypes.rst index ab743a8ee..231707b11 100644 --- a/doc/source/reference/arrays.dtypes.rst +++ b/doc/source/reference/arrays.dtypes.rst @@ -128,7 +128,7 @@ What can be converted to a data-type object is described below: Used as-is. -:const:`None` +None .. index:: triple: dtype; construction; from None @@ -392,7 +392,7 @@ Type strings their values must each be lists of the same length as the *names* and *formats* lists. The *offsets* value is a list of byte offsets (limited to `ctypes.c_int`) for each field, while the *titles* value is a - list of titles for each field (:const:`None` can be used if no title is + list of titles for each field (None can be used if no title is desired for that field). The *titles* can be any :class:`string` or :class:`unicode` object and will add another entry to the fields dictionary keyed by the title and referencing the same diff --git a/doc/source/reference/arrays.interface.rst b/doc/source/reference/arrays.interface.rst index f361ccb06..f36a083aa 100644 --- a/doc/source/reference/arrays.interface.rst +++ b/doc/source/reference/arrays.interface.rst @@ -138,18 +138,18 @@ This approach to the interface consists of the object having an This attribute can also be an object exposing the :c:func:`buffer interface <PyObject_AsCharBuffer>` which will be used to share the data. If this key is not present (or - returns :class:`None`), then memory sharing will be done + returns None), then memory sharing will be done through the buffer interface of the object itself. In this case, the offset key can be used to indicate the start of the buffer. A reference to the object exposing the array interface must be stored by the new object if the memory area is to be secured. - **Default**: :const:`None` + **Default**: None **strides** (optional) - Either :const:`None` to indicate a C-style contiguous array or + Either None to indicate a C-style contiguous array or a Tuple of strides which provides the number of bytes needed to jump to the next array element in the corresponding dimension. Each entry must be an integer (a Python @@ -157,29 +157,29 @@ This approach to the interface consists of the object having an be larger than can be represented by a C "int" or "long"; the calling code should handle this appropriately, either by raising an error, or by using :c:type:`Py_LONG_LONG` in C. The - default is :const:`None` which implies a C-style contiguous + default is None which implies a C-style contiguous memory buffer. In this model, the last dimension of the array varies the fastest. For example, the default strides tuple for an object whose array entries are 8 bytes long and whose shape is (10,20,30) would be (4800, 240, 8) - **Default**: :const:`None` (C-style contiguous) + **Default**: None (C-style contiguous) **mask** (optional) - :const:`None` or an object exposing the array interface. All + None or an object exposing the array interface. All elements of the mask array should be interpreted only as true or not true indicating which elements of this array are valid. The shape of this object should be `"broadcastable" <arrays.broadcasting.broadcastable>` to the shape of the original array. - **Default**: :const:`None` (All array values are valid) + **Default**: None (All array values are valid) **offset** (optional) An integer offset into the array data region. 
This can only be - used when data is :const:`None` or returns a :class:`buffer` + used when data is None or returns a :class:`buffer` object. **Default**: 0. diff --git a/doc/source/reference/arrays.ndarray.rst b/doc/source/reference/arrays.ndarray.rst index 8f431bc9c..831d211bc 100644 --- a/doc/source/reference/arrays.ndarray.rst +++ b/doc/source/reference/arrays.ndarray.rst @@ -329,7 +329,7 @@ Item selection and manipulation ------------------------------- For array methods that take an *axis* keyword, it defaults to -:const:`None`. If axis is *None*, then the array is treated as a 1-D +*None*. If axis is *None*, then the array is treated as a 1-D array. Any other value for *axis* represents the dimension along which the operation should proceed. diff --git a/doc/source/reference/arrays.nditer.rst b/doc/source/reference/arrays.nditer.rst index fa8183f75..7dab09a71 100644 --- a/doc/source/reference/arrays.nditer.rst +++ b/doc/source/reference/arrays.nditer.rst @@ -115,13 +115,18 @@ context is exited. array([[ 0, 2, 4], [ 6, 8, 10]]) +If you are writing code that needs to support older versions of numpy, +note that prior to 1.15, :class:`nditer` was not a context manager and +did not have a `close` method. Instead it relied on the destructor to +initiate the writeback of the buffer. + Using an External Loop ---------------------- In all the examples so far, the elements of `a` are provided by the iterator one at a time, because all the looping logic is internal to the -iterator. While this is simple and convenient, it is not very efficient. A -better approach is to move the one-dimensional innermost loop into your +iterator. While this is simple and convenient, it is not very efficient. +A better approach is to move the one-dimensional innermost loop into your code, external to the iterator. This way, NumPy's vectorized operations can be used on larger chunks of the elements being visited. @@ -156,41 +161,29 @@ element in a computation. For example, you may want to visit the elements of an array in memory order, but use a C-order, Fortran-order, or multidimensional index to look up values in a different array. -The Python iterator protocol doesn't have a natural way to query these -additional values from the iterator, so we introduce an alternate syntax -for iterating with an :class:`nditer`. This syntax explicitly works -with the iterator object itself, so its properties are readily accessible -during iteration. With this looping construct, the current value is -accessible by indexing into the iterator, and the index being tracked -is the property `index` or `multi_index` depending on what was requested. - -The Python interactive interpreter unfortunately prints out the -values of expressions inside the while loop during each iteration of the -loop. We have modified the output in the examples using this looping -construct in order to be more readable. +The index is tracked by the iterator object itself, and accessible +through the `index` or `multi_index` properties, depending on what was +requested. The examples below show printouts demonstrating the +progression of the index: .. admonition:: Example >>> a = np.arange(6).reshape(2,3) >>> it = np.nditer(a, flags=['f_index']) - >>> while not it.finished: - ... print("%d <%d>" % (it[0], it.index), end=' ') - ... it.iternext() + >>> for x in it: + ... print("%d <%d>" % (x, it.index), end=' ') ... 0 <0> 1 <2> 2 <4> 3 <1> 4 <3> 5 <5> >>> it = np.nditer(a, flags=['multi_index']) - >>> while not it.finished: - ... 
print("%d <%s>" % (it[0], it.multi_index), end=' ') - ... it.iternext() + >>> for x in it: + ... print("%d <%s>" % (x, it.multi_index), end=' ') ... 0 <(0, 0)> 1 <(0, 1)> 2 <(0, 2)> 3 <(1, 0)> 4 <(1, 1)> 5 <(1, 2)> - >>> it = np.nditer(a, flags=['multi_index'], op_flags=['writeonly']) - >>> with it: - .... while not it.finished: - ... it[0] = it.multi_index[1] - it.multi_index[0] - ... it.iternext() + >>> with np.nditer(a, flags=['multi_index'], op_flags=['writeonly']) as it: + ... for x in it: + ... x[...] = it.multi_index[1] - it.multi_index[0] ... >>> a array([[ 0, 1, 2], @@ -199,7 +192,7 @@ construct in order to be more readable. Tracking an index or multi-index is incompatible with using an external loop, because it requires a different index value per element. If you try to combine these flags, the :class:`nditer` object will -raise an exception +raise an exception. .. admonition:: Example @@ -209,6 +202,42 @@ raise an exception File "<stdin>", line 1, in <module> ValueError: Iterator flag EXTERNAL_LOOP cannot be used if an index or multi-index is being tracked +Alternative Looping and Element Access +-------------------------------------- + +To make its properties more readily accessible during iteration, +:class:`nditer` has an alternative syntax for iterating, which works +explicitly with the iterator object itself. With this looping construct, +the current value is accessible by indexing into the iterator. Other +properties, such as tracked indices remain as before. The examples below +produce identical results to the ones in the previous section. + +.. admonition:: Example + + >>> a = np.arange(6).reshape(2,3) + >>> it = np.nditer(a, flags=['f_index']) + >>> while not it.finished: + ... print("%d <%d>" % (it[0], it.index), end=' ') + ... it.iternext() + ... + 0 <0> 1 <2> 2 <4> 3 <1> 4 <3> 5 <5> + + >>> it = np.nditer(a, flags=['multi_index']) + >>> while not it.finished: + ... print("%d <%s>" % (it[0], it.multi_index), end=' ') + ... it.iternext() + ... + 0 <(0, 0)> 1 <(0, 1)> 2 <(0, 2)> 3 <(1, 0)> 4 <(1, 1)> 5 <(1, 2)> + + >>> with np.nditer(a, flags=['multi_index'], op_flags=['writeonly']) as it: + ... while not it.finished: + ... it[0] = it.multi_index[1] - it.multi_index[0] + ... it.iternext() + ... + >>> a + array([[ 0, 1, 2], + [-1, 0, 1]]) + Buffering the Array Elements ---------------------------- diff --git a/doc/source/reference/c-api/array.rst b/doc/source/reference/c-api/array.rst index a2b56cee7..0530a5747 100644 --- a/doc/source/reference/c-api/array.rst +++ b/doc/source/reference/c-api/array.rst @@ -226,7 +226,7 @@ From scratch If *data* is not ``NULL``, then it is assumed to point to the memory to be used for the array and the *flags* argument is used as the - new flags for the array (except the state of :c:data:`NPY_OWNDATA`, + new flags for the array (except the state of :c:data:`NPY_ARRAY_OWNDATA`, :c:data:`NPY_ARRAY_WRITEBACKIFCOPY` and :c:data:`NPY_ARRAY_UPDATEIFCOPY` flags of the new array will be reset). @@ -916,122 +916,126 @@ enumerated array data type. For the array type checking macros the argument must be a :c:type:`PyObject *<PyObject>` that can be directly interpreted as a :c:type:`PyArrayObject *`. -.. c:function:: PyTypeNum_ISUNSIGNED(num) +.. c:function:: PyTypeNum_ISUNSIGNED(int num) -.. c:function:: PyDataType_ISUNSIGNED(descr) +.. c:function:: PyDataType_ISUNSIGNED(PyArray_Descr *descr) -.. c:function:: PyArray_ISUNSIGNED(obj) +.. c:function:: PyArray_ISUNSIGNED(PyArrayObject *obj) Type represents an unsigned integer. -.. 
c:function:: PyTypeNum_ISSIGNED(num) +.. c:function:: PyTypeNum_ISSIGNED(int num) -.. c:function:: PyDataType_ISSIGNED(descr) +.. c:function:: PyDataType_ISSIGNED(PyArray_Descr *descr) -.. c:function:: PyArray_ISSIGNED(obj) +.. c:function:: PyArray_ISSIGNED(PyArrayObject *obj) Type represents a signed integer. -.. c:function:: PyTypeNum_ISINTEGER(num) +.. c:function:: PyTypeNum_ISINTEGER(int num) -.. c:function:: PyDataType_ISINTEGER(descr) +.. c:function:: PyDataType_ISINTEGER(PyArray_Descr* descr) -.. c:function:: PyArray_ISINTEGER(obj) +.. c:function:: PyArray_ISINTEGER(PyArrayObject *obj) Type represents any integer. -.. c:function:: PyTypeNum_ISFLOAT(num) +.. c:function:: PyTypeNum_ISFLOAT(int num) -.. c:function:: PyDataType_ISFLOAT(descr) +.. c:function:: PyDataType_ISFLOAT(PyArray_Descr* descr) -.. c:function:: PyArray_ISFLOAT(obj) +.. c:function:: PyArray_ISFLOAT(PyArrayObject *obj) Type represents any floating point number. -.. c:function:: PyTypeNum_ISCOMPLEX(num) +.. c:function:: PyTypeNum_ISCOMPLEX(int num) -.. c:function:: PyDataType_ISCOMPLEX(descr) +.. c:function:: PyDataType_ISCOMPLEX(PyArray_Descr* descr) -.. c:function:: PyArray_ISCOMPLEX(obj) +.. c:function:: PyArray_ISCOMPLEX(PyArrayObject *obj) Type represents any complex floating point number. -.. c:function:: PyTypeNum_ISNUMBER(num) +.. c:function:: PyTypeNum_ISNUMBER(int num) -.. c:function:: PyDataType_ISNUMBER(descr) +.. c:function:: PyDataType_ISNUMBER(PyArray_Descr* descr) -.. c:function:: PyArray_ISNUMBER(obj) +.. c:function:: PyArray_ISNUMBER(PyArrayObject *obj) Type represents any integer, floating point, or complex floating point number. -.. c:function:: PyTypeNum_ISSTRING(num) +.. c:function:: PyTypeNum_ISSTRING(int num) -.. c:function:: PyDataType_ISSTRING(descr) +.. c:function:: PyDataType_ISSTRING(PyArray_Descr* descr) -.. c:function:: PyArray_ISSTRING(obj) +.. c:function:: PyArray_ISSTRING(PyArrayObject *obj) Type represents a string data type. -.. c:function:: PyTypeNum_ISPYTHON(num) +.. c:function:: PyTypeNum_ISPYTHON(int num) -.. c:function:: PyDataType_ISPYTHON(descr) +.. c:function:: PyDataType_ISPYTHON(PyArray_Descr* descr) -.. c:function:: PyArray_ISPYTHON(obj) +.. c:function:: PyArray_ISPYTHON(PyArrayObject *obj) Type represents an enumerated type corresponding to one of the standard Python scalar (bool, int, float, or complex). -.. c:function:: PyTypeNum_ISFLEXIBLE(num) +.. c:function:: PyTypeNum_ISFLEXIBLE(int num) -.. c:function:: PyDataType_ISFLEXIBLE(descr) +.. c:function:: PyDataType_ISFLEXIBLE(PyArray_Descr* descr) -.. c:function:: PyArray_ISFLEXIBLE(obj) +.. c:function:: PyArray_ISFLEXIBLE(PyArrayObject *obj) Type represents one of the flexible array types ( :c:data:`NPY_STRING`, :c:data:`NPY_UNICODE`, or :c:data:`NPY_VOID` ). -.. c:function:: PyDataType_ISUNSIZED(descr): +.. c:function:: PyDataType_ISUNSIZED(PyArray_Descr* descr): Type has no size information attached, and can be resized. Should only be called on flexible dtypes. Types that are attached to an array will always be sized, hence the array form of this macro not existing. -.. c:function:: PyTypeNum_ISUSERDEF(num) + .. versionchanged:: 1.18 -.. c:function:: PyDataType_ISUSERDEF(descr) + For structured datatypes with no fields this function now returns False. -.. c:function:: PyArray_ISUSERDEF(obj) +.. c:function:: PyTypeNum_ISUSERDEF(int num) + +.. c:function:: PyDataType_ISUSERDEF(PyArray_Descr* descr) + +.. c:function:: PyArray_ISUSERDEF(PyArrayObject *obj) Type represents a user-defined type. -.. 
c:function:: PyTypeNum_ISEXTENDED(num) +.. c:function:: PyTypeNum_ISEXTENDED(int num) -.. c:function:: PyDataType_ISEXTENDED(descr) +.. c:function:: PyDataType_ISEXTENDED(PyArray_Descr* descr) -.. c:function:: PyArray_ISEXTENDED(obj) +.. c:function:: PyArray_ISEXTENDED(PyArrayObject *obj) Type is either flexible or user-defined. -.. c:function:: PyTypeNum_ISOBJECT(num) +.. c:function:: PyTypeNum_ISOBJECT(int num) -.. c:function:: PyDataType_ISOBJECT(descr) +.. c:function:: PyDataType_ISOBJECT(PyArray_Descr* descr) -.. c:function:: PyArray_ISOBJECT(obj) +.. c:function:: PyArray_ISOBJECT(PyArrayObject *obj) Type represents object data type. -.. c:function:: PyTypeNum_ISBOOL(num) +.. c:function:: PyTypeNum_ISBOOL(int num) -.. c:function:: PyDataType_ISBOOL(descr) +.. c:function:: PyDataType_ISBOOL(PyArray_Descr* descr) -.. c:function:: PyArray_ISBOOL(obj) +.. c:function:: PyArray_ISBOOL(PyArrayObject *obj) Type represents Boolean data type. -.. c:function:: PyDataType_HASFIELDS(descr) +.. c:function:: PyDataType_HASFIELDS(PyArray_Descr* descr) -.. c:function:: PyArray_HASFIELDS(obj) +.. c:function:: PyArray_HASFIELDS(PyArrayObject *obj) Type has fields associated with it. @@ -1580,7 +1584,7 @@ Flag checking For all of these macros *arr* must be an instance of a (subclass of) :c:data:`PyArray_Type`. -.. c:function:: PyArray_CHKFLAGS(arr, flags) +.. c:function:: PyArray_CHKFLAGS(PyObject *arr, flags) The first parameter, arr, must be an ndarray or subclass. The parameter, *flags*, should be an integer consisting of bitwise @@ -1590,60 +1594,60 @@ For all of these macros *arr* must be an instance of a (subclass of) :c:data:`NPY_ARRAY_WRITEABLE`, :c:data:`NPY_ARRAY_WRITEBACKIFCOPY`, :c:data:`NPY_ARRAY_UPDATEIFCOPY`. -.. c:function:: PyArray_IS_C_CONTIGUOUS(arr) +.. c:function:: PyArray_IS_C_CONTIGUOUS(PyObject *arr) Evaluates true if *arr* is C-style contiguous. -.. c:function:: PyArray_IS_F_CONTIGUOUS(arr) +.. c:function:: PyArray_IS_F_CONTIGUOUS(PyObject *arr) Evaluates true if *arr* is Fortran-style contiguous. -.. c:function:: PyArray_ISFORTRAN(arr) +.. c:function:: PyArray_ISFORTRAN(PyObject *arr) Evaluates true if *arr* is Fortran-style contiguous and *not* C-style contiguous. :c:func:`PyArray_IS_F_CONTIGUOUS` is the correct way to test for Fortran-style contiguity. -.. c:function:: PyArray_ISWRITEABLE(arr) +.. c:function:: PyArray_ISWRITEABLE(PyObject *arr) Evaluates true if the data area of *arr* can be written to -.. c:function:: PyArray_ISALIGNED(arr) +.. c:function:: PyArray_ISALIGNED(PyObject *arr) Evaluates true if the data area of *arr* is properly aligned on the machine. -.. c:function:: PyArray_ISBEHAVED(arr) +.. c:function:: PyArray_ISBEHAVED(PyObject *arr) Evaluates true if the data area of *arr* is aligned and writeable and in machine byte-order according to its descriptor. -.. c:function:: PyArray_ISBEHAVED_RO(arr) +.. c:function:: PyArray_ISBEHAVED_RO(PyObject *arr) Evaluates true if the data area of *arr* is aligned and in machine byte-order. -.. c:function:: PyArray_ISCARRAY(arr) +.. c:function:: PyArray_ISCARRAY(PyObject *arr) Evaluates true if the data area of *arr* is C-style contiguous, and :c:func:`PyArray_ISBEHAVED` (*arr*) is true. -.. c:function:: PyArray_ISFARRAY(arr) +.. c:function:: PyArray_ISFARRAY(PyObject *arr) Evaluates true if the data area of *arr* is Fortran-style contiguous and :c:func:`PyArray_ISBEHAVED` (*arr*) is true. -.. c:function:: PyArray_ISCARRAY_RO(arr) +.. 
c:function:: PyArray_ISCARRAY_RO(PyObject *arr) Evaluates true if the data area of *arr* is C-style contiguous, aligned, and in machine byte-order. -.. c:function:: PyArray_ISFARRAY_RO(arr) +.. c:function:: PyArray_ISFARRAY_RO(PyObject *arr) Evaluates true if the data area of *arr* is Fortran-style contiguous, aligned, and in machine byte-order. -.. c:function:: PyArray_ISONESEGMENT(arr) +.. c:function:: PyArray_ISONESEGMENT(PyObject *arr) Evaluates true if the data area of *arr* consists of a single (C-style or Fortran-style) contiguous segment. @@ -2049,7 +2053,7 @@ Calculation .. tip:: Pass in :c:data:`NPY_MAXDIMS` for axis in order to achieve the same - effect that is obtained by passing in *axis* = :const:`None` in Python + effect that is obtained by passing in ``axis=None`` in Python (treating the array as a 1-d array). @@ -2655,18 +2659,27 @@ cost of a slight overhead. The mode should be one of: .. c:macro:: NPY_NEIGHBORHOOD_ITER_ZERO_PADDING + Zero padding. Outside bounds values will be 0. + .. c:macro:: NPY_NEIGHBORHOOD_ITER_ONE_PADDING + One padding. Outside bounds values will be 1. + .. c:macro:: NPY_NEIGHBORHOOD_ITER_CONSTANT_PADDING + Constant padding. Outside bounds values will be the same as the first item in fill_value. + .. c:macro:: NPY_NEIGHBORHOOD_ITER_MIRROR_PADDING + Mirror padding. Outside bounds values will be as if the array items were mirrored. For example, for the array [1, 2, 3, 4], x[-2] will be 2, x[-1] will be 1, x[4] will be 4, x[5] will be 3, etc... + .. c:macro:: NPY_NEIGHBORHOOD_ITER_CIRCULAR_PADDING + Circular padding. Outside bounds values will be as if the array was repeated. For example, for the array [1, 2, 3, 4], x[-2] will be 3, x[-1] will be 4, x[4] will be 1, x[5] will be 2, etc... @@ -2793,10 +2806,7 @@ Array Scalars *arr* is not ``NULL`` and the first element is negative then :c:data:`NPY_INTNEG_SCALAR` is returned, otherwise :c:data:`NPY_INTPOS_SCALAR` is returned. The possible return values - are :c:data:`NPY_{kind}_SCALAR` where ``{kind}`` can be **INTPOS**, - **INTNEG**, **FLOAT**, **COMPLEX**, **BOOL**, or **OBJECT**. - :c:data:`NPY_NOSCALAR` is also an enumerated value - :c:type:`NPY_SCALARKIND` variables can take on. + are the enumerated values in :c:type:`NPY_SCALARKIND`. .. c:function:: int PyArray_CanCoerceScalar( \ char thistype, char neededtype, NPY_SCALARKIND scalar) @@ -3507,6 +3517,10 @@ Miscellaneous Macros Evaluates as True if arrays *a1* and *a2* have the same shape. +.. c:var:: a + +.. c:var:: b + .. c:macro:: PyArray_MAX(a,b) Returns the maximum of *a* and *b*. If (*a*) or (*b*) are @@ -3592,11 +3606,21 @@ Enumerated Types A special variable type indicating the number of "kinds" of scalars distinguished in determining scalar-coercion rules. This - variable can take on the values :c:data:`NPY_{KIND}` where ``{KIND}`` can be + variable can take on the values: + + .. c:var:: NPY_NOSCALAR + + .. c:var:: NPY_BOOL_SCALAR + + .. c:var:: NPY_INTPOS_SCALAR + + .. c:var:: NPY_INTNEG_SCALAR + + .. c:var:: NPY_FLOAT_SCALAR + + .. c:var:: NPY_COMPLEX_SCALAR - **NOSCALAR**, **BOOL_SCALAR**, **INTPOS_SCALAR**, - **INTNEG_SCALAR**, **FLOAT_SCALAR**, **COMPLEX_SCALAR**, - **OBJECT_SCALAR** + .. c:var:: NPY_OBJECT_SCALAR .. c:var:: NPY_NSCALARKINDS diff --git a/doc/source/reference/c-api/ufunc.rst b/doc/source/reference/c-api/ufunc.rst index 92a679510..c9cc60141 100644 --- a/doc/source/reference/c-api/ufunc.rst +++ b/doc/source/reference/c-api/ufunc.rst @@ -198,10 +198,10 @@ Functions to calling PyUFunc_FromFuncAndData. 
A copy of the string is made, so the passed in buffer can be freed. -.. c:function:: PyObject* PyUFunc_FromFuncAndDataAndSignatureAndIdentity( +.. c:function:: PyObject* PyUFunc_FromFuncAndDataAndSignatureAndIdentity( \ PyUFuncGenericFunction *func, void **data, char *types, int ntypes, \ - int nin, int nout, int identity, char *name, char *doc, int unused, char *signature, - PyObject *identity_value) + int nin, int nout, int identity, char *name, char *doc, int unused, \ + char *signature, PyObject *identity_value) This function is very similar to `PyUFunc_FromFuncAndDataAndSignature` above, but has an extra *identity_value* argument, to define an arbitrary identity diff --git a/doc/source/reference/distutils.rst b/doc/source/reference/distutils.rst index 46e5ec25e..a22db3e8e 100644 --- a/doc/source/reference/distutils.rst +++ b/doc/source/reference/distutils.rst @@ -22,38 +22,30 @@ information is available in the :ref:`distutils-user-guide`. Modules in :mod:`numpy.distutils` ================================= +.. toctree:: + :maxdepth: 2 -misc_util ---------- + distutils/misc_util -.. module:: numpy.distutils.misc_util + +.. currentmodule:: numpy.distutils .. autosummary:: :toctree: generated/ - get_numpy_include_dirs - dict_append - appendpath - allpath - dot_join - generate_config_py - get_cmd - terminal_has_colors - red_text - green_text - yellow_text - blue_text - cyan_text - cyg2win32 - all_strings - has_f_sources - has_cxx_sources - filter_sources - get_dependencies - is_local_src_dir - get_ext_source_files - get_script_files + ccompiler + cpuinfo.cpu + core.Extension + exec_command + log.set_verbosity + system_info.get_info + system_info.get_standard_file + + +Configuration class +=================== +.. currentmodule:: numpy.distutils.misc_util .. class:: Configuration(package_name=None, parent_name=None, top_path=None, package_path=None, **attrs) @@ -109,20 +101,6 @@ misc_util .. automethod:: get_info -Other modules -------------- - -.. currentmodule:: numpy.distutils - -.. autosummary:: - :toctree: generated/ - - system_info.get_info - system_info.get_standard_file - cpuinfo.cpu - log.set_verbosity - exec_command - Building Installable C libraries ================================ diff --git a/doc/source/reference/distutils/misc_util.rst b/doc/source/reference/distutils/misc_util.rst new file mode 100644 index 000000000..bbb83a5ab --- /dev/null +++ b/doc/source/reference/distutils/misc_util.rst @@ -0,0 +1,7 @@ +distutils.misc_util +=================== + +.. automodule:: numpy.distutils.misc_util + :members: + :undoc-members: + :exclude-members: Configuration diff --git a/doc/source/reference/maskedarray.baseclass.rst b/doc/source/reference/maskedarray.baseclass.rst index 204ebfe08..5bbdd0299 100644 --- a/doc/source/reference/maskedarray.baseclass.rst +++ b/doc/source/reference/maskedarray.baseclass.rst @@ -160,9 +160,9 @@ replaced with ``n`` integers which will be interpreted as an n-tuple. Item selection and manipulation ------------------------------- -For array methods that take an *axis* keyword, it defaults to `None`. -If axis is *None*, then the array is treated as a 1-D array. -Any other value for *axis* represents the dimension along which +For array methods that take an ``axis`` keyword, it defaults to None. +If axis is None, then the array is treated as a 1-D array. +Any other value for ``axis`` represents the dimension along which the operation should proceed. .. 
autosummary:: diff --git a/doc/source/reference/maskedarray.generic.rst b/doc/source/reference/maskedarray.generic.rst index 7375d60fb..41c3ee564 100644 --- a/doc/source/reference/maskedarray.generic.rst +++ b/doc/source/reference/maskedarray.generic.rst @@ -74,7 +74,7 @@ To create an array with the second element invalid, we would do:: To create a masked array where all values close to 1.e20 are invalid, we would do:: - >>> z = masked_values([1.0, 1.e20, 3.0, 4.0], 1.e20) + >>> z = ma.masked_values([1.0, 1.e20, 3.0, 4.0], 1.e20) For a complete discussion of creation methods for masked arrays please see section :ref:`Constructing masked arrays <maskedarray.generic.constructing>`. @@ -110,15 +110,15 @@ There are several ways to construct a masked array. >>> x = np.array([1, 2, 3]) >>> x.view(ma.MaskedArray) - masked_array(data = [1 2 3], - mask = False, - fill_value = 999999) + masked_array(data=[1, 2, 3], + mask=False, + fill_value=999999) >>> x = np.array([(1, 1.), (2, 2.)], dtype=[('a',int), ('b', float)]) >>> x.view(ma.MaskedArray) - masked_array(data = [(1, 1.0) (2, 2.0)], - mask = [(False, False) (False, False)], - fill_value = (999999, 1e+20), - dtype = [('a', '<i4'), ('b', '<f8')]) + masked_array(data=[(1, 1.0), (2, 2.0)], + mask=[(False, False), (False, False)], + fill_value=(999999, 1.e+20), + dtype=[('a', '<i8'), ('b', '<f8')]) * Yet another possibility is to use any of the following functions: @@ -195,9 +195,9 @@ index. The inverse of the mask can be calculated with the >>> x = ma.array([[1, 2], [3, 4]], mask=[[0, 1], [1, 0]]) >>> x[~x.mask] - masked_array(data = [1 4], - mask = [False False], - fill_value = 999999) + masked_array(data=[1, 4], + mask=[False, False], + fill_value=999999) Another way to retrieve the valid data is to use the :meth:`compressed` method, which returns a one-dimensional :class:`~numpy.ndarray` (or one of its @@ -223,27 +223,26 @@ as invalid is to assign the special value :attr:`masked` to them:: >>> x = ma.array([1, 2, 3]) >>> x[0] = ma.masked >>> x - masked_array(data = [-- 2 3], - mask = [ True False False], - fill_value = 999999) + masked_array(data=[--, 2, 3], + mask=[ True, False, False], + fill_value=999999) >>> y = ma.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) >>> y[(0, 1, 2), (1, 2, 0)] = ma.masked >>> y - masked_array(data = - [[1 -- 3] - [4 5 --] - [-- 8 9]], - mask = - [[False True False] - [False False True] - [ True False False]], - fill_value = 999999) + masked_array( + data=[[1, --, 3], + [4, 5, --], + [--, 8, 9]], + mask=[[False, True, False], + [False, False, True], + [ True, False, False]], + fill_value=999999) >>> z = ma.array([1, 2, 3, 4]) >>> z[:-2] = ma.masked >>> z - masked_array(data = [-- -- 3 4], - mask = [ True True False False], - fill_value = 999999) + masked_array(data=[--, --, 3, 4], + mask=[ True, True, False, False], + fill_value=999999) A second possibility is to modify the :attr:`~MaskedArray.mask` directly, @@ -263,9 +262,10 @@ mask:: >>> x = ma.array([1, 2, 3], mask=[0, 0, 1]) >>> x.mask = True >>> x - masked_array(data = [-- -- --], - mask = [ True True True], - fill_value = 999999) + masked_array(data=[--, --, --], + mask=[ True, True, True], + fill_value=999999, + dtype=int64) Finally, specific entries can be masked and/or unmasked by assigning to the mask a sequence of booleans:: @@ -273,9 +273,9 @@ mask a sequence of booleans:: >>> x = ma.array([1, 2, 3]) >>> x.mask = [0, 1, 0] >>> x - masked_array(data = [1 -- 3], - mask = [False True False], - fill_value = 999999) + masked_array(data=[1, --, 3], + mask=[False, 
True, False], + fill_value=999999) Unmasking an entry ~~~~~~~~~~~~~~~~~~ @@ -285,14 +285,14 @@ new valid values to them:: >>> x = ma.array([1, 2, 3], mask=[0, 0, 1]) >>> x - masked_array(data = [1 2 --], - mask = [False False True], - fill_value = 999999) + masked_array(data=[1, 2, --], + mask=[False, False, True], + fill_value=999999) >>> x[-1] = 5 >>> x - masked_array(data = [1 2 5], - mask = [False False False], - fill_value = 999999) + masked_array(data=[1, 2, 5], + mask=[False, False, False], + fill_value=999999) .. note:: Unmasking an entry by direct assignment will silently fail if the masked @@ -304,21 +304,27 @@ new valid values to them:: >>> x = ma.array([1, 2, 3], mask=[0, 0, 1], hard_mask=True) >>> x - masked_array(data = [1 2 --], - mask = [False False True], - fill_value = 999999) + masked_array(data=[1, 2, --], + mask=[False, False, True], + fill_value=999999) >>> x[-1] = 5 >>> x - masked_array(data = [1 2 --], - mask = [False False True], - fill_value = 999999) + masked_array(data=[1, 2, --], + mask=[False, False, True], + fill_value=999999) >>> x.soften_mask() + masked_array(data=[1, 2, --], + mask=[False, False, True], + fill_value=999999) >>> x[-1] = 5 >>> x - masked_array(data = [1 2 5], - mask = [False False False], - fill_value = 999999) + masked_array(data=[1, 2, 5], + mask=[False, False, False], + fill_value=999999) >>> x.harden_mask() + masked_array(data=[1, 2, 5], + mask=[False, False, False], + fill_value=999999) To unmask all masked entries of a masked array (provided the mask isn't a hard @@ -327,15 +333,14 @@ mask:: >>> x = ma.array([1, 2, 3], mask=[0, 0, 1]) >>> x - masked_array(data = [1 2 --], - mask = [False False True], - fill_value = 999999) + masked_array(data=[1, 2, --], + mask=[False, False, True], + fill_value=999999) >>> x.mask = ma.nomask >>> x - masked_array(data = [1 2 3], - mask = [False False False], - fill_value = 999999) - + masked_array(data=[1, 2, 3], + mask=[False, False, False], + fill_value=999999) Indexing and slicing @@ -353,9 +358,7 @@ the mask is ``True``):: >>> x[0] 1 >>> x[-1] - masked_array(data = --, - mask = True, - fill_value = 1e+20) + masked >>> x[-1] is ma.masked True @@ -370,10 +373,7 @@ is masked. >>> y[0] (1, 2) >>> y[-1] - masked_array(data = (3, --), - mask = (False, True), - fill_value = (999999, 999999), - dtype = [('a', '<i4'), ('b', '<i4')]) + (3, --) When accessing a slice, the output is a masked array whose @@ -385,20 +385,19 @@ required to ensure propagation of any modification of the mask to the original. >>> x = ma.array([1, 2, 3, 4, 5], mask=[0, 1, 0, 0, 1]) >>> mx = x[:3] >>> mx - masked_array(data = [1 -- 3], - mask = [False True False], - fill_value = 999999) + masked_array(data=[1, --, 3], + mask=[False, True, False], + fill_value=999999) >>> mx[1] = -1 >>> mx - masked_array(data = [1 -1 3], - mask = [False False False], - fill_value = 999999) + masked_array(data=[1, -1, 3], + mask=[False, False, False], + fill_value=999999) >>> x.mask - array([False, True, False, False, True]) + array([False, False, False, False, True]) >>> x.data array([ 1, -1, 3, 4, 5]) - Accessing a field of a masked array with structured datatype returns a :class:`MaskedArray`. @@ -421,9 +420,9 @@ ufuncs. 
Unary and binary functions that have a validity domain (such as constant whenever the input is masked or falls outside the validity domain:: >>> ma.log([-1, 0, 1, 2]) - masked_array(data = [-- -- 0.0 0.69314718056], - mask = [ True True False False], - fill_value = 1e+20) + masked_array(data=[--, --, 0.0, 0.6931471805599453], + mask=[ True, True, False, False], + fill_value=1e+20) Masked arrays also support standard numpy ufuncs. The output is then a masked array. The result of a unary ufunc is masked wherever the input is masked. The @@ -435,10 +434,9 @@ input fall outside the validity domain:: >>> x = ma.array([-1, 1, 0, 2, 3], mask=[0, 0, 0, 0, 1]) >>> np.log(x) - masked_array(data = [-- -- 0.0 0.69314718056 --], - mask = [ True True False False True], - fill_value = 1e+20) - + masked_array(data=[--, 0.0, --, 0.6931471805599453, --], + mask=[ True, False, True, False, True], + fill_value=1e+20) Examples @@ -454,11 +452,11 @@ of anomalies (deviations from the average):: >>> import numpy.ma as ma >>> x = [0.,1.,-9999.,3.,4.] >>> mx = ma.masked_values (x, -9999.) - >>> print mx.mean() + >>> print(mx.mean()) 2.0 - >>> print mx - mx.mean() + >>> print(mx - mx.mean()) [-2.0 -1.0 -- 1.0 2.0] - >>> print mx.anom() + >>> print(mx.anom()) [-2.0 -1.0 -- 1.0 2.0] @@ -468,7 +466,7 @@ Filling in the missing data Suppose now that we wish to print that same data, but with the missing values replaced by the average value. - >>> print mx.filled(mx.mean()) + >>> print(mx.filled(mx.mean())) [ 0. 1. 2. 3. 4.] @@ -478,10 +476,10 @@ Numerical operations Numerical operations can be easily performed without worrying about missing values, dividing by zero, square roots of negative numbers, etc.:: - >>> import numpy as np, numpy.ma as ma + >>> import numpy.ma as ma >>> x = ma.array([1., -1., 3., 4., 5., 6.], mask=[0,0,0,0,1,0]) >>> y = ma.array([1., 2., 0., 4., 5., 6.], mask=[0,0,0,0,0,1]) - >>> print np.sqrt(x/y) + >>> print(ma.sqrt(x/y)) [1.0 -- -- 1.0 -- --] Four values of the output are invalid: the first one comes from taking the @@ -492,8 +490,10 @@ the last two where the inputs were masked. Ignoring extreme values ----------------------- -Let's consider an array ``d`` of random floats between 0 and 1. We wish to +Let's consider an array ``d`` of floats between 0 and 1. We wish to compute the average of the values of ``d`` while ignoring any data outside -the range ``[0.1, 0.9]``:: +the range ``[0.2, 0.9]``:: - >>> print ma.masked_outside(d, 0.1, 0.9).mean() + >>> d = np.linspace(0, 1, 20) + >>> print(d.mean() - ma.masked_outside(d, 0.2, 0.9).mean()) + -0.05263157894736836 diff --git a/doc/source/reference/random/bit_generators/bitgenerators.rst b/doc/source/reference/random/bit_generators/bitgenerators.rst deleted file mode 100644 index 1474f7dac..000000000 --- a/doc/source/reference/random/bit_generators/bitgenerators.rst +++ /dev/null @@ -1,11 +0,0 @@ -:orphan: - -BitGenerator ------------- - -.. currentmodule:: numpy.random.bit_generator - -.. autosummary:: - :toctree: generated/ - - BitGenerator diff --git a/doc/source/reference/random/bit_generators/index.rst b/doc/source/reference/random/bit_generators/index.rst index 35d9e5d09..94d3d8a3c 100644 --- a/doc/source/reference/random/bit_generators/index.rst +++ b/doc/source/reference/random/bit_generators/index.rst @@ -1,5 +1,3 @@ -.. _bit_generator: - .. currentmodule:: numpy.random Bit Generators @@ -35,14 +33,18 @@ The included BitGenerators are: .. _`Random123`: https://www.deshawresearch.com/resources_random123.html .. 
_`SFC author's page`: http://pracrand.sourceforge.net/RNG_engines.txt +.. autosummary:: + :toctree: generated/ + + BitGenerator + .. toctree:: - :maxdepth: 1 + :maxdepth: 1 - BitGenerator <bitgenerators> - MT19937 <mt19937> - PCG64 <pcg64> - Philox <philox> - SFC64 <sfc64> + MT19937 <mt19937> + PCG64 <pcg64> + Philox <philox> + SFC64 <sfc64> Seeding and Entropy ------------------- @@ -53,14 +55,14 @@ seed. All of the provided BitGenerators will take an arbitrary-sized non-negative integer, or a list of such integers, as a seed. BitGenerators need to take those inputs and process them into a high-quality internal state for the BitGenerator. All of the BitGenerators in numpy delegate that task to -`~SeedSequence`, which uses hashing techniques to ensure that even low-quality +`SeedSequence`, which uses hashing techniques to ensure that even low-quality seeds generate high-quality initial states. .. code-block:: python - from numpy.random import PCG64 + from numpy.random import PCG64 - bg = PCG64(12345678903141592653589793) + bg = PCG64(12345678903141592653589793) .. end_block @@ -75,14 +77,14 @@ user, which is up to you. .. code-block:: python - from numpy.random import PCG64, SeedSequence + from numpy.random import PCG64, SeedSequence - # Get the user's seed somehow, maybe through `argparse`. - # If the user did not provide a seed, it should return `None`. - seed = get_user_seed() - ss = SeedSequence(seed) - print('seed = {}'.format(ss.entropy)) - bg = PCG64(ss) + # Get the user's seed somehow, maybe through `argparse`. + # If the user did not provide a seed, it should return `None`. + seed = get_user_seed() + ss = SeedSequence(seed) + print('seed = {}'.format(ss.entropy)) + bg = PCG64(ss) .. end_block @@ -104,9 +106,6 @@ or using ``secrets.randbits(128)`` from the standard library are both convenient ways. .. autosummary:: - :toctree: generated/ + :toctree: generated/ SeedSequence - bit_generator.ISeedSequence - bit_generator.ISpawnableSeedSequence - bit_generator.SeedlessSeedSequence diff --git a/doc/source/reference/random/bit_generators/mt19937.rst b/doc/source/reference/random/bit_generators/mt19937.rst index 25ba1d7b5..71875db4e 100644 --- a/doc/source/reference/random/bit_generators/mt19937.rst +++ b/doc/source/reference/random/bit_generators/mt19937.rst @@ -1,9 +1,7 @@ -Mersenne Twister (MT19937) +Mersenne Twister (MT19937) -------------------------- -.. module:: numpy.random.mt19937 - -.. currentmodule:: numpy.random.mt19937 +.. currentmodule:: numpy.random .. autoclass:: MT19937 :exclude-members: diff --git a/doc/source/reference/random/bit_generators/pcg64.rst b/doc/source/reference/random/bit_generators/pcg64.rst index 7aef1e0dd..5881b7008 100644 --- a/doc/source/reference/random/bit_generators/pcg64.rst +++ b/doc/source/reference/random/bit_generators/pcg64.rst @@ -1,9 +1,7 @@ Parallel Congruent Generator (64-bit, PCG64) -------------------------------------------- -.. module:: numpy.random.pcg64 - -.. currentmodule:: numpy.random.pcg64 +.. currentmodule:: numpy.random .. autoclass:: PCG64 :exclude-members: diff --git a/doc/source/reference/random/bit_generators/philox.rst b/doc/source/reference/random/bit_generators/philox.rst index 5e581e094..8eba2d351 100644 --- a/doc/source/reference/random/bit_generators/philox.rst +++ b/doc/source/reference/random/bit_generators/philox.rst @@ -1,9 +1,7 @@ Philox Counter-based RNG ------------------------ -.. module:: numpy.random.philox - -.. currentmodule:: numpy.random.philox +.. currentmodule:: numpy.random .. 
autoclass:: Philox :exclude-members: diff --git a/doc/source/reference/random/bit_generators/sfc64.rst b/doc/source/reference/random/bit_generators/sfc64.rst index dc03820ae..d34124a33 100644 --- a/doc/source/reference/random/bit_generators/sfc64.rst +++ b/doc/source/reference/random/bit_generators/sfc64.rst @@ -1,9 +1,7 @@ SFC64 Small Fast Chaotic PRNG ----------------------------- -.. module:: numpy.random.sfc64 - -.. currentmodule:: numpy.random.sfc64 +.. currentmodule:: numpy.random .. autoclass:: SFC64 :exclude-members: diff --git a/doc/source/reference/random/entropy.rst b/doc/source/reference/random/entropy.rst deleted file mode 100644 index 0664da6f9..000000000 --- a/doc/source/reference/random/entropy.rst +++ /dev/null @@ -1,6 +0,0 @@ -System Entropy -============== - -.. module:: numpy.random.entropy - -.. autofunction:: random_entropy diff --git a/doc/source/reference/random/generator.rst b/doc/source/reference/random/generator.rst index 068143270..a2cbb493a 100644 --- a/doc/source/reference/random/generator.rst +++ b/doc/source/reference/random/generator.rst @@ -62,6 +62,7 @@ Distributions ~numpy.random.Generator.lognormal ~numpy.random.Generator.logseries ~numpy.random.Generator.multinomial + ~numpy.random.Generator.multivariate_hypergeometric ~numpy.random.Generator.multivariate_normal ~numpy.random.Generator.negative_binomial ~numpy.random.Generator.noncentral_chisquare diff --git a/doc/source/reference/random/index.rst b/doc/source/reference/random/index.rst index 01f9981a2..9b19620d8 100644 --- a/doc/source/reference/random/index.rst +++ b/doc/source/reference/random/index.rst @@ -123,7 +123,7 @@ The `Generator` is the user-facing object that is nearly identical to rg.random() One can also instantiate `Generator` directly with a `BitGenerator` instance. -To use the older `~mt19937.MT19937` algorithm, one can instantiate it directly +To use the older `MT19937` algorithm, one can instantiate it directly and pass it to `Generator`. .. code-block:: python @@ -151,9 +151,6 @@ What's New or Different select distributions * Optional ``out`` argument that allows existing arrays to be filled for select distributions -* `~entropy.random_entropy` provides access to the system - source of randomness that is used in cryptographic applications (e.g., - ``/dev/urandom`` on Unix). * All BitGenerators can produce doubles, uint64s and uint32s via CTypes (`~.PCG64.ctypes`) and CFFI (`~.PCG64.cffi`). This allows the bit generators to be used in numba. @@ -190,7 +187,7 @@ Concepts :maxdepth: 1 generator - legacy mtrand <legacy> + Legacy Generator (RandomState) <legacy> BitGenerators, SeedSequences <bit_generators/index> Features @@ -203,7 +200,6 @@ Features new-or-different Comparing Performance <performance> extending - Reading System Entropy <entropy> Original Source ~~~~~~~~~~~~~~~ diff --git a/doc/source/reference/random/legacy.rst b/doc/source/reference/random/legacy.rst index 04d4d3569..413a42727 100644 --- a/doc/source/reference/random/legacy.rst +++ b/doc/source/reference/random/legacy.rst @@ -4,7 +4,7 @@ Legacy Random Generation ------------------------ -The `~mtrand.RandomState` provides access to +The `RandomState` provides access to legacy generators. This generator is considered frozen and will have no further improvements. It is guaranteed to produce the same values as the final point release of NumPy v1.16. These all depend on Box-Muller @@ -12,19 +12,19 @@ normals or inverse CDF exponentials or gammas. 
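As a minimal sketch of what "frozen" means here (the seed value is arbitrary; only the guarantee of unchanging output matters):

.. code-block:: python

    from numpy.random import RandomState

    rs = RandomState(12345)
    # These draws are guaranteed to match what NumPy 1.16 produced
    # for the same seed, now and in future releases.
    vals = rs.standard_normal(2)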
This class should only be used if it is essential to have randoms that are identical to what would have been produced by previous versions of NumPy. -`~mtrand.RandomState` adds additional information +`RandomState` adds additional information to the state which is required when using Box-Muller normals since these are produced in pairs. It is important to use -`~mtrand.RandomState.get_state`, and not the underlying bit generators +`RandomState.get_state`, and not the underlying bit generators `state`, when accessing the state so that these extra values are saved. -Although we provide the `~mt19937.MT19937` BitGenerator for use independent of -`~mtrand.RandomState`, note that its default seeding uses `~SeedSequence` -rather than the legacy seeding algorithm. `~mtrand.RandomState` will use the +Although we provide the `MT19937` BitGenerator for use independent of +`RandomState`, note that its default seeding uses `SeedSequence` +rather than the legacy seeding algorithm. `RandomState` will use the legacy seeding algorithm. The methods to use the legacy seeding algorithm are currently private as the main reason to use them is just to implement -`~mtrand.RandomState`. However, one can reset the state of `~mt19937.MT19937` -using the state of the `~mtrand.RandomState`: +`RandomState`. However, one can reset the state of `MT19937` +using the state of the `RandomState`: .. code-block:: python @@ -47,8 +47,6 @@ using the state of the `~mtrand.RandomState`: rs2.standard_exponential() -.. currentmodule:: numpy.random.mtrand - .. autoclass:: RandomState :exclude-members: diff --git a/doc/source/reference/random/new-or-different.rst b/doc/source/reference/random/new-or-different.rst index 5442f46c9..b3bddb443 100644 --- a/doc/source/reference/random/new-or-different.rst +++ b/doc/source/reference/random/new-or-different.rst @@ -10,9 +10,10 @@ What's New or Different The Box-Muller method used to produce NumPy's normals is no longer available in `Generator`. It is not possible to reproduce the exact random values using ``Generator`` for the normal distribution or any other - distribution that relies on the normal such as the `gamma` or - `standard_t`. If you require bitwise backward compatible - streams, use `RandomState`. + distribution that relies on the normal such as the `Generator.gamma` or + `Generator.standard_t`. If you require bitwise backward compatible + streams, use `RandomState`, i.e., `RandomState.gamma` or + `RandomState.standard_t`. Quick comparison of legacy `mtrand <legacy>`_ to the new `Generator` @@ -20,9 +21,9 @@ Quick comparison of legacy `mtrand <legacy>`_ to the new `Generator` Feature Older Equivalent Notes ------------------ -------------------- ------------- `~.Generator` `~.RandomState` ``Generator`` requires a stream - source, called a `BitGenerator - <bit_generators>` A number of these - are provided. ``RandomState`` uses + source, called a `BitGenerator` + A number of these are provided. + ``RandomState`` uses the Mersenne Twister `~.MT19937` by default, but can also be instantiated with any BitGenerator. @@ -45,9 +46,6 @@ Feature Older Equivalent Notes And in more detail: -* `~.entropy.random_entropy` provides access to the system - source of randomness that is used in cryptographic applications (e.g., - ``/dev/urandom`` on Unix). 
* Simulate from the complex normal distribution (`~.Generator.complex_normal`) * The normal, exponential and gamma generators use 256-step Ziggurat diff --git a/doc/source/reference/random/parallel.rst b/doc/source/reference/random/parallel.rst index 2f79f22d8..721584014 100644 --- a/doc/source/reference/random/parallel.rst +++ b/doc/source/reference/random/parallel.rst @@ -18,10 +18,10 @@ a `~BitGenerator`. It uses hashing techniques to ensure that low-quality seeds are turned into high quality initial states (at least, with very high probability). -For example, `~mt19937.MT19937` has a state consisting of 624 +For example, `MT19937` has a state consisting of 624 `uint32` integers. A naive way to take a 32-bit integer seed would be to just set the last element of the state to the 32-bit seed and leave the rest 0s. This is -a valid state for `~mt19937.MT19937`, but not a good one. The Mersenne Twister +a valid state for `MT19937`, but not a good one. The Mersenne Twister algorithm `suffers if there are too many 0s`_. Similarly, two adjacent 32-bit integer seeds (i.e. ``12345`` and ``12346``) would produce very similar streams. @@ -91,15 +91,15 @@ territory ([2]_). .. [2] In this calculation, we can ignore the amount of numbers drawn from each stream. Each of the PRNGs we provide has some extra protection built in that avoids overlaps if the `~SeedSequence` pools differ in the - slightest bit. `~pcg64.PCG64` has :math:`2^{127}` separate cycles + slightest bit. `PCG64` has :math:`2^{127}` separate cycles determined by the seed in addition to the position in the :math:`2^{128}` long period for each cycle, so one has to both get on or near the same cycle *and* seed a nearby position in the cycle. - `~philox.Philox` has completely independent cycles determined by the seed. - `~sfc64.SFC64` incorporates a 64-bit counter so every unique seed is at + `Philox` has completely independent cycles determined by the seed. + `SFC64` incorporates a 64-bit counter so every unique seed is at least :math:`2^{64}` iterations away from any other seed. And - finally, `~mt19937.MT19937` has just an unimaginably huge period. Getting - a collision internal to `~SeedSequence` is the way a failure would be + finally, `MT19937` has just an unimaginably huge period. Getting - a collision internal to `SeedSequence` is the way a failure would be observed. .. _`implements an algorithm`: http://www.pcg-random.org/posts/developing-a-seed_seq-alternative.html @@ -113,10 +113,10 @@ territory ([2]_). Independent Streams ------------------- -:class:`~philox.Philox` is a counter-based RNG based which generates values by +`Philox` is a counter-based RNG which generates values by encrypting an incrementing counter using weak cryptographic primitives. The seed determines the key that is used for the encryption. Unique keys create -unique, independent streams. :class:`~philox.Philox` lets you bypass the +unique, independent streams. `Philox` lets you bypass the seeding algorithm to directly set the 128-bit key. Similar, but different, keys will still create independent streams. diff --git a/doc/source/reference/random/performance.rst b/doc/source/reference/random/performance.rst index 2d5fca496..d70dd064a 100644 --- a/doc/source/reference/random/performance.rst +++ b/doc/source/reference/random/performance.rst @@ -5,21 +5,21 @@ Performance Recommendation ************** -The recommended generator for general use is :class:`~pcg64.PCG64`. It is +The recommended generator for general use is `PCG64`. It is statistically high quality, full-featured, and fast on most platforms, but somewhat slow when compiled for 32-bit processes.
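A minimal construction sketch for this recommendation (the seed value is illustrative):

.. code-block:: python

    from numpy.random import Generator, PCG64

    rng = Generator(PCG64(seed=2019))
    rng.random(3)  # three uniform doubles from the PCG64-backed stream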
-:class:`~philox.Philox` is fairly slow, but its statistical properties have +`Philox` is fairly slow, but its statistical properties have very high quality, and it is easy to get assuredly-independent streams by using unique keys. If that is the style you wish to use for parallel streams, or you are porting from another system that uses that style, then -:class:`~philox.Philox` is your choice. +`Philox` is your choice. -:class:`~sfc64.SFC64` is statistically high quality and very fast. However, it +`SFC64` is statistically high quality and very fast. However, it lacks jumpability. If you are not using that capability and want lots of speed, even on 32-bit processes, this is your choice. -:class:`~mt19937.MT19937` `fails some statistical tests`_ and is not especially +`MT19937` `fails some statistical tests`_ and is not especially fast compared to modern PRNGs. For these reasons, we mostly do not recommend using it on its own, only through the legacy `~.RandomState` for reproducing old results. That said, it has a very long history as a default in @@ -31,20 +31,20 @@ Timings ******* The timings below are the time in ns to produce 1 random value from a -specific distribution. The original :class:`~mt19937.MT19937` generator is +specific distribution. The original `MT19937` generator is much slower since it requires 2 32-bit values to equal the output of the faster generators. Integer performance has a similar ordering. The pattern is similar for other, more complex generators. The normal -performance of the legacy :class:`~.RandomState` generator is much +performance of the legacy `RandomState` generator is much lower than the others since it uses the Box-Muller transformation rather than the Ziggurat generator. The performance gap for Exponentials is also large due to the cost of computing the log function to invert the CDF. The column labeled MT19937 uses the same 32-bit generator as -:class:`~.RandomState` but produces random values using -:class:`~Generator`. +`RandomState` but produces random values using +`Generator`. .. csv-table:: :header: ,MT19937,PCG64,Philox,SFC64,RandomState @@ -61,7 +61,7 @@ The column labeled MT19937 uses the same 32-bit generator as Poissons,67.6,52.4,69.2,46.4,78.1 The next table presents the performance in percentage relative to values -generated by the legacy generator, `RandomState(MT19937())`. The overall +generated by the legacy generator, ``RandomState(MT19937())``. The overall performance was computed using a geometric mean. .. csv-table:: diff --git a/doc/source/reference/routines.array-manipulation.rst b/doc/source/reference/routines.array-manipulation.rst index cc93d1029..bf43232ef 100644 --- a/doc/source/reference/routines.array-manipulation.rst +++ b/doc/source/reference/routines.array-manipulation.rst @@ -9,6 +9,7 @@ Basic operations :toctree: generated/ copyto + shape Changing array shape ==================== diff --git a/doc/source/reference/routines.testing.rst b/doc/source/reference/routines.testing.rst index c676dec07..98ce3f377 100644 --- a/doc/source/reference/routines.testing.rst +++ b/doc/source/reference/routines.testing.rst @@ -37,11 +37,11 @@ Decorators ..
autosummary:: :toctree: generated/ - decorators.deprecated - decorators.knownfailureif - decorators.setastest - decorators.skipif - decorators.slow + dec.deprecated + dec.knownfailureif + dec.setastest + dec.skipif + dec.slow decorate_methods Test Running diff --git a/doc/source/reference/ufuncs.rst b/doc/source/reference/ufuncs.rst index d00e88b34..0416d6efc 100644 --- a/doc/source/reference/ufuncs.rst +++ b/doc/source/reference/ufuncs.rst @@ -100,7 +100,7 @@ is true: - *d* acts like a (5,6) array where the single value is repeated. -.. _ufuncs.output-type: +.. _ufuncs-output-type: Output type determination ========================= @@ -228,46 +228,47 @@ can generate this table for your system with the code given in the Figure. .. admonition:: Figure - Code segment showing the "can cast safely" table for a 32-bit system. + Code segment showing the "can cast safely" table for a 64-bit system. + Generally the output depends on the system; your system might result in + a different table. + >>> mark = {False: ' -', True: ' Y'} >>> def print_table(ntypes): - ... print 'X', - ... for char in ntypes: print char, - ... print + ... print('X ' + ' '.join(ntypes)) ... for row in ntypes: - ... print row, + ... print(row, end='') ... for col in ntypes: - ... print int(np.can_cast(row, col)), - ... print + ... print(mark[np.can_cast(row, col)], end='') + ... print() + ... >>> print_table(np.typecodes['All']) X ? b h i l q p B H I L Q P e f d g F D G S U V O M m - ? 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 - b 0 1 1 1 1 1 1 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 0 0 - h 0 0 1 1 1 1 1 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 0 0 - i 0 0 0 1 1 1 1 0 0 0 0 0 0 0 0 1 1 0 1 1 1 1 1 1 0 0 - l 0 0 0 0 1 1 1 0 0 0 0 0 0 0 0 1 1 0 1 1 1 1 1 1 0 0 - q 0 0 0 0 1 1 1 0 0 0 0 0 0 0 0 1 1 0 1 1 1 1 1 1 0 0 - p 0 0 0 0 1 1 1 0 0 0 0 0 0 0 0 1 1 0 1 1 1 1 1 1 0 0 - B 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 - H 0 0 0 1 1 1 1 0 1 1 1 1 1 0 1 1 1 1 1 1 1 1 1 1 0 0 - I 0 0 0 0 1 1 1 0 0 1 1 1 1 0 0 1 1 0 1 1 1 1 1 1 0 0 - L 0 0 0 0 0 0 0 0 0 0 1 1 1 0 0 1 1 0 1 1 1 1 1 1 0 0 - Q 0 0 0 0 0 0 0 0 0 0 1 1 1 0 0 1 1 0 1 1 1 1 1 1 0 0 - P 0 0 0 0 0 0 0 0 0 0 1 1 1 0 0 1 1 0 1 1 1 1 1 1 0 0 - e 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 0 0 - f 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 0 0 - d 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 1 1 1 1 1 1 0 0 - g 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 1 1 1 1 1 0 0 - F 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 0 0 - D 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 0 0 - G 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 0 0 - S 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 0 0 - U 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 0 0 - V 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 - O 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 - M 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 - m 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 - + ? 
Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y - Y + b - Y Y Y Y Y Y - - - - - - Y Y Y Y Y Y Y Y Y Y Y - Y + h - - Y Y Y Y Y - - - - - - - Y Y Y Y Y Y Y Y Y Y - Y + i - - - Y Y Y Y - - - - - - - - Y Y - Y Y Y Y Y Y - Y + l - - - - Y Y Y - - - - - - - - Y Y - Y Y Y Y Y Y - Y + q - - - - Y Y Y - - - - - - - - Y Y - Y Y Y Y Y Y - Y + p - - - - Y Y Y - - - - - - - - Y Y - Y Y Y Y Y Y - Y + B - - Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y - Y + H - - - Y Y Y Y - Y Y Y Y Y - Y Y Y Y Y Y Y Y Y Y - Y + I - - - - Y Y Y - - Y Y Y Y - - Y Y - Y Y Y Y Y Y - Y + L - - - - - - - - - - Y Y Y - - Y Y - Y Y Y Y Y Y - Y + Q - - - - - - - - - - Y Y Y - - Y Y - Y Y Y Y Y Y - Y + P - - - - - - - - - - Y Y Y - - Y Y - Y Y Y Y Y Y - Y + e - - - - - - - - - - - - - Y Y Y Y Y Y Y Y Y Y Y - - + f - - - - - - - - - - - - - - Y Y Y Y Y Y Y Y Y Y - - + d - - - - - - - - - - - - - - - Y Y - Y Y Y Y Y Y - - + g - - - - - - - - - - - - - - - - Y - - Y Y Y Y Y - - + F - - - - - - - - - - - - - - - - - Y Y Y Y Y Y Y - - + D - - - - - - - - - - - - - - - - - - Y Y Y Y Y Y - - + G - - - - - - - - - - - - - - - - - - - Y Y Y Y Y - - + S - - - - - - - - - - - - - - - - - - - - Y Y Y Y - - + U - - - - - - - - - - - - - - - - - - - - - Y Y Y - - + V - - - - - - - - - - - - - - - - - - - - - - Y Y - - + O - - - - - - - - - - - - - - - - - - - - - - Y Y - - + M - - - - - - - - - - - - - - - - - - - - - - Y Y Y - + m - - - - - - - - - - - - - - - - - - - - - - Y Y - Y You should note that, while included in the table for completeness, the 'S', 'U', and 'V' types cannot be operated on by ufuncs. Also, @@ -319,7 +320,7 @@ advanced usage and will not typically be used. .. versionadded:: 1.10 The 'out' keyword argument is expected to be a tuple with one entry per - output (which can be `None` for arrays to be allocated by the ufunc). + output (which can be None for arrays to be allocated by the ufunc). For ufuncs with a single output, passing a single array (instead of a tuple holding a single array) is also valid. @@ -493,7 +494,7 @@ keyword, and an *out* keyword, and the arrays must all have dimension >= 1. The *axis* keyword specifies the axis of the array over which the reduction will take place (with negative values counting backwards). Generally, it is an integer, though for :meth:`ufunc.reduce`, it can also be a tuple of `int` to -reduce over several axes at once, or `None`, to reduce over all axes. +reduce over several axes at once, or None, to reduce over all axes. The *dtype* keyword allows you to manage a very common problem that arises when naively using :meth:`ufunc.reduce`. Sometimes you may have an array of a certain data type and wish to add up all of its diff --git a/doc/source/release.rst b/doc/source/release.rst index f8d83726f..3bfe81243 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -2,51 +2,59 @@ Release Notes ************* -.. include:: ../release/1.17.0-notes.rst -.. include:: ../release/1.16.4-notes.rst -.. include:: ../release/1.16.3-notes.rst -.. include:: ../release/1.16.2-notes.rst -.. include:: ../release/1.16.1-notes.rst -.. include:: ../release/1.16.0-notes.rst -.. include:: ../release/1.15.4-notes.rst -.. include:: ../release/1.15.3-notes.rst -.. include:: ../release/1.15.2-notes.rst -.. include:: ../release/1.15.1-notes.rst -.. include:: ../release/1.15.0-notes.rst -.. include:: ../release/1.14.6-notes.rst -.. include:: ../release/1.14.5-notes.rst -.. include:: ../release/1.14.4-notes.rst -.. include:: ../release/1.14.3-notes.rst -.. include:: ../release/1.14.2-notes.rst -.. 
include:: ../release/1.14.1-notes.rst -.. include:: ../release/1.14.0-notes.rst -.. include:: ../release/1.13.3-notes.rst -.. include:: ../release/1.13.2-notes.rst -.. include:: ../release/1.13.1-notes.rst -.. include:: ../release/1.13.0-notes.rst -.. include:: ../release/1.12.1-notes.rst -.. include:: ../release/1.12.0-notes.rst -.. include:: ../release/1.11.3-notes.rst -.. include:: ../release/1.11.2-notes.rst -.. include:: ../release/1.11.1-notes.rst -.. include:: ../release/1.11.0-notes.rst -.. include:: ../release/1.10.4-notes.rst -.. include:: ../release/1.10.3-notes.rst -.. include:: ../release/1.10.2-notes.rst -.. include:: ../release/1.10.1-notes.rst -.. include:: ../release/1.10.0-notes.rst -.. include:: ../release/1.9.2-notes.rst -.. include:: ../release/1.9.1-notes.rst -.. include:: ../release/1.9.0-notes.rst -.. include:: ../release/1.8.2-notes.rst -.. include:: ../release/1.8.1-notes.rst -.. include:: ../release/1.8.0-notes.rst -.. include:: ../release/1.7.2-notes.rst -.. include:: ../release/1.7.1-notes.rst -.. include:: ../release/1.7.0-notes.rst -.. include:: ../release/1.6.2-notes.rst -.. include:: ../release/1.6.1-notes.rst -.. include:: ../release/1.6.0-notes.rst -.. include:: ../release/1.5.0-notes.rst -.. include:: ../release/1.4.0-notes.rst -.. include:: ../release/1.3.0-notes.rst +.. toctree:: + :maxdepth: 3 + + 1.18.0 <release/1.18.0-notes> + 1.17.3 <release/1.17.3-notes> + 1.17.2 <release/1.17.2-notes> + 1.17.1 <release/1.17.1-notes> + 1.17.0 <release/1.17.0-notes> + 1.16.5 <release/1.16.5-notes> + 1.16.4 <release/1.16.4-notes> + 1.16.3 <release/1.16.3-notes> + 1.16.2 <release/1.16.2-notes> + 1.16.1 <release/1.16.1-notes> + 1.16.0 <release/1.16.0-notes> + 1.15.4 <release/1.15.4-notes> + 1.15.3 <release/1.15.3-notes> + 1.15.2 <release/1.15.2-notes> + 1.15.1 <release/1.15.1-notes> + 1.15.0 <release/1.15.0-notes> + 1.14.6 <release/1.14.6-notes> + 1.14.5 <release/1.14.5-notes> + 1.14.4 <release/1.14.4-notes> + 1.14.3 <release/1.14.3-notes> + 1.14.2 <release/1.14.2-notes> + 1.14.1 <release/1.14.1-notes> + 1.14.0 <release/1.14.0-notes> + 1.13.3 <release/1.13.3-notes> + 1.13.2 <release/1.13.2-notes> + 1.13.1 <release/1.13.1-notes> + 1.13.0 <release/1.13.0-notes> + 1.12.1 <release/1.12.1-notes> + 1.12.0 <release/1.12.0-notes> + 1.11.3 <release/1.11.3-notes> + 1.11.2 <release/1.11.2-notes> + 1.11.1 <release/1.11.1-notes> + 1.11.0 <release/1.11.0-notes> + 1.10.4 <release/1.10.4-notes> + 1.10.3 <release/1.10.3-notes> + 1.10.2 <release/1.10.2-notes> + 1.10.1 <release/1.10.1-notes> + 1.10.0 <release/1.10.0-notes> + 1.9.2 <release/1.9.2-notes> + 1.9.1 <release/1.9.1-notes> + 1.9.0 <release/1.9.0-notes> + 1.8.2 <release/1.8.2-notes> + 1.8.1 <release/1.8.1-notes> + 1.8.0 <release/1.8.0-notes> + 1.7.2 <release/1.7.2-notes> + 1.7.1 <release/1.7.1-notes> + 1.7.0 <release/1.7.0-notes> + 1.6.2 <release/1.6.2-notes> + 1.6.1 <release/1.6.1-notes> + 1.6.0 <release/1.6.0-notes> + 1.5.0 <release/1.5.0-notes> + 1.4.0 <release/1.4.0-notes> + 1.3.0 <release/1.3.0-notes> diff --git a/doc/release/1.10.0-notes.rst b/doc/source/release/1.10.0-notes.rst index 88062e463..88062e463 100644 --- a/doc/release/1.10.0-notes.rst +++ b/doc/source/release/1.10.0-notes.rst diff --git a/doc/release/1.10.1-notes.rst b/doc/source/release/1.10.1-notes.rst index 4e541d279..4e541d279 100644 --- a/doc/release/1.10.1-notes.rst +++ b/doc/source/release/1.10.1-notes.rst diff --git a/doc/release/1.10.2-notes.rst b/doc/source/release/1.10.2-notes.rst index 8c26b463c..8c26b463c 100644 --- a/doc/release/1.10.2-notes.rst 
+++ b/doc/source/release/1.10.2-notes.rst diff --git a/doc/release/1.10.3-notes.rst b/doc/source/release/1.10.3-notes.rst index 0d4df4ce6..0d4df4ce6 100644 --- a/doc/release/1.10.3-notes.rst +++ b/doc/source/release/1.10.3-notes.rst diff --git a/doc/release/1.10.4-notes.rst b/doc/source/release/1.10.4-notes.rst index 481928ca7..481928ca7 100644 --- a/doc/release/1.10.4-notes.rst +++ b/doc/source/release/1.10.4-notes.rst diff --git a/doc/release/1.11.0-notes.rst b/doc/source/release/1.11.0-notes.rst index 166502ac5..166502ac5 100644 --- a/doc/release/1.11.0-notes.rst +++ b/doc/source/release/1.11.0-notes.rst diff --git a/doc/release/1.11.1-notes.rst b/doc/source/release/1.11.1-notes.rst index 6303c32f0..6303c32f0 100644 --- a/doc/release/1.11.1-notes.rst +++ b/doc/source/release/1.11.1-notes.rst diff --git a/doc/release/1.11.2-notes.rst b/doc/source/release/1.11.2-notes.rst index c954089d5..c954089d5 100644 --- a/doc/release/1.11.2-notes.rst +++ b/doc/source/release/1.11.2-notes.rst diff --git a/doc/release/1.11.3-notes.rst b/doc/source/release/1.11.3-notes.rst index 8381a97f7..8381a97f7 100644 --- a/doc/release/1.11.3-notes.rst +++ b/doc/source/release/1.11.3-notes.rst diff --git a/doc/release/1.12.0-notes.rst b/doc/source/release/1.12.0-notes.rst index 711055d16..711055d16 100644 --- a/doc/release/1.12.0-notes.rst +++ b/doc/source/release/1.12.0-notes.rst diff --git a/doc/release/1.12.1-notes.rst b/doc/source/release/1.12.1-notes.rst index f67dab108..f67dab108 100644 --- a/doc/release/1.12.1-notes.rst +++ b/doc/source/release/1.12.1-notes.rst diff --git a/doc/release/1.13.0-notes.rst b/doc/source/release/1.13.0-notes.rst index 3b719db09..3b719db09 100644 --- a/doc/release/1.13.0-notes.rst +++ b/doc/source/release/1.13.0-notes.rst diff --git a/doc/release/1.13.1-notes.rst b/doc/source/release/1.13.1-notes.rst index 88a4bc3dd..88a4bc3dd 100644 --- a/doc/release/1.13.1-notes.rst +++ b/doc/source/release/1.13.1-notes.rst diff --git a/doc/release/1.13.2-notes.rst b/doc/source/release/1.13.2-notes.rst index f2f9120f5..f2f9120f5 100644 --- a/doc/release/1.13.2-notes.rst +++ b/doc/source/release/1.13.2-notes.rst diff --git a/doc/release/1.13.3-notes.rst b/doc/source/release/1.13.3-notes.rst index 7f7170bcc..7f7170bcc 100644 --- a/doc/release/1.13.3-notes.rst +++ b/doc/source/release/1.13.3-notes.rst diff --git a/doc/release/1.14.0-notes.rst b/doc/source/release/1.14.0-notes.rst index 462631de6..462631de6 100644 --- a/doc/release/1.14.0-notes.rst +++ b/doc/source/release/1.14.0-notes.rst diff --git a/doc/release/1.14.1-notes.rst b/doc/source/release/1.14.1-notes.rst index 7b95c2e28..7b95c2e28 100644 --- a/doc/release/1.14.1-notes.rst +++ b/doc/source/release/1.14.1-notes.rst diff --git a/doc/release/1.14.2-notes.rst b/doc/source/release/1.14.2-notes.rst index 3f47cb5f5..3f47cb5f5 100644 --- a/doc/release/1.14.2-notes.rst +++ b/doc/source/release/1.14.2-notes.rst diff --git a/doc/release/1.14.3-notes.rst b/doc/source/release/1.14.3-notes.rst index 60b631168..60b631168 100644 --- a/doc/release/1.14.3-notes.rst +++ b/doc/source/release/1.14.3-notes.rst diff --git a/doc/release/1.14.4-notes.rst b/doc/source/release/1.14.4-notes.rst index 3fb94383b..3fb94383b 100644 --- a/doc/release/1.14.4-notes.rst +++ b/doc/source/release/1.14.4-notes.rst diff --git a/doc/release/1.14.5-notes.rst b/doc/source/release/1.14.5-notes.rst index 9a97cc033..9a97cc033 100644 --- a/doc/release/1.14.5-notes.rst +++ b/doc/source/release/1.14.5-notes.rst diff --git a/doc/release/1.14.6-notes.rst 
b/doc/source/release/1.14.6-notes.rst index ac6a78272..ac6a78272 100644 --- a/doc/release/1.14.6-notes.rst +++ b/doc/source/release/1.14.6-notes.rst diff --git a/doc/release/1.15.0-notes.rst b/doc/source/release/1.15.0-notes.rst index 7235ca915..7235ca915 100644 --- a/doc/release/1.15.0-notes.rst +++ b/doc/source/release/1.15.0-notes.rst diff --git a/doc/release/1.15.1-notes.rst b/doc/source/release/1.15.1-notes.rst index ddb83303c..ddb83303c 100644 --- a/doc/release/1.15.1-notes.rst +++ b/doc/source/release/1.15.1-notes.rst diff --git a/doc/release/1.15.2-notes.rst b/doc/source/release/1.15.2-notes.rst index a3e61fccd..a3e61fccd 100644 --- a/doc/release/1.15.2-notes.rst +++ b/doc/source/release/1.15.2-notes.rst diff --git a/doc/release/1.15.3-notes.rst b/doc/source/release/1.15.3-notes.rst index 753eecec9..753eecec9 100644 --- a/doc/release/1.15.3-notes.rst +++ b/doc/source/release/1.15.3-notes.rst diff --git a/doc/release/1.15.4-notes.rst b/doc/source/release/1.15.4-notes.rst index 033bd5828..033bd5828 100644 --- a/doc/release/1.15.4-notes.rst +++ b/doc/source/release/1.15.4-notes.rst diff --git a/doc/release/1.16.0-notes.rst b/doc/source/release/1.16.0-notes.rst index 1034d6e6c..1034d6e6c 100644 --- a/doc/release/1.16.0-notes.rst +++ b/doc/source/release/1.16.0-notes.rst diff --git a/doc/release/1.16.1-notes.rst b/doc/source/release/1.16.1-notes.rst index 2a190ef91..2a190ef91 100644 --- a/doc/release/1.16.1-notes.rst +++ b/doc/source/release/1.16.1-notes.rst diff --git a/doc/release/1.16.2-notes.rst b/doc/source/release/1.16.2-notes.rst index 62b90dc40..62b90dc40 100644 --- a/doc/release/1.16.2-notes.rst +++ b/doc/source/release/1.16.2-notes.rst diff --git a/doc/release/1.16.3-notes.rst b/doc/source/release/1.16.3-notes.rst index 181a7264d..181a7264d 100644 --- a/doc/release/1.16.3-notes.rst +++ b/doc/source/release/1.16.3-notes.rst diff --git a/doc/release/1.16.4-notes.rst b/doc/source/release/1.16.4-notes.rst index a236b05c8..a236b05c8 100644 --- a/doc/release/1.16.4-notes.rst +++ b/doc/source/release/1.16.4-notes.rst diff --git a/doc/source/release/1.16.5-notes.rst b/doc/source/release/1.16.5-notes.rst new file mode 100644 index 000000000..5b6eb585b --- /dev/null +++ b/doc/source/release/1.16.5-notes.rst @@ -0,0 +1,68 @@ +========================== +NumPy 1.16.5 Release Notes +========================== + +The NumPy 1.16.5 release fixes bugs reported against the 1.16.4 release, and +also backports several enhancements from master that seem appropriate for a +release series that is the last to support Python 2.7. The wheels on PyPI are +linked with OpenBLAS v0.3.7-dev, which should fix errors on Skylake series +cpus. + +Downstream developers building this release should use Cython >= 0.29.2 and, if +using OpenBLAS, OpenBLAS >= v0.3.7. The supported Python versions are 2.7 and +3.5-3.7. + + +Contributors +============ + +A total of 18 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Alexander Shadchin +* Allan Haldane +* Bruce Merry + +* Charles Harris +* Colin Snyder + +* Dan Allan + +* Emile + +* Eric Wieser +* Grey Baker + +* Maksim Shabunin + +* Marten van Kerkwijk +* Matti Picus +* Peter Andreas Entschev + +* Ralf Gommers +* Richard Harris + +* Sebastian Berg +* Sergei Lebedev + +* Stephan Hoyer + +Pull requests merged +==================== + +A total of 23 pull requests were merged for this release. 
+ +* `#13742 <https://github.com/numpy/numpy/pull/13742>`__: ENH: Add project URLs to setup.py +* `#13823 <https://github.com/numpy/numpy/pull/13823>`__: TEST, ENH: fix tests and ctypes code for PyPy +* `#13845 <https://github.com/numpy/numpy/pull/13845>`__: BUG: use npy_intp instead of int for indexing array +* `#13867 <https://github.com/numpy/numpy/pull/13867>`__: TST: Ignore DeprecationWarning during nose imports +* `#13905 <https://github.com/numpy/numpy/pull/13905>`__: BUG: Fix use-after-free in boolean indexing +* `#13933 <https://github.com/numpy/numpy/pull/13933>`__: MAINT/BUG/DOC: Fix errors in _add_newdocs +* `#13984 <https://github.com/numpy/numpy/pull/13984>`__: BUG: fix byte order reversal for datetime64[ns] +* `#13994 <https://github.com/numpy/numpy/pull/13994>`__: MAINT,BUG: Use nbytes to also catch empty descr during allocation +* `#14042 <https://github.com/numpy/numpy/pull/14042>`__: BUG: np.array cleared errors occured in PyMemoryView_FromObject +* `#14043 <https://github.com/numpy/numpy/pull/14043>`__: BUG: Fixes for Undefined Behavior Sanitizer (UBSan) errors. +* `#14044 <https://github.com/numpy/numpy/pull/14044>`__: BUG: ensure that casting to/from structured is properly checked. +* `#14045 <https://github.com/numpy/numpy/pull/14045>`__: MAINT: fix histogram*d dispatchers +* `#14046 <https://github.com/numpy/numpy/pull/14046>`__: BUG: further fixup to histogram2d dispatcher. +* `#14052 <https://github.com/numpy/numpy/pull/14052>`__: BUG: Replace contextlib.suppress for Python 2.7 +* `#14056 <https://github.com/numpy/numpy/pull/14056>`__: BUG: fix compilation of 3rd party modules with Py_LIMITED_API... +* `#14057 <https://github.com/numpy/numpy/pull/14057>`__: BUG: Fix memory leak in dtype from dict contructor +* `#14058 <https://github.com/numpy/numpy/pull/14058>`__: DOC: Document array_function at a higher level. +* `#14084 <https://github.com/numpy/numpy/pull/14084>`__: BUG, DOC: add new recfunctions to `__all__` +* `#14162 <https://github.com/numpy/numpy/pull/14162>`__: BUG: Remove stray print that causes a SystemError on python 3.7 +* `#14297 <https://github.com/numpy/numpy/pull/14297>`__: TST: Pin pytest version to 5.0.1. +* `#14322 <https://github.com/numpy/numpy/pull/14322>`__: ENH: Enable huge pages in all Linux builds +* `#14346 <https://github.com/numpy/numpy/pull/14346>`__: BUG: fix behavior of structured_to_unstructured on non-trivial... +* `#14382 <https://github.com/numpy/numpy/pull/14382>`__: REL: Prepare for the NumPy 1.16.5 release. diff --git a/doc/release/1.17.0-notes.rst b/doc/source/release/1.17.0-notes.rst index 8d69e36d9..a0e737982 100644 --- a/doc/release/1.17.0-notes.rst +++ b/doc/source/release/1.17.0-notes.rst @@ -239,7 +239,7 @@ New extensible `numpy.random` module with selectable random number generators ----------------------------------------------------------------------------- A new extensible `numpy.random` module along with four selectable random number generators and improved seeding designed for use in parallel processes has been -added. The currently available :ref:`Bit Generators <bit_generator>` are +added. The currently available `Bit Generators` are `~mt19937.MT19937`, `~pcg64.PCG64`, `~philox.Philox`, and `~sfc64.SFC64`. ``PCG64`` is the new default while ``MT19937`` is retained for backwards compatibility. 
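As a short sketch of the improved seeding for parallel processes (using only the public API added in this release; the seed and worker count are illustrative):

.. code-block:: python

    from numpy.random import Generator, PCG64, SeedSequence

    ss = SeedSequence(1234)
    # Spawn statistically independent child sequences, e.g. one per worker.
    streams = [Generator(PCG64(child)) for child in ss.spawn(4)]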
Note that the legacy random module is unchanged and is now diff --git a/doc/source/release/1.17.1-notes.rst b/doc/source/release/1.17.1-notes.rst new file mode 100644 index 000000000..bd837ee5b --- /dev/null +++ b/doc/source/release/1.17.1-notes.rst @@ -0,0 +1,73 @@ +.. currentmodule:: numpy + +========================== +NumPy 1.17.1 Release Notes +========================== + +This release contains a number of fixes for bugs reported against NumPy 1.17.0 +along with a few documentation and build improvements. The Python versions +supported are 3.5-3.7, note that Python 2.7 has been dropped. Python 3.8b3 +should work with the released source packages, but there are no future +guarantees. + +Downstream developers should use Cython >= 0.29.13 for Python 3.8 support and +OpenBLAS >= 3.7 to avoid problems on the Skylake architecture. The NumPy wheels +on PyPI are built from the OpenBLAS development branch in order to avoid those +problems. + + +Contributors +============ + +A total of 17 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Alexander Jung + +* Allan Haldane +* Charles Harris +* Eric Wieser +* Giuseppe Cuccu + +* Hiroyuki V. Yamazaki +* Jérémie du Boisberranger +* Kmol Yuan + +* Matti Picus +* Max Bolingbroke + +* Maxwell Aladago + +* Oleksandr Pavlyk +* Peter Andreas Entschev +* Sergei Lebedev +* Seth Troisi + +* Vladimir Pershin + +* Warren Weckesser + + +Pull requests merged +==================== + +A total of 24 pull requests were merged for this release. + +* `#14156 <https://github.com/numpy/numpy/pull/14156>`__: TST: Allow fuss in testing strided/non-strided exp/log loops +* `#14157 <https://github.com/numpy/numpy/pull/14157>`__: BUG: avx2_scalef_ps must be static +* `#14158 <https://github.com/numpy/numpy/pull/14158>`__: BUG: Remove stray print that causes a SystemError on python 3.7. +* `#14159 <https://github.com/numpy/numpy/pull/14159>`__: BUG: Fix DeprecationWarning in python 3.8. +* `#14160 <https://github.com/numpy/numpy/pull/14160>`__: BLD: Add missing gcd/lcm definitions to npy_math.h +* `#14161 <https://github.com/numpy/numpy/pull/14161>`__: DOC, BUILD: cleanups and fix (again) 'build dist' +* `#14166 <https://github.com/numpy/numpy/pull/14166>`__: TST: Add 3.8-dev to travisCI testing. +* `#14194 <https://github.com/numpy/numpy/pull/14194>`__: BUG: Remove the broken clip wrapper (Backport) +* `#14198 <https://github.com/numpy/numpy/pull/14198>`__: DOC: Fix hermitian argument docs in svd. +* `#14199 <https://github.com/numpy/numpy/pull/14199>`__: MAINT: Workaround for Intel compiler bug leading to failing test +* `#14200 <https://github.com/numpy/numpy/pull/14200>`__: TST: Clean up of test_pocketfft.py +* `#14201 <https://github.com/numpy/numpy/pull/14201>`__: BUG: Make advanced indexing result on read-only subclass writeable... +* `#14236 <https://github.com/numpy/numpy/pull/14236>`__: BUG: Fixed default BitGenerator name +* `#14237 <https://github.com/numpy/numpy/pull/14237>`__: ENH: add c-imported modules for freeze analysis in np.random +* `#14296 <https://github.com/numpy/numpy/pull/14296>`__: TST: Pin pytest version to 5.0.1 +* `#14301 <https://github.com/numpy/numpy/pull/14301>`__: BUG: Fix leak in the f2py-generated module init and `PyMem_Del`... +* `#14302 <https://github.com/numpy/numpy/pull/14302>`__: BUG: Fix formatting error in exception message +* `#14307 <https://github.com/numpy/numpy/pull/14307>`__: MAINT: random: Match type of SeedSequence.pool_size to DEFAULT_POOL_SIZE. 
+* `#14308 <https://github.com/numpy/numpy/pull/14308>`__: BUG: Fix numpy.random bug in platform detection +* `#14309 <https://github.com/numpy/numpy/pull/14309>`__: ENH: Enable huge pages in all Linux builds +* `#14330 <https://github.com/numpy/numpy/pull/14330>`__: BUG: Fix segfault in `random.permutation(x)` when x is a string. +* `#14338 <https://github.com/numpy/numpy/pull/14338>`__: BUG: don't fail when lexsorting some empty arrays (#14228) +* `#14339 <https://github.com/numpy/numpy/pull/14339>`__: BUG: Fix misuse of .names and .fields in various places (backport... +* `#14345 <https://github.com/numpy/numpy/pull/14345>`__: BUG: fix behavior of structured_to_unstructured on non-trivial... +* `#14350 <https://github.com/numpy/numpy/pull/14350>`__: REL: Prepare 1.17.1 release diff --git a/doc/source/release/1.17.2-notes.rst b/doc/source/release/1.17.2-notes.rst new file mode 100644 index 000000000..65cdaf903 --- /dev/null +++ b/doc/source/release/1.17.2-notes.rst @@ -0,0 +1,49 @@ +.. currentmodule:: numpy + +========================== +NumPy 1.17.2 Release Notes +========================== + +This release contains fixes for bugs reported against NumPy 1.17.1 along with +some documentation improvements. The most important fix is for lexsort when the +keys are of type (u)int8 or (u)int16. If you are currently using 1.17 you +should upgrade. + +The Python versions supported in this release are 3.5-3.7, Python 2.7 has been +dropped. Python 3.8b4 should work with the released source packages, but there +are no future guarantees. + +Downstream developers should use Cython >= 0.29.13 for Python 3.8 support and +OpenBLAS >= 3.7 to avoid errors on the Skylake architecture. The NumPy wheels +on PyPI are built from the OpenBLAS development branch in order to avoid those +errors. + + +Contributors +============ + +A total of 7 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* CakeWithSteak + +* Charles Harris +* Dan Allan +* Hameer Abbasi +* Lars Grueter +* Matti Picus +* Sebastian Berg + + +Pull requests merged +==================== + +A total of 8 pull requests were merged for this release. + +* `#14418 <https://github.com/numpy/numpy/pull/14418>`__: BUG: Fix aradixsort indirect indexing. +* `#14420 <https://github.com/numpy/numpy/pull/14420>`__: DOC: Fix a minor typo in dispatch documentation. +* `#14421 <https://github.com/numpy/numpy/pull/14421>`__: BUG: test, fix regression in converting to ctypes +* `#14430 <https://github.com/numpy/numpy/pull/14430>`__: BUG: Do not show Override module in private error classes. +* `#14432 <https://github.com/numpy/numpy/pull/14432>`__: BUG: Fixed maximum relative error reporting in assert_allclose. +* `#14433 <https://github.com/numpy/numpy/pull/14433>`__: BUG: Fix uint-overflow if padding with linear_ramp and negative... +* `#14436 <https://github.com/numpy/numpy/pull/14436>`__: BUG: Update 1.17.x with 1.18.0-dev pocketfft.py. +* `#14446 <https://github.com/numpy/numpy/pull/14446>`__: REL: Prepare for NumPy 1.17.2 release. diff --git a/doc/source/release/1.17.3-notes.rst b/doc/source/release/1.17.3-notes.rst new file mode 100644 index 000000000..e33ca1917 --- /dev/null +++ b/doc/source/release/1.17.3-notes.rst @@ -0,0 +1,59 @@ +.. currentmodule:: numpy + +========================== +NumPy 1.17.3 Release Notes +========================== + +This release contains fixes for bugs reported against NumPy 1.17.2 along with +some documentation improvements.
The Python versions supported in this release +are 3.5-3.8. + +Downstream developers should use Cython >= 0.29.13 for Python 3.8 support and +OpenBLAS >= 3.7 to avoid errors on the Skylake architecture. + + +Highlights +========== + +- Wheels for Python 3.8 +- Boolean ``matmul`` fixed to use booleans instead of integers. + + +Compatibility notes +=================== + +- The seldom used ``PyArray_DescrCheck`` macro has been changed/fixed. + + +Contributors +============ + +A total of 7 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Allan Haldane +* Charles Harris +* Kevin Sheppard +* Matti Picus +* Ralf Gommers +* Sebastian Berg +* Warren Weckesser + + +Pull requests merged +==================== + +A total of 12 pull requests were merged for this release. + +* `#14456 <https://github.com/numpy/numpy/pull/14456>`__: MAINT: clean up pocketfft modules inside numpy.fft namespace. +* `#14463 <https://github.com/numpy/numpy/pull/14463>`__: BUG: random.hypergeometic assumes npy_long is npy_int64, hung... +* `#14502 <https://github.com/numpy/numpy/pull/14502>`__: BUG: random: Revert gh-14458 and refix gh-14557. +* `#14504 <https://github.com/numpy/numpy/pull/14504>`__: BUG: add a specialized loop for boolean matmul. +* `#14506 <https://github.com/numpy/numpy/pull/14506>`__: MAINT: Update pytest version for Python 3.8 +* `#14512 <https://github.com/numpy/numpy/pull/14512>`__: DOC: random: fix doc linking, was referencing private submodules. +* `#14513 <https://github.com/numpy/numpy/pull/14513>`__: BUG,MAINT: Some fixes and minor cleanup based on clang analysis +* `#14515 <https://github.com/numpy/numpy/pull/14515>`__: BUG: Fix randint when range is 2**32 +* `#14519 <https://github.com/numpy/numpy/pull/14519>`__: MAINT: remove the entropy c-extension module +* `#14563 <https://github.com/numpy/numpy/pull/14563>`__: DOC: remove note about Pocketfft license file (non-existing here). +* `#14578 <https://github.com/numpy/numpy/pull/14578>`__: BUG: random: Create a legacy implementation of random.binomial. +* `#14687 <https://github.com/numpy/numpy/pull/14687>`__: BUG: properly define PyArray_DescrCheck diff --git a/doc/release/1.18.0-notes.rst b/doc/source/release/1.18.0-notes.rst index 04bfdb7fc..e66540410 100644 --- a/doc/release/1.18.0-notes.rst +++ b/doc/source/release/1.18.0-notes.rst @@ -5,4 +5,4 @@ The ``numpy/doc/release/upcoming_changes/README.txt`` details how to add new release notes. For the work in progress release notes for the current development -version, see the `devdocs<https://numpy.org/devdocs/release.html>`. +version, see the `devdocs <https://numpy.org/devdocs/release.html>`__. 
diff --git a/doc/release/1.3.0-notes.rst b/doc/source/release/1.3.0-notes.rst index 239714246..239714246 100644 --- a/doc/release/1.3.0-notes.rst +++ b/doc/source/release/1.3.0-notes.rst diff --git a/doc/release/1.4.0-notes.rst b/doc/source/release/1.4.0-notes.rst index 9480a054e..9480a054e 100644 --- a/doc/release/1.4.0-notes.rst +++ b/doc/source/release/1.4.0-notes.rst diff --git a/doc/release/1.5.0-notes.rst b/doc/source/release/1.5.0-notes.rst index a2184ab13..a2184ab13 100644 --- a/doc/release/1.5.0-notes.rst +++ b/doc/source/release/1.5.0-notes.rst diff --git a/doc/release/1.6.0-notes.rst b/doc/source/release/1.6.0-notes.rst index c5f53a0eb..c5f53a0eb 100644 --- a/doc/release/1.6.0-notes.rst +++ b/doc/source/release/1.6.0-notes.rst diff --git a/doc/release/1.6.1-notes.rst b/doc/source/release/1.6.1-notes.rst index 05fcb4ab9..05fcb4ab9 100644 --- a/doc/release/1.6.1-notes.rst +++ b/doc/source/release/1.6.1-notes.rst diff --git a/doc/release/1.6.2-notes.rst b/doc/source/release/1.6.2-notes.rst index 8f0b06f98..8f0b06f98 100644 --- a/doc/release/1.6.2-notes.rst +++ b/doc/source/release/1.6.2-notes.rst diff --git a/doc/release/1.7.0-notes.rst b/doc/source/release/1.7.0-notes.rst index f111f80dc..f111f80dc 100644 --- a/doc/release/1.7.0-notes.rst +++ b/doc/source/release/1.7.0-notes.rst diff --git a/doc/release/1.7.1-notes.rst b/doc/source/release/1.7.1-notes.rst index 04216b0df..04216b0df 100644 --- a/doc/release/1.7.1-notes.rst +++ b/doc/source/release/1.7.1-notes.rst diff --git a/doc/release/1.7.2-notes.rst b/doc/source/release/1.7.2-notes.rst index b0951bd72..b0951bd72 100644 --- a/doc/release/1.7.2-notes.rst +++ b/doc/source/release/1.7.2-notes.rst diff --git a/doc/release/1.8.0-notes.rst b/doc/source/release/1.8.0-notes.rst index 80c39f8bc..80c39f8bc 100644 --- a/doc/release/1.8.0-notes.rst +++ b/doc/source/release/1.8.0-notes.rst diff --git a/doc/release/1.8.1-notes.rst b/doc/source/release/1.8.1-notes.rst index ea34e75ac..ea34e75ac 100644 --- a/doc/release/1.8.1-notes.rst +++ b/doc/source/release/1.8.1-notes.rst diff --git a/doc/release/1.8.2-notes.rst b/doc/source/release/1.8.2-notes.rst index 71e549526..71e549526 100644 --- a/doc/release/1.8.2-notes.rst +++ b/doc/source/release/1.8.2-notes.rst diff --git a/doc/release/1.9.0-notes.rst b/doc/source/release/1.9.0-notes.rst index 7ea29e354..7ea29e354 100644 --- a/doc/release/1.9.0-notes.rst +++ b/doc/source/release/1.9.0-notes.rst diff --git a/doc/release/1.9.1-notes.rst b/doc/source/release/1.9.1-notes.rst index 4558237f4..4558237f4 100644 --- a/doc/release/1.9.1-notes.rst +++ b/doc/source/release/1.9.1-notes.rst diff --git a/doc/release/1.9.2-notes.rst b/doc/source/release/1.9.2-notes.rst index 268f3aa64..268f3aa64 100644 --- a/doc/release/1.9.2-notes.rst +++ b/doc/source/release/1.9.2-notes.rst diff --git a/doc/release/template.rst b/doc/source/release/template.rst index fdfec2be9..cde7646df 100644 --- a/doc/release/template.rst +++ b/doc/source/release/template.rst @@ -1,3 +1,5 @@ +:orphan: + ========================== NumPy 1.xx.x Release Notes ========================== diff --git a/doc/source/user/basics.io.genfromtxt.rst b/doc/source/user/basics.io.genfromtxt.rst index 6ef80bf8e..19e37eabc 100644 --- a/doc/source/user/basics.io.genfromtxt.rst +++ b/doc/source/user/basics.io.genfromtxt.rst @@ -27,13 +27,13 @@ Defining the input ================== The only mandatory argument of :func:`~numpy.genfromtxt` is the source of -the data. It can be a string, a list of strings, or a generator. 
If a -single string is provided, it is assumed to be the name of a local or -remote file, or an open file-like object with a :meth:`read` method, for -example, a file or :class:`io.StringIO` object. If a list of strings -or a generator returning strings is provided, each string is treated as one -line in a file. When the URL of a remote file is passed, the file is -automatically downloaded to the current directory and opened. +the data. It can be a string, a list of strings, a generator or an open +file-like object with a :meth:`read` method, for example, a file or +:class:`io.StringIO` object. If a single string is provided, it is assumed +to be the name of a local or remote file. If a list of strings or a generator +returning strings is provided, each string is treated as one line in a file. +When the URL of a remote file is passed, the file is automatically downloaded +to the current directory and opened. Recognized file types are text files and archives. Currently, the function recognizes :class:`gzip` and :class:`bz2` (`bzip2`) archives. The type of diff --git a/doc/source/user/c-info.beyond-basics.rst b/doc/source/user/c-info.beyond-basics.rst index d4d941a5e..dd25861b4 100644 --- a/doc/source/user/c-info.beyond-basics.rst +++ b/doc/source/user/c-info.beyond-basics.rst @@ -300,9 +300,10 @@ An example castfunc is: static void double_to_float(double *from, float* to, npy_intp n, - void* ig1, void* ig2); - while (n--) { - (*to++) = (double) *(from++); + void* ignore1, void* ignore2) { + while (n--) { + (*to++) = (float) *(from++); + } } This could then be registered to convert doubles to floats using the diff --git a/doc/source/user/quickstart.rst b/doc/source/user/quickstart.rst index a23a7b2c7..6211d0c69 100644 --- a/doc/source/user/quickstart.rst +++ b/doc/source/user/quickstart.rst @@ -206,8 +206,8 @@ of elements that we want, instead of the step:: `empty_like`, `arange`, `linspace`, - `numpy.random.mtrand.RandomState.rand`, - `numpy.random.mtrand.RandomState.randn`, + `numpy.random.RandomState.rand`, + `numpy.random.RandomState.randn`, `fromfunction`, `fromfile` diff --git a/numpy/__init__.pxd b/numpy/__init__.pxd new file mode 100644 index 000000000..23bd22e36 --- /dev/null +++ b/numpy/__init__.pxd @@ -0,0 +1,978 @@ +# NumPy static imports for Cython +# +# If any of the PyArray_* functions are called, import_array must be +# called first. +# +# This also defines backwards-compatibility buffer acquisition +# code for use in Python 2.x (or Python <= 2.5 when NumPy starts +# implementing PEP-3118 directly). +# +# Because of laziness, the format string of the buffer is statically +# allocated. Increase the size if this is not enough, or submit a +# patch to do this properly.
+# +# Author: Dag Sverre Seljebotn +# + +DEF _buffer_format_string_len = 255 + +cimport cpython.buffer as pybuf +from cpython.ref cimport Py_INCREF +from cpython.mem cimport PyObject_Malloc, PyObject_Free +from cpython.object cimport PyObject, PyTypeObject +from cpython.buffer cimport PyObject_GetBuffer +from cpython.type cimport type +cimport libc.stdio as stdio + +cdef extern from "Python.h": + ctypedef int Py_intptr_t + +cdef extern from "numpy/arrayobject.h": + ctypedef Py_intptr_t npy_intp + ctypedef size_t npy_uintp + + cdef enum NPY_TYPES: + NPY_BOOL + NPY_BYTE + NPY_UBYTE + NPY_SHORT + NPY_USHORT + NPY_INT + NPY_UINT + NPY_LONG + NPY_ULONG + NPY_LONGLONG + NPY_ULONGLONG + NPY_FLOAT + NPY_DOUBLE + NPY_LONGDOUBLE + NPY_CFLOAT + NPY_CDOUBLE + NPY_CLONGDOUBLE + NPY_OBJECT + NPY_STRING + NPY_UNICODE + NPY_VOID + NPY_DATETIME + NPY_TIMEDELTA + NPY_NTYPES + NPY_NOTYPE + + NPY_INT8 + NPY_INT16 + NPY_INT32 + NPY_INT64 + NPY_INT128 + NPY_INT256 + NPY_UINT8 + NPY_UINT16 + NPY_UINT32 + NPY_UINT64 + NPY_UINT128 + NPY_UINT256 + NPY_FLOAT16 + NPY_FLOAT32 + NPY_FLOAT64 + NPY_FLOAT80 + NPY_FLOAT96 + NPY_FLOAT128 + NPY_FLOAT256 + NPY_COMPLEX32 + NPY_COMPLEX64 + NPY_COMPLEX128 + NPY_COMPLEX160 + NPY_COMPLEX192 + NPY_COMPLEX256 + NPY_COMPLEX512 + + NPY_INTP + + ctypedef enum NPY_ORDER: + NPY_ANYORDER + NPY_CORDER + NPY_FORTRANORDER + NPY_KEEPORDER + + ctypedef enum NPY_CASTING: + NPY_NO_CASTING + NPY_EQUIV_CASTING + NPY_SAFE_CASTING + NPY_SAME_KIND_CASTING + NPY_UNSAFE_CASTING + + ctypedef enum NPY_CLIPMODE: + NPY_CLIP + NPY_WRAP + NPY_RAISE + + ctypedef enum NPY_SCALARKIND: + NPY_NOSCALAR, + NPY_BOOL_SCALAR, + NPY_INTPOS_SCALAR, + NPY_INTNEG_SCALAR, + NPY_FLOAT_SCALAR, + NPY_COMPLEX_SCALAR, + NPY_OBJECT_SCALAR + + ctypedef enum NPY_SORTKIND: + NPY_QUICKSORT + NPY_HEAPSORT + NPY_MERGESORT + + ctypedef enum NPY_SEARCHSIDE: + NPY_SEARCHLEFT + NPY_SEARCHRIGHT + + enum: + # DEPRECATED since NumPy 1.7 ! Do not use in new code! + NPY_C_CONTIGUOUS + NPY_F_CONTIGUOUS + NPY_CONTIGUOUS + NPY_FORTRAN + NPY_OWNDATA + NPY_FORCECAST + NPY_ENSURECOPY + NPY_ENSUREARRAY + NPY_ELEMENTSTRIDES + NPY_ALIGNED + NPY_NOTSWAPPED + NPY_WRITEABLE + NPY_UPDATEIFCOPY + NPY_ARR_HAS_DESCR + + NPY_BEHAVED + NPY_BEHAVED_NS + NPY_CARRAY + NPY_CARRAY_RO + NPY_FARRAY + NPY_FARRAY_RO + NPY_DEFAULT + + NPY_IN_ARRAY + NPY_OUT_ARRAY + NPY_INOUT_ARRAY + NPY_IN_FARRAY + NPY_OUT_FARRAY + NPY_INOUT_FARRAY + + NPY_UPDATE_ALL + + enum: + # Added in NumPy 1.7 to replace the deprecated enums above. + NPY_ARRAY_C_CONTIGUOUS + NPY_ARRAY_F_CONTIGUOUS + NPY_ARRAY_OWNDATA + NPY_ARRAY_FORCECAST + NPY_ARRAY_ENSURECOPY + NPY_ARRAY_ENSUREARRAY + NPY_ARRAY_ELEMENTSTRIDES + NPY_ARRAY_ALIGNED + NPY_ARRAY_NOTSWAPPED + NPY_ARRAY_WRITEABLE + NPY_ARRAY_UPDATEIFCOPY + + NPY_ARRAY_BEHAVED + NPY_ARRAY_BEHAVED_NS + NPY_ARRAY_CARRAY + NPY_ARRAY_CARRAY_RO + NPY_ARRAY_FARRAY + NPY_ARRAY_FARRAY_RO + NPY_ARRAY_DEFAULT + + NPY_ARRAY_IN_ARRAY + NPY_ARRAY_OUT_ARRAY + NPY_ARRAY_INOUT_ARRAY + NPY_ARRAY_IN_FARRAY + NPY_ARRAY_OUT_FARRAY + NPY_ARRAY_INOUT_FARRAY + + NPY_ARRAY_UPDATE_ALL + + cdef enum: + NPY_MAXDIMS + + npy_intp NPY_MAX_ELSIZE + + ctypedef void (*PyArray_VectorUnaryFunc)(void *, void *, npy_intp, void *, void *) + + ctypedef struct PyArray_ArrayDescr: + # shape is a tuple, but Cython doesn't support "tuple shape" + # inside a non-PyObject declaration, so we have to declare it + # as just a PyObject*. 
+ PyObject* shape + + ctypedef struct PyArray_Descr: + pass + + ctypedef class numpy.dtype [object PyArray_Descr, check_size ignore]: + # Use PyDataType_* macros when possible, however there are no macros + # for accessing some of the fields, so some are defined. + cdef PyTypeObject* typeobj + cdef char kind + cdef char type + # Numpy sometimes mutates this without warning (e.g. it'll + # sometimes change "|" to "<" in shared dtype objects on + # little-endian machines). If this matters to you, use + # PyArray_IsNativeByteOrder(dtype.byteorder) instead of + # directly accessing this field. + cdef char byteorder + cdef char flags + cdef int type_num + cdef int itemsize "elsize" + cdef int alignment + cdef dict fields + cdef tuple names + # Use PyDataType_HASSUBARRAY to test whether this field is + # valid (the pointer can be NULL). Most users should access + # this field via the inline helper method PyDataType_SHAPE. + cdef PyArray_ArrayDescr* subarray + + ctypedef extern class numpy.flatiter [object PyArrayIterObject, check_size ignore]: + # Use through macros + pass + + ctypedef extern class numpy.broadcast [object PyArrayMultiIterObject, check_size ignore]: + cdef int numiter + cdef npy_intp size, index + cdef int nd + cdef npy_intp *dimensions + cdef void **iters + + ctypedef struct PyArrayObject: + # For use in situations where ndarray can't replace PyArrayObject*, + # like PyArrayObject**. + pass + + ctypedef class numpy.ndarray [object PyArrayObject, check_size ignore]: + cdef __cythonbufferdefaults__ = {"mode": "strided"} + + cdef: + # Only taking a few of the most commonly used and stable fields. + # One should use PyArray_* macros instead to access the C fields. + char *data + int ndim "nd" + npy_intp *shape "dimensions" + npy_intp *strides + dtype descr # deprecated since NumPy 1.7 ! + PyObject* base + + # Note: This syntax (function definition in pxd files) is an + # experimental exception made for __getbuffer__ and __releasebuffer__ + # -- the details of this may change. + def __getbuffer__(ndarray self, Py_buffer* info, int flags): + PyObject_GetBuffer(<object>self, info, flags); + + def __releasebuffer__(ndarray self, Py_buffer* info): + # We should call a possible tp_bufferrelease(self, info) but no + # interface to that is exposed by cython or python. 
And currently + # the function is NULL in numpy, we rely on refcounting to release + # info when self is collected + pass + + + ctypedef unsigned char npy_bool + + ctypedef signed char npy_byte + ctypedef signed short npy_short + ctypedef signed int npy_int + ctypedef signed long npy_long + ctypedef signed long long npy_longlong + + ctypedef unsigned char npy_ubyte + ctypedef unsigned short npy_ushort + ctypedef unsigned int npy_uint + ctypedef unsigned long npy_ulong + ctypedef unsigned long long npy_ulonglong + + ctypedef float npy_float + ctypedef double npy_double + ctypedef long double npy_longdouble + + ctypedef signed char npy_int8 + ctypedef signed short npy_int16 + ctypedef signed int npy_int32 + ctypedef signed long long npy_int64 + ctypedef signed long long npy_int96 + ctypedef signed long long npy_int128 + + ctypedef unsigned char npy_uint8 + ctypedef unsigned short npy_uint16 + ctypedef unsigned int npy_uint32 + ctypedef unsigned long long npy_uint64 + ctypedef unsigned long long npy_uint96 + ctypedef unsigned long long npy_uint128 + + ctypedef float npy_float32 + ctypedef double npy_float64 + ctypedef long double npy_float80 + ctypedef long double npy_float96 + ctypedef long double npy_float128 + + ctypedef struct npy_cfloat: + double real + double imag + + ctypedef struct npy_cdouble: + double real + double imag + + ctypedef struct npy_clongdouble: + long double real + long double imag + + ctypedef struct npy_complex64: + float real + float imag + + ctypedef struct npy_complex128: + double real + double imag + + ctypedef struct npy_complex160: + long double real + long double imag + + ctypedef struct npy_complex192: + long double real + long double imag + + ctypedef struct npy_complex256: + long double real + long double imag + + ctypedef struct PyArray_Dims: + npy_intp *ptr + int len + + int _import_array() except -1 + + # + # Macros from ndarrayobject.h + # + bint PyArray_CHKFLAGS(ndarray m, int flags) + bint PyArray_IS_C_CONTIGUOUS(ndarray arr) + bint PyArray_IS_F_CONTIGUOUS(ndarray arr) + bint PyArray_ISCONTIGUOUS(ndarray m) + bint PyArray_ISWRITEABLE(ndarray m) + bint PyArray_ISALIGNED(ndarray m) + + int PyArray_NDIM(ndarray) + bint PyArray_ISONESEGMENT(ndarray) + bint PyArray_ISFORTRAN(ndarray) + int PyArray_FORTRANIF(ndarray) + + void* PyArray_DATA(ndarray) + char* PyArray_BYTES(ndarray) + npy_intp* PyArray_DIMS(ndarray) + npy_intp* PyArray_STRIDES(ndarray) + npy_intp PyArray_DIM(ndarray, size_t) + npy_intp PyArray_STRIDE(ndarray, size_t) + + PyObject *PyArray_BASE(ndarray) # returns borrowed reference! + PyArray_Descr *PyArray_DESCR(ndarray) # returns borrowed reference to dtype! 
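The ``__getbuffer__`` slot declared above simply forwards to ``PyObject_GetBuffer``, so the export Cython consumers rely on is the same one Python code reaches through ``memoryview``. A minimal sketch of that correspondence (printed values assume a platform where the C ``int`` is 32 bits)::

    import numpy as np

    a = np.arange(6, dtype=np.intc).reshape(2, 3)
    m = memoryview(a)                      # goes through the buffer export
    print(m.format, m.shape, m.strides)    # i (2, 3) (12, 4)
    # The view keeps ``a`` alive; as the __releasebuffer__ comment above
    # notes, release is then handled purely by refcounting.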
+ int PyArray_FLAGS(ndarray) + npy_intp PyArray_ITEMSIZE(ndarray) + int PyArray_TYPE(ndarray arr) + + object PyArray_GETITEM(ndarray arr, void *itemptr) + int PyArray_SETITEM(ndarray arr, void *itemptr, object obj) + + bint PyTypeNum_ISBOOL(int) + bint PyTypeNum_ISUNSIGNED(int) + bint PyTypeNum_ISSIGNED(int) + bint PyTypeNum_ISINTEGER(int) + bint PyTypeNum_ISFLOAT(int) + bint PyTypeNum_ISNUMBER(int) + bint PyTypeNum_ISSTRING(int) + bint PyTypeNum_ISCOMPLEX(int) + bint PyTypeNum_ISPYTHON(int) + bint PyTypeNum_ISFLEXIBLE(int) + bint PyTypeNum_ISUSERDEF(int) + bint PyTypeNum_ISEXTENDED(int) + bint PyTypeNum_ISOBJECT(int) + + bint PyDataType_ISBOOL(dtype) + bint PyDataType_ISUNSIGNED(dtype) + bint PyDataType_ISSIGNED(dtype) + bint PyDataType_ISINTEGER(dtype) + bint PyDataType_ISFLOAT(dtype) + bint PyDataType_ISNUMBER(dtype) + bint PyDataType_ISSTRING(dtype) + bint PyDataType_ISCOMPLEX(dtype) + bint PyDataType_ISPYTHON(dtype) + bint PyDataType_ISFLEXIBLE(dtype) + bint PyDataType_ISUSERDEF(dtype) + bint PyDataType_ISEXTENDED(dtype) + bint PyDataType_ISOBJECT(dtype) + bint PyDataType_HASFIELDS(dtype) + bint PyDataType_HASSUBARRAY(dtype) + + bint PyArray_ISBOOL(ndarray) + bint PyArray_ISUNSIGNED(ndarray) + bint PyArray_ISSIGNED(ndarray) + bint PyArray_ISINTEGER(ndarray) + bint PyArray_ISFLOAT(ndarray) + bint PyArray_ISNUMBER(ndarray) + bint PyArray_ISSTRING(ndarray) + bint PyArray_ISCOMPLEX(ndarray) + bint PyArray_ISPYTHON(ndarray) + bint PyArray_ISFLEXIBLE(ndarray) + bint PyArray_ISUSERDEF(ndarray) + bint PyArray_ISEXTENDED(ndarray) + bint PyArray_ISOBJECT(ndarray) + bint PyArray_HASFIELDS(ndarray) + + bint PyArray_ISVARIABLE(ndarray) + + bint PyArray_SAFEALIGNEDCOPY(ndarray) + bint PyArray_ISNBO(char) # works on ndarray.byteorder + bint PyArray_IsNativeByteOrder(char) # works on ndarray.byteorder + bint PyArray_ISNOTSWAPPED(ndarray) + bint PyArray_ISBYTESWAPPED(ndarray) + + bint PyArray_FLAGSWAP(ndarray, int) + + bint PyArray_ISCARRAY(ndarray) + bint PyArray_ISCARRAY_RO(ndarray) + bint PyArray_ISFARRAY(ndarray) + bint PyArray_ISFARRAY_RO(ndarray) + bint PyArray_ISBEHAVED(ndarray) + bint PyArray_ISBEHAVED_RO(ndarray) + + + bint PyDataType_ISNOTSWAPPED(dtype) + bint PyDataType_ISBYTESWAPPED(dtype) + + bint PyArray_DescrCheck(object) + + bint PyArray_Check(object) + bint PyArray_CheckExact(object) + + # Cannot be supported due to out arg: + # bint PyArray_HasArrayInterfaceType(object, dtype, object, object&) + # bint PyArray_HasArrayInterface(op, out) + + + bint PyArray_IsZeroDim(object) + # Cannot be supported due to ## ## in macro: + # bint PyArray_IsScalar(object, verbatim work) + bint PyArray_CheckScalar(object) + bint PyArray_IsPythonNumber(object) + bint PyArray_IsPythonScalar(object) + bint PyArray_IsAnyScalar(object) + bint PyArray_CheckAnyScalar(object) + ndarray PyArray_GETCONTIGUOUS(ndarray) + bint PyArray_SAMESHAPE(ndarray, ndarray) + npy_intp PyArray_SIZE(ndarray) + npy_intp PyArray_NBYTES(ndarray) + + object PyArray_FROM_O(object) + object PyArray_FROM_OF(object m, int flags) + object PyArray_FROM_OT(object m, int type) + object PyArray_FROM_OTF(object m, int type, int flags) + object PyArray_FROMANY(object m, int type, int min, int max, int flags) + object PyArray_ZEROS(int nd, npy_intp* dims, int type, int fortran) + object PyArray_EMPTY(int nd, npy_intp* dims, int type, int fortran) + void PyArray_FILLWBYTE(object, int val) + npy_intp PyArray_REFCOUNT(object) + object PyArray_ContiguousFromAny(op, int, int min_depth, int max_depth) + unsigned char PyArray_EquivArrTypes(ndarray a1, 
ndarray a2) + bint PyArray_EquivByteorders(int b1, int b2) + object PyArray_SimpleNew(int nd, npy_intp* dims, int typenum) + object PyArray_SimpleNewFromData(int nd, npy_intp* dims, int typenum, void* data) + #object PyArray_SimpleNewFromDescr(int nd, npy_intp* dims, dtype descr) + object PyArray_ToScalar(void* data, ndarray arr) + + void* PyArray_GETPTR1(ndarray m, npy_intp i) + void* PyArray_GETPTR2(ndarray m, npy_intp i, npy_intp j) + void* PyArray_GETPTR3(ndarray m, npy_intp i, npy_intp j, npy_intp k) + void* PyArray_GETPTR4(ndarray m, npy_intp i, npy_intp j, npy_intp k, npy_intp l) + + void PyArray_XDECREF_ERR(ndarray) + # Cannot be supported due to out arg + # void PyArray_DESCR_REPLACE(descr) + + + object PyArray_Copy(ndarray) + object PyArray_FromObject(object op, int type, int min_depth, int max_depth) + object PyArray_ContiguousFromObject(object op, int type, int min_depth, int max_depth) + object PyArray_CopyFromObject(object op, int type, int min_depth, int max_depth) + + object PyArray_Cast(ndarray mp, int type_num) + object PyArray_Take(ndarray ap, object items, int axis) + object PyArray_Put(ndarray ap, object items, object values) + + void PyArray_ITER_RESET(flatiter it) nogil + void PyArray_ITER_NEXT(flatiter it) nogil + void PyArray_ITER_GOTO(flatiter it, npy_intp* destination) nogil + void PyArray_ITER_GOTO1D(flatiter it, npy_intp ind) nogil + void* PyArray_ITER_DATA(flatiter it) nogil + bint PyArray_ITER_NOTDONE(flatiter it) nogil + + void PyArray_MultiIter_RESET(broadcast multi) nogil + void PyArray_MultiIter_NEXT(broadcast multi) nogil + void PyArray_MultiIter_GOTO(broadcast multi, npy_intp dest) nogil + void PyArray_MultiIter_GOTO1D(broadcast multi, npy_intp ind) nogil + void* PyArray_MultiIter_DATA(broadcast multi, npy_intp i) nogil + void PyArray_MultiIter_NEXTi(broadcast multi, npy_intp i) nogil + bint PyArray_MultiIter_NOTDONE(broadcast multi) nogil + + # Functions from __multiarray_api.h + + # Functions taking dtype and returning object/ndarray are disabled + # for now as they steal dtype references. I'm conservative and disable + # more than is probably needed until it can be checked further. 
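The ``PyArray_ITER_*`` and ``PyArray_MultiIter_*`` macros above operate on the same ``flatiter`` and ``broadcast`` objects that Python code obtains via ``ndarray.flat`` and ``np.broadcast``; a small sketch, with expected values as comments::

    import numpy as np

    a = np.arange(6).reshape(2, 3)
    it = a.flat                        # numpy.flatiter, as declared above
    print(it[4])                       # 4 -- PyArray_ITER_GOTO1D-style access
    b = np.broadcast(np.arange(3), np.arange(2)[:, None])
    print(b.nd, b.shape, b.numiter)    # 2 (2, 3) 2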
+ int PyArray_SetNumericOps (object) + object PyArray_GetNumericOps () + int PyArray_INCREF (ndarray) + int PyArray_XDECREF (ndarray) + void PyArray_SetStringFunction (object, int) + dtype PyArray_DescrFromType (int) + object PyArray_TypeObjectFromType (int) + char * PyArray_Zero (ndarray) + char * PyArray_One (ndarray) + #object PyArray_CastToType (ndarray, dtype, int) + int PyArray_CastTo (ndarray, ndarray) + int PyArray_CastAnyTo (ndarray, ndarray) + int PyArray_CanCastSafely (int, int) + npy_bool PyArray_CanCastTo (dtype, dtype) + int PyArray_ObjectType (object, int) + dtype PyArray_DescrFromObject (object, dtype) + #ndarray* PyArray_ConvertToCommonType (object, int *) + dtype PyArray_DescrFromScalar (object) + dtype PyArray_DescrFromTypeObject (object) + npy_intp PyArray_Size (object) + #object PyArray_Scalar (void *, dtype, object) + #object PyArray_FromScalar (object, dtype) + void PyArray_ScalarAsCtype (object, void *) + #int PyArray_CastScalarToCtype (object, void *, dtype) + #int PyArray_CastScalarDirect (object, dtype, void *, int) + object PyArray_ScalarFromObject (object) + #PyArray_VectorUnaryFunc * PyArray_GetCastFunc (dtype, int) + object PyArray_FromDims (int, int *, int) + #object PyArray_FromDimsAndDataAndDescr (int, int *, dtype, char *) + #object PyArray_FromAny (object, dtype, int, int, int, object) + object PyArray_EnsureArray (object) + object PyArray_EnsureAnyArray (object) + #object PyArray_FromFile (stdio.FILE *, dtype, npy_intp, char *) + #object PyArray_FromString (char *, npy_intp, dtype, npy_intp, char *) + #object PyArray_FromBuffer (object, dtype, npy_intp, npy_intp) + #object PyArray_FromIter (object, dtype, npy_intp) + object PyArray_Return (ndarray) + #object PyArray_GetField (ndarray, dtype, int) + #int PyArray_SetField (ndarray, dtype, int, object) + object PyArray_Byteswap (ndarray, npy_bool) + object PyArray_Resize (ndarray, PyArray_Dims *, int, NPY_ORDER) + int PyArray_MoveInto (ndarray, ndarray) + int PyArray_CopyInto (ndarray, ndarray) + int PyArray_CopyAnyInto (ndarray, ndarray) + int PyArray_CopyObject (ndarray, object) + object PyArray_NewCopy (ndarray, NPY_ORDER) + object PyArray_ToList (ndarray) + object PyArray_ToString (ndarray, NPY_ORDER) + int PyArray_ToFile (ndarray, stdio.FILE *, char *, char *) + int PyArray_Dump (object, object, int) + object PyArray_Dumps (object, int) + int PyArray_ValidType (int) + void PyArray_UpdateFlags (ndarray, int) + object PyArray_New (type, int, npy_intp *, int, npy_intp *, void *, int, int, object) + #object PyArray_NewFromDescr (type, dtype, int, npy_intp *, npy_intp *, void *, int, object) + #dtype PyArray_DescrNew (dtype) + dtype PyArray_DescrNewFromType (int) + double PyArray_GetPriority (object, double) + object PyArray_IterNew (object) + object PyArray_MultiIterNew (int, ...) 
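``PyArray_CanCastSafely`` and ``PyArray_CanCastTo`` above are the C-level backing for Python's casting queries; a minimal sketch using ``np.can_cast``, whose default rule is ``'safe'``::

    import numpy as np

    assert np.can_cast(np.int64, np.float64)        # 'safe' by numpy's rules
    assert not np.can_cast(np.float64, np.int64)    # loses information
    assert np.can_cast(np.float64, np.int64, casting='unsafe')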
+ + int PyArray_PyIntAsInt (object) + npy_intp PyArray_PyIntAsIntp (object) + int PyArray_Broadcast (broadcast) + void PyArray_FillObjectArray (ndarray, object) + int PyArray_FillWithScalar (ndarray, object) + npy_bool PyArray_CheckStrides (int, int, npy_intp, npy_intp, npy_intp *, npy_intp *) + dtype PyArray_DescrNewByteorder (dtype, char) + object PyArray_IterAllButAxis (object, int *) + #object PyArray_CheckFromAny (object, dtype, int, int, int, object) + #object PyArray_FromArray (ndarray, dtype, int) + object PyArray_FromInterface (object) + object PyArray_FromStructInterface (object) + #object PyArray_FromArrayAttr (object, dtype, object) + #NPY_SCALARKIND PyArray_ScalarKind (int, ndarray*) + int PyArray_CanCoerceScalar (int, int, NPY_SCALARKIND) + object PyArray_NewFlagsObject (object) + npy_bool PyArray_CanCastScalar (type, type) + #int PyArray_CompareUCS4 (npy_ucs4 *, npy_ucs4 *, register size_t) + int PyArray_RemoveSmallest (broadcast) + int PyArray_ElementStrides (object) + void PyArray_Item_INCREF (char *, dtype) + void PyArray_Item_XDECREF (char *, dtype) + object PyArray_FieldNames (object) + object PyArray_Transpose (ndarray, PyArray_Dims *) + object PyArray_TakeFrom (ndarray, object, int, ndarray, NPY_CLIPMODE) + object PyArray_PutTo (ndarray, object, object, NPY_CLIPMODE) + object PyArray_PutMask (ndarray, object, object) + object PyArray_Repeat (ndarray, object, int) + object PyArray_Choose (ndarray, object, ndarray, NPY_CLIPMODE) + int PyArray_Sort (ndarray, int, NPY_SORTKIND) + object PyArray_ArgSort (ndarray, int, NPY_SORTKIND) + object PyArray_SearchSorted (ndarray, object, NPY_SEARCHSIDE) + object PyArray_ArgMax (ndarray, int, ndarray) + object PyArray_ArgMin (ndarray, int, ndarray) + object PyArray_Reshape (ndarray, object) + object PyArray_Newshape (ndarray, PyArray_Dims *, NPY_ORDER) + object PyArray_Squeeze (ndarray) + #object PyArray_View (ndarray, dtype, type) + object PyArray_SwapAxes (ndarray, int, int) + object PyArray_Max (ndarray, int, ndarray) + object PyArray_Min (ndarray, int, ndarray) + object PyArray_Ptp (ndarray, int, ndarray) + object PyArray_Mean (ndarray, int, int, ndarray) + object PyArray_Trace (ndarray, int, int, int, int, ndarray) + object PyArray_Diagonal (ndarray, int, int, int) + object PyArray_Clip (ndarray, object, object, ndarray) + object PyArray_Conjugate (ndarray, ndarray) + object PyArray_Nonzero (ndarray) + object PyArray_Std (ndarray, int, int, ndarray, int) + object PyArray_Sum (ndarray, int, int, ndarray) + object PyArray_CumSum (ndarray, int, int, ndarray) + object PyArray_Prod (ndarray, int, int, ndarray) + object PyArray_CumProd (ndarray, int, int, ndarray) + object PyArray_All (ndarray, int, ndarray) + object PyArray_Any (ndarray, int, ndarray) + object PyArray_Compress (ndarray, object, int, ndarray) + object PyArray_Flatten (ndarray, NPY_ORDER) + object PyArray_Ravel (ndarray, NPY_ORDER) + npy_intp PyArray_MultiplyList (npy_intp *, int) + int PyArray_MultiplyIntList (int *, int) + void * PyArray_GetPtr (ndarray, npy_intp*) + int PyArray_CompareLists (npy_intp *, npy_intp *, int) + #int PyArray_AsCArray (object*, void *, npy_intp *, int, dtype) + #int PyArray_As1D (object*, char **, int *, int) + #int PyArray_As2D (object*, char ***, int *, int *, int) + int PyArray_Free (object, void *) + #int PyArray_Converter (object, object*) + int PyArray_IntpFromSequence (object, npy_intp *, int) + object PyArray_Concatenate (object, int) + object PyArray_InnerProduct (object, object) + object PyArray_MatrixProduct (object, object) + 
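As one concrete example from the list above, ``PyArray_SearchSorted`` together with the ``NPY_SEARCHSIDE`` enum declared earlier is what ``np.searchsorted`` dispatches to::

    import numpy as np

    a = np.array([1, 2, 2, 3])
    print(np.searchsorted(a, 2, side='left'))     # 1 (NPY_SEARCHLEFT)
    print(np.searchsorted(a, 2, side='right'))    # 3 (NPY_SEARCHRIGHT)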
object PyArray_CopyAndTranspose (object) + object PyArray_Correlate (object, object, int) + int PyArray_TypestrConvert (int, int) + #int PyArray_DescrConverter (object, dtype*) + #int PyArray_DescrConverter2 (object, dtype*) + int PyArray_IntpConverter (object, PyArray_Dims *) + #int PyArray_BufferConverter (object, chunk) + int PyArray_AxisConverter (object, int *) + int PyArray_BoolConverter (object, npy_bool *) + int PyArray_ByteorderConverter (object, char *) + int PyArray_OrderConverter (object, NPY_ORDER *) + unsigned char PyArray_EquivTypes (dtype, dtype) + #object PyArray_Zeros (int, npy_intp *, dtype, int) + #object PyArray_Empty (int, npy_intp *, dtype, int) + object PyArray_Where (object, object, object) + object PyArray_Arange (double, double, double, int) + #object PyArray_ArangeObj (object, object, object, dtype) + int PyArray_SortkindConverter (object, NPY_SORTKIND *) + object PyArray_LexSort (object, int) + object PyArray_Round (ndarray, int, ndarray) + unsigned char PyArray_EquivTypenums (int, int) + int PyArray_RegisterDataType (dtype) + int PyArray_RegisterCastFunc (dtype, int, PyArray_VectorUnaryFunc *) + int PyArray_RegisterCanCast (dtype, int, NPY_SCALARKIND) + #void PyArray_InitArrFuncs (PyArray_ArrFuncs *) + object PyArray_IntTupleFromIntp (int, npy_intp *) + int PyArray_TypeNumFromName (char *) + int PyArray_ClipmodeConverter (object, NPY_CLIPMODE *) + #int PyArray_OutputConverter (object, ndarray*) + object PyArray_BroadcastToShape (object, npy_intp *, int) + void _PyArray_SigintHandler (int) + void* _PyArray_GetSigintBuf () + #int PyArray_DescrAlignConverter (object, dtype*) + #int PyArray_DescrAlignConverter2 (object, dtype*) + int PyArray_SearchsideConverter (object, void *) + object PyArray_CheckAxis (ndarray, int *, int) + npy_intp PyArray_OverflowMultiplyList (npy_intp *, int) + int PyArray_CompareString (char *, char *, size_t) + int PyArray_SetBaseObject(ndarray, base) # NOTE: steals a reference to base! Use "set_array_base()" instead. + + +# Typedefs that match the runtime dtype objects in +# the numpy module. + +# The ones that are commented out need an IFDEF function +# in Cython to enable them only on the right systems.
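That IFDEF caveat is visible from Python: the extended-precision names exist only on platforms whose C ``long double`` supplies them, so a sketch like the following prints a platform-dependent mix of True and False::

    import numpy as np

    for name in ('float96', 'float128', 'complex192', 'complex256'):
        print(name, hasattr(np, name))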
+ +ctypedef npy_int8 int8_t +ctypedef npy_int16 int16_t +ctypedef npy_int32 int32_t +ctypedef npy_int64 int64_t +#ctypedef npy_int96 int96_t +#ctypedef npy_int128 int128_t + +ctypedef npy_uint8 uint8_t +ctypedef npy_uint16 uint16_t +ctypedef npy_uint32 uint32_t +ctypedef npy_uint64 uint64_t +#ctypedef npy_uint96 uint96_t +#ctypedef npy_uint128 uint128_t + +ctypedef npy_float32 float32_t +ctypedef npy_float64 float64_t +#ctypedef npy_float80 float80_t +#ctypedef npy_float128 float128_t + +ctypedef float complex complex64_t +ctypedef double complex complex128_t + +# The int types are mapped a bit surprising -- +# numpy.int corresponds to 'l' and numpy.long to 'q' +ctypedef npy_long int_t +ctypedef npy_longlong long_t +ctypedef npy_longlong longlong_t + +ctypedef npy_ulong uint_t +ctypedef npy_ulonglong ulong_t +ctypedef npy_ulonglong ulonglong_t + +ctypedef npy_intp intp_t +ctypedef npy_uintp uintp_t + +ctypedef npy_double float_t +ctypedef npy_double double_t +ctypedef npy_longdouble longdouble_t + +ctypedef npy_cfloat cfloat_t +ctypedef npy_cdouble cdouble_t +ctypedef npy_clongdouble clongdouble_t + +ctypedef npy_cdouble complex_t + +cdef inline object PyArray_MultiIterNew1(a): + return PyArray_MultiIterNew(1, <void*>a) + +cdef inline object PyArray_MultiIterNew2(a, b): + return PyArray_MultiIterNew(2, <void*>a, <void*>b) + +cdef inline object PyArray_MultiIterNew3(a, b, c): + return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) + +cdef inline object PyArray_MultiIterNew4(a, b, c, d): + return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) + +cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): + return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) + +cdef inline tuple PyDataType_SHAPE(dtype d): + if PyDataType_HASSUBARRAY(d): + return <tuple>d.subarray.shape + else: + return () + +cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: + # Recursive utility function used in __getbuffer__ to get format + # string. The new location in the format string is returned. + + cdef dtype child + cdef int endian_detector = 1 + cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) + cdef tuple fields + + for childname in descr.names: + fields = descr.fields[childname] + child, new_offset = fields + + if (end - f) - <int>(new_offset - offset[0]) < 15: + raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") + + if ((child.byteorder == c'>' and little_endian) or + (child.byteorder == c'<' and not little_endian)): + raise ValueError(u"Non-native byte order not supported") + # One could encode it in the format string and have Cython + # complain instead, BUT: < and > in format strings also imply + # standardized sizes for datatypes, and we rely on native in + # order to avoid reencoding data types based on their size. + # + # A proper PEP 3118 exporter for other clients than Cython + # must deal properly with this! 
+ + # Output padding bytes + while offset[0] < new_offset: + f[0] = 120 # "x"; pad byte + f += 1 + offset[0] += 1 + + offset[0] += child.itemsize + + if not PyDataType_HASFIELDS(child): + t = child.type_num + if end - f < 5: + raise RuntimeError(u"Format string allocated too short.") + + # Until ticket #99 is fixed, use integers to avoid warnings + if t == NPY_BYTE: f[0] = 98 #"b" + elif t == NPY_UBYTE: f[0] = 66 #"B" + elif t == NPY_SHORT: f[0] = 104 #"h" + elif t == NPY_USHORT: f[0] = 72 #"H" + elif t == NPY_INT: f[0] = 105 #"i" + elif t == NPY_UINT: f[0] = 73 #"I" + elif t == NPY_LONG: f[0] = 108 #"l" + elif t == NPY_ULONG: f[0] = 76 #"L" + elif t == NPY_LONGLONG: f[0] = 113 #"q" + elif t == NPY_ULONGLONG: f[0] = 81 #"Q" + elif t == NPY_FLOAT: f[0] = 102 #"f" + elif t == NPY_DOUBLE: f[0] = 100 #"d" + elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" + elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf + elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd + elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg + elif t == NPY_OBJECT: f[0] = 79 #"O" + else: + raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) + f += 1 + else: + # Cython ignores struct boundary information ("T{...}"), + # so don't output it + f = _util_dtypestring(child, f, end, offset) + return f + + +# +# ufunc API +# + +cdef extern from "numpy/ufuncobject.h": + + ctypedef void (*PyUFuncGenericFunction) (char **, npy_intp *, npy_intp *, void *) + + ctypedef extern class numpy.ufunc [object PyUFuncObject, check_size ignore]: + cdef: + int nin, nout, nargs + int identity + PyUFuncGenericFunction *functions + void **data + int ntypes + int check_return + char *name + char *types + char *doc + void *ptr + PyObject *obj + PyObject *userloops + + cdef enum: + PyUFunc_Zero + PyUFunc_One + PyUFunc_None + UFUNC_ERR_IGNORE + UFUNC_ERR_WARN + UFUNC_ERR_RAISE + UFUNC_ERR_CALL + UFUNC_ERR_PRINT + UFUNC_ERR_LOG + UFUNC_MASK_DIVIDEBYZERO + UFUNC_MASK_OVERFLOW + UFUNC_MASK_UNDERFLOW + UFUNC_MASK_INVALID + UFUNC_SHIFT_DIVIDEBYZERO + UFUNC_SHIFT_OVERFLOW + UFUNC_SHIFT_UNDERFLOW + UFUNC_SHIFT_INVALID + UFUNC_FPE_DIVIDEBYZERO + UFUNC_FPE_OVERFLOW + UFUNC_FPE_UNDERFLOW + UFUNC_FPE_INVALID + UFUNC_ERR_DEFAULT + UFUNC_ERR_DEFAULT2 + + object PyUFunc_FromFuncAndData(PyUFuncGenericFunction *, + void **, char *, int, int, int, int, char *, char *, int) + int PyUFunc_RegisterLoopForType(ufunc, int, + PyUFuncGenericFunction, int *, void *) + int PyUFunc_GenericFunction \ + (ufunc, PyObject *, PyObject *, PyArrayObject **) + void PyUFunc_f_f_As_d_d \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_d_d \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_f_f \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_g_g \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_F_F_As_D_D \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_F_F \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_D_D \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_G_G \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_O_O \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_ff_f_As_dd_d \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_ff_f \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_dd_d \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_gg_g \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_FF_F_As_DD_D \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_DD_D \ + (char **, npy_intp *, npy_intp *, void *) + void 
PyUFunc_FF_F \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_GG_G \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_OO_O \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_O_O_method \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_OO_O_method \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_On_Om \ + (char **, npy_intp *, npy_intp *, void *) + int PyUFunc_GetPyValues \ + (char *, int *, int *, PyObject **) + int PyUFunc_checkfperr \ + (int, PyObject *, int *) + void PyUFunc_clearfperr() + int PyUFunc_getfperr() + int PyUFunc_handlefperr \ + (int, PyObject *, int, int *) + int PyUFunc_ReplaceLoopBySignature \ + (ufunc, PyUFuncGenericFunction, int *, PyUFuncGenericFunction *) + object PyUFunc_FromFuncAndDataAndSignature \ + (PyUFuncGenericFunction *, void **, char *, int, int, int, + int, char *, char *, int, char *) + + int _import_umath() except -1 + +cdef inline void set_array_base(ndarray arr, object base): + Py_INCREF(base) # important to do this before stealing the reference below! + PyArray_SetBaseObject(arr, base) + +cdef inline object get_array_base(ndarray arr): + base = PyArray_BASE(arr) + if base is NULL: + return None + return <object>base + +# Versions of the import_* functions which are more suitable for +# Cython code. +cdef inline int import_array() except -1: + try: + _import_array() + except Exception: + raise ImportError("numpy.core.multiarray failed to import") + +cdef inline int import_umath() except -1: + try: + _import_umath() + except Exception: + raise ImportError("numpy.core.umath failed to import") + +cdef inline int import_ufunc() except -1: + try: + _import_umath() + except Exception: + raise ImportError("numpy.core.umath failed to import") diff --git a/numpy/__init__.py b/numpy/__init__.py index ae297597e..fef8245de 100644 --- a/numpy/__init__.py +++ b/numpy/__init__.py @@ -143,7 +143,9 @@ else: from .core import * from . import compat from . import lib + # FIXME: why have numpy.lib if everything is imported here?? from .lib import * + from . import linalg from . import fft from . import polynomial @@ -174,6 +176,14 @@ else: __all__.extend(lib.__all__) __all__.extend(['linalg', 'fft', 'random', 'ctypeslib', 'ma']) + # Remove things that are in the numpy.lib but not in the numpy namespace + # Note that there is a test (numpy/tests/test_public_api.py:test_numpy_namespace) + # that prevents adding more things to the main namespace by accident. + # The list below will grow until the `from .lib import *` fixme above is + # taken care of + __all__.remove('Arrayterator') + del Arrayterator + # Filter out Cython harmless warnings warnings.filterwarnings("ignore", message="numpy.dtype size changed") warnings.filterwarnings("ignore", message="numpy.ufunc size changed") @@ -202,9 +212,8 @@ else: from .testing import Tester return Tester else: - raise AttributeError( - "module %s has no attribute $s".format(__name__, attr)) - + raise AttributeError("module {!r} has no attribute " + "{!r}".format(__name__, attr)) def __dir__(): return __all__ + ['Tester', 'testing'] diff --git a/numpy/_pytesttester.py b/numpy/_pytesttester.py index 8d1a3811c..b25224c20 100644 --- a/numpy/_pytesttester.py +++ b/numpy/_pytesttester.py @@ -48,10 +48,9 @@ class PytestTester(object): """ Pytest test runner. 
- This class is made available in ``numpy.testing``, and a test function - is typically added to a package's __init__.py like so:: + A test function is typically added to a package's __init__.py like so:: - from numpy.testing import PytestTester + from numpy._pytesttester import PytestTester test = PytestTester(__name__).test del PytestTester @@ -68,6 +67,12 @@ class PytestTester(object): module_name : module name The name of the module to test. + Notes + ----- + Unlike the previous ``nose``-based implementation, this class is not + publicly exposed as it performs some ``numpy``-specific warning + suppression. + """ def __init__(self, module_name): self.module_name = module_name diff --git a/numpy/core/__init__.py b/numpy/core/__init__.py index ce443bb22..c3b3f0392 100644 --- a/numpy/core/__init__.py +++ b/numpy/core/__init__.py @@ -1,6 +1,13 @@ +""" +Contains the core of NumPy: ndarray, ufuncs, dtypes, etc. + +Please note that this module is private. All functions and objects +are available in the main ``numpy`` namespace - use that instead. + +""" + from __future__ import division, absolute_import, print_function -from .info import __doc__ from numpy.version import version as __version__ import os diff --git a/numpy/core/_add_newdocs.py b/numpy/core/_add_newdocs.py index f041e0cd6..bd309f4a5 100644 --- a/numpy/core/_add_newdocs.py +++ b/numpy/core/_add_newdocs.py @@ -386,12 +386,12 @@ add_newdoc('numpy.core', 'nditer', >>> luf(lambda i,j:i*i + j/2, a, b) array([ 0.5, 1.5, 4.5, 9.5, 16.5]) - If operand flags `"writeonly"` or `"readwrite"` are used the operands may - be views into the original data with the `WRITEBACKIFCOPY` flag. In this case - nditer must be used as a context manager or the nditer.close - method must be called before using the result. The temporary - data will be written back to the original data when the `__exit__` - function is called but not before: + If operand flags `"writeonly"` or `"readwrite"` are used the + operands may be views into the original data with the + `WRITEBACKIFCOPY` flag. In this case `nditer` must be used as a + context manager or the `nditer.close` method must be called before + using the result. The temporary data will be written back to the + original data when the `__exit__` function is called but not before: >>> a = np.arange(6, dtype='i4')[::-2] >>> with np.nditer(a, [], @@ -413,6 +413,8 @@ add_newdoc('numpy.core', 'nditer', `x.data` will still point at some part of `a.data`, and writing to one will affect the other. + Context management and the `close` method appeared in version 1.15.0. + """) # nditer methods @@ -568,6 +570,8 @@ add_newdoc('numpy.core', 'nditer', ('close', Resolve all writeback semantics in writeable operands. + .. versionadded:: 1.15.0 + See Also -------- @@ -1322,9 +1326,9 @@ add_newdoc('numpy.core.multiarray', 'arange', See Also -------- - linspace : Evenly spaced numbers with careful handling of endpoints. - ogrid: Arrays of evenly spaced numbers in N-dimensions. - mgrid: Grid-shaped arrays of evenly spaced numbers in N-dimensions. + numpy.linspace : Evenly spaced numbers with careful handling of endpoints. + numpy.ogrid: Arrays of evenly spaced numbers in N-dimensions. + numpy.mgrid: Grid-shaped arrays of evenly spaced numbers in N-dimensions. Examples -------- @@ -1342,7 +1346,7 @@ add_newdoc('numpy.core.multiarray', 'arange', add_newdoc('numpy.core.multiarray', '_get_ndarray_c_version', """_get_ndarray_c_version() - Return the compile time NDARRAY_VERSION number. 
+ Return the compile time NPY_VERSION (formerly called NDARRAY_VERSION) number. """) @@ -3702,10 +3706,10 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('sort', See Also -------- numpy.sort : Return a sorted copy of an array. - argsort : Indirect sort. - lexsort : Indirect stable sort on multiple keys. - searchsorted : Find elements in sorted array. - partition: Partial sort. + numpy.argsort : Indirect sort. + numpy.lexsort : Indirect stable sort on multiple keys. + numpy.searchsorted : Find elements in sorted array. + numpy.partition: Partial sort. Notes ----- @@ -4493,7 +4497,7 @@ add_newdoc('numpy.core', 'ufunc', Alternate array object(s) in which to put the result; if provided, it must have a shape that the inputs broadcast to. A tuple of arrays (possible only as a keyword argument) must have length equal to the - number of outputs; use `None` for uninitialized outputs to be + number of outputs; use None for uninitialized outputs to be allocated by the ufunc. where : array_like, optional This condition is broadcast over the input. At locations where the @@ -4687,7 +4691,7 @@ add_newdoc('numpy.core', 'ufunc', ('signature', ----- Generalized ufuncs are used internally in many linalg functions, and in the testing suite; the examples below are taken from these. - For ufuncs that operate on scalars, the signature is `None`, which is + For ufuncs that operate on scalars, the signature is None, which is equivalent to '()' for every argument. Examples @@ -4738,7 +4742,7 @@ add_newdoc('numpy.core', 'ufunc', ('reduce', .. versionadded:: 1.7.0 - If this is `None`, a reduction is performed over all the axes. + If this is None, a reduction is performed over all the axes. If this is a tuple of ints, a reduction is performed on multiple axes, instead of a single axis or all the axes as before. @@ -4751,7 +4755,7 @@ add_newdoc('numpy.core', 'ufunc', ('reduce', to the data-type of the output array if this is provided, or the data-type of the input array if no output array is provided. out : ndarray, None, or tuple of ndarray and None, optional - A location into which the result is stored. If not provided or `None`, + A location into which the result is stored. If not provided or None, a freshly-allocated array is returned. For consistency with ``ufunc.__call__``, if given as a keyword, this may be wrapped in a 1-element tuple. @@ -4868,7 +4872,7 @@ add_newdoc('numpy.core', 'ufunc', ('accumulate', to the data-type of the output array if such is provided, or the the data-type of the input array if no output array is provided. out : ndarray, None, or tuple of ndarray and None, optional - A location into which the result is stored. If not provided or `None`, + A location into which the result is stored. If not provided or None, a freshly-allocated array is returned. For consistency with ``ufunc.__call__``, if given as a keyword, this may be wrapped in a 1-element tuple. @@ -4950,7 +4954,7 @@ add_newdoc('numpy.core', 'ufunc', ('reduceat', to the data type of the output array if this is provided, or the data type of the input array if no output array is provided. out : ndarray, None, or tuple of ndarray and None, optional - A location into which the result is stored. If not provided or `None`, + A location into which the result is stored. If not provided or None, a freshly-allocated array is returned. For consistency with ``ufunc.__call__``, if given as a keyword, this may be wrapped in a 1-element tuple. 
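The reworded ``out`` text in the hunks above ("If not provided or None") is easy to exercise; a minimal sketch of both spellings for ``ufunc.reduce``::

    import numpy as np

    a = np.arange(6.0)
    buf = np.empty(())                   # 0-d output for a full reduction
    np.add.reduce(a, out=(buf,))         # 1-element tuple, as documented
    print(buf)                           # 15.0
    print(np.add.reduce(a, out=None))    # None -> freshly allocated; 15.0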
@@ -5323,7 +5327,8 @@ add_newdoc('numpy.core.multiarray', 'dtype', ('descr', `__array_interface__` attribute. Warning: This attribute exists specifically for `__array_interface__`, - and is not a datatype description compatible with `np.dtype`. + and passing it directly to `np.dtype` will not accurately reconstruct + some dtypes (e.g., scalar and subarray dtypes). Examples -------- diff --git a/numpy/core/_dtype.py b/numpy/core/_dtype.py index 092b848dc..df1ff180e 100644 --- a/numpy/core/_dtype.py +++ b/numpy/core/_dtype.py @@ -316,26 +316,39 @@ def _subarray_str(dtype): ) +def _name_includes_bit_suffix(dtype): + if dtype.type == np.object_: + # pointer size varies by system, best to omit it + return False + elif dtype.type == np.bool_: + # implied + return False + elif np.issubdtype(dtype, np.flexible) and _isunsized(dtype): + # unspecified + return False + else: + return True + + def _name_get(dtype): - # provides dtype.name.__get__ + # provides dtype.name.__get__, documented as returning a "bit name" if dtype.isbuiltin == 2: # user dtypes don't promise to do anything special return dtype.type.__name__ - # Builtin classes are documented as returning a "bit name" - name = dtype.type.__name__ - - # handle bool_, str_, etc - if name[-1] == '_': - name = name[:-1] + if issubclass(dtype.type, np.void): + # historically, void subclasses preserve their name, eg `record64` + name = dtype.type.__name__ + else: + name = _kind_name(dtype) - # append bit counts to str, unicode, and void - if np.issubdtype(dtype, np.flexible) and not _isunsized(dtype): + # append bit counts + if _name_includes_bit_suffix(dtype): name += "{}".format(dtype.itemsize * 8) # append metadata to datetimes - elif dtype.type in (np.datetime64, np.timedelta64): + if dtype.type in (np.datetime64, np.timedelta64): name += _datetime_metadata_str(dtype) return name diff --git a/numpy/core/_exceptions.py b/numpy/core/_exceptions.py index a1af7a78d..88a45561f 100644 --- a/numpy/core/_exceptions.py +++ b/numpy/core/_exceptions.py @@ -27,6 +27,7 @@ def _display_as_base(cls): assert issubclass(cls, Exception) cls.__name__ = cls.__base__.__name__ cls.__qualname__ = cls.__base__.__qualname__ + set_module(cls.__base__.__module__)(cls) return cls @@ -146,6 +147,54 @@ class _ArrayMemoryError(MemoryError): self.shape = shape self.dtype = dtype - def __str__(self): - return "Unable to allocate array with shape {} and data type {}".format(self.shape, self.dtype) + @property + def _total_size(self): + num_bytes = self.dtype.itemsize + for dim in self.shape: + num_bytes *= dim + return num_bytes + + @staticmethod + def _size_to_string(num_bytes): + """ Convert a number of bytes into a binary size string """ + import math + + # https://en.wikipedia.org/wiki/Binary_prefix + LOG2_STEP = 10 + STEP = 1024 + units = ['bytes', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB'] + + unit_i = max(num_bytes.bit_length() - 1, 1) // LOG2_STEP + unit_val = 1 << (unit_i * LOG2_STEP) + n_units = num_bytes / unit_val + del unit_val + + # ensure we pick a unit that is correct after rounding + if round(n_units) == STEP: + unit_i += 1 + n_units /= STEP + + # deal with sizes so large that we don't have units for them + if unit_i >= len(units): + new_unit_i = len(units) - 1 + n_units *= 1 << ((unit_i - new_unit_i) * LOG2_STEP) + unit_i = new_unit_i + + unit_name = units[unit_i] + # format with a sensible number of digits + if unit_i == 0: + # no decimal point on bytes + return '{:.0f} {}'.format(n_units, unit_name) + elif round(n_units) < 1000: + # 3 significant figures, if 
none are dropped to the left of the . + return '{:#.3g} {}'.format(n_units, unit_name) + else: + # just give all the digits otherwise + return '{:#.0f} {}'.format(n_units, unit_name) + def __str__(self): + size_str = self._size_to_string(self._total_size) + return ( + "Unable to allocate {} for an array with shape {} and data type {}" + .format(size_str, self.shape, self.dtype) + ) diff --git a/numpy/core/_internal.py b/numpy/core/_internal.py index b0ea603e1..05e401e0b 100644 --- a/numpy/core/_internal.py +++ b/numpy/core/_internal.py @@ -247,55 +247,13 @@ class _missing_ctypes(object): self.value = ptr -class _unsafe_first_element_pointer(object): - """ - Helper to allow viewing an array as a ctypes pointer to the first element - - This avoids: - * dealing with strides - * `.view` rejecting object-containing arrays - * `memoryview` not supporting overlapping fields - """ - def __init__(self, arr): - self.base = arr - - @property - def __array_interface__(self): - i = dict( - shape=(), - typestr='|V0', - data=(self.base.__array_interface__['data'][0], False), - strides=(), - version=3, - ) - return i - - -def _get_void_ptr(arr): - """ - Get a `ctypes.c_void_p` to arr.data, that keeps a reference to the array - """ - import numpy as np - # convert to a 0d array that has a data pointer referrign to the start - # of arr. This holds a reference to arr. - simple_arr = np.asarray(_unsafe_first_element_pointer(arr)) - - # create a `char[0]` using the same memory. - c_arr = (ctypes.c_char * 0).from_buffer(simple_arr) - - # finally cast to void* - return ctypes.cast(ctypes.pointer(c_arr), ctypes.c_void_p) - - class _ctypes(object): def __init__(self, array, ptr=None): self._arr = array if ctypes: self._ctypes = ctypes - # get a void pointer to the buffer, which keeps the array alive - self._data = _get_void_ptr(array) - assert self._data.value == ptr + self._data = self._ctypes.c_void_p(ptr) else: # fake a pointer-like object that holds onto the reference self._ctypes = _missing_ctypes() @@ -317,7 +275,14 @@ class _ctypes(object): The returned pointer will keep a reference to the array. """ - return self._ctypes.cast(self._data, obj) + # _ctypes.cast function causes a circular reference of self._data in + # self._data._objects. Attributes of self._data cannot be released + # until gc.collect is called. Make a copy of the pointer first then let + # it hold the array reference. This is a workaround to circumvent the + # CPython bug https://bugs.python.org/issue12836 + ptr = self._ctypes.cast(self._data, obj) + ptr._arr = self._arr + return ptr def shape_as(self, obj): """ @@ -348,7 +313,7 @@ class _ctypes(object): crashing. User Beware! The value of this attribute is exactly the same as ``self._array_interface_['data'][0]``. 
- Note that unlike `data_as`, a reference will not be kept to the array: + Note that unlike ``data_as``, a reference will not be kept to the array: code like ``ctypes.c_void_p((a + b).ctypes.data)`` will result in a pointer to a deallocated array, and should be spelt ``(a + b).ctypes.data_as(ctypes.c_void_p)`` @@ -385,7 +350,7 @@ class _ctypes(object): Enables `c_func(some_array.ctypes)` """ - return self._data + return self.data_as(ctypes.c_void_p) # kept for compatibility get_data = data.fget diff --git a/numpy/core/arrayprint.py b/numpy/core/arrayprint.py index 5761c4875..401018015 100644 --- a/numpy/core/arrayprint.py +++ b/numpy/core/arrayprint.py @@ -111,7 +111,7 @@ def set_printoptions(precision=None, threshold=None, edgeitems=None, ---------- precision : int or None, optional Number of digits of precision for floating point output (default 8). - May be `None` if `floatmode` is not `fixed`, to print as many digits as + May be None if `floatmode` is not `fixed`, to print as many digits as necessary to uniquely specify the value. threshold : int, optional Total number of array elements which trigger summarization @@ -194,12 +194,14 @@ def set_printoptions(precision=None, threshold=None, edgeitems=None, See Also -------- - get_printoptions, set_string_function, array2string + get_printoptions, printoptions, set_string_function, array2string Notes ----- `formatter` is always reset with a call to `set_printoptions`. + Use `printoptions` as a context manager to set the values temporarily. + Examples -------- Floating point precision can be set: @@ -236,9 +238,16 @@ def set_printoptions(precision=None, threshold=None, edgeitems=None, To put back the default options, you can use: - >>> np.set_printoptions(edgeitems=3,infstr='inf', + >>> np.set_printoptions(edgeitems=3, infstr='inf', ... linewidth=75, nanstr='nan', precision=8, ... suppress=False, threshold=1000, formatter=None) + + Also to temporarily override options, use `printoptions` as a context manager: + + >>> with np.printoptions(precision=2, suppress=True, threshold=5): + ... np.linspace(0, 10, 10) + array([ 0. , 1.11, 2.22, ..., 7.78, 8.89, 10. ]) + """ legacy = kwarg.pop('legacy', None) if kwarg: @@ -285,7 +294,7 @@ def get_printoptions(): See Also -------- - set_printoptions, set_string_function + set_printoptions, printoptions, set_string_function """ return _format_options.copy() @@ -1470,7 +1479,11 @@ def array_repr(arr, max_line_width=None, precision=None, suppress_small=None): arr, max_line_width, precision, suppress_small) -_guarded_str = _recursive_guard()(str) +@_recursive_guard() +def _guarded_repr_or_str(v): + if isinstance(v, bytes): + return repr(v) + return str(v) def _array_str_implementation( @@ -1488,7 +1501,7 @@ def _array_str_implementation( # obtain a scalar and call str on it, avoiding problems for subclasses # for which indexing with () returns a 0d instead of a scalar by using # ndarray's getindex. Also guard against recursive 0d object arrays. 
- return _guarded_str(np.ndarray.__getitem__(a, ())) + return _guarded_repr_or_str(np.ndarray.__getitem__(a, ())) return array2string(a, max_line_width, precision, suppress_small, ' ', "") diff --git a/numpy/core/code_generators/genapi.py b/numpy/core/code_generators/genapi.py index 923c34425..7336e5e13 100644 --- a/numpy/core/code_generators/genapi.py +++ b/numpy/core/code_generators/genapi.py @@ -259,7 +259,8 @@ def find_functions(filename, tag='API'): elif state == STATE_ARGS: if line.startswith('{'): # finished - fargs_str = ' '.join(function_args).rstrip(' )') + # remove any white space and the closing bracket: + fargs_str = ' '.join(function_args).rstrip()[:-1].rstrip() fargs = split_arguments(fargs_str) f = Function(function_name, return_type, fargs, '\n'.join(doclist)) diff --git a/numpy/core/code_generators/generate_umath.py b/numpy/core/code_generators/generate_umath.py index ae871ea6f..e0b6a654c 100644 --- a/numpy/core/code_generators/generate_umath.py +++ b/numpy/core/code_generators/generate_umath.py @@ -287,7 +287,7 @@ defdict = { Ufunc(2, 1, None, # Zero is only a unit to the right, not the left docstrings.get('numpy.core.umath.subtract'), 'PyUFunc_SubtractionTypeResolver', - TD(notimes_or_obj, simd=[('avx2', ints)]), + TD(ints + inexact, simd=[('avx2', ints)]), [TypeDescription('M', FullTypeDescr, 'Mm', 'M'), TypeDescription('m', FullTypeDescr, 'mm', 'm'), TypeDescription('M', FullTypeDescr, 'MM', 'm'), @@ -358,14 +358,14 @@ defdict = { Ufunc(1, 1, None, docstrings.get('numpy.core.umath.square'), None, - TD(ints+inexact, simd=[('avx2', ints)]), + TD(ints+inexact, simd=[('avx2', ints), ('fma', 'fd'), ('avx512f', 'fd')]), TD(O, f='Py_square'), ), 'reciprocal': Ufunc(1, 1, None, docstrings.get('numpy.core.umath.reciprocal'), None, - TD(ints+inexact, simd=[('avx2', ints)]), + TD(ints+inexact, simd=[('avx2', ints), ('fma', 'fd'), ('avx512f','fd')]), TD(O, f='Py_reciprocal'), ), # This is no longer used as numpy.ones_like, however it is @@ -395,7 +395,7 @@ defdict = { Ufunc(1, 1, None, docstrings.get('numpy.core.umath.absolute'), 'PyUFunc_AbsoluteTypeResolver', - TD(bints+flts+timedeltaonly), + TD(bints+flts+timedeltaonly, simd=[('fma', 'fd'), ('avx512f', 'fd')]), TD(cmplx, out=('f', 'd', 'g')), TD(O, f='PyNumber_Absolute'), ), @@ -409,7 +409,7 @@ defdict = { Ufunc(1, 1, None, docstrings.get('numpy.core.umath.negative'), 'PyUFunc_NegativeTypeResolver', - TD(bints+flts+timedeltaonly, simd=[('avx2', ints)]), + TD(ints+flts+timedeltaonly, simd=[('avx2', ints)]), TD(cmplx, f='neg'), TD(O, f='PyNumber_Negative'), ), @@ -664,7 +664,7 @@ defdict = { None, TD('e', f='cos', astype={'e':'f'}), TD('f', simd=[('fma', 'f'), ('avx512f', 'f')]), - TD(inexact, f='cos', astype={'e':'f'}), + TD('fdg' + cmplx, f='cos'), TD(P, f='cos'), ), 'sin': @@ -673,7 +673,7 @@ defdict = { None, TD('e', f='sin', astype={'e':'f'}), TD('f', simd=[('fma', 'f'), ('avx512f', 'f')]), - TD(inexact, f='sin', astype={'e':'f'}), + TD('fdg' + cmplx, f='sin'), TD(P, f='sin'), ), 'tan': @@ -710,7 +710,7 @@ defdict = { None, TD('e', f='exp', astype={'e':'f'}), TD('f', simd=[('fma', 'f'), ('avx512f', 'f')]), - TD(inexact, f='exp', astype={'e':'f'}), + TD('fdg' + cmplx, f='exp'), TD(P, f='exp'), ), 'exp2': @@ -733,7 +733,7 @@ defdict = { None, TD('e', f='log', astype={'e':'f'}), TD('f', simd=[('fma', 'f'), ('avx512f', 'f')]), - TD(inexact, f='log', astype={'e':'f'}), + TD('fdg' + cmplx, f='log'), TD(P, f='log'), ), 'log2': @@ -762,8 +762,8 @@ defdict = { docstrings.get('numpy.core.umath.sqrt'), None, TD('e', f='sqrt', 
astype={'e':'f'}), - TD(inexactvec), - TD(inexact, f='sqrt', astype={'e':'f'}), + TD(inexactvec, simd=[('fma', 'fd'), ('avx512f', 'fd')]), + TD('fdg' + cmplx, f='sqrt'), TD(P, f='sqrt'), ), 'cbrt': @@ -777,14 +777,18 @@ defdict = { Ufunc(1, 1, None, docstrings.get('numpy.core.umath.ceil'), None, - TD(flts, f='ceil', astype={'e':'f'}), + TD('e', f='ceil', astype={'e':'f'}), + TD(inexactvec, simd=[('fma', 'fd'), ('avx512f', 'fd')]), + TD('fdg', f='ceil'), TD(O, f='npy_ObjectCeil'), ), 'trunc': Ufunc(1, 1, None, docstrings.get('numpy.core.umath.trunc'), None, - TD(flts, f='trunc', astype={'e':'f'}), + TD('e', f='trunc', astype={'e':'f'}), + TD(inexactvec, simd=[('fma', 'fd'), ('avx512f', 'fd')]), + TD('fdg', f='trunc'), TD(O, f='npy_ObjectTrunc'), ), 'fabs': @@ -798,14 +802,18 @@ defdict = { Ufunc(1, 1, None, docstrings.get('numpy.core.umath.floor'), None, - TD(flts, f='floor', astype={'e':'f'}), + TD('e', f='floor', astype={'e':'f'}), + TD(inexactvec, simd=[('fma', 'fd'), ('avx512f', 'fd')]), + TD('fdg', f='floor'), TD(O, f='npy_ObjectFloor'), ), 'rint': Ufunc(1, 1, None, docstrings.get('numpy.core.umath.rint'), None, - TD(inexact, f='rint', astype={'e':'f'}), + TD('e', f='rint', astype={'e':'f'}), + TD(inexactvec, simd=[('fma', 'fd'), ('avx512f', 'fd')]), + TD('fdg' + cmplx, f='rint'), TD(P, f='rint'), ), 'arctan2': diff --git a/numpy/core/code_generators/ufunc_docstrings.py b/numpy/core/code_generators/ufunc_docstrings.py index fb418aadc..4dec73505 100644 --- a/numpy/core/code_generators/ufunc_docstrings.py +++ b/numpy/core/code_generators/ufunc_docstrings.py @@ -22,7 +22,7 @@ subst = { 'PARAMS': textwrap.dedent(""" out : ndarray, None, or tuple of ndarray and None, optional A location into which the result is stored. If provided, it must have - a shape that the inputs broadcast to. If not provided or `None`, + a shape that the inputs broadcast to. If not provided or None, a freshly-allocated array is returned. A tuple (possible only as a keyword argument) must have length equal to the number of outputs. where : array_like, optional @@ -2183,7 +2183,7 @@ add_newdoc('numpy.core.umath', 'logical_and', Returns ------- y : ndarray or bool - Boolean result of the logical OR operation applied to the elements + Boolean result of the logical AND operation applied to the elements of `x1` and `x2`; the shape is determined by broadcasting. $OUT_SCALAR_2 @@ -2596,7 +2596,7 @@ add_newdoc('numpy.core.umath', 'matmul', out : ndarray, optional A location into which the result is stored. If provided, it must have a shape that matches the signature `(n,k),(k,m)->(n,m)`. If not - provided or `None`, a freshly-allocated array is returned. + provided or None, a freshly-allocated array is returned. **kwargs For other keyword-only arguments, see the :ref:`ufunc docs <ufuncs.kwargs>`. diff --git a/numpy/core/defchararray.py b/numpy/core/defchararray.py index d7ecce1b4..2d89d6fe0 100644 --- a/numpy/core/defchararray.py +++ b/numpy/core/defchararray.py @@ -82,7 +82,7 @@ def _clean_args(*args): Many of the Python string operations that have optional arguments do not use 'None' to indicate a default value. In these cases, - we need to remove all `None` arguments, and those following them. + we need to remove all None arguments, and those following them. 
""" newargs = [] for chk in args: @@ -1333,7 +1333,7 @@ def rsplit(a, sep=None, maxsplit=None): a : array_like of str or unicode sep : str or unicode, optional - If `sep` is not specified or `None`, any whitespace string + If `sep` is not specified or None, any whitespace string is a separator. maxsplit : int, optional If `maxsplit` is given, at most `maxsplit` splits are done, @@ -1417,7 +1417,7 @@ def split(a, sep=None, maxsplit=None): a : array_like of str or unicode sep : str or unicode, optional - If `sep` is not specified or `None`, any whitespace string is a + If `sep` is not specified or None, any whitespace string is a separator. maxsplit : int, optional @@ -1840,7 +1840,7 @@ class chararray(ndarray): This constructor creates the array, using `buffer` (with `offset` and `strides`) if it is not ``None``. If `buffer` is ``None``, then constructs a new array with `strides` in "C order", unless both - ``len(shape) >= 2`` and ``order='Fortran'``, in which case `strides` + ``len(shape) >= 2`` and ``order='F'``, in which case `strides` is in "Fortran order". Methods @@ -2659,7 +2659,7 @@ def array(obj, itemsize=None, copy=True, unicode=None, order=None): unicode : bool, optional When true, the resulting `chararray` can contain Unicode characters, when false only 8-bit characters. If unicode is - `None` and `obj` is one of the following: + None and `obj` is one of the following: - a `chararray`, - an ndarray of type `str` or `unicode` @@ -2799,7 +2799,7 @@ def asarray(obj, itemsize=None, unicode=None, order=None): unicode : bool, optional When true, the resulting `chararray` can contain Unicode characters, when false only 8-bit characters. If unicode is - `None` and `obj` is one of the following: + None and `obj` is one of the following: - a `chararray`, - an ndarray of type `str` or 'unicode` diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py index bde37fca3..5f7716455 100644 --- a/numpy/core/fromnumeric.py +++ b/numpy/core/fromnumeric.py @@ -1409,7 +1409,7 @@ def squeeze(a, axis=None): Raises ------ ValueError - If `axis` is not `None`, and an axis being squeezed is not of length 1 + If `axis` is not None, and an axis being squeezed is not of length 1 See Also -------- @@ -1775,11 +1775,13 @@ def nonzero(a): which returns a row for each non-zero element. .. note:: - When called on a zero-d array or scalar, ``nonzero(a)`` is treated - as ``nonzero(atleast1d(a))``. - ..deprecated:: 1.17.0 - Use `atleast1d` explicitly if this behavior is deliberate. + When called on a zero-d array or scalar, ``nonzero(a)`` is treated + as ``nonzero(atleast1d(a))``. + + .. deprecated:: 1.17.0 + + Use `atleast1d` explicitly if this behavior is deliberate. Parameters ---------- @@ -1943,7 +1945,7 @@ def compress(condition, a, axis=None, out=None): take, choose, diag, diagonal, select ndarray.compress : Equivalent method in ndarray np.extract: Equivalent method when working on 1-D arrays - numpy.doc.ufuncs : Section "Output arguments" + ufuncs-output-type Examples -------- @@ -1993,14 +1995,14 @@ def clip(a, a_min, a_max, out=None, **kwargs): ---------- a : array_like Array containing elements to clip. - a_min : scalar or array_like or `None` - Minimum value. If `None`, clipping is not performed on lower + a_min : scalar or array_like or None + Minimum value. If None, clipping is not performed on lower interval edge. Not more than one of `a_min` and `a_max` may be - `None`. - a_max : scalar or array_like or `None` - Maximum value. If `None`, clipping is not performed on upper + None. 
+ a_max : scalar or array_like or None + Maximum value. If None, clipping is not performed on upper interval edge. Not more than one of `a_min` and `a_max` may be - `None`. If `a_min` or `a_max` are array_like, then the three + None. If `a_min` or `a_max` are array_like, then the three arrays will be broadcasted to match their shapes. out : ndarray, optional The results will be placed in this array. It may be the input @@ -2021,7 +2023,7 @@ def clip(a, a_min, a_max, out=None, **kwargs): See Also -------- - numpy.doc.ufuncs : Section "Output arguments" + ufuncs-output-type Examples -------- @@ -2204,7 +2206,7 @@ def any(a, axis=None, out=None, keepdims=np._NoValue): Input array or object that can be converted to an array. axis : None or int or tuple of ints, optional Axis or axes along which a logical OR reduction is performed. - The default (`axis` = `None`) is to perform a logical OR over all + The default (``axis=None``) is to perform a logical OR over all the dimensions of the input array. `axis` may be negative, in which case it counts from the last to the first axis. @@ -2217,7 +2219,7 @@ def any(a, axis=None, out=None, keepdims=np._NoValue): the same shape as the expected output and its type is preserved (e.g., if it is of type float, then it will remain so, returning 1.0 for True and 0.0 for False, regardless of the type of `a`). - See `doc.ufuncs` (Section "Output arguments") for details. + See `ufuncs-output-type` for more details. keepdims : bool, optional If this is set to True, the axes which are reduced are left @@ -2290,7 +2292,7 @@ def all(a, axis=None, out=None, keepdims=np._NoValue): Input array or object that can be converted to an array. axis : None or int or tuple of ints, optional Axis or axes along which a logical AND reduction is performed. - The default (`axis` = `None`) is to perform a logical AND over all + The default (``axis=None``) is to perform a logical AND over all the dimensions of the input array. `axis` may be negative, in which case it counts from the last to the first axis. @@ -2302,8 +2304,8 @@ def all(a, axis=None, out=None, keepdims=np._NoValue): Alternate output array in which to place the result. It must have the same shape as the expected output and its type is preserved (e.g., if ``dtype(out)`` is float, the result - will consist of 0.0's and 1.0's). See `doc.ufuncs` (Section - "Output arguments") for more details. + will consist of 0.0's and 1.0's). See `ufuncs-output-type` for more + details. keepdims : bool, optional If this is set to True, the axes which are reduced are left @@ -2381,8 +2383,8 @@ def cumsum(a, axis=None, dtype=None, out=None): out : ndarray, optional Alternative output array in which to place the result. It must have the same shape and buffer length as the expected output - but the type will be cast if necessary. See `doc.ufuncs` - (Section "Output arguments") for more details. + but the type will be cast if necessary. See `ufuncs-output-type` for + more details. Returns ------- @@ -2527,7 +2529,7 @@ def amax(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, out : ndarray, optional Alternative output array in which to place the result. Must be of the same shape and buffer length as the expected output. - See `doc.ufuncs` (Section "Output arguments") for more details. + See `ufuncs-output-type` for more details. 
keepdims : bool, optional If this is set to True, the axes which are reduced are left @@ -2652,7 +2654,7 @@ def amin(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, out : ndarray, optional Alternative output array in which to place the result. Must be of the same shape and buffer length as the expected output. - See `doc.ufuncs` (Section "Output arguments") for more details. + See `ufuncs-output-type` for more details. keepdims : bool, optional If this is set to True, the axes which are reduced are left @@ -2859,7 +2861,7 @@ def prod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, See Also -------- ndarray.prod : equivalent method - numpy.doc.ufuncs : Section "Output arguments" + ufuncs-output-type Notes ----- @@ -2955,7 +2957,7 @@ def cumprod(a, axis=None, dtype=None, out=None): See Also -------- - numpy.doc.ufuncs : Section "Output arguments" + ufuncs-output-type Notes ----- @@ -3101,8 +3103,8 @@ def around(a, decimals=0, out=None): out : ndarray, optional Alternative output array in which to place the result. It must have the same shape as the expected output, but the type of the output - values will be cast if necessary. See `doc.ufuncs` (Section - "Output arguments") for details. + values will be cast if necessary. See `ufuncs-output-type` for more + details. Returns ------- @@ -3125,10 +3127,37 @@ def around(a, decimals=0, out=None): ----- For values exactly halfway between rounded decimal values, NumPy rounds to the nearest even value. Thus 1.5 and 2.5 round to 2.0, - -0.5 and 0.5 round to 0.0, etc. Results may also be surprising due - to the inexact representation of decimal fractions in the IEEE - floating point standard [1]_ and errors introduced when scaling - by powers of ten. + -0.5 and 0.5 round to 0.0, etc. + + ``np.around`` uses a fast but sometimes inexact algorithm to round + floating-point datatypes. For positive `decimals` it is equivalent to + ``np.true_divide(np.rint(a * 10**decimals), 10**decimals)``, which has + error due to the inexact representation of decimal fractions in the IEEE + floating point standard [1]_ and errors introduced when scaling by powers + of ten. For instance, note the extra "1" in the following: + + >>> np.round(56294995342131.5, 3) + 56294995342131.51 + + If your goal is to print such values with a fixed number of decimals, it is + preferable to use numpy's float printing routines to limit the number of + printed decimals: + + >>> np.format_float_positional(56294995342131.5, precision=3) + '56294995342131.5' + + The float printing routines use an accurate but much more computationally + demanding algorithm to compute the number of digits after the decimal + point. + + Alternatively, Python's builtin `round` function uses a more accurate + but slower algorithm for 64-bit floating point values: + + >>> round(56294995342131.5, 3) + 56294995342131.5 + >>> np.round(16.055, 2), round(16.055, 2) # equals 16.0549999999999997 + (16.06, 16.05) + References ---------- @@ -3189,7 +3218,7 @@ def mean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue): Alternate output array in which to place the result. The default is ``None``; if provided, it must have the same shape as the expected output, but the type will be cast if necessary. - See `doc.ufuncs` for details. + See `ufuncs-output-type` for more details. 
keepdims : bool, optional If this is set to True, the axes which are reduced are left @@ -3324,7 +3353,7 @@ def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue): See Also -------- var, mean, nanmean, nanstd, nanvar - numpy.doc.ufuncs : Section "Output arguments" + ufuncs-output-type Notes ----- @@ -3419,7 +3448,7 @@ def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue): instead of a single axis or all the axes as before. dtype : data-type, optional Type to use in computing the variance. For arrays of integer type - the default is `float32`; for arrays of float types it is the same as + the default is `float64`; for arrays of float types it is the same as the array type. out : ndarray, optional Alternate output array in which to place the result. It must have @@ -3449,7 +3478,7 @@ def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue): See Also -------- std, mean, nanmean, nanstd, nanvar - numpy.doc.ufuncs : Section "Output arguments" + ufuncs-output-type Notes ----- diff --git a/numpy/core/function_base.py b/numpy/core/function_base.py index d83af9911..42604ec3f 100644 --- a/numpy/core/function_base.py +++ b/numpy/core/function_base.py @@ -18,18 +18,6 @@ array_function_dispatch = functools.partial( overrides.array_function_dispatch, module='numpy') -def _index_deprecate(i, stacklevel=2): - try: - i = operator.index(i) - except TypeError: - msg = ("object of type {} cannot be safely interpreted as " - "an integer.".format(type(i))) - i = int(i) - stacklevel += 1 - warnings.warn(msg, DeprecationWarning, stacklevel=stacklevel) - return i - - def _linspace_dispatcher(start, stop, num=None, endpoint=None, retstep=None, dtype=None, axis=None): return (start, stop) @@ -125,8 +113,13 @@ def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, >>> plt.show() """ - # 2016-02-25, 1.12 - num = _index_deprecate(num) + try: + num = operator.index(num) + except TypeError: + raise TypeError( + "object of type {} cannot be safely interpreted as an integer." + .format(type(num))) + if num < 0: raise ValueError("Number of samples, %s, must be non-negative." 
% num) div = (num - 1) if endpoint else num diff --git a/numpy/core/include/numpy/ndarrayobject.h b/numpy/core/include/numpy/ndarrayobject.h index 2cc7ced35..95e9cb060 100644 --- a/numpy/core/include/numpy/ndarrayobject.h +++ b/numpy/core/include/numpy/ndarrayobject.h @@ -23,7 +23,7 @@ extern "C" { /* C-API that requires previous API to be defined */ -#define PyArray_DescrCheck(op) (((PyObject*)(op))->ob_type==&PyArrayDescr_Type) +#define PyArray_DescrCheck(op) PyObject_TypeCheck(op, &PyArrayDescr_Type) #define PyArray_Check(op) PyObject_TypeCheck(op, &PyArray_Type) #define PyArray_CheckExact(op) (((PyObject*)(op))->ob_type == &PyArray_Type) diff --git a/numpy/core/include/numpy/ndarraytypes.h b/numpy/core/include/numpy/ndarraytypes.h index 1221aeece..ad98d562b 100644 --- a/numpy/core/include/numpy/ndarraytypes.h +++ b/numpy/core/include/numpy/ndarraytypes.h @@ -1095,7 +1095,8 @@ typedef struct PyArrayIterObject_tag PyArrayIterObject; * type of the function which translates a set of coordinates to a * pointer to the data */ -typedef char* (*npy_iter_get_dataptr_t)(PyArrayIterObject* iter, npy_intp*); +typedef char* (*npy_iter_get_dataptr_t)( + PyArrayIterObject* iter, const npy_intp*); struct PyArrayIterObject_tag { PyObject_HEAD @@ -1695,7 +1696,8 @@ PyArray_CLEARFLAGS(PyArrayObject *arr, int flags) #define PyDataType_ISOBJECT(obj) PyTypeNum_ISOBJECT(((PyArray_Descr*)(obj))->type_num) #define PyDataType_HASFIELDS(obj) (((PyArray_Descr *)(obj))->names != NULL) #define PyDataType_HASSUBARRAY(dtype) ((dtype)->subarray != NULL) -#define PyDataType_ISUNSIZED(dtype) ((dtype)->elsize == 0) +#define PyDataType_ISUNSIZED(dtype) ((dtype)->elsize == 0 && \ + !PyDataType_HASFIELDS(dtype)) #define PyDataType_MAKEUNSIZED(dtype) ((dtype)->elsize = 0) #define PyArray_ISBOOL(obj) PyTypeNum_ISBOOL(PyArray_TYPE(obj)) diff --git a/numpy/core/include/numpy/npy_math.h b/numpy/core/include/numpy/npy_math.h index 126b861bf..69e690f28 100644 --- a/numpy/core/include/numpy/npy_math.h +++ b/numpy/core/include/numpy/npy_math.h @@ -177,6 +177,28 @@ NPY_INPLACE npy_long npy_lcml(npy_long a, npy_long b); NPY_INPLACE npy_longlong npy_gcdll(npy_longlong a, npy_longlong b); NPY_INPLACE npy_longlong npy_lcmll(npy_longlong a, npy_longlong b); +NPY_INPLACE npy_ubyte npy_rshiftuhh(npy_ubyte a, npy_ubyte b); +NPY_INPLACE npy_ubyte npy_lshiftuhh(npy_ubyte a, npy_ubyte b); +NPY_INPLACE npy_ushort npy_rshiftuh(npy_ushort a, npy_ushort b); +NPY_INPLACE npy_ushort npy_lshiftuh(npy_ushort a, npy_ushort b); +NPY_INPLACE npy_uint npy_rshiftu(npy_uint a, npy_uint b); +NPY_INPLACE npy_uint npy_lshiftu(npy_uint a, npy_uint b); +NPY_INPLACE npy_ulong npy_rshiftul(npy_ulong a, npy_ulong b); +NPY_INPLACE npy_ulong npy_lshiftul(npy_ulong a, npy_ulong b); +NPY_INPLACE npy_ulonglong npy_rshiftull(npy_ulonglong a, npy_ulonglong b); +NPY_INPLACE npy_ulonglong npy_lshiftull(npy_ulonglong a, npy_ulonglong b); + +NPY_INPLACE npy_byte npy_rshifthh(npy_byte a, npy_byte b); +NPY_INPLACE npy_byte npy_lshifthh(npy_byte a, npy_byte b); +NPY_INPLACE npy_short npy_rshifth(npy_short a, npy_short b); +NPY_INPLACE npy_short npy_lshifth(npy_short a, npy_short b); +NPY_INPLACE npy_int npy_rshift(npy_int a, npy_int b); +NPY_INPLACE npy_int npy_lshift(npy_int a, npy_int b); +NPY_INPLACE npy_long npy_rshiftl(npy_long a, npy_long b); +NPY_INPLACE npy_long npy_lshiftl(npy_long a, npy_long b); +NPY_INPLACE npy_longlong npy_rshiftll(npy_longlong a, npy_longlong b); +NPY_INPLACE npy_longlong npy_lshiftll(npy_longlong a, npy_longlong b); + /* * avx function has a common 
API for both sin & cos. This enum is used to * distinguish between the two diff --git a/numpy/core/info.py b/numpy/core/info.py deleted file mode 100644 index c6f7bbcf2..000000000 --- a/numpy/core/info.py +++ /dev/null @@ -1,87 +0,0 @@ -"""Defines a multi-dimensional array and useful procedures for Numerical computation. - -Functions - -- array - NumPy Array construction -- zeros - Return an array of all zeros -- empty - Return an uninitialized array -- shape - Return shape of sequence or array -- rank - Return number of dimensions -- size - Return number of elements in entire array or a - certain dimension -- fromstring - Construct array from (byte) string -- take - Select sub-arrays using sequence of indices -- put - Set sub-arrays using sequence of 1-D indices -- putmask - Set portion of arrays using a mask -- reshape - Return array with new shape -- repeat - Repeat elements of array -- choose - Construct new array from indexed array tuple -- correlate - Correlate two 1-d arrays -- searchsorted - Search for element in 1-d array -- sum - Total sum over a specified dimension -- average - Average, possibly weighted, over axis or array. -- cumsum - Cumulative sum over a specified dimension -- product - Total product over a specified dimension -- cumproduct - Cumulative product over a specified dimension -- alltrue - Logical and over an entire axis -- sometrue - Logical or over an entire axis -- allclose - Tests if sequences are essentially equal - -More Functions: - -- arange - Return regularly spaced array -- asarray - Guarantee NumPy array -- convolve - Convolve two 1-d arrays -- swapaxes - Exchange axes -- concatenate - Join arrays together -- transpose - Permute axes -- sort - Sort elements of array -- argsort - Indices of sorted array -- argmax - Index of largest value -- argmin - Index of smallest value -- inner - Innerproduct of two arrays -- dot - Dot product (matrix multiplication) -- outer - Outerproduct of two arrays -- resize - Return array with arbitrary new shape -- indices - Tuple of indices -- fromfunction - Construct array from universal function -- diagonal - Return diagonal array -- trace - Trace of array -- dump - Dump array to file object (pickle) -- dumps - Return pickled string representing data -- load - Return array stored in file object -- loads - Return array from pickled string -- ravel - Return array as 1-D -- nonzero - Indices of nonzero elements for 1-D array -- shape - Shape of array -- where - Construct array from binary result -- compress - Elements of array where condition is true -- clip - Clip array between two values -- ones - Array of all ones -- identity - 2-D identity array (matrix) - -(Universal) Math Functions - - add logical_or exp - subtract logical_xor log - multiply logical_not log10 - divide maximum sin - divide_safe minimum sinh - conjugate bitwise_and sqrt - power bitwise_or tan - absolute bitwise_xor tanh - negative invert ceil - greater left_shift fabs - greater_equal right_shift floor - less arccos arctan2 - less_equal arcsin fmod - equal arctan hypot - not_equal cos around - logical_and cosh sign - arccosh arcsinh arctanh - -""" -from __future__ import division, absolute_import, print_function - -depends = ['testing'] -global_symbols = ['*'] diff --git a/numpy/core/numeric.py b/numpy/core/numeric.py index 8ada87b9f..1e011e2e7 100644 --- a/numpy/core/numeric.py +++ b/numpy/core/numeric.py @@ -26,6 +26,7 @@ if sys.version_info[0] < 3: from . import overrides from . import umath +from . 
import shape_base from .overrides import set_module from .umath import (multiply, invert, sin, PINF, NAN) from . import numerictypes @@ -291,7 +292,7 @@ def full(shape, fill_value, dtype=None, order='C'): fill_value : scalar Fill value. dtype : data-type, optional - The desired data-type for the array The default, `None`, means + The desired data-type for the array. The default, None, means `np.array(fill_value).dtype`. order : {'C', 'F'}, optional Whether to store multidimensional data in C- or Fortran-contiguous @@ -522,7 +523,7 @@ def isfortran(a): C-ordered arrays evaluate as False even if they are also FORTRAN-ordered. - >>> np.isfortran(np.array([1, 2], order='FORTRAN')) + >>> np.isfortran(np.array([1, 2], order='F')) False """ @@ -545,8 +546,10 @@ def argwhere(a): Returns ------- - index_array : ndarray + index_array : (N, a.ndim) ndarray Indices of elements that are non-zero. Indices are grouped by element. + This array will have shape ``(N, a.ndim)`` where ``N`` is the number of + non-zero items. See Also -------- @@ -554,7 +557,8 @@ def argwhere(a): Notes ----- - ``np.argwhere(a)`` is the same as ``np.transpose(np.nonzero(a))``. + ``np.argwhere(a)`` is almost the same as ``np.transpose(np.nonzero(a))``, + but produces a result of the correct shape for a 0D array. The output of ``argwhere`` is not suitable for indexing arrays. For this purpose use ``nonzero(a)`` instead. @@ -572,6 +576,11 @@ def argwhere(a): [1, 2]]) """ + # nonzero does not behave well on 0d, so promote to 1d + if np.ndim(a) == 0: + a = shape_base.atleast_1d(a) + # then remove the added dimension + return argwhere(a)[:,:0] return transpose(nonzero(a)) @@ -929,7 +938,7 @@ def tensordot(a, b, axes=2): Returns ------- output : ndarray - The tensor dot product of the input. + The tensor dot product of the input. See Also -------- @@ -951,6 +960,9 @@ def tensordot(a, b, axes=2): two sequences of the same length, with the first axis to sum over given first in both sequences, the second axis second, and so forth. + The shape of the result consists of the non-contracted axes of the + first tensor, followed by the non-contracted axes of the second. + Examples -------- A "traditional" example: @@ -1772,19 +1784,19 @@ def _frombuffer(buf, dtype, shape, order): @set_module('numpy') -def isscalar(num): +def isscalar(element): """ - Returns True if the type of `num` is a scalar type. + Returns True if the type of `element` is a scalar type. Parameters ---------- - num : any + element : any Input argument, can be of any type and shape. Returns ------- val : bool - True if `num` is a scalar type, False if it is not. + True if `element` is a scalar type, False if it is not. See Also -------- @@ -1792,10 +1804,14 @@ def isscalar(num): Notes ----- - In almost all cases ``np.ndim(x) == 0`` should be used instead of this - function, as that will also return true for 0d arrays. This is how - numpy overloads functions in the style of the ``dx`` arguments to `gradient` - and the ``bins`` argument to `histogram`. Some key differences: + If you need a stricter way to identify a *numerical* scalar, use + ``isinstance(x, numbers.Number)``, as that returns ``False`` for most + non-numerical elements such as strings. + + In most cases ``np.ndim(x) == 0`` should be used instead of this function, + as that will also return true for 0d arrays. This is how numpy overloads + functions in the style of the ``dx`` arguments to `gradient` and the ``bins`` + argument to `histogram`.
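Note (editorial illustration, not part of the patch): a minimal doctest showing the distinction the rewritten Notes draw between ``isscalar`` and ``np.ndim(x) == 0``; a 0d array is not a scalar type, but it does have zero dimensions:

>>> np.isscalar(3.1)
True
>>> np.isscalar(np.array(3.1))
False
>>> np.ndim(np.array(3.1)) == 0
True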
Some key differences: +--------------------------------------+---------------+-------------------+ | x |``isscalar(x)``|``np.ndim(x) == 0``| @@ -1843,9 +1859,9 @@ def isscalar(num): True """ - return (isinstance(num, generic) - or type(num) in ScalarType - or isinstance(num, numbers.Number)) + return (isinstance(element, generic) + or type(element) in ScalarType + or isinstance(element, numbers.Number)) @set_module('numpy') @@ -2082,9 +2098,9 @@ def allclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False): `atol` are added together to compare against the absolute difference between `a` and `b`. - If either array contains one or more NaNs, False is returned. - Infs are treated as equal if they are in the same place and of the same - sign in both arrays. + NaNs are treated as equal if they are in the same place and if + ``equal_nan=True``. Infs are treated as equal if they are in the same + place and of the same sign in both arrays. Parameters ---------- @@ -2096,7 +2112,7 @@ def allclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False): The absolute tolerance parameter (see Notes). equal_nan : bool Whether to compare NaN's as equal. If True, NaN's in `a` will be - considered equal to NaN's in `b`. + considered equal to NaN's in `b` in the output array. .. versionadded:: 1.10.0 diff --git a/numpy/core/setup.py b/numpy/core/setup.py index 338502791..5f2f4a7b2 100644 --- a/numpy/core/setup.py +++ b/numpy/core/setup.py @@ -463,8 +463,14 @@ def configuration(parent_package='',top_path=None): rep = check_long_double_representation(config_cmd) moredefs.append(('HAVE_LDOUBLE_%s' % rep, 1)) + if check_for_right_shift_internal_compiler_error(config_cmd): + moredefs.append('NPY_DO_NOT_OPTIMIZE_LONG_right_shift') + moredefs.append('NPY_DO_NOT_OPTIMIZE_ULONG_right_shift') + moredefs.append('NPY_DO_NOT_OPTIMIZE_LONGLONG_right_shift') + moredefs.append('NPY_DO_NOT_OPTIMIZE_ULONGLONG_right_shift') + # Py3K check - if sys.version_info[0] == 3: + if sys.version_info[0] >= 3: moredefs.append(('NPY_PY3K', 1)) # Generate the config.h file from moredefs @@ -491,10 +497,10 @@ def configuration(parent_package='',top_path=None): #endif """)) - print('File:', target) + log.info('File: %s' % target) with open(target) as target_f: - print(target_f.read()) - print('EOF') + log.info(target_f.read()) + log.info('EOF') else: mathlibs = [] with open(target) as target_f: @@ -581,10 +587,10 @@ def configuration(parent_package='',top_path=None): """)) # Dump the numpyconfig.h header to stdout - print('File: %s' % target) + log.info('File: %s' % target) with open(target) as target_f: - print(target_f.read()) - print('EOF') + log.info(target_f.read()) + log.info('EOF') config.add_data_files((header_dir, target)) return target @@ -633,23 +639,6 @@ def configuration(parent_package='',top_path=None): ] ####################################################################### - # dummy module # - ####################################################################### - - # npymath needs the config.h and numpyconfig.h files to be generated, but - # build_clib cannot handle generate_config_h and generate_numpyconfig_h - # (don't ask). Because clib are generated before extensions, we have to - # explicitly add an extension which has generate_config_h and - # generate_numpyconfig_h as sources *before* adding npymath. 
- - config.add_extension('_dummy', - sources=[join('src', 'dummymodule.c'), - generate_config_h, - generate_numpyconfig_h, - generate_numpy_api] - ) - - ####################################################################### # npymath library # ####################################################################### diff --git a/numpy/core/setup_common.py b/numpy/core/setup_common.py index a3f7acd6d..84b78b585 100644 --- a/numpy/core/setup_common.py +++ b/numpy/core/setup_common.py @@ -5,6 +5,7 @@ import sys import warnings import copy import binascii +import textwrap from numpy.distutils.misc_util import mingw32 @@ -415,3 +416,41 @@ def long_double_representation(lines): else: # We never detected the after_sequence raise ValueError("Could not lock sequences (%s)" % saw) + + +def check_for_right_shift_internal_compiler_error(cmd): + """ + On our arm CI, this fails with an internal compilation error + + The failure looks like the following, and can be reproduced on ARM64 GCC 5.4: + + <source>: In function 'right_shift': + <source>:4:20: internal compiler error: in expand_shift_1, at expmed.c:2349 + ip1[i] = ip1[i] >> in2; + ^ + Please submit a full bug report, + with preprocessed source if appropriate. + See <http://gcc.gnu.org/bugs.html> for instructions. + Compiler returned: 1 + + This function returns True if this compiler bug is present, and we need to + turn off optimization for the function + """ + cmd._check_compiler() + has_optimize = cmd.try_compile(textwrap.dedent("""\ + __attribute__((optimize("O3"))) void right_shift() {} + """), None, None) + if not has_optimize: + return False + + no_err = cmd.try_compile(textwrap.dedent("""\ + typedef long the_type; /* fails also for unsigned and long long */ + __attribute__((optimize("O3"))) void right_shift(the_type in2, the_type *ip1, int n) { + for (int i = 0; i < n; i++) { + if (in2 < (the_type)sizeof(the_type) * 8) { + ip1[i] = ip1[i] >> in2; + } + } + } + """), None, None) + return not no_err diff --git a/numpy/core/shape_base.py b/numpy/core/shape_base.py index 710f64827..369d956fb 100644 --- a/numpy/core/shape_base.py +++ b/numpy/core/shape_base.py @@ -9,8 +9,9 @@ import warnings from . import numeric as _nx from . import overrides -from .numeric import array, asanyarray, newaxis +from ._asarray import array, asanyarray from .multiarray import normalize_axis_index +from . import fromnumeric as _from_nx array_function_dispatch = functools.partial( @@ -123,7 +124,7 @@ def atleast_2d(*arys): if ary.ndim == 0: result = ary.reshape(1, 1) elif ary.ndim == 1: - result = ary[newaxis, :] + result = ary[_nx.newaxis, :] else: result = ary res.append(result) @@ -193,9 +194,9 @@ def atleast_3d(*arys): if ary.ndim == 0: result = ary.reshape(1, 1, 1) elif ary.ndim == 1: - result = ary[newaxis, :, newaxis] + result = ary[_nx.newaxis, :, _nx.newaxis] elif ary.ndim == 2: - result = ary[:, :, newaxis] + result = ary[:, :, _nx.newaxis] else: result = ary res.append(result) @@ -435,9 +436,9 @@ def stack(arrays, axis=0, out=None): # Internal functions to eliminate the overhead of repeated dispatch in one of # the two possible paths inside np.block. # Use getattr to protect against __array_function__ being disabled. 
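Note (editorial illustration, not part of the patch): the unwrapping idiom in the lines below relies on dispatched functions exposing their undecorated implementation as ``__wrapped__`` when ``__array_function__`` dispatch is compiled in; the ``getattr`` default keeps the code working when it is not. A minimal sketch of the same pattern, assuming a build where ``np.concatenate`` carries ``__wrapped__``:

>>> import numpy as np
>>> _concat = getattr(np.concatenate, '__wrapped__', np.concatenate)
>>> _concat([np.array([1]), np.array([2])])  # bypasses dispatch overhead
array([1, 2])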
-_size = getattr(_nx.size, '__wrapped__', _nx.size) -_ndim = getattr(_nx.ndim, '__wrapped__', _nx.ndim) -_concatenate = getattr(_nx.concatenate, '__wrapped__', _nx.concatenate) +_size = getattr(_from_nx.size, '__wrapped__', _from_nx.size) +_ndim = getattr(_from_nx.ndim, '__wrapped__', _from_nx.ndim) +_concatenate = getattr(_from_nx.concatenate, '__wrapped__', _from_nx.concatenate) def _block_format_index(index): @@ -471,7 +472,7 @@ def _block_check_depths_match(arrays, parent_index=[]): first_index : list of int The full index of an element from the bottom of the nesting in `arrays`. If any element at the bottom is an empty list, this will - refer to it, and the last index along the empty axis will be `None`. + refer to it, and the last index along the empty axis will be None. max_arr_ndim : int The maximum of the ndims of the arrays nested in `arrays`. final_size: int diff --git a/numpy/core/src/common/npy_partition.h.src b/numpy/core/src/common/npy_partition.h.src index a22cf911c..97dc2536b 100644 --- a/numpy/core/src/common/npy_partition.h.src +++ b/numpy/core/src/common/npy_partition.h.src @@ -113,9 +113,6 @@ get_argpartition_func(int type, NPY_SELECTKIND which) npy_intp i; npy_intp ntypes = ARRAY_SIZE(_part_map); - if (which >= NPY_NSELECTS) { - return NULL; - } for (i = 0; i < ntypes; i++) { if (type == _part_map[i].typenum) { return _part_map[i].argpart[which]; diff --git a/numpy/core/src/multiarray/_multiarray_tests.c.src b/numpy/core/src/multiarray/_multiarray_tests.c.src index 1365e87bb..fa2efb428 100644 --- a/numpy/core/src/multiarray/_multiarray_tests.c.src +++ b/numpy/core/src/multiarray/_multiarray_tests.c.src @@ -593,6 +593,25 @@ fail: return NULL; } +/* + * Helper to test fromstring of 0 terminated strings, as the C-API supports + * the -1 length identifier. 
+ */ +static PyObject * +fromstring_null_term_c_api(PyObject *dummy, PyObject *byte_obj) +{ + char *string; + PyArray_Descr *descr; + + string = PyBytes_AsString(byte_obj); + if (string == NULL) { + return NULL; + } + descr = PyArray_DescrNewFromType(NPY_FLOAT64); + return PyArray_FromString(string, -1, descr, -1, " "); +} + + /* check no elision for avoided increfs */ static PyObject * incref_elide(PyObject *dummy, PyObject *args) @@ -656,6 +675,43 @@ npy_updateifcopy_deprecation(PyObject* NPY_UNUSED(self), PyObject* args) Py_RETURN_NONE; } +/* used to test that PyArray_As1D usage emits a not-implemented error */ +static PyObject* +npy_pyarrayas1d_deprecation(PyObject* NPY_UNUSED(self), PyObject* NPY_UNUSED(args)) +{ + PyObject *op = Py_BuildValue("i", 42); + PyObject *result = op; + int dim = 4; + double arg[2] = {1, 2}; + int temp = PyArray_As1D(&result, (char **)&arg, &dim, NPY_DOUBLE); + if (temp < 0) { + Py_DECREF(op); + return NULL; + } + /* op != result */ + Py_DECREF(op); + return result; +} + +/* used to test that PyArray_As2D usage emits a not-implemented error */ +static PyObject* +npy_pyarrayas2d_deprecation(PyObject* NPY_UNUSED(self), PyObject* NPY_UNUSED(args)) +{ + PyObject *op = Py_BuildValue("i", 42); + PyObject *result = op; + int dim1 = 4; + int dim2 = 6; + double arg[2][2] = {{1, 2}, {3, 4}}; + int temp = PyArray_As2D(&result, (char ***)&arg, &dim1, &dim2, NPY_DOUBLE); + if (temp < 0) { + Py_DECREF(op); + return NULL; + } + /* op != result */ + Py_DECREF(op); + return result; +} + /* used to create array with WRITEBACKIFCOPY flag */ static PyObject* npy_create_writebackifcopy(PyObject* NPY_UNUSED(self), PyObject* args) @@ -857,6 +913,7 @@ static PyObject* get_c_wrapping_array(PyObject* NPY_UNUSED(self), PyObject* arg) { int writeable, flags; + PyArray_Descr *descr; npy_intp zero = 0; writeable = PyObject_IsTrue(arg); @@ -866,7 +923,8 @@ get_c_wrapping_array(PyObject* NPY_UNUSED(self), PyObject* arg) flags = writeable ?
NPY_ARRAY_WRITEABLE : 0; /* Create an empty array (which points to a random place) */ - return PyArray_NewFromDescr(&PyArray_Type, PyArray_DescrFromType(NPY_INTP), + descr = PyArray_DescrNewFromType(NPY_INTP); + return PyArray_NewFromDescr(&PyArray_Type, descr, 1, &zero, NULL, &zero, flags, NULL); } @@ -1927,6 +1985,9 @@ static PyMethodDef Multiarray_TestsMethods[] = { {"test_inplace_increment", inplace_increment, METH_VARARGS, NULL}, + {"fromstring_null_term_c_api", + fromstring_null_term_c_api, + METH_O, NULL}, {"incref_elide", incref_elide, METH_VARARGS, NULL}, @@ -1939,6 +2000,12 @@ static PyMethodDef Multiarray_TestsMethods[] = { {"npy_updateifcopy_deprecation", npy_updateifcopy_deprecation, METH_O, NULL}, + {"npy_pyarrayas1d_deprecation", + npy_pyarrayas1d_deprecation, + METH_NOARGS, NULL}, + {"npy_pyarrayas2d_deprecation", + npy_pyarrayas2d_deprecation, + METH_NOARGS, NULL}, {"npy_create_writebackifcopy", npy_create_writebackifcopy, METH_O, NULL}, diff --git a/numpy/core/src/multiarray/arrayobject.c b/numpy/core/src/multiarray/arrayobject.c index eb939f47c..5ed5b7635 100644 --- a/numpy/core/src/multiarray/arrayobject.c +++ b/numpy/core/src/multiarray/arrayobject.c @@ -462,7 +462,7 @@ WARN_IN_DEALLOC(PyObject* warning, const char * msg) { PyErr_WriteUnraisable(Py_None); } } -}; +} /* array object functions */ @@ -557,7 +557,7 @@ PyArray_DebugPrint(PyArrayObject *obj) printf(" ndim : %d\n", fobj->nd); printf(" shape :"); for (i = 0; i < fobj->nd; ++i) { - printf(" %d", (int)fobj->dimensions[i]); + printf(" %" NPY_INTP_FMT, fobj->dimensions[i]); } printf("\n"); @@ -567,7 +567,7 @@ PyArray_DebugPrint(PyArrayObject *obj) printf(" data : %p\n", fobj->data); printf(" strides:"); for (i = 0; i < fobj->nd; ++i) { - printf(" %d", (int)fobj->strides[i]); + printf(" %" NPY_INTP_FMT, fobj->strides[i]); } printf("\n"); @@ -607,7 +607,7 @@ PyArray_DebugPrint(PyArrayObject *obj) * TO BE REMOVED - NOT USED INTERNALLY. */ NPY_NO_EXPORT void -PyArray_SetDatetimeParseFunction(PyObject *op) +PyArray_SetDatetimeParseFunction(PyObject *NPY_UNUSED(op)) { } @@ -630,7 +630,7 @@ PyArray_CompareUCS4(npy_ucs4 *s1, npy_ucs4 *s2, size_t len) /*NUMPY_API */ NPY_NO_EXPORT int -PyArray_CompareString(char *s1, char *s2, size_t len) +PyArray_CompareString(const char *s1, const char *s2, size_t len) { const unsigned char *c1 = (unsigned char *)s1; const unsigned char *c2 = (unsigned char *)s2; @@ -1200,15 +1200,28 @@ _void_compare(PyArrayObject *self, PyArrayObject *other, int cmp_op) } } if (res == NULL && !PyErr_Occurred()) { - PyErr_SetString(PyExc_ValueError, "No fields found."); + /* these dtypes had no fields. Use a MultiIter to broadcast them + * to an output array, and fill with True (for EQ)*/ + PyArrayMultiIterObject *mit = (PyArrayMultiIterObject *) + PyArray_MultiIterNew(2, self, other); + if (mit == NULL) { + return NULL; + } + + res = PyArray_NewFromDescr(&PyArray_Type, + PyArray_DescrFromType(NPY_BOOL), + mit->nd, mit->dimensions, + NULL, NULL, 0, NULL); + Py_DECREF(mit); + if (res) { + PyArray_FILLWBYTE((PyArrayObject *)res, + cmp_op == Py_EQ ? 1 : 0); + } } return res; } else { - /* - * compare as a string. Assumes self and - * other have same descr->type - */ + /* compare as a string. 
Assumes self and other have same descr->type */ return _strings_richcompare(self, other, cmp_op, 0); } } diff --git a/numpy/core/src/multiarray/arraytypes.c.src b/numpy/core/src/multiarray/arraytypes.c.src index 5d9e990e8..152a2be9c 100644 --- a/numpy/core/src/multiarray/arraytypes.c.src +++ b/numpy/core/src/multiarray/arraytypes.c.src @@ -3078,6 +3078,7 @@ BOOL_argmax(npy_bool *ip, npy_intp n, npy_intp *max_ind, * #le = _LESS_THAN_OR_EQUAL*10, npy_half_le, _LESS_THAN_OR_EQUAL*8# * #iscomplex = 0*14, 1*3, 0*2# * #incr = ip++*14, ip+=2*3, ip++*2# + * #isdatetime = 0*17, 1*2# */ static int @fname@_argmax(@type@ *ip, npy_intp n, npy_intp *max_ind, @@ -3103,6 +3104,12 @@ static int return 0; } #endif +#if @isdatetime@ + if (mp == NPY_DATETIME_NAT) { + /* NaT encountered, it's maximal */ + return 0; + } +#endif for (i = 1; i < n; i++) { @incr@; @@ -3122,6 +3129,13 @@ static int } } #else +#if @isdatetime@ + if (*ip == NPY_DATETIME_NAT) { + /* NaT encountered, it's maximal */ + *max_ind = i; + break; + } +#endif if (!@le@(*ip, mp)) { /* negated, for correct nan handling */ mp = *ip; *max_ind = i; @@ -3158,16 +3172,19 @@ BOOL_argmin(npy_bool *ip, npy_intp n, npy_intp *min_ind, * #fname = BYTE, UBYTE, SHORT, USHORT, INT, UINT, * LONG, ULONG, LONGLONG, ULONGLONG, * HALF, FLOAT, DOUBLE, LONGDOUBLE, - * CFLOAT, CDOUBLE, CLONGDOUBLE# + * CFLOAT, CDOUBLE, CLONGDOUBLE, + * DATETIME, TIMEDELTA# * #type = npy_byte, npy_ubyte, npy_short, npy_ushort, npy_int, npy_uint, * npy_long, npy_ulong, npy_longlong, npy_ulonglong, * npy_half, npy_float, npy_double, npy_longdouble, - * npy_float, npy_double, npy_longdouble# - * #isfloat = 0*10, 1*7# - * #isnan = nop*10, npy_half_isnan, npy_isnan*6# - * #le = _LESS_THAN_OR_EQUAL*10, npy_half_le, _LESS_THAN_OR_EQUAL*6# - * #iscomplex = 0*14, 1*3# - * #incr = ip++*14, ip+=2*3# + * npy_float, npy_double, npy_longdouble, + * npy_datetime, npy_timedelta# + * #isfloat = 0*10, 1*7, 0*2# + * #isnan = nop*10, npy_half_isnan, npy_isnan*6, nop*2# + * #le = _LESS_THAN_OR_EQUAL*10, npy_half_le, _LESS_THAN_OR_EQUAL*8# + * #iscomplex = 0*14, 1*3, 0*2# + * #incr = ip++*14, ip+=2*3, ip++*2# + * #isdatetime = 0*17, 1*2# */ static int @fname@_argmin(@type@ *ip, npy_intp n, npy_intp *min_ind, @@ -3193,6 +3210,12 @@ static int return 0; } #endif +#if @isdatetime@ + if (mp == NPY_DATETIME_NAT) { + /* NaT encountered, it's minimal */ + return 0; + } +#endif for (i = 1; i < n; i++) { @incr@; @@ -3212,6 +3235,13 @@ static int } } #else +#if @isdatetime@ + if (*ip == NPY_DATETIME_NAT) { + /* NaT encountered, it's minimal */ + *min_ind = i; + break; + } +#endif if (!@le@(mp, *ip)) { /* negated, for correct nan handling */ mp = *ip; *min_ind = i; @@ -3231,43 +3261,6 @@ static int #undef _LESS_THAN_OR_EQUAL -/**begin repeat - * - * #fname = DATETIME, TIMEDELTA# - * #type = npy_datetime, npy_timedelta# - */ -static int -@fname@_argmin(@type@ *ip, npy_intp n, npy_intp *min_ind, - PyArrayObject *NPY_UNUSED(aip)) -{ - /* NPY_DATETIME_NAT is smaller than every other value, we skip - * it for consistency with min(). 
- */ - npy_intp i; - @type@ mp = NPY_DATETIME_NAT; - - i = 0; - while (i < n && mp == NPY_DATETIME_NAT) { - mp = ip[i]; - i++; - } - if (i == n) { - /* All NaTs: return 0 */ - *min_ind = 0; - return 0; - } - *min_ind = i - 1; - for (; i < n; i++) { - if (mp > ip[i] && ip[i] != NPY_DATETIME_NAT) { - mp = ip[i]; - *min_ind = i; - } - } - return 0; -} - -/**end repeat**/ - static int OBJECT_argmax(PyObject **ip, npy_intp n, npy_intp *max_ind, PyArrayObject *NPY_UNUSED(aip)) diff --git a/numpy/core/src/multiarray/compiled_base.c b/numpy/core/src/multiarray/compiled_base.c index c38067681..055d3e60f 100644 --- a/numpy/core/src/multiarray/compiled_base.c +++ b/numpy/core/src/multiarray/compiled_base.c @@ -942,6 +942,20 @@ ravel_multi_index_loop(int ravel_ndim, npy_intp *ravel_dims, char invalid; npy_intp j, m; + /* + * Check for 0-dimensional axes unless there is nothing to do. + * An empty array/shape cannot be indexed at all. + */ + if (count != 0) { + for (i = 0; i < ravel_ndim; ++i) { + if (ravel_dims[i] == 0) { + PyErr_SetString(PyExc_ValueError, + "cannot unravel if shape has zero entries (is empty)."); + return NPY_FAIL; + } + } + } + NPY_BEGIN_ALLOW_THREADS; invalid = 0; while (count--) { diff --git a/numpy/core/src/multiarray/conversion_utils.c b/numpy/core/src/multiarray/conversion_utils.c index a370874a6..5f0ad5817 100644 --- a/numpy/core/src/multiarray/conversion_utils.c +++ b/numpy/core/src/multiarray/conversion_utils.c @@ -406,7 +406,6 @@ PyArray_SortkindConverter(PyObject *obj, NPY_SORTKIND *sortkind) } *sortkind = NPY_QUICKSORT; - str = PyBytes_AsString(obj); if (!str) { @@ -551,10 +550,9 @@ PyArray_OrderConverter(PyObject *object, NPY_ORDER *val) int ret; tmp = PyUnicode_AsASCIIString(object); if (tmp == NULL) { - PyErr_SetString(PyExc_ValueError, "Invalid unicode string passed in " - "for the array ordering. " - "Please pass in 'C', 'F', 'A' " - "or 'K' instead"); + PyErr_SetString(PyExc_ValueError, + "Invalid unicode string passed in for the array ordering. " + "Please pass in 'C', 'F', 'A' or 'K' instead"); return NPY_FAIL; } ret = PyArray_OrderConverter(tmp, val); @@ -562,38 +560,18 @@ PyArray_OrderConverter(PyObject *object, NPY_ORDER *val) return ret; } else if (!PyBytes_Check(object) || PyBytes_GET_SIZE(object) < 1) { - /* 2015-12-14, 1.11 */ - int ret = DEPRECATE("Non-string object detected for " - "the array ordering. Please pass " - "in 'C', 'F', 'A', or 'K' instead"); - - if (ret < 0) { - return -1; - } - - if (PyObject_IsTrue(object)) { - *val = NPY_FORTRANORDER; - } - else { - *val = NPY_CORDER; - } - if (PyErr_Occurred()) { - return NPY_FAIL; - } - return NPY_SUCCEED; + PyErr_SetString(PyExc_ValueError, + "Non-string object detected for the array ordering. " + "Please pass in 'C', 'F', 'A', or 'K' instead"); + return NPY_FAIL; } else { str = PyBytes_AS_STRING(object); if (strlen(str) != 1) { - /* 2015-12-14, 1.11 */ - int ret = DEPRECATE("Non length-one string passed " - "in for the array ordering. " - "Please pass in 'C', 'F', 'A', " - "or 'K' instead"); - - if (ret < 0) { - return -1; - } + PyErr_SetString(PyExc_ValueError, + "Non-string object detected for the array ordering. 
" + "Please pass in 'C', 'F', 'A', or 'K' instead"); + return NPY_FAIL; } if (str[0] == 'C' || str[0] == 'c') { @@ -689,8 +667,8 @@ PyArray_ConvertClipmodeSequence(PyObject *object, NPY_CLIPMODE *modes, int n) if (object && (PyTuple_Check(object) || PyList_Check(object))) { if (PySequence_Size(object) != n) { PyErr_Format(PyExc_ValueError, - "list of clipmodes has wrong length (%d instead of %d)", - (int)PySequence_Size(object), n); + "list of clipmodes has wrong length (%zd instead of %d)", + PySequence_Size(object), n); return NPY_FAIL; } diff --git a/numpy/core/src/multiarray/convert.c b/numpy/core/src/multiarray/convert.c index 7db467308..aa4e40e66 100644 --- a/numpy/core/src/multiarray/convert.c +++ b/numpy/core/src/multiarray/convert.c @@ -543,35 +543,6 @@ PyArray_AssignZero(PyArrayObject *dst, return retcode; } -/* - * Fills an array with ones. - * - * dst: The destination array. - * wheremask: If non-NULL, a boolean mask specifying where to set the values. - * - * Returns 0 on success, -1 on failure. - */ -NPY_NO_EXPORT int -PyArray_AssignOne(PyArrayObject *dst, - PyArrayObject *wheremask) -{ - npy_bool value; - PyArray_Descr *bool_dtype; - int retcode; - - /* Create a raw bool scalar with the value True */ - bool_dtype = PyArray_DescrFromType(NPY_BOOL); - if (bool_dtype == NULL) { - return -1; - } - value = 1; - - retcode = PyArray_AssignRawScalar(dst, bool_dtype, (char *)&value, - wheremask, NPY_SAFE_CASTING); - - Py_DECREF(bool_dtype); - return retcode; -} /*NUMPY_API * Copy an array. diff --git a/numpy/core/src/multiarray/ctors.c b/numpy/core/src/multiarray/ctors.c index f0a15505d..9b6f59e3a 100644 --- a/numpy/core/src/multiarray/ctors.c +++ b/numpy/core/src/multiarray/ctors.c @@ -40,9 +40,31 @@ * regards to the handling of text representations. */ +/* + * Scanning function for next element parsing and seperator skipping. + * These functions return: + * - 0 to indicate more data to read + * - -1 when reading stopped at the end of the string/file + * - -2 when reading stopped before the end was reached. + * + * The dtype specific parsing functions may set the python error state + * (they have to get the GIL first) additionally. + */ typedef int (*next_element)(void **, void *, PyArray_Descr *, void *); typedef int (*skip_separator)(void **, const char *, void *); + +static npy_bool +string_is_fully_read(char const* start, char const* end) { + if (end == NULL) { + return *start == '\0'; /* null terminated */ + } + else { + return start >= end; /* fixed length */ + } +} + + static int fromstr_next_element(char **s, void *dptr, PyArray_Descr *dtype, const char *end) @@ -50,19 +72,23 @@ fromstr_next_element(char **s, void *dptr, PyArray_Descr *dtype, char *e = *s; int r = dtype->f->fromstr(*s, dptr, &e, dtype); /* - * fromstr always returns 0 for basic dtypes - * s points to the end of the parsed string - * if an error occurs s is not changed + * fromstr always returns 0 for basic dtypes; s points to the end of the + * parsed string. If s is not changed an error occurred or the end was + * reached. 
*/ - if (*s == e) { - /* Nothing read */ - return -1; + if (*s == e || r < 0) { + /* Nothing read, could be end of string or an error (or both) */ + if (string_is_fully_read(*s, end)) { + return -1; + } + return -2; } *s = e; if (end != NULL && *s > end) { + /* Stop the iteration if we read far enough */ return -1; } - return r; + return 0; } static int @@ -75,9 +101,13 @@ fromfile_next_element(FILE **fp, void *dptr, PyArray_Descr *dtype, if (r == 1) { return 0; } - else { + else if (r == EOF) { return -1; } + else { + /* unable to read more, but EOF not reached indicating an error. */ + return -2; + } } /* @@ -143,9 +173,10 @@ fromstr_skip_separator(char **s, const char *sep, const char *end) { char *string = *s; int result = 0; + while (1) { char c = *string; - if (c == '\0' || (end != NULL && string >= end)) { + if (string_is_fully_read(string, end)) { result = -1; break; } @@ -513,8 +544,8 @@ setArrayFromSequence(PyArrayObject *a, PyObject *s, */ if (slen != PyArray_DIMS(a)[dim] && slen != 1) { PyErr_Format(PyExc_ValueError, - "cannot copy sequence with size %d to array axis " - "with dimension %d", (int)slen, (int)PyArray_DIMS(a)[dim]); + "cannot copy sequence with size %zd to array axis " + "with dimension %" NPY_INTP_FMT, slen, PyArray_DIMS(a)[dim]); goto fail; } @@ -936,6 +967,39 @@ discover_dimensions(PyObject *obj, int *maxndim, npy_intp *d, int check_it, return 0; } +static PyObject * +raise_memory_error(int nd, npy_intp *dims, PyArray_Descr *descr) +{ + static PyObject *exc_type = NULL; + + npy_cache_import( + "numpy.core._exceptions", "_ArrayMemoryError", + &exc_type); + if (exc_type == NULL) { + goto fail; + } + + PyObject *shape = PyArray_IntTupleFromIntp(nd, dims); + if (shape == NULL) { + goto fail; + } + + /* produce an error object */ + PyObject *exc_value = PyTuple_Pack(2, shape, (PyObject *)descr); + Py_DECREF(shape); + if (exc_value == NULL){ + goto fail; + } + PyErr_SetObject(exc_type, exc_value); + Py_DECREF(exc_value); + return NULL; + +fail: + /* we couldn't raise the formatted exception for some reason */ + PyErr_WriteUnraisable(NULL); + return PyErr_NoMemory(); +} + /* * Generic new array creation routine. * Internal variant with calloc argument for PyArray_Zeros. 
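Note (editorial illustration, not part of the patch): ``raise_memory_error`` above routes failed allocations through ``numpy.core._exceptions._ArrayMemoryError``, constructed from the requested shape and dtype. A rough doctest sketch of the user-visible effect follows; the size string and the exception rendering are illustrative and may differ across NumPy versions and platforms:

>>> np.empty((10**12, 10**12))
Traceback (most recent call last):
    ...
numpy.core._exceptions.MemoryError: Unable to allocate 6.62 YiB for an array with shape (1000000000000, 1000000000000) and data type float64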
@@ -1113,30 +1177,7 @@ PyArray_NewFromDescr_int( data = npy_alloc_cache(nbytes); } if (data == NULL) { - static PyObject *exc_type = NULL; - - npy_cache_import( - "numpy.core._exceptions", "_ArrayMemoryError", - &exc_type); - if (exc_type == NULL) { - return NULL; - } - - PyObject *shape = PyArray_IntTupleFromIntp(fa->nd,fa->dimensions); - if (shape == NULL) { - return NULL; - } - - /* produce an error object */ - PyObject *exc_value = PyTuple_Pack(2, shape, descr); - Py_DECREF(shape); - if (exc_value == NULL){ - return NULL; - } - PyErr_SetObject(exc_type, exc_value); - Py_DECREF(exc_value); - return NULL; - + return raise_memory_error(fa->nd, fa->dimensions, descr); } fa->flags |= NPY_ARRAY_OWNDATA; @@ -1426,28 +1467,6 @@ _dtype_from_buffer_3118(PyObject *memoryview) } -/* - * Call the python _is_from_ctypes - */ -NPY_NO_EXPORT int -_is_from_ctypes(PyObject *obj) { - PyObject *ret_obj; - static PyObject *py_func = NULL; - - npy_cache_import("numpy.core._internal", "_is_from_ctypes", &py_func); - - if (py_func == NULL) { - return -1; - } - ret_obj = PyObject_CallFunctionObjArgs(py_func, obj, NULL); - if (ret_obj == NULL) { - return -1; - } - - return PyObject_IsTrue(ret_obj); -} - - NPY_NO_EXPORT PyObject * _array_from_buffer_3118(PyObject *memoryview) { @@ -2770,9 +2789,9 @@ PyArray_DescrFromObject(PyObject *op, PyArray_Descr *mintype) Deprecated, use PyArray_NewFromDescr instead. */ NPY_NO_EXPORT PyObject * -PyArray_FromDimsAndDataAndDescr(int nd, int *d, +PyArray_FromDimsAndDataAndDescr(int NPY_UNUSED(nd), int *NPY_UNUSED(d), PyArray_Descr *descr, - char *data) + char *NPY_UNUSED(data)) { PyErr_SetString(PyExc_NotImplementedError, "PyArray_FromDimsAndDataAndDescr: use PyArray_NewFromDescr."); @@ -2784,7 +2803,7 @@ PyArray_FromDimsAndDataAndDescr(int nd, int *d, Deprecated, use PyArray_SimpleNew instead. */ NPY_NO_EXPORT PyObject * -PyArray_FromDims(int nd, int *d, int type) +PyArray_FromDims(int NPY_UNUSED(nd), int *NPY_UNUSED(d), int NPY_UNUSED(type)) { PyErr_SetString(PyExc_NotImplementedError, "PyArray_FromDims: use PyArray_SimpleNew."); @@ -2875,8 +2894,8 @@ PyArray_CopyAsFlat(PyArrayObject *dst, PyArrayObject *src, NPY_ORDER order) src_size = PyArray_SIZE(src); if (dst_size != src_size) { PyErr_Format(PyExc_ValueError, - "cannot copy from array of size %d into an array " - "of size %d", (int)src_size, (int)dst_size); + "cannot copy from array of size %" NPY_INTP_FMT " into an array " + "of size %" NPY_INTP_FMT, src_size, dst_size); return -1; } @@ -3555,11 +3574,13 @@ PyArray_ArangeObj(PyObject *start, PyObject *stop, PyObject *step, PyArray_Descr return NULL; } +/* This array creation function steals the reference to dtype. */ static PyArrayObject * array_fromfile_binary(FILE *fp, PyArray_Descr *dtype, npy_intp num, size_t *nread) { PyArrayObject *r; npy_off_t start, numbytes; + int elsize; if (num < 0) { int fail = 0; @@ -3586,27 +3607,29 @@ array_fromfile_binary(FILE *fp, PyArray_Descr *dtype, npy_intp num, size_t *nrea } num = numbytes / dtype->elsize; } + /* - * When dtype->subarray is true, PyArray_NewFromDescr will decref dtype - * even on success, so make sure it stays around until exit. + * Array creation may move sub-array dimensions from the dtype to array + * dimensions, so we need to use the original element size when reading. 
*/ - Py_INCREF(dtype); + elsize = dtype->elsize; + r = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type, dtype, 1, &num, NULL, NULL, 0, NULL); if (r == NULL) { - Py_DECREF(dtype); return NULL; } + NPY_BEGIN_ALLOW_THREADS; - *nread = fread(PyArray_DATA(r), dtype->elsize, num, fp); + *nread = fread(PyArray_DATA(r), elsize, num, fp); NPY_END_ALLOW_THREADS; - Py_DECREF(dtype); return r; } /* * Create an array by reading from the given stream, using the passed * next_element and skip_separator functions. + * As typical for array creation functions, it steals the reference to dtype. */ #define FROM_BUFFER_SIZE 4096 static PyArrayObject * @@ -3618,6 +3641,7 @@ array_from_text(PyArray_Descr *dtype, npy_intp num, char *sep, size_t *nread, npy_intp i; char *dptr, *clean_sep, *tmp; int err = 0; + int stop_reading_flag; /* -1 indicates end reached; -2 a parsing error */ npy_intp thisbuf = 0; npy_intp size; npy_intp bytes, totalbytes; @@ -3625,10 +3649,11 @@ array_from_text(PyArray_Descr *dtype, npy_intp num, char *sep, size_t *nread, size = (num >= 0) ? num : FROM_BUFFER_SIZE; /* - * When dtype->subarray is true, PyArray_NewFromDescr will decref dtype - * even on success, so make sure it stays around until exit. + * Array creation may move sub-array dimensions from the dtype to array + * dimensions, so we need to use the original dtype when reading. */ Py_INCREF(dtype); + r = (PyArrayObject *) PyArray_NewFromDescr(&PyArray_Type, dtype, 1, &size, NULL, NULL, 0, NULL); @@ -3636,6 +3661,7 @@ array_from_text(PyArray_Descr *dtype, npy_intp num, char *sep, size_t *nread, Py_DECREF(dtype); return NULL; } + clean_sep = swab_separator(sep); if (clean_sep == NULL) { err = 1; @@ -3645,9 +3671,9 @@ array_from_text(PyArray_Descr *dtype, npy_intp num, char *sep, size_t *nread, NPY_BEGIN_ALLOW_THREADS; totalbytes = bytes = size * dtype->elsize; dptr = PyArray_DATA(r); - for (i= 0; num < 0 || i < num; i++) { - if (next(&stream, dptr, dtype, stream_data) < 0) { - /* EOF */ + for (i = 0; num < 0 || i < num; i++) { + stop_reading_flag = next(&stream, dptr, dtype, stream_data); + if (stop_reading_flag < 0) { break; } *nread += 1; @@ -3664,7 +3690,12 @@ array_from_text(PyArray_Descr *dtype, npy_intp num, char *sep, size_t *nread, dptr = tmp + (totalbytes - bytes); thisbuf = 0; } - if (skip_sep(&stream, clean_sep, stream_data) < 0) { + stop_reading_flag = skip_sep(&stream, clean_sep, stream_data); + if (stop_reading_flag < 0) { + if (num == i + 1) { + /* if we read as much as requested sep is optional */ + stop_reading_flag = -1; + } break; } } @@ -3683,8 +3714,24 @@ array_from_text(PyArray_Descr *dtype, npy_intp num, char *sep, size_t *nread, } } NPY_END_ALLOW_THREADS; + free(clean_sep); + if (stop_reading_flag == -2) { + if (PyErr_Occurred()) { + /* If an error is already set (unlikely), do not create new one */ + Py_DECREF(r); + Py_DECREF(dtype); + return NULL; + } + /* 2019-09-12, NumPy 1.18 */ + if (DEPRECATE( + "string or file could not be read to its end due to unmatched " + "data; this will raise a ValueError in the future.") < 0) { + goto fail; + } + } + fail: Py_DECREF(dtype); if (err == 1) { @@ -3703,9 +3750,8 @@ fail: * Given a ``FILE *`` pointer ``fp``, and a ``PyArray_Descr``, return an * array corresponding to the data encoded in that file. * - * If the dtype is NULL, the default array type is used (double). - * If non-null, the reference is stolen and if dtype->subarray is true dtype - * will be decrefed even on success. 
+ * The reference to `dtype` is stolen (it is possible that the passed in + * dtype is not held on to). * * The number of elements to read is given as ``num``; if it is < 0, then * then as many as possible are read. @@ -3753,7 +3799,6 @@ PyArray_FromFile(FILE *fp, PyArray_Descr *dtype, npy_intp num, char *sep) (skip_separator) fromfile_skip_separator, NULL); } if (ret == NULL) { - Py_DECREF(dtype); return NULL; } if (((npy_intp) nread) < num) { @@ -3843,7 +3888,13 @@ PyArray_FromBuffer(PyObject *buf, PyArray_Descr *type, s = (npy_intp)ts - offset; n = (npy_intp)count; itemsize = type->elsize; - if (n < 0 ) { + if (n < 0) { + if (itemsize == 0) { + PyErr_SetString(PyExc_ValueError, + "cannot determine count if itemsize is 0"); + Py_DECREF(type); + return NULL; + } if (s % itemsize != 0) { PyErr_SetString(PyExc_ValueError, "buffer size must be a multiple"\ @@ -3948,6 +3999,11 @@ PyArray_FromString(char *data, npy_intp slen, PyArray_Descr *dtype, return NULL; } } + /* + * NewFromDescr may replace dtype to absorb subarray shape + * into the array, so get size beforehand. + */ + npy_intp size_to_copy = num*dtype->elsize; ret = (PyArrayObject *) PyArray_NewFromDescr(&PyArray_Type, dtype, 1, &num, NULL, NULL, @@ -3955,7 +4011,7 @@ PyArray_FromString(char *data, npy_intp slen, PyArray_Descr *dtype, if (ret == NULL) { return NULL; } - memcpy(PyArray_DATA(ret), data, num*dtype->elsize); + memcpy(PyArray_DATA(ret), data, size_to_copy); } else { /* read from character-based string */ @@ -4036,7 +4092,7 @@ PyArray_FromIter(PyObject *obj, PyArray_Descr *dtype, npy_intp count) } for (i = 0; (i < count || count == -1) && (value = PyIter_Next(iter)); i++) { - if (i >= elcount) { + if (i >= elcount && elsize != 0) { npy_intp nbytes; /* Grow PyArray_DATA(ret): diff --git a/numpy/core/src/multiarray/datetime.c b/numpy/core/src/multiarray/datetime.c index 60e6bbae2..72a3df89c 100644 --- a/numpy/core/src/multiarray/datetime.c +++ b/numpy/core/src/multiarray/datetime.c @@ -27,6 +27,40 @@ #include "datetime_strings.h" /* + * Computes the python `ret, d = divmod(d, unit)`. + * + * Note that GCC is smart enough at -O2 to eliminate the `if(*d < 0)` branch + * for subsequent calls to this command - it is able to deduce that `*d >= 0`. + */ +static inline +npy_int64 extract_unit_64(npy_int64 *d, npy_int64 unit) { + assert(unit > 0); + npy_int64 div = *d / unit; + npy_int64 mod = *d % unit; + if (mod < 0) { + mod += unit; + div -= 1; + } + assert(mod >= 0); + *d = mod; + return div; +} + +static inline +npy_int32 extract_unit_32(npy_int32 *d, npy_int32 unit) { + assert(unit > 0); + npy_int32 div = *d / unit; + npy_int32 mod = *d % unit; + if (mod < 0) { + mod += unit; + div -= 1; + } + assert(mod >= 0); + *d = mod; + return div; +} + +/* * Imports the PyDateTime functions so we can create these objects. * This is called during module initialization */ @@ -160,17 +194,7 @@ days_to_yearsdays(npy_int64 *days_) npy_int64 year; /* Break down the 400 year cycle to get the year and day within the year */ - if (days >= 0) { - year = 400 * (days / days_per_400years); - days = days % days_per_400years; - } - else { - year = 400 * ((days - (days_per_400years - 1)) / days_per_400years); - days = days % days_per_400years; - if (days < 0) { - days += days_per_400years; - } - } + year = 400 * extract_unit_64(&days, days_per_400years); /* Work out the year/day within the 400 year cycle */ if (days >= 366) { @@ -386,7 +410,8 @@ convert_datetimestruct_to_datetime(PyArray_DatetimeMetaData *meta, * TO BE REMOVED - NOT USED INTERNALLY. 
*/ NPY_NO_EXPORT npy_datetime -PyArray_DatetimeStructToDatetime(NPY_DATETIMEUNIT fr, npy_datetimestruct *d) +PyArray_DatetimeStructToDatetime( + NPY_DATETIMEUNIT NPY_UNUSED(fr), npy_datetimestruct *NPY_UNUSED(d)) { PyErr_SetString(PyExc_RuntimeError, "The NumPy PyArray_DatetimeStructToDatetime function has " @@ -400,7 +425,8 @@ PyArray_DatetimeStructToDatetime(NPY_DATETIMEUNIT fr, npy_datetimestruct *d) * TO BE REMOVED - NOT USED INTERNALLY. */ NPY_NO_EXPORT npy_datetime -PyArray_TimedeltaStructToTimedelta(NPY_DATETIMEUNIT fr, npy_timedeltastruct *d) +PyArray_TimedeltaStructToTimedelta( + NPY_DATETIMEUNIT NPY_UNUSED(fr), npy_timedeltastruct *NPY_UNUSED(d)) { PyErr_SetString(PyExc_RuntimeError, "The NumPy PyArray_TimedeltaStructToTimedelta function has " @@ -409,26 +435,6 @@ PyArray_TimedeltaStructToTimedelta(NPY_DATETIMEUNIT fr, npy_timedeltastruct *d) } /* - * Computes the python `ret, d = divmod(d, unit)`. - * - * Note that GCC is smart enough at -O2 to eliminate the `if(*d < 0)` branch - * for subsequent calls to this command - it is able to deduce that `*d >= 0`. - */ -static inline -npy_int64 extract_unit(npy_datetime *d, npy_datetime unit) { - assert(unit > 0); - npy_int64 div = *d / unit; - npy_int64 mod = *d % unit; - if (mod < 0) { - mod += unit; - div -= 1; - } - assert(mod >= 0); - *d = mod; - return div; -} - -/* * Converts a datetime based on the given metadata into a datetimestruct */ NPY_NO_EXPORT int @@ -436,7 +442,7 @@ convert_datetime_to_datetimestruct(PyArray_DatetimeMetaData *meta, npy_datetime dt, npy_datetimestruct *out) { - npy_int64 perday; + npy_int64 days; /* Initialize the output to all zeros */ memset(out, 0, sizeof(npy_datetimestruct)); @@ -471,7 +477,7 @@ convert_datetime_to_datetimestruct(PyArray_DatetimeMetaData *meta, break; case NPY_FR_M: - out->year = 1970 + extract_unit(&dt, 12); + out->year = 1970 + extract_unit_64(&dt, 12); out->month = dt + 1; break; @@ -485,73 +491,67 @@ convert_datetime_to_datetimestruct(PyArray_DatetimeMetaData *meta, break; case NPY_FR_h: - perday = 24LL; - - set_datetimestruct_days(extract_unit(&dt, perday), out); + days = extract_unit_64(&dt, 24LL); + set_datetimestruct_days(days, out); out->hour = (int)dt; break; case NPY_FR_m: - perday = 24LL * 60; - - set_datetimestruct_days(extract_unit(&dt, perday), out); - out->hour = (int)extract_unit(&dt, 60); - out->min = (int)dt; + days = extract_unit_64(&dt, 60LL*24); + set_datetimestruct_days(days, out); + out->hour = (int)extract_unit_64(&dt, 60LL); + out->min = (int)dt; break; case NPY_FR_s: - perday = 24LL * 60 * 60; - - set_datetimestruct_days(extract_unit(&dt, perday), out); - out->hour = (int)extract_unit(&dt, 60*60); - out->min = (int)extract_unit(&dt, 60); + days = extract_unit_64(&dt, 60LL*60*24); + set_datetimestruct_days(days, out); + out->hour = (int)extract_unit_64(&dt, 60LL*60); + out->min = (int)extract_unit_64(&dt, 60LL); out->sec = (int)dt; break; case NPY_FR_ms: - perday = 24LL * 60 * 60 * 1000; - - set_datetimestruct_days(extract_unit(&dt, perday), out); - out->hour = (int)extract_unit(&dt, 1000LL*60*60); - out->min = (int)extract_unit(&dt, 1000LL*60); - out->sec = (int)extract_unit(&dt, 1000LL); + days = extract_unit_64(&dt, 1000LL*60*60*24); + set_datetimestruct_days(days, out); + out->hour = (int)extract_unit_64(&dt, 1000LL*60*60); + out->min = (int)extract_unit_64(&dt, 1000LL*60); + out->sec = (int)extract_unit_64(&dt, 1000LL); out->us = (int)(dt * 1000); break; case NPY_FR_us: - perday = 24LL * 60LL * 60LL * 1000LL * 1000LL; - 
set_datetimestruct_days(extract_unit(&dt, perday), out); - out->hour = (int)extract_unit(&dt, 1000LL*1000*60*60); - out->min = (int)extract_unit(&dt, 1000LL*1000*60); - out->sec = (int)extract_unit(&dt, 1000LL*1000); + days = extract_unit_64(&dt, 1000LL*1000*60*60*24); + set_datetimestruct_days(days, out); + out->hour = (int)extract_unit_64(&dt, 1000LL*1000*60*60); + out->min = (int)extract_unit_64(&dt, 1000LL*1000*60); + out->sec = (int)extract_unit_64(&dt, 1000LL*1000); out->us = (int)dt; break; case NPY_FR_ns: - perday = 24LL * 60LL * 60LL * 1000LL * 1000LL * 1000LL; - - set_datetimestruct_days(extract_unit(&dt, perday), out); - out->hour = (int)extract_unit(&dt, 1000LL*1000*1000*60*60); - out->min = (int)extract_unit(&dt, 1000LL*1000*1000*60); - out->sec = (int)extract_unit(&dt, 1000LL*1000*1000); - out->us = (int)extract_unit(&dt, 1000LL); + days = extract_unit_64(&dt, 1000LL*1000*1000*60*60*24); + set_datetimestruct_days(days, out); + out->hour = (int)extract_unit_64(&dt, 1000LL*1000*1000*60*60); + out->min = (int)extract_unit_64(&dt, 1000LL*1000*1000*60); + out->sec = (int)extract_unit_64(&dt, 1000LL*1000*1000); + out->us = (int)extract_unit_64(&dt, 1000LL); out->ps = (int)(dt * 1000); break; case NPY_FR_ps: - perday = 24LL * 60 * 60 * 1000 * 1000 * 1000 * 1000; - - set_datetimestruct_days(extract_unit(&dt, perday), out); - out->hour = (int)extract_unit(&dt, 1000LL*1000*1000*1000*60*60); - out->min = (int)extract_unit(&dt, 1000LL*1000*1000*1000*60); - out->sec = (int)extract_unit(&dt, 1000LL*1000*1000*1000); - out->us = (int)extract_unit(&dt, 1000LL*1000); + days = extract_unit_64(&dt, 1000LL*1000*1000*1000*60*60*24); + set_datetimestruct_days(days, out); + out->hour = (int)extract_unit_64(&dt, 1000LL*1000*1000*1000*60*60); + out->min = (int)extract_unit_64(&dt, 1000LL*1000*1000*1000*60); + out->sec = (int)extract_unit_64(&dt, 1000LL*1000*1000*1000); + out->us = (int)extract_unit_64(&dt, 1000LL*1000); out->ps = (int)(dt); break; case NPY_FR_fs: /* entire range is only +- 2.6 hours */ - out->hour = (int)extract_unit(&dt, 1000LL*1000*1000*1000*1000*60*60); + out->hour = (int)extract_unit_64(&dt, 1000LL*1000*1000*1000*1000*60*60); if (out->hour < 0) { out->year = 1969; out->month = 12; @@ -559,16 +559,16 @@ convert_datetime_to_datetimestruct(PyArray_DatetimeMetaData *meta, out->hour += 24; assert(out->hour >= 0); } - out->min = (int)extract_unit(&dt, 1000LL*1000*1000*1000*1000*60); - out->sec = (int)extract_unit(&dt, 1000LL*1000*1000*1000*1000); - out->us = (int)extract_unit(&dt, 1000LL*1000*1000); - out->ps = (int)extract_unit(&dt, 1000LL); + out->min = (int)extract_unit_64(&dt, 1000LL*1000*1000*1000*1000*60); + out->sec = (int)extract_unit_64(&dt, 1000LL*1000*1000*1000*1000); + out->us = (int)extract_unit_64(&dt, 1000LL*1000*1000); + out->ps = (int)extract_unit_64(&dt, 1000LL); out->as = (int)(dt * 1000); break; case NPY_FR_as: /* entire range is only +- 9.2 seconds */ - out->sec = (int)extract_unit(&dt, 1000LL*1000*1000*1000*1000*1000); + out->sec = (int)extract_unit_64(&dt, 1000LL*1000*1000*1000*1000*1000); if (out->sec < 0) { out->year = 1969; out->month = 12; @@ -578,8 +578,8 @@ convert_datetime_to_datetimestruct(PyArray_DatetimeMetaData *meta, out->sec += 60; assert(out->sec >= 0); } - out->us = (int)extract_unit(&dt, 1000LL*1000*1000*1000); - out->ps = (int)extract_unit(&dt, 1000LL*1000); + out->us = (int)extract_unit_64(&dt, 1000LL*1000*1000*1000); + out->ps = (int)extract_unit_64(&dt, 1000LL*1000); out->as = (int)dt; break; @@ -600,8 +600,9 @@ 
convert_datetime_to_datetimestruct(PyArray_DatetimeMetaData *meta, * TO BE REMOVED - NOT USED INTERNALLY. */ NPY_NO_EXPORT void -PyArray_DatetimeToDatetimeStruct(npy_datetime val, NPY_DATETIMEUNIT fr, - npy_datetimestruct *result) +PyArray_DatetimeToDatetimeStruct( + npy_datetime NPY_UNUSED(val), NPY_DATETIMEUNIT NPY_UNUSED(fr), + npy_datetimestruct *result) { PyErr_SetString(PyExc_RuntimeError, "The NumPy PyArray_DatetimeToDatetimeStruct function has " @@ -621,8 +622,9 @@ PyArray_DatetimeToDatetimeStruct(npy_datetime val, NPY_DATETIMEUNIT fr, * TO BE REMOVED - NOT USED INTERNALLY. */ NPY_NO_EXPORT void -PyArray_TimedeltaToTimedeltaStruct(npy_timedelta val, NPY_DATETIMEUNIT fr, - npy_timedeltastruct *result) +PyArray_TimedeltaToTimedeltaStruct( + npy_timedelta NPY_UNUSED(val), NPY_DATETIMEUNIT NPY_UNUSED(fr), + npy_timedeltastruct *result) { PyErr_SetString(PyExc_RuntimeError, "The NumPy PyArray_TimedeltaToTimedeltaStruct function has " @@ -756,8 +758,8 @@ parse_datetime_extended_unit_from_string(char *str, Py_ssize_t len, bad_input: if (metastr != NULL) { PyErr_Format(PyExc_TypeError, - "Invalid datetime metadata string \"%s\" at position %d", - metastr, (int)(substr-metastr)); + "Invalid datetime metadata string \"%s\" at position %zd", + metastr, substr-metastr); } else { PyErr_Format(PyExc_TypeError, @@ -818,8 +820,8 @@ parse_datetime_metadata_from_metastr(char *metastr, Py_ssize_t len, bad_input: if (substr != metastr) { PyErr_Format(PyExc_TypeError, - "Invalid datetime metadata string \"%s\" at position %d", - metastr, (int)(substr-metastr)); + "Invalid datetime metadata string \"%s\" at position %zd", + metastr, substr - metastr); } else { PyErr_Format(PyExc_TypeError, @@ -2013,20 +2015,8 @@ add_seconds_to_datetimestruct(npy_datetimestruct *dts, int seconds) int minutes; dts->sec += seconds; - if (dts->sec < 0) { - minutes = dts->sec / 60; - dts->sec = dts->sec % 60; - if (dts->sec < 0) { - --minutes; - dts->sec += 60; - } - add_minutes_to_datetimestruct(dts, minutes); - } - else if (dts->sec >= 60) { - minutes = dts->sec / 60; - dts->sec = dts->sec % 60; - add_minutes_to_datetimestruct(dts, minutes); - } + minutes = extract_unit_32(&dts->sec, 60); + add_minutes_to_datetimestruct(dts, minutes); } /* @@ -2038,28 +2028,13 @@ add_minutes_to_datetimestruct(npy_datetimestruct *dts, int minutes) { int isleap; - /* MINUTES */ dts->min += minutes; - while (dts->min < 0) { - dts->min += 60; - dts->hour--; - } - while (dts->min >= 60) { - dts->min -= 60; - dts->hour++; - } - /* HOURS */ - while (dts->hour < 0) { - dts->hour += 24; - dts->day--; - } - while (dts->hour >= 24) { - dts->hour -= 24; - dts->day++; - } + /* propagate invalid minutes into hour and day changes */ + dts->hour += extract_unit_32(&dts->min, 60); + dts->day += extract_unit_32(&dts->hour, 24); - /* DAYS */ + /* propagate invalid days into month and year changes */ if (dts->day < 1) { dts->month--; if (dts->month < 1) { @@ -2298,15 +2273,15 @@ convert_pydatetime_to_datetimestruct(PyObject *obj, npy_datetimestruct *out, invalid_date: PyErr_Format(PyExc_ValueError, - "Invalid date (%d,%d,%d) when converting to NumPy datetime", - (int)out->year, (int)out->month, (int)out->day); + "Invalid date (%" NPY_INT64_FMT ",%" NPY_INT32_FMT ",%" NPY_INT32_FMT ") when converting to NumPy datetime", + out->year, out->month, out->day); return -1; invalid_time: PyErr_Format(PyExc_ValueError, - "Invalid time (%d,%d,%d,%d) when converting " + "Invalid time (%" NPY_INT32_FMT ",%" NPY_INT32_FMT ",%" NPY_INT32_FMT ",%" NPY_INT32_FMT ") when 
converting " "to NumPy datetime", - (int)out->hour, (int)out->min, (int)out->sec, (int)out->us); + out->hour, out->min, out->sec, out->us); return -1; } @@ -2886,7 +2861,6 @@ convert_datetime_to_pyobject(npy_datetime dt, PyArray_DatetimeMetaData *meta) NPY_NO_EXPORT PyObject * convert_timedelta_to_pyobject(npy_timedelta td, PyArray_DatetimeMetaData *meta) { - PyObject *ret = NULL; npy_timedelta value; int days = 0, seconds = 0, useconds = 0; @@ -2916,54 +2890,47 @@ convert_timedelta_to_pyobject(npy_timedelta td, PyArray_DatetimeMetaData *meta) /* Convert to days/seconds/useconds */ switch (meta->base) { case NPY_FR_W: - value *= 7; + days = value * 7; break; case NPY_FR_D: + days = value; break; case NPY_FR_h: - seconds = (int)((value % 24) * (60*60)); - value = value / 24; + days = extract_unit_64(&value, 24ULL); + seconds = value*60*60; break; case NPY_FR_m: - seconds = (int)(value % (24*60)) * 60; - value = value / (24*60); + days = extract_unit_64(&value, 60ULL*24); + seconds = value*60; break; case NPY_FR_s: - seconds = (int)(value % (24*60*60)); - value = value / (24*60*60); + days = extract_unit_64(&value, 60ULL*60*24); + seconds = value; break; case NPY_FR_ms: - useconds = (int)(value % 1000) * 1000; - value = value / 1000; - seconds = (int)(value % (24*60*60)); - value = value / (24*60*60); + days = extract_unit_64(&value, 1000ULL*60*60*24); + seconds = extract_unit_64(&value, 1000ULL); + useconds = value*1000; break; case NPY_FR_us: - useconds = (int)(value % (1000*1000)); - value = value / (1000*1000); - seconds = (int)(value % (24*60*60)); - value = value / (24*60*60); + days = extract_unit_64(&value, 1000ULL*1000*60*60*24); + seconds = extract_unit_64(&value, 1000ULL*1000); + useconds = value; break; default: + // unreachable, handled by the `if` above + assert(NPY_FALSE); break; } /* - * 'value' represents days, and seconds/useconds are filled. - * * If it would overflow the datetime.timedelta days, return a raw int */ - if (value < -999999999 || value > 999999999) { + if (days < -999999999 || days > 999999999) { return PyLong_FromLongLong(td); } else { - days = (int)value; - ret = PyDelta_FromDSU(days, seconds, useconds); - if (ret == NULL) { - return NULL; - } + return PyDelta_FromDSU(days, seconds, useconds); } - - return ret; } /* @@ -3128,7 +3095,7 @@ is_any_numpy_datetime_or_timedelta(PyObject *obj) */ NPY_NO_EXPORT int convert_pyobjects_to_datetimes(int count, - PyObject **objs, int *type_nums, + PyObject **objs, const int *type_nums, NPY_CASTING casting, npy_int64 *out_values, PyArray_DatetimeMetaData *inout_meta) @@ -3254,18 +3221,6 @@ NPY_NO_EXPORT PyArrayObject * datetime_arange(PyObject *start, PyObject *stop, PyObject *step, PyArray_Descr *dtype) { - PyArray_DatetimeMetaData meta; - /* - * Both datetime and timedelta are stored as int64, so they can - * share value variables. 
- */ - npy_int64 values[3]; - PyObject *objs[3]; - int type_nums[3]; - - npy_intp i, length; - PyArrayObject *ret; - npy_int64 *ret_data; /* * First normalize the input parameters so there is no Py_None, @@ -3298,6 +3253,8 @@ datetime_arange(PyObject *start, PyObject *stop, PyObject *step, /* Check if the units of the given dtype are generic, in which * case we use the code path that detects the units */ + int type_nums[3]; + PyArray_DatetimeMetaData meta; if (dtype != NULL) { PyArray_DatetimeMetaData *meta_tmp; @@ -3346,6 +3303,7 @@ datetime_arange(PyObject *start, PyObject *stop, PyObject *step, } /* Set up to convert the objects to a common datetime unit metadata */ + PyObject *objs[3]; objs[0] = start; objs[1] = stop; objs[2] = step; @@ -3366,11 +3324,22 @@ datetime_arange(PyObject *start, PyObject *stop, PyObject *step, type_nums[2] = NPY_TIMEDELTA; } - /* Convert all the arguments */ + /* Convert all the arguments + * + * Both datetime and timedelta are stored as int64, so they can + * share value variables. + */ + npy_int64 values[3]; if (convert_pyobjects_to_datetimes(3, objs, type_nums, NPY_SAME_KIND_CASTING, values, &meta) < 0) { return NULL; } + /* If no start was provided, default to 0 */ + if (start == NULL) { + /* enforced above */ + assert(type_nums[0] == NPY_TIMEDELTA); + values[0] = 0; + } /* If no step was provided, default to 1 */ if (step == NULL) { @@ -3395,6 +3364,7 @@ datetime_arange(PyObject *start, PyObject *stop, PyObject *step, } /* Calculate the array length */ + npy_intp length; if (values[2] > 0 && values[1] > values[0]) { length = (values[1] - values[0] + (values[2] - 1)) / values[2]; } @@ -3422,19 +3392,20 @@ datetime_arange(PyObject *start, PyObject *stop, PyObject *step, } /* Create the result array */ - ret = (PyArrayObject *)PyArray_NewFromDescr( - &PyArray_Type, dtype, 1, &length, NULL, - NULL, 0, NULL); + PyArrayObject *ret = (PyArrayObject *)PyArray_NewFromDescr( + &PyArray_Type, dtype, 1, &length, NULL, + NULL, 0, NULL); + if (ret == NULL) { return NULL; } if (length > 0) { /* Extract the data pointer */ - ret_data = (npy_int64 *)PyArray_DATA(ret); + npy_int64 *ret_data = (npy_int64 *)PyArray_DATA(ret); /* Create the timedeltas or datetimes */ - for (i = 0; i < length; ++i) { + for (npy_intp i = 0; i < length; ++i) { *ret_data = values[0]; values[0] += values[2]; ret_data++; diff --git a/numpy/core/src/multiarray/datetime_busday.c b/numpy/core/src/multiarray/datetime_busday.c index c04a6c125..cdeb65d0e 100644 --- a/numpy/core/src/multiarray/datetime_busday.c +++ b/numpy/core/src/multiarray/datetime_busday.c @@ -48,7 +48,7 @@ get_day_of_week(npy_datetime date) */ static int is_holiday(npy_datetime date, - npy_datetime *holidays_begin, npy_datetime *holidays_end) + npy_datetime *holidays_begin, const npy_datetime *holidays_end) { npy_datetime *trial; @@ -88,7 +88,7 @@ is_holiday(npy_datetime date, */ static npy_datetime * find_earliest_holiday_on_or_after(npy_datetime date, - npy_datetime *holidays_begin, npy_datetime *holidays_end) + npy_datetime *holidays_begin, const npy_datetime *holidays_end) { npy_datetime *trial; @@ -127,7 +127,7 @@ find_earliest_holiday_on_or_after(npy_datetime date, */ static npy_datetime * find_earliest_holiday_after(npy_datetime date, - npy_datetime *holidays_begin, npy_datetime *holidays_end) + npy_datetime *holidays_begin, const npy_datetime *holidays_end) { npy_datetime *trial; @@ -159,7 +159,7 @@ static int apply_business_day_roll(npy_datetime date, npy_datetime *out, int *out_day_of_week, NPY_BUSDAY_ROLL roll, - 
npy_bool *weekmask, + const npy_bool *weekmask, npy_datetime *holidays_begin, npy_datetime *holidays_end) { int day_of_week; @@ -361,7 +361,7 @@ apply_business_day_offset(npy_datetime date, npy_int64 offset, static int apply_business_day_count(npy_datetime date_begin, npy_datetime date_end, npy_int64 *out, - npy_bool *weekmask, int busdays_in_weekmask, + const npy_bool *weekmask, int busdays_in_weekmask, npy_datetime *holidays_begin, npy_datetime *holidays_end) { npy_int64 count, whole_weeks; @@ -722,7 +722,7 @@ finish: */ NPY_NO_EXPORT PyArrayObject * is_business_day(PyArrayObject *dates, PyArrayObject *out, - npy_bool *weekmask, int busdays_in_weekmask, + const npy_bool *weekmask, int busdays_in_weekmask, npy_datetime *holidays_begin, npy_datetime *holidays_end) { PyArray_DatetimeMetaData temp_meta; diff --git a/numpy/core/src/multiarray/datetime_strings.c b/numpy/core/src/multiarray/datetime_strings.c index 95b7bb3dc..dfc01494f 100644 --- a/numpy/core/src/multiarray/datetime_strings.c +++ b/numpy/core/src/multiarray/datetime_strings.c @@ -743,8 +743,8 @@ finish: parse_error: PyErr_Format(PyExc_ValueError, - "Error parsing datetime string \"%s\" at position %d", - str, (int)(substr-str)); + "Error parsing datetime string \"%s\" at position %zd", + str, substr - str); return -1; error: diff --git a/numpy/core/src/multiarray/descriptor.c b/numpy/core/src/multiarray/descriptor.c index c7db092e6..522b69307 100644 --- a/numpy/core/src/multiarray/descriptor.c +++ b/numpy/core/src/multiarray/descriptor.c @@ -149,7 +149,7 @@ array_set_typeDict(PyObject *NPY_UNUSED(ignored), PyObject *args) arg == '|' || arg == '=') static int -_check_for_commastring(char *type, Py_ssize_t len) +_check_for_commastring(const char *type, Py_ssize_t len) { Py_ssize_t i; int sqbracket; @@ -1149,8 +1149,8 @@ _convert_from_dict(PyObject *obj, int align) } Py_DECREF(off); if (offset < 0) { - PyErr_Format(PyExc_ValueError, "offset %d cannot be negative", - (int)offset); + PyErr_Format(PyExc_ValueError, "offset %ld cannot be negative", + offset); Py_DECREF(tup); Py_DECREF(ind); goto fail; @@ -1164,10 +1164,10 @@ _convert_from_dict(PyObject *obj, int align) /* If align=True, enforce field alignment */ if (align && offset % newdescr->alignment != 0) { PyErr_Format(PyExc_ValueError, - "offset %d for NumPy dtype with fields is " + "offset %ld for NumPy dtype with fields is " "not divisible by the field alignment %d " "with align=True", - (int)offset, (int)newdescr->alignment); + offset, newdescr->alignment); ret = NPY_FAIL; } else if (offset + newdescr->elsize > totalsize) { @@ -1286,7 +1286,7 @@ _convert_from_dict(PyObject *obj, int align) PyErr_Format(PyExc_ValueError, "NumPy dtype descriptor requires %d bytes, " "cannot override to smaller itemsize of %d", - (int)new->elsize, (int)itemsize); + new->elsize, itemsize); Py_DECREF(new); goto fail; } @@ -1295,7 +1295,7 @@ _convert_from_dict(PyObject *obj, int align) PyErr_Format(PyExc_ValueError, "NumPy dtype descriptor requires alignment of %d bytes, " "which is not divisible into the specified itemsize %d", - (int)new->alignment, (int)itemsize); + new->alignment, itemsize); Py_DECREF(new); goto fail; } @@ -1385,7 +1385,6 @@ NPY_NO_EXPORT int PyArray_DescrConverter(PyObject *obj, PyArray_Descr **at) { int check_num = NPY_NOTYPE + 10; - PyObject *item; int elsize = 0; char endian = '='; @@ -1664,16 +1663,22 @@ finish: PyErr_Clear(); /* Now check to see if the object is registered in typeDict */ if (typeDict != NULL) { - item = PyDict_GetItem(typeDict, obj); + PyObject *item = 
NULL; #if defined(NPY_PY3K) - if (!item && PyBytes_Check(obj)) { + if (PyBytes_Check(obj)) { PyObject *tmp; tmp = PyUnicode_FromEncodedObject(obj, "ascii", "strict"); - if (tmp != NULL) { - item = PyDict_GetItem(typeDict, tmp); - Py_DECREF(tmp); + if (tmp == NULL) { + goto fail; } + item = PyDict_GetItem(typeDict, tmp); + Py_DECREF(tmp); + } + else { + item = PyDict_GetItem(typeDict, obj); } +#else + item = PyDict_GetItem(typeDict, obj); #endif if (item) { /* Check for a deprecated Numeric-style typecode */ @@ -3277,7 +3282,7 @@ arraydescr_richcompare(PyArray_Descr *self, PyObject *other, int cmp_op) } static int -descr_nonzero(PyObject *self) +descr_nonzero(PyObject *NPY_UNUSED(self)) { /* `bool(np.dtype(...)) == True` for all dtypes. Needed to override default * nonzero implementation, which checks if `len(object) > 0`. */ diff --git a/numpy/core/src/multiarray/dtype_transfer.c b/numpy/core/src/multiarray/dtype_transfer.c index a90416a40..ef0dd4a01 100644 --- a/numpy/core/src/multiarray/dtype_transfer.c +++ b/numpy/core/src/multiarray/dtype_transfer.c @@ -3337,7 +3337,7 @@ get_decsrcref_transfer_function(int aligned, /* If there are subarrays, need to wrap it */ else if (PyDataType_HASSUBARRAY(src_dtype)) { PyArray_Dims src_shape = {NULL, -1}; - npy_intp src_size = 1; + npy_intp src_size; PyArray_StridedUnaryOp *stransfer; NpyAuxData *data; diff --git a/numpy/core/src/multiarray/einsum.c.src b/numpy/core/src/multiarray/einsum.c.src index e7bbc3d0b..58af44091 100644 --- a/numpy/core/src/multiarray/einsum.c.src +++ b/numpy/core/src/multiarray/einsum.c.src @@ -1876,7 +1876,7 @@ parse_operand_subscripts(char *subscripts, int length, * later where it matters the char is cast to a signed char. */ for (idim = 0; idim < ndim - 1; ++idim) { - int label = op_labels[idim]; + int label = (signed char)op_labels[idim]; /* If it is a proper label, find any duplicates of it. */ if (label > 0) { /* Search for the next matching label. */ diff --git a/numpy/core/src/multiarray/getset.c b/numpy/core/src/multiarray/getset.c index bed92403f..116e37ce5 100644 --- a/numpy/core/src/multiarray/getset.c +++ b/numpy/core/src/multiarray/getset.c @@ -190,7 +190,7 @@ array_strides_set(PyArrayObject *self, PyObject *obj) static PyObject * -array_priority_get(PyArrayObject *self) +array_priority_get(PyArrayObject *NPY_UNUSED(self)) { return PyFloat_FromDouble(NPY_PRIORITY); } diff --git a/numpy/core/src/multiarray/item_selection.c b/numpy/core/src/multiarray/item_selection.c index 19de04a93..a6ac902d3 100644 --- a/numpy/core/src/multiarray/item_selection.c +++ b/numpy/core/src/multiarray/item_selection.c @@ -1336,7 +1336,11 @@ PyArray_ArgPartition(PyArrayObject *op, PyArrayObject *ktharray, int axis, PyArray_ArgSortFunc *argsort; PyObject *ret; - if (which < 0 || which >= NPY_NSELECTS) { + /* + * As a C-exported function, enum NPY_SELECTKIND loses its enum property + * Check the values to make sure they are in range + */ + if ((int)which < 0 || (int)which >= NPY_NSELECTS) { PyErr_SetString(PyExc_ValueError, "not a valid partition kind"); return NULL; @@ -1519,7 +1523,7 @@ PyArray_LexSort(PyObject *sort_keys, int axis) int *swaps; assert(N > 0); /* Guaranteed and assumed by indbuffer */ - int valbufsize = N * maxelsize; + npy_intp valbufsize = N * maxelsize; if (NPY_UNLIKELY(valbufsize) == 0) { valbufsize = 1; /* Ensure allocation is not empty */ } @@ -2475,7 +2479,7 @@ finish: * array of values, which must be of length PyArray_NDIM(self). 
*/ NPY_NO_EXPORT PyObject * -PyArray_MultiIndexGetItem(PyArrayObject *self, npy_intp *multi_index) +PyArray_MultiIndexGetItem(PyArrayObject *self, const npy_intp *multi_index) { int idim, ndim = PyArray_NDIM(self); char *data = PyArray_DATA(self); @@ -2503,7 +2507,7 @@ PyArray_MultiIndexGetItem(PyArrayObject *self, npy_intp *multi_index) * Returns 0 on success, -1 on failure. */ NPY_NO_EXPORT int -PyArray_MultiIndexSetItem(PyArrayObject *self, npy_intp *multi_index, +PyArray_MultiIndexSetItem(PyArrayObject *self, const npy_intp *multi_index, PyObject *obj) { int idim, ndim = PyArray_NDIM(self); diff --git a/numpy/core/src/multiarray/item_selection.h b/numpy/core/src/multiarray/item_selection.h index 90bb5100d..2276b4db7 100644 --- a/numpy/core/src/multiarray/item_selection.h +++ b/numpy/core/src/multiarray/item_selection.h @@ -15,7 +15,7 @@ count_boolean_trues(int ndim, char *data, npy_intp *ashape, npy_intp *astrides); * array of values, which must be of length PyArray_NDIM(self). */ NPY_NO_EXPORT PyObject * -PyArray_MultiIndexGetItem(PyArrayObject *self, npy_intp *multi_index); +PyArray_MultiIndexGetItem(PyArrayObject *self, const npy_intp *multi_index); /* * Sets a single item in the array, based on a single multi-index @@ -24,7 +24,7 @@ PyArray_MultiIndexGetItem(PyArrayObject *self, npy_intp *multi_index); * Returns 0 on success, -1 on failure. */ NPY_NO_EXPORT int -PyArray_MultiIndexSetItem(PyArrayObject *self, npy_intp *multi_index, +PyArray_MultiIndexSetItem(PyArrayObject *self, const npy_intp *multi_index, PyObject *obj); #endif diff --git a/numpy/core/src/multiarray/iterators.c b/numpy/core/src/multiarray/iterators.c index 0d7679fe7..e66bb36aa 100644 --- a/numpy/core/src/multiarray/iterators.c +++ b/numpy/core/src/multiarray/iterators.c @@ -98,7 +98,7 @@ parse_index_entry(PyObject *op, npy_intp *step_size, /* get the dataptr from its current coordinates for simple iterator */ static char* -get_ptr_simple(PyArrayIterObject* iter, npy_intp *coordinates) +get_ptr_simple(PyArrayIterObject* iter, const npy_intp *coordinates) { npy_intp i; char *ret; @@ -840,7 +840,6 @@ iter_ass_subscript(PyArrayIterObject *self, PyObject *ind, PyObject *val) if (check_and_adjust_index(&start, self->size, -1, NULL) < 0) { goto finish; } - retval = 0; PyArray_ITER_GOTO1D(self, start); retval = type->f->setitem(val, self->dataptr, self->ao); PyArray_ITER_RESET(self); @@ -1666,7 +1665,7 @@ static char* _set_constant(PyArrayNeighborhoodIterObject* iter, /* set the dataptr from its current coordinates */ static char* -get_ptr_constant(PyArrayIterObject* _iter, npy_intp *coordinates) +get_ptr_constant(PyArrayIterObject* _iter, const npy_intp *coordinates) { int i; npy_intp bd, _coordinates[NPY_MAXDIMS]; @@ -1721,7 +1720,7 @@ __npy_pos_remainder(npy_intp i, npy_intp n) /* set the dataptr from its current coordinates */ static char* -get_ptr_mirror(PyArrayIterObject* _iter, npy_intp *coordinates) +get_ptr_mirror(PyArrayIterObject* _iter, const npy_intp *coordinates) { int i; npy_intp bd, _coordinates[NPY_MAXDIMS], lb; @@ -1755,7 +1754,7 @@ __npy_euclidean_division(npy_intp i, npy_intp n) _coordinates[c] = lb + __npy_euclidean_division(bd, p->limits_sizes[c]); static char* -get_ptr_circular(PyArrayIterObject* _iter, npy_intp *coordinates) +get_ptr_circular(PyArrayIterObject* _iter, const npy_intp *coordinates) { int i; npy_intp bd, _coordinates[NPY_MAXDIMS], lb; @@ -1777,7 +1776,7 @@ get_ptr_circular(PyArrayIterObject* _iter, npy_intp *coordinates) * A Neighborhood Iterator object. 
*/ NPY_NO_EXPORT PyObject* -PyArray_NeighborhoodIterNew(PyArrayIterObject *x, npy_intp *bounds, +PyArray_NeighborhoodIterNew(PyArrayIterObject *x, const npy_intp *bounds, int mode, PyArrayObject* fill) { int i; diff --git a/numpy/core/src/multiarray/mapping.c b/numpy/core/src/multiarray/mapping.c index 9bb85e320..8dcd28c84 100644 --- a/numpy/core/src/multiarray/mapping.c +++ b/numpy/core/src/multiarray/mapping.c @@ -176,7 +176,7 @@ unpack_tuple(PyTupleObject *index, PyObject **result, npy_intp result_n) /* Unpack a single scalar index, taking a new reference to match unpack_tuple */ static NPY_INLINE npy_intp -unpack_scalar(PyObject *index, PyObject **result, npy_intp result_n) +unpack_scalar(PyObject *index, PyObject **result, npy_intp NPY_UNUSED(result_n)) { Py_INCREF(index); result[0] = index; @@ -1198,9 +1198,9 @@ array_assign_boolean_subscript(PyArrayObject *self, if (size != PyArray_DIMS(v)[0]) { PyErr_Format(PyExc_ValueError, "NumPy boolean array indexing assignment " - "cannot assign %d input values to " - "the %d output values where the mask is true", - (int)PyArray_DIMS(v)[0], (int)size); + "cannot assign %" NPY_INTP_FMT " input values to " + "the %" NPY_INTP_FMT " output values where the mask is true", + PyArray_DIMS(v)[0], size); return -1; } v_stride = PyArray_STRIDES(v)[0]; diff --git a/numpy/core/src/multiarray/methods.c b/numpy/core/src/multiarray/methods.c index 79c60aa2e..e5845f2f6 100644 --- a/numpy/core/src/multiarray/methods.c +++ b/numpy/core/src/multiarray/methods.c @@ -1051,7 +1051,7 @@ any_array_ufunc_overrides(PyObject *args, PyObject *kwds) NPY_NO_EXPORT PyObject * -array_ufunc(PyArrayObject *self, PyObject *args, PyObject *kwds) +array_ufunc(PyArrayObject *NPY_UNUSED(self), PyObject *args, PyObject *kwds) { PyObject *ufunc, *method_name, *normal_args, *ufunc_method; PyObject *result = NULL; @@ -1100,7 +1100,7 @@ cleanup: } static PyObject * -array_function(PyArrayObject *self, PyObject *c_args, PyObject *c_kwds) +array_function(PyArrayObject *NPY_UNUSED(self), PyObject *c_args, PyObject *c_kwds) { PyObject *func, *types, *args, *kwargs, *result; static char *kwlist[] = {"func", "types", "args", "kwargs", NULL}; @@ -1179,7 +1179,7 @@ array_resize(PyArrayObject *self, PyObject *args, PyObject *kwds) return NULL; } - ret = PyArray_Resize(self, &newshape, refcheck, NPY_CORDER); + ret = PyArray_Resize(self, &newshape, refcheck, NPY_ANYORDER); npy_free_cache_dim_obj(newshape); if (ret == NULL) { return NULL; @@ -1732,7 +1732,7 @@ array_reduce(PyArrayObject *self, PyObject *NPY_UNUSED(args)) } static PyObject * -array_reduce_ex_regular(PyArrayObject *self, int protocol) +array_reduce_ex_regular(PyArrayObject *self, int NPY_UNUSED(protocol)) { PyObject *subclass_array_reduce = NULL; PyObject *ret; @@ -1861,7 +1861,7 @@ array_reduce_ex(PyArrayObject *self, PyObject *args) PyDataType_FLAGCHK(descr, NPY_ITEM_HASOBJECT) || (PyType_IsSubtype(((PyObject*)self)->ob_type, &PyArray_Type) && ((PyObject*)self)->ob_type != &PyArray_Type) || - PyDataType_ISUNSIZED(descr)) { + descr->elsize == 0) { /* The PickleBuffer class from version 5 of the pickle protocol * can only be used for arrays backed by a contiguous data buffer. 
* For all other cases we fallback to the generic array_reduce diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c index bef978c94..441567049 100644 --- a/numpy/core/src/multiarray/multiarraymodule.c +++ b/numpy/core/src/multiarray/multiarraymodule.c @@ -286,7 +286,8 @@ PyArray_AsCArray(PyObject **op, void *ptr, npy_intp *dims, int nd, * Convert to a 1D C-array */ NPY_NO_EXPORT int -PyArray_As1D(PyObject **op, char **ptr, int *d1, int typecode) +PyArray_As1D(PyObject **NPY_UNUSED(op), char **NPY_UNUSED(ptr), + int *NPY_UNUSED(d1), int NPY_UNUSED(typecode)) { /* 2008-07-14, 1.5 */ PyErr_SetString(PyExc_NotImplementedError, @@ -298,7 +299,8 @@ PyArray_As1D(PyObject **op, char **ptr, int *d1, int typecode) * Convert to a 2D C-array */ NPY_NO_EXPORT int -PyArray_As2D(PyObject **op, char ***ptr, int *d1, int *d2, int typecode) +PyArray_As2D(PyObject **NPY_UNUSED(op), char ***NPY_UNUSED(ptr), + int *NPY_UNUSED(d1), int *NPY_UNUSED(d2), int NPY_UNUSED(typecode)) { /* 2008-07-14, 1.5 */ PyErr_SetString(PyExc_NotImplementedError, @@ -1560,7 +1562,8 @@ _array_fromobject(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kws) PyArrayObject *oparr = NULL, *ret = NULL; npy_bool subok = NPY_FALSE; npy_bool copy = NPY_TRUE; - int ndmin = 0, nd; + int nd; + npy_intp ndmin = 0; PyArray_Descr *type = NULL; PyArray_Descr *oldtype = NULL; NPY_ORDER order = NPY_KEEPORDER; @@ -1631,12 +1634,10 @@ _array_fromobject(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kws) } } - /* copy=False with default dtype, order and ndim */ - if (STRIDING_OK(oparr, order)) { - ret = oparr; - Py_INCREF(ret); - goto finish; - } + /* copy=False with default dtype, order (any is OK) and ndim */ + ret = oparr; + Py_INCREF(ret); + goto finish; } } @@ -3781,7 +3782,7 @@ _vec_string_no_args(PyArrayObject* char_array, } static PyObject * -_vec_string(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds) +_vec_string(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *NPY_UNUSED(kwds)) { PyArrayObject* char_array = NULL; PyArray_Descr *type; diff --git a/numpy/core/src/multiarray/nditer_api.c b/numpy/core/src/multiarray/nditer_api.c index 18ca127e1..e7fe0fa50 100644 --- a/numpy/core/src/multiarray/nditer_api.c +++ b/numpy/core/src/multiarray/nditer_api.c @@ -371,8 +371,8 @@ NpyIter_ResetToIterIndexRange(NpyIter *iter, } if (errmsg == NULL) { PyErr_Format(PyExc_ValueError, - "Out-of-bounds range [%d, %d) passed to " - "ResetToIterIndexRange", (int)istart, (int)iend); + "Out-of-bounds range [%" NPY_INTP_FMT ", %" NPY_INTP_FMT ") passed to " + "ResetToIterIndexRange", istart, iend); } else { *errmsg = "Out-of-bounds range passed to ResetToIterIndexRange"; @@ -382,8 +382,8 @@ NpyIter_ResetToIterIndexRange(NpyIter *iter, else if (iend < istart) { if (errmsg == NULL) { PyErr_Format(PyExc_ValueError, - "Invalid range [%d, %d) passed to ResetToIterIndexRange", - (int)istart, (int)iend); + "Invalid range [%" NPY_INTP_FMT ", %" NPY_INTP_FMT ") passed to ResetToIterIndexRange", + istart, iend); } else { *errmsg = "Invalid range passed to ResetToIterIndexRange"; @@ -1429,8 +1429,8 @@ NpyIter_DebugPrint(NpyIter *iter) printf("REUSE_REDUCE_LOOPS "); printf("\n"); - printf("| NDim: %d\n", (int)ndim); - printf("| NOp: %d\n", (int)nop); + printf("| NDim: %d\n", ndim); + printf("| NOp: %d\n", nop); if (NIT_MASKOP(iter) >= 0) { printf("| MaskOp: %d\n", (int)NIT_MASKOP(iter)); } @@ -1628,15 +1628,12 @@ npyiter_coalesce_axes(NpyIter *iter) npy_intp istrides, nstrides = 
NAD_NSTRIDES(); NpyIter_AxisData *axisdata = NIT_AXISDATA(iter); npy_intp sizeof_axisdata = NIT_AXISDATA_SIZEOF(itflags, ndim, nop); - NpyIter_AxisData *ad_compress; + NpyIter_AxisData *ad_compress = axisdata; npy_intp new_ndim = 1; /* The HASMULTIINDEX or IDENTPERM flags do not apply after coalescing */ NIT_ITFLAGS(iter) &= ~(NPY_ITFLAG_IDENTPERM|NPY_ITFLAG_HASMULTIINDEX); - axisdata = NIT_AXISDATA(iter); - ad_compress = axisdata; - for (idim = 0; idim < ndim-1; ++idim) { int can_coalesce = 1; npy_intp shape0 = NAD_SHAPE(ad_compress); diff --git a/numpy/core/src/multiarray/nditer_constr.c b/numpy/core/src/multiarray/nditer_constr.c index 3b3635afe..5e770338d 100644 --- a/numpy/core/src/multiarray/nditer_constr.c +++ b/numpy/core/src/multiarray/nditer_constr.c @@ -24,7 +24,7 @@ static int npyiter_check_global_flags(npy_uint32 flags, npy_uint32* itflags); static int npyiter_check_op_axes(int nop, int oa_ndim, int **op_axes, - npy_intp *itershape); + const npy_intp *itershape); static int npyiter_calculate_ndim(int nop, PyArrayObject **op_in, int oa_ndim); @@ -55,7 +55,7 @@ npyiter_check_casting(int nop, PyArrayObject **op, static int npyiter_fill_axisdata(NpyIter *iter, npy_uint32 flags, npyiter_opitflags *op_itflags, char **op_dataptr, - npy_uint32 *op_flags, int **op_axes, + const npy_uint32 *op_flags, int **op_axes, npy_intp *itershape); static void npyiter_replace_axisdata(NpyIter *iter, int iop, @@ -74,23 +74,23 @@ static void npyiter_find_best_axis_ordering(NpyIter *iter); static PyArray_Descr * npyiter_get_common_dtype(int nop, PyArrayObject **op, - npyiter_opitflags *op_itflags, PyArray_Descr **op_dtype, + const npyiter_opitflags *op_itflags, PyArray_Descr **op_dtype, PyArray_Descr **op_request_dtypes, int only_inputs); static PyArrayObject * npyiter_new_temp_array(NpyIter *iter, PyTypeObject *subtype, npy_uint32 flags, npyiter_opitflags *op_itflags, int op_ndim, npy_intp *shape, - PyArray_Descr *op_dtype, int *op_axes); + PyArray_Descr *op_dtype, const int *op_axes); static int npyiter_allocate_arrays(NpyIter *iter, npy_uint32 flags, PyArray_Descr **op_dtype, PyTypeObject *subtype, - npy_uint32 *op_flags, npyiter_opitflags *op_itflags, + const npy_uint32 *op_flags, npyiter_opitflags *op_itflags, int **op_axes); static void npyiter_get_priority_subtype(int nop, PyArrayObject **op, - npyiter_opitflags *op_itflags, + const npyiter_opitflags *op_itflags, double *subtype_priority, PyTypeObject **subtype); static int npyiter_allocate_transfer_functions(NpyIter *iter); @@ -154,7 +154,7 @@ NpyIter_AdvancedNew(int nop, PyArrayObject **op_in, npy_uint32 flags, if (nop > NPY_MAXARGS) { PyErr_Format(PyExc_ValueError, "Cannot construct an iterator with more than %d operands " - "(%d were requested)", (int)NPY_MAXARGS, (int)nop); + "(%d were requested)", NPY_MAXARGS, nop); return NULL; } @@ -787,7 +787,7 @@ npyiter_check_global_flags(npy_uint32 flags, npy_uint32* itflags) static int npyiter_check_op_axes(int nop, int oa_ndim, int **op_axes, - npy_intp *itershape) + const npy_intp *itershape) { char axes_dupcheck[NPY_MAXDIMS]; int iop, idim; @@ -810,7 +810,7 @@ npyiter_check_op_axes(int nop, int oa_ndim, int **op_axes, PyErr_Format(PyExc_ValueError, "Cannot construct an iterator with more than %d dimensions " "(%d were requested for op_axes)", - (int)NPY_MAXDIMS, oa_ndim); + NPY_MAXDIMS, oa_ndim); return 0; } if (op_axes == NULL) { @@ -826,14 +826,14 @@ npyiter_check_op_axes(int nop, int oa_ndim, int **op_axes, if (axes != NULL) { memset(axes_dupcheck, 0, NPY_MAXDIMS); for (idim = 0; idim < 
oa_ndim; ++idim) { - npy_intp i = axes[idim]; + int i = axes[idim]; if (i >= 0) { if (i >= NPY_MAXDIMS) { PyErr_Format(PyExc_ValueError, "The 'op_axes' provided to the iterator " "constructor for operand %d " "contained invalid " - "values %d", (int)iop, (int)i); + "values %d", iop, i); return 0; } else if (axes_dupcheck[i] == 1) { @@ -841,7 +841,7 @@ npyiter_check_op_axes(int nop, int oa_ndim, int **op_axes, "The 'op_axes' provided to the iterator " "constructor for operand %d " "contained duplicate " - "value %d", (int)iop, (int)i); + "value %d", iop, i); return 0; } else { @@ -1311,7 +1311,7 @@ npyiter_check_casting(int nop, PyArrayObject **op, PyObject *errmsg; errmsg = PyUString_FromFormat( "Iterator operand %d dtype could not be cast from ", - (int)iop); + iop); PyUString_ConcatAndDel(&errmsg, PyObject_Repr((PyObject *)PyArray_DESCR(op[iop]))); PyUString_ConcatAndDel(&errmsg, @@ -1342,7 +1342,7 @@ npyiter_check_casting(int nop, PyArrayObject **op, PyUString_ConcatAndDel(&errmsg, PyUString_FromFormat(", the operand %d dtype, " "according to the rule %s", - (int)iop, + iop, npyiter_casting_to_string(casting))); PyErr_SetObject(PyExc_TypeError, errmsg); Py_DECREF(errmsg); @@ -1423,7 +1423,7 @@ check_mask_for_writemasked_reduction(NpyIter *iter, int iop) static int npyiter_fill_axisdata(NpyIter *iter, npy_uint32 flags, npyiter_opitflags *op_itflags, char **op_dataptr, - npy_uint32 *op_flags, int **op_axes, + const npy_uint32 *op_flags, int **op_axes, npy_intp *itershape) { npy_uint32 itflags = NIT_ITFLAGS(iter); @@ -1500,8 +1500,8 @@ npyiter_fill_axisdata(NpyIter *iter, npy_uint32 flags, npyiter_opitflags *op_itf "Iterator input op_axes[%d][%d] (==%d) " "is not a valid axis of op[%d], which " "has %d dimensions ", - (int)iop, (int)(ndim-idim-1), (int)i, - (int)iop, (int)ondim); + iop, (ndim-idim-1), i, + iop, ondim); return 0; } } @@ -2409,7 +2409,7 @@ npyiter_find_best_axis_ordering(NpyIter *iter) */ static PyArray_Descr * npyiter_get_common_dtype(int nop, PyArrayObject **op, - npyiter_opitflags *op_itflags, PyArray_Descr **op_dtype, + const npyiter_opitflags *op_itflags, PyArray_Descr **op_dtype, PyArray_Descr **op_request_dtypes, int only_inputs) { @@ -2477,7 +2477,7 @@ static PyArrayObject * npyiter_new_temp_array(NpyIter *iter, PyTypeObject *subtype, npy_uint32 flags, npyiter_opitflags *op_itflags, int op_ndim, npy_intp *shape, - PyArray_Descr *op_dtype, int *op_axes) + PyArray_Descr *op_dtype, const int *op_axes) { npy_uint32 itflags = NIT_ITFLAGS(iter); int idim, ndim = NIT_NDIM(iter); @@ -2706,7 +2706,7 @@ static int npyiter_allocate_arrays(NpyIter *iter, npy_uint32 flags, PyArray_Descr **op_dtype, PyTypeObject *subtype, - npy_uint32 *op_flags, npyiter_opitflags *op_itflags, + const npy_uint32 *op_flags, npyiter_opitflags *op_itflags, int **op_axes) { npy_uint32 itflags = NIT_ITFLAGS(iter); @@ -3109,7 +3109,7 @@ npyiter_allocate_arrays(NpyIter *iter, */ static void npyiter_get_priority_subtype(int nop, PyArrayObject **op, - npyiter_opitflags *op_itflags, + const npyiter_opitflags *op_itflags, double *subtype_priority, PyTypeObject **subtype) { diff --git a/numpy/core/src/multiarray/nditer_pywrap.c b/numpy/core/src/multiarray/nditer_pywrap.c index ffea08bb3..246f9d382 100644 --- a/numpy/core/src/multiarray/nditer_pywrap.c +++ b/numpy/core/src/multiarray/nditer_pywrap.c @@ -82,7 +82,8 @@ static int npyiter_cache_values(NewNpyArrayIterObject *self) } static PyObject * -npyiter_new(PyTypeObject *subtype, PyObject *args, PyObject *kwds) +npyiter_new(PyTypeObject *subtype, PyObject 
*NPY_UNUSED(args), + PyObject *NPY_UNUSED(kwds)) { NewNpyArrayIterObject *self; @@ -535,7 +536,7 @@ try_single_dtype: } static int -npyiter_convert_op_axes(PyObject *op_axes_in, npy_intp nop, +npyiter_convert_op_axes(PyObject *op_axes_in, int nop, int **op_axes, int *oa_ndim) { PyObject *a; @@ -2015,7 +2016,7 @@ npyiter_seq_item(NewNpyArrayIterObject *self, Py_ssize_t i) if (i < 0 || i >= nop) { PyErr_Format(PyExc_IndexError, - "Iterator operand index %d is out of bounds", (int)i_orig); + "Iterator operand index %zd is out of bounds", i_orig); return NULL; } @@ -2029,7 +2030,7 @@ npyiter_seq_item(NewNpyArrayIterObject *self, Py_ssize_t i) */ if (!self->readflags[i]) { PyErr_Format(PyExc_RuntimeError, - "Iterator operand %d is write-only", (int)i); + "Iterator operand %zd is write-only", i); return NULL; } #endif @@ -2146,12 +2147,12 @@ npyiter_seq_ass_item(NewNpyArrayIterObject *self, Py_ssize_t i, PyObject *v) if (i < 0 || i >= nop) { PyErr_Format(PyExc_IndexError, - "Iterator operand index %d is out of bounds", (int)i_orig); + "Iterator operand index %zd is out of bounds", i_orig); return -1; } if (!self->writeflags[i]) { PyErr_Format(PyExc_RuntimeError, - "Iterator operand %d is not writeable", (int)i_orig); + "Iterator operand %zd is not writeable", i_orig); return -1; } @@ -2365,7 +2366,7 @@ npyiter_close(NewNpyArrayIterObject *self) } static PyObject * -npyiter_exit(NewNpyArrayIterObject *self, PyObject *args) +npyiter_exit(NewNpyArrayIterObject *self, PyObject *NPY_UNUSED(args)) { /* even if called via exception handling, writeback any data */ return npyiter_close(self); diff --git a/numpy/core/src/multiarray/number.c b/numpy/core/src/multiarray/number.c index 0ceb994ef..dabc866ff 100644 --- a/numpy/core/src/multiarray/number.c +++ b/numpy/core/src/multiarray/number.c @@ -391,7 +391,8 @@ array_matrix_multiply(PyArrayObject *m1, PyObject *m2) } static PyObject * -array_inplace_matrix_multiply(PyArrayObject *m1, PyObject *m2) +array_inplace_matrix_multiply( + PyArrayObject *NPY_UNUSED(m1), PyObject *NPY_UNUSED(m2)) { PyErr_SetString(PyExc_TypeError, "In-place matrix multiplication is not (yet) supported. " diff --git a/numpy/core/src/multiarray/scalartypes.c.src b/numpy/core/src/multiarray/scalartypes.c.src index 34839b866..9adca6773 100644 --- a/numpy/core/src/multiarray/scalartypes.c.src +++ b/numpy/core/src/multiarray/scalartypes.c.src @@ -4492,6 +4492,36 @@ initialize_numeric_types(void) PyArrayIter_Type.tp_iter = PyObject_SelfIter; PyArrayMapIter_Type.tp_iter = PyObject_SelfIter; + + /* + * Give types different names when they are the same size (gh-9799). + * `np.intX` always refers to the first int of that size in the sequence + * `['LONG', 'LONGLONG', 'INT', 'SHORT', 'BYTE']`. 
+ */ +#if (NPY_SIZEOF_BYTE == NPY_SIZEOF_SHORT) + PyByteArrType_Type.tp_name = "numpy.byte"; + PyUByteArrType_Type.tp_name = "numpy.ubyte"; +#endif +#if (NPY_SIZEOF_SHORT == NPY_SIZEOF_INT) + PyShortArrType_Type.tp_name = "numpy.short"; + PyUShortArrType_Type.tp_name = "numpy.ushort"; +#endif +#if (NPY_SIZEOF_INT == NPY_SIZEOF_LONG) + PyIntArrType_Type.tp_name = "numpy.intc"; + PyUIntArrType_Type.tp_name = "numpy.uintc"; +#endif +#if (NPY_SIZEOF_LONGLONG == NPY_SIZEOF_LONG) + PyLongLongArrType_Type.tp_name = "numpy.longlong"; + PyULongLongArrType_Type.tp_name = "numpy.ulonglong"; +#endif + + /* + Do the same for longdouble + */ +#if (NPY_SIZEOF_LONGDOUBLE == NPY_SIZEOF_DOUBLE) + PyLongDoubleArrType_Type.tp_name = "numpy.longdouble"; + PyCLongDoubleArrType_Type.tp_name = "numpy.clongdouble"; +#endif } typedef struct { diff --git a/numpy/core/src/multiarray/shape.c b/numpy/core/src/multiarray/shape.c index 30820737e..4e31f003b 100644 --- a/numpy/core/src/multiarray/shape.c +++ b/numpy/core/src/multiarray/shape.c @@ -26,7 +26,7 @@ static int _fix_unknown_dimension(PyArray_Dims *newshape, PyArrayObject *arr); static int -_attempt_nocopy_reshape(PyArrayObject *self, int newnd, npy_intp* newdims, +_attempt_nocopy_reshape(PyArrayObject *self, int newnd, const npy_intp *newdims, npy_intp *newstrides, int is_f_order); static void @@ -40,11 +40,11 @@ _putzero(char *optr, PyObject *zero, PyArray_Descr *dtype); */ NPY_NO_EXPORT PyObject * PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int refcheck, - NPY_ORDER order) + NPY_ORDER NPY_UNUSED(order)) { npy_intp oldnbytes, newnbytes; npy_intp oldsize, newsize; - int new_nd=newshape->len, k, n, elsize; + int new_nd=newshape->len, k, elsize; int refcnt; npy_intp* new_dimensions=newshape->ptr; npy_intp new_strides[NPY_MAXDIMS]; @@ -136,8 +136,8 @@ PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int refcheck, PyObject *zero = PyInt_FromLong(0); char *optr; optr = PyArray_BYTES(self) + oldnbytes; - n = newsize - oldsize; - for (k = 0; k < n; k++) { + npy_intp n_new = newsize - oldsize; + for (npy_intp i = 0; i < n_new; i++) { _putzero((char *)optr, zero, PyArray_DESCR(self)); optr += elsize; } @@ -361,7 +361,7 @@ _putzero(char *optr, PyObject *zero, PyArray_Descr *dtype) * stride of the next-fastest index. */ static int -_attempt_nocopy_reshape(PyArrayObject *self, int newnd, npy_intp* newdims, +_attempt_nocopy_reshape(PyArrayObject *self, int newnd, const npy_intp *newdims, npy_intp *newstrides, int is_f_order) { int oldnd; @@ -766,7 +766,7 @@ static int _npy_stride_sort_item_comparator(const void *a, const void *b) * [(2, 12), (0, 4), (1, -2)]. */ NPY_NO_EXPORT void -PyArray_CreateSortedStridePerm(int ndim, npy_intp *strides, +PyArray_CreateSortedStridePerm(int ndim, npy_intp const *strides, npy_stride_sort_item *out_strideperm) { int i; @@ -1048,7 +1048,7 @@ build_shape_string(npy_intp n, npy_intp *vals) * from a reduction result once its computation is complete. */ NPY_NO_EXPORT void -PyArray_RemoveAxesInPlace(PyArrayObject *arr, npy_bool *flags) +PyArray_RemoveAxesInPlace(PyArrayObject *arr, const npy_bool *flags) { PyArrayObject_fields *fa = (PyArrayObject_fields *)arr; npy_intp *shape = fa->dimensions, *strides = fa->strides; diff --git a/numpy/core/src/npymath/npy_math_complex.c.src b/numpy/core/src/npymath/npy_math_complex.c.src index dad381232..8c432e483 100644 --- a/numpy/core/src/npymath/npy_math_complex.c.src +++ b/numpy/core/src/npymath/npy_math_complex.c.src @@ -40,13 +40,14 @@ * flag in an efficient way. 
The flag is IEEE specific. See * https://github.com/freebsd/freebsd/blob/4c6378299/lib/msun/src/catrig.c#L42 */ +#if !defined(HAVE_CACOSF) || !defined(HAVE_CACOSL) || !defined(HAVE_CASINHF) || !defined(HAVE_CASINHL) #define raise_inexact() do { \ volatile npy_float NPY_UNUSED(junk) = 1 + tiny; \ } while (0) static const volatile npy_float tiny = 3.9443045e-31f; - +#endif /**begin repeat * #type = npy_float, npy_double, npy_longdouble# @@ -64,9 +65,6 @@ static const volatile npy_float tiny = 3.9443045e-31f; * Constants *=========================================================*/ static const @ctype@ c_1@c@ = {1.0@C@, 0.0}; -static const @ctype@ c_half@c@ = {0.5@C@, 0.0}; -static const @ctype@ c_i@c@ = {0.0, 1.0@C@}; -static const @ctype@ c_ihalf@c@ = {0.0, 0.5@C@}; /*========================================================== * Helper functions @@ -76,22 +74,6 @@ static const @ctype@ c_ihalf@c@ = {0.0, 0.5@C@}; *=========================================================*/ static NPY_INLINE @ctype@ -cadd@c@(@ctype@ a, @ctype@ b) -{ - return npy_cpack@c@(npy_creal@c@(a) + npy_creal@c@(b), - npy_cimag@c@(a) + npy_cimag@c@(b)); -} - -static NPY_INLINE -@ctype@ -csub@c@(@ctype@ a, @ctype@ b) -{ - return npy_cpack@c@(npy_creal@c@(a) - npy_creal@c@(b), - npy_cimag@c@(a) - npy_cimag@c@(b)); -} - -static NPY_INLINE -@ctype@ cmul@c@(@ctype@ a, @ctype@ b) { @type@ ar, ai, br, bi; @@ -132,20 +114,6 @@ cdiv@c@(@ctype@ a, @ctype@ b) } } -static NPY_INLINE -@ctype@ -cneg@c@(@ctype@ a) -{ - return npy_cpack@c@(-npy_creal@c@(a), -npy_cimag@c@(a)); -} - -static NPY_INLINE -@ctype@ -cmuli@c@(@ctype@ a) -{ - return npy_cpack@c@(-npy_cimag@c@(a), npy_creal@c@(a)); -} - /*========================================================== * Custom implementation of missing complex C99 functions *=========================================================*/ diff --git a/numpy/core/src/npymath/npy_math_internal.h.src b/numpy/core/src/npymath/npy_math_internal.h.src index fa820baac..18b6d1434 100644 --- a/numpy/core/src/npymath/npy_math_internal.h.src +++ b/numpy/core/src/npymath/npy_math_internal.h.src @@ -716,3 +716,44 @@ npy_@func@@c@(@type@ a, @type@ b) return npy_@func@u@c@(a < 0 ? -a : a, b < 0 ? 
-b : b); } /**end repeat**/ + +/* Unlike LCM and GCD, we need byte and short variants for the shift operators, + * since the result is dependent on the width of the type + */ +/**begin repeat + * + * #type = byte, short, int, long, longlong# + * #c = hh,h,,l,ll# + */ +/**begin repeat1 + * + * #u = u,# + * #is_signed = 0,1# + */ +NPY_INPLACE npy_@u@@type@ +npy_lshift@u@@c@(npy_@u@@type@ a, npy_@u@@type@ b) +{ + if (NPY_LIKELY((size_t)b < sizeof(a) * CHAR_BIT)) { + return a << b; + } + else { + return 0; + } +} +NPY_INPLACE npy_@u@@type@ +npy_rshift@u@@c@(npy_@u@@type@ a, npy_@u@@type@ b) +{ + if (NPY_LIKELY((size_t)b < sizeof(a) * CHAR_BIT)) { + return a >> b; + } +#if @is_signed@ + else if (a < 0) { + return (npy_@u@@type@)-1; /* preserve the sign bit */ + } +#endif + else { + return 0; + } +} +/**end repeat1**/ +/**end repeat**/ diff --git a/numpy/core/src/npysort/radixsort.c.src b/numpy/core/src/npysort/radixsort.c.src index c90b06974..72887d7e4 100644 --- a/numpy/core/src/npysort/radixsort.c.src +++ b/numpy/core/src/npysort/radixsort.c.src @@ -198,9 +198,9 @@ aradixsort_@suff@(void *start, npy_intp* tosort, npy_intp num, void *NPY_UNUSED( return 0; } - k1 = KEY_OF(arr[0]); + k1 = KEY_OF(arr[tosort[0]]); for (npy_intp i = 1; i < num; i++) { - k2 = KEY_OF(arr[i]); + k2 = KEY_OF(arr[tosort[i]]); if (k1 > k2) { all_sorted = 0; break; diff --git a/numpy/core/src/umath/_rational_tests.c.src b/numpy/core/src/umath/_rational_tests.c.src index 9e74845df..615e395c7 100644 --- a/numpy/core/src/umath/_rational_tests.c.src +++ b/numpy/core/src/umath/_rational_tests.c.src @@ -539,11 +539,11 @@ static PyObject* pyrational_str(PyObject* self) { rational x = ((PyRational*)self)->r; if (d(x)!=1) { - return PyString_FromFormat( + return PyUString_FromFormat( "%ld/%ld",(long)x.n,(long)d(x)); } else { - return PyString_FromFormat( + return PyUString_FromFormat( "%ld",(long)x.n); } } diff --git a/numpy/core/src/umath/loops.c.src b/numpy/core/src/umath/loops.c.src index 2028a5712..e6d8eca0d 100644 --- a/numpy/core/src/umath/loops.c.src +++ b/numpy/core/src/umath/loops.c.src @@ -699,6 +699,7 @@ BOOL_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED * #ftype = npy_float, npy_float, npy_float, npy_float, npy_double, npy_double, * npy_double, npy_double, npy_double, npy_double# * #SIGNED = 1, 0, 1, 0, 1, 0, 1, 0, 1, 0# + * #c = hh,uhh,h,uh,,u,l,ul,ll,ull# */ #define @TYPE@_floor_divide @TYPE@_divide @@ -776,16 +777,15 @@ NPY_NO_EXPORT NPY_GCC_OPT_3 @ATTR@ void /**begin repeat2 * Arithmetic - * #kind = add, subtract, multiply, bitwise_and, bitwise_or, bitwise_xor, - * left_shift, right_shift# - * #OP = +, -,*, &, |, ^, <<, >># + * #kind = add, subtract, multiply, bitwise_and, bitwise_or, bitwise_xor# + * #OP = +, -, *, &, |, ^# */ #if @CHK@ NPY_NO_EXPORT NPY_GCC_OPT_3 @ATTR@ void @TYPE@_@kind@@isa@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func)) { - if(IS_BINARY_REDUCE) { + if (IS_BINARY_REDUCE) { BINARY_REDUCE_LOOP(@type@) { io1 @OP@= *(@type@ *)ip2; } @@ -799,6 +799,47 @@ NPY_NO_EXPORT NPY_GCC_OPT_3 @ATTR@ void /**end repeat2**/ +/* + * Arithmetic bit shift operations. + * + * Intel hardware masks bit shift values, so large shifts wrap around + * and can produce surprising results. The special handling ensures that + * behavior is independent of compiler or hardware. + * TODO: We could implement consistent behavior for negative shifts, + * which is undefined in C. 
+ */ + +#define INT_left_shift_needs_clear_floatstatus +#define UINT_left_shift_needs_clear_floatstatus + +NPY_NO_EXPORT NPY_GCC_OPT_3 void +@TYPE@_left_shift@isa@(char **args, npy_intp *dimensions, npy_intp *steps, + void *NPY_UNUSED(func)) +{ + BINARY_LOOP_FAST(@type@, @type@, *out = npy_lshift@c@(in1, in2)); + +#ifdef @TYPE@_left_shift_needs_clear_floatstatus + // For some reason, our macOS CI sets an "invalid" flag here, but only + // for some types. + npy_clear_floatstatus_barrier((char*)dimensions); +#endif +} + +#undef INT_left_shift_needs_clear_floatstatus +#undef UINT_left_shift_needs_clear_floatstatus + +NPY_NO_EXPORT +#ifndef NPY_DO_NOT_OPTIMIZE_@TYPE@_right_shift +NPY_GCC_OPT_3 +#endif +void +@TYPE@_right_shift@isa@(char **args, npy_intp *dimensions, npy_intp *steps, + void *NPY_UNUSED(func)) +{ + BINARY_LOOP_FAST(@type@, @type@, *out = npy_rshift@c@(in1, in2)); +} + + /**begin repeat2 * #kind = equal, not_equal, greater, greater_equal, less, less_equal, * logical_and, logical_or# @@ -1253,10 +1294,10 @@ NPY_NO_EXPORT void const @type@ in1 = *(@type@ *)ip1; const @type@ in2 = *(@type@ *)ip2; if (in1 == NPY_DATETIME_NAT) { - *((@type@ *)op1) = in2; + *((@type@ *)op1) = in1; } else if (in2 == NPY_DATETIME_NAT) { - *((@type@ *)op1) = in1; + *((@type@ *)op1) = in2; } else { *((@type@ *)op1) = (in1 @OP@ in2) ? in1 : in2; @@ -1594,6 +1635,30 @@ NPY_NO_EXPORT void /**end repeat**/ /**begin repeat + * #func = rint, ceil, floor, trunc# + * #scalarf = npy_rint, npy_ceil, npy_floor, npy_trunc# + */ + +/**begin repeat1 +* #TYPE = FLOAT, DOUBLE# +* #type = npy_float, npy_double# +* #typesub = f, # +*/ + +NPY_NO_EXPORT NPY_GCC_OPT_3 void +@TYPE@_@func@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data)) +{ + UNARY_LOOP { + const @type@ in1 = *(@type@ *)ip1; + *(@type@ *)op1 = @scalarf@@typesub@(in1); + } +} + + +/**end repeat1**/ +/**end repeat**/ + +/**begin repeat * #func = sin, cos, exp, log# * #scalarf = npy_sinf, npy_cosf, npy_expf, npy_logf# */ @@ -1616,6 +1681,78 @@ FLOAT_@func@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSE */ /**begin repeat1 + * #TYPE = FLOAT, DOUBLE# + * #type = npy_float, npy_double# + * #typesub = f, # + */ + +NPY_NO_EXPORT NPY_GCC_OPT_3 void +@TYPE@_sqrt_@isa@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data)) +{ + if (!run_unary_@isa@_sqrt_@TYPE@(args, dimensions, steps)) { + UNARY_LOOP { + const @type@ in1 = *(@type@ *)ip1; + *(@type@ *)op1 = npy_sqrt@typesub@(in1); + } + } +} + +NPY_NO_EXPORT NPY_GCC_OPT_3 void +@TYPE@_absolute_@isa@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data)) +{ + if (!run_unary_@isa@_absolute_@TYPE@(args, dimensions, steps)) { + UNARY_LOOP { + const @type@ in1 = *(@type@ *)ip1; + const @type@ tmp = in1 > 0 ? 
in1 : -in1; + /* add 0 to clear -0.0 */ + *((@type@ *)op1) = tmp + 0; + } + } + npy_clear_floatstatus_barrier((char*)dimensions); +} + +NPY_NO_EXPORT NPY_GCC_OPT_3 void +@TYPE@_square_@isa@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data)) +{ + if (!run_unary_@isa@_square_@TYPE@(args, dimensions, steps)) { + UNARY_LOOP { + const @type@ in1 = *(@type@ *)ip1; + *(@type@ *)op1 = in1*in1; + } + } +} + +NPY_NO_EXPORT NPY_GCC_OPT_3 void +@TYPE@_reciprocal_@isa@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data)) +{ + if (!run_unary_@isa@_reciprocal_@TYPE@(args, dimensions, steps)) { + UNARY_LOOP { + const @type@ in1 = *(@type@ *)ip1; + *(@type@ *)op1 = 1.0f/in1; + } + } +} + +/**begin repeat2 + * #func = rint, ceil, floor, trunc# + * #scalarf = npy_rint, npy_ceil, npy_floor, npy_trunc# + */ + +NPY_NO_EXPORT NPY_GCC_OPT_3 void +@TYPE@_@func@_@isa@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data)) +{ + if (!run_unary_@isa@_@func@_@TYPE@(args, dimensions, steps)) { + UNARY_LOOP { + const @type@ in1 = *(@type@ *)ip1; + *(@type@ *)op1 = @scalarf@@typesub@(in1); + } + } +} + +/**end repeat2**/ +/**end repeat1**/ + +/**begin repeat1 * #func = exp, log# * #scalarf = npy_expf, npy_logf# */ @@ -1665,10 +1802,9 @@ FLOAT_@func@_@isa@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY } /**end repeat1**/ - - /**end repeat**/ + /**begin repeat * Float types * #type = npy_float, npy_double, npy_longdouble, npy_float# diff --git a/numpy/core/src/umath/loops.h.src b/numpy/core/src/umath/loops.h.src index 5070ab38b..0ef14a809 100644 --- a/numpy/core/src/umath/loops.h.src +++ b/numpy/core/src/umath/loops.h.src @@ -7,14 +7,12 @@ #define _NPY_UMATH_LOOPS_H_ #define BOOL_invert BOOL_logical_not -#define BOOL_negative BOOL_logical_not #define BOOL_add BOOL_logical_or #define BOOL_bitwise_and BOOL_logical_and #define BOOL_bitwise_or BOOL_logical_or #define BOOL_logical_xor BOOL_not_equal #define BOOL_bitwise_xor BOOL_logical_xor #define BOOL_multiply BOOL_logical_and -#define BOOL_subtract BOOL_logical_xor #define BOOL_maximum BOOL_logical_or #define BOOL_minimum BOOL_logical_and #define BOOL_fmax BOOL_maximum @@ -175,6 +173,19 @@ NPY_NO_EXPORT void */ NPY_NO_EXPORT void @TYPE@_sqrt(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func)); + +/**begin repeat1 + * #isa = avx512f, fma# + */ + +/**begin repeat2 + * #func = sqrt, absolute, square, reciprocal# + */ +NPY_NO_EXPORT void +@TYPE@_@func@_@isa@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func)); + +/**end repeat2**/ +/**end repeat1**/ /**end repeat**/ /**begin repeat @@ -194,6 +205,26 @@ FLOAT_@func@_@isa@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY /**end repeat**/ /**begin repeat + * #func = rint, ceil, floor, trunc# + */ + +/**begin repeat1 +* #TYPE = FLOAT, DOUBLE# +*/ + +NPY_NO_EXPORT NPY_GCC_OPT_3 void +@TYPE@_@func@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data)); + +/**begin repeat2 + * #isa = avx512f, fma# + */ +NPY_NO_EXPORT NPY_GCC_OPT_3 void +@TYPE@_@func@_@isa@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data)); +/**end repeat2**/ +/**end repeat1**/ +/**end repeat**/ + +/**begin repeat * Float types * #TYPE = HALF, FLOAT, DOUBLE, LONGDOUBLE# * #c = f, f, , l# diff --git a/numpy/core/src/umath/matmul.c.src b/numpy/core/src/umath/matmul.c.src index 480c0c72f..b5204eca5 100644 --- a/numpy/core/src/umath/matmul.c.src +++ b/numpy/core/src/umath/matmul.c.src 
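The absolute-value loop above adds 0 to the selected value before storing it. A self-contained illustration of why (names and values here are illustrative only, not from the patch): for in1 == +0.0f the comparison in1 > 0 is false, so the ternary produces -in1 == -0.0f, and the + 0 normalizes it, since (-0.0) + (+0.0) is +0.0 under the default rounding mode.

#include <math.h>
#include <stdio.h>

int main(void)
{
    float in1 = 0.0f;
    /* in1 > 0 is false for +0.0f, so tmp picks up the negated zero */
    float tmp = in1 > 0 ? in1 : -in1;   /* tmp == -0.0f */
    float out = tmp + 0;                /* out == +0.0f */
    printf("signbit(tmp)=%d signbit(out)=%d\n",
           signbit(tmp) != 0, signbit(out) != 0);
    return 0;
}

Compiled without -ffast-math this prints signbit(tmp)=1 signbit(out)=0, matching the "add 0 to clear -0.0" comment in the loop body.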
@@ -196,16 +196,14 @@ NPY_NO_EXPORT void * FLOAT, DOUBLE, HALF, * CFLOAT, CDOUBLE, CLONGDOUBLE, * UBYTE, USHORT, UINT, ULONG, ULONGLONG, - * BYTE, SHORT, INT, LONG, LONGLONG, - * BOOL# + * BYTE, SHORT, INT, LONG, LONGLONG# * #typ = npy_longdouble, * npy_float,npy_double,npy_half, * npy_cfloat, npy_cdouble, npy_clongdouble, * npy_ubyte, npy_ushort, npy_uint, npy_ulong, npy_ulonglong, - * npy_byte, npy_short, npy_int, npy_long, npy_longlong, - * npy_bool# - * #IS_COMPLEX = 0, 0, 0, 0, 1, 1, 1, 0*11# - * #IS_HALF = 0, 0, 0, 1, 0*14# + * npy_byte, npy_short, npy_int, npy_long, npy_longlong# + * #IS_COMPLEX = 0, 0, 0, 0, 1, 1, 1, 0*10# + * #IS_HALF = 0, 0, 0, 1, 0*13# */ NPY_NO_EXPORT void @@ -266,7 +264,44 @@ NPY_NO_EXPORT void } /**end repeat**/ +NPY_NO_EXPORT void +BOOL_matmul_inner_noblas(void *_ip1, npy_intp is1_m, npy_intp is1_n, + void *_ip2, npy_intp is2_n, npy_intp is2_p, + void *_op, npy_intp os_m, npy_intp os_p, + npy_intp dm, npy_intp dn, npy_intp dp) + +{ + npy_intp m, n, p; + npy_intp ib2_p, ob_p; + char *ip1 = (char *)_ip1, *ip2 = (char *)_ip2, *op = (char *)_op; + ib2_p = is2_p * dp; + ob_p = os_p * dp; + + for (m = 0; m < dm; m++) { + for (p = 0; p < dp; p++) { + char *ip1tmp = ip1; + char *ip2tmp = ip2; + *(npy_bool *)op = NPY_FALSE; + for (n = 0; n < dn; n++) { + npy_bool val1 = (*(npy_bool *)ip1tmp); + npy_bool val2 = (*(npy_bool *)ip2tmp); + if (val1 != 0 && val2 != 0) { + *(npy_bool *)op = NPY_TRUE; + break; + } + ip2tmp += is2_n; + ip1tmp += is1_n; + } + op += os_p; + ip2 += is2_p; + } + op -= ob_p; + ip2 -= ib2_p; + ip1 += is1_m; + op += os_m; + } +} NPY_NO_EXPORT void OBJECT_matmul_inner_noblas(void *_ip1, npy_intp is1_m, npy_intp is1_n, diff --git a/numpy/core/src/umath/override.c b/numpy/core/src/umath/override.c index 8d67f96ac..43bed425c 100644 --- a/numpy/core/src/umath/override.c +++ b/numpy/core/src/umath/override.c @@ -494,32 +494,18 @@ PyUFunc_CheckOverride(PyUFuncObject *ufunc, char *method, } else { /* not a tuple */ - if (nout > 1 && DEPRECATE("passing a single argument to the " - "'out' keyword argument of a " - "ufunc with\n" - "more than one output will " - "result in an error in the " - "future") < 0) { - /* - * If the deprecation is removed, also remove the loop - * below setting tuple items to None (but keep this future - * error message.) - */ + if (nout > 1) { PyErr_SetString(PyExc_TypeError, "'out' must be a tuple of arguments"); goto fail; } if (out != Py_None) { /* not already a tuple and not None */ - PyObject *out_tuple = PyTuple_New(nout); + PyObject *out_tuple = PyTuple_New(1); if (out_tuple == NULL) { goto fail; } - for (i = 1; i < nout; i++) { - Py_INCREF(Py_None); - PyTuple_SET_ITEM(out_tuple, i, Py_None); - } /* out was borrowed ref; make it permanent */ Py_INCREF(out); /* steals reference */ diff --git a/numpy/core/src/umath/reduction.c b/numpy/core/src/umath/reduction.c index 8ae2f65e0..4ce8d8ab7 100644 --- a/numpy/core/src/umath/reduction.c +++ b/numpy/core/src/umath/reduction.c @@ -36,7 +36,7 @@ * If 'dtype' isn't NULL, this function steals its reference. */ static PyArrayObject * -allocate_reduce_result(PyArrayObject *arr, npy_bool *axis_flags, +allocate_reduce_result(PyArrayObject *arr, const npy_bool *axis_flags, PyArray_Descr *dtype, int subok) { npy_intp strides[NPY_MAXDIMS], stride; @@ -84,7 +84,7 @@ allocate_reduce_result(PyArrayObject *arr, npy_bool *axis_flags, * The return value is a view into 'out'. 
*/ static PyArrayObject * -conform_reduce_result(int ndim, npy_bool *axis_flags, +conform_reduce_result(int ndim, const npy_bool *axis_flags, PyArrayObject *out, int keepdims, const char *funcname, int need_copy) { @@ -251,7 +251,7 @@ PyArray_CreateReduceResult(PyArrayObject *operand, PyArrayObject *out, * Count the number of dimensions selected in 'axis_flags' */ static int -count_axes(int ndim, npy_bool *axis_flags) +count_axes(int ndim, const npy_bool *axis_flags) { int idim; int naxes = 0; @@ -299,7 +299,7 @@ count_axes(int ndim, npy_bool *axis_flags) NPY_NO_EXPORT PyArrayObject * PyArray_InitializeReduceResult( PyArrayObject *result, PyArrayObject *operand, - npy_bool *axis_flags, + const npy_bool *axis_flags, npy_intp *out_skip_first_count, const char *funcname) { npy_intp *strides, *shape, shape_orig[NPY_MAXDIMS]; diff --git a/numpy/core/src/umath/scalarmath.c.src b/numpy/core/src/umath/scalarmath.c.src index a7987acda..d5d8d659b 100644 --- a/numpy/core/src/umath/scalarmath.c.src +++ b/numpy/core/src/umath/scalarmath.c.src @@ -246,25 +246,26 @@ static void /**end repeat**/ - -/* QUESTION: Should we check for overflow / underflow in (l,r)shift? */ - /**begin repeat * #name = byte, ubyte, short, ushort, int, uint, * long, ulong, longlong, ulonglong# * #type = npy_byte, npy_ubyte, npy_short, npy_ushort, npy_int, npy_uint, * npy_long, npy_ulong, npy_longlong, npy_ulonglong# + * #suffix = hh,uhh,h,uh,,u,l,ul,ll,ull# */ /**begin repeat1 - * #oper = and, xor, or, lshift, rshift# - * #op = &, ^, |, <<, >># + * #oper = and, xor, or# + * #op = &, ^, |# */ #define @name@_ctype_@oper@(arg1, arg2, out) *(out) = (arg1) @op@ (arg2) /**end repeat1**/ +#define @name@_ctype_lshift(arg1, arg2, out) *(out) = npy_lshift@suffix@(arg1, arg2) +#define @name@_ctype_rshift(arg1, arg2, out) *(out) = npy_rshift@suffix@(arg1, arg2) + /**end repeat**/ /**begin repeat @@ -405,21 +406,22 @@ half_ctype_divmod(npy_half a, npy_half b, npy_half *out1, npy_half *out2) { /**begin repeat * #name = float, double, longdouble# * #type = npy_float, npy_double, npy_longdouble# + * #c = f,,l# */ -static npy_@name@ (*_basic_@name@_pow)(@type@ a, @type@ b); static void @name@_ctype_power(@type@ a, @type@ b, @type@ *out) { - *out = _basic_@name@_pow(a, b); + *out = npy_pow@c@(a, b); } + /**end repeat**/ static void half_ctype_power(npy_half a, npy_half b, npy_half *out) { const npy_float af = npy_half_to_float(a); const npy_float bf = npy_half_to_float(b); - const npy_float outf = _basic_float_pow(af,bf); + const npy_float outf = npy_powf(af,bf); *out = npy_float_to_half(outf); } @@ -476,14 +478,10 @@ static void } /**end repeat**/ -/* - * Get the nc_powf, nc_pow, and nc_powl functions from - * the data area of the power ufunc in umathmodule. 
- */ - /**begin repeat * #name = cfloat, cdouble, clongdouble# * #type = npy_cfloat, npy_cdouble, npy_clongdouble# + * #c = f,,l# */ static void @name@_ctype_positive(@type@ a, @type@ *out) @@ -492,12 +490,10 @@ static void out->imag = a.imag; } -static void (*_basic_@name@_pow)(@type@ *, @type@ *, @type@ *); - static void @name@_ctype_power(@type@ a, @type@ b, @type@ *out) { - _basic_@name@_pow(&a, &b, out); + *out = npy_cpow@c@(a, b); } /**end repeat**/ @@ -570,7 +566,7 @@ static void * 1) Convert the types to the common type if both are scalars (0 return) * 2) If both are not scalars use ufunc machinery (-2 return) * 3) If both are scalars but cannot be cast to the right type - * return NotImplmented (-1 return) + * return NotImplemented (-1 return) * * 4) Perform the function on the C-type. * 5) If an error condition occurred, check to see @@ -1429,24 +1425,53 @@ static PyObject * /**begin repeat * + * #name = byte, ubyte, short, ushort, int, uint, + * long, ulong, longlong, ulonglong, + * half, float, double, longdouble, + * cfloat, cdouble, clongdouble# + * #Name = Byte, UByte, Short, UShort, Int, UInt, + * Long, ULong, LongLong, ULongLong, + * Half, Float, Double, LongDouble, + * CFloat, CDouble, CLongDouble# + * #cmplx = 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1# + * #to_ctype = , , , , , , , , , , npy_half_to_double, , , , , , # + * #func = PyFloat_FromDouble*17# + */ +static NPY_INLINE PyObject * +@name@_float(PyObject *obj) +{ +#if @cmplx@ + if (emit_complexwarning() < 0) { + return NULL; + } + return @func@(@to_ctype@(PyArrayScalar_VAL(obj, @Name@).real)); +#else + return @func@(@to_ctype@(PyArrayScalar_VAL(obj, @Name@))); +#endif +} +/**end repeat**/ + + +#if !defined(NPY_PY3K) + +/**begin repeat + * * #name = (byte, ubyte, short, ushort, int, uint, * long, ulong, longlong, ulonglong, * half, float, double, longdouble, - * cfloat, cdouble, clongdouble)*2# + * cfloat, cdouble, clongdouble)# * #Name = (Byte, UByte, Short, UShort, Int, UInt, * Long, ULong, LongLong, ULongLong, * Half, Float, Double, LongDouble, - * CFloat, CDouble, CLongDouble)*2# - * #cmplx = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1)*2# - * #to_ctype = (, , , , , , , , , , npy_half_to_double, , , , , , )*2# - * #which = long*17, float*17# + * CFloat, CDouble, CLongDouble)# + * #cmplx = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1)# + * #to_ctype = (, , , , , , , , , , npy_half_to_double, , , , , , )# * #func = (PyLong_FromLongLong, PyLong_FromUnsignedLongLong)*5, * PyLong_FromDouble*3, npy_longdouble_to_PyLong, - * PyLong_FromDouble*2, npy_longdouble_to_PyLong, - * PyFloat_FromDouble*17# + * PyLong_FromDouble*2, npy_longdouble_to_PyLong# */ static NPY_INLINE PyObject * -@name@_@which@(PyObject *obj) +@name@_long(PyObject *obj) { #if @cmplx@ if (emit_complexwarning() < 0) { @@ -1459,8 +1484,6 @@ static NPY_INLINE PyObject * } /**end repeat**/ -#if !defined(NPY_PY3K) - /**begin repeat * * #name = (byte, ubyte, short, ushort, int, uint, @@ -1652,52 +1675,9 @@ add_scalarmath(void) /**end repeat**/ } -static int -get_functions(PyObject * mm) -{ - PyObject *obj; - void **funcdata; - char *signatures; - int i, j; - int ret = -1; - - /* Get the nc_pow functions */ - /* Get the pow functions */ - obj = PyObject_GetAttrString(mm, "power"); - if (obj == NULL) { - goto fail; - } - funcdata = ((PyUFuncObject *)obj)->data; - signatures = ((PyUFuncObject *)obj)->types; - - i = 0; - j = 0; - while (signatures[i] != NPY_FLOAT) { - i += 3; - j++; - } - _basic_float_pow = funcdata[j]; - _basic_double_pow = 
funcdata[j + 1]; - _basic_longdouble_pow = funcdata[j + 2]; - _basic_cfloat_pow = funcdata[j + 3]; - _basic_cdouble_pow = funcdata[j + 4]; - _basic_clongdouble_pow = funcdata[j + 5]; - Py_DECREF(obj); - - return ret = 0; - - fail: - Py_DECREF(mm); - return ret; -} - NPY_NO_EXPORT int initscalarmath(PyObject * m) { - if (get_functions(m) < 0) { - return -1; - } - add_scalarmath(); return 0; diff --git a/numpy/core/src/umath/simd.inc.src b/numpy/core/src/umath/simd.inc.src index 7aec1ff49..74f52cc9d 100644 --- a/numpy/core/src/umath/simd.inc.src +++ b/numpy/core/src/umath/simd.inc.src @@ -139,6 +139,37 @@ abs_ptrdiff(char *a, char *b) /* prototypes */ /**begin repeat1 + * #type = npy_float, npy_double# + * #TYPE = FLOAT, DOUBLE# + */ + +/**begin repeat2 + * #func = sqrt, absolute, square, reciprocal, rint, floor, ceil, trunc# + */ + +#if defined @CHK@ && defined NPY_HAVE_SSE2_INTRINSICS +static NPY_INLINE NPY_GCC_TARGET_@ISA@ void +@ISA@_@func@_@TYPE@(@type@ *, @type@ *, const npy_intp n, const npy_intp stride); +#endif + +static NPY_INLINE int +run_unary_@isa@_@func@_@TYPE@(char **args, npy_intp *dimensions, npy_intp *steps) +{ +#if defined @CHK@ && defined NPY_HAVE_SSE2_INTRINSICS + if (IS_OUTPUT_BLOCKABLE_UNARY(sizeof(@type@), @REGISTER_SIZE@)) { + @ISA@_@func@_@TYPE@((@type@*)args[1], (@type@*)args[0], dimensions[0], steps[0]); + return 1; + } + else + return 0; +#endif + return 0; +} + +/**end repeat2**/ +/**end repeat1**/ + +/**begin repeat1 * #func = exp, log# */ @@ -185,7 +216,6 @@ run_unary_@isa@_sincos_FLOAT(char **args, npy_intp *dimensions, npy_intp *steps, /**end repeat**/ - /**begin repeat * Float types * #type = npy_float, npy_double, npy_longdouble# @@ -1017,7 +1047,7 @@ sse2_sqrt_@TYPE@(@type@ * op, @type@ * ip, const npy_intp n) LOOP_BLOCK_ALIGN_VAR(op, @type@, VECTOR_SIZE_BYTES) { op[i] = @scalarf@(ip[i]); } - assert(n < (VECTOR_SIZE_BYTES / sizeof(@type@)) || + assert((npy_uintp)n < (VECTOR_SIZE_BYTES / sizeof(@type@)) || npy_is_aligned(&op[i], VECTOR_SIZE_BYTES)); if (npy_is_aligned(&ip[i], VECTOR_SIZE_BYTES)) { LOOP_BLOCKED(@type@, VECTOR_SIZE_BYTES) { @@ -1069,7 +1099,7 @@ sse2_@kind@_@TYPE@(@type@ * op, @type@ * ip, const npy_intp n) LOOP_BLOCK_ALIGN_VAR(op, @type@, VECTOR_SIZE_BYTES) { op[i] = @scalar@_@type@(ip[i]); } - assert(n < (VECTOR_SIZE_BYTES / sizeof(@type@)) || + assert((npy_uintp)n < (VECTOR_SIZE_BYTES / sizeof(@type@)) || npy_is_aligned(&op[i], VECTOR_SIZE_BYTES)); if (npy_is_aligned(&ip[i], VECTOR_SIZE_BYTES)) { LOOP_BLOCKED(@type@, VECTOR_SIZE_BYTES) { @@ -1104,7 +1134,7 @@ sse2_@kind@_@TYPE@(@type@ * ip, @type@ * op, const npy_intp n) /* Order of operations important for MSVC 2015 */ *op = (*op @OP@ ip[i] || npy_isnan(*op)) ? 
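The new `run_unary_@isa@_@func@_@TYPE@` stubs above gate each vectorized unary loop on `IS_OUTPUT_BLOCKABLE_UNARY` and report through their return value whether the SIMD path handled the call. A loose Python-level sketch of that dispatch contract (function and argument names are mine, not from the source):

```python
import numpy as np

# Returns 1 if the SIMD kernel ran; 0 tells the caller to fall back to
# the generic scalar loop (IS_OUTPUT_BLOCKABLE_UNARY in the C source).
def run_unary(vector_kernel, out, inp, blockable):
    if blockable:
        out[:] = vector_kernel(inp)
        return 1
    return 0

buf = np.empty(4, np.float32)
assert run_unary(np.sqrt, buf, np.arange(4, dtype=np.float32), True) == 1
```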
*op : ip[i]; } - assert(n < (stride) || npy_is_aligned(&ip[i], VECTOR_SIZE_BYTES)); + assert((npy_uintp)n < (stride) || npy_is_aligned(&ip[i], VECTOR_SIZE_BYTES)); if (i + 3 * stride <= n) { /* load the first elements */ @vtype@ c1 = @vpre@_load_@vsuf@((@type@*)&ip[i]); @@ -1144,41 +1174,76 @@ sse2_@kind@_@TYPE@(@type@ * ip, @type@ * op, const npy_intp n) #if defined HAVE_ATTRIBUTE_TARGET_AVX2_WITH_INTRINSICS static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA __m256 -fma_get_full_load_mask(void) +fma_get_full_load_mask_ps(void) { return _mm256_set1_ps(-1.0); } +static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA __m256i +fma_get_full_load_mask_pd(void) +{ + return _mm256_castpd_si256(_mm256_set1_pd(-1.0)); +} + static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA __m256 -fma_get_partial_load_mask(const npy_int num_lanes, const npy_int total_elem) +fma_get_partial_load_mask_ps(const npy_int num_elem, const npy_int num_lanes) { float maskint[16] = {-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0, 1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0}; - float* addr = maskint + total_elem - num_lanes; + float* addr = maskint + num_lanes - num_elem; return _mm256_loadu_ps(addr); } +static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA __m256i +fma_get_partial_load_mask_pd(const npy_int num_elem, const npy_int num_lanes) +{ + npy_int maskint[16] = {-1,-1,-1,-1,-1,-1,-1,-1,1,1,1,1,1,1,1,1}; + npy_int* addr = maskint + 2*num_lanes - 2*num_elem; + return _mm256_loadu_si256((__m256i*) addr); +} + static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA __m256 -fma_masked_gather(__m256 src, - npy_float* addr, - __m256i vindex, - __m256 mask) +fma_masked_gather_ps(__m256 src, + npy_float* addr, + __m256i vindex, + __m256 mask) { return _mm256_mask_i32gather_ps(src, addr, vindex, mask, 4); } +static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA __m256d +fma_masked_gather_pd(__m256d src, + npy_double* addr, + __m128i vindex, + __m256d mask) +{ + return _mm256_mask_i32gather_pd(src, addr, vindex, mask, 8); +} + static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA __m256 -fma_masked_load(__m256 mask, npy_float* addr) +fma_masked_load_ps(__m256 mask, npy_float* addr) { return _mm256_maskload_ps(addr, _mm256_cvtps_epi32(mask)); } +static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA __m256d +fma_masked_load_pd(__m256i mask, npy_double* addr) +{ + return _mm256_maskload_pd(addr, mask); +} + static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA __m256 -fma_set_masked_lanes(__m256 x, __m256 val, __m256 mask) +fma_set_masked_lanes_ps(__m256 x, __m256 val, __m256 mask) { return _mm256_blendv_ps(x, val, mask); } +static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA __m256d +fma_set_masked_lanes_pd(__m256d x, __m256d val, __m256d mask) +{ + return _mm256_blendv_pd(x, val, mask); +} + static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA __m256 fma_blend(__m256 x, __m256 y, __m256 ymask) { @@ -1186,6 +1251,18 @@ fma_blend(__m256 x, __m256 y, __m256 ymask) } static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA __m256 +fma_invert_mask_ps(__m256 ymask) +{ + return _mm256_andnot_ps(ymask, _mm256_set1_ps(-1.0)); +} + +static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA __m256i +fma_invert_mask_pd(__m256i ymask) +{ + return _mm256_andnot_si256(ymask, _mm256_set1_epi32(0xFFFFFFFF)); +} + +static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA __m256 fma_should_calculate_sine(__m256i k, __m256i andop, __m256i cmp) { return _mm256_cvtepi32_ps( @@ -1290,42 +1367,115 @@ fma_scalef_ps(__m256 poly, __m256 quadrant) } } +/**begin repeat + * #vsub = ps, pd# + * #vtype = __m256, __m256d# + 
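`fma_get_partial_load_mask_ps`/`_pd` above build a mask selecting the first `num_elem` of `num_lanes` vector lanes, used when fewer than a full register of elements remains (the hunk also renames the previously confusing parameters; the arithmetic is unchanged). NumPy-level sketch of the lane selection (helper name is mine):

```python
import numpy as np

# True for lanes that hold real data, False for the unused tail lanes.
def partial_load_mask(num_elem, num_lanes):
    return np.arange(num_lanes) < num_elem

assert partial_load_mask(3, 8).tolist() == [True] * 3 + [False] * 5
```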
*/ +static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA @vtype@ +fma_abs_@vsub@(@vtype@ x) +{ + return _mm256_andnot_@vsub@(_mm256_set1_@vsub@(-0.0), x); +} + +static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA @vtype@ +fma_reciprocal_@vsub@(@vtype@ x) +{ + return _mm256_div_@vsub@(_mm256_set1_@vsub@(1.0f), x); +} + +static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA @vtype@ +fma_rint_@vsub@(@vtype@ x) +{ + return _mm256_round_@vsub@(x, _MM_FROUND_TO_NEAREST_INT); +} + +static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA @vtype@ +fma_floor_@vsub@(@vtype@ x) +{ + return _mm256_round_@vsub@(x, _MM_FROUND_TO_NEG_INF); +} + +static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA @vtype@ +fma_ceil_@vsub@(@vtype@ x) +{ + return _mm256_round_@vsub@(x, _MM_FROUND_TO_POS_INF); +} + +static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA @vtype@ +fma_trunc_@vsub@(@vtype@ x) +{ + return _mm256_round_@vsub@(x, _MM_FROUND_TO_ZERO); +} +/**end repeat**/ #endif #if defined HAVE_ATTRIBUTE_TARGET_AVX512F_WITH_INTRINSICS static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F __mmask16 -avx512_get_full_load_mask(void) +avx512_get_full_load_mask_ps(void) { return 0xFFFF; } +static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F __mmask8 +avx512_get_full_load_mask_pd(void) +{ + return 0xFF; +} + static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F __mmask16 -avx512_get_partial_load_mask(const npy_int num_elem, const npy_int total_elem) +avx512_get_partial_load_mask_ps(const npy_int num_elem, const npy_int total_elem) { return (0x0001 << num_elem) - 0x0001; } +static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F __mmask8 +avx512_get_partial_load_mask_pd(const npy_int num_elem, const npy_int total_elem) +{ + return (0x01 << num_elem) - 0x01; +} + static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F __m512 -avx512_masked_gather(__m512 src, - npy_float* addr, - __m512i vindex, - __mmask16 kmask) +avx512_masked_gather_ps(__m512 src, + npy_float* addr, + __m512i vindex, + __mmask16 kmask) { return _mm512_mask_i32gather_ps(src, kmask, vindex, addr, 4); } +static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F __m512d +avx512_masked_gather_pd(__m512d src, + npy_double* addr, + __m256i vindex, + __mmask8 kmask) +{ + return _mm512_mask_i32gather_pd(src, kmask, vindex, addr, 8); +} + static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F __m512 -avx512_masked_load(__mmask16 mask, npy_float* addr) +avx512_masked_load_ps(__mmask16 mask, npy_float* addr) { return _mm512_maskz_loadu_ps(mask, (__m512 *)addr); } +static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F __m512d +avx512_masked_load_pd(__mmask8 mask, npy_double* addr) +{ + return _mm512_maskz_loadu_pd(mask, (__m512d *)addr); +} + static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F __m512 -avx512_set_masked_lanes(__m512 x, __m512 val, __mmask16 mask) +avx512_set_masked_lanes_ps(__m512 x, __m512 val, __mmask16 mask) { return _mm512_mask_blend_ps(mask, x, val); } +static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F __m512d +avx512_set_masked_lanes_pd(__m512d x, __m512d val, __mmask8 mask) +{ + return _mm512_mask_blend_pd(mask, x, val); +} + static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F __m512 avx512_blend(__m512 x, __m512 y, __mmask16 ymask) { @@ -1333,6 +1483,18 @@ avx512_blend(__m512 x, __m512 y, __mmask16 ymask) } static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F __mmask16 +avx512_invert_mask_ps(__mmask16 ymask) +{ + return _mm512_knot(ymask); +} + +static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F __mmask8 +avx512_invert_mask_pd(__mmask8 
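The `fma_rint/floor/ceil/trunc` helpers above are thin wrappers around `_mm256_round_*` with the four SSE4.1 rounding-mode immediates; per lane they agree with the corresponding NumPy ufuncs:

```python
import numpy as np

# _MM_FROUND_TO_NEAREST_INT ~ np.rint (round half to even),
# _MM_FROUND_TO_NEG_INF ~ np.floor, _MM_FROUND_TO_POS_INF ~ np.ceil,
# _MM_FROUND_TO_ZERO ~ np.trunc.
x = np.array([-1.5, -0.5, 0.5, 1.5])
assert np.rint(x).tolist()  == [-2.0, -0.0, 0.0, 2.0]
assert np.trunc(x).tolist() == [-1.0, -0.0, 0.0, 1.0]
assert np.floor(x).tolist() == [-2.0, -1.0, 0.0, 1.0]
assert np.ceil(x).tolist()  == [-1.0, -0.0, 1.0, 2.0]
```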
ymask) +{ + return _mm512_knot(ymask); +} + +static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F __mmask16 avx512_should_calculate_sine(__m512i k, __m512i andop, __m512i cmp) { return _mm512_cmpeq_epi32_mask(_mm512_and_epi32(k, andop), cmp); @@ -1361,6 +1523,49 @@ avx512_scalef_ps(__m512 poly, __m512 quadrant) { return _mm512_scalef_ps(poly, quadrant); } +/**begin repeat + * #vsub = ps, pd# + * #epi_vsub = epi32, epi64# + * #vtype = __m512, __m512d# + * #and_const = 0x7fffffff, 0x7fffffffffffffffLL# + */ +static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F @vtype@ +avx512_abs_@vsub@(@vtype@ x) +{ + return (@vtype@) _mm512_and_@epi_vsub@((__m512i) x, + _mm512_set1_@epi_vsub@ (@and_const@)); +} + +static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F @vtype@ +avx512_reciprocal_@vsub@(@vtype@ x) +{ + return _mm512_div_@vsub@(_mm512_set1_@vsub@(1.0f), x); +} + +static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F @vtype@ +avx512_rint_@vsub@(@vtype@ x) +{ + return _mm512_roundscale_@vsub@(x, 0x08); +} + +static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F @vtype@ +avx512_floor_@vsub@(@vtype@ x) +{ + return _mm512_roundscale_@vsub@(x, 0x09); +} + +static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F @vtype@ +avx512_ceil_@vsub@(@vtype@ x) +{ + return _mm512_roundscale_@vsub@(x, 0x0A); +} + +static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F @vtype@ +avx512_trunc_@vsub@(@vtype@ x) +{ + return _mm512_roundscale_@vsub@(x, 0x0B); +} +/**end repeat**/ #endif /**begin repeat @@ -1438,7 +1643,187 @@ static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ @vtype@ sin = @fmadd@(sin, x, x); return sin; } + +static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ @vtype@ +@isa@_sqrt_ps(@vtype@ x) +{ + return _mm@vsize@_sqrt_ps(x); +} + +static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ @vtype@d +@isa@_sqrt_pd(@vtype@d x) +{ + return _mm@vsize@_sqrt_pd(x); +} + +static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ @vtype@ +@isa@_square_ps(@vtype@ x) +{ + return _mm@vsize@_mul_ps(x,x); +} + +static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ @vtype@d +@isa@_square_pd(@vtype@d x) +{ + return _mm@vsize@_mul_pd(x,x); +} + +#endif +/**end repeat**/ + + +/**begin repeat + * #ISA = FMA, AVX512F# + * #isa = fma, avx512# + * #vsize = 256, 512# + * #BYTES = 32, 64# + * #cvtps_epi32 = _mm256_cvtps_epi32, # + * #mask = __m256, __mmask16# + * #vsub = , _mask# + * #vtype = __m256, __m512# + * #cvtps_epi32 = _mm256_cvtps_epi32, # + * #masked_store = _mm256_maskstore_ps, _mm512_mask_storeu_ps# + * #CHK = HAVE_ATTRIBUTE_TARGET_AVX2_WITH_INTRINSICS, HAVE_ATTRIBUTE_TARGET_AVX512F_WITH_INTRINSICS# + */ + +/**begin repeat1 + * #func = sqrt, absolute, square, reciprocal, rint, ceil, floor, trunc# + * #vectorf = sqrt, abs, square, reciprocal, rint, ceil, floor, trunc# + * #replace_0_with_1 = 0, 0, 0, 1, 0, 0, 0, 0# + */ + +#if defined @CHK@ +static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ void +@ISA@_@func@_FLOAT(npy_float* op, + npy_float* ip, + const npy_intp array_size, + const npy_intp steps) +{ + const npy_intp stride = steps/sizeof(npy_float); + const npy_int num_lanes = @BYTES@/sizeof(npy_float); + npy_intp num_remaining_elements = array_size; + @vtype@ ones_f = _mm@vsize@_set1_ps(1.0f); + @mask@ load_mask = @isa@_get_full_load_mask_ps(); +#if @replace_0_with_1@ + @mask@ inv_load_mask = @isa@_invert_mask_ps(load_mask); +#endif + npy_int indexarr[16]; + for (npy_int ii = 0; ii < 16; ii++) { + indexarr[ii] = ii*stride; + } + @vtype@i vindex = _mm@vsize@_loadu_si@vsize@((@vtype@i*)&indexarr[0]); + + while 
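`avx512_abs_{ps,pd}` above computes absolute value by ANDing the lane bits with `0x7fffffff` / `0x7fffffffffffffff` (the FMA variant AND-NOTs with `-0.0`, which clears the same sign bit). Scalar sketch of the bit trick:

```python
import numpy as np

# Clearing the IEEE-754 sign bit yields |x| without any branch.
x = np.float32(-3.5)
bits = x.view(np.uint32) & np.uint32(0x7fffffff)
assert bits.view(np.float32) == np.float32(3.5)
```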
(num_remaining_elements > 0) { + if (num_remaining_elements < num_lanes) { + load_mask = @isa@_get_partial_load_mask_ps(num_remaining_elements, + num_lanes); +#if @replace_0_with_1@ + inv_load_mask = @isa@_invert_mask_ps(load_mask); +#endif + } + @vtype@ x; + if (stride == 1) { + x = @isa@_masked_load_ps(load_mask, ip); +#if @replace_0_with_1@ + /* + * Replace masked elements with 1.0f to avoid divide by zero fp + * exception in reciprocal + */ + x = @isa@_set_masked_lanes_ps(x, ones_f, inv_load_mask); +#endif + } + else { + x = @isa@_masked_gather_ps(ones_f, ip, vindex, load_mask); + } + @vtype@ out = @isa@_@vectorf@_ps(x); + @masked_store@(op, @cvtps_epi32@(load_mask), out); + + ip += num_lanes*stride; + op += num_lanes; + num_remaining_elements -= num_lanes; + } +} +#endif +/**end repeat1**/ +/**end repeat**/ + +/**begin repeat + * #ISA = FMA, AVX512F# + * #isa = fma, avx512# + * #vsize = 256, 512# + * #BYTES = 32, 64# + * #cvtps_epi32 = _mm256_cvtps_epi32, # + * #mask = __m256i, __mmask8# + * #vsub = , _mask# + * #vtype = __m256d, __m512d# + * #vindextype = __m128i, __m256i# + * #vindexsize = 128, 256# + * #vindexload = _mm_loadu_si128, _mm256_loadu_si256# + * #cvtps_epi32 = _mm256_cvtpd_epi32, # + * #castmask = _mm256_castsi256_pd, # + * #masked_store = _mm256_maskstore_pd, _mm512_mask_storeu_pd# + * #CHK = HAVE_ATTRIBUTE_TARGET_AVX2_WITH_INTRINSICS, HAVE_ATTRIBUTE_TARGET_AVX512F_WITH_INTRINSICS# + */ + +/**begin repeat1 + * #func = sqrt, absolute, square, reciprocal, rint, ceil, floor, trunc# + * #vectorf = sqrt, abs, square, reciprocal, rint, ceil, floor, trunc# + * #replace_0_with_1 = 0, 0, 0, 1, 0, 0, 0, 0# + */ + +#if defined @CHK@ +static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ void +@ISA@_@func@_DOUBLE(npy_double* op, + npy_double* ip, + const npy_intp array_size, + const npy_intp steps) +{ + const npy_intp stride = steps/sizeof(npy_double); + const npy_int num_lanes = @BYTES@/sizeof(npy_double); + npy_intp num_remaining_elements = array_size; + @mask@ load_mask = @isa@_get_full_load_mask_pd(); +#if @replace_0_with_1@ + @mask@ inv_load_mask = @isa@_invert_mask_pd(load_mask); +#endif + @vtype@ ones_d = _mm@vsize@_set1_pd(1.0f); + npy_int indexarr[8]; + for (npy_int ii = 0; ii < 8; ii++) { + indexarr[ii] = ii*stride; + } + @vindextype@ vindex = @vindexload@((@vindextype@*)&indexarr[0]); + + while (num_remaining_elements > 0) { + if (num_remaining_elements < num_lanes) { + load_mask = @isa@_get_partial_load_mask_pd(num_remaining_elements, + num_lanes); +#if @replace_0_with_1@ + inv_load_mask = @isa@_invert_mask_pd(load_mask); #endif + } + @vtype@ x; + if (stride == 1) { + x = @isa@_masked_load_pd(load_mask, ip); +#if @replace_0_with_1@ + /* + * Replace masked elements with 1.0f to avoid divide by zero fp + * exception in reciprocal + */ + x = @isa@_set_masked_lanes_pd(x, ones_d, @castmask@(inv_load_mask)); +#endif + } + else { + x = @isa@_masked_gather_pd(ones_d, ip, vindex, @castmask@(load_mask)); + } + @vtype@ out = @isa@_@vectorf@_pd(x); + @masked_store@(op, load_mask, out); + + ip += num_lanes*stride; + op += num_lanes; + num_remaining_elements -= num_lanes; + } +} +#endif +/**end repeat1**/ /**end repeat**/ /**begin repeat @@ -1460,7 +1845,6 @@ static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ @vtype@ * #CHK = HAVE_ATTRIBUTE_TARGET_AVX2_WITH_INTRINSICS, HAVE_ATTRIBUTE_TARGET_AVX512F_WITH_INTRINSICS# */ - /* * Vectorized approximate sine/cosine algorithms: The following code is a * vectorized version of the algorithm presented here: @@ -1519,7 +1903,7 @@ static 
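In the `reciprocal` instantiations above, lanes outside the load mask are filled with `1.0` before the division, per the in-line comment, so the unused tail lanes cannot raise a spurious divide-by-zero exception; the masked store discards them afterwards. NumPy-level sketch of that masking step (helper name is mine):

```python
import numpy as np

def masked_reciprocal(x, load_mask):
    x = np.where(load_mask, x, 1.0)       # set_masked_lanes(x, ones, ~mask)
    return np.where(load_mask, 1.0 / x, 0.0)

out = masked_reciprocal(np.array([2.0, 0.0]), np.array([True, False]))
assert out.tolist() == [0.5, 0.0]         # masked lane never divided by 0
```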
NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ void @vtype@ quadrant, reduced_x, reduced_x2, cos, sin; @vtype@i iquadrant; @mask@ nan_mask, glibc_mask, sine_mask, negate_mask; - @mask@ load_mask = @isa@_get_full_load_mask(); + @mask@ load_mask = @isa@_get_full_load_mask_ps(); npy_intp num_remaining_elements = array_size; npy_int indexarr[16]; for (npy_int ii = 0; ii < 16; ii++) { @@ -1530,16 +1914,16 @@ static NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ void while (num_remaining_elements > 0) { if (num_remaining_elements < num_lanes) { - load_mask = @isa@_get_partial_load_mask(num_remaining_elements, + load_mask = @isa@_get_partial_load_mask_ps(num_remaining_elements, num_lanes); } @vtype@ x; if (stride == 1) { - x = @isa@_masked_load(load_mask, ip); + x = @isa@_masked_load_ps(load_mask, ip); } else { - x = @isa@_masked_gather(zero_f, ip, vindex, load_mask); + x = @isa@_masked_gather_ps(zero_f, ip, vindex, load_mask); } /* @@ -1551,7 +1935,7 @@ static NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ void glibc_mask = @isa@_in_range_mask(x, large_number,-large_number); glibc_mask = @and_masks@(load_mask, glibc_mask); nan_mask = _mm@vsize@_cmp_ps@vsub@(x, x, _CMP_NEQ_UQ); - x = @isa@_set_masked_lanes(x, zero_f, @or_masks@(nan_mask, glibc_mask)); + x = @isa@_set_masked_lanes_ps(x, zero_f, @or_masks@(nan_mask, glibc_mask)); npy_int iglibc_mask = @mask_to_int@(glibc_mask); if (iglibc_mask != @full_mask@) { @@ -1584,7 +1968,7 @@ static NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ void /* multiply by -1 for appropriate elements */ negate_mask = @isa@_should_negate(iquadrant, twos, twos); cos = @isa@_blend(cos, _mm@vsize@_sub_ps(zero_f, cos), negate_mask); - cos = @isa@_set_masked_lanes(cos, _mm@vsize@_set1_ps(NPY_NANF), nan_mask); + cos = @isa@_set_masked_lanes_ps(cos, _mm@vsize@_set1_ps(NPY_NANF), nan_mask); @masked_store@(op, @cvtps_epi32@(load_mask), cos); } @@ -1662,27 +2046,27 @@ static NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ void @vtype@i vindex = _mm@vsize@_loadu_si@vsize@((@vtype@i*)&indexarr[0]); @mask@ xmax_mask, xmin_mask, nan_mask, inf_mask; - @mask@ overflow_mask = @isa@_get_partial_load_mask(0, num_lanes); - @mask@ load_mask = @isa@_get_full_load_mask(); + @mask@ overflow_mask = @isa@_get_partial_load_mask_ps(0, num_lanes); + @mask@ load_mask = @isa@_get_full_load_mask_ps(); npy_intp num_remaining_elements = array_size; while (num_remaining_elements > 0) { if (num_remaining_elements < num_lanes) { - load_mask = @isa@_get_partial_load_mask(num_remaining_elements, - num_lanes); + load_mask = @isa@_get_partial_load_mask_ps(num_remaining_elements, + num_lanes); } @vtype@ x; if (stride == 1) { - x = @isa@_masked_load(load_mask, ip); + x = @isa@_masked_load_ps(load_mask, ip); } else { - x = @isa@_masked_gather(zeros_f, ip, vindex, load_mask); + x = @isa@_masked_gather_ps(zeros_f, ip, vindex, load_mask); } nan_mask = _mm@vsize@_cmp_ps@vsub@(x, x, _CMP_NEQ_UQ); - x = @isa@_set_masked_lanes(x, zeros_f, nan_mask); + x = @isa@_set_masked_lanes_ps(x, zeros_f, nan_mask); xmax_mask = _mm@vsize@_cmp_ps@vsub@(x, _mm@vsize@_set1_ps(xmax), _CMP_GE_OQ); xmin_mask = _mm@vsize@_cmp_ps@vsub@(x, _mm@vsize@_set1_ps(xmin), _CMP_LE_OQ); @@ -1690,7 +2074,7 @@ static NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ void overflow_mask = @or_masks@(overflow_mask, @xor_masks@(xmax_mask, inf_mask)); - x = @isa@_set_masked_lanes(x, zeros_f, @or_masks@( + x = @isa@_set_masked_lanes_ps(x, zeros_f, @or_masks@( @or_masks@(nan_mask, xmin_mask), xmax_mask)); quadrant = _mm@vsize@_mul_ps(x, log2e); @@ -1723,9 +2107,9 @@ static NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ void * elem < xmin; 
return 0.0f * elem = +/- nan, return nan */ - poly = @isa@_set_masked_lanes(poly, _mm@vsize@_set1_ps(NPY_NANF), nan_mask); - poly = @isa@_set_masked_lanes(poly, inf, xmax_mask); - poly = @isa@_set_masked_lanes(poly, zeros_f, xmin_mask); + poly = @isa@_set_masked_lanes_ps(poly, _mm@vsize@_set1_ps(NPY_NANF), nan_mask); + poly = @isa@_set_masked_lanes_ps(poly, inf, xmax_mask); + poly = @isa@_set_masked_lanes_ps(poly, zeros_f, xmin_mask); @masked_store@(op, @cvtps_epi32@(load_mask), poly); @@ -1790,24 +2174,24 @@ static NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ void @vtype@ poly, num_poly, denom_poly, exponent; @mask@ inf_mask, nan_mask, sqrt2_mask, zero_mask, negx_mask; - @mask@ invalid_mask = @isa@_get_partial_load_mask(0, num_lanes); + @mask@ invalid_mask = @isa@_get_partial_load_mask_ps(0, num_lanes); @mask@ divide_by_zero_mask = invalid_mask; - @mask@ load_mask = @isa@_get_full_load_mask(); + @mask@ load_mask = @isa@_get_full_load_mask_ps(); npy_intp num_remaining_elements = array_size; while (num_remaining_elements > 0) { if (num_remaining_elements < num_lanes) { - load_mask = @isa@_get_partial_load_mask(num_remaining_elements, - num_lanes); + load_mask = @isa@_get_partial_load_mask_ps(num_remaining_elements, + num_lanes); } @vtype@ x_in; if (stride == 1) { - x_in = @isa@_masked_load(load_mask, ip); + x_in = @isa@_masked_load_ps(load_mask, ip); } else { - x_in = @isa@_masked_gather(zeros_f, ip, vindex, load_mask); + x_in = @isa@_masked_gather_ps(zeros_f, ip, vindex, load_mask); } negx_mask = _mm@vsize@_cmp_ps@vsub@(x_in, zeros_f, _CMP_LT_OQ); @@ -1818,7 +2202,7 @@ static NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ void @and_masks@(zero_mask, load_mask)); invalid_mask = @or_masks@(invalid_mask, negx_mask); - @vtype@ x = @isa@_set_masked_lanes(x_in, zeros_f, negx_mask); + @vtype@ x = @isa@_set_masked_lanes_ps(x_in, zeros_f, negx_mask); /* set x = normalized mantissa */ exponent = @isa@_get_exponent(x); @@ -1852,10 +2236,10 @@ static NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ void * x = +/- NAN; return NAN * x = 0.0f; return -INF */ - poly = @isa@_set_masked_lanes(poly, nan, nan_mask); - poly = @isa@_set_masked_lanes(poly, neg_nan, negx_mask); - poly = @isa@_set_masked_lanes(poly, neg_inf, zero_mask); - poly = @isa@_set_masked_lanes(poly, inf, inf_mask); + poly = @isa@_set_masked_lanes_ps(poly, nan, nan_mask); + poly = @isa@_set_masked_lanes_ps(poly, neg_nan, negx_mask); + poly = @isa@_set_masked_lanes_ps(poly, neg_inf, zero_mask); + poly = @isa@_set_masked_lanes_ps(poly, inf, inf_mask); @masked_store@(op, @cvtps_epi32@(load_mask), poly); diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c index 5f9a0f7f4..1dc581977 100644 --- a/numpy/core/src/umath/ufunc_object.c +++ b/numpy/core/src/umath/ufunc_object.c @@ -908,7 +908,7 @@ parse_ufunc_keywords(PyUFuncObject *ufunc, PyObject *kwds, PyObject **kwnames, . 
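The comment in the exp hunk above spells out the lanes that get patched in after the polynomial: `x >= xmax` returns `inf` (and records overflow), `x <= xmin` returns `0.0f`, and NaN propagates. For float32 the cutoffs sit roughly at `+88.7` and `-104`; a quick check of the visible contract:

```python
import numpy as np

x = np.array([100.0, -200.0, np.nan], dtype=np.float32)
with np.errstate(over='ignore', invalid='ignore'):
    out = np.exp(x)
assert np.isinf(out[0]) and out[1] == 0.0 and np.isnan(out[2])
```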
typedef int converter(PyObject *, void *); while (PyDict_Next(kwds, &pos, &key, &value)) { - int i; + npy_intp i; converter *convert; void *output = NULL; npy_intp index = locate_key(kwnames, key); @@ -1193,34 +1193,11 @@ get_ufunc_arguments(PyUFuncObject *ufunc, } } else { - /* - * If the deprecated behavior is ever removed, - * keep only the else branch of this if-else - */ - if (PyArray_Check(out_kwd) || out_kwd == Py_None) { - if (DEPRECATE("passing a single array to the " - "'out' keyword argument of a " - "ufunc with\n" - "more than one output will " - "result in an error in the " - "future") < 0) { - /* The future error message */ - PyErr_SetString(PyExc_TypeError, - "'out' must be a tuple of arrays"); - goto fail; - } - if (_set_out_array(out_kwd, out_op+nin) < 0) { - goto fail; - } - } - else { - PyErr_SetString(PyExc_TypeError, - nout > 1 ? "'out' must be a tuple " - "of arrays" : - "'out' must be an array or a " - "tuple of a single array"); - goto fail; - } + PyErr_SetString(PyExc_TypeError, + nout > 1 ? "'out' must be a tuple of arrays" : + "'out' must be an array or a tuple with " + "a single array"); + goto fail; } } /* @@ -2297,7 +2274,7 @@ _parse_axes_arg(PyUFuncObject *ufunc, int op_core_num_dims[], PyObject *axes, * Returns 0 on success, and -1 on failure */ static int -_parse_axis_arg(PyUFuncObject *ufunc, int core_num_dims[], PyObject *axis, +_parse_axis_arg(PyUFuncObject *ufunc, const int core_num_dims[], PyObject *axis, PyArrayObject **op, int broadcast_ndim, int **remap_axis) { int nop = ufunc->nargs; int iop, axis_int; @@ -2368,7 +2345,7 @@ _parse_axis_arg(PyUFuncObject *ufunc, int core_num_dims[], PyObject *axis, */ static int _get_coredim_sizes(PyUFuncObject *ufunc, PyArrayObject **op, - int *op_core_num_dims, npy_uint32 *core_dim_flags, + const int *op_core_num_dims, npy_uint32 *core_dim_flags, npy_intp *core_dim_sizes, int **remap_axis) { int i; int nin = ufunc->nin; @@ -4053,14 +4030,14 @@ PyUFunc_Reduceat(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *ind, int *op_axes[3] = {op_axes_arrays[0], op_axes_arrays[1], op_axes_arrays[2]}; npy_uint32 op_flags[3]; - int i, idim, ndim, otype_final; + int idim, ndim, otype_final; int need_outer_iterator = 0; NpyIter *iter = NULL; /* The reduceat indices - ind must be validated outside this call */ npy_intp *reduceat_ind; - npy_intp ind_size, red_axis_size; + npy_intp i, ind_size, red_axis_size; /* The selected inner loop */ PyUFuncGenericFunction innerloop = NULL; void *innerloopdata = NULL; @@ -4081,8 +4058,8 @@ PyUFunc_Reduceat(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *ind, for (i = 0; i < ind_size; ++i) { if (reduceat_ind[i] < 0 || reduceat_ind[i] >= red_axis_size) { PyErr_Format(PyExc_IndexError, - "index %d out-of-bounds in %s.%s [0, %d)", - (int)reduceat_ind[i], ufunc_name, opname, (int)red_axis_size); + "index %" NPY_INTP_FMT " out-of-bounds in %s.%s [0, %" NPY_INTP_FMT ")", + reduceat_ind[i], ufunc_name, opname, red_axis_size); return NULL; } } @@ -4146,7 +4123,7 @@ PyUFunc_Reduceat(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *ind, #endif /* Set up the op_axes for the outer loop */ - for (i = 0, idim = 0; idim < ndim; ++idim) { + for (idim = 0; idim < ndim; ++idim) { /* Use the i-th iteration dimension to match up ind */ if (idim == axis) { op_axes_arrays[0][idim] = axis; @@ -4866,7 +4843,7 @@ ufunc_seterr(PyObject *NPY_UNUSED(dummy), PyObject *args) NPY_NO_EXPORT int PyUFunc_ReplaceLoopBySignature(PyUFuncObject *func, PyUFuncGenericFunction newfunc, - int *signature, + const 
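The `get_ufunc_arguments` hunk above removes the deprecated path where a single array could be passed as `out=` to a ufunc with more than one output; that case is now a straight `TypeError`:

```python
import numpy as np

x = np.linspace(0.5, 2.5, 3)
frac, whole = np.empty(3), np.empty(3)
np.modf(x, out=(frac, whole))       # OK: one slot per output
try:
    np.modf(x, out=frac)            # two outputs, one array
except TypeError:
    pass                            # previously only a DeprecationWarning
```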
int *signature, PyUFuncGenericFunction *oldfunc) { int i, j; @@ -4921,7 +4898,7 @@ PyUFunc_FromFuncAndDataAndSignatureAndIdentity(PyUFuncGenericFunction *func, voi char *types, int ntypes, int nin, int nout, int identity, const char *name, const char *doc, - int unused, const char *signature, + const int unused, const char *signature, PyObject *identity_value) { PyUFuncObject *ufunc; @@ -5223,7 +5200,7 @@ NPY_NO_EXPORT int PyUFunc_RegisterLoopForType(PyUFuncObject *ufunc, int usertype, PyUFuncGenericFunction function, - int *arg_types, + const int *arg_types, void *data) { PyArray_Descr *descr; diff --git a/numpy/core/src/umath/ufunc_type_resolution.c b/numpy/core/src/umath/ufunc_type_resolution.c index 9be7b63a0..f93d8229e 100644 --- a/numpy/core/src/umath/ufunc_type_resolution.c +++ b/numpy/core/src/umath/ufunc_type_resolution.c @@ -883,7 +883,7 @@ PyUFunc_SubtractionTypeResolver(PyUFuncObject *ufunc, /* The type resolver would have upcast already */ if (out_dtypes[0]->type_num == NPY_BOOL) { PyErr_Format(PyExc_TypeError, - "numpy boolean subtract, the `-` operator, is deprecated, " + "numpy boolean subtract, the `-` operator, is not supported, " "use the bitwise_xor, the `^` operator, or the logical_xor " "function instead."); return -1; diff --git a/numpy/core/tests/test__exceptions.py b/numpy/core/tests/test__exceptions.py new file mode 100644 index 000000000..494b51f34 --- /dev/null +++ b/numpy/core/tests/test__exceptions.py @@ -0,0 +1,42 @@ +""" +Tests of the ._exceptions module. Primarily for exercising the __str__ methods. +""" +import numpy as np + +_ArrayMemoryError = np.core._exceptions._ArrayMemoryError + +class TestArrayMemoryError: + def test_str(self): + e = _ArrayMemoryError((1023,), np.dtype(np.uint8)) + str(e) # not crashing is enough + + # testing these properties is easier than testing the full string repr + def test__size_to_string(self): + """ Test e._size_to_string """ + f = _ArrayMemoryError._size_to_string + Ki = 1024 + assert f(0) == '0 bytes' + assert f(1) == '1 bytes' + assert f(1023) == '1023 bytes' + assert f(Ki) == '1.00 KiB' + assert f(Ki+1) == '1.00 KiB' + assert f(10*Ki) == '10.0 KiB' + assert f(int(999.4*Ki)) == '999. KiB' + assert f(int(1023.4*Ki)) == '1023. KiB' + assert f(int(1023.5*Ki)) == '1.00 MiB' + assert f(Ki*Ki) == '1.00 MiB' + + # 1023.9999 Mib should round to 1 GiB + assert f(int(Ki*Ki*Ki*0.9999)) == '1.00 GiB' + assert f(Ki*Ki*Ki*Ki*Ki*Ki) == '1.00 EiB' + # larger than sys.maxsize, adding larger prefices isn't going to help + # anyway. + assert f(Ki*Ki*Ki*Ki*Ki*Ki*123456) == '123456. 
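Boolean subtract stays disallowed; the type-resolution hunk above only changes the message from "is deprecated" to "is not supported", with xor as the suggested replacement:

```python
import numpy as np

a = np.array([True, False])
try:
    a - a                            # TypeError: use ^ / logical_xor
except TypeError:
    pass
assert ((a ^ a) == np.logical_xor(a, a)).all()
```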
EiB' + + def test__total_size(self): + """ Test e._total_size """ + e = _ArrayMemoryError((1,), np.dtype(np.uint8)) + assert e._total_size == 1 + + e = _ArrayMemoryError((2, 4), np.dtype((np.uint64, 16))) + assert e._total_size == 1024 diff --git a/numpy/core/tests/test_datetime.py b/numpy/core/tests/test_datetime.py index f99c0f72b..11f900c5f 100644 --- a/numpy/core/tests/test_datetime.py +++ b/numpy/core/tests/test_datetime.py @@ -1333,10 +1333,10 @@ class TestDateTime(object): # Interaction with NaT a = np.array('1999-03-12T13', dtype='M8[2m]') dtnat = np.array('NaT', dtype='M8[h]') - assert_equal(np.minimum(a, dtnat), a) - assert_equal(np.minimum(dtnat, a), a) - assert_equal(np.maximum(a, dtnat), a) - assert_equal(np.maximum(dtnat, a), a) + assert_equal(np.minimum(a, dtnat), dtnat) + assert_equal(np.minimum(dtnat, a), dtnat) + assert_equal(np.maximum(a, dtnat), dtnat) + assert_equal(np.maximum(dtnat, a), dtnat) # Also do timedelta a = np.array(3, dtype='m8[h]') @@ -1831,7 +1831,7 @@ class TestDateTime(object): def test_timedelta_arange_no_dtype(self): d = np.array(5, dtype="m8[D]") assert_equal(np.arange(d, d + 1), d) - assert_raises(ValueError, np.arange, d) + assert_equal(np.arange(d), np.arange(0, d)) def test_datetime_maximum_reduce(self): a = np.array(['2010-01-02', '1999-03-14', '1833-03'], dtype='M8[D]') diff --git a/numpy/core/tests/test_deprecations.py b/numpy/core/tests/test_deprecations.py index e8aa0c70b..8bffaa9af 100644 --- a/numpy/core/tests/test_deprecations.py +++ b/numpy/core/tests/test_deprecations.py @@ -10,12 +10,16 @@ import sys import operator import warnings import pytest +import shutil +import tempfile import numpy as np from numpy.testing import ( - assert_raises, assert_warns, assert_ + assert_raises, assert_warns, assert_, assert_array_equal ) +from numpy.core._multiarray_tests import fromstring_null_term_c_api + try: import pytz _has_pytz = True @@ -271,36 +275,6 @@ class TestNonCContiguousViewDeprecation(_DeprecationTestCase): self.assert_deprecated(np.ones((2,2)).T.view, args=(np.int8,)) -class TestInvalidOrderParameterInputForFlattenArrayDeprecation(_DeprecationTestCase): - """Invalid arguments to the ORDER parameter in array.flatten() should not be - allowed and should raise an error. However, in the interests of not breaking - code that may inadvertently pass invalid arguments to this parameter, a - DeprecationWarning will be issued instead for the time being to give developers - time to refactor relevant code. - """ - - def test_flatten_array_non_string_arg(self): - x = np.zeros((3, 5)) - self.message = ("Non-string object detected for " - "the array ordering. Please pass " - "in 'C', 'F', 'A', or 'K' instead") - self.assert_deprecated(x.flatten, args=(np.pi,)) - - def test_flatten_array_invalid_string_arg(self): - # Tests that a DeprecationWarning is raised - # when a string of length greater than one - # starting with "C", "F", "A", or "K" (case- - # and unicode-insensitive) is passed in for - # the ORDER parameter. Otherwise, a TypeError - # will be raised! - - x = np.zeros((3, 5)) - self.message = ("Non length-one string passed " - "in for the array ordering. Please " - "pass in 'C', 'F', 'A', or 'K' instead") - self.assert_deprecated(x.flatten, args=("FACK",)) - - class TestArrayDataAttributeAssignmentDeprecation(_DeprecationTestCase): """Assigning the 'data' attribute of an ndarray is unsafe as pointed out in gh-7093. 
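The `test_datetime` change above flips the expected results: `np.minimum`/`np.maximum` now propagate NaT the way they propagate NaN, instead of ignoring it:

```python
import numpy as np

a = np.array('1999-03-12T13', dtype='M8[2m]')
nat = np.array('NaT', dtype='M8[h]')
assert np.isnat(np.minimum(a, nat)) and np.isnat(np.maximum(nat, a))
```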
Eventually, such assignment should NOT be allowed, but @@ -319,22 +293,6 @@ class TestArrayDataAttributeAssignmentDeprecation(_DeprecationTestCase): self.assert_deprecated(a.__setattr__, args=('data', b.data)) -class TestLinspaceInvalidNumParameter(_DeprecationTestCase): - """Argument to the num parameter in linspace that cannot be - safely interpreted as an integer is deprecated in 1.12.0. - - Argument to the num parameter in linspace that cannot be - safely interpreted as an integer should not be allowed. - In the interest of not breaking code that passes - an argument that could still be interpreted as an integer, a - DeprecationWarning will be issued for the time being to give - developers time to refactor relevant code. - """ - def test_float_arg(self): - # 2016-02-25, PR#7328 - self.assert_deprecated(np.linspace, args=(0, 10, 2.5)) - - class TestBinaryReprInsufficientWidthParameterForRepresentation(_DeprecationTestCase): """ If a 'width' parameter is passed into ``binary_repr`` that is insufficient to @@ -442,6 +400,18 @@ class TestNPY_CHAR(_DeprecationTestCase): assert_(npy_char_deprecation() == 'S1') +class TestPyArray_AS1D(_DeprecationTestCase): + def test_npy_pyarrayas1d_deprecation(self): + from numpy.core._multiarray_tests import npy_pyarrayas1d_deprecation + assert_raises(NotImplementedError, npy_pyarrayas1d_deprecation) + + +class TestPyArray_AS2D(_DeprecationTestCase): + def test_npy_pyarrayas2d_deprecation(self): + from numpy.core._multiarray_tests import npy_pyarrayas2d_deprecation + assert_raises(NotImplementedError, npy_pyarrayas2d_deprecation) + + class Test_UPDATEIFCOPY(_DeprecationTestCase): """ v1.14 deprecates creating an array with the UPDATEIFCOPY flag, use @@ -514,17 +484,71 @@ class TestPositiveOnNonNumerical(_DeprecationTestCase): def test_positive_on_non_number(self): self.assert_deprecated(operator.pos, args=(np.array('foo'),)) + class TestFromstring(_DeprecationTestCase): # 2017-10-19, 1.14 def test_fromstring(self): self.assert_deprecated(np.fromstring, args=('\x00'*80,)) + +class TestFromStringAndFileInvalidData(_DeprecationTestCase): + # 2019-06-08, 1.17.0 + # Tests should be moved to real tests when deprecation is done. 
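`TestLinspaceInvalidNumParameter` is deleted above because the 1.12-era DeprecationWarning for a non-integral `num` has graduated to an error:

```python
import numpy as np

np.linspace(0, 1, 3)                # fine
try:
    np.linspace(0, 1, num=2.5)      # no longer merely deprecated
except TypeError:
    pass
```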
+ message = "string or file could not be read to its end" + + @pytest.mark.parametrize("invalid_str", [",invalid_data", "invalid_sep"]) + def test_deprecate_unparsable_data_file(self, invalid_str): + x = np.array([1.51, 2, 3.51, 4], dtype=float) + + with tempfile.TemporaryFile(mode="w") as f: + x.tofile(f, sep=',', format='%.2f') + f.write(invalid_str) + + f.seek(0) + self.assert_deprecated(lambda: np.fromfile(f, sep=",")) + f.seek(0) + self.assert_deprecated(lambda: np.fromfile(f, sep=",", count=5)) + # Should not raise: + with warnings.catch_warnings(): + warnings.simplefilter("error", DeprecationWarning) + f.seek(0) + res = np.fromfile(f, sep=",", count=4) + assert_array_equal(res, x) + + @pytest.mark.parametrize("invalid_str", [",invalid_data", "invalid_sep"]) + def test_deprecate_unparsable_string(self, invalid_str): + x = np.array([1.51, 2, 3.51, 4], dtype=float) + x_str = "1.51,2,3.51,4{}".format(invalid_str) + + self.assert_deprecated(lambda: np.fromstring(x_str, sep=",")) + self.assert_deprecated(lambda: np.fromstring(x_str, sep=",", count=5)) + + # The C-level API can use not fixed size, but 0 terminated strings, + # so test that as well: + bytestr = x_str.encode("ascii") + self.assert_deprecated(lambda: fromstring_null_term_c_api(bytestr)) + + with assert_warns(DeprecationWarning): + # this is slightly strange, in that fromstring leaves data + # potentially uninitialized (would be good to error when all is + # read, but count is larger then actual data maybe). + res = np.fromstring(x_str, sep=",", count=5) + assert_array_equal(res[:-1], x) + + with warnings.catch_warnings(): + warnings.simplefilter("error", DeprecationWarning) + + # Should not raise: + res = np.fromstring(x_str, sep=",", count=4) + assert_array_equal(res, x) + + class Test_GetSet_NumericOps(_DeprecationTestCase): # 2018-09-20, 1.16.0 def test_get_numeric_ops(self): from numpy.core._multiarray_tests import getset_numericops self.assert_deprecated(getset_numericops, num=2) - + # empty kwargs prevents any state actually changing which would break # other tests. 
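The new `TestFromStringAndFileInvalidData` cases above pin the transition for `fromstring`/`fromfile` with a separator: input that cannot be parsed to its end now emits a DeprecationWarning (to become an error later), while a `count` that stops before the junk still parses cleanly:

```python
import numpy as np
import warnings

x_str = "1.51,2,3.51,4,invalid_data"
with warnings.catch_warnings(record=True) as w:
    warnings.simplefilter("always")
    np.fromstring(x_str, sep=",")                 # trailing junk: warns
assert any(issubclass(i.category, DeprecationWarning) for i in w)

with warnings.catch_warnings():
    warnings.simplefilter("error", DeprecationWarning)
    res = np.fromstring(x_str, sep=",", count=4)  # stops before the junk
assert res.tolist() == [1.51, 2.0, 3.51, 4.0]
```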
self.assert_deprecated(np.set_numeric_ops, kwargs={}) diff --git a/numpy/core/tests/test_dtype.py b/numpy/core/tests/test_dtype.py index f60eab696..d2fbbae5b 100644 --- a/numpy/core/tests/test_dtype.py +++ b/numpy/core/tests/test_dtype.py @@ -419,6 +419,31 @@ class TestRecord(object): assert_raises(ValueError, np.dtype, {'formats': ['i4', 'i4'], 'f0': ('i4', 0), 'f1':('i4', 4)}) + def test_fieldless_views(self): + a = np.zeros(2, dtype={'names':[], 'formats':[], 'offsets':[], + 'itemsize':8}) + assert_raises(ValueError, a.view, np.dtype([])) + + d = np.dtype((np.dtype([]), 10)) + assert_equal(d.shape, (10,)) + assert_equal(d.itemsize, 0) + assert_equal(d.base, np.dtype([])) + + arr = np.fromiter((() for i in range(10)), []) + assert_equal(arr.dtype, np.dtype([])) + assert_raises(ValueError, np.frombuffer, b'', dtype=[]) + assert_equal(np.frombuffer(b'', dtype=[], count=2), + np.empty(2, dtype=[])) + + assert_raises(ValueError, np.dtype, ([], 'f8')) + assert_raises(ValueError, np.zeros(1, dtype='i4').view, []) + + assert_equal(np.zeros(2, dtype=[]) == np.zeros(2, dtype=[]), + np.ones(2, dtype=bool)) + + assert_equal(np.zeros((1, 2), dtype=[]) == a, + np.ones((1, 2), dtype=bool)) + class TestSubarray(object): def test_single_subarray(self): @@ -938,13 +963,6 @@ class TestDtypeAttributes(object): new_dtype = np.dtype(dtype.descr) assert_equal(new_dtype.itemsize, 16) - @pytest.mark.parametrize('t', np.typeDict.values()) - def test_name_builtin(self, t): - name = t.__name__ - if name.endswith('_'): - name = name[:-1] - assert_equal(np.dtype(t).name, name) - def test_name_dtype_subclass(self): # Ticket #4357 class user_def_subcls(np.void): diff --git a/numpy/core/tests/test_einsum.py b/numpy/core/tests/test_einsum.py index cfeeb8a90..1b5b4cb26 100644 --- a/numpy/core/tests/test_einsum.py +++ b/numpy/core/tests/test_einsum.py @@ -5,7 +5,7 @@ import itertools import numpy as np from numpy.testing import ( assert_, assert_equal, assert_array_equal, assert_almost_equal, - assert_raises, suppress_warnings, assert_raises_regex + assert_raises, suppress_warnings, assert_raises_regex, assert_allclose ) # Setup for optimize einsum @@ -700,6 +700,14 @@ class TestEinsum(object): y2 = x[idx[:, None], idx[:, None], idx, idx] assert_equal(y1, y2) + def test_einsum_failed_on_p9_and_s390x(self): + # Issues gh-14692 and gh-12689 + # Bug with signed vs unsigned char errored on power9 and s390x Linux + tensor = np.random.random_sample((10, 10, 10, 10)) + x = np.einsum('ijij->', tensor) + y = tensor.trace(axis1=0, axis2=2).trace() + assert_allclose(x, y) + def test_einsum_all_contig_non_contig_output(self): # Issue gh-5907, tests that the all contiguous special case # actually checks the contiguity of the output diff --git a/numpy/core/tests/test_function_base.py b/numpy/core/tests/test_function_base.py index 6f5709372..84b60b19c 100644 --- a/numpy/core/tests/test_function_base.py +++ b/numpy/core/tests/test_function_base.py @@ -236,10 +236,7 @@ class TestLinspace(object): def test_corner(self): y = list(linspace(0, 1, 1)) assert_(y == [0.0], y) - with suppress_warnings() as sup: - sup.filter(DeprecationWarning, ".*safely interpreted as an integer") - y = list(linspace(0, 1, 2.5)) - assert_(y == [0.0, 1.0]) + assert_raises(TypeError, linspace, 0, 1, num=2.5) def test_type(self): t1 = linspace(0, 1, 0).dtype diff --git a/numpy/core/tests/test_longdouble.py b/numpy/core/tests/test_longdouble.py index ee4197f8f..59ac5923c 100644 --- a/numpy/core/tests/test_longdouble.py +++ b/numpy/core/tests/test_longdouble.py @@ 
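`test_fieldless_views` above nails down that field-less structured dtypes are rejected where they would previously yield degenerate zero-itemsize views:

```python
import numpy as np

a = np.zeros(2, dtype={'names': [], 'formats': [], 'offsets': [],
                       'itemsize': 8})
try:
    a.view(np.dtype([]))            # ValueError
except ValueError:
    pass
try:
    np.frombuffer(b'', dtype=[])    # ValueError
except ValueError:
    pass
```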
-5,7 +5,8 @@ import pytest import numpy as np from numpy.testing import ( - assert_, assert_equal, assert_raises, assert_array_equal, temppath, + assert_, assert_equal, assert_raises, assert_warns, assert_array_equal, + temppath, ) from numpy.core.tests._locales import CommaDecimalPointLocale @@ -71,18 +72,21 @@ def test_fromstring(): def test_fromstring_bogus(): - assert_equal(np.fromstring("1. 2. 3. flop 4.", dtype=float, sep=" "), - np.array([1., 2., 3.])) + with assert_warns(DeprecationWarning): + assert_equal(np.fromstring("1. 2. 3. flop 4.", dtype=float, sep=" "), + np.array([1., 2., 3.])) def test_fromstring_empty(): - assert_equal(np.fromstring("xxxxx", sep="x"), - np.array([])) + with assert_warns(DeprecationWarning): + assert_equal(np.fromstring("xxxxx", sep="x"), + np.array([])) def test_fromstring_missing(): - assert_equal(np.fromstring("1xx3x4x5x6", sep="x"), - np.array([1])) + with assert_warns(DeprecationWarning): + assert_equal(np.fromstring("1xx3x4x5x6", sep="x"), + np.array([1])) class TestFileBased(object): @@ -95,7 +99,9 @@ class TestFileBased(object): with temppath() as path: with open(path, 'wt') as f: f.write("1. 2. 3. flop 4.\n") - res = np.fromfile(path, dtype=float, sep=" ") + + with assert_warns(DeprecationWarning): + res = np.fromfile(path, dtype=float, sep=" ") assert_equal(res, np.array([1., 2., 3.])) @pytest.mark.skipif(string_to_longdouble_inaccurate, @@ -186,12 +192,14 @@ class TestCommaDecimalPointLocale(CommaDecimalPointLocale): assert_equal(a[0], f) def test_fromstring_best_effort_float(self): - assert_equal(np.fromstring("1,234", dtype=float, sep=" "), - np.array([1.])) + with assert_warns(DeprecationWarning): + assert_equal(np.fromstring("1,234", dtype=float, sep=" "), + np.array([1.])) def test_fromstring_best_effort(self): - assert_equal(np.fromstring("1,234", dtype=np.longdouble, sep=" "), - np.array([1.])) + with assert_warns(DeprecationWarning): + assert_equal(np.fromstring("1,234", dtype=np.longdouble, sep=" "), + np.array([1.])) def test_fromstring_foreign(self): s = "1.234" @@ -204,8 +212,10 @@ class TestCommaDecimalPointLocale(CommaDecimalPointLocale): assert_array_equal(a, b) def test_fromstring_foreign_value(self): - b = np.fromstring("1,234", dtype=np.longdouble, sep=" ") - assert_array_equal(b[0], 1) + with assert_warns(DeprecationWarning): + b = np.fromstring("1,234", dtype=np.longdouble, sep=" ") + assert_array_equal(b[0], 1) + @pytest.mark.parametrize("int_val", [ # cases discussed in gh-10723 diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py index 2593045ed..c699a9bc1 100644 --- a/numpy/core/tests/test_multiarray.py +++ b/numpy/core/tests/test_multiarray.py @@ -114,7 +114,7 @@ class TestFlags(object): # Ensure that any base being writeable is sufficient to change flag; # this is especially interesting for arrays from an array interface. 
arr = np.arange(10) - + class subclass(np.ndarray): pass @@ -2789,6 +2789,12 @@ class TestMethods(object): assert_equal(x1.flatten('F'), y1f) assert_equal(x1.flatten('F'), x1.T.flatten()) + def test_flatten_invalid_order(self): + # invalid after gh-14596 + for order in ['Z', 'c', False, True, 0, 8]: + x = np.array([[1, 2, 3], [4, 5, 6]], np.int32) + assert_raises(ValueError, x.flatten, {"order": order}) + @pytest.mark.parametrize('func', (np.dot, np.matmul)) def test_arr_mult(self, func): a = np.array([[1, 0], [0, 1]]) @@ -3596,10 +3602,10 @@ class TestBinop(object): assert_equal(np.modf(dummy, out=(None, a)), (1,)) assert_equal(np.modf(dummy, out=(dummy, a)), (1,)) assert_equal(np.modf(a, out=(dummy, a)), 0) - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings('always', '', DeprecationWarning) - assert_equal(np.modf(dummy, out=a), (0,)) - assert_(w[0].category is DeprecationWarning) + with assert_raises(TypeError): + # Out argument must be tuple, since there are multiple outputs + np.modf(dummy, out=a) + assert_raises(ValueError, np.modf, dummy, out=(a,)) # 2 inputs, 1 output @@ -3964,13 +3970,13 @@ class TestPickling(object): def test_datetime64_byteorder(self): original = np.array([['2015-02-24T00:00:00.000000000']], dtype='datetime64[ns]') - + original_byte_reversed = original.copy(order='K') original_byte_reversed.dtype = original_byte_reversed.dtype.newbyteorder('S') original_byte_reversed.byteswap(inplace=True) new = pickle.loads(pickle.dumps(original_byte_reversed)) - + assert_equal(original.dtype, new.dtype) @@ -4099,17 +4105,17 @@ class TestArgmax(object): np.datetime64('2010-01-03T05:14:12'), np.datetime64('NaT'), np.datetime64('2015-09-23T10:10:13'), - np.datetime64('1932-10-10T03:50:30')], 4), + np.datetime64('1932-10-10T03:50:30')], 0), ([np.datetime64('2059-03-14T12:43:12'), np.datetime64('1996-09-21T14:43:15'), np.datetime64('NaT'), np.datetime64('2022-12-25T16:02:16'), np.datetime64('1963-10-04T03:14:12'), - np.datetime64('2013-05-08T18:15:23')], 0), + np.datetime64('2013-05-08T18:15:23')], 2), ([np.timedelta64(2, 's'), np.timedelta64(1, 's'), np.timedelta64('NaT', 's'), - np.timedelta64(3, 's')], 3), + np.timedelta64(3, 's')], 2), ([np.timedelta64('NaT', 's')] * 3, 0), ([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35), @@ -4234,17 +4240,17 @@ class TestArgmin(object): np.datetime64('2010-01-03T05:14:12'), np.datetime64('NaT'), np.datetime64('2015-09-23T10:10:13'), - np.datetime64('1932-10-10T03:50:30')], 5), + np.datetime64('1932-10-10T03:50:30')], 0), ([np.datetime64('2059-03-14T12:43:12'), np.datetime64('1996-09-21T14:43:15'), np.datetime64('NaT'), np.datetime64('2022-12-25T16:02:16'), np.datetime64('1963-10-04T03:14:12'), - np.datetime64('2013-05-08T18:15:23')], 4), + np.datetime64('2013-05-08T18:15:23')], 2), ([np.timedelta64(2, 's'), np.timedelta64(1, 's'), np.timedelta64('NaT', 's'), - np.timedelta64(3, 's')], 1), + np.timedelta64(3, 's')], 2), ([np.timedelta64('NaT', 's')] * 3, 0), ([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35), @@ -4360,18 +4366,14 @@ class TestMinMax(object): assert_equal(np.amax([[1, 2, 3]], axis=1), 3) def test_datetime(self): - # NaTs are ignored + # Do not ignore NaT for dtype in ('m8[s]', 'm8[Y]'): a = np.arange(10).astype(dtype) - a[3] = 'NaT' assert_equal(np.amin(a), a[0]) assert_equal(np.amax(a), a[9]) - a[0] = 'NaT' - assert_equal(np.amin(a), a[1]) - assert_equal(np.amax(a), a[9]) - a.fill('NaT') - assert_equal(np.amin(a), a[0]) - assert_equal(np.amax(a), a[0]) + a[3] = 'NaT' + 
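The argmax/argmin/amin/amax expectation changes above all move in the same direction: NaT now poisons datetime/timedelta reductions the way NaN poisons float ones, and the arg-reductions report the first NaT's index:

```python
import numpy as np

a = np.array(['2059-03-14', '1996-09-21', 'NaT'], dtype='M8[D]')
assert a.argmax() == 2 and a.argmin() == 2   # index of the first NaT

b = np.arange(10).astype('m8[s]')
b[3] = 'NaT'
assert np.isnat(np.amin(b)) and np.isnat(np.amax(b))
```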
assert_equal(np.amin(a), a[3]) + assert_equal(np.amax(a), a[3]) class TestNewaxis(object): @@ -4587,18 +4589,26 @@ class TestTake(object): assert_equal(y, np.array([1, 2, 3])) class TestLexsort(object): - def test_basic(self): - a = [1, 2, 1, 3, 1, 5] - b = [0, 4, 5, 6, 2, 3] + @pytest.mark.parametrize('dtype',[ + np.uint8, np.uint16, np.uint32, np.uint64, + np.int8, np.int16, np.int32, np.int64, + np.float16, np.float32, np.float64 + ]) + def test_basic(self, dtype): + a = np.array([1, 2, 1, 3, 1, 5], dtype=dtype) + b = np.array([0, 4, 5, 6, 2, 3], dtype=dtype) idx = np.lexsort((b, a)) expected_idx = np.array([0, 4, 2, 1, 3, 5]) assert_array_equal(idx, expected_idx) + assert_array_equal(a[idx], np.sort(a)) - x = np.vstack((b, a)) - idx = np.lexsort(x) - assert_array_equal(idx, expected_idx) + def test_mixed(self): + a = np.array([1, 2, 1, 3, 1, 5]) + b = np.array([0, 4, 5, 6, 2, 3], dtype='datetime64[D]') - assert_array_equal(x[1][idx], np.sort(x[1])) + idx = np.lexsort((b, a)) + expected_idx = np.array([0, 4, 2, 1, 3, 5]) + assert_array_equal(idx, expected_idx) def test_datetime(self): a = np.array([0,0,0], dtype='datetime64[D]') @@ -4865,7 +4875,7 @@ class TestIO(object): offset_bytes = self.dtype.itemsize z = np.fromfile(f, dtype=self.dtype, offset=offset_bytes) assert_array_equal(z, self.x.flat[offset_items+count_items+1:]) - + with open(self.filename, 'wb') as f: self.x.tofile(f, sep=",") @@ -4955,7 +4965,8 @@ class TestIO(object): self._check_from(b'1,2,3,4', [1., 2., 3., 4.], dtype=float, sep=',') def test_malformed(self): - self._check_from(b'1.234 1,234', [1.234, 1.], sep=' ') + with assert_warns(DeprecationWarning): + self._check_from(b'1.234 1,234', [1.234, 1.], sep=' ') def test_long_sep(self): self._check_from(b'1_x_3_x_4_x_5', [1, 3, 4, 5], sep='_x_') @@ -5008,6 +5019,19 @@ class TestIO(object): self.test_tofile_sep() self.test_tofile_format() + def test_fromfile_subarray_binary(self): + # Test subarray dtypes which are absorbed into the shape + x = np.arange(24, dtype="i4").reshape(2, 3, 4) + x.tofile(self.filename) + res = np.fromfile(self.filename, dtype="(3,4)i4") + assert_array_equal(x, res) + + x_str = x.tobytes() + with assert_warns(DeprecationWarning): + # binary fromstring is deprecated + res = np.fromstring(x_str, dtype="(3,4)i4") + assert_array_equal(x, res) + class TestFromBuffer(object): @pytest.mark.parametrize('byteorder', ['<', '>']) @@ -6222,14 +6246,14 @@ class TestMatmul(MatmulCommon): r3 = np.matmul(args[0].copy(), args[1].copy()) assert_equal(r1, r3) - + def test_matmul_object(self): import fractions f = np.vectorize(fractions.Fraction) def random_ints(): return np.random.randint(1, 1000, size=(10, 3, 3)) - M1 = f(random_ints(), random_ints()) + M1 = f(random_ints(), random_ints()) M2 = f(random_ints(), random_ints()) M3 = self.matmul(M1, M2) @@ -6268,6 +6292,23 @@ class TestMatmul(MatmulCommon): with assert_raises(TypeError): b = np.matmul(a, a) + def test_matmul_bool(self): + # gh-14439 + a = np.array([[1, 0],[1, 1]], dtype=bool) + assert np.max(a.view(np.uint8)) == 1 + b = np.matmul(a, a) + # matmul with boolean output should always be 0, 1 + assert np.max(b.view(np.uint8)) == 1 + + rg = np.random.default_rng(np.random.PCG64(43)) + d = rg.integers(2, size=4*5, dtype=np.int8) + d = d.reshape(4, 5) > 0 + out1 = np.matmul(d, d.reshape(5, 4)) + out2 = np.dot(d, d.reshape(5, 4)) + assert_equal(out1, out2) + + c = np.matmul(np.zeros((2, 0), dtype=bool), np.zeros(0, dtype=bool)) + assert not np.any(c) if sys.version_info[:2] >= (3, 5): @@ -7930,6 +7971,8 @@ 
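`test_matmul_bool` above (gh-14439) requires boolean matmul to produce strict 0/1 results even when an 8-bit accumulation could wrap:

```python
import numpy as np

a = np.array([[1, 0], [1, 1]], dtype=bool)
b = np.matmul(a, a)
assert b.dtype == np.bool_
assert np.max(b.view(np.uint8)) == 1         # never 2 from 1 + 1
```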
class TestFormat(object): dst = object.__format__(a, '30') assert_equal(res, dst) +from numpy.testing import IS_PYPY + class TestCTypes(object): def test_ctypes_is_available(self): @@ -7996,7 +8039,29 @@ class TestCTypes(object): # but when the `ctypes_ptr` object dies, so should `arr` del ctypes_ptr + if IS_PYPY: + # Pypy does not recycle arr objects immediately. Trigger gc to + # release arr. Cpython uses refcounts. An explicit call to gc + # should not be needed here. + break_cycles() + assert_(arr_ref() is None, "unknowable whether ctypes pointer holds a reference") + + def test_ctypes_as_parameter_holds_reference(self): + arr = np.array([None]).copy() + + arr_ref = weakref.ref(arr) + + ctypes_ptr = arr.ctypes._as_parameter_ + + # `ctypes_ptr` should hold onto `arr` + del arr break_cycles() + assert_(arr_ref() is not None, "ctypes pointer did not hold onto a reference") + + # but when the `ctypes_ptr` object dies, so should `arr` + del ctypes_ptr + if IS_PYPY: + break_cycles() assert_(arr_ref() is None, "unknowable whether ctypes pointer holds a reference") diff --git a/numpy/core/tests/test_numeric.py b/numpy/core/tests/test_numeric.py index c479a0f6d..1358b45e9 100644 --- a/numpy/core/tests/test_numeric.py +++ b/numpy/core/tests/test_numeric.py @@ -2583,6 +2583,30 @@ class TestConvolve(object): class TestArgwhere(object): + + @pytest.mark.parametrize('nd', [0, 1, 2]) + def test_nd(self, nd): + # get an nd array with multiple elements in every dimension + x = np.empty((2,)*nd, bool) + + # none + x[...] = False + assert_equal(np.argwhere(x).shape, (0, nd)) + + # only one + x[...] = False + x.flat[0] = True + assert_equal(np.argwhere(x).shape, (1, nd)) + + # all but one + x[...] = True + x.flat[0] = False + assert_equal(np.argwhere(x).shape, (x.size - 1, nd)) + + # all + x[...] 
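`TestArgwhere.test_nd` above fixes the result shape of `np.argwhere` across dimensionalities, including 0-d input: the result is always `(number_of_hits, ndim)`:

```python
import numpy as np

for nd in range(3):
    x = np.ones((2,) * nd, dtype=bool)
    assert np.argwhere(x).shape == (x.size, nd)
```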
= True + assert_equal(np.argwhere(x).shape, (x.size, nd)) + def test_2D(self): x = np.arange(6).reshape((2, 3)) assert_array_equal(np.argwhere(x > 1), diff --git a/numpy/core/tests/test_numerictypes.py b/numpy/core/tests/test_numerictypes.py index d0ff5578a..387740e35 100644 --- a/numpy/core/tests/test_numerictypes.py +++ b/numpy/core/tests/test_numerictypes.py @@ -498,3 +498,32 @@ class TestDocStrings(object): assert_('int64' in np.int_.__doc__) elif np.int64 is np.longlong: assert_('int64' in np.longlong.__doc__) + + +class TestScalarTypeNames: + # gh-9799 + + numeric_types = [ + np.byte, np.short, np.intc, np.int_, np.longlong, + np.ubyte, np.ushort, np.uintc, np.uint, np.ulonglong, + np.half, np.single, np.double, np.longdouble, + np.csingle, np.cdouble, np.clongdouble, + ] + + def test_names_are_unique(self): + # none of the above may be aliases for each other + assert len(set(self.numeric_types)) == len(self.numeric_types) + + # names must be unique + names = [t.__name__ for t in self.numeric_types] + assert len(set(names)) == len(names) + + @pytest.mark.parametrize('t', numeric_types) + def test_names_reflect_attributes(self, t): + """ Test that names correspond to where the type is under ``np.`` """ + assert getattr(np, t.__name__) is t + + @pytest.mark.parametrize('t', numeric_types) + def test_names_are_undersood_by_dtype(self, t): + """ Test the dtype constructor maps names back to the type """ + assert np.dtype(t.__name__).type is t diff --git a/numpy/core/tests/test_regression.py b/numpy/core/tests/test_regression.py index c5289f6ac..9dc231deb 100644 --- a/numpy/core/tests/test_regression.py +++ b/numpy/core/tests/test_regression.py @@ -1539,7 +1539,8 @@ class TestRegression(object): def test_fromstring_crash(self): # Ticket #1345: the following should not cause a crash - np.fromstring(b'aa, aa, 1.0', sep=',') + with assert_warns(DeprecationWarning): + np.fromstring(b'aa, aa, 1.0', sep=',') def test_ticket_1539(self): dtypes = [x for x in np.typeDict.values() @@ -2500,3 +2501,13 @@ class TestRegression(object): t = T() #gh-13659, would raise in broadcasting [x=t for x in result] np.array([t]) + + @pytest.mark.skipif(sys.maxsize < 2 ** 31 + 1, reason='overflows 32-bit python') + @pytest.mark.skipif(sys.platform == 'win32' and sys.version_info[:2] < (3, 8), + reason='overflows on windows, fixed in bpo-16865') + def test_to_ctypes(self): + #gh-14214 + arr = np.zeros((2 ** 31 + 1,), 'b') + assert arr.size * arr.itemsize > 2 ** 31 + c_arr = np.ctypeslib.as_ctypes(arr) + assert_equal(c_arr._length_, arr.size) diff --git a/numpy/core/tests/test_scalarmath.py b/numpy/core/tests/test_scalarmath.py index ebba457e3..854df5590 100644 --- a/numpy/core/tests/test_scalarmath.py +++ b/numpy/core/tests/test_scalarmath.py @@ -664,3 +664,31 @@ class TestAbs(object): def test_numpy_abs(self): self._test_abs_func(np.abs) + + +class TestBitShifts(object): + + @pytest.mark.parametrize('type_code', np.typecodes['AllInteger']) + @pytest.mark.parametrize('op', + [operator.rshift, operator.lshift], ids=['>>', '<<']) + def test_shift_all_bits(self, type_code, op): + """ Shifts where the shift amount is the width of the type or wider """ + # gh-2449 + dt = np.dtype(type_code) + nbits = dt.itemsize * 8 + for val in [5, -5]: + for shift in [nbits, nbits + 4]: + val_scl = dt.type(val) + shift_scl = dt.type(shift) + res_scl = op(val_scl, shift_scl) + if val_scl < 0 and op is operator.rshift: + # sign bit is preserved + assert_equal(res_scl, -1) + else: + assert_equal(res_scl, 0) + + # Result on scalars should 
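`TestBitShifts` above (gh-2449) gives over-wide scalar shifts a defined result instead of inheriting C undefined behaviour: right-shifting a negative value by at least the type width sign-fills to `-1`, and every other over-wide shift yields `0`, matching the array behaviour:

```python
import numpy as np

assert np.int16(-5) >> np.int16(16) == -1    # sign bit preserved
assert np.int16(-5) << np.int16(16) == 0
assert np.uint8(5) >> np.uint8(8) == 0
```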
be the same as on arrays + val_arr = np.array([val]*32, dtype=dt) + shift_arr = np.array([shift]*32, dtype=dt) + res_arr = op(val_arr, shift_arr) + assert_equal(res_arr, res_scl) diff --git a/numpy/core/tests/test_umath.py b/numpy/core/tests/test_umath.py index ef48fed05..9b4ce9e47 100644 --- a/numpy/core/tests/test_umath.py +++ b/numpy/core/tests/test_umath.py @@ -75,11 +75,9 @@ class TestOut(object): assert_(r1 is o1) assert_(r2 is o2) - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings('always', '', DeprecationWarning) + with assert_raises(TypeError): + # Out argument must be tuple, since there are multiple outputs. r1, r2 = np.frexp(d, out=o1, subok=subok) - assert_(r1 is o1) - assert_(w[0].category is DeprecationWarning) assert_raises(ValueError, np.add, a, 2, o, o, subok=subok) assert_raises(ValueError, np.add, a, 2, o, out=o, subok=subok) @@ -165,14 +163,9 @@ class TestOut(object): else: assert_(type(r1) == np.ndarray) - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings('always', '', DeprecationWarning) + with assert_raises(TypeError): + # Out argument must be tuple, since there are multiple outputs. r1, r2 = np.frexp(d, out=o1, subok=subok) - if subok: - assert_(isinstance(r2, ArrayWrap)) - else: - assert_(type(r2) == np.ndarray) - assert_(w[0].category is DeprecationWarning) class TestComparisons(object): @@ -694,8 +687,96 @@ class TestSpecialFloats(object): assert_raises(FloatingPointError, np.cos, np.float32(-np.inf)) assert_raises(FloatingPointError, np.cos, np.float32(np.inf)) + def test_sqrt_values(self): + with np.errstate(all='ignore'): + x = [np.nan, np.nan, np.inf, np.nan, 0.] + y = [np.nan, -np.nan, np.inf, -np.inf, 0.] + for dt in ['f', 'd', 'g']: + xf = np.array(x, dtype=dt) + yf = np.array(y, dtype=dt) + assert_equal(np.sqrt(yf), xf) + + #with np.errstate(invalid='raise'): + # for dt in ['f', 'd', 'g']: + # assert_raises(FloatingPointError, np.sqrt, np.array(-100., dtype=dt)) -class TestSIMDFloat32(object): + def test_abs_values(self): + x = [np.nan, np.nan, np.inf, np.inf, 0., 0., 1.0, 1.0] + y = [np.nan, -np.nan, np.inf, -np.inf, 0., -0., -1.0, 1.0] + for dt in ['f', 'd', 'g']: + xf = np.array(x, dtype=dt) + yf = np.array(y, dtype=dt) + assert_equal(np.abs(yf), xf) + + def test_square_values(self): + x = [np.nan, np.nan, np.inf, np.inf] + y = [np.nan, -np.nan, np.inf, -np.inf] + with np.errstate(all='ignore'): + for dt in ['f', 'd', 'g']: + xf = np.array(x, dtype=dt) + yf = np.array(y, dtype=dt) + assert_equal(np.square(yf), xf) + + with np.errstate(over='raise'): + assert_raises(FloatingPointError, np.square, np.array(1E32, dtype='f')) + assert_raises(FloatingPointError, np.square, np.array(1E200, dtype='d')) + + def test_reciprocal_values(self): + with np.errstate(all='ignore'): + x = [np.nan, np.nan, 0.0, -0.0, np.inf, -np.inf] + y = [np.nan, -np.nan, np.inf, -np.inf, 0., -0.] 
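The new `TestSpecialFloats` cases above pin IEEE special-value behaviour for the AVX loops, for example reciprocal of signed zero and float32 overflow in square:

```python
import numpy as np

with np.errstate(all='ignore'):
    assert np.isposinf(np.reciprocal(np.float32(0.0)))
    assert np.isneginf(np.reciprocal(np.float32(-0.0)))

with np.errstate(over='raise'):
    try:
        np.square(np.array(1e32, dtype='f'))   # overflows float32
    except FloatingPointError:
        pass
```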
+ for dt in ['f', 'd', 'g']: + xf = np.array(x, dtype=dt) + yf = np.array(y, dtype=dt) + assert_equal(np.reciprocal(yf), xf) + + with np.errstate(divide='raise'): + for dt in ['f', 'd', 'g']: + assert_raises(FloatingPointError, np.reciprocal, np.array(-0.0, dtype=dt)) + +# func : [maxulperror, low, high] +avx_ufuncs = {'sqrt' :[1, 0., 100.], + 'absolute' :[0, -100., 100.], + 'reciprocal' :[1, 1., 100.], + 'square' :[1, -100., 100.], + 'rint' :[0, -100., 100.], + 'floor' :[0, -100., 100.], + 'ceil' :[0, -100., 100.], + 'trunc' :[0, -100., 100.]} + +class TestAVXUfuncs(object): + def test_avx_based_ufunc(self): + strides = np.array([-4,-3,-2,-1,1,2,3,4]) + np.random.seed(42) + for func, prop in avx_ufuncs.items(): + maxulperr = prop[0] + minval = prop[1] + maxval = prop[2] + # various array sizes to ensure masking in AVX is tested + for size in range(1,32): + myfunc = getattr(np, func) + x_f32 = np.float32(np.random.uniform(low=minval, high=maxval, + size=size)) + x_f64 = np.float64(x_f32) + x_f128 = np.longdouble(x_f32) + y_true128 = myfunc(x_f128) + if maxulperr == 0: + assert_equal(myfunc(x_f32), np.float32(y_true128)) + assert_equal(myfunc(x_f64), np.float64(y_true128)) + else: + assert_array_max_ulp(myfunc(x_f32), np.float32(y_true128), + maxulp=maxulperr) + assert_array_max_ulp(myfunc(x_f64), np.float64(y_true128), + maxulp=maxulperr) + # various strides to test gather instruction + if size > 1: + y_true32 = myfunc(x_f32) + y_true64 = myfunc(x_f64) + for jj in strides: + assert_equal(myfunc(x_f64[::jj]), y_true64[::jj]) + assert_equal(myfunc(x_f32[::jj]), y_true32[::jj]) + +class TestAVXFloat32Transcendental(object): def test_exp_float32(self): np.random.seed(42) x_f32 = np.float32(np.random.uniform(low=0.0,high=88.1,size=1000000)) @@ -722,8 +803,8 @@ class TestSIMDFloat32(object): def test_strided_float32(self): np.random.seed(42) - strides = np.random.randint(low=-100, high=100, size=100) - sizes = np.random.randint(low=1, high=2000, size=100) + strides = np.array([-4,-3,-2,-1,1,2,3,4]) + sizes = np.arange(2,100) for ii in sizes: x_f32 = np.float32(np.random.uniform(low=0.01,high=88.1,size=ii)) exp_true = np.exp(x_f32) @@ -2161,10 +2242,9 @@ class TestSpecialMethods(object): assert_(np.modf(a, None) == {}) assert_(np.modf(a, None, None) == {}) assert_(np.modf(a, out=(None, None)) == {}) - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings('always', '', DeprecationWarning) - assert_(np.modf(a, out=None) == {}) - assert_(w[0].category is DeprecationWarning) + with assert_raises(TypeError): + # Out argument must be tuple, since there are multiple outputs. + np.modf(a, out=None) # don't give positional and output argument, or too many arguments. # wrong number of arguments in the tuple is an error too. 
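The ``TestOut`` changes above pin down the new contract for multi-output ufuncs: a bare array passed as ``out`` now raises ``TypeError`` instead of emitting a ``DeprecationWarning``. A minimal sketch of the behavior these tests exercise (array values are illustrative):

    import numpy as np

    x = np.array([1.5, 2.25, -3.75])

    # With no `out`, both outputs of np.modf are allocated automatically.
    frac, whole = np.modf(x)

    # Explicit outputs must be supplied as a tuple, one entry per output.
    out1, out2 = np.empty_like(x), np.empty_like(x)
    np.modf(x, out=(out1, out2))

    # A single bare array is no longer accepted for a two-output ufunc.
    try:
        np.modf(x, out=out1)
    except TypeError as exc:
        print("raised:", exc)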
diff --git a/numpy/core/tests/test_umath_accuracy.py b/numpy/core/tests/test_umath_accuracy.py index fcbed0dd3..0bab04df2 100644 --- a/numpy/core/tests/test_umath_accuracy.py +++ b/numpy/core/tests/test_umath_accuracy.py @@ -35,7 +35,8 @@ class TestAccuracy(object): for filename in files: data_dir = path.join(path.dirname(__file__), 'data') filepath = path.join(data_dir, filename) - file_without_comments = (r for r in open(filepath) if not r[0] in ('$', '#')) + with open(filepath) as fid: + file_without_comments = (r for r in fid if not r[0] in ('$', '#')) data = np.genfromtxt(file_without_comments, dtype=('|S39','|S39','|S39',np.int), names=('type','input','output','ulperr'), diff --git a/numpy/ctypeslib.py b/numpy/ctypeslib.py index e49f8394f..58f3ef9d3 100644 --- a/numpy/ctypeslib.py +++ b/numpy/ctypeslib.py @@ -92,11 +92,11 @@ else: # Adapted from Albert Strasheim def load_library(libname, loader_path): """ - It is possible to load a library using + It is possible to load a library using >>> lib = ctypes.cdll[<full_path_name>] # doctest: +SKIP But there are cross-platform considerations, such as library file extensions, - plus the fact Windows will just load the first library it finds with that name. + plus the fact Windows will just load the first library it finds with that name. NumPy supplies the load_library function as a convenience. Parameters @@ -110,12 +110,12 @@ else: Returns ------- ctypes.cdll[libpath] : library object - A ctypes library object + A ctypes library object Raises ------ OSError - If there is no library with the expected extension, or the + If there is no library with the expected extension, or the library is defective and cannot be loaded. """ if ctypes.__version__ < '1.0.1': @@ -535,7 +535,10 @@ if ctypes is not None: if readonly: raise TypeError("readonly arrays unsupported") - dtype = _dtype((ai["typestr"], ai["shape"])) - result = as_ctypes_type(dtype).from_address(addr) + # can't use `_dtype((ai["typestr"], ai["shape"]))` here, as it overflows + # dtype.itemsize (gh-14214) + ctype_scalar = as_ctypes_type(ai["typestr"]) + result_type = _ctype_ndarray(ctype_scalar, ai["shape"]) + result = result_type.from_address(addr) result.__keep = obj return result diff --git a/numpy/distutils/__init__.py b/numpy/distutils/__init__.py index 55514750e..8dbb63b28 100644 --- a/numpy/distutils/__init__.py +++ b/numpy/distutils/__init__.py @@ -1,12 +1,31 @@ +""" +An enhanced distutils, providing support for Fortran compilers, for BLAS, +LAPACK and other common libraries for numerical computing, and more. + +Public submodules are:: + + misc_util + system_info + cpu_info + log + exec_command + +For details, please see the *Packaging* and *NumPy Distutils User Guide* +sections of the NumPy Reference Guide. + +For configuring the preference for and location of libraries like BLAS and +LAPACK, and for setting include paths and similar build options, please see +``site.cfg.example`` in the root of the NumPy repository or sdist. + +""" + from __future__ import division, absolute_import, print_function -from .__version__ import version as __version__ # Must import local ccompiler ASAP in order to get # customized CCompiler.spawn effective. from . import ccompiler from . 
import unixccompiler -from .info import __doc__ from .npy_pkg_config import * # If numpy is installed, add distutils.test() @@ -28,7 +47,7 @@ def customized_fcompiler(plat=None, compiler=None): c.customize() return c -def customized_ccompiler(plat=None, compiler=None): - c = ccompiler.new_compiler(plat=plat, compiler=compiler) +def customized_ccompiler(plat=None, compiler=None, verbose=1): + c = ccompiler.new_compiler(plat=plat, compiler=compiler, verbose=verbose) c.customize('') return c diff --git a/numpy/distutils/__version__.py b/numpy/distutils/__version__.py deleted file mode 100644 index 969decbba..000000000 --- a/numpy/distutils/__version__.py +++ /dev/null @@ -1,6 +0,0 @@ -from __future__ import division, absolute_import, print_function - -major = 0 -minor = 4 -micro = 0 -version = '%(major)d.%(minor)d.%(micro)d' % (locals()) diff --git a/numpy/distutils/ccompiler.py b/numpy/distutils/ccompiler.py index 14451fa66..643879023 100644 --- a/numpy/distutils/ccompiler.py +++ b/numpy/distutils/ccompiler.py @@ -140,7 +140,10 @@ def CCompiler_spawn(self, cmd, display=None): display = ' '.join(list(display)) log.info(display) try: - subprocess.check_output(cmd) + if self.verbose: + subprocess.check_output(cmd) + else: + subprocess.check_output(cmd, stderr=subprocess.STDOUT) except subprocess.CalledProcessError as exc: o = exc.output s = exc.returncode @@ -162,7 +165,8 @@ def CCompiler_spawn(self, cmd, display=None): if is_sequence(cmd): cmd = ' '.join(list(cmd)) - forward_bytes_to_stdout(o) + if self.verbose: + forward_bytes_to_stdout(o) if re.search(b'Too many open files', o): msg = '\nTry rerunning setup command until build succeeds.' @@ -727,10 +731,12 @@ if sys.platform == 'win32': _distutils_new_compiler = new_compiler def new_compiler (plat=None, compiler=None, - verbose=0, + verbose=None, dry_run=0, force=0): # Try first C compilers from numpy.distutils. 
+ if verbose is None: + verbose = log.get_threshold() <= log.INFO if plat is None: plat = os.name try: @@ -763,6 +769,7 @@ def new_compiler (plat=None, raise DistutilsModuleError(("can't compile C/C++ code: unable to find class '%s' " + "in module '%s'") % (class_name, module_name)) compiler = klass(None, dry_run, force) + compiler.verbose = verbose log.debug('new_compiler returns %s' % (klass)) return compiler diff --git a/numpy/distutils/command/build.py b/numpy/distutils/command/build.py index 3d7101582..5a9da1217 100644 --- a/numpy/distutils/command/build.py +++ b/numpy/distutils/command/build.py @@ -16,8 +16,8 @@ class build(old_build): user_options = old_build.user_options + [ ('fcompiler=', None, "specify the Fortran compiler type"), - ('parallel=', 'j', - "number of parallel jobs"), + ('warn-error', None, + "turn all warnings into errors (-Werror)"), ] help_options = old_build.help_options + [ @@ -28,17 +28,12 @@ class build(old_build): def initialize_options(self): old_build.initialize_options(self) self.fcompiler = None - self.parallel = None + self.warn_error = False def finalize_options(self): - if self.parallel: - try: - self.parallel = int(self.parallel) - except ValueError: - raise ValueError("--parallel/-j argument must be an integer") build_scripts = self.build_scripts old_build.finalize_options(self) - plat_specifier = ".%s-%s" % (get_platform(), sys.version[0:3]) + plat_specifier = ".{}-{}.{}".format(get_platform(), *sys.version_info[:2]) if build_scripts is None: self.build_scripts = os.path.join(self.build_base, 'scripts' + plat_specifier) diff --git a/numpy/distutils/command/build_clib.py b/numpy/distutils/command/build_clib.py index 910493a77..13edf0717 100644 --- a/numpy/distutils/command/build_clib.py +++ b/numpy/distutils/command/build_clib.py @@ -33,15 +33,18 @@ class build_clib(old_build_clib): ('inplace', 'i', 'Build in-place'), ('parallel=', 'j', "number of parallel jobs"), + ('warn-error', None, + "turn all warnings into errors (-Werror)"), ] - boolean_options = old_build_clib.boolean_options + ['inplace'] + boolean_options = old_build_clib.boolean_options + ['inplace', 'warn-error'] def initialize_options(self): old_build_clib.initialize_options(self) self.fcompiler = None self.inplace = 0 self.parallel = None + self.warn_error = None def finalize_options(self): if self.parallel: @@ -50,7 +53,10 @@ class build_clib(old_build_clib): except ValueError: raise ValueError("--parallel/-j argument must be an integer") old_build_clib.finalize_options(self) - self.set_undefined_options('build', ('parallel', 'parallel')) + self.set_undefined_options('build', + ('parallel', 'parallel'), + ('warn_error', 'warn_error'), + ) def have_f_sources(self): for (lib_name, build_info) in self.libraries: @@ -86,6 +92,10 @@ class build_clib(old_build_clib): self.compiler.customize(self.distribution, need_cxx=self.have_cxx_sources()) + if self.warn_error: + self.compiler.compiler.append('-Werror') + self.compiler.compiler_so.append('-Werror') + libraries = self.libraries self.libraries = None self.compiler.customize_cmd(self) diff --git a/numpy/distutils/command/build_ext.py b/numpy/distutils/command/build_ext.py index ef54fb25e..cd9b1c6f1 100644 --- a/numpy/distutils/command/build_ext.py +++ b/numpy/distutils/command/build_ext.py @@ -33,6 +33,8 @@ class build_ext (old_build_ext): "specify the Fortran compiler type"), ('parallel=', 'j', "number of parallel jobs"), + ('warn-error', None, + "turn all warnings into errors (-Werror)"), ] help_options = old_build_ext.help_options + [ @@ 
-40,10 +42,13 @@ class build_ext (old_build_ext): show_fortran_compilers), ] + boolean_options = old_build_ext.boolean_options + ['warn-error'] + def initialize_options(self): old_build_ext.initialize_options(self) self.fcompiler = None self.parallel = None + self.warn_error = None def finalize_options(self): if self.parallel: @@ -69,7 +74,10 @@ class build_ext (old_build_ext): self.include_dirs.extend(incl_dirs) old_build_ext.finalize_options(self) - self.set_undefined_options('build', ('parallel', 'parallel')) + self.set_undefined_options('build', + ('parallel', 'parallel'), + ('warn_error', 'warn_error'), + ) def run(self): if not self.extensions: @@ -116,6 +124,11 @@ class build_ext (old_build_ext): force=self.force) self.compiler.customize(self.distribution) self.compiler.customize_cmd(self) + + if self.warn_error: + self.compiler.compiler.append('-Werror') + self.compiler.compiler_so.append('-Werror') + self.compiler.show_customization() # Setup directory for storing generated extra DLL files on Windows diff --git a/numpy/distutils/command/build_src.py b/numpy/distutils/command/build_src.py index 41bb01da5..3e0522c5f 100644 --- a/numpy/distutils/command/build_src.py +++ b/numpy/distutils/command/build_src.py @@ -53,9 +53,12 @@ class build_src(build_ext.build_ext): ('inplace', 'i', "ignore build-lib and put compiled extensions into the source " + "directory alongside your pure Python modules"), + ('verbose-cfg', None, + "change logging level from WARN to INFO which will show all " + + "compiler output") ] - boolean_options = ['force', 'inplace'] + boolean_options = ['force', 'inplace', 'verbose-cfg'] help_options = [] @@ -76,6 +79,7 @@ class build_src(build_ext.build_ext): self.swig_opts = None self.swig_cpp = None self.swig = None + self.verbose_cfg = None def finalize_options(self): self.set_undefined_options('build', @@ -90,7 +94,7 @@ class build_src(build_ext.build_ext): self.data_files = self.distribution.data_files or [] if self.build_src is None: - plat_specifier = ".%s-%s" % (get_platform(), sys.version[0:3]) + plat_specifier = ".{}-{}.{}".format(get_platform(), *sys.version_info[:2]) self.build_src = os.path.join(self.build_base, 'src'+plat_specifier) # py_modules_dict is used in build_py.find_package_modules @@ -365,6 +369,13 @@ class build_src(build_ext.build_ext): build_dir = os.path.join(*([self.build_src] +name.split('.')[:-1])) self.mkpath(build_dir) + + if self.verbose_cfg: + new_level = log.INFO + else: + new_level = log.WARN + old_level = log.set_threshold(new_level) + for func in func_sources: source = func(extension, build_dir) if not source: @@ -375,7 +386,7 @@ class build_src(build_ext.build_ext): else: log.info(" adding '%s' to sources." % (source,)) new_sources.append(source) - + log.set_threshold(old_level) return new_sources def filter_py_files(self, sources): diff --git a/numpy/distutils/extension.py b/numpy/distutils/extension.py index 935f3eec9..872bd5362 100644 --- a/numpy/distutils/extension.py +++ b/numpy/distutils/extension.py @@ -19,8 +19,24 @@ if sys.version_info[0] >= 3: cxx_ext_re = re.compile(r'.*[.](cpp|cxx|cc)\Z', re.I).match fortran_pyf_ext_re = re.compile(r'.*[.](f90|f95|f77|for|ftn|f|pyf)\Z', re.I).match + class Extension(old_Extension): - def __init__ ( + """ + Parameters + ---------- + name : str + Extension name. + sources : list of str + List of source file locations relative to the top directory of + the package. + extra_compile_args : list of str + Extra command line arguments to pass to the compiler. 
+ extra_f77_compile_args : list of str + Extra command line arguments to pass to the fortran77 compiler. + extra_f90_compile_args : list of str + Extra command line arguments to pass to the fortran90 compiler. + """ + def __init__( self, name, sources, include_dirs=None, define_macros=None, diff --git a/numpy/distutils/info.py b/numpy/distutils/info.py deleted file mode 100644 index 2f5310665..000000000 --- a/numpy/distutils/info.py +++ /dev/null @@ -1,6 +0,0 @@ -""" -Enhanced distutils with Fortran compilers support and more. -""" -from __future__ import division, absolute_import, print_function - -postpone_import = True diff --git a/numpy/distutils/log.py b/numpy/distutils/log.py index 37f9fe5dd..ff7de86b1 100644 --- a/numpy/distutils/log.py +++ b/numpy/distutils/log.py @@ -67,6 +67,8 @@ def set_threshold(level, force=False): ' %s to %s' % (prev_level, level)) return prev_level +def get_threshold(): + return _global_log.threshold def set_verbosity(v, force=False): prev_level = _global_log.threshold diff --git a/numpy/distutils/misc_util.py b/numpy/distutils/misc_util.py index 89171eede..7ba8ad862 100644 --- a/numpy/distutils/misc_util.py +++ b/numpy/distutils/misc_util.py @@ -859,7 +859,7 @@ class Configuration(object): print(message) def warn(self, message): - sys.stderr.write('Warning: %s' % (message,)) + sys.stderr.write('Warning: %s\n' % (message,)) def set_options(self, **options): """ @@ -1687,6 +1687,41 @@ class Configuration(object): and will be installed as foo.ini in the 'lib' subpath. + When cross-compiling with numpy distutils, it might be necessary to + use modified npy-pkg-config files. Using the default/generated files + will link with the host libraries (i.e. libnpymath.a). For + cross-compilation you of course need to link with target libraries, + while using the host Python installation. + + You can copy out the numpy/core/lib/npy-pkg-config directory, add a + pkgdir value to the .ini files and set the NPY_PKG_CONFIG_PATH environment + variable to point to the directory with the modified npy-pkg-config + files. + + Example npymath.ini modified for cross-compilation:: + + [meta] + Name=npymath + Description=Portable, core math library implementing C99 standard + Version=0.1 + + [variables] + pkgname=numpy.core + pkgdir=/build/arm-linux-gnueabi/sysroot/usr/lib/python3.7/site-packages/numpy/core + prefix=${pkgdir} + libdir=${prefix}/lib + includedir=${prefix}/include + + [default] + Libs=-L${libdir} -lnpymath + Cflags=-I${includedir} + Requires=mlib + + [msvc] + Libs=/LIBPATH:${libdir} npymath.lib + Cflags=/INCLUDE:${includedir} + Requires=mlib + """ if subst_dict is None: subst_dict = {} @@ -2092,9 +2127,22 @@ def get_numpy_include_dirs(): return include_dirs def get_npy_pkg_dir(): - """Return the path where to find the npy-pkg-config directory.""" + """Return the path to the npy-pkg-config directory. + + If the NPY_PKG_CONFIG_PATH environment variable is set, the value of that + is returned. Otherwise, a path inside the location of the numpy module is + returned. + + NPY_PKG_CONFIG_PATH is useful when cross-compiling: it allows maintaining + customized npy-pkg-config .ini files for the cross-compilation + environment and using them in place of the generated ones.
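A short sketch of how the environment override described above behaves once the ``get_npy_pkg_dir`` change just below is in place; the cross-compilation directory is a hypothetical path:

    import os
    from numpy.distutils.misc_util import get_npy_pkg_dir

    # Default: the npy-pkg-config directory inside the installed numpy package.
    print(get_npy_pkg_dir())

    # With the variable set, its value is returned verbatim, e.g. a directory
    # of .ini files edited for a cross-compilation sysroot (hypothetical path).
    os.environ['NPY_PKG_CONFIG_PATH'] = '/opt/cross/npy-pkg-config'
    print(get_npy_pkg_dir())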
+ + """ # XXX: import here for bootstrapping reasons import numpy + d = os.environ.get('NPY_PKG_CONFIG_PATH') + if d is not None: + return d d = os.path.join(os.path.dirname(numpy.__file__), 'core', 'lib', 'npy-pkg-config') return d diff --git a/numpy/distutils/system_info.py b/numpy/distutils/system_info.py index 6cfce3b1c..5fd1003ab 100644 --- a/numpy/distutils/system_info.py +++ b/numpy/distutils/system_info.py @@ -146,7 +146,7 @@ else: from distutils.errors import DistutilsError from distutils.dist import Distribution import distutils.sysconfig -from distutils import log +from numpy.distutils import log from distutils.util import get_platform from numpy.distutils.exec_command import ( @@ -550,7 +550,6 @@ class system_info(object): dir_env_var = None search_static_first = 0 # XXX: disabled by default, may disappear in # future unless it is proved to be useful. - verbosity = 1 saved_results = {} notfounderror = NotFoundError @@ -558,7 +557,6 @@ class system_info(object): def __init__(self, default_lib_dirs=default_lib_dirs, default_include_dirs=default_include_dirs, - verbosity=1, ): self.__class__.info = {} self.local_prefixes = [] @@ -704,7 +702,7 @@ class system_info(object): log.info(' FOUND:') res = self.saved_results.get(self.__class__.__name__) - if self.verbosity > 0 and flag: + if log.get_threshold() <= log.INFO and flag: for k, v in res.items(): v = str(v) if k in ['sources', 'libraries'] and len(v) > 270: @@ -914,7 +912,7 @@ class system_info(object): """Return a list of existing paths composed by all combinations of items from the arguments. """ - return combine_paths(*args, **{'verbosity': self.verbosity}) + return combine_paths(*args) class fft_opt_info(system_info): @@ -1531,12 +1529,12 @@ def get_atlas_version(**config): try: s, o = c.get_output(atlas_version_c_text, libraries=libraries, library_dirs=library_dirs, - use_tee=(system_info.verbosity > 0)) + ) if s and re.search(r'undefined reference to `_gfortran', o, re.M): s, o = c.get_output(atlas_version_c_text, libraries=libraries + ['gfortran'], library_dirs=library_dirs, - use_tee=(system_info.verbosity > 0)) + ) if not s: warnings.warn(textwrap.dedent(""" ***************************************************** diff --git a/numpy/doc/broadcasting.py b/numpy/doc/broadcasting.py index f7bd2515b..cb548a0d0 100644 --- a/numpy/doc/broadcasting.py +++ b/numpy/doc/broadcasting.py @@ -61,8 +61,7 @@ dimensions are compatible when If these conditions are not met, a ``ValueError: operands could not be broadcast together`` exception is thrown, indicating that the arrays have incompatible shapes. The size of -the resulting array is the maximum size along each dimension of the input -arrays. +the resulting array is the size that is not 1 along each axis of the inputs. Arrays do not need to have the same *number* of dimensions. For example, if you have a ``256x256x3`` array of RGB values, and you want to scale diff --git a/numpy/doc/dispatch.py b/numpy/doc/dispatch.py index 8db607131..c9029941b 100644 --- a/numpy/doc/dispatch.py +++ b/numpy/doc/dispatch.py @@ -223,7 +223,7 @@ calls ``numpy.sum(self)``, and the same for ``mean``. ... return arr._i * arr._N ... >>> @implements(np.mean) -... def sum(arr): +... def mean(arr): ... "Implementation of np.mean for DiagonalArray objects" ... return arr._i / arr._N ... 
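The one-word rename in the dispatch example above matters because ``implements(np.mean)`` registers the decorated function as the handler for ``np.mean``; under the old name it silently overwrote the ``np.sum`` implementation. A condensed, self-contained version of that example, assuming a NumPy with ``__array_function__`` dispatch enabled (1.17+):

    import numpy as np

    HANDLED_FUNCTIONS = {}

    def implements(np_function):
        # Register an __array_function__ implementation for DiagonalArray.
        def decorator(func):
            HANDLED_FUNCTIONS[np_function] = func
            return func
        return decorator

    class DiagonalArray:
        # N-by-N array with `value` on the diagonal and zeros elsewhere.
        def __init__(self, N, value):
            self._N = N
            self._i = value

        def __array_function__(self, func, types, args, kwargs):
            if func not in HANDLED_FUNCTIONS:
                return NotImplemented
            return HANDLED_FUNCTIONS[func](*args, **kwargs)

    @implements(np.sum)
    def diag_sum(arr):
        return arr._i * arr._N

    @implements(np.mean)
    def diag_mean(arr):
        return arr._i / arr._N

    d = DiagonalArray(5, 2.0)
    print(np.sum(d), np.mean(d))   # 10.0 0.4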
diff --git a/numpy/doc/subclassing.py b/numpy/doc/subclassing.py index 4b983893a..d0685328e 100644 --- a/numpy/doc/subclassing.py +++ b/numpy/doc/subclassing.py @@ -118,7 +118,8 @@ For example, consider the following Python code: def __new__(cls, *args): print('Cls in __new__:', cls) print('Args in __new__:', args) - return object.__new__(cls, *args) + # The `object` type __new__ method takes a single argument. + return object.__new__(cls) def __init__(self, *args): print('type(self) in __init__:', type(self)) diff --git a/numpy/f2py/__init__.py b/numpy/f2py/__init__.py index d146739bb..42e3632fd 100644 --- a/numpy/f2py/__init__.py +++ b/numpy/f2py/__init__.py @@ -109,6 +109,7 @@ def compile(source, output = '' else: status = 0 + output = output.decode() if verbose: print(output) finally: diff --git a/numpy/f2py/cfuncs.py b/numpy/f2py/cfuncs.py index 17f3861ca..ccb7b3a32 100644 --- a/numpy/f2py/cfuncs.py +++ b/numpy/f2py/cfuncs.py @@ -1049,8 +1049,10 @@ static int create_cb_arglist(PyObject* fun,PyTupleObject* xa,const int maxnofarg CFUNCSMESS(\"create_cb_arglist\\n\"); tot=opt=ext=siz=0; /* Get the total number of arguments */ - if (PyFunction_Check(fun)) + if (PyFunction_Check(fun)) { tmp_fun = fun; + Py_INCREF(tmp_fun); + } else { di = 1; if (PyObject_HasAttrString(fun,\"im_func\")) { @@ -1062,6 +1064,7 @@ static int create_cb_arglist(PyObject* fun,PyTupleObject* xa,const int maxnofarg tmp_fun = PyObject_GetAttrString(tmp,\"im_func\"); else { tmp_fun = fun; /* built-in function */ + Py_INCREF(tmp_fun); tot = maxnofargs; if (xa != NULL) tot += PyTuple_Size((PyObject *)xa); @@ -1073,6 +1076,7 @@ static int create_cb_arglist(PyObject* fun,PyTupleObject* xa,const int maxnofarg if (xa != NULL) tot += PyTuple_Size((PyObject *)xa); tmp_fun = fun; + Py_INCREF(tmp_fun); } else if (F2PyCapsule_Check(fun)) { tot = maxnofargs; @@ -1083,6 +1087,7 @@ static int create_cb_arglist(PyObject* fun,PyTupleObject* xa,const int maxnofarg goto capi_fail; } tmp_fun = fun; + Py_INCREF(tmp_fun); } } if (tmp_fun==NULL) { @@ -1091,13 +1096,19 @@ goto capi_fail; } #if PY_VERSION_HEX >= 0x03000000 if (PyObject_HasAttrString(tmp_fun,\"__code__\")) { - if (PyObject_HasAttrString(tmp = PyObject_GetAttrString(tmp_fun,\"__code__\"),\"co_argcount\")) + if (PyObject_HasAttrString(tmp = PyObject_GetAttrString(tmp_fun,\"__code__\"),\"co_argcount\")) { #else if (PyObject_HasAttrString(tmp_fun,\"func_code\")) { - if (PyObject_HasAttrString(tmp = PyObject_GetAttrString(tmp_fun,\"func_code\"),\"co_argcount\")) + if (PyObject_HasAttrString(tmp = PyObject_GetAttrString(tmp_fun,\"func_code\"),\"co_argcount\")) { #endif - tot = PyInt_AsLong(PyObject_GetAttrString(tmp,\"co_argcount\")) - di; - Py_XDECREF(tmp); + PyObject *tmp_argcount = PyObject_GetAttrString(tmp,\"co_argcount\"); + Py_DECREF(tmp); + if (tmp_argcount == NULL) { + goto capi_fail; + } + tot = PyInt_AsLong(tmp_argcount) - di; + Py_DECREF(tmp_argcount); + } } /* Get the number of optional arguments */ #if PY_VERSION_HEX >= 0x03000000 @@ -1136,10 +1147,12 @@ goto capi_fail; PyTuple_SET_ITEM(*args,i,tmp); } CFUNCSMESS(\"create_cb_arglist-end\\n\"); + Py_DECREF(tmp_fun); return 1; capi_fail: if ((PyErr_Occurred())==NULL) PyErr_SetString(#modulename#_error,errmess); + Py_XDECREF(tmp_fun); return 0; } """ diff --git a/numpy/f2py/common_rules.py b/numpy/f2py/common_rules.py index 62c1ba207..f61d8810a 100644 --- a/numpy/f2py/common_rules.py +++ b/numpy/f2py/common_rules.py @@ -124,8 +124,9 @@ def buildhooks(m): cadd('\t%s(f2pyinit%s,F2PYINIT%s)(f2py_setup_%s);' % (F_FUNC, 
lower_name, name.upper(), name)) cadd('}\n') - iadd('\tF2PyDict_SetItemString(d, \"%s\", PyFortranObject_New(f2py_%s_def,f2py_init_%s));' % ( - name, name, name)) + iadd('\ttmp = PyFortranObject_New(f2py_%s_def,f2py_init_%s);' % (name, name)) + iadd('\tF2PyDict_SetItemString(d, \"%s\", tmp);' % name) + iadd('\tPy_DECREF(tmp);') tname = name.replace('_', '\\_') dadd('\\subsection{Common block \\texttt{%s}}\n' % (tname)) dadd('\\begin{description}') diff --git a/numpy/f2py/info.py b/numpy/f2py/info.py deleted file mode 100644 index c895c5de2..000000000 --- a/numpy/f2py/info.py +++ /dev/null @@ -1,6 +0,0 @@ -"""Fortran to Python Interface Generator. - -""" -from __future__ import division, absolute_import, print_function - -postpone_import = True diff --git a/numpy/f2py/rules.py b/numpy/f2py/rules.py index 1b41498ea..f2f713bde 100755 --- a/numpy/f2py/rules.py +++ b/numpy/f2py/rules.py @@ -215,6 +215,7 @@ PyMODINIT_FUNC init#modulename#(void) { \td = PyModule_GetDict(m); \ts = PyString_FromString(\"$R""" + """evision: $\"); \tPyDict_SetItemString(d, \"__version__\", s); +\tPy_DECREF(s); #if PY_VERSION_HEX >= 0x03000000 \ts = PyUnicode_FromString( #else @@ -222,8 +223,14 @@ PyMODINIT_FUNC init#modulename#(void) { #endif \t\t\"This module '#modulename#' is auto-generated with f2py (version:#f2py_version#).\\nFunctions:\\n\"\n#docs#\".\"); \tPyDict_SetItemString(d, \"__doc__\", s); -\t#modulename#_error = PyErr_NewException (\"#modulename#.error\", NULL, NULL); \tPy_DECREF(s); +\t#modulename#_error = PyErr_NewException (\"#modulename#.error\", NULL, NULL); +\t/* +\t * Store the error object inside the dict, so that it could get deallocated. +\t * (in practice, this is a module, so it likely will not and cannot.) +\t */ +\tPyDict_SetItemString(d, \"_#modulename#_error\", #modulename#_error); +\tPy_DECREF(#modulename#_error); \tfor(i=0;f2py_routine_defs[i].name!=NULL;i++) { \t\ttmp = PyFortranObject_NewAsAttr(&f2py_routine_defs[i]); \t\tPyDict_SetItemString(d, f2py_routine_defs[i].name, tmp); @@ -238,7 +245,6 @@ PyMODINIT_FUNC init#modulename#(void) { \tif (! 
PyErr_Occurred()) \t\ton_exit(f2py_report_on_exit,(void*)\"#modulename#\"); #endif - \treturn RETVAL; } #ifdef __cplusplus @@ -439,12 +445,16 @@ rout_rules = [ { extern #ctype# #F_FUNC#(#name_lower#,#NAME#)(void); PyObject* o = PyDict_GetItemString(d,"#name#"); - PyObject_SetAttrString(o,"_cpointer", F2PyCapsule_FromVoidPtr((void*)#F_FUNC#(#name_lower#,#NAME#),NULL)); + tmp = F2PyCapsule_FromVoidPtr((void*)#F_FUNC#(#name_lower#,#NAME#),NULL); + PyObject_SetAttrString(o,"_cpointer", tmp); + Py_DECREF(tmp); #if PY_VERSION_HEX >= 0x03000000 - PyObject_SetAttrString(o,"__name__", PyUnicode_FromString("#name#")); + s = PyUnicode_FromString("#name#"); #else - PyObject_SetAttrString(o,"__name__", PyString_FromString("#name#")); + s = PyString_FromString("#name#"); #endif + PyObject_SetAttrString(o,"__name__", s); + Py_DECREF(s); } '''}, 'need': {l_not(l_or(ismoduleroutine, isdummyroutine)): ['F_WRAPPEDFUNC', 'F_FUNC']}, @@ -477,12 +487,16 @@ rout_rules = [ { extern void #F_FUNC#(#name_lower#,#NAME#)(void); PyObject* o = PyDict_GetItemString(d,"#name#"); - PyObject_SetAttrString(o,"_cpointer", F2PyCapsule_FromVoidPtr((void*)#F_FUNC#(#name_lower#,#NAME#),NULL)); + tmp = F2PyCapsule_FromVoidPtr((void*)#F_FUNC#(#name_lower#,#NAME#),NULL); + PyObject_SetAttrString(o,"_cpointer", tmp); + Py_DECREF(tmp); #if PY_VERSION_HEX >= 0x03000000 - PyObject_SetAttrString(o,"__name__", PyUnicode_FromString("#name#")); + s = PyUnicode_FromString("#name#"); #else - PyObject_SetAttrString(o,"__name__", PyString_FromString("#name#")); + s = PyString_FromString("#name#"); #endif + PyObject_SetAttrString(o,"__name__", s); + Py_DECREF(s); } '''}, 'need': {l_not(l_or(ismoduleroutine, isdummyroutine)): ['F_WRAPPEDFUNC', 'F_FUNC']}, @@ -794,10 +808,13 @@ if (#varname#_capi==Py_None) { if (#varname#_xa_capi==NULL) { if (PyObject_HasAttrString(#modulename#_module,\"#varname#_extra_args\")) { PyObject* capi_tmp = PyObject_GetAttrString(#modulename#_module,\"#varname#_extra_args\"); - if (capi_tmp) + if (capi_tmp) { #varname#_xa_capi = (PyTupleObject *)PySequence_Tuple(capi_tmp); - else + Py_DECREF(capi_tmp); + } + else { #varname#_xa_capi = (PyTupleObject *)Py_BuildValue(\"()\"); + } if (#varname#_xa_capi==NULL) { PyErr_SetString(#modulename#_error,\"Failed to convert #modulename#.#varname#_extra_args to tuple.\\n\"); return NULL; diff --git a/numpy/f2py/src/fortranobject.c b/numpy/f2py/src/fortranobject.c index b55385b50..8aa55555d 100644 --- a/numpy/f2py/src/fortranobject.c +++ b/numpy/f2py/src/fortranobject.c @@ -39,19 +39,33 @@ PyFortranObject_New(FortranDataDef* defs, f2py_void_func init) { int i; PyFortranObject *fp = NULL; PyObject *v = NULL; - if (init!=NULL) /* Initialize F90 module objects */ + if (init!=NULL) { /* Initialize F90 module objects */ (*(init))(); - if ((fp = PyObject_New(PyFortranObject, &PyFortran_Type))==NULL) return NULL; - if ((fp->dict = PyDict_New())==NULL) return NULL; + } + fp = PyObject_New(PyFortranObject, &PyFortran_Type); + if (fp == NULL) { + return NULL; + } + if ((fp->dict = PyDict_New()) == NULL) { + Py_DECREF(fp); + return NULL; + } fp->len = 0; - while (defs[fp->len].name != NULL) fp->len++; - if (fp->len == 0) goto fail; + while (defs[fp->len].name != NULL) { + fp->len++; + } + if (fp->len == 0) { + goto fail; + } fp->defs = defs; - for (i=0;i<fp->len;i++) + for (i=0;i<fp->len;i++) { if (fp->defs[i].rank == -1) { /* Is Fortran routine */ v = PyFortranObject_NewAsAttr(&(fp->defs[i])); - if (v==NULL) return NULL; + if (v==NULL) { + goto fail; + } 
PyDict_SetItemString(fp->dict,fp->defs[i].name,v); + Py_XDECREF(v); } else if ((fp->defs[i].data)!=NULL) { /* Is Fortran variable or array (not allocatable) */ if (fp->defs[i].type == NPY_STRING) { @@ -65,13 +79,16 @@ PyFortranObject_New(FortranDataDef* defs, f2py_void_func init) { fp->defs[i].type, NULL, fp->defs[i].data, 0, NPY_ARRAY_FARRAY, NULL); } - if (v==NULL) return NULL; + if (v==NULL) { + goto fail; + } PyDict_SetItemString(fp->dict,fp->defs[i].name,v); + Py_XDECREF(v); } - Py_XDECREF(v); + } return (PyObject *)fp; fail: - Py_XDECREF(v); + Py_XDECREF(fp); return NULL; } diff --git a/numpy/f2py/src/test/foomodule.c b/numpy/f2py/src/test/foomodule.c index 733fab0be..caf3590d4 100644 --- a/numpy/f2py/src/test/foomodule.c +++ b/numpy/f2py/src/test/foomodule.c @@ -115,7 +115,7 @@ static PyMethodDef foo_module_methods[] = { void initfoo() { int i; - PyObject *m, *d, *s; + PyObject *m, *d, *s, *tmp; import_array(); m = Py_InitModule("foo", foo_module_methods); @@ -125,11 +125,17 @@ void initfoo() { PyDict_SetItemString(d, "__doc__", s); /* Fortran objects: */ - PyDict_SetItemString(d, "mod", PyFortranObject_New(f2py_mod_def,f2py_init_mod)); - PyDict_SetItemString(d, "foodata", PyFortranObject_New(f2py_foodata_def,f2py_init_foodata)); - for(i=0;f2py_routines_def[i].name!=NULL;i++) - PyDict_SetItemString(d, f2py_routines_def[i].name, - PyFortranObject_NewAsAttr(&f2py_routines_def[i])); + tmp = PyFortranObject_New(f2py_mod_def,f2py_init_mod); + PyDict_SetItemString(d, "mod", tmp); + Py_DECREF(tmp); + tmp = PyFortranObject_New(f2py_foodata_def,f2py_init_foodata); + PyDict_SetItemString(d, "foodata", tmp); + Py_DECREF(tmp); + for(i=0;f2py_routines_def[i].name!=NULL;i++) { + tmp = PyFortranObject_NewAsAttr(&f2py_routines_def[i]); + PyDict_SetItemString(d, f2py_routines_def[i].name, tmp); + Py_DECREF(tmp); + } Py_DECREF(s); diff --git a/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c b/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c index 7f46303b0..978db4e69 100644 --- a/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c +++ b/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c @@ -49,9 +49,18 @@ static PyObject *f2py_rout_wrap_call(PyObject *capi_self, return NULL; rank = PySequence_Length(dims_capi); dims = malloc(rank*sizeof(npy_intp)); - for (i=0;i<rank;++i) - dims[i] = (npy_intp)PyInt_AsLong(PySequence_GetItem(dims_capi,i)); - + for (i=0;i<rank;++i) { + PyObject *tmp; + tmp = PySequence_GetItem(dims_capi, i); + if (tmp == NULL) { + goto fail; + } + dims[i] = (npy_intp)PyInt_AsLong(tmp); + Py_DECREF(tmp); + if (dims[i] == -1 && PyErr_Occurred()) { + goto fail; + } + } capi_arr_tmp = array_from_pyobj(type_num,dims,rank,intent|F2PY_INTENT_OUT,arr_capi); if (capi_arr_tmp == NULL) { free(dims); @@ -60,6 +69,10 @@ static PyObject *f2py_rout_wrap_call(PyObject *capi_self, capi_buildvalue = Py_BuildValue("N",capi_arr_tmp); free(dims); return capi_buildvalue; + +fail: + free(dims); + return NULL; } static char doc_f2py_rout_wrap_attrs[] = "\ @@ -97,7 +110,7 @@ static PyObject *f2py_rout_wrap_attrs(PyObject *capi_self, PyTuple_SetItem(dimensions,i,PyInt_FromLong(PyArray_DIM(arr,i))); PyTuple_SetItem(strides,i,PyInt_FromLong(PyArray_STRIDE(arr,i))); } - return Py_BuildValue("siOOO(cciii)ii",s,PyArray_NDIM(arr), + return Py_BuildValue("siNNO(cciii)ii",s,PyArray_NDIM(arr), dimensions,strides, (PyArray_BASE(arr)==NULL?Py_None:PyArray_BASE(arr)), PyArray_DESCR(arr)->kind, @@ -154,61 +167,69 @@ PyMODINIT_FUNC inittest_array_from_pyobj_ext(void) { PyDict_SetItemString(d, "__doc__", s); wrap_error = 
PyErr_NewException ("wrap.error", NULL, NULL); Py_DECREF(s); - PyDict_SetItemString(d, "F2PY_INTENT_IN", PyInt_FromLong(F2PY_INTENT_IN)); - PyDict_SetItemString(d, "F2PY_INTENT_INOUT", PyInt_FromLong(F2PY_INTENT_INOUT)); - PyDict_SetItemString(d, "F2PY_INTENT_OUT", PyInt_FromLong(F2PY_INTENT_OUT)); - PyDict_SetItemString(d, "F2PY_INTENT_HIDE", PyInt_FromLong(F2PY_INTENT_HIDE)); - PyDict_SetItemString(d, "F2PY_INTENT_CACHE", PyInt_FromLong(F2PY_INTENT_CACHE)); - PyDict_SetItemString(d, "F2PY_INTENT_COPY", PyInt_FromLong(F2PY_INTENT_COPY)); - PyDict_SetItemString(d, "F2PY_INTENT_C", PyInt_FromLong(F2PY_INTENT_C)); - PyDict_SetItemString(d, "F2PY_OPTIONAL", PyInt_FromLong(F2PY_OPTIONAL)); - PyDict_SetItemString(d, "F2PY_INTENT_INPLACE", PyInt_FromLong(F2PY_INTENT_INPLACE)); - PyDict_SetItemString(d, "NPY_BOOL", PyInt_FromLong(NPY_BOOL)); - PyDict_SetItemString(d, "NPY_BYTE", PyInt_FromLong(NPY_BYTE)); - PyDict_SetItemString(d, "NPY_UBYTE", PyInt_FromLong(NPY_UBYTE)); - PyDict_SetItemString(d, "NPY_SHORT", PyInt_FromLong(NPY_SHORT)); - PyDict_SetItemString(d, "NPY_USHORT", PyInt_FromLong(NPY_USHORT)); - PyDict_SetItemString(d, "NPY_INT", PyInt_FromLong(NPY_INT)); - PyDict_SetItemString(d, "NPY_UINT", PyInt_FromLong(NPY_UINT)); - PyDict_SetItemString(d, "NPY_INTP", PyInt_FromLong(NPY_INTP)); - PyDict_SetItemString(d, "NPY_UINTP", PyInt_FromLong(NPY_UINTP)); - PyDict_SetItemString(d, "NPY_LONG", PyInt_FromLong(NPY_LONG)); - PyDict_SetItemString(d, "NPY_ULONG", PyInt_FromLong(NPY_ULONG)); - PyDict_SetItemString(d, "NPY_LONGLONG", PyInt_FromLong(NPY_LONGLONG)); - PyDict_SetItemString(d, "NPY_ULONGLONG", PyInt_FromLong(NPY_ULONGLONG)); - PyDict_SetItemString(d, "NPY_FLOAT", PyInt_FromLong(NPY_FLOAT)); - PyDict_SetItemString(d, "NPY_DOUBLE", PyInt_FromLong(NPY_DOUBLE)); - PyDict_SetItemString(d, "NPY_LONGDOUBLE", PyInt_FromLong(NPY_LONGDOUBLE)); - PyDict_SetItemString(d, "NPY_CFLOAT", PyInt_FromLong(NPY_CFLOAT)); - PyDict_SetItemString(d, "NPY_CDOUBLE", PyInt_FromLong(NPY_CDOUBLE)); - PyDict_SetItemString(d, "NPY_CLONGDOUBLE", PyInt_FromLong(NPY_CLONGDOUBLE)); - PyDict_SetItemString(d, "NPY_OBJECT", PyInt_FromLong(NPY_OBJECT)); - PyDict_SetItemString(d, "NPY_STRING", PyInt_FromLong(NPY_STRING)); - PyDict_SetItemString(d, "NPY_UNICODE", PyInt_FromLong(NPY_UNICODE)); - PyDict_SetItemString(d, "NPY_VOID", PyInt_FromLong(NPY_VOID)); - PyDict_SetItemString(d, "NPY_NTYPES", PyInt_FromLong(NPY_NTYPES)); - PyDict_SetItemString(d, "NPY_NOTYPE", PyInt_FromLong(NPY_NOTYPE)); - PyDict_SetItemString(d, "NPY_USERDEF", PyInt_FromLong(NPY_USERDEF)); - - PyDict_SetItemString(d, "CONTIGUOUS", PyInt_FromLong(NPY_ARRAY_C_CONTIGUOUS)); - PyDict_SetItemString(d, "FORTRAN", PyInt_FromLong(NPY_ARRAY_F_CONTIGUOUS)); - PyDict_SetItemString(d, "OWNDATA", PyInt_FromLong(NPY_ARRAY_OWNDATA)); - PyDict_SetItemString(d, "FORCECAST", PyInt_FromLong(NPY_ARRAY_FORCECAST)); - PyDict_SetItemString(d, "ENSURECOPY", PyInt_FromLong(NPY_ARRAY_ENSURECOPY)); - PyDict_SetItemString(d, "ENSUREARRAY", PyInt_FromLong(NPY_ARRAY_ENSUREARRAY)); - PyDict_SetItemString(d, "ALIGNED", PyInt_FromLong(NPY_ARRAY_ALIGNED)); - PyDict_SetItemString(d, "WRITEABLE", PyInt_FromLong(NPY_ARRAY_WRITEABLE)); - PyDict_SetItemString(d, "UPDATEIFCOPY", PyInt_FromLong(NPY_ARRAY_UPDATEIFCOPY)); - PyDict_SetItemString(d, "WRITEBACKIFCOPY", PyInt_FromLong(NPY_ARRAY_WRITEBACKIFCOPY)); - - PyDict_SetItemString(d, "BEHAVED", PyInt_FromLong(NPY_ARRAY_BEHAVED)); - PyDict_SetItemString(d, "BEHAVED_NS", PyInt_FromLong(NPY_ARRAY_BEHAVED_NS)); - PyDict_SetItemString(d, "CARRAY", 
PyInt_FromLong(NPY_ARRAY_CARRAY)); - PyDict_SetItemString(d, "FARRAY", PyInt_FromLong(NPY_ARRAY_FARRAY)); - PyDict_SetItemString(d, "CARRAY_RO", PyInt_FromLong(NPY_ARRAY_CARRAY_RO)); - PyDict_SetItemString(d, "FARRAY_RO", PyInt_FromLong(NPY_ARRAY_FARRAY_RO)); - PyDict_SetItemString(d, "DEFAULT", PyInt_FromLong(NPY_ARRAY_DEFAULT)); - PyDict_SetItemString(d, "UPDATE_ALL", PyInt_FromLong(NPY_ARRAY_UPDATE_ALL)); + +#define ADDCONST(NAME, CONST) \ + s = PyInt_FromLong(CONST); \ + PyDict_SetItemString(d, NAME, s); \ + Py_DECREF(s) + + ADDCONST("F2PY_INTENT_IN", F2PY_INTENT_IN); + ADDCONST("F2PY_INTENT_INOUT", F2PY_INTENT_INOUT); + ADDCONST("F2PY_INTENT_OUT", F2PY_INTENT_OUT); + ADDCONST("F2PY_INTENT_HIDE", F2PY_INTENT_HIDE); + ADDCONST("F2PY_INTENT_CACHE", F2PY_INTENT_CACHE); + ADDCONST("F2PY_INTENT_COPY", F2PY_INTENT_COPY); + ADDCONST("F2PY_INTENT_C", F2PY_INTENT_C); + ADDCONST("F2PY_OPTIONAL", F2PY_OPTIONAL); + ADDCONST("F2PY_INTENT_INPLACE", F2PY_INTENT_INPLACE); + ADDCONST("NPY_BOOL", NPY_BOOL); + ADDCONST("NPY_BYTE", NPY_BYTE); + ADDCONST("NPY_UBYTE", NPY_UBYTE); + ADDCONST("NPY_SHORT", NPY_SHORT); + ADDCONST("NPY_USHORT", NPY_USHORT); + ADDCONST("NPY_INT", NPY_INT); + ADDCONST("NPY_UINT", NPY_UINT); + ADDCONST("NPY_INTP", NPY_INTP); + ADDCONST("NPY_UINTP", NPY_UINTP); + ADDCONST("NPY_LONG", NPY_LONG); + ADDCONST("NPY_ULONG", NPY_ULONG); + ADDCONST("NPY_LONGLONG", NPY_LONGLONG); + ADDCONST("NPY_ULONGLONG", NPY_ULONGLONG); + ADDCONST("NPY_FLOAT", NPY_FLOAT); + ADDCONST("NPY_DOUBLE", NPY_DOUBLE); + ADDCONST("NPY_LONGDOUBLE", NPY_LONGDOUBLE); + ADDCONST("NPY_CFLOAT", NPY_CFLOAT); + ADDCONST("NPY_CDOUBLE", NPY_CDOUBLE); + ADDCONST("NPY_CLONGDOUBLE", NPY_CLONGDOUBLE); + ADDCONST("NPY_OBJECT", NPY_OBJECT); + ADDCONST("NPY_STRING", NPY_STRING); + ADDCONST("NPY_UNICODE", NPY_UNICODE); + ADDCONST("NPY_VOID", NPY_VOID); + ADDCONST("NPY_NTYPES", NPY_NTYPES); + ADDCONST("NPY_NOTYPE", NPY_NOTYPE); + ADDCONST("NPY_USERDEF", NPY_USERDEF); + + ADDCONST("CONTIGUOUS", NPY_ARRAY_C_CONTIGUOUS); + ADDCONST("FORTRAN", NPY_ARRAY_F_CONTIGUOUS); + ADDCONST("OWNDATA", NPY_ARRAY_OWNDATA); + ADDCONST("FORCECAST", NPY_ARRAY_FORCECAST); + ADDCONST("ENSURECOPY", NPY_ARRAY_ENSURECOPY); + ADDCONST("ENSUREARRAY", NPY_ARRAY_ENSUREARRAY); + ADDCONST("ALIGNED", NPY_ARRAY_ALIGNED); + ADDCONST("WRITEABLE", NPY_ARRAY_WRITEABLE); + ADDCONST("UPDATEIFCOPY", NPY_ARRAY_UPDATEIFCOPY); + ADDCONST("WRITEBACKIFCOPY", NPY_ARRAY_WRITEBACKIFCOPY); + + ADDCONST("BEHAVED", NPY_ARRAY_BEHAVED); + ADDCONST("BEHAVED_NS", NPY_ARRAY_BEHAVED_NS); + ADDCONST("CARRAY", NPY_ARRAY_CARRAY); + ADDCONST("FARRAY", NPY_ARRAY_FARRAY); + ADDCONST("CARRAY_RO", NPY_ARRAY_CARRAY_RO); + ADDCONST("FARRAY_RO", NPY_ARRAY_FARRAY_RO); + ADDCONST("DEFAULT", NPY_ARRAY_DEFAULT); + ADDCONST("UPDATE_ALL", NPY_ARRAY_UPDATE_ALL); + +#undef ADDCONST if (PyErr_Occurred()) Py_FatalError("can't initialize module wrap"); diff --git a/numpy/f2py/tests/test_compile_function.py b/numpy/f2py/tests/test_compile_function.py index 36abf05f9..40ea7997f 100644 --- a/numpy/f2py/tests/test_compile_function.py +++ b/numpy/f2py/tests/test_compile_function.py @@ -29,6 +29,7 @@ def setup_module(): @pytest.mark.parametrize( "extra_args", [['--noopt', '--debug'], '--noopt --debug', ''] ) +@pytest.mark.leaks_references(reason="Imported module seems never deleted.") def test_f2py_init_compile(extra_args): # flush through the f2py __init__ compile() function code path as a # crude test for input handling following migration from @@ -81,6 +82,9 @@ def test_f2py_init_compile(extra_args): return_check
= import_module(modname) calc_result = return_check.foo() assert_equal(calc_result, 15) + # Removal from sys.modules is not strictly necessary. Even with + # removal, the module (dict) stays alive. + del sys.modules[modname] def test_f2py_init_compile_failure(): diff --git a/numpy/f2py/tests/util.py b/numpy/f2py/tests/util.py index d20dc5908..77cb612d0 100644 --- a/numpy/f2py/tests/util.py +++ b/numpy/f2py/tests/util.py @@ -31,6 +31,7 @@ except ImportError: # _module_dir = None +_module_num = 5403 def _cleanup(): @@ -59,13 +60,14 @@ def get_module_dir(): def get_temp_module_name(): # Assume single-threaded, and the module dir usable only by this thread + global _module_num d = get_module_dir() - for j in range(5403, 9999999): - name = "_test_ext_module_%d" % j - fn = os.path.join(d, name) - if name not in sys.modules and not os.path.isfile(fn + '.py'): - return name - raise RuntimeError("Failed to create a temporary module name") + name = "_test_ext_module_%d" % _module_num + _module_num += 1 + if name in sys.modules: + # this should not be possible, but check anyway + raise RuntimeError("Temporary module name already in use.") + return name def _memoize(func): diff --git a/numpy/fft/README.md b/numpy/fft/README.md index 7040a2e9b..f79188139 100644 --- a/numpy/fft/README.md +++ b/numpy/fft/README.md @@ -10,11 +10,6 @@ advantages: - worst case complexity for transform sizes with large prime factors is `N*log(N)`, because Bluestein's algorithm [3] is used for these cases. -License ------- - -3-clause BSD (see LICENSE.md) - Some code details ----------------- diff --git a/numpy/fft/__init__.py b/numpy/fft/__init__.py index 64b35bc19..fe95d8b17 100644 --- a/numpy/fft/__init__.py +++ b/numpy/fft/__init__.py @@ -1,9 +1,191 @@ -from __future__ import division, absolute_import, print_function +""" +Discrete Fourier Transform (:mod:`numpy.fft`) +============================================= + +.. currentmodule:: numpy.fft + +Standard FFTs +------------- + +.. autosummary:: + :toctree: generated/ + + fft Discrete Fourier transform. + ifft Inverse discrete Fourier transform. + fft2 Discrete Fourier transform in two dimensions. + ifft2 Inverse discrete Fourier transform in two dimensions. + fftn Discrete Fourier transform in N dimensions. + ifftn Inverse discrete Fourier transform in N dimensions. + +Real FFTs +--------- + +.. autosummary:: + :toctree: generated/ + + rfft Real discrete Fourier transform. + irfft Inverse real discrete Fourier transform. + rfft2 Real discrete Fourier transform in two dimensions. + irfft2 Inverse real discrete Fourier transform in two dimensions. + rfftn Real discrete Fourier transform in N dimensions. + irfftn Inverse real discrete Fourier transform in N dimensions. + +Hermitian FFTs +-------------- + +.. autosummary:: + :toctree: generated/ + + hfft Hermitian discrete Fourier transform. + ihfft Inverse Hermitian discrete Fourier transform. + +Helper routines +--------------- + +.. autosummary:: + :toctree: generated/ + + fftfreq Discrete Fourier Transform sample frequencies. + rfftfreq DFT sample frequencies (for usage with rfft, irfft). + fftshift Shift zero-frequency component to center of spectrum. + ifftshift Inverse of fftshift. + + +Background information +---------------------- + +Fourier analysis is fundamentally a method for expressing a function as a +sum of periodic components, and for recovering the function from those +components.
When both the function and its Fourier transform are +replaced with discretized counterparts, it is called the discrete Fourier +transform (DFT). The DFT has become a mainstay of numerical computing in +part because of a very fast algorithm for computing it, called the Fast +Fourier Transform (FFT), which was known to Gauss (1805) and was brought +to light in its current form by Cooley and Tukey [CT]_. Press et al. [NR]_ +provide an accessible introduction to Fourier analysis and its +applications. + +Because the discrete Fourier transform separates its input into +components that contribute at discrete frequencies, it has a great number +of applications in digital signal processing, e.g., for filtering, and in +this context the discretized input to the transform is customarily +referred to as a *signal*, which exists in the *time domain*. The output +is called a *spectrum* or *transform* and exists in the *frequency +domain*. + +Implementation details +---------------------- + +There are many ways to define the DFT, varying in the sign of the +exponent, normalization, etc. In this implementation, the DFT is defined +as + +.. math:: + A_k = \\sum_{m=0}^{n-1} a_m \\exp\\left\\{-2\\pi i{mk \\over n}\\right\\} + \\qquad k = 0,\\ldots,n-1. + +The DFT is in general defined for complex inputs and outputs, and a +single-frequency component at linear frequency :math:`f` is +represented by a complex exponential +:math:`a_m = \\exp\\{2\\pi i\\,f m\\Delta t\\}`, where :math:`\\Delta t` +is the sampling interval. -# To get sub-modules -from .info import __doc__ +The values in the result follow so-called "standard" order: If ``A = +fft(a, n)``, then ``A[0]`` contains the zero-frequency term (the sum of +the signal), which is always purely real for real inputs. Then ``A[1:n/2]`` +contains the positive-frequency terms, and ``A[n/2+1:]`` contains the +negative-frequency terms, in order of decreasingly negative frequency. +For an even number of input points, ``A[n/2]`` represents both positive and +negative Nyquist frequency, and is also purely real for real input. For +an odd number of input points, ``A[(n-1)/2]`` contains the largest positive +frequency, while ``A[(n+1)/2]`` contains the largest negative frequency. +The routine ``np.fft.fftfreq(n)`` returns an array giving the frequencies +of corresponding elements in the output. The routine +``np.fft.fftshift(A)`` shifts transforms and their frequencies to put the +zero-frequency components in the middle, and ``np.fft.ifftshift(A)`` undoes +that shift. + +When the input `a` is a time-domain signal and ``A = fft(a)``, ``np.abs(A)`` +is its amplitude spectrum and ``np.abs(A)**2`` is its power spectrum. +The phase spectrum is obtained by ``np.angle(A)``. + +The inverse DFT is defined as + +.. math:: + a_m = \\frac{1}{n}\\sum_{k=0}^{n-1}A_k\\exp\\left\\{2\\pi i{mk\\over n}\\right\\} + \\qquad m = 0,\\ldots,n-1. + +It differs from the forward transform by the sign of the exponential +argument and the default normalization by :math:`1/n`. + +Normalization +------------- +The default normalization has the direct transforms unscaled and the inverse +transforms are scaled by :math:`1/n`. It is possible to obtain unitary +transforms by setting the keyword argument ``norm`` to ``"ortho"`` (default is +`None`) so that both direct and inverse transforms will be scaled by +:math:`1/\\sqrt{n}`. 
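The normalization rule stated just above is easy to verify numerically; a quick check of the ``"ortho"`` scaling and of the default round trip:

    import numpy as np

    n = 8
    x = np.random.rand(n)

    A = np.fft.fft(x)                      # forward transform, unscaled
    A_ortho = np.fft.fft(x, norm="ortho")  # unitary variant
    np.testing.assert_allclose(A_ortho, A / np.sqrt(n))

    # The default inverse applies the 1/n factor, giving a clean round trip.
    np.testing.assert_allclose(np.fft.ifft(A), x, atol=1e-12)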
+ + Real and Hermitian transforms + ----------------------------- + + When the input is purely real, its transform is Hermitian, i.e., the + component at frequency :math:`f_k` is the complex conjugate of the + component at frequency :math:`-f_k`, which means that for real + inputs there is no information in the negative frequency components that + is not already available from the positive frequency components. + The family of `rfft` functions is + designed to operate on real inputs, and exploits this symmetry by + computing only the positive frequency components, up to and including the + Nyquist frequency. Thus, ``n`` input points produce ``n/2+1`` complex + output points. The inverses of this family assume the same symmetry of + their input, and for an output of ``n`` points use ``n/2+1`` input points. + + Correspondingly, when the spectrum is purely real, the signal is + Hermitian. The `hfft` family of functions exploits this symmetry by + using ``n/2+1`` complex points in the input (time) domain for ``n`` real + points in the frequency domain. + + In higher dimensions, FFTs are used, e.g., for image analysis and + filtering. The computational efficiency of the FFT means that it can + also be a faster way to compute large convolutions, using the property + that a convolution in the time domain is equivalent to a point-by-point + multiplication in the frequency domain. + + Higher dimensions + ----------------- + + In two dimensions, the DFT is defined as + + .. math:: + A_{kl} = \\sum_{m=0}^{M-1} \\sum_{n=0}^{N-1} + a_{mn}\\exp\\left\\{-2\\pi i \\left({mk\\over M}+{nl\\over N}\\right)\\right\\} + \\qquad k = 0, \\ldots, M-1;\\quad l = 0, \\ldots, N-1, + + which extends in the obvious way to higher dimensions, and the inverses + in higher dimensions also extend in the same way. + + References + ---------- + + .. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the + machine calculation of complex Fourier series," *Math. Comput.* + 19: 297-301. + + .. [NR] Press, W., Teukolsky, S., Vetterling, W.T., and Flannery, B.P., + 2007, *Numerical Recipes: The Art of Scientific Computing*, ch. + 12-13. Cambridge Univ. Press, Cambridge, UK. + + Examples + -------- + + For examples, see the various functions.
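The ``n/2+1`` bookkeeping described in the section above can be checked directly:

    import numpy as np

    x = np.random.rand(10)        # real input, n = 10
    R = np.fft.rfft(x)
    print(R.shape)                # (6,) == n//2 + 1 complex output points

    # The inverse assumes Hermitian symmetry; passing n pins down odd/even.
    np.testing.assert_allclose(np.fft.irfft(R, n=10), x, atol=1e-12)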
+ +""" + +from __future__ import division, absolute_import, print_function -from .pocketfft import * +from ._pocketfft import * from .helper import * from numpy._pytesttester import PytestTester diff --git a/numpy/fft/pocketfft.c b/numpy/fft/_pocketfft.c index 9d1218e6b..d75b9983c 100644 --- a/numpy/fft/pocketfft.c +++ b/numpy/fft/_pocketfft.c @@ -2362,7 +2362,7 @@ static struct PyMethodDef methods[] = { #if PY_MAJOR_VERSION >= 3 static struct PyModuleDef moduledef = { PyModuleDef_HEAD_INIT, - "pocketfft_internal", + "_pocketfft_internal", NULL, -1, methods, @@ -2376,11 +2376,11 @@ static struct PyModuleDef moduledef = { /* Initialization function for the module */ #if PY_MAJOR_VERSION >= 3 #define RETVAL(x) x -PyMODINIT_FUNC PyInit_pocketfft_internal(void) +PyMODINIT_FUNC PyInit__pocketfft_internal(void) #else #define RETVAL(x) PyMODINIT_FUNC -initpocketfft_internal(void) +init_pocketfft_internal(void) #endif { PyObject *m; @@ -2389,7 +2389,7 @@ initpocketfft_internal(void) #else static const char module_documentation[] = ""; - m = Py_InitModule4("pocketfft_internal", methods, + m = Py_InitModule4("_pocketfft_internal", methods, module_documentation, (PyObject*)NULL,PYTHON_API_VERSION); #endif diff --git a/numpy/fft/pocketfft.py b/numpy/fft/_pocketfft.py index 77ea6e3ba..50720cda4 100644 --- a/numpy/fft/pocketfft.py +++ b/numpy/fft/_pocketfft.py @@ -35,7 +35,7 @@ __all__ = ['fft', 'ifft', 'rfft', 'irfft', 'hfft', 'ihfft', 'rfftn', import functools from numpy.core import asarray, zeros, swapaxes, conjugate, take, sqrt -from . import pocketfft_internal as pfi +from . import _pocketfft_internal as pfi from numpy.core.multiarray import normalize_axis_index from numpy.core import overrides @@ -44,7 +44,11 @@ array_function_dispatch = functools.partial( overrides.array_function_dispatch, module='numpy.fft') -def _raw_fft(a, n, axis, is_real, is_forward, fct): +# `inv_norm` is a float by which the result of the transform needs to be +# divided. This replaces the original, more intuitive `fct` parameter to avoid +# divisions by zero (or alternatively additional checks) in the case of +# zero-length axes during its computation. +def _raw_fft(a, n, axis, is_real, is_forward, inv_norm): axis = normalize_axis_index(axis, a.ndim) if n is None: n = a.shape[axis] @@ -53,6 +57,8 @@ def _raw_fft(a, n, axis, is_real, is_forward, fct): raise ValueError("Invalid number of FFT data points (%d) specified."
% n) + fct = 1/inv_norm + if a.shape[axis] != n: s = list(a.shape) if s[axis] > n: @@ -176,10 +182,10 @@ def fft(a, n=None, axis=-1, norm=None): a = asarray(a) if n is None: n = a.shape[axis] - fct = 1 + inv_norm = 1 if norm is not None and _unitary(norm): - fct = 1 / sqrt(n) - output = _raw_fft(a, n, axis, False, True, fct) + inv_norm = sqrt(n) + output = _raw_fft(a, n, axis, False, True, inv_norm) return output @@ -272,10 +278,10 @@ def ifft(a, n=None, axis=-1, norm=None): if n is None: n = a.shape[axis] if norm is not None and _unitary(norm): - fct = 1/sqrt(max(n, 1)) + inv_norm = sqrt(max(n, 1)) else: - fct = 1/max(n, 1) - output = _raw_fft(a, n, axis, False, False, fct) + inv_norm = n + output = _raw_fft(a, n, axis, False, False, inv_norm) return output @@ -360,12 +366,12 @@ def rfft(a, n=None, axis=-1, norm=None): """ a = asarray(a) - fct = 1 + inv_norm = 1 if norm is not None and _unitary(norm): if n is None: n = a.shape[axis] - fct = 1/sqrt(n) - output = _raw_fft(a, n, axis, True, True, fct) + inv_norm = sqrt(n) + output = _raw_fft(a, n, axis, True, True, inv_norm) return output @@ -462,10 +468,10 @@ def irfft(a, n=None, axis=-1, norm=None): a = asarray(a) if n is None: n = (a.shape[axis] - 1) * 2 - fct = 1/n + inv_norm = n if norm is not None and _unitary(norm): - fct = 1/sqrt(n) - output = _raw_fft(a, n, axis, True, False, fct) + inv_norm = sqrt(n) + output = _raw_fft(a, n, axis, True, False, inv_norm) return output diff --git a/numpy/fft/info.py b/numpy/fft/info.py deleted file mode 100644 index cb6526b44..000000000 --- a/numpy/fft/info.py +++ /dev/null @@ -1,187 +0,0 @@ -""" -Discrete Fourier Transform (:mod:`numpy.fft`) -============================================= - -.. currentmodule:: numpy.fft - -Standard FFTs -------------- - -.. autosummary:: - :toctree: generated/ - - fft Discrete Fourier transform. - ifft Inverse discrete Fourier transform. - fft2 Discrete Fourier transform in two dimensions. - ifft2 Inverse discrete Fourier transform in two dimensions. - fftn Discrete Fourier transform in N-dimensions. - ifftn Inverse discrete Fourier transform in N dimensions. - -Real FFTs ---------- - -.. autosummary:: - :toctree: generated/ - - rfft Real discrete Fourier transform. - irfft Inverse real discrete Fourier transform. - rfft2 Real discrete Fourier transform in two dimensions. - irfft2 Inverse real discrete Fourier transform in two dimensions. - rfftn Real discrete Fourier transform in N dimensions. - irfftn Inverse real discrete Fourier transform in N dimensions. - -Hermitian FFTs --------------- - -.. autosummary:: - :toctree: generated/ - - hfft Hermitian discrete Fourier transform. - ihfft Inverse Hermitian discrete Fourier transform. - -Helper routines ---------------- - -.. autosummary:: - :toctree: generated/ - - fftfreq Discrete Fourier Transform sample frequencies. - rfftfreq DFT sample frequencies (for usage with rfft, irfft). - fftshift Shift zero-frequency component to center of spectrum. - ifftshift Inverse of fftshift. - - -Background information ----------------------- - -Fourier analysis is fundamentally a method for expressing a function as a -sum of periodic components, and for recovering the function from those -components. When both the function and its Fourier transform are -replaced with discretized counterparts, it is called the discrete Fourier -transform (DFT). 
The DFT has become a mainstay of numerical computing in -part because of a very fast algorithm for computing it, called the Fast -Fourier Transform (FFT), which was known to Gauss (1805) and was brought -to light in its current form by Cooley and Tukey [CT]_. Press et al. [NR]_ -provide an accessible introduction to Fourier analysis and its -applications. - -Because the discrete Fourier transform separates its input into -components that contribute at discrete frequencies, it has a great number -of applications in digital signal processing, e.g., for filtering, and in -this context the discretized input to the transform is customarily -referred to as a *signal*, which exists in the *time domain*. The output -is called a *spectrum* or *transform* and exists in the *frequency -domain*. - -Implementation details ----------------------- - -There are many ways to define the DFT, varying in the sign of the -exponent, normalization, etc. In this implementation, the DFT is defined -as - -.. math:: - A_k = \\sum_{m=0}^{n-1} a_m \\exp\\left\\{-2\\pi i{mk \\over n}\\right\\} - \\qquad k = 0,\\ldots,n-1. - -The DFT is in general defined for complex inputs and outputs, and a -single-frequency component at linear frequency :math:`f` is -represented by a complex exponential -:math:`a_m = \\exp\\{2\\pi i\\,f m\\Delta t\\}`, where :math:`\\Delta t` -is the sampling interval. - -The values in the result follow so-called "standard" order: If ``A = -fft(a, n)``, then ``A[0]`` contains the zero-frequency term (the sum of -the signal), which is always purely real for real inputs. Then ``A[1:n/2]`` -contains the positive-frequency terms, and ``A[n/2+1:]`` contains the -negative-frequency terms, in order of decreasingly negative frequency. -For an even number of input points, ``A[n/2]`` represents both positive and -negative Nyquist frequency, and is also purely real for real input. For -an odd number of input points, ``A[(n-1)/2]`` contains the largest positive -frequency, while ``A[(n+1)/2]`` contains the largest negative frequency. -The routine ``np.fft.fftfreq(n)`` returns an array giving the frequencies -of corresponding elements in the output. The routine -``np.fft.fftshift(A)`` shifts transforms and their frequencies to put the -zero-frequency components in the middle, and ``np.fft.ifftshift(A)`` undoes -that shift. - -When the input `a` is a time-domain signal and ``A = fft(a)``, ``np.abs(A)`` -is its amplitude spectrum and ``np.abs(A)**2`` is its power spectrum. -The phase spectrum is obtained by ``np.angle(A)``. - -The inverse DFT is defined as - -.. math:: - a_m = \\frac{1}{n}\\sum_{k=0}^{n-1}A_k\\exp\\left\\{2\\pi i{mk\\over n}\\right\\} - \\qquad m = 0,\\ldots,n-1. - -It differs from the forward transform by the sign of the exponential -argument and the default normalization by :math:`1/n`. - -Normalization -------------- -The default normalization has the direct transforms unscaled and the inverse -transforms are scaled by :math:`1/n`. It is possible to obtain unitary -transforms by setting the keyword argument ``norm`` to ``"ortho"`` (default is -`None`) so that both direct and inverse transforms will be scaled by -:math:`1/\\sqrt{n}`. 
- -Real and Hermitian transforms ------------------------------ - -When the input is purely real, its transform is Hermitian, i.e., the -component at frequency :math:`f_k` is the complex conjugate of the -component at frequency :math:`-f_k`, which means that for real -inputs there is no information in the negative frequency components that -is not already available from the positive frequency components. -The family of `rfft` functions is -designed to operate on real inputs, and exploits this symmetry by -computing only the positive frequency components, up to and including the -Nyquist frequency. Thus, ``n`` input points produce ``n/2+1`` complex -output points. The inverses of this family assumes the same symmetry of -its input, and for an output of ``n`` points uses ``n/2+1`` input points. - -Correspondingly, when the spectrum is purely real, the signal is -Hermitian. The `hfft` family of functions exploits this symmetry by -using ``n/2+1`` complex points in the input (time) domain for ``n`` real -points in the frequency domain. - -In higher dimensions, FFTs are used, e.g., for image analysis and -filtering. The computational efficiency of the FFT means that it can -also be a faster way to compute large convolutions, using the property -that a convolution in the time domain is equivalent to a point-by-point -multiplication in the frequency domain. - -Higher dimensions ------------------ - -In two dimensions, the DFT is defined as - -.. math:: - A_{kl} = \\sum_{m=0}^{M-1} \\sum_{n=0}^{N-1} - a_{mn}\\exp\\left\\{-2\\pi i \\left({mk\\over M}+{nl\\over N}\\right)\\right\\} - \\qquad k = 0, \\ldots, M-1;\\quad l = 0, \\ldots, N-1, - -which extends in the obvious way to higher dimensions, and the inverses -in higher dimensions also extend in the same way. - -References ---------- - -.. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the - machine calculation of complex Fourier series," *Math. Comput.* - 19: 297-301. - -.. [NR] Press, W., Teukolsky, S., Vetterline, W.T., and Flannery, B.P., - 2007, *Numerical Recipes: The Art of Scientific Computing*, ch. - 12-13. Cambridge Univ. Press, Cambridge, UK. - -Examples -------- - -For examples, see the various functions. - -""" -from __future__ import division, absolute_import, print_function - -depends = ['core'] diff --git a/numpy/fft/setup.py b/numpy/fft/setup.py index 6c3548b65..8c3a31557 100644 --- a/numpy/fft/setup.py +++ b/numpy/fft/setup.py @@ -8,8 +8,8 @@ def configuration(parent_package='',top_path=None): config.add_data_dir('tests') # Configure pocketfft_internal - config.add_extension('pocketfft_internal', - sources=['pocketfft.c'] + config.add_extension('_pocketfft_internal', + sources=['_pocketfft.c'] ) return config diff --git a/numpy/lib/__init__.py b/numpy/lib/__init__.py index c1757150e..2db12d9a4 100644 --- a/numpy/lib/__init__.py +++ b/numpy/lib/__init__.py @@ -1,14 +1,31 @@ +""" +**Note:** almost all functions in the ``numpy.lib`` namespace +are also present in the main ``numpy`` namespace. Please use the +functions as ``np.<funcname>`` where possible. + +``numpy.lib`` is mostly a space for implementing functions that don't +belong in core or in another NumPy submodule with a clear purpose +(e.g. ``random``, ``fft``, ``linalg``, ``ma``). + +Most of it contains basic functions that are used by several submodules and are +useful to have in the main namespace.
+ +""" from __future__ import division, absolute_import, print_function import math -from .info import __doc__ from numpy.version import version as __version__ +# Public submodules +# Note: recfunctions and (maybe) format are public too, but not imported +from . import mixins +from . import scimath as emath + +# Private submodules from .type_check import * from .index_tricks import * from .function_base import * -from .mixins import * from .nanfunctions import * from .shape_base import * from .stride_tricks import * @@ -16,9 +33,7 @@ from .twodim_base import * from .ufunclike import * from .histograms import * -from . import scimath as emath from .polynomial import * -#import convertcode from .utils import * from .arraysetops import * from .npyio import * @@ -28,11 +43,10 @@ from .arraypad import * from ._version import * from numpy.core._multiarray_umath import tracemalloc_domain -__all__ = ['emath', 'math', 'tracemalloc_domain'] +__all__ = ['emath', 'math', 'tracemalloc_domain', 'Arrayterator'] __all__ += type_check.__all__ __all__ += index_tricks.__all__ __all__ += function_base.__all__ -__all__ += mixins.__all__ __all__ += shape_base.__all__ __all__ += stride_tricks.__all__ __all__ += twodim_base.__all__ diff --git a/numpy/lib/arraypad.py b/numpy/lib/arraypad.py index 62330e692..33e64708d 100644 --- a/numpy/lib/arraypad.py +++ b/numpy/lib/arraypad.py @@ -17,66 +17,6 @@ __all__ = ['pad'] # Private utility functions. -def _linear_ramp(ndim, axis, start, stop, size, reverse=False): - """ - Create a linear ramp of `size` in `axis` with `ndim`. - - This algorithm behaves like a vectorized version of `numpy.linspace`. - The resulting linear ramp is broadcastable to any array that matches the - ramp in `shape[axis]` and `ndim`. - - Parameters - ---------- - ndim : int - Number of dimensions of the resulting array. All dimensions except - the one specified by `axis` will have the size 1. - axis : int - The dimension that contains the linear ramp of `size`. - start : int or ndarray - The starting value(s) of the linear ramp. If given as an array, its - size must match `size`. - stop : int or ndarray - The stop value(s) (not included!) of the linear ramp. If given as an - array, its size must match `size`. - size : int - The number of elements in the linear ramp. If this argument is 0 the - dimensions of `ramp` will all be of length 1 except for the one given - by `axis` which will be 0. - reverse : bool - If False, increment in a positive fashion, otherwise decrement. - - Returns - ------- - ramp : ndarray - Output array of dtype np.float64 that in- or decrements along the given - `axis`. - - Examples - -------- - >>> _linear_ramp(ndim=2, axis=0, start=np.arange(3), stop=10, size=2) - array([[0. , 1. , 2. ], - [5. , 5.5, 6. ]]) - >>> _linear_ramp(ndim=3, axis=0, start=2, stop=0, size=0) - array([], shape=(0, 1, 1), dtype=float64) - """ - # Create initial ramp - ramp = np.arange(size, dtype=np.float64) - if reverse: - ramp = ramp[::-1] - - # Make sure, that ramp is broadcastable - init_shape = (1,) * axis + (size,) + (1,) * (ndim - axis - 1) - ramp = ramp.reshape(init_shape) - - if size != 0: - # And scale to given start and stop values - gain = (stop - start) / float(size) - ramp = ramp * gain - ramp += start - - return ramp - - def _round_if_needed(arr, dtype): """ Rounds arr inplace if destination dtype is integer. 
@@ -269,17 +209,25 @@ def _get_linear_ramps(padded, axis, width_pair, end_value_pair): """ edge_pair = _get_edges(padded, axis, width_pair) - left_ramp = _linear_ramp( - padded.ndim, axis, start=end_value_pair[0], stop=edge_pair[0], - size=width_pair[0], reverse=False + left_ramp = np.linspace( + start=end_value_pair[0], + stop=edge_pair[0].squeeze(axis), # Dimension is replaced by linspace + num=width_pair[0], + endpoint=False, + dtype=padded.dtype, + axis=axis, ) - _round_if_needed(left_ramp, padded.dtype) - right_ramp = _linear_ramp( - padded.ndim, axis, start=end_value_pair[1], stop=edge_pair[1], - size=width_pair[1], reverse=True + right_ramp = np.linspace( + start=end_value_pair[1], + stop=edge_pair[1].squeeze(axis), # Dimension is replaced by linspace + num=width_pair[1], + endpoint=False, + dtype=padded.dtype, + axis=axis, ) - _round_if_needed(right_ramp, padded.dtype) + # Reverse the linear space in the appropriate dimension + right_ramp = right_ramp[_slice_at_axis(slice(None, None, -1), axis)] return left_ramp, right_ramp diff --git a/numpy/lib/arraysetops.py b/numpy/lib/arraysetops.py index f3f4bc17e..2309f7e42 100644 --- a/numpy/lib/arraysetops.py +++ b/numpy/lib/arraysetops.py @@ -213,6 +213,7 @@ def unique(ar, return_index=False, return_inverse=False, ----- When an axis is specified the subarrays indexed by the axis are sorted. This is done by making the specified axis the first dimension of the array + (the axis is moved to the front with `moveaxis`, which preserves the order of the other axes) and then flattening the subarrays in C order. The flattened subarrays are then viewed as a structured type with each element given a label, with the effect that we end up with a 1-D array of structured types that can be @@ -264,7 +265,7 @@ def unique(ar, return_index=False, return_inverse=False, # axis was specified and not None try: - ar = np.swapaxes(ar, axis, 0) + ar = np.moveaxis(ar, axis, 0) except np.AxisError: # this removes the "axis1" or "axis2" prefix from the error message raise np.AxisError(axis, ar.ndim) @@ -285,7 +286,7 @@ def unique(ar, return_index=False, return_inverse=False, def reshape_uniq(uniq): uniq = uniq.view(orig_dtype) uniq = uniq.reshape(-1, *orig_shape[1:]) - uniq = np.swapaxes(uniq, 0, axis) + uniq = np.moveaxis(uniq, 0, axis) return uniq output = _unique1d(consolidated, return_index, diff --git a/numpy/lib/financial.py b/numpy/lib/financial.py index 216687475..a011e52a9 100644 --- a/numpy/lib/financial.py +++ b/numpy/lib/financial.py @@ -12,6 +12,7 @@ otherwise stated. """ from __future__ import division, absolute_import, print_function +import warnings from decimal import Decimal import functools @@ -19,6 +20,10 @@ import numpy as np from numpy.core import overrides +_depmsg = ("numpy.{name} is deprecated and will be removed in NumPy 1.20. " + "Use numpy_financial.{name} instead " + "(https://pypi.org/project/numpy-financial/).") + array_function_dispatch = functools.partial( overrides.array_function_dispatch, module='numpy') @@ -45,6 +50,8 @@ def _convert_when(when): def _fv_dispatcher(rate, nper, pmt, pv, when=None): + warnings.warn(_depmsg.format(name='fv'), + DeprecationWarning, stacklevel=3) return (rate, nper, pmt, pv) @@ -53,6 +60,12 @@ def fv(rate, nper, pmt, pv, when='end'): """ Compute the future value. + .. deprecated:: 1.18 + + `fv` is deprecated; for details, see NEP 32 [1]_. + Use the corresponding function in the numpy-financial library, + https://pypi.org/project/numpy-financial.
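Migration for the deprecated functions is mechanical: they live on with unchanged signatures in the ``numpy-financial`` package. A hedged before/after sketch (assumes ``pip install numpy-financial``):

```python
import numpy_financial as npf

# was: np.fv(0.05 / 12, 10 * 12, -100, -100)
fv = npf.fv(0.05 / 12, 10 * 12, -100, -100)  # future value, about 15692.93
```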
+ Given: * a present value, `pv` * an interest `rate` compounded once per period, of which @@ -100,7 +113,9 @@ def fv(rate, nper, pmt, pv, when='end'): References ---------- - .. [WRW] Wheeler, D. A., E. Rathke, and R. Weir (Eds.) (2009, May). + .. [1] NumPy Enhancement Proposal (NEP) 32, + https://numpy.org/neps/nep-0032-remove-financial-functions.html + .. [2] Wheeler, D. A., E. Rathke, and R. Weir (Eds.) (2009, May). Open Document Format for Office Applications (OpenDocument)v1.2, Part 2: Recalculated Formula (OpenFormula) Format - Annotated Version, Pre-Draft 12. Organization for the Advancement of Structured Information @@ -109,6 +124,7 @@ def fv(rate, nper, pmt, pv, when='end'): http://www.oasis-open.org/committees/documents.php?wg_abbrev=office-formula OpenDocument-formula-20090508.odt + Examples -------- What is the future value after 10 years of saving $100 now, with @@ -139,6 +155,8 @@ def fv(rate, nper, pmt, pv, when='end'): def _pmt_dispatcher(rate, nper, pv, fv=None, when=None): + warnings.warn(_depmsg.format(name='pmt'), + DeprecationWarning, stacklevel=3) return (rate, nper, pv, fv) @@ -147,6 +165,12 @@ def pmt(rate, nper, pv, fv=0, when='end'): """ Compute the payment against loan principal plus interest. + .. deprecated:: 1.18 + + `pmt` is deprecated; for details, see NEP 32 [1]_. + Use the corresponding function in the numpy-financial library, + https://pypi.org/project/numpy-financial. + Given: * a present value, `pv` (e.g., an amount borrowed) * a future value, `fv` (e.g., 0) @@ -204,7 +228,9 @@ def pmt(rate, nper, pv, fv=0, when='end'): References ---------- - .. [WRW] Wheeler, D. A., E. Rathke, and R. Weir (Eds.) (2009, May). + .. [1] NumPy Enhancement Proposal (NEP) 32, + https://numpy.org/neps/nep-0032-remove-financial-functions.html + .. [2] Wheeler, D. A., E. Rathke, and R. Weir (Eds.) (2009, May). Open Document Format for Office Applications (OpenDocument)v1.2, Part 2: Recalculated Formula (OpenFormula) Format - Annotated Version, Pre-Draft 12. Organization for the Advancement of Structured Information @@ -237,6 +263,8 @@ def pmt(rate, nper, pv, fv=0, when='end'): def _nper_dispatcher(rate, pmt, pv, fv=None, when=None): + warnings.warn(_depmsg.format(name='nper'), + DeprecationWarning, stacklevel=3) return (rate, pmt, pv, fv) @@ -245,6 +273,12 @@ def nper(rate, pmt, pv, fv=0, when='end'): """ Compute the number of periodic payments. + .. deprecated:: 1.18 + + `nper` is deprecated; for details, see NEP 32 [1]_. + Use the corresponding function in the numpy-financial library, + https://pypi.org/project/numpy-financial. + :class:`decimal.Decimal` type is not supported. Parameters @@ -270,6 +304,11 @@ def nper(rate, pmt, pv, fv=0, when='end'): fv + pv + pmt*nper = 0 + References + ---------- + .. [1] NumPy Enhancement Proposal (NEP) 32, + https://numpy.org/neps/nep-0032-remove-financial-functions.html + Examples -------- If you only had $150/month to pay towards the loan, how long would it take @@ -311,6 +350,8 @@ def nper(rate, pmt, pv, fv=0, when='end'): def _ipmt_dispatcher(rate, per, nper, pv, fv=None, when=None): + warnings.warn(_depmsg.format(name='ipmt'), + DeprecationWarning, stacklevel=3) return (rate, per, nper, pv, fv) @@ -319,6 +360,12 @@ def ipmt(rate, per, nper, pv, fv=0, when='end'): """ Compute the interest portion of a payment. + .. deprecated:: 1.18 + + `ipmt` is deprecated; for details, see NEP 32 [1]_. + Use the corresponding function in the numpy-financial library, + https://pypi.org/project/numpy-financial. 
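Because the warning is raised in the dispatcher, it fires before any dispatch on array-likes. Downstream code that still calls these functions can assert or silence it in the usual way; a minimal sketch, assuming a NumPy version that still ships the financial functions:

```python
import warnings
import numpy as np

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always", DeprecationWarning)
    np.fv(0.075, 20, -2000, 0)  # deprecated since 1.18, see NEP 32
assert any(w.category is DeprecationWarning for w in caught)
```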
+ Parameters ---------- rate : scalar or array_like of shape(M, ) @@ -354,6 +401,11 @@ def ipmt(rate, per, nper, pv, fv=0, when='end'): ``pmt = ppmt + ipmt`` + References + ---------- + .. [1] NumPy Enhancement Proposal (NEP) 32, + https://numpy.org/neps/nep-0032-remove-financial-functions.html + Examples -------- What is the amortization schedule for a 1 year loan of $2500 at @@ -422,6 +474,8 @@ def _rbl(rate, per, pmt, pv, when): def _ppmt_dispatcher(rate, per, nper, pv, fv=None, when=None): + warnings.warn(_depmsg.format(name='ppmt'), + DeprecationWarning, stacklevel=3) return (rate, per, nper, pv, fv) @@ -430,6 +484,12 @@ def ppmt(rate, per, nper, pv, fv=0, when='end'): """ Compute the payment against loan principal. + .. deprecated:: 1.18 + + `ppmt` is deprecated; for details, see NEP 32 [1]_. + Use the corresponding function in the numpy-financial library, + https://pypi.org/project/numpy-financial. + Parameters ---------- rate : array_like @@ -450,12 +510,19 @@ def ppmt(rate, per, nper, pv, fv=0, when='end'): -------- pmt, pv, ipmt + References + ---------- + .. [1] NumPy Enhancement Proposal (NEP) 32, + https://numpy.org/neps/nep-0032-remove-financial-functions.html + """ total = pmt(rate, nper, pv, fv, when) return total - ipmt(rate, per, nper, pv, fv, when) def _pv_dispatcher(rate, nper, pmt, fv=None, when=None): + warnings.warn(_depmsg.format(name='pv'), + DeprecationWarning, stacklevel=3) return (rate, nper, nper, pv, fv) @@ -464,6 +531,12 @@ def pv(rate, nper, pmt, fv=0, when='end'): """ Compute the present value. + .. deprecated:: 1.18 + + `pv` is deprecated; for details, see NEP 32 [1]_. + Use the corresponding function in the numpy-financial library, + https://pypi.org/project/numpy-financial. + Given: * a future value, `fv` * an interest `rate` compounded once per period, of which @@ -510,7 +583,9 @@ def pv(rate, nper, pmt, fv=0, when='end'): References ---------- - .. [WRW] Wheeler, D. A., E. Rathke, and R. Weir (Eds.) (2009, May). + .. [1] NumPy Enhancement Proposal (NEP) 32, + https://numpy.org/neps/nep-0032-remove-financial-functions.html + .. [2] Wheeler, D. A., E. Rathke, and R. Weir (Eds.) (2009, May). Open Document Format for Office Applications (OpenDocument)v1.2, Part 2: Recalculated Formula (OpenFormula) Format - Annotated Version, Pre-Draft 12. Organization for the Advancement of Structured Information @@ -567,6 +642,8 @@ def _g_div_gp(r, n, p, x, y, w): def _rate_dispatcher(nper, pmt, pv, fv, when=None, guess=None, tol=None, maxiter=None): + warnings.warn(_depmsg.format(name='rate'), + DeprecationWarning, stacklevel=3) return (nper, pmt, pv, fv) @@ -582,6 +659,12 @@ def rate(nper, pmt, pv, fv, when='end', guess=None, tol=None, maxiter=100): """ Compute the rate of interest per period. + .. deprecated:: 1.18 + + `rate` is deprecated; for details, see NEP 32 [1]_. + Use the corresponding function in the numpy-financial library, + https://pypi.org/project/numpy-financial. + Parameters ---------- nper : array_like @@ -612,13 +695,16 @@ def rate(nper, pmt, pv, fv, when='end', guess=None, tol=None, maxiter=100): References ---------- - Wheeler, D. A., E. Rathke, and R. Weir (Eds.) (2009, May). Open Document - Format for Office Applications (OpenDocument)v1.2, Part 2: Recalculated - Formula (OpenFormula) Format - Annotated Version, Pre-Draft 12. - Organization for the Advancement of Structured Information Standards - (OASIS). Billerica, MA, USA. [ODT Document]. 
Available: - http://www.oasis-open.org/committees/documents.php?wg_abbrev=office-formula - OpenDocument-formula-20090508.odt + .. [1] NumPy Enhancement Proposal (NEP) 32, + https://numpy.org/neps/nep-0032-remove-financial-functions.html + .. [2] Wheeler, D. A., E. Rathke, and R. Weir (Eds.) (2009, May). + Open Document Format for Office Applications (OpenDocument)v1.2, + Part 2: Recalculated Formula (OpenFormula) Format - Annotated Version, + Pre-Draft 12. Organization for the Advancement of Structured Information + Standards (OASIS). Billerica, MA, USA. [ODT Document]. + Available: + http://www.oasis-open.org/committees/documents.php?wg_abbrev=office-formula + OpenDocument-formula-20090508.odt """ when = _convert_when(when) @@ -651,6 +737,8 @@ def rate(nper, pmt, pv, fv, when='end', guess=None, tol=None, maxiter=100): def _irr_dispatcher(values): + warnings.warn(_depmsg.format(name='irr'), + DeprecationWarning, stacklevel=3) return (values,) @@ -659,6 +747,12 @@ def irr(values): """ Return the Internal Rate of Return (IRR). + .. deprecated:: 1.18 + + `irr` is deprecated; for details, see NEP 32 [1]_. + Use the corresponding function in the numpy-financial library, + https://pypi.org/project/numpy-financial. + This is the "average" periodically compounded rate of return that gives a net present value of 0.0; for a more complete explanation, see Notes below. @@ -693,13 +787,15 @@ def irr(values): + \\frac{55}{(1+r)^3} + \\frac{20}{(1+r)^4} = 0 In general, for `values` :math:`= [v_0, v_1, ... v_M]`, - irr is the solution of the equation: [G]_ + irr is the solution of the equation: [2]_ .. math:: \\sum_{t=0}^M{\\frac{v_t}{(1+irr)^{t}}} = 0 References ---------- - .. [G] L. J. Gitman, "Principles of Managerial Finance, Brief," 3rd ed., + .. [1] NumPy Enhancement Proposal (NEP) 32, + https://numpy.org/neps/nep-0032-remove-financial-functions.html + .. [2] L. J. Gitman, "Principles of Managerial Finance, Brief," 3rd ed., Addison-Wesley, 2003, pg. 348. Examples @@ -715,8 +811,6 @@ def irr(values): >>> round(np.irr([-5, 10.5, 1, -8, 1]), 5) 0.0886 - (Compare with the Example given for numpy.lib.financial.npv) - """ # `np.roots` call is why this function does not support Decimal type. # @@ -736,6 +830,8 @@ def irr(values): def _npv_dispatcher(rate, values): + warnings.warn(_depmsg.format(name='npv'), + DeprecationWarning, stacklevel=3) return (values,) @@ -744,6 +840,12 @@ def npv(rate, values): """ Returns the NPV (Net Present Value) of a cash flow series. + .. deprecated:: 1.18 + + `npv` is deprecated; for details, see NEP 32 [1]_. + Use the corresponding function in the numpy-financial library, + https://pypi.org/project/numpy-financial. + Parameters ---------- rate : scalar @@ -763,23 +865,48 @@ def npv(rate, values): The NPV of the input cash flow series `values` at the discount `rate`. + Warnings + -------- + ``npv`` considers a series of cashflows starting in the present (t = 0). + NPV can also be defined with a series of future cashflows, paid at the + end, rather than the start, of each period. If future cashflows are used, + the first cashflow `values[0]` must be zeroed and added to the net + present value of the future cashflows. This is demonstrated in the + examples. + Notes ----- - Returns the result of: [G]_ + Returns the result of: [2]_ .. math :: \\sum_{t=0}^{M-1}{\\frac{values_t}{(1+rate)^{t}}} References ---------- - .. [G] L. J. Gitman, "Principles of Managerial Finance, Brief," 3rd ed., + .. 
[1] NumPy Enhancement Proposal (NEP) 32, + https://numpy.org/neps/nep-0032-remove-financial-functions.html + .. [2] L. J. Gitman, "Principles of Managerial Finance, Brief," 3rd ed., Addison-Wesley, 2003, pg. 346. Examples -------- - >>> np.npv(0.281,[-100, 39, 59, 55, 20]) - -0.0084785916384548798 # may vary + Consider a potential project with an initial investment of $40 000 and + projected cashflows of $5 000, $8 000, $12 000 and $30 000 at the end of + each period discounted at a rate of 8% per period. To find the project's + net present value: + + >>> rate, cashflows = 0.08, [-40_000, 5_000, 8_000, 12_000, 30_000] + >>> np.npv(rate, cashflows).round(5) + 3065.22267 - (Compare with the Example given for numpy.lib.financial.irr) + It may be preferable to split the projected cashflow into an initial + investment and expected future cashflows. In this case, the value of + the initial cashflow is zero and the initial investment is later added + to the future cashflows net present value: + + >>> initial_cashflow = cashflows[0] + >>> cashflows[0] = 0 + >>> np.round(np.npv(rate, cashflows) + initial_cashflow, 5) + 3065.22267 """ values = np.asarray(values) @@ -787,6 +914,8 @@ def npv(rate, values): def _mirr_dispatcher(values, finance_rate, reinvest_rate): + warnings.warn(_depmsg.format(name='mirr'), + DeprecationWarning, stacklevel=3) return (values,) @@ -795,6 +924,12 @@ def mirr(values, finance_rate, reinvest_rate): """ Modified internal rate of return. + .. deprecated:: 1.18 + + `mirr` is deprecated; for details, see NEP 32 [1]_. + Use the corresponding function in the numpy-financial library, + https://pypi.org/project/numpy-financial. + Parameters ---------- values : array_like @@ -811,6 +946,10 @@ def mirr(values, finance_rate, reinvest_rate): out : float Modified internal rate of return + References + ---------- + .. [1] NumPy Enhancement Proposal (NEP) 32, + https://numpy.org/neps/nep-0032-remove-financial-functions.html """ values = np.asarray(values) n = values.size diff --git a/numpy/lib/format.py b/numpy/lib/format.py index 3bf818812..1ecd72815 100644 --- a/numpy/lib/format.py +++ b/numpy/lib/format.py @@ -173,6 +173,9 @@ from numpy.compat import ( ) +__all__ = [] + + MAGIC_PREFIX = b'\x93NUMPY' MAGIC_LEN = len(MAGIC_PREFIX) + 2 ARRAY_ALIGN = 64 # plausible values are powers of 2 between 16 and 4096 diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index 9d380e67d..3ad630a7d 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -316,14 +316,17 @@ def average(a, axis=None, weights=None, returned=False): The weights array can either be 1-D (in which case its length must be the size of `a` along the given axis) or of the same shape as `a`. If `weights=None`, then all data in `a` are assumed to have a - weight equal to one. + weight equal to one. The 1-D calculation is:: + + avg = sum(a * weights) / sum(weights) + + The only constraint on `weights` is that `sum(weights)` must not be 0. returned : bool, optional Default is `False`. If `True`, the tuple (`average`, `sum_of_weights`) is returned, otherwise only the average is returned. If `weights=None`, `sum_of_weights` is equivalent to the number of elements over which the average is taken. 
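The 1-D formula just added to the ``average`` docstring, checked in a couple of lines:

```python
import numpy as np

a = np.arange(1, 5)            # [1, 2, 3, 4]
w = np.array([4, 3, 2, 1])
# avg = sum(a * weights) / sum(weights)
assert np.average(a, weights=w) == np.sum(a * w) / np.sum(w)  # 2.0
```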
- Returns ------- retval, [sum_of_weights] : array_type or double @@ -679,11 +682,7 @@ def select(condlist, choicelist, default=0): # Now that the dtype is known, handle the deprecated select([], []) case if len(condlist) == 0: - # 2014-02-24, 1.9 - warnings.warn("select with an empty condition list is not possible" - "and will be deprecated", - DeprecationWarning, stacklevel=3) - return np.asarray(default)[()] + raise ValueError("select with an empty condition list is not possible") choicelist = [np.asarray(choice) for choice in choicelist] choicelist.append(np.asarray(default)) @@ -699,25 +698,11 @@ def select(condlist, choicelist, default=0): choicelist = np.broadcast_arrays(*choicelist) # If cond array is not an ndarray in boolean format or scalar bool, abort. - deprecated_ints = False for i in range(len(condlist)): cond = condlist[i] if cond.dtype.type is not np.bool_: - if np.issubdtype(cond.dtype, np.integer): - # A previous implementation accepted int ndarrays accidentally. - # Supported here deliberately, but deprecated. - condlist[i] = condlist[i].astype(bool) - deprecated_ints = True - else: - raise ValueError( - 'invalid entry {} in condlist: should be boolean ndarray'.format(i)) - - if deprecated_ints: - # 2014-02-24, 1.9 - msg = "select condlists containing integer ndarrays is deprecated " \ - "and will be removed in the future. Use `.astype(bool)` to " \ - "convert to bools." - warnings.warn(msg, DeprecationWarning, stacklevel=3) + raise TypeError( + 'invalid entry {} in condlist: should be boolean ndarray'.format(i)) if choicelist[0].ndim == 0: # This may be common, so avoid the call. @@ -1164,11 +1149,13 @@ def diff(a, n=1, axis=-1, prepend=np._NoValue, append=np._NoValue): The axis along which the difference is taken, default is the last axis. prepend, append : array_like, optional - Values to prepend or append to "a" along axis prior to + Values to prepend or append to `a` along axis prior to performing the difference. Scalar values are expanded to arrays with length 1 in the direction of axis and the shape of the input array in along all other axes. Otherwise the - dimension and shape must match "a" except along axis. + dimension and shape must match `a` except along axis. + + .. versionadded:: 1.16.0 Returns ------- @@ -1327,9 +1314,13 @@ def interp(x, xp, fp, left=None, right=None, period=None): Notes ----- - Does not check that the x-coordinate sequence `xp` is increasing. - If `xp` is not increasing, the results are nonsense. - A simple check for increasing is:: + The x-coordinate sequence is expected to be increasing, but this is not + explicitly enforced. However, if the sequence `xp` is non-increasing, + interpolation results are meaningless. + + Note that, since NaN is unsortable, `xp` also cannot contain NaNs. + + A simple check for `xp` being strictly increasing is:: np.all(np.diff(xp) > 0) @@ -1902,7 +1893,7 @@ class vectorize(object): typecode characters or a list of data type specifiers. There should be one data type specifier for each output. doc : str, optional - The docstring for the function. If `None`, the docstring will be the + The docstring for the function. If None, the docstring will be the ``pyfunc.__doc__``. 
excluded : set, optional Set of strings or integers representing the positional or keyword @@ -3310,13 +3301,6 @@ def sinc(x): Text(0.5, 0, 'X') >>> plt.show() - It works in 2-D as well: - - >>> x = np.linspace(-4, 4, 401) - >>> xx = np.outer(x, x) - >>> plt.imshow(np.sinc(xx)) - <matplotlib.image.AxesImage object at 0x...> - """ x = np.asanyarray(x) y = pi * where(x == 0, 1.0e-20, x) diff --git a/numpy/lib/histograms.py b/numpy/lib/histograms.py index 8474bd5d3..03c365ab6 100644 --- a/numpy/lib/histograms.py +++ b/numpy/lib/histograms.py @@ -22,6 +22,16 @@ array_function_dispatch = functools.partial( _range = range +def _ptp(x): + """Peak-to-peak value of x. + + This implementation avoids the problem of signed integer arrays having a + peak-to-peak value that cannot be represented with the array's data type. + This function returns an unsigned value for signed integer arrays. + """ + return _unsigned_subtract(x.max(), x.min()) + + def _hist_bin_sqrt(x, range): """ Square root histogram bin estimator. @@ -40,7 +50,7 @@ def _hist_bin_sqrt(x, range): h : An estimate of the optimal bin width for the given data. """ del range # unused - return x.ptp() / np.sqrt(x.size) + return _ptp(x) / np.sqrt(x.size) def _hist_bin_sturges(x, range): @@ -63,7 +73,7 @@ def _hist_bin_sturges(x, range): h : An estimate of the optimal bin width for the given data. """ del range # unused - return x.ptp() / (np.log2(x.size) + 1.0) + return _ptp(x) / (np.log2(x.size) + 1.0) def _hist_bin_rice(x, range): @@ -87,7 +97,7 @@ def _hist_bin_rice(x, range): h : An estimate of the optimal bin width for the given data. """ del range # unused - return x.ptp() / (2.0 * x.size ** (1.0 / 3)) + return _ptp(x) / (2.0 * x.size ** (1.0 / 3)) def _hist_bin_scott(x, range): @@ -137,7 +147,7 @@ def _hist_bin_stone(x, range): """ n = x.size - ptp_x = np.ptp(x) + ptp_x = _ptp(x) if n <= 1 or ptp_x == 0: return 0 @@ -184,7 +194,7 @@ def _hist_bin_doane(x, range): np.true_divide(temp, sigma, temp) np.power(temp, 3, temp) g1 = np.mean(temp) - return x.ptp() / (1.0 + np.log2(x.size) + + return _ptp(x) / (1.0 + np.log2(x.size) + np.log2(1.0 + np.absolute(g1) / sg1)) return 0.0 diff --git a/numpy/lib/info.py b/numpy/lib/info.py deleted file mode 100644 index 8815a52f0..000000000 --- a/numpy/lib/info.py +++ /dev/null @@ -1,160 +0,0 @@ -""" -Basic functions used by several sub-packages and -useful to have in the main name-space. - -Type Handling -------------- -================ =================== -iscomplexobj Test for complex object, scalar result -isrealobj Test for real object, scalar result -iscomplex Test for complex elements, array result -isreal Test for real elements, array result -imag Imaginary part -real Real part -real_if_close Turns complex number with tiny imaginary part to real -isneginf Tests for negative infinity, array result -isposinf Tests for positive infinity, array result -isnan Tests for nans, array result -isinf Tests for infinity, array result -isfinite Tests for finite numbers, array result -isscalar True if argument is a scalar -nan_to_num Replaces NaN's with 0 and infinities with large numbers -cast Dictionary of functions to force cast to each type -common_type Determine the minimum common type code for a group - of arrays -mintypecode Return minimal allowed common typecode. 
-================ =================== - -Index Tricks ------------- -================ =================== -mgrid Method which allows easy construction of N-d - 'mesh-grids' -``r_`` Append and construct arrays: turns slice objects into - ranges and concatenates them, for 2d arrays appends rows. -index_exp Konrad Hinsen's index_expression class instance which - can be useful for building complicated slicing syntax. -================ =================== - -Useful Functions ----------------- -================ =================== -select Extension of where to multiple conditions and choices -extract Extract 1d array from flattened array according to mask -insert Insert 1d array of values into Nd array according to mask -linspace Evenly spaced samples in linear space -logspace Evenly spaced samples in logarithmic space -fix Round x to nearest integer towards zero -mod Modulo mod(x,y) = x % y except keeps sign of y -amax Array maximum along axis -amin Array minimum along axis -ptp Array max-min along axis -cumsum Cumulative sum along axis -prod Product of elements along axis -cumprod Cumluative product along axis -diff Discrete differences along axis -angle Returns angle of complex argument -unwrap Unwrap phase along given axis (1-d algorithm) -sort_complex Sort a complex-array (based on real, then imaginary) -trim_zeros Trim the leading and trailing zeros from 1D array. -vectorize A class that wraps a Python function taking scalar - arguments into a generalized function which can handle - arrays of arguments using the broadcast rules of - numerix Python. -================ =================== - -Shape Manipulation ------------------- -================ =================== -squeeze Return a with length-one dimensions removed. -atleast_1d Force arrays to be >= 1D -atleast_2d Force arrays to be >= 2D -atleast_3d Force arrays to be >= 3D -vstack Stack arrays vertically (row on row) -hstack Stack arrays horizontally (column on column) -column_stack Stack 1D arrays as columns into 2D array -dstack Stack arrays depthwise (along third dimension) -stack Stack arrays along a new axis -split Divide array into a list of sub-arrays -hsplit Split into columns -vsplit Split into rows -dsplit Split along third dimension -================ =================== - -Matrix (2D Array) Manipulations -------------------------------- -================ =================== -fliplr 2D array with columns flipped -flipud 2D array with rows flipped -rot90 Rotate a 2D array a multiple of 90 degrees -eye Return a 2D array with ones down a given diagonal -diag Construct a 2D array from a vector, or return a given - diagonal from a 2D array. -mat Construct a Matrix -bmat Build a Matrix from blocks -================ =================== - -Polynomials ------------ -================ =================== -poly1d A one-dimensional polynomial class -poly Return polynomial coefficients from roots -roots Find roots of polynomial given coefficients -polyint Integrate polynomial -polyder Differentiate polynomial -polyadd Add polynomials -polysub Subtract polynomials -polymul Multiply polynomials -polydiv Divide polynomials -polyval Evaluate polynomial at given argument -================ =================== - -Iterators ---------- -================ =================== -Arrayterator A buffered iterator for big arrays. 
-================ =================== - -Import Tricks -------------- -================ =================== -ppimport Postpone module import until trying to use it -ppimport_attr Postpone module import until trying to use its attribute -ppresolve Import postponed module and return it. -================ =================== - -Machine Arithmetics -------------------- -================ =================== -machar_single Single precision floating point arithmetic parameters -machar_double Double precision floating point arithmetic parameters -================ =================== - -Threading Tricks ----------------- -================ =================== -ParallelExec Execute commands in parallel thread. -================ =================== - -Array Set Operations ------------------------ -Set operations for numeric arrays based on sort() function. - -================ =================== -unique Unique elements of an array. -isin Test whether each element of an ND array is present - anywhere within a second array. -ediff1d Array difference (auxiliary function). -intersect1d Intersection of 1D arrays with unique elements. -setxor1d Set exclusive-or of 1D arrays with unique elements. -in1d Test whether elements in a 1D array are also present in - another array. -union1d Union of 1D arrays with unique elements. -setdiff1d Set difference of 1D arrays with unique elements. -================ =================== - -""" -from __future__ import division, absolute_import, print_function - -depends = ['core', 'testing'] -global_symbols = ['*'] diff --git a/numpy/lib/mixins.py b/numpy/lib/mixins.py index 52ad45b68..f974a7724 100644 --- a/numpy/lib/mixins.py +++ b/numpy/lib/mixins.py @@ -5,8 +5,8 @@ import sys from numpy.core import umath as um -# Nothing should be exposed in the top-level NumPy module. -__all__ = [] + +__all__ = ['NDArrayOperatorsMixin'] def _disables_array_ufunc(obj): diff --git a/numpy/lib/nanfunctions.py b/numpy/lib/nanfunctions.py index 9a03d0b39..18ccab3b8 100644 --- a/numpy/lib/nanfunctions.py +++ b/numpy/lib/nanfunctions.py @@ -244,8 +244,8 @@ def nanmin(a, axis=None, out=None, keepdims=np._NoValue): out : ndarray, optional Alternate output array in which to place the result. The default is ``None``; if provided, it must have the same shape as the - expected output, but the type will be cast if necessary. See - `doc.ufuncs` for details. + expected output, but the type will be cast if necessary. See + `ufuncs-output-type` for more details. .. versionadded:: 1.8.0 keepdims : bool, optional @@ -359,8 +359,8 @@ def nanmax(a, axis=None, out=None, keepdims=np._NoValue): out : ndarray, optional Alternate output array in which to place the result. The default is ``None``; if provided, it must have the same shape as the - expected output, but the type will be cast if necessary. See - `doc.ufuncs` for details. + expected output, but the type will be cast if necessary. See + `ufuncs-output-type` for more details. .. versionadded:: 1.8.0 keepdims : bool, optional @@ -585,8 +585,8 @@ def nansum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue): Alternate output array in which to place the result. The default is ``None``. If provided, it must have the same shape as the expected output, but the type will be cast if necessary. See - `doc.ufuncs` for details. The casting of NaN to integer can yield - unexpected results. + `ufuncs-output-type` for more details. The casting of NaN to integer + can yield unexpected results. .. 
versionadded:: 1.8.0 keepdims : bool, optional @@ -681,9 +681,9 @@ def nanprod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue): out : ndarray, optional Alternate output array in which to place the result. The default is ``None``. If provided, it must have the same shape as the - expected output, but the type will be cast if necessary. See - `doc.ufuncs` for details. The casting of NaN to integer can yield - unexpected results. + expected output, but the type will be cast if necessary. See + `ufuncs-output-type` for more details. The casting of NaN to integer + can yield unexpected results. keepdims : bool, optional If True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will @@ -750,8 +750,8 @@ def nancumsum(a, axis=None, dtype=None, out=None): out : ndarray, optional Alternative output array in which to place the result. It must have the same shape and buffer length as the expected output - but the type will be cast if necessary. See `doc.ufuncs` - (Section "Output arguments") for more details. + but the type will be cast if necessary. See `ufuncs-output-type` for + more details. Returns ------- @@ -888,8 +888,8 @@ def nanmean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue): out : ndarray, optional Alternate output array in which to place the result. The default is ``None``; if provided, it must have the same shape as the - expected output, but the type will be cast if necessary. See - `doc.ufuncs` for details. + expected output, but the type will be cast if necessary. See + `ufuncs-output-type` for more details. keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, @@ -1443,7 +1443,7 @@ def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue): the variance of the flattened array. dtype : data-type, optional Type to use in computing the variance. For arrays of integer type - the default is `float32`; for arrays of float types it is the same as + the default is `float64`; for arrays of float types it is the same as the array type. out : ndarray, optional Alternate output array in which to place the result. It must have @@ -1473,7 +1473,7 @@ def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue): mean : Average var : Variance while not ignoring NaNs nanstd, nanmean - numpy.doc.ufuncs : Section "Output arguments" + ufuncs-output-type Notes ----- @@ -1625,7 +1625,7 @@ def nanstd(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue): -------- var, mean, std nanvar, nanmean - numpy.doc.ufuncs : Section "Output arguments" + ufuncs-output-type Notes ----- diff --git a/numpy/lib/npyio.py b/numpy/lib/npyio.py index e57a6dd47..7e1d4db4f 100644 --- a/numpy/lib/npyio.py +++ b/numpy/lib/npyio.py @@ -480,7 +480,7 @@ def save(file, arr, allow_pickle=True, fix_imports=True): file : file, str, or pathlib.Path File or filename to which the data is saved. If file is a file-object, then the filename is unchanged. If file is a string or Path, a ``.npy`` - extension will be appended to the file name if it does not already + extension will be appended to the filename if it does not already have one. arr : array_like Array data to be saved. @@ -506,9 +506,9 @@ def save(file, arr, allow_pickle=True, fix_imports=True): Notes ----- For a description of the ``.npy`` format, see :py:mod:`numpy.lib.format`. - - Any data saved to the file is appended to the end of the file. 
- + + Any data saved to the file is appended to the end of the file. + Examples -------- >>> from tempfile import TemporaryFile @@ -524,7 +524,7 @@ def save(file, arr, allow_pickle=True, fix_imports=True): >>> with open('test.npy', 'wb') as f: ... np.save(f, np.array([1, 2])) - ... np.save(f, np.array([1, 3])) + ... np.save(f, np.array([1, 3])) >>> with open('test.npy', 'rb') as f: ... a = np.load(f) ... b = np.load(f) @@ -565,8 +565,7 @@ def _savez_dispatcher(file, *args, **kwds): @array_function_dispatch(_savez_dispatcher) def savez(file, *args, **kwds): - """ - Save several arrays into a single file in uncompressed ``.npz`` format. + """Save several arrays into a single file in uncompressed ``.npz`` format. If arguments are passed in with no keywords, the corresponding variable names, in the ``.npz`` file, are 'arr_0', 'arr_1', etc. If keyword @@ -576,9 +575,9 @@ def savez(file, *args, **kwds): Parameters ---------- file : str or file - Either the file name (string) or an open file (file-like object) + Either the filename (string) or an open file (file-like object) where the data will be saved. If file is a string or a Path, the - ``.npz`` extension will be appended to the file name if it is not + ``.npz`` extension will be appended to the filename if it is not already there. args : Arguments, optional Arrays to save to the file. Since it is not possible for Python to @@ -611,6 +610,10 @@ def savez(file, *args, **kwds): its list of arrays (with the ``.files`` attribute), and for the arrays themselves. + When saving dictionaries, the dictionary keys become filenames + inside the ZIP archive. Therefore, keys should be valid filenames. + E.g., avoid keys that begin with ``/`` or contain ``.``. + Examples -------- >>> from tempfile import TemporaryFile @@ -638,7 +641,6 @@ def savez(file, *args, **kwds): ['x', 'y'] >>> npzfile['x'] array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) - """ _savez(file, args, kwds, False) @@ -656,15 +658,15 @@ def savez_compressed(file, *args, **kwds): Save several arrays into a single file in compressed ``.npz`` format. If keyword arguments are given, then filenames are taken from the keywords. - If arguments are passed in with no keywords, then stored file names are + If arguments are passed in with no keywords, then stored filenames are arr_0, arr_1, etc. Parameters ---------- file : str or file - Either the file name (string) or an open file (file-like object) + Either the filename (string) or an open file (file-like object) where the data will be saved. If file is a string or a Path, the - ``.npz`` extension will be appended to the file name if it is not + ``.npz`` extension will be appended to the filename if it is not already there. args : Arguments, optional Arrays to save to the file. Since it is not possible for Python to @@ -1469,7 +1471,7 @@ def fromregex(file, regexp, dtype, encoding=None): Parameters ---------- file : str or file - File name or file object to read. + Filename or file object to read. regexp : str or regexp Regular expression used to parse the file. Groups in the regular expression correspond to fields in the dtype. diff --git a/numpy/lib/polynomial.py b/numpy/lib/polynomial.py index 2c72f623c..3d07a0de4 100644 --- a/numpy/lib/polynomial.py +++ b/numpy/lib/polynomial.py @@ -479,10 +479,10 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False): coefficients for `k`-th data set are in ``p[:,k]``. residuals, rank, singular_values, rcond - Present only if `full` = True. 
Residuals of the least-squares fit, - the effective rank of the scaled Vandermonde coefficient matrix, - its singular values, and the specified value of `rcond`. For more - details, see `linalg.lstsq`. + Present only if `full` = True. Residuals is the sum of squared residuals + of the least-squares fit, the effective rank of the scaled Vandermonde + coefficient matrix, its singular values, and the specified value of + `rcond`. For more details, see `linalg.lstsq`. V : ndarray, shape (M,M) or (M,M,K) Present only if `full` = False and `cov`=True. The covariance diff --git a/numpy/lib/recfunctions.py b/numpy/lib/recfunctions.py index 40060b41a..927161ddb 100644 --- a/numpy/lib/recfunctions.py +++ b/numpy/lib/recfunctions.py @@ -200,7 +200,7 @@ def flatten_descr(ndtype): descr = [] for field in names: (typ, _) = ndtype.fields[field] - if typ.names: + if typ.names is not None: descr.extend(flatten_descr(typ)) else: descr.append((field, typ)) @@ -527,6 +527,10 @@ def drop_fields(base, drop_names, usemask=True, asrecarray=False): Nested fields are supported. + .. versionchanged:: 1.18.0 + `drop_fields` returns an array with 0 fields if all fields are dropped, + rather than returning ``None`` as it did previously. + Parameters ---------- base : array @@ -566,7 +570,7 @@ def drop_fields(base, drop_names, usemask=True, asrecarray=False): current = ndtype[name] if name in drop_names: continue - if current.names: + if current.names is not None: descr = _drop_descr(current, drop_names) if descr: newdtype.append((name, descr)) @@ -575,8 +579,6 @@ def drop_fields(base, drop_names, usemask=True, asrecarray=False): return newdtype newdtype = _drop_descr(base.dtype, drop_names) - if not newdtype: - return None output = np.empty(base.shape, dtype=newdtype) output = recursive_fill_fields(base, output) diff --git a/numpy/lib/shape_base.py b/numpy/lib/shape_base.py index a5d0040aa..92d52109e 100644 --- a/numpy/lib/shape_base.py +++ b/numpy/lib/shape_base.py @@ -782,7 +782,7 @@ def _split_dispatcher(ary, indices_or_sections, axis=None): @array_function_dispatch(_split_dispatcher) def split(ary, indices_or_sections, axis=0): """ - Split an array into multiple sub-arrays. + Split an array into multiple sub-arrays as views into `ary`. Parameters ---------- @@ -809,7 +809,7 @@ def split(ary, indices_or_sections, axis=0): Returns ------- sub-arrays : list of ndarrays - A list of sub-arrays. + A list of sub-arrays as views into `ary`. Raises ------ @@ -854,8 +854,7 @@ def split(ary, indices_or_sections, axis=0): if N % sections: raise ValueError( 'array split does not result in an equal division') - res = array_split(ary, indices_or_sections, axis) - return res + return array_split(ary, indices_or_sections, axis) def _hvdsplit_dispatcher(ary, indices_or_sections): diff --git a/numpy/lib/tests/test_arraypad.py b/numpy/lib/tests/test_arraypad.py index b6dd3b31c..65593dd29 100644 --- a/numpy/lib/tests/test_arraypad.py +++ b/numpy/lib/tests/test_arraypad.py @@ -2,7 +2,6 @@ """ from __future__ import division, absolute_import, print_function -from itertools import chain import pytest @@ -11,6 +10,12 @@ from numpy.testing import assert_array_equal, assert_allclose, assert_equal from numpy.lib.arraypad import _as_pairs +_numeric_dtypes = ( + np.sctypes["uint"] + + np.sctypes["int"] + + np.sctypes["float"] + + np.sctypes["complex"] +) _all_modes = { 'constant': {'constant_values': 0}, 'edge': {}, @@ -738,6 +743,24 @@ class TestLinearRamp(object): assert_equal(a[0, :], 0.) assert_equal(a[-1, :], 0.)
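A hedged demonstration of the gh-14191 behavior pinned down by the new test below: with unsigned dtypes, ramping down to ``end_values=0`` must not wrap around.

```python
import numpy as np

x = np.array([3], dtype=np.uint8)
padded = np.pad(x, 3, mode="linear_ramp", end_values=0)
print(padded)  # [0 1 2 3 2 1 0] -- no uint8 wrap-around
```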
+ @pytest.mark.parametrize("dtype", _numeric_dtypes) + def test_negative_difference(self, dtype): + """ + Check correct behavior of unsigned dtypes if there is a negative + difference between the edge to pad and `end_values`. Check both cases + to be independent of implementation. Test behavior for all other dtypes + in case dtype casting interferes with complex dtypes. See gh-14191. + """ + x = np.array([3], dtype=dtype) + result = np.pad(x, 3, mode="linear_ramp", end_values=0) + expected = np.array([0, 1, 2, 3, 2, 1, 0], dtype=dtype) + assert_equal(result, expected) + + x = np.array([0], dtype=dtype) + result = np.pad(x, 3, mode="linear_ramp", end_values=3) + expected = np.array([3, 2, 1, 0, 1, 2, 3], dtype=dtype) + assert_equal(result, expected) + class TestReflect(object): def test_check_simple(self): @@ -1330,13 +1353,7 @@ def test_memory_layout_persistence(mode): assert np.pad(x, 5, mode).flags["F_CONTIGUOUS"] -@pytest.mark.parametrize("dtype", chain( - # Skip "other" dtypes as they are not supported by all modes - np.sctypes["int"], - np.sctypes["uint"], - np.sctypes["float"], - np.sctypes["complex"] -)) +@pytest.mark.parametrize("dtype", _numeric_dtypes) @pytest.mark.parametrize("mode", _all_modes.keys()) def test_dtype_persistence(dtype, mode): arr = np.zeros((3, 2, 1), dtype=dtype) diff --git a/numpy/lib/tests/test_arraysetops.py b/numpy/lib/tests/test_arraysetops.py index dd8a38248..fd21a7f76 100644 --- a/numpy/lib/tests/test_arraysetops.py +++ b/numpy/lib/tests/test_arraysetops.py @@ -600,8 +600,11 @@ class TestUnique(object): assert_array_equal(unique(data, axis=1), result.astype(dtype), msg) msg = 'Unique with 3d array and axis=2 failed' - data3d = np.dstack([data] * 3) - result = data3d[..., :1] + data3d = np.array([[[1, 1], + [1, 0]], + [[0, 1], + [0, 0]]]).astype(dtype) + result = np.take(data3d, [1, 0], axis=2) assert_array_equal(unique(data3d, axis=2), result, msg) uniq, idx, inv, cnt = unique(data, axis=0, return_index=True, diff --git a/numpy/lib/tests/test_financial.py b/numpy/lib/tests/test_financial.py index 524915041..cb67f7c0f 100644 --- a/numpy/lib/tests/test_financial.py +++ b/numpy/lib/tests/test_financial.py @@ -1,5 +1,6 @@ from __future__ import division, absolute_import, print_function +import warnings from decimal import Decimal import numpy as np @@ -8,16 +9,35 @@ from numpy.testing import ( ) +def filter_deprecation(func): + def newfunc(*args, **kwargs): + with warnings.catch_warnings(record=True) as ws: + warnings.filterwarnings('always', category=DeprecationWarning) + func(*args, **kwargs) + assert_(all(w.category is DeprecationWarning for w in ws)) + return newfunc + + class TestFinancial(object): + @filter_deprecation + def test_npv_irr_congruence(self): + # IRR is defined as the rate required for the present value of a + # a series of cashflows to be zero i.e. 
NPV(IRR(x), x) = 0 + cashflows = np.array([-40000, 5000, 8000, 12000, 30000]) + assert_allclose(np.npv(np.irr(cashflows), cashflows), 0, atol=1e-10, rtol=0) + + @filter_deprecation def test_rate(self): assert_almost_equal( np.rate(10, 0, -3500, 10000), 0.1107, 4) + @filter_deprecation def test_rate_decimal(self): rate = np.rate(Decimal('10'), Decimal('0'), Decimal('-3500'), Decimal('10000')) assert_equal(Decimal('0.1106908537142689284704528100'), rate) + @filter_deprecation def test_irr(self): v = [-150000, 15000, 25000, 35000, 45000, 60000] assert_almost_equal(np.irr(v), 0.0524, 2) @@ -37,20 +57,25 @@ class TestFinancial(object): v = [-1, -2, -3] assert_equal(np.irr(v), np.nan) + @filter_deprecation def test_pv(self): assert_almost_equal(np.pv(0.07, 20, 12000, 0), -127128.17, 2) + @filter_deprecation def test_pv_decimal(self): assert_equal(np.pv(Decimal('0.07'), Decimal('20'), Decimal('12000'), Decimal('0')), Decimal('-127128.1709461939327295222005')) + @filter_deprecation def test_fv(self): assert_equal(np.fv(0.075, 20, -2000, 0, 0), 86609.362673042924) + @filter_deprecation def test_fv_decimal(self): assert_equal(np.fv(Decimal('0.075'), Decimal('20'), Decimal('-2000'), 0, 0), Decimal('86609.36267304300040536731624')) + @filter_deprecation def test_pmt(self): res = np.pmt(0.08 / 12, 5 * 12, 15000) tgt = -304.145914 @@ -65,6 +90,7 @@ class TestFinancial(object): tgt = np.array([[-166.66667, -19311.258], [-626.90814, -19311.258]]) assert_allclose(res, tgt) + @filter_deprecation def test_pmt_decimal(self): res = np.pmt(Decimal('0.08') / Decimal('12'), 5 * 12, 15000) tgt = Decimal('-304.1459143262052370338701494') @@ -88,18 +114,22 @@ class TestFinancial(object): assert_equal(res[1][0], tgt[1][0]) assert_equal(res[1][1], tgt[1][1]) + @filter_deprecation def test_ppmt(self): assert_equal(np.round(np.ppmt(0.1 / 12, 1, 60, 55000), 2), -710.25) + @filter_deprecation def test_ppmt_decimal(self): assert_equal(np.ppmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('60'), Decimal('55000')), Decimal('-710.2541257864217612489830917')) # Two tests showing how Decimal is actually getting at a more exact result # .23 / 12 does not come out nicely as a float but does as a decimal + @filter_deprecation def test_ppmt_special_rate(self): assert_equal(np.round(np.ppmt(0.23 / 12, 1, 60, 10000000000), 8), -90238044.232277036) + @filter_deprecation def test_ppmt_special_rate_decimal(self): # When rounded out to 8 decimal places like the float based test, this should not equal the same value # as the float, substituted for the decimal @@ -112,31 +142,38 @@ class TestFinancial(object): assert_equal(np.ppmt(Decimal('0.23') / Decimal('12'), 1, 60, Decimal('10000000000')), Decimal('-90238044.2322778884413969909')) + @filter_deprecation def test_ipmt(self): assert_almost_equal(np.round(np.ipmt(0.1 / 12, 1, 24, 2000), 2), -16.67) + @filter_deprecation def test_ipmt_decimal(self): result = np.ipmt(Decimal('0.1') / Decimal('12'), 1, 24, 2000) assert_equal(result.flat[0], Decimal('-16.66666666666666666666666667')) + @filter_deprecation def test_nper(self): assert_almost_equal(np.nper(0.075, -2000, 0, 100000.), 21.54, 2) + @filter_deprecation def test_nper2(self): assert_almost_equal(np.nper(0.0, -2000, 0, 100000.), 50.0, 1) + @filter_deprecation def test_npv(self): assert_almost_equal( np.npv(0.05, [-15000, 1500, 2500, 3500, 4500, 6000]), 122.89, 2) + @filter_deprecation def test_npv_decimal(self): assert_equal( np.npv(Decimal('0.05'), [-15000, 1500, 2500, 3500, 4500, 6000]), 
Decimal('122.894854950942692161628715')) + @filter_deprecation def test_mirr(self): val = [-4500, -800, 800, 800, 600, 600, 800, 800, 700, 3000] assert_almost_equal(np.mirr(val, 0.08, 0.055), 0.0666, 4) @@ -150,6 +187,7 @@ class TestFinancial(object): val = [39000, 30000, 21000, 37000, 46000] assert_(np.isnan(np.mirr(val, 0.10, 0.12))) + @filter_deprecation def test_mirr_decimal(self): val = [Decimal('-4500'), Decimal('-800'), Decimal('800'), Decimal('800'), Decimal('600'), Decimal('600'), Decimal('800'), Decimal('800'), @@ -168,6 +206,7 @@ class TestFinancial(object): val = [Decimal('39000'), Decimal('30000'), Decimal('21000'), Decimal('37000'), Decimal('46000')] assert_(np.isnan(np.mirr(val, Decimal('0.10'), Decimal('0.12')))) + @filter_deprecation def test_when(self): # begin assert_equal(np.rate(10, 20, -3500, 10000, 1), @@ -232,6 +271,7 @@ class TestFinancial(object): assert_equal(np.nper(0.075, -2000, 0, 100000., 0), np.nper(0.075, -2000, 0, 100000., 'end')) + @filter_deprecation def test_decimal_with_when(self): """Test that decimals are still supported if the when argument is passed""" # begin @@ -306,6 +346,7 @@ class TestFinancial(object): np.ipmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('24'), Decimal('2000'), Decimal('0'), 'end').flat[0]) + @filter_deprecation def test_broadcast(self): assert_almost_equal(np.nper(0.075, -2000, 0, 100000., [0, 1]), [21.5449442, 20.76156441], 4) @@ -323,6 +364,7 @@ class TestFinancial(object): [-74.998201, -75.62318601, -75.62318601, -76.88882405, -76.88882405], 4) + @filter_deprecation def test_broadcast_decimal(self): # Use almost equal because precision is tested in the explicit tests, this test is to ensure # broadcast with Decimal is not broken. diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index eae52c002..1eae8ccfb 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -423,27 +423,17 @@ class TestSelect(object): assert_equal(select([m], [d]), [0, 0, 0, np.nan, 0, 0]) def test_deprecated_empty(self): - with warnings.catch_warnings(record=True): - warnings.simplefilter("always") - assert_equal(select([], [], 3j), 3j) - - with warnings.catch_warnings(): - warnings.simplefilter("always") - assert_warns(DeprecationWarning, select, [], []) - warnings.simplefilter("error") - assert_raises(DeprecationWarning, select, [], []) + assert_raises(ValueError, select, [], [], 3j) + assert_raises(ValueError, select, [], []) def test_non_bool_deprecation(self): choices = self.choices conditions = self.conditions[:] - with warnings.catch_warnings(): - warnings.filterwarnings("always") - conditions[0] = conditions[0].astype(np.int_) - assert_warns(DeprecationWarning, select, conditions, choices) - conditions[0] = conditions[0].astype(np.uint8) - assert_warns(DeprecationWarning, select, conditions, choices) - warnings.filterwarnings("error") - assert_raises(DeprecationWarning, select, conditions, choices) + conditions[0] = conditions[0].astype(np.int_) + assert_raises(TypeError, select, conditions, choices) + conditions[0] = conditions[0].astype(np.uint8) + assert_raises(TypeError, select, conditions, choices) + assert_raises(TypeError, select, conditions, choices) def test_many_arguments(self): # This used to be limited by NPY_MAXARGS == 32 diff --git a/numpy/lib/tests/test_histograms.py b/numpy/lib/tests/test_histograms.py index 4895a722c..dbf189f3e 100644 --- a/numpy/lib/tests/test_histograms.py +++ b/numpy/lib/tests/test_histograms.py @@ -8,6 +8,7 @@ from 
numpy.testing import ( assert_array_almost_equal, assert_raises, assert_allclose, assert_array_max_ulp, assert_raises_regex, suppress_warnings, ) +import pytest class TestHistogram(object): @@ -591,6 +592,16 @@ class TestHistogramOptimBinNums(object): msg += " with datasize of {0}".format(testlen) assert_equal(len(a), numbins, err_msg=msg) + @pytest.mark.parametrize("bins", ['auto', 'fd', 'doane', 'scott', + 'stone', 'rice', 'sturges']) + def test_signed_integer_data(self, bins): + # Regression test for gh-14379. + a = np.array([-2, 0, 127], dtype=np.int8) + hist, edges = np.histogram(a, bins=bins) + hist32, edges32 = np.histogram(a.astype(np.int32), bins=bins) + assert_array_equal(hist, hist32) + assert_array_equal(edges, edges32) + def test_simple_weighted(self): """ Check that weighted data raises a TypeError diff --git a/numpy/lib/tests/test_index_tricks.py b/numpy/lib/tests/test_index_tricks.py index a5cdda074..dbe445c2c 100644 --- a/numpy/lib/tests/test_index_tricks.py +++ b/numpy/lib/tests/test_index_tricks.py @@ -175,6 +175,24 @@ class TestRavelUnravelIndex(object): assert_raises_regex( ValueError, "out of bounds", np.unravel_index, [1], ()) + @pytest.mark.parametrize("mode", ["clip", "wrap", "raise"]) + def test_empty_array_ravel(self, mode): + res = np.ravel_multi_index( + np.zeros((3, 0), dtype=np.intp), (2, 1, 0), mode=mode) + assert(res.shape == (0,)) + + with assert_raises(ValueError): + np.ravel_multi_index( + np.zeros((3, 1), dtype=np.intp), (2, 1, 0), mode=mode) + + def test_empty_array_unravel(self): + res = np.unravel_index(np.zeros(0, dtype=np.intp), (2, 1, 0)) + # res is a tuple of three empty arrays + assert(len(res) == 3) + assert(all(a.shape == (0,) for a in res)) + + with assert_raises(ValueError): + np.unravel_index([1], (2, 1, 0)) class TestGrid(object): def test_basic(self): diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py index 6ee17c830..1181fe986 100644 --- a/numpy/lib/tests/test_io.py +++ b/numpy/lib/tests/test_io.py @@ -1871,7 +1871,7 @@ M 33 21.99 data = ["1, 1, 1, 1, -1.1"] * 50 mdata = TextIO("\n".join(data)) - converters = {4: lambda x: "(%s)" % x} + converters = {4: lambda x: "(%s)" % x.decode()} kwargs = dict(delimiter=",", converters=converters, dtype=[(_, int) for _ in 'abcde'],) assert_raises(ValueError, np.genfromtxt, mdata, **kwargs) diff --git a/numpy/lib/tests/test_recfunctions.py b/numpy/lib/tests/test_recfunctions.py index 0c839d486..fa5f4dec2 100644 --- a/numpy/lib/tests/test_recfunctions.py +++ b/numpy/lib/tests/test_recfunctions.py @@ -91,8 +91,10 @@ class TestRecFunctions(object): control = np.array([(1,), (4,)], dtype=[('a', int)]) assert_equal(test, control) + # dropping all fields results in an array with no fields test = drop_fields(a, ['a', 'b']) - assert_(test is None) + control = np.array([(), ()], dtype=[]) + assert_equal(test, control) def test_rename_fields(self): # Test rename fields @@ -378,8 +380,8 @@ class TestMergeArrays(object): z = np.array( [('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)]) w = np.array( - [(1, (2, 3.0)), (4, (5, 6.0))], - dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) + [(1, (2, 3.0, ())), (4, (5, 6.0, ()))], + dtype=[('a', int), ('b', [('ba', float), ('bb', int), ('bc', [])])]) self.data = (w, x, y, z) def test_solo(self): @@ -450,8 +452,8 @@ class TestMergeArrays(object): test = merge_arrays((x, w), flatten=False) controldtype = [('f0', int), ('f1', [('a', int), - ('b', [('ba', float), ('bb', int)])])] - control = np.array([(1., (1, (2, 3.0))), (2, (4, (5, 
6.0)))], + ('b', [('ba', float), ('bb', int), ('bc', [])])])] + control = np.array([(1., (1, (2, 3.0, ()))), (2, (4, (5, 6.0, ())))], dtype=controldtype) assert_equal(test, control) diff --git a/numpy/lib/utils.py b/numpy/lib/utils.py index 8bcbd8e86..3c71d2a7c 100644 --- a/numpy/lib/utils.py +++ b/numpy/lib/utils.py @@ -788,13 +788,8 @@ def lookfor(what, module=None, import_modules=True, regenerate=False, if kind in ('module', 'object'): # don't show modules or objects continue - ok = True doc = docstring.lower() - for w in whats: - if w not in doc: - ok = False - break - if ok: + if all(w in doc for w in whats): found.append(name) # Relevance sort diff --git a/numpy/linalg/__init__.py b/numpy/linalg/__init__.py index 4b696c883..55560815d 100644 --- a/numpy/linalg/__init__.py +++ b/numpy/linalg/__init__.py @@ -1,53 +1,77 @@ """ -Core Linear Algebra Tools -========================= - -=============== ========================================================== -Linear algebra basics -========================================================================== -norm Vector or matrix norm -inv Inverse of a square matrix -solve Solve a linear system of equations -det Determinant of a square matrix -slogdet Logarithm of the determinant of a square matrix -lstsq Solve linear least-squares problem -pinv Pseudo-inverse (Moore-Penrose) calculated using a singular - value decomposition -matrix_power Integer power of a square matrix -matrix_rank Calculate matrix rank using an SVD-based method -=============== ========================================================== - -=============== ========================================================== -Eigenvalues and decompositions -========================================================================== -eig Eigenvalues and vectors of a square matrix -eigh Eigenvalues and eigenvectors of a Hermitian matrix -eigvals Eigenvalues of a square matrix -eigvalsh Eigenvalues of a Hermitian matrix -qr QR decomposition of a matrix -svd Singular value decomposition of a matrix -cholesky Cholesky decomposition of a matrix -=============== ========================================================== - -=============== ========================================================== -Tensor operations -========================================================================== -tensorsolve Solve a linear tensor equation -tensorinv Calculate an inverse of a tensor -=============== ========================================================== - -=============== ========================================================== +``numpy.linalg`` +================ + +The NumPy linear algebra functions rely on BLAS and LAPACK to provide efficient +low level implementations of standard linear algebra algorithms. Those +libraries may be provided by NumPy itself using C versions of a subset of their +reference implementations but, when possible, highly optimized libraries that +take advantage of specialized processor functionality are preferred. Examples +of such libraries are OpenBLAS, MKL (TM), and ATLAS. Because those libraries +are multithreaded and processor dependent, environmental variables and external +packages such as threadpoolctl may be needed to control the number of threads +or specify the processor architecture. + +- OpenBLAS: https://www.openblas.net/ +- threadpoolctl: https://github.com/joblib/threadpoolctl + +Please note that the most-used linear algebra functions in NumPy are present in +the main ``numpy`` namespace rather than in ``numpy.linalg``. 
These are:
+``dot``, ``vdot``, ``inner``, ``outer``, ``matmul``, ``tensordot``, ``einsum``,
+``einsum_path`` and ``kron``.
+
+Functions present in numpy.linalg are listed below.
+
+
+Matrix and vector products
+--------------------------
+
+   multi_dot
+   matrix_power
+
+Decompositions
+--------------
+
+   cholesky
+   qr
+   svd
+
+Matrix eigenvalues
+------------------
+
+   eig
+   eigh
+   eigvals
+   eigvalsh
+
+Norms and other numbers
+-----------------------
+
+   norm
+   cond
+   det
+   matrix_rank
+   slogdet
+
+Solving equations and inverting matrices
+----------------------------------------
+
+   solve
+   tensorsolve
+   lstsq
+   inv
+   pinv
+   tensorinv
+
 Exceptions
-==========================================================================
-LinAlgError                    Indicates a failed linear algebra operation
-=============== ==========================================================
+----------
+
+   LinAlgError
 
 """
 from __future__ import division, absolute_import, print_function
 
 # To get sub-modules
-from .info import __doc__
-
 from .linalg import *
 
 from numpy._pytesttester import PytestTester
diff --git a/numpy/linalg/info.py b/numpy/linalg/info.py
deleted file mode 100644
index 646ecda04..000000000
--- a/numpy/linalg/info.py
+++ /dev/null
@@ -1,37 +0,0 @@
-"""\
-Core Linear Algebra Tools
--------------------------
-Linear algebra basics:
-
-- norm            Vector or matrix norm
-- inv             Inverse of a square matrix
-- solve           Solve a linear system of equations
-- det             Determinant of a square matrix
-- lstsq           Solve linear least-squares problem
-- pinv            Pseudo-inverse (Moore-Penrose) calculated using a singular
-                  value decomposition
-- matrix_power    Integer power of a square matrix
-
-Eigenvalues and decompositions:
-
-- eig             Eigenvalues and vectors of a square matrix
-- eigh            Eigenvalues and eigenvectors of a Hermitian matrix
-- eigvals         Eigenvalues of a square matrix
-- eigvalsh        Eigenvalues of a Hermitian matrix
-- qr              QR decomposition of a matrix
-- svd             Singular value decomposition of a matrix
-- cholesky        Cholesky decomposition of a matrix
-
-Tensor operations:
-
-- tensorsolve     Solve a linear tensor equation
-- tensorinv       Calculate an inverse of a tensor
-
-Exceptions:
-
-- LinAlgError     Indicates a failed linear algebra operation
-
-"""
-from __future__ import division, absolute_import, print_function
-
-depends = ['core']
diff --git a/numpy/ma/core.py b/numpy/ma/core.py
index bb3788c9a..bb0d8d412 100644
--- a/numpy/ma/core.py
+++ b/numpy/ma/core.py
@@ -4394,7 +4394,7 @@ class MaskedArray(ndarray):
         ----------
         axis : None or int or tuple of ints, optional
             Axis or axes along which the count is performed.
-            The default (`axis` = `None`) performs the count over all
+            The default, None, performs the count over all
             the dimensions of the input array. `axis` may be negative,
             in which case it counts from the last to the first axis.
 
@@ -4774,7 +4774,7 @@ class MaskedArray(ndarray):
 
         See Also
         --------
-        ndarray.all : corresponding function for ndarrays
+        numpy.ndarray.all : corresponding function for ndarrays
         numpy.all : equivalent function
 
         Examples
@@ -4812,7 +4812,7 @@ class MaskedArray(ndarray):
 
         See Also
         --------
-        ndarray.any : corresponding function for ndarrays
+        numpy.ndarray.any : corresponding function for ndarrays
         numpy.any : equivalent function
 
         """
@@ -4866,7 +4866,7 @@ class MaskedArray(ndarray):
         flatnonzero :
             Return indices that are non-zero in the flattened version of
             the input array.
-        ndarray.nonzero :
+        numpy.ndarray.nonzero :
             Equivalent ndarray method.
         count_nonzero :
             Counts the number of non-zero elements in the input array.
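As a quick orientation to the rewritten ``numpy.linalg`` docstring above, a minimal
sketch of the most common calls it lists (illustrative only, using public NumPy APIs;
the values are made up for the example):

    import numpy as np

    A = np.array([[3.0, 1.0], [1.0, 2.0]])
    b = np.array([9.0, 8.0])

    x = np.linalg.solve(A, b)            # solve the linear system A @ x = b
    assert np.allclose(A @ x, b)

    sign, logdet = np.linalg.slogdet(A)  # log-determinant, robust to over/underflow
    q, r = np.linalg.qr(A)               # QR decomposition
    w, v = np.linalg.eigh(A)             # eigendecomposition of a symmetric matrix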
@@ -4994,7 +4994,7 @@ class MaskedArray(ndarray): See Also -------- - ndarray.sum : corresponding function for ndarrays + numpy.ndarray.sum : corresponding function for ndarrays numpy.sum : equivalent function Examples @@ -5065,7 +5065,7 @@ class MaskedArray(ndarray): See Also -------- - ndarray.cumsum : corresponding function for ndarrays + numpy.ndarray.cumsum : corresponding function for ndarrays numpy.cumsum : equivalent function Examples @@ -5102,7 +5102,7 @@ class MaskedArray(ndarray): See Also -------- - ndarray.prod : corresponding function for ndarrays + numpy.ndarray.prod : corresponding function for ndarrays numpy.prod : equivalent function """ kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} @@ -5148,7 +5148,7 @@ class MaskedArray(ndarray): See Also -------- - ndarray.cumprod : corresponding function for ndarrays + numpy.ndarray.cumprod : corresponding function for ndarrays numpy.cumprod : equivalent function """ result = self.filled(1).cumprod(axis=axis, dtype=dtype, out=out) @@ -5171,7 +5171,7 @@ class MaskedArray(ndarray): See Also -------- - ndarray.mean : corresponding function for ndarrays + numpy.ndarray.mean : corresponding function for ndarrays numpy.mean : Equivalent function numpy.ma.average: Weighted average. @@ -5260,7 +5260,7 @@ class MaskedArray(ndarray): See Also -------- - ndarray.var : corresponding function for ndarrays + numpy.ndarray.var : corresponding function for ndarrays numpy.var : Equivalent function """ kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} @@ -5323,7 +5323,7 @@ class MaskedArray(ndarray): See Also -------- - ndarray.std : corresponding function for ndarrays + numpy.ndarray.std : corresponding function for ndarrays numpy.std : Equivalent function """ kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} @@ -5344,7 +5344,7 @@ class MaskedArray(ndarray): See Also -------- - ndarray.around : corresponding function for ndarrays + numpy.ndarray.around : corresponding function for ndarrays numpy.around : equivalent function """ result = self._data.round(decimals=decimals, out=out).view(type(self)) @@ -5406,7 +5406,7 @@ class MaskedArray(ndarray): -------- MaskedArray.sort : Describes sorting algorithms used. lexsort : Indirect stable sort with multiple keys. - ndarray.sort : Inplace sort. + numpy.ndarray.sort : Inplace sort. Notes ----- @@ -5558,7 +5558,7 @@ class MaskedArray(ndarray): See Also -------- - ndarray.sort : Method to sort an array in-place. + numpy.ndarray.sort : Method to sort an array in-place. argsort : Indirect sort. lexsort : Indirect stable sort on multiple keys. searchsorted : Find elements in a sorted array. @@ -5978,7 +5978,7 @@ class MaskedArray(ndarray): See Also -------- - ndarray.tobytes + numpy.ndarray.tobytes tolist, tofile Notes diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py index 639b3dd1f..4a83ac781 100644 --- a/numpy/ma/extras.py +++ b/numpy/ma/extras.py @@ -542,15 +542,18 @@ def average(a, axis=None, weights=None, returned=False): Data to be averaged. Masked entries are not taken into account in the computation. axis : int, optional - Axis along which to average `a`. If `None`, averaging is done over + Axis along which to average `a`. If None, averaging is done over the flattened array. weights : array_like, optional The importance that each element has in the computation of the average. The weights array can either be 1-D (in which case its length must be the size of `a` along the given axis) or of the same shape as `a`. 
If ``weights=None``, then all data in `a` are assumed to have a - weight equal to one. If `weights` is complex, the imaginary parts - are ignored. + weight equal to one. The 1-D calculation is:: + + avg = sum(a * weights) / sum(weights) + + The only constraint on `weights` is that `sum(weights)` must not be 0. returned : bool, optional Flag indicating whether a tuple ``(result, sum of weights)`` should be returned as output (True), or just the result (False). diff --git a/numpy/ma/version.py b/numpy/ma/version.py deleted file mode 100644 index a2c5c42a8..000000000 --- a/numpy/ma/version.py +++ /dev/null @@ -1,14 +0,0 @@ -"""Version number - -""" -from __future__ import division, absolute_import, print_function - -version = '1.00' -release = False - -if not release: - from . import core - from . import extras - revision = [core.__revision__.split(':')[-1][:-1].strip(), - extras.__revision__.split(':')[-1][:-1].strip(),] - version += '.dev%04i' % max([int(rev) for rev in revision]) diff --git a/numpy/matlib.py b/numpy/matlib.py index 9e115943a..b1b155586 100644 --- a/numpy/matlib.py +++ b/numpy/matlib.py @@ -2,7 +2,7 @@ from __future__ import division, absolute_import, print_function import numpy as np from numpy.matrixlib.defmatrix import matrix, asmatrix -# need * as we're copying the numpy namespace +# need * as we're copying the numpy namespace (FIXME: this makes little sense) from numpy import * __version__ = np.__version__ @@ -239,7 +239,7 @@ def rand(*args): See Also -------- - randn, numpy.random.rand + randn, numpy.random.RandomState.rand Examples -------- @@ -285,7 +285,7 @@ def randn(*args): See Also -------- - rand, random.randn + rand, numpy.random.RandomState.randn Notes ----- diff --git a/numpy/matrixlib/defmatrix.py b/numpy/matrixlib/defmatrix.py index 3c7e8ffc2..cabd41367 100644 --- a/numpy/matrixlib/defmatrix.py +++ b/numpy/matrixlib/defmatrix.py @@ -1046,7 +1046,7 @@ def bmat(obj, ldict=None, gdict=None): referenced by name. ldict : dict, optional A dictionary that replaces local operands in current frame. - Ignored if `obj` is not a string or `gdict` is `None`. + Ignored if `obj` is not a string or `gdict` is None. gdict : dict, optional A dictionary that replaces global operands in current frame. Ignored if `obj` is not a string. diff --git a/numpy/random/__init__.py b/numpy/random/__init__.py index e7eecc5cd..1ceb5c4dd 100644 --- a/numpy/random/__init__.py +++ b/numpy/random/__init__.py @@ -179,21 +179,19 @@ __all__ = [ # add these for module-freeze analysis (like PyInstaller) from . import _pickle -from . import common -from . import bounded_integers -from . import entropy - +from . import _common +from . 
import _bounded_integers
+
+from ._generator import Generator, default_rng
+from ._bit_generator import SeedSequence, BitGenerator
+from ._mt19937 import MT19937
+from ._pcg64 import PCG64
+from ._philox import Philox
+from ._sfc64 import SFC64
 from .mtrand import *
-from .generator import Generator, default_rng
-from .bit_generator import SeedSequence
-from .mt19937 import MT19937
-from .pcg64 import PCG64
-from .philox import Philox
-from .sfc64 import SFC64
-from .mtrand import RandomState
 
 __all__ += ['Generator', 'RandomState', 'SeedSequence', 'MT19937',
-            'Philox', 'PCG64', 'SFC64', 'default_rng']
+            'Philox', 'PCG64', 'SFC64', 'default_rng', 'BitGenerator']
 
 
 def __RandomState_ctor():
diff --git a/numpy/random/bit_generator.pxd b/numpy/random/_bit_generator.pxd
index 984033f17..30fa4a27d 100644
--- a/numpy/random/bit_generator.pxd
+++ b/numpy/random/_bit_generator.pxd
@@ -1,6 +1,15 @@
-
-from .common cimport bitgen_t, uint32_t
 cimport numpy as np
+from libc.stdint cimport uint32_t, uint64_t
+
+cdef extern from "include/bitgen.h":
+    struct bitgen:
+        void *state
+        uint64_t (*next_uint64)(void *st) nogil
+        uint32_t (*next_uint32)(void *st) nogil
+        double (*next_double)(void *st) nogil
+        uint64_t (*next_raw)(void *st) nogil
+
+    ctypedef bitgen bitgen_t
 
 cdef class BitGenerator():
     cdef readonly object _seed_seq
diff --git a/numpy/random/bit_generator.pyx b/numpy/random/_bit_generator.pyx
index eb608af6c..21d21e6bb 100644
--- a/numpy/random/bit_generator.pyx
+++ b/numpy/random/_bit_generator.pyx
@@ -53,9 +53,7 @@ from cpython.pycapsule cimport PyCapsule_New
 import numpy as np
 cimport numpy as np
 
-from libc.stdint cimport uint32_t
-from .common cimport (random_raw, benchmark, prepare_ctypes, prepare_cffi)
-from .distributions cimport bitgen_t
+from ._common cimport (random_raw, benchmark, prepare_ctypes, prepare_cffi)
 
 __all__ = ['SeedSequence', 'BitGenerator']
 
@@ -116,7 +114,7 @@ def _coerce_to_uint32_array(x):
     Examples
     --------
     >>> import numpy as np
-    >>> from numpy.random.bit_generator import _coerce_to_uint32_array
+    >>> from numpy.random._bit_generator import _coerce_to_uint32_array
     >>> _coerce_to_uint32_array(12345)
     array([12345], dtype=uint32)
     >>> _coerce_to_uint32_array('12345')
@@ -484,13 +482,12 @@ cdef class BitGenerator():
 
     Parameters
     ----------
-    seed : {None, int, array_like[ints], ISeedSequence}, optional
+    seed : {None, int, array_like[ints], SeedSequence}, optional
         A seed to initialize the `BitGenerator`. If None, then fresh,
         unpredictable entropy will be pulled from the OS. If an ``int`` or
         ``array_like[ints]`` is passed, then it will be passed to
-        `SeedSequence` to derive the initial `BitGenerator` state. One may also
-        pass in an implementor of the `ISeedSequence` interface like
-        `SeedSequence`.
+        `~numpy.random.SeedSequence` to derive the initial `BitGenerator` state.
+        One may also pass in a `SeedSequence` instance.
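The renamed private modules above back the public seeding API described in this
docstring; a minimal sketch of how a seed flows from ``SeedSequence`` into a
``Generator`` (illustrative only, not part of the diff):

    from numpy.random import Generator, PCG64, SeedSequence, default_rng

    # default_rng accepts None, an int, an array of ints, or a SeedSequence
    rng = default_rng(SeedSequence(12345))
    vals = rng.integers(0, 10, size=5)

    # equivalent construction, spelling out the BitGenerator explicitly
    same = Generator(PCG64(SeedSequence(12345)))

    # spawn independent child seeds for parallel streams
    streams = [default_rng(s) for s in SeedSequence(12345).spawn(4)]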
Attributes ---------- diff --git a/numpy/random/_bounded_integers.pxd b/numpy/random/_bounded_integers.pxd new file mode 100644 index 000000000..d3ee97a70 --- /dev/null +++ b/numpy/random/_bounded_integers.pxd @@ -0,0 +1,29 @@ +from libc.stdint cimport (uint8_t, uint16_t, uint32_t, uint64_t, + int8_t, int16_t, int32_t, int64_t, intptr_t) +import numpy as np +cimport numpy as np +ctypedef np.npy_bool bool_t + +from ._bit_generator cimport bitgen_t + +cdef inline uint64_t _gen_mask(uint64_t max_val) nogil: + """Mask generator for use in bounded random numbers""" + # Smallest bit mask >= max + cdef uint64_t mask = max_val + mask |= mask >> 1 + mask |= mask >> 2 + mask |= mask >> 4 + mask |= mask >> 8 + mask |= mask >> 16 + mask |= mask >> 32 + return mask + +cdef object _rand_uint64(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock) +cdef object _rand_uint32(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock) +cdef object _rand_uint16(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock) +cdef object _rand_uint8(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock) +cdef object _rand_bool(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock) +cdef object _rand_int64(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock) +cdef object _rand_int32(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock) +cdef object _rand_int16(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock) +cdef object _rand_int8(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock) diff --git a/numpy/random/bounded_integers.pxd.in b/numpy/random/_bounded_integers.pxd.in index 7a3f224dc..320d35774 100644 --- a/numpy/random/bounded_integers.pxd.in +++ b/numpy/random/_bounded_integers.pxd.in @@ -4,7 +4,7 @@ import numpy as np cimport numpy as np ctypedef np.npy_bool bool_t -from .common cimport bitgen_t +from ._bit_generator cimport bitgen_t cdef inline uint64_t _gen_mask(uint64_t max_val) nogil: """Mask generator for use in bounded random numbers""" diff --git a/numpy/random/_bounded_integers.pyx b/numpy/random/_bounded_integers.pyx new file mode 100644 index 000000000..d6a534b43 --- /dev/null +++ b/numpy/random/_bounded_integers.pyx @@ -0,0 +1,1564 @@ +#!python +#cython: wraparound=False, nonecheck=False, boundscheck=False, cdivision=True + +import numpy as np +cimport numpy as np + +__all__ = [] + +np.import_array() + +cdef extern from "include/distributions.h": + # Generate random numbers in closed interval [off, off + rng]. 
+    uint64_t random_bounded_uint64(bitgen_t *bitgen_state,
+                                   uint64_t off, uint64_t rng,
+                                   uint64_t mask, bint use_masked) nogil
+    uint32_t random_buffered_bounded_uint32(bitgen_t *bitgen_state,
+                                            uint32_t off, uint32_t rng,
+                                            uint32_t mask, bint use_masked,
+                                            int *bcnt, uint32_t *buf) nogil
+    uint16_t random_buffered_bounded_uint16(bitgen_t *bitgen_state,
+                                            uint16_t off, uint16_t rng,
+                                            uint16_t mask, bint use_masked,
+                                            int *bcnt, uint32_t *buf) nogil
+    uint8_t random_buffered_bounded_uint8(bitgen_t *bitgen_state,
+                                          uint8_t off, uint8_t rng,
+                                          uint8_t mask, bint use_masked,
+                                          int *bcnt, uint32_t *buf) nogil
+    np.npy_bool random_buffered_bounded_bool(bitgen_t *bitgen_state,
+                                             np.npy_bool off, np.npy_bool rng,
+                                             np.npy_bool mask, bint use_masked,
+                                             int *bcnt, uint32_t *buf) nogil
+    void random_bounded_uint64_fill(bitgen_t *bitgen_state,
+                                    uint64_t off, uint64_t rng, np.npy_intp cnt,
+                                    bint use_masked,
+                                    uint64_t *out) nogil
+    void random_bounded_uint32_fill(bitgen_t *bitgen_state,
+                                    uint32_t off, uint32_t rng, np.npy_intp cnt,
+                                    bint use_masked,
+                                    uint32_t *out) nogil
+    void random_bounded_uint16_fill(bitgen_t *bitgen_state,
+                                    uint16_t off, uint16_t rng, np.npy_intp cnt,
+                                    bint use_masked,
+                                    uint16_t *out) nogil
+    void random_bounded_uint8_fill(bitgen_t *bitgen_state,
+                                   uint8_t off, uint8_t rng, np.npy_intp cnt,
+                                   bint use_masked,
+                                   uint8_t *out) nogil
+    void random_bounded_bool_fill(bitgen_t *bitgen_state,
+                                  np.npy_bool off, np.npy_bool rng, np.npy_intp cnt,
+                                  bint use_masked,
+                                  np.npy_bool *out) nogil
+
+
+
+_integers_types = {'bool': (0, 2),
+                   'int8': (-2**7, 2**7),
+                   'int16': (-2**15, 2**15),
+                   'int32': (-2**31, 2**31),
+                   'int64': (-2**63, 2**63),
+                   'uint8': (0, 2**8),
+                   'uint16': (0, 2**16),
+                   'uint32': (0, 2**32),
+                   'uint64': (0, 2**64)}
+
+
+cdef object _rand_uint32_broadcast(np.ndarray low, np.ndarray high, object size,
+                                   bint use_masked, bint closed,
+                                   bitgen_t *state, object lock):
+    """
+    Array path for smaller integer types
+
+    This path is simpler since the high value in the open interval [low, high)
+    must be in-range for the next larger type, uint64. Here we cast to
+    this type for checking and then recast to uint32 when producing the
+    random integers.
+    """
+    cdef uint32_t rng, last_rng, off, val, mask, out_val, is_open
+    cdef uint32_t buf
+    cdef uint32_t *out_data
+    cdef uint64_t low_v, high_v
+    cdef np.ndarray low_arr, high_arr, out_arr
+    cdef np.npy_intp i, cnt
+    cdef np.broadcast it
+    cdef int buf_rem = 0
+
+    # Array path
+    is_open = not closed
+    low_arr = <np.ndarray>low
+    high_arr = <np.ndarray>high
+    if np.any(np.less(low_arr, 0)):
+        raise ValueError('low is out of bounds for uint32')
+    if closed:
+        high_comp = np.greater_equal
+        low_high_comp = np.greater
+    else:
+        high_comp = np.greater
+        low_high_comp = np.greater_equal
+
+    if np.any(high_comp(high_arr, 0X100000000ULL)):
+        raise ValueError('high is out of bounds for uint32')
+    if np.any(low_high_comp(low_arr, high_arr)):
+        comp = '>' if closed else '>='
+        raise ValueError('low {comp} high'.format(comp=comp))
+
+    low_arr = <np.ndarray>np.PyArray_FROM_OTF(low, np.NPY_UINT64, np.NPY_ALIGNED | np.NPY_FORCECAST)
+    high_arr = <np.ndarray>np.PyArray_FROM_OTF(high, np.NPY_UINT64, np.NPY_ALIGNED | np.NPY_FORCECAST)
+
+    if size is not None:
+        out_arr = <np.ndarray>np.empty(size, np.uint32)
+    else:
+        it = np.PyArray_MultiIterNew2(low_arr, high_arr)
+        out_arr = <np.ndarray>np.empty(it.shape, np.uint32)
+
+    it = np.PyArray_MultiIterNew3(low_arr, high_arr, out_arr)
+    out_data = <uint32_t *>np.PyArray_DATA(out_arr)
+    cnt = np.PyArray_SIZE(out_arr)
+    mask = last_rng = 0
+    with lock, nogil:
+        for i in range(cnt):
+            low_v = (<uint64_t*>np.PyArray_MultiIter_DATA(it, 0))[0]
+            high_v = (<uint64_t*>np.PyArray_MultiIter_DATA(it, 1))[0]
+            # Subtract 1 since generator produces values on the closed int [off, off+rng]
+            rng = <uint32_t>((high_v - is_open) - low_v)
+            off = <uint32_t>(<uint64_t>low_v)
+
+            if rng != last_rng:
+                # Smallest bit mask >= max
+                mask = <uint32_t>_gen_mask(rng)
+
+            out_data[i] = random_buffered_bounded_uint32(state, off, rng, mask, use_masked, &buf_rem, &buf)
+
+            np.PyArray_MultiIter_NEXT(it)
+    return out_arr
+
+cdef object _rand_uint16_broadcast(np.ndarray low, np.ndarray high, object size,
+                                   bint use_masked, bint closed,
+                                   bitgen_t *state, object lock):
+    """
+    Array path for smaller integer types
+
+    This path is simpler since the high value in the open interval [low, high)
+    must be in-range for the next larger type, uint32. Here we cast to
+    this type for checking and then recast to uint16 when producing the
+    random integers.
+    """
+    cdef uint16_t rng, last_rng, off, val, mask, out_val, is_open
+    cdef uint32_t buf
+    cdef uint16_t *out_data
+    cdef uint32_t low_v, high_v
+    cdef np.ndarray low_arr, high_arr, out_arr
+    cdef np.npy_intp i, cnt
+    cdef np.broadcast it
+    cdef int buf_rem = 0
+
+    # Array path
+    is_open = not closed
+    low_arr = <np.ndarray>low
+    high_arr = <np.ndarray>high
+    if np.any(np.less(low_arr, 0)):
+        raise ValueError('low is out of bounds for uint16')
+    if closed:
+        high_comp = np.greater_equal
+        low_high_comp = np.greater
+    else:
+        high_comp = np.greater
+        low_high_comp = np.greater_equal
+
+    if np.any(high_comp(high_arr, 0X10000UL)):
+        raise ValueError('high is out of bounds for uint16')
+    if np.any(low_high_comp(low_arr, high_arr)):
+        comp = '>' if closed else '>='
+        raise ValueError('low {comp} high'.format(comp=comp))
+
+    low_arr = <np.ndarray>np.PyArray_FROM_OTF(low, np.NPY_UINT32, np.NPY_ALIGNED | np.NPY_FORCECAST)
+    high_arr = <np.ndarray>np.PyArray_FROM_OTF(high, np.NPY_UINT32, np.NPY_ALIGNED | np.NPY_FORCECAST)
+
+    if size is not None:
+        out_arr = <np.ndarray>np.empty(size, np.uint16)
+    else:
+        it = np.PyArray_MultiIterNew2(low_arr, high_arr)
+        out_arr = <np.ndarray>np.empty(it.shape, np.uint16)
+
+    it = np.PyArray_MultiIterNew3(low_arr, high_arr, out_arr)
+    out_data = <uint16_t *>np.PyArray_DATA(out_arr)
+    cnt = np.PyArray_SIZE(out_arr)
+    mask = last_rng = 0
+    with lock, nogil:
+        for i in range(cnt):
+            low_v = (<uint32_t*>np.PyArray_MultiIter_DATA(it, 0))[0]
+            high_v = (<uint32_t*>np.PyArray_MultiIter_DATA(it, 1))[0]
+            # Subtract 1 since generator produces values on the closed int [off, off+rng]
+            rng = <uint16_t>((high_v - is_open) - low_v)
+            off = <uint16_t>(<uint32_t>low_v)
+
+            if rng != last_rng:
+                # Smallest bit mask >= max
+                mask = <uint16_t>_gen_mask(rng)
+
+            out_data[i] = random_buffered_bounded_uint16(state, off, rng, mask, use_masked, &buf_rem, &buf)
+
+            np.PyArray_MultiIter_NEXT(it)
+    return out_arr
+
+cdef object _rand_uint8_broadcast(np.ndarray low, np.ndarray high, object size,
+                                  bint use_masked, bint closed,
+                                  bitgen_t *state, object lock):
+    """
+    Array path for smaller integer types
+
+    This path is simpler since the high value in the open interval [low, high)
+    must be in-range for the next larger type, uint16. Here we cast to
+    this type for checking and then recast to uint8 when producing the
+    random integers.
+    """
+    cdef uint8_t rng, last_rng, off, val, mask, out_val, is_open
+    cdef uint32_t buf
+    cdef uint8_t *out_data
+    cdef uint16_t low_v, high_v
+    cdef np.ndarray low_arr, high_arr, out_arr
+    cdef np.npy_intp i, cnt
+    cdef np.broadcast it
+    cdef int buf_rem = 0
+
+    # Array path
+    is_open = not closed
+    low_arr = <np.ndarray>low
+    high_arr = <np.ndarray>high
+    if np.any(np.less(low_arr, 0)):
+        raise ValueError('low is out of bounds for uint8')
+    if closed:
+        high_comp = np.greater_equal
+        low_high_comp = np.greater
+    else:
+        high_comp = np.greater
+        low_high_comp = np.greater_equal
+
+    if np.any(high_comp(high_arr, 0X100UL)):
+        raise ValueError('high is out of bounds for uint8')
+    if np.any(low_high_comp(low_arr, high_arr)):
+        comp = '>' if closed else '>='
+        raise ValueError('low {comp} high'.format(comp=comp))
+
+    low_arr = <np.ndarray>np.PyArray_FROM_OTF(low, np.NPY_UINT16, np.NPY_ALIGNED | np.NPY_FORCECAST)
+    high_arr = <np.ndarray>np.PyArray_FROM_OTF(high, np.NPY_UINT16, np.NPY_ALIGNED | np.NPY_FORCECAST)
+
+    if size is not None:
+        out_arr = <np.ndarray>np.empty(size, np.uint8)
+    else:
+        it = np.PyArray_MultiIterNew2(low_arr, high_arr)
+        out_arr = <np.ndarray>np.empty(it.shape, np.uint8)
+
+    it = np.PyArray_MultiIterNew3(low_arr, high_arr, out_arr)
+    out_data = <uint8_t *>np.PyArray_DATA(out_arr)
+    cnt = np.PyArray_SIZE(out_arr)
+    mask = last_rng = 0
+    with lock, nogil:
+        for i in range(cnt):
+            low_v = (<uint16_t*>np.PyArray_MultiIter_DATA(it, 0))[0]
+            high_v = (<uint16_t*>np.PyArray_MultiIter_DATA(it, 1))[0]
+            # Subtract 1 since generator produces values on the closed int [off, off+rng]
+            rng = <uint8_t>((high_v - is_open) - low_v)
+            off = <uint8_t>(<uint16_t>low_v)
+
+            if rng != last_rng:
+                # Smallest bit mask >= max
+                mask = <uint8_t>_gen_mask(rng)
+
+            out_data[i] = random_buffered_bounded_uint8(state, off, rng, mask, use_masked, &buf_rem, &buf)
+
+            np.PyArray_MultiIter_NEXT(it)
+    return out_arr
+
+cdef object _rand_bool_broadcast(np.ndarray low, np.ndarray high, object size,
+                                 bint use_masked, bint closed,
+                                 bitgen_t *state, object lock):
+    """
+    Array path for smaller integer types
+
+    This path is simpler since the high value in the open interval [low, high)
+    must be in-range for the next larger type, uint8. Here we cast to
+    this type for checking and then recast to bool when producing the
+    random integers.
+    """
+    cdef bool_t rng, last_rng, off, val, mask, out_val, is_open
+    cdef uint32_t buf
+    cdef bool_t *out_data
+    cdef uint8_t low_v, high_v
+    cdef np.ndarray low_arr, high_arr, out_arr
+    cdef np.npy_intp i, cnt
+    cdef np.broadcast it
+    cdef int buf_rem = 0
+
+    # Array path
+    is_open = not closed
+    low_arr = <np.ndarray>low
+    high_arr = <np.ndarray>high
+    if np.any(np.less(low_arr, 0)):
+        raise ValueError('low is out of bounds for bool')
+    if closed:
+        high_comp = np.greater_equal
+        low_high_comp = np.greater
+    else:
+        high_comp = np.greater
+        low_high_comp = np.greater_equal
+
+    if np.any(high_comp(high_arr, 0x2UL)):
+        raise ValueError('high is out of bounds for bool')
+    if np.any(low_high_comp(low_arr, high_arr)):
+        comp = '>' if closed else '>='
+        raise ValueError('low {comp} high'.format(comp=comp))
+
+    low_arr = <np.ndarray>np.PyArray_FROM_OTF(low, np.NPY_UINT8, np.NPY_ALIGNED | np.NPY_FORCECAST)
+    high_arr = <np.ndarray>np.PyArray_FROM_OTF(high, np.NPY_UINT8, np.NPY_ALIGNED | np.NPY_FORCECAST)
+
+    if size is not None:
+        out_arr = <np.ndarray>np.empty(size, np.bool_)
+    else:
+        it = np.PyArray_MultiIterNew2(low_arr, high_arr)
+        out_arr = <np.ndarray>np.empty(it.shape, np.bool_)
+
+    it = np.PyArray_MultiIterNew3(low_arr, high_arr, out_arr)
+    out_data = <bool_t *>np.PyArray_DATA(out_arr)
+    cnt = np.PyArray_SIZE(out_arr)
+    mask = last_rng = 0
+    with lock, nogil:
+        for i in range(cnt):
+            low_v = (<uint8_t*>np.PyArray_MultiIter_DATA(it, 0))[0]
+            high_v = (<uint8_t*>np.PyArray_MultiIter_DATA(it, 1))[0]
+            # Subtract 1 since generator produces values on the closed int [off, off+rng]
+            rng = <bool_t>((high_v - is_open) - low_v)
+            off = <bool_t>(<uint8_t>low_v)
+
+            if rng != last_rng:
+                # Smallest bit mask >= max
+                mask = <bool_t>_gen_mask(rng)
+
+            out_data[i] = random_buffered_bounded_bool(state, off, rng, mask, use_masked, &buf_rem, &buf)
+
+            np.PyArray_MultiIter_NEXT(it)
+    return out_arr
+
+cdef object _rand_int32_broadcast(np.ndarray low, np.ndarray high, object size,
+                                  bint use_masked, bint closed,
+                                  bitgen_t *state, object lock):
+    """
+    Array path for smaller integer types
+
+    This path is simpler since the high value in the open interval [low, high)
+    must be in-range for the next larger type, uint64. Here we cast to
+    this type for checking and then recast to int32 when producing the
+    random integers.
+    """
+    cdef uint32_t rng, last_rng, off, val, mask, out_val, is_open
+    cdef uint32_t buf
+    cdef uint32_t *out_data
+    cdef uint64_t low_v, high_v
+    cdef np.ndarray low_arr, high_arr, out_arr
+    cdef np.npy_intp i, cnt
+    cdef np.broadcast it
+    cdef int buf_rem = 0
+
+    # Array path
+    is_open = not closed
+    low_arr = <np.ndarray>low
+    high_arr = <np.ndarray>high
+    if np.any(np.less(low_arr, -0x80000000LL)):
+        raise ValueError('low is out of bounds for int32')
+    if closed:
+        high_comp = np.greater_equal
+        low_high_comp = np.greater
+    else:
+        high_comp = np.greater
+        low_high_comp = np.greater_equal
+
+    if np.any(high_comp(high_arr, 0x80000000LL)):
+        raise ValueError('high is out of bounds for int32')
+    if np.any(low_high_comp(low_arr, high_arr)):
+        comp = '>' if closed else '>='
+        raise ValueError('low {comp} high'.format(comp=comp))
+
+    low_arr = <np.ndarray>np.PyArray_FROM_OTF(low, np.NPY_INT64, np.NPY_ALIGNED | np.NPY_FORCECAST)
+    high_arr = <np.ndarray>np.PyArray_FROM_OTF(high, np.NPY_INT64, np.NPY_ALIGNED | np.NPY_FORCECAST)
+
+    if size is not None:
+        out_arr = <np.ndarray>np.empty(size, np.int32)
+    else:
+        it = np.PyArray_MultiIterNew2(low_arr, high_arr)
+        out_arr = <np.ndarray>np.empty(it.shape, np.int32)
+
+    it = np.PyArray_MultiIterNew3(low_arr, high_arr, out_arr)
+    out_data = <uint32_t *>np.PyArray_DATA(out_arr)
+    cnt = np.PyArray_SIZE(out_arr)
+    mask = last_rng = 0
+    with lock, nogil:
+        for i in range(cnt):
+            low_v = (<uint64_t*>np.PyArray_MultiIter_DATA(it, 0))[0]
+            high_v = (<uint64_t*>np.PyArray_MultiIter_DATA(it, 1))[0]
+            # Subtract 1 since generator produces values on the closed int [off, off+rng]
+            rng = <uint32_t>((high_v - is_open) - low_v)
+            off = <uint32_t>(<uint64_t>low_v)
+
+            if rng != last_rng:
+                # Smallest bit mask >= max
+                mask = <uint32_t>_gen_mask(rng)
+
+            out_data[i] = random_buffered_bounded_uint32(state, off, rng, mask, use_masked, &buf_rem, &buf)
+
+            np.PyArray_MultiIter_NEXT(it)
+    return out_arr
+
+cdef object _rand_int16_broadcast(np.ndarray low, np.ndarray high, object size,
+                                  bint use_masked, bint closed,
+                                  bitgen_t *state, object lock):
+    """
+    Array path for smaller integer types
+
+    This path is simpler since the high value in the open interval [low, high)
+    must be in-range for the next larger type, uint32. Here we cast to
+    this type for checking and then recast to int16 when producing the
+    random integers.
+    """
+    cdef uint16_t rng, last_rng, off, val, mask, out_val, is_open
+    cdef uint32_t buf
+    cdef uint16_t *out_data
+    cdef uint32_t low_v, high_v
+    cdef np.ndarray low_arr, high_arr, out_arr
+    cdef np.npy_intp i, cnt
+    cdef np.broadcast it
+    cdef int buf_rem = 0
+
+    # Array path
+    is_open = not closed
+    low_arr = <np.ndarray>low
+    high_arr = <np.ndarray>high
+    if np.any(np.less(low_arr, -0x8000LL)):
+        raise ValueError('low is out of bounds for int16')
+    if closed:
+        high_comp = np.greater_equal
+        low_high_comp = np.greater
+    else:
+        high_comp = np.greater
+        low_high_comp = np.greater_equal
+
+    if np.any(high_comp(high_arr, 0x8000LL)):
+        raise ValueError('high is out of bounds for int16')
+    if np.any(low_high_comp(low_arr, high_arr)):
+        comp = '>' if closed else '>='
+        raise ValueError('low {comp} high'.format(comp=comp))
+
+    low_arr = <np.ndarray>np.PyArray_FROM_OTF(low, np.NPY_INT32, np.NPY_ALIGNED | np.NPY_FORCECAST)
+    high_arr = <np.ndarray>np.PyArray_FROM_OTF(high, np.NPY_INT32, np.NPY_ALIGNED | np.NPY_FORCECAST)
+
+    if size is not None:
+        out_arr = <np.ndarray>np.empty(size, np.int16)
+    else:
+        it = np.PyArray_MultiIterNew2(low_arr, high_arr)
+        out_arr = <np.ndarray>np.empty(it.shape, np.int16)
+
+    it = np.PyArray_MultiIterNew3(low_arr, high_arr, out_arr)
+    out_data = <uint16_t *>np.PyArray_DATA(out_arr)
+    cnt = np.PyArray_SIZE(out_arr)
+    mask = last_rng = 0
+    with lock, nogil:
+        for i in range(cnt):
+            low_v = (<uint32_t*>np.PyArray_MultiIter_DATA(it, 0))[0]
+            high_v = (<uint32_t*>np.PyArray_MultiIter_DATA(it, 1))[0]
+            # Subtract 1 since generator produces values on the closed int [off, off+rng]
+            rng = <uint16_t>((high_v - is_open) - low_v)
+            off = <uint16_t>(<uint32_t>low_v)
+
+            if rng != last_rng:
+                # Smallest bit mask >= max
+                mask = <uint16_t>_gen_mask(rng)
+
+            out_data[i] = random_buffered_bounded_uint16(state, off, rng, mask, use_masked, &buf_rem, &buf)
+
+            np.PyArray_MultiIter_NEXT(it)
+    return out_arr
+
+cdef object _rand_int8_broadcast(np.ndarray low, np.ndarray high, object size,
+                                 bint use_masked, bint closed,
+                                 bitgen_t *state, object lock):
+    """
+    Array path for smaller integer types
+
+    This path is simpler since the high value in the open interval [low, high)
+    must be in-range for the next larger type, uint16. Here we cast to
+    this type for checking and then recast to int8 when producing the
+    random integers.
+    """
+    cdef uint8_t rng, last_rng, off, val, mask, out_val, is_open
+    cdef uint32_t buf
+    cdef uint8_t *out_data
+    cdef uint16_t low_v, high_v
+    cdef np.ndarray low_arr, high_arr, out_arr
+    cdef np.npy_intp i, cnt
+    cdef np.broadcast it
+    cdef int buf_rem = 0
+
+    # Array path
+    is_open = not closed
+    low_arr = <np.ndarray>low
+    high_arr = <np.ndarray>high
+    if np.any(np.less(low_arr, -0x80LL)):
+        raise ValueError('low is out of bounds for int8')
+    if closed:
+        high_comp = np.greater_equal
+        low_high_comp = np.greater
+    else:
+        high_comp = np.greater
+        low_high_comp = np.greater_equal
+
+    if np.any(high_comp(high_arr, 0x80LL)):
+        raise ValueError('high is out of bounds for int8')
+    if np.any(low_high_comp(low_arr, high_arr)):
+        comp = '>' if closed else '>='
+        raise ValueError('low {comp} high'.format(comp=comp))
+
+    low_arr = <np.ndarray>np.PyArray_FROM_OTF(low, np.NPY_INT16, np.NPY_ALIGNED | np.NPY_FORCECAST)
+    high_arr = <np.ndarray>np.PyArray_FROM_OTF(high, np.NPY_INT16, np.NPY_ALIGNED | np.NPY_FORCECAST)
+
+    if size is not None:
+        out_arr = <np.ndarray>np.empty(size, np.int8)
+    else:
+        it = np.PyArray_MultiIterNew2(low_arr, high_arr)
+        out_arr = <np.ndarray>np.empty(it.shape, np.int8)
+
+    it = np.PyArray_MultiIterNew3(low_arr, high_arr, out_arr)
+    out_data = <uint8_t *>np.PyArray_DATA(out_arr)
+    cnt = np.PyArray_SIZE(out_arr)
+    mask = last_rng = 0
+    with lock, nogil:
+        for i in range(cnt):
+            low_v = (<uint16_t*>np.PyArray_MultiIter_DATA(it, 0))[0]
+            high_v = (<uint16_t*>np.PyArray_MultiIter_DATA(it, 1))[0]
+            # Subtract 1 since generator produces values on the closed int [off, off+rng]
+            rng = <uint8_t>((high_v - is_open) - low_v)
+            off = <uint8_t>(<uint16_t>low_v)
+
+            if rng != last_rng:
+                # Smallest bit mask >= max
+                mask = <uint8_t>_gen_mask(rng)
+
+            out_data[i] = random_buffered_bounded_uint8(state, off, rng, mask, use_masked, &buf_rem, &buf)
+
+            np.PyArray_MultiIter_NEXT(it)
+    return out_arr
+
+
+cdef object _rand_uint64_broadcast(object low, object high, object size,
+                                   bint use_masked, bint closed,
+                                   bitgen_t *state, object lock):
+    """
+    Array path for 64-bit integer types
+
+    Requires special treatment since the high value can be out-of-range for
+    the largest (64 bit) integer type since the generator is specified on the
+    interval [low, high).
+
+    The internal generator does not have this issue since it generates from
+    the closed interval [low, high-1] and high-1 is always in range for the
+    64 bit integer type.
+    """
+
+    cdef np.ndarray low_arr, high_arr, out_arr, highm1_arr
+    cdef np.npy_intp i, cnt, n
+    cdef np.broadcast it
+    cdef object closed_upper
+    cdef uint64_t *out_data
+    cdef uint64_t *highm1_data
+    cdef uint64_t low_v, high_v
+    cdef uint64_t rng, last_rng, val, mask, off, out_val
+
+    low_arr = <np.ndarray>low
+    high_arr = <np.ndarray>high
+
+    if np.any(np.less(low_arr, 0x0ULL)):
+        raise ValueError('low is out of bounds for uint64')
+    dt = high_arr.dtype
+    if closed or np.issubdtype(dt, np.integer):
+        # Avoid object dtype path if already an integer
+        high_lower_comp = np.less if closed else np.less_equal
+        if np.any(high_lower_comp(high_arr, 0x0ULL)):
+            comp = '>' if closed else '>='
+            raise ValueError('low {comp} high'.format(comp=comp))
+        high_m1 = high_arr if closed else high_arr - dt.type(1)
+        if np.any(np.greater(high_m1, 0xFFFFFFFFFFFFFFFFULL)):
+            raise ValueError('high is out of bounds for uint64')
+        highm1_arr = <np.ndarray>np.PyArray_FROM_OTF(high_m1, np.NPY_UINT64, np.NPY_ALIGNED | np.NPY_FORCECAST)
+    else:
+        # If input is object or a floating type
+        highm1_arr = <np.ndarray>np.empty_like(high_arr, dtype=np.uint64)
+        highm1_data = <uint64_t *>np.PyArray_DATA(highm1_arr)
+        cnt = np.PyArray_SIZE(high_arr)
+        flat = high_arr.flat
+        for i in range(cnt):
+            # Subtract 1 since generator produces values on the closed int [off, off+rng]
+            closed_upper = int(flat[i]) - 1
+            if closed_upper > 0xFFFFFFFFFFFFFFFFULL:
+                raise ValueError('high is out of bounds for uint64')
+            if closed_upper < 0x0ULL:
+                comp = '>' if closed else '>='
+                raise ValueError('low {comp} high'.format(comp=comp))
+            highm1_data[i] = <uint64_t>closed_upper
+
+    if np.any(np.greater(low_arr, highm1_arr)):
+        comp = '>' if closed else '>='
+        raise ValueError('low {comp} high'.format(comp=comp))
+
+    high_arr = highm1_arr
+    low_arr = <np.ndarray>np.PyArray_FROM_OTF(low, np.NPY_UINT64, np.NPY_ALIGNED | np.NPY_FORCECAST)
+
+    if size is not None:
+        out_arr = <np.ndarray>np.empty(size, np.uint64)
+    else:
+        it = np.PyArray_MultiIterNew2(low_arr, high_arr)
+        out_arr = <np.ndarray>np.empty(it.shape, np.uint64)
+
+    it = np.PyArray_MultiIterNew3(low_arr, high_arr, out_arr)
+    out_data = <uint64_t *>np.PyArray_DATA(out_arr)
+    n = np.PyArray_SIZE(out_arr)
+    mask = last_rng = 0
+    with lock, nogil:
+        for i in range(n):
+            low_v = (<uint64_t*>np.PyArray_MultiIter_DATA(it, 0))[0]
+            high_v = (<uint64_t*>np.PyArray_MultiIter_DATA(it, 1))[0]
+            # Generator produces values on the closed int [off, off+rng], -1 subtracted above
+            rng = <uint64_t>(high_v - low_v)
+            off = <uint64_t>(<uint64_t>low_v)
+
+            if rng != last_rng:
+                mask = _gen_mask(rng)
+            out_data[i] = random_bounded_uint64(state, off, rng, mask, use_masked)
+
+            np.PyArray_MultiIter_NEXT(it)
+
+    return out_arr
+
+cdef object _rand_int64_broadcast(object low, object high, object size,
+                                  bint use_masked, bint closed,
+                                  bitgen_t *state, object lock):
+    """
+    Array path for 64-bit integer types
+
+    Requires special treatment since the high value can be out-of-range for
+    the largest (64 bit) integer type since the generator is specified on the
+    interval [low, high).
+
+    The internal generator does not have this issue since it generates from
+    the closed interval [low, high-1] and high-1 is always in range for the
+    64 bit integer type.
+ """ + + cdef np.ndarray low_arr, high_arr, out_arr, highm1_arr + cdef np.npy_intp i, cnt, n + cdef np.broadcast it + cdef object closed_upper + cdef uint64_t *out_data + cdef int64_t *highm1_data + cdef int64_t low_v, high_v + cdef uint64_t rng, last_rng, val, mask, off, out_val + + low_arr = <np.ndarray>low + high_arr = <np.ndarray>high + + if np.any(np.less(low_arr, -0x8000000000000000LL)): + raise ValueError('low is out of bounds for int64') + dt = high_arr.dtype + if closed or np.issubdtype(dt, np.integer): + # Avoid object dtype path if already an integer + high_lower_comp = np.less if closed else np.less_equal + if np.any(high_lower_comp(high_arr, -0x8000000000000000LL)): + comp = '>' if closed else '>=' + raise ValueError('low {comp} high'.format(comp=comp)) + high_m1 = high_arr if closed else high_arr - dt.type(1) + if np.any(np.greater(high_m1, 0x7FFFFFFFFFFFFFFFLL)): + raise ValueError('high is out of bounds for int64') + highm1_arr = <np.ndarray>np.PyArray_FROM_OTF(high_m1, np.NPY_INT64, np.NPY_ALIGNED | np.NPY_FORCECAST) + else: + # If input is object or a floating type + highm1_arr = <np.ndarray>np.empty_like(high_arr, dtype=np.int64) + highm1_data = <int64_t *>np.PyArray_DATA(highm1_arr) + cnt = np.PyArray_SIZE(high_arr) + flat = high_arr.flat + for i in range(cnt): + # Subtract 1 since generator produces values on the closed int [off, off+rng] + closed_upper = int(flat[i]) - 1 + if closed_upper > 0x7FFFFFFFFFFFFFFFLL: + raise ValueError('high is out of bounds for int64') + if closed_upper < -0x8000000000000000LL: + comp = '>' if closed else '>=' + raise ValueError('low {comp} high'.format(comp=comp)) + highm1_data[i] = <int64_t>closed_upper + + if np.any(np.greater(low_arr, highm1_arr)): + comp = '>' if closed else '>=' + raise ValueError('low {comp} high'.format(comp=comp)) + + high_arr = highm1_arr + low_arr = <np.ndarray>np.PyArray_FROM_OTF(low, np.NPY_INT64, np.NPY_ALIGNED | np.NPY_FORCECAST) + + if size is not None: + out_arr = <np.ndarray>np.empty(size, np.int64) + else: + it = np.PyArray_MultiIterNew2(low_arr, high_arr) + out_arr = <np.ndarray>np.empty(it.shape, np.int64) + + it = np.PyArray_MultiIterNew3(low_arr, high_arr, out_arr) + out_data = <uint64_t *>np.PyArray_DATA(out_arr) + n = np.PyArray_SIZE(out_arr) + mask = last_rng = 0 + with lock, nogil: + for i in range(n): + low_v = (<int64_t*>np.PyArray_MultiIter_DATA(it, 0))[0] + high_v = (<int64_t*>np.PyArray_MultiIter_DATA(it, 1))[0] + # Generator produces values on the closed int [off, off+rng], -1 subtracted above + rng = <uint64_t>(high_v - low_v) + off = <uint64_t>(<int64_t>low_v) + + if rng != last_rng: + mask = _gen_mask(rng) + out_data[i] = random_bounded_uint64(state, off, rng, mask, use_masked) + + np.PyArray_MultiIter_NEXT(it) + + return out_arr + + +cdef object _rand_uint64(object low, object high, object size, + bint use_masked, bint closed, + bitgen_t *state, object lock): + """ + _rand_uint64(low, high, size, use_masked, *state, lock) + + Return random np.uint64 integers from `low` (inclusive) to `high` (exclusive). + + Return random integers from the "discrete uniform" distribution in the + interval [`low`, `high`). If `high` is None (the default), + then results are from [0, `low`). On entry the arguments are presumed + to have been validated for size and order for the np.uint64 type. + + Parameters + ---------- + low : int or array-like + Lowest (signed) integer to be drawn from the distribution (unless + ``high=None``, in which case this parameter is the *highest* such + integer). 
+    high : int or array-like
+        If provided, one above the largest (signed) integer to be drawn from the
+        distribution (see above for behavior if ``high=None``).
+    size : int or tuple of ints
+        Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+        ``m * n * k`` samples are drawn. Default is None, in which case a
+        single value is returned.
+    use_masked : bool
+        If True then rejection sampling with a range mask is used, otherwise Lemire's algorithm is used.
+    closed : bool
+        If True then sample from [low, high]. If False, sample from [low, high).
+    state : bit generator
+        Bit generator state to use in the core random number generators
+    lock : threading.Lock
+        Lock to prevent multiple threads from using a single generator simultaneously
+
+    Returns
+    -------
+    out : python scalar or ndarray of np.uint64
+        `size`-shaped array of random integers from the appropriate
+        distribution, or a single such random int if `size` not provided.
+
+    Notes
+    -----
+    The internal integer generator produces values from the closed
+    interval [low, high-(not closed)]. This requires some care since
+    high can be out-of-range for uint64. The scalar path leaves
+    integers as Python integers until the 1 has been subtracted to
+    avoid needing to cast to a larger type.
+    """
+    cdef np.ndarray out_arr, low_arr, high_arr
+    cdef uint64_t rng, off, out_val
+    cdef uint64_t *out_data
+    cdef np.npy_intp i, n, cnt
+
+    if size is not None:
+        if (np.prod(size) == 0):
+            return np.empty(size, dtype=np.uint64)
+
+    low_arr = <np.ndarray>np.array(low, copy=False)
+    high_arr = <np.ndarray>np.array(high, copy=False)
+    low_ndim = np.PyArray_NDIM(low_arr)
+    high_ndim = np.PyArray_NDIM(high_arr)
+    if ((low_ndim == 0 or (low_ndim == 1 and low_arr.size == 1 and size is not None)) and
+            (high_ndim == 0 or (high_ndim == 1 and high_arr.size == 1 and size is not None))):
+        low = int(low_arr)
+        high = int(high_arr)
+        # Subtract 1 since internal generator produces on closed interval [low, high]
+        if not closed:
+            high -= 1
+
+        if low < 0x0ULL:
+            raise ValueError("low is out of bounds for uint64")
+        if high > 0xFFFFFFFFFFFFFFFFULL:
+            raise ValueError("high is out of bounds for uint64")
+        if low > high:  # -1 already subtracted, closed interval
+            comp = '>' if closed else '>='
+            raise ValueError('low {comp} high'.format(comp=comp))
+
+        rng = <uint64_t>(high - low)
+        off = <uint64_t>(<uint64_t>low)
+        if size is None:
+            with lock:
+                random_bounded_uint64_fill(state, off, rng, 1, use_masked, &out_val)
+            return np.uint64(<uint64_t>out_val)
+        else:
+            out_arr = <np.ndarray>np.empty(size, np.uint64)
+            cnt = np.PyArray_SIZE(out_arr)
+            out_data = <uint64_t *>np.PyArray_DATA(out_arr)
+            with lock, nogil:
+                random_bounded_uint64_fill(state, off, rng, cnt, use_masked, out_data)
+            return out_arr
+    return _rand_uint64_broadcast(low_arr, high_arr, size, use_masked, closed, state, lock)
+
+cdef object _rand_uint32(object low, object high, object size,
+                         bint use_masked, bint closed,
+                         bitgen_t *state, object lock):
+    """
+    _rand_uint32(low, high, size, use_masked, *state, lock)
+
+    Return random np.uint32 integers from `low` (inclusive) to `high` (exclusive).
+
+    Return random integers from the "discrete uniform" distribution in the
+    interval [`low`, `high`). If `high` is None (the default),
+    then results are from [0, `low`). On entry the arguments are presumed
+    to have been validated for size and order for the np.uint32 type.
+
+    Parameters
+    ----------
+    low : int or array-like
+        Lowest (signed) integer to be drawn from the distribution (unless
+        ``high=None``, in which case this parameter is the *highest* such
+        integer).
+    high : int or array-like
+        If provided, one above the largest (signed) integer to be drawn from the
+        distribution (see above for behavior if ``high=None``).
+    size : int or tuple of ints
+        Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+        ``m * n * k`` samples are drawn. Default is None, in which case a
+        single value is returned.
+    use_masked : bool
+        If True then rejection sampling with a range mask is used, otherwise Lemire's algorithm is used.
+    closed : bool
+        If True then sample from [low, high]. If False, sample from [low, high).
+    state : bit generator
+        Bit generator state to use in the core random number generators
+    lock : threading.Lock
+        Lock to prevent multiple threads from using a single generator simultaneously
+
+    Returns
+    -------
+    out : python scalar or ndarray of np.uint32
+        `size`-shaped array of random integers from the appropriate
+        distribution, or a single such random int if `size` not provided.
+
+    Notes
+    -----
+    The internal integer generator produces values from the closed
+    interval [low, high-(not closed)]. This requires some care since
+    high can be out-of-range for uint32. The scalar path leaves
+    integers as Python integers until the 1 has been subtracted to
+    avoid needing to cast to a larger type.
+    """
+    cdef np.ndarray out_arr, low_arr, high_arr
+    cdef uint32_t rng, off, out_val
+    cdef uint32_t *out_data
+    cdef np.npy_intp i, n, cnt
+
+    if size is not None:
+        if (np.prod(size) == 0):
+            return np.empty(size, dtype=np.uint32)
+
+    low_arr = <np.ndarray>np.array(low, copy=False)
+    high_arr = <np.ndarray>np.array(high, copy=False)
+    low_ndim = np.PyArray_NDIM(low_arr)
+    high_ndim = np.PyArray_NDIM(high_arr)
+    if ((low_ndim == 0 or (low_ndim == 1 and low_arr.size == 1 and size is not None)) and
+            (high_ndim == 0 or (high_ndim == 1 and high_arr.size == 1 and size is not None))):
+        low = int(low_arr)
+        high = int(high_arr)
+        # Subtract 1 since internal generator produces on closed interval [low, high]
+        if not closed:
+            high -= 1
+
+        if low < 0x0UL:
+            raise ValueError("low is out of bounds for uint32")
+        if high > 0XFFFFFFFFUL:
+            raise ValueError("high is out of bounds for uint32")
+        if low > high:  # -1 already subtracted, closed interval
+            comp = '>' if closed else '>='
+            raise ValueError('low {comp} high'.format(comp=comp))
+
+        rng = <uint32_t>(high - low)
+        off = <uint32_t>(<uint32_t>low)
+        if size is None:
+            with lock:
+                random_bounded_uint32_fill(state, off, rng, 1, use_masked, &out_val)
+            return np.uint32(<uint32_t>out_val)
+        else:
+            out_arr = <np.ndarray>np.empty(size, np.uint32)
+            cnt = np.PyArray_SIZE(out_arr)
+            out_data = <uint32_t *>np.PyArray_DATA(out_arr)
+            with lock, nogil:
+                random_bounded_uint32_fill(state, off, rng, cnt, use_masked, out_data)
+            return out_arr
+    return _rand_uint32_broadcast(low_arr, high_arr, size, use_masked, closed, state, lock)
+
+cdef object _rand_uint16(object low, object high, object size,
+                         bint use_masked, bint closed,
+                         bitgen_t *state, object lock):
+    """
+    _rand_uint16(low, high, size, use_masked, *state, lock)
+
+    Return random np.uint16 integers from `low` (inclusive) to `high` (exclusive).
+
+    Return random integers from the "discrete uniform" distribution in the
+    interval [`low`, `high`). If `high` is None (the default),
+    then results are from [0, `low`). On entry the arguments are presumed
+    to have been validated for size and order for the np.uint16 type.
+
+    Parameters
+    ----------
+    low : int or array-like
+        Lowest (signed) integer to be drawn from the distribution (unless
+        ``high=None``, in which case this parameter is the *highest* such
+        integer).
+    high : int or array-like
+        If provided, one above the largest (signed) integer to be drawn from the
+        distribution (see above for behavior if ``high=None``).
+    size : int or tuple of ints
+        Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+        ``m * n * k`` samples are drawn. Default is None, in which case a
+        single value is returned.
+    use_masked : bool
+        If True then rejection sampling with a range mask is used, otherwise Lemire's algorithm is used.
+    closed : bool
+        If True then sample from [low, high]. If False, sample from [low, high).
+    state : bit generator
+        Bit generator state to use in the core random number generators
+    lock : threading.Lock
+        Lock to prevent multiple threads from using a single generator simultaneously
+
+    Returns
+    -------
+    out : python scalar or ndarray of np.uint16
+        `size`-shaped array of random integers from the appropriate
+        distribution, or a single such random int if `size` not provided.
+
+    Notes
+    -----
+    The internal integer generator produces values from the closed
+    interval [low, high-(not closed)]. This requires some care since
+    high can be out-of-range for uint16. The scalar path leaves
+    integers as Python integers until the 1 has been subtracted to
+    avoid needing to cast to a larger type.
+    """
+    cdef np.ndarray out_arr, low_arr, high_arr
+    cdef uint16_t rng, off, out_val
+    cdef uint16_t *out_data
+    cdef np.npy_intp i, n, cnt
+
+    if size is not None:
+        if (np.prod(size) == 0):
+            return np.empty(size, dtype=np.uint16)
+
+    low_arr = <np.ndarray>np.array(low, copy=False)
+    high_arr = <np.ndarray>np.array(high, copy=False)
+    low_ndim = np.PyArray_NDIM(low_arr)
+    high_ndim = np.PyArray_NDIM(high_arr)
+    if ((low_ndim == 0 or (low_ndim == 1 and low_arr.size == 1 and size is not None)) and
+            (high_ndim == 0 or (high_ndim == 1 and high_arr.size == 1 and size is not None))):
+        low = int(low_arr)
+        high = int(high_arr)
+        # Subtract 1 since internal generator produces on closed interval [low, high]
+        if not closed:
+            high -= 1
+
+        if low < 0x0UL:
+            raise ValueError("low is out of bounds for uint16")
+        if high > 0XFFFFUL:
+            raise ValueError("high is out of bounds for uint16")
+        if low > high:  # -1 already subtracted, closed interval
+            comp = '>' if closed else '>='
+            raise ValueError('low {comp} high'.format(comp=comp))
+
+        rng = <uint16_t>(high - low)
+        off = <uint16_t>(<uint16_t>low)
+        if size is None:
+            with lock:
+                random_bounded_uint16_fill(state, off, rng, 1, use_masked, &out_val)
+            return np.uint16(<uint16_t>out_val)
+        else:
+            out_arr = <np.ndarray>np.empty(size, np.uint16)
+            cnt = np.PyArray_SIZE(out_arr)
+            out_data = <uint16_t *>np.PyArray_DATA(out_arr)
+            with lock, nogil:
+                random_bounded_uint16_fill(state, off, rng, cnt, use_masked, out_data)
+            return out_arr
+    return _rand_uint16_broadcast(low_arr, high_arr, size, use_masked, closed, state, lock)
+
+cdef object _rand_uint8(object low, object high, object size,
+                        bint use_masked, bint closed,
+                        bitgen_t *state, object lock):
+    """
+    _rand_uint8(low, high, size, use_masked, *state, lock)
+
+    Return random np.uint8 integers from `low` (inclusive) to `high` (exclusive).
+
+    Return random integers from the "discrete uniform" distribution in the
+    interval [`low`, `high`). If `high` is None (the default),
+    then results are from [0, `low`). On entry the arguments are presumed
+    to have been validated for size and order for the np.uint8 type.
+
+    Parameters
+    ----------
+    low : int or array-like
+        Lowest (signed) integer to be drawn from the distribution (unless
+        ``high=None``, in which case this parameter is the *highest* such
+        integer).
+    high : int or array-like
+        If provided, one above the largest (signed) integer to be drawn from the
+        distribution (see above for behavior if ``high=None``).
+    size : int or tuple of ints
+        Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+        ``m * n * k`` samples are drawn. Default is None, in which case a
+        single value is returned.
+    use_masked : bool
+        If True then rejection sampling with a range mask is used, otherwise Lemire's algorithm is used.
+    closed : bool
+        If True then sample from [low, high]. If False, sample from [low, high).
+    state : bit generator
+        Bit generator state to use in the core random number generators
+    lock : threading.Lock
+        Lock to prevent multiple threads from using a single generator simultaneously
+
+    Returns
+    -------
+    out : python scalar or ndarray of np.uint8
+        `size`-shaped array of random integers from the appropriate
+        distribution, or a single such random int if `size` not provided.
+
+    Notes
+    -----
+    The internal integer generator produces values from the closed
+    interval [low, high-(not closed)]. This requires some care since
+    high can be out-of-range for uint8. The scalar path leaves
+    integers as Python integers until the 1 has been subtracted to
+    avoid needing to cast to a larger type.
+    """
+    cdef np.ndarray out_arr, low_arr, high_arr
+    cdef uint8_t rng, off, out_val
+    cdef uint8_t *out_data
+    cdef np.npy_intp i, n, cnt
+
+    if size is not None:
+        if (np.prod(size) == 0):
+            return np.empty(size, dtype=np.uint8)
+
+    low_arr = <np.ndarray>np.array(low, copy=False)
+    high_arr = <np.ndarray>np.array(high, copy=False)
+    low_ndim = np.PyArray_NDIM(low_arr)
+    high_ndim = np.PyArray_NDIM(high_arr)
+    if ((low_ndim == 0 or (low_ndim == 1 and low_arr.size == 1 and size is not None)) and
+            (high_ndim == 0 or (high_ndim == 1 and high_arr.size == 1 and size is not None))):
+        low = int(low_arr)
+        high = int(high_arr)
+        # Subtract 1 since internal generator produces on closed interval [low, high]
+        if not closed:
+            high -= 1
+
+        if low < 0x0UL:
+            raise ValueError("low is out of bounds for uint8")
+        if high > 0XFFUL:
+            raise ValueError("high is out of bounds for uint8")
+        if low > high:  # -1 already subtracted, closed interval
+            comp = '>' if closed else '>='
+            raise ValueError('low {comp} high'.format(comp=comp))
+
+        rng = <uint8_t>(high - low)
+        off = <uint8_t>(<uint8_t>low)
+        if size is None:
+            with lock:
+                random_bounded_uint8_fill(state, off, rng, 1, use_masked, &out_val)
+            return np.uint8(<uint8_t>out_val)
+        else:
+            out_arr = <np.ndarray>np.empty(size, np.uint8)
+            cnt = np.PyArray_SIZE(out_arr)
+            out_data = <uint8_t *>np.PyArray_DATA(out_arr)
+            with lock, nogil:
+                random_bounded_uint8_fill(state, off, rng, cnt, use_masked, out_data)
+            return out_arr
+    return _rand_uint8_broadcast(low_arr, high_arr, size, use_masked, closed, state, lock)
+
+cdef object _rand_bool(object low, object high, object size,
+                       bint use_masked, bint closed,
+                       bitgen_t *state, object lock):
+    """
+    _rand_bool(low, high, size, use_masked, *state, lock)
+
+    Return random np.bool integers from `low` (inclusive) to `high` (exclusive).
+
+    Return random integers from the "discrete uniform" distribution in the
+    interval [`low`, `high`). If `high` is None (the default),
+    then results are from [0, `low`). On entry the arguments are presumed
+    to have been validated for size and order for the np.bool type.
+
+    Parameters
+    ----------
+    low : int or array-like
+        Lowest (signed) integer to be drawn from the distribution (unless
+        ``high=None``, in which case this parameter is the *highest* such
+        integer).
+    high : int or array-like
+        If provided, one above the largest (signed) integer to be drawn from the
+        distribution (see above for behavior if ``high=None``).
+    size : int or tuple of ints
+        Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+        ``m * n * k`` samples are drawn. Default is None, in which case a
+        single value is returned.
+    use_masked : bool
+        If True then rejection sampling with a range mask is used, otherwise Lemire's algorithm is used.
+    closed : bool
+        If True then sample from [low, high]. If False, sample from [low, high).
+    state : bit generator
+        Bit generator state to use in the core random number generators
+    lock : threading.Lock
+        Lock to prevent multiple threads from using a single generator simultaneously
+
+    Returns
+    -------
+    out : python scalar or ndarray of np.bool
+        `size`-shaped array of random integers from the appropriate
+        distribution, or a single such random int if `size` not provided.
+
+    Notes
+    -----
+    The internal integer generator produces values from the closed
+    interval [low, high-(not closed)]. This requires some care since
+    high can be out-of-range for bool. The scalar path leaves
+    integers as Python integers until the 1 has been subtracted to
+    avoid needing to cast to a larger type.
+    """
+    cdef np.ndarray out_arr, low_arr, high_arr
+    cdef bool_t rng, off, out_val
+    cdef bool_t *out_data
+    cdef np.npy_intp i, n, cnt
+
+    if size is not None:
+        if (np.prod(size) == 0):
+            return np.empty(size, dtype=np.bool)
+
+    low_arr = <np.ndarray>np.array(low, copy=False)
+    high_arr = <np.ndarray>np.array(high, copy=False)
+    low_ndim = np.PyArray_NDIM(low_arr)
+    high_ndim = np.PyArray_NDIM(high_arr)
+    if ((low_ndim == 0 or (low_ndim == 1 and low_arr.size == 1 and size is not None)) and
+            (high_ndim == 0 or (high_ndim == 1 and high_arr.size == 1 and size is not None))):
+        low = int(low_arr)
+        high = int(high_arr)
+        # Subtract 1 since internal generator produces on closed interval [low, high]
+        if not closed:
+            high -= 1
+
+        if low < 0x0UL:
+            raise ValueError("low is out of bounds for bool")
+        if high > 0x1UL:
+            raise ValueError("high is out of bounds for bool")
+        if low > high:  # -1 already subtracted, closed interval
+            comp = '>' if closed else '>='
+            raise ValueError('low {comp} high'.format(comp=comp))
+
+        rng = <bool_t>(high - low)
+        off = <bool_t>(<bool_t>low)
+        if size is None:
+            with lock:
+                random_bounded_bool_fill(state, off, rng, 1, use_masked, &out_val)
+            return np.bool_(<bool_t>out_val)
+        else:
+            out_arr = <np.ndarray>np.empty(size, np.bool)
+            cnt = np.PyArray_SIZE(out_arr)
+            out_data = <bool_t *>np.PyArray_DATA(out_arr)
+            with lock, nogil:
+                random_bounded_bool_fill(state, off, rng, cnt, use_masked, out_data)
+            return out_arr
+    return _rand_bool_broadcast(low_arr, high_arr, size, use_masked, closed, state, lock)
+
+cdef object _rand_int64(object low, object high, object size,
+                        bint use_masked, bint closed,
+                        bitgen_t *state, object lock):
+    """
+    _rand_int64(low, high, size, use_masked, *state, lock)
+
+    Return random np.int64 integers from `low` (inclusive) to `high` (exclusive).
+ + Return random integers from the "discrete uniform" distribution in the + interval [`low`, `high`). If `high` is None (the default), + then results are from [0, `low`). On entry the arguments are presumed + to have been validated for size and order for the np.int64 type. + + Parameters + ---------- + low : int or array-like + Lowest (signed) integer to be drawn from the distribution (unless + ``high=None``, in which case this parameter is the *highest* such + integer). + high : int or array-like + If provided, one above the largest (signed) integer to be drawn from the + distribution (see above for behavior if ``high=None``). + size : int or tuple of ints + Output shape. If the given shape is, e.g., ``(m, n, k)``, then + ``m * n * k`` samples are drawn. Default is None, in which case a + single value is returned. + use_masked : bool + If True then rejection sampling with a range mask is used else Lemire's algorithm is used. + closed : bool + If True then sample from [low, high]. If False, sample [low, high) + state : bit generator + Bit generator state to use in the core random number generators + lock : threading.Lock + Lock to prevent multiple using a single generator simultaneously + + Returns + ------- + out : python scalar or ndarray of np.int64 + `size`-shaped array of random integers from the appropriate + distribution, or a single such random int if `size` not provided. + + Notes + ----- + The internal integer generator produces values from the closed + interval [low, high-(not closed)]. This requires some care since + high can be out-of-range for uint64. The scalar path leaves + integers as Python integers until the 1 has been subtracted to + avoid needing to cast to a larger type. + """ + cdef np.ndarray out_arr, low_arr, high_arr + cdef uint64_t rng, off, out_val + cdef uint64_t *out_data + cdef np.npy_intp i, n, cnt + + if size is not None: + if (np.prod(size) == 0): + return np.empty(size, dtype=np.int64) + + low_arr = <np.ndarray>np.array(low, copy=False) + high_arr = <np.ndarray>np.array(high, copy=False) + low_ndim = np.PyArray_NDIM(low_arr) + high_ndim = np.PyArray_NDIM(high_arr) + if ((low_ndim == 0 or (low_ndim == 1 and low_arr.size == 1 and size is not None)) and + (high_ndim == 0 or (high_ndim == 1 and high_arr.size == 1 and size is not None))): + low = int(low_arr) + high = int(high_arr) + # Subtract 1 since internal generator produces on closed interval [low, high] + if not closed: + high -= 1 + + if low < -0x8000000000000000LL: + raise ValueError("low is out of bounds for int64") + if high > 0x7FFFFFFFFFFFFFFFL: + raise ValueError("high is out of bounds for int64") + if low > high: # -1 already subtracted, closed interval + comp = '>' if closed else '>=' + raise ValueError('low {comp} high'.format(comp=comp)) + + rng = <uint64_t>(high - low) + off = <uint64_t>(<int64_t>low) + if size is None: + with lock: + random_bounded_uint64_fill(state, off, rng, 1, use_masked, &out_val) + return np.int64(<int64_t>out_val) + else: + out_arr = <np.ndarray>np.empty(size, np.int64) + cnt = np.PyArray_SIZE(out_arr) + out_data = <uint64_t *>np.PyArray_DATA(out_arr) + with lock, nogil: + random_bounded_uint64_fill(state, off, rng, cnt, use_masked, out_data) + return out_arr + return _rand_int64_broadcast(low_arr, high_arr, size, use_masked, closed, state, lock) + +cdef object _rand_int32(object low, object high, object size, + bint use_masked, bint closed, + bitgen_t *state, object lock): + """ + _rand_int32(low, high, size, use_masked, *state, lock) + + Return random np.int32 
integers from `low` (inclusive) to `high` (exclusive). + + Return random integers from the "discrete uniform" distribution in the + interval [`low`, `high`). If `high` is None (the default), + then results are from [0, `low`). On entry the arguments are presumed + to have been validated for size and order for the np.int32 type. + + Parameters + ---------- + low : int or array-like + Lowest (signed) integer to be drawn from the distribution (unless + ``high=None``, in which case this parameter is the *highest* such + integer). + high : int or array-like + If provided, one above the largest (signed) integer to be drawn from the + distribution (see above for behavior if ``high=None``). + size : int or tuple of ints + Output shape. If the given shape is, e.g., ``(m, n, k)``, then + ``m * n * k`` samples are drawn. Default is None, in which case a + single value is returned. + use_masked : bool + If True then rejection sampling with a range mask is used else Lemire's algorithm is used. + closed : bool + If True then sample from [low, high]. If False, sample [low, high) + state : bit generator + Bit generator state to use in the core random number generators + lock : threading.Lock + Lock to prevent multiple using a single generator simultaneously + + Returns + ------- + out : python scalar or ndarray of np.int32 + `size`-shaped array of random integers from the appropriate + distribution, or a single such random int if `size` not provided. + + Notes + ----- + The internal integer generator produces values from the closed + interval [low, high-(not closed)]. This requires some care since + high can be out-of-range for uint32. The scalar path leaves + integers as Python integers until the 1 has been subtracted to + avoid needing to cast to a larger type. + """ + cdef np.ndarray out_arr, low_arr, high_arr + cdef uint32_t rng, off, out_val + cdef uint32_t *out_data + cdef np.npy_intp i, n, cnt + + if size is not None: + if (np.prod(size) == 0): + return np.empty(size, dtype=np.int32) + + low_arr = <np.ndarray>np.array(low, copy=False) + high_arr = <np.ndarray>np.array(high, copy=False) + low_ndim = np.PyArray_NDIM(low_arr) + high_ndim = np.PyArray_NDIM(high_arr) + if ((low_ndim == 0 or (low_ndim == 1 and low_arr.size == 1 and size is not None)) and + (high_ndim == 0 or (high_ndim == 1 and high_arr.size == 1 and size is not None))): + low = int(low_arr) + high = int(high_arr) + # Subtract 1 since internal generator produces on closed interval [low, high] + if not closed: + high -= 1 + + if low < -0x80000000L: + raise ValueError("low is out of bounds for int32") + if high > 0x7FFFFFFFL: + raise ValueError("high is out of bounds for int32") + if low > high: # -1 already subtracted, closed interval + comp = '>' if closed else '>=' + raise ValueError('low {comp} high'.format(comp=comp)) + + rng = <uint32_t>(high - low) + off = <uint32_t>(<int32_t>low) + if size is None: + with lock: + random_bounded_uint32_fill(state, off, rng, 1, use_masked, &out_val) + return np.int32(<int32_t>out_val) + else: + out_arr = <np.ndarray>np.empty(size, np.int32) + cnt = np.PyArray_SIZE(out_arr) + out_data = <uint32_t *>np.PyArray_DATA(out_arr) + with lock, nogil: + random_bounded_uint32_fill(state, off, rng, cnt, use_masked, out_data) + return out_arr + return _rand_int32_broadcast(low_arr, high_arr, size, use_masked, closed, state, lock) + +cdef object _rand_int16(object low, object high, object size, + bint use_masked, bint closed, + bitgen_t *state, object lock): + """ + _rand_int16(low, high, size, use_masked, *state, 
lock)
+
+    Return random np.int16 integers from `low` (inclusive) to `high` (exclusive).
+
+    Return random integers from the "discrete uniform" distribution in the
+    interval [`low`, `high`). If `high` is None (the default),
+    then results are from [0, `low`). On entry the arguments are presumed
+    to have been validated for size and order for the np.int16 type.
+
+    Parameters
+    ----------
+    low : int or array-like
+        Lowest (signed) integer to be drawn from the distribution (unless
+        ``high=None``, in which case this parameter is the *highest* such
+        integer).
+    high : int or array-like
+        If provided, one above the largest (signed) integer to be drawn from the
+        distribution (see above for behavior if ``high=None``).
+    size : int or tuple of ints
+        Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+        ``m * n * k`` samples are drawn. Default is None, in which case a
+        single value is returned.
+    use_masked : bool
+        If True, rejection sampling with a range mask is used; otherwise
+        Lemire's algorithm is used.
+    closed : bool
+        If True, sample from the closed interval [low, high]. If False,
+        sample from the half-open interval [low, high).
+    state : bit generator
+        Bit generator state to use in the core random number generators.
+    lock : threading.Lock
+        Lock to prevent multiple threads from using a single generator
+        simultaneously.
+
+    Returns
+    -------
+    out : python scalar or ndarray of np.int16
+        `size`-shaped array of random integers from the appropriate
+        distribution, or a single such random int if `size` not provided.
+
+    Notes
+    -----
+    The internal integer generator produces values from the closed
+    interval [low, high-(not closed)]. This requires some care since
+    high can be out-of-range for uint16. The scalar path leaves
+    integers as Python integers until the 1 has been subtracted to
+    avoid needing to cast to a larger type.
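A small worked illustration of that scalar path, in plain Python with assumed values (``closed`` mirrors the parameter above; this is a sketch, not the Cython code)::

    closed = False              # mirrors the ``closed`` parameter above
    low, high = 0, 0x10000      # high == 2**16 is a valid *exclusive* bound
    if not closed:
        high -= 1               # 0xFFFF, which now fits in uint16
    rng = high - low            # width of the closed interval [low, high]
    off = low                   # offset added to each raw draw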
+ """ + cdef np.ndarray out_arr, low_arr, high_arr + cdef uint16_t rng, off, out_val + cdef uint16_t *out_data + cdef np.npy_intp i, n, cnt + + if size is not None: + if (np.prod(size) == 0): + return np.empty(size, dtype=np.int16) + + low_arr = <np.ndarray>np.array(low, copy=False) + high_arr = <np.ndarray>np.array(high, copy=False) + low_ndim = np.PyArray_NDIM(low_arr) + high_ndim = np.PyArray_NDIM(high_arr) + if ((low_ndim == 0 or (low_ndim == 1 and low_arr.size == 1 and size is not None)) and + (high_ndim == 0 or (high_ndim == 1 and high_arr.size == 1 and size is not None))): + low = int(low_arr) + high = int(high_arr) + # Subtract 1 since internal generator produces on closed interval [low, high] + if not closed: + high -= 1 + + if low < -0x8000L: + raise ValueError("low is out of bounds for int16") + if high > 0x7FFFL: + raise ValueError("high is out of bounds for int16") + if low > high: # -1 already subtracted, closed interval + comp = '>' if closed else '>=' + raise ValueError('low {comp} high'.format(comp=comp)) + + rng = <uint16_t>(high - low) + off = <uint16_t>(<int16_t>low) + if size is None: + with lock: + random_bounded_uint16_fill(state, off, rng, 1, use_masked, &out_val) + return np.int16(<int16_t>out_val) + else: + out_arr = <np.ndarray>np.empty(size, np.int16) + cnt = np.PyArray_SIZE(out_arr) + out_data = <uint16_t *>np.PyArray_DATA(out_arr) + with lock, nogil: + random_bounded_uint16_fill(state, off, rng, cnt, use_masked, out_data) + return out_arr + return _rand_int16_broadcast(low_arr, high_arr, size, use_masked, closed, state, lock) + +cdef object _rand_int8(object low, object high, object size, + bint use_masked, bint closed, + bitgen_t *state, object lock): + """ + _rand_int8(low, high, size, use_masked, *state, lock) + + Return random np.int8 integers from `low` (inclusive) to `high` (exclusive). + + Return random integers from the "discrete uniform" distribution in the + interval [`low`, `high`). If `high` is None (the default), + then results are from [0, `low`). On entry the arguments are presumed + to have been validated for size and order for the np.int8 type. + + Parameters + ---------- + low : int or array-like + Lowest (signed) integer to be drawn from the distribution (unless + ``high=None``, in which case this parameter is the *highest* such + integer). + high : int or array-like + If provided, one above the largest (signed) integer to be drawn from the + distribution (see above for behavior if ``high=None``). + size : int or tuple of ints + Output shape. If the given shape is, e.g., ``(m, n, k)``, then + ``m * n * k`` samples are drawn. Default is None, in which case a + single value is returned. + use_masked : bool + If True then rejection sampling with a range mask is used else Lemire's algorithm is used. + closed : bool + If True then sample from [low, high]. If False, sample [low, high) + state : bit generator + Bit generator state to use in the core random number generators + lock : threading.Lock + Lock to prevent multiple using a single generator simultaneously + + Returns + ------- + out : python scalar or ndarray of np.int8 + `size`-shaped array of random integers from the appropriate + distribution, or a single such random int if `size` not provided. + + Notes + ----- + The internal integer generator produces values from the closed + interval [low, high-(not closed)]. This requires some care since + high can be out-of-range for uint8. 
The scalar path leaves + integers as Python integers until the 1 has been subtracted to + avoid needing to cast to a larger type. + """ + cdef np.ndarray out_arr, low_arr, high_arr + cdef uint8_t rng, off, out_val + cdef uint8_t *out_data + cdef np.npy_intp i, n, cnt + + if size is not None: + if (np.prod(size) == 0): + return np.empty(size, dtype=np.int8) + + low_arr = <np.ndarray>np.array(low, copy=False) + high_arr = <np.ndarray>np.array(high, copy=False) + low_ndim = np.PyArray_NDIM(low_arr) + high_ndim = np.PyArray_NDIM(high_arr) + if ((low_ndim == 0 or (low_ndim == 1 and low_arr.size == 1 and size is not None)) and + (high_ndim == 0 or (high_ndim == 1 and high_arr.size == 1 and size is not None))): + low = int(low_arr) + high = int(high_arr) + # Subtract 1 since internal generator produces on closed interval [low, high] + if not closed: + high -= 1 + + if low < -0x80L: + raise ValueError("low is out of bounds for int8") + if high > 0x7FL: + raise ValueError("high is out of bounds for int8") + if low > high: # -1 already subtracted, closed interval + comp = '>' if closed else '>=' + raise ValueError('low {comp} high'.format(comp=comp)) + + rng = <uint8_t>(high - low) + off = <uint8_t>(<int8_t>low) + if size is None: + with lock: + random_bounded_uint8_fill(state, off, rng, 1, use_masked, &out_val) + return np.int8(<int8_t>out_val) + else: + out_arr = <np.ndarray>np.empty(size, np.int8) + cnt = np.PyArray_SIZE(out_arr) + out_data = <uint8_t *>np.PyArray_DATA(out_arr) + with lock, nogil: + random_bounded_uint8_fill(state, off, rng, cnt, use_masked, out_data) + return out_arr + return _rand_int8_broadcast(low_arr, high_arr, size, use_masked, closed, state, lock) diff --git a/numpy/random/bounded_integers.pyx.in b/numpy/random/_bounded_integers.pyx.in index 411b65a37..47cb13b3a 100644 --- a/numpy/random/bounded_integers.pyx.in +++ b/numpy/random/_bounded_integers.pyx.in @@ -4,12 +4,54 @@ import numpy as np cimport numpy as np -from .distributions cimport * - __all__ = [] np.import_array() +cdef extern from "include/distributions.h": + # Generate random numbers in closed interval [off, off + rng]. 
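The fill and buffered-draw functions declared next implement the two strategies that ``use_masked`` selects between. A minimal Python sketch of both, assuming a ``next_uint32`` callable standing in for the bit generator's raw 32-bit output (the real implementations are in C and also special-case the full 32-bit range, which this sketch ignores)::

    def masked_rejection(next_uint32, off, rng, mask):
        # ``mask`` is the smallest 2**k - 1 covering ``rng``: draw bits,
        # mask them down, and reject until the value lands in [0, rng].
        while True:
            v = next_uint32() & mask
            if v <= rng:
                return off + v

    def lemire(next_uint32, off, rng):
        # Lemire's multiply-shift rejection; assumes rng < 2**32 - 1 so
        # that n = rng + 1 fits in 32 bits.
        n = rng + 1
        m = next_uint32() * n            # exact 64-bit product
        if (m & 0xFFFFFFFF) < n:         # small low word: possible bias
            threshold = (2**32 - n) % n
            while (m & 0xFFFFFFFF) < threshold:
                m = next_uint32() * n
        return off + (m >> 32)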
+ uint64_t random_bounded_uint64(bitgen_t *bitgen_state, + uint64_t off, uint64_t rng, + uint64_t mask, bint use_masked) nogil + uint32_t random_buffered_bounded_uint32(bitgen_t *bitgen_state, + uint32_t off, uint32_t rng, + uint32_t mask, bint use_masked, + int *bcnt, uint32_t *buf) nogil + uint16_t random_buffered_bounded_uint16(bitgen_t *bitgen_state, + uint16_t off, uint16_t rng, + uint16_t mask, bint use_masked, + int *bcnt, uint32_t *buf) nogil + uint8_t random_buffered_bounded_uint8(bitgen_t *bitgen_state, + uint8_t off, uint8_t rng, + uint8_t mask, bint use_masked, + int *bcnt, uint32_t *buf) nogil + np.npy_bool random_buffered_bounded_bool(bitgen_t *bitgen_state, + np.npy_bool off, np.npy_bool rng, + np.npy_bool mask, bint use_masked, + int *bcnt, uint32_t *buf) nogil + void random_bounded_uint64_fill(bitgen_t *bitgen_state, + uint64_t off, uint64_t rng, np.npy_intp cnt, + bint use_masked, + uint64_t *out) nogil + void random_bounded_uint32_fill(bitgen_t *bitgen_state, + uint32_t off, uint32_t rng, np.npy_intp cnt, + bint use_masked, + uint32_t *out) nogil + void random_bounded_uint16_fill(bitgen_t *bitgen_state, + uint16_t off, uint16_t rng, np.npy_intp cnt, + bint use_masked, + uint16_t *out) nogil + void random_bounded_uint8_fill(bitgen_t *bitgen_state, + uint8_t off, uint8_t rng, np.npy_intp cnt, + bint use_masked, + uint8_t *out) nogil + void random_bounded_bool_fill(bitgen_t *bitgen_state, + np.npy_bool off, np.npy_bool rng, np.npy_intp cnt, + bint use_masked, + np.npy_bool *out) nogil + + + _integers_types = {'bool': (0, 2), 'int8': (-2**7, 2**7), 'int16': (-2**15, 2**15), diff --git a/numpy/random/common.pxd b/numpy/random/_common.pxd index 2f7baa06e..74bebca83 100644 --- a/numpy/random/common.pxd +++ b/numpy/random/_common.pxd @@ -1,23 +1,12 @@ #cython: language_level=3 -from libc.stdint cimport (uint8_t, uint16_t, uint32_t, uint64_t, - int8_t, int16_t, int32_t, int64_t, intptr_t, - uintptr_t) -from libc.math cimport sqrt - -cdef extern from "numpy/random/bitgen.h": - struct bitgen: - void *state - uint64_t (*next_uint64)(void *st) nogil - uint32_t (*next_uint32)(void *st) nogil - double (*next_double)(void *st) nogil - uint64_t (*next_raw)(void *st) nogil - - ctypedef bitgen bitgen_t +from libc.stdint cimport uint32_t, uint64_t, int32_t, int64_t import numpy as np cimport numpy as np +from ._bit_generator cimport bitgen_t + cdef double POISSON_LAM_MAX cdef double LEGACY_POISSON_LAM_MAX cdef uint64_t MAXSIZE @@ -44,7 +33,7 @@ cdef object prepare_ctypes(bitgen_t *bitgen) cdef int check_constraint(double val, object name, constraint_type cons) except -1 cdef int check_array_constraint(np.ndarray val, object name, constraint_type cons) except -1 -cdef extern from "src/aligned_malloc/aligned_malloc.h": +cdef extern from "include/aligned_malloc.h": cdef void *PyArray_realloc_aligned(void *p, size_t n) cdef void *PyArray_malloc_aligned(size_t n) cdef void *PyArray_calloc_aligned(size_t n, size_t s) @@ -56,6 +45,7 @@ ctypedef double (*random_double_1)(void *state, double a) nogil ctypedef double (*random_double_2)(void *state, double a, double b) nogil ctypedef double (*random_double_3)(void *state, double a, double b, double c) nogil +ctypedef double (*random_float_fill)(bitgen_t *state, np.npy_intp count, float* out) nogil ctypedef float (*random_float_0)(bitgen_t *state) nogil ctypedef float (*random_float_1)(bitgen_t *state, float a) nogil diff --git a/numpy/random/common.pyx b/numpy/random/_common.pyx index 74cd5f033..ef1afac7c 100644 --- a/numpy/random/common.pyx +++ 
b/numpy/random/_common.pyx @@ -6,7 +6,7 @@ import sys import numpy as np cimport numpy as np -from .common cimport * +from libc.stdint cimport uintptr_t __all__ = ['interface'] @@ -262,14 +262,16 @@ cdef object double_fill(void *func, bitgen_t *state, object size, object lock, o return out_array cdef object float_fill(void *func, bitgen_t *state, object size, object lock, object out): - cdef random_float_0 random_func = (<random_float_0>func) + cdef random_float_fill random_func = (<random_float_fill>func) + cdef float out_val cdef float *out_array_data cdef np.ndarray out_array cdef np.npy_intp i, n if size is None and out is None: with lock: - return random_func(state) + random_func(state, 1, &out_val) + return out_val if out is not None: check_output(out, np.float32, size) @@ -280,8 +282,7 @@ cdef object float_fill(void *func, bitgen_t *state, object size, object lock, ob n = np.PyArray_SIZE(out_array) out_array_data = <float *>np.PyArray_DATA(out_array) with lock, nogil: - for i in range(n): - out_array_data[i] = random_func(state) + random_func(state, n, out_array_data) return out_array cdef object float_fill_from_double(void *func, bitgen_t *state, object size, object lock, object out): diff --git a/numpy/random/generator.pyx b/numpy/random/_generator.pyx index 26fd95129..6d9fe20f9 100644 --- a/numpy/random/generator.pyx +++ b/numpy/random/_generator.pyx @@ -3,35 +3,159 @@ import operator import warnings -import numpy as np - -from .bounded_integers import _integers_types -from .pcg64 import PCG64 - from cpython.pycapsule cimport PyCapsule_IsValid, PyCapsule_GetPointer from cpython cimport (Py_INCREF, PyFloat_AsDouble) -from libc cimport string cimport cython +import numpy as np cimport numpy as np +from numpy.core.multiarray import normalize_axis_index + +from libc cimport string +from libc.stdint cimport (uint8_t, uint16_t, uint32_t, uint64_t, + int32_t, int64_t, INT64_MAX, SIZE_MAX) +from ._bounded_integers cimport (_rand_bool, _rand_int32, _rand_int64, + _rand_int16, _rand_int8, _rand_uint64, _rand_uint32, _rand_uint16, + _rand_uint8, _gen_mask) +from ._bounded_integers import _integers_types +from ._pcg64 import PCG64 +from ._bit_generator cimport bitgen_t +from ._common cimport (POISSON_LAM_MAX, CONS_POSITIVE, CONS_NONE, + CONS_NON_NEGATIVE, CONS_BOUNDED_0_1, CONS_BOUNDED_GT_0_1, + CONS_GT_1, CONS_POSITIVE_NOT_NAN, CONS_POISSON, + double_fill, cont, kahan_sum, cont_broadcast_3, float_fill, cont_f, + check_array_constraint, check_constraint, disc, discrete_broadcast_iii, + ) + + +cdef extern from "include/distributions.h": + + struct s_binomial_t: + int has_binomial + double psave + int64_t nsave + double r + double q + double fm + int64_t m + double p1 + double xm + double xl + double xr + double c + double laml + double lamr + double p2 + double p3 + double p4 + + ctypedef s_binomial_t binomial_t + + double random_standard_uniform(bitgen_t *bitgen_state) nogil + void random_standard_uniform_fill(bitgen_t* bitgen_state, np.npy_intp cnt, double *out) nogil + double random_standard_exponential(bitgen_t *bitgen_state) nogil + void random_standard_exponential_fill(bitgen_t *bitgen_state, np.npy_intp cnt, double *out) nogil + double random_standard_exponential_zig(bitgen_t *bitgen_state) nogil + void random_standard_exponential_zig_fill(bitgen_t *bitgen_state, np.npy_intp cnt, double *out) nogil + double random_standard_normal(bitgen_t* bitgen_state) nogil + void random_standard_normal_fill(bitgen_t *bitgen_state, np.npy_intp count, double *out) nogil + void 
random_standard_normal_fill_f(bitgen_t *bitgen_state, np.npy_intp count, float *out) nogil + double random_standard_gamma(bitgen_t *bitgen_state, double shape) nogil + + float random_standard_uniform_f(bitgen_t *bitgen_state) nogil + void random_standard_uniform_fill_f(bitgen_t* bitgen_state, np.npy_intp cnt, float *out) nogil + float random_standard_exponential_f(bitgen_t *bitgen_state) nogil + float random_standard_exponential_zig_f(bitgen_t *bitgen_state) nogil + void random_standard_exponential_fill_f(bitgen_t *bitgen_state, np.npy_intp cnt, float *out) nogil + void random_standard_exponential_zig_fill_f(bitgen_t *bitgen_state, np.npy_intp cnt, float *out) nogil + float random_standard_normal_f(bitgen_t* bitgen_state) nogil + float random_standard_gamma_f(bitgen_t *bitgen_state, float shape) nogil + + int64_t random_positive_int64(bitgen_t *bitgen_state) nogil + int32_t random_positive_int32(bitgen_t *bitgen_state) nogil + int64_t random_positive_int(bitgen_t *bitgen_state) nogil + uint64_t random_uint(bitgen_t *bitgen_state) nogil + + double random_normal(bitgen_t *bitgen_state, double loc, double scale) nogil + + double random_gamma(bitgen_t *bitgen_state, double shape, double scale) nogil + float random_gamma_f(bitgen_t *bitgen_state, float shape, float scale) nogil + + double random_exponential(bitgen_t *bitgen_state, double scale) nogil + double random_uniform(bitgen_t *bitgen_state, double lower, double range) nogil + double random_beta(bitgen_t *bitgen_state, double a, double b) nogil + double random_chisquare(bitgen_t *bitgen_state, double df) nogil + double random_f(bitgen_t *bitgen_state, double dfnum, double dfden) nogil + double random_standard_cauchy(bitgen_t *bitgen_state) nogil + double random_pareto(bitgen_t *bitgen_state, double a) nogil + double random_weibull(bitgen_t *bitgen_state, double a) nogil + double random_power(bitgen_t *bitgen_state, double a) nogil + double random_laplace(bitgen_t *bitgen_state, double loc, double scale) nogil + double random_gumbel(bitgen_t *bitgen_state, double loc, double scale) nogil + double random_logistic(bitgen_t *bitgen_state, double loc, double scale) nogil + double random_lognormal(bitgen_t *bitgen_state, double mean, double sigma) nogil + double random_rayleigh(bitgen_t *bitgen_state, double mode) nogil + double random_standard_t(bitgen_t *bitgen_state, double df) nogil + double random_noncentral_chisquare(bitgen_t *bitgen_state, double df, + double nonc) nogil + double random_noncentral_f(bitgen_t *bitgen_state, double dfnum, + double dfden, double nonc) nogil + double random_wald(bitgen_t *bitgen_state, double mean, double scale) nogil + double random_vonmises(bitgen_t *bitgen_state, double mu, double kappa) nogil + double random_triangular(bitgen_t *bitgen_state, double left, double mode, + double right) nogil + + int64_t random_poisson(bitgen_t *bitgen_state, double lam) nogil + int64_t random_negative_binomial(bitgen_t *bitgen_state, double n, double p) nogil + int64_t random_binomial(bitgen_t *bitgen_state, double p, int64_t n, binomial_t *binomial) nogil + int64_t random_logseries(bitgen_t *bitgen_state, double p) nogil + int64_t random_geometric_search(bitgen_t *bitgen_state, double p) nogil + int64_t random_geometric_inversion(bitgen_t *bitgen_state, double p) nogil + int64_t random_geometric(bitgen_t *bitgen_state, double p) nogil + int64_t random_zipf(bitgen_t *bitgen_state, double a) nogil + int64_t random_hypergeometric(bitgen_t *bitgen_state, int64_t good, int64_t bad, + int64_t sample) nogil + + uint64_t 
random_interval(bitgen_t *bitgen_state, uint64_t max) nogil + + # Generate random uint64 numbers in closed interval [off, off + rng]. + uint64_t random_bounded_uint64(bitgen_t *bitgen_state, + uint64_t off, uint64_t rng, + uint64_t mask, bint use_masked) nogil + + void random_multinomial(bitgen_t *bitgen_state, int64_t n, int64_t *mnix, + double *pix, np.npy_intp d, binomial_t *binomial) nogil + + int random_mvhg_count(bitgen_t *bitgen_state, + int64_t total, + size_t num_colors, int64_t *colors, + int64_t nsample, + size_t num_variates, int64_t *variates) nogil + void random_mvhg_marginals(bitgen_t *bitgen_state, + int64_t total, + size_t num_colors, int64_t *colors, + int64_t nsample, + size_t num_variates, int64_t *variates) nogil -from .bounded_integers cimport * -from .common cimport * -from .distributions cimport * +np.import_array() -__all__ = ['Generator', 'beta', 'binomial', 'bytes', 'chisquare', 'choice', - 'dirichlet', 'exponential', 'f', 'gamma', - 'geometric', 'gumbel', 'hypergeometric', 'integers', 'laplace', - 'logistic', 'lognormal', 'logseries', 'multinomial', - 'multivariate_normal', 'negative_binomial', 'noncentral_chisquare', - 'noncentral_f', 'normal', 'pareto', 'permutation', - 'poisson', 'power', 'random', 'rayleigh', 'shuffle', - 'standard_cauchy', 'standard_exponential', 'standard_gamma', - 'standard_normal', 'standard_t', 'triangular', - 'uniform', 'vonmises', 'wald', 'weibull', 'zipf'] +cdef int64_t _safe_sum_nonneg_int64(size_t num_colors, int64_t *colors): + """ + Sum the values in the array `colors`. -np.import_array() + Return -1 if an overflow occurs. + The values in *colors are assumed to be nonnegative. + """ + cdef size_t i + cdef int64_t sum + + sum = 0 + for i in range(num_colors): + if colors[i] > INT64_MAX - sum: + return -1 + sum += colors[i] + return sum cdef bint _check_bit_generator(object bitgen): @@ -192,9 +316,9 @@ cdef class Generator: cdef double temp key = np.dtype(dtype).name if key == 'float64': - return double_fill(&random_double_fill, &self._bitgen, size, self.lock, out) + return double_fill(&random_standard_uniform_fill, &self._bitgen, size, self.lock, out) elif key == 'float32': - return float_fill(&random_float, &self._bitgen, size, self.lock, out) + return float_fill(&random_standard_uniform_fill_f, &self._bitgen, size, self.lock, out) else: raise TypeError('Unsupported dtype "%s" for random' % key) @@ -340,9 +464,9 @@ cdef class Generator: return double_fill(&random_standard_exponential_fill, &self._bitgen, size, self.lock, out) elif key == 'float32': if method == u'zig': - return float_fill(&random_standard_exponential_zig_f, &self._bitgen, size, self.lock, out) + return float_fill(&random_standard_exponential_zig_fill_f, &self._bitgen, size, self.lock, out) else: - return float_fill(&random_standard_exponential_f, &self._bitgen, size, self.lock, out) + return float_fill(&random_standard_exponential_fill_f, &self._bitgen, size, self.lock, out) else: raise TypeError('Unsupported dtype "%s" for standard_exponential' % key) @@ -919,9 +1043,9 @@ cdef class Generator: """ key = np.dtype(dtype).name if key == 'float64': - return double_fill(&random_gauss_zig_fill, &self._bitgen, size, self.lock, out) + return double_fill(&random_standard_normal_fill, &self._bitgen, size, self.lock, out) elif key == 'float32': - return float_fill(&random_gauss_zig_f, &self._bitgen, size, self.lock, out) + return float_fill(&random_standard_normal_fill_f, &self._bitgen, size, self.lock, out) else: raise TypeError('Unsupported dtype "%s" for 
standard_normal' % key) @@ -1022,7 +1146,7 @@ cdef class Generator: [ 0.39924804, 4.68456316, 4.99394529, 4.84057254]]) # random """ - return cont(&random_normal_zig, &self._bitgen, size, self.lock, 2, + return cont(&random_normal, &self._bitgen, size, self.lock, 2, loc, '', CONS_NONE, scale, 'scale', CONS_NON_NEGATIVE, 0.0, '', CONS_NONE, @@ -1108,13 +1232,13 @@ cdef class Generator: cdef void *func key = np.dtype(dtype).name if key == 'float64': - return cont(&random_standard_gamma_zig, &self._bitgen, size, self.lock, 1, + return cont(&random_standard_gamma, &self._bitgen, size, self.lock, 1, shape, 'shape', CONS_NON_NEGATIVE, 0.0, '', CONS_NONE, 0.0, '', CONS_NONE, out) if key == 'float32': - return cont_f(&random_standard_gamma_zig_f, &self._bitgen, size, self.lock, + return cont_f(&random_standard_gamma_f, &self._bitgen, size, self.lock, shape, 'shape', CONS_NON_NEGATIVE, out) else: @@ -3146,6 +3270,8 @@ cdef class Generator: See Also -------- + multivariate_hypergeometric : Draw samples from the multivariate + hypergeometric distribution. scipy.stats.hypergeom : probability density function, distribution or cumulative density function, etc. @@ -3644,6 +3770,222 @@ cdef class Generator: return multin + def multivariate_hypergeometric(self, object colors, object nsample, + size=None, method='marginals'): + """ + multivariate_hypergeometric(colors, nsample, size=None, + method='marginals') + + Generate variates from a multivariate hypergeometric distribution. + + The multivariate hypergeometric distribution is a generalization + of the hypergeometric distribution. + + Choose ``nsample`` items at random without replacement from a + collection with ``N`` distinct types. ``N`` is the length of + ``colors``, and the values in ``colors`` are the number of occurrences + of that type in the collection. The total number of items in the + collection is ``sum(colors)``. Each random variate generated by this + function is a vector of length ``N`` holding the counts of the + different types that occurred in the ``nsample`` items. + + The name ``colors`` comes from a common description of the + distribution: it is the probability distribution of the number of + marbles of each color selected without replacement from an urn + containing marbles of different colors; ``colors[i]`` is the number + of marbles in the urn with color ``i``. + + Parameters + ---------- + colors : sequence of integers + The number of each type of item in the collection from which + a sample is drawn. The values in ``colors`` must be nonnegative. + To avoid loss of precision in the algorithm, ``sum(colors)`` + must be less than ``10**9`` when `method` is "marginals". + nsample : int + The number of items selected. ``nsample`` must not be greater + than ``sum(colors)``. + size : int or tuple of ints, optional + The number of variates to generate, either an integer or a tuple + holding the shape of the array of variates. If the given size is, + e.g., ``(k, m)``, then ``k * m`` variates are drawn, where one + variate is a vector of length ``len(colors)``, and the return value + has shape ``(k, m, len(colors))``. If `size` is an integer, the + output has shape ``(size, len(colors))``. Default is None, in + which case a single variate is returned as an array with shape + ``(len(colors),)``. + method : string, optional + Specify the algorithm that is used to generate the variates. + Must be 'count' or 'marginals' (the default). See the Notes + for a description of the methods. 
+ + Returns + ------- + variates : ndarray + Array of variates drawn from the multivariate hypergeometric + distribution. + + See Also + -------- + hypergeometric : Draw samples from the (univariate) hypergeometric + distribution. + + Notes + ----- + The two methods do not return the same sequence of variates. + + The "count" algorithm is roughly equivalent to the following numpy + code:: + + choices = np.repeat(np.arange(len(colors)), colors) + selection = np.random.choice(choices, nsample, replace=False) + variate = np.bincount(selection, minlength=len(colors)) + + The "count" algorithm uses a temporary array of integers with length + ``sum(colors)``. + + The "marginals" algorithm generates a variate by using repeated + calls to the univariate hypergeometric sampler. It is roughly + equivalent to:: + + variate = np.zeros(len(colors), dtype=np.int64) + # `remaining` is the cumulative sum of `colors` from the last + # element to the first; e.g. if `colors` is [3, 1, 5], then + # `remaining` is [9, 6, 5]. + remaining = np.cumsum(colors[::-1])[::-1] + for i in range(len(colors)-1): + if nsample < 1: + break + variate[i] = hypergeometric(colors[i], remaining[i+1], + nsample) + nsample -= variate[i] + variate[-1] = nsample + + The default method is "marginals". For some cases (e.g. when + `colors` contains relatively small integers), the "count" method + can be significantly faster than the "marginals" method. If + performance of the algorithm is important, test the two methods + with typical inputs to decide which works best. + + .. versionadded:: 1.18.0 + + Examples + -------- + >>> colors = [16, 8, 4] + >>> seed = 4861946401452 + >>> gen = np.random.Generator(np.random.PCG64(seed)) + >>> gen.multivariate_hypergeometric(colors, 6) + array([5, 0, 1]) + >>> gen.multivariate_hypergeometric(colors, 6, size=3) + array([[5, 0, 1], + [2, 2, 2], + [3, 3, 0]]) + >>> gen.multivariate_hypergeometric(colors, 6, size=(2, 2)) + array([[[3, 2, 1], + [3, 2, 1]], + [[4, 1, 1], + [3, 2, 1]]]) + """ + cdef int64_t nsamp + cdef size_t num_colors + cdef int64_t total + cdef int64_t *colors_ptr + cdef int64_t max_index + cdef size_t num_variates + cdef int64_t *variates_ptr + cdef int result + + if method not in ['count', 'marginals']: + raise ValueError('method must be "count" or "marginals".') + + try: + operator.index(nsample) + except TypeError: + raise ValueError('nsample must be an integer') + + if nsample < 0: + raise ValueError("nsample must be nonnegative.") + if nsample > INT64_MAX: + raise ValueError("nsample must not exceed %d" % INT64_MAX) + nsamp = nsample + + # Validation of colors, a 1-d sequence of nonnegative integers. + invalid_colors = False + try: + colors = np.asarray(colors) + if colors.ndim != 1: + invalid_colors = True + elif colors.size > 0 and not np.issubdtype(colors.dtype, + np.integer): + invalid_colors = True + elif np.any((colors < 0) | (colors > INT64_MAX)): + invalid_colors = True + except ValueError: + invalid_colors = True + if invalid_colors: + raise ValueError('colors must be a one-dimensional sequence ' + 'of nonnegative integers not exceeding %d.' 
%
+                             INT64_MAX)
+
+        colors = np.ascontiguousarray(colors, dtype=np.int64)
+        num_colors = colors.size
+
+        colors_ptr = <int64_t *> np.PyArray_DATA(colors)
+
+        total = _safe_sum_nonneg_int64(num_colors, colors_ptr)
+        if total == -1:
+            raise ValueError("sum(colors) must not exceed the maximum value "
+                             "of a 64 bit signed integer (%d)" % INT64_MAX)
+
+        if method == 'marginals' and total >= 1000000000:
+            raise ValueError('When method is "marginals", sum(colors) must '
+                             'be less than 1000000000.')
+
+        # The C code that implements the 'count' method will malloc an
+        # array of size total*sizeof(size_t). Here we ensure that the
+        # product does not overflow.
+        if SIZE_MAX > <uint64_t>INT64_MAX:
+            max_index = INT64_MAX // sizeof(size_t)
+        else:
+            max_index = SIZE_MAX // sizeof(size_t)
+        if method == 'count' and total > max_index:
+            raise ValueError("When method is 'count', sum(colors) must not "
+                             "exceed %d" % max_index)
+        if nsamp > total:
+            raise ValueError("nsample > sum(colors)")
+
+        # Figure out the shape of the return array.
+        if size is None:
+            shape = (num_colors,)
+        elif np.isscalar(size):
+            shape = (size, num_colors)
+        else:
+            shape = tuple(size) + (num_colors,)
+        variates = np.zeros(shape, dtype=np.int64)
+
+        if num_colors == 0:
+            return variates
+
+        # One variate is a vector of length num_colors.
+        num_variates = variates.size // num_colors
+        variates_ptr = <int64_t *> np.PyArray_DATA(variates)
+
+        if method == 'count':
+            with self.lock, nogil:
+                result = random_mvhg_count(&self._bitgen, total,
+                                           num_colors, colors_ptr, nsamp,
+                                           num_variates, variates_ptr)
+            if result == -1:
+                raise MemoryError("Insufficient memory for multivariate_"
+                                  "hypergeometric with method='count' and "
+                                  "sum(colors)=%d" % total)
+        else:
+            with self.lock, nogil:
+                random_mvhg_marginals(&self._bitgen, total,
+                                      num_colors, colors_ptr, nsamp,
+                                      num_variates, variates_ptr)
+        return variates
+
     def dirichlet(self, object alpha, size=None):
         """
         dirichlet(alpha, size=None)
@@ -3772,7 +4114,7 @@ cdef class Generator:
         while i < totsize:
             acc = 0.0
             for j in range(k):
-                val_data[i+j] = random_standard_gamma_zig(&self._bitgen,
+                val_data[i+j] = random_standard_gamma(&self._bitgen,
                                                           alpha_data[j])
                 acc = acc + val_data[i + j]
             invacc = 1/acc
@@ -3783,20 +4125,21 @@ cdef class Generator:
         return diric

     # Shuffling and permutations:
-    def shuffle(self, object x):
+    def shuffle(self, object x, axis=0):
         """
-        shuffle(x)
+        shuffle(x, axis=0)

         Modify a sequence in-place by shuffling its contents.

-        This function only shuffles the array along the first axis of a
-        multi-dimensional array. The order of sub-arrays is changed but
-        their contents remains the same.
+        The order of sub-arrays is changed but their contents remain the same.

         Parameters
         ----------
         x : array_like
             The array or list to be shuffled.
+        axis : int, optional
+            The axis along which `x` is shuffled. Default is 0.
+            It is only supported on `ndarray` objects.
        Returns
        -------
@@ -3810,8 +4153,6 @@ cdef class Generator:
         >>> arr
         [1 7 5 2 9 4 3 6 0 8] # random

-        Multi-dimensional arrays are only shuffled along the first axis:
-
         >>> arr = np.arange(9).reshape((3, 3))
         >>> rng.shuffle(arr)
         >>> arr
@@ -3819,17 +4160,25 @@
                [6, 7, 8],
                [0, 1, 2]])

+        >>> arr = np.arange(9).reshape((3, 3))
+        >>> rng.shuffle(arr, axis=1)
+        >>> arr
+        array([[2, 0, 1], # random
+               [5, 3, 4],
+               [8, 6, 7]])
         """
         cdef:
             np.npy_intp i, j, n = len(x), stride, itemsize
             char* x_ptr
             char* buf_ptr
+
+        axis = normalize_axis_index(axis, np.ndim(x))
+
         if type(x) is np.ndarray and x.ndim == 1 and x.size:
             # Fast, statically typed path: shuffle the underlying buffer.
             # Only for non-empty, 1d objects of class ndarray (subclasses such
             # as MaskedArrays may not support this approach).
-            x_ptr = <char*><size_t>x.ctypes.data
+            x_ptr = <char*><size_t>np.PyArray_DATA(x)
             stride = x.strides[0]
             itemsize = x.dtype.itemsize
             # As the array x could contain python objects we use a buffer
             # of bytes for the swaps to avoid leaving one of the objects
             # within the buffer and erroneously decrementing its refcount
             # when the function exits.
             buf = np.empty(itemsize, dtype=np.int8)  # GC'd at function exit
-            buf_ptr = <char*><size_t>buf.ctypes.data
+            buf_ptr = <char*><size_t>np.PyArray_DATA(buf)
             with self.lock:
                 # We trick gcc into providing a specialized implementation for
                 # the most common case, yielding a ~33% performance improvement.
        numpy.AxisError: x must be an integer or at least 1-dimensional

-        """
+        >>> arr = np.arange(9).reshape((3, 3))
+        >>> rng.permutation(arr, axis=1)
+        array([[0, 2, 1], # random
+               [3, 5, 4],
+               [6, 8, 7]])
+
+        """
         if isinstance(x, (int, np.integer)):
             arr = np.arange(x)
             self.shuffle(arr)
             return arr

         arr = np.asarray(x)
-        if arr.ndim < 1:
-            raise np.AxisError("x must be an integer or at least 1-dimensional")
+
+        axis = normalize_axis_index(axis, arr.ndim)

         # shuffle has fast-path for 1-d
         if arr.ndim == 1:
@@ -3973,9 +4332,11 @@
             return arr

         # Shuffle index array, dtype to ensure fast path
-        idx = np.arange(arr.shape[0], dtype=np.intp)
+        idx = np.arange(arr.shape[axis], dtype=np.intp)
         self.shuffle(idx)
-        return arr[idx]
+        slices = [slice(None)]*arr.ndim
+        slices[axis] = idx
+        return arr[tuple(slices)]


 def default_rng(seed=None):
@@ -3983,19 +4344,18 @@

     Parameters
     ----------
-    seed : {None, int, array_like[ints], ISeedSequence, BitGenerator, Generator}, optional
+    seed : {None, int, array_like[ints], SeedSequence, BitGenerator, Generator}, optional
         A seed to initialize the `BitGenerator`. If None, then fresh,
         unpredictable entropy will be pulled from the OS. If an ``int`` or
         ``array_like[ints]`` is passed, then it will be passed to
         `SeedSequence` to derive the initial `BitGenerator` state. One may also
-        pass in an implementor of the `ISeedSequence` interface like
-        `SeedSequence`.
+        pass in a `SeedSequence` instance.
         Additionally, when passed a `BitGenerator`, it will be wrapped by
         `Generator`. If passed a `Generator`, it will be returned unaltered.

     Notes
     -----
-    When `seed` is omitted or ``None``, a new `BitGenerator` and `Generator` will
+    When ``seed`` is omitted or ``None``, a new `BitGenerator` and `Generator` will
     be instantiated each time. This function does not manage a default global
     instance.
     """
diff --git a/numpy/random/mt19937.pyx b/numpy/random/_mt19937.pyx
index 49c3622f5..e99652b73 100644
--- a/numpy/random/mt19937.pyx
+++ b/numpy/random/_mt19937.pyx
@@ -3,9 +3,8 @@ import operator

 import numpy as np
 cimport numpy as np

-from .common cimport *
-from .bit_generator cimport BitGenerator, SeedSequence
-from .entropy import random_entropy
+from libc.stdint cimport uint32_t, uint64_t
+from ._bit_generator cimport BitGenerator, SeedSequence

 __all__ = ['MT19937']

@@ -49,13 +48,12 @@ cdef class MT19937(BitGenerator):

     Parameters
     ----------
-    seed : {None, int, array_like[ints], ISeedSequence}, optional
+    seed : {None, int, array_like[ints], SeedSequence}, optional
         A seed to initialize the `BitGenerator`. If None, then fresh,
         unpredictable entropy will be pulled from the OS. If an ``int`` or
         ``array_like[ints]`` is passed, then it will be passed to
         `SeedSequence` to derive the initial `BitGenerator` state. One may also
-        pass in an implementor of the `ISeedSequence` interface like
-        `SeedSequence`.
+        pass in a `SeedSequence` instance.

     Attributes
     ----------
@@ -156,7 +154,8 @@ cdef class MT19937(BitGenerator):
             Random seed initializing the pseudo-random number generator.
             Can be an integer in [0, 2**32-1], array of integers in
             [0, 2**32-1], a `SeedSequence`, or ``None``. If `seed`
-            is ``None``, then sample entropy for a seed.
+            is ``None``, then fresh, unpredictable entropy will be pulled from
+            the OS.
        Raises
        ------
@@ -167,7 +166,8 @@ cdef class MT19937(BitGenerator):
         with self.lock:
             try:
                 if seed is None:
-                    val = random_entropy(RK_STATE_LEN)
+                    seed = SeedSequence()
+                    val = seed.generate_state(RK_STATE_LEN)
                     # MSB is 1; assuring non-zero initial array
                     self.rng_state.key[0] = 0x80000000UL
                     for i in range(1, RK_STATE_LEN):
diff --git a/numpy/random/pcg64.pyx b/numpy/random/_pcg64.pyx
index 585520139..1a5d852a2 100644
--- a/numpy/random/pcg64.pyx
+++ b/numpy/random/_pcg64.pyx
@@ -1,8 +1,9 @@
 import numpy as np
 cimport numpy as np

-from .common cimport *
-from .bit_generator cimport BitGenerator
+from libc.stdint cimport uint32_t, uint64_t
+from ._common cimport uint64_to_double, wrap_int
+from ._bit_generator cimport BitGenerator

 __all__ = ['PCG64']

@@ -43,13 +44,12 @@ cdef class PCG64(BitGenerator):

     Parameters
     ----------
-    seed : {None, int, array_like[ints], ISeedSequence}, optional
+    seed : {None, int, array_like[ints], SeedSequence}, optional
         A seed to initialize the `BitGenerator`. If None, then fresh,
         unpredictable entropy will be pulled from the OS. If an ``int`` or
         ``array_like[ints]`` is passed, then it will be passed to
         `SeedSequence` to derive the initial `BitGenerator` state. One may also
-        pass in an implementor of the `ISeedSequence` interface like
-        `SeedSequence`.
+        pass in a `SeedSequence` instance.

     Notes
     -----
diff --git a/numpy/random/philox.pyx b/numpy/random/_philox.pyx
index 8b7683017..9f136c32f 100644
--- a/numpy/random/philox.pyx
+++ b/numpy/random/_philox.pyx
@@ -6,9 +6,11 @@ except ImportError:
     from dummy_threading import Lock

 import numpy as np
+cimport numpy as np

-from .common cimport *
-from .bit_generator cimport BitGenerator
+from libc.stdint cimport uint32_t, uint64_t
+from ._common cimport uint64_to_double, int_to_array, wrap_int
+from ._bit_generator cimport BitGenerator

 __all__ = ['Philox']

@@ -62,21 +64,20 @@ cdef class Philox(BitGenerator):

     Parameters
     ----------
-    seed : {None, int, array_like[ints], ISeedSequence}, optional
+    seed : {None, int, array_like[ints], SeedSequence}, optional
         A seed to initialize the `BitGenerator`. If None, then fresh,
         unpredictable entropy will be pulled from the OS. If an ``int`` or
         ``array_like[ints]`` is passed, then it will be passed to
         `SeedSequence` to derive the initial `BitGenerator` state. One may also
-        pass in an implementor of the `ISeedSequence` interface like
-        `SeedSequence`.
+        pass in a `SeedSequence` instance.
     counter : {None, int, array_like}, optional
         Counter to use in the Philox state. Can be either a Python int
         (long in 2.x) in [0, 2**256) or a 4-element uint64 array.
         If not provided, the RNG is initialized at 0.
     key : {None, int, array_like}, optional
-        Key to use in the Philox state. Unlike seed, the value in key is
+        Key to use in the Philox state. Unlike ``seed``, the value in key is
         directly set. Can be either a Python int in [0, 2**128) or a 2-element
-        uint64 array. `key` and `seed` cannot both be used.
+        uint64 array. `key` and ``seed`` cannot both be used.

     Attributes
     ----------
@@ -108,10 +109,10 @@ cdef class Philox(BitGenerator):
     randoms produced. The second is a key which determines the sequence
     produced. Using different keys produces independent sequences.

-    The input seed is processed by `SeedSequence` to generate the key. The
+    The input ``seed`` is processed by `SeedSequence` to generate the key. The
     counter is set to 0.

-    Alternately, one can omit the seed parameter and set the ``key`` and
+    Alternately, one can omit the ``seed`` parameter and set the ``key`` and
     ``counter`` directly.
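For instance, a brief usage sketch of the two construction styles, with arbitrary illustrative values::

    from numpy.random import Generator, Philox

    g1 = Generator(Philox(seed=1234))                   # key derived from seed
    g2 = Generator(Philox(key=0xDEADBEEF, counter=0))   # key set directly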
**Parallel Features** @@ -146,7 +147,7 @@ cdef class Philox(BitGenerator): **Compatibility Guarantee** - ``Philox`` makes a guarantee that a fixed seed will always produce + ``Philox`` makes a guarantee that a fixed ``seed`` will always produce the same random integer stream. Examples diff --git a/numpy/random/_pickle.py b/numpy/random/_pickle.py index 3b58f21e8..29ff69644 100644 --- a/numpy/random/_pickle.py +++ b/numpy/random/_pickle.py @@ -1,10 +1,10 @@ from .mtrand import RandomState -from .philox import Philox -from .pcg64 import PCG64 -from .sfc64 import SFC64 +from ._philox import Philox +from ._pcg64 import PCG64 +from ._sfc64 import SFC64 -from .generator import Generator -from .mt19937 import MT19937 +from ._generator import Generator +from ._mt19937 import MT19937 BitGenerators = {'MT19937': MT19937, 'PCG64': PCG64, diff --git a/numpy/random/sfc64.pyx b/numpy/random/_sfc64.pyx index a881096e9..1633669d5 100644 --- a/numpy/random/sfc64.pyx +++ b/numpy/random/_sfc64.pyx @@ -1,8 +1,9 @@ import numpy as np cimport numpy as np -from .common cimport * -from .bit_generator cimport BitGenerator +from libc.stdint cimport uint32_t, uint64_t +from ._common cimport uint64_to_double +from ._bit_generator cimport BitGenerator __all__ = ['SFC64'] @@ -38,13 +39,12 @@ cdef class SFC64(BitGenerator): Parameters ---------- - seed : {None, int, array_like[ints], ISeedSequence}, optional + seed : {None, int, array_like[ints], SeedSequence}, optional A seed to initialize the `BitGenerator`. If None, then fresh, unpredictable entropy will be pulled from the OS. If an ``int`` or ``array_like[ints]`` is passed, then it will be passed to `SeedSequence` to derive the initial `BitGenerator` state. One may also - pass in an implementor of the `ISeedSequence` interface like - `SeedSequence`. + pass in a `SeedSequence` instance. 
Notes ----- diff --git a/numpy/random/distributions.pxd b/numpy/random/distributions.pxd deleted file mode 100644 index 75edaee9d..000000000 --- a/numpy/random/distributions.pxd +++ /dev/null @@ -1,140 +0,0 @@ -#cython: language_level=3 - -from .common cimport (uint8_t, uint16_t, uint32_t, uint64_t, - int32_t, int64_t, bitgen_t) -import numpy as np -cimport numpy as np - -cdef extern from "src/distributions/distributions.h": - - struct s_binomial_t: - int has_binomial - double psave - int64_t nsave - double r - double q - double fm - int64_t m - double p1 - double xm - double xl - double xr - double c - double laml - double lamr - double p2 - double p3 - double p4 - - ctypedef s_binomial_t binomial_t - - double random_double(bitgen_t *bitgen_state) nogil - void random_double_fill(bitgen_t* bitgen_state, np.npy_intp cnt, double *out) nogil - double random_standard_exponential(bitgen_t *bitgen_state) nogil - void random_standard_exponential_fill(bitgen_t *bitgen_state, np.npy_intp cnt, double *out) nogil - double random_standard_exponential_zig(bitgen_t *bitgen_state) nogil - void random_standard_exponential_zig_fill(bitgen_t *bitgen_state, np.npy_intp cnt, double *out) nogil - double random_gauss_zig(bitgen_t* bitgen_state) nogil - void random_gauss_zig_fill(bitgen_t *bitgen_state, np.npy_intp count, double *out) nogil - double random_standard_gamma_zig(bitgen_t *bitgen_state, double shape) nogil - - float random_float(bitgen_t *bitgen_state) nogil - float random_standard_exponential_f(bitgen_t *bitgen_state) nogil - float random_standard_exponential_zig_f(bitgen_t *bitgen_state) nogil - float random_gauss_zig_f(bitgen_t* bitgen_state) nogil - float random_standard_gamma_f(bitgen_t *bitgen_state, float shape) nogil - float random_standard_gamma_zig_f(bitgen_t *bitgen_state, float shape) nogil - - int64_t random_positive_int64(bitgen_t *bitgen_state) nogil - int32_t random_positive_int32(bitgen_t *bitgen_state) nogil - int64_t random_positive_int(bitgen_t *bitgen_state) nogil - uint64_t random_uint(bitgen_t *bitgen_state) nogil - - double random_normal_zig(bitgen_t *bitgen_state, double loc, double scale) nogil - - double random_gamma(bitgen_t *bitgen_state, double shape, double scale) nogil - float random_gamma_float(bitgen_t *bitgen_state, float shape, float scale) nogil - - double random_exponential(bitgen_t *bitgen_state, double scale) nogil - double random_uniform(bitgen_t *bitgen_state, double lower, double range) nogil - double random_beta(bitgen_t *bitgen_state, double a, double b) nogil - double random_chisquare(bitgen_t *bitgen_state, double df) nogil - double random_f(bitgen_t *bitgen_state, double dfnum, double dfden) nogil - double random_standard_cauchy(bitgen_t *bitgen_state) nogil - double random_pareto(bitgen_t *bitgen_state, double a) nogil - double random_weibull(bitgen_t *bitgen_state, double a) nogil - double random_power(bitgen_t *bitgen_state, double a) nogil - double random_laplace(bitgen_t *bitgen_state, double loc, double scale) nogil - double random_gumbel(bitgen_t *bitgen_state, double loc, double scale) nogil - double random_logistic(bitgen_t *bitgen_state, double loc, double scale) nogil - double random_lognormal(bitgen_t *bitgen_state, double mean, double sigma) nogil - double random_rayleigh(bitgen_t *bitgen_state, double mode) nogil - double random_standard_t(bitgen_t *bitgen_state, double df) nogil - double random_noncentral_chisquare(bitgen_t *bitgen_state, double df, - double nonc) nogil - double random_noncentral_f(bitgen_t *bitgen_state, double dfnum, - 
double dfden, double nonc) nogil - double random_wald(bitgen_t *bitgen_state, double mean, double scale) nogil - double random_vonmises(bitgen_t *bitgen_state, double mu, double kappa) nogil - double random_triangular(bitgen_t *bitgen_state, double left, double mode, - double right) nogil - - int64_t random_poisson(bitgen_t *bitgen_state, double lam) nogil - int64_t random_negative_binomial(bitgen_t *bitgen_state, double n, double p) nogil - int64_t random_binomial(bitgen_t *bitgen_state, double p, int64_t n, binomial_t *binomial) nogil - int64_t random_logseries(bitgen_t *bitgen_state, double p) nogil - int64_t random_geometric_search(bitgen_t *bitgen_state, double p) nogil - int64_t random_geometric_inversion(bitgen_t *bitgen_state, double p) nogil - int64_t random_geometric(bitgen_t *bitgen_state, double p) nogil - int64_t random_zipf(bitgen_t *bitgen_state, double a) nogil - int64_t random_hypergeometric(bitgen_t *bitgen_state, int64_t good, int64_t bad, - int64_t sample) nogil - - uint64_t random_interval(bitgen_t *bitgen_state, uint64_t max) nogil - - # Generate random uint64 numbers in closed interval [off, off + rng]. - uint64_t random_bounded_uint64(bitgen_t *bitgen_state, - uint64_t off, uint64_t rng, - uint64_t mask, bint use_masked) nogil - - # Generate random uint32 numbers in closed interval [off, off + rng]. - uint32_t random_buffered_bounded_uint32(bitgen_t *bitgen_state, - uint32_t off, uint32_t rng, - uint32_t mask, bint use_masked, - int *bcnt, uint32_t *buf) nogil - uint16_t random_buffered_bounded_uint16(bitgen_t *bitgen_state, - uint16_t off, uint16_t rng, - uint16_t mask, bint use_masked, - int *bcnt, uint32_t *buf) nogil - uint8_t random_buffered_bounded_uint8(bitgen_t *bitgen_state, - uint8_t off, uint8_t rng, - uint8_t mask, bint use_masked, - int *bcnt, uint32_t *buf) nogil - np.npy_bool random_buffered_bounded_bool(bitgen_t *bitgen_state, - np.npy_bool off, np.npy_bool rng, - np.npy_bool mask, bint use_masked, - int *bcnt, uint32_t *buf) nogil - - void random_bounded_uint64_fill(bitgen_t *bitgen_state, - uint64_t off, uint64_t rng, np.npy_intp cnt, - bint use_masked, - uint64_t *out) nogil - void random_bounded_uint32_fill(bitgen_t *bitgen_state, - uint32_t off, uint32_t rng, np.npy_intp cnt, - bint use_masked, - uint32_t *out) nogil - void random_bounded_uint16_fill(bitgen_t *bitgen_state, - uint16_t off, uint16_t rng, np.npy_intp cnt, - bint use_masked, - uint16_t *out) nogil - void random_bounded_uint8_fill(bitgen_t *bitgen_state, - uint8_t off, uint8_t rng, np.npy_intp cnt, - bint use_masked, - uint8_t *out) nogil - void random_bounded_bool_fill(bitgen_t *bitgen_state, - np.npy_bool off, np.npy_bool rng, np.npy_intp cnt, - bint use_masked, - np.npy_bool *out) nogil - - void random_multinomial(bitgen_t *bitgen_state, int64_t n, int64_t *mnix, - double *pix, np.npy_intp d, binomial_t *binomial) nogil diff --git a/numpy/random/entropy.pyx b/numpy/random/entropy.pyx deleted file mode 100644 index 95bf7c177..000000000 --- a/numpy/random/entropy.pyx +++ /dev/null @@ -1,155 +0,0 @@ -cimport numpy as np -import numpy as np - -from libc.stdint cimport uint32_t, uint64_t - -__all__ = ['random_entropy', 'seed_by_array'] - -np.import_array() - -cdef extern from "src/splitmix64/splitmix64.h": - cdef uint64_t splitmix64_next(uint64_t *state) nogil - -cdef extern from "src/entropy/entropy.h": - cdef bint entropy_getbytes(void* dest, size_t size) - cdef bint entropy_fallback_getbytes(void *dest, size_t size) - -cdef Py_ssize_t compute_numel(size): - cdef Py_ssize_t i, n = 1 
- if isinstance(size, tuple): - for i in range(len(size)): - n *= size[i] - else: - n = size - return n - - -def seed_by_array(object seed, Py_ssize_t n): - """ - Transforms a seed array into an initial state - - Parameters - ---------- - seed: ndarray, 1d, uint64 - Array to use. If seed is a scalar, promote to array. - n : int - Number of 64-bit unsigned integers required - - Notes - ----- - Uses splitmix64 to perform the transformation - """ - cdef uint64_t seed_copy = 0 - cdef uint64_t[::1] seed_array - cdef uint64_t[::1] initial_state - cdef Py_ssize_t seed_size, iter_bound - cdef int i, loc = 0 - - if hasattr(seed, 'squeeze'): - seed = seed.squeeze() - arr = np.asarray(seed) - if arr.shape == (): - err_msg = 'Scalar seeds must be integers between 0 and 2**64 - 1' - if not np.isreal(arr): - raise TypeError(err_msg) - int_seed = int(seed) - if int_seed != seed: - raise TypeError(err_msg) - if int_seed < 0 or int_seed > 2**64 - 1: - raise ValueError(err_msg) - seed_array = np.array([int_seed], dtype=np.uint64) - elif issubclass(arr.dtype.type, np.inexact): - raise TypeError('seed array must be integers') - else: - err_msg = "Seed values must be integers between 0 and 2**64 - 1" - obj = np.asarray(seed).astype(np.object) - if obj.ndim != 1: - raise ValueError('Array-valued seeds must be 1-dimensional') - if not np.isreal(obj).all(): - raise TypeError(err_msg) - if ((obj > int(2**64 - 1)) | (obj < 0)).any(): - raise ValueError(err_msg) - try: - obj_int = obj.astype(np.uint64, casting='unsafe') - except ValueError: - raise ValueError(err_msg) - if not (obj == obj_int).all(): - raise TypeError(err_msg) - seed_array = obj_int - - seed_size = seed_array.shape[0] - iter_bound = n if n > seed_size else seed_size - - initial_state = <np.ndarray>np.empty(n, dtype=np.uint64) - for i in range(iter_bound): - if i < seed_size: - seed_copy ^= seed_array[i] - initial_state[loc] = splitmix64_next(&seed_copy) - loc += 1 - if loc == n: - loc = 0 - - return np.array(initial_state) - - -def random_entropy(size=None, source='system'): - """ - random_entropy(size=None, source='system') - - Read entropy from the system cryptographic provider - - Parameters - ---------- - size : int or tuple of ints, optional - Output shape. If the given shape is, e.g., ``(m, n, k)``, then - ``m * n * k`` samples are drawn. Default is None, in which case a - single value is returned. - source : str {'system', 'fallback'} - Source of entropy. 'system' uses system cryptographic pool. - 'fallback' uses a hash of the time and process id. - - Returns - ------- - entropy : scalar or array - Entropy bits in 32-bit unsigned integers. A scalar is returned if size - is `None`. - - Notes - ----- - On Unix-like machines, reads from ``/dev/urandom``. On Windows machines - reads from the RSA algorithm provided by the cryptographic service - provider. - - This function reads from the system entropy pool and so samples are - not reproducible. In particular, it does *NOT* make use of a - BitGenerator, and so ``seed`` and setting ``state`` have no - effect. - - Raises RuntimeError if the command fails. 
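The two deleted helpers above are compact enough to sketch in pure Python. The sketch below is illustrative only: the helper names are hypothetical, the constants are the published SplitMix64 ones, and os.urandom stands in for the removed system-entropy read (it draws from the same OS pool, /dev/urandom on Unix, that the Notes describe):

    import os
    import numpy as np

    MASK64 = (1 << 64) - 1

    def splitmix64_next(state):
        # One SplitMix64 step: advance the state, return (output, new state).
        state = (state + 0x9E3779B97F4A7C15) & MASK64
        z = state
        z = ((z ^ (z >> 30)) * 0xBF58476D1CE4E5B9) & MASK64
        z = ((z ^ (z >> 27)) * 0x94D049BB133111EB) & MASK64
        return z ^ (z >> 31), state

    def seed_by_array_sketch(seed_words, n):
        # Mirror the loop in seed_by_array: XOR each seed word into the
        # SplitMix64 state, emit one 64-bit output per iteration, and wrap
        # around the output buffer when the seed is longer than n.
        out = np.zeros(n, dtype=np.uint64)
        state = 0
        loc = 0
        for i in range(max(n, len(seed_words))):
            if i < len(seed_words):
                state ^= int(seed_words[i]) & MASK64
            val, state = splitmix64_next(state)
            out[loc] = val
            loc = (loc + 1) % n
        return out

    def system_entropy_sketch(n_words):
        # Rough stand-in for random_entropy(source='system'): read 32-bit
        # words from the operating system's cryptographic entropy pool.
        return np.frombuffer(os.urandom(4 * n_words), dtype=np.uint32)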
- """ - cdef bint success = True - cdef Py_ssize_t n = 0 - cdef uint32_t random = 0 - cdef uint32_t [:] randoms - - if source not in ('system', 'fallback'): - raise ValueError('Unknown value in source.') - - if size is None: - if source == 'system': - success = entropy_getbytes(<void *>&random, 4) - else: - success = entropy_fallback_getbytes(<void *>&random, 4) - else: - n = compute_numel(size) - randoms = np.zeros(n, dtype=np.uint32) - if source == 'system': - success = entropy_getbytes(<void *>(&randoms[0]), 4 * n) - else: - success = entropy_fallback_getbytes(<void *>(&randoms[0]), 4 * n) - if not success: - raise RuntimeError('Unable to read from system cryptographic provider') - - if n == 0: - return random - return np.asarray(randoms).reshape(size) diff --git a/numpy/random/src/aligned_malloc/aligned_malloc.h b/numpy/random/include/aligned_malloc.h index ea24f6d23..ea24f6d23 100644 --- a/numpy/random/src/aligned_malloc/aligned_malloc.h +++ b/numpy/random/include/aligned_malloc.h diff --git a/numpy/core/include/numpy/random/bitgen.h b/numpy/random/include/bitgen.h index 0adaaf2ee..83c2858dd 100644 --- a/numpy/core/include/numpy/random/bitgen.h +++ b/numpy/random/include/bitgen.h @@ -6,7 +6,7 @@ #include <stdbool.h> #include <stdint.h> -/* Must match the declaration in numpy/random/common.pxd */ +/* Must match the declaration in numpy/random/<any>.pxd */ typedef struct bitgen { void *state; diff --git a/numpy/random/src/distributions/distributions.h b/numpy/random/include/distributions.h index c8cdfd20f..c02ea605e 100644 --- a/numpy/random/src/distributions/distributions.h +++ b/numpy/random/include/distributions.h @@ -1,15 +1,14 @@ #ifndef _RANDOMDGEN__DISTRIBUTIONS_H_ #define _RANDOMDGEN__DISTRIBUTIONS_H_ -#pragma once +#include "Python.h" +#include "numpy/npy_common.h" #include <stddef.h> #include <stdbool.h> #include <stdint.h> -#include "Python.h" -#include "numpy/npy_common.h" #include "numpy/npy_math.h" -#include "numpy/random/bitgen.h" +#include "include/bitgen.h" /* * RAND_INT_TYPE is used to share integer generators with RandomState which @@ -43,11 +42,11 @@ typedef struct s_binomial_t { int has_binomial; /* !=0: following parameters initialized for binomial */ double psave; - int64_t nsave; + RAND_INT_TYPE nsave; double r; double q; double fm; - int64_t m; + RAND_INT_TYPE m; double p1; double xm; double xl; @@ -60,28 +59,10 @@ typedef struct s_binomial_t { double p4; } binomial_t; -/* Inline generators for internal use */ -static NPY_INLINE uint32_t next_uint32(bitgen_t *bitgen_state) { - return bitgen_state->next_uint32(bitgen_state->state); -} - -static NPY_INLINE uint64_t next_uint64(bitgen_t *bitgen_state) { - return bitgen_state->next_uint64(bitgen_state->state); -} - -static NPY_INLINE float next_float(bitgen_t *bitgen_state) { - return (next_uint32(bitgen_state) >> 9) * (1.0f / 8388608.0f); -} - -static NPY_INLINE double next_double(bitgen_t *bitgen_state) { - return bitgen_state->next_double(bitgen_state->state); -} - -DECLDIR double loggam(double x); - -DECLDIR float random_float(bitgen_t *bitgen_state); -DECLDIR double random_double(bitgen_t *bitgen_state); -DECLDIR void random_double_fill(bitgen_t *bitgen_state, npy_intp cnt, double *out); +DECLDIR float random_standard_uniform_f(bitgen_t *bitgen_state); +DECLDIR double random_standard_uniform(bitgen_t *bitgen_state); +DECLDIR void random_standard_uniform_fill(bitgen_t *, npy_intp, double *); +DECLDIR void random_standard_uniform_fill_f(bitgen_t *, npy_intp, float *); DECLDIR int64_t 
random_positive_int64(bitgen_t *bitgen_state); DECLDIR int32_t random_positive_int32(bitgen_t *bitgen_state); @@ -89,37 +70,25 @@ DECLDIR int64_t random_positive_int(bitgen_t *bitgen_state); DECLDIR uint64_t random_uint(bitgen_t *bitgen_state); DECLDIR double random_standard_exponential(bitgen_t *bitgen_state); -DECLDIR void random_standard_exponential_fill(bitgen_t *bitgen_state, npy_intp cnt, - double *out); DECLDIR float random_standard_exponential_f(bitgen_t *bitgen_state); DECLDIR double random_standard_exponential_zig(bitgen_t *bitgen_state); -DECLDIR void random_standard_exponential_zig_fill(bitgen_t *bitgen_state, - npy_intp cnt, double *out); DECLDIR float random_standard_exponential_zig_f(bitgen_t *bitgen_state); - -/* -DECLDIR double random_gauss(bitgen_t *bitgen_state); -DECLDIR float random_gauss_f(bitgen_t *bitgen_state); -*/ -DECLDIR double random_gauss_zig(bitgen_t *bitgen_state); -DECLDIR float random_gauss_zig_f(bitgen_t *bitgen_state); -DECLDIR void random_gauss_zig_fill(bitgen_t *bitgen_state, npy_intp cnt, - double *out); - -/* +DECLDIR void random_standard_exponential_fill(bitgen_t *, npy_intp, double *); +DECLDIR void random_standard_exponential_fill_f(bitgen_t *, npy_intp, float *); +DECLDIR void random_standard_exponential_zig_fill(bitgen_t *, npy_intp, double *); +DECLDIR void random_standard_exponential_zig_fill_f(bitgen_t *, npy_intp, float *); + +DECLDIR double random_standard_normal(bitgen_t *bitgen_state); +DECLDIR float random_standard_normal_f(bitgen_t *bitgen_state); +DECLDIR void random_standard_normal_fill(bitgen_t *, npy_intp, double *); +DECLDIR void random_standard_normal_fill_f(bitgen_t *, npy_intp, float *); DECLDIR double random_standard_gamma(bitgen_t *bitgen_state, double shape); DECLDIR float random_standard_gamma_f(bitgen_t *bitgen_state, float shape); -*/ -DECLDIR double random_standard_gamma_zig(bitgen_t *bitgen_state, double shape); -DECLDIR float random_standard_gamma_zig_f(bitgen_t *bitgen_state, float shape); -/* DECLDIR double random_normal(bitgen_t *bitgen_state, double loc, double scale); -*/ -DECLDIR double random_normal_zig(bitgen_t *bitgen_state, double loc, double scale); DECLDIR double random_gamma(bitgen_t *bitgen_state, double shape, double scale); -DECLDIR float random_gamma_float(bitgen_t *bitgen_state, float shape, float scale); +DECLDIR float random_gamma_f(bitgen_t *bitgen_state, float shape, float scale); DECLDIR double random_exponential(bitgen_t *bitgen_state, double scale); DECLDIR double random_uniform(bitgen_t *bitgen_state, double lower, double range); @@ -147,17 +116,16 @@ DECLDIR double random_triangular(bitgen_t *bitgen_state, double left, double mod DECLDIR RAND_INT_TYPE random_poisson(bitgen_t *bitgen_state, double lam); DECLDIR RAND_INT_TYPE random_negative_binomial(bitgen_t *bitgen_state, double n, - double p); -DECLDIR RAND_INT_TYPE random_binomial(bitgen_t *bitgen_state, double p, RAND_INT_TYPE n, - binomial_t *binomial); + double p); + +DECLDIR int64_t random_binomial(bitgen_t *bitgen_state, double p, + int64_t n, binomial_t *binomial); + DECLDIR RAND_INT_TYPE random_logseries(bitgen_t *bitgen_state, double p); -DECLDIR RAND_INT_TYPE random_geometric_search(bitgen_t *bitgen_state, double p); -DECLDIR RAND_INT_TYPE random_geometric_inversion(bitgen_t *bitgen_state, double p); DECLDIR RAND_INT_TYPE random_geometric(bitgen_t *bitgen_state, double p); DECLDIR RAND_INT_TYPE random_zipf(bitgen_t *bitgen_state, double a); DECLDIR int64_t random_hypergeometric(bitgen_t *bitgen_state, int64_t good, int64_t bad, 
int64_t sample); - DECLDIR uint64_t random_interval(bitgen_t *bitgen_state, uint64_t max); /* Generate random uint64 numbers in closed interval [off, off + rng]. */ @@ -202,4 +170,33 @@ DECLDIR void random_bounded_bool_fill(bitgen_t *bitgen_state, npy_bool off, DECLDIR void random_multinomial(bitgen_t *bitgen_state, RAND_INT_TYPE n, RAND_INT_TYPE *mnix, double *pix, npy_intp d, binomial_t *binomial); +/* multivariate hypergeometric, "count" method */ +DECLDIR int random_mvhg_count(bitgen_t *bitgen_state, + int64_t total, + size_t num_colors, int64_t *colors, + int64_t nsample, + size_t num_variates, int64_t *variates); + +/* multivariate hypergeometric, "marginals" method */ +DECLDIR void random_mvhg_marginals(bitgen_t *bitgen_state, + int64_t total, + size_t num_colors, int64_t *colors, + int64_t nsample, + size_t num_variates, int64_t *variates); + +/* Common to legacy-distributions.c and distributions.c but not exported */ + +RAND_INT_TYPE random_binomial_btpe(bitgen_t *bitgen_state, + RAND_INT_TYPE n, + double p, + binomial_t *binomial); +RAND_INT_TYPE random_binomial_inversion(bitgen_t *bitgen_state, + RAND_INT_TYPE n, + double p, + binomial_t *binomial); +double random_loggam(double x); +static NPY_INLINE double next_double(bitgen_t *bitgen_state) { + return bitgen_state->next_double(bitgen_state->state); +} + #endif diff --git a/numpy/random/src/legacy/legacy-distributions.h b/numpy/random/include/legacy-distributions.h index 005c4e5d2..6a0fc7dc4 100644 --- a/numpy/random/src/legacy/legacy-distributions.h +++ b/numpy/random/include/legacy-distributions.h @@ -2,7 +2,7 @@ #define _RANDOMDGEN__DISTRIBUTIONS_LEGACY_H_ -#include "../distributions/distributions.h" +#include "distributions.h" typedef struct aug_bitgen { bitgen_t *bit_generator; @@ -16,26 +16,23 @@ extern double legacy_pareto(aug_bitgen_t *aug_state, double a); extern double legacy_weibull(aug_bitgen_t *aug_state, double a); extern double legacy_power(aug_bitgen_t *aug_state, double a); extern double legacy_gamma(aug_bitgen_t *aug_state, double shape, double scale); -extern double legacy_pareto(aug_bitgen_t *aug_state, double a); -extern double legacy_weibull(aug_bitgen_t *aug_state, double a); extern double legacy_chisquare(aug_bitgen_t *aug_state, double df); extern double legacy_noncentral_chisquare(aug_bitgen_t *aug_state, double df, double nonc); - extern double legacy_noncentral_f(aug_bitgen_t *aug_state, double dfnum, double dfden, double nonc); extern double legacy_wald(aug_bitgen_t *aug_state, double mean, double scale); extern double legacy_lognormal(aug_bitgen_t *aug_state, double mean, double sigma); extern double legacy_standard_t(aug_bitgen_t *aug_state, double df); -extern int64_t legacy_negative_binomial(aug_bitgen_t *aug_state, double n, - double p); extern double legacy_standard_cauchy(aug_bitgen_t *state); extern double legacy_beta(aug_bitgen_t *aug_state, double a, double b); extern double legacy_f(aug_bitgen_t *aug_state, double dfnum, double dfden); extern double legacy_normal(aug_bitgen_t *aug_state, double loc, double scale); extern double legacy_standard_gamma(aug_bitgen_t *aug_state, double shape); extern double legacy_exponential(aug_bitgen_t *aug_state, double scale); +extern int64_t legacy_random_binomial(bitgen_t *bitgen_state, double p, + int64_t n, binomial_t *binomial); extern int64_t legacy_negative_binomial(aug_bitgen_t *aug_state, double n, double p); extern int64_t legacy_random_hypergeometric(bitgen_t *bitgen_state, diff --git a/numpy/random/info.py b/numpy/random/info.py deleted file 
mode 100644 index b9fd7f26a..000000000 --- a/numpy/random/info.py +++ /dev/null @@ -1,5 +0,0 @@ -from __future__ import division, absolute_import, print_function - -from .. import __doc__ - -depends = ['core'] diff --git a/numpy/random/legacy_distributions.pxd b/numpy/random/legacy_distributions.pxd deleted file mode 100644 index 7ba058054..000000000 --- a/numpy/random/legacy_distributions.pxd +++ /dev/null @@ -1,48 +0,0 @@ -#cython: language_level=3 - -from libc.stdint cimport int64_t - -import numpy as np -cimport numpy as np - -from .distributions cimport bitgen_t, binomial_t - -cdef extern from "legacy-distributions.h": - - struct aug_bitgen: - bitgen_t *bit_generator - int has_gauss - double gauss - - ctypedef aug_bitgen aug_bitgen_t - - double legacy_gauss(aug_bitgen_t *aug_state) nogil - double legacy_pareto(aug_bitgen_t *aug_state, double a) nogil - double legacy_weibull(aug_bitgen_t *aug_state, double a) nogil - double legacy_standard_gamma(aug_bitgen_t *aug_state, double shape) nogil - double legacy_normal(aug_bitgen_t *aug_state, double loc, double scale) nogil - double legacy_standard_t(aug_bitgen_t *aug_state, double df) nogil - - double legacy_standard_exponential(aug_bitgen_t *aug_state) nogil - double legacy_power(aug_bitgen_t *aug_state, double a) nogil - double legacy_gamma(aug_bitgen_t *aug_state, double shape, double scale) nogil - double legacy_power(aug_bitgen_t *aug_state, double a) nogil - double legacy_chisquare(aug_bitgen_t *aug_state, double df) nogil - double legacy_noncentral_chisquare(aug_bitgen_t *aug_state, double df, - double nonc) nogil - double legacy_noncentral_f(aug_bitgen_t *aug_state, double dfnum, double dfden, - double nonc) nogil - double legacy_wald(aug_bitgen_t *aug_state, double mean, double scale) nogil - double legacy_lognormal(aug_bitgen_t *aug_state, double mean, double sigma) nogil - int64_t legacy_negative_binomial(aug_bitgen_t *aug_state, double n, double p) nogil - int64_t legacy_random_hypergeometric(bitgen_t *bitgen_state, int64_t good, int64_t bad, int64_t sample) nogil - int64_t legacy_random_logseries(bitgen_t *bitgen_state, double p) nogil - int64_t legacy_random_poisson(bitgen_t *bitgen_state, double lam) nogil - int64_t legacy_random_zipf(bitgen_t *bitgen_state, double a) nogil - int64_t legacy_random_geometric(bitgen_t *bitgen_state, double p) nogil - void legacy_random_multinomial(bitgen_t *bitgen_state, long n, long *mnix, double *pix, np.npy_intp d, binomial_t *binomial) nogil - double legacy_standard_cauchy(aug_bitgen_t *state) nogil - double legacy_beta(aug_bitgen_t *aug_state, double a, double b) nogil - double legacy_f(aug_bitgen_t *aug_state, double dfnum, double dfden) nogil - double legacy_exponential(aug_bitgen_t *aug_state, double scale) nogil - double legacy_power(aug_bitgen_t *state, double a) nogil diff --git a/numpy/random/mtrand.pyx b/numpy/random/mtrand.pyx index eb263cd2d..683a771cc 100644 --- a/numpy/random/mtrand.pyx +++ b/numpy/random/mtrand.pyx @@ -5,19 +5,100 @@ import warnings import numpy as np -from .bounded_integers import _integers_types -from .mt19937 import MT19937 as _MT19937 from cpython.pycapsule cimport PyCapsule_IsValid, PyCapsule_GetPointer from cpython cimport (Py_INCREF, PyFloat_AsDouble) -from libc cimport string - cimport cython cimport numpy as np -from .bounded_integers cimport * -from .common cimport * -from .distributions cimport * -from .legacy_distributions cimport * +from libc cimport string +from libc.stdint cimport int64_t, uint64_t +from ._bounded_integers cimport (_rand_bool, 
_rand_int32, _rand_int64, + _rand_int16, _rand_int8, _rand_uint64, _rand_uint32, _rand_uint16, + _rand_uint8,) +from ._bounded_integers import _integers_types +from ._mt19937 import MT19937 as _MT19937 +from ._bit_generator cimport bitgen_t +from ._common cimport (POISSON_LAM_MAX, CONS_POSITIVE, CONS_NONE, + CONS_NON_NEGATIVE, CONS_BOUNDED_0_1, CONS_BOUNDED_GT_0_1, CONS_GTE_1, + CONS_GT_1, LEGACY_CONS_POISSON, + double_fill, cont, kahan_sum, cont_broadcast_3, + check_array_constraint, check_constraint, disc, discrete_broadcast_iii, + ) + +cdef extern from "include/distributions.h": + struct s_binomial_t: + int has_binomial + double psave + int64_t nsave + double r + double q + double fm + int64_t m + double p1 + double xm + double xl + double xr + double c + double laml + double lamr + double p2 + double p3 + double p4 + + ctypedef s_binomial_t binomial_t + + void random_standard_uniform_fill(bitgen_t* bitgen_state, np.npy_intp cnt, double *out) nogil + int64_t random_positive_int(bitgen_t *bitgen_state) nogil + double random_uniform(bitgen_t *bitgen_state, double lower, double range) nogil + double random_vonmises(bitgen_t *bitgen_state, double mu, double kappa) nogil + double random_laplace(bitgen_t *bitgen_state, double loc, double scale) nogil + double random_gumbel(bitgen_t *bitgen_state, double loc, double scale) nogil + double random_logistic(bitgen_t *bitgen_state, double loc, double scale) nogil + double random_rayleigh(bitgen_t *bitgen_state, double mode) nogil + double random_triangular(bitgen_t *bitgen_state, double left, double mode, + double right) nogil + uint64_t random_interval(bitgen_t *bitgen_state, uint64_t max) nogil + +cdef extern from "include/legacy-distributions.h": + struct aug_bitgen: + bitgen_t *bit_generator + int has_gauss + double gauss + + ctypedef aug_bitgen aug_bitgen_t + + double legacy_gauss(aug_bitgen_t *aug_state) nogil + double legacy_pareto(aug_bitgen_t *aug_state, double a) nogil + double legacy_weibull(aug_bitgen_t *aug_state, double a) nogil + double legacy_standard_gamma(aug_bitgen_t *aug_state, double shape) nogil + double legacy_normal(aug_bitgen_t *aug_state, double loc, double scale) nogil + double legacy_standard_t(aug_bitgen_t *aug_state, double df) nogil + + double legacy_standard_exponential(aug_bitgen_t *aug_state) nogil + double legacy_power(aug_bitgen_t *aug_state, double a) nogil + double legacy_gamma(aug_bitgen_t *aug_state, double shape, double scale) nogil + double legacy_power(aug_bitgen_t *aug_state, double a) nogil + double legacy_chisquare(aug_bitgen_t *aug_state, double df) nogil + double legacy_noncentral_chisquare(aug_bitgen_t *aug_state, double df, + double nonc) nogil + double legacy_noncentral_f(aug_bitgen_t *aug_state, double dfnum, double dfden, + double nonc) nogil + double legacy_wald(aug_bitgen_t *aug_state, double mean, double scale) nogil + double legacy_lognormal(aug_bitgen_t *aug_state, double mean, double sigma) nogil + int64_t legacy_random_binomial(bitgen_t *bitgen_state, double p, + int64_t n, binomial_t *binomial) nogil + int64_t legacy_negative_binomial(aug_bitgen_t *aug_state, double n, double p) nogil + int64_t legacy_random_hypergeometric(bitgen_t *bitgen_state, int64_t good, int64_t bad, int64_t sample) nogil + int64_t legacy_random_logseries(bitgen_t *bitgen_state, double p) nogil + int64_t legacy_random_poisson(bitgen_t *bitgen_state, double lam) nogil + int64_t legacy_random_zipf(bitgen_t *bitgen_state, double a) nogil + int64_t legacy_random_geometric(bitgen_t *bitgen_state, double p) nogil + void 
legacy_random_multinomial(bitgen_t *bitgen_state, long n, long *mnix, double *pix, np.npy_intp d, binomial_t *binomial) nogil + double legacy_standard_cauchy(aug_bitgen_t *state) nogil + double legacy_beta(aug_bitgen_t *aug_state, double a, double b) nogil + double legacy_f(aug_bitgen_t *aug_state, double dfnum, double dfden) nogil + double legacy_exponential(aug_bitgen_t *aug_state, double scale) nogil + double legacy_power(aug_bitgen_t *state, double a) nogil np.import_array() @@ -83,8 +164,8 @@ cdef class RandomState: See Also -------- Generator - mt19937.MT19937 - Bit_Generators + MT19937 + numpy.random.BitGenerator """ cdef public object _bit_generator @@ -329,7 +410,7 @@ cdef class RandomState: """ cdef double temp - return double_fill(&random_double_fill, &self._bitgen, size, self.lock, None) + return double_fill(&random_standard_uniform_fill, &self._bitgen, size, self.lock, None) def random(self, size=None): """ @@ -567,7 +648,7 @@ cdef class RandomState: See Also -------- - random.random_integers : similar to `randint`, only for the closed + random_integers : similar to `randint`, only for the closed interval [`low`, `high`], and 1 is the lowest value if `high` is omitted. @@ -985,7 +1066,7 @@ cdef class RandomState: .. note:: This is a convenience function for users porting code from Matlab, - and wraps `numpy.random.random_sample`. That function takes a + and wraps `random_sample`. That function takes a tuple to specify the size of the output, which is consistent with other NumPy functions like `numpy.zeros` and `numpy.ones`. @@ -1029,7 +1110,7 @@ cdef class RandomState: .. note:: This is a convenience function for users porting code from Matlab, - and wraps `numpy.random.standard_normal`. That function takes a + and wraps `standard_normal`. That function takes a tuple to specify the size of the output, which is consistent with other NumPy functions like `numpy.zeros` and `numpy.ones`. @@ -1289,8 +1370,8 @@ cdef class RandomState: The function has its peak at the mean, and its "spread" increases with the standard deviation (the function reaches 0.607 times its maximum at :math:`x + \\sigma` and :math:`x - \\sigma` [2]_). This implies that - `numpy.random.normal` is more likely to return samples lying close to - the mean, rather than those far away. + normal is more likely to return samples lying close to the mean, rather + than those far away. 
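The 0.607 quoted in the note above is exp(-1/2), the value of the normal density one standard deviation from the mean relative to its peak; a one-line check:

    import math
    # pdf(mu + sigma) / pdf(mu) for any normal distribution:
    print(math.exp(-0.5))  # 0.6065..., the "0.607 times its maximum" above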
References ---------- @@ -3086,7 +3167,9 @@ cdef class RandomState: for i in range(cnt): _dp = (<double*>np.PyArray_MultiIter_DATA(it, 1))[0] _in = (<long*>np.PyArray_MultiIter_DATA(it, 2))[0] - (<long*>np.PyArray_MultiIter_DATA(it, 0))[0] = random_binomial(&self._bitgen, _dp, _in, &self._binomial) + (<long*>np.PyArray_MultiIter_DATA(it, 0))[0] = \ + legacy_random_binomial(&self._bitgen, _dp, _in, + &self._binomial) np.PyArray_MultiIter_NEXT(it) @@ -3099,7 +3182,8 @@ cdef class RandomState: if size is None: with self.lock: - return random_binomial(&self._bitgen, _dp, _in, &self._binomial) + return <long>legacy_random_binomial(&self._bitgen, _dp, _in, + &self._binomial) randoms = <np.ndarray>np.empty(size, int) cnt = np.PyArray_SIZE(randoms) @@ -3107,8 +3191,8 @@ cdef class RandomState: with self.lock, nogil: for i in range(cnt): - randoms_data[i] = random_binomial(&self._bitgen, _dp, _in, - &self._binomial) + randoms_data[i] = legacy_random_binomial(&self._bitgen, _dp, _in, + &self._binomial) return randoms @@ -3517,7 +3601,7 @@ cdef class RandomState: # Convert to int64, if necessary, to use int64 infrastructure ongood = ongood.astype(np.int64) onbad = onbad.astype(np.int64) - onbad = onbad.astype(np.int64) + onsample = onsample.astype(np.int64) out = discrete_broadcast_iii(&legacy_random_hypergeometric,&self._bitgen, size, self.lock, ongood, 'ngood', CONS_NON_NEGATIVE, onbad, 'nbad', CONS_NON_NEGATIVE, @@ -4070,7 +4154,7 @@ cdef class RandomState: # Fast, statically typed path: shuffle the underlying buffer. # Only for non-empty, 1d objects of class ndarray (subclasses such # as MaskedArrays may not support this approach). - x_ptr = <char*><size_t>x.ctypes.data + x_ptr = <char*><size_t>np.PyArray_DATA(x) stride = x.strides[0] itemsize = x.dtype.itemsize # As the array x could contain python objects we use a buffer @@ -4078,7 +4162,7 @@ cdef class RandomState: # within the buffer and erroneously decrementing it's refcount # when the function exits. buf = np.empty(itemsize, dtype=np.int8) # GC'd at function exit - buf_ptr = <char*><size_t>buf.ctypes.data + buf_ptr = <char*><size_t>np.PyArray_DATA(buf) with self.lock: # We trick gcc into providing a specialized implementation for # the most common case, yielding a ~33% performance improvement. diff --git a/numpy/random/setup.py b/numpy/random/setup.py index a820d326e..ca01250f4 100644 --- a/numpy/random/setup.py +++ b/numpy/random/setup.py @@ -34,8 +34,6 @@ def configuration(parent_package='', top_path=None): defs.append(('NPY_NO_DEPRECATED_API', 0)) config.add_data_dir('tests') - config.add_data_files('common.pxd') - config.add_data_files('bit_generator.pxd') EXTRA_LINK_ARGS = [] # Math lib @@ -61,46 +59,34 @@ def configuration(parent_package='', top_path=None): # One can force emulated 128-bit arithmetic if one wants. 
#PCG64_DEFS += [('PCG_FORCE_EMULATED_128BIT_MATH', '1')] - config.add_extension('entropy', - sources=['entropy.c', 'src/entropy/entropy.c'] + - [generate_libraries], - libraries=EXTRA_LIBRARIES, - extra_compile_args=EXTRA_COMPILE_ARGS, - extra_link_args=EXTRA_LINK_ARGS, - depends=[join('src', 'splitmix64', 'splitmix.h'), - join('src', 'entropy', 'entropy.h'), - 'entropy.pyx', - ], - define_macros=defs, - ) for gen in ['mt19937']: # gen.pyx, src/gen/gen.c, src/gen/gen-jump.c - config.add_extension(gen, - sources=['{0}.c'.format(gen), + config.add_extension('_{0}'.format(gen), + sources=['_{0}.c'.format(gen), 'src/{0}/{0}.c'.format(gen), 'src/{0}/{0}-jump.c'.format(gen)], include_dirs=['.', 'src', join('src', gen)], libraries=EXTRA_LIBRARIES, extra_compile_args=EXTRA_COMPILE_ARGS, extra_link_args=EXTRA_LINK_ARGS, - depends=['%s.pyx' % gen], + depends=['_%s.pyx' % gen], define_macros=defs, ) for gen in ['philox', 'pcg64', 'sfc64']: # gen.pyx, src/gen/gen.c _defs = defs + PCG64_DEFS if gen == 'pcg64' else defs - config.add_extension(gen, - sources=['{0}.c'.format(gen), + config.add_extension('_{0}'.format(gen), + sources=['_{0}.c'.format(gen), 'src/{0}/{0}.c'.format(gen)], include_dirs=['.', 'src', join('src', gen)], libraries=EXTRA_LIBRARIES, extra_compile_args=EXTRA_COMPILE_ARGS, extra_link_args=EXTRA_LINK_ARGS, - depends=['%s.pyx' % gen, 'bit_generator.pyx', + depends=['_%s.pyx' % gen, 'bit_generator.pyx', 'bit_generator.pxd'], define_macros=_defs, ) - for gen in ['common', 'bit_generator']: + for gen in ['_common', '_bit_generator']: # gen.pyx config.add_extension(gen, sources=['{0}.c'.format(gen)], @@ -114,9 +100,11 @@ def configuration(parent_package='', top_path=None): other_srcs = [ 'src/distributions/logfactorial.c', 'src/distributions/distributions.c', + 'src/distributions/random_mvhg_count.c', + 'src/distributions/random_mvhg_marginals.c', 'src/distributions/random_hypergeometric.c', ] - for gen in ['generator', 'bounded_integers']: + for gen in ['_generator', '_bounded_integers']: # gen.pyx, src/distributions/distributions.c config.add_extension(gen, sources=['{0}.c'.format(gen)] + other_srcs, @@ -128,7 +116,6 @@ def configuration(parent_package='', top_path=None): define_macros=defs, ) config.add_extension('mtrand', - # mtrand does not depend on random_hypergeometric.c. sources=['mtrand.c', 'src/legacy/legacy-distributions.c', 'src/distributions/logfactorial.c', diff --git a/numpy/random/src/aligned_malloc/aligned_malloc.c b/numpy/random/src/aligned_malloc/aligned_malloc.c deleted file mode 100644 index 6e8192cfb..000000000 --- a/numpy/random/src/aligned_malloc/aligned_malloc.c +++ /dev/null @@ -1,9 +0,0 @@ -#include "aligned_malloc.h" - -static NPY_INLINE void *PyArray_realloc_aligned(void *p, size_t n); - -static NPY_INLINE void *PyArray_malloc_aligned(size_t n); - -static NPY_INLINE void *PyArray_calloc_aligned(size_t n, size_t s); - -static NPY_INLINE void PyArray_free_aligned(void *p);
\ No newline at end of file diff --git a/numpy/random/src/distributions/distributions.c b/numpy/random/src/distributions/distributions.c index 65257ecbf..b382ead0b 100644 --- a/numpy/random/src/distributions/distributions.c +++ b/numpy/random/src/distributions/distributions.c @@ -1,4 +1,4 @@ -#include "distributions.h" +#include "include/distributions.h" #include "ziggurat_constants.h" #include "logfactorial.h" @@ -6,90 +6,52 @@ #include <intrin.h> #endif -/* Random generators for external use */ -float random_float(bitgen_t *bitgen_state) { return next_float(bitgen_state); } - -double random_double(bitgen_t *bitgen_state) { - return next_double(bitgen_state); +/* Inline generators for internal use */ +static NPY_INLINE uint32_t next_uint32(bitgen_t *bitgen_state) { + return bitgen_state->next_uint32(bitgen_state->state); } - -static NPY_INLINE double next_standard_exponential(bitgen_t *bitgen_state) { - return -log(1.0 - next_double(bitgen_state)); +static NPY_INLINE uint64_t next_uint64(bitgen_t *bitgen_state) { + return bitgen_state->next_uint64(bitgen_state->state); } -double random_standard_exponential(bitgen_t *bitgen_state) { - return next_standard_exponential(bitgen_state); +static NPY_INLINE float next_float(bitgen_t *bitgen_state) { + return (next_uint32(bitgen_state) >> 9) * (1.0f / 8388608.0f); } -void random_standard_exponential_fill(bitgen_t *bitgen_state, npy_intp cnt, - double *out) { - npy_intp i; - for (i = 0; i < cnt; i++) { - out[i] = next_standard_exponential(bitgen_state); - } +/* Random generators for external use */ +float random_standard_uniform_f(bitgen_t *bitgen_state) { + return next_float(bitgen_state); } -float random_standard_exponential_f(bitgen_t *bitgen_state) { - return -logf(1.0f - next_float(bitgen_state)); +double random_standard_uniform(bitgen_t *bitgen_state) { + return next_double(bitgen_state); } -void random_double_fill(bitgen_t *bitgen_state, npy_intp cnt, double *out) { +void random_standard_uniform_fill(bitgen_t *bitgen_state, npy_intp cnt, double *out) { npy_intp i; for (i = 0; i < cnt; i++) { out[i] = next_double(bitgen_state); } } -#if 0 -double random_gauss(bitgen_t *bitgen_state) { - if (bitgen_state->has_gauss) { - const double temp = bitgen_state->gauss; - bitgen_state->has_gauss = false; - bitgen_state->gauss = 0.0; - return temp; - } else { - double f, x1, x2, r2; - - do { - x1 = 2.0 * next_double(bitgen_state) - 1.0; - x2 = 2.0 * next_double(bitgen_state) - 1.0; - r2 = x1 * x1 + x2 * x2; - } while (r2 >= 1.0 || r2 == 0.0); - /* Polar method, a more efficient version of the Box-Muller approach. */ - f = sqrt(-2.0 * log(r2) / r2); - /* Keep for next call */ - bitgen_state->gauss = f * x1; - bitgen_state->has_gauss = true; - return f * x2; +void random_standard_uniform_fill_f(bitgen_t *bitgen_state, npy_intp cnt, float *out) { + npy_intp i; + for (i = 0; i < cnt; i++) { + out[i] = next_float(bitgen_state); } } -float random_gauss_f(bitgen_t *bitgen_state) { - if (bitgen_state->has_gauss_f) { - const float temp = bitgen_state->gauss_f; - bitgen_state->has_gauss_f = false; - bitgen_state->gauss_f = 0.0f; - return temp; - } else { - float f, x1, x2, r2; - - do { - x1 = 2.0f * next_float(bitgen_state) - 1.0f; - x2 = 2.0f * next_float(bitgen_state) - 1.0f; - r2 = x1 * x1 + x2 * x2; - } while (r2 >= 1.0 || r2 == 0.0); +double random_standard_exponential(bitgen_t *bitgen_state) { + return -log(1.0 - next_double(bitgen_state)); +} - /* Polar method, a more efficient version of the Box-Muller approach. 
*/ - f = sqrtf(-2.0f * logf(r2) / r2); - /* Keep for next call */ - bitgen_state->gauss_f = f * x1; - bitgen_state->has_gauss_f = true; - return f * x2; +void random_standard_exponential_fill(bitgen_t * bitgen_state, npy_intp cnt, double * out) +{ + npy_intp i; + for (i = 0; i < cnt; i++) { + out[i] = random_standard_exponential(bitgen_state); } } -#endif - -static NPY_INLINE double standard_exponential_zig(bitgen_t *bitgen_state); static double standard_exponential_zig_unlikely(bitgen_t *bitgen_state, uint8_t idx, double x) { @@ -101,11 +63,11 @@ static double standard_exponential_zig_unlikely(bitgen_t *bitgen_state, exp(-x)) { return x; } else { - return standard_exponential_zig(bitgen_state); + return random_standard_exponential_zig(bitgen_state); } } -static NPY_INLINE double standard_exponential_zig(bitgen_t *bitgen_state) { +double random_standard_exponential_zig(bitgen_t *bitgen_state) { uint64_t ri; uint8_t idx; double x; @@ -120,20 +82,26 @@ static NPY_INLINE double standard_exponential_zig(bitgen_t *bitgen_state) { return standard_exponential_zig_unlikely(bitgen_state, idx, x); } -double random_standard_exponential_zig(bitgen_t *bitgen_state) { - return standard_exponential_zig(bitgen_state); +void random_standard_exponential_zig_fill(bitgen_t * bitgen_state, npy_intp cnt, double * out) +{ + npy_intp i; + for (i = 0; i < cnt; i++) { + out[i] = random_standard_exponential_zig(bitgen_state); + } +} + +float random_standard_exponential_f(bitgen_t *bitgen_state) { + return -logf(1.0f - next_float(bitgen_state)); } -void random_standard_exponential_zig_fill(bitgen_t *bitgen_state, npy_intp cnt, - double *out) { +void random_standard_exponential_fill_f(bitgen_t * bitgen_state, npy_intp cnt, float * out) +{ npy_intp i; for (i = 0; i < cnt; i++) { - out[i] = standard_exponential_zig(bitgen_state); + out[i] = random_standard_exponential_f(bitgen_state); } } -static NPY_INLINE float standard_exponential_zig_f(bitgen_t *bitgen_state); - static float standard_exponential_zig_unlikely_f(bitgen_t *bitgen_state, uint8_t idx, float x) { if (idx == 0) { @@ -144,11 +112,11 @@ static float standard_exponential_zig_unlikely_f(bitgen_t *bitgen_state, expf(-x)) { return x; } else { - return standard_exponential_zig_f(bitgen_state); + return random_standard_exponential_zig_f(bitgen_state); } } -static NPY_INLINE float standard_exponential_zig_f(bitgen_t *bitgen_state) { +float random_standard_exponential_zig_f(bitgen_t *bitgen_state) { uint32_t ri; uint8_t idx; float x; @@ -163,11 +131,15 @@ static NPY_INLINE float standard_exponential_zig_f(bitgen_t *bitgen_state) { return standard_exponential_zig_unlikely_f(bitgen_state, idx, x); } -float random_standard_exponential_zig_f(bitgen_t *bitgen_state) { - return standard_exponential_zig_f(bitgen_state); +void random_standard_exponential_zig_fill_f(bitgen_t * bitgen_state, npy_intp cnt, float * out) +{ + npy_intp i; + for (i = 0; i < cnt; i++) { + out[i] = random_standard_exponential_zig_f(bitgen_state); + } } -static NPY_INLINE double next_gauss_zig(bitgen_t *bitgen_state) { +double random_standard_normal(bitgen_t *bitgen_state) { uint64_t r; int sign; uint64_t rabs; @@ -202,18 +174,14 @@ static NPY_INLINE double next_gauss_zig(bitgen_t *bitgen_state) { } } -double random_gauss_zig(bitgen_t *bitgen_state) { - return next_gauss_zig(bitgen_state); -} - -void random_gauss_zig_fill(bitgen_t *bitgen_state, npy_intp cnt, double *out) { +void random_standard_normal_fill(bitgen_t *bitgen_state, npy_intp cnt, double *out) { npy_intp i; for (i = 0; i < cnt; i++) { - 
out[i] = next_gauss_zig(bitgen_state); + out[i] = random_standard_normal(bitgen_state); } } -float random_gauss_zig_f(bitgen_t *bitgen_state) { +float random_standard_normal_f(bitgen_t *bitgen_state) { uint32_t r; int sign; uint32_t rabs; @@ -247,101 +215,14 @@ float random_gauss_zig_f(bitgen_t *bitgen_state) { } } -/* -static NPY_INLINE double standard_gamma(bitgen_t *bitgen_state, double shape) { - double b, c; - double U, V, X, Y; - - if (shape == 1.0) { - return random_standard_exponential(bitgen_state); - } else if (shape < 1.0) { - for (;;) { - U = next_double(bitgen_state); - V = random_standard_exponential(bitgen_state); - if (U <= 1.0 - shape) { - X = pow(U, 1. / shape); - if (X <= V) { - return X; - } - } else { - Y = -log((1 - U) / shape); - X = pow(1.0 - shape + shape * Y, 1. / shape); - if (X <= (V + Y)) { - return X; - } - } - } - } else { - b = shape - 1. / 3.; - c = 1. / sqrt(9 * b); - for (;;) { - do { - X = random_gauss(bitgen_state); - V = 1.0 + c * X; - } while (V <= 0.0); - - V = V * V * V; - U = next_double(bitgen_state); - if (U < 1.0 - 0.0331 * (X * X) * (X * X)) - return (b * V); - if (log(U) < 0.5 * X * X + b * (1. - V + log(V))) - return (b * V); - } - } -} - -static NPY_INLINE float standard_gamma_float(bitgen_t *bitgen_state, float -shape) { float b, c; float U, V, X, Y; - - if (shape == 1.0f) { - return random_standard_exponential_f(bitgen_state); - } else if (shape < 1.0f) { - for (;;) { - U = next_float(bitgen_state); - V = random_standard_exponential_f(bitgen_state); - if (U <= 1.0f - shape) { - X = powf(U, 1.0f / shape); - if (X <= V) { - return X; - } - } else { - Y = -logf((1.0f - U) / shape); - X = powf(1.0f - shape + shape * Y, 1.0f / shape); - if (X <= (V + Y)) { - return X; - } - } - } - } else { - b = shape - 1.0f / 3.0f; - c = 1.0f / sqrtf(9.0f * b); - for (;;) { - do { - X = random_gauss_f(bitgen_state); - V = 1.0f + c * X; - } while (V <= 0.0f); - - V = V * V * V; - U = next_float(bitgen_state); - if (U < 1.0f - 0.0331f * (X * X) * (X * X)) - return (b * V); - if (logf(U) < 0.5f * X * X + b * (1.0f - V + logf(V))) - return (b * V); - } +void random_standard_normal_fill_f(bitgen_t *bitgen_state, npy_intp cnt, float *out) { + npy_intp i; + for (i = 0; i < cnt; i++) { + out[i] = random_standard_normal_f(bitgen_state); } } - -double random_standard_gamma(bitgen_t *bitgen_state, double shape) { - return standard_gamma(bitgen_state, shape); -} - -float random_standard_gamma_f(bitgen_t *bitgen_state, float shape) { - return standard_gamma_float(bitgen_state, shape); -} -*/ - -static NPY_INLINE double standard_gamma_zig(bitgen_t *bitgen_state, +double random_standard_gamma(bitgen_t *bitgen_state, double shape) { double b, c; double U, V, X, Y; @@ -372,7 +253,7 @@ static NPY_INLINE double standard_gamma_zig(bitgen_t *bitgen_state, c = 1. 
/ sqrt(9 * b); for (;;) { do { - X = random_gauss_zig(bitgen_state); + X = random_standard_normal(bitgen_state); V = 1.0 + c * X; } while (V <= 0.0); @@ -387,7 +268,7 @@ static NPY_INLINE double standard_gamma_zig(bitgen_t *bitgen_state, } } -static NPY_INLINE float standard_gamma_zig_f(bitgen_t *bitgen_state, +float random_standard_gamma_f(bitgen_t *bitgen_state, float shape) { float b, c; float U, V, X, Y; @@ -418,7 +299,7 @@ static NPY_INLINE float standard_gamma_zig_f(bitgen_t *bitgen_state, c = 1.0f / sqrtf(9.0f * b); for (;;) { do { - X = random_gauss_zig_f(bitgen_state); + X = random_standard_normal_f(bitgen_state); V = 1.0f + c * X; } while (V <= 0.0f); @@ -433,14 +314,6 @@ static NPY_INLINE float standard_gamma_zig_f(bitgen_t *bitgen_state, } } -double random_standard_gamma_zig(bitgen_t *bitgen_state, double shape) { - return standard_gamma_zig(bitgen_state, shape); -} - -float random_standard_gamma_zig_f(bitgen_t *bitgen_state, float shape) { - return standard_gamma_zig_f(bitgen_state, shape); -} - int64_t random_positive_int64(bitgen_t *bitgen_state) { return next_uint64(bitgen_state) >> 1; } @@ -470,10 +343,10 @@ uint64_t random_uint(bitgen_t *bitgen_state) { * algorithm comes from SPECFUN by Shanjie Zhang and Jianming Jin and their * book "Computation of Special Functions", 1996, John Wiley & Sons, Inc. * - * If loggam(k+1) is being used to compute log(k!) for an integer k, consider + * If random_loggam(k+1) is being used to compute log(k!) for an integer k, consider * using logfactorial(k) instead. */ -double loggam(double x) { +double random_loggam(double x) { double x0, x2, xp, gl, gl0; RAND_INT_TYPE k, n; @@ -513,12 +386,12 @@ double random_normal(bitgen_t *bitgen_state, double loc, double scale) { } */ -double random_normal_zig(bitgen_t *bitgen_state, double loc, double scale) { - return loc + scale * random_gauss_zig(bitgen_state); +double random_normal(bitgen_t *bitgen_state, double loc, double scale) { + return loc + scale * random_standard_normal(bitgen_state); } double random_exponential(bitgen_t *bitgen_state, double scale) { - return scale * standard_exponential_zig(bitgen_state); + return scale * random_standard_exponential_zig(bitgen_state); } double random_uniform(bitgen_t *bitgen_state, double lower, double range) { @@ -526,11 +399,11 @@ double random_uniform(bitgen_t *bitgen_state, double lower, double range) { } double random_gamma(bitgen_t *bitgen_state, double shape, double scale) { - return scale * random_standard_gamma_zig(bitgen_state, shape); + return scale * random_standard_gamma(bitgen_state, shape); } -float random_gamma_float(bitgen_t *bitgen_state, float shape, float scale) { - return scale * random_standard_gamma_zig_f(bitgen_state, shape); +float random_gamma_f(bitgen_t *bitgen_state, float shape, float scale) { + return scale * random_standard_gamma_f(bitgen_state, shape); } double random_beta(bitgen_t *bitgen_state, double a, double b) { @@ -562,14 +435,14 @@ double random_beta(bitgen_t *bitgen_state, double a, double b) { } } } else { - Ga = random_standard_gamma_zig(bitgen_state, a); - Gb = random_standard_gamma_zig(bitgen_state, b); + Ga = random_standard_gamma(bitgen_state, a); + Gb = random_standard_gamma(bitgen_state, b); return Ga / (Ga + Gb); } } double random_chisquare(bitgen_t *bitgen_state, double df) { - return 2.0 * random_standard_gamma_zig(bitgen_state, df / 2.0); + return 2.0 * random_standard_gamma(bitgen_state, df / 2.0); } double random_f(bitgen_t *bitgen_state, double dfnum, double dfden) { @@ -578,22 +451,22 @@ double 
random_f(bitgen_t *bitgen_state, double dfnum, double dfden) { } double random_standard_cauchy(bitgen_t *bitgen_state) { - return random_gauss_zig(bitgen_state) / random_gauss_zig(bitgen_state); + return random_standard_normal(bitgen_state) / random_standard_normal(bitgen_state); } double random_pareto(bitgen_t *bitgen_state, double a) { - return exp(standard_exponential_zig(bitgen_state) / a) - 1; + return exp(random_standard_exponential_zig(bitgen_state) / a) - 1; } double random_weibull(bitgen_t *bitgen_state, double a) { if (a == 0.0) { return 0.0; } - return pow(standard_exponential_zig(bitgen_state), 1. / a); + return pow(random_standard_exponential_zig(bitgen_state), 1. / a); } double random_power(bitgen_t *bitgen_state, double a) { - return pow(1 - exp(-standard_exponential_zig(bitgen_state)), 1. / a); + return pow(1 - exp(-random_standard_exponential_zig(bitgen_state)), 1. / a); } double random_laplace(bitgen_t *bitgen_state, double loc, double scale) { @@ -634,7 +507,7 @@ double random_logistic(bitgen_t *bitgen_state, double loc, double scale) { } double random_lognormal(bitgen_t *bitgen_state, double mean, double sigma) { - return exp(random_normal_zig(bitgen_state, mean, sigma)); + return exp(random_normal(bitgen_state, mean, sigma)); } double random_rayleigh(bitgen_t *bitgen_state, double mode) { @@ -644,8 +517,8 @@ double random_rayleigh(bitgen_t *bitgen_state, double mode) { double random_standard_t(bitgen_t *bitgen_state, double df) { double num, denom; - num = random_gauss_zig(bitgen_state); - denom = random_standard_gamma_zig(bitgen_state, df / 2); + num = random_standard_normal(bitgen_state); + denom = random_standard_gamma(bitgen_state, df / 2); return sqrt(df / 2) * num / sqrt(denom); } @@ -699,7 +572,7 @@ static RAND_INT_TYPE random_poisson_ptrs(bitgen_t *bitgen_state, double lam) { /* log(V) == log(0.0) ok here */ /* if U==0.0 so that us==0.0, log is ok since always returns */ if ((log(V) + log(invalpha) - log(a / (us * us) + b)) <= - (-lam + k * loglam - loggam(k + 1))) { + (-lam + k * loglam - random_loggam(k + 1))) { return k; } } @@ -901,8 +774,8 @@ RAND_INT_TYPE random_binomial_inversion(bitgen_t *bitgen_state, RAND_INT_TYPE n, return X; } -RAND_INT_TYPE random_binomial(bitgen_t *bitgen_state, double p, RAND_INT_TYPE n, - binomial_t *binomial) { +int64_t random_binomial(bitgen_t *bitgen_state, double p, int64_t n, + binomial_t *binomial) { double q; if ((n == 0LL) || (p == 0.0f)) @@ -934,7 +807,7 @@ double random_noncentral_chisquare(bitgen_t *bitgen_state, double df, } if (1 < df) { const double Chi2 = random_chisquare(bitgen_state, df - 1); - const double n = random_gauss_zig(bitgen_state) + sqrt(nonc); + const double n = random_standard_normal(bitgen_state) + sqrt(nonc); return Chi2 + n * n; } else { const RAND_INT_TYPE i = random_poisson(bitgen_state, nonc / 2.0); @@ -953,7 +826,7 @@ double random_wald(bitgen_t *bitgen_state, double mean, double scale) { double mu_2l; mu_2l = mean / (2 * scale); - Y = random_gauss_zig(bitgen_state); + Y = random_standard_normal(bitgen_state); Y = mean * Y * Y; X = mean + mu_2l * (Y - sqrt(4 * scale * Y + Y * Y)); U = next_double(bitgen_state); @@ -1092,8 +965,8 @@ RAND_INT_TYPE random_zipf(bitgen_t *bitgen_state, double a) { while (1) { double T, U, V, X; - U = 1.0 - random_double(bitgen_state); - V = random_double(bitgen_state); + U = 1.0 - next_double(bitgen_state); + V = next_double(bitgen_state); X = floor(pow(U, -1.0 / am1)); /* * The real result may be above what can be represented in a signed @@ -1478,7 +1351,7 @@ 
uint64_t random_bounded_uint64(bitgen_t *bitgen_state, uint64_t off, uint64_t rng, uint64_t mask, bool use_masked) { if (rng == 0) { return off; - } else if (rng < 0xFFFFFFFFUL) { + } else if (rng <= 0xFFFFFFFFUL) { /* Call 32-bit generator if range in 32-bit. */ if (use_masked) { return off + buffered_bounded_masked_uint32(bitgen_state, rng, mask, NULL, @@ -1592,7 +1465,7 @@ void random_bounded_uint64_fill(bitgen_t *bitgen_state, uint64_t off, for (i = 0; i < cnt; i++) { out[i] = off; } - } else if (rng < 0xFFFFFFFFUL) { + } else if (rng <= 0xFFFFFFFFUL) { uint32_t buf = 0; int bcnt = 0; diff --git a/numpy/random/src/distributions/random_hypergeometric.c b/numpy/random/src/distributions/random_hypergeometric.c index 59a3a4b9b..da5ea9c68 100644 --- a/numpy/random/src/distributions/random_hypergeometric.c +++ b/numpy/random/src/distributions/random_hypergeometric.c @@ -1,6 +1,6 @@ -#include <stdint.h> -#include "distributions.h" +#include "include/distributions.h" #include "logfactorial.h" +#include <stdint.h> /* * Generate a sample from the hypergeometric distribution. @@ -188,8 +188,8 @@ static int64_t hypergeometric_hrua(bitgen_t *bitgen_state, while (1) { double U, V, X, T; double gp; - U = random_double(bitgen_state); - V = random_double(bitgen_state); // "U star" in Stadlober (1989) + U = next_double(bitgen_state); + V = next_double(bitgen_state); // "U star" in Stadlober (1989) X = a + h*(V - 0.5) / U; // fast rejection: diff --git a/numpy/random/src/distributions/random_mvhg_count.c b/numpy/random/src/distributions/random_mvhg_count.c new file mode 100644 index 000000000..9c0cc045d --- /dev/null +++ b/numpy/random/src/distributions/random_mvhg_count.c @@ -0,0 +1,131 @@ +#include <stdint.h> +#include <stdlib.h> +#include <stdbool.h> + +#include "include/distributions.h" + +/* + * random_mvhg_count + * + * Draw variates from the multivariate hypergeometric distribution-- + * the "count" algorithm. + * + * Parameters + * ---------- + * bitgen_t *bitgen_state + * Pointer to a `bitgen_t` instance. + * int64_t total + * The sum of the values in the array `colors`. (This is redundant + * information, but we know the caller has already computed it, so + * we might as well use it.) + * size_t num_colors + * The length of the `colors` array. + * int64_t *colors + * The array of colors (i.e. the number of each type in the collection + * from which the random variate is drawn). + * int64_t nsample + * The number of objects drawn without replacement for each variate. + * `nsample` must not exceed sum(colors). This condition is not checked; + * it is assumed that the caller has already validated the value. + * size_t num_variates + * The number of variates to be produced and put in the array + * pointed to by `variates`. One variate is a vector of length + * `num_colors`, so the array pointed to by `variates` must have length + * `num_variates * num_colors`. + * int64_t *variates + * The array that will hold the result. It must have length + * `num_variates * num_colors`. + * The array is not initialized in the function; it is expected that the + * array has been initialized with zeros when the function is called. + * + * Notes + * ----- + * The "count" algorithm for drawing one variate is roughly equivalent to the + * following numpy code: + * + * choices = np.repeat(np.arange(len(colors)), colors) + * selection = np.random.choice(choices, nsample, replace=False) + * variate = np.bincount(selection, minlength=len(colors)) + * + * This function uses a temporary array with length sum(colors). 
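The numpy sketch quoted in these Notes runs as-is; the snippet below wraps it into a helper and, for comparison, adds a rendering of the "marginals" strategy implemented by random_mvhg_marginals.c later in this diff. This is a sketch under stated assumptions, not the C implementation: helper names are hypothetical, the modern numpy.random.Generator API is assumed, and the nsample > total/2 complement trick used by both C routines (visible in the code that follows) is omitted:

    import numpy as np

    def mvhg_count_sketch(colors, nsample, rng):
        # "Count" method: expand the urn into one entry per object, draw
        # nsample entries without replacement, then tally each color.
        choices = np.repeat(np.arange(len(colors)), colors)
        selection = rng.choice(choices, nsample, replace=False)
        return np.bincount(selection, minlength=len(colors))

    def mvhg_marginals_sketch(colors, nsample, rng):
        # "Marginals" method: draw each color's count in turn from the
        # univariate hypergeometric distribution, conditioning on what the
        # earlier colors consumed; the last color receives the remainder.
        colors = np.asarray(colors)
        variates = np.zeros(len(colors), dtype=np.int64)
        num_to_sample = nsample
        remaining = int(colors.sum())
        for j in range(len(colors) - 1):
            if num_to_sample == 0:
                break
            remaining -= int(colors[j])
            r = int(rng.hypergeometric(colors[j], remaining, num_to_sample))
            variates[j] = r
            num_to_sample -= r
        variates[-1] = num_to_sample
        return variates

    rng = np.random.default_rng()
    # Each call returns a length-3 count vector summing to 4, with the
    # k-th entry never exceeding colors[k].
    print(mvhg_count_sketch([3, 2, 5], 4, rng))
    print(mvhg_marginals_sketch([3, 2, 5], 4, rng))

The C "count" implementation below avoids the repeat/choice/bincount allocations by doing a partial Fisher-Yates shuffle of a scratch array in place.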
+ * + * Assumptions on the arguments (not checked in the function): + * * colors[k] >= 0 for k in range(num_colors) + * * total = sum(colors) + * * 0 <= nsample <= total + * * the product total * sizeof(size_t) does not exceed SIZE_MAX + * * the product num_variates * num_colors does not overflow + */ + +int random_mvhg_count(bitgen_t *bitgen_state, + int64_t total, + size_t num_colors, int64_t *colors, + int64_t nsample, + size_t num_variates, int64_t *variates) +{ + size_t *choices; + bool more_than_half; + + if ((total == 0) || (nsample == 0) || (num_variates == 0)) { + // Nothing to do. + return 0; + } + + choices = malloc(total * (sizeof *choices)); + if (choices == NULL) { + return -1; + } + + /* + * If colors contains, for example, [3 2 5], then choices + * will contain [0 0 0 1 1 2 2 2 2 2]. + */ + for (size_t i = 0, k = 0; i < num_colors; ++i) { + for (int64_t j = 0; j < colors[i]; ++j) { + choices[k] = i; + ++k; + } + } + + more_than_half = nsample > (total / 2); + if (more_than_half) { + nsample = total - nsample; + } + + for (size_t i = 0; i < num_variates * num_colors; i += num_colors) { + /* + * Fisher-Yates shuffle, but only loop through the first + * `nsample` entries of `choices`. After the loop, + * choices[:nsample] contains a random sample from + * the full array. + */ + for (size_t j = 0; j < (size_t) nsample; ++j) { + size_t tmp, k; + // Note: nsample is not greater than total, so there is no danger + // of integer underflow in `(size_t) total - j - 1`. + k = j + (size_t) random_interval(bitgen_state, + (size_t) total - j - 1); + tmp = choices[k]; + choices[k] = choices[j]; + choices[j] = tmp; + } + /* + * Count the number of occurrences of each value in choices[:nsample]. + * The result, stored in variates[i:i+num_colors], is the sample from + * the multivariate hypergeometric distribution. + */ + for (size_t j = 0; j < (size_t) nsample; ++j) { + variates[i + choices[j]] += 1; + } + + if (more_than_half) { + for (size_t k = 0; k < num_colors; ++k) { + variates[i + k] = colors[k] - variates[i + k]; + } + } + } + + free(choices); + + return 0; +} diff --git a/numpy/random/src/distributions/random_mvhg_marginals.c b/numpy/random/src/distributions/random_mvhg_marginals.c new file mode 100644 index 000000000..301a4acad --- /dev/null +++ b/numpy/random/src/distributions/random_mvhg_marginals.c @@ -0,0 +1,138 @@ +#include <stdint.h> +#include <stddef.h> +#include <stdbool.h> +#include <math.h> + +#include "include/distributions.h" +#include "logfactorial.h" + + +/* + * random_mvhg_marginals + * + * Draw samples from the multivariate hypergeometric distribution-- + * the "marginals" algorithm. + * + * This version generates the sample by iteratively calling + * hypergeometric() (the univariate hypergeometric distribution). + * + * Parameters + * ---------- + * bitgen_t *bitgen_state + * Pointer to a `bitgen_t` instance. + * int64_t total + * The sum of the values in the array `colors`. (This is redundant + * information, but we know the caller has already computed it, so + * we might as well use it.) + * size_t num_colors + * The length of the `colors` array. The function assumes + * num_colors > 0. + * int64_t *colors + * The array of colors (i.e. the number of each type in the collection + * from which the random variate is drawn). + * int64_t nsample + * The number of objects drawn without replacement for each variate. + * `nsample` must not exceed sum(colors). This condition is not checked; + * it is assumed that the caller has already validated the value.
+ * size_t num_variates + * The number of variates to be produced and put in the array + * pointed to by `variates`. One variate is a vector of length + * `num_colors`, so the array pointed to by `variates` must have length + * `num_variates * num_colors`. + * int64_t *variates + * The array that will hold the result. It must have length + * `num_variates * num_colors`. + * The array is not initialized in the function; it is expected that the + * array has been initialized with zeros when the function is called. + * + * Notes + * ----- + * Here's an example that demonstrates the idea of this algorithm. + * + * Suppose the urn contains red, green, blue and yellow marbles. + * Let nred be the number of red marbles, and define the quantities for + * the other colors similarly. The total number of marbles is + * + * total = nred + ngreen + nblue + nyellow. + * + * To generate a sample using hypergeometric(): + * + * red_sample = hypergeometric(ngood=nred, nbad=total - nred, + * nsample=nsample) + * + * This gives us the number of red marbles in the sample. The number of + * marbles in the sample that are *not* red is nsample - red_sample. + * To figure out the distribution of those marbles, we again use + * hypergeometric(): + * + * green_sample = hypergeometric(ngood=ngreen, + * nbad=total - nred - ngreen, + * nsample=nsample - red_sample) + * + * Similarly, + * + * blue_sample = hypergeometric( + * ngood=nblue, + * nbad=total - nred - ngreen - nblue, + * nsample=nsample - red_sample - green_sample) + * + * Finally, + * + * yellow_sample = nsample - (red_sample + green_sample + blue_sample). + * + * The above sequence of steps is implemented for an arbitrary number of + * colors as the innermost loop in the code below. `remaining` + * is the value passed to `nbad`; it is `total - colors[0]` in the first + * call to random_hypergeometric(), and then decreases by `colors[j]` in + * each iteration. `num_to_sample` is the `nsample` argument. It + * starts at this function's `nsample` input, and is decreased by the + * result of the call to random_hypergeometric() in each iteration. + * + * Assumptions on the arguments (not checked in the function): + * * colors[k] >= 0 for k in range(num_colors) + * * total = sum(colors) + * * 0 <= nsample <= total + * * the product num_variates * num_colors does not overflow + */ + +void random_mvhg_marginals(bitgen_t *bitgen_state, + int64_t total, + size_t num_colors, int64_t *colors, + int64_t nsample, + size_t num_variates, int64_t *variates) +{ + bool more_than_half; + + if ((total == 0) || (nsample == 0) || (num_variates == 0)) { + // Nothing to do.
+ return; + } + + more_than_half = nsample > (total / 2); + if (more_than_half) { + nsample = total - nsample; + } + + for (size_t i = 0; i < num_variates * num_colors; i += num_colors) { + int64_t num_to_sample = nsample; + int64_t remaining = total; + for (size_t j = 0; (num_to_sample > 0) && (j + 1 < num_colors); ++j) { + int64_t r; + remaining -= colors[j]; + r = random_hypergeometric(bitgen_state, + colors[j], remaining, num_to_sample); + variates[i + j] = r; + num_to_sample -= r; + } + + if (num_to_sample > 0) { + variates[i + num_colors - 1] = num_to_sample; + } + + if (more_than_half) { + for (size_t k = 0; k < num_colors; ++k) { + variates[i + k] = colors[k] - variates[i + k]; + } + } + } +} diff --git a/numpy/random/src/entropy/entropy.c b/numpy/random/src/entropy/entropy.c deleted file mode 100644 index eaca37a9c..000000000 --- a/numpy/random/src/entropy/entropy.c +++ /dev/null @@ -1,114 +0,0 @@ -#include <stddef.h> -#include <stdio.h> -#include <stdlib.h> -#include <string.h> - -#include "entropy.h" -#ifdef _WIN32 -/* Windows */ -#include <sys/timeb.h> -#include <time.h> -#include <windows.h> - -#include <wincrypt.h> -#else -/* Unix */ -#include <sys/time.h> -#include <time.h> -#include <unistd.h> -#include <fcntl.h> -#endif - -bool entropy_getbytes(void *dest, size_t size) { -#ifndef _WIN32 - - int fd = open("/dev/urandom", O_RDONLY); - if (fd < 0) - return false; - ssize_t sz = read(fd, dest, size); - if ((sz < 0) || ((size_t)sz < size)) - return false; - return close(fd) == 0; - -#else - - HCRYPTPROV hCryptProv; - BOOL done; - - if (!CryptAcquireContext(&hCryptProv, NULL, NULL, PROV_RSA_FULL, - CRYPT_VERIFYCONTEXT) || - !hCryptProv) { - return true; - } - done = CryptGenRandom(hCryptProv, (DWORD)size, (unsigned char *)dest); - CryptReleaseContext(hCryptProv, 0); - if (!done) { - return false; - } - - return true; -#endif -} - -/* Thomas Wang 32/64 bits integer hash function */ -uint32_t entropy_hash_32(uint32_t key) { - key += ~(key << 15); - key ^= (key >> 10); - key += (key << 3); - key ^= (key >> 6); - key += ~(key << 11); - key ^= (key >> 16); - return key; -} - -uint64_t entropy_hash_64(uint64_t key) { - key = (~key) + (key << 21); // key = (key << 21) - key - 1; - key = key ^ (key >> 24); - key = (key + (key << 3)) + (key << 8); // key * 265 - key = key ^ (key >> 14); - key = (key + (key << 2)) + (key << 4); // key * 21 - key = key ^ (key >> 28); - key = key + (key << 31); - return key; -} - -uint32_t entropy_randombytes(void) { - -#ifndef _WIN32 - struct timeval tv; - gettimeofday(&tv, NULL); - return entropy_hash_32(getpid()) ^ entropy_hash_32(tv.tv_sec) ^ - entropy_hash_32(tv.tv_usec) ^ entropy_hash_32(clock()); -#else - uint32_t out = 0; - int64_t counter; - struct _timeb tv; - _ftime_s(&tv); - out = entropy_hash_32(GetCurrentProcessId()) ^ - entropy_hash_32((uint32_t)tv.time) ^ entropy_hash_32(tv.millitm) ^ - entropy_hash_32(clock()); - if (QueryPerformanceCounter((LARGE_INTEGER *)&counter) != 0) - out ^= entropy_hash_32((uint32_t)(counter & 0xffffffff)); - return out; -#endif -} - -bool entropy_fallback_getbytes(void *dest, size_t size) { - int hashes = (int)size; - uint32_t *hash = malloc(hashes * sizeof(uint32_t)); - int i; - for (i = 0; i < hashes; i++) { - hash[i] = entropy_randombytes(); - } - memcpy(dest, (void *)hash, size); - free(hash); - return true; -} - -void entropy_fill(void *dest, size_t size) { - bool success; - success = entropy_getbytes(dest, size); - if (!success) { - entropy_fallback_getbytes(dest, size); - } -} diff --git 
a/numpy/random/src/entropy/entropy.h b/numpy/random/src/entropy/entropy.h deleted file mode 100644 index f00caf61d..000000000 --- a/numpy/random/src/entropy/entropy.h +++ /dev/null @@ -1,14 +0,0 @@ -#ifndef _RANDOMDGEN__ENTROPY_H_ -#define _RANDOMDGEN__ENTROPY_H_ - -#include <stddef.h> -#include <stdbool.h> -#include <stdint.h> - -extern void entropy_fill(void *dest, size_t size); - -extern bool entropy_getbytes(void *dest, size_t size); - -extern bool entropy_fallback_getbytes(void *dest, size_t size); - -#endif diff --git a/numpy/random/src/legacy/legacy-distributions.c b/numpy/random/src/legacy/legacy-distributions.c index 4741a0352..fd067fe8d 100644 --- a/numpy/random/src/legacy/legacy-distributions.c +++ b/numpy/random/src/legacy/legacy-distributions.c @@ -1,4 +1,4 @@ -#include "legacy-distributions.h" +#include "include/legacy-distributions.h" static NPY_INLINE double legacy_double(aug_bitgen_t *aug_state) { @@ -215,6 +215,37 @@ double legacy_exponential(aug_bitgen_t *aug_state, double scale) { } +static RAND_INT_TYPE legacy_random_binomial_original(bitgen_t *bitgen_state, + double p, + RAND_INT_TYPE n, + binomial_t *binomial) { + double q; + + if (p <= 0.5) { + if (p * n <= 30.0) { + return random_binomial_inversion(bitgen_state, n, p, binomial); + } else { + return random_binomial_btpe(bitgen_state, n, p, binomial); + } + } else { + q = 1.0 - p; + if (q * n <= 30.0) { + return n - random_binomial_inversion(bitgen_state, n, q, binomial); + } else { + return n - random_binomial_btpe(bitgen_state, n, q, binomial); + } + } +} + + +int64_t legacy_random_binomial(bitgen_t *bitgen_state, double p, + int64_t n, binomial_t *binomial) { + return (int64_t) legacy_random_binomial_original(bitgen_state, p, + (RAND_INT_TYPE) n, + binomial); +} + + static RAND_INT_TYPE random_hypergeometric_hyp(bitgen_t *bitgen_state, RAND_INT_TYPE good, RAND_INT_TYPE bad, @@ -263,8 +294,8 @@ static RAND_INT_TYPE random_hypergeometric_hrua(bitgen_t *bitgen_state, d7 = sqrt((double)(popsize - m) * sample * d4 * d5 / (popsize - 1) + 0.5); d8 = D1 * d7 + D2; d9 = (RAND_INT_TYPE)floor((double)(m + 1) * (mingoodbad + 1) / (popsize + 2)); - d10 = (loggam(d9 + 1) + loggam(mingoodbad - d9 + 1) + loggam(m - d9 + 1) + - loggam(maxgoodbad - m + d9 + 1)); + d10 = (random_loggam(d9 + 1) + random_loggam(mingoodbad - d9 + 1) + + random_loggam(m - d9 + 1) + random_loggam(maxgoodbad - m + d9 + 1)); d11 = MIN(MIN(m, mingoodbad) + 1.0, floor(d6 + 16 * d7)); /* 16 for 16-decimal-digit precision in D1 and D2 */ @@ -278,8 +309,8 @@ static RAND_INT_TYPE random_hypergeometric_hrua(bitgen_t *bitgen_state, continue; Z = (RAND_INT_TYPE)floor(W); - T = d10 - (loggam(Z + 1) + loggam(mingoodbad - Z + 1) + loggam(m - Z + 1) + - loggam(maxgoodbad - m + Z + 1)); + T = d10 - (random_loggam(Z + 1) + random_loggam(mingoodbad - Z + 1) + + random_loggam(m - Z + 1) + random_loggam(maxgoodbad - m + Z + 1)); /* fast acceptance: */ if ((X * (4.0 - X) - 3.0) <= T) diff --git a/numpy/random/src/philox/philox.h b/numpy/random/src/philox/philox.h index 309d89eae..c72424a97 100644 --- a/numpy/random/src/philox/philox.h +++ b/numpy/random/src/philox/philox.h @@ -1,8 +1,8 @@ #ifndef _RANDOMDGEN__PHILOX_H_ #define _RANDOMDGEN__PHILOX_H_ -#include <inttypes.h> #include "numpy/npy_common.h" +#include <inttypes.h> #define PHILOX_BUFFER_SIZE 4L diff --git a/numpy/random/src/sfc64/sfc64.h b/numpy/random/src/sfc64/sfc64.h index 6674ae69c..75c4118d3 100644 --- a/numpy/random/src/sfc64/sfc64.h +++ b/numpy/random/src/sfc64/sfc64.h @@ -1,11 +1,11 @@ #ifndef 
_RANDOMDGEN__SFC64_H_ #define _RANDOMDGEN__SFC64_H_ +#include "numpy/npy_common.h" #include <inttypes.h> #ifdef _WIN32 #include <stdlib.h> #endif -#include "numpy/npy_common.h" typedef struct s_sfc64_state { uint64_t s[4]; diff --git a/numpy/random/tests/test_direct.py b/numpy/random/tests/test_direct.py index 0f57c4bd4..34d7bd278 100644 --- a/numpy/random/tests/test_direct.py +++ b/numpy/random/tests/test_direct.py @@ -10,7 +10,7 @@ from numpy.random import ( Generator, MT19937, PCG64, Philox, RandomState, SeedSequence, SFC64, default_rng ) -from numpy.random.common import interface +from numpy.random._common import interface try: import cffi # noqa: F401 @@ -120,7 +120,7 @@ def gauss_from_uint(x, n, bits): return gauss[:n] def test_seedsequence(): - from numpy.random.bit_generator import (ISeedSequence, + from numpy.random._bit_generator import (ISeedSequence, ISpawnableSeedSequence, SeedlessSeedSequence) diff --git a/numpy/random/tests/test_generator_mt19937.py b/numpy/random/tests/test_generator_mt19937.py index 853d86fba..526275dda 100644 --- a/numpy/random/tests/test_generator_mt19937.py +++ b/numpy/random/tests/test_generator_mt19937.py @@ -4,7 +4,7 @@ import pytest import numpy as np from numpy.testing import ( - assert_, assert_raises, assert_equal, + assert_, assert_raises, assert_equal, assert_allclose, assert_warns, assert_no_warnings, assert_array_equal, assert_array_almost_equal, suppress_warnings) @@ -115,6 +115,140 @@ class TestMultinomial(object): assert_array_equal(non_contig, contig) +class TestMultivariateHypergeometric(object): + + def setup(self): + self.seed = 8675309 + + def test_argument_validation(self): + # Error cases... + + # `colors` must be a 1-d sequence + assert_raises(ValueError, random.multivariate_hypergeometric, + 10, 4) + + # Negative nsample + assert_raises(ValueError, random.multivariate_hypergeometric, + [2, 3, 4], -1) + + # Negative color + assert_raises(ValueError, random.multivariate_hypergeometric, + [-1, 2, 3], 2) + + # nsample exceeds sum(colors) + assert_raises(ValueError, random.multivariate_hypergeometric, + [2, 3, 4], 10) + + # nsample exceeds sum(colors) (edge case of empty colors) + assert_raises(ValueError, random.multivariate_hypergeometric, + [], 1) + + # Validation errors associated with very large values in colors. + assert_raises(ValueError, random.multivariate_hypergeometric, + [999999999, 101], 5, 1, 'marginals') + + int64_info = np.iinfo(np.int64) + max_int64 = int64_info.max + max_int64_index = max_int64 // int64_info.dtype.itemsize + assert_raises(ValueError, random.multivariate_hypergeometric, + [max_int64_index - 100, 101], 5, 1, 'count') + + @pytest.mark.parametrize('method', ['count', 'marginals']) + def test_edge_cases(self, method): + # Set the seed, but in fact, all the results in this test are + # deterministic, so we don't really need this. 
+ random = Generator(MT19937(self.seed)) + + x = random.multivariate_hypergeometric([0, 0, 0], 0, method=method) + assert_array_equal(x, [0, 0, 0]) + + x = random.multivariate_hypergeometric([], 0, method=method) + assert_array_equal(x, []) + + x = random.multivariate_hypergeometric([], 0, size=1, method=method) + assert_array_equal(x, np.empty((1, 0), dtype=np.int64)) + + x = random.multivariate_hypergeometric([1, 2, 3], 0, method=method) + assert_array_equal(x, [0, 0, 0]) + + x = random.multivariate_hypergeometric([9, 0, 0], 3, method=method) + assert_array_equal(x, [3, 0, 0]) + + colors = [1, 1, 0, 1, 1] + x = random.multivariate_hypergeometric(colors, sum(colors), + method=method) + assert_array_equal(x, colors) + + x = random.multivariate_hypergeometric([3, 4, 5], 12, size=3, + method=method) + assert_array_equal(x, [[3, 4, 5]]*3) + + # Cases for nsample: + # nsample < 10 + # 10 <= nsample < colors.sum()/2 + # colors.sum()/2 < nsample < colors.sum() - 10 + # colors.sum() - 10 < nsample < colors.sum() + @pytest.mark.parametrize('nsample', [8, 25, 45, 55]) + @pytest.mark.parametrize('method', ['count', 'marginals']) + @pytest.mark.parametrize('size', [5, (2, 3), 150000]) + def test_typical_cases(self, nsample, method, size): + random = Generator(MT19937(self.seed)) + + colors = np.array([10, 5, 20, 25]) + sample = random.multivariate_hypergeometric(colors, nsample, size, + method=method) + if isinstance(size, int): + expected_shape = (size,) + colors.shape + else: + expected_shape = size + colors.shape + assert_equal(sample.shape, expected_shape) + assert_((sample >= 0).all()) + assert_((sample <= colors).all()) + assert_array_equal(sample.sum(axis=-1), + np.full(size, fill_value=nsample, dtype=int)) + if isinstance(size, int) and size >= 100000: + # This sample is large enough to compare its mean to + # the expected values. 
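(Aside: for readers skimming these tests, this is roughly how the method under test is exercised. A usage sketch under the Generator API shown here; the seed and sizes are arbitrary.

    import numpy as np

    rng = np.random.Generator(np.random.MT19937(8675309))
    colors = np.array([10, 5, 20, 25])
    sample = rng.multivariate_hypergeometric(colors, 25, size=(2, 3),
                                             method='count')
    print(sample.shape)          # (2, 3, 4): `size` plus one axis per color
    print(sample.sum(axis=-1))   # every variate sums to nsample=25
    # For a large `size`, sample.mean(axis=0) approaches
    # 25 * colors / colors.sum(), which is what this test asserts below.

The mean check in the test continues below.)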
+ assert_allclose(sample.mean(axis=0), + nsample * colors / colors.sum(), + rtol=1e-3, atol=0.005) + + def test_repeatability1(self): + random = Generator(MT19937(self.seed)) + sample = random.multivariate_hypergeometric([3, 4, 5], 5, size=5, + method='count') + expected = np.array([[2, 1, 2], + [2, 1, 2], + [1, 1, 3], + [2, 0, 3], + [2, 1, 2]]) + assert_array_equal(sample, expected) + + def test_repeatability2(self): + random = Generator(MT19937(self.seed)) + sample = random.multivariate_hypergeometric([20, 30, 50], 50, + size=5, + method='marginals') + expected = np.array([[ 9, 17, 24], + [ 7, 13, 30], + [ 9, 15, 26], + [ 9, 17, 24], + [12, 14, 24]]) + assert_array_equal(sample, expected) + + def test_repeatability3(self): + random = Generator(MT19937(self.seed)) + sample = random.multivariate_hypergeometric([20, 30, 50], 12, + size=5, + method='marginals') + expected = np.array([[2, 3, 7], + [5, 3, 4], + [2, 5, 5], + [5, 3, 4], + [1, 5, 6]]) + assert_array_equal(sample, expected) + + class TestSetState(object): def setup(self): self.seed = 1234567890 @@ -732,6 +866,29 @@ class TestRandomDist(object): desired = conv([4, 1, 9, 8, 0, 5, 3, 6, 2, 7]) assert_array_equal(actual, desired) + def test_shuffle_custom_axis(self): + random = Generator(MT19937(self.seed)) + actual = np.arange(16).reshape((4, 4)) + random.shuffle(actual, axis=1) + desired = np.array([[ 0, 3, 1, 2], + [ 4, 7, 5, 6], + [ 8, 11, 9, 10], + [12, 15, 13, 14]]) + assert_array_equal(actual, desired) + random = Generator(MT19937(self.seed)) + actual = np.arange(16).reshape((4, 4)) + random.shuffle(actual, axis=-1) + assert_array_equal(actual, desired) + + def test_shuffle_axis_nonsquare(self): + y1 = np.arange(20).reshape(2, 10) + y2 = y1.copy() + random = Generator(MT19937(self.seed)) + random.shuffle(y1, axis=1) + random = Generator(MT19937(self.seed)) + random.shuffle(y2.T) + assert_array_equal(y1, y2) + def test_shuffle_masked(self): # gh-3263 a = np.ma.masked_values(np.reshape(range(20), (5, 4)) % 3 - 1, -1) @@ -746,6 +903,16 @@ class TestRandomDist(object): assert_equal( sorted(b.data[~b.mask]), sorted(b_orig.data[~b_orig.mask])) + def test_shuffle_exceptions(self): + random = Generator(MT19937(self.seed)) + arr = np.arange(10) + assert_raises(np.AxisError, random.shuffle, arr, 1) + arr = np.arange(9).reshape((3, 3)) + assert_raises(np.AxisError, random.shuffle, arr, 3) + assert_raises(TypeError, random.shuffle, arr, slice(1, 2, None)) + arr = [[1, 2, 3], [4, 5, 6]] + assert_raises(NotImplementedError, random.shuffle, arr, 1) + def test_permutation(self): random = Generator(MT19937(self.seed)) alist = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0] @@ -771,6 +938,27 @@ class TestRandomDist(object): actual = random.permutation(integer_val) assert_array_equal(actual, desired) + def test_permutation_custom_axis(self): + a = np.arange(16).reshape((4, 4)) + desired = np.array([[ 0, 3, 1, 2], + [ 4, 7, 5, 6], + [ 8, 11, 9, 10], + [12, 15, 13, 14]]) + random = Generator(MT19937(self.seed)) + actual = random.permutation(a, axis=1) + assert_array_equal(actual, desired) + random = Generator(MT19937(self.seed)) + actual = random.permutation(a, axis=-1) + assert_array_equal(actual, desired) + + def test_permutation_exceptions(self): + random = Generator(MT19937(self.seed)) + arr = np.arange(10) + assert_raises(np.AxisError, random.permutation, arr, 1) + arr = np.arange(9).reshape((3, 3)) + assert_raises(np.AxisError, random.permutation, arr, 3) + assert_raises(TypeError, random.permutation, arr, slice(1, 2, None)) + def test_beta(self): random = 
Generator(MT19937(self.seed)) actual = random.beta(.1, .9, size=(3, 2)) diff --git a/numpy/random/tests/test_randomstate.py b/numpy/random/tests/test_randomstate.py index a0edc5c23..5131f1839 100644 --- a/numpy/random/tests/test_randomstate.py +++ b/numpy/random/tests/test_randomstate.py @@ -11,7 +11,8 @@ from numpy.testing import ( suppress_warnings ) -from numpy.random import MT19937, PCG64, mtrand as random +from numpy.random import MT19937, PCG64 +from numpy import random INT_FUNCS = {'binomial': (100.0, 0.6), 'geometric': (.5,), diff --git a/numpy/random/tests/test_randomstate_regression.py b/numpy/random/tests/test_randomstate_regression.py index 29870534a..bdc2214b6 100644 --- a/numpy/random/tests/test_randomstate_regression.py +++ b/numpy/random/tests/test_randomstate_regression.py @@ -8,7 +8,7 @@ from numpy.testing import ( from numpy.compat import long import numpy as np -from numpy.random import mtrand as random +from numpy import random class TestRegression(object): @@ -181,3 +181,30 @@ class TestRegression(object): assert c.dtype == np.dtype(int) c = np.random.choice(10, replace=False, size=2) assert c.dtype == np.dtype(int) + + @pytest.mark.skipif(np.iinfo('l').max < 2**32, + reason='Cannot test with 32-bit C long') + def test_randint_117(self): + # GH 14189 + random.seed(0) + expected = np.array([2357136044, 2546248239, 3071714933, 3626093760, + 2588848963, 3684848379, 2340255427, 3638918503, + 1819583497, 2678185683], dtype='int64') + actual = random.randint(2**32, size=10) + assert_array_equal(actual, expected) + + def test_p_zero_stream(self): + # Regression test for gh-14522. Ensure that future versions + # generate the same variates as version 1.16. + np.random.seed(12345) + assert_array_equal(random.binomial(1, [0, 0.25, 0.5, 0.75, 1]), + [0, 0, 0, 1, 1]) + + def test_n_zero_stream(self): + # Regression test for gh-14522. Ensure that future versions + # generate the same variates as version 1.16. 
+ np.random.seed(8675309) + expected = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [3, 4, 2, 3, 3, 1, 5, 3, 1, 3]]) + assert_array_equal(random.binomial([[0], [10]], 0.25, size=(2, 10)), + expected) diff --git a/numpy/random/tests/test_seed_sequence.py b/numpy/random/tests/test_seed_sequence.py index 8d6d604a2..fe23680ed 100644 --- a/numpy/random/tests/test_seed_sequence.py +++ b/numpy/random/tests/test_seed_sequence.py @@ -1,7 +1,7 @@ import numpy as np from numpy.testing import assert_array_equal -from numpy.random.bit_generator import SeedSequence +from numpy.random import SeedSequence def test_reference_data(): diff --git a/numpy/random/tests/test_smoke.py b/numpy/random/tests/test_smoke.py index 84d261e5e..6e641b5f4 100644 --- a/numpy/random/tests/test_smoke.py +++ b/numpy/random/tests/test_smoke.py @@ -5,7 +5,7 @@ from functools import partial import numpy as np import pytest from numpy.testing import assert_equal, assert_, assert_array_equal -from numpy.random import (Generator, MT19937, PCG64, Philox, SFC64, entropy) +from numpy.random import (Generator, MT19937, PCG64, Philox, SFC64) @pytest.fixture(scope='module', params=(np.bool, np.int8, np.int16, np.int32, np.int64, @@ -806,23 +806,3 @@ class TestDefaultRNG(RNG): np.random.default_rng(-1) with pytest.raises(ValueError): np.random.default_rng([12345, -1]) - - -class TestEntropy(object): - def test_entropy(self): - e1 = entropy.random_entropy() - e2 = entropy.random_entropy() - assert_((e1 != e2)) - e1 = entropy.random_entropy(10) - e2 = entropy.random_entropy(10) - assert_((e1 != e2).all()) - e1 = entropy.random_entropy(10, source='system') - e2 = entropy.random_entropy(10, source='system') - assert_((e1 != e2).all()) - - def test_fallback(self): - e1 = entropy.random_entropy(source='fallback') - time.sleep(0.1) - e2 = entropy.random_entropy(source='fallback') - assert_((e1 != e2)) - diff --git a/numpy/testing/_private/parameterized.py b/numpy/testing/_private/parameterized.py index a5fa4fb5e..489d8e09a 100644 --- a/numpy/testing/_private/parameterized.py +++ b/numpy/testing/_private/parameterized.py @@ -45,11 +45,18 @@ except ImportError: from unittest import TestCase -PY3 = sys.version_info[0] == 3 PY2 = sys.version_info[0] == 2 -if PY3: +if PY2: + from types import InstanceType + lzip = zip + text_type = unicode + bytes_type = str + string_types = basestring, + def make_method(func, instance, type): + return MethodType(func, instance, type) +else: # Python 3 doesn't have an InstanceType, so just use a dummy type. 
class InstanceType(): pass @@ -61,14 +68,6 @@ if PY3: if instance is None: return func return MethodType(func, instance) -else: - from types import InstanceType - lzip = zip - text_type = unicode - bytes_type = str - string_types = basestring, - def make_method(func, instance, type): - return MethodType(func, instance, type) _param = namedtuple("param", "args kwargs") diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index 97a5eac17..8a31fcf15 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -686,7 +686,7 @@ def assert_array_compare(comparison, x, y, err_msg='', verbose=True, header='', precision=6, equal_nan=True, equal_inf=True): __tracebackhide__ = True # Hide traceback for py.test - from numpy.core import array, array2string, isnan, inf, bool_, errstate + from numpy.core import array, array2string, isnan, inf, bool_, errstate, all, max, object_ x = array(x, copy=False, subok=True) y = array(y, copy=False, subok=True) @@ -788,17 +788,18 @@ def assert_array_compare(comparison, x, y, err_msg='', verbose=True, # np.ma.masked, which is falsy). if cond != True: n_mismatch = reduced.size - reduced.sum(dtype=intp) - percent_mismatch = 100 * n_mismatch / ox.size + n_elements = flagged.size if flagged.ndim != 0 else reduced.size + percent_mismatch = 100 * n_mismatch / n_elements remarks = [ 'Mismatched elements: {} / {} ({:.3g}%)'.format( - n_mismatch, ox.size, percent_mismatch)] + n_mismatch, n_elements, percent_mismatch)] with errstate(invalid='ignore', divide='ignore'): # ignore errors for non-numeric types with contextlib.suppress(TypeError): error = abs(x - y) - max_abs_error = error.max() - if error.dtype == 'object': + max_abs_error = max(error) + if getattr(error, 'dtype', object_) == object_: remarks.append('Max absolute difference: ' + str(max_abs_error)) else: @@ -807,8 +808,13 @@ def assert_array_compare(comparison, x, y, err_msg='', verbose=True, # note: this definition of relative error matches that one # used by assert_allclose (found in np.isclose) - max_rel_error = (error / abs(y)).max() - if error.dtype == 'object': + # Filter values where the divisor would be zero + nonzero = bool_(y != 0) + if all(~nonzero): + max_rel_error = array(inf) + else: + max_rel_error = max(error[nonzero] / abs(y[nonzero])) + if getattr(error, 'dtype', object_) == object_: remarks.append('Max relative difference: ' + str(max_rel_error)) else: diff --git a/numpy/testing/decorators.py b/numpy/testing/decorators.py deleted file mode 100644 index bf78be500..000000000 --- a/numpy/testing/decorators.py +++ /dev/null @@ -1,15 +0,0 @@ -""" -Back compatibility decorators module. It will import the appropriate -set of tools - -""" -from __future__ import division, absolute_import, print_function - -import warnings - -# 2018-04-04, numpy 1.15.0 -warnings.warn("Importing from numpy.testing.decorators is deprecated " - "since numpy 1.15.0, import from numpy.testing instead.", - DeprecationWarning, stacklevel=2) - -from ._private.decorators import * diff --git a/numpy/testing/noseclasses.py b/numpy/testing/noseclasses.py deleted file mode 100644 index 5748a9a0f..000000000 --- a/numpy/testing/noseclasses.py +++ /dev/null @@ -1,14 +0,0 @@ -""" -Back compatibility noseclasses module. 
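(Aside: the zero-divisor filtering just added to assert_array_compare is easy to demonstrate standalone. Below is a minimal sketch of the same computation, not the numpy.testing internals; the helper name is invented for this note.

    import numpy as np

    def max_relative_difference(x, y):
        # Mirror the fixed logic: ignore positions where y == 0 so the
        # report never divides by zero; an all-zero y degenerates to inf.
        x, y = np.asarray(x), np.asarray(y)
        error = np.abs(x - y)
        nonzero = y != 0
        if not nonzero.any():
            return np.inf
        return (error[nonzero] / np.abs(y[nonzero])).max()

    print(max_relative_difference([0, 1], [0, 2]))   # 0.5, as the new test expects

The deleted back-compatibility docstring continues below.)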
It will import the appropriate -set of tools -""" -from __future__ import division, absolute_import, print_function - -import warnings - -# 2018-04-04, numpy 1.15.0 -warnings.warn("Importing from numpy.testing.noseclasses is deprecated " - "since 1.15.0, import from numpy.testing instead", - DeprecationWarning, stacklevel=2) - -from ._private.noseclasses import * diff --git a/numpy/testing/nosetester.py b/numpy/testing/nosetester.py deleted file mode 100644 index 2ac212eee..000000000 --- a/numpy/testing/nosetester.py +++ /dev/null @@ -1,19 +0,0 @@ -""" -Back compatibility nosetester module. It will import the appropriate -set of tools - -""" -from __future__ import division, absolute_import, print_function - -import warnings - -# 2018-04-04, numpy 1.15.0 -warnings.warn("Importing from numpy.testing.nosetester is deprecated " - "since 1.15.0, import from numpy.testing instead.", - DeprecationWarning, stacklevel=2) - -from ._private.nosetester import * - -__all__ = ['get_package_name', 'run_module_suite', 'NoseTester', - '_numpy_tester', 'get_package_name', 'import_nose', - 'suppress_warnings'] diff --git a/numpy/testing/print_coercion_tables.py b/numpy/testing/print_coercion_tables.py index 3a359f472..72b22cee1 100755 --- a/numpy/testing/print_coercion_tables.py +++ b/numpy/testing/print_coercion_tables.py @@ -70,22 +70,24 @@ def print_coercion_table(ntypes, inputfirstvalue, inputsecondvalue, firstarray, print(char, end=' ') print() -print("can cast") -print_cancast_table(np.typecodes['All']) -print() -print("In these tables, ValueError is '!', OverflowError is '@', TypeError is '#'") -print() -print("scalar + scalar") -print_coercion_table(np.typecodes['All'], 0, 0, False) -print() -print("scalar + neg scalar") -print_coercion_table(np.typecodes['All'], 0, -1, False) -print() -print("array + scalar") -print_coercion_table(np.typecodes['All'], 0, 0, True) -print() -print("array + neg scalar") -print_coercion_table(np.typecodes['All'], 0, -1, True) -print() -print("promote_types") -print_coercion_table(np.typecodes['All'], 0, 0, False, True) + +if __name__ == '__main__': + print("can cast") + print_cancast_table(np.typecodes['All']) + print() + print("In these tables, ValueError is '!', OverflowError is '@', TypeError is '#'") + print() + print("scalar + scalar") + print_coercion_table(np.typecodes['All'], 0, 0, False) + print() + print("scalar + neg scalar") + print_coercion_table(np.typecodes['All'], 0, -1, False) + print() + print("array + scalar") + print_coercion_table(np.typecodes['All'], 0, 0, True) + print() + print("array + neg scalar") + print_coercion_table(np.typecodes['All'], 0, -1, True) + print() + print("promote_types") + print_coercion_table(np.typecodes['All'], 0, 0, False, True) diff --git a/numpy/testing/tests/test_utils.py b/numpy/testing/tests/test_utils.py index 4f1b46d4f..44f93a693 100644 --- a/numpy/testing/tests/test_utils.py +++ b/numpy/testing/tests/test_utils.py @@ -564,6 +564,26 @@ class TestAlmostEqual(_GenericTest): assert_equal(msgs[4], 'Max absolute difference: 2') assert_equal(msgs[5], 'Max relative difference: inf') + def test_error_message_2(self): + """Check the message is formatted correctly when either x or y is a scalar.""" + x = 2 + y = np.ones(20) + with pytest.raises(AssertionError) as exc_info: + self._assert_func(x, y) + msgs = str(exc_info.value).split('\n') + assert_equal(msgs[3], 'Mismatched elements: 20 / 20 (100%)') + assert_equal(msgs[4], 'Max absolute difference: 1.') + assert_equal(msgs[5], 'Max relative difference: 1.') + + y = 2 + x = 
np.ones(20) + with pytest.raises(AssertionError) as exc_info: + self._assert_func(x, y) + msgs = str(exc_info.value).split('\n') + assert_equal(msgs[3], 'Mismatched elements: 20 / 20 (100%)') + assert_equal(msgs[4], 'Max absolute difference: 1.') + assert_equal(msgs[5], 'Max relative difference: 0.5') + def test_subclass_that_cannot_be_bool(self): # While we cannot guarantee testing functions will always work for # subclasses, the tests should ideally rely only on subclasses having @@ -881,6 +901,15 @@ class TestAssertAllclose(object): assert_array_less(a, b) assert_allclose(a, b) + def test_report_max_relative_error(self): + a = np.array([0, 1]) + b = np.array([0, 2]) + + with pytest.raises(AssertionError) as exc_info: + assert_allclose(a, b) + msg = str(exc_info.value) + assert_('Max relative difference: 0.5' in msg) + class TestArrayAlmostEqualNulp(object): diff --git a/numpy/testing/utils.py b/numpy/testing/utils.py index 1e7d65b89..975f6ad5d 100644 --- a/numpy/testing/utils.py +++ b/numpy/testing/utils.py @@ -7,10 +7,11 @@ from __future__ import division, absolute_import, print_function import warnings -# 2018-04-04, numpy 1.15.0 +# 2018-04-04, numpy 1.15.0 ImportWarning +# 2019-09-18, numpy 1.18.0 DeprecationWarning (changed) warnings.warn("Importing from numpy.testing.utils is deprecated " "since 1.15.0, import from numpy.testing instead.", - ImportWarning, stacklevel=2) + DeprecationWarning, stacklevel=2) from ._private.utils import * diff --git a/numpy/tests/test_public_api.py b/numpy/tests/test_public_api.py index df2fc4802..c71d03432 100644 --- a/numpy/tests/test_public_api.py +++ b/numpy/tests/test_public_api.py @@ -2,14 +2,21 @@ from __future__ import division, absolute_import, print_function import sys import subprocess +import pkgutil +import types +import importlib +import warnings import numpy as np +import numpy import pytest + try: import ctypes except ImportError: ctypes = None + def check_dir(module, module_name=None): """Returns a mapping of all objects with the wrong __module__ attribute.""" if module_name is None: @@ -27,7 +34,8 @@ def check_dir(module, module_name=None): sys.version_info[0] < 3, reason="NumPy exposes slightly different functions on Python 2") def test_numpy_namespace(): - # None of these objects are publicly documented. + # None of these objects are publicly documented to be part of the main + # NumPy namespace (some are useful though, others need to be cleaned up) undocumented = { 'Tester': 'numpy.testing._private.nosetester.NoseTester', '_add_newdoc_ufunc': 'numpy.core._multiarray_umath._add_newdoc_ufunc', @@ -72,7 +80,7 @@ def test_numpy_namespace(): @pytest.mark.parametrize('name', ['testing', 'Tester']) def test_import_lazy_import(name): """Make sure we can actually use the modules we lazy load. While not exported as part of the public API, it was accessible. With the use of __getattr__ and __dir__, this isn't always true. It can happen that @@ -101,6 +109,7 @@ def test_numpy_fft(): bad_results = check_dir(np.fft) assert bad_results == {} + @pytest.mark.skipif(ctypes is None, reason="ctypes not available in this python") def test_NPY_NO_EXPORT(): @@ -109,3 +118,373 @@ def test_NPY_NO_EXPORT(): f = getattr(cdll, 'test_not_exported', None) assert f is None, ("'test_not_exported' is mistakenly exported, " "NPY_NO_EXPORT does not work") + + +# Historically NumPy has not used leading underscores for private submodules +# much.
This has resulted in lots of things that look like public modules +# (i.e. things that can be imported as `import numpy.somesubmodule.somefile`), +# but were never intended to be public. The PUBLIC_MODULES list contains +# modules that are either public because they were meant to be, or because they +# contain public functions/objects that aren't present in any other namespace +# for whatever reason and therefore should be treated as public. +# +# The PRIVATE_BUT_PRESENT_MODULES list contains modules that look public (lack +# of underscores) but should not be used. For many of those modules the +# current status is fine. For others it may make sense to work on making them +# private, to clean up our public API and avoid confusion. +PUBLIC_MODULES = ['numpy.' + s for s in [ + "ctypeslib", + "distutils", + "distutils.cpuinfo", + "distutils.exec_command", + "distutils.misc_util", + "distutils.log", + "distutils.system_info", + "doc", + "doc.basics", + "doc.broadcasting", + "doc.byteswapping", + "doc.constants", + "doc.creation", + "doc.dispatch", + "doc.glossary", + "doc.indexing", + "doc.internals", + "doc.misc", + "doc.structured_arrays", + "doc.subclassing", + "doc.ufuncs", + "dual", + "f2py", + "fft", + "lib", + "lib.format", # was this meant to be public? + "lib.mixins", + "lib.recfunctions", + "lib.scimath", + "linalg", + "ma", + "ma.extras", + "ma.mrecords", + "matlib", + "polynomial", + "polynomial.chebyshev", + "polynomial.hermite", + "polynomial.hermite_e", + "polynomial.laguerre", + "polynomial.legendre", + "polynomial.polynomial", + "polynomial.polyutils", + "random", + "testing", + "version", +]] + + +PUBLIC_ALIASED_MODULES = [ + "numpy.char", + "numpy.emath", + "numpy.rec", +] + + +PRIVATE_BUT_PRESENT_MODULES = ['numpy.' + s for s in [ + "compat", + "compat.py3k", + "conftest", + "core", + "core.arrayprint", + "core.defchararray", + "core.einsumfunc", + "core.fromnumeric", + "core.function_base", + "core.getlimits", + "core.machar", + "core.memmap", + "core.multiarray", + "core.numeric", + "core.numerictypes", + "core.overrides", + "core.records", + "core.shape_base", + "core.umath", + "core.umath_tests", + "distutils.ccompiler", + "distutils.command", + "distutils.command.autodist", + "distutils.command.bdist_rpm", + "distutils.command.build", + "distutils.command.build_clib", + "distutils.command.build_ext", + "distutils.command.build_py", + "distutils.command.build_scripts", + "distutils.command.build_src", + "distutils.command.config", + "distutils.command.config_compiler", + "distutils.command.develop", + "distutils.command.egg_info", + "distutils.command.install", + "distutils.command.install_clib", + "distutils.command.install_data", + "distutils.command.install_headers", + "distutils.command.sdist", + "distutils.compat", + "distutils.conv_template", + "distutils.core", + "distutils.extension", + "distutils.fcompiler", + "distutils.fcompiler.absoft", + "distutils.fcompiler.compaq", + "distutils.fcompiler.environment", + "distutils.fcompiler.g95", + "distutils.fcompiler.gnu", + "distutils.fcompiler.hpux", + "distutils.fcompiler.ibm", + "distutils.fcompiler.intel", + "distutils.fcompiler.lahey", + "distutils.fcompiler.mips", + "distutils.fcompiler.nag", + "distutils.fcompiler.none", + "distutils.fcompiler.pathf95", + "distutils.fcompiler.pg", + "distutils.fcompiler.sun", + "distutils.fcompiler.vast", + "distutils.from_template", + "distutils.intelccompiler", + "distutils.lib2def", + "distutils.line_endings", + "distutils.mingw32ccompiler", + "distutils.msvccompiler", + 
"distutils.npy_pkg_config", + "distutils.numpy_distribution", + "distutils.pathccompiler", + "distutils.unixccompiler", + "f2py.auxfuncs", + "f2py.capi_maps", + "f2py.cb_rules", + "f2py.cfuncs", + "f2py.common_rules", + "f2py.crackfortran", + "f2py.diagnose", + "f2py.f2py2e", + "f2py.f2py_testing", + "f2py.f90mod_rules", + "f2py.func2subr", + "f2py.rules", + "f2py.use_rules", + "fft.helper", + "lib.arraypad", + "lib.arraysetops", + "lib.arrayterator", + "lib.financial", + "lib.function_base", + "lib.histograms", + "lib.index_tricks", + "lib.nanfunctions", + "lib.npyio", + "lib.polynomial", + "lib.shape_base", + "lib.stride_tricks", + "lib.twodim_base", + "lib.type_check", + "lib.ufunclike", + "lib.user_array", # note: not in np.lib, but probably should just be deleted + "lib.utils", + "linalg.lapack_lite", + "linalg.linalg", + "ma.bench", + "ma.core", + "ma.testutils", + "ma.timer_comparison", + "matrixlib", + "matrixlib.defmatrix", + "random.mtrand", + "testing.print_coercion_tables", + "testing.utils", +]] + + +def is_unexpected(name): + """Check if this needs to be considered.""" + if '._' in name or '.tests' in name or '.setup' in name: + return False + + if name in PUBLIC_MODULES: + return False + + if name in PUBLIC_ALIASED_MODULES: + return False + + if name in PRIVATE_BUT_PRESENT_MODULES: + return False + + return True + + +# These are present in a directory with an __init__.py but cannot be imported +# code_generators/ isn't installed, but present for an inplace build +SKIP_LIST = [ + "numpy.core.code_generators", + "numpy.core.code_generators.genapi", + "numpy.core.code_generators.generate_umath", + "numpy.core.code_generators.ufunc_docstrings", + "numpy.core.code_generators.generate_numpy_api", + "numpy.core.code_generators.generate_ufunc_api", + "numpy.core.code_generators.numpy_api", + "numpy.core.cversions", + "numpy.core.generate_numpy_api", + "numpy.distutils.msvc9compiler", +] + + +def test_all_modules_are_expected(): + """ + Test that we don't add anything that looks like a new public module by + accident. Check is based on filenames. + """ + + modnames = [] + for _, modname, ispkg in pkgutil.walk_packages(path=np.__path__, + prefix=np.__name__ + '.', + onerror=None): + if is_unexpected(modname) and modname not in SKIP_LIST: + # We have a name that is new. If that's on purpose, add it to + # PUBLIC_MODULES. We don't expect to have to add anything to + # PRIVATE_BUT_PRESENT_MODULES. Use an underscore in the name! 
+ modnames.append(modname) + + if modnames: + raise AssertionError("Found unexpected modules: {}".format(modnames)) + + +# Stuff that clearly shouldn't be in the API and is detected by the next test +# below +SKIP_LIST_2 = [ + 'numpy.math', + 'numpy.distutils.log.sys', + 'numpy.distutils.system_info.copy', + 'numpy.distutils.system_info.distutils', + 'numpy.distutils.system_info.log', + 'numpy.distutils.system_info.os', + 'numpy.distutils.system_info.platform', + 'numpy.distutils.system_info.re', + 'numpy.distutils.system_info.shutil', + 'numpy.distutils.system_info.subprocess', + 'numpy.distutils.system_info.sys', + 'numpy.distutils.system_info.tempfile', + 'numpy.distutils.system_info.textwrap', + 'numpy.distutils.system_info.warnings', + 'numpy.doc.constants.re', + 'numpy.doc.constants.textwrap', + 'numpy.lib.emath', + 'numpy.lib.math', + 'numpy.matlib.char', + 'numpy.matlib.rec', + 'numpy.matlib.emath', + 'numpy.matlib.math', + 'numpy.matlib.linalg', + 'numpy.matlib.fft', + 'numpy.matlib.random', + 'numpy.matlib.ctypeslib', + 'numpy.matlib.ma' +] + + +def test_all_modules_are_expected_2(): + """ + Test checking all objects. The pkgutil-based method in + `test_all_modules_are_expected` does not catch imports into a namespace, + only filenames. So this test is more thorough, and checks things like: + + import .lib.scimath as emath + + To check if something in a module is (effectively) public, one can check if + there's anything in that namespace that's a public function/object but is + not exposed in a higher-level namespace. For example for a `numpy.lib` + submodule:: + + mod = np.lib.mixins + for obj in mod.__all__: + if obj in np.__all__: + continue + elif obj in np.lib.__all__: + continue + + else: + print(obj) + + """ + + def find_unexpected_members(mod_name): + members = [] + module = importlib.import_module(mod_name) + if hasattr(module, '__all__'): + objnames = module.__all__ + else: + objnames = dir(module) + + for objname in objnames: + if not objname.startswith('_'): + fullobjname = mod_name + '.' + objname + if isinstance(getattr(module, objname), types.ModuleType): + if is_unexpected(fullobjname): + if fullobjname not in SKIP_LIST_2: + members.append(fullobjname) + + return members + + unexpected_members = find_unexpected_members("numpy") + for modname in PUBLIC_MODULES: + unexpected_members.extend(find_unexpected_members(modname)) + + if unexpected_members: + raise AssertionError("Found unexpected object(s) that look like " + "modules: {}".format(unexpected_members)) + + +def test_api_importable(): + """ + Check that all submodules listed higher up in this file can be imported. + + Note that if a PRIVATE_BUT_PRESENT_MODULES entry goes missing, it may + simply need to be removed from the list (deprecation may or may not be + needed - apply common sense).
+ """ + def check_importable(module_name): + try: + importlib.import_module(module_name) + except (ImportError, AttributeError): + return False + + return True + + module_names = [] + for module_name in PUBLIC_MODULES: + if not check_importable(module_name): + module_names.append(module_name) + + if module_names: + raise AssertionError("Modules in the public API that cannot be " + "imported: {}".format(module_names)) + + for module_name in PUBLIC_ALIASED_MODULES: + try: + eval(module_name) + except AttributeError: + module_names.append(module_name) + + if module_names: + raise AssertionError("Modules in the public API that were not " + "found: {}".format(module_names)) + + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', category=DeprecationWarning) + warnings.filterwarnings('always', category=ImportWarning) + for module_name in PRIVATE_BUT_PRESENT_MODULES: + if not check_importable(module_name): + module_names.append(module_name) + + if module_names: + raise AssertionError("Modules that are not really public but looked " + "public and can not be imported: " + "{}".format(module_names)) diff --git a/pyproject.toml b/pyproject.toml index 949e12c9e..918cbb278 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,17 +1,23 @@ [build-system] # Minimum requirements for the build system to execute. -requires = ["setuptools", "wheel", "cython"] # PEP 508 specification +requires = [ + "setuptools", + "wheel", + "Cython>=0.29.13", # Note: keep in sync with tools/cythonize.py +] [tool.towncrier] # Do no set this since it is hard to import numpy inside the source directory - # Use "--version Numpy" instead - #project = "numpy" - filename = "doc/release/latest-note.rst" - directory = "doc/release/upcoming_changes" + # the name is hardcoded. Use "--version 1.18.0" to set the version + single_file = true + filename = "doc/source/release/{version}-notes.rst" + directory = "doc/release/upcoming_changes/" issue_format = "`gh-{issue} <https://github.com/numpy/numpy/pull/{issue}>`__" template = "doc/release/upcoming_changes/template.rst" - underlines="~=" + underlines = "~=" + all_bullets = false + [[tool.towncrier.type]] directory = "highlight" @@ -62,3 +68,4 @@ requires = ["setuptools", "wheel", "cython"] # PEP 508 specification directory = "change" name = "Changes" showcontent = true + diff --git a/runtests.py b/runtests.py index 23245aeac..a38054f86 100755 --- a/runtests.py +++ b/runtests.py @@ -18,6 +18,10 @@ Run a debugger: $ gdb --args python runtests.py [...other args...] 
+Disable pytest capturing of output by using its '-s' option: + + $ python runtests.py -- -s + Generate C code coverage listing under build/lcov/: (requires http://ltp.sourceforge.net/coverage/lcov.php) @@ -67,6 +71,10 @@ def main(argv): parser = ArgumentParser(usage=__doc__.lstrip()) parser.add_argument("--verbose", "-v", action="count", default=1, help="more verbosity") + parser.add_argument("--debug-info", action="store_true", + help=("add --verbose-cfg to build_src to show compiler " + "configuration output while creating " + "_numpyconfig.h and config.h")) parser.add_argument("--no-build", "-n", action="store_true", default=False, help="do not build the project (use system installed version)") parser.add_argument("--build-only", "-b", action="store_true", default=False, @@ -106,6 +114,8 @@ def main(argv): help="Debug build") parser.add_argument("--parallel", "-j", type=int, default=0, help="Number of parallel jobs during build") + parser.add_argument("--warn-error", action="store_true", + help="Set -Werror to convert all compiler warnings to errors") parser.add_argument("--show-build-log", action="store_true", help="Show build output rather than using a log file") parser.add_argument("--bench", action="store_true", @@ -366,6 +376,10 @@ def build_project(args): cmd += ["build"] if args.parallel > 1: cmd += ["-j", str(args.parallel)] + if args.debug_info: + cmd += ["build_src", "--verbose-cfg"] + if args.warn_error: + cmd += ["--warn-error"] # Install; avoid producing eggs so numpy can be imported from dst_dir. cmd += ['install', '--prefix=' + dst_dir, '--single-version-externally-managed', @@ -44,6 +44,7 @@ Programming Language :: Python :: 3 Programming Language :: Python :: 3.5 Programming Language :: Python :: 3.6 Programming Language :: Python :: 3.7 +Programming Language :: Python :: 3 :: Only Programming Language :: Python :: Implementation :: CPython Topic :: Software Development Topic :: Scientific/Engineering @@ -82,6 +83,10 @@ def git_version(): except (subprocess.SubprocessError, OSError): GIT_REVISION = "Unknown" + if not GIT_REVISION: + # this shouldn't happen but apparently can (see gh-8512) + GIT_REVISION = "Unknown" + return GIT_REVISION # BEFORE importing setuptools, remove MANIFEST. Otherwise it may not be @@ -262,7 +267,7 @@ def parse_setuppy_commands(): # below and not standalone. Hence they're not added to good_commands. 
good_commands = ('develop', 'sdist', 'build', 'build_ext', 'build_py', 'build_clib', 'build_scripts', 'bdist_wheel', 'bdist_rpm', - 'bdist_wininst', 'bdist_msi', 'bdist_mpkg') + 'bdist_wininst', 'bdist_msi', 'bdist_mpkg', 'build_src') for command in good_commands: if command in args: @@ -402,7 +407,8 @@ def setup_package(): classifiers=[_f for _f in CLASSIFIERS.split('\n') if _f], platforms = ["Windows", "Linux", "Solaris", "Mac OS-X", "Unix"], test_suite='nose.collector', - cmdclass={"sdist": sdist_checked}, + cmdclass={"sdist": sdist_checked, + }, python_requires='>=3.5', zip_safe=False, entry_points={ @@ -421,8 +427,8 @@ def setup_package(): if run_build: from numpy.distutils.core import setup cwd = os.path.abspath(os.path.dirname(__file__)) - if not os.path.exists(os.path.join(cwd, 'PKG-INFO')): - # Generate Cython sources, unless building from source release + if not 'sdist' in sys.argv: + # Generate Cython sources, unless we're generating an sdist generate_cython() metadata['configuration'] = configuration diff --git a/shippable.yml b/shippable.yml index 7d134a20e..af3cfaa04 100644 --- a/shippable.yml +++ b/shippable.yml @@ -31,9 +31,7 @@ build: # we will pay the ~13 minute cost of compiling Cython only when a new # version is scraped in by pip; otherwise, use the cached # wheel shippable places on Amazon S3 after we build it once - - pip install cython --cache-dir=/root/.cache/pip/wheels/$SHIPPABLE_PYTHON_VERSION - # install pytz for datetime testing - - pip install pytz + - pip install -r test_requirements.txt --cache-dir=/root/.cache/pip/wheels/$SHIPPABLE_PYTHON_VERSION # install pytest-xdist to leverage a second core # for unit tests - pip install pytest-xdist @@ -50,7 +48,7 @@ build: # check OpenBLAS version - python tools/openblas_support.py --check_version 0.3.7 # run the test suite - - python runtests.py -- -rsx --junit-xml=$SHIPPABLE_REPO_DIR/shippable/testresults/tests.xml -n 2 --durations=10 + - python runtests.py --debug-info --show-build-log -- -rsx --junit-xml=$SHIPPABLE_REPO_DIR/shippable/testresults/tests.xml -n 2 --durations=10 cache: true cache_dir_list: diff --git a/test_requirements.txt b/test_requirements.txt new file mode 100644 index 000000000..ea2a4bfbf --- /dev/null +++ b/test_requirements.txt @@ -0,0 +1,7 @@ +cython==0.29.13 +pytest==5.2.1 +pytz==2019.3 +pytest-cov==2.8.1 +pickle5; python_version == '3.7' +pickle5; python_version == '3.6' and platform_python_implementation != 'PyPy' +nose diff --git a/tools/ci/appveyor/requirements.txt b/tools/ci/appveyor/requirements.txt deleted file mode 100644 index fba8260da..000000000 --- a/tools/ci/appveyor/requirements.txt +++ /dev/null @@ -1,6 +0,0 @@ -cython -nose -pytest-timeout -pytest-xdist -pytest-env -pytest-faulthandler diff --git a/tools/ci/test_all_newsfragments_used.py b/tools/ci/test_all_newsfragments_used.py new file mode 100755 index 000000000..6c4591fd8 --- /dev/null +++ b/tools/ci/test_all_newsfragments_used.py @@ -0,0 +1,16 @@ +#!/usr/bin/env python + +import sys +import toml +import os + +path = toml.load("pyproject.toml")["tool"]["towncrier"]["directory"] + +fragments = os.listdir(path) +fragments.remove("README.rst") +fragments.remove("template.rst") + +if fragments: + print("The following files were not found by towncrier:") + print(" " + " \n".join(fragments)) + sys.exit(1) diff --git a/tools/cythonize.py b/tools/cythonize.py index c81b72d25..5bea2d4ec 100755 --- a/tools/cythonize.py +++ b/tools/cythonize.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 """ cythonize Cythonize 
pyx files into C files as needed. @@ -54,30 +54,25 @@ except NameError: def process_pyx(fromfile, tofile): flags = ['-3', '--fast-fail'] if tofile.endswith('.cxx'): - flags += ['--cplus'] + flags.append('--cplus') try: # try the cython in the installed python first (somewhat related to scipy/scipy#2397) from Cython.Compiler.Version import version as cython_version except ImportError: - # if that fails, use the one on the path, which might be the wrong version - try: - # Try the one on the path as a last resort - subprocess.check_call( - ['cython'] + flags + ["-o", tofile, fromfile]) - except OSError: - raise OSError('Cython needs to be installed') + # The `cython` command need not point to the version installed in the + # Python running this script, so raise an error to avoid the chance of + # using the wrong version of Cython. + raise OSError('Cython needs to be installed in Python as a module') else: # check the version, and invoke through python from distutils.version import LooseVersion - # requiring the newest version on all pythons doesn't work, since - # we're relying on the version of the distribution cython. Add new - # versions as they become required for new python versions. - if sys.version_info[:2] < (3, 7): - required_version = LooseVersion('0.19') - else: - required_version = LooseVersion('0.28') + # Cython 0.29.13 is required for Python 3.8 and there are + # other fixes in the 0.29 series that are needed even for earlier + # Python versions. + # Note: keep in sync with that in pyproject.toml + required_version = LooseVersion('0.29.13') if LooseVersion(cython_version) < required_version: raise RuntimeError('Building {} requires Cython >= {}'.format( diff --git a/tools/npy_tempita/compat3.py b/tools/npy_tempita/compat3.py index eb890ca14..01d771345 100644 --- a/tools/npy_tempita/compat3.py +++ b/tools/npy_tempita/compat3.py @@ -5,7 +5,7 @@ import sys __all__ = ['PY3', 'b', 'basestring_', 'bytes', 'next', 'is_unicode', 'iteritems'] -PY3 = True if sys.version_info[0] == 3 else False +PY3 = True if sys.version_info[0] >= 3 else False if sys.version_info[0] < 3: diff --git a/tools/pypy-test.sh b/tools/pypy-test.sh index 038748af9..f4d56ba1a 100755 --- a/tools/pypy-test.sh +++ b/tools/pypy-test.sh @@ -32,14 +32,14 @@ mkdir -p pypy3 (cd pypy3; tar --strip-components=1 -xf ../pypy.tar.bz2) pypy3/bin/pypy3 -mensurepip pypy3/bin/pypy3 -m pip install --upgrade pip setuptools -pypy3/bin/pypy3 -m pip install --user cython==0.29.0 pytest pytz --no-warn-script-location +pypy3/bin/pypy3 -m pip install --user -r test_requirements.txt --no-warn-script-location echo echo pypy3 version pypy3/bin/pypy3 -c "import sys; print(sys.version)" echo -pypy3/bin/pypy3 runtests.py --show-build-log -- -rsx \ +pypy3/bin/pypy3 runtests.py --debug-info --show-build-log -v -- -rsx \ --junitxml=junit/test-results.xml --durations 10 echo Make sure the correct openblas has been linked in diff --git a/tools/swig/test/testFarray.py b/tools/swig/test/testFarray.py index 0037dc9b3..e8bf711c5 100755 --- a/tools/swig/test/testFarray.py +++ b/tools/swig/test/testFarray.py @@ -15,7 +15,7 @@ else: BadListError = ValueError # Add the distutils-generated build directory to the python search path and then # import the extension module -libDir = "lib.%s-%s" % (get_platform(), sys.version[:3]) +libDir = "lib.{}-{}.{}".format(get_platform(), *sys.version_info[:2]) sys.path.insert(0, os.path.join("build", libDir)) import Farray diff --git a/tools/travis-before-install.sh b/tools/travis-before-install.sh index 
448273db0..072ad3bf6 100755 --- a/tools/travis-before-install.sh +++ b/tools/travis-before-install.sh @@ -30,12 +30,8 @@ fi source venv/bin/activate python -V -if [ -n "$INSTALL_PICKLE5" ]; then - pip install pickle5 -fi - +popd pip install --upgrade pip setuptools -pip install pytz cython pytest +pip install -r test_requirements.txt if [ -n "$USE_ASV" ]; then pip install asv; fi -popd diff --git a/tools/travis-test.sh b/tools/travis-test.sh index 1e7e09525..6094f0ee6 100755 --- a/tools/travis-test.sh +++ b/tools/travis-test.sh @@ -32,7 +32,7 @@ werrors="$werrors -Werror=implicit-function-declaration" setup_base() { - # use default python flags but remoge sign-compare + # use default python flags but remove sign-compare sysflags="$($PYTHON -c "from distutils import sysconfig; \ print (sysconfig.get_config_var('CFLAGS'))")" export CFLAGS="$sysflags $werrors -Wlogical-op -Wno-sign-compare" @@ -52,7 +52,7 @@ setup_base() else # Python3.5-dbg on travis seems to need this export CFLAGS=$CFLAGS" -Wno-maybe-uninitialized" - $PYTHON setup.py build_ext --inplace 2>&1 | tee log + $PYTHON setup.py build build_src --verbose-cfg build_ext --inplace 2>&1 | tee log fi grep -v "_configtest" log \ | grep -vE "ld returned 1|no previously-included files matching|manifest_maker: standard file '-c'" \ @@ -65,12 +65,12 @@ setup_base() run_test() { + $PIP install -r test_requirements.txt if [ -n "$USE_DEBUG" ]; then export PYTHONPATH=$PWD fi if [ -n "$RUN_COVERAGE" ]; then - $PIP install pytest-cov COVERAGE_FLAG=--coverage fi @@ -88,7 +88,7 @@ run_test() if [ -n "$RUN_FULL_TESTS" ]; then export PYTHONWARNINGS="ignore::DeprecationWarning:virtualenv" - $PYTHON ../runtests.py -n -v --durations 10 --mode=full $COVERAGE_FLAG + $PYTHON -b ../runtests.py -n -v --durations 10 --mode=full $COVERAGE_FLAG else # disable --durations temporarily, pytest currently aborts # when that is used with python3.6-dbg @@ -151,21 +151,17 @@ if [ -n "$USE_WHEEL" ] && [ $# -eq 0 ]; then export F90='gfortran --coverage' export LDFLAGS='--coverage' fi - $PYTHON setup.py bdist_wheel + $PYTHON setup.py build build_src --verbose-cfg bdist_wheel # Make another virtualenv to install into virtualenv --python=`which $PYTHON` venv-for-wheel . venv-for-wheel/bin/activate # Move out of source directory to avoid finding local numpy pushd dist $PIP install --pre --no-index --upgrade --find-links=. numpy - $PIP install nose pytest - - if [ -n "$INSTALL_PICKLE5" ]; then - $PIP install pickle5 - fi - popd + run_test + elif [ -n "$USE_SDIST" ] && [ $# -eq 0 ]; then # use an up-to-date pip / setuptools inside the venv $PIP install -U virtualenv @@ -182,11 +178,6 @@ elif [ -n "$USE_SDIST" ] && [ $# -eq 0 ]; then # Move out of source directory to avoid finding local numpy pushd dist $PIP install numpy* - $PIP install nose pytest - if [ -n "$INSTALL_PICKLE5" ]; then - $PIP install pickle5 - fi - popd run_test else @@ -30,10 +30,9 @@ envlist = py37-not-relaxed-strides [testenv] -deps= - pytest +deps= -Ur{toxinidir}/test_requirements.txt changedir={envdir} -commands={envpython} {toxinidir}/runtests.py --mode=full {posargs:} +commands={envpython} -b {toxinidir}/runtests.py --mode=full {posargs:} [testenv:py37-not-relaxed-strides] basepython=python3.7 |
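(A closing aside on the tools/cythonize.py change above: the version gate is worth seeing in isolation. A minimal sketch of the same check; the pinned version comes from the diff and is meant to stay in sync with pyproject.toml.

    from distutils.version import LooseVersion

    def check_cython_version(cython_version, required='0.29.13'):
        # Same comparison cythonize.py performs before invoking Cython.
        if LooseVersion(cython_version) < LooseVersion(required):
            raise RuntimeError(
                'Building requires Cython >= {}'.format(required))

    check_cython_version('0.29.14')    # OK: new enough
    # check_cython_version('0.28.5')   # would raise RuntimeError
)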