summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--.circleci/config.yml2
-rw-r--r--.github/workflows/build_test.yml2
-rw-r--r--.github/workflows/wheels.yml44
-rw-r--r--.gitignore1
-rw-r--r--azure-pipelines.yml2
-rw-r--r--azure-steps-windows.yml9
-rw-r--r--doc/release/upcoming_changes/17530.improvement.rst5
-rw-r--r--doc/release/upcoming_changes/17582.new_feature.rst10
-rw-r--r--doc/release/upcoming_changes/18536.improvement.rst7
-rw-r--r--doc/release/upcoming_changes/18585.new_feature.rst15
-rw-r--r--doc/release/upcoming_changes/18884.new_feature.rst7
-rw-r--r--doc/release/upcoming_changes/19062.new_feature.rst21
-rw-r--r--doc/release/upcoming_changes/19083.new_feature.rst6
-rw-r--r--doc/release/upcoming_changes/19135.change.rst10
-rw-r--r--doc/release/upcoming_changes/19151.improvement.rst6
-rw-r--r--doc/release/upcoming_changes/19211.new_feature.rst7
-rw-r--r--doc/release/upcoming_changes/19259.c_api.rst12
-rw-r--r--doc/release/upcoming_changes/19355.new_feature.rst13
-rw-r--r--doc/release/upcoming_changes/19356.change.rst7
-rw-r--r--doc/release/upcoming_changes/19459.new_feature.rst4
-rw-r--r--doc/release/upcoming_changes/19462.change.rst3
-rw-r--r--doc/release/upcoming_changes/19478.performance.rst11
-rw-r--r--doc/release/upcoming_changes/19479.compatibility.rst7
-rw-r--r--doc/release/upcoming_changes/19513.new_feature.rst4
-rw-r--r--doc/release/upcoming_changes/19527.new_feature.rst3
-rw-r--r--doc/release/upcoming_changes/19539.expired.rst2
-rw-r--r--doc/release/upcoming_changes/19615.expired.rst8
-rw-r--r--doc/release/upcoming_changes/19665.change.rst4
-rw-r--r--doc/release/upcoming_changes/19680.improvement.rst5
-rw-r--r--doc/release/upcoming_changes/19687.change.rst8
-rw-r--r--doc/release/upcoming_changes/19754.new_feature.rst7
-rw-r--r--doc/release/upcoming_changes/19803.new_feature.rst14
-rw-r--r--doc/release/upcoming_changes/19805.new_feature.rst5
-rw-r--r--doc/release/upcoming_changes/19857.improvement.rst13
-rw-r--r--doc/release/upcoming_changes/19879.new_feature.rst15
-rw-r--r--doc/release/upcoming_changes/19921.deprecation.rst3
-rw-r--r--doc/release/upcoming_changes/20000.deprecation.rst5
-rw-r--r--doc/release/upcoming_changes/20027.improvement.rst17
-rw-r--r--doc/release/upcoming_changes/20049.change.rst5
-rw-r--r--doc/release/upcoming_changes/20201.deprecation.rst5
-rw-r--r--doc/release/upcoming_changes/20217.improvement.rst10
-rw-r--r--doc/release/upcoming_changes/20314.change.rst10
-rw-r--r--doc/release/upcoming_changes/20394.deprecation.rst6
-rw-r--r--doc/release/upcoming_changes/20414.expired.rst4
-rw-r--r--doc/source/dev/development_advanced_debugging.rst2
-rw-r--r--doc/source/dev/development_workflow.rst21
-rw-r--r--doc/source/f2py/buildtools/cmake.rst6
-rw-r--r--doc/source/f2py/buildtools/index.rst11
-rw-r--r--doc/source/f2py/buildtools/skbuild.rst6
-rw-r--r--doc/source/f2py/code/CMakeLists.txt53
-rw-r--r--doc/source/f2py/code/CMakeLists_skbuild.txt86
-rw-r--r--doc/source/f2py/code/pyproj_skbuild.toml6
-rw-r--r--doc/source/f2py/code/setup_skbuild.py2
-rw-r--r--doc/source/reference/c-api/data_memory.rst16
-rw-r--r--doc/source/reference/c-api/types-and-structures.rst5
-rw-r--r--doc/source/reference/routines.array-manipulation.rst1
-rw-r--r--doc/source/release.rst1
-rw-r--r--doc/source/release/1.23.0-notes.rst45
-rw-r--r--doc/source/user/basics.copies.rst2
-rw-r--r--doc/source/user/basics.indexing.rst6
-rw-r--r--doc/source/user/basics.io.genfromtxt.rst14
-rw-r--r--doc/source/user/basics.rec.rst11
-rw-r--r--doc/source/user/how-to-index.rst351
-rw-r--r--doc/source/user/howtos_index.rst1
-rw-r--r--environment.yml2
-rw-r--r--numpy/__init__.pyi6
-rw-r--r--numpy/array_api/_array_object.py14
-rw-r--r--numpy/array_api/_statistical_functions.py4
-rw-r--r--numpy/array_api/tests/test_array_object.py21
-rw-r--r--numpy/compat/py3k.py4
-rw-r--r--numpy/core/_add_newdocs.py5
-rw-r--r--numpy/core/code_generators/cversions.txt1
-rw-r--r--numpy/core/code_generators/ufunc_docstrings.py1
-rw-r--r--numpy/core/fromnumeric.py59
-rw-r--r--numpy/core/function_base.pyi184
-rw-r--r--numpy/core/include/numpy/experimental_dtype_api.h6
-rw-r--r--numpy/core/include/numpy/numpyconfig.h15
-rw-r--r--numpy/core/numeric.py8
-rw-r--r--numpy/core/setup.py2
-rw-r--r--numpy/core/setup_common.py1
-rw-r--r--numpy/core/src/multiarray/alloc.c45
-rw-r--r--numpy/core/src/multiarray/methods.c2
-rw-r--r--numpy/core/src/multiarray/multiarraymodule.c2
-rw-r--r--numpy/core/src/multiarray/nditer_pywrap.c20
-rw-r--r--numpy/core/src/multiarray/scalartypes.c.src10
-rw-r--r--numpy/core/src/umath/_operand_flag_tests.c (renamed from numpy/core/src/umath/_operand_flag_tests.c.src)0
-rw-r--r--numpy/core/src/umath/dispatching.c410
-rw-r--r--numpy/core/src/umath/dispatching.h14
-rw-r--r--numpy/core/src/umath/legacy_array_method.c34
-rw-r--r--numpy/core/src/umath/loops_exponent_log.dispatch.c.src18
-rw-r--r--numpy/core/src/umath/ufunc_object.c85
-rw-r--r--numpy/core/tests/test_datetime.py8
-rw-r--r--numpy/core/tests/test_deprecations.py27
-rw-r--r--numpy/core/tests/test_multiarray.py36
-rw-r--r--numpy/core/tests/test_ufunc.py63
-rw-r--r--numpy/core/tests/test_umath.py2
-rw-r--r--numpy/distutils/ccompiler_opt.py4
-rw-r--r--numpy/distutils/checks/cpu_asimdfhm.c4
-rw-r--r--numpy/distutils/misc_util.py27
-rw-r--r--numpy/f2py/__init__.py2
-rw-r--r--numpy/f2py/cfuncs.py59
-rw-r--r--numpy/f2py/tests/test_abstract_interface.py29
-rw-r--r--numpy/f2py/tests/test_array_from_pyobj.py384
-rw-r--r--numpy/f2py/tests/test_assumed_shape.py19
-rw-r--r--numpy/f2py/tests/test_block_docstring.py5
-rw-r--r--numpy/f2py/tests/test_callback.py62
-rw-r--r--numpy/f2py/tests/test_common.py13
-rw-r--r--numpy/f2py/tests/test_compile_function.py85
-rw-r--r--numpy/f2py/tests/test_crackfortran.py82
-rw-r--r--numpy/f2py/tests/test_kind.py26
-rw-r--r--numpy/f2py/tests/test_mixed.py12
-rw-r--r--numpy/f2py/tests/test_module_doc.py22
-rw-r--r--numpy/f2py/tests/test_parameter.py33
-rw-r--r--numpy/f2py/tests/test_quoted_character.py6
-rw-r--r--numpy/f2py/tests/test_regression.py23
-rw-r--r--numpy/f2py/tests/test_return_character.py42
-rw-r--r--numpy/f2py/tests/test_return_complex.py56
-rw-r--r--numpy/f2py/tests/test_return_integer.py36
-rw-r--r--numpy/f2py/tests/test_return_logical.py33
-rw-r--r--numpy/f2py/tests/test_return_real.py51
-rw-r--r--numpy/f2py/tests/test_semicolon_split.py27
-rw-r--r--numpy/f2py/tests/test_size.py6
-rw-r--r--numpy/f2py/tests/test_string.py93
-rw-r--r--numpy/f2py/tests/test_symbolic.py459
-rw-r--r--numpy/f2py/tests/util.py161
-rw-r--r--numpy/lib/index_tricks.py4
-rw-r--r--numpy/lib/npyio.py24
-rw-r--r--numpy/lib/recfunctions.py21
-rw-r--r--numpy/lib/scimath.py9
-rw-r--r--numpy/lib/scimath.pyi101
-rw-r--r--numpy/lib/shape_base.pyi4
-rw-r--r--numpy/lib/tests/test_io.py4
-rw-r--r--numpy/lib/type_check.py74
-rw-r--r--numpy/lib/type_check.pyi3
-rw-r--r--numpy/lib/utils.py13
-rw-r--r--numpy/linalg/tests/test_build.py53
-rw-r--r--numpy/ma/core.py10
-rw-r--r--numpy/random/_examples/cython/setup.py1
-rw-r--r--numpy/random/_mt19937.pyx2
-rw-r--r--numpy/random/mtrand.pyx17
-rw-r--r--numpy/random/tests/test_extending.py8
-rw-r--r--numpy/random/tests/test_randomstate_regression.py13
-rw-r--r--numpy/testing/_private/utils.py4
-rw-r--r--numpy/typing/tests/data/fail/array_constructors.pyi6
-rw-r--r--numpy/typing/tests/data/fail/shape_base.pyi8
-rw-r--r--numpy/typing/tests/data/reveal/array_constructors.pyi22
-rw-r--r--numpy/typing/tests/data/reveal/emath.pyi52
-rw-r--r--numpy/typing/tests/data/reveal/ndarray_misc.pyi3
-rw-r--r--numpy/typing/tests/test_typing.py8
-rw-r--r--pavement.py2
-rw-r--r--pyproject.toml4
-rw-r--r--test_requirements.txt5
-rw-r--r--tools/allocation_tracking/README.md8
-rw-r--r--tools/allocation_tracking/alloc_hook.pyx42
-rw-r--r--tools/allocation_tracking/setup.py9
-rw-r--r--tools/allocation_tracking/sorttable.js493
-rw-r--r--tools/allocation_tracking/track_allocations.py140
-rwxr-xr-xtools/functions_missing_types.py1
-rw-r--r--tools/wheels/LICENSE_win32.txt938
-rw-r--r--tools/wheels/cibw_before_build.sh34
-rw-r--r--tools/wheels/cibw_test_command.sh8
-rw-r--r--tools/wheels/gfortran_utils.sh168
162 files changed, 3814 insertions, 2482 deletions
diff --git a/.circleci/config.yml b/.circleci/config.yml
index de7f52f81..182f7e678 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -44,8 +44,8 @@ jobs:
. venv/bin/activate
pip install --progress-bar=off --upgrade pip 'setuptools<49.2.0'
pip install --progress-bar=off -r test_requirements.txt
- pip install .
pip install --progress-bar=off -r doc_requirements.txt
+ pip install .
- run:
name: create release notes
diff --git a/.github/workflows/build_test.yml b/.github/workflows/build_test.yml
index 86fb094c6..620d9c1ef 100644
--- a/.github/workflows/build_test.yml
+++ b/.github/workflows/build_test.yml
@@ -212,7 +212,7 @@ jobs:
fetch-depth: 0
- uses: actions/setup-python@v2
with:
- python-version: pypy-3.8-v7.3.6rc1
+ python-version: pypy-3.8-v7.3.7
- uses: ./.github/actions
sdist:
diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml
index 3c382f8b3..cd5d8484a 100644
--- a/.github/workflows/wheels.yml
+++ b/.github/workflows/wheels.yml
@@ -50,26 +50,37 @@ jobs:
matrix:
include:
# manylinux builds
- - os: ubuntu-latest
+ - os: ubuntu-20.04
python: "38"
platform: manylinux_x86_64
- - os: ubuntu-latest
+ - os: ubuntu-20.04
python: "39"
platform: manylinux_x86_64
- - os: ubuntu-latest
+ - os: ubuntu-20.04
python: "310"
platform: manylinux_x86_64
- # macos builds
- - os: macos-latest
+ # MacOS builds
+ - os: macos-10.15
python: "38"
- platform: macosx_x86_64
- - os: macos-latest
+ platform: macosx_*
+ - os: macos-10.15
python: "39"
- platform: macosx_x86_64
- - os: macos-latest
+ platform: macosx_*
+ - os: macos-10.15
python: "310"
- platform: macosx_x86_64
+ platform: macosx_*
+
+ # Windows builds
+ - os: windows-2019
+ python: "38"
+ platform: win_amd64
+ - os: windows-2019
+ python: "39"
+ platform: win_amd64
+ - os: windows-2019
+ python: "310"
+ platform: win_amd64
steps:
- name: Checkout numpy
@@ -91,9 +102,22 @@ jobs:
CIBW_ENVIRONMENT_LINUX: CFLAGS='-std=c99 -fno-strict-aliasing'
LDFLAGS='-Wl,--strip-debug'
OPENBLAS64_=/usr/local
+ RUNNER_OS='Linux'
# MACOS linker doesn't support stripping symbols
CIBW_ENVIRONMENT_MACOS: CFLAGS='-std=c99 -fno-strict-aliasing'
OPENBLAS64_=/usr/local
+ CC=clang
+ CXX=clang++
+ # TODO: Add universal2 wheels, we need to fuse them manually
+ # instead of going through cibuildwheel
+ # This is because cibuildwheel tries to make a fat wheel
+ # https://github.com/multi-build/multibuild/blame/devel/README.rst#L541-L565
+ # for more info
+ CIBW_ARCHS_MACOS: x86_64 arm64
+ CIBW_TEST_SKIP: "*_arm64 *_universal2:arm64"
+ # Hardcode for now,blas stuff needs changes for 32-bit
+ CIBW_ENVIRONMENT_WINDOWS: NPY_USE_BLAS_ILP64=1
+ OPENBLAS64_=openblas
CIBW_BUILD_VERBOSITY: 3
CIBW_BEFORE_BUILD: bash {project}/tools/wheels/cibw_before_build.sh {project}
CIBW_BEFORE_TEST: pip install -r {project}/test_requirements.txt
diff --git a/.gitignore b/.gitignore
index 52997523c..b7c776b2f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -157,7 +157,6 @@ numpy/core/src/npysort/sort.c
numpy/core/src/private/npy_binsearch.h
numpy/core/src/private/npy_partition.h
numpy/core/src/private/templ_common.h
-numpy/core/src/umath/_operand_flag_tests.c
numpy/core/src/umath/_rational_tests.c
numpy/core/src/umath/_struct_ufunc_tests.c
numpy/core/src/umath/_umath_tests.c
diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index 9d2973b59..9e65f9a20 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -230,7 +230,7 @@ stages:
- job: Windows
pool:
- vmImage: 'windows-latest'
+ vmImage: 'windows-2019'
strategy:
maxParallel: 6
matrix:
diff --git a/azure-steps-windows.yml b/azure-steps-windows.yml
index 34f9797de..95a359c89 100644
--- a/azure-steps-windows.yml
+++ b/azure-steps-windows.yml
@@ -6,22 +6,23 @@ steps:
architecture: $(PYTHON_ARCH)
condition: not(contains(variables['PYTHON_VERSION'], 'PyPy'))
- powershell: |
- $url = "http://buildbot.pypy.org/nightly/py3.8/pypy-c-jit-latest-win64.zip"
+ # $url = "http://buildbot.pypy.org/nightly/py3.8/pypy-c-jit-latest-win64.zip"
+ $url = "https://downloads.python.org/pypy/pypy3.8-v7.3.7-win64.zip"
$output = "pypy.zip"
$wc = New-Object System.Net.WebClient
$wc.DownloadFile($url, $output)
echo "downloaded $url to $output"
mkdir pypy3
Expand-Archive $output -DestinationPath pypy3
- move pypy3/pypy-c-*/* pypy3
- cp pypy3/pypy3.exe pypy3/python.exe
+ # move pypy3/pypy-c-*/* pypy3
+ move pypy3/pypy*/* pypy3
$pypypath = Join-Path (Get-Item .).FullName pypy3
$env:Path = $pypypath + ";" + $env:Path
setx PATH $env:Path
python -mensurepip
echo "##vso[task.prependpath]$pypypath"
condition: contains(variables['PYTHON_VERSION'], 'PyPy')
- displayName: "Install PyPy pre-release"
+ displayName: "Install PyPy3.8 "
- script: python -m pip install --upgrade pip wheel
displayName: 'Install tools'
diff --git a/doc/release/upcoming_changes/17530.improvement.rst b/doc/release/upcoming_changes/17530.improvement.rst
deleted file mode 100644
index 07a23f0e5..000000000
--- a/doc/release/upcoming_changes/17530.improvement.rst
+++ /dev/null
@@ -1,5 +0,0 @@
-`ctypeslib.load_library` can now take any path-like object
------------------------------------------------------------------------
-All parameters in the can now take any :term:`python:path-like object`.
-This includes the likes of strings, bytes and objects implementing the
-:meth:`__fspath__<os.PathLike.__fspath__>` protocol.
diff --git a/doc/release/upcoming_changes/17582.new_feature.rst b/doc/release/upcoming_changes/17582.new_feature.rst
deleted file mode 100644
index c2426330c..000000000
--- a/doc/release/upcoming_changes/17582.new_feature.rst
+++ /dev/null
@@ -1,10 +0,0 @@
-NEP 49 configurable allocators
-------------------------------
-As detailed in `NEP 49`_, the function used for allocation of the data segment
-of a ndarray can be changed. The policy can be set globally or in a context.
-For more information see the NEP and the :ref:`data_memory` reference docs.
-Also add a ``NUMPY_WARN_IF_NO_MEM_POLICY`` override to warn on dangerous use
-of transfering ownership by setting ``NPY_ARRAY_OWNDATA``.
-
-.. _`NEP 49`: https://numpy.org/neps/nep-0049.html
-
diff --git a/doc/release/upcoming_changes/18536.improvement.rst b/doc/release/upcoming_changes/18536.improvement.rst
deleted file mode 100644
index 8693916db..000000000
--- a/doc/release/upcoming_changes/18536.improvement.rst
+++ /dev/null
@@ -1,7 +0,0 @@
-Add ``smallest_normal`` and ``smallest_subnormal`` attributes to `finfo`
--------------------------------------------------------------------------
-
-The attributes ``smallest_normal`` and ``smallest_subnormal`` are available as
-an extension of `finfo` class for any floating-point data type. To use these
-new attributes, write ``np.finfo(np.float64).smallest_normal`` or
-``np.finfo(np.float64).smallest_subnormal``.
diff --git a/doc/release/upcoming_changes/18585.new_feature.rst b/doc/release/upcoming_changes/18585.new_feature.rst
deleted file mode 100644
index bb83d755c..000000000
--- a/doc/release/upcoming_changes/18585.new_feature.rst
+++ /dev/null
@@ -1,15 +0,0 @@
-Implementation of the NEP 47 (adopting the array API standard)
---------------------------------------------------------------
-
-An initial implementation of `NEP 47`_ (adoption the array API standard) has
-been added as ``numpy.array_api``. The implementation is experimental and will
-issue a UserWarning on import, as the `array API standard
-<https://data-apis.org/array-api/latest/index.html>`_ is still in draft state.
-``numpy.array_api`` is a conforming implementation of the array API standard,
-which is also minimal, meaning that only those functions and behaviors that
-are required by the standard are implemented (see the NEP for more info).
-Libraries wishing to make use of the array API standard are encouraged to use
-``numpy.array_api`` to check that they are only using functionality that is
-guaranteed to be present in standard conforming implementations.
-
-.. _`NEP 47`: https://numpy.org/neps/nep-0047-array-api-standard.html
diff --git a/doc/release/upcoming_changes/18884.new_feature.rst b/doc/release/upcoming_changes/18884.new_feature.rst
deleted file mode 100644
index 41503b00e..000000000
--- a/doc/release/upcoming_changes/18884.new_feature.rst
+++ /dev/null
@@ -1,7 +0,0 @@
-Generate C/C++ API reference documentation from comments blocks is now possible
--------------------------------------------------------------------------------
-This feature depends on Doxygen_ in the generation process and on Breathe_
-to integrate it with Sphinx.
-
-.. _`Doxygen`: https://www.doxygen.nl/index.html
-.. _`Breathe`: https://breathe.readthedocs.io/en/latest/
diff --git a/doc/release/upcoming_changes/19062.new_feature.rst b/doc/release/upcoming_changes/19062.new_feature.rst
deleted file mode 100644
index 171715568..000000000
--- a/doc/release/upcoming_changes/19062.new_feature.rst
+++ /dev/null
@@ -1,21 +0,0 @@
-Assign the platform-specific ``c_intp`` precision via a mypy plugin
--------------------------------------------------------------------
-
-The mypy_ plugin, introduced in `numpy/numpy#17843`_, has again been expanded:
-the plugin now is now responsible for setting the platform-specific precision
-of `numpy.ctypeslib.c_intp`, the latter being used as data type for various
-`numpy.ndarray.ctypes` attributes.
-
-Without the plugin, aforementioned type will default to `ctypes.c_int64`.
-
-To enable the plugin, one must add it to their mypy `configuration file`_:
-
-.. code-block:: ini
-
- [mypy]
- plugins = numpy.typing.mypy_plugin
-
-
-.. _mypy: http://mypy-lang.org/
-.. _configuration file: https://mypy.readthedocs.io/en/stable/config_file.html
-.. _`numpy/numpy#17843`: https://github.com/numpy/numpy/pull/17843
diff --git a/doc/release/upcoming_changes/19083.new_feature.rst b/doc/release/upcoming_changes/19083.new_feature.rst
deleted file mode 100644
index 92f00c0d6..000000000
--- a/doc/release/upcoming_changes/19083.new_feature.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-Add NEP 47-compatible dlpack support
-------------------------------------
-
-Add a ``ndarray.__dlpack__()`` method which returns a ``dlpack`` C structure
-wrapped in a ``PyCapsule``. Also add a ``np._from_dlpack(obj)`` function, where
-``obj`` supports ``__dlpack__()``, and returns an ``ndarray``.
diff --git a/doc/release/upcoming_changes/19135.change.rst b/doc/release/upcoming_changes/19135.change.rst
deleted file mode 100644
index 0b900a16a..000000000
--- a/doc/release/upcoming_changes/19135.change.rst
+++ /dev/null
@@ -1,10 +0,0 @@
-Removed floor division support for complex types
-------------------------------------------------
-
-Floor division of complex types will now result in a `TypeError`
-
-.. code-block:: python
-
- >>> a = np.arange(10) + 1j* np.arange(10)
- >>> a // 1
- TypeError: ufunc 'floor_divide' not supported for the input types, and the inputs could not be safely coerced to any supported types according to the casting rule ''safe''
diff --git a/doc/release/upcoming_changes/19151.improvement.rst b/doc/release/upcoming_changes/19151.improvement.rst
deleted file mode 100644
index 2108b9c4f..000000000
--- a/doc/release/upcoming_changes/19151.improvement.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-`numpy.linalg.qr` accepts stacked matrices as inputs
-----------------------------------------------------
-
-`numpy.linalg.qr` is able to produce results for stacked matrices as inputs.
-Moreover, the implementation of QR decomposition has been shifted to C
-from Python.
diff --git a/doc/release/upcoming_changes/19211.new_feature.rst b/doc/release/upcoming_changes/19211.new_feature.rst
deleted file mode 100644
index 40e42387c..000000000
--- a/doc/release/upcoming_changes/19211.new_feature.rst
+++ /dev/null
@@ -1,7 +0,0 @@
-``keepdims`` optional argument added to `numpy.argmin`, `numpy.argmax`
-----------------------------------------------------------------------
-
-``keepdims`` argument is added to `numpy.argmin`, `numpy.argmax`.
-If set to ``True``, the axes which are reduced are left in the result as dimensions with size one.
-The resulting array has the same number of dimensions and will broadcast with the
-input array.
diff --git a/doc/release/upcoming_changes/19259.c_api.rst b/doc/release/upcoming_changes/19259.c_api.rst
deleted file mode 100644
index dac9f520a..000000000
--- a/doc/release/upcoming_changes/19259.c_api.rst
+++ /dev/null
@@ -1,12 +0,0 @@
-Masked inner-loops cannot be customized anymore
------------------------------------------------
-The masked inner-loop selector is now never used. A warning
-will be given in the unlikely event that it was customized.
-
-We do not expect that any code uses this. If you do use it,
-you must unset the selector on newer NumPy version.
-Please also contact the NumPy developers, we do anticipate
-providing a new, more specific, mechanism.
-
-The customization was part of a never-implemented feature to allow
-for faster masked operations.
diff --git a/doc/release/upcoming_changes/19355.new_feature.rst b/doc/release/upcoming_changes/19355.new_feature.rst
deleted file mode 100644
index cfa50b7a1..000000000
--- a/doc/release/upcoming_changes/19355.new_feature.rst
+++ /dev/null
@@ -1,13 +0,0 @@
-``bit_count`` to compute the number of 1-bits in an integer
------------------------------------------------------------
-
-Computes the number of 1-bits in the absolute value of the input.
-This works on all the numpy integer types. Analogous to the builtin
-``int.bit_count`` or ``popcount`` in C++.
-
-.. code-block:: python
-
- >>> np.uint32(1023).bit_count()
- 10
- >>> np.int32(-127).bit_count()
- 7
diff --git a/doc/release/upcoming_changes/19356.change.rst b/doc/release/upcoming_changes/19356.change.rst
deleted file mode 100644
index 3c5ef4a91..000000000
--- a/doc/release/upcoming_changes/19356.change.rst
+++ /dev/null
@@ -1,7 +0,0 @@
-`numpy.vectorize` functions now produce the same output class as the base function
-----------------------------------------------------------------------------------
-When a function that respects `numpy.ndarray` subclasses is vectorized using
-`numpy.vectorize`, the vectorized function will now be subclass-safe
-also for cases that a signature is given (i.e., when creating a ``gufunc``):
-the output class will be the same as that returned by the first call to
-the underlying function.
diff --git a/doc/release/upcoming_changes/19459.new_feature.rst b/doc/release/upcoming_changes/19459.new_feature.rst
deleted file mode 100644
index aecae670f..000000000
--- a/doc/release/upcoming_changes/19459.new_feature.rst
+++ /dev/null
@@ -1,4 +0,0 @@
-The ``ndim`` and ``axis`` attributes have been added to `numpy.AxisError`
--------------------------------------------------------------------------
-The ``ndim`` and ``axis`` parameters are now also stored as attributes
-within each `numpy.AxisError` instance.
diff --git a/doc/release/upcoming_changes/19462.change.rst b/doc/release/upcoming_changes/19462.change.rst
deleted file mode 100644
index 8fbadb394..000000000
--- a/doc/release/upcoming_changes/19462.change.rst
+++ /dev/null
@@ -1,3 +0,0 @@
-OpenBLAS v0.3.17
-----------------
-Update the OpenBLAS used in testing and in wheels to v0.3.17
diff --git a/doc/release/upcoming_changes/19478.performance.rst b/doc/release/upcoming_changes/19478.performance.rst
deleted file mode 100644
index 6a389c20e..000000000
--- a/doc/release/upcoming_changes/19478.performance.rst
+++ /dev/null
@@ -1,11 +0,0 @@
-Vectorize umath module using AVX-512
--------------------------------------
-
-By leveraging Intel Short Vector Math Library (SVML), 18 umath functions
-(``exp2``, ``log2``, ``log10``, ``expm1``, ``log1p``, ``cbrt``, ``sin``,
-``cos``, ``tan``, ``arcsin``, ``arccos``, ``arctan``, ``sinh``, ``cosh``,
-``tanh``, ``arcsinh``, ``arccosh``, ``arctanh``) are vectorized using AVX-512
-instruction set for both single and double precision implementations. This
-change is currently enabled only for Linux users and on processors with
-AVX-512 instruction set. It provides an average speed up of 32x and 14x for
-single and double precision functions respectively.
diff --git a/doc/release/upcoming_changes/19479.compatibility.rst b/doc/release/upcoming_changes/19479.compatibility.rst
deleted file mode 100644
index 83533a305..000000000
--- a/doc/release/upcoming_changes/19479.compatibility.rst
+++ /dev/null
@@ -1,7 +0,0 @@
-Distutils forces strict floating point model on clang
------------------------------------------------------
-NumPy now sets the ``-ftrapping-math`` option on clang to enforce correct
-floating point error handling for universal functions.
-Clang defaults to non-IEEE and C99 conform behaviour otherwise.
-This change (using the equivalent but newer ``-ffp-exception-behavior=strict``)
-was attempted in NumPy 1.21, but was effectively never used.
diff --git a/doc/release/upcoming_changes/19513.new_feature.rst b/doc/release/upcoming_changes/19513.new_feature.rst
deleted file mode 100644
index 5f945cea2..000000000
--- a/doc/release/upcoming_changes/19513.new_feature.rst
+++ /dev/null
@@ -1,4 +0,0 @@
-Preliminary support for `windows/arm64` target
-----------------------------------------------
-``numpy`` added support for windows/arm64 target. Please note
-``OpenBLAS`` support is not yet available for windows/arm64 target.
diff --git a/doc/release/upcoming_changes/19527.new_feature.rst b/doc/release/upcoming_changes/19527.new_feature.rst
deleted file mode 100644
index 3967f1841..000000000
--- a/doc/release/upcoming_changes/19527.new_feature.rst
+++ /dev/null
@@ -1,3 +0,0 @@
-Added support for LoongArch
-------------------------------------------------
-LoongArch is a new instruction set, numpy compilation failure on LoongArch architecture, so add the commit.
diff --git a/doc/release/upcoming_changes/19539.expired.rst b/doc/release/upcoming_changes/19539.expired.rst
deleted file mode 100644
index 6e94f175d..000000000
--- a/doc/release/upcoming_changes/19539.expired.rst
+++ /dev/null
@@ -1,2 +0,0 @@
-* Using the strings ``"Bytes0"``, ``"Datetime64"``, ``"Str0"``, ``"Uint32"``,
- and ``"Uint64"`` as a dtype will now raise a ``TypeError``. \ No newline at end of file
diff --git a/doc/release/upcoming_changes/19615.expired.rst b/doc/release/upcoming_changes/19615.expired.rst
deleted file mode 100644
index 4e02771e3..000000000
--- a/doc/release/upcoming_changes/19615.expired.rst
+++ /dev/null
@@ -1,8 +0,0 @@
-Expired deprecations for ``loads``, ``ndfromtxt``, and ``mafromtxt`` in npyio
------------------------------------------------------------------------------
-
-``numpy.loads`` was deprecated in v1.15, with the recommendation that users
-use `pickle.loads` instead.
-``ndfromtxt`` and ``mafromtxt`` were both deprecated in v1.17 - users should
-use `numpy.genfromtxt` instead with the appropriate value for the
-``usemask`` parameter.
diff --git a/doc/release/upcoming_changes/19665.change.rst b/doc/release/upcoming_changes/19665.change.rst
deleted file mode 100644
index 2c2315dd2..000000000
--- a/doc/release/upcoming_changes/19665.change.rst
+++ /dev/null
@@ -1,4 +0,0 @@
-Python 3.7 is no longer supported
----------------------------------
-Python support has been dropped. This is rather strict, there are
-changes that require Python >=3.8.
diff --git a/doc/release/upcoming_changes/19680.improvement.rst b/doc/release/upcoming_changes/19680.improvement.rst
deleted file mode 100644
index 1a2a3496b..000000000
--- a/doc/release/upcoming_changes/19680.improvement.rst
+++ /dev/null
@@ -1,5 +0,0 @@
-`numpy.fromregex` now accepts ``os.PathLike`` implementations
--------------------------------------------------------------
-
-`numpy.fromregex` now accepts objects implementing the `__fspath__<os.PathLike>`
-protocol, *e.g.* `pathlib.Path`.
diff --git a/doc/release/upcoming_changes/19687.change.rst b/doc/release/upcoming_changes/19687.change.rst
deleted file mode 100644
index c7f7512b6..000000000
--- a/doc/release/upcoming_changes/19687.change.rst
+++ /dev/null
@@ -1,8 +0,0 @@
-str/repr of complex dtypes now include space after punctuation
---------------------------------------------------------------
-
-The repr of ``np.dtype({"names": ["a"], "formats": [int], "offsets": [2]})`` is
-now ``dtype({'names': ['a'], 'formats': ['<i8'], 'offsets': [2], 'itemsize': 10})``,
-whereas spaces where previously omitted after colons and between fields.
-
-The old behavior can be restored via ``np.set_printoptions(legacy="1.21")``.
diff --git a/doc/release/upcoming_changes/19754.new_feature.rst b/doc/release/upcoming_changes/19754.new_feature.rst
deleted file mode 100644
index 4e91e4cb3..000000000
--- a/doc/release/upcoming_changes/19754.new_feature.rst
+++ /dev/null
@@ -1,7 +0,0 @@
-A ``.clang-format`` file has been added
----------------------------------------
-Clang-format is a C/C++ code formatter, together with the added
-``.clang-format`` file, it produces code close enough to the NumPy
-C_STYLE_GUIDE for general use. Clang-format version 12+ is required
-due to the use of several new features, it is available in
-Fedora 34 and Ubuntu Focal among other distributions.
diff --git a/doc/release/upcoming_changes/19803.new_feature.rst b/doc/release/upcoming_changes/19803.new_feature.rst
deleted file mode 100644
index 942325822..000000000
--- a/doc/release/upcoming_changes/19803.new_feature.rst
+++ /dev/null
@@ -1,14 +0,0 @@
-``is_integer`` is now available to `numpy.floating` and `numpy.integer`
------------------------------------------------------------------------
-Based on its counterpart in `float` and `int`, the numpy floating point and
-integer types now support `~float.is_integer`. Returns ``True`` if the
-number is finite with integral value, and ``False`` otherwise.
-
-.. code-block:: python
-
- >>> np.float32(-2.0).is_integer()
- True
- >>> np.float64(3.2).is_integer()
- False
- >>> np.int32(-2).is_integer()
- True
diff --git a/doc/release/upcoming_changes/19805.new_feature.rst b/doc/release/upcoming_changes/19805.new_feature.rst
deleted file mode 100644
index f59409254..000000000
--- a/doc/release/upcoming_changes/19805.new_feature.rst
+++ /dev/null
@@ -1,5 +0,0 @@
-Symbolic parser for Fortran dimension specifications
-----------------------------------------------------
-A new symbolic parser has been added to f2py in order to correctly parse
-dimension specifications. The parser is the basis for future improvements
-and provides compatibility with Draft Fortran 202x.
diff --git a/doc/release/upcoming_changes/19857.improvement.rst b/doc/release/upcoming_changes/19857.improvement.rst
deleted file mode 100644
index cbeff08b1..000000000
--- a/doc/release/upcoming_changes/19857.improvement.rst
+++ /dev/null
@@ -1,13 +0,0 @@
-Add new methods for ``quantile`` and ``percentile``
----------------------------------------------------
-
-``quantile`` and ``percentile`` now have have a ``method=``
-keyword argument supporting 13 different methods.
-This replaces the ``interpolation=`` keyword argument.
-
-The methods are now aligned with nine methods which can be
-found in scientific literature and the R language.
-The remaining methods are the previous discontinuous variations
-of the default "linear" one.
-
-Please see the documentation of `numpy.percentile` for more information.
diff --git a/doc/release/upcoming_changes/19879.new_feature.rst b/doc/release/upcoming_changes/19879.new_feature.rst
deleted file mode 100644
index c6624138b..000000000
--- a/doc/release/upcoming_changes/19879.new_feature.rst
+++ /dev/null
@@ -1,15 +0,0 @@
-``ndarray``, ``dtype`` and ``number`` are now runtime-subscriptable
--------------------------------------------------------------------
-Mimicking :pep:`585`, the `~numpy.ndarray`, `~numpy.dtype` and `~numpy.number`
-classes are now subscriptable for python 3.9 and later.
-Consequently, expressions that were previously only allowed in .pyi stub files
-or with the help of ``from __future__ import annotations`` are now also legal
-during runtime.
-
-.. code-block:: python
-
- >>> import numpy as np
- >>> from typing import Any
-
- >>> np.ndarray[Any, np.dtype[np.float64]]
- numpy.ndarray[typing.Any, numpy.dtype[numpy.float64]]
diff --git a/doc/release/upcoming_changes/19921.deprecation.rst b/doc/release/upcoming_changes/19921.deprecation.rst
deleted file mode 100644
index 17fa0f605..000000000
--- a/doc/release/upcoming_changes/19921.deprecation.rst
+++ /dev/null
@@ -1,3 +0,0 @@
-* the misspelled keyword argument ``delimitor`` of
- ``numpy.ma.mrecords.fromtextfile()`` has been changed into
- ``delimiter``, using it will emit a deprecation warning.
diff --git a/doc/release/upcoming_changes/20000.deprecation.rst b/doc/release/upcoming_changes/20000.deprecation.rst
deleted file mode 100644
index e0a56cd47..000000000
--- a/doc/release/upcoming_changes/20000.deprecation.rst
+++ /dev/null
@@ -1,5 +0,0 @@
-Passing boolean ``kth`` values to (arg-)partition has been deprecated
----------------------------------------------------------------------
-`~numpy.partition` and `~numpy.argpartition` would previously accept boolean
-values for the ``kth`` parameter, which would subsequently be converted into
-integers. This behavior has now been deprecated.
diff --git a/doc/release/upcoming_changes/20027.improvement.rst b/doc/release/upcoming_changes/20027.improvement.rst
deleted file mode 100644
index 86b3bed74..000000000
--- a/doc/release/upcoming_changes/20027.improvement.rst
+++ /dev/null
@@ -1,17 +0,0 @@
-Missing parameters have been added to the ``nan<x>`` functions
---------------------------------------------------------------
-A number of the ``nan<x>`` functions previously lacked parameters that were
-present in their ``<x>``-based counterpart, *e.g.* the ``where`` parameter was
-present in `~numpy.mean` but absent from `~numpy.nanmean`.
-
-The following parameters have now been added to the ``nan<x>`` functions:
-
-* nanmin: ``initial`` & ``where``
-* nanmax: ``initial`` & ``where``
-* nanargmin: ``keepdims`` & ``out``
-* nanargmax: ``keepdims`` & ``out``
-* nansum: ``initial`` & ``where``
-* nanprod: ``initial`` & ``where``
-* nanmean: ``where``
-* nanvar: ``where``
-* nanstd: ``where``
diff --git a/doc/release/upcoming_changes/20049.change.rst b/doc/release/upcoming_changes/20049.change.rst
deleted file mode 100644
index e1f08b343..000000000
--- a/doc/release/upcoming_changes/20049.change.rst
+++ /dev/null
@@ -1,5 +0,0 @@
-Corrected ``advance`` in ``PCG64DSXM`` and ``PCG64``
-----------------------------------------------------
-Fixed a bug in the ``advance`` method of ``PCG64DSXM`` and ``PCG64``. The bug only
-affects results when the step was larger than :math:`2^{64}` on platforms
-that do not support 128-bit integers(e.g., Windows and 32-bit Linux).
diff --git a/doc/release/upcoming_changes/20201.deprecation.rst b/doc/release/upcoming_changes/20201.deprecation.rst
deleted file mode 100644
index db8cda21f..000000000
--- a/doc/release/upcoming_changes/20201.deprecation.rst
+++ /dev/null
@@ -1,5 +0,0 @@
-The ``np.MachAr`` class has been deprecated
--------------------------------------------
-The `~numpy.MachAr` class and `finfo.machar <numpy.finfo>` attribute have
-been deprecated. Users are encouraged to access the property if interest
-directly from the corresponding `~numpy.finfo` attribute.
diff --git a/doc/release/upcoming_changes/20217.improvement.rst b/doc/release/upcoming_changes/20217.improvement.rst
deleted file mode 100644
index 28e5c8ff7..000000000
--- a/doc/release/upcoming_changes/20217.improvement.rst
+++ /dev/null
@@ -1,10 +0,0 @@
-Annotating the main Numpy namespace
---------------------------------------
-Starting from the 1.20 release, PEP 484 type annotations have been included
-for parts of the NumPy library; annotating the remaining functions being a
-work in progress. With the release of 1.22 this process has been completed for
-the main NumPy namespace, which is now fully annotated.
-
-Besides the main namespace, a limited number of sub-packages contain
-annotations as well. This includes, among others, `numpy.testing`,
-`numpy.linalg` and `numpy.random` (available since 1.21).
diff --git a/doc/release/upcoming_changes/20314.change.rst b/doc/release/upcoming_changes/20314.change.rst
deleted file mode 100644
index ea7e29aff..000000000
--- a/doc/release/upcoming_changes/20314.change.rst
+++ /dev/null
@@ -1,10 +0,0 @@
-Change in generation of random 32 bit floating point variates
--------------------------------------------------------------
-There was bug in the generation of 32 bit floating point values from
-the uniform distribution that would result in the least significant
-bit of the random variate always being 0. This has been fixed.
-
-This change affects the variates produced by the `random.Generator`
-methods ``random``, ``standard_normal``, ``standard_exponential``, and
-``standard_gamma``, but only when the dtype is specified as
-``numpy.float32``.
diff --git a/doc/release/upcoming_changes/20394.deprecation.rst b/doc/release/upcoming_changes/20394.deprecation.rst
new file mode 100644
index 000000000..44d1c8a20
--- /dev/null
+++ b/doc/release/upcoming_changes/20394.deprecation.rst
@@ -0,0 +1,6 @@
+Deprecate PyDataMem_SetEventHook
+--------------------------------
+
+The ability to track allocations is now built into Python via ``tracemalloc``.
+The hook function ``PyDataMem_SetEventHook`` has been deprecated and the
+demonstration of its use in tool/allocation_tracking has been removed.
diff --git a/doc/release/upcoming_changes/20414.expired.rst b/doc/release/upcoming_changes/20414.expired.rst
new file mode 100644
index 000000000..51f113ab3
--- /dev/null
+++ b/doc/release/upcoming_changes/20414.expired.rst
@@ -0,0 +1,4 @@
+``alen`` and ``asscalar`` removed
+---------------------------------
+
+The deprecated ``np.alen`` and ``np.asscalar`` functions were removed.
diff --git a/doc/source/dev/development_advanced_debugging.rst b/doc/source/dev/development_advanced_debugging.rst
index 18a7f6ae9..2dbd6ac22 100644
--- a/doc/source/dev/development_advanced_debugging.rst
+++ b/doc/source/dev/development_advanced_debugging.rst
@@ -106,7 +106,7 @@ Valgrind is a powerful tool to find certain memory access problems and should
be run on complicated C code.
Basic use of ``valgrind`` usually requires no more than::
- PYTHONMALLOC=malloc python runtests.py
+ PYTHONMALLOC=malloc valgrind python runtests.py
where ``PYTHONMALLOC=malloc`` is necessary to avoid false positives from python
itself.
diff --git a/doc/source/dev/development_workflow.rst b/doc/source/dev/development_workflow.rst
index 8c56f6fb2..585aacfc9 100644
--- a/doc/source/dev/development_workflow.rst
+++ b/doc/source/dev/development_workflow.rst
@@ -187,6 +187,27 @@ Standard acronyms to start the commit message with are::
TST: addition or modification of tests
REL: related to releasing numpy
+Commands to skip continuous integration
+```````````````````````````````````````
+
+By default a lot of continuous integration (CI) jobs are run for every PR,
+from running the test suite on different operating systems and hardware
+platforms to building the docs. In some cases you already know that CI isn't
+needed (or not all of it), for example if you work on CI config files, text in
+the README, or other files that aren't involved in regular build, test or docs
+sequences. In such cases you may explicitly skip CI by including one of these
+fragments in your commit message::
+
+ ``[ci skip]``: skip as much CI as possible (not all jobs can be skipped)
+ ``[skip github]``: skip GitHub Actions "build numpy and run tests" jobs
+ ``[skip travis]``: skip TravisCI jobs
+ ``[skip azurepipelines]``: skip Azure jobs
+
+*Note: unfortunately not all CI systems implement this feature well, or at all.
+CircleCI supports ``ci skip`` but has no command to skip only CircleCI.
+Azure chooses to still run jobs with skip commands on PRs; the jobs only get
+skipped on merging to master.*
+
.. _workflow_mailing_list:
diff --git a/doc/source/f2py/buildtools/cmake.rst b/doc/source/f2py/buildtools/cmake.rst
index 3ed5a2bee..8c654c73e 100644
--- a/doc/source/f2py/buildtools/cmake.rst
+++ b/doc/source/f2py/buildtools/cmake.rst
@@ -48,9 +48,9 @@ with the ``cython`` information.
ls .
# CMakeLists.txt fib1.f
- mkdir build && cd build
- cmake ..
- make
+ cmake -S . -B build
+ cmake --build build
+ cd build
python -c "import numpy as np; import fibby; a = np.zeros(9); fibby.fib(a); print (a)"
# [ 0. 1. 1. 2. 3. 5. 8. 13. 21.]
diff --git a/doc/source/f2py/buildtools/index.rst b/doc/source/f2py/buildtools/index.rst
index aa41fd37f..e7492f191 100644
--- a/doc/source/f2py/buildtools/index.rst
+++ b/doc/source/f2py/buildtools/index.rst
@@ -27,6 +27,7 @@ Building an extension module which includes Python and Fortran consists of:
+ A ``C`` wrapper file is always created
+ Code with modules require an additional ``.f90`` wrapper
+ + Code with functions generates an additional ``.f`` wrapper
- ``fortranobject.{c,h}``
@@ -46,7 +47,7 @@ Fortran 77 programs
- Generates
+ ``blahmodule.c``
- + ``f2pywrappers.f``
+ + ``blah-f2pywrappers.f``
When no ``COMMON`` blocks are present only a ``C`` wrapper file is generated.
Wrappers are also generated to rewrite assumed shape arrays as automatic
@@ -57,10 +58,12 @@ Fortran 90 programs
- Generates:
+ ``blahmodule.c``
+ + ``blah-f2pywrappers.f``
+ ``blah-f2pywrappers2.f90``
- The secondary wrapper is used to handle code which is subdivided into
- modules. It rewrites assumed shape arrays as automatic arrays.
+ The ``f90`` wrapper is used to handle code which is subdivided into
+ modules. The ``f`` wrapper makes ``subroutines`` for ``functions``. It
+ rewrites assumed shape arrays as automatic arrays.
Signature files
- Input file ``blah.pyf``
@@ -68,7 +71,7 @@ Signature files
+ ``blahmodule.c``
+ ``blah-f2pywrappers2.f90`` (occasionally)
- + ``f2pywrappers.f`` (occasionally)
+ + ``blah-f2pywrappers.f`` (occasionally)
Signature files ``.pyf`` do not signal their language standard via the file
extension, they may generate the F90 and F77 specific wrappers depending on
diff --git a/doc/source/f2py/buildtools/skbuild.rst b/doc/source/f2py/buildtools/skbuild.rst
index af18ea43b..f1a0bf65e 100644
--- a/doc/source/f2py/buildtools/skbuild.rst
+++ b/doc/source/f2py/buildtools/skbuild.rst
@@ -44,9 +44,9 @@ The resulting extension can be built and loaded in the standard workflow.
ls .
# CMakeLists.txt fib1.f
- mkdir build && cd build
- cmake ..
- make
+ cmake -S . -B build
+ cmake --build build
+ cd build
python -c "import numpy as np; import fibby; a = np.zeros(9); fibby.fib(a); print (a)"
# [ 0. 1. 1. 2. 3. 5. 8. 13. 21.]
diff --git a/doc/source/f2py/code/CMakeLists.txt b/doc/source/f2py/code/CMakeLists.txt
index 62ff193bb..d16ddf77e 100644
--- a/doc/source/f2py/code/CMakeLists.txt
+++ b/doc/source/f2py/code/CMakeLists.txt
@@ -1,12 +1,10 @@
-### setup project ###
-cmake_minimum_required(VERSION 3.17.3) # 3.17 > for Python3_SOABI
-set(CMAKE_CXX_STANDARD_REQUIRED ON)
+cmake_minimum_required(VERSION 3.18) # Needed to avoid requiring embedded Python libs too
project(fibby
VERSION 1.0
DESCRIPTION "FIB module"
LANGUAGES C Fortran
- )
+)
# Safety net
if(PROJECT_SOURCE_DIR STREQUAL PROJECT_BINARY_DIR)
@@ -16,65 +14,52 @@ if(PROJECT_SOURCE_DIR STREQUAL PROJECT_BINARY_DIR)
)
endif()
-# Grab Python
-find_package(Python3 3.9 REQUIRED
- COMPONENTS Interpreter Development NumPy)
+# Grab Python, 3.7 or newer
+find_package(Python 3.7 REQUIRED
+ COMPONENTS Interpreter Development.Module NumPy)
# Grab the variables from a local Python installation
# F2PY headers
execute_process(
- COMMAND "${Python3_EXECUTABLE}"
+ COMMAND "${Python_EXECUTABLE}"
-c "import numpy.f2py; print(numpy.f2py.get_include())"
OUTPUT_VARIABLE F2PY_INCLUDE_DIR
OUTPUT_STRIP_TRAILING_WHITESPACE
)
-# Project scope; consider using target_include_directories instead
-include_directories(
- BEFORE
- ${Python3_INCLUDE_DIRS}
- ${Python3_NumPy_INCLUDE_DIRS}
- ${F2PY_INCLUDE_DIR}
- )
-
-message(STATUS ${Python3_INCLUDE_DIRS})
-message(STATUS ${F2PY_INCLUDE_DIR})
-message(STATUS ${Python3_NumPy_INCLUDE_DIRS})
+# Print out the discovered paths
+include(CMakePrintHelpers)
+cmake_print_variables(Python_INCLUDE_DIRS)
+cmake_print_variables(F2PY_INCLUDE_DIR)
+cmake_print_variables(Python_NumPy_INCLUDE_DIRS)
-# Vars
+# Common variables
set(f2py_module_name "fibby")
set(fortran_src_file "${CMAKE_SOURCE_DIR}/fib1.f")
set(f2py_module_c "${f2py_module_name}module.c")
-set(generated_module_file "${f2py_module_name}${Python3_SOABI}")
# Generate sources
add_custom_target(
genpyf
DEPENDS "${CMAKE_CURRENT_BINARY_DIR}/${f2py_module_c}"
- )
+)
add_custom_command(
OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/${f2py_module_c}"
- COMMAND ${Python3_EXECUTABLE} -m "numpy.f2py"
+ COMMAND ${Python_EXECUTABLE} -m "numpy.f2py"
"${fortran_src_file}"
-m "fibby"
--lower # Important
DEPENDS fib1.f # Fortran source
- )
+)
# Set up target
-add_library(${CMAKE_PROJECT_NAME} SHARED
+Python_add_library(${CMAKE_PROJECT_NAME} MODULE WITH_SOABI
"${CMAKE_CURRENT_BINARY_DIR}/${f2py_module_c}" # Generated
"${F2PY_INCLUDE_DIR}/fortranobject.c" # From NumPy
"${fortran_src_file}" # Fortran source(s)
- )
+)
# Depend on sources
+target_link_libraries(${CMAKE_PROJECT_NAME} PRIVATE Python::NumPy)
add_dependencies(${CMAKE_PROJECT_NAME} genpyf)
-
-set_target_properties(
- ${CMAKE_PROJECT_NAME}
- PROPERTIES
- PREFIX ""
- OUTPUT_NAME "${CMAKE_PROJECT_NAME}"
- LINKER_LANGUAGE C
- )
+target_include_directories(${CMAKE_PROJECT_NAME} PRIVATE "${F2PY_INCLUDE_DIR}")
diff --git a/doc/source/f2py/code/CMakeLists_skbuild.txt b/doc/source/f2py/code/CMakeLists_skbuild.txt
index 97bc5c744..3d092760b 100644
--- a/doc/source/f2py/code/CMakeLists_skbuild.txt
+++ b/doc/source/f2py/code/CMakeLists_skbuild.txt
@@ -1,6 +1,5 @@
### setup project ###
-cmake_minimum_required(VERSION 3.17.3)
-set(CMAKE_CXX_STANDARD_REQUIRED ON)
+cmake_minimum_required(VERSION 3.9)
project(fibby
VERSION 1.0
@@ -16,74 +15,81 @@ if(PROJECT_SOURCE_DIR STREQUAL PROJECT_BINARY_DIR)
)
endif()
-# Grab Python
-find_package(Python3 3.9 REQUIRED
- COMPONENTS Interpreter Development)
-
# Ensure scikit-build modules
if (NOT SKBUILD)
- # Kanged -->https://github.com/Kitware/torch_liberator/blob/master/CMakeLists.txt
+ find_package(PythonInterp 3.7 REQUIRED)
+ # Kanged --> https://github.com/Kitware/torch_liberator/blob/master/CMakeLists.txt
# If skbuild is not the driver; include its utilities in CMAKE_MODULE_PATH
execute_process(
- COMMAND "${Python3_EXECUTABLE}"
- -c "import os, skbuild; print(os.path.dirname(skbuild.__file__))"
- OUTPUT_VARIABLE SKBLD_DIR
- OUTPUT_STRIP_TRAILING_WHITESPACE
+ COMMAND "${PYTHON_EXECUTABLE}"
+ -c "import os, skbuild; print(os.path.dirname(skbuild.__file__))"
+ OUTPUT_VARIABLE SKBLD_DIR
+ OUTPUT_STRIP_TRAILING_WHITESPACE
)
- set(SKBLD_CMAKE_DIR "${SKBLD_DIR}/resources/cmake")
- list(APPEND CMAKE_MODULE_PATH ${SKBLD_CMAKE_DIR})
+ list(APPEND CMAKE_MODULE_PATH "${SKBLD_DIR}/resources/cmake")
+ message(STATUS "Looking in ${SKBLD_DIR}/resources/cmake for CMake modules")
endif()
# scikit-build style includes
find_package(PythonExtensions REQUIRED) # for ${PYTHON_EXTENSION_MODULE_SUFFIX}
-find_package(NumPy REQUIRED) # for ${NumPy_INCLUDE_DIRS}
-find_package(F2PY REQUIRED) # for ${F2PY_INCLUDE_DIR}
+
+# Grab the variables from a local Python installation
+# NumPy headers
+execute_process(
+ COMMAND "${PYTHON_EXECUTABLE}"
+ -c "import numpy; print(numpy.get_include())"
+ OUTPUT_VARIABLE NumPy_INCLUDE_DIRS
+ OUTPUT_STRIP_TRAILING_WHITESPACE
+)
+# F2PY headers
+execute_process(
+ COMMAND "${PYTHON_EXECUTABLE}"
+ -c "import numpy.f2py; print(numpy.f2py.get_include())"
+ OUTPUT_VARIABLE F2PY_INCLUDE_DIR
+ OUTPUT_STRIP_TRAILING_WHITESPACE
+)
# Prepping the module
set(f2py_module_name "fibby")
set(fortran_src_file "${CMAKE_SOURCE_DIR}/fib1.f")
-set(generated_module_file ${f2py_module_name}${PYTHON_EXTENSION_MODULE_SUFFIX})
+set(f2py_module_c "${f2py_module_name}module.c")
# Target for enforcing dependencies
-add_custom_target(${f2py_module_name} ALL
+add_custom_target(genpyf
DEPENDS "${fortran_src_file}"
- )
-
-# Custom command for generating .c
+)
add_custom_command(
- OUTPUT "${f2py_module_name}module.c"
- COMMAND ${F2PY_EXECUTABLE}
- -m ${f2py_module_name}
- ${fortran_src_file}
- --lower
- WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
- DEPENDS ${fortran_src_file}
- )
+ OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/${f2py_module_c}"
+ COMMAND ${PYTHON_EXECUTABLE} -m "numpy.f2py"
+ "${fortran_src_file}"
+ -m "fibby"
+ --lower # Important
+ DEPENDS fib1.f # Fortran source
+)
-add_library(${generated_module_file} MODULE
+add_library(${CMAKE_PROJECT_NAME} MODULE
"${f2py_module_name}module.c"
"${F2PY_INCLUDE_DIR}/fortranobject.c"
"${fortran_src_file}")
-target_include_directories(${generated_module_file} PUBLIC
- ${F2PY_INCLUDE_DIRS}
+target_include_directories(${CMAKE_PROJECT_NAME} PUBLIC
+ ${F2PY_INCLUDE_DIR}
+ ${NumPy_INCLUDE_DIRS}
${PYTHON_INCLUDE_DIRS})
-set_target_properties(${generated_module_file} PROPERTIES SUFFIX "")
-set_target_properties(${generated_module_file} PROPERTIES PREFIX "")
+set_target_properties(${CMAKE_PROJECT_NAME} PROPERTIES SUFFIX "${PYTHON_EXTENSION_MODULE_SUFFIX}")
+set_target_properties(${CMAKE_PROJECT_NAME} PROPERTIES PREFIX "")
# Linker fixes
if (UNIX)
if (APPLE)
- set_target_properties(${generated_module_file} PROPERTIES
+ set_target_properties(${CMAKE_PROJECT_NAME} PROPERTIES
LINK_FLAGS '-Wl,-dylib,-undefined,dynamic_lookup')
else()
- set_target_properties(${generated_module_file} PROPERTIES
+ set_target_properties(${CMAKE_PROJECT_NAME} PROPERTIES
LINK_FLAGS '-Wl,--allow-shlib-undefined')
endif()
endif()
-if (SKBUILD)
- install(TARGETS ${generated_module_file} DESTINATION fibby)
-else()
- install(TARGETS ${generated_module_file} DESTINATION ${CMAKE_SOURCE_DIR}/fibby)
-endif()
+add_dependencies(${CMAKE_PROJECT_NAME} genpyf)
+
+install(TARGETS ${CMAKE_PROJECT_NAME} DESTINATION fibby)
diff --git a/doc/source/f2py/code/pyproj_skbuild.toml b/doc/source/f2py/code/pyproj_skbuild.toml
index 6686d1736..bcd6ae99c 100644
--- a/doc/source/f2py/code/pyproj_skbuild.toml
+++ b/doc/source/f2py/code/pyproj_skbuild.toml
@@ -1,5 +1,3 @@
-[project]
-requires-python = ">=3.7"
-
[build-system]
-requires = ["setuptools>=42", "wheel", "scikit-build", "cmake>=3.18", "numpy>=1.21"]
+requires = ["setuptools>=42", "wheel", "scikit-build", "cmake>=3.9", "numpy>=1.21"]
+build-backend = "setuptools.build_meta"
diff --git a/doc/source/f2py/code/setup_skbuild.py b/doc/source/f2py/code/setup_skbuild.py
index 4dfc6af8b..28dcdcb1f 100644
--- a/doc/source/f2py/code/setup_skbuild.py
+++ b/doc/source/f2py/code/setup_skbuild.py
@@ -6,5 +6,5 @@ setup(
description="a minimal example package (fortran version)",
license="MIT",
packages=['fibby'],
- cmake_args=['-DSKBUILD=ON']
+ python_requires=">=3.7",
)
diff --git a/doc/source/reference/c-api/data_memory.rst b/doc/source/reference/c-api/data_memory.rst
index b779026b4..2084ab5d0 100644
--- a/doc/source/reference/c-api/data_memory.rst
+++ b/doc/source/reference/c-api/data_memory.rst
@@ -20,8 +20,8 @@ Historical overview
Since version 1.7.0, NumPy has exposed a set of ``PyDataMem_*`` functions
(:c:func:`PyDataMem_NEW`, :c:func:`PyDataMem_FREE`, :c:func:`PyDataMem_RENEW`)
which are backed by `alloc`, `free`, `realloc` respectively. In that version
-NumPy also exposed the `PyDataMem_EventHook` function described below, which
-wrap the OS-level calls.
+NumPy also exposed the `PyDataMem_EventHook` function (now deprecated)
+described below, which wraps the OS-level calls.
Since those early days, Python also improved its memory management
capabilities, and began providing
@@ -50,10 +50,10 @@ management routines can use :c:func:`PyDataMem_SetHandler`, which uses a
:c:type:`PyDataMem_Handler` structure to hold pointers to functions used to
manage the data memory. The calls are still wrapped by internal routines to
call :c:func:`PyTraceMalloc_Track`, :c:func:`PyTraceMalloc_Untrack`, and will
-use the :c:func:`PyDataMem_EventHookFunc` mechanism. Since the functions may
-change during the lifetime of the process, each ``ndarray`` carries with it the
-functions used at the time of its instantiation, and these will be used to
-reallocate or free the data memory of the instance.
+use the deprecated :c:func:`PyDataMem_EventHookFunc` mechanism. Since the
+functions may change during the lifetime of the process, each ``ndarray``
+carries with it the functions used at the time of its instantiation, and these
+will be used to reallocate or free the data memory of the instance.
.. c:type:: PyDataMem_Handler
@@ -119,7 +119,9 @@ For an example of setting up and using the PyDataMem_Handler, see the test in
thread. The hook should be written to be reentrant, if it performs
operations that might cause new allocation events (such as the
creation/destruction numpy objects, or creating/destroying Python
- objects which might cause a gc)
+ objects which might cause a gc).
+
+ Deprecated in v1.23
What happens when deallocating if there is no policy set
--------------------------------------------------------
diff --git a/doc/source/reference/c-api/types-and-structures.rst b/doc/source/reference/c-api/types-and-structures.rst
index 605a4ae71..1ea47b498 100644
--- a/doc/source/reference/c-api/types-and-structures.rst
+++ b/doc/source/reference/c-api/types-and-structures.rst
@@ -286,6 +286,11 @@ PyArrayDescr_Type and PyArray_Descr
array like behavior. Each bit in this member is a flag which are named
as:
+ .. c:member:: int alignment
+
+ Non-NULL if this type is an array (C-contiguous) of some other type
+
+
..
dedented to allow internal linking, pending a refactoring
diff --git a/doc/source/reference/routines.array-manipulation.rst b/doc/source/reference/routines.array-manipulation.rst
index 1c96495d9..95fc39876 100644
--- a/doc/source/reference/routines.array-manipulation.rst
+++ b/doc/source/reference/routines.array-manipulation.rst
@@ -59,7 +59,6 @@ Changing kind of array
asfortranarray
ascontiguousarray
asarray_chkfinite
- asscalar
require
Joining arrays
diff --git a/doc/source/release.rst b/doc/source/release.rst
index a4a5bde63..9504c6e97 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -5,6 +5,7 @@ Release notes
.. toctree::
:maxdepth: 3
+ 1.23.0 <release/1.23.0-notes>
1.22.0 <release/1.22.0-notes>
1.21.4 <release/1.21.4-notes>
1.21.3 <release/1.21.3-notes>
diff --git a/doc/source/release/1.23.0-notes.rst b/doc/source/release/1.23.0-notes.rst
new file mode 100644
index 000000000..330e7fd44
--- /dev/null
+++ b/doc/source/release/1.23.0-notes.rst
@@ -0,0 +1,45 @@
+.. currentmodule:: numpy
+
+==========================
+NumPy 1.23.0 Release Notes
+==========================
+
+
+Highlights
+==========
+
+
+New functions
+=============
+
+
+Deprecations
+============
+
+
+Future Changes
+==============
+
+
+Expired deprecations
+====================
+
+
+Compatibility notes
+===================
+
+
+C API changes
+=============
+
+
+New Features
+============
+
+
+Improvements
+============
+
+
+Changes
+=======
diff --git a/doc/source/user/basics.copies.rst b/doc/source/user/basics.copies.rst
index 583a59b95..e8ba68bc0 100644
--- a/doc/source/user/basics.copies.rst
+++ b/doc/source/user/basics.copies.rst
@@ -39,6 +39,8 @@ do not reflect on the original array. Making a copy is slower and
memory-consuming but sometimes necessary. A copy can be forced by using
:meth:`.ndarray.copy`.
+.. _indexing-operations:
+
Indexing operations
===================
diff --git a/doc/source/user/basics.indexing.rst b/doc/source/user/basics.indexing.rst
index 264c3d721..e99682f02 100644
--- a/doc/source/user/basics.indexing.rst
+++ b/doc/source/user/basics.indexing.rst
@@ -28,6 +28,7 @@ Note that in Python, ``x[(exp1, exp2, ..., expN)]`` is equivalent to
``x[exp1, exp2, ..., expN]``; the latter is just syntactic sugar
for the former.
+.. _basic-indexing:
Basic indexing
--------------
@@ -88,6 +89,7 @@ that is subsequently indexed by 2.
rapidly changing location in memory. This difference represents a
great potential for confusion.
+.. _slicing-and-striding:
Slicing and striding
^^^^^^^^^^^^^^^^^^^^
@@ -226,6 +228,7 @@ concepts to remember include:
.. index::
pair: ndarray; view
+.. _dimensional-indexing-tools:
Dimensional indexing tools
^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -470,6 +473,7 @@ such an array with an image with shape (ny, nx) with dtype=np.uint8
lookup table) will result in an array of shape (ny, nx, 3) where a
triple of RGB values is associated with each pixel location.
+.. _boolean-indexing:
Boolean array indexing
^^^^^^^^^^^^^^^^^^^^^^
@@ -851,7 +855,7 @@ For this reason, it is possible to use the output from the
:meth:`np.nonzero() <ndarray.nonzero>` function directly as an index since
it always returns a tuple of index arrays.
-Because the special treatment of tuples, they are not automatically
+Because of the special treatment of tuples, they are not automatically
converted to an array as a list would be. As an example: ::
>>> z[[1, 1, 1, 1]] # produces a large array
diff --git a/doc/source/user/basics.io.genfromtxt.rst b/doc/source/user/basics.io.genfromtxt.rst
index 8fe7565aa..6a1ba75dd 100644
--- a/doc/source/user/basics.io.genfromtxt.rst
+++ b/doc/source/user/basics.io.genfromtxt.rst
@@ -231,9 +231,7 @@ When ``dtype=None``, the type of each column is determined iteratively from
its data. We start by checking whether a string can be converted to a
boolean (that is, if the string matches ``true`` or ``false`` in lower
cases); then whether it can be converted to an integer, then to a float,
-then to a complex and eventually to a string. This behavior may be changed
-by modifying the default mapper of the
-:class:`~numpy.lib._iotools.StringConverter` class.
+then to a complex and eventually to a string.
The option ``dtype=None`` is provided for convenience. However, it is
significantly slower than setting the dtype explicitly.
@@ -514,15 +512,15 @@ output array will then be a :class:`~numpy.ma.MaskedArray`.
Shortcut functions
==================
-In addition to :func:`~numpy.genfromtxt`, the :mod:`numpy.lib.npyio` module
+In addition to :func:`~numpy.genfromtxt`, the ``numpy.lib.npyio`` module
provides several convenience functions derived from
:func:`~numpy.genfromtxt`. These functions work the same way as the
original, but they have different default values.
-:func:`~numpy.npyio.recfromtxt`
+``numpy.lib.npyio.recfromtxt``
Returns a standard :class:`numpy.recarray` (if ``usemask=False``) or a
- :class:`~numpy.ma.mrecords.MaskedRecords` array (if ``usemaske=True``). The
+ ``numpy.ma.mrecords.MaskedRecords`` array (if ``usemask=True``). The
default dtype is ``dtype=None``, meaning that the types of each column
will be automatically determined.
-:func:`~numpy.npyio.recfromcsv`
- Like :func:`~numpy.npyio.recfromtxt`, but with a default ``delimiter=","``.
+``numpy.lib.npyio.recfromcsv``
+ Like ``numpy.lib.npyio.recfromtxt``, but with a default ``delimiter=","``.
diff --git a/doc/source/user/basics.rec.rst b/doc/source/user/basics.rec.rst
index 1e6f30506..7f487f39b 100644
--- a/doc/source/user/basics.rec.rst
+++ b/doc/source/user/basics.rec.rst
@@ -579,12 +579,13 @@ As an optional convenience numpy provides an ndarray subclass,
attribute instead of only by index.
Record arrays use a special datatype, :class:`numpy.record`, that allows
field access by attribute on the structured scalars obtained from the array.
-The :mod:`numpy.rec` module provides functions for creating recarrays from
+The ``numpy.rec`` module provides functions for creating recarrays from
various objects.
Additional helper functions for creating and manipulating structured arrays
can be found in :mod:`numpy.lib.recfunctions`.
-The simplest way to create a record array is with ``numpy.rec.array``::
+The simplest way to create a record array is with
+:func:`numpy.rec.array <numpy.core.records.array>`::
>>> recordarr = np.rec.array([(1, 2., 'Hello'), (2, 3., "World")],
... dtype=[('foo', 'i4'),('bar', 'f4'), ('baz', 'S10')])
@@ -600,14 +601,14 @@ The simplest way to create a record array is with ``numpy.rec.array``::
>>> recordarr[1].baz
b'World'
-:func:`numpy.rec.array` can convert a wide variety of arguments into record
-arrays, including structured arrays::
+:func:`numpy.rec.array <numpy.core.records.array>` can convert a wide variety
+of arguments into record arrays, including structured arrays::
>>> arr = np.array([(1, 2., 'Hello'), (2, 3., "World")],
... dtype=[('foo', 'i4'), ('bar', 'f4'), ('baz', 'S10')])
>>> recordarr = np.rec.array(arr)
-The :mod:`numpy.rec` module provides a number of other convenience functions for
+The ``numpy.rec`` module provides a number of other convenience functions for
creating record arrays, see :ref:`record array creation routines
<routines.array-creation.rec>`.
diff --git a/doc/source/user/how-to-index.rst b/doc/source/user/how-to-index.rst
new file mode 100644
index 000000000..41061d5f4
--- /dev/null
+++ b/doc/source/user/how-to-index.rst
@@ -0,0 +1,351 @@
+.. currentmodule:: numpy
+
+.. _how-to-index.rst:
+
+*****************************************
+How to index :class:`ndarrays <.ndarray>`
+*****************************************
+
+.. seealso:: :ref:`basics.indexing`
+
+This page tackles common examples. For an in-depth look into indexing, refer
+to :ref:`basics.indexing`.
+
+Access specific/arbitrary rows and columns
+==========================================
+
+Use :ref:`basic-indexing` features like :ref:`slicing-and-striding`, and
+:ref:`dimensional-indexing-tools`.
+
+ >>> a = np.arange(30).reshape(2, 3, 5)
+ >>> a
+ array([[[ 0, 1, 2, 3, 4],
+ [ 5, 6, 7, 8, 9],
+ [10, 11, 12, 13, 14]],
+ <BLANKLINE>
+ [[15, 16, 17, 18, 19],
+ [20, 21, 22, 23, 24],
+ [25, 26, 27, 28, 29]]])
+ >>> a[0, 2, :]
+ array([10, 11, 12, 13, 14])
+ >>> a[0, :, 3]
+ array([ 3, 8, 13])
+
+Note that the output from indexing operations can have different shape from the
+original object. To preserve the original dimensions after indexing, you can
+use :func:`newaxis`. To use other such tools, refer to
+:ref:`dimensional-indexing-tools`.
+
+ >>> a[0, :, 3].shape
+ (3,)
+ >>> a[0, :, 3, np.newaxis].shape
+ (3, 1)
+ >>> a[0, :, 3, np.newaxis, np.newaxis].shape
+ (3, 1, 1)
+
+Variables can also be used to index::
+
+ >>> y = 0
+ >>> a[y, :, y+3]
+ array([ 3, 8, 13])
+
+Refer to :ref:`dealing-with-variable-indices` to see how to use
+:term:`python:slice` and :py:data:`Ellipsis` in your index variables.
+
+Index columns
+-------------
+
+To index columns, you have to index the last axis. Use
+:ref:`dimensional-indexing-tools` to get the desired number of dimensions::
+
+ >>> a = np.arange(24).reshape(2, 3, 4)
+ >>> a
+ array([[[ 0, 1, 2, 3],
+ [ 4, 5, 6, 7],
+ [ 8, 9, 10, 11]],
+ <BLANKLINE>
+ [[12, 13, 14, 15],
+ [16, 17, 18, 19],
+ [20, 21, 22, 23]]])
+ >>> a[..., 3]
+ array([[ 3, 7, 11],
+ [15, 19, 23]])
+
+To index specific elements in each column, make use of :ref:`advanced-indexing`
+as below::
+
+ >>> arr = np.arange(3*4).reshape(3, 4)
+ >>> arr
+ array([[ 0, 1, 2, 3],
+ [ 4, 5, 6, 7],
+ [ 8, 9, 10, 11]])
+ >>> column_indices = [[1, 3], [0, 2], [2, 2]]
+ >>> np.arange(arr.shape[0])
+ array([0, 1, 2])
+ >>> row_indices = np.arange(arr.shape[0])[:, np.newaxis]
+ >>> row_indices
+ array([[0],
+ [1],
+ [2]])
+
+Use the ``row_indices`` and ``column_indices`` for advanced
+indexing::
+
+ >>> arr[row_indices, column_indices]
+ array([[ 1, 3],
+ [ 4, 6],
+ [10, 10]])
+
+Index along a specific axis
+---------------------------
+
+Use :meth:`take`. See also :meth:`take_along_axis` and
+:meth:`put_along_axis`.
+
+ >>> a = np.arange(30).reshape(2, 3, 5)
+ >>> a
+ array([[[ 0, 1, 2, 3, 4],
+ [ 5, 6, 7, 8, 9],
+ [10, 11, 12, 13, 14]],
+ <BLANKLINE>
+ [[15, 16, 17, 18, 19],
+ [20, 21, 22, 23, 24],
+ [25, 26, 27, 28, 29]]])
+ >>> np.take(a, [2, 3], axis=2)
+ array([[[ 2, 3],
+ [ 7, 8],
+ [12, 13]],
+ <BLANKLINE>
+ [[17, 18],
+ [22, 23],
+ [27, 28]]])
+ >>> np.take(a, [2], axis=1)
+ array([[[10, 11, 12, 13, 14]],
+ <BLANKLINE>
+ [[25, 26, 27, 28, 29]]])
+
+Create subsets of larger matrices
+=================================
+
+Use :ref:`slicing-and-striding` to access chunks of a large array::
+
+ >>> a = np.arange(100).reshape(10, 10)
+ >>> a
+ array([[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
+ [10, 11, 12, 13, 14, 15, 16, 17, 18, 19],
+ [20, 21, 22, 23, 24, 25, 26, 27, 28, 29],
+ [30, 31, 32, 33, 34, 35, 36, 37, 38, 39],
+ [40, 41, 42, 43, 44, 45, 46, 47, 48, 49],
+ [50, 51, 52, 53, 54, 55, 56, 57, 58, 59],
+ [60, 61, 62, 63, 64, 65, 66, 67, 68, 69],
+ [70, 71, 72, 73, 74, 75, 76, 77, 78, 79],
+ [80, 81, 82, 83, 84, 85, 86, 87, 88, 89],
+ [90, 91, 92, 93, 94, 95, 96, 97, 98, 99]])
+ >>> a[2:5, 2:5]
+ array([[22, 23, 24],
+ [32, 33, 34],
+ [42, 43, 44]])
+ >>> a[2:5, 1:3]
+ array([[21, 22],
+ [31, 32],
+ [41, 42]])
+ >>> a[:5, :5]
+ array([[ 0, 1, 2, 3, 4],
+ [10, 11, 12, 13, 14],
+ [20, 21, 22, 23, 24],
+ [30, 31, 32, 33, 34],
+ [40, 41, 42, 43, 44]])
+
+The same thing can be done with advanced indexing in a slightly more complex
+way. Remember that
+:ref:`advanced indexing creates a copy <indexing-operations>`::
+
+ >>> a[np.arange(5)[:, None], np.arange(5)[None, :]]
+ array([[ 0, 1, 2, 3, 4],
+ [10, 11, 12, 13, 14],
+ [20, 21, 22, 23, 24],
+ [30, 31, 32, 33, 34],
+ [40, 41, 42, 43, 44]])
+
+You can also use :meth:`mgrid` to generate indices::
+
+ >>> indices = np.mgrid[0:6:2]
+ >>> indices
+ array([0, 2, 4])
+ >>> a[:, indices]
+ array([[ 0, 2, 4],
+ [10, 12, 14],
+ [20, 22, 24],
+ [30, 32, 34],
+ [40, 42, 44],
+ [50, 52, 54],
+ [60, 62, 64],
+ [70, 72, 74],
+ [80, 82, 84],
+ [90, 92, 94]])
+
+Filter values
+=============
+
+Non-zero elements
+-----------------
+
+Use :meth:`nonzero` to get a tuple of array indices of non-zero elements
+corresponding to every dimension::
+
+ >>> z = np.array([[1, 2, 3, 0], [0, 0, 5, 3], [4, 6, 0, 0]])
+ >>> z
+ array([[1, 2, 3, 0],
+ [0, 0, 5, 3],
+ [4, 6, 0, 0]])
+ >>> np.nonzero(z)
+ (array([0, 0, 0, 1, 1, 2, 2]), array([0, 1, 2, 2, 3, 0, 1]))
+
+Use :meth:`flatnonzero` to fetch indices of elements that are non-zero in
+the flattened version of the ndarray::
+
+ >>> np.flatnonzero(z)
+ array([0, 1, 2, 6, 7, 8, 9])
+
+Arbitrary conditions
+--------------------
+
+Use :meth:`where` to generate indices based on conditions and then
+use :ref:`advanced-indexing`.
+
+ >>> a = np.arange(30).reshape(2, 3, 5)
+ >>> indices = np.where(a % 2 == 0)
+ >>> indices
+ (array([0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1]),
+ array([0, 0, 0, 1, 1, 2, 2, 2, 0, 0, 1, 1, 1, 2, 2]),
+ array([0, 2, 4, 1, 3, 0, 2, 4, 1, 3, 0, 2, 4, 1, 3]))
+ >>> a[indices]
+ array([ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28])
+
+Or, use :ref:`boolean-indexing`::
+
+ >>> a > 14
+ array([[[False, False, False, False, False],
+ [False, False, False, False, False],
+ [False, False, False, False, False]],
+ <BLANKLINE>
+ [[ True, True, True, True, True],
+ [ True, True, True, True, True],
+ [ True, True, True, True, True]]])
+ >>> a[a > 14]
+ array([15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29])
+
+Replace values after filtering
+------------------------------
+
+Use assignment with filtering to replace desired values::
+
+ >>> p = np.arange(-10, 10).reshape(2, 2, 5)
+ >>> p
+ array([[[-10, -9, -8, -7, -6],
+ [ -5, -4, -3, -2, -1]],
+ <BLANKLINE>
+ [[ 0, 1, 2, 3, 4],
+ [ 5, 6, 7, 8, 9]]])
+ >>> q = p < 0
+ >>> q
+ array([[[ True, True, True, True, True],
+ [ True, True, True, True, True]],
+ <BLANKLINE>
+ [[False, False, False, False, False],
+ [False, False, False, False, False]]])
+ >>> p[q] = 0
+ >>> p
+ array([[[0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0]],
+ <BLANKLINE>
+ [[0, 1, 2, 3, 4],
+ [5, 6, 7, 8, 9]]])
+
+Fetch indices of max/min values
+===============================
+
+Use :meth:`argmax` and :meth:`argmin`::
+
+ >>> a = np.arange(30).reshape(2, 3, 5)
+ >>> np.argmax(a)
+ 29
+ >>> np.argmin(a)
+ 0
+
+Use the ``axis`` keyword to get the indices of maximum and minimum
+values along a specific axis::
+
+ >>> np.argmax(a, axis=0)
+ array([[1, 1, 1, 1, 1],
+ [1, 1, 1, 1, 1],
+ [1, 1, 1, 1, 1]])
+ >>> np.argmax(a, axis=1)
+ array([[2, 2, 2, 2, 2],
+ [2, 2, 2, 2, 2]])
+ >>> np.argmax(a, axis=2)
+ array([[4, 4, 4],
+ [4, 4, 4]])
+
+ >>> np.argmin(a, axis=1)
+ array([[0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0]])
+ >>> np.argmin(a, axis=2)
+ array([[0, 0, 0],
+ [0, 0, 0]])
+
+Set ``keepdims`` to ``True`` to keep the axes which are reduced in the
+result as dimensions with size one::
+
+ >>> np.argmin(a, axis=2, keepdims=True)
+ array([[[0],
+ [0],
+ [0]],
+ <BLANKLINE>
+ [[0],
+ [0],
+ [0]]])
+ >>> np.argmax(a, axis=1, keepdims=True)
+ array([[[2, 2, 2, 2, 2]],
+ <BLANKLINE>
+ [[2, 2, 2, 2, 2]]])
+
+Index the same ndarray multiple times efficiently
+=================================================
+
+It must be kept in mind that basic indexing produces :term:`views <view>`
+and advanced indexing produces :term:`copies <copy>`, which are
+computationally less efficient. Hence, you should take care to use basic
+indexing wherever possible instead of advanced indexing.
+
+Further reading
+===============
+
+Nicolas Rougier's `100 NumPy exercises <https://github.com/rougier/numpy-100>`_
+provide a good insight into how indexing is combined with other operations.
+Exercises `6`_, `8`_, `10`_, `15`_, `16`_, `19`_, `20`_, `45`_, `59`_,
+`64`_, `65`_, `70`_, `71`_, `72`_, `76`_, `80`_, `81`_, `84`_, `87`_, `90`_,
+`93`_, `94`_ are specially focused on indexing.
+
+.. _6: https://github.com/rougier/numpy-100/blob/master/100_Numpy_exercises_with_solutions.md#6-create-a-null-vector-of-size-10-but-the-fifth-value-which-is-1-
+.. _8: https://github.com/rougier/numpy-100/blob/master/100_Numpy_exercises_with_solutions.md#8-reverse-a-vector-first-element-becomes-last-
+.. _10: https://github.com/rougier/numpy-100/blob/master/100_Numpy_exercises_with_solutions.md#10-find-indices-of-non-zero-elements-from-120040-
+.. _15: https://github.com/rougier/numpy-100/blob/master/100_Numpy_exercises_with_solutions.md#15-create-a-2d-array-with-1-on-the-border-and-0-inside-
+.. _16: https://github.com/rougier/numpy-100/blob/master/100_Numpy_exercises_with_solutions.md#16-how-to-add-a-border-filled-with-0s-around-an-existing-array-
+.. _19: https://github.com/rougier/numpy-100/blob/master/100_Numpy_exercises_with_solutions.md#19-create-a-8x8-matrix-and-fill-it-with-a-checkerboard-pattern-
+.. _20: https://github.com/rougier/numpy-100/blob/master/100_Numpy_exercises_with_solutions.md#20-consider-a-678-shape-array-what-is-the-index-xyz-of-the-100th-element-
+.. _45: https://github.com/rougier/numpy-100/blob/master/100_Numpy_exercises_with_solutions.md#45-create-random-vector-of-size-10-and-replace-the-maximum-value-by-0-
+.. _59: https://github.com/rougier/numpy-100/blob/master/100_Numpy_exercises_with_solutions.md#59-how-to-sort-an-array-by-the-nth-column-
+.. _64: https://github.com/rougier/numpy-100/blob/master/100_Numpy_exercises_with_solutions.md#64-consider-a-given-vector-how-to-add-1-to-each-element-indexed-by-a-second-vector-be-careful-with-repeated-indices-
+.. _65: https://github.com/rougier/numpy-100/blob/master/100_Numpy_exercises_with_solutions.md#65-how-to-accumulate-elements-of-a-vector-x-to-an-array-f-based-on-an-index-list-i-
+.. _70: https://github.com/rougier/numpy-100/blob/master/100_Numpy_exercises_with_solutions.md#70-consider-the-vector-1-2-3-4-5-how-to-build-a-new-vector-with-3-consecutive-zeros-interleaved-between-each-value-
+.. _71: https://github.com/rougier/numpy-100/blob/master/100_Numpy_exercises_with_solutions.md#71-consider-an-array-of-dimension-553-how-to-mulitply-it-by-an-array-with-dimensions-55-
+.. _72: https://github.com/rougier/numpy-100/blob/master/100_Numpy_exercises_with_solutions.md#72-how-to-swap-two-rows-of-an-array-
+.. _76: https://github.com/rougier/numpy-100/blob/master/100_Numpy_exercises_with_solutions.md#76-consider-a-one-dimensional-array-z-build-a-two-dimensional-array-whose-first-row-is-z0z1z2-and-each-subsequent-row-is--shifted-by-1-last-row-should-be-z-3z-2z-1-
+.. _80: https://github.com/rougier/numpy-100/blob/master/100_Numpy_exercises_with_solutions.md#80-consider-an-arbitrary-array-write-a-function-that-extract-a-subpart-with-a-fixed-shape-and-centered-on-a-given-element-pad-with-a-fill-value-when-necessary-
+.. _81: https://github.com/rougier/numpy-100/blob/master/100_Numpy_exercises_with_solutions.md#81-consider-an-array-z--1234567891011121314-how-to-generate-an-array-r--1234-2345-3456--11121314-
+.. _84: https://github.com/rougier/numpy-100/blob/master/100_Numpy_exercises_with_solutions.md#84-extract-all-the-contiguous-3x3-blocks-from-a-random-10x10-matrix-
+.. _87: https://github.com/rougier/numpy-100/blob/master/100_Numpy_exercises_with_solutions.md#87-consider-a-16x16-array-how-to-get-the-block-sum-block-size-is-4x4-
+.. _90: https://github.com/rougier/numpy-100/blob/master/100_Numpy_exercises_with_solutions.md#90-given-an-arbitrary-number-of-vectors-build-the-cartesian-product-every-combinations-of-every-item-
+.. _93: https://github.com/rougier/numpy-100/blob/master/100_Numpy_exercises_with_solutions.md#93-consider-two-arrays-a-and-b-of-shape-83-and-22-how-to-find-rows-of-a-that-contain-elements-of-each-row-of-b-regardless-of-the-order-of-the-elements-in-b-
+.. _94: https://github.com/rougier/numpy-100/blob/master/100_Numpy_exercises_with_solutions.md#94-considering-a-10x3-matrix-extract-rows-with-unequal-values-eg-223- \ No newline at end of file
diff --git a/doc/source/user/howtos_index.rst b/doc/source/user/howtos_index.rst
index 89a6f54e7..2d66d0638 100644
--- a/doc/source/user/howtos_index.rst
+++ b/doc/source/user/howtos_index.rst
@@ -13,3 +13,4 @@ the package, see the :ref:`API reference <reference>`.
how-to-how-to
how-to-io
+ how-to-index
diff --git a/environment.yml b/environment.yml
index 1bc8b44a7..701f7d46c 100644
--- a/environment.yml
+++ b/environment.yml
@@ -12,7 +12,7 @@ dependencies:
- compilers
- openblas
- nomkl
- - setuptools=58.4
+ - setuptools=59.2.0
# For testing
- pytest
- pytest-cov
diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi
index e01df7c90..eb1e81c6a 100644
--- a/numpy/__init__.pyi
+++ b/numpy/__init__.pyi
@@ -2445,11 +2445,8 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]):
def __ior__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ...
@overload
def __ior__(self: NDArray[object_], other: Any) -> NDArray[object_]: ...
- @overload
- def __ior__(self: NDArray[_ScalarType], other: _RecursiveSequence) -> NDArray[_ScalarType]: ...
- @overload
+
def __dlpack__(self: NDArray[number[Any]], *, stream: None = ...) -> _PyCapsule: ...
- @overload
def __dlpack_device__(self) -> Tuple[int, L[0]]: ...
# Keep `dtype` at the bottom to avoid name conflicts with `np.dtype`
@@ -4342,4 +4339,3 @@ class _SupportsDLPack(Protocol[_T_contra]):
def __dlpack__(self, *, stream: None | _T_contra = ...) -> _PyCapsule: ...
def _from_dlpack(__obj: _SupportsDLPack[None]) -> NDArray[Any]: ...
-
diff --git a/numpy/array_api/_array_object.py b/numpy/array_api/_array_object.py
index 8794c5ea5..75baf34b0 100644
--- a/numpy/array_api/_array_object.py
+++ b/numpy/array_api/_array_object.py
@@ -33,6 +33,7 @@ from typing import TYPE_CHECKING, Optional, Tuple, Union, Any
if TYPE_CHECKING:
from ._typing import Any, PyCapsule, Device, Dtype
+ import numpy.typing as npt
import numpy as np
@@ -108,6 +109,17 @@ class Array:
mid = np.array2string(self._array, separator=', ', prefix=prefix, suffix=suffix)
return prefix + mid + suffix
+ # This function is not required by the spec, but we implement it here for
+ # convenience so that np.asarray(np.array_api.Array) will work.
+ def __array__(self, dtype: None | np.dtype[Any] = None) -> npt.NDArray[Any]:
+ """
+ Warning: this method is NOT part of the array API spec. Implementers
+ of other libraries need not include it, and users should not assume it
+ will be present in other implementations.
+
+ """
+ return np.asarray(self._array, dtype=dtype)
+
# These are various helper functions to make the array behavior match the
# spec in places where it either deviates from or is more strict than
# NumPy behavior
@@ -1072,4 +1084,4 @@ class Array:
# https://data-apis.org/array-api/latest/API_specification/array_object.html#t
if self.ndim != 2:
raise ValueError("x.T requires x to have 2 dimensions. Use x.mT to transpose stacks of matrices and permute_dims() to permute dimensions.")
- return self._array.T
+ return self.__class__._new(self._array.T)
diff --git a/numpy/array_api/_statistical_functions.py b/numpy/array_api/_statistical_functions.py
index 7bee3f4db..5bc831ac2 100644
--- a/numpy/array_api/_statistical_functions.py
+++ b/numpy/array_api/_statistical_functions.py
@@ -65,8 +65,8 @@ def prod(
# Note: sum() and prod() always upcast float32 to float64 for dtype=None
# We need to do so here before computing the product to avoid overflow
if dtype is None and x.dtype == float32:
- x = asarray(x, dtype=float64)
- return Array._new(np.prod(x._array, axis=axis, keepdims=keepdims))
+ dtype = float64
+ return Array._new(np.prod(x._array, dtype=dtype, axis=axis, keepdims=keepdims))
def std(
diff --git a/numpy/array_api/tests/test_array_object.py b/numpy/array_api/tests/test_array_object.py
index 12479d765..b980bacca 100644
--- a/numpy/array_api/tests/test_array_object.py
+++ b/numpy/array_api/tests/test_array_object.py
@@ -4,6 +4,7 @@ from numpy.testing import assert_raises
import numpy as np
from .. import ones, asarray, result_type, all, equal
+from .._array_object import Array
from .._dtypes import (
_all_dtypes,
_boolean_dtypes,
@@ -301,3 +302,23 @@ def test_device_property():
assert all(equal(asarray(a, device='cpu'), a))
assert_raises(ValueError, lambda: asarray(a, device='gpu'))
+
+def test_array_properties():
+ a = ones((1, 2, 3))
+ b = ones((2, 3))
+ assert_raises(ValueError, lambda: a.T)
+
+ assert isinstance(b.T, Array)
+ assert b.T.shape == (3, 2)
+
+ assert isinstance(a.mT, Array)
+ assert a.mT.shape == (1, 3, 2)
+ assert isinstance(b.mT, Array)
+ assert b.mT.shape == (3, 2)
+
+def test___array__():
+ a = ones((2, 3), dtype=int16)
+ assert np.asarray(a) is a._array
+ b = np.asarray(a, dtype=np.float64)
+ assert np.all(np.equal(b, np.ones((2, 3), dtype=np.float64)))
+ assert b.dtype == np.float64
diff --git a/numpy/compat/py3k.py b/numpy/compat/py3k.py
index 1fa17621a..3d10bb988 100644
--- a/numpy/compat/py3k.py
+++ b/numpy/compat/py3k.py
@@ -107,7 +107,9 @@ class contextlib_nullcontext:
def npy_load_module(name, fn, info=None):
"""
- Load a module.
+    Load a module. Uses ``load_module``, which will be deprecated in Python
+    3.12. An alternative that uses ``exec_module`` is in
+    ``numpy.distutils.misc_util.exec_mod_from_location``.
.. versionadded:: 1.11.2
diff --git a/numpy/core/_add_newdocs.py b/numpy/core/_add_newdocs.py
index 078c58976..7d009ad9f 100644
--- a/numpy/core/_add_newdocs.py
+++ b/numpy/core/_add_newdocs.py
@@ -2658,8 +2658,9 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('shape',
See Also
--------
- numpy.reshape : similar function
- ndarray.reshape : similar method
+ numpy.shape : Equivalent getter function.
+ numpy.reshape : Function similar to setting ``shape``.
+ ndarray.reshape : Method similar to setting ``shape``.
"""))
diff --git a/numpy/core/code_generators/cversions.txt b/numpy/core/code_generators/cversions.txt
index e7b3ef697..e1ee8a860 100644
--- a/numpy/core/code_generators/cversions.txt
+++ b/numpy/core/code_generators/cversions.txt
@@ -61,4 +61,5 @@
0x0000000e = 17a0f366e55ec05e5c5c149123478452
# Version 15 (NumPy 1.22) Configurable memory allocations
+# Version 15 (NumPy 1.23) No change.
0x0000000f = b8783365b873681cd204be50cdfb448d
diff --git a/numpy/core/code_generators/ufunc_docstrings.py b/numpy/core/code_generators/ufunc_docstrings.py
index c9be94569..cd584eea7 100644
--- a/numpy/core/code_generators/ufunc_docstrings.py
+++ b/numpy/core/code_generators/ufunc_docstrings.py
@@ -3827,6 +3827,7 @@ add_newdoc('numpy.core.umath', 'sqrt',
--------
lib.scimath.sqrt
A version which returns complex numbers when given negative reals.
+ Note: 0.0 and -0.0 are handled differently for complex inputs.
Notes
-----
diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py
index 3242124ac..f26f306fa 100644
--- a/numpy/core/fromnumeric.py
+++ b/numpy/core/fromnumeric.py
@@ -17,7 +17,7 @@ _dt_ = nt.sctype2char
# functions that are methods
__all__ = [
- 'alen', 'all', 'alltrue', 'amax', 'amin', 'any', 'argmax',
+ 'all', 'alltrue', 'amax', 'amin', 'any', 'argmax',
'argmin', 'argpartition', 'argsort', 'around', 'choose', 'clip',
'compress', 'cumprod', 'cumproduct', 'cumsum', 'diagonal', 'mean',
'ndim', 'nonzero', 'partition', 'prod', 'product', 'ptp', 'put',
@@ -1980,25 +1980,27 @@ def shape(a):
See Also
--------
- len
+ len : ``len(a)`` is equivalent to ``np.shape(a)[0]`` for N-D arrays with
+ ``N>=1``.
ndarray.shape : Equivalent array method.
Examples
--------
>>> np.shape(np.eye(3))
(3, 3)
- >>> np.shape([[1, 2]])
+ >>> np.shape([[1, 3]])
(1, 2)
>>> np.shape([0])
(1,)
>>> np.shape(0)
()
- >>> a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
+ >>> a = np.array([(1, 2), (3, 4), (5, 6)],
+ ... dtype=[('x', 'i4'), ('y', 'i4')])
>>> np.shape(a)
- (2,)
+ (3,)
>>> a.shape
- (2,)
+ (3,)
"""
try:
@@ -2917,51 +2919,6 @@ def amin(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue,
keepdims=keepdims, initial=initial, where=where)
-def _alen_dispathcer(a):
- return (a,)
-
-
-@array_function_dispatch(_alen_dispathcer)
-def alen(a):
- """
- Return the length of the first dimension of the input array.
-
- .. deprecated:: 1.18
- `numpy.alen` is deprecated, use `len` instead.
-
- Parameters
- ----------
- a : array_like
- Input array.
-
- Returns
- -------
- alen : int
- Length of the first dimension of `a`.
-
- See Also
- --------
- shape, size
-
- Examples
- --------
- >>> a = np.zeros((7,4,5))
- >>> a.shape[0]
- 7
- >>> np.alen(a)
- 7
-
- """
- # NumPy 1.18.0, 2019-08-02
- warnings.warn(
- "`np.alen` is deprecated, use `len` instead",
- DeprecationWarning, stacklevel=2)
- try:
- return len(a)
- except TypeError:
- return len(array(a, ndmin=1))
-
-
def _prod_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None,
initial=None, where=None):
return (a, out)
diff --git a/numpy/core/function_base.pyi b/numpy/core/function_base.pyi
index 68d3b3a98..6e0843a0e 100644
--- a/numpy/core/function_base.pyi
+++ b/numpy/core/function_base.pyi
@@ -1,55 +1,195 @@
-from typing import overload, Tuple, Union, Sequence, Any, SupportsIndex, Literal, List
+from typing import (
+ Literal as L,
+ overload,
+ Tuple,
+ Union,
+ Any,
+ SupportsIndex,
+ List,
+ Type,
+ TypeVar,
+)
-from numpy import ndarray
-from numpy.typing import ArrayLike, DTypeLike, _SupportsArray, _NumberLike_co
+from numpy import floating, complexfloating, generic, dtype
+from numpy.typing import (
+ NDArray,
+ ArrayLike,
+ DTypeLike,
+ _SupportsDType,
+ _SupportsArray,
+ _NumberLike_co,
+ _ArrayLikeFloat_co,
+ _ArrayLikeComplex_co,
+)
-# TODO: wait for support for recursive types
-_ArrayLikeNested = Sequence[Sequence[Any]]
-_ArrayLikeNumber = Union[
- _NumberLike_co, Sequence[_NumberLike_co], ndarray, _SupportsArray, _ArrayLikeNested
+_SCT = TypeVar("_SCT", bound=generic)
+
+_DTypeLike = Union[
+ dtype[_SCT],
+ Type[_SCT],
+ _SupportsDType[dtype[_SCT]],
]
__all__: List[str]
@overload
def linspace(
- start: _ArrayLikeNumber,
- stop: _ArrayLikeNumber,
+ start: _ArrayLikeFloat_co,
+ stop: _ArrayLikeFloat_co,
+ num: SupportsIndex = ...,
+ endpoint: bool = ...,
+ retstep: L[False] = ...,
+ dtype: None = ...,
+ axis: SupportsIndex = ...,
+) -> NDArray[floating[Any]]: ...
+@overload
+def linspace(
+ start: _ArrayLikeComplex_co,
+ stop: _ArrayLikeComplex_co,
+ num: SupportsIndex = ...,
+ endpoint: bool = ...,
+ retstep: L[False] = ...,
+ dtype: None = ...,
+ axis: SupportsIndex = ...,
+) -> NDArray[complexfloating[Any, Any]]: ...
+@overload
+def linspace(
+ start: _ArrayLikeComplex_co,
+ stop: _ArrayLikeComplex_co,
+ num: SupportsIndex = ...,
+ endpoint: bool = ...,
+ retstep: L[False] = ...,
+ dtype: _DTypeLike[_SCT] = ...,
+ axis: SupportsIndex = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def linspace(
+ start: _ArrayLikeComplex_co,
+ stop: _ArrayLikeComplex_co,
num: SupportsIndex = ...,
endpoint: bool = ...,
- retstep: Literal[False] = ...,
+ retstep: L[False] = ...,
dtype: DTypeLike = ...,
axis: SupportsIndex = ...,
-) -> ndarray: ...
+) -> NDArray[Any]: ...
@overload
def linspace(
- start: _ArrayLikeNumber,
- stop: _ArrayLikeNumber,
+ start: _ArrayLikeFloat_co,
+ stop: _ArrayLikeFloat_co,
num: SupportsIndex = ...,
endpoint: bool = ...,
- retstep: Literal[True] = ...,
+ retstep: L[True] = ...,
+ dtype: None = ...,
+ axis: SupportsIndex = ...,
+) -> Tuple[NDArray[floating[Any]], floating[Any]]: ...
+@overload
+def linspace(
+ start: _ArrayLikeComplex_co,
+ stop: _ArrayLikeComplex_co,
+ num: SupportsIndex = ...,
+ endpoint: bool = ...,
+ retstep: L[True] = ...,
+ dtype: None = ...,
+ axis: SupportsIndex = ...,
+) -> Tuple[NDArray[complexfloating[Any, Any]], complexfloating[Any, Any]]: ...
+@overload
+def linspace(
+ start: _ArrayLikeComplex_co,
+ stop: _ArrayLikeComplex_co,
+ num: SupportsIndex = ...,
+ endpoint: bool = ...,
+ retstep: L[True] = ...,
+ dtype: _DTypeLike[_SCT] = ...,
+ axis: SupportsIndex = ...,
+) -> Tuple[NDArray[_SCT], _SCT]: ...
+@overload
+def linspace(
+ start: _ArrayLikeComplex_co,
+ stop: _ArrayLikeComplex_co,
+ num: SupportsIndex = ...,
+ endpoint: bool = ...,
+ retstep: L[True] = ...,
dtype: DTypeLike = ...,
axis: SupportsIndex = ...,
-) -> Tuple[ndarray, Any]: ...
+) -> Tuple[NDArray[Any], Any]: ...
+@overload
def logspace(
- start: _ArrayLikeNumber,
- stop: _ArrayLikeNumber,
+ start: _ArrayLikeFloat_co,
+ stop: _ArrayLikeFloat_co,
num: SupportsIndex = ...,
endpoint: bool = ...,
- base: _ArrayLikeNumber = ...,
+ base: _ArrayLikeFloat_co = ...,
+ dtype: None = ...,
+ axis: SupportsIndex = ...,
+) -> NDArray[floating[Any]]: ...
+@overload
+def logspace(
+ start: _ArrayLikeComplex_co,
+ stop: _ArrayLikeComplex_co,
+ num: SupportsIndex = ...,
+ endpoint: bool = ...,
+ base: _ArrayLikeComplex_co = ...,
+ dtype: None = ...,
+ axis: SupportsIndex = ...,
+) -> NDArray[complexfloating[Any, Any]]: ...
+@overload
+def logspace(
+ start: _ArrayLikeComplex_co,
+ stop: _ArrayLikeComplex_co,
+ num: SupportsIndex = ...,
+ endpoint: bool = ...,
+ base: _ArrayLikeComplex_co = ...,
+ dtype: _DTypeLike[_SCT] = ...,
+ axis: SupportsIndex = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def logspace(
+ start: _ArrayLikeComplex_co,
+ stop: _ArrayLikeComplex_co,
+ num: SupportsIndex = ...,
+ endpoint: bool = ...,
+ base: _ArrayLikeComplex_co = ...,
dtype: DTypeLike = ...,
axis: SupportsIndex = ...,
-) -> ndarray: ...
+) -> NDArray[Any]: ...
+@overload
+def geomspace(
+ start: _ArrayLikeFloat_co,
+ stop: _ArrayLikeFloat_co,
+ num: SupportsIndex = ...,
+ endpoint: bool = ...,
+ dtype: None = ...,
+ axis: SupportsIndex = ...,
+) -> NDArray[floating[Any]]: ...
+@overload
+def geomspace(
+ start: _ArrayLikeComplex_co,
+ stop: _ArrayLikeComplex_co,
+ num: SupportsIndex = ...,
+ endpoint: bool = ...,
+ dtype: None = ...,
+ axis: SupportsIndex = ...,
+) -> NDArray[complexfloating[Any, Any]]: ...
+@overload
+def geomspace(
+ start: _ArrayLikeComplex_co,
+ stop: _ArrayLikeComplex_co,
+ num: SupportsIndex = ...,
+ endpoint: bool = ...,
+ dtype: _DTypeLike[_SCT] = ...,
+ axis: SupportsIndex = ...,
+) -> NDArray[_SCT]: ...
+@overload
def geomspace(
- start: _ArrayLikeNumber,
- stop: _ArrayLikeNumber,
+ start: _ArrayLikeComplex_co,
+ stop: _ArrayLikeComplex_co,
num: SupportsIndex = ...,
endpoint: bool = ...,
dtype: DTypeLike = ...,
axis: SupportsIndex = ...,
-) -> ndarray: ...
+) -> NDArray[Any]: ...
# Re-exported to `np.lib.function_base`
def add_newdoc(
diff --git a/numpy/core/include/numpy/experimental_dtype_api.h b/numpy/core/include/numpy/experimental_dtype_api.h
index 554c7fb6c..effa66baf 100644
--- a/numpy/core/include/numpy/experimental_dtype_api.h
+++ b/numpy/core/include/numpy/experimental_dtype_api.h
@@ -181,6 +181,12 @@ typedef PyObject *_ufunc_addloop_fromspec_func(
/*
* Type of the C promoter function, which must be wrapped into a
* PyCapsule with name "numpy._ufunc_promoter".
+ *
+ * Note that currently the output dtypes are always NULL unless they are
+ * also part of the signature. This is an implementation detail and could
+ * change in the future. However, in general promoters should not have a
+ * need for output dtypes.
+ * (There are potential use-cases, these are currently unsupported.)
*/
typedef int promoter_function(PyObject *ufunc,
PyArray_DTypeMeta *op_dtypes[], PyArray_DTypeMeta *signature[],
diff --git a/numpy/core/include/numpy/numpyconfig.h b/numpy/core/include/numpy/numpyconfig.h
index f761555b9..b2e7c458e 100644
--- a/numpy/core/include/numpy/numpyconfig.h
+++ b/numpy/core/include/numpy/numpyconfig.h
@@ -23,12 +23,18 @@
#undef NPY_SIZEOF_LONGDOUBLE
#undef NPY_SIZEOF_COMPLEX_LONGDOUBLE
- #ifdef __x86_64
- #define NPY_SIZEOF_LONGDOUBLE 16
- #define NPY_SIZEOF_COMPLEX_LONGDOUBLE 32
- #elif defined(__arm64__)
+ #if defined(__arm64__)
#define NPY_SIZEOF_LONGDOUBLE 8
#define NPY_SIZEOF_COMPLEX_LONGDOUBLE 16
+ #elif defined(__x86_64)
+ #define NPY_SIZEOF_LONGDOUBLE 16
+ #define NPY_SIZEOF_COMPLEX_LONGDOUBLE 32
+ #elif defined (__i386)
+ #define NPY_SIZEOF_LONGDOUBLE 12
+ #define NPY_SIZEOF_COMPLEX_LONGDOUBLE 24
+ #elif defined(__ppc__) || defined (__ppc64__)
+ #define NPY_SIZEOF_LONGDOUBLE 16
+ #define NPY_SIZEOF_COMPLEX_LONGDOUBLE 32
#else
#error "unknown architecture"
#endif
@@ -57,5 +63,6 @@
#define NPY_1_20_API_VERSION 0x0000000e
#define NPY_1_21_API_VERSION 0x0000000e
#define NPY_1_22_API_VERSION 0x0000000f
+#define NPY_1_23_API_VERSION 0x0000000f
#endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_NUMPYCONFIG_H_ */
diff --git a/numpy/core/numeric.py b/numpy/core/numeric.py
index 344d40d93..014fa0a39 100644
--- a/numpy/core/numeric.py
+++ b/numpy/core/numeric.py
@@ -1829,6 +1829,14 @@ def fromfunction(function, shape, *, dtype=float, like=None, **kwargs):
Examples
--------
+ >>> np.fromfunction(lambda i, j: i, (2, 2), dtype=float)
+ array([[0., 0.],
+ [1., 1.]])
+
+ >>> np.fromfunction(lambda i, j: j, (2, 2), dtype=float)
+ array([[0., 1.],
+ [0., 1.]])
+
>>> np.fromfunction(lambda i, j: i == j, (3, 3), dtype=int)
array([[ True, False, False],
[False, True, False],
diff --git a/numpy/core/setup.py b/numpy/core/setup.py
index a5f423d8f..17fbd99af 100644
--- a/numpy/core/setup.py
+++ b/numpy/core/setup.py
@@ -1065,7 +1065,7 @@ def configuration(parent_package='',top_path=None):
#######################################################################
config.add_extension('_operand_flag_tests',
- sources=[join('src', 'umath', '_operand_flag_tests.c.src')])
+ sources=[join('src', 'umath', '_operand_flag_tests.c')])
#######################################################################
# SIMD module #
diff --git a/numpy/core/setup_common.py b/numpy/core/setup_common.py
index 70e8fc897..772c87c96 100644
--- a/numpy/core/setup_common.py
+++ b/numpy/core/setup_common.py
@@ -44,6 +44,7 @@ C_ABI_VERSION = 0x01000009
# 0x0000000e - 1.20.x
# 0x0000000e - 1.21.x
# 0x0000000f - 1.22.x
+# 0x0000000f - 1.23.x
C_API_VERSION = 0x0000000f
class MismatchCAPIWarning(Warning):
diff --git a/numpy/core/src/multiarray/alloc.c b/numpy/core/src/multiarray/alloc.c
index 0a694cf62..94a7daa83 100644
--- a/numpy/core/src/multiarray/alloc.c
+++ b/numpy/core/src/multiarray/alloc.c
@@ -186,6 +186,24 @@ npy_free_cache_dim(void * p, npy_uintp sz)
&PyArray_free);
}
+/* Similar to array_dealloc in arrayobject.c */
+static NPY_INLINE void
+WARN_NO_RETURN(PyObject* warning, const char * msg) {
+ if (PyErr_WarnEx(warning, msg, 1) < 0) {
+ PyObject * s;
+
+ s = PyUnicode_FromString("PyDataMem_UserFREE");
+ if (s) {
+ PyErr_WriteUnraisable(s);
+ Py_DECREF(s);
+ }
+ else {
+ PyErr_WriteUnraisable(Py_None);
+ }
+ }
+}
+
+
/* malloc/free/realloc hook */
NPY_NO_EXPORT PyDataMem_EventHookFunc *_PyDataMem_eventhook = NULL;
@@ -210,6 +228,8 @@ NPY_NO_EXPORT void *_PyDataMem_eventhook_user_data = NULL;
* operations that might cause new allocation events (such as the
* creation/destruction numpy objects, or creating/destroying Python
* objects which might cause a gc)
+ *
+ * Deprecated in 1.23
*/
NPY_NO_EXPORT PyDataMem_EventHookFunc *
PyDataMem_SetEventHook(PyDataMem_EventHookFunc *newhook,
@@ -218,6 +238,10 @@ PyDataMem_SetEventHook(PyDataMem_EventHookFunc *newhook,
PyDataMem_EventHookFunc *temp;
NPY_ALLOW_C_API_DEF
NPY_ALLOW_C_API
+ /* 2021-11-18, 1.23 */
+ WARN_NO_RETURN(PyExc_DeprecationWarning,
+ "PyDataMem_SetEventHook is deprecated, use tracemalloc "
+ "and the 'np.lib.tracemalloc_domain' domain");
temp = _PyDataMem_eventhook;
_PyDataMem_eventhook = newhook;
if (old_data != NULL) {
@@ -435,33 +459,14 @@ PyDataMem_UserNEW_ZEROED(size_t nmemb, size_t size, PyObject *mem_handler)
return result;
}
-/* Similar to array_dealloc in arrayobject.c */
-static NPY_INLINE void
-WARN_IN_FREE(PyObject* warning, const char * msg) {
- if (PyErr_WarnEx(warning, msg, 1) < 0) {
- PyObject * s;
-
- s = PyUnicode_FromString("PyDataMem_UserFREE");
- if (s) {
- PyErr_WriteUnraisable(s);
- Py_DECREF(s);
- }
- else {
- PyErr_WriteUnraisable(Py_None);
- }
- }
-}
-
-
NPY_NO_EXPORT void
PyDataMem_UserFREE(void *ptr, size_t size, PyObject *mem_handler)
{
PyDataMem_Handler *handler = (PyDataMem_Handler *) PyCapsule_GetPointer(mem_handler, "mem_handler");
if (handler == NULL) {
- WARN_IN_FREE(PyExc_RuntimeWarning,
+ WARN_NO_RETURN(PyExc_RuntimeWarning,
"Could not get pointer to 'mem_handler' from PyCapsule");
- PyErr_Clear();
return;
}
PyTraceMalloc_Untrack(NPY_TRACE_DOMAIN, (npy_uintp)ptr);
diff --git a/numpy/core/src/multiarray/methods.c b/numpy/core/src/multiarray/methods.c
index 627096b3c..b0b6f42f1 100644
--- a/numpy/core/src/multiarray/methods.c
+++ b/numpy/core/src/multiarray/methods.c
@@ -2246,7 +2246,7 @@ array_dumps(PyArrayObject *self, PyObject *args, PyObject *kwds)
static PyObject *
-array_sizeof(PyArrayObject *self)
+array_sizeof(PyArrayObject *self, PyObject *NPY_UNUSED(args))
{
/* object + dimension and strides */
Py_ssize_t nbytes = Py_TYPE(self)->tp_basicsize +
diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c
index dbf5ab161..cf0160a2b 100644
--- a/numpy/core/src/multiarray/multiarraymodule.c
+++ b/numpy/core/src/multiarray/multiarraymodule.c
@@ -4212,7 +4212,7 @@ normalize_axis_index(PyObject *NPY_UNUSED(self),
static PyObject *
-_reload_guard(PyObject *NPY_UNUSED(self)) {
+_reload_guard(PyObject *NPY_UNUSED(self), PyObject *NPY_UNUSED(args)) {
static int initialized = 0;
#if !defined(PYPY_VERSION)
diff --git a/numpy/core/src/multiarray/nditer_pywrap.c b/numpy/core/src/multiarray/nditer_pywrap.c
index 8e072d5f4..2675496ab 100644
--- a/numpy/core/src/multiarray/nditer_pywrap.c
+++ b/numpy/core/src/multiarray/nditer_pywrap.c
@@ -1190,7 +1190,7 @@ npyiter_resetbasepointers(NewNpyArrayIterObject *self)
}
static PyObject *
-npyiter_reset(NewNpyArrayIterObject *self)
+npyiter_reset(NewNpyArrayIterObject *self, PyObject *NPY_UNUSED(args))
{
if (self->iter == NULL) {
PyErr_SetString(PyExc_ValueError,
@@ -1227,7 +1227,7 @@ npyiter_reset(NewNpyArrayIterObject *self)
* copied.
*/
static PyObject *
-npyiter_copy(NewNpyArrayIterObject *self)
+npyiter_copy(NewNpyArrayIterObject *self, PyObject *NPY_UNUSED(args))
{
NewNpyArrayIterObject *iter;
@@ -1263,7 +1263,7 @@ npyiter_copy(NewNpyArrayIterObject *self)
}
static PyObject *
-npyiter_iternext(NewNpyArrayIterObject *self)
+npyiter_iternext(NewNpyArrayIterObject *self, PyObject *NPY_UNUSED(args))
{
if (self->iter != NULL && self->iternext != NULL &&
!self->finished && self->iternext(self->iter)) {
@@ -1320,7 +1320,8 @@ npyiter_remove_axis(NewNpyArrayIterObject *self, PyObject *args)
}
static PyObject *
-npyiter_remove_multi_index(NewNpyArrayIterObject *self)
+npyiter_remove_multi_index(
+ NewNpyArrayIterObject *self, PyObject *NPY_UNUSED(args))
{
if (self->iter == NULL) {
PyErr_SetString(PyExc_ValueError,
@@ -1345,7 +1346,8 @@ npyiter_remove_multi_index(NewNpyArrayIterObject *self)
}
static PyObject *
-npyiter_enable_external_loop(NewNpyArrayIterObject *self)
+npyiter_enable_external_loop(
+ NewNpyArrayIterObject *self, PyObject *NPY_UNUSED(args))
{
if (self->iter == NULL) {
PyErr_SetString(PyExc_ValueError,
@@ -1370,7 +1372,7 @@ npyiter_enable_external_loop(NewNpyArrayIterObject *self)
}
static PyObject *
-npyiter_debug_print(NewNpyArrayIterObject *self)
+npyiter_debug_print(NewNpyArrayIterObject *self, PyObject *NPY_UNUSED(args))
{
if (self->iter != NULL) {
NpyIter_DebugPrint(self->iter);
@@ -2315,7 +2317,7 @@ npyiter_ass_subscript(NewNpyArrayIterObject *self, PyObject *op,
}
static PyObject *
-npyiter_enter(NewNpyArrayIterObject *self)
+npyiter_enter(NewNpyArrayIterObject *self, PyObject *NPY_UNUSED(args))
{
if (self->iter == NULL) {
PyErr_SetString(PyExc_RuntimeError, "operation on non-initialized iterator");
@@ -2326,7 +2328,7 @@ npyiter_enter(NewNpyArrayIterObject *self)
}
static PyObject *
-npyiter_close(NewNpyArrayIterObject *self)
+npyiter_close(NewNpyArrayIterObject *self, PyObject *NPY_UNUSED(args))
{
NpyIter *iter = self->iter;
int ret;
@@ -2347,7 +2349,7 @@ static PyObject *
npyiter_exit(NewNpyArrayIterObject *self, PyObject *NPY_UNUSED(args))
{
/* even if called via exception handling, writeback any data */
- return npyiter_close(self);
+ return npyiter_close(self, NULL);
}
static PyMethodDef npyiter_methods[] = {
diff --git a/numpy/core/src/multiarray/scalartypes.c.src b/numpy/core/src/multiarray/scalartypes.c.src
index bbbc5bfa2..db1e49db8 100644
--- a/numpy/core/src/multiarray/scalartypes.c.src
+++ b/numpy/core/src/multiarray/scalartypes.c.src
@@ -229,7 +229,7 @@ gentype_multiply(PyObject *m1, PyObject *m2)
* #convert = Long*8, LongLong*2#
*/
static PyObject *
-@type@_bit_count(PyObject *self)
+@type@_bit_count(PyObject *self, PyObject *NPY_UNUSED(args))
{
@type@ scalar = PyArrayScalar_VAL(self, @Name@);
uint8_t count = npy_popcount@c@(scalar);
@@ -1160,7 +1160,7 @@ gentype_size_get(PyObject *NPY_UNUSED(self), void *NPY_UNUSED(ignored))
}
static PyObject *
-gentype_sizeof(PyObject *self)
+gentype_sizeof(PyObject *self, PyObject *NPY_UNUSED(args))
{
Py_ssize_t nbytes;
PyObject * isz = gentype_itemsize_get(self, NULL);
@@ -1918,7 +1918,7 @@ static PyObject *
*/
/* Heavily copied from the builtin float.as_integer_ratio */
static PyObject *
-@name@_as_integer_ratio(PyObject *self)
+@name@_as_integer_ratio(PyObject *self, PyObject *NPY_UNUSED(args))
{
#if @is_half@
npy_double val = npy_half_to_double(PyArrayScalar_VAL(self, @Name@));
@@ -1999,7 +1999,7 @@ error:
* #c = f, f, , l#
*/
static PyObject *
-@name@_is_integer(PyObject *self)
+@name@_is_integer(PyObject *self, PyObject *NPY_UNUSED(args))
{
#if @is_half@
npy_double val = npy_half_to_double(PyArrayScalar_VAL(self, @Name@));
@@ -2022,7 +2022,7 @@ static PyObject *
/**end repeat**/
static PyObject *
-integer_is_integer(PyObject *self) {
+integer_is_integer(PyObject *self, PyObject *NPY_UNUSED(args)) {
Py_RETURN_TRUE;
}
diff --git a/numpy/core/src/umath/_operand_flag_tests.c.src b/numpy/core/src/umath/_operand_flag_tests.c
index c59e13baf..c59e13baf 100644
--- a/numpy/core/src/umath/_operand_flag_tests.c.src
+++ b/numpy/core/src/umath/_operand_flag_tests.c
diff --git a/numpy/core/src/umath/dispatching.c b/numpy/core/src/umath/dispatching.c
index 8e99c0420..4c6b09b80 100644
--- a/numpy/core/src/umath/dispatching.c
+++ b/numpy/core/src/umath/dispatching.c
@@ -46,19 +46,23 @@
#include "dispatching.h"
#include "dtypemeta.h"
+#include "common_dtype.h"
#include "npy_hashtable.h"
#include "legacy_array_method.h"
#include "ufunc_object.h"
#include "ufunc_type_resolution.h"
+#define PROMOTION_DEBUG_TRACING 0
+
+
/* forward declaration */
static NPY_INLINE PyObject *
promote_and_get_info_and_ufuncimpl(PyUFuncObject *ufunc,
PyArrayObject *const ops[],
PyArray_DTypeMeta *signature[],
PyArray_DTypeMeta *op_dtypes[],
- npy_bool allow_legacy_promotion, npy_bool cache);
+ npy_bool allow_legacy_promotion);
/**
@@ -147,6 +151,23 @@ PyUFunc_AddLoop(PyUFuncObject *ufunc, PyObject *info, int ignore_duplicate)
* (Based on `isinstance()`, the knowledge that non-abstract DTypes cannot
* be subclassed is used, however.)
*
+ * NOTE: This currently does not take into account output dtypes which do not
+ * have to match. The possible extension here is that if an output
+ * is given (and thus an output dtype), but not part of the signature
+ * we could ignore it for matching, but *prefer* a loop that matches
+ * better.
+ * Why is this not done currently? First, it seems a niche feature that
+ * loops can only be distinguished based on the output dtype. Second,
+ * there are some nasty theoretical things because:
+ *
+ * np.add(f4, f4, out=f8)
+ * np.add(f4, f4, out=f8, dtype=f8)
+ *
+ * are different, the first uses the f4 loop, the second the f8 loop.
+ * The problem is, that the current cache only uses the op_dtypes and
+ * both are `(f4, f4, f8)`. The cache would need to store also which
+ * output was provided by `dtype=`/`signature=`.
+ *
* @param ufunc
* @param op_dtypes The DTypes that are either passed in (defined by an
* operand) or defined by the `signature` as also passed in as
@@ -159,17 +180,35 @@ PyUFunc_AddLoop(PyUFuncObject *ufunc, PyObject *info, int ignore_duplicate)
*/
static int
resolve_implementation_info(PyUFuncObject *ufunc,
- PyArray_DTypeMeta *op_dtypes[], PyObject **out_info)
+ PyArray_DTypeMeta *op_dtypes[], npy_bool only_promoters,
+ PyObject **out_info)
{
int nin = ufunc->nin, nargs = ufunc->nargs;
Py_ssize_t size = PySequence_Length(ufunc->_loops);
PyObject *best_dtypes = NULL;
PyObject *best_resolver_info = NULL;
+#if PROMOTION_DEBUG_TRACING
+ printf("Promoting for '%s' promoters only: %d\n",
+ ufunc->name ? ufunc->name : "<unknown>", (int)only_promoters);
+ printf(" DTypes: ");
+ PyObject *tmp = PyArray_TupleFromItems(ufunc->nargs, op_dtypes, 1);
+ PyObject_Print(tmp, stdout, 0);
+    printf("\n");
+    fflush(stdout);
+    Py_DECREF(tmp);
+#endif
+
for (Py_ssize_t res_idx = 0; res_idx < size; res_idx++) {
/* Test all resolvers */
PyObject *resolver_info = PySequence_Fast_GET_ITEM(
ufunc->_loops, res_idx);
+
+ if (only_promoters && PyObject_TypeCheck(
+ PyTuple_GET_ITEM(resolver_info, 1), &PyArrayMethod_Type)) {
+ continue;
+ }
+
PyObject *curr_dtypes = PyTuple_GET_ITEM(resolver_info, 0);
/*
* Test if the current resolver matches, it could make sense to
@@ -179,20 +218,31 @@ resolve_implementation_info(PyUFuncObject *ufunc,
npy_bool matches = NPY_TRUE;
/*
- * NOTE: We check also the output DType. In principle we do not
- * have to strictly match it (unless it is provided by the
- * `signature`). This assumes that a (fallback) promoter will
- * unset the output DType if no exact match is found.
+ * NOTE: We currently match the output dtype exactly here, this is
+     *       actually only necessary if the signature includes it.
+     *       Currently, we rely on op_dtypes[nin:nargs] being NULLed if not.
*/
for (Py_ssize_t i = 0; i < nargs; i++) {
PyArray_DTypeMeta *given_dtype = op_dtypes[i];
PyArray_DTypeMeta *resolver_dtype = (
(PyArray_DTypeMeta *)PyTuple_GET_ITEM(curr_dtypes, i));
assert((PyObject *)given_dtype != Py_None);
- if (given_dtype == NULL && i >= nin) {
- /* Unspecified out always matches (see below for inputs) */
- continue;
+ if (given_dtype == NULL) {
+ if (i >= nin) {
+ /* Unspecified out always matches (see below for inputs) */
+ continue;
+ }
+ /*
+ * This is a reduce-like operation, which always have the form
+ * `(res_DType, op_DType, res_DType)`. If the first and last
+ * dtype of the loops match, this should be reduce-compatible.
+ */
+ if (PyTuple_GET_ITEM(curr_dtypes, 0)
+ == PyTuple_GET_ITEM(curr_dtypes, 2)) {
+ continue;
+ }
}
+
if (resolver_dtype == (PyArray_DTypeMeta *)Py_None) {
/* always matches */
continue;
@@ -204,24 +254,7 @@ resolve_implementation_info(PyUFuncObject *ufunc,
matches = NPY_FALSE;
break;
}
- if (given_dtype == NULL) {
- /*
- * If an input was not specified, this is a reduce-like
- * operation: reductions use `(operand_DType, NULL, out_DType)`
- * as they only have a single operand. This allows special
- * reduce promotion rules useful for example for sum/product.
- * E.g. `np.add.reduce([True, True])` promotes to integer.
- *
- * Continuing here allows a promoter to handle reduce-like
- * promotions explicitly if necessary.
- * TODO: The `!NPY_DT_is_abstract(resolver_dtype)` currently
- * ensures that this is a promoter. If we allow
- * `ArrayMethods` to use abstract DTypes, we may have to
- * reject it here or the `ArrayMethod` has to implement
- * the reduce promotion.
- */
- continue;
- }
+
int subclass = PyObject_IsSubclass(
(PyObject *)given_dtype, (PyObject *)resolver_dtype);
if (subclass < 0) {
@@ -254,8 +287,12 @@ resolve_implementation_info(PyUFuncObject *ufunc,
* In all cases, we give up resolution, since it would be
* necessary to compare to two "best" cases.
*/
- int unambiguously_equally_good = 1;
for (Py_ssize_t i = 0; i < nargs; i++) {
+ if (i == ufunc->nin && current_best != -1) {
+ /* inputs prefer one loop and outputs have lower priority */
+ break;
+ }
+
int best;
PyObject *prev_dtype = PyTuple_GET_ITEM(best_dtypes, i);
@@ -265,50 +302,18 @@ resolve_implementation_info(PyUFuncObject *ufunc,
/* equivalent, so this entry does not matter */
continue;
}
- /*
- * TODO: Even if the input is not specified, if we have
- * abstract DTypes and one is a subclass of the other,
- * the subclass should be considered a better match
- * (subclasses are always more specific).
- */
- /* Whether this (normally output) dtype was specified at all */
if (op_dtypes[i] == NULL) {
/*
- * When DType is completely unspecified, prefer abstract
- * over concrete, assuming it will resolve.
- * Furthermore, we cannot decide which abstract/None
- * is "better", only concrete ones which are subclasses
- * of Abstract ones are defined as worse.
+                 * If a dtype is NULL it always matches, so there is no
+ * point in defining one as more precise than the other.
*/
- npy_bool prev_is_concrete = NPY_FALSE;
- npy_bool new_is_concrete = NPY_FALSE;
- if ((prev_dtype != Py_None) &&
- !NPY_DT_is_abstract((PyArray_DTypeMeta *)prev_dtype)) {
- prev_is_concrete = NPY_TRUE;
- }
- if ((new_dtype != Py_None) &&
- !NPY_DT_is_abstract((PyArray_DTypeMeta *)new_dtype)) {
- new_is_concrete = NPY_TRUE;
- }
- if (prev_is_concrete == new_is_concrete) {
- best = -1;
- }
- else if (prev_is_concrete) {
- unambiguously_equally_good = 0;
- best = 1;
- }
- else {
- unambiguously_equally_good = 0;
- best = 0;
- }
+ continue;
}
/* If either is None, the other is strictly more specific */
- else if (prev_dtype == Py_None) {
- unambiguously_equally_good = 0;
+ if (prev_dtype == Py_None) {
best = 1;
}
else if (new_dtype == Py_None) {
- unambiguously_equally_good = 0;
best = 0;
}
/*
@@ -318,20 +323,25 @@ resolve_implementation_info(PyUFuncObject *ufunc,
else if (!NPY_DT_is_abstract((PyArray_DTypeMeta *)prev_dtype) &&
!NPY_DT_is_abstract((PyArray_DTypeMeta *)new_dtype)) {
/*
- * Ambiguous unless the are identical (checked above),
- * but since they are concrete it does not matter which
- * best to compare.
+ * Ambiguous unless they are identical (checked above),
+ * or one matches exactly.
*/
- best = -1;
+ if (prev_dtype == (PyObject *)op_dtypes[i]) {
+ best = 0;
+ }
+ else if (new_dtype == (PyObject *)op_dtypes[i]) {
+ best = 1;
+ }
+ else {
+ best = -1;
+ }
}
else if (!NPY_DT_is_abstract((PyArray_DTypeMeta *)prev_dtype)) {
/* old is not abstract, so better (both not possible) */
- unambiguously_equally_good = 0;
best = 0;
}
else if (!NPY_DT_is_abstract((PyArray_DTypeMeta *)new_dtype)) {
/* new is not abstract, so better (both not possible) */
- unambiguously_equally_good = 0;
best = 1;
}
/*
@@ -349,6 +359,10 @@ resolve_implementation_info(PyUFuncObject *ufunc,
return -1;
}
+ if (best == -1) {
+ /* no new info, nothing to update */
+ continue;
+ }
if ((current_best != -1) && (current_best != best)) {
/*
* We need a clear best, this could be tricky, unless
@@ -367,15 +381,34 @@ resolve_implementation_info(PyUFuncObject *ufunc,
if (current_best == -1) {
/*
- * TODO: It would be nice to have a "diagnostic mode" that
- * informs if this happens! (An immediate error currently
- * blocks later legacy resolution, but may work in the
- * future.)
+ * We could not find a best loop, but promoters should be
+ * designed in a way to disambiguate such scenarios, so we
+ * retry the whole lookup using only promoters.
+ * (There is a small chance we already got two promoters.
+ * We just redo it anyway for simplicity.)
*/
- if (unambiguously_equally_good) {
- /* unset the best resolver to indicate this */
- best_resolver_info = NULL;
- continue;
+ if (!only_promoters) {
+ return resolve_implementation_info(ufunc,
+ op_dtypes, NPY_TRUE, out_info);
+ }
+ /*
+ * If this is already the retry, we are out of luck. Promoters
+ * should be designed in a way that this cannot happen!
+ * (It should be noted, that the retry might not find anything
+ * and we still do a legacy lookup later.)
+ */
+ PyObject *given = PyArray_TupleFromItems(
+ ufunc->nargs, (PyObject **)op_dtypes, 1);
+ if (given != NULL) {
+ PyErr_Format(PyExc_RuntimeError,
+ "Could not find a loop for the inputs:\n %S\n"
+ "The two promoters %S and %S matched the input "
+ "equally well. Promoters must be designed "
+ "to be unambiguous. NOTE: This indicates an error "
+ "in NumPy or an extending library and should be "
+ "reported.",
+ given, best_dtypes, curr_dtypes);
+ Py_DECREF(given);
}
*out_info = NULL;
return 0;
@@ -457,10 +490,9 @@ call_promoter_and_recurse(PyUFuncObject *ufunc, PyObject *promoter,
if (Py_EnterRecursiveCall(" during ufunc promotion.") != 0) {
goto finish;
}
- /* TODO: The caching logic here may need revising: */
resolved_info = promote_and_get_info_and_ufuncimpl(ufunc,
operands, signature, new_op_dtypes,
- /* no legacy promotion */ NPY_FALSE, /* cache */ NPY_TRUE);
+ /* no legacy promotion */ NPY_FALSE);
Py_LeaveRecursiveCall();
@@ -625,7 +657,7 @@ promote_and_get_info_and_ufuncimpl(PyUFuncObject *ufunc,
PyArrayObject *const ops[],
PyArray_DTypeMeta *signature[],
PyArray_DTypeMeta *op_dtypes[],
- npy_bool allow_legacy_promotion, npy_bool cache)
+ npy_bool allow_legacy_promotion)
{
/*
* Fetch the dispatching info which consists of the implementation and
@@ -644,11 +676,12 @@ promote_and_get_info_and_ufuncimpl(PyUFuncObject *ufunc,
}
/*
- * If `info == NULL`, the caching failed, repeat using the full resolution
- * in `resolve_implementation_info`.
+ * If `info == NULL`, loading from cache failed, use the full resolution
+ * in `resolve_implementation_info` (which caches its result on success).
*/
if (info == NULL) {
- if (resolve_implementation_info(ufunc, op_dtypes, &info) < 0) {
+ if (resolve_implementation_info(ufunc,
+ op_dtypes, NPY_FALSE, &info) < 0) {
return NULL;
}
if (info != NULL && PyObject_TypeCheck(
@@ -657,41 +690,12 @@ promote_and_get_info_and_ufuncimpl(PyUFuncObject *ufunc,
* Found the ArrayMethod and NOT promoter. Before returning it
* add it to the cache for faster lookup in the future.
*/
- if (cache && PyArrayIdentityHash_SetItem(ufunc->_dispatch_cache,
+ if (PyArrayIdentityHash_SetItem(ufunc->_dispatch_cache,
(PyObject **)op_dtypes, info, 0) < 0) {
return NULL;
}
return info;
}
- else if (info == NULL && op_dtypes[0] == NULL) {
- /*
- * If we have a reduction, fill in the unspecified input/array
- * assuming it should have the same dtype as the operand input
- * (or the output one if given).
- * Then, try again. In some cases, this will choose different
- * paths, such as `ll->?` instead of an `??->?` loop for `np.equal`
- * when the input is `.l->.` (`.` meaning undefined). This will
- * then cause an error. But cast to `?` would always lose
- * information, and in many cases important information:
- *
- * ```python
- * from operator import eq
- * from functools import reduce
- *
- * reduce(eq, [1, 2, 3]) != reduce(eq, [True, True, True])
- * ```
- *
- * The special cases being `logical_(and|or|xor)` which can always
- * cast to boolean ahead of time and still give the right answer
- * (unsafe cast to bool is fine here). We special case these at
- * the time of this comment (NumPy 1.21).
- */
- assert(ufunc->nin == 2 && ufunc->nout == 1);
- op_dtypes[0] = op_dtypes[2] != NULL ? op_dtypes[2] : op_dtypes[1];
- Py_INCREF(op_dtypes[0]);
- return promote_and_get_info_and_ufuncimpl(ufunc,
- ops, signature, op_dtypes, allow_legacy_promotion, 1);
- }
}
/*
@@ -707,6 +711,11 @@ promote_and_get_info_and_ufuncimpl(PyUFuncObject *ufunc,
return NULL;
}
else if (info != NULL) {
+ /* Add result to the cache using the original types: */
+ if (PyArrayIdentityHash_SetItem(ufunc->_dispatch_cache,
+ (PyObject **)op_dtypes, info, 0) < 0) {
+ return NULL;
+ }
return info;
}
}
@@ -730,7 +739,12 @@ promote_and_get_info_and_ufuncimpl(PyUFuncObject *ufunc,
return NULL;
}
info = promote_and_get_info_and_ufuncimpl(ufunc,
- ops, signature, new_op_dtypes, NPY_FALSE, cacheable);
+ ops, signature, new_op_dtypes, NPY_FALSE);
+ /* Add this to the cache using the original types: */
+ if (cacheable && PyArrayIdentityHash_SetItem(ufunc->_dispatch_cache,
+ (PyObject **)op_dtypes, info, 0) < 0) {
+ return NULL;
+ }
             for (int i = 0; i < ufunc->nargs; i++) {
                 Py_XDECREF(new_op_dtypes[i]);
}
@@ -745,6 +759,14 @@ promote_and_get_info_and_ufuncimpl(PyUFuncObject *ufunc,
* only work with DType (classes/types). This is because it has to ensure
* that legacy (value-based promotion) is used when necessary.
*
+ * NOTE: The machinery here currently ignores output arguments unless
+ * they are part of the signature. This slightly limits unsafe loop
+ * specializations, which is important for the `ensure_reduce_compatible`
+ * fallback mode.
+ * To fix this, the caching mechanism (and dispatching) can be extended.
+ * When/if that happens, the `ensure_reduce_compatible` could be
+ *        deprecated (it should never kick in because promotion kicks in first).
+ *
* @param ufunc The ufunc object, used mainly for the fallback.
* @param ops The array operands (used only for the fallback).
* @param signature As input, the DType signature fixed explicitly by the user.
@@ -754,9 +776,16 @@ promote_and_get_info_and_ufuncimpl(PyUFuncObject *ufunc,
* either by the `signature` or by an `operand`.
* (outputs and the second input can be NULL for reductions).
* NOTE: In some cases, the promotion machinery may currently modify
- * these.
+ * these including clearing the output.
* @param force_legacy_promotion If set, we have to use the old type resolution
* to implement value-based promotion/casting.
+ * @param ensure_reduce_compatible Must be set for reductions, in which case
+ * the found implementation is checked for reduce-like compatibility.
+ * If it is *not* compatible and `signature[2] != NULL`, we assume its
+ * output DType is correct (see NOTE above).
+ * If removed, promotion may require information about whether this
+ * is a reduction, so the more likely case is to always keep fixing this
+ * when necessary, but push down the handling so it can be cached.
*/
NPY_NO_EXPORT PyArrayMethodObject *
promote_and_get_ufuncimpl(PyUFuncObject *ufunc,
@@ -764,9 +793,10 @@ promote_and_get_ufuncimpl(PyUFuncObject *ufunc,
PyArray_DTypeMeta *signature[],
PyArray_DTypeMeta *op_dtypes[],
npy_bool force_legacy_promotion,
- npy_bool allow_legacy_promotion)
+ npy_bool allow_legacy_promotion,
+ npy_bool ensure_reduce_compatible)
{
- int nargs = ufunc->nargs;
+ int nin = ufunc->nin, nargs = ufunc->nargs;
/*
* Get the actual DTypes we operate with by mixing the operand array
@@ -782,6 +812,15 @@ promote_and_get_ufuncimpl(PyUFuncObject *ufunc,
Py_XSETREF(op_dtypes[i], signature[i]);
assert(i >= ufunc->nin || !NPY_DT_is_abstract(signature[i]));
}
+ else if (i >= nin) {
+ /*
+ * We currently just ignore outputs if not in signature, this will
+ * always give the/a correct result (limits registering specialized
+ * loops which include the cast).
+ * (See also comment in resolve_implementation_info.)
+ */
+ Py_CLEAR(op_dtypes[i]);
+ }
}
if (force_legacy_promotion) {
@@ -798,7 +837,7 @@ promote_and_get_ufuncimpl(PyUFuncObject *ufunc,
}
PyObject *info = promote_and_get_info_and_ufuncimpl(ufunc,
- ops, signature, op_dtypes, allow_legacy_promotion, NPY_TRUE);
+ ops, signature, op_dtypes, allow_legacy_promotion);
if (info == NULL) {
if (!PyErr_Occurred()) {
@@ -809,8 +848,26 @@ promote_and_get_ufuncimpl(PyUFuncObject *ufunc,
PyArrayMethodObject *method = (PyArrayMethodObject *)PyTuple_GET_ITEM(info, 1);
- /* Fill `signature` with final DTypes used by the ArrayMethod/inner-loop */
+ /*
+ * In certain cases (only the logical ufuncs really), the loop we found may
+ * not be reduce-compatible. Since the machinery can't distinguish a
+ * reduction with an output from a normal ufunc call, we have to assume
+ * the result DType is correct and force it for the input (if not forced
+ * already).
+ * NOTE: This does assume that all loops are "safe" see the NOTE in this
+ * comment. That could be relaxed, in which case we may need to
+ * cache if a call was for a reduction.
+ */
PyObject *all_dtypes = PyTuple_GET_ITEM(info, 0);
+ if (ensure_reduce_compatible && signature[0] == NULL &&
+ PyTuple_GET_ITEM(all_dtypes, 0) != PyTuple_GET_ITEM(all_dtypes, 2)) {
+ signature[0] = (PyArray_DTypeMeta *)PyTuple_GET_ITEM(all_dtypes, 2);
+ Py_INCREF(signature[0]);
+ return promote_and_get_ufuncimpl(ufunc,
+ ops, signature, op_dtypes,
+ force_legacy_promotion, allow_legacy_promotion, NPY_FALSE);
+ }
+
for (int i = 0; i < nargs; i++) {
if (signature[i] == NULL) {
signature[i] = (PyArray_DTypeMeta *)PyTuple_GET_ITEM(all_dtypes, i);
@@ -826,6 +883,112 @@ promote_and_get_ufuncimpl(PyUFuncObject *ufunc,
/*
+ * Generic promoter used by as a final fallback on ufuncs. Most operations are
+ * homogeneous, so we can try to find the homogeneous dtype on the inputs
+ * and use that.
+ * We need to special case the reduction case, where op_dtypes[0] == NULL
+ * is possible.
+ */
+NPY_NO_EXPORT int
+default_ufunc_promoter(PyUFuncObject *ufunc,
+ PyArray_DTypeMeta *op_dtypes[], PyArray_DTypeMeta *signature[],
+ PyArray_DTypeMeta *new_op_dtypes[])
+{
+ if (ufunc->type_resolver == &PyUFunc_SimpleBinaryComparisonTypeResolver
+ && signature[0] == NULL && signature[1] == NULL
+ && signature[2] != NULL && signature[2]->type_num != NPY_BOOL) {
+ /* bail out, this is _only_ to give future/deprecation warning! */
+ return -1;
+ }
+
+ /* If nin < 2 promotion is a no-op, so it should not be registered */
+ assert(ufunc->nin > 1);
+ if (op_dtypes[0] == NULL) {
+ assert(ufunc->nin == 2 && ufunc->nout == 1); /* must be reduction */
+ Py_INCREF(op_dtypes[1]);
+ new_op_dtypes[0] = op_dtypes[1];
+ Py_INCREF(op_dtypes[1]);
+ new_op_dtypes[1] = op_dtypes[1];
+ Py_INCREF(op_dtypes[1]);
+ new_op_dtypes[2] = op_dtypes[1];
+ return 0;
+ }
+ PyArray_DTypeMeta *common = NULL;
+ /*
+ * If a signature is used and homogeneous in its outputs use that
+ * (Could/should likely be rather applied to inputs also, although outs
+ * only could have some advantage and input dtypes are rarely enforced.)
+ */
+ for (int i = ufunc->nin; i < ufunc->nargs; i++) {
+ if (signature[i] != NULL) {
+ if (common == NULL) {
+ Py_INCREF(signature[i]);
+ common = signature[i];
+ }
+ else if (common != signature[i]) {
+ Py_CLEAR(common); /* Not homogeneous, unset common */
+ break;
+ }
+ }
+ }
+ /* Otherwise, use the common DType of all input operands */
+ if (common == NULL) {
+ common = PyArray_PromoteDTypeSequence(ufunc->nin, op_dtypes);
+ if (common == NULL) {
+ if (PyErr_ExceptionMatches(PyExc_TypeError)) {
+ PyErr_Clear(); /* Do not propagate normal promotion errors */
+ }
+ return -1;
+ }
+ }
+
+    for (int i = 0; i < ufunc->nin; i++) {
+ PyArray_DTypeMeta *tmp = common;
+ if (signature[i]) {
+ tmp = signature[i]; /* never replace a fixed one. */
+ }
+ Py_INCREF(tmp);
+ new_op_dtypes[i] = tmp;
+ }
+ for (int i = ufunc->nin; i < ufunc->nargs; i++) {
+ Py_XINCREF(op_dtypes[i]);
+ new_op_dtypes[i] = op_dtypes[i];
+ }
+
+ Py_DECREF(common);
+ return 0;
+}
+
+
+/*
+ * In some cases, we assume that there will only ever be object loops,
+ * and the object loop should *always* be chosen.
+ * (in those cases more specific loops should not really be registered, but
+ * we do not check that.)
+ *
+ * We default to this for "old-style" ufuncs which have exactly one loop
+ * consisting only of objects (during registration time, numba mutates this,
+ * but the loop presumably remains object-only).
+ */
+NPY_NO_EXPORT int
+object_only_ufunc_promoter(PyUFuncObject *ufunc,
+ PyArray_DTypeMeta *NPY_UNUSED(op_dtypes[]),
+ PyArray_DTypeMeta *signature[],
+ PyArray_DTypeMeta *new_op_dtypes[])
+{
+ PyArray_DTypeMeta *object_DType = PyArray_DTypeFromTypeNum(NPY_OBJECT);
+
+ for (int i = 0; i < ufunc->nargs; i++) {
+ if (signature[i] == NULL) {
+ Py_INCREF(object_DType);
+ new_op_dtypes[i] = object_DType;
+ }
+ }
+ Py_DECREF(object_DType);
+ return 0;
+}
+
+/*
* Special promoter for the logical ufuncs. The logical ufuncs can always
* use the ??->? and still get the correct output (as long as the output
* is not supposed to be `object`).
@@ -843,6 +1006,12 @@ logical_ufunc_promoter(PyUFuncObject *NPY_UNUSED(ufunc),
*/
int force_object = 0;
+ if (signature[0] == NULL && signature[1] == NULL
+ && signature[2] != NULL && signature[2]->type_num != NPY_BOOL) {
+ /* bail out, this is _only_ to give future/deprecation warning! */
+ return -1;
+ }
+
for (int i = 0; i < 3; i++) {
PyArray_DTypeMeta *item;
if (signature[i] != NULL) {
@@ -913,4 +1082,3 @@ install_logical_ufunc_promoter(PyObject *ufunc)
return PyUFunc_AddLoop((PyUFuncObject *)ufunc, info, 0);
}
-
diff --git a/numpy/core/src/umath/dispatching.h b/numpy/core/src/umath/dispatching.h
index 2f314615d..a7e9e88d0 100644
--- a/numpy/core/src/umath/dispatching.h
+++ b/numpy/core/src/umath/dispatching.h
@@ -20,13 +20,25 @@ promote_and_get_ufuncimpl(PyUFuncObject *ufunc,
PyArray_DTypeMeta *signature[],
PyArray_DTypeMeta *op_dtypes[],
npy_bool force_legacy_promotion,
- npy_bool allow_legacy_promotion);
+ npy_bool allow_legacy_promotion,
+ npy_bool ensure_reduce_compatible);
NPY_NO_EXPORT PyObject *
add_and_return_legacy_wrapping_ufunc_loop(PyUFuncObject *ufunc,
PyArray_DTypeMeta *operation_dtypes[], int ignore_duplicate);
NPY_NO_EXPORT int
+default_ufunc_promoter(PyUFuncObject *ufunc,
+ PyArray_DTypeMeta *op_dtypes[], PyArray_DTypeMeta *signature[],
+ PyArray_DTypeMeta *new_op_dtypes[]);
+
+NPY_NO_EXPORT int
+object_only_ufunc_promoter(PyUFuncObject *ufunc,
+ PyArray_DTypeMeta *NPY_UNUSED(op_dtypes[]),
+ PyArray_DTypeMeta *signature[],
+ PyArray_DTypeMeta *new_op_dtypes[]);
+
+NPY_NO_EXPORT int
install_logical_ufunc_promoter(PyObject *ufunc);
diff --git a/numpy/core/src/umath/legacy_array_method.c b/numpy/core/src/umath/legacy_array_method.c
index a423823d4..99de63aac 100644
--- a/numpy/core/src/umath/legacy_array_method.c
+++ b/numpy/core/src/umath/legacy_array_method.c
@@ -123,10 +123,40 @@ simple_legacy_resolve_descriptors(
PyArray_Descr **given_descrs,
PyArray_Descr **output_descrs)
{
+ int i = 0;
int nin = method->nin;
int nout = method->nout;
- for (int i = 0; i < nin + nout; i++) {
+ if (nin == 2 && nout == 1 && given_descrs[2] != NULL
+ && dtypes[0] == dtypes[2]) {
+ /*
+ * Could be a reduction, which requires `descr[0] is descr[2]`
+ * (identity) at least currently. This is because `op[0] is op[2]`.
+ * (If the output descriptor is not passed, the below works.)
+ */
+ output_descrs[2] = ensure_dtype_nbo(given_descrs[2]);
+ if (output_descrs[2] == NULL) {
+ Py_CLEAR(output_descrs[2]);
+ return -1;
+ }
+ Py_INCREF(output_descrs[2]);
+ output_descrs[0] = output_descrs[2];
+ if (dtypes[1] == dtypes[2]) {
+ /* Same for the second one (accumulation is stricter) */
+ Py_INCREF(output_descrs[2]);
+ output_descrs[1] = output_descrs[2];
+ }
+ else {
+ output_descrs[1] = ensure_dtype_nbo(given_descrs[1]);
+ if (output_descrs[1] == NULL) {
+ i = 2;
+ goto fail;
+ }
+ }
+ return NPY_NO_CASTING;
+ }
+
+ for (; i < nin + nout; i++) {
if (given_descrs[i] != NULL) {
output_descrs[i] = ensure_dtype_nbo(given_descrs[i]);
}
@@ -146,7 +176,7 @@ simple_legacy_resolve_descriptors(
return NPY_NO_CASTING;
fail:
- for (int i = 0; i < nin + nout; i++) {
+ for (; i >= 0; i--) {
Py_CLEAR(output_descrs[i]);
}
return -1;
diff --git a/numpy/core/src/umath/loops_exponent_log.dispatch.c.src b/numpy/core/src/umath/loops_exponent_log.dispatch.c.src
index 95cce553a..2dd43fb85 100644
--- a/numpy/core/src/umath/loops_exponent_log.dispatch.c.src
+++ b/numpy/core/src/umath/loops_exponent_log.dispatch.c.src
@@ -386,7 +386,7 @@ avx512_permute_x8var_pd(__m512d t0, __m512d t1, __m512d t2, __m512d t3,
* #and_masks =_mm256_and_ps, _mm512_kand#
* #xor_masks =_mm256_xor_ps, _mm512_kxor#
* #fmadd = _mm256_fmadd_ps, _mm512_fmadd_ps#
- * #mask_to_int = _mm256_movemask_ps, #
+ * #mask_to_int = _mm256_movemask_ps, npyv_tobits_b32#
* #full_mask= 0xFF, 0xFFFF#
* #masked_store = _mm256_maskstore_ps, _mm512_mask_storeu_ps#
* #cvtps_epi32 = _mm256_cvtps_epi32, #
@@ -833,11 +833,19 @@ AVX512F_exp_DOUBLE(npy_double * op,
op += num_lanes;
num_remaining_elements -= num_lanes;
}
- if (overflow_mask) {
+ /*
+ * Don't count on the compiler for cast between mask and int registers.
+ * On gcc7 with flags -march>=nocona -O3 can cause FP stack overflow
+ * which may lead to putting NaN into certain HW/FP calculations.
+ *
+ * For more details, please check the comments in:
+ * - https://github.com/numpy/numpy/issues/20356
+ */
+ if (npyv_tobits_b64(overflow_mask)) {
npy_set_floatstatus_overflow();
}
- if (underflow_mask) {
+ if (npyv_tobits_b64(underflow_mask)) {
npy_set_floatstatus_underflow();
}
}
@@ -1062,10 +1070,10 @@ AVX512F_log_DOUBLE(npy_double * op,
num_remaining_elements -= num_lanes;
}
- if (invalid_mask) {
+ if (npyv_tobits_b64(invalid_mask)) {
npy_set_floatstatus_invalid();
}
- if (divide_by_zero_mask) {
+ if (npyv_tobits_b64(divide_by_zero_mask)) {
npy_set_floatstatus_divbyzero();
}
}
diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c
index 186f18a62..9107323b0 100644
--- a/numpy/core/src/umath/ufunc_object.c
+++ b/numpy/core/src/umath/ufunc_object.c
@@ -998,10 +998,6 @@ convert_ufunc_arguments(PyUFuncObject *ufunc,
}
if (*allow_legacy_promotion && (!all_scalar && any_scalar)) {
*force_legacy_promotion = should_use_min_scalar(nin, out_op, 0, NULL);
- /*
- * TODO: if this is False, we end up in a "very slow" path that should
- * be avoided. This makes `int_arr + 0.` ~40% slower.
- */
}
/* Convert and fill in output arguments */
@@ -2717,11 +2713,11 @@ reducelike_promote_and_resolve(PyUFuncObject *ufunc,
char *method)
{
/*
- * Note that the `ops` is not realy correct. But legacy resolution
+ * Note that the `ops` is not really correct. But legacy resolution
* cannot quite handle the correct ops (e.g. a NULL first item if `out`
- * is NULL), and it should only matter in very strange cases.
+ * is NULL) so we pass `arr` instead in that case.
*/
- PyArrayObject *ops[3] = {arr, arr, NULL};
+ PyArrayObject *ops[3] = {out ? out : arr, arr, out};
/*
* TODO: If `out` is not provided, arguably `initial` could define
* the first DType (and maybe also the out one), that way
@@ -2741,11 +2737,12 @@ reducelike_promote_and_resolve(PyUFuncObject *ufunc,
}
PyArrayMethodObject *ufuncimpl = promote_and_get_ufuncimpl(ufunc,
- ops, signature, operation_DTypes, NPY_FALSE, NPY_TRUE);
- Py_DECREF(operation_DTypes[1]);
+ ops, signature, operation_DTypes, NPY_FALSE, NPY_FALSE, NPY_TRUE);
+ /* Output can currently get cleared, others XDECREF in case of error */
+ Py_XDECREF(operation_DTypes[1]);
if (out != NULL) {
- Py_DECREF(operation_DTypes[0]);
- Py_DECREF(operation_DTypes[2]);
+ Py_XDECREF(operation_DTypes[0]);
+ Py_XDECREF(operation_DTypes[2]);
}
if (ufuncimpl == NULL) {
return NULL;
@@ -2771,8 +2768,10 @@ reducelike_promote_and_resolve(PyUFuncObject *ufunc,
if (out_descrs[0] != out_descrs[2] || (
enforce_uniform_args && out_descrs[0] != out_descrs[1])) {
PyErr_Format(PyExc_TypeError,
- "the resolved dtypes are not compatible with %s.%s",
- ufunc_get_name_cstr(ufunc), method);
+ "the resolved dtypes are not compatible with %s.%s. "
+ "Resolved (%R, %R, %R)",
+ ufunc_get_name_cstr(ufunc), method,
+ out_descrs[0], out_descrs[1], out_descrs[2]);
goto fail;
}
/* TODO: This really should _not_ be unsafe casting (same above)! */
@@ -4852,7 +4851,8 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc,
*/
PyArrayMethodObject *ufuncimpl = promote_and_get_ufuncimpl(ufunc,
operands, signature,
- operand_DTypes, force_legacy_promotion, allow_legacy_promotion);
+ operand_DTypes, force_legacy_promotion, allow_legacy_promotion,
+ NPY_FALSE);
if (ufuncimpl == NULL) {
goto fail;
}
@@ -5190,6 +5190,61 @@ PyUFunc_FromFuncAndDataAndSignatureAndIdentity(PyUFuncGenericFunction *func, voi
info = add_and_return_legacy_wrapping_ufunc_loop(ufunc, op_dtypes, 1);
if (info == NULL) {
+ Py_DECREF(ufunc);
+ return NULL;
+ }
+ }
+
+ PyObject *promoter = NULL;
+ if (ufunc->ntypes == 1) {
+ npy_bool all_object = NPY_TRUE;
+ for (int i = 0; i < ufunc->nargs; i++) {
+ if (ufunc->types[i] != NPY_OBJECT) {
+ all_object = NPY_FALSE;
+ break;
+ }
+ }
+ if (all_object) {
+ promoter = PyCapsule_New(&object_only_ufunc_promoter,
+ "numpy._ufunc_promoter", NULL);
+ if (promoter == NULL) {
+ Py_DECREF(ufunc);
+ return NULL;
+ }
+ }
+ }
+ if (promoter == NULL && ufunc->nin > 1) {
+ promoter = PyCapsule_New(&default_ufunc_promoter,
+ "numpy._ufunc_promoter", NULL);
+ if (promoter == NULL) {
+ Py_DECREF(ufunc);
+ return NULL;
+ }
+ }
+ if (promoter != NULL) {
+ /* Always install default promoter using the common DType */
+ PyObject *dtype_tuple = PyTuple_New(ufunc->nargs);
+ if (dtype_tuple == NULL) {
+ Py_DECREF(promoter);
+ Py_DECREF(ufunc);
+ return NULL;
+ }
+ for (int i = 0; i < ufunc->nargs; i++) {
+ Py_INCREF(Py_None);
+ PyTuple_SET_ITEM(dtype_tuple, i, Py_None);
+ }
+ PyObject *info = PyTuple_Pack(2, dtype_tuple, promoter);
+ Py_DECREF(dtype_tuple);
+ Py_DECREF(promoter);
+ if (info == NULL) {
+ Py_DECREF(ufunc);
+ return NULL;
+ }
+
+ int res = PyUFunc_AddLoop((PyUFuncObject *)ufunc, info, 0);
+ Py_DECREF(info);
+ if (res < 0) {
+ Py_DECREF(ufunc);
return NULL;
}
}
@@ -5963,7 +6018,7 @@ ufunc_at(PyUFuncObject *ufunc, PyObject *args)
PyArrayMethodObject *ufuncimpl = promote_and_get_ufuncimpl(ufunc,
operands, signature, operand_DTypes,
- force_legacy_promotion, allow_legacy_promotion);
+ force_legacy_promotion, allow_legacy_promotion, NPY_FALSE);
if (ufuncimpl == NULL) {
goto fail;
}
diff --git a/numpy/core/tests/test_datetime.py b/numpy/core/tests/test_datetime.py
index b95d669a8..50da7b800 100644
--- a/numpy/core/tests/test_datetime.py
+++ b/numpy/core/tests/test_datetime.py
@@ -2033,15 +2033,15 @@ class TestDateTime:
# subtracting two datetime64 works, but we cannot reduce it, since
# the result of that subtraction will have a different dtype.
arr = np.array(["2021-12-02", "2019-05-12"], dtype="M8[ms]")
- msg = r"the resolved dtypes are not compatible with subtract\."
+ msg = r"ufunc 'subtract' did not contain a loop with signature "
- with pytest.raises(TypeError, match=msg + "reduce"):
+ with pytest.raises(TypeError, match=msg):
np.subtract.reduce(arr)
- with pytest.raises(TypeError, match=msg + "accumulate"):
+ with pytest.raises(TypeError, match=msg):
np.subtract.accumulate(arr)
- with pytest.raises(TypeError, match=msg + "reduceat"):
+ with pytest.raises(TypeError, match=msg):
np.subtract.reduceat(arr, [0])
def test_datetime_busday_offset(self):
diff --git a/numpy/core/tests/test_deprecations.py b/numpy/core/tests/test_deprecations.py
index 94583a5ee..a269eb519 100644
--- a/numpy/core/tests/test_deprecations.py
+++ b/numpy/core/tests/test_deprecations.py
@@ -13,7 +13,8 @@ import sys
import numpy as np
from numpy.testing import (
- assert_raises, assert_warns, assert_, assert_array_equal, SkipTest, KnownFailureException
+ assert_raises, assert_warns, assert_, assert_array_equal, SkipTest,
+ KnownFailureException, break_cycles,
)
from numpy.core._multiarray_tests import fromstring_null_term_c_api
@@ -426,11 +427,6 @@ class TestBincount(_DeprecationTestCase):
self.assert_deprecated(lambda: np.bincount([1, 2, 3], minlength=None))
-class TestAlen(_DeprecationTestCase):
- # 2019-08-02, 1.18.0
- def test_alen(self):
- self.assert_deprecated(lambda: np.alen(np.array([1, 2, 3])))
-
class TestGeneratorSum(_DeprecationTestCase):
# 2018-02-25, 1.15.0
@@ -1250,3 +1246,22 @@ class TestQuantileInterpolationDeprecation(_DeprecationTestCase):
warnings.simplefilter("always", DeprecationWarning)
with pytest.raises(TypeError):
func([0., 1.], 0., interpolation="nearest", method="nearest")
+
+
+class TestMemEventHook(_DeprecationTestCase):
+ # Deprecated 2021-11-18, NumPy 1.23
+ def test_mem_seteventhook(self):
+ # The actual tests are within the C code in
+ # multiarray/_multiarray_tests.c.src
+ import numpy.core._multiarray_tests as ma_tests
+ with pytest.warns(DeprecationWarning,
+ match='PyDataMem_SetEventHook is deprecated'):
+ ma_tests.test_pydatamem_seteventhook_start()
+ # force an allocation and free of a numpy array
+ # needs to be larger then limit of small memory cacher in ctors.c
+ a = np.zeros(1000)
+ del a
+ break_cycles()
+ with pytest.warns(DeprecationWarning,
+ match='PyDataMem_SetEventHook is deprecated'):
+ ma_tests.test_pydatamem_seteventhook_end()
diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py
index 4413cd0d0..9d728afa4 100644
--- a/numpy/core/tests/test_multiarray.py
+++ b/numpy/core/tests/test_multiarray.py
@@ -6888,26 +6888,6 @@ class TestInner:
assert_equal(np.inner(b, a).transpose(2,3,0,1), desired)
-class TestAlen:
- def test_basic(self):
- with pytest.warns(DeprecationWarning):
- m = np.array([1, 2, 3])
- assert_equal(np.alen(m), 3)
-
- m = np.array([[1, 2, 3], [4, 5, 7]])
- assert_equal(np.alen(m), 2)
-
- m = [1, 2, 3]
- assert_equal(np.alen(m), 3)
-
- m = [[1, 2, 3], [4, 5, 7]]
- assert_equal(np.alen(m), 2)
-
- def test_singleton(self):
- with pytest.warns(DeprecationWarning):
- assert_equal(np.alen(5), 1)
-
-
class TestChoose:
def setup(self):
self.x = 2*np.ones((3,), dtype=int)
@@ -7832,9 +7812,9 @@ class TestArrayCreationCopyArgument(object):
pyscalar = arr.item(0)
# Test never-copy raises error:
- assert_raises(ValueError, np.array, scalar,
+ assert_raises(ValueError, np.array, scalar,
copy=np._CopyMode.NEVER)
- assert_raises(ValueError, np.array, pyscalar,
+ assert_raises(ValueError, np.array, pyscalar,
copy=np._CopyMode.NEVER)
assert_raises(ValueError, np.array, pyscalar,
copy=self.RaiseOnBool())
@@ -8187,18 +8167,6 @@ def test_scalar_element_deletion():
assert_raises(ValueError, a[0].__delitem__, 'x')
-class TestMemEventHook:
- def test_mem_seteventhook(self):
- # The actual tests are within the C code in
- # multiarray/_multiarray_tests.c.src
- _multiarray_tests.test_pydatamem_seteventhook_start()
- # force an allocation and free of a numpy array
- # needs to be larger then limit of small memory cacher in ctors.c
- a = np.zeros(1000)
- del a
- break_cycles()
- _multiarray_tests.test_pydatamem_seteventhook_end()
-
class TestMapIter:
def test_mapiter(self):
# The actual tests are within the C code in
diff --git a/numpy/core/tests/test_ufunc.py b/numpy/core/tests/test_ufunc.py
index ef0bac957..76e4cdcfd 100644
--- a/numpy/core/tests/test_ufunc.py
+++ b/numpy/core/tests/test_ufunc.py
@@ -1762,12 +1762,15 @@ class TestUfunc:
result = _rational_tests.test_add(a, b)
assert_equal(result, target)
- # But since we use the old type resolver, this may not work
- # for dtype variations unless the output dtype is given:
+ # This works even more generally, so long the default common-dtype
+ # promoter works out:
result = _rational_tests.test_add(a, b.astype(np.uint16), out=c)
assert_equal(result, target)
+
+ # But, it can be fooled, e.g. (use scalars, which forces legacy
+ # type resolution to kick in, which then fails):
with assert_raises(TypeError):
- _rational_tests.test_add(a, b.astype(np.uint16))
+ _rational_tests.test_add(a, np.uint16(2))
def test_operand_flags(self):
a = np.arange(16, dtype='l').reshape(4, 4)
@@ -2123,6 +2126,17 @@ class TestUfunc:
c = np.array([1., 2.])
assert_array_equal(ufunc(a, c), ufunc([True, True], True))
assert ufunc.reduce(a) == True
+ # check that the output has no effect:
+ out = np.zeros(2, dtype=np.int32)
+ expected = ufunc([True, True], True).astype(out.dtype)
+ assert_array_equal(ufunc(a, c, out=out), expected)
+ out = np.zeros((), dtype=np.int32)
+ assert ufunc.reduce(a, out=out) == True
+ # Last check, test reduction when out and a match (the complexity here
+ # is that the "i,i->?" may seem right, but should not match).
+ a = np.array([3], dtype="i")
+ out = np.zeros((), dtype=a.dtype)
+ assert ufunc.reduce(a, out=out) == 1
@pytest.mark.parametrize("ufunc",
[np.logical_and, np.logical_or, np.logical_xor])
@@ -2134,6 +2148,49 @@ class TestUfunc:
# It would be safe, but not equiv casting:
ufunc(a, c, out=out, casting="equiv")
+ def test_reducelike_out_promotes(self):
+ # Check that the out argument to reductions is considered for
+ # promotion. See also gh-20455.
+ # Note that these paths could prefer `initial=` in the future and
+ # do not up-cast to the default integer for add and prod
+ arr = np.ones(1000, dtype=np.uint8)
+ out = np.zeros((), dtype=np.uint16)
+ assert np.add.reduce(arr, out=out) == 1000
+ arr[:10] = 2
+ assert np.multiply.reduce(arr, out=out) == 2**10
+
+ # For legacy dtypes, the signature currently has to be forced if `out=`
+ # is passed. The two paths below should differ, without `dtype=` the
+ # expected result should be: `np.prod(arr.astype("f8")).astype("f4")`!
+ arr = np.full(5, 2**25-1, dtype=np.int64)
+
+ # float32 and int64 promote to float64:
+ res = np.zeros((), dtype=np.float32)
+ # If `dtype=` is passed, the calculation is forced to float32:
+ single_res = np.zeros((), dtype=np.float32)
+ np.multiply.reduce(arr, out=single_res, dtype=np.float32)
+ assert single_res != res
+
+ def test_reducelike_output_needs_identical_cast(self):
+ # Checks that the case where we have a simple byte-swap works, mainly
+ # tests that this is not rejected directly.
+ # (interesting because we require descriptor identity in reducelikes).
+ arr = np.ones(20, dtype="f8")
+ out = np.empty((), dtype=arr.dtype.newbyteorder())
+ expected = np.add.reduce(arr)
+ np.add.reduce(arr, out=out)
+ assert_array_equal(expected, out)
+ # Check reduceat:
+ out = np.empty(2, dtype=arr.dtype.newbyteorder())
+ expected = np.add.reduceat(arr, [0, 1])
+ np.add.reduceat(arr, [0, 1], out=out)
+ assert_array_equal(expected, out)
+ # And accumulate:
+ out = np.empty(arr.shape, dtype=arr.dtype.newbyteorder())
+ expected = np.add.accumulate(arr)
+ np.add.accumulate(arr, out=out)
+ assert_array_equal(expected, out)
+
def test_reduce_noncontig_output(self):
# Check that reduction deals with non-contiguous output arrays
# appropriately.
diff --git a/numpy/core/tests/test_umath.py b/numpy/core/tests/test_umath.py
index fc7c592f0..c0b26e75b 100644
--- a/numpy/core/tests/test_umath.py
+++ b/numpy/core/tests/test_umath.py
@@ -3433,7 +3433,7 @@ class TestComplexFunctions:
x_series = np.logspace(-20, -3.001, 200)
x_basic = np.logspace(-2.999, 0, 10, endpoint=False)
- if glibc_older_than("2.19") and dtype is np.longcomplex:
+ if dtype is np.longcomplex:
if (platform.machine() == 'aarch64' and bad_arcsinh()):
pytest.skip("Trig functions of np.longcomplex values known "
"to be inaccurate on aarch64 for some compilation "
diff --git a/numpy/distutils/ccompiler_opt.py b/numpy/distutils/ccompiler_opt.py
index 39847c20f..b38e47c13 100644
--- a/numpy/distutils/ccompiler_opt.py
+++ b/numpy/distutils/ccompiler_opt.py
@@ -654,9 +654,9 @@ class _Distutils:
@staticmethod
def dist_load_module(name, path):
"""Load a module from file, required by the abstract class '_Cache'."""
- from numpy.compat import npy_load_module
+ from .misc_util import exec_mod_from_location
try:
- return npy_load_module(name, path)
+ return exec_mod_from_location(name, path)
except Exception as e:
_Distutils.dist_log(e, stderr=True)
return None
diff --git a/numpy/distutils/checks/cpu_asimdfhm.c b/numpy/distutils/checks/cpu_asimdfhm.c
index bb437aa40..cb49751c4 100644
--- a/numpy/distutils/checks/cpu_asimdfhm.c
+++ b/numpy/distutils/checks/cpu_asimdfhm.c
@@ -10,8 +10,8 @@ int main(void)
float32x4_t vf = vdupq_n_f32(1.0f);
float32x2_t vlf = vdup_n_f32(1.0f);
- int ret = (int)vget_lane_f32(vfmlal_low_u32(vlf, vlhp, vlhp), 0);
- ret += (int)vgetq_lane_f32(vfmlslq_high_u32(vf, vhp, vhp), 0);
+ int ret = (int)vget_lane_f32(vfmlal_low_f16(vlf, vlhp, vlhp), 0);
+ ret += (int)vgetq_lane_f32(vfmlslq_high_f16(vf, vhp, vhp), 0);
return ret;
}
diff --git a/numpy/distutils/misc_util.py b/numpy/distutils/misc_util.py
index f0f9b4bd7..513be75db 100644
--- a/numpy/distutils/misc_util.py
+++ b/numpy/distutils/misc_util.py
@@ -31,8 +31,6 @@ def clean_up_temporary_directory():
atexit.register(clean_up_temporary_directory)
-from numpy.compat import npy_load_module
-
__all__ = ['Configuration', 'get_numpy_include_dirs', 'default_config_dict',
'dict_append', 'appendpath', 'generate_config_py',
'get_cmd', 'allpath', 'get_mathlibs',
@@ -44,7 +42,8 @@ __all__ = ['Configuration', 'get_numpy_include_dirs', 'default_config_dict',
'dot_join', 'get_frame', 'minrelpath', 'njoin',
'is_sequence', 'is_string', 'as_list', 'gpaths', 'get_language',
'get_build_architecture', 'get_info', 'get_pkg_info',
- 'get_num_build_jobs', 'sanitize_cxx_flags']
+ 'get_num_build_jobs', 'sanitize_cxx_flags',
+ 'exec_mod_from_location']
class InstallableLib:
"""
@@ -945,9 +944,8 @@ class Configuration:
try:
setup_name = os.path.splitext(os.path.basename(setup_py))[0]
n = dot_join(self.name, subpackage_name, setup_name)
- setup_module = npy_load_module('_'.join(n.split('.')),
- setup_py,
- ('.py', 'U', 1))
+ setup_module = exec_mod_from_location(
+ '_'.join(n.split('.')), setup_py)
if not hasattr(setup_module, 'configuration'):
if not self.options['assume_default_configuration']:
self.warn('Assuming default configuration '\
@@ -1993,8 +1991,8 @@ class Configuration:
name = os.path.splitext(os.path.basename(fn))[0]
n = dot_join(self.name, name)
try:
- version_module = npy_load_module('_'.join(n.split('.')),
- fn, info)
+ version_module = exec_mod_from_location(
+ '_'.join(n.split('.')), fn)
except ImportError as e:
self.warn(str(e))
version_module = None
@@ -2481,7 +2479,7 @@ def get_build_architecture():
return get_build_architecture()
-_cxx_ignore_flags = {'-Werror=implicit-function-declaration'}
+_cxx_ignore_flags = {'-Werror=implicit-function-declaration', '-std=c99'}
def sanitize_cxx_flags(cxxflags):
@@ -2491,3 +2489,14 @@ def sanitize_cxx_flags(cxxflags):
return [flag for flag in cxxflags if flag not in _cxx_ignore_flags]
+def exec_mod_from_location(modname, modfile):
+ '''
+ Use importlib machinery to import a module `modname` from the file
+ `modfile`. Depending on the `spec.loader`, the module may not be
+ registered in sys.modules.
+ '''
+ spec = importlib.util.spec_from_file_location(modname, modfile)
+ foo = importlib.util.module_from_spec(spec)
+ spec.loader.exec_module(foo)
+ return foo
+
diff --git a/numpy/f2py/__init__.py b/numpy/f2py/__init__.py
index f147f1b97..b1cb74fae 100644
--- a/numpy/f2py/__init__.py
+++ b/numpy/f2py/__init__.py
@@ -145,7 +145,7 @@ def get_include():
Notes
-----
- .. versionadded:: 1.22.0
+ .. versionadded:: 1.21.1
Unless the build system you are using has specific support for f2py,
building a Python extension using a ``.pyf`` signature file is a two-step
diff --git a/numpy/f2py/cfuncs.py b/numpy/f2py/cfuncs.py
index 1d9236dcd..528c4adee 100644
--- a/numpy/f2py/cfuncs.py
+++ b/numpy/f2py/cfuncs.py
@@ -845,20 +845,26 @@ int_from_pyobj(int* v, PyObject *obj, const char *errmess)
return !(*v == -1 && PyErr_Occurred());
}
- if (PyComplex_Check(obj))
+ if (PyComplex_Check(obj)) {
+ PyErr_Clear();
tmp = PyObject_GetAttrString(obj,\"real\");
- else if (PyBytes_Check(obj) || PyUnicode_Check(obj))
+ }
+ else if (PyBytes_Check(obj) || PyUnicode_Check(obj)) {
/*pass*/;
- else if (PySequence_Check(obj))
+ }
+ else if (PySequence_Check(obj)) {
+ PyErr_Clear();
tmp = PySequence_GetItem(obj, 0);
+ }
+
if (tmp) {
- PyErr_Clear();
if (int_from_pyobj(v, tmp, errmess)) {
Py_DECREF(tmp);
return 1;
}
Py_DECREF(tmp);
}
+
{
PyObject* err = PyErr_Occurred();
if (err == NULL) {
@@ -888,15 +894,19 @@ long_from_pyobj(long* v, PyObject *obj, const char *errmess) {
return !(*v == -1 && PyErr_Occurred());
}
- if (PyComplex_Check(obj))
+ if (PyComplex_Check(obj)) {
+ PyErr_Clear();
tmp = PyObject_GetAttrString(obj,\"real\");
- else if (PyBytes_Check(obj) || PyUnicode_Check(obj))
+ }
+ else if (PyBytes_Check(obj) || PyUnicode_Check(obj)) {
/*pass*/;
- else if (PySequence_Check(obj))
- tmp = PySequence_GetItem(obj,0);
+ }
+ else if (PySequence_Check(obj)) {
+ PyErr_Clear();
+ tmp = PySequence_GetItem(obj, 0);
+ }
if (tmp) {
- PyErr_Clear();
if (long_from_pyobj(v, tmp, errmess)) {
Py_DECREF(tmp);
return 1;
@@ -934,14 +944,19 @@ long_long_from_pyobj(long_long* v, PyObject *obj, const char *errmess)
return !(*v == -1 && PyErr_Occurred());
}
- if (PyComplex_Check(obj))
+ if (PyComplex_Check(obj)) {
+ PyErr_Clear();
tmp = PyObject_GetAttrString(obj,\"real\");
- else if (PyBytes_Check(obj) || PyUnicode_Check(obj))
+ }
+ else if (PyBytes_Check(obj) || PyUnicode_Check(obj)) {
/*pass*/;
- else if (PySequence_Check(obj))
- tmp = PySequence_GetItem(obj,0);
- if (tmp) {
+ }
+ else if (PySequence_Check(obj)) {
PyErr_Clear();
+ tmp = PySequence_GetItem(obj, 0);
+ }
+
+ if (tmp) {
if (long_long_from_pyobj(v, tmp, errmess)) {
Py_DECREF(tmp);
return 1;
@@ -1001,14 +1016,20 @@ double_from_pyobj(double* v, PyObject *obj, const char *errmess)
Py_DECREF(tmp);
return !(*v == -1.0 && PyErr_Occurred());
}
- if (PyComplex_Check(obj))
+
+ if (PyComplex_Check(obj)) {
+ PyErr_Clear();
tmp = PyObject_GetAttrString(obj,\"real\");
- else if (PyBytes_Check(obj) || PyUnicode_Check(obj))
+ }
+ else if (PyBytes_Check(obj) || PyUnicode_Check(obj)) {
/*pass*/;
- else if (PySequence_Check(obj))
- tmp = PySequence_GetItem(obj,0);
- if (tmp) {
+ }
+ else if (PySequence_Check(obj)) {
PyErr_Clear();
+ tmp = PySequence_GetItem(obj, 0);
+ }
+
+ if (tmp) {
if (double_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;}
Py_DECREF(tmp);
}
diff --git a/numpy/f2py/tests/test_abstract_interface.py b/numpy/f2py/tests/test_abstract_interface.py
index 936c1f7bc..7aecf57fc 100644
--- a/numpy/f2py/tests/test_abstract_interface.py
+++ b/numpy/f2py/tests/test_abstract_interface.py
@@ -1,12 +1,13 @@
+from pathlib import Path
import textwrap
from . import util
from numpy.f2py import crackfortran
class TestAbstractInterface(util.F2PyTest):
- suffix = '.f90'
+ suffix = ".f90"
- skip = ['add1', 'add2']
+ skip = ["add1", "add2"]
code = textwrap.dedent("""
module ops_module
@@ -50,17 +51,17 @@ class TestAbstractInterface(util.F2PyTest):
def test_parse_abstract_interface(self, tmp_path):
# Test gh18403
- f_path = tmp_path / "gh18403_mod.f90"
- with f_path.open('w') as ff:
- ff.write(textwrap.dedent("""\
- module test
- abstract interface
- subroutine foo()
- end subroutine
- end interface
- end module test
- """))
+ f_path = Path(tmp_path / "gh18403_mod.f90")
+ f_path.write_text(
+ textwrap.dedent("""\
+ module test
+ abstract interface
+ subroutine foo()
+ end subroutine
+ end interface
+ end module test
+ """))
mod = crackfortran.crackfortran([str(f_path)])
assert len(mod) == 1
- assert len(mod[0]['body']) == 1
- assert mod[0]['body'][0]['block'] == 'abstract interface'
+ assert len(mod[0]["body"]) == 1
+ assert mod[0]["body"][0]["block"] == "abstract interface"
diff --git a/numpy/f2py/tests/test_array_from_pyobj.py b/numpy/f2py/tests/test_array_from_pyobj.py
index 649fd1c48..78569a8d6 100644
--- a/numpy/f2py/tests/test_array_from_pyobj.py
+++ b/numpy/f2py/tests/test_array_from_pyobj.py
@@ -31,11 +31,13 @@ def setup_module():
define_macros=[])
"""
d = os.path.dirname(__file__)
- src = [os.path.join(d, 'src', 'array_from_pyobj', 'wrapmodule.c'),
- os.path.join(d, '..', 'src', 'fortranobject.c'),
- os.path.join(d, '..', 'src', 'fortranobject.h')]
+ src = [
+ util.getpath("tests", "src", "array_from_pyobj", "wrapmodule.c"),
+ util.getpath("src", "fortranobject.c"),
+ util.getpath("src", "fortranobject.h"),
+ ]
wrap = util.build_module_distutils(src, config_code,
- 'test_array_from_pyobj_ext')
+ "test_array_from_pyobj_ext")
def flags_info(arr):
@@ -45,39 +47,49 @@ def flags_info(arr):
def flags2names(flags):
info = []
- for flagname in ['CONTIGUOUS', 'FORTRAN', 'OWNDATA', 'ENSURECOPY',
- 'ENSUREARRAY', 'ALIGNED', 'NOTSWAPPED', 'WRITEABLE',
- 'WRITEBACKIFCOPY', 'UPDATEIFCOPY', 'BEHAVED', 'BEHAVED_RO',
- 'CARRAY', 'FARRAY'
- ]:
+ for flagname in [
+ "CONTIGUOUS",
+ "FORTRAN",
+ "OWNDATA",
+ "ENSURECOPY",
+ "ENSUREARRAY",
+ "ALIGNED",
+ "NOTSWAPPED",
+ "WRITEABLE",
+ "WRITEBACKIFCOPY",
+ "UPDATEIFCOPY",
+ "BEHAVED",
+ "BEHAVED_RO",
+ "CARRAY",
+ "FARRAY",
+ ]:
if abs(flags) & getattr(wrap, flagname, 0):
info.append(flagname)
return info
class Intent:
-
def __init__(self, intent_list=[]):
self.intent_list = intent_list[:]
flags = 0
for i in intent_list:
- if i == 'optional':
+ if i == "optional":
flags |= wrap.F2PY_OPTIONAL
else:
- flags |= getattr(wrap, 'F2PY_INTENT_' + i.upper())
+ flags |= getattr(wrap, "F2PY_INTENT_" + i.upper())
self.flags = flags
def __getattr__(self, name):
name = name.lower()
- if name == 'in_':
- name = 'in'
+ if name == "in_":
+ name = "in"
return self.__class__(self.intent_list + [name])
def __str__(self):
- return 'intent(%s)' % (','.join(self.intent_list))
+ return "intent(%s)" % (",".join(self.intent_list))
def __repr__(self):
- return 'Intent(%r)' % (self.intent_list)
+ return "Intent(%r)" % (self.intent_list)
def is_intent(self, *names):
for name in names:
@@ -88,32 +100,46 @@ class Intent:
def is_intent_exact(self, *names):
return len(self.intent_list) == len(names) and self.is_intent(*names)
-intent = Intent()
-
-_type_names = ['BOOL', 'BYTE', 'UBYTE', 'SHORT', 'USHORT', 'INT', 'UINT',
- 'LONG', 'ULONG', 'LONGLONG', 'ULONGLONG',
- 'FLOAT', 'DOUBLE', 'CFLOAT']
-
-_cast_dict = {'BOOL': ['BOOL']}
-_cast_dict['BYTE'] = _cast_dict['BOOL'] + ['BYTE']
-_cast_dict['UBYTE'] = _cast_dict['BOOL'] + ['UBYTE']
-_cast_dict['BYTE'] = ['BYTE']
-_cast_dict['UBYTE'] = ['UBYTE']
-_cast_dict['SHORT'] = _cast_dict['BYTE'] + ['UBYTE', 'SHORT']
-_cast_dict['USHORT'] = _cast_dict['UBYTE'] + ['BYTE', 'USHORT']
-_cast_dict['INT'] = _cast_dict['SHORT'] + ['USHORT', 'INT']
-_cast_dict['UINT'] = _cast_dict['USHORT'] + ['SHORT', 'UINT']
-_cast_dict['LONG'] = _cast_dict['INT'] + ['LONG']
-_cast_dict['ULONG'] = _cast_dict['UINT'] + ['ULONG']
-
-_cast_dict['LONGLONG'] = _cast_dict['LONG'] + ['LONGLONG']
-_cast_dict['ULONGLONG'] = _cast_dict['ULONG'] + ['ULONGLONG']
-
-_cast_dict['FLOAT'] = _cast_dict['SHORT'] + ['USHORT', 'FLOAT']
-_cast_dict['DOUBLE'] = _cast_dict['INT'] + ['UINT', 'FLOAT', 'DOUBLE']
+intent = Intent()
-_cast_dict['CFLOAT'] = _cast_dict['FLOAT'] + ['CFLOAT']
+_type_names = [
+ "BOOL",
+ "BYTE",
+ "UBYTE",
+ "SHORT",
+ "USHORT",
+ "INT",
+ "UINT",
+ "LONG",
+ "ULONG",
+ "LONGLONG",
+ "ULONGLONG",
+ "FLOAT",
+ "DOUBLE",
+ "CFLOAT",
+]
+
+_cast_dict = {"BOOL": ["BOOL"]}
+_cast_dict["BYTE"] = _cast_dict["BOOL"] + ["BYTE"]
+_cast_dict["UBYTE"] = _cast_dict["BOOL"] + ["UBYTE"]
+_cast_dict["BYTE"] = ["BYTE"]
+_cast_dict["UBYTE"] = ["UBYTE"]
+_cast_dict["SHORT"] = _cast_dict["BYTE"] + ["UBYTE", "SHORT"]
+_cast_dict["USHORT"] = _cast_dict["UBYTE"] + ["BYTE", "USHORT"]
+_cast_dict["INT"] = _cast_dict["SHORT"] + ["USHORT", "INT"]
+_cast_dict["UINT"] = _cast_dict["USHORT"] + ["SHORT", "UINT"]
+
+_cast_dict["LONG"] = _cast_dict["INT"] + ["LONG"]
+_cast_dict["ULONG"] = _cast_dict["UINT"] + ["ULONG"]
+
+_cast_dict["LONGLONG"] = _cast_dict["LONG"] + ["LONGLONG"]
+_cast_dict["ULONGLONG"] = _cast_dict["ULONG"] + ["ULONGLONG"]
+
+_cast_dict["FLOAT"] = _cast_dict["SHORT"] + ["USHORT", "FLOAT"]
+_cast_dict["DOUBLE"] = _cast_dict["INT"] + ["UINT", "FLOAT", "DOUBLE"]
+
+_cast_dict["CFLOAT"] = _cast_dict["FLOAT"] + ["CFLOAT"]
# 32 bit system malloc typically does not provide the alignment required by
# 16 byte long double types this means the inout intent cannot be satisfied
@@ -121,15 +147,22 @@ _cast_dict['CFLOAT'] = _cast_dict['FLOAT'] + ['CFLOAT']
# when numpy gains an aligned allocator the tests could be enabled again
#
# Furthermore, on macOS ARM64, LONGDOUBLE is an alias for DOUBLE.
-if ((np.intp().dtype.itemsize != 4 or np.clongdouble().dtype.alignment <= 8) and
- sys.platform != 'win32' and
- (platform.system(), platform.processor()) != ('Darwin', 'arm')):
- _type_names.extend(['LONGDOUBLE', 'CDOUBLE', 'CLONGDOUBLE'])
- _cast_dict['LONGDOUBLE'] = _cast_dict['LONG'] + \
- ['ULONG', 'FLOAT', 'DOUBLE', 'LONGDOUBLE']
- _cast_dict['CLONGDOUBLE'] = _cast_dict['LONGDOUBLE'] + \
- ['CFLOAT', 'CDOUBLE', 'CLONGDOUBLE']
- _cast_dict['CDOUBLE'] = _cast_dict['DOUBLE'] + ['CFLOAT', 'CDOUBLE']
+if ((np.intp().dtype.itemsize != 4 or np.clongdouble().dtype.alignment <= 8)
+ and sys.platform != "win32"
+ and (platform.system(), platform.processor()) != ("Darwin", "arm")):
+ _type_names.extend(["LONGDOUBLE", "CDOUBLE", "CLONGDOUBLE"])
+ _cast_dict["LONGDOUBLE"] = _cast_dict["LONG"] + [
+ "ULONG",
+ "FLOAT",
+ "DOUBLE",
+ "LONGDOUBLE",
+ ]
+ _cast_dict["CLONGDOUBLE"] = _cast_dict["LONGDOUBLE"] + [
+ "CFLOAT",
+ "CDOUBLE",
+ "CLONGDOUBLE",
+ ]
+ _cast_dict["CDOUBLE"] = _cast_dict["DOUBLE"] + ["CFLOAT", "CDOUBLE"]
class Type:
@@ -154,7 +187,7 @@ class Type:
def _init(self, name):
self.NAME = name.upper()
info = typeinfo[self.NAME]
- self.type_num = getattr(wrap, 'NPY_' + self.NAME)
+ self.type_num = getattr(wrap, "NPY_" + self.NAME)
assert_equal(self.type_num, info.num)
self.dtype = np.dtype(info.type)
self.type = info.type
@@ -195,7 +228,6 @@ class Type:
class Array:
-
def __init__(self, typ, dims, intent, obj):
self.type = typ
self.dims = dims
@@ -211,16 +243,18 @@ class Array:
self.arr_attr = wrap.array_attrs(self.arr)
if len(dims) > 1:
- if self.intent.is_intent('c'):
+ if self.intent.is_intent("c"):
assert_(intent.flags & wrap.F2PY_INTENT_C)
- assert_(not self.arr.flags['FORTRAN'],
- repr((self.arr.flags, getattr(obj, 'flags', None))))
- assert_(self.arr.flags['CONTIGUOUS'])
+ assert_(
+ not self.arr.flags["FORTRAN"],
+ repr((self.arr.flags, getattr(obj, "flags", None))),
+ )
+ assert_(self.arr.flags["CONTIGUOUS"])
assert_(not self.arr_attr[6] & wrap.FORTRAN)
else:
assert_(not intent.flags & wrap.F2PY_INTENT_C)
- assert_(self.arr.flags['FORTRAN'])
- assert_(not self.arr.flags['CONTIGUOUS'])
+ assert_(self.arr.flags["FORTRAN"])
+ assert_(not self.arr.flags["CONTIGUOUS"])
assert_(self.arr_attr[6] & wrap.FORTRAN)
if obj is None:
@@ -228,53 +262,71 @@ class Array:
self.pyarr_attr = None
return
- if intent.is_intent('cache'):
+ if intent.is_intent("cache"):
assert_(isinstance(obj, np.ndarray), repr(type(obj)))
self.pyarr = np.array(obj).reshape(*dims).copy()
else:
self.pyarr = np.array(
- np.array(obj, dtype=typ.dtypechar).reshape(*dims),
- order=self.intent.is_intent('c') and 'C' or 'F')
- assert_(self.pyarr.dtype == typ,
- repr((self.pyarr.dtype, typ)))
- self.pyarr.setflags(write=self.arr.flags['WRITEABLE'])
- assert_(self.pyarr.flags['OWNDATA'], (obj, intent))
+ np.array(obj, dtype=typ.dtypechar).reshape(*dims),
+ order=self.intent.is_intent("c") and "C" or "F",
+ )
+ assert_(self.pyarr.dtype == typ, repr((self.pyarr.dtype, typ)))
+ self.pyarr.setflags(write=self.arr.flags["WRITEABLE"])
+ assert_(self.pyarr.flags["OWNDATA"], (obj, intent))
self.pyarr_attr = wrap.array_attrs(self.pyarr)
if len(dims) > 1:
- if self.intent.is_intent('c'):
- assert_(not self.pyarr.flags['FORTRAN'])
- assert_(self.pyarr.flags['CONTIGUOUS'])
+ if self.intent.is_intent("c"):
+ assert_(not self.pyarr.flags["FORTRAN"])
+ assert_(self.pyarr.flags["CONTIGUOUS"])
assert_(not self.pyarr_attr[6] & wrap.FORTRAN)
else:
- assert_(self.pyarr.flags['FORTRAN'])
- assert_(not self.pyarr.flags['CONTIGUOUS'])
+ assert_(self.pyarr.flags["FORTRAN"])
+ assert_(not self.pyarr.flags["CONTIGUOUS"])
assert_(self.pyarr_attr[6] & wrap.FORTRAN)
assert_(self.arr_attr[1] == self.pyarr_attr[1]) # nd
assert_(self.arr_attr[2] == self.pyarr_attr[2]) # dimensions
if self.arr_attr[1] <= 1:
- assert_(self.arr_attr[3] == self.pyarr_attr[3],
- repr((self.arr_attr[3], self.pyarr_attr[3],
- self.arr.tobytes(), self.pyarr.tobytes()))) # strides
- assert_(self.arr_attr[5][-2:] == self.pyarr_attr[5][-2:],
- repr((self.arr_attr[5], self.pyarr_attr[5]))) # descr
- assert_(self.arr_attr[6] == self.pyarr_attr[6],
- repr((self.arr_attr[6], self.pyarr_attr[6],
- flags2names(0 * self.arr_attr[6] - self.pyarr_attr[6]),
- flags2names(self.arr_attr[6]), intent))) # flags
-
- if intent.is_intent('cache'):
- assert_(self.arr_attr[5][3] >= self.type.elsize,
- repr((self.arr_attr[5][3], self.type.elsize)))
+ assert_(
+ self.arr_attr[3] == self.pyarr_attr[3],
+ repr((
+ self.arr_attr[3],
+ self.pyarr_attr[3],
+ self.arr.tobytes(),
+ self.pyarr.tobytes(),
+ )),
+ ) # strides
+ assert_(
+ self.arr_attr[5][-2:] == self.pyarr_attr[5][-2:],
+ repr((self.arr_attr[5], self.pyarr_attr[5])),
+ ) # descr
+ assert_(
+ self.arr_attr[6] == self.pyarr_attr[6],
+ repr((
+ self.arr_attr[6],
+ self.pyarr_attr[6],
+ flags2names(0 * self.arr_attr[6] - self.pyarr_attr[6]),
+ flags2names(self.arr_attr[6]),
+ intent,
+ )),
+ ) # flags
+
+ if intent.is_intent("cache"):
+ assert_(
+ self.arr_attr[5][3] >= self.type.elsize,
+ repr((self.arr_attr[5][3], self.type.elsize)),
+ )
else:
- assert_(self.arr_attr[5][3] == self.type.elsize,
- repr((self.arr_attr[5][3], self.type.elsize)))
+ assert_(
+ self.arr_attr[5][3] == self.type.elsize,
+ repr((self.arr_attr[5][3], self.type.elsize)),
+ )
assert_(self.arr_equal(self.pyarr, self.arr))
if isinstance(self.obj, np.ndarray):
if typ.elsize == Type(obj.dtype).elsize:
- if not intent.is_intent('copy') and self.arr_attr[1] <= 1:
+ if not intent.is_intent("copy") and self.arr_attr[1] <= 1:
assert_(self.has_shared_memory())
def arr_equal(self, arr1, arr2):
@@ -286,8 +338,7 @@ class Array:
return str(self.arr)
def has_shared_memory(self):
- """Check that created array shares data with input array.
- """
+ """Check that created array shares data with input array."""
if self.obj is self.arr:
return True
if not isinstance(self.obj, np.ndarray):
@@ -297,25 +348,24 @@ class Array:
class TestIntent:
-
def test_in_out(self):
- assert_equal(str(intent.in_.out), 'intent(in,out)')
- assert_(intent.in_.c.is_intent('c'))
- assert_(not intent.in_.c.is_intent_exact('c'))
- assert_(intent.in_.c.is_intent_exact('c', 'in'))
- assert_(intent.in_.c.is_intent_exact('in', 'c'))
- assert_(not intent.in_.is_intent('c'))
+ assert_equal(str(intent.in_.out), "intent(in,out)")
+ assert_(intent.in_.c.is_intent("c"))
+ assert_(not intent.in_.c.is_intent_exact("c"))
+ assert_(intent.in_.c.is_intent_exact("c", "in"))
+ assert_(intent.in_.c.is_intent_exact("in", "c"))
+ assert_(not intent.in_.is_intent("c"))
class TestSharedMemory:
num2seq = [1, 2]
num23seq = [[1, 2, 3], [4, 5, 6]]
- @pytest.fixture(autouse=True, scope='class', params=_type_names)
+ @pytest.fixture(autouse=True, scope="class", params=_type_names)
def setup_type(self, request):
request.cls.type = Type(request.param)
- request.cls.array = lambda self, dims, intent, obj: \
- Array(Type(request.param), dims, intent, obj)
+ request.cls.array = lambda self, dims, intent, obj: Array(
+ Type(request.param), dims, intent, obj)
def test_in_from_2seq(self):
a = self.array([2], intent.in_, self.num2seq)
@@ -326,21 +376,21 @@ class TestSharedMemory:
obj = np.array(self.num2seq, dtype=t.dtype)
a = self.array([len(self.num2seq)], intent.in_, obj)
if t.elsize == self.type.elsize:
- assert_(
- a.has_shared_memory(), repr((self.type.dtype, t.dtype)))
+ assert_(a.has_shared_memory(), repr(
+ (self.type.dtype, t.dtype)))
else:
assert_(not a.has_shared_memory(), repr(t.dtype))
- @pytest.mark.parametrize('write', ['w', 'ro'])
- @pytest.mark.parametrize('order', ['C', 'F'])
- @pytest.mark.parametrize('inp', ['2seq', '23seq'])
+ @pytest.mark.parametrize("write", ["w", "ro"])
+ @pytest.mark.parametrize("order", ["C", "F"])
+ @pytest.mark.parametrize("inp", ["2seq", "23seq"])
def test_in_nocopy(self, write, order, inp):
- """Test if intent(in) array can be passed without copies
- """
- seq = getattr(self, 'num' + inp)
+ """Test if intent(in) array can be passed without copies"""
+ seq = getattr(self, "num" + inp)
obj = np.array(seq, dtype=self.type.dtype, order=order)
- obj.setflags(write=(write == 'w'))
- a = self.array(obj.shape, ((order=='C' and intent.in_.c) or intent.in_), obj)
+ obj.setflags(write=(write == "w"))
+ a = self.array(obj.shape,
+ ((order == "C" and intent.in_.c) or intent.in_), obj)
assert a.has_shared_memory()
def test_inout_2seq(self):
@@ -351,29 +401,29 @@ class TestSharedMemory:
try:
a = self.array([2], intent.in_.inout, self.num2seq)
except TypeError as msg:
- if not str(msg).startswith('failed to initialize intent'
- '(inout|inplace|cache) array'):
+ if not str(msg).startswith(
+ "failed to initialize intent(inout|inplace|cache) array"):
raise
else:
- raise SystemError('intent(inout) should have failed on sequence')
+ raise SystemError("intent(inout) should have failed on sequence")
def test_f_inout_23seq(self):
- obj = np.array(self.num23seq, dtype=self.type.dtype, order='F')
+ obj = np.array(self.num23seq, dtype=self.type.dtype, order="F")
shape = (len(self.num23seq), len(self.num23seq[0]))
a = self.array(shape, intent.in_.inout, obj)
assert_(a.has_shared_memory())
- obj = np.array(self.num23seq, dtype=self.type.dtype, order='C')
+ obj = np.array(self.num23seq, dtype=self.type.dtype, order="C")
shape = (len(self.num23seq), len(self.num23seq[0]))
try:
a = self.array(shape, intent.in_.inout, obj)
except ValueError as msg:
- if not str(msg).startswith('failed to initialize intent'
- '(inout) array'):
+ if not str(msg).startswith(
+ "failed to initialize intent(inout) array"):
raise
else:
raise SystemError(
- 'intent(inout) should have failed on improper array')
+ "intent(inout) should have failed on improper array")
def test_c_inout_23seq(self):
obj = np.array(self.num23seq, dtype=self.type.dtype)
@@ -388,22 +438,23 @@ class TestSharedMemory:
assert_(not a.has_shared_memory(), repr(t.dtype))
def test_c_in_from_23seq(self):
- a = self.array([len(self.num23seq), len(self.num23seq[0])],
- intent.in_, self.num23seq)
+ a = self.array(
+ [len(self.num23seq), len(self.num23seq[0])], intent.in_,
+ self.num23seq)
assert_(not a.has_shared_memory())
def test_in_from_23casttype(self):
for t in self.type.cast_types():
obj = np.array(self.num23seq, dtype=t.dtype)
- a = self.array([len(self.num23seq), len(self.num23seq[0])],
- intent.in_, obj)
+ a = self.array(
+ [len(self.num23seq), len(self.num23seq[0])], intent.in_, obj)
assert_(not a.has_shared_memory(), repr(t.dtype))
def test_f_in_from_23casttype(self):
for t in self.type.cast_types():
- obj = np.array(self.num23seq, dtype=t.dtype, order='F')
- a = self.array([len(self.num23seq), len(self.num23seq[0])],
- intent.in_, obj)
+ obj = np.array(self.num23seq, dtype=t.dtype, order="F")
+ a = self.array(
+ [len(self.num23seq), len(self.num23seq[0])], intent.in_, obj)
if t.elsize == self.type.elsize:
assert_(a.has_shared_memory(), repr(t.dtype))
else:
@@ -412,8 +463,8 @@ class TestSharedMemory:
def test_c_in_from_23casttype(self):
for t in self.type.cast_types():
obj = np.array(self.num23seq, dtype=t.dtype)
- a = self.array([len(self.num23seq), len(self.num23seq[0])],
- intent.in_.c, obj)
+ a = self.array(
+ [len(self.num23seq), len(self.num23seq[0])], intent.in_.c, obj)
if t.elsize == self.type.elsize:
assert_(a.has_shared_memory(), repr(t.dtype))
else:
@@ -421,16 +472,18 @@ class TestSharedMemory:
def test_f_copy_in_from_23casttype(self):
for t in self.type.cast_types():
- obj = np.array(self.num23seq, dtype=t.dtype, order='F')
- a = self.array([len(self.num23seq), len(self.num23seq[0])],
- intent.in_.copy, obj)
+ obj = np.array(self.num23seq, dtype=t.dtype, order="F")
+ a = self.array(
+ [len(self.num23seq), len(self.num23seq[0])], intent.in_.copy,
+ obj)
assert_(not a.has_shared_memory(), repr(t.dtype))
def test_c_copy_in_from_23casttype(self):
for t in self.type.cast_types():
obj = np.array(self.num23seq, dtype=t.dtype)
- a = self.array([len(self.num23seq), len(self.num23seq[0])],
- intent.in_.c.copy, obj)
+ a = self.array(
+ [len(self.num23seq), len(self.num23seq[0])], intent.in_.c.copy,
+ obj)
assert_(not a.has_shared_memory(), repr(t.dtype))
def test_in_cache_from_2casttype(self):
@@ -438,14 +491,14 @@ class TestSharedMemory:
if t.elsize != self.type.elsize:
continue
obj = np.array(self.num2seq, dtype=t.dtype)
- shape = (len(self.num2seq),)
+ shape = (len(self.num2seq), )
a = self.array(shape, intent.in_.c.cache, obj)
assert_(a.has_shared_memory(), repr(t.dtype))
a = self.array(shape, intent.in_.cache, obj)
assert_(a.has_shared_memory(), repr(t.dtype))
- obj = np.array(self.num2seq, dtype=t.dtype, order='F')
+ obj = np.array(self.num2seq, dtype=t.dtype, order="F")
a = self.array(shape, intent.in_.c.cache, obj)
assert_(a.has_shared_memory(), repr(t.dtype))
@@ -455,31 +508,31 @@ class TestSharedMemory:
try:
a = self.array(shape, intent.in_.cache, obj[::-1])
except ValueError as msg:
- if not str(msg).startswith('failed to initialize'
- ' intent(cache) array'):
+ if not str(msg).startswith(
+ "failed to initialize intent(cache) array"):
raise
else:
raise SystemError(
- 'intent(cache) should have failed on multisegmented array')
+ "intent(cache) should have failed on multisegmented array")
def test_in_cache_from_2casttype_failure(self):
for t in self.type.all_types():
if t.elsize >= self.type.elsize:
continue
obj = np.array(self.num2seq, dtype=t.dtype)
- shape = (len(self.num2seq),)
+ shape = (len(self.num2seq), )
try:
self.array(shape, intent.in_.cache, obj) # Should succeed
except ValueError as msg:
- if not str(msg).startswith('failed to initialize'
- ' intent(cache) array'):
+ if not str(msg).startswith(
+ "failed to initialize intent(cache) array"):
raise
else:
raise SystemError(
- 'intent(cache) should have failed on smaller array')
+ "intent(cache) should have failed on smaller array")
def test_cache_hidden(self):
- shape = (2,)
+ shape = (2, )
a = self.array(shape, intent.cache.hide, None)
assert_(a.arr.shape == shape)
@@ -491,15 +544,15 @@ class TestSharedMemory:
try:
a = self.array(shape, intent.cache.hide, None)
except ValueError as msg:
- if not str(msg).startswith('failed to create intent'
- '(cache|hide)|optional array'):
+ if not str(msg).startswith(
+ "failed to create intent(cache|hide)|optional array"):
raise
else:
raise SystemError(
- 'intent(cache) should have failed on undefined dimensions')
+ "intent(cache) should have failed on undefined dimensions")
def test_hidden(self):
- shape = (2,)
+ shape = (2, )
a = self.array(shape, intent.hide, None)
assert_(a.arr.shape == shape)
assert_(a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype)))
@@ -508,27 +561,27 @@ class TestSharedMemory:
a = self.array(shape, intent.hide, None)
assert_(a.arr.shape == shape)
assert_(a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype)))
- assert_(a.arr.flags['FORTRAN'] and not a.arr.flags['CONTIGUOUS'])
+ assert_(a.arr.flags["FORTRAN"] and not a.arr.flags["CONTIGUOUS"])
shape = (2, 3)
a = self.array(shape, intent.c.hide, None)
assert_(a.arr.shape == shape)
assert_(a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype)))
- assert_(not a.arr.flags['FORTRAN'] and a.arr.flags['CONTIGUOUS'])
+ assert_(not a.arr.flags["FORTRAN"] and a.arr.flags["CONTIGUOUS"])
shape = (-1, 3)
try:
a = self.array(shape, intent.hide, None)
except ValueError as msg:
- if not str(msg).startswith('failed to create intent'
- '(cache|hide)|optional array'):
+ if not str(msg).startswith(
+ "failed to create intent(cache|hide)|optional array"):
raise
else:
- raise SystemError('intent(hide) should have failed'
- ' on undefined dimensions')
+ raise SystemError(
+ "intent(hide) should have failed on undefined dimensions")
def test_optional_none(self):
- shape = (2,)
+ shape = (2, )
a = self.array(shape, intent.optional, None)
assert_(a.arr.shape == shape)
assert_(a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype)))
@@ -537,17 +590,17 @@ class TestSharedMemory:
a = self.array(shape, intent.optional, None)
assert_(a.arr.shape == shape)
assert_(a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype)))
- assert_(a.arr.flags['FORTRAN'] and not a.arr.flags['CONTIGUOUS'])
+ assert_(a.arr.flags["FORTRAN"] and not a.arr.flags["CONTIGUOUS"])
shape = (2, 3)
a = self.array(shape, intent.c.optional, None)
assert_(a.arr.shape == shape)
assert_(a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype)))
- assert_(not a.arr.flags['FORTRAN'] and a.arr.flags['CONTIGUOUS'])
+ assert_(not a.arr.flags["FORTRAN"] and a.arr.flags["CONTIGUOUS"])
def test_optional_from_2seq(self):
obj = self.num2seq
- shape = (len(obj),)
+ shape = (len(obj), )
a = self.array(shape, intent.optional, obj)
assert_(a.arr.shape == shape)
assert_(not a.has_shared_memory())
@@ -565,16 +618,18 @@ class TestSharedMemory:
def test_inplace(self):
obj = np.array(self.num23seq, dtype=self.type.dtype)
- assert_(not obj.flags['FORTRAN'] and obj.flags['CONTIGUOUS'])
+ assert_(not obj.flags["FORTRAN"] and obj.flags["CONTIGUOUS"])
shape = obj.shape
a = self.array(shape, intent.inplace, obj)
assert_(obj[1][2] == a.arr[1][2], repr((obj, a.arr)))
a.arr[1][2] = 54
- assert_(obj[1][2] == a.arr[1][2] ==
- np.array(54, dtype=self.type.dtype), repr((obj, a.arr)))
+ assert_(
+ obj[1][2] == a.arr[1][2] == np.array(54, dtype=self.type.dtype),
+ repr((obj, a.arr)),
+ )
assert_(a.arr is obj)
- assert_(obj.flags['FORTRAN']) # obj attributes are changed inplace!
- assert_(not obj.flags['CONTIGUOUS'])
+ assert_(obj.flags["FORTRAN"]) # obj attributes are changed inplace!
+ assert_(not obj.flags["CONTIGUOUS"])
def test_inplace_from_casttype(self):
for t in self.type.cast_types():
@@ -583,14 +638,17 @@ class TestSharedMemory:
obj = np.array(self.num23seq, dtype=t.dtype)
assert_(obj.dtype.type == t.type)
assert_(obj.dtype.type is not self.type.type)
- assert_(not obj.flags['FORTRAN'] and obj.flags['CONTIGUOUS'])
+ assert_(not obj.flags["FORTRAN"] and obj.flags["CONTIGUOUS"])
shape = obj.shape
a = self.array(shape, intent.inplace, obj)
assert_(obj[1][2] == a.arr[1][2], repr((obj, a.arr)))
a.arr[1][2] = 54
- assert_(obj[1][2] == a.arr[1][2] ==
- np.array(54, dtype=self.type.dtype), repr((obj, a.arr)))
+ assert_(
+ obj[1][2] == a.arr[1][2] == np.array(54,
+ dtype=self.type.dtype),
+ repr((obj, a.arr)),
+ )
assert_(a.arr is obj)
- assert_(obj.flags['FORTRAN']) # obj attributes changed inplace!
- assert_(not obj.flags['CONTIGUOUS'])
+ assert_(obj.flags["FORTRAN"]) # obj attributes changed inplace!
+ assert_(not obj.flags["CONTIGUOUS"])
assert_(obj.dtype.type is self.type.type) # obj changed inplace!
diff --git a/numpy/f2py/tests/test_assumed_shape.py b/numpy/f2py/tests/test_assumed_shape.py
index 79e3ad138..0d226cb44 100644
--- a/numpy/f2py/tests/test_assumed_shape.py
+++ b/numpy/f2py/tests/test_assumed_shape.py
@@ -6,17 +6,14 @@ from numpy.testing import assert_
from . import util
-def _path(*a):
- return os.path.join(*((os.path.dirname(__file__),) + a))
-
-
class TestAssumedShapeSumExample(util.F2PyTest):
- sources = [_path('src', 'assumed_shape', 'foo_free.f90'),
- _path('src', 'assumed_shape', 'foo_use.f90'),
- _path('src', 'assumed_shape', 'precision.f90'),
- _path('src', 'assumed_shape', 'foo_mod.f90'),
- _path('src', 'assumed_shape', '.f2py_f2cmap'),
- ]
+ sources = [
+ util.getpath("tests", "src", "assumed_shape", "foo_free.f90"),
+ util.getpath("tests", "src", "assumed_shape", "foo_use.f90"),
+ util.getpath("tests", "src", "assumed_shape", "precision.f90"),
+ util.getpath("tests", "src", "assumed_shape", "foo_mod.f90"),
+ util.getpath("tests", "src", "assumed_shape", ".f2py_f2cmap"),
+ ]
@pytest.mark.slow
def test_all(self):
@@ -40,7 +37,7 @@ class TestF2cmapOption(TestAssumedShapeSumExample):
f2cmap_src = self.sources.pop(-1)
self.f2cmap_file = tempfile.NamedTemporaryFile(delete=False)
- with open(f2cmap_src, 'rb') as f:
+ with open(f2cmap_src, "rb") as f:
self.f2cmap_file.write(f.read())
self.f2cmap_file.close()
diff --git a/numpy/f2py/tests/test_block_docstring.py b/numpy/f2py/tests/test_block_docstring.py
index 7d725165b..36446fe64 100644
--- a/numpy/f2py/tests/test_block_docstring.py
+++ b/numpy/f2py/tests/test_block_docstring.py
@@ -4,6 +4,7 @@ from . import util
from numpy.testing import assert_equal, IS_PYPY
+
class TestBlockDocString(util.F2PyTest):
code = """
SUBROUTINE FOO()
@@ -14,8 +15,8 @@ class TestBlockDocString(util.F2PyTest):
END
"""
- @pytest.mark.skipif(sys.platform=='win32',
- reason='Fails with MinGW64 Gfortran (Issue #9673)')
+ @pytest.mark.skipif(sys.platform == "win32",
+ reason="Fails with MinGW64 Gfortran (Issue #9673)")
@pytest.mark.xfail(IS_PYPY,
reason="PyPy cannot modify tp_doc after PyType_Ready")
def test_block_docstring(self):
diff --git a/numpy/f2py/tests/test_callback.py b/numpy/f2py/tests/test_callback.py
index 5d2aab94d..8682afe05 100644
--- a/numpy/f2py/tests/test_callback.py
+++ b/numpy/f2py/tests/test_callback.py
@@ -77,7 +77,7 @@ cf2py intent(out) r
end
"""
- @pytest.mark.parametrize('name', 't,t2'.split(','))
+ @pytest.mark.parametrize("name", "t,t2".split(","))
def test_all(self, name):
self.check_function(name)
@@ -116,18 +116,18 @@ cf2py intent(out) r
t = getattr(self.module, name)
r = t(lambda: 4)
assert_(r == 4, repr(r))
- r = t(lambda a: 5, fun_extra_args=(6,))
+ r = t(lambda a: 5, fun_extra_args=(6, ))
assert_(r == 5, repr(r))
- r = t(lambda a: a, fun_extra_args=(6,))
+ r = t(lambda a: a, fun_extra_args=(6, ))
assert_(r == 6, repr(r))
- r = t(lambda a: 5 + a, fun_extra_args=(7,))
+ r = t(lambda a: 5 + a, fun_extra_args=(7, ))
assert_(r == 12, repr(r))
- r = t(lambda a: math.degrees(a), fun_extra_args=(math.pi,))
+ r = t(lambda a: math.degrees(a), fun_extra_args=(math.pi, ))
assert_(r == 180, repr(r))
- r = t(math.degrees, fun_extra_args=(math.pi,))
+ r = t(math.degrees, fun_extra_args=(math.pi, ))
assert_(r == 180, repr(r))
- r = t(self.module.func, fun_extra_args=(6,))
+ r = t(self.module.func, fun_extra_args=(6, ))
assert_(r == 17, repr(r))
r = t(self.module.func0)
assert_(r == 11, repr(r))
@@ -135,48 +135,47 @@ cf2py intent(out) r
assert_(r == 11, repr(r))
class A:
-
def __call__(self):
return 7
def mth(self):
return 9
+
a = A()
r = t(a)
assert_(r == 7, repr(r))
r = t(a.mth)
assert_(r == 9, repr(r))
- @pytest.mark.skipif(sys.platform=='win32',
- reason='Fails with MinGW64 Gfortran (Issue #9673)')
+ @pytest.mark.skipif(sys.platform == "win32",
+ reason="Fails with MinGW64 Gfortran (Issue #9673)")
def test_string_callback(self):
-
def callback(code):
- if code == 'r':
+ if code == "r":
return 0
else:
return 1
- f = getattr(self.module, 'string_callback')
+ f = getattr(self.module, "string_callback")
r = f(callback)
assert_(r == 0, repr(r))
- @pytest.mark.skipif(sys.platform=='win32',
- reason='Fails with MinGW64 Gfortran (Issue #9673)')
+ @pytest.mark.skipif(sys.platform == "win32",
+ reason="Fails with MinGW64 Gfortran (Issue #9673)")
def test_string_callback_array(self):
# See gh-10027
- cu = np.zeros((1, 8), 'S1')
+ cu = np.zeros((1, 8), "S1")
def callback(cu, lencu):
if cu.shape != (lencu, 8):
return 1
- if cu.dtype != 'S1':
+ if cu.dtype != "S1":
return 2
- if not np.all(cu == b''):
+ if not np.all(cu == b""):
return 3
return 0
- f = getattr(self.module, 'string_callback_array')
+ f = getattr(self.module, "string_callback_array")
res = f(callback, cu, len(cu))
assert_(res == 0, repr(res))
@@ -205,8 +204,10 @@ cf2py intent(out) r
except Exception:
errors.append(traceback.format_exc())
- threads = [threading.Thread(target=runner, args=(arg,))
- for arg in ("t", "t2") for n in range(20)]
+ threads = [
+ threading.Thread(target=runner, args=(arg, ))
+ for arg in ("t", "t2") for n in range(20)
+ ]
for t in threads:
t.start()
@@ -222,12 +223,12 @@ cf2py intent(out) r
try:
self.module.hidden_callback(2)
except Exception as msg:
- assert_(str(msg).startswith('Callback global_f not defined'))
+ assert_(str(msg).startswith("Callback global_f not defined"))
try:
self.module.hidden_callback2(2)
except Exception as msg:
- assert_(str(msg).startswith('cb: Callback global_f not defined'))
+ assert_(str(msg).startswith("cb: Callback global_f not defined"))
self.module.global_f = lambda x: x + 1
r = self.module.hidden_callback(2)
@@ -241,7 +242,7 @@ cf2py intent(out) r
try:
self.module.hidden_callback(2)
except Exception as msg:
- assert_(str(msg).startswith('Callback global_f not defined'))
+ assert_(str(msg).startswith("Callback global_f not defined"))
self.module.global_f = lambda x=0: x + 3
r = self.module.hidden_callback(2)
@@ -257,15 +258,15 @@ class TestF77CallbackPythonTLS(TestF77Callback):
Callback tests using Python thread-local storage instead of
compiler-provided
"""
+
options = ["-DF2PY_USE_PYTHON_TLS"]
class TestF90Callback(util.F2PyTest):
- suffix = '.f90'
+ suffix = ".f90"
- code = textwrap.dedent(
- """
+ code = textwrap.dedent("""
function gh17797(f, y) result(r)
external f
integer(8) :: r, f
@@ -276,7 +277,6 @@ class TestF90Callback(util.F2PyTest):
""")
def test_gh17797(self):
-
def incr(x):
return x + 123
@@ -292,10 +292,9 @@ class TestGH18335(util.F2PyTest):
other tests!
"""
- suffix = '.f90'
+ suffix = ".f90"
- code = textwrap.dedent(
- """
+ code = textwrap.dedent("""
! When gh18335_workaround is defined as an extension,
! the issue cannot be reproduced.
!subroutine gh18335_workaround(f, y)
@@ -316,7 +315,6 @@ class TestGH18335(util.F2PyTest):
""")
def test_gh18335(self):
-
def foo(x):
x[0] += 1
diff --git a/numpy/f2py/tests/test_common.py b/numpy/f2py/tests/test_common.py
index e4bf35504..056ae5ee8 100644
--- a/numpy/f2py/tests/test_common.py
+++ b/numpy/f2py/tests/test_common.py
@@ -7,19 +7,16 @@ from . import util
from numpy.testing import assert_array_equal
-def _path(*a):
- return os.path.join(*((os.path.dirname(__file__),) + a))
class TestCommonBlock(util.F2PyTest):
- sources = [_path('src', 'common', 'block.f')]
+ sources = [util.getpath("tests", "src", "common", "block.f")]
- @pytest.mark.skipif(sys.platform=='win32',
- reason='Fails with MinGW64 Gfortran (Issue #9673)')
+ @pytest.mark.skipif(sys.platform == "win32",
+ reason="Fails with MinGW64 Gfortran (Issue #9673)")
def test_common_block(self):
self.module.initcb()
assert_array_equal(self.module.block.long_bn,
np.array(1.0, dtype=np.float64))
assert_array_equal(self.module.block.string_bn,
- np.array('2', dtype='|S1'))
- assert_array_equal(self.module.block.ok,
- np.array(3, dtype=np.int32))
+ np.array("2", dtype="|S1"))
+ assert_array_equal(self.module.block.ok, np.array(3, dtype=np.int32))
diff --git a/numpy/f2py/tests/test_compile_function.py b/numpy/f2py/tests/test_compile_function.py
index f76fd6448..e92362d82 100644
--- a/numpy/f2py/tests/test_compile_function.py
+++ b/numpy/f2py/tests/test_compile_function.py
@@ -17,14 +17,13 @@ def setup_module():
if not util.has_c_compiler():
pytest.skip("Needs C compiler")
if not util.has_f77_compiler():
- pytest.skip('Needs FORTRAN 77 compiler')
+ pytest.skip("Needs FORTRAN 77 compiler")
# extra_args can be a list (since gh-11937) or string.
# also test absence of extra_args
-@pytest.mark.parametrize(
- "extra_args", [['--noopt', '--debug'], '--noopt --debug', '']
- )
+@pytest.mark.parametrize("extra_args",
+ [["--noopt", "--debug"], "--noopt --debug", ""])
@pytest.mark.leaks_references(reason="Imported module seems never deleted.")
def test_f2py_init_compile(extra_args):
# flush through the f2py __init__ compile() function code path as a
@@ -33,7 +32,7 @@ def test_f2py_init_compile(extra_args):
# the Fortran 77 syntax requires 6 spaces before any commands, but
# more space may be added/
- fsource = """
+ fsource = """
integer function foo()
foo = 10 + 5
return
@@ -45,7 +44,7 @@ def test_f2py_init_compile(extra_args):
modname = util.get_temp_module_name()
cwd = os.getcwd()
- target = os.path.join(moddir, str(uuid.uuid4()) + '.f')
+ target = os.path.join(moddir, str(uuid.uuid4()) + ".f")
# try running compile() with and without a source_fn provided so
# that the code path where a temporary file for writing Fortran
# source is created is also explored
@@ -54,33 +53,28 @@ def test_f2py_init_compile(extra_args):
# util.py, but don't actually use build_module() because it has
# its own invocation of subprocess that circumvents the
# f2py.compile code block under test
- try:
- os.chdir(moddir)
- ret_val = numpy.f2py.compile(
- fsource,
- modulename=modname,
- extra_args=extra_args,
- source_fn=source_fn
- )
- finally:
- os.chdir(cwd)
-
- # check for compile success return value
- assert_equal(ret_val, 0)
-
- # we are not currently able to import the Python-Fortran
- # interface module on Windows / Appveyor, even though we do get
- # successful compilation on that platform with Python 3.x
- if sys.platform != 'win32':
- # check for sensible result of Fortran function; that means
- # we can import the module name in Python and retrieve the
- # result of the sum operation
- return_check = import_module(modname)
- calc_result = return_check.foo()
- assert_equal(calc_result, 15)
- # Removal from sys.modules, is not as such necessary. Even with
- # removal, the module (dict) stays alive.
- del sys.modules[modname]
+ with util.switchdir(moddir):
+ ret_val = numpy.f2py.compile(fsource,
+ modulename=modname,
+ extra_args=extra_args,
+ source_fn=source_fn)
+
+ # check for compile success return value
+ assert_equal(ret_val, 0)
+
+ # we are not currently able to import the Python-Fortran
+ # interface module on Windows / Appveyor, even though we do get
+ # successful compilation on that platform with Python 3.x
+ if sys.platform != "win32":
+ # check for sensible result of Fortran function; that means
+ # we can import the module name in Python and retrieve the
+ # result of the sum operation
+ return_check = import_module(modname)
+ calc_result = return_check.foo()
+ assert_equal(calc_result, 15)
+ # Removal from sys.modules, is not as such necessary. Even with
+ # removal, the module (dict) stays alive.
+ del sys.modules[modname]
def test_f2py_init_compile_failure():
@@ -99,7 +93,7 @@ def test_f2py_init_compile_bad_cmd():
# downstream NOTE: how bad of an idea is this patching?
try:
temp = sys.executable
- sys.executable = 'does not exist'
+ sys.executable = "does not exist"
# the OSError should take precedence over invalid Fortran
ret_val = numpy.f2py.compile(b"invalid")
@@ -108,18 +102,17 @@ def test_f2py_init_compile_bad_cmd():
sys.executable = temp
-@pytest.mark.parametrize('fsource',
- ['program test_f2py\nend program test_f2py',
- b'program test_f2py\nend program test_f2py',])
+@pytest.mark.parametrize(
+ "fsource",
+ [
+ "program test_f2py\nend program test_f2py",
+ b"program test_f2py\nend program test_f2py",
+ ],
+)
def test_compile_from_strings(tmpdir, fsource):
# Make sure we can compile str and bytes gh-12796
- cwd = os.getcwd()
- try:
- os.chdir(str(tmpdir))
- ret_val = numpy.f2py.compile(
- fsource,
- modulename='test_compile_from_strings',
- extension='.f90')
+ with util.switchdir(tmpdir):
+ ret_val = numpy.f2py.compile(fsource,
+ modulename="test_compile_from_strings",
+ extension=".f90")
assert_equal(ret_val, 0)
- finally:
- os.chdir(cwd)
diff --git a/numpy/f2py/tests/test_crackfortran.py b/numpy/f2py/tests/test_crackfortran.py
index 039e085b4..41d9840ed 100644
--- a/numpy/f2py/tests/test_crackfortran.py
+++ b/numpy/f2py/tests/test_crackfortran.py
@@ -36,15 +36,14 @@ class TestNoSpace(util.F2PyTest):
assert_array_equal(k, w + 1)
self.module.subc([w, k])
assert_array_equal(k, w + 1)
- assert self.module.t0(23) == b'2'
+ assert self.module.t0(23) == b"2"
-class TestPublicPrivate():
-
+class TestPublicPrivate:
def test_defaultPrivate(self, tmp_path):
f_path = tmp_path / "mod.f90"
- with f_path.open('w') as ff:
- ff.write(textwrap.dedent("""\
+ f_path.write_text(
+ textwrap.dedent("""\
module foo
private
integer :: a
@@ -60,17 +59,18 @@ class TestPublicPrivate():
mod = crackfortran.crackfortran([str(f_path)])
assert len(mod) == 1
mod = mod[0]
- assert 'private' in mod['vars']['a']['attrspec']
- assert 'public' not in mod['vars']['a']['attrspec']
- assert 'private' in mod['vars']['b']['attrspec']
- assert 'public' not in mod['vars']['b']['attrspec']
- assert 'private' not in mod['vars']['seta']['attrspec']
- assert 'public' in mod['vars']['seta']['attrspec']
+ assert "private" in mod["vars"]["a"]["attrspec"]
+ assert "public" not in mod["vars"]["a"]["attrspec"]
+ assert "private" in mod["vars"]["b"]["attrspec"]
+ assert "public" not in mod["vars"]["b"]["attrspec"]
+ assert "private" not in mod["vars"]["seta"]["attrspec"]
+ assert "public" in mod["vars"]["seta"]["attrspec"]
def test_defaultPublic(self, tmp_path):
f_path = tmp_path / "mod.f90"
- with f_path.open('w') as ff:
- ff.write(textwrap.dedent("""\
+ with f_path.open("w") as ff:
+ ff.write(
+ textwrap.dedent("""\
module foo
public
integer, private :: a
@@ -85,10 +85,10 @@ class TestPublicPrivate():
mod = crackfortran.crackfortran([str(f_path)])
assert len(mod) == 1
mod = mod[0]
- assert 'private' in mod['vars']['a']['attrspec']
- assert 'public' not in mod['vars']['a']['attrspec']
- assert 'private' not in mod['vars']['seta']['attrspec']
- assert 'public' in mod['vars']['seta']['attrspec']
+ assert "private" in mod["vars"]["a"]["attrspec"]
+ assert "public" not in mod["vars"]["a"]["attrspec"]
+ assert "private" not in mod["vars"]["seta"]["attrspec"]
+ assert "public" in mod["vars"]["seta"]["attrspec"]
class TestExternal(util.F2PyTest):
@@ -111,19 +111,21 @@ class TestExternal(util.F2PyTest):
def test_external_as_statement(self):
def incr(x):
return x + 123
+
r = self.module.external_as_statement(incr)
assert r == 123
def test_external_as_attribute(self):
def incr(x):
return x + 123
+
r = self.module.external_as_attribute(incr)
assert r == 123
class TestCrackFortran(util.F2PyTest):
- suffix = '.f90'
+ suffix = ".f90"
code = textwrap.dedent("""
subroutine gh2848( &
@@ -146,7 +148,7 @@ class TestCrackFortran(util.F2PyTest):
assert r == (1, 2)
-class TestMarkinnerspaces():
+class TestMarkinnerspaces:
# issue #14118: markinnerspaces does not handle multiple quotations
def test_do_not_touch_normal_spaces(self):
@@ -155,13 +157,13 @@ class TestMarkinnerspaces():
assert_equal(markinnerspaces(i), i)
def test_one_relevant_space(self):
- assert_equal(markinnerspaces("a 'b c' \\\' \\\'"), "a 'b@_@c' \\' \\'")
+ assert_equal(markinnerspaces("a 'b c' \\' \\'"), "a 'b@_@c' \\' \\'")
assert_equal(markinnerspaces(r'a "b c" \" \"'), r'a "b@_@c" \" \"')
def test_ignore_inner_quotes(self):
- assert_equal(markinnerspaces('a \'b c" " d\' e'),
+ assert_equal(markinnerspaces("a 'b c\" \" d' e"),
"a 'b@_@c\"@_@\"@_@d' e")
- assert_equal(markinnerspaces('a "b c\' \' d" e'),
+ assert_equal(markinnerspaces("a \"b c' ' d\" e"),
"a \"b@_@c'@_@'@_@d\" e")
def test_multiple_relevant_spaces(self):
@@ -200,7 +202,7 @@ class TestDimSpec(util.F2PyTest):
"""
- suffix = '.f90'
+ suffix = ".f90"
code_template = textwrap.dedent("""
function get_arr_size_{count}(a, n) result (length)
@@ -221,33 +223,36 @@ class TestDimSpec(util.F2PyTest):
end subroutine
""")
- linear_dimspecs = ['n', '2*n', '2:n', 'n/2', '5 - n/2', '3*n:20',
- 'n*(n+1):n*(n+5)']
- nonlinear_dimspecs = ['2*n:3*n*n+2*n']
+ linear_dimspecs = [
+ "n", "2*n", "2:n", "n/2", "5 - n/2", "3*n:20", "n*(n+1):n*(n+5)"
+ ]
+ nonlinear_dimspecs = ["2*n:3*n*n+2*n"]
all_dimspecs = linear_dimspecs + nonlinear_dimspecs
- code = ''
+ code = ""
for count, dimspec in enumerate(all_dimspecs):
code += code_template.format(
- count=count, dimspec=dimspec,
- first=dimspec.split(':')[0] if ':' in dimspec else '1')
+ count=count,
+ dimspec=dimspec,
+ first=dimspec.split(":")[0] if ":" in dimspec else "1",
+ )
- @pytest.mark.parametrize('dimspec', all_dimspecs)
+ @pytest.mark.parametrize("dimspec", all_dimspecs)
def test_array_size(self, dimspec):
count = self.all_dimspecs.index(dimspec)
- get_arr_size = getattr(self.module, f'get_arr_size_{count}')
+ get_arr_size = getattr(self.module, f"get_arr_size_{count}")
for n in [1, 2, 3, 4, 5]:
sz, a = get_arr_size(n)
assert len(a) == sz
- @pytest.mark.parametrize('dimspec', all_dimspecs)
+ @pytest.mark.parametrize("dimspec", all_dimspecs)
def test_inv_array_size(self, dimspec):
count = self.all_dimspecs.index(dimspec)
- get_arr_size = getattr(self.module, f'get_arr_size_{count}')
- get_inv_arr_size = getattr(self.module, f'get_inv_arr_size_{count}')
+ get_arr_size = getattr(self.module, f"get_arr_size_{count}")
+ get_inv_arr_size = getattr(self.module, f"get_inv_arr_size_{count}")
for n in [1, 2, 3, 4, 5]:
sz, a = get_arr_size(n)
@@ -266,11 +271,12 @@ class TestDimSpec(util.F2PyTest):
assert sz == sz1, (n, n1, sz, sz1)
-class TestModuleDeclaration():
+class TestModuleDeclaration:
def test_dependencies(self, tmp_path):
f_path = tmp_path / "mod.f90"
- with f_path.open('w') as ff:
- ff.write(textwrap.dedent("""\
+ with f_path.open("w") as ff:
+ ff.write(
+ textwrap.dedent("""\
module foo
type bar
character(len = 4) :: text
@@ -280,4 +286,4 @@ class TestModuleDeclaration():
"""))
mod = crackfortran.crackfortran([str(f_path)])
assert len(mod) == 1
- assert mod[0]['vars']['abar']['='] == "bar('abar')"
+ assert mod[0]["vars"]["abar"]["="] == "bar('abar')"
diff --git a/numpy/f2py/tests/test_kind.py b/numpy/f2py/tests/test_kind.py
index a7e2b28ed..78a11fc6c 100644
--- a/numpy/f2py/tests/test_kind.py
+++ b/numpy/f2py/tests/test_kind.py
@@ -4,17 +4,13 @@ import pytest
from numpy.testing import assert_
from numpy.f2py.crackfortran import (
_selected_int_kind_func as selected_int_kind,
- _selected_real_kind_func as selected_real_kind
- )
+ _selected_real_kind_func as selected_real_kind,
+)
from . import util
-def _path(*a):
- return os.path.join(*((os.path.dirname(__file__),) + a))
-
-
class TestKind(util.F2PyTest):
- sources = [_path('src', 'kind', 'foo.f90')]
+ sources = [util.getpath("tests", "src", "kind", "foo.f90")]
@pytest.mark.slow
def test_all(self):
@@ -22,11 +18,15 @@ class TestKind(util.F2PyTest):
selectedintkind = self.module.selectedintkind
for i in range(40):
- assert_(selectedintkind(i) in [selected_int_kind(i), -1],
- 'selectedintkind(%s): expected %r but got %r' %
- (i, selected_int_kind(i), selectedintkind(i)))
+ assert_(
+ selectedintkind(i) in [selected_int_kind(i), -1],
+ "selectedintkind(%s): expected %r but got %r" %
+ (i, selected_int_kind(i), selectedintkind(i)),
+ )
for i in range(20):
- assert_(selectedrealkind(i) in [selected_real_kind(i), -1],
- 'selectedrealkind(%s): expected %r but got %r' %
- (i, selected_real_kind(i), selectedrealkind(i)))
+ assert_(
+ selectedrealkind(i) in [selected_real_kind(i), -1],
+ "selectedrealkind(%s): expected %r but got %r" %
+ (i, selected_real_kind(i), selectedrealkind(i)),
+ )
diff --git a/numpy/f2py/tests/test_mixed.py b/numpy/f2py/tests/test_mixed.py
index 04266ca5b..95444bea5 100644
--- a/numpy/f2py/tests/test_mixed.py
+++ b/numpy/f2py/tests/test_mixed.py
@@ -6,14 +6,12 @@ from numpy.testing import assert_, assert_equal, IS_PYPY
from . import util
-def _path(*a):
- return os.path.join(*((os.path.dirname(__file__),) + a))
-
-
class TestMixed(util.F2PyTest):
- sources = [_path('src', 'mixed', 'foo.f'),
- _path('src', 'mixed', 'foo_fixed.f90'),
- _path('src', 'mixed', 'foo_free.f90')]
+ sources = [
+ util.getpath("tests", "src", "mixed", "foo.f"),
+ util.getpath("tests", "src", "mixed", "foo_fixed.f90"),
+ util.getpath("tests", "src", "mixed", "foo_free.f90"),
+ ]
def test_all(self):
assert_(self.module.bar11() == 11)
diff --git a/numpy/f2py/tests/test_module_doc.py b/numpy/f2py/tests/test_module_doc.py
index 4b9555cee..b66cff000 100644
--- a/numpy/f2py/tests/test_module_doc.py
+++ b/numpy/f2py/tests/test_module_doc.py
@@ -7,24 +7,24 @@ from . import util
from numpy.testing import assert_equal, IS_PYPY
-def _path(*a):
- return os.path.join(*((os.path.dirname(__file__),) + a))
-
-
class TestModuleDocString(util.F2PyTest):
- sources = [_path('src', 'module_data', 'module_data_docstring.f90')]
+ sources = [
+ util.getpath("tests", "src", "module_data",
+ "module_data_docstring.f90")
+ ]
- @pytest.mark.skipif(sys.platform=='win32',
- reason='Fails with MinGW64 Gfortran (Issue #9673)')
+ @pytest.mark.skipif(sys.platform == "win32",
+ reason="Fails with MinGW64 Gfortran (Issue #9673)")
@pytest.mark.xfail(IS_PYPY,
reason="PyPy cannot modify tp_doc after PyType_Ready")
def test_module_docstring(self):
- assert_equal(self.module.mod.__doc__,
- textwrap.dedent('''\
+ assert_equal(
+ self.module.mod.__doc__,
+ textwrap.dedent("""\
i : 'i'-scalar
x : 'i'-array(4)
a : 'f'-array(2,3)
b : 'f'-array(-1,-1), not allocated\x00
foo()\n
- Wrapper for ``foo``.\n\n''')
- )
+ Wrapper for ``foo``.\n\n"""),
+ )
diff --git a/numpy/f2py/tests/test_parameter.py b/numpy/f2py/tests/test_parameter.py
index b61827169..4ea102e84 100644
--- a/numpy/f2py/tests/test_parameter.py
+++ b/numpy/f2py/tests/test_parameter.py
@@ -7,17 +7,14 @@ from numpy.testing import assert_raises, assert_equal
from . import util
-def _path(*a):
- return os.path.join(*((os.path.dirname(__file__),) + a))
-
-
class TestParameters(util.F2PyTest):
# Check that intent(in out) translates as intent(inout)
- sources = [_path('src', 'parameter', 'constant_real.f90'),
- _path('src', 'parameter', 'constant_integer.f90'),
- _path('src', 'parameter', 'constant_both.f90'),
- _path('src', 'parameter', 'constant_compound.f90'),
- _path('src', 'parameter', 'constant_non_compound.f90'),
+ sources = [
+ util.getpath("tests", "src", "parameter", "constant_real.f90"),
+ util.getpath("tests", "src", "parameter", "constant_integer.f90"),
+ util.getpath("tests", "src", "parameter", "constant_both.f90"),
+ util.getpath("tests", "src", "parameter", "constant_compound.f90"),
+ util.getpath("tests", "src", "parameter", "constant_non_compound.f90"),
]
@pytest.mark.slow
@@ -29,7 +26,7 @@ class TestParameters(util.F2PyTest):
# check values with contiguous array
x = np.arange(3, dtype=np.float32)
self.module.foo_single(x)
- assert_equal(x, [0 + 1 + 2*3, 1, 2])
+ assert_equal(x, [0 + 1 + 2 * 3, 1, 2])
@pytest.mark.slow
def test_constant_real_double(self):
@@ -40,7 +37,7 @@ class TestParameters(util.F2PyTest):
# check values with contiguous array
x = np.arange(3, dtype=np.float64)
self.module.foo_double(x)
- assert_equal(x, [0 + 1 + 2*3, 1, 2])
+ assert_equal(x, [0 + 1 + 2 * 3, 1, 2])
@pytest.mark.slow
def test_constant_compound_int(self):
@@ -51,14 +48,14 @@ class TestParameters(util.F2PyTest):
# check values with contiguous array
x = np.arange(3, dtype=np.int32)
self.module.foo_compound_int(x)
- assert_equal(x, [0 + 1 + 2*6, 1, 2])
+ assert_equal(x, [0 + 1 + 2 * 6, 1, 2])
@pytest.mark.slow
def test_constant_non_compound_int(self):
# check values
x = np.arange(4, dtype=np.int32)
self.module.foo_non_compound_int(x)
- assert_equal(x, [0 + 1 + 2 + 3*4, 1, 2, 3])
+ assert_equal(x, [0 + 1 + 2 + 3 * 4, 1, 2, 3])
@pytest.mark.slow
def test_constant_integer_int(self):
@@ -69,7 +66,7 @@ class TestParameters(util.F2PyTest):
# check values with contiguous array
x = np.arange(3, dtype=np.int32)
self.module.foo_int(x)
- assert_equal(x, [0 + 1 + 2*3, 1, 2])
+ assert_equal(x, [0 + 1 + 2 * 3, 1, 2])
@pytest.mark.slow
def test_constant_integer_long(self):
@@ -80,7 +77,7 @@ class TestParameters(util.F2PyTest):
# check values with contiguous array
x = np.arange(3, dtype=np.int64)
self.module.foo_long(x)
- assert_equal(x, [0 + 1 + 2*3, 1, 2])
+ assert_equal(x, [0 + 1 + 2 * 3, 1, 2])
@pytest.mark.slow
def test_constant_both(self):
@@ -91,7 +88,7 @@ class TestParameters(util.F2PyTest):
# check values with contiguous array
x = np.arange(3, dtype=np.float64)
self.module.foo(x)
- assert_equal(x, [0 + 1*3*3 + 2*3*3, 1*3, 2*3])
+ assert_equal(x, [0 + 1 * 3 * 3 + 2 * 3 * 3, 1 * 3, 2 * 3])
@pytest.mark.slow
def test_constant_no(self):
@@ -102,7 +99,7 @@ class TestParameters(util.F2PyTest):
# check values with contiguous array
x = np.arange(3, dtype=np.float64)
self.module.foo_no(x)
- assert_equal(x, [0 + 1*3*3 + 2*3*3, 1*3, 2*3])
+ assert_equal(x, [0 + 1 * 3 * 3 + 2 * 3 * 3, 1 * 3, 2 * 3])
@pytest.mark.slow
def test_constant_sum(self):
@@ -113,4 +110,4 @@ class TestParameters(util.F2PyTest):
# check values with contiguous array
x = np.arange(3, dtype=np.float64)
self.module.foo_sum(x)
- assert_equal(x, [0 + 1*3*3 + 2*3*3, 1*3, 2*3])
+ assert_equal(x, [0 + 1 * 3 * 3 + 2 * 3 * 3, 1 * 3, 2 * 3])
diff --git a/numpy/f2py/tests/test_quoted_character.py b/numpy/f2py/tests/test_quoted_character.py
index 20c77666c..efb9ad08b 100644
--- a/numpy/f2py/tests/test_quoted_character.py
+++ b/numpy/f2py/tests/test_quoted_character.py
@@ -26,7 +26,7 @@ Cf2py intent(out) OUT1, OUT2, OUT3, OUT4, OUT5, OUT6
END
"""
- @pytest.mark.skipif(sys.platform=='win32',
- reason='Fails with MinGW64 Gfortran (Issue #9673)')
+ @pytest.mark.skipif(sys.platform == "win32",
+ reason="Fails with MinGW64 Gfortran (Issue #9673)")
def test_quoted_character(self):
- assert_equal(self.module.foo(), (b"'", b'"', b';', b'!', b'(', b')'))
+ assert_equal(self.module.foo(), (b"'", b'"', b";", b"!", b"(", b")"))
diff --git a/numpy/f2py/tests/test_regression.py b/numpy/f2py/tests/test_regression.py
index b91499e4a..682b9e98c 100644
--- a/numpy/f2py/tests/test_regression.py
+++ b/numpy/f2py/tests/test_regression.py
@@ -7,13 +7,9 @@ from numpy.testing import assert_, assert_raises, assert_equal, assert_string_eq
from . import util
-def _path(*a):
- return os.path.join(*((os.path.dirname(__file__),) + a))
-
-
class TestIntentInOut(util.F2PyTest):
# Check that intent(in out) translates as intent(inout)
- sources = [_path('src', 'regression', 'inout.f90')]
+ sources = [util.getpath("tests", "src", "regression", "inout.f90")]
@pytest.mark.slow
def test_inout(self):
@@ -30,18 +26,22 @@ class TestIntentInOut(util.F2PyTest):
class TestNumpyVersionAttribute(util.F2PyTest):
# Check that th attribute __f2py_numpy_version__ is present
# in the compiled module and that has the value np.__version__.
- sources = [_path('src', 'regression', 'inout.f90')]
+ sources = [util.getpath("tests", "src", "regression", "inout.f90")]
@pytest.mark.slow
def test_numpy_version_attribute(self):
# Check that self.module has an attribute named "__f2py_numpy_version__"
- assert_(hasattr(self.module, "__f2py_numpy_version__"),
- msg="Fortran module does not have __f2py_numpy_version__")
+ assert_(
+ hasattr(self.module, "__f2py_numpy_version__"),
+ msg="Fortran module does not have __f2py_numpy_version__",
+ )
# Check that the attribute __f2py_numpy_version__ is a string
- assert_(isinstance(self.module.__f2py_numpy_version__, str),
- msg="__f2py_numpy_version__ is not a string")
+ assert_(
+ isinstance(self.module.__f2py_numpy_version__, str),
+ msg="__f2py_numpy_version__ is not a string",
+ )
# Check that __f2py_numpy_version__ has the value numpy.__version__
assert_string_equal(np.__version__, self.module.__f2py_numpy_version__)
@@ -50,6 +50,5 @@ class TestNumpyVersionAttribute(util.F2PyTest):
def test_include_path():
incdir = np.f2py.get_include()
fnames_in_dir = os.listdir(incdir)
- for fname in ('fortranobject.c', 'fortranobject.h'):
+ for fname in ("fortranobject.c", "fortranobject.h"):
assert fname in fnames_in_dir
-
diff --git a/numpy/f2py/tests/test_return_character.py b/numpy/f2py/tests/test_return_character.py
index 2c999ed0b..3c3a43e1b 100644
--- a/numpy/f2py/tests/test_return_character.py
+++ b/numpy/f2py/tests/test_return_character.py
@@ -4,29 +4,29 @@ from numpy import array
from numpy.testing import assert_
from . import util
import platform
-IS_S390X = platform.machine() == 's390x'
+IS_S390X = platform.machine() == "s390x"
-class TestReturnCharacter(util.F2PyTest):
+class TestReturnCharacter(util.F2PyTest):
def check_function(self, t, tname):
- if tname in ['t0', 't1', 's0', 's1']:
- assert_(t(23) == b'2')
- r = t('ab')
- assert_(r == b'a', repr(r))
- r = t(array('ab'))
- assert_(r == b'a', repr(r))
- r = t(array(77, 'u1'))
- assert_(r == b'M', repr(r))
- #assert_(_raises(ValueError, t, array([77,87])))
- #assert_(_raises(ValueError, t, array(77)))
- elif tname in ['ts', 'ss']:
- assert_(t(23) == b'23', repr(t(23)))
- assert_(t('123456789abcdef') == b'123456789a')
- elif tname in ['t5', 's5']:
- assert_(t(23) == b'23', repr(t(23)))
- assert_(t('ab') == b'ab', repr(t('ab')))
- assert_(t('123456789abcdef') == b'12345')
+ if tname in ["t0", "t1", "s0", "s1"]:
+ assert_(t(23) == b"2")
+ r = t("ab")
+ assert_(r == b"a", repr(r))
+ r = t(array("ab"))
+ assert_(r == b"a", repr(r))
+ r = t(array(77, "u1"))
+ assert_(r == b"M", repr(r))
+ # assert_(_raises(ValueError, t, array([77,87])))
+ # assert_(_raises(ValueError, t, array(77)))
+ elif tname in ["ts", "ss"]:
+ assert_(t(23) == b"23", repr(t(23)))
+ assert_(t("123456789abcdef") == b"123456789a")
+ elif tname in ["t5", "s5"]:
+ assert_(t(23) == b"23", repr(t(23)))
+ assert_(t("ab") == b"ab", repr(t("ab")))
+ assert_(t("123456789abcdef") == b"12345")
else:
raise NotImplementedError
@@ -81,7 +81,7 @@ cf2py intent(out) ts
"""
@pytest.mark.xfail(IS_S390X, reason="callback returns ' '")
- @pytest.mark.parametrize('name', 't0,t1,t5,s0,s1,s5,ss'.split(','))
+ @pytest.mark.parametrize("name", "t0,t1,t5,s0,s1,s5,ss".split(","))
def test_all(self, name):
self.check_function(getattr(self.module, name), name)
@@ -140,6 +140,6 @@ end module f90_return_char
"""
@pytest.mark.xfail(IS_S390X, reason="callback returns ' '")
- @pytest.mark.parametrize('name', 't0,t1,t5,ts,s0,s1,s5,ss'.split(','))
+ @pytest.mark.parametrize("name", "t0,t1,t5,ts,s0,s1,s5,ss".split(","))
def test_all(self, name):
self.check_function(getattr(self.module.f90_return_char, name), name)
diff --git a/numpy/f2py/tests/test_return_complex.py b/numpy/f2py/tests/test_return_complex.py
index 3d2e2b94f..ae0e3ab25 100644
--- a/numpy/f2py/tests/test_return_complex.py
+++ b/numpy/f2py/tests/test_return_complex.py
@@ -6,9 +6,8 @@ from . import util
class TestReturnComplex(util.F2PyTest):
-
def check_function(self, t, tname):
- if tname in ['t0', 't8', 's0', 's8']:
+ if tname in ["t0", "t8", "s0", "s8"]:
err = 1e-5
else:
err = 0.0
@@ -16,27 +15,27 @@ class TestReturnComplex(util.F2PyTest):
assert_(abs(t(234.6) - 234.6) <= err)
assert_(abs(t(234) - 234.0) <= err)
assert_(abs(t(234.6 + 3j) - (234.6 + 3j)) <= err)
- #assert_( abs(t('234')-234.)<=err)
- #assert_( abs(t('234.6')-234.6)<=err)
- assert_(abs(t(-234) + 234.) <= err)
- assert_(abs(t([234]) - 234.) <= err)
- assert_(abs(t((234,)) - 234.) <= err)
- assert_(abs(t(array(234)) - 234.) <= err)
- assert_(abs(t(array(23 + 4j, 'F')) - (23 + 4j)) <= err)
- assert_(abs(t(array([234])) - 234.) <= err)
- assert_(abs(t(array([[234]])) - 234.) <= err)
- assert_(abs(t(array([234], 'b')) + 22.) <= err)
- assert_(abs(t(array([234], 'h')) - 234.) <= err)
- assert_(abs(t(array([234], 'i')) - 234.) <= err)
- assert_(abs(t(array([234], 'l')) - 234.) <= err)
- assert_(abs(t(array([234], 'q')) - 234.) <= err)
- assert_(abs(t(array([234], 'f')) - 234.) <= err)
- assert_(abs(t(array([234], 'd')) - 234.) <= err)
- assert_(abs(t(array([234 + 3j], 'F')) - (234 + 3j)) <= err)
- assert_(abs(t(array([234], 'D')) - 234.) <= err)
-
- #assert_raises(TypeError, t, array([234], 'a1'))
- assert_raises(TypeError, t, 'abc')
+ # assert_( abs(t('234')-234.)<=err)
+ # assert_( abs(t('234.6')-234.6)<=err)
+ assert_(abs(t(-234) + 234.0) <= err)
+ assert_(abs(t([234]) - 234.0) <= err)
+ assert_(abs(t((234, )) - 234.0) <= err)
+ assert_(abs(t(array(234)) - 234.0) <= err)
+ assert_(abs(t(array(23 + 4j, "F")) - (23 + 4j)) <= err)
+ assert_(abs(t(array([234])) - 234.0) <= err)
+ assert_(abs(t(array([[234]])) - 234.0) <= err)
+ assert_(abs(t(array([234], "b")) + 22.0) <= err)
+ assert_(abs(t(array([234], "h")) - 234.0) <= err)
+ assert_(abs(t(array([234], "i")) - 234.0) <= err)
+ assert_(abs(t(array([234], "l")) - 234.0) <= err)
+ assert_(abs(t(array([234], "q")) - 234.0) <= err)
+ assert_(abs(t(array([234], "f")) - 234.0) <= err)
+ assert_(abs(t(array([234], "d")) - 234.0) <= err)
+ assert_(abs(t(array([234 + 3j], "F")) - (234 + 3j)) <= err)
+ assert_(abs(t(array([234], "D")) - 234.0) <= err)
+
+ # assert_raises(TypeError, t, array([234], 'a1'))
+ assert_raises(TypeError, t, "abc")
assert_raises(IndexError, t, [])
assert_raises(IndexError, t, ())
@@ -45,8 +44,8 @@ class TestReturnComplex(util.F2PyTest):
assert_raises(TypeError, t, {})
try:
- r = t(10 ** 400)
- assert_(repr(r) in ['(inf+0j)', '(Infinity+0j)'], repr(r))
+ r = t(10**400)
+ assert_(repr(r) in ["(inf+0j)", "(Infinity+0j)"], repr(r))
except OverflowError:
pass
@@ -100,7 +99,7 @@ cf2py intent(out) td
end
"""
- @pytest.mark.parametrize('name', 't0,t8,t16,td,s0,s8,s16,sd'.split(','))
+ @pytest.mark.parametrize("name", "t0,t8,t16,td,s0,s8,s16,sd".split(","))
def test_all(self, name):
self.check_function(getattr(self.module, name), name)
@@ -158,6 +157,7 @@ module f90_return_complex
end module f90_return_complex
"""
- @pytest.mark.parametrize('name', 't0,t8,t16,td,s0,s8,s16,sd'.split(','))
+ @pytest.mark.parametrize("name", "t0,t8,t16,td,s0,s8,s16,sd".split(","))
def test_all(self, name):
- self.check_function(getattr(self.module.f90_return_complex, name), name)
+ self.check_function(getattr(self.module.f90_return_complex, name),
+ name)
diff --git a/numpy/f2py/tests/test_return_integer.py b/numpy/f2py/tests/test_return_integer.py
index 0a8121dc1..9c2bdbce2 100644
--- a/numpy/f2py/tests/test_return_integer.py
+++ b/numpy/f2py/tests/test_return_integer.py
@@ -6,27 +6,26 @@ from . import util
class TestReturnInteger(util.F2PyTest):
-
def check_function(self, t, tname):
assert_(t(123) == 123, repr(t(123)))
assert_(t(123.6) == 123)
- assert_(t('123') == 123)
+ assert_(t("123") == 123)
assert_(t(-123) == -123)
assert_(t([123]) == 123)
- assert_(t((123,)) == 123)
+ assert_(t((123, )) == 123)
assert_(t(array(123)) == 123)
assert_(t(array([123])) == 123)
assert_(t(array([[123]])) == 123)
- assert_(t(array([123], 'b')) == 123)
- assert_(t(array([123], 'h')) == 123)
- assert_(t(array([123], 'i')) == 123)
- assert_(t(array([123], 'l')) == 123)
- assert_(t(array([123], 'B')) == 123)
- assert_(t(array([123], 'f')) == 123)
- assert_(t(array([123], 'd')) == 123)
+ assert_(t(array([123], "b")) == 123)
+ assert_(t(array([123], "h")) == 123)
+ assert_(t(array([123], "i")) == 123)
+ assert_(t(array([123], "l")) == 123)
+ assert_(t(array([123], "B")) == 123)
+ assert_(t(array([123], "f")) == 123)
+ assert_(t(array([123], "d")) == 123)
- #assert_raises(ValueError, t, array([123],'S3'))
- assert_raises(ValueError, t, 'abc')
+ # assert_raises(ValueError, t, array([123],'S3'))
+ assert_raises(ValueError, t, "abc")
assert_raises(IndexError, t, [])
assert_raises(IndexError, t, ())
@@ -34,7 +33,7 @@ class TestReturnInteger(util.F2PyTest):
assert_raises(Exception, t, t)
assert_raises(Exception, t, {})
- if tname in ['t8', 's8']:
+ if tname in ["t8", "s8"]:
assert_raises(OverflowError, t, 100000000000000000000000)
assert_raises(OverflowError, t, 10000000011111111111111.23)
@@ -99,8 +98,8 @@ cf2py intent(out) t8
end
"""
- @pytest.mark.parametrize('name',
- 't0,t1,t2,t4,t8,s0,s1,s2,s4,s8'.split(','))
+ @pytest.mark.parametrize("name",
+ "t0,t1,t2,t4,t8,s0,s1,s2,s4,s8".split(","))
def test_all(self, name):
self.check_function(getattr(self.module, name), name)
@@ -169,7 +168,8 @@ module f90_return_integer
end module f90_return_integer
"""
- @pytest.mark.parametrize('name',
- 't0,t1,t2,t4,t8,s0,s1,s2,s4,s8'.split(','))
+ @pytest.mark.parametrize("name",
+ "t0,t1,t2,t4,t8,s0,s1,s2,s4,s8".split(","))
def test_all(self, name):
- self.check_function(getattr(self.module.f90_return_integer, name), name)
+ self.check_function(getattr(self.module.f90_return_integer, name),
+ name)
diff --git a/numpy/f2py/tests/test_return_logical.py b/numpy/f2py/tests/test_return_logical.py
index 9db939c7e..c1a365c7a 100644
--- a/numpy/f2py/tests/test_return_logical.py
+++ b/numpy/f2py/tests/test_return_logical.py
@@ -6,7 +6,6 @@ from . import util
class TestReturnLogical(util.F2PyTest):
-
def check_function(self, t):
assert_(t(True) == 1, repr(t(True)))
assert_(t(False) == 0, repr(t(False)))
@@ -18,28 +17,28 @@ class TestReturnLogical(util.F2PyTest):
assert_(t(234) == 1)
assert_(t(234.6) == 1)
assert_(t(234.6 + 3j) == 1)
- assert_(t('234') == 1)
- assert_(t('aaa') == 1)
- assert_(t('') == 0)
+ assert_(t("234") == 1)
+ assert_(t("aaa") == 1)
+ assert_(t("") == 0)
assert_(t([]) == 0)
assert_(t(()) == 0)
assert_(t({}) == 0)
assert_(t(t) == 1)
assert_(t(-234) == 1)
- assert_(t(10 ** 100) == 1)
+ assert_(t(10**100) == 1)
assert_(t([234]) == 1)
- assert_(t((234,)) == 1)
+ assert_(t((234, )) == 1)
assert_(t(array(234)) == 1)
assert_(t(array([234])) == 1)
assert_(t(array([[234]])) == 1)
- assert_(t(array([234], 'b')) == 1)
- assert_(t(array([234], 'h')) == 1)
- assert_(t(array([234], 'i')) == 1)
- assert_(t(array([234], 'l')) == 1)
- assert_(t(array([234], 'f')) == 1)
- assert_(t(array([234], 'd')) == 1)
- assert_(t(array([234 + 3j], 'F')) == 1)
- assert_(t(array([234], 'D')) == 1)
+ assert_(t(array([234], "b")) == 1)
+ assert_(t(array([234], "h")) == 1)
+ assert_(t(array([234], "i")) == 1)
+ assert_(t(array([234], "l")) == 1)
+ assert_(t(array([234], "f")) == 1)
+ assert_(t(array([234], "d")) == 1)
+ assert_(t(array([234 + 3j], "F")) == 1)
+ assert_(t(array([234], "D")) == 1)
assert_(t(array(0)) == 0)
assert_(t(array([0])) == 0)
assert_(t(array([[0]])) == 0)
@@ -109,7 +108,7 @@ c end
"""
@pytest.mark.slow
- @pytest.mark.parametrize('name', 't0,t1,t2,t4,s0,s1,s2,s4'.split(','))
+ @pytest.mark.parametrize("name", "t0,t1,t2,t4,s0,s1,s2,s4".split(","))
def test_all(self, name):
self.check_function(getattr(self.module, name))
@@ -179,7 +178,7 @@ end module f90_return_logical
"""
@pytest.mark.slow
- @pytest.mark.parametrize('name',
- 't0,t1,t2,t4,t8,s0,s1,s2,s4,s8'.split(','))
+ @pytest.mark.parametrize("name",
+ "t0,t1,t2,t4,t8,s0,s1,s2,s4,s8".split(","))
def test_all(self, name):
self.check_function(getattr(self.module.f90_return_logical, name))
diff --git a/numpy/f2py/tests/test_return_real.py b/numpy/f2py/tests/test_return_real.py
index 8e5022a8e..d5e5ee482 100644
--- a/numpy/f2py/tests/test_return_real.py
+++ b/numpy/f2py/tests/test_return_real.py
@@ -7,34 +7,33 @@ from . import util
class TestReturnReal(util.F2PyTest):
-
def check_function(self, t, tname):
- if tname in ['t0', 't4', 's0', 's4']:
+ if tname in ["t0", "t4", "s0", "s4"]:
err = 1e-5
else:
err = 0.0
assert_(abs(t(234) - 234.0) <= err)
assert_(abs(t(234.6) - 234.6) <= err)
- assert_(abs(t('234') - 234) <= err)
- assert_(abs(t('234.6') - 234.6) <= err)
+ assert_(abs(t("234") - 234) <= err)
+ assert_(abs(t("234.6") - 234.6) <= err)
assert_(abs(t(-234) + 234) <= err)
assert_(abs(t([234]) - 234) <= err)
- assert_(abs(t((234,)) - 234.) <= err)
- assert_(abs(t(array(234)) - 234.) <= err)
- assert_(abs(t(array([234])) - 234.) <= err)
- assert_(abs(t(array([[234]])) - 234.) <= err)
- assert_(abs(t(array([234], 'b')) + 22) <= err)
- assert_(abs(t(array([234], 'h')) - 234.) <= err)
- assert_(abs(t(array([234], 'i')) - 234.) <= err)
- assert_(abs(t(array([234], 'l')) - 234.) <= err)
- assert_(abs(t(array([234], 'B')) - 234.) <= err)
- assert_(abs(t(array([234], 'f')) - 234.) <= err)
- assert_(abs(t(array([234], 'd')) - 234.) <= err)
- if tname in ['t0', 't4', 's0', 's4']:
+ assert_(abs(t((234, )) - 234.0) <= err)
+ assert_(abs(t(array(234)) - 234.0) <= err)
+ assert_(abs(t(array([234])) - 234.0) <= err)
+ assert_(abs(t(array([[234]])) - 234.0) <= err)
+ assert_(abs(t(array([234], "b")) + 22) <= err)
+ assert_(abs(t(array([234], "h")) - 234.0) <= err)
+ assert_(abs(t(array([234], "i")) - 234.0) <= err)
+ assert_(abs(t(array([234], "l")) - 234.0) <= err)
+ assert_(abs(t(array([234], "B")) - 234.0) <= err)
+ assert_(abs(t(array([234], "f")) - 234.0) <= err)
+ assert_(abs(t(array([234], "d")) - 234.0) <= err)
+ if tname in ["t0", "t4", "s0", "s4"]:
assert_(t(1e200) == t(1e300)) # inf
- #assert_raises(ValueError, t, array([234], 'S1'))
- assert_raises(ValueError, t, 'abc')
+ # assert_raises(ValueError, t, array([234], 'S1'))
+ assert_raises(ValueError, t, "abc")
assert_raises(IndexError, t, [])
assert_raises(IndexError, t, ())
@@ -43,17 +42,17 @@ class TestReturnReal(util.F2PyTest):
assert_raises(Exception, t, {})
try:
- r = t(10 ** 400)
- assert_(repr(r) in ['inf', 'Infinity'], repr(r))
+ r = t(10**400)
+ assert_(repr(r) in ["inf", "Infinity"], repr(r))
except OverflowError:
pass
-
@pytest.mark.skipif(
- platform.system() == 'Darwin',
+ platform.system() == "Darwin",
reason="Prone to error when run with numpy/f2py/tests on mac os, "
- "but not when run in isolation")
+ "but not when run in isolation",
+)
class TestCReturnReal(TestReturnReal):
suffix = ".pyf"
module_name = "c_ext_return_real"
@@ -86,7 +85,7 @@ end interface
end python module c_ext_return_real
"""
- @pytest.mark.parametrize('name', 't4,t8,s4,s8'.split(','))
+ @pytest.mark.parametrize("name", "t4,t8,s4,s8".split(","))
def test_all(self, name):
self.check_function(getattr(self.module, name), name)
@@ -140,7 +139,7 @@ cf2py intent(out) td
end
"""
- @pytest.mark.parametrize('name', 't0,t4,t8,td,s0,s4,s8,sd'.split(','))
+ @pytest.mark.parametrize("name", "t0,t4,t8,td,s0,s4,s8,sd".split(","))
def test_all(self, name):
self.check_function(getattr(self.module, name), name)
@@ -198,6 +197,6 @@ module f90_return_real
end module f90_return_real
"""
- @pytest.mark.parametrize('name', 't0,t4,t8,td,s0,s4,s8,sd'.split(','))
+ @pytest.mark.parametrize("name", "t0,t4,t8,td,s0,s4,s8,sd".split(","))
def test_all(self, name):
self.check_function(getattr(self.module.f90_return_real, name), name)
diff --git a/numpy/f2py/tests/test_semicolon_split.py b/numpy/f2py/tests/test_semicolon_split.py
index d8b4bf222..745c472f8 100644
--- a/numpy/f2py/tests/test_semicolon_split.py
+++ b/numpy/f2py/tests/test_semicolon_split.py
@@ -4,15 +4,17 @@ import pytest
from . import util
from numpy.testing import assert_equal
+
@pytest.mark.skipif(
- platform.system() == 'Darwin',
+ platform.system() == "Darwin",
reason="Prone to error when run with numpy/f2py/tests on mac os, "
- "but not when run in isolation")
+ "but not when run in isolation",
+)
class TestMultiline(util.F2PyTest):
suffix = ".pyf"
module_name = "multiline"
- code = """
-python module {module}
+ code = f"""
+python module {module_name}
usercode '''
void foo(int* x) {{
char dummy = ';';
@@ -25,22 +27,23 @@ void foo(int* x) {{
integer intent(out) :: x
end subroutine foo
end interface
-end python module {module}
- """.format(module=module_name)
+end python module {module_name}
+ """
def test_multiline(self):
assert_equal(self.module.foo(), 42)
@pytest.mark.skipif(
- platform.system() == 'Darwin',
+ platform.system() == "Darwin",
reason="Prone to error when run with numpy/f2py/tests on mac os, "
- "but not when run in isolation")
+ "but not when run in isolation",
+)
class TestCallstatement(util.F2PyTest):
suffix = ".pyf"
module_name = "callstatement"
- code = """
-python module {module}
+ code = f"""
+python module {module_name}
usercode '''
void foo(int* x) {{
}}
@@ -56,8 +59,8 @@ void foo(int* x) {{
}}
end subroutine foo
end interface
-end python module {module}
- """.format(module=module_name)
+end python module {module_name}
+ """
def test_callstatement(self):
assert_equal(self.module.foo(), 42)
diff --git a/numpy/f2py/tests/test_size.py b/numpy/f2py/tests/test_size.py
index b609fa77f..3360e2a3d 100644
--- a/numpy/f2py/tests/test_size.py
+++ b/numpy/f2py/tests/test_size.py
@@ -5,12 +5,8 @@ from numpy.testing import assert_equal
from . import util
-def _path(*a):
- return os.path.join(*((os.path.dirname(__file__),) + a))
-
-
class TestSizeSumExample(util.F2PyTest):
- sources = [_path('src', 'size', 'foo.f90')]
+ sources = [util.getpath("tests", "src", "size", "foo.f90")]
@pytest.mark.slow
def test_all(self):
diff --git a/numpy/f2py/tests/test_string.py b/numpy/f2py/tests/test_string.py
index 7b27f8786..1a6d59610 100644
--- a/numpy/f2py/tests/test_string.py
+++ b/numpy/f2py/tests/test_string.py
@@ -6,26 +6,22 @@ import numpy as np
from . import util
-def _path(*a):
- return os.path.join(*((os.path.dirname(__file__),) + a))
-
-
class TestString(util.F2PyTest):
- sources = [_path('src', 'string', 'char.f90')]
+ sources = [util.getpath("tests", "src", "string", "char.f90")]
@pytest.mark.slow
def test_char(self):
- strings = np.array(['ab', 'cd', 'ef'], dtype='c').T
- inp, out = self.module.char_test.change_strings(strings,
- strings.shape[1])
+ strings = np.array(["ab", "cd", "ef"], dtype="c").T
+ inp, out = self.module.char_test.change_strings(
+ strings, strings.shape[1])
assert_array_equal(inp, strings)
expected = strings.copy()
- expected[1, :] = 'AAA'
+ expected[1, :] = "AAA"
assert_array_equal(out, expected)
class TestDocStringArguments(util.F2PyTest):
- suffix = '.f'
+ suffix = ".f"
code = """
C FILE: STRING.F
@@ -34,39 +30,30 @@ C FILE: STRING.F
CHARACTER*(*) C,D
Cf2py intent(in) a,c
Cf2py intent(inout) b,d
- PRINT*, "A=",A
- PRINT*, "B=",B
- PRINT*, "C=",C
- PRINT*, "D=",D
- PRINT*, "CHANGE A,B,C,D"
A(1:1) = 'A'
B(1:1) = 'B'
C(1:1) = 'C'
D(1:1) = 'D'
- PRINT*, "A=",A
- PRINT*, "B=",B
- PRINT*, "C=",C
- PRINT*, "D=",D
END
C END OF FILE STRING.F
"""
def test_example(self):
- a = np.array(b'123\0\0')
- b = np.array(b'123\0\0')
- c = np.array(b'123')
- d = np.array(b'123')
+ a = np.array(b"123\0\0")
+ b = np.array(b"123\0\0")
+ c = np.array(b"123")
+ d = np.array(b"123")
self.module.foo(a, b, c, d)
- assert a.tobytes() == b'123\0\0'
- assert b.tobytes() == b'B23\0\0', (b.tobytes(),)
- assert c.tobytes() == b'123'
- assert d.tobytes() == b'D23'
+ assert a.tobytes() == b"123\0\0"
+ assert b.tobytes() == b"B23\0\0"
+ assert c.tobytes() == b"123"
+ assert d.tobytes() == b"D23"
class TestFixedString(util.F2PyTest):
- suffix = '.f90'
+ suffix = ".f90"
code = textwrap.dedent("""
function sint(s) result(i)
@@ -122,41 +109,41 @@ class TestFixedString(util.F2PyTest):
end = len(s)
i = 0
for j in range(start, min(end, len(s))):
- i += s[j] * 10 ** j
+ i += s[j] * 10**j
return i
- def _get_input(self, intent='in'):
- if intent in ['in']:
- yield ''
- yield '1'
- yield '1234'
- yield '12345'
- yield b''
- yield b'\0'
- yield b'1'
- yield b'\01'
- yield b'1\0'
- yield b'1234'
- yield b'12345'
- yield np.ndarray((), np.bytes_, buffer=b'') # array(b'', dtype='|S0')
- yield np.array(b'') # array(b'', dtype='|S1')
- yield np.array(b'\0')
- yield np.array(b'1')
- yield np.array(b'1\0')
- yield np.array(b'\01')
- yield np.array(b'1234')
- yield np.array(b'123\0')
- yield np.array(b'12345')
+ def _get_input(self, intent="in"):
+ if intent in ["in"]:
+ yield ""
+ yield "1"
+ yield "1234"
+ yield "12345"
+ yield b""
+ yield b"\0"
+ yield b"1"
+ yield b"\01"
+ yield b"1\0"
+ yield b"1234"
+ yield b"12345"
+ yield np.ndarray((), np.bytes_, buffer=b"") # array(b'', dtype='|S0')
+ yield np.array(b"") # array(b'', dtype='|S1')
+ yield np.array(b"\0")
+ yield np.array(b"1")
+ yield np.array(b"1\0")
+ yield np.array(b"\01")
+ yield np.array(b"1234")
+ yield np.array(b"123\0")
+ yield np.array(b"12345")
def test_intent_in(self):
for s in self._get_input():
r = self.module.test_in_bytes4(s)
# also checks that s is not changed inplace
expected = self._sint(s, end=4)
- assert r == expected, (s)
+ assert r == expected, s
def test_intent_inout(self):
- for s in self._get_input(intent='inout'):
+ for s in self._get_input(intent="inout"):
rest = self._sint(s, start=4)
r = self.module.test_inout_bytes4(s)
expected = self._sint(s, end=4)
diff --git a/numpy/f2py/tests/test_symbolic.py b/numpy/f2py/tests/test_symbolic.py
index 52cabac53..4b8993886 100644
--- a/numpy/f2py/tests/test_symbolic.py
+++ b/numpy/f2py/tests/test_symbolic.py
@@ -1,35 +1,55 @@
from numpy.testing import assert_raises
from numpy.f2py.symbolic import (
- Expr, Op, ArithOp, Language,
- as_symbol, as_number, as_string, as_array, as_complex,
- as_terms, as_factors, eliminate_quotes, insert_quotes,
- fromstring, as_expr, as_apply,
- as_numer_denom, as_ternary, as_ref, as_deref,
- normalize, as_eq, as_ne, as_lt, as_gt, as_le, as_ge
- )
+ Expr,
+ Op,
+ ArithOp,
+ Language,
+ as_symbol,
+ as_number,
+ as_string,
+ as_array,
+ as_complex,
+ as_terms,
+ as_factors,
+ eliminate_quotes,
+ insert_quotes,
+ fromstring,
+ as_expr,
+ as_apply,
+ as_numer_denom,
+ as_ternary,
+ as_ref,
+ as_deref,
+ normalize,
+ as_eq,
+ as_ne,
+ as_lt,
+ as_gt,
+ as_le,
+ as_ge,
+)
from . import util
class TestSymbolic(util.F2PyTest):
-
def test_eliminate_quotes(self):
def worker(s):
r, d = eliminate_quotes(s)
s1 = insert_quotes(r, d)
assert s1 == s
- for kind in ['', 'mykind_']:
+ for kind in ["", "mykind_"]:
worker(kind + '"1234" // "ABCD"')
worker(kind + '"1234" // ' + kind + '"ABCD"')
- worker(kind + '"1234" // \'ABCD\'')
- worker(kind + '"1234" // ' + kind + '\'ABCD\'')
+ worker(kind + "\"1234\" // 'ABCD'")
+ worker(kind + '"1234" // ' + kind + "'ABCD'")
worker(kind + '"1\\"2\'AB\'34"')
- worker('a = ' + kind + "'1\\'2\"AB\"34'")
+ worker("a = " + kind + "'1\\'2\"AB\"34'")
def test_sanity(self):
- x = as_symbol('x')
- y = as_symbol('y')
- z = as_symbol('z')
+ x = as_symbol("x")
+ y = as_symbol("y")
+ z = as_symbol("z")
assert x.op == Op.SYMBOL
assert repr(x) == "Expr(Op.SYMBOL, 'x')"
@@ -70,7 +90,7 @@ class TestSymbolic(util.F2PyTest):
assert s != s2
a = as_array((n, m))
- b = as_array((n,))
+ b = as_array((n, ))
assert a.op == Op.ARRAY
assert repr(a) == ("Expr(Op.ARRAY, (Expr(Op.INTEGER, (123, 4)),"
" Expr(Op.INTEGER, (456, 4))))")
@@ -108,88 +128,90 @@ class TestSymbolic(util.F2PyTest):
assert hash(e) is not None
def test_tostring_fortran(self):
- x = as_symbol('x')
- y = as_symbol('y')
- z = as_symbol('z')
+ x = as_symbol("x")
+ y = as_symbol("y")
+ z = as_symbol("z")
n = as_number(123)
m = as_number(456)
a = as_array((n, m))
c = as_complex(n, m)
- assert str(x) == 'x'
- assert str(n) == '123'
- assert str(a) == '[123, 456]'
- assert str(c) == '(123, 456)'
-
- assert str(Expr(Op.TERMS, {x: 1})) == 'x'
- assert str(Expr(Op.TERMS, {x: 2})) == '2 * x'
- assert str(Expr(Op.TERMS, {x: -1})) == '-x'
- assert str(Expr(Op.TERMS, {x: -2})) == '-2 * x'
- assert str(Expr(Op.TERMS, {x: 1, y: 1})) == 'x + y'
- assert str(Expr(Op.TERMS, {x: -1, y: -1})) == '-x - y'
- assert str(Expr(Op.TERMS, {x: 2, y: 3})) == '2 * x + 3 * y'
- assert str(Expr(Op.TERMS, {x: -2, y: 3})) == '-2 * x + 3 * y'
- assert str(Expr(Op.TERMS, {x: 2, y: -3})) == '2 * x - 3 * y'
-
- assert str(Expr(Op.FACTORS, {x: 1})) == 'x'
- assert str(Expr(Op.FACTORS, {x: 2})) == 'x ** 2'
- assert str(Expr(Op.FACTORS, {x: -1})) == 'x ** -1'
- assert str(Expr(Op.FACTORS, {x: -2})) == 'x ** -2'
- assert str(Expr(Op.FACTORS, {x: 1, y: 1})) == 'x * y'
- assert str(Expr(Op.FACTORS, {x: 2, y: 3})) == 'x ** 2 * y ** 3'
+ assert str(x) == "x"
+ assert str(n) == "123"
+ assert str(a) == "[123, 456]"
+ assert str(c) == "(123, 456)"
+
+ assert str(Expr(Op.TERMS, {x: 1})) == "x"
+ assert str(Expr(Op.TERMS, {x: 2})) == "2 * x"
+ assert str(Expr(Op.TERMS, {x: -1})) == "-x"
+ assert str(Expr(Op.TERMS, {x: -2})) == "-2 * x"
+ assert str(Expr(Op.TERMS, {x: 1, y: 1})) == "x + y"
+ assert str(Expr(Op.TERMS, {x: -1, y: -1})) == "-x - y"
+ assert str(Expr(Op.TERMS, {x: 2, y: 3})) == "2 * x + 3 * y"
+ assert str(Expr(Op.TERMS, {x: -2, y: 3})) == "-2 * x + 3 * y"
+ assert str(Expr(Op.TERMS, {x: 2, y: -3})) == "2 * x - 3 * y"
+
+ assert str(Expr(Op.FACTORS, {x: 1})) == "x"
+ assert str(Expr(Op.FACTORS, {x: 2})) == "x ** 2"
+ assert str(Expr(Op.FACTORS, {x: -1})) == "x ** -1"
+ assert str(Expr(Op.FACTORS, {x: -2})) == "x ** -2"
+ assert str(Expr(Op.FACTORS, {x: 1, y: 1})) == "x * y"
+ assert str(Expr(Op.FACTORS, {x: 2, y: 3})) == "x ** 2 * y ** 3"
v = Expr(Op.FACTORS, {x: 2, Expr(Op.TERMS, {x: 1, y: 1}): 3})
- assert str(v) == 'x ** 2 * (x + y) ** 3', str(v)
+ assert str(v) == "x ** 2 * (x + y) ** 3", str(v)
v = Expr(Op.FACTORS, {x: 2, Expr(Op.FACTORS, {x: 1, y: 1}): 3})
- assert str(v) == 'x ** 2 * (x * y) ** 3', str(v)
+ assert str(v) == "x ** 2 * (x * y) ** 3", str(v)
- assert str(Expr(Op.APPLY, ('f', (), {}))) == 'f()'
- assert str(Expr(Op.APPLY, ('f', (x,), {}))) == 'f(x)'
- assert str(Expr(Op.APPLY, ('f', (x, y), {}))) == 'f(x, y)'
- assert str(Expr(Op.INDEXING, ('f', x))) == 'f[x]'
+ assert str(Expr(Op.APPLY, ("f", (), {}))) == "f()"
+ assert str(Expr(Op.APPLY, ("f", (x, ), {}))) == "f(x)"
+ assert str(Expr(Op.APPLY, ("f", (x, y), {}))) == "f(x, y)"
+ assert str(Expr(Op.INDEXING, ("f", x))) == "f[x]"
- assert str(as_ternary(x, y, z)) == 'merge(y, z, x)'
- assert str(as_eq(x, y)) == 'x .eq. y'
- assert str(as_ne(x, y)) == 'x .ne. y'
- assert str(as_lt(x, y)) == 'x .lt. y'
- assert str(as_le(x, y)) == 'x .le. y'
- assert str(as_gt(x, y)) == 'x .gt. y'
- assert str(as_ge(x, y)) == 'x .ge. y'
+ assert str(as_ternary(x, y, z)) == "merge(y, z, x)"
+ assert str(as_eq(x, y)) == "x .eq. y"
+ assert str(as_ne(x, y)) == "x .ne. y"
+ assert str(as_lt(x, y)) == "x .lt. y"
+ assert str(as_le(x, y)) == "x .le. y"
+ assert str(as_gt(x, y)) == "x .gt. y"
+ assert str(as_ge(x, y)) == "x .ge. y"
def test_tostring_c(self):
language = Language.C
- x = as_symbol('x')
- y = as_symbol('y')
- z = as_symbol('z')
+ x = as_symbol("x")
+ y = as_symbol("y")
+ z = as_symbol("z")
n = as_number(123)
- assert Expr(Op.FACTORS, {x: 2}).tostring(language=language) == 'x * x'
- assert Expr(Op.FACTORS, {x + y: 2}).tostring(
- language=language) == '(x + y) * (x + y)'
- assert Expr(Op.FACTORS, {x: 12}).tostring(
- language=language) == 'pow(x, 12)'
-
- assert as_apply(ArithOp.DIV, x, y).tostring(
- language=language) == 'x / y'
- assert as_apply(ArithOp.DIV, x, x + y).tostring(
- language=language) == 'x / (x + y)'
- assert as_apply(ArithOp.DIV, x - y, x + y).tostring(
- language=language) == '(x - y) / (x + y)'
- assert (x + (x - y) / (x + y) + n).tostring(
- language=language) == '123 + x + (x - y) / (x + y)'
-
- assert as_ternary(x, y, z).tostring(language=language) == '(x ? y : z)'
- assert as_eq(x, y).tostring(language=language) == 'x == y'
- assert as_ne(x, y).tostring(language=language) == 'x != y'
- assert as_lt(x, y).tostring(language=language) == 'x < y'
- assert as_le(x, y).tostring(language=language) == 'x <= y'
- assert as_gt(x, y).tostring(language=language) == 'x > y'
- assert as_ge(x, y).tostring(language=language) == 'x >= y'
+ assert Expr(Op.FACTORS, {x: 2}).tostring(language=language) == "x * x"
+ assert (Expr(Op.FACTORS, {
+ x + y: 2
+ }).tostring(language=language) == "(x + y) * (x + y)")
+ assert Expr(Op.FACTORS, {
+ x: 12
+ }).tostring(language=language) == "pow(x, 12)"
+
+ assert as_apply(ArithOp.DIV, x,
+ y).tostring(language=language) == "x / y"
+ assert (as_apply(ArithOp.DIV, x,
+ x + y).tostring(language=language) == "x / (x + y)")
+ assert (as_apply(ArithOp.DIV, x - y, x +
+ y).tostring(language=language) == "(x - y) / (x + y)")
+ assert (x + (x - y) / (x + y) +
+ n).tostring(language=language) == "123 + x + (x - y) / (x + y)"
+
+ assert as_ternary(x, y, z).tostring(language=language) == "(x ? y : z)"
+ assert as_eq(x, y).tostring(language=language) == "x == y"
+ assert as_ne(x, y).tostring(language=language) == "x != y"
+ assert as_lt(x, y).tostring(language=language) == "x < y"
+ assert as_le(x, y).tostring(language=language) == "x <= y"
+ assert as_gt(x, y).tostring(language=language) == "x > y"
+ assert as_ge(x, y).tostring(language=language) == "x >= y"
def test_operations(self):
- x = as_symbol('x')
- y = as_symbol('y')
- z = as_symbol('z')
+ x = as_symbol("x")
+ y = as_symbol("y")
+ z = as_symbol("z")
assert x + x == Expr(Op.TERMS, {x: 2})
assert x - x == Expr(Op.INTEGER, (0, 4))
@@ -205,28 +227,35 @@ class TestSymbolic(util.F2PyTest):
assert 2 * x + 3 * y == Expr(Op.TERMS, {x: 2, y: 3})
assert (x + y) * 2 == Expr(Op.TERMS, {x: 2, y: 2})
- assert x ** 2 == Expr(Op.FACTORS, {x: 2})
- assert (x + y) ** 2 == Expr(Op.TERMS,
- {Expr(Op.FACTORS, {x: 2}): 1,
- Expr(Op.FACTORS, {y: 2}): 1,
- Expr(Op.FACTORS, {x: 1, y: 1}): 2})
- assert (x + y) * x == x ** 2 + x * y
- assert (x + y) ** 2 == x ** 2 + 2 * x * y + y ** 2
- assert (x + y) ** 2 + (x - y) ** 2 == 2 * x ** 2 + 2 * y ** 2
+ assert x**2 == Expr(Op.FACTORS, {x: 2})
+ assert (x + y)**2 == Expr(
+ Op.TERMS,
+ {
+ Expr(Op.FACTORS, {x: 2}): 1,
+ Expr(Op.FACTORS, {y: 2}): 1,
+ Expr(Op.FACTORS, {
+ x: 1,
+ y: 1
+ }): 2,
+ },
+ )
+ assert (x + y) * x == x**2 + x * y
+ assert (x + y)**2 == x**2 + 2 * x * y + y**2
+ assert (x + y)**2 + (x - y)**2 == 2 * x**2 + 2 * y**2
assert (x + y) * z == x * z + y * z
assert z * (x + y) == x * z + y * z
assert (x / 2) == as_apply(ArithOp.DIV, x, as_number(2))
assert (2 * x / 2) == x
- assert (3 * x / 2) == as_apply(ArithOp.DIV, 3*x, as_number(2))
+ assert (3 * x / 2) == as_apply(ArithOp.DIV, 3 * x, as_number(2))
assert (4 * x / 2) == 2 * x
- assert (5 * x / 2) == as_apply(ArithOp.DIV, 5*x, as_number(2))
+ assert (5 * x / 2) == as_apply(ArithOp.DIV, 5 * x, as_number(2))
assert (6 * x / 2) == 3 * x
- assert ((3*5) * x / 6) == as_apply(ArithOp.DIV, 5*x, as_number(2))
- assert (30*x**2*y**4 / (24*x**3*y**3)) == as_apply(ArithOp.DIV,
- 5*y, 4*x)
- assert ((15 * x / 6) / 5) == as_apply(
- ArithOp.DIV, x, as_number(2)), ((15 * x / 6) / 5)
+ assert ((3 * 5) * x / 6) == as_apply(ArithOp.DIV, 5 * x, as_number(2))
+ assert (30 * x**2 * y**4 / (24 * x**3 * y**3)) == as_apply(
+ ArithOp.DIV, 5 * y, 4 * x)
+ assert ((15 * x / 6) / 5) == as_apply(ArithOp.DIV, x,
+ as_number(2)), (15 * x / 6) / 5
assert (x / (5 / x)) == as_apply(ArithOp.DIV, x**2, as_number(5))
assert (x / 2.0) == Expr(Op.TERMS, {x: 0.5})
@@ -238,127 +267,128 @@ class TestSymbolic(util.F2PyTest):
assert s // x == Expr(Op.CONCAT, (s, x))
assert x // s == Expr(Op.CONCAT, (x, s))
- c = as_complex(1., 2.)
- assert -c == as_complex(-1., -2.)
- assert c + c == as_expr((1+2j)*2)
- assert c * c == as_expr((1+2j)**2)
+ c = as_complex(1.0, 2.0)
+ assert -c == as_complex(-1.0, -2.0)
+ assert c + c == as_expr((1 + 2j) * 2)
+ assert c * c == as_expr((1 + 2j)**2)
def test_substitute(self):
- x = as_symbol('x')
- y = as_symbol('y')
- z = as_symbol('z')
+ x = as_symbol("x")
+ y = as_symbol("y")
+ z = as_symbol("z")
a = as_array((x, y))
assert x.substitute({x: y}) == y
assert (x + y).substitute({x: z}) == y + z
assert (x * y).substitute({x: z}) == y * z
- assert (x ** 4).substitute({x: z}) == z ** 4
+ assert (x**4).substitute({x: z}) == z**4
assert (x / y).substitute({x: z}) == z / y
assert x.substitute({x: y + z}) == y + z
assert a.substitute({x: y + z}) == as_array((y + z, y))
- assert as_ternary(x, y, z).substitute(
- {x: y + z}) == as_ternary(y + z, y, z)
- assert as_eq(x, y).substitute(
- {x: y + z}) == as_eq(y + z, y)
+ assert as_ternary(x, y,
+ z).substitute({x: y + z}) == as_ternary(y + z, y, z)
+ assert as_eq(x, y).substitute({x: y + z}) == as_eq(y + z, y)
def test_fromstring(self):
- x = as_symbol('x')
- y = as_symbol('y')
- z = as_symbol('z')
- f = as_symbol('f')
+ x = as_symbol("x")
+ y = as_symbol("y")
+ z = as_symbol("z")
+ f = as_symbol("f")
s = as_string('"ABC"')
t = as_string('"123"')
a = as_array((x, y))
- assert fromstring('x') == x
- assert fromstring('+ x') == x
- assert fromstring('- x') == -x
- assert fromstring('x + y') == x + y
- assert fromstring('x + 1') == x + 1
- assert fromstring('x * y') == x * y
- assert fromstring('x * 2') == x * 2
- assert fromstring('x / y') == x / y
- assert fromstring('x ** 2',
- language=Language.Python) == x ** 2
- assert fromstring('x ** 2 ** 3',
- language=Language.Python) == x ** 2 ** 3
- assert fromstring('(x + y) * z') == (x + y) * z
-
- assert fromstring('f(x)') == f(x)
- assert fromstring('f(x,y)') == f(x, y)
- assert fromstring('f[x]') == f[x]
- assert fromstring('f[x][y]') == f[x][y]
+ assert fromstring("x") == x
+ assert fromstring("+ x") == x
+ assert fromstring("- x") == -x
+ assert fromstring("x + y") == x + y
+ assert fromstring("x + 1") == x + 1
+ assert fromstring("x * y") == x * y
+ assert fromstring("x * 2") == x * 2
+ assert fromstring("x / y") == x / y
+ assert fromstring("x ** 2", language=Language.Python) == x**2
+ assert fromstring("x ** 2 ** 3", language=Language.Python) == x**2**3
+ assert fromstring("(x + y) * z") == (x + y) * z
+
+ assert fromstring("f(x)") == f(x)
+ assert fromstring("f(x,y)") == f(x, y)
+ assert fromstring("f[x]") == f[x]
+ assert fromstring("f[x][y]") == f[x][y]
assert fromstring('"ABC"') == s
- assert normalize(fromstring('"ABC" // "123" ',
- language=Language.Fortran)) == s // t
+ assert (normalize(
+ fromstring('"ABC" // "123" ',
+ language=Language.Fortran)) == s // t)
assert fromstring('f("ABC")') == f(s)
- assert fromstring('MYSTRKIND_"ABC"') == as_string('"ABC"', 'MYSTRKIND')
-
- assert fromstring('(/x, y/)') == a, fromstring('(/x, y/)')
- assert fromstring('f((/x, y/))') == f(a)
- assert fromstring('(/(x+y)*z/)') == as_array(((x+y)*z,))
-
- assert fromstring('123') == as_number(123)
- assert fromstring('123_2') == as_number(123, 2)
- assert fromstring('123_myintkind') == as_number(123, 'myintkind')
-
- assert fromstring('123.0') == as_number(123.0, 4)
- assert fromstring('123.0_4') == as_number(123.0, 4)
- assert fromstring('123.0_8') == as_number(123.0, 8)
- assert fromstring('123.0e0') == as_number(123.0, 4)
- assert fromstring('123.0d0') == as_number(123.0, 8)
- assert fromstring('123d0') == as_number(123.0, 8)
- assert fromstring('123e-0') == as_number(123.0, 4)
- assert fromstring('123d+0') == as_number(123.0, 8)
- assert fromstring('123.0_myrealkind') == as_number(123.0, 'myrealkind')
- assert fromstring('3E4') == as_number(30000.0, 4)
-
- assert fromstring('(1, 2)') == as_complex(1, 2)
- assert fromstring('(1e2, PI)') == as_complex(
- as_number(100.0), as_symbol('PI'))
-
- assert fromstring('[1, 2]') == as_array((as_number(1), as_number(2)))
-
- assert fromstring('POINT(x, y=1)') == as_apply(
- as_symbol('POINT'), x, y=as_number(1))
- assert (fromstring('PERSON(name="John", age=50, shape=(/34, 23/))')
- == as_apply(as_symbol('PERSON'),
- name=as_string('"John"'),
- age=as_number(50),
- shape=as_array((as_number(34), as_number(23)))))
-
- assert fromstring('x?y:z') == as_ternary(x, y, z)
-
- assert fromstring('*x') == as_deref(x)
- assert fromstring('**x') == as_deref(as_deref(x))
- assert fromstring('&x') == as_ref(x)
- assert fromstring('(*x) * (*y)') == as_deref(x) * as_deref(y)
- assert fromstring('(*x) * *y') == as_deref(x) * as_deref(y)
- assert fromstring('*x * *y') == as_deref(x) * as_deref(y)
- assert fromstring('*x**y') == as_deref(x) * as_deref(y)
-
- assert fromstring('x == y') == as_eq(x, y)
- assert fromstring('x != y') == as_ne(x, y)
- assert fromstring('x < y') == as_lt(x, y)
- assert fromstring('x > y') == as_gt(x, y)
- assert fromstring('x <= y') == as_le(x, y)
- assert fromstring('x >= y') == as_ge(x, y)
-
- assert fromstring('x .eq. y', language=Language.Fortran) == as_eq(x, y)
- assert fromstring('x .ne. y', language=Language.Fortran) == as_ne(x, y)
- assert fromstring('x .lt. y', language=Language.Fortran) == as_lt(x, y)
- assert fromstring('x .gt. y', language=Language.Fortran) == as_gt(x, y)
- assert fromstring('x .le. y', language=Language.Fortran) == as_le(x, y)
- assert fromstring('x .ge. y', language=Language.Fortran) == as_ge(x, y)
+ assert fromstring('MYSTRKIND_"ABC"') == as_string('"ABC"', "MYSTRKIND")
+
+ assert fromstring("(/x, y/)") == a, fromstring("(/x, y/)")
+ assert fromstring("f((/x, y/))") == f(a)
+ assert fromstring("(/(x+y)*z/)") == as_array(((x + y) * z, ))
+
+ assert fromstring("123") == as_number(123)
+ assert fromstring("123_2") == as_number(123, 2)
+ assert fromstring("123_myintkind") == as_number(123, "myintkind")
+
+ assert fromstring("123.0") == as_number(123.0, 4)
+ assert fromstring("123.0_4") == as_number(123.0, 4)
+ assert fromstring("123.0_8") == as_number(123.0, 8)
+ assert fromstring("123.0e0") == as_number(123.0, 4)
+ assert fromstring("123.0d0") == as_number(123.0, 8)
+ assert fromstring("123d0") == as_number(123.0, 8)
+ assert fromstring("123e-0") == as_number(123.0, 4)
+ assert fromstring("123d+0") == as_number(123.0, 8)
+ assert fromstring("123.0_myrealkind") == as_number(123.0, "myrealkind")
+ assert fromstring("3E4") == as_number(30000.0, 4)
+
+ assert fromstring("(1, 2)") == as_complex(1, 2)
+ assert fromstring("(1e2, PI)") == as_complex(as_number(100.0),
+ as_symbol("PI"))
+
+ assert fromstring("[1, 2]") == as_array((as_number(1), as_number(2)))
+
+ assert fromstring("POINT(x, y=1)") == as_apply(as_symbol("POINT"),
+ x,
+ y=as_number(1))
+ assert fromstring(
+ 'PERSON(name="John", age=50, shape=(/34, 23/))') == as_apply(
+ as_symbol("PERSON"),
+ name=as_string('"John"'),
+ age=as_number(50),
+ shape=as_array((as_number(34), as_number(23))),
+ )
+
+ assert fromstring("x?y:z") == as_ternary(x, y, z)
+
+ assert fromstring("*x") == as_deref(x)
+ assert fromstring("**x") == as_deref(as_deref(x))
+ assert fromstring("&x") == as_ref(x)
+ assert fromstring("(*x) * (*y)") == as_deref(x) * as_deref(y)
+ assert fromstring("(*x) * *y") == as_deref(x) * as_deref(y)
+ assert fromstring("*x * *y") == as_deref(x) * as_deref(y)
+ assert fromstring("*x**y") == as_deref(x) * as_deref(y)
+
+ assert fromstring("x == y") == as_eq(x, y)
+ assert fromstring("x != y") == as_ne(x, y)
+ assert fromstring("x < y") == as_lt(x, y)
+ assert fromstring("x > y") == as_gt(x, y)
+ assert fromstring("x <= y") == as_le(x, y)
+ assert fromstring("x >= y") == as_ge(x, y)
+
+ assert fromstring("x .eq. y", language=Language.Fortran) == as_eq(x, y)
+ assert fromstring("x .ne. y", language=Language.Fortran) == as_ne(x, y)
+ assert fromstring("x .lt. y", language=Language.Fortran) == as_lt(x, y)
+ assert fromstring("x .gt. y", language=Language.Fortran) == as_gt(x, y)
+ assert fromstring("x .le. y", language=Language.Fortran) == as_le(x, y)
+ assert fromstring("x .ge. y", language=Language.Fortran) == as_ge(x, y)
def test_traverse(self):
- x = as_symbol('x')
- y = as_symbol('y')
- z = as_symbol('z')
- f = as_symbol('f')
+ x = as_symbol("x")
+ y = as_symbol("y")
+ z = as_symbol("z")
+ f = as_symbol("f")
# Use traverse to substitute a symbol
def replace_visit(s, r=z):
@@ -373,8 +403,9 @@ class TestSymbolic(util.F2PyTest):
assert (f[y]).traverse(replace_visit) == f[y]
assert (f[z]).traverse(replace_visit) == f[z]
assert (x + y + z).traverse(replace_visit) == (2 * z + y)
- assert (x + f(y, x - z)).traverse(
- replace_visit) == (z + f(y, as_number(0)))
+ assert (x +
+ f(y, x - z)).traverse(replace_visit) == (z +
+ f(y, as_number(0)))
assert as_eq(x, y).traverse(replace_visit) == as_eq(z, y)
# Use traverse to collect symbols, method 1
@@ -416,28 +447,28 @@ class TestSymbolic(util.F2PyTest):
assert symbols == {x}
def test_linear_solve(self):
- x = as_symbol('x')
- y = as_symbol('y')
- z = as_symbol('z')
+ x = as_symbol("x")
+ y = as_symbol("y")
+ z = as_symbol("z")
assert x.linear_solve(x) == (as_number(1), as_number(0))
- assert (x+1).linear_solve(x) == (as_number(1), as_number(1))
- assert (2*x).linear_solve(x) == (as_number(2), as_number(0))
- assert (2*x+3).linear_solve(x) == (as_number(2), as_number(3))
+ assert (x + 1).linear_solve(x) == (as_number(1), as_number(1))
+ assert (2 * x).linear_solve(x) == (as_number(2), as_number(0))
+ assert (2 * x + 3).linear_solve(x) == (as_number(2), as_number(3))
assert as_number(3).linear_solve(x) == (as_number(0), as_number(3))
assert y.linear_solve(x) == (as_number(0), y)
- assert (y*z).linear_solve(x) == (as_number(0), y * z)
+ assert (y * z).linear_solve(x) == (as_number(0), y * z)
- assert (x+y).linear_solve(x) == (as_number(1), y)
- assert (z*x+y).linear_solve(x) == (z, y)
- assert ((z+y)*x+y).linear_solve(x) == (z + y, y)
- assert (z*y*x+y).linear_solve(x) == (z * y, y)
+ assert (x + y).linear_solve(x) == (as_number(1), y)
+ assert (z * x + y).linear_solve(x) == (z, y)
+ assert ((z + y) * x + y).linear_solve(x) == (z + y, y)
+ assert (z * y * x + y).linear_solve(x) == (z * y, y)
- assert_raises(RuntimeError, lambda: (x*x).linear_solve(x))
+ assert_raises(RuntimeError, lambda: (x * x).linear_solve(x))
def test_as_numer_denom(self):
- x = as_symbol('x')
- y = as_symbol('y')
+ x = as_symbol("x")
+ y = as_symbol("y")
n = as_number(123)
assert as_numer_denom(x) == (x, as_number(1))
@@ -446,11 +477,11 @@ class TestSymbolic(util.F2PyTest):
assert as_numer_denom(x / y) == (x, y)
assert as_numer_denom(x * y) == (x * y, as_number(1))
assert as_numer_denom(n + x / y) == (x + n * y, y)
- assert as_numer_denom(n + x / (y - x / n)) == (y * n ** 2, y * n - x)
+ assert as_numer_denom(n + x / (y - x / n)) == (y * n**2, y * n - x)
def test_polynomial_atoms(self):
- x = as_symbol('x')
- y = as_symbol('y')
+ x = as_symbol("x")
+ y = as_symbol("y")
n = as_number(123)
assert x.polynomial_atoms() == {x}
@@ -459,4 +490,4 @@ class TestSymbolic(util.F2PyTest):
assert (y(x)).polynomial_atoms() == {y(x)}
assert (y(x) + x).polynomial_atoms() == {y(x), x}
assert (y(x) * x[y]).polynomial_atoms() == {y(x), x[y]}
- assert (y(x) ** x).polynomial_atoms() == {y(x)}
+ assert (y(x)**x).polynomial_atoms() == {y(x)}
diff --git a/numpy/f2py/tests/util.py b/numpy/f2py/tests/util.py
index 1a6805e75..c115970f4 100644
--- a/numpy/f2py/tests/util.py
+++ b/numpy/f2py/tests/util.py
@@ -3,6 +3,7 @@ Utility functions for
- building and importing modules on test time, using a temporary location
- detecting if compilers are present
+- determining paths to tests
"""
import os
@@ -14,7 +15,10 @@ import atexit
import textwrap
import re
import pytest
+import contextlib
+import numpy
+from pathlib import Path
from numpy.compat import asbytes, asstr
from numpy.testing import temppath
from importlib import import_module
@@ -78,9 +82,11 @@ def _memoize(func):
if isinstance(ret, Exception):
raise ret
return ret
+
wrapper.__name__ = func.__name__
return wrapper
+
#
# Building modules
#
@@ -93,8 +99,7 @@ def build_module(source_files, options=[], skip=[], only=[], module_name=None):
"""
- code = ("import sys; sys.path = %s; import numpy.f2py as f2py2e; "
- "f2py2e.main()" % repr(sys.path))
+ code = f"import sys; sys.path = {sys.path!r}; import numpy.f2py; numpy.f2py.main()"
d = get_module_dir()
@@ -109,29 +114,30 @@ def build_module(source_files, options=[], skip=[], only=[], module_name=None):
dst_sources.append(dst)
base, ext = os.path.splitext(dst)
- if ext in ('.f90', '.f', '.c', '.pyf'):
+ if ext in (".f90", ".f", ".c", ".pyf"):
f2py_sources.append(dst)
# Prepare options
if module_name is None:
module_name = get_temp_module_name()
- f2py_opts = ['-c', '-m', module_name] + options + f2py_sources
+ f2py_opts = ["-c", "-m", module_name] + options + f2py_sources
if skip:
- f2py_opts += ['skip:'] + skip
+ f2py_opts += ["skip:"] + skip
if only:
- f2py_opts += ['only:'] + only
+ f2py_opts += ["only:"] + only
# Build
cwd = os.getcwd()
try:
os.chdir(d)
- cmd = [sys.executable, '-c', code] + f2py_opts
- p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+ cmd = [sys.executable, "-c", code] + f2py_opts
+ p = subprocess.Popen(cmd,
+ stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
out, err = p.communicate()
if p.returncode != 0:
- raise RuntimeError("Running f2py failed: %s\n%s"
- % (cmd[4:], asstr(out)))
+ raise RuntimeError("Running f2py failed: %s\n%s" %
+ (cmd[4:], asstr(out)))
finally:
os.chdir(cwd)
@@ -144,20 +150,28 @@ def build_module(source_files, options=[], skip=[], only=[], module_name=None):
@_memoize
-def build_code(source_code, options=[], skip=[], only=[], suffix=None,
+def build_code(source_code,
+ options=[],
+ skip=[],
+ only=[],
+ suffix=None,
module_name=None):
"""
Compile and import Fortran code using f2py.
"""
if suffix is None:
- suffix = '.f'
+ suffix = ".f"
with temppath(suffix=suffix) as path:
- with open(path, 'w') as f:
+ with open(path, "w") as f:
f.write(source_code)
- return build_module([path], options=options, skip=skip, only=only,
+ return build_module([path],
+ options=options,
+ skip=skip,
+ only=only,
module_name=module_name)
+
#
# Check if compilers are available at all...
#
@@ -174,10 +188,10 @@ def _get_compiler_status():
# XXX: this is really ugly. But I don't know how to invoke Distutils
# in a safer way...
- code = textwrap.dedent("""\
+ code = textwrap.dedent(f"""\
import os
import sys
- sys.path = %(syspath)s
+ sys.path = {repr(sys.path)}
def configuration(parent_name='',top_path=None):
global config
@@ -189,7 +203,7 @@ def _get_compiler_status():
setup(configuration=configuration)
config_cmd = config.get_config_cmd()
- have_c = config_cmd.try_compile('void foo() {}')
+ have_c = config_cmd.try_compile('void foo() {{}}')
print('COMPILERS:%%d,%%d,%%d' %% (have_c,
config.have_f77c(),
config.have_f90c()))
@@ -199,23 +213,27 @@ def _get_compiler_status():
tmpdir = tempfile.mkdtemp()
try:
- script = os.path.join(tmpdir, 'setup.py')
+ script = os.path.join(tmpdir, "setup.py")
- with open(script, 'w') as f:
+ with open(script, "w") as f:
f.write(code)
- cmd = [sys.executable, 'setup.py', 'config']
- p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+ cmd = [sys.executable, "setup.py", "config"]
+ p = subprocess.Popen(cmd,
+ stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=tmpdir)
out, err = p.communicate()
finally:
shutil.rmtree(tmpdir)
- m = re.search(br'COMPILERS:(\d+),(\d+),(\d+)', out)
+ m = re.search(br"COMPILERS:(\d+),(\d+),(\d+)", out)
if m:
- _compiler_status = (bool(int(m.group(1))), bool(int(m.group(2))),
- bool(int(m.group(3))))
+ _compiler_status = (
+ bool(int(m.group(1))),
+ bool(int(m.group(2))),
+ bool(int(m.group(3))),
+ )
# Finished
return _compiler_status
@@ -231,6 +249,7 @@ def has_f77_compiler():
def has_f90_compiler():
return _get_compiler_status()[2]
+
#
# Building with distutils
#
@@ -256,38 +275,38 @@ def build_module_distutils(source_files, config_code, module_name, **kw):
# Build script
config_code = textwrap.dedent(config_code).replace("\n", "\n ")
- code = textwrap.dedent("""\
- import os
- import sys
- sys.path = %(syspath)s
-
- def configuration(parent_name='',top_path=None):
- from numpy.distutils.misc_util import Configuration
- config = Configuration('', parent_name, top_path)
- %(config_code)s
- return config
+ code = fr"""
+import os
+import sys
+sys.path = {repr(sys.path)}
- if __name__ == "__main__":
- from numpy.distutils.core import setup
- setup(configuration=configuration)
- """) % dict(config_code=config_code, syspath=repr(sys.path))
+def configuration(parent_name='',top_path=None):
+ from numpy.distutils.misc_util import Configuration
+ config = Configuration('', parent_name, top_path)
+ {config_code}
+ return config
- script = os.path.join(d, get_temp_module_name() + '.py')
+if __name__ == "__main__":
+ from numpy.distutils.core import setup
+ setup(configuration=configuration)
+ """
+ script = os.path.join(d, get_temp_module_name() + ".py")
dst_sources.append(script)
- with open(script, 'wb') as f:
+ with open(script, "wb") as f:
f.write(asbytes(code))
# Build
cwd = os.getcwd()
try:
os.chdir(d)
- cmd = [sys.executable, script, 'build_ext', '-i']
- p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+ cmd = [sys.executable, script, "build_ext", "-i"]
+ p = subprocess.Popen(cmd,
+ stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
out, err = p.communicate()
if p.returncode != 0:
- raise RuntimeError("Running distutils build failed: %s\n%s"
- % (cmd[4:], asstr(out)))
+ raise RuntimeError("Running distutils build failed: %s\n%s" %
+ (cmd[4:], asstr(out)))
finally:
os.chdir(cwd)
@@ -299,6 +318,7 @@ def build_module_distutils(source_files, config_code, module_name, **kw):
__import__(module_name)
return sys.modules[module_name]
+
#
# Unittest convenience
#
@@ -310,13 +330,13 @@ class F2PyTest:
options = []
skip = []
only = []
- suffix = '.f'
+ suffix = ".f"
module = None
module_name = None
def setup(self):
- if sys.platform == 'win32':
- pytest.skip('Fails with MinGW64 Gfortran (Issue #9673)')
+ if sys.platform == "win32":
+ pytest.skip("Fails with MinGW64 Gfortran (Issue #9673)")
if self.module is not None:
return
@@ -334,9 +354,9 @@ class F2PyTest:
needs_f77 = False
needs_f90 = False
for fn in codes:
- if fn.endswith('.f'):
+ if str(fn).endswith(".f"):
needs_f77 = True
- elif fn.endswith('.f90'):
+ elif str(fn).endswith(".f90"):
needs_f90 = True
if needs_f77 and not has_f77_compiler():
pytest.skip("No Fortran 77 compiler available")
@@ -345,12 +365,41 @@ class F2PyTest:
# Build the module
if self.code is not None:
- self.module = build_code(self.code, options=self.options,
- skip=self.skip, only=self.only,
- suffix=self.suffix,
- module_name=self.module_name)
+ self.module = build_code(
+ self.code,
+ options=self.options,
+ skip=self.skip,
+ only=self.only,
+ suffix=self.suffix,
+ module_name=self.module_name,
+ )
if self.sources is not None:
- self.module = build_module(self.sources, options=self.options,
- skip=self.skip, only=self.only,
- module_name=self.module_name)
+ self.module = build_module(
+ self.sources,
+ options=self.options,
+ skip=self.skip,
+ only=self.only,
+ module_name=self.module_name,
+ )
+
+
+#
+# Helper functions
+#
+
+
+def getpath(*a):
+ # Package root
+ d = Path(numpy.f2py.__file__).parent.resolve()
+ return d.joinpath(*a)
+
+
+@contextlib.contextmanager
+def switchdir(path):
+ curpath = Path.cwd()
+ os.chdir(path)
+ try:
+ yield
+ finally:
+ os.chdir(curpath)
diff --git a/numpy/lib/index_tricks.py b/numpy/lib/index_tricks.py
index 2a4402c89..b69226d48 100644
--- a/numpy/lib/index_tricks.py
+++ b/numpy/lib/index_tricks.py
@@ -227,13 +227,13 @@ class MGridClass(nd_grid):
See Also
--------
- numpy.lib.index_tricks.nd_grid : class of `ogrid` and `mgrid` objects
+ lib.index_tricks.nd_grid : class of `ogrid` and `mgrid` objects
ogrid : like mgrid but returns open (not fleshed out) mesh grids
r_ : array concatenator
Examples
--------
- >>> np.mgrid[0:5,0:5]
+ >>> np.mgrid[0:5, 0:5]
array([[[0, 0, 0, 0, 0],
[1, 1, 1, 1, 1],
[2, 2, 2, 2, 2],
diff --git a/numpy/lib/npyio.py b/numpy/lib/npyio.py
index 6c34e95fe..a839b892a 100644
--- a/numpy/lib/npyio.py
+++ b/numpy/lib/npyio.py
@@ -285,7 +285,8 @@ def load(file, mmap_mode=None, allow_pickle=False, fix_imports=True,
----------
file : file-like object, string, or pathlib.Path
The file to read. File-like objects must support the
- ``seek()`` and ``read()`` methods. Pickled files require that the
+ ``seek()`` and ``read()`` methods and must always
+ be opened in binary mode. Pickled files require that the
file-like object support the ``readline()`` method as well.
mmap_mode : {None, 'r+', 'r', 'w+', 'c'}, optional
If not None, then memory-map the file, using the given mode (see
@@ -1806,22 +1807,21 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
byte_converters = False
# Initialize the filehandle, the LineSplitter and the NameValidator
+ if isinstance(fname, os_PathLike):
+ fname = os_fspath(fname)
+ if isinstance(fname, str):
+ fid = np.lib._datasource.open(fname, 'rt', encoding=encoding)
+ fid_ctx = contextlib.closing(fid)
+ else:
+ fid = fname
+ fid_ctx = contextlib.nullcontext(fid)
try:
- if isinstance(fname, os_PathLike):
- fname = os_fspath(fname)
- if isinstance(fname, str):
- fid = np.lib._datasource.open(fname, 'rt', encoding=encoding)
- fid_ctx = contextlib.closing(fid)
- else:
- fid = fname
- fid_ctx = contextlib.nullcontext(fid)
fhd = iter(fid)
except TypeError as e:
raise TypeError(
- f"fname must be a string, filehandle, list of strings,\n"
- f"or generator. Got {type(fname)} instead."
+ "fname must be a string, a filehandle, a sequence of strings,\n"
+ f"or an iterator of strings. Got {type(fname)} instead."
) from e
-
with fid_ctx:
split_line = LineSplitter(delimiter=delimiter, comments=comments,
autostrip=autostrip, encoding=encoding)
diff --git a/numpy/lib/recfunctions.py b/numpy/lib/recfunctions.py
index a491f612e..ee4fbcd74 100644
--- a/numpy/lib/recfunctions.py
+++ b/numpy/lib/recfunctions.py
@@ -784,7 +784,8 @@ def repack_fields(a, align=False, recurse=False):
This method removes any overlaps and reorders the fields in memory so they
have increasing byte offsets, and adds or removes padding bytes depending
- on the `align` option, which behaves like the `align` option to `np.dtype`.
+ on the `align` option, which behaves like the `align` option to
+ `numpy.dtype`.
If `align=False`, this method produces a "packed" memory layout in which
each field starts at the byte the previous field ended, and any padding
@@ -917,11 +918,12 @@ def structured_to_unstructured(arr, dtype=None, copy=False, casting='unsafe'):
dtype : dtype, optional
The dtype of the output unstructured array.
copy : bool, optional
- See copy argument to `ndarray.astype`. If true, always return a copy.
- If false, and `dtype` requirements are satisfied, a view is returned.
+ See copy argument to `numpy.ndarray.astype`. If true, always return a
+ copy. If false, and `dtype` requirements are satisfied, a view is
+ returned.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
- See casting argument of `ndarray.astype`. Controls what kind of data
- casting may occur.
+ See casting argument of `numpy.ndarray.astype`. Controls what kind of
+ data casting may occur.
Returns
-------
@@ -1020,11 +1022,12 @@ def unstructured_to_structured(arr, dtype=None, names=None, align=False,
align : boolean, optional
Whether to create an aligned memory layout.
copy : bool, optional
- See copy argument to `ndarray.astype`. If true, always return a copy.
- If false, and `dtype` requirements are satisfied, a view is returned.
+ See copy argument to `numpy.ndarray.astype`. If true, always return a
+ copy. If false, and `dtype` requirements are satisfied, a view is
+ returned.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
- See casting argument of `ndarray.astype`. Controls what kind of data
- casting may occur.
+ See casting argument of `numpy.ndarray.astype`. Controls what kind of
+ data casting may occur.
Returns
-------
diff --git a/numpy/lib/scimath.py b/numpy/lib/scimath.py
index 308f1328b..b7ef0d710 100644
--- a/numpy/lib/scimath.py
+++ b/numpy/lib/scimath.py
@@ -234,6 +234,15 @@ def sqrt(x):
>>> np.emath.sqrt([-1,4])
array([0.+1.j, 2.+0.j])
+ Different results are expected because:
+ floating point 0.0 and -0.0 are distinct.
+
+ For more control, explicitly use complex() as follows:
+
+ >>> np.emath.sqrt(complex(-4.0, 0.0))
+ 2j
+ >>> np.emath.sqrt(complex(-4.0, -0.0))
+ -2j
"""
x = _fix_real_lt_zero(x)
return nx.sqrt(x)
diff --git a/numpy/lib/scimath.pyi b/numpy/lib/scimath.pyi
index d0d4af41e..6f196497d 100644
--- a/numpy/lib/scimath.pyi
+++ b/numpy/lib/scimath.pyi
@@ -1,13 +1,94 @@
-from typing import List
+from typing import List, overload, Any
+
+from numpy import complexfloating
+
+from numpy.typing import (
+ NDArray,
+ _ArrayLikeFloat_co,
+ _ArrayLikeComplex_co,
+ _ComplexLike_co,
+ _FloatLike_co,
+)
__all__: List[str]
-def sqrt(x): ...
-def log(x): ...
-def log10(x): ...
-def logn(n, x): ...
-def log2(x): ...
-def power(x, p): ...
-def arccos(x): ...
-def arcsin(x): ...
-def arctanh(x): ...
+@overload
+def sqrt(x: _FloatLike_co) -> Any: ...
+@overload
+def sqrt(x: _ComplexLike_co) -> complexfloating[Any, Any]: ...
+@overload
+def sqrt(x: _ArrayLikeFloat_co) -> NDArray[Any]: ...
+@overload
+def sqrt(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
+
+@overload
+def log(x: _FloatLike_co) -> Any: ...
+@overload
+def log(x: _ComplexLike_co) -> complexfloating[Any, Any]: ...
+@overload
+def log(x: _ArrayLikeFloat_co) -> NDArray[Any]: ...
+@overload
+def log(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
+
+@overload
+def log10(x: _FloatLike_co) -> Any: ...
+@overload
+def log10(x: _ComplexLike_co) -> complexfloating[Any, Any]: ...
+@overload
+def log10(x: _ArrayLikeFloat_co) -> NDArray[Any]: ...
+@overload
+def log10(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
+
+@overload
+def log2(x: _FloatLike_co) -> Any: ...
+@overload
+def log2(x: _ComplexLike_co) -> complexfloating[Any, Any]: ...
+@overload
+def log2(x: _ArrayLikeFloat_co) -> NDArray[Any]: ...
+@overload
+def log2(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
+
+@overload
+def logn(n: _FloatLike_co, x: _FloatLike_co) -> Any: ...
+@overload
+def logn(n: _ComplexLike_co, x: _ComplexLike_co) -> complexfloating[Any, Any]: ...
+@overload
+def logn(n: _ArrayLikeFloat_co, x: _ArrayLikeFloat_co) -> NDArray[Any]: ...
+@overload
+def logn(n: _ArrayLikeComplex_co, x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
+
+@overload
+def power(x: _FloatLike_co, p: _FloatLike_co) -> Any: ...
+@overload
+def power(x: _ComplexLike_co, p: _ComplexLike_co) -> complexfloating[Any, Any]: ...
+@overload
+def power(x: _ArrayLikeFloat_co, p: _ArrayLikeFloat_co) -> NDArray[Any]: ...
+@overload
+def power(x: _ArrayLikeComplex_co, p: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
+
+@overload
+def arccos(x: _FloatLike_co) -> Any: ...
+@overload
+def arccos(x: _ComplexLike_co) -> complexfloating[Any, Any]: ...
+@overload
+def arccos(x: _ArrayLikeFloat_co) -> NDArray[Any]: ...
+@overload
+def arccos(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
+
+@overload
+def arcsin(x: _FloatLike_co) -> Any: ...
+@overload
+def arcsin(x: _ComplexLike_co) -> complexfloating[Any, Any]: ...
+@overload
+def arcsin(x: _ArrayLikeFloat_co) -> NDArray[Any]: ...
+@overload
+def arcsin(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
+
+@overload
+def arctanh(x: _FloatLike_co) -> Any: ...
+@overload
+def arctanh(x: _ComplexLike_co) -> complexfloating[Any, Any]: ...
+@overload
+def arctanh(x: _ArrayLikeFloat_co) -> NDArray[Any]: ...
+@overload
+def arctanh(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
diff --git a/numpy/lib/shape_base.pyi b/numpy/lib/shape_base.pyi
index 8aa283d02..17016c999 100644
--- a/numpy/lib/shape_base.pyi
+++ b/numpy/lib/shape_base.pyi
@@ -18,7 +18,7 @@ from numpy.typing import (
NDArray,
_ShapeLike,
_FiniteNestedSequence,
- _SupportsDType,
+ _SupportsArray,
_ArrayLikeBool_co,
_ArrayLikeUInt_co,
_ArrayLikeInt_co,
@@ -31,7 +31,7 @@ from numpy.core.shape_base import vstack
_SCT = TypeVar("_SCT", bound=generic)
-_ArrayLike = _FiniteNestedSequence[_SupportsDType[dtype[_SCT]]]
+_ArrayLike = _FiniteNestedSequence[_SupportsArray[dtype[_SCT]]]
# The signatures of `__array_wrap__` and `__array_prepare__` are the same;
# give them unique names for the sake of clarity
diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py
index 5201b8e6e..c19660cf0 100644
--- a/numpy/lib/tests/test_io.py
+++ b/numpy/lib/tests/test_io.py
@@ -1425,6 +1425,10 @@ class TestFromTxt(LoadTxtBase):
('F', 25.0, 60.0)], dtype=descriptor)
assert_equal(test, control)
+ def test_bad_fname(self):
+ with pytest.raises(TypeError, match='fname must be a string,'):
+ np.genfromtxt(123)
+
def test_commented_header(self):
# Check that names can be retrieved even if the line is commented out.
data = TextIO("""
diff --git a/numpy/lib/type_check.py b/numpy/lib/type_check.py
index 56afd83ce..94d525f51 100644
--- a/numpy/lib/type_check.py
+++ b/numpy/lib/type_check.py
@@ -6,7 +6,7 @@ import warnings
__all__ = ['iscomplexobj', 'isrealobj', 'imag', 'iscomplex',
'isreal', 'nan_to_num', 'real', 'real_if_close',
- 'typename', 'asfarray', 'mintypecode', 'asscalar',
+ 'typename', 'asfarray', 'mintypecode',
'common_type']
import numpy.core.numeric as _nx
@@ -276,22 +276,22 @@ def isreal(x):
>>> a = np.array([1+1j, 1+0j, 4.5, 3, 2, 2j], dtype=complex)
>>> np.isreal(a)
array([False, True, True, True, True, False])
-
+
The function does not work on string arrays.
>>> a = np.array([2j, "a"], dtype="U")
>>> np.isreal(a) # Warns about non-elementwise comparison
False
-
+
Returns True for all elements in input array of ``dtype=object`` even if
any of the elements is complex.
>>> a = np.array([1, "2", 3+4j], dtype=object)
>>> np.isreal(a)
array([ True, True, True])
-
+
isreal should not be used with object arrays
-
+
>>> a = np.array([1+2j, 2+1j], dtype=object)
>>> np.isreal(a)
array([ True, True])
@@ -405,14 +405,14 @@ def _nan_to_num_dispatcher(x, copy=None, nan=None, posinf=None, neginf=None):
def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None):
"""
Replace NaN with zero and infinity with large finite numbers (default
- behaviour) or with the numbers defined by the user using the `nan`,
+ behaviour) or with the numbers defined by the user using the `nan`,
`posinf` and/or `neginf` keywords.
If `x` is inexact, NaN is replaced by zero or by the user defined value in
- `nan` keyword, infinity is replaced by the largest finite floating point
- values representable by ``x.dtype`` or by the user defined value in
- `posinf` keyword and -infinity is replaced by the most negative finite
- floating point values representable by ``x.dtype`` or by the user defined
+ `nan` keyword, infinity is replaced by the largest finite floating point
+ values representable by ``x.dtype`` or by the user defined value in
+ `posinf` keyword and -infinity is replaced by the most negative finite
+ floating point values representable by ``x.dtype`` or by the user defined
value in `neginf` keyword.
For complex dtypes, the above is applied to each of the real and
@@ -429,27 +429,27 @@ def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None):
in-place (False). The in-place operation only occurs if
casting to an array does not require a copy.
Default is True.
-
+
.. versionadded:: 1.13
nan : int, float, optional
- Value to be used to fill NaN values. If no value is passed
+ Value to be used to fill NaN values. If no value is passed
then NaN values will be replaced with 0.0.
-
+
.. versionadded:: 1.17
posinf : int, float, optional
- Value to be used to fill positive infinity values. If no value is
+ Value to be used to fill positive infinity values. If no value is
passed then positive infinity values will be replaced with a very
large number.
-
+
.. versionadded:: 1.17
neginf : int, float, optional
- Value to be used to fill negative infinity values. If no value is
+ Value to be used to fill negative infinity values. If no value is
passed then negative infinity values will be replaced with a very
small (or negative) number.
-
+
.. versionadded:: 1.17
-
+
Returns
-------
@@ -483,7 +483,7 @@ def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None):
array([ 1.79769313e+308, -1.79769313e+308, 0.00000000e+000, # may vary
-1.28000000e+002, 1.28000000e+002])
>>> np.nan_to_num(x, nan=-9999, posinf=33333333, neginf=33333333)
- array([ 3.3333333e+07, 3.3333333e+07, -9.9990000e+03,
+ array([ 3.3333333e+07, 3.3333333e+07, -9.9990000e+03,
-1.2800000e+02, 1.2800000e+02])
>>> y = np.array([complex(np.inf, np.nan), np.nan, complex(np.nan, np.inf)])
array([ 1.79769313e+308, -1.79769313e+308, 0.00000000e+000, # may vary
@@ -529,7 +529,7 @@ def _real_if_close_dispatcher(a, tol=None):
@array_function_dispatch(_real_if_close_dispatcher)
def real_if_close(a, tol=100):
"""
- If input is complex with all imaginary parts close to zero, return
+ If input is complex with all imaginary parts close to zero, return
real parts.
"Close to zero" is defined as `tol` * (machine epsilon of the type for
@@ -583,40 +583,6 @@ def real_if_close(a, tol=100):
return a
-def _asscalar_dispatcher(a):
- # 2018-10-10, 1.16
- warnings.warn('np.asscalar(a) is deprecated since NumPy v1.16, use '
- 'a.item() instead', DeprecationWarning, stacklevel=3)
- return (a,)
-
-
-@array_function_dispatch(_asscalar_dispatcher)
-def asscalar(a):
- """
- Convert an array of size 1 to its scalar equivalent.
-
- .. deprecated:: 1.16
-
- Deprecated, use `numpy.ndarray.item()` instead.
-
- Parameters
- ----------
- a : ndarray
- Input array of size 1.
-
- Returns
- -------
- out : scalar
- Scalar representation of `a`. The output data type is the same type
- returned by the input's `item` method.
-
- Examples
- --------
- >>> np.asscalar(np.array([24]))
- 24
- """
- return a.item()
-
#-----------------------------------------------------------------------------
_namefromtype = {'S1': 'character',
diff --git a/numpy/lib/type_check.pyi b/numpy/lib/type_check.pyi
index 0a55dbf21..510f36cd7 100644
--- a/numpy/lib/type_check.pyi
+++ b/numpy/lib/type_check.pyi
@@ -151,9 +151,6 @@ def real_if_close(
tol: float = ...,
) -> NDArray[Any]: ...
-# NOTE: deprecated
-# def asscalar(a): ...
-
@overload
def typename(char: L['S1']) -> L['character']: ...
@overload
diff --git a/numpy/lib/utils.py b/numpy/lib/utils.py
index 1df2ab09b..c74ee127d 100644
--- a/numpy/lib/utils.py
+++ b/numpy/lib/utils.py
@@ -429,7 +429,7 @@ def _makenamedict(module='numpy'):
return thedict, dictlist
-def _info(obj, output=sys.stdout):
+def _info(obj, output=None):
"""Provide information about ndarray obj.
Parameters
@@ -455,6 +455,9 @@ def _info(obj, output=sys.stdout):
strides = obj.strides
endian = obj.dtype.byteorder
+ if output is None:
+ output = sys.stdout
+
print("class: ", nm, file=output)
print("shape: ", obj.shape, file=output)
print("strides: ", strides, file=output)
@@ -481,7 +484,7 @@ def _info(obj, output=sys.stdout):
@set_module('numpy')
-def info(object=None, maxwidth=76, output=sys.stdout, toplevel='numpy'):
+def info(object=None, maxwidth=76, output=None, toplevel='numpy'):
"""
Get help information for a function, class, or module.
@@ -496,7 +499,8 @@ def info(object=None, maxwidth=76, output=sys.stdout, toplevel='numpy'):
Printing width.
output : file like object, optional
File like object that the output is written to, default is
- ``stdout``. The object has to be opened in 'w' or 'a' mode.
+ ``None``, in which case ``sys.stdout`` will be used.
+ The object has to be opened in 'w' or 'a' mode.
toplevel : str, optional
Start search at this level.
@@ -541,6 +545,9 @@ def info(object=None, maxwidth=76, output=sys.stdout, toplevel='numpy'):
elif hasattr(object, '_ppimport_attr'):
object = object._ppimport_attr
+ if output is None:
+ output = sys.stdout
+
if object is None:
info(info)
elif isinstance(object, ndarray):
diff --git a/numpy/linalg/tests/test_build.py b/numpy/linalg/tests/test_build.py
deleted file mode 100644
index 868341ff2..000000000
--- a/numpy/linalg/tests/test_build.py
+++ /dev/null
@@ -1,53 +0,0 @@
-from subprocess import PIPE, Popen
-import sys
-import re
-import pytest
-
-from numpy.linalg import lapack_lite
-from numpy.testing import assert_
-
-
-class FindDependenciesLdd:
-
- def __init__(self):
- self.cmd = ['ldd']
-
- try:
- p = Popen(self.cmd, stdout=PIPE, stderr=PIPE)
- stdout, stderr = p.communicate()
- except OSError as e:
- raise RuntimeError(f'command {self.cmd} cannot be run') from e
-
- def get_dependencies(self, lfile):
- p = Popen(self.cmd + [lfile], stdout=PIPE, stderr=PIPE)
- stdout, stderr = p.communicate()
- if not (p.returncode == 0):
- raise RuntimeError(f'failed dependencies check for {lfile}')
-
- return stdout
-
- def grep_dependencies(self, lfile, deps):
- stdout = self.get_dependencies(lfile)
-
- rdeps = dict([(dep, re.compile(dep)) for dep in deps])
- founds = []
- for l in stdout.splitlines():
- for k, v in rdeps.items():
- if v.search(l):
- founds.append(k)
-
- return founds
-
-
-class TestF77Mismatch:
-
- @pytest.mark.skipif(not(sys.platform[:5] == 'linux'),
- reason="no fortran compiler on non-Linux platform")
- def test_lapack(self):
- f = FindDependenciesLdd()
- deps = f.grep_dependencies(lapack_lite.__file__,
- [b'libg2c', b'libgfortran'])
- assert_(len(deps) <= 1,
- """Both g77 and gfortran runtimes linked in lapack_lite ! This is likely to
-cause random crashes and wrong results. See numpy INSTALL.txt for more
-information.""")
diff --git a/numpy/ma/core.py b/numpy/ma/core.py
index 491c2c605..12836967c 100644
--- a/numpy/ma/core.py
+++ b/numpy/ma/core.py
@@ -5666,9 +5666,12 @@ class MaskedArray(ndarray):
Parameters
----------
- axis : {None, int}, optional
+ axis : None or int or tuple of ints, optional
Axis along which to operate. By default, ``axis`` is None and the
flattened input is used.
+ .. versionadded:: 1.7.0
+ If this is a tuple of ints, the minimum is selected over multiple
+ axes, instead of a single axis or all the axes as before.
out : array_like, optional
Alternative output array in which to place the result. Must be of
the same shape and buffer length as the expected output.
@@ -5800,9 +5803,12 @@ class MaskedArray(ndarray):
Parameters
----------
- axis : {None, int}, optional
+ axis : None or int or tuple of ints, optional
Axis along which to operate. By default, ``axis`` is None and the
flattened input is used.
+ .. versionadded:: 1.7.0
+ If this is a tuple of ints, the maximum is selected over multiple
+ axes, instead of a single axis or all the axes as before.
out : array_like, optional
Alternative output array in which to place the result. Must
be of the same shape and buffer length as the expected output.
diff --git a/numpy/random/_examples/cython/setup.py b/numpy/random/_examples/cython/setup.py
index 7e0dd3e05..f41150fdb 100644
--- a/numpy/random/_examples/cython/setup.py
+++ b/numpy/random/_examples/cython/setup.py
@@ -4,6 +4,7 @@ Build the Cython demonstrations of low-level access to NumPy random
Usage: python setup.py build_ext -i
"""
+import setuptools # triggers monkeypatching distutils
from distutils.core import setup
from os.path import dirname, join, abspath
diff --git a/numpy/random/_mt19937.pyx b/numpy/random/_mt19937.pyx
index 16a377cc6..e9a703e2f 100644
--- a/numpy/random/_mt19937.pyx
+++ b/numpy/random/_mt19937.pyx
@@ -109,7 +109,7 @@ cdef class MT19937(BitGenerator):
**Compatibility Guarantee**
- ``MT19937`` makes a guarantee that a fixed seed and will always produce
+ ``MT19937`` makes a guarantee that a fixed seed will always produce
the same random integer stream.
References
diff --git a/numpy/random/mtrand.pyx b/numpy/random/mtrand.pyx
index 3e13503d0..ce09a041c 100644
--- a/numpy/random/mtrand.pyx
+++ b/numpy/random/mtrand.pyx
@@ -4234,18 +4234,21 @@ cdef class RandomState:
ValueError: pvals < 0, pvals > 1 or pvals contains NaNs
"""
- cdef np.npy_intp d, i, sz, offset
+ cdef np.npy_intp d, i, sz, offset, niter
cdef np.ndarray parr, mnarr
cdef double *pix
cdef long *mnix
cdef long ni
- d = len(pvals)
parr = <np.ndarray>np.PyArray_FROMANY(
- pvals, np.NPY_DOUBLE, 1, 1, np.NPY_ARRAY_ALIGNED | np.NPY_ARRAY_C_CONTIGUOUS)
+ pvals, np.NPY_DOUBLE, 0, 1, np.NPY_ARRAY_ALIGNED | np.NPY_ARRAY_C_CONTIGUOUS)
+ if np.PyArray_NDIM(parr) == 0:
+ raise TypeError("pvals must be a 1-d sequence")
+ d = np.PyArray_SIZE(parr)
pix = <double*>np.PyArray_DATA(parr)
check_array_constraint(parr, 'pvals', CONS_BOUNDED_0_1)
- if kahan_sum(pix, d-1) > (1.0 + 1e-12):
+        # Only check if pvals is non-empty because kahan_sum performs no checks
+ if d and kahan_sum(pix, d-1) > (1.0 + 1e-12):
# When floating, but not float dtype, and close, improve the error
# 1.0001 works for float16 and float32
if (isinstance(pvals, np.ndarray)
@@ -4260,7 +4263,6 @@ cdef class RandomState:
else:
msg = "sum(pvals[:-1]) > 1.0"
raise ValueError(msg)
-
if size is None:
shape = (d,)
else:
@@ -4268,7 +4270,6 @@ cdef class RandomState:
shape = (operator.index(size), d)
except:
shape = tuple(size) + (d,)
-
multin = np.zeros(shape, dtype=int)
mnarr = <np.ndarray>multin
mnix = <long*>np.PyArray_DATA(mnarr)
@@ -4276,8 +4277,10 @@ cdef class RandomState:
ni = n
check_constraint(ni, 'n', CONS_NON_NEGATIVE)
offset = 0
+ # gh-20483: Avoids divide by 0
+ niter = sz // d if d else 0
with self.lock, nogil:
- for i in range(sz // d):
+ for i in range(niter):
legacy_random_multinomial(&self._bitgen, ni, &mnix[offset], pix, d, &self._binomial)
offset += d
diff --git a/numpy/random/tests/test_extending.py b/numpy/random/tests/test_extending.py
index 99a819efb..d362092b5 100644
--- a/numpy/random/tests/test_extending.py
+++ b/numpy/random/tests/test_extending.py
@@ -5,6 +5,7 @@ import subprocess
import sys
import warnings
import numpy as np
+from numpy.distutils.misc_util import exec_mod_from_location
try:
import cffi
@@ -75,10 +76,9 @@ def test_cython(tmp_path):
assert so1 is not None
assert so2 is not None
# import the so's without adding the directory to sys.path
- from importlib.machinery import ExtensionFileLoader
- extending = ExtensionFileLoader('extending', so1).load_module()
- extending_distributions = ExtensionFileLoader('extending_distributions', so2).load_module()
-
+ exec_mod_from_location('extending', so1)
+ extending_distributions = exec_mod_from_location(
+ 'extending_distributions', so2)
# actually test the cython c-extension
from numpy.random import PCG64
values = extending_distributions.uniforms_ex(PCG64(0), 10, 'd')
diff --git a/numpy/random/tests/test_randomstate_regression.py b/numpy/random/tests/test_randomstate_regression.py
index 595fb5fd3..7ad19ab55 100644
--- a/numpy/random/tests/test_randomstate_regression.py
+++ b/numpy/random/tests/test_randomstate_regression.py
@@ -201,3 +201,16 @@ class TestRegression:
[3, 4, 2, 3, 3, 1, 5, 3, 1, 3]])
assert_array_equal(random.binomial([[0], [10]], 0.25, size=(2, 10)),
expected)
+
+
+def test_multinomial_empty():
+ # gh-20483
+ # Ensure that empty p-vals are correctly handled
+ assert random.multinomial(10, []).shape == (0,)
+ assert random.multinomial(3, [], size=(7, 5, 3)).shape == (7, 5, 3, 0)
+
+
+def test_multinomial_1d_pval():
+ # gh-20483
+ with pytest.raises(TypeError, match="pvals must be a 1-d"):
+ random.multinomial(10, 0.3)
diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py
index 4c6b64bc9..0eb945d15 100644
--- a/numpy/testing/_private/utils.py
+++ b/numpy/testing/_private/utils.py
@@ -1228,13 +1228,13 @@ def rundocs(filename=None, raise_on_error=True):
>>> np.lib.test(doctests=True) # doctest: +SKIP
"""
- from numpy.compat import npy_load_module
+ from numpy.distutils.misc_util import exec_mod_from_location
import doctest
if filename is None:
f = sys._getframe(1)
filename = f.f_globals['__file__']
name = os.path.splitext(os.path.basename(filename))[0]
- m = npy_load_module(name, filename)
+ m = exec_mod_from_location(name, filename)
tests = doctest.DocTestFinder().find(m)
runner = doctest.DocTestRunner(verbose=False)
diff --git a/numpy/typing/tests/data/fail/array_constructors.pyi b/numpy/typing/tests/data/fail/array_constructors.pyi
index 4f0a60b5b..065b7d8a0 100644
--- a/numpy/typing/tests/data/fail/array_constructors.pyi
+++ b/numpy/typing/tests/data/fail/array_constructors.pyi
@@ -21,10 +21,10 @@ np.linspace(0, 2, retstep=b'False') # E: No overload variant
np.linspace(0, 2, dtype=0) # E: No overload variant
np.linspace(0, 2, axis=None) # E: No overload variant
-np.logspace(None, 'bob') # E: Argument 1
-np.logspace(0, 2, base=None) # E: Argument "base"
+np.logspace(None, 'bob') # E: No overload variant
+np.logspace(0, 2, base=None) # E: No overload variant
-np.geomspace(None, 'bob') # E: Argument 1
+np.geomspace(None, 'bob') # E: No overload variant
np.stack(generator) # E: No overload variant
np.hstack({1, 2}) # E: No overload variant
diff --git a/numpy/typing/tests/data/fail/shape_base.pyi b/numpy/typing/tests/data/fail/shape_base.pyi
new file mode 100644
index 000000000..e709741b7
--- /dev/null
+++ b/numpy/typing/tests/data/fail/shape_base.pyi
@@ -0,0 +1,8 @@
+import numpy as np
+
+class DTypeLike:
+ dtype: np.dtype[np.int_]
+
+dtype_like: DTypeLike
+
+np.expand_dims(dtype_like, (5, 10)) # E: No overload variant
diff --git a/numpy/typing/tests/data/reveal/array_constructors.pyi b/numpy/typing/tests/data/reveal/array_constructors.pyi
index 233988e63..ba5710e0f 100644
--- a/numpy/typing/tests/data/reveal/array_constructors.pyi
+++ b/numpy/typing/tests/data/reveal/array_constructors.pyi
@@ -114,10 +114,24 @@ reveal_type(np.require(B, requirements="W")) # E: SubClass[{float64}]
reveal_type(np.require(B, requirements="A")) # E: SubClass[{float64}]
reveal_type(np.require(C)) # E: ndarray[Any, Any]
-reveal_type(np.linspace(0, 10)) # E: ndarray[Any, Any]
-reveal_type(np.linspace(0, 10, retstep=True)) # E: Tuple[ndarray[Any, Any], Any]
-reveal_type(np.logspace(0, 10)) # E: ndarray[Any, Any]
-reveal_type(np.geomspace(1, 10)) # E: ndarray[Any, Any]
+reveal_type(np.linspace(0, 10)) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(np.linspace(0, 10j)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+reveal_type(np.linspace(0, 10, dtype=np.int64)) # E: ndarray[Any, dtype[{int64}]]
+reveal_type(np.linspace(0, 10, dtype=int)) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.linspace(0, 10, retstep=True)) # E: Tuple[ndarray[Any, dtype[floating[Any]]], floating[Any]]
+reveal_type(np.linspace(0j, 10, retstep=True)) # E: Tuple[ndarray[Any, dtype[complexfloating[Any, Any]]], complexfloating[Any, Any]]
+reveal_type(np.linspace(0, 10, retstep=True, dtype=np.int64)) # E: Tuple[ndarray[Any, dtype[{int64}]], {int64}]
+reveal_type(np.linspace(0j, 10, retstep=True, dtype=int)) # E: Tuple[ndarray[Any, dtype[Any]], Any]
+
+reveal_type(np.logspace(0, 10)) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(np.logspace(0, 10j)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+reveal_type(np.logspace(0, 10, dtype=np.int64)) # E: ndarray[Any, dtype[{int64}]]
+reveal_type(np.logspace(0, 10, dtype=int)) # E: ndarray[Any, dtype[Any]]
+
+reveal_type(np.geomspace(0, 10)) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(np.geomspace(0, 10j)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+reveal_type(np.geomspace(0, 10, dtype=np.int64)) # E: ndarray[Any, dtype[{int64}]]
+reveal_type(np.geomspace(0, 10, dtype=int)) # E: ndarray[Any, dtype[Any]]
reveal_type(np.zeros_like(A)) # E: ndarray[Any, dtype[{float64}]]
reveal_type(np.zeros_like(C)) # E: ndarray[Any, dtype[Any]]
diff --git a/numpy/typing/tests/data/reveal/emath.pyi b/numpy/typing/tests/data/reveal/emath.pyi
new file mode 100644
index 000000000..9ab2d72d2
--- /dev/null
+++ b/numpy/typing/tests/data/reveal/emath.pyi
@@ -0,0 +1,52 @@
+import numpy as np
+import numpy.typing as npt
+
+AR_f8: npt.NDArray[np.float64]
+AR_c16: npt.NDArray[np.complex128]
+f8: np.float64
+c16: np.complex128
+
+reveal_type(np.emath.sqrt(f8)) # E: Any
+reveal_type(np.emath.sqrt(AR_f8)) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.emath.sqrt(c16)) # E: complexfloating[Any, Any]
+reveal_type(np.emath.sqrt(AR_c16)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+
+reveal_type(np.emath.log(f8)) # E: Any
+reveal_type(np.emath.log(AR_f8)) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.emath.log(c16)) # E: complexfloating[Any, Any]
+reveal_type(np.emath.log(AR_c16)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+
+reveal_type(np.emath.log10(f8)) # E: Any
+reveal_type(np.emath.log10(AR_f8)) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.emath.log10(c16)) # E: complexfloating[Any, Any]
+reveal_type(np.emath.log10(AR_c16)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+
+reveal_type(np.emath.log2(f8)) # E: Any
+reveal_type(np.emath.log2(AR_f8)) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.emath.log2(c16)) # E: complexfloating[Any, Any]
+reveal_type(np.emath.log2(AR_c16)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+
+reveal_type(np.emath.logn(f8, 2)) # E: Any
+reveal_type(np.emath.logn(AR_f8, 4)) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.emath.logn(f8, 1j)) # E: complexfloating[Any, Any]
+reveal_type(np.emath.logn(AR_c16, 1.5)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+
+reveal_type(np.emath.power(f8, 2)) # E: Any
+reveal_type(np.emath.power(AR_f8, 4)) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.emath.power(f8, 2j)) # E: complexfloating[Any, Any]
+reveal_type(np.emath.power(AR_c16, 1.5)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+
+reveal_type(np.emath.arccos(f8)) # E: Any
+reveal_type(np.emath.arccos(AR_f8)) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.emath.arccos(c16)) # E: complexfloating[Any, Any]
+reveal_type(np.emath.arccos(AR_c16)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+
+reveal_type(np.emath.arcsin(f8)) # E: Any
+reveal_type(np.emath.arcsin(AR_f8)) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.emath.arcsin(c16)) # E: complexfloating[Any, Any]
+reveal_type(np.emath.arcsin(AR_c16)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+
+reveal_type(np.emath.arctanh(f8)) # E: Any
+reveal_type(np.emath.arctanh(AR_f8)) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.emath.arctanh(c16)) # E: complexfloating[Any, Any]
+reveal_type(np.emath.arctanh(AR_c16)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
diff --git a/numpy/typing/tests/data/reveal/ndarray_misc.pyi b/numpy/typing/tests/data/reveal/ndarray_misc.pyi
index cd1c3136f..f91d6351b 100644
--- a/numpy/typing/tests/data/reveal/ndarray_misc.pyi
+++ b/numpy/typing/tests/data/reveal/ndarray_misc.pyi
@@ -24,6 +24,9 @@ AR_V: NDArray[np.void]
ctypes_obj = AR_f8.ctypes
+reveal_type(AR_f8.__dlpack__()) # E: Any
+reveal_type(AR_f8.__dlpack_device__()) # E: Tuple[int, Literal[0]]
+
reveal_type(ctypes_obj.data) # E: int
reveal_type(ctypes_obj.shape) # E: ctypes.Array[{c_intp}]
reveal_type(ctypes_obj.strides) # E: ctypes.Array[{c_intp}]
diff --git a/numpy/typing/tests/test_typing.py b/numpy/typing/tests/test_typing.py
index fe58a8f4c..bb3914434 100644
--- a/numpy/typing/tests/test_typing.py
+++ b/numpy/typing/tests/test_typing.py
@@ -136,7 +136,7 @@ def test_fail(path: str) -> None:
output_mypy = OUTPUT_MYPY
assert path in output_mypy
for error_line in output_mypy[path]:
- error_line = _strip_filename(error_line)
+ error_line = _strip_filename(error_line).split("\n", 1)[0]
match = re.match(
r"(?P<lineno>\d+): (error|note): .+$",
error_line,
@@ -368,6 +368,7 @@ Expression: {}
Expected reveal: {!r}
Observed reveal: {!r}
"""
+_STRIP_PATTERN = re.compile(r"(\w+\.)+(\w+)")
def _test_reveal(
@@ -378,9 +379,8 @@ def _test_reveal(
lineno: int,
) -> None:
"""Error-reporting helper function for `test_reveal`."""
- strip_pattern = re.compile(r"(\w+\.)+(\w+)")
- stripped_reveal = strip_pattern.sub(strip_func, reveal)
- stripped_expected_reveal = strip_pattern.sub(strip_func, expected_reveal)
+ stripped_reveal = _STRIP_PATTERN.sub(strip_func, reveal)
+ stripped_expected_reveal = _STRIP_PATTERN.sub(strip_func, expected_reveal)
if stripped_reveal not in stripped_expected_reveal:
raise AssertionError(
_REVEAL_MSG.format(lineno,
diff --git a/pavement.py b/pavement.py
index 6fdaae975..025489cbd 100644
--- a/pavement.py
+++ b/pavement.py
@@ -38,7 +38,7 @@ from paver.easy import Bunch, options, task, sh
#-----------------------------------
# Path to the release notes
-RELEASE_NOTES = 'doc/source/release/1.22.0-notes.rst'
+RELEASE_NOTES = 'doc/source/release/1.23.0-notes.rst'
#-------------------------------------------------------
diff --git a/pyproject.toml b/pyproject.toml
index 941c8fa8c..39d6fcd98 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -2,8 +2,8 @@
# Minimum requirements for the build system to execute.
requires = [
"packaging==20.5; platform_machine=='arm64'", # macos M1
- "setuptools<49.2.0",
- "wheel==0.36.2",
+ "setuptools==59.2.0",
+ "wheel==0.37.0",
"Cython>=0.29.24,<3.0", # Note: keep in sync with tools/cythonize.py
]
diff --git a/test_requirements.txt b/test_requirements.txt
index 256b26d9b..9532e3346 100644
--- a/test_requirements.txt
+++ b/test_requirements.txt
@@ -1,11 +1,10 @@
cython==0.29.24
-wheel<0.37.1
-setuptools<49.2.0
+wheel==0.37.0
+setuptools==59.2.0
hypothesis==6.24.1
pytest==6.2.5
pytz==2021.3
pytest-cov==3.0.0
-pickle5; python_version == '3.7' and platform_python_implementation != 'PyPy'
# for numpy.random.test.test_extending
cffi; python_version < '3.10'
# For testing types. Notes on the restrictions:
diff --git a/tools/allocation_tracking/README.md b/tools/allocation_tracking/README.md
index fd4f2c871..6cc4c2a58 100644
--- a/tools/allocation_tracking/README.md
+++ b/tools/allocation_tracking/README.md
@@ -1,11 +1,7 @@
-Example for using the `PyDataMem_SetEventHook` to track allocations inside numpy.
-
-`alloc_hook.pyx` implements a hook in Cython that calls back into a python
-function. `track_allocations.py` uses it for a simple listing of allocations.
-It can be built with the `setup.py` file in this folder.
-
Note that since Python 3.6 the builtin tracemalloc module can be used to
track allocations inside numpy.
Numpy places its CPU memory allocations into the `np.lib.tracemalloc_domain`
domain.
See https://docs.python.org/3/library/tracemalloc.html.
+
+The tool that used to be here has been deprecated.
diff --git a/tools/allocation_tracking/alloc_hook.pyx b/tools/allocation_tracking/alloc_hook.pyx
deleted file mode 100644
index eeefe1704..000000000
--- a/tools/allocation_tracking/alloc_hook.pyx
+++ /dev/null
@@ -1,42 +0,0 @@
-# A cython wrapper for using python functions as callbacks for
-# PyDataMem_SetEventHook.
-
-cimport numpy as np
-
-cdef extern from "Python.h":
- object PyLong_FromVoidPtr(void *)
- void *PyLong_AsVoidPtr(object)
-
-ctypedef void PyDataMem_EventHookFunc(void *inp, void *outp, size_t size,
- void *user_data)
-cdef extern from "numpy/arrayobject.h":
- PyDataMem_EventHookFunc * \
- PyDataMem_SetEventHook(PyDataMem_EventHookFunc *newhook,
- void *user_data, void **old_data)
-
-np.import_array()
-
-cdef void pyhook(void *old, void *new, size_t size, void *user_data):
- cdef object pyfunc = <object> user_data
- pyfunc(PyLong_FromVoidPtr(old),
- PyLong_FromVoidPtr(new),
- size)
-
-class NumpyAllocHook:
- def __init__(self, callback):
- self.callback = callback
-
- def __enter__(self):
- cdef void *old_hook, *old_data
- old_hook = <void *> \
- PyDataMem_SetEventHook(<PyDataMem_EventHookFunc *> pyhook,
- <void *> self.callback,
- <void **> &old_data)
- self.old_hook = PyLong_FromVoidPtr(old_hook)
- self.old_data = PyLong_FromVoidPtr(old_data)
-
- def __exit__(self):
- PyDataMem_SetEventHook(<PyDataMem_EventHookFunc *> \
- PyLong_AsVoidPtr(self.old_hook),
- <void *> PyLong_AsVoidPtr(self.old_data),
- <void **> 0)
diff --git a/tools/allocation_tracking/setup.py b/tools/allocation_tracking/setup.py
deleted file mode 100644
index 4462f9f4e..000000000
--- a/tools/allocation_tracking/setup.py
+++ /dev/null
@@ -1,9 +0,0 @@
-from distutils.core import setup
-from distutils.extension import Extension
-from Cython.Distutils import build_ext
-import numpy
-
-setup(
- cmdclass = {'build_ext': build_ext},
- ext_modules = [Extension("alloc_hook", ["alloc_hook.pyx"],
- include_dirs=[numpy.get_include()])])
diff --git a/tools/allocation_tracking/sorttable.js b/tools/allocation_tracking/sorttable.js
deleted file mode 100644
index c9528873e..000000000
--- a/tools/allocation_tracking/sorttable.js
+++ /dev/null
@@ -1,493 +0,0 @@
-/*
- SortTable
- version 2
- 7th April 2007
- Stuart Langridge, https://www.kryogenix.org/code/browser/sorttable/
-
- Instructions:
- Download this file
- Add <script src="sorttable.js"></script> to your HTML
- Add class="sortable" to any table you'd like to make sortable
- Click on the headers to sort
-
- Thanks to many, many people for contributions and suggestions.
- Licenced as X11: https://www.kryogenix.org/code/browser/licence.html
- This basically means: do what you want with it.
-*/
-
-
-var stIsIE = /*@cc_on!@*/false;
-
-sorttable = {
- init: function() {
- // quit if this function has already been called
- if (arguments.callee.done) return;
- // flag this function so we don't do the same thing twice
- arguments.callee.done = true;
- // kill the timer
- if (_timer) clearInterval(_timer);
-
- if (!document.createElement || !document.getElementsByTagName) return;
-
- sorttable.DATE_RE = /^(\d\d?)[\/\.-](\d\d?)[\/\.-]((\d\d)?\d\d)$/;
-
- forEach(document.getElementsByTagName('table'), function(table) {
- if (table.className.search(/\bsortable\b/) != -1) {
- sorttable.makeSortable(table);
- }
- });
-
- },
-
- makeSortable: function(table) {
- if (table.getElementsByTagName('thead').length == 0) {
- // table doesn't have a tHead. Since it should have, create one and
- // put the first table row in it.
- the = document.createElement('thead');
- the.appendChild(table.rows[0]);
- table.insertBefore(the,table.firstChild);
- }
- // Safari doesn't support table.tHead, sigh
- if (table.tHead == null) table.tHead = table.getElementsByTagName('thead')[0];
-
- if (table.tHead.rows.length != 1) return; // can't cope with two header rows
-
- // Sorttable v1 put rows with a class of "sortbottom" at the bottom (as
- // "total" rows, for example). This is B&R, since what you're supposed
- // to do is put them in a tfoot. So, if there are sortbottom rows,
- // for backwards compatibility, move them to tfoot (creating it if needed).
- sortbottomrows = [];
- for (var i=0; i<table.rows.length; i++) {
- if (table.rows[i].className.search(/\bsortbottom\b/) != -1) {
- sortbottomrows[sortbottomrows.length] = table.rows[i];
- }
- }
- if (sortbottomrows) {
- if (table.tFoot == null) {
- // table doesn't have a tfoot. Create one.
- tfo = document.createElement('tfoot');
- table.appendChild(tfo);
- }
- for (var i=0; i<sortbottomrows.length; i++) {
- tfo.appendChild(sortbottomrows[i]);
- }
- delete sortbottomrows;
- }
-
- // work through each column and calculate its type
- headrow = table.tHead.rows[0].cells;
- for (var i=0; i<headrow.length; i++) {
- // manually override the type with a sorttable_type attribute
- if (!headrow[i].className.match(/\bsorttable_nosort\b/)) { // skip this col
- mtch = headrow[i].className.match(/\bsorttable_([a-z0-9]+)\b/);
- if (mtch) { override = mtch[1]; }
- if (mtch && typeof sorttable["sort_"+override] == 'function') {
- headrow[i].sorttable_sortfunction = sorttable["sort_"+override];
- } else {
- headrow[i].sorttable_sortfunction = sorttable.guessType(table,i);
- }
- // make it clickable to sort
- headrow[i].sorttable_columnindex = i;
- headrow[i].sorttable_tbody = table.tBodies[0];
- dean_addEvent(headrow[i],"click", function(e) {
-
- if (this.className.search(/\bsorttable_sorted\b/) != -1) {
- // if we're already sorted by this column, just
- // reverse the table, which is quicker
- sorttable.reverse(this.sorttable_tbody);
- this.className = this.className.replace('sorttable_sorted',
- 'sorttable_sorted_reverse');
- this.removeChild(document.getElementById('sorttable_sortfwdind'));
- sortrevind = document.createElement('span');
- sortrevind.id = "sorttable_sortrevind";
- sortrevind.innerHTML = stIsIE ? '&nbsp<font face="webdings">5</font>' : '&nbsp;&#x25B4;';
- this.appendChild(sortrevind);
- return;
- }
- if (this.className.search(/\bsorttable_sorted_reverse\b/) != -1) {
- // if we're already sorted by this column in reverse, just
- // re-reverse the table, which is quicker
- sorttable.reverse(this.sorttable_tbody);
- this.className = this.className.replace('sorttable_sorted_reverse',
- 'sorttable_sorted');
- this.removeChild(document.getElementById('sorttable_sortrevind'));
- sortfwdind = document.createElement('span');
- sortfwdind.id = "sorttable_sortfwdind";
- sortfwdind.innerHTML = stIsIE ? '&nbsp<font face="webdings">6</font>' : '&nbsp;&#x25BE;';
- this.appendChild(sortfwdind);
- return;
- }
-
- // remove sorttable_sorted classes
- theadrow = this.parentNode;
- forEach(theadrow.childNodes, function(cell) {
- if (cell.nodeType == 1) { // an element
- cell.className = cell.className.replace('sorttable_sorted_reverse','');
- cell.className = cell.className.replace('sorttable_sorted','');
- }
- });
- sortfwdind = document.getElementById('sorttable_sortfwdind');
- if (sortfwdind) { sortfwdind.parentNode.removeChild(sortfwdind); }
- sortrevind = document.getElementById('sorttable_sortrevind');
- if (sortrevind) { sortrevind.parentNode.removeChild(sortrevind); }
-
- this.className += ' sorttable_sorted';
- sortfwdind = document.createElement('span');
- sortfwdind.id = "sorttable_sortfwdind";
- sortfwdind.innerHTML = stIsIE ? '&nbsp<font face="webdings">6</font>' : '&nbsp;&#x25BE;';
- this.appendChild(sortfwdind);
-
- // build an array to sort. This is a Schwartzian transform thing,
- // i.e., we "decorate" each row with the actual sort key,
- // sort based on the sort keys, and then put the rows back in order
- // which is a lot faster because you only do getInnerText once per row
- row_array = [];
- col = this.sorttable_columnindex;
- rows = this.sorttable_tbody.rows;
- for (var j=0; j<rows.length; j++) {
- row_array[row_array.length] = [sorttable.getInnerText(rows[j].cells[col]), rows[j]];
- }
- /* If you want a stable sort, uncomment the following line */
- //sorttable.shaker_sort(row_array, this.sorttable_sortfunction);
- /* and comment out this one */
- row_array.sort(this.sorttable_sortfunction);
-
- tb = this.sorttable_tbody;
- for (var j=0; j<row_array.length; j++) {
- tb.appendChild(row_array[j][1]);
- }
-
- delete row_array;
- });
- }
- }
- },
-
- guessType: function(table, column) {
- // guess the type of a column based on its first non-blank row
- sortfn = sorttable.sort_alpha;
- for (var i=0; i<table.tBodies[0].rows.length; i++) {
- text = sorttable.getInnerText(table.tBodies[0].rows[i].cells[column]);
- if (text != '') {
- if (text.match(/^-?[£$¤]?[\d,.]+%?$/)) {
- return sorttable.sort_numeric;
- }
- // check for a date: dd/mm/yyyy or dd/mm/yy
- // can have / or . or - as separator
- // can be mm/dd as well
- possdate = text.match(sorttable.DATE_RE)
- if (possdate) {
- // looks like a date
- first = parseInt(possdate[1]);
- second = parseInt(possdate[2]);
- if (first > 12) {
- // definitely dd/mm
- return sorttable.sort_ddmm;
- } else if (second > 12) {
- return sorttable.sort_mmdd;
- } else {
- // looks like a date, but we can't tell which, so assume
- // that it's dd/mm (English imperialism!) and keep looking
- sortfn = sorttable.sort_ddmm;
- }
- }
- }
- }
- return sortfn;
- },
-
- getInnerText: function(node) {
- // gets the text we want to use for sorting for a cell.
- // strips leading and trailing whitespace.
- // this is *not* a generic getInnerText function; it's special to sorttable.
- // for example, you can override the cell text with a customkey attribute.
- // it also gets .value for <input> fields.
-
- hasInputs = (typeof node.getElementsByTagName == 'function') &&
- node.getElementsByTagName('input').length;
-
- if (node.getAttribute("sorttable_customkey") != null) {
- return node.getAttribute("sorttable_customkey");
- }
- else if (typeof node.textContent != 'undefined' && !hasInputs) {
- return node.textContent.replace(/^\s+|\s+$/g, '');
- }
- else if (typeof node.innerText != 'undefined' && !hasInputs) {
- return node.innerText.replace(/^\s+|\s+$/g, '');
- }
- else if (typeof node.text != 'undefined' && !hasInputs) {
- return node.text.replace(/^\s+|\s+$/g, '');
- }
- else {
- switch (node.nodeType) {
- case 3:
- if (node.nodeName.toLowerCase() == 'input') {
- return node.value.replace(/^\s+|\s+$/g, '');
- }
- case 4:
- return node.nodeValue.replace(/^\s+|\s+$/g, '');
- break;
- case 1:
- case 11:
- var innerText = '';
- for (var i = 0; i < node.childNodes.length; i++) {
- innerText += sorttable.getInnerText(node.childNodes[i]);
- }
- return innerText.replace(/^\s+|\s+$/g, '');
- break;
- default:
- return '';
- }
- }
- },
-
- reverse: function(tbody) {
- // reverse the rows in a tbody
- newrows = [];
- for (var i=0; i<tbody.rows.length; i++) {
- newrows[newrows.length] = tbody.rows[i];
- }
- for (var i=newrows.length-1; i>=0; i--) {
- tbody.appendChild(newrows[i]);
- }
- delete newrows;
- },
-
- /* sort functions
- each sort function takes two parameters, a and b
- you are comparing a[0] and b[0] */
- sort_numeric: function(a,b) {
- aa = parseFloat(a[0].replace(/[^0-9.-]/g,''));
- if (isNaN(aa)) aa = 0;
- bb = parseFloat(b[0].replace(/[^0-9.-]/g,''));
- if (isNaN(bb)) bb = 0;
- return aa-bb;
- },
- sort_alpha: function(a,b) {
- if (a[0]==b[0]) return 0;
- if (a[0]<b[0]) return -1;
- return 1;
- },
- sort_ddmm: function(a,b) {
- mtch = a[0].match(sorttable.DATE_RE);
- y = mtch[3]; m = mtch[2]; d = mtch[1];
- if (m.length == 1) m = '0'+m;
- if (d.length == 1) d = '0'+d;
- dt1 = y+m+d;
- mtch = b[0].match(sorttable.DATE_RE);
- y = mtch[3]; m = mtch[2]; d = mtch[1];
- if (m.length == 1) m = '0'+m;
- if (d.length == 1) d = '0'+d;
- dt2 = y+m+d;
- if (dt1==dt2) return 0;
- if (dt1<dt2) return -1;
- return 1;
- },
- sort_mmdd: function(a,b) {
- mtch = a[0].match(sorttable.DATE_RE);
- y = mtch[3]; d = mtch[2]; m = mtch[1];
- if (m.length == 1) m = '0'+m;
- if (d.length == 1) d = '0'+d;
- dt1 = y+m+d;
- mtch = b[0].match(sorttable.DATE_RE);
- y = mtch[3]; d = mtch[2]; m = mtch[1];
- if (m.length == 1) m = '0'+m;
- if (d.length == 1) d = '0'+d;
- dt2 = y+m+d;
- if (dt1==dt2) return 0;
- if (dt1<dt2) return -1;
- return 1;
- },
-
- shaker_sort: function(list, comp_func) {
- // A stable sort function to allow multi-level sorting of data
- // see: https://en.wikipedia.org/wiki/Cocktail_shaker_sort
- // thanks to Joseph Nahmias
- var b = 0;
- var t = list.length - 1;
- var swap = true;
-
- while(swap) {
- swap = false;
- for(var i = b; i < t; ++i) {
- if ( comp_func(list[i], list[i+1]) > 0 ) {
- var q = list[i]; list[i] = list[i+1]; list[i+1] = q;
- swap = true;
- }
- } // for
- t--;
-
- if (!swap) break;
-
- for(var i = t; i > b; --i) {
- if ( comp_func(list[i], list[i-1]) < 0 ) {
- var q = list[i]; list[i] = list[i-1]; list[i-1] = q;
- swap = true;
- }
- } // for
- b++;
-
- } // while(swap)
- }
-}
-
-/* ******************************************************************
- Supporting functions: bundled here to avoid depending on a library
- ****************************************************************** */
-
-// Dean Edwards/Matthias Miller/John Resig
-
-/* for Mozilla/Opera9 */
-if (document.addEventListener) {
- document.addEventListener("DOMContentLoaded", sorttable.init, false);
-}
-
-/* for Internet Explorer */
-/*@cc_on @*/
-/*@if (@_win32)
- document.write("<script id=__ie_onload defer src=javascript:void(0)><\/script>");
- var script = document.getElementById("__ie_onload");
- script.onreadystatechange = function() {
- if (this.readyState == "complete") {
- sorttable.init(); // call the onload handler
- }
- };
-/*@end @*/
-
-/* for Safari */
-if (/WebKit/i.test(navigator.userAgent)) { // sniff
- var _timer = setInterval(function() {
- if (/loaded|complete/.test(document.readyState)) {
- sorttable.init(); // call the onload handler
- }
- }, 10);
-}
-
-/* for other browsers */
-window.onload = sorttable.init;
-
-// written by Dean Edwards, 2005
-// with input from Tino Zijdel, Matthias Miller, Diego Perini
-
-// http://dean.edwards.name/weblog/2005/10/add-event/
-
-function dean_addEvent(element, type, handler) {
- if (element.addEventListener) {
- element.addEventListener(type, handler, false);
- } else {
- // assign each event handler a unique ID
- if (!handler.$$guid) handler.$$guid = dean_addEvent.guid++;
- // create a hash table of event types for the element
- if (!element.events) element.events = {};
- // create a hash table of event handlers for each element/event pair
- var handlers = element.events[type];
- if (!handlers) {
- handlers = element.events[type] = {};
- // store the existing event handler (if there is one)
- if (element["on" + type]) {
- handlers[0] = element["on" + type];
- }
- }
- // store the event handler in the hash table
- handlers[handler.$$guid] = handler;
- // assign a global event handler to do all the work
- element["on" + type] = handleEvent;
- }
-};
-// a counter used to create unique IDs
-dean_addEvent.guid = 1;
-
-function removeEvent(element, type, handler) {
- if (element.removeEventListener) {
- element.removeEventListener(type, handler, false);
- } else {
- // delete the event handler from the hash table
- if (element.events && element.events[type]) {
- delete element.events[type][handler.$$guid];
- }
- }
-};
-
-function handleEvent(event) {
- var returnValue = true;
- // grab the event object (IE uses a global event object)
- event = event || fixEvent(((this.ownerDocument || this.document || this).parentWindow || window).event);
- // get a reference to the hash table of event handlers
- var handlers = this.events[event.type];
- // execute each event handler
- for (var i in handlers) {
- this.$$handleEvent = handlers[i];
- if (this.$$handleEvent(event) === false) {
- returnValue = false;
- }
- }
- return returnValue;
-};
-
-function fixEvent(event) {
- // add W3C standard event methods
- event.preventDefault = fixEvent.preventDefault;
- event.stopPropagation = fixEvent.stopPropagation;
- return event;
-};
-fixEvent.preventDefault = function() {
- this.returnValue = false;
-};
-fixEvent.stopPropagation = function() {
- this.cancelBubble = true;
-}
-
-// Dean's forEach: http://dean.edwards.name/base/forEach.js
-/*
- forEach, version 1.0
- Copyright 2006, Dean Edwards
- License: https://www.opensource.org/licenses/mit-license.php
-*/
-
-// array-like enumeration
-if (!Array.forEach) { // mozilla already supports this
- Array.forEach = function(array, block, context) {
- for (var i = 0; i < array.length; i++) {
- block.call(context, array[i], i, array);
- }
- };
-}
-
-// generic enumeration
-Function.prototype.forEach = function(object, block, context) {
- for (var key in object) {
- if (typeof this.prototype[key] == "undefined") {
- block.call(context, object[key], key, object);
- }
- }
-};
-
-// character enumeration
-String.forEach = function(string, block, context) {
- Array.forEach(string.split(""), function(chr, index) {
- block.call(context, chr, index, string);
- });
-};
-
-// globally resolve forEach enumeration
-var forEach = function(object, block, context) {
- if (object) {
- var resolve = Object; // default
- if (object instanceof Function) {
- // functions have a "length" property
- resolve = Function;
- } else if (object.forEach instanceof Function) {
- // the object implements a custom forEach method so use that
- object.forEach(block, context);
- return;
- } else if (typeof object == "string") {
- // the object is a string
- resolve = String;
- } else if (typeof object.length == "number") {
- // the object is array-like
- resolve = Array;
- }
- resolve.forEach(object, block, context);
- }
-};
-
diff --git a/tools/allocation_tracking/track_allocations.py b/tools/allocation_tracking/track_allocations.py
deleted file mode 100644
index 2a80d8f87..000000000
--- a/tools/allocation_tracking/track_allocations.py
+++ /dev/null
@@ -1,140 +0,0 @@
-import numpy as np
-import gc
-import inspect
-from alloc_hook import NumpyAllocHook
-
-class AllocationTracker:
- def __init__(self, threshold=0):
- '''track numpy allocations of size threshold bytes or more.'''
-
- self.threshold = threshold
-
- # The total number of bytes currently allocated with size above
- # threshold
- self.total_bytes = 0
-
- # We buffer requests line by line and move them into the allocation
- # trace when a new line occurs
- self.current_line = None
- self.pending_allocations = []
-
- self.blocksizes = {}
-
- # list of (lineinfo, bytes allocated, bytes freed, # allocations, #
- # frees, maximum memory usage, long-lived bytes allocated)
- self.allocation_trace = []
-
- self.numpy_hook = NumpyAllocHook(self.hook)
-
- def __enter__(self):
- self.numpy_hook.__enter__()
-
- def __exit__(self, type, value, traceback):
- self.check_line_changed() # forces pending events to be handled
- self.numpy_hook.__exit__()
-
- def hook(self, inptr, outptr, size):
- # minimize the chances that the garbage collector kicks in during a
- # cython __dealloc__ call and causes a double delete of the current
- # object. To avoid this fully the hook would have to avoid all python
- # api calls, e.g. by being implemented in C like python 3.4's
- # tracemalloc module
- gc_on = gc.isenabled()
- gc.disable()
- if outptr == 0: # it's a free
- self.free_cb(inptr)
- elif inptr != 0: # realloc
- self.realloc_cb(inptr, outptr, size)
- else: # malloc
- self.alloc_cb(outptr, size)
- if gc_on:
- gc.enable()
-
- def alloc_cb(self, ptr, size):
- if size >= self.threshold:
- self.check_line_changed()
- self.blocksizes[ptr] = size
- self.pending_allocations.append(size)
-
- def free_cb(self, ptr):
- size = self.blocksizes.pop(ptr, 0)
- if size:
- self.check_line_changed()
- self.pending_allocations.append(-size)
-
- def realloc_cb(self, newptr, oldptr, size):
- if (size >= self.threshold) or (oldptr in self.blocksizes):
- self.check_line_changed()
- oldsize = self.blocksizes.pop(oldptr, 0)
- self.pending_allocations.append(size - oldsize)
- self.blocksizes[newptr] = size
-
- def get_code_line(self):
- # first frame is this line, then check_line_changed(), then 2 callbacks,
- # then actual code.
- try:
- return inspect.stack()[4][1:]
- except Exception:
- return inspect.stack()[0][1:]
-
- def check_line_changed(self):
- line = self.get_code_line()
- if line != self.current_line and (self.current_line is not None):
- # move pending events into the allocation_trace
- max_size = self.total_bytes
- bytes_allocated = 0
- bytes_freed = 0
- num_allocations = 0
- num_frees = 0
- before_size = self.total_bytes
- for allocation in self.pending_allocations:
- self.total_bytes += allocation
- if allocation > 0:
- bytes_allocated += allocation
- num_allocations += 1
- else:
- bytes_freed += -allocation
- num_frees += 1
- max_size = max(max_size, self.total_bytes)
- long_lived = max(self.total_bytes - before_size, 0)
- self.allocation_trace.append((self.current_line, bytes_allocated,
- bytes_freed, num_allocations,
- num_frees, max_size, long_lived))
- # clear pending allocations
- self.pending_allocations = []
- # move to the new line
- self.current_line = line
-
- def write_html(self, filename):
- with open(filename, "w") as f:
- f.write('<HTML><HEAD><script src="sorttable.js"></script></HEAD><BODY>\n')
- f.write('<TABLE class="sortable" width=100%>\n')
- f.write("<TR>\n")
- cols = "event#,lineinfo,bytes allocated,bytes freed,#allocations,#frees,max memory usage,long lived bytes".split(',')
- for header in cols:
- f.write(" <TH>{0}</TH>".format(header))
- f.write("\n</TR>\n")
- for idx, event in enumerate(self.allocation_trace):
- f.write("<TR>\n")
- event = [idx] + list(event)
- for col, val in zip(cols, event):
- if col == 'lineinfo':
- # special handling
- try:
- filename, line, module, code, index = val
- val = "{0}({1}): {2}".format(filename, line, code[index])
- except Exception:
- # sometimes this info is not available (from eval()?)
- val = str(val)
- f.write(" <TD>{0}</TD>".format(val))
- f.write("\n</TR>\n")
- f.write("</TABLE></BODY></HTML>\n")
-
-
-if __name__ == '__main__':
- tracker = AllocationTracker(1000)
- with tracker:
- for i in range(100):
- np.zeros(i * 100)
- np.zeros(i * 200)
- tracker.write_html("allocations.html")
diff --git a/tools/functions_missing_types.py b/tools/functions_missing_types.py
index 0461aabd3..99c6887a9 100755
--- a/tools/functions_missing_types.py
+++ b/tools/functions_missing_types.py
@@ -32,7 +32,6 @@ EXCLUDE_LIST = {
"math",
# Accidentally public, deprecated, or shouldn't be used
"Tester",
- "alen",
"add_docstring",
"add_newdoc",
"add_newdoc_ufunc",
diff --git a/tools/wheels/LICENSE_win32.txt b/tools/wheels/LICENSE_win32.txt
new file mode 100644
index 000000000..1014d77cd
--- /dev/null
+++ b/tools/wheels/LICENSE_win32.txt
@@ -0,0 +1,938 @@
+
+----
+
+This binary distribution of NumPy also bundles the following software:
+
+
+Name: OpenBLAS
+Files: extra-dll\libopenb*.dll
+Description: bundled as a dynamically linked library
+Availability: https://github.com/xianyi/OpenBLAS/
+License: 3-clause BSD
+ Copyright (c) 2011-2014, The OpenBLAS Project
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ 1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ 3. Neither the name of the OpenBLAS project nor the names of
+ its contributors may be used to endorse or promote products
+ derived from this software without specific prior written
+ permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+Name: LAPACK
+Files: extra-dll\libopenb*.dll
+Description: bundled in OpenBLAS
+Availability: https://github.com/xianyi/OpenBLAS/
+License 3-clause BSD
+ Copyright (c) 1992-2013 The University of Tennessee and The University
+ of Tennessee Research Foundation. All rights
+ reserved.
+ Copyright (c) 2000-2013 The University of California Berkeley. All
+ rights reserved.
+ Copyright (c) 2006-2013 The University of Colorado Denver. All rights
+ reserved.
+
+ $COPYRIGHT$
+
+ Additional copyrights may follow
+
+ $HEADER$
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ - Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ - Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer listed
+ in this license in the documentation and/or other materials
+ provided with the distribution.
+
+ - Neither the name of the copyright holders nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ The copyright holders provide no reassurances that the source code
+ provided does not infringe any patent, copyright, or any other
+ intellectual property rights of third parties. The copyright holders
+ disclaim any liability to any recipient for claims brought against
+ recipient by any third party for infringement of that parties
+ intellectual property rights.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+Name: GCC runtime library
+Files: extra-dll\*.dll
+Description: statically linked, in DLL files compiled with gfortran only
+Availability: https://gcc.gnu.org/viewcvs/gcc/
+License: GPLv3 + runtime exception
+ Copyright (C) 2002-2017 Free Software Foundation, Inc.
+
+ Libgfortran is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ Libgfortran is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>.
+
+
+Name: Microsoft Visual C++ Runtime Files
+Files: extra-dll\msvcp140.dll
+License: MSVC
+ https://www.visualstudio.com/license-terms/distributable-code-microsoft-visual-studio-2015-rc-microsoft-visual-studio-2015-sdk-rc-includes-utilities-buildserver-files/#visual-c-runtime
+
+ Subject to the License Terms for the software, you may copy and
+ distribute with your program any of the files within the followng
+ folder and its subfolders except as noted below. You may not modify
+ these files.
+
+ C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\redist
+
+ You may not distribute the contents of the following folders:
+
+ C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\redist\debug_nonredist
+ C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\redist\onecore\debug_nonredist
+
+ Subject to the License Terms for the software, you may copy and
+ distribute the following files with your program in your program’s
+ application local folder or by deploying them into the Global
+ Assembly Cache (GAC):
+
+ VC\atlmfc\lib\mfcmifc80.dll
+ VC\atlmfc\lib\amd64\mfcmifc80.dll
+
+
+Name: Microsoft Visual C++ Runtime Files
+Files: extra-dll\msvc*90.dll, extra-dll\Microsoft.VC90.CRT.manifest
+License: MSVC
+ For your convenience, we have provided the following folders for
+ use when redistributing VC++ runtime files. Subject to the license
+ terms for the software, you may redistribute the folder
+ (unmodified) in the application local folder as a sub-folder with
+ no change to the folder name. You may also redistribute all the
+ files (*.dll and *.manifest) within a folder, listed below the
+ folder for your convenience, as an entire set.
+
+ \VC\redist\x86\Microsoft.VC90.ATL\
+ atl90.dll
+ Microsoft.VC90.ATL.manifest
+ \VC\redist\ia64\Microsoft.VC90.ATL\
+ atl90.dll
+ Microsoft.VC90.ATL.manifest
+ \VC\redist\amd64\Microsoft.VC90.ATL\
+ atl90.dll
+ Microsoft.VC90.ATL.manifest
+ \VC\redist\x86\Microsoft.VC90.CRT\
+ msvcm90.dll
+ msvcp90.dll
+ msvcr90.dll
+ Microsoft.VC90.CRT.manifest
+ \VC\redist\ia64\Microsoft.VC90.CRT\
+ msvcm90.dll
+ msvcp90.dll
+ msvcr90.dll
+ Microsoft.VC90.CRT.manifest
+
+----
+
+Full text of license texts referred to above follows (that they are
+listed below does not necessarily imply the conditions apply to the
+present binary release):
+
+----
+
+GCC RUNTIME LIBRARY EXCEPTION
+
+Version 3.1, 31 March 2009
+
+Copyright (C) 2009 Free Software Foundation, Inc. <http://fsf.org/>
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+This GCC Runtime Library Exception ("Exception") is an additional
+permission under section 7 of the GNU General Public License, version
+3 ("GPLv3"). It applies to a given file (the "Runtime Library") that
+bears a notice placed by the copyright holder of the file stating that
+the file is governed by GPLv3 along with this Exception.
+
+When you use GCC to compile a program, GCC may combine portions of
+certain GCC header files and runtime libraries with the compiled
+program. The purpose of this Exception is to allow compilation of
+non-GPL (including proprietary) programs to use, in this way, the
+header files and runtime libraries covered by this Exception.
+
+0. Definitions.
+
+A file is an "Independent Module" if it either requires the Runtime
+Library for execution after a Compilation Process, or makes use of an
+interface provided by the Runtime Library, but is not otherwise based
+on the Runtime Library.
+
+"GCC" means a version of the GNU Compiler Collection, with or without
+modifications, governed by version 3 (or a specified later version) of
+the GNU General Public License (GPL) with the option of using any
+subsequent versions published by the FSF.
+
+"GPL-compatible Software" is software whose conditions of propagation,
+modification and use would permit combination with GCC in accord with
+the license of GCC.
+
+"Target Code" refers to output from any compiler for a real or virtual
+target processor architecture, in executable form or suitable for
+input to an assembler, loader, linker and/or execution
+phase. Notwithstanding that, Target Code does not include data in any
+format that is used as a compiler intermediate representation, or used
+for producing a compiler intermediate representation.
+
+The "Compilation Process" transforms code entirely represented in
+non-intermediate languages designed for human-written code, and/or in
+Java Virtual Machine byte code, into Target Code. Thus, for example,
+use of source code generators and preprocessors need not be considered
+part of the Compilation Process, since the Compilation Process can be
+understood as starting with the output of the generators or
+preprocessors.
+
+A Compilation Process is "Eligible" if it is done using GCC, alone or
+with other GPL-compatible software, or if it is done without using any
+work based on GCC. For example, using non-GPL-compatible Software to
+optimize any GCC intermediate representations would not qualify as an
+Eligible Compilation Process.
+
+1. Grant of Additional Permission.
+
+You have permission to propagate a work of Target Code formed by
+combining the Runtime Library with Independent Modules, even if such
+propagation would otherwise violate the terms of GPLv3, provided that
+all Target Code was generated by Eligible Compilation Processes. You
+may then convey such a combination under terms of your choice,
+consistent with the licensing of the Independent Modules.
+
+2. No Weakening of GCC Copyleft.
+
+The availability of this Exception does not imply any general
+presumption that third-party software is unaffected by the copyleft
+requirements of the license of GCC.
+
+----
+
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+ <program> Copyright (C) <year> <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<http://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<http://www.gnu.org/philosophy/why-not-lgpl.html>. \ No newline at end of file
diff --git a/tools/wheels/cibw_before_build.sh b/tools/wheels/cibw_before_build.sh
index 36410ba1f..d77a52731 100644
--- a/tools/wheels/cibw_before_build.sh
+++ b/tools/wheels/cibw_before_build.sh
@@ -1,24 +1,39 @@
set -xe
PROJECT_DIR="$1"
-UNAME="$(uname)"
+PLATFORM=$(PYTHONPATH=tools python -c "import openblas_support; print(openblas_support.get_plat())")
# Update license
-if [[ $UNAME == "Linux" ]] ; then
+if [[ $RUNNER_OS == "Linux" ]] ; then
cat $PROJECT_DIR/tools/wheels/LICENSE_linux.txt >> $PROJECT_DIR/LICENSE.txt
-elif [[ $UNAME == "Darwin" ]]; then
+elif [[ $RUNNER_OS == "macOS" ]]; then
cat $PROJECT_DIR/tools/wheels/LICENSE_osx.txt >> $PROJECT_DIR/LICENSE.txt
+elif [[ $RUNNER_OS == "Windows" ]]; then
+ cat $PROJECT_DIR/tools/wheels/LICENSE_win32.txt >> $PROJECT_DIR/LICENSE.txt
fi
# Install Openblas
-if [[ $UNAME == "Linux" || $UNAME == "Darwin" ]] ; then
+if [[ $RUNNER_OS == "Linux" || $RUNNER_OS == "macOS" ]] ; then
basedir=$(python tools/openblas_support.py)
cp -r $basedir/lib/* /usr/local/lib
cp $basedir/include/* /usr/local/include
+ # TODO: don't copy directories if not arm64,
+ # but we need a way to know if cibuildwheel is cross-compiling arm64 to do this
+ if [[ $RUNNER_OS == "macOS" && $PLATFORM == "macosx-arm64" ]]; then
+ sudo mkdir -p /opt/arm64-builds/lib /opt/arm64-builds/include
+ sudo chown -R $USER /opt/arm64-builds
+ cp -r $basedir/lib/* /opt/arm64-builds/lib
+ cp $basedir/include/* /opt/arm64-builds/include
+ fi
+elif [[ $RUNNER_OS == "Windows" ]]; then
+ PYTHONPATH=tools python -c "import openblas_support; openblas_support.make_init('numpy')"
+ target=$(python tools/openblas_support.py)
+ mkdir -p openblas
+ cp $target openblas
fi
# Install GFortran
-if [[ $UNAME == "Darwin" ]]; then
+if [[ $RUNNER_OS == "macOS" ]]; then
# same version of gfortran as the openblas-libs and numpy-wheel builds
curl -L https://github.com/MacPython/gfortran-install/raw/master/archives/gfortran-4.9.0-Mavericks.dmg -o gfortran.dmg
GFORTRAN_SHA256=$(shasum -a 256 gfortran.dmg)
@@ -27,9 +42,18 @@ if [[ $UNAME == "Darwin" ]]; then
echo sha256 mismatch
exit 1
fi
+
hdiutil attach -mountpoint /Volumes/gfortran gfortran.dmg
sudo installer -pkg /Volumes/gfortran/gfortran.pkg -target /
otool -L /usr/local/gfortran/lib/libgfortran.3.dylib
+
+ # arm64 stuff from gfortran_utils
+ # TODO: figure out a way not to do this on x86_64, see above comment for openblas
+ if [[ $PLATFORM == "macosx-arm64" ]]; then
+ source $PROJECT_DIR/tools/wheels/gfortran_utils.sh
+ install_arm64_cross_gfortran
+ fi
+
# Manually symlink gfortran-4.9 to plain gfortran for f2py.
# No longer needed after Feb 13 2020 as gfortran is already present
# and the attempted link errors. Keep this for future reference.
diff --git a/tools/wheels/cibw_test_command.sh b/tools/wheels/cibw_test_command.sh
index f09395e84..ef67172d8 100644
--- a/tools/wheels/cibw_test_command.sh
+++ b/tools/wheels/cibw_test_command.sh
@@ -4,10 +4,14 @@
set -xe
PROJECT_DIR="$1"
-UNAME="$(uname)"
python -c "import numpy; numpy.show_config()"
-python -c "import sys; import numpy; sys.exit(not numpy.test('full', extra_argv=['-vv']))"
+if [[ $RUNNER_OS == "Windows" ]]; then
+ # GH 20391
+ PY_DIR=$(python -c "import sys; print(sys.prefix)")
+  mkdir -p $PY_DIR/libs
+fi
+python -c "import sys; import numpy; sys.exit(not numpy.test('full', extra_argv=['-v']))"
python $PROJECT_DIR/tools/wheels/check_license.py
-if [[ $UNAME == "Linux" || $UNAME == "Darwin" ]] ; then
+if [[ $RUNNER_OS == "Linux" || $RUNNER_OS == "macOS" ]] ; then
diff --git a/tools/wheels/gfortran_utils.sh b/tools/wheels/gfortran_utils.sh
new file mode 100644
index 000000000..e0c7054a5
--- /dev/null
+++ b/tools/wheels/gfortran_utils.sh
@@ -0,0 +1,168 @@
+# This file is vendored from github.com/MacPython/gfortran-install It is
+# licensed under BSD-2 which is copied as a comment below
+
+# Copyright 2016-2021 Matthew Brett, Isuru Fernando, Matti Picus
+
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+
+# Redistributions of source code must retain the above copyright notice, this
+# list of conditions and the following disclaimer.
+
+# Redistributions in binary form must reproduce the above copyright notice, this
+# list of conditions and the following disclaimer in the documentation and/or
+# other materials provided with the distribution.
+
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Bash utilities for use with gfortran
+
+GF_LIB_URL="https://3f23b170c54c2533c070-1c8a9b3114517dc5fe17b7c3f8c63a43.ssl.cf2.rackcdn.com"
+ARCHIVE_SDIR="${ARCHIVE_SDIR:-archives}"
+
+GF_UTIL_DIR=$(dirname "${BASH_SOURCE[0]}")
+
+function get_distutils_platform {
+ # Report platform as in form of distutils get_platform.
+ # This is like the platform tag that pip will use.
+ # Modify fat architecture tags on macOS to reflect compiled architecture
+
+ # Deprecate this function once get_distutils_platform_ex is used in all
+ # downstream projects
+ local plat=$1
+ case $plat in
+ i686|x86_64|arm64|universal2|intel|aarch64|s390x|ppc64le) ;;
+ *) echo Did not recognize plat $plat; return 1 ;;
+ esac
+ local uname=${2:-$(uname)}
+ if [ "$uname" != "Darwin" ]; then
+ if [ "$plat" == "intel" ]; then
+ echo plat=intel not allowed for Manylinux
+ return 1
+ fi
+ echo "manylinux1_$plat"
+ return
+ fi
+ # macOS 32-bit arch is i386
+ [ "$plat" == "i686" ] && plat="i386"
+ local target=$(echo $MACOSX_DEPLOYMENT_TARGET | tr .- _)
+ echo "macosx_${target}_${plat}"
+}
+
+function get_distutils_platform_ex {
+ # Report platform as in form of distutils get_platform.
+ # This is like the platform tag that pip will use.
+ # Modify fat architecture tags on macOS to reflect compiled architecture
+ # For non-darwin, report manylinux version
+ local plat=$1
+ local mb_ml_ver=${MB_ML_VER:-1}
+ case $plat in
+ i686|x86_64|arm64|universal2|intel|aarch64|s390x|ppc64le) ;;
+ *) echo Did not recognize plat $plat; return 1 ;;
+ esac
+ local uname=${2:-$(uname)}
+ if [ "$uname" != "Darwin" ]; then
+ if [ "$plat" == "intel" ]; then
+ echo plat=intel not allowed for Manylinux
+ return 1
+ fi
+ echo "manylinux${mb_ml_ver}_${plat}"
+ return
+ fi
+ # macOS 32-bit arch is i386
+ [ "$plat" == "i686" ] && plat="i386"
+ local target=$(echo $MACOSX_DEPLOYMENT_TARGET | tr .- _)
+ echo "macosx_${target}_${plat}"
+}
+
+function get_macosx_target {
+ # Report MACOSX_DEPLOYMENT_TARGET as given by distutils get_platform.
+ python -c "import sysconfig as s; print(s.get_config_vars()['MACOSX_DEPLOYMENT_TARGET'])"
+}
+
+function check_gfortran {
+ # Check that gfortran exists on the path
+ if [ -z "$(which gfortran)" ]; then
+ echo Missing gfortran
+ exit 1
+ fi
+}
+
+function get_gf_lib_for_suf {
+ local suffix=$1
+ local prefix=$2
+ local plat=${3:-$PLAT}
+ local uname=${4:-$(uname)}
+ if [ -z "$prefix" ]; then echo Prefix not defined; exit 1; fi
+ local plat_tag=$(get_distutils_platform_ex $plat $uname)
+ if [ -n "$suffix" ]; then suffix="-$suffix"; fi
+ local fname="$prefix-${plat_tag}${suffix}.tar.gz"
+ local out_fname="${ARCHIVE_SDIR}/$fname"
+ if [ ! -e "$out_fname" ]; then
+ curl -L "${GF_LIB_URL}/$fname" > $out_fname || (echo "Fetch of $out_fname failed"; exit 1)
+ fi
+ [ -s $out_fname ] || (echo "$out_fname is empty"; exit 24)
+ echo "$out_fname"
+}
+
+if [ "$(uname)" == "Darwin" ]; then
+ mac_target=${MACOSX_DEPLOYMENT_TARGET:-$(get_macosx_target)}
+ export MACOSX_DEPLOYMENT_TARGET=$mac_target
+ GFORTRAN_DMG="${GF_UTIL_DIR}/archives/gfortran-4.9.0-Mavericks.dmg"
+ export GFORTRAN_SHA="$(shasum $GFORTRAN_DMG)"
+
+ function install_arm64_cross_gfortran {
+ curl -L -O https://github.com/isuruf/gcc/releases/download/gcc-10-arm-20210228/gfortran-darwin-arm64.tar.gz
+ export GFORTRAN_SHA=f26990f6f08e19b2ec150b9da9d59bd0558261dd
+ if [[ "$(shasum gfortran-darwin-arm64.tar.gz)" != "${GFORTRAN_SHA} gfortran-darwin-arm64.tar.gz" ]]; then
+ echo "shasum mismatch for gfortran-darwin-arm64"
+ exit 1
+ fi
+ sudo mkdir -p /opt/
+ sudo cp "gfortran-darwin-arm64.tar.gz" /opt/gfortran-darwin-arm64.tar.gz
+ pushd /opt
+ sudo tar -xvf gfortran-darwin-arm64.tar.gz
+ sudo rm gfortran-darwin-arm64.tar.gz
+ popd
+ export FC_ARM64="$(find /opt/gfortran-darwin-arm64/bin -name "*-gfortran")"
+ local libgfortran="$(find /opt/gfortran-darwin-arm64/lib -name libgfortran.dylib)"
+ local libdir=$(dirname $libgfortran)
+
+ export FC_ARM64_LDFLAGS="-L$libdir -Wl,-rpath,$libdir"
+ if [[ "${PLAT:-}" == "arm64" ]]; then
+ export FC=$FC_ARM64
+ fi
+ }
+ function install_gfortran {
+ hdiutil attach -mountpoint /Volumes/gfortran $GFORTRAN_DMG
+ sudo installer -pkg /Volumes/gfortran/gfortran.pkg -target /
+ check_gfortran
+ if [[ "${PLAT:-}" == "universal2" || "${PLAT:-}" == "arm64" ]]; then
+ install_arm64_cross_gfortran
+ fi
+ }
+
+ function get_gf_lib {
+ # Get lib with gfortran suffix
+ get_gf_lib_for_suf "gf_${GFORTRAN_SHA:0:7}" $@
+ }
+else
+ function install_gfortran {
+ # No-op - already installed on manylinux image
+ check_gfortran
+ }
+
+ function get_gf_lib {
+ # Get library with no suffix
+ get_gf_lib_for_suf "" $@
+ }
+fi