diff options
220 files changed, 4511 insertions, 2235 deletions
diff --git a/.circleci/config.yml b/.circleci/config.yml index 772c3fbfd..38695bdff 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -21,7 +21,7 @@ jobs: python3 -m venv venv ln -s $(which python3) venv/bin/python3.6 . venv/bin/activate - pip install cython sphinx==2.2.0 matplotlib ipython + pip install cython sphinx==2.3.1 matplotlib ipython sudo apt-get update sudo apt-get install -y graphviz texlive-fonts-recommended texlive-latex-recommended texlive-latex-extra texlive-generic-extra latexmk texlive-xetex diff --git a/.travis.yml b/.travis.yml index 73106b4c1..84fdbb8f7 100644 --- a/.travis.yml +++ b/.travis.yml @@ -3,7 +3,7 @@ language: python group: travis_latest os: linux -dist: xenial +dist: bionic # Travis whitelists the installable packages, additions can be requested # https://github.com/travis-ci/apt-package-whitelist @@ -11,6 +11,8 @@ addons: apt: packages: &common_packages - gfortran + - libgfortran5 + - libgfortran3 - libatlas-base-dev # Speedup builds, particularly when USE_CHROOT=1 - eatmydata @@ -27,7 +29,7 @@ stages: env: global: - - OpenBLAS_version=0.3.7 + - OpenBLAS_version=0.3.8 - WHEELHOUSE_UPLOADER_USERNAME=travis.numpy # The following is generated with the command: # travis encrypt -r numpy/numpy WHEELHOUSE_UPLOADER_SECRET=tH3AP1KeY @@ -47,7 +49,6 @@ jobs: - python: 3.7 - python: 3.6 - dist: bionic env: USE_DEBUG=1 addons: apt: @@ -86,6 +87,8 @@ jobs: packages: - gfortran - eatmydata + - libgfortran5 + - libgfortran3 - python: 3.7 env: USE_WHEEL=1 NPY_RELAXED_STRIDES_DEBUG=1 @@ -112,8 +115,18 @@ jobs: arch: s390x env: # use s390x OpenBLAS build, not system ATLAS + - DOWNLOAD_OPENBLAS=1 - ATLAS=None + - python: 3.7 + os: linux + arch: aarch64 + env: + # use aarch64 OpenBLAS build, not system ATLAS + - DOWNLOAD_OPENBLAS=1 + - ATLAS=None + + before_install: - ./tools/travis-before-install.sh diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 1851df71a..7bd7a1e1f 100644 --- a/azure-pipelines.yml +++ 
b/azure-pipelines.yml @@ -8,7 +8,7 @@ trigger: variables: # OpenBLAS_version should be updated # to match numpy-wheels repo - OpenBLAS_version: 0.3.7 + OpenBLAS_version: 0.3.8 stages: - stage: InitialTests @@ -187,7 +187,7 @@ stages: - template: azure-steps-windows.yml - job: Linux_PyPy3 pool: - vmIMage: 'ubuntu-16.04' + vmIMage: 'ubuntu-18.04' steps: - script: source tools/pypy-test.sh displayName: 'Run PyPy3 Build / Tests' diff --git a/benchmarks/asv.conf.json b/benchmarks/asv.conf.json index b3c7f9f20..1046f10f2 100644 --- a/benchmarks/asv.conf.json +++ b/benchmarks/asv.conf.json @@ -42,7 +42,6 @@ // list indicates to just test against the default (latest) // version. "matrix": { - "six": [], "Cython": [], }, diff --git a/benchmarks/benchmarks/bench_app.py b/benchmarks/benchmarks/bench_app.py index 2a649f39b..bee95c201 100644 --- a/benchmarks/benchmarks/bench_app.py +++ b/benchmarks/benchmarks/bench_app.py @@ -2,8 +2,6 @@ from .common import Benchmark import numpy as np -from six.moves import xrange - class LaplaceInplace(Benchmark): params = ['inplace', 'normal'] @@ -59,7 +57,7 @@ class MaxesOfDots(Benchmark): ntime = 200 self.arrays = [np.random.normal(size=(ntime, nfeat)) - for i in xrange(nsubj)] + for i in range(nsubj)] def maxes_of_dots(self, arrays): """ diff --git a/benchmarks/benchmarks/bench_indexing.py b/benchmarks/benchmarks/bench_indexing.py index bb7596d0a..9ee0d1fb5 100644 --- a/benchmarks/benchmarks/bench_indexing.py +++ b/benchmarks/benchmarks/bench_indexing.py @@ -2,8 +2,6 @@ from .common import Benchmark, get_squares_, get_indexes_, get_indexes_rand_ from os.path import join as pjoin import shutil -import sys -import six from numpy import memmap, float32, array import numpy as np from tempfile import mkdtemp @@ -23,13 +21,10 @@ class Indexing(Benchmark): 'indexes_': get_indexes_(), 'indexes_rand_': get_indexes_rand_()} - if sys.version_info[0] >= 3: - code = "def run():\n for a in squares_.values(): a[%s]%s" - else: - code = "def run():\n for 
a in squares_.itervalues(): a[%s]%s" + code = "def run():\n for a in squares_.values(): a[%s]%s" code = code % (sel, op) - six.exec_(code, ns) + exec(code, ns) self.func = ns['run'] def time_op(self, indexes, sel, op): diff --git a/doc/changelog/1.12.0-changelog.rst b/doc/changelog/1.12.0-changelog.rst index 75d9964e3..2e91f510f 100644 --- a/doc/changelog/1.12.0-changelog.rst +++ b/doc/changelog/1.12.0-changelog.rst @@ -225,7 +225,7 @@ A total of 418 pull requests were merged for this release. * `#7240 <https://github.com/numpy/numpy/pull/7240>`__: Change 'pubic' to 'public'. * `#7241 <https://github.com/numpy/numpy/pull/7241>`__: MAINT: update doc/sphinxext to numpydoc 0.6.0, and fix up some... * `#7243 <https://github.com/numpy/numpy/pull/7243>`__: ENH: Adding support to the range keyword for estimation of the... -* `#7246 <https://github.com/numpy/numpy/pull/7246>`__: DOC: metion writeable keyword in as_strided in release notes +* `#7246 <https://github.com/numpy/numpy/pull/7246>`__: DOC: mention writeable keyword in as_strided in release notes * `#7247 <https://github.com/numpy/numpy/pull/7247>`__: TST: Fail quickly on AppVeyor for superseded PR builds * `#7248 <https://github.com/numpy/numpy/pull/7248>`__: DOC: remove link to documentation wiki editor from HOWTO_DOCUMENT. * `#7250 <https://github.com/numpy/numpy/pull/7250>`__: DOC,REL: Update 1.11.0 notes. @@ -333,7 +333,7 @@ A total of 418 pull requests were merged for this release. * `#7534 <https://github.com/numpy/numpy/pull/7534>`__: MAINT: Update setup.py to reflect supported python versions. 
* `#7536 <https://github.com/numpy/numpy/pull/7536>`__: MAINT: Always use PyCapsule instead of PyCObject in mtrand.pyx * `#7539 <https://github.com/numpy/numpy/pull/7539>`__: MAINT: Cleanup of random stuff -* `#7549 <https://github.com/numpy/numpy/pull/7549>`__: BUG: allow graceful recovery for no Liux compiler +* `#7549 <https://github.com/numpy/numpy/pull/7549>`__: BUG: allow graceful recovery for no Linux compiler * `#7562 <https://github.com/numpy/numpy/pull/7562>`__: BUG: Fix test_from_object_array_unicode (test_defchararray.TestBasic)… * `#7565 <https://github.com/numpy/numpy/pull/7565>`__: BUG: Fix test_ctypeslib and test_indexing for debug interpreter * `#7566 <https://github.com/numpy/numpy/pull/7566>`__: MAINT: use manylinux1 wheel for cython @@ -459,7 +459,7 @@ A total of 418 pull requests were merged for this release. * `#8016 <https://github.com/numpy/numpy/pull/8016>`__: BUG: Fix numpy.ma.median. * `#8018 <https://github.com/numpy/numpy/pull/8018>`__: BUG: Fixes return for np.ma.count if keepdims is True and axis... * `#8021 <https://github.com/numpy/numpy/pull/8021>`__: DOC: change all non-code instances of Numpy to NumPy -* `#8027 <https://github.com/numpy/numpy/pull/8027>`__: ENH: Add platform indepedent lib dir to PYTHONPATH +* `#8027 <https://github.com/numpy/numpy/pull/8027>`__: ENH: Add platform independent lib dir to PYTHONPATH * `#8028 <https://github.com/numpy/numpy/pull/8028>`__: DOC: Update 1.11.2 release notes. * `#8030 <https://github.com/numpy/numpy/pull/8030>`__: BUG: fix np.ma.median with only one non-masked value and an axis... * `#8038 <https://github.com/numpy/numpy/pull/8038>`__: MAINT: Update error message in rollaxis. diff --git a/doc/changelog/1.15.0-changelog.rst b/doc/changelog/1.15.0-changelog.rst index 4e3d3680b..dd5544ac9 100644 --- a/doc/changelog/1.15.0-changelog.rst +++ b/doc/changelog/1.15.0-changelog.rst @@ -316,7 +316,7 @@ A total of 438 pull requests were merged for this release. 
* `#10618 <https://github.com/numpy/numpy/pull/10618>`__: MAINT: Stop using non-tuple indices internally * `#10619 <https://github.com/numpy/numpy/pull/10619>`__: BUG: np.ma.flatnotmasked_contiguous behaves differently on mask=nomask... * `#10621 <https://github.com/numpy/numpy/pull/10621>`__: BUG: deallocate recursive closure in arrayprint.py -* `#10623 <https://github.com/numpy/numpy/pull/10623>`__: BUG: Correctly identify comma seperated dtype strings +* `#10623 <https://github.com/numpy/numpy/pull/10623>`__: BUG: Correctly identify comma separated dtype strings * `#10625 <https://github.com/numpy/numpy/pull/10625>`__: BUG: Improve the accuracy of the FFT implementation * `#10635 <https://github.com/numpy/numpy/pull/10635>`__: ENH: Implement initial kwarg for ufunc.add.reduce * `#10641 <https://github.com/numpy/numpy/pull/10641>`__: MAINT: Post 1.14.1 release updates for master branch @@ -343,7 +343,7 @@ A total of 438 pull requests were merged for this release. * `#10699 <https://github.com/numpy/numpy/pull/10699>`__: DOC: Grammar of np.gradient docstring * `#10702 <https://github.com/numpy/numpy/pull/10702>`__: TST, DOC: Upload devdocs and neps after circleci build * `#10703 <https://github.com/numpy/numpy/pull/10703>`__: MAINT: NEP process updates -* `#10708 <https://github.com/numpy/numpy/pull/10708>`__: BUG: fix problem with modifing pyf lines containing ';' in f2py +* `#10708 <https://github.com/numpy/numpy/pull/10708>`__: BUG: fix problem with modifying pyf lines containing ';' in f2py * `#10710 <https://github.com/numpy/numpy/pull/10710>`__: BUG: fix error message in numpy.select * `#10711 <https://github.com/numpy/numpy/pull/10711>`__: MAINT: Hard tab and whitespace cleanup. * `#10715 <https://github.com/numpy/numpy/pull/10715>`__: MAINT: Fixed C++ guard in f2py test. 
diff --git a/doc/changelog/1.16.5-changelog.rst b/doc/changelog/1.16.5-changelog.rst index 19374058d..c609d214c 100644 --- a/doc/changelog/1.16.5-changelog.rst +++ b/doc/changelog/1.16.5-changelog.rst @@ -37,14 +37,14 @@ A total of 23 pull requests were merged for this release. * `#13933 <https://github.com/numpy/numpy/pull/13933>`__: MAINT/BUG/DOC: Fix errors in _add_newdocs * `#13984 <https://github.com/numpy/numpy/pull/13984>`__: BUG: fix byte order reversal for datetime64[ns] * `#13994 <https://github.com/numpy/numpy/pull/13994>`__: MAINT,BUG: Use nbytes to also catch empty descr during allocation -* `#14042 <https://github.com/numpy/numpy/pull/14042>`__: BUG: np.array cleared errors occured in PyMemoryView_FromObject +* `#14042 <https://github.com/numpy/numpy/pull/14042>`__: BUG: np.array cleared errors occurred in PyMemoryView_FromObject * `#14043 <https://github.com/numpy/numpy/pull/14043>`__: BUG: Fixes for Undefined Behavior Sanitizer (UBSan) errors. * `#14044 <https://github.com/numpy/numpy/pull/14044>`__: BUG: ensure that casting to/from structured is properly checked. * `#14045 <https://github.com/numpy/numpy/pull/14045>`__: MAINT: fix histogram*d dispatchers * `#14046 <https://github.com/numpy/numpy/pull/14046>`__: BUG: further fixup to histogram2d dispatcher. * `#14052 <https://github.com/numpy/numpy/pull/14052>`__: BUG: Replace contextlib.suppress for Python 2.7 * `#14056 <https://github.com/numpy/numpy/pull/14056>`__: BUG: fix compilation of 3rd party modules with Py_LIMITED_API... -* `#14057 <https://github.com/numpy/numpy/pull/14057>`__: BUG: Fix memory leak in dtype from dict contructor +* `#14057 <https://github.com/numpy/numpy/pull/14057>`__: BUG: Fix memory leak in dtype from dict constructor * `#14058 <https://github.com/numpy/numpy/pull/14058>`__: DOC: Document array_function at a higher level. 
* `#14084 <https://github.com/numpy/numpy/pull/14084>`__: BUG, DOC: add new recfunctions to `__all__` * `#14162 <https://github.com/numpy/numpy/pull/14162>`__: BUG: Remove stray print that causes a SystemError on python 3.7 diff --git a/doc/changelog/1.17.0-changelog.rst b/doc/changelog/1.17.0-changelog.rst index debfb6f5b..4177c848f 100644 --- a/doc/changelog/1.17.0-changelog.rst +++ b/doc/changelog/1.17.0-changelog.rst @@ -276,7 +276,7 @@ A total of 531 pull requests were merged for this release. * `#12696 <https://github.com/numpy/numpy/pull/12696>`__: BUG: Fix leak of void scalar buffer info * `#12698 <https://github.com/numpy/numpy/pull/12698>`__: DOC: improve comments in copycast_isaligned * `#12700 <https://github.com/numpy/numpy/pull/12700>`__: ENH: chain additional exception on ufunc method lookup error -* `#12702 <https://github.com/numpy/numpy/pull/12702>`__: TST: Check FFT results for C/Fortran ordered and non contigous... +* `#12702 <https://github.com/numpy/numpy/pull/12702>`__: TST: Check FFT results for C/Fortran ordered and non contiguous... * `#12704 <https://github.com/numpy/numpy/pull/12704>`__: TST: pin Azure brew version for stability * `#12709 <https://github.com/numpy/numpy/pull/12709>`__: TST: add ppc64le to Travis CI matrix * `#12713 <https://github.com/numpy/numpy/pull/12713>`__: BUG: loosen kwargs requirements in ediff1d @@ -536,7 +536,7 @@ A total of 531 pull requests were merged for this release. 
* `#13503 <https://github.com/numpy/numpy/pull/13503>`__: ENH: Support object arrays in matmul * `#13504 <https://github.com/numpy/numpy/pull/13504>`__: DOC: Update links in PULL_REQUEST_TEMPLATE.md * `#13506 <https://github.com/numpy/numpy/pull/13506>`__: ENH: Add sparse option to np.core.numeric.indices -* `#13507 <https://github.com/numpy/numpy/pull/13507>`__: BUG: np.array cleared errors occured in PyMemoryView_FromObject +* `#13507 <https://github.com/numpy/numpy/pull/13507>`__: BUG: np.array cleared errors occurred in PyMemoryView_FromObject * `#13508 <https://github.com/numpy/numpy/pull/13508>`__: BUG: Removes ValueError for empty kwargs in arraymultiter_new * `#13518 <https://github.com/numpy/numpy/pull/13518>`__: MAINT: implement assert_array_compare without converting array... * `#13520 <https://github.com/numpy/numpy/pull/13520>`__: BUG: exp, log AVX loops do not use steps @@ -643,7 +643,7 @@ A total of 531 pull requests were merged for this release. * `#13815 <https://github.com/numpy/numpy/pull/13815>`__: MAINT: Correct intrinsic use on Windows * `#13818 <https://github.com/numpy/numpy/pull/13818>`__: TST: Add tests for ComplexWarning in astype * `#13819 <https://github.com/numpy/numpy/pull/13819>`__: DOC: Fix documented default value of ``__array_priority__`` for... -* `#13820 <https://github.com/numpy/numpy/pull/13820>`__: MAINT, DOC: Fix misspelled words in documetation. +* `#13820 <https://github.com/numpy/numpy/pull/13820>`__: MAINT, DOC: Fix misspelled words in documentation. * `#13821 <https://github.com/numpy/numpy/pull/13821>`__: MAINT: core: Fix a compiler warning. * `#13830 <https://github.com/numpy/numpy/pull/13830>`__: MAINT: Update tox for supported Python versions * `#13832 <https://github.com/numpy/numpy/pull/13832>`__: MAINT: remove pcg32 BitGenerator @@ -656,7 +656,7 @@ A total of 531 pull requests were merged for this release. 
* `#13849 <https://github.com/numpy/numpy/pull/13849>`__: DOC: np.random documentation cleanup and expansion. * `#13850 <https://github.com/numpy/numpy/pull/13850>`__: DOC: Update performance numbers * `#13851 <https://github.com/numpy/numpy/pull/13851>`__: MAINT: Update shippable.yml to remove Python 2 dependency -* `#13855 <https://github.com/numpy/numpy/pull/13855>`__: BUG: Fix memory leak in dtype from dict contructor +* `#13855 <https://github.com/numpy/numpy/pull/13855>`__: BUG: Fix memory leak in dtype from dict constructor * `#13856 <https://github.com/numpy/numpy/pull/13856>`__: MAINT: move location of bitgen.h * `#13858 <https://github.com/numpy/numpy/pull/13858>`__: BUG: do not force emulation of 128-bit arithmetic. * `#13859 <https://github.com/numpy/numpy/pull/13859>`__: DOC: Update performance numbers for PCG64 diff --git a/doc/neps/nep-0000.rst b/doc/neps/nep-0000.rst index 0a2dbdefb..56a332626 100644 --- a/doc/neps/nep-0000.rst +++ b/doc/neps/nep-0000.rst @@ -76,7 +76,7 @@ where ``<n>`` is an appropriately assigned four-digit number (e.g., ``nep-0000.rst``). The draft must use the :doc:`nep-template` file. Once the PR for the NEP is in place, a post should be made to the -mailing list containing the sections upto "Backward compatibility", +mailing list containing the sections up to "Backward compatibility", with the purpose of limiting discussion there to usage and impact. Discussion on the pull request will have a broader scope, also including details of implementation. diff --git a/doc/neps/nep-0021-advanced-indexing.rst b/doc/neps/nep-0021-advanced-indexing.rst index dab9ab022..8e525b0cb 100644 --- a/doc/neps/nep-0021-advanced-indexing.rst +++ b/doc/neps/nep-0021-advanced-indexing.rst @@ -108,7 +108,7 @@ array ``arr`` with shape ``(X, Y, Z)``: 3. ``arr[0, :, [0, 1]]`` has shape ``(2, Y)``, not ``(Y, 2)``! 
These first two cases are intuitive and consistent with outer indexing, but -this last case is quite surprising, even to many higly experienced NumPy users. +this last case is quite surprising, even to many highly experienced NumPy users. Mixed cases involving multiple array indices are also surprising, and only less problematic because the current behavior is so useless that it is rarely @@ -240,7 +240,7 @@ be deduced: 7. To ensure that existing subclasses of `ndarray` that override indexing do not inadvertently revert to default behavior for indexing attributes, these attribute should have explicit checks that disable them if - ``__getitem__`` or ``__setitem__`` has been overriden. + ``__getitem__`` or ``__setitem__`` has been overridden. Unlike plain indexing, the new indexing attributes are explicitly aimed at higher dimensional indexing, several additional changes should be implemented: @@ -319,7 +319,7 @@ of ``__getitem__`` on these attributes should test subclass has special handling for indexing and ``NotImplementedError`` should be raised, requiring that the indexing attributes is also explicitly overwritten. Likewise, implementations of ``__setitem__`` should check to see -if ``__setitem__`` is overriden. +if ``__setitem__`` is overridden. A further question is how to facilitate implementing the special attributes. Also there is the weird functionality where ``__setitem__`` calls diff --git a/doc/neps/nep-0030-duck-array-protocol.rst b/doc/neps/nep-0030-duck-array-protocol.rst index f98449ed6..635f10165 100644 --- a/doc/neps/nep-0030-duck-array-protocol.rst +++ b/doc/neps/nep-0030-duck-array-protocol.rst @@ -24,7 +24,7 @@ Detailed description NumPy's API, including array definitions, is implemented and mimicked in countless other projects. By definition, many of those arrays are fairly similar in how they operate to the NumPy standard. 
The introduction of -``__array_function__`` allowed dispathing of functions implemented by several +``__array_function__`` allowed dispatching of functions implemented by several of these projects directly via NumPy's API. This introduces a new requirement, returning the NumPy-like array itself, rather than forcing a coercion into a pure NumPy array. diff --git a/doc/neps/nep-0037-array-module.rst b/doc/neps/nep-0037-array-module.rst index 387356490..596798964 100644 --- a/doc/neps/nep-0037-array-module.rst +++ b/doc/neps/nep-0037-array-module.rst @@ -448,7 +448,7 @@ input arguments: This might be useful, but it's not clear if we really need it. Pint seems to get along OK without any explicit array creation routines (favoring multiplication by units, e.g., ``np.ones(5) * ureg.m``), and for the most part -Dask is also OK with existing ``__array_function__`` style overides (e.g., +Dask is also OK with existing ``__array_function__`` style overrides (e.g., favoring ``np.ones_like`` over ``np.ones``). Choosing whether to place an array on the CPU or GPU could be solved by `making array creation lazy <https://github.com/google/jax/pull/1668>`_. diff --git a/doc/release/upcoming_changes/14942.compatibility.rst b/doc/release/upcoming_changes/14942.compatibility.rst new file mode 100644 index 000000000..461758c95 --- /dev/null +++ b/doc/release/upcoming_changes/14942.compatibility.rst @@ -0,0 +1,6 @@ +Fasttake and fastputmask slots are deprecated and NULL'ed +--------------------------------------------------------- +The fasttake and fastputmask slots are now never used and +must always be set to NULL. This will result in no change in behaviour. +However, if a user dtype should set one of these a DeprecationWarning +will be given. 
diff --git a/doc/release/upcoming_changes/15118.change.rst b/doc/release/upcoming_changes/15118.change.rst new file mode 100644 index 000000000..f14beebbe --- /dev/null +++ b/doc/release/upcoming_changes/15118.change.rst @@ -0,0 +1,7 @@ +Remove handling of extra argument to ``__array__`` +-------------------------------------------------- +A code path and test have been in the code since NumPy 0.4 for a two-argument +variant of ``__array__(dtype=None, context=None)``. It was activated when +calling ``ufunc(op)`` or ``ufunc.reduce(op)`` if ``op.__array__`` existed. +However that variant is not documented, and it is not clear what the intention +was for its use. It has been removed. diff --git a/doc/release/upcoming_changes/15119.deprecation.rst b/doc/release/upcoming_changes/15119.deprecation.rst new file mode 100644 index 000000000..d18e440fe --- /dev/null +++ b/doc/release/upcoming_changes/15119.deprecation.rst @@ -0,0 +1,8 @@ + +Deprecate automatic ``dtype=object`` for ragged input +----------------------------------------------------- +Calling ``np.array([1, [1, 2, 3]])`` will issue a ``DeprecationWarning`` as +per `NEP 34`_. Users should explicitly use ``dtype=object`` to avoid the +warning. + +.. 
_`NEP 34`: https://numpy.org/neps/nep-0034.html diff --git a/doc/release/upcoming_changes/15217.deprecation.rst b/doc/release/upcoming_changes/15217.deprecation.rst new file mode 100644 index 000000000..d49de20b5 --- /dev/null +++ b/doc/release/upcoming_changes/15217.deprecation.rst @@ -0,0 +1,13 @@ +Passing ``shape=0`` to factory functions in ``numpy.rec`` is deprecated +----------------------------------------------------------------------- + +``0`` is treated as a special case and is aliased to ``None`` in the functions: + +* `numpy.core.records.fromarrays` +* `numpy.core.records.fromrecords` +* `numpy.core.records.fromstring` +* `numpy.core.records.fromfile` + +In future, ``0`` will not be special cased, and will be treated as an array +length like any other integer. + diff --git a/doc/release/upcoming_changes/8255.new_feature.rst b/doc/release/upcoming_changes/8255.new_feature.rst new file mode 100644 index 000000000..c0bc21b3e --- /dev/null +++ b/doc/release/upcoming_changes/8255.new_feature.rst @@ -0,0 +1,5 @@ +`numpy.frompyfunc` now accepts an identity argument +--------------------------------------------------- +This allows the :attr:`numpy.ufunc.identity` attribute to be set on the +resulting ufunc, meaning it can be used for empty and multi-dimensional +calls to :meth:`numpy.ufunc.reduce`. 
diff --git a/doc/release/upcoming_changes/template.rst b/doc/release/upcoming_changes/template.rst index 9c8a3b5fc..997b4850e 100644 --- a/doc/release/upcoming_changes/template.rst +++ b/doc/release/upcoming_changes/template.rst @@ -17,6 +17,7 @@ {% if definitions[category]['showcontent'] %} {% for text, values in sections[section][category].items() %} {{ text }} + {{ get_indent(text) }}({{values|join(', ') }}) {% endfor %} diff --git a/doc/source/conf.py b/doc/source/conf.py index 7e3a145f5..957cb17e6 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -179,6 +179,7 @@ latex_elements = { latex_elements['preamble'] = r''' % In the parameters section, place a newline after the Parameters % header +\usepackage{xcolor} \usepackage{expdlist} \let\latexdescription=\description \def\description{\latexdescription{}{} \breaklabel} diff --git a/doc/source/f2py/advanced.rst b/doc/source/f2py/advanced.rst index 375922033..1b4625dde 100644 --- a/doc/source/f2py/advanced.rst +++ b/doc/source/f2py/advanced.rst @@ -25,7 +25,7 @@ In Python: Modifying the dictionary of a F2PY generated module =================================================== -The following example illustrates how to add a user-defined +The following example illustrates how to add user-defined variables to a F2PY generated extension module. 
Given the following signature file diff --git a/doc/source/f2py/allocarr_session.dat b/doc/source/f2py/allocarr_session.dat index fc91959b7..754d9cb8b 100644 --- a/doc/source/f2py/allocarr_session.dat +++ b/doc/source/f2py/allocarr_session.dat @@ -1,27 +1,29 @@ ->>> import allocarr ->>> print allocarr.mod.__doc__ +>>> import allocarr +>>> print(allocarr.mod.__doc__) b - 'f'-array(-1,-1), not allocated foo - Function signature: foo() >>> allocarr.mod.foo() b is not allocated ->>> allocarr.mod.b = [[1,2,3],[4,5,6]] # allocate/initialize b +>>> allocarr.mod.b = [[1, 2, 3], [4, 5, 6]] # allocate/initialize b >>> allocarr.mod.foo() b=[ 1.000000 2.000000 3.000000 4.000000 5.000000 6.000000 ] ->>> allocarr.mod.b # b is Fortran-contiguous +>>> allocarr.mod.b # b is Fortran-contiguous array([[ 1., 2., 3.], - [ 4., 5., 6.]],'f') ->>> allocarr.mod.b = [[1,2,3],[4,5,6],[7,8,9]] # reallocate/initialize b + [ 4., 5., 6.]], dtype=float32) +>>> allocarr.mod.b.flags.f_contiguous +True +>>> allocarr.mod.b = [[1, 2, 3], [4, 5, 6], [7, 8, 9]] # reallocate/initialize b >>> allocarr.mod.foo() b=[ 1.000000 2.000000 3.000000 4.000000 5.000000 6.000000 7.000000 8.000000 9.000000 ] ->>> allocarr.mod.b = None # deallocate array +>>> allocarr.mod.b = None # deallocate array >>> allocarr.mod.foo() b is not allocated diff --git a/doc/source/f2py/array_session.dat b/doc/source/f2py/array_session.dat index 069530d03..714c03651 100644 --- a/doc/source/f2py/array_session.dat +++ b/doc/source/f2py/array_session.dat @@ -1,65 +1,87 @@ >>> import arr ->>> from numpy import array ->>> print arr.foo.__doc__ -foo - Function signature: - a = foo(a,[overwrite_a]) -Required arguments: - a : input rank-2 array('d') with bounds (n,m) -Optional arguments: - overwrite_a := 0 input int -Return objects: - a : rank-2 array('d') with bounds (n,m) +>>> from numpy import asfortranarray +>>> print(arr.foo.__doc__) +a = foo(a,[overwrite_a]) ->>> a=arr.foo([[1,2,3], -... 
[4,5,6]]) -copied an array using PyArray_CopyFromObject: size=6, elsize=8 ->>> print a +Wrapper for ``foo``. + +Parameters +---------- +a : input rank-2 array('d') with bounds (n,m) + +Other Parameters +---------------- +overwrite_a : input int, optional + Default: 0 + +Returns +------- +a : rank-2 array('d') with bounds (n,m) + +>>> a = arr.foo([[1, 2, 3], +... [4, 5, 6]]) +created an array from object +>>> print(a) [[ 1. 3. 4.] [ 3. 5. 6.]] ->>> a.iscontiguous(), arr.has_column_major_storage(a) -(0, 1) ->>> b=arr.foo(a) # even if a is proper-contiguous -... # and has proper type, a copy is made -... # forced by intent(copy) attribute -... # to preserve its original contents -... -copied an array using copy_ND_array: size=6, elsize=8 ->>> print a +>>> a.flags.c_contiguous +False +>>> a.flags.f_contiguous +True +# even if a is proper-contiguous and has proper type, +# a copy is made forced by intent(copy) attribute +# to preserve its original contents +>>> b = arr.foo(a) +copied an array: size=6, elsize=8 +>>> print(a) [[ 1. 3. 4.] [ 3. 5. 6.]] ->>> print b +>>> print(b) [[ 1. 4. 5.] [ 2. 5. 6.]] ->>> b=arr.foo(a,overwrite_a=1) # a is passed directly to Fortran -... # routine and its contents is discarded +>>> b = arr.foo(a, overwrite_a = 1) # a is passed directly to Fortran +... # routine and its contents is discarded ... ->>> print a +>>> print(a) [[ 1. 4. 5.] [ 2. 5. 6.]] ->>> print b +>>> print(b) [[ 1. 4. 5.] [ 2. 5. 6.]] ->>> a is b # a and b are actually the same objects -1 ->>> print arr.foo([1,2,3]) # different rank arrays are allowed -copied an array using PyArray_CopyFromObject: size=3, elsize=8 +>>> a is b # a and b are actually the same objects +True +>>> print(arr.foo([1, 2, 3])) # different rank arrays are allowed +created an array from object [ 1. 1. 2.] ->>> print arr.foo([[[1],[2],[3]]]) -copied an array using PyArray_CopyFromObject: size=3, elsize=8 -[ [[ 1.] - [ 3.] 
- [ 4.]]] +>>> print(arr.foo([[[1], [2], [3]]])) +created an array from object +[[[ 1.] + [ 1.] + [ 2.]]] >>> >>> # Creating arrays with column major data storage order: -... ->>> s = arr.as_column_major_storage(array([[1,2,3],[4,5,6]])) -copied an array using copy_ND_array: size=6, elsize=4 ->>> arr.has_column_major_storage(s) -1 ->>> print s + ... +>>> s = asfortranarray([[1, 2, 3], [4, 5, 6]]) +>>> s.flags.f_contiguous +True +>>> print(s) [[1 2 3] [4 5 6]] ->>> s2 = arr.as_column_major_storage(s) +>>> print(arr.foo(s)) +>>> s2 = asfortranarray(s) >>> s2 is s # an array with column major storage order # is returned immediately -1 +True +>>> # Note that arr.foo returns a column major data storage order array: + ... +>>> s3 = ascontiguousarray(s) +>>> s3.flags.f_contiguous +False +>>> s3.flags.c_contiguous +True +>>> s3 = arr.foo(s3) +copied an array: size=6, elsize=8 +>>> s3.flags.f_contiguous +True +>>> s3.flags.c_contiguous +False diff --git a/doc/source/f2py/calculate.f b/doc/source/f2py/calculate.f index 1cda1c8dd..4ff570d28 100644 --- a/doc/source/f2py/calculate.f +++ b/doc/source/f2py/calculate.f @@ -7,7 +7,7 @@ cf2py y = func(y) c cf2py intent(in,out,copy) x integer n,i - real*8 x(n) + real*8 x(n), func do i=1,n x(i) = func(x(i)) end do diff --git a/doc/source/f2py/calculate_session.dat b/doc/source/f2py/calculate_session.dat index 2fe64f522..c4c380700 100644 --- a/doc/source/f2py/calculate_session.dat +++ b/doc/source/f2py/calculate_session.dat @@ -3,4 +3,4 @@ array([ 0., 1., 4., 9., 16.]) >>> import math >>> foo.calculate(range(5), math.exp) -array([ 1. , 2.71828175, 7.38905621, 20.08553696, 54.59814835]) +array([ 1. 
, 2.71828183, 7.3890561, 20.08553692, 54.59815003]) diff --git a/doc/source/f2py/callback.f b/doc/source/f2py/callback.f index 6e9bfb920..d5cfc7574 100644 --- a/doc/source/f2py/callback.f +++ b/doc/source/f2py/callback.f @@ -2,7 +2,7 @@ C FILE: CALLBACK.F SUBROUTINE FOO(FUN,R) EXTERNAL FUN INTEGER I - REAL*8 R + REAL*8 R, FUN Cf2py intent(out) r R = 0D0 DO I=-5,5 diff --git a/doc/source/f2py/callback_session.dat b/doc/source/f2py/callback_session.dat index cd2f26084..460c9ce28 100644 --- a/doc/source/f2py/callback_session.dat +++ b/doc/source/f2py/callback_session.dat @@ -1,14 +1,26 @@ >>> import callback ->>> print callback.foo.__doc__ -foo - Function signature: - r = foo(fun,[fun_extra_args]) -Required arguments: - fun : call-back function -Optional arguments: - fun_extra_args := () input tuple -Return objects: - r : float -Call-back functions: +>>> print(callback.foo.__doc__) +r = foo(fun,[fun_extra_args]) + +Wrapper for ``foo``. + +Parameters +---------- +fun : call-back function + +Other Parameters +---------------- +fun_extra_args : input tuple, optional + Default: () + +Returns +------- +r : float + +Notes +----- +Call-back functions:: + def fun(i): return r Required arguments: i : input int @@ -17,7 +29,7 @@ Call-back functions: >>> def f(i): return i*i ... ->>> print callback.foo(f) +>>> print(callback.foo(f)) 110.0 ->>> print callback.foo(lambda i:1) +>>> print(callback.foo(lambda i:1)) 11.0 diff --git a/doc/source/f2py/common_session.dat b/doc/source/f2py/common_session.dat index 846fdaa07..0a38bec27 100644 --- a/doc/source/f2py/common_session.dat +++ b/doc/source/f2py/common_session.dat @@ -1,5 +1,5 @@ >>> import common ->>> print common.data.__doc__ +>>> print(common.data.__doc__) i - 'i'-scalar x - 'i'-array(4) a - 'f'-array(2,3) @@ -8,20 +8,23 @@ a - 'f'-array(2,3) >>> common.data.x[1] = 2 >>> common.data.a = [[1,2,3],[4,5,6]] >>> common.foo() - I= 5 - X=[ 0 2 0 0] +>>> common.foo() + I= 5 + X=[ 0 2 0 0 ] A=[ - [ 1., 2., 3.] - [ 4., 5., 6.] 
+ [ 1.00000000 , 2.00000000 , 3.00000000 ] + [ 4.00000000 , 5.00000000 , 6.00000000 ] ] >>> common.data.a[1] = 45 >>> common.foo() - I= 5 - X=[ 0 2 0 0] + I= 5 + X=[ 0 2 0 0 ] A=[ - [ 1., 2., 3.] - [ 45., 45., 45.] + [ 1.00000000 , 2.00000000 , 3.00000000 ] + [ 45.0000000 , 45.0000000 , 45.0000000 ] ] >>> common.data.a # a is Fortran-contiguous array([[ 1., 2., 3.], - [ 45., 45., 45.]],'f') + [ 45., 45., 45.]], dtype=float32) +>>> common.data.a.flags.f_contiguous +True
\ No newline at end of file diff --git a/doc/source/f2py/extcallback_session.dat b/doc/source/f2py/extcallback_session.dat index c22935ea0..5b97ab7cf 100644 --- a/doc/source/f2py/extcallback_session.dat +++ b/doc/source/f2py/extcallback_session.dat @@ -1,10 +1,10 @@ >>> import pfromf >>> pfromf.f2() Traceback (most recent call last): - File "<stdin>", line 1, in ? + File "<stdin>", line 1, in <module> pfromf.error: Callback fpy not defined (as an argument or module pfromf attribute). ->>> def f(): print "python f" +>>> def f(): print("python f") ... >>> pfromf.fpy = f >>> pfromf.f2() diff --git a/doc/source/f2py/ftype_session.dat b/doc/source/f2py/ftype_session.dat index 01f9febaf..e39cc128d 100644 --- a/doc/source/f2py/ftype_session.dat +++ b/doc/source/f2py/ftype_session.dat @@ -1,13 +1,13 @@ >>> import ftype ->>> print ftype.__doc__ -This module 'ftype' is auto-generated with f2py (version:2.28.198-1366). +>>> print(ftype.__doc__) +This module 'ftype' is auto-generated with f2py (version:2). Functions: foo(n=13) COMMON blocks: /data/ a,x(3) . ->>> type(ftype.foo),type(ftype.data) -(<type 'fortran'>, <type 'fortran'>) +>>> type(ftype.foo), type(ftype.data) +(<class 'fortran'>, <class 'fortran'>) >>> ftype.foo() IN FOO: N= 13 A= 0. X=[ 0. 0. 0.] >>> ftype.data.a = 3 @@ -18,4 +18,4 @@ COMMON blocks: >>> ftype.foo(24) IN FOO: N= 24 A= 3. X=[ 1. 45. 3.] 
>>> ftype.data.x -array([ 1., 45., 3.],'f') +array([ 1., 45., 3.], dtype=float32) diff --git a/doc/source/f2py/getting-started.rst b/doc/source/f2py/getting-started.rst index 3d8ea24e4..c600eee01 100644 --- a/doc/source/f2py/getting-started.rst +++ b/doc/source/f2py/getting-started.rst @@ -54,17 +54,23 @@ Python the Fortran subroutine ``FIB`` is accessible via ``fib1.fib``:: >>> import numpy >>> import fib1 - >>> print fib1.fib.__doc__ - fib - Function signature: - fib(a,[n]) - Required arguments: - a : input rank-1 array('d') with bounds (n) - Optional arguments: - n := len(a) input int - - >>> a = numpy.zeros(8,'d') + >>> print(fib1.fib.__doc__) + fib(a,[n]) + + Wrapper for ``fib``. + + Parameters + ---------- + a : input rank-1 array('d') with bounds (n) + + Other Parameters + ---------------- + n : input int, optional + Default: len(a) + + >>> a = numpy.zeros(8, 'd') >>> fib1.fib(a) - >>> print a + >>> print(a) [ 0. 1. 1. 2. 3. 5. 8. 13.] .. note:: @@ -76,22 +82,20 @@ Python the Fortran subroutine ``FIB`` is accessible via ``fib1.fib``:: * One can use different values for optional ``n``:: - >>> a1 = numpy.zeros(8,'d') - >>> fib1.fib(a1,6) - >>> print a1 + >>> a1 = numpy.zeros(8, 'd') + >>> fib1.fib(a1, 6) + >>> print(a1) [ 0. 1. 1. 2. 3. 5. 0. 0.] but an exception is raised when it is incompatible with the input array ``a``:: - >>> fib1.fib(a,10) - fib:n=10 + >>> fib1.fib(a, 10) Traceback (most recent call last): - File "<stdin>", line 1, in ? - fib.error: (len(a)>=n) failed for 1st keyword n + File "<stdin>", line 1, in <module> + fib.error: (len(a)>=n) failed for 1st keyword n: fib:n=10 >>> - This demonstrates one of the useful features in F2PY, that it, F2PY implements basic compatibility checks between related arguments in order to avoid any unexpected crashes. 
@@ -105,9 +109,9 @@ Python the Fortran subroutine ``FIB`` is accessible via ``fib1.fib``:: input array have no effect to the original argument, as demonstrated below:: - >>> a = numpy.ones(8,'i') + >>> a = numpy.ones(8, 'i') >>> fib1.fib(a) - >>> print a + >>> print(a) [1 1 1 1 1 1 1 1] Clearly, this is not an expected behaviour. The fact that the @@ -118,15 +122,15 @@ Python the Fortran subroutine ``FIB`` is accessible via ``fib1.fib``:: the attributes of an input array so that any changes made by Fortran routine will be effective also in input argument. For example, if one specifies ``intent(inplace) a`` (see below, how), then - the example above would read: + the example above would read:: - >>> a = numpy.ones(8,'i') + >>> a = numpy.ones(8, 'i') >>> fib1.fib(a) - >>> print a + >>> print(a) [ 0. 1. 1. 2. 3. 5. 8. 13.] However, the recommended way to get changes made by Fortran - subroutine back to python is to use ``intent(out)`` attribute. It + subroutine back to Python is to use ``intent(out)`` attribute. It is more efficient and a cleaner solution. * The usage of ``fib1.fib`` in Python is very similar to using @@ -193,15 +197,20 @@ one. In Python:: >>> import fib2 - >>> print fib2.fib.__doc__ - fib - Function signature: - a = fib(n) - Required arguments: - n : input int - Return objects: - a : rank-1 array('d') with bounds (n) - - >>> print fib2.fib(8) + >>> print(fib2.fib.__doc__) + a = fib(n) + + Wrapper for ``fib``. + + Parameters + ---------- + n : input int + + Returns + ------- + a : rank-1 array('d') with bounds (n) + + >>> print(fib2.fib(8)) [ 0. 1. 1. 2. 3. 5. 8. 13.] .. note:: @@ -213,8 +222,8 @@ In Python:: rules out any surprises that we experienced with ``fib1.fib``. * Note that by default using single ``intent(out)`` also implies - ``intent(hide)``. Argument that has ``intent(hide)`` attribute - specified, will not be listed in the argument list of a wrapper + ``intent(hide)``. 
Arguments that have the ``intent(hide)`` attribute + specified will not be listed in the argument list of a wrapper function. The quick and smart way @@ -249,13 +258,18 @@ Notice that the resulting wrapper to ``FIB`` is as "smart" as in previous case:: >>> import fib3 - >>> print fib3.fib.__doc__ - fib - Function signature: - a = fib(n) - Required arguments: - n : input int - Return objects: - a : rank-1 array('d') with bounds (n) - - >>> print fib3.fib(8) + >>> print(fib3.fib.__doc__) + a = fib(n) + + Wrapper for ``fib``. + + Parameters + ---------- + n : input int + + Returns + ------- + a : rank-1 array('d') with bounds (n) + + >>> print(fib3.fib(8)) [ 0. 1. 1. 2. 3. 5. 8. 13.] diff --git a/doc/source/f2py/moddata_session.dat b/doc/source/f2py/moddata_session.dat index 1ec212f8b..e3c758041 100644 --- a/doc/source/f2py/moddata_session.dat +++ b/doc/source/f2py/moddata_session.dat @@ -1,12 +1,11 @@ >>> import moddata ->>> print moddata.mod.__doc__ +>>> print(moddata.mod.__doc__) i - 'i'-scalar x - 'i'-array(4) a - 'f'-array(2,3) foo - Function signature: foo() - >>> moddata.mod.i = 5 >>> moddata.mod.x[:2] = [1,2] >>> moddata.mod.a = [[1,2,3],[4,5,6]] @@ -20,4 +19,6 @@ foo - Function signature: Setting a(1,2)=a(1,2)+3 >>> moddata.mod.a # a is Fortran-contiguous array([[ 1., 5., 3.], - [ 4., 5., 6.]],'f') + [ 4., 5., 6.]], dtype=float32) +>>> moddata.mod.a.flags.f_contiguous +True diff --git a/doc/source/f2py/python-usage.rst b/doc/source/f2py/python-usage.rst index 60167d01a..a7f2b3d86 100644 --- a/doc/source/f2py/python-usage.rst +++ b/doc/source/f2py/python-usage.rst @@ -8,7 +8,7 @@ type objects. Routine wrappers are callable ``fortran`` type objects while wrappers to Fortran data have attributes referring to data objects. -All ``fortran`` type object have attribute ``_cpointer`` that contains +All ``fortran`` type objects have attribute ``_cpointer`` that contains CObject referring to the C pointer of the corresponding Fortran/C function or variable in C level. 
Such CObjects can be used as a callback argument of F2PY generated functions to bypass Python C/API @@ -34,7 +34,7 @@ Scalar arguments ================= In general, a scalar argument of a F2PY generated wrapper function can -be ordinary Python scalar (integer, float, complex number) as well as +be an ordinary Python scalar (integer, float, complex number) as well as an arbitrary sequence object (list, tuple, array, string) of scalars. In the latter case, the first element of the sequence object is passed to Fortran routine as a scalar argument. @@ -45,7 +45,7 @@ float), F2PY does not raise any exception. In complex to real type-casting only the real part of a complex number is used. ``intent(inout)`` scalar arguments are assumed to be array objects in -order to *in situ* changes to be effective. It is recommended to use +order to have *in situ* changes be effective. It is recommended to use arrays with proper type but also other types work. Consider the following Fortran 77 code: @@ -75,8 +75,7 @@ expected, the string is truncated. If the length is smaller that expected, additional memory is allocated and filled with ``\0``. Because Python strings are immutable, an ``intent(inout)`` argument -expects an array version of a string in order to *in situ* changes to -be effective. +expects an array version of a string in order to have *in situ* changes be effective. Consider the following Fortran 77 code: @@ -99,7 +98,7 @@ arbitrary sequences that can be transformed to NumPy array objects. An exception is ``intent(inout)`` array arguments that always must be proper-contiguous and have proper type, otherwise an exception is raised. Another exception is ``intent(inplace)`` array arguments that -attributes will be changed in-situ if the argument has different type +attributes will be changed *in situ* if the argument has different type than expected (see ``intent(inplace)`` attribute for more information). 
@@ -129,11 +128,9 @@ and C-contiguous if the order is as follows:: A[0,0] A[0,1] A[1,0] A[1,1] -To test whether an array is C-contiguous, use ``.iscontiguous()`` -method of NumPy arrays. To test for Fortran contiguity, all -F2PY generated extension modules provide a function -``has_column_major_storage(<array>)``. This function is equivalent to -``<array>.flags.f_contiguous`` but more efficient. +To test whether an array is C-contiguous, use the ``.flags.c_contiguous`` +attribute of NumPy arrays. To test for Fortran contiguity, use the +``.flags.f_contiguous`` attribute. Usually there is no need to worry about how the arrays are stored in memory and whether the wrapped functions, being either Fortran or C @@ -146,11 +143,9 @@ the physical memory in your computer, then a care must be taken to use always proper-contiguous and proper type arguments. To transform input arrays to column major storage order before passing -them to Fortran routines, use a function -``as_column_major_storage(<array>)`` that is provided by all F2PY -generated extension modules. +them to Fortran routines, use the function ``numpy.asfortranarray(<array>)``. -Consider Fortran 77 code: +Consider the following Fortran 77 code: .. include:: array.f :literal: diff --git a/doc/source/f2py/scalar_session.dat b/doc/source/f2py/scalar_session.dat index 8aff097c2..3bb45ed68 100644 --- a/doc/source/f2py/scalar_session.dat +++ b/doc/source/f2py/scalar_session.dat @@ -1,21 +1,24 @@ >>> import scalar ->>> print scalar.foo.__doc__ -foo - Function signature: - foo(a,b) -Required arguments: - a : input float - b : in/output rank-0 array(float,'d') +>>> print(scalar.foo.__doc__) +foo(a,b) + +Wrapper for ``foo``. + +Parameters +---------- +a : input float +b : in/output rank-0 array(float,'d') ->>> scalar.foo(2,3) +>>> scalar.foo(2, 3) A= 2. B= 3. INCREMENT A AND B NEW A= 3. B= 4. 
>>> import numpy ->>> a=numpy.array(2) # these are integer rank-0 arrays ->>> b=numpy.array(3) ->>> scalar.foo(a,b) +>>> a = numpy.array(2) # these are integer rank-0 arrays +>>> b = numpy.array(3) +>>> scalar.foo(a, b) A= 2. B= 3. INCREMENT A AND B NEW A= 3. B= 4. ->>> print a,b # note that only b is changed in situ +>>> print(a, b) # note that only b is changed in situ 2 4 diff --git a/doc/source/f2py/signature-file.rst b/doc/source/f2py/signature-file.rst index 8e5a9710c..3a163ee23 100644 --- a/doc/source/f2py/signature-file.rst +++ b/doc/source/f2py/signature-file.rst @@ -14,7 +14,7 @@ Signature files may contain arbitrary Fortran code (so that Fortran codes can be considered as signature files). F2PY silently ignores Fortran constructs that are irrelevant for creating the interface. However, this includes also syntax errors. So, be careful not making -ones;-). +ones ;-). In general, the contents of signature files is case-sensitive. When scanning Fortran codes and writing a signature file, F2PY lowers all diff --git a/doc/source/f2py/spam_session.dat b/doc/source/f2py/spam_session.dat index 7f99d13f9..bd5832d88 100644 --- a/doc/source/f2py/spam_session.dat +++ b/doc/source/f2py/spam_session.dat @@ -1,5 +1,5 @@ >>> import spam >>> status = spam.system('whoami') pearu ->> status = spam.system('blah') +>>> status = spam.system('blah') sh: line 1: blah: command not found
\ No newline at end of file diff --git a/doc/source/f2py/string_session.dat b/doc/source/f2py/string_session.dat index cbae6b784..e8f7854d9 100644 --- a/doc/source/f2py/string_session.dat +++ b/doc/source/f2py/string_session.dat @@ -1,19 +1,22 @@ >>> import mystring ->>> print mystring.foo.__doc__ -foo - Function signature: - foo(a,b,c,d) -Required arguments: - a : input string(len=5) - b : in/output rank-0 array(string(len=5),'c') - c : input string(len=-1) - d : in/output rank-0 array(string(len=-1),'c') +>>> print(mystring.foo.__doc__) +foo(a,b,c,d) ->>> import numpy ->>> a=numpy.array('123') ->>> b=numpy.array('123') ->>> c=numpy.array('123') ->>> d=numpy.array('123') ->>> mystring.foo(a,b,c,d) +Wrapper for ``foo``. + +Parameters +---------- +a : input string(len=5) +b : in/output rank-0 array(string(len=5),'c') +c : input string(len=-1) +d : in/output rank-0 array(string(len=-1),'c') + +>>> from numpy import array +>>> a = array(b'123\0\0') +>>> b = array(b'123\0\0') +>>> c = array(b'123') +>>> d = array(b'123') +>>> mystring.foo(a, b, c, d) A=123 B=123 C=123 @@ -23,5 +26,5 @@ Required arguments: B=B23 C=C23 D=D23 ->>> a.tostring(),b.tostring(),c.tostring(),d.tostring() -('123', 'B23', '123', 'D23') +>>> a[()], b[()], c[()], d[()] +(b'123', b'B23', b'123', b'D2') diff --git a/doc/source/f2py/usage.rst b/doc/source/f2py/usage.rst index 5043ec430..6c3b4b6ef 100644 --- a/doc/source/f2py/usage.rst +++ b/doc/source/f2py/usage.rst @@ -6,11 +6,9 @@ F2PY can be used either as a command line tool ``f2py`` or as a Python module ``numpy.f2py``. While we try to install the command line tool as part of the numpy setup, some platforms like Windows make it difficult to reliably put the executable on the ``PATH``. 
We will refer to ``f2py`` -in this document but you may have to run it as a module +in this document but you may have to run it as a module:: -``` -python -m numpy.f2py -``` + python -m numpy.f2py If you run ``f2py`` with no arguments, and the line ``numpy Version`` at the end matches the NumPy version printed from ``python -m numpy.f2py``, then you @@ -50,9 +48,9 @@ distinguished by the usage of ``-c`` and ``-h`` switches: :: - f2py <options> <fortran files> \ - [[ only: <fortran functions> : ] \ - [ skip: <fortran functions> : ]]... \ + f2py -m <modulename> <options> <fortran files> \ + [[ only: <fortran functions> : ] \ + [ skip: <fortran functions> : ]]... \ [<fortran files> ...] The constructed extension module is saved as @@ -79,11 +77,9 @@ distinguished by the usage of ``-c`` and ``-h`` switches: functions. This feature enables using arbitrary C functions (defined in ``<includefile>``) in F2PY generated wrappers. - This option is deprecated. Use ``usercode`` statement to specify - C code snippets directly in signature files + .. note:: This option is deprecated. Use ``usercode`` statement to specify C code snippets directly in signature files. ``--[no-]wrap-functions`` - Create Fortran subroutine wrappers to Fortran functions. ``--wrap-functions`` is default because it ensures maximum portability and compiler independence. @@ -161,12 +157,29 @@ distinguished by the usage of ``-c`` and ``-h`` switches: for ``-l``. ``link-<resource>`` - Link extension module with <resource> as defined by ``numpy_distutils/system_info.py``. E.g. to link with optimized LAPACK libraries (vecLib on MacOSX, ATLAS elsewhere), use ``--link-lapack_opt``. See also ``--help-link`` switch. - + + .. note:: The ``f2py -c`` option must be applied either to an existing ``.pyf`` file (plus the source/object/library files) or one must specify the ``-m <modulename>`` option (plus the sources/object/library files). 
Use one of the following options: + + :: + + f2py -c -m fib1 fib1.f + + or + + :: + + f2py -m fib1 fib1.f -h fib1.pyf + f2py -c fib1.pyf fib1.f + + For more information, see `Building C and C++ Extensions`__ Python documentation for details. + + __ https://docs.python.org/3/extending/building.html + + When building an extension module, a combination of the following macros may be required for non-gcc Fortran compilers:: diff --git a/doc/source/reference/c-api/array.rst b/doc/source/reference/c-api/array.rst index 2eaf3a27a..ce8671a51 100644 --- a/doc/source/reference/c-api/array.rst +++ b/doc/source/reference/c-api/array.rst @@ -426,10 +426,8 @@ From other objects may be 0. Also, if *op* is not already an array (or does not expose the array interface), then a new array will be created (and filled from *op* using the sequence protocol). The new array will - have :c:data:`NPY_ARRAY_DEFAULT` as its flags member. The *context* argument - is passed to the :obj:`~numpy.class.__array__` method of *op* and is only used if - the array is constructed that way. Almost always this - parameter is ``NULL``. + have :c:data:`NPY_ARRAY_DEFAULT` as its flags member. The *context* + argument is unused. .. c:var:: NPY_ARRAY_C_CONTIGUOUS @@ -574,6 +572,8 @@ From other objects :c:data:`NPY_ARRAY_WRITEABLE` to PyArray_FromAny, where the writeable array may be a copy of the input. + `context` is not used. + When success (0 return value) is returned, either out_arr is filled with a non-NULL PyArrayObject and the rest of the parameters are untouched, or out_arr is @@ -677,10 +677,8 @@ From other objects PyObject* op, PyArray_Descr* dtype, PyObject* context) Return an ndarray object from a Python object that exposes the - :obj:`~numpy.class.__array__` method. 
The :obj:`~numpy.class.__array__` method can take 0, 1, or 2 - arguments ([dtype, context]) where *context* is used to pass - information about where the :obj:`~numpy.class.__array__` method is being called - from (currently only used in ufuncs). + :obj:`~numpy.class.__array__` method. The :obj:`~numpy.class.__array__` + method can take 0, or 1 argument ``([dtype])``. ``context`` is unused. .. c:function:: PyObject* PyArray_ContiguousFromAny( \ PyObject* op, int typenum, int min_depth, int max_depth) @@ -859,15 +857,16 @@ General check of Python Type conversion occurs. Otherwise, out will contain a borrowed reference to :c:data:`Py_NotImplemented` and no error condition is set. -.. c:function:: PyArray_HasArrayInterfaceType(op, type, context, out) +.. c:function:: PyArray_HasArrayInterfaceType(op, dtype, context, out) If ``op`` implements any part of the array interface, then ``out`` will contain a new reference to the newly created ndarray using the interface or ``out`` will contain ``NULL`` if an error during conversion occurs. Otherwise, out will contain a borrowed reference to Py_NotImplemented and no error condition is set. - This version allows setting of the type and context in the part of - the array interface that looks for the :obj:`~numpy.class.__array__` attribute. + This version allows setting of the dtype in the part of the array interface + that looks for the :obj:`~numpy.class.__array__` attribute. `context` is + unused. .. c:function:: PyArray_IsZeroDim(op) @@ -1223,7 +1222,7 @@ Converting data types .. c:function:: int PyArray_ObjectType(PyObject* op, int mintype) - This function is superceded by :c:func:`PyArray_MinScalarType` and/or + This function is superseded by :c:func:`PyArray_MinScalarType` and/or :c:func:`PyArray_ResultType`. This function is useful for determining a common type that two or @@ -1237,7 +1236,7 @@ Converting data types .. 
c:function:: void PyArray_ArrayType( \ PyObject* op, PyArray_Descr* mintype, PyArray_Descr* outtype) - This function is superceded by :c:func:`PyArray_ResultType`. + This function is superseded by :c:func:`PyArray_ResultType`. This function works similarly to :c:func:`PyArray_ObjectType` (...) except it handles flexible arrays. The *mintype* argument can have @@ -1248,7 +1247,7 @@ Converting data types .. c:function:: PyArrayObject** PyArray_ConvertToCommonType( \ PyObject* op, int* n) - The functionality this provides is largely superceded by iterator + The functionality this provides is largely superseded by iterator :c:type:`NpyIter` introduced in 1.6, with flag :c:data:`NPY_ITER_COMMON_DTYPE` or with the same dtype parameter for all operands. @@ -1439,7 +1438,7 @@ An ndarray can have a data segment that is not a simple contiguous chunk of well-behaved memory you can manipulate. It may not be aligned with word boundaries (very important on some platforms). It might have its data in a different byte-order than the machine recognizes. It -might not be writeable. It might be in Fortan-contiguous order. The +might not be writeable. It might be in Fortran-contiguous order. The array flags are used to indicate what can be said about data associated with an array. @@ -2488,7 +2487,7 @@ an element copier function as a primitive.:: Array Iterators --------------- -As of NumPy 1.6.0, these array iterators are superceded by +As of NumPy 1.6.0, these array iterators are superseded by the new array iterator, :c:type:`NpyIter`. 
An array iterator is a simple way to access the elements of an diff --git a/doc/source/reference/c-api/types-and-structures.rst b/doc/source/reference/c-api/types-and-structures.rst index 336dff211..60d8e420b 100644 --- a/doc/source/reference/c-api/types-and-structures.rst +++ b/doc/source/reference/c-api/types-and-structures.rst @@ -452,9 +452,9 @@ PyArrayDescr_Type and PyArray_Descr PyArray_ScalarKindFunc *scalarkind; int **cancastscalarkindto; int *cancastto; - PyArray_FastClipFunc *fastclip; - PyArray_FastPutmaskFunc *fastputmask; - PyArray_FastTakeFunc *fasttake; + PyArray_FastClipFunc *fastclip; /* deprecated */ + PyArray_FastPutmaskFunc *fastputmask; /* deprecated */ + PyArray_FastTakeFunc *fasttake; /* deprecated */ PyArray_ArgFunc *argmin; } PyArray_ArrFuncs; @@ -641,6 +641,16 @@ PyArrayDescr_Type and PyArray_Descr .. c:member:: void fastclip( \ void *in, npy_intp n_in, void *min, void *max, void *out) + .. deprecated:: 1.17 + The use of this function will give a deprecation warning when + calling ``np.clip``. Instead of this function, the datatype must + use ``PyUFunc_RegisterLoopForDescr`` to attach a custom + loop to ``np.core.umath.clip``, ``np.minimum``, and ``np.maximum``. + + .. deprecated:: 1.19 + Setting this function is deprecated and should always be ``NULL``, + if set, it will be ignored. + A function that reads ``n_in`` items from ``in``, and writes to ``out`` the read value if it is within the limits pointed to by ``min`` and ``max``, or the corresponding limit if outside. The @@ -650,6 +660,10 @@ PyArrayDescr_Type and PyArray_Descr .. c:member:: void fastputmask( \ void *in, void *mask, npy_intp n_in, void *values, npy_intp nv) + .. deprecated:: 1.19 + Setting this function is deprecated and should always be ``NULL``, + if set, it will be ignored. + A function that takes a pointer ``in`` to an array of ``n_in`` items, a pointer ``mask`` to an array of ``n_in`` boolean values, and a pointer ``vals`` to an array of ``nv`` items.
@@ -662,6 +676,10 @@ PyArrayDescr_Type and PyArray_Descr npy_intp n_outer, npy_intp m_middle, npy_intp nelem, \ NPY_CLIPMODE clipmode) + .. deprecated:: 1.19 + Setting this function is deprecated and should always be ``NULL``, + if set, it will be ignored. + A function that takes a pointer ``src`` to a C contiguous, behaved segment, interpreted as a 3-dimensional array of shape ``(n_outer, nindarray, nelem)``, a pointer ``indarray`` to a diff --git a/doc/source/reference/internals.rst b/doc/source/reference/internals.rst index 03d081bf9..aacfabcd3 100644 --- a/doc/source/reference/internals.rst +++ b/doc/source/reference/internals.rst @@ -1,3 +1,5 @@ +.. _numpy-internals: + *************** NumPy internals *************** diff --git a/doc/source/reference/routines.io.rst b/doc/source/reference/routines.io.rst index 8bb29b793..cf66eab49 100644 --- a/doc/source/reference/routines.io.rst +++ b/doc/source/reference/routines.io.rst @@ -1,3 +1,5 @@ +.. _routines.io: + Input and output **************** diff --git a/doc/source/release/1.13.0-notes.rst b/doc/source/release/1.13.0-notes.rst index 5d8c932fe..9da9a99d7 100644 --- a/doc/source/release/1.13.0-notes.rst +++ b/doc/source/release/1.13.0-notes.rst @@ -243,7 +243,7 @@ It is similar to Matlab's square bracket notation for creating block matrices. ``isin`` function, improving on ``in1d`` ---------------------------------------- -The new function ``isin`` tests whether each element of an N-dimensonal +The new function ``isin`` tests whether each element of an N-dimensional array is present anywhere within a second array. It is an enhancement of ``in1d`` that preserves the shape of the first array. diff --git a/doc/source/release/1.14.0-notes.rst b/doc/source/release/1.14.0-notes.rst index 462631de6..8ee876fd3 100644 --- a/doc/source/release/1.14.0-notes.rst +++ b/doc/source/release/1.14.0-notes.rst @@ -43,7 +43,7 @@ New functions floating-point scalars unambiguously with control of rounding and padding. 
* ``PyArray_ResolveWritebackIfCopy`` and ``PyArray_SetWritebackIfCopyBase``, - new C-API functions useful in achieving PyPy compatibity. + new C-API functions useful in achieving PyPy compatibility. Deprecations diff --git a/doc/source/release/1.16.5-notes.rst b/doc/source/release/1.16.5-notes.rst index 5b6eb585b..5bf576fd0 100644 --- a/doc/source/release/1.16.5-notes.rst +++ b/doc/source/release/1.16.5-notes.rst @@ -51,14 +51,14 @@ A total of 23 pull requests were merged for this release. * `#13933 <https://github.com/numpy/numpy/pull/13933>`__: MAINT/BUG/DOC: Fix errors in _add_newdocs * `#13984 <https://github.com/numpy/numpy/pull/13984>`__: BUG: fix byte order reversal for datetime64[ns] * `#13994 <https://github.com/numpy/numpy/pull/13994>`__: MAINT,BUG: Use nbytes to also catch empty descr during allocation -* `#14042 <https://github.com/numpy/numpy/pull/14042>`__: BUG: np.array cleared errors occured in PyMemoryView_FromObject +* `#14042 <https://github.com/numpy/numpy/pull/14042>`__: BUG: np.array cleared errors occurred in PyMemoryView_FromObject * `#14043 <https://github.com/numpy/numpy/pull/14043>`__: BUG: Fixes for Undefined Behavior Sanitizer (UBSan) errors. * `#14044 <https://github.com/numpy/numpy/pull/14044>`__: BUG: ensure that casting to/from structured is properly checked. * `#14045 <https://github.com/numpy/numpy/pull/14045>`__: MAINT: fix histogram*d dispatchers * `#14046 <https://github.com/numpy/numpy/pull/14046>`__: BUG: further fixup to histogram2d dispatcher. * `#14052 <https://github.com/numpy/numpy/pull/14052>`__: BUG: Replace contextlib.suppress for Python 2.7 * `#14056 <https://github.com/numpy/numpy/pull/14056>`__: BUG: fix compilation of 3rd party modules with Py_LIMITED_API... 
-* `#14057 <https://github.com/numpy/numpy/pull/14057>`__: BUG: Fix memory leak in dtype from dict contructor +* `#14057 <https://github.com/numpy/numpy/pull/14057>`__: BUG: Fix memory leak in dtype from dict constructor * `#14058 <https://github.com/numpy/numpy/pull/14058>`__: DOC: Document array_function at a higher level. * `#14084 <https://github.com/numpy/numpy/pull/14084>`__: BUG, DOC: add new recfunctions to `__all__` * `#14162 <https://github.com/numpy/numpy/pull/14162>`__: BUG: Remove stray print that causes a SystemError on python 3.7 diff --git a/doc/source/release/1.18.0-notes.rst b/doc/source/release/1.18.0-notes.rst index d59f985cd..15e0ad77f 100644 --- a/doc/source/release/1.18.0-notes.rst +++ b/doc/source/release/1.18.0-notes.rst @@ -195,7 +195,7 @@ adjustment to user- facing code. Specifically, code that either disallowed the calls to ``numpy.isinf`` or ``numpy.isnan`` or checked that they raised an exception will require adaptation, and code that mistakenly called ``numpy.fmax`` and ``numpy.fmin`` instead of ``numpy.maximum`` or -``numpy.minimum`` respectively will requre adjustment. This also affects +``numpy.minimum`` respectively will require adjustment. This also affects ``numpy.nanmax`` and ``numpy.nanmin``. (`gh-14841 <https://github.com/numpy/numpy/pull/14841>`__) diff --git a/doc/source/user/absolute_beginners.rst b/doc/source/user/absolute_beginners.rst new file mode 100644 index 000000000..acf9a35b5 --- /dev/null +++ b/doc/source/user/absolute_beginners.rst @@ -0,0 +1,1639 @@ + +**************************************** +NumPy: the absolute basics for beginners +**************************************** + +.. currentmodule:: numpy + +Welcome to the absolute beginner's guide to NumPy! If you have comments or +suggestions, please don’t hesitate to reach out! + + +Welcome to NumPy! +----------------- + +NumPy (**Numerical Python**) is an open source Python library that's used in +almost every field of science and engineering. 
It's the universal standard for +working with numerical data in Python, and it's at the core of the scientific +Python and PyData ecosystems. NumPy users include everyone from beginning coders +to experienced researchers doing state-of-the-art scientific and industrial +research and development. The NumPy API is used extensively in Pandas, SciPy, +Matplotlib, scikit-learn, scikit-image and most other data science and +scientific Python packages. + +The NumPy library contains multidimensional array and matrix data structures +(you'll find more information about this in later sections). It provides +**ndarray**, a homogeneous n-dimensional array object, with methods to +efficiently operate on it. NumPy can be used to perform a wide variety of +mathematical operations on arrays. It adds powerful data structures to Python +that guarantee efficient calculations with arrays and matrices and it supplies +an enormous library of high-level mathematical functions that operate on these +arrays and matrices. + +Learn more about :ref:`NumPy here <whatisnumpy>`! + +Installing NumPy +---------------- + +To install NumPy, we strongly recommend using a scientific Python distribution. +If you're looking for the full instructions for installing NumPy on your +operating system, you can `find all of the details here +<https://www.scipy.org/install.html>`_. + + + +If you already have Python, you can install NumPy with:: + + conda install numpy + +or :: + + pip install numpy + +If you don't have Python yet, you might want to consider using `Anaconda +<https://www.anaconda.com/>`_. It's the easiest way to get started. The good +thing about getting this distribution is the fact that you don’t need to worry +too much about separately installing NumPy or any of the major packages that +you’ll be using for your data analyses, like pandas, Scikit-Learn, etc. 
+ +You can find all of the installation details in the +`Installation <https://www.scipy.org/install.html>`_ section +at `SciPy <https://www.scipy.org>`_. + +How to import NumPy +------------------- + +Any time you want to use a package or library in your code, you first need to +make it accessible. + +In order to start using NumPy and all of the functions available in NumPy, +you'll need to import it. This can be easily done with this import statement:: + + import numpy as np + +(We shorten ``numpy`` to ``np`` in order to save time and also to keep code +standardized so that anyone working with your code can easily understand and +run it.) + +Reading the example code +------------------------ + +If you aren't already comfortable with reading tutorials that contain a lot of code, +you might not know how to interpret a code block that looks +like this:: + + >>> a2 = a[np.newaxis, :] + >>> a2.shape + (1, 6) + +If you aren't familiar with this style, it's very easy to understand. +If you see ``>>>``, you're looking at **input**, or the code that +you would enter. Everything that doesn't have ``>>>`` in front of it +is **output**, or the results of running your code. This is the style +you see when you run ``python`` on the command line, but if you're using IPython, you might see a different style. + + +What’s the difference between a Python list and a NumPy array? +-------------------------------------------------------------- + +NumPy gives you an enormous range of fast and efficient ways of creating arrays +and manipulating numerical data inside them. While a Python list can contain +different data types within a single list, all of the elements in a NumPy array +should be homogeneous. The mathematical operations that are meant to be performed +on arrays would be extremely inefficient if the arrays weren't homogeneous. + +**Why use NumPy?** + +NumPy arrays are faster and more compact than Python lists. An array consumes +less memory and is convenient to use.
NumPy uses much less memory to store data +and it provides a mechanism of specifying the data types. This allows the code +to be optimized even further. + +What is an array? +----------------- + +An array is a central data structure of the NumPy library. An array is a grid of +values and it contains information about the raw data, how to locate an element, +and how to interpret an element. It has a grid of elements that can be indexed +in :ref:`various ways <quickstart.indexing-slicing-and-iterating>`. +The elements are all of the same type, referred to as the array ``dtype``. + +An array can be indexed by a tuple of nonnegative integers, by booleans, by +another array, or by integers. The ``rank`` of the array is the number of +dimensions. The ``shape`` of the array is a tuple of integers giving the size of +the array along each dimension. + +One way we can initialize NumPy arrays is from Python lists, using nested lists +for two- or higher-dimensional data. + +For example:: + + >>> a = np.array([1, 2, 3, 4, 5, 6]) + +or:: + + >>> a = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]) + +We can access the elements in the array using square brackets. When you're +accessing elements, remember that indexing in NumPy starts at 0. That means that +if you want to access the first element in your array, you'll be accessing +element "0". + +:: + + >>> print(a[0]) + [1 2 3 4] + + +More information about arrays +----------------------------- + +*This section covers* ``1D array``, ``2D array``, ``ndarray``, ``vector``, ``matrix`` + +------ + +You might occasionally hear an array referred to as a "ndarray," which is +shorthand for "N-dimensional array." An N-dimensional array is simply an array +with any number of dimensions. You might also hear **1-D**, or one-dimensional +array, **2-D**, or two-dimensional array, and so on. The NumPy ``ndarray`` class +is used to represent both matrices and vectors. 
A **vector** is an array with a +single dimension (there's no difference +between row and column vectors), while a **matrix** refers to an +array with two dimensions. For **3-D** or higher dimensional arrays, the term +**tensor** is also commonly used. + +**What are the attributes of an array?** + +An array is usually a fixed-size container of items of the same type and size. +The number of dimensions and items in an array is defined by its shape. The +shape of an array is a tuple of non-negative integers that specify the sizes of +each dimension. + +In NumPy, dimensions are called **axes**. This means that if you have a 2D array +that looks like this:: + + [[0., 0., 0.], + [1., 1., 1.]] + +Your array has 2 axes. The first axis has a length of 2 and the second axis has +a length of 3. + +Just like in other Python container objects, the contents of an array can be +accessed and modified by indexing or slicing the array. Unlike the typical container +objects, different arrays can share the same data, so changes made on one array might +be visible in another. + +Array **attributes** reflect information intrinsic to the array itself. If you +need to get, or even set, properties of an array without creating a new array, +you can often access an array through its attributes. + +:ref:`Read more about array attributes here <arrays.ndarray>` and learn about +:ref:`array objects here <arrays>`. + + +How to create a basic array +--------------------------- + + +*This section covers* ``np.array()``, ``np.zeros()``, ``np.ones()``, +``np.empty()``, ``np.arange()``, ``np.linspace()``, ``dtype`` + +----- + +To create a NumPy array, you can use the function ``np.array()``. + +All you need to do to create a simple array is pass a list to it. If you choose +to, you can also specify the type of data in your list. +:ref:`You can find more information about data types here <arrays.dtypes>`. 
:: + + >>> import numpy as np + >>> a = np.array([1, 2, 3]) + +You can visualize your array this way: + +.. image:: images/np_array.png + +*Be aware that these visualizations are meant to simplify ideas and give you a basic understanding of NumPy concepts and mechanics. Arrays and array operations are much more complicated than are captured here!* + +Besides creating an array from a sequence of elements, you can easily create an +array filled with ``0``'s:: + + >>> np.zeros(2) + array([0., 0.]) + +Or an array filled with ``1``'s:: + + >>> np.ones(2) + array([1., 1.]) + +Or even an empty array! The function ``empty`` creates an array whose initial +content is random and depends on the state of the memory. The reason to use +``empty`` over ``zeros`` (or something similar) is speed - just make sure to +fill every element afterwards! :: + + >>> # Create an empty array with 2 elements + >>> np.empty(2) + +You can create an array with a range of elements:: + + >>> np.arange(4) + array([0, 1, 2, 3]) + +And even an array that contains a range of evenly spaced intervals. To do this, +you will specify the **first number**, **last number**, and the **step size**. :: + + >>> np.arange(2, 9, 2) + array([2, 4, 6, 8]) + +You can also use ``np.linspace()`` to create an array with values that are +spaced linearly in a specified interval:: + + >>> np.linspace(0, 10, num=5) + array([ 0. , 2.5, 5. , 7.5, 10. ]) + +**Specifying your data type** + +While the default data type is floating point (``np.float64``), you can explicitly +specify which data type you want using the ``dtype`` keyword. :: + + >>> x = np.ones(2, dtype=np.int64) + >>> x + array([1, 1]) + +:ref:`Learn more about creating arrays here <quickstart.array-creation>` + +Adding, removing, and sorting elements +-------------------------------------- + +*This section covers* ``np.sort()``, ``np.concatenate()`` + +----- + +Sorting an element is simple with ``np.sort()``. 
You can specify the axis, kind, +and order when you call the function. + +If you start with this array:: + + >>> arr = np.array([2, 1, 5, 3, 7, 4, 6, 8]) + +You can quickly sort the numbers in ascending order with:: + + >>> np.sort(arr) + array([1, 2, 3, 4, 5, 6, 7, 8]) + +In addition to sort, which returns a sorted copy of an array, you can use: + +- `argsort`, which is an indirect sort along a specified axis, +- `lexsort`, which is an indirect stable sort on multiple keys, +- `searchsorted`, which will find elements in a sorted array, and +- `partition`, which is a partial sort. + +To read more about sorting an array, see: `sort`. + +If you start with these arrays:: + + >>> a = np.array([1, 2, 3, 4]) + >>> b = np.array([5, 6, 7, 8]) + +You can concatenate them with ``np.concatenate()``. :: + + >>> np.concatenate((a, b)) + array([1, 2, 3, 4, 5, 6, 7, 8]) + +Or, if you start with these arrays:: + + >>> x = np.array([[1, 2], [3, 4]]) + >>> y = np.array([[5, 6]]) + +You can concatenate them with:: + + >>> np.concatenate((x, y), axis=0) + array([[1, 2], + [3, 4], + [5, 6]]) + +In order to remove elements from an array, it's simple to use indexing to select +the elements that you want to keep. + +To read more about concatenate, see: `concatenate`. + + +How do you know the shape and size of an array? +----------------------------------------------- + +*This section covers* ``ndarray.ndim``, ``ndarray.size``, ``ndarray.shape`` + +----- + +``ndarray.ndim`` will tell you the number of axes, or dimensions, of the array. + +``ndarray.size`` will tell you the total number of elements of the array. This +is the *product* of the elements of the array's shape. + +``ndarray.shape`` will display a tuple of integers that indicate the number of +elements stored along each dimension of the array. If, for example, you have a +2-D array with 2 rows and 3 columns, the shape of your array is ``(2, 3)``. 
+ +For example, if you create this array:: + + >>> array_example = np.array([[[0, 1, 2, 3], + ... [4, 5, 6, 7]], + ... + ... [[0, 1, 2, 3], + ... [4, 5, 6, 7]], + ... + ... [[0 ,1 ,2, 3], + ... [4, 5, 6, 7]]]) + +To find the number of dimensions of the array, run:: + + >>> array_example.ndim + 3 + +To find the total number of elements in the array, run:: + + >>> array_example.size + 24 + +And to find the shape of your array, run:: + + >>> array_example.shape + (3, 2, 4) + + +Can you reshape an array? +------------------------- + +*This section covers* ``arr.reshape()`` + +----- + +**Yes!** + +Using ``arr.reshape()`` will give a new shape to an array without changing the +data. Just remember that when you use the reshape method, the array you want to +produce needs to have the same number of elements as the original array. If you +start with an array with 12 elements, you'll need to make sure that your new +array also has a total of 12 elements. + +If you start with this array:: + + >>> a = np.arange(6) + >>> print(a) + [0 1 2 3 4 5] + +You can use ``reshape()`` to reshape your array. For example, you can reshape +this array to an array with three rows and two columns:: + + >>> b = a.reshape(3, 2) + >>> print(b) + [[0 1] + [2 3] + [4 5]] + +With ``np.reshape``, you can specify a few optional parameters:: + + >>> numpy.reshape(a, newshape, order) + +``a`` is the array to be reshaped. + +``newshape`` is the new shape you want. You can specify an integer or a tuple of +integers. If you specify an integer, the result will be an array of that length. +The shape should be compatible with the original shape. + +``order:`` ``C`` means to read/write the elements using C-like index order, +``F`` means to read/write the elements using Fortran-like index order, ``A`` +means to read/write the elements in Fortran-like index order if a is Fortran +contiguous in memory, C-like order otherwise. (This is an optional parameter and +doesn't need to be specified.) 
+ +If you want to learn more about C and Fortran order, you can +:ref:`read more about the internal organization of NumPy arrays here <numpy-internals>`. +Essentially, C and Fortran orders have to do with how indices correspond +to the order the array is stored in memory. In Fortran, when moving through +the elements of a two-dimensional array as it is stored in memory, the **first** +index is the most rapidly varying index. As the first index moves to the next +row as it changes, the matrix is stored one column at a time. +This is why Fortran is thought of as a **Column-major language**. +In C on the other hand, the **last** index changes +the most rapidly. The matrix is stored by rows, making it a **Row-major +language**. What you do for C or Fortran depends on whether it's more important +to preserve the indexing convention or not reorder the data. + +:ref:`Learn more about shape manipulation here <quickstart.shape-manipulation>`. + + +How to convert a 1D array into a 2D array (how to add a new axis to an array) +----------------------------------------------------------------------------- + +*This section covers* ``np.newaxis``, ``np.expand_dims`` + +----- + +You can use ``np.newaxis`` and ``np.expand_dims`` to increase the dimensions of +your existing array. + +Using ``np.newaxis`` will increase the dimensions of your array by one dimension +when used once. This means that a **1D** array will become a **2D** array, a +**2D** array will become a **3D** array, and so on. + +For example, if you start with this array:: + + >>> a = np.array([1, 2, 3, 4, 5, 6]) + >>> a.shape + (6,) + +You can use ``np.newaxis`` to add a new axis:: + + >>> a2 = a[np.newaxis, :] + >>> a2.shape + (1, 6) + +You can explicitly convert a 1D array with either a row vector or a column +vector using ``np.newaxis``. 
For example, you can convert a 1D array to a row +vector by inserting an axis along the first dimension:: + + >>> row_vector = a[np.newaxis, :] + >>> row_vector.shape + (1, 6) + +Or, for a column vector, you can insert an axis along the second dimension:: + + >>> col_vector = a[:, np.newaxis] + >>> col_vector.shape + (6, 1) + +You can also expand an array by inserting a new axis at a specified position +with ``np.expand_dims``. + +For example, if you start with this array:: + + >>> a = np.array([1, 2, 3, 4, 5, 6]) + >>> a.shape + (6,) + +You can use ``np.expand_dims`` to add an axis at index position 1 with:: + + >>> b = np.expand_dims(a, axis=1) + >>> b.shape + (6, 1) + +You can add an axis at index position 0 with:: + + >>> c = np.expand_dims(a, axis=0) + >>> c.shape + (1, 6) + +Find more information about :ref:`newaxis here <arrays.indexing>` and +``expand_dims`` at `expand_dims`. + + +Indexing and slicing +-------------------- + +You can index and slice NumPy arrays in the same ways you can slice Python +lists. :: + + >>> data = np.array([1, 2, 3]) + + >>> data[1] + 2 + >>> data[0:2] + array([1, 2]) + >>> data[1:] + array([2, 3]) + >>> data[-2:] + array([2, 3]) + +You can visualize it this way: + +.. image:: images/np_indexing.png + + +You may want to take a section of your array or specific array elements to use +in further analysis or additional operations. To do that, you'll need to subset, +slice, and/or index your arrays. + +If you want to select values from your array that fulfill certain conditions, +it's straightforward with NumPy. + +For example, if you start with this array:: + + >>> a = np.array([[1 , 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]) + +You can easily print all of the values in the array that are less than 5. :: + + >>> print(a[a < 5]) + [1 2 3 4] + +You can also select, for example, numbers that are equal to or greater than 5, +and use that condition to index an array. 
:: + + >>> five_up = (a >= 5) + >>> print(a[five_up]) + [ 5 6 7 8 9 10 11 12] + +You can select elements that are divisible by 2:: + + >>> divisible_by_2 = a[a%2==0] + >>> print(divisible_by_2) + [ 2 4 6 8 10 12] + +Or you can select elements that satisfy two conditions using the ``&`` and ``|`` +operators:: + + >>> c = a[(a > 2) & (a < 11)] + >>> print(c) + [ 3 4 5 6 7 8 9 10] + +You can also make use of the logical operators **&** and **|** in order to +return boolean values that specify whether or not the values in an array fulfill +a certain condition. This can be useful with arrays that contain names or other +categorical values. :: + + >>> five_up = (a > 5) | (a == 5) + >>> print(five_up) + [[False False False False] + [ True True True True] + [ True True True True]] + +You can also use ``np.nonzero()`` to select elements or indices from an array. + +Starting with this array:: + + >>> a = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]) + +You can use ``np.nonzero()`` to print the indices of elements that are, for +example, less than 5:: + + >>> b = np.nonzero(a < 5) + >>> print(b) + (array([0, 0, 0, 0]), array([0, 1, 2, 3])) + +In this example, a tuple of arrays was returned: one for each dimension. The +first array represents the row indices where these values are found, and the +second array represents the column indices where the values are found. + +If you want to generate a list of coordinates where the elements exist, you can +zip the arrays, iterate over the list of coordinates, and print them. For +example:: + + >>> list_of_coordinates= list(zip(b[0], b[1])) + + >>> for coord in list_of_coordinates: + ... print(coord) + (0, 0) + (0, 1) + (0, 2) + (0, 3) + +You can also use ``np.nonzero()`` to print the elements in an array that are less +than 5 with:: + + >>> print(a[b]) + [1 2 3 4] + +If the element you're looking for doesn't exist in the array, then the returned +array of indices will be empty. 
For example::
+
+    >>> not_there = np.nonzero(a == 42)
+    >>> print(not_there)
+    (array([], dtype=int64), array([], dtype=int64))
+
+Learn more about :ref:`indexing and slicing here <quickstart.indexing-slicing-and-iterating>`
+and :ref:`here <basics.indexing>`.
+
+Read more about using the nonzero function at: `nonzero`.
+
+
+How to create an array from existing data
+-----------------------------------------
+
+*This section covers* ``slicing and indexing``, ``np.vstack()``, ``np.hstack()``,
+``np.hsplit()``, ``.view()``, ``copy()``
+
+-----
+
+You can easily create a new array from a section of an existing array.
+
+Let's say you have this array:
+
+::
+
+    >>> a = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
+
+You can create a new array from a section of your array any time by specifying
+where you want to slice your array. ::
+
+    >>> arr1 = a[3:8]
+    >>> arr1
+    array([4, 5, 6, 7, 8])
+
+Here, you grabbed a section of your array from index position 3 up to (but not
+including) index position 8.
+
+You can also stack two existing arrays, both vertically and horizontally. Let's
+say you have two arrays, ``a1`` and ``a2``::
+
+    >>> a1 = np.array([[1, 1],
+    ...                [2, 2]])
+
+    >>> a2 = np.array([[3, 3],
+    ...                [4, 4]])
+
+You can stack them vertically with ``vstack``::
+
+    >>> np.vstack((a1, a2))
+    array([[1, 1],
+           [2, 2],
+           [3, 3],
+           [4, 4]])
+
+Or stack them horizontally with ``hstack``::
+
+    >>> np.hstack((a1, a2))
+    array([[1, 1, 3, 3],
+           [2, 2, 4, 4]])
+
+You can split an array into several smaller arrays using ``hsplit``. You can
+specify either the number of equally shaped arrays to return or the columns
+*after* which the division should occur.
+
+Let's say you have this array::
+
+    >>> x = np.arange(1, 25).reshape(2, 12)
+    >>> x
+    array([[ 1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12],
+           [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]])
+
+If you wanted to split this array into three equally shaped arrays, you would
+run::
+
+    >>> np.hsplit(x, 3)
+    [array([[ 1,  2,  3,  4],
+           [13, 14, 15, 16]]), array([[ 5,  6,  7,  8],
+           [17, 18, 19, 20]]), array([[ 9, 10, 11, 12],
+           [21, 22, 23, 24]])]
+
+If you wanted to split your array after the third and fourth column, you'd run::
+
+    >>> np.hsplit(x, (3, 4))
+    [array([[ 1,  2,  3],
+           [13, 14, 15]]), array([[ 4],
+           [16]]), array([[ 5,  6,  7,  8,  9, 10, 11, 12],
+           [17, 18, 19, 20, 21, 22, 23, 24]])]
+
+:ref:`Learn more about stacking and splitting arrays here <quickstart.stacking-arrays>`.
+
+You can use the ``view`` method to create a new array object that looks at the
+same data as the original array (a *shallow copy*).
+
+Views are an important NumPy concept! NumPy functions, as well as operations
+like indexing and slicing, will return views whenever possible. This saves
+memory and is faster (no copy of the data has to be made). However it's
+important to be aware of this - modifying data in a view also modifies the
+original array!
+
+Let's say you create this array::
+
+    >>> a = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
+
+Now we create an array ``b1`` by slicing ``a`` and modify the first element of
+``b1``. This will modify the corresponding element in ``a`` as well! ::
+
+    >>> b1 = a[0, :]
+    >>> b1
+    array([1, 2, 3, 4])
+    >>> b1[0] = 99
+    >>> b1
+    array([99,  2,  3,  4])
+    >>> a
+    array([[99,  2,  3,  4],
+           [ 5,  6,  7,  8],
+           [ 9, 10, 11, 12]])
+
+Using the ``copy`` method will make a complete copy of the array and its data (a
+*deep copy*). To use this on your array, you could run::
+
+    >>> b2 = a.copy()
+
+:ref:`Learn more about copies and views here <quickstart.copies-and-views>`.
+ + +Basic array operations +---------------------- + +*This section covers addition, subtraction, multiplication, division, and more* + +----- + +Once you've created your arrays, you can start to work with them. Let's say, +for example, that you've created two arrays, one called "data" and one called +"ones" + +.. image:: images/np_array_dataones.png + +You can add the arrays together with the plus sign. + +:: + + data + ones + +.. image:: images/np_data_plus_ones.png + +You can, of course, do more than just addition! + +:: + + data - ones + data * data + data / data + +.. image:: images/np_sub_mult_divide.png + +Basic operations are simple with NumPy. If you want to find the sum of the +elements in an array, you'd use ``sum()``. This works for 1D arrays, 2D arrays, +and arrays in higher dimensions. :: + + >>> a = np.array([1, 2, 3, 4]) + + >>> a.sum() + 10 + +To add the rows or the columns in a 2D array, you would specify the axis. + +If you start with this array:: + + >>> b = np.array([[1, 1], [2, 2]]) + +You can sum the rows with:: + + >>> b.sum(axis=0) + array([3, 3]) + +You can sum the columns with:: + + >>> b.sum(axis=1) + array([2, 4]) + +:ref:`Learn more about basic operations here <quickstart.basic-operations>`. + + +Broadcasting +------------ + +There are times when you might want to carry out an operation between an array +and a single number (also called *an operation between a vector and a scalar*) +or between arrays of two different sizes. For example, your array (we'll call it +"data") might contain information about distance in miles but you want to +convert the information to kilometers. You can perform this operation with:: + + >>> data * 1.6 + +.. image:: images/np_multiply_broadcasting.png + +NumPy understands that the multiplication should happen with each cell. That +concept is called **broadcasting**. Broadcasting is a mechanism that allows +NumPy to perform operations on arrays of different shapes. 
The dimensions of +your array must be compatible, for example, when the dimensions of both arrays +are equal or when one of them is 1. If the dimensions are not compatible, you +will get a ``ValueError``. + +:ref:`Learn more about broadcasting here <basics.broadcasting>`. + + +More useful array operations +---------------------------- + +*This section covers maximum, minimum, sum, mean, product, standard deviation, and more* + +----- + +NumPy also performs aggregation functions. In addition to ``min``, ``max``, and +``sum``, you can easily run ``mean`` to get the average, ``prod`` to get the +result of multiplying the elements together, ``std`` to get the standard +deviation, and more. :: + + >>> data.max() + >>> data.min() + >>> data.sum() + +.. image:: images/np_aggregation.png + +Let's start with this array, called "a" :: + + >>> a = np.array([[0.45053314, 0.17296777, 0.34376245, 0.5510652], + ... [0.54627315, 0.05093587, 0.40067661, 0.55645993], + ... [0.12697628, 0.82485143, 0.26590556, 0.56917101]]) + +It's very common to want to aggregate along a row or column. By default, every +NumPy aggregation function will return the aggregate of the entire array. To +find the sum or the minimum of the elements in your array, run:: + + >>> a.sum() + 4.8595784 + +Or:: + + >>> a.min() + 0.05093587 + +You can specify on which axis you want the aggregation function to be computed. +For example, you can find the minimum value within each column by specifying +``axis=0``. :: + + >>> a.min(axis=0) + array([0.12697628, 0.05093587, 0.26590556, 0.5510652 ]) + +The four values listed above correspond to the number of columns in your array. +With a four-column array, you will get four values as your result. + +Read more about :ref:`array methods here <array.ndarray.methods>`. + + +Creating matrices +----------------- + +You can pass Python lists of lists to create a 2-D array (or "matrix") to +represent them in NumPy. :: + + >>> np.array([[1, 2], [3, 4]]) + +.. 
image:: images/np_create_matrix.png + +Indexing and slicing operations are useful when you're manipulating matrices:: + + >>> data[0, 1] + >>> data[1 : 3] + >>> data[0 : 2, 0] + +.. image:: images/np_matrix_indexing.png + +You can aggregate matrices the same way you aggregated vectors:: + + >>> data.max() + >>> data.min() + >>> data.sum() + +.. image:: images/np_matrix_aggregation.png + +You can aggregate all the values in a matrix and you can aggregate them across +columns or rows using the ``axis`` parameter:: + + >>> data.max(axis=0) + >>> data.max(axis=1) + +.. image:: images/np_matrix_aggregation_row.png + +Once you've created your matrices, you can add and multiply them using +arithmetic operators if you have two matrices that are the same size. :: + + >>> data + ones + +.. image:: images/np_matrix_arithmetic.png + +You can do these arithmetic operations on matrices of different sizes, but only +if one matrix has only one column or one row. In this case, NumPy will use its +broadcast rules for the operation. :: + + >>> data + ones_row + +.. image:: images/np_matrix_broadcasting.png + +Be aware that when NumPy prints N-dimensional arrays, the last axis is looped +over the fastest while the first axis is the slowest. That means that:: + + >>> np.ones((4, 3, 2)) + +Will print out like this:: + + array([[[1., 1.], + [1., 1.], + [1., 1.]], + + [[1., 1.], + [1., 1.], + [1., 1.]], + + [[1., 1.], + [1., 1.], + [1., 1.]], + + [[1., 1.], + [1., 1.], + [1., 1.]]]) + +There are often instances where we want NumPy to initialize the values of an +array. NumPy offers functions like ``ones()`` and ``zeros()``, and the +``random.Generator`` class for random number generation for that. +All you need to do is pass in the number of elements you want it to generate:: + + >>> np.ones(3) + >>> np.zeros(3) + >>> np.random.random(3) # the simplest way to generate random numbers + +.. 
image:: images/np_ones_zeros_random.png + +You can also use ``ones()``, ``zeros()``, and ``random()`` to create +an array if you give them a tuple describing the dimensions of the matrix:: + + >>> np.ones((3, 2)) + >>> np.zeros((3, 2)) + >>> rng = np.random.default_rng() # the better way to generate random numbers + >>> rng.random() + +.. image:: images/np_ones_zeros_matrix.png + +Read more about creating arrays, filled with ``0``'s, ``1``'s, other values or +uninitialized, at :ref:`array creation routines <routines.array-creation>`. + + +Generating random numbers +------------------------- + +The use of random number generation is an important part of the configuration +and evaluation of many numerical and machine learning algorithms. Whether you +need to randomly initialize weights in an artificial neural network, split data +into random sets, or randomly shuffle your dataset, being able to generate +random numbers (actually, repeatable pseudo-random numbers) is essential. + +With ``Generator.integers``, you can generate random integers from low (remember +that this is inclusive with NumPy) to high (exclusive). You can set +``endpoint=True`` to make the high number inclusive. + +You can generate a 2 x 4 array of random integers between 0 and 4 with:: + + >>> rng.integers(5, size=(2, 4)) + array([[4, 0, 2, 1], + [3, 2, 2, 0]]) + +:ref:`Read more about random number generation here <numpyrandom>`. + + +How to get unique items and counts +---------------------------------- + +*This section covers* ``np.unique()`` + +----- + +You can find the unique elements in an array easily with ``np.unique``. 
+
+For example, if you start with this array::
+
+    >>> a = np.array([11, 11, 12, 13, 14, 15, 16, 17, 12, 13, 11, 14, 18, 19, 20])
+
+you can use ``np.unique`` to print the unique values in your array::
+
+    >>> unique_values = np.unique(a)
+    >>> print(unique_values)
+    [11 12 13 14 15 16 17 18 19 20]
+
+To get the indices of unique values in a NumPy array (an array of first index
+positions of unique values in the array), just pass the ``return_index``
+argument in ``np.unique()`` as well as your array. ::
+
+    >>> unique_values, indices_list = np.unique(a, return_index=True)
+    >>> print(indices_list)
+    [ 0  2  3  4  5  6  7 12 13 14]
+
+You can pass the ``return_counts`` argument in ``np.unique()`` along with your
+array to get the frequency count of unique values in a NumPy array. ::
+
+    >>> unique_values, occurrence_count = np.unique(a, return_counts=True)
+    >>> print(occurrence_count)
+    [3 2 2 2 1 1 1 1 1 1]
+
+This also works with 2D arrays!
+If you start with this array::
+
+    >>> a_2d = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [1, 2, 3, 4]])
+
+You can find unique values with::
+
+    >>> unique_values = np.unique(a_2d)
+    >>> print(unique_values)
+    [ 1  2  3  4  5  6  7  8  9 10 11 12]
+
+If the axis argument isn't passed, your 2D array will be flattened.
+
+If you want to get the unique rows or columns, make sure to pass the ``axis``
+argument. To find the unique rows, specify ``axis=0`` and for columns, specify
+``axis=1``.
::
+
+    >>> unique_rows = np.unique(a_2d, axis=0)
+    >>> print(unique_rows)
+    [[ 1  2  3  4]
+     [ 5  6  7  8]
+     [ 9 10 11 12]]
+
+To get the unique rows, occurrence count, and index position, you can use::
+
+    >>> unique_rows, indices, occurrence_count = np.unique(a_2d, axis=0,
+    ...                                                    return_counts=True, return_index=True)
+    >>> print('Unique Rows: ', '\n', unique_rows)
+    >>> print('Occurrence Count:', '\n', occurrence_count)
+    >>> print('Indices: ', '\n', indices)
+    Unique Rows:
+    [[ 1  2  3  4]
+     [ 5  6  7  8]
+     [ 9 10 11 12]]
+    Occurrence Count:
+    [2 1 1]
+    Indices:
+    [0 1 2]
+
+To learn more about finding the unique elements in an array, see `unique`.
+
+
+Transposing and reshaping a matrix
+----------------------------------
+
+*This section covers* ``arr.reshape()``, ``arr.transpose()``, ``arr.T``
+
+-----
+
+It's common to need to transpose your matrices. NumPy arrays have the property
+``T`` that allows you to transpose a matrix.
+
+.. image:: images/np_transposing_reshaping.png
+
+You may also need to switch the dimensions of a matrix. This can happen when,
+for example, you have a model that expects a certain input shape that is
+different from your dataset. This is where the ``reshape`` method can be useful.
+You simply need to pass in the new dimensions that you want for the matrix. ::
+
+    >>> data.reshape(2, 3)
+    >>> data.reshape(3, 2)
+
+.. image:: images/np_reshape.png
+
+You can also use ``.transpose`` to reverse or change the axes of an array
+according to the values you specify.
+
+If you start with this array::
+
+    >>> arr = np.arange(6).reshape((2, 3))
+    >>> arr
+    array([[0, 1, 2],
+           [3, 4, 5]])
+
+You can transpose your array with ``arr.transpose()``. ::
+
+    >>> arr.transpose()
+    array([[0, 3],
+           [1, 4],
+           [2, 5]])
+
+To learn more about transposing and reshaping arrays, see `transpose` and
+`reshape`.
+ + +How to reverse an array +----------------------- + +*This section covers* ``np.flip`` + +----- + +NumPy's ``np.flip()`` function allows you to flip, or reverse, the contents of +an array along an axis. When using ``np.flip``, specify the array you would like +to reverse and the axis. If you don't specify the axis, NumPy will reverse the +contents along all of the axes of your input array. + +**Reversing a 1D array** + +If you begin with a 1D array like this one:: + + >>> arr = np.array([1, 2, 3, 4, 5, 6, 7, 8]) + +You can reverse it with:: + + >>> reversed_arr = np.flip(arr) + +If you want to print your reversed array, you can run:: + + >>> print('Reversed Array: ', reversed_arr) + Reversed Array: [8 7 6 5 4 3 2 1] + +**Reversing a 2D array** + +A 2D array works much the same way. + +If you start with this array:: + + >>> arr_2d = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]) + +You can reverse the content in all of the rows and all of the columns with:: + + >>> reversed_arr = np.flip(arr_2d) + + >>> print('Reversed Array: ') + >>> print(reversed_arr) + Reversed Array: + [[12 11 10 9] + [ 8 7 6 5] + [ 4 3 2 1]] + +You can easily reverse only the *rows* with:: + + >>> reversed_arr_rows = np.flip(arr_2d, axis=0) + + >>> print('Reversed Array: ') + >>> print(reversed_arr_rows) + Reversed Array: + [[ 9 10 11 12] + [ 5 6 7 8] + [ 1 2 3 4]] + +Or reverse only the *columns* with:: + + >>> reversed_arr_columns = np.flip(arr_2d, axis=1) + + >>> print('Reversed Array columns: ') + >>> print(reversed_arr_columns) + [[ 4 3 2 1] + [ 8 7 6 5] + [12 11 10 9]] + +You can also reverse the contents of only one column or row. 
For example, you +can reverse the contents of the row at index position 1 (the second row):: + + >>> arr_2d[1] = np.flip(arr_2d[1]) + + >>> print('Reversed Array: ') + >>> print(arr_2d) + Reversed Array: + [[ 1 2 3 4] + [ 5 6 7 8] + [ 9 10 11 12]] + +You can also reverse the column at index position 1 (the second column):: + + >>> arr_2d[:,1] = np.flip(arr_2d[:,1]) + + >>> print('Reversed Array: ') + >>> print(arr_2d) + Reversed Array: + [[ 1 10 3 4] + [ 5 6 7 8] + [ 9 2 11 12]] + +Read more about reversing arrays at `flip`. + + +Reshaping and flattening multidimensional arrays +------------------------------------------------ + +*This section covers* ``.flatten()``, ``ravel()`` + +----- + +There are two popular ways to flatten an array: ``.flatten()`` and ``.ravel()``. +The primary difference between the two is that the new array created using +``ravel()`` is actually a reference to the parent array (i.e., a "view"). This +means that any changes to the new array will affect the parent array as well. +Since ``ravel`` does not create a copy, it's memory efficient. + +If you start with this array:: + + >>> x = np.array([[1 , 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]) + +You can use ``flatten`` to flatten your array into a 1D array. :: + + >>> x.flatten() + array([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]) + +When you use ``flatten``, changes to your new array won't change the parent +array. + +For example:: + + >>> a1 = x.flatten() + >>> a1[0] = 99 + >>> print(x) # Original array + [[ 1 2 3 4] + [ 5 6 7 8] + [ 9 10 11 12]] + >>> print(a1) # New array + [99 2 3 4 5 6 7 8 9 10 11 12] + +But when you use ``ravel``, the changes you make to the new array will affect +the parent array. + +For example:: + + >>> a2 = x.ravel() + >>> a2[0] = 98 + >>> print(x) # Original array + [[98 2 3 4] + [ 5 6 7 8] + [ 9 10 11 12]] + >>> print(a2) # New array + [98 2 3 4 5 6 7 8 9 10 11 12] + +Read more about ``flatten`` at `ndarray.flatten` and ``ravel`` at `ravel`. 
+ + +How to access the docstring for more information +------------------------------------------------ + +*This section covers* ``help()``, ``?``, ``??`` + +----- + +When it comes to the data science ecosystem, Python and NumPy are built with the +user in mind. One of the best examples of this is the built-in access to +documentation. Every object contains the reference to a string, which is known +as the **docstring**. In most cases, this docstring contains a quick and concise +summary of the object and how to use it. Python has a built-in ``help()`` +function that can help you access this information. This means that nearly any +time you need more information, you can use ``help()`` to quickly find the +information that you need. + +For example, :: + + >>> help(max) + +Will return:: + + Help on built-in function max in module builtins: + + max(...) max(iterable, *[, default=obj, key=func]) -> value max(arg1, arg2, + *args, *[, key=func]) -> value + + With a single iterable argument, return its biggest item. The default + keyword-only argument specifies an object to return if the provided + iterable is empty. With two or more arguments, return the largest + argument. + +Because access to additional information is so useful, IPython uses the ``?`` +character as a shorthand for accessing this documentation along with other +relevant information. IPython is a command shell for interactive computing in +multiple languages. +`You can find more information about IPython here <https://ipython.org/>`_. + +For example, :: + + >>> max? + +Will return:: + + Docstring: + max(iterable, *[, default=obj, key=func]) -> value + max(arg1, arg2, *args, *[, key=func]) -> value + + With a single iterable argument, return its biggest item. The + default keyword-only argument specifies an object to return if + the provided iterable is empty. + With two or more arguments, return the largest argument. 
+    Type:      builtin_function_or_method
+
+You can even use this notation for object methods and objects themselves.
+
+Let's say you create this array::
+
+    >>> a = np.array([1, 2, 3, 4, 5, 6])
+
+Running ::
+
+    >>> a?
+
+Will return a lot of useful information (first details about ``a`` itself,
+followed by the docstring of ``ndarray`` of which ``a`` is an instance)::
+
+    Type:            ndarray
+    String form:     [1 2 3 4 5 6]
+    Length:          6
+    File:            ~/anaconda3/lib/python3.7/site-packages/numpy/__init__.py
+    Docstring:       <no docstring>
+    Class docstring:
+    ndarray(shape, dtype=float, buffer=None, offset=0,
+            strides=None, order=None)
+
+    An array object represents a multidimensional, homogeneous array
+    of fixed-size items.  An associated data-type object describes the
+    format of each element in the array (its byte-order, how many bytes it
+    occupies in memory, whether it is an integer, a floating point number,
+    or something else, etc.)
+
+    Arrays should be constructed using `array`, `zeros` or `empty` (refer
+    to the See Also section below).  The parameters given here refer to
+    a low-level method (`ndarray(...)`) for instantiating an array.
+
+    For more information, refer to the `numpy` module and examine the
+    methods and attributes of an array.
+
+    Parameters
+    ----------
+    (for the __new__ method; see Notes below)
+
+    shape : tuple of ints
+        Shape of created array.
+    ...
+
+This also works for functions and other objects that **you** create. Just
+remember to include a docstring with your function using a string literal
+(``""" """`` or ``''' '''`` around your documentation).
+
+For example, if you create this function::
+
+    >>> def double(a):
+    ...     '''Return a * 2'''
+    ...     return a * 2
+
+You can run::
+
+    >>> double?
+
+Which will return::
+
+    Signature: double(a)
+    Docstring: Return a * 2
+    File:      ~/Desktop/<ipython-input-23-b5adf20be596>
+    Type:      function
+
+You can reach another level of information by reading the source code of the
+object you're interested in.
Using a double question mark (``??``) allows you to +access the source code. + +For example, running:: + + >>> double?? + +Will return :: + + Signature: double(a) + Source: def double(a): + '''Return a * 2''' + return a * 2 + File: ~/Desktop/<ipython-input-23-b5adf20be596> + Type: function + +If the object in question is compiled in a language other than Python, using +``??`` will return the same information as ``?``. You'll find this with a lot of +built-in objects and types, for example:: + + >>> len? + Signature: len(obj, /) + Docstring: Return the number of items in a container. + Type: builtin_function_or_method + +and :: + + >>> len?? + Signature: len(obj, /) + Docstring: Return the number of items in a container. + Type: builtin_function_or_method + +have the same output because they were compiled in a programming language other +than Python. + + +Working with mathematical formulas +---------------------------------- + +The ease of implementing mathematical formulas that work on arrays is one of +the things that make NumPy so widely used in the scientific Python community. + +For example, this is the mean square error formula (a central formula used in +supervised machine learning models that deal with regression): + +.. image:: images/np_MSE_formula.png + +Implementing this formula is simple and straightforward in NumPy: + +.. image:: images/np_MSE_implementation.png + +What makes this work so well is that ``predictions`` and ``labels`` can contain +one or a thousand values. They only need to be the same size. + +You can visualize it this way: + +.. image:: images/np_mse_viz1.png + +In this example, both the predictions and labels vectors contain three values, +meaning ``n`` has a value of three. After we carry out subtractions the values +in the vector are squared. Then NumPy sums the values, and your result is the +error value for that prediction and a score for the quality of the model. + +.. image:: images/np_mse_viz2.png + +.. 
image:: images/np_MSE_explanation2.png + + +How to save and load NumPy objects +---------------------------------- + +*This section covers* ``np.save``, ``np.savez``, ``np.savetxt``, +``np.load``, ``np.loadtxt`` + +----- + +You will, at some point, want to save your arrays to disk and load them back +without having to re-run the code. Fortunately, there are several ways to save +and load objects with NumPy. The ndarray objects can be saved to and loaded from +the disk files with ``loadtxt`` and ``savetxt`` functions that handle normal +text files, ``load`` and ``save`` functions that handle NumPy binary files with +a **.npy** file extension, and a ``savez`` function that handles NumPy files +with a **.npz** file extension. + +The **.npy** and **.npz** files store data, shape, dtype, and other information +required to reconstruct the ndarray in a way that allows the array to be +correctly retrieved, even when the file is on another machine with different +architecture. + +If you want to store a single ndarray object, store it as a .npy file using +``np.save``. If you want to store more than one ndarray object in a single file, +save it as a .npz file using ``np.savez``. You can also save several arrays +into a single file in compressed npz format with `savez_compressed`. + +It's easy to save and load and array with ``np.save()``. Just make sure to +specify the array you want to save and a file name. For example, if you create +this array:: + + >>> a = np.array([1, 2, 3, 4, 5, 6]) + +You can save it as "filename.npy" with:: + + >>> np.save('filename', a) + +You can use ``np.load()`` to reconstruct your array. :: + + >>> b = np.load('filename.npy') + +If you want to check your array, you can run::: + + >>> print(b) + [1 2 3 4 5 6] + +You can save a NumPy array as a plain text file like a **.csv** or **.txt** file +with ``np.savetxt``. 
+ +For example, if you create this array:: + + >>> csv_arr = np.array([1, 2, 3, 4, 5, 6, 7, 8]) + +You can easily save it as a .csv file with the name "new_file.csv" like this:: + + >>> np.savetxt('new_file.csv', csv_arr) + +You can quickly and easily load your saved text file using ``loadtxt()``:: + + >>> np.loadtxt('new_file.csv') + array([1., 2., 3., 4., 5., 6., 7., 8.]) + +The ``savetxt()`` and ``loadtxt()`` functions accept additional optional +parameters such as header, footer, and delimiter. While text files can be easier +for sharing, .npy and .npz files are smaller and faster to read. If you need more +sophisticated handling of your text file (for example, if you need to work with +lines that contain missing values), you will want to use the `genfromtxt` +function. + +With `savetxt`, you can specify headers, footers, comments, and more. + +Learn more about :ref:`input and output routines here <routines.io>`. + + +Importing and exporting a CSV +----------------------------- + +It's simple to read in a CSV that contains existing information. The best and +easiest way to do this is to use +`Pandas <https://pandas.pydata.org/getpandas.html>`_. :: + + >>> import pandas as pd + + >>> # If all of your columns are the same type: + >>> x = pd.read_csv('music.csv').values + + >>> # You can also simply select the columns you need: + >>> x = pd.read_csv('music.csv', columns=['float_colname_1', ...]).values + +.. image:: images/np_pandas.png + +It's simple to use Pandas in order to export your array as well. If you are new +to NumPy, you may want to create a Pandas dataframe from the values in your +array and then write the data frame to a CSV file with Pandas. 
+ +If you created this array "a" :: + + [[-2.58289208, 0.43014843, -1.24082018, 1.59572603], + [ 0.99027828, 1.17150989, 0.94125714, -0.14692469], + [ 0.76989341, 0.81299683, -0.95068423, 0.11769564], + [ 0.20484034, 0.34784527, 1.96979195, 0.51992837]] + +You could create a Pandas dataframe :: + + >>> df = pd.DataFrame(a) + >>> print(df) + +**Output:** + +:: + + 0 1 2 3 + 0 -2.582892 0.430148 -1.240820 1.595726 + 1 0.990278 1.171510 0.941257 -0.146925 + 2 0.769893 0.812997 -0.950684 0.117696 + 3 0.204840 0.347845 1.969792 0.519928 + +You can easily save your dataframe with:: + + >>> df.to_csv('pd.csv') + +And read your CSV with:: + + >>> pd.read_csv('pd.csv') + +.. image:: images/np_readcsv.png + +You can also save your array with the NumPy ``savetxt`` method. :: + + >>> np.savetxt('np.csv', a, fmt='%.2f', delimiter=',', header='1, 2, 3, 4') + +If you're using the command line, you can read your saved CSV any time with a +command such as:: + + >>> cat np.csv + # 1, 2, 3, 4 + -2.58,0.43,-1.24,1.60 + 0.99,1.17,0.94,-0.15 + 0.77,0.81,-0.95,0.12 + 0.20,0.35,1.97,0.52 + +Or you can open the file any time with a text editor! + +If you're interested in learning more about Pandas, take a look at the +`official Pandas documentation <https://pandas.pydata.org/index.html>`_. +Learn how to install Pandas with the +`official Pandas installation information <https://pandas.pydata.org/pandas-docs/stable/install.html>`_. + + +Plotting arrays with Matplotlib +------------------------------- + +If you need to generate a plot for your values, it's very simple with +`Matplotlib <https://matplotlib.org/>`_. 
+ +For example, you may have an array like this one:: + + >>> a = np.array([2, 1, 5, 7, 4, 6, 8, 14, 10, 9, 18, 20, 22]) + +If you already have Matplotlib installed, you can import it with:: + + >>> import matplotlib.pyplot as plt + + >>> # If you're using Jupyter Notebook, you may also want to run the following + >>> line of code to display your code in the notebook: + + >>> %matplotlib inline + +All you need to do to plot your values is run:: + + >>> plt.plot(a) + >>> plt.show() + +.. plot:: user/plots/matplotlib1.py + :align: center + :include-source: 0 + +For example, you can plot a 1D array like this:: + + >>> x = np.linspace(0, 5, 20) + >>> y = np.linspace(0, 10, 20) + >>> plt.plot(x, y, 'purple') # line + >>> plt.plot(x, y, 'o') # dots + +.. plot:: user/plots/matplotlib2.py + :align: center + :include-source: 0 + +With Matplotlib, you have access to an enormous number of visualization options. :: + + >>> from mpl_toolkits.mplot3d import Axes3D + >>> fig = plt.figure() + >>> ax = Axes3D(fig) + >>> X = np.arange(-5, 5, 0.15) + >>> Y = np.arange(-5, 5, 0.15) + >>> X, Y = np.meshgrid(X, Y) + >>> R = np.sqrt(X**2 + Y**2) + >>> Z = np.sin(R) + + >>> ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap='viridis') + +.. plot:: user/plots/matplotlib3.py + :align: center + :include-source: 0 + + +To read more about Matplotlib and what it can do, take a look at +`the official documentation <https://matplotlib.org/>`_. +For directions regarding installing Matplotlib, see the official +`installation section <https://matplotlib.org/users/installing.html>`_. + + +------------------------------------------------------- + +*Image credits: Jay Alammar http://jalammar.github.io/* + diff --git a/doc/source/user/basics.broadcasting.rst b/doc/source/user/basics.broadcasting.rst index 4e9016ee0..00bf17a41 100644 --- a/doc/source/user/basics.broadcasting.rst +++ b/doc/source/user/basics.broadcasting.rst @@ -1,3 +1,5 @@ +.. 
_basics.broadcasting: + ************ Broadcasting ************ diff --git a/doc/source/user/c-info.beyond-basics.rst b/doc/source/user/c-info.beyond-basics.rst index 62e8139fe..9e9cd3067 100644 --- a/doc/source/user/c-info.beyond-basics.rst +++ b/doc/source/user/c-info.beyond-basics.rst @@ -110,7 +110,7 @@ to a small(er) fraction of the total time. Even if the interior of the loop is performed without a function call it can be advantageous to perform the inner loop over the dimension with the highest number of elements to take advantage of speed enhancements available on micro- -processors that use pipelining to enhance fundmental operations. +processors that use pipelining to enhance fundamental operations. The :c:func:`PyArray_IterAllButAxis` ( ``array``, ``&dim`` ) constructs an iterator object that is modified so that it will not iterate over the diff --git a/doc/source/user/images/np_MSE_explanation.png b/doc/source/user/images/np_MSE_explanation.png Binary files differnew file mode 100644 index 000000000..6e20116f5 --- /dev/null +++ b/doc/source/user/images/np_MSE_explanation.png diff --git a/doc/source/user/images/np_MSE_explanation2.png b/doc/source/user/images/np_MSE_explanation2.png Binary files differnew file mode 100644 index 000000000..578e5022b --- /dev/null +++ b/doc/source/user/images/np_MSE_explanation2.png diff --git a/doc/source/user/images/np_MSE_formula.png b/doc/source/user/images/np_MSE_formula.png Binary files differnew file mode 100644 index 000000000..7e6982995 --- /dev/null +++ b/doc/source/user/images/np_MSE_formula.png diff --git a/doc/source/user/images/np_MSE_implementation.png b/doc/source/user/images/np_MSE_implementation.png Binary files differnew file mode 100644 index 000000000..004e82a1f --- /dev/null +++ b/doc/source/user/images/np_MSE_implementation.png diff --git a/doc/source/user/images/np_aggregation.png b/doc/source/user/images/np_aggregation.png Binary files differnew file mode 100644 index 000000000..4356193eb --- 
/dev/null +++ b/doc/source/user/images/np_aggregation.png diff --git a/doc/source/user/images/np_array.png b/doc/source/user/images/np_array.png Binary files differnew file mode 100644 index 000000000..24ba41294 --- /dev/null +++ b/doc/source/user/images/np_array.png diff --git a/doc/source/user/images/np_array_data_ones.png b/doc/source/user/images/np_array_data_ones.png Binary files differnew file mode 100644 index 000000000..9b49b6e29 --- /dev/null +++ b/doc/source/user/images/np_array_data_ones.png diff --git a/doc/source/user/images/np_array_dataones.png b/doc/source/user/images/np_array_dataones.png Binary files differnew file mode 100644 index 000000000..d9b132387 --- /dev/null +++ b/doc/source/user/images/np_array_dataones.png diff --git a/doc/source/user/images/np_create_array.png b/doc/source/user/images/np_create_array.png Binary files differnew file mode 100644 index 000000000..878bad95c --- /dev/null +++ b/doc/source/user/images/np_create_array.png diff --git a/doc/source/user/images/np_create_matrix.png b/doc/source/user/images/np_create_matrix.png Binary files differnew file mode 100644 index 000000000..cd685c4f5 --- /dev/null +++ b/doc/source/user/images/np_create_matrix.png diff --git a/doc/source/user/images/np_data_plus_ones.png b/doc/source/user/images/np_data_plus_ones.png Binary files differnew file mode 100644 index 000000000..b80c2648c --- /dev/null +++ b/doc/source/user/images/np_data_plus_ones.png diff --git a/doc/source/user/images/np_indexing.png b/doc/source/user/images/np_indexing.png Binary files differnew file mode 100644 index 000000000..4303ec35b --- /dev/null +++ b/doc/source/user/images/np_indexing.png diff --git a/doc/source/user/images/np_matrix_aggregation.png b/doc/source/user/images/np_matrix_aggregation.png Binary files differnew file mode 100644 index 000000000..9c2fc5110 --- /dev/null +++ b/doc/source/user/images/np_matrix_aggregation.png diff --git a/doc/source/user/images/np_matrix_aggregation_row.png 
b/doc/source/user/images/np_matrix_aggregation_row.png Binary files differnew file mode 100644 index 000000000..d474c271f --- /dev/null +++ b/doc/source/user/images/np_matrix_aggregation_row.png diff --git a/doc/source/user/images/np_matrix_arithmetic.png b/doc/source/user/images/np_matrix_arithmetic.png Binary files differnew file mode 100644 index 000000000..794702541 --- /dev/null +++ b/doc/source/user/images/np_matrix_arithmetic.png diff --git a/doc/source/user/images/np_matrix_broadcasting.png b/doc/source/user/images/np_matrix_broadcasting.png Binary files differnew file mode 100644 index 000000000..e8102a7d8 --- /dev/null +++ b/doc/source/user/images/np_matrix_broadcasting.png diff --git a/doc/source/user/images/np_matrix_indexing.png b/doc/source/user/images/np_matrix_indexing.png Binary files differnew file mode 100644 index 000000000..97f90f11e --- /dev/null +++ b/doc/source/user/images/np_matrix_indexing.png diff --git a/doc/source/user/images/np_mse_viz1.png b/doc/source/user/images/np_mse_viz1.png Binary files differnew file mode 100644 index 000000000..987a48c79 --- /dev/null +++ b/doc/source/user/images/np_mse_viz1.png diff --git a/doc/source/user/images/np_mse_viz2.png b/doc/source/user/images/np_mse_viz2.png Binary files differnew file mode 100644 index 000000000..5594b03e8 --- /dev/null +++ b/doc/source/user/images/np_mse_viz2.png diff --git a/doc/source/user/images/np_multiply_broadcasting.png b/doc/source/user/images/np_multiply_broadcasting.png Binary files differnew file mode 100644 index 000000000..02337d903 --- /dev/null +++ b/doc/source/user/images/np_multiply_broadcasting.png diff --git a/doc/source/user/images/np_ones_zeros_matrix.png b/doc/source/user/images/np_ones_zeros_matrix.png Binary files differnew file mode 100644 index 000000000..9cb54644f --- /dev/null +++ b/doc/source/user/images/np_ones_zeros_matrix.png diff --git a/doc/source/user/images/np_ones_zeros_random.png b/doc/source/user/images/np_ones_zeros_random.png Binary files 
differnew file mode 100644 index 000000000..17730713f --- /dev/null +++ b/doc/source/user/images/np_ones_zeros_random.png diff --git a/doc/source/user/images/np_pandas.png b/doc/source/user/images/np_pandas.png Binary files differnew file mode 100644 index 000000000..cc0cd069f --- /dev/null +++ b/doc/source/user/images/np_pandas.png diff --git a/doc/source/user/images/np_readcsv.png b/doc/source/user/images/np_readcsv.png Binary files differnew file mode 100644 index 000000000..9d2b9e0a0 --- /dev/null +++ b/doc/source/user/images/np_readcsv.png diff --git a/doc/source/user/images/np_reshape.png b/doc/source/user/images/np_reshape.png Binary files differnew file mode 100644 index 000000000..7ebb8d69d --- /dev/null +++ b/doc/source/user/images/np_reshape.png diff --git a/doc/source/user/images/np_sub_mult_divide.png b/doc/source/user/images/np_sub_mult_divide.png Binary files differnew file mode 100644 index 000000000..a5df2a687 --- /dev/null +++ b/doc/source/user/images/np_sub_mult_divide.png diff --git a/doc/source/user/images/np_transposing_reshaping.png b/doc/source/user/images/np_transposing_reshaping.png Binary files differnew file mode 100644 index 000000000..5399043c2 --- /dev/null +++ b/doc/source/user/images/np_transposing_reshaping.png diff --git a/doc/source/user/index.rst b/doc/source/user/index.rst index a45fec9ec..b321ee83d 100644 --- a/doc/source/user/index.rst +++ b/doc/source/user/index.rst @@ -14,6 +14,7 @@ classes contained in the package, see the :ref:`reference`. setting-up quickstart + absolute_beginners basics misc numpy-for-matlab-users diff --git a/doc/source/user/plots/matplotlib1.py b/doc/source/user/plots/matplotlib1.py new file mode 100644 index 000000000..2cbf87ffa --- /dev/null +++ b/doc/source/user/plots/matplotlib1.py @@ -0,0 +1,7 @@ +import matplotlib.pyplot as plt +import numpy as np + +a = np.array([2, 1, 5, 7, 4, 6, 8, 14, 10, 9, 18, 20, 22]) + +plt.plot(a) +plt.show()
\ No newline at end of file diff --git a/doc/source/user/plots/matplotlib2.py b/doc/source/user/plots/matplotlib2.py new file mode 100644 index 000000000..e15986c25 --- /dev/null +++ b/doc/source/user/plots/matplotlib2.py @@ -0,0 +1,8 @@ +import matplotlib.pyplot as plt +import numpy as np + +x = np.linspace(0, 5, 20) +y = np.linspace(0, 10, 20) +plt.plot(x, y, 'purple') # line +plt.plot(x, y, 'o') # dots +plt.show()
\ No newline at end of file diff --git a/doc/source/user/plots/matplotlib3.py b/doc/source/user/plots/matplotlib3.py new file mode 100644 index 000000000..af778979b --- /dev/null +++ b/doc/source/user/plots/matplotlib3.py @@ -0,0 +1,16 @@ +import numpy as np +import matplotlib.pyplot as plt +from matplotlib import cm +from mpl_toolkits.mplot3d import Axes3D + +fig = plt.figure() +ax = Axes3D(fig) +X = np.arange(-5, 5, 0.15) +Y = np.arange(-5, 5, 0.15) +X, Y = np.meshgrid(X, Y) +R = np.sqrt(X**2 + Y**2) +Z = np.sin(R) + +ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap='viridis') + +plt.show()
\ No newline at end of file diff --git a/doc/source/user/quickstart.rst b/doc/source/user/quickstart.rst index 34e327d75..8a5a863b1 100644 --- a/doc/source/user/quickstart.rst +++ b/doc/source/user/quickstart.rst @@ -20,6 +20,9 @@ If you wish to work the examples in this tutorial, you must also have some software installed on your computer. Please see https://scipy.org/install.html for instructions. + +.. _quickstart.the-basics: + The Basics ========== @@ -95,6 +98,7 @@ An example >>> type(b) <type 'numpy.ndarray'> +.. _quickstart.array-creation: Array Creation -------------- @@ -273,6 +277,8 @@ can change the printing options using ``set_printoptions``. >>> np.set_printoptions(threshold=sys.maxsize) # sys module should be imported +.. _quickstart.basic-operations: + Basic Operations ---------------- @@ -460,6 +466,8 @@ operate elementwise on an array, producing an array as output. `vectorize`, `where` +.. _quickstart.indexing-slicing-and-iterating: + Indexing, Slicing and Iterating ------------------------------- @@ -685,6 +693,9 @@ dimensions are automatically calculated:: `resize`, `ravel` + +.. _quickstart.stacking-arrays: + Stacking together different arrays ---------------------------------- @@ -801,6 +812,9 @@ which the division should occur:: axis, and `array_split` allows one to specify along which axis to split. + +.. _quickstart.copies-and-views: + Copies and Views ================ diff --git a/doc/source/user/whatisnumpy.rst b/doc/source/user/whatisnumpy.rst index abaa2bfed..8478a77c4 100644 --- a/doc/source/user/whatisnumpy.rst +++ b/doc/source/user/whatisnumpy.rst @@ -1,3 +1,5 @@ +.. _whatisnumpy: + ************** What is NumPy? 
************** diff --git a/numpy/compat/py3k.py b/numpy/compat/py3k.py index 3d3002744..fd9f8bd42 100644 --- a/numpy/compat/py3k.py +++ b/numpy/compat/py3k.py @@ -18,76 +18,48 @@ __all__ = ['bytes', 'asbytes', 'isfileobj', 'getexception', 'strchar', import sys import os -try: - from pathlib import Path, PurePath -except ImportError: - Path = PurePath = None - -if sys.version_info[0] >= 3: - import io - - try: - import pickle5 as pickle - except ImportError: - import pickle +from pathlib import Path, PurePath +import io - long = int - integer_types = (int,) - basestring = str - unicode = str - bytes = bytes - - def asunicode(s): - if isinstance(s, bytes): - return s.decode('latin1') - return str(s) - - def asbytes(s): - if isinstance(s, bytes): - return s - return str(s).encode('latin1') +import abc +from abc import ABC as abc_ABC - def asstr(s): - if isinstance(s, bytes): - return s.decode('latin1') - return str(s) +try: + import pickle5 as pickle +except ImportError: + import pickle - def isfileobj(f): - return isinstance(f, (io.FileIO, io.BufferedReader, io.BufferedWriter)) +long = int +integer_types = (int,) +basestring = str +unicode = str +bytes = bytes - def open_latin1(filename, mode='r'): - return open(filename, mode=mode, encoding='iso-8859-1') +def asunicode(s): + if isinstance(s, bytes): + return s.decode('latin1') + return str(s) - def sixu(s): +def asbytes(s): + if isinstance(s, bytes): return s + return str(s).encode('latin1') - strchar = 'U' - -else: - import cpickle as pickle +def asstr(s): + if isinstance(s, bytes): + return s.decode('latin1') + return str(s) - bytes = str - long = long - basestring = basestring - unicode = unicode - integer_types = (int, long) - asbytes = str - asstr = str - strchar = 'S' +def isfileobj(f): + return isinstance(f, (io.FileIO, io.BufferedReader, io.BufferedWriter)) - def isfileobj(f): - return isinstance(f, file) +def open_latin1(filename, mode='r'): + return open(filename, mode=mode, encoding='iso-8859-1') - def 
asunicode(s): - if isinstance(s, unicode): - return s - return str(s).decode('ascii') +def sixu(s): + return s - def open_latin1(filename, mode='r'): - return open(filename, mode=mode) - - def sixu(s): - return unicode(s, 'unicode_escape') +strchar = 'U' def getexception(): return sys.exc_info()[1] @@ -134,69 +106,30 @@ class contextlib_nullcontext: pass -if sys.version_info[0] >= 3 and sys.version_info[1] >= 4: - def npy_load_module(name, fn, info=None): - """ - Load a module. - - .. versionadded:: 1.11.2 - - Parameters - ---------- - name : str - Full module name. - fn : str - Path to module file. - info : tuple, optional - Only here for backward compatibility with Python 2.*. - - Returns - ------- - mod : module - - """ - import importlib.machinery - return importlib.machinery.SourceFileLoader(name, fn).load_module() -else: - def npy_load_module(name, fn, info=None): - """ - Load a module. - - .. versionadded:: 1.11.2 +def npy_load_module(name, fn, info=None): + """ + Load a module. - Parameters - ---------- - name : str - Full module name. - fn : str - Path to module file. - info : tuple, optional - Information as returned by `imp.find_module` - (suffix, mode, type). + .. versionadded:: 1.11.2 - Returns - ------- - mod : module + Parameters + ---------- + name : str + Full module name. + fn : str + Path to module file. + info : tuple, optional + Only here for backward compatibility with Python 2.*. 
- """ - import imp - if info is None: - path = os.path.dirname(fn) - fo, fn, info = imp.find_module(name, [path]) - else: - fo = open(fn, info[1]) - try: - mod = imp.load_module(name, fo, fn, info) - finally: - fo.close() - return mod + Returns + ------- + mod : module -# backport abc.ABC -import abc -if sys.version_info[:2] >= (3, 4): - abc_ABC = abc.ABC -else: - abc_ABC = abc.ABCMeta('ABC', (object,), {'__slots__': ()}) + """ + # Explicitly lazy import this to avoid paying the cost + # of importing importlib at startup + from importlib.machinery import SourceFileLoader + return SourceFileLoader(name, fn).load_module() # Backport os.fs_path, os.PathLike, and PurePath.__fspath__ diff --git a/numpy/core/__init__.py b/numpy/core/__init__.py index 815c61924..c2d53fe3e 100644 --- a/numpy/core/__init__.py +++ b/numpy/core/__init__.py @@ -135,16 +135,11 @@ def _ufunc_reduce(func): return _ufunc_reconstruct, (whichmodule(func, name), name) -import sys -if sys.version_info[0] >= 3: - import copyreg -else: - import copy_reg as copyreg +import copyreg copyreg.pickle(ufunc, _ufunc_reduce, _ufunc_reconstruct) # Unclutter namespace (must keep _ufunc_reconstruct for unpickling) del copyreg -del sys del _ufunc_reduce from numpy._pytesttester import PytestTester diff --git a/numpy/core/_add_newdocs.py b/numpy/core/_add_newdocs.py index f36c6941f..cb68b8360 100644 --- a/numpy/core/_add_newdocs.py +++ b/numpy/core/_add_newdocs.py @@ -4180,7 +4180,7 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('view', add_newdoc('numpy.core.umath', 'frompyfunc', """ - frompyfunc(func, nin, nout) + frompyfunc(func, nin, nout, *[, identity]) Takes an arbitrary Python function and returns a NumPy ufunc. @@ -4195,6 +4195,13 @@ add_newdoc('numpy.core.umath', 'frompyfunc', The number of input arguments. nout : int The number of objects returned by `func`. + identity : object, optional + The value to use for the `~numpy.ufunc.identity` attribute of the resulting + object. 
If specified, this is equivalent to setting the underlying + C ``identity`` field to ``PyUFunc_IdentityValue``. + If omitted, the identity is set to ``PyUFunc_None``. Note that this is + _not_ equivalent to setting the identity to ``None``, which implies the + operation is reorderable. Returns ------- diff --git a/numpy/core/_internal.py b/numpy/core/_internal.py index f8de6dd09..f21774cb6 100644 --- a/numpy/core/_internal.py +++ b/numpy/core/_internal.py @@ -556,7 +556,6 @@ class _Stream: def __bool__(self): return bool(self.s) - __nonzero__ = __bool__ def _dtype_from_pep3118(spec): @@ -839,7 +838,7 @@ def npy_ctypes_check(cls): # # (..., _ctypes._CData, object) ctype_base = cls.__mro__[-2] # right now, they're part of the _ctypes module - return 'ctypes' in ctype_base.__module__ + return '_ctypes' in ctype_base.__module__ except Exception: return False diff --git a/numpy/core/arrayprint.py b/numpy/core/arrayprint.py index 918da4a72..ec7e4261f 100644 --- a/numpy/core/arrayprint.py +++ b/numpy/core/arrayprint.py @@ -22,19 +22,12 @@ __docformat__ = 'restructuredtext' # scalars are printed inside an ndarray. Only the latter strs are currently # user-customizable. -import sys import functools import numbers -if sys.version_info[0] >= 3: - try: - from _thread import get_ident - except ImportError: - from _dummy_thread import get_ident -else: - try: - from thread import get_ident - except ImportError: - from dummy_thread import get_ident +try: + from _thread import get_ident +except ImportError: + from _dummy_thread import get_ident import numpy as np from . import numerictypes as _nt diff --git a/numpy/core/code_generators/generate_umath.py b/numpy/core/code_generators/generate_umath.py index f5691d950..7599360f5 100644 --- a/numpy/core/code_generators/generate_umath.py +++ b/numpy/core/code_generators/generate_umath.py @@ -148,12 +148,8 @@ class Ufunc: # String-handling utilities to avoid locale-dependence. 
import string -if sys.version_info[0] < 3: - UPPER_TABLE = string.maketrans(string.ascii_lowercase, - string.ascii_uppercase) -else: - UPPER_TABLE = bytes.maketrans(bytes(string.ascii_lowercase, "ascii"), - bytes(string.ascii_uppercase, "ascii")) +UPPER_TABLE = bytes.maketrans(bytes(string.ascii_lowercase, "ascii"), + bytes(string.ascii_uppercase, "ascii")) def english_upper(s): """ Apply English case rules to convert ASCII strings to all upper case. @@ -1076,15 +1072,9 @@ def make_ufuncs(funcdict): uf = funcdict[name] mlist = [] docstring = textwrap.dedent(uf.docstring).strip() - if sys.version_info[0] < 3: - docstring = docstring.encode('string-escape') - docstring = docstring.replace(r'"', r'\"') - else: - docstring = docstring.encode('unicode-escape').decode('ascii') - docstring = docstring.replace(r'"', r'\"') - # XXX: I don't understand why the following replace is not - # necessary in the python 2 case. - docstring = docstring.replace(r"'", r"\'") + docstring = docstring.encode('unicode-escape').decode('ascii') + docstring = docstring.replace(r'"', r'\"') + docstring = docstring.replace(r"'", r"\'") # Split the docstring because some compilers (like MS) do not like big # string literal in C code. We split at endlines because textwrap.wrap # do not play well with \n diff --git a/numpy/core/defchararray.py b/numpy/core/defchararray.py index ff1474d9d..1cfdc55c0 100644 --- a/numpy/core/defchararray.py +++ b/numpy/core/defchararray.py @@ -40,13 +40,6 @@ __all__ = [ _globalvar = 0 -if sys.version_info[0] >= 3: - _unicode = str - _bytes = bytes -else: - _unicode = unicode - _bytes = str -_len = len array_function_dispatch = functools.partial( overrides.array_function_dispatch, module='numpy.char') @@ -61,7 +54,7 @@ def _use_unicode(*args): result should be unicode. 
""" for x in args: - if (isinstance(x, _unicode) or + if (isinstance(x, str) or issubclass(numpy.asarray(x).dtype.type, unicode_)): return unicode_ return string_ @@ -1960,8 +1953,8 @@ class chararray(ndarray): # strings in the new array. itemsize = long(itemsize) - if sys.version_info[0] >= 3 and isinstance(buffer, _unicode): - # On Py3, unicode objects do not have the buffer interface + if isinstance(buffer, str): + # unicode objects do not have the buffer interface filler = buffer buffer = None else: @@ -1991,7 +1984,7 @@ class chararray(ndarray): if isinstance(val, character): temp = val.rstrip() - if _len(temp) == 0: + if len(temp) == 0: val = '' else: val = temp @@ -2675,16 +2668,16 @@ def array(obj, itemsize=None, copy=True, unicode=None, order=None): be in any order (either C-, Fortran-contiguous, or even discontiguous). """ - if isinstance(obj, (_bytes, _unicode)): + if isinstance(obj, (bytes, str)): if unicode is None: - if isinstance(obj, _unicode): + if isinstance(obj, str): unicode = True else: unicode = False if itemsize is None: - itemsize = _len(obj) - shape = _len(obj) // itemsize + itemsize = len(obj) + shape = len(obj) // itemsize if unicode: if sys.maxunicode == 0xffff: @@ -2699,11 +2692,11 @@ def array(obj, itemsize=None, copy=True, unicode=None, order=None): # should happen in native endianness. obj = obj.encode('utf_32') else: - obj = _unicode(obj) + obj = str(obj) else: # Let the default Unicode -> string encoding (if any) take # precedence. 
- obj = _bytes(obj) + obj = bytes(obj) return chararray(shape, itemsize=itemsize, unicode=unicode, buffer=obj, order=order) diff --git a/numpy/core/include/numpy/npy_3kcompat.h b/numpy/core/include/numpy/npy_3kcompat.h index dbb5bd506..efe196c84 100644 --- a/numpy/core/include/numpy/npy_3kcompat.h +++ b/numpy/core/include/numpy/npy_3kcompat.h @@ -72,6 +72,22 @@ static NPY_INLINE int PyInt_Check(PyObject *op) { } while (0) #endif +/* introduced in https://github.com/python/cpython/commit/a24107b04c1277e3c1105f98aff5bfa3a98b33a0 */ +#if PY_VERSION_HEX < 0x030800A3 + static NPY_INLINE PyObject * + _PyDict_GetItemStringWithError(PyObject *v, const char *key) + { + PyObject *kv, *rv; + kv = PyUnicode_FromString(key); + if (kv == NULL) { + return NULL; + } + rv = PyDict_GetItemWithError(v, kv); + Py_DECREF(kv); + return rv; + } +#endif + /* * PyString -> PyBytes */ diff --git a/numpy/core/numeric.py b/numpy/core/numeric.py index f18ab6336..72c6089b8 100644 --- a/numpy/core/numeric.py +++ b/numpy/core/numeric.py @@ -35,12 +35,6 @@ bitwise_not = invert ufunc = type(sin) newaxis = None -if sys.version_info[0] >= 3: - import builtins -else: - import __builtin__ as builtins - - array_function_dispatch = functools.partial( overrides.array_function_dispatch, module='numpy') @@ -282,7 +276,7 @@ def full(shape, fill_value, dtype=None, order='C'): ---------- shape : int or sequence of ints Shape of the new array, e.g., ``(2, 3)`` or ``2``. - fill_value : scalar + fill_value : scalar or array_like Fill value. 
dtype : data-type, optional The desired data-type for the array The default, None, means @@ -312,6 +306,10 @@ def full(shape, fill_value, dtype=None, order='C'): array([[10, 10], [10, 10]]) + >>> np.full((2, 2), [1, 2]) + array([[1, 2], + [1, 2]]) + """ if dtype is None: dtype = array(fill_value).dtype diff --git a/numpy/core/numerictypes.py b/numpy/core/numerictypes.py index c63ea08c7..c06552c4e 100644 --- a/numpy/core/numerictypes.py +++ b/numpy/core/numerictypes.py @@ -80,7 +80,6 @@ Exported symbols include: """ import types as _types -import sys import numbers import warnings @@ -120,11 +119,8 @@ from ._dtype import _kind_name # we don't export these for import *, but we do want them accessible # as numerictypes.bool, etc. -if sys.version_info[0] >= 3: - from builtins import bool, int, float, complex, object, str - unicode = str -else: - from __builtin__ import bool, int, float, complex, object, unicode, str +from builtins import bool, int, float, complex, object, str +unicode = str # We use this later diff --git a/numpy/core/records.py b/numpy/core/records.py index 6717dc69b..d4aa2feb9 100644 --- a/numpy/core/records.py +++ b/numpy/core/records.py @@ -581,6 +581,18 @@ class recarray(ndarray): return self.setfield(val, *res) +def _deprecate_shape_0_as_None(shape): + if shape == 0: + warnings.warn( + "Passing `shape=0` to have the shape be inferred is deprecated, " + "and in future will be equivalent to `shape=(0,)`. 
To infer " + "the shape and suppress this warning, pass `shape=None` instead.", + FutureWarning, stacklevel=3) + return None + else: + return shape + + def fromarrays(arrayList, dtype=None, shape=None, formats=None, names=None, titles=None, aligned=False, byteorder=None): """ create a record array from a (flat) list of arrays @@ -598,10 +610,12 @@ def fromarrays(arrayList, dtype=None, shape=None, formats=None, arrayList = [sb.asarray(x) for x in arrayList] - if shape is None or shape == 0: - shape = arrayList[0].shape + # NumPy 1.19.0, 2020-01-01 + shape = _deprecate_shape_0_as_None(shape) - if isinstance(shape, int): + if shape is None: + shape = arrayList[0].shape + elif isinstance(shape, int): shape = (shape,) if formats is None and dtype is None: @@ -687,7 +701,9 @@ def fromrecords(recList, dtype=None, shape=None, formats=None, names=None, try: retval = sb.array(recList, dtype=descr) except (TypeError, ValueError): - if (shape is None or shape == 0): + # NumPy 1.19.0, 2020-01-01 + shape = _deprecate_shape_0_as_None(shape) + if shape is None: shape = len(recList) if isinstance(shape, (int, long)): shape = (shape,) @@ -726,7 +742,11 @@ def fromstring(datastring, dtype=None, shape=None, offset=0, formats=None, descr = format_parser(formats, names, titles, aligned, byteorder)._descr itemsize = descr.itemsize - if (shape is None or shape == 0 or shape == -1): + + # NumPy 1.19.0, 2020-01-01 + shape = _deprecate_shape_0_as_None(shape) + + if shape is None or shape == -1: shape = (len(datastring) - offset) // itemsize _array = recarray(shape, descr, buf=datastring, offset=offset) @@ -769,7 +789,10 @@ def fromfile(fd, dtype=None, shape=None, offset=0, formats=None, if dtype is None and formats is None: raise TypeError("fromfile() needs a 'dtype' or 'formats' argument") - if (shape is None or shape == 0): + # NumPy 1.19.0, 2020-01-01 + shape = _deprecate_shape_0_as_None(shape) + + if shape is None: shape = (-1,) elif isinstance(shape, (int, long)): shape = (shape,) diff 
--git a/numpy/core/setup_common.py b/numpy/core/setup_common.py index a947f7a3d..6d8e603a7 100644 --- a/numpy/core/setup_common.py +++ b/numpy/core/setup_common.py @@ -244,9 +244,9 @@ def check_long_double_representation(cmd): # Disable multi-file interprocedural optimization in the Intel compiler on Linux # which generates intermediary object files and prevents checking the # float representation. - elif (sys.platform != "win32" - and cmd.compiler.compiler_type.startswith('intel') - and '-ipo' in cmd.compiler.cc_exe): + elif (sys.platform != "win32" + and cmd.compiler.compiler_type.startswith('intel') + and '-ipo' in cmd.compiler.cc_exe): newcompiler = cmd.compiler.cc_exe.replace(' -ipo', '') cmd.compiler.set_executables( compiler=newcompiler, diff --git a/numpy/core/src/common/numpyos.c b/numpy/core/src/common/numpyos.c index d60b1ca17..7a629f46f 100644 --- a/numpy/core/src/common/numpyos.c +++ b/numpy/core/src/common/numpyos.c @@ -283,7 +283,7 @@ fix_ascii_format(char* buf, size_t buflen, int decimal) * converting. * - value: The value to convert * - decimal: if != 0, always has a decimal, and at leasat one digit after - * the decimal. This has the same effect as passing 'Z' in the origianl + * the decimal. 
This has the same effect as passing 'Z' in the original * PyOS_ascii_formatd * * This is similar to PyOS_ascii_formatd in python > 2.6, except that it does diff --git a/numpy/core/src/common/ufunc_override.c b/numpy/core/src/common/ufunc_override.c index 3f699bcdd..d510f185a 100644 --- a/numpy/core/src/common/ufunc_override.c +++ b/numpy/core/src/common/ufunc_override.c @@ -94,8 +94,11 @@ PyUFuncOverride_GetOutObjects(PyObject *kwds, PyObject **out_kwd_obj, PyObject * return -1; } /* borrowed reference */ - *out_kwd_obj = PyDict_GetItemString(kwds, "out"); + *out_kwd_obj = _PyDict_GetItemStringWithError(kwds, "out"); if (*out_kwd_obj == NULL) { + if (PyErr_Occurred()) { + return -1; + } Py_INCREF(Py_None); *out_kwd_obj = Py_None; return 0; diff --git a/numpy/core/src/multiarray/arrayobject.c b/numpy/core/src/multiarray/arrayobject.c index 0c554d31b..aef0db36c 100644 --- a/numpy/core/src/multiarray/arrayobject.c +++ b/numpy/core/src/multiarray/arrayobject.c @@ -1538,7 +1538,7 @@ array_richcompare(PyArrayObject *self, PyObject *other, int cmp_op) * - If other is not convertible to an array, pass on the error * (MHvK, 2018-06-18: not sure about this, but it's what we have). * - * However, for backwards compatibilty, we cannot yet return arrays, + * However, for backwards compatibility, we cannot yet return arrays, * so we raise warnings instead. Furthermore, we warn on python2 * for LT, LE, GE, GT, since fall-back behaviour is poorly defined. 
*/ diff --git a/numpy/core/src/multiarray/arraytypes.c.src b/numpy/core/src/multiarray/arraytypes.c.src index 3d6a5eda8..808cfaa14 100644 --- a/numpy/core/src/multiarray/arraytypes.c.src +++ b/numpy/core/src/multiarray/arraytypes.c.src @@ -3888,172 +3888,6 @@ static void /* ***************************************************************************** - ** FASTPUTMASK ** - ***************************************************************************** - */ - - -/**begin repeat - * - * #name = BOOL, - * BYTE, UBYTE, SHORT, USHORT, INT, UINT, - * LONG, ULONG, LONGLONG, ULONGLONG, - * HALF, FLOAT, DOUBLE, LONGDOUBLE, - * CFLOAT, CDOUBLE, CLONGDOUBLE, - * DATETIME, TIMEDELTA# - * #type = npy_bool, npy_byte, npy_ubyte, npy_short, npy_ushort, npy_int, npy_uint, - * npy_long, npy_ulong, npy_longlong, npy_ulonglong, - * npy_half, npy_float, npy_double, npy_longdouble, - * npy_cfloat, npy_cdouble, npy_clongdouble, - * npy_datetime, npy_timedelta# -*/ -static void -@name@_fastputmask(@type@ *in, npy_bool *mask, npy_intp ni, @type@ *vals, - npy_intp nv) -{ - npy_intp i, j; - - if (nv == 1) { - @type@ s_val = *vals; - for (i = 0; i < ni; i++) { - if (mask[i]) { - in[i] = s_val; - } - } - } - else { - for (i = 0, j = 0; i < ni; i++, j++) { - if (j >= nv) { - j = 0; - } - if (mask[i]) { - in[i] = vals[j]; - } - } - } - return; -} -/**end repeat**/ - -#define OBJECT_fastputmask NULL - - -/* - ***************************************************************************** - ** FASTTAKE ** - ***************************************************************************** - */ - - -/**begin repeat - * - * #name = BOOL, - * BYTE, UBYTE, SHORT, USHORT, INT, UINT, - * LONG, ULONG, LONGLONG, ULONGLONG, - * HALF, FLOAT, DOUBLE, LONGDOUBLE, - * CFLOAT, CDOUBLE, CLONGDOUBLE, - * DATETIME, TIMEDELTA# - * #type = npy_bool, - * npy_byte, npy_ubyte, npy_short, npy_ushort, npy_int, npy_uint, - * npy_long, npy_ulong, npy_longlong, npy_ulonglong, - * npy_half, npy_float, npy_double, npy_longdouble, - * 
npy_cfloat, npy_cdouble, npy_clongdouble, - * npy_datetime, npy_timedelta# -*/ -static int -@name@_fasttake(@type@ *dest, @type@ *src, npy_intp *indarray, - npy_intp nindarray, npy_intp n_outer, - npy_intp m_middle, npy_intp nelem, - NPY_CLIPMODE clipmode) -{ - npy_intp i, j, k, tmp; - NPY_BEGIN_THREADS_DEF; - - NPY_BEGIN_THREADS; - - switch(clipmode) { - case NPY_RAISE: - for (i = 0; i < n_outer; i++) { - for (j = 0; j < m_middle; j++) { - tmp = indarray[j]; - /* - * We don't know what axis we're operating on, - * so don't report it in case of an error. - */ - if (check_and_adjust_index(&tmp, nindarray, -1, _save) < 0) { - return 1; - } - if (NPY_LIKELY(nelem == 1)) { - *dest++ = *(src + tmp); - } - else { - for (k = 0; k < nelem; k++) { - *dest++ = *(src + tmp*nelem + k); - } - } - } - src += nelem*nindarray; - } - break; - case NPY_WRAP: - for (i = 0; i < n_outer; i++) { - for (j = 0; j < m_middle; j++) { - tmp = indarray[j]; - if (tmp < 0) { - while (tmp < 0) { - tmp += nindarray; - } - } - else if (tmp >= nindarray) { - while (tmp >= nindarray) { - tmp -= nindarray; - } - } - if (NPY_LIKELY(nelem == 1)) { - *dest++ = *(src+tmp); - } - else { - for (k = 0; k < nelem; k++) { - *dest++ = *(src+tmp*nelem+k); - } - } - } - src += nelem*nindarray; - } - break; - case NPY_CLIP: - for (i = 0; i < n_outer; i++) { - for (j = 0; j < m_middle; j++) { - tmp = indarray[j]; - if (tmp < 0) { - tmp = 0; - } - else if (tmp >= nindarray) { - tmp = nindarray - 1; - } - if (NPY_LIKELY(nelem == 1)) { - *dest++ = *(src + tmp); - } - else { - for (k = 0; k < nelem; k++) { - *dest++ = *(src + tmp*nelem + k); - } - } - } - src += nelem*nindarray; - } - break; - } - - NPY_END_THREADS; - return 0; -} -/**end repeat**/ - -#define OBJECT_fasttake NULL - -/* - ***************************************************************************** ** small correlate ** ***************************************************************************** */ @@ -4410,8 +4244,8 @@ static PyArray_ArrFuncs 
_Py@NAME@_ArrFuncs = { NULL, NULL, (PyArray_FastClipFunc*)NULL, - (PyArray_FastPutmaskFunc*)@from@_fastputmask, - (PyArray_FastTakeFunc*)@from@_fasttake, + (PyArray_FastPutmaskFunc*)NULL, + (PyArray_FastTakeFunc*)NULL, (PyArray_ArgFunc*)@from@_argmin }; @@ -4687,7 +4521,7 @@ set_typeinfo(PyObject *dict) infodict = PyDict_New(); if (infodict == NULL) return -1; - + int ret; /**begin repeat * * #name = BOOL, @@ -4730,10 +4564,15 @@ set_typeinfo(PyObject *dict) &Py@Name@ArrType_Type ); if (s == NULL) { + Py_DECREF(infodict); return -1; } - PyDict_SetItemString(infodict, "@name@", s); + ret = PyDict_SetItemString(infodict, "@name@", s); Py_DECREF(s); + if (ret < 0) { + Py_DECREF(infodict); + return -1; + } /**end repeat**/ @@ -4753,10 +4592,15 @@ set_typeinfo(PyObject *dict) _ALIGN(@type@), &Py@Name@ArrType_Type ); if (s == NULL) { + Py_DECREF(infodict); return -1; } - PyDict_SetItemString(infodict, "@name@", s); + ret = PyDict_SetItemString(infodict, "@name@", s); Py_DECREF(s); + if (ret < 0) { + Py_DECREF(infodict); + return -1; + } /**end repeat**/ @@ -4766,37 +4610,57 @@ set_typeinfo(PyObject *dict) &PyObjectArrType_Type ); if (s == NULL) { + Py_DECREF(infodict); return -1; } - PyDict_SetItemString(infodict, "OBJECT", s); + ret = PyDict_SetItemString(infodict, "OBJECT", s); Py_DECREF(s); + if (ret < 0) { + Py_DECREF(infodict); + return -1; + } s = PyArray_typeinfo( NPY_STRINGLTR, NPY_STRING, 0, _ALIGN(char), &PyStringArrType_Type ); if (s == NULL) { + Py_DECREF(infodict); return -1; } - PyDict_SetItemString(infodict, "STRING", s); + ret = PyDict_SetItemString(infodict, "STRING", s); Py_DECREF(s); + if (ret < 0) { + Py_DECREF(infodict); + return -1; + } s = PyArray_typeinfo( NPY_UNICODELTR, NPY_UNICODE, 0, _ALIGN(npy_ucs4), &PyUnicodeArrType_Type ); if (s == NULL) { + Py_DECREF(infodict); return -1; } - PyDict_SetItemString(infodict, "UNICODE", s); + ret = PyDict_SetItemString(infodict, "UNICODE", s); Py_DECREF(s); + if (ret < 0) { + Py_DECREF(infodict); + return 
-1; + } s = PyArray_typeinfo( NPY_VOIDLTR, NPY_VOID, 0, _ALIGN(char), &PyVoidArrType_Type ); if (s == NULL) { + Py_DECREF(infodict); return -1; } - PyDict_SetItemString(infodict, "VOID", s); + ret = PyDict_SetItemString(infodict, "VOID", s); Py_DECREF(s); + if (ret < 0) { + Py_DECREF(infodict); + return -1; + } s = PyArray_typeinforanged( NPY_DATETIMELTR, NPY_DATETIME, NPY_BITSOF_DATETIME, _ALIGN(npy_datetime), @@ -4805,10 +4669,15 @@ set_typeinfo(PyObject *dict) &PyDatetimeArrType_Type ); if (s == NULL) { + Py_DECREF(infodict); return -1; } - PyDict_SetItemString(infodict, "DATETIME", s); + ret = PyDict_SetItemString(infodict, "DATETIME", s); Py_DECREF(s); + if (ret < 0) { + Py_DECREF(infodict); + return -1; + } s = PyArray_typeinforanged( NPY_TIMEDELTALTR, NPY_TIMEDELTA, NPY_BITSOF_TIMEDELTA, _ALIGN(npy_timedelta), @@ -4817,15 +4686,23 @@ set_typeinfo(PyObject *dict) &PyTimedeltaArrType_Type ); if (s == NULL) { + Py_DECREF(infodict); return -1; } - PyDict_SetItemString(infodict, "TIMEDELTA", s); + ret = PyDict_SetItemString(infodict, "TIMEDELTA", s); Py_DECREF(s); + if (ret < 0) { + Py_DECREF(infodict); + return -1; + } -#define SETTYPE(name) \ - Py_INCREF(&Py##name##ArrType_Type); \ - PyDict_SetItemString(infodict, #name, \ - (PyObject *)&Py##name##ArrType_Type) +#define SETTYPE(name) \ + Py_INCREF(&Py##name##ArrType_Type); \ + if (PyDict_SetItemString(infodict, #name, \ + (PyObject *)&Py##name##ArrType_Type) < 0) { \ + Py_DECREF(infodict); \ + return -1; \ + } SETTYPE(Generic); SETTYPE(Number); @@ -4840,8 +4717,11 @@ set_typeinfo(PyObject *dict) #undef SETTYPE - PyDict_SetItemString(dict, "typeinfo", infodict); + ret = PyDict_SetItemString(dict, "typeinfo", infodict); Py_DECREF(infodict); + if (ret < 0) { + return -1; + } return 0; } diff --git a/numpy/core/src/multiarray/calculation.c b/numpy/core/src/multiarray/calculation.c index b9ed5a9e8..92ab75053 100644 --- a/numpy/core/src/multiarray/calculation.c +++ b/numpy/core/src/multiarray/calculation.c @@ -926,14 
+926,15 @@ PyArray_Clip(PyArrayObject *self, PyObject *min, PyObject *max, PyArrayObject *o } } - /* NumPy 1.17.0, 2019-02-24 */ - if (DEPRECATE( - "->f->fastclip is deprecated. Use PyUFunc_RegisterLoopForDescr to " - "attach a custom loop to np.core.umath.clip, np.minimum, and " - "np.maximum") < 0) { - return NULL; - } - /* everything below can be removed once this deprecation completes */ + /* + * NumPy 1.17.0, 2019-02-24 + * NumPy 1.19.0, 2020-01-15 + * + * Setting `->f->fastclip` to anything but NULL has been deprecated in 1.19; + * the code path below was previously deprecated since 1.17. + * (the deprecation moved to registration time instead of execution time) + * everything below can be removed once this deprecation completes + */ if (func == NULL || (min != NULL && !PyArray_CheckAnyScalar(min)) diff --git a/numpy/core/src/multiarray/common.c b/numpy/core/src/multiarray/common.c index 3ec151368..c9ec32268 100644 --- a/numpy/core/src/multiarray/common.c +++ b/numpy/core/src/multiarray/common.c @@ -317,7 +317,10 @@ PyArray_DTypeFromObjectHelper(PyObject *obj, int maxdims, if (PyDict_Check(ip)) { PyObject *typestr; PyObject *tmp = NULL; - typestr = PyDict_GetItemString(ip, "typestr"); + typestr = _PyDict_GetItemStringWithError(ip, "typestr"); + if (typestr == NULL && PyErr_Occurred()) { + goto fail; + } /* Allow unicode type strings */ if (typestr && PyUnicode_Check(typestr)) { tmp = PyUnicode_AsASCIIString(typestr); @@ -887,5 +890,3 @@ new_array_for_sum(PyArrayObject *ap1, PyArrayObject *ap2, PyArrayObject* out, } } - - diff --git a/numpy/core/src/multiarray/common.h b/numpy/core/src/multiarray/common.h index e77e51f42..4913eb202 100644 --- a/numpy/core/src/multiarray/common.h +++ b/numpy/core/src/multiarray/common.h @@ -235,7 +235,7 @@ npy_uint_alignment(int itemsize) default: break; } - + return alignment; } @@ -343,3 +343,4 @@ new_array_for_sum(PyArrayObject *ap1, PyArrayObject *ap2, PyArrayObject* out, int nd, npy_intp dimensions[], int typenum, 
PyArrayObject **result); #endif + diff --git a/numpy/core/src/multiarray/compiled_base.c b/numpy/core/src/multiarray/compiled_base.c index d4b9edd57..308e72009 100644 --- a/numpy/core/src/multiarray/compiled_base.c +++ b/numpy/core/src/multiarray/compiled_base.c @@ -1242,15 +1242,25 @@ arr_unravel_index(PyObject *self, PyObject *args, PyObject *kwds) */ if (kwds) { PyObject *dims_item, *shape_item; - dims_item = PyDict_GetItemString(kwds, "dims"); - shape_item = PyDict_GetItemString(kwds, "shape"); + dims_item = _PyDict_GetItemStringWithError(kwds, "dims"); + if (dims_item == NULL && PyErr_Occurred()){ + return NULL; + } + shape_item = _PyDict_GetItemStringWithError(kwds, "shape"); + if (shape_item == NULL && PyErr_Occurred()){ + return NULL; + } if (dims_item != NULL && shape_item == NULL) { if (DEPRECATE("'shape' argument should be" " used instead of 'dims'") < 0) { return NULL; } - PyDict_SetItemString(kwds, "shape", dims_item); - PyDict_DelItemString(kwds, "dims"); + if (PyDict_SetItemString(kwds, "shape", dims_item) < 0) { + return NULL; + } + if (PyDict_DelItemString(kwds, "dims") < 0) { + return NULL; + } } } @@ -1429,19 +1439,28 @@ arr_add_docstring(PyObject *NPY_UNUSED(dummy), PyObject *args) if (PyGetSetDescr_TypePtr == NULL) { /* Get "subdescr" */ - myobj = PyDict_GetItemString(tp_dict, "fields"); + myobj = _PyDict_GetItemStringWithError(tp_dict, "fields"); + if (myobj == NULL && PyErr_Occurred()) { + return NULL; + } if (myobj != NULL) { PyGetSetDescr_TypePtr = Py_TYPE(myobj); } } if (PyMemberDescr_TypePtr == NULL) { - myobj = PyDict_GetItemString(tp_dict, "alignment"); + myobj = _PyDict_GetItemStringWithError(tp_dict, "alignment"); + if (myobj == NULL && PyErr_Occurred()) { + return NULL; + } if (myobj != NULL) { PyMemberDescr_TypePtr = Py_TYPE(myobj); } } if (PyMethodDescr_TypePtr == NULL) { - myobj = PyDict_GetItemString(tp_dict, "newbyteorder"); + myobj = _PyDict_GetItemStringWithError(tp_dict, "newbyteorder"); + if (myobj == NULL && 
PyErr_Occurred()) { + return NULL; + } if (myobj != NULL) { PyMethodDescr_TypePtr = Py_TYPE(myobj); } diff --git a/numpy/core/src/multiarray/ctors.c b/numpy/core/src/multiarray/ctors.c index 07e269b57..0616bed65 100644 --- a/numpy/core/src/multiarray/ctors.c +++ b/numpy/core/src/multiarray/ctors.c @@ -41,7 +41,7 @@ */ /* - * Scanning function for next element parsing and seperator skipping. + * Scanning function for next element parsing and separator skipping. * These functions return: * - 0 to indicate more data to read * - -1 when reading stopped at the end of the string/file @@ -678,6 +678,12 @@ discover_itemsize(PyObject *s, int nd, int *itemsize, int string_type) return 0; } +typedef enum { + DISCOVERED_OK = 0, + DISCOVERED_RAGGED = 1, + DISCOVERED_OBJECT = 2 +} discovered_t; + /* * Take an arbitrary object and discover how many dimensions it * has, filling in the dimensions as we go. @@ -685,7 +691,7 @@ discover_itemsize(PyObject *s, int nd, int *itemsize, int string_type) static int discover_dimensions(PyObject *obj, int *maxndim, npy_intp *d, int check_it, int stop_at_string, int stop_at_tuple, - int *out_is_object) + discovered_t *out_is_object) { PyObject *e; npy_intp n, i; @@ -824,7 +830,11 @@ discover_dimensions(PyObject *obj, int *maxndim, npy_intp *d, int check_it, int nd = -1; if (PyDict_Check(e)) { PyObject *new; - new = PyDict_GetItemString(e, "shape"); + new = _PyDict_GetItemStringWithError(e, "shape"); + if (new == NULL && PyErr_Occurred()) { + Py_DECREF(e); + return -1; + } if (new && PyTuple_Check(new)) { nd = PyTuple_GET_SIZE(new); if (nd < *maxndim) { @@ -867,7 +877,7 @@ discover_dimensions(PyObject *obj, int *maxndim, npy_intp *d, int check_it, if (PyErr_ExceptionMatches(PyExc_KeyError)) { PyErr_Clear(); *maxndim = 0; - *out_is_object = 1; + *out_is_object = DISCOVERED_OBJECT; return 0; } else { @@ -926,7 +936,7 @@ discover_dimensions(PyObject *obj, int *maxndim, npy_intp *d, int check_it, *maxndim = all_elems_maxndim + 1; if 
(!all_dimensions_match) { /* typically results in an array containing variable-length lists */ - *out_is_object = 1; + *out_is_object = DISCOVERED_RAGGED; } } @@ -1619,6 +1629,8 @@ fail: * validate and possibly copy arr itself ... * } * ... use arr ... + * context is passed to PyArray_FromArrayAttr, which ignores it. Since this is + * a NUMPY_API function, we cannot remove it. */ NPY_NO_EXPORT int PyArray_GetArrayParamsFromObject(PyObject *op, @@ -1735,7 +1747,7 @@ PyArray_GetArrayParamsFromObject(PyObject *op, /* Try to treat op as a list of lists */ if (!writeable && PySequence_Check(op)) { - int check_it, stop_at_string, stop_at_tuple, is_object; + int check_it, stop_at_string, stop_at_tuple; int type_num, type; /* @@ -1785,7 +1797,7 @@ PyArray_GetArrayParamsFromObject(PyObject *op, ((*out_dtype)->names || (*out_dtype)->subarray)); *out_ndim = NPY_MAXDIMS; - is_object = 0; + discovered_t is_object = DISCOVERED_OK; if (discover_dimensions( op, out_ndim, out_dims, check_it, stop_at_string, stop_at_tuple, &is_object) < 0) { @@ -1802,7 +1814,27 @@ PyArray_GetArrayParamsFromObject(PyObject *op, return 0; } /* If object arrays are forced */ - if (is_object) { + if (is_object != DISCOVERED_OK) { + static PyObject *visibleDeprecationWarning = NULL; + npy_cache_import( + "numpy", "VisibleDeprecationWarning", + &visibleDeprecationWarning); + if (visibleDeprecationWarning == NULL) { + return -1; + } + if (is_object == DISCOVERED_RAGGED && requested_dtype == NULL) { + /* NumPy 1.19, 2019-11-01 */ + if (PyErr_WarnEx(visibleDeprecationWarning, "Creating an " + "ndarray from ragged nested sequences (which is a " + "list-or-tuple of lists-or-tuples-or ndarrays with " + "different lengths or shapes) is deprecated. 
If you " + "meant to do this, you must specify 'dtype=object' " + "when creating the ndarray", 1) < 0) + { + return -1; + } + } + /* either DISCOVERED_OBJECT or there is a requested_dtype */ Py_DECREF(*out_dtype); *out_dtype = PyArray_DescrFromType(NPY_OBJECT); if (*out_dtype == NULL) { @@ -1871,6 +1903,10 @@ PyArray_GetArrayParamsFromObject(PyObject *op, /*NUMPY_API * Does not check for NPY_ARRAY_ENSURECOPY and NPY_ARRAY_NOTSWAPPED in flags * Steals a reference to newtype --- which can be NULL + * + * context is passed to PyArray_GetArrayParamsFromObject, which passes it to + * PyArray_FromArrayAttr, which raises if it is not NULL. Since this is a + * NUMPY_API function, we cannot remove it. */ NPY_NO_EXPORT PyObject * PyArray_FromAny(PyObject *op, PyArray_Descr *newtype, int min_depth, @@ -2049,6 +2085,8 @@ PyArray_FromAny(PyObject *op, PyArray_Descr *newtype, int min_depth, * * NPY_ARRAY_FORCECAST will cause a cast to occur regardless of whether or not * it is safe. + * + * context is passed through to PyArray_GetArrayParamsFromObject */ /*NUMPY_API @@ -2403,11 +2441,13 @@ PyArray_FromInterface(PyObject *origin) } /* Get type string from interface specification */ - attr = PyDict_GetItemString(iface, "typestr"); + attr = _PyDict_GetItemStringWithError(iface, "typestr"); if (attr == NULL) { Py_DECREF(iface); - PyErr_SetString(PyExc_ValueError, - "Missing __array_interface__ typestr"); + if (!PyErr_Occurred()) { + PyErr_SetString(PyExc_ValueError, + "Missing __array_interface__ typestr"); + } return NULL; } @@ -2439,7 +2479,10 @@ PyArray_FromInterface(PyObject *origin) * the 'descr' attribute. 
*/ if (dtype->type_num == NPY_VOID) { - PyObject *descr = PyDict_GetItemString(iface, "descr"); + PyObject *descr = _PyDict_GetItemStringWithError(iface, "descr"); + if (descr == NULL && PyErr_Occurred()) { + goto fail; + } PyArray_Descr *new_dtype = NULL; if (descr != NULL && !_is_default_descr(descr, attr) && @@ -2453,10 +2496,17 @@ PyArray_FromInterface(PyObject *origin) Py_DECREF(attr); /* Pairs with the unicode handling above */ /* Get shape tuple from interface specification */ - attr = PyDict_GetItemString(iface, "shape"); + attr = _PyDict_GetItemStringWithError(iface, "shape"); if (attr == NULL) { + if (PyErr_Occurred()) { + return NULL; + } /* Shape must be specified when 'data' is specified */ - if (PyDict_GetItemString(iface, "data") != NULL) { + PyObject *data = _PyDict_GetItemStringWithError(iface, "data"); + if (data == NULL && PyErr_Occurred()) { + return NULL; + } + else if (data != NULL) { Py_DECREF(iface); PyErr_SetString(PyExc_ValueError, "Missing __array_interface__ shape"); @@ -2487,7 +2537,10 @@ PyArray_FromInterface(PyObject *origin) } /* Get data buffer from interface specification */ - attr = PyDict_GetItemString(iface, "data"); + attr = _PyDict_GetItemStringWithError(iface, "data"); + if (attr == NULL && PyErr_Occurred()){ + return NULL; + } /* Case for data access through pointer */ if (attr && PyTuple_Check(attr)) { @@ -2551,8 +2604,11 @@ PyArray_FromInterface(PyObject *origin) _dealloc_cached_buffer_info(base); /* Get offset number from interface specification */ - attr = PyDict_GetItemString(iface, "offset"); - if (attr) { + attr = _PyDict_GetItemStringWithError(iface, "offset"); + if (attr == NULL && PyErr_Occurred()) { + goto fail; + } + else if (attr) { npy_longlong num = PyLong_AsLongLong(attr); if (error_converting(num)) { PyErr_SetString(PyExc_TypeError, @@ -2587,7 +2643,10 @@ PyArray_FromInterface(PyObject *origin) goto fail; } } - attr = PyDict_GetItemString(iface, "strides"); + attr = _PyDict_GetItemStringWithError(iface, 
"strides"); + if (attr == NULL && PyErr_Occurred()){ + return NULL; + } if (attr != NULL && attr != Py_None) { if (!PyTuple_Check(attr)) { PyErr_SetString(PyExc_TypeError, @@ -2623,13 +2682,18 @@ PyArray_FromInterface(PyObject *origin) return NULL; } -/*NUMPY_API*/ +/*NUMPY_API + */ NPY_NO_EXPORT PyObject * PyArray_FromArrayAttr(PyObject *op, PyArray_Descr *typecode, PyObject *context) { PyObject *new; PyObject *array_meth; + if (context != NULL) { + PyErr_SetString(PyExc_RuntimeError, "'context' must be NULL"); + return NULL; + } array_meth = PyArray_LookupSpecial_OnInstance(op, "__array__"); if (array_meth == NULL) { if (PyErr_Occurred()) { @@ -2637,29 +2701,11 @@ PyArray_FromArrayAttr(PyObject *op, PyArray_Descr *typecode, PyObject *context) } return Py_NotImplemented; } - if (context == NULL) { - if (typecode == NULL) { - new = PyObject_CallFunction(array_meth, NULL); - } - else { - new = PyObject_CallFunction(array_meth, "O", typecode); - } + if (typecode == NULL) { + new = PyObject_CallFunction(array_meth, NULL); } else { - if (typecode == NULL) { - new = PyObject_CallFunction(array_meth, "OO", Py_None, context); - if (new == NULL && PyErr_ExceptionMatches(PyExc_TypeError)) { - PyErr_Clear(); - new = PyObject_CallFunction(array_meth, ""); - } - } - else { - new = PyObject_CallFunction(array_meth, "OO", typecode, context); - if (new == NULL && PyErr_ExceptionMatches(PyExc_TypeError)) { - PyErr_Clear(); - new = PyObject_CallFunction(array_meth, "O", typecode); - } - } + new = PyObject_CallFunction(array_meth, "O", typecode); } Py_DECREF(array_meth); if (new == NULL) { diff --git a/numpy/core/src/multiarray/datetime_busday.c b/numpy/core/src/multiarray/datetime_busday.c index cdeb65d0e..d3cce8a37 100644 --- a/numpy/core/src/multiarray/datetime_busday.c +++ b/numpy/core/src/multiarray/datetime_busday.c @@ -1012,7 +1012,7 @@ array_busday_offset(PyObject *NPY_UNUSED(self), /* This steals the datetime_dtype reference */ dates = (PyArrayObject 
*)PyArray_FromAny(dates_in, datetime_dtype, - 0, 0, 0, dates_in); + 0, 0, 0, NULL); if (dates == NULL) { goto fail; } @@ -1021,7 +1021,7 @@ array_busday_offset(PyObject *NPY_UNUSED(self), /* Make 'offsets' into an array */ offsets = (PyArrayObject *)PyArray_FromAny(offsets_in, PyArray_DescrFromType(NPY_INT64), - 0, 0, 0, offsets_in); + 0, 0, 0, NULL); if (offsets == NULL) { goto fail; } @@ -1142,7 +1142,7 @@ array_busday_count(PyObject *NPY_UNUSED(self), /* This steals the datetime_dtype reference */ dates_begin = (PyArrayObject *)PyArray_FromAny(dates_begin_in, datetime_dtype, - 0, 0, 0, dates_begin_in); + 0, 0, 0, NULL); if (dates_begin == NULL) { goto fail; } @@ -1165,7 +1165,7 @@ array_busday_count(PyObject *NPY_UNUSED(self), /* This steals the datetime_dtype reference */ dates_end = (PyArrayObject *)PyArray_FromAny(dates_end_in, datetime_dtype, - 0, 0, 0, dates_end_in); + 0, 0, 0, NULL); if (dates_end == NULL) { goto fail; } @@ -1286,7 +1286,7 @@ array_is_busday(PyObject *NPY_UNUSED(self), /* This steals the datetime_dtype reference */ dates = (PyArrayObject *)PyArray_FromAny(dates_in, datetime_dtype, - 0, 0, 0, dates_in); + 0, 0, 0, NULL); if (dates == NULL) { goto fail; } diff --git a/numpy/core/src/multiarray/datetime_busdaycal.c b/numpy/core/src/multiarray/datetime_busdaycal.c index eb6ef04be..1aa5f6ab1 100644 --- a/numpy/core/src/multiarray/datetime_busdaycal.c +++ b/numpy/core/src/multiarray/datetime_busdaycal.c @@ -293,7 +293,7 @@ PyArray_HolidaysConverter(PyObject *dates_in, npy_holidayslist *holidays) /* This steals the datetime_dtype reference */ dates = (PyArrayObject *)PyArray_FromAny(dates_in, datetime_dtype, - 0, 0, 0, dates_in); + 0, 0, 0, NULL); if (dates == NULL) { goto fail; } diff --git a/numpy/core/src/multiarray/descriptor.c b/numpy/core/src/multiarray/descriptor.c index da3babc33..0f35e867c 100644 --- a/numpy/core/src/multiarray/descriptor.c +++ b/numpy/core/src/multiarray/descriptor.c @@ -509,15 +509,20 @@ 
_convert_from_array_descr(PyObject *obj, int align) _report_generic_error(); goto fail; } - if ((PyDict_GetItem(fields, name) != NULL) + if ((PyDict_GetItemWithError(fields, name) != NULL) || (title && PyBaseString_Check(title) - && (PyDict_GetItem(fields, title) != NULL))) { + && (PyDict_GetItemWithError(fields, title) != NULL))) { PyErr_Format(PyExc_ValueError, "field %R occurs more than once", name); Py_DECREF(conv); goto fail; } + else if (PyErr_Occurred()) { + /* Dict lookup crashed */ + Py_DECREF(conv); + goto fail; + } dtypeflags |= (conv->flags & NPY_FROM_FIELDS); if (align) { int _align = conv->alignment; @@ -545,17 +550,25 @@ _convert_from_array_descr(PyObject *obj, int align) goto fail; } if (PyBaseString_Check(title)) { - if (PyDict_GetItem(fields, title) != NULL) { + PyObject *existing = PyDict_GetItemWithError(fields, title); + if (existing == NULL && PyErr_Occurred()) { + goto fail; + } + if (existing != NULL) { PyErr_SetString(PyExc_ValueError, "title already used as a name or title."); Py_DECREF(tup); goto fail; } - PyDict_SetItem(fields, title, tup); + if (PyDict_SetItem(fields, title, tup) < 0) { + goto fail; + } } } else { - PyDict_SetItem(fields, name, tup); + if (PyDict_SetItem(fields, name, tup) < 0) { + goto fail; + } } totalsize += conv->elsize; @@ -786,8 +799,12 @@ _validate_union_object_dtype(PyArray_Descr *new, PyArray_Descr *conv) if (name == NULL) { return -1; } - tup = PyDict_GetItem(conv->fields, name); + tup = PyDict_GetItemWithError(conv->fields, name); if (tup == NULL) { + if (!PyErr_Occurred()) { + /* fields was missing the name it claimed to contain */ + PyErr_BadInternalCall(); + } return -1; } dtype = (PyArray_Descr *)PyTuple_GET_ITEM(tup, 0); @@ -909,8 +926,12 @@ _validate_object_field_overlap(PyArray_Descr *dtype) if (key == NULL) { return -1; } - tup = PyDict_GetItem(fields, key); + tup = PyDict_GetItemWithError(fields, key); if (tup == NULL) { + if (!PyErr_Occurred()) { + /* fields was missing the name it claimed to 
contain */ + PyErr_BadInternalCall(); + } return -1; } if (!PyArg_ParseTuple(tup, "Oi|O", &fld_dtype, &fld_offset, &title)) { @@ -925,8 +946,12 @@ _validate_object_field_overlap(PyArray_Descr *dtype) if (key == NULL) { return -1; } - tup = PyDict_GetItem(fields, key); + tup = PyDict_GetItemWithError(fields, key); if (tup == NULL) { + if (!PyErr_Occurred()) { + /* fields was missing the name it claimed to contain */ + PyErr_BadInternalCall(); + } return -1; } if (!PyArg_ParseTuple(tup, "Oi|O", &fld2_dtype, @@ -1178,23 +1203,39 @@ _convert_from_dict(PyObject *obj, int align) } /* Insert into dictionary */ - if (PyDict_GetItem(fields, name) != NULL) { + if (PyDict_GetItemWithError(fields, name) != NULL) { PyErr_SetString(PyExc_ValueError, "name already used as a name or title"); Py_DECREF(tup); goto fail; } - PyDict_SetItem(fields, name, tup); + else if (PyErr_Occurred()) { + /* MemoryError during dict lookup */ + Py_DECREF(tup); + goto fail; + } + int ret = PyDict_SetItem(fields, name, tup); Py_DECREF(name); + if (ret < 0) { + Py_DECREF(tup); + goto fail; + } if (len == 3) { if (PyBaseString_Check(title)) { - if (PyDict_GetItem(fields, title) != NULL) { + if (PyDict_GetItemWithError(fields, title) != NULL) { PyErr_SetString(PyExc_ValueError, "title already used as a name or title."); Py_DECREF(tup); goto fail; } - PyDict_SetItem(fields, title, tup); + else if (PyErr_Occurred()) { + /* MemoryError during dict lookup */ + goto fail; + } + if (PyDict_SetItem(fields, title, tup) < 0) { + Py_DECREF(tup); + goto fail; + } } } Py_DECREF(tup); @@ -1614,8 +1655,11 @@ _convert_from_str(PyObject *obj, int align) if (typeDict == NULL) { goto fail; } - PyObject *item = PyDict_GetItem(typeDict, obj); + PyObject *item = PyDict_GetItemWithError(typeDict, obj); if (item == NULL) { + if (PyErr_Occurred()) { + return NULL; + } goto fail; } @@ -2098,7 +2142,14 @@ arraydescr_names_set(PyArray_Descr *self, PyObject *val) self->hash = -1; /* Update dictionary keys in fields */ new_names = 
PySequence_Tuple(val); + if (new_names == NULL) { + return -1; + } new_fields = PyDict_New(); + if (new_fields == NULL) { + Py_DECREF(new_names); + return -1; + } for (i = 0; i < N; i++) { PyObject *key; PyObject *item; @@ -2106,20 +2157,35 @@ arraydescr_names_set(PyArray_Descr *self, PyObject *val) int ret; key = PyTuple_GET_ITEM(self->names, i); /* Borrowed references to item and new_key */ - item = PyDict_GetItem(self->fields, key); + item = PyDict_GetItemWithError(self->fields, key); + if (item == NULL) { + if (!PyErr_Occurred()) { + /* fields was missing the name it claimed to contain */ + PyErr_BadInternalCall(); + } + Py_DECREF(new_names); + Py_DECREF(new_fields); + return -1; + } new_key = PyTuple_GET_ITEM(new_names, i); /* Check for duplicates */ ret = PyDict_Contains(new_fields, new_key); - if (ret != 0) { - if (ret < 0) { - PyErr_Clear(); - } + if (ret < 0) { + Py_DECREF(new_names); + Py_DECREF(new_fields); + return -1; + } + else if (ret != 0) { PyErr_SetString(PyExc_ValueError, "Duplicate field names given."); Py_DECREF(new_names); Py_DECREF(new_fields); return -1; } - PyDict_SetItem(new_fields, new_key, item); + if (PyDict_SetItem(new_fields, new_key, item) < 0) { + Py_DECREF(new_names); + Py_DECREF(new_fields); + return -1; + } } /* Replace names */ @@ -2554,8 +2620,12 @@ arraydescr_setstate(PyArray_Descr *self, PyObject *args) if (fields != Py_None) { PyObject *key, *list; key = PyInt_FromLong(-1); - list = PyDict_GetItem(fields, key); + list = PyDict_GetItemWithError(fields, key); if (!list) { + if (!PyErr_Occurred()) { + /* fields was missing the name it claimed to contain */ + PyErr_BadInternalCall(); + } return NULL; } Py_INCREF(list); @@ -2726,8 +2796,12 @@ arraydescr_setstate(PyArray_Descr *self, PyObject *args) for (i = 0; i < PyTuple_GET_SIZE(names); ++i) { name = PyTuple_GET_ITEM(names, i); - field = PyDict_GetItem(fields, name); + field = PyDict_GetItemWithError(fields, name); if (!field) { + if (!PyErr_Occurred()) { + /* fields was 
missing the name it claimed to contain */ + PyErr_BadInternalCall(); + } return NULL; } @@ -2935,8 +3009,13 @@ PyArray_DescrNewByteorder(PyArray_Descr *self, char newendian) Py_INCREF(old); PyTuple_SET_ITEM(newvalue, i, old); } - PyDict_SetItem(newfields, key, newvalue); + int ret = PyDict_SetItem(newfields, key, newvalue); Py_DECREF(newvalue); + if (ret < 0) { + Py_DECREF(newfields); + Py_DECREF(new); + return NULL; + } } Py_DECREF(new->fields); new->fields = newfields; @@ -3212,10 +3291,12 @@ _check_has_fields(PyArray_Descr *self) static PyObject * _subscript_by_name(PyArray_Descr *self, PyObject *op) { - PyObject *obj = PyDict_GetItem(self->fields, op); + PyObject *obj = PyDict_GetItemWithError(self->fields, op); if (obj == NULL) { - PyErr_Format(PyExc_KeyError, - "Field named %R not found.", op); + if (!PyErr_Occurred()) { + PyErr_Format(PyExc_KeyError, + "Field named %R not found.", op); + } return NULL; } PyObject *descr = PyTuple_GET_ITEM(obj, 0); @@ -3292,9 +3373,11 @@ arraydescr_field_subset_view(PyArray_Descr *self, PyObject *ind) */ PyTuple_SET_ITEM(names, i, name); - tup = PyDict_GetItem(self->fields, name); + tup = PyDict_GetItemWithError(self->fields, name); if (tup == NULL) { - PyErr_SetObject(PyExc_KeyError, name); + if (!PyErr_Occurred()) { + PyErr_SetObject(PyExc_KeyError, name); + } goto fail; } diff --git a/numpy/core/src/multiarray/dragon4.c b/numpy/core/src/multiarray/dragon4.c index 1694596e9..d14b8e638 100644 --- a/numpy/core/src/multiarray/dragon4.c +++ b/numpy/core/src/multiarray/dragon4.c @@ -1565,8 +1565,8 @@ Dragon4(BigInt *bigints, const npy_int32 exponent, /* Options struct for easy passing of Dragon4 options. 
* * scientific - boolean controlling whether scientific notation is used - * digit_mode - whether to use unique or fixed fracional output - * cutoff_mode - whether 'precision' refers to toal digits, or digits past + * digit_mode - whether to use unique or fixed fractional output + * cutoff_mode - whether 'precision' refers to to all digits, or digits past * the decimal point. * precision - When negative, prints as many digits as needed for a unique * number. When positive specifies the maximum number of diff --git a/numpy/core/src/multiarray/einsum.c.src b/numpy/core/src/multiarray/einsum.c.src index 70af3fef9..1cc557825 100644 --- a/numpy/core/src/multiarray/einsum.c.src +++ b/numpy/core/src/multiarray/einsum.c.src @@ -2152,6 +2152,11 @@ get_combined_dims_view(PyArrayObject *op, int iop, char *labels) } /* A repeated label, find the original one and merge them. */ else { +#ifdef __GNUC__ +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wuninitialized" +#pragma GCC diagnostic ignored "-Wmaybe-uninitialized" +#endif int i = icombinemap[idim + label]; icombinemap[idim] = -1; @@ -2164,6 +2169,9 @@ get_combined_dims_view(PyArrayObject *op, int iop, char *labels) return NULL; } new_strides[i] += stride; +#ifdef __GNUC__ +#pragma GCC diagnostic pop +#endif } } diff --git a/numpy/core/src/multiarray/getset.c b/numpy/core/src/multiarray/getset.c index 9d39ee7a8..9a9c51fee 100644 --- a/numpy/core/src/multiarray/getset.c +++ b/numpy/core/src/multiarray/getset.c @@ -272,31 +272,56 @@ array_interface_get(PyArrayObject *self) Py_DECREF(dict); return NULL; } + int ret; /* dataptr */ obj = array_dataptr_get(self); - PyDict_SetItemString(dict, "data", obj); + ret = PyDict_SetItemString(dict, "data", obj); Py_DECREF(obj); + if (ret < 0) { + Py_DECREF(dict); + return NULL; + } obj = array_protocol_strides_get(self); - PyDict_SetItemString(dict, "strides", obj); + ret = PyDict_SetItemString(dict, "strides", obj); Py_DECREF(obj); + if (ret < 0) { + Py_DECREF(dict); + 
return NULL; + } obj = array_protocol_descr_get(self); - PyDict_SetItemString(dict, "descr", obj); + ret = PyDict_SetItemString(dict, "descr", obj); Py_DECREF(obj); + if (ret < 0) { + Py_DECREF(dict); + return NULL; + } obj = arraydescr_protocol_typestr_get(PyArray_DESCR(self)); - PyDict_SetItemString(dict, "typestr", obj); + ret = PyDict_SetItemString(dict, "typestr", obj); Py_DECREF(obj); + if (ret < 0) { + Py_DECREF(dict); + return NULL; + } obj = array_shape_get(self); - PyDict_SetItemString(dict, "shape", obj); + ret = PyDict_SetItemString(dict, "shape", obj); Py_DECREF(obj); + if (ret < 0) { + Py_DECREF(dict); + return NULL; + } obj = PyInt_FromLong(3); - PyDict_SetItemString(dict, "version", obj); + ret = PyDict_SetItemString(dict, "version", obj); Py_DECREF(obj); + if (ret < 0) { + Py_DECREF(dict); + return NULL; + } return dict; } diff --git a/numpy/core/src/multiarray/item_selection.c b/numpy/core/src/multiarray/item_selection.c index 54d9085b7..f0ef8ba3b 100644 --- a/numpy/core/src/multiarray/item_selection.c +++ b/numpy/core/src/multiarray/item_selection.c @@ -21,11 +21,168 @@ #include "lowlevel_strided_loops.h" #include "array_assign.h" -#include "item_selection.h" #include "npy_sort.h" #include "npy_partition.h" #include "npy_binsearch.h" #include "alloc.h" +#include "arraytypes.h" + + + +static NPY_GCC_OPT_3 NPY_INLINE int +npy_fasttake_impl( + char *dest, char *src, const npy_intp *indices, + npy_intp n, npy_intp m, npy_intp max_item, + npy_intp nelem, npy_intp chunk, + NPY_CLIPMODE clipmode, npy_intp itemsize, int needs_refcounting, + PyArray_Descr *dtype, int axis) +{ + NPY_BEGIN_THREADS_DEF; + NPY_BEGIN_THREADS_DESCR(dtype); + switch (clipmode) { + case NPY_RAISE: + for (npy_intp i = 0; i < n; i++) { + for (npy_intp j = 0; j < m; j++) { + npy_intp tmp = indices[j]; + if (check_and_adjust_index(&tmp, max_item, axis, + _save) < 0) { + return -1; + } + char *tmp_src = src + tmp * chunk; + if (needs_refcounting) { + for (npy_intp k = 0; k < nelem; 
k++) { + PyArray_Item_INCREF(tmp_src, dtype); + PyArray_Item_XDECREF(dest, dtype); + memmove(dest, tmp_src, itemsize); + dest += itemsize; + tmp_src += itemsize; + } + } + else { + memmove(dest, tmp_src, chunk); + dest += chunk; + } + } + src += chunk*max_item; + } + break; + case NPY_WRAP: + for (npy_intp i = 0; i < n; i++) { + for (npy_intp j = 0; j < m; j++) { + npy_intp tmp = indices[j]; + if (tmp < 0) { + while (tmp < 0) { + tmp += max_item; + } + } + else if (tmp >= max_item) { + while (tmp >= max_item) { + tmp -= max_item; + } + } + char *tmp_src = src + tmp * chunk; + if (needs_refcounting) { + for (npy_intp k = 0; k < nelem; k++) { + PyArray_Item_INCREF(tmp_src, dtype); + PyArray_Item_XDECREF(dest, dtype); + memmove(dest, tmp_src, itemsize); + dest += itemsize; + tmp_src += itemsize; + } + } + else { + memmove(dest, tmp_src, chunk); + dest += chunk; + } + } + src += chunk*max_item; + } + break; + case NPY_CLIP: + for (npy_intp i = 0; i < n; i++) { + for (npy_intp j = 0; j < m; j++) { + npy_intp tmp = indices[j]; + if (tmp < 0) { + tmp = 0; + } + else if (tmp >= max_item) { + tmp = max_item - 1; + } + char *tmp_src = src + tmp * chunk; + if (needs_refcounting) { + for (npy_intp k = 0; k < nelem; k++) { + PyArray_Item_INCREF(tmp_src, dtype); + PyArray_Item_XDECREF(dest, dtype); + memmove(dest, tmp_src, itemsize); + dest += itemsize; + tmp_src += itemsize; + } + } + else { + memmove(dest, tmp_src, chunk); + dest += chunk; + } + } + src += chunk*max_item; + } + break; + } + + NPY_END_THREADS; + return 0; +} + + +/* + * Helper function instantiating npy_fasttake_impl in different branches + * to allow the compiler to optimize each to the specific itemsize. 
+ */ +static NPY_GCC_OPT_3 int +npy_fasttake( + char *dest, char *src, const npy_intp *indices, + npy_intp n, npy_intp m, npy_intp max_item, + npy_intp nelem, npy_intp chunk, + NPY_CLIPMODE clipmode, npy_intp itemsize, int needs_refcounting, + PyArray_Descr *dtype, int axis) +{ + if (!needs_refcounting) { + if (chunk == 1) { + return npy_fasttake_impl( + dest, src, indices, n, m, max_item, nelem, chunk, + clipmode, itemsize, needs_refcounting, dtype, axis); + } + if (chunk == 2) { + return npy_fasttake_impl( + dest, src, indices, n, m, max_item, nelem, chunk, + clipmode, itemsize, needs_refcounting, dtype, axis); + } + if (chunk == 4) { + return npy_fasttake_impl( + dest, src, indices, n, m, max_item, nelem, chunk, + clipmode, itemsize, needs_refcounting, dtype, axis); + } + if (chunk == 8) { + return npy_fasttake_impl( + dest, src, indices, n, m, max_item, nelem, chunk, + clipmode, itemsize, needs_refcounting, dtype, axis); + } + if (chunk == 16) { + return npy_fasttake_impl( + dest, src, indices, n, m, max_item, nelem, chunk, + clipmode, itemsize, needs_refcounting, dtype, axis); + } + if (chunk == 32) { + return npy_fasttake_impl( + dest, src, indices, n, m, max_item, nelem, chunk, + clipmode, itemsize, needs_refcounting, dtype, axis); + } + } + + return npy_fasttake_impl( + dest, src, indices, n, m, max_item, nelem, chunk, + clipmode, itemsize, needs_refcounting, dtype, axis); +} + /*NUMPY_API * Take @@ -35,12 +192,10 @@ PyArray_TakeFrom(PyArrayObject *self0, PyObject *indices0, int axis, PyArrayObject *out, NPY_CLIPMODE clipmode) { PyArray_Descr *dtype; - PyArray_FastTakeFunc *func; PyArrayObject *obj = NULL, *self, *indices; - npy_intp nd, i, j, n, m, k, max_item, tmp, chunk, itemsize, nelem; + npy_intp nd, i, n, m, max_item, chunk, itemsize, nelem; npy_intp shape[NPY_MAXDIMS]; - char *src, *dest, *tmp_src; - int err; + npy_bool needs_refcounting; indices = NULL; @@ -122,9 +277,10 @@ PyArray_TakeFrom(PyArrayObject *self0, PyObject *indices0, int axis, nelem = 
chunk; itemsize = PyArray_ITEMSIZE(obj); chunk = chunk * itemsize; - src = PyArray_DATA(self); - dest = PyArray_DATA(obj); + char *src = PyArray_DATA(self); + char *dest = PyArray_DATA(obj); needs_refcounting = PyDataType_REFCHK(PyArray_DESCR(self)); + npy_intp *indices_data = (npy_intp *)PyArray_DATA(indices); if ((max_item == 0) && (PyArray_SIZE(obj) != 0)) { /* Index error, since that is the usual error for raise mode */ @@ -133,107 +289,10 @@ PyArray_TakeFrom(PyArrayObject *self0, PyObject *indices0, int axis, goto fail; } - func = PyArray_DESCR(self)->f->fasttake; - if (func == NULL) { - NPY_BEGIN_THREADS_DEF; - NPY_BEGIN_THREADS_DESCR(PyArray_DESCR(self)); - switch(clipmode) { - case NPY_RAISE: - for (i = 0; i < n; i++) { - for (j = 0; j < m; j++) { - tmp = ((npy_intp *)(PyArray_DATA(indices)))[j]; - if (check_and_adjust_index(&tmp, max_item, axis, - _save) < 0) { - goto fail; - } - tmp_src = src + tmp * chunk; - if (needs_refcounting) { - for (k=0; k < nelem; k++) { - PyArray_Item_INCREF(tmp_src, PyArray_DESCR(self)); - PyArray_Item_XDECREF(dest, PyArray_DESCR(self)); - memmove(dest, tmp_src, itemsize); - dest += itemsize; - tmp_src += itemsize; - } - } - else { - memmove(dest, tmp_src, chunk); - dest += chunk; - } - } - src += chunk*max_item; - } - break; - case NPY_WRAP: - for (i = 0; i < n; i++) { - for (j = 0; j < m; j++) { - tmp = ((npy_intp *)(PyArray_DATA(indices)))[j]; - if (tmp < 0) { - while (tmp < 0) { - tmp += max_item; - } - } - else if (tmp >= max_item) { - while (tmp >= max_item) { - tmp -= max_item; - } - } - tmp_src = src + tmp * chunk; - if (needs_refcounting) { - for (k=0; k < nelem; k++) { - PyArray_Item_INCREF(tmp_src, PyArray_DESCR(self)); - PyArray_Item_XDECREF(dest, PyArray_DESCR(self)); - memmove(dest, tmp_src, itemsize); - dest += itemsize; - tmp_src += itemsize; - } - } - else { - memmove(dest, tmp_src, chunk); - dest += chunk; - } - } - src += chunk*max_item; - } - break; - case NPY_CLIP: - for (i = 0; i < n; i++) { - for (j = 0; 
j < m; j++) { - tmp = ((npy_intp *)(PyArray_DATA(indices)))[j]; - if (tmp < 0) { - tmp = 0; - } - else if (tmp >= max_item) { - tmp = max_item - 1; - } - tmp_src = src + tmp * chunk; - if (needs_refcounting) { - for (k=0; k < nelem; k++) { - PyArray_Item_INCREF(tmp_src, PyArray_DESCR(self)); - PyArray_Item_XDECREF(dest, PyArray_DESCR(self)); - memmove(dest, tmp_src, itemsize); - dest += itemsize; - tmp_src += itemsize; - } - } - else { - memmove(dest, tmp_src, chunk); - dest += chunk; - } - } - src += chunk*max_item; - } - break; - } - NPY_END_THREADS; - } - else { - /* no gil release, need it for error reporting */ - err = func(dest, src, (npy_intp *)(PyArray_DATA(indices)), - max_item, n, m, nelem, clipmode); - if (err) { - goto fail; - } + if (npy_fasttake( + dest, src, indices_data, n, m, max_item, nelem, chunk, + clipmode, itemsize, needs_refcounting, dtype, axis) < 0) { + goto fail; } Py_XDECREF(indices); @@ -431,16 +490,78 @@ PyArray_PutTo(PyArrayObject *self, PyObject* values0, PyObject *indices0, return NULL; } + +static NPY_GCC_OPT_3 NPY_INLINE void +npy_fastputmask_impl( + char *dest, char *src, const npy_bool *mask_data, + npy_intp ni, npy_intp nv, npy_intp chunk) +{ + if (nv == 1) { + for (npy_intp i = 0; i < ni; i++) { + if (mask_data[i]) { + memmove(dest, src, chunk); + } + dest += chunk; + } + } + else { + char *tmp_src = src; + for (npy_intp i = 0, j = 0; i < ni; i++, j++) { + if (NPY_UNLIKELY(j >= nv)) { + j = 0; + tmp_src = src; + } + if (mask_data[i]) { + memmove(dest, tmp_src, chunk); + } + dest += chunk; + tmp_src += chunk; + } + } +} + + +/* + * Helper function instantiating npy_fastput_impl in different branches + * to allow the compiler to optimize each to the specific itemsize. 
+ */ +static NPY_GCC_OPT_3 void +npy_fastputmask( + char *dest, char *src, npy_bool *mask_data, + npy_intp ni, npy_intp nv, npy_intp chunk) +{ + if (chunk == 1) { + return npy_fastputmask_impl(dest, src, mask_data, ni, nv, chunk); + } + if (chunk == 2) { + return npy_fastputmask_impl(dest, src, mask_data, ni, nv, chunk); + } + if (chunk == 4) { + return npy_fastputmask_impl(dest, src, mask_data, ni, nv, chunk); + } + if (chunk == 8) { + return npy_fastputmask_impl(dest, src, mask_data, ni, nv, chunk); + } + if (chunk == 16) { + return npy_fastputmask_impl(dest, src, mask_data, ni, nv, chunk); + } + if (chunk == 32) { + return npy_fastputmask_impl(dest, src, mask_data, ni, nv, chunk); + } + + return npy_fastputmask_impl(dest, src, mask_data, ni, nv, chunk); +} + + /*NUMPY_API * Put values into an array according to a mask. */ NPY_NO_EXPORT PyObject * PyArray_PutMask(PyArrayObject *self, PyObject* values0, PyObject* mask0) { - PyArray_FastPutmaskFunc *func; PyArrayObject *mask, *values; PyArray_Descr *dtype; - npy_intp i, j, chunk, ni, nv; + npy_intp chunk, ni, nv; char *src, *dest; npy_bool *mask_data; int copied = 0; @@ -505,7 +626,7 @@ PyArray_PutMask(PyArrayObject *self, PyObject* values0, PyObject* mask0) dest = PyArray_DATA(self); if (PyDataType_REFCHK(PyArray_DESCR(self))) { - for (i = 0, j = 0; i < ni; i++, j++) { + for (npy_intp i = 0, j = 0; i < ni; i++, j++) { if (j >= nv) { j = 0; } @@ -522,20 +643,7 @@ PyArray_PutMask(PyArrayObject *self, PyObject* values0, PyObject* mask0) else { NPY_BEGIN_THREADS_DEF; NPY_BEGIN_THREADS_DESCR(PyArray_DESCR(self)); - func = PyArray_DESCR(self)->f->fastputmask; - if (func == NULL) { - for (i = 0, j = 0; i < ni; i++, j++) { - if (j >= nv) { - j = 0; - } - if (mask_data[i]) { - memmove(dest + i*chunk, src + j*chunk, chunk); - } - } - } - else { - func(dest, mask_data, ni, src, nv); - } + npy_fastputmask(dest, src, mask_data, ni, nv, chunk); NPY_END_THREADS; } diff --git a/numpy/core/src/multiarray/mapping.c 
b/numpy/core/src/multiarray/mapping.c index 3efb3cb9d..4122d27ad 100644 --- a/numpy/core/src/multiarray/mapping.c +++ b/numpy/core/src/multiarray/mapping.c @@ -1404,8 +1404,11 @@ _get_field_view(PyArrayObject *arr, PyObject *ind, PyArrayObject **view) npy_intp offset; /* get the field offset and dtype */ - tup = PyDict_GetItem(PyArray_DESCR(arr)->fields, ind); - if (tup == NULL){ + tup = PyDict_GetItemWithError(PyArray_DESCR(arr)->fields, ind); + if (tup == NULL && PyErr_Occurred()) { + return 0; + } + else if (tup == NULL){ PyObject *errmsg = PyUString_FromString("no field of name "); PyUString_Concat(&errmsg, ind); PyErr_SetObject(PyExc_ValueError, errmsg); diff --git a/numpy/core/src/multiarray/methods.c b/numpy/core/src/multiarray/methods.c index 83c993425..7b9aa4794 100644 --- a/numpy/core/src/multiarray/methods.c +++ b/numpy/core/src/multiarray/methods.c @@ -64,7 +64,11 @@ get_forwarding_ndarray_method(const char *name) if (module_methods == NULL) { return NULL; } - callable = PyDict_GetItemString(PyModule_GetDict(module_methods), name); + callable = _PyDict_GetItemStringWithError(PyModule_GetDict(module_methods), name); + if (callable == NULL && PyErr_Occurred()) { + Py_DECREF(module_methods); + return NULL; + } if (callable == NULL) { Py_DECREF(module_methods); PyErr_Format(PyExc_RuntimeError, diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c index b1b9c0051..11e0bc44d 100644 --- a/numpy/core/src/multiarray/multiarraymodule.c +++ b/numpy/core/src/multiarray/multiarraymodule.c @@ -1559,7 +1559,6 @@ _prepend_ones(PyArrayObject *arr, int nd, int ndmin, NPY_ORDER order) return ret; } - #define STRIDING_OK(op, order) \ ((order) == NPY_ANYORDER || \ (order) == NPY_KEEPORDER || \ @@ -1601,7 +1600,10 @@ _array_fromobject(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kws) dtype_obj = PyTuple_GET_ITEM(args, 1); } else if (kws) { - dtype_obj = PyDict_GetItem(kws, npy_ma_str_dtype); + dtype_obj = 
PyDict_GetItemWithError(kws, npy_ma_str_dtype); + if (dtype_obj == NULL && PyErr_Occurred()) { + return NULL; + } if (dtype_obj == NULL) { dtype_obj = Py_None; } @@ -1618,7 +1620,10 @@ _array_fromobject(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kws) else { /* fast path for copy=False rest default (np.asarray) */ PyObject * copy_obj, * order_obj, *ndmin_obj; - copy_obj = PyDict_GetItem(kws, npy_ma_str_copy); + copy_obj = PyDict_GetItemWithError(kws, npy_ma_str_copy); + if (copy_obj == NULL && PyErr_Occurred()) { + return NULL; + } if (copy_obj != Py_False) { goto full_path; } @@ -1627,14 +1632,20 @@ _array_fromobject(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kws) /* order does not matter for contiguous 1d arrays */ if (PyArray_NDIM((PyArrayObject*)op) > 1 || !PyArray_IS_C_CONTIGUOUS((PyArrayObject*)op)) { - order_obj = PyDict_GetItem(kws, npy_ma_str_order); - if (order_obj != Py_None && order_obj != NULL) { + order_obj = PyDict_GetItemWithError(kws, npy_ma_str_order); + if (order_obj == NULL && PyErr_Occurred()) { + return NULL; + } + else if (order_obj != Py_None && order_obj != NULL) { goto full_path; } } - ndmin_obj = PyDict_GetItem(kws, npy_ma_str_ndmin); - if (ndmin_obj) { + ndmin_obj = PyDict_GetItemWithError(kws, npy_ma_str_ndmin); + if (ndmin_obj == NULL && PyErr_Occurred()) { + return NULL; + } + else if (ndmin_obj) { long t = PyLong_AsLong(ndmin_obj); if (error_converting(t)) { goto clean_type; diff --git a/numpy/core/src/multiarray/number.c b/numpy/core/src/multiarray/number.c index 8cdc502d6..21471a80a 100644 --- a/numpy/core/src/multiarray/number.c +++ b/numpy/core/src/multiarray/number.c @@ -57,8 +57,11 @@ array_inplace_power(PyArrayObject *a1, PyObject *o2, PyObject *NPY_UNUSED(modulo */ /* FIXME - macro contains a return */ -#define SET(op) temp = PyDict_GetItemString(dict, #op); \ - if (temp != NULL) { \ +#define SET(op) temp = _PyDict_GetItemStringWithError(dict, #op); \ + if (temp == NULL && PyErr_Occurred()) { \ + 
return -1; \ + } \ + else if (temp != NULL) { \ if (!(PyCallable_Check(temp))) { \ return -1; \ } \ diff --git a/numpy/core/src/multiarray/usertypes.c b/numpy/core/src/multiarray/usertypes.c index 2e8fb514f..997467b4d 100644 --- a/numpy/core/src/multiarray/usertypes.c +++ b/numpy/core/src/multiarray/usertypes.c @@ -128,6 +128,44 @@ PyArray_InitArrFuncs(PyArray_ArrFuncs *f) f->cancastto = NULL; } + +static int +test_deprecated_arrfuncs_members(PyArray_ArrFuncs *f) { + /* NumPy 1.19, 2020-01-15 */ + if (f->fastputmask != NULL) { + if (DEPRECATE( + "The ->f->fastputmask member of custom dtypes is ignored; " + "setting it may be an error in the future.\n" + "The custom dtype you are using must be revised, but " + "results will not be affected.") < 0) { + return -1; + } + } + /* NumPy 1.19, 2020-01-15 */ + if (f->fasttake != NULL) { + if (DEPRECATE( + "The ->f->fastputmask member of custom dtypes is ignored; " + "setting it may be an error in the future.\n" + "The custom dtype you are using must be revised, but " + "results will not be affected.") < 0) { + return -1; + } + } + /* NumPy 1.19, 2020-01-15 */ + if (f->fastclip != NULL) { + /* fastclip was already deprecated at execution time in 1.17. */ + if (DEPRECATE( + "The ->f->fastclip member of custom dtypes is deprecated; " + "setting it will be an error in the future.\n" + "The custom dtype you are using must be changed to use " + "PyUFunc_RegisterLoopForDescr to attach a custom loop to " + "np.core.umath.clip, np.minimum, and np.maximum") < 0) { + return -1; + } + } + return 0; +} + /* returns typenum to associate with this type >=NPY_USERDEF. 
needs the userdecrs table and PyArray_NUMUSER variables @@ -176,6 +214,11 @@ PyArray_RegisterDataType(PyArray_Descr *descr) PyErr_SetString(PyExc_ValueError, "missing typeobject"); return -1; } + + if (test_deprecated_arrfuncs_members(f) < 0) { + return -1; + } + userdescrs = realloc(userdescrs, (NPY_NUMUSERTYPES+1)*sizeof(void *)); if (userdescrs == NULL) { diff --git a/numpy/core/src/umath/_umath_tests.c.src b/numpy/core/src/umath/_umath_tests.c.src index bd3fe80b6..672fe31ae 100644 --- a/numpy/core/src/umath/_umath_tests.c.src +++ b/numpy/core/src/umath/_umath_tests.c.src @@ -610,7 +610,13 @@ PyMODINIT_FUNC PyInit__umath_tests(void) { } import_array(); + if (PyErr_Occurred()) { + return NULL; + } import_ufunc(); + if (PyErr_Occurred()) { + return NULL; + } d = PyModule_GetDict(m); diff --git a/numpy/core/src/umath/extobj.c b/numpy/core/src/umath/extobj.c index aea1815e8..3404a0c6a 100644 --- a/numpy/core/src/umath/extobj.c +++ b/numpy/core/src/umath/extobj.c @@ -165,7 +165,7 @@ get_global_ext_obj(void) if (thedict == NULL) { thedict = PyEval_GetBuiltins(); } - ref = PyDict_GetItem(thedict, npy_um_str_pyvals_name); + ref = PyDict_GetItemWithError(thedict, npy_um_str_pyvals_name); #if USE_USE_DEFAULTS==1 } #endif @@ -290,6 +290,9 @@ _check_ufunc_fperr(int errmask, PyObject *extobj, const char *ufunc_name) { /* Get error object globals */ if (extobj == NULL) { extobj = get_global_ext_obj(); + if (extobj == NULL && PyErr_Occurred()) { + return -1; + } } if (_extract_pyvals(extobj, ufunc_name, NULL, NULL, &errobj) < 0) { @@ -311,6 +314,9 @@ _get_bufsize_errmask(PyObject * extobj, const char *ufunc_name, /* Get the buffersize and errormask */ if (extobj == NULL) { extobj = get_global_ext_obj(); + if (extobj == NULL && PyErr_Occurred()) { + return -1; + } } if (_extract_pyvals(extobj, ufunc_name, buffersize, errormask, NULL) < 0) { diff --git a/numpy/core/src/umath/override.c b/numpy/core/src/umath/override.c index 43bed425c..bf6e5a698 100644 --- 
a/numpy/core/src/umath/override.c +++ b/numpy/core/src/umath/override.c @@ -112,9 +112,16 @@ fail: static int normalize_signature_keyword(PyObject *normal_kwds) { - PyObject* obj = PyDict_GetItemString(normal_kwds, "sig"); + PyObject *obj = _PyDict_GetItemStringWithError(normal_kwds, "sig"); + if (obj == NULL && PyErr_Occurred()){ + return -1; + } if (obj != NULL) { - if (PyDict_GetItemString(normal_kwds, "signature")) { + PyObject *sig = _PyDict_GetItemStringWithError(normal_kwds, "signature"); + if (sig == NULL && PyErr_Occurred()) { + return -1; + } + if (sig) { PyErr_SetString(PyExc_TypeError, "cannot specify both 'sig' and 'signature'"); return -1; @@ -165,11 +172,17 @@ normalize___call___args(PyUFuncObject *ufunc, PyObject *args, /* If we have more args than nin, they must be the output variables.*/ if (nargs > nin) { - if(nkwds > 0 && PyDict_GetItemString(*normal_kwds, "out")) { - PyErr_Format(PyExc_TypeError, - "argument given by name ('out') and position " - "(%"NPY_INTP_FMT")", nin); - return -1; + if (nkwds > 0) { + PyObject *out_kwd = _PyDict_GetItemStringWithError(*normal_kwds, "out"); + if (out_kwd == NULL && PyErr_Occurred()) { + return -1; + } + else if (out_kwd) { + PyErr_Format(PyExc_TypeError, + "argument given by name ('out') and position " + "(%"NPY_INTP_FMT")", nin); + return -1; + } } for (i = nin; i < nargs; i++) { not_all_none = (PyTuple_GET_ITEM(args, i) != Py_None); @@ -204,11 +217,20 @@ normalize___call___args(PyUFuncObject *ufunc, PyObject *args, } } /* gufuncs accept either 'axes' or 'axis', but not both */ - if (nkwds >= 2 && (PyDict_GetItemString(*normal_kwds, "axis") && - PyDict_GetItemString(*normal_kwds, "axes"))) { - PyErr_SetString(PyExc_TypeError, - "cannot specify both 'axis' and 'axes'"); - return -1; + if (nkwds >= 2) { + PyObject *axis_kwd = _PyDict_GetItemStringWithError(*normal_kwds, "axis"); + if (axis_kwd == NULL && PyErr_Occurred()) { + return -1; + } + PyObject *axes_kwd = _PyDict_GetItemStringWithError(*normal_kwds, 
"axes"); + if (axes_kwd == NULL && PyErr_Occurred()) { + return -1; + } + if (axis_kwd && axes_kwd) { + PyErr_SetString(PyExc_TypeError, + "cannot specify both 'axis' and 'axes'"); + return -1; + } } /* finally, ufuncs accept 'sig' or 'signature' normalize to 'signature' */ return nkwds == 0 ? 0 : normalize_signature_keyword(*normal_kwds); @@ -243,7 +265,11 @@ normalize_reduce_args(PyUFuncObject *ufunc, PyObject *args, } for (i = 1; i < nargs; i++) { - if (PyDict_GetItemString(*normal_kwds, kwlist[i])) { + PyObject *kwd = _PyDict_GetItemStringWithError(*normal_kwds, kwlist[i]); + if (kwd == NULL && PyErr_Occurred()) { + return -1; + } + else if (kwd) { PyErr_Format(PyExc_TypeError, "argument given by name ('%s') and position " "(%"NPY_INTP_FMT")", kwlist[i], i); @@ -293,7 +319,11 @@ normalize_accumulate_args(PyUFuncObject *ufunc, PyObject *args, } for (i = 1; i < nargs; i++) { - if (PyDict_GetItemString(*normal_kwds, kwlist[i])) { + PyObject *kwd = _PyDict_GetItemStringWithError(*normal_kwds, kwlist[i]); + if (kwd == NULL && PyErr_Occurred()) { + return -1; + } + else if (kwd) { PyErr_Format(PyExc_TypeError, "argument given by name ('%s') and position " "(%"NPY_INTP_FMT")", kwlist[i], i); @@ -341,7 +371,11 @@ normalize_reduceat_args(PyUFuncObject *ufunc, PyObject *args, } for (i = 2; i < nargs; i++) { - if (PyDict_GetItemString(*normal_kwds, kwlist[i])) { + PyObject *kwd = _PyDict_GetItemStringWithError(*normal_kwds, kwlist[i]); + if (kwd == NULL && PyErr_Occurred()) { + return -1; + } + else if (kwd) { PyErr_Format(PyExc_TypeError, "argument given by name ('%s') and position " "(%"NPY_INTP_FMT")", kwlist[i], i); @@ -469,8 +503,11 @@ PyUFunc_CheckOverride(PyUFuncObject *ufunc, char *method, /* ensure out is always a tuple */ normal_kwds = PyDict_Copy(kwds); - out = PyDict_GetItemString(normal_kwds, "out"); - if (out != NULL) { + out = _PyDict_GetItemStringWithError(normal_kwds, "out"); + if (out == NULL && PyErr_Occurred()) { + goto fail; + } + else if (out) { int 
nout = ufunc->nout; if (PyTuple_CheckExact(out)) { diff --git a/numpy/core/src/umath/simd.inc.src b/numpy/core/src/umath/simd.inc.src index 1f29526be..121597579 100644 --- a/numpy/core/src/umath/simd.inc.src +++ b/numpy/core/src/umath/simd.inc.src @@ -1134,7 +1134,7 @@ sse2_@kind@_@TYPE@(@type@ * ip, @type@ * op, const npy_intp n) /* Order of operations important for MSVC 2015 */ *op = (*op @OP@ ip[i] || npy_isnan(*op)) ? *op : ip[i]; } - assert((npy_uintp)n < (stride) || npy_is_aligned(&ip[i], VECTOR_SIZE_BYTES)); + assert(n < stride || npy_is_aligned(&ip[i], VECTOR_SIZE_BYTES)); if (i + 3 * stride <= n) { /* load the first elements */ @vtype@ c1 = @vpre@_load_@vsuf@((@type@*)&ip[i]); @@ -1856,7 +1856,7 @@ static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ void * method: x* = x - y*PI/2, where y = rint(x*2/PI). x* \in [-PI/4, PI/4]. * (3) Map cos(x) to (+/-)sine or (+/-)cosine of x* based on the quadrant k = * int(y). - * (4) For elements outside that range, Cody-Waite reduction peforms poorly + * (4) For elements outside that range, Cody-Waite reduction performs poorly * leading to catastrophic cancellation. We compute cosine by calling glibc in * a scalar fashion. * (5) Vectorized implementation has a max ULP of 1.49 and performs at least diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c index fdbe8f2ad..e4ce437fb 100644 --- a/numpy/core/src/umath/ufunc_object.c +++ b/numpy/core/src/umath/ufunc_object.c @@ -324,7 +324,7 @@ _find_array_prepare(ufunc_full_args args, NPY_NO_EXPORT int set_matmul_flags(PyObject *d) { - PyObject *matmul = PyDict_GetItemString(d, "matmul"); + PyObject *matmul = _PyDict_GetItemStringWithError(d, "matmul"); if (matmul == NULL) { return -1; } @@ -397,7 +397,7 @@ _ufunc_setup_flags(PyUFuncObject *ufunc, npy_uint32 op_in_flags, * A NULL is placed in output_wrap for outputs that * should just have PyArray_Return called. 
*/ -static void +static int _find_array_wrap(ufunc_full_args args, PyObject *kwds, PyObject **output_wrap, int nin, int nout) { @@ -409,9 +409,12 @@ _find_array_wrap(ufunc_full_args args, PyObject *kwds, * If a 'subok' parameter is passed and isn't True, don't wrap but put None * into slots with out arguments which means return the out argument */ - if (kwds != NULL && (obj = PyDict_GetItem(kwds, - npy_um_str_subok)) != NULL) { - if (obj != Py_True) { + if (kwds != NULL) { + obj = PyDict_GetItemWithError(kwds, npy_um_str_subok); + if (obj == NULL && PyErr_Occurred()) { + return -1; + } + else if (obj != NULL && obj != Py_True) { /* skip search for wrap members */ goto handle_out; } @@ -450,7 +453,7 @@ handle_out: } Py_XDECREF(wrap); - return; + return 0; } @@ -1031,7 +1034,7 @@ get_ufunc_arguments(PyUFuncObject *ufunc, int nin = ufunc->nin; int nout = ufunc->nout; int nop = ufunc->nargs; - PyObject *obj, *context; + PyObject *obj; PyArray_Descr *dtype = NULL; /* * Initialize output objects so caller knows when outputs and optional @@ -1068,22 +1071,8 @@ get_ufunc_arguments(PyUFuncObject *ufunc, out_op[i] = (PyArrayObject *)PyArray_FromArray(obj_a, NULL, 0); } else { - if (!PyArray_IsScalar(obj, Generic)) { - /* - * TODO: There should be a comment here explaining what - * context does. - */ - context = Py_BuildValue("OOi", ufunc, args, i); - if (context == NULL) { - goto fail; - } - } - else { - context = NULL; - } out_op[i] = (PyArrayObject *)PyArray_FromAny(obj, - NULL, 0, 0, 0, context); - Py_XDECREF(context); + NULL, 0, 0, 0, NULL); } if (out_op[i] == NULL) { @@ -1928,7 +1917,15 @@ make_full_arg_tuple( } /* Look for output keyword arguments */ - out_kwd = kwds ? 
PyDict_GetItem(kwds, npy_um_str_out) : NULL; + if (kwds) { + out_kwd = PyDict_GetItemWithError(kwds, npy_um_str_out); + if (out_kwd == NULL && PyErr_Occurred()) { + goto fail; + } + } + else { + out_kwd = NULL; + } if (out_kwd != NULL) { assert(nargs == nin); @@ -3296,9 +3293,12 @@ get_binary_op_function(PyUFuncObject *ufunc, int *otype, if (key == NULL) { return -1; } - obj = PyDict_GetItem(ufunc->userloops, key); + obj = PyDict_GetItemWithError(ufunc->userloops, key); Py_DECREF(key); - if (obj != NULL) { + if (obj == NULL && PyErr_Occurred()) { + return -1; + } + else if (obj != NULL) { funcdata = (PyUFunc_Loop1d *)NpyCapsule_AsVoidPtr(obj); while (funcdata != NULL) { int *types = funcdata->arg_types; @@ -3861,8 +3861,6 @@ PyUFunc_Accumulate(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out, stride_copy[1] = stride1; stride_copy[2] = stride0; - needs_api = NpyIter_IterationNeedsAPI(iter); - NPY_BEGIN_THREADS_NDITER(iter); do { @@ -4387,7 +4385,7 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, PyObject *args, PyObject *axes_in = NULL; PyArrayObject *mp = NULL, *wheremask = NULL, *ret = NULL; PyObject *op; - PyObject *obj_ind, *context; + PyObject *obj_ind; PyArrayObject *indices = NULL; PyArray_Descr *otype = NULL; PyArrayObject *out = NULL; @@ -4426,8 +4424,11 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, PyObject *args, } /* if there is a tuple of 1 for `out` in kwds, unpack it */ if (kwds != NULL) { - PyObject *out_obj = PyDict_GetItem(kwds, npy_um_str_out); - if (out_obj != NULL && PyTuple_CheckExact(out_obj)) { + PyObject *out_obj = PyDict_GetItemWithError(kwds, npy_um_str_out); + if (out_obj == NULL && PyErr_Occurred()){ + return NULL; + } + else if (out_obj != NULL && PyTuple_CheckExact(out_obj)) { if (PyTuple_GET_SIZE(out_obj) != 1) { PyErr_SetString(PyExc_ValueError, "The 'out' tuple must have exactly one entry"); @@ -4478,14 +4479,7 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, PyObject *args, } } /* Ensure input is an array */ - if 
(!PyArray_Check(op) && !PyArray_IsScalar(op, Generic)) { - context = Py_BuildValue("O(O)i", ufunc, op, 0); - } - else { - context = NULL; - } - mp = (PyArrayObject *)PyArray_FromAny(op, NULL, 0, 0, 0, context); - Py_XDECREF(context); + mp = (PyArrayObject *)PyArray_FromAny(op, NULL, 0, 0, 0, NULL); if (mp == NULL) { goto fail; } @@ -4719,7 +4713,9 @@ ufunc_generic_call(PyUFuncObject *ufunc, PyObject *args, PyObject *kwds) if (make_full_arg_tuple(&full_args, ufunc->nin, ufunc->nout, args, kwds) < 0) { goto fail; } - _find_array_wrap(full_args, kwds, wraparr, ufunc->nin, ufunc->nout); + if (_find_array_wrap(full_args, kwds, wraparr, ufunc->nin, ufunc->nout) < 0) { + goto fail; + } /* wrap outputs */ for (i = 0; i < ufunc->nout; i++) { @@ -4781,8 +4777,11 @@ ufunc_geterr(PyObject *NPY_UNUSED(dummy), PyObject *args) if (thedict == NULL) { thedict = PyEval_GetBuiltins(); } - res = PyDict_GetItem(thedict, npy_um_str_pyvals_name); - if (res != NULL) { + res = PyDict_GetItemWithError(thedict, npy_um_str_pyvals_name); + if (res == NULL && PyErr_Occurred()) { + return NULL; + } + else if (res != NULL) { Py_INCREF(res); return res; } @@ -5127,8 +5126,11 @@ PyUFunc_RegisterLoopForDescr(PyUFuncObject *ufunc, function, arg_typenums, data); if (result == 0) { - cobj = PyDict_GetItem(ufunc->userloops, key); - if (cobj == NULL) { + cobj = PyDict_GetItemWithError(ufunc->userloops, key); + if (cobj == NULL && PyErr_Occurred()) { + result = -1; + } + else if (cobj == NULL) { PyErr_SetString(PyExc_KeyError, "userloop for user dtype not found"); result = -1; @@ -5232,9 +5234,12 @@ PyUFunc_RegisterLoopForType(PyUFuncObject *ufunc, funcdata->nargs = 0; /* Get entry for this user-defined type*/ - cobj = PyDict_GetItem(ufunc->userloops, key); + cobj = PyDict_GetItemWithError(ufunc->userloops, key); + if (cobj == NULL && PyErr_Occurred()) { + return 0; + } /* If it's not there, then make one and return. 
*/ - if (cobj == NULL) { + else if (cobj == NULL) { cobj = NpyCapsule_FromVoidPtr((void *)funcdata, _loop1d_list_free); if (cobj == NULL) { goto fail; diff --git a/numpy/core/src/umath/ufunc_type_resolution.c b/numpy/core/src/umath/ufunc_type_resolution.c index 0e71305b6..2534ff78a 100644 --- a/numpy/core/src/umath/ufunc_type_resolution.c +++ b/numpy/core/src/umath/ufunc_type_resolution.c @@ -1379,9 +1379,12 @@ find_userloop(PyUFuncObject *ufunc, if (key == NULL) { return -1; } - obj = PyDict_GetItem(ufunc->userloops, key); + obj = PyDict_GetItemWithError(ufunc->userloops, key); Py_DECREF(key); - if (obj == NULL) { + if (obj == NULL && PyErr_Occurred()){ + return -1; + } + else if (obj == NULL) { continue; } for (funcdata = (PyUFunc_Loop1d *)NpyCapsule_AsVoidPtr(obj); @@ -1784,9 +1787,12 @@ linear_search_userloop_type_resolver(PyUFuncObject *self, if (key == NULL) { return -1; } - obj = PyDict_GetItem(self->userloops, key); + obj = PyDict_GetItemWithError(self->userloops, key); Py_DECREF(key); - if (obj == NULL) { + if (obj == NULL && PyErr_Occurred()){ + return -1; + } + else if (obj == NULL) { continue; } for (funcdata = (PyUFunc_Loop1d *)NpyCapsule_AsVoidPtr(obj); @@ -1848,9 +1854,12 @@ type_tuple_userloop_type_resolver(PyUFuncObject *self, if (key == NULL) { return -1; } - obj = PyDict_GetItem(self->userloops, key); + obj = PyDict_GetItemWithError(self->userloops, key); Py_DECREF(key); - if (obj == NULL) { + if (obj == NULL && PyErr_Occurred()){ + return -1; + } + else if (obj == NULL) { continue; } diff --git a/numpy/core/src/umath/umathmodule.c b/numpy/core/src/umath/umathmodule.c index e14006985..bad42d657 100644 --- a/numpy/core/src/umath/umathmodule.c +++ b/numpy/core/src/umath/umathmodule.c @@ -70,9 +70,7 @@ object_ufunc_loop_selector(PyUFuncObject *ufunc, } PyObject * -ufunc_frompyfunc(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *NPY_UNUSED(kwds)) { - /* Keywords are ignored for now */ - +ufunc_frompyfunc(PyObject *NPY_UNUSED(dummy), PyObject 
*args, PyObject *kwds) { PyObject *function, *pyname = NULL; int nin, nout, i, nargs; PyUFunc_PyFuncData *fdata; @@ -81,14 +79,18 @@ ufunc_frompyfunc(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *NPY_UNUS Py_ssize_t fname_len = -1; void * ptr, **data; int offset[2]; + PyObject *identity = NULL; /* note: not the same semantics as Py_None */ + static char *kwlist[] = {"", "nin", "nout", "identity", NULL}; - if (!PyArg_ParseTuple(args, "Oii:frompyfunc", &function, &nin, &nout)) { + if (!PyArg_ParseTupleAndKeywords(args, kwds, "Oii|$O:frompyfunc", kwlist, + &function, &nin, &nout, &identity)) { return NULL; } if (!PyCallable_Check(function)) { PyErr_SetString(PyExc_TypeError, "function must be callable"); return NULL; } + nargs = nin + nout; pyname = PyObject_GetAttrString(function, "__name__"); @@ -146,10 +148,10 @@ ufunc_frompyfunc(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *NPY_UNUS /* Do a better job someday */ doc = "dynamic ufunc based on a python function"; - self = (PyUFuncObject *)PyUFunc_FromFuncAndData( + self = (PyUFuncObject *)PyUFunc_FromFuncAndDataAndSignatureAndIdentity( (PyUFuncGenericFunction *)pyfunc_functions, data, - types, /* ntypes */ 1, nin, nout, PyUFunc_None, - str, doc, /* unused */ 0); + types, /* ntypes */ 1, nin, nout, identity ? 
PyUFunc_IdentityValue : PyUFunc_None, + str, doc, /* unused */ 0, NULL, identity); if (self == NULL) { PyArray_free(ptr); diff --git a/numpy/core/tests/test_api.py b/numpy/core/tests/test_api.py index 3f0a59eec..71b46e551 100644 --- a/numpy/core/tests/test_api.py +++ b/numpy/core/tests/test_api.py @@ -39,57 +39,38 @@ def test_array_array(): assert_equal(old_refcount, sys.getrefcount(np.float64)) # test string - S2 = np.dtype((str, 2)) - S3 = np.dtype((str, 3)) - S5 = np.dtype((str, 5)) + S2 = np.dtype((bytes, 2)) + S3 = np.dtype((bytes, 3)) + S5 = np.dtype((bytes, 5)) + assert_equal(np.array(b"1.0", dtype=np.float64), + np.ones((), dtype=np.float64)) + assert_equal(np.array(b"1.0").dtype, S3) + assert_equal(np.array(b"1.0", dtype=bytes).dtype, S3) + assert_equal(np.array(b"1.0", dtype=S2), np.array(b"1.")) + assert_equal(np.array(b"1", dtype=S5), np.ones((), dtype=S5)) + + # test string + U2 = np.dtype((str, 2)) + U3 = np.dtype((str, 3)) + U5 = np.dtype((str, 5)) assert_equal(np.array("1.0", dtype=np.float64), np.ones((), dtype=np.float64)) - assert_equal(np.array("1.0").dtype, S3) - assert_equal(np.array("1.0", dtype=str).dtype, S3) - assert_equal(np.array("1.0", dtype=S2), np.array("1.")) - assert_equal(np.array("1", dtype=S5), np.ones((), dtype=S5)) - - # test unicode - _unicode = globals().get("unicode") - if _unicode: - U2 = np.dtype((_unicode, 2)) - U3 = np.dtype((_unicode, 3)) - U5 = np.dtype((_unicode, 5)) - assert_equal(np.array(_unicode("1.0"), dtype=np.float64), - np.ones((), dtype=np.float64)) - assert_equal(np.array(_unicode("1.0")).dtype, U3) - assert_equal(np.array(_unicode("1.0"), dtype=_unicode).dtype, U3) - assert_equal(np.array(_unicode("1.0"), dtype=U2), - np.array(_unicode("1."))) - assert_equal(np.array(_unicode("1"), dtype=U5), - np.ones((), dtype=U5)) + assert_equal(np.array("1.0").dtype, U3) + assert_equal(np.array("1.0", dtype=str).dtype, U3) + assert_equal(np.array("1.0", dtype=U2), np.array(str("1."))) + assert_equal(np.array("1", 
dtype=U5), np.ones((), dtype=U5)) builtins = getattr(__builtins__, '__dict__', __builtins__) assert_(hasattr(builtins, 'get')) - # test buffer - _buffer = builtins.get("buffer") - if _buffer and sys.version_info[:3] >= (2, 7, 5): - # This test fails for earlier versions of Python. - # Evidently a bug got fixed in 2.7.5. - dat = np.array(_buffer('1.0'), dtype=np.float64) - assert_equal(dat, [49.0, 46.0, 48.0]) - assert_(dat.dtype.type is np.float64) - - dat = np.array(_buffer(b'1.0')) - assert_equal(dat, [49, 46, 48]) - assert_(dat.dtype.type is np.uint8) - - # test memoryview, new version of buffer - _memoryview = builtins.get("memoryview") - if _memoryview: - dat = np.array(_memoryview(b'1.0'), dtype=np.float64) - assert_equal(dat, [49.0, 46.0, 48.0]) - assert_(dat.dtype.type is np.float64) - - dat = np.array(_memoryview(b'1.0')) - assert_equal(dat, [49, 46, 48]) - assert_(dat.dtype.type is np.uint8) + # test memoryview + dat = np.array(memoryview(b'1.0'), dtype=np.float64) + assert_equal(dat, [49.0, 46.0, 48.0]) + assert_(dat.dtype.type is np.float64) + + dat = np.array(memoryview(b'1.0')) + assert_equal(dat, [49, 46, 48]) + assert_(dat.dtype.type is np.uint8) # test array interface a = np.array(100.0, dtype=np.float64) diff --git a/numpy/core/tests/test_arrayprint.py b/numpy/core/tests/test_arrayprint.py index eac4647c9..96240be0f 100644 --- a/numpy/core/tests/test_arrayprint.py +++ b/numpy/core/tests/test_arrayprint.py @@ -233,12 +233,8 @@ class TestArray2String: return 'O' x = np.arange(3) - if sys.version_info[0] >= 3: - x_hex = "[0x0 0x1 0x2]" - x_oct = "[0o0 0o1 0o2]" - else: - x_hex = "[0x0L 0x1L 0x2L]" - x_oct = "[0L 01L 02L]" + x_hex = "[0x0 0x1 0x2]" + x_oct = "[0o0 0o1 0o2]" assert_(np.array2string(x, formatter={'all':_format_function}) == "[. 
o O]") assert_(np.array2string(x, formatter={'int_kind':_format_function}) == @@ -469,12 +465,8 @@ class TestPrintOptions: assert_equal(unicode(np.array(u'café', '<U4')), u'café') - if sys.version_info[0] >= 3: - assert_equal(repr(np.array('café', '<U4')), - "array('café', dtype='<U4')") - else: - assert_equal(repr(np.array(u'café', '<U4')), - "array(u'caf\\xe9', dtype='<U4')") + assert_equal(repr(np.array('café', '<U4')), + "array('café', dtype='<U4')") assert_equal(str(np.array('test', np.str_)), 'test') a = np.zeros(1, dtype=[('a', '<i4', (3,))]) @@ -707,7 +699,7 @@ class TestPrintOptions: array([10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22.], dtype=float32)""")) - styp = '<U4' if sys.version_info[0] >= 3 else '|S4' + styp = '<U4' assert_equal(repr(np.ones(3, dtype=styp)), "array(['1', '1', '1'], dtype='{}')".format(styp)) assert_equal(repr(np.ones(12, dtype=styp)), textwrap.dedent("""\ @@ -846,17 +838,14 @@ class TestPrintOptions: def test_unicode_object_array(): import sys - if sys.version_info[0] >= 3: - expected = "array(['é'], dtype=object)" - else: - expected = "array([u'\\xe9'], dtype=object)" + expected = "array(['é'], dtype=object)" x = np.array([u'\xe9'], dtype=object) assert_equal(repr(x), expected) class TestContextManager: def test_ctx_mgr(self): - # test that context manager actuall works + # test that context manager actually works with np.printoptions(precision=2): s = str(np.array([2.0]) / 3) assert_equal(s, '[0.67]') diff --git a/numpy/core/tests/test_defchararray.py b/numpy/core/tests/test_defchararray.py index d2a1e86d2..4c9016c3e 100644 --- a/numpy/core/tests/test_defchararray.py +++ b/numpy/core/tests/test_defchararray.py @@ -341,15 +341,8 @@ class TestMethods: assert_array_equal(C, tgt) def test_decode(self): - if sys.version_info[0] >= 3: - A = np.char.array([b'\\u03a3']) - assert_(A.decode('unicode-escape')[0] == '\u03a3') - else: - with suppress_warnings() as sup: - if sys.py3kwarning: - sup.filter(DeprecationWarning, 
"'hex_codec'") - A = np.char.array(['736563726574206d657373616765']) - assert_(A.decode('hex_codec')[0] == 'secret message') + A = np.char.array([b'\\u03a3']) + assert_(A.decode('unicode-escape')[0] == '\u03a3') def test_encode(self): B = self.B.encode('unicode_escape') @@ -360,18 +353,12 @@ class TestMethods: assert_(T[2, 0] == b'123 345 \0') def test_join(self): - if sys.version_info[0] >= 3: - # NOTE: list(b'123') == [49, 50, 51] - # so that b','.join(b'123') results to an error on Py3 - A0 = self.A.decode('ascii') - else: - A0 = self.A + # NOTE: list(b'123') == [49, 50, 51] + # so that b','.join(b'123') results to an error on Py3 + A0 = self.A.decode('ascii') A = np.char.join([',', '#'], A0) - if sys.version_info[0] >= 3: - assert_(issubclass(A.dtype.type, np.unicode_)) - else: - assert_(issubclass(A.dtype.type, np.string_)) + assert_(issubclass(A.dtype.type, np.unicode_)) tgt = np.array([[' ,a,b,c, ', ''], ['1,2,3,4,5', 'M#i#x#e#d#C#a#s#e'], ['1,2,3, ,\t, ,3,4,5, ,\x00, ', 'U#P#P#E#R']]) @@ -442,15 +429,6 @@ class TestMethods: assert_(issubclass(R.dtype.type, np.string_)) assert_array_equal(R, tgt) - if sys.version_info[0] < 3: - # NOTE: b'abc'.replace(b'a', 'b') is not allowed on Py3 - R = self.A.replace(b'a', u'\u03a3') - tgt = [[u' \u03a3bc ', ''], - ['12345', u'MixedC\u03a3se'], - ['123 \t 345 \x00', 'UPPER']] - assert_(issubclass(R.dtype.type, np.unicode_)) - assert_array_equal(R, tgt) - def test_rjust(self): assert_(issubclass(self.A.rjust(10).dtype.type, np.string_)) diff --git a/numpy/core/tests/test_deprecations.py b/numpy/core/tests/test_deprecations.py index 7232b5949..252133d7b 100644 --- a/numpy/core/tests/test_deprecations.py +++ b/numpy/core/tests/test_deprecations.py @@ -227,15 +227,10 @@ class TestComparisonDeprecations(_DeprecationTestCase): struct = np.zeros(2, dtype="i4,i4") for arg2 in [struct, "a"]: for f in [operator.lt, operator.le, operator.gt, operator.ge]: - if sys.version_info[0] >= 3: - # py3 - with warnings.catch_warnings() as l: 
- warnings.filterwarnings("always") - assert_raises(TypeError, f, arg1, arg2) - assert_(not l) - else: - # py2 - assert_warns(DeprecationWarning, f, arg1, arg2) + with warnings.catch_warnings() as l: + warnings.filterwarnings("always") + assert_raises(TypeError, f, arg1, arg2) + assert_(not l) class TestDatetime64Timezone(_DeprecationTestCase): @@ -332,9 +327,6 @@ class TestNumericStyleTypecodes(_DeprecationTestCase): 'Int8', 'Int16', 'Int32', 'Int64', 'Object0', 'Timedelta64', 'UInt8', 'UInt16', 'UInt32', 'UInt64', 'Void0' ] - if sys.version_info[0] < 3: - deprecated_types.extend(['Unicode0', 'String0']) - for dt in deprecated_types: self.assert_deprecated(np.dtype, exceptions=(TypeError,), args=(dt,)) @@ -355,28 +347,6 @@ class TestTestDeprecated: test_case_instance.teardown() -class TestClassicIntDivision(_DeprecationTestCase): - """ - See #7949. Deprecate the numeric-style dtypes with -3 flag in python 2 - if used for division - List of data types: https://docs.scipy.org/doc/numpy/user/basics.types.html - """ - def test_int_dtypes(self): - #scramble types and do some mix and match testing - deprecated_types = [ - 'bool_', 'int_', 'intc', 'uint8', 'int8', 'uint64', 'int32', 'uint16', - 'intp', 'int64', 'uint32', 'int16' - ] - if sys.version_info[0] < 3 and sys.py3kwarning: - import operator as op - dt2 = 'bool_' - for dt1 in deprecated_types: - a = np.array([1,2,3], dtype=dt1) - b = np.array([1,2,3], dtype=dt2) - self.assert_deprecated(op.div, args=(a,b)) - dt2 = dt1 - - class TestNonNumericConjugate(_DeprecationTestCase): """ Deprecate no-op behavior of ndarray.conjugate on non-numeric dtypes, @@ -566,3 +536,16 @@ class TestNonZero(_DeprecationTestCase): def test_zerod(self): self.assert_deprecated(lambda: np.nonzero(np.array(0))) self.assert_deprecated(lambda: np.nonzero(np.array(1))) + + +def test_deprecate_ragged_arrays(): + # 2019-11-29 1.19.0 + # + # NEP 34 deprecated automatic object dtype when creating ragged + # arrays. 
Also see the "ragged" tests in `test_multiarray` + # + # emits a VisibleDeprecationWarning + arg = [1, [2, 3]] + with assert_warns(np.VisibleDeprecationWarning): + np.array(arg) + diff --git a/numpy/core/tests/test_dtype.py b/numpy/core/tests/test_dtype.py index 1d24d8a3d..c9a65cd9c 100644 --- a/numpy/core/tests/test_dtype.py +++ b/numpy/core/tests/test_dtype.py @@ -136,11 +136,11 @@ class TestBuiltin: 'offsets':[0, 2]}, align=True) def test_field_order_equality(self): - x = np.dtype({'names': ['A', 'B'], - 'formats': ['i4', 'f4'], + x = np.dtype({'names': ['A', 'B'], + 'formats': ['i4', 'f4'], 'offsets': [0, 4]}) - y = np.dtype({'names': ['B', 'A'], - 'formats': ['f4', 'i4'], + y = np.dtype({'names': ['B', 'A'], + 'formats': ['f4', 'i4'], 'offsets': [4, 0]}) assert_equal(x == y, False) @@ -418,7 +418,7 @@ class TestRecord: {'formats': ['i4', 'i4'], 'f0': ('i4', 0), 'f1':('i4', 4)}) def test_fieldless_views(self): - a = np.zeros(2, dtype={'names':[], 'formats':[], 'offsets':[], + a = np.zeros(2, dtype={'names':[], 'formats':[], 'offsets':[], 'itemsize':8}) assert_raises(ValueError, a.view, np.dtype([])) @@ -900,11 +900,6 @@ class TestString: assert_equal(repr(dt), "dtype(('<i2', (1,)))") assert_equal(str(dt), "('<i2', (1,))") - @pytest.mark.skipif(sys.version_info[0] >= 3, reason="Python 2 only") - def test_dtype_str_with_long_in_shape(self): - # Pull request #376, should not error - np.dtype('(1L,)i4') - def test_base_dtype_with_object_type(self): # Issue gh-2798, should not error. np.array(['a'], dtype="O").astype(("O", [("name", "O")])) diff --git a/numpy/core/tests/test_item_selection.py b/numpy/core/tests/test_item_selection.py index cadd0d513..3c35245a3 100644 --- a/numpy/core/tests/test_item_selection.py +++ b/numpy/core/tests/test_item_selection.py @@ -20,8 +20,9 @@ class TestTake: 'clip': {-1: 0, 4: 1}} # Currently all types but object, use the same function generation. # So it should not be necessary to test all. 
However test also a non - # refcounted struct on top of object. - types = int, object, np.dtype([('', 'i', 2)]) + # refcounted struct on top of object, which has a size that hits the + # default (non-specialized) path. + types = int, object, np.dtype([('', 'i2', 3)]) for t in types: # ta works, even if the array may be odd if buffer interface is used ta = np.array(a if np.issubdtype(t, np.number) else a_str, dtype=t) diff --git a/numpy/core/tests/test_mem_overlap.py b/numpy/core/tests/test_mem_overlap.py index c5115fa7e..7c1cff9b7 100644 --- a/numpy/core/tests/test_mem_overlap.py +++ b/numpy/core/tests/test_mem_overlap.py @@ -11,9 +11,6 @@ from numpy.testing import ( assert_, assert_raises, assert_equal, assert_array_equal ) -if sys.version_info[0] >= 3: - xrange = range - ndims = 2 size = 10 @@ -138,11 +135,7 @@ def test_diophantine_fuzz(): # Check no solution exists (provided the problem is # small enough so that brute force checking doesn't # take too long) - try: - ranges = tuple(xrange(0, a*ub+1, a) for a, ub in zip(A, U)) - except OverflowError: - # xrange on 32-bit Python 2 may overflow - continue + ranges = tuple(range(0, a*ub+1, a) for a, ub in zip(A, U)) size = 1 for r in ranges: @@ -475,7 +468,7 @@ def check_internal_overlap(a, manual_expected=None): # Brute-force check m = set() - ranges = tuple(xrange(n) for n in a.shape) + ranges = tuple(range(n) for n in a.shape) for v in itertools.product(*ranges): offset = sum(s*w for s, w in zip(a.strides, v)) if offset in m: diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py index 85910886a..600941cfd 100644 --- a/numpy/core/tests/test_multiarray.py +++ b/numpy/core/tests/test_multiarray.py @@ -29,10 +29,7 @@ except ImportError: except ImportError: pathlib = None -if sys.version_info[0] >= 3: - import builtins -else: - import __builtin__ as builtins +import builtins from decimal import Decimal import numpy as np @@ -44,21 +41,13 @@ from numpy.testing import ( assert_allclose, 
IS_PYPY, HAS_REFCOUNT, assert_array_less, runstring, temppath, suppress_warnings, break_cycles, ) +from numpy.testing._private.utils import _no_tracing from numpy.core.tests._locales import CommaDecimalPointLocale # Need to test an object that does not fully implement math interface from datetime import timedelta, datetime -if sys.version_info[:2] > (3, 2): - # In Python 3.3 the representation of empty shape, strides and sub-offsets - # is an empty tuple instead of None. - # https://docs.python.org/dev/whatsnew/3.3.html#api-changes - EMPTY = () -else: - EMPTY = None - - def _aligned_zeros(shape, dtype=float, order="C", align=None): """ Allocate a new ndarray with aligned memory. @@ -94,26 +83,6 @@ def _aligned_zeros(shape, dtype=float, order="C", align=None): data.fill(0) return data -def _no_tracing(func): - """ - Decorator to temporarily turn off tracing for the duration of a test. - Needed in tests that check refcounting, otherwise the tracing itself - influences the refcounts - """ - if not hasattr(sys, 'gettrace'): - return func - else: - @functools.wraps(func) - def wrapper(*args, **kwargs): - original_trace = sys.gettrace() - try: - sys.settrace(None) - return func(*args, **kwargs) - finally: - sys.settrace(original_trace) - return wrapper - - class TestFlags: def setup(self): @@ -189,7 +158,6 @@ class TestFlags: vals.setflags(write=True) assert_(vals.flags.writeable) - @pytest.mark.skipif(sys.version_info[0] < 3, reason="Python 2 always copies") @pytest.mark.skipif(IS_PYPY, reason="PyPy always copies") def test_writeable_pickle(self): import pickle @@ -456,7 +424,7 @@ class TestArrayConstruction: assert_equal(r, np.ones((2, 6, 6))) d = np.ones((6, )) - r = np.array([[d, d + 1], d + 2]) + r = np.array([[d, d + 1], d + 2], dtype=object) assert_equal(len(r), 2) assert_equal(r[0], [d, d + 1]) assert_equal(r[1], d + 2) @@ -971,13 +939,6 @@ class TestCreation: assert_equal(np.array([[1j, 1j],[1, 1]]).dtype, complex) assert_equal(np.array([[1, 1, 1],[1, 1j, 1.], 
[1, 1, 1]]).dtype, complex) - @pytest.mark.skipif(sys.version_info[0] >= 3, reason="Not Python 2") - def test_sequence_long(self): - assert_equal(np.array([long(4), long(4)]).dtype, long) - assert_equal(np.array([long(4), 2**80]).dtype, object) - assert_equal(np.array([long(4), 2**80, long(4)]).dtype, object) - assert_equal(np.array([2**80, long(4)]).dtype, object) - def test_non_sequence_sequence(self): """Should not segfault. @@ -1059,34 +1020,60 @@ class TestCreation: assert_raises(ValueError, np.ndarray, buffer=buf, strides=(0,), shape=(max_bytes//itemsize + 1,), dtype=dtype) - def test_jagged_ndim_object(self): - # Lists of mismatching depths are treated as object arrays - a = np.array([[1], 2, 3]) - assert_equal(a.shape, (3,)) - assert_equal(a.dtype, object) + def _ragged_creation(self, seq): + # without dtype=object, the ragged object should raise + with assert_warns(np.VisibleDeprecationWarning): + a = np.array(seq) + b = np.array(seq, dtype=object) + assert_equal(a, b) + return b - a = np.array([1, [2], 3]) + def test_ragged_ndim_object(self): + # Lists of mismatching depths are treated as object arrays + a = self._ragged_creation([[1], 2, 3]) assert_equal(a.shape, (3,)) assert_equal(a.dtype, object) - a = np.array([1, 2, [3]]) + a = self._ragged_creation([1, [2], 3]) assert_equal(a.shape, (3,)) assert_equal(a.dtype, object) - def test_jagged_shape_object(self): - # The jagged dimension of a list is turned into an object array - a = np.array([[1, 1], [2], [3]]) + a = self._ragged_creation([1, 2, [3]]) assert_equal(a.shape, (3,)) assert_equal(a.dtype, object) - a = np.array([[1], [2, 2], [3]]) + def test_ragged_shape_object(self): + # The ragged dimension of a list is turned into an object array + a = self._ragged_creation([[1, 1], [2], [3]]) assert_equal(a.shape, (3,)) assert_equal(a.dtype, object) - a = np.array([[1], [2], [3, 3]]) + a = self._ragged_creation([[1], [2, 2], [3]]) assert_equal(a.shape, (3,)) assert_equal(a.dtype, object) + a = 
self._ragged_creation([[1], [2], [3, 3]]) + assert a.shape == (3,) + assert a.dtype == object + + def test_array_of_ragged_array(self): + outer = np.array([None, None]) + outer[0] = outer[1] = np.array([1, 2, 3]) + assert np.array(outer).shape == (2,) + assert np.array([outer]).shape == (1, 2) + + outer_ragged = np.array([None, None]) + outer_ragged[0] = np.array([1, 2, 3]) + outer_ragged[1] = np.array([1, 2, 3, 4]) + # should both of these emit deprecation warnings? + assert np.array(outer_ragged).shape == (2,) + assert np.array([outer_ragged]).shape == (1, 2,) + + def test_deep_nonragged_object(self): + # None of these should raise, even though they are missing dtype=object + a = np.array([[[Decimal(1)]]]) + a = np.array([1, Decimal(1)]) + a = np.array([[1], [Decimal(1)]]) class TestStructured: def test_subarray_field_access(self): @@ -2266,10 +2253,11 @@ class TestMethods: assert_equal([a.searchsorted(a[i], 'left') for i in ind], ind) assert_equal([a.searchsorted(a[i], 'right') for i in ind], ind + 1) - def test_searchsorted_with_sorter(self): + def test_searchsorted_with_invalid_sorter(self): a = np.array([5, 2, 1, 3, 4]) s = np.argsort(a) - assert_raises(TypeError, np.searchsorted, a, 0, sorter=(1, (2, 3))) + assert_raises(TypeError, np.searchsorted, a, 0, + sorter=np.array((1, (2, 3)), dtype=object)) assert_raises(TypeError, np.searchsorted, a, 0, sorter=[1.1]) assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4]) assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4, 5, 6]) @@ -2279,6 +2267,7 @@ class TestMethods: assert_raises(ValueError, np.searchsorted, a, 0, sorter=[-1, 0, 1, 2, 3]) assert_raises(ValueError, np.searchsorted, a, 0, sorter=[4, 0, -1, 2, 3]) + def test_searchsorted_with_sorter(self): a = np.random.rand(300) s = a.argsort() b = np.sort(a) @@ -3373,6 +3362,7 @@ class TestBinop: 'and': (np.bitwise_and, True, int), 'xor': (np.bitwise_xor, True, int), 'or': (np.bitwise_or, True, int), + 'matmul': (np.matmul, False, 
float), # 'ge': (np.less_equal, False), # 'gt': (np.less, False), # 'le': (np.greater_equal, False), @@ -3380,8 +3370,6 @@ class TestBinop: # 'eq': (np.equal, False), # 'ne': (np.not_equal, False), } - if sys.version_info >= (3, 5): - ops['matmul'] = (np.matmul, False, float) class Coerced(Exception): pass @@ -3806,8 +3794,6 @@ class TestSubscripting: def test_test_zero_rank(self): x = np.array([1, 2, 3]) assert_(isinstance(x[0], np.int_)) - if sys.version_info[0] < 3: - assert_(isinstance(x[0], int)) assert_(type(x[0, ...]) is np.ndarray) @@ -3906,10 +3892,7 @@ class TestPickling: assert ref() is None def _loads(self, obj): - if sys.version_info[0] >= 3: - return pickle.loads(obj, encoding='latin1') - else: - return pickle.loads(obj) + return pickle.loads(obj, encoding='latin1') # version 0 pickles, using protocol=2 to pickle # version 0 doesn't have a version field @@ -4475,11 +4458,10 @@ class TestCompress: class TestPutmask: def tst_basic(self, x, T, mask, val): np.putmask(x, mask, val) - assert_equal(x[mask], T(val)) - assert_equal(x.dtype, T) + assert_equal(x[mask], np.array(val, T)) def test_ip_types(self): - unchecked_types = [bytes, unicode, np.void, object] + unchecked_types = [bytes, unicode, np.void] x = np.random.random(1000)*100 mask = x < 40 @@ -4490,6 +4472,10 @@ class TestPutmask: if T not in unchecked_types: self.tst_basic(x.copy().astype(T), T, mask, val) + # Also test string of a length which uses an untypical length + dt = np.dtype("S3") + self.tst_basic(x.astype(dt), dt.type, mask, dt.type(val)[:3]) + def test_mask_size(self): assert_raises(ValueError, np.putmask, np.array([1, 2, 3]), [True], 5) @@ -4529,7 +4515,7 @@ class TestTake: assert_array_equal(x.take(ind, axis=0), x) def test_ip_types(self): - unchecked_types = [bytes, unicode, np.void, object] + unchecked_types = [bytes, unicode, np.void] x = np.random.random(24)*100 x.shape = 2, 3, 4 @@ -4538,6 +4524,9 @@ class TestTake: if T not in unchecked_types: self.tst_basic(x.copy().astype(T)) 
+ # Also test string of a length which uses an untypical length + self.tst_basic(x.astype("S3")) + def test_raise(self): x = np.random.random(24)*100 x.shape = 2, 3, 4 @@ -5218,7 +5207,6 @@ class TestRecord: # Error raised when multiple fields have the same name assert_raises(ValueError, test_dtype_init) - @pytest.mark.skipif(sys.version_info[0] < 3, reason="Not Python 3") def test_bytes_fields(self): # Bytes are not allowed in field names and not recognized in titles # on Py3 @@ -5234,7 +5222,6 @@ class TestRecord: y = x[0] assert_raises(IndexError, y.__getitem__, b'a') - @pytest.mark.skipif(sys.version_info[0] < 3, reason="Not Python 3") def test_multiple_field_name_unicode(self): def test_dtype_unicode(): np.dtype([("\u20B9", "f8"), ("B", "f8"), ("\u20B9", "f8")]) @@ -5242,32 +5229,6 @@ class TestRecord: # Error raised when multiple fields have the same name(unicode included) assert_raises(ValueError, test_dtype_unicode) - @pytest.mark.skipif(sys.version_info[0] >= 3, reason="Not Python 2") - def test_unicode_field_titles(self): - # Unicode field titles are added to field dict on Py2 - title = u'b' - dt = np.dtype([((title, 'a'), int)]) - dt[title] - dt['a'] - x = np.array([(1,), (2,), (3,)], dtype=dt) - x[title] - x['a'] - y = x[0] - y[title] - y['a'] - - @pytest.mark.skipif(sys.version_info[0] >= 3, reason="Not Python 2") - def test_unicode_field_names(self): - # Unicode field names are converted to ascii on Python 2: - encodable_name = u'b' - assert_equal(np.dtype([(encodable_name, int)]).names[0], b'b') - assert_equal(np.dtype([(('a', encodable_name), int)]).names[0], b'b') - - # But raises UnicodeEncodeError if it can't be encoded: - nonencodable_name = u'\uc3bc' - assert_raises(UnicodeEncodeError, np.dtype, [(nonencodable_name, int)]) - assert_raises(UnicodeEncodeError, np.dtype, [(('a', nonencodable_name), int)]) - def test_fromarrays_unicode(self): # A single name string provided to fromarrays() is allowed to be unicode # on both Python 2 and 3: @@ 
-5288,51 +5249,41 @@ class TestRecord: a = np.zeros((1,), dtype=[('f1', 'i4'), ('f2', 'i4'), ('f3', [('sf1', 'i4')])]) - is_py3 = sys.version_info[0] >= 3 - if is_py3: - funcs = (str,) - # byte string indexing fails gracefully - assert_raises(IndexError, a.__setitem__, b'f1', 1) - assert_raises(IndexError, a.__getitem__, b'f1') - assert_raises(IndexError, a['f1'].__setitem__, b'sf1', 1) - assert_raises(IndexError, a['f1'].__getitem__, b'sf1') - else: - funcs = (str, unicode) - for func in funcs: - b = a.copy() - fn1 = func('f1') - b[fn1] = 1 - assert_equal(b[fn1], 1) - fnn = func('not at all') - assert_raises(ValueError, b.__setitem__, fnn, 1) - assert_raises(ValueError, b.__getitem__, fnn) - b[0][fn1] = 2 - assert_equal(b[fn1], 2) - # Subfield - assert_raises(ValueError, b[0].__setitem__, fnn, 1) - assert_raises(ValueError, b[0].__getitem__, fnn) - # Subfield - fn3 = func('f3') - sfn1 = func('sf1') - b[fn3][sfn1] = 1 - assert_equal(b[fn3][sfn1], 1) - assert_raises(ValueError, b[fn3].__setitem__, fnn, 1) - assert_raises(ValueError, b[fn3].__getitem__, fnn) - # multiple subfields - fn2 = func('f2') - b[fn2] = 3 - - assert_equal(b[['f1', 'f2']][0].tolist(), (2, 3)) - assert_equal(b[['f2', 'f1']][0].tolist(), (3, 2)) - assert_equal(b[['f1', 'f3']][0].tolist(), (2, (1,))) + # byte string indexing fails gracefully + assert_raises(IndexError, a.__setitem__, b'f1', 1) + assert_raises(IndexError, a.__getitem__, b'f1') + assert_raises(IndexError, a['f1'].__setitem__, b'sf1', 1) + assert_raises(IndexError, a['f1'].__getitem__, b'sf1') + b = a.copy() + fn1 = str('f1') + b[fn1] = 1 + assert_equal(b[fn1], 1) + fnn = str('not at all') + assert_raises(ValueError, b.__setitem__, fnn, 1) + assert_raises(ValueError, b.__getitem__, fnn) + b[0][fn1] = 2 + assert_equal(b[fn1], 2) + # Subfield + assert_raises(ValueError, b[0].__setitem__, fnn, 1) + assert_raises(ValueError, b[0].__getitem__, fnn) + # Subfield + fn3 = str('f3') + sfn1 = str('sf1') + b[fn3][sfn1] = 1 + 
assert_equal(b[fn3][sfn1], 1) + assert_raises(ValueError, b[fn3].__setitem__, fnn, 1) + assert_raises(ValueError, b[fn3].__getitem__, fnn) + # multiple subfields + fn2 = str('f2') + b[fn2] = 3 + + assert_equal(b[['f1', 'f2']][0].tolist(), (2, 3)) + assert_equal(b[['f2', 'f1']][0].tolist(), (3, 2)) + assert_equal(b[['f1', 'f3']][0].tolist(), (2, (1,))) # non-ascii unicode field indexing is well behaved - if not is_py3: - pytest.skip('non ascii unicode field indexing skipped; ' - 'raises segfault on python 2.x') - else: - assert_raises(ValueError, a.__setitem__, u'\u03e0', 1) - assert_raises(ValueError, a.__getitem__, u'\u03e0') + assert_raises(ValueError, a.__setitem__, u'\u03e0', 1) + assert_raises(ValueError, a.__getitem__, u'\u03e0') def test_record_hash(self): a = np.array([(1, 2), (1, 2)], dtype='i1,i2') @@ -6305,56 +6256,55 @@ class TestMatmul(MatmulCommon): assert not np.any(c) -if sys.version_info[:2] >= (3, 5): - class TestMatmulOperator(MatmulCommon): - import operator - matmul = operator.matmul - - def test_array_priority_override(self): +class TestMatmulOperator(MatmulCommon): + import operator + matmul = operator.matmul - class A: - __array_priority__ = 1000 + def test_array_priority_override(self): - def __matmul__(self, other): - return "A" - - def __rmatmul__(self, other): - return "A" - - a = A() - b = np.ones(2) - assert_equal(self.matmul(a, b), "A") - assert_equal(self.matmul(b, a), "A") - - def test_matmul_raises(self): - assert_raises(TypeError, self.matmul, np.int8(5), np.int8(5)) - assert_raises(TypeError, self.matmul, np.void(b'abc'), np.void(b'abc')) - assert_raises(ValueError, self.matmul, np.arange(10), np.void(b'abc')) - - def test_matmul_inplace(): - # It would be nice to support in-place matmul eventually, but for now - # we don't have a working implementation, so better just to error out - # and nudge people to writing "a = a @ b". 
- a = np.eye(3) - b = np.eye(3) - assert_raises(TypeError, a.__imatmul__, b) - import operator - assert_raises(TypeError, operator.imatmul, a, b) - # we avoid writing the token `exec` so as not to crash python 2's - # parser - exec_ = getattr(builtins, "exec") - assert_raises(TypeError, exec_, "a @= b", globals(), locals()) - - def test_matmul_axes(): - a = np.arange(3*4*5).reshape(3, 4, 5) - c = np.matmul(a, a, axes=[(-2, -1), (-1, -2), (1, 2)]) - assert c.shape == (3, 4, 4) - d = np.matmul(a, a, axes=[(-2, -1), (-1, -2), (0, 1)]) - assert d.shape == (4, 4, 3) - e = np.swapaxes(d, 0, 2) - assert_array_equal(e, c) - f = np.matmul(a, np.arange(3), axes=[(1, 0), (0), (0)]) - assert f.shape == (4, 5) + class A: + __array_priority__ = 1000 + + def __matmul__(self, other): + return "A" + + def __rmatmul__(self, other): + return "A" + + a = A() + b = np.ones(2) + assert_equal(self.matmul(a, b), "A") + assert_equal(self.matmul(b, a), "A") + + def test_matmul_raises(self): + assert_raises(TypeError, self.matmul, np.int8(5), np.int8(5)) + assert_raises(TypeError, self.matmul, np.void(b'abc'), np.void(b'abc')) + assert_raises(ValueError, self.matmul, np.arange(10), np.void(b'abc')) + +def test_matmul_inplace(): + # It would be nice to support in-place matmul eventually, but for now + # we don't have a working implementation, so better just to error out + # and nudge people to writing "a = a @ b". 
+ a = np.eye(3) + b = np.eye(3) + assert_raises(TypeError, a.__imatmul__, b) + import operator + assert_raises(TypeError, operator.imatmul, a, b) + # we avoid writing the token `exec` so as not to crash python 2's + # parser + exec_ = getattr(builtins, "exec") + assert_raises(TypeError, exec_, "a @= b", globals(), locals()) + +def test_matmul_axes(): + a = np.arange(3*4*5).reshape(3, 4, 5) + c = np.matmul(a, a, axes=[(-2, -1), (-1, -2), (1, 2)]) + assert c.shape == (3, 4, 4) + d = np.matmul(a, a, axes=[(-2, -1), (-1, -2), (0, 1)]) + assert d.shape == (4, 4, 3) + e = np.swapaxes(d, 0, 2) + assert_array_equal(e, c) + f = np.matmul(a, np.arange(3), axes=[(1, 0), (0), (0)]) + assert f.shape == (4, 5) class TestInner: @@ -7056,7 +7006,7 @@ class TestNewBufferProtocol: assert_equal(y.shape, (5,)) assert_equal(y.ndim, 1) assert_equal(y.strides, (4,)) - assert_equal(y.suboffsets, EMPTY) + assert_equal(y.suboffsets, ()) assert_equal(y.itemsize, 4) def test_export_simple_nd(self): @@ -7066,7 +7016,7 @@ class TestNewBufferProtocol: assert_equal(y.shape, (2, 2)) assert_equal(y.ndim, 2) assert_equal(y.strides, (16, 8)) - assert_equal(y.suboffsets, EMPTY) + assert_equal(y.suboffsets, ()) assert_equal(y.itemsize, 8) def test_export_discontiguous(self): @@ -7076,7 +7026,7 @@ class TestNewBufferProtocol: assert_equal(y.shape, (3, 3)) assert_equal(y.ndim, 2) assert_equal(y.strides, (36, 4)) - assert_equal(y.suboffsets, EMPTY) + assert_equal(y.suboffsets, ()) assert_equal(y.itemsize, 4) def test_export_record(self): @@ -7109,7 +7059,7 @@ class TestNewBufferProtocol: y = memoryview(x) assert_equal(y.shape, (1,)) assert_equal(y.ndim, 1) - assert_equal(y.suboffsets, EMPTY) + assert_equal(y.suboffsets, ()) sz = sum([np.dtype(b).itemsize for a, b in dt]) if np.dtype('l').itemsize == 4: @@ -7125,10 +7075,10 @@ class TestNewBufferProtocol: x = np.array(([[1, 2], [3, 4]],), dtype=[('a', ('i', (2, 2)))]) y = memoryview(x) assert_equal(y.format, 'T{(2,2)i:a:}') - assert_equal(y.shape, EMPTY) + 
assert_equal(y.shape, ()) assert_equal(y.ndim, 0) - assert_equal(y.strides, EMPTY) - assert_equal(y.suboffsets, EMPTY) + assert_equal(y.strides, ()) + assert_equal(y.suboffsets, ()) assert_equal(y.itemsize, 16) def test_export_endian(self): @@ -7231,7 +7181,6 @@ class TestNewBufferProtocol: a = np.empty((1,) * 32) self._check_roundtrip(a) - @pytest.mark.skipif(sys.version_info < (2, 7, 7), reason="See gh-11115") def test_error_too_many_dims(self): def make_ctype(shape, scalar_type): t = scalar_type @@ -7272,12 +7221,11 @@ class TestNewBufferProtocol: np.array(t()) exc = cm.exception - if sys.version_info.major > 2: - with assert_raises_regex( - NotImplementedError, - r"Unrepresentable .* 'u' \(UCS-2 strings\)" - ): - raise exc.__cause__ + with assert_raises_regex( + NotImplementedError, + r"Unrepresentable .* 'u' \(UCS-2 strings\)" + ): + raise exc.__cause__ def test_ctypes_integer_via_memoryview(self): # gh-11150, due to bpo-10746 @@ -7556,7 +7504,6 @@ class TestConversion: class NotConvertible: def __bool__(self): raise NotImplementedError - __nonzero__ = __bool__ # python 2 assert_raises(NotImplementedError, bool, np.array(NotConvertible())) assert_raises(NotImplementedError, bool, np.array([NotConvertible()])) @@ -7834,11 +7781,6 @@ class TestArrayPriority: op.ge, op.lt, op.le, op.ne, op.eq ] - # See #7949. Don't use "/" operator With -3 switch, since python reports it - # as a DeprecationWarning - if sys.version_info[0] < 3 and not sys.py3kwarning: - binary_ops.append(op.div) - class Foo(np.ndarray): __array_priority__ = 100. 
@@ -7966,14 +7908,7 @@ class TestFormat: def test_1d_format(self): # until gh-5543, ensure that the behaviour matches what it used to be a = np.array([np.pi]) - if sys.version_info[:2] >= (3, 4): - assert_raises(TypeError, '{:30}'.format, a) - else: - with suppress_warnings() as sup: - sup.filter(PendingDeprecationWarning) - res = '{:30}'.format(a) - dst = object.__format__(a, '30') - assert_equal(res, dst) + assert_raises(TypeError, '{:30}'.format, a) from numpy.testing import IS_PYPY @@ -8324,7 +8259,7 @@ def test_npymath_real(): assert_allclose(got, expected) def test_uintalignment_and_alignment(): - # alignment code needs to satisfy these requrements: + # alignment code needs to satisfy these requirements: # 1. numpy structs match C struct layout # 2. ufuncs/casting is safe wrt to aligned access # 3. copy code is safe wrt to "uint alidned" access diff --git a/numpy/core/tests/test_numeric.py b/numpy/core/tests/test_numeric.py index 934f0a2fd..c4956c298 100644 --- a/numpy/core/tests/test_numeric.py +++ b/numpy/core/tests/test_numeric.py @@ -1209,14 +1209,12 @@ class TestNonzero: def test_nonzero_invalid_object(self): # gh-9295 - a = np.array([np.array([1, 2]), 3]) + a = np.array([np.array([1, 2]), 3], dtype=object) assert_raises(ValueError, np.nonzero, a) class BoolErrors: def __bool__(self): raise ValueError("Not allowed") - def __nonzero__(self): - raise ValueError("Not allowed") assert_raises(ValueError, np.nonzero, np.array([BoolErrors()])) @@ -1882,7 +1880,7 @@ class TestClip: assert_array_strict_equal(ac, act) def test_clip_with_out_transposed(self): - # Test that the out argument works when tranposed + # Test that the out argument works when transposed a = np.arange(16).reshape(4, 4) out = np.empty_like(a).T a.clip(4, 10, out=out) @@ -1991,7 +1989,9 @@ class TestClip: actual = np.clip(arr, amin, amax) assert_equal(actual, exp) - @pytest.mark.xfail(reason="no scalar nan propagation yet") + @pytest.mark.xfail(reason="no scalar nan propagation yet", + 
raises=AssertionError, + strict=True) @pytest.mark.parametrize("arr, amin, amax", [ # problematic scalar nan case from hypothesis (np.zeros(10, dtype=np.int64), @@ -2001,10 +2001,10 @@ class TestClip: def test_clip_scalar_nan_propagation(self, arr, amin, amax): # enforcement of scalar nan propagation for comparisons # called through clip() - expected = np.minimum(np.maximum(a, amin), amax) + expected = np.minimum(np.maximum(arr, amin), amax) with assert_warns(DeprecationWarning): actual = np.clip(arr, amin, amax) - assert_equal(actual, expected) + assert_equal(actual, expected) @pytest.mark.xfail(reason="propagation doesn't match spec") @pytest.mark.parametrize("arr, amin, amax", [ diff --git a/numpy/core/tests/test_numerictypes.py b/numpy/core/tests/test_numerictypes.py index 7105154ed..c72d13947 100644 --- a/numpy/core/tests/test_numerictypes.py +++ b/numpy/core/tests/test_numerictypes.py @@ -306,10 +306,7 @@ class ReadValuesNested: h = np.array(self._buffer, dtype=self._descr) assert_(h.dtype['Info']['value'].name == 'complex128') assert_(h.dtype['Info']['y2'].name == 'float64') - if sys.version_info[0] >= 3: - assert_(h.dtype['info']['Name'].name == 'str256') - else: - assert_(h.dtype['info']['Name'].name == 'unicode256') + assert_(h.dtype['info']['Name'].name == 'str256') assert_(h.dtype['info']['Value'].name == 'complex128') def test_nested2_descriptor(self): diff --git a/numpy/core/tests/test_print.py b/numpy/core/tests/test_print.py index 36d652a41..89a8b48bf 100644 --- a/numpy/core/tests/test_print.py +++ b/numpy/core/tests/test_print.py @@ -7,10 +7,7 @@ from numpy.testing import assert_, assert_equal from numpy.core.tests._locales import CommaDecimalPointLocale -if sys.version_info[0] >= 3: - from io import StringIO -else: - from StringIO import StringIO +from io import StringIO _REF = {np.inf: 'inf', -np.inf: '-inf', np.nan: 'nan'} diff --git a/numpy/core/tests/test_records.py b/numpy/core/tests/test_records.py index 4b4bd2729..bf43fedcc 100644 --- 
a/numpy/core/tests/test_records.py +++ b/numpy/core/tests/test_records.py @@ -24,12 +24,8 @@ class TestFromrecords: names='col1,col2,col3') assert_equal(r[0].item(), (456, 'dbe', 1.2)) assert_equal(r['col1'].dtype.kind, 'i') - if sys.version_info[0] >= 3: - assert_equal(r['col2'].dtype.kind, 'U') - assert_equal(r['col2'].dtype.itemsize, 12) - else: - assert_equal(r['col2'].dtype.kind, 'S') - assert_equal(r['col2'].dtype.itemsize, 3) + assert_equal(r['col2'].dtype.kind, 'U') + assert_equal(r['col2'].dtype.itemsize, 12) assert_equal(r['col3'].dtype.kind, 'f') def test_fromrecords_0len(self): @@ -422,7 +418,7 @@ class TestRecord: a['obj'] = data a['int'] = 42 ctor, args = a[0].__reduce__() - # check the contructor is what we expect before interpreting the arguments + # check the constructor is what we expect before interpreting the arguments assert ctor is np.core.multiarray.scalar dtype, obj = args # make sure we did not pickle the address diff --git a/numpy/core/tests/test_regression.py b/numpy/core/tests/test_regression.py index c77c11d41..3a9b96886 100644 --- a/numpy/core/tests/test_regression.py +++ b/numpy/core/tests/test_regression.py @@ -14,8 +14,8 @@ from numpy.testing import ( assert_raises_regex, assert_warns, suppress_warnings, _assert_valid_refcount, HAS_REFCOUNT, ) +from numpy.testing._private.utils import _no_tracing from numpy.compat import asbytes, asunicode, long, pickle -from test.support import no_tracing try: RecursionError @@ -487,15 +487,13 @@ class TestRegression: b"bI00\nS'O\\x81\\xb7Z\\xaa:\\xabY'\np22\ntp23\nb."), ] - if sys.version_info[:2] >= (3, 4): - # encoding='bytes' was added in Py3.4 - for original, data in test_data: - result = pickle.loads(data, encoding='bytes') - assert_equal(result, original) + for original, data in test_data: + result = pickle.loads(data, encoding='bytes') + assert_equal(result, original) - if isinstance(result, np.ndarray) and result.dtype.names is not None: - for name in result.dtype.names: - 
assert_(isinstance(name, str)) + if isinstance(result, np.ndarray) and result.dtype.names is not None: + for name in result.dtype.names: + assert_(isinstance(name, str)) def test_pickle_dtype(self): # Ticket #251 @@ -1106,14 +1104,8 @@ class TestRegression: # The dtype is float64, but the isbuiltin attribute is 0. data_dir = path.join(path.dirname(__file__), 'data') filename = path.join(data_dir, "astype_copy.pkl") - if sys.version_info[0] >= 3: - f = open(filename, 'rb') + with open(filename, 'rb') as f: xp = pickle.load(f, encoding='latin1') - f.close() - else: - f = open(filename) - xp = pickle.load(f) - f.close() xpd = xp.astype(np.float64) assert_((xp.__array_interface__['data'][0] != xpd.__array_interface__['data'][0])) @@ -1230,10 +1222,7 @@ class TestRegression: msg = 'unicode offset: %d chars' % i t = np.dtype([('a', 'S%d' % i), ('b', 'U2')]) x = np.array([(b'a', u'b')], dtype=t) - if sys.version_info[0] >= 3: - assert_equal(str(x), "[(b'a', 'b')]", err_msg=msg) - else: - assert_equal(str(x), "[('a', u'b')]", err_msg=msg) + assert_equal(str(x), "[(b'a', 'b')]", err_msg=msg) def test_sign_for_complex_nan(self): # Ticket 794. @@ -1315,7 +1304,7 @@ class TestRegression: assert_(pickle.loads( pickle.dumps(test_record, protocol=proto)) == test_record) - @no_tracing + @_no_tracing def test_blasdot_uninitialized_memory(self): # Ticket #950 for m in [0, 1, 2]: @@ -1365,13 +1354,13 @@ class TestRegression: def test_array_from_sequence_scalar_array(self): # Ticket #1078: segfaults when creating an array with a sequence of # 0d arrays. 
- a = np.array((np.ones(2), np.array(2))) + a = np.array((np.ones(2), np.array(2)), dtype=object) assert_equal(a.shape, (2,)) assert_equal(a.dtype, np.dtype(object)) assert_equal(a[0], np.ones(2)) assert_equal(a[1], np.array(2)) - a = np.array(((1,), np.array(1))) + a = np.array(((1,), np.array(1)), dtype=object) assert_equal(a.shape, (2,)) assert_equal(a.dtype, np.dtype(object)) assert_equal(a[0], (1,)) @@ -1379,7 +1368,7 @@ class TestRegression: def test_array_from_sequence_scalar_array2(self): # Ticket #1081: weird array with strange input... - t = np.array([np.array([]), np.array(0, object)]) + t = np.array([np.array([]), np.array(0, object)], dtype=object) assert_equal(t.shape, (2,)) assert_equal(t.dtype, np.dtype(object)) @@ -1815,11 +1804,6 @@ class TestRegression: assert_raises(RecursionError, int, a) assert_raises(RecursionError, long, a) assert_raises(RecursionError, float, a) - if sys.version_info.major == 2: - # in python 3, this falls back on operator.index, which fails on - # on dtype=object - assert_raises(RecursionError, oct, a) - assert_raises(RecursionError, hex, a) a[()] = None def test_object_array_circular_reference(self): @@ -1846,11 +1830,6 @@ class TestRegression: assert_equal(int(a), int(0)) assert_equal(long(a), long(0)) assert_equal(float(a), float(0)) - if sys.version_info.major == 2: - # in python 3, this falls back on operator.index, which fails on - # on dtype=object - assert_equal(oct(a), oct(0)) - assert_equal(hex(a), hex(0)) def test_object_array_self_copy(self): # An object array being copied into itself DECREF'ed before INCREF'ing @@ -1954,13 +1933,12 @@ class TestRegression: assert_equal(s[0], "\x01") def test_pickle_bytes_overwrite(self): - if sys.version_info[0] >= 3: - for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): - data = np.array([1], dtype='b') - data = pickle.loads(pickle.dumps(data, protocol=proto)) - data[0] = 0xdd - bytestring = "\x01 ".encode('ascii') - assert_equal(bytestring[0:1], '\x01'.encode('ascii')) + for 
proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + data = np.array([1], dtype='b') + data = pickle.loads(pickle.dumps(data, protocol=proto)) + data[0] = 0xdd + bytestring = "\x01 ".encode('ascii') + assert_equal(bytestring[0:1], '\x01'.encode('ascii')) def test_pickle_py2_array_latin1_hack(self): # Check that unpickling hacks in Py3 that support @@ -1971,12 +1949,11 @@ class TestRegression: b"tp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n(S'i1'\np8\n" b"I0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nNNNI-1\nI-1\nI0\ntp12\nbI00\nS'\\x81'\n" b"p13\ntp14\nb.") - if sys.version_info[0] >= 3: - # This should work: - result = pickle.loads(data, encoding='latin1') - assert_array_equal(result, np.array([129], dtype='b')) - # Should not segfault: - assert_raises(Exception, pickle.loads, data, encoding='koi8-r') + # This should work: + result = pickle.loads(data, encoding='latin1') + assert_array_equal(result, np.array([129], dtype='b')) + # Should not segfault: + assert_raises(Exception, pickle.loads, data, encoding='koi8-r') def test_pickle_py2_scalar_latin1_hack(self): # Check that scalar unpickling hack in Py3 that supports @@ -2003,25 +1980,24 @@ class TestRegression: b"tp8\nRp9\n."), 'different'), ] - if sys.version_info[0] >= 3: - for original, data, koi8r_validity in datas: - result = pickle.loads(data, encoding='latin1') - assert_equal(result, original) - - # Decoding under non-latin1 encoding (e.g.) KOI8-R can - # produce bad results, but should not segfault. 
- if koi8r_validity == 'different': - # Unicode code points happen to lie within latin1, - # but are different in koi8-r, resulting to silent - # bogus results - result = pickle.loads(data, encoding='koi8-r') - assert_(result != original) - elif koi8r_validity == 'invalid': - # Unicode code points outside latin1, so results - # to an encoding exception - assert_raises(ValueError, pickle.loads, data, encoding='koi8-r') - else: - raise ValueError(koi8r_validity) + for original, data, koi8r_validity in datas: + result = pickle.loads(data, encoding='latin1') + assert_equal(result, original) + + # Decoding under non-latin1 encoding (e.g.) KOI8-R can + # produce bad results, but should not segfault. + if koi8r_validity == 'different': + # Unicode code points happen to lie within latin1, + # but are different in koi8-r, resulting to silent + # bogus results + result = pickle.loads(data, encoding='koi8-r') + assert_(result != original) + elif koi8r_validity == 'invalid': + # Unicode code points outside latin1, so results + # to an encoding exception + assert_raises(ValueError, pickle.loads, data, encoding='koi8-r') + else: + raise ValueError(koi8r_validity) def test_structured_type_to_object(self): a_rec = np.array([(0, 1), (3, 2)], dtype='i4,i8') @@ -2094,10 +2070,7 @@ class TestRegression: # Ticket #2081. Python compiled with two byte unicode # can lead to truncation if itemsize is not properly # adjusted for NumPy's four byte unicode. 
- if sys.version_info[0] >= 3: - a = np.array(['abcd']) - else: - a = np.array([u'abcd']) + a = np.array(['abcd']) assert_equal(a.dtype.itemsize, 16) def test_unique_stable(self): @@ -2240,12 +2213,7 @@ class TestRegression: rhs = Foo() lhs = np.array(1) for f in [op.lt, op.le, op.gt, op.ge]: - if sys.version_info[0] >= 3: - assert_raises(TypeError, f, lhs, rhs) - elif not sys.py3kwarning: - # With -3 switch in python 2, DeprecationWarning is raised - # which we are not interested in - f(lhs, rhs) + assert_raises(TypeError, f, lhs, rhs) assert_(not op.eq(lhs, rhs)) assert_(op.ne(lhs, rhs)) @@ -2288,9 +2256,10 @@ class TestRegression: x[0], x[-1] = x[-1], x[0] uf = np.frompyfunc(f, 1, 0) - a = np.array([[1, 2, 3], [4, 5], [6, 7, 8, 9]]) + a = np.array([[1, 2, 3], [4, 5], [6, 7, 8, 9]], dtype=object) assert_equal(uf(a), ()) - assert_array_equal(a, [[3, 2, 1], [5, 4], [9, 7, 8, 6]]) + expected = np.array([[3, 2, 1], [5, 4], [9, 7, 8, 6]], dtype=object) + assert_array_equal(a, expected) @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") def test_leak_in_structured_dtype_comparison(self): diff --git a/numpy/core/tests/test_scalar_ctors.py b/numpy/core/tests/test_scalar_ctors.py index ae29d0605..d3592a5fc 100644 --- a/numpy/core/tests/test_scalar_ctors.py +++ b/numpy/core/tests/test_scalar_ctors.py @@ -40,19 +40,6 @@ class TestFromString: flongdouble = assert_warns(RuntimeWarning, np.longdouble, '-1e10000') assert_equal(flongdouble, -np.inf) - @pytest.mark.skipif((sys.version_info[0] >= 3) - or (sys.platform == "win32" - and platform.architecture()[0] == "64bit"), - reason="numpy.intp('0xff', 16) not supported on Py3 " - "or 64 bit Windows") - def test_intp(self): - # Ticket #99 - i_width = np.int_(0).nbytes*2 - 1 - np.intp('0x' + 'f'*i_width, 16) - assert_raises(OverflowError, np.intp, '0x' + 'f'*(i_width+1), 16) - assert_raises(ValueError, np.intp, '0x1', 32) - assert_equal(255, np.intp('0xFF', 16)) - class TestFromInt: def test_intp(self): diff 
--git a/numpy/core/tests/test_scalarbuffer.py b/numpy/core/tests/test_scalarbuffer.py index 85673e3ab..bda1c5333 100644 --- a/numpy/core/tests/test_scalarbuffer.py +++ b/numpy/core/tests/test_scalarbuffer.py @@ -31,8 +31,6 @@ scalars_and_codes = [ scalars_only, codes_only = zip(*scalars_and_codes) -@pytest.mark.skipif(sys.version_info.major < 3, - reason="Python 2 scalars lack a buffer interface") class TestScalarPEP3118: @pytest.mark.parametrize('scalar', scalars_only, ids=codes_only) @@ -91,7 +89,7 @@ class TestScalarPEP3118: expected = {'strides': (1,), 'itemsize': 1, 'ndim': 1, 'shape': (8,), 'format': 'B'} v = memoryview(dt1) - res = as_dict(v) + res = as_dict(v) assert_equal(res, expected) v = memoryview(dt2 - dt1) diff --git a/numpy/core/tests/test_scalarmath.py b/numpy/core/tests/test_scalarmath.py index 789ef4acd..c7f44cf50 100644 --- a/numpy/core/tests/test_scalarmath.py +++ b/numpy/core/tests/test_scalarmath.py @@ -84,7 +84,7 @@ class TestBaseMath: assert_almost_equal(np.square(inp2), np.multiply(inp2, inp2), err_msg=msg) # skip true divide for ints - if dt != np.int32 or (sys.version_info.major < 3 and not sys.py3kwarning): + if dt != np.int32: assert_almost_equal(np.reciprocal(inp2), np.divide(1, inp2), err_msg=msg) diff --git a/numpy/core/tests/test_shape_base.py b/numpy/core/tests/test_shape_base.py index d2f5287a6..7c23c7128 100644 --- a/numpy/core/tests/test_shape_base.py +++ b/numpy/core/tests/test_shape_base.py @@ -157,10 +157,8 @@ class TestHstack: def test_generator(self): with assert_warns(FutureWarning): hstack((np.arange(3) for _ in range(2))) - if sys.version_info.major > 2: - # map returns a list on Python 2 - with assert_warns(FutureWarning): - hstack(map(lambda x: x, np.ones((3, 2)))) + with assert_warns(FutureWarning): + hstack(map(lambda x: x, np.ones((3, 2)))) class TestVstack: diff --git a/numpy/core/tests/test_ufunc.py b/numpy/core/tests/test_ufunc.py index 01e3c5087..abdaeeb93 100644 --- a/numpy/core/tests/test_ufunc.py +++ 
b/numpy/core/tests/test_ufunc.py @@ -1160,14 +1160,18 @@ class TestUfunc: # Twice reproduced also for tuples: np.add.accumulate(arr, out=arr) np.add.accumulate(arr, out=arr) - assert_array_equal(arr, np.array([[1]*i for i in [1, 3, 6, 10]])) + assert_array_equal(arr, + np.array([[1]*i for i in [1, 3, 6, 10]], dtype=object), + ) # And the same if the axis argument is used arr = np.ones((2, 4), dtype=object) arr[0, :] = [[2] for i in range(4)] np.add.accumulate(arr, out=arr, axis=-1) np.add.accumulate(arr, out=arr, axis=-1) - assert_array_equal(arr[0, :], np.array([[2]*i for i in [1, 3, 6, 10]])) + assert_array_equal(arr[0, :], + np.array([[2]*i for i in [1, 3, 6, 10]], dtype=object), + ) def test_object_array_reduceat_inplace(self): # Checks that in-place reduceats work, see also gh-7465 @@ -1811,7 +1815,7 @@ class TestUfunc: assert_raises(TypeError, f, d, 0, keepdims="invalid", dtype="invalid", out=None) - # invalid keyord + # invalid keyword assert_raises(TypeError, f, d, axis=0, dtype=None, invalid=0) assert_raises(TypeError, f, d, invalid=0) assert_raises(TypeError, f, d, 0, keepdims=True, invalid="invalid", diff --git a/numpy/core/tests/test_umath.py b/numpy/core/tests/test_umath.py index e966eebf0..10a1c0803 100644 --- a/numpy/core/tests/test_umath.py +++ b/numpy/core/tests/test_umath.py @@ -1624,7 +1624,7 @@ class TestSpecialMethods: @property def args(self): # We need to ensure these are fetched at the same time, before - # any other ufuncs are calld by the assertions + # any other ufuncs are called by the assertions return (self._prepare_args, self._wrap_args) def __repr__(self): return "a" # for short test output @@ -1848,32 +1848,14 @@ class TestSpecialMethods: a = A() assert_raises(RuntimeError, ncu.maximum, a, a) - def test_array_with_context(self): + def test_array_too_many_args(self): - class A: - def __array__(self, dtype=None, context=None): - func, args, i = context - self.func = func - self.args = args - self.i = i - return np.zeros(1) - - class 
B: - def __array__(self, dtype=None): - return np.zeros(1, dtype) - - class C: - def __array__(self): + class A(object): + def __array__(self, dtype, context): return np.zeros(1) a = A() - ncu.maximum(np.zeros(1), a) - assert_(a.func is ncu.maximum) - assert_equal(a.args[0], 0) - assert_(a.args[1] is a) - assert_(a.i == 1) - assert_equal(ncu.maximum(a, B()), 0) - assert_equal(ncu.maximum(a, C()), 0) + assert_raises_regex(TypeError, '2 required positional', np.sum, a) def test_ufunc_override(self): # check override works even with instance with high priority. @@ -2880,6 +2862,32 @@ class TestSubclass: a = simple((3, 4)) assert_equal(a+a, a) + +class TestFrompyfunc(object): + + def test_identity(self): + def mul(a, b): + return a * b + + # with identity=value + mul_ufunc = np.frompyfunc(mul, nin=2, nout=1, identity=1) + assert_equal(mul_ufunc.reduce([2, 3, 4]), 24) + assert_equal(mul_ufunc.reduce(np.ones((2, 2)), axis=(0, 1)), 1) + assert_equal(mul_ufunc.reduce([]), 1) + + # with identity=None (reorderable) + mul_ufunc = np.frompyfunc(mul, nin=2, nout=1, identity=None) + assert_equal(mul_ufunc.reduce([2, 3, 4]), 24) + assert_equal(mul_ufunc.reduce(np.ones((2, 2)), axis=(0, 1)), 1) + assert_raises(ValueError, lambda: mul_ufunc.reduce([])) + + # with no identity (not reorderable) + mul_ufunc = np.frompyfunc(mul, nin=2, nout=1) + assert_equal(mul_ufunc.reduce([2, 3, 4]), 24) + assert_raises(ValueError, lambda: mul_ufunc.reduce(np.ones((2, 2)), axis=(0, 1))) + assert_raises(ValueError, lambda: mul_ufunc.reduce([])) + + def _check_branch_cut(f, x0, dx, re_sign=1, im_sign=-1, sig_zero_ok=False, dtype=complex): """ diff --git a/numpy/core/tests/test_unicode.py b/numpy/core/tests/test_unicode.py index b8ec56116..f16789148 100644 --- a/numpy/core/tests/test_unicode.py +++ b/numpy/core/tests/test_unicode.py @@ -4,40 +4,25 @@ import numpy as np from numpy.compat import unicode from numpy.testing import assert_, assert_equal, assert_array_equal -# Guess the UCS length for this 
python interpreter -if sys.version_info[:2] >= (3, 3): - # Python 3.3 uses a flexible string representation - ucs4 = False - - def buffer_length(arr): - if isinstance(arr, unicode): - arr = str(arr) - if not arr: - charmax = 0 - else: - charmax = max([ord(c) for c in arr]) - if charmax < 256: - size = 1 - elif charmax < 65536: - size = 2 - else: - size = 4 - return size * len(arr) - v = memoryview(arr) - if v.shape is None: - return len(v) * v.itemsize +def buffer_length(arr): + if isinstance(arr, unicode): + arr = str(arr) + if not arr: + charmax = 0 else: - return np.prod(v.shape) * v.itemsize -else: - if len(buffer(u'u')) == 4: - ucs4 = True + charmax = max([ord(c) for c in arr]) + if charmax < 256: + size = 1 + elif charmax < 65536: + size = 2 + else: + size = 4 + return size * len(arr) + v = memoryview(arr) + if v.shape is None: + return len(v) * v.itemsize else: - ucs4 = False - - def buffer_length(arr): - if isinstance(arr, np.ndarray): - return len(arr.data) - return len(buffer(arr)) + return np.prod(v.shape) * v.itemsize # In both cases below we need to make sure that the byte swapped value (as # UCS4) is still a valid unicode: @@ -52,12 +37,8 @@ def test_string_cast(): uni_arr1 = str_arr.astype('>U') uni_arr2 = str_arr.astype('<U') - if sys.version_info[0] < 3: - assert_array_equal(str_arr, uni_arr1) - assert_array_equal(str_arr, uni_arr2) - else: - assert_(str_arr != uni_arr1) - assert_(str_arr != uni_arr2) + assert_(str_arr != uni_arr1) + assert_(str_arr != uni_arr2) assert_array_equal(uni_arr1, uni_arr2) @@ -79,10 +60,7 @@ class CreateZeros: # Encode to ascii and double check assert_(ua_scalar.encode('ascii') == b'') # Check buffer lengths for scalars - if ucs4: - assert_(buffer_length(ua_scalar) == 0) - else: - assert_(buffer_length(ua_scalar) == 0) + assert_(buffer_length(ua_scalar) == 0) def test_zeros0D(self): # Check creation of 0-dimensional objects @@ -132,17 +110,14 @@ class CreateValues: assert_(ua_scalar.encode('utf-8') == 
(self.ucs_value*self.ulen).encode('utf-8')) # Check buffer lengths for scalars - if ucs4: - assert_(buffer_length(ua_scalar) == 4*self.ulen) + if self.ucs_value == ucs4_value: + # In UCS2, the \U0010FFFF will be represented using a + # surrogate *pair* + assert_(buffer_length(ua_scalar) == 2*2*self.ulen) else: - if self.ucs_value == ucs4_value: - # In UCS2, the \U0010FFFF will be represented using a - # surrogate *pair* - assert_(buffer_length(ua_scalar) == 2*2*self.ulen) - else: - # In UCS2, the \uFFFF will be represented using a - # regular 2-byte word - assert_(buffer_length(ua_scalar) == 2*self.ulen) + # In UCS2, the \uFFFF will be represented using a + # regular 2-byte word + assert_(buffer_length(ua_scalar) == 2*self.ulen) def test_values0D(self): # Check creation of 0-dimensional objects with values @@ -217,17 +192,14 @@ class AssignValues: assert_(ua_scalar.encode('utf-8') == (self.ucs_value*self.ulen).encode('utf-8')) # Check buffer lengths for scalars - if ucs4: - assert_(buffer_length(ua_scalar) == 4*self.ulen) + if self.ucs_value == ucs4_value: + # In UCS2, the \U0010FFFF will be represented using a + # surrogate *pair* + assert_(buffer_length(ua_scalar) == 2*2*self.ulen) else: - if self.ucs_value == ucs4_value: - # In UCS2, the \U0010FFFF will be represented using a - # surrogate *pair* - assert_(buffer_length(ua_scalar) == 2*2*self.ulen) - else: - # In UCS2, the \uFFFF will be represented using a - # regular 2-byte word - assert_(buffer_length(ua_scalar) == 2*self.ulen) + # In UCS2, the \uFFFF will be represented using a + # regular 2-byte word + assert_(buffer_length(ua_scalar) == 2*self.ulen) def test_values0D(self): # Check assignment of 0-dimensional objects with values diff --git a/numpy/distutils/ccompiler.py b/numpy/distutils/ccompiler.py index 5e3cd0e74..ea7912feb 100644 --- a/numpy/distutils/ccompiler.py +++ b/numpy/distutils/ccompiler.py @@ -82,11 +82,8 @@ def _needs_build(obj, cc_args, extra_postargs, pp_opts): def replace_method(klass, 
method_name, func): - if sys.version_info[0] < 3: - m = types.MethodType(func, None, klass) - else: - # Py3k does not have unbound method anymore, MethodType does not work - m = lambda self, *args, **kw: func(self, *args, **kw) + # Py3k does not have unbound method anymore, MethodType does not work + m = lambda self, *args, **kw: func(self, *args, **kw) setattr(klass, method_name, m) @@ -274,12 +271,8 @@ def CCompiler_compile(self, sources, output_dir=None, macros=None, if not sources: return [] - # FIXME:RELATIVE_IMPORT - if sys.version_info[0] < 3: - from .fcompiler import FCompiler, is_f_file, has_f90_header - else: - from numpy.distutils.fcompiler import (FCompiler, is_f_file, - has_f90_header) + from numpy.distutils.fcompiler import (FCompiler, is_f_file, + has_f90_header) if isinstance(self, FCompiler): display = [] for fc in ['f77', 'f90', 'fix']: diff --git a/numpy/distutils/cpuinfo.py b/numpy/distutils/cpuinfo.py index efea90113..e066f9888 100644 --- a/numpy/distutils/cpuinfo.py +++ b/numpy/distutils/cpuinfo.py @@ -17,10 +17,7 @@ __all__ = ['cpu'] import sys, re, types import os -if sys.version_info[0] >= 3: - from subprocess import getstatusoutput -else: - from commands import getstatusoutput +from subprocess import getstatusoutput import warnings import platform @@ -484,10 +481,7 @@ class Win32CPUInfo(CPUInfoBase): info = [] try: #XXX: Bad style to use so long `try:...except:...`. Fix it! 
- if sys.version_info[0] >= 3: - import winreg - else: - import _winreg as winreg + import winreg prgx = re.compile(r"family\s+(?P<FML>\d+)\s+model\s+(?P<MDL>\d+)" r"\s+stepping\s+(?P<STP>\d+)", re.IGNORECASE) diff --git a/numpy/distutils/exec_command.py b/numpy/distutils/exec_command.py index d35b4f898..fb10d2470 100644 --- a/numpy/distutils/exec_command.py +++ b/numpy/distutils/exec_command.py @@ -74,10 +74,6 @@ def filepath_from_subprocess_output(output): # Another historical oddity if output[-1:] == '\n': output = output[:-1] - # stdio uses bytes in python 2, so to avoid issues, we simply - # remove all non-ascii characters - if sys.version_info < (3, 0): - output = output.encode('ascii', errors='replace') return output @@ -89,10 +85,7 @@ def forward_bytes_to_stdout(val): The assumption is that the subprocess call already returned bytes in a suitable encoding. """ - if sys.version_info.major < 3: - # python 2 has binary output anyway - sys.stdout.write(val) - elif hasattr(sys.stdout, 'buffer'): + if hasattr(sys.stdout, 'buffer'): # use the underlying binary output if there is one sys.stdout.buffer.write(val) elif hasattr(sys.stdout, 'encoding'): @@ -305,11 +298,6 @@ def _exec_command(command, use_shell=None, use_tee = None, **env): if text[-1:] == '\n': text = text[:-1] - # stdio uses bytes in python 2, so to avoid issues, we simply - # remove all non-ascii characters - if sys.version_info < (3, 0): - text = text.encode('ascii', errors='replace') - if use_tee and text: print(text) return proc.returncode, text diff --git a/numpy/distutils/extension.py b/numpy/distutils/extension.py index 3b5c3db35..704f1e7aa 100644 --- a/numpy/distutils/extension.py +++ b/numpy/distutils/extension.py @@ -10,9 +10,6 @@ import sys import re from distutils.extension import Extension as old_Extension -if sys.version_info[0] >= 3: - basestring = str - cxx_ext_re = re.compile(r'.*[.](cpp|cxx|cc)\Z', re.I).match fortran_pyf_ext_re = re.compile(r'.*[.](f90|f95|f77|for|ftn|f|pyf)\Z', 
re.I).match @@ -74,7 +71,7 @@ class Extension(old_Extension): self.swig_opts = swig_opts or [] # swig_opts is assumed to be a list. Here we handle the case where it # is specified as a string instead. - if isinstance(self.swig_opts, basestring): + if isinstance(self.swig_opts, str): import warnings msg = "swig_opts is specified as a string instead of a list" warnings.warn(msg, SyntaxWarning, stacklevel=2) diff --git a/numpy/distutils/fcompiler/gnu.py b/numpy/distutils/fcompiler/gnu.py index 4fc9f33ff..128e54db3 100644 --- a/numpy/distutils/fcompiler/gnu.py +++ b/numpy/distutils/fcompiler/gnu.py @@ -412,8 +412,7 @@ class Gnu95FCompiler(GnuFCompiler): break h.update(block) text = base64.b32encode(h.digest()) - if sys.version_info[0] >= 3: - text = text.decode('ascii') + text = text.decode('ascii') return text.rstrip('=') def _link_wrapper_lib(self, objects, output_dir, extra_dll_dir, diff --git a/numpy/distutils/log.py b/numpy/distutils/log.py index ec1100b1b..79eec00a6 100644 --- a/numpy/distutils/log.py +++ b/numpy/distutils/log.py @@ -4,12 +4,8 @@ from distutils.log import * from distutils.log import Log as old_Log from distutils.log import _global_log -if sys.version_info[0] < 3: - from .misc_util import (red_text, default_text, cyan_text, green_text, - is_sequence, is_string) -else: - from numpy.distutils.misc_util import (red_text, default_text, cyan_text, - green_text, is_sequence, is_string) +from numpy.distutils.misc_util import (red_text, default_text, cyan_text, + green_text, is_sequence, is_string) def _fix_args(args,flag=1): diff --git a/numpy/distutils/mingw32ccompiler.py b/numpy/distutils/mingw32ccompiler.py index e2cd1c19b..475f73718 100644 --- a/numpy/distutils/mingw32ccompiler.py +++ b/numpy/distutils/mingw32ccompiler.py @@ -15,11 +15,7 @@ import textwrap # Overwrite certain distutils.ccompiler functions: import numpy.distutils.ccompiler - -if sys.version_info[0] < 3: - from . 
import log -else: - from numpy.distutils import log +from numpy.distutils import log # NT stuff # 1. Make sure libpython<version>.a exists for gcc. If not, build it. # 2. Force windows to use gcc (we're struggling with MSVC and g77 support) diff --git a/numpy/distutils/misc_util.py b/numpy/distutils/misc_util.py index eec8d56a3..f9d2be716 100644 --- a/numpy/distutils/misc_util.py +++ b/numpy/distutils/misc_util.py @@ -2242,10 +2242,7 @@ def get_info(pkgname, dirs=None): return info def is_bootstrapping(): - if sys.version_info[0] >= 3: - import builtins - else: - import __builtin__ as builtins + import builtins try: builtins.__NUMPY_SETUP__ diff --git a/numpy/distutils/npy_pkg_config.py b/numpy/distutils/npy_pkg_config.py index 47965b4ae..26a0437fb 100644 --- a/numpy/distutils/npy_pkg_config.py +++ b/numpy/distutils/npy_pkg_config.py @@ -2,10 +2,7 @@ import sys import re import os -if sys.version_info[0] < 3: - from ConfigParser import RawConfigParser -else: - from configparser import RawConfigParser +from configparser import RawConfigParser __all__ = ['FormatError', 'PkgNotFound', 'LibraryInfo', 'VariableSet', 'read_config', 'parse_flags'] diff --git a/numpy/distutils/system_info.py b/numpy/distutils/system_info.py index 508aeefc5..f0641a688 100644 --- a/numpy/distutils/system_info.py +++ b/numpy/distutils/system_info.py @@ -138,12 +138,8 @@ import textwrap from glob import glob from functools import reduce -if sys.version_info[0] < 3: - from ConfigParser import NoOptionError - from ConfigParser import RawConfigParser as ConfigParser -else: - from configparser import NoOptionError - from configparser import RawConfigParser as ConfigParser +from configparser import NoOptionError +from configparser import RawConfigParser as ConfigParser # It seems that some people are importing ConfigParser from here so is # good to keep its class name. 
Use of RawConfigParser is needed in # order to be able to load path names with percent in them, like @@ -264,32 +260,29 @@ if sys.platform == 'win32': default_include_dirs.extend( os.path.join(library_root, d) for d in _include_dirs) - if sys.version_info >= (3, 3): - # VCpkg is the de-facto package manager on windows for C/C++ - # libraries. If it is on the PATH, then we append its paths here. - # We also don't re-implement shutil.which for Python 2.7 because - # vcpkg doesn't support MSVC 2008. - vcpkg = shutil.which('vcpkg') - if vcpkg: - vcpkg_dir = os.path.dirname(vcpkg) - if platform.architecture() == '32bit': - specifier = 'x86' - else: - specifier = 'x64' - - vcpkg_installed = os.path.join(vcpkg_dir, 'installed') - for vcpkg_root in [ - os.path.join(vcpkg_installed, specifier + '-windows'), - os.path.join(vcpkg_installed, specifier + '-windows-static'), - ]: - add_system_root(vcpkg_root) - - # Conda is another popular package manager that provides libraries - conda = shutil.which('conda') - if conda: - conda_dir = os.path.dirname(conda) - add_system_root(os.path.join(conda_dir, '..', 'Library')) - add_system_root(os.path.join(conda_dir, 'Library')) + # VCpkg is the de-facto package manager on windows for C/C++ + # libraries. If it is on the PATH, then we append its paths here. 
+ vcpkg = shutil.which('vcpkg') + if vcpkg: + vcpkg_dir = os.path.dirname(vcpkg) + if platform.architecture() == '32bit': + specifier = 'x86' + else: + specifier = 'x64' + + vcpkg_installed = os.path.join(vcpkg_dir, 'installed') + for vcpkg_root in [ + os.path.join(vcpkg_installed, specifier + '-windows'), + os.path.join(vcpkg_installed, specifier + '-windows-static'), + ]: + add_system_root(vcpkg_root) + + # Conda is another popular package manager that provides libraries + conda = shutil.which('conda') + if conda: + conda_dir = os.path.dirname(conda) + add_system_root(os.path.join(conda_dir, '..', 'Library')) + add_system_root(os.path.join(conda_dir, 'Library')) else: default_lib_dirs = libpaths(['/usr/local/lib', '/opt/lib', '/usr/lib', @@ -714,7 +707,7 @@ class system_info: return info def get_info(self, notfound_action=0): - """ Return a dictonary with items that are compatible + """ Return a dictionary with items that are compatible with numpy.distutils.setup keyword arguments. """ flag = 0 @@ -2145,8 +2138,6 @@ class openblas_info(blas_info): extra_args = info['extra_link_args'] except Exception: extra_args = [] - if sys.version_info < (3, 5) and sys.version_info > (3, 0) and c.compiler_type == "msvc": - extra_args.append("/MANIFEST") try: with open(src, 'wt') as f: f.write(s) diff --git a/numpy/distutils/tests/test_exec_command.py b/numpy/distutils/tests/test_exec_command.py index 2ac0a6308..3bbad9386 100644 --- a/numpy/distutils/tests/test_exec_command.py +++ b/numpy/distutils/tests/test_exec_command.py @@ -8,10 +8,7 @@ from numpy.testing import tempdir, assert_, assert_warns # In python 3 stdout, stderr are text (unicode compliant) devices, so to # emulate them import StringIO from the io module. 
-if sys.version_info[0] >= 3: - from io import StringIO -else: - from StringIO import StringIO +from io import StringIO class redirect_stdout: """Context manager to redirect stdout for exec_command test.""" diff --git a/numpy/distutils/unixccompiler.py b/numpy/distutils/unixccompiler.py index 9a4d3ba52..cf62cb019 100644 --- a/numpy/distutils/unixccompiler.py +++ b/numpy/distutils/unixccompiler.py @@ -8,11 +8,7 @@ from distutils.errors import DistutilsExecError, CompileError from distutils.unixccompiler import * from numpy.distutils.ccompiler import replace_method from numpy.distutils.misc_util import _commandline_dep_string - -if sys.version_info[0] < 3: - from . import log -else: - from numpy.distutils import log +from numpy.distutils import log # Note that UnixCCompiler._compile appeared in Python 2.3 def UnixCCompiler__compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts): diff --git a/numpy/f2py/src/fortranobject.c b/numpy/f2py/src/fortranobject.c index 8ec5b510f..644339218 100644 --- a/numpy/f2py/src/fortranobject.c +++ b/numpy/f2py/src/fortranobject.c @@ -260,8 +260,11 @@ static PyObject * fortran_getattr(PyFortranObject *fp, char *name) { int i,j,k,flag; if (fp->dict != NULL) { - PyObject *v = PyDict_GetItemString(fp->dict, name); - if (v != NULL) { + PyObject *v = _PyDict_GetItemStringWithError(fp->dict, name); + if (v == NULL && PyErr_Occurred()) { + return NULL; + } + else if (v != NULL) { Py_INCREF(v); return v; } diff --git a/numpy/f2py/tests/test_compile_function.py b/numpy/f2py/tests/test_compile_function.py index d40ed63cf..f76fd6448 100644 --- a/numpy/f2py/tests/test_compile_function.py +++ b/numpy/f2py/tests/test_compile_function.py @@ -14,8 +14,6 @@ from . 
import util def setup_module(): - if sys.platform == 'win32' and sys.version_info[0] < 3: - pytest.skip('Fails with MinGW64 Gfortran (Issue #9673)') if not util.has_c_compiler(): pytest.skip("Needs C compiler") if not util.has_f77_compiler(): diff --git a/numpy/fft/_pocketfft.c b/numpy/fft/_pocketfft.c index de86e36d3..764116a84 100644 --- a/numpy/fft/_pocketfft.c +++ b/numpy/fft/_pocketfft.c @@ -10,6 +10,11 @@ * \author Martin Reinecke */ +#define NPY_NO_DEPRECATED_API NPY_API_VERSION + +#include "Python.h" +#include "numpy/arrayobject.h" + #include <math.h> #include <string.h> #include <stdlib.h> @@ -2184,11 +2189,6 @@ WARN_UNUSED_RESULT static int rfft_forward(rfft_plan plan, double c[], double fc return rfftblue_forward(plan->blueplan,c,fct); } -#define NPY_NO_DEPRECATED_API NPY_API_VERSION - -#include "Python.h" -#include "numpy/arrayobject.h" - static PyObject * execute_complex(PyObject *a1, int is_forward, double fct) { diff --git a/numpy/fft/tests/test_pocketfft.py b/numpy/fft/tests/test_pocketfft.py index da31ec09e..14f92c081 100644 --- a/numpy/fft/tests/test_pocketfft.py +++ b/numpy/fft/tests/test_pocketfft.py @@ -6,10 +6,7 @@ from numpy.testing import ( ) import threading import sys -if sys.version_info[0] >= 3: - import queue -else: - import Queue as queue +import queue def fft1(x): diff --git a/numpy/lib/_datasource.py b/numpy/lib/_datasource.py index aa793958e..139b8c0ca 100644 --- a/numpy/lib/_datasource.py +++ b/numpy/lib/_datasource.py @@ -70,70 +70,6 @@ def _check_mode(mode, encoding, newline): raise ValueError("Argument 'newline' not supported in binary mode") -def _python2_bz2open(fn, mode, encoding, newline): - """Wrapper to open bz2 in text mode. - - Parameters - ---------- - fn : str - File name - mode : {'r', 'w'} - File mode. Note that bz2 Text files are not supported. - encoding : str - Ignored, text bz2 files not supported in Python2. - newline : str - Ignored, text bz2 files not supported in Python2. 
- """ - import bz2 - - _check_mode(mode, encoding, newline) - - if "t" in mode: - # BZ2File is missing necessary functions for TextIOWrapper - warnings.warn("Assuming latin1 encoding for bz2 text file in Python2", - RuntimeWarning, stacklevel=5) - mode = mode.replace("t", "") - return bz2.BZ2File(fn, mode) - -def _python2_gzipopen(fn, mode, encoding, newline): - """ Wrapper to open gzip in text mode. - - Parameters - ---------- - fn : str, bytes, file - File path or opened file. - mode : str - File mode. The actual files are opened as binary, but will decoded - using the specified `encoding` and `newline`. - encoding : str - Encoding to be used when reading/writing as text. - newline : str - Newline to be used when reading/writing as text. - - """ - import gzip - # gzip is lacking read1 needed for TextIOWrapper - class GzipWrap(gzip.GzipFile): - def read1(self, n): - return self.read(n) - - _check_mode(mode, encoding, newline) - - gz_mode = mode.replace("t", "") - - if isinstance(fn, (str, bytes)): - binary_file = GzipWrap(fn, gz_mode) - elif hasattr(fn, "read") or hasattr(fn, "write"): - binary_file = GzipWrap(None, gz_mode, fileobj=fn) - else: - raise TypeError("filename must be a str or bytes object, or a file") - - if "t" in mode: - return io.TextIOWrapper(binary_file, encoding, newline=newline) - else: - return binary_file - - # Using a class instead of a module-level dictionary # to reduce the initial 'import numpy' overhead by # deferring the import of lzma, bz2 and gzip until needed @@ -174,19 +110,13 @@ class _FileOpeners: try: import bz2 - if sys.version_info[0] >= 3: - self._file_openers[".bz2"] = bz2.open - else: - self._file_openers[".bz2"] = _python2_bz2open + self._file_openers[".bz2"] = bz2.open except ImportError: pass try: import gzip - if sys.version_info[0] >= 3: - self._file_openers[".gz"] = gzip.open - else: - self._file_openers[".gz"] = _python2_gzipopen + self._file_openers[".gz"] = gzip.open except ImportError: pass @@ -547,14 +477,10 @@ 
class DataSource: if os.path.exists(path): return True - # We import this here because importing urllib2 is slow and + # We import this here because importing urllib is slow and # a significant fraction of numpy's total import time. - if sys.version_info[0] >= 3: - from urllib.request import urlopen - from urllib.error import URLError - else: - from urllib2 import urlopen - from urllib2 import URLError + from urllib.request import urlopen + from urllib.error import URLError # Test cached url upath = self.abspath(path) diff --git a/numpy/lib/arraypad.py b/numpy/lib/arraypad.py index 247eed07c..7569e7651 100644 --- a/numpy/lib/arraypad.py +++ b/numpy/lib/arraypad.py @@ -232,7 +232,7 @@ def _get_linear_ramps(padded, axis, width_pair, end_value_pair): def _get_stats(padded, axis, width_pair, length_pair, stat_func): """ - Calculate statistic for the empty-padded array in given dimnsion. + Calculate statistic for the empty-padded array in given dimension. Parameters ---------- @@ -271,7 +271,7 @@ def _get_stats(padded, axis, width_pair, length_pair, stat_func): if (left_length == 0 or right_length == 0) \ and stat_func in {np.amax, np.amin}: - # amax and amin can't operate on an emtpy array, + # amax and amin can't operate on an empty array, # raise a more descriptive warning here instead of the default one raise ValueError("stat_length of 0 yields no value for padding") diff --git a/numpy/lib/format.py b/numpy/lib/format.py index 2ee43637c..114bae287 100644 --- a/numpy/lib/format.py +++ b/numpy/lib/format.py @@ -162,7 +162,6 @@ evolved with time and this document is more current. 
""" import numpy -import sys import io import warnings from numpy.lib.utils import safe_eval @@ -213,10 +212,7 @@ def magic(major, minor): raise ValueError("major version must be 0 <= major < 256") if minor < 0 or minor > 255: raise ValueError("minor version must be 0 <= minor < 256") - if sys.version_info[0] < 3: - return MAGIC_PREFIX + chr(major) + chr(minor) - else: - return MAGIC_PREFIX + bytes([major, minor]) + return MAGIC_PREFIX + bytes([major, minor]) def read_magic(fp): """ Read the magic string to get the version of the file format. @@ -234,10 +230,7 @@ def read_magic(fp): if magic_str[:-2] != MAGIC_PREFIX: msg = "the magic string is not correct; expected %r, got %r" raise ValueError(msg % (MAGIC_PREFIX, magic_str[:-2])) - if sys.version_info[0] < 3: - major, minor = map(ord, magic_str[-2:]) - else: - major, minor = magic_str[-2:] + major, minor = magic_str[-2:] return major, minor def _has_metadata(dt): @@ -542,16 +535,11 @@ def _filter_header(s): """ import tokenize - if sys.version_info[0] >= 3: - from io import StringIO - else: - from StringIO import StringIO + from io import StringIO tokens = [] last_token_was_number = False - # adding newline as python 2.7.5 workaround - string = s + "\n" - for token in tokenize.generate_tokens(StringIO(string).readline): + for token in tokenize.generate_tokens(StringIO(s).readline): token_type = token[0] token_string = token[1] if (last_token_was_number and @@ -561,8 +549,7 @@ def _filter_header(s): else: tokens.append(token) last_token_was_number = (token_type == tokenize.NUMBER) - # removing newline (see above) as python 2.7.5 workaround - return tokenize.untokenize(tokens)[:-1] + return tokenize.untokenize(tokens) def _read_array_header(fp, version): @@ -741,12 +728,10 @@ def read_array(fp, allow_pickle=False, pickle_kwargs=None): try: array = pickle.load(fp, **pickle_kwargs) except UnicodeError as err: - if sys.version_info[0] >= 3: - # Friendlier error message - raise UnicodeError("Unpickling a python object 
failed: %r\n" - "You may need to pass the encoding= option " - "to numpy.load" % (err,)) - raise + # Friendlier error message + raise UnicodeError("Unpickling a python object failed: %r\n" + "You may need to pass the encoding= option " + "to numpy.load" % (err,)) else: if isfileobj(fp): # We can use the fast fromfile() function. diff --git a/numpy/lib/histograms.py b/numpy/lib/histograms.py index 0eff73b39..70ecd6eb1 100644 --- a/numpy/lib/histograms.py +++ b/numpy/lib/histograms.py @@ -954,9 +954,9 @@ def histogramdd(sample, bins=10, range=None, normed=None, weights=None, Note the unusual interpretation of sample when an array_like: * When an array, each row is a coordinate in a D-dimensional space - - such as ``histogramgramdd(np.array([p1, p2, p3]))``. + such as ``histogramdd(np.array([p1, p2, p3]))``. * When an array_like, each element is the list of values for single - coordinate - such as ``histogramgramdd((X, Y, Z))``. + coordinate - such as ``histogramdd((X, Y, Z))``. The first form should be preferred. 
diff --git a/numpy/lib/mixins.py b/numpy/lib/mixins.py index d4811b94d..50157069c 100644 --- a/numpy/lib/mixins.py +++ b/numpy/lib/mixins.py @@ -1,6 +1,4 @@ """Mixin classes for custom array types that don't inherit from ndarray.""" -import sys - from numpy.core import umath as um @@ -152,9 +150,7 @@ class NDArrayOperatorsMixin: __mul__, __rmul__, __imul__ = _numeric_methods(um.multiply, 'mul') __matmul__, __rmatmul__, __imatmul__ = _numeric_methods( um.matmul, 'matmul') - if sys.version_info.major < 3: - # Python 3 uses only __truediv__ and __floordiv__ - __div__, __rdiv__, __idiv__ = _numeric_methods(um.divide, 'div') + # Python 3 does not use __div__, __rdiv__, or __idiv__ __truediv__, __rtruediv__, __itruediv__ = _numeric_methods( um.true_divide, 'truediv') __floordiv__, __rfloordiv__, __ifloordiv__ = _numeric_methods( diff --git a/numpy/lib/npyio.py b/numpy/lib/npyio.py index c47e388c0..29af488d2 100644 --- a/numpy/lib/npyio.py +++ b/numpy/lib/npyio.py @@ -7,6 +7,7 @@ import warnings import weakref import contextlib from operator import itemgetter, index as opindex +from collections.abc import Mapping import numpy as np from . import format @@ -26,12 +27,6 @@ from numpy.compat import ( pickle, contextlib_nullcontext ) -if sys.version_info[0] >= 3: - from collections.abc import Mapping -else: - from future_builtins import map - from collections import Mapping - @set_module('numpy') def loads(*args, **kwargs): @@ -264,26 +259,25 @@ class NpzFile(Mapping): raise KeyError("%s is not a file in the archive" % key) - if sys.version_info.major == 3: - # deprecate the python 2 dict apis that we supported by accident in - # python 3. We forgot to implement itervalues() at all in earlier - # versions of numpy, so no need to deprecated it here. + # deprecate the python 2 dict apis that we supported by accident in + # python 3. We forgot to implement itervalues() at all in earlier + # versions of numpy, so no need to deprecated it here. 
- def iteritems(self): - # Numpy 1.15, 2018-02-20 - warnings.warn( - "NpzFile.iteritems is deprecated in python 3, to match the " - "removal of dict.itertems. Use .items() instead.", - DeprecationWarning, stacklevel=2) - return self.items() + def iteritems(self): + # Numpy 1.15, 2018-02-20 + warnings.warn( + "NpzFile.iteritems is deprecated in python 3, to match the " + "removal of dict.itertems. Use .items() instead.", + DeprecationWarning, stacklevel=2) + return self.items() - def iterkeys(self): - # Numpy 1.15, 2018-02-20 - warnings.warn( - "NpzFile.iterkeys is deprecated in python 3, to match the " - "removal of dict.iterkeys. Use .keys() instead.", - DeprecationWarning, stacklevel=2) - return self.keys() + def iterkeys(self): + # Numpy 1.15, 2018-02-20 + warnings.warn( + "NpzFile.iterkeys is deprecated in python 3, to match the " + "removal of dict.iterkeys. Use .keys() instead.", + DeprecationWarning, stacklevel=2) + return self.keys() @set_module('numpy') @@ -412,11 +406,7 @@ def load(file, mmap_mode=None, allow_pickle=False, fix_imports=True, # result can similarly silently corrupt numerical data. 
raise ValueError("encoding must be 'ASCII', 'latin1', or 'bytes'") - if sys.version_info[0] >= 3: - pickle_kwargs = dict(encoding=encoding, fix_imports=fix_imports) - else: - # Nothing to do on Python 2 - pickle_kwargs = {} + pickle_kwargs = dict(encoding=encoding, fix_imports=fix_imports) # TODO: Use contextlib.ExitStack once we drop Python 2 if hasattr(file, 'read'): @@ -539,16 +529,10 @@ def save(file, arr, allow_pickle=True, fix_imports=True): fid = open(file, "wb") own_fid = True - if sys.version_info[0] >= 3: - pickle_kwargs = dict(fix_imports=fix_imports) - else: - # Nothing to do on Python 2 - pickle_kwargs = None - try: arr = np.asanyarray(arr) format.write_array(fid, arr, allow_pickle=allow_pickle, - pickle_kwargs=pickle_kwargs) + pickle_kwargs=dict(fix_imports=fix_imports)) finally: if own_fid: fid.close() @@ -691,7 +675,7 @@ def savez_compressed(file, *args, **kwds): The ``.npz`` file format is a zipped archive of files named after the variables they contain. The archive is compressed with ``zipfile.ZIP_DEFLATED`` and each file in the archive contains one variable - in ``.npy`` format. For a description of the ``.npy`` format, see + in ``.npy`` format. For a description of the ``.npy`` format, see :py:mod:`numpy.lib.format`. @@ -1375,9 +1359,6 @@ def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='', open(fname, 'wt').close() fh = np.lib._datasource.open(fname, 'wt', encoding=encoding) own_fh = True - # need to convert str to unicode for text io output - if sys.version_info[0] == 2: - fh = WriteWrap(fh, encoding or 'latin1') elif hasattr(fname, 'write'): # wrap to handle byte output streams fh = WriteWrap(fname, encoding or 'latin1') diff --git a/numpy/lib/recfunctions.py b/numpy/lib/recfunctions.py index 93aa67a3b..af4cfa09d 100644 --- a/numpy/lib/recfunctions.py +++ b/numpy/lib/recfunctions.py @@ -5,7 +5,6 @@ Most of these functions were initially implemented by John Hunter for matplotlib. 
They have been rewritten and extended for convenience. """ -import sys import itertools import numpy as np import numpy.ma as ma @@ -17,9 +16,6 @@ from numpy.lib._iotools import _is_string_like from numpy.compat import basestring from numpy.testing import suppress_warnings -if sys.version_info[0] < 3: - from future_builtins import zip - _check_fill_value = np.ma.core._check_fill_value @@ -333,12 +329,7 @@ def _izip_records(seqarrays, fill_value=None, flatten=True): else: zipfunc = _izip_fields - if sys.version_info[0] >= 3: - zip_longest = itertools.zip_longest - else: - zip_longest = itertools.izip_longest - - for tup in zip_longest(*seqarrays, fillvalue=fill_value): + for tup in itertools.zip_longest(*seqarrays, fillvalue=fill_value): yield tuple(zipfunc(tup)) @@ -436,7 +427,7 @@ def merge_arrays(seqarrays, fill_value=-1, flatten=False, if seqdtype.names is None: seqdtype = np.dtype([('', seqdtype)]) if not flatten or _zip_dtype((seqarrays,), flatten=True) == seqdtype: - # Minimal processing needed: just make sure everythng's a-ok + # Minimal processing needed: just make sure everything's a-ok seqarrays = seqarrays.ravel() # Find what type of array we must return if usemask: diff --git a/numpy/lib/tests/test__datasource.py b/numpy/lib/tests/test__datasource.py index fdd22347d..d3bd88d7f 100644 --- a/numpy/lib/tests/test__datasource.py +++ b/numpy/lib/tests/test__datasource.py @@ -9,14 +9,9 @@ from numpy.testing import ( assert_, assert_equal, assert_raises, assert_warns ) -if sys.version_info[0] >= 3: - import urllib.request as urllib_request - from urllib.parse import urlparse - from urllib.error import URLError -else: - import urllib2 as urllib_request - from urlparse import urlparse - from urllib2 import URLError +import urllib.request as urllib_request +from urllib.parse import urlparse +from urllib.error import URLError def urlopen_stub(url, data=None): @@ -162,24 +157,6 @@ class TestDataSourceOpen: fp.close() assert_equal(magic_line, result) - 
@pytest.mark.skipif(sys.version_info[0] >= 3, reason="Python 2 only") - def test_Bz2File_text_mode_warning(self): - try: - import bz2 - except ImportError: - # We don't have the bz2 capabilities to test. - pytest.skip() - # Test datasource's internal file_opener for BZip2 files. - filepath = os.path.join(self.tmpdir, 'foobar.txt.bz2') - fp = bz2.BZ2File(filepath, 'w') - fp.write(magic_line) - fp.close() - with assert_warns(RuntimeWarning): - fp = self.ds.open(filepath, 'rt') - result = fp.readline() - fp.close() - assert_equal(magic_line, result) - class TestDataSourceExists: def setup(self): diff --git a/numpy/lib/tests/test_arraypad.py b/numpy/lib/tests/test_arraypad.py index cd75b4ac4..75db5928b 100644 --- a/numpy/lib/tests/test_arraypad.py +++ b/numpy/lib/tests/test_arraypad.py @@ -1260,24 +1260,29 @@ class TestPadWidth: with pytest.raises(ValueError, match=match): np.pad(arr, pad_width, mode) - @pytest.mark.parametrize("pad_width", [ - "3", - "word", - None, - object(), - 3.4, - ((2, 3, 4), (3, 2)), # dtype=object (tuple) - complex(1, -1), - ((-2.1, 3), (3, 2)), + @pytest.mark.parametrize("pad_width, dtype", [ + ("3", None), + ("word", None), + (None, None), + (object(), None), + (3.4, None), + (((2, 3, 4), (3, 2)), object), + (complex(1, -1), None), + (((-2.1, 3), (3, 2)), None), ]) @pytest.mark.parametrize("mode", _all_modes.keys()) - def test_bad_type(self, pad_width, mode): + def test_bad_type(self, pad_width, dtype, mode): arr = np.arange(30).reshape((6, 5)) match = "`pad_width` must be of integral type." 
- with pytest.raises(TypeError, match=match): - np.pad(arr, pad_width, mode) - with pytest.raises(TypeError, match=match): - np.pad(arr, np.array(pad_width), mode) + if dtype is not None: + # avoid DeprecationWarning when not specifying dtype + with pytest.raises(TypeError, match=match): + np.pad(arr, np.array(pad_width, dtype=dtype), mode) + else: + with pytest.raises(TypeError, match=match): + np.pad(arr, pad_width, mode) + with pytest.raises(TypeError, match=match): + np.pad(arr, np.array(pad_width), mode) def test_pad_width_as_ndarray(self): a = np.arange(12) diff --git a/numpy/lib/tests/test_format.py b/numpy/lib/tests/test_format.py index 28ce038ae..24593d7b3 100644 --- a/numpy/lib/tests/test_format.py +++ b/numpy/lib/tests/test_format.py @@ -548,10 +548,7 @@ def test_load_padded_dtype(dt): def test_python2_python3_interoperability(): - if sys.version_info[0] >= 3: - fname = 'win64python2.npy' - else: - fname = 'python3.npy' + fname = 'win64python2.npy' path = os.path.join(os.path.dirname(__file__), 'data', fname) data = np.load(path) assert_array_equal(data, np.ones(2)) @@ -561,13 +558,7 @@ def test_pickle_python2_python3(): # Python 2 and Python 3 and vice versa data_dir = os.path.join(os.path.dirname(__file__), 'data') - if sys.version_info[0] >= 3: - xrange = range - else: - import __builtin__ - xrange = __builtin__.xrange - - expected = np.array([None, xrange, u'\u512a\u826f', + expected = np.array([None, range, u'\u512a\u826f', b'\xe4\xb8\x8d\xe8\x89\xaf'], dtype=object) @@ -583,34 +574,30 @@ def test_pickle_python2_python3(): else: data = data_f - if sys.version_info[0] >= 3: - if encoding == 'latin1' and fname.startswith('py2'): - assert_(isinstance(data[3], str)) - assert_array_equal(data[:-1], expected[:-1]) - # mojibake occurs - assert_array_equal(data[-1].encode(encoding), expected[-1]) - else: - assert_(isinstance(data[3], bytes)) - assert_array_equal(data, expected) + if encoding == 'latin1' and fname.startswith('py2'): + 
assert_(isinstance(data[3], str)) + assert_array_equal(data[:-1], expected[:-1]) + # mojibake occurs + assert_array_equal(data[-1].encode(encoding), expected[-1]) else: + assert_(isinstance(data[3], bytes)) assert_array_equal(data, expected) - if sys.version_info[0] >= 3: - if fname.startswith('py2'): - if fname.endswith('.npz'): - data = np.load(path, allow_pickle=True) - assert_raises(UnicodeError, data.__getitem__, 'x') - data.close() - data = np.load(path, allow_pickle=True, fix_imports=False, - encoding='latin1') - assert_raises(ImportError, data.__getitem__, 'x') - data.close() - else: - assert_raises(UnicodeError, np.load, path, - allow_pickle=True) - assert_raises(ImportError, np.load, path, - allow_pickle=True, fix_imports=False, - encoding='latin1') + if fname.startswith('py2'): + if fname.endswith('.npz'): + data = np.load(path, allow_pickle=True) + assert_raises(UnicodeError, data.__getitem__, 'x') + data.close() + data = np.load(path, allow_pickle=True, fix_imports=False, + encoding='latin1') + assert_raises(ImportError, data.__getitem__, 'x') + data.close() + else: + assert_raises(UnicodeError, np.load, path, + allow_pickle=True) + assert_raises(ImportError, np.load, path, + allow_pickle=True, fix_imports=False, + encoding='latin1') def test_pickle_disallow(): diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index dfce2d55d..7953de15d 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -23,8 +23,6 @@ from numpy.lib import ( from numpy.compat import long -PY2 = sys.version_info[0] == 2 - def get_mat(n): data = np.arange(n) data = np.add.outer(data, data) @@ -1568,11 +1566,8 @@ class TestLeaks: a.f = np.frompyfunc(getattr(a, name), 1, 1) out = a.f(np.arange(10)) a = None - if PY2: - assert_equal(sys.getrefcount(A_func), refcount) - else: - # A.func is part of a reference cycle if incr is non-zero - assert_equal(sys.getrefcount(A_func), refcount + incr) + # A.func is 
part of a reference cycle if incr is non-zero + assert_equal(sys.getrefcount(A_func), refcount + incr) for i in range(5): gc.collect() assert_equal(sys.getrefcount(A_func), refcount) diff --git a/numpy/lib/tests/test_histograms.py b/numpy/lib/tests/test_histograms.py index c21103891..fc16b7396 100644 --- a/numpy/lib/tests/test_histograms.py +++ b/numpy/lib/tests/test_histograms.py @@ -81,7 +81,7 @@ class TestHistogram: a, b = histogram(v, bins, density=False) assert_array_equal(a, [1, 2, 3, 4]) - # Variale bin widths are especially useful to deal with + # Variable bin widths are especially useful to deal with # infinities. v = np.arange(10) bins = [0, 1, 3, 6, np.inf] diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py index ba27eea6c..2d6f39e40 100644 --- a/numpy/lib/tests/test_io.py +++ b/numpy/lib/tests/test_io.py @@ -44,7 +44,6 @@ class TextIO(BytesIO): BytesIO.writelines(self, [asbytes(s) for s in lines]) -MAJVER, MINVER = sys.version_info[:2] IS_64BIT = sys.maxsize > 2**32 try: import bz2 @@ -528,12 +527,10 @@ class TestSaveTxt: a = np.array([utf8], dtype=np.unicode_) # our gz wrapper support encoding suffixes = ['', '.gz'] - # stdlib 2 versions do not support encoding - if MAJVER > 2: - if HAS_BZ2: - suffixes.append('.bz2') - if HAS_LZMA: - suffixes.extend(['.xz', '.lzma']) + if HAS_BZ2: + suffixes.append('.bz2') + if HAS_LZMA: + suffixes.extend(['.xz', '.lzma']) with tempdir() as tmpdir: for suffix in suffixes: np.savetxt(os.path.join(tmpdir, 'test.csv' + suffix), a, @@ -578,7 +575,7 @@ class TestSaveTxt: def test_large_zip(self): # The test takes at least 6GB of memory, writes a file larger than 4GB test_data = np.asarray([np.random.rand(np.random.randint(50,100),4) - for i in range(800000)]) + for i in range(800000)], dtype=object) with tempdir() as tmpdir: np.savez(os.path.join(tmpdir, 'test.npz'), test_data=test_data) @@ -599,18 +596,14 @@ class LoadTxtBase: res = self.loadfunc(f) assert_array_equal(res, wanted) - # Python2 .open does 
not support encoding - @pytest.mark.skipif(MAJVER == 2, reason="Needs Python version >= 3") def test_compressed_gzip(self): self.check_compressed(gzip.open, ('.gz',)) @pytest.mark.skipif(not HAS_BZ2, reason="Needs bz2") - @pytest.mark.skipif(MAJVER == 2, reason="Needs Python version >= 3") def test_compressed_bz2(self): self.check_compressed(bz2.open, ('.bz2',)) @pytest.mark.skipif(not HAS_LZMA, reason="Needs lzma") - @pytest.mark.skipif(MAJVER == 2, reason="Needs Python version >= 3") def test_compressed_lzma(self): self.check_compressed(lzma.open, ('.xz', '.lzma')) diff --git a/numpy/lib/tests/test_mixins.py b/numpy/lib/tests/test_mixins.py index 7c22dae94..e184ffe19 100644 --- a/numpy/lib/tests/test_mixins.py +++ b/numpy/lib/tests/test_mixins.py @@ -6,9 +6,6 @@ import numpy as np from numpy.testing import assert_, assert_equal, assert_raises -PY2 = sys.version_info.major < 3 - - # NOTE: This class should be kept as an exact copy of the example from the # docstring for NDArrayOperatorsMixin. 
@@ -202,11 +199,10 @@ class TestNDArrayOperatorsMixin: array_like = ArrayLike(array) expected = ArrayLike(np.float64(5)) _assert_equal_type_and_value(expected, np.matmul(array_like, array)) - if not PY2: - _assert_equal_type_and_value( - expected, operator.matmul(array_like, array)) - _assert_equal_type_and_value( - expected, operator.matmul(array, array_like)) + _assert_equal_type_and_value( + expected, operator.matmul(array_like, array)) + _assert_equal_type_and_value( + expected, operator.matmul(array, array_like)) def test_ufunc_at(self): array = ArrayLike(np.array([1, 2, 3, 4])) diff --git a/numpy/lib/tests/test_regression.py b/numpy/lib/tests/test_regression.py index 019b7595e..37cc158ba 100644 --- a/numpy/lib/tests/test_regression.py +++ b/numpy/lib/tests/test_regression.py @@ -206,10 +206,7 @@ class TestRegression: def test_loadtxt_fields_subarrays(self): # For ticket #1936 - if sys.version_info[0] >= 3: - from io import StringIO - else: - from StringIO import StringIO + from io import StringIO dt = [("a", 'u1', 2), ("b", 'u1', 2)] x = np.loadtxt(StringIO("0 1 2 3"), dtype=dt) diff --git a/numpy/lib/tests/test_utils.py b/numpy/lib/tests/test_utils.py index 57c840342..c96bf795a 100644 --- a/numpy/lib/tests/test_utils.py +++ b/numpy/lib/tests/test_utils.py @@ -7,10 +7,7 @@ from numpy.testing import assert_, assert_equal, assert_raises_regex from numpy.lib import deprecate import numpy.lib.utils as utils -if sys.version_info[0] >= 3: - from io import StringIO -else: - from StringIO import StringIO +from io import StringIO @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") diff --git a/numpy/lib/utils.py b/numpy/lib/utils.py index d41a6e541..152322115 100644 --- a/numpy/lib/utils.py +++ b/numpy/lib/utils.py @@ -871,11 +871,7 @@ def _lookfor_generate_cache(module, import_modules, regenerate): # Local import to speed up numpy's import time. import inspect - if sys.version_info[0] >= 3: - # In Python3 stderr, stdout are text files. 
- from io import StringIO - else: - from StringIO import StringIO + from io import StringIO if module is None: module = "numpy" diff --git a/numpy/linalg/lapack_lite/clapack_scrub.py b/numpy/linalg/lapack_lite/clapack_scrub.py index 2ddd083ea..531d861cf 100644 --- a/numpy/linalg/lapack_lite/clapack_scrub.py +++ b/numpy/linalg/lapack_lite/clapack_scrub.py @@ -1,15 +1,11 @@ #!/usr/bin/env python import sys, os import re +from io import StringIO + from plex import Scanner, Str, Lexicon, Opt, Bol, State, AnyChar, TEXT, IGNORE from plex.traditional import re as Re -PY2 = sys.version_info < (3, 0) - -if PY2: - from io import BytesIO as UStringIO -else: - from io import StringIO as UStringIO class MyScanner(Scanner): def __init__(self, info, name='<default>'): @@ -25,8 +21,8 @@ def sep_seq(sequence, sep): return pat def runScanner(data, scanner_class, lexicon=None): - info = UStringIO(data) - outfo = UStringIO() + info = StringIO(data) + outfo = StringIO() if lexicon is not None: scanner = scanner_class(lexicon, info) else: @@ -193,7 +189,7 @@ def cleanComments(source): return SourceLines state = SourceLines - for line in UStringIO(source): + for line in StringIO(source): state = state(line) comments.flushTo(lines) return lines.getValue() @@ -221,7 +217,7 @@ def removeHeader(source): return OutOfHeader state = LookingForHeader - for line in UStringIO(source): + for line in StringIO(source): state = state(line) return lines.getValue() @@ -230,7 +226,7 @@ def removeSubroutinePrototypes(source): r'/[*] Subroutine [*]/^\s*(?:(?:inline|static)\s+){0,2}(?!else|typedef|return)\w+\s+\*?\s*(\w+)\s*\([^0]+\)\s*;?' 
) lines = LineQueue() - for line in UStringIO(source): + for line in StringIO(source): if not expression.match(line): lines.add(line) @@ -252,7 +248,7 @@ def removeBuiltinFunctions(source): return InBuiltInFunctions state = LookingForBuiltinFunctions - for line in UStringIO(source): + for line in StringIO(source): state = state(line) return lines.getValue() diff --git a/numpy/linalg/lapack_lite/f2c.h b/numpy/linalg/lapack_lite/f2c.h index 4462eaa74..d3fbfc177 100644 --- a/numpy/linalg/lapack_lite/f2c.h +++ b/numpy/linalg/lapack_lite/f2c.h @@ -11,6 +11,8 @@ #include "numpy/npy_common.h" #include "npy_cblas.h" +#include "lapack_lite_names.h" + typedef CBLAS_INT integer; typedef char *address; typedef short int shortint; @@ -383,6 +385,9 @@ extern void z_log(doublecomplex *, doublecomplex *); extern void z_sin(doublecomplex *, doublecomplex *); extern void z_sqrt(doublecomplex *, doublecomplex *); +extern double f__cabs(double, double); +extern double f__cabsf(float, float); + #ifdef __cplusplus } #endif diff --git a/numpy/linalg/lapack_lite/f2c_blas.c b/numpy/linalg/lapack_lite/f2c_blas.c index 44ad23bfe..65286892f 100644 --- a/numpy/linalg/lapack_lite/f2c_blas.c +++ b/numpy/linalg/lapack_lite/f2c_blas.c @@ -1,7 +1,7 @@ /* -NOTE: This is generated code. Look in Misc/lapack_lite for information on - remaking this file. -*/ + * NOTE: This is generated code. Look in numpy/linalg/lapack_lite for + * information on remaking this file. + */ #include "f2c.h" #ifdef HAVE_CONFIG @@ -4912,7 +4912,7 @@ L20: ( 1 + ( n - 1 )*abs( INCX ) ). Before entry, the incremented array X must contain the n element vector x. On exit, X is overwritten with the - transformed vector x. + transformed vector x. INCX - INTEGER. On entry, INCX specifies the increment for the elements of @@ -9807,7 +9807,7 @@ L40: ( 1 + ( n - 1 )*abs( INCX ) ). Before entry, the incremented array X must contain the n element vector x. On exit, X is overwritten with the - transformed vector x. + transformed vector x. 
INCX - INTEGER. On entry, INCX specifies the increment for the elements of @@ -14410,7 +14410,7 @@ L40: ( 1 + ( n - 1 )*abs( INCX ) ). Before entry, the incremented array X must contain the n element vector x. On exit, X is overwritten with the - transformed vector x. + transformed vector x. INCX - INTEGER. On entry, INCX specifies the increment for the elements of @@ -19998,7 +19998,7 @@ L20: ( 1 + ( n - 1 )*abs( INCX ) ). Before entry, the incremented array X must contain the n element vector x. On exit, X is overwritten with the - transformed vector x. + transformed vector x. INCX - INTEGER. On entry, INCX specifies the increment for the elements of diff --git a/numpy/linalg/lapack_lite/f2c_c_lapack.c b/numpy/linalg/lapack_lite/f2c_c_lapack.c index f52e1e157..c36c0e368 100644 --- a/numpy/linalg/lapack_lite/f2c_c_lapack.c +++ b/numpy/linalg/lapack_lite/f2c_c_lapack.c @@ -1,7 +1,7 @@ /* -NOTE: This is generated code. Look in Misc/lapack_lite for information on - remaking this file. -*/ + * NOTE: This is generated code. Look in numpy/linalg/lapack_lite for + * information on remaking this file. + */ #include "f2c.h" #ifdef HAVE_CONFIG diff --git a/numpy/linalg/lapack_lite/f2c_config.c b/numpy/linalg/lapack_lite/f2c_config.c index 2fe608227..3f59e0263 100644 --- a/numpy/linalg/lapack_lite/f2c_config.c +++ b/numpy/linalg/lapack_lite/f2c_config.c @@ -1,7 +1,7 @@ /* -NOTE: This is generated code. Look in Misc/lapack_lite for information on - remaking this file. -*/ + * NOTE: This is generated code. Look in numpy/linalg/lapack_lite for + * information on remaking this file. + */ #include "f2c.h" #ifdef HAVE_CONFIG diff --git a/numpy/linalg/lapack_lite/f2c_d_lapack.c b/numpy/linalg/lapack_lite/f2c_d_lapack.c index 1a6675ef1..233db74b9 100644 --- a/numpy/linalg/lapack_lite/f2c_d_lapack.c +++ b/numpy/linalg/lapack_lite/f2c_d_lapack.c @@ -1,7 +1,7 @@ /* -NOTE: This is generated code. Look in Misc/lapack_lite for information on - remaking this file. 
-*/ + * NOTE: This is generated code. Look in numpy/linalg/lapack_lite for + * information on remaking this file. + */ #include "f2c.h" #ifdef HAVE_CONFIG diff --git a/numpy/linalg/lapack_lite/f2c_lapack.c b/numpy/linalg/lapack_lite/f2c_lapack.c index d956ddbbb..752261044 100644 --- a/numpy/linalg/lapack_lite/f2c_lapack.c +++ b/numpy/linalg/lapack_lite/f2c_lapack.c @@ -1,7 +1,7 @@ /* -NOTE: This is generated code. Look in Misc/lapack_lite for information on - remaking this file. -*/ + * NOTE: This is generated code. Look in numpy/linalg/lapack_lite for + * information on remaking this file. + */ #include "f2c.h" #ifdef HAVE_CONFIG diff --git a/numpy/linalg/lapack_lite/f2c_s_lapack.c b/numpy/linalg/lapack_lite/f2c_s_lapack.c index fccb1f58b..2a32315c7 100644 --- a/numpy/linalg/lapack_lite/f2c_s_lapack.c +++ b/numpy/linalg/lapack_lite/f2c_s_lapack.c @@ -1,7 +1,7 @@ /* -NOTE: This is generated code. Look in Misc/lapack_lite for information on - remaking this file. -*/ + * NOTE: This is generated code. Look in numpy/linalg/lapack_lite for + * information on remaking this file. + */ #include "f2c.h" #ifdef HAVE_CONFIG diff --git a/numpy/linalg/lapack_lite/f2c_z_lapack.c b/numpy/linalg/lapack_lite/f2c_z_lapack.c index 0f11f2e72..8234eca41 100644 --- a/numpy/linalg/lapack_lite/f2c_z_lapack.c +++ b/numpy/linalg/lapack_lite/f2c_z_lapack.c @@ -1,7 +1,7 @@ /* -NOTE: This is generated code. Look in Misc/lapack_lite for information on - remaking this file. -*/ + * NOTE: This is generated code. Look in numpy/linalg/lapack_lite for + * information on remaking this file. + */ #include "f2c.h" #ifdef HAVE_CONFIG diff --git a/numpy/linalg/lapack_lite/lapack_lite_names.h b/numpy/linalg/lapack_lite/lapack_lite_names.h new file mode 100644 index 000000000..08fd7257d --- /dev/null +++ b/numpy/linalg/lapack_lite/lapack_lite_names.h @@ -0,0 +1,691 @@ +/* + * NOTE: This is generated code. Look in numpy/linalg/lapack_lite for + * information on remaking this file. 
+ */ +/* + * This file renames all BLAS/LAPACK and f2c symbols to avoid + * dynamic symbol name conflicts, in cases where e.g. + * integer sizes do not match with 'standard' ABI. + */ +#define caxpy_ BLAS_FUNC(caxpy) +#define ccopy_ BLAS_FUNC(ccopy) +#define cdotc_ BLAS_FUNC(cdotc) +#define cdotu_ BLAS_FUNC(cdotu) +#define cgebak_ BLAS_FUNC(cgebak) +#define cgebal_ BLAS_FUNC(cgebal) +#define cgebd2_ BLAS_FUNC(cgebd2) +#define cgebrd_ BLAS_FUNC(cgebrd) +#define cgeev_ BLAS_FUNC(cgeev) +#define cgehd2_ BLAS_FUNC(cgehd2) +#define cgehrd_ BLAS_FUNC(cgehrd) +#define cgelq2_ BLAS_FUNC(cgelq2) +#define cgelqf_ BLAS_FUNC(cgelqf) +#define cgelsd_ BLAS_FUNC(cgelsd) +#define cgemm_ BLAS_FUNC(cgemm) +#define cgemv_ BLAS_FUNC(cgemv) +#define cgeqr2_ BLAS_FUNC(cgeqr2) +#define cgeqrf_ BLAS_FUNC(cgeqrf) +#define cgerc_ BLAS_FUNC(cgerc) +#define cgeru_ BLAS_FUNC(cgeru) +#define cgesdd_ BLAS_FUNC(cgesdd) +#define cgesv_ BLAS_FUNC(cgesv) +#define cgetf2_ BLAS_FUNC(cgetf2) +#define cgetrf_ BLAS_FUNC(cgetrf) +#define cgetrs_ BLAS_FUNC(cgetrs) +#define cheevd_ BLAS_FUNC(cheevd) +#define chemv_ BLAS_FUNC(chemv) +#define cher2_ BLAS_FUNC(cher2) +#define cher2k_ BLAS_FUNC(cher2k) +#define cherk_ BLAS_FUNC(cherk) +#define chetd2_ BLAS_FUNC(chetd2) +#define chetrd_ BLAS_FUNC(chetrd) +#define chseqr_ BLAS_FUNC(chseqr) +#define clabrd_ BLAS_FUNC(clabrd) +#define clacgv_ BLAS_FUNC(clacgv) +#define clacp2_ BLAS_FUNC(clacp2) +#define clacpy_ BLAS_FUNC(clacpy) +#define clacrm_ BLAS_FUNC(clacrm) +#define cladiv_ BLAS_FUNC(cladiv) +#define claed0_ BLAS_FUNC(claed0) +#define claed7_ BLAS_FUNC(claed7) +#define claed8_ BLAS_FUNC(claed8) +#define clahqr_ BLAS_FUNC(clahqr) +#define clahr2_ BLAS_FUNC(clahr2) +#define clals0_ BLAS_FUNC(clals0) +#define clalsa_ BLAS_FUNC(clalsa) +#define clalsd_ BLAS_FUNC(clalsd) +#define clange_ BLAS_FUNC(clange) +#define clanhe_ BLAS_FUNC(clanhe) +#define claqr0_ BLAS_FUNC(claqr0) +#define claqr1_ BLAS_FUNC(claqr1) +#define claqr2_ BLAS_FUNC(claqr2) +#define claqr3_ 
BLAS_FUNC(claqr3) +#define claqr4_ BLAS_FUNC(claqr4) +#define claqr5_ BLAS_FUNC(claqr5) +#define clarcm_ BLAS_FUNC(clarcm) +#define clarf_ BLAS_FUNC(clarf) +#define clarfb_ BLAS_FUNC(clarfb) +#define clarfg_ BLAS_FUNC(clarfg) +#define clarft_ BLAS_FUNC(clarft) +#define clartg_ BLAS_FUNC(clartg) +#define clascl_ BLAS_FUNC(clascl) +#define claset_ BLAS_FUNC(claset) +#define clasr_ BLAS_FUNC(clasr) +#define classq_ BLAS_FUNC(classq) +#define claswp_ BLAS_FUNC(claswp) +#define clatrd_ BLAS_FUNC(clatrd) +#define clatrs_ BLAS_FUNC(clatrs) +#define clauu2_ BLAS_FUNC(clauu2) +#define clauum_ BLAS_FUNC(clauum) +#define cpotf2_ BLAS_FUNC(cpotf2) +#define cpotrf_ BLAS_FUNC(cpotrf) +#define cpotri_ BLAS_FUNC(cpotri) +#define cpotrs_ BLAS_FUNC(cpotrs) +#define crot_ BLAS_FUNC(crot) +#define cscal_ BLAS_FUNC(cscal) +#define csrot_ BLAS_FUNC(csrot) +#define csscal_ BLAS_FUNC(csscal) +#define cstedc_ BLAS_FUNC(cstedc) +#define csteqr_ BLAS_FUNC(csteqr) +#define cswap_ BLAS_FUNC(cswap) +#define ctrevc_ BLAS_FUNC(ctrevc) +#define ctrexc_ BLAS_FUNC(ctrexc) +#define ctrmm_ BLAS_FUNC(ctrmm) +#define ctrmv_ BLAS_FUNC(ctrmv) +#define ctrsm_ BLAS_FUNC(ctrsm) +#define ctrsv_ BLAS_FUNC(ctrsv) +#define ctrti2_ BLAS_FUNC(ctrti2) +#define ctrtri_ BLAS_FUNC(ctrtri) +#define cung2r_ BLAS_FUNC(cung2r) +#define cungbr_ BLAS_FUNC(cungbr) +#define cunghr_ BLAS_FUNC(cunghr) +#define cungl2_ BLAS_FUNC(cungl2) +#define cunglq_ BLAS_FUNC(cunglq) +#define cungqr_ BLAS_FUNC(cungqr) +#define cunm2l_ BLAS_FUNC(cunm2l) +#define cunm2r_ BLAS_FUNC(cunm2r) +#define cunmbr_ BLAS_FUNC(cunmbr) +#define cunmhr_ BLAS_FUNC(cunmhr) +#define cunml2_ BLAS_FUNC(cunml2) +#define cunmlq_ BLAS_FUNC(cunmlq) +#define cunmql_ BLAS_FUNC(cunmql) +#define cunmqr_ BLAS_FUNC(cunmqr) +#define cunmtr_ BLAS_FUNC(cunmtr) +#define daxpy_ BLAS_FUNC(daxpy) +#define dbdsdc_ BLAS_FUNC(dbdsdc) +#define dbdsqr_ BLAS_FUNC(dbdsqr) +#define dcabs1_ BLAS_FUNC(dcabs1) +#define dcopy_ BLAS_FUNC(dcopy) +#define ddot_ BLAS_FUNC(ddot) +#define dgebak_ 
BLAS_FUNC(dgebak) +#define dgebal_ BLAS_FUNC(dgebal) +#define dgebd2_ BLAS_FUNC(dgebd2) +#define dgebrd_ BLAS_FUNC(dgebrd) +#define dgeev_ BLAS_FUNC(dgeev) +#define dgehd2_ BLAS_FUNC(dgehd2) +#define dgehrd_ BLAS_FUNC(dgehrd) +#define dgelq2_ BLAS_FUNC(dgelq2) +#define dgelqf_ BLAS_FUNC(dgelqf) +#define dgelsd_ BLAS_FUNC(dgelsd) +#define dgemm_ BLAS_FUNC(dgemm) +#define dgemv_ BLAS_FUNC(dgemv) +#define dgeqr2_ BLAS_FUNC(dgeqr2) +#define dgeqrf_ BLAS_FUNC(dgeqrf) +#define dger_ BLAS_FUNC(dger) +#define dgesdd_ BLAS_FUNC(dgesdd) +#define dgesv_ BLAS_FUNC(dgesv) +#define dgetf2_ BLAS_FUNC(dgetf2) +#define dgetrf_ BLAS_FUNC(dgetrf) +#define dgetrs_ BLAS_FUNC(dgetrs) +#define dhseqr_ BLAS_FUNC(dhseqr) +#define disnan_ BLAS_FUNC(disnan) +#define dlabad_ BLAS_FUNC(dlabad) +#define dlabrd_ BLAS_FUNC(dlabrd) +#define dlacpy_ BLAS_FUNC(dlacpy) +#define dladiv_ BLAS_FUNC(dladiv) +#define dlae2_ BLAS_FUNC(dlae2) +#define dlaed0_ BLAS_FUNC(dlaed0) +#define dlaed1_ BLAS_FUNC(dlaed1) +#define dlaed2_ BLAS_FUNC(dlaed2) +#define dlaed3_ BLAS_FUNC(dlaed3) +#define dlaed4_ BLAS_FUNC(dlaed4) +#define dlaed5_ BLAS_FUNC(dlaed5) +#define dlaed6_ BLAS_FUNC(dlaed6) +#define dlaed7_ BLAS_FUNC(dlaed7) +#define dlaed8_ BLAS_FUNC(dlaed8) +#define dlaed9_ BLAS_FUNC(dlaed9) +#define dlaeda_ BLAS_FUNC(dlaeda) +#define dlaev2_ BLAS_FUNC(dlaev2) +#define dlaexc_ BLAS_FUNC(dlaexc) +#define dlahqr_ BLAS_FUNC(dlahqr) +#define dlahr2_ BLAS_FUNC(dlahr2) +#define dlaisnan_ BLAS_FUNC(dlaisnan) +#define dlaln2_ BLAS_FUNC(dlaln2) +#define dlals0_ BLAS_FUNC(dlals0) +#define dlalsa_ BLAS_FUNC(dlalsa) +#define dlalsd_ BLAS_FUNC(dlalsd) +#define dlamc1_ BLAS_FUNC(dlamc1) +#define dlamc2_ BLAS_FUNC(dlamc2) +#define dlamc3_ BLAS_FUNC(dlamc3) +#define dlamc4_ BLAS_FUNC(dlamc4) +#define dlamc5_ BLAS_FUNC(dlamc5) +#define dlamch_ BLAS_FUNC(dlamch) +#define dlamrg_ BLAS_FUNC(dlamrg) +#define dlange_ BLAS_FUNC(dlange) +#define dlanst_ BLAS_FUNC(dlanst) +#define dlansy_ BLAS_FUNC(dlansy) +#define dlanv2_ 
BLAS_FUNC(dlanv2) +#define dlapy2_ BLAS_FUNC(dlapy2) +#define dlapy3_ BLAS_FUNC(dlapy3) +#define dlaqr0_ BLAS_FUNC(dlaqr0) +#define dlaqr1_ BLAS_FUNC(dlaqr1) +#define dlaqr2_ BLAS_FUNC(dlaqr2) +#define dlaqr3_ BLAS_FUNC(dlaqr3) +#define dlaqr4_ BLAS_FUNC(dlaqr4) +#define dlaqr5_ BLAS_FUNC(dlaqr5) +#define dlarf_ BLAS_FUNC(dlarf) +#define dlarfb_ BLAS_FUNC(dlarfb) +#define dlarfg_ BLAS_FUNC(dlarfg) +#define dlarft_ BLAS_FUNC(dlarft) +#define dlarfx_ BLAS_FUNC(dlarfx) +#define dlartg_ BLAS_FUNC(dlartg) +#define dlas2_ BLAS_FUNC(dlas2) +#define dlascl_ BLAS_FUNC(dlascl) +#define dlasd0_ BLAS_FUNC(dlasd0) +#define dlasd1_ BLAS_FUNC(dlasd1) +#define dlasd2_ BLAS_FUNC(dlasd2) +#define dlasd3_ BLAS_FUNC(dlasd3) +#define dlasd4_ BLAS_FUNC(dlasd4) +#define dlasd5_ BLAS_FUNC(dlasd5) +#define dlasd6_ BLAS_FUNC(dlasd6) +#define dlasd7_ BLAS_FUNC(dlasd7) +#define dlasd8_ BLAS_FUNC(dlasd8) +#define dlasda_ BLAS_FUNC(dlasda) +#define dlasdq_ BLAS_FUNC(dlasdq) +#define dlasdt_ BLAS_FUNC(dlasdt) +#define dlaset_ BLAS_FUNC(dlaset) +#define dlasq1_ BLAS_FUNC(dlasq1) +#define dlasq2_ BLAS_FUNC(dlasq2) +#define dlasq3_ BLAS_FUNC(dlasq3) +#define dlasq4_ BLAS_FUNC(dlasq4) +#define dlasq5_ BLAS_FUNC(dlasq5) +#define dlasq6_ BLAS_FUNC(dlasq6) +#define dlasr_ BLAS_FUNC(dlasr) +#define dlasrt_ BLAS_FUNC(dlasrt) +#define dlassq_ BLAS_FUNC(dlassq) +#define dlasv2_ BLAS_FUNC(dlasv2) +#define dlaswp_ BLAS_FUNC(dlaswp) +#define dlasy2_ BLAS_FUNC(dlasy2) +#define dlatrd_ BLAS_FUNC(dlatrd) +#define dlauu2_ BLAS_FUNC(dlauu2) +#define dlauum_ BLAS_FUNC(dlauum) +#define dnrm2_ BLAS_FUNC(dnrm2) +#define dorg2r_ BLAS_FUNC(dorg2r) +#define dorgbr_ BLAS_FUNC(dorgbr) +#define dorghr_ BLAS_FUNC(dorghr) +#define dorgl2_ BLAS_FUNC(dorgl2) +#define dorglq_ BLAS_FUNC(dorglq) +#define dorgqr_ BLAS_FUNC(dorgqr) +#define dorm2l_ BLAS_FUNC(dorm2l) +#define dorm2r_ BLAS_FUNC(dorm2r) +#define dormbr_ BLAS_FUNC(dormbr) +#define dormhr_ BLAS_FUNC(dormhr) +#define dorml2_ BLAS_FUNC(dorml2) +#define dormlq_ 
BLAS_FUNC(dormlq) +#define dormql_ BLAS_FUNC(dormql) +#define dormqr_ BLAS_FUNC(dormqr) +#define dormtr_ BLAS_FUNC(dormtr) +#define dpotf2_ BLAS_FUNC(dpotf2) +#define dpotrf_ BLAS_FUNC(dpotrf) +#define dpotri_ BLAS_FUNC(dpotri) +#define dpotrs_ BLAS_FUNC(dpotrs) +#define drot_ BLAS_FUNC(drot) +#define dscal_ BLAS_FUNC(dscal) +#define dstedc_ BLAS_FUNC(dstedc) +#define dsteqr_ BLAS_FUNC(dsteqr) +#define dsterf_ BLAS_FUNC(dsterf) +#define dswap_ BLAS_FUNC(dswap) +#define dsyevd_ BLAS_FUNC(dsyevd) +#define dsymv_ BLAS_FUNC(dsymv) +#define dsyr2_ BLAS_FUNC(dsyr2) +#define dsyr2k_ BLAS_FUNC(dsyr2k) +#define dsyrk_ BLAS_FUNC(dsyrk) +#define dsytd2_ BLAS_FUNC(dsytd2) +#define dsytrd_ BLAS_FUNC(dsytrd) +#define dtrevc_ BLAS_FUNC(dtrevc) +#define dtrexc_ BLAS_FUNC(dtrexc) +#define dtrmm_ BLAS_FUNC(dtrmm) +#define dtrmv_ BLAS_FUNC(dtrmv) +#define dtrsm_ BLAS_FUNC(dtrsm) +#define dtrti2_ BLAS_FUNC(dtrti2) +#define dtrtri_ BLAS_FUNC(dtrtri) +#define dzasum_ BLAS_FUNC(dzasum) +#define dznrm2_ BLAS_FUNC(dznrm2) +#define icamax_ BLAS_FUNC(icamax) +#define idamax_ BLAS_FUNC(idamax) +#define ieeeck_ BLAS_FUNC(ieeeck) +#define ilaclc_ BLAS_FUNC(ilaclc) +#define ilaclr_ BLAS_FUNC(ilaclr) +#define iladlc_ BLAS_FUNC(iladlc) +#define iladlr_ BLAS_FUNC(iladlr) +#define ilaenv_ BLAS_FUNC(ilaenv) +#define ilaslc_ BLAS_FUNC(ilaslc) +#define ilaslr_ BLAS_FUNC(ilaslr) +#define ilazlc_ BLAS_FUNC(ilazlc) +#define ilazlr_ BLAS_FUNC(ilazlr) +#define iparmq_ BLAS_FUNC(iparmq) +#define isamax_ BLAS_FUNC(isamax) +#define izamax_ BLAS_FUNC(izamax) +#define lsame_ BLAS_FUNC(lsame) +#define saxpy_ BLAS_FUNC(saxpy) +#define sbdsdc_ BLAS_FUNC(sbdsdc) +#define sbdsqr_ BLAS_FUNC(sbdsqr) +#define scabs1_ BLAS_FUNC(scabs1) +#define scasum_ BLAS_FUNC(scasum) +#define scnrm2_ BLAS_FUNC(scnrm2) +#define scopy_ BLAS_FUNC(scopy) +#define sdot_ BLAS_FUNC(sdot) +#define sgebak_ BLAS_FUNC(sgebak) +#define sgebal_ BLAS_FUNC(sgebal) +#define sgebd2_ BLAS_FUNC(sgebd2) +#define sgebrd_ BLAS_FUNC(sgebrd) +#define sgeev_ 
BLAS_FUNC(sgeev) +#define sgehd2_ BLAS_FUNC(sgehd2) +#define sgehrd_ BLAS_FUNC(sgehrd) +#define sgelq2_ BLAS_FUNC(sgelq2) +#define sgelqf_ BLAS_FUNC(sgelqf) +#define sgelsd_ BLAS_FUNC(sgelsd) +#define sgemm_ BLAS_FUNC(sgemm) +#define sgemv_ BLAS_FUNC(sgemv) +#define sgeqr2_ BLAS_FUNC(sgeqr2) +#define sgeqrf_ BLAS_FUNC(sgeqrf) +#define sger_ BLAS_FUNC(sger) +#define sgesdd_ BLAS_FUNC(sgesdd) +#define sgesv_ BLAS_FUNC(sgesv) +#define sgetf2_ BLAS_FUNC(sgetf2) +#define sgetrf_ BLAS_FUNC(sgetrf) +#define sgetrs_ BLAS_FUNC(sgetrs) +#define shseqr_ BLAS_FUNC(shseqr) +#define sisnan_ BLAS_FUNC(sisnan) +#define slabad_ BLAS_FUNC(slabad) +#define slabrd_ BLAS_FUNC(slabrd) +#define slacpy_ BLAS_FUNC(slacpy) +#define sladiv_ BLAS_FUNC(sladiv) +#define slae2_ BLAS_FUNC(slae2) +#define slaed0_ BLAS_FUNC(slaed0) +#define slaed1_ BLAS_FUNC(slaed1) +#define slaed2_ BLAS_FUNC(slaed2) +#define slaed3_ BLAS_FUNC(slaed3) +#define slaed4_ BLAS_FUNC(slaed4) +#define slaed5_ BLAS_FUNC(slaed5) +#define slaed6_ BLAS_FUNC(slaed6) +#define slaed7_ BLAS_FUNC(slaed7) +#define slaed8_ BLAS_FUNC(slaed8) +#define slaed9_ BLAS_FUNC(slaed9) +#define slaeda_ BLAS_FUNC(slaeda) +#define slaev2_ BLAS_FUNC(slaev2) +#define slaexc_ BLAS_FUNC(slaexc) +#define slahqr_ BLAS_FUNC(slahqr) +#define slahr2_ BLAS_FUNC(slahr2) +#define slaisnan_ BLAS_FUNC(slaisnan) +#define slaln2_ BLAS_FUNC(slaln2) +#define slals0_ BLAS_FUNC(slals0) +#define slalsa_ BLAS_FUNC(slalsa) +#define slalsd_ BLAS_FUNC(slalsd) +#define slamc1_ BLAS_FUNC(slamc1) +#define slamc2_ BLAS_FUNC(slamc2) +#define slamc3_ BLAS_FUNC(slamc3) +#define slamc4_ BLAS_FUNC(slamc4) +#define slamc5_ BLAS_FUNC(slamc5) +#define slamch_ BLAS_FUNC(slamch) +#define slamrg_ BLAS_FUNC(slamrg) +#define slange_ BLAS_FUNC(slange) +#define slanst_ BLAS_FUNC(slanst) +#define slansy_ BLAS_FUNC(slansy) +#define slanv2_ BLAS_FUNC(slanv2) +#define slapy2_ BLAS_FUNC(slapy2) +#define slapy3_ BLAS_FUNC(slapy3) +#define slaqr0_ BLAS_FUNC(slaqr0) +#define slaqr1_ 
BLAS_FUNC(slaqr1) +#define slaqr2_ BLAS_FUNC(slaqr2) +#define slaqr3_ BLAS_FUNC(slaqr3) +#define slaqr4_ BLAS_FUNC(slaqr4) +#define slaqr5_ BLAS_FUNC(slaqr5) +#define slarf_ BLAS_FUNC(slarf) +#define slarfb_ BLAS_FUNC(slarfb) +#define slarfg_ BLAS_FUNC(slarfg) +#define slarft_ BLAS_FUNC(slarft) +#define slarfx_ BLAS_FUNC(slarfx) +#define slartg_ BLAS_FUNC(slartg) +#define slas2_ BLAS_FUNC(slas2) +#define slascl_ BLAS_FUNC(slascl) +#define slasd0_ BLAS_FUNC(slasd0) +#define slasd1_ BLAS_FUNC(slasd1) +#define slasd2_ BLAS_FUNC(slasd2) +#define slasd3_ BLAS_FUNC(slasd3) +#define slasd4_ BLAS_FUNC(slasd4) +#define slasd5_ BLAS_FUNC(slasd5) +#define slasd6_ BLAS_FUNC(slasd6) +#define slasd7_ BLAS_FUNC(slasd7) +#define slasd8_ BLAS_FUNC(slasd8) +#define slasda_ BLAS_FUNC(slasda) +#define slasdq_ BLAS_FUNC(slasdq) +#define slasdt_ BLAS_FUNC(slasdt) +#define slaset_ BLAS_FUNC(slaset) +#define slasq1_ BLAS_FUNC(slasq1) +#define slasq2_ BLAS_FUNC(slasq2) +#define slasq3_ BLAS_FUNC(slasq3) +#define slasq4_ BLAS_FUNC(slasq4) +#define slasq5_ BLAS_FUNC(slasq5) +#define slasq6_ BLAS_FUNC(slasq6) +#define slasr_ BLAS_FUNC(slasr) +#define slasrt_ BLAS_FUNC(slasrt) +#define slassq_ BLAS_FUNC(slassq) +#define slasv2_ BLAS_FUNC(slasv2) +#define slaswp_ BLAS_FUNC(slaswp) +#define slasy2_ BLAS_FUNC(slasy2) +#define slatrd_ BLAS_FUNC(slatrd) +#define slauu2_ BLAS_FUNC(slauu2) +#define slauum_ BLAS_FUNC(slauum) +#define snrm2_ BLAS_FUNC(snrm2) +#define sorg2r_ BLAS_FUNC(sorg2r) +#define sorgbr_ BLAS_FUNC(sorgbr) +#define sorghr_ BLAS_FUNC(sorghr) +#define sorgl2_ BLAS_FUNC(sorgl2) +#define sorglq_ BLAS_FUNC(sorglq) +#define sorgqr_ BLAS_FUNC(sorgqr) +#define sorm2l_ BLAS_FUNC(sorm2l) +#define sorm2r_ BLAS_FUNC(sorm2r) +#define sormbr_ BLAS_FUNC(sormbr) +#define sormhr_ BLAS_FUNC(sormhr) +#define sorml2_ BLAS_FUNC(sorml2) +#define sormlq_ BLAS_FUNC(sormlq) +#define sormql_ BLAS_FUNC(sormql) +#define sormqr_ BLAS_FUNC(sormqr) +#define sormtr_ BLAS_FUNC(sormtr) +#define spotf2_ 
BLAS_FUNC(spotf2) +#define spotrf_ BLAS_FUNC(spotrf) +#define spotri_ BLAS_FUNC(spotri) +#define spotrs_ BLAS_FUNC(spotrs) +#define srot_ BLAS_FUNC(srot) +#define sscal_ BLAS_FUNC(sscal) +#define sstedc_ BLAS_FUNC(sstedc) +#define ssteqr_ BLAS_FUNC(ssteqr) +#define ssterf_ BLAS_FUNC(ssterf) +#define sswap_ BLAS_FUNC(sswap) +#define ssyevd_ BLAS_FUNC(ssyevd) +#define ssymv_ BLAS_FUNC(ssymv) +#define ssyr2_ BLAS_FUNC(ssyr2) +#define ssyr2k_ BLAS_FUNC(ssyr2k) +#define ssyrk_ BLAS_FUNC(ssyrk) +#define ssytd2_ BLAS_FUNC(ssytd2) +#define ssytrd_ BLAS_FUNC(ssytrd) +#define strevc_ BLAS_FUNC(strevc) +#define strexc_ BLAS_FUNC(strexc) +#define strmm_ BLAS_FUNC(strmm) +#define strmv_ BLAS_FUNC(strmv) +#define strsm_ BLAS_FUNC(strsm) +#define strti2_ BLAS_FUNC(strti2) +#define strtri_ BLAS_FUNC(strtri) +#define xerbla_ BLAS_FUNC(xerbla) +#define zaxpy_ BLAS_FUNC(zaxpy) +#define zcopy_ BLAS_FUNC(zcopy) +#define zdotc_ BLAS_FUNC(zdotc) +#define zdotu_ BLAS_FUNC(zdotu) +#define zdrot_ BLAS_FUNC(zdrot) +#define zdscal_ BLAS_FUNC(zdscal) +#define zgebak_ BLAS_FUNC(zgebak) +#define zgebal_ BLAS_FUNC(zgebal) +#define zgebd2_ BLAS_FUNC(zgebd2) +#define zgebrd_ BLAS_FUNC(zgebrd) +#define zgeev_ BLAS_FUNC(zgeev) +#define zgehd2_ BLAS_FUNC(zgehd2) +#define zgehrd_ BLAS_FUNC(zgehrd) +#define zgelq2_ BLAS_FUNC(zgelq2) +#define zgelqf_ BLAS_FUNC(zgelqf) +#define zgelsd_ BLAS_FUNC(zgelsd) +#define zgemm_ BLAS_FUNC(zgemm) +#define zgemv_ BLAS_FUNC(zgemv) +#define zgeqr2_ BLAS_FUNC(zgeqr2) +#define zgeqrf_ BLAS_FUNC(zgeqrf) +#define zgerc_ BLAS_FUNC(zgerc) +#define zgeru_ BLAS_FUNC(zgeru) +#define zgesdd_ BLAS_FUNC(zgesdd) +#define zgesv_ BLAS_FUNC(zgesv) +#define zgetf2_ BLAS_FUNC(zgetf2) +#define zgetrf_ BLAS_FUNC(zgetrf) +#define zgetrs_ BLAS_FUNC(zgetrs) +#define zheevd_ BLAS_FUNC(zheevd) +#define zhemv_ BLAS_FUNC(zhemv) +#define zher2_ BLAS_FUNC(zher2) +#define zher2k_ BLAS_FUNC(zher2k) +#define zherk_ BLAS_FUNC(zherk) +#define zhetd2_ BLAS_FUNC(zhetd2) +#define zhetrd_ BLAS_FUNC(zhetrd) 
+#define zhseqr_ BLAS_FUNC(zhseqr) +#define zlabrd_ BLAS_FUNC(zlabrd) +#define zlacgv_ BLAS_FUNC(zlacgv) +#define zlacp2_ BLAS_FUNC(zlacp2) +#define zlacpy_ BLAS_FUNC(zlacpy) +#define zlacrm_ BLAS_FUNC(zlacrm) +#define zladiv_ BLAS_FUNC(zladiv) +#define zlaed0_ BLAS_FUNC(zlaed0) +#define zlaed7_ BLAS_FUNC(zlaed7) +#define zlaed8_ BLAS_FUNC(zlaed8) +#define zlahqr_ BLAS_FUNC(zlahqr) +#define zlahr2_ BLAS_FUNC(zlahr2) +#define zlals0_ BLAS_FUNC(zlals0) +#define zlalsa_ BLAS_FUNC(zlalsa) +#define zlalsd_ BLAS_FUNC(zlalsd) +#define zlange_ BLAS_FUNC(zlange) +#define zlanhe_ BLAS_FUNC(zlanhe) +#define zlaqr0_ BLAS_FUNC(zlaqr0) +#define zlaqr1_ BLAS_FUNC(zlaqr1) +#define zlaqr2_ BLAS_FUNC(zlaqr2) +#define zlaqr3_ BLAS_FUNC(zlaqr3) +#define zlaqr4_ BLAS_FUNC(zlaqr4) +#define zlaqr5_ BLAS_FUNC(zlaqr5) +#define zlarcm_ BLAS_FUNC(zlarcm) +#define zlarf_ BLAS_FUNC(zlarf) +#define zlarfb_ BLAS_FUNC(zlarfb) +#define zlarfg_ BLAS_FUNC(zlarfg) +#define zlarft_ BLAS_FUNC(zlarft) +#define zlartg_ BLAS_FUNC(zlartg) +#define zlascl_ BLAS_FUNC(zlascl) +#define zlaset_ BLAS_FUNC(zlaset) +#define zlasr_ BLAS_FUNC(zlasr) +#define zlassq_ BLAS_FUNC(zlassq) +#define zlaswp_ BLAS_FUNC(zlaswp) +#define zlatrd_ BLAS_FUNC(zlatrd) +#define zlatrs_ BLAS_FUNC(zlatrs) +#define zlauu2_ BLAS_FUNC(zlauu2) +#define zlauum_ BLAS_FUNC(zlauum) +#define zpotf2_ BLAS_FUNC(zpotf2) +#define zpotrf_ BLAS_FUNC(zpotrf) +#define zpotri_ BLAS_FUNC(zpotri) +#define zpotrs_ BLAS_FUNC(zpotrs) +#define zrot_ BLAS_FUNC(zrot) +#define zscal_ BLAS_FUNC(zscal) +#define zstedc_ BLAS_FUNC(zstedc) +#define zsteqr_ BLAS_FUNC(zsteqr) +#define zswap_ BLAS_FUNC(zswap) +#define ztrevc_ BLAS_FUNC(ztrevc) +#define ztrexc_ BLAS_FUNC(ztrexc) +#define ztrmm_ BLAS_FUNC(ztrmm) +#define ztrmv_ BLAS_FUNC(ztrmv) +#define ztrsm_ BLAS_FUNC(ztrsm) +#define ztrsv_ BLAS_FUNC(ztrsv) +#define ztrti2_ BLAS_FUNC(ztrti2) +#define ztrtri_ BLAS_FUNC(ztrtri) +#define zung2r_ BLAS_FUNC(zung2r) +#define zungbr_ BLAS_FUNC(zungbr) +#define zunghr_ 
BLAS_FUNC(zunghr) +#define zungl2_ BLAS_FUNC(zungl2) +#define zunglq_ BLAS_FUNC(zunglq) +#define zungqr_ BLAS_FUNC(zungqr) +#define zunm2l_ BLAS_FUNC(zunm2l) +#define zunm2r_ BLAS_FUNC(zunm2r) +#define zunmbr_ BLAS_FUNC(zunmbr) +#define zunmhr_ BLAS_FUNC(zunmhr) +#define zunml2_ BLAS_FUNC(zunml2) +#define zunmlq_ BLAS_FUNC(zunmlq) +#define zunmql_ BLAS_FUNC(zunmql) +#define zunmqr_ BLAS_FUNC(zunmqr) +#define zunmtr_ BLAS_FUNC(zunmtr) + +/* Symbols exported by f2c.c */ +#define abort_ numpy_lapack_lite_abort_ +#define c_abs numpy_lapack_lite_c_abs +#define c_cos numpy_lapack_lite_c_cos +#define c_div numpy_lapack_lite_c_div +#define c_exp numpy_lapack_lite_c_exp +#define c_log numpy_lapack_lite_c_log +#define c_sin numpy_lapack_lite_c_sin +#define c_sqrt numpy_lapack_lite_c_sqrt +#define d_abs numpy_lapack_lite_d_abs +#define d_acos numpy_lapack_lite_d_acos +#define d_asin numpy_lapack_lite_d_asin +#define d_atan numpy_lapack_lite_d_atan +#define d_atn2 numpy_lapack_lite_d_atn2 +#define d_cnjg numpy_lapack_lite_d_cnjg +#define d_cos numpy_lapack_lite_d_cos +#define d_cosh numpy_lapack_lite_d_cosh +#define d_dim numpy_lapack_lite_d_dim +#define d_exp numpy_lapack_lite_d_exp +#define d_imag numpy_lapack_lite_d_imag +#define d_int numpy_lapack_lite_d_int +#define d_lg10 numpy_lapack_lite_d_lg10 +#define d_log numpy_lapack_lite_d_log +#define d_mod numpy_lapack_lite_d_mod +#define d_nint numpy_lapack_lite_d_nint +#define d_prod numpy_lapack_lite_d_prod +#define d_sign numpy_lapack_lite_d_sign +#define d_sin numpy_lapack_lite_d_sin +#define d_sinh numpy_lapack_lite_d_sinh +#define d_sqrt numpy_lapack_lite_d_sqrt +#define d_tan numpy_lapack_lite_d_tan +#define d_tanh numpy_lapack_lite_d_tanh +#define derf_ numpy_lapack_lite_derf_ +#define derfc_ numpy_lapack_lite_derfc_ +#define do_fio numpy_lapack_lite_do_fio +#define do_lio numpy_lapack_lite_do_lio +#define do_uio numpy_lapack_lite_do_uio +#define e_rdfe numpy_lapack_lite_e_rdfe +#define e_rdue numpy_lapack_lite_e_rdue 
+#define e_rsfe numpy_lapack_lite_e_rsfe +#define e_rsfi numpy_lapack_lite_e_rsfi +#define e_rsle numpy_lapack_lite_e_rsle +#define e_rsli numpy_lapack_lite_e_rsli +#define e_rsue numpy_lapack_lite_e_rsue +#define e_wdfe numpy_lapack_lite_e_wdfe +#define e_wdue numpy_lapack_lite_e_wdue +#define e_wsfe numpy_lapack_lite_e_wsfe +#define e_wsfi numpy_lapack_lite_e_wsfi +#define e_wsle numpy_lapack_lite_e_wsle +#define e_wsli numpy_lapack_lite_e_wsli +#define e_wsue numpy_lapack_lite_e_wsue +#define ef1asc_ numpy_lapack_lite_ef1asc_ +#define ef1cmc_ numpy_lapack_lite_ef1cmc_ +#define erf_ numpy_lapack_lite_erf_ +#define erfc_ numpy_lapack_lite_erfc_ +#define f__cabs numpy_lapack_lite_f__cabs +#define f__cabsf numpy_lapack_lite_f__cabsf +#define f_back numpy_lapack_lite_f_back +#define f_clos numpy_lapack_lite_f_clos +#define f_end numpy_lapack_lite_f_end +#define f_exit numpy_lapack_lite_f_exit +#define f_inqu numpy_lapack_lite_f_inqu +#define f_open numpy_lapack_lite_f_open +#define f_rew numpy_lapack_lite_f_rew +#define flush_ numpy_lapack_lite_flush_ +#define getarg_ numpy_lapack_lite_getarg_ +#define getenv_ numpy_lapack_lite_getenv_ +#define h_abs numpy_lapack_lite_h_abs +#define h_dim numpy_lapack_lite_h_dim +#define h_dnnt numpy_lapack_lite_h_dnnt +#define h_indx numpy_lapack_lite_h_indx +#define h_len numpy_lapack_lite_h_len +#define h_mod numpy_lapack_lite_h_mod +#define h_nint numpy_lapack_lite_h_nint +#define h_sign numpy_lapack_lite_h_sign +#define hl_ge numpy_lapack_lite_hl_ge +#define hl_gt numpy_lapack_lite_hl_gt +#define hl_le numpy_lapack_lite_hl_le +#define hl_lt numpy_lapack_lite_hl_lt +#define i_abs numpy_lapack_lite_i_abs +#define i_dim numpy_lapack_lite_i_dim +#define i_dnnt numpy_lapack_lite_i_dnnt +#define i_indx numpy_lapack_lite_i_indx +#define i_len numpy_lapack_lite_i_len +#define i_mod numpy_lapack_lite_i_mod +#define i_nint numpy_lapack_lite_i_nint +#define i_sign numpy_lapack_lite_i_sign +#define iargc_ numpy_lapack_lite_iargc_ +#define 
l_ge numpy_lapack_lite_l_ge +#define l_gt numpy_lapack_lite_l_gt +#define l_le numpy_lapack_lite_l_le +#define l_lt numpy_lapack_lite_l_lt +#define pow_ci numpy_lapack_lite_pow_ci +#define pow_dd numpy_lapack_lite_pow_dd +#define pow_di numpy_lapack_lite_pow_di +#define pow_hh numpy_lapack_lite_pow_hh +#define pow_ii numpy_lapack_lite_pow_ii +#define pow_ri numpy_lapack_lite_pow_ri +#define pow_zi numpy_lapack_lite_pow_zi +#define pow_zz numpy_lapack_lite_pow_zz +#define r_abs numpy_lapack_lite_r_abs +#define r_acos numpy_lapack_lite_r_acos +#define r_asin numpy_lapack_lite_r_asin +#define r_atan numpy_lapack_lite_r_atan +#define r_atn2 numpy_lapack_lite_r_atn2 +#define r_cnjg numpy_lapack_lite_r_cnjg +#define r_cos numpy_lapack_lite_r_cos +#define r_cosh numpy_lapack_lite_r_cosh +#define r_dim numpy_lapack_lite_r_dim +#define r_exp numpy_lapack_lite_r_exp +#define r_imag numpy_lapack_lite_r_imag +#define r_int numpy_lapack_lite_r_int +#define r_lg10 numpy_lapack_lite_r_lg10 +#define r_log numpy_lapack_lite_r_log +#define r_mod numpy_lapack_lite_r_mod +#define r_nint numpy_lapack_lite_r_nint +#define r_sign numpy_lapack_lite_r_sign +#define r_sin numpy_lapack_lite_r_sin +#define r_sinh numpy_lapack_lite_r_sinh +#define r_sqrt numpy_lapack_lite_r_sqrt +#define r_tan numpy_lapack_lite_r_tan +#define r_tanh numpy_lapack_lite_r_tanh +#define s_cat numpy_lapack_lite_s_cat +#define s_cmp numpy_lapack_lite_s_cmp +#define s_copy numpy_lapack_lite_s_copy +#define s_paus numpy_lapack_lite_s_paus +#define s_rdfe numpy_lapack_lite_s_rdfe +#define s_rdue numpy_lapack_lite_s_rdue +#define s_rnge numpy_lapack_lite_s_rnge +#define s_rsfe numpy_lapack_lite_s_rsfe +#define s_rsfi numpy_lapack_lite_s_rsfi +#define s_rsle numpy_lapack_lite_s_rsle +#define s_rsli numpy_lapack_lite_s_rsli +#define s_rsne numpy_lapack_lite_s_rsne +#define s_rsni numpy_lapack_lite_s_rsni +#define s_rsue numpy_lapack_lite_s_rsue +#define s_stop numpy_lapack_lite_s_stop +#define s_wdfe 
numpy_lapack_lite_s_wdfe +#define s_wdue numpy_lapack_lite_s_wdue +#define s_wsfe numpy_lapack_lite_s_wsfe +#define s_wsfi numpy_lapack_lite_s_wsfi +#define s_wsle numpy_lapack_lite_s_wsle +#define s_wsli numpy_lapack_lite_s_wsli +#define s_wsne numpy_lapack_lite_s_wsne +#define s_wsni numpy_lapack_lite_s_wsni +#define s_wsue numpy_lapack_lite_s_wsue +#define sig_die numpy_lapack_lite_sig_die +#define signal_ numpy_lapack_lite_signal_ +#define system_ numpy_lapack_lite_system_ +#define z_abs numpy_lapack_lite_z_abs +#define z_cos numpy_lapack_lite_z_cos +#define z_div numpy_lapack_lite_z_div +#define z_exp numpy_lapack_lite_z_exp +#define z_log numpy_lapack_lite_z_log +#define z_sin numpy_lapack_lite_z_sin +#define z_sqrt numpy_lapack_lite_z_sqrt diff --git a/numpy/linalg/lapack_lite/make_lite.py b/numpy/linalg/lapack_lite/make_lite.py index 4b1a0ad82..960f5e2d8 100755 --- a/numpy/linalg/lapack_lite/make_lite.py +++ b/numpy/linalg/lapack_lite/make_lite.py @@ -1,6 +1,6 @@ #!/usr/bin/env python """ -Usage: make_lite.py <wrapped_routines_file> <lapack_dir> <output_dir> +Usage: make_lite.py <wrapped_routines_file> <lapack_dir> Typical invocation: @@ -13,6 +13,7 @@ Requires the following to be on the path: """ import sys import os +import re import subprocess import shutil @@ -33,11 +34,14 @@ F2C_ARGS = ['-A', '-Nx800'] # The header to add to the top of the f2c_*.c file. Note that dlamch_() calls # will be replaced by the macros below by clapack_scrub.scrub_source() -HEADER = '''\ +HEADER_BLURB = '''\ /* -NOTE: This is generated code. Look in Misc/lapack_lite for information on - remaking this file. -*/ + * NOTE: This is generated code. Look in numpy/linalg/lapack_lite for + * information on remaking this file. 
+ */ +''' + +HEADER = HEADER_BLURB + '''\ #include "f2c.h" #ifdef HAVE_CONFIG @@ -279,6 +283,52 @@ def ensure_executable(name): except: raise SystemExit(name + ' not found') +def create_name_header(output_dir): + routine_re = re.compile(r'^ (subroutine|.* function)\s+(\w+)\(.*$', + re.I) + extern_re = re.compile(r'^extern [a-z]+ ([a-z0-9_]+)\(.*$') + + # BLAS/LAPACK symbols + symbols = set(['xerbla']) + for fn in os.listdir(output_dir): + fn = os.path.join(output_dir, fn) + + if not fn.endswith('.f'): + continue + + with open(fn, 'r') as f: + for line in f: + m = routine_re.match(line) + if m: + symbols.add(m.group(2).lower()) + + # f2c symbols + f2c_symbols = set() + with open('f2c.h', 'r') as f: + for line in f: + m = extern_re.match(line) + if m: + f2c_symbols.add(m.group(1)) + + with open(os.path.join(output_dir, 'lapack_lite_names.h'), 'w') as f: + f.write(HEADER_BLURB) + f.write( + "/*\n" + " * This file renames all BLAS/LAPACK and f2c symbols to avoid\n" + " * dynamic symbol name conflicts, in cases where e.g.\n" + " * integer sizes do not match with 'standard' ABI.\n" + " */\n") + + # Rename BLAS/LAPACK symbols + for name in sorted(symbols): + f.write("#define %s_ BLAS_FUNC(%s)\n" % (name, name)) + + # Rename also symbols that f2c exports itself + f.write("\n" + "/* Symbols exported by f2c.c */\n") + for name in sorted(f2c_symbols): + f.write("#define %s numpy_lapack_lite_%s\n" % (name, name)) + def main(): if len(sys.argv) != 3: print(__doc__) @@ -328,12 +378,14 @@ def main(): print() + create_name_header(output_dir) + for fname in os.listdir(output_dir): - if fname.endswith('.c'): + if fname.endswith('.c') or fname == 'lapack_lite_names.h': print('Copying ' + fname) shutil.copy( os.path.join(output_dir, fname), - os.path.dirname(__file__), + os.path.abspath(os.path.dirname(__file__)), ) diff --git a/numpy/linalg/lapack_lite/python_xerbla.c b/numpy/linalg/lapack_lite/python_xerbla.c index 4dbb92e1f..fe2f718b2 100644 --- 
a/numpy/linalg/lapack_lite/python_xerbla.c +++ b/numpy/linalg/lapack_lite/python_xerbla.c @@ -2,9 +2,6 @@ #include "numpy/npy_common.h" #include "npy_cblas.h" -#undef c_abs -#include "f2c.h" - /* From the original manpage: -------------------------- diff --git a/numpy/linalg/linalg.py b/numpy/linalg/linalg.py index 15615e1a3..7c0b6facf 100644 --- a/numpy/linalg/linalg.py +++ b/numpy/linalg/linalg.py @@ -772,7 +772,7 @@ def cholesky(a): return wrap(r.astype(result_t, copy=False)) -# QR decompostion +# QR decomposition def _qr_dispatcher(a, mode=None): return (a,) diff --git a/numpy/linalg/setup.py b/numpy/linalg/setup.py index 66eed41b0..acfab0a68 100644 --- a/numpy/linalg/setup.py +++ b/numpy/linalg/setup.py @@ -38,8 +38,13 @@ def configuration(parent_package='', top_path=None): def calc_info(self): info = {'language': 'c'} if sys.maxsize > 2**32: - # Build lapack-lite in 64-bit integer mode - info['define_macros'] = [('HAVE_BLAS_ILP64', None)] + # Build lapack-lite in 64-bit integer mode. + # The suffix is arbitrary (lapack_lite symbols follow it), + # but use the "64_" convention here. 
+ info['define_macros'] = [ + ('HAVE_BLAS_ILP64', None), + ('BLAS_SYMBOL_SUFFIX', '64_') + ] self.set_info(**info) lapack_info = numpy_linalg_lapack_lite().get_info(2) diff --git a/numpy/linalg/umath_linalg.c.src b/numpy/linalg/umath_linalg.c.src index 00ff6b7a8..e66fc9916 100644 --- a/numpy/linalg/umath_linalg.c.src +++ b/numpy/linalg/umath_linalg.c.src @@ -1841,7 +1841,7 @@ typedef struct geev_params_struct { void *WR; /* RWORK in complex versions, REAL W buffer for (sd)geev*/ void *WI; void *VLR; /* REAL VL buffers for _geev where _ is s, d */ - void *VRR; /* REAL VR buffers for _geev hwere _ is s, d */ + void *VRR; /* REAL VR buffers for _geev where _ is s, d */ void *WORK; void *W; /* final w */ void *VL; /* final vl */ @@ -3590,7 +3590,7 @@ GUFUNC_DESCRIPTOR_t gufunc_descriptors [] = { } }; -static void +static int addUfuncs(PyObject *dictionary) { PyObject *f; int i; @@ -3609,12 +3609,19 @@ addUfuncs(PyObject *dictionary) { d->doc, 0, d->signature); - PyDict_SetItemString(dictionary, d->name, f); + if (f == NULL) { + return -1; + } #if 0 dump_ufunc_object((PyUFuncObject*) f); #endif + int ret = PyDict_SetItemString(dictionary, d->name, f); Py_DECREF(f); + if (ret < 0) { + return -1; + } } + return 0; } @@ -3654,17 +3661,22 @@ PyObject *PyInit__umath_linalg(void) import_ufunc(); d = PyModule_GetDict(m); + if (d == NULL) { + return NULL; + } version = PyString_FromString(umath_linalg_version_string); - PyDict_SetItemString(d, "__version__", version); + if (version == NULL) { + return NULL; + } + int ret = PyDict_SetItemString(d, "__version__", version); Py_DECREF(version); + if (ret < 0) { + return NULL; + } /* Load the ufunc operators into the module's namespace */ - addUfuncs(d); - - if (PyErr_Occurred()) { - PyErr_SetString(PyExc_RuntimeError, - "cannot load _umath_linalg module."); + if (addUfuncs(d) < 0) { return NULL; } diff --git a/numpy/ma/core.py b/numpy/ma/core.py index d51d8e6ec..fcbd1d8d0 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ 
-20,6 +20,7 @@ Released for unlimited redistribution. """ # pylint: disable-msg=E1002 +import builtins import sys import operator import warnings @@ -27,11 +28,6 @@ import textwrap import re from functools import reduce -if sys.version_info[0] >= 3: - import builtins -else: - import __builtin__ as builtins - import numpy as np import numpy.core.umath as umath import numpy.core.numerictypes as ntypes @@ -99,7 +95,7 @@ def _deprecate_argsort_axis(arr): The array which argsort was called on np.ma.argsort has a long-term bug where the default of the axis argument - is wrong (gh-8701), which now must be kept for backwards compatibiity. + is wrong (gh-8701), which now must be kept for backwards compatibility. Thankfully, this only makes a difference when arrays are 2- or more- dimensional, so we only need a warning then. """ @@ -2826,8 +2822,8 @@ class MaskedArray(ndarray): elif isinstance(data, (tuple, list)): try: # If data is a sequence of masked array - mask = np.array([getmaskarray(m) for m in data], - dtype=mdtype) + mask = np.array([getmaskarray(np.asanyarray(m, dtype=mdtype)) + for m in data], dtype=mdtype) except ValueError: # If data is nested mask = nomask @@ -6394,10 +6390,6 @@ class MaskedConstant(MaskedArray): def __str__(self): return str(masked_print_option._display) - if sys.version_info.major < 3: - def __unicode__(self): - return unicode(masked_print_option._display) - def __repr__(self): if self is MaskedConstant.__singleton: return 'masked' diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py index 458b78580..bc8423188 100644 --- a/numpy/ma/tests/test_core.py +++ b/numpy/ma/tests/test_core.py @@ -934,7 +934,7 @@ class TestMaskedArray: def test_object_with_array(self): mx1 = masked_array([1.], mask=[True]) mx2 = masked_array([1., 2.]) - mx = masked_array([mx1, mx2], mask=[False, True]) + mx = masked_array([mx1, mx2], mask=[False, True], dtype=object) assert_(mx[0] is mx1) assert_(mx[1] is not mx2) assert_(np.all(mx[1].data == 
mx2.data)) @@ -5006,11 +5006,6 @@ class TestMaskedConstant: assert_raises(MaskError, operator.setitem, a_i, (), np.ma.masked) assert_raises(MaskError, int, np.ma.masked) - @pytest.mark.skipif(sys.version_info.major == 3, - reason="long doesn't exist in Python 3") - def test_coercion_long(self): - assert_raises(MaskError, long, np.ma.masked) - def test_coercion_float(self): a_f = np.zeros((), float) assert_warns(UserWarning, operator.setitem, a_f, (), np.ma.masked) diff --git a/numpy/polynomial/polyutils.py b/numpy/polynomial/polyutils.py index b65e88a83..9b8e9fc42 100644 --- a/numpy/polynomial/polyutils.py +++ b/numpy/polynomial/polyutils.py @@ -540,17 +540,15 @@ def _valnd(val_f, c, *args): c, args : See the ``<type>val<n>d`` functions for more detail """ - try: - args = tuple(np.array(args, copy=False)) - except Exception: - # preserve the old error message - if len(args) == 2: + args = [np.asanyarray(a) for a in args] + shape0 = args[0].shape + if not all((a.shape == shape0 for a in args[1:])): + if len(args) == 3: raise ValueError('x, y, z are incompatible') - elif len(args) == 3: + elif len(args) == 2: raise ValueError('x, y are incompatible') else: raise ValueError('ordinates are incompatible') - it = iter(args) x0 = next(it) diff --git a/numpy/polynomial/tests/test_polynomial.py b/numpy/polynomial/tests/test_polynomial.py index c90075dfe..50973c480 100644 --- a/numpy/polynomial/tests/test_polynomial.py +++ b/numpy/polynomial/tests/test_polynomial.py @@ -7,7 +7,7 @@ import numpy as np import numpy.polynomial.polynomial as poly from numpy.testing import ( assert_almost_equal, assert_raises, assert_equal, assert_, - assert_warns, assert_array_equal) + assert_warns, assert_array_equal, assert_raises_regex) def trim(x): @@ -227,7 +227,8 @@ class TestEvaluation: y1, y2, y3 = self.y #test exceptions - assert_raises(ValueError, poly.polyval2d, x1, x2[:2], self.c2d) + assert_raises_regex(ValueError, 'incompatible', + poly.polyval2d, x1, x2[:2], self.c2d) #test values 
tgt = y1*y2 @@ -244,7 +245,8 @@ class TestEvaluation: y1, y2, y3 = self.y #test exceptions - assert_raises(ValueError, poly.polyval3d, x1, x2, x3[:2], self.c3d) + assert_raises_regex(ValueError, 'incompatible', + poly.polyval3d, x1, x2, x3[:2], self.c3d) #test values tgt = y1*y2*y3 diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx index 0ba403138..32eda25f7 100644 --- a/numpy/random/_generator.pyx +++ b/numpy/random/_generator.pyx @@ -2919,7 +2919,6 @@ cdef class Generator: it = np.PyArray_MultiIterNew2(p_arr, n_arr) randoms = <np.ndarray>np.empty(it.shape, np.int64) - randoms_data = <np.int64_t *>np.PyArray_DATA(randoms) cnt = np.PyArray_SIZE(randoms) it = np.PyArray_MultiIterNew3(randoms, p_arr, n_arr) @@ -4017,7 +4016,7 @@ cdef class Generator: total, num_colors, colors_ptr, nsamp, num_variates, variates_ptr) if result == -1: - raise MemoryError("Insufficent memory for multivariate_" + raise MemoryError("Insufficient memory for multivariate_" "hypergeometric with method='count' and " "sum(colors)=%d" % total) else: diff --git a/numpy/random/mtrand.pyx b/numpy/random/mtrand.pyx index a4d409f37..95921a8ea 100644 --- a/numpy/random/mtrand.pyx +++ b/numpy/random/mtrand.pyx @@ -3356,7 +3356,6 @@ cdef class RandomState: it = np.PyArray_MultiIterNew2(p_arr, n_arr) randoms = <np.ndarray>np.empty(it.shape, int) - randoms_data = <long *>np.PyArray_DATA(randoms) cnt = np.PyArray_SIZE(randoms) it = np.PyArray_MultiIterNew3(randoms, p_arr, n_arr) diff --git a/numpy/random/src/distributions/random_mvhg_count.c b/numpy/random/src/distributions/random_mvhg_count.c index 7cbed1f9e..1d4ed978e 100644 --- a/numpy/random/src/distributions/random_mvhg_count.c +++ b/numpy/random/src/distributions/random_mvhg_count.c @@ -1,8 +1,8 @@ +#include "numpy/random/distributions.h" #include <stdint.h> #include <stdlib.h> #include <stdbool.h> -#include "numpy/random/distributions.h" /* * random_multivariate_hypergeometric_count diff --git 
a/numpy/random/src/distributions/random_mvhg_marginals.c b/numpy/random/src/distributions/random_mvhg_marginals.c index 809d129de..689a85671 100644 --- a/numpy/random/src/distributions/random_mvhg_marginals.c +++ b/numpy/random/src/distributions/random_mvhg_marginals.c @@ -1,9 +1,9 @@ +#include "numpy/random/distributions.h" #include <stdint.h> #include <stddef.h> #include <stdbool.h> #include <math.h> -#include "numpy/random/distributions.h" #include "logfactorial.h" diff --git a/numpy/random/tests/test_generator_mt19937_regressions.py b/numpy/random/tests/test_generator_mt19937_regressions.py index fb0aac335..f0b502d06 100644 --- a/numpy/random/tests/test_generator_mt19937_regressions.py +++ b/numpy/random/tests/test_generator_mt19937_regressions.py @@ -55,9 +55,10 @@ class TestRegression: [1, (2, 2), (3, 3), None], [(1, 1), 2, 3, None]]: mt19937 = Generator(MT19937(12345)) - shuffled = list(t) + shuffled = np.array(t, dtype=object) mt19937.shuffle(shuffled) - assert_array_equal(shuffled, [t[2], t[0], t[3], t[1]]) + expected = np.array([t[2], t[0], t[3], t[1]], dtype=object) + assert_array_equal(np.array(shuffled, dtype=object), expected) def test_call_within_randomstate(self): # Check that custom BitGenerator does not call into global state @@ -117,7 +118,7 @@ class TestRegression: # a segfault on garbage collection. 
# See gh-7719 mt19937 = Generator(MT19937(1234)) - a = np.array([np.arange(1), np.arange(4)]) + a = np.array([np.arange(1), np.arange(4)], dtype=object) for _ in range(1000): mt19937.shuffle(a) diff --git a/numpy/random/tests/test_randomstate_regression.py b/numpy/random/tests/test_randomstate_regression.py index 3be9edf02..1d8a0ed5a 100644 --- a/numpy/random/tests/test_randomstate_regression.py +++ b/numpy/random/tests/test_randomstate_regression.py @@ -68,7 +68,8 @@ class TestRegression: random.seed(12345) shuffled = list(t) random.shuffle(shuffled) - assert_array_equal(shuffled, [t[0], t[3], t[1], t[2]]) + expected = np.array([t[0], t[3], t[1], t[2]], dtype=object) + assert_array_equal(np.array(shuffled, dtype=object), expected) def test_call_within_randomstate(self): # Check that custom RandomState does not call into global state @@ -128,7 +129,7 @@ class TestRegression: # a segfault on garbage collection. # See gh-7719 random.seed(1234) - a = np.array([np.arange(1), np.arange(4)]) + a = np.array([np.arange(1), np.arange(4)], dtype=object) for _ in range(1000): random.shuffle(a) diff --git a/numpy/random/tests/test_regression.py b/numpy/random/tests/test_regression.py index 7d77a31d8..cd8f3891c 100644 --- a/numpy/random/tests/test_regression.py +++ b/numpy/random/tests/test_regression.py @@ -64,7 +64,8 @@ class TestRegression: np.random.seed(12345) shuffled = list(t) random.shuffle(shuffled) - assert_array_equal(shuffled, [t[0], t[3], t[1], t[2]]) + expected = np.array([t[0], t[3], t[1], t[2]], dtype=object) + assert_array_equal(np.array(shuffled, dtype=object), expected) def test_call_within_randomstate(self): # Check that custom RandomState does not call into global state @@ -124,7 +125,7 @@ class TestRegression: # a segfault on garbage collection. 
# See gh-7719 np.random.seed(1234) - a = np.array([np.arange(1), np.arange(4)]) + a = np.array([np.arange(1), np.arange(4)], dtype=object) for _ in range(1000): np.random.shuffle(a) diff --git a/numpy/testing/_private/nosetester.py b/numpy/testing/_private/nosetester.py index 4ca5267ce..45a582bb6 100644 --- a/numpy/testing/_private/nosetester.py +++ b/numpy/testing/_private/nosetester.py @@ -448,15 +448,6 @@ class NoseTester: warnings.simplefilter("always") from ...distutils import cpuinfo sup.filter(category=UserWarning, module=cpuinfo) - # See #7949: Filter out deprecation warnings due to the -3 flag to - # python 2 - if sys.version_info.major == 2 and sys.py3kwarning: - # This is very specific, so using the fragile module filter - # is fine - import threading - sup.filter(DeprecationWarning, message=r"buffer\(\) not supported in 3\.x") - sup.filter(DeprecationWarning, message=r"CObject type is not supported in 3\.x") - sup.filter(DeprecationWarning, message=r"comparing unequal types not supported in 3\.x") # Filter out some deprecation warnings inside nose 1.3.7 when run # on python 3.5b2. See # https://github.com/nose-devs/nose/issues/929 diff --git a/numpy/testing/_private/parameterized.py b/numpy/testing/_private/parameterized.py index dbfb4807c..086b292e2 100644 --- a/numpy/testing/_private/parameterized.py +++ b/numpy/testing/_private/parameterized.py @@ -35,7 +35,7 @@ import sys import inspect import warnings from functools import wraps -from types import MethodType as MethodType +from types import MethodType from collections import namedtuple try: @@ -45,30 +45,6 @@ except ImportError: from unittest import TestCase -PY2 = sys.version_info[0] == 2 - - -if PY2: - from types import InstanceType - lzip = zip - text_type = unicode - bytes_type = str - string_types = basestring, - def make_method(func, instance, type): - return MethodType(func, instance, type) -else: - # Python 3 doesn't have an InstanceType, so just use a dummy type. 
- class InstanceType(): - pass - lzip = lambda *a: list(zip(*a)) - text_type = str - string_types = str, - bytes_type = bytes - def make_method(func, instance, type): - if instance is None: - return func - return MethodType(func, instance) - _param = namedtuple("param", "args kwargs") class param(_param): @@ -122,7 +98,7 @@ class param(_param): """ if isinstance(args, param): return args - elif isinstance(args, string_types): + elif isinstance(args, (str,)): args = (args, ) try: return cls(*args) @@ -179,7 +155,7 @@ def parameterized_argument_value_pairs(func, p): named_args = argspec.args[arg_offset:] - result = lzip(named_args, p.args) + result = list(zip(named_args, p.args)) named_args = argspec.args[len(result) + arg_offset:] varargs = p.args[len(result):] @@ -214,11 +190,11 @@ def short_repr(x, n=64): """ x_repr = repr(x) - if isinstance(x_repr, bytes_type): + if isinstance(x_repr, bytes): try: - x_repr = text_type(x_repr, "utf-8") + x_repr = str(x_repr, "utf-8") except UnicodeDecodeError: - x_repr = text_type(x_repr, "latin1") + x_repr = str(x_repr, "latin1") if len(x_repr) > n: x_repr = x_repr[:n//2] + "..." + x_repr[len(x_repr) - n//2:] return x_repr @@ -246,7 +222,7 @@ def default_doc_func(func, num, p): def default_name_func(func, num, p): base_name = func.__name__ name_suffix = "_%s" %(num, ) - if len(p.args) > 0 and isinstance(p.args[0], string_types): + if len(p.args) > 0 and isinstance(p.args[0], (str,)): name_suffix += "_" + parameterized.to_safe_name(p.args[0]) return base_name + name_suffix @@ -324,15 +300,6 @@ class parameterized: @wraps(test_func) def wrapper(test_self=None): test_cls = test_self and type(test_self) - if test_self is not None: - if issubclass(test_cls, InstanceType): - raise TypeError(( - "@parameterized can't be used with old-style classes, but " - "%r has an old-style class. 
Consider using a new-style " - "class, or '@parameterized.expand' " - "(see http://stackoverflow.com/q/54867/71522 for more " - "information on old-style classes)." - ) %(test_self, )) original_doc = wrapper.__doc__ for num, args in enumerate(wrapper.parameterized_input): @@ -365,15 +332,7 @@ class parameterized: # Python 3 doesn't let us pull the function out of a bound method. unbound_func = nose_func if test_self is not None: - # Under nose on Py2 we need to return an unbound method to make - # sure that the `self` in the method is properly shared with the - # `self` used in `setUp` and `tearDown`. But only there. Everyone - # else needs a bound method. - func_self = ( - None if PY2 and detect_runner() == "nose" else - test_self - ) - nose_func = make_method(nose_func, func_self, type(test_self)) + nose_func = MethodType(nose_func, test_self) return unbound_func, (nose_func, ) + p.args + (p.kwargs or {}, ) def assert_not_in_testcase_subclass(self): diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index 914491b71..1b88d91f6 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -21,10 +21,7 @@ from numpy.core import( intp, float32, empty, arange, array_repr, ndarray, isnat, array) import numpy.linalg.lapack_lite -if sys.version_info[0] >= 3: - from io import StringIO -else: - from StringIO import StringIO +from io import StringIO __all__ = [ 'assert_equal', 'assert_almost_equal', 'assert_approx_equal', @@ -1344,14 +1341,7 @@ def assert_raises_regex(exception_class, expected_regexp, *args, **kwargs): """ __tracebackhide__ = True # Hide traceback for py.test - - if sys.version_info.major >= 3: - funcname = _d.assertRaisesRegex - else: - # Only present in Python 2.7, missing from unittest in 2.6 - funcname = _d.assertRaisesRegexp - - return funcname(exception_class, expected_regexp, *args, **kwargs) + return _d.assertRaisesRegex(exception_class, expected_regexp, *args, **kwargs) def decorate_methods(cls, 
decorator, testmatch=None): @@ -2474,3 +2464,24 @@ def _get_mem_available(): return info['memfree'] + info['cached'] return None + + +def _no_tracing(func): + """ + Decorator to temporarily turn off tracing for the duration of a test. + Needed in tests that check refcounting, otherwise the tracing itself + influences the refcounts + """ + if not hasattr(sys, 'gettrace'): + return func + else: + @wraps(func) + def wrapper(*args, **kwargs): + original_trace = sys.gettrace() + try: + sys.settrace(None) + return func(*args, **kwargs) + finally: + sys.settrace(original_trace) + return wrapper + diff --git a/numpy/tests/test_public_api.py b/numpy/tests/test_public_api.py index 48dd42a9f..b4aa7ec3d 100644 --- a/numpy/tests/test_public_api.py +++ b/numpy/tests/test_public_api.py @@ -28,9 +28,6 @@ def check_dir(module, module_name=None): return results -@pytest.mark.skipif( - sys.version_info[0] < 3, - reason="NumPy exposes slightly different functions on Python 2") def test_numpy_namespace(): # None of these objects are publicly documented to be part of the main # NumPy namespace (some are useful though, others need to be cleaned up) diff --git a/numpy/tests/test_reloading.py b/numpy/tests/test_reloading.py index 2b5a324ba..a6d2e62a9 100644 --- a/numpy/tests/test_reloading.py +++ b/numpy/tests/test_reloading.py @@ -3,10 +3,7 @@ import sys from numpy.testing import assert_raises, assert_, assert_equal from numpy.compat import pickle -if sys.version_info[:2] >= (3, 4): - from importlib import reload -else: - from imp import reload +from importlib import reload def test_numpy_reloading(): # gh-7844. Also check that relevant globals retain their identity. diff --git a/numpy/tests/test_warnings.py b/numpy/tests/test_warnings.py index 48896f4b7..ff75681dc 100644 --- a/numpy/tests/test_warnings.py +++ b/numpy/tests/test_warnings.py @@ -5,72 +5,71 @@ all of these occurrences but should catch almost all. 
import sys import pytest -if sys.version_info >= (3, 4): - from pathlib import Path - import ast - import tokenize - import numpy +from pathlib import Path +import ast +import tokenize +import numpy - class ParseCall(ast.NodeVisitor): - def __init__(self): - self.ls = [] +class ParseCall(ast.NodeVisitor): + def __init__(self): + self.ls = [] - def visit_Attribute(self, node): - ast.NodeVisitor.generic_visit(self, node) - self.ls.append(node.attr) + def visit_Attribute(self, node): + ast.NodeVisitor.generic_visit(self, node) + self.ls.append(node.attr) - def visit_Name(self, node): - self.ls.append(node.id) + def visit_Name(self, node): + self.ls.append(node.id) - class FindFuncs(ast.NodeVisitor): - def __init__(self, filename): - super().__init__() - self.__filename = filename +class FindFuncs(ast.NodeVisitor): + def __init__(self, filename): + super().__init__() + self.__filename = filename - def visit_Call(self, node): - p = ParseCall() - p.visit(node.func) - ast.NodeVisitor.generic_visit(self, node) + def visit_Call(self, node): + p = ParseCall() + p.visit(node.func) + ast.NodeVisitor.generic_visit(self, node) - if p.ls[-1] == 'simplefilter' or p.ls[-1] == 'filterwarnings': - if node.args[0].s == "ignore": - raise AssertionError( - "ignore filter should not be used; found in " - "{} on line {}".format(self.__filename, node.lineno)) + if p.ls[-1] == 'simplefilter' or p.ls[-1] == 'filterwarnings': + if node.args[0].s == "ignore": + raise AssertionError( + "ignore filter should not be used; found in " + "{} on line {}".format(self.__filename, node.lineno)) - if p.ls[-1] == 'warn' and ( - len(p.ls) == 1 or p.ls[-2] == 'warnings'): + if p.ls[-1] == 'warn' and ( + len(p.ls) == 1 or p.ls[-2] == 'warnings'): - if "testing/tests/test_warnings.py" == self.__filename: - # This file - return + if "testing/tests/test_warnings.py" == self.__filename: + # This file + return - # See if stacklevel exists: - if len(node.args) == 3: - return - args = {kw.arg for kw in 
node.keywords} - if "stacklevel" in args: - return - raise AssertionError( - "warnings should have an appropriate stacklevel; found in " - "{} on line {}".format(self.__filename, node.lineno)) + # See if stacklevel exists: + if len(node.args) == 3: + return + args = {kw.arg for kw in node.keywords} + if "stacklevel" in args: + return + raise AssertionError( + "warnings should have an appropriate stacklevel; found in " + "{} on line {}".format(self.__filename, node.lineno)) - @pytest.mark.slow - def test_warning_calls(): - # combined "ignore" and stacklevel error - base = Path(numpy.__file__).parent +@pytest.mark.slow +def test_warning_calls(): + # combined "ignore" and stacklevel error + base = Path(numpy.__file__).parent - for path in base.rglob("*.py"): - if base / "testing" in path.parents: - continue - if path == base / "__init__.py": - continue - if path == base / "random" / "__init__.py": - continue - # use tokenize to auto-detect encoding on systems where no - # default encoding is defined (e.g. LANG='C') - with tokenize.open(str(path)) as file: - tree = ast.parse(file.read()) - FindFuncs(path).visit(tree) + for path in base.rglob("*.py"): + if base / "testing" in path.parents: + continue + if path == base / "__init__.py": + continue + if path == base / "random" / "__init__.py": + continue + # use tokenize to auto-detect encoding on systems where no + # default encoding is defined (e.g. 
LANG='C') + with tokenize.open(str(path)) as file: + tree = ast.parse(file.read()) + FindFuncs(path).visit(tree) diff --git a/runtests.py b/runtests.py index d36e5bd39..07b3f6c87 100755 --- a/runtests.py +++ b/runtests.py @@ -53,6 +53,7 @@ else: import sys import os +import builtins # In case we are run from the source directory, we don't want to import the # project from there: @@ -181,7 +182,7 @@ def main(argv): sys.modules['__main__'] = types.ModuleType('__main__') ns = dict(__name__='__main__', __file__=extra_argv[0]) - exec_(script, ns) + exec(script, ns) sys.exit(0) else: import code @@ -479,25 +480,5 @@ def lcov_generate(): print("HTML output generated under build/lcov/") -# -# Python 3 support -# - -if sys.version_info[0] >= 3: - import builtins - exec_ = getattr(builtins, "exec") -else: - def exec_(code, globs=None, locs=None): - """Execute code in a namespace.""" - if globs is None: - frame = sys._getframe(1) - globs = frame.f_globals - if locs is None: - locs = frame.f_locals - del frame - elif locs is None: - locs = globs - exec("""exec code in globs, locs""") - if __name__ == "__main__": main(argv=sys.argv[1:]) @@ -407,7 +407,7 @@ def setup_package(): os.chdir(src_path) sys.path.insert(0, src_path) - # Rewrite the version file everytime + # Rewrite the version file every time write_version_py() # The f2py scripts that will be installed diff --git a/shippable.yml b/shippable.yml index 4313a6de2..bea2a2206 100644 --- a/shippable.yml +++ b/shippable.yml @@ -21,9 +21,10 @@ runtime: build: ci: - # install dependencies + # install dependencies and newer toolchain for gfortran5 + - sudo add-apt-repository ppa:ubuntu-toolchain-r/test - sudo apt-get update - - sudo apt-get install gcc gfortran + - sudo apt-get install gcc gfortran libgfortran5 - target=$(python tools/openblas_support.py) - ls -lR "${target}" - sudo cp -r "${target}"/lib/* /usr/lib @@ -48,7 +49,7 @@ build: - extra_path=$(printf "%s:" "${extra_directories[@]}") - export 
PATH="${extra_path}${PATH}" # check OpenBLAS version - - python tools/openblas_support.py --check_version 0.3.7 + - python tools/openblas_support.py --check_version 0.3.8 # run the test suite - python runtests.py -n --debug-info --show-build-log -- -rsx --junit-xml=$SHIPPABLE_REPO_DIR/shippable/testresults/tests.xml -n 2 --durations=10 diff --git a/test_requirements.txt b/test_requirements.txt index 8ca2ce87b..3aea49079 100644 --- a/test_requirements.txt +++ b/test_requirements.txt @@ -1,5 +1,5 @@ cython==0.29.14 -pytest==5.3.2 +pytest==5.3.3 pytz==2019.3 pytest-cov==2.8.1 pickle5; python_version == '3.7' diff --git a/tools/changelog.py b/tools/changelog.py index 00ffdd9eb..2a0c4da43 100755 --- a/tools/changelog.py +++ b/tools/changelog.py @@ -36,13 +36,11 @@ From the bash command line with $GITHUB token:: import os import sys import re -import codecs from git import Repo from github import Github -if sys.version_info.major < 3: - UTF8Writer = codecs.getwriter('utf8') - sys.stdout = UTF8Writer(sys.stdout) +if sys.version_info[:2] < (3, 6): + raise RuntimeError("Python version must be >= 3.6") this_repo = Repo(os.path.join(os.path.dirname(__file__), "..")) diff --git a/tools/npy_tempita/__init__.py b/tools/npy_tempita/__init__.py index 97905a16e..ec4b0d5e7 100644 --- a/tools/npy_tempita/__init__.py +++ b/tools/npy_tempita/__init__.py @@ -317,7 +317,7 @@ class Template: arg0 = e_value.args[0] else: arg0 = coerce_text(e_value) - e_value.args = (self._add_line_info(arg0, pos),) + e_value.args = (self._add_line_info(arg0, pos),) if PY3: raise e_value else: @@ -461,14 +461,11 @@ def html_quote(value, force=True): return '' if not isinstance(value, basestring_): value = coerce_text(value) - if sys.version >= "3" and isinstance(value, bytes): + if isinstance(value, bytes): value = html_escape(value.decode('latin1'), 1) value = value.encode('latin1') else: value = html_escape(value, 1) - if sys.version < "3": - if is_unicode(value): - value = value.encode('ascii', 
'xmlcharrefreplace') return value @@ -639,9 +636,6 @@ class _Empty: def __bool__(self): return False - if sys.version < "3": - __nonzero__ = __bool__ - Empty = _Empty() del _Empty @@ -1289,7 +1283,7 @@ def fill_command(args=None): template_content = sys.stdin.read() template_name = '<stdin>' else: - with open(template_name, 'rb', encoding="latin-1") as f: + with open(template_name, 'rb', encoding="latin-1") as f: template_content = f.read() if options.use_html: TemplateClass = HTMLTemplate @@ -1298,7 +1292,7 @@ def fill_command(args=None): template = TemplateClass(template_content, name=template_name) result = template.substitute(vars) if options.output: - with open(options.output, 'wb') as f: + with open(options.output, 'wb') as f: f.write(result) else: sys.stdout.write(result) diff --git a/tools/npy_tempita/_looper.py b/tools/npy_tempita/_looper.py index 23121fe9e..8a1156678 100644 --- a/tools/npy_tempita/_looper.py +++ b/tools/npy_tempita/_looper.py @@ -61,9 +61,6 @@ class looper_iter: self.pos += 1 return result - if sys.version < "3": - next = __next__ - class loop_pos: @@ -94,9 +91,6 @@ class loop_pos: except IndexError: return None - if sys.version < "3": - next = __next__ - @property def previous(self): if self.pos == 0: diff --git a/tools/openblas_support.py b/tools/openblas_support.py index 2c1b70d6f..3708033d3 100644 --- a/tools/openblas_support.py +++ b/tools/openblas_support.py @@ -15,11 +15,11 @@ from tempfile import mkstemp, gettempdir import zipfile import tarfile -OPENBLAS_V = 'v0.3.7' -OPENBLAS_LONG = 'v0.3.7' +OPENBLAS_V = 'v0.3.8' +OPENBLAS_LONG = 'v0.3.5-605-gc815b8fb' # the 0.3.5 is misleading BASE_LOC = '' RACKSPACE = 'https://3f23b170c54c2533c070-1c8a9b3114517dc5fe17b7c3f8c63a43.ssl.cf2.rackcdn.com' -ARCHITECTURES = ['', 'windows', 'darwin', 'arm', 'x86', 'ppc64'] +ARCHITECTURES = ['', 'windows', 'darwin', 'aarch64', 'x86', 'ppc64le', 's390x'] IS_32BIT = sys.maxsize < 2**32 def get_arch(): @@ -27,17 +27,12 @@ def get_arch(): ret = 'windows' 
elif platform.system() == 'Darwin': ret = 'darwin' - # Python3 returns a named tuple, but Python2 does not, so we are stuck - elif 'arm' in os.uname()[-1]: - ret = 'arm'; - elif 'aarch64' in os.uname()[-1]: - ret = 'arm'; - elif 'x86' in os.uname()[-1]: + elif 'x86' in platform.uname().machine: + # What do 32 bit machines report? + # If they are a docker, they report x86_64 ret = 'x86' - elif 'ppc64' in os.uname()[-1]: - ret = 'ppc64' else: - ret = '' + ret = platform.uname().machine assert ret in ARCHITECTURES return ret @@ -51,21 +46,10 @@ def get_ilp64(): def download_openblas(target, arch, ilp64): fnsuffix = {None: "", "64_": "64_"}[ilp64] filename = '' - if arch == 'arm': - # ARMv8 OpenBLAS built using script available here: - # https://github.com/tylerjereddy/openblas-static-gcc/tree/master/ARMv8 - # build done on GCC compile farm machine named gcc115 - # tarball uploaded manually to an unshared Dropbox location - filename = ('https://www.dropbox.com/s/vdeckao4omss187/' - 'openblas{}-{}-armv8.tar.gz?dl=1'.format(fnsuffix, OPENBLAS_V)) + if arch in ('aarch64', 'ppc64le', 's390x'): + filename = '{0}/openblas{1}-{2}-manylinux2014_{3}.tar.gz'.format( + RACKSPACE, fnsuffix, OPENBLAS_LONG, arch) typ = 'tar.gz' - elif arch == 'ppc64': - # build script for POWER8 OpenBLAS available here: - # https://github.com/tylerjereddy/openblas-static-gcc/blob/master/power8 - # built on GCC compile farm machine named gcc112 - # manually uploaded tarball to an unshared Dropbox location - filename = ('https://www.dropbox.com/s/yt0d2j86x1j8nh1/' - 'openblas{}-{}-ppc64le-power8.tar.gz?dl=1'.format(fnsuffix, OPENBLAS_V)) typ = 'tar.gz' elif arch == 'darwin': filename = '{0}/openblas{1}-{2}-macosx_10_9_x86_64-gf_1becaaa.tar.gz'.format( @@ -78,7 +62,7 @@ def download_openblas(target, arch, ilp64): suffix = 'win_amd64-gcc_7_1_0.zip' filename = '{0}/openblas{1}-{2}-{3}'.format(RACKSPACE, fnsuffix, OPENBLAS_LONG, suffix) typ = 'zip' - elif arch == 'x86': + elif 'x86' in arch: if IS_32BIT: 
suffix = 'manylinux1_i686.tar.gz' else: diff --git a/tools/pypy-test.sh b/tools/pypy-test.sh index 33a97ad17..1ce2bdf9d 100755 --- a/tools/pypy-test.sh +++ b/tools/pypy-test.sh @@ -6,7 +6,6 @@ set -o pipefail # Print expanded commands set -x -sudo apt-get -yq update sudo apt-get -yq install libatlas-base-dev liblapack-dev gfortran-5 F77=gfortran-5 F90=gfortran-5 \ diff --git a/tools/refguide_check.py b/tools/refguide_check.py index 3b587b77b..ad3f93c42 100644 --- a/tools/refguide_check.py +++ b/tools/refguide_check.py @@ -195,7 +195,7 @@ def find_names(module, names_dict): names_dict : dict Dictionary which contains module name as key and a set of found function names and directives as value - + Returns ------- None @@ -229,12 +229,12 @@ def find_names(module, names_dict): def get_all_dict(module): """ Return a copy of the __all__ dict with irrelevant items removed. - + Parameters ---------- module : ModuleType The module whose __all__ dict has to be processed - + Returns ------- deprecated : list @@ -242,7 +242,7 @@ def get_all_dict(module): not_deprecated : list List of non callable or non deprecated sub modules others : list - List of remaining types of sub modules + List of remaining types of sub modules """ if hasattr(module, "__all__"): all_dict = copy.deepcopy(module.__all__) @@ -863,7 +863,7 @@ def check_doctests(module, verbose, ns=None, ns : dict Name space of module dots : bool - + doctest_warnings : bool Returns @@ -934,7 +934,7 @@ def check_doctests_testfile(fname, verbose, ns=None, ns : dict Name space - + dots : bool doctest_warnings : bool @@ -978,12 +978,8 @@ def check_doctests_testfile(fname, verbose, ns=None, return results full_name = fname - if sys.version_info.major <= 2: - with open(fname) as f: - text = f.read() - else: - with open(fname, encoding='utf-8') as f: - text = f.read() + with open(fname, encoding='utf-8') as f: + text = f.read() PSEUDOCODE = set(['some_function', 'some_module', 'import example', 'ctypes.CDLL', # likely need 
compiling, skip it @@ -1116,7 +1112,7 @@ def init_matplotlib(): def main(argv): """ Validates the docstrings of all the pre decided set of - modules for errors and docstring standards. + modules for errors and docstring standards. """ parser = ArgumentParser(usage=__doc__.lstrip()) parser.add_argument("module_names", metavar="SUBMODULES", default=[], diff --git a/tools/travis-before-install.sh b/tools/travis-before-install.sh index 9f8b66a47..f53d18611 100755 --- a/tools/travis-before-install.sh +++ b/tools/travis-before-install.sh @@ -1,5 +1,9 @@ #!/bin/bash +# Exit the script immediately if a command exits with a non-zero status, +# and print commands and their arguments as they are executed. +set -ex + uname -a free -m df -h @@ -29,9 +33,22 @@ fi source venv/bin/activate python -V +gcc --version popd -pip install --upgrade pip setuptools -pip install -r test_requirements.txt +pip install --upgrade pip + +# 'setuptools', 'wheel' and 'cython' are build dependencies. This information +# is stored in pyproject.toml, but there is not yet a standard way to install +# those dependencies with, say, a pip command, so we'll just hard-code their +# installation here. We only need to install them separately for the cases +# where numpy is installed with setup.py, which is the case for the Travis jobs +# where the environment variables USE_DEBUG or USE_WHEEL are set. When pip is +# used to install numpy, pip gets the build dependencies from pyproject.toml. +# A specific version of cython is required, so we read the cython package +# requirement using `grep cython test_requirements.txt` instead of simply +# writing 'pip install setuptools wheel cython'. 
+pip install setuptools wheel `grep cython test_requirements.txt` + if [ -n "$USE_ASV" ]; then pip install asv; fi diff --git a/tools/travis-test.sh b/tools/travis-test.sh index da9195d4d..cd3ffe29a 100755 --- a/tools/travis-test.sh +++ b/tools/travis-test.sh @@ -48,7 +48,7 @@ setup_base() if [ -z "$USE_DEBUG" ]; then $PIP install -v . 2>&1 | tee log else - # Python3.5-dbg on travis seems to need this + # The job run with USE_DEBUG=1 on travis needs this. export CFLAGS=$CFLAGS" -Wno-maybe-uninitialized" $PYTHON setup.py build build_src --verbose-cfg build_ext --inplace 2>&1 | tee log fi @@ -65,7 +65,13 @@ setup_base() run_test() { - $PIP install -r test_requirements.txt + # Install the test dependencies. + # Clear PYTHONOPTIMIZE when running `pip install -r test_requirements.txt` + # because version 2.19 of pycparser (a dependency of one of the packages + # in test_requirements.txt) does not provide a wheel, and the source tar + # file does not install correctly when Python's optimization level is set + # to strip docstrings (see https://github.com/eliben/pycparser/issues/291). + PYTHONOPTIMIZE="" $PIP install -r test_requirements.txt if [ -n "$USE_DEBUG" ]; then export PYTHONPATH=$PWD @@ -135,16 +141,11 @@ run_test() fi } + export PYTHON export PIP -$PIP install setuptools if [ -n "$USE_WHEEL" ] && [ $# -eq 0 ]; then - # Build wheel - $PIP install wheel - # ensure that the pip / setuptools versions deployed inside - # the venv are recent enough - $PIP install -U virtualenv # ensure some warnings are not issued export CFLAGS=$CFLAGS" -Wno-sign-compare -Wno-unused-result" # adjust gcc flags if C coverage requested @@ -167,8 +168,6 @@ if [ -n "$USE_WHEEL" ] && [ $# -eq 0 ]; then run_test elif [ -n "$USE_SDIST" ] && [ $# -eq 0 ]; then - # use an up-to-date pip / setuptools inside the venv - $PIP install -U virtualenv # temporary workaround for sdist failures. $PYTHON -c "import fcntl; fcntl.fcntl(1, fcntl.F_SETFL, 0)" # ensure some warnings are not issued |