-rw-r--r--  .circleci/config.yml                                   |   2
-rw-r--r--  .travis.yml                                            |   1
-rw-r--r--  README.md                                              |   2
-rw-r--r--  benchmarks/benchmarks/bench_random.py                  |  15
-rw-r--r--  doc/RELEASE_WALKTHROUGH.rst.txt                        | 110
-rw-r--r--  doc/changelog/1.14.3-changelog.rst                     |  27
-rw-r--r--  doc/release/1.14.3-notes.rst                           |  41
-rw-r--r--  doc/release/1.15.0-notes.rst                           |  37
m---------  doc/scipy-sphinx-theme                                 |   0
-rw-r--r--  doc/source/dev/gitwash/development_workflow.rst        |   2
-rw-r--r--  doc/source/reference/c-api.coremath.rst                |  32
-rw-r--r--  doc/source/reference/routines.io.rst                   |   2
-rw-r--r--  doc/source/reference/swig.testing.rst                  |   2
-rw-r--r--  numpy/core/code_generators/generate_umath.py           |   4
-rw-r--r--  numpy/core/einsumfunc.py                               |   2
-rw-r--r--  numpy/core/include/numpy/npy_interrupt.h               |   2
-rw-r--r--  numpy/core/include/numpy/npy_math.h                    |  11
-rw-r--r--  numpy/core/setup.py                                    |   2
-rw-r--r--  numpy/core/src/multiarray/array_assign_scalar.c        |   4
-rw-r--r--  numpy/core/src/multiarray/arraytypes.c.src             |  65
-rw-r--r--  numpy/core/src/multiarray/lowlevel_strided_loops.c.src |   4
-rw-r--r--  numpy/core/src/multiarray/multiarraymodule.c           |   6
-rw-r--r--  numpy/core/src/npymath/ieee754.c.src                   |  75
-rw-r--r--  numpy/core/src/umath/extobj.c                          |   2
-rw-r--r--  numpy/core/src/umath/loops.c.src                       |  17
-rw-r--r--  numpy/core/src/umath/reduction.c                       |   2
-rw-r--r--  numpy/core/src/umath/scalarmath.c.src                  |  14
-rw-r--r--  numpy/core/src/umath/simd.inc.src                      |   7
-rw-r--r--  numpy/core/src/umath/ufunc_object.c                    |  47
-rw-r--r--  numpy/core/tests/test_longdouble.py                    |   2
-rw-r--r--  numpy/core/tests/test_multiarray.py                    |   6
-rw-r--r--  numpy/core/tests/test_print.py                         |   2
-rw-r--r--  numpy/core/tests/test_regression.py                    |  11
-rw-r--r--  numpy/core/tests/test_umath.py                         |  11
-rw-r--r--  numpy/distutils/system_info.py                         | 131
-rw-r--r--  numpy/f2py/src/fortranobject.c                         |   2
-rw-r--r--  numpy/lib/arraypad.py                                  | 159
-rw-r--r--  numpy/lib/format.py                                    |  14
-rw-r--r--  numpy/lib/histograms.py                                |  32
-rw-r--r--  numpy/lib/npyio.py                                     |  18
-rw-r--r--  numpy/lib/tests/test_arraypad.py                       |  13
-rw-r--r--  numpy/lib/tests/test_function_base.py                  |   6
-rw-r--r--  numpy/lib/tests/test_histograms.py                     |  43
-rw-r--r--  numpy/lib/tests/test_nanfunctions.py                   |   2
-rw-r--r--  numpy/lib/tests/test_shape_base.py                     |   4
-rw-r--r--  numpy/lib/twodim_base.py                               |   2
-rw-r--r--  numpy/linalg/tests/test_linalg.py                      | 409
-rw-r--r--  numpy/linalg/umath_linalg.c.src                        | 108
-rw-r--r--  numpy/matrixlib/tests/test_matrix_linalg.py            |  95
-rw-r--r--  numpy/random/mtrand/mtrand.pyx                         |  22
-rw-r--r--  numpy/testing/_private/decorators.py                   |   2
-rw-r--r--  site.cfg.example                                       |   8
52 files changed, 920 insertions(+), 719 deletions(-)
diff --git a/.circleci/config.yml b/.circleci/config.yml
index 230871ce1..e2eb01b04 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -22,7 +22,7 @@ jobs:
. venv/bin/activate
pip install cython sphinx matplotlib
sudo apt-get update
- sudo apt-get install -y graphviz
+ sudo apt-get install -y graphviz texlive-fonts-recommended texlive-latex-recommended texlive-latex-extra texlive-generic-extra latexmk texlive-xetex
- run:
name: build numpy
diff --git a/.travis.yml b/.travis.yml
index 168a7a385..4632fbffe 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -54,7 +54,6 @@ matrix:
- cython3-dbg
- python3-dbg
- python3-dev
- - python3-nose
- python3-setuptools
- python: 3.6
env: USE_WHEEL=1 RUN_FULL_TESTS=1
diff --git a/README.md b/README.md
index a3d90f6cf..cd11b7bc5 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,4 @@
-# <img alt="NumPy" src="branding/icons/numpylogo.svg" height="60">
+# <img alt="NumPy" src="https://cdn.rawgit.com/numpy/numpy/master/branding/icons/numpylogo.svg" height="60">
[![Travis](https://img.shields.io/travis/numpy/numpy/master.svg?label=Travis%20CI)](https://travis-ci.org/numpy/numpy)
[![AppVeyor](https://img.shields.io/appveyor/ci/charris/numpy/master.svg?label=AppVeyor)](https://ci.appveyor.com/project/charris/numpy)
diff --git a/benchmarks/benchmarks/bench_random.py b/benchmarks/benchmarks/bench_random.py
index 7ed3e2fa1..9d84d83d3 100644
--- a/benchmarks/benchmarks/bench_random.py
+++ b/benchmarks/benchmarks/bench_random.py
@@ -65,3 +65,18 @@ class Randint_dtype(Benchmark):
high = self.high[name]
np.random.randint(0, high + 1, size=10**5, dtype=name)
+
+class Permutation(Benchmark):
+ def setup(self):
+ self.n = 10000
+ self.a_1d = np.random.random_sample(self.n)
+ self.a_2d = np.random.random_sample((self.n, 2))
+
+ def time_permutation_1d(self):
+ np.random.permutation(self.a_1d)
+
+ def time_permutation_2d(self):
+ np.random.permutation(self.a_2d)
+
+ def time_permutation_int(self):
+ np.random.permutation(self.n)
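A rough standalone sketch of what these benchmarks exercise, for running
outside of asv (the ``timeit`` harness and repeat count are illustrative, not
part of the suite)::

    import timeit
    import numpy as np

    n = 10000
    a_1d = np.random.random_sample(n)
    a_2d = np.random.random_sample((n, 2))

    # permutation shuffles a copy of its input;
    # an int argument permutes np.arange(n) instead
    cases = {'1d': lambda: np.random.permutation(a_1d),
             '2d': lambda: np.random.permutation(a_2d),
             'int': lambda: np.random.permutation(n)}
    for label, func in cases.items():
        print(label, timeit.timeit(func, number=100))

The 2-d case is the one affected by the ``random.permutation`` fast-path
change described in the 1.15.0 release notes below.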
diff --git a/doc/RELEASE_WALKTHROUGH.rst.txt b/doc/RELEASE_WALKTHROUGH.rst.txt
index 81e15f91f..9a2b71d7e 100644
--- a/doc/RELEASE_WALKTHROUGH.rst.txt
+++ b/doc/RELEASE_WALKTHROUGH.rst.txt
@@ -1,12 +1,12 @@
-This file contains a walkthrough of the NumPy 1.12.0 release on Fedora Linux.
+This file contains a walkthrough of the NumPy 1.14.3 release on Linux.
The commands can be copied into the command line, but be sure to
-replace 1.12.0 by the correct version.
+replace 1.14.3 by the correct version.
Release Walkthrough
====================
-Building the release
---------------------
+Prepare the release commit
+--------------------------
Checkout the branch for the release, make sure it is up to date, and clean the
repository::
@@ -16,44 +16,39 @@ repository::
$ git submodule update
$ git clean -xdf
-Look at the git log to get the hash of the last commit in the release, then
-check it out::
-
- $ git log
- $ git checkout 7849751173fb47a5f17761b3515b42b4d8ce1197
-
Edit pavement.py and setup.py as detailed in HOWTO_RELEASE::
$ gvim pavement.py setup.py
- $ git commit -a -m"REL: NumPy 1.14.1 release."
+ $ git commit -a -m"REL: NumPy 1.14.3 release."
Sanity check::
$ python runtests.py -m "full"
$ python3 runtests.py -m "full"
-Tag it,and build the source distribution archives::
+Push this release directly onto the end of the maintenance branch. This
+requires write permission to the numpy repository::
- $ git tag -s v1.14.1
- $ paver sdist # sdist will do a git clean -xdf, so we omit that
+ $ git push upstream maintenance/1.14.x
-Check that the files in ``release/installers`` have the correct versions, then
-push the tag upstream; generation of the wheels for PyPI needs it::
+As an example, see the 1.14.3 REL commit: `<https://github.com/numpy/numpy/commit/73299826729be58cec179b52c656adfcaefada93>`_.
- $ git push upstream v1.14.1
+Build wheels
+------------
-Trigger the wheels build. This can take a while. The numpy-wheels repository is
-cloned from `<https://github.com/MacPython/numpy-wheels>`_. Start with a pull
-as the repo may have been accessed and changed by someone else and a push will
-fail.
+Trigger the wheels build by pointing the numpy-wheels repository at this
+commit. This can take a while. The numpy-wheels repository is cloned from
+`<https://github.com/MacPython/numpy-wheels>`_. Start with a pull as the repo
+may have been accessed and changed by someone else and a push will fail::
$ cd ../numpy-wheels
$ git pull origin master
$ git branch <new version> # only when starting new numpy version
- $ git checkout v1.14.x # v1.14.x already existed for the 1.14.1 release
+ $ git checkout v1.14.x # v1.14.x already existed for the 1.14.3 release
-The ``.travis.yml`` and ``appveyor.yml`` files need to be edited to make
-sure they have the correct version, search for ``BUILD_COMMIT``.
+Edit the ``.travis.yml`` and ``appveyor.yml`` files to make sure they have the
+correct version, and put in the commit hash for the ``REL`` commit created
+above for ``BUILD_COMMIT``. See `<https://github.com/MacPython/numpy-wheels/commit/fed9c04629c155e7804282eb803d81097244598d>`_ for an example::
$ gvim .travis.yml appveyor.yml
$ git commit -a
@@ -66,7 +61,6 @@ and appveyor build status. Check if all the needed wheels have been built and
uploaded before proceeding. There should currently be 22 of them at
`<https://wheels.scipy.org>`_, 4 for Mac, 8 for Windows, and 10 for Linux.
-
Download wheels
---------------
@@ -75,7 +69,7 @@ in the ``terryfy`` repository. The terryfy repository may be cloned from
`<https://github.com/MacPython/terryfy>`_ if you don't already have it. The
wheels can also be uploaded using the ``wheel-uploader``, but we prefer to
download all the wheels to the ``../numpy/release/installers`` directory and
-upload later using ``twine``.
+upload later using ``twine``::
$ cd ../terryfy
$ git pull origin master
@@ -88,14 +82,56 @@ upload later using ``twine``.
If you do this often, consider making CDN_URL and NPY_WHLS part of your default
environment.
+Tag the release
+---------------
+
+Once the wheels have been built and downloaded without errors, go back to your
+numpy repository in the maintenance branch and tag the ``REL`` commit, signing
+it with your gpg key, and build the source distribution archives::
+
+ $ git tag -s v1.14.3
+ $ paver sdist # sdist will do a git clean -xdf, so we omit that
+
+You should upload your public gpg key to github, so that the tag will appear
+"verified" there.
+
+Check that the files in ``release/installers`` have the correct versions, then
+push the tag upstream::
+
+ $ git push upstream v1.14.3
+
+We wait until this point to push the tag because it is very difficult to change
+the tag after it has been pushed.
+
+Reset the maintenance branch into a development state
+-----------------------------------------------------
+
+Add another ``REL`` commit to the numpy maintenance branch, which resets the
+``ISRELEASED`` flag to ``False`` and increments the version counter::
+
+ $ gvim pavement.py setup.py
+ $ git commit -a -m"REL: prepare 1.14.x for further development"
+ $ git push upstream maintenance/1.14.x
+
+This strategy is copied from the scipy release procedure and was used in numpy
+for the first time in 1.14.3. It needed to be modified a little since numpy
+has stricter requirements for the version number. It was achieved in two
+commits:
+`<https://github.com/numpy/numpy/commit/b8df705bdcce92d3e2c6f050eb4414192cf0df04>`_
+`<https://github.com/numpy/numpy/commit/29e175269624493114f77cceff93486271f9efff>`_.
Upload to PyPI
--------------
-Upload to PyPI using ``twine``. The choice here is to sign the files, so will
-need to sign every file separately when they are uploaded, keeping the gpg pass
-phrase in the clipboard and pasting it in will make that easier. We may chose
-to forgo the signing in the future::
+Upload to PyPI using ``twine``.
+
+In the past, we signed the wheel files, but after 1.14.3 wheels should no
+longer support or need signing. The instructions below still sign.
+
+For the 1.14.3 release we signed every file when it was uploaded. On systems
+which do not cache the gpg passphrase for a few minutes, keeping it in the
+clipboard and pasting it in will make that easier. We may choose to forgo the
+signing in the future::
$ cd ../numpy
$ twine upload -s release/installers/*.whl
@@ -120,15 +156,15 @@ Generate the ``release/README`` files::
$ rm release/installers/*.asc
$ paver write_release_and_log
-Go to `<https://github.com/numpy/numpy/releases>`_, there should be a ``v1.14.1
+Go to `<https://github.com/numpy/numpy/releases>`_, there should be a ``v1.14.3
tag``, click on it and hit the edit button for that tag. There are two ways to
add files, using an editable text window and as binary uploads.
- Cut and paste the ``release/README.md`` file contents into the text window.
-- Upload ``release/installers/numpy-1.12.0.tar.gz`` as a binary file.
-- Upload ``release/installers/numpy-1.12.0.zip`` as a binary file.
+- Upload ``release/installers/numpy-1.14.3.tar.gz`` as a binary file.
+- Upload ``release/installers/numpy-1.14.3.zip`` as a binary file.
- Upload ``release/README`` as a binary file.
-- Upload ``doc/changelog/1.14.1-changelog.rst`` as a binary file.
+- Upload ``doc/changelog/1.14.3-changelog.rst`` as a binary file.
- Check the pre-release button if this is a pre-release.
- Hit the ``{Publish,Update} release`` button at the bottom.
@@ -143,7 +179,7 @@ upload the documentation. Otherwise::
$ pushd doc
$ make dist
- $ make upload USERNAME=<yourname> RELEASE=v1.14.1
+ $ make upload USERNAME=<yourname> RELEASE=v1.14.3
$ popd
If the release series is a new one, you will need to rebuild and upload the
@@ -164,7 +200,7 @@ This assumes that you have forked `<https://github.com/scipy/scipy.org>`_::
$ cd ../scipy.org
$ git checkout master
$ git pull upstream master
- $ git checkout -b numpy-1.14.1
+ $ git checkout -b numpy-1.14.3
$ gvim www/index.rst # edit the News section
$ git commit -a
$ git push origin HEAD
@@ -180,7 +216,7 @@ announcements for the basic template. The contributor list can be generated as
follows::
$ cd ../numpy
- $ ./tools/changelog.py $GITHUB v1.14.0..v1.14.1 > tmp.rst
+ $ ./tools/changelog.py $GITHUB v1.14.2..v1.14.3 > tmp.rst
The contents of ``tmp.rst`` can then be cut and pasted into the announcement
email.
diff --git a/doc/changelog/1.14.3-changelog.rst b/doc/changelog/1.14.3-changelog.rst
new file mode 100644
index 000000000..784a9177f
--- /dev/null
+++ b/doc/changelog/1.14.3-changelog.rst
@@ -0,0 +1,27 @@
+
+Contributors
+============
+
+A total of 6 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Allan Haldane
+* Charles Harris
+* Jonathan March +
+* Malcolm Smith +
+* Matti Picus
+* Pauli Virtanen
+
+Pull requests merged
+====================
+
+A total of 8 pull requests were merged for this release.
+
+* `#10862 <https://github.com/numpy/numpy/pull/10862>`__: BUG: floating types should override tp_print (1.14 backport)
+* `#10905 <https://github.com/numpy/numpy/pull/10905>`__: BUG: for 1.14 back-compat, accept list-of-lists in fromrecords
+* `#10947 <https://github.com/numpy/numpy/pull/10947>`__: BUG: 'style' arg to array2string broken in legacy mode (1.14...
+* `#10959 <https://github.com/numpy/numpy/pull/10959>`__: BUG: test, fix for missing flags['WRITEBACKIFCOPY'] key
+* `#10960 <https://github.com/numpy/numpy/pull/10960>`__: BUG: Add missing underscore to prototype in check_embedded_lapack
+* `#10961 <https://github.com/numpy/numpy/pull/10961>`__: BUG: Fix encoding regression in ma/bench.py (Issue #10868)
+* `#10962 <https://github.com/numpy/numpy/pull/10962>`__: BUG: core: fix NPY_TITLE_KEY macro on pypy
+* `#10974 <https://github.com/numpy/numpy/pull/10974>`__: BUG: test, fix PyArray_DiscardWritebackIfCopy...
diff --git a/doc/release/1.14.3-notes.rst b/doc/release/1.14.3-notes.rst
new file mode 100644
index 000000000..60b631168
--- /dev/null
+++ b/doc/release/1.14.3-notes.rst
@@ -0,0 +1,41 @@
+==========================
+NumPy 1.14.3 Release Notes
+==========================
+
+This is a bugfix release for a few bugs reported following the 1.14.2 release:
+
+* np.lib.recfunctions.fromrecords accepts a list-of-lists, until 1.15
+* In python2, float types use the new print style when printing to a file
+* style arg in "legacy" print mode now works for 0d arrays
+
+The Python versions supported in this release are 2.7 and 3.4 - 3.6. The Python
+3.6 wheels available from PIP are built with Python 3.6.2 and should be
+compatible with all previous versions of Python 3.6. The source releases were
+cythonized with Cython 0.28.2.
+
+Contributors
+============
+
+A total of 6 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Allan Haldane
+* Charles Harris
+* Jonathan March +
+* Malcolm Smith +
+* Matti Picus
+* Pauli Virtanen
+
+Pull requests merged
+====================
+
+A total of 8 pull requests were merged for this release.
+
+* `#10862 <https://github.com/numpy/numpy/pull/10862>`__: BUG: floating types should override tp_print (1.14 backport)
+* `#10905 <https://github.com/numpy/numpy/pull/10905>`__: BUG: for 1.14 back-compat, accept list-of-lists in fromrecords
+* `#10947 <https://github.com/numpy/numpy/pull/10947>`__: BUG: 'style' arg to array2string broken in legacy mode (1.14...
+* `#10959 <https://github.com/numpy/numpy/pull/10959>`__: BUG: test, fix for missing flags['WRITEBACKIFCOPY'] key
+* `#10960 <https://github.com/numpy/numpy/pull/10960>`__: BUG: Add missing underscore to prototype in check_embedded_lapack
+* `#10961 <https://github.com/numpy/numpy/pull/10961>`__: BUG: Fix encoding regression in ma/bench.py (Issue #10868)
+* `#10962 <https://github.com/numpy/numpy/pull/10962>`__: BUG: core: fix NPY_TITLE_KEY macro on pypy
+* `#10974 <https://github.com/numpy/numpy/pull/10974>`__: BUG: test, fix PyArray_DiscardWritebackIfCopy...
diff --git a/doc/release/1.15.0-notes.rst b/doc/release/1.15.0-notes.rst
index 736ec0961..d3f34d2bc 100644
--- a/doc/release/1.15.0-notes.rst
+++ b/doc/release/1.15.0-notes.rst
@@ -20,15 +20,18 @@ New functions
* ``nanquantile`` function, an interface to ``nanpercentile`` without factors
of 100
-* `np.printoptions`, the context manager which sets print options temporarily
+* `np.printoptions`, a context manager that sets print options temporarily
for the scope of the ``with`` block::
>>> with np.printoptions(precision=2):
... print(np.array([2.0]) / 3)
[0.67]
- * `np.histogram_bin_edges`, a function to get the edges of the bins used by a histogram
- without needing to calculate the histogram.
+* `np.histogram_bin_edges`, a function to get the edges of the bins used by a histogram
+ without needing to calculate the histogram.
+
+* ``npy_get_floatstatus_barrier`` and ``npy_clear_floatstatus_barrier`` have been added to
+ deal with compiler optimization changing the order of operations. See below for details.
Deprecations
============
@@ -42,6 +45,7 @@ Deprecations
* `np.ma.loads`, `np.ma.dumps`
* `np.ma.load`, `np.ma.dump` - these functions already failed on python 3,
when called with a string.
+
* Direct imports from the following modules is deprecated. All testing related
imports should come from `numpy.testing`.
* `np.testing.utils`
@@ -116,6 +120,12 @@ longer possible, and objects expecting the old API are respected. The silent suc
by removing the interception of an otherwise-normal Exception when ``axis`` was provided to an object
using the old API.
+unstructured void array's ``.item`` method now returns a bytes object
+---------------------------------------------------------------------
+``.item`` now returns a ``bytes`` object instead of a buffer or byte array.
+This may affect code which assumed the return value was mutable, which is no
+longer the case.
+
C API changes
=============
@@ -123,6 +133,14 @@ C API changes
* ``NpyIter_Close`` has been added and should be called before
``NpyIter_Deallocate`` to resolve possible writeback-enabled arrays.
+* Functions ``npy_get_floatstatus_barrier`` and ``npy_clear_floatstatus_barrier``
+  have been added and should be used in place of the ``npy_get_floatstatus`` and
+ ``npy_clear_status`` functions. Optimizing compilers like GCC 8.1 and Clang
+ were rearranging the order of operations when the previous functions were
+  used in the ufunc SIMD functions, resulting in the floatstatus flags being
+ checked before the operation whose status we wanted to check was run.
+  See `#10370 <https://github.com/numpy/numpy/issues/10370>`__.
+
New Features
============
@@ -198,6 +216,13 @@ passed explicitly, and are not yet computed automatically.
No longer does an IQR of 0 result in `n_bins=1`, rather the number of bins
chosen is related to the data size in this situation.
+``histogram`` and ``histogramdd`` return edges matching the float type of the data
+----------------------------------------------------------------------------------
+When passed ``np.float16``, ``np.float32``, or ``np.longdouble`` data, the
+returned edges are now of the same dtype. Previously, ``histogram`` would only
+return the same type if explicit bins were given, and ``histogramdd`` would
+produce ``float64`` bins regardless of the inputs.
+
``histogramdd`` allows explicit ranges to be given in a subset of axes
----------------------------------------------------------------------
The ``range`` argument of `histogramdd` can now contain ``None`` values to
@@ -295,5 +320,11 @@ of the change, to further improve consistency, the name of the first argument
has been changed to ``a`` (from ``M``), and the exceptions for non-square
matrices have been changed to ``LinAlgError`` (from ``ValueError``).
+Increased performance in ``random.permutation`` for multidimensional arrays
+---------------------------------------------------------------------------
+``permutation`` uses the fast path in ``random.shuffle`` for all input
+array dimensions. Previously the fast path was only used for 1-d arrays.
+
+
Changes
=======
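A short sketch exercising the new histogram behaviour described above (the
dtype assertion assumes a 1.15 build)::

    import numpy as np

    data = np.random.random(100).astype(np.float32)

    # new in 1.15: bin edges without computing the histogram itself
    edges = np.histogram_bin_edges(data, bins=4)

    # edges now match the floating type of the data
    hist, edges = np.histogram(data, bins=4)
    assert edges.dtype == np.float32

    # histogramdd: a None entry in ``range`` falls back to the data limits
    sample = np.random.random((50, 2))
    hist, edge_list = np.histogramdd(sample, range=[(0.0, 1.0), None])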
diff --git a/doc/scipy-sphinx-theme b/doc/scipy-sphinx-theme
-Subproject c466764e2231ba132c09826b5b138fffa1cfcec
+Subproject d990ab9134199f6496b9ac8567f10791f04a720
diff --git a/doc/source/dev/gitwash/development_workflow.rst b/doc/source/dev/gitwash/development_workflow.rst
index 5476e3202..c6884a7cf 100644
--- a/doc/source/dev/gitwash/development_workflow.rst
+++ b/doc/source/dev/gitwash/development_workflow.rst
@@ -396,7 +396,7 @@ collaborator:
Now all those people can do::
- git clone git@githhub.com:your-user-name/numpy.git
+ git clone git@github.com:your-user-name/numpy.git
Remember that links starting with ``git@`` use the ssh protocol and are
read-write; links starting with ``git://`` are read-only.
diff --git a/doc/source/reference/c-api.coremath.rst b/doc/source/reference/c-api.coremath.rst
index d3f7fcf75..ad92235da 100644
--- a/doc/source/reference/c-api.coremath.rst
+++ b/doc/source/reference/c-api.coremath.rst
@@ -183,14 +183,46 @@ Those can be useful for precise floating point comparison.
* NPY_FPE_UNDERFLOW
* NPY_FPE_INVALID
+ Note that :c:func:`npy_get_floatstatus_barrier` is preferable as it prevents
+  aggressive compiler optimizations reordering the call relative to
+ the code setting the status, which could lead to incorrect results.
+
.. versionadded:: 1.9.0
+.. c:function:: int npy_get_floatstatus_barrier(char*)
+
+ Get floating point status. A pointer to a local variable is passed in to
+  prevent aggressive compiler optimizations from reordering this function call
+ relative to the code setting the status, which could lead to incorrect
+ results.
+
+ Returns a bitmask with following possible flags:
+
+ * NPY_FPE_DIVIDEBYZERO
+ * NPY_FPE_OVERFLOW
+ * NPY_FPE_UNDERFLOW
+ * NPY_FPE_INVALID
+
+ .. versionadded:: 1.15.0
+
.. c:function:: int npy_clear_floatstatus()
Clears the floating point status. Returns the previous status mask.
+ Note that :c:func:`npy_clear_floatstatus_barrier` is preferable as it
+  prevents aggressive compiler optimizations reordering the call relative to
+ the code setting the status, which could lead to incorrect results.
+
.. versionadded:: 1.9.0
+.. c:function:: int npy_clear_floatstatus_barrier(char*)
+
+ Clears the floating point status. A pointer to a local variable is passed in to
+  prevent aggressive compiler optimizations from reordering this function call.
+ Returns the previous status mask.
+
+ .. versionadded:: 1.15.0
Complex functions
~~~~~~~~~~~~~~~~~
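The user-visible symptom of the reordering that these ``_barrier`` variants
guard against was missing ``RuntimeWarning``s from NaN-aware reductions (see
gh-10370). A minimal sketch of the expected behaviour, mirroring the test
added later in this commit::

    import warnings
    import numpy as np

    a = np.diagflat([np.nan] * 4)    # each row holds a single NaN
    with warnings.catch_warnings(record=True) as log:
        warnings.simplefilter("always")
        for row in a:
            np.min(row)              # the NaN comparison sets the invalid flag
    # one RuntimeWarning per reduction once the barrier is in place
    assert len(log) == 4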
diff --git a/doc/source/reference/routines.io.rst b/doc/source/reference/routines.io.rst
index 573498792..9acd22c56 100644
--- a/doc/source/reference/routines.io.rst
+++ b/doc/source/reference/routines.io.rst
@@ -14,7 +14,7 @@ NumPy binary files (NPY, NPZ)
savez_compressed
The format of these binary file types is documented in
-http://numpy.github.io/neps/npy-format.html
+:ref:`binary-serialization`.
Text files
----------
diff --git a/doc/source/reference/swig.testing.rst b/doc/source/reference/swig.testing.rst
index 13642a52e..594df952e 100644
--- a/doc/source/reference/swig.testing.rst
+++ b/doc/source/reference/swig.testing.rst
@@ -22,7 +22,7 @@ typemaps are working as expected.
Testing Organization
--------------------
-There are three indepedent testing frameworks supported, for one-,
+There are three independent testing frameworks supported, for one-,
two-, and three-dimensional arrays respectively. For one-dimensional
arrays, there are two C++ files, a header and a source, named::
diff --git a/numpy/core/code_generators/generate_umath.py b/numpy/core/code_generators/generate_umath.py
index 7492baf9d..112af9a34 100644
--- a/numpy/core/code_generators/generate_umath.py
+++ b/numpy/core/code_generators/generate_umath.py
@@ -908,8 +908,8 @@ if sys.version_info[0] >= 3:
del defdict['divide']
def indent(st, spaces):
- indention = ' '*spaces
- indented = indention + st.replace('\n', '\n'+indention)
+ indentation = ' '*spaces
+ indented = indentation + st.replace('\n', '\n'+indentation)
# trim off any trailing spaces
indented = re.sub(r' +$', r'', indented)
return indented
diff --git a/numpy/core/einsumfunc.py b/numpy/core/einsumfunc.py
index 8cd6eae12..bb6767c4f 100644
--- a/numpy/core/einsumfunc.py
+++ b/numpy/core/einsumfunc.py
@@ -1148,7 +1148,7 @@ def einsum(*operands, **kwargs):
# Do the contraction
new_view = c_einsum(einsum_str, *tmp_operands, **einsum_kwargs)
- # Append new items and derefernce what we can
+ # Append new items and dereference what we can
operands.append(new_view)
del tmp_operands, new_view
diff --git a/numpy/core/include/numpy/npy_interrupt.h b/numpy/core/include/numpy/npy_interrupt.h
index f71fd689e..40cb7ac5e 100644
--- a/numpy/core/include/numpy/npy_interrupt.h
+++ b/numpy/core/include/numpy/npy_interrupt.h
@@ -55,7 +55,7 @@ Ideas:
Simple Interface:
-In your C-extension: around a block of code you want to be interruptable
+In your C-extension: around a block of code you want to be interruptible
with a SIGINT
NPY_SIGINT_ON
diff --git a/numpy/core/include/numpy/npy_math.h b/numpy/core/include/numpy/npy_math.h
index ba32bcdd3..582390cdc 100644
--- a/numpy/core/include/numpy/npy_math.h
+++ b/numpy/core/include/numpy/npy_math.h
@@ -524,8 +524,17 @@ npy_clongdouble npy_catanhl(npy_clongdouble z);
#define NPY_FPE_UNDERFLOW 4
#define NPY_FPE_INVALID 8
-int npy_get_floatstatus(void);
+int npy_clear_floatstatus_barrier(char*);
+int npy_get_floatstatus_barrier(char*);
+/*
+ * use caution with these - clang and gcc8.1 are known to reorder calls
+ * to this form of the function which can defeat the check. The _barrier
+ * form of the call is preferable, where the argument is
+ * (char*)&local_variable
+ */
int npy_clear_floatstatus(void);
+int npy_get_floatstatus(void);
+
void npy_set_floatstatus_divbyzero(void);
void npy_set_floatstatus_overflow(void);
void npy_set_floatstatus_underflow(void);
diff --git a/numpy/core/setup.py b/numpy/core/setup.py
index 15f6e1522..7d8bab557 100644
--- a/numpy/core/setup.py
+++ b/numpy/core/setup.py
@@ -664,7 +664,7 @@ def configuration(parent_package='',top_path=None):
def get_mathlib_info(*args):
# Another ugly hack: the mathlib info is known once build_src is run,
# but we cannot use add_installed_pkg_config here either, so we only
- # update the substition dictionary during npymath build
+ # update the substitution dictionary during npymath build
config_cmd = config.get_config_cmd()
# Check that the toolchain works, to fail early if it doesn't
diff --git a/numpy/core/src/multiarray/array_assign_scalar.c b/numpy/core/src/multiarray/array_assign_scalar.c
index 3d259ae05..17de99cb9 100644
--- a/numpy/core/src/multiarray/array_assign_scalar.c
+++ b/numpy/core/src/multiarray/array_assign_scalar.c
@@ -245,6 +245,10 @@ PyArray_AssignRawScalar(PyArrayObject *dst,
allocated_src_data = 1;
}
+ if (PyDataType_FLAGCHK(PyArray_DESCR(dst), NPY_NEEDS_INIT)) {
+ memset(tmp_src_data, 0, PyArray_DESCR(dst)->elsize);
+ }
+
if (PyArray_CastRawArrays(1, src_data, tmp_src_data, 0, 0,
src_dtype, PyArray_DESCR(dst), 0) != NPY_SUCCEED) {
src_data = tmp_src_data;
diff --git a/numpy/core/src/multiarray/arraytypes.c.src b/numpy/core/src/multiarray/arraytypes.c.src
index 5e6804a5c..42f876125 100644
--- a/numpy/core/src/multiarray/arraytypes.c.src
+++ b/numpy/core/src/multiarray/arraytypes.c.src
@@ -655,9 +655,7 @@ VOID_getitem(void *input, void *vap)
{
PyArrayObject *ap = vap;
char *ip = input;
- PyArrayObject *u = NULL;
PyArray_Descr* descr;
- int itemsize;
descr = PyArray_DESCR(ap);
if (PyDataType_HASFIELDS(descr)) {
@@ -731,68 +729,7 @@ VOID_getitem(void *input, void *vap)
return (PyObject *)ret;
}
- /* 2017-11-26, 1.14 */
- if (DEPRECATE_FUTUREWARNING(
- "the `.item()` method of unstructured void types will return an "
- "immutable `bytes` object in the near future, the same as "
- "returned by `bytes(void_obj)`, instead of the mutable memoryview "
- "or integer array returned in numpy 1.13.") < 0) {
- return NULL;
- }
- /*
- * In the future all the code below will be replaced by
- *
- * For unstructured void types like V4, return a bytes object (copy).
- * return PyBytes_FromStringAndSize(PyArray_DATA(ap), descr->elsize);
- */
-
- if (PyDataType_FLAGCHK(descr, NPY_ITEM_HASOBJECT)
- || PyDataType_FLAGCHK(descr, NPY_ITEM_IS_POINTER)) {
- PyErr_SetString(PyExc_ValueError,
- "tried to get void-array with object members as buffer.");
- return NULL;
- }
- itemsize = PyArray_DESCR(ap)->elsize;
-
-#if defined(NPY_PY3K)
- /*
- * Return a byte array; there are no plain buffer objects on Py3
- */
- {
- npy_intp dims[1], strides[1];
- dims[0] = itemsize;
- strides[0] = 1;
- descr = PyArray_DescrNewFromType(NPY_BYTE);
- u = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type,
- descr, 1, dims, strides, ip,
- PyArray_ISWRITEABLE(ap) ? NPY_ARRAY_WRITEABLE : 0,
- NULL);
- Py_INCREF(ap);
- if (PyArray_SetBaseObject(u, (PyObject *)ap) < 0) {
- Py_DECREF(u);
- return NULL;
- }
- }
-#else
- /*
- * default is to return buffer object pointing to
- * current item a view of it
- */
- if (PyArray_ISWRITEABLE(ap)) {
- if (array_might_be_written(ap) < 0) {
- return NULL;
- }
- u = (PyArrayObject *)PyBuffer_FromReadWriteMemory(ip, itemsize);
- }
- else {
- u = (PyArrayObject *)PyBuffer_FromMemory(ip, itemsize);
- }
-#endif
-
- if (u == NULL) {
- return NULL;
- }
- return (PyObject *)u;
+ return PyBytes_FromStringAndSize(PyArray_DATA(ap), descr->elsize);
}
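The behaviour this implements, as exercised by the regression test updated
later in this commit::

    import numpy as np

    va = np.zeros(10, 'V4')           # unstructured void, 4 bytes per item
    x = va[:1].item()                 # now an immutable bytes copy
    va[0] = b'\xff\xff\xff\xff'       # mutating the array afterwards...
    assert x == b'\x00\x00\x00\x00'   # ...leaves the earlier copy untouched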
diff --git a/numpy/core/src/multiarray/lowlevel_strided_loops.c.src b/numpy/core/src/multiarray/lowlevel_strided_loops.c.src
index 397aaf209..fa68af19a 100644
--- a/numpy/core/src/multiarray/lowlevel_strided_loops.c.src
+++ b/numpy/core/src/multiarray/lowlevel_strided_loops.c.src
@@ -1373,7 +1373,7 @@ PyArray_TransferMaskedStridedToNDim(npy_intp ndim,
/*
* Advanded indexing iteration of arrays when there is a single indexing
* array which has the same memory order as the value array and both
- * can be trivally iterated (single stride, aligned, no casting necessary).
+ * can be trivially iterated (single stride, aligned, no casting necessary).
*/
NPY_NO_EXPORT int
mapiter_trivial_@name@(PyArrayObject *self, PyArrayObject *ind,
@@ -1747,7 +1747,7 @@ mapiter_@name@(PyArrayMapIterObject *mit)
}
else {
/*
- * faster resetting if the subspace iteration is trival.
+ * faster resetting if the subspace iteration is trivial.
* reset_offsets are zero for positive strides,
* for negative strides this shifts the pointer to the last
* item.
diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c
index 7eccb4a4b..6d323dbd8 100644
--- a/numpy/core/src/multiarray/multiarraymodule.c
+++ b/numpy/core/src/multiarray/multiarraymodule.c
@@ -197,7 +197,7 @@ PyArray_CompareLists(npy_intp *l1, npy_intp *l2, int n)
}
/*
- * simulates a C-style 1-3 dimensional array which can be accesed using
+ * simulates a C-style 1-3 dimensional array which can be accessed using
* ptr[i] or ptr[i][j] or ptr[i][j][k] -- requires pointer allocation
* for 2-d and 3-d.
*
@@ -3605,7 +3605,7 @@ as_buffer(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds)
/*
- * Prints floating-point scalars usign the Dragon4 algorithm, scientific mode.
+ * Prints floating-point scalars using the Dragon4 algorithm, scientific mode.
* See docstring of `np.format_float_scientific` for description of arguments.
 * The difference is that a value of -1 is valid for pad_left, exp_digits,
* precision, which is equivalent to `None`.
@@ -3661,7 +3661,7 @@ dragon4_scientific(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds)
}
/*
- * Prints floating-point scalars usign the Dragon4 algorithm, positional mode.
+ * Prints floating-point scalars using the Dragon4 algorithm, positional mode.
* See docstring of `np.format_float_positional` for description of arguments.
 * The difference is that a value of -1 is valid for pad_left, pad_right,
* precision, which is equivalent to `None`.
diff --git a/numpy/core/src/npymath/ieee754.c.src b/numpy/core/src/npymath/ieee754.c.src
index bca690b4d..5405c8fe3 100644
--- a/numpy/core/src/npymath/ieee754.c.src
+++ b/numpy/core/src/npymath/ieee754.c.src
@@ -6,6 +6,7 @@
*/
#include "npy_math_common.h"
#include "npy_math_private.h"
+#include "numpy/utils.h"
#ifndef HAVE_COPYSIGN
double npy_copysign(double x, double y)
@@ -557,6 +558,15 @@ npy_longdouble npy_nextafterl(npy_longdouble x, npy_longdouble y)
}
#endif
+int npy_clear_floatstatus() {
+ char x=0;
+ return npy_clear_floatstatus_barrier(&x);
+}
+int npy_get_floatstatus() {
+ char x=0;
+ return npy_get_floatstatus_barrier(&x);
+}
+
/*
* Functions to set the floating point status word.
* keep in sync with NO_FLOATING_POINT_SUPPORT in ufuncobject.h
@@ -574,18 +584,24 @@ npy_longdouble npy_nextafterl(npy_longdouble x, npy_longdouble y)
defined(__NetBSD__)
#include <ieeefp.h>
-int npy_get_floatstatus(void)
+int npy_get_floatstatus_barrier(char * param)
{
int fpstatus = fpgetsticky();
+ /*
+ * By using a volatile, the compiler cannot reorder this call
+ */
+ if (param != NULL) {
+ volatile char NPY_UNUSED(c) = *(char*)param;
+ }
return ((FP_X_DZ & fpstatus) ? NPY_FPE_DIVIDEBYZERO : 0) |
((FP_X_OFL & fpstatus) ? NPY_FPE_OVERFLOW : 0) |
((FP_X_UFL & fpstatus) ? NPY_FPE_UNDERFLOW : 0) |
((FP_X_INV & fpstatus) ? NPY_FPE_INVALID : 0);
}
-int npy_clear_floatstatus(void)
+int npy_clear_floatstatus_barrier(char * param)
{
- int fpstatus = npy_get_floatstatus();
+ int fpstatus = npy_get_floatstatus_barrier(param);
fpsetsticky(0);
return fpstatus;
@@ -617,10 +633,16 @@ void npy_set_floatstatus_invalid(void)
(defined(__FreeBSD__) && (__FreeBSD_version >= 502114))
# include <fenv.h>
-int npy_get_floatstatus(void)
+int npy_get_floatstatus_barrier(char* param)
{
int fpstatus = fetestexcept(FE_DIVBYZERO | FE_OVERFLOW |
FE_UNDERFLOW | FE_INVALID);
+ /*
+ * By using a volatile, the compiler cannot reorder this call
+ */
+ if (param != NULL) {
+ volatile char NPY_UNUSED(c) = *(char*)param;
+ }
return ((FE_DIVBYZERO & fpstatus) ? NPY_FPE_DIVIDEBYZERO : 0) |
((FE_OVERFLOW & fpstatus) ? NPY_FPE_OVERFLOW : 0) |
@@ -628,10 +650,10 @@ int npy_get_floatstatus(void)
((FE_INVALID & fpstatus) ? NPY_FPE_INVALID : 0);
}
-int npy_clear_floatstatus(void)
+int npy_clear_floatstatus_barrier(char * param)
{
/* testing float status is 50-100 times faster than clearing on x86 */
- int fpstatus = npy_get_floatstatus();
+ int fpstatus = npy_get_floatstatus_barrier(param);
if (fpstatus != 0) {
feclearexcept(FE_DIVBYZERO | FE_OVERFLOW |
FE_UNDERFLOW | FE_INVALID);
@@ -665,18 +687,24 @@ void npy_set_floatstatus_invalid(void)
#include <float.h>
#include <fpxcp.h>
-int npy_get_floatstatus(void)
+int npy_get_floatstatus_barrier(char *param)
{
int fpstatus = fp_read_flag();
+ /*
+ * By using a volatile, the compiler cannot reorder this call
+ */
+ if (param != NULL) {
+ volatile char NPY_UNUSED(c) = *(char*)param;
+ }
return ((FP_DIV_BY_ZERO & fpstatus) ? NPY_FPE_DIVIDEBYZERO : 0) |
((FP_OVERFLOW & fpstatus) ? NPY_FPE_OVERFLOW : 0) |
((FP_UNDERFLOW & fpstatus) ? NPY_FPE_UNDERFLOW : 0) |
((FP_INVALID & fpstatus) ? NPY_FPE_INVALID : 0);
}
-int npy_clear_floatstatus(void)
+int npy_clear_floatstatus_barrier(char * param)
{
- int fpstatus = npy_get_floatstatus();
+ int fpstatus = npy_get_floatstatus_barrier(param);
fp_swap_flag(0);
return fpstatus;
@@ -710,8 +738,11 @@ void npy_set_floatstatus_invalid(void)
#include <float.h>
-int npy_get_floatstatus(void)
+int npy_get_floatstatus_barrier(char *param)
{
+ /*
+ * By using a volatile, the compiler cannot reorder this call
+ */
#if defined(_WIN64)
int fpstatus = _statusfp();
#else
@@ -720,15 +751,18 @@ int npy_get_floatstatus(void)
_statusfp2(&fpstatus, &fpstatus2);
fpstatus |= fpstatus2;
#endif
+ if (param != NULL) {
+ volatile char NPY_UNUSED(c) = *(char*)param;
+ }
return ((SW_ZERODIVIDE & fpstatus) ? NPY_FPE_DIVIDEBYZERO : 0) |
((SW_OVERFLOW & fpstatus) ? NPY_FPE_OVERFLOW : 0) |
((SW_UNDERFLOW & fpstatus) ? NPY_FPE_UNDERFLOW : 0) |
((SW_INVALID & fpstatus) ? NPY_FPE_INVALID : 0);
}
-int npy_clear_floatstatus(void)
+int npy_clear_floatstatus_barrier(char *param)
{
- int fpstatus = npy_get_floatstatus();
+ int fpstatus = npy_get_floatstatus_barrier(param);
_clearfp();
return fpstatus;
@@ -739,18 +773,24 @@ int npy_clear_floatstatus(void)
#include <machine/fpu.h>
-int npy_get_floatstatus(void)
+int npy_get_floatstatus_barrier(char *param)
{
unsigned long fpstatus = ieee_get_fp_control();
+ /*
+ * By using a volatile, the compiler cannot reorder this call
+ */
+ if (param != NULL) {
+ volatile char NPY_UNUSED(c) = *(char*)param;
+ }
return ((IEEE_STATUS_DZE & fpstatus) ? NPY_FPE_DIVIDEBYZERO : 0) |
((IEEE_STATUS_OVF & fpstatus) ? NPY_FPE_OVERFLOW : 0) |
((IEEE_STATUS_UNF & fpstatus) ? NPY_FPE_UNDERFLOW : 0) |
((IEEE_STATUS_INV & fpstatus) ? NPY_FPE_INVALID : 0);
}
-int npy_clear_floatstatus(void)
+int npy_clear_floatstatus_barrier(char *param)
{
- long fpstatus = npy_get_floatstatus();
+ int fpstatus = npy_get_floatstatus_barrier(param);
/* clear status bits as well as disable exception mode if on */
ieee_set_fp_control(0);
@@ -759,13 +799,14 @@ int npy_clear_floatstatus(void)
#else
-int npy_get_floatstatus(void)
+int npy_get_floatstatus_barrier(char NPY_UNUSED(*param))
{
return 0;
}
-int npy_clear_floatstatus(void)
+int npy_clear_floatstatus_barrier(char *param)
{
+ int fpstatus = npy_get_floatstatus_barrier(param);
return 0;
}
diff --git a/numpy/core/src/umath/extobj.c b/numpy/core/src/umath/extobj.c
index e44036358..188054e22 100644
--- a/numpy/core/src/umath/extobj.c
+++ b/numpy/core/src/umath/extobj.c
@@ -284,7 +284,7 @@ _check_ufunc_fperr(int errmask, PyObject *extobj, const char *ufunc_name) {
if (!errmask) {
return 0;
}
- fperr = PyUFunc_getfperr();
+ fperr = npy_get_floatstatus_barrier((char*)extobj);
if (!fperr) {
return 0;
}
diff --git a/numpy/core/src/umath/loops.c.src b/numpy/core/src/umath/loops.c.src
index 8b1c7e703..1ca298b30 100644
--- a/numpy/core/src/umath/loops.c.src
+++ b/numpy/core/src/umath/loops.c.src
@@ -1819,7 +1819,7 @@ NPY_NO_EXPORT void
*((npy_bool *)op1) = @func@(in1) != 0;
}
}
- npy_clear_floatstatus();
+ npy_clear_floatstatus_barrier((char*)dimensions);
}
/**end repeat1**/
@@ -1866,6 +1866,9 @@ NPY_NO_EXPORT void
const @type@ in2 = *(@type@ *)ip2;
io1 = (io1 @OP@ in2 || npy_isnan(io1)) ? io1 : in2;
}
+ if (npy_isnan(io1)) {
+ npy_set_floatstatus_invalid();
+ }
*((@type@ *)iop1) = io1;
}
}
@@ -1901,7 +1904,7 @@ NPY_NO_EXPORT void
*((@type@ *)op1) = (in1 @OP@ in2 || npy_isnan(in2)) ? in1 : in2;
}
}
- npy_clear_floatstatus();
+ npy_clear_floatstatus_barrier((char*)dimensions);
}
/**end repeat1**/
@@ -1991,7 +1994,7 @@ NPY_NO_EXPORT void
*((@type@ *)op1) = tmp + 0;
}
}
- npy_clear_floatstatus();
+ npy_clear_floatstatus_barrier((char*)dimensions);
}
NPY_NO_EXPORT void
@@ -2177,7 +2180,7 @@ HALF_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED
const npy_half in1 = *(npy_half *)ip1;
*((npy_bool *)op1) = @func@(in1) != 0;
}
- npy_clear_floatstatus();
+ npy_clear_floatstatus_barrier((char*)dimensions);
}
/**end repeat**/
@@ -2239,7 +2242,7 @@ HALF_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED
const npy_half in2 = *(npy_half *)ip2;
*((npy_half *)op1) = (@OP@(in1, in2) || npy_half_isnan(in2)) ? in1 : in2;
}
- npy_clear_floatstatus();
+ npy_clear_floatstatus_barrier((char*)dimensions);
}
/**end repeat**/
@@ -2681,7 +2684,7 @@ NPY_NO_EXPORT void
const @ftype@ in1i = ((@ftype@ *)ip1)[1];
*((npy_bool *)op1) = @func@(in1r) @OP@ @func@(in1i);
}
- npy_clear_floatstatus();
+ npy_clear_floatstatus_barrier((char*)dimensions);
}
/**end repeat1**/
@@ -2790,7 +2793,7 @@ NPY_NO_EXPORT void
((@ftype@ *)op1)[1] = in2i;
}
}
- npy_clear_floatstatus();
+ npy_clear_floatstatus_barrier((char*)dimensions);
}
/**end repeat1**/
diff --git a/numpy/core/src/umath/reduction.c b/numpy/core/src/umath/reduction.c
index 681d3fefa..5c3a84e21 100644
--- a/numpy/core/src/umath/reduction.c
+++ b/numpy/core/src/umath/reduction.c
@@ -537,7 +537,7 @@ PyUFunc_ReduceWrapper(PyArrayObject *operand, PyArrayObject *out,
}
/* Start with the floating-point exception flags cleared */
- PyUFunc_clearfperr();
+ npy_clear_floatstatus_barrier((char*)&iter);
if (NpyIter_GetIterSize(iter) != 0) {
NpyIter_IterNextFunc *iternext;
diff --git a/numpy/core/src/umath/scalarmath.c.src b/numpy/core/src/umath/scalarmath.c.src
index 6e1fb1ee8..3e29c4b4e 100644
--- a/numpy/core/src/umath/scalarmath.c.src
+++ b/numpy/core/src/umath/scalarmath.c.src
@@ -848,7 +848,7 @@ static PyObject *
}
#if @fperr@
- PyUFunc_clearfperr();
+ npy_clear_floatstatus_barrier((char*)&out);
#endif
/*
@@ -863,7 +863,7 @@ static PyObject *
#if @fperr@
/* Check status flag. If it is set, then look up what to do */
- retstatus = PyUFunc_getfperr();
+ retstatus = npy_get_floatstatus_barrier((char*)&out);
if (retstatus) {
int bufsize, errmask;
PyObject *errobj;
@@ -993,7 +993,7 @@ static PyObject *
return Py_NotImplemented;
}
- PyUFunc_clearfperr();
+ npy_clear_floatstatus_barrier((char*)&out);
/*
* here we do the actual calculation with arg1 and arg2
@@ -1008,7 +1008,7 @@ static PyObject *
}
/* Check status flag. If it is set, then look up what to do */
- retstatus = PyUFunc_getfperr();
+ retstatus = npy_get_floatstatus_barrier((char*)&out);
if (retstatus) {
int bufsize, errmask;
PyObject *errobj;
@@ -1072,7 +1072,7 @@ static PyObject *
return Py_NotImplemented;
}
- PyUFunc_clearfperr();
+ npy_clear_floatstatus_barrier((char*)&out);
/*
* here we do the actual calculation with arg1 and arg2
@@ -1136,7 +1136,7 @@ static PyObject *
return Py_NotImplemented;
}
- PyUFunc_clearfperr();
+ npy_clear_floatstatus_barrier((char*)&out);
/*
* here we do the actual calculation with arg1 and arg2
@@ -1150,7 +1150,7 @@ static PyObject *
}
/* Check status flag. If it is set, then look up what to do */
- retstatus = PyUFunc_getfperr();
+ retstatus = npy_get_floatstatus_barrier((char*)&out);
if (retstatus) {
int bufsize, errmask;
PyObject *errobj;
diff --git a/numpy/core/src/umath/simd.inc.src b/numpy/core/src/umath/simd.inc.src
index 2241414ac..5c0568c12 100644
--- a/numpy/core/src/umath/simd.inc.src
+++ b/numpy/core/src/umath/simd.inc.src
@@ -1031,7 +1031,7 @@ sse2_@kind@_@TYPE@(@type@ * ip, @type@ * op, const npy_intp n)
i += 2 * stride;
/* minps/minpd will set invalid flag if nan is encountered */
- npy_clear_floatstatus();
+ npy_clear_floatstatus_barrier((char*)&c1);
LOOP_BLOCKED(@type@, 32) {
@vtype@ v1 = @vpre@_load_@vsuf@((@type@*)&ip[i]);
@vtype@ v2 = @vpre@_load_@vsuf@((@type@*)&ip[i + stride]);
@@ -1040,7 +1040,7 @@ sse2_@kind@_@TYPE@(@type@ * ip, @type@ * op, const npy_intp n)
}
c1 = @vpre@_@VOP@_@vsuf@(c1, c2);
- if (npy_get_floatstatus() & NPY_FPE_INVALID) {
+ if (npy_get_floatstatus_barrier((char*)&c1) & NPY_FPE_INVALID) {
*op = @nan@;
}
else {
@@ -1051,6 +1051,9 @@ sse2_@kind@_@TYPE@(@type@ * ip, @type@ * op, const npy_intp n)
LOOP_BLOCKED_END {
*op = (*op @OP@ ip[i] || npy_isnan(*op)) ? *op : ip[i];
}
+ if (npy_isnan(*op)) {
+ npy_set_floatstatus_invalid();
+ }
}
/**end repeat1**/
diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c
index 36b77ef03..c1e8e5a77 100644
--- a/numpy/core/src/umath/ufunc_object.c
+++ b/numpy/core/src/umath/ufunc_object.c
@@ -100,7 +100,8 @@ PyUFunc_getfperr(void)
* non-clearing get was only added in 1.9 so this function always cleared
* keep it so just in case third party code relied on the clearing
*/
- return npy_clear_floatstatus();
+ char param = 0;
+ return npy_clear_floatstatus_barrier(&param);
}
#define HANDLEIT(NAME, str) {if (retstatus & NPY_FPE_##NAME) { \
@@ -133,7 +134,8 @@ NPY_NO_EXPORT int
PyUFunc_checkfperr(int errmask, PyObject *errobj, int *first)
{
/* clearing is done for backward compatibility */
- int retstatus = npy_clear_floatstatus();
+ int retstatus;
+ retstatus = npy_clear_floatstatus_barrier((char*)&retstatus);
return PyUFunc_handlefperr(errmask, errobj, retstatus, first);
}
@@ -144,7 +146,8 @@ PyUFunc_checkfperr(int errmask, PyObject *errobj, int *first)
NPY_NO_EXPORT void
PyUFunc_clearfperr()
{
- npy_clear_floatstatus();
+ char param = 0;
+ npy_clear_floatstatus_barrier(&param);
}
/*
@@ -2537,7 +2540,7 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc,
#endif
/* Start with the floating-point exception flags cleared */
- PyUFunc_clearfperr();
+ npy_clear_floatstatus_barrier((char*)&iter);
NPY_UF_DBG_PRINT("Executing inner loop\n");
@@ -2782,7 +2785,7 @@ PyUFunc_GenericFunction(PyUFuncObject *ufunc,
}
/* Start with the floating-point exception flags cleared */
- PyUFunc_clearfperr();
+ npy_clear_floatstatus_barrier((char*)&ufunc);
/* Do the ufunc loop */
if (need_fancy) {
@@ -3563,7 +3566,7 @@ PyUFunc_Reduceat(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *ind,
op_axes_arrays[2]};
npy_uint32 op_flags[3];
int i, idim, ndim, otype_final;
- int need_outer_iterator;
+ int need_outer_iterator = 0;
NpyIter *iter = NULL;
@@ -4279,11 +4282,9 @@ static PyObject *
ufunc_generic_call(PyUFuncObject *ufunc, PyObject *args, PyObject *kwds)
{
int i;
- PyTupleObject *ret;
PyArrayObject *mps[NPY_MAXARGS];
PyObject *retobj[NPY_MAXARGS];
PyObject *wraparr[NPY_MAXARGS];
- PyObject *res;
PyObject *override = NULL;
ufunc_full_args full_args = {NULL, NULL};
int errval;
@@ -4360,13 +4361,17 @@ ufunc_generic_call(PyUFuncObject *ufunc, PyObject *args, PyObject *kwds)
int j = ufunc->nin+i;
PyObject *wrap = wraparr[i];
- if (wrap != NULL) {
+ if (wrap == NULL) {
+ /* default behavior */
+ retobj[i] = PyArray_Return(mps[j]);
+ }
+ else if (wrap == Py_None) {
+ Py_DECREF(wrap);
+ retobj[i] = (PyObject *)mps[j];
+ }
+ else {
+ PyObject *res;
PyObject *args_tup;
- if (wrap == Py_None) {
- Py_DECREF(wrap);
- retobj[i] = (PyObject *)mps[j];
- continue;
- }
/* Call the method with appropriate context */
args_tup = _get_wrap_prepare_args(full_args);
@@ -4386,15 +4391,9 @@ ufunc_generic_call(PyUFuncObject *ufunc, PyObject *args, PyObject *kwds)
if (res == NULL) {
goto fail;
}
- else {
- Py_DECREF(mps[j]);
- retobj[i] = res;
- continue;
- }
- }
- else {
- /* default behavior */
- retobj[i] = PyArray_Return(mps[j]);
+
+ Py_DECREF(mps[j]);
+ retobj[i] = res;
}
}
@@ -4405,6 +4404,8 @@ ufunc_generic_call(PyUFuncObject *ufunc, PyObject *args, PyObject *kwds)
return retobj[0];
}
else {
+ PyTupleObject *ret;
+
ret = (PyTupleObject *)PyTuple_New(ufunc->nout);
for (i = 0; i < ufunc->nout; i++) {
PyTuple_SET_ITEM(ret, i, retobj[i]);
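The restructured branches above cover the three ways a ufunc output is
returned: no ``__array_wrap__`` found (plain ``PyArray_Return``), a wrap of
``None`` (return the array as-is), or a callable wrap invoked with context. A
small sketch of the callable case (the ``Tagged`` subclass and its ``tag``
attribute are illustrative)::

    import numpy as np

    class Tagged(np.ndarray):
        def __array_wrap__(self, obj, context=None):
            # the ufunc machinery calls this to rewrap each output
            out = obj.view(Tagged)
            out.tag = getattr(self, 'tag', None)
            return out

    a = np.arange(4.0).view(Tagged)
    a.tag = 'demo'
    b = np.sqrt(a)                    # output is passed through __array_wrap__
    assert isinstance(b, Tagged) and b.tag == 'demo'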
diff --git a/numpy/core/tests/test_longdouble.py b/numpy/core/tests/test_longdouble.py
index 513a71b99..cf50d5d5c 100644
--- a/numpy/core/tests/test_longdouble.py
+++ b/numpy/core/tests/test_longdouble.py
@@ -6,7 +6,7 @@ import numpy as np
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_array_equal, temppath,
)
-from ._locales import CommaDecimalPointLocale
+from numpy.core.tests._locales import CommaDecimalPointLocale
LD_INFO = np.finfo(np.longdouble)
longdouble_longer_than_double = (LD_INFO.eps < np.finfo(np.double).eps)
diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py
index 3c5f90cfc..f3032d394 100644
--- a/numpy/core/tests/test_multiarray.py
+++ b/numpy/core/tests/test_multiarray.py
@@ -34,7 +34,7 @@ from numpy.testing import (
assert_allclose, IS_PYPY, HAS_REFCOUNT, assert_array_less, runstring,
SkipTest, temppath, suppress_warnings
)
-from ._locales import CommaDecimalPointLocale
+from numpy.core.tests._locales import CommaDecimalPointLocale
# Need to test an object that does not fully implement math interface
from datetime import timedelta, datetime
@@ -7290,7 +7290,7 @@ class TestWritebackIfCopy(object):
# after resolve, the two arrays no longer reference each other
assert_(arr_wb.ctypes.data != 0)
assert_equal(arr_wb.base, None)
- # assigning to arr_wb does not get transfered to arr
+ # assigning to arr_wb does not get transferred to arr
arr_wb[...] = 100
assert_equal(arr, -100)
@@ -7321,7 +7321,7 @@ class TestWritebackIfCopy(object):
assert_equal(arr_wb.base, None)
if HAS_REFCOUNT:
assert_equal(arr_cnt, sys.getrefcount(arr))
- # assigning to arr_wb does not get transfered to arr
+ # assigning to arr_wb does not get transferred to arr
arr_wb[...] = 100
assert_equal(arr, orig)
diff --git a/numpy/core/tests/test_print.py b/numpy/core/tests/test_print.py
index 746ad0e4b..433208748 100644
--- a/numpy/core/tests/test_print.py
+++ b/numpy/core/tests/test_print.py
@@ -4,7 +4,7 @@ import sys
import numpy as np
from numpy.testing import assert_, assert_equal, SkipTest
-from ._locales import CommaDecimalPointLocale
+from numpy.core.tests._locales import CommaDecimalPointLocale
if sys.version_info[0] >= 3:
diff --git a/numpy/core/tests/test_regression.py b/numpy/core/tests/test_regression.py
index b3cb3e610..d6dcaa982 100644
--- a/numpy/core/tests/test_regression.py
+++ b/numpy/core/tests/test_regression.py
@@ -2325,13 +2325,10 @@ class TestRegression(object):
def test_void_item_memview(self):
va = np.zeros(10, 'V4')
- # for now, there is just a futurewarning
- assert_warns(FutureWarning, va[:1].item)
- # in the future, test we got a bytes copy:
- #x = va[:1].item()
- #va[0] = b'\xff\xff\xff\xff'
- #del va
- #assert_equal(x, b'\x00\x00\x00\x00')
+ x = va[:1].item()
+ va[0] = b'\xff\xff\xff\xff'
+ del va
+ assert_equal(x, b'\x00\x00\x00\x00')
def test_structarray_title(self):
# The following used to segfault on pypy, due to NPY_TITLE_KEY
diff --git a/numpy/core/tests/test_umath.py b/numpy/core/tests/test_umath.py
index ea0be1892..1464a9e9a 100644
--- a/numpy/core/tests/test_umath.py
+++ b/numpy/core/tests/test_umath.py
@@ -1328,6 +1328,17 @@ class TestMinMax(object):
assert_equal(d.max(), d[0])
assert_equal(d.min(), d[0])
+ def test_reduce_warns(self):
+        # gh 10370, 11029 Some compilers reorder the call to npy_get_floatstatus
+        # and put it before the call to an intrinsic function that causes
+ # invalid status to be set. Also make sure warnings are emitted
+ for n in (2, 4, 8, 16, 32):
+ with suppress_warnings() as sup:
+ sup.record(RuntimeWarning)
+ for r in np.diagflat([np.nan] * n):
+ assert_equal(np.min(r), np.nan)
+ assert_equal(len(sup.log), n)
+
class TestAbsoluteNegative(object):
def test_abs_neg_blocked(self):
diff --git a/numpy/distutils/system_info.py b/numpy/distutils/system_info.py
index 2a3ff2e52..65d7de316 100644
--- a/numpy/distutils/system_info.py
+++ b/numpy/distutils/system_info.py
@@ -385,6 +385,7 @@ def get_info(name, notfound_action=0):
'blis': blis_info, # use blas_opt instead
'lapack_mkl': lapack_mkl_info, # use lapack_opt instead
'blas_mkl': blas_mkl_info, # use blas_opt instead
+ 'accelerate': accelerate_info, # use blas_opt instead
'x11': x11_info,
'fft_opt': fft_opt_info,
'fftw': fftw_info,
@@ -1551,39 +1552,10 @@ class lapack_opt_info(system_info):
if not atlas_info:
atlas_info = get_info('atlas')
- if sys.platform == 'darwin' \
- and not os.getenv('_PYTHON_HOST_PLATFORM', None) \
- and not (atlas_info or openblas_info or
- lapack_mkl_info):
- # Use the system lapack from Accelerate or vecLib under OSX
- args = []
- link_args = []
- if get_platform()[-4:] == 'i386' or 'intel' in get_platform() or \
- 'x86_64' in get_platform() or \
- 'i386' in platform.platform():
- intel = 1
- else:
- intel = 0
- if os.path.exists('/System/Library/Frameworks'
- '/Accelerate.framework/'):
- if intel:
- args.extend(['-msse3'])
- else:
- args.extend(['-faltivec'])
- link_args.extend(['-Wl,-framework', '-Wl,Accelerate'])
- elif os.path.exists('/System/Library/Frameworks'
- '/vecLib.framework/'):
- if intel:
- args.extend(['-msse3'])
- else:
- args.extend(['-faltivec'])
- link_args.extend(['-Wl,-framework', '-Wl,vecLib'])
- if args:
- self.set_info(extra_compile_args=args,
- extra_link_args=link_args,
- define_macros=[('NO_ATLAS_INFO', 3),
- ('HAVE_CBLAS', None)])
- return
+ accelerate_info = get_info('accelerate')
+ if accelerate_info and not atlas_info:
+ self.set_info(**accelerate_info)
+ return
need_lapack = 0
need_blas = 0
@@ -1659,43 +1631,10 @@ class blas_opt_info(system_info):
if not atlas_info:
atlas_info = get_info('atlas_blas')
- if sys.platform == 'darwin' \
- and not os.getenv('_PYTHON_HOST_PLATFORM', None) \
- and not (atlas_info or openblas_info or
- blas_mkl_info or blis_info):
- # Use the system BLAS from Accelerate or vecLib under OSX
- args = []
- link_args = []
- if get_platform()[-4:] == 'i386' or 'intel' in get_platform() or \
- 'x86_64' in get_platform() or \
- 'i386' in platform.platform():
- intel = 1
- else:
- intel = 0
- if os.path.exists('/System/Library/Frameworks'
- '/Accelerate.framework/'):
- if intel:
- args.extend(['-msse3'])
- else:
- args.extend(['-faltivec'])
- args.extend([
- '-I/System/Library/Frameworks/vecLib.framework/Headers'])
- link_args.extend(['-Wl,-framework', '-Wl,Accelerate'])
- elif os.path.exists('/System/Library/Frameworks'
- '/vecLib.framework/'):
- if intel:
- args.extend(['-msse3'])
- else:
- args.extend(['-faltivec'])
- args.extend([
- '-I/System/Library/Frameworks/vecLib.framework/Headers'])
- link_args.extend(['-Wl,-framework', '-Wl,vecLib'])
- if args:
- self.set_info(extra_compile_args=args,
- extra_link_args=link_args,
- define_macros=[('NO_ATLAS_INFO', 3),
- ('HAVE_CBLAS', None)])
- return
+ accelerate_info = get_info('accelerate')
+ if accelerate_info and not atlas_info:
+ self.set_info(**accelerate_info)
+ return
need_blas = 0
info = {}
@@ -1939,6 +1878,58 @@ class blis_info(blas_info):
include_dirs=incl_dirs)
self.set_info(**info)
+class accelerate_info(system_info):
+ section = 'accelerate'
+ notfounderror = BlasNotFoundError
+
+ def calc_info(self):
+        # Make it possible to enable/disable from the config file or an env var
+ libraries = os.environ.get('ACCELERATE')
+ if libraries:
+ libraries = [libraries]
+ else:
+ libraries = self.get_libs('libraries', ['accelerate', 'veclib'])
+ libraries = [lib.strip().lower() for lib in libraries]
+
+ if (sys.platform == 'darwin' and
+ not os.getenv('_PYTHON_HOST_PLATFORM', None)):
+ # Use the system BLAS from Accelerate or vecLib under OSX
+ args = []
+ link_args = []
+ if get_platform()[-4:] == 'i386' or 'intel' in get_platform() or \
+ 'x86_64' in get_platform() or \
+ 'i386' in platform.platform():
+ intel = 1
+ else:
+ intel = 0
+ if (os.path.exists('/System/Library/Frameworks'
+ '/Accelerate.framework/') and
+ 'accelerate' in libraries):
+ if intel:
+ args.extend(['-msse3'])
+ else:
+ args.extend(['-faltivec'])
+ args.extend([
+ '-I/System/Library/Frameworks/vecLib.framework/Headers'])
+ link_args.extend(['-Wl,-framework', '-Wl,Accelerate'])
+ elif (os.path.exists('/System/Library/Frameworks'
+ '/vecLib.framework/') and
+ 'veclib' in libraries):
+ if intel:
+ args.extend(['-msse3'])
+ else:
+ args.extend(['-faltivec'])
+ args.extend([
+ '-I/System/Library/Frameworks/vecLib.framework/Headers'])
+ link_args.extend(['-Wl,-framework', '-Wl,vecLib'])
+
+ if args:
+ self.set_info(extra_compile_args=args,
+ extra_link_args=link_args,
+ define_macros=[('NO_ATLAS_INFO', 3),
+ ('HAVE_CBLAS', None)])
+
+ return
class blas_src_info(system_info):
section = 'blas_src'
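With the new section in place, the Accelerate configuration can be queried
like any other optimized-BLAS entry; on platforms without the framework the
result is simply empty (a sketch, assuming a default build environment)::

    from numpy.distutils.system_info import get_info

    info = get_info('accelerate')     # {} when the framework is unavailable
    # on macOS this typically includes
    # ['-Wl,-framework', '-Wl,Accelerate'] in extra_link_args
    print(info.get('extra_link_args', []))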
diff --git a/numpy/f2py/src/fortranobject.c b/numpy/f2py/src/fortranobject.c
index dd2484eb4..78b06f066 100644
--- a/numpy/f2py/src/fortranobject.c
+++ b/numpy/f2py/src/fortranobject.c
@@ -539,7 +539,7 @@ void f2py_report_on_exit(int exit_flag,void *name) {
fprintf(stderr,"(d) f2py call-back interface, %6d calls : %8d msec\n",
cb_passed_counter,cb_passed_time);
- fprintf(stderr,"(e) wrapped (Fortran/C) functions (acctual) : %8d msec\n\n",
+ fprintf(stderr,"(e) wrapped (Fortran/C) functions (actual) : %8d msec\n\n",
passed_call_time-cb_passed_call_time-cb_passed_time);
fprintf(stderr,"Use -DF2PY_REPORT_ATEXIT_DISABLE to disable this message.\n");
fprintf(stderr,"Exit status: %d\n",exit_flag);
diff --git a/numpy/lib/arraypad.py b/numpy/lib/arraypad.py
index daaa68d06..600301c56 100644
--- a/numpy/lib/arraypad.py
+++ b/numpy/lib/arraypad.py
@@ -100,12 +100,8 @@ def _prepend_const(arr, pad_amt, val, axis=-1):
return arr
padshape = tuple(x if i != axis else pad_amt
for (i, x) in enumerate(arr.shape))
- if val == 0:
- return np.concatenate((np.zeros(padshape, dtype=arr.dtype), arr),
- axis=axis)
- else:
- return np.concatenate(((np.zeros(padshape) + val).astype(arr.dtype),
- arr), axis=axis)
+ return np.concatenate((np.full(padshape, val, dtype=arr.dtype), arr),
+ axis=axis)
def _append_const(arr, pad_amt, val, axis=-1):
@@ -134,12 +130,8 @@ def _append_const(arr, pad_amt, val, axis=-1):
return arr
padshape = tuple(x if i != axis else pad_amt
for (i, x) in enumerate(arr.shape))
- if val == 0:
- return np.concatenate((arr, np.zeros(padshape, dtype=arr.dtype)),
- axis=axis)
- else:
- return np.concatenate(
- (arr, (np.zeros(padshape) + val).astype(arr.dtype)), axis=axis)
+ return np.concatenate((arr, np.full(padshape, val, dtype=arr.dtype)),
+ axis=axis)
def _prepend_edge(arr, pad_amt, axis=-1):
@@ -164,13 +156,9 @@ def _prepend_edge(arr, pad_amt, axis=-1):
if pad_amt == 0:
return arr
- edge_slice = tuple(slice(None) if i != axis else 0
+ edge_slice = tuple(slice(None) if i != axis else slice(0, 1)
for (i, x) in enumerate(arr.shape))
-
- # Shape to restore singleton dimension after slicing
- pad_singleton = tuple(x if i != axis else 1
- for (i, x) in enumerate(arr.shape))
- edge_arr = arr[edge_slice].reshape(pad_singleton)
+ edge_arr = arr[edge_slice]
return np.concatenate((edge_arr.repeat(pad_amt, axis=axis), arr),
axis=axis)
@@ -198,13 +186,9 @@ def _append_edge(arr, pad_amt, axis=-1):
if pad_amt == 0:
return arr
- edge_slice = tuple(slice(None) if i != axis else arr.shape[axis] - 1
+ edge_slice = tuple(slice(None) if i != axis else slice(x - 1, x)
for (i, x) in enumerate(arr.shape))
-
- # Shape to restore singleton dimension after slicing
- pad_singleton = tuple(x if i != axis else 1
- for (i, x) in enumerate(arr.shape))
- edge_arr = arr[edge_slice].reshape(pad_singleton)
+ edge_arr = arr[edge_slice]
return np.concatenate((arr, edge_arr.repeat(pad_amt, axis=axis)),
axis=axis)
@@ -244,15 +228,11 @@ def _prepend_ramp(arr, pad_amt, end, axis=-1):
reverse=True).astype(np.float64)
# Appropriate slicing to extract n-dimensional edge along `axis`
- edge_slice = tuple(slice(None) if i != axis else 0
+ edge_slice = tuple(slice(None) if i != axis else slice(0, 1)
for (i, x) in enumerate(arr.shape))
- # Shape to restore singleton dimension after slicing
- pad_singleton = tuple(x if i != axis else 1
- for (i, x) in enumerate(arr.shape))
-
- # Extract edge, reshape to original rank, and extend along `axis`
- edge_pad = arr[edge_slice].reshape(pad_singleton).repeat(pad_amt, axis)
+ # Extract edge, and extend along `axis`
+ edge_pad = arr[edge_slice].repeat(pad_amt, axis)
# Linear ramp
slope = (end - edge_pad) / float(pad_amt)
@@ -299,15 +279,11 @@ def _append_ramp(arr, pad_amt, end, axis=-1):
reverse=False).astype(np.float64)
# Slice a chunk from the edge to calculate stats on
- edge_slice = tuple(slice(None) if i != axis else -1
+ edge_slice = tuple(slice(None) if i != axis else slice(x - 1, x)
for (i, x) in enumerate(arr.shape))
- # Shape to restore singleton dimension after slicing
- pad_singleton = tuple(x if i != axis else 1
- for (i, x) in enumerate(arr.shape))
-
- # Extract edge, reshape to original rank, and extend along `axis`
- edge_pad = arr[edge_slice].reshape(pad_singleton).repeat(pad_amt, axis)
+ # Extract edge, and extend along `axis`
+ edge_pad = arr[edge_slice].repeat(pad_amt, axis)
# Linear ramp
slope = (end - edge_pad) / float(pad_amt)
@@ -359,12 +335,8 @@ def _prepend_max(arr, pad_amt, num, axis=-1):
max_slice = tuple(slice(None) if i != axis else slice(num)
for (i, x) in enumerate(arr.shape))
- # Shape to restore singleton dimension after slicing
- pad_singleton = tuple(x if i != axis else 1
- for (i, x) in enumerate(arr.shape))
-
- # Extract slice, calculate max, reshape to add singleton dimension back
- max_chunk = arr[max_slice].max(axis=axis).reshape(pad_singleton)
+ # Extract slice, calculate max
+ max_chunk = arr[max_slice].max(axis=axis, keepdims=True)
# Concatenate `arr` with `max_chunk`, extended along `axis` by `pad_amt`
return np.concatenate((max_chunk.repeat(pad_amt, axis=axis), arr),
@@ -415,12 +387,8 @@ def _append_max(arr, pad_amt, num, axis=-1):
else:
max_slice = tuple(slice(None) for x in arr.shape)
- # Shape to restore singleton dimension after slicing
- pad_singleton = tuple(x if i != axis else 1
- for (i, x) in enumerate(arr.shape))
-
- # Extract slice, calculate max, reshape to add singleton dimension back
- max_chunk = arr[max_slice].max(axis=axis).reshape(pad_singleton)
+ # Extract slice, calculate max
+ max_chunk = arr[max_slice].max(axis=axis, keepdims=True)
# Concatenate `arr` with `max_chunk`, extended along `axis` by `pad_amt`
return np.concatenate((arr, max_chunk.repeat(pad_amt, axis=axis)),
@@ -466,12 +434,8 @@ def _prepend_mean(arr, pad_amt, num, axis=-1):
mean_slice = tuple(slice(None) if i != axis else slice(num)
for (i, x) in enumerate(arr.shape))
- # Shape to restore singleton dimension after slicing
- pad_singleton = tuple(x if i != axis else 1
- for (i, x) in enumerate(arr.shape))
-
- # Extract slice, calculate mean, reshape to add singleton dimension back
- mean_chunk = arr[mean_slice].mean(axis).reshape(pad_singleton)
+ # Extract slice, calculate mean
+ mean_chunk = arr[mean_slice].mean(axis, keepdims=True)
_round_ifneeded(mean_chunk, arr.dtype)
# Concatenate `arr` with `mean_chunk`, extended along `axis` by `pad_amt`
@@ -523,12 +487,8 @@ def _append_mean(arr, pad_amt, num, axis=-1):
else:
mean_slice = tuple(slice(None) for x in arr.shape)
- # Shape to restore singleton dimension after slicing
- pad_singleton = tuple(x if i != axis else 1
- for (i, x) in enumerate(arr.shape))
-
- # Extract slice, calculate mean, reshape to add singleton dimension back
- mean_chunk = arr[mean_slice].mean(axis=axis).reshape(pad_singleton)
+ # Extract slice, calculate mean
+ mean_chunk = arr[mean_slice].mean(axis=axis, keepdims=True)
_round_ifneeded(mean_chunk, arr.dtype)
# Concatenate `arr` with `mean_chunk`, extended along `axis` by `pad_amt`
@@ -575,12 +535,8 @@ def _prepend_med(arr, pad_amt, num, axis=-1):
med_slice = tuple(slice(None) if i != axis else slice(num)
for (i, x) in enumerate(arr.shape))
- # Shape to restore singleton dimension after slicing
- pad_singleton = tuple(x if i != axis else 1
- for (i, x) in enumerate(arr.shape))
-
- # Extract slice, calculate median, reshape to add singleton dimension back
- med_chunk = np.median(arr[med_slice], axis=axis).reshape(pad_singleton)
+ # Extract slice, calculate median
+ med_chunk = np.median(arr[med_slice], axis=axis, keepdims=True)
_round_ifneeded(med_chunk, arr.dtype)
# Concatenate `arr` with `med_chunk`, extended along `axis` by `pad_amt`
@@ -632,12 +588,8 @@ def _append_med(arr, pad_amt, num, axis=-1):
else:
med_slice = tuple(slice(None) for x in arr.shape)
- # Shape to restore singleton dimension after slicing
- pad_singleton = tuple(x if i != axis else 1
- for (i, x) in enumerate(arr.shape))
-
- # Extract slice, calculate median, reshape to add singleton dimension back
- med_chunk = np.median(arr[med_slice], axis=axis).reshape(pad_singleton)
+ # Extract slice, calculate median
+ med_chunk = np.median(arr[med_slice], axis=axis, keepdims=True)
_round_ifneeded(med_chunk, arr.dtype)
# Concatenate `arr` with `med_chunk`, extended along `axis` by `pad_amt`
@@ -685,12 +637,8 @@ def _prepend_min(arr, pad_amt, num, axis=-1):
min_slice = tuple(slice(None) if i != axis else slice(num)
for (i, x) in enumerate(arr.shape))
- # Shape to restore singleton dimension after slicing
- pad_singleton = tuple(x if i != axis else 1
- for (i, x) in enumerate(arr.shape))
-
- # Extract slice, calculate min, reshape to add singleton dimension back
- min_chunk = arr[min_slice].min(axis=axis).reshape(pad_singleton)
+ # Extract slice, calculate min
+ min_chunk = arr[min_slice].min(axis=axis, keepdims=True)
# Concatenate `arr` with `min_chunk`, extended along `axis` by `pad_amt`
return np.concatenate((min_chunk.repeat(pad_amt, axis=axis), arr),
@@ -741,12 +689,8 @@ def _append_min(arr, pad_amt, num, axis=-1):
else:
min_slice = tuple(slice(None) for x in arr.shape)
- # Shape to restore singleton dimension after slicing
- pad_singleton = tuple(x if i != axis else 1
- for (i, x) in enumerate(arr.shape))
-
- # Extract slice, calculate min, reshape to add singleton dimension back
- min_chunk = arr[min_slice].min(axis=axis).reshape(pad_singleton)
+ # Extract slice, calculate min
+ min_chunk = arr[min_slice].min(axis=axis, keepdims=True)
# Concatenate `arr` with `min_chunk`, extended along `axis` by `pad_amt`
return np.concatenate((arr, min_chunk.repeat(pad_amt, axis=axis)),
@@ -798,17 +742,11 @@ def _pad_ref(arr, pad_amt, method, axis=-1):
ref_chunk1 = arr[ref_slice]
- # Shape to restore singleton dimension after slicing
- pad_singleton = tuple(x if i != axis else 1
- for (i, x) in enumerate(arr.shape))
- if pad_amt[0] == 1:
- ref_chunk1 = ref_chunk1.reshape(pad_singleton)
-
# Memory/computationally more expensive, only do this if `method='odd'`
if 'odd' in method and pad_amt[0] > 0:
- edge_slice1 = tuple(slice(None) if i != axis else 0
+ edge_slice1 = tuple(slice(None) if i != axis else slice(0, 1)
for (i, x) in enumerate(arr.shape))
- edge_chunk = arr[edge_slice1].reshape(pad_singleton)
+ edge_chunk = arr[edge_slice1]
ref_chunk1 = 2 * edge_chunk - ref_chunk1
del edge_chunk
@@ -824,13 +762,10 @@ def _pad_ref(arr, pad_amt, method, axis=-1):
for (i, x) in enumerate(arr.shape))
ref_chunk2 = arr[ref_slice][rev_idx]
- if pad_amt[1] == 1:
- ref_chunk2 = ref_chunk2.reshape(pad_singleton)
-
if 'odd' in method:
- edge_slice2 = tuple(slice(None) if i != axis else -1
+ edge_slice2 = tuple(slice(None) if i != axis else slice(x - 1, x)
for (i, x) in enumerate(arr.shape))
- edge_chunk = arr[edge_slice2].reshape(pad_singleton)
+ edge_chunk = arr[edge_slice2]
ref_chunk2 = 2 * edge_chunk - ref_chunk2
del edge_chunk
@@ -884,17 +819,11 @@ def _pad_sym(arr, pad_amt, method, axis=-1):
for (i, x) in enumerate(arr.shape))
sym_chunk1 = arr[sym_slice][rev_idx]
- # Shape to restore singleton dimension after slicing
- pad_singleton = tuple(x if i != axis else 1
- for (i, x) in enumerate(arr.shape))
- if pad_amt[0] == 1:
- sym_chunk1 = sym_chunk1.reshape(pad_singleton)
-
# Memory/computationally more expensive, only do this if `method='odd'`
if 'odd' in method and pad_amt[0] > 0:
- edge_slice1 = tuple(slice(None) if i != axis else 0
+ edge_slice1 = tuple(slice(None) if i != axis else slice(0, 1)
for (i, x) in enumerate(arr.shape))
- edge_chunk = arr[edge_slice1].reshape(pad_singleton)
+ edge_chunk = arr[edge_slice1]
sym_chunk1 = 2 * edge_chunk - sym_chunk1
del edge_chunk
@@ -908,13 +837,10 @@ def _pad_sym(arr, pad_amt, method, axis=-1):
for (i, x) in enumerate(arr.shape))
sym_chunk2 = arr[sym_slice][rev_idx]
- if pad_amt[1] == 1:
- sym_chunk2 = sym_chunk2.reshape(pad_singleton)
-
if 'odd' in method:
- edge_slice2 = tuple(slice(None) if i != axis else -1
+ edge_slice2 = tuple(slice(None) if i != axis else slice(x - 1, x)
for (i, x) in enumerate(arr.shape))
- edge_chunk = arr[edge_slice2].reshape(pad_singleton)
+ edge_chunk = arr[edge_slice2]
sym_chunk2 = 2 * edge_chunk - sym_chunk2
del edge_chunk
@@ -965,12 +891,6 @@ def _pad_wrap(arr, pad_amt, axis=-1):
for (i, x) in enumerate(arr.shape))
wrap_chunk1 = arr[wrap_slice]
- # Shape to restore singleton dimension after slicing
- pad_singleton = tuple(x if i != axis else 1
- for (i, x) in enumerate(arr.shape))
- if pad_amt[0] == 1:
- wrap_chunk1 = wrap_chunk1.reshape(pad_singleton)
-
##########################################################################
# Appended region
@@ -979,9 +899,6 @@ def _pad_wrap(arr, pad_amt, axis=-1):
for (i, x) in enumerate(arr.shape))
wrap_chunk2 = arr[wrap_slice]
- if pad_amt[1] == 1:
- wrap_chunk2 = wrap_chunk2.reshape(pad_singleton)
-
# Concatenate `arr` with both chunks, extending along `axis`
return np.concatenate((wrap_chunk1, arr, wrap_chunk2), axis=axis)
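All of the arraypad rewrites above replace the same two-step idiom, reducing and then reshaping through a precomputed pad_singleton shape, with keepdims=True, which leaves the reduced axis in place as a length-1 dimension so the result feeds straight into repeat() and concatenate(). A standalone comparison:

    import numpy as np

    arr = np.arange(6.0).reshape(2, 3)

    # Old idiom: reduce, then restore the lost axis by hand.
    old = arr.max(axis=1).reshape((2, 1))
    # New idiom: keep the reduced axis as a singleton directly.
    new = arr.max(axis=1, keepdims=True)

    assert old.shape == new.shape == (2, 1)
    assert (old == new).all()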
diff --git a/numpy/lib/format.py b/numpy/lib/format.py
index 363bb2101..a0ec55f01 100644
--- a/numpy/lib/format.py
+++ b/numpy/lib/format.py
@@ -1,5 +1,11 @@
"""
-Define a simple format for saving numpy arrays to disk with the full
+
+.. _binary-serialization:
+
+Binary Serialization
+====================
+
+A simple format for saving numpy arrays to disk with the full
information about them.
The ``.npy`` format is the standard binary file format in NumPy for
@@ -143,8 +149,10 @@ data HEADER_LEN."
Notes
-----
-The ``.npy`` format, including reasons for creating it and a comparison of
-alternatives, is described fully in the "npy-format" NEP.
+The ``.npy`` format, including motivation for creating it and a comparison of
+alternatives, is described in the `"npy-format" NEP
+<http://www.numpy.org/neps/nep-0001-npy-format.html>`_; however, details have
+evolved with time and this document is more current.
"""
from __future__ import division, absolute_import, print_function
diff --git a/numpy/lib/histograms.py b/numpy/lib/histograms.py
index 90e19769e..2922b3a86 100644
--- a/numpy/lib/histograms.py
+++ b/numpy/lib/histograms.py
@@ -877,12 +877,6 @@ def histogramdd(sample, bins=10, range=None, normed=False, weights=None):
# bins is an integer
bins = D*[bins]
- # avoid rounding issues for comparisons when dealing with inexact types
- if np.issubdtype(sample.dtype, np.inexact):
- edge_dt = sample.dtype
- else:
- edge_dt = float
-
# normalize the range argument
if range is None:
range = (None,) * D
@@ -896,13 +890,12 @@ def histogramdd(sample, bins=10, range=None, normed=False, weights=None):
raise ValueError(
'`bins[{}]` must be positive, when an integer'.format(i))
smin, smax = _get_outer_edges(sample[:,i], range[i])
- edges[i] = np.linspace(smin, smax, bins[i] + 1, dtype=edge_dt)
+ edges[i] = np.linspace(smin, smax, bins[i] + 1)
elif np.ndim(bins[i]) == 1:
- edges[i] = np.asarray(bins[i], edge_dt)
- # not just monotonic, due to the use of mindiff below
- if np.any(edges[i][:-1] >= edges[i][1:]):
+ edges[i] = np.asarray(bins[i])
+ if np.any(edges[i][:-1] > edges[i][1:]):
raise ValueError(
- '`bins[{}]` must be strictly increasing, when an array'
+ '`bins[{}]` must be monotonically increasing, when an array'
.format(i))
else:
raise ValueError(
@@ -913,7 +906,8 @@ def histogramdd(sample, bins=10, range=None, normed=False, weights=None):
# Compute the bin number each sample falls into.
Ncount = tuple(
- np.digitize(sample[:, i], edges[i])
+ # avoid np.digitize to work around gh-11022
+ np.searchsorted(edges[i], sample[:, i], side='right')
for i in _range(D)
)
@@ -921,16 +915,10 @@ def histogramdd(sample, bins=10, range=None, normed=False, weights=None):
# For the rightmost bin, we want values equal to the right edge to be
# counted in the last bin, and not as an outlier.
for i in _range(D):
- # Rounding precision
- mindiff = dedges[i].min()
- if not np.isinf(mindiff):
- decimal = int(-np.log10(mindiff)) + 6
- # Find which points are on the rightmost edge.
- not_smaller_than_edge = (sample[:, i] >= edges[i][-1])
- on_edge = (np.around(sample[:, i], decimal) ==
- np.around(edges[i][-1], decimal))
- # Shift these points one bin to the left.
- Ncount[i][on_edge & not_smaller_than_edge] -= 1
+ # Find which points are on the rightmost edge.
+ on_edge = (sample[:, i] == edges[i][-1])
+ # Shift these points one bin to the left.
+ Ncount[i][on_edge] -= 1
# Compute the sample indices in the flattened histogram matrix.
# This raises an error if the array is too large.
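The binning change above is self-contained: np.searchsorted(edges, sample, side='right') gives each sample the index of the first edge strictly greater than it, and the follow-up pass pulls values sitting exactly on the last edge back into the final bin instead of treating them as outliers. In isolation:

    import numpy as np

    edges = np.array([0.0, 0.5, 1.0])
    sample = np.array([0.25, 0.5, 1.0])

    # side='right' sends a value equal to an interior edge into the bin
    # on its right ...
    ncount = np.searchsorted(edges, sample, side='right')
    # ... so only values equal to the rightmost edge need shifting back.
    ncount[sample == edges[-1]] -= 1

    print(ncount)  # [1 2 2]: 0.5 goes right of its edge, 1.0 stays in the last bin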
diff --git a/numpy/lib/npyio.py b/numpy/lib/npyio.py
index 97f50b5d8..95804be7f 100644
--- a/numpy/lib/npyio.py
+++ b/numpy/lib/npyio.py
@@ -475,9 +475,7 @@ def save(file, arr, allow_pickle=True, fix_imports=True):
Notes
-----
- For a description of the ``.npy`` format, see the module docstring
- of `numpy.lib.format` or the NumPy Enhancement Proposal
- http://numpy.github.io/neps/npy-format.html
+ For a description of the ``.npy`` format, see :ref:`binary-serialization`.
Examples
--------
@@ -561,9 +559,7 @@ def savez(file, *args, **kwds):
The ``.npz`` file format is a zipped archive of files named after the
variables they contain. The archive is not compressed and each file
in the archive contains one variable in ``.npy`` format. For a
- description of the ``.npy`` format, see `numpy.lib.format` or the
- NumPy Enhancement Proposal
- http://numpy.github.io/neps/npy-format.html
+ description of the ``.npy`` format, see :ref:`binary-serialization`.
When opening the saved ``.npz`` file with `load` a `NpzFile` object is
returned. This is a dictionary-like object which can be queried for
@@ -642,9 +638,9 @@ def savez_compressed(file, *args, **kwds):
The ``.npz`` file format is a zipped archive of files named after the
variables they contain. The archive is compressed with
``zipfile.ZIP_DEFLATED`` and each file in the archive contains one variable
- in ``.npy`` format. For a description of the ``.npy`` format, see
- `numpy.lib.format` or the NumPy Enhancement Proposal
- http://numpy.github.io/neps/npy-format.html
+ in ``.npy`` format. For a description of the ``.npy`` format, see
+ :ref:`binary-serialization`.
+
When opening the saved ``.npz`` file with `load` a `NpzFile` object is
returned. This is a dictionary-like object which can be queried for
@@ -791,8 +787,8 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
the data-type.
comments : str or sequence of str, optional
The characters or list of characters used to indicate the start of a
- comment. For backwards compatibility, byte strings will be decoded as
- 'latin1'. The default is '#'.
+ comment. None implies no comments. For backwards compatibility, byte
+ strings will be decoded as 'latin1'. The default is '#'.
delimiter : str, optional
The string used to separate values. For backwards compatibility, byte
strings will be decoded as 'latin1'. The default is whitespace.
diff --git a/numpy/lib/tests/test_arraypad.py b/numpy/lib/tests/test_arraypad.py
index 8be49ce67..8ba0370b0 100644
--- a/numpy/lib/tests/test_arraypad.py
+++ b/numpy/lib/tests/test_arraypad.py
@@ -489,6 +489,19 @@ class TestConstant(object):
)
assert_allclose(test, expected)
+ def test_check_large_integers(self):
+ uint64_max = 2 ** 64 - 1
+ arr = np.full(5, uint64_max, dtype=np.uint64)
+ test = np.pad(arr, 1, mode="constant", constant_values=arr.min())
+ expected = np.full(7, uint64_max, dtype=np.uint64)
+ assert_array_equal(test, expected)
+
+ int64_max = 2 ** 63 - 1
+ arr = np.full(5, int64_max, dtype=np.int64)
+ test = np.pad(arr, 1, mode="constant", constant_values=arr.min())
+ expected = np.full(7, int64_max, dtype=np.int64)
+ assert_array_equal(test, expected)
+
class TestLinearRamp(object):
def test_check_simple(self):
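The new large-integer test above pins down why the np.full rewrite in arraypad matters: the old (np.zeros(padshape) + val).astype(arr.dtype) detoured through float64, which cannot represent the extremes of 64-bit integers, while np.full builds the pad at the target dtype from the start. A quick demonstration:

    import numpy as np

    uint64_max = 2 ** 64 - 1

    as_float = np.zeros(3) + uint64_max   # the old detour through float64
    print(as_float[0] == 2.0 ** 64)       # True: uint64_max rounded away

    exact = np.full(3, uint64_max, dtype=np.uint64)
    print(exact[0] == uint64_max)         # True: no float round trip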
diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py
index 43d62a7ff..5dc96775b 100644
--- a/numpy/lib/tests/test_function_base.py
+++ b/numpy/lib/tests/test_function_base.py
@@ -1525,9 +1525,9 @@ class TestDigitize(object):
class TestUnwrap(object):
def test_simple(self):
- # check that unwrap removes jumps greather that 2*pi
+        # check that unwrap removes jumps greater than 2*pi
assert_array_equal(unwrap([1, 1 + 2 * np.pi]), [1, 1])
- # check that unwrap maintans continuity
+ # check that unwrap maintains continuity
assert_(np.all(diff(unwrap(rand(10) * 100)) < np.pi))
@@ -2759,7 +2759,7 @@ class TestQuantile(object):
assert_equal(np.quantile(x, 0.5), 1.75)
def test_no_p_overwrite(self):
- # this is worth retesting, beause quantile does not make a copy
+ # this is worth retesting, because quantile does not make a copy
p0 = np.array([0, 0.75, 0.25, 0.5, 1.0])
p = p0.copy()
np.quantile(np.arange(100.), p, interpolation="midpoint")
diff --git a/numpy/lib/tests/test_histograms.py b/numpy/lib/tests/test_histograms.py
index 06daacbdc..e16ae12c2 100644
--- a/numpy/lib/tests/test_histograms.py
+++ b/numpy/lib/tests/test_histograms.py
@@ -253,7 +253,7 @@ class TestHistogram(object):
one_nan = np.array([0, 1, np.nan])
all_nan = np.array([np.nan, np.nan])
- # the internal commparisons with NaN give warnings
+ # the internal comparisons with NaN give warnings
sup = suppress_warnings()
sup.filter(RuntimeWarning)
with sup:
@@ -613,8 +613,6 @@ class TestHistogramdd(object):
assert_raises(ValueError, np.histogramdd, x, bins=[-1, 2, 4, 5])
assert_raises(ValueError, np.histogramdd, x, bins=[1, 0.99, 1, 1])
assert_raises(
- ValueError, np.histogramdd, x, bins=[1, 1, 1, [1, 2, 2, 3]])
- assert_raises(
ValueError, np.histogramdd, x, bins=[1, 1, 1, [1, 2, 3, -3]])
assert_(np.histogramdd(x, bins=[1, 1, 1, [1, 2, 3, 4]]))
@@ -646,7 +644,7 @@ class TestHistogramdd(object):
bins = [[0., 0.5, 1.0]]
hist, _ = histogramdd(x, bins=bins)
assert_(hist[0] == 0.0)
- assert_(hist[1] == 1.)
+ assert_(hist[1] == 0.0)
x = [1.0001]
bins = [[0., 0.5, 1.0]]
hist, _ = histogramdd(x, bins=bins)
@@ -660,3 +658,40 @@ class TestHistogramdd(object):
range=[[0.0, 1.0], [0.25, 0.75], [0.25, np.inf]])
assert_raises(ValueError, histogramdd, vals,
range=[[0.0, 1.0], [np.nan, 0.75], [0.25, 0.5]])
+
+ def test_equal_edges(self):
+ """ Test that adjacent entries in an edge array can be equal """
+ x = np.array([0, 1, 2])
+ y = np.array([0, 1, 2])
+ x_edges = np.array([0, 2, 2])
+ y_edges = 1
+ hist, edges = histogramdd((x, y), bins=(x_edges, y_edges))
+
+ hist_expected = np.array([
+ [2.],
+ [1.], # x == 2 falls in the final bin
+ ])
+ assert_equal(hist, hist_expected)
+
+ def test_edge_dtype(self):
+ """ Test that if an edge array is input, its type is preserved """
+ x = np.array([0, 10, 20])
+ y = x / 10
+ x_edges = np.array([0, 5, 15, 20])
+ y_edges = x_edges / 10
+ hist, edges = histogramdd((x, y), bins=(x_edges, y_edges))
+
+ assert_equal(edges[0].dtype, x_edges.dtype)
+ assert_equal(edges[1].dtype, y_edges.dtype)
+
+ def test_large_integers(self):
+ big = 2**60 # Too large to represent with a full precision float
+
+ x = np.array([0], np.int64)
+ x_edges = np.array([-1, +1], np.int64)
+ y = big + x
+ y_edges = big + x_edges
+
+ hist, edges = histogramdd((x, y), bins=(x_edges, y_edges))
+
+ assert_equal(hist[0, 0], 1)
diff --git a/numpy/lib/tests/test_nanfunctions.py b/numpy/lib/tests/test_nanfunctions.py
index e69d9dd7d..42e221506 100644
--- a/numpy/lib/tests/test_nanfunctions.py
+++ b/numpy/lib/tests/test_nanfunctions.py
@@ -912,7 +912,7 @@ class TestNanFunctions_Quantile(object):
assert_equal(np.nanquantile(x, 0.5), 1.75)
def test_no_p_overwrite(self):
- # this is worth retesting, beause quantile does not make a copy
+ # this is worth retesting, because quantile does not make a copy
p0 = np.array([0, 0.75, 0.25, 0.5, 1.0])
p = p0.copy()
np.nanquantile(np.arange(100.), p, interpolation="midpoint")
diff --git a/numpy/lib/tests/test_shape_base.py b/numpy/lib/tests/test_shape_base.py
index 080fd066d..0e86fbb19 100644
--- a/numpy/lib/tests/test_shape_base.py
+++ b/numpy/lib/tests/test_shape_base.py
@@ -79,7 +79,7 @@ class TestApplyAlongAxis(object):
def test_axis_insertion(self, cls=np.ndarray):
def f1to2(x):
- """produces an assymmetric non-square matrix from x"""
+ """produces an asymmetric non-square matrix from x"""
assert_equal(x.ndim, 1)
return (x[::-1] * x[1:,None]).view(cls)
@@ -123,7 +123,7 @@ class TestApplyAlongAxis(object):
def test_axis_insertion_ma(self):
def f1to2(x):
- """produces an assymmetric non-square matrix from x"""
+ """produces an asymmetric non-square matrix from x"""
assert_equal(x.ndim, 1)
res = x[::-1] * x[1:,None]
return np.ma.masked_where(res%5==0, res)
diff --git a/numpy/lib/twodim_base.py b/numpy/lib/twodim_base.py
index 402c18850..cca316e9a 100644
--- a/numpy/lib/twodim_base.py
+++ b/numpy/lib/twodim_base.py
@@ -650,7 +650,7 @@ def histogram2d(x, y, bins=10, range=None, normed=False, weights=None):
N = 1
if N != 1 and N != 2:
- xedges = yedges = asarray(bins, float)
+ xedges = yedges = asarray(bins)
bins = [xedges, yedges]
hist, edges = histogramdd([x, y], bins, range, normed, weights)
return hist, edges[0], edges[1]
diff --git a/numpy/linalg/tests/test_linalg.py b/numpy/linalg/tests/test_linalg.py
index 00baa52bc..5ed1ff1c0 100644
--- a/numpy/linalg/tests/test_linalg.py
+++ b/numpy/linalg/tests/test_linalg.py
@@ -7,7 +7,6 @@ import os
import sys
import itertools
import traceback
-import warnings
import pytest
import numpy as np
@@ -22,12 +21,11 @@ from numpy.testing import (
)
-def ifthen(a, b):
- return not a or b
-
-
-def imply(a, b):
- return not a or b
+def consistent_subclass(out, in_):
+ # For ndarray subclass input, our output should have the same subclass
+ # (non-ndarray input gets converted to ndarray).
+ return type(out) is (type(in_) if isinstance(in_, np.ndarray)
+ else np.ndarray)
old_assert_almost_equal = assert_almost_equal
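consistent_subclass tightens the old imply checks into a two-way contract: ndarray (or plain Python) input must produce a plain ndarray, and subclass input must produce the same subclass. With the definition above in scope:

    import numpy as np

    out = np.linalg.inv(np.eye(2))
    print(consistent_subclass(out, np.eye(2)))                 # True: ndarray in, ndarray out
    print(consistent_subclass(out, [[1.0, 0.0], [0.0, 1.0]]))  # True: non-array input converts
    print(consistent_subclass(out, np.matrix(np.eye(2))))      # False: the subclass was dropped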
@@ -65,6 +63,7 @@ all_tags = {
'generalized', 'size-0', 'strided' # optional additions
}
+
class LinalgCase(object):
def __init__(self, name, a, b, tags=set()):
"""
@@ -86,6 +85,7 @@ class LinalgCase(object):
def __repr__(self):
return "<LinalgCase: %s>" % (self.name,)
+
def apply_tag(tag, cases):
"""
Add the given tag (a string) to each of the cases (a list of LinalgCase
@@ -129,10 +129,6 @@ CASES += apply_tag('square', [
np.empty((0, 0), dtype=double),
np.empty((0,), dtype=double),
tags={'size-0'}),
- LinalgCase("0x0_matrix",
- np.empty((0, 0), dtype=double).view(np.matrix),
- np.empty((0, 1), dtype=double).view(np.matrix),
- tags={'size-0'}),
LinalgCase("8x8",
np.random.rand(8, 8),
np.random.rand(8)),
@@ -142,12 +138,6 @@ CASES += apply_tag('square', [
LinalgCase("nonarray",
[[1, 2], [3, 4]],
[2, 1]),
- LinalgCase("matrix_b_only",
- array([[1., 2.], [3., 4.]]),
- matrix([2., 1.]).T),
- LinalgCase("matrix_a_and_b",
- matrix([[1., 2.], [3., 4.]]),
- matrix([2., 1.]).T),
])
# non-square test-cases
@@ -231,9 +221,6 @@ CASES += apply_tag('hermitian', [
LinalgCase("matrix_b_only",
array([[1., 2.], [2., 1.]]),
None),
- LinalgCase("hmatrix_a_and_b",
- matrix([[1., 2.], [2., 1.]]),
- None),
LinalgCase("hmatrix_1x1",
np.random.rand(1, 1),
None),
@@ -270,12 +257,13 @@ def _make_generalized_cases():
return new_cases
+
CASES += _make_generalized_cases()
+
#
# Generate stride combination variations of the above
#
-
def _stride_comb_iter(x):
"""
Generate cartesian product of strides for all axes
@@ -323,6 +311,7 @@ def _stride_comb_iter(x):
xi = np.lib.stride_tricks.as_strided(x, strides=s)
yield xi, "stride_xxx_0_0"
+
def _make_strided_cases():
new_cases = []
for case in CASES:
@@ -333,94 +322,104 @@ def _make_strided_cases():
new_cases.append(new_case)
return new_cases
+
CASES += _make_strided_cases()
#
# Test different routines against the above cases
#
+class LinalgTestCase(object):
+ TEST_CASES = CASES
-def _check_cases(func, require=set(), exclude=set()):
- """
- Run func on each of the cases with all of the tags in require, and none
- of the tags in exclude
- """
- for case in CASES:
- # filter by require and exclude
- if case.tags & require != require:
- continue
- if case.tags & exclude:
- continue
+ def check_cases(self, require=set(), exclude=set()):
+ """
+        Run self.do on each of the cases with all of the tags in require, and none
+ of the tags in exclude
+ """
+ for case in self.TEST_CASES:
+ # filter by require and exclude
+ if case.tags & require != require:
+ continue
+ if case.tags & exclude:
+ continue
- try:
- case.check(func)
- except Exception:
- msg = "In test case: %r\n\n" % case
- msg += traceback.format_exc()
- raise AssertionError(msg)
+ try:
+ case.check(self.do)
+ except Exception:
+ msg = "In test case: %r\n\n" % case
+ msg += traceback.format_exc()
+ raise AssertionError(msg)
-class LinalgSquareTestCase(object):
+class LinalgSquareTestCase(LinalgTestCase):
def test_sq_cases(self):
- _check_cases(self.do, require={'square'}, exclude={'generalized', 'size-0'})
+ self.check_cases(require={'square'},
+ exclude={'generalized', 'size-0'})
def test_empty_sq_cases(self):
- _check_cases(self.do, require={'square', 'size-0'}, exclude={'generalized'})
+ self.check_cases(require={'square', 'size-0'},
+ exclude={'generalized'})
-class LinalgNonsquareTestCase(object):
+class LinalgNonsquareTestCase(LinalgTestCase):
def test_nonsq_cases(self):
- _check_cases(self.do, require={'nonsquare'}, exclude={'generalized', 'size-0'})
+ self.check_cases(require={'nonsquare'},
+ exclude={'generalized', 'size-0'})
def test_empty_nonsq_cases(self):
- _check_cases(self.do, require={'nonsquare', 'size-0'}, exclude={'generalized'})
+ self.check_cases(require={'nonsquare', 'size-0'},
+ exclude={'generalized'})
-class HermitianTestCase(object):
+
+class HermitianTestCase(LinalgTestCase):
def test_herm_cases(self):
- _check_cases(self.do, require={'hermitian'}, exclude={'generalized', 'size-0'})
+ self.check_cases(require={'hermitian'},
+ exclude={'generalized', 'size-0'})
def test_empty_herm_cases(self):
- _check_cases(self.do, require={'hermitian', 'size-0'}, exclude={'generalized'})
+ self.check_cases(require={'hermitian', 'size-0'},
+ exclude={'generalized'})
-class LinalgGeneralizedSquareTestCase(object):
+class LinalgGeneralizedSquareTestCase(LinalgTestCase):
@pytest.mark.slow
def test_generalized_sq_cases(self):
- _check_cases(self.do, require={'generalized', 'square'}, exclude={'size-0'})
+ self.check_cases(require={'generalized', 'square'},
+ exclude={'size-0'})
@pytest.mark.slow
def test_generalized_empty_sq_cases(self):
- _check_cases(self.do, require={'generalized', 'square', 'size-0'})
+ self.check_cases(require={'generalized', 'square', 'size-0'})
-class LinalgGeneralizedNonsquareTestCase(object):
+class LinalgGeneralizedNonsquareTestCase(LinalgTestCase):
@pytest.mark.slow
def test_generalized_nonsq_cases(self):
- _check_cases(self.do, require={'generalized', 'nonsquare'}, exclude={'size-0'})
+ self.check_cases(require={'generalized', 'nonsquare'},
+ exclude={'size-0'})
@pytest.mark.slow
def test_generalized_empty_nonsq_cases(self):
- _check_cases(self.do, require={'generalized', 'nonsquare', 'size-0'})
+ self.check_cases(require={'generalized', 'nonsquare', 'size-0'})
-class HermitianGeneralizedTestCase(object):
+class HermitianGeneralizedTestCase(LinalgTestCase):
@pytest.mark.slow
def test_generalized_herm_cases(self):
- _check_cases(self.do,
- require={'generalized', 'hermitian'},
- exclude={'size-0'})
+ self.check_cases(require={'generalized', 'hermitian'},
+ exclude={'size-0'})
@pytest.mark.slow
def test_generalized_empty_herm_cases(self):
- _check_cases(self.do,
- require={'generalized', 'hermitian', 'size-0'},
- exclude={'none'})
+ self.check_cases(require={'generalized', 'hermitian', 'size-0'},
+ exclude={'none'})
def dot_generalized(a, b):
@@ -452,13 +451,15 @@ def identity_like_generalized(a):
return identity(a.shape[0])
-class TestSolve(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
-
+class SolveCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
+    # kept apart from TestSolve for use in testing with matrices.
def do(self, a, b, tags):
x = linalg.solve(a, b)
assert_almost_equal(b, dot_generalized(a, x))
- assert_(imply(isinstance(b, matrix), isinstance(x, matrix)))
+ assert_(consistent_subclass(x, b))
+
+class TestSolve(SolveCases):
def test_types(self):
def check(dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
@@ -518,14 +519,16 @@ class TestSolve(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
assert_(isinstance(result, ArraySubclass))
-class TestInv(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
+class InvCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
def do(self, a, b, tags):
a_inv = linalg.inv(a)
assert_almost_equal(dot_generalized(a, a_inv),
identity_like_generalized(a))
- assert_(imply(isinstance(a, matrix), isinstance(a_inv, matrix)))
+ assert_(consistent_subclass(a_inv, a))
+
+class TestInv(InvCases):
def test_types(self):
def check(dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
@@ -550,13 +553,15 @@ class TestInv(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
assert_(isinstance(res, ArraySubclass))
-class TestEigvals(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
+class EigvalsCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
def do(self, a, b, tags):
ev = linalg.eigvals(a)
evalues, evectors = linalg.eig(a)
assert_almost_equal(ev, evalues)
+
+class TestEigvals(EigvalsCases):
def test_types(self):
def check(dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
@@ -585,15 +590,17 @@ class TestEigvals(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
assert_(isinstance(res, np.ndarray))
-class TestEig(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
+class EigCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
def do(self, a, b, tags):
evalues, evectors = linalg.eig(a)
assert_allclose(dot_generalized(a, evectors),
np.asarray(evectors) * np.asarray(evalues)[..., None, :],
rtol=get_rtol(evalues.dtype))
- assert_(imply(isinstance(a, matrix), isinstance(evectors, matrix)))
+ assert_(consistent_subclass(evectors, a))
+
+class TestEig(EigCases):
def test_types(self):
def check(dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
@@ -632,7 +639,7 @@ class TestEig(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
assert_(isinstance(a, np.ndarray))
-class TestSVD(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
+class SVDCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
def do(self, a, b, tags):
if 'size-0' in tags:
@@ -643,9 +650,11 @@ class TestSVD(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
assert_allclose(a, dot_generalized(np.asarray(u) * np.asarray(s)[..., None, :],
np.asarray(vt)),
rtol=get_rtol(u.dtype))
- assert_(imply(isinstance(a, matrix), isinstance(u, matrix)))
- assert_(imply(isinstance(a, matrix), isinstance(vt, matrix)))
+ assert_(consistent_subclass(u, a))
+ assert_(consistent_subclass(vt, a))
+
+class TestSVD(SVDCases):
def test_types(self):
def check(dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
@@ -670,7 +679,7 @@ class TestSVD(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
assert_raises(linalg.LinAlgError, linalg.svd, a)
-class TestCond(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
+class CondCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
# cond(x, p) for p in (None, 2, -2)
def do(self, a, b, tags):
@@ -715,6 +724,8 @@ class TestCond(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
* (abs(cinv)**2).sum(-1).sum(-1)),
single_decimal=5, double_decimal=11)
+
+class TestCond(CondCases):
def test_basic_nonsvd(self):
# Smoketest the non-svd norms
A = array([[1., 0, 1], [0, -2., 0], [0, 0, 3.]])
@@ -778,20 +789,24 @@ class TestCond(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
assert_(np.isfinite(c[1,0]))
-class TestPinv(LinalgSquareTestCase,
- LinalgNonsquareTestCase,
- LinalgGeneralizedSquareTestCase,
- LinalgGeneralizedNonsquareTestCase):
+class PinvCases(LinalgSquareTestCase,
+ LinalgNonsquareTestCase,
+ LinalgGeneralizedSquareTestCase,
+ LinalgGeneralizedNonsquareTestCase):
def do(self, a, b, tags):
a_ginv = linalg.pinv(a)
# `a @ a_ginv == I` does not hold if a is singular
dot = dot_generalized
assert_almost_equal(dot(dot(a, a_ginv), a), a, single_decimal=5, double_decimal=11)
- assert_(imply(isinstance(a, matrix), isinstance(a_ginv, matrix)))
+ assert_(consistent_subclass(a_ginv, a))
+
+
+class TestPinv(PinvCases):
+ pass
-class TestDet(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
+class DetCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
def do(self, a, b, tags):
d = linalg.det(a)
@@ -810,6 +825,8 @@ class TestDet(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
assert_almost_equal(np.abs(s[m]), 1)
assert_equal(ld[~m], -inf)
+
+class TestDet(DetCases):
def test_zero(self):
assert_equal(linalg.det([[0.0]]), 0.0)
assert_equal(type(linalg.det([[0.0]])), double)
@@ -853,7 +870,7 @@ class TestDet(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
assert_(res[1].dtype.type is np.float64)
-class TestLstsq(LinalgSquareTestCase, LinalgNonsquareTestCase):
+class LstsqCases(LinalgSquareTestCase, LinalgNonsquareTestCase):
def do(self, a, b, tags):
if 'size-0' in tags:
@@ -881,9 +898,11 @@ class TestLstsq(LinalgSquareTestCase, LinalgNonsquareTestCase):
expect_resids = np.array([]).view(type(x))
assert_almost_equal(residuals, expect_resids)
assert_(np.issubdtype(residuals.dtype, np.floating))
- assert_(imply(isinstance(b, matrix), isinstance(x, matrix)))
- assert_(imply(isinstance(b, matrix), isinstance(residuals, matrix)))
+ assert_(consistent_subclass(x, b))
+ assert_(consistent_subclass(residuals, b))
+
+class TestLstsq(LstsqCases):
def test_future_rcond(self):
a = np.array([[0., 1., 0., 1., 2., 0.],
[0., 2., 0., 0., 1., 0.],
@@ -902,6 +921,7 @@ class TestLstsq(LinalgSquareTestCase, LinalgNonsquareTestCase):
# Warning should be raised exactly once (first command)
assert_(len(w) == 1)
+
class TestMatrixPower(object):
R90 = array([[0, 1], [-1, 0]])
Arb22 = array([[4, -7], [-2, 10]])
@@ -975,7 +995,7 @@ class TestBoolPower(object):
assert_equal(matrix_power(A, 2), A)
-class TestEigvalsh(HermitianTestCase, HermitianGeneralizedTestCase):
+class TestEigvalshCases(HermitianTestCase, HermitianGeneralizedTestCase):
def do(self, a, b, tags):
# note that eigenvalue arrays returned by eig must be sorted since
@@ -988,6 +1008,8 @@ class TestEigvalsh(HermitianTestCase, HermitianGeneralizedTestCase):
ev2 = linalg.eigvalsh(a, 'U')
assert_allclose(ev2, evalues, rtol=get_rtol(ev.dtype))
+
+class TestEigvalsh(object):
def test_types(self):
def check(dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
@@ -1043,7 +1065,7 @@ class TestEigvalsh(HermitianTestCase, HermitianGeneralizedTestCase):
assert_(isinstance(res, np.ndarray))
-class TestEigh(HermitianTestCase, HermitianGeneralizedTestCase):
+class TestEighCases(HermitianTestCase, HermitianGeneralizedTestCase):
def do(self, a, b, tags):
# note that eigenvalue arrays returned by eig must be sorted since
@@ -1064,6 +1086,8 @@ class TestEigh(HermitianTestCase, HermitianGeneralizedTestCase):
np.asarray(ev2)[..., None, :] * np.asarray(evc2),
rtol=get_rtol(ev.dtype), err_msg=repr(a))
+
+class TestEigh(object):
def test_types(self):
def check(dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
@@ -1124,11 +1148,13 @@ class TestEigh(HermitianTestCase, HermitianGeneralizedTestCase):
assert_(isinstance(a, np.ndarray))
-class _TestNorm(object):
-
+class _TestNormBase(object):
dt = None
dec = None
+
+class _TestNormGeneral(_TestNormBase):
+
def test_empty(self):
assert_equal(norm([]), 0.0)
assert_equal(norm(array([], dtype=self.dt)), 0.0)
@@ -1175,57 +1201,6 @@ class _TestNorm(object):
assert_(issubclass(an.dtype.type, np.floating))
assert_almost_equal(an, 1.0)
- def test_matrix_return_type(self):
- a = np.array([[1, 0, 1], [0, 1, 1]])
-
- exact_types = np.typecodes['AllInteger']
-
- # float32, complex64, float64, complex128 types are the only types
- # allowed by `linalg`, which performs the matrix operations used
- # within `norm`.
- inexact_types = 'fdFD'
-
- all_types = exact_types + inexact_types
-
- for each_inexact_types in all_types:
- at = a.astype(each_inexact_types)
-
- an = norm(at, -np.inf)
- assert_(issubclass(an.dtype.type, np.floating))
- assert_almost_equal(an, 2.0)
-
- with suppress_warnings() as sup:
- sup.filter(RuntimeWarning, "divide by zero encountered")
- an = norm(at, -1)
- assert_(issubclass(an.dtype.type, np.floating))
- assert_almost_equal(an, 1.0)
-
- an = norm(at, 1)
- assert_(issubclass(an.dtype.type, np.floating))
- assert_almost_equal(an, 2.0)
-
- an = norm(at, 2)
- assert_(issubclass(an.dtype.type, np.floating))
- assert_almost_equal(an, 3.0**(1.0/2.0))
-
- an = norm(at, -2)
- assert_(issubclass(an.dtype.type, np.floating))
- assert_almost_equal(an, 1.0)
-
- an = norm(at, np.inf)
- assert_(issubclass(an.dtype.type, np.floating))
- assert_almost_equal(an, 2.0)
-
- an = norm(at, 'fro')
- assert_(issubclass(an.dtype.type, np.floating))
- assert_almost_equal(an, 2.0)
-
- an = norm(at, 'nuc')
- assert_(issubclass(an.dtype.type, np.floating))
- # Lower bar needed to support low precision floats.
- # They end up being off by 1 in the 7th place.
- old_assert_almost_equal(an, 2.7320508075688772, decimal=6)
-
def test_vector(self):
a = [1, 2, 3, 4]
b = [-1, -2, -3, -4]
@@ -1256,39 +1231,6 @@ class _TestNorm(object):
array(c, dtype=self.dt)):
_test(v)
- def test_matrix_2x2(self):
- A = matrix([[1, 3], [5, 7]], dtype=self.dt)
- assert_almost_equal(norm(A), 84 ** 0.5)
- assert_almost_equal(norm(A, 'fro'), 84 ** 0.5)
- assert_almost_equal(norm(A, 'nuc'), 10.0)
- assert_almost_equal(norm(A, inf), 12.0)
- assert_almost_equal(norm(A, -inf), 4.0)
- assert_almost_equal(norm(A, 1), 10.0)
- assert_almost_equal(norm(A, -1), 6.0)
- assert_almost_equal(norm(A, 2), 9.1231056256176615)
- assert_almost_equal(norm(A, -2), 0.87689437438234041)
-
- assert_raises(ValueError, norm, A, 'nofro')
- assert_raises(ValueError, norm, A, -3)
- assert_raises(ValueError, norm, A, 0)
-
- def test_matrix_3x3(self):
- # This test has been added because the 2x2 example
- # happened to have equal nuclear norm and induced 1-norm.
- # The 1/10 scaling factor accommodates the absolute tolerance
- # used in assert_almost_equal.
- A = (1 / 10) * \
- np.array([[1, 2, 3], [6, 0, 5], [3, 2, 1]], dtype=self.dt)
- assert_almost_equal(norm(A), (1 / 10) * 89 ** 0.5)
- assert_almost_equal(norm(A, 'fro'), (1 / 10) * 89 ** 0.5)
- assert_almost_equal(norm(A, 'nuc'), 1.3366836911774836)
- assert_almost_equal(norm(A, inf), 1.1)
- assert_almost_equal(norm(A, -inf), 0.6)
- assert_almost_equal(norm(A, 1), 1.0)
- assert_almost_equal(norm(A, -1), 0.4)
- assert_almost_equal(norm(A, 2), 0.88722940323461277)
- assert_almost_equal(norm(A, -2), 0.19456584790481812)
-
def test_axis(self):
# Vector norms.
# Compare the use of `axis` with computing the norm of each row
@@ -1368,10 +1310,103 @@ class _TestNorm(object):
assert_(found.shape == expected_shape,
shape_err.format(found.shape, expected_shape, order, k))
+
+class _TestNorm2D(_TestNormBase):
+ # Define the part for 2d arrays separately, so we can subclass this
+ # and run the tests using np.matrix in matrixlib.tests.test_matrix_linalg.
+ array = np.array
+
+ def test_matrix_empty(self):
+ assert_equal(norm(self.array([[]], dtype=self.dt)), 0.0)
+
+ def test_matrix_return_type(self):
+ a = self.array([[1, 0, 1], [0, 1, 1]])
+
+ exact_types = np.typecodes['AllInteger']
+
+ # float32, complex64, float64, complex128 types are the only types
+ # allowed by `linalg`, which performs the matrix operations used
+ # within `norm`.
+ inexact_types = 'fdFD'
+
+ all_types = exact_types + inexact_types
+
+ for each_inexact_types in all_types:
+ at = a.astype(each_inexact_types)
+
+ an = norm(at, -np.inf)
+ assert_(issubclass(an.dtype.type, np.floating))
+ assert_almost_equal(an, 2.0)
+
+ with suppress_warnings() as sup:
+ sup.filter(RuntimeWarning, "divide by zero encountered")
+ an = norm(at, -1)
+ assert_(issubclass(an.dtype.type, np.floating))
+ assert_almost_equal(an, 1.0)
+
+ an = norm(at, 1)
+ assert_(issubclass(an.dtype.type, np.floating))
+ assert_almost_equal(an, 2.0)
+
+ an = norm(at, 2)
+ assert_(issubclass(an.dtype.type, np.floating))
+ assert_almost_equal(an, 3.0**(1.0/2.0))
+
+ an = norm(at, -2)
+ assert_(issubclass(an.dtype.type, np.floating))
+ assert_almost_equal(an, 1.0)
+
+ an = norm(at, np.inf)
+ assert_(issubclass(an.dtype.type, np.floating))
+ assert_almost_equal(an, 2.0)
+
+ an = norm(at, 'fro')
+ assert_(issubclass(an.dtype.type, np.floating))
+ assert_almost_equal(an, 2.0)
+
+ an = norm(at, 'nuc')
+ assert_(issubclass(an.dtype.type, np.floating))
+ # Lower bar needed to support low precision floats.
+ # They end up being off by 1 in the 7th place.
+ np.testing.assert_almost_equal(an, 2.7320508075688772, decimal=6)
+
+ def test_matrix_2x2(self):
+ A = self.array([[1, 3], [5, 7]], dtype=self.dt)
+ assert_almost_equal(norm(A), 84 ** 0.5)
+ assert_almost_equal(norm(A, 'fro'), 84 ** 0.5)
+ assert_almost_equal(norm(A, 'nuc'), 10.0)
+ assert_almost_equal(norm(A, inf), 12.0)
+ assert_almost_equal(norm(A, -inf), 4.0)
+ assert_almost_equal(norm(A, 1), 10.0)
+ assert_almost_equal(norm(A, -1), 6.0)
+ assert_almost_equal(norm(A, 2), 9.1231056256176615)
+ assert_almost_equal(norm(A, -2), 0.87689437438234041)
+
+ assert_raises(ValueError, norm, A, 'nofro')
+ assert_raises(ValueError, norm, A, -3)
+ assert_raises(ValueError, norm, A, 0)
+
+ def test_matrix_3x3(self):
+ # This test has been added because the 2x2 example
+ # happened to have equal nuclear norm and induced 1-norm.
+ # The 1/10 scaling factor accommodates the absolute tolerance
+ # used in assert_almost_equal.
+ A = (1 / 10) * \
+ self.array([[1, 2, 3], [6, 0, 5], [3, 2, 1]], dtype=self.dt)
+ assert_almost_equal(norm(A), (1 / 10) * 89 ** 0.5)
+ assert_almost_equal(norm(A, 'fro'), (1 / 10) * 89 ** 0.5)
+ assert_almost_equal(norm(A, 'nuc'), 1.3366836911774836)
+ assert_almost_equal(norm(A, inf), 1.1)
+ assert_almost_equal(norm(A, -inf), 0.6)
+ assert_almost_equal(norm(A, 1), 1.0)
+ assert_almost_equal(norm(A, -1), 0.4)
+ assert_almost_equal(norm(A, 2), 0.88722940323461277)
+ assert_almost_equal(norm(A, -2), 0.19456584790481812)
+
def test_bad_args(self):
# Check that bad arguments raise the appropriate exceptions.
- A = array([[1, 2, 3], [4, 5, 6]], dtype=self.dt)
+ A = self.array([[1, 2, 3], [4, 5, 6]], dtype=self.dt)
B = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4)
# Using `axis=<integer>` or passing in a 1-D array implies vector
@@ -1395,6 +1430,10 @@ class _TestNorm(object):
assert_raises(ValueError, norm, B, None, (0, 1, 2))
+class _TestNorm(_TestNorm2D, _TestNormGeneral):
+ pass
+
+
class TestNorm_NonSystematic(object):
def test_longdouble_norm(self):
@@ -1422,21 +1461,34 @@ class TestNorm_NonSystematic(object):
old_assert_almost_equal(np.linalg.norm(d, ord=3), res, decimal=5)
-class TestNormDouble(_TestNorm):
+# Separate definitions so we can use them for matrix tests.
+class _TestNormDoubleBase(_TestNormBase):
dt = np.double
dec = 12
-class TestNormSingle(_TestNorm):
+class _TestNormSingleBase(_TestNormBase):
dt = np.float32
dec = 6
-class TestNormInt64(_TestNorm):
+class _TestNormInt64Base(_TestNormBase):
dt = np.int64
dec = 12
+class TestNormDouble(_TestNorm, _TestNormDoubleBase):
+ pass
+
+
+class TestNormSingle(_TestNorm, _TestNormSingleBase):
+ pass
+
+
+class TestNormInt64(_TestNorm, _TestNormInt64Base):
+ pass
+
+
class TestMatrixRank(object):
def test_matrix_rank(self):
@@ -1487,6 +1539,8 @@ def test_reduced_rank():
class TestQR(object):
+    # Define the array class here, so we can run the tests on matrices elsewhere.
+ array = np.array
def check_qr(self, a):
# This test expects the argument `a` to be an ndarray or
@@ -1537,7 +1591,7 @@ class TestQR(object):
# of the functions in lapack_lite. Consequently, this test is
# very limited in scope. Note that the results are in FORTRAN
# order, hence the h arrays are transposed.
- a = array([[1, 2], [3, 4], [5, 6]], dtype=np.double)
+ a = self.array([[1, 2], [3, 4], [5, 6]], dtype=np.double)
# Test double
h, tau = linalg.qr(a, mode='raw')
@@ -1553,22 +1607,21 @@ class TestQR(object):
assert_(tau.shape == (2,))
def test_mode_all_but_economic(self):
- a = array([[1, 2], [3, 4]])
- b = array([[1, 2], [3, 4], [5, 6]])
+ a = self.array([[1, 2], [3, 4]])
+ b = self.array([[1, 2], [3, 4], [5, 6]])
for dt in "fd":
m1 = a.astype(dt)
m2 = b.astype(dt)
self.check_qr(m1)
self.check_qr(m2)
self.check_qr(m2.T)
- self.check_qr(matrix(m1))
+
for dt in "fd":
m1 = 1 + 1j * a.astype(dt)
m2 = 1 + 1j * b.astype(dt)
self.check_qr(m1)
self.check_qr(m2)
self.check_qr(m2.T)
- self.check_qr(matrix(m1))
def test_0_size(self):
# There may be good ways to do (some of this) reasonably:
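The reshuffling of the linalg test classes above follows one pattern throughout: each *Cases class carries only the do() check plus the inherited case runner, while the Test* subclass adds the ndarray-only extras (test_types and friends). That lets the new matrixlib file below re-run every do() against np.matrix simply by mixing in a different TEST_CASES list. In miniature, with hypothetical Foo names and the classes defined in this diff:

    class FooCases(LinalgSquareTestCase):
        # Reusable against whatever TEST_CASES the mixin supplies.
        def do(self, a, b, tags):
            ...

    class TestFoo(FooCases):
        # ndarray-only additions live on the Test* class.
        def test_types(self):
            ...

    class TestFooMatrix(FooCases, MatrixTestCase):
        # Same do(), np.matrix cases from MatrixTestCase.TEST_CASES.
        pass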
diff --git a/numpy/linalg/umath_linalg.c.src b/numpy/linalg/umath_linalg.c.src
index 03fdd387a..3a5ad7250 100644
--- a/numpy/linalg/umath_linalg.c.src
+++ b/numpy/linalg/umath_linalg.c.src
@@ -382,17 +382,11 @@ typedef f2c_doublecomplex fortran_doublecomplex;
*****************************************************************************
*/
-static NPY_INLINE void *
-offset_ptr(void* ptr, ptrdiff_t offset)
-{
- return (void*)((npy_uint8*)ptr + offset);
-}
-
static NPY_INLINE int
get_fp_invalid_and_clear(void)
{
int status;
- status = npy_clear_floatstatus();
+ status = npy_clear_floatstatus_barrier((char*)&status);
return !!(status & NPY_FPE_INVALID);
}
@@ -403,7 +397,7 @@ set_fp_invalid_or_clear(int error_occurred)
npy_set_floatstatus_invalid();
}
else {
- npy_clear_floatstatus();
+ npy_clear_floatstatus_barrier((char*)&error_occurred);
}
}
@@ -577,104 +571,6 @@ dump_linearize_data(const char* name, const LINEARIZE_DATA_t* params)
params->row_strides, params->column_strides);
}
-
-static NPY_INLINE float
-FLOAT_add(float op1, float op2)
-{
- return op1 + op2;
-}
-
-static NPY_INLINE double
-DOUBLE_add(double op1, double op2)
-{
- return op1 + op2;
-}
-
-static NPY_INLINE COMPLEX_t
-CFLOAT_add(COMPLEX_t op1, COMPLEX_t op2)
-{
- COMPLEX_t result;
- result.array[0] = op1.array[0] + op2.array[0];
- result.array[1] = op1.array[1] + op2.array[1];
-
- return result;
-}
-
-static NPY_INLINE DOUBLECOMPLEX_t
-CDOUBLE_add(DOUBLECOMPLEX_t op1, DOUBLECOMPLEX_t op2)
-{
- DOUBLECOMPLEX_t result;
- result.array[0] = op1.array[0] + op2.array[0];
- result.array[1] = op1.array[1] + op2.array[1];
-
- return result;
-}
-
-static NPY_INLINE float
-FLOAT_mul(float op1, float op2)
-{
- return op1*op2;
-}
-
-static NPY_INLINE double
-DOUBLE_mul(double op1, double op2)
-{
- return op1*op2;
-}
-
-
-static NPY_INLINE COMPLEX_t
-CFLOAT_mul(COMPLEX_t op1, COMPLEX_t op2)
-{
- COMPLEX_t result;
- result.array[0] = op1.array[0]*op2.array[0] - op1.array[1]*op2.array[1];
- result.array[1] = op1.array[1]*op2.array[0] + op1.array[0]*op2.array[1];
-
- return result;
-}
-
-static NPY_INLINE DOUBLECOMPLEX_t
-CDOUBLE_mul(DOUBLECOMPLEX_t op1, DOUBLECOMPLEX_t op2)
-{
- DOUBLECOMPLEX_t result;
- result.array[0] = op1.array[0]*op2.array[0] - op1.array[1]*op2.array[1];
- result.array[1] = op1.array[1]*op2.array[0] + op1.array[0]*op2.array[1];
-
- return result;
-}
-
-static NPY_INLINE float
-FLOAT_mulc(float op1, float op2)
-{
- return op1*op2;
-}
-
-static NPY_INLINE double
-DOUBLE_mulc(float op1, float op2)
-{
- return op1*op2;
-}
-
-static NPY_INLINE COMPLEX_t
-CFLOAT_mulc(COMPLEX_t op1, COMPLEX_t op2)
-{
- COMPLEX_t result;
- result.array[0] = op1.array[0]*op2.array[0] + op1.array[1]*op2.array[1];
- result.array[1] = op1.array[0]*op2.array[1] - op1.array[1]*op2.array[0];
-
- return result;
-}
-
-static NPY_INLINE DOUBLECOMPLEX_t
-CDOUBLE_mulc(DOUBLECOMPLEX_t op1, DOUBLECOMPLEX_t op2)
-{
- DOUBLECOMPLEX_t result;
- result.array[0] = op1.array[0]*op2.array[0] + op1.array[1]*op2.array[1];
- result.array[1] = op1.array[0]*op2.array[1] - op1.array[1]*op2.array[0];
-
- return result;
-}
-
static NPY_INLINE void
print_FLOAT(npy_float s)
{
diff --git a/numpy/matrixlib/tests/test_matrix_linalg.py b/numpy/matrixlib/tests/test_matrix_linalg.py
new file mode 100644
index 000000000..6fc733c2e
--- /dev/null
+++ b/numpy/matrixlib/tests/test_matrix_linalg.py
@@ -0,0 +1,95 @@
+""" Test functions for linalg module using the matrix class."""
+from __future__ import division, absolute_import, print_function
+
+import numpy as np
+
+from numpy.linalg.tests.test_linalg import (
+ LinalgCase, apply_tag, TestQR as _TestQR, LinalgTestCase,
+ _TestNorm2D, _TestNormDoubleBase, _TestNormSingleBase, _TestNormInt64Base,
+ SolveCases, InvCases, EigvalsCases, EigCases, SVDCases, CondCases,
+ PinvCases, DetCases, LstsqCases)
+
+
+CASES = []
+
+# square test cases
+CASES += apply_tag('square', [
+ LinalgCase("0x0_matrix",
+ np.empty((0, 0), dtype=np.double).view(np.matrix),
+ np.empty((0, 1), dtype=np.double).view(np.matrix),
+ tags={'size-0'}),
+ LinalgCase("matrix_b_only",
+ np.array([[1., 2.], [3., 4.]]),
+ np.matrix([2., 1.]).T),
+ LinalgCase("matrix_a_and_b",
+ np.matrix([[1., 2.], [3., 4.]]),
+ np.matrix([2., 1.]).T),
+])
+
+# hermitian test-cases
+CASES += apply_tag('hermitian', [
+ LinalgCase("hmatrix_a_and_b",
+ np.matrix([[1., 2.], [2., 1.]]),
+ None),
+])
+# No need to make generalized or strided cases for matrices.
+
+
+class MatrixTestCase(LinalgTestCase):
+ TEST_CASES = CASES
+
+
+class TestSolveMatrix(SolveCases, MatrixTestCase):
+ pass
+
+
+class TestInvMatrix(InvCases, MatrixTestCase):
+ pass
+
+
+class TestEigvalsMatrix(EigvalsCases, MatrixTestCase):
+ pass
+
+
+class TestEigMatrix(EigCases, MatrixTestCase):
+ pass
+
+
+class TestSVDMatrix(SVDCases, MatrixTestCase):
+ pass
+
+
+class TestCondMatrix(CondCases, MatrixTestCase):
+ pass
+
+
+class TestPinvMatrix(PinvCases, MatrixTestCase):
+ pass
+
+
+class TestDetMatrix(DetCases, MatrixTestCase):
+ pass
+
+
+class TestLstsqMatrix(LstsqCases, MatrixTestCase):
+ pass
+
+
+class _TestNorm2DMatrix(_TestNorm2D):
+ array = np.matrix
+
+
+class TestNormDoubleMatrix(_TestNorm2DMatrix, _TestNormDoubleBase):
+ pass
+
+
+class TestNormSingleMatrix(_TestNorm2DMatrix, _TestNormSingleBase):
+ pass
+
+
+class TestNormInt64Matrix(_TestNorm2DMatrix, _TestNormInt64Base):
+ pass
+
+
+class TestQRMatrix(_TestQR):
+ array = np.matrix
diff --git a/numpy/random/mtrand/mtrand.pyx b/numpy/random/mtrand/mtrand.pyx
index 8ef153c15..b45b3146f 100644
--- a/numpy/random/mtrand/mtrand.pyx
+++ b/numpy/random/mtrand/mtrand.pyx
@@ -4901,10 +4901,24 @@ cdef class RandomState:
"""
if isinstance(x, (int, long, np.integer)):
arr = np.arange(x)
- else:
- arr = np.array(x)
- self.shuffle(arr)
- return arr
+ self.shuffle(arr)
+ return arr
+
+ arr = np.asarray(x)
+
+ # shuffle has fast-path for 1-d
+ if arr.ndim == 1:
+ # must return a copy
+ if arr is x:
+ arr = np.array(arr)
+ self.shuffle(arr)
+ return arr
+
+    # Shuffle an index array; intp dtype keeps shuffle on its 1-d fast path
+ idx = np.arange(arr.shape[0], dtype=np.intp)
+ self.shuffle(idx)
+ return arr[idx]
+
_rand = RandomState()
seed = _rand.seed
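The rewritten permutation above now handles three cases explicitly: an integer builds an arange and shuffles it; a 1-d array is copied and shuffled directly (shuffle has a fast path for 1-d); anything higher-dimensional shuffles a 1-d intp index array and applies it with fancy indexing, which both hits the fast path and guarantees a fresh copy. The n-d branch in isolation:

    import numpy as np

    def permute_nd(rng, arr):
        # Permute along axis 0 via an index array; arr[idx] returns a copy.
        idx = np.arange(arr.shape[0], dtype=np.intp)
        rng.shuffle(idx)
        return arr[idx]

    rng = np.random.RandomState(0)
    print(permute_nd(rng, np.arange(6).reshape(3, 2)))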
diff --git a/numpy/testing/_private/decorators.py b/numpy/testing/_private/decorators.py
index 60d3f968f..24c4e385d 100644
--- a/numpy/testing/_private/decorators.py
+++ b/numpy/testing/_private/decorators.py
@@ -34,7 +34,7 @@ def slow(t):
The exact definition of a slow test is obviously both subjective and
hardware-dependent, but in general any individual test that requires more
- than a second or two should be labeled as slow (the whole suite consits of
+ than a second or two should be labeled as slow (the whole suite consists of
thousands of tests, so even a second is significant).
Parameters
diff --git a/site.cfg.example b/site.cfg.example
index 645b48543..21609a332 100644
--- a/site.cfg.example
+++ b/site.cfg.example
@@ -180,6 +180,14 @@
# mkl_libs = mkl_rt
# lapack_libs =
+# ACCELERATE
+# ----------
+# Accelerate/vecLib is an OSX framework providing BLAS and LAPACK implementations.
+#
+# [accelerate]
+# libraries = Accelerate, vecLib
+# #libraries = None
+
# UMFPACK
# -------
# The UMFPACK library is used in scikits.umfpack to factor large sparse matrices.
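Whether the new [accelerate] section (or the ACCELERATE environment variable) took effect can be checked from Python with the same helper the detection code itself calls:

    from numpy.distutils.system_info import get_info

    # On OSX with the framework present this includes entries such as
    # extra_link_args=['-Wl,-framework', '-Wl,Accelerate']; on other
    # platforms it is an empty dict.
    print(get_info('accelerate'))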