diff options
91 files changed, 1978 insertions, 1324 deletions
diff --git a/.circleci/config.yml b/.circleci/config.yml index 230871ce1..e2eb01b04 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -22,7 +22,7 @@ jobs: . venv/bin/activate pip install cython sphinx matplotlib sudo apt-get update - sudo apt-get install -y graphviz + sudo apt-get install -y graphviz texlive-fonts-recommended texlive-latex-recommended texlive-latex-extra texlive-generic-extra latexmk texlive-xetex - run: name: build numpy diff --git a/.gitignore b/.gitignore index 6e3f8e041..fbdd4f784 100644 --- a/.gitignore +++ b/.gitignore @@ -100,6 +100,10 @@ Icon? ehthumbs.db Thumbs.db +# pytest generated files # +########################## +/.pytest_cache + # Things specific to this project # ################################### numpy/core/__svn_version__.py diff --git a/.travis.yml b/.travis.yml index 168a7a385..4632fbffe 100644 --- a/.travis.yml +++ b/.travis.yml @@ -54,7 +54,6 @@ matrix: - cython3-dbg - python3-dbg - python3-dev - - python3-nose - python3-setuptools - python: 3.6 env: USE_WHEEL=1 RUN_FULL_TESTS=1 @@ -1,4 +1,4 @@ -# <img alt="NumPy" src="branding/icons/numpylogo.svg" height="60"> +# <img alt="NumPy" src="https://cdn.rawgit.com/numpy/numpy/master/branding/icons/numpylogo.svg" height="60"> [](https://travis-ci.org/numpy/numpy) [](https://ci.appveyor.com/project/charris/numpy) diff --git a/benchmarks/benchmarks/bench_random.py b/benchmarks/benchmarks/bench_random.py index 7ed3e2fa1..9d84d83d3 100644 --- a/benchmarks/benchmarks/bench_random.py +++ b/benchmarks/benchmarks/bench_random.py @@ -65,3 +65,18 @@ class Randint_dtype(Benchmark): high = self.high[name] np.random.randint(0, high + 1, size=10**5, dtype=name) + +class Permutation(Benchmark): + def setup(self): + self.n = 10000 + self.a_1d = np.random.random_sample(self.n) + self.a_2d = np.random.random_sample((self.n, 2)) + + def time_permutation_1d(self): + np.random.permutation(self.a_1d) + + def time_permutation_2d(self): + np.random.permutation(self.a_2d) + + def 
time_permutation_int(self): + np.random.permutation(self.n) diff --git a/doc/RELEASE_WALKTHROUGH.rst.txt b/doc/RELEASE_WALKTHROUGH.rst.txt index 81e15f91f..9a2b71d7e 100644 --- a/doc/RELEASE_WALKTHROUGH.rst.txt +++ b/doc/RELEASE_WALKTHROUGH.rst.txt @@ -1,12 +1,12 @@ -This file contains a walkthrough of the NumPy 1.12.0 release on Fedora Linux. +This file contains a walkthrough of the NumPy 1.14.3 release on Linux. The commands can be copied into the command line, but be sure to -replace 1.12.0 by the correct version. +replace 1.14.3 by the correct version. Release Walkthrough ==================== -Building the release --------------------- +Prepare the release commit +-------------------------- Checkout the branch for the release, make sure it is up to date, and clean the repository:: @@ -16,44 +16,39 @@ repository:: $ git submodule update $ git clean -xdf -Look at the git log to get the hash of the last commit in the release, then -check it out:: - - $ git log - $ git checkout 7849751173fb47a5f17761b3515b42b4d8ce1197 - Edit pavement.py and setup.py as detailed in HOWTO_RELEASE:: $ gvim pavement.py setup.py - $ git commit -a -m"REL: NumPy 1.14.1 release." + $ git commit -a -m"REL: NumPy 1.14.3 release." Sanity check:: $ python runtests.py -m "full" $ python3 runtests.py -m "full" -Tag it,and build the source distribution archives:: +Push this release directly onto the end of the maintenance branch. This +requires write permission to the numpy repository:: - $ git tag -s v1.14.1 - $ paver sdist # sdist will do a git clean -xdf, so we omit that + $ git push upstream maintenance/1.14.x -Check that the files in ``release/installers`` have the correct versions, then -push the tag upstream; generation of the wheels for PyPI needs it:: +As an example, see the 1.14.3 REL commit: `<https://github.com/numpy/numpy/commit/73299826729be58cec179b52c656adfcaefada93>`_. - $ git push upstream v1.14.1 +Build wheels +------------ -Trigger the wheels build. This can take a while. 
The numpy-wheels repository is -cloned from `<https://github.com/MacPython/numpy-wheels>`_. Start with a pull -as the repo may have been accessed and changed by someone else and a push will -fail. +Trigger the wheels build by pointing the numpy-wheels repository at this +commit. This can take a while. The numpy-wheels repository is cloned from +`<https://github.com/MacPython/numpy-wheels>`_. Start with a pull as the repo +may have been accessed and changed by someone else and a push will fail:: $ cd ../numpy-wheels $ git pull origin master $ git branch <new version> # only when starting new numpy version - $ git checkout v1.14.x # v1.14.x already existed for the 1.14.1 release + $ git checkout v1.14.x # v1.14.x already existed for the 1.14.3 release -The ``.travis.yml`` and ``appveyor.yml`` files need to be edited to make -sure they have the correct version, search for ``BUILD_COMMIT``. +Edit the ``.travis.yml`` and ``appveyor.yml`` files to make sure they have the +correct version, and put in the commit hash for the ``REL`` commit created +above for ``BUILD_COMMIT``. See `<https://github.com/MacPython/numpy-wheels/commit/fed9c04629c155e7804282eb803d81097244598d>`_ for an example:: $ gvim .travis.yml appveyor.yml $ git commit -a @@ -66,7 +61,6 @@ and appveyor build status. Check if all the needed wheels have been built and uploaded before proceeding. There should currently be 22 of them at `<https://wheels.scipy.org>`_, 4 for Mac, 8 for Windows, and 10 for Linux. - Download wheels --------------- @@ -75,7 +69,7 @@ in the ``terryfy`` repository. The terryfy repository may be cloned from `<https://github.com/MacPython/terryfy>`_ if you don't already have it. The wheels can also be uploaded using the ``wheel-uploader``, but we prefer to download all the wheels to the ``../numpy/release/installers`` directory and -upload later using ``twine``. +upload later using ``twine``:: $ cd ../terryfy $ git pull origin master @@ -88,14 +82,56 @@ upload later using ``twine``. 
If you do this often, consider making CDN_URL and NPY_WHLS part of your default environment. +Tag the release +--------------- + +Once the wheels have been built and downloaded without errors, go back to your +numpy repository in the maintenance branch and tag the ``REL`` commit, signing +it with your gpg key, and build the source distribution archives:: + + $ git tag -s v1.14.3 + $ paver sdist # sdist will do a git clean -xdf, so we omit that + +You should upload your public gpg key to github, so that the tag will appear +"verified" there. + +Check that the files in ``release/installers`` have the correct versions, then +push the tag upstream:: + + $ git push upstream v1.14.3 + +We wait until this point to push the tag because it is very difficult to change +the tag after it has been pushed. + +Reset the maintenance branch into a development state +----------------------------------------------------- + +Add another ``REL`` commit to the numpy maintenance branch, which resets the +``ISREALEASED`` flag to ``False`` and increments the version counter:: + + $ gvim pavement.py setup.py + $ git commit -a -m"REL: prepare 1.14.x for further development" + $ git push upstream maintenance/1.14.x + +This strategy is copied from the scipy release procedure and was used in numpy +for the first time in 1.14.3. It needed to be modified a little since numpy +has more strict requirements for the version number. It was acheived in two +commits: +`<https://github.com/numpy/numpy/commit/b8df705bdcce92d3e2c6f050eb4414192cf0df04>`_ +`<https://github.com/numpy/numpy/commit/29e175269624493114f77cceff93486271f9efff>`_. Upload to PyPI -------------- -Upload to PyPI using ``twine``. The choice here is to sign the files, so will -need to sign every file separately when they are uploaded, keeping the gpg pass -phrase in the clipboard and pasting it in will make that easier. We may chose -to forgo the signing in the future:: +Upload to PyPI using ``twine``. 
+ +In the past, we signed the wheels files, but after 1.14.3 wheels should no +longer support or need signing. The instructions below still sign. + +For the 1.14.3 release we signed every file when it was uploaded. On systems +which do not cache the gpg passphrase for a few minutes, keeping the it in the +clipboard and pasting it in will make that easier. We may chose to forgo the +signing in the future:: $ cd ../numpy $ twine upload -s release/installers/*.whl @@ -120,15 +156,15 @@ Generate the ``release/README`` files:: $ rm release/installers/*.asc $ paver write_release_and_log -Go to `<https://github.com/numpy/numpy/releases>`_, there should be a ``v1.14.1 +Go to `<https://github.com/numpy/numpy/releases>`_, there should be a ``v1.14.3 tag``, click on it and hit the edit button for that tag. There are two ways to add files, using an editable text window and as binary uploads. - Cut and paste the ``release/README.md`` file contents into the text window. -- Upload ``release/installers/numpy-1.12.0.tar.gz`` as a binary file. -- Upload ``release/installers/numpy-1.12.0.zip`` as a binary file. +- Upload ``release/installers/numpy-1.14.3.tar.gz`` as a binary file. +- Upload ``release/installers/numpy-1.14.3.zip`` as a binary file. - Upload ``release/README`` as a binary file. -- Upload ``doc/changelog/1.14.1-changelog.rst`` as a binary file. +- Upload ``doc/changelog/1.14.3-changelog.rst`` as a binary file. - Check the pre-release button if this is a pre-releases. - Hit the ``{Publish,Update} release`` button at the bottom. @@ -143,7 +179,7 @@ upload the documentation. 
Otherwise:: $ pushd doc $ make dist - $ make upload USERNAME=<yourname> RELEASE=v1.14.1 + $ make upload USERNAME=<yourname> RELEASE=v1.14.3 $ popd If the release series is a new one, you will need to rebuild and upload the @@ -164,7 +200,7 @@ This assumes that you have forked `<https://github.com/scipy/scipy.org>`_:: $ cd ../scipy.org $ git checkout master $ git pull upstream master - $ git checkout -b numpy-1.14.1 + $ git checkout -b numpy-1.14.3 $ gvim www/index.rst # edit the News section $ git commit -a $ git push origin HEAD @@ -180,7 +216,7 @@ announcements for the basic template. The contributor list can be generated as follows:: $ cd ../numpy - $ ./tools/changelog.py $GITHUB v1.14.0..v1.14.1 > tmp.rst + $ ./tools/changelog.py $GITHUB v1.14.2..v1.14.3 > tmp.rst The contents of ``tmp.rst`` can then be cut and pasted into the announcement email. diff --git a/doc/changelog/1.14.3-changelog.rst b/doc/changelog/1.14.3-changelog.rst new file mode 100644 index 000000000..784a9177f --- /dev/null +++ b/doc/changelog/1.14.3-changelog.rst @@ -0,0 +1,27 @@ + +Contributors +============ + +A total of 6 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Allan Haldane +* Charles Harris +* Jonathan March + +* Malcolm Smith + +* Matti Picus +* Pauli Virtanen + +Pull requests merged +==================== + +A total of 8 pull requests were merged for this release. + +* `#10862 <https://github.com/numpy/numpy/pull/10862>`__: BUG: floating types should override tp_print (1.14 backport) +* `#10905 <https://github.com/numpy/numpy/pull/10905>`__: BUG: for 1.14 back-compat, accept list-of-lists in fromrecords +* `#10947 <https://github.com/numpy/numpy/pull/10947>`__: BUG: 'style' arg to array2string broken in legacy mode (1.14... 
+* `#10959 <https://github.com/numpy/numpy/pull/10959>`__: BUG: test, fix for missing flags['WRITEBACKIFCOPY'] key +* `#10960 <https://github.com/numpy/numpy/pull/10960>`__: BUG: Add missing underscore to prototype in check_embedded_lapack +* `#10961 <https://github.com/numpy/numpy/pull/10961>`__: BUG: Fix encoding regression in ma/bench.py (Issue #10868) +* `#10962 <https://github.com/numpy/numpy/pull/10962>`__: BUG: core: fix NPY_TITLE_KEY macro on pypy +* `#10974 <https://github.com/numpy/numpy/pull/10974>`__: BUG: test, fix PyArray_DiscardWritebackIfCopy... diff --git a/doc/release/1.14.3-notes.rst b/doc/release/1.14.3-notes.rst new file mode 100644 index 000000000..60b631168 --- /dev/null +++ b/doc/release/1.14.3-notes.rst @@ -0,0 +1,41 @@ +========================== +NumPy 1.14.3 Release Notes +========================== + +This is a bugfix release for a few bugs reported following the 1.14.2 release: + +* np.lib.recfunctions.fromrecords accepts a list-of-lists, until 1.15 +* In python2, float types use the new print style when printing to a file +* style arg in "legacy" print mode now works for 0d arrays + +The Python versions supported in this release are 2.7 and 3.4 - 3.6. The Python +3.6 wheels available from PIP are built with Python 3.6.2 and should be +compatible with all previous versions of Python 3.6. The source releases were +cythonized with Cython 0.28.2. + +Contributors +============ + +A total of 6 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Allan Haldane +* Charles Harris +* Jonathan March + +* Malcolm Smith + +* Matti Picus +* Pauli Virtanen + +Pull requests merged +==================== + +A total of 8 pull requests were merged for this release. 
+ +* `#10862 <https://github.com/numpy/numpy/pull/10862>`__: BUG: floating types should override tp_print (1.14 backport) +* `#10905 <https://github.com/numpy/numpy/pull/10905>`__: BUG: for 1.14 back-compat, accept list-of-lists in fromrecords +* `#10947 <https://github.com/numpy/numpy/pull/10947>`__: BUG: 'style' arg to array2string broken in legacy mode (1.14... +* `#10959 <https://github.com/numpy/numpy/pull/10959>`__: BUG: test, fix for missing flags['WRITEBACKIFCOPY'] key +* `#10960 <https://github.com/numpy/numpy/pull/10960>`__: BUG: Add missing underscore to prototype in check_embedded_lapack +* `#10961 <https://github.com/numpy/numpy/pull/10961>`__: BUG: Fix encoding regression in ma/bench.py (Issue #10868) +* `#10962 <https://github.com/numpy/numpy/pull/10962>`__: BUG: core: fix NPY_TITLE_KEY macro on pypy +* `#10974 <https://github.com/numpy/numpy/pull/10974>`__: BUG: test, fix PyArray_DiscardWritebackIfCopy... diff --git a/doc/release/1.15.0-notes.rst b/doc/release/1.15.0-notes.rst index a6b23b892..d3f34d2bc 100644 --- a/doc/release/1.15.0-notes.rst +++ b/doc/release/1.15.0-notes.rst @@ -20,15 +20,18 @@ New functions * ``nanquantile`` function, an interface to ``nanpercentile`` without factors of 100 -* `np.printoptions`, the context manager which sets print options temporarily +* `np.printoptions`, a context manager that sets print options temporarily for the scope of the ``with`` block:: >>> with np.printoptions(precision=2): ... print(np.array([2.0]) / 3) [0.67] - * `np.histogram_bin_edges`, a function to get the edges of the bins used by a histogram - without needing to calculate the histogram. +* `np.histogram_bin_edges`, a function to get the edges of the bins used by a histogram + without needing to calculate the histogram. + +* `npy_get_floatstatus_barrier`` and ``npy_clear_floatstatus_barrier`` have been added to + deal with compiler optimization changing the order of operations. See below for details. 
Deprecations ============ @@ -42,6 +45,7 @@ Deprecations * `np.ma.loads`, `np.ma.dumps` * `np.ma.load`, `np.ma.dump` - these functions already failed on python 3, when called with a string. + * Direct imports from the following modules is deprecated. All testing related imports should come from `numpy.testing`. * `np.testing.utils` @@ -55,7 +59,7 @@ Deprecations In the future, it might return a different result. Use `np.sum(np.from_iter(generator))` or the built-in Python `sum` instead. -* Users of the C-API should call ``PyArrayResolveWriteBackIfCopy`` or +* Users of the C-API should call ``PyArrayResolveWriteBackIfCopy`` or ``PyArray_DiscardWritbackIfCopy`` on any array with the ``WRITEBACKIFCOPY`` flag set, before the array is deallocated. A deprecation warning will be emitted if those calls are not used when needed. @@ -64,7 +68,7 @@ Deprecations anytime one of the iterator operands is writeable, so that numpy can manage writeback semantics, or should call ``it.close()``. A `RuntimeWarning` will be emitted otherwise in these cases. Users of the C-API - should call ``NpyIter_Close`` before ``NpyIter_Dealloc``. + should call ``NpyIter_Close`` before ``NpyIter_Deallocate``. Future Changes @@ -81,7 +85,7 @@ are some circumstances where nditer doesn't actually give you a view onto the writable array. Instead, it gives you a copy, and if you make changes to the copy, nditer later writes those changes back into your actual array. Currently, this writeback occurs when the array objects are garbage collected, which makes -this API error-prone on CPython and entirely broken on PyPy. Therefore, +this API error-prone on CPython and entirely broken on PyPy. Therefore, ``nditer`` should now be used as a context manager whenever using ``nditer`` with writeable arrays (``with np.nditer(...) as it: ...``). 
You may also explicitly call ``it.close()`` for cases where a context manager is unusable, @@ -116,12 +120,26 @@ longer possible, and objects expecting the old API are respected. The silent suc by removing the interception of an otherwise-normal Exception when ``axis`` was provided to an object using the old API. +unstructured void array's ``.item`` method now returns a bytes object +--------------------------------------------------------------------- +``.item`` now returns a ``bytes`` object instead of a buffer or byte array. +This may affect code which assumed the return value was mutable, which is no +longer the case. + C API changes ============= -``NpyIter_Close`` has been added and should be called before -``NpyIter_Dealloc`` to resolve possible writeback-enabled arrays. +* ``NpyIter_Close`` has been added and should be called before + ``NpyIter_Deallocate`` to resolve possible writeback-enabled arrays. + +* Functions ``npy_get_floatstatus_barrier`` and ``npy_clear_floatstatus_barrier`` + have been added and should be used in place of the ``npy_get_floatstatus``and + ``npy_clear_status`` functions. Optimizing compilers like GCC 8.1 and Clang + were rearranging the order of operations when the previous functions were + used in the ufunc SIMD functions, resulting in the floatstatus flags being ' + checked before the operation whose status we wanted to check was run. + See `#10339 <https://github.com/numpy/numpy/issues/10370>`__. New Features ============ @@ -198,6 +216,13 @@ passed explicitly, and are not yet computed automatically. No longer does an IQR of 0 result in `n_bins=1`, rather the number of bins chosen is related to the data size in this situation. +``histogram`` and ``histogramdd`` return edges matching the float type of the data +---------------------------------------------------------------------------------- +When passed ``float16``, ``np.float32``, or ``np.longdouble`` data, the +returned edges are now of the same dtype. 
Previously, ``histogram`` would only +return the same type if explicit bins were given, and ``histogram`` would +produce ``float64`` bins no matter what the inputs. + ``histogramdd`` allows explicit ranges to be given in a subset of axes ---------------------------------------------------------------------- The ``range`` argument of `histogramdd` can now contain ``None`` values to @@ -287,6 +312,19 @@ of the overlap between input an output, that is, the next element accumulated is added before the accumulated result is stored in its place, hence the overlap is safe. Avoiding the copy results in faster execution. +``linalg.matrix_power`` can now handle stacks of matrices +--------------------------------------------------------- +Like other functions in ``linalg``, ``matrix_power`` can now deal with arrays +of dimension larger than 2, which are treated as stacks of matrices. As part +of the change, to further improve consistency, the name of the first argument +has been changed to ``a`` (from ``M``), and the exceptions for non-square +matrices have been changed to ``LinAlgError`` (from ``ValueError``). + +Increased performance in ``random.permutation`` for multidimensional arrays +--------------------------------------------------------------------------- +``permutation`` uses the fast path in ``random.shuffle`` for all input +array dimensions. Previously the fast path was only used for 1-d arrays. + Changes ======= diff --git a/doc/scipy-sphinx-theme b/doc/scipy-sphinx-theme -Subproject c466764e2231ba132c09826b5b138fffa1cfcec +Subproject d990ab9134199f6496b9ac8567f10791f04a720 diff --git a/doc/source/_templates/autosummary/minimal_module.rst b/doc/source/_templates/autosummary/minimal_module.rst new file mode 100644 index 000000000..f0d9f00b2 --- /dev/null +++ b/doc/source/_templates/autosummary/minimal_module.rst @@ -0,0 +1,8 @@ +{{ fullname | escape | underline}} + +.. 
automodule:: {{ fullname }} + + {% block docstring %} + {% endblock %} + + diff --git a/doc/source/about.rst b/doc/source/about.rst index 24dc3d0a0..776488ea4 100644 --- a/doc/source/about.rst +++ b/doc/source/about.rst @@ -1,7 +1,7 @@ About NumPy =========== -`NumPy <http://www.scipy.org/NumpPy/>`__ is the fundamental package +NumPy is the fundamental package needed for scientific computing with Python. This package contains: - a powerful N-dimensional :ref:`array object <arrays>` @@ -42,6 +42,8 @@ Our main means of communication are: More information about the development of NumPy can be found at our `Developer Zone <https://scipy.scipy.org/scipylib/dev-zone.html>`__. +The project management structure can be found at our :doc:`governance page <dev/governance/index>` + About this documentation ======================== diff --git a/doc/source/dev/gitwash/development_workflow.rst b/doc/source/dev/gitwash/development_workflow.rst index 5476e3202..c6884a7cf 100644 --- a/doc/source/dev/gitwash/development_workflow.rst +++ b/doc/source/dev/gitwash/development_workflow.rst @@ -396,7 +396,7 @@ collaborator: Now all those people can do:: - git clone git@githhub.com:your-user-name/numpy.git + git clone git@github.com:your-user-name/numpy.git Remember that links starting with ``git@`` use the ssh protocol and are read-write; links starting with ``git://`` are read-only. diff --git a/doc/source/reference/arrays.nditer.rst b/doc/source/reference/arrays.nditer.rst index acad29b11..239f4296b 100644 --- a/doc/source/reference/arrays.nditer.rst +++ b/doc/source/reference/arrays.nditer.rst @@ -78,27 +78,28 @@ order='C' for C order and order='F' for Fortran order. ... 0 3 1 4 2 5 +.. _nditer-context-manager: + Modifying Array Values ---------------------- -By default, the :class:`nditer` treats the input array as a read-only -object. To modify the array elements, you must specify either read-write -or write-only mode. This is controlled with per-operand flags. 
The -operands may be created as views into the original data with the -`WRITEBACKIFCOPY` flag. In this case the iterator must either - -- be used as a context manager, and the temporary data will be written back - to the original array when the `__exit__` function is called. -- have a call to the iterator's `close` function to ensure the modified data - is written back to the original array. - -Regular assignment in Python simply changes a reference in the local or -global variable dictionary instead of modifying an existing variable in -place. This means that simply assigning to `x` will not place the value -into the element of the array, but rather switch `x` from being an array -element reference to being a reference to the value you assigned. To -actually modify the element of the array, `x` should be indexed with -the ellipsis. +By default, the :class:`nditer` treats the input operand as a read-only +object. To be able to modify the array elements, you must specify either +read-write or write-only mode using the `'readwrite'` or `'writeonly'` +per-operand flags. + +The nditer will then yield writeable buffer arrays which you may modify. However, +because the nditer must copy this buffer data back to the original array once +iteration is finished, you must signal when the iteration is ended, by one of two +methods. You may either: + + - used the nditer as a context manager using the `with` statement, and + the temporary data will be written back when the context is exited. + - call the iterator's `close` method once finished iterating, which will trigger + the write-back. + +The nditer can no longer be iterated once either `close` is called or its +context is exited. .. admonition:: Example @@ -186,7 +187,7 @@ construct in order to be more readable. 0 <(0, 0)> 1 <(0, 1)> 2 <(0, 2)> 3 <(1, 0)> 4 <(1, 1)> 5 <(1, 2)> >>> it = np.nditer(a, flags=['multi_index'], op_flags=['writeonly']) - >>> with it: + >>> with it: .... while not it.finished: ... 
it[0] = it.multi_index[1] - it.multi_index[0] ... it.iternext() diff --git a/doc/source/reference/c-api.coremath.rst b/doc/source/reference/c-api.coremath.rst index d3f7fcf75..ad92235da 100644 --- a/doc/source/reference/c-api.coremath.rst +++ b/doc/source/reference/c-api.coremath.rst @@ -183,14 +183,46 @@ Those can be useful for precise floating point comparison. * NPY_FPE_UNDERFLOW * NPY_FPE_INVALID + Note that :c:func:`npy_get_floatstatus_barrier` is preferable as it prevents + agressive compiler optimizations reordering the call relative to + the code setting the status, which could lead to incorrect results. + .. versionadded:: 1.9.0 +.. c:function:: int npy_get_floatstatus_barrier(char*) + + Get floating point status. A pointer to a local variable is passed in to + prevent aggresive compiler optimizations from reodering this function call + relative to the code setting the status, which could lead to incorrect + results. + + Returns a bitmask with following possible flags: + + * NPY_FPE_DIVIDEBYZERO + * NPY_FPE_OVERFLOW + * NPY_FPE_UNDERFLOW + * NPY_FPE_INVALID + + .. versionadded:: 1.15.0 + .. c:function:: int npy_clear_floatstatus() Clears the floating point status. Returns the previous status mask. + Note that :c:func:`npy_clear_floatstatus_barrier` is preferable as it + prevents agressive compiler optimizations reordering the call relative to + the code setting the status, which could lead to incorrect results. + .. versionadded:: 1.9.0 +.. c:function:: int npy_clear_floatstatus_barrier(char*) + + Clears the floating point status. A pointer to a local variable is passed in to + prevent aggresive compiler optimizations from reodering this function call. + Returns the previous status mask. + + .. 
versionadded:: 1.15.0 +n Complex functions ~~~~~~~~~~~~~~~~~ diff --git a/doc/source/reference/c-api.iterator.rst b/doc/source/reference/c-api.iterator.rst index 17f1c45f2..392dcb730 100644 --- a/doc/source/reference/c-api.iterator.rst +++ b/doc/source/reference/c-api.iterator.rst @@ -709,6 +709,10 @@ Construction and Destruction the functions will pass back errors through it instead of setting a Python exception. + :c:func:`NpyIter_Deallocate` must be called for each copy. One call to + :c:func:`NpyIter_Close` is sufficient to trigger writeback resolution for + all copies since they share buffers. + .. c:function:: int NpyIter_RemoveAxis(NpyIter* iter, int axis)`` Removes an axis from iteration. This requires that @@ -761,8 +765,10 @@ Construction and Destruction .. c:function:: int NpyIter_Close(NpyIter* iter) - Resolves any needed writeback resolution. Must be called before - ``NpyIter_Deallocate``. After this call it is not safe to use the operands. + Resolves any needed writeback resolution. Should be called before + :c:func::`NpyIter_Deallocate`. After this call it is not safe to use the operands. + When using :c:func:`NpyIter_Copy`, only one call to :c:func:`NpyIter_Close` + is sufficient to resolve any writebacks, since the copies share buffers. Returns ``0`` or ``-1`` if unsuccessful. @@ -770,8 +776,8 @@ Construction and Destruction Deallocates the iterator object. - `NpyIter_Close` should be called before this. If not, and if writeback is - needed, it will be performed at this point in order to maintain + :c:func:`NpyIter_Close` should be called before this. If not, and if + writeback is needed, it will be performed at this point in order to maintain backward-compatibility with older code, and a deprecation warning will be emitted. Old code should be updated to call `NpyIter_Close` beforehand. 
diff --git a/doc/source/reference/routines.io.rst b/doc/source/reference/routines.io.rst index 573498792..55489951f 100644 --- a/doc/source/reference/routines.io.rst +++ b/doc/source/reference/routines.io.rst @@ -14,7 +14,7 @@ NumPy binary files (NPY, NPZ) savez_compressed The format of these binary file types is documented in -http://numpy.github.io/neps/npy-format.html +:py:mod:`numpy.lib.format` Text files ---------- @@ -78,3 +78,11 @@ Data sources :toctree: generated/ DataSource + +Binary Format Description +------------------------- +.. autosummary:: + :template: autosummary/minimal_module.rst + :toctree: generated/ + + lib.format diff --git a/doc/source/reference/swig.testing.rst b/doc/source/reference/swig.testing.rst index 13642a52e..594df952e 100644 --- a/doc/source/reference/swig.testing.rst +++ b/doc/source/reference/swig.testing.rst @@ -22,7 +22,7 @@ typemaps are working as expected. Testing Organization -------------------- -There are three indepedent testing frameworks supported, for one-, +There are three independent testing frameworks supported, for one-, two-, and three-dimensional arrays respectively. For one-dimensional arrays, there are two C++ files, a header and a source, named:: diff --git a/doc/source/reference/ufuncs.rst b/doc/source/reference/ufuncs.rst index 59d25a9ca..bebd047f6 100644 --- a/doc/source/reference/ufuncs.rst +++ b/doc/source/reference/ufuncs.rst @@ -327,6 +327,13 @@ advanced usage and will not typically be used. multiple outputs is deprecated, and will raise a warning in numpy 1.10, and an error in a future release. + If 'out' is None (the default), a uninitialized return array is created. + The output array is then filled with the results of the ufunc in the places + that the broadcast 'where' is True. If 'where' is the scalar True (the + default), then this corresponds to the entire output being filled. + Note that outputs not explicitly filled are left with their + uninitialized values. + *where* .. 
versionadded:: 1.7 @@ -336,6 +343,9 @@ advanced usage and will not typically be used. of False indicate to leave the value in the output alone. This argument cannot be used for generalized ufuncs as those take non-scalar input. + Note that if an uninitialized return array is created, values of False + will leave those values **uninitialized**. + *axes* .. versionadded:: 1.15 diff --git a/doc/source/user/c-info.ufunc-tutorial.rst b/doc/source/user/c-info.ufunc-tutorial.rst index addc38f45..cfba01c45 100644 --- a/doc/source/user/c-info.ufunc-tutorial.rst +++ b/doc/source/user/c-info.ufunc-tutorial.rst @@ -17,8 +17,8 @@ Creating a new universal function Before reading this, it may help to familiarize yourself with the basics of C extensions for Python by reading/skimming the tutorials in Section 1 of `Extending and Embedding the Python Interpreter -<http://docs.python.org/extending/index.html>`_ and in `How to extend -NumPy <http://docs.scipy.org/doc/numpy/user/c-info.how-to-extend.html>`_ +<http://docs.python.org/extending/index.html>`_ and in :doc:`How to extend +NumPy <c-info.how-to-extend>` The umath module is a computer-generated C-module that creates many ufuncs. It provides a great many examples of how to create a universal diff --git a/numpy/add_newdocs.py b/numpy/add_newdocs.py index c187f8e31..e8030d562 100644 --- a/numpy/add_newdocs.py +++ b/numpy/add_newdocs.py @@ -385,10 +385,11 @@ add_newdoc('numpy.core', 'nditer', array([ 0.5, 1.5, 4.5, 9.5, 16.5]) If operand flags `"writeonly"` or `"readwrite"` are used the operands may - be views into the original data with the WRITEBACKIFCOPY flag. In this case - nditer must be used as a context manager. The temporary - data will be written back to the original data when the `` __exit__`` - function is called but not before:: + be views into the original data with the `WRITEBACKIFCOPY` flag. In this case + nditer must be used as a context manager or the nditer.close + method must be called before using the result. 
The temporary + data will be written back to the original data when the `__exit__` + function is called but not before: >>> a = np.arange(6, dtype='i4')[::-2] >>> with nditer(a, [], @@ -405,7 +406,7 @@ add_newdoc('numpy.core', 'nditer', references (like `x` in the example) may or may not share data with the original data `a`. If writeback semantics were active, i.e. if `x.base.flags.writebackifcopy` is `True`, then exiting the iterator - will sever the connection between `x` and `a`, writing to `x` will + will sever the connection between `x` and `a`, writing to `x` will no longer write to `a`. If writeback semantics are not active, then `x.data` will still point at some part of `a.data`, and writing to one will affect the other. @@ -566,6 +567,11 @@ add_newdoc('numpy.core', 'nditer', ('close', Resolve all writeback semantics in writeable operands. + See Also + -------- + + :ref:`nditer-context-manager` + """)) @@ -5641,10 +5647,13 @@ add_newdoc('numpy.core', 'ufunc', Alternate array object(s) in which to put the result; if provided, it must have a shape that the inputs broadcast to. A tuple of arrays (possible only as a keyword argument) must have length equal to the - number of outputs; use `None` for outputs to be allocated by the ufunc. + number of outputs; use `None` for uninitialized outputs to be + allocated by the ufunc. where : array_like, optional Values of True indicate to calculate the ufunc at that position, values - of False indicate to leave the value in the output alone. + of False indicate to leave the value in the output alone. Note that if + an uninitialized return array is created via the default ``out=None``, + then the elements where the values are False will remain uninitialized. **kwargs For other keyword-only arguments, see the :ref:`ufunc docs <ufuncs.kwargs>`. 
@@ -5652,7 +5661,8 @@ add_newdoc('numpy.core', 'ufunc', ------- r : ndarray or tuple of ndarray `r` will have the shape that the arrays in `x` broadcast to; if `out` is - provided, `r` will be equal to `out`. If the function has more than one + provided, it will be returned. If not, `r` will be allocated and + may contain uninitialized values. If the function has more than one output, then the result will be a tuple of arrays. """) diff --git a/numpy/core/arrayprint.py b/numpy/core/arrayprint.py index f39248bd0..6d15cb23f 100644 --- a/numpy/core/arrayprint.py +++ b/numpy/core/arrayprint.py @@ -1088,7 +1088,7 @@ def format_float_positional(x, precision=None, unique=True, Examples -------- - >>> np.format_float_scientific(np.float32(np.pi)) + >>> np.format_float_positional(np.float32(np.pi)) '3.1415927' >>> np.format_float_positional(np.float16(np.pi)) '3.14' diff --git a/numpy/core/code_generators/generate_umath.py b/numpy/core/code_generators/generate_umath.py index 7492baf9d..112af9a34 100644 --- a/numpy/core/code_generators/generate_umath.py +++ b/numpy/core/code_generators/generate_umath.py @@ -908,8 +908,8 @@ if sys.version_info[0] >= 3: del defdict['divide'] def indent(st, spaces): - indention = ' '*spaces - indented = indention + st.replace('\n', '\n'+indention) + indentation = ' '*spaces + indented = indentation + st.replace('\n', '\n'+indentation) # trim off any trailing spaces indented = re.sub(r' +$', r'', indented) return indented diff --git a/numpy/core/code_generators/numpy_api.py b/numpy/core/code_generators/numpy_api.py index 157fa3447..6cfbbbcc7 100644 --- a/numpy/core/code_generators/numpy_api.py +++ b/numpy/core/code_generators/numpy_api.py @@ -6,7 +6,7 @@ Each dictionary contains name -> index pair. Whenever you change one index, you break the ABI (and the ABI version number should be incremented). 
Whenever you add an item to one of the dict, the API needs to be updated in both setup_common.py and by adding an appropriate -entry to cversion.txt (generate the hash via "python cversions.py". +entry to cversion.txt (generate the hash via "python cversions.py"). When adding a function, make sure to use the next integer not used as an index (in case you use an existing index or jump, the build will stop and raise an diff --git a/numpy/core/einsumfunc.py b/numpy/core/einsumfunc.py index 8cd6eae12..bb6767c4f 100644 --- a/numpy/core/einsumfunc.py +++ b/numpy/core/einsumfunc.py @@ -1148,7 +1148,7 @@ def einsum(*operands, **kwargs): # Do the contraction new_view = c_einsum(einsum_str, *tmp_operands, **einsum_kwargs) - # Append new items and derefernce what we can + # Append new items and dereference what we can operands.append(new_view) del tmp_operands, new_view diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py index 75bcedd81..0db5663f9 100644 --- a/numpy/core/fromnumeric.py +++ b/numpy/core/fromnumeric.py @@ -1336,10 +1336,11 @@ def diagonal(a, offset=0, axis1=0, axis2=1): Returns ------- array_of_diagonals : ndarray - If `a` is 2-D and not a `matrix`, a 1-D array of the same type as `a` - containing the diagonal is returned. If `a` is a `matrix`, a 1-D - array containing the diagonal is returned in order to maintain - backward compatibility. + If `a` is 2-D, then a 1-D array containing the diagonal and of the + same type as `a` is returned unless `a` is a `matrix`, in which case + a 1-D array rather than a (2-D) `matrix` is returned in order to + maintain backward compatibility. + If ``a.ndim > 2``, then the dimensions specified by `axis1` and `axis2` are removed, and a new axis inserted at the end corresponding to the diagonal. @@ -1496,10 +1497,9 @@ def ravel(a, order='C'): Returns ------- y : array_like - If `a` is a matrix, y is a 1-D ndarray, otherwise y is an array of - the same subtype as `a`. 
The shape of the returned array is - ``(a.size,)``. Matrices are special cased for backward - compatibility. + y is an array of the same subtype as `a`, with shape ``(a.size,)``. + Note that matrices are special cased for backward compatibility, if `a` + is a matrix, then y is a 1-D ndarray. See Also -------- diff --git a/numpy/core/include/numpy/npy_interrupt.h b/numpy/core/include/numpy/npy_interrupt.h index f71fd689e..40cb7ac5e 100644 --- a/numpy/core/include/numpy/npy_interrupt.h +++ b/numpy/core/include/numpy/npy_interrupt.h @@ -55,7 +55,7 @@ Ideas: Simple Interface: -In your C-extension: around a block of code you want to be interruptable +In your C-extension: around a block of code you want to be interruptible with a SIGINT NPY_SIGINT_ON diff --git a/numpy/core/include/numpy/npy_math.h b/numpy/core/include/numpy/npy_math.h index ba32bcdd3..582390cdc 100644 --- a/numpy/core/include/numpy/npy_math.h +++ b/numpy/core/include/numpy/npy_math.h @@ -524,8 +524,17 @@ npy_clongdouble npy_catanhl(npy_clongdouble z); #define NPY_FPE_UNDERFLOW 4 #define NPY_FPE_INVALID 8 -int npy_get_floatstatus(void); +int npy_clear_floatstatus_barrier(char*); +int npy_get_floatstatus_barrier(char*); +/* + * use caution with these - clang and gcc8.1 are known to reorder calls + * to this form of the function which can defeat the check. 
The _barrier + * form of the call is preferable, where the argument is + * (char*)&local_variable + */ int npy_clear_floatstatus(void); +int npy_get_floatstatus(void); + void npy_set_floatstatus_divbyzero(void); void npy_set_floatstatus_overflow(void); void npy_set_floatstatus_underflow(void); diff --git a/numpy/core/numeric.py b/numpy/core/numeric.py index 1108d4667..cd783d242 100644 --- a/numpy/core/numeric.py +++ b/numpy/core/numeric.py @@ -489,9 +489,9 @@ def asarray(a, dtype=None, order=None): Contrary to `asanyarray`, ndarray subclasses are not passed through: - >>> issubclass(np.matrix, np.ndarray) + >>> issubclass(np.recarray, np.ndarray) True - >>> a = np.matrix([[1, 2]]) + >>> a = np.array([(1.0, 2), (3.0, 4)], dtype='f4,i4').view(np.recarray) >>> np.asarray(a) is a False >>> np.asanyarray(a) is a @@ -545,7 +545,7 @@ def asanyarray(a, dtype=None, order=None): Instances of `ndarray` subclasses are passed through as-is: - >>> a = np.matrix([1, 2]) + >>> a = np.array([(1.0, 2), (3.0, 4)], dtype='f4,i4').view(np.recarray) >>> np.asanyarray(a) is a True @@ -2280,7 +2280,7 @@ def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False): relative difference (`rtol` * abs(`b`)) and the absolute difference `atol` are added together to compare against the absolute difference between `a` and `b`. - + .. warning:: The default `atol` is not appropriate for comparing numbers that are much smaller than one (see Notes). 
diff --git a/numpy/core/setup.py b/numpy/core/setup.py index 15f6e1522..7d8bab557 100644 --- a/numpy/core/setup.py +++ b/numpy/core/setup.py @@ -664,7 +664,7 @@ def configuration(parent_package='',top_path=None): def get_mathlib_info(*args): # Another ugly hack: the mathlib info is known once build_src is run, # but we cannot use add_installed_pkg_config here either, so we only - # update the substition dictionary during npymath build + # update the substitution dictionary during npymath build config_cmd = config.get_config_cmd() # Check that the toolchain works, to fail early if it doesn't diff --git a/numpy/core/src/multiarray/array_assign_scalar.c b/numpy/core/src/multiarray/array_assign_scalar.c index 3d259ae05..17de99cb9 100644 --- a/numpy/core/src/multiarray/array_assign_scalar.c +++ b/numpy/core/src/multiarray/array_assign_scalar.c @@ -245,6 +245,10 @@ PyArray_AssignRawScalar(PyArrayObject *dst, allocated_src_data = 1; } + if (PyDataType_FLAGCHK(PyArray_DESCR(dst), NPY_NEEDS_INIT)) { + memset(tmp_src_data, 0, PyArray_DESCR(dst)->elsize); + } + if (PyArray_CastRawArrays(1, src_data, tmp_src_data, 0, 0, src_dtype, PyArray_DESCR(dst), 0) != NPY_SUCCEED) { src_data = tmp_src_data; diff --git a/numpy/core/src/multiarray/arrayobject.c b/numpy/core/src/multiarray/arrayobject.c index 69538c6b7..6f4d3d349 100644 --- a/numpy/core/src/multiarray/arrayobject.c +++ b/numpy/core/src/multiarray/arrayobject.c @@ -86,7 +86,8 @@ NPY_NO_EXPORT int PyArray_SetUpdateIfCopyBase(PyArrayObject *arr, PyArrayObject *base) { int ret; - /* 2017-Nov-10 1.14 */ + /* 2017-Nov -10 1.14 (for PyPy only) */ + /* 2018-April-21 1.15 (all Python implementations) */ if (DEPRECATE("PyArray_SetUpdateIfCopyBase is deprecated, use " "PyArray_SetWritebackIfCopyBase instead, and be sure to call " "PyArray_ResolveWritebackIfCopy before the array is deallocated, " diff --git a/numpy/core/src/multiarray/arraytypes.c.src b/numpy/core/src/multiarray/arraytypes.c.src index 5e6804a5c..42f876125 100644 --- 
a/numpy/core/src/multiarray/arraytypes.c.src +++ b/numpy/core/src/multiarray/arraytypes.c.src @@ -655,9 +655,7 @@ VOID_getitem(void *input, void *vap) { PyArrayObject *ap = vap; char *ip = input; - PyArrayObject *u = NULL; PyArray_Descr* descr; - int itemsize; descr = PyArray_DESCR(ap); if (PyDataType_HASFIELDS(descr)) { @@ -731,68 +729,7 @@ VOID_getitem(void *input, void *vap) return (PyObject *)ret; } - /* 2017-11-26, 1.14 */ - if (DEPRECATE_FUTUREWARNING( - "the `.item()` method of unstructured void types will return an " - "immutable `bytes` object in the near future, the same as " - "returned by `bytes(void_obj)`, instead of the mutable memoryview " - "or integer array returned in numpy 1.13.") < 0) { - return NULL; - } - /* - * In the future all the code below will be replaced by - * - * For unstructured void types like V4, return a bytes object (copy). - * return PyBytes_FromStringAndSize(PyArray_DATA(ap), descr->elsize); - */ - - if (PyDataType_FLAGCHK(descr, NPY_ITEM_HASOBJECT) - || PyDataType_FLAGCHK(descr, NPY_ITEM_IS_POINTER)) { - PyErr_SetString(PyExc_ValueError, - "tried to get void-array with object members as buffer."); - return NULL; - } - itemsize = PyArray_DESCR(ap)->elsize; - -#if defined(NPY_PY3K) - /* - * Return a byte array; there are no plain buffer objects on Py3 - */ - { - npy_intp dims[1], strides[1]; - dims[0] = itemsize; - strides[0] = 1; - descr = PyArray_DescrNewFromType(NPY_BYTE); - u = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type, - descr, 1, dims, strides, ip, - PyArray_ISWRITEABLE(ap) ? 
NPY_ARRAY_WRITEABLE : 0, - NULL); - Py_INCREF(ap); - if (PyArray_SetBaseObject(u, (PyObject *)ap) < 0) { - Py_DECREF(u); - return NULL; - } - } -#else - /* - * default is to return buffer object pointing to - * current item a view of it - */ - if (PyArray_ISWRITEABLE(ap)) { - if (array_might_be_written(ap) < 0) { - return NULL; - } - u = (PyArrayObject *)PyBuffer_FromReadWriteMemory(ip, itemsize); - } - else { - u = (PyArrayObject *)PyBuffer_FromMemory(ip, itemsize); - } -#endif - - if (u == NULL) { - return NULL; - } - return (PyObject *)u; + return PyBytes_FromStringAndSize(PyArray_DATA(ap), descr->elsize); } diff --git a/numpy/core/src/multiarray/lowlevel_strided_loops.c.src b/numpy/core/src/multiarray/lowlevel_strided_loops.c.src index 397aaf209..fa68af19a 100644 --- a/numpy/core/src/multiarray/lowlevel_strided_loops.c.src +++ b/numpy/core/src/multiarray/lowlevel_strided_loops.c.src @@ -1373,7 +1373,7 @@ PyArray_TransferMaskedStridedToNDim(npy_intp ndim, /* * Advanded indexing iteration of arrays when there is a single indexing * array which has the same memory order as the value array and both - * can be trivally iterated (single stride, aligned, no casting necessary). + * can be trivially iterated (single stride, aligned, no casting necessary). */ NPY_NO_EXPORT int mapiter_trivial_@name@(PyArrayObject *self, PyArrayObject *ind, @@ -1747,7 +1747,7 @@ mapiter_@name@(PyArrayMapIterObject *mit) } else { /* - * faster resetting if the subspace iteration is trival. + * faster resetting if the subspace iteration is trivial. * reset_offsets are zero for positive strides, * for negative strides this shifts the pointer to the last * item. 
diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c index 7eccb4a4b..6d323dbd8 100644 --- a/numpy/core/src/multiarray/multiarraymodule.c +++ b/numpy/core/src/multiarray/multiarraymodule.c @@ -197,7 +197,7 @@ PyArray_CompareLists(npy_intp *l1, npy_intp *l2, int n) } /* - * simulates a C-style 1-3 dimensional array which can be accesed using + * simulates a C-style 1-3 dimensional array which can be accessed using * ptr[i] or ptr[i][j] or ptr[i][j][k] -- requires pointer allocation * for 2-d and 3-d. * @@ -3605,7 +3605,7 @@ as_buffer(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds) /* - * Prints floating-point scalars usign the Dragon4 algorithm, scientific mode. + * Prints floating-point scalars using the Dragon4 algorithm, scientific mode. * See docstring of `np.format_float_scientific` for description of arguments. * The differences is that a value of -1 is valid for pad_left, exp_digits, * precision, which is equivalent to `None`. @@ -3661,7 +3661,7 @@ dragon4_scientific(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds) } /* - * Prints floating-point scalars usign the Dragon4 algorithm, positional mode. + * Prints floating-point scalars using the Dragon4 algorithm, positional mode. * See docstring of `np.format_float_positional` for description of arguments. * The differences is that a value of -1 is valid for pad_left, pad_right, * precision, which is equivalent to `None`. 
diff --git a/numpy/core/src/multiarray/number.c b/numpy/core/src/multiarray/number.c index 915d743c8..14389a925 100644 --- a/numpy/core/src/multiarray/number.c +++ b/numpy/core/src/multiarray/number.c @@ -476,7 +476,9 @@ fast_scalar_power(PyArrayObject *a1, PyObject *o2, int inplace, double exponent; NPY_SCALARKIND kind; /* NPY_NOSCALAR is not scalar */ - if (PyArray_Check(a1) && ((kind=is_scalar_with_conversion(o2, &exponent))>0)) { + if (PyArray_Check(a1) && + !PyArray_ISOBJECT(a1) && + ((kind=is_scalar_with_conversion(o2, &exponent))>0)) { PyObject *fastop = NULL; if (PyArray_ISFLOAT(a1) || PyArray_ISCOMPLEX(a1)) { if (exponent == 1.0) { diff --git a/numpy/core/src/multiarray/scalartypes.c.src b/numpy/core/src/multiarray/scalartypes.c.src index cb4af0d12..6dc7e5a3e 100644 --- a/numpy/core/src/multiarray/scalartypes.c.src +++ b/numpy/core/src/multiarray/scalartypes.c.src @@ -4201,7 +4201,7 @@ doubletype_print(PyObject *o, FILE *fp, int flags) return -1; } - ret = PyObject_Print(to_print, fp, flags); + ret = PyObject_Print(to_print, fp, Py_PRINT_RAW); Py_DECREF(to_print); return ret; } diff --git a/numpy/core/src/npymath/ieee754.c.src b/numpy/core/src/npymath/ieee754.c.src index bca690b4d..5405c8fe3 100644 --- a/numpy/core/src/npymath/ieee754.c.src +++ b/numpy/core/src/npymath/ieee754.c.src @@ -6,6 +6,7 @@ */ #include "npy_math_common.h" #include "npy_math_private.h" +#include "numpy/utils.h" #ifndef HAVE_COPYSIGN double npy_copysign(double x, double y) @@ -557,6 +558,15 @@ npy_longdouble npy_nextafterl(npy_longdouble x, npy_longdouble y) } #endif +int npy_clear_floatstatus() { + char x=0; + return npy_clear_floatstatus_barrier(&x); +} +int npy_get_floatstatus() { + char x=0; + return npy_get_floatstatus_barrier(&x); +} + /* * Functions to set the floating point status word. 
* keep in sync with NO_FLOATING_POINT_SUPPORT in ufuncobject.h @@ -574,18 +584,24 @@ npy_longdouble npy_nextafterl(npy_longdouble x, npy_longdouble y) defined(__NetBSD__) #include <ieeefp.h> -int npy_get_floatstatus(void) +int npy_get_floatstatus_barrier(char * param) { int fpstatus = fpgetsticky(); + /* + * By using a volatile, the compiler cannot reorder this call + */ + if (param != NULL) { + volatile char NPY_UNUSED(c) = *(char*)param; + } return ((FP_X_DZ & fpstatus) ? NPY_FPE_DIVIDEBYZERO : 0) | ((FP_X_OFL & fpstatus) ? NPY_FPE_OVERFLOW : 0) | ((FP_X_UFL & fpstatus) ? NPY_FPE_UNDERFLOW : 0) | ((FP_X_INV & fpstatus) ? NPY_FPE_INVALID : 0); } -int npy_clear_floatstatus(void) +int npy_clear_floatstatus_barrier(char * param) { - int fpstatus = npy_get_floatstatus(); + int fpstatus = npy_get_floatstatus_barrier(param); fpsetsticky(0); return fpstatus; @@ -617,10 +633,16 @@ void npy_set_floatstatus_invalid(void) (defined(__FreeBSD__) && (__FreeBSD_version >= 502114)) # include <fenv.h> -int npy_get_floatstatus(void) +int npy_get_floatstatus_barrier(char* param) { int fpstatus = fetestexcept(FE_DIVBYZERO | FE_OVERFLOW | FE_UNDERFLOW | FE_INVALID); + /* + * By using a volatile, the compiler cannot reorder this call + */ + if (param != NULL) { + volatile char NPY_UNUSED(c) = *(char*)param; + } return ((FE_DIVBYZERO & fpstatus) ? NPY_FPE_DIVIDEBYZERO : 0) | ((FE_OVERFLOW & fpstatus) ? NPY_FPE_OVERFLOW : 0) | @@ -628,10 +650,10 @@ int npy_get_floatstatus(void) ((FE_INVALID & fpstatus) ?
NPY_FPE_INVALID : 0); } -int npy_clear_floatstatus(void) +int npy_clear_floatstatus_barrier(char * param) { /* testing float status is 50-100 times faster than clearing on x86 */ - int fpstatus = npy_get_floatstatus(); + int fpstatus = npy_get_floatstatus_barrier(param); if (fpstatus != 0) { feclearexcept(FE_DIVBYZERO | FE_OVERFLOW | FE_UNDERFLOW | FE_INVALID); @@ -665,18 +687,24 @@ void npy_set_floatstatus_invalid(void) #include <float.h> #include <fpxcp.h> -int npy_get_floatstatus(void) +int npy_get_floatstatus_barrier(char *param) { int fpstatus = fp_read_flag(); + /* + * By using a volatile, the compiler cannot reorder this call + */ + if (param != NULL) { + volatile char NPY_UNUSED(c) = *(char*)param; + } return ((FP_DIV_BY_ZERO & fpstatus) ? NPY_FPE_DIVIDEBYZERO : 0) | ((FP_OVERFLOW & fpstatus) ? NPY_FPE_OVERFLOW : 0) | ((FP_UNDERFLOW & fpstatus) ? NPY_FPE_UNDERFLOW : 0) | ((FP_INVALID & fpstatus) ? NPY_FPE_INVALID : 0); } -int npy_clear_floatstatus(void) +int npy_clear_floatstatus_barrier(char * param) { - int fpstatus = npy_get_floatstatus(); + int fpstatus = npy_get_floatstatus_barrier(param); fp_swap_flag(0); return fpstatus; @@ -710,8 +738,11 @@ void npy_set_floatstatus_invalid(void) #include <float.h> -int npy_get_floatstatus(void) +int npy_get_floatstatus_barrier(char *param) { + /* + * By using a volatile, the compiler cannot reorder this call + */ #if defined(_WIN64) int fpstatus = _statusfp(); #else @@ -720,15 +751,18 @@ int npy_get_floatstatus(void) _statusfp2(&fpstatus, &fpstatus2); fpstatus |= fpstatus2; #endif + if (param != NULL) { + volatile char NPY_UNUSED(c) = *(char*)param; + } return ((SW_ZERODIVIDE & fpstatus) ? NPY_FPE_DIVIDEBYZERO : 0) | ((SW_OVERFLOW & fpstatus) ? NPY_FPE_OVERFLOW : 0) | ((SW_UNDERFLOW & fpstatus) ? NPY_FPE_UNDERFLOW : 0) | ((SW_INVALID & fpstatus) ? 
NPY_FPE_INVALID : 0); } -int npy_clear_floatstatus(void) +int npy_clear_floatstatus_barrier(char *param) { - int fpstatus = npy_get_floatstatus(); + int fpstatus = npy_get_floatstatus_barrier(param); _clearfp(); return fpstatus; @@ -739,18 +773,24 @@ int npy_clear_floatstatus(void) #include <machine/fpu.h> -int npy_get_floatstatus(void) +int npy_get_floatstatus_barrier(char *param) { unsigned long fpstatus = ieee_get_fp_control(); + /* + * By using a volatile, the compiler cannot reorder this call + */ + if (param != NULL) { + volatile char NPY_UNUSED(c) = *(char*)param; + } return ((IEEE_STATUS_DZE & fpstatus) ? NPY_FPE_DIVIDEBYZERO : 0) | ((IEEE_STATUS_OVF & fpstatus) ? NPY_FPE_OVERFLOW : 0) | ((IEEE_STATUS_UNF & fpstatus) ? NPY_FPE_UNDERFLOW : 0) | ((IEEE_STATUS_INV & fpstatus) ? NPY_FPE_INVALID : 0); } -int npy_clear_floatstatus(void) +int npy_clear_floatstatus_barrier(char *param) { - long fpstatus = npy_get_floatstatus(); + int fpstatus = npy_get_floatstatus_barrier(param); /* clear status bits as well as disable exception mode if on */ ieee_set_fp_control(0); @@ -759,13 +799,14 @@ int npy_clear_floatstatus(void) #else -int npy_get_floatstatus(void) +int npy_get_floatstatus_barrier(char *NPY_UNUSED(param)) { return 0; } -int npy_clear_floatstatus(void) +int npy_clear_floatstatus_barrier(char *param) { + int fpstatus = npy_get_floatstatus_barrier(param); return 0; } diff --git a/numpy/core/src/umath/extobj.c b/numpy/core/src/umath/extobj.c index e44036358..188054e22 100644 --- a/numpy/core/src/umath/extobj.c +++ b/numpy/core/src/umath/extobj.c @@ -284,7 +284,7 @@ _check_ufunc_fperr(int errmask, PyObject *extobj, const char *ufunc_name) { if (!errmask) { return 0; } - fperr = PyUFunc_getfperr(); + fperr = npy_get_floatstatus_barrier((char*)extobj); if (!fperr) { return 0; } diff --git a/numpy/core/src/umath/loops.c.src b/numpy/core/src/umath/loops.c.src index 8b1c7e703..1ca298b30 100644 --- a/numpy/core/src/umath/loops.c.src +++
b/numpy/core/src/umath/loops.c.src @@ -1819,7 +1819,7 @@ NPY_NO_EXPORT void *((npy_bool *)op1) = @func@(in1) != 0; } } - npy_clear_floatstatus(); + npy_clear_floatstatus_barrier((char*)dimensions); } /**end repeat1**/ @@ -1866,6 +1866,9 @@ NPY_NO_EXPORT void const @type@ in2 = *(@type@ *)ip2; io1 = (io1 @OP@ in2 || npy_isnan(io1)) ? io1 : in2; } + if (npy_isnan(io1)) { + npy_set_floatstatus_invalid(); + } *((@type@ *)iop1) = io1; } } @@ -1901,7 +1904,7 @@ NPY_NO_EXPORT void *((@type@ *)op1) = (in1 @OP@ in2 || npy_isnan(in2)) ? in1 : in2; } } - npy_clear_floatstatus(); + npy_clear_floatstatus_barrier((char*)dimensions); } /**end repeat1**/ @@ -1991,7 +1994,7 @@ NPY_NO_EXPORT void *((@type@ *)op1) = tmp + 0; } } - npy_clear_floatstatus(); + npy_clear_floatstatus_barrier((char*)dimensions); } NPY_NO_EXPORT void @@ -2177,7 +2180,7 @@ HALF_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED const npy_half in1 = *(npy_half *)ip1; *((npy_bool *)op1) = @func@(in1) != 0; } - npy_clear_floatstatus(); + npy_clear_floatstatus_barrier((char*)dimensions); } /**end repeat**/ @@ -2239,7 +2242,7 @@ HALF_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED const npy_half in2 = *(npy_half *)ip2; *((npy_half *)op1) = (@OP@(in1, in2) || npy_half_isnan(in2)) ? 
in1 : in2; } - npy_clear_floatstatus(); + npy_clear_floatstatus_barrier((char*)dimensions); } /**end repeat**/ @@ -2681,7 +2684,7 @@ NPY_NO_EXPORT void const @ftype@ in1i = ((@ftype@ *)ip1)[1]; *((npy_bool *)op1) = @func@(in1r) @OP@ @func@(in1i); } - npy_clear_floatstatus(); + npy_clear_floatstatus_barrier((char*)dimensions); } /**end repeat1**/ @@ -2790,7 +2793,7 @@ NPY_NO_EXPORT void ((@ftype@ *)op1)[1] = in2i; } } - npy_clear_floatstatus(); + npy_clear_floatstatus_barrier((char*)dimensions); } /**end repeat1**/ diff --git a/numpy/core/src/umath/reduction.c b/numpy/core/src/umath/reduction.c index 681d3fefa..5c3a84e21 100644 --- a/numpy/core/src/umath/reduction.c +++ b/numpy/core/src/umath/reduction.c @@ -537,7 +537,7 @@ PyUFunc_ReduceWrapper(PyArrayObject *operand, PyArrayObject *out, } /* Start with the floating-point exception flags cleared */ - PyUFunc_clearfperr(); + npy_clear_floatstatus_barrier((char*)&iter); if (NpyIter_GetIterSize(iter) != 0) { NpyIter_IterNextFunc *iternext; diff --git a/numpy/core/src/umath/scalarmath.c.src b/numpy/core/src/umath/scalarmath.c.src index 6e1fb1ee8..3e29c4b4e 100644 --- a/numpy/core/src/umath/scalarmath.c.src +++ b/numpy/core/src/umath/scalarmath.c.src @@ -848,7 +848,7 @@ static PyObject * } #if @fperr@ - PyUFunc_clearfperr(); + npy_clear_floatstatus_barrier((char*)&out); #endif /* @@ -863,7 +863,7 @@ static PyObject * #if @fperr@ /* Check status flag. If it is set, then look up what to do */ - retstatus = PyUFunc_getfperr(); + retstatus = npy_get_floatstatus_barrier((char*)&out); if (retstatus) { int bufsize, errmask; PyObject *errobj; @@ -993,7 +993,7 @@ static PyObject * return Py_NotImplemented; } - PyUFunc_clearfperr(); + npy_clear_floatstatus_barrier((char*)&out); /* * here we do the actual calculation with arg1 and arg2 @@ -1008,7 +1008,7 @@ static PyObject * } /* Check status flag. 
If it is set, then look up what to do */ - retstatus = PyUFunc_getfperr(); + retstatus = npy_get_floatstatus_barrier((char*)&out); if (retstatus) { int bufsize, errmask; PyObject *errobj; @@ -1072,7 +1072,7 @@ static PyObject * return Py_NotImplemented; } - PyUFunc_clearfperr(); + npy_clear_floatstatus_barrier((char*)&out); /* * here we do the actual calculation with arg1 and arg2 @@ -1136,7 +1136,7 @@ static PyObject * return Py_NotImplemented; } - PyUFunc_clearfperr(); + npy_clear_floatstatus_barrier((char*)&out); /* * here we do the actual calculation with arg1 and arg2 @@ -1150,7 +1150,7 @@ static PyObject * } /* Check status flag. If it is set, then look up what to do */ - retstatus = PyUFunc_getfperr(); + retstatus = npy_get_floatstatus_barrier((char*)&out); if (retstatus) { int bufsize, errmask; PyObject *errobj; diff --git a/numpy/core/src/umath/simd.inc.src b/numpy/core/src/umath/simd.inc.src index 2241414ac..5c0568c12 100644 --- a/numpy/core/src/umath/simd.inc.src +++ b/numpy/core/src/umath/simd.inc.src @@ -1031,7 +1031,7 @@ sse2_@kind@_@TYPE@(@type@ * ip, @type@ * op, const npy_intp n) i += 2 * stride; /* minps/minpd will set invalid flag if nan is encountered */ - npy_clear_floatstatus(); + npy_clear_floatstatus_barrier((char*)&c1); LOOP_BLOCKED(@type@, 32) { @vtype@ v1 = @vpre@_load_@vsuf@((@type@*)&ip[i]); @vtype@ v2 = @vpre@_load_@vsuf@((@type@*)&ip[i + stride]); @@ -1040,7 +1040,7 @@ sse2_@kind@_@TYPE@(@type@ * ip, @type@ * op, const npy_intp n) } c1 = @vpre@_@VOP@_@vsuf@(c1, c2); - if (npy_get_floatstatus() & NPY_FPE_INVALID) { + if (npy_get_floatstatus_barrier((char*)&c1) & NPY_FPE_INVALID) { *op = @nan@; } else { @@ -1051,6 +1051,9 @@ sse2_@kind@_@TYPE@(@type@ * ip, @type@ * op, const npy_intp n) LOOP_BLOCKED_END { *op = (*op @OP@ ip[i] || npy_isnan(*op)) ? 
*op : ip[i]; } + if (npy_isnan(*op)) { + npy_set_floatstatus_invalid(); + } } /**end repeat1**/ diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c index 36b77ef03..c1e8e5a77 100644 --- a/numpy/core/src/umath/ufunc_object.c +++ b/numpy/core/src/umath/ufunc_object.c @@ -100,7 +100,8 @@ PyUFunc_getfperr(void) * non-clearing get was only added in 1.9 so this function always cleared * keep it so just in case third party code relied on the clearing */ - return npy_clear_floatstatus(); + char param = 0; + return npy_clear_floatstatus_barrier(&param); } #define HANDLEIT(NAME, str) {if (retstatus & NPY_FPE_##NAME) { \ @@ -133,7 +134,8 @@ NPY_NO_EXPORT int PyUFunc_checkfperr(int errmask, PyObject *errobj, int *first) { /* clearing is done for backward compatibility */ - int retstatus = npy_clear_floatstatus(); + int retstatus; + retstatus = npy_clear_floatstatus_barrier((char*)&retstatus); return PyUFunc_handlefperr(errmask, errobj, retstatus, first); } @@ -144,7 +146,8 @@ PyUFunc_checkfperr(int errmask, PyObject *errobj, int *first) NPY_NO_EXPORT void PyUFunc_clearfperr() { - npy_clear_floatstatus(); + char param = 0; + npy_clear_floatstatus_barrier(&param); } /* @@ -2537,7 +2540,7 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc, #endif /* Start with the floating-point exception flags cleared */ - PyUFunc_clearfperr(); + npy_clear_floatstatus_barrier((char*)&iter); NPY_UF_DBG_PRINT("Executing inner loop\n"); @@ -2782,7 +2785,7 @@ PyUFunc_GenericFunction(PyUFuncObject *ufunc, } /* Start with the floating-point exception flags cleared */ - PyUFunc_clearfperr(); + npy_clear_floatstatus_barrier((char*)&ufunc); /* Do the ufunc loop */ if (need_fancy) { @@ -3563,7 +3566,7 @@ PyUFunc_Reduceat(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *ind, op_axes_arrays[2]}; npy_uint32 op_flags[3]; int i, idim, ndim, otype_final; - int need_outer_iterator; + int need_outer_iterator = 0; NpyIter *iter = NULL; @@ -4279,11 +4282,9 @@ static PyObject *
ufunc_generic_call(PyUFuncObject *ufunc, PyObject *args, PyObject *kwds) { int i; - PyTupleObject *ret; PyArrayObject *mps[NPY_MAXARGS]; PyObject *retobj[NPY_MAXARGS]; PyObject *wraparr[NPY_MAXARGS]; - PyObject *res; PyObject *override = NULL; ufunc_full_args full_args = {NULL, NULL}; int errval; @@ -4360,13 +4361,17 @@ ufunc_generic_call(PyUFuncObject *ufunc, PyObject *args, PyObject *kwds) int j = ufunc->nin+i; PyObject *wrap = wraparr[i]; - if (wrap != NULL) { + if (wrap == NULL) { + /* default behavior */ + retobj[i] = PyArray_Return(mps[j]); + } + else if (wrap == Py_None) { + Py_DECREF(wrap); + retobj[i] = (PyObject *)mps[j]; + } + else { + PyObject *res; PyObject *args_tup; - if (wrap == Py_None) { - Py_DECREF(wrap); - retobj[i] = (PyObject *)mps[j]; - continue; - } /* Call the method with appropriate context */ args_tup = _get_wrap_prepare_args(full_args); @@ -4386,15 +4391,9 @@ ufunc_generic_call(PyUFuncObject *ufunc, PyObject *args, PyObject *kwds) if (res == NULL) { goto fail; } - else { - Py_DECREF(mps[j]); - retobj[i] = res; - continue; - } - } - else { - /* default behavior */ - retobj[i] = PyArray_Return(mps[j]); + + Py_DECREF(mps[j]); + retobj[i] = res; } } @@ -4405,6 +4404,8 @@ ufunc_generic_call(PyUFuncObject *ufunc, PyObject *args, PyObject *kwds) return retobj[0]; } else { + PyTupleObject *ret; + ret = (PyTupleObject *)PyTuple_New(ufunc->nout); for (i = 0; i < ufunc->nout; i++) { PyTuple_SET_ITEM(ret, i, retobj[i]); diff --git a/numpy/core/tests/test_api.py b/numpy/core/tests/test_api.py index a927968a4..9755e7b36 100644 --- a/numpy/core/tests/test_api.py +++ b/numpy/core/tests/test_api.py @@ -223,22 +223,25 @@ def test_array_astype(): b = a.astype('f4', subok=0, copy=False) assert_(a is b) - a = np.matrix([[0, 1, 2], [3, 4, 5]], dtype='f4') + class MyNDArray(np.ndarray): + pass - # subok=True passes through a matrix + a = np.array([[0, 1, 2], [3, 4, 5]], dtype='f4').view(MyNDArray) + + # subok=True passes through a subclass b = a.astype('f4', 
subok=True, copy=False) assert_(a is b) # subok=True is default, and creates a subtype on a cast b = a.astype('i4', copy=False) assert_equal(a, b) - assert_equal(type(b), np.matrix) + assert_equal(type(b), MyNDArray) - # subok=False never returns a matrix + # subok=False never returns a subclass b = a.astype('f4', subok=False, copy=False) assert_equal(a, b) assert_(not (a is b)) - assert_(type(b) is not np.matrix) + assert_(type(b) is not MyNDArray) # Make sure converting from string object to fixed length string # does not truncate. diff --git a/numpy/core/tests/test_indexing.py b/numpy/core/tests/test_indexing.py index 65852e577..88f5deabc 100644 --- a/numpy/core/tests/test_indexing.py +++ b/numpy/core/tests/test_indexing.py @@ -576,19 +576,6 @@ class TestSubclasses(object): assert_(isinstance(s[[0, 1, 2]], SubClass)) assert_(isinstance(s[s > 0], SubClass)) - def test_matrix_fancy(self): - # The matrix class messes with the shape. While this is always - # weird (getitem is not used, it does not have setitem nor knows - # about fancy indexing), this tests gh-3110 - m = np.matrix([[1, 2], [3, 4]]) - - assert_(isinstance(m[[0,1,0], :], np.matrix)) - - # gh-3110. Note the transpose currently because matrices do *not* - # support dimension fixing for fancy indexing correctly. - x = np.asmatrix(np.arange(50).reshape(5,10)) - assert_equal(x[:2, np.array(-1)], x[:2, -1].T) - def test_finalize_gets_full_info(self): # Array finalize should be called on the filled array. 
class SubClass(np.ndarray): diff --git a/numpy/core/tests/test_longdouble.py b/numpy/core/tests/test_longdouble.py index 513a71b99..cf50d5d5c 100644 --- a/numpy/core/tests/test_longdouble.py +++ b/numpy/core/tests/test_longdouble.py @@ -6,7 +6,7 @@ import numpy as np from numpy.testing import ( assert_, assert_equal, assert_raises, assert_array_equal, temppath, ) -from ._locales import CommaDecimalPointLocale +from numpy.core.tests._locales import CommaDecimalPointLocale LD_INFO = np.finfo(np.longdouble) longdouble_longer_than_double = (LD_INFO.eps < np.finfo(np.double).eps) diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py index 3c5f90cfc..3ca201edd 100644 --- a/numpy/core/tests/test_multiarray.py +++ b/numpy/core/tests/test_multiarray.py @@ -34,7 +34,7 @@ from numpy.testing import ( assert_allclose, IS_PYPY, HAS_REFCOUNT, assert_array_less, runstring, SkipTest, temppath, suppress_warnings ) -from ._locales import CommaDecimalPointLocale +from numpy.core.tests._locales import CommaDecimalPointLocale # Need to test an object that does not fully implement math interface from datetime import timedelta, datetime @@ -1745,13 +1745,6 @@ class TestMethods(object): assert_equal(r, np.array([('a', 1), ('c', 3), ('b', 255), ('d', 258)], dtype=mydtype)) - def test_sort_matrix_none(self): - a = np.matrix([[2, 1, 0]]) - actual = np.sort(a, axis=None) - expected = np.matrix([[0, 1, 2]]) - assert_equal(actual, expected) - assert_(type(expected) is np.matrix) - def test_argsort(self): # all c scalar argsorts use the same code with different types # so it suffices to run a quick check with one type. 
The number @@ -2497,14 +2490,6 @@ class TestMethods(object): assert_array_equal(np.partition(d, kth)[kth], tgt, err_msg="data: %r\n kth: %r" % (d, kth)) - def test_partition_matrix_none(self): - # gh-4301 - a = np.matrix([[2, 1, 0]]) - actual = np.partition(a, 1, axis=None) - expected = np.matrix([[0, 1, 2]]) - assert_equal(actual, expected) - assert_(type(expected) is np.matrix) - def test_argpartition_gh5524(self): # A test for functionality of argpartition on lists. d = [6,7,3,2,9,0] @@ -3332,7 +3317,39 @@ class TestBinop(object): with assert_raises(NotImplementedError): a ** 2 + def test_pow_array_object_dtype(self): + # test pow on arrays of object dtype + class SomeClass(object): + def __init__(self, num=None): + self.num = num + + # want to ensure a fast pow path is not taken + def __mul__(self, other): + raise AssertionError('__mul__ should not be called') + + def __div__(self, other): + raise AssertionError('__div__ should not be called') + + def __pow__(self, exp): + return SomeClass(num=self.num ** exp) + + def __eq__(self, other): + if isinstance(other, SomeClass): + return self.num == other.num + + __rpow__ = __pow__ + def pow_for(exp, arr): + return np.array([x ** exp for x in arr]) + + obj_arr = np.array([SomeClass(1), SomeClass(2), SomeClass(3)]) + + assert_equal(obj_arr ** 0.5, pow_for(0.5, obj_arr)) + assert_equal(obj_arr ** 0, pow_for(0, obj_arr)) + assert_equal(obj_arr ** 1, pow_for(1, obj_arr)) + assert_equal(obj_arr ** -1, pow_for(-1, obj_arr)) + assert_equal(obj_arr ** 2, pow_for(2, obj_arr)) + class TestTemporaryElide(object): # elision is only triggered on relatively large arrays @@ -5279,13 +5296,6 @@ class TestDot(object): assert_equal(np.dot(b, a), res) assert_equal(np.dot(b, b), res) - def test_dot_scalar_and_matrix_of_objects(self): - # Ticket #2469 - arr = np.matrix([1, 2], dtype=object) - desired = np.matrix([[3, 6]], dtype=object) - assert_equal(np.dot(arr, 3), desired) - assert_equal(np.dot(3, arr), desired) - def 
test_accelerate_framework_sgemv_fix(self): def aligned_array(shape, align, dtype, order='C'): @@ -5641,21 +5651,6 @@ class TestInner(object): assert_equal(np.inner(vec, sca), desired) assert_equal(np.inner(sca, vec), desired) - def test_inner_scalar_and_matrix(self): - for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?': - sca = np.array(3, dtype=dt)[()] - arr = np.matrix([[1, 2], [3, 4]], dtype=dt) - desired = np.matrix([[3, 6], [9, 12]], dtype=dt) - assert_equal(np.inner(arr, sca), desired) - assert_equal(np.inner(sca, arr), desired) - - def test_inner_scalar_and_matrix_of_objects(self): - # Ticket #4482 - arr = np.matrix([1, 2], dtype=object) - desired = np.matrix([[3, 6]], dtype=object) - assert_equal(np.inner(arr, 3), desired) - assert_equal(np.inner(3, arr), desired) - def test_vecself(self): # Ticket 844. # Inner product of a vector with itself segfaults or give @@ -7290,7 +7285,7 @@ class TestWritebackIfCopy(object): # after resolve, the two arrays no longer reference each other assert_(arr_wb.ctypes.data != 0) assert_equal(arr_wb.base, None) - # assigning to arr_wb does not get transfered to arr + # assigning to arr_wb does not get transferred to arr arr_wb[...] = 100 assert_equal(arr, -100) @@ -7321,7 +7316,7 @@ class TestWritebackIfCopy(object): assert_equal(arr_wb.base, None) if HAS_REFCOUNT: assert_equal(arr_cnt, sys.getrefcount(arr)) - # assigning to arr_wb does not get transfered to arr + # assigning to arr_wb does not get transferred to arr arr_wb[...] 
= 100 assert_equal(arr, orig) diff --git a/numpy/core/tests/test_nditer.py b/numpy/core/tests/test_nditer.py index 77c26eacf..a0096efdb 100644 --- a/numpy/core/tests/test_nditer.py +++ b/numpy/core/tests/test_nditer.py @@ -811,7 +811,7 @@ def test_iter_nbo_align_contig(): assert_equal(i.operands[0], a) i.operands[0][:] = 2 assert_equal(au, [2]*6) - i = None # should not raise a DeprecationWarning + del i # should not raise a warning # Byte order change by requesting NBO a = np.arange(6, dtype='f4') au = a.byteswap().newbyteorder() @@ -1469,26 +1469,25 @@ def test_iter_allocate_output_types_scalar(): def test_iter_allocate_output_subtype(): # Make sure that the subtype with priority wins + class MyNDArray(np.ndarray): + __array_priority__ = 15 - # matrix vs ndarray - a = np.matrix([[1, 2], [3, 4]]) + # subclass vs ndarray + a = np.array([[1, 2], [3, 4]]).view(MyNDArray) b = np.arange(4).reshape(2, 2).T i = nditer([a, b, None], [], - [['readonly'], ['readonly'], ['writeonly', 'allocate']]) + [['readonly'], ['readonly'], ['writeonly', 'allocate']]) assert_equal(type(a), type(i.operands[2])) - assert_(type(b) != type(i.operands[2])) + assert_(type(b) is not type(i.operands[2])) assert_equal(i.operands[2].shape, (2, 2)) - # matrix always wants things to be 2D - b = np.arange(4).reshape(1, 2, 2) - assert_raises(RuntimeError, nditer, [a, b, None], [], - [['readonly'], ['readonly'], ['writeonly', 'allocate']]) - # but if subtypes are disabled, the result can still work + # If subtypes are disabled, we should get back an ndarray. 
i = nditer([a, b, None], [], - [['readonly'], ['readonly'], ['writeonly', 'allocate', 'no_subtype']]) + [['readonly'], ['readonly'], + ['writeonly', 'allocate', 'no_subtype']]) assert_equal(type(b), type(i.operands[2])) - assert_(type(a) != type(i.operands[2])) - assert_equal(i.operands[2].shape, (1, 2, 2)) + assert_(type(a) is not type(i.operands[2])) + assert_equal(i.operands[2].shape, (2, 2)) def test_iter_allocate_output_errors(): # Check that the iterator will throw errors for bad output allocations @@ -2838,12 +2837,30 @@ def test_writebacks(): it = nditer(au, [], [['readwrite', 'updateifcopy']], casting='equiv', op_dtypes=[np.dtype('f4')]) - au = None + # reentering works + with it: + with it: + for x in it: + x[...] = 123 + + it = nditer(au, [], + [['readwrite', 'updateifcopy']], + casting='equiv', op_dtypes=[np.dtype('f4')]) + # make sure exiting the inner context manager closes the iterator + with it: + with it: + for x in it: + x[...] = 123 + assert_raises(ValueError, getattr, it, 'operands') # do not crash if original data array is decrefed + it = nditer(au, [], + [['readwrite', 'updateifcopy']], + casting='equiv', op_dtypes=[np.dtype('f4')]) + del au with it: for x in it: x[...] 
= 123 - # make sure we cannot reenter the iterand + # make sure we cannot reenter the closed iterator enter = it.__enter__ assert_raises(ValueError, enter) diff --git a/numpy/core/tests/test_numeric.py b/numpy/core/tests/test_numeric.py index 40cccd404..95e9f8497 100644 --- a/numpy/core/tests/test_numeric.py +++ b/numpy/core/tests/test_numeric.py @@ -905,7 +905,7 @@ class TestTypes(object): fi = np.finfo(dt) assert_(np.can_cast(fi.min, dt)) assert_(np.can_cast(fi.max, dt)) - + # Custom exception class to test exception propagation in fromiter class NIterError(Exception): @@ -2201,13 +2201,16 @@ class TestLikeFuncs(object): self.compare_array_value(dz, value, fill_value) # Test the 'subok' parameter - a = np.matrix([[1, 2], [3, 4]]) + class MyNDArray(np.ndarray): + pass + + a = np.array([[1, 2], [3, 4]]).view(MyNDArray) b = like_function(a, **fill_kwarg) - assert_(type(b) is np.matrix) + assert_(type(b) is MyNDArray) b = like_function(a, subok=False, **fill_kwarg) - assert_(type(b) is not np.matrix) + assert_(type(b) is not MyNDArray) def test_ones_like(self): self.check_like_function(np.ones_like, 1) diff --git a/numpy/core/tests/test_print.py b/numpy/core/tests/test_print.py index 746ad0e4b..433208748 100644 --- a/numpy/core/tests/test_print.py +++ b/numpy/core/tests/test_print.py @@ -4,7 +4,7 @@ import sys import numpy as np from numpy.testing import assert_, assert_equal, SkipTest -from ._locales import CommaDecimalPointLocale +from numpy.core.tests._locales import CommaDecimalPointLocale if sys.version_info[0] >= 3: diff --git a/numpy/core/tests/test_regression.py b/numpy/core/tests/test_regression.py index b3cb3e610..d6dcaa982 100644 --- a/numpy/core/tests/test_regression.py +++ b/numpy/core/tests/test_regression.py @@ -2325,13 +2325,10 @@ class TestRegression(object): def test_void_item_memview(self): va = np.zeros(10, 'V4') - # for now, there is just a futurewarning - assert_warns(FutureWarning, va[:1].item) - # in the future, test we got a bytes copy: - #x 
= va[:1].item() - #va[0] = b'\xff\xff\xff\xff' - #del va - #assert_equal(x, b'\x00\x00\x00\x00') + x = va[:1].item() + va[0] = b'\xff\xff\xff\xff' + del va + assert_equal(x, b'\x00\x00\x00\x00') def test_structarray_title(self): # The following used to segfault on pypy, due to NPY_TITLE_KEY diff --git a/numpy/core/tests/test_scalarprint.py b/numpy/core/tests/test_scalarprint.py index 94d8294f1..a20ec9f74 100644 --- a/numpy/core/tests/test_scalarprint.py +++ b/numpy/core/tests/test_scalarprint.py @@ -4,9 +4,10 @@ """ from __future__ import division, absolute_import, print_function -import tempfile +import code, sys +from tempfile import TemporaryFile import numpy as np -from numpy.testing import assert_, assert_equal +from numpy.testing import assert_, assert_equal, suppress_warnings class TestRealScalars(object): @@ -53,7 +54,7 @@ class TestRealScalars(object): # output to a "real file" (ie, not a StringIO). Make sure we don't # inherit it. x = np.double(0.1999999999999) - with tempfile.TemporaryFile('r+t') as f: + with TemporaryFile('r+t') as f: print(x, file=f) f.seek(0) output = f.read() @@ -62,6 +63,37 @@ class TestRealScalars(object): # precision as '0.2', but we want numpy's np.double('0.1999999999999') # to print the unique value, '0.1999999999999'. + # gh-11031 + # Only in the python2 interactive shell and when stdout is a "real" + # file, the output of the last command is printed to stdout without + # Py_PRINT_RAW (unlike the print statement) so `>>> x` and `>>> print + # x` are potentially different. Make sure they are the same. The only + # way I found to get prompt-like output is using an actual prompt from + # the 'code' module. Again, must use tempfile to get a "real" file. + + # dummy user-input which enters one line and then ctrl-Ds. 
+ def userinput(): + yield 'np.sqrt(2)' + raise EOFError + gen = userinput() + input_func = lambda prompt="": next(gen) + + with TemporaryFile('r+t') as fo, TemporaryFile('r+t') as fe: + orig_stdout, orig_stderr = sys.stdout, sys.stderr + sys.stdout, sys.stderr = fo, fe + + # py2 code.interact sends irrelevant internal DeprecationWarnings + with suppress_warnings() as sup: + sup.filter(DeprecationWarning) + code.interact(local={'np': np}, readfunc=input_func, banner='') + + sys.stdout, sys.stderr = orig_stdout, orig_stderr + + fo.seek(0) + capture = fo.read().strip() + + assert_equal(capture, repr(np.sqrt(2))) + def test_dragon4(self): # these tests are adapted from Ryan Juckett's dragon4 implementation, # see dragon4.c for details. diff --git a/numpy/core/tests/test_shape_base.py b/numpy/core/tests/test_shape_base.py index 1d91a651e..72b3451a4 100644 --- a/numpy/core/tests/test_shape_base.py +++ b/numpy/core/tests/test_shape_base.py @@ -364,10 +364,6 @@ def test_stack(): stack, [np.zeros((3, 3)), np.zeros(3)], axis=1) assert_raises_regex(ValueError, 'must have the same shape', stack, [np.arange(2), np.arange(3)]) - # np.matrix - m = np.matrix([[1, 2], [3, 4]]) - assert_raises_regex(ValueError, 'shape too large to be a matrix', - stack, [m, m]) class TestBlock(object): diff --git a/numpy/core/tests/test_ufunc.py b/numpy/core/tests/test_ufunc.py index fe40456d5..8479260a3 100644 --- a/numpy/core/tests/test_ufunc.py +++ b/numpy/core/tests/test_ufunc.py @@ -892,13 +892,6 @@ class TestUfunc(object): np.add.reduceat(arr, np.arange(4), out=arr, axis=-1) assert_array_equal(arr, out) - def test_object_scalar_multiply(self): - # Tickets #2469 and #4482 - arr = np.matrix([1, 2], dtype=object) - desired = np.matrix([[3, 6]], dtype=object) - assert_equal(np.multiply(arr, 3), desired) - assert_equal(np.multiply(3, arr), desired) - def test_zerosize_reduction(self): # Test with default dtype and object dtype for a in [[], np.array([], dtype=object)]: diff --git 
a/numpy/core/tests/test_umath.py b/numpy/core/tests/test_umath.py index ea0be1892..2a42b1ed1 100644 --- a/numpy/core/tests/test_umath.py +++ b/numpy/core/tests/test_umath.py @@ -1328,6 +1328,17 @@ class TestMinMax(object): assert_equal(d.max(), d[0]) assert_equal(d.min(), d[0]) + def test_reduce_warns(self): + # gh 10370, 11029 Some compilers reorder the call to npy_getfloatstatus + and put it before the call to an intrinsic function that causes + invalid status to be set. Also make sure warnings are emitted + for n in (2, 4, 8, 16, 32): + with suppress_warnings() as sup: + sup.record(RuntimeWarning) + for r in np.diagflat([np.nan] * n): + assert_equal(np.min(r), np.nan) + assert_equal(len(sup.log), n) + class TestAbsoluteNegative(object): def test_abs_neg_blocked(self): @@ -1664,13 +1675,16 @@ class TestSpecialMethods(object): assert_equal(ncu.maximum(a, C()), 0) def test_ufunc_override(self): - + # check override works even with instance with high priority. class A(object): def __array_ufunc__(self, func, method, *inputs, **kwargs): return self, func, method, inputs, kwargs + class MyNDArray(np.ndarray): + __array_priority__ = 100 + a = A() - b = np.matrix([1]) + b = np.array([1]).view(MyNDArray) res0 = np.multiply(a, b) res1 = np.multiply(b, b, out=a) diff --git a/numpy/distutils/system_info.py b/numpy/distutils/system_info.py index 2a3ff2e52..65d7de316 100644 --- a/numpy/distutils/system_info.py +++ b/numpy/distutils/system_info.py @@ -385,6 +385,7 @@ def get_info(name, notfound_action=0): 'blis': blis_info, # use blas_opt instead 'lapack_mkl': lapack_mkl_info, # use lapack_opt instead 'blas_mkl': blas_mkl_info, # use blas_opt instead + 'accelerate': accelerate_info, # use blas_opt instead 'x11': x11_info, 'fft_opt': fft_opt_info, 'fftw': fftw_info, @@ -1551,39 +1552,10 @@ class lapack_opt_info(system_info): if not atlas_info: atlas_info = get_info('atlas') - if sys.platform == 'darwin' \ - and not os.getenv('_PYTHON_HOST_PLATFORM', None) \ - and not 
(atlas_info or openblas_info or - lapack_mkl_info): - # Use the system lapack from Accelerate or vecLib under OSX - args = [] - link_args = [] - if get_platform()[-4:] == 'i386' or 'intel' in get_platform() or \ - 'x86_64' in get_platform() or \ - 'i386' in platform.platform(): - intel = 1 - else: - intel = 0 - if os.path.exists('/System/Library/Frameworks' - '/Accelerate.framework/'): - if intel: - args.extend(['-msse3']) - else: - args.extend(['-faltivec']) - link_args.extend(['-Wl,-framework', '-Wl,Accelerate']) - elif os.path.exists('/System/Library/Frameworks' - '/vecLib.framework/'): - if intel: - args.extend(['-msse3']) - else: - args.extend(['-faltivec']) - link_args.extend(['-Wl,-framework', '-Wl,vecLib']) - if args: - self.set_info(extra_compile_args=args, - extra_link_args=link_args, - define_macros=[('NO_ATLAS_INFO', 3), - ('HAVE_CBLAS', None)]) - return + accelerate_info = get_info('accelerate') + if accelerate_info and not atlas_info: + self.set_info(**accelerate_info) + return need_lapack = 0 need_blas = 0 @@ -1659,43 +1631,10 @@ class blas_opt_info(system_info): if not atlas_info: atlas_info = get_info('atlas_blas') - if sys.platform == 'darwin' \ - and not os.getenv('_PYTHON_HOST_PLATFORM', None) \ - and not (atlas_info or openblas_info or - blas_mkl_info or blis_info): - # Use the system BLAS from Accelerate or vecLib under OSX - args = [] - link_args = [] - if get_platform()[-4:] == 'i386' or 'intel' in get_platform() or \ - 'x86_64' in get_platform() or \ - 'i386' in platform.platform(): - intel = 1 - else: - intel = 0 - if os.path.exists('/System/Library/Frameworks' - '/Accelerate.framework/'): - if intel: - args.extend(['-msse3']) - else: - args.extend(['-faltivec']) - args.extend([ - '-I/System/Library/Frameworks/vecLib.framework/Headers']) - link_args.extend(['-Wl,-framework', '-Wl,Accelerate']) - elif os.path.exists('/System/Library/Frameworks' - '/vecLib.framework/'): - if intel: - args.extend(['-msse3']) - else: - 
args.extend(['-faltivec']) - args.extend([ - '-I/System/Library/Frameworks/vecLib.framework/Headers']) - link_args.extend(['-Wl,-framework', '-Wl,vecLib']) - if args: - self.set_info(extra_compile_args=args, - extra_link_args=link_args, - define_macros=[('NO_ATLAS_INFO', 3), - ('HAVE_CBLAS', None)]) - return + accelerate_info = get_info('accelerate') + if accelerate_info and not atlas_info: + self.set_info(**accelerate_info) + return need_blas = 0 info = {} @@ -1939,6 +1878,58 @@ class blis_info(blas_info): include_dirs=incl_dirs) self.set_info(**info) +class accelerate_info(system_info): + section = 'accelerate' + notfounderror = BlasNotFoundError + + def calc_info(self): + # Make possible to enable/disable from config file/env var + libraries = os.environ.get('ACCELERATE') + if libraries: + libraries = [libraries] + else: + libraries = self.get_libs('libraries', ['accelerate', 'veclib']) + libraries = [lib.strip().lower() for lib in libraries] + + if (sys.platform == 'darwin' and + not os.getenv('_PYTHON_HOST_PLATFORM', None)): + # Use the system BLAS from Accelerate or vecLib under OSX + args = [] + link_args = [] + if get_platform()[-4:] == 'i386' or 'intel' in get_platform() or \ + 'x86_64' in get_platform() or \ + 'i386' in platform.platform(): + intel = 1 + else: + intel = 0 + if (os.path.exists('/System/Library/Frameworks' + '/Accelerate.framework/') and + 'accelerate' in libraries): + if intel: + args.extend(['-msse3']) + else: + args.extend(['-faltivec']) + args.extend([ + '-I/System/Library/Frameworks/vecLib.framework/Headers']) + link_args.extend(['-Wl,-framework', '-Wl,Accelerate']) + elif (os.path.exists('/System/Library/Frameworks' + '/vecLib.framework/') and + 'veclib' in libraries): + if intel: + args.extend(['-msse3']) + else: + args.extend(['-faltivec']) + args.extend([ + '-I/System/Library/Frameworks/vecLib.framework/Headers']) + link_args.extend(['-Wl,-framework', '-Wl,vecLib']) + + if args: + self.set_info(extra_compile_args=args, + 
extra_link_args=link_args, + define_macros=[('NO_ATLAS_INFO', 3), + ('HAVE_CBLAS', None)]) + + return class blas_src_info(system_info): section = 'blas_src' diff --git a/numpy/f2py/src/fortranobject.c b/numpy/f2py/src/fortranobject.c index dd2484eb4..78b06f066 100644 --- a/numpy/f2py/src/fortranobject.c +++ b/numpy/f2py/src/fortranobject.c @@ -539,7 +539,7 @@ void f2py_report_on_exit(int exit_flag,void *name) { fprintf(stderr,"(d) f2py call-back interface, %6d calls : %8d msec\n", cb_passed_counter,cb_passed_time); - fprintf(stderr,"(e) wrapped (Fortran/C) functions (acctual) : %8d msec\n\n", + fprintf(stderr,"(e) wrapped (Fortran/C) functions (actual) : %8d msec\n\n", passed_call_time-cb_passed_call_time-cb_passed_time); fprintf(stderr,"Use -DF2PY_REPORT_ATEXIT_DISABLE to disable this message.\n"); fprintf(stderr,"Exit status: %d\n",exit_flag); diff --git a/numpy/lib/arraypad.py b/numpy/lib/arraypad.py index daaa68d06..600301c56 100644 --- a/numpy/lib/arraypad.py +++ b/numpy/lib/arraypad.py @@ -100,12 +100,8 @@ def _prepend_const(arr, pad_amt, val, axis=-1): return arr padshape = tuple(x if i != axis else pad_amt for (i, x) in enumerate(arr.shape)) - if val == 0: - return np.concatenate((np.zeros(padshape, dtype=arr.dtype), arr), - axis=axis) - else: - return np.concatenate(((np.zeros(padshape) + val).astype(arr.dtype), - arr), axis=axis) + return np.concatenate((np.full(padshape, val, dtype=arr.dtype), arr), + axis=axis) def _append_const(arr, pad_amt, val, axis=-1): @@ -134,12 +130,8 @@ def _append_const(arr, pad_amt, val, axis=-1): return arr padshape = tuple(x if i != axis else pad_amt for (i, x) in enumerate(arr.shape)) - if val == 0: - return np.concatenate((arr, np.zeros(padshape, dtype=arr.dtype)), - axis=axis) - else: - return np.concatenate( - (arr, (np.zeros(padshape) + val).astype(arr.dtype)), axis=axis) + return np.concatenate((arr, np.full(padshape, val, dtype=arr.dtype)), + axis=axis) def _prepend_edge(arr, pad_amt, axis=-1): @@ -164,13 +156,9 @@ 
def _prepend_edge(arr, pad_amt, axis=-1): if pad_amt == 0: return arr - edge_slice = tuple(slice(None) if i != axis else 0 + edge_slice = tuple(slice(None) if i != axis else slice(0, 1) for (i, x) in enumerate(arr.shape)) - - # Shape to restore singleton dimension after slicing - pad_singleton = tuple(x if i != axis else 1 - for (i, x) in enumerate(arr.shape)) - edge_arr = arr[edge_slice].reshape(pad_singleton) + edge_arr = arr[edge_slice] return np.concatenate((edge_arr.repeat(pad_amt, axis=axis), arr), axis=axis) @@ -198,13 +186,9 @@ def _append_edge(arr, pad_amt, axis=-1): if pad_amt == 0: return arr - edge_slice = tuple(slice(None) if i != axis else arr.shape[axis] - 1 + edge_slice = tuple(slice(None) if i != axis else slice(x - 1, x) for (i, x) in enumerate(arr.shape)) - - # Shape to restore singleton dimension after slicing - pad_singleton = tuple(x if i != axis else 1 - for (i, x) in enumerate(arr.shape)) - edge_arr = arr[edge_slice].reshape(pad_singleton) + edge_arr = arr[edge_slice] return np.concatenate((arr, edge_arr.repeat(pad_amt, axis=axis)), axis=axis) @@ -244,15 +228,11 @@ def _prepend_ramp(arr, pad_amt, end, axis=-1): reverse=True).astype(np.float64) # Appropriate slicing to extract n-dimensional edge along `axis` - edge_slice = tuple(slice(None) if i != axis else 0 + edge_slice = tuple(slice(None) if i != axis else slice(0, 1) for (i, x) in enumerate(arr.shape)) - # Shape to restore singleton dimension after slicing - pad_singleton = tuple(x if i != axis else 1 - for (i, x) in enumerate(arr.shape)) - - # Extract edge, reshape to original rank, and extend along `axis` - edge_pad = arr[edge_slice].reshape(pad_singleton).repeat(pad_amt, axis) + # Extract edge, and extend along `axis` + edge_pad = arr[edge_slice].repeat(pad_amt, axis) # Linear ramp slope = (end - edge_pad) / float(pad_amt) @@ -299,15 +279,11 @@ def _append_ramp(arr, pad_amt, end, axis=-1): reverse=False).astype(np.float64) # Slice a chunk from the edge to calculate stats on - 
edge_slice = tuple(slice(None) if i != axis else -1 + edge_slice = tuple(slice(None) if i != axis else slice(x - 1, x) for (i, x) in enumerate(arr.shape)) - # Shape to restore singleton dimension after slicing - pad_singleton = tuple(x if i != axis else 1 - for (i, x) in enumerate(arr.shape)) - - # Extract edge, reshape to original rank, and extend along `axis` - edge_pad = arr[edge_slice].reshape(pad_singleton).repeat(pad_amt, axis) + # Extract edge, and extend along `axis` + edge_pad = arr[edge_slice].repeat(pad_amt, axis) # Linear ramp slope = (end - edge_pad) / float(pad_amt) @@ -359,12 +335,8 @@ def _prepend_max(arr, pad_amt, num, axis=-1): max_slice = tuple(slice(None) if i != axis else slice(num) for (i, x) in enumerate(arr.shape)) - # Shape to restore singleton dimension after slicing - pad_singleton = tuple(x if i != axis else 1 - for (i, x) in enumerate(arr.shape)) - - # Extract slice, calculate max, reshape to add singleton dimension back - max_chunk = arr[max_slice].max(axis=axis).reshape(pad_singleton) + # Extract slice, calculate max + max_chunk = arr[max_slice].max(axis=axis, keepdims=True) # Concatenate `arr` with `max_chunk`, extended along `axis` by `pad_amt` return np.concatenate((max_chunk.repeat(pad_amt, axis=axis), arr), @@ -415,12 +387,8 @@ def _append_max(arr, pad_amt, num, axis=-1): else: max_slice = tuple(slice(None) for x in arr.shape) - # Shape to restore singleton dimension after slicing - pad_singleton = tuple(x if i != axis else 1 - for (i, x) in enumerate(arr.shape)) - - # Extract slice, calculate max, reshape to add singleton dimension back - max_chunk = arr[max_slice].max(axis=axis).reshape(pad_singleton) + # Extract slice, calculate max + max_chunk = arr[max_slice].max(axis=axis, keepdims=True) # Concatenate `arr` with `max_chunk`, extended along `axis` by `pad_amt` return np.concatenate((arr, max_chunk.repeat(pad_amt, axis=axis)), @@ -466,12 +434,8 @@ def _prepend_mean(arr, pad_amt, num, axis=-1): mean_slice = tuple(slice(None) 
if i != axis else slice(num) for (i, x) in enumerate(arr.shape)) - # Shape to restore singleton dimension after slicing - pad_singleton = tuple(x if i != axis else 1 - for (i, x) in enumerate(arr.shape)) - - # Extract slice, calculate mean, reshape to add singleton dimension back - mean_chunk = arr[mean_slice].mean(axis).reshape(pad_singleton) + # Extract slice, calculate mean + mean_chunk = arr[mean_slice].mean(axis, keepdims=True) _round_ifneeded(mean_chunk, arr.dtype) # Concatenate `arr` with `mean_chunk`, extended along `axis` by `pad_amt` @@ -523,12 +487,8 @@ def _append_mean(arr, pad_amt, num, axis=-1): else: mean_slice = tuple(slice(None) for x in arr.shape) - # Shape to restore singleton dimension after slicing - pad_singleton = tuple(x if i != axis else 1 - for (i, x) in enumerate(arr.shape)) - - # Extract slice, calculate mean, reshape to add singleton dimension back - mean_chunk = arr[mean_slice].mean(axis=axis).reshape(pad_singleton) + # Extract slice, calculate mean + mean_chunk = arr[mean_slice].mean(axis=axis, keepdims=True) _round_ifneeded(mean_chunk, arr.dtype) # Concatenate `arr` with `mean_chunk`, extended along `axis` by `pad_amt` @@ -575,12 +535,8 @@ def _prepend_med(arr, pad_amt, num, axis=-1): med_slice = tuple(slice(None) if i != axis else slice(num) for (i, x) in enumerate(arr.shape)) - # Shape to restore singleton dimension after slicing - pad_singleton = tuple(x if i != axis else 1 - for (i, x) in enumerate(arr.shape)) - - # Extract slice, calculate median, reshape to add singleton dimension back - med_chunk = np.median(arr[med_slice], axis=axis).reshape(pad_singleton) + # Extract slice, calculate median + med_chunk = np.median(arr[med_slice], axis=axis, keepdims=True) _round_ifneeded(med_chunk, arr.dtype) # Concatenate `arr` with `med_chunk`, extended along `axis` by `pad_amt` @@ -632,12 +588,8 @@ def _append_med(arr, pad_amt, num, axis=-1): else: med_slice = tuple(slice(None) for x in arr.shape) - # Shape to restore singleton dimension 
after slicing - pad_singleton = tuple(x if i != axis else 1 - for (i, x) in enumerate(arr.shape)) - - # Extract slice, calculate median, reshape to add singleton dimension back - med_chunk = np.median(arr[med_slice], axis=axis).reshape(pad_singleton) + # Extract slice, calculate median + med_chunk = np.median(arr[med_slice], axis=axis, keepdims=True) _round_ifneeded(med_chunk, arr.dtype) # Concatenate `arr` with `med_chunk`, extended along `axis` by `pad_amt` @@ -685,12 +637,8 @@ def _prepend_min(arr, pad_amt, num, axis=-1): min_slice = tuple(slice(None) if i != axis else slice(num) for (i, x) in enumerate(arr.shape)) - # Shape to restore singleton dimension after slicing - pad_singleton = tuple(x if i != axis else 1 - for (i, x) in enumerate(arr.shape)) - - # Extract slice, calculate min, reshape to add singleton dimension back - min_chunk = arr[min_slice].min(axis=axis).reshape(pad_singleton) + # Extract slice, calculate min + min_chunk = arr[min_slice].min(axis=axis, keepdims=True) # Concatenate `arr` with `min_chunk`, extended along `axis` by `pad_amt` return np.concatenate((min_chunk.repeat(pad_amt, axis=axis), arr), @@ -741,12 +689,8 @@ def _append_min(arr, pad_amt, num, axis=-1): else: min_slice = tuple(slice(None) for x in arr.shape) - # Shape to restore singleton dimension after slicing - pad_singleton = tuple(x if i != axis else 1 - for (i, x) in enumerate(arr.shape)) - - # Extract slice, calculate min, reshape to add singleton dimension back - min_chunk = arr[min_slice].min(axis=axis).reshape(pad_singleton) + # Extract slice, calculate min + min_chunk = arr[min_slice].min(axis=axis, keepdims=True) # Concatenate `arr` with `min_chunk`, extended along `axis` by `pad_amt` return np.concatenate((arr, min_chunk.repeat(pad_amt, axis=axis)), @@ -798,17 +742,11 @@ def _pad_ref(arr, pad_amt, method, axis=-1): ref_chunk1 = arr[ref_slice] - # Shape to restore singleton dimension after slicing - pad_singleton = tuple(x if i != axis else 1 - for (i, x) in 
enumerate(arr.shape)) - if pad_amt[0] == 1: - ref_chunk1 = ref_chunk1.reshape(pad_singleton) - # Memory/computationally more expensive, only do this if `method='odd'` if 'odd' in method and pad_amt[0] > 0: - edge_slice1 = tuple(slice(None) if i != axis else 0 + edge_slice1 = tuple(slice(None) if i != axis else slice(0, 1) for (i, x) in enumerate(arr.shape)) - edge_chunk = arr[edge_slice1].reshape(pad_singleton) + edge_chunk = arr[edge_slice1] ref_chunk1 = 2 * edge_chunk - ref_chunk1 del edge_chunk @@ -824,13 +762,10 @@ def _pad_ref(arr, pad_amt, method, axis=-1): for (i, x) in enumerate(arr.shape)) ref_chunk2 = arr[ref_slice][rev_idx] - if pad_amt[1] == 1: - ref_chunk2 = ref_chunk2.reshape(pad_singleton) - if 'odd' in method: - edge_slice2 = tuple(slice(None) if i != axis else -1 + edge_slice2 = tuple(slice(None) if i != axis else slice(x - 1, x) for (i, x) in enumerate(arr.shape)) - edge_chunk = arr[edge_slice2].reshape(pad_singleton) + edge_chunk = arr[edge_slice2] ref_chunk2 = 2 * edge_chunk - ref_chunk2 del edge_chunk @@ -884,17 +819,11 @@ def _pad_sym(arr, pad_amt, method, axis=-1): for (i, x) in enumerate(arr.shape)) sym_chunk1 = arr[sym_slice][rev_idx] - # Shape to restore singleton dimension after slicing - pad_singleton = tuple(x if i != axis else 1 - for (i, x) in enumerate(arr.shape)) - if pad_amt[0] == 1: - sym_chunk1 = sym_chunk1.reshape(pad_singleton) - # Memory/computationally more expensive, only do this if `method='odd'` if 'odd' in method and pad_amt[0] > 0: - edge_slice1 = tuple(slice(None) if i != axis else 0 + edge_slice1 = tuple(slice(None) if i != axis else slice(0, 1) for (i, x) in enumerate(arr.shape)) - edge_chunk = arr[edge_slice1].reshape(pad_singleton) + edge_chunk = arr[edge_slice1] sym_chunk1 = 2 * edge_chunk - sym_chunk1 del edge_chunk @@ -908,13 +837,10 @@ def _pad_sym(arr, pad_amt, method, axis=-1): for (i, x) in enumerate(arr.shape)) sym_chunk2 = arr[sym_slice][rev_idx] - if pad_amt[1] == 1: - sym_chunk2 = 
sym_chunk2.reshape(pad_singleton) - if 'odd' in method: - edge_slice2 = tuple(slice(None) if i != axis else -1 + edge_slice2 = tuple(slice(None) if i != axis else slice(x - 1, x) for (i, x) in enumerate(arr.shape)) - edge_chunk = arr[edge_slice2].reshape(pad_singleton) + edge_chunk = arr[edge_slice2] sym_chunk2 = 2 * edge_chunk - sym_chunk2 del edge_chunk @@ -965,12 +891,6 @@ def _pad_wrap(arr, pad_amt, axis=-1): for (i, x) in enumerate(arr.shape)) wrap_chunk1 = arr[wrap_slice] - # Shape to restore singleton dimension after slicing - pad_singleton = tuple(x if i != axis else 1 - for (i, x) in enumerate(arr.shape)) - if pad_amt[0] == 1: - wrap_chunk1 = wrap_chunk1.reshape(pad_singleton) - ########################################################################## # Appended region @@ -979,9 +899,6 @@ def _pad_wrap(arr, pad_amt, axis=-1): for (i, x) in enumerate(arr.shape)) wrap_chunk2 = arr[wrap_slice] - if pad_amt[1] == 1: - wrap_chunk2 = wrap_chunk2.reshape(pad_singleton) - # Concatenate `arr` with both chunks, extending along `axis` return np.concatenate((wrap_chunk1, arr, wrap_chunk2), axis=axis) diff --git a/numpy/lib/format.py b/numpy/lib/format.py index 363bb2101..23eac7e7d 100644 --- a/numpy/lib/format.py +++ b/numpy/lib/format.py @@ -1,5 +1,10 @@ """ -Define a simple format for saving numpy arrays to disk with the full +Binary serialization + +NPY format +========== + +A simple format for saving numpy arrays to disk with the full information about them. The ``.npy`` format is the standard binary file format in NumPy for @@ -143,8 +148,10 @@ data HEADER_LEN." Notes ----- -The ``.npy`` format, including reasons for creating it and a comparison of -alternatives, is described fully in the "npy-format" NEP. 
+The ``.npy`` format, including motivation for creating it and a comparison of +alternatives, is described in the `"npy-format" NEP +<http://www.numpy.org/neps/nep-0001-npy-format.html>`_, however details have +evolved with time and this document is more current. """ from __future__ import division, absolute_import, print_function diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index 72beef471..a6e3e07d3 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -1633,9 +1633,9 @@ def disp(mesg, device=None, linefeed=True): Besides ``sys.stdout``, a file-like object can also be used as it has both required methods: - >>> from StringIO import StringIO + >>> from io import StringIO >>> buf = StringIO() - >>> np.disp('"Display" in a file', device=buf) + >>> np.disp(u'"Display" in a file', device=buf) >>> buf.getvalue() '"Display" in a file\\n' diff --git a/numpy/lib/histograms.py b/numpy/lib/histograms.py index 90e19769e..2922b3a86 100644 --- a/numpy/lib/histograms.py +++ b/numpy/lib/histograms.py @@ -877,12 +877,6 @@ def histogramdd(sample, bins=10, range=None, normed=False, weights=None): # bins is an integer bins = D*[bins] - # avoid rounding issues for comparisons when dealing with inexact types - if np.issubdtype(sample.dtype, np.inexact): - edge_dt = sample.dtype - else: - edge_dt = float - # normalize the range argument if range is None: range = (None,) * D @@ -896,13 +890,12 @@ def histogramdd(sample, bins=10, range=None, normed=False, weights=None): raise ValueError( '`bins[{}]` must be positive, when an integer'.format(i)) smin, smax = _get_outer_edges(sample[:,i], range[i]) - edges[i] = np.linspace(smin, smax, bins[i] + 1, dtype=edge_dt) + edges[i] = np.linspace(smin, smax, bins[i] + 1) elif np.ndim(bins[i]) == 1: - edges[i] = np.asarray(bins[i], edge_dt) - # not just monotonic, due to the use of mindiff below - if np.any(edges[i][:-1] >= edges[i][1:]): + edges[i] = np.asarray(bins[i]) + if np.any(edges[i][:-1] > 
edges[i][1:]): raise ValueError( - '`bins[{}]` must be strictly increasing, when an array' + '`bins[{}]` must be monotonically increasing, when an array' .format(i)) else: raise ValueError( @@ -913,7 +906,8 @@ def histogramdd(sample, bins=10, range=None, normed=False, weights=None): # Compute the bin number each sample falls into. Ncount = tuple( - np.digitize(sample[:, i], edges[i]) + # avoid np.digitize to work around gh-11022 + np.searchsorted(edges[i], sample[:, i], side='right') for i in _range(D) ) @@ -921,16 +915,10 @@ def histogramdd(sample, bins=10, range=None, normed=False, weights=None): # For the rightmost bin, we want values equal to the right edge to be # counted in the last bin, and not as an outlier. for i in _range(D): - # Rounding precision - mindiff = dedges[i].min() - if not np.isinf(mindiff): - decimal = int(-np.log10(mindiff)) + 6 - # Find which points are on the rightmost edge. - not_smaller_than_edge = (sample[:, i] >= edges[i][-1]) - on_edge = (np.around(sample[:, i], decimal) == - np.around(edges[i][-1], decimal)) - # Shift these points one bin to the left. - Ncount[i][on_edge & not_smaller_than_edge] -= 1 + # Find which points are on the rightmost edge. + on_edge = (sample[:, i] == edges[i][-1]) + # Shift these points one bin to the left. + Ncount[i][on_edge] -= 1 # Compute the sample indices in the flattened histogram matrix. # This raises an error if the array is too large. diff --git a/numpy/lib/npyio.py b/numpy/lib/npyio.py index 67585443b..b109d65e1 100644 --- a/numpy/lib/npyio.py +++ b/numpy/lib/npyio.py @@ -475,9 +475,7 @@ def save(file, arr, allow_pickle=True, fix_imports=True): Notes ----- - For a description of the ``.npy`` format, see the module docstring - of `numpy.lib.format` or the NumPy Enhancement Proposal - http://numpy.github.io/neps/npy-format.html + For a description of the ``.npy`` format, see :py:mod:`numpy.lib.format`. 
Examples -------- @@ -561,9 +559,7 @@ def savez(file, *args, **kwds): The ``.npz`` file format is a zipped archive of files named after the variables they contain. The archive is not compressed and each file in the archive contains one variable in ``.npy`` format. For a - description of the ``.npy`` format, see `numpy.lib.format` or the - NumPy Enhancement Proposal - http://numpy.github.io/neps/npy-format.html + description of the ``.npy`` format, see :py:mod:`numpy.lib.format`. When opening the saved ``.npz`` file with `load` a `NpzFile` object is returned. This is a dictionary-like object which can be queried for @@ -642,9 +638,9 @@ def savez_compressed(file, *args, **kwds): The ``.npz`` file format is a zipped archive of files named after the variables they contain. The archive is compressed with ``zipfile.ZIP_DEFLATED`` and each file in the archive contains one variable - in ``.npy`` format. For a description of the ``.npy`` format, see - `numpy.lib.format` or the NumPy Enhancement Proposal - http://numpy.github.io/neps/npy-format.html + in ``.npy`` format. For a description of the ``.npy`` format, see + :py:mod:`numpy.lib.format`. + When opening the saved ``.npz`` file with `load` a `NpzFile` object is returned. This is a dictionary-like object which can be queried for @@ -791,8 +787,8 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None, the data-type. comments : str or sequence of str, optional The characters or list of characters used to indicate the start of a - comment. For backwards compatibility, byte strings will be decoded as - 'latin1'. The default is '#'. + comment. None implies no comments. For backwards compatibility, byte + strings will be decoded as 'latin1'. The default is '#'. delimiter : str, optional The string used to separate values. For backwards compatibility, byte strings will be decoded as 'latin1'. The default is whitespace. 
@@ -859,18 +855,18 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None, Examples -------- >>> from io import StringIO # StringIO behaves like a file object - >>> c = StringIO("0 1\\n2 3") + >>> c = StringIO(u"0 1\\n2 3") >>> np.loadtxt(c) array([[ 0., 1.], [ 2., 3.]]) - >>> d = StringIO("M 21 72\\nF 35 58") + >>> d = StringIO(u"M 21 72\\nF 35 58") >>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'), ... 'formats': ('S1', 'i4', 'f4')}) array([('M', 21, 72.0), ('F', 35, 58.0)], dtype=[('gender', '|S1'), ('age', '<i4'), ('weight', '<f4')]) - >>> c = StringIO("1,0,2\\n3,0,4") + >>> c = StringIO(u"1,0,2\\n3,0,4") >>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True) >>> x array([ 1., 3.]) @@ -1632,7 +1628,7 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None, Comma delimited file with mixed dtype - >>> s = StringIO("1,1.3,abcde") + >>> s = StringIO(u"1,1.3,abcde") >>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'), ... ('mystring','S5')], delimiter=",") >>> data @@ -1659,7 +1655,7 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None, An example with fixed-width columns - >>> s = StringIO("11.3abcde") + >>> s = StringIO(u"11.3abcde") >>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'], ... delimiter=[1,3,5]) >>> data diff --git a/numpy/lib/polynomial.py b/numpy/lib/polynomial.py index 41b5e2f64..078608bbb 100644 --- a/numpy/lib/polynomial.py +++ b/numpy/lib/polynomial.py @@ -113,11 +113,6 @@ def poly(seq_of_zeros): >>> np.poly(P) array([ 1. , 0. , 0.16666667]) - Or a square matrix object: - - >>> np.poly(np.matrix(P)) - array([ 1. , 0. , 0.16666667]) - Note how in all cases the leading coefficient is always 1. 
""" diff --git a/numpy/lib/scimath.py b/numpy/lib/scimath.py index e07caf805..f1838fee6 100644 --- a/numpy/lib/scimath.py +++ b/numpy/lib/scimath.py @@ -555,7 +555,7 @@ def arctanh(x): -------- >>> np.set_printoptions(precision=4) - >>> np.emath.arctanh(np.matrix(np.eye(2))) + >>> np.emath.arctanh(np.eye(2)) array([[ Inf, 0.], [ 0., Inf]]) >>> np.emath.arctanh([1j]) diff --git a/numpy/lib/tests/test_arraypad.py b/numpy/lib/tests/test_arraypad.py index 8be49ce67..8ba0370b0 100644 --- a/numpy/lib/tests/test_arraypad.py +++ b/numpy/lib/tests/test_arraypad.py @@ -489,6 +489,19 @@ class TestConstant(object): ) assert_allclose(test, expected) + def test_check_large_integers(self): + uint64_max = 2 ** 64 - 1 + arr = np.full(5, uint64_max, dtype=np.uint64) + test = np.pad(arr, 1, mode="constant", constant_values=arr.min()) + expected = np.full(7, uint64_max, dtype=np.uint64) + assert_array_equal(test, expected) + + int64_max = 2 ** 63 - 1 + arr = np.full(5, int64_max, dtype=np.int64) + test = np.pad(arr, 1, mode="constant", constant_values=arr.min()) + expected = np.full(7, int64_max, dtype=np.int64) + assert_array_equal(test, expected) + class TestLinearRamp(object): def test_check_simple(self): diff --git a/numpy/lib/tests/test_arraysetops.py b/numpy/lib/tests/test_arraysetops.py index 76c36c53e..984a3b15e 100644 --- a/numpy/lib/tests/test_arraysetops.py +++ b/numpy/lib/tests/test_arraysetops.py @@ -74,8 +74,6 @@ class TestSetOps(object): assert_array_equal([1,7,8], ediff1d(two_elem, to_end=[7,8])) assert_array_equal([7,1], ediff1d(two_elem, to_begin=7)) assert_array_equal([5,6,1], ediff1d(two_elem, to_begin=[5,6])) - assert(isinstance(ediff1d(np.matrix(1)), np.matrix)) - assert(isinstance(ediff1d(np.matrix(1), to_begin=1), np.matrix)) def test_isin(self): # the tests for in1d cover most of isin's behavior diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index 43d62a7ff..4103a9eb3 100644 --- 
a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -287,9 +287,6 @@ class TestAverage(object): assert_almost_equal(y5.mean(0), average(y5, 0)) assert_almost_equal(y5.mean(1), average(y5, 1)) - y6 = np.matrix(rand(5, 5)) - assert_array_equal(y6.mean(0), average(y6, 0)) - def test_weights(self): y = np.arange(10) w = np.arange(10) @@ -357,14 +354,6 @@ class TestAverage(object): assert_equal(type(np.average(a)), subclass) assert_equal(type(np.average(a, weights=w)), subclass) - # also test matrices - a = np.matrix([[1,2],[3,4]]) - w = np.matrix([[1,2],[3,4]]) - - r = np.average(a, axis=0, weights=w) - assert_equal(type(r), np.matrix) - assert_equal(r, [[2.5, 10.0/3]]) - def test_upcasting(self): types = [('i4', 'i4', 'f8'), ('i4', 'f4', 'f8'), ('f4', 'i4', 'f8'), ('f4', 'f4', 'f4'), ('f4', 'f8', 'f8')] @@ -1525,9 +1514,9 @@ class TestDigitize(object): class TestUnwrap(object): def test_simple(self): - # check that unwrap removes jumps greather that 2*pi + # check that unwrap removes jumps greater that 2*pi assert_array_equal(unwrap([1, 1 + 2 * np.pi]), [1, 1]) - # check that unwrap maintans continuity + # check that unwrap maintains continuity assert_(np.all(diff(unwrap(rand(10) * 100)) < np.pi)) @@ -1623,16 +1612,6 @@ class TestTrapz(object): xm = np.ma.array(x, mask=mask) assert_almost_equal(trapz(y, xm), r) - def test_matrix(self): - # Test to make sure matrices give the same answer as ndarrays - x = np.linspace(0, 5) - y = x * x - r = trapz(y, x) - mx = np.matrix(x) - my = np.matrix(y) - mr = trapz(my, mx) - assert_almost_equal(mr, r) - class TestSinc(object): @@ -2759,7 +2738,7 @@ class TestQuantile(object): assert_equal(np.quantile(x, 0.5), 1.75) def test_no_p_overwrite(self): - # this is worth retesting, beause quantile does not make a copy + # this is worth retesting, because quantile does not make a copy p0 = np.array([0, 0.75, 0.25, 0.5, 1.0]) p = p0.copy() np.quantile(np.arange(100.), p, interpolation="midpoint") diff 
--git a/numpy/lib/tests/test_histograms.py b/numpy/lib/tests/test_histograms.py index 06daacbdc..e16ae12c2 100644 --- a/numpy/lib/tests/test_histograms.py +++ b/numpy/lib/tests/test_histograms.py @@ -253,7 +253,7 @@ class TestHistogram(object): one_nan = np.array([0, 1, np.nan]) all_nan = np.array([np.nan, np.nan]) - # the internal commparisons with NaN give warnings + # the internal comparisons with NaN give warnings sup = suppress_warnings() sup.filter(RuntimeWarning) with sup: @@ -613,8 +613,6 @@ class TestHistogramdd(object): assert_raises(ValueError, np.histogramdd, x, bins=[-1, 2, 4, 5]) assert_raises(ValueError, np.histogramdd, x, bins=[1, 0.99, 1, 1]) assert_raises( - ValueError, np.histogramdd, x, bins=[1, 1, 1, [1, 2, 2, 3]]) - assert_raises( ValueError, np.histogramdd, x, bins=[1, 1, 1, [1, 2, 3, -3]]) assert_(np.histogramdd(x, bins=[1, 1, 1, [1, 2, 3, 4]])) @@ -646,7 +644,7 @@ class TestHistogramdd(object): bins = [[0., 0.5, 1.0]] hist, _ = histogramdd(x, bins=bins) assert_(hist[0] == 0.0) - assert_(hist[1] == 1.) 
+ assert_(hist[1] == 0.0) x = [1.0001] bins = [[0., 0.5, 1.0]] hist, _ = histogramdd(x, bins=bins) @@ -660,3 +658,40 @@ class TestHistogramdd(object): range=[[0.0, 1.0], [0.25, 0.75], [0.25, np.inf]]) assert_raises(ValueError, histogramdd, vals, range=[[0.0, 1.0], [np.nan, 0.75], [0.25, 0.5]]) + + def test_equal_edges(self): + """ Test that adjacent entries in an edge array can be equal """ + x = np.array([0, 1, 2]) + y = np.array([0, 1, 2]) + x_edges = np.array([0, 2, 2]) + y_edges = 1 + hist, edges = histogramdd((x, y), bins=(x_edges, y_edges)) + + hist_expected = np.array([ + [2.], + [1.], # x == 2 falls in the final bin + ]) + assert_equal(hist, hist_expected) + + def test_edge_dtype(self): + """ Test that if an edge array is input, its type is preserved """ + x = np.array([0, 10, 20]) + y = x / 10 + x_edges = np.array([0, 5, 15, 20]) + y_edges = x_edges / 10 + hist, edges = histogramdd((x, y), bins=(x_edges, y_edges)) + + assert_equal(edges[0].dtype, x_edges.dtype) + assert_equal(edges[1].dtype, y_edges.dtype) + + def test_large_integers(self): + big = 2**60 # Too large to represent with a full precision float + + x = np.array([0], np.int64) + x_edges = np.array([-1, +1], np.int64) + y = big + x + y_edges = big + x_edges + + hist, edges = histogramdd((x, y), bins=(x_edges, y_edges)) + + assert_equal(hist[0, 0], 1) diff --git a/numpy/lib/tests/test_index_tricks.py b/numpy/lib/tests/test_index_tricks.py index f934e952a..089a7589a 100644 --- a/numpy/lib/tests/test_index_tricks.py +++ b/numpy/lib/tests/test_index_tricks.py @@ -184,37 +184,6 @@ class TestConcatenator(object): assert_array_equal(d[:5, :], b) assert_array_equal(d[5:, :], c) - def test_matrix(self): - a = [1, 2] - b = [3, 4] - - ab_r = np.r_['r', a, b] - ab_c = np.r_['c', a, b] - - assert_equal(type(ab_r), np.matrix) - assert_equal(type(ab_c), np.matrix) - - assert_equal(np.array(ab_r), [[1,2,3,4]]) - assert_equal(np.array(ab_c), [[1],[2],[3],[4]]) - - assert_raises(ValueError, lambda: np.r_['rc', a, 
b]) - - def test_matrix_scalar(self): - r = np.r_['r', [1, 2], 3] - assert_equal(type(r), np.matrix) - assert_equal(np.array(r), [[1,2,3]]) - - def test_matrix_builder(self): - a = np.array([1]) - b = np.array([2]) - c = np.array([3]) - d = np.array([4]) - actual = np.r_['a, b; c, d'] - expected = np.bmat([[a, b], [c, d]]) - - assert_equal(actual, expected) - assert_equal(type(actual), type(expected)) - def test_0d(self): assert_equal(r_[0, np.array(1), 2], [0, 1, 2]) assert_equal(r_[[0, 1, 2], np.array(3)], [0, 1, 2, 3]) diff --git a/numpy/lib/tests/test_nanfunctions.py b/numpy/lib/tests/test_nanfunctions.py index e69d9dd7d..504372faf 100644 --- a/numpy/lib/tests/test_nanfunctions.py +++ b/numpy/lib/tests/test_nanfunctions.py @@ -113,42 +113,46 @@ class TestNanFunctions_MinMax(object): for f in self.nanfuncs: assert_(f(0.) == 0.) - def test_matrices(self): + def test_subclass(self): + class MyNDArray(np.ndarray): + pass + # Check that it works and that type and # shape are preserved - mat = np.matrix(np.eye(3)) + mine = np.eye(3).view(MyNDArray) for f in self.nanfuncs: - res = f(mat, axis=0) - assert_(isinstance(res, np.matrix)) - assert_(res.shape == (1, 3)) - res = f(mat, axis=1) - assert_(isinstance(res, np.matrix)) - assert_(res.shape == (3, 1)) - res = f(mat) - assert_(np.isscalar(res)) + res = f(mine, axis=0) + assert_(isinstance(res, MyNDArray)) + assert_(res.shape == (3,)) + res = f(mine, axis=1) + assert_(isinstance(res, MyNDArray)) + assert_(res.shape == (3,)) + res = f(mine) + assert_(res.shape == ()) + # check that rows of nan are dealt with for subclasses (#4628) - mat[1] = np.nan + mine[1] = np.nan for f in self.nanfuncs: with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') - res = f(mat, axis=0) - assert_(isinstance(res, np.matrix)) + res = f(mine, axis=0) + assert_(isinstance(res, MyNDArray)) assert_(not np.any(np.isnan(res))) assert_(len(w) == 0) with warnings.catch_warnings(record=True) as w: 
warnings.simplefilter('always') - res = f(mat, axis=1) - assert_(isinstance(res, np.matrix)) - assert_(np.isnan(res[1, 0]) and not np.isnan(res[0, 0]) - and not np.isnan(res[2, 0])) + res = f(mine, axis=1) + assert_(isinstance(res, MyNDArray)) + assert_(np.isnan(res[1]) and not np.isnan(res[0]) + and not np.isnan(res[2])) assert_(len(w) == 1, 'no warning raised') assert_(issubclass(w[0].category, RuntimeWarning)) with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') - res = f(mat) - assert_(np.isscalar(res)) + res = f(mine) + assert_(res.shape == ()) assert_(res != np.nan) assert_(len(w) == 0) @@ -209,19 +213,22 @@ class TestNanFunctions_ArgminArgmax(object): for f in self.nanfuncs: assert_(f(0.) == 0.) - def test_matrices(self): + def test_subclass(self): + class MyNDArray(np.ndarray): + pass + # Check that it works and that type and # shape are preserved - mat = np.matrix(np.eye(3)) + mine = np.eye(3).view(MyNDArray) for f in self.nanfuncs: - res = f(mat, axis=0) - assert_(isinstance(res, np.matrix)) - assert_(res.shape == (1, 3)) - res = f(mat, axis=1) - assert_(isinstance(res, np.matrix)) - assert_(res.shape == (3, 1)) - res = f(mat) - assert_(np.isscalar(res)) + res = f(mine, axis=0) + assert_(isinstance(res, MyNDArray)) + assert_(res.shape == (3,)) + res = f(mine, axis=1) + assert_(isinstance(res, MyNDArray)) + assert_(res.shape == (3,)) + res = f(mine) + assert_(res.shape == ()) class TestNanFunctions_IntTypes(object): @@ -381,19 +388,27 @@ class SharedNanFunctionsTestsMixin(object): for f in self.nanfuncs: assert_(f(0.) == 0.) 
- def test_matrices(self): + def test_subclass(self): + class MyNDArray(np.ndarray): + pass + # Check that it works and that type and # shape are preserved - mat = np.matrix(np.eye(3)) + array = np.eye(3) + mine = array.view(MyNDArray) for f in self.nanfuncs: - res = f(mat, axis=0) - assert_(isinstance(res, np.matrix)) - assert_(res.shape == (1, 3)) - res = f(mat, axis=1) - assert_(isinstance(res, np.matrix)) - assert_(res.shape == (3, 1)) - res = f(mat) - assert_(np.isscalar(res)) + expected_shape = f(array, axis=0).shape + res = f(mine, axis=0) + assert_(isinstance(res, MyNDArray)) + assert_(res.shape == expected_shape) + expected_shape = f(array, axis=1).shape + res = f(mine, axis=1) + assert_(isinstance(res, MyNDArray)) + assert_(res.shape == expected_shape) + expected_shape = f(array).shape + res = f(mine) + assert_(isinstance(res, MyNDArray)) + assert_(res.shape == expected_shape) class TestNanFunctions_SumProd(SharedNanFunctionsTestsMixin): @@ -481,18 +496,6 @@ class TestNanFunctions_CumSumProd(SharedNanFunctionsTestsMixin): res = f(d, axis=axis) assert_equal(res.shape, (3, 5, 7, 11)) - def test_matrices(self): - # Check that it works and that type and - # shape are preserved - mat = np.matrix(np.eye(3)) - for f in self.nanfuncs: - for axis in np.arange(2): - res = f(mat, axis=axis) - assert_(isinstance(res, np.matrix)) - assert_(res.shape == (3, 3)) - res = f(mat) - assert_(res.shape == (1, 3*3)) - def test_result_values(self): for axis in (-2, -1, 0, 1, None): tgt = np.cumprod(_ndat_ones, axis=axis) @@ -912,7 +915,7 @@ class TestNanFunctions_Quantile(object): assert_equal(np.nanquantile(x, 0.5), 1.75) def test_no_p_overwrite(self): - # this is worth retesting, beause quantile does not make a copy + # this is worth retesting, because quantile does not make a copy p0 = np.array([0, 0.75, 0.25, 0.5, 1.0]) p = p0.copy() np.nanquantile(np.arange(100.), p, interpolation="midpoint") diff --git a/numpy/lib/tests/test_shape_base.py 
b/numpy/lib/tests/test_shape_base.py index 080fd066d..a35d90b70 100644 --- a/numpy/lib/tests/test_shape_base.py +++ b/numpy/lib/tests/test_shape_base.py @@ -29,19 +29,21 @@ class TestApplyAlongAxis(object): [[27, 30, 33], [36, 39, 42], [45, 48, 51]]) def test_preserve_subclass(self): - # this test is particularly malicious because matrix - # refuses to become 1d def double(row): return row * 2 - m = np.matrix([[0, 1], [2, 3]]) - expected = np.matrix([[0, 2], [4, 6]]) + + class MyNDArray(np.ndarray): + pass + + m = np.array([[0, 1], [2, 3]]).view(MyNDArray) + expected = np.array([[0, 2], [4, 6]]).view(MyNDArray) result = apply_along_axis(double, 0, m) - assert_(isinstance(result, np.matrix)) + assert_(isinstance(result, MyNDArray)) assert_array_equal(result, expected) result = apply_along_axis(double, 1, m) - assert_(isinstance(result, np.matrix)) + assert_(isinstance(result, MyNDArray)) assert_array_equal(result, expected) def test_subclass(self): @@ -79,7 +81,7 @@ class TestApplyAlongAxis(object): def test_axis_insertion(self, cls=np.ndarray): def f1to2(x): - """produces an assymmetric non-square matrix from x""" + """produces an asymmetric non-square matrix from x""" assert_equal(x.ndim, 1) return (x[::-1] * x[1:,None]).view(cls) @@ -123,7 +125,7 @@ class TestApplyAlongAxis(object): def test_axis_insertion_ma(self): def f1to2(x): - """produces an assymmetric non-square matrix from x""" + """produces an asymmetric non-square matrix from x""" assert_equal(x.ndim, 1) res = x[::-1] * x[1:,None] return np.ma.masked_where(res%5==0, res) @@ -492,16 +494,10 @@ class TestSqueeze(object): class TestKron(object): def test_return_type(self): - a = np.ones([2, 2]) - m = np.asmatrix(a) - assert_equal(type(kron(a, a)), np.ndarray) - assert_equal(type(kron(m, m)), np.matrix) - assert_equal(type(kron(a, m)), np.matrix) - assert_equal(type(kron(m, a)), np.matrix) - class myarray(np.ndarray): __array_priority__ = 0.0 + a = np.ones([2, 2]) ma = myarray(a.shape, a.dtype, a.data) 
assert_equal(type(kron(a, a)), np.ndarray) assert_equal(type(kron(ma, ma)), myarray) diff --git a/numpy/lib/twodim_base.py b/numpy/lib/twodim_base.py index 402c18850..cca316e9a 100644 --- a/numpy/lib/twodim_base.py +++ b/numpy/lib/twodim_base.py @@ -650,7 +650,7 @@ def histogram2d(x, y, bins=10, range=None, normed=False, weights=None): N = 1 if N != 1 and N != 2: - xedges = yedges = asarray(bins, float) + xedges = yedges = asarray(bins) bins = [xedges, yedges] hist, edges = histogramdd([x, y], bins, range, normed, weights) return hist, edges[0], edges[1] diff --git a/numpy/linalg/linalg.py b/numpy/linalg/linalg.py index 5ee230f92..5757b1827 100644 --- a/numpy/linalg/linalg.py +++ b/numpy/linalg/linalg.py @@ -16,20 +16,20 @@ __all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv', 'svd', 'eig', 'eigh', 'lstsq', 'norm', 'qr', 'cond', 'matrix_rank', 'LinAlgError', 'multi_dot'] +import operator import warnings from numpy.core import ( array, asarray, zeros, empty, empty_like, intc, single, double, - csingle, cdouble, inexact, complexfloating, newaxis, ravel, all, Inf, dot, - add, multiply, sqrt, maximum, fastCopyAndTranspose, sum, isfinite, size, - finfo, errstate, geterrobj, longdouble, moveaxis, amin, amax, product, abs, - broadcast, atleast_2d, intp, asanyarray, object_, ones, matmul, - swapaxes, divide, count_nonzero, ndarray, isnan + csingle, cdouble, inexact, complexfloating, newaxis, all, Inf, dot, + add, multiply, sqrt, fastCopyAndTranspose, sum, isfinite, + finfo, errstate, geterrobj, moveaxis, amin, amax, product, abs, + atleast_2d, intp, asanyarray, object_, matmul, + swapaxes, divide, count_nonzero, isnan ) from numpy.core.multiarray import normalize_axis_index -from numpy.lib import triu, asfarray +from numpy.lib.twodim_base import triu, eye from numpy.linalg import lapack_lite, _umath_linalg -from numpy.matrixlib.defmatrix import matrix_power # For Python2/3 compatibility _N = b'N' @@ -532,6 +532,109 @@ def inv(a): return 
wrap(ainv.astype(result_t, copy=False)) +def matrix_power(a, n): + """ + Raise a square matrix to the (integer) power `n`. + + For positive integers `n`, the power is computed by repeated matrix + squarings and matrix multiplications. If ``n == 0``, the identity matrix + of the same shape as M is returned. If ``n < 0``, the inverse + is computed and then raised to the ``abs(n)``. + + Parameters + ---------- + a : (..., M, M) array_like + Matrix to be "powered." + n : int + The exponent can be any integer or long integer, positive, + negative, or zero. + + Returns + ------- + a**n : (..., M, M) ndarray or matrix object + The return value is the same shape and type as `M`; + if the exponent is positive or zero then the type of the + elements is the same as those of `M`. If the exponent is + negative the elements are floating-point. + + Raises + ------ + LinAlgError + For matrices that are not square or that (for negative powers) cannot + be inverted numerically. + + Examples + -------- + >>> from numpy.linalg import matrix_power + >>> i = np.array([[0, 1], [-1, 0]]) # matrix equiv. of the imaginary unit + >>> matrix_power(i, 3) # should = -i + array([[ 0, -1], + [ 1, 0]]) + >>> matrix_power(i, 0) + array([[1, 0], + [0, 1]]) + >>> matrix_power(i, -3) # should = 1/(-i) = i, but w/ f.p. elements + array([[ 0., 1.], + [-1., 0.]]) + + Somewhat more sophisticated example + + >>> q = np.zeros((4, 4)) + >>> q[0:2, 0:2] = -i + >>> q[2:4, 2:4] = i + >>> q # one of the three quaternion units not equal to 1 + array([[ 0., -1., 0., 0.], + [ 1., 0., 0., 0.], + [ 0., 0., 0., 1.], + [ 0., 0., -1., 0.]]) + >>> matrix_power(q, 2) # = -np.eye(4) + array([[-1., 0., 0., 0.], + [ 0., -1., 0., 0.], + [ 0., 0., -1., 0.], + [ 0., 0., 0., -1.]]) + + """ + a = asanyarray(a) + _assertRankAtLeast2(a) + _assertNdSquareness(a) + + try: + n = operator.index(n) + except TypeError: + raise TypeError("exponent must be an integer") + + if n == 0: + a = empty_like(a) + a[...] 
= eye(a.shape[-2], dtype=a.dtype) + return a + + elif n < 0: + a = inv(a) + n = abs(n) + + # short-cuts. + if n == 1: + return a + + elif n == 2: + return matmul(a, a) + + elif n == 3: + return matmul(matmul(a, a), a) + + # Use binary decomposition to reduce the number of matrix multiplications. + # Here, we iterate over the bits of n, from LSB to MSB, raise `a` to + # increasing powers of 2, and multiply into the result as needed. + z = result = None + while n > 0: + z = a if z is None else matmul(z, z) + n, bit = divmod(n, 2) + if bit: + result = z if result is None else matmul(result, z) + + return result + + # Cholesky decomposition def cholesky(a): diff --git a/numpy/linalg/tests/test_linalg.py b/numpy/linalg/tests/test_linalg.py index 4a87330c7..5ed1ff1c0 100644 --- a/numpy/linalg/tests/test_linalg.py +++ b/numpy/linalg/tests/test_linalg.py @@ -7,11 +7,10 @@ import os import sys import itertools import traceback -import warnings import pytest import numpy as np -from numpy import array, single, double, csingle, cdouble, dot, identity +from numpy import array, single, double, csingle, cdouble, dot, identity, matmul from numpy import multiply, atleast_2d, inf, asarray, matrix from numpy import linalg from numpy.linalg import matrix_power, norm, matrix_rank, multi_dot, LinAlgError @@ -22,12 +21,11 @@ from numpy.testing import ( ) -def ifthen(a, b): - return not a or b - - -def imply(a, b): - return not a or b +def consistent_subclass(out, in_): + # For ndarray subclass input, our output should have the same subclass + # (non-ndarray input gets converted to ndarray). 
+ return type(out) is (type(in_) if isinstance(in_, np.ndarray) + else np.ndarray) old_assert_almost_equal = assert_almost_equal @@ -65,6 +63,7 @@ all_tags = { 'generalized', 'size-0', 'strided' # optional additions } + class LinalgCase(object): def __init__(self, name, a, b, tags=set()): """ @@ -86,6 +85,7 @@ class LinalgCase(object): def __repr__(self): return "<LinalgCase: %s>" % (self.name,) + def apply_tag(tag, cases): """ Add the given tag (a string) to each of the cases (a list of LinalgCase @@ -129,10 +129,6 @@ CASES += apply_tag('square', [ np.empty((0, 0), dtype=double), np.empty((0,), dtype=double), tags={'size-0'}), - LinalgCase("0x0_matrix", - np.empty((0, 0), dtype=double).view(np.matrix), - np.empty((0, 1), dtype=double).view(np.matrix), - tags={'size-0'}), LinalgCase("8x8", np.random.rand(8, 8), np.random.rand(8)), @@ -142,12 +138,6 @@ CASES += apply_tag('square', [ LinalgCase("nonarray", [[1, 2], [3, 4]], [2, 1]), - LinalgCase("matrix_b_only", - array([[1., 2.], [3., 4.]]), - matrix([2., 1.]).T), - LinalgCase("matrix_a_and_b", - matrix([[1., 2.], [3., 4.]]), - matrix([2., 1.]).T), ]) # non-square test-cases @@ -231,9 +221,6 @@ CASES += apply_tag('hermitian', [ LinalgCase("matrix_b_only", array([[1., 2.], [2., 1.]]), None), - LinalgCase("hmatrix_a_and_b", - matrix([[1., 2.], [2., 1.]]), - None), LinalgCase("hmatrix_1x1", np.random.rand(1, 1), None), @@ -270,12 +257,13 @@ def _make_generalized_cases(): return new_cases + CASES += _make_generalized_cases() + # # Generate stride combination variations of the above # - def _stride_comb_iter(x): """ Generate cartesian product of strides for all axes @@ -323,6 +311,7 @@ def _stride_comb_iter(x): xi = np.lib.stride_tricks.as_strided(x, strides=s) yield xi, "stride_xxx_0_0" + def _make_strided_cases(): new_cases = [] for case in CASES: @@ -333,94 +322,104 @@ def _make_strided_cases(): new_cases.append(new_case) return new_cases + CASES += _make_strided_cases() # # Test different routines against the above 
cases # +class LinalgTestCase(object): + TEST_CASES = CASES -def _check_cases(func, require=set(), exclude=set()): - """ - Run func on each of the cases with all of the tags in require, and none - of the tags in exclude - """ - for case in CASES: - # filter by require and exclude - if case.tags & require != require: - continue - if case.tags & exclude: - continue + def check_cases(self, require=set(), exclude=set()): + """ + Run func on each of the cases with all of the tags in require, and none + of the tags in exclude + """ + for case in self.TEST_CASES: + # filter by require and exclude + if case.tags & require != require: + continue + if case.tags & exclude: + continue - try: - case.check(func) - except Exception: - msg = "In test case: %r\n\n" % case - msg += traceback.format_exc() - raise AssertionError(msg) + try: + case.check(self.do) + except Exception: + msg = "In test case: %r\n\n" % case + msg += traceback.format_exc() + raise AssertionError(msg) -class LinalgSquareTestCase(object): +class LinalgSquareTestCase(LinalgTestCase): def test_sq_cases(self): - _check_cases(self.do, require={'square'}, exclude={'generalized', 'size-0'}) + self.check_cases(require={'square'}, + exclude={'generalized', 'size-0'}) def test_empty_sq_cases(self): - _check_cases(self.do, require={'square', 'size-0'}, exclude={'generalized'}) + self.check_cases(require={'square', 'size-0'}, + exclude={'generalized'}) -class LinalgNonsquareTestCase(object): +class LinalgNonsquareTestCase(LinalgTestCase): def test_nonsq_cases(self): - _check_cases(self.do, require={'nonsquare'}, exclude={'generalized', 'size-0'}) + self.check_cases(require={'nonsquare'}, + exclude={'generalized', 'size-0'}) def test_empty_nonsq_cases(self): - _check_cases(self.do, require={'nonsquare', 'size-0'}, exclude={'generalized'}) + self.check_cases(require={'nonsquare', 'size-0'}, + exclude={'generalized'}) -class HermitianTestCase(object): + +class HermitianTestCase(LinalgTestCase): def test_herm_cases(self): - 
_check_cases(self.do, require={'hermitian'}, exclude={'generalized', 'size-0'}) + self.check_cases(require={'hermitian'}, + exclude={'generalized', 'size-0'}) def test_empty_herm_cases(self): - _check_cases(self.do, require={'hermitian', 'size-0'}, exclude={'generalized'}) + self.check_cases(require={'hermitian', 'size-0'}, + exclude={'generalized'}) -class LinalgGeneralizedSquareTestCase(object): +class LinalgGeneralizedSquareTestCase(LinalgTestCase): @pytest.mark.slow def test_generalized_sq_cases(self): - _check_cases(self.do, require={'generalized', 'square'}, exclude={'size-0'}) + self.check_cases(require={'generalized', 'square'}, + exclude={'size-0'}) @pytest.mark.slow def test_generalized_empty_sq_cases(self): - _check_cases(self.do, require={'generalized', 'square', 'size-0'}) + self.check_cases(require={'generalized', 'square', 'size-0'}) -class LinalgGeneralizedNonsquareTestCase(object): +class LinalgGeneralizedNonsquareTestCase(LinalgTestCase): @pytest.mark.slow def test_generalized_nonsq_cases(self): - _check_cases(self.do, require={'generalized', 'nonsquare'}, exclude={'size-0'}) + self.check_cases(require={'generalized', 'nonsquare'}, + exclude={'size-0'}) @pytest.mark.slow def test_generalized_empty_nonsq_cases(self): - _check_cases(self.do, require={'generalized', 'nonsquare', 'size-0'}) + self.check_cases(require={'generalized', 'nonsquare', 'size-0'}) -class HermitianGeneralizedTestCase(object): +class HermitianGeneralizedTestCase(LinalgTestCase): @pytest.mark.slow def test_generalized_herm_cases(self): - _check_cases(self.do, - require={'generalized', 'hermitian'}, - exclude={'size-0'}) + self.check_cases(require={'generalized', 'hermitian'}, + exclude={'size-0'}) @pytest.mark.slow def test_generalized_empty_herm_cases(self): - _check_cases(self.do, - require={'generalized', 'hermitian', 'size-0'}, - exclude={'none'}) + self.check_cases(require={'generalized', 'hermitian', 'size-0'}, + exclude={'none'}) def dot_generalized(a, b): @@ -446,20 
+445,21 @@ def identity_like_generalized(a): a = asarray(a) if a.ndim >= 3: r = np.empty(a.shape, dtype=a.dtype) - for c in itertools.product(*map(range, a.shape[:-2])): - r[c] = identity(a.shape[-2]) + r[...] = identity(a.shape[-2]) return r else: return identity(a.shape[0]) -class TestSolve(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): - +class SolveCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): + # kept apart from TestSolve for use for testing with matrices. def do(self, a, b, tags): x = linalg.solve(a, b) assert_almost_equal(b, dot_generalized(a, x)) - assert_(imply(isinstance(b, matrix), isinstance(x, matrix))) + assert_(consistent_subclass(x, b)) + +class TestSolve(SolveCases): def test_types(self): def check(dtype): x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) @@ -519,14 +519,16 @@ class TestSolve(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): assert_(isinstance(result, ArraySubclass)) -class TestInv(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): +class InvCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): def do(self, a, b, tags): a_inv = linalg.inv(a) assert_almost_equal(dot_generalized(a, a_inv), identity_like_generalized(a)) - assert_(imply(isinstance(a, matrix), isinstance(a_inv, matrix))) + assert_(consistent_subclass(a_inv, a)) + +class TestInv(InvCases): def test_types(self): def check(dtype): x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) @@ -551,13 +553,15 @@ class TestInv(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): assert_(isinstance(res, ArraySubclass)) -class TestEigvals(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): +class EigvalsCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): def do(self, a, b, tags): ev = linalg.eigvals(a) evalues, evectors = linalg.eig(a) assert_almost_equal(ev, evalues) + +class TestEigvals(EigvalsCases): def test_types(self): def check(dtype): x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) @@ -586,15 +590,17 @@ class 
TestEigvals(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): assert_(isinstance(res, np.ndarray)) -class TestEig(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): +class EigCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): def do(self, a, b, tags): evalues, evectors = linalg.eig(a) assert_allclose(dot_generalized(a, evectors), np.asarray(evectors) * np.asarray(evalues)[..., None, :], rtol=get_rtol(evalues.dtype)) - assert_(imply(isinstance(a, matrix), isinstance(evectors, matrix))) + assert_(consistent_subclass(evectors, a)) + +class TestEig(EigCases): def test_types(self): def check(dtype): x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) @@ -633,7 +639,7 @@ class TestEig(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): assert_(isinstance(a, np.ndarray)) -class TestSVD(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): +class SVDCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): def do(self, a, b, tags): if 'size-0' in tags: @@ -644,9 +650,11 @@ class TestSVD(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): assert_allclose(a, dot_generalized(np.asarray(u) * np.asarray(s)[..., None, :], np.asarray(vt)), rtol=get_rtol(u.dtype)) - assert_(imply(isinstance(a, matrix), isinstance(u, matrix))) - assert_(imply(isinstance(a, matrix), isinstance(vt, matrix))) + assert_(consistent_subclass(u, a)) + assert_(consistent_subclass(vt, a)) + +class TestSVD(SVDCases): def test_types(self): def check(dtype): x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) @@ -671,7 +679,7 @@ class TestSVD(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): assert_raises(linalg.LinAlgError, linalg.svd, a) -class TestCond(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): +class CondCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): # cond(x, p) for p in (None, 2, -2) def do(self, a, b, tags): @@ -716,6 +724,8 @@ class TestCond(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): * (abs(cinv)**2).sum(-1).sum(-1)), 
single_decimal=5, double_decimal=11) + +class TestCond(CondCases): def test_basic_nonsvd(self): # Smoketest the non-svd norms A = array([[1., 0, 1], [0, -2., 0], [0, 0, 3.]]) @@ -779,20 +789,24 @@ class TestCond(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): assert_(np.isfinite(c[1,0])) -class TestPinv(LinalgSquareTestCase, - LinalgNonsquareTestCase, - LinalgGeneralizedSquareTestCase, - LinalgGeneralizedNonsquareTestCase): +class PinvCases(LinalgSquareTestCase, + LinalgNonsquareTestCase, + LinalgGeneralizedSquareTestCase, + LinalgGeneralizedNonsquareTestCase): def do(self, a, b, tags): a_ginv = linalg.pinv(a) # `a @ a_ginv == I` does not hold if a is singular dot = dot_generalized assert_almost_equal(dot(dot(a, a_ginv), a), a, single_decimal=5, double_decimal=11) - assert_(imply(isinstance(a, matrix), isinstance(a_ginv, matrix))) + assert_(consistent_subclass(a_ginv, a)) + + +class TestPinv(PinvCases): + pass -class TestDet(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): +class DetCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): def do(self, a, b, tags): d = linalg.det(a) @@ -811,6 +825,8 @@ class TestDet(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): assert_almost_equal(np.abs(s[m]), 1) assert_equal(ld[~m], -inf) + +class TestDet(DetCases): def test_zero(self): assert_equal(linalg.det([[0.0]]), 0.0) assert_equal(type(linalg.det([[0.0]])), double) @@ -854,7 +870,7 @@ class TestDet(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): assert_(res[1].dtype.type is np.float64) -class TestLstsq(LinalgSquareTestCase, LinalgNonsquareTestCase): +class LstsqCases(LinalgSquareTestCase, LinalgNonsquareTestCase): def do(self, a, b, tags): if 'size-0' in tags: @@ -882,9 +898,11 @@ class TestLstsq(LinalgSquareTestCase, LinalgNonsquareTestCase): expect_resids = np.array([]).view(type(x)) assert_almost_equal(residuals, expect_resids) assert_(np.issubdtype(residuals.dtype, np.floating)) - assert_(imply(isinstance(b, matrix), isinstance(x, 
matrix))) - assert_(imply(isinstance(b, matrix), isinstance(residuals, matrix))) + assert_(consistent_subclass(x, b)) + assert_(consistent_subclass(residuals, b)) + +class TestLstsq(LstsqCases): def test_future_rcond(self): a = np.array([[0., 1., 0., 1., 2., 0.], [0., 2., 0., 0., 1., 0.], @@ -903,20 +921,26 @@ class TestLstsq(LinalgSquareTestCase, LinalgNonsquareTestCase): # Warning should be raised exactly once (first command) assert_(len(w) == 1) + class TestMatrixPower(object): R90 = array([[0, 1], [-1, 0]]) Arb22 = array([[4, -7], [-2, 10]]) noninv = array([[1, 0], [0, 0]]) - arbfloat = array([[0.1, 3.2], [1.2, 0.7]]) + arbfloat = array([[[0.1, 3.2], [1.2, 0.7]], + [[0.2, 6.4], [2.4, 1.4]]]) large = identity(10) t = large[1, :].copy() - large[1, :] = large[0,:] + large[1, :] = large[0, :] large[0, :] = t def test_large_power(self): assert_equal( matrix_power(self.R90, 2 ** 100 + 2 ** 10 + 2 ** 5 + 1), self.R90) + assert_equal( + matrix_power(self.R90, 2 ** 100 + 2 ** 10 + 1), self.R90) + assert_equal( + matrix_power(self.R90, 2 ** 100 + 2 + 1), -self.R90) def test_large_power_trailing_zero(self): assert_equal( @@ -925,7 +949,7 @@ class TestMatrixPower(object): def testip_zero(self): def tz(M): mz = matrix_power(M, 0) - assert_equal(mz, identity(M.shape[0])) + assert_equal(mz, identity_like_generalized(M)) assert_equal(mz.dtype, M.dtype) for M in [self.Arb22, self.arbfloat, self.large]: tz(M) @@ -941,7 +965,7 @@ class TestMatrixPower(object): def testip_two(self): def tz(M): mz = matrix_power(M, 2) - assert_equal(mz, dot(M, M)) + assert_equal(mz, matmul(M, M)) assert_equal(mz.dtype, M.dtype) for M in [self.Arb22, self.arbfloat, self.large]: tz(M) @@ -949,14 +973,19 @@ class TestMatrixPower(object): def testip_invert(self): def tz(M): mz = matrix_power(M, -1) - assert_almost_equal(identity(M.shape[0]), dot(mz, M)) + assert_almost_equal(matmul(mz, M), identity_like_generalized(M)) for M in [self.R90, self.Arb22, self.arbfloat, self.large]: tz(M) def 
test_invert_noninvertible(self): - import numpy.linalg - assert_raises(numpy.linalg.linalg.LinAlgError, - lambda: matrix_power(self.noninv, -1)) + assert_raises(LinAlgError, matrix_power, self.noninv, -1) + + def test_invalid(self): + assert_raises(TypeError, matrix_power, self.R90, 1.5) + assert_raises(TypeError, matrix_power, self.R90, [1]) + assert_raises(LinAlgError, matrix_power, np.array([1]), 1) + assert_raises(LinAlgError, matrix_power, np.array([[1], [2]]), 1) + assert_raises(LinAlgError, matrix_power, np.ones((4, 3, 2)), 1) class TestBoolPower(object): @@ -966,7 +995,7 @@ class TestBoolPower(object): assert_equal(matrix_power(A, 2), A) -class TestEigvalsh(HermitianTestCase, HermitianGeneralizedTestCase): +class TestEigvalshCases(HermitianTestCase, HermitianGeneralizedTestCase): def do(self, a, b, tags): # note that eigenvalue arrays returned by eig must be sorted since @@ -979,6 +1008,8 @@ class TestEigvalsh(HermitianTestCase, HermitianGeneralizedTestCase): ev2 = linalg.eigvalsh(a, 'U') assert_allclose(ev2, evalues, rtol=get_rtol(ev.dtype)) + +class TestEigvalsh(object): def test_types(self): def check(dtype): x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) @@ -1034,7 +1065,7 @@ class TestEigvalsh(HermitianTestCase, HermitianGeneralizedTestCase): assert_(isinstance(res, np.ndarray)) -class TestEigh(HermitianTestCase, HermitianGeneralizedTestCase): +class TestEighCases(HermitianTestCase, HermitianGeneralizedTestCase): def do(self, a, b, tags): # note that eigenvalue arrays returned by eig must be sorted since @@ -1055,6 +1086,8 @@ class TestEigh(HermitianTestCase, HermitianGeneralizedTestCase): np.asarray(ev2)[..., None, :] * np.asarray(evc2), rtol=get_rtol(ev.dtype), err_msg=repr(a)) + +class TestEigh(object): def test_types(self): def check(dtype): x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) @@ -1115,11 +1148,13 @@ class TestEigh(HermitianTestCase, HermitianGeneralizedTestCase): assert_(isinstance(a, np.ndarray)) -class _TestNorm(object): - +class 
_TestNormBase(object): dt = None dec = None + +class _TestNormGeneral(_TestNormBase): + def test_empty(self): assert_equal(norm([]), 0.0) assert_equal(norm(array([], dtype=self.dt)), 0.0) @@ -1166,57 +1201,6 @@ class _TestNorm(object): assert_(issubclass(an.dtype.type, np.floating)) assert_almost_equal(an, 1.0) - def test_matrix_return_type(self): - a = np.array([[1, 0, 1], [0, 1, 1]]) - - exact_types = np.typecodes['AllInteger'] - - # float32, complex64, float64, complex128 types are the only types - # allowed by `linalg`, which performs the matrix operations used - # within `norm`. - inexact_types = 'fdFD' - - all_types = exact_types + inexact_types - - for each_inexact_types in all_types: - at = a.astype(each_inexact_types) - - an = norm(at, -np.inf) - assert_(issubclass(an.dtype.type, np.floating)) - assert_almost_equal(an, 2.0) - - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, "divide by zero encountered") - an = norm(at, -1) - assert_(issubclass(an.dtype.type, np.floating)) - assert_almost_equal(an, 1.0) - - an = norm(at, 1) - assert_(issubclass(an.dtype.type, np.floating)) - assert_almost_equal(an, 2.0) - - an = norm(at, 2) - assert_(issubclass(an.dtype.type, np.floating)) - assert_almost_equal(an, 3.0**(1.0/2.0)) - - an = norm(at, -2) - assert_(issubclass(an.dtype.type, np.floating)) - assert_almost_equal(an, 1.0) - - an = norm(at, np.inf) - assert_(issubclass(an.dtype.type, np.floating)) - assert_almost_equal(an, 2.0) - - an = norm(at, 'fro') - assert_(issubclass(an.dtype.type, np.floating)) - assert_almost_equal(an, 2.0) - - an = norm(at, 'nuc') - assert_(issubclass(an.dtype.type, np.floating)) - # Lower bar needed to support low precision floats. - # They end up being off by 1 in the 7th place. 
- old_assert_almost_equal(an, 2.7320508075688772, decimal=6) - def test_vector(self): a = [1, 2, 3, 4] b = [-1, -2, -3, -4] @@ -1247,39 +1231,6 @@ class _TestNorm(object): array(c, dtype=self.dt)): _test(v) - def test_matrix_2x2(self): - A = matrix([[1, 3], [5, 7]], dtype=self.dt) - assert_almost_equal(norm(A), 84 ** 0.5) - assert_almost_equal(norm(A, 'fro'), 84 ** 0.5) - assert_almost_equal(norm(A, 'nuc'), 10.0) - assert_almost_equal(norm(A, inf), 12.0) - assert_almost_equal(norm(A, -inf), 4.0) - assert_almost_equal(norm(A, 1), 10.0) - assert_almost_equal(norm(A, -1), 6.0) - assert_almost_equal(norm(A, 2), 9.1231056256176615) - assert_almost_equal(norm(A, -2), 0.87689437438234041) - - assert_raises(ValueError, norm, A, 'nofro') - assert_raises(ValueError, norm, A, -3) - assert_raises(ValueError, norm, A, 0) - - def test_matrix_3x3(self): - # This test has been added because the 2x2 example - # happened to have equal nuclear norm and induced 1-norm. - # The 1/10 scaling factor accommodates the absolute tolerance - # used in assert_almost_equal. - A = (1 / 10) * \ - np.array([[1, 2, 3], [6, 0, 5], [3, 2, 1]], dtype=self.dt) - assert_almost_equal(norm(A), (1 / 10) * 89 ** 0.5) - assert_almost_equal(norm(A, 'fro'), (1 / 10) * 89 ** 0.5) - assert_almost_equal(norm(A, 'nuc'), 1.3366836911774836) - assert_almost_equal(norm(A, inf), 1.1) - assert_almost_equal(norm(A, -inf), 0.6) - assert_almost_equal(norm(A, 1), 1.0) - assert_almost_equal(norm(A, -1), 0.4) - assert_almost_equal(norm(A, 2), 0.88722940323461277) - assert_almost_equal(norm(A, -2), 0.19456584790481812) - def test_axis(self): # Vector norms. 
# Compare the use of `axis` with computing the norm of each row @@ -1359,10 +1310,103 @@ class _TestNorm(object): assert_(found.shape == expected_shape, shape_err.format(found.shape, expected_shape, order, k)) + +class _TestNorm2D(_TestNormBase): + # Define the part for 2d arrays separately, so we can subclass this + # and run the tests using np.matrix in matrixlib.tests.test_matrix_linalg. + array = np.array + + def test_matrix_empty(self): + assert_equal(norm(self.array([[]], dtype=self.dt)), 0.0) + + def test_matrix_return_type(self): + a = self.array([[1, 0, 1], [0, 1, 1]]) + + exact_types = np.typecodes['AllInteger'] + + # float32, complex64, float64, complex128 types are the only types + # allowed by `linalg`, which performs the matrix operations used + # within `norm`. + inexact_types = 'fdFD' + + all_types = exact_types + inexact_types + + for each_inexact_types in all_types: + at = a.astype(each_inexact_types) + + an = norm(at, -np.inf) + assert_(issubclass(an.dtype.type, np.floating)) + assert_almost_equal(an, 2.0) + + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "divide by zero encountered") + an = norm(at, -1) + assert_(issubclass(an.dtype.type, np.floating)) + assert_almost_equal(an, 1.0) + + an = norm(at, 1) + assert_(issubclass(an.dtype.type, np.floating)) + assert_almost_equal(an, 2.0) + + an = norm(at, 2) + assert_(issubclass(an.dtype.type, np.floating)) + assert_almost_equal(an, 3.0**(1.0/2.0)) + + an = norm(at, -2) + assert_(issubclass(an.dtype.type, np.floating)) + assert_almost_equal(an, 1.0) + + an = norm(at, np.inf) + assert_(issubclass(an.dtype.type, np.floating)) + assert_almost_equal(an, 2.0) + + an = norm(at, 'fro') + assert_(issubclass(an.dtype.type, np.floating)) + assert_almost_equal(an, 2.0) + + an = norm(at, 'nuc') + assert_(issubclass(an.dtype.type, np.floating)) + # Lower bar needed to support low precision floats. + # They end up being off by 1 in the 7th place. 
+ np.testing.assert_almost_equal(an, 2.7320508075688772, decimal=6) + + def test_matrix_2x2(self): + A = self.array([[1, 3], [5, 7]], dtype=self.dt) + assert_almost_equal(norm(A), 84 ** 0.5) + assert_almost_equal(norm(A, 'fro'), 84 ** 0.5) + assert_almost_equal(norm(A, 'nuc'), 10.0) + assert_almost_equal(norm(A, inf), 12.0) + assert_almost_equal(norm(A, -inf), 4.0) + assert_almost_equal(norm(A, 1), 10.0) + assert_almost_equal(norm(A, -1), 6.0) + assert_almost_equal(norm(A, 2), 9.1231056256176615) + assert_almost_equal(norm(A, -2), 0.87689437438234041) + + assert_raises(ValueError, norm, A, 'nofro') + assert_raises(ValueError, norm, A, -3) + assert_raises(ValueError, norm, A, 0) + + def test_matrix_3x3(self): + # This test has been added because the 2x2 example + # happened to have equal nuclear norm and induced 1-norm. + # The 1/10 scaling factor accommodates the absolute tolerance + # used in assert_almost_equal. + A = (1 / 10) * \ + self.array([[1, 2, 3], [6, 0, 5], [3, 2, 1]], dtype=self.dt) + assert_almost_equal(norm(A), (1 / 10) * 89 ** 0.5) + assert_almost_equal(norm(A, 'fro'), (1 / 10) * 89 ** 0.5) + assert_almost_equal(norm(A, 'nuc'), 1.3366836911774836) + assert_almost_equal(norm(A, inf), 1.1) + assert_almost_equal(norm(A, -inf), 0.6) + assert_almost_equal(norm(A, 1), 1.0) + assert_almost_equal(norm(A, -1), 0.4) + assert_almost_equal(norm(A, 2), 0.88722940323461277) + assert_almost_equal(norm(A, -2), 0.19456584790481812) + def test_bad_args(self): # Check that bad arguments raise the appropriate exceptions. 
- A = array([[1, 2, 3], [4, 5, 6]], dtype=self.dt) + A = self.array([[1, 2, 3], [4, 5, 6]], dtype=self.dt) B = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4) # Using `axis=<integer>` or passing in a 1-D array implies vector @@ -1386,6 +1430,10 @@ class _TestNorm(object): assert_raises(ValueError, norm, B, None, (0, 1, 2)) +class _TestNorm(_TestNorm2D, _TestNormGeneral): + pass + + class TestNorm_NonSystematic(object): def test_longdouble_norm(self): @@ -1413,21 +1461,34 @@ class TestNorm_NonSystematic(object): old_assert_almost_equal(np.linalg.norm(d, ord=3), res, decimal=5) -class TestNormDouble(_TestNorm): +# Separate definitions so we can use them for matrix tests. +class _TestNormDoubleBase(_TestNormBase): dt = np.double dec = 12 -class TestNormSingle(_TestNorm): +class _TestNormSingleBase(_TestNormBase): dt = np.float32 dec = 6 -class TestNormInt64(_TestNorm): +class _TestNormInt64Base(_TestNormBase): dt = np.int64 dec = 12 +class TestNormDouble(_TestNorm, _TestNormDoubleBase): + pass + + +class TestNormSingle(_TestNorm, _TestNormSingleBase): + pass + + +class TestNormInt64(_TestNorm, _TestNormInt64Base): + pass + + class TestMatrixRank(object): def test_matrix_rank(self): @@ -1478,6 +1539,8 @@ def test_reduced_rank(): class TestQR(object): + # Define the array class here, so run this on matrices elsewhere. + array = np.array def check_qr(self, a): # This test expects the argument `a` to be an ndarray or @@ -1528,7 +1591,7 @@ class TestQR(object): # of the functions in lapack_lite. Consequently, this test is # very limited in scope. Note that the results are in FORTRAN # order, hence the h arrays are transposed. 
- a = array([[1, 2], [3, 4], [5, 6]], dtype=np.double) + a = self.array([[1, 2], [3, 4], [5, 6]], dtype=np.double) # Test double h, tau = linalg.qr(a, mode='raw') @@ -1544,22 +1607,21 @@ class TestQR(object): assert_(tau.shape == (2,)) def test_mode_all_but_economic(self): - a = array([[1, 2], [3, 4]]) - b = array([[1, 2], [3, 4], [5, 6]]) + a = self.array([[1, 2], [3, 4]]) + b = self.array([[1, 2], [3, 4], [5, 6]]) for dt in "fd": m1 = a.astype(dt) m2 = b.astype(dt) self.check_qr(m1) self.check_qr(m2) self.check_qr(m2.T) - self.check_qr(matrix(m1)) + for dt in "fd": m1 = 1 + 1j * a.astype(dt) m2 = 1 + 1j * b.astype(dt) self.check_qr(m1) self.check_qr(m2) self.check_qr(m2.T) - self.check_qr(matrix(m1)) def test_0_size(self): # There may be good ways to do (some of this) reasonably: diff --git a/numpy/linalg/umath_linalg.c.src b/numpy/linalg/umath_linalg.c.src index 03fdd387a..3a5ad7250 100644 --- a/numpy/linalg/umath_linalg.c.src +++ b/numpy/linalg/umath_linalg.c.src @@ -382,17 +382,11 @@ typedef f2c_doublecomplex fortran_doublecomplex; ***************************************************************************** */ -static NPY_INLINE void * -offset_ptr(void* ptr, ptrdiff_t offset) -{ - return (void*)((npy_uint8*)ptr + offset); -} - static NPY_INLINE int get_fp_invalid_and_clear(void) { int status; - status = npy_clear_floatstatus(); + status = npy_clear_floatstatus_barrier((char*)&status); return !!(status & NPY_FPE_INVALID); } @@ -403,7 +397,7 @@ set_fp_invalid_or_clear(int error_occurred) npy_set_floatstatus_invalid(); } else { - npy_clear_floatstatus(); + npy_clear_floatstatus_barrier((char*)&error_occurred); } } @@ -577,104 +571,6 @@ dump_linearize_data(const char* name, const LINEARIZE_DATA_t* params) params->row_strides, params->column_strides); } - -static NPY_INLINE float -FLOAT_add(float op1, float op2) -{ - return op1 + op2; -} - -static NPY_INLINE double -DOUBLE_add(double op1, double op2) -{ - return op1 + op2; -} - -static NPY_INLINE COMPLEX_t 
-CFLOAT_add(COMPLEX_t op1, COMPLEX_t op2) -{ - COMPLEX_t result; - result.array[0] = op1.array[0] + op2.array[0]; - result.array[1] = op1.array[1] + op2.array[1]; - - return result; -} - -static NPY_INLINE DOUBLECOMPLEX_t -CDOUBLE_add(DOUBLECOMPLEX_t op1, DOUBLECOMPLEX_t op2) -{ - DOUBLECOMPLEX_t result; - result.array[0] = op1.array[0] + op2.array[0]; - result.array[1] = op1.array[1] + op2.array[1]; - - return result; -} - -static NPY_INLINE float -FLOAT_mul(float op1, float op2) -{ - return op1*op2; -} - -static NPY_INLINE double -DOUBLE_mul(double op1, double op2) -{ - return op1*op2; -} - - -static NPY_INLINE COMPLEX_t -CFLOAT_mul(COMPLEX_t op1, COMPLEX_t op2) -{ - COMPLEX_t result; - result.array[0] = op1.array[0]*op2.array[0] - op1.array[1]*op2.array[1]; - result.array[1] = op1.array[1]*op2.array[0] + op1.array[0]*op2.array[1]; - - return result; -} - -static NPY_INLINE DOUBLECOMPLEX_t -CDOUBLE_mul(DOUBLECOMPLEX_t op1, DOUBLECOMPLEX_t op2) -{ - DOUBLECOMPLEX_t result; - result.array[0] = op1.array[0]*op2.array[0] - op1.array[1]*op2.array[1]; - result.array[1] = op1.array[1]*op2.array[0] + op1.array[0]*op2.array[1]; - - return result; -} - -static NPY_INLINE float -FLOAT_mulc(float op1, float op2) -{ - return op1*op2; -} - -static NPY_INLINE double -DOUBLE_mulc(float op1, float op2) -{ - return op1*op2; -} - -static NPY_INLINE COMPLEX_t -CFLOAT_mulc(COMPLEX_t op1, COMPLEX_t op2) -{ - COMPLEX_t result; - result.array[0] = op1.array[0]*op2.array[0] + op1.array[1]*op2.array[1]; - result.array[1] = op1.array[0]*op2.array[1] - op1.array[1]*op2.array[0]; - - return result; -} - -static NPY_INLINE DOUBLECOMPLEX_t -CDOUBLE_mulc(DOUBLECOMPLEX_t op1, DOUBLECOMPLEX_t op2) -{ - DOUBLECOMPLEX_t result; - result.array[0] = op1.array[0]*op2.array[0] + op1.array[1]*op2.array[1]; - result.array[1] = op1.array[0]*op2.array[1] - op1.array[1]*op2.array[0]; - - return result; -} - static NPY_INLINE void print_FLOAT(npy_float s) { diff --git a/numpy/ma/core.py b/numpy/ma/core.py 
index 91cf8ed0f..fb28fa8e5 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -3089,7 +3089,7 @@ class MaskedArray(ndarray): returned object (this is equivalent to setting the ``type`` parameter). type : Python type, optional - Type of the returned view, e.g., ndarray or matrix. Again, the + Type of the returned view, either ndarray or a subclass. The default None results in type preservation. Notes @@ -3673,14 +3673,14 @@ class MaskedArray(ndarray): >>> type(x.filled()) <type 'numpy.ndarray'> - Subclassing is preserved. This means that if the data part of the masked - array is a matrix, `filled` returns a matrix: - - >>> x = np.ma.array(np.matrix([[1, 2], [3, 4]]), mask=[[0, 1], [1, 0]]) - >>> x.filled() - matrix([[ 1, 999999], - [999999, 4]]) + Subclassing is preserved. This means that if, e.g., the data part of + the masked array is a recarray, `filled` returns a recarray: + >>> x = np.array([(-1, 2), (-3, 4)], dtype='i8,i8').view(np.recarray) + >>> m = np.ma.array(x, mask=[(True, False), (False, True)]) + >>> m.filled() + rec.array([(999999, 2), ( -3, 999999)], + dtype=[('f0', '<i8'), ('f1', '<i8')]) """ m = self._mask if m is nomask: diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py index 8272dced9..da35217d1 100644 --- a/numpy/ma/extras.py +++ b/numpy/ma/extras.py @@ -1465,9 +1465,14 @@ class MAxisConcatenator(AxisConcatenator): """ concatenate = staticmethod(concatenate) - @staticmethod - def makemat(arr): - return array(arr.data.view(np.matrix), mask=arr.mask) + @classmethod + def makemat(cls, arr): + # There used to be a view as np.matrix here, but we may eventually + # deprecate that class. 
In preparation, we use the unmasked version + # to construct the matrix (with copy=False for backwards compatibility + # with the .view) + data = super(MAxisConcatenator, cls).makemat(arr.data, copy=False) + return array(data, mask=arr.mask) def __getitem__(self, key): # matrix builder syntax, like 'a, b; c, d' diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py index 9caf38b56..63703f6cd 100644 --- a/numpy/ma/tests/test_core.py +++ b/numpy/ma/tests/test_core.py @@ -335,49 +335,6 @@ class TestMaskedArray(object): assert_equal(s1, s2) assert_(x1[1:1].shape == (0,)) - def test_matrix_indexing(self): - # Tests conversions and indexing - x1 = np.matrix([[1, 2, 3], [4, 3, 2]]) - x2 = array(x1, mask=[[1, 0, 0], [0, 1, 0]]) - x3 = array(x1, mask=[[0, 1, 0], [1, 0, 0]]) - x4 = array(x1) - # test conversion to strings - str(x2) # raises? - repr(x2) # raises? - # tests of indexing - assert_(type(x2[1, 0]) is type(x1[1, 0])) - assert_(x1[1, 0] == x2[1, 0]) - assert_(x2[1, 1] is masked) - assert_equal(x1[0, 2], x2[0, 2]) - assert_equal(x1[0, 1:], x2[0, 1:]) - assert_equal(x1[:, 2], x2[:, 2]) - assert_equal(x1[:], x2[:]) - assert_equal(x1[1:], x3[1:]) - x1[0, 2] = 9 - x2[0, 2] = 9 - assert_equal(x1, x2) - x1[0, 1:] = 99 - x2[0, 1:] = 99 - assert_equal(x1, x2) - x2[0, 1] = masked - assert_equal(x1, x2) - x2[0, 1:] = masked - assert_equal(x1, x2) - x2[0, :] = x1[0, :] - x2[0, 1] = masked - assert_(allequal(getmask(x2), np.array([[0, 1, 0], [0, 1, 0]]))) - x3[1, :] = masked_array([1, 2, 3], [1, 1, 0]) - assert_(allequal(getmask(x3)[1], array([1, 1, 0]))) - assert_(allequal(getmask(x3[1]), array([1, 1, 0]))) - x4[1, :] = masked_array([1, 2, 3], [1, 1, 0]) - assert_(allequal(getmask(x4[1]), array([1, 1, 0]))) - assert_(allequal(x4[1], array([1, 2, 3]))) - x1 = np.matrix(np.arange(5) * 1.0) - x2 = masked_values(x1, 3.0) - assert_equal(x1, x2) - assert_(allequal(array([0, 0, 0, 1, 0], MaskType), x2.mask)) - assert_equal(3.0, x2.fill_value) - 
@suppress_copy_mask_on_assignment def test_copy(self): # Tests of some subtle points of copying and sizing. @@ -611,11 +568,13 @@ class TestMaskedArray(object): def test_pickling_subbaseclass(self): # Test pickling w/ a subclass of ndarray - a = array(np.matrix(list(range(10))), mask=[1, 0, 1, 0, 0] * 2) + x = np.array([(1.0, 2), (3.0, 4)], + dtype=[('x', float), ('y', int)]).view(np.recarray) + a = masked_array(x, mask=[(True, False), (False, True)]) a_pickled = pickle.loads(a.dumps()) assert_equal(a_pickled._mask, a._mask) assert_equal(a_pickled, a) - assert_(isinstance(a_pickled._data, np.matrix)) + assert_(isinstance(a_pickled._data, np.recarray)) def test_pickling_maskedconstant(self): # Test pickling MaskedConstant @@ -1448,16 +1407,6 @@ class TestMaskedArrayArithmetic(object): assert_(result is output) assert_(output[0] is masked) - def test_count_mean_with_matrix(self): - m = np.ma.array(np.matrix([[1,2],[3,4]]), mask=np.zeros((2,2))) - - assert_equal(m.count(axis=0).shape, (1,2)) - assert_equal(m.count(axis=1).shape, (2,1)) - - #make sure broadcasting inside mean and var work - assert_equal(m.mean(axis=0), [[2., 3.]]) - assert_equal(m.mean(axis=1), [[1.5], [3.5]]) - def test_eq_on_structured(self): # Test the equality of structured arrays ndtype = [('A', int), ('B', int)] @@ -1740,23 +1689,6 @@ class TestMaskedArrayAttributes(object): def test_flat(self): # Test that flat can return all types of items [#4585, #4615] - # test simple access - test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1]) - assert_equal(test.flat[1], 2) - assert_equal(test.flat[2], masked) - assert_(np.all(test.flat[0:2] == test[0, 0:2])) - # Test flat on masked_matrices - test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1]) - test.flat = masked_array([3, 2, 1], mask=[1, 0, 0]) - control = masked_array(np.matrix([[3, 2, 1]]), mask=[1, 0, 0]) - assert_equal(test, control) - # Test setting - test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1]) - testflat = test.flat 
- testflat[:] = testflat[[2, 1, 0]] - assert_equal(test, control) - testflat[0] = 9 - assert_equal(test[0, 0], 9) # test 2-D record array # ... on structured array w/ masked records x = array([[(1, 1.1, 'one'), (2, 2.2, 'two'), (3, 3.3, 'thr')], @@ -1784,12 +1716,6 @@ class TestMaskedArrayAttributes(object): if i >= x.shape[-1]: i = 0 j += 1 - # test that matrices keep the correct shape (#4615) - a = masked_array(np.matrix(np.eye(2)), mask=0) - b = a.flat - b01 = b[:2] - assert_equal(b01.data, array([[1., 0.]])) - assert_equal(b01.mask, array([[False, False]])) def test_assign_dtype(self): # check that the mask's dtype is updated when dtype is changed @@ -2893,32 +2819,6 @@ class TestMaskedArrayMethods(object): assert_equal(mxsmall.any(0), [True, True, False]) assert_equal(mxsmall.any(1), [True, True, False]) - def test_allany_onmatrices(self): - x = np.array([[0.13, 0.26, 0.90], - [0.28, 0.33, 0.63], - [0.31, 0.87, 0.70]]) - X = np.matrix(x) - m = np.array([[True, False, False], - [False, False, False], - [True, True, False]], dtype=np.bool_) - mX = masked_array(X, mask=m) - mXbig = (mX > 0.5) - mXsmall = (mX < 0.5) - - assert_(not mXbig.all()) - assert_(mXbig.any()) - assert_equal(mXbig.all(0), np.matrix([False, False, True])) - assert_equal(mXbig.all(1), np.matrix([False, False, True]).T) - assert_equal(mXbig.any(0), np.matrix([False, False, True])) - assert_equal(mXbig.any(1), np.matrix([True, True, True]).T) - - assert_(not mXsmall.all()) - assert_(mXsmall.any()) - assert_equal(mXsmall.all(0), np.matrix([True, True, False])) - assert_equal(mXsmall.all(1), np.matrix([False, False, False]).T) - assert_equal(mXsmall.any(0), np.matrix([True, True, False])) - assert_equal(mXsmall.any(1), np.matrix([True, True, False]).T) - def test_allany_oddities(self): # Some fun with all and any store = empty((), dtype=bool) @@ -3017,14 +2917,6 @@ class TestMaskedArrayMethods(object): b = a.compressed() assert_equal(b, [2, 3, 4]) - a = array(np.matrix([1, 2, 3, 4]), mask=[0, 0, 
0, 0]) - b = a.compressed() - assert_equal(b, a) - assert_(isinstance(b, np.matrix)) - a[0, 0] = masked - b = a.compressed() - assert_equal(b, [[2, 3, 4]]) - def test_empty(self): # Tests empty/like datatype = [('a', int), ('b', float), ('c', '|S8')] @@ -3139,10 +3031,6 @@ class TestMaskedArrayMethods(object): a = array([0, 0], mask=[1, 1]) aravel = a.ravel() assert_equal(aravel._mask.shape, a.shape) - a = array(np.matrix([1, 2, 3, 4, 5]), mask=[[0, 1, 0, 0, 0]]) - aravel = a.ravel() - assert_equal(aravel.shape, (1, 5)) - assert_equal(aravel._mask.shape, a.shape) # Checks that small_mask is preserved a = array([1, 2, 3, 4], mask=[0, 0, 0, 0], shrink=False) assert_equal(a.ravel()._mask, [0, 0, 0, 0]) @@ -4607,10 +4495,6 @@ class TestMaskedFields(object): assert_equal(test, data) assert_equal(test.mask, controlmask.reshape(-1, 2)) - test = a.view((float, 2), np.matrix) - assert_equal(test, data) - assert_(isinstance(test, np.matrix)) - def test_getitem(self): ndtype = [('a', float), ('b', float)] a = array(list(zip(np.random.rand(10), np.arange(10))), dtype=ndtype) @@ -4794,11 +4678,12 @@ class TestMaskedView(object): def test_view_to_dtype_and_type(self): (data, a, controlmask) = self.data - test = a.view((float, 2), np.matrix) + test = a.view((float, 2), np.recarray) assert_equal(test, data) - assert_(isinstance(test, np.matrix)) + assert_(isinstance(test, np.recarray)) assert_(not isinstance(test, MaskedArray)) + class TestOptionalArgs(object): def test_ndarrayfuncs(self): # test axis arg behaves the same as ndarray (including multiple axes) diff --git a/numpy/ma/tests/test_extras.py b/numpy/ma/tests/test_extras.py index a7a32b628..2d5e30b2c 100644 --- a/numpy/ma/tests/test_extras.py +++ b/numpy/ma/tests/test_extras.py @@ -311,6 +311,9 @@ class TestConcatenator(object): assert_raises(np.ma.MAError, lambda: mr_['1, 2; 3, 4']) def test_matrix(self): + # Test consistency with unmasked version. 
If we ever deprecate + # matrix, this test should either still pass, or both actual and + # expected should fail to be build. actual = mr_['r', 1, 2, 3] expected = np.ma.array(np.r_['r', 1, 2, 3]) assert_array_equal(actual, expected) diff --git a/numpy/ma/tests/test_subclassing.py b/numpy/ma/tests/test_subclassing.py index b61a46278..f8ab52bb9 100644 --- a/numpy/ma/tests/test_subclassing.py +++ b/numpy/ma/tests/test_subclassing.py @@ -75,27 +75,6 @@ class MSubArray(SubArray, MaskedArray): msubarray = MSubArray -class MMatrix(MaskedArray, np.matrix,): - - def __new__(cls, data, mask=nomask): - mat = np.matrix(data) - _data = MaskedArray.__new__(cls, data=mat, mask=mask) - return _data - - def __array_finalize__(self, obj): - np.matrix.__array_finalize__(self, obj) - MaskedArray.__array_finalize__(self, obj) - return - - def _get_series(self): - _view = self.view(MaskedArray) - _view._sharedmask = False - return _view - _series = property(fget=_get_series) - -mmatrix = MMatrix - - # Also a subclass that overrides __str__, __repr__ and __setitem__, disallowing # setting to non-class values (and thus np.ma.core.masked_print_option) # and overrides __array_wrap__, updating the info dict, to check that this @@ -180,7 +159,7 @@ class TestSubclassing(object): def setup(self): x = np.arange(5, dtype='float') - mx = mmatrix(x, mask=[0, 1, 0, 0, 0]) + mx = msubarray(x, mask=[0, 1, 0, 0, 0]) self.data = (x, mx) def test_data_subclassing(self): @@ -196,34 +175,34 @@ class TestSubclassing(object): def test_maskedarray_subclassing(self): # Tests subclassing MaskedArray (x, mx) = self.data - assert_(isinstance(mx._data, np.matrix)) + assert_(isinstance(mx._data, subarray)) def test_masked_unary_operations(self): # Tests masked_unary_operation (x, mx) = self.data with np.errstate(divide='ignore'): - assert_(isinstance(log(mx), mmatrix)) + assert_(isinstance(log(mx), msubarray)) assert_equal(log(x), np.log(x)) def test_masked_binary_operations(self): # Tests masked_binary_operation 
(x, mx) = self.data - # Result should be a mmatrix - assert_(isinstance(add(mx, mx), mmatrix)) - assert_(isinstance(add(mx, x), mmatrix)) + # Result should be a msubarray + assert_(isinstance(add(mx, mx), msubarray)) + assert_(isinstance(add(mx, x), msubarray)) # Result should work assert_equal(add(mx, x), mx+x) - assert_(isinstance(add(mx, mx)._data, np.matrix)) - assert_(isinstance(add.outer(mx, mx), mmatrix)) - assert_(isinstance(hypot(mx, mx), mmatrix)) - assert_(isinstance(hypot(mx, x), mmatrix)) + assert_(isinstance(add(mx, mx)._data, subarray)) + assert_(isinstance(add.outer(mx, mx), msubarray)) + assert_(isinstance(hypot(mx, mx), msubarray)) + assert_(isinstance(hypot(mx, x), msubarray)) def test_masked_binary_operations2(self): # Tests domained_masked_binary_operation (x, mx) = self.data xmx = masked_array(mx.data.__array__(), mask=mx.mask) - assert_(isinstance(divide(mx, mx), mmatrix)) - assert_(isinstance(divide(mx, x), mmatrix)) + assert_(isinstance(divide(mx, mx), msubarray)) + assert_(isinstance(divide(mx, x), msubarray)) assert_equal(divide(mx, mx), divide(xmx, xmx)) def test_attributepropagation(self): diff --git a/numpy/matrixlib/defmatrix.py b/numpy/matrixlib/defmatrix.py index 1f5c94921..9909fec8d 100644 --- a/numpy/matrixlib/defmatrix.py +++ b/numpy/matrixlib/defmatrix.py @@ -5,8 +5,11 @@ __all__ = ['matrix', 'bmat', 'mat', 'asmatrix'] import sys import ast import numpy.core.numeric as N -from numpy.core.numeric import concatenate, isscalar, binary_repr, identity, asanyarray -from numpy.core.numerictypes import issubdtype +from numpy.core.numeric import concatenate, isscalar +# While not in __all__, matrix_power used to be defined here, so we import +# it for backward compatibility. 
+from numpy.linalg import matrix_power + def _convert_from_string(data): for char in '[]': @@ -63,114 +66,6 @@ def asmatrix(data, dtype=None): """ return matrix(data, dtype=dtype, copy=False) -def matrix_power(M, n): - """ - Raise a square matrix to the (integer) power `n`. - - For positive integers `n`, the power is computed by repeated matrix - squarings and matrix multiplications. If ``n == 0``, the identity matrix - of the same shape as M is returned. If ``n < 0``, the inverse - is computed and then raised to the ``abs(n)``. - - Parameters - ---------- - M : ndarray or matrix object - Matrix to be "powered." Must be square, i.e. ``M.shape == (m, m)``, - with `m` a positive integer. - n : int - The exponent can be any integer or long integer, positive, - negative, or zero. - - Returns - ------- - M**n : ndarray or matrix object - The return value is the same shape and type as `M`; - if the exponent is positive or zero then the type of the - elements is the same as those of `M`. If the exponent is - negative the elements are floating-point. - - Raises - ------ - LinAlgError - If the matrix is not numerically invertible. - - See Also - -------- - matrix - Provides an equivalent function as the exponentiation operator - (``**``, not ``^``). - - Examples - -------- - >>> from numpy import linalg as LA - >>> i = np.array([[0, 1], [-1, 0]]) # matrix equiv. of the imaginary unit - >>> LA.matrix_power(i, 3) # should = -i - array([[ 0, -1], - [ 1, 0]]) - >>> LA.matrix_power(np.matrix(i), 3) # matrix arg returns matrix - matrix([[ 0, -1], - [ 1, 0]]) - >>> LA.matrix_power(i, 0) - array([[1, 0], - [0, 1]]) - >>> LA.matrix_power(i, -3) # should = 1/(-i) = i, but w/ f.p. 
elements - array([[ 0., 1.], - [-1., 0.]]) - - Somewhat more sophisticated example - - >>> q = np.zeros((4, 4)) - >>> q[0:2, 0:2] = -i - >>> q[2:4, 2:4] = i - >>> q # one of the three quaternion units not equal to 1 - array([[ 0., -1., 0., 0.], - [ 1., 0., 0., 0.], - [ 0., 0., 0., 1.], - [ 0., 0., -1., 0.]]) - >>> LA.matrix_power(q, 2) # = -np.eye(4) - array([[-1., 0., 0., 0.], - [ 0., -1., 0., 0.], - [ 0., 0., -1., 0.], - [ 0., 0., 0., -1.]]) - - """ - M = asanyarray(M) - if M.ndim != 2 or M.shape[0] != M.shape[1]: - raise ValueError("input must be a square array") - if not issubdtype(type(n), N.integer): - raise TypeError("exponent must be an integer") - - from numpy.linalg import inv - - if n==0: - M = M.copy() - M[:] = identity(M.shape[0]) - return M - elif n<0: - M = inv(M) - n *= -1 - - result = M - if n <= 3: - for _ in range(n-1): - result=N.dot(result, M) - return result - - # binary decomposition to reduce the number of Matrix - # multiplications for n > 3. - beta = binary_repr(n) - Z, q, t = M, 0, len(beta) - while beta[t-q-1] == '0': - Z = N.dot(Z, Z) - q += 1 - result = Z - for k in range(q+1, t): - Z = N.dot(Z, Z) - if beta[t-k-1] == '1': - result = N.dot(result, Z) - return result - - class matrix(N.ndarray): """ matrix(data, dtype=None, copy=True) diff --git a/numpy/matrixlib/tests/test_defmatrix.py b/numpy/matrixlib/tests/test_defmatrix.py index a02a05c09..d160490b3 100644 --- a/numpy/matrixlib/tests/test_defmatrix.py +++ b/numpy/matrixlib/tests/test_defmatrix.py @@ -13,7 +13,7 @@ from numpy.testing import ( assert_, assert_equal, assert_almost_equal, assert_array_equal, assert_array_almost_equal, assert_raises ) -from numpy.matrixlib.defmatrix import matrix_power +from numpy.linalg import matrix_power from numpy.matrixlib import mat class TestCtor(object): diff --git a/numpy/matrixlib/tests/test_interaction.py b/numpy/matrixlib/tests/test_interaction.py new file mode 100644 index 000000000..fefb159c6 --- /dev/null +++ 
b/numpy/matrixlib/tests/test_interaction.py @@ -0,0 +1,361 @@ +"""Tests of interaction of matrix with other parts of numpy. + +Note that tests with MaskedArray and linalg are done in separate files. +""" +from __future__ import division, absolute_import, print_function + +import textwrap +import warnings + +import numpy as np +from numpy.testing import (assert_, assert_equal, assert_raises, + assert_raises_regex, assert_array_equal, + assert_almost_equal, assert_array_almost_equal) + + +def test_fancy_indexing(): + # The matrix class messes with the shape. While this is always + # weird (getitem is not used, it does not have setitem nor knows + # about fancy indexing), this tests gh-3110 + # 2018-04-29: moved here from core.tests.test_index. + m = np.matrix([[1, 2], [3, 4]]) + + assert_(isinstance(m[[0, 1, 0], :], np.matrix)) + + # gh-3110. Note the transpose currently because matrices do *not* + # support dimension fixing for fancy indexing correctly. + x = np.asmatrix(np.arange(50).reshape(5, 10)) + assert_equal(x[:2, np.array(-1)], x[:2, -1].T) + + +def test_polynomial_mapdomain(): + # test that polynomial preserved matrix subtype. + # 2018-04-29: moved here from polynomial.tests.polyutils. 
+ dom1 = [0, 4] + dom2 = [1, 3] + x = np.matrix([dom1, dom1]) + res = np.polynomial.polyutils.mapdomain(x, dom1, dom2) + assert_(isinstance(res, np.matrix)) + + +def test_sort_matrix_none(): + # 2018-04-29: moved here from core.tests.test_multiarray + a = np.matrix([[2, 1, 0]]) + actual = np.sort(a, axis=None) + expected = np.matrix([[0, 1, 2]]) + assert_equal(actual, expected) + assert_(type(expected) is np.matrix) + + +def test_partition_matrix_none(): + # gh-4301 + # 2018-04-29: moved here from core.tests.test_multiarray + a = np.matrix([[2, 1, 0]]) + actual = np.partition(a, 1, axis=None) + expected = np.matrix([[0, 1, 2]]) + assert_equal(actual, expected) + assert_(type(expected) is np.matrix) + + +def test_dot_scalar_and_matrix_of_objects(): + # Ticket #2469 + # 2018-04-29: moved here from core.tests.test_multiarray + arr = np.matrix([1, 2], dtype=object) + desired = np.matrix([[3, 6]], dtype=object) + assert_equal(np.dot(arr, 3), desired) + assert_equal(np.dot(3, arr), desired) + + +def test_inner_scalar_and_matrix(): + # 2018-04-29: moved here from core.tests.test_multiarray + for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?': + sca = np.array(3, dtype=dt)[()] + arr = np.matrix([[1, 2], [3, 4]], dtype=dt) + desired = np.matrix([[3, 6], [9, 12]], dtype=dt) + assert_equal(np.inner(arr, sca), desired) + assert_equal(np.inner(sca, arr), desired) + + +def test_inner_scalar_and_matrix_of_objects(): + # Ticket #4482 + # 2018-04-29: moved here from core.tests.test_multiarray + arr = np.matrix([1, 2], dtype=object) + desired = np.matrix([[3, 6]], dtype=object) + assert_equal(np.inner(arr, 3), desired) + assert_equal(np.inner(3, arr), desired) + + +def test_iter_allocate_output_subtype(): + # Make sure that the subtype with priority wins + # 2018-04-29: moved here from core.tests.test_nditer, given the + # matrix specific shape test. 
+ + # matrix vs ndarray + a = np.matrix([[1, 2], [3, 4]]) + b = np.arange(4).reshape(2, 2).T + i = np.nditer([a, b, None], [], + [['readonly'], ['readonly'], ['writeonly', 'allocate']]) + assert_(type(i.operands[2]) is np.matrix) + assert_(type(i.operands[2]) is not np.ndarray) + assert_equal(i.operands[2].shape, (2, 2)) + + # matrix always wants things to be 2D + b = np.arange(4).reshape(1, 2, 2) + assert_raises(RuntimeError, np.nditer, [a, b, None], [], + [['readonly'], ['readonly'], ['writeonly', 'allocate']]) + # but if subtypes are disabled, the result can still work + i = np.nditer([a, b, None], [], + [['readonly'], ['readonly'], + ['writeonly', 'allocate', 'no_subtype']]) + assert_(type(i.operands[2]) is np.ndarray) + assert_(type(i.operands[2]) is not np.matrix) + assert_equal(i.operands[2].shape, (1, 2, 2)) + + +def like_function(): + # 2018-04-29: moved here from core.tests.test_numeric + a = np.matrix([[1, 2], [3, 4]]) + for like_function in np.zeros_like, np.ones_like, np.empty_like: + b = like_function(a) + assert_(type(b) is np.matrix) + + c = like_function(a, subok=False) + assert_(type(c) is not np.matrix) + + +def test_array_astype(): + # 2018-04-29: copied here from core.tests.test_api + # subok=True passes through a matrix + a = np.matrix([[0, 1, 2], [3, 4, 5]], dtype='f4') + b = a.astype('f4', subok=True, copy=False) + assert_(a is b) + + # subok=True is default, and creates a subtype on a cast + b = a.astype('i4', copy=False) + assert_equal(a, b) + assert_equal(type(b), np.matrix) + + # subok=False never returns a matrix + b = a.astype('f4', subok=False, copy=False) + assert_equal(a, b) + assert_(not (a is b)) + assert_(type(b) is not np.matrix) + + +def test_stack(): + # 2018-04-29: copied here from core.tests.test_shape_base + # check np.matrix cannot be stacked + m = np.matrix([[1, 2], [3, 4]]) + assert_raises_regex(ValueError, 'shape too large to be a matrix', + np.stack, [m, m]) + + +def test_object_scalar_multiply(): + # Tickets #2469 and 
#4482 + # 2018-04-29: moved here from core.tests.test_ufunc + arr = np.matrix([1, 2], dtype=object) + desired = np.matrix([[3, 6]], dtype=object) + assert_equal(np.multiply(arr, 3), desired) + assert_equal(np.multiply(3, arr), desired) + + +def test_nanfunctions_matrices(): + # Check that it works and that type and + # shape are preserved + # 2018-04-29: moved here from core.tests.test_nanfunctions + mat = np.matrix(np.eye(3)) + for f in [np.nanmin, np.nanmax]: + res = f(mat, axis=0) + assert_(isinstance(res, np.matrix)) + assert_(res.shape == (1, 3)) + res = f(mat, axis=1) + assert_(isinstance(res, np.matrix)) + assert_(res.shape == (3, 1)) + res = f(mat) + assert_(np.isscalar(res)) + # check that rows of nan are dealt with for subclasses (#4628) + mat[1] = np.nan + for f in [np.nanmin, np.nanmax]: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + res = f(mat, axis=0) + assert_(isinstance(res, np.matrix)) + assert_(not np.any(np.isnan(res))) + assert_(len(w) == 0) + + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + res = f(mat, axis=1) + assert_(isinstance(res, np.matrix)) + assert_(np.isnan(res[1, 0]) and not np.isnan(res[0, 0]) + and not np.isnan(res[2, 0])) + assert_(len(w) == 1, 'no warning raised') + assert_(issubclass(w[0].category, RuntimeWarning)) + + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + res = f(mat) + assert_(np.isscalar(res)) + assert_(res != np.nan) + assert_(len(w) == 0) + + +def test_nanfunctions_matrices_general(): + # Check that it works and that type and + # shape are preserved + # 2018-04-29: moved here from core.tests.test_nanfunctions + mat = np.matrix(np.eye(3)) + for f in (np.nanargmin, np.nanargmax, np.nansum, np.nanprod, + np.nanmean, np.nanvar, np.nanstd): + res = f(mat, axis=0) + assert_(isinstance(res, np.matrix)) + assert_(res.shape == (1, 3)) + res = f(mat, axis=1) + assert_(isinstance(res, np.matrix)) + 
assert_(res.shape == (3, 1)) + res = f(mat) + assert_(np.isscalar(res)) + + for f in np.nancumsum, np.nancumprod: + res = f(mat, axis=0) + assert_(isinstance(res, np.matrix)) + assert_(res.shape == (3, 3)) + res = f(mat, axis=1) + assert_(isinstance(res, np.matrix)) + assert_(res.shape == (3, 3)) + res = f(mat) + assert_(isinstance(res, np.matrix)) + assert_(res.shape == (1, 3*3)) + + +def test_average_matrix(): + # 2018-04-29: moved here from core.tests.test_function_base. + y = np.matrix(np.random.rand(5, 5)) + assert_array_equal(y.mean(0), np.average(y, 0)) + + a = np.matrix([[1, 2], [3, 4]]) + w = np.matrix([[1, 2], [3, 4]]) + + r = np.average(a, axis=0, weights=w) + assert_equal(type(r), np.matrix) + assert_equal(r, [[2.5, 10.0/3]]) + + +def test_trapz_matrix(): + # Test to make sure matrices give the same answer as ndarrays + # 2018-04-29: moved here from core.tests.test_function_base. + x = np.linspace(0, 5) + y = x * x + r = np.trapz(y, x) + mx = np.matrix(x) + my = np.matrix(y) + mr = np.trapz(my, mx) + assert_almost_equal(mr, r) + + +def test_ediff1d_matrix(): + # 2018-04-29: moved here from core.tests.test_arraysetops. + assert(isinstance(np.ediff1d(np.matrix(1)), np.matrix)) + assert(isinstance(np.ediff1d(np.matrix(1), to_begin=1), np.matrix)) + + +def test_apply_along_axis_matrix(): + # this test is particularly malicious because matrix + # refuses to become 1d + # 2018-04-29: moved here from core.tests.test_shape_base. + def double(row): + return row * 2 + + m = np.matrix([[0, 1], [2, 3]]) + expected = np.matrix([[0, 2], [4, 6]]) + + result = np.apply_along_axis(double, 0, m) + assert_(isinstance(result, np.matrix)) + assert_array_equal(result, expected) + + result = np.apply_along_axis(double, 1, m) + assert_(isinstance(result, np.matrix)) + assert_array_equal(result, expected) + + +def test_kron_matrix(): + # 2018-04-29: moved here from core.tests.test_shape_base. 
+ a = np.ones([2, 2]) + m = np.asmatrix(a) + assert_equal(type(np.kron(a, a)), np.ndarray) + assert_equal(type(np.kron(m, m)), np.matrix) + assert_equal(type(np.kron(a, m)), np.matrix) + assert_equal(type(np.kron(m, a)), np.matrix) + + +class TestConcatenatorMatrix(object): + # 2018-04-29: moved here from core.tests.test_index_tricks. + def test_matrix(self): + a = [1, 2] + b = [3, 4] + + ab_r = np.r_['r', a, b] + ab_c = np.r_['c', a, b] + + assert_equal(type(ab_r), np.matrix) + assert_equal(type(ab_c), np.matrix) + + assert_equal(np.array(ab_r), [[1, 2, 3, 4]]) + assert_equal(np.array(ab_c), [[1], [2], [3], [4]]) + + assert_raises(ValueError, lambda: np.r_['rc', a, b]) + + def test_matrix_scalar(self): + r = np.r_['r', [1, 2], 3] + assert_equal(type(r), np.matrix) + assert_equal(np.array(r), [[1, 2, 3]]) + + def test_matrix_builder(self): + a = np.array([1]) + b = np.array([2]) + c = np.array([3]) + d = np.array([4]) + actual = np.r_['a, b; c, d'] + expected = np.bmat([[a, b], [c, d]]) + + assert_equal(actual, expected) + assert_equal(type(actual), type(expected)) + + +def test_array_equal_error_message_matrix(): + # 2018-04-29: moved here from testing.tests.test_utils. + try: + assert_equal(np.array([1, 2]), np.matrix([1, 2])) + except AssertionError as e: + msg = str(e) + msg2 = msg.replace("shapes (2L,), (1L, 2L)", "shapes (2,), (1, 2)") + msg_reference = textwrap.dedent("""\ + + Arrays are not equal + + (shapes (2,), (1, 2) mismatch) + x: array([1, 2]) + y: matrix([[1, 2]])""") + try: + assert_equal(msg, msg_reference) + except AssertionError: + assert_equal(msg2, msg_reference) + else: + raise AssertionError("Did not raise") + + +def test_array_almost_equal_matrix(): + # Matrix slicing keeps things 2-D, while array does not necessarily. + # See gh-8452. + # 2018-04-29: moved here from testing.tests.test_utils. 
+ m1 = np.matrix([[1., 2.]]) + m2 = np.matrix([[1., np.nan]]) + m3 = np.matrix([[1., -np.inf]]) + m4 = np.matrix([[np.nan, np.inf]]) + m5 = np.matrix([[1., 2.], [np.nan, np.inf]]) + for assert_func in assert_array_almost_equal, assert_almost_equal: + for m in m1, m2, m3, m4, m5: + assert_func(m, m) + a = np.array(m) + assert_func(a, m) + assert_func(m, a) diff --git a/numpy/matrixlib/tests/test_masked_matrix.py b/numpy/matrixlib/tests/test_masked_matrix.py new file mode 100644 index 000000000..80d1cacca --- /dev/null +++ b/numpy/matrixlib/tests/test_masked_matrix.py @@ -0,0 +1,211 @@ +from __future__ import division, absolute_import, print_function + +import pickle + +import numpy as np +from numpy.ma.testutils import assert_, assert_equal +from numpy.ma.core import (masked_array, masked_values, masked, allequal, + MaskType, getmask, MaskedArray, nomask, + log, add, hypot, divide) + + +class MMatrix(MaskedArray, np.matrix,): + + def __new__(cls, data, mask=nomask): + mat = np.matrix(data) + _data = MaskedArray.__new__(cls, data=mat, mask=mask) + return _data + + def __array_finalize__(self, obj): + np.matrix.__array_finalize__(self, obj) + MaskedArray.__array_finalize__(self, obj) + return + + def _get_series(self): + _view = self.view(MaskedArray) + _view._sharedmask = False + return _view + _series = property(fget=_get_series) + + +class TestMaskedMatrix(object): + def test_matrix_indexing(self): + # Tests conversions and indexing + x1 = np.matrix([[1, 2, 3], [4, 3, 2]]) + x2 = masked_array(x1, mask=[[1, 0, 0], [0, 1, 0]]) + x3 = masked_array(x1, mask=[[0, 1, 0], [1, 0, 0]]) + x4 = masked_array(x1) + # test conversion to strings + str(x2) # raises? + repr(x2) # raises? 
+ # tests of indexing + assert_(type(x2[1, 0]) is type(x1[1, 0])) + assert_(x1[1, 0] == x2[1, 0]) + assert_(x2[1, 1] is masked) + assert_equal(x1[0, 2], x2[0, 2]) + assert_equal(x1[0, 1:], x2[0, 1:]) + assert_equal(x1[:, 2], x2[:, 2]) + assert_equal(x1[:], x2[:]) + assert_equal(x1[1:], x3[1:]) + x1[0, 2] = 9 + x2[0, 2] = 9 + assert_equal(x1, x2) + x1[0, 1:] = 99 + x2[0, 1:] = 99 + assert_equal(x1, x2) + x2[0, 1] = masked + assert_equal(x1, x2) + x2[0, 1:] = masked + assert_equal(x1, x2) + x2[0, :] = x1[0, :] + x2[0, 1] = masked + assert_(allequal(getmask(x2), np.array([[0, 1, 0], [0, 1, 0]]))) + x3[1, :] = masked_array([1, 2, 3], [1, 1, 0]) + assert_(allequal(getmask(x3)[1], masked_array([1, 1, 0]))) + assert_(allequal(getmask(x3[1]), masked_array([1, 1, 0]))) + x4[1, :] = masked_array([1, 2, 3], [1, 1, 0]) + assert_(allequal(getmask(x4[1]), masked_array([1, 1, 0]))) + assert_(allequal(x4[1], masked_array([1, 2, 3]))) + x1 = np.matrix(np.arange(5) * 1.0) + x2 = masked_values(x1, 3.0) + assert_equal(x1, x2) + assert_(allequal(masked_array([0, 0, 0, 1, 0], dtype=MaskType), + x2.mask)) + assert_equal(3.0, x2.fill_value) + + def test_pickling_subbaseclass(self): + # Test pickling w/ a subclass of ndarray + a = masked_array(np.matrix(list(range(10))), mask=[1, 0, 1, 0, 0] * 2) + a_pickled = pickle.loads(a.dumps()) + assert_equal(a_pickled._mask, a._mask) + assert_equal(a_pickled, a) + assert_(isinstance(a_pickled._data, np.matrix)) + + def test_count_mean_with_matrix(self): + m = masked_array(np.matrix([[1, 2], [3, 4]]), mask=np.zeros((2, 2))) + + assert_equal(m.count(axis=0).shape, (1, 2)) + assert_equal(m.count(axis=1).shape, (2, 1)) + + # Make sure broadcasting inside mean and var work + assert_equal(m.mean(axis=0), [[2., 3.]]) + assert_equal(m.mean(axis=1), [[1.5], [3.5]]) + + def test_flat(self): + # Test that flat can return items even for matrices [#4585, #4615] + # test simple access + test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1]) + 
assert_equal(test.flat[1], 2) + assert_equal(test.flat[2], masked) + assert_(np.all(test.flat[0:2] == test[0, 0:2])) + # Test flat on masked_matrices + test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1]) + test.flat = masked_array([3, 2, 1], mask=[1, 0, 0]) + control = masked_array(np.matrix([[3, 2, 1]]), mask=[1, 0, 0]) + assert_equal(test, control) + # Test setting + test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1]) + testflat = test.flat + testflat[:] = testflat[[2, 1, 0]] + assert_equal(test, control) + testflat[0] = 9 + # test that matrices keep the correct shape (#4615) + a = masked_array(np.matrix(np.eye(2)), mask=0) + b = a.flat + b01 = b[:2] + assert_equal(b01.data, np.array([[1., 0.]])) + assert_equal(b01.mask, np.array([[False, False]])) + + def test_allany_onmatrices(self): + x = np.array([[0.13, 0.26, 0.90], + [0.28, 0.33, 0.63], + [0.31, 0.87, 0.70]]) + X = np.matrix(x) + m = np.array([[True, False, False], + [False, False, False], + [True, True, False]], dtype=np.bool_) + mX = masked_array(X, mask=m) + mXbig = (mX > 0.5) + mXsmall = (mX < 0.5) + + assert_(not mXbig.all()) + assert_(mXbig.any()) + assert_equal(mXbig.all(0), np.matrix([False, False, True])) + assert_equal(mXbig.all(1), np.matrix([False, False, True]).T) + assert_equal(mXbig.any(0), np.matrix([False, False, True])) + assert_equal(mXbig.any(1), np.matrix([True, True, True]).T) + + assert_(not mXsmall.all()) + assert_(mXsmall.any()) + assert_equal(mXsmall.all(0), np.matrix([True, True, False])) + assert_equal(mXsmall.all(1), np.matrix([False, False, False]).T) + assert_equal(mXsmall.any(0), np.matrix([True, True, False])) + assert_equal(mXsmall.any(1), np.matrix([True, True, False]).T) + + def test_compressed(self): + a = masked_array(np.matrix([1, 2, 3, 4]), mask=[0, 0, 0, 0]) + b = a.compressed() + assert_equal(b, a) + assert_(isinstance(b, np.matrix)) + a[0, 0] = masked + b = a.compressed() + assert_equal(b, [[2, 3, 4]]) + + def test_ravel(self): + a = 
masked_array(np.matrix([1, 2, 3, 4, 5]), mask=[[0, 1, 0, 0, 0]]) + aravel = a.ravel() + assert_equal(aravel.shape, (1, 5)) + assert_equal(aravel._mask.shape, a.shape) + + def test_view(self): + # Test view w/ flexible dtype + iterator = list(zip(np.arange(10), np.random.rand(10))) + data = np.array(iterator) + a = masked_array(iterator, dtype=[('a', float), ('b', float)]) + a.mask[0] = (1, 0) + test = a.view((float, 2), np.matrix) + assert_equal(test, data) + assert_(isinstance(test, np.matrix)) + assert_(not isinstance(test, MaskedArray)) + + +class TestSubclassing(object): + # Test suite for masked subclasses of ndarray. + + def setup(self): + x = np.arange(5, dtype='float') + mx = MMatrix(x, mask=[0, 1, 0, 0, 0]) + self.data = (x, mx) + + def test_maskedarray_subclassing(self): + # Tests subclassing MaskedArray + (x, mx) = self.data + assert_(isinstance(mx._data, np.matrix)) + + def test_masked_unary_operations(self): + # Tests masked_unary_operation + (x, mx) = self.data + with np.errstate(divide='ignore'): + assert_(isinstance(log(mx), MMatrix)) + assert_equal(log(x), np.log(x)) + + def test_masked_binary_operations(self): + # Tests masked_binary_operation + (x, mx) = self.data + # Result should be a MMatrix + assert_(isinstance(add(mx, mx), MMatrix)) + assert_(isinstance(add(mx, x), MMatrix)) + # Result should work + assert_equal(add(mx, x), mx+x) + assert_(isinstance(add(mx, mx)._data, np.matrix)) + assert_(isinstance(add.outer(mx, mx), MMatrix)) + assert_(isinstance(hypot(mx, mx), MMatrix)) + assert_(isinstance(hypot(mx, x), MMatrix)) + + def test_masked_binary_operations2(self): + # Tests domained_masked_binary_operation + (x, mx) = self.data + xmx = masked_array(mx.data.__array__(), mask=mx.mask) + assert_(isinstance(divide(mx, mx), MMatrix)) + assert_(isinstance(divide(mx, x), MMatrix)) + assert_equal(divide(mx, mx), divide(xmx, xmx)) diff --git a/numpy/matrixlib/tests/test_matrix_linalg.py b/numpy/matrixlib/tests/test_matrix_linalg.py new file mode 
100644 index 000000000..6fc733c2e --- /dev/null +++ b/numpy/matrixlib/tests/test_matrix_linalg.py @@ -0,0 +1,95 @@ +""" Test functions for linalg module using the matrix class.""" +from __future__ import division, absolute_import, print_function + +import numpy as np + +from numpy.linalg.tests.test_linalg import ( + LinalgCase, apply_tag, TestQR as _TestQR, LinalgTestCase, + _TestNorm2D, _TestNormDoubleBase, _TestNormSingleBase, _TestNormInt64Base, + SolveCases, InvCases, EigvalsCases, EigCases, SVDCases, CondCases, + PinvCases, DetCases, LstsqCases) + + +CASES = [] + +# square test cases +CASES += apply_tag('square', [ + LinalgCase("0x0_matrix", + np.empty((0, 0), dtype=np.double).view(np.matrix), + np.empty((0, 1), dtype=np.double).view(np.matrix), + tags={'size-0'}), + LinalgCase("matrix_b_only", + np.array([[1., 2.], [3., 4.]]), + np.matrix([2., 1.]).T), + LinalgCase("matrix_a_and_b", + np.matrix([[1., 2.], [3., 4.]]), + np.matrix([2., 1.]).T), +]) + +# hermitian test-cases +CASES += apply_tag('hermitian', [ + LinalgCase("hmatrix_a_and_b", + np.matrix([[1., 2.], [2., 1.]]), + None), +]) +# No need to make generalized or strided cases for matrices. 
+ + +class MatrixTestCase(LinalgTestCase): + TEST_CASES = CASES + + +class TestSolveMatrix(SolveCases, MatrixTestCase): + pass + + +class TestInvMatrix(InvCases, MatrixTestCase): + pass + + +class TestEigvalsMatrix(EigvalsCases, MatrixTestCase): + pass + + +class TestEigMatrix(EigCases, MatrixTestCase): + pass + + +class TestSVDMatrix(SVDCases, MatrixTestCase): + pass + + +class TestCondMatrix(CondCases, MatrixTestCase): + pass + + +class TestPinvMatrix(PinvCases, MatrixTestCase): + pass + + +class TestDetMatrix(DetCases, MatrixTestCase): + pass + + +class TestLstsqMatrix(LstsqCases, MatrixTestCase): + pass + + +class _TestNorm2DMatrix(_TestNorm2D): + array = np.matrix + + +class TestNormDoubleMatrix(_TestNorm2DMatrix, _TestNormDoubleBase): + pass + + +class TestNormSingleMatrix(_TestNorm2DMatrix, _TestNormSingleBase): + pass + + +class TestNormInt64Matrix(_TestNorm2DMatrix, _TestNormInt64Base): + pass + + +class TestQRMatrix(_TestQR): + array = np.matrix diff --git a/numpy/polynomial/tests/test_polyutils.py b/numpy/polynomial/tests/test_polyutils.py index 32ea55716..801c558cc 100644 --- a/numpy/polynomial/tests/test_polyutils.py +++ b/numpy/polynomial/tests/test_polyutils.py @@ -63,7 +63,7 @@ class TestDomain(object): dom1 = [0, 4] dom2 = [1, 3] tgt = dom2 - res = pu. mapdomain(dom1, dom1, dom2) + res = pu.mapdomain(dom1, dom1, dom2) assert_almost_equal(res, tgt) # test for complex values @@ -83,11 +83,14 @@ class TestDomain(object): assert_almost_equal(res, tgt) # test that subtypes are preserved. 
+ class MyNDArray(np.ndarray): + pass + dom1 = [0, 4] dom2 = [1, 3] - x = np.matrix([dom1, dom1]) + x = np.array([dom1, dom1]).view(MyNDArray) res = pu.mapdomain(x, dom1, dom2) - assert_(isinstance(res, np.matrix)) + assert_(isinstance(res, MyNDArray)) def test_mapparms(self): # test for real values diff --git a/numpy/random/mtrand/mtrand.pyx b/numpy/random/mtrand/mtrand.pyx index 8ef153c15..b45b3146f 100644 --- a/numpy/random/mtrand/mtrand.pyx +++ b/numpy/random/mtrand/mtrand.pyx @@ -4901,10 +4901,24 @@ cdef class RandomState: """ if isinstance(x, (int, long, np.integer)): arr = np.arange(x) - else: - arr = np.array(x) - self.shuffle(arr) - return arr + self.shuffle(arr) + return arr + + arr = np.asarray(x) + + # shuffle has fast-path for 1-d + if arr.ndim == 1: + # must return a copy + if arr is x: + arr = np.array(arr) + self.shuffle(arr) + return arr + + # Shuffle index array, dtype to ensure fast path + idx = np.arange(arr.shape[0], dtype=np.intp) + self.shuffle(idx) + return arr[idx] + _rand = RandomState() seed = _rand.seed diff --git a/numpy/testing/_private/decorators.py b/numpy/testing/_private/decorators.py index 60d3f968f..24c4e385d 100644 --- a/numpy/testing/_private/decorators.py +++ b/numpy/testing/_private/decorators.py @@ -34,7 +34,7 @@ def slow(t): The exact definition of a slow test is obviously both subjective and hardware-dependent, but in general any individual test that requires more - than a second or two should be labeled as slow (the whole suite consits of + than a second or two should be labeled as slow (the whole suite consists of thousands of tests, so even a second is significant). 
Parameters diff --git a/numpy/testing/tests/test_utils.py b/numpy/testing/tests/test_utils.py index 0592e62f8..c9e8384c2 100644 --- a/numpy/testing/tests/test_utils.py +++ b/numpy/testing/tests/test_utils.py @@ -286,7 +286,7 @@ class TestEqual(TestArrayEqual): def test_error_message(self): try: - self._assert_func(np.array([1, 2]), np.matrix([1, 2])) + self._assert_func(np.array([1, 2]), np.array([[1, 2]])) except AssertionError as e: msg = str(e) msg2 = msg.replace("shapes (2L,), (1L, 2L)", "shapes (2,), (1, 2)") @@ -296,7 +296,7 @@ class TestEqual(TestArrayEqual): (shapes (2,), (1, 2) mismatch) x: array([1, 2]) - y: matrix([[1, 2]])""") + y: array([[1, 2]])""") try: assert_equal(msg, msg_reference) except AssertionError: @@ -366,20 +366,6 @@ class TestArrayAlmostEqual(_GenericTest): self._assert_func(b, a) self._assert_func(b, b) - def test_matrix(self): - # Matrix slicing keeps things 2-D, while array does not necessarily. - # See gh-8452. - m1 = np.matrix([[1., 2.]]) - m2 = np.matrix([[1., np.nan]]) - m3 = np.matrix([[1., -np.inf]]) - m4 = np.matrix([[np.nan, np.inf]]) - m5 = np.matrix([[1., 2.], [np.nan, np.inf]]) - for m in m1, m2, m3, m4, m5: - self._assert_func(m, m) - a = np.array(m) - self._assert_func(a, m) - self._assert_func(m, a) - def test_subclass_that_cannot_be_bool(self): # While we cannot guarantee testing functions will always work for # subclasses, the tests should ideally rely only on subclasses having @@ -479,20 +465,6 @@ class TestAlmostEqual(_GenericTest): # remove anything that's not the array string assert_equal(str(e).split('%)\n ')[1], b) - def test_matrix(self): - # Matrix slicing keeps things 2-D, while array does not necessarily. - # See gh-8452. 
- m1 = np.matrix([[1., 2.]]) - m2 = np.matrix([[1., np.nan]]) - m3 = np.matrix([[1., -np.inf]]) - m4 = np.matrix([[np.nan, np.inf]]) - m5 = np.matrix([[1., 2.], [np.nan, np.inf]]) - for m in m1, m2, m3, m4, m5: - self._assert_func(m, m) - a = np.array(m) - self._assert_func(a, m) - self._assert_func(m, a) - def test_subclass_that_cannot_be_bool(self): # While we cannot guarantee testing functions will always work for # subclasses, the tests should ideally rely only on subclasses having diff --git a/site.cfg.example b/site.cfg.example index 645b48543..21609a332 100644 --- a/site.cfg.example +++ b/site.cfg.example @@ -180,6 +180,14 @@ # mkl_libs = mkl_rt # lapack_libs = +# ACCELERATE +# ---------- +# Accelerate/vecLib is an OSX framework providing a BLAS and LAPACK implementations. +# +# [accelerate] +# libraries = Accelerate, vecLib +# #libraries = None + # UMFPACK # ------- # The UMFPACK library is used in scikits.umfpack to factor large sparse matrices. |