summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--.travis.yml6
-rw-r--r--benchmarks/benchmarks/bench_core.py71
-rw-r--r--doc/HOWTO_DOCUMENT.rst.txt4
-rw-r--r--doc/HOWTO_RELEASE.rst.txt99
-rw-r--r--doc/example.py4
-rw-r--r--doc/release/1.11.1-notes.rst28
-rw-r--r--doc/release/1.12.0-notes.rst84
-rw-r--r--doc/source/reference/arrays.datetime.rst6
-rw-r--r--doc/source/reference/arrays.indexing.rst2
-rw-r--r--doc/source/reference/routines.array-creation.rst1
-rw-r--r--doc/source/reference/routines.polynomials.polynomial.rst1
-rw-r--r--doc/source/reference/ufuncs.rst3
-rw-r--r--doc/source/release.rst1
-rw-r--r--doc/source/user/basics.io.genfromtxt.rst50
-rw-r--r--numpy/add_newdocs.py88
-rw-r--r--numpy/compat/py3k.py13
-rw-r--r--numpy/core/__init__.py13
-rw-r--r--numpy/core/_internal.py2
-rw-r--r--numpy/core/fromnumeric.py2
-rw-r--r--numpy/core/function_base.py153
-rw-r--r--numpy/core/getlimits.py17
-rw-r--r--numpy/core/include/numpy/ndarraytypes.h17
-rw-r--r--numpy/core/memmap.py11
-rw-r--r--numpy/core/numeric.py121
-rw-r--r--numpy/core/numerictypes.py2
-rw-r--r--numpy/core/records.py2
-rw-r--r--numpy/core/src/multiarray/arrayobject.c56
-rw-r--r--numpy/core/src/multiarray/arraytypes.c.src51
-rw-r--r--numpy/core/src/multiarray/calculation.c22
-rw-r--r--numpy/core/src/multiarray/compiled_base.c177
-rw-r--r--numpy/core/src/multiarray/compiled_base.h2
-rw-r--r--numpy/core/src/multiarray/convert.c9
-rw-r--r--numpy/core/src/multiarray/ctors.c89
-rw-r--r--numpy/core/src/multiarray/ctors.h6
-rw-r--r--numpy/core/src/multiarray/descriptor.c40
-rw-r--r--numpy/core/src/multiarray/dtype_transfer.c90
-rw-r--r--numpy/core/src/multiarray/item_selection.c13
-rw-r--r--numpy/core/src/multiarray/iterators.c4
-rw-r--r--numpy/core/src/multiarray/mapping.c17
-rw-r--r--numpy/core/src/multiarray/methods.c14
-rw-r--r--numpy/core/src/multiarray/multiarraymodule.c2
-rw-r--r--numpy/core/src/multiarray/numpyos.c2
-rw-r--r--numpy/core/src/multiarray/numpyos.h2
-rw-r--r--numpy/core/src/multiarray/shape.c4
-rw-r--r--numpy/core/src/private/lowlevel_strided_loops.h2
-rw-r--r--numpy/core/src/private/npy_config.h4
-rw-r--r--numpy/core/src/umath/loops.c.src4
-rw-r--r--numpy/core/tests/test_api.py2
-rw-r--r--numpy/core/tests/test_deprecations.py25
-rw-r--r--numpy/core/tests/test_dtype.py10
-rw-r--r--numpy/core/tests/test_function_base.py202
-rw-r--r--numpy/core/tests/test_getlimits.py18
-rw-r--r--numpy/core/tests/test_indexing.py23
-rw-r--r--numpy/core/tests/test_memmap.py14
-rw-r--r--numpy/core/tests/test_multiarray.py74
-rw-r--r--numpy/core/tests/test_numeric.py2
-rw-r--r--numpy/core/tests/test_records.py14
-rw-r--r--numpy/core/tests/test_umath.py4
-rw-r--r--numpy/core/tests/test_unicode.py52
-rw-r--r--numpy/ctypeslib.py2
-rw-r--r--numpy/distutils/exec_command.py16
-rw-r--r--numpy/distutils/fcompiler/intel.py4
-rw-r--r--numpy/distutils/intelccompiler.py4
-rw-r--r--numpy/distutils/misc_util.py12
-rw-r--r--numpy/distutils/npy_pkg_config.py10
-rw-r--r--numpy/distutils/system_info.py14
-rw-r--r--numpy/distutils/tests/test_misc_util.py9
-rw-r--r--numpy/distutils/tests/test_system_info.py36
-rw-r--r--numpy/fft/fftpack.py20
-rw-r--r--numpy/fft/helper.py99
-rw-r--r--numpy/fft/tests/test_helper.py79
-rw-r--r--numpy/lib/format.py33
-rw-r--r--numpy/lib/function_base.py187
-rw-r--r--numpy/lib/npyio.py65
-rw-r--r--numpy/lib/polynomial.py13
-rw-r--r--numpy/lib/stride_tricks.py2
-rw-r--r--numpy/lib/tests/test_format.py22
-rw-r--r--numpy/lib/tests/test_function_base.py130
-rw-r--r--numpy/lib/tests/test_io.py109
-rw-r--r--numpy/lib/tests/test_polynomial.py32
-rw-r--r--numpy/lib/tests/test_twodim_base.py41
-rw-r--r--numpy/lib/twodim_base.py61
-rw-r--r--numpy/lib/utils.py4
-rw-r--r--numpy/linalg/linalg.py21
-rw-r--r--numpy/linalg/tests/test_regression.py46
-rw-r--r--numpy/ma/core.py67
-rw-r--r--numpy/ma/extras.py10
-rw-r--r--numpy/ma/tests/test_core.py23
-rw-r--r--numpy/ma/tests/test_extras.py13
-rw-r--r--numpy/polynomial/_polybase.py4
-rw-r--r--numpy/polynomial/polynomial.py93
-rw-r--r--numpy/polynomial/tests/test_polynomial.py64
-rw-r--r--numpy/random/mtrand/Python.pxi14
-rw-r--r--numpy/random/mtrand/distributions.c5
-rw-r--r--numpy/random/mtrand/mt_compat.h68
-rw-r--r--numpy/random/mtrand/mtrand.pyx31
-rw-r--r--numpy/random/mtrand/numpy.pxd6
-rw-r--r--numpy/random/tests/test_random.py22
-rw-r--r--numpy/random/tests/test_regression.py29
-rw-r--r--numpy/testing/tests/test_utils.py36
-rw-r--r--numpy/testing/utils.py108
-rw-r--r--numpy/tests/test_ctypeslib.py9
-rwxr-xr-xruntests.py4
-rwxr-xr-xsetup.py9
-rw-r--r--site.cfg.example1
-rw-r--r--tools/swig/numpy.i7
-rw-r--r--tools/swig/pyfragments.swg21
107 files changed, 2713 insertions, 853 deletions
diff --git a/.travis.yml b/.travis.yml
index 4bfe02ce8..fee1e72ee 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -87,11 +87,7 @@ before_install:
- source venv/bin/activate
- python -V
- pip install --upgrade pip setuptools
- - pip install nose
- - pip install pytz
- # pip install coverage
- # Speed up install by not compiling Cython
- - pip install --install-option="--no-cython-compile" Cython
+ - pip install nose pytz cython
- if [ -n "$USE_ASV" ]; then pip install asv; fi
- popd
diff --git a/benchmarks/benchmarks/bench_core.py b/benchmarks/benchmarks/bench_core.py
index 08e21cc8c..b595ae469 100644
--- a/benchmarks/benchmarks/bench_core.py
+++ b/benchmarks/benchmarks/bench_core.py
@@ -2,78 +2,78 @@ from __future__ import absolute_import, division, print_function
from .common import Benchmark
-import numpy
+import numpy as np
class Core(Benchmark):
def setup(self):
self.l100 = range(100)
self.l50 = range(50)
- self.l = [numpy.arange(1000), numpy.arange(1000)]
- self.l10x10 = numpy.ones((10, 10))
+ self.l = [np.arange(1000), np.arange(1000)]
+ self.l10x10 = np.ones((10, 10))
def time_array_1(self):
- numpy.array(1)
+ np.array(1)
def time_array_empty(self):
- numpy.array([])
+ np.array([])
def time_array_l1(self):
- numpy.array([1])
+ np.array([1])
def time_array_l100(self):
- numpy.array(self.l100)
+ np.array(self.l100)
def time_array_l(self):
- numpy.array(self.l)
+ np.array(self.l)
def time_vstack_l(self):
- numpy.vstack(self.l)
+ np.vstack(self.l)
def time_hstack_l(self):
- numpy.hstack(self.l)
+ np.hstack(self.l)
def time_dstack_l(self):
- numpy.dstack(self.l)
+ np.dstack(self.l)
def time_arange_100(self):
- numpy.arange(100)
+ np.arange(100)
def time_zeros_100(self):
- numpy.zeros(100)
+ np.zeros(100)
def time_ones_100(self):
- numpy.ones(100)
+ np.ones(100)
def time_empty_100(self):
- numpy.empty(100)
+ np.empty(100)
def time_eye_100(self):
- numpy.eye(100)
+ np.eye(100)
def time_identity_100(self):
- numpy.identity(100)
+ np.identity(100)
def time_eye_3000(self):
- numpy.eye(3000)
+ np.eye(3000)
def time_identity_3000(self):
- numpy.identity(3000)
+ np.identity(3000)
def time_diag_l100(self):
- numpy.diag(self.l100)
+ np.diag(self.l100)
def time_diagflat_l100(self):
- numpy.diagflat(self.l100)
+ np.diagflat(self.l100)
def time_diagflat_l50_l50(self):
- numpy.diagflat([self.l50, self.l50])
+ np.diagflat([self.l50, self.l50])
def time_triu_l10x10(self):
- numpy.triu(self.l10x10)
+ np.triu(self.l10x10)
def time_tril_l10x10(self):
- numpy.tril(self.l10x10)
+ np.tril(self.l10x10)
class MA(Benchmark):
@@ -82,10 +82,27 @@ class MA(Benchmark):
self.t100 = ([True] * 100)
def time_masked_array(self):
- numpy.ma.masked_array()
+ np.ma.masked_array()
def time_masked_array_l100(self):
- numpy.ma.masked_array(self.l100)
+ np.ma.masked_array(self.l100)
def time_masked_array_l100_t100(self):
- numpy.ma.masked_array(self.l100, self.t100)
+ np.ma.masked_array(self.l100, self.t100)
+
+
+class CorrConv(Benchmark):
+ params = [[50, 1000, 1e5],
+ [10, 100, 1000, 1e4],
+ ['valid', 'same', 'full']]
+ param_names = ['size1', 'size2', 'mode']
+
+ def setup(self, size1, size2, mode):
+ self.x1 = np.linspace(0, 1, num=size1)
+ self.x2 = np.cos(np.linspace(0, 2*np.pi, num=size2))
+
+ def time_correlate(self, size1, size2, mode):
+ np.correlate(self.x1, self.x2, mode=mode)
+
+ def time_convolve(self, size1, size2, mode):
+ np.convolve(self.x1, self.x2, mode=mode)
diff --git a/doc/HOWTO_DOCUMENT.rst.txt b/doc/HOWTO_DOCUMENT.rst.txt
index 721835013..954f331e8 100644
--- a/doc/HOWTO_DOCUMENT.rst.txt
+++ b/doc/HOWTO_DOCUMENT.rst.txt
@@ -178,6 +178,8 @@ The sections of the docstring are:
----------
x : type
Description of parameter `x`.
+ y
+ Description of parameter `y` (with type not specified)
Enclose variables in single backticks. The colon must be preceded
by a space, or omitted if the type is absent.
@@ -461,7 +463,7 @@ here, the **Parameters** section of the docstring details the constructors
parameters.
An **Attributes** section, located below the **Parameters** section,
-may be used to describe class variables::
+may be used to describe non-method attributes of the class::
Attributes
----------
diff --git a/doc/HOWTO_RELEASE.rst.txt b/doc/HOWTO_RELEASE.rst.txt
index ee05981fc..1c5fd7563 100644
--- a/doc/HOWTO_RELEASE.rst.txt
+++ b/doc/HOWTO_RELEASE.rst.txt
@@ -387,6 +387,58 @@ Trigger a build by doing an empty (or otherwise) commit to the repository::
The wheels, once built, appear in http://wheels.scipy.org
+Trigger Windows builds on Appveyor
+----------------------------------
+
+See: `build Windows wheels`_
+
+* Clone / update the https://github.com/numpy/windows-wheel-builder repository;
+* Check the ``appveyor.yml`` file in that repository;
+* Edit the line starting ``NP_VERSION:`` to give the numpy tag that you want
+ to build;
+* Push up to github to trigger a build.
+
+The wheels appear in a Rackspace CDN container at:
+
+* http://58688808cd85529d4031-38dee5dca2544308e91131f21428d924.r12.cf2.rackcdn.com
+* https://84c1a9a06db6836f5a98-38dee5dca2544308e91131f21428d924.ssl.cf2.rackcdn.com
+
+The contents via the HTTPS URL seem to get updated more slowly than via the
+HTTP URL, so if you need the binaries quickly, prefer the HTTP URL.
+
+.. _build Windows wheels: https://github.com/numpy/windows-wheel-builder
+
+Trigger Manylinux builds on travis-ci
+-------------------------------------
+
+.. note::
+
+ Until we move the manylinux build scripts, you'll need to ask
+ ``@matthew-brett`` to make you a collaborator on the manylinux repos.
+
+* Clone / update the repository at
+ https://github.com/matthew-brett/manylinux-builds
+* Edit the line in ``.travis.yml`` starting ``NUMPY_VERSIONS=`` to set the
+ numpy tag to build;
+* Push your edits to ``.travis.yml`` up to github to trigger a mass manylinux
+ build;
+* Clone / update the repository at
+ https://github.com/matthew-brett/manylinux-testing;
+* Push an empty commit to the ``manylinux-testing`` repo to trigger a test run
+ of the newly-built numpy wheels with a range of dependent libraries, as well
+ as numpy's own unit tests. The tests will take several hours.
+
+The built wheels will be available from a Rackspace CDN container at:
+
+* http://ccdd0ebb5a931e58c7c5-aae005c4999d7244ac63632f8b80e089.r77.cf2.rackcdn.com
+* https://d9a97980b71d47cde94d-aae005c4999d7244ac63632f8b80e089.ssl.cf2.rackcdn.com
+
+As for the other Rackspace containers, the HTTP address may update first, and
+you should wait 15 minutes after the build finishes before fetching the
+binaries. For the manylinux wheels, the time to run ``manylinux-testing`` is
+much greater than 15 minutes, so waiting for the tests to pass will be enough
+time for the binaries to refresh on Rackspace.
+
Make the release
----------------
@@ -403,11 +455,48 @@ For example::
scp <filename> <username>,numpy@frs.sourceforge.net:/home/frs/project/n/nu/numpy/NumPy/<releasedir>/
-Update PyPi
+Update PyPI
-----------
-The final release (not betas or release candidates) should be uploaded to PyPi.
-There are two ways to update PyPi, the first one is::
+The wheels and source should be uploaded to PyPI.
+
+You should upload the wheels first, and the source formats last, to make sure
+that pip users don't accidentally get a source install when they were
+expecting a binary wheel.
+
+You can do this automatically using the ``wheel-uploader`` script from
+https://github.com/MacPython/terryfy. Here is the recommended incantation for
+downloading all the Windows, Manylinux, OSX wheels and uploading to PyPI.
+
+::
+
+ cd ~/wheelhouse # local directory to cache wheel downloads
+ MANYLINUX_URL=http://ccdd0ebb5a931e58c7c5-aae005c4999d7244ac63632f8b80e089.r77.cf2.rackcdn.com
+ WINDOWS_URL=http://58688808cd85529d4031-38dee5dca2544308e91131f21428d924.r12.cf2.rackcdn.com
+ OSX_URL=http://wheels.scipy.org
+ wheel-uploader -u $MANYLINUX_URL -v -s -t manylinux1 numpy 1.11.1rc1
+ wheel-uploader -u $WINDOWS_URL -v -s -t win numpy 1.11.1rc1
+ wheel-uploader -u $OSX_URL -v -s -t macosx numpy 1.11.1rc1
+
+The ``-v`` flag gives verbose feedback, ``-s`` causes the script to sign the
+wheels with your GPG key before upload.
+
+You may well find that these uploads break at some point, with error messages
+from the PyPI server. In this case you'll have to continue the uploads by
+hand using `twine <https://pypi.python.org/pypi/twine>`_, using something
+like::
+
+ twine upload -s numpy-1.11.1rc1-cp34-*.whl
+
+Do this for the wheel files that ``wheel-uploader`` downloaded, but for which
+the upload failed.
+
+The ``warehouse`` PyPI server seems to be more reliable in receiving automated
+wheel uploads. You can set the repository to upload to with the ``-r`` flag
+to ``wheel-uploader`` and ``twine``. The warehouse repository URL for your
+``~/.pypirc`` file is https://upload.pypi.io/legacy/
+
+There are two ways to update the source release on PyPI, the first one is::
$ git clean -fxd # to be safe
$ python setup.py sdist --formats=gztar,zip # to check
@@ -417,10 +506,10 @@ This will ask for your key PGP passphrase, in order to sign the built source
packages.
The second way is to upload the PKG_INFO file inside the sdist dir in the
-web interface of PyPi. The source tarball can also be uploaded through this
+web interface of PyPI. The source tarball can also be uploaded through this
interface.
-To push the travis-ci OSX wheels up to pypi see :
+To push the travis-ci OSX wheels up to PyPI see :
https://github.com/MacPython/numpy-wheels#uploading-the-built-wheels-to-pypi
.. _push-tag-and-commit:
diff --git a/doc/example.py b/doc/example.py
index 5c891f64c..560775038 100644
--- a/doc/example.py
+++ b/doc/example.py
@@ -35,7 +35,7 @@ import matplotlib.pyplot as plt
from my_module import my_func, other_func
-def foo(var1, var2, long_var_name='hi') :
+def foo(var1, var2, long_var_name='hi'):
r"""A one-line summary that does not use variable names or the
function name.
@@ -111,7 +111,7 @@ def foo(var1, var2, long_var_name='hi') :
These are written in doctest format, and should illustrate how to
use the function.
- >>> a=[1,2,3]
+ >>> a = [1, 2, 3]
>>> print [x + 3 for x in a]
[4, 5, 6]
>>> print "a\n\nb"
diff --git a/doc/release/1.11.1-notes.rst b/doc/release/1.11.1-notes.rst
new file mode 100644
index 000000000..072f01ecb
--- /dev/null
+++ b/doc/release/1.11.1-notes.rst
@@ -0,0 +1,28 @@
+NumPy 1.11.1 Release Notes
+**************************
+
+Numpy 1.11.1 supports Python 2.6 - 2.7 and 3.2 - 3.5. It fixes bugs and
+regressions found in Numpy 1.11.0 and includes several build related
+improvements. Wheels for Linux, Windows, and OSX can be found on pypi.
+
+Fixes Merged
+============
+
+- #7506 BUG: Make sure numpy imports on python 2.6 when nose is unavailable.
+- #7530 BUG: Floating exception with invalid axis in np.lexsort.
+- #7535 BUG: Extend glibc complex trig functions blacklist to glibc < 2.18.
+- #7551 BUG: Allow graceful recovery for no compiler.
+- #7558 BUG: Constant padding expected wrong type in constant_values.
+- #7578 BUG: Fix OverflowError in Python 3.x. in swig interface.
+- #7590 BLD: Fix configparser.InterpolationSyntaxError.
+- #7597 BUG: Make np.ma.take work on scalars.
+- #7608 BUG: linalg.norm(): Don't convert object arrays to float.
+- #7638 BLD: Correct C compiler customization in system_info.py.
+- #7654 BUG: ma.median of 1d array should return a scalar.
+- #7656 BLD: Remove hardcoded Intel compiler flag -xSSE4.2.
+- #7660 BUG: Temporary fix for str(mvoid) for object field types.
+- #7665 BUG: Fix incorrect printing of 1D masked arrays.
+- #7670 BUG: Correct initial index estimate in histogram.
+- #7671 BUG: Boolean assignment no GIL release when transfer needs API.
+- #7676 BUG: Fix handling of right edge of final histogram bin.
+- #7680 BUG: Fix np.clip bug NaN handling for Visual Studio 2015.
diff --git a/doc/release/1.12.0-notes.rst b/doc/release/1.12.0-notes.rst
index 084f6bac5..ffab3ccd4 100644
--- a/doc/release/1.12.0-notes.rst
+++ b/doc/release/1.12.0-notes.rst
@@ -18,6 +18,9 @@ Future Changes
* In 1.13 NAT will always compare False except for ``NAT != NAT``,
which will be True. In short, NAT will behave like NaN
+* In 1.13 np.average will preserve subclasses, to match the behavior of most
+ other numpy functions such as np.mean. In particular, this means calls which
+ returned a scalar may return a 0-d subclass object instead.
Compatibility notes
@@ -86,6 +89,21 @@ FutureWarning to changed behavior
* ``np.full`` now returns an array of the fill-value's dtype if no dtype is
given, instead of defaulting to float.
+* np.average will emit a warning if the argument is a subclass of ndarray,
+ as the subclass will be preserved starting in 1.13. (see Future Changes)
+
+Greater consistency in ``assert_almost_equal``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+The precision check for scalars has been changed to match that for arrays. It
+is now
+
+ abs(actual - desired) < 1.5 * 10**(-decimal)
+
+Note that this is looser than previously documented, but agrees with the
+previous implementation used in ``assert_array_almost_equal``. Due to the
+change in implementation some very delicate tests may fail that did not
+fail before.
+
C API
~~~~~
@@ -101,6 +119,12 @@ keyword argument. It can be set to False when no write operation
to the returned array is expected to avoid accidental
unpredictable writes.
+
+``axes`` keyword argument for ``rot90``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+The ``axes`` keyword argument in ``rot90`` determines the plane in which the
+array is rotated. It defaults to ``axes=(0,1)`` as in the original function.
+
Generalized ``flip``
~~~~~~~~~~~~~~~~~~~~
``flipud`` and ``fliplr`` reverse the elements of an array along axis=0 and
@@ -128,10 +152,29 @@ file that will remain empty (bar a docstring) in the standard numpy source,
but that can be overwritten by people making binary distributions of numpy.
New nanfunctions ``nancumsum`` and ``nancumprod`` added
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Nanfunctions ``nancumsum`` and ``nancumprod`` have been added to
compute ``cumsum`` and ``cumprod`` by ignoring nans.
+``np.interp`` can now interpolate complex values
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+``np.lib.interp(x, xp, fp)`` now allows the interpolated array ``fp``
+to be complex and will interpolate at ``complex128`` precision.
+
+New polynomial evaluation function ``polyvalfromroots`` added
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+The new function ``polyvalfromroots`` evaluates a polynomial at given points
+from the roots of the polynomial. This is useful for higher order polynomials,
+where expansion into polynomial coefficients is inaccurate at machine
+precision.
+
+New array creation function ``geomspace`` added
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+The new function ``geomspace`` generates a geometric sequence. It is similar
+to ``logspace``, but with start and stop specified directly:
+``geomspace(start, stop)`` behaves the same as
+``logspace(log10(start), log10(stop))``.
+
Improvements
============
@@ -159,8 +202,8 @@ Generalized Ufuncs will now unlock the GIL
Generalized Ufuncs, including most of the linalg module, will now unlock
the Python global interpreter lock.
-np.roll can now roll multiple axes at the same time
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+``np.roll`` can now roll multiple axes at the same time
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The ``shift`` and ``axis`` arguments to ``roll`` are now broadcast against each
other, and each specified axis is shifted accordingly.
@@ -169,6 +212,32 @@ The *__complex__* method has been implemented on the ndarray object
Calling ``complex()`` on a size 1 array will now cast to a python
complex.
+``pathlib.Path`` objects now supported
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+The standard ``np.load``, ``np.save``, ``np.loadtxt``, ``np.savez``, and similar
+functions can now take ``pathlib.Path`` objects as an argument instead of a
+filename or open file object.
+
+Add ``bits`` attribute to ``np.finfo``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+This makes ``np.finfo`` consistent with ``np.iinfo`` which already has that
+attribute.
+
+Caches in `np.fft` are now bounded in total size and item count
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+The caches in `np.fft` that speed up successive FFTs of the same length can no
+longer grow without bounds. They have been replaced with LRU (least recently
+used) caches that automatically evict no longer needed items if either the
+memory size or item count limit has been reached.
+
+Improved handling of zero-width string/unicode dtypes
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Fixed several interfaces that explicitly disallowed arrays with zero-width
+string dtypes (i.e. ``dtype('S0')`` or ``dtype('U0')``), and fixed several
+bugs where such dtypes were not handled properly. In particular, changed
+``ndarray.__new__`` to not implicitly convert ``dtype('S0')`` to
+``dtype('S1')`` (and likewise for unicode) when creating new arrays.
+
Changes
=======
@@ -192,6 +261,12 @@ from these operations.
Also, reduction of a memmap (e.g. ``.sum(axis=None``) now returns a numpy
scalar instead of a 0d memmap.
+numpy.sctypes now includes bytes on Python3 too
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Previously, it included str (bytes) and unicode on Python2, but only str
+(unicode) on Python3.
+
+
Deprecations
============
@@ -211,4 +286,5 @@ If a 'width' parameter is passed into ``binary_repr`` that is insufficient to
represent the number in base 2 (positive) or 2's complement (negative) form,
the function used to silently ignore the parameter and return a representation
using the minimal number of bits needed for the form in question. Such behavior
-is now considered unsafe from a user perspective and will raise an error in the future.
+is now considered unsafe from a user perspective and will raise an error in the
+future.
diff --git a/doc/source/reference/arrays.datetime.rst b/doc/source/reference/arrays.datetime.rst
index f5b454875..cbc696ae8 100644
--- a/doc/source/reference/arrays.datetime.rst
+++ b/doc/source/reference/arrays.datetime.rst
@@ -177,9 +177,9 @@ And here are the time units:
======== ================ ======================= ==========================
h hour +/- 1.0e15 years [1.0e15 BC, 1.0e15 AD]
m minute +/- 1.7e13 years [1.7e13 BC, 1.7e13 AD]
- s second +/- 2.9e12 years [ 2.9e9 BC, 2.9e9 AD]
- ms millisecond +/- 2.9e9 years [ 2.9e6 BC, 2.9e6 AD]
- us microsecond +/- 2.9e6 years [290301 BC, 294241 AD]
+ s second +/- 2.9e11 years [2.9e11 BC, 2.9e11 AD]
+ ms millisecond +/- 2.9e8 years [ 2.9e8 BC, 2.9e8 AD]
+ us microsecond +/- 2.9e5 years [290301 BC, 294241 AD]
ns nanosecond +/- 292 years [ 1678 AD, 2262 AD]
ps picosecond +/- 106 days [ 1969 AD, 1970 AD]
fs femtosecond +/- 2.6 hours [ 1969 AD, 1970 AD]
diff --git a/doc/source/reference/arrays.indexing.rst b/doc/source/reference/arrays.indexing.rst
index 50b2492d2..6e9bb9276 100644
--- a/doc/source/reference/arrays.indexing.rst
+++ b/doc/source/reference/arrays.indexing.rst
@@ -171,7 +171,7 @@ concepts to remember include:
.. data:: newaxis
The :const:`newaxis` object can be used in all slicing operations to
- create an axis of length one. :const: :const:`newaxis` is an alias for
+ create an axis of length one. :const:`newaxis` is an alias for
'None', and 'None' can be used in place of this with the same result.
diff --git a/doc/source/reference/routines.array-creation.rst b/doc/source/reference/routines.array-creation.rst
index c7c6ab815..e718f0052 100644
--- a/doc/source/reference/routines.array-creation.rst
+++ b/doc/source/reference/routines.array-creation.rst
@@ -80,6 +80,7 @@ Numerical ranges
arange
linspace
logspace
+ geomspace
meshgrid
mgrid
ogrid
diff --git a/doc/source/reference/routines.polynomials.polynomial.rst b/doc/source/reference/routines.polynomials.polynomial.rst
index 431856622..8194ca867 100644
--- a/doc/source/reference/routines.polynomials.polynomial.rst
+++ b/doc/source/reference/routines.polynomials.polynomial.rst
@@ -32,6 +32,7 @@ Basics
polygrid3d
polyroots
polyfromroots
+ polyvalfromroots
Fitting
-------
diff --git a/doc/source/reference/ufuncs.rst b/doc/source/reference/ufuncs.rst
index f14a2c168..8faf97c4f 100644
--- a/doc/source/reference/ufuncs.rst
+++ b/doc/source/reference/ufuncs.rst
@@ -521,7 +521,6 @@ Math operations
sqrt
square
reciprocal
- ones_like
.. tip::
@@ -649,8 +648,6 @@ single operation.
.. autosummary::
- isreal
- iscomplex
isfinite
isinf
isnan
diff --git a/doc/source/release.rst b/doc/source/release.rst
index 1801b2067..f31a4f143 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -3,6 +3,7 @@ Release Notes
*************
.. include:: ../release/1.12.0-notes.rst
+.. include:: ../release/1.11.1-notes.rst
.. include:: ../release/1.11.0-notes.rst
.. include:: ../release/1.10.4-notes.rst
.. include:: ../release/1.10.3-notes.rst
diff --git a/doc/source/user/basics.io.genfromtxt.rst b/doc/source/user/basics.io.genfromtxt.rst
index 5c0e28e6f..1fd7e7b65 100644
--- a/doc/source/user/basics.io.genfromtxt.rst
+++ b/doc/source/user/basics.io.genfromtxt.rst
@@ -19,7 +19,7 @@ other faster and simpler functions like :func:`~numpy.loadtxt` cannot.
When giving examples, we will use the following conventions::
>>> import numpy as np
- >>> from StringIO import StringIO
+ >>> from io import BytesIO
@@ -59,7 +59,7 @@ example, comma-separated files (CSV) use a comma (``,``) or a semicolon
(``;``) as delimiter::
>>> data = "1, 2, 3\n4, 5, 6"
- >>> np.genfromtxt(StringIO(data), delimiter=",")
+ >>> np.genfromtxt(BytesIO(data), delimiter=",")
array([[ 1., 2., 3.],
[ 4., 5., 6.]])
@@ -75,12 +75,12 @@ defined as a given number of characters. In that case, we need to set
size) or to a sequence of integers (if columns can have different sizes)::
>>> data = " 1 2 3\n 4 5 67\n890123 4"
- >>> np.genfromtxt(StringIO(data), delimiter=3)
+ >>> np.genfromtxt(BytesIO(data), delimiter=3)
array([[ 1., 2., 3.],
[ 4., 5., 67.],
[ 890., 123., 4.]])
>>> data = "123456789\n 4 7 9\n 4567 9"
- >>> np.genfromtxt(StringIO(data), delimiter=(4, 3, 2))
+ >>> np.genfromtxt(BytesIO(data), delimiter=(4, 3, 2))
array([[ 1234., 567., 89.],
[ 4., 7., 9.],
[ 4., 567., 9.]])
@@ -96,12 +96,12 @@ This behavior can be overwritten by setting the optional argument
>>> data = "1, abc , 2\n 3, xxx, 4"
>>> # Without autostrip
- >>> np.genfromtxt(StringIO(data), delimiter=",", dtype="|S5")
+ >>> np.genfromtxt(BytesIO(data), delimiter=",", dtype="|S5")
array([['1', ' abc ', ' 2'],
['3', ' xxx', ' 4']],
dtype='|S5')
>>> # With autostrip
- >>> np.genfromtxt(StringIO(data), delimiter=",", dtype="|S5", autostrip=True)
+ >>> np.genfromtxt(BytesIO(data), delimiter=",", dtype="|S5", autostrip=True)
array([['1', 'abc', '2'],
['3', 'xxx', '4']],
dtype='|S5')
@@ -126,7 +126,7 @@ marker(s) is simply ignored::
... # And here comes the last line
... 9, 0
... """
- >>> np.genfromtxt(StringIO(data), comments="#", delimiter=",")
+ >>> np.genfromtxt(BytesIO(data), comments="#", delimiter=",")
[[ 1. 2.]
[ 3. 4.]
[ 5. 6.]
@@ -154,9 +154,9 @@ performed. Similarly, we can skip the last ``n`` lines of the file by
using the :keyword:`skip_footer` attribute and giving it a value of ``n``::
>>> data = "\n".join(str(i) for i in range(10))
- >>> np.genfromtxt(StringIO(data),)
+ >>> np.genfromtxt(BytesIO(data),)
array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9.])
- >>> np.genfromtxt(StringIO(data),
+ >>> np.genfromtxt(BytesIO(data),
... skip_header=3, skip_footer=5)
array([ 3., 4.])
@@ -178,7 +178,7 @@ For example, if we want to import only the first and the last columns, we
can use ``usecols=(0, -1)``::
>>> data = "1 2 3\n4 5 6"
- >>> np.genfromtxt(StringIO(data), usecols=(0, -1))
+ >>> np.genfromtxt(BytesIO(data), usecols=(0, -1))
array([[ 1., 3.],
[ 4., 6.]])
@@ -187,11 +187,11 @@ giving their name to the :keyword:`usecols` argument, either as a sequence
of strings or a comma-separated string::
>>> data = "1 2 3\n4 5 6"
- >>> np.genfromtxt(StringIO(data),
+ >>> np.genfromtxt(BytesIO(data),
... names="a, b, c", usecols=("a", "c"))
array([(1.0, 3.0), (4.0, 6.0)],
dtype=[('a', '<f8'), ('c', '<f8')])
- >>> np.genfromtxt(StringIO(data),
+ >>> np.genfromtxt(BytesIO(data),
... names="a, b, c", usecols=("a, c"))
array([(1.0, 3.0), (4.0, 6.0)],
dtype=[('a', '<f8'), ('c', '<f8')])
@@ -249,7 +249,7 @@ A natural approach when dealing with tabular data is to allocate a name to
each column. A first possibility is to use an explicit structured dtype,
as mentioned previously::
- >>> data = StringIO("1 2 3\n 4 5 6")
+ >>> data = BytesIO("1 2 3\n 4 5 6")
>>> np.genfromtxt(data, dtype=[(_, int) for _ in "abc"])
array([(1, 2, 3), (4, 5, 6)],
dtype=[('a', '<i8'), ('b', '<i8'), ('c', '<i8')])
@@ -257,7 +257,7 @@ as mentioned previously::
Another simpler possibility is to use the :keyword:`names` keyword with a
sequence of strings or a comma-separated string::
- >>> data = StringIO("1 2 3\n 4 5 6")
+ >>> data = BytesIO("1 2 3\n 4 5 6")
>>> np.genfromtxt(data, names="A, B, C")
array([(1.0, 2.0, 3.0), (4.0, 5.0, 6.0)],
dtype=[('A', '<f8'), ('B', '<f8'), ('C', '<f8')])
@@ -271,7 +271,7 @@ that case, we must use the :keyword:`names` keyword with a value of
``True``. The names will then be read from the first line (after the
``skip_header`` ones), even if the line is commented out::
- >>> data = StringIO("So it goes\n#a b c\n1 2 3\n 4 5 6")
+ >>> data = BytesIO("So it goes\n#a b c\n1 2 3\n 4 5 6")
>>> np.genfromtxt(data, skip_header=1, names=True)
array([(1.0, 2.0, 3.0), (4.0, 5.0, 6.0)],
dtype=[('a', '<f8'), ('b', '<f8'), ('c', '<f8')])
@@ -280,7 +280,7 @@ The default value of :keyword:`names` is ``None``. If we give any other
value to the keyword, the new names will overwrite the field names we may
have defined with the dtype::
- >>> data = StringIO("1 2 3\n 4 5 6")
+ >>> data = BytesIO("1 2 3\n 4 5 6")
>>> ndtype=[('a',int), ('b', float), ('c', int)]
>>> names = ["A", "B", "C"]
>>> np.genfromtxt(data, names=names, dtype=ndtype)
@@ -295,7 +295,7 @@ If ``names=None`` but a structured dtype is expected, names are defined
with the standard NumPy default of ``"f%i"``, yielding names like ``f0``,
``f1`` and so forth::
- >>> data = StringIO("1 2 3\n 4 5 6")
+ >>> data = BytesIO("1 2 3\n 4 5 6")
>>> np.genfromtxt(data, dtype=(int, float, int))
array([(1, 2.0, 3), (4, 5.0, 6)],
dtype=[('f0', '<i8'), ('f1', '<f8'), ('f2', '<i8')])
@@ -303,7 +303,7 @@ with the standard NumPy default of ``"f%i"``, yielding names like ``f0``,
In the same way, if we don't give enough names to match the length of the
dtype, the missing names will be defined with this default template::
- >>> data = StringIO("1 2 3\n 4 5 6")
+ >>> data = BytesIO("1 2 3\n 4 5 6")
>>> np.genfromtxt(data, dtype=(int, float, int), names="a")
array([(1, 2.0, 3), (4, 5.0, 6)],
dtype=[('a', '<i8'), ('f0', '<f8'), ('f1', '<i8')])
@@ -311,7 +311,7 @@ dtype, the missing names will be defined with this default template::
We can overwrite this default with the :keyword:`defaultfmt` argument, that
takes any format string::
- >>> data = StringIO("1 2 3\n 4 5 6")
+ >>> data = BytesIO("1 2 3\n 4 5 6")
>>> np.genfromtxt(data, dtype=(int, float, int), defaultfmt="var_%02i")
array([(1, 2.0, 3), (4, 5.0, 6)],
dtype=[('var_00', '<i8'), ('var_01', '<f8'), ('var_02', '<i8')])
@@ -377,7 +377,7 @@ representing a percentage to a float between 0 and 1::
>>> data = "1, 2.3%, 45.\n6, 78.9%, 0"
>>> names = ("i", "p", "n")
>>> # General case .....
- >>> np.genfromtxt(StringIO(data), delimiter=",", names=names)
+ >>> np.genfromtxt(BytesIO(data), delimiter=",", names=names)
array([(1.0, nan, 45.0), (6.0, nan, 0.0)],
dtype=[('i', '<f8'), ('p', '<f8'), ('n', '<f8')])
@@ -387,7 +387,7 @@ and ``' 78.9%'`` cannot be converted to float and we end up having
``np.nan`` instead. Let's now use a converter::
>>> # Converted case ...
- >>> np.genfromtxt(StringIO(data), delimiter=",", names=names,
+ >>> np.genfromtxt(BytesIO(data), delimiter=",", names=names,
... converters={1: convertfunc})
array([(1.0, 0.023, 45.0), (6.0, 0.78900000000000003, 0.0)],
dtype=[('i', '<f8'), ('p', '<f8'), ('n', '<f8')])
@@ -396,7 +396,7 @@ The same results can be obtained by using the name of the second column
(``"p"``) as key instead of its index (1)::
>>> # Using a name for the converter ...
- >>> np.genfromtxt(StringIO(data), delimiter=",", names=names,
+ >>> np.genfromtxt(BytesIO(data), delimiter=",", names=names,
... converters={"p": convertfunc})
array([(1.0, 0.023, 45.0), (6.0, 0.78900000000000003, 0.0)],
dtype=[('i', '<f8'), ('p', '<f8'), ('n', '<f8')])
@@ -410,8 +410,8 @@ by default::
>>> data = "1, , 3\n 4, 5, 6"
>>> convert = lambda x: float(x.strip() or -999)
- >>> np.genfromtxt(StringIO(data), delimiter=",",
- ... converter={1: convert})
+ >>> np.genfromtxt(BytesIO(data), delimiter=",",
+ ... converters={1: convert})
array([[ 1., -999., 3.],
[ 4., 5., 6.]])
@@ -492,7 +492,7 @@ and second column, and to -999 if they occur in the last column::
... names="a,b,c",
... missing_values={0:"N/A", 'b':" ", 2:"???"},
... filling_values={0:0, 'b':0, 2:-999})
- >>> np.genfromtxt(StringIO.StringIO(data), **kwargs)
+ >>> np.genfromtxt(BytesIO(data), **kwargs)
array([(0, 2, 3), (4, 0, -999)],
dtype=[('a', '<i8'), ('b', '<i8'), ('c', '<i8')])
diff --git a/numpy/add_newdocs.py b/numpy/add_newdocs.py
index 055c23480..c6ceb29c6 100644
--- a/numpy/add_newdocs.py
+++ b/numpy/add_newdocs.py
@@ -262,7 +262,7 @@ add_newdoc('numpy.core', 'nditer',
has_multi_index : bool
If True, the iterator was created with the "multi_index" flag,
and the property `multi_index` can be used to retrieve it.
- index :
+ index
When the "c_index" or "f_index" flag was used, this property
provides access to the index. Raises a ValueError if accessed
and `has_index` is False.
@@ -273,10 +273,10 @@ add_newdoc('numpy.core', 'nditer',
An index which matches the order of iteration.
itersize : int
Size of the iterator.
- itviews :
+ itviews
Structured view(s) of `operands` in memory, matching the reordered
and optimized iterator access pattern.
- multi_index :
+ multi_index
When the "multi_index" flag was used, this property
provides access to the index. Raises a ValueError if accessed
accessed and `has_multi_index` is False.
@@ -288,7 +288,7 @@ add_newdoc('numpy.core', 'nditer',
The array(s) to be iterated over.
shape : tuple of ints
Shape tuple, the shape of the iterator.
- value :
+ value
Value of `operands` at current iteration. Normally, this is a
tuple of array scalars, but if the flag "external_loop" is used,
it is a tuple of one dimensional arrays.
@@ -481,6 +481,11 @@ add_newdoc('numpy.core', 'broadcast',
Amongst others, it has ``shape`` and ``nd`` properties, and
may be used as an iterator.
+ See Also
+ --------
+ broadcast_arrays
+ broadcast_to
+
Examples
--------
Manually adding two vectors, using broadcasting:
@@ -547,9 +552,26 @@ add_newdoc('numpy.core', 'broadcast', ('iters',
"""))
+add_newdoc('numpy.core', 'broadcast', ('ndim',
+ """
+ Number of dimensions of broadcasted result. Alias for `nd`.
+
+ .. versionadded:: 1.12.0
+
+ Examples
+ --------
+ >>> x = np.array([1, 2, 3])
+ >>> y = np.array([[4], [5], [6]])
+ >>> b = np.broadcast(x, y)
+ >>> b.ndim
+ 2
+
+ """))
+
add_newdoc('numpy.core', 'broadcast', ('nd',
"""
- Number of dimensions of broadcasted result.
+ Number of dimensions of broadcasted result. For code intended for Numpy
+ 1.12.0 and later the more consistent `ndim` is preferred.
Examples
--------
@@ -642,35 +664,43 @@ add_newdoc('numpy.core', 'broadcast', ('reset',
add_newdoc('numpy.core.multiarray', 'array',
"""
- array(object, dtype=None, copy=True, order=None, subok=False, ndmin=0)
+ array(object, dtype=None, copy=True, order='K', subok=False, ndmin=0)
Create an array.
Parameters
----------
object : array_like
- An array, any object exposing the array interface, an
- object whose __array__ method returns an array, or any
- (nested) sequence.
+ An array, any object exposing the array interface, an object whose
+ __array__ method returns an array, or any (nested) sequence.
dtype : data-type, optional
- The desired data-type for the array. If not given, then
- the type will be determined as the minimum type required
- to hold the objects in the sequence. This argument can only
- be used to 'upcast' the array. For downcasting, use the
- .astype(t) method.
+ The desired data-type for the array. If not given, then the type will
+ be determined as the minimum type required to hold the objects in the
+ sequence. This argument can only be used to 'upcast' the array. For
+ downcasting, use the .astype(t) method.
copy : bool, optional
- If true (default), then the object is copied. Otherwise, a copy
- will only be made if __array__ returns a copy, if obj is a
- nested sequence, or if a copy is needed to satisfy any of the other
- requirements (`dtype`, `order`, etc.).
- order : {'C', 'F', 'A'}, optional
- Specify the order of the array. If order is 'C', then the array
- will be in C-contiguous order (last-index varies the fastest).
- If order is 'F', then the returned array will be in
- Fortran-contiguous order (first-index varies the fastest).
- If order is 'A' (default), then the returned array may be
- in any order (either C-, Fortran-contiguous, or even discontiguous),
- unless a copy is required, in which case it will be C-contiguous.
+ If true (default), then the object is copied. Otherwise, a copy will
+ only be made if __array__ returns a copy, if obj is a nested sequence,
+ or if a copy is needed to satisfy any of the other requirements
+ (`dtype`, `order`, etc.).
+ order : {'K', 'A', 'C', 'F'}, optional
+ Specify the memory layout of the array. If object is not an array, the
+ newly created array will be in C order (row major) unless 'F' is
+ specified, in which case it will be in Fortran order (column major).
+ If object is an array the following holds.
+
+ ===== ========= ===================================================
+ order no copy copy=True
+ ===== ========= ===================================================
+ 'K' unchanged F & C order preserved, otherwise most similar order
+ 'A' unchanged F order if input is F and not C, otherwise C order
+ 'C' C order C order
+ 'F' F order F order
+ ===== ========= ===================================================
+
+ When ``copy=False`` and a copy is made for other reasons, the result is
+ the same as if ``copy=True``, with some exceptions for `A`, see the
+ Notes section. The default order is 'K'.
subok : bool, optional
If True, then sub-classes will be passed-through, otherwise
the returned array will be forced to be a base-class array (default).
@@ -688,6 +718,12 @@ add_newdoc('numpy.core.multiarray', 'array',
--------
empty, empty_like, zeros, zeros_like, ones, ones_like, full, full_like
+ Notes
+ -----
+ When order is 'A' and `object` is an array in neither 'C' nor 'F' order,
+ and a copy is forced by a change in dtype, then the order of the result is
+ not necessarily 'C' as expected. This is likely a bug.
+
Examples
--------
>>> np.array([1, 2, 3])
diff --git a/numpy/compat/py3k.py b/numpy/compat/py3k.py
index d95a362ca..992ea50e6 100644
--- a/numpy/compat/py3k.py
+++ b/numpy/compat/py3k.py
@@ -7,9 +7,13 @@ from __future__ import division, absolute_import, print_function
__all__ = ['bytes', 'asbytes', 'isfileobj', 'getexception', 'strchar',
'unicode', 'asunicode', 'asbytes_nested', 'asunicode_nested',
'asstr', 'open_latin1', 'long', 'basestring', 'sixu',
- 'integer_types']
+ 'integer_types', 'is_pathlib_path', 'Path']
import sys
+try:
+ from pathlib import Path
+except ImportError:
+ Path = None
if sys.version_info[0] >= 3:
import io
@@ -86,3 +90,10 @@ def asunicode_nested(x):
return [asunicode_nested(y) for y in x]
else:
return asunicode(x)
+
+
+def is_pathlib_path(obj):
+ """
+ Check whether obj is a pathlib.Path object.
+ """
+ return Path is not None and isinstance(obj, Path)
diff --git a/numpy/core/__init__.py b/numpy/core/__init__.py
index e8719ca75..1ac850002 100644
--- a/numpy/core/__init__.py
+++ b/numpy/core/__init__.py
@@ -11,7 +11,18 @@ for envkey in ['OPENBLAS_MAIN_FREE', 'GOTOBLAS_MAIN_FREE']:
if envkey not in os.environ:
os.environ[envkey] = '1'
env_added.append(envkey)
-from . import multiarray
+
+try:
+ from . import multiarray
+except ImportError:
+ msg = """
+Importing the multiarray numpy extension module failed. Most
+likely you are trying to import a failed build of numpy.
+If you're working with a numpy git repo, try `git clean -xdf` (removes all
+files not under version control). Otherwise reinstall numpy.
+"""
+ raise ImportError(msg)
+
for envkey in env_added:
del os.environ[envkey]
del envkey
diff --git a/numpy/core/_internal.py b/numpy/core/_internal.py
index 47c933411..5ad440fa4 100644
--- a/numpy/core/_internal.py
+++ b/numpy/core/_internal.py
@@ -33,8 +33,6 @@ def _makenames_list(adict, align):
if (num < 0):
raise ValueError("invalid offset.")
format = dtype(obj[0], align=align)
- if (format.itemsize == 0):
- raise ValueError("all itemsizes must be fixed.")
if (n > 2):
title = obj[2]
else:
diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py
index 261a0e2fe..28ee4fffa 100644
--- a/numpy/core/fromnumeric.py
+++ b/numpy/core/fromnumeric.py
@@ -2855,7 +2855,7 @@ def mean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
>>> a[0, :] = 1.0
>>> a[1, :] = 0.1
>>> np.mean(a)
- 0.546875
+ 0.54999924
Computing the mean in float64 is more accurate:
diff --git a/numpy/core/function_base.py b/numpy/core/function_base.py
index 750fbe838..17a36eb4c 100644
--- a/numpy/core/function_base.py
+++ b/numpy/core/function_base.py
@@ -3,10 +3,12 @@ from __future__ import division, absolute_import, print_function
import warnings
import operator
-__all__ = ['logspace', 'linspace']
-
from . import numeric as _nx
-from .numeric import result_type, NaN, shares_memory, MAY_SHARE_BOUNDS, TooHardError
+from .numeric import (result_type, NaN, shares_memory, MAY_SHARE_BOUNDS,
+ TooHardError)
+
+__all__ = ['logspace', 'linspace', 'geomspace']
+
def _index_deprecate(i, stacklevel=2):
try:
@@ -58,7 +60,7 @@ def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None):
There are `num` equally spaced samples in the closed interval
``[start, stop]`` or the half-open interval ``[start, stop)``
(depending on whether `endpoint` is True or False).
- step : float
+ step : float, optional
Only returned if `retstep` is True
Size of spacing between samples.
@@ -73,11 +75,11 @@ def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None):
Examples
--------
>>> np.linspace(2.0, 3.0, num=5)
- array([ 2. , 2.25, 2.5 , 2.75, 3. ])
+ array([ 2. , 2.25, 2.5 , 2.75, 3. ])
>>> np.linspace(2.0, 3.0, num=5, endpoint=False)
- array([ 2. , 2.2, 2.4, 2.6, 2.8])
+ array([ 2. , 2.2, 2.4, 2.6, 2.8])
>>> np.linspace(2.0, 3.0, num=5, retstep=True)
- (array([ 2. , 2.25, 2.5 , 2.75, 3. ]), 0.25)
+ (array([ 2. , 2.25, 2.5 , 2.75, 3. ]), 0.25)
Graphical illustration:
@@ -102,8 +104,8 @@ def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None):
div = (num - 1) if endpoint else num
# Convert float/complex array scalars to float, gh-3504
- start = start * 1.
- stop = stop * 1.
+ start = start * 1.0
+ stop = stop * 1.0
dt = result_type(start, stop, float(num))
if dtype is None:
@@ -156,7 +158,7 @@ def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None):
``base ** stop`` is the final value of the sequence, unless `endpoint`
is False. In that case, ``num + 1`` values are spaced over the
interval in log-space, of which all but the last (a sequence of
- length ``num``) are returned.
+ length `num`) are returned.
num : integer, optional
Number of samples to generate. Default is 50.
endpoint : boolean, optional
@@ -182,6 +184,7 @@ def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None):
endpoint may or may not be included.
linspace : Similar to logspace, but with the samples uniformly distributed
in linear space, instead of log space.
+ geomspace : Similar to logspace, but with endpoints specified directly.
Notes
-----
@@ -195,11 +198,11 @@ def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None):
Examples
--------
>>> np.logspace(2.0, 3.0, num=4)
- array([ 100. , 215.443469 , 464.15888336, 1000. ])
+ array([ 100. , 215.443469 , 464.15888336, 1000. ])
>>> np.logspace(2.0, 3.0, num=4, endpoint=False)
- array([ 100. , 177.827941 , 316.22776602, 562.34132519])
+ array([ 100. , 177.827941 , 316.22776602, 562.34132519])
>>> np.logspace(2.0, 3.0, num=4, base=2.0)
- array([ 4. , 5.0396842 , 6.34960421, 8. ])
+ array([ 4. , 5.0396842 , 6.34960421, 8. ])
Graphical illustration:
@@ -221,3 +224,127 @@ def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None):
if dtype is None:
return _nx.power(base, y)
return _nx.power(base, y).astype(dtype)
+
+
+def geomspace(start, stop, num=50, endpoint=True, dtype=None):
+ """
+ Return numbers spaced evenly on a log scale (a geometric progression).
+
+ This is similar to `logspace`, but with endpoints specified directly.
+ Each output sample is a constant multiple of the previous.
+
+ Parameters
+ ----------
+ start : scalar
+ The starting value of the sequence.
+ stop : scalar
+ The final value of the sequence, unless `endpoint` is False.
+ In that case, ``num + 1`` values are spaced over the
+ interval in log-space, of which all but the last (a sequence of
+ length `num`) are returned.
+ num : integer, optional
+ Number of samples to generate. Default is 50.
+ endpoint : boolean, optional
+ If true, `stop` is the last sample. Otherwise, it is not included.
+ Default is True.
+ dtype : dtype
+ The type of the output array. If `dtype` is not given, infer the data
+ type from the other input arguments.
+
+ Returns
+ -------
+ samples : ndarray
+ `num` samples, equally spaced on a log scale.
+
+ See Also
+ --------
+ logspace : Similar to geomspace, but with endpoints specified using log
+ and base.
+ linspace : Similar to geomspace, but with arithmetic instead of geometric
+ progression.
+ arange : Similar to linspace, with the step size specified instead of the
+ number of samples.
+
+ Notes
+ -----
+ If the inputs or dtype are complex, the output will follow a logarithmic
+ spiral in the complex plane. (There are an infinite number of spirals
+ passing through two points; the output will follow the shortest such path.)
+
+ Examples
+ --------
+ >>> np.geomspace(1, 1000, num=4)
+ array([ 1., 10., 100., 1000.])
+ >>> np.geomspace(1, 1000, num=3, endpoint=False)
+ array([ 1., 10., 100.])
+ >>> np.geomspace(1, 1000, num=4, endpoint=False)
+ array([ 1. , 5.62341325, 31.6227766 , 177.827941 ])
+ >>> np.geomspace(1, 256, num=9)
+ array([ 1., 2., 4., 8., 16., 32., 64., 128., 256.])
+
+ Note that the above may not produce exact integers:
+
+ >>> np.geomspace(1, 256, num=9, dtype=int)
+ array([ 1, 2, 4, 7, 16, 32, 63, 127, 256])
+ >>> np.around(np.geomspace(1, 256, num=9)).astype(int)
+ array([ 1, 2, 4, 8, 16, 32, 64, 128, 256])
+
+ Negative, decreasing, and complex inputs are allowed:
+
+ >>> np.geomspace(1000, 1, num=4)
+ array([ 1000., 100., 10., 1.])
+ >>> np.geomspace(-1000, -1, num=4)
+ array([-1000., -100., -10., -1.])
+ >>> np.geomspace(1j, 1000j, num=4) # Straight line
+ array([ 0. +1.j, 0. +10.j, 0. +100.j, 0.+1000.j])
+ >>> np.geomspace(-1+0j, 1+0j, num=5) # Circle
+ array([-1.00000000+0.j , -0.70710678+0.70710678j,
+ 0.00000000+1.j , 0.70710678+0.70710678j,
+ 1.00000000+0.j ])
+
+ Graphical illustration of ``endpoint`` parameter:
+
+ >>> import matplotlib.pyplot as plt
+ >>> N = 10
+ >>> y = np.zeros(N)
+ >>> plt.semilogx(np.geomspace(1, 1000, N, endpoint=True), y + 1, 'o')
+ >>> plt.semilogx(np.geomspace(1, 1000, N, endpoint=False), y + 2, 'o')
+ >>> plt.axis([0.5, 2000, 0, 3])
+ >>> plt.grid(True, color='0.7', linestyle='-', which='both', axis='both')
+ >>> plt.show()
+
+ """
+ if start == 0 or stop == 0:
+ raise ValueError('Geometric sequence cannot include zero')
+
+ dt = result_type(start, stop, float(num))
+ if dtype is None:
+ dtype = dt
+ else:
+ # complex to dtype('complex128'), for instance
+ dtype = _nx.dtype(dtype)
+
+ # Avoid negligible real or imaginary parts in output by rotating to
+ # positive real, calculating, then undoing rotation
+ out_sign = 1
+ if start.real == stop.real == 0:
+ start, stop = start.imag, stop.imag
+ out_sign = 1j * out_sign
+ if _nx.sign(start) == _nx.sign(stop) == -1:
+ start, stop = -start, -stop
+ out_sign = -out_sign
+
+ # Promote both arguments to the same dtype in case, for instance, one is
+ # complex and another is negative and log would produce NaN otherwise
+ start = start + (stop - stop)
+ stop = stop + (start - start)
+ if _nx.issubdtype(dtype, complex):
+ start = start + 0j
+ stop = stop + 0j
+
+ log_start = _nx.log10(start)
+ log_stop = _nx.log10(stop)
+ result = out_sign * logspace(log_start, log_stop, num=num,
+ endpoint=endpoint, base=10.0, dtype=dtype)
+
+ return result.astype(dtype)
diff --git a/numpy/core/getlimits.py b/numpy/core/getlimits.py
index 2ea9c0e11..d4025cb3b 100644
--- a/numpy/core/getlimits.py
+++ b/numpy/core/getlimits.py
@@ -30,6 +30,8 @@ class finfo(object):
Attributes
----------
+ bits : int
+ The number of bits occupied by the type.
eps : float
The smallest representable positive number such that
``1.0 + eps != 1.0``. Type of `eps` is an appropriate floating
@@ -157,6 +159,7 @@ class finfo(object):
setattr(self, word, getattr(machar, word))
for word in ['tiny', 'resolution', 'epsneg']:
setattr(self, word, getattr(machar, word).flat[0])
+ self.bits = self.dtype.itemsize * 8
self.max = machar.huge.flat[0]
self.min = -self.max
self.eps = machar.eps.flat[0]
@@ -174,12 +177,12 @@ class finfo(object):
fmt = (
'Machine parameters for %(dtype)s\n'
'---------------------------------------------------------------\n'
- 'precision=%(precision)3s resolution= %(_str_resolution)s\n'
- 'machep=%(machep)6s eps= %(_str_eps)s\n'
- 'negep =%(negep)6s epsneg= %(_str_epsneg)s\n'
- 'minexp=%(minexp)6s tiny= %(_str_tiny)s\n'
- 'maxexp=%(maxexp)6s max= %(_str_max)s\n'
- 'nexp =%(nexp)6s min= -max\n'
+ 'precision = %(precision)3s resolution = %(_str_resolution)s\n'
+ 'machep = %(machep)6s eps = %(_str_eps)s\n'
+ 'negep = %(negep)6s epsneg = %(_str_epsneg)s\n'
+ 'minexp = %(minexp)6s tiny = %(_str_tiny)s\n'
+ 'maxexp = %(maxexp)6s max = %(_str_max)s\n'
+ 'nexp = %(nexp)6s min = -max\n'
'---------------------------------------------------------------\n'
)
return fmt % self.__dict__
@@ -200,6 +203,8 @@ class iinfo(object):
Attributes
----------
+ bits : int
+ The number of bits occupied by the type.
min : int
The smallest integer expressible by the type.
max : int
diff --git a/numpy/core/include/numpy/ndarraytypes.h b/numpy/core/include/numpy/ndarraytypes.h
index df43122d0..a9848f434 100644
--- a/numpy/core/include/numpy/ndarraytypes.h
+++ b/numpy/core/include/numpy/ndarraytypes.h
@@ -328,9 +328,20 @@ struct NpyAuxData_tag {
#define NPY_USE_PYMEM 1
#if NPY_USE_PYMEM == 1
-#define PyArray_malloc PyMem_Malloc
-#define PyArray_free PyMem_Free
-#define PyArray_realloc PyMem_Realloc
+ /* numpy sometimes calls PyArray_malloc() with the GIL released. On Python
+ 3.3 and older, it was safe to call PyMem_Malloc() with the GIL released.
+ On Python 3.4 and newer, it's better to use PyMem_RawMalloc() to be able
+ to use tracemalloc. On Python 3.6, calling PyMem_Malloc() with the GIL
+ released is now a fatal error in debug mode. */
+# if PY_VERSION_HEX >= 0x03040000
+# define PyArray_malloc PyMem_RawMalloc
+# define PyArray_free PyMem_RawFree
+# define PyArray_realloc PyMem_RawRealloc
+# else
+# define PyArray_malloc PyMem_Malloc
+# define PyArray_free PyMem_Free
+# define PyArray_realloc PyMem_Realloc
+# endif
#else
#define PyArray_malloc malloc
#define PyArray_free free
diff --git a/numpy/core/memmap.py b/numpy/core/memmap.py
index 827909c47..5f6182742 100644
--- a/numpy/core/memmap.py
+++ b/numpy/core/memmap.py
@@ -2,7 +2,7 @@ from __future__ import division, absolute_import, print_function
import numpy as np
from .numeric import uint8, ndarray, dtype
-from numpy.compat import long, basestring
+from numpy.compat import long, basestring, is_pathlib_path
__all__ = ['memmap']
@@ -39,7 +39,7 @@ class memmap(ndarray):
Parameters
----------
- filename : str or file-like object
+ filename : str, file-like object, or pathlib.Path instance
The file name or file object to be used as the array data buffer.
dtype : data-type, optional
The data-type used to interpret the file contents.
@@ -82,7 +82,7 @@ class memmap(ndarray):
Attributes
----------
- filename : str
+ filename : str or pathlib.Path instance
Path to the mapped file.
offset : int
Offset position in the file.
@@ -213,6 +213,9 @@ class memmap(ndarray):
if hasattr(filename, 'read'):
fid = filename
own_file = False
+ elif is_pathlib_path(filename):
+ fid = filename.open((mode == 'c' and 'r' or mode)+'b')
+ own_file = True
else:
fid = open(filename, (mode == 'c' and 'r' or mode)+'b')
own_file = True
@@ -267,6 +270,8 @@ class memmap(ndarray):
if isinstance(filename, basestring):
self.filename = os.path.abspath(filename)
+ elif is_pathlib_path(filename):
+ self.filename = filename.resolve()
# py3 returns int for TemporaryFile().name
elif (hasattr(filename, "name") and
isinstance(filename.name, basestring)):
diff --git a/numpy/core/numeric.py b/numpy/core/numeric.py
index 11a95fa7b..b3eed9714 100644
--- a/numpy/core/numeric.py
+++ b/numpy/core/numeric.py
@@ -6,7 +6,20 @@ import operator
import sys
import warnings
-from numpy.core import multiarray
+from . import multiarray
+from .multiarray import (
+ _fastCopyAndTranspose as fastCopyAndTranspose, ALLOW_THREADS,
+ BUFSIZE, CLIP, MAXDIMS, MAY_SHARE_BOUNDS, MAY_SHARE_EXACT, RAISE,
+ WRAP, arange, array, broadcast, can_cast, compare_chararrays,
+ concatenate, copyto, count_nonzero, dot, dtype, einsum, empty,
+ empty_like, flatiter, frombuffer, fromfile, fromiter, fromstring,
+ inner, int_asbuffer, lexsort, matmul, may_share_memory,
+ min_scalar_type, ndarray, nditer, nested_iters, promote_types,
+ putmask, result_type, set_numeric_ops, shares_memory, vdot, where,
+ zeros)
+if sys.version_info[0] < 3:
+ from .multiarray import newbuffer, getbuffer
+
from . import umath
from .umath import (invert, sin, UFUNC_BUFSIZE_DEFAULT, ERR_IGNORE,
ERR_WARN, ERR_RAISE, ERR_CALL, ERR_PRINT, ERR_LOG,
@@ -15,6 +28,10 @@ from . import numerictypes
from .numerictypes import longlong, intc, int_, float_, complex_, bool_
from ._internal import TooHardError
+bitwise_not = invert
+ufunc = type(sin)
+newaxis = None
+
if sys.version_info[0] >= 3:
import pickle
basestring = str
@@ -50,6 +67,7 @@ __all__ = [
'TooHardError',
]
+
if sys.version_info[0] < 3:
__all__.extend(['getbuffer', 'newbuffer'])
@@ -64,26 +82,6 @@ class ComplexWarning(RuntimeWarning):
"""
pass
-bitwise_not = invert
-
-CLIP = multiarray.CLIP
-WRAP = multiarray.WRAP
-RAISE = multiarray.RAISE
-MAXDIMS = multiarray.MAXDIMS
-ALLOW_THREADS = multiarray.ALLOW_THREADS
-BUFSIZE = multiarray.BUFSIZE
-MAY_SHARE_BOUNDS = multiarray.MAY_SHARE_BOUNDS
-MAY_SHARE_EXACT = multiarray.MAY_SHARE_EXACT
-
-ndarray = multiarray.ndarray
-flatiter = multiarray.flatiter
-nditer = multiarray.nditer
-nested_iters = multiarray.nested_iters
-broadcast = multiarray.broadcast
-dtype = multiarray.dtype
-copyto = multiarray.copyto
-ufunc = type(sin)
-
def zeros_like(a, dtype=None, order='K', subok=True):
"""
@@ -147,6 +145,7 @@ def zeros_like(a, dtype=None, order='K', subok=True):
multiarray.copyto(res, z, casting='unsafe')
return res
+
def ones(shape, dtype=None, order='C'):
"""
Return a new array of given shape and type, filled with ones.
@@ -193,6 +192,7 @@ def ones(shape, dtype=None, order='C'):
multiarray.copyto(a, 1, casting='unsafe')
return a
+
def ones_like(a, dtype=None, order='K', subok=True):
"""
Return an array of ones with the same shape and type as a given array.
@@ -253,6 +253,7 @@ def ones_like(a, dtype=None, order='K', subok=True):
multiarray.copyto(res, 1, casting='unsafe')
return res
+
def full(shape, fill_value, dtype=None, order='C'):
"""
Return a new array of given shape and type, filled with `fill_value`.
@@ -301,6 +302,7 @@ def full(shape, fill_value, dtype=None, order='C'):
multiarray.copyto(a, fill_value, casting='unsafe')
return a
+
def full_like(a, fill_value, dtype=None, order='K', subok=True):
"""
Return a full array with the same shape and type as a given array.
@@ -373,42 +375,6 @@ def extend_all(module):
if a not in adict:
__all__.append(a)
-newaxis = None
-
-
-arange = multiarray.arange
-array = multiarray.array
-zeros = multiarray.zeros
-count_nonzero = multiarray.count_nonzero
-empty = multiarray.empty
-empty_like = multiarray.empty_like
-fromstring = multiarray.fromstring
-fromiter = multiarray.fromiter
-fromfile = multiarray.fromfile
-frombuffer = multiarray.frombuffer
-shares_memory = multiarray.shares_memory
-may_share_memory = multiarray.may_share_memory
-if sys.version_info[0] < 3:
- newbuffer = multiarray.newbuffer
- getbuffer = multiarray.getbuffer
-int_asbuffer = multiarray.int_asbuffer
-where = multiarray.where
-concatenate = multiarray.concatenate
-fastCopyAndTranspose = multiarray._fastCopyAndTranspose
-set_numeric_ops = multiarray.set_numeric_ops
-can_cast = multiarray.can_cast
-promote_types = multiarray.promote_types
-min_scalar_type = multiarray.min_scalar_type
-result_type = multiarray.result_type
-lexsort = multiarray.lexsort
-compare_chararrays = multiarray.compare_chararrays
-putmask = multiarray.putmask
-einsum = multiarray.einsum
-dot = multiarray.dot
-inner = multiarray.inner
-vdot = multiarray.vdot
-matmul = multiarray.matmul
-
def asarray(a, dtype=None, order=None):
"""Convert the input to an array.
@@ -480,6 +446,7 @@ def asarray(a, dtype=None, order=None):
"""
return array(a, dtype, copy=False, order=order)
+
def asanyarray(a, dtype=None, order=None):
"""Convert the input to an ndarray, but pass ndarray subclasses through.
@@ -531,6 +498,7 @@ def asanyarray(a, dtype=None, order=None):
"""
return array(a, dtype, copy=False, order=order, subok=True)
+
def ascontiguousarray(a, dtype=None):
"""
Return a contiguous array in memory (C order).
@@ -567,6 +535,7 @@ def ascontiguousarray(a, dtype=None):
"""
return array(a, dtype, copy=False, order='C', ndmin=1)
+
def asfortranarray(a, dtype=None):
"""
Return an array laid out in Fortran order in memory.
@@ -603,6 +572,7 @@ def asfortranarray(a, dtype=None):
"""
return array(a, dtype, copy=False, order='F', ndmin=1)
+
def require(a, dtype=None, requirements=None):
"""
Return an ndarray of the provided type that satisfies requirements.
@@ -698,6 +668,7 @@ def require(a, dtype=None, requirements=None):
break
return arr
+
def isfortran(a):
"""
Returns True if the array is Fortran contiguous but *not* C contiguous.
@@ -759,6 +730,7 @@ def isfortran(a):
"""
return a.flags.fnc
+
def argwhere(a):
"""
Find the indices of array elements that are non-zero, grouped by element.
@@ -799,6 +771,7 @@ def argwhere(a):
"""
return transpose(nonzero(a))
+
def flatnonzero(a):
"""
Return indices that are non-zero in the flattened version of a.
@@ -838,15 +811,18 @@ def flatnonzero(a):
"""
return a.ravel().nonzero()[0]
+
_mode_from_name_dict = {'v': 0,
's': 1,
'f': 2}
+
def _mode_from_name(mode):
if isinstance(mode, basestring):
return _mode_from_name_dict[mode.lower()[0]]
return mode
+
def correlate(a, v, mode='valid'):
"""
Cross-correlation of two 1-dimensional sequences.
@@ -914,6 +890,7 @@ def correlate(a, v, mode='valid'):
mode = _mode_from_name(mode)
return multiarray.correlate2(a, v, mode)
+
def convolve(a,v,mode='full'):
"""
Returns the discrete, linear convolution of two one-dimensional sequences.
@@ -1011,6 +988,7 @@ def convolve(a,v,mode='full'):
mode = _mode_from_name(mode)
return multiarray.correlate(a, v[::-1], mode)
+
def outer(a, b, out=None):
"""
Compute the outer product of two vectors.
@@ -1331,6 +1309,7 @@ def tensordot(a, b, axes=2):
res = dot(at, bt)
return res.reshape(olda + oldb)
+
def roll(a, shift, axis=None):
"""
Roll array elements along a given axis.
@@ -1571,6 +1550,7 @@ def moveaxis(a, source, destination):
def _move_axis_to_0(a, axis):
return rollaxis(a, axis, 0)
+
def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None):
"""
Return the cross product of two (arrays of) vectors.
@@ -1771,16 +1751,20 @@ def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None):
# This works because we are moving the last axis
return rollaxis(cp, -1, axisc)
+
#Use numarray's printing function
from .arrayprint import array2string, get_printoptions, set_printoptions
+
_typelessdata = [int_, float_, complex_]
if issubclass(intc, int):
_typelessdata.append(intc)
+
if issubclass(longlong, int):
_typelessdata.append(longlong)
+
def array_repr(arr, max_line_width=None, precision=None, suppress_small=None):
"""
Return the string representation of an array.
@@ -1853,6 +1837,7 @@ def array_repr(arr, max_line_width=None, precision=None, suppress_small=None):
lf = '\n'+' '*len("array(")
return cName + "(%s, %sdtype=%s)" % (lst, lf, typename)
+
def array_str(a, max_line_width=None, precision=None, suppress_small=None):
"""
Return a string representation of the data in an array.
@@ -1889,6 +1874,7 @@ def array_str(a, max_line_width=None, precision=None, suppress_small=None):
"""
return array2string(a, max_line_width, precision, suppress_small, ' ', "", str)
+
def set_string_function(f, repr=True):
"""
Set a Python function to be used when pretty printing arrays.
@@ -1948,6 +1934,7 @@ def set_string_function(f, repr=True):
else:
return multiarray.set_string_function(f, repr)
+
set_string_function(array_str, 0)
set_string_function(array_repr, 1)
@@ -2027,6 +2014,7 @@ def indices(dimensions, dtype=int):
add(tmp, val, res[i])
return res
+
def fromfunction(function, shape, **kwargs):
"""
Construct an array by executing a function over each coordinate.
@@ -2080,7 +2068,8 @@ def fromfunction(function, shape, **kwargs):
"""
dtype = kwargs.pop('dtype', float)
args = indices(shape, dtype=dtype)
- return function(*args,**kwargs)
+ return function(*args, **kwargs)
+
def isscalar(num):
"""
@@ -2111,6 +2100,7 @@ def isscalar(num):
else:
return type(num) in ScalarType
+
def binary_repr(num, width=None):
"""
Return the binary representation of the input number as a string.
@@ -2214,6 +2204,7 @@ def binary_repr(num, width=None):
warn_if_insufficient(width, binwidth)
return '1' * (outwidth - binwidth) + binary
+
def base_repr(number, base=2, padding=0):
"""
Return a string representation of a number in the given base system.
@@ -2288,6 +2279,7 @@ def load(file):
file = open(file, "rb")
return pickle.load(file)
+
# These are all essentially abbreviations
# These might wind up in a special abbreviations module
@@ -2301,6 +2293,7 @@ def _maketup(descr, val):
res = [_maketup(fields[name][0], val) for name in dt.names]
return tuple(res)
+
def identity(n, dtype=None):
"""
Return the identity array.
@@ -2332,6 +2325,7 @@ def identity(n, dtype=None):
from numpy import eye
return eye(n, dtype=dtype)
+
def allclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
"""
Returns True if two arrays are element-wise equal within a tolerance.
@@ -2397,6 +2391,7 @@ def allclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
res = all(isclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan))
return bool(res)
+
def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
"""
Returns a boolean array where two arrays are element-wise equal within a
@@ -2498,6 +2493,7 @@ def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
else:
return cond
+
def array_equal(a1, a2):
"""
True if two arrays have the same shape and elements, False otherwise.
@@ -2539,6 +2535,7 @@ def array_equal(a1, a2):
return False
return bool(asarray(a1 == a2).all())
+
def array_equiv(a1, a2):
"""
Returns True if input arrays are shape consistent and all elements equal.
@@ -2598,6 +2595,7 @@ for key in _errdict.keys():
_errdict_rev[_errdict[key]] = key
del key
+
def seterr(all=None, divide=None, over=None, under=None, invalid=None):
"""
Set how floating-point errors are handled.
@@ -2748,6 +2746,7 @@ def geterr():
res['invalid'] = _errdict_rev[val]
return res
+
def setbufsize(size):
"""
Set the size of the buffer used in ufuncs.
@@ -2771,6 +2770,7 @@ def setbufsize(size):
umath.seterrobj(pyvals)
return old
+
def getbufsize():
"""
Return the size of the buffer used in ufuncs.
@@ -2783,6 +2783,7 @@ def getbufsize():
"""
return umath.geterrobj()[0]
+
def seterrcall(func):
"""
Set the floating-point error callback function or log object.
@@ -2874,6 +2875,7 @@ def seterrcall(func):
umath.seterrobj(pyvals)
return old
+
def geterrcall():
"""
Return the current callback function used on floating-point errors.
@@ -2918,10 +2920,12 @@ def geterrcall():
"""
return umath.geterrobj()[2]
+
class _unspecified(object):
pass
_Unspecified = _unspecified()
+
class errstate(object):
"""
errstate(**kwargs)
@@ -3004,6 +3008,7 @@ def _setdef():
defval = [UFUNC_BUFSIZE_DEFAULT, ERR_DEFAULT, None]
umath.seterrobj(defval)
+
# set the default values
_setdef()
diff --git a/numpy/core/numerictypes.py b/numpy/core/numerictypes.py
index 1b6551e6c..600d5af33 100644
--- a/numpy/core/numerictypes.py
+++ b/numpy/core/numerictypes.py
@@ -438,7 +438,7 @@ sctypes = {'int': [],
'uint':[],
'float':[],
'complex':[],
- 'others':[bool, object, str, unicode, void]}
+ 'others':[bool, object, bytes, unicode, void]}
def _add_array_type(typename, bits):
try:
diff --git a/numpy/core/records.py b/numpy/core/records.py
index 9f5dcc811..3bee394cd 100644
--- a/numpy/core/records.py
+++ b/numpy/core/records.py
@@ -424,7 +424,7 @@ class recarray(ndarray):
return self
def __array_finalize__(self, obj):
- if self.dtype.type is not record:
+ if self.dtype.type is not record and self.dtype.fields:
# if self.dtype is not np.record, invoke __setattr__ which will
# convert it to a record if it is a void dtype.
self.dtype = self.dtype
diff --git a/numpy/core/src/multiarray/arrayobject.c b/numpy/core/src/multiarray/arrayobject.c
index 0800991fd..8f583887a 100644
--- a/numpy/core/src/multiarray/arrayobject.c
+++ b/numpy/core/src/multiarray/arrayobject.c
@@ -52,6 +52,7 @@ maintainer email: oliphant.travis@ieee.org
#include "array_assign.h"
#include "alloc.h"
#include "mem_overlap.h"
+#include "numpyos.h"
/*NUMPY_API
Compute the size of an array (in number of items)
@@ -880,18 +881,13 @@ _mystrncmp(char *s1, char *s2, int len1, int len2)
#define SMALL_STRING 2048
-#if defined(isspace)
-#undef isspace
-#define isspace(c) ((c==' ')||(c=='\t')||(c=='\n')||(c=='\r')||(c=='\v')||(c=='\f'))
-#endif
-
static void _rstripw(char *s, int n)
{
int i;
for (i = n - 1; i >= 1; i--) { /* Never strip to length 0. */
int c = s[i];
- if (!c || isspace(c)) {
+ if (!c || NumPyOS_ascii_isspace((int)c)) {
s[i] = 0;
}
else {
@@ -905,7 +901,7 @@ static void _unistripw(npy_ucs4 *s, int n)
int i;
for (i = n - 1; i >= 1; i--) { /* Never strip to length 0. */
npy_ucs4 c = s[i];
- if (!c || isspace(c)) {
+ if (!c || NumPyOS_ascii_isspace((int)c)) {
s[i] = 0;
}
else {
@@ -1087,7 +1083,7 @@ _strings_richcompare(PyArrayObject *self, PyArrayObject *other, int cmp_op,
{
PyArrayObject *result;
PyArrayMultiIterObject *mit;
- int val;
+ int val, cast = 0;
/* Cast arrays to a common type */
if (PyArray_TYPE(self) != PyArray_DESCR(other)->type_num) {
@@ -1099,9 +1095,13 @@ _strings_richcompare(PyArrayObject *self, PyArrayObject *other, int cmp_op,
Py_INCREF(Py_NotImplemented);
return Py_NotImplemented;
#else
+ cast = 1;
+#endif /* define(NPY_PY3K) */
+ }
+ if (cast || (PyArray_ISNOTSWAPPED(self) != PyArray_ISNOTSWAPPED(other))) {
PyObject *new;
if (PyArray_TYPE(self) == NPY_STRING &&
- PyArray_DESCR(other)->type_num == NPY_UNICODE) {
+ PyArray_DESCR(other)->type_num == NPY_UNICODE) {
PyArray_Descr* unicode = PyArray_DescrNew(PyArray_DESCR(other));
unicode->elsize = PyArray_DESCR(self)->elsize << 2;
new = PyArray_FromAny((PyObject *)self, unicode,
@@ -1112,10 +1112,17 @@ _strings_richcompare(PyArrayObject *self, PyArrayObject *other, int cmp_op,
Py_INCREF(other);
self = (PyArrayObject *)new;
}
- else if (PyArray_TYPE(self) == NPY_UNICODE &&
- PyArray_DESCR(other)->type_num == NPY_STRING) {
+ else if ((PyArray_TYPE(self) == NPY_UNICODE) &&
+ ((PyArray_DESCR(other)->type_num == NPY_STRING) ||
+ (PyArray_ISNOTSWAPPED(self) != PyArray_ISNOTSWAPPED(other)))) {
PyArray_Descr* unicode = PyArray_DescrNew(PyArray_DESCR(self));
- unicode->elsize = PyArray_DESCR(other)->elsize << 2;
+
+ if (PyArray_DESCR(other)->type_num == NPY_STRING) {
+ unicode->elsize = PyArray_DESCR(other)->elsize << 2;
+ }
+ else {
+ unicode->elsize = PyArray_DESCR(other)->elsize;
+ }
new = PyArray_FromAny((PyObject *)other, unicode,
0, 0, 0, NULL);
if (new == NULL) {
@@ -1130,7 +1137,6 @@ _strings_richcompare(PyArrayObject *self, PyArrayObject *other, int cmp_op,
"in comparison");
return NULL;
}
-#endif
}
else {
Py_INCREF(self);
@@ -1656,11 +1662,6 @@ array_new(PyTypeObject *subtype, PyObject *args, PyObject *kwds)
}
itemsize = descr->elsize;
- if (itemsize == 0) {
- PyErr_SetString(PyExc_ValueError,
- "data-type with unspecified variable length");
- goto fail;
- }
if (strides.ptr != NULL) {
npy_intp nb, off;
@@ -1694,10 +1695,11 @@ array_new(PyTypeObject *subtype, PyObject *args, PyObject *kwds)
if (buffer.ptr == NULL) {
ret = (PyArrayObject *)
- PyArray_NewFromDescr(subtype, descr,
- (int)dims.len,
- dims.ptr,
- strides.ptr, NULL, is_f_order, NULL);
+ PyArray_NewFromDescr_int(subtype, descr,
+ (int)dims.len,
+ dims.ptr,
+ strides.ptr, NULL, is_f_order, NULL,
+ 0, 1);
if (ret == NULL) {
descr = NULL;
goto fail;
@@ -1730,11 +1732,11 @@ array_new(PyTypeObject *subtype, PyObject *args, PyObject *kwds)
buffer.flags |= NPY_ARRAY_F_CONTIGUOUS;
}
ret = (PyArrayObject *)\
- PyArray_NewFromDescr(subtype, descr,
- dims.len, dims.ptr,
- strides.ptr,
- offset + (char *)buffer.ptr,
- buffer.flags, NULL);
+ PyArray_NewFromDescr_int(subtype, descr,
+ dims.len, dims.ptr,
+ strides.ptr,
+ offset + (char *)buffer.ptr,
+ buffer.flags, NULL, 0, 1);
if (ret == NULL) {
descr = NULL;
goto fail;
diff --git a/numpy/core/src/multiarray/arraytypes.c.src b/numpy/core/src/multiarray/arraytypes.c.src
index 71347ddbc..852ff03b6 100644
--- a/numpy/core/src/multiarray/arraytypes.c.src
+++ b/numpy/core/src/multiarray/arraytypes.c.src
@@ -18,6 +18,7 @@
#include "npy_sort.h"
#include "common.h"
#include "ctors.h"
+#include "lowlevel_strided_loops.h"
#include "usertypes.h"
#include "_datetime.h"
#include "arrayobject.h"
@@ -2284,21 +2285,19 @@ UNICODE_copyswapn (char *dst, npy_intp dstride, char *src, npy_intp sstride,
}
}
- n *= itemsize;
if (swap) {
- char *a, *b, c;
+ int i;
+ char *_dst;
+ itemsize = itemsize / 4;
- /* n is the number of unicode characters to swap */
- n >>= 2;
- for (a = (char *)dst; n > 0; n--) {
- b = a + 3;
- c = *a;
- *a++ = *b;
- *b-- = c;
- c = *a;
- *a++ = *b;
- *b-- = c;
- a += 2;
+ while (n > 0) {
+ _dst = dst;
+ for (i=0; i < itemsize; i++) {
+ npy_bswap4_unaligned(_dst);
+ _dst += 4;
+ }
+ dst += dstride;
+ --n;
}
}
}
@@ -2326,17 +2325,14 @@ UNICODE_copyswap (char *dst, char *src, int swap, PyArrayObject *arr)
}
if (swap) {
- char *a, *b, c;
- itemsize >>= 2;
- for (a = (char *)dst; itemsize>0; itemsize--) {
- b = a + 3;
- c = *a;
- *a++ = *b;
- *b-- = c;
- c = *a;
- *a++ = *b;
- *b-- = c;
- a += 2;
+ int i;
+ char *_dst;
+ itemsize = itemsize / 4;
+
+ _dst = dst;
+ for (i=0; i < itemsize; i++) {
+ npy_bswap4_unaligned(_dst);
+ _dst += 4;
}
}
}
@@ -3763,6 +3759,13 @@ static void
}
}
else {
+ /*
+ * Visual Studio 2015 loop vectorizer handles NaN in an unexpected
+ * manner, see: https://github.com/numpy/numpy/issues/7601
+ */
+ #if (_MSC_VER == 1900)
+ #pragma loop( no_vector )
+ #endif
for (i = 0; i < ni; i++) {
if (@lt@(in[i], min_val)) {
out[i] = min_val;
diff --git a/numpy/core/src/multiarray/calculation.c b/numpy/core/src/multiarray/calculation.c
index e3cec21b1..c82c6c46c 100644
--- a/numpy/core/src/multiarray/calculation.c
+++ b/numpy/core/src/multiarray/calculation.c
@@ -830,28 +830,24 @@ _GenericBinaryOutFunction(PyArrayObject *m1, PyObject *m2, PyArrayObject *out,
return PyObject_CallFunction(op, "OO", m1, m2);
}
else {
- PyObject *args, *kw, *ret;
+ PyObject *args, *ret;
+ static PyObject *kw = NULL;
- args = Py_BuildValue("OOO", m1, m2, out);
- if (args == NULL) {
- return NULL;
- }
- kw = PyDict_New();
if (kw == NULL) {
- Py_DECREF(args);
- return NULL;
+ kw = Py_BuildValue("{s:s}", "casting", "unsafe");
+ if (kw == NULL) {
+ return NULL;
+ }
}
- if (PyDict_SetItemString(kw, "casting",
- PyUString_FromString("unsafe")) < 0) {
- Py_DECREF(args);
- Py_DECREF(kw);
+
+ args = Py_BuildValue("OOO", m1, m2, out);
+ if (args == NULL) {
return NULL;
}
ret = PyObject_Call(op, args, kw);
Py_DECREF(args);
- Py_DECREF(kw);
return ret;
}
diff --git a/numpy/core/src/multiarray/compiled_base.c b/numpy/core/src/multiarray/compiled_base.c
index 136d0859e..25be59184 100644
--- a/numpy/core/src/multiarray/compiled_base.c
+++ b/numpy/core/src/multiarray/compiled_base.c
@@ -375,6 +375,7 @@ arr_insert(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwdict)
} else {
Py_XDECREF(values);
Py_XDECREF(mask);
+ Py_XDECREF(array);
Py_RETURN_NONE;
}
}
@@ -664,6 +665,182 @@ fail:
return NULL;
}
+/* As for arr_interp but for complex fp values */
+NPY_NO_EXPORT PyObject *
+arr_interp_complex(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwdict)
+{
+
+ PyObject *fp, *xp, *x;
+ PyObject *left = NULL, *right = NULL;
+ PyArrayObject *afp = NULL, *axp = NULL, *ax = NULL, *af = NULL;
+ npy_intp i, lenx, lenxp;
+
+ const npy_double *dx, *dz;
+ const npy_cdouble *dy;
+ npy_cdouble lval, rval;
+ npy_cdouble *dres, *slopes = NULL;
+
+ static char *kwlist[] = {"x", "xp", "fp", "left", "right", NULL};
+
+ NPY_BEGIN_THREADS_DEF;
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwdict, "OOO|OO", kwlist,
+ &x, &xp, &fp, &left, &right)) {
+ return NULL;
+ }
+
+ afp = (PyArrayObject *)PyArray_ContiguousFromAny(fp, NPY_CDOUBLE, 1, 1);
+
+ if (afp == NULL) {
+ return NULL;
+ }
+
+ axp = (PyArrayObject *)PyArray_ContiguousFromAny(xp, NPY_DOUBLE, 1, 1);
+ if (axp == NULL) {
+ goto fail;
+ }
+ ax = (PyArrayObject *)PyArray_ContiguousFromAny(x, NPY_DOUBLE, 1, 0);
+ if (ax == NULL) {
+ goto fail;
+ }
+ lenxp = PyArray_SIZE(axp);
+ if (lenxp == 0) {
+ PyErr_SetString(PyExc_ValueError,
+ "array of sample points is empty");
+ goto fail;
+ }
+ if (PyArray_SIZE(afp) != lenxp) {
+ PyErr_SetString(PyExc_ValueError,
+ "fp and xp are not of the same length.");
+ goto fail;
+ }
+
+ lenx = PyArray_SIZE(ax);
+ dx = (const npy_double *)PyArray_DATA(axp);
+ dz = (const npy_double *)PyArray_DATA(ax);
+
+ af = (PyArrayObject *)PyArray_SimpleNew(PyArray_NDIM(ax),
+ PyArray_DIMS(ax), NPY_CDOUBLE);
+ if (af == NULL) {
+ goto fail;
+ }
+
+ dy = (const npy_cdouble *)PyArray_DATA(afp);
+ dres = (npy_cdouble *)PyArray_DATA(af);
+ /* Get left and right fill values. */
+ if ((left == NULL) || (left == Py_None)) {
+ lval = dy[0];
+ }
+ else {
+ lval.real = PyComplex_RealAsDouble(left);
+ if ((lval.real == -1) && PyErr_Occurred()) {
+ goto fail;
+ }
+ lval.imag = PyComplex_ImagAsDouble(left);
+ if ((lval.imag == -1) && PyErr_Occurred()) {
+ goto fail;
+ }
+ }
+
+ if ((right == NULL) || (right == Py_None)) {
+ rval = dy[lenxp - 1];
+ }
+ else {
+ rval.real = PyComplex_RealAsDouble(right);
+ if ((rval.real == -1) && PyErr_Occurred()) {
+ goto fail;
+ }
+ rval.imag = PyComplex_ImagAsDouble(right);
+ if ((rval.imag == -1) && PyErr_Occurred()) {
+ goto fail;
+ }
+ }
+
+ /* binary_search_with_guess needs at least a 3 item long array */
+ if (lenxp == 1) {
+ const npy_double xp_val = dx[0];
+ const npy_cdouble fp_val = dy[0];
+
+ NPY_BEGIN_THREADS_THRESHOLDED(lenx);
+ for (i = 0; i < lenx; ++i) {
+ const npy_double x_val = dz[i];
+ dres[i] = (x_val < xp_val) ? lval :
+ ((x_val > xp_val) ? rval : fp_val);
+ }
+ NPY_END_THREADS;
+ }
+ else {
+ npy_intp j = 0;
+
+ /* only pre-calculate slopes if there are relatively few of them. */
+ if (lenxp <= lenx) {
+ slopes = PyArray_malloc((lenxp - 1) * sizeof(npy_cdouble));
+ if (slopes == NULL) {
+ goto fail;
+ }
+ }
+
+ NPY_BEGIN_THREADS;
+
+ if (slopes != NULL) {
+ for (i = 0; i < lenxp - 1; ++i) {
+ const double inv_dx = 1.0 / (dx[i+1] - dx[i]);
+ slopes[i].real = (dy[i+1].real - dy[i].real) * inv_dx;
+ slopes[i].imag = (dy[i+1].imag - dy[i].imag) * inv_dx;
+ }
+ }
+
+ for (i = 0; i < lenx; ++i) {
+ const npy_double x_val = dz[i];
+
+ if (npy_isnan(x_val)) {
+ dres[i].real = x_val;
+ dres[i].imag = 0.0;
+ continue;
+ }
+
+ j = binary_search_with_guess(x_val, dx, lenxp, j);
+ if (j == -1) {
+ dres[i] = lval;
+ }
+ else if (j == lenxp) {
+ dres[i] = rval;
+ }
+ else if (j == lenxp - 1) {
+ dres[i] = dy[j];
+ }
+ else {
+ if (slopes!=NULL) {
+ dres[i].real = slopes[j].real*(x_val - dx[j]) + dy[j].real;
+ dres[i].imag = slopes[j].imag*(x_val - dx[j]) + dy[j].imag;
+ }
+ else {
+ const npy_double inv_dx = 1.0 / (dx[j+1] - dx[j]);
+ dres[i].real = (dy[j+1].real - dy[j].real)*(x_val - dx[j])*
+ inv_dx + dy[j].real;
+ dres[i].imag = (dy[j+1].imag - dy[j].imag)*(x_val - dx[j])*
+ inv_dx + dy[j].imag;
+ }
+ }
+ }
+
+ NPY_END_THREADS;
+ }
+ PyArray_free(slopes);
+
+ Py_DECREF(afp);
+ Py_DECREF(axp);
+ Py_DECREF(ax);
+ return (PyObject *)af;
+
+fail:
+ Py_XDECREF(afp);
+ Py_XDECREF(axp);
+ Py_XDECREF(ax);
+ Py_XDECREF(af);
+ return NULL;
+}
+
/*
* Converts a Python sequence into 'count' PyArrayObjects
*
diff --git a/numpy/core/src/multiarray/compiled_base.h b/numpy/core/src/multiarray/compiled_base.h
index 19e3778ad..51508531c 100644
--- a/numpy/core/src/multiarray/compiled_base.h
+++ b/numpy/core/src/multiarray/compiled_base.h
@@ -11,6 +11,8 @@ arr_digitize(PyObject *, PyObject *, PyObject *kwds);
NPY_NO_EXPORT PyObject *
arr_interp(PyObject *, PyObject *, PyObject *);
NPY_NO_EXPORT PyObject *
+arr_interp_complex(PyObject *, PyObject *, PyObject *);
+NPY_NO_EXPORT PyObject *
arr_ravel_multi_index(PyObject *, PyObject *, PyObject *);
NPY_NO_EXPORT PyObject *
arr_unravel_index(PyObject *, PyObject *, PyObject *);
diff --git a/numpy/core/src/multiarray/convert.c b/numpy/core/src/multiarray/convert.c
index 5499160be..8e0c66632 100644
--- a/numpy/core/src/multiarray/convert.c
+++ b/numpy/core/src/multiarray/convert.c
@@ -14,6 +14,7 @@
#include "npy_pycompat.h"
#include "arrayobject.h"
+#include "ctors.h"
#include "mapping.h"
#include "lowlevel_strided_loops.h"
#include "scalartypes.h"
@@ -132,6 +133,10 @@ PyArray_ToFile(PyArrayObject *self, FILE *fp, char *sep, char *format)
"cannot write object arrays to a file in binary mode");
return -1;
}
+ if (PyArray_DESCR(self)->elsize == 0) {
+ /* For zero-width data types there's nothing to write */
+ return 0;
+ }
if (npy_fallocate(PyArray_NBYTES(self), fp) != 0) {
return -1;
}
@@ -600,13 +605,13 @@ PyArray_View(PyArrayObject *self, PyArray_Descr *type, PyTypeObject *pytype)
dtype = PyArray_DESCR(self);
Py_INCREF(dtype);
- ret = (PyArrayObject *)PyArray_NewFromDescr(subtype,
+ ret = (PyArrayObject *)PyArray_NewFromDescr_int(subtype,
dtype,
PyArray_NDIM(self), PyArray_DIMS(self),
PyArray_STRIDES(self),
PyArray_DATA(self),
flags,
- (PyObject *)self);
+ (PyObject *)self, 0, 1);
if (ret == NULL) {
Py_XDECREF(type);
return NULL;
diff --git a/numpy/core/src/multiarray/ctors.c b/numpy/core/src/multiarray/ctors.c
index 0017de0ad..a03bacceb 100644
--- a/numpy/core/src/multiarray/ctors.c
+++ b/numpy/core/src/multiarray/ctors.c
@@ -892,12 +892,14 @@ discover_dimensions(PyObject *obj, int *maxndim, npy_intp *d, int check_it,
* Generic new array creation routine.
* Internal variant with calloc argument for PyArray_Zeros.
*
- * steals a reference to descr (even on failure)
+ * steals a reference to descr. On failure or descr->subarray, descr will
+ * be decrefed.
*/
-static PyObject *
+NPY_NO_EXPORT PyObject *
PyArray_NewFromDescr_int(PyTypeObject *subtype, PyArray_Descr *descr, int nd,
npy_intp *dims, npy_intp *strides, void *data,
- int flags, PyObject *obj, int zeroed)
+ int flags, PyObject *obj, int zeroed,
+ int allow_emptystring)
{
PyArrayObject_fields *fa;
int i;
@@ -916,7 +918,8 @@ PyArray_NewFromDescr_int(PyTypeObject *subtype, PyArray_Descr *descr, int nd,
newstrides, nd);
ret = PyArray_NewFromDescr_int(subtype, descr, nd, newdims,
newstrides,
- data, flags, obj, zeroed);
+ data, flags, obj, zeroed,
+ allow_emptystring);
return ret;
}
@@ -931,20 +934,21 @@ PyArray_NewFromDescr_int(PyTypeObject *subtype, PyArray_Descr *descr, int nd,
/* Check datatype element size */
nbytes = descr->elsize;
if (nbytes == 0) {
- if (!PyDataType_ISSTRING(descr)) {
+ if (!PyDataType_ISFLEXIBLE(descr)) {
PyErr_SetString(PyExc_TypeError, "Empty data-type");
Py_DECREF(descr);
return NULL;
- }
- PyArray_DESCR_REPLACE(descr);
- if (descr == NULL) {
- return NULL;
- }
- if (descr->type_num == NPY_STRING) {
- nbytes = descr->elsize = 1;
- }
- else {
- nbytes = descr->elsize = sizeof(npy_ucs4);
+ } else if (PyDataType_ISSTRING(descr) && !allow_emptystring) {
+ PyArray_DESCR_REPLACE(descr);
+ if (descr == NULL) {
+ return NULL;
+ }
+ if (descr->type_num == NPY_STRING) {
+ nbytes = descr->elsize = 1;
+ }
+ else {
+ nbytes = descr->elsize = sizeof(npy_ucs4);
+ }
}
}
@@ -1125,7 +1129,8 @@ PyArray_NewFromDescr_int(PyTypeObject *subtype, PyArray_Descr *descr, int nd,
/*NUMPY_API
* Generic new array creation routine.
*
- * steals a reference to descr (even on failure)
+ * steals a reference to descr. On failure or when dtype->subarray is
+ * true, dtype will be decrefed.
*/
NPY_NO_EXPORT PyObject *
PyArray_NewFromDescr(PyTypeObject *subtype, PyArray_Descr *descr, int nd,
@@ -1134,7 +1139,7 @@ PyArray_NewFromDescr(PyTypeObject *subtype, PyArray_Descr *descr, int nd,
{
return PyArray_NewFromDescr_int(subtype, descr, nd,
dims, strides, data,
- flags, obj, 0);
+ flags, obj, 0, 0);
}
/*NUMPY_API
@@ -1150,7 +1155,8 @@ PyArray_NewFromDescr(PyTypeObject *subtype, PyArray_Descr *descr, int nd,
* subok - If 1, use the prototype's array subtype, otherwise
* always create a base-class array.
*
- * NOTE: If dtype is not NULL, steals the dtype reference.
+ * NOTE: If dtype is not NULL, steals the dtype reference. On failure or when
+ * dtype->subarray is true, dtype will be decrefed.
*/
NPY_NO_EXPORT PyObject *
PyArray_NewLikeArray(PyArrayObject *prototype, NPY_ORDER order,
@@ -2839,7 +2845,8 @@ PyArray_CheckAxis(PyArrayObject *arr, int *axis, int flags)
/*NUMPY_API
* Zeros
*
- * steal a reference
+ * steals a reference to type. On failure or when dtype->subarray is
+ * true, dtype will be decrefed.
* accepts NULL type
*/
NPY_NO_EXPORT PyObject *
@@ -2855,7 +2862,7 @@ PyArray_Zeros(int nd, npy_intp *dims, PyArray_Descr *type, int is_f_order)
type,
nd, dims,
NULL, NULL,
- is_f_order, NULL, 1);
+ is_f_order, NULL, 1, 0);
if (ret == NULL) {
return NULL;
@@ -3257,17 +3264,21 @@ array_fromfile_binary(FILE *fp, PyArray_Descr *dtype, npy_intp num, size_t *nrea
}
num = numbytes / dtype->elsize;
}
- r = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type,
- dtype,
- 1, &num,
- NULL, NULL,
- 0, NULL);
+ /*
+ * When dtype->subarray is true, PyArray_NewFromDescr will decref dtype
+ * even on success, so make sure it stays around until exit.
+ */
+ Py_INCREF(dtype);
+ r = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type, dtype, 1, &num,
+ NULL, NULL, 0, NULL);
if (r == NULL) {
+ Py_DECREF(dtype);
return NULL;
}
NPY_BEGIN_ALLOW_THREADS;
*nread = fread(PyArray_DATA(r), dtype->elsize, num, fp);
NPY_END_ALLOW_THREADS;
+ Py_DECREF(dtype);
return r;
}
@@ -3290,13 +3301,17 @@ array_from_text(PyArray_Descr *dtype, npy_intp num, char *sep, size_t *nread,
npy_intp bytes, totalbytes;
size = (num >= 0) ? num : FROM_BUFFER_SIZE;
+
+ /*
+ * When dtype->subarray is true, PyArray_NewFromDescr will decref dtype
+ * even on success, so make sure it stays around until exit.
+ */
+ Py_INCREF(dtype);
r = (PyArrayObject *)
- PyArray_NewFromDescr(&PyArray_Type,
- dtype,
- 1, &size,
- NULL, NULL,
- 0, NULL);
+ PyArray_NewFromDescr(&PyArray_Type, dtype, 1, &size,
+ NULL, NULL, 0, NULL);
if (r == NULL) {
+ Py_DECREF(dtype);
return NULL;
}
clean_sep = swab_separator(sep);
@@ -3345,6 +3360,7 @@ array_from_text(PyArray_Descr *dtype, npy_intp num, char *sep, size_t *nread,
free(clean_sep);
fail:
+ Py_DECREF(dtype);
if (err == 1) {
PyErr_NoMemory();
}
@@ -3362,7 +3378,8 @@ fail:
* array corresponding to the data encoded in that file.
*
* If the dtype is NULL, the default array type is used (double).
- * If non-null, the reference is stolen.
+ * If non-null, the reference is stolen and if dtype->subarray is true dtype
+ * will be decrefed even on success.
*
* The number of elements to read is given as ``num``; if it is < 0, then
* then as many as possible are read.
@@ -3388,10 +3405,12 @@ PyArray_FromFile(FILE *fp, PyArray_Descr *dtype, npy_intp num, char *sep)
return NULL;
}
if (dtype->elsize == 0) {
- PyErr_SetString(PyExc_ValueError,
- "The elements are 0-sized.");
- Py_DECREF(dtype);
- return NULL;
+ /* Nothing to read, just create an empty array of the requested type */
+ return PyArray_NewFromDescr_int(&PyArray_Type,
+ dtype,
+ 1, &num,
+ NULL, NULL,
+ 0, NULL, 0, 1);
}
if ((sep == NULL) || (strlen(sep) == 0)) {
ret = array_fromfile_binary(fp, dtype, num, &nread);
diff --git a/numpy/core/src/multiarray/ctors.h b/numpy/core/src/multiarray/ctors.h
index 783818def..e889910cb 100644
--- a/numpy/core/src/multiarray/ctors.h
+++ b/numpy/core/src/multiarray/ctors.h
@@ -6,6 +6,12 @@ PyArray_NewFromDescr(PyTypeObject *subtype, PyArray_Descr *descr, int nd,
npy_intp *dims, npy_intp *strides, void *data,
int flags, PyObject *obj);
+NPY_NO_EXPORT PyObject *
+PyArray_NewFromDescr_int(PyTypeObject *subtype, PyArray_Descr *descr, int nd,
+ npy_intp *dims, npy_intp *strides, void *data,
+ int flags, PyObject *obj, int zeroed,
+ int allow_emptystring);
+
NPY_NO_EXPORT PyObject *PyArray_New(PyTypeObject *, int nd, npy_intp *,
int, npy_intp *, void *, int, int, PyObject *);
diff --git a/numpy/core/src/multiarray/descriptor.c b/numpy/core/src/multiarray/descriptor.c
index 03a4654a0..fbfda72d7 100644
--- a/numpy/core/src/multiarray/descriptor.c
+++ b/numpy/core/src/multiarray/descriptor.c
@@ -29,7 +29,9 @@
#define NPY_NEXT_ALIGNED_OFFSET(offset, alignment) \
(((offset) + (alignment) - 1) & (-(alignment)))
+#ifndef PyDictProxy_Check
#define PyDictProxy_Check(obj) (Py_TYPE(obj) == &PyDictProxy_Type)
+#endif
static PyObject *typeDict = NULL; /* Must be explicitly loaded */
@@ -533,8 +535,6 @@ _convert_from_array_descr(PyObject *obj, int align)
goto fail;
}
dtypeflags |= (conv->flags & NPY_FROM_FIELDS);
- tup = PyTuple_New((title == NULL ? 2 : 3));
- PyTuple_SET_ITEM(tup, 0, (PyObject *)conv);
if (align) {
int _align;
@@ -544,10 +544,10 @@ _convert_from_array_descr(PyObject *obj, int align)
}
maxalign = PyArray_MAX(maxalign, _align);
}
+ tup = PyTuple_New((title == NULL ? 2 : 3));
+ PyTuple_SET_ITEM(tup, 0, (PyObject *)conv);
PyTuple_SET_ITEM(tup, 1, PyInt_FromLong((long) totalsize));
- PyDict_SetItem(fields, name, tup);
-
/*
* Title can be "meta-data". Only insert it
* into the fields dictionary if it is a string
@@ -556,6 +556,7 @@ _convert_from_array_descr(PyObject *obj, int align)
if (title != NULL) {
Py_INCREF(title);
PyTuple_SET_ITEM(tup, 2, title);
+ PyDict_SetItem(fields, name, tup);
#if defined(NPY_PY3K)
if (PyUString_Check(title)) {
#else
@@ -570,6 +571,10 @@ _convert_from_array_descr(PyObject *obj, int align)
PyDict_SetItem(fields, title, tup);
}
}
+ else {
+ PyDict_SetItem(fields, name, tup);
+ }
+
totalsize += conv->elsize;
Py_DECREF(tup);
}
@@ -1133,7 +1138,7 @@ _convert_from_dict(PyObject *obj, int align)
}
}
Py_DECREF(tup);
- if ((ret == NPY_FAIL) || (newdescr->elsize == 0)) {
+ if (ret == NPY_FAIL) {
goto fail;
}
dtypeflags |= (newdescr->flags & NPY_FROM_FIELDS);
@@ -1534,6 +1539,31 @@ finish:
}
#endif
if (item) {
+ /* Check for a deprecated Numeric-style typecode */
+ if (PyBytes_Check(obj)) {
+ char *type = NULL;
+ Py_ssize_t len = 0;
+ char *dep_tps[] = {"Bool", "Complex", "Float", "Int",
+ "Object0", "String0", "Timedelta64",
+ "Unicode0", "UInt", "Void0"};
+ int ndep_tps = sizeof(dep_tps) / sizeof(dep_tps[0]);
+ int i;
+
+ if (PyBytes_AsStringAndSize(obj, &type, &len) < 0) {
+ goto error;
+ }
+ for (i = 0; i < ndep_tps; ++i) {
+ char *dep_tp = dep_tps[i];
+
+ if (strncmp(type, dep_tp, strlen(dep_tp)) == 0) {
+ if (DEPRECATE("Numeric-style type codes are "
+ "deprecated and will result in "
+ "an error in the future.") < 0) {
+ goto fail;
+ }
+ }
+ }
+ }
return PyArray_DescrConverter(item, at);
}
}
diff --git a/numpy/core/src/multiarray/dtype_transfer.c b/numpy/core/src/multiarray/dtype_transfer.c
index fd371a1f6..b3b8b036f 100644
--- a/numpy/core/src/multiarray/dtype_transfer.c
+++ b/numpy/core/src/multiarray/dtype_transfer.c
@@ -22,6 +22,7 @@
#include "npy_pycompat.h"
#include "convert_datatype.h"
+#include "ctors.h"
#include "_datetime.h"
#include "datetime_strings.h"
@@ -142,6 +143,7 @@ _strided_to_strided_copy_references(char *dst, npy_intp dst_stride,
}
}
+
/************************** ZERO-PADDED COPY ******************************/
/* Does a zero-padded copy */
@@ -209,14 +211,49 @@ _strided_to_strided_truncate_copy(char *dst, npy_intp dst_stride,
}
}
+/*
+ * Does a strided to strided zero-padded or truncated copy for the case where
+ * unicode swapping is needed.
+ */
+static void
+_strided_to_strided_unicode_copyswap(char *dst, npy_intp dst_stride,
+ char *src, npy_intp src_stride,
+ npy_intp N, npy_intp src_itemsize,
+ NpyAuxData *data)
+{
+ _strided_zero_pad_data *d = (_strided_zero_pad_data *)data;
+ npy_intp dst_itemsize = d->dst_itemsize;
+ npy_intp zero_size = dst_itemsize - src_itemsize;
+ npy_intp copy_size = zero_size > 0 ? src_itemsize : dst_itemsize;
+ char *_dst;
+ npy_intp characters = dst_itemsize / 4;
+ int i;
+
+ while (N > 0) {
+ memcpy(dst, src, copy_size);
+ if (zero_size > 0) {
+ memset(dst + src_itemsize, 0, zero_size);
+ }
+ _dst = dst;
+ for (i=0; i < characters; i++) {
+ npy_bswap4_unaligned(_dst);
+ _dst += 4;
+ }
+ src += src_stride;
+ dst += dst_stride;
+ --N;
+ }
+}
+
+
NPY_NO_EXPORT int
-PyArray_GetStridedZeroPadCopyFn(int aligned,
+PyArray_GetStridedZeroPadCopyFn(int aligned, int unicode_swap,
npy_intp src_stride, npy_intp dst_stride,
npy_intp src_itemsize, npy_intp dst_itemsize,
PyArray_StridedUnaryOp **out_stransfer,
NpyAuxData **out_transferdata)
{
- if (src_itemsize == dst_itemsize) {
+ if ((src_itemsize == dst_itemsize) && !unicode_swap) {
*out_stransfer = PyArray_GetStridedCopyFn(aligned, src_stride,
dst_stride, src_itemsize);
*out_transferdata = NULL;
@@ -233,7 +270,10 @@ PyArray_GetStridedZeroPadCopyFn(int aligned,
d->base.free = (NpyAuxData_FreeFunc *)&PyArray_free;
d->base.clone = &_strided_zero_pad_data_clone;
- if (src_itemsize < dst_itemsize) {
+ if (unicode_swap) {
+ *out_stransfer = &_strided_to_strided_unicode_copyswap;
+ }
+ else if (src_itemsize < dst_itemsize) {
*out_stransfer = &_strided_to_strided_zero_pad_copy;
}
else {
@@ -518,7 +558,7 @@ _strided_to_strided_wrap_copy_swap(char *dst, npy_intp dst_stride,
d->copyswapn(dst, dst_stride, src, src_stride, N, d->swap, d->arr);
}
-/* This only gets used for custom data types */
+/* This only gets used for custom data types and for Unicode when swapping */
static int
wrap_copy_swap_function(int aligned,
npy_intp src_stride, npy_intp dst_stride,
@@ -549,8 +589,8 @@ wrap_copy_swap_function(int aligned,
* The copyswap functions shouldn't need that.
*/
Py_INCREF(dtype);
- data->arr = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type, dtype,
- 1, &shape, NULL, NULL, 0, NULL);
+ data->arr = (PyArrayObject *)PyArray_NewFromDescr_int(&PyArray_Type, dtype,
+ 1, &shape, NULL, NULL, 0, NULL, 0, 1);
if (data->arr == NULL) {
PyArray_free(data);
return NPY_FAIL;
@@ -1405,8 +1445,8 @@ get_nbo_cast_transfer_function(int aligned,
return NPY_FAIL;
}
}
- data->aip = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type, tmp_dtype,
- 1, &shape, NULL, NULL, 0, NULL);
+ data->aip = (PyArrayObject *)PyArray_NewFromDescr_int(&PyArray_Type,
+ tmp_dtype, 1, &shape, NULL, NULL, 0, NULL, 0, 1);
if (data->aip == NULL) {
PyArray_free(data);
return NPY_FAIL;
@@ -1429,8 +1469,8 @@ get_nbo_cast_transfer_function(int aligned,
return NPY_FAIL;
}
}
- data->aop = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type, tmp_dtype,
- 1, &shape, NULL, NULL, 0, NULL);
+ data->aop = (PyArrayObject *)PyArray_NewFromDescr_int(&PyArray_Type,
+ tmp_dtype, 1, &shape, NULL, NULL, 0, NULL, 0, 1);
if (data->aop == NULL) {
Py_DECREF(data->aip);
PyArray_free(data);
@@ -3482,8 +3522,13 @@ PyArray_GetDTypeCopySwapFn(int aligned,
itemsize);
*outtransferdata = NULL;
}
+ else if (dtype->kind == 'U') {
+ return wrap_copy_swap_function(aligned,
+ src_stride, dst_stride, dtype, 1,
+ outstransfer, outtransferdata);
+ }
/* If it's not complex, one swap */
- else if(dtype->kind != 'c') {
+ else if (dtype->kind != 'c') {
*outstransfer = PyArray_GetStridedCopySwapFn(aligned,
src_stride, dst_stride,
itemsize);
@@ -3628,11 +3673,19 @@ PyArray_GetDTypeTransferFunction(int aligned,
}
}
- /* The special types, which have no byte-order */
+ /* The special types, which have no or subelement byte-order */
switch (src_type_num) {
+ case NPY_UNICODE:
+ /* Wrap the copy swap function when swapping is necessary */
+ if (PyArray_ISNBO(src_dtype->byteorder) !=
+ PyArray_ISNBO(dst_dtype->byteorder)) {
+ return wrap_copy_swap_function(aligned,
+ src_stride, dst_stride,
+ src_dtype, 1,
+ out_stransfer, out_transferdata);
+ }
case NPY_VOID:
case NPY_STRING:
- case NPY_UNICODE:
*out_stransfer = PyArray_GetStridedCopyFn(0,
src_stride, dst_stride,
src_itemsize);
@@ -3705,10 +3758,17 @@ PyArray_GetDTypeTransferFunction(int aligned,
/* Check for different-sized strings, unicodes, or voids */
if (src_type_num == dst_type_num) {
switch (src_type_num) {
- case NPY_STRING:
case NPY_UNICODE:
+ if (PyArray_ISNBO(src_dtype->byteorder) !=
+ PyArray_ISNBO(dst_dtype->byteorder)) {
+ return PyArray_GetStridedZeroPadCopyFn(0, 1,
+ src_stride, dst_stride,
+ src_dtype->elsize, dst_dtype->elsize,
+ out_stransfer, out_transferdata);
+ }
+ case NPY_STRING:
case NPY_VOID:
- return PyArray_GetStridedZeroPadCopyFn(0,
+ return PyArray_GetStridedZeroPadCopyFn(0, 0,
src_stride, dst_stride,
src_dtype->elsize, dst_dtype->elsize,
out_stransfer, out_transferdata);
diff --git a/numpy/core/src/multiarray/item_selection.c b/numpy/core/src/multiarray/item_selection.c
index 80dc8201f..dcd3322c4 100644
--- a/numpy/core/src/multiarray/item_selection.c
+++ b/numpy/core/src/multiarray/item_selection.c
@@ -1441,11 +1441,6 @@ PyArray_LexSort(PyObject *sort_keys, int axis)
&& PyDataType_FLAGCHK(PyArray_DESCR(mps[i]), NPY_NEEDS_PYAPI)) {
object = 1;
}
- its[i] = (PyArrayIterObject *)PyArray_IterAllButAxis(
- (PyObject *)mps[i], &axis);
- if (its[i] == NULL) {
- goto fail;
- }
}
/* Now we can check the axis */
@@ -1472,6 +1467,14 @@ PyArray_LexSort(PyObject *sort_keys, int axis)
goto fail;
}
+ for (i = 0; i < n; i++) {
+ its[i] = (PyArrayIterObject *)PyArray_IterAllButAxis(
+ (PyObject *)mps[i], &axis);
+ if (its[i] == NULL) {
+ goto fail;
+ }
+ }
+
/* Now do the sorting */
ret = (PyArrayObject *)PyArray_New(&PyArray_Type, PyArray_NDIM(mps[0]),
PyArray_DIMS(mps[0]), NPY_INTP,
diff --git a/numpy/core/src/multiarray/iterators.c b/numpy/core/src/multiarray/iterators.c
index 3dd7b0ebb..50f1cb1f4 100644
--- a/numpy/core/src/multiarray/iterators.c
+++ b/numpy/core/src/multiarray/iterators.c
@@ -1696,6 +1696,10 @@ static PyMemberDef arraymultiter_members[] = {
T_INT,
offsetof(PyArrayMultiIterObject, nd),
READONLY, NULL},
+ {"ndim",
+ T_INT,
+ offsetof(PyArrayMultiIterObject, nd),
+ READONLY, NULL},
{NULL, 0, 0, 0, NULL},
};
diff --git a/numpy/core/src/multiarray/mapping.c b/numpy/core/src/multiarray/mapping.c
index 6a30fc492..3d33f8a85 100644
--- a/numpy/core/src/multiarray/mapping.c
+++ b/numpy/core/src/multiarray/mapping.c
@@ -14,6 +14,7 @@
#include "npy_import.h"
#include "common.h"
+#include "ctors.h"
#include "iterators.h"
#include "mapping.h"
#include "lowlevel_strided_loops.h"
@@ -1129,7 +1130,9 @@ array_assign_boolean_subscript(PyArrayObject *self,
return -1;
}
- NPY_BEGIN_THREADS_NDITER(iter);
+ if (!needs_api) {
+ NPY_BEGIN_THREADS_NDITER(iter);
+ }
do {
innersize = *NpyIter_GetInnerLoopSizePtr(iter);
@@ -1153,7 +1156,9 @@ array_assign_boolean_subscript(PyArrayObject *self,
}
} while (iternext(iter));
- NPY_END_THREADS;
+ if (!needs_api) {
+ NPY_END_THREADS;
+ }
NPY_AUXDATA_FREE(transferdata);
NpyIter_Deallocate(iter);
@@ -1287,7 +1292,7 @@ _get_field_view(PyArrayObject *arr, PyObject *ind, PyArrayObject **view)
/* view the array at the new offset+dtype */
Py_INCREF(fieldtype);
- *view = (PyArrayObject*)PyArray_NewFromDescr(
+ *view = (PyArrayObject*)PyArray_NewFromDescr_int(
Py_TYPE(arr),
fieldtype,
PyArray_NDIM(arr),
@@ -1295,7 +1300,7 @@ _get_field_view(PyArrayObject *arr, PyObject *ind, PyArrayObject **view)
PyArray_STRIDES(arr),
PyArray_BYTES(arr) + offset,
PyArray_FLAGS(arr),
- (PyObject *)arr);
+ (PyObject *)arr, 0, 1);
if (*view == NULL) {
return 0;
}
@@ -1393,7 +1398,7 @@ _get_field_view(PyArrayObject *arr, PyObject *ind, PyArrayObject **view)
view_dtype->fields = fields;
view_dtype->flags = PyArray_DESCR(arr)->flags;
- *view = (PyArrayObject*)PyArray_NewFromDescr(
+ *view = (PyArrayObject*)PyArray_NewFromDescr_int(
Py_TYPE(arr),
view_dtype,
PyArray_NDIM(arr),
@@ -1401,7 +1406,7 @@ _get_field_view(PyArrayObject *arr, PyObject *ind, PyArrayObject **view)
PyArray_STRIDES(arr),
PyArray_DATA(arr),
PyArray_FLAGS(arr),
- (PyObject *)arr);
+ (PyObject *)arr, 0, 1);
if (*view == NULL) {
return 0;
}
diff --git a/numpy/core/src/multiarray/methods.c b/numpy/core/src/multiarray/methods.c
index b8e066b79..634690648 100644
--- a/numpy/core/src/multiarray/methods.c
+++ b/numpy/core/src/multiarray/methods.c
@@ -379,13 +379,13 @@ PyArray_GetField(PyArrayObject *self, PyArray_Descr *typed, int offset)
Py_DECREF(safe);
}
- ret = PyArray_NewFromDescr(Py_TYPE(self),
- typed,
- PyArray_NDIM(self), PyArray_DIMS(self),
- PyArray_STRIDES(self),
- PyArray_BYTES(self) + offset,
- PyArray_FLAGS(self)&(~NPY_ARRAY_F_CONTIGUOUS),
- (PyObject *)self);
+ ret = PyArray_NewFromDescr_int(Py_TYPE(self),
+ typed,
+ PyArray_NDIM(self), PyArray_DIMS(self),
+ PyArray_STRIDES(self),
+ PyArray_BYTES(self) + offset,
+ PyArray_FLAGS(self)&(~NPY_ARRAY_F_CONTIGUOUS),
+ (PyObject *)self, 0, 1);
if (ret == NULL) {
return NULL;
}
diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c
index e2731068b..62b562856 100644
--- a/numpy/core/src/multiarray/multiarraymodule.c
+++ b/numpy/core/src/multiarray/multiarraymodule.c
@@ -4233,6 +4233,8 @@ static struct PyMethodDef array_module_methods[] = {
METH_VARARGS | METH_KEYWORDS, NULL},
{"interp", (PyCFunction)arr_interp,
METH_VARARGS | METH_KEYWORDS, NULL},
+ {"interp_complex", (PyCFunction)arr_interp_complex,
+ METH_VARARGS | METH_KEYWORDS, NULL},
{"ravel_multi_index", (PyCFunction)arr_ravel_multi_index,
METH_VARARGS | METH_KEYWORDS, NULL},
{"unravel_index", (PyCFunction)arr_unravel_index,
diff --git a/numpy/core/src/multiarray/numpyos.c b/numpy/core/src/multiarray/numpyos.c
index 450ec40b6..0cfb9e66b 100644
--- a/numpy/core/src/multiarray/numpyos.c
+++ b/numpy/core/src/multiarray/numpyos.c
@@ -339,7 +339,7 @@ ASCII_FORMAT(long double, l, double)
* Same as isspace under C locale
*/
NPY_NO_EXPORT int
-NumPyOS_ascii_isspace(char c)
+NumPyOS_ascii_isspace(int c)
{
return c == ' ' || c == '\f' || c == '\n' || c == '\r' || c == '\t'
|| c == '\v';
diff --git a/numpy/core/src/multiarray/numpyos.h b/numpy/core/src/multiarray/numpyos.h
index 3bf77391e..7ca795a6f 100644
--- a/numpy/core/src/multiarray/numpyos.h
+++ b/numpy/core/src/multiarray/numpyos.h
@@ -29,6 +29,6 @@ NPY_NO_EXPORT int
NumPyOS_ascii_ftoLf(FILE *fp, long double *value);
NPY_NO_EXPORT int
-NumPyOS_ascii_isspace(char c);
+NumPyOS_ascii_isspace(int c);
#endif
diff --git a/numpy/core/src/multiarray/shape.c b/numpy/core/src/multiarray/shape.c
index cc02d15f9..30fd45f33 100644
--- a/numpy/core/src/multiarray/shape.c
+++ b/numpy/core/src/multiarray/shape.c
@@ -255,12 +255,12 @@ PyArray_Newshape(PyArrayObject *self, PyArray_Dims *newdims,
}
Py_INCREF(PyArray_DESCR(self));
- ret = (PyArrayObject *)PyArray_NewFromDescr(Py_TYPE(self),
+ ret = (PyArrayObject *)PyArray_NewFromDescr_int(Py_TYPE(self),
PyArray_DESCR(self),
ndim, dimensions,
strides,
PyArray_DATA(self),
- flags, (PyObject *)self);
+ flags, (PyObject *)self, 0, 1);
if (ret == NULL) {
goto fail;
diff --git a/numpy/core/src/private/lowlevel_strided_loops.h b/numpy/core/src/private/lowlevel_strided_loops.h
index a6bb4c7eb..02b8c73c1 100644
--- a/numpy/core/src/private/lowlevel_strided_loops.h
+++ b/numpy/core/src/private/lowlevel_strided_loops.h
@@ -126,7 +126,7 @@ PyArray_GetStridedCopySwapPairFn(int aligned,
* Returns NPY_SUCCEED or NPY_FAIL
*/
NPY_NO_EXPORT int
-PyArray_GetStridedZeroPadCopyFn(int aligned,
+PyArray_GetStridedZeroPadCopyFn(int aligned, int unicode_swap,
npy_intp src_stride, npy_intp dst_stride,
npy_intp src_itemsize, npy_intp dst_itemsize,
PyArray_StridedUnaryOp **outstransfer,
diff --git a/numpy/core/src/private/npy_config.h b/numpy/core/src/private/npy_config.h
index 3d1470609..5f8aa3b79 100644
--- a/numpy/core/src/private/npy_config.h
+++ b/numpy/core/src/private/npy_config.h
@@ -75,7 +75,7 @@
#include <features.h>
#if defined(__GLIBC__)
-#if !__GLIBC_PREREQ(2, 16)
+#if !__GLIBC_PREREQ(2, 18)
#undef HAVE_CASIN
#undef HAVE_CASINF
@@ -96,7 +96,7 @@
#undef HAVE_CACOSHF
#undef HAVE_CACOSHL
-#endif /* __GLIBC_PREREQ(2, 16) */
+#endif /* __GLIBC_PREREQ(2, 18) */
#endif /* defined(__GLIBC_PREREQ) */
#endif /* defined(HAVE_FEATURES_H) */
diff --git a/numpy/core/src/umath/loops.c.src b/numpy/core/src/umath/loops.c.src
index 6f0dee123..157b30e70 100644
--- a/numpy/core/src/umath/loops.c.src
+++ b/numpy/core/src/umath/loops.c.src
@@ -921,6 +921,10 @@ NPY_NO_EXPORT void
*((@type@ *)op1) = 1;
continue;
}
+ if (in1 == 1) {
+ *((@type@ *)op1) = 1;
+ continue;
+ }
if (in2 < 0 || in1 == 0) {
*((@type@ *)op1) = 0;
continue;
diff --git a/numpy/core/tests/test_api.py b/numpy/core/tests/test_api.py
index 876cab4d7..08bc6f947 100644
--- a/numpy/core/tests/test_api.py
+++ b/numpy/core/tests/test_api.py
@@ -64,7 +64,7 @@ def test_array_array():
np.ones((), dtype=U5))
builtins = getattr(__builtins__, '__dict__', __builtins__)
- assert_(isinstance(builtins, dict))
+ assert_(hasattr(builtins, 'get'))
# test buffer
_buffer = builtins.get("buffer")
diff --git a/numpy/core/tests/test_deprecations.py b/numpy/core/tests/test_deprecations.py
index e6d3cd261..547280b23 100644
--- a/numpy/core/tests/test_deprecations.py
+++ b/numpy/core/tests/test_deprecations.py
@@ -198,7 +198,7 @@ class _DeprecationTestCase(object):
(warning.category,))
if num is not None and num_found != num:
msg = "%i warnings found but %i expected." % (len(self.log), num)
- lst = [w.category for w in self.log]
+ lst = [str(w.category) for w in self.log]
raise AssertionError("\n".join([msg] + lst))
with warnings.catch_warnings():
@@ -609,6 +609,29 @@ class TestBinaryReprInsufficientWidthParameterForRepresentation(_DeprecationTest
self.assert_deprecated(np.binary_repr, args=args, kwargs=kwargs)
+class TestNumericStyleTypecodes(_DeprecationTestCase):
+ """
+ Deprecate the old numeric-style dtypes, which are especially
+ confusing for complex types, e.g. Complex32 -> complex64. When the
+ deprecation cycle is complete, the check for the strings should be
+ removed from PyArray_DescrConverter in descriptor.c, and the
+ deprecated keys should not be added as capitalized aliases in
+ _add_aliases in numerictypes.py.
+ """
+ def test_all_dtypes(self):
+ deprecated_types = [
+ 'Bool', 'Complex32', 'Complex64', 'Float16', 'Float32', 'Float64',
+ 'Int8', 'Int16', 'Int32', 'Int64', 'Object0', 'Timedelta64',
+ 'UInt8', 'UInt16', 'UInt32', 'UInt64', 'Void0'
+ ]
+ if sys.version_info[0] < 3:
+ deprecated_types.extend(['Unicode0', 'String0'])
+
+ for dt in deprecated_types:
+ self.assert_deprecated(np.dtype, exceptions=(TypeError,),
+ args=(dt,))
+
+
class TestTestDeprecated(object):
def test_assert_deprecated(self):
test_case_instance = _DeprecationTestCase()
diff --git a/numpy/core/tests/test_dtype.py b/numpy/core/tests/test_dtype.py
index a6cb66b7d..f0721d7a3 100644
--- a/numpy/core/tests/test_dtype.py
+++ b/numpy/core/tests/test_dtype.py
@@ -256,6 +256,16 @@ class TestRecord(TestCase):
dt2 = np.dtype((np.void, dt.fields))
assert_equal(dt2.fields, dt.fields)
+ def test_from_dict_with_zero_width_field(self):
+ # Regression test for #6430 / #2196
+ dt = np.dtype([('val1', np.float32, (0,)), ('val2', int)])
+ dt2 = np.dtype({'names': ['val1', 'val2'],
+ 'formats': [(np.float32, (0,)), int]})
+
+ assert_dtype_equal(dt, dt2)
+ assert_equal(dt.fields['val1'][0].itemsize, 0)
+ assert_equal(dt.itemsize, dt.fields['val2'][0].itemsize)
+
def test_bool_commastring(self):
d = np.dtype('?,?,?') # raises?
assert_equal(len(d.names), 3)
diff --git a/numpy/core/tests/test_function_base.py b/numpy/core/tests/test_function_base.py
index 6b5430611..0fabb2588 100644
--- a/numpy/core/tests/test_function_base.py
+++ b/numpy/core/tests/test_function_base.py
@@ -1,13 +1,45 @@
from __future__ import division, absolute_import, print_function
-from numpy import (logspace, linspace, dtype, array, finfo, typecodes, arange,
- isnan, ndarray)
+from numpy import (logspace, linspace, geomspace, dtype, array, finfo,
+ typecodes, arange, isnan, ndarray, sqrt)
from numpy.testing import (
TestCase, run_module_suite, assert_, assert_equal, assert_raises,
- assert_array_equal
+ assert_array_equal, assert_allclose
)
+class PhysicalQuantity(float):
+ def __new__(cls, value):
+ return float.__new__(cls, value)
+
+ def __add__(self, x):
+ assert_(isinstance(x, PhysicalQuantity))
+ return PhysicalQuantity(float(x) + float(self))
+ __radd__ = __add__
+
+ def __sub__(self, x):
+ assert_(isinstance(x, PhysicalQuantity))
+ return PhysicalQuantity(float(self) - float(x))
+
+ def __rsub__(self, x):
+ assert_(isinstance(x, PhysicalQuantity))
+ return PhysicalQuantity(float(x) - float(self))
+
+ def __mul__(self, x):
+ return PhysicalQuantity(float(x) * float(self))
+ __rmul__ = __mul__
+
+ def __div__(self, x):
+ return PhysicalQuantity(float(self) / float(x))
+
+ def __rdiv__(self, x):
+ return PhysicalQuantity(float(x) / float(self))
+
+
+class PhysicalQuantity2(ndarray):
+ __array_priority__ = 10
+
+
class TestLogspace(TestCase):
def test_basic(self):
@@ -28,6 +60,136 @@ class TestLogspace(TestCase):
y = logspace(0, 6, dtype='int32')
assert_equal(y.dtype, dtype('int32'))
+ def test_physical_quantities(self):
+ a = PhysicalQuantity(1.0)
+ b = PhysicalQuantity(5.0)
+ assert_equal(logspace(a, b), logspace(1.0, 5.0))
+
+ def test_subclass(self):
+ a = array(1).view(PhysicalQuantity2)
+ b = array(7).view(PhysicalQuantity2)
+ ls = logspace(a, b)
+ assert type(ls) is PhysicalQuantity2
+ assert_equal(ls, logspace(1.0, 7.0))
+ ls = logspace(a, b, 1)
+ assert type(ls) is PhysicalQuantity2
+ assert_equal(ls, logspace(1.0, 7.0, 1))
+
+
+class TestGeomspace(TestCase):
+
+ def test_basic(self):
+ y = geomspace(1, 1e6)
+ assert_(len(y) == 50)
+ y = geomspace(1, 1e6, num=100)
+ assert_(y[-1] == 10 ** 6)
+ y = geomspace(1, 1e6, endpoint=False)
+ assert_(y[-1] < 10 ** 6)
+ y = geomspace(1, 1e6, num=7)
+ assert_array_equal(y, [1, 10, 100, 1e3, 1e4, 1e5, 1e6])
+
+ y = geomspace(8, 2, num=3)
+ assert_allclose(y, [8, 4, 2])
+ assert_array_equal(y.imag, 0)
+
+ y = geomspace(-1, -100, num=3)
+ assert_array_equal(y, [-1, -10, -100])
+ assert_array_equal(y.imag, 0)
+
+ y = geomspace(-100, -1, num=3)
+ assert_array_equal(y, [-100, -10, -1])
+ assert_array_equal(y.imag, 0)
+
+ def test_complex(self):
+ # Purely imaginary
+ y = geomspace(1j, 16j, num=5)
+ assert_allclose(y, [1j, 2j, 4j, 8j, 16j])
+ assert_array_equal(y.real, 0)
+
+ y = geomspace(-4j, -324j, num=5)
+ assert_allclose(y, [-4j, -12j, -36j, -108j, -324j])
+ assert_array_equal(y.real, 0)
+
+ y = geomspace(1+1j, 1000+1000j, num=4)
+ assert_allclose(y, [1+1j, 10+10j, 100+100j, 1000+1000j])
+
+ y = geomspace(-1+1j, -1000+1000j, num=4)
+ assert_allclose(y, [-1+1j, -10+10j, -100+100j, -1000+1000j])
+
+ # Logarithmic spirals
+ y = geomspace(-1, 1, num=3, dtype=complex)
+ assert_allclose(y, [-1, 1j, +1])
+
+ y = geomspace(0+3j, -3+0j, 3)
+ assert_allclose(y, [0+3j, -3/sqrt(2)+3j/sqrt(2), -3+0j])
+ y = geomspace(0+3j, 3+0j, 3)
+ assert_allclose(y, [0+3j, 3/sqrt(2)+3j/sqrt(2), 3+0j])
+ y = geomspace(-3+0j, 0-3j, 3)
+ assert_allclose(y, [-3+0j, -3/sqrt(2)-3j/sqrt(2), 0-3j])
+ y = geomspace(0+3j, -3+0j, 3)
+ assert_allclose(y, [0+3j, -3/sqrt(2)+3j/sqrt(2), -3+0j])
+ y = geomspace(-2-3j, 5+7j, 7)
+ assert_allclose(y, [-2-3j, -0.29058977-4.15771027j,
+ 2.08885354-4.34146838j, 4.58345529-3.16355218j,
+ 6.41401745-0.55233457j, 6.75707386+3.11795092j,
+ 5+7j])
+
+ # Type promotion should prevent the -5 from becoming a NaN
+ y = geomspace(3j, -5, 2)
+ assert_allclose(y, [3j, -5])
+ y = geomspace(-5, 3j, 2)
+ assert_allclose(y, [-5, 3j])
+
+ def test_dtype(self):
+ y = geomspace(1, 1e6, dtype='float32')
+ assert_equal(y.dtype, dtype('float32'))
+ y = geomspace(1, 1e6, dtype='float64')
+ assert_equal(y.dtype, dtype('float64'))
+ y = geomspace(1, 1e6, dtype='int32')
+ assert_equal(y.dtype, dtype('int32'))
+
+ # Native types
+ y = geomspace(1, 1e6, dtype=float)
+ assert_equal(y.dtype, dtype('float_'))
+ y = geomspace(1, 1e6, dtype=complex)
+ assert_equal(y.dtype, dtype('complex'))
+
+ def test_array_scalar(self):
+ lim1 = array([120, 100], dtype="int8")
+ lim2 = array([-120, -100], dtype="int8")
+ lim3 = array([1200, 1000], dtype="uint16")
+ t1 = geomspace(lim1[0], lim1[1], 5)
+ t2 = geomspace(lim2[0], lim2[1], 5)
+ t3 = geomspace(lim3[0], lim3[1], 5)
+ t4 = geomspace(120.0, 100.0, 5)
+ t5 = geomspace(-120.0, -100.0, 5)
+ t6 = geomspace(1200.0, 1000.0, 5)
+
+ # t3 uses float32, t6 uses float64
+ assert_allclose(t1, t4, rtol=1e-2)
+ assert_allclose(t2, t5, rtol=1e-2)
+ assert_allclose(t3, t6, rtol=1e-5)
+
+ def test_physical_quantities(self):
+ a = PhysicalQuantity(1.0)
+ b = PhysicalQuantity(5.0)
+ assert_equal(geomspace(a, b), geomspace(1.0, 5.0))
+
+ def test_subclass(self):
+ a = array(1).view(PhysicalQuantity2)
+ b = array(7).view(PhysicalQuantity2)
+ gs = geomspace(a, b)
+ assert type(gs) is PhysicalQuantity2
+ assert_equal(gs, geomspace(1.0, 7.0))
+ gs = geomspace(a, b, 1)
+ assert type(gs) is PhysicalQuantity2
+ assert_equal(gs, geomspace(1.0, 7.0, 1))
+
+ def test_bounds(self):
+ assert_raises(ValueError, geomspace, 0, 10)
+ assert_raises(ValueError, geomspace, 10, 0)
+ assert_raises(ValueError, geomspace, 0, 0)
+
class TestLinspace(TestCase):
@@ -77,48 +239,18 @@ class TestLinspace(TestCase):
def test_complex(self):
lim1 = linspace(1 + 2j, 3 + 4j, 5)
- t1 = array([ 1.0+2.j, 1.5+2.5j, 2.0+3.j, 2.5+3.5j, 3.0+4.j])
+ t1 = array([1.0+2.j, 1.5+2.5j, 2.0+3j, 2.5+3.5j, 3.0+4j])
lim2 = linspace(1j, 10, 5)
- t2 = array([ 0.0+1.j, 2.5+0.75j, 5.0+0.5j, 7.5+0.25j, 10.0+0.j])
+ t2 = array([0.0+1.j, 2.5+0.75j, 5.0+0.5j, 7.5+0.25j, 10.0+0j])
assert_equal(lim1, t1)
assert_equal(lim2, t2)
def test_physical_quantities(self):
- class PhysicalQuantity(float):
- def __new__(cls, value):
- return float.__new__(cls, value)
-
- def __add__(self, x):
- assert_(isinstance(x, PhysicalQuantity))
- return PhysicalQuantity(float(x) + float(self))
- __radd__ = __add__
-
- def __sub__(self, x):
- assert_(isinstance(x, PhysicalQuantity))
- return PhysicalQuantity(float(self) - float(x))
-
- def __rsub__(self, x):
- assert_(isinstance(x, PhysicalQuantity))
- return PhysicalQuantity(float(x) - float(self))
-
- def __mul__(self, x):
- return PhysicalQuantity(float(x) * float(self))
- __rmul__ = __mul__
-
- def __div__(self, x):
- return PhysicalQuantity(float(self) / float(x))
-
- def __rdiv__(self, x):
- return PhysicalQuantity(float(x) / float(self))
-
a = PhysicalQuantity(0.0)
b = PhysicalQuantity(1.0)
assert_equal(linspace(a, b), linspace(0.0, 1.0))
def test_subclass(self):
- class PhysicalQuantity2(ndarray):
- __array_priority__ = 10
-
a = array(0).view(PhysicalQuantity2)
b = array(1).view(PhysicalQuantity2)
ls = linspace(a, b)
diff --git a/numpy/core/tests/test_getlimits.py b/numpy/core/tests/test_getlimits.py
index c36d7c068..600f8f52c 100644
--- a/numpy/core/tests/test_getlimits.py
+++ b/numpy/core/tests/test_getlimits.py
@@ -42,6 +42,19 @@ class TestLongdouble(TestCase):
ftype2 = finfo(longdouble)
assert_equal(id(ftype), id(ftype2))
+class TestFinfo(TestCase):
+ def test_basic(self):
+ dts = list(zip(['f2', 'f4', 'f8', 'c8', 'c16'],
+ [np.float16, np.float32, np.float64, np.complex64,
+ np.complex128]))
+ for dt1, dt2 in dts:
+ for attr in ('bits', 'eps', 'epsneg', 'iexp', 'machar', 'machep',
+ 'max', 'maxexp', 'min', 'minexp', 'negep', 'nexp',
+ 'nmant', 'precision', 'resolution', 'tiny'):
+ assert_equal(getattr(finfo(dt1), attr),
+ getattr(finfo(dt2), attr), attr)
+ self.assertRaises(ValueError, finfo, 'i4')
+
class TestIinfo(TestCase):
def test_basic(self):
dts = list(zip(['i1', 'i2', 'i4', 'i8',
@@ -49,8 +62,9 @@ class TestIinfo(TestCase):
[np.int8, np.int16, np.int32, np.int64,
np.uint8, np.uint16, np.uint32, np.uint64]))
for dt1, dt2 in dts:
- assert_equal(iinfo(dt1).min, iinfo(dt2).min)
- assert_equal(iinfo(dt1).max, iinfo(dt2).max)
+ for attr in ('bits', 'min', 'max'):
+ assert_equal(getattr(iinfo(dt1), attr),
+ getattr(iinfo(dt2), attr), attr)
self.assertRaises(ValueError, iinfo, 'f4')
def test_unsigned_max(self):
diff --git a/numpy/core/tests/test_indexing.py b/numpy/core/tests/test_indexing.py
index 8d6f6a96b..49231f37e 100644
--- a/numpy/core/tests/test_indexing.py
+++ b/numpy/core/tests/test_indexing.py
@@ -15,7 +15,14 @@ from numpy.testing import (
try:
- cdll = np.ctypeslib.load_library('multiarray', np.core.multiarray.__file__)
+ cdll = None
+ if hasattr(sys, 'gettotalrefcount'):
+ try:
+ cdll = np.ctypeslib.load_library('multiarray_d', np.core.multiarray.__file__)
+ except OSError:
+ pass
+ if cdll is None:
+ cdll = np.ctypeslib.load_library('multiarray', np.core.multiarray.__file__)
_HAS_CTYPE = True
except ImportError:
_HAS_CTYPE = False
@@ -213,6 +220,20 @@ class TestIndexing(TestCase):
assert_raises(ValueError, f, a, [1, 2, 3])
assert_raises(ValueError, f, a[:1], [1, 2, 3])
+ def test_boolean_assignment_needs_api(self):
+ # See also gh-7666
+ # This caused a segfault on Python 2 due to the GIL not being
+ # held when the iterator does not need it, but the transfer function
+ # does
+ arr = np.zeros(1000)
+ indx = np.zeros(1000, dtype=bool)
+ indx[:100] = True
+ arr[indx] = np.ones(100, dtype=object)
+
+ expected = np.zeros(1000)
+ expected[:100] = 1
+ assert_array_equal(arr, expected)
+
def test_boolean_indexing_twodim(self):
# Indexing a 2-dimensional array with
# 2-dimensional boolean array
diff --git a/numpy/core/tests/test_memmap.py b/numpy/core/tests/test_memmap.py
index 47f58ea7e..4aa02e26f 100644
--- a/numpy/core/tests/test_memmap.py
+++ b/numpy/core/tests/test_memmap.py
@@ -7,6 +7,7 @@ from tempfile import NamedTemporaryFile, TemporaryFile, mktemp, mkdtemp
from numpy import (
memmap, sum, average, product, ndarray, isscalar, add, subtract, multiply)
+from numpy.compat import Path
from numpy import arange, allclose, asarray
from numpy.testing import (
@@ -73,6 +74,19 @@ class TestMemmap(TestCase):
del b
del fp
+ @dec.skipif(Path is None, "No pathlib.Path")
+ def test_path(self):
+ tmpname = mktemp('', 'mmap', dir=self.tempdir)
+ fp = memmap(Path(tmpname), dtype=self.dtype, mode='w+',
+ shape=self.shape)
+ abspath = os.path.abspath(tmpname)
+ fp[:] = self.data[:]
+ self.assertEqual(abspath, str(fp.filename))
+ b = fp[:1]
+ self.assertEqual(abspath, str(b.filename))
+ del b
+ del fp
+
def test_filename_fileobj(self):
fp = memmap(self.tmpfp, dtype=self.dtype, mode="w+",
shape=self.shape)
diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py
index deb1f66eb..ca13a172f 100644
--- a/numpy/core/tests/test_multiarray.py
+++ b/numpy/core/tests/test_multiarray.py
@@ -23,13 +23,13 @@ from test_print import in_foreign_locale
from numpy.core.multiarray_tests import (
test_neighborhood_iterator, test_neighborhood_iterator_oob,
test_pydatamem_seteventhook_start, test_pydatamem_seteventhook_end,
- test_inplace_increment, get_buffer_info, test_as_c_array
+ test_inplace_increment, get_buffer_info, test_as_c_array,
)
from numpy.testing import (
TestCase, run_module_suite, assert_, assert_raises,
assert_equal, assert_almost_equal, assert_array_equal,
assert_array_almost_equal, assert_allclose,
- assert_array_less, runstring, dec, SkipTest
+ assert_array_less, runstring, dec, SkipTest, temppath
)
# Need to test an object that does not fully implement math interface
@@ -923,6 +923,59 @@ class TestStructured(TestCase):
assert_raises(ValueError, testassign)
+ def test_zero_width_string(self):
+ # Test for PR #6430 / issues #473, #4955, #2585
+
+ dt = np.dtype([('I', int), ('S', 'S0')])
+
+ x = np.zeros(4, dtype=dt)
+
+ assert_equal(x['S'], [b'', b'', b'', b''])
+ assert_equal(x['S'].itemsize, 0)
+
+ x['S'] = ['a', 'b', 'c', 'd']
+ assert_equal(x['S'], [b'', b'', b'', b''])
+ assert_equal(x['I'], [0, 0, 0, 0])
+
+ # Variation on test case from #4955
+ x['S'][x['I'] == 0] = 'hello'
+ assert_equal(x['S'], [b'', b'', b'', b''])
+ assert_equal(x['I'], [0, 0, 0, 0])
+
+ # Variation on test case from #2585
+ x['S'] = 'A'
+ assert_equal(x['S'], [b'', b'', b'', b''])
+ assert_equal(x['I'], [0, 0, 0, 0])
+
+ # Allow zero-width dtypes in ndarray constructor
+ y = np.ndarray(4, dtype=x['S'].dtype)
+ assert_equal(y.itemsize, 0)
+ assert_equal(x['S'], y)
+
+ # More tests for indexing an array with zero-width fields
+ assert_equal(np.zeros(4, dtype=[('a', 'S0,S0'),
+ ('b', 'u1')])['a'].itemsize, 0)
+ assert_equal(np.empty(3, dtype='S0,S0').itemsize, 0)
+ assert_equal(np.zeros(4, dtype='S0,u1')['f0'].itemsize, 0)
+
+ xx = x['S'].reshape((2, 2))
+ assert_equal(xx.itemsize, 0)
+ assert_equal(xx, [[b'', b''], [b'', b'']])
+
+ b = io.BytesIO()
+ np.save(b, xx)
+
+ b.seek(0)
+ yy = np.load(b)
+ assert_equal(yy.itemsize, 0)
+ assert_equal(xx, yy)
+
+ with temppath(suffix='.npy') as tmp:
+ np.save(tmp, xx)
+ yy = np.load(tmp)
+ assert_equal(yy.itemsize, 0)
+ assert_equal(xx, yy)
+
class TestBool(TestCase):
def test_test_interning(self):
@@ -3452,6 +3505,12 @@ class TestClip(TestCase):
x = val.clip(max=4)
assert_(np.all(x <= 4))
+ def test_nan(self):
+ input_arr = np.array([-2., np.nan, 0.5, 3., 0.25, np.nan])
+ result = input_arr.clip(-1, 1)
+ expected = np.array([-1., np.nan, 0.5, 1., 0.25, np.nan])
+ assert_array_equal(result, expected)
+
class TestCompress(TestCase):
def test_axis(self):
@@ -3479,11 +3538,11 @@ class TestCompress(TestCase):
class TestPutmask(object):
def tst_basic(self, x, T, mask, val):
np.putmask(x, mask, val)
- assert_(np.all(x[mask] == T(val)))
- assert_(x.dtype == T)
+ assert_equal(x[mask], T(val))
+ assert_equal(x.dtype, T)
def test_ip_types(self):
- unchecked_types = [str, unicode, np.void, object]
+ unchecked_types = [bytes, unicode, np.void, object]
x = np.random.random(1000)*100
mask = x < 40
@@ -3526,7 +3585,7 @@ class TestTake(object):
assert_array_equal(x.take(ind, axis=0), x)
def test_ip_types(self):
- unchecked_types = [str, unicode, np.void, object]
+ unchecked_types = [bytes, unicode, np.void, object]
x = np.random.random(24)*100
x.shape = 2, 3, 4
@@ -3614,6 +3673,9 @@ class TestLexsort(TestCase):
u, v = np.array(u, dtype='object'), np.array(v, dtype='object')
assert_array_equal(idx, np.lexsort((u, v)))
+ def test_invalid_axis(self): # gh-7528
+ x = np.linspace(0., 1., 42*3).reshape(42, 3)
+ assert_raises(ValueError, np.lexsort, x, axis=2)
class TestIO(object):
"""Test tofile, fromfile, tobytes, and fromstring"""
diff --git a/numpy/core/tests/test_numeric.py b/numpy/core/tests/test_numeric.py
index dd9c83b25..7fdbbc930 100644
--- a/numpy/core/tests/test_numeric.py
+++ b/numpy/core/tests/test_numeric.py
@@ -2495,6 +2495,7 @@ class TestBroadcast(TestCase):
np.broadcast(arrs[0], np.broadcast(*arrs[1:-1]), arrs[-1])]
for mit in mits:
assert_equal(mit.shape, (5, 6, 7))
+ assert_equal(mit.ndim, 3)
assert_equal(mit.nd, 3)
assert_equal(mit.numiter, 4)
for a, ia in zip(arrs, mit.iters):
@@ -2505,6 +2506,7 @@ class TestBroadcast(TestCase):
arrs = [np.empty((5, 6, 7))]
mit = np.broadcast(*arrs)
assert_equal(mit.shape, (5, 6, 7))
+ assert_equal(mit.ndim, 3)
assert_equal(mit.nd, 3)
assert_equal(mit.numiter, 1)
assert_(arrs[0] is mit.iters[0].base)
diff --git a/numpy/core/tests/test_records.py b/numpy/core/tests/test_records.py
index 2c85546a7..6ef6badda 100644
--- a/numpy/core/tests/test_records.py
+++ b/numpy/core/tests/test_records.py
@@ -254,6 +254,20 @@ class TestFromrecords(TestCase):
assert_equal(a[0]['qux'].D, asbytes('fgehi'))
assert_equal(a[0]['qux']['D'], asbytes('fgehi'))
+ def test_zero_width_strings(self):
+ # Test for #6430, based on the test case from #1901
+
+ cols = [['test'] * 3, [''] * 3]
+ rec = np.rec.fromarrays(cols)
+ assert_equal(rec['f0'], ['test', 'test', 'test'])
+ assert_equal(rec['f1'], ['', '', ''])
+
+ dt = np.dtype([('f0', '|S4'), ('f1', '|S')])
+ rec = np.rec.fromarrays(cols, dtype=dt)
+ assert_equal(rec.itemsize, 4)
+ assert_equal(rec['f0'], [b'test', b'test', b'test'])
+ assert_equal(rec['f1'], [b'', b'', b''])
+
class TestRecord(TestCase):
def setUp(self):
diff --git a/numpy/core/tests/test_umath.py b/numpy/core/tests/test_umath.py
index f2f94d7bf..759e996e3 100644
--- a/numpy/core/tests/test_umath.py
+++ b/numpy/core/tests/test_umath.py
@@ -434,6 +434,10 @@ class TestPower(TestCase):
arr = np.arange(-10, 10)
assert_equal(np.power(arr, 0), np.ones_like(arr))
+ def test_integer_power_of_1(self):
+ arr = np.arange(-10, 10)
+ assert_equal(np.power(1, arr), np.ones_like(arr))
+
class TestLog2(TestCase):
def test_log2_values(self):
diff --git a/numpy/core/tests/test_unicode.py b/numpy/core/tests/test_unicode.py
index 7a421a5fb..9b6519cb3 100644
--- a/numpy/core/tests/test_unicode.py
+++ b/numpy/core/tests/test_unicode.py
@@ -4,7 +4,8 @@ import sys
import numpy as np
from numpy.compat import asbytes, unicode, sixu
-from numpy.testing import TestCase, run_module_suite, assert_equal
+from numpy.testing import (
+ TestCase, run_module_suite, assert_, assert_equal, assert_array_equal)
# Guess the UCS length for this python interpreter
if sys.version_info[:2] >= (3, 3):
@@ -52,6 +53,20 @@ ucs2_value = sixu('\u0900')
ucs4_value = sixu('\U00100900')
+def test_string_cast():
+ str_arr = np.array(["1234", "1234\0\0"], dtype='S')
+ uni_arr1 = str_arr.astype('>U')
+ uni_arr2 = str_arr.astype('<U')
+
+ if sys.version_info[0] < 3:
+ assert_array_equal(str_arr, uni_arr1)
+ assert_array_equal(str_arr, uni_arr2)
+ else:
+ assert_(str_arr != uni_arr1)
+ assert_(str_arr != uni_arr2)
+ assert_array_equal(uni_arr1, uni_arr2)
+
+
############################################################
# Creation tests
############################################################
@@ -302,7 +317,7 @@ class byteorder_values:
# Check byteorder of single-dimensional objects
ua = np.array([self.ucs_value*self.ulen]*2, dtype='U%s' % self.ulen)
ua2 = ua.newbyteorder()
- self.assertTrue(ua[0] != ua2[0])
+ self.assertTrue((ua != ua2).all())
self.assertTrue(ua[-1] != ua2[-1])
ua3 = ua2.newbyteorder()
# Arrays must be equal after the round-trip
@@ -311,14 +326,43 @@ class byteorder_values:
def test_valuesMD(self):
# Check byteorder of multi-dimensional objects
ua = np.array([[[self.ucs_value*self.ulen]*2]*3]*4,
- dtype='U%s' % self.ulen)
+ dtype='U%s' % self.ulen)
ua2 = ua.newbyteorder()
- self.assertTrue(ua[0, 0, 0] != ua2[0, 0, 0])
+ self.assertTrue((ua != ua2).all())
self.assertTrue(ua[-1, -1, -1] != ua2[-1, -1, -1])
ua3 = ua2.newbyteorder()
# Arrays must be equal after the round-trip
assert_equal(ua, ua3)
+ def test_values_cast(self):
+ # Check byteorder of when casting the array for a strided and
+ # contiguous array:
+ test1 = np.array([self.ucs_value*self.ulen]*2, dtype='U%s' % self.ulen)
+ test2 = np.repeat(test1, 2)[::2]
+ for ua in (test1, test2):
+ ua2 = ua.astype(dtype=ua.dtype.newbyteorder())
+ self.assertTrue((ua == ua2).all())
+ self.assertTrue(ua[-1] == ua2[-1])
+ ua3 = ua2.astype(dtype=ua.dtype)
+ # Arrays must be equal after the round-trip
+ assert_equal(ua, ua3)
+
+ def test_values_updowncast(self):
+ # Check byteorder of when casting the array to a longer and shorter
+ # string length for strided and contiguous arrays
+ test1 = np.array([self.ucs_value*self.ulen]*2, dtype='U%s' % self.ulen)
+ test2 = np.repeat(test1, 2)[::2]
+ for ua in (test1, test2):
+ # Cast to a longer type with zero padding
+ longer_type = np.dtype('U%s' % (self.ulen+1)).newbyteorder()
+ ua2 = ua.astype(dtype=longer_type)
+ self.assertTrue((ua == ua2).all())
+ self.assertTrue(ua[-1] == ua2[-1])
+ # Cast back again with truncating:
+ ua3 = ua2.astype(dtype=ua.dtype)
+ # Arrays must be equal after the round-trip
+ assert_equal(ua, ua3)
+
class test_byteorder_1_ucs2(byteorder_values, TestCase):
"""Check the byteorder in unicode (size 1, UCS2 values)"""
diff --git a/numpy/ctypeslib.py b/numpy/ctypeslib.py
index 38173fba4..36bcf2764 100644
--- a/numpy/ctypeslib.py
+++ b/numpy/ctypeslib.py
@@ -178,7 +178,7 @@ class _ndptr(_ndptr_base):
def _check_retval_(self):
"""This method is called when this class is used as the .restype
- asttribute for a shared-library function. It constructs a numpy
+ attribute for a shared-library function. It constructs a numpy
array from a void pointer."""
return array(self)
diff --git a/numpy/distutils/exec_command.py b/numpy/distutils/exec_command.py
index 50f03feeb..4a4bc67f2 100644
--- a/numpy/distutils/exec_command.py
+++ b/numpy/distutils/exec_command.py
@@ -363,10 +363,14 @@ def _exec_command( command, use_shell=None, use_tee = None, **env ):
else:
argv = shlex.split(command)
+ # `spawn*p` family with path (vp, vpe, ...) are not available on windows.
+ # Also prefer spawn{v,vp} in favor of spawn{ve,vpe} if no env
+ # modification is actually requested as the *e* functions are not thread
+ # safe on windows (https://bugs.python.org/issue6476)
if hasattr(os, 'spawnvpe'):
- spawn_command = os.spawnvpe
+ spawn_command = os.spawnvpe if env else os.spawnvp
else:
- spawn_command = os.spawnve
+ spawn_command = os.spawnve if env else os.spawnv
argv[0] = find_executable(argv[0]) or argv[0]
if not os.path.isfile(argv[0]):
log.warn('Executable %s does not exist' % (argv[0]))
@@ -395,7 +399,7 @@ def _exec_command( command, use_shell=None, use_tee = None, **env ):
log.debug('Running %s(%s,%r,%r,os.environ)' \
% (spawn_command.__name__, os.P_WAIT, argv[0], argv))
- if sys.version_info[0] >= 3 and os.name == 'nt':
+ if env and sys.version_info[0] >= 3 and os.name == 'nt':
# Pre-encode os.environ, discarding un-encodable entries,
# to avoid it failing during encoding as part of spawn. Failure
# is possible if the environment contains entries that are not
@@ -431,7 +435,11 @@ def _exec_command( command, use_shell=None, use_tee = None, **env ):
else:
os.dup2(fout.fileno(), se_fileno)
try:
- status = spawn_command(os.P_WAIT, argv0, argv, encoded_environ)
+ # Use spawnv in favor of spawnve, unless necessary
+ if env:
+ status = spawn_command(os.P_WAIT, argv0, argv, encoded_environ)
+ else:
+ status = spawn_command(os.P_WAIT, argv0, argv)
except Exception:
errmess = str(get_exception())
status = 999
diff --git a/numpy/distutils/fcompiler/intel.py b/numpy/distutils/fcompiler/intel.py
index c4f15a073..f3e616e1d 100644
--- a/numpy/distutils/fcompiler/intel.py
+++ b/numpy/distutils/fcompiler/intel.py
@@ -123,7 +123,7 @@ class IntelEM64TFCompiler(IntelFCompiler):
return ['-openmp -fp-model strict -O1']
def get_flags_arch(self):
- return ['-xSSE4.2']
+ return ['']
# Is there no difference in the version string between the above compilers
# and the Visual compilers?
@@ -205,7 +205,7 @@ class IntelEM64VisualFCompiler(IntelVisualFCompiler):
version_match = simple_version_match(start='Intel\(R\).*?64,')
def get_flags_arch(self):
- return ['/QaxSSE4.2']
+ return ['']
if __name__ == '__main__':
diff --git a/numpy/distutils/intelccompiler.py b/numpy/distutils/intelccompiler.py
index 20c6d2ba4..ee089dbae 100644
--- a/numpy/distutils/intelccompiler.py
+++ b/numpy/distutils/intelccompiler.py
@@ -54,7 +54,7 @@ class IntelEM64TCCompiler(UnixCCompiler):
def __init__(self, verbose=0, dry_run=0, force=0):
UnixCCompiler.__init__(self, verbose, dry_run, force)
self.cc_exe = ('icc -m64 -fPIC -fp-model strict -O3 '
- '-fomit-frame-pointer -openmp -xSSE4.2')
+ '-fomit-frame-pointer -openmp')
compiler = self.cc_exe
if platform.system() == 'Darwin':
shared_flag = '-Wl,-undefined,dynamic_lookup'
@@ -88,7 +88,7 @@ if platform.system() == 'Windows':
self.lib = self.find_exe('xilib')
self.linker = self.find_exe('xilink')
self.compile_options = ['/nologo', '/O3', '/MD', '/W3',
- '/Qstd=c99', '/QaxSSE4.2']
+ '/Qstd=c99']
self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3',
'/Qstd=c99', '/Z7', '/D_DEBUG']
diff --git a/numpy/distutils/misc_util.py b/numpy/distutils/misc_util.py
index 79b6f25f3..8136f8f4f 100644
--- a/numpy/distutils/misc_util.py
+++ b/numpy/distutils/misc_util.py
@@ -126,13 +126,13 @@ def allpath(name):
return os.path.join(*splitted)
def rel_path(path, parent_path):
- """Return path relative to parent_path.
- """
- pd = os.path.abspath(parent_path)
- apath = os.path.abspath(path)
- if len(apath)<len(pd):
+ """Return path relative to parent_path."""
+ # Use realpath to avoid issues with symlinked dirs (see gh-7707)
+ pd = os.path.realpath(os.path.abspath(parent_path))
+ apath = os.path.realpath(os.path.abspath(path))
+ if len(apath) < len(pd):
return path
- if apath==pd:
+ if apath == pd:
return ''
if pd == apath[:len(pd)]:
assert apath[len(pd)] in [os.sep], repr((path, apath[len(pd)]))
diff --git a/numpy/distutils/npy_pkg_config.py b/numpy/distutils/npy_pkg_config.py
index fe64709ca..e7d6448ea 100644
--- a/numpy/distutils/npy_pkg_config.py
+++ b/numpy/distutils/npy_pkg_config.py
@@ -5,9 +5,9 @@ import re
import os
if sys.version_info[0] < 3:
- from ConfigParser import SafeConfigParser, NoOptionError
+ from ConfigParser import RawConfigParser, NoOptionError
else:
- from configparser import ConfigParser, SafeConfigParser, NoOptionError
+ from configparser import RawConfigParser, NoOptionError
__all__ = ['FormatError', 'PkgNotFound', 'LibraryInfo', 'VariableSet',
'read_config', 'parse_flags']
@@ -259,11 +259,7 @@ def parse_config(filename, dirs=None):
else:
filenames = [filename]
- if sys.version[:3] > '3.1':
- # SafeConfigParser is deprecated in py-3.2 and renamed to ConfigParser
- config = ConfigParser()
- else:
- config = SafeConfigParser()
+ config = RawConfigParser()
n = config.read(filenames)
if not len(n) >= 1:
diff --git a/numpy/distutils/system_info.py b/numpy/distutils/system_info.py
index 0c3300bca..5ff5041a6 100644
--- a/numpy/distutils/system_info.py
+++ b/numpy/distutils/system_info.py
@@ -129,9 +129,15 @@ import warnings
from glob import glob
from functools import reduce
if sys.version_info[0] < 3:
- from ConfigParser import NoOptionError, ConfigParser
+ from ConfigParser import NoOptionError
+ from ConfigParser import RawConfigParser as ConfigParser
else:
- from configparser import NoOptionError, ConfigParser
+ from configparser import NoOptionError
+ from configparser import RawConfigParser as ConfigParser
+# It seems that some people are importing ConfigParser from here, so it
+# is good to keep its class name. Use of RawConfigParser is needed in
+# order to be able to load path names with percent in them, like
+# `feature%2Fcool` which is common on git flow branch names.
from distutils.errors import DistutilsError
from distutils.dist import Distribution
@@ -965,7 +971,7 @@ class djbfft_info(system_info):
class mkl_info(system_info):
section = 'mkl'
- dir_env_var = 'MKL'
+ dir_env_var = 'MKLROOT'
_lib_mkl = ['mkl', 'vml', 'guide']
def get_mkl_rootdir(self):
@@ -1704,6 +1710,7 @@ class blas_info(system_info):
# cblas or blas
res = False
c = distutils.ccompiler.new_compiler()
+ c.customize('')
tmpdir = tempfile.mkdtemp()
s = """#include <cblas.h>
int main(int argc, const char *argv[])
@@ -1784,6 +1791,7 @@ class openblas_lapack_info(openblas_info):
def check_embedded_lapack(self, info):
res = False
c = distutils.ccompiler.new_compiler()
+ c.customize('')
tmpdir = tempfile.mkdtemp()
s = """void zungqr();
int main(int argc, const char *argv[])
diff --git a/numpy/distutils/tests/test_misc_util.py b/numpy/distutils/tests/test_misc_util.py
index c50b9480b..3e97b6fe2 100644
--- a/numpy/distutils/tests/test_misc_util.py
+++ b/numpy/distutils/tests/test_misc_util.py
@@ -4,7 +4,7 @@ from __future__ import division, absolute_import, print_function
from os.path import join, sep, dirname
from numpy.distutils.misc_util import (
- appendpath, minrelpath, gpaths, get_shared_lib_extension
+ appendpath, minrelpath, gpaths, get_shared_lib_extension, get_info
)
from numpy.testing import (
TestCase, run_module_suite, assert_, assert_equal
@@ -75,5 +75,12 @@ class TestSharedExtension(TestCase):
# just check for no crash
assert_(get_shared_lib_extension(is_python_ext=True))
+
+def test_installed_npymath_ini():
+ # Regression test for gh-7707. If npymath.ini wasn't installed, then this
+ # will give an error.
+ info = get_info('npymath')
+
+
if __name__ == "__main__":
run_module_suite()
diff --git a/numpy/distutils/tests/test_system_info.py b/numpy/distutils/tests/test_system_info.py
index 58ad05a59..0f45cd79e 100644
--- a/numpy/distutils/tests/test_system_info.py
+++ b/numpy/distutils/tests/test_system_info.py
@@ -3,6 +3,8 @@ from __future__ import division, print_function
import os
import shutil
from tempfile import mkstemp, mkdtemp
+from subprocess import Popen, PIPE
+from distutils.errors import DistutilsError
from numpy.distutils import ccompiler
from numpy.testing import TestCase, run_module_suite, assert_, assert_equal
@@ -54,6 +56,27 @@ void bar(void) {
}
"""
+def have_compiler():
+ """ Return True if there appears to be an executable compiler
+ """
+ compiler = ccompiler.new_compiler()
+ try:
+ cmd = compiler.compiler # Unix compilers
+ except AttributeError:
+ try:
+ compiler.initialize() # MSVC is different
+ except DistutilsError:
+ return False
+ cmd = [compiler.cc]
+ try:
+ Popen(cmd, stdout=PIPE, stderr=PIPE)
+ except OSError:
+ return False
+ return True
+
+
+HAVE_COMPILER = have_compiler()
+
class test_system_info(system_info):
@@ -171,38 +194,39 @@ class TestSystemInfoReading(TestCase):
extra = tsi.calc_extra_info()
assert_equal(extra['extra_link_args'], ['-Wl,-rpath=' + self._lib2])
+ @skipif(not HAVE_COMPILER)
def test_compile1(self):
# Compile source and link the first source
c = ccompiler.new_compiler()
+ previousDir = os.getcwd()
try:
# Change directory to not screw up directories
- previousDir = os.getcwd()
os.chdir(self._dir1)
c.compile([os.path.basename(self._src1)], output_dir=self._dir1)
# Ensure that the object exists
assert_(os.path.isfile(self._src1.replace('.c', '.o')) or
os.path.isfile(self._src1.replace('.c', '.obj')))
+ finally:
os.chdir(previousDir)
- except OSError:
- pass
+ @skipif(not HAVE_COMPILER)
@skipif('msvc' in repr(ccompiler.new_compiler()))
def test_compile2(self):
# Compile source and link the second source
tsi = self.c_temp2
c = ccompiler.new_compiler()
extra_link_args = tsi.calc_extra_info()['extra_link_args']
+ previousDir = os.getcwd()
try:
# Change directory to not screw up directories
- previousDir = os.getcwd()
os.chdir(self._dir2)
c.compile([os.path.basename(self._src2)], output_dir=self._dir2,
extra_postargs=extra_link_args)
# Ensure that the object exists
assert_(os.path.isfile(self._src2.replace('.c', '.o')))
+ finally:
os.chdir(previousDir)
- except OSError:
- pass
+
if __name__ == '__main__':
run_module_suite()
diff --git a/numpy/fft/fftpack.py b/numpy/fft/fftpack.py
index fe5b76e1a..8dc3eccbc 100644
--- a/numpy/fft/fftpack.py
+++ b/numpy/fft/fftpack.py
@@ -38,9 +38,10 @@ __all__ = ['fft', 'ifft', 'rfft', 'irfft', 'hfft', 'ihfft', 'rfftn',
from numpy.core import (array, asarray, zeros, swapaxes, shape, conjugate,
take, sqrt)
from . import fftpack_lite as fftpack
+from .helper import _FFTCache
-_fft_cache = {}
-_real_fft_cache = {}
+_fft_cache = _FFTCache(max_size_in_mb=100, max_item_count=32)
+_real_fft_cache = _FFTCache(max_size_in_mb=100, max_item_count=32)
def _raw_fft(a, n=None, axis=-1, init_function=fftpack.cffti,
@@ -54,12 +55,13 @@ def _raw_fft(a, n=None, axis=-1, init_function=fftpack.cffti,
raise ValueError("Invalid number of FFT data points (%d) specified."
% n)
- try:
- # Thread-safety note: We rely on list.pop() here to atomically
- # retrieve-and-remove a wsave from the cache. This ensures that no
- # other thread can get the same wsave while we're using it.
- wsave = fft_cache.setdefault(n, []).pop()
- except (IndexError):
+ # We have to ensure that only a single thread can access a wsave array
+ # at any given time. Thus we remove it from the cache and insert it
+ # again after it has been used. Multiple threads might create multiple
+ # copies of the wsave array. This is intentional and a limitation of
+ # the current C code.
+ wsave = fft_cache.pop_twiddle_factors(n)
+ if wsave is None:
wsave = init_function(n)
if a.shape[axis] != n:
@@ -85,7 +87,7 @@ def _raw_fft(a, n=None, axis=-1, init_function=fftpack.cffti,
# As soon as we put wsave back into the cache, another thread could pick it
# up and start using it, so we must not do this until after we're
# completely done using it ourselves.
- fft_cache[n].append(wsave)
+ fft_cache.put_twiddle_factors(n, wsave)
return r
diff --git a/numpy/fft/helper.py b/numpy/fft/helper.py
index 160120e58..0832bc5a4 100644
--- a/numpy/fft/helper.py
+++ b/numpy/fft/helper.py
@@ -4,6 +4,9 @@ Discrete Fourier Transforms - helper.py
"""
from __future__ import division, absolute_import, print_function
+import collections
+import threading
+
from numpy.compat import integer_types
from numpy.core import (
asarray, concatenate, arange, take, integer, empty
@@ -222,3 +225,99 @@ def rfftfreq(n, d=1.0):
N = n//2 + 1
results = arange(0, N, dtype=int)
return results * val
+
+
+class _FFTCache(object):
+ """
+ Cache for the FFT twiddle factors as an LRU (least recently used) cache.
+
+ Parameters
+ ----------
+ max_size_in_mb : int
+ Maximum memory usage of the cache before items are being evicted.
+ max_item_count : int
+ Maximum item count of the cache before items are being evicted.
+
+ Notes
+ -----
+ Items will be evicted if either limit has been reached upon getting and
+ setting. The maximum memory usages is not strictly the given
+ ``max_size_in_mb`` but rather
+ ``max(max_size_in_mb, 1.5 * size_of_largest_item)``. Thus the cache will
+ never be completely cleared - at least one item will remain and a single
+ large item can cause the cache to retain several smaller items even if the
+ given maximum cache size has been exceeded.
+ """
+ def __init__(self, max_size_in_mb, max_item_count):
+ self._max_size_in_bytes = max_size_in_mb * 1024 ** 2
+ self._max_item_count = max_item_count
+ self._dict = collections.OrderedDict()
+ self._lock = threading.Lock()
+
+ def put_twiddle_factors(self, n, factors):
+ """
+ Store twiddle factors for an FFT of length n in the cache.
+
+        Putting multiple twiddle factors for a certain n will store them multiple
+ times.
+
+ Parameters
+ ----------
+ n : int
+ Data length for the FFT.
+ factors : ndarray
+ The actual twiddle values.
+ """
+ with self._lock:
+ # Pop + later add to move it to the end for LRU behavior.
+ # Internally everything is stored in a dictionary whose values are
+ # lists.
+ try:
+ value = self._dict.pop(n)
+ except KeyError:
+ value = []
+ value.append(factors)
+ self._dict[n] = value
+ self._prune_cache()
+
+ def pop_twiddle_factors(self, n):
+ """
+ Pop twiddle factors for an FFT of length n from the cache.
+
+ Will return None if the requested twiddle factors are not available in
+ the cache.
+
+ Parameters
+ ----------
+ n : int
+ Data length for the FFT.
+
+ Returns
+ -------
+ out : ndarray or None
+ The retrieved twiddle factors if available, else None.
+ """
+ with self._lock:
+ if n not in self._dict or not self._dict[n]:
+ return None
+ # Pop + later add to move it to the end for LRU behavior.
+ all_values = self._dict.pop(n)
+ value = all_values.pop()
+            # Only put back if there are still some arrays left in the list.
+ if all_values:
+ self._dict[n] = all_values
+ return value
+
+ def _prune_cache(self):
+ # Always keep at least one item.
+ while len(self._dict) > 1 and (
+ len(self._dict) > self._max_item_count or self._check_size()):
+ self._dict.popitem(last=False)
+
+ def _check_size(self):
+ item_sizes = [sum(_j.nbytes for _j in _i)
+ for _i in self._dict.values() if _i]
+ if not item_sizes:
+ return False
+ max_size = max(self._max_size_in_bytes, 1.5 * max(item_sizes))
+ return sum(item_sizes) > max_size
diff --git a/numpy/fft/tests/test_helper.py b/numpy/fft/tests/test_helper.py
index 1a51f8e3a..cb85755d2 100644
--- a/numpy/fft/tests/test_helper.py
+++ b/numpy/fft/tests/test_helper.py
@@ -10,6 +10,7 @@ import numpy as np
from numpy.testing import TestCase, run_module_suite, assert_array_almost_equal
from numpy import fft
from numpy import pi
+from numpy.fft.helper import _FFTCache
class TestFFTShift(TestCase):
@@ -74,5 +75,83 @@ class TestIRFFTN(TestCase):
fft.irfftn(a, axes=axes)
+class TestFFTCache(TestCase):
+
+ def test_basic_behaviour(self):
+ c = _FFTCache(max_size_in_mb=1, max_item_count=4)
+
+ # Put
+ c.put_twiddle_factors(1, np.ones(2, dtype=np.float32))
+ c.put_twiddle_factors(2, np.zeros(2, dtype=np.float32))
+
+ # Get
+ assert_array_almost_equal(c.pop_twiddle_factors(1),
+ np.ones(2, dtype=np.float32))
+ assert_array_almost_equal(c.pop_twiddle_factors(2),
+ np.zeros(2, dtype=np.float32))
+
+ # Nothing should be left.
+ self.assertEqual(len(c._dict), 0)
+
+ # Now put everything in twice so it can be retrieved once and each will
+ # still have one item left.
+ for _ in range(2):
+ c.put_twiddle_factors(1, np.ones(2, dtype=np.float32))
+ c.put_twiddle_factors(2, np.zeros(2, dtype=np.float32))
+ assert_array_almost_equal(c.pop_twiddle_factors(1),
+ np.ones(2, dtype=np.float32))
+ assert_array_almost_equal(c.pop_twiddle_factors(2),
+ np.zeros(2, dtype=np.float32))
+ self.assertEqual(len(c._dict), 2)
+
+ def test_automatic_pruning(self):
+ # That's around 2600 single precision samples.
+ c = _FFTCache(max_size_in_mb=0.01, max_item_count=4)
+
+ c.put_twiddle_factors(1, np.ones(200, dtype=np.float32))
+ c.put_twiddle_factors(2, np.ones(200, dtype=np.float32))
+ self.assertEqual(list(c._dict.keys()), [1, 2])
+
+ # This is larger than the limit but should still be kept.
+ c.put_twiddle_factors(3, np.ones(3000, dtype=np.float32))
+ self.assertEqual(list(c._dict.keys()), [1, 2, 3])
+ # Add one more.
+ c.put_twiddle_factors(4, np.ones(3000, dtype=np.float32))
+ # The other three should no longer exist.
+ self.assertEqual(list(c._dict.keys()), [4])
+
+ # Now test the max item count pruning.
+ c = _FFTCache(max_size_in_mb=0.01, max_item_count=2)
+ c.put_twiddle_factors(2, np.empty(2))
+ c.put_twiddle_factors(1, np.empty(2))
+ # Can still be accessed.
+ self.assertEqual(list(c._dict.keys()), [2, 1])
+
+ c.put_twiddle_factors(3, np.empty(2))
+ # 1 and 3 can still be accessed - c[2] has been touched least recently
+ # and is thus evicted.
+ self.assertEqual(list(c._dict.keys()), [1, 3])
+
+ # One last test. We will add a single large item that is slightly
+        # bigger than the cache size. Some small items can still be added.
+ c = _FFTCache(max_size_in_mb=0.01, max_item_count=5)
+ c.put_twiddle_factors(1, np.ones(3000, dtype=np.float32))
+ c.put_twiddle_factors(2, np.ones(2, dtype=np.float32))
+ c.put_twiddle_factors(3, np.ones(2, dtype=np.float32))
+ c.put_twiddle_factors(4, np.ones(2, dtype=np.float32))
+ self.assertEqual(list(c._dict.keys()), [1, 2, 3, 4])
+
+ # One more big item. This time it is 6 smaller ones but they are
+ # counted as one big item.
+ for _ in range(6):
+ c.put_twiddle_factors(5, np.ones(500, dtype=np.float32))
+ # '1' no longer in the cache. Rest still in the cache.
+ self.assertEqual(list(c._dict.keys()), [2, 3, 4, 5])
+
+ # Another big item - should now be the only item in the cache.
+ c.put_twiddle_factors(6, np.ones(4000, dtype=np.float32))
+ self.assertEqual(list(c._dict.keys()), [6])
+
+
if __name__ == "__main__":
run_module_suite()
diff --git a/numpy/lib/format.py b/numpy/lib/format.py
index a0f2c5497..e62677bd8 100644
--- a/numpy/lib/format.py
+++ b/numpy/lib/format.py
@@ -558,8 +558,11 @@ def write_array(fp, array, version=None, allow_pickle=True, pickle_kwargs=None):
warnings.warn("Stored array in format 2.0. It can only be"
"read by NumPy >= 1.9", UserWarning)
- # Set buffer size to 16 MiB to hide the Python loop overhead.
- buffersize = max(16 * 1024 ** 2 // array.itemsize, 1)
+ if array.itemsize == 0:
+ buffersize = 0
+ else:
+ # Set buffer size to 16 MiB to hide the Python loop overhead.
+ buffersize = max(16 * 1024 ** 2 // array.itemsize, 1)
if array.dtype.hasobject:
# We contain Python objects so we cannot write out the data
@@ -623,7 +626,7 @@ def read_array(fp, allow_pickle=True, pickle_kwargs=None):
if len(shape) == 0:
count = 1
else:
- count = numpy.multiply.reduce(shape)
+ count = numpy.multiply.reduce(shape, dtype=numpy.int64)
# Now read the actual data.
if dtype.hasobject:
@@ -655,15 +658,21 @@ def read_array(fp, allow_pickle=True, pickle_kwargs=None):
# of the read. In non-chunked case count < max_read_count, so
# only one read is performed.
- max_read_count = BUFFER_SIZE // min(BUFFER_SIZE, dtype.itemsize)
-
- array = numpy.empty(count, dtype=dtype)
- for i in range(0, count, max_read_count):
- read_count = min(max_read_count, count - i)
- read_size = int(read_count * dtype.itemsize)
- data = _read_bytes(fp, read_size, "array data")
- array[i:i+read_count] = numpy.frombuffer(data, dtype=dtype,
- count=read_count)
+ # Use np.ndarray instead of np.empty since the latter does
+ # not correctly instantiate zero-width string dtypes; see
+ # https://github.com/numpy/numpy/pull/6430
+ array = numpy.ndarray(count, dtype=dtype)
+
+ if dtype.itemsize > 0:
+ # If dtype.itemsize == 0 then there's nothing more to read
+ max_read_count = BUFFER_SIZE // min(BUFFER_SIZE, dtype.itemsize)
+
+ for i in range(0, count, max_read_count):
+ read_count = min(max_read_count, count - i)
+ read_size = int(read_count * dtype.itemsize)
+ data = _read_bytes(fp, read_size, "array data")
+ array[i:i+read_count] = numpy.frombuffer(data, dtype=dtype,
+ count=read_count)
if fortran_order:
array.shape = shape[::-1]
diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index b119f667a..1e44345b0 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -7,11 +7,11 @@ import operator
import numpy as np
import numpy.core.numeric as _nx
-from numpy.core import linspace, atleast_1d, atleast_2d
+from numpy.core import linspace, atleast_1d, atleast_2d, transpose
from numpy.core.numeric import (
ones, zeros, arange, concatenate, array, asarray, asanyarray, empty,
empty_like, ndarray, around, floor, ceil, take, dot, where, intp,
- integer, isscalar
+ integer, isscalar, absolute
)
from numpy.core.umath import (
pi, multiply, add, arctan2, frompyfunc, cos, less_equal, sqrt, sin,
@@ -23,8 +23,10 @@ from numpy.core.fromnumeric import (
from numpy.core.numerictypes import typecodes, number
from numpy.lib.twodim_base import diag
from .utils import deprecate
-from numpy.core.multiarray import _insert, add_docstring
-from numpy.core.multiarray import digitize, bincount, interp as compiled_interp
+from numpy.core.multiarray import (
+ _insert, add_docstring, digitize, bincount,
+ interp as compiled_interp, interp_complex as compiled_interp_complex
+ )
from numpy.core.umath import _add_newdoc_ufunc as add_newdoc_ufunc
from numpy.compat import long
from numpy.compat.py3k import basestring
@@ -37,7 +39,7 @@ if sys.version_info[0] < 3:
__all__ = [
'select', 'piecewise', 'trim_zeros', 'copy', 'iterable', 'percentile',
'diff', 'gradient', 'angle', 'unwrap', 'sort_complex', 'disp', 'flip',
- 'extract', 'place', 'vectorize', 'asarray_chkfinite', 'average',
+ 'rot90', 'extract', 'place', 'vectorize', 'asarray_chkfinite', 'average',
'histogram', 'histogramdd', 'bincount', 'digitize', 'cov', 'corrcoef',
'msort', 'median', 'sinc', 'hamming', 'hanning', 'bartlett',
'blackman', 'kaiser', 'trapz', 'i0', 'add_newdoc', 'add_docstring',
@@ -45,6 +47,92 @@ __all__ = [
]
+def rot90(m, k=1, axes=(0,1)):
+ """
+ Rotate an array by 90 degrees in the plane specified by axes.
+
+ Rotation direction is from the first towards the second axis.
+
+ .. versionadded:: 1.12.0
+
+ Parameters
+ ----------
+ m : array_like
+ Array of two or more dimensions.
+ k : integer
+ Number of times the array is rotated by 90 degrees.
+ axes: (2,) array_like
+ The array is rotated in the plane defined by the axes.
+ Axes must be different.
+
+ Returns
+ -------
+ y : ndarray
+ A rotated view of `m`.
+
+ See Also
+ --------
+ flip : Reverse the order of elements in an array along the given axis.
+ fliplr : Flip an array horizontally.
+ flipud : Flip an array vertically.
+
+ Notes
+ -----
+ rot90(m, k=1, axes=(1,0)) is the reverse of rot90(m, k=1, axes=(0,1))
+ rot90(m, k=1, axes=(1,0)) is equivalent to rot90(m, k=-1, axes=(0,1))
+
+ Examples
+ --------
+ >>> m = np.array([[1,2],[3,4]], int)
+ >>> m
+ array([[1, 2],
+ [3, 4]])
+ >>> np.rot90(m)
+ array([[2, 4],
+ [1, 3]])
+ >>> np.rot90(m, 2)
+ array([[4, 3],
+ [2, 1]])
+ >>> m = np.arange(8).reshape((2,2,2))
+ >>> np.rot90(m, 1, (1,2))
+ array([[[1, 3],
+ [0, 2]],
+
+ [[5, 7],
+ [4, 6]]])
+
+ """
+ axes = tuple(axes)
+ if len(axes) != 2:
+ raise ValueError("len(axes) must be 2.")
+
+ m = asanyarray(m)
+
+ if axes[0] == axes[1] or absolute(axes[0] - axes[1]) == m.ndim:
+ raise ValueError("Axes must be different.")
+
+ if (axes[0] >= m.ndim or axes[0] < -m.ndim
+ or axes[1] >= m.ndim or axes[1] < -m.ndim):
+ raise ValueError("Axes={} out of range for array of ndim={}."
+ .format(axes, m.ndim))
+
+ k %= 4
+
+ if k == 0:
+ return m[:]
+ if k == 2:
+ return flip(flip(m, axes[0]), axes[1])
+
+ axes_list = arange(0, m.ndim)
+ axes_list[axes[0]], axes_list[axes[1]] = axes_list[axes[1]], axes_list[axes[0]]
+
+ if k == 1:
+ return transpose(flip(m,axes[1]), axes_list)
+ else:
+ # k == 3
+ return flip(transpose(m, axes_list), axes[1])
+
+
def flip(m, axis):
"""
Reverse the order of elements in an array along the given axis.
@@ -57,7 +145,7 @@ def flip(m, axis):
----------
m : array_like
Input array.
- axis: integer
+ axis : integer
Axis in array, which entries are reversed.
@@ -637,6 +725,9 @@ def histogram(a, bins=10, range=None, normed=False, weights=None,
# Pre-compute histogram scaling factor
norm = bins / (mx - mn)
+ # Compute the bin edges for potential correction.
+ bin_edges = linspace(mn, mx, bins + 1, endpoint=True)
+
# We iterate over blocks here for two reasons: the first is that for
# large arrays, it is actually faster (for example for a 10^8 array it
# is 2x as fast) and it results in a memory footprint 3x lower in the
@@ -655,8 +746,8 @@ def histogram(a, bins=10, range=None, normed=False, weights=None,
tmp_a = tmp_a[keep]
if tmp_w is not None:
tmp_w = tmp_w[keep]
- tmp_a = tmp_a.astype(float)
- tmp_a -= mn
+ tmp_a_data = tmp_a.astype(float)
+ tmp_a = tmp_a_data - mn
tmp_a *= norm
# Compute the bin indices, and for values that lie exactly on mx we
@@ -664,6 +755,14 @@ def histogram(a, bins=10, range=None, normed=False, weights=None,
indices = tmp_a.astype(np.intp)
indices[indices == bins] -= 1
+ # The index computation is not guaranteed to give exactly
+ # consistent results within ~1 ULP of the bin edges.
+ decrement = tmp_a_data < bin_edges[indices]
+ indices[decrement] -= 1
+ # The last bin includes the right edge. The other bins do not.
+ increment = (tmp_a_data >= bin_edges[indices + 1]) & (indices != bins - 1)
+ indices[increment] += 1
+
# We now compute the histogram using bincount
if ntype.kind == 'c':
n.real += np.bincount(indices, weights=tmp_w.real, minlength=bins)
@@ -671,8 +770,8 @@ def histogram(a, bins=10, range=None, normed=False, weights=None,
else:
n += np.bincount(indices, weights=tmp_w, minlength=bins).astype(ntype)
- # We now compute the bin edges since these are returned
- bins = linspace(mn, mx, bins + 1, endpoint=True)
+ # Rename the bin edges for return.
+ bins = bin_edges
else:
bins = asarray(bins)
if (np.diff(bins) < 0).any():
@@ -989,7 +1088,19 @@ def average(a, axis=None, weights=None, returned=False):
TypeError: Axis must be specified when shapes of a and weights differ.
"""
- a = np.asanyarray(a)
+ # 3/19/2016 1.12.0:
+ # replace the next few lines with "a = np.asanyarray(a)"
+ if (type(a) not in (np.ndarray, np.matrix) and
+ issubclass(type(a), np.ndarray)):
+ warnings.warn("np.average currently does not preserve subclasses, but "
+ "will do so in the future to match the behavior of most "
+ "other numpy functions such as np.mean. In particular, "
+ "this means calls which returned a scalar may return a "
+ "0-d subclass object instead.",
+ FutureWarning, stacklevel=2)
+
+ if not isinstance(a, np.matrix):
+ a = np.asarray(a)
if weights is None:
avg = a.mean(axis)
@@ -1410,9 +1521,10 @@ def gradient(f, *varargs, **kwargs):
Returns
-------
- gradient : list of ndarray
- Each element of `list` has the same shape as `f` giving the derivative
- of `f` with respect to each dimension.
+ gradient : ndarray or list of ndarray
+ A set of ndarrays (or a single ndarray if there is only one dimension)
+        corresponding to the derivatives of f with respect to each dimension.
+ Each derivative has the same shape as f.
Examples
--------
@@ -1432,9 +1544,8 @@ def gradient(f, *varargs, **kwargs):
[ 1. , 1. , 1. ]])]
>>> x = np.array([0, 1, 2, 3, 4])
- >>> dx = np.gradient(x)
>>> y = x**2
- >>> np.gradient(y, dx, edge_order=2)
+ >>> np.gradient(y, edge_order=2)
array([-0., 2., 4., 6., 8.])
The axis keyword can be used to specify a subset of axes of which the gradient is calculated
@@ -1472,6 +1583,8 @@ def gradient(f, *varargs, **kwargs):
else:
raise SyntaxError(
"invalid number of arguments")
+ if any([not np.isscalar(dxi) for dxi in dx]):
+ raise ValueError("distances must be scalars")
edge_order = kwargs.pop('edge_order', 1)
if kwargs:
@@ -1661,13 +1774,13 @@ def interp(x, xp, fp, left=None, right=None, period=None):
`period` is not specified. Otherwise, `xp` is internally sorted after
normalizing the periodic boundaries with ``xp = xp % period``.
- fp : 1-D sequence of floats
+ fp : 1-D sequence of float or complex
The y-coordinates of the data points, same length as `xp`.
- left : float, optional
+ left : optional float or complex corresponding to fp
Value to return for `x < xp[0]`, default is `fp[0]`.
- right : float, optional
+ right : optional float or complex corresponding to fp
Value to return for `x > xp[-1]`, default is `fp[-1]`.
period : None or float, optional
@@ -1679,7 +1792,7 @@ def interp(x, xp, fp, left=None, right=None, period=None):
Returns
-------
- y : float or ndarray
+ y : float or complex (corresponding to fp) or ndarray
The interpolated values, same shape as `x`.
Raises
@@ -1730,14 +1843,31 @@ def interp(x, xp, fp, left=None, right=None, period=None):
>>> np.interp(x, xp, fp, period=360)
array([7.5, 5., 8.75, 6.25, 3., 3.25, 3.5, 3.75])
+ Complex interpolation
+ >>> x = [1.5, 4.0]
+ >>> xp = [2,3,5]
+ >>> fp = [1.0j, 0, 2+3j]
+ >>> np.interp(x, xp, fp)
+ array([ 0.+1.j , 1.+1.5j])
+
"""
+
+ fp = np.asarray(fp)
+
+ if np.iscomplexobj(fp):
+ interp_func = compiled_interp_complex
+ input_dtype = np.complex128
+ else:
+ interp_func = compiled_interp
+ input_dtype = np.float64
+
if period is None:
if isinstance(x, (float, int, number)):
- return compiled_interp([x], xp, fp, left, right).item()
+ return interp_func([x], xp, fp, left, right).item()
elif isinstance(x, np.ndarray) and x.ndim == 0:
- return compiled_interp([x], xp, fp, left, right).item()
+ return interp_func([x], xp, fp, left, right).item()
else:
- return compiled_interp(x, xp, fp, left, right)
+ return interp_func(x, xp, fp, left, right)
else:
if period == 0:
raise ValueError("period must be a non-zero value")
@@ -1750,7 +1880,8 @@ def interp(x, xp, fp, left=None, right=None, period=None):
x = [x]
x = np.asarray(x, dtype=np.float64)
xp = np.asarray(xp, dtype=np.float64)
- fp = np.asarray(fp, dtype=np.float64)
+ fp = np.asarray(fp, dtype=input_dtype)
+
if xp.ndim != 1 or fp.ndim != 1:
raise ValueError("Data points must be 1-D sequences")
if xp.shape[0] != fp.shape[0]:
@@ -1763,12 +1894,12 @@ def interp(x, xp, fp, left=None, right=None, period=None):
fp = fp[asort_xp]
xp = np.concatenate((xp[-1:]-period, xp, xp[0:1]+period))
fp = np.concatenate((fp[-1:], fp, fp[0:1]))
+
if return_array:
- return compiled_interp(x, xp, fp, left, right)
+ return interp_func(x, xp, fp, left, right)
else:
- return compiled_interp(x, xp, fp, left, right).item()
-
-
+ return interp_func(x, xp, fp, left, right).item()
+
def angle(z, deg=0):
"""
Return the angle of the complex argument.
diff --git a/numpy/lib/npyio.py b/numpy/lib/npyio.py
index a6e4a8dac..9e3127c89 100644
--- a/numpy/lib/npyio.py
+++ b/numpy/lib/npyio.py
@@ -14,12 +14,12 @@ from ._datasource import DataSource
from numpy.core.multiarray import packbits, unpackbits
from ._iotools import (
LineSplitter, NameValidator, StringConverter, ConverterError,
- ConverterLockError, ConversionWarning, _is_string_like, has_nested_fields,
- flatten_dtype, easy_dtype, _bytes_to_name
+ ConverterLockError, ConversionWarning, _is_string_like,
+ has_nested_fields, flatten_dtype, easy_dtype, _bytes_to_name
)
from numpy.compat import (
- asbytes, asstr, asbytes_nested, bytes, basestring, unicode
+ asbytes, asstr, asbytes_nested, bytes, basestring, unicode, is_pathlib_path
)
if sys.version_info[0] >= 3:
@@ -86,10 +86,19 @@ class BagObj(object):
return object.__getattribute__(self, '_obj').keys()
-def zipfile_factory(*args, **kwargs):
+def zipfile_factory(file, *args, **kwargs):
+ """
+ Create a ZipFile.
+
+ Allows for Zip64, and the `file` argument can accept file, str, or
+ pathlib.Path objects. `args` and `kwargs` are passed to the zipfile.ZipFile
+ constructor.
+ """
+ if is_pathlib_path(file):
+ file = str(file)
import zipfile
kwargs['allowZip64'] = True
- return zipfile.ZipFile(*args, **kwargs)
+ return zipfile.ZipFile(file, *args, **kwargs)
class NpzFile(object):
@@ -261,7 +270,7 @@ def load(file, mmap_mode=None, allow_pickle=True, fix_imports=True,
Parameters
----------
- file : file-like object or string
+ file : file-like object, string, or pathlib.Path
The file to read. File-like objects must support the
``seek()`` and ``read()`` methods. Pickled files require that the
file-like object support the ``readline()`` method as well.
@@ -355,12 +364,13 @@ def load(file, mmap_mode=None, allow_pickle=True, fix_imports=True,
memmap([4, 5, 6])
"""
- import gzip
-
own_fid = False
if isinstance(file, basestring):
fid = open(file, "rb")
own_fid = True
+ elif is_pathlib_path(file):
+ fid = file.open("rb")
+ own_fid = True
else:
fid = file
@@ -389,7 +399,9 @@ def load(file, mmap_mode=None, allow_pickle=True, fix_imports=True,
_ZIP_PREFIX = asbytes('PK\x03\x04')
N = len(format.MAGIC_PREFIX)
magic = fid.read(N)
- fid.seek(-N, 1) # back-up
+ # If the file size is less than N, we need to make sure not
+ # to seek past the beginning of the file
+ fid.seek(-min(N, len(magic)), 1) # back-up
if magic.startswith(_ZIP_PREFIX):
# zip-file (assume .npz)
# Transfer file ownership to NpzFile
@@ -425,9 +437,9 @@ def save(file, arr, allow_pickle=True, fix_imports=True):
Parameters
----------
- file : file or str
+ file : file, str, or pathlib.Path
File or filename to which the data is saved. If file is a file-object,
- then the filename is unchanged. If file is a string, a ``.npy``
+ then the filename is unchanged. If file is a string or Path, a ``.npy``
extension will be appended to the file name if it does not already
have one.
allow_pickle : bool, optional
@@ -476,6 +488,11 @@ def save(file, arr, allow_pickle=True, fix_imports=True):
file = file + '.npy'
fid = open(file, "wb")
own_fid = True
+ elif is_pathlib_path(file):
+ if not file.name.endswith('.npy'):
+ file = file.parent / (file.name + '.npy')
+ fid = file.open("wb")
+ own_fid = True
else:
fid = file
@@ -507,8 +524,9 @@ def savez(file, *args, **kwds):
----------
file : str or file
Either the file name (string) or an open file (file-like object)
- where the data will be saved. If file is a string, the ``.npz``
- extension will be appended to the file name if it is not already there.
+ where the data will be saved. If file is a string or a Path, the
+ ``.npz`` extension will be appended to the file name if it is not
+ already there.
args : Arguments, optional
Arrays to save to the file. Since it is not possible for Python to
know the names of the arrays outside `savez`, the arrays will be saved
@@ -610,6 +628,9 @@ def _savez(file, args, kwds, compress, allow_pickle=True, pickle_kwargs=None):
if isinstance(file, basestring):
if not file.endswith('.npz'):
file = file + '.npz'
+ elif is_pathlib_path(file):
+ if not file.name.endswith('.npz'):
+ file = file.parent / (file.name + '.npz')
namedict = kwds
for i, val in enumerate(args):
@@ -695,7 +716,7 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
Parameters
----------
- fname : file or str
+ fname : file, str, or pathlib.Path
File, filename, or generator to read. If the filename extension is
``.gz`` or ``.bz2``, the file is first decompressed. Note that
generators should return byte strings for Python 3k.
@@ -722,12 +743,12 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
Skip the first `skiprows` lines; default: 0.
usecols : int or sequence, optional
- Which columns to read, with 0 being the first. For example,
+ Which columns to read, with 0 being the first. For example,
usecols = (1,4,5) will extract the 2nd, 5th and 6th columns.
The default, None, results in all columns being read.
-
+
.. versionadded:: 1.11.0
-
+
Also when a single column has to be read it is possible to use
     an integer instead of a tuple. E.g. ``usecols = 3`` reads the
     third column the same way as ``usecols = (3,)`` would.
@@ -822,6 +843,8 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
fown = False
try:
+ if is_pathlib_path(fname):
+ fname = str(fname)
if _is_string_like(fname):
fown = True
if fname.endswith('.gz'):
@@ -1018,7 +1041,7 @@ def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
a) a single specifier, `fmt='%.4e'`, resulting in numbers formatted
like `' (%s+%sj)' % (fmt, fmt)`
b) a full string specifying every real and imaginary part, e.g.
- `' %.4e %+.4j %.4e %+.4j %.4e %+.4j'` for 3 columns
+ `' %.4e %+.4ej %.4e %+.4ej %.4e %+.4ej'` for 3 columns
c) a list of specifiers, one per column - in this case, the real
and imaginary part must have separate specifiers,
e.g. `['%.3e + %.3ej', '(%.15e%+.15ej)']` for 2 columns
@@ -1117,6 +1140,8 @@ def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
delimiter = asstr(delimiter)
own_fh = False
+ if is_pathlib_path(fname):
+ fname = str(fname)
if _is_string_like(fname):
own_fh = True
if fname.endswith('.gz'):
@@ -1302,7 +1327,7 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
Parameters
----------
- fname : file, str, list of str, generator
+ fname : file, str, pathlib.Path, list of str, generator
File, filename, list, or generator to read. If the filename
     extension is `.gz` or `.bz2`, the file is first decompressed. Note
that generators must return byte strings in Python 3k. The strings
@@ -1477,6 +1502,8 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
# Initialize the filehandle, the LineSplitter and the NameValidator
own_fhd = False
try:
+ if is_pathlib_path(fname):
+ fname = str(fname)
if isinstance(fname, basestring):
if sys.version_info[0] == 2:
fhd = iter(np.lib._datasource.open(fname, 'rbU'))
diff --git a/numpy/lib/polynomial.py b/numpy/lib/polynomial.py
index 189e59154..d96b8969f 100644
--- a/numpy/lib/polynomial.py
+++ b/numpy/lib/polynomial.py
@@ -15,7 +15,7 @@ import numpy.core.numeric as NX
from numpy.core import (isscalar, abs, finfo, atleast_1d, hstack, dot, array,
ones)
from numpy.lib.twodim_base import diag, vander
-from numpy.lib.function_base import trim_zeros, sort_complex
+from numpy.lib.function_base import trim_zeros
from numpy.lib.type_check import iscomplex, real, imag, mintypecode
from numpy.linalg import eigvals, lstsq, inv
@@ -145,11 +145,7 @@ def poly(seq_of_zeros):
if issubclass(a.dtype.type, NX.complexfloating):
# if complex roots are all complex conjugates, the roots are real.
roots = NX.asarray(seq_of_zeros, complex)
- pos_roots = sort_complex(NX.compress(roots.imag > 0, roots))
- neg_roots = NX.conjugate(sort_complex(
- NX.compress(roots.imag < 0, roots)))
- if (len(pos_roots) == len(neg_roots) and
- NX.alltrue(neg_roots == pos_roots)):
+ if NX.all(NX.sort(roots) == NX.sort(roots.conjugate())):
a = a.real.copy()
return a
@@ -439,7 +435,7 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
Polynomial coefficients, highest power first. If `y` was 2-D, the
coefficients for `k`-th data set are in ``p[:,k]``.
- residuals, rank, singular_values, rcond :
+ residuals, rank, singular_values, rcond
Present only if `full` = True. Residuals of the least-squares fit,
the effective rank of the scaled Vandermonde coefficient matrix,
its singular values, and the specified value of `rcond`. For more
@@ -603,6 +599,9 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
# it is included here because the covariance of Multivariate Student-T
# (which is implied by a Bayesian uncertainty analysis) includes it.
# Plus, it gives a slightly more conservative estimate of uncertainty.
+ if len(x) <= order + 2:
+ raise ValueError("the number of data points must exceed order + 2 "
+ "for Bayesian estimate the covariance matrix")
fac = resids / (len(x) - order - 2.0)
if y.ndim == 1:
return c, Vbase * fac
diff --git a/numpy/lib/stride_tricks.py b/numpy/lib/stride_tricks.py
index a87c34fb5..f390cf49b 100644
--- a/numpy/lib/stride_tricks.py
+++ b/numpy/lib/stride_tricks.py
@@ -244,7 +244,7 @@ def broadcast_arrays(*args, **kwargs):
subok = kwargs.pop('subok', False)
if kwargs:
raise TypeError('broadcast_arrays() got an unexpected keyword '
- 'argument {}'.format(kwargs.pop()))
+ 'argument {!r}'.format(kwargs.keys()[0]))
args = [np.array(_m, copy=False, subok=subok) for _m in args]
shape = _broadcast_shape(*args)
diff --git a/numpy/lib/tests/test_format.py b/numpy/lib/tests/test_format.py
index a091ef5b3..892b32a9c 100644
--- a/numpy/lib/tests/test_format.py
+++ b/numpy/lib/tests/test_format.py
@@ -635,6 +635,7 @@ def test_version_2_0():
assert_raises(ValueError, format.write_array, f, d, (1, 0))
+@dec.slow
def test_version_2_0_memmap():
# requires more than 2 byte for header
dt = [(("%d" % i) * 100, float) for i in range(500)]
@@ -836,5 +837,26 @@ def test_large_file_support():
assert_array_equal(r, d)
+@dec.slow
+@dec.skipif(np.dtype(np.intp).itemsize < 8, "test requires 64-bit system")
+def test_large_archive():
+    # Regression test for saving arrays whose dimensions have a product
+    # that doesn't fit in int32. See gh-7598 for details.
+ try:
+ a = np.empty((2**30, 2), dtype=np.uint8)
+ except MemoryError:
+ raise SkipTest("Could not create large file")
+
+ fname = os.path.join(tempdir, "large_archive")
+
+ with open(fname, "wb") as f:
+ np.savez(f, arr=a)
+
+ with open(fname, "rb") as f:
+ new_a = np.load(f)["arr"]
+
+ assert_(a.shape == new_a.shape)
+
+
if __name__ == "__main__":
run_module_suite()
diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py
index 34dfd5ecc..c2bcc62ba 100644
--- a/numpy/lib/tests/test_function_base.py
+++ b/numpy/lib/tests/test_function_base.py
@@ -16,7 +16,7 @@ from numpy.lib import (
add_newdoc_ufunc, angle, average, bartlett, blackman, corrcoef, cov,
delete, diff, digitize, extract, flipud, gradient, hamming, hanning,
histogram, histogramdd, i0, insert, interp, kaiser, meshgrid, msort,
- piecewise, place, select, setxor1d, sinc, split, trapz, trim_zeros,
+ piecewise, place, rot90, select, setxor1d, sinc, split, trapz, trim_zeros,
unwrap, unique, vectorize,
)
@@ -29,7 +29,76 @@ def get_mat(n):
return data
+class TestRot90(TestCase):
+ def test_basic(self):
+ self.assertRaises(ValueError, rot90, np.ones(4))
+ assert_raises(ValueError, rot90, np.ones((2,2,2)), axes=(0,1,2))
+ assert_raises(ValueError, rot90, np.ones((2,2)), axes=(0,2))
+ assert_raises(ValueError, rot90, np.ones((2,2)), axes=(1,1))
+ assert_raises(ValueError, rot90, np.ones((2,2,2)), axes=(-2,1))
+
+ a = [[0, 1, 2],
+ [3, 4, 5]]
+ b1 = [[2, 5],
+ [1, 4],
+ [0, 3]]
+ b2 = [[5, 4, 3],
+ [2, 1, 0]]
+ b3 = [[3, 0],
+ [4, 1],
+ [5, 2]]
+ b4 = [[0, 1, 2],
+ [3, 4, 5]]
+
+ for k in range(-3, 13, 4):
+ assert_equal(rot90(a, k=k), b1)
+ for k in range(-2, 13, 4):
+ assert_equal(rot90(a, k=k), b2)
+ for k in range(-1, 13, 4):
+ assert_equal(rot90(a, k=k), b3)
+ for k in range(0, 13, 4):
+ assert_equal(rot90(a, k=k), b4)
+
+ assert_equal(rot90(rot90(a, axes=(0,1)), axes=(1,0)), a)
+ assert_equal(rot90(a, k=1, axes=(1,0)), rot90(a, k=-1, axes=(0,1)))
+
+ def test_axes(self):
+ a = np.ones((50, 40, 3))
+ assert_equal(rot90(a).shape, (40, 50, 3))
+ assert_equal(rot90(a, axes=(0,2)), rot90(a, axes=(0,-1)))
+ assert_equal(rot90(a, axes=(1,2)), rot90(a, axes=(-2,-1)))
+
+ def test_rotation_axes(self):
+ a = np.arange(8).reshape((2,2,2))
+
+ a_rot90_01 = [[[2, 3],
+ [6, 7]],
+ [[0, 1],
+ [4, 5]]]
+ a_rot90_12 = [[[1, 3],
+ [0, 2]],
+ [[5, 7],
+ [4, 6]]]
+ a_rot90_20 = [[[4, 0],
+ [6, 2]],
+ [[5, 1],
+ [7, 3]]]
+ a_rot90_10 = [[[4, 5],
+ [0, 1]],
+ [[6, 7],
+ [2, 3]]]
+
+ assert_equal(rot90(a, axes=(0, 1)), a_rot90_01)
+ assert_equal(rot90(a, axes=(1, 0)), a_rot90_10)
+ assert_equal(rot90(a, axes=(1, 2)), a_rot90_12)
+
+ for k in range(1,5):
+ assert_equal(rot90(a, k=k, axes=(2, 0)),
+ rot90(a_rot90_20, k=k-1, axes=(2, 0)))
+
+
class TestFlip(TestCase):
+
def test_axes(self):
self.assertRaises(ValueError, np.flip, np.ones(4), axis=1)
self.assertRaises(ValueError, np.flip, np.ones((4, 4)), axis=2)
@@ -58,13 +127,11 @@ class TestFlip(TestCase):
def test_3d_swap_axis0(self):
a = np.array([[[0, 1],
[2, 3]],
-
[[4, 5],
[6, 7]]])
b = np.array([[[4, 5],
[6, 7]],
-
[[0, 1],
[2, 3]]])
@@ -73,13 +140,11 @@ class TestFlip(TestCase):
def test_3d_swap_axis1(self):
a = np.array([[[0, 1],
[2, 3]],
-
[[4, 5],
[6, 7]]])
b = np.array([[[2, 3],
[0, 1]],
-
[[6, 7],
[4, 5]]])
@@ -87,16 +152,14 @@ class TestFlip(TestCase):
def test_3d_swap_axis2(self):
a = np.array([[[0, 1],
- [2, 3]],
-
- [[4, 5],
- [6, 7]]])
+ [2, 3]],
+ [[4, 5],
+ [6, 7]]])
b = np.array([[[1, 0],
- [3, 2]],
-
- [[5, 4],
- [7, 6]]])
+ [3, 2]],
+ [[5, 4],
+ [7, 6]]])
assert_equal(np.flip(a, 2), b)
@@ -666,6 +729,9 @@ class TestGradient(TestCase):
assert_raises(SyntaxError, gradient, x, np.array([1., 1.]),
np.array([1., 1.]), np.array([1., 1.]))
+ # disallow arrays as distances, see gh-6847
+ assert_raises(ValueError, gradient, np.arange(5), np.ones(5))
+
def test_masked(self):
# Make sure that gradient supports subclasses like masked arrays
x = np.ma.array([[1, 1], [3, 4]],
@@ -1404,6 +1470,22 @@ class TestHistogram(TestCase):
assert_raises(ValueError, histogram, vals, range=[np.nan,0.75])
assert_raises(ValueError, histogram, vals, range=[0.25,np.inf])
+ def test_bin_edge_cases(self):
+ # Ensure that floating-point computations correctly place edge cases.
+ arr = np.array([337, 404, 739, 806, 1007, 1811, 2012])
+ hist, edges = np.histogram(arr, bins=8296, range=(2, 2280))
+ mask = hist > 0
+ left_edges = edges[:-1][mask]
+ right_edges = edges[1:][mask]
+ for x, left, right in zip(arr, left_edges, right_edges):
+ self.assertGreaterEqual(x, left)
+ self.assertLess(x, right)
+
+ def test_last_bin_inclusive_range(self):
+ arr = np.array([0., 0., 0., 1., 2., 3., 3., 4., 5.])
+ hist, edges = np.histogram(arr, bins=30, range=(-0.5, 5))
+ self.assertEqual(hist[-1], 1)
+
class TestHistogramOptimBinNums(TestCase):
"""
@@ -2232,6 +2314,28 @@ class TestInterp(TestCase):
assert_almost_equal(np.interp(x0, x, y), x0)
x0 = np.nan
assert_almost_equal(np.interp(x0, x, y), x0)
+
+ def test_complex_interp(self):
+ # test complex interpolation
+ x = np.linspace(0, 1, 5)
+ y = np.linspace(0, 1, 5) + (1 + np.linspace(0, 1, 5))*1.0j
+ x0 = 0.3
+ y0 = x0 + (1+x0)*1.0j
+ assert_almost_equal(np.interp(x0, x, y), y0)
+ # test complex left and right
+ x0 = -1
+ left = 2 + 3.0j
+ assert_almost_equal(np.interp(x0, x, y, left=left), left)
+ x0 = 2.0
+ right = 2 + 3.0j
+ assert_almost_equal(np.interp(x0, x, y, right=right), right)
+ # test complex periodic
+ x = [-180, -170, -185, 185, -10, -5, 0, 365]
+ xp = [190, -190, 350, -350]
+ fp = [5+1.0j, 10+2j, 3+3j, 4+4j]
+ y = [7.5+1.5j, 5.+1.0j, 8.75+1.75j, 6.25+1.25j, 3.+3j, 3.25+3.25j,
+ 3.5+3.5j, 3.75+3.75j]
+ assert_almost_equal(np.interp(x, xp, fp, period=360), y)
def test_zero_dimensional_interpolation_point(self):
x = np.linspace(0, 1, 5)
diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py
index c0f8c1953..de73d57f7 100644
--- a/numpy/lib/tests/test_io.py
+++ b/numpy/lib/tests/test_io.py
@@ -14,14 +14,13 @@ from datetime import datetime
import numpy as np
import numpy.ma as ma
from numpy.lib._iotools import ConverterError, ConversionWarning
-from numpy.compat import asbytes, bytes, unicode
+from numpy.compat import asbytes, bytes, unicode, Path
from numpy.ma.testutils import assert_equal
from numpy.testing import (
TestCase, run_module_suite, assert_warns, assert_,
assert_raises_regex, assert_raises, assert_allclose,
- assert_array_equal,temppath
+ assert_array_equal, temppath, dec
)
-from numpy.testing.utils import tempdir
class TextIO(BytesIO):
@@ -158,6 +157,7 @@ class RoundtripTest(object):
a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
self.check_roundtrips(a)
+ @dec.slow
def test_format_2_0(self):
dt = [(("%d" % i) * 100, float) for i in range(500)]
a = np.ones(1000, dtype=dt)
@@ -1829,6 +1829,109 @@ M 33 21.99
assert_equal(test['f1'], 17179869184)
assert_equal(test['f2'], 1024)
+
+class TestPathUsage(TestCase):
+ # Test that pathlib.Path can be used
+ @np.testing.dec.skipif(Path is None, "No pathlib.Path")
+ def test_loadtxt(self):
+ with temppath(suffix='.txt') as path:
+ path = Path(path)
+ a = np.array([[1.1, 2], [3, 4]])
+ np.savetxt(path, a)
+ x = np.loadtxt(path)
+ assert_array_equal(x, a)
+
+ @np.testing.dec.skipif(Path is None, "No pathlib.Path")
+ def test_save_load(self):
+        # Test that pathlib.Path instances can be used with save and load.
+ with temppath(suffix='.npy') as path:
+ path = Path(path)
+ a = np.array([[1, 2], [3, 4]], int)
+ np.save(path, a)
+ data = np.load(path)
+ assert_array_equal(data, a)
+
+ @np.testing.dec.skipif(Path is None, "No pathlib.Path")
+ def test_savez_load(self):
+ # Test that pathlib.Path instances can be used with savez.
+ with temppath(suffix='.npz') as path:
+ path = Path(path)
+ np.savez(path, lab='place holder')
+ with np.load(path) as data:
+ assert_array_equal(data['lab'], 'place holder')
+
+ @np.testing.dec.skipif(Path is None, "No pathlib.Path")
+ def test_savez_compressed_load(self):
+        # Test that pathlib.Path instances can be used with savez_compressed.
+ with temppath(suffix='.npz') as path:
+ path = Path(path)
+ np.savez_compressed(path, lab='place holder')
+ data = np.load(path)
+ assert_array_equal(data['lab'], 'place holder')
+ data.close()
+
+ @np.testing.dec.skipif(Path is None, "No pathlib.Path")
+ def test_genfromtxt(self):
+ with temppath(suffix='.txt') as path:
+ path = Path(path)
+ a = np.array([(1, 2), (3, 4)])
+ np.savetxt(path, a)
+ data = np.genfromtxt(path)
+ assert_array_equal(a, data)
+
+ @np.testing.dec.skipif(Path is None, "No pathlib.Path")
+ def test_ndfromtxt(self):
+        # Test outputting a standard ndarray
+ with temppath(suffix='.txt') as path:
+ path = Path(path)
+ with path.open('w') as f:
+ f.write(u'1 2\n3 4')
+
+ control = np.array([[1, 2], [3, 4]], dtype=int)
+ test = np.ndfromtxt(path, dtype=int)
+ assert_array_equal(test, control)
+
+ @np.testing.dec.skipif(Path is None, "No pathlib.Path")
+ def test_mafromtxt(self):
+ # From `test_fancy_dtype_alt` above
+ with temppath(suffix='.txt') as path:
+ path = Path(path)
+ with path.open('w') as f:
+ f.write(u'1,2,3.0\n4,5,6.0\n')
+
+ test = np.mafromtxt(path, delimiter=',')
+ control = ma.array([(1.0, 2.0, 3.0), (4.0, 5.0, 6.0)])
+ assert_equal(test, control)
+
+ @np.testing.dec.skipif(Path is None, "No pathlib.Path")
+ def test_recfromtxt(self):
+ with temppath(suffix='.txt') as path:
+ path = Path(path)
+ with path.open('w') as f:
+ f.write(u'A,B\n0,1\n2,3')
+
+ kwargs = dict(delimiter=",", missing_values="N/A", names=True)
+ test = np.recfromtxt(path, **kwargs)
+ control = np.array([(0, 1), (2, 3)],
+ dtype=[('A', np.int), ('B', np.int)])
+ self.assertTrue(isinstance(test, np.recarray))
+ assert_equal(test, control)
+
+ @np.testing.dec.skipif(Path is None, "No pathlib.Path")
+ def test_recfromcsv(self):
+ with temppath(suffix='.txt') as path:
+ path = Path(path)
+ with path.open('w') as f:
+ f.write(u'A,B\n0,1\n2,3')
+
+ kwargs = dict(missing_values="N/A", names=True, case_sensitive=True)
+ test = np.recfromcsv(path, dtype=None, **kwargs)
+ control = np.array([(0, 1), (2, 3)],
+ dtype=[('A', np.int), ('B', np.int)])
+ self.assertTrue(isinstance(test, np.recarray))
+ assert_equal(test, control)
+
+
def test_gzip_load():
a = np.random.random((5, 5))
diff --git a/numpy/lib/tests/test_polynomial.py b/numpy/lib/tests/test_polynomial.py
index 5c15941e6..00dffd3d3 100644
--- a/numpy/lib/tests/test_polynomial.py
+++ b/numpy/lib/tests/test_polynomial.py
@@ -81,7 +81,7 @@ poly1d([ 2.])
import numpy as np
from numpy.testing import (
run_module_suite, TestCase, assert_, assert_equal, assert_array_equal,
- assert_almost_equal, rundocs
+ assert_almost_equal, assert_array_almost_equal, assert_raises, rundocs
)
@@ -89,6 +89,30 @@ class TestDocs(TestCase):
def test_doctests(self):
return rundocs()
+ def test_poly(self):
+ assert_array_almost_equal(np.poly([3, -np.sqrt(2), np.sqrt(2)]),
+ [1, -3, -2, 6])
+
+ # From matlab docs
+ A = [[1, 2, 3], [4, 5, 6], [7, 8, 0]]
+ assert_array_almost_equal(np.poly(A), [1, -6, -72, -27])
+
+ # Should produce real output for perfect conjugates
+ assert_(np.isrealobj(np.poly([+1.082j, +2.613j, -2.613j, -1.082j])))
+ assert_(np.isrealobj(np.poly([0+1j, -0+-1j, 1+2j,
+ 1-2j, 1.+3.5j, 1-3.5j])))
+ assert_(np.isrealobj(np.poly([1j, -1j, 1+2j, 1-2j, 1+3j, 1-3.j])))
+ assert_(np.isrealobj(np.poly([1j, -1j, 1+2j, 1-2j])))
+ assert_(np.isrealobj(np.poly([1j, -1j, 2j, -2j])))
+ assert_(np.isrealobj(np.poly([1j, -1j])))
+ assert_(np.isrealobj(np.poly([1, -1])))
+
+ assert_(np.iscomplexobj(np.poly([1j, -1.0000001j])))
+
+ np.random.seed(42)
+ a = np.random.randn(100) + 1j*np.random.randn(100)
+ assert_(np.isrealobj(np.poly(np.concatenate((a, np.conjugate(a))))))
+
def test_roots(self):
assert_array_equal(np.roots([1, 0, 0]), [0, 0])
@@ -111,6 +135,12 @@ class TestDocs(TestCase):
err = [1, -1, 1, -1, 1, -1, 1]
weights = np.arange(8, 1, -1)**2/7.0
+ # Check exception when too few points for variance estimate. Note that
+ # the Bayesian estimate requires the number of data points to exceed
+ # degree + 3.
+ assert_raises(ValueError, np.polyfit,
+ [0, 1, 3], [0, 1, 3], deg=0, cov=True)
+
# check 1D case
m, cov = np.polyfit(x, y+err, 2, cov=True)
est = [3.8571, 0.2857, 1.619]
diff --git a/numpy/lib/tests/test_twodim_base.py b/numpy/lib/tests/test_twodim_base.py
index b65a8df97..31925d5fe 100644
--- a/numpy/lib/tests/test_twodim_base.py
+++ b/numpy/lib/tests/test_twodim_base.py
@@ -5,11 +5,11 @@ from __future__ import division, absolute_import, print_function
from numpy.testing import (
TestCase, run_module_suite, assert_equal, assert_array_equal,
- assert_array_max_ulp, assert_array_almost_equal, assert_raises, rand,
+ assert_array_max_ulp, assert_array_almost_equal, assert_raises
)
from numpy import (
- arange, rot90, add, fliplr, flipud, zeros, ones, eye, array, diag,
+ arange, add, fliplr, flipud, zeros, ones, eye, array, diag,
histogram2d, tri, mask_indices, triu_indices, triu_indices_from,
tril_indices, tril_indices_from, vander,
)
@@ -169,37 +169,6 @@ class TestFlipud(TestCase):
assert_equal(flipud(a), b)
-class TestRot90(TestCase):
- def test_basic(self):
- self.assertRaises(ValueError, rot90, ones(4))
-
- a = [[0, 1, 2],
- [3, 4, 5]]
- b1 = [[2, 5],
- [1, 4],
- [0, 3]]
- b2 = [[5, 4, 3],
- [2, 1, 0]]
- b3 = [[3, 0],
- [4, 1],
- [5, 2]]
- b4 = [[0, 1, 2],
- [3, 4, 5]]
-
- for k in range(-3, 13, 4):
- assert_equal(rot90(a, k=k), b1)
- for k in range(-2, 13, 4):
- assert_equal(rot90(a, k=k), b2)
- for k in range(-1, 13, 4):
- assert_equal(rot90(a, k=k), b3)
- for k in range(0, 13, 4):
- assert_equal(rot90(a, k=k), b4)
-
- def test_axes(self):
- a = ones((50, 40, 3))
- assert_equal(rot90(a).shape, (40, 50, 3))
-
-
class TestHistogram2d(TestCase):
def test_simple(self):
x = array(
@@ -254,7 +223,7 @@ class TestHistogram2d(TestCase):
assert_array_almost_equal(H, answer, 3)
def test_all_outliers(self):
- r = rand(100) + 1. + 1e6 # histogramdd rounds by decimal=6
+ r = np.random.rand(100) + 1. + 1e6 # histogramdd rounds by decimal=6
H, xed, yed = histogram2d(r, r, (4, 5), range=([0, 1], [0, 1]))
assert_array_equal(H, 0)
@@ -267,10 +236,10 @@ class TestHistogram2d(TestCase):
def test_binparameter_combination(self):
x = array(
- [0, 0.09207008, 0.64575234, 0.12875982, 0.47390599,
+ [0, 0.09207008, 0.64575234, 0.12875982, 0.47390599,
0.59944483, 1])
y = array(
- [0, 0.14344267, 0.48988575, 0.30558665, 0.44700682,
+ [0, 0.14344267, 0.48988575, 0.30558665, 0.44700682,
0.15886423, 1])
edges = (0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1)
H, xe, ye = histogram2d(x, y, (edges, 4))
diff --git a/numpy/lib/twodim_base.py b/numpy/lib/twodim_base.py
index aefe8d64b..8858f5bad 100644
--- a/numpy/lib/twodim_base.py
+++ b/numpy/lib/twodim_base.py
@@ -4,14 +4,14 @@
from __future__ import division, absolute_import, print_function
from numpy.core.numeric import (
- asanyarray, arange, zeros, greater_equal, multiply, ones, asarray,
- where, int8, int16, int32, int64, empty, promote_types, diagonal,
+ absolute, asanyarray, arange, zeros, greater_equal, multiply, ones,
+ asarray, where, int8, int16, int32, int64, empty, promote_types, diagonal,
)
-from numpy.core import iinfo
+from numpy.core import iinfo, transpose
__all__ = [
- 'diag', 'diagflat', 'eye', 'fliplr', 'flipud', 'rot90', 'tri', 'triu',
+ 'diag', 'diagflat', 'eye', 'fliplr', 'flipud', 'tri', 'triu',
'tril', 'vander', 'histogram2d', 'mask_indices', 'tril_indices',
'tril_indices_from', 'triu_indices', 'triu_indices_from', ]
@@ -136,59 +136,6 @@ def flipud(m):
return m[::-1, ...]
-def rot90(m, k=1):
- """
- Rotate an array by 90 degrees in the counter-clockwise direction.
-
- The first two dimensions are rotated; therefore, the array must be at
- least 2-D.
-
- Parameters
- ----------
- m : array_like
- Array of two or more dimensions.
- k : integer
- Number of times the array is rotated by 90 degrees.
-
- Returns
- -------
- y : ndarray
- Rotated array.
-
- See Also
- --------
- fliplr : Flip an array horizontally.
- flipud : Flip an array vertically.
-
- Examples
- --------
- >>> m = np.array([[1,2],[3,4]], int)
- >>> m
- array([[1, 2],
- [3, 4]])
- >>> np.rot90(m)
- array([[2, 4],
- [1, 3]])
- >>> np.rot90(m, 2)
- array([[4, 3],
- [2, 1]])
-
- """
- m = asanyarray(m)
- if m.ndim < 2:
- raise ValueError("Input must >= 2-d.")
- k = k % 4
- if k == 0:
- return m
- elif k == 1:
- return fliplr(m).swapaxes(0, 1)
- elif k == 2:
- return fliplr(flipud(m))
- else:
- # k == 3
- return fliplr(m.swapaxes(0, 1))
-
-
def eye(N, M=None, k=0, dtype=float):
"""
Return a 2-D array with ones on the diagonal and zeros elsewhere.
diff --git a/numpy/lib/utils.py b/numpy/lib/utils.py
index 3f29699e9..a2191468f 100644
--- a/numpy/lib/utils.py
+++ b/numpy/lib/utils.py
@@ -393,9 +393,9 @@ def _info(obj, output=sys.stdout):
Parameters
----------
- obj: ndarray
+ obj : ndarray
Must be ndarray, not checked.
- output:
+ output
Where printed output goes.
Notes
diff --git a/numpy/linalg/linalg.py b/numpy/linalg/linalg.py
index 9d486d2a5..c0a84fc4f 100644
--- a/numpy/linalg/linalg.py
+++ b/numpy/linalg/linalg.py
@@ -23,7 +23,7 @@ from numpy.core import (
csingle, cdouble, inexact, complexfloating, newaxis, ravel, all, Inf, dot,
add, multiply, sqrt, maximum, fastCopyAndTranspose, sum, isfinite, size,
finfo, errstate, geterrobj, longdouble, rollaxis, amin, amax, product, abs,
- broadcast, atleast_2d, intp, asanyarray, isscalar
+ broadcast, atleast_2d, intp, asanyarray, isscalar, object_
)
from numpy.lib import triu, asfarray
from numpy.linalg import lapack_lite, _umath_linalg
@@ -1804,8 +1804,9 @@ def lstsq(a, b, rcond=-1):
of `b`.
rcond : float, optional
Cut-off ratio for small singular values of `a`.
- Singular values are set to zero if they are smaller than `rcond`
- times the largest singular value of `a`.
+ For the purposes of rank determination, singular values are treated
+ as zero if they are smaller than `rcond` times the largest singular
+ value of `a`.
Returns
-------
@@ -2112,7 +2113,7 @@ def norm(x, ord=None, axis=None, keepdims=False):
"""
x = asarray(x)
- if not issubclass(x.dtype.type, inexact):
+ if not issubclass(x.dtype.type, (inexact, object_)):
x = x.astype(float)
# Immediately handle some default, simple, fast, and common cases.
@@ -2346,12 +2347,12 @@ def _multi_dot_three(A, B, C):
than `_multi_dot_matrix_chain_order`
"""
- # cost1 = cost((AB)C)
- cost1 = (A.shape[0] * A.shape[1] * B.shape[1] + # (AB)
- A.shape[0] * B.shape[1] * C.shape[1]) # (--)C
- # cost2 = cost((AB)C)
- cost2 = (B.shape[0] * B.shape[1] * C.shape[1] + # (BC)
- A.shape[0] * A.shape[1] * C.shape[1]) # A(--)
+ a0, a1b0 = A.shape
+ b1c0, c1 = C.shape
+ # cost1 = cost((AB)C) = a0*a1b0*b1c0 + a0*b1c0*c1
+ cost1 = a0 * b1c0 * (a1b0 + c1)
+ # cost2 = cost(A(BC)) = a1b0*b1c0*c1 + a0*a1b0*c1
+ cost2 = a1b0 * c1 * (a0 + b1c0)
if cost1 < cost2:
return dot(dot(A, B), C)
diff --git a/numpy/linalg/tests/test_regression.py b/numpy/linalg/tests/test_regression.py
index 54a67bce3..6991628fb 100644
--- a/numpy/linalg/tests/test_regression.py
+++ b/numpy/linalg/tests/test_regression.py
@@ -90,6 +90,52 @@ class TestRegression(TestCase):
assert_equal(np.linalg.matrix_rank(a), 1)
assert_array_less(1, np.linalg.norm(a, ord=2))
+ def test_norm_object_array(self):
+ # gh-7575
+ testvector = np.array([np.array([0, 1]), 0, 0], dtype=object)
+
+ norm = linalg.norm(testvector)
+ assert_array_equal(norm, [0, 1])
+ self.assertEqual(norm.dtype, np.dtype('float64'))
+
+ norm = linalg.norm(testvector, ord=1)
+ assert_array_equal(norm, [0, 1])
+ self.assertNotEqual(norm.dtype, np.dtype('float64'))
+
+ norm = linalg.norm(testvector, ord=2)
+ assert_array_equal(norm, [0, 1])
+ self.assertEqual(norm.dtype, np.dtype('float64'))
+
+ self.assertRaises(ValueError, linalg.norm, testvector, ord='fro')
+ self.assertRaises(ValueError, linalg.norm, testvector, ord='nuc')
+ self.assertRaises(ValueError, linalg.norm, testvector, ord=np.inf)
+ self.assertRaises(ValueError, linalg.norm, testvector, ord=-np.inf)
+ self.assertRaises((AttributeError, DeprecationWarning),
+ linalg.norm, testvector, ord=0)
+ self.assertRaises(ValueError, linalg.norm, testvector, ord=-1)
+ self.assertRaises(ValueError, linalg.norm, testvector, ord=-2)
+
+ testmatrix = np.array([[np.array([0, 1]), 0, 0],
+ [0, 0, 0]], dtype=object)
+
+ norm = linalg.norm(testmatrix)
+ assert_array_equal(norm, [0, 1])
+ self.assertEqual(norm.dtype, np.dtype('float64'))
+
+ norm = linalg.norm(testmatrix, ord='fro')
+ assert_array_equal(norm, [0, 1])
+ self.assertEqual(norm.dtype, np.dtype('float64'))
+
+ self.assertRaises(TypeError, linalg.norm, testmatrix, ord='nuc')
+ self.assertRaises(ValueError, linalg.norm, testmatrix, ord=np.inf)
+ self.assertRaises(ValueError, linalg.norm, testmatrix, ord=-np.inf)
+ self.assertRaises(ValueError, linalg.norm, testmatrix, ord=0)
+ self.assertRaises(ValueError, linalg.norm, testmatrix, ord=1)
+ self.assertRaises(ValueError, linalg.norm, testmatrix, ord=-1)
+ self.assertRaises(TypeError, linalg.norm, testmatrix, ord=2)
+ self.assertRaises(TypeError, linalg.norm, testmatrix, ord=-2)
+ self.assertRaises(ValueError, linalg.norm, testmatrix, ord=3)
+
if __name__ == '__main__':
run_module_suite()
diff --git a/numpy/ma/core.py b/numpy/ma/core.py
index e908a952c..29b818c06 100644
--- a/numpy/ma/core.py
+++ b/numpy/ma/core.py
@@ -2709,8 +2709,11 @@ class MaskedArray(ndarray):
_defaultmask = nomask
_defaulthardmask = False
_baseclass = ndarray
- # Maximum number of elements per axis used when printing an array.
+
+ # Maximum number of elements per axis used when printing an array. The
+ # 1d case is handled separately because we need more values in this case.
_print_width = 100
+ _print_width_1d = 1500
def __new__(cls, data=None, mask=nomask, dtype=None, copy=False,
subok=True, ndmin=0, fill_value=None, keep_mask=True,
@@ -2967,25 +2970,28 @@ class MaskedArray(ndarray):
else:
with np.errstate(divide='ignore', invalid='ignore'):
d = filled(domain(*args), True)
- # Fill the result where the domain is wrong
- try:
- # Binary domain: take the last value
- fill_value = ufunc_fills[func][-1]
- except TypeError:
- # Unary domain: just use this one
- fill_value = ufunc_fills[func]
- except KeyError:
- # Domain not recognized, use fill_value instead
- fill_value = self.fill_value
- result = result.copy()
- np.copyto(result, fill_value, where=d)
- # Update the mask
- if m is nomask:
- if d is not nomask:
+
+ if d.any():
+ # Fill the result where the domain is wrong
+ try:
+ # Binary domain: take the last value
+ fill_value = ufunc_fills[func][-1]
+ except TypeError:
+ # Unary domain: just use this one
+ fill_value = ufunc_fills[func]
+ except KeyError:
+ # Domain not recognized, use fill_value instead
+ fill_value = self.fill_value
+ result = result.copy()
+ np.copyto(result, fill_value, where=d)
+
+ # Update the mask
+ if m is nomask:
m = d
- else:
- # Don't modify inplace, we risk back-propagation
- m = (m | d)
+ else:
+ # Don't modify inplace, we risk back-propagation
+ m = (m | d)
+
# Make sure the mask has the proper size
if result.shape == () and m:
return masked
@@ -3796,9 +3802,11 @@ class MaskedArray(ndarray):
mask = m
# For big arrays, to avoid a costly conversion to the
# object dtype, extract the corners before the conversion.
+ print_width = (self._print_width if self.ndim > 1
+ else self._print_width_1d)
for axis in range(self.ndim):
- if data.shape[axis] > self._print_width:
- ind = self._print_width // 2
+ if data.shape[axis] > print_width:
+ ind = print_width // 2
arr = np.split(data, (ind, -ind), axis=axis)
data = np.concatenate((arr[0], arr[2]), axis=axis)
arr = np.split(mask, (ind, -ind), axis=axis)
@@ -5610,9 +5618,10 @@ class MaskedArray(ndarray):
maskindices = getattr(indices, '_mask', nomask)
if maskindices is not nomask:
indices = indices.filled(0)
- # Get the data
+ # Get the data, promoting scalars to 0d arrays with [...] so that
+ # .view works correctly
if out is None:
- out = _data.take(indices, axis=axis, mode=mode).view(cls)
+ out = _data.take(indices, axis=axis, mode=mode)[...].view(cls)
else:
np.take(_data, indices, axis=axis, mode=mode, out=out)
# Get the mask
@@ -5623,7 +5632,8 @@ class MaskedArray(ndarray):
outmask = _mask.take(indices, axis=axis, mode=mode)
outmask |= maskindices
out.__setmask__(outmask)
- return out
+ # demote 0d arrays back to scalars, for consistency with ndarray.take
+ return out[()]
# Array methods
copy = _arraymethod('copy')
@@ -5934,7 +5944,14 @@ class mvoid(MaskedArray):
return self._data.__str__()
printopt = masked_print_option
rdtype = _recursive_make_descr(self._data.dtype, "O")
- res = np.array([self._data]).astype(rdtype)
+
+ # temporary hack to fix gh-7493. A more permanent fix
+ # is proposed in gh-6053, after which the next two
+ # lines should be changed to
+ # res = np.array([self._data], dtype=rdtype)
+ res = np.empty(1, rdtype)
+ res[:1] = self._data
+
_recursive_printoption(res, self._mask, printopt)
return str(res[0])
diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py
index ea8f9e49a..781b25449 100644
--- a/numpy/ma/extras.py
+++ b/numpy/ma/extras.py
@@ -27,7 +27,7 @@ import warnings
from . import core as ma
from .core import (
- MaskedArray, MAError, add, array, asarray, concatenate, filled,
+ MaskedArray, MAError, add, array, asarray, concatenate, filled, count,
getmask, getmaskarray, make_mask_descr, masked, masked_array, mask_or,
nomask, ones, sort, zeros, getdata, get_masked_subclass, dot,
mask_rowcols
@@ -653,6 +653,10 @@ def _median(a, axis=None, out=None, overwrite_input=False):
elif axis < 0:
axis += a.ndim
+ if asorted.ndim == 1:
+ idx, odd = divmod(count(asorted), 2)
+ return asorted[idx - (not odd) : idx + 1].mean()
+
counts = asorted.shape[axis] - (asorted.mask).sum(axis=axis)
h = counts // 2
# create indexing mesh grid for all but reduced axis
@@ -661,10 +665,10 @@ def _median(a, axis=None, out=None, overwrite_input=False):
ind = np.meshgrid(*axes_grid, sparse=True, indexing='ij')
# insert indices of low and high median
ind.insert(axis, h - 1)
- low = asorted[ind]
+ low = asorted[tuple(ind)]
low._sharedmask = False
ind[axis] = h
- high = asorted[ind]
+ high = asorted[tuple(ind)]
# duplicate high if odd number of elements so mean does nothing
odd = counts % 2 == 1
if asorted.ndim == 1:
diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py
index 217f307c6..5c7ae4356 100644
--- a/numpy/ma/tests/test_core.py
+++ b/numpy/ma/tests/test_core.py
@@ -451,6 +451,15 @@ class TestMaskedArray(TestCase):
' mask = [False True False],\n'
' fill_value = 999999)\n')
+ a = np.ma.arange(2000)
+ a[1:50] = np.ma.masked
+ assert_equal(
+ repr(a),
+ 'masked_array(data = [0 -- -- ..., 1997 1998 1999],\n'
+ ' mask = [False True True ..., False False False],\n'
+ ' fill_value = 999999)\n'
+ )
+
def test_pickling(self):
# Tests pickling
a = arange(10)
@@ -757,6 +766,10 @@ class TestMaskedArray(TestCase):
finally:
masked_print_option.set_display(ini_display)
+ # also check if there are object datatypes (see gh-7493)
+ mx = array([(1,), (2,)], dtype=[('a', 'O')])
+ assert_equal(str(mx[0]), "(1,)")
+
def test_mvoid_multidim_print(self):
# regression test for gh-6019
@@ -1197,6 +1210,12 @@ class TestMaskedArrayArithmetic(TestCase):
a /= 1.
assert_equal(a.mask, [0, 0, 0])
+ def test_ufunc_nomask(self):
+        # check the case where ufuncs should set the mask to False
+ m = np.ma.array([1])
+ # check we don't get array([False], dtype=bool)
+ assert_equal(np.true_divide(m, 5).mask.shape, ())
+
def test_noshink_on_creation(self):
# Check that the mask is not shrunk on array creation when not wanted
a = np.ma.masked_values([1., 2.5, 3.1], 1.5, shrink=False)
@@ -2998,6 +3017,10 @@ class TestMaskedArrayMethods(TestCase):
assert_equal(x.take([[0, 1], [0, 1]]),
masked_array([[10, 20], [10, 20]], [[0, 1], [0, 1]]))
+ # assert_equal crashes when passed np.ma.mask
+ self.assertIs(x[1], np.ma.masked)
+ self.assertIs(x.take(1), np.ma.masked)
+
x = array([[10, 20, 30], [40, 50, 60]], mask=[[0, 0, 1], [1, 0, 0, ]])
assert_equal(x.take([0, 2], axis=1),
array([[10, 30], [40, 60]], mask=[[0, 1], [1, 0]]))
diff --git a/numpy/ma/tests/test_extras.py b/numpy/ma/tests/test_extras.py
index 52669cb90..b2c053fbd 100644
--- a/numpy/ma/tests/test_extras.py
+++ b/numpy/ma/tests/test_extras.py
@@ -662,6 +662,19 @@ class TestMedian(TestCase):
assert_equal(np.ma.median(np.arange(9)), 4.)
assert_equal(np.ma.median(range(9)), 4)
+ def test_masked_1d(self):
+ "test the examples given in the docstring of ma.median"
+ x = array(np.arange(8), mask=[0]*4 + [1]*4)
+ assert_equal(np.ma.median(x), 1.5)
+ assert_equal(np.ma.median(x).shape, (), "shape mismatch")
+ x = array(np.arange(10).reshape(2, 5), mask=[0]*6 + [1]*4)
+ assert_equal(np.ma.median(x), 2.5)
+ assert_equal(np.ma.median(x).shape, (), "shape mismatch")
+
+ def test_1d_shape_consistency(self):
+ assert_equal(np.ma.median(array([1,2,3],mask=[0,0,0])).shape,
+ np.ma.median(array([1,2,3],mask=[0,1,0])).shape )
+
def test_2d(self):
# Tests median w/ 2D
(n, p) = (101, 30)
diff --git a/numpy/polynomial/_polybase.py b/numpy/polynomial/_polybase.py
index 6fa72b6f9..09190ce4e 100644
--- a/numpy/polynomial/_polybase.py
+++ b/numpy/polynomial/_polybase.py
@@ -223,13 +223,13 @@ class ABCPolyBase(object):
Returns
-------
- coef:
+ coef
The coefficients of`other` if it is a compatible instance,
of ABCPolyBase, otherwise `other`.
Raises
------
- TypeError:
+ TypeError
When `other` is an incompatible instance of ABCPolyBase.
"""
diff --git a/numpy/polynomial/polynomial.py b/numpy/polynomial/polynomial.py
index 5d05f5991..c310d659d 100644
--- a/numpy/polynomial/polynomial.py
+++ b/numpy/polynomial/polynomial.py
@@ -36,6 +36,7 @@ Misc Functions
--------------
- `polyfromroots` -- create a polynomial with specified roots.
- `polyroots` -- find the roots of a polynomial.
+- `polyvalfromroots` -- evaluate a polynomial at given points from roots.
- `polyvander` -- Vandermonde-like matrix for powers.
- `polyvander2d` -- Vandermonde-like matrix for 2D power series.
- `polyvander3d` -- Vandermonde-like matrix for 3D power series.
@@ -58,8 +59,8 @@ from __future__ import division, absolute_import, print_function
__all__ = [
'polyzero', 'polyone', 'polyx', 'polydomain', 'polyline', 'polyadd',
'polysub', 'polymulx', 'polymul', 'polydiv', 'polypow', 'polyval',
- 'polyder', 'polyint', 'polyfromroots', 'polyvander', 'polyfit',
- 'polytrim', 'polyroots', 'Polynomial', 'polyval2d', 'polyval3d',
+ 'polyvalfromroots', 'polyder', 'polyint', 'polyfromroots', 'polyvander',
+ 'polyfit', 'polytrim', 'polyroots', 'Polynomial', 'polyval2d', 'polyval3d',
'polygrid2d', 'polygrid3d', 'polyvander2d', 'polyvander3d']
import warnings
@@ -780,6 +781,94 @@ def polyval(x, c, tensor=True):
return c0
+def polyvalfromroots(x, r, tensor=True):
+ """
+ Evaluate a polynomial specified by its roots at points x.
+
+ If `r` is of length `N`, this function returns the value
+
+ .. math:: p(x) = \prod_{n=1}^{N} (x - r_n)
+
+ The parameter `x` is converted to an array only if it is a tuple or a
+ list, otherwise it is treated as a scalar. In either case, either `x`
+ or its elements must support multiplication and addition both with
+ themselves and with the elements of `r`.
+
+ If `r` is a 1-D array, then `p(x)` will have the same shape as `x`. If `r`
+ is multidimensional, then the shape of the result depends on the value of
+    `tensor`. If `tensor` is ``True`` the shape will be r.shape[1:] + x.shape;
+ that is, each polynomial is evaluated at every value of `x`. If `tensor` is
+ ``False``, the shape will be r.shape[1:]; that is, each polynomial is
+ evaluated only for the corresponding broadcast value of `x`. Note that
+    scalars have shape ().
+
+ .. versionadded:: 1.12
+
+ Parameters
+ ----------
+ x : array_like, compatible object
+ If `x` is a list or tuple, it is converted to an ndarray, otherwise
+ it is left unchanged and treated as a scalar. In either case, `x`
+        or its elements must support addition and multiplication with
+        themselves and with the elements of `r`.
+ r : array_like
+ Array of roots. If `r` is multidimensional the first index is the
+ root index, while the remaining indices enumerate multiple
+ polynomials. For instance, in the two dimensional case the roots
+ of each polynomial may be thought of as stored in the columns of `r`.
+ tensor : boolean, optional
+ If True, the shape of the roots array is extended with ones on the
+ right, one for each dimension of `x`. Scalars have dimension 0 for this
+ action. The result is that every column of coefficients in `r` is
+ evaluated for every element of `x`. If False, `x` is broadcast over the
+ columns of `r` for the evaluation. This keyword is useful when `r` is
+ multidimensional. The default value is True.
+
+ Returns
+ -------
+ values : ndarray, compatible object
+ The shape of the returned array is described above.
+
+ See Also
+ --------
+ polyroots, polyfromroots, polyval
+
+ Examples
+ --------
+ >>> from numpy.polynomial.polynomial import polyvalfromroots
+ >>> polyvalfromroots(1, [1,2,3])
+ 0.0
+ >>> a = np.arange(4).reshape(2,2)
+ >>> a
+ array([[0, 1],
+ [2, 3]])
+ >>> polyvalfromroots(a, [-1, 0, 1])
+ array([[ -0., 0.],
+ [ 6., 24.]])
+ >>> r = np.arange(-2, 2).reshape(2,2) # multidimensional coefficients
+ >>> r # each column of r defines one polynomial
+ array([[-2, -1],
+ [ 0, 1]])
+ >>> b = [-2, 1]
+ >>> polyvalfromroots(b, r, tensor=True)
+ array([[-0., 3.],
+ [ 3., 0.]])
+ >>> polyvalfromroots(b, r, tensor=False)
+ array([-0., 0.])
+ """
+ r = np.array(r, ndmin=1, copy=0)
+ if r.dtype.char in '?bBhHiIlLqQpP':
+ r = r.astype(np.double)
+ if isinstance(x, (tuple, list)):
+ x = np.asarray(x)
+ if isinstance(x, np.ndarray):
+ if tensor:
+ r = r.reshape(r.shape + (1,)*x.ndim)
+ elif x.ndim >= r.ndim:
+ raise ValueError("x.ndim must be < r.ndim when tensor == False")
+ return np.prod(x - r, axis=0)
+
+
def polyval2d(x, y, c):
"""
Evaluate a 2-D polynomial at points (x, y).
diff --git a/numpy/polynomial/tests/test_polynomial.py b/numpy/polynomial/tests/test_polynomial.py
index 0e6a2e8a0..037be5927 100644
--- a/numpy/polynomial/tests/test_polynomial.py
+++ b/numpy/polynomial/tests/test_polynomial.py
@@ -136,6 +136,70 @@ class TestEvaluation(TestCase):
assert_equal(poly.polyval(x, [1, 0]).shape, dims)
assert_equal(poly.polyval(x, [1, 0, 0]).shape, dims)
+ def test_polyvalfromroots(self):
+ # check exception for broadcasting x values over root array with
+ # too few dimensions
+ assert_raises(ValueError, poly.polyvalfromroots,
+ [1], [1], tensor=False)
+
+ # check empty input
+ assert_equal(poly.polyvalfromroots([], [1]).size, 0)
+ assert_(poly.polyvalfromroots([], [1]).shape == (0,))
+
+ # check empty input + multidimensional roots
+ assert_equal(poly.polyvalfromroots([], [[1] * 5]).size, 0)
+ assert_(poly.polyvalfromroots([], [[1] * 5]).shape == (5, 0))
+
+ # check scalar input
+ assert_equal(poly.polyvalfromroots(1, 1), 0)
+ assert_(poly.polyvalfromroots(1, np.ones((3, 3))).shape == (3,))
+
+        # check normal input
+ x = np.linspace(-1, 1)
+ y = [x**i for i in range(5)]
+ for i in range(1, 5):
+ tgt = y[i]
+ res = poly.polyvalfromroots(x, [0]*i)
+ assert_almost_equal(res, tgt)
+ tgt = x*(x - 1)*(x + 1)
+ res = poly.polyvalfromroots(x, [-1, 0, 1])
+ assert_almost_equal(res, tgt)
+
+ # check that shape is preserved
+ for i in range(3):
+ dims = [2]*i
+ x = np.zeros(dims)
+ assert_equal(poly.polyvalfromroots(x, [1]).shape, dims)
+ assert_equal(poly.polyvalfromroots(x, [1, 0]).shape, dims)
+ assert_equal(poly.polyvalfromroots(x, [1, 0, 0]).shape, dims)
+
+ # check compatibility with factorization
+ ptest = [15, 2, -16, -2, 1]
+ r = poly.polyroots(ptest)
+ x = np.linspace(-1, 1)
+ assert_almost_equal(poly.polyval(x, ptest),
+ poly.polyvalfromroots(x, r))
+
+ # check multidimensional arrays of roots and values
+ # check tensor=False
+ rshape = (3, 5)
+ x = np.arange(-3, 2)
+ r = np.random.randint(-5, 5, size=rshape)
+ res = poly.polyvalfromroots(x, r, tensor=False)
+ tgt = np.empty(r.shape[1:])
+ for ii in range(tgt.size):
+ tgt[ii] = poly.polyvalfromroots(x[ii], r[:, ii])
+ assert_equal(res, tgt)
+
+ # check tensor=True
+ x = np.vstack([x, 2*x])
+ res = poly.polyvalfromroots(x, r, tensor=True)
+ tgt = np.empty(r.shape[1:] + x.shape)
+ for ii in range(r.shape[1]):
+ for jj in range(x.shape[0]):
+ tgt[ii, jj, :] = poly.polyvalfromroots(x[jj], r[:, ii])
+ assert_equal(res, tgt)
+
def test_polyval2d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
diff --git a/numpy/random/mtrand/Python.pxi b/numpy/random/mtrand/Python.pxi
index 01d47af50..f23a3bfe6 100644
--- a/numpy/random/mtrand/Python.pxi
+++ b/numpy/random/mtrand/Python.pxi
@@ -28,20 +28,6 @@ cdef extern from "Python.h":
void Py_INCREF(object obj)
void Py_XINCREF(object obj)
- # CObject API
-# If this is uncommented it needs to be fixed to use PyCapsule
-# for Python >= 3.0
-#
-# ctypedef void (*destructor1)(void* cobj)
-# ctypedef void (*destructor2)(void* cobj, void* desc)
-# int PyCObject_Check(object p)
-# object PyCObject_FromVoidPtr(void* cobj, destructor1 destr)
-# object PyCObject_FromVoidPtrAndDesc(void* cobj, void* desc,
-# destructor2 destr)
-# void* PyCObject_AsVoidPtr(object self)
-# void* PyCObject_GetDesc(object self)
-# int PyCObject_SetVoidPtr(object self, void* cobj)
-
# TypeCheck API
int PyFloat_Check(object obj)
int PyInt_Check(object obj)
diff --git a/numpy/random/mtrand/distributions.c b/numpy/random/mtrand/distributions.c
index 7c44088a7..e195700d4 100644
--- a/numpy/random/mtrand/distributions.c
+++ b/numpy/random/mtrand/distributions.c
@@ -500,6 +500,11 @@ long rk_poisson_mult(rk_state *state, double lam)
}
}
+/*
+ * The transformed rejection method for generating Poisson random variables
+ * W. Hoermann
+ * Insurance: Mathematics and Economics 12, 39-45 (1993)
+ */
#define LS2PI 0.91893853320467267
#define TWELFTH 0.083333333333333333333333
long rk_poisson_ptrs(rk_state *state, double lam)
diff --git a/numpy/random/mtrand/mt_compat.h b/numpy/random/mtrand/mt_compat.h
deleted file mode 100644
index ab56a553c..000000000
--- a/numpy/random/mtrand/mt_compat.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * This is a convenience header file providing compatibility utilities
- * for supporting Python 2 and Python 3 in the same code base.
- *
- * It can be removed when Python 2.6 is dropped as PyCapsule is available
- * in both Python 3.1+ and Python 2.7.
- */
-
-#ifndef _MT_COMPAT_H_
-#define _MT_COMPAT_H_
-
-#include <Python.h>
-#include <numpy/npy_common.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-
-/*
- * PyCObject functions adapted to PyCapsules.
- *
- * The main job here is to get rid of the improved error handling
- * of PyCapsules. It's a shame...
- */
-#if PY_VERSION_HEX >= 0x03000000
-
-static NPY_INLINE PyObject *
-NpyCapsule_FromVoidPtr(void *ptr, void (*dtor)(PyObject *))
-{
- PyObject *ret = PyCapsule_New(ptr, NULL, dtor);
- if (ret == NULL) {
- PyErr_Clear();
- }
- return ret;
-}
-
-static NPY_INLINE void *
-NpyCapsule_AsVoidPtr(PyObject *obj)
-{
- void *ret = PyCapsule_GetPointer(obj, NULL);
- if (ret == NULL) {
- PyErr_Clear();
- }
- return ret;
-}
-
-#else
-
-static NPY_INLINE PyObject *
-NpyCapsule_FromVoidPtr(void *ptr, void (*dtor)(void *))
-{
- return PyCObject_FromVoidPtr(ptr, dtor);
-}
-
-static NPY_INLINE void *
-NpyCapsule_AsVoidPtr(PyObject *ptr)
-{
- return PyCObject_AsVoidPtr(ptr);
-}
-
-#endif
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _COMPAT_H_ */
diff --git a/numpy/random/mtrand/mtrand.pyx b/numpy/random/mtrand/mtrand.pyx
index f7afae58c..abf7a4102 100644
--- a/numpy/random/mtrand/mtrand.pyx
+++ b/numpy/random/mtrand/mtrand.pyx
@@ -23,6 +23,7 @@
include "Python.pxi"
include "numpy.pxd"
+include "cpython/pycapsule.pxd"
from libc cimport string
@@ -594,7 +595,7 @@ def _rand_bool(low, high, size, rngstate):
cdef npy_bool *out
cdef ndarray array "arrayObject"
cdef npy_intp cnt
- cdef rk_state *state = <rk_state *>NpyCapsule_AsVoidPtr(rngstate)
+ cdef rk_state *state = <rk_state *>PyCapsule_GetPointer(rngstate, NULL)
rng = <npy_bool>(high - low)
off = <npy_bool>(low)
@@ -621,7 +622,7 @@ def _rand_int8(low, high, size, rngstate):
cdef npy_uint8 *out
cdef ndarray array "arrayObject"
cdef npy_intp cnt
- cdef rk_state *state = <rk_state *>NpyCapsule_AsVoidPtr(rngstate)
+ cdef rk_state *state = <rk_state *>PyCapsule_GetPointer(rngstate, NULL)
rng = <npy_uint8>(high - low)
off = <npy_uint8>(<npy_int8>low)
@@ -648,7 +649,7 @@ def _rand_int16(low, high, size, rngstate):
cdef npy_uint16 *out
cdef ndarray array "arrayObject"
cdef npy_intp cnt
- cdef rk_state *state = <rk_state *>NpyCapsule_AsVoidPtr(rngstate)
+ cdef rk_state *state = <rk_state *>PyCapsule_GetPointer(rngstate, NULL)
rng = <npy_uint16>(high - low)
off = <npy_uint16>(<npy_int16>low)
@@ -699,7 +700,7 @@ def _rand_int32(low, high, size, rngstate):
cdef npy_uint32 *out
cdef ndarray array "arrayObject"
cdef npy_intp cnt
- cdef rk_state *state = <rk_state *>NpyCapsule_AsVoidPtr(rngstate)
+ cdef rk_state *state = <rk_state *>PyCapsule_GetPointer(rngstate, NULL)
rng = <npy_uint32>(high - low)
off = <npy_uint32>(<npy_int32>low)
@@ -726,7 +727,7 @@ def _rand_int64(low, high, size, rngstate):
cdef npy_uint64 *out
cdef ndarray array "arrayObject"
cdef npy_intp cnt
- cdef rk_state *state = <rk_state *>NpyCapsule_AsVoidPtr(rngstate)
+ cdef rk_state *state = <rk_state *>PyCapsule_GetPointer(rngstate, NULL)
rng = <npy_uint64>(high - low)
off = <npy_uint64>(<npy_int64>low)
@@ -752,7 +753,7 @@ def _rand_uint8(low, high, size, rngstate):
cdef npy_uint8 *out
cdef ndarray array "arrayObject"
cdef npy_intp cnt
- cdef rk_state *state = <rk_state *>NpyCapsule_AsVoidPtr(rngstate)
+ cdef rk_state *state = <rk_state *>PyCapsule_GetPointer(rngstate, NULL)
rng = <npy_uint8>(high - low)
off = <npy_uint8>(low)
@@ -779,7 +780,7 @@ def _rand_uint16(low, high, size, rngstate):
cdef npy_uint16 *out
cdef ndarray array "arrayObject"
cdef npy_intp cnt
- cdef rk_state *state = <rk_state *>NpyCapsule_AsVoidPtr(rngstate)
+ cdef rk_state *state = <rk_state *>PyCapsule_GetPointer(rngstate, NULL)
rng = <npy_uint16>(high - low)
off = <npy_uint16>(low)
@@ -806,7 +807,7 @@ def _rand_uint32(low, high, size, rngstate):
cdef npy_uint32 *out
cdef ndarray array "arrayObject"
cdef npy_intp cnt
- cdef rk_state *state = <rk_state *>NpyCapsule_AsVoidPtr(rngstate)
+ cdef rk_state *state = <rk_state *>PyCapsule_GetPointer(rngstate, NULL)
rng = <npy_uint32>(high - low)
off = <npy_uint32>(low)
@@ -833,7 +834,7 @@ def _rand_uint64(low, high, size, rngstate):
cdef npy_uint64 *out
cdef ndarray array "arrayObject"
cdef npy_intp cnt
- cdef rk_state *state = <rk_state *>NpyCapsule_AsVoidPtr(rngstate)
+ cdef rk_state *state = <rk_state *>PyCapsule_GetPointer(rngstate, NULL)
rng = <npy_uint64>(high - low)
off = <npy_uint64>(low)
@@ -914,7 +915,7 @@ cdef class RandomState:
def __init__(self, seed=None):
self.internal_state = <rk_state*>PyMem_Malloc(sizeof(rk_state))
- self.state_address = NpyCapsule_FromVoidPtr(self.internal_state, NULL)
+ self.state_address = PyCapsule_New(self.internal_state, NULL, NULL)
self.lock = Lock()
self.seed(seed)
@@ -2013,7 +2014,7 @@ cdef class RandomState:
----------
.. [1] Peyton Z. Peebles Jr., "Probability, Random Variables and
Random Signal Principles", 4th ed, 2001, p. 57.
- .. [2] Wikipedia, "Poisson process",
+ .. [2] Wikipedia, "Poisson process",
http://en.wikipedia.org/wiki/Poisson_process
.. [3] Wikipedia, "Exponential distribution",
http://en.wikipedia.org/wiki/Exponential_distribution
@@ -2211,7 +2212,7 @@ cdef class RandomState:
--------
Draw samples from the distribution:
- >>> shape, scale = 2., 2. # mean and dispersion
+ >>> shape, scale = 2., 2. # mean=4, std=2*sqrt(2)
>>> s = np.random.gamma(shape, scale, 1000)
Display the histogram of the samples, along with
@@ -5051,7 +5052,11 @@ cdef class RandomState:
x_ptr = <char*><size_t>x.ctypes.data
stride = x.strides[0]
itemsize = x.dtype.itemsize
- buf = np.empty_like(x[0]) # GC'd at function exit
+ # As the array x could contain python objects we use a buffer
+ # of bytes for the swaps to avoid leaving one of the objects
+        # within the buffer and erroneously decrementing its refcount
+ # when the function exits.
+ buf = np.empty(itemsize, dtype=np.int8) # GC'd at function exit
buf_ptr = <char*><size_t>buf.ctypes.data
with self.lock:
# We trick gcc into providing a specialized implementation for
diff --git a/numpy/random/mtrand/numpy.pxd b/numpy/random/mtrand/numpy.pxd
index 488278d6c..d5b0d74ca 100644
--- a/numpy/random/mtrand/numpy.pxd
+++ b/numpy/random/mtrand/numpy.pxd
@@ -2,12 +2,6 @@
cdef extern from "numpy/npy_no_deprecated_api.h": pass
-cdef extern from "mt_compat.h":
-
- object NpyCapsule_FromVoidPtr(void *ptr, void (*dtor)(object o))
- void * NpyCapsule_AsVoidPtr(object o)
-
-
cdef extern from "numpy/arrayobject.h":
cdef enum NPY_TYPES:
diff --git a/numpy/random/tests/test_random.py b/numpy/random/tests/test_random.py
index 08039cbbe..a06de58e3 100644
--- a/numpy/random/tests/test_random.py
+++ b/numpy/random/tests/test_random.py
@@ -260,11 +260,13 @@ class TestRandomDist(TestCase):
def test_random_integers(self):
np.random.seed(self.seed)
- actual = np.random.random_integers(-99, 99, size=(3, 2))
- desired = np.array([[31, 3],
- [-52, 41],
- [-48, -66]])
- assert_array_equal(actual, desired)
+ with warnings.catch_warnings():
+ warnings.simplefilter("ignore", DeprecationWarning)
+ actual = np.random.random_integers(-99, 99, size=(3, 2))
+ desired = np.array([[31, 3],
+ [-52, 41],
+ [-48, -66]])
+ assert_array_equal(actual, desired)
def test_random_integers_max_int(self):
# Tests whether random_integers can generate the
@@ -272,10 +274,12 @@ class TestRandomDist(TestCase):
# into a C long. Previous implementations of this
# method have thrown an OverflowError when attempting
# to generate this integer.
- actual = np.random.random_integers(np.iinfo('l').max,
- np.iinfo('l').max)
- desired = np.iinfo('l').max
- assert_equal(actual, desired)
+ with warnings.catch_warnings():
+ warnings.simplefilter("ignore", DeprecationWarning)
+ actual = np.random.random_integers(np.iinfo('l').max,
+ np.iinfo('l').max)
+ desired = np.iinfo('l').max
+ assert_equal(actual, desired)
def test_random_integers_deprecated(self):
with warnings.catch_warnings():
diff --git a/numpy/random/tests/test_regression.py b/numpy/random/tests/test_regression.py
index 133a1aa5a..b50b6b260 100644
--- a/numpy/random/tests/test_regression.py
+++ b/numpy/random/tests/test_regression.py
@@ -113,5 +113,34 @@ class TestRegression(TestCase):
assert_(c in a)
assert_raises(ValueError, np.random.choice, a, p=probs*0.9)
+ def test_shuffle_of_array_of_different_length_strings(self):
+ # Test that permuting an array of different length strings
+ # will not cause a segfault on garbage collection
+ # Tests gh-7710
+ np.random.seed(1234)
+
+ a = np.array(['a', 'a' * 1000])
+
+ for _ in range(100):
+ np.random.shuffle(a)
+
+ # Force Garbage Collection - should not segfault.
+ import gc
+ gc.collect()
+
+ def test_shuffle_of_array_of_objects(self):
+ # Test that permuting an array of objects will not cause
+ # a segfault on garbage collection.
+ # See gh-7719
+ np.random.seed(1234)
+ a = np.array([np.arange(1), np.arange(4)])
+
+ for _ in range(1000):
+ np.random.shuffle(a)
+
+ # Force Garbage Collection - should not segfault.
+ import gc
+ gc.collect()
+
if __name__ == "__main__":
run_module_suite()
diff --git a/numpy/testing/tests/test_utils.py b/numpy/testing/tests/test_utils.py
index fe1f411c4..842d55b37 100644
--- a/numpy/testing/tests/test_utils.py
+++ b/numpy/testing/tests/test_utils.py
@@ -11,7 +11,7 @@ from numpy.testing import (
assert_warns, assert_no_warnings, assert_allclose, assert_approx_equal,
assert_array_almost_equal_nulp, assert_array_max_ulp,
clear_and_catch_warnings, run_module_suite,
- assert_string_equal, assert_, tempdir, temppath,
+ assert_string_equal, assert_, tempdir, temppath,
)
import unittest
@@ -246,6 +246,23 @@ class TestArrayAlmostEqual(_GenericTest, unittest.TestCase):
def setUp(self):
self._assert_func = assert_array_almost_equal
+ def test_closeness(self):
+ # Note that in the course of time we ended up with
+ # `abs(x - y) < 1.5 * 10**(-decimal)`
+ # instead of the previously documented
+ # `abs(x - y) < 0.5 * 10**(-decimal)`
+ # so this check serves to preserve the wrongness.
+
+ # test scalars
+ self._assert_func(1.499999, 0.0, decimal=0)
+ self.assertRaises(AssertionError,
+ lambda: self._assert_func(1.5, 0.0, decimal=0))
+
+ # test arrays
+ self._assert_func([1.499999], [0.0], decimal=0)
+ self.assertRaises(AssertionError,
+ lambda: self._assert_func([1.5], [0.0], decimal=0))
+
def test_simple(self):
x = np.array([1234.2222])
y = np.array([1234.2223])
@@ -288,6 +305,23 @@ class TestAlmostEqual(_GenericTest, unittest.TestCase):
def setUp(self):
self._assert_func = assert_almost_equal
+ def test_closeness(self):
+ # Note that in the course of time we ended up with
+ # `abs(x - y) < 1.5 * 10**(-decimal)`
+ # instead of the previously documented
+ # `abs(x - y) < 0.5 * 10**(-decimal)`
+ # so this check serves to preserve the wrongness.
+
+ # test scalars
+ self._assert_func(1.499999, 0.0, decimal=0)
+ self.assertRaises(AssertionError,
+ lambda: self._assert_func(1.5, 0.0, decimal=0))
+
+ # test arrays
+ self._assert_func([1.499999], [0.0], decimal=0)
+ self.assertRaises(AssertionError,
+ lambda: self._assert_func([1.5], [0.0], decimal=0))
+
def test_nan_item(self):
self._assert_func(np.nan, np.nan)
self.assertRaises(AssertionError,
diff --git a/numpy/testing/utils.py b/numpy/testing/utils.py
index 3667e67e0..dfed5d148 100644
--- a/numpy/testing/utils.py
+++ b/numpy/testing/utils.py
@@ -424,11 +424,14 @@ def assert_almost_equal(actual,desired,decimal=7,err_msg='',verbose=True):
instead of this function for more consistent floating point
comparisons.
- The test is equivalent to ``abs(desired-actual) < 0.5 * 10**(-decimal)``.
+    The test verifies that the elements of ``actual`` and ``desired`` satisfy
- Given two objects (numbers or ndarrays), check that all elements of these
- objects are almost equal. An exception is raised at conflicting values.
- For ndarrays this delegates to assert_array_almost_equal
+ ``abs(desired-actual) < 1.5 * 10**(-decimal)``
+
+ That is a looser test than originally documented, but agrees with what the
+ actual implementation in `assert_array_almost_equal` did up to rounding
+ vagaries. An exception is raised at conflicting values. For ndarrays this
+ delegates to assert_array_almost_equal
Parameters
----------
@@ -529,7 +532,7 @@ def assert_almost_equal(actual,desired,decimal=7,err_msg='',verbose=True):
return
except (NotImplementedError, TypeError):
pass
- if round(abs(desired - actual), decimal) != 0:
+ if abs(desired - actual) >= 1.5 * 10.0**(-decimal):
raise AssertionError(_build_err_msg())
@@ -819,14 +822,16 @@ def assert_array_almost_equal(x, y, decimal=6, err_msg='', verbose=True):
instead of this function for more consistent floating point
comparisons.
- The test verifies identical shapes and verifies values with
- ``abs(desired-actual) < 0.5 * 10**(-decimal)``.
+    The test verifies identical shapes and that the elements of ``actual`` and
+    ``desired`` satisfy
- Given two array_like objects, check that the shape is equal and all
- elements of these objects are almost equal. An exception is raised at
- shape mismatch or conflicting values. In contrast to the standard usage
- in numpy, NaNs are compared like numbers, no assertion is raised if
- both objects have NaNs in the same positions.
+ ``abs(desired-actual) < 1.5 * 10**(-decimal)``
+
+ That is a looser test than originally documented, but agrees with what the
+ actual implementation did up to rounding vagaries. An exception is raised
+ at shape mismatch or conflicting values. In contrast to the standard usage
+ in numpy, NaNs are compared like numbers, no assertion is raised if both
+ objects have NaNs in the same positions.
Parameters
----------
@@ -903,12 +908,12 @@ def assert_array_almost_equal(x, y, decimal=6, err_msg='', verbose=True):
# casting of x later.
dtype = result_type(y, 1.)
y = array(y, dtype=dtype, copy=False, subok=True)
- z = abs(x-y)
+ z = abs(x - y)
if not issubdtype(z.dtype, number):
z = z.astype(float_) # handle object arrays
- return around(z, decimal) <= 10.0**(-decimal)
+ return z < 1.5 * 10.0**(-decimal)
assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose,
header=('Arrays are not almost equal to %d decimals' % decimal),
@@ -1146,9 +1151,6 @@ def assert_raises(*args,**kwargs):
return nose.tools.assert_raises(*args,**kwargs)
-assert_raises_regex_impl = None
-
-
def assert_raises_regex(exception_class, expected_regexp,
callable_obj=None, *args, **kwargs):
"""
@@ -1159,70 +1161,22 @@ def assert_raises_regex(exception_class, expected_regexp,
Name of this function adheres to Python 3.2+ reference, but should work in
all versions down to 2.6.
+ Notes
+ -----
+ .. versionadded:: 1.9.0
+
"""
__tracebackhide__ = True # Hide traceback for py.test
nose = import_nose()
- global assert_raises_regex_impl
- if assert_raises_regex_impl is None:
- try:
- # Python 3.2+
- assert_raises_regex_impl = nose.tools.assert_raises_regex
- except AttributeError:
- try:
- # 2.7+
- assert_raises_regex_impl = nose.tools.assert_raises_regexp
- except AttributeError:
- # 2.6
-
- # This class is copied from Python2.7 stdlib almost verbatim
- class _AssertRaisesContext(object):
- """A context manager used to implement TestCase.assertRaises* methods."""
-
- def __init__(self, expected, expected_regexp=None):
- self.expected = expected
- self.expected_regexp = expected_regexp
-
- def failureException(self, msg):
- return AssertionError(msg)
-
- def __enter__(self):
- return self
-
- def __exit__(self, exc_type, exc_value, tb):
- if exc_type is None:
- try:
- exc_name = self.expected.__name__
- except AttributeError:
- exc_name = str(self.expected)
- raise self.failureException(
- "{0} not raised".format(exc_name))
- if not issubclass(exc_type, self.expected):
- # let unexpected exceptions pass through
- return False
- self.exception = exc_value # store for later retrieval
- if self.expected_regexp is None:
- return True
-
- expected_regexp = self.expected_regexp
- if isinstance(expected_regexp, basestring):
- expected_regexp = re.compile(expected_regexp)
- if not expected_regexp.search(str(exc_value)):
- raise self.failureException(
- '"%s" does not match "%s"' %
- (expected_regexp.pattern, str(exc_value)))
- return True
-
- def impl(cls, regex, callable_obj, *a, **kw):
- mgr = _AssertRaisesContext(cls, regex)
- if callable_obj is None:
- return mgr
- with mgr:
- callable_obj(*a, **kw)
- assert_raises_regex_impl = impl
-
- return assert_raises_regex_impl(exception_class, expected_regexp,
- callable_obj, *args, **kwargs)
+ if sys.version_info.major >= 3:
+ funcname = nose.tools.assert_raises_regex
+ else:
+ # Only present in Python 2.7, missing from unittest in 2.6
+ funcname = nose.tools.assert_raises_regexp
+
+ return funcname(exception_class, expected_regexp, callable_obj,
+ *args, **kwargs)
def decorate_methods(cls, decorator, testmatch=None):
diff --git a/numpy/tests/test_ctypeslib.py b/numpy/tests/test_ctypeslib.py
index 36274ad46..2c58f1184 100644
--- a/numpy/tests/test_ctypeslib.py
+++ b/numpy/tests/test_ctypeslib.py
@@ -8,7 +8,14 @@ from numpy.distutils.misc_util import get_shared_lib_extension
from numpy.testing import TestCase, run_module_suite, dec
try:
- cdll = load_library('multiarray', np.core.multiarray.__file__)
+ cdll = None
+ if hasattr(sys, 'gettotalrefcount'):
+ try:
+ cdll = load_library('multiarray_d', np.core.multiarray.__file__)
+ except OSError:
+ pass
+ if cdll is None:
+ cdll = load_library('multiarray', np.core.multiarray.__file__)
_HAS_CTYPE = True
except ImportError:
_HAS_CTYPE = False
diff --git a/runtests.py b/runtests.py
index 2a836d9cf..b833bbd04 100755
--- a/runtests.py
+++ b/runtests.py
@@ -109,6 +109,9 @@ def main(argv):
parser.add_argument("--bench-compare", action="store", metavar="COMMIT",
help=("Compare benchmark results to COMMIT. "
"Note that you need to commit your changes first!"))
+ parser.add_argument("--raise-warnings", default=None, type=str,
+ choices=('develop', 'release'),
+ help="if 'develop', some warnings are treated as errors")
parser.add_argument("args", metavar="ARGS", default=[], nargs=REMAINDER,
help="Arguments to pass to Nose, Python or shell")
args = parser.parse_args(argv)
@@ -289,6 +292,7 @@ def main(argv):
verbose=args.verbose,
extra_argv=extra_argv,
doctests=args.doctests,
+ raise_warnings=args.raise_warnings,
coverage=args.coverage)
finally:
os.chdir(cwd)
diff --git a/setup.py b/setup.py
index b2e45ec45..8b8863255 100755
--- a/setup.py
+++ b/setup.py
@@ -30,8 +30,8 @@ import subprocess
import textwrap
-if sys.version_info[:2] < (2, 6) or (3, 0) <= sys.version_info[0:2] < (3, 2):
- raise RuntimeError("Python version 2.6, 2.7 or >= 3.2 required.")
+if sys.version_info[:2] < (2, 7) or (3, 0) <= sys.version_info[:2] < (3, 4):
+ raise RuntimeError("Python version 2.7 or >= 3.4 required.")
if sys.version_info[0] >= 3:
import builtins
@@ -47,11 +47,8 @@ License :: OSI Approved
Programming Language :: C
Programming Language :: Python
Programming Language :: Python :: 2
-Programming Language :: Python :: 2.6
Programming Language :: Python :: 2.7
Programming Language :: Python :: 3
-Programming Language :: Python :: 3.2
-Programming Language :: Python :: 3.3
Programming Language :: Python :: 3.4
Programming Language :: Python :: 3.5
Programming Language :: Python :: Implementation :: CPython
@@ -317,7 +314,7 @@ def parse_setuppy_commands():
flake8="`setup.py flake8` is not supported, use flake8 standalone",
)
bad_commands['nosetests'] = bad_commands['test']
- for commands in ('upload_docs', 'easy_install', 'bdist', 'bdist_dumb',
+ for command in ('upload_docs', 'easy_install', 'bdist', 'bdist_dumb',
'register', 'check', 'install_data', 'install_headers',
'install_lib', 'install_scripts', ):
bad_commands[command] = "`setup.py %s` is not supported" % command
diff --git a/site.cfg.example b/site.cfg.example
index 69cb9892c..05b49e507 100644
--- a/site.cfg.example
+++ b/site.cfg.example
@@ -8,6 +8,7 @@
# will also be checked for the file ~/.numpy-site.cfg .
# The format of the file is that of the standard library's ConfigParser module.
+# No interpolation is allowed; the RawConfigParser class is used to load it.
#
# http://docs.python.org/3/library/configparser.html
#
diff --git a/tools/swig/numpy.i b/tools/swig/numpy.i
index 67a519e6d..b8fdaeb1f 100644
--- a/tools/swig/numpy.i
+++ b/tools/swig/numpy.i
@@ -81,6 +81,7 @@
%#define array_descr(a) (((PyArrayObject*)a)->descr)
%#define array_flags(a) (((PyArrayObject*)a)->flags)
%#define array_enableflags(a,f) (((PyArrayObject*)a)->flags) = f
+%#define array_is_fortran(a) (PyArray_ISFORTRAN((PyArrayObject*)a))
%#else
%#define is_array(a) ((a) && PyArray_Check(a))
%#define array_type(a) PyArray_TYPE((PyArrayObject*)a)
@@ -93,10 +94,10 @@
%#define array_descr(a) PyArray_DESCR((PyArrayObject*)a)
%#define array_flags(a) PyArray_FLAGS((PyArrayObject*)a)
%#define array_enableflags(a,f) PyArray_ENABLEFLAGS((PyArrayObject*)a,f)
+%#define array_is_fortran(a) (PyArray_IS_F_CONTIGUOUS((PyArrayObject*)a))
%#endif
%#define array_is_contiguous(a) (PyArray_ISCONTIGUOUS((PyArrayObject*)a))
%#define array_is_native(a) (PyArray_ISNOTSWAPPED((PyArrayObject*)a))
-%#define array_is_fortran(a) (PyArray_IS_F_CONTIGUOUS((PyArrayObject*)a))
}
/**********************************************************************/
@@ -295,7 +296,11 @@
Py_INCREF(array_descr(ary));
result = (PyArrayObject*) PyArray_FromArray(ary,
array_descr(ary),
+%#if NPY_API_VERSION < 0x00000007
+ NPY_FORTRANORDER);
+%#else
NPY_ARRAY_F_CONTIGUOUS);
+%#endif
*is_new_object = 1;
}
return result;
diff --git a/tools/swig/pyfragments.swg b/tools/swig/pyfragments.swg
index b5decf12c..901e6ed9d 100644
--- a/tools/swig/pyfragments.swg
+++ b/tools/swig/pyfragments.swg
@@ -75,15 +75,22 @@
SWIG_AsVal_dec(unsigned long)(PyObject *obj, unsigned long *val)
{
PyArray_Descr * ulongDescr = PyArray_DescrNewFromType(NPY_ULONG);
- if (PyInt_Check(obj)) {
+ %#if PY_VERSION_HEX < 0x03000000
+ if (PyInt_Check(obj))
+ {
long v = PyInt_AsLong(obj);
- if (v >= 0) {
- if (val) *val = v;
- return SWIG_OK;
- } else {
- return SWIG_OverflowError;
+ if (v >= 0)
+ {
+ if (val) *val = v;
+ return SWIG_OK;
+ }
+ else
+ {
+ return SWIG_OverflowError;
}
- } else if (PyLong_Check(obj)) {
+ } else
+ %#endif
+ if (PyLong_Check(obj)) {
unsigned long v = PyLong_AsUnsignedLong(obj);
if (!PyErr_Occurred()) {
if (val) *val = v;