summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--benchmarks/benchmarks/bench_shape_base.py89
-rw-r--r--doc/changelog/1.13.2-changelog.rst46
-rw-r--r--doc/release/1.13.2-notes.rst58
-rw-r--r--doc/release/1.14.0-notes.rst132
-rw-r--r--doc/source/dev/governance/people.rst12
-rw-r--r--doc/source/reference/routines.polynomials.package.rst1
-rw-r--r--doc/source/reference/routines.polynomials.polyutils.rst4
-rw-r--r--doc/source/release.rst2
-rw-r--r--doc/source/user/building.rst4
-rw-r--r--doc/source/user/quickstart.rst28
-rw-r--r--numpy/add_newdocs.py24
-rw-r--r--numpy/core/arrayprint.py605
-rw-r--r--numpy/core/code_generators/ufunc_docstrings.py16
-rw-r--r--numpy/core/einsumfunc.py10
-rw-r--r--numpy/core/fromnumeric.py12
-rw-r--r--numpy/core/function_base.py17
-rw-r--r--numpy/core/include/numpy/npy_cpu.h6
-rw-r--r--numpy/core/include/numpy/npy_endian.h6
-rw-r--r--numpy/core/numeric.py281
-rw-r--r--numpy/core/shape_base.py8
-rw-r--r--numpy/core/src/multiarray/_datetime.h3
-rw-r--r--numpy/core/src/multiarray/array_assign_array.c3
-rw-r--r--numpy/core/src/multiarray/arrayobject.c6
-rw-r--r--numpy/core/src/multiarray/arraytypes.c.src258
-rw-r--r--numpy/core/src/multiarray/cblasfuncs.c37
-rw-r--r--numpy/core/src/multiarray/convert.c4
-rw-r--r--numpy/core/src/multiarray/convert_datatype.c6
-rw-r--r--numpy/core/src/multiarray/ctors.c9
-rw-r--r--numpy/core/src/multiarray/datetime.c58
-rw-r--r--numpy/core/src/multiarray/descriptor.c5
-rw-r--r--numpy/core/src/multiarray/descriptor.h4
-rw-r--r--numpy/core/src/multiarray/dtype_transfer.c388
-rw-r--r--numpy/core/src/multiarray/item_selection.c42
-rw-r--r--numpy/core/src/multiarray/iterators.c70
-rw-r--r--numpy/core/src/multiarray/mapping.c72
-rw-r--r--numpy/core/src/multiarray/multiarraymodule.c326
-rw-r--r--numpy/core/src/multiarray/number.c12
-rw-r--r--numpy/core/src/multiarray/scalartypes.c.src102
-rw-r--r--numpy/core/src/multiarray/sequence.c37
-rw-r--r--numpy/core/src/multiarray/strfuncs.c3
-rw-r--r--numpy/core/src/multiarray/temp_elide.c6
-rw-r--r--numpy/core/src/npymath/npy_math_complex.c.src8
-rw-r--r--numpy/core/src/npymath/npy_math_internal.h.src2
-rw-r--r--numpy/core/src/umath/scalarmath.c.src4
-rw-r--r--numpy/core/tests/test_arrayprint.py140
-rw-r--r--numpy/core/tests/test_deprecations.py16
-rw-r--r--numpy/core/tests/test_dtype.py16
-rw-r--r--numpy/core/tests/test_einsum.py14
-rw-r--r--numpy/core/tests/test_indexing.py6
-rw-r--r--numpy/core/tests/test_multiarray.py201
-rw-r--r--numpy/core/tests/test_nditer.py103
-rw-r--r--numpy/core/tests/test_numeric.py36
-rw-r--r--numpy/core/tests/test_records.py17
-rw-r--r--numpy/core/tests/test_regression.py2
-rw-r--r--numpy/core/tests/test_scalarmath.py13
-rw-r--r--numpy/core/tests/test_shape_base.py34
-rw-r--r--numpy/distutils/fcompiler/gnu.py3
-rw-r--r--numpy/distutils/misc_util.py2
-rw-r--r--numpy/distutils/tests/test_fcompiler_gnu.py3
-rw-r--r--numpy/f2py/cfuncs.py8
-rw-r--r--numpy/f2py/rules.py6
-rw-r--r--numpy/lib/arraypad.py22
-rw-r--r--numpy/lib/format.py51
-rw-r--r--numpy/lib/function_base.py54
-rw-r--r--numpy/lib/nanfunctions.py2
-rw-r--r--numpy/lib/npyio.py7
-rw-r--r--numpy/lib/shape_base.py2
-rw-r--r--numpy/lib/tests/test_format.py7
-rw-r--r--numpy/lib/tests/test_function_base.py17
-rw-r--r--numpy/lib/tests/test_io.py10
-rw-r--r--numpy/linalg/linalg.py123
-rw-r--r--numpy/linalg/tests/test_linalg.py45
-rw-r--r--numpy/ma/core.py199
-rw-r--r--numpy/ma/tests/test_core.py118
-rw-r--r--numpy/polynomial/chebyshev.py16
-rw-r--r--numpy/polynomial/hermite.py20
-rw-r--r--numpy/polynomial/hermite_e.py20
-rw-r--r--numpy/polynomial/laguerre.py20
-rw-r--r--numpy/polynomial/legendre.py20
-rw-r--r--numpy/polynomial/polynomial.py2
-rw-r--r--numpy/polynomial/polyutils.py37
-rw-r--r--numpy/polynomial/tests/test_printing.py24
-rw-r--r--numpy/random/mtrand/numpy.pxd12
-rw-r--r--numpy/testing/tests/test_utils.py24
84 files changed, 2548 insertions, 1750 deletions
diff --git a/benchmarks/benchmarks/bench_shape_base.py b/benchmarks/benchmarks/bench_shape_base.py
new file mode 100644
index 000000000..9d0f0ae04
--- /dev/null
+++ b/benchmarks/benchmarks/bench_shape_base.py
@@ -0,0 +1,89 @@
+from __future__ import absolute_import, division, print_function
+
+from .common import Benchmark
+
+import numpy as np
+
+
+class Block(Benchmark):
+ params = [1, 10, 100]
+ param_names = ['size']
+
+ def setup(self, n):
+ self.a_2d = np.ones((2 * n, 2 * n))
+ self.b_1d = np.ones(2 * n)
+ self.b_2d = 2 * self.a_2d
+
+ self.a = np.ones(3 * n)
+ self.b = np.ones(3 * n)
+
+ self.one_2d = np.ones((1 * n, 3 * n))
+ self.two_2d = np.ones((1 * n, 3 * n))
+ self.three_2d = np.ones((1 * n, 6 * n))
+ self.four_1d = np.ones(6 * n)
+ self.five_0d = np.ones(1 * n)
+ self.six_1d = np.ones(5 * n)
+ self.zero_2d = np.zeros((2 * n, 6 * n))
+
+ self.one = np.ones(3 * n)
+ self.two = 2 * np.ones((3, 3 * n))
+ self.three = 3 * np.ones(3 * n)
+ self.four = 4 * np.ones(3 * n)
+ self.five = 5 * np.ones(1 * n)
+ self.six = 6 * np.ones(5 * n)
+ self.zero = np.zeros((2 * n, 6 * n))
+
+ self.a000 = np.ones((2 * n, 2 * n, 2 * n), int) * 1
+
+ self.a100 = np.ones((3 * n, 2 * n, 2 * n), int) * 2
+ self.a010 = np.ones((2 * n, 3 * n, 2 * n), int) * 3
+ self.a001 = np.ones((2 * n, 2 * n, 3 * n), int) * 4
+
+ self.a011 = np.ones((2 * n, 3 * n, 3 * n), int) * 5
+ self.a101 = np.ones((3 * n, 2 * n, 3 * n), int) * 6
+ self.a110 = np.ones((3 * n, 3 * n, 2 * n), int) * 7
+
+ self.a111 = np.ones((3 * n, 3 * n, 3 * n), int) * 8
+
+ def time_block_simple_row_wise(self, n):
+ np.block([self.a_2d, self.b_2d])
+
+ def time_block_simple_column_wise(self, n):
+ np.block([[self.a_2d], [self.b_2d]])
+
+ def time_block_complicated(self, n):
+ np.block([[self.one_2d, self.two_2d],
+ [self.three_2d],
+ [self.four_1d],
+ [self.five_0d, self.six_1d],
+ [self.zero_2d]])
+
+ def time_nested(self, n):
+ np.block([
+ [
+ np.block([
+ [self.one],
+ [self.three],
+ [self.four]
+ ]),
+ self.two
+ ],
+ [self.five, self.six],
+ [self.zero]
+ ])
+
+ def time_3d(self, n):
+ np.block([
+ [
+ [self.a000, self.a001],
+ [self.a010, self.a011],
+ ],
+ [
+ [self.a100, self.a101],
+ [self.a110, self.a111],
+ ]
+ ])
+
+ def time_no_lists(self, n):
+ np.block(1)
+ np.block(np.eye(3 * n))
diff --git a/doc/changelog/1.13.2-changelog.rst b/doc/changelog/1.13.2-changelog.rst
new file mode 100644
index 000000000..897f436f9
--- /dev/null
+++ b/doc/changelog/1.13.2-changelog.rst
@@ -0,0 +1,46 @@
+
+Contributors
+============
+
+A total of 12 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Allan Haldane
+* Brandon Carter
+* Charles Harris
+* Eric Wieser
+* Iryna Shcherbina +
+* James Bourbeau +
+* Jonathan Helmus
+* Julian Taylor
+* Matti Picus
+* Michael Lamparski +
+* Michael Seifert
+* Ralf Gommers
+
+Pull requests merged
+====================
+
+A total of 21 pull requests were merged for this release.
+
+* `#9390 <https://github.com/numpy/numpy/pull/9390>`__: BUG: Return the poly1d coefficients array directly
+* `#9555 <https://github.com/numpy/numpy/pull/9555>`__: BUG: fix regression in 1.13.x in distutils.mingw32ccompiler.
+* `#9556 <https://github.com/numpy/numpy/pull/9556>`__: BUG: Fix true_divide when dtype=np.float64 specified.
+* `#9557 <https://github.com/numpy/numpy/pull/9557>`__: DOC: Fix some rst markup in numpy/doc/basics.py.
+* `#9558 <https://github.com/numpy/numpy/pull/9558>`__: BLD: remove -xhost flag from IntelFCompiler.
+* `#9559 <https://github.com/numpy/numpy/pull/9559>`__: DOC: removes broken docstring example (source code, png, pdf)...
+* `#9580 <https://github.com/numpy/numpy/pull/9580>`__: BUG: Add hypot and cabs functions to WIN32 blacklist.
+* `#9732 <https://github.com/numpy/numpy/pull/9732>`__: BUG: Make scalar function elision check if temp is writeable.
+* `#9736 <https://github.com/numpy/numpy/pull/9736>`__: BUG: various fixes to np.gradient
+* `#9742 <https://github.com/numpy/numpy/pull/9742>`__: BUG: Fix np.pad for CVE-2017-12852
+* `#9744 <https://github.com/numpy/numpy/pull/9744>`__: BUG: Check for exception in sort functions, add tests
+* `#9745 <https://github.com/numpy/numpy/pull/9745>`__: DOC: Add whitespace after "versionadded::" directive so it actually...
+* `#9746 <https://github.com/numpy/numpy/pull/9746>`__: BUG: memory leak in np.dot of size 0
+* `#9747 <https://github.com/numpy/numpy/pull/9747>`__: BUG: adjust gfortran version search regex
+* `#9757 <https://github.com/numpy/numpy/pull/9757>`__: BUG: Cython 0.27 breaks NumPy on Python 3.
+* `#9764 <https://github.com/numpy/numpy/pull/9764>`__: BUG: Ensure `_npy_scaled_cexp{,f,l}` is defined when needed.
+* `#9765 <https://github.com/numpy/numpy/pull/9765>`__: BUG: PyArray_CountNonzero does not check for exceptions
+* `#9766 <https://github.com/numpy/numpy/pull/9766>`__: BUG: Fixes histogram monotonicity check for unsigned bin values
+* `#9767 <https://github.com/numpy/numpy/pull/9767>`__: BUG: ensure consistent result dtype of count_nonzero
+* `#9771 <https://github.com/numpy/numpy/pull/9771>`__: MAINT,BUG: Fix mtrand for Cython 0.27.
+* `#9772 <https://github.com/numpy/numpy/pull/9772>`__: DOC: Create the 1.13.2 release notes.
diff --git a/doc/release/1.13.2-notes.rst b/doc/release/1.13.2-notes.rst
new file mode 100644
index 000000000..f2f9120f5
--- /dev/null
+++ b/doc/release/1.13.2-notes.rst
@@ -0,0 +1,58 @@
+==========================
+NumPy 1.13.2 Release Notes
+==========================
+
+This is a bugfix release for some problems found since 1.13.1. The most
+important fixes are for CVE-2017-12852 and temporary elision. Users of earlier
+versions of 1.13 should upgrade.
+
+The Python versions supported are 2.7 and 3.4 - 3.6. The Python 3.6 wheels
+available from PIP are built with Python 3.6.2 and should be compatible with
+all previous versions of Python 3.6. The Windows wheels are now built
+with OpenBlas instead of ATLAS, which should improve the performance of the linear
+algebra functions.
+
+Contributors
+============
+
+A total of 12 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Allan Haldane
+* Brandon Carter
+* Charles Harris
+* Eric Wieser
+* Iryna Shcherbina +
+* James Bourbeau +
+* Jonathan Helmus
+* Julian Taylor
+* Matti Picus
+* Michael Lamparski +
+* Michael Seifert
+* Ralf Gommers
+
+Pull requests merged
+====================
+
+A total of 20 pull requests were merged for this release.
+
+* #9390 BUG: Return the poly1d coefficients array directly
+* #9555 BUG: Fix regression in 1.13.x in distutils.mingw32ccompiler.
+* #9556 BUG: Fix true_divide when dtype=np.float64 specified.
+* #9557 DOC: Fix some rst markup in numpy/doc/basics.py.
+* #9558 BLD: Remove -xhost flag from IntelFCompiler.
+* #9559 DOC: Removes broken docstring example (source code, png, pdf)...
+* #9580 BUG: Add hypot and cabs functions to WIN32 blacklist.
+* #9732 BUG: Make scalar function elision check if temp is writeable.
+* #9736 BUG: Various fixes to np.gradient
+* #9742 BUG: Fix np.pad for CVE-2017-12852
+* #9744 BUG: Check for exception in sort functions, add tests
+* #9745 DOC: Add whitespace after "versionadded::" directive so it actually...
+* #9746 BUG: Memory leak in np.dot of size 0
+* #9747 BUG: Adjust gfortran version search regex
+* #9757 BUG: Cython 0.27 breaks NumPy on Python 3.
+* #9764 BUG: Ensure `_npy_scaled_cexp{,f,l}` is defined when needed.
+* #9765 BUG: PyArray_CountNonzero does not check for exceptions
+* #9766 BUG: Fixes histogram monotonicity check for unsigned bin values
+* #9767 BUG: Ensure consistent result dtype of count_nonzero
+* #9771 BUG, MAINT: Fix mtrand for Cython 0.27.
diff --git a/doc/release/1.14.0-notes.rst b/doc/release/1.14.0-notes.rst
index e38c8fc83..3224b80fd 100644
--- a/doc/release/1.14.0-notes.rst
+++ b/doc/release/1.14.0-notes.rst
@@ -42,6 +42,12 @@ equivalent to the second.
The ``rcond`` parameter to ``np.linalg.lstsq`` will change its default to the
better value of machine precision times the maximum of the input matrix
dimensions. A FutureWarning is given if the parameter is not passed explicitly.
+* ``a.flat.__array__()`` will return a writeable copy of ``a`` when ``a`` is
+ non-contiguous. Previously it returned an UPDATEIFCOPY array when ``a`` was
+ writeable. Currently it returns a non-writeable copy. See gh-7054 for a
+ discussion of the issue.
+
+
Build System Changes
====================
@@ -50,12 +56,32 @@ Build System Changes
Compatibility notes
===================
+``a.flat.__array__()`` returns non-writeable arrays when ``a`` is non-contiguous
+--------------------------------------------------------------------------------
+The intent is that the UPDATEIFCOPY array previously returned when ``a`` was
+non-contiguous will be replaced by a writeable copy in the future. This
+temporary measure is aimed to notify folks who expect the underlying array to be
+modified in this situation that that will no longer be the case. The most
+likely places for this to be noticed is when expressions of the form
+``np.asarray(a.flat)`` are used, or when ``a.flat`` is passed as the out
+parameter to a ufunc.
+
``np.tensordot`` now returns zero array when contracting over 0-length dimension
--------------------------------------------------------------------------------
Previously ``np.tensordot`` raised a ValueError when contracting over 0-length
dimension. Now it returns a zero array, which is consistent with the behaviour
of ``np.dot`` and ``np.einsum``.
+``np.ma.masked`` is no longer writeable
+---------------------------------------
+Attempts to mutate the ``masked`` constant now error, as the underlying arrays
+are marked readonly. In the past, it was possible to get away with::
+
+ # emulating a function that sometimes returns np.ma.masked
+ val = random.choice([np.ma.masked, 10])
+ val_arr = np.asarray(val)
+ val_arr += 1 # now errors, previously changed np.ma.masked.data
+
``np.ma`` functions producing ``fill_value``s have changed
----------------------------------------------------------
Previously, ``np.ma.default_fill_value`` would return a 0d array, but
@@ -108,6 +134,12 @@ the result is always a view on the original masked array.
This breaks any code that used ``masked_arr.squeeze() is np.ma.masked``, but
fixes code that writes to the result of `.squeeze()`.
+Renamed first parameter of ``can_cast`` from ``from`` to ``from_``
+------------------------------------------------------------------
+The previous parameter name ``from`` is a reserved keyword in Python, which made
+it difficult to pass the argument by name. This has been fixed by renaming
+the parameter to ``from_``.
+
C API changes
=============
@@ -195,6 +227,26 @@ selected via the ``--fcompiler`` and ``--compiler`` options to
supported; by default a gfortran-compatible static archive
``openblas.a`` is looked for.
+``concatenate`` and ``stack`` gained an ``out`` argument
+--------------------------------------------------------
+A preallocated buffer of the desired dtype can now be used for the output of
+these functions.
+
+``np.linalg.pinv`` now works on stacked matrices
+------------------------------------------------
+Previously it was limited to a single 2d array.
+
+``numpy.save`` aligns data to 64 bytes instead of 16
+----------------------------------------------------
+Saving NumPy arrays in the ``npy`` format with ``numpy.save`` inserts
+padding before the array data to align it at 64 bytes. Previously
+this was only 16 bytes (and sometimes less due to a bug in the code
+for version 2). Now the alignment is 64 bytes, which matches the
+widest SIMD instruction set commonly available, and is also the most
+common cache line size. This makes ``npy`` files easier to use in
+programs which open them with ``mmap``, especially on Linux where an
+``mmap`` offset must be a multiple of the page size.
+
Changes
=======
@@ -204,8 +256,78 @@ Changes
0d arrays now use the array2string formatters to print their elements, like
other arrays. The ``style`` argument of ``array2string`` is now non-functional.
-Integer scalars are now unaffected by ``np.set_string_function``
-----------------------------------------------------------------
-Previously the str/repr of integer scalars could be controlled by
-``np.set_string_function``, unlike most other numpy scalars. This is no longer
-the case.
+User-defined types now need to implement ``__str__`` and ``__repr__``
+---------------------------------------------------------------------
+Previously, user-defined types could fall back to a default implementation of
+``__str__`` and ``__repr__`` implemented in numpy, but this has now been
+removed. Now user-defined types will fall back to the python default
+``object.__str__`` and ``object.__repr__``.
+
+``np.linalg.matrix_rank`` is more efficient for hermitian matrices
+------------------------------------------------------------------
+The keyword argument ``hermitian`` was added to toggle between standard
+SVD-based matrix rank calculation and the more efficient eigenvalue-based
+method for symmetric/hermitian matrices.
+
+Integer and Void scalars are now unaffected by ``np.set_string_function``
+-------------------------------------------------------------------------
+Previously the ``str`` and ``repr`` of integer and void scalars could be
+controlled by ``np.set_string_function``, unlike most other numpy scalars. This
+is no longer the case.
+
+Multiple-field indexing/assignment of structured arrays
+-------------------------------------------------------
+The indexing and assignment of structured arrays with multiple fields has
+changed in a number of ways:
+
+First, indexing a structured array with multiple fields (eg,
+``arr[['f1', 'f3']]``) returns a view into the original array instead of a
+copy. The returned view will have extra padding bytes corresponding to
+intervening fields in the original array, unlike the copy in 1.13, which will
+affect code such as ``arr[['f1', 'f3']].view(newdtype)``.
+
+Second, assignment between structured arrays will now occur "by position"
+instead of "by field name". The Nth field of the destination will be set to the
+Nth field of the source regardless of field name, unlike in numpy versions 1.6
+to 1.13 in which fields in the destination array were set to the
+identically-named field in the source array or to 0 if the source did not have
+a field.
+
+Correspondingly, the order of fields in a structured dtypes now matters when
+computing dtype equality. For example with the dtypes
+`x = dtype({'names': ['A', 'B'], 'formats': ['i4', 'f4'], 'offsets': [0, 4]})`
+`y = dtype({'names': ['B', 'A'], 'formats': ['f4', 'i4'], 'offsets': [4, 0]})`
+now `x == y` will return `False`, unlike before. This makes dictionary-based
+dtype specifications like `dtype({'a': ('i4', 0), 'b': ('f4', 4)})` dangerous
+in python < 3.6 since dict key-order is not preserved in those versions.
+
+Assignment from a structured array to a boolean array now raises a ValueError,
+unlike in 1.13 where it always set the destination elements to `True`.
+
+Assignment from structured array with more than one field to a non-structured
+array now raises a ValueError. In 1.13 this copied just the first field of the
+source to the destination.
+
+Using field "titles" in multiple-field indexing is now disallowed, as is
+repeating a field name in a multiple-field index.
+
+``sign`` option added to ``np.setprintoptions`` and ``np.array2string``
+-----------------------------------------------------------------------
+This option controls printing of the sign of floating-point types, and may be
+one of the characters '-', '+' or ' ', or the string 'legacy'. With '+' numpy
+always prints the sign of positive values, with ' ' it always prints a space
+(whitespace character) in the sign position of positive values, and with '-' it
+will omit the sign character for positive values, and with 'legacy' it will
+behave like ' ' except no space is printed in 0d arrays. The new default is '-'.
+
+Unneeded whitespace in float array printing removed
+---------------------------------------------------
+The new default of ``sign='-'`` (see last note) means that the ``repr`` of
+float arrays now often omits the whitespace characters previously used to
+display the sign. This new behavior can be disabled to mostly reproduce numpy
+1.13 behavior by calling ``np.set_printoptions(sign='legacy')``.
+
+``threshold`` and ``edgeitems`` options added to ``np.array2string``
+---------------------------------------------------------------------
+These options could previously be controlled using ``np.set_printoptions``, but
+now can be changed on a per-call basis as arguments to ``np.array2string``.
diff --git a/doc/source/dev/governance/people.rst b/doc/source/dev/governance/people.rst
index a0f08b57d..b22852a5a 100644
--- a/doc/source/dev/governance/people.rst
+++ b/doc/source/dev/governance/people.rst
@@ -12,8 +12,6 @@ Steering council
* Ralf Gommers
-* Alex Griffing
-
* Charles Harris
* Nathaniel Smith
@@ -22,12 +20,22 @@ Steering council
* Pauli Virtanen
+* Eric Wieser
+
+* Marten van Kerkwijk
+
+* Stephan Hoyer
+
+* Allan Haldane
+
Emeritus members
----------------
* Travis Oliphant - Project Founder / Emeritus Leader (served: 2005-2012)
+* Alex Griffing (served: 2015-2017)
+
NumFOCUS Subcommittee
---------------------
diff --git a/doc/source/reference/routines.polynomials.package.rst b/doc/source/reference/routines.polynomials.package.rst
index b2d357b31..61cb57fbb 100644
--- a/doc/source/reference/routines.polynomials.package.rst
+++ b/doc/source/reference/routines.polynomials.package.rst
@@ -15,3 +15,4 @@ Polynomial Package
routines.polynomials.laguerre
routines.polynomials.hermite
routines.polynomials.hermite_e
+ routines.polynomials.polyutils
diff --git a/doc/source/reference/routines.polynomials.polyutils.rst b/doc/source/reference/routines.polynomials.polyutils.rst
new file mode 100644
index 000000000..4bafd09de
--- /dev/null
+++ b/doc/source/reference/routines.polynomials.polyutils.rst
@@ -0,0 +1,4 @@
+Polyutils
+=========
+
+.. automodule:: numpy.polynomial.polyutils
diff --git a/doc/source/release.rst b/doc/source/release.rst
index 238df42b3..8ae1c7ce2 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -2,6 +2,8 @@
Release Notes
*************
+.. include:: ../release/1.13.2-notes.rst
+.. include:: ../release/1.13.1-notes.rst
.. include:: ../release/1.13.0-notes.rst
.. include:: ../release/1.12.1-notes.rst
.. include:: ../release/1.12.0-notes.rst
diff --git a/doc/source/user/building.rst b/doc/source/user/building.rst
index fa3f2ccb4..b98f89c2d 100644
--- a/doc/source/user/building.rst
+++ b/doc/source/user/building.rst
@@ -32,7 +32,7 @@ Building NumPy requires the following software installed:
FORTRAN 77 compiler installed.
Note that NumPy is developed mainly using GNU compilers. Compilers from
- other vendors such as Intel, Absoft, Sun, NAG, Compaq, Vast, Porland,
+ other vendors such as Intel, Absoft, Sun, NAG, Compaq, Vast, Portland,
Lahey, HP, IBM, Microsoft are only supported in the form of community
feedback, and may not work out of the box. GCC 4.x (and later) compilers
are recommended.
@@ -137,7 +137,7 @@ Additional compiler flags can be supplied by setting the ``OPT``,
Building with ATLAS support
---------------------------
-Ubuntu
+Ubuntu
~~~~~~
You can install the necessary package for optimized ATLAS with this command::
diff --git a/doc/source/user/quickstart.rst b/doc/source/user/quickstart.rst
index f37e255bc..4a10faae8 100644
--- a/doc/source/user/quickstart.rst
+++ b/doc/source/user/quickstart.rst
@@ -25,14 +25,12 @@ The Basics
NumPy's main object is the homogeneous multidimensional array. It is a
table of elements (usually numbers), all of the same type, indexed by a
-tuple of positive integers. In NumPy dimensions are called *axes*. The
-number of axes is *rank*.
+tuple of positive integers. In NumPy dimensions are called *axes*.
-For example, the coordinates of a point in 3D space ``[1, 2, 1]`` is an
-array of rank 1, because it has one axis. That axis has a length of 3.
-In the example pictured below, the array has rank 2 (it is 2-dimensional).
-The first dimension (axis) has a length of 2, the second dimension has a
-length of 3.
+For example, the coordinates of a point in 3D space ``[1, 2, 1]`` has
+one axis. That axis has 3 elements in it, so we say it has a length
+of 3. In the example pictured below, the array has 2 axes. The first
+axis has a length of 2, the second axis has a length of 3.
::
@@ -46,14 +44,12 @@ arrays and offers less functionality. The more important attributes of
an ``ndarray`` object are:
ndarray.ndim
- the number of axes (dimensions) of the array. In the Python world,
- the number of dimensions is referred to as *rank*.
+ the number of axes (dimensions) of the array.
ndarray.shape
the dimensions of the array. This is a tuple of integers indicating
the size of the array in each dimension. For a matrix with *n* rows
and *m* columns, ``shape`` will be ``(n,m)``. The length of the
- ``shape`` tuple is therefore the rank, or number of dimensions,
- ``ndim``.
+ ``shape`` tuple is therefore the number of axes, ``ndim``.
ndarray.size
the total number of elements of the array. This is equal to the
product of the elements of ``shape``.
@@ -537,8 +533,8 @@ remaining axes. NumPy also allows you to write this using dots as
``b[i,...]``.
The **dots** (``...``) represent as many colons as needed to produce a
-complete indexing tuple. For example, if ``x`` is a rank 5 array (i.e.,
-it has 5 axes), then
+complete indexing tuple. For example, if ``x`` is an array with 5
+axes, then
- ``x[1,2,...]`` is equivalent to ``x[1,2,:,:,:]``,
- ``x[...,3]`` to ``x[:,:,:,:,3]`` and
@@ -1245,9 +1241,9 @@ selecting the slices we want::
Note that the length of the 1D boolean array must coincide with the
length of the dimension (or axis) you want to slice. In the previous
-example, ``b1`` is a 1-rank array with length 3 (the number of *rows* in
-``a``), and ``b2`` (of length 4) is suitable to index the 2nd rank
-(columns) of ``a``.
+example, ``b1`` has length 3 (the number of *rows* in ``a``), and
+``b2`` (of length 4) is suitable to index the 2nd axis (columns) of
+``a``.
The ix_() function
-------------------
diff --git a/numpy/add_newdocs.py b/numpy/add_newdocs.py
index 687204fc1..55538ad1b 100644
--- a/numpy/add_newdocs.py
+++ b/numpy/add_newdocs.py
@@ -1158,7 +1158,7 @@ add_newdoc('numpy.core.multiarray', 'frombuffer',
add_newdoc('numpy.core.multiarray', 'concatenate',
"""
- concatenate((a1, a2, ...), axis=0)
+ concatenate((a1, a2, ...), axis=0, out=None)
Join a sequence of arrays along an existing axis.
@@ -1169,6 +1169,10 @@ add_newdoc('numpy.core.multiarray', 'concatenate',
corresponding to `axis` (the first, by default).
axis : int, optional
The axis along which the arrays will be joined. Default is 0.
+ out : ndarray, optional
+ If provided, the destination to place the result. The shape must be
+ correct, matching that of what concatenate would have returned if no
+ out argument were specified.
Returns
-------
@@ -1338,7 +1342,8 @@ add_newdoc('numpy.core.multiarray', 'arange',
step : number, optional
Spacing between values. For any output `out`, this is the distance
between two adjacent values, ``out[i+1] - out[i]``. The default
- step size is 1. If `step` is specified, `start` must also be given.
+ step size is 1. If `step` is specified as a positional argument,
+ `start` must also be given.
dtype : dtype
The type of the output array. If `dtype` is not given, infer the data
type from the other input arguments.
@@ -1589,7 +1594,7 @@ add_newdoc('numpy.core.multiarray', 'lexsort',
add_newdoc('numpy.core.multiarray', 'can_cast',
"""
- can_cast(from, totype, casting = 'safe')
+ can_cast(from_, to, casting='safe')
Returns True if cast between data types can occur according to the
casting rule. If from is a scalar or array scalar, also returns
@@ -1598,9 +1603,9 @@ add_newdoc('numpy.core.multiarray', 'can_cast',
Parameters
----------
- from : dtype, dtype specifier, scalar, or array
+ from_ : dtype, dtype specifier, scalar, or array
Data type, scalar, or array to cast from.
- totype : dtype or dtype specifier
+ to : dtype or dtype specifier
Data type to cast to.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur.
@@ -3096,7 +3101,7 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('__copy__',
add_newdoc('numpy.core.multiarray', 'ndarray', ('__deepcopy__',
- """a.__deepcopy__() -> Deep copy of array.
+ """a.__deepcopy__(memo, /) -> Deep copy of array.
Used if copy.deepcopy is called on an array.
@@ -3112,9 +3117,12 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('__reduce__',
add_newdoc('numpy.core.multiarray', 'ndarray', ('__setstate__',
- """a.__setstate__(version, shape, dtype, isfortran, rawdata)
+ """a.__setstate__(state, /)
For unpickling.
+
+ The `state` argument must be a sequence that contains the following
+ elements:
Parameters
----------
@@ -3764,7 +3772,7 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('itemset',
add_newdoc('numpy.core.multiarray', 'ndarray', ('max',
"""
- a.max(axis=None, out=None)
+ a.max(axis=None, out=None, keepdims=False)
Return the maximum along a given axis.
diff --git a/numpy/core/arrayprint.py b/numpy/core/arrayprint.py
index 795ceec6c..e1df556ef 100644
--- a/numpy/core/arrayprint.py
+++ b/numpy/core/arrayprint.py
@@ -5,7 +5,8 @@ $Id: arrayprint.py,v 1.9 2005/09/13 13:58:44 teoliphant Exp $
"""
from __future__ import division, absolute_import, print_function
-__all__ = ["array2string", "set_printoptions", "get_printoptions"]
+__all__ = ["array2string", "array_str", "array_repr", "set_string_function",
+ "set_printoptions", "get_printoptions"]
__docformat__ = 'restructuredtext'
#
@@ -37,11 +38,14 @@ else:
import numpy as np
from . import numerictypes as _nt
-from .umath import maximum, minimum, absolute, not_equal, isnan, isinf
+from .umath import absolute, not_equal, isnan, isinf
+from . import multiarray
from .multiarray import (array, format_longfloat, datetime_as_string,
- datetime_data, dtype)
-from .fromnumeric import ravel
-from .numeric import asarray
+ datetime_data, dtype, ndarray)
+from .fromnumeric import ravel, any
+from .numeric import concatenate, asarray, errstate
+from .numerictypes import (longlong, intc, int_, float_, complex_, bool_,
+ flexible)
import warnings
if sys.version_info[0] >= 3:
@@ -51,24 +55,36 @@ else:
_MAXINT = sys.maxint
_MININT = -sys.maxint - 1
-def product(x, y):
- return x*y
+_format_options = {
+ 'edgeitems': 3, # repr N leading and trailing items of each dimension
+ 'threshold': 1000, # total items > triggers array summarization
+ 'precision': 8, # precision of floating point representations
+ 'suppress': False, # suppress printing small floating values in exp format
+ 'linewidth': 75,
+ 'nanstr': 'nan',
+ 'infstr': 'inf',
+ 'sign': '-',
+ 'formatter': None }
-_summaryEdgeItems = 3 # repr N leading and trailing items of each dimension
-_summaryThreshold = 1000 # total items > triggers array summarization
+def _make_options_dict(precision=None, threshold=None, edgeitems=None,
+ linewidth=None, suppress=None, nanstr=None, infstr=None,
+ sign=None, formatter=None):
+ """ make a dictionary out of the non-None arguments, plus sanity checks """
-_float_output_precision = 8
-_float_output_suppress_small = False
-_line_width = 75
-_nan_str = 'nan'
-_inf_str = 'inf'
-_formatter = None # formatting function for array elements
+ options = {k: v for k, v in locals().items() if v is not None}
+ if suppress is not None:
+ options['suppress'] = bool(suppress)
+
+ if sign not in [None, '-', '+', ' ', 'legacy']:
+ raise ValueError("sign option must be one of "
+ "' ', '+', '-', or 'legacy'")
+
+ return options
def set_printoptions(precision=None, threshold=None, edgeitems=None,
- linewidth=None, suppress=None,
- nanstr=None, infstr=None,
- formatter=None):
+ linewidth=None, suppress=None, nanstr=None, infstr=None,
+ formatter=None, sign=None):
"""
Set printing options.
@@ -89,12 +105,21 @@ def set_printoptions(precision=None, threshold=None, edgeitems=None,
The number of characters per line for the purpose of inserting
line breaks (default 75).
suppress : bool, optional
- Whether or not suppress printing of small floating point values
- using scientific notation (default False).
+ If True, always print floating point numbers using fixed point
+ notation, in which case numbers equal to zero in the current precision
+ will print as zero. If False, then scientific notation is used when
+ absolute value of the smallest number is < 1e-4 or the ratio of the
+ maximum absolute value to the minimum is > 1e3. The default is False.
nanstr : str, optional
String representation of floating point not-a-number (default nan).
infstr : str, optional
String representation of floating point infinity (default inf).
+ sign : string, either '-', '+', ' ' or 'legacy', optional
+ Controls printing of the sign of floating-point types. If '+', always
+        print the sign of positive values. If ' ', always print a space
+ (whitespace character) in the sign position of positive values. If
+ '-', omit the sign character of positive values. If 'legacy', print a
+ space for positive values except in 0d arrays. (default '-')
formatter : dict of callables, optional
If not None, the keys should indicate the type(s) that the respective
formatting function applies to. Callables should return a string.
@@ -170,26 +195,12 @@ def set_printoptions(precision=None, threshold=None, edgeitems=None,
... linewidth=75, nanstr='nan', precision=8,
... suppress=False, threshold=1000, formatter=None)
"""
+ opt = _make_options_dict(precision, threshold, edgeitems, linewidth,
+ suppress, nanstr, infstr, sign, formatter)
+ # formatter is always reset
+ opt['formatter'] = formatter
+ _format_options.update(opt)
- global _summaryThreshold, _summaryEdgeItems, _float_output_precision
- global _line_width, _float_output_suppress_small, _nan_str, _inf_str
- global _formatter
-
- if linewidth is not None:
- _line_width = linewidth
- if threshold is not None:
- _summaryThreshold = threshold
- if edgeitems is not None:
- _summaryEdgeItems = edgeitems
- if precision is not None:
- _float_output_precision = precision
- if suppress is not None:
- _float_output_suppress_small = not not suppress
- if nanstr is not None:
- _nan_str = nanstr
- if infstr is not None:
- _inf_str = infstr
- _formatter = formatter
def get_printoptions():
"""
@@ -208,6 +219,7 @@ def get_printoptions():
- nanstr : str
- infstr : str
- formatter : dict of callables
+ - sign : str
For a full description of these options, see `set_printoptions`.
@@ -216,41 +228,25 @@ def get_printoptions():
set_printoptions, set_string_function
"""
- d = dict(precision=_float_output_precision,
- threshold=_summaryThreshold,
- edgeitems=_summaryEdgeItems,
- linewidth=_line_width,
- suppress=_float_output_suppress_small,
- nanstr=_nan_str,
- infstr=_inf_str,
- formatter=_formatter)
- return d
+ return _format_options.copy()
def _leading_trailing(a):
- from . import numeric as _nc
+ edgeitems = _format_options['edgeitems']
if a.ndim == 1:
- if len(a) > 2*_summaryEdgeItems:
- b = _nc.concatenate((a[:_summaryEdgeItems],
- a[-_summaryEdgeItems:]))
+ if len(a) > 2*edgeitems:
+ b = concatenate((a[:edgeitems], a[-edgeitems:]))
else:
b = a
else:
- if len(a) > 2*_summaryEdgeItems:
- l = [_leading_trailing(a[i]) for i in range(
- min(len(a), _summaryEdgeItems))]
+ if len(a) > 2*edgeitems:
+ l = [_leading_trailing(a[i]) for i in range(min(len(a), edgeitems))]
l.extend([_leading_trailing(a[-i]) for i in range(
- min(len(a), _summaryEdgeItems), 0, -1)])
+ min(len(a), edgeitems), 0, -1)])
else:
l = [_leading_trailing(a[i]) for i in range(0, len(a))]
- b = _nc.concatenate(tuple(l))
+ b = concatenate(tuple(l))
return b
-def _boolFormatter(x):
- if x:
- return ' True'
- else:
- return 'False'
-
def _object_format(o):
""" Object arrays containing lists should be printed unambiguously """
if type(o) is list:
@@ -262,15 +258,16 @@ def _object_format(o):
def repr_format(x):
return repr(x)
-def _get_formatdict(data, precision, suppress_small, formatter):
+def _get_formatdict(data, **opt):
+ prec, supp, sign = opt['precision'], opt['suppress'], opt['sign']
+
# wrapped in lambdas to avoid taking a code path with the wrong type of data
- formatdict = {'bool': lambda: _boolFormatter,
+ formatdict = {'bool': lambda: BoolFormat(data),
'int': lambda: IntegerFormat(data),
- 'float': lambda: FloatFormat(data, precision, suppress_small),
- 'longfloat': lambda: LongFloatFormat(precision),
- 'complexfloat': lambda: ComplexFormat(data, precision,
- suppress_small),
- 'longcomplexfloat': lambda: LongComplexFormat(precision),
+ 'float': lambda: FloatFormat(data, prec, supp, sign),
+ 'longfloat': lambda: LongFloatFormat(prec),
+ 'complexfloat': lambda: ComplexFormat(data, prec, supp, sign),
+ 'longcomplexfloat': lambda: LongComplexFormat(prec),
'datetime': lambda: DatetimeFormat(data),
'timedelta': lambda: TimedeltaFormat(data),
'object': lambda: _object_format,
@@ -282,6 +279,7 @@ def _get_formatdict(data, precision, suppress_small, formatter):
def indirect(x):
return lambda: x
+ formatter = opt['formatter']
if formatter is not None:
fkeys = [k for k in formatter.keys() if formatter[k] is not None]
if 'all' in fkeys:
@@ -305,24 +303,16 @@ def _get_formatdict(data, precision, suppress_small, formatter):
return formatdict
-def _get_format_function(data, precision, suppress_small, formatter):
+def _get_format_function(data, **options):
"""
find the right formatting function for the dtype_
"""
dtype_ = data.dtype
if dtype_.fields is not None:
- format_functions = []
- for field_name in dtype_.names:
- field_values = data[field_name]
- format_function = _get_format_function(
- ravel(field_values), precision, suppress_small, formatter)
- if dtype_[field_name].shape != ():
- format_function = SubArrayFormat(format_function)
- format_functions.append(format_function)
- return StructureFormat(format_functions)
+ return StructureFormat.from_data(data, **options)
dtypeobj = dtype_.type
- formatdict = _get_formatdict(data, precision, suppress_small, formatter)
+ formatdict = _get_formatdict(data, **options)
if issubclass(dtypeobj, _nt.bool_):
return formatdict['bool']()
elif issubclass(dtypeobj, _nt.integer):
@@ -349,30 +339,6 @@ def _get_format_function(data, precision, suppress_small, formatter):
else:
return formatdict['numpystr']()
-def _array2string(a, max_line_width, precision, suppress_small, separator=' ',
- prefix="", formatter=None):
-
- if a.size > _summaryThreshold:
- summary_insert = "..., "
- data = _leading_trailing(a)
- else:
- summary_insert = ""
- data = ravel(asarray(a))
-
- # find the right formatting function for the array
- format_function = _get_format_function(data, precision,
- suppress_small, formatter)
-
- # skip over "["
- next_line_prefix = " "
- # skip over array(
- next_line_prefix += " "*len(prefix)
-
- lst = _formatArray(a, format_function, a.ndim, max_line_width,
- next_line_prefix, separator,
- _summaryEdgeItems, summary_insert)[:-1]
- return lst
-
def _recursive_guard(fillvalue='...'):
"""
@@ -403,12 +369,34 @@ def _recursive_guard(fillvalue='...'):
return decorating_function
-# gracefully handle recursive calls - this comes up when object arrays contain
-# themselves
+# gracefully handle recursive calls, when object arrays contain themselves
@_recursive_guard()
+def _array2string(a, options, separator=' ', prefix=""):
+ if a.size > options['threshold']:
+ summary_insert = "..., "
+ data = _leading_trailing(a)
+ else:
+ summary_insert = ""
+ data = asarray(a)
+
+ # find the right formatting function for the array
+ format_function = _get_format_function(data, **options)
+
+ # skip over "["
+ next_line_prefix = " "
+ # skip over array(
+ next_line_prefix += " "*len(prefix)
+
+ lst = _formatArray(a, format_function, a.ndim, options['linewidth'],
+ next_line_prefix, separator,
+ options['edgeitems'], summary_insert)[:-1]
+ return lst
+
+
def array2string(a, max_line_width=None, precision=None,
suppress_small=None, separator=' ', prefix="",
- style=np._NoValue, formatter=None):
+ style=np._NoValue, formatter=None, threshold=None,
+ edgeitems=None, sign=None):
"""
Return a string representation of an array.
@@ -463,6 +451,18 @@ def array2string(a, max_line_width=None, precision=None,
- 'float_kind' : sets 'float' and 'longfloat'
- 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat'
- 'str_kind' : sets 'str' and 'numpystr'
+ threshold : int, optional
+ Total number of array elements which trigger summarization
+ rather than full repr.
+ edgeitems : int, optional
+ Number of array items in summary at beginning and end of
+ each dimension.
+ sign : string, either '-', '+', ' ' or 'legacy', optional
+ Controls printing of the sign of floating-point types. If '+', always
+        print the sign of positive values. If ' ', always print a space
+ (whitespace character) in the sign position of positive values. If
+ '-', omit the sign character of positive values. If 'legacy', print a
+ space for positive values except in 0d arrays.
Returns
-------
@@ -503,30 +503,22 @@ def array2string(a, max_line_width=None, precision=None,
'[0x0L 0x1L 0x2L]'
"""
-
# Deprecation 05-16-2017 v1.14
if style is not np._NoValue:
warnings.warn("'style' argument is deprecated and no longer functional",
DeprecationWarning, stacklevel=3)
- if max_line_width is None:
- max_line_width = _line_width
-
- if precision is None:
- precision = _float_output_precision
-
- if suppress_small is None:
- suppress_small = _float_output_suppress_small
-
- if formatter is None:
- formatter = _formatter
+ overrides = _make_options_dict(precision, threshold, edgeitems,
+ max_line_width, suppress_small, None, None,
+ sign, formatter)
+ options = _format_options.copy()
+ options.update(overrides)
if a.size == 0:
# treat as a null array if any of shape elements == 0
lst = "[]"
else:
- lst = _array2string(a, max_line_width, precision, suppress_small,
- separator, prefix, formatter=formatter)
+ lst = _array2string(a, options, separator, prefix)
return lst
@@ -607,32 +599,38 @@ def _formatArray(a, format_function, rank, max_line_len,
class FloatFormat(object):
def __init__(self, data, precision, suppress_small, sign=False):
+ # for backcompatibility, accept bools
+ if isinstance(sign, bool):
+ sign = '+' if sign else '-'
+
+ self._legacy = False
+ if sign == 'legacy':
+ self._legacy = True
+ sign = '-' if data.shape == () else ' '
+
self.precision = precision
self.suppress_small = suppress_small
self.sign = sign
self.exp_format = False
self.large_exponent = False
- self.max_str_len = 0
- try:
- self.fillFormat(data)
- except (TypeError, NotImplementedError):
- # if reduce(data) fails, this instance will not be called, just
- # instantiated in formatdict.
- pass
- def fillFormat(self, data):
- from . import numeric as _nc
+ self.fillFormat(data)
- with _nc.errstate(all='ignore'):
- special = isnan(data) | isinf(data)
+ def fillFormat(self, data):
+ with errstate(all='ignore'):
+ hasinf = isinf(data)
+ special = isnan(data) | hasinf
valid = not_equal(data, 0) & ~special
- non_zero = absolute(data.compress(valid))
+ non_zero = data[valid]
+ abs_non_zero = absolute(non_zero)
if len(non_zero) == 0:
max_val = 0.
min_val = 0.
+ min_val_sgn = 0.
else:
- max_val = maximum.reduce(non_zero)
- min_val = minimum.reduce(non_zero)
+ max_val = np.max(abs_non_zero)
+ min_val = np.min(abs_non_zero)
+ min_val_sgn = np.min(non_zero)
if max_val >= 1.e8:
self.exp_format = True
if not self.suppress_small and (min_val < 0.0001
@@ -641,53 +639,57 @@ class FloatFormat(object):
if self.exp_format:
self.large_exponent = 0 < min_val < 1e-99 or max_val >= 1e100
- self.max_str_len = 8 + self.precision
- if self.large_exponent:
- self.max_str_len += 1
- if self.sign:
- format = '%+'
- else:
- format = '%'
- format = format + '%d.%de' % (self.max_str_len, self.precision)
+
+ signpos = self.sign != '-' or any(non_zero < 0)
+ # for back-compatibility with np 1.13, use two spaces
+ if self._legacy:
+ signpos = 2
+ max_str_len = signpos + 6 + self.precision + self.large_exponent
+
+ conversion = '' if self.sign == '-' else self.sign
+ format = '%' + conversion + '%d.%de' % (max_str_len, self.precision)
else:
- format = '%%.%df' % (self.precision,)
- if len(non_zero):
- precision = max([_digits(x, self.precision, format)
- for x in non_zero])
+ if len(non_zero) and self.precision > 0:
+ precision = self.precision
+ trim_zero = lambda s: precision - (len(s) - len(s.rstrip('0')))
+ fmt = '%%.%df' % (precision,)
+ precision = max(trim_zero(fmt % x) for x in abs_non_zero)
else:
precision = 0
- precision = min(self.precision, precision)
- self.max_str_len = len(str(int(max_val))) + precision + 2
- if _nc.any(special):
- self.max_str_len = max(self.max_str_len,
- len(_nan_str),
- len(_inf_str)+1)
- if self.sign:
- format = '%#+'
- else:
- format = '%#'
- format = format + '%d.%df' % (self.max_str_len, precision)
- self.special_fmt = '%%%ds' % (self.max_str_len,)
+ int_len = len(str(int(max_val)))
+ signpos = self.sign != '-' or (len(str(int(min_val_sgn))) > int_len)
+ max_str_len = signpos + int_len + 1 + precision
+
+ if any(special):
+ neginf = self.sign != '-' or any(data[hasinf] < 0)
+ nanlen = len(_format_options['nanstr'])
+ inflen = len(_format_options['infstr']) + neginf
+ max_str_len = max(max_str_len, nanlen, inflen)
+
+ conversion = '' if self.sign == '-' else self.sign
+ format = '%#' + conversion + '%d.%df' % (max_str_len, precision)
+
+ self.special_fmt = '%%%ds' % (max_str_len,)
self.format = format
def __call__(self, x, strip_zeros=True):
- from . import numeric as _nc
-
- with _nc.errstate(invalid='ignore'):
+ with errstate(invalid='ignore'):
if isnan(x):
- if self.sign:
- return self.special_fmt % ('+' + _nan_str,)
+ nan_str = _format_options['nanstr']
+ if self.sign == '+':
+ return self.special_fmt % ('+' + nan_str,)
else:
- return self.special_fmt % (_nan_str,)
+ return self.special_fmt % (nan_str,)
elif isinf(x):
+ inf_str = _format_options['infstr']
if x > 0:
- if self.sign:
- return self.special_fmt % ('+' + _inf_str,)
+ if self.sign == '+':
+ return self.special_fmt % ('+' + inf_str,)
else:
- return self.special_fmt % (_inf_str,)
+ return self.special_fmt % (inf_str,)
else:
- return self.special_fmt % ('-' + _inf_str,)
+ return self.special_fmt % ('-' + inf_str,)
s = self.format % x
if self.large_exponent:
@@ -704,21 +706,11 @@ class FloatFormat(object):
s = z + ' '*(len(s)-len(z))
return s
-
-def _digits(x, precision, format):
- if precision > 0:
- s = format % x
- z = s.rstrip('0')
- return precision - len(s) + len(z)
- else:
- return 0
-
-
class IntegerFormat(object):
def __init__(self, data):
try:
- max_str_len = max(len(str(maximum.reduce(data))),
- len(str(minimum.reduce(data))))
+ max_str_len = max(len(str(np.max(data))),
+ len(str(np.min(data))))
self.format = '%' + str(max_str_len) + 'd'
except (TypeError, NotImplementedError):
# if reduce(data) fails, this instance will not be called, just
@@ -734,29 +726,45 @@ class IntegerFormat(object):
else:
return "%s" % x
+class BoolFormat(object):
+ def __init__(self, data, **kwargs):
+ # add an extra space so " True" and "False" have the same length and
+ # array elements align nicely when printed, except in 0d arrays
+ self.truestr = ' True' if data.shape != () else 'True'
+
+ def __call__(self, x):
+ return self.truestr if x else "False"
+
+
class LongFloatFormat(object):
# XXX Have to add something to determine the width to use a la FloatFormat
# Right now, things won't line up properly
def __init__(self, precision, sign=False):
+ # for backcompatibility, accept bools
+ if isinstance(sign, bool):
+ sign = '+' if sign else '-'
+
self.precision = precision
self.sign = sign
def __call__(self, x):
if isnan(x):
- if self.sign:
- return '+' + _nan_str
+ nan_str = _format_options['nanstr']
+ if self.sign == '+':
+ return '+' + nan_str
else:
- return ' ' + _nan_str
+ return ' ' + nan_str
elif isinf(x):
+ inf_str = _format_options['infstr']
if x > 0:
- if self.sign:
- return '+' + _inf_str
+ if self.sign == '+':
+ return '+' + inf_str
else:
- return ' ' + _inf_str
+ return ' ' + inf_str
else:
- return '-' + _inf_str
+ return '-' + inf_str
elif x >= 0:
- if self.sign:
+ if self.sign == '+':
return '+' + format_longfloat(x, self.precision)
else:
return ' ' + format_longfloat(x, self.precision)
@@ -767,7 +775,7 @@ class LongFloatFormat(object):
class LongComplexFormat(object):
def __init__(self, precision):
self.real_format = LongFloatFormat(precision)
- self.imag_format = LongFloatFormat(precision, sign=True)
+ self.imag_format = LongFloatFormat(precision, sign='+')
def __call__(self, x):
r = self.real_format(x.real)
@@ -776,10 +784,15 @@ class LongComplexFormat(object):
class ComplexFormat(object):
- def __init__(self, x, precision, suppress_small):
- self.real_format = FloatFormat(x.real, precision, suppress_small)
+ def __init__(self, x, precision, suppress_small, sign=False):
+ # for backcompatibility, accept bools
+ if isinstance(sign, bool):
+ sign = '+' if sign else '-'
+
+ self.real_format = FloatFormat(x.real, precision, suppress_small,
+ sign=sign)
self.imag_format = FloatFormat(x.imag, precision, suppress_small,
- sign=True)
+ sign='+')
def __call__(self, x):
r = self.real_format(x.real, strip_zeros=False)
@@ -821,8 +834,8 @@ class TimedeltaFormat(object):
v = int_view[not_equal(int_view, nat_value.view(int_dtype))]
if len(v) > 0:
# Max str length of non-NaT elements
- max_str_len = max(len(str(maximum.reduce(v))),
- len(str(minimum.reduce(v))))
+ max_str_len = max(len(str(np.max(v))),
+ len(str(np.min(v))))
else:
max_str_len = 0
if len(v) < len(data):
@@ -854,8 +867,208 @@ class StructureFormat(object):
self.format_functions = format_functions
self.num_fields = len(format_functions)
+ @classmethod
+ def from_data(cls, data, **options):
+ """
+ This is a second way to initialize StructureFormat, using the raw data
+ as input. Added to avoid changing the signature of __init__.
+ """
+ format_functions = []
+ for field_name in data.dtype.names:
+ format_function = _get_format_function(data[field_name], **options)
+ if data.dtype[field_name].shape != ():
+ format_function = SubArrayFormat(format_function)
+ format_functions.append(format_function)
+ return cls(format_functions)
+
def __call__(self, x):
s = "("
for field, format_function in zip(x, self.format_functions):
s += format_function(field) + ", "
return (s[:-2] if 1 < self.num_fields else s[:-1]) + ")"
+
+
+def _void_scalar_repr(x):
+ """
+ Implements the repr for structured-void scalars. It is called from the
+ scalartypes.c.src code, and is placed here because it uses the elementwise
+ formatters defined above.
+ """
+ return StructureFormat.from_data(array(x), **_format_options)(x)
+
+
+_typelessdata = [int_, float_, complex_]
+if issubclass(intc, int):
+ _typelessdata.append(intc)
+if issubclass(longlong, int):
+ _typelessdata.append(longlong)
+
+def array_repr(arr, max_line_width=None, precision=None, suppress_small=None):
+ """
+ Return the string representation of an array.
+
+ Parameters
+ ----------
+ arr : ndarray
+ Input array.
+ max_line_width : int, optional
+ The maximum number of columns the string should span. Newline
+ characters split the string appropriately after array elements.
+ precision : int, optional
+ Floating point precision. Default is the current printing precision
+ (usually 8), which can be altered using `set_printoptions`.
+ suppress_small : bool, optional
+ Represent very small numbers as zero, default is False. Very small
+ is defined by `precision`, if the precision is 8 then
+ numbers smaller than 5e-9 are represented as zero.
+
+ Returns
+ -------
+ string : str
+ The string representation of an array.
+
+ See Also
+ --------
+ array_str, array2string, set_printoptions
+
+ Examples
+ --------
+ >>> np.array_repr(np.array([1,2]))
+ 'array([1, 2])'
+ >>> np.array_repr(np.ma.array([0.]))
+ 'MaskedArray([ 0.])'
+ >>> np.array_repr(np.array([], np.int32))
+ 'array([], dtype=int32)'
+
+ >>> x = np.array([1e-6, 4e-7, 2, 3])
+ >>> np.array_repr(x, precision=6, suppress_small=True)
+ 'array([ 0.000001, 0. , 2. , 3. ])'
+
+ """
+ if type(arr) is not ndarray:
+ class_name = type(arr).__name__
+ else:
+ class_name = "array"
+
+ if arr.size > 0 or arr.shape == (0,):
+ lst = array2string(arr, max_line_width, precision, suppress_small,
+ ', ', class_name + "(")
+ else: # show zero-length shape unless it is (0,)
+ lst = "[], shape=%s" % (repr(arr.shape),)
+
+ skipdtype = (arr.dtype.type in _typelessdata) and arr.size > 0
+
+ if skipdtype:
+ return "%s(%s)" % (class_name, lst)
+ else:
+ typename = arr.dtype.name
+ # Quote typename in the output if it is "complex".
+ if typename and not (typename[0].isalpha() and typename.isalnum()):
+ typename = "'%s'" % typename
+
+ lf = ' '
+ if issubclass(arr.dtype.type, flexible):
+ if arr.dtype.names:
+ typename = "%s" % str(arr.dtype)
+ else:
+ typename = "'%s'" % str(arr.dtype)
+ lf = '\n'+' '*len(class_name + "(")
+ return "%s(%s,%sdtype=%s)" % (class_name, lst, lf, typename)
+
+def array_str(a, max_line_width=None, precision=None, suppress_small=None):
+ """
+ Return a string representation of the data in an array.
+
+ The data in the array is returned as a single string. This function is
+ similar to `array_repr`, the difference being that `array_repr` also
+ returns information on the kind of array and its data type.
+
+ Parameters
+ ----------
+ a : ndarray
+ Input array.
+ max_line_width : int, optional
+ Inserts newlines if text is longer than `max_line_width`. The
+ default is, indirectly, 75.
+ precision : int, optional
+ Floating point precision. Default is the current printing precision
+ (usually 8), which can be altered using `set_printoptions`.
+ suppress_small : bool, optional
+ Represent numbers "very close" to zero as zero; default is False.
+ Very close is defined by precision: if the precision is 8, e.g.,
+ numbers smaller (in absolute value) than 5e-9 are represented as
+ zero.
+
+ See Also
+ --------
+ array2string, array_repr, set_printoptions
+
+ Examples
+ --------
+ >>> np.array_str(np.arange(3))
+ '[0 1 2]'
+
+ """
+ return array2string(a, max_line_width, precision, suppress_small, ' ', "")
+
+def set_string_function(f, repr=True):
+ """
+ Set a Python function to be used when pretty printing arrays.
+
+ Parameters
+ ----------
+ f : function or None
+ Function to be used to pretty print arrays. The function should expect
+ a single array argument and return a string of the representation of
+ the array. If None, the function is reset to the default NumPy function
+ to print arrays.
+ repr : bool, optional
+ If True (default), the function for pretty printing (``__repr__``)
+ is set, if False the function that returns the default string
+ representation (``__str__``) is set.
+
+ See Also
+ --------
+ set_printoptions, get_printoptions
+
+ Examples
+ --------
+ >>> def pprint(arr):
+ ... return 'HA! - What are you going to do now?'
+ ...
+ >>> np.set_string_function(pprint)
+ >>> a = np.arange(10)
+ >>> a
+ HA! - What are you going to do now?
+ >>> print(a)
+ [0 1 2 3 4 5 6 7 8 9]
+
+ We can reset the function to the default:
+
+ >>> np.set_string_function(None)
+ >>> a
+ array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
+
+ `repr` affects either pretty printing or normal string representation.
+ Note that ``__repr__`` is still affected by setting ``__str__``
+ because the width of each array element in the returned string becomes
+ equal to the length of the result of ``__str__()``.
+
+ >>> x = np.arange(4)
+ >>> np.set_string_function(lambda x:'random', repr=False)
+ >>> x.__str__()
+ 'random'
+ >>> x.__repr__()
+ 'array([ 0, 1, 2, 3])'
+
+ """
+ if f is None:
+ if repr:
+ return multiarray.set_string_function(array_repr, 1)
+ else:
+ return multiarray.set_string_function(array_str, 0)
+ else:
+ return multiarray.set_string_function(f, repr)
+
+set_string_function(array_str, 0)
+set_string_function(array_repr, 1)
diff --git a/numpy/core/code_generators/ufunc_docstrings.py b/numpy/core/code_generators/ufunc_docstrings.py
index 5e0280a33..6aae57234 100644
--- a/numpy/core/code_generators/ufunc_docstrings.py
+++ b/numpy/core/code_generators/ufunc_docstrings.py
@@ -2887,8 +2887,18 @@ add_newdoc('numpy.core.umath', 'remainder',
Computes the remainder complementary to the `floor_divide` function. It is
equivalent to the Python modulus operator ``x1 % x2`` and has the same sign
- as the divisor `x2`. It should not be confused with the Matlab(TM) ``rem``
- function.
+ as the divisor `x2`. The MATLAB function equivalent to ``np.remainder``
+ is ``mod``.
+
+ .. warning::
+
+ This should not be confused with:
+
+            * Python 3.7's `math.remainder` and C's ``remainder``, which
+              compute the IEEE remainder, which is the complement to
+              ``round(x1 / x2)``.
+            * The MATLAB ``rem`` function and/or the C ``%`` operator, which
+              is the complement to ``int(x1 / x2)``.
Parameters
----------
@@ -2908,7 +2918,7 @@ add_newdoc('numpy.core.umath', 'remainder',
--------
floor_divide : Equivalent of Python ``//`` operator.
divmod : Simultaneous floor division and remainder.
- fmod : Equivalent of the Matlab(TM) ``rem`` function.
+ fmod : Equivalent of the MATLAB ``rem`` function.
divide, floor
Notes
diff --git a/numpy/core/einsumfunc.py b/numpy/core/einsumfunc.py
index 37d691027..1ea3e598c 100644
--- a/numpy/core/einsumfunc.py
+++ b/numpy/core/einsumfunc.py
@@ -166,8 +166,14 @@ def _optimal_path(input_sets, output_set, idx_dict, memory_limit):
new_pos = positions + [con]
iter_results.append((new_cost, new_pos, new_input_sets))
- # Update list to iterate over
- full_results = iter_results
+ # Update combinatorial list, if we did not find anything return best
+ # path + remaining contractions
+ if iter_results:
+ full_results = iter_results
+ else:
+ path = min(full_results, key=lambda x: x[0])[1]
+ path += [tuple(range(len(input_sets) - iteration))]
+ return path
# If we have not found anything return single einsum contraction
if len(full_results) == 0:
diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py
index 6f7c45859..a94be7b4d 100644
--- a/numpy/core/fromnumeric.py
+++ b/numpy/core/fromnumeric.py
@@ -1245,13 +1245,13 @@ def diagonal(a, offset=0, axis1=0, axis2=1):
Returns
-------
array_of_diagonals : ndarray
- If `a` is 2-D and not a matrix, a 1-D array of the same type as `a`
- containing the diagonal is returned. If `a` is a matrix, a 1-D
+ If `a` is 2-D and not a `matrix`, a 1-D array of the same type as `a`
+ containing the diagonal is returned. If `a` is a `matrix`, a 1-D
array containing the diagonal is returned in order to maintain
- backward compatibility. If the dimension of `a` is greater than
- two, then an array of diagonals is returned, "packed" from
- left-most dimension to right-most (e.g., if `a` is 3-D, then the
- diagonals are "packed" along rows).
+ backward compatibility.
+ If ``a.ndim > 2``, then the dimensions specified by `axis1` and `axis2`
+ are removed, and a new axis inserted at the end corresponding to the
+ diagonal.
Raises
------
diff --git a/numpy/core/function_base.py b/numpy/core/function_base.py
index 0415e16ac..82de1a36e 100644
--- a/numpy/core/function_base.py
+++ b/numpy/core/function_base.py
@@ -115,17 +115,24 @@ def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None):
y = _nx.arange(0, num, dtype=dt)
delta = stop - start
+    # In-place multiplication y *= delta/div is faster, but prevents the multiplicand
+ # from overriding what class is produced, and thus prevents, e.g. use of Quantities,
+ # see gh-7142. Hence, we multiply in place only for standard scalar types.
+ _mult_inplace = _nx.isscalar(delta)
if num > 1:
step = delta / div
if step == 0:
# Special handling for denormal numbers, gh-5437
y /= div
- y = y * delta
+ if _mult_inplace:
+ y *= delta
+ else:
+ y = y * delta
else:
- # One might be tempted to use faster, in-place multiplication here,
- # but this prevents step from overriding what class is produced,
- # and thus prevents, e.g., use of Quantities; see gh-7142.
- y = y * step
+ if _mult_inplace:
+ y *= step
+ else:
+ y = y * step
else:
# 0 and 1 item long sequences have an undefined step
step = NaN
diff --git a/numpy/core/include/numpy/npy_cpu.h b/numpy/core/include/numpy/npy_cpu.h
index 60abae4e0..84653ea18 100644
--- a/numpy/core/include/numpy/npy_cpu.h
+++ b/numpy/core/include/numpy/npy_cpu.h
@@ -15,6 +15,8 @@
* NPY_CPU_ARMEB
* NPY_CPU_SH_LE
* NPY_CPU_SH_BE
+ * NPY_CPU_ARCEL
+ * NPY_CPU_ARCEB
*/
#ifndef _NPY_CPUARCH_H_
#define _NPY_CPUARCH_H_
@@ -76,6 +78,10 @@
#define NPY_CPU_AARCH64
#elif defined(__mc68000__)
#define NPY_CPU_M68K
+#elif defined(__arc__) && defined(__LITTLE_ENDIAN__)
+ #define NPY_CPU_ARCEL
+#elif defined(__arc__) && defined(__BIG_ENDIAN__)
+ #define NPY_CPU_ARCEB
#else
#error Unknown CPU, please report this to numpy maintainers with \
information about your platform (OS, CPU and compiler)
diff --git a/numpy/core/include/numpy/npy_endian.h b/numpy/core/include/numpy/npy_endian.h
index e34b1d97e..1a42121db 100644
--- a/numpy/core/include/numpy/npy_endian.h
+++ b/numpy/core/include/numpy/npy_endian.h
@@ -45,7 +45,8 @@
|| defined(NPY_CPU_AARCH64) \
|| defined(NPY_CPU_SH_LE) \
|| defined(NPY_CPU_MIPSEL) \
- || defined(NPY_CPU_PPC64LE)
+ || defined(NPY_CPU_PPC64LE) \
+ || defined(NPY_CPU_ARCEL)
#define NPY_BYTE_ORDER NPY_LITTLE_ENDIAN
#elif defined(NPY_CPU_PPC) \
|| defined(NPY_CPU_SPARC) \
@@ -56,7 +57,8 @@
|| defined(NPY_CPU_SH_BE) \
|| defined(NPY_CPU_MIPSEB) \
|| defined(NPY_CPU_OR1K) \
- || defined(NPY_CPU_M68K)
+ || defined(NPY_CPU_M68K) \
+ || defined(NPY_CPU_ARCEB)
#define NPY_BYTE_ORDER NPY_BIG_ENDIAN
#else
#error Unknown CPU: can not set endianness
diff --git a/numpy/core/numeric.py b/numpy/core/numeric.py
index fde08490a..ce17a1900 100644
--- a/numpy/core/numeric.py
+++ b/numpy/core/numeric.py
@@ -46,28 +46,23 @@ loads = pickle.loads
__all__ = [
'newaxis', 'ndarray', 'flatiter', 'nditer', 'nested_iters', 'ufunc',
- 'arange', 'array', 'zeros', 'count_nonzero', 'empty', 'broadcast',
- 'dtype', 'fromstring', 'fromfile', 'frombuffer', 'int_asbuffer',
- 'where', 'argwhere', 'copyto', 'concatenate', 'fastCopyAndTranspose',
- 'lexsort', 'set_numeric_ops', 'can_cast', 'promote_types',
- 'min_scalar_type', 'result_type', 'asarray', 'asanyarray',
- 'ascontiguousarray', 'asfortranarray', 'isfortran', 'empty_like',
- 'zeros_like', 'ones_like', 'correlate', 'convolve', 'inner', 'dot',
- 'outer', 'vdot', 'roll',
- 'rollaxis', 'moveaxis', 'cross', 'tensordot', 'array2string',
- 'get_printoptions', 'set_printoptions', 'array_repr', 'array_str',
- 'set_string_function', 'little_endian', 'require', 'fromiter',
- 'array_equal', 'array_equiv', 'indices', 'fromfunction', 'isclose', 'load',
- 'loads', 'isscalar', 'binary_repr', 'base_repr', 'ones', 'identity',
- 'allclose', 'compare_chararrays', 'putmask', 'seterr', 'geterr',
- 'setbufsize', 'getbufsize', 'seterrcall', 'geterrcall', 'errstate',
- 'flatnonzero', 'Inf', 'inf', 'infty', 'Infinity', 'nan', 'NaN', 'False_',
- 'True_', 'bitwise_not', 'CLIP', 'RAISE', 'WRAP', 'MAXDIMS', 'BUFSIZE',
- 'ALLOW_THREADS', 'ComplexWarning', 'full', 'full_like', 'matmul',
- 'shares_memory', 'may_share_memory', 'MAY_SHARE_BOUNDS', 'MAY_SHARE_EXACT',
- 'TooHardError', 'AxisError'
- ]
-
+ 'arange', 'array', 'zeros', 'count_nonzero', 'empty', 'broadcast', 'dtype',
+ 'fromstring', 'fromfile', 'frombuffer', 'int_asbuffer', 'where',
+ 'argwhere', 'copyto', 'concatenate', 'fastCopyAndTranspose', 'lexsort',
+ 'set_numeric_ops', 'can_cast', 'promote_types', 'min_scalar_type',
+ 'result_type', 'asarray', 'asanyarray', 'ascontiguousarray',
+ 'asfortranarray', 'isfortran', 'empty_like', 'zeros_like', 'ones_like',
+ 'correlate', 'convolve', 'inner', 'dot', 'outer', 'vdot', 'roll',
+ 'rollaxis', 'moveaxis', 'cross', 'tensordot', 'little_endian', 'require',
+ 'fromiter', 'array_equal', 'array_equiv', 'indices', 'fromfunction',
+ 'isclose', 'load', 'loads', 'isscalar', 'binary_repr', 'base_repr', 'ones',
+ 'identity', 'allclose', 'compare_chararrays', 'putmask', 'seterr',
+ 'geterr', 'setbufsize', 'getbufsize', 'seterrcall', 'geterrcall',
+ 'errstate', 'flatnonzero', 'Inf', 'inf', 'infty', 'Infinity', 'nan', 'NaN',
+ 'False_', 'True_', 'bitwise_not', 'CLIP', 'RAISE', 'WRAP', 'MAXDIMS',
+ 'BUFSIZE', 'ALLOW_THREADS', 'ComplexWarning', 'full', 'full_like',
+ 'matmul', 'shares_memory', 'may_share_memory', 'MAY_SHARE_BOUNDS',
+ 'MAY_SHARE_EXACT', 'TooHardError', 'AxisError' ]
if sys.version_info[0] < 3:
__all__.extend(['getbuffer', 'newbuffer'])
@@ -363,20 +358,6 @@ def full_like(a, fill_value, dtype=None, order='K', subok=True):
multiarray.copyto(res, fill_value, casting='unsafe')
return res
-
-def extend_all(module):
- adict = {}
- for a in __all__:
- adict[a] = 1
- try:
- mall = getattr(module, '__all__')
- except AttributeError:
- mall = [k for k in module.__dict__.keys() if not k.startswith('_')]
- for a in mall:
- if a not in adict:
- __all__.append(a)
-
-
def count_nonzero(a, axis=None):
"""
Counts the number of non-zero values in the array ``a``.
@@ -1820,193 +1801,6 @@ def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None):
return moveaxis(cp, -1, axisc)
-
-# Use numarray's printing function
-from .arrayprint import array2string, get_printoptions, set_printoptions
-
-
-_typelessdata = [int_, float_, complex_]
-if issubclass(intc, int):
- _typelessdata.append(intc)
-
-
-if issubclass(longlong, int):
- _typelessdata.append(longlong)
-
-
-def array_repr(arr, max_line_width=None, precision=None, suppress_small=None):
- """
- Return the string representation of an array.
-
- Parameters
- ----------
- arr : ndarray
- Input array.
- max_line_width : int, optional
- The maximum number of columns the string should span. Newline
- characters split the string appropriately after array elements.
- precision : int, optional
- Floating point precision. Default is the current printing precision
- (usually 8), which can be altered using `set_printoptions`.
- suppress_small : bool, optional
- Represent very small numbers as zero, default is False. Very small
- is defined by `precision`, if the precision is 8 then
- numbers smaller than 5e-9 are represented as zero.
-
- Returns
- -------
- string : str
- The string representation of an array.
-
- See Also
- --------
- array_str, array2string, set_printoptions
-
- Examples
- --------
- >>> np.array_repr(np.array([1,2]))
- 'array([1, 2])'
- >>> np.array_repr(np.ma.array([0.]))
- 'MaskedArray([ 0.])'
- >>> np.array_repr(np.array([], np.int32))
- 'array([], dtype=int32)'
-
- >>> x = np.array([1e-6, 4e-7, 2, 3])
- >>> np.array_repr(x, precision=6, suppress_small=True)
- 'array([ 0.000001, 0. , 2. , 3. ])'
-
- """
- if type(arr) is not ndarray:
- class_name = type(arr).__name__
- else:
- class_name = "array"
-
- if arr.size > 0 or arr.shape == (0,):
- lst = array2string(arr, max_line_width, precision, suppress_small,
- ', ', class_name + "(")
- else: # show zero-length shape unless it is (0,)
- lst = "[], shape=%s" % (repr(arr.shape),)
-
- skipdtype = (arr.dtype.type in _typelessdata) and arr.size > 0
-
- if skipdtype:
- return "%s(%s)" % (class_name, lst)
- else:
- typename = arr.dtype.name
- # Quote typename in the output if it is "complex".
- if typename and not (typename[0].isalpha() and typename.isalnum()):
- typename = "'%s'" % typename
-
- lf = ' '
- if issubclass(arr.dtype.type, flexible):
- if arr.dtype.names:
- typename = "%s" % str(arr.dtype)
- else:
- typename = "'%s'" % str(arr.dtype)
- lf = '\n'+' '*len(class_name + "(")
- return "%s(%s,%sdtype=%s)" % (class_name, lst, lf, typename)
-
-
-def array_str(a, max_line_width=None, precision=None, suppress_small=None):
- """
- Return a string representation of the data in an array.
-
- The data in the array is returned as a single string. This function is
- similar to `array_repr`, the difference being that `array_repr` also
- returns information on the kind of array and its data type.
-
- Parameters
- ----------
- a : ndarray
- Input array.
- max_line_width : int, optional
- Inserts newlines if text is longer than `max_line_width`. The
- default is, indirectly, 75.
- precision : int, optional
- Floating point precision. Default is the current printing precision
- (usually 8), which can be altered using `set_printoptions`.
- suppress_small : bool, optional
- Represent numbers "very close" to zero as zero; default is False.
- Very close is defined by precision: if the precision is 8, e.g.,
- numbers smaller (in absolute value) than 5e-9 are represented as
- zero.
-
- See Also
- --------
- array2string, array_repr, set_printoptions
-
- Examples
- --------
- >>> np.array_str(np.arange(3))
- '[0 1 2]'
-
- """
- return array2string(a, max_line_width, precision, suppress_small, ' ', "")
-
-
-def set_string_function(f, repr=True):
- """
- Set a Python function to be used when pretty printing arrays.
-
- Parameters
- ----------
- f : function or None
- Function to be used to pretty print arrays. The function should expect
- a single array argument and return a string of the representation of
- the array. If None, the function is reset to the default NumPy function
- to print arrays.
- repr : bool, optional
- If True (default), the function for pretty printing (``__repr__``)
- is set, if False the function that returns the default string
- representation (``__str__``) is set.
-
- See Also
- --------
- set_printoptions, get_printoptions
-
- Examples
- --------
- >>> def pprint(arr):
- ... return 'HA! - What are you going to do now?'
- ...
- >>> np.set_string_function(pprint)
- >>> a = np.arange(10)
- >>> a
- HA! - What are you going to do now?
- >>> print(a)
- [0 1 2 3 4 5 6 7 8 9]
-
- We can reset the function to the default:
-
- >>> np.set_string_function(None)
- >>> a
- array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
-
- `repr` affects either pretty printing or normal string representation.
- Note that ``__repr__`` is still affected by setting ``__str__``
- because the width of each array element in the returned string becomes
- equal to the length of the result of ``__str__()``.
-
- >>> x = np.arange(4)
- >>> np.set_string_function(lambda x:'random', repr=False)
- >>> x.__str__()
- 'random'
- >>> x.__repr__()
- 'array([ 0, 1, 2, 3])'
-
- """
- if f is None:
- if repr:
- return multiarray.set_string_function(array_repr, 1)
- else:
- return multiarray.set_string_function(array_str, 0)
- else:
- return multiarray.set_string_function(f, repr)
-
-
-set_string_function(array_str, 0)
-set_string_function(array_repr, 1)
-
little_endian = (sys.byteorder == 'little')
@@ -2436,7 +2230,7 @@ def allclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
See Also
--------
- isclose, all, any
+ isclose, all, any, equal
Notes
-----
@@ -2446,9 +2240,14 @@ def allclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`))
The above equation is not symmetric in `a` and `b`, so that
- `allclose(a, b)` might be different from `allclose(b, a)` in
+ ``allclose(a, b)`` might be different from ``allclose(b, a)`` in
some rare cases.
+ The comparison of `a` and `b` uses standard broadcasting, which
+ means that `a` and `b` need not have the same shape in order for
+ ``allclose(a, b)`` to evaluate to True. The same is true for
+ `equal` but not `array_equal`.
+
Examples
--------
>>> np.allclose([1e10,1e-7], [1.00001e10,1e-8])
@@ -2528,13 +2327,10 @@ def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
"""
def within_tol(x, y, atol, rtol):
with errstate(invalid='ignore'):
- result = less_equal(abs(x-y), atol + rtol * abs(y))
- if isscalar(a) and isscalar(b):
- result = bool(result)
- return result
+ return less_equal(abs(x-y), atol + rtol * abs(y))
- x = array(a, copy=False, subok=True, ndmin=1)
- y = array(b, copy=False, subok=True, ndmin=1)
+ x = asanyarray(a)
+ y = asanyarray(b)
# Make sure y is an inexact type to avoid bad behavior on abs(MIN_INT).
# This will cause casting of x later. Also, make sure to allow subclasses
@@ -2561,12 +2357,11 @@ def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
if equal_nan:
# Make NaN == NaN
both_nan = isnan(x) & isnan(y)
+
+ # Needed to treat masked arrays correctly. = True would not work.
cond[both_nan] = both_nan[both_nan]
- if isscalar(a) and isscalar(b):
- return bool(cond)
- else:
- return cond
+ return cond[()] # Flatten 0d arrays to scalars
def array_equal(a1, a2):
@@ -3092,10 +2887,26 @@ nan = NaN = NAN
False_ = bool_(False)
True_ = bool_(True)
+
+def extend_all(module):
+ adict = {}
+ for a in __all__:
+ adict[a] = 1
+ try:
+ mall = getattr(module, '__all__')
+ except AttributeError:
+ mall = [k for k in module.__dict__.keys() if not k.startswith('_')]
+ for a in mall:
+ if a not in adict:
+ __all__.append(a)
+
from .umath import *
from .numerictypes import *
from . import fromnumeric
from .fromnumeric import *
+from . import arrayprint
+from .arrayprint import *
extend_all(fromnumeric)
extend_all(umath)
extend_all(numerictypes)
+extend_all(arrayprint)
diff --git a/numpy/core/shape_base.py b/numpy/core/shape_base.py
index f1847d7e3..026ad603a 100644
--- a/numpy/core/shape_base.py
+++ b/numpy/core/shape_base.py
@@ -293,7 +293,7 @@ def hstack(tup):
return _nx.concatenate(arrs, 1)
-def stack(arrays, axis=0):
+def stack(arrays, axis=0, out=None):
"""
Join a sequence of arrays along a new axis.
@@ -309,6 +309,10 @@ def stack(arrays, axis=0):
Each array must have the same shape.
axis : int, optional
The axis in the result array along which the input arrays are stacked.
+ out : ndarray, optional
+ If provided, the destination to place the result. The shape must be
+ correct, matching that of what stack would have returned if no
+ out argument were specified.
Returns
-------
@@ -358,7 +362,7 @@ def stack(arrays, axis=0):
sl = (slice(None),) * axis + (_nx.newaxis,)
expanded_arrays = [arr[sl] for arr in arrays]
- return _nx.concatenate(expanded_arrays, axis=axis)
+ return _nx.concatenate(expanded_arrays, axis=axis, out=out)
class _Recurser(object):
diff --git a/numpy/core/src/multiarray/_datetime.h b/numpy/core/src/multiarray/_datetime.h
index 345aed28a..3db1254d4 100644
--- a/numpy/core/src/multiarray/_datetime.h
+++ b/numpy/core/src/multiarray/_datetime.h
@@ -175,7 +175,8 @@ convert_datetime_metadata_to_tuple(PyArray_DatetimeMetaData *meta);
*/
NPY_NO_EXPORT int
convert_datetime_metadata_tuple_to_datetime_metadata(PyObject *tuple,
- PyArray_DatetimeMetaData *out_meta);
+ PyArray_DatetimeMetaData *out_meta,
+ npy_bool from_pickle);
/*
* Gets a tzoffset in minutes by calling the fromutc() function on
diff --git a/numpy/core/src/multiarray/array_assign_array.c b/numpy/core/src/multiarray/array_assign_array.c
index 28cc7031a..d1bce8c3b 100644
--- a/numpy/core/src/multiarray/array_assign_array.c
+++ b/numpy/core/src/multiarray/array_assign_array.c
@@ -293,7 +293,8 @@ PyArray_AssignArray(PyArrayObject *dst, PyArrayObject *src,
if (((PyArray_NDIM(dst) == 1 && PyArray_NDIM(src) >= 1 &&
PyArray_STRIDES(dst)[0] *
PyArray_STRIDES(src)[PyArray_NDIM(src) - 1] < 0) ||
- PyArray_NDIM(dst) > 1) && arrays_overlap(src, dst)) {
+ PyArray_NDIM(dst) > 1 || PyArray_HASFIELDS(dst)) &&
+ arrays_overlap(src, dst)) {
PyArrayObject *tmp;
/*
diff --git a/numpy/core/src/multiarray/arrayobject.c b/numpy/core/src/multiarray/arrayobject.c
index 36d48af9f..1d4816d96 100644
--- a/numpy/core/src/multiarray/arrayobject.c
+++ b/numpy/core/src/multiarray/arrayobject.c
@@ -280,8 +280,7 @@ PyArray_CopyObject(PyArrayObject *dest, PyObject *src_object)
if (PyArray_SIZE(dest) == 1) {
Py_DECREF(dtype);
Py_DECREF(src_object);
- ret = PyArray_DESCR(dest)->f->setitem(src_object,
- PyArray_DATA(dest), dest);
+ ret = PyArray_SETITEM(dest, PyArray_DATA(dest), src_object);
return ret;
}
else {
@@ -292,8 +291,7 @@ PyArray_CopyObject(PyArrayObject *dest, PyObject *src_object)
Py_DECREF(src_object);
return -1;
}
- if (PyArray_DESCR(src)->f->setitem(src_object,
- PyArray_DATA(src), src) < 0) {
+ if (PyArray_SETITEM(src, PyArray_DATA(src), src_object) < 0) {
Py_DECREF(src_object);
Py_DECREF(src);
return -1;
diff --git a/numpy/core/src/multiarray/arraytypes.c.src b/numpy/core/src/multiarray/arraytypes.c.src
index 921fbbe50..0826df1c3 100644
--- a/numpy/core/src/multiarray/arraytypes.c.src
+++ b/numpy/core/src/multiarray/arraytypes.c.src
@@ -36,6 +36,16 @@
#include <limits.h>
#include <assert.h>
+/* check for sequences, but ignore the types numpy considers scalars */
+static NPY_INLINE npy_bool
+PySequence_NoString_Check(PyObject *op) {
+ return
+ PySequence_Check(op) &&
+ !PyString_Check(op) &&
+ !PyUnicode_Check(op) &&
+ !PyArray_IsZeroDim(op);
+}
+
/*
*****************************************************************************
** PYTHON TYPES TO C TYPES **
@@ -223,8 +233,7 @@ static int
if (PyErr_Occurred()) {
PyObject *type, *value, *traceback;
PyErr_Fetch(&type, &value, &traceback);
- if (PySequence_Check(op) && !PyString_Check(op) &&
- !PyUnicode_Check(op)) {
+ if (PySequence_NoString_Check(op)) {
PyErr_SetString(PyExc_ValueError,
"setting an array element with a sequence.");
Py_DECREF(type);
@@ -289,37 +298,26 @@ static int
{
PyArrayObject *ap = vap;
Py_complex oop;
- PyObject *op2;
@type@ temp;
int rsize;
+ if (PyArray_IsZeroDim(op)) {
+ return convert_to_scalar_and_retry(op, ov, vap, @NAME@_setitem);
+ }
+
if (PyArray_IsScalar(op, @kind@)){
temp = ((Py@kind@ScalarObject *)op)->obval;
}
else {
- if (PyArray_IsZeroDim(op)) {
- /*
- * TODO: Elsewhere in this file we use PyArray_ToScalar. Is this
- * better or worse? Possibly an optimization.
- */
- op2 = PyArray_DESCR((PyArrayObject *)op)->f->getitem(
- PyArray_BYTES((PyArrayObject *)op),
- (PyArrayObject *)op);
- }
- else {
- op2 = op;
- Py_INCREF(op);
- }
- if (op2 == Py_None) {
+ if (op == Py_None) {
oop.real = NPY_NAN;
oop.imag = NPY_NAN;
}
else {
- oop = PyComplex_AsCComplex (op2);
- }
- Py_DECREF(op2);
- if (PyErr_Occurred()) {
- return -1;
+ oop = PyComplex_AsCComplex (op);
+ if (PyErr_Occurred()) {
+ return -1;
+ }
}
temp.real = (@ftype@) oop.real;
temp.imag = (@ftype@) oop.imag;
@@ -461,7 +459,7 @@ UNICODE_setitem(PyObject *op, void *ov, void *vap)
return convert_to_scalar_and_retry(op, ov, vap, UNICODE_setitem);
}
- if (!PyBytes_Check(op) && !PyUnicode_Check(op) && PySequence_Check(op)) {
+ if (PySequence_NoString_Check(op)) {
PyErr_SetString(PyExc_ValueError,
"setting an array element with a sequence");
return -1;
@@ -552,7 +550,7 @@ STRING_setitem(PyObject *op, void *ov, void *vap)
return convert_to_scalar_and_retry(op, ov, vap, STRING_setitem);
}
- if (!PyBytes_Check(op) && !PyUnicode_Check(op) && PySequence_Check(op)) {
+ if (PySequence_NoString_Check(op)) {
PyErr_SetString(PyExc_ValueError,
"setting an array element with a sequence");
return -1;
@@ -687,7 +685,7 @@ VOID_getitem(void *input, void *vap)
else {
PyArray_ENABLEFLAGS(ap, NPY_ARRAY_ALIGNED);
}
- PyTuple_SET_ITEM(ret, i, new->f->getitem(ip+offset, ap));
+ PyTuple_SET_ITEM(ret, i, PyArray_GETITEM(ap, ip+offset));
((PyArrayObject_fields *)ap)->flags = savedflags;
}
((PyArrayObject_fields *)ap)->descr = descr;
@@ -774,66 +772,173 @@ VOID_getitem(void *input, void *vap)
NPY_NO_EXPORT int PyArray_CopyObject(PyArrayObject *, PyObject *);
+/* Given a structured PyArrayObject arr, index i and structured datatype descr,
+ * modify the dtype of arr to contain a single field corresponding to the ith
+ * field of descr, recompute the alignment flag, and return the offset of the
+ * field (in offset_p). This is useful in preparation for calling copyswap on
+ * individual fields of a numpy structure, in VOID_setitem. Compare to inner
+ * loops in VOID_getitem and VOID_nonzero.
+ *
+ * WARNING: Clobbers arr's dtype and alignment flag.
+ */
+NPY_NO_EXPORT int
+_setup_field(int i, PyArray_Descr *descr, PyArrayObject *arr,
+ npy_intp *offset_p)
+{
+ PyObject *key;
+ PyObject *tup;
+ PyArray_Descr *new;
+ npy_intp offset;
+
+ key = PyTuple_GET_ITEM(descr->names, i);
+ tup = PyDict_GetItem(descr->fields, key);
+ if (_unpack_field(tup, &new, &offset) < 0) {
+ return -1;
+ }
+
+ ((PyArrayObject_fields *)(arr))->descr = new;
+ if ((new->alignment > 1) && ((offset % new->alignment) != 0)) {
+ PyArray_CLEARFLAGS(arr, NPY_ARRAY_ALIGNED);
+ }
+ else {
+ PyArray_ENABLEFLAGS(arr, NPY_ARRAY_ALIGNED);
+ }
+
+ *offset_p = offset;
+ return 0;
+}
+
+/* Helper function for VOID_setitem, which uses the copyswap or casting code to
+ * copy structured datatypes between numpy arrays or scalars.
+ */
+static int
+_copy_and_return_void_setitem(PyArray_Descr *dstdescr, char *dstdata,
+ PyArray_Descr *srcdescr, char *srcdata){
+ PyArrayObject_fields dummy_struct;
+ PyArrayObject *dummy = (PyArrayObject *)&dummy_struct;
+ npy_int names_size = PyTuple_GET_SIZE(dstdescr->names);
+ npy_intp offset;
+ npy_int i;
+ int ret;
+
+ /* Fast path if dtypes are equal */
+ if (PyArray_EquivTypes(srcdescr, dstdescr)) {
+ for (i = 0; i < names_size; i++) {
+ /* neither line can ever fail, in principle */
+ if (_setup_field(i, dstdescr, dummy, &offset)) {
+ return -1;
+ }
+ PyArray_DESCR(dummy)->f->copyswap(dstdata + offset,
+ srcdata + offset, 0, dummy);
+ }
+ return 0;
+ }
+
+ /* Slow path */
+ ret = PyArray_CastRawArrays(1, srcdata, dstdata, 0, 0,
+ srcdescr, dstdescr, 0);
+ if (ret != NPY_SUCCEED) {
+ return -1;
+ }
+ return 0;
+}
+
static int
VOID_setitem(PyObject *op, void *input, void *vap)
{
char *ip = input;
PyArrayObject *ap = vap;
PyArray_Descr *descr;
+ int flags;
int itemsize=PyArray_DESCR(ap)->elsize;
int res;
descr = PyArray_DESCR(ap);
- if (descr->names && PyTuple_Check(op)) {
- PyObject *key;
- PyObject *names;
- int i, n;
- PyObject *tup;
- int savedflags;
-
- res = 0;
- /* get the names from the fields dictionary*/
- names = descr->names;
- n = PyTuple_GET_SIZE(names);
- if (PyTuple_GET_SIZE(op) != n) {
- PyErr_SetString(PyExc_ValueError,
- "size of tuple must match number of fields.");
- return -1;
- }
- savedflags = PyArray_FLAGS(ap);
- for (i = 0; i < n; i++) {
- PyArray_Descr *new;
- npy_intp offset;
- key = PyTuple_GET_ITEM(names, i);
- tup = PyDict_GetItem(descr->fields, key);
- if (_unpack_field(tup, &new, &offset) < 0) {
- ((PyArrayObject_fields *)ap)->descr = descr;
+ flags = PyArray_FLAGS(ap);
+ if (PyDataType_HASFIELDS(descr)) {
+ PyObject *errmsg;
+ npy_int i;
+ npy_intp offset;
+ int failed = 0;
+
+ /* If op is 0d-ndarray or numpy scalar, directly get dtype & data ptr */
+ if (PyArray_Check(op)) {
+ PyArrayObject *oparr = (PyArrayObject *)op;
+ if (PyArray_SIZE(oparr) != 1) {
+ PyErr_SetString(PyExc_ValueError,
+ "setting an array element with a sequence.");
return -1;
}
- /*
- * TODO: temporarily modifying the array like this
- * is bad coding style, should be changed.
- */
- ((PyArrayObject_fields *)ap)->descr = new;
- /* remember to update alignment flags */
- if ((new->alignment > 1)
- && ((((npy_intp)(ip+offset)) % new->alignment) != 0)) {
- PyArray_CLEARFLAGS(ap, NPY_ARRAY_ALIGNED);
+ return _copy_and_return_void_setitem(descr, ip,
+ PyArray_DESCR(oparr), PyArray_DATA(oparr));
+ }
+ else if (PyArray_IsScalar(op, Void)) {
+ PyArray_Descr *srcdescr = ((PyVoidScalarObject *)op)->descr;
+ char *srcdata = ((PyVoidScalarObject *)op)->obval;
+ return _copy_and_return_void_setitem(descr, ip, srcdescr, srcdata);
+ }
+ else if (PyTuple_Check(op)) {
+ /* if it's a tuple, copy field-by-field to ap, */
+ npy_intp names_size = PyTuple_GET_SIZE(descr->names);
+
+ if (names_size != PyTuple_Size(op)) {
+ errmsg = PyUString_FromFormat(
+ "could not assign tuple of length %zd to structure "
+ "with %" NPY_INTP_FMT " fields.",
+ PyTuple_Size(op), names_size);
+ PyErr_SetObject(PyExc_ValueError, errmsg);
+ Py_DECREF(errmsg);
+ return -1;
}
- else {
- PyArray_ENABLEFLAGS(ap, NPY_ARRAY_ALIGNED);
+
+ for (i = 0; i < names_size; i++) {
+ PyObject *item;
+
+ /* temporarily make ap have only this field */
+ if (_setup_field(i, descr, ap, &offset) == -1) {
+ failed = 1;
+ break;
+ }
+ item = PyTuple_GetItem(op, i);
+ if (item == NULL) {
+ failed = 1;
+ break;
+ }
+ /* use setitem to set this field */
+ if (PyArray_SETITEM(ap, ip + offset, item) < 0) {
+ failed = 1;
+ break;
+ }
}
- res = new->f->setitem(PyTuple_GET_ITEM(op, i), ip+offset, ap);
- ((PyArrayObject_fields *)ap)->flags = savedflags;
- if (res < 0) {
- break;
+ }
+ else {
+ /* Otherwise must be non-void scalar. Try to assign to each field */
+ npy_intp names_size = PyTuple_GET_SIZE(descr->names);
+
+ for (i = 0; i < names_size; i++) {
+ /* temporarily make ap have only this field */
+ if (_setup_field(i, descr, ap, &offset) == -1) {
+ failed = 1;
+ break;
+ }
+ /* use setitem to set this field */
+ if (PyArray_SETITEM(ap, ip + offset, op) < 0) {
+ failed = 1;
+ break;
+ }
}
}
- ((PyArrayObject_fields *)ap)->descr = descr;
- return res;
- }
- if (descr->subarray) {
+ /* reset clobbered attributes */
+ ((PyArrayObject_fields *)(ap))->descr = descr;
+ ((PyArrayObject_fields *)(ap))->flags = flags;
+
+ if (failed) {
+ return -1;
+ }
+ return 0;
+ }
+ else if (PyDataType_HASSUBARRAY(descr)) {
/* copy into an array of the same basic type */
PyArray_Dims shape = {NULL, -1};
PyArrayObject *ret;
@@ -862,19 +967,17 @@ VOID_setitem(PyObject *op, void *input, void *vap)
return res;
}
- /* Default is to use buffer interface to set item */
+ /*
+ * Fall through case - non-structured void datatype. This is a very
+ * undiscerning case: It interprets any object as a buffer
+ * and reads as many bytes as possible, padding with 0.
+ */
{
const void *buffer;
Py_ssize_t buflen;
- if (PyDataType_FLAGCHK(descr, NPY_ITEM_HASOBJECT)
- || PyDataType_FLAGCHK(descr, NPY_ITEM_IS_POINTER)) {
- PyErr_SetString(PyExc_ValueError,
- "Setting void-array with object members using buffer.");
- return -1;
- }
res = PyObject_AsReadBuffer(op, &buffer, &buflen);
if (res == -1) {
- goto fail;
+ return -1;
}
memcpy(ip, buffer, PyArray_MIN(buflen, itemsize));
if (itemsize > buflen) {
@@ -882,9 +985,6 @@ VOID_setitem(PyObject *op, void *input, void *vap)
}
}
return 0;
-
-fail:
- return -1;
}
static PyObject *
diff --git a/numpy/core/src/multiarray/cblasfuncs.c b/numpy/core/src/multiarray/cblasfuncs.c
index 8432ae5cf..7cb1652bb 100644
--- a/numpy/core/src/multiarray/cblasfuncs.c
+++ b/numpy/core/src/multiarray/cblasfuncs.c
@@ -250,8 +250,6 @@ cblas_matrixproduct(int typenum, PyArrayObject *ap1, PyArrayObject *ap2,
npy_intp ap1stride = 0;
npy_intp dimensions[NPY_MAXDIMS];
npy_intp numbytes;
- double prior1, prior2;
- PyTypeObject *subtype;
MatrixShape ap1shape, ap2shape;
if (_bad_strides(ap1)) {
@@ -381,29 +379,17 @@ cblas_matrixproduct(int typenum, PyArrayObject *ap1, PyArrayObject *ap2,
}
}
- /* Choose which subtype to return */
- if (Py_TYPE(ap1) != Py_TYPE(ap2)) {
- prior2 = PyArray_GetPriority((PyObject *)ap2, 0.0);
- prior1 = PyArray_GetPriority((PyObject *)ap1, 0.0);
- subtype = (prior2 > prior1 ? Py_TYPE(ap2) : Py_TYPE(ap1));
- }
- else {
- prior1 = prior2 = 0.0;
- subtype = Py_TYPE(ap1);
- }
-
if (out != NULL) {
int d;
/* verify that out is usable */
- if (Py_TYPE(out) != subtype ||
- PyArray_NDIM(out) != nd ||
+ if (PyArray_NDIM(out) != nd ||
PyArray_TYPE(out) != typenum ||
!PyArray_ISCARRAY(out)) {
PyErr_SetString(PyExc_ValueError,
- "output array is not acceptable "
- "(must have the right type, nr dimensions, and be a C-Array)");
+ "output array is not acceptable (must have the right datatype, "
+ "number of dimensions, and be a C-Array)");
goto fail;
}
for (d = 0; d < nd; ++d) {
@@ -439,7 +425,22 @@ cblas_matrixproduct(int typenum, PyArrayObject *ap1, PyArrayObject *ap2,
result = out;
}
else {
- PyObject *tmp = (PyObject *)(prior2 > prior1 ? ap2 : ap1);
+ double prior1, prior2;
+ PyTypeObject *subtype;
+ PyObject *tmp;
+
+ /* Choose which subtype to return */
+ if (Py_TYPE(ap1) != Py_TYPE(ap2)) {
+ prior2 = PyArray_GetPriority((PyObject *)ap2, 0.0);
+ prior1 = PyArray_GetPriority((PyObject *)ap1, 0.0);
+ subtype = (prior2 > prior1 ? Py_TYPE(ap2) : Py_TYPE(ap1));
+ }
+ else {
+ prior1 = prior2 = 0.0;
+ subtype = Py_TYPE(ap1);
+ }
+
+ tmp = (PyObject *)(prior2 > prior1 ? ap2 : ap1);
out_buf = (PyArrayObject *)PyArray_New(subtype, nd, dimensions,
typenum, NULL, NULL, 0, 0, tmp);
diff --git a/numpy/core/src/multiarray/convert.c b/numpy/core/src/multiarray/convert.c
index 212da892d..ca30d3f88 100644
--- a/numpy/core/src/multiarray/convert.c
+++ b/numpy/core/src/multiarray/convert.c
@@ -86,7 +86,7 @@ recursive_tolist(PyArrayObject *self, char *dataptr, int startdim)
/* Base case */
if (startdim >= PyArray_NDIM(self)) {
- return PyArray_DESCR(self)->f->getitem(dataptr,self);
+ return PyArray_GETITEM(self, dataptr);
}
n = PyArray_DIM(self, startdim);
@@ -222,7 +222,7 @@ PyArray_ToFile(PyArrayObject *self, FILE *fp, char *sep, char *format)
PyArray_IterNew((PyObject *)self);
n4 = (format ? strlen((const char *)format) : 0);
while (it->index < it->size) {
- obj = PyArray_DESCR(self)->f->getitem(it->dataptr, self);
+ obj = PyArray_GETITEM(self, it->dataptr);
if (obj == NULL) {
Py_DECREF(it);
return -1;
diff --git a/numpy/core/src/multiarray/convert_datatype.c b/numpy/core/src/multiarray/convert_datatype.c
index e495f3160..f0f18eead 100644
--- a/numpy/core/src/multiarray/convert_datatype.c
+++ b/numpy/core/src/multiarray/convert_datatype.c
@@ -1353,7 +1353,7 @@ static int min_scalar_type_num(char *valueptr, int type_num,
case NPY_UINT: {
npy_uint value = *(npy_uint *)valueptr;
if (value <= NPY_MAX_UBYTE) {
- if (value < NPY_MAX_BYTE) {
+ if (value <= NPY_MAX_BYTE) {
*is_small_unsigned = 1;
}
return NPY_UBYTE;
@@ -1945,7 +1945,7 @@ PyArray_Zero(PyArrayObject *arr)
}
storeflags = PyArray_FLAGS(arr);
PyArray_ENABLEFLAGS(arr, NPY_ARRAY_BEHAVED);
- ret = PyArray_DESCR(arr)->f->setitem(zero_obj, zeroval, arr);
+ ret = PyArray_SETITEM(arr, zeroval, zero_obj);
((PyArrayObject_fields *)arr)->flags = storeflags;
if (ret < 0) {
PyDataMem_FREE(zeroval);
@@ -1992,7 +1992,7 @@ PyArray_One(PyArrayObject *arr)
storeflags = PyArray_FLAGS(arr);
PyArray_ENABLEFLAGS(arr, NPY_ARRAY_BEHAVED);
- ret = PyArray_DESCR(arr)->f->setitem(one_obj, oneval, arr);
+ ret = PyArray_SETITEM(arr, oneval, one_obj);
((PyArrayObject_fields *)arr)->flags = storeflags;
if (ret < 0) {
PyDataMem_FREE(oneval);
diff --git a/numpy/core/src/multiarray/ctors.c b/numpy/core/src/multiarray/ctors.c
index c9b3125ae..fb913d288 100644
--- a/numpy/core/src/multiarray/ctors.c
+++ b/numpy/core/src/multiarray/ctors.c
@@ -514,7 +514,7 @@ setArrayFromSequence(PyArrayObject *a, PyObject *s,
}
else {
char * b = (PyArray_BYTES(dst) + i * PyArray_STRIDES(dst)[0]);
- res = PyArray_DESCR(dst)->f->setitem(o, b, dst);
+ res = PyArray_SETITEM(dst, b, o);
}
if (res < 0) {
Py_DECREF(o);
@@ -545,7 +545,7 @@ setArrayFromSequence(PyArrayObject *a, PyObject *s,
}
else {
char * b = (PyArray_BYTES(dst) + i * PyArray_STRIDES(dst)[0]);
- res = PyArray_DESCR(dst)->f->setitem(o, b, dst);
+ res = PyArray_SETITEM(dst, b, o);
}
if (res < 0) {
Py_DECREF(seq);
@@ -1772,8 +1772,7 @@ PyArray_FromAny(PyObject *op, PyArray_Descr *newtype, int min_depth,
}
}
else {
- if (PyArray_DESCR(ret)->f->setitem(op,
- PyArray_DATA(ret), ret) < 0) {
+ if (PyArray_SETITEM(ret, PyArray_DATA(ret), op) < 0) {
Py_DECREF(ret);
ret = NULL;
}
@@ -3732,7 +3731,7 @@ PyArray_FromIter(PyObject *obj, PyArray_Descr *dtype, npy_intp count)
PyArray_DIMS(ret)[0] = i + 1;
if (((item = index2ptr(ret, i)) == NULL) ||
- (PyArray_DESCR(ret)->f->setitem(value, item, ret) == -1)) {
+ PyArray_SETITEM(ret, item, value) == -1) {
Py_DECREF(value);
goto done;
}
diff --git a/numpy/core/src/multiarray/datetime.c b/numpy/core/src/multiarray/datetime.c
index 69bff071b..93babe8bd 100644
--- a/numpy/core/src/multiarray/datetime.c
+++ b/numpy/core/src/multiarray/datetime.c
@@ -1804,7 +1804,8 @@ convert_datetime_metadata_to_tuple(PyArray_DatetimeMetaData *meta)
*/
NPY_NO_EXPORT int
convert_datetime_metadata_tuple_to_datetime_metadata(PyObject *tuple,
- PyArray_DatetimeMetaData *out_meta)
+ PyArray_DatetimeMetaData *out_meta,
+ npy_bool from_pickle)
{
char *basestr = NULL;
Py_ssize_t len = 0, tuple_size;
@@ -1859,7 +1860,56 @@ convert_datetime_metadata_tuple_to_datetime_metadata(PyObject *tuple,
return -1;
}
- if (tuple_size == 4) {
+ /*
+ * The event metadata was removed way back in numpy 1.7 (cb4545), but was
+ * not deprecated at the time.
+ */
+
+ /* (unit, num, event) */
+ if (tuple_size == 3) {
+ /* Numpy 1.14, 2017-08-11 */
+ if (DEPRECATE(
+ "When passing a 3-tuple as (unit, num, event), the event "
+ "is ignored (since 1.7) - use (unit, num) instead") < 0) {
+ return -1;
+ }
+ }
+ /* (unit, num, den, event) */
+ else if (tuple_size == 4) {
+ PyObject *event = PyTuple_GET_ITEM(tuple, 3);
+ if (from_pickle) {
+ /* if (event == 1) */
+ PyObject *one = PyLong_FromLong(1);
+ int equal_one;
+ if (one == NULL) {
+ return -1;
+ }
+ equal_one = PyObject_RichCompareBool(event, one, Py_EQ);
+ if (equal_one == -1) {
+ return -1;
+ }
+
+ /* if the event data is not 1, it had semantics different to how
+ * datetime types now behave, which are no longer respected.
+ */
+ if (!equal_one) {
+ if (PyErr_WarnEx(PyExc_UserWarning,
+ "Loaded pickle file contains non-default event data "
+ "for a datetime type, which has been ignored since 1.7",
+ 1) < 0) {
+ return -1;
+ }
+ }
+ }
+ else if (event != Py_None) {
+ /* Numpy 1.14, 2017-08-11 */
+ if (DEPRECATE(
+ "When passing a 4-tuple as (unit, num, den, event), the "
+ "event argument is ignored (since 1.7), so should be None"
+ ) < 0) {
+ return -1;
+ }
+ }
den = PyInt_AsLong(PyTuple_GET_ITEM(tuple, 2));
if (error_converting(den)) {
return -1;
@@ -1897,8 +1947,8 @@ convert_pyobject_to_datetime_metadata(PyObject *obj,
Py_ssize_t len = 0;
if (PyTuple_Check(obj)) {
- return convert_datetime_metadata_tuple_to_datetime_metadata(obj,
- out_meta);
+ return convert_datetime_metadata_tuple_to_datetime_metadata(
+ obj, out_meta, NPY_FALSE);
}
/* Get an ASCII string */
diff --git a/numpy/core/src/multiarray/descriptor.c b/numpy/core/src/multiarray/descriptor.c
index 8e6aa6789..1ae6e34a6 100644
--- a/numpy/core/src/multiarray/descriptor.c
+++ b/numpy/core/src/multiarray/descriptor.c
@@ -2887,7 +2887,8 @@ arraydescr_setstate(PyArray_Descr *self, PyObject *args)
if (convert_datetime_metadata_tuple_to_datetime_metadata(
PyTuple_GET_ITEM(metadata, 1),
- &temp_dt_data) < 0) {
+ &temp_dt_data,
+ NPY_TRUE) < 0) {
return NULL;
}
@@ -3119,7 +3120,7 @@ static PyMethodDef arraydescr_methods[] = {
*
* Returns 1 if it has a simple layout, 0 otherwise.
*/
-static int
+NPY_NO_EXPORT int
is_dtype_struct_simple_unaligned_layout(PyArray_Descr *dtype)
{
PyObject *names, *fields, *key, *tup, *title;
diff --git a/numpy/core/src/multiarray/descriptor.h b/numpy/core/src/multiarray/descriptor.h
index ff1fc980a..f95041195 100644
--- a/numpy/core/src/multiarray/descriptor.h
+++ b/numpy/core/src/multiarray/descriptor.h
@@ -10,6 +10,10 @@ array_set_typeDict(PyObject *NPY_UNUSED(ignored), PyObject *args);
NPY_NO_EXPORT PyArray_Descr *
_arraydescr_fromobj(PyObject *obj);
+
+NPY_NO_EXPORT int
+is_dtype_struct_simple_unaligned_layout(PyArray_Descr *dtype);
+
/*
* Creates a string repr of the dtype, excluding the 'dtype()' part
* surrounding the object. This object may be a string, a list, or
diff --git a/numpy/core/src/multiarray/dtype_transfer.c b/numpy/core/src/multiarray/dtype_transfer.c
index dfe95d65c..9c27255aa 100644
--- a/numpy/core/src/multiarray/dtype_transfer.c
+++ b/numpy/core/src/multiarray/dtype_transfer.c
@@ -25,6 +25,7 @@
#include "ctors.h"
#include "_datetime.h"
#include "datetime_strings.h"
+#include "descriptor.h"
#include "shape.h"
#include "lowlevel_strided_loops.h"
@@ -2521,7 +2522,7 @@ _strided_to_strided_field_transfer(char *dst, npy_intp dst_stride,
/*
* Handles fields transfer. To call this, at least one of the dtypes
- * must have fields
+ * must have fields. Does not take care of object<->structure conversion
*/
static int
get_fields_transfer_function(int aligned,
@@ -2532,22 +2533,26 @@ get_fields_transfer_function(int aligned,
NpyAuxData **out_transferdata,
int *out_needs_api)
{
- PyObject *names, *key, *tup, *title;
+ PyObject *key, *tup, *title;
PyArray_Descr *src_fld_dtype, *dst_fld_dtype;
- npy_int i, names_size, field_count, structsize;
+ npy_int i, field_count, structsize;
int src_offset, dst_offset;
_field_transfer_data *data;
_single_field_transfer *fields;
+ int failed = 0;
+
+ /*
+ * There are three cases to take care of: 1. src is non-structured,
+ * 2. dst is non-structured, or 3. both are structured.
+ */
- /* Copy the src value to all the fields of dst */
+ /* 1. src is non-structured. Copy the src value to all the fields of dst */
if (!PyDataType_HASFIELDS(src_dtype)) {
- names = dst_dtype->names;
- names_size = PyTuple_GET_SIZE(dst_dtype->names);
+ field_count = PyTuple_GET_SIZE(dst_dtype->names);
- field_count = names_size;
+ /* Allocate the field-data structure and populate it */
structsize = sizeof(_field_transfer_data) +
(field_count + 1) * sizeof(_single_field_transfer);
- /* Allocate the data and populate it */
data = (_field_transfer_data *)PyArray_malloc(structsize);
if (data == NULL) {
PyErr_NoMemory();
@@ -2557,8 +2562,8 @@ get_fields_transfer_function(int aligned,
data->base.clone = &_field_transfer_data_clone;
fields = &data->fields;
- for (i = 0; i < names_size; ++i) {
- key = PyTuple_GET_ITEM(names, i);
+ for (i = 0; i < field_count; ++i) {
+ key = PyTuple_GET_ITEM(dst_dtype->names, i);
tup = PyDict_GetItem(dst_dtype->fields, key);
if (!PyArg_ParseTuple(tup, "Oi|O", &dst_fld_dtype,
&dst_offset, &title)) {
@@ -2584,7 +2589,7 @@ get_fields_transfer_function(int aligned,
}
/*
- * If the references should be removed from src, add
+ * If references should be decrefd in src, add
* another transfer function to do that.
*/
if (move_references && PyDataType_REFCHK(src_dtype)) {
@@ -2612,24 +2617,19 @@ get_fields_transfer_function(int aligned,
return NPY_SUCCEED;
}
- /* Copy the value of the first field to dst */
- else if (!PyDataType_HASFIELDS(dst_dtype)) {
- names = src_dtype->names;
- names_size = PyTuple_GET_SIZE(src_dtype->names);
- /*
- * If DECREF is needed on source fields, may need
- * to process all the fields
- */
- if (move_references && PyDataType_REFCHK(src_dtype)) {
- field_count = names_size + 1;
- }
- else {
- field_count = 1;
+ /* 2. dst is non-structured. Allow transfer from single-field src to dst */
+ if (!PyDataType_HASFIELDS(dst_dtype)) {
+ if (PyTuple_GET_SIZE(src_dtype->names) != 1) {
+ PyErr_SetString(PyExc_ValueError,
+ "Can't cast from structure to non-structure, except if the "
+ "structure only has a single field.");
+ return NPY_FAIL;
}
+
+ /* Allocate the field-data structure and populate it */
structsize = sizeof(_field_transfer_data) +
- field_count * sizeof(_single_field_transfer);
- /* Allocate the data and populate it */
+ 1 * sizeof(_single_field_transfer);
data = (_field_transfer_data *)PyArray_malloc(structsize);
if (data == NULL) {
PyErr_NoMemory();
@@ -2639,286 +2639,102 @@ get_fields_transfer_function(int aligned,
data->base.clone = &_field_transfer_data_clone;
fields = &data->fields;
- key = PyTuple_GET_ITEM(names, 0);
+ key = PyTuple_GET_ITEM(src_dtype->names, 0);
tup = PyDict_GetItem(src_dtype->fields, key);
- if (!PyArg_ParseTuple(tup, "Oi|O", &src_fld_dtype,
- &src_offset, &title)) {
- PyArray_free(data);
+ if (!PyArg_ParseTuple(tup, "Oi|O",
+ &src_fld_dtype, &src_offset, &title)) {
return NPY_FAIL;
}
- field_count = 0;
- /*
- * Special case bool type, the existence of fields implies True
- *
- * TODO: Perhaps a better behavior would be to combine all the
- * input fields with an OR? The same would apply to subarrays.
- */
- if (dst_dtype->type_num == NPY_BOOL) {
- if (get_bool_setdstone_transfer_function(dst_stride,
- &fields[field_count].stransfer,
- &fields[field_count].data,
- out_needs_api) != NPY_SUCCEED) {
- PyArray_free(data);
- return NPY_FAIL;
- }
- fields[field_count].src_offset = 0;
- fields[field_count].dst_offset = 0;
- fields[field_count].src_itemsize = 0;
- field_count++;
-
- /* If the src field has references, may need to clear them */
- if (move_references && PyDataType_REFCHK(src_fld_dtype)) {
- if (get_decsrcref_transfer_function(0,
- src_stride,
- src_fld_dtype,
- &fields[field_count].stransfer,
- &fields[field_count].data,
- out_needs_api) != NPY_SUCCEED) {
- NPY_AUXDATA_FREE(fields[0].data);
- PyArray_free(data);
- return NPY_FAIL;
- }
- fields[field_count].src_offset = src_offset;
- fields[field_count].dst_offset = 0;
- fields[field_count].src_itemsize = src_fld_dtype->elsize;
- field_count++;
- }
- }
- /* Transfer the first field to the output */
- else {
- if (PyArray_GetDTypeTransferFunction(0,
- src_stride, dst_stride,
- src_fld_dtype, dst_dtype,
- move_references,
- &fields[field_count].stransfer,
- &fields[field_count].data,
- out_needs_api) != NPY_SUCCEED) {
- PyArray_free(data);
- return NPY_FAIL;
- }
- fields[field_count].src_offset = src_offset;
- fields[field_count].dst_offset = 0;
- fields[field_count].src_itemsize = src_fld_dtype->elsize;
- field_count++;
- }
- /*
- * If the references should be removed from src, add
- * more transfer functions to decrement the references
- * for all the other fields.
- */
- if (move_references && PyDataType_REFCHK(src_dtype)) {
- for (i = 1; i < names_size; ++i) {
- key = PyTuple_GET_ITEM(names, i);
- tup = PyDict_GetItem(src_dtype->fields, key);
- if (!PyArg_ParseTuple(tup, "Oi|O", &src_fld_dtype,
- &src_offset, &title)) {
- return NPY_FAIL;
- }
- if (PyDataType_REFCHK(src_fld_dtype)) {
- if (get_decsrcref_transfer_function(0,
- src_stride,
- src_fld_dtype,
- &fields[field_count].stransfer,
- &fields[field_count].data,
- out_needs_api) != NPY_SUCCEED) {
- for (i = field_count-1; i >= 0; --i) {
- NPY_AUXDATA_FREE(fields[i].data);
- }
- PyArray_free(data);
- return NPY_FAIL;
- }
- fields[field_count].src_offset = src_offset;
- fields[field_count].dst_offset = 0;
- fields[field_count].src_itemsize = src_fld_dtype->elsize;
- field_count++;
- }
- }
+ if (PyArray_GetDTypeTransferFunction(0,
+ src_stride, dst_stride,
+ src_fld_dtype, dst_dtype,
+ move_references,
+ &fields[0].stransfer,
+ &fields[0].data,
+ out_needs_api) != NPY_SUCCEED) {
+ PyArray_free(data);
+ return NPY_FAIL;
}
+ fields[0].src_offset = src_offset;
+ fields[0].dst_offset = 0;
+ fields[0].src_itemsize = src_fld_dtype->elsize;
- data->field_count = field_count;
+ data->field_count = 1;
*out_stransfer = &_strided_to_strided_field_transfer;
*out_transferdata = (NpyAuxData *)data;
return NPY_SUCCEED;
}
- /* Match up the fields to copy */
- else {
- /* Keeps track of the names we already used */
- PyObject *used_names_dict = NULL;
- int cmpval;
-
- const char *msg =
- "Assignment between structured arrays with different field names "
- "will change in numpy 1.14.\n\n"
- "Previously fields in the dst would be set to the value of the "
- "identically-named field in the src. In numpy 1.14 fields will "
- "instead be assigned 'by position': The Nth field of the dst "
- "will be set to the Nth field of the src array.\n\n"
- "See the release notes for details";
- /*
- * 2016-09-19, 1.12
- * Warn if the field names of the dst and src are not
- * identical, since then behavior will change in 1.13.
- */
- cmpval = PyObject_RichCompareBool(src_dtype->names,
- dst_dtype->names, Py_EQ);
- if (PyErr_Occurred()) {
- return NPY_FAIL;
- }
- if (cmpval != 1) {
- if (DEPRECATE_FUTUREWARNING(msg) < 0) {
- return NPY_FAIL;
- }
- }
- names = dst_dtype->names;
- names_size = PyTuple_GET_SIZE(dst_dtype->names);
+ /* 3. Otherwise both src and dst are structured arrays */
+ field_count = PyTuple_GET_SIZE(dst_dtype->names);
- /*
- * If DECREF is needed on source fields, will need
- * to also go through its fields.
- */
- if (move_references && PyDataType_REFCHK(src_dtype)) {
- field_count = names_size + PyTuple_GET_SIZE(src_dtype->names);
- used_names_dict = PyDict_New();
- if (used_names_dict == NULL) {
- return NPY_FAIL;
- }
- }
- else {
- field_count = names_size;
- }
- structsize = sizeof(_field_transfer_data) +
- field_count * sizeof(_single_field_transfer);
- /* Allocate the data and populate it */
- data = (_field_transfer_data *)PyArray_malloc(structsize);
- if (data == NULL) {
- PyErr_NoMemory();
- Py_XDECREF(used_names_dict);
- return NPY_FAIL;
- }
- data->base.free = &_field_transfer_data_free;
- data->base.clone = &_field_transfer_data_clone;
- fields = &data->fields;
+ /* Match up the fields to copy (field-by-field transfer) */
+ if (PyTuple_GET_SIZE(src_dtype->names) != field_count) {
+ PyErr_SetString(PyExc_ValueError, "structures must have the same size");
+ return NPY_FAIL;
+ }
- for (i = 0; i < names_size; ++i) {
- key = PyTuple_GET_ITEM(names, i);
- tup = PyDict_GetItem(dst_dtype->fields, key);
- if (!PyArg_ParseTuple(tup, "Oi|O", &dst_fld_dtype,
- &dst_offset, &title)) {
- for (i = i-1; i >= 0; --i) {
- NPY_AUXDATA_FREE(fields[i].data);
- }
- PyArray_free(data);
- Py_XDECREF(used_names_dict);
- return NPY_FAIL;
- }
- tup = PyDict_GetItem(src_dtype->fields, key);
- if (tup != NULL) {
- if (!PyArg_ParseTuple(tup, "Oi|O", &src_fld_dtype,
- &src_offset, &title)) {
- for (i = i-1; i >= 0; --i) {
- NPY_AUXDATA_FREE(fields[i].data);
- }
- PyArray_free(data);
- Py_XDECREF(used_names_dict);
- return NPY_FAIL;
- }
- if (PyArray_GetDTypeTransferFunction(0,
- src_stride, dst_stride,
- src_fld_dtype, dst_fld_dtype,
- move_references,
- &fields[i].stransfer,
- &fields[i].data,
- out_needs_api) != NPY_SUCCEED) {
- for (i = i-1; i >= 0; --i) {
- NPY_AUXDATA_FREE(fields[i].data);
- }
- PyArray_free(data);
- Py_XDECREF(used_names_dict);
- return NPY_FAIL;
- }
- fields[i].src_offset = src_offset;
- fields[i].dst_offset = dst_offset;
- fields[i].src_itemsize = src_fld_dtype->elsize;
+ /* Allocate the field-data structure and populate it */
+ structsize = sizeof(_field_transfer_data) +
+ field_count * sizeof(_single_field_transfer);
+ data = (_field_transfer_data *)PyArray_malloc(structsize);
+ if (data == NULL) {
+ PyErr_NoMemory();
+ return NPY_FAIL;
+ }
+ data->base.free = &_field_transfer_data_free;
+ data->base.clone = &_field_transfer_data_clone;
+ fields = &data->fields;
- if (used_names_dict != NULL) {
- PyDict_SetItem(used_names_dict, key, Py_True);
- }
- }
- else {
- if (get_setdstzero_transfer_function(0,
- dst_stride,
- dst_fld_dtype,
- &fields[i].stransfer,
- &fields[i].data,
- out_needs_api) != NPY_SUCCEED) {
- for (i = i-1; i >= 0; --i) {
- NPY_AUXDATA_FREE(fields[i].data);
- }
- PyArray_free(data);
- Py_XDECREF(used_names_dict);
- return NPY_FAIL;
- }
- fields[i].src_offset = 0;
- fields[i].dst_offset = dst_offset;
- fields[i].src_itemsize = 0;
- }
+ /* set up the transfer function for each field */
+ for (i = 0; i < field_count; ++i) {
+ key = PyTuple_GET_ITEM(dst_dtype->names, i);
+ tup = PyDict_GetItem(dst_dtype->fields, key);
+ if (!PyArg_ParseTuple(tup, "Oi|O", &dst_fld_dtype,
+ &dst_offset, &title)) {
+ failed = 1;
+ break;
+ }
+ key = PyTuple_GET_ITEM(src_dtype->names, i);
+ tup = PyDict_GetItem(src_dtype->fields, key);
+ if (!PyArg_ParseTuple(tup, "Oi|O", &src_fld_dtype,
+ &src_offset, &title)) {
+ failed = 1;
+ break;
}
- if (move_references && PyDataType_REFCHK(src_dtype)) {
- /* Use field_count to track additional functions added */
- field_count = names_size;
-
- names = src_dtype->names;
- names_size = PyTuple_GET_SIZE(src_dtype->names);
- for (i = 0; i < names_size; ++i) {
- key = PyTuple_GET_ITEM(names, i);
- if (PyDict_GetItem(used_names_dict, key) == NULL) {
- tup = PyDict_GetItem(src_dtype->fields, key);
- if (!PyArg_ParseTuple(tup, "Oi|O", &src_fld_dtype,
- &src_offset, &title)) {
- for (i = field_count-1; i >= 0; --i) {
- NPY_AUXDATA_FREE(fields[i].data);
- }
- PyArray_free(data);
- Py_XDECREF(used_names_dict);
- return NPY_FAIL;
- }
- if (PyDataType_REFCHK(src_fld_dtype)) {
- if (get_decsrcref_transfer_function(0,
- src_stride,
- src_fld_dtype,
- &fields[field_count].stransfer,
- &fields[field_count].data,
- out_needs_api) != NPY_SUCCEED) {
- for (i = field_count-1; i >= 0; --i) {
- NPY_AUXDATA_FREE(fields[i].data);
- }
- PyArray_free(data);
- return NPY_FAIL;
- }
- fields[field_count].src_offset = src_offset;
- fields[field_count].dst_offset = 0;
- fields[field_count].src_itemsize =
- src_fld_dtype->elsize;
- field_count++;
- }
- }
- }
+ if (PyArray_GetDTypeTransferFunction(0,
+ src_stride, dst_stride,
+ src_fld_dtype, dst_fld_dtype,
+ move_references,
+ &fields[i].stransfer,
+ &fields[i].data,
+ out_needs_api) != NPY_SUCCEED) {
+ failed = 1;
+ break;
}
+ fields[i].src_offset = src_offset;
+ fields[i].dst_offset = dst_offset;
+ fields[i].src_itemsize = src_fld_dtype->elsize;
+ }
- Py_XDECREF(used_names_dict);
+ if (failed) {
+ for (i = i-1; i >= 0; --i) {
+ NPY_AUXDATA_FREE(fields[i].data);
+ }
+ PyArray_free(data);
+ return NPY_FAIL;
+ }
- data->field_count = field_count;
+ data->field_count = field_count;
- *out_stransfer = &_strided_to_strided_field_transfer;
- *out_transferdata = (NpyAuxData *)data;
+ *out_stransfer = &_strided_to_strided_field_transfer;
+ *out_transferdata = (NpyAuxData *)data;
- return NPY_SUCCEED;
- }
+ return NPY_SUCCEED;
}
static int
@@ -3649,8 +3465,10 @@ PyArray_GetDTypeTransferFunction(int aligned,
* If there are no references and the data types are equivalent,
* return a simple copy
*/
- if (!PyDataType_REFCHK(src_dtype) && !PyDataType_REFCHK(dst_dtype) &&
- PyArray_EquivTypes(src_dtype, dst_dtype)) {
+ if (PyArray_EquivTypes(src_dtype, dst_dtype) &&
+ !PyDataType_REFCHK(src_dtype) && !PyDataType_REFCHK(dst_dtype) &&
+ ( !PyDataType_HASFIELDS(dst_dtype) ||
+ is_dtype_struct_simple_unaligned_layout(dst_dtype)) ) {
/*
* We can't pass through the aligned flag because it's not
* appropriate. Consider a size-8 string, it will say it's
diff --git a/numpy/core/src/multiarray/item_selection.c b/numpy/core/src/multiarray/item_selection.c
index 21bcd6cad..3b5d76362 100644
--- a/numpy/core/src/multiarray/item_selection.c
+++ b/numpy/core/src/multiarray/item_selection.c
@@ -2068,6 +2068,8 @@ PyArray_CountNonzero(PyArrayObject *self)
char *data;
npy_intp stride, count;
npy_intp nonzero_count = 0;
+ int needs_api = 0;
+ PyArray_Descr *dtype;
NpyIter *iter;
NpyIter_IterNextFunc *iternext;
@@ -2076,22 +2078,38 @@ PyArray_CountNonzero(PyArrayObject *self)
NPY_BEGIN_THREADS_DEF;
/* Special low-overhead version specific to the boolean type */
- if (PyArray_DESCR(self)->type_num == NPY_BOOL) {
+ dtype = PyArray_DESCR(self);
+ if (dtype->type_num == NPY_BOOL) {
return count_boolean_trues(PyArray_NDIM(self), PyArray_DATA(self),
PyArray_DIMS(self), PyArray_STRIDES(self));
}
-
nonzero = PyArray_DESCR(self)->f->nonzero;
/* If it's a trivial one-dimensional loop, don't use an iterator */
if (PyArray_TRIVIALLY_ITERABLE(self)) {
+ needs_api = PyDataType_FLAGCHK(dtype, NPY_NEEDS_PYAPI);
PyArray_PREPARE_TRIVIAL_ITERATION(self, count, data, stride);
- while (count--) {
- if (nonzero(data, self)) {
- ++nonzero_count;
+ if (needs_api){
+ while (count--) {
+ if (nonzero(data, self)) {
+ ++nonzero_count;
+ }
+ if (PyErr_Occurred()) {
+ return -1;
+ }
+ data += stride;
}
- data += stride;
+ }
+ else {
+ NPY_BEGIN_THREADS_THRESHOLDED(count);
+ while (count--) {
+ if (nonzero(data, self)) {
+ ++nonzero_count;
+ }
+ data += stride;
+ }
+ NPY_END_THREADS;
}
return nonzero_count;
@@ -2116,6 +2134,7 @@ PyArray_CountNonzero(PyArrayObject *self)
if (iter == NULL) {
return -1;
}
+ needs_api = NpyIter_IterationNeedsAPI(iter);
/* Get the pointers for inner loop iteration */
iternext = NpyIter_GetIterNext(iter, NULL);
@@ -2140,16 +2159,21 @@ PyArray_CountNonzero(PyArrayObject *self)
if (nonzero(data, self)) {
++nonzero_count;
}
+ if (needs_api && PyErr_Occurred()) {
+ nonzero_count = -1;
+ goto finish;
+ }
data += stride;
}
} while(iternext(iter));
+finish:
NPY_END_THREADS;
NpyIter_Deallocate(iter);
- return PyErr_Occurred() ? -1 : nonzero_count;
+ return nonzero_count;
}
/*NUMPY_API
@@ -2383,7 +2407,7 @@ PyArray_MultiIndexGetItem(PyArrayObject *self, npy_intp *multi_index)
data += ind * strides[idim];
}
- return PyArray_DESCR(self)->f->getitem(data, self);
+ return PyArray_GETITEM(self, data);
}
/*
@@ -2412,5 +2436,5 @@ PyArray_MultiIndexSetItem(PyArrayObject *self, npy_intp *multi_index,
data += ind * strides[idim];
}
- return PyArray_DESCR(self)->f->setitem(obj, data, self);
+ return PyArray_SETITEM(self, data, obj);
}
diff --git a/numpy/core/src/multiarray/iterators.c b/numpy/core/src/multiarray/iterators.c
index 14a906ed9..9e6ed712c 100644
--- a/numpy/core/src/multiarray/iterators.c
+++ b/numpy/core/src/multiarray/iterators.c
@@ -917,7 +917,7 @@ iter_ass_subscript(PyArrayIterObject *self, PyObject *ind, PyObject *val)
if (PyBool_Check(ind)) {
retval = 0;
if (PyObject_IsTrue(ind)) {
- retval = type->f->setitem(val, self->dataptr, self->ao);
+ retval = PyArray_SETITEM(self->ao, self->dataptr, val);
}
goto finish;
}
@@ -1055,7 +1055,28 @@ static PyMappingMethods iter_as_mapping = {
};
-
+/* Two options:
+ * 1) underlying array is contiguous
+ * -- return 1-d wrapper around it
+ * 2) underlying array is not contiguous
+ * -- make new 1-d contiguous array with updateifcopy flag set
+ * to copy back to the old array
+ *
+ * If underlying array is readonly, then we make the output array readonly
+ * and updateifcopy does not apply.
+ *
+ * Changed 2017-07-21, 1.14.0.
+ *
+ * In order to start the process of removing UPDATEIFCOPY, see gh-7054, the
+ * behavior is changed to always return a non-writeable copy when the base
+ * array is non-contiguous. Doing that will hopefully smoke out those few
+ * folks who assign to the result with the expectation that the base array
+ * will be changed. At a later date non-contiguous arrays will always return
+ * writeable copies.
+ *
+ * Note that the type and argument expected for the __array__ method is
+ * ignored.
+ */
static PyArrayObject *
iter_array(PyArrayIterObject *it, PyObject *NPY_UNUSED(op))
{
@@ -1063,27 +1084,14 @@ iter_array(PyArrayIterObject *it, PyObject *NPY_UNUSED(op))
PyArrayObject *ret;
npy_intp size;
- /* Any argument ignored */
-
- /* Two options:
- * 1) underlying array is contiguous
- * -- return 1-d wrapper around it
- * 2) underlying array is not contiguous
- * -- make new 1-d contiguous array with updateifcopy flag set
- * to copy back to the old array
- *
- * If underlying array is readonly, then we make the output array readonly
- * and updateifcopy does not apply.
- */
size = PyArray_SIZE(it->ao);
Py_INCREF(PyArray_DESCR(it->ao));
+
if (PyArray_ISCONTIGUOUS(it->ao)) {
- ret = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type,
- PyArray_DESCR(it->ao),
- 1, &size,
- NULL, PyArray_DATA(it->ao),
- PyArray_FLAGS(it->ao),
- (PyObject *)it->ao);
+ ret = (PyArrayObject *)PyArray_NewFromDescr(
+ &PyArray_Type, PyArray_DESCR(it->ao), 1, &size,
+ NULL, PyArray_DATA(it->ao), PyArray_FLAGS(it->ao),
+ (PyObject *)it->ao);
if (ret == NULL) {
return NULL;
}
@@ -1094,11 +1102,10 @@ iter_array(PyArrayIterObject *it, PyObject *NPY_UNUSED(op))
}
}
else {
- ret = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type,
- PyArray_DESCR(it->ao),
- 1, &size,
- NULL, NULL,
- 0, (PyObject *)it->ao);
+ ret = (PyArrayObject *)PyArray_NewFromDescr(
+ &PyArray_Type, PyArray_DESCR(it->ao), 1, &size,
+ NULL, NULL, 0,
+ (PyObject *)it->ao);
if (ret == NULL) {
return NULL;
}
@@ -1106,16 +1113,7 @@ iter_array(PyArrayIterObject *it, PyObject *NPY_UNUSED(op))
Py_DECREF(ret);
return NULL;
}
- if (PyArray_ISWRITEABLE(it->ao)) {
- Py_INCREF(it->ao);
- if (PyArray_SetUpdateIfCopyBase(ret, it->ao) < 0) {
- Py_DECREF(ret);
- return NULL;
- }
- }
- else {
- PyArray_CLEARFLAGS(ret, NPY_ARRAY_WRITEABLE);
- }
+ PyArray_CLEARFLAGS(ret, NPY_ARRAY_WRITEABLE);
}
return ret;
@@ -1805,7 +1803,7 @@ static char* _set_constant(PyArrayNeighborhoodIterObject* iter,
storeflags = PyArray_FLAGS(ar->ao);
PyArray_ENABLEFLAGS(ar->ao, NPY_ARRAY_BEHAVED);
- st = PyArray_DESCR(ar->ao)->f->setitem((PyObject*)fill, ret, ar->ao);
+ st = PyArray_SETITEM(ar->ao, ret, (PyObject*)fill);
((PyArrayObject_fields *)ar->ao)->flags = storeflags;
if (st < 0) {
diff --git a/numpy/core/src/multiarray/mapping.c b/numpy/core/src/multiarray/mapping.c
index 4833b5069..1a92365c8 100644
--- a/numpy/core/src/multiarray/mapping.c
+++ b/numpy/core/src/multiarray/mapping.c
@@ -1446,10 +1446,6 @@ _get_field_view(PyArrayObject *arr, PyObject *ind, PyArrayObject **view)
PyObject *fields, *names;
PyArray_Descr *view_dtype;
- /* variables needed to make a copy, to remove in the future */
- static PyObject *copyfunc = NULL;
- PyObject *viewcopy;
-
seqlen = PySequence_Size(ind);
/* quit if have a 0-d array (seqlen==-1) or a 0-len array */
@@ -1502,6 +1498,35 @@ _get_field_view(PyArrayObject *arr, PyObject *ind, PyArrayObject **view)
Py_DECREF(names);
return 0;
}
+ // disallow use of titles as index
+ if (PyTuple_Size(tup) == 3) {
+ PyObject *title = PyTuple_GET_ITEM(tup, 2);
+ int titlecmp = PyObject_RichCompareBool(title, name, Py_EQ);
+ if (titlecmp == 1) {
+ // if title == name, we were given a title, not a field name
+ PyErr_SetString(PyExc_KeyError,
+ "cannot use field titles in multi-field index");
+ }
+ if (titlecmp != 0 || PyDict_SetItem(fields, title, tup) < 0) {
+ Py_DECREF(title);
+ Py_DECREF(name);
+ Py_DECREF(fields);
+ Py_DECREF(names);
+ return 0;
+ }
+ Py_DECREF(title);
+ }
+ // disallow duplicate field indices
+ if (PyDict_Contains(fields, name)) {
+ PyObject *errmsg = PyUString_FromString(
+ "duplicate field of name ");
+ PyUString_ConcatAndDel(&errmsg, name);
+ PyErr_SetObject(PyExc_KeyError, errmsg);
+ Py_DECREF(errmsg);
+ Py_DECREF(fields);
+ Py_DECREF(names);
+ return 0;
+ }
if (PyDict_SetItem(fields, name, tup) < 0) {
Py_DECREF(name);
Py_DECREF(fields);
@@ -1545,29 +1570,6 @@ _get_field_view(PyArrayObject *arr, PyObject *ind, PyArrayObject **view)
return 0;
}
- /*
- * Return copy for now (future plan to return the view above). All the
- * following code in this block can then be replaced by "return 0;"
- */
- npy_cache_import("numpy.core._internal", "_copy_fields", &copyfunc);
- if (copyfunc == NULL) {
- Py_DECREF(*view);
- *view = NULL;
- return 0;
- }
-
- PyArray_CLEARFLAGS(*view, NPY_ARRAY_WARN_ON_WRITE);
- viewcopy = PyObject_CallFunction(copyfunc, "O", *view);
- if (viewcopy == NULL) {
- Py_DECREF(*view);
- *view = NULL;
- return 0;
- }
- Py_DECREF(*view);
- *view = (PyArrayObject*)viewcopy;
-
- /* warn when writing to the copy */
- PyArray_ENABLEFLAGS(*view, NPY_ARRAY_WARN_ON_WRITE);
return 0;
}
return -1;
@@ -1601,11 +1603,6 @@ array_subscript(PyArrayObject *self, PyObject *op)
if (view == NULL) {
return NULL;
}
-
- /* warn if writing to a copy. copies will have no base */
- if (PyArray_BASE(view) == NULL) {
- PyArray_ENABLEFLAGS(view, NPY_ARRAY_WARN_ON_WRITE);
- }
return (PyObject*)view;
}
}
@@ -1892,17 +1889,6 @@ array_assign_subscript(PyArrayObject *self, PyObject *ind, PyObject *op)
PyArrayObject *view;
int ret = _get_field_view(self, ind, &view);
if (ret == 0){
-
-#if defined(NPY_PY3K)
- if (!PyUnicode_Check(ind)) {
-#else
- if (!PyString_Check(ind) && !PyUnicode_Check(ind)) {
-#endif
- PyErr_SetString(PyExc_ValueError,
- "multi-field assignment is not supported");
- return -1;
- }
-
if (view == NULL) {
return -1;
}
diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c
index ca481a11f..210882ff0 100644
--- a/numpy/core/src/multiarray/multiarraymodule.c
+++ b/numpy/core/src/multiarray/multiarraymodule.c
@@ -315,20 +315,39 @@ PyArray_Free(PyObject *op, void *ptr)
return 0;
}
+/*
+ * Get the ndarray subclass with the highest priority
+ */
+NPY_NO_EXPORT PyTypeObject *
+PyArray_GetSubType(int narrays, PyArrayObject **arrays) {
+ PyTypeObject *subtype = &PyArray_Type;
+ double priority = NPY_PRIORITY;
+ int i;
+
+ /* Get the priority subtype for the array */
+ for (i = 0; i < narrays; ++i) {
+ if (Py_TYPE(arrays[i]) != subtype) {
+ double pr = PyArray_GetPriority((PyObject *)(arrays[i]), 0.0);
+ if (pr > priority) {
+ priority = pr;
+ subtype = Py_TYPE(arrays[i]);
+ }
+ }
+ }
+
+ return subtype;
+}
+
/*
* Concatenates a list of ndarrays.
*/
NPY_NO_EXPORT PyArrayObject *
-PyArray_ConcatenateArrays(int narrays, PyArrayObject **arrays, int axis)
+PyArray_ConcatenateArrays(int narrays, PyArrayObject **arrays, int axis,
+ PyArrayObject* ret)
{
- PyTypeObject *subtype = &PyArray_Type;
- double priority = NPY_PRIORITY;
int iarrays, idim, ndim;
- npy_intp shape[NPY_MAXDIMS], s, strides[NPY_MAXDIMS];
- int strideperm[NPY_MAXDIMS];
- PyArray_Descr *dtype = NULL;
- PyArrayObject *ret = NULL;
+ npy_intp shape[NPY_MAXDIMS];
PyArrayObject_fields *sliding_view = NULL;
if (narrays <= 0) {
@@ -383,47 +402,57 @@ PyArray_ConcatenateArrays(int narrays, PyArrayObject **arrays, int axis)
}
}
- /* Get the priority subtype for the array */
- for (iarrays = 0; iarrays < narrays; ++iarrays) {
- if (Py_TYPE(arrays[iarrays]) != subtype) {
- double pr = PyArray_GetPriority((PyObject *)(arrays[iarrays]), 0.0);
- if (pr > priority) {
- priority = pr;
- subtype = Py_TYPE(arrays[iarrays]);
- }
+ if (ret != NULL) {
+ if (PyArray_NDIM(ret) != ndim) {
+ PyErr_SetString(PyExc_ValueError,
+ "Output array has wrong dimensionality");
+ return NULL;
}
+ if (!PyArray_CompareLists(shape, PyArray_SHAPE(ret), ndim)) {
+ PyErr_SetString(PyExc_ValueError,
+ "Output array is the wrong shape");
+ return NULL;
+ }
+ Py_INCREF(ret);
}
+ else {
+ npy_intp s, strides[NPY_MAXDIMS];
+ int strideperm[NPY_MAXDIMS];
- /* Get the resulting dtype from combining all the arrays */
- dtype = PyArray_ResultType(narrays, arrays, 0, NULL);
- if (dtype == NULL) {
- return NULL;
- }
+ /* Get the priority subtype for the array */
+ PyTypeObject *subtype = PyArray_GetSubType(narrays, arrays);
- /*
- * Figure out the permutation to apply to the strides to match
- * the memory layout of the input arrays, using ambiguity
- * resolution rules matching that of the NpyIter.
- */
- PyArray_CreateMultiSortedStridePerm(narrays, arrays, ndim, strideperm);
- s = dtype->elsize;
- for (idim = ndim-1; idim >= 0; --idim) {
- int iperm = strideperm[idim];
- strides[iperm] = s;
- s *= shape[iperm];
- }
-
- /* Allocate the array for the result. This steals the 'dtype' reference. */
- ret = (PyArrayObject *)PyArray_NewFromDescr(subtype,
- dtype,
- ndim,
- shape,
- strides,
- NULL,
- 0,
- NULL);
- if (ret == NULL) {
- return NULL;
+ /* Get the resulting dtype from combining all the arrays */
+ PyArray_Descr *dtype = PyArray_ResultType(narrays, arrays, 0, NULL);
+ if (dtype == NULL) {
+ return NULL;
+ }
+
+ /*
+ * Figure out the permutation to apply to the strides to match
+ * the memory layout of the input arrays, using ambiguity
+ * resolution rules matching that of the NpyIter.
+ */
+ PyArray_CreateMultiSortedStridePerm(narrays, arrays, ndim, strideperm);
+ s = dtype->elsize;
+ for (idim = ndim-1; idim >= 0; --idim) {
+ int iperm = strideperm[idim];
+ strides[iperm] = s;
+ s *= shape[iperm];
+ }
+
+ /* Allocate the array for the result. This steals the 'dtype' reference. */
+ ret = (PyArrayObject *)PyArray_NewFromDescr(subtype,
+ dtype,
+ ndim,
+ shape,
+ strides,
+ NULL,
+ 0,
+ NULL);
+ if (ret == NULL) {
+ return NULL;
+ }
}
/*
@@ -462,15 +491,10 @@ PyArray_ConcatenateArrays(int narrays, PyArrayObject **arrays, int axis)
*/
NPY_NO_EXPORT PyArrayObject *
PyArray_ConcatenateFlattenedArrays(int narrays, PyArrayObject **arrays,
- NPY_ORDER order)
+ NPY_ORDER order, PyArrayObject *ret)
{
- PyTypeObject *subtype = &PyArray_Type;
- double priority = NPY_PRIORITY;
int iarrays;
- npy_intp stride;
npy_intp shape = 0;
- PyArray_Descr *dtype = NULL;
- PyArrayObject *ret = NULL;
PyArrayObject_fields *sliding_view = NULL;
if (narrays <= 0) {
@@ -494,36 +518,45 @@ PyArray_ConcatenateFlattenedArrays(int narrays, PyArrayObject **arrays,
}
}
- /* Get the priority subtype for the array */
- for (iarrays = 0; iarrays < narrays; ++iarrays) {
- if (Py_TYPE(arrays[iarrays]) != subtype) {
- double pr = PyArray_GetPriority((PyObject *)(arrays[iarrays]), 0.0);
- if (pr > priority) {
- priority = pr;
- subtype = Py_TYPE(arrays[iarrays]);
- }
+ if (ret != NULL) {
+ if (PyArray_NDIM(ret) != 1) {
+ PyErr_SetString(PyExc_ValueError,
+ "Output array must be 1D");
+ return NULL;
+ }
+ if (shape != PyArray_SIZE(ret)) {
+ PyErr_SetString(PyExc_ValueError,
+ "Output array is the wrong size");
+ return NULL;
}
+ Py_INCREF(ret);
}
+ else {
+ npy_intp stride;
- /* Get the resulting dtype from combining all the arrays */
- dtype = PyArray_ResultType(narrays, arrays, 0, NULL);
- if (dtype == NULL) {
- return NULL;
- }
+ /* Get the priority subtype for the array */
+ PyTypeObject *subtype = PyArray_GetSubType(narrays, arrays);
- stride = dtype->elsize;
+ /* Get the resulting dtype from combining all the arrays */
+ PyArray_Descr *dtype = PyArray_ResultType(narrays, arrays, 0, NULL);
+ if (dtype == NULL) {
+ return NULL;
+ }
- /* Allocate the array for the result. This steals the 'dtype' reference. */
- ret = (PyArrayObject *)PyArray_NewFromDescr(subtype,
- dtype,
- 1,
- &shape,
- &stride,
- NULL,
- 0,
- NULL);
- if (ret == NULL) {
- return NULL;
+ stride = dtype->elsize;
+
+ /* Allocate the array for the result. This steals the 'dtype' reference. */
+ ret = (PyArrayObject *)PyArray_NewFromDescr(subtype,
+ dtype,
+ 1,
+ &shape,
+ &stride,
+ NULL,
+ 0,
+ NULL);
+ if (ret == NULL) {
+ return NULL;
+ }
}
/*
@@ -558,22 +591,11 @@ PyArray_ConcatenateFlattenedArrays(int narrays, PyArrayObject **arrays,
return ret;
}
-
-/*NUMPY_API
- * Concatenate
- *
- * Concatenate an arbitrary Python sequence into an array.
- * op is a python object supporting the sequence interface.
- * Its elements will be concatenated together to form a single
- * multidimensional array. If axis is NPY_MAXDIMS or bigger, then
- * each sequence object will be flattened before concatenation
-*/
NPY_NO_EXPORT PyObject *
-PyArray_Concatenate(PyObject *op, int axis)
+PyArray_ConcatenateInto(PyObject *op, int axis, PyArrayObject *ret)
{
int iarrays, narrays;
PyArrayObject **arrays;
- PyArrayObject *ret;
if (!PySequence_Check(op)) {
PyErr_SetString(PyExc_TypeError,
@@ -606,10 +628,10 @@ PyArray_Concatenate(PyObject *op, int axis)
}
if (axis >= NPY_MAXDIMS) {
- ret = PyArray_ConcatenateFlattenedArrays(narrays, arrays, NPY_CORDER);
+ ret = PyArray_ConcatenateFlattenedArrays(narrays, arrays, NPY_CORDER, ret);
}
else {
- ret = PyArray_ConcatenateArrays(narrays, arrays, axis);
+ ret = PyArray_ConcatenateArrays(narrays, arrays, axis, ret);
}
for (iarrays = 0; iarrays < narrays; ++iarrays) {
@@ -629,6 +651,21 @@ fail:
return NULL;
}
+/*NUMPY_API
+ * Concatenate
+ *
+ * Concatenate an arbitrary Python sequence into an array.
+ * op is a python object supporting the sequence interface.
+ * Its elements will be concatenated together to form a single
+ * multidimensional array. If axis is NPY_MAXDIMS or bigger, then
+ * each sequence object will be flattened before concatenation
+*/
+NPY_NO_EXPORT PyObject *
+PyArray_Concatenate(PyObject *op, int axis)
+{
+ return PyArray_ConcatenateInto(op, axis, NULL);
+}
+
static int
_signbit_set(PyArrayObject *arr)
{
@@ -759,32 +796,17 @@ new_array_for_sum(PyArrayObject *ap1, PyArrayObject *ap2, PyArrayObject* out,
int nd, npy_intp dimensions[], int typenum, PyArrayObject **result)
{
PyArrayObject *out_buf;
- PyTypeObject *subtype;
- double prior1, prior2;
- /*
- * Need to choose an output array that can hold a sum
- * -- use priority to determine which subtype.
- */
- if (Py_TYPE(ap2) != Py_TYPE(ap1)) {
- prior2 = PyArray_GetPriority((PyObject *)ap2, 0.0);
- prior1 = PyArray_GetPriority((PyObject *)ap1, 0.0);
- subtype = (prior2 > prior1 ? Py_TYPE(ap2) : Py_TYPE(ap1));
- }
- else {
- prior1 = prior2 = 0.0;
- subtype = Py_TYPE(ap1);
- }
+
if (out) {
int d;
/* verify that out is usable */
- if (Py_TYPE(out) != subtype ||
- PyArray_NDIM(out) != nd ||
+ if (PyArray_NDIM(out) != nd ||
PyArray_TYPE(out) != typenum ||
!PyArray_ISCARRAY(out)) {
PyErr_SetString(PyExc_ValueError,
- "output array is not acceptable "
- "(must have the right type, nr dimensions, and be a C-Array)");
+ "output array is not acceptable (must have the right datatype, "
+ "number of dimensions, and be a C-Array)");
return 0;
}
for (d = 0; d < nd; ++d) {
@@ -825,18 +847,35 @@ new_array_for_sum(PyArrayObject *ap1, PyArrayObject *ap2, PyArrayObject* out,
return out_buf;
}
+ else {
+ PyTypeObject *subtype;
+ double prior1, prior2;
+ /*
+ * Need to choose an output array that can hold a sum
+ * -- use priority to determine which subtype.
+ */
+ if (Py_TYPE(ap2) != Py_TYPE(ap1)) {
+ prior2 = PyArray_GetPriority((PyObject *)ap2, 0.0);
+ prior1 = PyArray_GetPriority((PyObject *)ap1, 0.0);
+ subtype = (prior2 > prior1 ? Py_TYPE(ap2) : Py_TYPE(ap1));
+ }
+ else {
+ prior1 = prior2 = 0.0;
+ subtype = Py_TYPE(ap1);
+ }
- out_buf = (PyArrayObject *)PyArray_New(subtype, nd, dimensions,
- typenum, NULL, NULL, 0, 0,
- (PyObject *)
- (prior2 > prior1 ? ap2 : ap1));
+ out_buf = (PyArrayObject *)PyArray_New(subtype, nd, dimensions,
+ typenum, NULL, NULL, 0, 0,
+ (PyObject *)
+ (prior2 > prior1 ? ap2 : ap1));
- if (out_buf != NULL && result) {
- Py_INCREF(out_buf);
- *result = out_buf;
- }
+ if (out_buf != NULL && result) {
+ Py_INCREF(out_buf);
+ *result = out_buf;
+ }
- return out_buf;
+ return out_buf;
+ }
}
/* Could perhaps be redone to not make contiguous arrays */
@@ -1418,29 +1457,34 @@ array_putmask(PyObject *NPY_UNUSED(module), PyObject *args, PyObject *kwds)
/*
* Compare the field dictionaries for two types.
*
- * Return 1 if the contents are the same, 0 if not.
+ * Return 1 if the field types and field names of the two descrs are equal and
+ * in the same order, 0 if not.
*/
static int
-_equivalent_fields(PyObject *field1, PyObject *field2) {
+_equivalent_fields(PyArray_Descr *type1, PyArray_Descr *type2) {
- int same, val;
+ int val;
- if (field1 == field2) {
+ if (type1->fields == type2->fields && type1->names == type2->names) {
return 1;
}
- if (field1 == NULL || field2 == NULL) {
+ if (type1->fields == NULL || type2->fields == NULL) {
return 0;
}
- val = PyObject_RichCompareBool(field1, field2, Py_EQ);
+ val = PyObject_RichCompareBool(type1->fields, type2->fields, Py_EQ);
if (val != 1 || PyErr_Occurred()) {
- same = 0;
+ PyErr_Clear();
+ return 0;
}
- else {
- same = 1;
+
+ val = PyObject_RichCompareBool(type1->names, type2->names, Py_EQ);
+ if (val != 1 || PyErr_Occurred()) {
+ PyErr_Clear();
+ return 0;
}
- PyErr_Clear();
- return same;
+
+ return 1;
}
/*
@@ -1499,10 +1543,8 @@ PyArray_EquivTypes(PyArray_Descr *type1, PyArray_Descr *type2)
return ((type_num1 == type_num2)
&& _equivalent_subarrays(type1->subarray, type2->subarray));
}
- if (type_num1 == NPY_VOID
- || type_num2 == NPY_VOID) {
- return ((type_num1 == type_num2)
- && _equivalent_fields(type1->fields, type2->fields));
+ if (type_num1 == NPY_VOID || type_num2 == NPY_VOID) {
+ return ((type_num1 == type_num2) && _equivalent_fields(type1, type2));
}
if (type_num1 == NPY_DATETIME
|| type_num1 == NPY_TIMEDELTA
@@ -2156,14 +2198,24 @@ static PyObject *
array_concatenate(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds)
{
PyObject *a0;
+ PyObject *out = NULL;
int axis = 0;
- static char *kwlist[] = {"seq", "axis", NULL};
+ static char *kwlist[] = {"seq", "axis", "out", NULL};
- if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|O&:concatenate", kwlist,
- &a0, PyArray_AxisConverter, &axis)) {
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|O&O:concatenate", kwlist,
+ &a0, PyArray_AxisConverter, &axis, &out)) {
return NULL;
}
- return PyArray_Concatenate(a0, axis);
+ if (out != NULL) {
+ if (out == Py_None) {
+ out = NULL;
+ }
+ else if (!PyArray_Check(out)) {
+ PyErr_SetString(PyExc_TypeError, "'out' must be an array");
+ return NULL;
+ }
+ }
+ return PyArray_ConcatenateInto(a0, axis, (PyArrayObject *)out);
}
static PyObject *
@@ -3224,7 +3276,7 @@ array_can_cast_safely(PyObject *NPY_UNUSED(self), PyObject *args,
npy_bool ret;
PyObject *retobj = NULL;
NPY_CASTING casting = NPY_SAFE_CASTING;
- static char *kwlist[] = {"from", "to", "casting", NULL};
+ static char *kwlist[] = {"from_", "to", "casting", NULL};
if(!PyArg_ParseTupleAndKeywords(args, kwds, "OO&|O&:can_cast", kwlist,
&from_obj,
diff --git a/numpy/core/src/multiarray/number.c b/numpy/core/src/multiarray/number.c
index 8d1e1a24c..08d177992 100644
--- a/numpy/core/src/multiarray/number.c
+++ b/numpy/core/src/multiarray/number.c
@@ -818,7 +818,7 @@ array_int(PyArrayObject *v)
" converted to Python scalars");
return NULL;
}
- pv = PyArray_DESCR(v)->f->getitem(PyArray_DATA(v), v);
+ pv = PyArray_GETITEM(v, PyArray_DATA(v));
if (pv == NULL) {
return NULL;
}
@@ -860,7 +860,7 @@ array_float(PyArrayObject *v)
"be converted to Python scalars");
return NULL;
}
- pv = PyArray_DESCR(v)->f->getitem(PyArray_DATA(v), v);
+ pv = PyArray_GETITEM(v, PyArray_DATA(v));
if (pv == NULL) {
return NULL;
}
@@ -903,7 +903,7 @@ array_long(PyArrayObject *v)
"be converted to Python scalars");
return NULL;
}
- pv = PyArray_DESCR(v)->f->getitem(PyArray_DATA(v), v);
+ pv = PyArray_GETITEM(v, PyArray_DATA(v));
if (pv == NULL) {
return NULL;
}
@@ -944,7 +944,7 @@ array_oct(PyArrayObject *v)
"be converted to Python scalars");
return NULL;
}
- pv = PyArray_DESCR(v)->f->getitem(PyArray_DATA(v), v);
+ pv = PyArray_GETITEM(v, PyArray_DATA(v));
if (pv == NULL) {
return NULL;
}
@@ -985,7 +985,7 @@ array_hex(PyArrayObject *v)
"be converted to Python scalars");
return NULL;
}
- pv = PyArray_DESCR(v)->f->getitem(PyArray_DATA(v), v);
+ pv = PyArray_GETITEM(v, PyArray_DATA(v));
if (pv == NULL) {
return NULL;
}
@@ -1033,7 +1033,7 @@ array_index(PyArrayObject *v)
"only integer scalar arrays can be converted to a scalar index");
return NULL;
}
- return PyArray_DESCR(v)->f->getitem(PyArray_DATA(v), v);
+ return PyArray_GETITEM(v, PyArray_DATA(v));
}
diff --git a/numpy/core/src/multiarray/scalartypes.c.src b/numpy/core/src/multiarray/scalartypes.c.src
index 3b2aa8a43..c92d835ed 100644
--- a/numpy/core/src/multiarray/scalartypes.c.src
+++ b/numpy/core/src/multiarray/scalartypes.c.src
@@ -25,6 +25,7 @@
#include "_datetime.h"
#include "datetime_strings.h"
#include "alloc.h"
+#include "npy_import.h"
#include <stdlib.h>
@@ -339,33 +340,6 @@ gentype_nonzero_number(PyObject *m1)
}
static PyObject *
-gentype_str(PyObject *self)
-{
- PyObject *arr, *ret = NULL;
-
- arr = PyArray_FromScalar(self, NULL);
- if (arr != NULL) {
- ret = PyObject_Str((PyObject *)arr);
- Py_DECREF(arr);
- }
- return ret;
-}
-
-static PyObject *
-gentype_repr(PyObject *self)
-{
- PyObject *arr, *ret = NULL;
-
- arr = PyArray_FromScalar(self, NULL);
- if (arr != NULL) {
- /* XXX: Why are we using str here? */
- ret = PyObject_Str((PyObject *)arr);
- Py_DECREF(arr);
- }
- return ret;
-}
-
-static PyObject *
genint_type_str(PyObject *self)
{
PyObject *item, *item_str;
@@ -634,6 +608,34 @@ static PyObject *
/**end repeat**/
static PyObject *
+voidtype_str(PyObject *self)
+{
+ if (PyDataType_HASFIELDS(((PyVoidScalarObject*)self)->descr)) {
+ static PyObject *reprfunc = NULL;
+
+ npy_cache_import("numpy.core.arrayprint",
+ "_void_scalar_repr", &reprfunc);
+ if (reprfunc == NULL) {
+ return NULL;
+ }
+
+ return PyObject_CallFunction(reprfunc, "O", self);
+ }
+ else {
+ PyObject *item, *item_str;
+
+ item = gentype_generic_method(self, NULL, NULL, "item");
+ if (item == NULL) {
+ return NULL;
+ }
+
+ item_str = PyObject_Str(item);
+ Py_DECREF(item);
+ return item_str;
+ }
+}
+
+static PyObject *
datetimetype_repr(PyObject *self)
{
PyDatetimeScalarObject *scal;
@@ -2179,35 +2181,31 @@ static PyObject *
voidtype_subscript(PyVoidScalarObject *self, PyObject *ind)
{
npy_intp n;
- PyObject *ret, *args;
+ PyObject *ret, *res;
- if (!(PyDataType_HASFIELDS(self->descr))) {
- PyErr_SetString(PyExc_IndexError,
- "can't index void scalar without fields");
- return NULL;
+ /* structured voids will accept an integer index */
+ if (PyDataType_HASFIELDS(self->descr)) {
+ n = PyArray_PyIntAsIntp(ind);
+ if (!error_converting(n)) {
+ return voidtype_item(self, (Py_ssize_t)n);
+ }
+ PyErr_Clear();
}
-#if defined(NPY_PY3K)
- if (PyUString_Check(ind)) {
-#else
- if (PyBytes_Check(ind) || PyUnicode_Check(ind)) {
-#endif
- args = Py_BuildValue("(O)", ind);
- ret = gentype_generic_method((PyObject *)self, args, NULL, "__getitem__");
- Py_DECREF(args);
- return ret;
- }
+ res = PyArray_FromScalar((PyObject*)self, NULL);
- /* try to convert it to a number */
- n = PyArray_PyIntAsIntp(ind);
- if (error_converting(n)) {
- goto fail;
+ /* ellipsis should return 0d array */
+ if(ind == Py_Ellipsis){
+ return res;
}
- return voidtype_item(self, (Py_ssize_t)n);
-fail:
- PyErr_SetString(PyExc_IndexError, "invalid index");
- return NULL;
+ /*
+ * other cases (field names, empty tuple) will return either
+ * scalar or non-0d array. Compute this using ndarray subscript.
+ */
+ ret = array_subscript((PyArrayObject *)res, ind);
+ Py_DECREF(res);
+ return PyArray_Return((PyArrayObject*)ret);
}
static int
@@ -4077,8 +4075,6 @@ initialize_numeric_types(void)
PyGenericArrType_Type.tp_new = NULL;
PyGenericArrType_Type.tp_alloc = gentype_alloc;
PyGenericArrType_Type.tp_free = (freefunc)gentype_free;
- PyGenericArrType_Type.tp_repr = gentype_repr;
- PyGenericArrType_Type.tp_str = gentype_str;
PyGenericArrType_Type.tp_richcompare = gentype_richcompare;
PyBoolArrType_Type.tp_as_number = &bool_arrtype_as_number;
@@ -4126,6 +4122,8 @@ initialize_numeric_types(void)
PyVoidArrType_Type.tp_getset = voidtype_getsets;
PyVoidArrType_Type.tp_as_mapping = &voidtype_as_mapping;
PyVoidArrType_Type.tp_as_sequence = &voidtype_as_sequence;
+ PyVoidArrType_Type.tp_repr = voidtype_str;
+ PyVoidArrType_Type.tp_str = voidtype_str;
PyIntegerArrType_Type.tp_getset = inttype_getsets;
diff --git a/numpy/core/src/multiarray/sequence.c b/numpy/core/src/multiarray/sequence.c
index 55b72c198..4769bdad9 100644
--- a/numpy/core/src/multiarray/sequence.c
+++ b/numpy/core/src/multiarray/sequence.c
@@ -15,9 +15,7 @@
#include "mapping.h"
#include "sequence.h"
-
-static int
-array_any_nonzero(PyArrayObject *mp);
+#include "calculation.h"
/*************************************************************************
**************** Implement Sequence Protocol **************************
@@ -32,16 +30,18 @@ array_contains(PyArrayObject *self, PyObject *el)
{
/* equivalent to (self == el).any() */
- PyObject *res;
int ret;
+ PyObject *res, *any;
res = PyArray_EnsureAnyArray(PyObject_RichCompare((PyObject *)self,
el, Py_EQ));
if (res == NULL) {
return -1;
}
- ret = array_any_nonzero((PyArrayObject *)res);
+ any = PyArray_Any((PyArrayObject *)res, NPY_MAXDIMS, NULL);
Py_DECREF(res);
+ ret = PyObject_IsTrue(any);
+ Py_DECREF(any);
return ret;
}
@@ -61,30 +61,3 @@ NPY_NO_EXPORT PySequenceMethods array_as_sequence = {
/****************** End of Sequence Protocol ****************************/
-/*
- * Helpers
- */
-
-/* Array evaluates as "TRUE" if any of the elements are non-zero*/
-static int
-array_any_nonzero(PyArrayObject *arr)
-{
- npy_intp counter;
- PyArrayIterObject *it;
- npy_bool anyTRUE = NPY_FALSE;
-
- it = (PyArrayIterObject *)PyArray_IterNew((PyObject *)arr);
- if (it == NULL) {
- return anyTRUE;
- }
- counter = it->size;
- while (counter--) {
- if (PyArray_DESCR(arr)->f->nonzero(it->dataptr, arr)) {
- anyTRUE = NPY_TRUE;
- break;
- }
- PyArray_ITER_NEXT(it);
- }
- Py_DECREF(it);
- return anyTRUE;
-}
diff --git a/numpy/core/src/multiarray/strfuncs.c b/numpy/core/src/multiarray/strfuncs.c
index 5a0d20335..f7980ffe0 100644
--- a/numpy/core/src/multiarray/strfuncs.c
+++ b/numpy/core/src/multiarray/strfuncs.c
@@ -66,7 +66,6 @@ static int
dump_data(char **string, Py_ssize_t *n, Py_ssize_t *max_n, char *data, int nd,
npy_intp *dimensions, npy_intp *strides, PyArrayObject* self)
{
- PyArray_Descr *descr=PyArray_DESCR(self);
PyObject *op = NULL, *sp = NULL;
char *ostring;
npy_intp i, N, ret = 0;
@@ -79,7 +78,7 @@ dump_data(char **string, Py_ssize_t *n, Py_ssize_t *max_n, char *data, int nd,
} while (0)
if (nd == 0) {
- if ((op = descr->f->getitem(data, self)) == NULL) {
+ if ((op = PyArray_GETITEM(self, data)) == NULL) {
return -1;
}
sp = PyObject_Repr(op);
diff --git a/numpy/core/src/multiarray/temp_elide.c b/numpy/core/src/multiarray/temp_elide.c
index abca0ecd6..b8fa4c0ae 100644
--- a/numpy/core/src/multiarray/temp_elide.c
+++ b/numpy/core/src/multiarray/temp_elide.c
@@ -284,7 +284,7 @@ can_elide_temp(PyArrayObject * alhs, PyObject * orhs, int * cannot)
*/
if (Py_REFCNT(alhs) != 1 || !PyArray_CheckExact(alhs) ||
!PyArray_ISNUMBER(alhs) ||
- !(PyArray_FLAGS(alhs) & NPY_ARRAY_OWNDATA) ||
+ !PyArray_CHKFLAGS(alhs, NPY_ARRAY_OWNDATA) ||
!PyArray_ISWRITEABLE(alhs) ||
PyArray_CHKFLAGS(alhs, NPY_ARRAY_UPDATEIFCOPY) ||
PyArray_NBYTES(alhs) < NPY_MIN_ELIDE_BYTES) {
@@ -362,7 +362,9 @@ can_elide_temp_unary(PyArrayObject * m1)
int cannot;
if (Py_REFCNT(m1) != 1 || !PyArray_CheckExact(m1) ||
!PyArray_ISNUMBER(m1) ||
- !(PyArray_FLAGS(m1) & NPY_ARRAY_OWNDATA) ||
+ !PyArray_CHKFLAGS(m1, NPY_ARRAY_OWNDATA) ||
+ !PyArray_ISWRITEABLE(m1) ||
+ PyArray_CHKFLAGS(m1, NPY_ARRAY_UPDATEIFCOPY) ||
PyArray_NBYTES(m1) < NPY_MIN_ELIDE_BYTES) {
return 0;
}
diff --git a/numpy/core/src/npymath/npy_math_complex.c.src b/numpy/core/src/npymath/npy_math_complex.c.src
index a82a6737c..fb31e8e6a 100644
--- a/numpy/core/src/npymath/npy_math_complex.c.src
+++ b/numpy/core/src/npymath/npy_math_complex.c.src
@@ -184,7 +184,9 @@ npy_carg@c@(@ctype@ z)
#define SCALED_CEXP_LOWERL 11357.216553474703895L
#define SCALED_CEXP_UPPERL 22756.021937783004509L
-#ifndef HAVE_CEXP@C@
+#if !defined(HAVE_CSINH@C@) || \
+ !defined(HAVE_CCOSH@C@) || \
+ !defined(HAVE_CEXP@C@)
static
@ctype@
@@ -212,6 +214,10 @@ _npy_scaled_cexp@c@(@type@ x, @type@ y, npy_int expt)
npy_ldexp@c@(mant * mantsin, expt + exsin));
}
+#endif
+
+#ifndef HAVE_CEXP@C@
+
@ctype@
npy_cexp@c@(@ctype@ z)
{
diff --git a/numpy/core/src/npymath/npy_math_internal.h.src b/numpy/core/src/npymath/npy_math_internal.h.src
index 44d6f915f..093e51b2d 100644
--- a/numpy/core/src/npymath/npy_math_internal.h.src
+++ b/numpy/core/src/npymath/npy_math_internal.h.src
@@ -659,7 +659,7 @@ npy_divmod@c@(@type@ a, @type@ b, @type@ *modulus)
/* snap quotient to nearest integral value */
if (div) {
- floordiv = npy_floor(div);
+ floordiv = npy_floor@c@(div);
if (div - floordiv > 0.5@c@)
floordiv += 1.0@c@;
}
diff --git a/numpy/core/src/umath/scalarmath.c.src b/numpy/core/src/umath/scalarmath.c.src
index eb75b7375..259adef0a 100644
--- a/numpy/core/src/umath/scalarmath.c.src
+++ b/numpy/core/src/umath/scalarmath.c.src
@@ -550,13 +550,13 @@ half_ctype_absolute(npy_half a, npy_half *out)
/**begin repeat
* #name = cfloat, cdouble, clongdouble#
* #type = npy_cfloat, npy_cdouble, npy_clongdouble#
- * #rname = float, double, longdouble#
* #rtype = npy_float, npy_double, npy_longdouble#
+ * #c = f,,l#
*/
static void
@name@_ctype_absolute(@type@ a, @rtype@ *out)
{
- *out = _basic_@rname@_sqrt(a.real*a.real + a.imag*a.imag);
+ *out = npy_cabs@c@(a);
}
/**end repeat**/
diff --git a/numpy/core/tests/test_arrayprint.py b/numpy/core/tests/test_arrayprint.py
index 7d4acd35d..26faabfb8 100644
--- a/numpy/core/tests/test_arrayprint.py
+++ b/numpy/core/tests/test_arrayprint.py
@@ -11,7 +11,7 @@ from numpy.testing import (
class TestArrayRepr(object):
def test_nan_inf(self):
x = np.array([np.nan, np.inf])
- assert_equal(repr(x), 'array([ nan, inf])')
+ assert_equal(repr(x), 'array([nan, inf])')
def test_subclass(self):
class sub(np.ndarray): pass
@@ -60,6 +60,10 @@ class TestArrayRepr(object):
assert_equal(repr(arr1d),
'array([list([1, 2]), list([3])], dtype=object)')
+ def test_void_scalar_recursion(self):
+ # gh-9345
+ repr(np.void(b'test')) # RecursionError ?
+
class TestComplexArray(object):
def test_str(self):
@@ -68,45 +72,45 @@ class TestComplexArray(object):
dtypes = [np.complex64, np.cdouble, np.clongdouble]
actual = [str(np.array([c], dt)) for c in cvals for dt in dtypes]
wanted = [
- '[ 0.+0.j]', '[ 0.+0.j]', '[ 0.0+0.0j]',
- '[ 0.+1.j]', '[ 0.+1.j]', '[ 0.0+1.0j]',
- '[ 0.-1.j]', '[ 0.-1.j]', '[ 0.0-1.0j]',
- '[ 0.+infj]', '[ 0.+infj]', '[ 0.0+infj]',
- '[ 0.-infj]', '[ 0.-infj]', '[ 0.0-infj]',
- '[ 0.+nanj]', '[ 0.+nanj]', '[ 0.0+nanj]',
- '[ 1.+0.j]', '[ 1.+0.j]', '[ 1.0+0.0j]',
- '[ 1.+1.j]', '[ 1.+1.j]', '[ 1.0+1.0j]',
- '[ 1.-1.j]', '[ 1.-1.j]', '[ 1.0-1.0j]',
- '[ 1.+infj]', '[ 1.+infj]', '[ 1.0+infj]',
- '[ 1.-infj]', '[ 1.-infj]', '[ 1.0-infj]',
- '[ 1.+nanj]', '[ 1.+nanj]', '[ 1.0+nanj]',
+ '[0.+0.j]', '[0.+0.j]', '[ 0.0+0.0j]',
+ '[0.+1.j]', '[0.+1.j]', '[ 0.0+1.0j]',
+ '[0.-1.j]', '[0.-1.j]', '[ 0.0-1.0j]',
+ '[0.+infj]', '[0.+infj]', '[ 0.0+infj]',
+ '[0.-infj]', '[0.-infj]', '[ 0.0-infj]',
+ '[0.+nanj]', '[0.+nanj]', '[ 0.0+nanj]',
+ '[1.+0.j]', '[1.+0.j]', '[ 1.0+0.0j]',
+ '[1.+1.j]', '[1.+1.j]', '[ 1.0+1.0j]',
+ '[1.-1.j]', '[1.-1.j]', '[ 1.0-1.0j]',
+ '[1.+infj]', '[1.+infj]', '[ 1.0+infj]',
+ '[1.-infj]', '[1.-infj]', '[ 1.0-infj]',
+ '[1.+nanj]', '[1.+nanj]', '[ 1.0+nanj]',
'[-1.+0.j]', '[-1.+0.j]', '[-1.0+0.0j]',
'[-1.+1.j]', '[-1.+1.j]', '[-1.0+1.0j]',
'[-1.-1.j]', '[-1.-1.j]', '[-1.0-1.0j]',
'[-1.+infj]', '[-1.+infj]', '[-1.0+infj]',
'[-1.-infj]', '[-1.-infj]', '[-1.0-infj]',
'[-1.+nanj]', '[-1.+nanj]', '[-1.0+nanj]',
- '[ inf+0.j]', '[ inf+0.j]', '[ inf+0.0j]',
- '[ inf+1.j]', '[ inf+1.j]', '[ inf+1.0j]',
- '[ inf-1.j]', '[ inf-1.j]', '[ inf-1.0j]',
- '[ inf+infj]', '[ inf+infj]', '[ inf+infj]',
- '[ inf-infj]', '[ inf-infj]', '[ inf-infj]',
- '[ inf+nanj]', '[ inf+nanj]', '[ inf+nanj]',
+ '[inf+0.j]', '[inf+0.j]', '[ inf+0.0j]',
+ '[inf+1.j]', '[inf+1.j]', '[ inf+1.0j]',
+ '[inf-1.j]', '[inf-1.j]', '[ inf-1.0j]',
+ '[inf+infj]', '[inf+infj]', '[ inf+infj]',
+ '[inf-infj]', '[inf-infj]', '[ inf-infj]',
+ '[inf+nanj]', '[inf+nanj]', '[ inf+nanj]',
'[-inf+0.j]', '[-inf+0.j]', '[-inf+0.0j]',
'[-inf+1.j]', '[-inf+1.j]', '[-inf+1.0j]',
'[-inf-1.j]', '[-inf-1.j]', '[-inf-1.0j]',
'[-inf+infj]', '[-inf+infj]', '[-inf+infj]',
'[-inf-infj]', '[-inf-infj]', '[-inf-infj]',
'[-inf+nanj]', '[-inf+nanj]', '[-inf+nanj]',
- '[ nan+0.j]', '[ nan+0.j]', '[ nan+0.0j]',
- '[ nan+1.j]', '[ nan+1.j]', '[ nan+1.0j]',
- '[ nan-1.j]', '[ nan-1.j]', '[ nan-1.0j]',
- '[ nan+infj]', '[ nan+infj]', '[ nan+infj]',
- '[ nan-infj]', '[ nan-infj]', '[ nan-infj]',
- '[ nan+nanj]', '[ nan+nanj]', '[ nan+nanj]']
+ '[nan+0.j]', '[nan+0.j]', '[ nan+0.0j]',
+ '[nan+1.j]', '[nan+1.j]', '[ nan+1.0j]',
+ '[nan-1.j]', '[nan-1.j]', '[ nan-1.0j]',
+ '[nan+infj]', '[nan+infj]', '[ nan+infj]',
+ '[nan-infj]', '[nan-infj]', '[ nan-infj]',
+ '[nan+nanj]', '[nan+nanj]', '[ nan+nanj]']
for res, val in zip(actual, wanted):
- assert_(res == val)
+ assert_equal(res, val)
class TestArray2String(object):
def test_basic(self):
@@ -157,7 +161,7 @@ class TestArray2String(object):
dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
x = np.array([('Sarah', (8.0, 7.0)), ('John', (6.0, 7.0))], dtype=dt)
assert_equal(np.array2string(x),
- "[('Sarah', [ 8., 7.]) ('John', [ 6., 7.])]")
+ "[('Sarah', [8., 7.]) ('John', [6., 7.])]")
# for issue #5692
A = np.zeros(shape=10, dtype=[("A", "M8[s]")])
@@ -180,7 +184,7 @@ class TestArray2String(object):
# See #8172
array_scalar = np.array(
(1., 2.1234567890123456789, 3.), dtype=('f8,f8,f8'))
- assert_equal(np.array2string(array_scalar), "( 1., 2.12345679, 3.)")
+ assert_equal(np.array2string(array_scalar), "(1., 2.12345679, 3.)")
class TestPrintOptions(object):
@@ -194,17 +198,17 @@ class TestPrintOptions(object):
def test_basic(self):
x = np.array([1.5, 0, 1.234567890])
- assert_equal(repr(x), "array([ 1.5 , 0. , 1.23456789])")
+ assert_equal(repr(x), "array([1.5 , 0. , 1.23456789])")
np.set_printoptions(precision=4)
- assert_equal(repr(x), "array([ 1.5 , 0. , 1.2346])")
+ assert_equal(repr(x), "array([1.5 , 0. , 1.2346])")
def test_precision_zero(self):
np.set_printoptions(precision=0)
for values, string in (
- ([0.], " 0."), ([.3], " 0."), ([-.3], "-0."), ([.7], " 1."),
- ([1.5], " 2."), ([-1.5], "-2."), ([-15.34], "-15."),
- ([100.], " 100."), ([.2, -1, 122.51], " 0., -1., 123."),
- ([0], "0"), ([-12], "-12"), ([complex(.3, -.7)], " 0.-1.j")):
+ ([0.], "0."), ([.3], "0."), ([-.3], "-0."), ([.7], "1."),
+ ([1.5], "2."), ([-1.5], "-2."), ([-15.34], "-15."),
+ ([100.], "100."), ([.2, -1, 122.51], " 0., -1., 123."),
+ ([0], "0"), ([-12], "-12"), ([complex(.3, -.7)], "0.-1.j")):
x = np.array(values)
assert_equal(repr(x), "array([%s])" % string)
@@ -234,7 +238,7 @@ class TestPrintOptions(object):
np.set_printoptions(formatter={'float':lambda x: str(x-1)})
assert_equal(repr(x), "array([-1.0, 0.0, 1.0])")
np.set_printoptions(formatter={'float_kind':None})
- assert_equal(repr(x), "array([ 0., 1., 2.])")
+ assert_equal(repr(x), "array([0., 1., 2.])")
def test_0d_arrays(self):
assert_equal(repr(np.datetime64('2005-02-25')[...]),
@@ -244,6 +248,72 @@ class TestPrintOptions(object):
np.set_printoptions(formatter={'all':lambda x: "test"})
assert_equal(repr(x), "array(test)")
+ def test_float_spacing(self):
+ x = np.array([1., 2., 3.])
+ y = np.array([1., 2., -10.])
+ z = np.array([100., 2., -1.])
+ w = np.array([-100., 2., 1.])
+
+ assert_equal(repr(x), 'array([1., 2., 3.])')
+ assert_equal(repr(y), 'array([ 1., 2., -10.])')
+ assert_equal(repr(np.array(y[0])), 'array(1.)')
+ assert_equal(repr(np.array(y[-1])), 'array(-10.)')
+ assert_equal(repr(z), 'array([100., 2., -1.])')
+ assert_equal(repr(w), 'array([-100., 2., 1.])')
+
+ assert_equal(repr(np.array([np.nan, np.inf])), 'array([nan, inf])')
+ assert_equal(repr(np.array([np.nan, -np.inf])), 'array([ nan, -inf])')
+
+ x = np.array([np.inf, 100000, 1.1234])
+ y = np.array([np.inf, 100000, -1.1234])
+ z = np.array([np.inf, 1.1234, -1e120])
+ np.set_printoptions(precision=2)
+ assert_equal(repr(x), 'array([ inf, 1.00e+05, 1.12e+00])')
+ assert_equal(repr(y), 'array([ inf, 1.00e+05, -1.12e+00])')
+ assert_equal(repr(z), 'array([ inf, 1.12e+000, -1.00e+120])')
+
+ def test_bool_spacing(self):
+ assert_equal(repr(np.array([True, True])),
+ 'array([ True, True], dtype=bool)')
+ assert_equal(repr(np.array([True, False])),
+ 'array([ True, False], dtype=bool)')
+ assert_equal(repr(np.array([True])),
+ 'array([ True], dtype=bool)')
+ assert_equal(repr(np.array(True)),
+ 'array(True, dtype=bool)')
+ assert_equal(repr(np.array(False)),
+ 'array(False, dtype=bool)')
+
+ def test_sign_spacing(self):
+ a = np.arange(4.)
+ b = np.array([1.234e9])
+
+ assert_equal(repr(a), 'array([0., 1., 2., 3.])')
+ assert_equal(repr(np.array(1.)), 'array(1.)')
+ assert_equal(repr(b), 'array([1.23400000e+09])')
+
+ np.set_printoptions(sign=' ')
+ assert_equal(repr(a), 'array([ 0., 1., 2., 3.])')
+ assert_equal(repr(np.array(1.)), 'array( 1.)')
+ assert_equal(repr(b), 'array([ 1.23400000e+09])')
+
+ np.set_printoptions(sign='+')
+ assert_equal(repr(a), 'array([+0., +1., +2., +3.])')
+ assert_equal(repr(np.array(1.)), 'array(+1.)')
+ assert_equal(repr(b), 'array([+1.23400000e+09])')
+
+ np.set_printoptions(sign='legacy')
+ assert_equal(repr(a), 'array([ 0., 1., 2., 3.])')
+ assert_equal(repr(np.array(1.)), 'array(1.)')
+ assert_equal(repr(b), 'array([ 1.23400000e+09])')
+
+ def test_sign_spacing_structured(self):
+ a = np.ones(2, dtype='f,f')
+ assert_equal(repr(a), "array([(1., 1.), (1., 1.)],\n"
+ " dtype=[('f0', '<f4'), ('f1', '<f4')])")
+ assert_equal(repr(a[0]), "(1., 1.)")
+
+
def test_unicode_object_array():
import sys
if sys.version_info[0] >= 3:
diff --git a/numpy/core/tests/test_deprecations.py b/numpy/core/tests/test_deprecations.py
index 42871a77d..e3e8c32f9 100644
--- a/numpy/core/tests/test_deprecations.py
+++ b/numpy/core/tests/test_deprecations.py
@@ -434,5 +434,21 @@ class TestNPY_CHAR(_DeprecationTestCase):
assert_(npy_char_deprecation() == 'S1')
+class TestDatetimeEvent(_DeprecationTestCase):
+ # 2017-08-11, 1.14.0
+ def test_3_tuple(self):
+ for cls in (np.datetime64, np.timedelta64):
+ # two valid uses - (unit, num) and (unit, num, den, None)
+ self.assert_not_deprecated(cls, args=(1, ('ms', 2)))
+ self.assert_not_deprecated(cls, args=(1, ('ms', 2, 1, None)))
+
+ # trying to use the event argument, removed in 1.7.0, is deprecated
+ # it used to be a uint8
+ self.assert_deprecated(cls, args=(1, ('ms', 2, 'event')))
+ self.assert_deprecated(cls, args=(1, ('ms', 2, 63)))
+ self.assert_deprecated(cls, args=(1, ('ms', 2, 1, 'event')))
+ self.assert_deprecated(cls, args=(1, ('ms', 2, 1, 63)))
+
+
if __name__ == "__main__":
run_module_suite()
diff --git a/numpy/core/tests/test_dtype.py b/numpy/core/tests/test_dtype.py
index 6f6654d42..9cefb2ad1 100644
--- a/numpy/core/tests/test_dtype.py
+++ b/numpy/core/tests/test_dtype.py
@@ -104,6 +104,15 @@ class TestBuiltin(object):
'formats':['i1', 'f4'],
'offsets':[0, 2]}, align=True)
+ def test_field_order_equality(self):
+ x = np.dtype({'names': ['A', 'B'],
+ 'formats': ['i4', 'f4'],
+ 'offsets': [0, 4]})
+ y = np.dtype({'names': ['B', 'A'],
+ 'formats': ['f4', 'i4'],
+ 'offsets': [4, 0]})
+ assert_equal(x == y, False)
+
class TestRecord(object):
def test_equivalent_record(self):
"""Test whether equivalent record dtypes hash the same."""
@@ -211,11 +220,12 @@ class TestRecord(object):
dt = np.dtype({'names':['f0', 'f1', 'f2'], 'formats':['<u4', '<u2', '<u2'],
'offsets':[4, 0, 2]}, align=True)
assert_equal(dt.itemsize, 8)
+ # field name should not matter: assignment is by position
dt2 = np.dtype({'names':['f2', 'f0', 'f1'],
- 'formats':['<u2', '<u4', '<u2'],
- 'offsets':[2, 4, 0]}, align=True)
+ 'formats':['<u4', '<u2', '<u2'],
+ 'offsets':[4, 0, 2]}, align=True)
vals = [(0, 1, 2), (3, -1, 4)]
- vals2 = [(2, 0, 1), (4, 3, -1)]
+ vals2 = [(0, 1, 2), (3, -1, 4)]
a = np.array(vals, dt)
b = np.array(vals2, dt2)
assert_equal(a.astype(dt2), b)
diff --git a/numpy/core/tests/test_einsum.py b/numpy/core/tests/test_einsum.py
index 7cc9f67ef..da83bb8c4 100644
--- a/numpy/core/tests/test_einsum.py
+++ b/numpy/core/tests/test_einsum.py
@@ -767,13 +767,13 @@ class TestEinSum(object):
class TestEinSumPath(object):
- def build_operands(self, string):
+ def build_operands(self, string, size_dict=global_size_dict):
# Builds views based off initial operands
operands = [string]
terms = string.split('->')[0].split(',')
for term in terms:
- dims = [global_size_dict[x] for x in term]
+ dims = [size_dict[x] for x in term]
operands.append(np.random.rand(*dims))
return operands
@@ -863,6 +863,16 @@ class TestEinSumPath(object):
path, path_str = np.einsum_path(*edge_test4, optimize='optimal')
self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 2), (0, 1)])
+ # Edge test5
+ edge_test4 = self.build_operands('a,ac,ab,ad,cd,bd,bc->',
+ size_dict={"a": 20, "b": 20, "c": 20, "d": 20})
+ path, path_str = np.einsum_path(*edge_test4, optimize='greedy')
+ self.assert_path_equal(path, ['einsum_path', (0, 1), (0, 1, 2, 3, 4, 5)])
+
+ path, path_str = np.einsum_path(*edge_test4, optimize='optimal')
+ self.assert_path_equal(path, ['einsum_path', (0, 1), (0, 1, 2, 3, 4, 5)])
+
+
def test_path_type_input(self):
# Test explicit path handeling
path_test = self.build_operands('dcc,fce,ea,dbf->ab')
diff --git a/numpy/core/tests/test_indexing.py b/numpy/core/tests/test_indexing.py
index 43965d994..4c3bac529 100644
--- a/numpy/core/tests/test_indexing.py
+++ b/numpy/core/tests/test_indexing.py
@@ -106,6 +106,12 @@ class TestIndexing(object):
a = np.array(0)
assert_(isinstance(a[()], np.int_))
+ def test_void_scalar_empty_tuple(self):
+ s = np.zeros((), dtype='V4')
+ assert_equal(s[()].dtype, s.dtype)
+ assert_equal(s[()], s)
+ assert_equal(type(s[...]), np.ndarray)
+
def test_same_kind_index_casting(self):
# Indexes should be cast with same-kind and not safe, even if that
# is somewhat unsafe. So test various different code paths.
diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py
index 74f6a3af9..c28a72150 100644
--- a/numpy/core/tests/test_multiarray.py
+++ b/numpy/core/tests/test_multiarray.py
@@ -423,30 +423,33 @@ class TestAssignment(object):
# only relevant if longdouble is larger than float
# we're looking for loss of precision
- # gh-8902
- tinyb = np.nextafter(np.longdouble(0), 1)
- tinya = np.nextafter(np.longdouble(0), -1)
- tiny1d = np.array([tinya])
- assert_equal(tiny1d[0], tinya)
+ for dtype in (np.longdouble, np.longcomplex):
+ # gh-8902
+ tinyb = np.nextafter(np.longdouble(0), 1).astype(dtype)
+ tinya = np.nextafter(np.longdouble(0), -1).astype(dtype)
+
+ # construction
+ tiny1d = np.array([tinya])
+ assert_equal(tiny1d[0], tinya)
- # scalar = scalar
- tiny1d[0] = tinyb
- assert_equal(tiny1d[0], tinyb)
+ # scalar = scalar
+ tiny1d[0] = tinyb
+ assert_equal(tiny1d[0], tinyb)
- # 0d = scalar
- tiny1d[0, ...] = tinya
- assert_equal(tiny1d[0], tinya)
+ # 0d = scalar
+ tiny1d[0, ...] = tinya
+ assert_equal(tiny1d[0], tinya)
- # 0d = 0d
- tiny1d[0, ...] = tinyb[...]
- assert_equal(tiny1d[0], tinyb)
+ # 0d = 0d
+ tiny1d[0, ...] = tinyb[...]
+ assert_equal(tiny1d[0], tinyb)
- # scalar = 0d
- tiny1d[0] = tinyb[...]
- assert_equal(tiny1d[0], tinyb)
+ # scalar = 0d
+ tiny1d[0] = tinyb[...]
+ assert_equal(tiny1d[0], tinyb)
- arr = np.array([np.array(tinya)])
- assert_equal(arr[0], tinya)
+ arr = np.array([np.array(tinya)])
+ assert_equal(arr[0], tinya)
class TestDtypedescr(object):
@@ -952,16 +955,13 @@ class TestStructured(object):
# Check that equality comparison works on structured arrays if
# they are 'equiv'-castable
a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i4'), ('b', '<f8')])
- b = np.array([(42, 5), (1, 10)], dtype=[('b', '>f8'), ('a', '<i4')])
+ b = np.array([(5, 42), (10, 1)], dtype=[('a', '<i4'), ('b', '>f8')])
assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
assert_equal(a == b, [True, True])
- # Check that 'equiv' casting can reorder fields and change byte
- # order
- # New in 1.12: This behavior changes in 1.13, test for dep warning
+ # Check that 'equiv' casting can change byte order
assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
- with assert_warns(FutureWarning):
- c = a.astype(b.dtype, casting='equiv')
+ c = a.astype(b.dtype, casting='equiv')
assert_equal(a == c, [True, True])
# Check that 'safe' casting can change byte order and up-cast
@@ -1096,6 +1096,54 @@ class TestStructured(object):
b = a[0]
assert_(b.base is a)
+ def test_assignment(self):
+ def testassign(arr, v):
+ c = arr.copy()
+ c[0] = v # assign using setitem
+ c[1:] = v # assign using "dtype_transfer" code paths
+ return c
+
+ dt = np.dtype([('foo', 'i8'), ('bar', 'i8')])
+ arr = np.ones(2, dt)
+ v1 = np.array([(2,3)], dtype=[('foo', 'i8'), ('bar', 'i8')])
+ v2 = np.array([(2,3)], dtype=[('bar', 'i8'), ('foo', 'i8')])
+ v3 = np.array([(2,3)], dtype=[('bar', 'i8'), ('baz', 'i8')])
+ v4 = np.array([(2,)], dtype=[('bar', 'i8')])
+ v5 = np.array([(2,3)], dtype=[('foo', 'f8'), ('bar', 'f8')])
+ w = arr.view({'names': ['bar'], 'formats': ['i8'], 'offsets': [8]})
+
+ ans = np.array([(2,3),(2,3)], dtype=dt)
+ assert_equal(testassign(arr, v1), ans)
+ assert_equal(testassign(arr, v2), ans)
+ assert_equal(testassign(arr, v3), ans)
+ assert_raises(ValueError, lambda: testassign(arr, v4))
+ assert_equal(testassign(arr, v5), ans)
+ w[:] = 4
+ assert_equal(arr, np.array([(1,4),(1,4)], dtype=dt))
+
+ # test field-reordering, assignment by position, and self-assignment
+ a = np.array([(1,2,3)],
+ dtype=[('foo', 'i8'), ('bar', 'i8'), ('baz', 'f4')])
+ a[['foo', 'bar']] = a[['bar', 'foo']]
+ assert_equal(a[0].item(), (2,1,3))
+
+ # test that this works even for 'simple_unaligned' structs
+ # (ie, that PyArray_EquivTypes cares about field order too)
+ a = np.array([(1,2)], dtype=[('a', 'i4'), ('b', 'i4')])
+ a[['a', 'b']] = a[['b', 'a']]
+ assert_equal(a[0].item(), (2,1))
+
+ def test_structuredscalar_indexing(self):
+ # test gh-7262
+ x = np.empty(shape=1, dtype="(2)3S,(2)3U")
+ assert_equal(x[["f0","f1"]][0], x[0][["f0","f1"]])
+ assert_equal(x[0], x[0][()])
+
+ def test_multiindex_titles(self):
+ a = np.zeros(4, dtype=[(('a', 'b'), 'i'), ('c', 'i'), ('d', 'i')])
+ assert_raises(KeyError, lambda : a[['a','c']])
+ assert_raises(KeyError, lambda : a[['b','b']])
+ a[['b','c']] # no exception
class TestBool(object):
def test_test_interning(self):
@@ -2453,6 +2501,18 @@ class TestMethods(object):
assert_raises(ValueError, np.dot, a, b, out=b[::2])
assert_raises(ValueError, np.dot, a, b, out=b.T)
+ def test_dot_matmul_out(self):
+ # gh-9641
+ class Sub(np.ndarray):
+ pass
+ a = np.ones((2, 2)).view(Sub)
+ b = np.ones((2, 2)).view(Sub)
+ out = np.ones((2, 2))
+
+ # make sure out can be any ndarray (not only subclass of inputs)
+ np.dot(a, b, out=out)
+ np.matmul(a, b, out=out)
+
def test_diagonal(self):
a = np.arange(12).reshape((3, 4))
assert_equal(a.diagonal(), [0, 5, 10])
@@ -2791,6 +2851,10 @@ class TestMethods(object):
e = np.array(['1+1j'], 'U')
assert_raises(TypeError, complex, e)
+class TestCequenceMethods(object):
+ def test_array_contains(self):
+ assert_(4.0 in np.arange(16.).reshape(4,4))
+ assert_(20.0 not in np.arange(16.).reshape(4,4))
class TestBinop(object):
def test_inplace(self):
@@ -3201,6 +3265,15 @@ class TestTemporaryElide(object):
a = np.bool_()
assert_(type(~(a & a)) is np.bool_)
+ def test_elide_scalar_readonly(self):
+ # The imaginary part of a real array is readonly. This needs to go
+ # through fast_scalar_power which is only called for powers of
+ # +1, -1, 0, 0.5, and 2, so use 2. Also need valid refcount for
+ # elision which can be gotten for the imaginary part of a real
+ # array. Should not error.
+ a = np.empty(100000, dtype=np.float64)
+ a.imag ** 2
+
def test_elide_readonly(self):
# don't try to elide readonly temporaries
r = np.asarray(np.broadcast_to(np.zeros(1), 100000).flat) * 0.0
@@ -4311,14 +4384,16 @@ class TestFlat(object):
assert_(c.flags.writeable is False)
assert_(d.flags.writeable is False)
+ # for 1.14 all are set to non-writeable on the way to replacing the
+ # UPDATEIFCOPY array returned for non-contiguous arrays.
assert_(e.flags.writeable is True)
- assert_(f.flags.writeable is True)
+ assert_(f.flags.writeable is False)
assert_(c.flags.updateifcopy is False)
assert_(d.flags.updateifcopy is False)
assert_(e.flags.updateifcopy is False)
- assert_(f.flags.updateifcopy is True)
- assert_(f.base is self.b0)
+ # UPDATEIFCOPY is removed.
+ assert_(f.flags.updateifcopy is False)
class TestResize(object):
@@ -4496,23 +4571,11 @@ class TestRecord(object):
# multiple subfields
fn2 = func('f2')
b[fn2] = 3
- with suppress_warnings() as sup:
- sup.filter(FutureWarning,
- "Assignment between structured arrays.*")
- sup.filter(FutureWarning,
- "Numpy has detected that you .*")
-
- assert_equal(b[['f1', 'f2']][0].tolist(), (2, 3))
- assert_equal(b[['f2', 'f1']][0].tolist(), (3, 2))
- assert_equal(b[['f1', 'f3']][0].tolist(), (2, (1,)))
- # view of subfield view/copy
- assert_equal(b[['f1', 'f2']][0].view(('i4', 2)).tolist(),
- (2, 3))
- assert_equal(b[['f2', 'f1']][0].view(('i4', 2)).tolist(),
- (3, 2))
- view_dtype = [('f1', 'i4'), ('f3', [('', 'i4')])]
- assert_equal(b[['f1', 'f3']][0].view(view_dtype).tolist(),
- (2, (1,)))
+
+ assert_equal(b[['f1', 'f2']][0].tolist(), (2, 3))
+ assert_equal(b[['f2', 'f1']][0].tolist(), (3, 2))
+ assert_equal(b[['f1', 'f3']][0].tolist(), (2, (1,)))
+
# non-ascii unicode field indexing is well behaved
if not is_py3:
raise SkipTest('non ascii unicode field indexing skipped; '
@@ -4521,54 +4584,6 @@ class TestRecord(object):
assert_raises(ValueError, a.__setitem__, u'\u03e0', 1)
assert_raises(ValueError, a.__getitem__, u'\u03e0')
- def test_field_names_deprecation(self):
-
- def collect_warnings(f, *args, **kwargs):
- with warnings.catch_warnings(record=True) as log:
- warnings.simplefilter("always")
- f(*args, **kwargs)
- return [w.category for w in log]
-
- a = np.zeros((1,), dtype=[('f1', 'i4'),
- ('f2', 'i4'),
- ('f3', [('sf1', 'i4')])])
- a['f1'][0] = 1
- a['f2'][0] = 2
- a['f3'][0] = (3,)
- b = np.zeros((1,), dtype=[('f1', 'i4'),
- ('f2', 'i4'),
- ('f3', [('sf1', 'i4')])])
- b['f1'][0] = 1
- b['f2'][0] = 2
- b['f3'][0] = (3,)
-
- # All the different functions raise a warning, but not an error
- assert_equal(collect_warnings(a[['f1', 'f2']].__setitem__, 0, (10, 20)),
- [FutureWarning])
- # For <=1.12 a is not modified, but it will be in 1.13
- assert_equal(a, b)
-
- # Views also warn
- subset = a[['f1', 'f2']]
- subset_view = subset.view()
- assert_equal(collect_warnings(subset_view['f1'].__setitem__, 0, 10),
- [FutureWarning])
- # But the write goes through:
- assert_equal(subset['f1'][0], 10)
- # Only one warning per multiple field indexing, though (even if there
- # are multiple views involved):
- assert_equal(collect_warnings(subset['f1'].__setitem__, 0, 10), [])
-
- # make sure views of a multi-field index warn too
- c = np.zeros(3, dtype='i8,i8,i8')
- assert_equal(collect_warnings(c[['f0', 'f2']].view, 'i8,i8'),
- [FutureWarning])
-
- # make sure assignment using a different dtype warns
- a = np.zeros(2, dtype=[('a', 'i4'), ('b', 'i4')])
- b = np.zeros(2, dtype=[('b', 'i4'), ('a', 'i4')])
- assert_equal(collect_warnings(a.__setitem__, (), b), [FutureWarning])
-
def test_record_hash(self):
a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
a.flags.writeable = False
diff --git a/numpy/core/tests/test_nditer.py b/numpy/core/tests/test_nditer.py
index 885dcb56f..59e11f22e 100644
--- a/numpy/core/tests/test_nditer.py
+++ b/numpy/core/tests/test_nditer.py
@@ -1816,100 +1816,45 @@ def test_iter_buffered_cast_structured_type():
if HAS_REFCOUNT:
assert_equal(sys.getrefcount(a[0]), rc)
- # struct type -> simple (takes the first value)
- sdt = [('a', 'f4'), ('b', 'i8'), ('d', 'O')]
- a = np.array([(5.5, 7, 'test'), (8, 10, 11)], dtype=sdt)
+ # single-field struct type -> simple
+ sdt = [('a', 'f4')]
+ a = np.array([(5.5,), (8,)], dtype=sdt)
i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
casting='unsafe',
op_dtypes='i4')
assert_equal([x_[()] for x_ in i], [5, 8])
+ # make sure multi-field struct type -> simple doesn't work
+ sdt = [('a', 'f4'), ('b', 'i8'), ('d', 'O')]
+ a = np.array([(5.5, 7, 'test'), (8, 10, 11)], dtype=sdt)
+ assert_raises(ValueError, lambda: (
+ nditer(a, ['buffered', 'refs_ok'], ['readonly'],
+ casting='unsafe',
+ op_dtypes='i4')))
+
# struct type -> struct type (field-wise copy)
sdt1 = [('a', 'f4'), ('b', 'i8'), ('d', 'O')]
sdt2 = [('d', 'u2'), ('a', 'O'), ('b', 'f8')]
a = np.array([(1, 2, 3), (4, 5, 6)], dtype=sdt1)
- # New in 1.12: This behavior changes in 1.13, test for dep warning
- with assert_warns(FutureWarning):
- i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
- casting='unsafe',
- op_dtypes=sdt2)
+ i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
+ casting='unsafe',
+ op_dtypes=sdt2)
assert_equal(i[0].dtype, np.dtype(sdt2))
assert_equal([np.array(x_) for x_ in i],
- [np.array((3, 1, 2), dtype=sdt2),
- np.array((6, 4, 5), dtype=sdt2)])
+ [np.array((1, 2, 3), dtype=sdt2),
+ np.array((4, 5, 6), dtype=sdt2)])
- # struct type -> struct type (field gets discarded)
+ # make sure struct type -> struct type with different
+ # number of fields fails
sdt1 = [('a', 'f4'), ('b', 'i8'), ('d', 'O')]
sdt2 = [('b', 'O'), ('a', 'f8')]
a = np.array([(1, 2, 3), (4, 5, 6)], dtype=sdt1)
- # New in 1.12: This behavior changes in 1.13, test for dep warning
- with assert_warns(FutureWarning):
- i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'],
- casting='unsafe',
- op_dtypes=sdt2)
- assert_equal(i[0].dtype, np.dtype(sdt2))
- vals = []
- for x in i:
- vals.append(np.array(x))
- x['a'] = x['b']+3
- assert_equal(vals, [np.array((2, 1), dtype=sdt2),
- np.array((5, 4), dtype=sdt2)])
- assert_equal(a, np.array([(5, 2, None), (8, 5, None)], dtype=sdt1))
-
- # struct type -> struct type (structured field gets discarded)
- sdt1 = [('a', 'f4'), ('b', 'i8'), ('d', [('a', 'i2'), ('b', 'i4')])]
- sdt2 = [('b', 'O'), ('a', 'f8')]
- a = np.array([(1, 2, (0, 9)), (4, 5, (20, 21))], dtype=sdt1)
- # New in 1.12: This behavior changes in 1.13, test for dep warning
- with assert_warns(FutureWarning):
- i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'],
- casting='unsafe',
- op_dtypes=sdt2)
- assert_equal(i[0].dtype, np.dtype(sdt2))
- vals = []
- for x in i:
- vals.append(np.array(x))
- x['a'] = x['b']+3
- assert_equal(vals, [np.array((2, 1), dtype=sdt2),
- np.array((5, 4), dtype=sdt2)])
- assert_equal(a, np.array([(5, 2, (0, 0)), (8, 5, (0, 0))], dtype=sdt1))
-
- # struct type -> struct type (structured field w/ ref gets discarded)
- sdt1 = [('a', 'f4'), ('b', 'i8'), ('d', [('a', 'i2'), ('b', 'O')])]
- sdt2 = [('b', 'O'), ('a', 'f8')]
- a = np.array([(1, 2, (0, 9)), (4, 5, (20, 21))], dtype=sdt1)
- # New in 1.12: This behavior changes in 1.13, test for dep warning
- with assert_warns(FutureWarning):
- i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'],
- casting='unsafe',
- op_dtypes=sdt2)
- assert_equal(i[0].dtype, np.dtype(sdt2))
- vals = []
- for x in i:
- vals.append(np.array(x))
- x['a'] = x['b']+3
- assert_equal(vals, [np.array((2, 1), dtype=sdt2),
- np.array((5, 4), dtype=sdt2)])
- assert_equal(a, np.array([(5, 2, (0, None)), (8, 5, (0, None))], dtype=sdt1))
-
- # struct type -> struct type back (structured field w/ ref gets discarded)
- sdt1 = [('b', 'O'), ('a', 'f8')]
- sdt2 = [('a', 'f4'), ('b', 'i8'), ('d', [('a', 'i2'), ('b', 'O')])]
- a = np.array([(1, 2), (4, 5)], dtype=sdt1)
- # New in 1.12: This behavior changes in 1.13, test for dep warning
- with assert_warns(FutureWarning):
- i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'],
- casting='unsafe',
- op_dtypes=sdt2)
- assert_equal(i[0].dtype, np.dtype(sdt2))
- vals = []
- for x in i:
- vals.append(np.array(x))
- assert_equal(x['d'], np.array((0, None), dtype=[('a', 'i2'), ('b', 'O')]))
- x['a'] = x['b']+3
- assert_equal(vals, [np.array((2, 1, (0, None)), dtype=sdt2),
- np.array((5, 4, (0, None)), dtype=sdt2)])
- assert_equal(a, np.array([(1, 4), (4, 7)], dtype=sdt1))
+
+ assert_raises(ValueError, lambda : (
+ nditer(a, ['buffered', 'refs_ok'], ['readwrite'],
+ casting='unsafe',
+ op_dtypes=sdt2)))
+
def test_iter_buffered_cast_subarray():
# Tests buffering of subarrays
diff --git a/numpy/core/tests/test_numeric.py b/numpy/core/tests/test_numeric.py
index bdb3dfe69..f1133b8c9 100644
--- a/numpy/core/tests/test_numeric.py
+++ b/numpy/core/tests/test_numeric.py
@@ -872,6 +872,23 @@ class TestTypes(object):
assert_raises(TypeError, np.can_cast, 'i4', None)
assert_raises(TypeError, np.can_cast, None, 'i4')
+ # Also test keyword arguments
+ assert_(np.can_cast(from_=np.int32, to=np.int64))
+
+ def test_can_cast_values(self):
+ # gh-5917
+ for dt in np.sctypes['int'] + np.sctypes['uint']:
+ ii = np.iinfo(dt)
+ assert_(np.can_cast(ii.min, dt))
+ assert_(np.can_cast(ii.max, dt))
+ assert_(not np.can_cast(ii.min - 1, dt))
+ assert_(not np.can_cast(ii.max + 1, dt))
+
+ for dt in np.sctypes['float']:
+ fi = np.finfo(dt)
+ assert_(np.can_cast(fi.min, dt))
+ assert_(np.can_cast(fi.max, dt))
+
# Custom exception class to test exception propagation in fromiter
class NIterError(Exception):
@@ -1117,6 +1134,19 @@ class TestNonzero(object):
assert_equal(m.nonzero(), tgt)
+ def test_nonzero_invalid_object(self):
+ # gh-9295
+ a = np.array([np.array([1, 2]), 3])
+ assert_raises(ValueError, np.nonzero, a)
+
+ class BoolErrors:
+ def __bool__(self):
+ raise ValueError("Not allowed")
+ def __nonzero__(self):
+ raise ValueError("Not allowed")
+
+ assert_raises(ValueError, np.nonzero, np.array([BoolErrors()]))
+
class TestIndex(object):
def test_boolean(self):
@@ -1930,9 +1960,9 @@ class TestIsclose(object):
def test_non_finite_scalar(self):
# GH7014, when two scalars are compared the output should also be a
# scalar
- assert_(np.isclose(np.inf, -np.inf) is False)
- assert_(np.isclose(0, np.inf) is False)
- assert_(type(np.isclose(0, np.inf)) is bool)
+ assert_(np.isclose(np.inf, -np.inf) is np.False_)
+ assert_(np.isclose(0, np.inf) is np.False_)
+ assert_(type(np.isclose(0, np.inf)) is np.bool_)
class TestStdVar(object):
diff --git a/numpy/core/tests/test_records.py b/numpy/core/tests/test_records.py
index d7714132b..27d35fa65 100644
--- a/numpy/core/tests/test_records.py
+++ b/numpy/core/tests/test_records.py
@@ -153,11 +153,6 @@ class TestFromrecords(object):
assert_equal(r['c'].dtype.type, np.record)
assert_equal(type(r['c']), np.recarray)
- # suppress deprecation warning in 1.12 (remove in 1.13)
- with assert_warns(FutureWarning):
- assert_equal(r[['a', 'b']].dtype.type, np.record)
- assert_equal(type(r[['a', 'b']]), np.recarray)
-
#and that it preserves subclasses (gh-6949)
class C(np.recarray):
pass
@@ -334,15 +329,6 @@ class TestRecord(object):
with assert_raises(ValueError):
r.setfield([2,3], *r.dtype.fields['f'])
- def test_out_of_order_fields(self):
- """Ticket #1431."""
- # this test will be invalid in 1.13
- # suppress deprecation warning in 1.12 (remove in 1.13)
- with assert_warns(FutureWarning):
- x = self.data[['col1', 'col2']]
- y = self.data[['col2', 'col1']]
- assert_equal(x[0][0], y[0][1])
-
def test_pickle_1(self):
# Issue #1529
a = np.array([(1, [])], dtype=[('a', np.int32), ('b', np.int32, 0)])
@@ -371,8 +357,7 @@ class TestRecord(object):
# https://github.com/numpy/numpy/issues/3256
ra = np.recarray((2,), dtype=[('x', object), ('y', float), ('z', int)])
- with assert_warns(FutureWarning):
- ra[['x','y']] # TypeError?
+ ra[['x','y']] # TypeError?
def test_record_scalar_setitem(self):
# https://github.com/numpy/numpy/issues/3561
diff --git a/numpy/core/tests/test_regression.py b/numpy/core/tests/test_regression.py
index 84469d03b..34f9080fb 100644
--- a/numpy/core/tests/test_regression.py
+++ b/numpy/core/tests/test_regression.py
@@ -858,7 +858,7 @@ class TestRegression(object):
def test_sign_bit(self):
x = np.array([0, -0.0, 0])
- assert_equal(str(np.abs(x)), '[ 0. 0. 0.]')
+ assert_equal(str(np.abs(x)), '[0. 0. 0.]')
def test_flat_index_byteswap(self):
for dt in (np.dtype('<i4'), np.dtype('>i4')):
diff --git a/numpy/core/tests/test_scalarmath.py b/numpy/core/tests/test_scalarmath.py
index cff9f7985..1909f497f 100644
--- a/numpy/core/tests/test_scalarmath.py
+++ b/numpy/core/tests/test_scalarmath.py
@@ -18,6 +18,7 @@ types = [np.bool_, np.byte, np.ubyte, np.short, np.ushort, np.intc, np.uintc,
np.cdouble, np.clongdouble]
floating_types = np.floating.__subclasses__()
+complex_floating_types = np.complexfloating.__subclasses__()
# This compares scalarmath against ufuncs.
@@ -603,9 +604,8 @@ class TestSubtract(object):
class TestAbs(object):
-
def _test_abs_func(self, absfunc):
- for tp in floating_types:
+ for tp in floating_types + complex_floating_types:
x = tp(-1.5)
assert_equal(absfunc(x), 1.5)
x = tp(0.0)
@@ -616,6 +616,15 @@ class TestAbs(object):
res = absfunc(x)
assert_equal(res, 0.0)
+ x = tp(np.finfo(tp).max)
+ assert_equal(absfunc(x), x.real)
+
+ x = tp(np.finfo(tp).tiny)
+ assert_equal(absfunc(x), x.real)
+
+ x = tp(np.finfo(tp).min)
+ assert_equal(absfunc(x), -x.real)
+
def test_builtin_abs(self):
self._test_abs_func(abs)
diff --git a/numpy/core/tests/test_shape_base.py b/numpy/core/tests/test_shape_base.py
index d1fbe8e92..5c1e569b7 100644
--- a/numpy/core/tests/test_shape_base.py
+++ b/numpy/core/tests/test_shape_base.py
@@ -230,6 +230,12 @@ class TestConcatenate(object):
'0', '1', '2', 'x'])
assert_array_equal(r, d)
+ out = np.zeros(a.size + len(b))
+ r = np.concatenate((a, b), axis=None)
+ rout = np.concatenate((a, b), axis=None, out=out)
+ assert_(out is rout)
+ assert_equal(r, rout)
+
def test_large_concatenate_axis_None(self):
# When no axis is given, concatenate uses flattened versions.
# This also had a bug with many arrays (see gh-5979).
@@ -278,6 +284,34 @@ class TestConcatenate(object):
assert_array_equal(concatenate((a0, a1, a2), -1), res)
assert_array_equal(concatenate((a0.T, a1.T, a2.T), 0), res.T)
+ out = res.copy()
+ rout = concatenate((a0, a1, a2), 2, out=out)
+ assert_(out is rout)
+ assert_equal(res, rout)
+
+ def test_bad_out_shape(self):
+ a = array([1, 2])
+ b = array([3, 4])
+
+ assert_raises(ValueError, concatenate, (a, b), out=np.empty(5))
+ assert_raises(ValueError, concatenate, (a, b), out=np.empty((4,1)))
+ assert_raises(ValueError, concatenate, (a, b), out=np.empty((1,4)))
+ concatenate((a, b), out=np.empty(4))
+
+ def test_out_dtype(self):
+ out = np.empty(4, np.float32)
+ res = concatenate((array([1, 2]), array([3, 4])), out=out)
+ assert_(out is res)
+
+ out = np.empty(4, np.complex64)
+ res = concatenate((array([0.1, 0.2]), array([0.3, 0.4])), out=out)
+ assert_(out is res)
+
+ # invalid cast
+ out = np.empty(4, np.int32)
+ assert_raises(TypeError, concatenate,
+ (array([0.1, 0.2]), array([0.3, 0.4])), out=out)
+
def test_stack():
# non-iterable input
diff --git a/numpy/distutils/fcompiler/gnu.py b/numpy/distutils/fcompiler/gnu.py
index 2260fd50d..10c60dc6f 100644
--- a/numpy/distutils/fcompiler/gnu.py
+++ b/numpy/distutils/fcompiler/gnu.py
@@ -66,7 +66,8 @@ class GnuFCompiler(FCompiler):
m = re.search(r'GNU Fortran\s+95.*?([0-9-.]+)', version_string)
if m:
return ('gfortran', m.group(1))
- m = re.search(r'GNU Fortran.*?\-?([0-9-.]+)', version_string)
+ m = re.search(
+ r'GNU Fortran.*?\-?([0-9-.]+\.[0-9-.]+)', version_string)
if m:
v = m.group(1)
if v.startswith('0') or v.startswith('2') or v.startswith('3'):
diff --git a/numpy/distutils/misc_util.py b/numpy/distutils/misc_util.py
index 01376a7ff..102af874f 100644
--- a/numpy/distutils/misc_util.py
+++ b/numpy/distutils/misc_util.py
@@ -2278,7 +2278,7 @@ def generate_config_py(target):
from distutils.dir_util import mkpath
mkpath(os.path.dirname(target))
f = open(target, 'w')
- f.write('# This file is generated by %s\n' % (os.path.abspath(sys.argv[0])))
+ f.write('# This file is generated by numpy\'s %s\n' % (os.path.basename(sys.argv[0])))
f.write('# It contains system_info results at the time of building this package.\n')
f.write('__all__ = ["get_info","show"]\n\n')
diff --git a/numpy/distutils/tests/test_fcompiler_gnu.py b/numpy/distutils/tests/test_fcompiler_gnu.py
index 9ad63cf09..659520513 100644
--- a/numpy/distutils/tests/test_fcompiler_gnu.py
+++ b/numpy/distutils/tests/test_fcompiler_gnu.py
@@ -26,7 +26,8 @@ gfortran_version_strings = [
'4.9.1'),
("gfortran: warning: couldn't understand kern.osversion '14.1.0\n"
"gfortran: warning: yet another warning\n4.9.1",
- '4.9.1')
+ '4.9.1'),
+ ('GNU Fortran (crosstool-NG 8a21ab48) 7.2.0', '7.2.0')
]
class TestG77Versions(object):
diff --git a/numpy/f2py/cfuncs.py b/numpy/f2py/cfuncs.py
index 6aeeec823..3b7f694d4 100644
--- a/numpy/f2py/cfuncs.py
+++ b/numpy/f2py/cfuncs.py
@@ -331,7 +331,7 @@ cppmacros['TRYPYARRAYTEMPLATE'] = """\
/* New SciPy */
#define TRYPYARRAYTEMPLATECHAR case NPY_STRING: *(char *)(PyArray_DATA(arr))=*v; break;
#define TRYPYARRAYTEMPLATELONG case NPY_LONG: *(long *)(PyArray_DATA(arr))=*v; break;
-#define TRYPYARRAYTEMPLATEOBJECT case NPY_OBJECT: (PyArray_DESCR(arr)->f->setitem)(pyobj_from_ ## ctype ## 1(*v),PyArray_DATA(arr)); break;
+#define TRYPYARRAYTEMPLATEOBJECT case NPY_OBJECT: PyArray_SETITEM(arr,PyArray_DATA(arr),pyobj_from_ ## ctype ## 1(*v)); break;
#define TRYPYARRAYTEMPLATE(ctype,typecode) \\
PyArrayObject *arr = NULL;\\
@@ -357,7 +357,7 @@ cppmacros['TRYPYARRAYTEMPLATE'] = """\
case NPY_ULONGLONG: *(npy_ulonglong *)(PyArray_DATA(arr))=*v; break;\\
case NPY_LONGDOUBLE: *(npy_longdouble *)(PyArray_DATA(arr))=*v; break;\\
case NPY_CLONGDOUBLE: *(npy_longdouble *)(PyArray_DATA(arr))=*v; break;\\
- case NPY_OBJECT: (PyArray_DESCR(arr)->f->setitem)(pyobj_from_ ## ctype ## 1(*v),PyArray_DATA(arr), arr); break;\\
+ case NPY_OBJECT: PyArray_SETITEM(arr, PyArray_DATA(arr), pyobj_from_ ## ctype ## 1(*v)); break;\\
default: return -2;\\
};\\
return 1
@@ -365,7 +365,7 @@ cppmacros['TRYPYARRAYTEMPLATE'] = """\
needs['TRYCOMPLEXPYARRAYTEMPLATE'] = ['PRINTPYOBJERR']
cppmacros['TRYCOMPLEXPYARRAYTEMPLATE'] = """\
-#define TRYCOMPLEXPYARRAYTEMPLATEOBJECT case NPY_OBJECT: (PyArray_DESCR(arr)->f->setitem)(pyobj_from_complex_ ## ctype ## 1((*v)),PyArray_DATA(arr), arr); break;
+#define TRYCOMPLEXPYARRAYTEMPLATEOBJECT case NPY_OBJECT: PyArray_SETITEM(arr, PyArray_DATA(arr), pyobj_from_complex_ ## ctype ## 1((*v))); break;
#define TRYCOMPLEXPYARRAYTEMPLATE(ctype,typecode)\\
PyArrayObject *arr = NULL;\\
if (!obj) return -2;\\
@@ -394,7 +394,7 @@ cppmacros['TRYCOMPLEXPYARRAYTEMPLATE'] = """\
case NPY_ULONGLONG: *(npy_ulonglong *)(PyArray_DATA(arr))=(*v).r; break;\\
case NPY_LONGDOUBLE: *(npy_longdouble *)(PyArray_DATA(arr))=(*v).r; break;\\
case NPY_CLONGDOUBLE: *(npy_longdouble *)(PyArray_DATA(arr))=(*v).r;*(npy_longdouble *)(PyArray_DATA(arr)+sizeof(npy_longdouble))=(*v).i;break;\\
- case NPY_OBJECT: (PyArray_DESCR(arr)->f->setitem)(pyobj_from_complex_ ## ctype ## 1((*v)),PyArray_DATA(arr), arr); break;\\
+ case NPY_OBJECT: PyArray_SETITEM(arr, PyArray_DATA(arr), pyobj_from_complex_ ## ctype ## 1((*v))); break;\\
default: return -2;\\
};\\
return -1;
diff --git a/numpy/f2py/rules.py b/numpy/f2py/rules.py
index 6a1f5ae6e..36e2222ea 100644
--- a/numpy/f2py/rules.py
+++ b/numpy/f2py/rules.py
@@ -107,16 +107,14 @@ for k in ['decl',
#################### Rules for C/API module #################
+generationtime = int(os.environ.get('SOURCE_DATE_EPOCH', time.time()))
module_rules = {
'modulebody': """\
/* File: #modulename#module.c
* This file is auto-generated with f2py (version:#f2py_version#).
* f2py is a Fortran to Python Interface Generator (FPIG), Second Edition,
* written by Pearu Peterson <pearu@cens.ioc.ee>.
- * See http://cens.ioc.ee/projects/f2py2e/
- * Generation date: """ + time.asctime(time.localtime(time.time())) + """
- * $R""" + """evision:$
- * $D""" + """ate:$
+ * Generation date: """ + time.asctime(time.gmtime(generationtime)) + """
* Do not edit this file directly unless you know what you are doing!!!
*/
diff --git a/numpy/lib/arraypad.py b/numpy/lib/arraypad.py
index 842f3a9fe..b8966e543 100644
--- a/numpy/lib/arraypad.py
+++ b/numpy/lib/arraypad.py
@@ -1208,7 +1208,7 @@ def pad(array, pad_width, mode, **kwargs):
length to the vector argument with padded values replaced. It has the
following signature::
- padding_func(vector, iaxis_pad_width, iaxis, **kwargs)
+ padding_func(vector, iaxis_pad_width, iaxis, kwargs)
where
@@ -1222,7 +1222,7 @@ def pad(array, pad_width, mode, **kwargs):
the end of vector.
iaxis : int
The axis currently being calculated.
- kwargs : misc
+ kwargs : dict
Any keyword arguments the function requires.
Examples
@@ -1272,21 +1272,27 @@ def pad(array, pad_width, mode, **kwargs):
>>> np.lib.pad(a, (2, 3), 'wrap')
array([4, 5, 1, 2, 3, 4, 5, 1, 2, 3])
- >>> def padwithtens(vector, pad_width, iaxis, kwargs):
- ... vector[:pad_width[0]] = 10
- ... vector[-pad_width[1]:] = 10
+ >>> def pad_with(vector, pad_width, iaxis, kwargs):
+ ... pad_value = kwargs.get('padder', 10)
+ ... vector[:pad_width[0]] = pad_value
+ ... vector[-pad_width[1]:] = pad_value
... return vector
-
>>> a = np.arange(6)
>>> a = a.reshape((2, 3))
-
- >>> np.lib.pad(a, 2, padwithtens)
+ >>> np.lib.pad(a, 2, pad_with)
array([[10, 10, 10, 10, 10, 10, 10],
[10, 10, 10, 10, 10, 10, 10],
[10, 10, 0, 1, 2, 10, 10],
[10, 10, 3, 4, 5, 10, 10],
[10, 10, 10, 10, 10, 10, 10],
[10, 10, 10, 10, 10, 10, 10]])
+ >>> np.lib.pad(a, 2, pad_with, padder=100)
+ array([[100, 100, 100, 100, 100, 100, 100],
+ [100, 100, 100, 100, 100, 100, 100],
+ [100, 100, 0, 1, 2, 100, 100],
+ [100, 100, 3, 4, 5, 100, 100],
+ [100, 100, 100, 100, 100, 100, 100],
+ [100, 100, 100, 100, 100, 100, 100]])
"""
if not np.asarray(pad_width).dtype.kind == 'i':
raise TypeError('`pad_width` must be of integral type.')
diff --git a/numpy/lib/format.py b/numpy/lib/format.py
index 14dec01d5..84af2afc8 100644
--- a/numpy/lib/format.py
+++ b/numpy/lib/format.py
@@ -100,9 +100,9 @@ the header data HEADER_LEN.
The next HEADER_LEN bytes form the header data describing the array's
format. It is an ASCII string which contains a Python literal expression
of a dictionary. It is terminated by a newline (``\\n``) and padded with
-spaces (``\\x20``) to make the total length of
-``magic string + 4 + HEADER_LEN`` be evenly divisible by 16 for alignment
-purposes.
+spaces (``\\x20``) to make the total of
+``len(magic string) + 2 + len(length) + HEADER_LEN`` be evenly divisible
+by 64 for alignment purposes.
The dictionary contains three keys:
@@ -163,6 +163,7 @@ else:
MAGIC_PREFIX = b'\x93NUMPY'
MAGIC_LEN = len(MAGIC_PREFIX) + 2
+ARRAY_ALIGN = 64 # plausible values are powers of 2 between 16 and 4096
BUFFER_SIZE = 2**18 # size of buffer for reading npz files in bytes
# difference between version 1.0 and 2.0 is a 4 byte (I) header length
@@ -304,27 +305,33 @@ def _write_array_header(fp, d, version=None):
header.append("'%s': %s, " % (key, repr(value)))
header.append("}")
header = "".join(header)
- # Pad the header with spaces and a final newline such that the magic
- # string, the header-length short and the header are aligned on a
- # 16-byte boundary. Hopefully, some system, possibly memory-mapping,
- # can take advantage of our premature optimization.
- current_header_len = MAGIC_LEN + 2 + len(header) + 1 # 1 for the newline
- topad = 16 - (current_header_len % 16)
- header = header + ' '*topad + '\n'
header = asbytes(_filter_header(header))
- hlen = len(header)
- if hlen < 256*256 and version in (None, (1, 0)):
+ hlen = len(header) + 1 # 1 for newline
+ padlen_v1 = ARRAY_ALIGN - ((MAGIC_LEN + struct.calcsize('<H') + hlen) % ARRAY_ALIGN)
+ padlen_v2 = ARRAY_ALIGN - ((MAGIC_LEN + struct.calcsize('<I') + hlen) % ARRAY_ALIGN)
+
+ # Which version(s) we write depends on the total header size; v1 has a max of 65535
+ if hlen + padlen_v1 < 2**16 and version in (None, (1, 0)):
version = (1, 0)
- header_prefix = magic(1, 0) + struct.pack('<H', hlen)
- elif hlen < 2**32 and version in (None, (2, 0)):
+ header_prefix = magic(1, 0) + struct.pack('<H', hlen + padlen_v1)
+ topad = padlen_v1
+ elif hlen + padlen_v2 < 2**32 and version in (None, (2, 0)):
version = (2, 0)
- header_prefix = magic(2, 0) + struct.pack('<I', hlen)
+ header_prefix = magic(2, 0) + struct.pack('<I', hlen + padlen_v2)
+ topad = padlen_v2
else:
msg = "Header length %s too big for version=%s"
msg %= (hlen, version)
raise ValueError(msg)
+ # Pad the header with spaces and a final newline such that the magic
+ # string, the header-length short and the header are aligned on a
+ # ARRAY_ALIGN byte boundary. This supports memory mapping of dtypes
+ # aligned up to ARRAY_ALIGN on systems like Linux where mmap()
+ # offset must be page-aligned (i.e. the beginning of the file).
+ header = header + b' '*topad + b'\n'
+
fp.write(header_prefix)
fp.write(header)
return version
@@ -468,18 +475,18 @@ def _read_array_header(fp, version):
# header.
import struct
if version == (1, 0):
- hlength_str = _read_bytes(fp, 2, "array header length")
- header_length = struct.unpack('<H', hlength_str)[0]
- header = _read_bytes(fp, header_length, "array header")
+ hlength_type = '<H'
elif version == (2, 0):
- hlength_str = _read_bytes(fp, 4, "array header length")
- header_length = struct.unpack('<I', hlength_str)[0]
- header = _read_bytes(fp, header_length, "array header")
+ hlength_type = '<I'
else:
raise ValueError("Invalid version %r" % version)
+ hlength_str = _read_bytes(fp, struct.calcsize(hlength_type), "array header length")
+ header_length = struct.unpack(hlength_type, hlength_str)[0]
+ header = _read_bytes(fp, header_length, "array header")
+
# The header is a pretty-printed string representation of a literal
- # Python dictionary with trailing newlines padded to a 16-byte
+    # Python dictionary with trailing newlines padded to an ARRAY_ALIGN-byte
# boundary. The keys are strings.
# "shape" : tuple of int
# "fortran_order" : bool
diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index 93cbd69dd..2745b49d1 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -782,7 +782,7 @@ def histogram(a, bins=10, range=None, normed=False, weights=None,
bins = bin_edges
else:
bins = asarray(bins)
- if (np.diff(bins) < 0).any():
+ if np.any(bins[:-1] > bins[1:]):
raise ValueError(
'bins must increase monotonically.')
@@ -1676,23 +1676,28 @@ def gradient(f, *varargs, **kwargs):
len_axes = len(axes)
n = len(varargs)
if n == 0:
+ # no spacing argument - use 1 in all axes
dx = [1.0] * len_axes
- elif n == len_axes or (n == 1 and np.isscalar(varargs[0])):
+ elif n == 1 and np.ndim(varargs[0]) == 0:
+ # single scalar for all axes
+ dx = varargs * len_axes
+ elif n == len_axes:
+ # scalar or 1d array for each axis
dx = list(varargs)
for i, distances in enumerate(dx):
- if np.isscalar(distances):
+ if np.ndim(distances) == 0:
continue
+ elif np.ndim(distances) != 1:
+ raise ValueError("distances must be either scalars or 1d")
if len(distances) != f.shape[axes[i]]:
- raise ValueError("distances must be either scalars or match "
+ raise ValueError("when 1d, distances must match "
"the length of the corresponding dimension")
- diffx = np.diff(dx[i])
+ diffx = np.diff(distances)
# if distances are constant reduce to the scalar case
# since it brings a consistent speedup
if (diffx == diffx[0]).all():
diffx = diffx[0]
dx[i] = diffx
- if len(dx) == 1:
- dx *= len_axes
else:
raise TypeError("invalid number of arguments")
@@ -1728,7 +1733,7 @@ def gradient(f, *varargs, **kwargs):
# all other types convert to floating point
otype = np.double
- for i, axis in enumerate(axes):
+ for axis, ax_dx in zip(axes, dx):
if f.shape[axis] < edge_order + 1:
raise ValueError(
"Shape of array too small to calculate a numerical gradient, "
@@ -1736,7 +1741,8 @@ def gradient(f, *varargs, **kwargs):
# result allocation
out = np.empty_like(f, dtype=otype)
- uniform_spacing = np.isscalar(dx[i])
+ # spacing for the current axis
+ uniform_spacing = np.ndim(ax_dx) == 0
# Numerical differentiation: 2nd order interior
slice1[axis] = slice(1, -1)
@@ -1745,10 +1751,10 @@ def gradient(f, *varargs, **kwargs):
slice4[axis] = slice(2, None)
if uniform_spacing:
- out[slice1] = (f[slice4] - f[slice2]) / (2. * dx[i])
+ out[slice1] = (f[slice4] - f[slice2]) / (2. * ax_dx)
else:
- dx1 = dx[i][0:-1]
- dx2 = dx[i][1:]
+ dx1 = ax_dx[0:-1]
+ dx2 = ax_dx[1:]
a = -(dx2)/(dx1 * (dx1 + dx2))
b = (dx2 - dx1) / (dx1 * dx2)
c = dx1 / (dx2 * (dx1 + dx2))
@@ -1764,14 +1770,14 @@ def gradient(f, *varargs, **kwargs):
slice1[axis] = 0
slice2[axis] = 1
slice3[axis] = 0
- dx_0 = dx[i] if uniform_spacing else dx[i][0]
+ dx_0 = ax_dx if uniform_spacing else ax_dx[0]
# 1D equivalent -- out[0] = (f[1] - f[0]) / (x[1] - x[0])
out[slice1] = (f[slice2] - f[slice3]) / dx_0
slice1[axis] = -1
slice2[axis] = -1
slice3[axis] = -2
- dx_n = dx[i] if uniform_spacing else dx[i][-1]
+ dx_n = ax_dx if uniform_spacing else ax_dx[-1]
# 1D equivalent -- out[-1] = (f[-1] - f[-2]) / (x[-1] - x[-2])
out[slice1] = (f[slice2] - f[slice3]) / dx_n
@@ -1782,12 +1788,12 @@ def gradient(f, *varargs, **kwargs):
slice3[axis] = 1
slice4[axis] = 2
if uniform_spacing:
- a = -1.5 / dx[i]
- b = 2. / dx[i]
- c = -0.5 / dx[i]
+ a = -1.5 / ax_dx
+ b = 2. / ax_dx
+ c = -0.5 / ax_dx
else:
- dx1 = dx[i][0]
- dx2 = dx[i][1]
+ dx1 = ax_dx[0]
+ dx2 = ax_dx[1]
a = -(2. * dx1 + dx2)/(dx1 * (dx1 + dx2))
b = (dx1 + dx2) / (dx1 * dx2)
c = - dx1 / (dx2 * (dx1 + dx2))
@@ -1799,12 +1805,12 @@ def gradient(f, *varargs, **kwargs):
slice3[axis] = -2
slice4[axis] = -1
if uniform_spacing:
- a = 0.5 / dx[i]
- b = -2. / dx[i]
- c = 1.5 / dx[i]
+ a = 0.5 / ax_dx
+ b = -2. / ax_dx
+ c = 1.5 / ax_dx
else:
- dx1 = dx[i][-2]
- dx2 = dx[i][-1]
+ dx1 = ax_dx[-2]
+ dx2 = ax_dx[-1]
a = (dx2) / (dx1 * (dx1 + dx2))
b = - (dx2 + dx1) / (dx1 * dx2)
c = (2. * dx2 + dx1) / (dx2 * (dx1 + dx2))
diff --git a/numpy/lib/nanfunctions.py b/numpy/lib/nanfunctions.py
index 79bf01281..ffedcd68a 100644
--- a/numpy/lib/nanfunctions.py
+++ b/numpy/lib/nanfunctions.py
@@ -594,7 +594,7 @@ def nanprod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
Parameters
----------
a : array_like
- Array containing numbers whose sum is desired. If `a` is not an
+ Array containing numbers whose product is desired. If `a` is not an
array, a conversion is attempted.
axis : int, optional
Axis along which the product is computed. The default is to compute
diff --git a/numpy/lib/npyio.py b/numpy/lib/npyio.py
index e7303fc65..7598b2c6b 100644
--- a/numpy/lib/npyio.py
+++ b/numpy/lib/npyio.py
@@ -1071,7 +1071,7 @@ def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
If the filename ends in ``.gz``, the file is automatically saved in
compressed gzip format. `loadtxt` understands gzipped files
transparently.
- X : array_like
+ X : 1D or 2D array_like
Data to be saved to a text file.
fmt : str or sequence of strs, optional
A single format (%10.5f), a sequence of formats, or a
@@ -1201,7 +1201,10 @@ def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
X = np.asarray(X)
# Handle 1-dimensional arrays
- if X.ndim == 1:
+ if X.ndim == 0 or X.ndim > 2:
+ raise ValueError(
+ "Expected 1D or 2D array, got %dD array instead" % X.ndim)
+ elif X.ndim == 1:
# Common case -- 1d array of numbers
if X.dtype.names is None:
X = np.atleast_2d(X).T
diff --git a/numpy/lib/shape_base.py b/numpy/lib/shape_base.py
index ea77f40e0..53578e0e4 100644
--- a/numpy/lib/shape_base.py
+++ b/numpy/lib/shape_base.py
@@ -85,11 +85,9 @@ def apply_along_axis(func1d, axis, arr, *args, **kwargs):
array([[[1, 0, 0],
[0, 2, 0],
[0, 0, 3]],
-
[[4, 0, 0],
[0, 5, 0],
[0, 0, 6]],
-
[[7, 0, 0],
[0, 8, 0],
[0, 0, 9]]])
diff --git a/numpy/lib/tests/test_format.py b/numpy/lib/tests/test_format.py
index 155732882..2d2b4cea2 100644
--- a/numpy/lib/tests/test_format.py
+++ b/numpy/lib/tests/test_format.py
@@ -615,6 +615,11 @@ def test_version_2_0():
format.write_array(f, d)
assert_(w[0].category is UserWarning)
+ # check alignment of data portion
+ f.seek(0)
+ header = f.readline()
+ assert_(len(header) % format.ARRAY_ALIGN == 0)
+
f.seek(0)
n = format.read_array(f)
assert_array_equal(d, n)
@@ -758,6 +763,7 @@ def test_read_array_header_1_0():
s.seek(format.MAGIC_LEN)
shape, fortran, dtype = format.read_array_header_1_0(s)
+ assert_(s.tell() % format.ARRAY_ALIGN == 0)
assert_((shape, fortran, dtype) == ((3, 6), False, float))
@@ -770,6 +776,7 @@ def test_read_array_header_2_0():
s.seek(format.MAGIC_LEN)
shape, fortran, dtype = format.read_array_header_2_0(s)
+ assert_(s.tell() % format.ARRAY_ALIGN == 0)
assert_((shape, fortran, dtype) == ((3, 6), False, float))
diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py
index 4c90abbf6..10440d97c 100644
--- a/numpy/lib/tests/test_function_base.py
+++ b/numpy/lib/tests/test_function_base.py
@@ -804,8 +804,11 @@ class TestGradient(object):
# distances must be scalars or have size equal to gradient[axis]
gradient(np.arange(5), 3.)
+ gradient(np.arange(5), np.array(3.))
gradient(np.arange(5), dx)
- gradient(f_2d, 1.5) # dy is set equal to dx because scalar
+ # dy is set equal to dx because scalar
+ gradient(f_2d, 1.5)
+ gradient(f_2d, np.array(1.5))
gradient(f_2d, dx_uneven, dx_uneven)
# mix between even and uneven spaces and
@@ -815,6 +818,10 @@ class TestGradient(object):
# 2D but axis specified
gradient(f_2d, dx, axis=1)
+ # 2d coordinate arguments are not yet allowed
+ assert_raises_regex(ValueError, '.*scalars or 1d',
+ gradient, f_2d, np.stack([dx]*2, axis=-1), 1)
+
def test_badargs(self):
f_2d = np.arange(25).reshape(5, 5)
x = np.cumsum(np.ones(5))
@@ -1829,6 +1836,14 @@ class TestHistogram(object):
hist, edges = np.histogram(arr, bins=30, range=(-0.5, 5))
assert_equal(hist[-1], 1)
+ def test_unsigned_monotonicity_check(self):
+ # Ensures ValueError is raised if bins not increasing monotonically
+ # when bins contain unsigned values (see #9222)
+ arr = np.array([2])
+ bins = np.array([1, 3, 1], dtype='uint64')
+ with assert_raises(ValueError):
+ hist, edges = np.histogram(arr, bins=bins)
+
class TestHistogramOptimBinNums(object):
"""
diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py
index f2fd37230..6f7fcc54c 100644
--- a/numpy/lib/tests/test_io.py
+++ b/numpy/lib/tests/test_io.py
@@ -329,6 +329,12 @@ class TestSaveTxt(object):
lines = c.readlines()
assert_equal(lines, [b'1\n', b'2\n', b'3\n', b'4\n'])
+ def test_0D_3D(self):
+ c = BytesIO()
+ assert_raises(ValueError, np.savetxt, c, np.array(1))
+ assert_raises(ValueError, np.savetxt, c, np.array([[[1], [2]]]))
+
+
def test_record(self):
a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
c = BytesIO()
@@ -1178,12 +1184,12 @@ M 33 21.99
conv = {0: int, 1: int, 2: int, 3: lambda r: dmap[r.decode()]}
test = np.recfromcsv(TextIO(dstr,), dtype=dtyp, delimiter=',',
names=None, converters=conv)
- control = np.rec.array([[1,5,-1,0], [2,8,-1,1], [3,3,-2,3]], dtype=dtyp)
+ control = np.rec.array([(1,5,-1,0), (2,8,-1,1), (3,3,-2,3)], dtype=dtyp)
assert_equal(test, control)
dtyp = [('e1','i4'),('e2','i4'),('n', 'i1')]
test = np.recfromcsv(TextIO(dstr,), dtype=dtyp, delimiter=',',
usecols=(0,1,3), names=None, converters=conv)
- control = np.rec.array([[1,5,0], [2,8,1], [3,3,3]], dtype=dtyp)
+ control = np.rec.array([(1,5,0), (2,8,1), (3,3,3)], dtype=dtyp)
assert_equal(test, control)
def test_dtype_with_object(self):
diff --git a/numpy/linalg/linalg.py b/numpy/linalg/linalg.py
index edd980dd5..d2ae7befc 100644
--- a/numpy/linalg/linalg.py
+++ b/numpy/linalg/linalg.py
@@ -19,12 +19,13 @@ __all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv',
import warnings
from numpy.core import (
- array, asarray, zeros, empty, empty_like, transpose, intc, single, double,
+ array, asarray, zeros, empty, empty_like, intc, single, double,
csingle, cdouble, inexact, complexfloating, newaxis, ravel, all, Inf, dot,
add, multiply, sqrt, maximum, fastCopyAndTranspose, sum, isfinite, size,
finfo, errstate, geterrobj, longdouble, moveaxis, amin, amax, product, abs,
- broadcast, atleast_2d, intp, asanyarray, isscalar, object_, ones
- )
+ broadcast, atleast_2d, intp, asanyarray, isscalar, object_, ones, matmul,
+ swapaxes, divide, count_nonzero
+)
from numpy.core.multiarray import normalize_axis_index
from numpy.lib import triu, asfarray
from numpy.linalg import lapack_lite, _umath_linalg
@@ -69,12 +70,8 @@ class LinAlgError(Exception):
"""
pass
-# Dealing with errors in _umath_linalg
-
-_linalg_error_extobj = None
def _determine_error_states():
- global _linalg_error_extobj
errobj = geterrobj()
bufsize = errobj[0]
@@ -82,9 +79,11 @@ def _determine_error_states():
divide='ignore', under='ignore'):
invalid_call_errmask = geterrobj()[1]
- _linalg_error_extobj = [bufsize, invalid_call_errmask, None]
+ return [bufsize, invalid_call_errmask, None]
-_determine_error_states()
+# Dealing with errors in _umath_linalg
+_linalg_error_extobj = _determine_error_states()
+del _determine_error_states
def _raise_linalgerror_singular(err, flag):
raise LinAlgError("Singular matrix")
@@ -99,7 +98,7 @@ def _raise_linalgerror_svd_nonconvergence(err, flag):
raise LinAlgError("SVD did not converge")
def get_linalg_error_extobj(callback):
- extobj = list(_linalg_error_extobj)
+ extobj = list(_linalg_error_extobj) # make a copy
extobj[2] = callback
return extobj
@@ -225,6 +224,22 @@ def _assertNoEmpty2d(*arrays):
if _isEmpty2d(a):
raise LinAlgError("Arrays cannot be empty")
+def transpose(a):
+ """
+ Transpose each matrix in a stack of matrices.
+
+ Unlike np.transpose, this only swaps the last two axes, rather than all of
+ them.
+
+ Parameters
+ ----------
+ a : (...,M,N) array_like
+
+ Returns
+ -------
+ aT : (...,N,M) ndarray
+ """
+ return swapaxes(a, -1, -2)
# Linear equations
@@ -1281,7 +1296,7 @@ def eigh(a, UPLO='L'):
# Singular value decomposition
-def svd(a, full_matrices=1, compute_uv=1):
+def svd(a, full_matrices=True, compute_uv=True):
"""
Singular Value Decomposition.
@@ -1489,22 +1504,34 @@ def cond(x, p=None):
return norm(x, p, axis=(-2, -1)) * norm(inv(x), p, axis=(-2, -1))
-def matrix_rank(M, tol=None):
+def matrix_rank(M, tol=None, hermitian=False):
"""
Return matrix rank of array using SVD method
- Rank of the array is the number of SVD singular values of the array that are
+ Rank of the array is the number of singular values of the array that are
greater than `tol`.
+ .. versionchanged:: 1.14
+ Can now operate on stacks of matrices
+
Parameters
----------
M : {(M,), (..., M, N)} array_like
input vector or stack of matrices
- tol : {None, float}, optional
- threshold below which SVD values are considered zero. If `tol` is
- None, and ``S`` is an array with singular values for `M`, and
- ``eps`` is the epsilon value for datatype of ``S``, then `tol` is
- set to ``S.max() * max(M.shape) * eps``.
+ tol : (...) array_like, float, optional
+ threshold below which SVD values are considered zero. If `tol` is
+ None, and ``S`` is an array with singular values for `M`, and
+ ``eps`` is the epsilon value for datatype of ``S``, then `tol` is
+ set to ``S.max() * max(M.shape) * eps``.
+
+ .. versionchanged:: 1.14
+ Broadcasted against the stack of matrices
+ hermitian : bool, optional
+ If True, `M` is assumed to be Hermitian (symmetric if real-valued),
+ enabling a more efficient method for finding singular values.
+ Defaults to False.
+
+ .. versionadded:: 1.14
Notes
-----
@@ -1568,10 +1595,15 @@ def matrix_rank(M, tol=None):
M = asarray(M)
if M.ndim < 2:
return int(not all(M==0))
- S = svd(M, compute_uv=False)
+ if hermitian:
+ S = abs(eigvalsh(M))
+ else:
+ S = svd(M, compute_uv=False)
if tol is None:
tol = S.max(axis=-1, keepdims=True) * max(M.shape[-2:]) * finfo(S.dtype).eps
- return (S > tol).sum(axis=-1)
+ else:
+ tol = asarray(tol)[..., newaxis]
+ return count_nonzero(S > tol, axis=-1)
# Generalized inverse
@@ -1584,26 +1616,29 @@ def pinv(a, rcond=1e-15 ):
singular-value decomposition (SVD) and including all
*large* singular values.
+ .. versionchanged:: 1.14
+ Can now operate on stacks of matrices
+
Parameters
----------
- a : (M, N) array_like
- Matrix to be pseudo-inverted.
- rcond : float
- Cutoff for small singular values.
- Singular values smaller (in modulus) than
- `rcond` * largest_singular_value (again, in modulus)
- are set to zero.
+ a : (..., M, N) array_like
+ Matrix or stack of matrices to be pseudo-inverted.
+ rcond : (...) array_like of float
+ Cutoff for small singular values.
+ Singular values smaller (in modulus) than
+ `rcond` * largest_singular_value (again, in modulus)
+ are set to zero. Broadcasts against the stack of matrices.
Returns
-------
- B : (N, M) ndarray
- The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so
- is `B`.
+ B : (..., N, M) ndarray
+ The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so
+ is `B`.
Raises
------
LinAlgError
- If the SVD computation does not converge.
+ If the SVD computation does not converge.
Notes
-----
@@ -1640,20 +1675,20 @@ def pinv(a, rcond=1e-15 ):
"""
a, wrap = _makearray(a)
+ rcond = asarray(rcond)
if _isEmpty2d(a):
res = empty(a.shape[:-2] + (a.shape[-1], a.shape[-2]), dtype=a.dtype)
return wrap(res)
a = a.conjugate()
- u, s, vt = svd(a, 0)
- m = u.shape[0]
- n = vt.shape[1]
- cutoff = rcond*maximum.reduce(s)
- for i in range(min(n, m)):
- if s[i] > cutoff:
- s[i] = 1./s[i]
- else:
- s[i] = 0.
- res = dot(transpose(vt), multiply(s[:, newaxis], transpose(u)))
+ u, s, vt = svd(a, full_matrices=False)
+
+ # discard small singular values
+ cutoff = rcond[..., newaxis] * amax(s, axis=-1, keepdims=True)
+ large = s > cutoff
+ s = divide(1, s, where=large, out=s)
+ s[~large] = 0
+
+ res = matmul(transpose(vt), multiply(s[..., newaxis], transpose(u)))
return wrap(res)
# Determinant
@@ -1989,13 +2024,13 @@ def lstsq(a, b, rcond="warn"):
resids = array([sum((ravel(bstar)[n:])**2)],
dtype=result_real_t)
else:
- x = array(transpose(bstar)[:n,:], dtype=result_t, copy=True)
+ x = array(bstar.T[:n,:], dtype=result_t, copy=True)
if results['rank'] == n and m > n:
if isComplexType(t):
- resids = sum(abs(transpose(bstar)[n:,:])**2, axis=0).astype(
+ resids = sum(abs(bstar.T[n:,:])**2, axis=0).astype(
result_real_t, copy=False)
else:
- resids = sum((transpose(bstar)[n:,:])**2, axis=0).astype(
+ resids = sum((bstar.T[n:,:])**2, axis=0).astype(
result_real_t, copy=False)
st = s[:min(n, m)].astype(result_real_t, copy=True)
diff --git a/numpy/linalg/tests/test_linalg.py b/numpy/linalg/tests/test_linalg.py
index ab81fc485..0a6566bde 100644
--- a/numpy/linalg/tests/test_linalg.py
+++ b/numpy/linalg/tests/test_linalg.py
@@ -712,12 +712,16 @@ class TestCondInf(object):
assert_almost_equal(linalg.cond(A, inf), 3.)
-class TestPinv(LinalgSquareTestCase, LinalgNonsquareTestCase):
+class TestPinv(LinalgSquareTestCase,
+ LinalgNonsquareTestCase,
+ LinalgGeneralizedSquareTestCase,
+ LinalgGeneralizedNonsquareTestCase):
def do(self, a, b, tags):
a_ginv = linalg.pinv(a)
# `a @ a_ginv == I` does not hold if a is singular
- assert_almost_equal(dot(a, a_ginv).dot(a), a, single_decimal=5, double_decimal=11)
+ dot = dot_generalized
+ assert_almost_equal(dot(dot(a, a_ginv), a), a, single_decimal=5, double_decimal=11)
assert_(imply(isinstance(a, matrix), isinstance(a_ginv, matrix)))
@@ -1379,6 +1383,19 @@ class TestMatrixRank(object):
# works on scalar
yield assert_equal, matrix_rank(1), 1
+ def test_symmetric_rank(self):
+ yield assert_equal, 4, matrix_rank(np.eye(4), hermitian=True)
+ yield assert_equal, 1, matrix_rank(np.ones((4, 4)), hermitian=True)
+ yield assert_equal, 0, matrix_rank(np.zeros((4, 4)), hermitian=True)
+ # rank deficient matrix
+ I = np.eye(4)
+ I[-1, -1] = 0.
+ yield assert_equal, 3, matrix_rank(I, hermitian=True)
+ # manually supplied tolerance
+ I[-1, -1] = 1e-8
+ yield assert_equal, 4, matrix_rank(I, hermitian=True, tol=0.99e-8)
+ yield assert_equal, 3, matrix_rank(I, hermitian=True, tol=1.01e-8)
+
def test_reduced_rank():
# Test matrices with reduced rank
@@ -1490,6 +1507,30 @@ class TestQR(object):
class TestCholesky(object):
# TODO: are there no other tests for cholesky?
+ def test_basic_property(self):
+ # Check A = L L^H
+ shapes = [(1, 1), (2, 2), (3, 3), (50, 50), (3, 10, 10)]
+ dtypes = (np.float32, np.float64, np.complex64, np.complex128)
+
+ for shape, dtype in itertools.product(shapes, dtypes):
+ np.random.seed(1)
+ a = np.random.randn(*shape)
+ if np.issubdtype(dtype, np.complexfloating):
+ a = a + 1j*np.random.randn(*shape)
+
+ t = list(range(len(shape)))
+ t[-2:] = -1, -2
+
+ a = np.matmul(a.transpose(t).conj(), a)
+ a = np.asarray(a, dtype=dtype)
+
+ c = np.linalg.cholesky(a)
+
+ b = np.matmul(c, c.transpose(t).conj())
+ assert_allclose(b, a,
+ err_msg="{} {}\n{}\n{}".format(shape, dtype, a, c),
+ atol=500 * a.shape[0] * np.finfo(dtype).eps)
+
def test_0_size(self):
class ArraySubclass(np.ndarray):
pass
diff --git a/numpy/ma/core.py b/numpy/ma/core.py
index 8efe45eed..b6e2edf5a 100644
--- a/numpy/ma/core.py
+++ b/numpy/ma/core.py
@@ -1537,6 +1537,16 @@ def is_mask(m):
return False
+def _shrink_mask(m):
+ """
+ Shrink a mask to nomask if possible
+ """
+ if not m.dtype.names and not m.any():
+ return nomask
+ else:
+ return m
+
+
def make_mask(m, copy=False, shrink=True, dtype=MaskType):
"""
Create a boolean mask from an array.
@@ -1613,13 +1623,17 @@ def make_mask(m, copy=False, shrink=True, dtype=MaskType):
# Make sure the input dtype is valid.
dtype = make_mask_descr(dtype)
+
+ # legacy boolean special case: "existence of fields implies true"
+ if isinstance(m, ndarray) and m.dtype.fields and dtype == np.bool_:
+ return np.ones(m.shape, dtype=dtype)
+
# Fill the mask in case there are missing data; turn it into an ndarray.
result = np.array(filled(m, True), copy=copy, dtype=dtype, subok=True)
# Bas les masques !
- if shrink and (not result.dtype.names) and (not result.any()):
- return nomask
- else:
- return result
+ if shrink:
+ result = _shrink_mask(result)
+ return result
def make_mask_none(newshape, dtype=None):
@@ -1906,7 +1920,7 @@ def masked_where(condition, a, copy=True):
"""
# Make sure that condition is a valid standard-type mask.
- cond = make_mask(condition)
+ cond = make_mask(condition, shrink=False)
a = np.array(a, copy=copy, subok=True)
(cshape, ashape) = (cond.shape, a.shape)
@@ -1920,7 +1934,7 @@ def masked_where(condition, a, copy=True):
cls = MaskedArray
result = a.view(cls)
# Assign to *.mask so that structured masks are handled correctly.
- result.mask = cond
+ result.mask = _shrink_mask(cond)
return result
@@ -2413,12 +2427,13 @@ def _recursive_printoption(result, mask, printopt):
"""
names = result.dtype.names
- for name in names:
- (curdata, curmask) = (result[name], mask[name])
- if curdata.dtype.names:
+ if names:
+ for name in names:
+ curdata = result[name]
+ curmask = mask[name]
_recursive_printoption(curdata, curmask, printopt)
- else:
- np.copyto(curdata, printopt, where=curmask)
+ else:
+ np.copyto(result, printopt, where=mask)
return
_print_templates = dict(long_std="""\
@@ -2558,7 +2573,8 @@ def _arraymethod(funcname, onmask=True):
if not onmask:
result.__setmask__(mask)
elif mask is not nomask:
- result.__setmask__(getattr(mask, funcname)(*args, **params))
+ # __setmask__ makes a copy, which we don't want
+ result._mask = getattr(mask, funcname)(*args, **params)
return result
methdoc = getattr(ndarray, funcname, None) or getattr(np, funcname, None)
if methdoc is not None:
@@ -3537,9 +3553,7 @@ class MaskedArray(ndarray):
False
"""
- m = self._mask
- if m.ndim and not m.any():
- self._mask = nomask
+ self._mask = _shrink_mask(self._mask)
return self
baseclass = property(fget=lambda self: self._baseclass,
@@ -3821,47 +3835,27 @@ class MaskedArray(ndarray):
"""
if masked_print_option.enabled():
- f = masked_print_option
- if self is masked:
- return str(f)
- m = self._mask
- if m is nomask:
+ mask = self._mask
+ if mask is nomask:
res = self._data
else:
- if m.shape == () and m.itemsize==len(m.dtype):
- if m.dtype.names:
- m = m.view((bool, len(m.dtype)))
- if m.any():
- return str(tuple((f if _m else _d) for _d, _m in
- zip(self._data.tolist(), m)))
- else:
- return str(self._data)
- elif m:
- return str(f)
- else:
- return str(self._data)
# convert to object array to make filled work
- names = self.dtype.names
- if names is None:
- data = self._data
- mask = m
- # For big arrays, to avoid a costly conversion to the
- # object dtype, extract the corners before the conversion.
- print_width = (self._print_width if self.ndim > 1
- else self._print_width_1d)
- for axis in range(self.ndim):
- if data.shape[axis] > print_width:
- ind = print_width // 2
- arr = np.split(data, (ind, -ind), axis=axis)
- data = np.concatenate((arr[0], arr[2]), axis=axis)
- arr = np.split(mask, (ind, -ind), axis=axis)
- mask = np.concatenate((arr[0], arr[2]), axis=axis)
- res = data.astype("O")
- res.view(ndarray)[mask] = f
- else:
- rdtype = _replace_dtype_fields(self.dtype, "O")
- res = self._data.astype(rdtype)
- _recursive_printoption(res, m, f)
+ data = self._data
+ # For big arrays, to avoid a costly conversion to the
+ # object dtype, extract the corners before the conversion.
+ print_width = (self._print_width if self.ndim > 1
+ else self._print_width_1d)
+ for axis in range(self.ndim):
+ if data.shape[axis] > print_width:
+ ind = print_width // 2
+ arr = np.split(data, (ind, -ind), axis=axis)
+ data = np.concatenate((arr[0], arr[2]), axis=axis)
+ arr = np.split(mask, (ind, -ind), axis=axis)
+ mask = np.concatenate((arr[0], arr[2]), axis=axis)
+
+ rdtype = _replace_dtype_fields(self.dtype, "O")
+ res = data.astype(rdtype)
+ _recursive_printoption(res, mask, masked_print_option)
else:
res = self.filled(self.fill_value)
return str(res)
@@ -4832,7 +4826,7 @@ class MaskedArray(ndarray):
return result.astype(dtype)
else:
D = self.diagonal(offset=offset, axis1=axis1, axis2=axis2)
- return D.astype(dtype).filled(0).sum(axis=None, out=out)
+ return D.astype(dtype).filled(0).sum(axis=-1, out=out)
trace.__doc__ = ndarray.trace.__doc__
def dot(self, b, out=None, strict=False):
@@ -5998,7 +5992,7 @@ class mvoid(MaskedArray):
def _get_data(self):
# Make sure that the _data part is a np.void
- return self.view(ndarray)[()]
+ return super(mvoid, self)._data[()]
_data = property(fget=_get_data)
@@ -6034,19 +6028,13 @@ class mvoid(MaskedArray):
def __str__(self):
m = self._mask
if m is nomask:
- return self._data.__str__()
- printopt = masked_print_option
- rdtype = _replace_dtype_fields(self._data.dtype, "O")
-
- # temporary hack to fix gh-7493. A more permanent fix
- # is proposed in gh-6053, after which the next two
- # lines should be changed to
- # res = np.array([self._data], dtype=rdtype)
- res = np.empty(1, rdtype)
- res[:1] = self._data
+ return str(self._data)
- _recursive_printoption(res, self._mask, printopt)
- return str(res[0])
+ rdtype = _replace_dtype_fields(self._data.dtype, "O")
+ data_arr = super(mvoid, self)._data
+ res = data_arr.astype(rdtype)
+ _recursive_printoption(res, self._mask, masked_print_option)
+ return str(res)
__repr__ = __str__
@@ -6174,17 +6162,40 @@ isMA = isMaskedArray # backward compatibility
class MaskedConstant(MaskedArray):
- # We define the masked singleton as a float for higher precedence.
- # Note that it can be tricky sometimes w/ type comparison
- _data = data = np.array(0.)
- _mask = mask = np.array(True)
- _baseclass = ndarray
+ # the lone np.ma.masked instance
+ __singleton = None
- def __new__(self):
- return self._data.view(self)
+ def __new__(cls):
+ if cls.__singleton is None:
+ # We define the masked singleton as a float for higher precedence.
+ # Note that it can be tricky sometimes w/ type comparison
+ data = np.array(0.)
+ mask = np.array(True)
+
+ # prevent any modifications
+ data.flags.writeable = False
+ mask.flags.writeable = False
+
+ # don't fall back on MaskedArray.__new__(MaskedConstant), since
+ # that might confuse it - this way, the construction is entirely
+ # within our control
+ cls.__singleton = MaskedArray(data, mask=mask).view(cls)
+
+ return cls.__singleton
def __array_finalize__(self, obj):
- return
+ if self.__singleton is None:
+ # this handles the `.view` in __new__, which we want to copy across
+ # properties normally
+ return super(MaskedConstant, self).__array_finalize__(obj)
+ elif self is self.__singleton:
+ # not clear how this can happen, play it safe
+ pass
+ else:
+ # everywhere else, we want to downcast to MaskedArray, to prevent a
+ # duplicate maskedconstant.
+ self.__class__ = MaskedArray
+ MaskedArray.__array_finalize__(self, obj)
def __array_prepare__(self, obj, context=None):
return self.view(MaskedArray).__array_prepare__(obj, context)
@@ -6196,16 +6207,36 @@ class MaskedConstant(MaskedArray):
return str(masked_print_option._display)
def __repr__(self):
- return 'masked'
-
- def flatten(self):
- return masked_array([self._data], dtype=float, mask=[True])
+ if self is self.__singleton:
+ return 'masked'
+ else:
+ # it's a subclass, or something is wrong, make it obvious
+ return object.__repr__(self)
def __reduce__(self):
"""Override of MaskedArray's __reduce__.
"""
return (self.__class__, ())
+ # inplace operations have no effect. We have to override them to avoid
+ # trying to modify the readonly data and mask arrays
+ def __iop__(self, other):
+ return self
+ __iadd__ = \
+ __isub__ = \
+ __imul__ = \
+ __ifloordiv__ = \
+ __itruediv__ = \
+ __ipow__ = \
+ __iop__
+ del __iop__ # don't leave this around
+
+ def copy(self, *args, **kwargs):
+ """ Copy is a no-op on the maskedconstant, as it is a scalar """
+ # maskedconstant is a scalar, so copy doesn't need to copy. There's
+ # precedent for this with `np.bool_` scalars.
+ return self
+
masked = masked_singleton = MaskedConstant()
masked_array = MaskedArray
@@ -6635,12 +6666,11 @@ def concatenate(arrays, axis=0):
return data
# OK, so we have to concatenate the masks
dm = np.concatenate([getmaskarray(a) for a in arrays], axis)
+ dm = dm.reshape(d.shape)
+
# If we decide to keep a '_shrinkmask' option, we want to check that
# all of them are True, and then check for dm.any()
- if not dm.dtype.fields and not dm.any():
- data._mask = nomask
- else:
- data._mask = dm.reshape(d.shape)
+ data._mask = _shrink_mask(dm)
return data
@@ -7058,8 +7088,7 @@ def where(condition, x=_NoValue, y=_NoValue):
mask = np.where(cm, np.ones((), dtype=mask.dtype), mask)
# collapse the mask, for backwards compatibility
- if mask.dtype == np.bool_ and not mask.any():
- mask = nomask
+ mask = _shrink_mask(mask)
return masked_array(data, mask=mask)
diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py
index aedfb95fd..41c56ca1e 100644
--- a/numpy/ma/tests/test_core.py
+++ b/numpy/ma/tests/test_core.py
@@ -8,6 +8,7 @@ from __future__ import division, absolute_import, print_function
__author__ = "Pierre GF Gerard-Marchant"
+import sys
import warnings
import pickle
import operator
@@ -21,7 +22,7 @@ import numpy.ma.core
import numpy.core.fromnumeric as fromnumeric
import numpy.core.umath as umath
from numpy.testing import (
- run_module_suite, assert_raises, assert_warns, suppress_warnings
+ run_module_suite, assert_raises, assert_warns, suppress_warnings, dec
)
from numpy import ndarray
from numpy.compat import asbytes, asbytes_nested
@@ -1643,6 +1644,12 @@ class TestMaskedArrayAttributes(object):
assert_equal(a, b)
assert_equal(a.mask, nomask)
+ # Mask cannot be shrunk on structured types, so is a no-op
+ a = np.ma.array([(1, 2.0)], [('a', int), ('b', float)])
+ b = a.copy()
+ a.shrink_mask()
+ assert_equal(a.mask, b.mask)
+
def test_flat(self):
# Test that flat can return all types of items [#4585, #4615]
# test simple access
@@ -1762,15 +1769,11 @@ class TestFillingValues(object):
assert_equal(fval.item(), [-999, -12345678.9, b"???"])
#.....Using a flexible type w/ a different type shouldn't matter
- # BEHAVIOR in 1.5 and earlier: match structured types by position
- #fill_val = np.array((-999, -12345678.9, "???"),
- # dtype=[("A", int), ("B", float), ("C", "|S3")])
- # BEHAVIOR in 1.6 and later: match structured types by name
- fill_val = np.array(("???", -999, -12345678.9),
- dtype=[("c", "|S3"), ("a", int), ("b", float), ])
- # suppress deprecation warning in 1.12 (remove in 1.13)
- with assert_warns(FutureWarning):
- fval = _check_fill_value(fill_val, ndtype)
+ # BEHAVIOR in 1.5 and earlier, and 1.13 and later: match structured
+ # types by position
+ fill_val = np.array((-999, -12345678.9, "???"),
+ dtype=[("A", int), ("B", float), ("C", "|S3")])
+ fval = _check_fill_value(fill_val, ndtype)
assert_(isinstance(fval, ndarray))
assert_equal(fval.item(), [-999, -12345678.9, b"???"])
@@ -3226,9 +3229,7 @@ class TestMaskedArrayMethods(object):
assert_(m_arr_sq is not np.ma.masked)
assert_equal(m_arr_sq.mask, True)
m_arr_sq[...] = 2
- # TODO: mask isn't copied to/from views yet in maskedarray, so we can
- # only check the data
- assert_equal(m_arr.data[0,0], 2)
+ assert_equal(m_arr[0,0], 2)
def test_swapaxes(self):
# Tests swapaxes on MaskedArrays.
@@ -3417,6 +3418,27 @@ class TestMaskedArrayMethods(object):
assert_equal(x.T.mask, x.mask)
assert_equal(x.T.data, x.data)
+ def test_transpose_view(self):
+ x = np.ma.array([[1, 2, 3], [4, 5, 6]])
+ x[0,1] = np.ma.masked
+ xt = x.T
+
+ xt[1,0] = 10
+ xt[0,1] = np.ma.masked
+
+ assert_equal(x.data, xt.T.data)
+ assert_equal(x.mask, xt.T.mask)
+
+ def test_diagonal_view(self):
+ x = np.ma.zeros((3,3))
+ x[0,0] = 10
+ x[1,1] = np.ma.masked
+ x[2,2] = 20
+ xd = x.diagonal()
+ x[1,1] = 15
+ assert_equal(xd.mask, x.diagonal().mask)
+ assert_equal(xd.data, x.diagonal().data)
+
class TestMaskedArrayMathMethods(object):
@@ -3536,6 +3558,11 @@ class TestMaskedArrayMathMethods(object):
axis=0))
assert_equal(np.trace(mX), mX.trace())
+ # gh-5560
+ arr = np.arange(2*4*4).reshape(2,4,4)
+ m_arr = np.ma.masked_array(arr, False)
+ assert_equal(arr.trace(axis1=1, axis2=2), m_arr.trace(axis1=1, axis2=2))
+
def test_dot(self):
# Tests dot on MaskedArrays.
(x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
@@ -3821,6 +3848,12 @@ class TestMaskedArrayFunctions(object):
assert_equal(am["A"],
np.ma.masked_array(np.zeros(10), np.ones(10)))
+ def test_masked_where_mismatch(self):
+ # gh-4520
+ x = np.arange(10)
+ y = np.arange(5)
+ assert_raises(IndexError, np.ma.masked_where, y > 6, x)
+
def test_masked_otherfunctions(self):
assert_equal(masked_inside(list(range(5)), 1, 3),
[0, 199, 199, 199, 4])
@@ -4788,6 +4821,65 @@ class TestMaskedConstant(object):
assert_(not isinstance(m, np.ma.core.MaskedConstant))
assert_(m is not np.ma.masked)
+ def test_repr(self):
+ # copies should not exist, but if they do, it should be obvious that
+ # something is wrong
+ assert_equal(repr(np.ma.masked), 'masked')
+
+ # create a new instance in a weird way
+ masked2 = np.ma.MaskedArray.__new__(np.ma.core.MaskedConstant)
+ assert_not_equal(repr(masked2), 'masked')
+
+ def test_pickle(self):
+ from io import BytesIO
+ import pickle
+
+ with BytesIO() as f:
+ pickle.dump(np.ma.masked, f)
+ f.seek(0)
+ res = pickle.load(f)
+ assert_(res is np.ma.masked)
+
+ def test_copy(self):
+ # gh-9328
+ # copy is a no-op, like it is with np.True_
+ assert_equal(
+ np.ma.masked.copy() is np.ma.masked,
+ np.True_.copy() is np.True_)
+
+ def test_immutable(self):
+ orig = np.ma.masked
+ assert_raises(np.ma.core.MaskError, operator.setitem, orig, (), 1)
+ assert_raises(ValueError,operator.setitem, orig.data, (), 1)
+ assert_raises(ValueError, operator.setitem, orig.mask, (), False)
+
+ view = np.ma.masked.view(np.ma.MaskedArray)
+ assert_raises(ValueError, operator.setitem, view, (), 1)
+ assert_raises(ValueError, operator.setitem, view.data, (), 1)
+ assert_raises(ValueError, operator.setitem, view.mask, (), False)
+
+ @dec.knownfailureif(sys.version_info.major == 2, "See gh-9751")
+ def test_coercion_int(self):
+ a_i = np.zeros((), int)
+ assert_raises(MaskError, operator.setitem, a_i, (), np.ma.masked)
+
+ def test_coercion_float(self):
+ a_f = np.zeros((), float)
+ assert_warns(UserWarning, operator.setitem, a_f, (), np.ma.masked)
+ assert_(np.isnan(a_f[()]))
+
+ @dec.knownfailureif(True, "See gh-9750")
+ def test_coercion_unicode(self):
+ a_u = np.zeros((), 'U10')
+ a_u[()] = np.ma.masked
+ assert_equal(a_u[()], u'--')
+
+ @dec.knownfailureif(True, "See gh-9750")
+ def test_coercion_bytes(self):
+ a_b = np.zeros((), 'S10')
+ a_b[()] = np.ma.masked
+ assert_equal(a_b[()], b'--')
+
def test_masked_array():
a = np.ma.array([0, 1, 2, 3], mask=[0, 0, 1, 0])
diff --git a/numpy/polynomial/chebyshev.py b/numpy/polynomial/chebyshev.py
index eb9797e0f..fe2805a03 100644
--- a/numpy/polynomial/chebyshev.py
+++ b/numpy/polynomial/chebyshev.py
@@ -1024,7 +1024,7 @@ def chebint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
Note that the result of each integration is *multiplied* by `scl`.
Why is this important to note? Say one is making a linear change of
variable :math:`u = ax + b` in an integral relative to `x`. Then
- .. math::`dx = du/a`, so one will need to set `scl` equal to
+ :math:`dx = du/a`, so one will need to set `scl` equal to
:math:`1/a`- perhaps not what one would have first thought.
Also note that, in general, the result of integrating a C-series needs
@@ -1222,7 +1222,7 @@ def chebval2d(x, y, c):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
try:
@@ -1282,7 +1282,7 @@ def chebgrid2d(x, y, c):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
c = chebval(x, c)
@@ -1335,7 +1335,7 @@ def chebval3d(x, y, z, c):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
try:
@@ -1399,7 +1399,7 @@ def chebgrid3d(x, y, z, c):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
c = chebval(x, c)
@@ -1510,7 +1510,7 @@ def chebvander2d(x, y, deg):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
ideg = [int(d) for d in deg]
@@ -1574,7 +1574,7 @@ def chebvander3d(x, y, z, deg):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
ideg = [int(d) for d in deg]
@@ -1810,7 +1810,7 @@ def chebcompanion(c):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
# c is a trimmed copy
diff --git a/numpy/polynomial/hermite.py b/numpy/polynomial/hermite.py
index 856ac487e..ae1143d28 100644
--- a/numpy/polynomial/hermite.py
+++ b/numpy/polynomial/hermite.py
@@ -782,7 +782,7 @@ def hermint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
Note that the result of each integration is *multiplied* by `scl`.
Why is this important to note? Say one is making a linear change of
variable :math:`u = ax + b` in an integral relative to `x`. Then
- .. math::`dx = du/a`, so one will need to set `scl` equal to
+ :math:`dx = du/a`, so one will need to set `scl` equal to
:math:`1/a` - perhaps not what one would have first thought.
Also note that, in general, the result of integrating a C-series needs
@@ -983,7 +983,7 @@ def hermval2d(x, y, c):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
try:
@@ -1043,7 +1043,7 @@ def hermgrid2d(x, y, c):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
c = hermval(x, c)
@@ -1096,7 +1096,7 @@ def hermval3d(x, y, z, c):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
try:
@@ -1160,7 +1160,7 @@ def hermgrid3d(x, y, z, c):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
c = hermval(x, c)
@@ -1279,7 +1279,7 @@ def hermvander2d(x, y, deg):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
ideg = [int(d) for d in deg]
@@ -1343,7 +1343,7 @@ def hermvander3d(x, y, z, deg):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
ideg = [int(d) for d in deg]
@@ -1584,7 +1584,7 @@ def hermcompanion(c):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
# c is a trimmed copy
@@ -1732,7 +1732,7 @@ def hermgauss(deg):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
The results have only been tested up to degree 100, higher degrees may
be problematic. The weights are determined by using the fact that
@@ -1796,7 +1796,7 @@ def hermweight(x):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
w = np.exp(-x**2)
diff --git a/numpy/polynomial/hermite_e.py b/numpy/polynomial/hermite_e.py
index e83b26327..ee29ec5d3 100644
--- a/numpy/polynomial/hermite_e.py
+++ b/numpy/polynomial/hermite_e.py
@@ -781,7 +781,7 @@ def hermeint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
Note that the result of each integration is *multiplied* by `scl`.
Why is this important to note? Say one is making a linear change of
variable :math:`u = ax + b` in an integral relative to `x`. Then
- .. math::`dx = du/a`, so one will need to set `scl` equal to
+ :math:`dx = du/a`, so one will need to set `scl` equal to
:math:`1/a` - perhaps not what one would have first thought.
Also note that, in general, the result of integrating a C-series needs
@@ -981,7 +981,7 @@ def hermeval2d(x, y, c):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
try:
@@ -1041,7 +1041,7 @@ def hermegrid2d(x, y, c):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
c = hermeval(x, c)
@@ -1094,7 +1094,7 @@ def hermeval3d(x, y, z, c):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
try:
@@ -1158,7 +1158,7 @@ def hermegrid3d(x, y, z, c):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
c = hermeval(x, c)
@@ -1276,7 +1276,7 @@ def hermevander2d(x, y, deg):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
ideg = [int(d) for d in deg]
@@ -1340,7 +1340,7 @@ def hermevander3d(x, y, z, deg):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
ideg = [int(d) for d in deg]
@@ -1582,7 +1582,7 @@ def hermecompanion(c):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
# c is a trimmed copy
@@ -1730,7 +1730,7 @@ def hermegauss(deg):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
The results have only been tested up to degree 100, higher degrees may
be problematic. The weights are determined by using the fact that
@@ -1793,7 +1793,7 @@ def hermeweight(x):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
w = np.exp(-.5*x**2)
diff --git a/numpy/polynomial/laguerre.py b/numpy/polynomial/laguerre.py
index 60373b8c8..079cf97b3 100644
--- a/numpy/polynomial/laguerre.py
+++ b/numpy/polynomial/laguerre.py
@@ -782,7 +782,7 @@ def lagint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
Note that the result of each integration is *multiplied* by `scl`.
Why is this important to note? Say one is making a linear change of
variable :math:`u = ax + b` in an integral relative to `x`. Then
- .. math::`dx = du/a`, so one will need to set `scl` equal to
+ :math:`dx = du/a`, so one will need to set `scl` equal to
:math:`1/a` - perhaps not what one would have first thought.
Also note that, in general, the result of integrating a C-series needs
@@ -983,7 +983,7 @@ def lagval2d(x, y, c):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
try:
@@ -1043,7 +1043,7 @@ def laggrid2d(x, y, c):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
c = lagval(x, c)
@@ -1096,7 +1096,7 @@ def lagval3d(x, y, z, c):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
try:
@@ -1160,7 +1160,7 @@ def laggrid3d(x, y, z, c):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
c = lagval(x, c)
@@ -1278,7 +1278,7 @@ def lagvander2d(x, y, deg):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
ideg = [int(d) for d in deg]
@@ -1342,7 +1342,7 @@ def lagvander3d(x, y, z, deg):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
ideg = [int(d) for d in deg]
@@ -1582,7 +1582,7 @@ def lagcompanion(c):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
# c is a trimmed copy
@@ -1687,7 +1687,7 @@ def laggauss(deg):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
The results have only been tested up to degree 100 higher degrees may
be problematic. The weights are determined by using the fact that
@@ -1747,7 +1747,7 @@ def lagweight(x):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
w = np.exp(-x)
diff --git a/numpy/polynomial/legendre.py b/numpy/polynomial/legendre.py
index 5128643cd..1c42f4881 100644
--- a/numpy/polynomial/legendre.py
+++ b/numpy/polynomial/legendre.py
@@ -822,7 +822,7 @@ def legint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
Note that the result of each integration is *multiplied* by `scl`.
Why is this important to note? Say one is making a linear change of
variable :math:`u = ax + b` in an integral relative to `x`. Then
- .. math::`dx = du/a`, so one will need to set `scl` equal to
+ :math:`dx = du/a`, so one will need to set `scl` equal to
:math:`1/a` - perhaps not what one would have first thought.
Also note that, in general, the result of integrating a C-series needs
@@ -1021,7 +1021,7 @@ def legval2d(x, y, c):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
try:
@@ -1081,7 +1081,7 @@ def leggrid2d(x, y, c):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
c = legval(x, c)
@@ -1134,7 +1134,7 @@ def legval3d(x, y, z, c):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
try:
@@ -1198,7 +1198,7 @@ def leggrid3d(x, y, z, c):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
c = legval(x, c)
@@ -1309,7 +1309,7 @@ def legvander2d(x, y, deg):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
ideg = [int(d) for d in deg]
@@ -1373,7 +1373,7 @@ def legvander3d(x, y, z, deg):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
ideg = [int(d) for d in deg]
@@ -1611,7 +1611,7 @@ def legcompanion(c):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
# c is a trimmed copy
@@ -1712,7 +1712,7 @@ def leggauss(deg):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
The results have only been tested up to degree 100, higher degrees may
be problematic. The weights are determined by using the fact that
@@ -1777,7 +1777,7 @@ def legweight(x):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
w = x*0.0 + 1.0
diff --git a/numpy/polynomial/polynomial.py b/numpy/polynomial/polynomial.py
index 1528de342..1be775f6a 100644
--- a/numpy/polynomial/polynomial.py
+++ b/numpy/polynomial/polynomial.py
@@ -619,7 +619,7 @@ def polyint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
Note that the result of each integration is *multiplied* by `scl`. Why
is this important to note? Say one is making a linear change of
variable :math:`u = ax + b` in an integral relative to `x`. Then
- .. math::`dx = du/a`, so one will need to set `scl` equal to
+ :math:`dx = du/a`, so one will need to set `scl` equal to
:math:`1/a` - perhaps not what one would have first thought.
Examples
diff --git a/numpy/polynomial/polyutils.py b/numpy/polynomial/polyutils.py
index e2dba1a55..c1ed0c9b3 100644
--- a/numpy/polynomial/polyutils.py
+++ b/numpy/polynomial/polyutils.py
@@ -153,14 +153,23 @@ def as_series(alist, trim=True):
Examples
--------
- >>> from numpy import polynomial as P
+ >>> from numpy.polynomial import polyutils as pu
>>> a = np.arange(4)
- >>> P.as_series(a)
+ >>> pu.as_series(a)
[array([ 0.]), array([ 1.]), array([ 2.]), array([ 3.])]
>>> b = np.arange(6).reshape((2,3))
- >>> P.as_series(b)
+ >>> pu.as_series(b)
[array([ 0., 1., 2.]), array([ 3., 4., 5.])]
+ >>> pu.as_series((1, np.arange(3), np.arange(2, dtype=np.float16)))
+ [array([ 1.]), array([ 0., 1., 2.]), array([ 0., 1.])]
+
+ >>> pu.as_series([2, [1.1, 0.]])
+ [array([ 2.]), array([ 1.1])]
+
+ >>> pu.as_series([2, [1.1, 0.]], trim=False)
+ [array([ 2.]), array([ 1.1, 0. ])]
+
"""
arrays = [np.array(a, ndmin=1, copy=0) for a in alist]
if min([a.size for a in arrays]) == 0:
@@ -222,13 +231,13 @@ def trimcoef(c, tol=0):
Examples
--------
- >>> from numpy import polynomial as P
- >>> P.trimcoef((0,0,3,0,5,0,0))
+ >>> from numpy.polynomial import polyutils as pu
+ >>> pu.trimcoef((0,0,3,0,5,0,0))
array([ 0., 0., 3., 0., 5.])
- >>> P.trimcoef((0,0,1e-3,0,1e-5,0,0),1e-3) # item == tol is trimmed
+ >>> pu.trimcoef((0,0,1e-3,0,1e-5,0,0),1e-3) # item == tol is trimmed
array([ 0.])
>>> i = complex(0,1) # works for complex
- >>> P.trimcoef((3e-4,1e-3*(1-i),5e-4,2e-5*(1+i)), 1e-3)
+ >>> pu.trimcoef((3e-4,1e-3*(1-i),5e-4,2e-5*(1+i)), 1e-3)
array([ 0.0003+0.j , 0.0010-0.001j])
"""
@@ -319,13 +328,13 @@ def mapparms(old, new):
Examples
--------
- >>> from numpy import polynomial as P
- >>> P.mapparms((-1,1),(-1,1))
+ >>> from numpy.polynomial import polyutils as pu
+ >>> pu.mapparms((-1,1),(-1,1))
(0.0, 1.0)
- >>> P.mapparms((1,-1),(-1,1))
+ >>> pu.mapparms((1,-1),(-1,1))
(0.0, -1.0)
>>> i = complex(0,1)
- >>> P.mapparms((-i,-1),(1,i))
+ >>> pu.mapparms((-i,-1),(1,i))
((1+1j), (1+0j))
"""
@@ -375,15 +384,15 @@ def mapdomain(x, old, new):
Examples
--------
- >>> from numpy import polynomial as P
+ >>> from numpy.polynomial import polyutils as pu
>>> old_domain = (-1,1)
>>> new_domain = (0,2*np.pi)
>>> x = np.linspace(-1,1,6); x
array([-1. , -0.6, -0.2, 0.2, 0.6, 1. ])
- >>> x_out = P.mapdomain(x, old_domain, new_domain); x_out
+ >>> x_out = pu.mapdomain(x, old_domain, new_domain); x_out
array([ 0. , 1.25663706, 2.51327412, 3.76991118, 5.02654825,
6.28318531])
- >>> x - P.mapdomain(x_out, new_domain, old_domain)
+ >>> x - pu.mapdomain(x_out, new_domain, old_domain)
array([ 0., 0., 0., 0., 0., 0.])
Also works for complex numbers (and thus can be used to map any line in
diff --git a/numpy/polynomial/tests/test_printing.py b/numpy/polynomial/tests/test_printing.py
index f403812c9..939d48a86 100644
--- a/numpy/polynomial/tests/test_printing.py
+++ b/numpy/polynomial/tests/test_printing.py
@@ -7,64 +7,64 @@ from numpy.testing import run_module_suite, assert_equal
class TestStr(object):
def test_polynomial_str(self):
res = str(poly.Polynomial([0, 1]))
- tgt = 'poly([ 0. 1.])'
+ tgt = 'poly([0. 1.])'
assert_equal(res, tgt)
def test_chebyshev_str(self):
res = str(poly.Chebyshev([0, 1]))
- tgt = 'cheb([ 0. 1.])'
+ tgt = 'cheb([0. 1.])'
assert_equal(res, tgt)
def test_legendre_str(self):
res = str(poly.Legendre([0, 1]))
- tgt = 'leg([ 0. 1.])'
+ tgt = 'leg([0. 1.])'
assert_equal(res, tgt)
def test_hermite_str(self):
res = str(poly.Hermite([0, 1]))
- tgt = 'herm([ 0. 1.])'
+ tgt = 'herm([0. 1.])'
assert_equal(res, tgt)
def test_hermiteE_str(self):
res = str(poly.HermiteE([0, 1]))
- tgt = 'herme([ 0. 1.])'
+ tgt = 'herme([0. 1.])'
assert_equal(res, tgt)
def test_laguerre_str(self):
res = str(poly.Laguerre([0, 1]))
- tgt = 'lag([ 0. 1.])'
+ tgt = 'lag([0. 1.])'
assert_equal(res, tgt)
class TestRepr(object):
def test_polynomial_str(self):
res = repr(poly.Polynomial([0, 1]))
- tgt = 'Polynomial([ 0., 1.], domain=[-1, 1], window=[-1, 1])'
+ tgt = 'Polynomial([0., 1.], domain=[-1, 1], window=[-1, 1])'
assert_equal(res, tgt)
def test_chebyshev_str(self):
res = repr(poly.Chebyshev([0, 1]))
- tgt = 'Chebyshev([ 0., 1.], domain=[-1, 1], window=[-1, 1])'
+ tgt = 'Chebyshev([0., 1.], domain=[-1, 1], window=[-1, 1])'
assert_equal(res, tgt)
def test_legendre_repr(self):
res = repr(poly.Legendre([0, 1]))
- tgt = 'Legendre([ 0., 1.], domain=[-1, 1], window=[-1, 1])'
+ tgt = 'Legendre([0., 1.], domain=[-1, 1], window=[-1, 1])'
assert_equal(res, tgt)
def test_hermite_repr(self):
res = repr(poly.Hermite([0, 1]))
- tgt = 'Hermite([ 0., 1.], domain=[-1, 1], window=[-1, 1])'
+ tgt = 'Hermite([0., 1.], domain=[-1, 1], window=[-1, 1])'
assert_equal(res, tgt)
def test_hermiteE_repr(self):
res = repr(poly.HermiteE([0, 1]))
- tgt = 'HermiteE([ 0., 1.], domain=[-1, 1], window=[-1, 1])'
+ tgt = 'HermiteE([0., 1.], domain=[-1, 1], window=[-1, 1])'
assert_equal(res, tgt)
def test_laguerre_repr(self):
res = repr(poly.Laguerre([0, 1]))
- tgt = 'Laguerre([ 0., 1.], domain=[0, 1], window=[0, 1])'
+ tgt = 'Laguerre([0., 1.], domain=[0, 1], window=[0, 1])'
assert_equal(res, tgt)
diff --git a/numpy/random/mtrand/numpy.pxd b/numpy/random/mtrand/numpy.pxd
index 32b19c1ab..db6265238 100644
--- a/numpy/random/mtrand/numpy.pxd
+++ b/numpy/random/mtrand/numpy.pxd
@@ -1,4 +1,5 @@
# :Author: Travis Oliphant
+from cpython.exc cimport PyErr_Print
cdef extern from "numpy/npy_no_deprecated_api.h": pass
@@ -134,7 +135,7 @@ cdef extern from "numpy/arrayobject.h":
dtype PyArray_DescrFromType(int)
- void import_array()
+ int _import_array() except -1
# include functions that were once macros in the new api
@@ -151,3 +152,12 @@ cdef extern from "numpy/arrayobject.h":
int PyArray_TYPE(ndarray arr)
int PyArray_CHKFLAGS(ndarray arr, int flags)
object PyArray_GETITEM(ndarray arr, char *itemptr)
+
+
+# copied from cython version with addition of PyErr_Print.
+cdef inline int import_array() except -1:
+ try:
+ _import_array()
+ except Exception:
+ PyErr_Print()
+ raise ImportError("numpy.core.multiarray failed to import")
diff --git a/numpy/testing/tests/test_utils.py b/numpy/testing/tests/test_utils.py
index 286cd5285..c440d8eca 100644
--- a/numpy/testing/tests/test_utils.py
+++ b/numpy/testing/tests/test_utils.py
@@ -159,9 +159,9 @@ class TestBuildErrorMessage(unittest.TestCase):
err_msg = 'There is a mismatch'
a = build_err_msg([x, y], err_msg)
- b = ('\nItems are not equal: There is a mismatch\n ACTUAL: array([ '
- '1.00001, 2.00002, 3.00003])\n DESIRED: array([ 1.00002, '
- '2.00003, 3.00004])')
+ b = ('\nItems are not equal: There is a mismatch\n ACTUAL: array(['
+ '1.00001, 2.00002, 3.00003])\n DESIRED: array([1.00002, '
+ '2.00003, 3.00004])')
self.assertEqual(a, b)
def test_build_err_msg_no_verbose(self):
@@ -179,8 +179,8 @@ class TestBuildErrorMessage(unittest.TestCase):
err_msg = 'There is a mismatch'
a = build_err_msg([x, y], err_msg, names=('FOO', 'BAR'))
- b = ('\nItems are not equal: There is a mismatch\n FOO: array([ '
- '1.00001, 2.00002, 3.00003])\n BAR: array([ 1.00002, 2.00003, '
+ b = ('\nItems are not equal: There is a mismatch\n FOO: array(['
+ '1.00001, 2.00002, 3.00003])\n BAR: array([1.00002, 2.00003, '
'3.00004])')
self.assertEqual(a, b)
@@ -190,9 +190,9 @@ class TestBuildErrorMessage(unittest.TestCase):
err_msg = 'There is a mismatch'
a = build_err_msg([x, y], err_msg, precision=10)
- b = ('\nItems are not equal: There is a mismatch\n ACTUAL: array([ '
- '1.000000001, 2.00002 , 3.00003 ])\n DESIRED: array([ '
- '1.000000002, 2.00003 , 3.00004 ])')
+ b = ('\nItems are not equal: There is a mismatch\n ACTUAL: array(['
+ '1.000000001, 2.00002 , 3.00003 ])\n DESIRED: array(['
+ '1.000000002, 2.00003 , 3.00004 ])')
self.assertEqual(a, b)
@@ -436,8 +436,8 @@ class TestAlmostEqual(_GenericTest, unittest.TestCase):
# test with a different amount of decimal digits
# note that we only check for the formatting of the arrays themselves
- b = ('x: array([ 1.00000000001, 2.00000000002, 3.00003 '
- ' ])\n y: array([ 1.00000000002, 2.00000000003, 3.00004 ])')
+ b = ('x: array([1.00000000001, 2.00000000002, 3.00003 '
+ ' ])\n y: array([1.00000000002, 2.00000000003, 3.00004 ])')
try:
self._assert_func(x, y, decimal=12)
except AssertionError as e:
@@ -446,8 +446,8 @@ class TestAlmostEqual(_GenericTest, unittest.TestCase):
# with the default value of decimal digits, only the 3rd element differs
# note that we only check for the formatting of the arrays themselves
- b = ('x: array([ 1. , 2. , 3.00003])\n y: array([ 1. , '
- '2. , 3.00004])')
+ b = ('x: array([1. , 2. , 3.00003])\n y: array([1. , '
+ '2. , 3.00004])')
try:
self._assert_func(x, y)
except AssertionError as e: