summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--benchmarks/benchmarks/bench_ufunc.py79
-rw-r--r--doc/neps/nep-0000.rst68
-rw-r--r--doc/neps/nep-0015-merge-multiarray-umath.rst157
-rw-r--r--numpy/add_newdocs.py165
-rw-r--r--numpy/core/fromnumeric.py21
-rw-r--r--numpy/core/numeric.py2
-rw-r--r--numpy/core/src/multiarray/cblasfuncs.c105
-rw-r--r--numpy/core/src/multiarray/common.c100
-rw-r--r--numpy/core/src/multiarray/common.h13
-rw-r--r--numpy/core/src/multiarray/compiled_base.c8
-rw-r--r--numpy/core/src/multiarray/einsum.c.src28
-rw-r--r--numpy/core/src/multiarray/multiarraymodule.c98
-rw-r--r--numpy/core/tests/test_einsum.py5
-rw-r--r--numpy/distutils/misc_util.py16
-rw-r--r--numpy/lib/arraysetops.py8
-rw-r--r--numpy/lib/index_tricks.py123
-rw-r--r--numpy/lib/tests/test_arraypad.py15
-rw-r--r--numpy/lib/tests/test_function_base.py14
-rw-r--r--numpy/ma/core.py25
-rwxr-xr-xruntests.py38
20 files changed, 646 insertions, 442 deletions
diff --git a/benchmarks/benchmarks/bench_ufunc.py b/benchmarks/benchmarks/bench_ufunc.py
index 1d4e70a3a..a7e385f70 100644
--- a/benchmarks/benchmarks/bench_ufunc.py
+++ b/benchmarks/benchmarks/bench_ufunc.py
@@ -10,15 +10,17 @@ ufuncs = ['abs', 'absolute', 'add', 'arccos', 'arccosh', 'arcsin', 'arcsinh',
'bitwise_or', 'bitwise_xor', 'cbrt', 'ceil', 'conj', 'conjugate',
'copysign', 'cos', 'cosh', 'deg2rad', 'degrees', 'divide', 'divmod',
'equal', 'exp', 'exp2', 'expm1', 'fabs', 'float_power', 'floor',
- 'floor_divide', 'fmax', 'fmin', 'fmod', 'frexp', 'greater',
- 'greater_equal', 'heaviside', 'hypot', 'invert', 'isfinite', 'isinf',
- 'isnan', 'isnat', 'ldexp', 'left_shift', 'less', 'less_equal', 'log',
- 'log10', 'log1p', 'log2', 'logaddexp', 'logaddexp2', 'logical_and',
- 'logical_not', 'logical_or', 'logical_xor', 'maximum', 'minimum',
- 'mod', 'modf', 'multiply', 'negative', 'nextafter', 'not_equal',
- 'positive', 'power', 'rad2deg', 'radians', 'reciprocal', 'remainder',
- 'right_shift', 'rint', 'sign', 'signbit', 'sin', 'sinh', 'spacing',
- 'sqrt', 'square', 'subtract', 'tan', 'tanh', 'true_divide', 'trunc']
+ 'floor_divide', 'fmax', 'fmin', 'fmod', 'frexp', 'gcd', 'greater',
+ 'greater_equal', 'heaviside', 'hypot', 'invert', 'isfinite',
+ 'isinf', 'isnan', 'isnat', 'lcm', 'ldexp', 'left_shift', 'less',
+ 'less_equal', 'log', 'log10', 'log1p', 'log2', 'logaddexp',
+ 'logaddexp2', 'logical_and', 'logical_not', 'logical_or',
+ 'logical_xor', 'maximum', 'minimum', 'mod', 'modf', 'multiply',
+ 'negative', 'nextafter', 'not_equal', 'positive', 'power',
+ 'rad2deg', 'radians', 'reciprocal', 'remainder', 'right_shift',
+ 'rint', 'sign', 'signbit', 'sin', 'sinh', 'spacing', 'sqrt',
+ 'square', 'subtract', 'tan', 'tanh', 'true_divide', 'trunc']
+
for name in dir(np):
if isinstance(getattr(np, name, None), np.ufunc) and name not in ufuncs:
@@ -148,3 +150,62 @@ class Scalar(Benchmark):
def time_add_scalar_conv_complex(self):
(self.y + self.z)
+
+
+class ArgPack(object):
+ __slots__ = ['args', 'kwargs']
+ def __init__(self, *args, **kwargs):
+ self.args = args
+ self.kwargs = kwargs
+ def __repr__(self):
+ return '({})'.format(', '.join(
+ [repr(a) for a in self.args] +
+ ['{}={}'.format(k, repr(v)) for k, v in self.kwargs.items()]
+ ))
+
+
+class ArgParsing(Benchmark):
+ # In order to benchmark the speed of argument parsing, all but the
+ # out arguments are chosen such that they have no effect on the
+ # calculation. In particular, subok=True and where=True are
+ # defaults, and the dtype is the correct one (the latter will
+ # still have some effect on the search for the correct inner loop).
+ x = np.array(1.)
+ y = np.array(2.)
+ out = np.array(3.)
+ param_names = ['arg_kwarg']
+ params = [[
+ ArgPack(x, y),
+ ArgPack(x, y, out),
+ ArgPack(x, y, out=out),
+ ArgPack(x, y, out=(out,)),
+ ArgPack(x, y, out=out, subok=True, where=True),
+ ArgPack(x, y, subok=True),
+ ArgPack(x, y, subok=True, where=True),
+ ArgPack(x, y, out, subok=True, where=True)
+ ]]
+
+ def time_add_arg_parsing(self, arg_pack):
+ np.add(*arg_pack.args, **arg_pack.kwargs)
+
+
+class ArgParsingReduce(Benchmark):
+ # In order to benchmark the speed of argument parsing, all but the
+ # out arguments are chosen such that they have minimal effect on the
+ # calculation.
+ a = np.arange(2.)
+ out = np.array(0.)
+ param_names = ['arg_kwarg']
+ params = [[
+ ArgPack(a,),
+ ArgPack(a, 0),
+ ArgPack(a, axis=0),
+ ArgPack(a, 0, None),
+ ArgPack(a, axis=0, dtype=None),
+ ArgPack(a, 0, None, out),
+ ArgPack(a, axis=0, dtype=None, out=out),
+ ArgPack(a, out=out)
+ ]]
+
+ def time_add_reduce_arg_parsing(self, arg_pack):
+ np.add.reduce(*arg_pack.args, **arg_pack.kwargs)
diff --git a/doc/neps/nep-0000.rst b/doc/neps/nep-0000.rst
index 0824a5635..b451eeff7 100644
--- a/doc/neps/nep-0000.rst
+++ b/doc/neps/nep-0000.rst
@@ -97,16 +97,9 @@ status of NEPs are as follows:
All NEPs should be created with the ``Draft`` status.
-Normally, a NEP is ``Accepted`` by consensus of all interested
-Contributors. To verify that consensus has been reached, the NEP
-author or another interested party should make a post on the
-numpy-discussion mailing list proposing it for acceptance; if there
-are no substantive objections after one week, the NEP can officially
-be marked ``Accepted``, and a link to this post should be added to the
-NEP for reference.
-
-In unusual cases, the `NumPy Steering Council`_ may be asked to decide whether
-a controversial NEP is ``Accepted``.
+Eventually, after discussion, there may be a consensus that the NEP
+should be accepted – see the next section for details. At this point
+the status becomes ``Accepted``.
Once a NEP has been ``Accepted``, the reference implementation must be
completed. When the reference implementation is complete and incorporated
@@ -135,6 +128,61 @@ Process NEPs may also have a status of ``Active`` if they are never
meant to be completed, e.g. NEP 0 (this NEP).
+How a NEP becomes Accepted
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+A NEP is ``Accepted`` by consensus of all interested contributors. We
+need a concrete way to tell whether consensus has been reached. When
+you think a NEP is ready to accept, send an email to the
+numpy-discussion mailing list with a subject like:
+
+ Proposal to accept NEP #<number>: <title>
+
+In the body of your email, you should:
+
+* link to the latest version of the NEP,
+
+* briefly describe any major points of contention and how they were
+ resolved,
+
+* include a sentence like: "If there are no substantive objections
+ within 7 days from this email, then the NEP will be accepted; see
+ NEP 0 for more details."
+
+For an example, see: https://mail.python.org/pipermail/numpy-discussion/2018-June/078345.html
+
+After you send the email, you should make sure to link to the email
+thread from the ``Discussion`` section of the NEP, so that people can
+find it later.
+
+Generally the NEP author will be the one to send this email, but
+anyone can do it – the important thing is to make sure that everyone
+knows when a NEP is on the verge of acceptance, and give them a final
+chance to respond. If there's some special reason to extend this final
+comment period beyond 7 days, then that's fine, just say so in the
+email. You shouldn't do less than 7 days, because sometimes people are
+travelling or similar and need some time to respond.
+
+In general, the goal is to make sure that the community has consensus,
+not provide a rigid policy for people to try to game. When in doubt,
+err on the side of asking for more feedback and looking for
+opportunities to compromise.
+
+If the final comment period passes without any substantive objections,
+then the NEP can officially be marked ``Accepted``. You should send a
+followup email notifying the list (celebratory emoji optional but
+encouraged 🎉✨), and then update the NEP by setting its ``:Status:``
+to ``Accepted``, and its ``:Resolution:`` header to a link to your
+followup email.
+
+If there *are* substantive objections, then the NEP remains in
+``Draft`` state, discussion continues as normal, and it can be
+proposed for acceptance again later once the objections are resolved.
+
+In unusual cases, the `NumPy Steering Council`_ may be asked to decide
+whether a controversial NEP is ``Accepted``.
+
+
Maintenance
^^^^^^^^^^^
diff --git a/doc/neps/nep-0015-merge-multiarray-umath.rst b/doc/neps/nep-0015-merge-multiarray-umath.rst
new file mode 100644
index 000000000..17852220f
--- /dev/null
+++ b/doc/neps/nep-0015-merge-multiarray-umath.rst
@@ -0,0 +1,157 @@
+============================
+Merging multiarray and umath
+============================
+
+:Author: Nathaniel J. Smith <njs@pobox.com>
+:Status: Draft
+:Type: Standards Track
+:Created: 2018-02-22
+
+
+Abstract
+--------
+
+Let's merge ``numpy.core.multiarray`` and ``numpy.core.umath`` into a
+single extension module, and deprecate ``np.set_numeric_ops``.
+
+
+Background
+----------
+
+Currently, numpy's core C code is split between two separate extension
+modules.
+
+``numpy.core.multiarray`` is built from
+``numpy/core/src/multiarray/*.c``, and contains the core array
+functionality (in particular, the ``ndarray`` object).
+
+``numpy.core.umath`` is built from ``numpy/core/src/umath/*.c``, and
+contains the ufunc machinery.
+
+These two modules each expose their own separate C API, accessed via
+``import_multiarray()`` and ``import_umath()`` respectively. The idea
+is that they're supposed to be independent modules, with
+``multiarray`` as a lower-level layer with ``umath`` built on top. In
+practice this has turned out to be problematic.
+
+First, the layering isn't perfect: when you write ``ndarray +
+ndarray``, this invokes ``ndarray.__add__``, which then calls the
+ufunc ``np.add``. This means that ``ndarray`` needs to know about
+ufuncs – so instead of a clean layering, we have a circular
+dependency. To solve this, ``multiarray`` exports a somewhat
+terrifying function called ``set_numeric_ops``. The bootstrap
+procedure each time you ``import numpy`` is:
+
+1. ``multiarray`` and its ``ndarray`` object are loaded, but
+ arithmetic operations on ndarrays are broken.
+
+2. ``umath`` is loaded.
+
+3. ``set_numeric_ops`` is used to monkeypatch all the methods like
+ ``ndarray.__add__`` with objects from ``umath``.
+
+In addition, ``set_numeric_ops`` is exposed as a public API,
+``np.set_numeric_ops``.
+
+Furthermore, even when this layering does work, it ends up distorting
+the shape of our public ABI. In recent years, the most common reason
+for adding new functions to ``multiarray``\'s "public" ABI is not that
+they really need to be public or that we expect other projects to use
+them, but rather just that we need to call them from ``umath``. This
+is extremely unfortunate, because it makes our public ABI
+unnecessarily large, and since we can never remove things from it then
+this creates an ongoing maintenance burden. The way C works, you can
+have internal API that's visible to everything inside the same
+extension module, or you can have a public API that everyone can use;
+you can't (easily) have an API that's visible to multiple extension
+modules inside numpy, but not to external users.
+
+We've also increasingly been putting utility code into
+``numpy/core/src/private/``, which now contains a bunch of files which
+are ``#include``\d twice, once into ``multiarray`` and once into
+``umath``. This is pretty gross, and is purely a workaround for these
+being separate C extensions. The ``npymath`` library is also
+included in both extension modules.
+
+
+Proposed changes
+----------------
+
+This NEP proposes three changes:
+
+1. We should start building ``numpy/core/src/multiarray/*.c`` and
+ ``numpy/core/src/umath/*.c`` together into a single extension
+ module.
+
+2. Instead of ``set_numeric_ops``, we should use some new, private API
+ to set up ``ndarray.__add__`` and friends.
+
+3. We should deprecate, and eventually remove, ``np.set_numeric_ops``.
+
+
+Non-proposed changes
+--------------------
+
+We don't necessarily propose to throw away the distinction between
+multiarray/ and umath/ in terms of our source code organization:
+internal organization is useful! We just want to build them together
+into a single extension module. Of course, this does open the door for
+potential future refactorings, which we can then evaluate based on
+their merits as they come up.
+
+It also doesn't propose that we break the public C ABI. We should
+continue to provide ``import_multiarray()`` and ``import_umath()``
+functions – it's just that now both ABIs will ultimately be loaded
+from the same C library. Due to how ``import_multiarray()`` and
+``import_umath()`` are written, we'll also still need to have modules
+called ``numpy.core.multiarray`` and ``numpy.core.umath``, and they'll
+need to continue to export ``_ARRAY_API`` and ``_UFUNC_API`` objects –
+but we can make one or both of these modules be tiny shims that simply
+re-export the magic API object from where-ever it's actually defined.
+(See ``numpy/core/code_generators/generate_{numpy,ufunc}_api.py`` for
+details of how these imports work.)
+
+
+Backward compatibility
+----------------------
+
+The only compatibility break is the deprecation of ``np.set_numeric_ops``.
+
+
+Rejected alternatives
+---------------------
+
+Preserve ``set_numeric_ops`` for monkeypatching
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In discussing this NEP, one additional use case was raised for
+``set_numeric_ops``: if you have an optimized vector math library
+(e.g. Intel's MKL VML, Sleef, or Yeppp), then ``set_numeric_ops`` can
+be used to monkeypatch numpy to use these operations instead of
+numpy's built-in vector operations. But, even if we grant that this is
+a great idea, using ``set_numeric_ops`` isn't actually the best way to
+do it. All ``set_numeric_ops`` allows you to do is take over Python's
+syntactic operators (``+``, ``*``, etc.) on ndarrays; it doesn't let
+you affect operations called via other APIs (e.g., ``np.add``), or
+operations that don't have built-in syntax (e.g., ``np.exp``). Also,
+you have to reimplement the whole ufunc machinery, instead of just the
+core loop. On the other hand, the `PyUFunc_ReplaceLoopBySignature
+<https://docs.scipy.org/doc/numpy/reference/c-api.ufunc.html#c.PyUFunc_ReplaceLoopBySignature>`__
+API – which was added in 2006 – allows replacement of the inner loops
+of arbitrary ufuncs. This is both simpler and more powerful – e.g.
+replacing the inner loop of ``np.add`` means your code will
+automatically be used for both ``ndarray + ndarray`` as well as direct
+calls to ``np.add``. So this doesn't seem like a good reason to not
+deprecate ``set_numeric_ops``.
+
+
+Discussion
+----------
+
+* https://mail.python.org/pipermail/numpy-discussion/2018-March/077764.html
+* https://mail.python.org/pipermail/numpy-discussion/2018-June/078345.html
+
+Copyright
+---------
+
+This document has been placed in the public domain.
diff --git a/numpy/add_newdocs.py b/numpy/add_newdocs.py
index 09cae54b1..a882bf1e0 100644
--- a/numpy/add_newdocs.py
+++ b/numpy/add_newdocs.py
@@ -1577,71 +1577,72 @@ add_newdoc('numpy.core.multiarray', 'where',
"""
where(condition, [x, y])
- Return elements, either from `x` or `y`, depending on `condition`.
+ Return elements chosen from `x` or `y` depending on `condition`.
- If only `condition` is given, return ``condition.nonzero()``.
+ .. note::
+ When only `condition` is provided, this function is a shorthand for
+ ``np.asarray(condition).nonzero()``. Using `nonzero` directly should be
+ preferred, as it behaves correctly for subclasses. The rest of this
+ documentation covers only the case where all three arguments are
+ provided.
Parameters
----------
condition : array_like, bool
- When True, yield `x`, otherwise yield `y`.
- x, y : array_like, optional
+ Where True, yield `x`, otherwise yield `y`.
+ x, y : array_like
Values from which to choose. `x`, `y` and `condition` need to be
broadcastable to some shape.
Returns
-------
- out : ndarray or tuple of ndarrays
- If both `x` and `y` are specified, the output array contains
- elements of `x` where `condition` is True, and elements from
- `y` elsewhere.
-
- If only `condition` is given, return the tuple
- ``condition.nonzero()``, the indices where `condition` is True.
+ out : ndarray
+ An array with elements from `x` where `condition` is True, and elements
+ from `y` elsewhere.
See Also
--------
- nonzero, choose
+ choose
+ nonzero : The function that is called when x and y are omitted
Notes
-----
- If `x` and `y` are given and input arrays are 1-D, `where` is
- equivalent to::
+ If all the arrays are 1-D, `where` is equivalent to::
- [xv if c else yv for (c,xv,yv) in zip(condition,x,y)]
+ [xv if c else yv
+ for c, xv, yv in zip(condition, x, y)]
Examples
--------
+ >>> a = np.arange(10)
+ >>> a
+ array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
+ >>> np.where(a < 5, a, 10*a)
+ array([ 0, 1, 2, 3, 4, 50, 60, 70, 80, 90])
+
+ This can be used on multidimensional arrays too:
+
>>> np.where([[True, False], [True, True]],
... [[1, 2], [3, 4]],
... [[9, 8], [7, 6]])
array([[1, 8],
[3, 4]])
- >>> np.where([[0, 1], [1, 0]])
- (array([0, 1]), array([1, 0]))
-
- >>> x = np.arange(9.).reshape(3, 3)
- >>> np.where( x > 5 )
- (array([2, 2, 2]), array([0, 1, 2]))
- >>> x[np.where( x > 3.0 )] # Note: result is 1D.
- array([ 4., 5., 6., 7., 8.])
- >>> np.where(x < 5, x, -1) # Note: broadcasting.
- array([[ 0., 1., 2.],
- [ 3., 4., -1.],
- [-1., -1., -1.]])
-
- Find the indices of elements of `x` that are in `goodvalues`.
-
- >>> goodvalues = [3, 4, 7]
- >>> ix = np.isin(x, goodvalues)
- >>> ix
- array([[False, False, False],
- [ True, True, False],
- [False, True, False]])
- >>> np.where(ix)
- (array([1, 1, 2]), array([0, 1, 1]))
+ The shapes of x, y, and the condition are broadcast together:
+ >>> x, y = np.ogrid[:3, :4]
+ >>> np.where(x < y, x, 10 + y) # both x and 10+y are broadcast
+ array([[10, 0, 0, 0],
+ [10, 11, 1, 1],
+ [10, 11, 12, 2]])
+
+ >>> a = np.array([[0, 1, 2],
+ ... [0, 2, 4],
+ ... [0, 3, 6]])
+ >>> np.where(a < 4, a, -1) # -1 is broadcast
+ array([[ 0, 1, 2],
+ [ 0, 2, -1],
+ [ 0, 3, -1]])
""")
@@ -7255,94 +7256,6 @@ add_newdoc('numpy.core.multiarray', 'datetime_data',
numpy.datetime64('2010-01-01T00:00:00','25s')
""")
-##############################################################################
-#
-# nd_grid instances
-#
-##############################################################################
-
-add_newdoc('numpy.lib.index_tricks', 'mgrid',
- """
- `nd_grid` instance which returns a dense multi-dimensional "meshgrid".
-
- An instance of `numpy.lib.index_tricks.nd_grid` which returns an dense
- (or fleshed out) mesh-grid when indexed, so that each returned argument
- has the same shape. The dimensions and number of the output arrays are
- equal to the number of indexing dimensions. If the step length is not a
- complex number, then the stop is not inclusive.
-
- However, if the step length is a **complex number** (e.g. 5j), then
- the integer part of its magnitude is interpreted as specifying the
- number of points to create between the start and stop values, where
- the stop value **is inclusive**.
-
- Returns
- ----------
- mesh-grid `ndarrays` all of the same dimensions
-
- See Also
- --------
- numpy.lib.index_tricks.nd_grid : class of `ogrid` and `mgrid` objects
- ogrid : like mgrid but returns open (not fleshed out) mesh grids
- r_ : array concatenator
-
- Examples
- --------
- >>> np.mgrid[0:5,0:5]
- array([[[0, 0, 0, 0, 0],
- [1, 1, 1, 1, 1],
- [2, 2, 2, 2, 2],
- [3, 3, 3, 3, 3],
- [4, 4, 4, 4, 4]],
- [[0, 1, 2, 3, 4],
- [0, 1, 2, 3, 4],
- [0, 1, 2, 3, 4],
- [0, 1, 2, 3, 4],
- [0, 1, 2, 3, 4]]])
- >>> np.mgrid[-1:1:5j]
- array([-1. , -0.5, 0. , 0.5, 1. ])
-
- """)
-
-add_newdoc('numpy.lib.index_tricks', 'ogrid',
- """
- `nd_grid` instance which returns an open multi-dimensional "meshgrid".
-
- An instance of `numpy.lib.index_tricks.nd_grid` which returns an open
- (i.e. not fleshed out) mesh-grid when indexed, so that only one dimension
- of each returned array is greater than 1. The dimension and number of the
- output arrays are equal to the number of indexing dimensions. If the step
- length is not a complex number, then the stop is not inclusive.
-
- However, if the step length is a **complex number** (e.g. 5j), then
- the integer part of its magnitude is interpreted as specifying the
- number of points to create between the start and stop values, where
- the stop value **is inclusive**.
-
- Returns
- ----------
- mesh-grid `ndarrays` with only one dimension :math:`\\neq 1`
-
- See Also
- --------
- np.lib.index_tricks.nd_grid : class of `ogrid` and `mgrid` objects
- mgrid : like `ogrid` but returns dense (or fleshed out) mesh grids
- r_ : array concatenator
-
- Examples
- --------
- >>> from numpy import ogrid
- >>> ogrid[-1:1:5j]
- array([-1. , -0.5, 0. , 0.5, 1. ])
- >>> ogrid[0:5,0:5]
- [array([[0],
- [1],
- [2],
- [3],
- [4]]), array([[0, 1, 2, 3, 4]])]
-
- """)
-
##############################################################################
#
diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py
index 5b67a0dc5..373e0fde8 100644
--- a/numpy/core/fromnumeric.py
+++ b/numpy/core/fromnumeric.py
@@ -1615,16 +1615,16 @@ def nonzero(a):
Examples
--------
- >>> x = np.array([[1,0,0], [0,2,0], [1,1,0]])
+ >>> x = np.array([[3, 0, 0], [0, 4, 0], [5, 6, 0]])
>>> x
- array([[1, 0, 0],
- [0, 2, 0],
- [1, 1, 0]])
+ array([[3, 0, 0],
+ [0, 4, 0],
+ [5, 6, 0]])
>>> np.nonzero(x)
(array([0, 1, 2, 2]), array([0, 1, 0, 1]))
>>> x[np.nonzero(x)]
- array([1, 2, 1, 1])
+ array([3, 4, 5, 6])
>>> np.transpose(np.nonzero(x))
array([[0, 0],
[1, 1],
@@ -1636,7 +1636,7 @@ def nonzero(a):
boolean array and since False is interpreted as 0, np.nonzero(a > 3)
yields the indices of the `a` where the condition is true.
- >>> a = np.array([[1,2,3],[4,5,6],[7,8,9]])
+ >>> a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> a > 3
array([[False, False, False],
[ True, True, True],
@@ -1644,7 +1644,14 @@ def nonzero(a):
>>> np.nonzero(a > 3)
(array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))
- The ``nonzero`` method of the boolean array can also be called.
+ Using this result to index `a` is equivalent to using the mask directly:
+
+ >>> a[np.nonzero(a > 3)]
+ array([4, 5, 6, 7, 8, 9])
+ >>> a[a > 3] # prefer this spelling
+ array([4, 5, 6, 7, 8, 9])
+
+ ``nonzero`` can also be called as a method of the array.
>>> (a > 3).nonzero()
(array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))
diff --git a/numpy/core/numeric.py b/numpy/core/numeric.py
index 106f0ccfe..b49a7f551 100644
--- a/numpy/core/numeric.py
+++ b/numpy/core/numeric.py
@@ -1897,7 +1897,7 @@ def fromfunction(function, shape, **kwargs):
The result of the call to `function` is passed back directly.
Therefore the shape of `fromfunction` is completely determined by
`function`. If `function` returns a scalar value, the shape of
- `fromfunction` would match the `shape` parameter.
+ `fromfunction` would not match the `shape` parameter.
See Also
--------
diff --git a/numpy/core/src/multiarray/cblasfuncs.c b/numpy/core/src/multiarray/cblasfuncs.c
index c941bb29b..6460c5db1 100644
--- a/numpy/core/src/multiarray/cblasfuncs.c
+++ b/numpy/core/src/multiarray/cblasfuncs.c
@@ -12,32 +12,6 @@
#include "npy_cblas.h"
#include "arraytypes.h"
#include "common.h"
-#include "mem_overlap.h"
-
-
-/*
- * Helper: call appropriate BLAS dot function for typenum.
- * Strides are NumPy strides.
- */
-static void
-blas_dot(int typenum, npy_intp n,
- void *a, npy_intp stridea, void *b, npy_intp strideb, void *res)
-{
- switch (typenum) {
- case NPY_DOUBLE:
- DOUBLE_dot(a, stridea, b, strideb, res, n, NULL);
- break;
- case NPY_FLOAT:
- FLOAT_dot(a, stridea, b, strideb, res, n, NULL);
- break;
- case NPY_CDOUBLE:
- CDOUBLE_dot(a, stridea, b, strideb, res, n, NULL);
- break;
- case NPY_CFLOAT:
- CFLOAT_dot(a, stridea, b, strideb, res, n, NULL);
- break;
- }
-}
static const double oneD[2] = {1.0, 0.0}, zeroD[2] = {0.0, 0.0};
@@ -227,6 +201,7 @@ _bad_strides(PyArrayObject *ap)
return 0;
}
+
/*
* dot(a,b)
* Returns the dot product of a and b for arrays of floating point types.
@@ -379,77 +354,9 @@ cblas_matrixproduct(int typenum, PyArrayObject *ap1, PyArrayObject *ap2,
}
}
- if (out != NULL) {
- int d;
-
- /* verify that out is usable */
- if (PyArray_NDIM(out) != nd ||
- PyArray_TYPE(out) != typenum ||
- !PyArray_ISCARRAY(out)) {
-
- PyErr_SetString(PyExc_ValueError,
- "output array is not acceptable (must have the right datatype, "
- "number of dimensions, and be a C-Array)");
- goto fail;
- }
- for (d = 0; d < nd; ++d) {
- if (dimensions[d] != PyArray_DIM(out, d)) {
- PyErr_SetString(PyExc_ValueError,
- "output array has wrong dimensions");
- goto fail;
- }
- }
-
- /* check for memory overlap */
- if (!(solve_may_share_memory(out, ap1, 1) == 0 &&
- solve_may_share_memory(out, ap2, 1) == 0)) {
- /* allocate temporary output array */
- out_buf = (PyArrayObject *)PyArray_NewLikeArray(out, NPY_CORDER,
- NULL, 0);
- if (out_buf == NULL) {
- goto fail;
- }
-
- /* set copy-back */
- Py_INCREF(out);
- if (PyArray_SetWritebackIfCopyBase(out_buf, out) < 0) {
- Py_DECREF(out);
- goto fail;
- }
- }
- else {
- Py_INCREF(out);
- out_buf = out;
- }
- Py_INCREF(out);
- result = out;
- }
- else {
- double prior1, prior2;
- PyTypeObject *subtype;
- PyObject *tmp;
-
- /* Choose which subtype to return */
- if (Py_TYPE(ap1) != Py_TYPE(ap2)) {
- prior2 = PyArray_GetPriority((PyObject *)ap2, 0.0);
- prior1 = PyArray_GetPriority((PyObject *)ap1, 0.0);
- subtype = (prior2 > prior1 ? Py_TYPE(ap2) : Py_TYPE(ap1));
- }
- else {
- prior1 = prior2 = 0.0;
- subtype = Py_TYPE(ap1);
- }
-
- tmp = (PyObject *)(prior2 > prior1 ? ap2 : ap1);
-
- out_buf = (PyArrayObject *)PyArray_New(subtype, nd, dimensions,
- typenum, NULL, NULL, 0, 0, tmp);
- if (out_buf == NULL) {
- goto fail;
- }
-
- Py_INCREF(out_buf);
- result = out_buf;
+ out_buf = new_array_for_sum(ap1, ap2, out, nd, dimensions, typenum, &result);
+ if (out_buf == NULL) {
+ goto fail;
}
numbytes = PyArray_NBYTES(out_buf);
@@ -617,10 +524,10 @@ cblas_matrixproduct(int typenum, PyArrayObject *ap1, PyArrayObject *ap2,
NPY_BEGIN_ALLOW_THREADS;
/* Dot product between two vectors -- Level 1 BLAS */
- blas_dot(typenum, l,
+ PyArray_DESCR(out_buf)->f->dotfunc(
PyArray_DATA(ap1), PyArray_STRIDE(ap1, (ap1shape == _row)),
PyArray_DATA(ap2), PyArray_STRIDE(ap2, 0),
- PyArray_DATA(out_buf));
+ PyArray_DATA(out_buf), l, NULL);
NPY_END_ALLOW_THREADS;
}
else if (ap1shape == _matrix && ap2shape != _matrix) {
diff --git a/numpy/core/src/multiarray/common.c b/numpy/core/src/multiarray/common.c
index c70f8526e..4f695fdc7 100644
--- a/numpy/core/src/multiarray/common.c
+++ b/numpy/core/src/multiarray/common.c
@@ -15,6 +15,7 @@
#include "buffer.h"
#include "get_attr_string.h"
+#include "mem_overlap.h"
/*
* The casting to use for implicit assignment operations resulting from
@@ -852,3 +853,102 @@ _may_have_objects(PyArray_Descr *dtype)
return (PyDataType_HASFIELDS(base) ||
PyDataType_FLAGCHK(base, NPY_ITEM_HASOBJECT) );
}
+
+/*
+ * Make a new empty array, of the passed size, of a type that takes the
+ * priority of ap1 and ap2 into account.
+ *
+ * If `out` is non-NULL, memory overlap is checked with ap1 and ap2, and an
+ * updateifcopy temporary array may be returned. If `result` is non-NULL, the
+ * output array to be returned (`out` if non-NULL and the newly allocated array
+ * otherwise) is incref'd and put to *result.
+ */
+NPY_NO_EXPORT PyArrayObject *
+new_array_for_sum(PyArrayObject *ap1, PyArrayObject *ap2, PyArrayObject* out,
+ int nd, npy_intp dimensions[], int typenum, PyArrayObject **result)
+{
+ PyArrayObject *out_buf;
+
+ if (out) {
+ int d;
+
+ /* verify that out is usable */
+ if (PyArray_NDIM(out) != nd ||
+ PyArray_TYPE(out) != typenum ||
+ !PyArray_ISCARRAY(out)) {
+ PyErr_SetString(PyExc_ValueError,
+ "output array is not acceptable (must have the right datatype, "
+ "number of dimensions, and be a C-Array)");
+ return 0;
+ }
+ for (d = 0; d < nd; ++d) {
+ if (dimensions[d] != PyArray_DIM(out, d)) {
+ PyErr_SetString(PyExc_ValueError,
+ "output array has wrong dimensions");
+ return 0;
+ }
+ }
+
+ /* check for memory overlap */
+ if (!(solve_may_share_memory(out, ap1, 1) == 0 &&
+ solve_may_share_memory(out, ap2, 1) == 0)) {
+ /* allocate temporary output array */
+ out_buf = (PyArrayObject *)PyArray_NewLikeArray(out, NPY_CORDER,
+ NULL, 0);
+ if (out_buf == NULL) {
+ return NULL;
+ }
+
+ /* set copy-back */
+ Py_INCREF(out);
+ if (PyArray_SetWritebackIfCopyBase(out_buf, out) < 0) {
+ Py_DECREF(out);
+ Py_DECREF(out_buf);
+ return NULL;
+ }
+ }
+ else {
+ Py_INCREF(out);
+ out_buf = out;
+ }
+
+ if (result) {
+ Py_INCREF(out);
+ *result = out;
+ }
+
+ return out_buf;
+ }
+ else {
+ PyTypeObject *subtype;
+ double prior1, prior2;
+ /*
+ * Need to choose an output array that can hold a sum
+ * -- use priority to determine which subtype.
+ */
+ if (Py_TYPE(ap2) != Py_TYPE(ap1)) {
+ prior2 = PyArray_GetPriority((PyObject *)ap2, 0.0);
+ prior1 = PyArray_GetPriority((PyObject *)ap1, 0.0);
+ subtype = (prior2 > prior1 ? Py_TYPE(ap2) : Py_TYPE(ap1));
+ }
+ else {
+ prior1 = prior2 = 0.0;
+ subtype = Py_TYPE(ap1);
+ }
+
+ out_buf = (PyArrayObject *)PyArray_New(subtype, nd, dimensions,
+ typenum, NULL, NULL, 0, 0,
+ (PyObject *)
+ (prior2 > prior1 ? ap2 : ap1));
+
+ if (out_buf != NULL && result) {
+ Py_INCREF(out_buf);
+ *result = out_buf;
+ }
+
+ return out_buf;
+ }
+}
+
+
+
diff --git a/numpy/core/src/multiarray/common.h b/numpy/core/src/multiarray/common.h
index ae9b960c8..db0a49920 100644
--- a/numpy/core/src/multiarray/common.h
+++ b/numpy/core/src/multiarray/common.h
@@ -283,4 +283,17 @@ blas_stride(npy_intp stride, unsigned itemsize)
#include "ucsnarrow.h"
+/*
+ * Make a new empty array, of the passed size, of a type that takes the
+ * priority of ap1 and ap2 into account.
+ *
+ * If `out` is non-NULL, memory overlap is checked with ap1 and ap2, and an
+ * updateifcopy temporary array may be returned. If `result` is non-NULL, the
+ * output array to be returned (`out` if non-NULL and the newly allocated array
+ * otherwise) is incref'd and put to *result.
+ */
+NPY_NO_EXPORT PyArrayObject *
+new_array_for_sum(PyArrayObject *ap1, PyArrayObject *ap2, PyArrayObject* out,
+ int nd, npy_intp dimensions[], int typenum, PyArrayObject **result);
+
#endif
diff --git a/numpy/core/src/multiarray/compiled_base.c b/numpy/core/src/multiarray/compiled_base.c
index bcb44f6d1..8c140f5e2 100644
--- a/numpy/core/src/multiarray/compiled_base.c
+++ b/numpy/core/src/multiarray/compiled_base.c
@@ -654,6 +654,10 @@ arr_interp(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwdict)
else if (j == lenxp - 1) {
dres[i] = dy[j];
}
+ else if (dx[j] == x_val) {
+ /* Avoid potential non-finite interpolation */
+ dres[i] = dy[j];
+ }
else {
const npy_double slope = (slopes != NULL) ? slopes[j] :
(dy[j+1] - dy[j]) / (dx[j+1] - dx[j]);
@@ -822,6 +826,10 @@ arr_interp_complex(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwdict)
else if (j == lenxp - 1) {
dres[i] = dy[j];
}
+ else if (dx[j] == x_val) {
+ /* Avoid potential non-finite interpolation */
+ dres[i] = dy[j];
+ }
else {
if (slopes!=NULL) {
dres[i].real = slopes[j].real*(x_val - dx[j]) + dy[j].real;
diff --git a/numpy/core/src/multiarray/einsum.c.src b/numpy/core/src/multiarray/einsum.c.src
index 33184d99a..1765982a0 100644
--- a/numpy/core/src/multiarray/einsum.c.src
+++ b/numpy/core/src/multiarray/einsum.c.src
@@ -2767,11 +2767,11 @@ PyArray_EinsteinSum(char *subscripts, npy_intp nop,
goto fail;
}
- /* Initialize the output to all zeros and reset the iterator */
+ /* Initialize the output to all zeros */
ret = NpyIter_GetOperandArray(iter)[nop];
- Py_INCREF(ret);
- PyArray_AssignZero(ret, NULL);
-
+ if (PyArray_AssignZero(ret, NULL) < 0) {
+ goto fail;
+ }
/***************************/
/*
@@ -2785,16 +2785,12 @@ PyArray_EinsteinSum(char *subscripts, npy_intp nop,
case 1:
if (ndim == 2) {
if (unbuffered_loop_nop1_ndim2(iter) < 0) {
- Py_DECREF(ret);
- ret = NULL;
goto fail;
}
goto finish;
}
else if (ndim == 3) {
if (unbuffered_loop_nop1_ndim3(iter) < 0) {
- Py_DECREF(ret);
- ret = NULL;
goto fail;
}
goto finish;
@@ -2803,16 +2799,12 @@ PyArray_EinsteinSum(char *subscripts, npy_intp nop,
case 2:
if (ndim == 2) {
if (unbuffered_loop_nop2_ndim2(iter) < 0) {
- Py_DECREF(ret);
- ret = NULL;
goto fail;
}
goto finish;
}
else if (ndim == 3) {
if (unbuffered_loop_nop2_ndim3(iter) < 0) {
- Py_DECREF(ret);
- ret = NULL;
goto fail;
}
goto finish;
@@ -2823,7 +2815,6 @@ PyArray_EinsteinSum(char *subscripts, npy_intp nop,
/***************************/
if (NpyIter_Reset(iter, NULL) != NPY_SUCCEED) {
- Py_DECREF(ret);
goto fail;
}
@@ -2845,8 +2836,6 @@ PyArray_EinsteinSum(char *subscripts, npy_intp nop,
if (sop == NULL) {
PyErr_SetString(PyExc_TypeError,
"invalid data type for einsum");
- Py_DECREF(ret);
- ret = NULL;
}
else if (NpyIter_GetIterSize(iter) != 0) {
NpyIter_IterNextFunc *iternext;
@@ -2858,7 +2847,6 @@ PyArray_EinsteinSum(char *subscripts, npy_intp nop,
iternext = NpyIter_GetIterNext(iter, NULL);
if (iternext == NULL) {
NpyIter_Deallocate(iter);
- Py_DECREF(ret);
goto fail;
}
dataptr = NpyIter_GetDataPtrArray(iter);
@@ -2874,12 +2862,16 @@ PyArray_EinsteinSum(char *subscripts, npy_intp nop,
/* If the API was needed, it may have thrown an error */
if (NpyIter_IterationNeedsAPI(iter) && PyErr_Occurred()) {
- Py_DECREF(ret);
- ret = NULL;
+ goto fail;
}
}
finish:
+ if (out != NULL) {
+ ret = out;
+ }
+ Py_INCREF(ret);
+
NpyIter_Deallocate(iter);
for (iop = 0; iop < nop; ++iop) {
Py_DECREF(op[iop]);
diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c
index f78a748c0..e6af5a81e 100644
--- a/numpy/core/src/multiarray/multiarraymodule.c
+++ b/numpy/core/src/multiarray/multiarraymodule.c
@@ -800,102 +800,6 @@ PyArray_CanCoerceScalar(int thistype, int neededtype,
return 0;
}
-/*
- * Make a new empty array, of the passed size, of a type that takes the
- * priority of ap1 and ap2 into account.
- *
- * If `out` is non-NULL, memory overlap is checked with ap1 and ap2, and an
- * updateifcopy temporary array may be returned. If `result` is non-NULL, the
- * output array to be returned (`out` if non-NULL and the newly allocated array
- * otherwise) is incref'd and put to *result.
- */
-static PyArrayObject *
-new_array_for_sum(PyArrayObject *ap1, PyArrayObject *ap2, PyArrayObject* out,
- int nd, npy_intp dimensions[], int typenum, PyArrayObject **result)
-{
- PyArrayObject *out_buf;
-
- if (out) {
- int d;
-
- /* verify that out is usable */
- if (PyArray_NDIM(out) != nd ||
- PyArray_TYPE(out) != typenum ||
- !PyArray_ISCARRAY(out)) {
- PyErr_SetString(PyExc_ValueError,
- "output array is not acceptable (must have the right datatype, "
- "number of dimensions, and be a C-Array)");
- return 0;
- }
- for (d = 0; d < nd; ++d) {
- if (dimensions[d] != PyArray_DIM(out, d)) {
- PyErr_SetString(PyExc_ValueError,
- "output array has wrong dimensions");
- return 0;
- }
- }
-
- /* check for memory overlap */
- if (!(solve_may_share_memory(out, ap1, 1) == 0 &&
- solve_may_share_memory(out, ap2, 1) == 0)) {
- /* allocate temporary output array */
- out_buf = (PyArrayObject *)PyArray_NewLikeArray(out, NPY_CORDER,
- NULL, 0);
- if (out_buf == NULL) {
- return NULL;
- }
-
- /* set copy-back */
- Py_INCREF(out);
- if (PyArray_SetWritebackIfCopyBase(out_buf, out) < 0) {
- Py_DECREF(out);
- Py_DECREF(out_buf);
- return NULL;
- }
- }
- else {
- Py_INCREF(out);
- out_buf = out;
- }
-
- if (result) {
- Py_INCREF(out);
- *result = out;
- }
-
- return out_buf;
- }
- else {
- PyTypeObject *subtype;
- double prior1, prior2;
- /*
- * Need to choose an output array that can hold a sum
- * -- use priority to determine which subtype.
- */
- if (Py_TYPE(ap2) != Py_TYPE(ap1)) {
- prior2 = PyArray_GetPriority((PyObject *)ap2, 0.0);
- prior1 = PyArray_GetPriority((PyObject *)ap1, 0.0);
- subtype = (prior2 > prior1 ? Py_TYPE(ap2) : Py_TYPE(ap1));
- }
- else {
- prior1 = prior2 = 0.0;
- subtype = Py_TYPE(ap1);
- }
-
- out_buf = (PyArrayObject *)PyArray_New(subtype, nd, dimensions,
- typenum, NULL, NULL, 0, 0,
- (PyObject *)
- (prior2 > prior1 ? ap2 : ap1));
-
- if (out_buf != NULL && result) {
- Py_INCREF(out_buf);
- *result = out_buf;
- }
-
- return out_buf;
- }
-}
-
/* Could perhaps be redone to not make contiguous arrays */
/*NUMPY_API
@@ -1101,7 +1005,7 @@ PyArray_MatrixProduct2(PyObject *op1, PyObject *op2, PyArrayObject* out)
NPY_BEGIN_THREADS_DESCR(PyArray_DESCR(ap2));
while (it1->index < it1->size) {
while (it2->index < it2->size) {
- dot(it1->dataptr, is1, it2->dataptr, is2, op, l, out_buf);
+ dot(it1->dataptr, is1, it2->dataptr, is2, op, l, NULL);
op += os;
PyArray_ITER_NEXT(it2);
}
diff --git a/numpy/core/tests/test_einsum.py b/numpy/core/tests/test_einsum.py
index 647738831..a72079218 100644
--- a/numpy/core/tests/test_einsum.py
+++ b/numpy/core/tests/test_einsum.py
@@ -730,6 +730,11 @@ class TestEinSum(object):
res = np.einsum('...ij,...jk->...ik', a, a, out=out)
assert_equal(res, tgt)
+ def test_out_is_res(self):
+ a = np.arange(9).reshape(3, 3)
+ res = np.einsum('...ij,...jk->...ik', a, a, out=a)
+ assert res is a
+
def optimize_compare(self, string):
# Tests all paths of the optimization function against
# conventional einsum
diff --git a/numpy/distutils/misc_util.py b/numpy/distutils/misc_util.py
index 186ed949d..8305aeae5 100644
--- a/numpy/distutils/misc_util.py
+++ b/numpy/distutils/misc_util.py
@@ -2300,19 +2300,9 @@ import sys
extra_dll_dir = os.path.join(os.path.dirname(__file__), '.libs')
-if os.path.isdir(extra_dll_dir) and sys.platform == 'win32':
- try:
- from ctypes import windll, c_wchar_p
- _AddDllDirectory = windll.kernel32.AddDllDirectory
- _AddDllDirectory.argtypes = [c_wchar_p]
- # Needed to initialize AddDllDirectory modifications
- windll.kernel32.SetDefaultDllDirectories(0x1000)
- except AttributeError:
- def _AddDllDirectory(dll_directory):
- os.environ.setdefault('PATH', '')
- os.environ['PATH'] += os.pathsep + dll_directory
-
- _AddDllDirectory(extra_dll_dir)
+if sys.platform == 'win32' and os.path.isdir(extra_dll_dir):
+ os.environ.setdefault('PATH', '')
+ os.environ['PATH'] += os.pathsep + extra_dll_dir
""")
diff --git a/numpy/lib/arraysetops.py b/numpy/lib/arraysetops.py
index 4d3f35183..5880ea154 100644
--- a/numpy/lib/arraysetops.py
+++ b/numpy/lib/arraysetops.py
@@ -607,6 +607,14 @@ def isin(element, test_elements, assume_unique=False, invert=False):
[ True, False]])
>>> element[mask]
array([2, 4])
+
+ The indices of the matched values can be obtained with `nonzero`:
+
+ >>> np.nonzero(mask)
+ (array([0, 1]), array([1, 0]))
+
+ The test can also be inverted:
+
>>> mask = np.isin(element, test_elements, invert=True)
>>> mask
array([[ True, False],
diff --git a/numpy/lib/index_tricks.py b/numpy/lib/index_tricks.py
index d2139338e..009e6d229 100644
--- a/numpy/lib/index_tricks.py
+++ b/numpy/lib/index_tricks.py
@@ -121,39 +121,13 @@ class nd_grid(object):
Notes
-----
Two instances of `nd_grid` are made available in the NumPy namespace,
- `mgrid` and `ogrid`::
+ `mgrid` and `ogrid`, approximately defined as::
mgrid = nd_grid(sparse=False)
ogrid = nd_grid(sparse=True)
Users should use these pre-defined instances instead of using `nd_grid`
directly.
-
- Examples
- --------
- >>> mgrid = np.lib.index_tricks.nd_grid()
- >>> mgrid[0:5,0:5]
- array([[[0, 0, 0, 0, 0],
- [1, 1, 1, 1, 1],
- [2, 2, 2, 2, 2],
- [3, 3, 3, 3, 3],
- [4, 4, 4, 4, 4]],
- [[0, 1, 2, 3, 4],
- [0, 1, 2, 3, 4],
- [0, 1, 2, 3, 4],
- [0, 1, 2, 3, 4],
- [0, 1, 2, 3, 4]]])
- >>> mgrid[-1:1:5j]
- array([-1. , -0.5, 0. , 0.5, 1. ])
-
- >>> ogrid = np.lib.index_tricks.nd_grid(sparse=True)
- >>> ogrid[0:5,0:5]
- [array([[0],
- [1],
- [2],
- [3],
- [4]]), array([[0, 1, 2, 3, 4]])]
-
"""
def __init__(self, sparse=False):
@@ -223,10 +197,97 @@ class nd_grid(object):
def __len__(self):
return 0
-mgrid = nd_grid(sparse=False)
-ogrid = nd_grid(sparse=True)
-mgrid.__doc__ = None # set in numpy.add_newdocs
-ogrid.__doc__ = None # set in numpy.add_newdocs
+
+class MGridClass(nd_grid):
+ """
+ `nd_grid` instance which returns a dense multi-dimensional "meshgrid".
+
+    An instance of `numpy.lib.index_tricks.nd_grid` which returns an dense
+ (or fleshed out) mesh-grid when indexed, so that each returned argument
+ has the same shape. The dimensions and number of the output arrays are
+ equal to the number of indexing dimensions. If the step length is not a
+ complex number, then the stop is not inclusive.
+
+ However, if the step length is a **complex number** (e.g. 5j), then
+ the integer part of its magnitude is interpreted as specifying the
+ number of points to create between the start and stop values, where
+ the stop value **is inclusive**.
+
+ Returns
+    -------
+ mesh-grid `ndarrays` all of the same dimensions
+
+ See Also
+ --------
+ numpy.lib.index_tricks.nd_grid : class of `ogrid` and `mgrid` objects
+ ogrid : like mgrid but returns open (not fleshed out) mesh grids
+ r_ : array concatenator
+
+ Examples
+ --------
+ >>> np.mgrid[0:5,0:5]
+ array([[[0, 0, 0, 0, 0],
+ [1, 1, 1, 1, 1],
+ [2, 2, 2, 2, 2],
+ [3, 3, 3, 3, 3],
+ [4, 4, 4, 4, 4]],
+ [[0, 1, 2, 3, 4],
+ [0, 1, 2, 3, 4],
+ [0, 1, 2, 3, 4],
+ [0, 1, 2, 3, 4],
+ [0, 1, 2, 3, 4]]])
+ >>> np.mgrid[-1:1:5j]
+ array([-1. , -0.5, 0. , 0.5, 1. ])
+
+ """
+ def __init__(self):
+ super(MGridClass, self).__init__(sparse=False)
+
+mgrid = MGridClass()
+
+class OGridClass(nd_grid):
+ """
+ `nd_grid` instance which returns an open multi-dimensional "meshgrid".
+
+ An instance of `numpy.lib.index_tricks.nd_grid` which returns an open
+ (i.e. not fleshed out) mesh-grid when indexed, so that only one dimension
+ of each returned array is greater than 1. The dimension and number of the
+ output arrays are equal to the number of indexing dimensions. If the step
+ length is not a complex number, then the stop is not inclusive.
+
+ However, if the step length is a **complex number** (e.g. 5j), then
+ the integer part of its magnitude is interpreted as specifying the
+ number of points to create between the start and stop values, where
+ the stop value **is inclusive**.
+
+ Returns
+    -------
+ mesh-grid `ndarrays` with only one dimension :math:`\\neq 1`
+
+ See Also
+ --------
+ np.lib.index_tricks.nd_grid : class of `ogrid` and `mgrid` objects
+ mgrid : like `ogrid` but returns dense (or fleshed out) mesh grids
+ r_ : array concatenator
+
+ Examples
+ --------
+ >>> from numpy import ogrid
+ >>> ogrid[-1:1:5j]
+ array([-1. , -0.5, 0. , 0.5, 1. ])
+ >>> ogrid[0:5,0:5]
+ [array([[0],
+ [1],
+ [2],
+ [3],
+ [4]]), array([[0, 1, 2, 3, 4]])]
+
+ """
+ def __init__(self):
+ super(OGridClass, self).__init__(sparse=True)
+
+ogrid = OGridClass()
+
class AxisConcatenator(object):
"""
diff --git a/numpy/lib/tests/test_arraypad.py b/numpy/lib/tests/test_arraypad.py
index 8ba0370b0..45d624781 100644
--- a/numpy/lib/tests/test_arraypad.py
+++ b/numpy/lib/tests/test_arraypad.py
@@ -1009,6 +1009,21 @@ class TestUnicodeInput(object):
assert_array_equal(a, b)
+class TestObjectInput(object):
+ def test_object_input(self):
+ # Regression test for issue gh-11395.
+ a = np.full((4, 3), None)
+ pad_amt = ((2, 3), (3, 2))
+ b = np.full((9, 8), None)
+ modes = ['edge',
+ 'symmetric',
+ 'reflect',
+ 'wrap',
+ ]
+ for mode in modes:
+ assert_array_equal(pad(a, pad_amt, mode=mode), b)
+
+
class TestValueError1(object):
def test_check_simple(self):
arr = np.arange(30)
diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py
index 4103a9eb3..d2a9181db 100644
--- a/numpy/lib/tests/test_function_base.py
+++ b/numpy/lib/tests/test_function_base.py
@@ -2237,6 +2237,14 @@ class TestInterp(object):
x0 = np.nan
assert_almost_equal(np.interp(x0, x, y), x0)
+ def test_non_finite_behavior(self):
+ x = [1, 2, 2.5, 3, 4]
+ xp = [1, 2, 3, 4]
+ fp = [1, 2, np.inf, 4]
+ assert_almost_equal(np.interp(x, xp, fp), [1, 2, np.inf, np.inf, 4])
+ fp = [1, 2, np.nan, 4]
+ assert_almost_equal(np.interp(x, xp, fp), [1, 2, np.nan, np.nan, 4])
+
def test_complex_interp(self):
# test complex interpolation
x = np.linspace(0, 1, 5)
@@ -2251,6 +2259,12 @@ class TestInterp(object):
x0 = 2.0
right = 2 + 3.0j
assert_almost_equal(np.interp(x0, x, y, right=right), right)
+ # test complex non finite
+ x = [1, 2, 2.5, 3, 4]
+ xp = [1, 2, 3, 4]
+ fp = [1, 2+1j, np.inf, 4]
+ y = [1, 2+1j, np.inf+0.5j, np.inf, 4]
+ assert_almost_equal(np.interp(x, xp, fp), y)
# test complex periodic
x = [-180, -170, -185, 185, -10, -5, 0, 365]
xp = [190, -190, 350, -350]
diff --git a/numpy/ma/core.py b/numpy/ma/core.py
index 091ab4e20..5bfa51b12 100644
--- a/numpy/ma/core.py
+++ b/numpy/ma/core.py
@@ -7115,32 +7115,32 @@ size.__doc__ = np.size.__doc__
def where(condition, x=_NoValue, y=_NoValue):
"""
- Return a masked array with elements from x or y, depending on condition.
+ Return a masked array with elements from `x` or `y`, depending on condition.
- Returns a masked array, shaped like condition, where the elements
- are from `x` when `condition` is True, and from `y` otherwise.
- If neither `x` nor `y` are given, the function returns a tuple of
- indices where `condition` is True (the result of
- ``condition.nonzero()``).
+ .. note::
+ When only `condition` is provided, this function is identical to
+ `nonzero`. The rest of this documentation covers only the case where
+ all three arguments are provided.
Parameters
----------
condition : array_like, bool
- The condition to meet. For each True element, yield the corresponding
- element from `x`, otherwise from `y`.
+ Where True, yield `x`, otherwise yield `y`.
x, y : array_like, optional
Values from which to choose. `x`, `y` and `condition` need to be
broadcastable to some shape.
Returns
-------
- out : MaskedArray or tuple of ndarrays
- The resulting masked array if `x` and `y` were given, otherwise
- the result of ``condition.nonzero()``.
+ out : MaskedArray
+        A masked array with `masked` elements where the condition is masked,
+ elements from `x` where `condition` is True, and elements from `y`
+ elsewhere.
See Also
--------
numpy.where : Equivalent function in the top-level NumPy module.
+ nonzero : The function that is called when x and y are omitted
Examples
--------
@@ -7151,9 +7151,6 @@ def where(condition, x=_NoValue, y=_NoValue):
[[0.0 -- 2.0]
[-- 4.0 --]
[6.0 -- 8.0]]
- >>> np.ma.where(x > 5) # return the indices where x > 5
- (array([2, 2]), array([0, 2]))
-
>>> print(np.ma.where(x > 5, x, -3.1416))
[[-3.1416 -- -3.1416]
[-- -3.1416 --]
diff --git a/runtests.py b/runtests.py
index 35717b319..355173326 100755
--- a/runtests.py
+++ b/runtests.py
@@ -384,23 +384,27 @@ def build_project(args):
with open(log_filename, 'w') as log:
p = subprocess.Popen(cmd, env=env, stdout=log, stderr=log,
cwd=ROOT_DIR)
-
- # Wait for it to finish, and print something to indicate the
- # process is alive, but only if the log file has grown (to
- # allow continuous integration environments kill a hanging
- # process accurately if it produces no output)
- last_blip = time.time()
- last_log_size = os.stat(log_filename).st_size
- while p.poll() is None:
- time.sleep(0.5)
- if time.time() - last_blip > 60:
- log_size = os.stat(log_filename).st_size
- if log_size > last_log_size:
- print(" ... build in progress")
- last_blip = time.time()
- last_log_size = log_size
-
- ret = p.wait()
+ try:
+ # Wait for it to finish, and print something to indicate the
+ # process is alive, but only if the log file has grown (to
+ # allow continuous integration environments kill a hanging
+ # process accurately if it produces no output)
+ last_blip = time.time()
+ last_log_size = os.stat(log_filename).st_size
+ while p.poll() is None:
+ time.sleep(0.5)
+ if time.time() - last_blip > 60:
+ log_size = os.stat(log_filename).st_size
+ if log_size > last_log_size:
+ print(" ... build in progress")
+ last_blip = time.time()
+ last_log_size = log_size
+
+ ret = p.wait()
+ except:
+ p.kill()
+ p.wait()
+ raise
if ret == 0:
print("Build OK")