author    | Eric Wieser <wieser.eric@gmail.com> | 2019-08-19 19:16:44 -0500
committer | Eric Wieser <wieser.eric@gmail.com> | 2019-08-19 19:16:44 -0500
commit    | 0f5e376d3eb6118b783cdd3ecd27722c2d1934ba (patch)
tree      | c44850b579cbd27993c45dda1a7922e2d109b24f /numpy/core/fromnumeric.py
parent    | 483f565d85dadc899f94710531fba8355d554d59 (diff)
parent    | 98bdde643af6443d68a8c6233807b75bd3f0ed80 (diff)
Merge remote-tracking branch 'upstream/master' into fix-if-fields
Diffstat (limited to 'numpy/core/fromnumeric.py')
-rw-r--r-- | numpy/core/fromnumeric.py | 379
1 file changed, 250 insertions, 129 deletions
diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py index 59a820d53..bde37fca3 100644 --- a/numpy/core/fromnumeric.py +++ b/numpy/core/fromnumeric.py @@ -13,7 +13,8 @@ from . import multiarray as mu from . import overrides from . import umath as um from . import numerictypes as nt -from .numeric import asarray, array, asanyarray, concatenate +from ._asarray import asarray, array, asanyarray +from .multiarray import concatenate from . import _methods _dt_ = nt.sctype2char @@ -24,7 +25,7 @@ __all__ = [ 'argmin', 'argpartition', 'argsort', 'around', 'choose', 'clip', 'compress', 'cumprod', 'cumproduct', 'cumsum', 'diagonal', 'mean', 'ndim', 'nonzero', 'partition', 'prod', 'product', 'ptp', 'put', - 'rank', 'ravel', 'repeat', 'reshape', 'resize', 'round_', + 'ravel', 'repeat', 'reshape', 'resize', 'round_', 'searchsorted', 'shape', 'size', 'sometrue', 'sort', 'squeeze', 'std', 'sum', 'swapaxes', 'take', 'trace', 'transpose', 'var', ] @@ -52,17 +53,20 @@ def _wrapit(obj, method, *args, **kwds): def _wrapfunc(obj, method, *args, **kwds): - try: - return getattr(obj, method)(*args, **kwds) - - # An AttributeError occurs if the object does not have - # such a method in its class. + bound = getattr(obj, method, None) + if bound is None: + return _wrapit(obj, method, *args, **kwds) - # A TypeError occurs if the object does have such a method - # in its class, but its signature is not identical to that - # of NumPy's. This situation has occurred in the case of - # a downstream library like 'pandas'. - except (AttributeError, TypeError): + try: + return bound(*args, **kwds) + except TypeError: + # A TypeError occurs if the object does have such a method in its + # class, but its signature is not identical to that of NumPy's. This + # situation has occurred in the case of a downstream library like + # 'pandas'. + # + # Call _wrapit from within the except clause to ensure a potential + # exception has a traceback chain. return _wrapit(obj, method, *args, **kwds) @@ -127,7 +131,8 @@ def take(a, indices, axis=None, out=None, mode='raise'): input array is used. out : ndarray, optional (Ni..., Nj..., Nk...) If provided, the result will be placed in this array. It should - be of the appropriate shape and dtype. + be of the appropriate shape and dtype. Note that `out` is always + buffered if `mode='raise'`; use other modes for better performance. mode : {'raise', 'wrap', 'clip'}, optional Specifies how out-of-bounds indices will behave. @@ -240,12 +245,16 @@ def reshape(a, newshape, order='C'): you should assign the new shape to the shape attribute of the array:: >>> a = np.zeros((10, 2)) + # A transpose makes the array non-contiguous >>> b = a.T + # Taking a view makes it possible to modify the shape without modifying # the initial object. >>> c = b.view() >>> c.shape = (20) + Traceback (most recent call last): + ... AttributeError: incompatible shape for a non-contiguous array The `order` keyword gives the index ordering both for *fetching* the values @@ -348,7 +357,8 @@ def choose(a, choices, out=None, mode='raise'): ``choices.shape[0]``) is taken as defining the "sequence". out : array, optional If provided, the result will be inserted into this array. It should - be of the appropriate shape and dtype. + be of the appropriate shape and dtype. Note that `out` is always + buffered if `mode='raise'`; use other modes for better performance. 
mode : {'raise' (default), 'wrap', 'clip'}, optional Specifies how indices outside `[0, n-1]` will be treated: @@ -370,6 +380,7 @@ def choose(a, choices, out=None, mode='raise'): See Also -------- ndarray.choose : equivalent method + numpy.take_along_axis : Preferable if `choices` is an array Notes ----- @@ -505,7 +516,8 @@ def put(a, ind, v, mode='raise'): 'clip' mode means that all indices that are too large are replaced by the index that addresses the last element along that axis. Note - that this disables indexing with negative numbers. + that this disables indexing with negative numbers. In 'raise' mode, + if an exception occurs the target array may still be modified. See Also -------- @@ -813,7 +825,7 @@ def _sort_dispatcher(a, axis=None, kind=None, order=None): @array_function_dispatch(_sort_dispatcher) -def sort(a, axis=-1, kind='quicksort', order=None): +def sort(a, axis=-1, kind=None, order=None): """ Return a sorted copy of an array. @@ -825,7 +837,14 @@ def sort(a, axis=-1, kind='quicksort', order=None): Axis along which to sort. If None, the array is flattened before sorting. The default is -1, which sorts along the last axis. kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional - Sorting algorithm. Default is 'quicksort'. + Sorting algorithm. The default is 'quicksort'. Note that both 'stable' + and 'mergesort' use timsort or radix sort under the covers and, in general, + the actual implementation will vary with data type. The 'mergesort' option + is retained for backwards compatibility. + + .. versionchanged:: 1.15.0. + The 'stable' option was added. + order : str or list of str, optional When `a` is an array with fields defined, this argument specifies which fields to compare first, second, etc. A single field can @@ -851,17 +870,22 @@ def sort(a, axis=-1, kind='quicksort', order=None): The various sorting algorithms are characterized by their average speed, worst case performance, work space size, and whether they are stable. A stable sort keeps items with the same key in the same relative - order. The three available algorithms have the following + order. The four algorithms implemented in NumPy have the following properties: =========== ======= ============= ============ ======== kind speed worst case work space stable =========== ======= ============= ============ ======== 'quicksort' 1 O(n^2) 0 no - 'mergesort' 2 O(n*log(n)) ~n/2 yes 'heapsort' 3 O(n*log(n)) 0 no + 'mergesort' 2 O(n*log(n)) ~n/2 yes + 'timsort' 2 O(n*log(n)) ~n/2 yes =========== ======= ============= ============ ======== + .. note:: The datatype determines which of 'mergesort' or 'timsort' + is actually used, even if 'mergesort' is specified. User selection + at a finer scale is not currently available. + All the sort algorithms make temporary copies of the data when sorting along any but the last axis. Consequently, sorting along the last axis is faster and uses less space than sorting along @@ -885,13 +909,30 @@ def sort(a, axis=-1, kind='quicksort', order=None): .. versionadded:: 1.12.0 - quicksort has been changed to an introsort which will switch - heapsort when it does not make enough progress. This makes its - worst case O(n*log(n)). - - 'stable' automatically choses the best stable sorting algorithm - for the data type being sorted. It is currently mapped to - merge sort. + quicksort has been changed to `introsort <https://en.wikipedia.org/wiki/Introsort>`_. + When sorting does not make enough progress it switches to + `heapsort <https://en.wikipedia.org/wiki/Heapsort>`_. 
+ This implementation makes quicksort O(n*log(n)) in the worst case. + + 'stable' automatically chooses the best stable sorting algorithm + for the data type being sorted. + It, along with 'mergesort' is currently mapped to + `timsort <https://en.wikipedia.org/wiki/Timsort>`_ + or `radix sort <https://en.wikipedia.org/wiki/Radix_sort>`_ + depending on the data type. + API forward compatibility currently limits the + ability to select the implementation and it is hardwired for the different + data types. + + .. versionadded:: 1.17.0 + + Timsort is added for better performance on already or nearly + sorted data. On random data timsort is almost identical to + mergesort. It is now used for stable sort while quicksort is still the + default sort if none is chosen. For timsort details, refer to + `CPython listsort.txt <https://github.com/python/cpython/blob/3.7/Objects/listsort.txt>`_. + 'mergesort' and 'stable' are mapped to radix sort for integer data types. Radix sort is an + O(n) sort instead of O(n log n). Examples -------- @@ -940,7 +981,7 @@ def _argsort_dispatcher(a, axis=None, kind=None, order=None): @array_function_dispatch(_argsort_dispatcher) -def argsort(a, axis=-1, kind='quicksort', order=None): +def argsort(a, axis=-1, kind=None, order=None): """ Returns the indices that would sort an array. @@ -956,7 +997,13 @@ def argsort(a, axis=-1, kind='quicksort', order=None): Axis along which to sort. The default is -1 (the last axis). If None, the flattened array is used. kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional - Sorting algorithm. + Sorting algorithm. The default is 'quicksort'. Note that both 'stable' + and 'mergesort' use timsort under the covers and, in general, the + actual implementation will vary with data type. The 'mergesort' option + is retained for backwards compatibility. + + .. versionchanged:: 1.15.0. + The 'stable' option was added. order : str or list of str, optional When `a` is an array with fields defined, this argument specifies which fields to compare first, second, etc. A single field can @@ -967,10 +1014,10 @@ def argsort(a, axis=-1, kind='quicksort', order=None): Returns ------- index_array : ndarray, int - Array of indices that sort `a` along the specified axis. + Array of indices that sort `a` along the specified `axis`. If `a` is one-dimensional, ``a[index_array]`` yields a sorted `a`. - More generally, ``np.take_along_axis(a, index_array, axis=a)`` always - yields the sorted `a`, irrespective of dimensionality. + More generally, ``np.take_along_axis(a, index_array, axis=axis)`` + always yields the sorted `a`, irrespective of dimensionality. See Also -------- @@ -1001,13 +1048,21 @@ def argsort(a, axis=-1, kind='quicksort', order=None): array([[0, 3], [2, 2]]) - >>> np.argsort(x, axis=0) # sorts along first axis (down) + >>> ind = np.argsort(x, axis=0) # sorts along first axis (down) + >>> ind array([[0, 1], [1, 0]]) + >>> np.take_along_axis(x, ind, axis=0) # same as np.sort(x, axis=0) + array([[0, 2], + [2, 3]]) - >>> np.argsort(x, axis=1) # sorts along last axis (across) + >>> ind = np.argsort(x, axis=1) # sorts along last axis (across) + >>> ind array([[0, 1], [0, 1]]) + >>> np.take_along_axis(x, ind, axis=1) # same as np.sort(x, axis=1) + array([[0, 3], + [2, 2]]) Indices of the sorted elements of a N-dimensional array: @@ -1229,7 +1284,7 @@ def searchsorted(a, v, side='left', sorter=None): As of NumPy 1.4.0 `searchsorted` works with real/complex arrays containing `nan` values. The enhanced sort order is documented in `sort`. 
- This function is a faster version of the builtin python `bisect.bisect_left` + This function uses the same algorithm as the builtin python `bisect.bisect_left` (``side='left'``) and `bisect.bisect_right` (``side='right'``) functions, which is also vectorized in the `v` argument. @@ -1381,7 +1436,7 @@ def squeeze(a, axis=None): try: squeeze = a.squeeze except AttributeError: - return _wrapit(a, 'squeeze') + return _wrapit(a, 'squeeze', axis=axis) if axis is None: return squeeze() else: @@ -1448,7 +1503,7 @@ def diagonal(a, offset=0, axis1=0, axis2=1): same type as `a` is returned unless `a` is a `matrix`, in which case a 1-D array rather than a (2-D) `matrix` is returned in order to maintain backward compatibility. - + If ``a.ndim > 2``, then the dimensions specified by `axis1` and `axis2` are removed, and a new axis inserted at the end corresponding to the diagonal. @@ -1482,9 +1537,9 @@ def diagonal(a, offset=0, axis1=0, axis2=1): [2, 3]], [[4, 5], [6, 7]]]) - >>> a.diagonal(0, # Main diagonals of two arrays created by skipping - ... 0, # across the outer(left)-most axis last and - ... 1) # the "middle" (row) axis first. + >>> a.diagonal(0, # Main diagonals of two arrays created by skipping + ... 0, # across the outer(left)-most axis last and + ... 1) # the "middle" (row) axis first. array([[0, 6], [1, 7]]) @@ -1492,13 +1547,28 @@ def diagonal(a, offset=0, axis1=0, axis2=1): corresponds to fixing the right-most (column) axis, and that the diagonals are "packed" in rows. - >>> a[:,:,0] # main diagonal is [0 6] + >>> a[:,:,0] # main diagonal is [0 6] array([[0, 2], [4, 6]]) - >>> a[:,:,1] # main diagonal is [1 7] + >>> a[:,:,1] # main diagonal is [1 7] array([[1, 3], [5, 7]]) + The anti-diagonal can be obtained by reversing the order of elements + using either `numpy.flipud` or `numpy.fliplr`. + + >>> a = np.arange(9).reshape(3, 3) + >>> a + array([[0, 1, 2], + [3, 4, 5], + [6, 7, 8]]) + >>> np.fliplr(a).diagonal() # Horizontal flip + array([2, 4, 6]) + >>> np.flipud(a).diagonal() # Vertical flip + array([6, 4, 2]) + + Note that the order in which the diagonal is retrieved varies depending + on the flip function. """ if isinstance(a, np.matrix): # Make diagonal of matrix 1-D to preserve backward compatibility. @@ -1644,21 +1714,21 @@ def ravel(a, order='C'): It is equivalent to ``reshape(-1, order=order)``. >>> x = np.array([[1, 2, 3], [4, 5, 6]]) - >>> print(np.ravel(x)) - [1 2 3 4 5 6] + >>> np.ravel(x) + array([1, 2, 3, 4, 5, 6]) - >>> print(x.reshape(-1)) - [1 2 3 4 5 6] + >>> x.reshape(-1) + array([1, 2, 3, 4, 5, 6]) - >>> print(np.ravel(x, order='F')) - [1 4 2 5 3 6] + >>> np.ravel(x, order='F') + array([1, 4, 2, 5, 3, 6]) When ``order`` is 'A', it will preserve the array's 'C' or 'F' ordering: - >>> print(np.ravel(x.T)) - [1 4 2 5 3 6] - >>> print(np.ravel(x.T, order='A')) - [1 2 3 4 5 6] + >>> np.ravel(x.T) + array([1, 4, 2, 5, 3, 6]) + >>> np.ravel(x.T, order='A') + array([1, 2, 3, 4, 5, 6]) When ``order`` is 'K', it will preserve orderings that are neither 'C' nor 'F', but won't reverse axes: @@ -1699,17 +1769,17 @@ def nonzero(a): Returns a tuple of arrays, one for each dimension of `a`, containing the indices of the non-zero elements in that dimension. The values in `a` are always tested and returned in - row-major, C-style order. The corresponding non-zero - values can be obtained with:: + row-major, C-style order. - a[nonzero(a)] + To group the indices by element, rather than dimension, use `argwhere`, + which returns a row for each non-zero element. 
- To group the indices by element, rather than dimension, use:: - - transpose(nonzero(a)) + .. note:: + When called on a zero-d array or scalar, ``nonzero(a)`` is treated + as ``nonzero(atleast1d(a))``. - The result of this is always a 2-D array, with a row for - each non-zero element. + ..deprecated:: 1.17.0 + Use `atleast1d` explicitly if this behavior is deliberate. Parameters ---------- @@ -1731,6 +1801,12 @@ def nonzero(a): count_nonzero : Counts the number of non-zero elements in the input array. + Notes + ----- + While the nonzero values can be obtained with ``a[nonzero(a)]``, it is + recommended to use ``x[x.astype(bool)]`` or ``x[x != 0]`` instead, which + will correctly handle 0-d arrays. + Examples -------- >>> x = np.array([[3, 0, 0], [0, 4, 0], [5, 6, 0]]) @@ -1747,7 +1823,7 @@ def nonzero(a): array([[0, 0], [1, 1], [2, 0], - [2, 1]) + [2, 1]]) A common use for ``nonzero`` is to find the indices of an array, where a condition is True. Given an array `a`, the condition `a` > 3 is a @@ -1896,12 +1972,12 @@ def compress(condition, a, axis=None, out=None): return _wrapfunc(a, 'compress', condition, axis=axis, out=out) -def _clip_dispatcher(a, a_min, a_max, out=None): +def _clip_dispatcher(a, a_min, a_max, out=None, **kwargs): return (a, a_min, a_max) @array_function_dispatch(_clip_dispatcher) -def clip(a, a_min, a_max, out=None): +def clip(a, a_min, a_max, out=None, **kwargs): """ Clip (limit) the values in an array. @@ -1910,6 +1986,9 @@ def clip(a, a_min, a_max, out=None): is specified, values smaller than 0 become 0, and values larger than 1 become 1. + Equivalent to but faster than ``np.maximum(a_min, np.minimum(a, a_max))``. + No check is performed to ensure ``a_min < a_max``. + Parameters ---------- a : array_like @@ -1927,6 +2006,11 @@ def clip(a, a_min, a_max, out=None): The results will be placed in this array. It may be the input array for in-place clipping. `out` must be of the right shape to hold the output. Its type is preserved. + **kwargs + For other keyword-only arguments, see the + :ref:`ufunc docs <ufuncs.kwargs>`. + + .. versionadded:: 1.17.0 Returns ------- @@ -1955,16 +2039,17 @@ def clip(a, a_min, a_max, out=None): array([3, 4, 2, 3, 4, 5, 6, 7, 8, 8]) """ - return _wrapfunc(a, 'clip', a_min, a_max, out=out) + return _wrapfunc(a, 'clip', a_min, a_max, out=out, **kwargs) def _sum_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None, - initial=None): + initial=None, where=None): return (a, out) @array_function_dispatch(_sum_dispatcher) -def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, initial=np._NoValue): +def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, + initial=np._NoValue, where=np._NoValue): """ Sum of array elements over a given axis. @@ -2008,6 +2093,11 @@ def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, initial=np._No .. versionadded:: 1.15.0 + where : array_like of bool, optional + Elements to include in the sum. See `~numpy.ufunc.reduce` for details. + + .. versionadded:: 1.17.0 + Returns ------- sum_along_axis : ndarray @@ -2020,6 +2110,8 @@ def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, initial=np._No -------- ndarray.sum : Equivalent method. + add.reduce : Equivalent functionality of `add`. + cumsum : Cumulative sum of array elements. trapz : Integration of array values using the composite trapezoidal rule. 
@@ -2036,6 +2128,23 @@ def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, initial=np._No >>> np.sum([]) 0.0 + For floating point numbers the numerical precision of sum (and + ``np.add.reduce``) is in general limited by directly adding each number + individually to the result causing rounding errors in every step. + However, often numpy will use a numerically better approach (partial + pairwise summation) leading to improved precision in many use-cases. + This improved precision is always provided when no ``axis`` is given. + When ``axis`` is given, it will depend on which axis is summed. + Technically, to provide the best speed possible, the improved precision + is only used when the summation is along the fast axis in memory. + Note that the exact precision may vary depending on other parameters. + In contrast to NumPy, Python's ``math.fsum`` function uses a slower but + more precise approach to summation. + Especially when summing a large number of lower precision floating point + numbers, such as ``float32``, numerical errors can become significant. + In such cases it can be advisable to use `dtype="float64"` to use a higher + precision for the output. + Examples -------- >>> np.sum([0.5, 1.5]) @@ -2048,6 +2157,8 @@ def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, initial=np._No array([0, 6]) >>> np.sum([[0, 1], [0, 5]], axis=1) array([1, 5]) + >>> np.sum([[0, 1], [np.nan, 5]], where=[False, True], axis=1) + array([1., 5.]) If the accumulator is too small, overflow occurs: @@ -2064,7 +2175,7 @@ def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, initial=np._No warnings.warn( "Calling np.sum(generator) is deprecated, and in the future will give a different result. " "Use np.sum(np.fromiter(generator)) or the python sum builtin instead.", - DeprecationWarning, stacklevel=2) + DeprecationWarning, stacklevel=3) res = _sum_(a) if out is not None: @@ -2073,7 +2184,7 @@ def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, initial=np._No return res return _wrapreduction(a, np.add, 'sum', axis, dtype, out, keepdims=keepdims, - initial=initial) + initial=initial, where=where) def _any_dispatcher(a, axis=None, out=None, keepdims=None): @@ -2150,10 +2261,10 @@ def any(a, axis=None, out=None, keepdims=np._NoValue): >>> np.any(np.nan) True - >>> o=np.array([False]) + >>> o=np.array(False) >>> z=np.any([-1, 4, 5], out=o) >>> z, o - (array([ True]), array([ True])) + (array(True), array(True)) >>> # Check now that z is a reference to o >>> z is o True @@ -2236,10 +2347,10 @@ def all(a, axis=None, out=None, keepdims=np._NoValue): >>> np.all([1.0, np.nan]) True - >>> o=np.array([False]) + >>> o=np.array(False) >>> z=np.all([-1, 4, 5], out=o) - >>> id(z), id(o), z # doctest: +SKIP - (28293632, 28293632, array([ True])) + >>> id(z), id(o), z + (28293632, 28293632, array(True)) # may vary """ return _wrapreduction(a, np.logical_and, 'all', axis, None, out, keepdims=keepdims) @@ -2390,12 +2501,14 @@ def ptp(a, axis=None, out=None, keepdims=np._NoValue): return _methods._ptp(a, axis=axis, out=out, **kwargs) -def _amax_dispatcher(a, axis=None, out=None, keepdims=None, initial=None): +def _amax_dispatcher(a, axis=None, out=None, keepdims=None, initial=None, + where=None): return (a, out) @array_function_dispatch(_amax_dispatcher) -def amax(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue): +def amax(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, + where=np._NoValue): """ Return the maximum of an array or maximum 
along an axis. @@ -2433,6 +2546,11 @@ def amax(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue): .. versionadded:: 1.15.0 + where : array_like of bool, optional + Elements to compare for the maximum. See `~numpy.ufunc.reduce` + for details. + + .. versionadded:: 1.17.0 Returns ------- @@ -2478,11 +2596,14 @@ def amax(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue): array([2, 3]) >>> np.amax(a, axis=1) # Maxima along the second axis array([1, 3]) - + >>> np.amax(a, where=[False, True], initial=-1, axis=0) + array([-1, 3]) >>> b = np.arange(5, dtype=float) >>> b[2] = np.NaN >>> np.amax(b) nan + >>> np.amax(b, where=~np.isnan(b), initial=-1) + 4.0 >>> np.nanmax(b) 4.0 @@ -2501,16 +2622,18 @@ def amax(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue): >>> max([5], default=6) 5 """ - return _wrapreduction(a, np.maximum, 'max', axis, None, out, keepdims=keepdims, - initial=initial) + return _wrapreduction(a, np.maximum, 'max', axis, None, out, + keepdims=keepdims, initial=initial, where=where) -def _amin_dispatcher(a, axis=None, out=None, keepdims=None, initial=None): +def _amin_dispatcher(a, axis=None, out=None, keepdims=None, initial=None, + where=None): return (a, out) @array_function_dispatch(_amin_dispatcher) -def amin(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue): +def amin(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, + where=np._NoValue): """ Return the minimum of an array or minimum along an axis. @@ -2548,6 +2671,12 @@ def amin(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue): .. versionadded:: 1.15.0 + where : array_like of bool, optional + Elements to compare for the minimum. See `~numpy.ufunc.reduce` + for details. + + .. versionadded:: 1.17.0 + Returns ------- amin : ndarray or scalar @@ -2592,11 +2721,15 @@ def amin(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue): array([0, 1]) >>> np.amin(a, axis=1) # Minima along the second axis array([0, 2]) + >>> np.amin(a, where=[False, True], initial=10, axis=0) + array([10, 1]) >>> b = np.arange(5, dtype=float) >>> b[2] = np.NaN >>> np.amin(b) nan + >>> np.amin(b, where=~np.isnan(b), initial=10) + 0.0 >>> np.nanmin(b) 0.0 @@ -2614,8 +2747,8 @@ def amin(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue): >>> min([6], default=5) 6 """ - return _wrapreduction(a, np.minimum, 'min', axis, None, out, keepdims=keepdims, - initial=initial) + return _wrapreduction(a, np.minimum, 'min', axis, None, out, + keepdims=keepdims, initial=initial, where=where) def _alen_dispathcer(a): @@ -2650,19 +2783,24 @@ def alen(a): 7 """ + # NumPy 1.18.0, 2019-08-02 + warnings.warn( + "`np.alen` is deprecated, use `len` instead", + DeprecationWarning, stacklevel=2) try: return len(a) except TypeError: return len(array(a, ndmin=1)) -def _prod_dispatcher( - a, axis=None, dtype=None, out=None, keepdims=None, initial=None): +def _prod_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None, + initial=None, where=None): return (a, out) @array_function_dispatch(_prod_dispatcher) -def prod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, initial=np._NoValue): +def prod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, + initial=np._NoValue, where=np._NoValue): """ Return the product of array elements over a given axis. @@ -2707,6 +2845,11 @@ def prod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, initial=np._N .. 
versionadded:: 1.15.0 + where : array_like of bool, optional + Elements to include in the product. See `~numpy.ufunc.reduce` for details. + + .. versionadded:: 1.17.0 + Returns ------- product_along_axis : ndarray, see `dtype` parameter above. @@ -2724,8 +2867,8 @@ def prod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, initial=np._N raised on overflow. That means that, on a 32-bit platform: >>> x = np.array([536870910, 536870910, 536870910, 536870910]) - >>> np.prod(x) # random - 16 + >>> np.prod(x) + 16 # may vary The product of an empty array is the neutral element 1: @@ -2749,6 +2892,11 @@ def prod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, initial=np._N >>> np.prod([[1.,2.],[3.,4.]], axis=1) array([ 2., 12.]) + Or select specific elements to include: + + >>> np.prod([1., np.nan, 3.], where=[True, False, True]) + 3.0 + If the type of `x` is unsigned, then the output type is the unsigned platform integer: @@ -2768,8 +2916,8 @@ def prod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, initial=np._N >>> np.prod([1, 2], initial=5) 10 """ - return _wrapreduction(a, np.multiply, 'prod', axis, dtype, out, keepdims=keepdims, - initial=initial) + return _wrapreduction(a, np.multiply, 'prod', axis, dtype, out, + keepdims=keepdims, initial=initial, where=where) def _cumprod_dispatcher(a, axis=None, dtype=None, out=None): @@ -2993,11 +3141,11 @@ def around(a, decimals=0, out=None): Examples -------- >>> np.around([0.37, 1.64]) - array([ 0., 2.]) + array([0., 2.]) >>> np.around([0.37, 1.64], decimals=1) - array([ 0.4, 1.6]) + array([0.4, 1.6]) >>> np.around([.5, 1.5, 2.5, 3.5, 4.5]) # rounds to nearest even value - array([ 0., 2., 2., 4., 4.]) + array([0., 2., 2., 4., 4.]) >>> np.around([1,2,3,11], decimals=1) # ndarray of ints is returned array([ 1, 2, 3, 11]) >>> np.around([1,2,3,11], decimals=-1) @@ -3085,9 +3233,9 @@ def mean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue): >>> np.mean(a) 2.5 >>> np.mean(a, axis=0) - array([ 2., 3.]) + array([2., 3.]) >>> np.mean(a, axis=1) - array([ 1.5, 3.5]) + array([1.5, 3.5]) In single precision, `mean` can be inaccurate: @@ -3100,7 +3248,7 @@ def mean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue): Computing the mean in float64 is more accurate: >>> np.mean(a, dtype=np.float64) - 0.55000000074505806 + 0.55000000074505806 # may vary """ kwargs = {} @@ -3206,11 +3354,11 @@ def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue): -------- >>> a = np.array([[1, 2], [3, 4]]) >>> np.std(a) - 1.1180339887498949 + 1.1180339887498949 # may vary >>> np.std(a, axis=0) - array([ 1., 1.]) + array([1., 1.]) >>> np.std(a, axis=1) - array([ 0.5, 0.5]) + array([0.5, 0.5]) In single precision, std() can be inaccurate: @@ -3223,7 +3371,7 @@ def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue): Computing the standard deviation in float64 is more accurate: >>> np.std(a, dtype=np.float64) - 0.44999999925494177 + 0.44999999925494177 # may vary """ kwargs = {} @@ -3300,7 +3448,7 @@ def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue): See Also -------- - std , mean, nanmean, nanstd, nanvar + std, mean, nanmean, nanstd, nanvar numpy.doc.ufuncs : Section "Output arguments" Notes @@ -3330,9 +3478,9 @@ def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue): >>> np.var(a) 1.25 >>> np.var(a, axis=0) - array([ 1., 1.]) + array([1., 1.]) >>> np.var(a, axis=1) - array([ 0.25, 0.25]) + array([0.25, 0.25]) In single precision, var() can be inaccurate: @@ 
-3345,7 +3493,7 @@ def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue): Computing the variance in float64 is more accurate: >>> np.var(a, dtype=np.float64) - 0.20249999932944759 + 0.20249999932944759 # may vary >>> ((1-0.55)**2 + (0.1-0.55)**2)/2 0.2025 @@ -3430,30 +3578,3 @@ def alltrue(*args, **kwargs): numpy.all : Equivalent function; see for details. """ return all(*args, **kwargs) - - -@array_function_dispatch(_ndim_dispatcher) -def rank(a): - """ - Return the number of dimensions of an array. - - .. note:: - This function is deprecated in NumPy 1.9 to avoid confusion with - `numpy.linalg.matrix_rank`. The ``ndim`` attribute or function - should be used instead. - - See Also - -------- - ndim : equivalent non-deprecated function - - Notes - ----- - In the old Numeric package, `rank` was the term used for the number of - dimensions, but in NumPy `ndim` is used instead. - """ - # 2014-04-12, 1.9 - warnings.warn( - "`rank` is deprecated; use the `ndim` attribute or function instead. " - "To find the rank of a matrix see `numpy.linalg.matrix_rank`.", - VisibleDeprecationWarning, stacklevel=2) - return ndim(a) |
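The first hunk in this diff rewrites `_wrapfunc` so that a missing method is detected up front with `getattr(obj, method, None)`, and only a signature mismatch (`TypeError`) is handled inside the `except` clause, keeping the original exception on the traceback chain. A minimal sketch of that dispatch pattern, using illustrative names (`call_method_or_coerce`, `coerce_and_retry`) rather than NumPy's private helpers:

```python
import numpy as np

def coerce_and_retry(obj, method, *args, **kwds):
    # Roughly what _wrapit does: coerce to ndarray and call the ndarray method.
    return getattr(np.asarray(obj), method)(*args, **kwds)

def call_method_or_coerce(obj, method, *args, **kwds):
    bound = getattr(obj, method, None)
    if bound is None:
        # The object has no such method at all: coerce immediately,
        # without entering a try/except.
        return coerce_and_retry(obj, method, *args, **kwds)
    try:
        return bound(*args, **kwds)
    except TypeError:
        # The method exists but its signature differs from NumPy's
        # (as happens with some pandas objects). Retrying from inside
        # the except clause keeps this TypeError chained as __context__.
        return coerce_and_retry(obj, method, *args, **kwds)

# e.g. lists have no .clip method, so the fallback path is taken:
call_method_or_coerce([3, 1, 2], 'clip', 1, 2)
```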
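The `sort`/`argsort` docstrings now document the `kind=None` default, the mapping of 'stable'/'mergesort' onto timsort or radix sort, and show `take_along_axis` recovering the sorted array from `argsort` indices. A short sketch of those behaviors, mirroring the new doctests:

```python
import numpy as np

x = np.array([[0, 3],
              [2, 2]])

# 'stable' (and 'mergesort') guarantee a stable sort; whether timsort or
# radix sort runs underneath is chosen from the dtype, per the notes above.
np.sort(x, axis=0, kind='stable')

# argsort indices reproduce the sorted array when passed to take_along_axis
# with the same axis -- the corrected formula in the Returns section.
ind = np.argsort(x, axis=1)
np.take_along_axis(x, ind, axis=1)   # same result as np.sort(x, axis=1)
```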
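Several reductions (`sum`, `prod`, `amax`, `amin`) gain a `where` keyword in this diff (added in NumPy 1.17). Combined with `initial`, it gives a way to skip elements such as NaNs, as in the new doctests:

```python
import numpy as np

b = np.arange(5, dtype=float)
b[2] = np.nan                       # b is [0., 1., nan, 3., 4.]

# `where` selects which elements take part in the reduction. add has an
# identity (0), so sum needs no `initial`; min/max do not, so an `initial`
# value is required when masking.
np.sum(b, where=~np.isnan(b))                     # 8.0
np.amax(b, where=~np.isnan(b), initial=-np.inf)   # 4.0
np.amin(b, where=~np.isnan(b), initial=np.inf)    # 0.0
```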
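The new precision note in the `sum` docstring explains that pairwise summation limits but does not remove rounding error, and recommends a wider accumulator for large low-precision inputs. A small illustration of the options it mentions (the exact printed values will vary):

```python
import math
import numpy as np

a = np.full(10**6, 0.1, dtype=np.float32)

print(a.sum())                   # float32 accumulation with pairwise summation
print(a.sum(dtype=np.float64))   # accumulate in float64, as the note suggests
print(math.fsum(a))              # Python's slower but more precise summation
```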
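The reworked `nonzero` docstring points to `argwhere` for element-grouped indices, recommends boolean masks for extracting values, and deprecates calling `nonzero` on 0-d input (NumPy 1.17). In short:

```python
import numpy as np

x = np.array([[3, 0, 0],
              [0, 4, 0],
              [5, 6, 0]])

np.nonzero(x)        # tuple of index arrays, one per dimension (C order)
x[np.nonzero(x)]     # the non-zero values themselves
np.argwhere(x)       # indices grouped by element: one row per non-zero entry

# For 0-d input the docstring now recommends a boolean mask instead of
# nonzero, since it handles 0-d and n-d arrays uniformly.
s = np.array(3)
s[s != 0]            # array([3]); preferred over np.nonzero(s)
```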
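Finally, the long-deprecated `np.rank` wrapper is removed at the end of the diff. As its docstring noted, its role is covered by `ndim` (array dimensionality), which is distinct from the linear-algebra rank; a quick reminder of the two, using only public NumPy APIs:

```python
import numpy as np

a = np.eye(4)

np.ndim(a)                 # 2 -- what the removed `rank` used to return
np.linalg.matrix_rank(a)   # 4 -- the linear-algebra rank of the matrix
```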