Diffstat (limited to 'numpy/lib/function_base.py')
-rw-r--r--  numpy/lib/function_base.py | 319
1 file changed, 152 insertions, 167 deletions
diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index b9f3bbb16..4ebca6360 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -29,7 +29,6 @@ from numpy.core.multiarray import (
interp as compiled_interp, interp_complex as compiled_interp_complex
)
from numpy.core.umath import _add_newdoc_ufunc as add_newdoc_ufunc
-from numpy.compat import long
import builtins
@@ -605,14 +604,13 @@ def piecewise(x, condlist, funclist, *args, **kw):
)
y = zeros(x.shape, x.dtype)
- for k in range(n):
- item = funclist[k]
- if not isinstance(item, collections.abc.Callable):
- y[condlist[k]] = item
+ for cond, func in zip(condlist, funclist):
+ if not isinstance(func, collections.abc.Callable):
+ y[cond] = func
else:
- vals = x[condlist[k]]
+ vals = x[cond]
if vals.size > 0:
- y[condlist[k]] = item(vals, *args, **kw)
+ y[cond] = func(vals, *args, **kw)
return y
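
A quick doctest-style sketch of the rewritten loop's behavior: constant entries in funclist go through the ``y[cond] = func`` branch, while callables are applied to ``x[cond]`` (the example assumes the public np.piecewise API):

>>> x = np.linspace(-2, 2, 5)
>>> np.piecewise(x, [x < 0, x >= 0], [-1, lambda v: v])
array([-1., -1.,  0.,  1.,  2.])
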
@@ -683,8 +681,7 @@ def select(condlist, choicelist, default=0):
choicelist = np.broadcast_arrays(*choicelist)
# If cond array is not an ndarray in boolean format or scalar bool, abort.
- for i in range(len(condlist)):
- cond = condlist[i]
+ for i, cond in enumerate(condlist):
if cond.dtype.type is not np.bool_:
raise TypeError(
'invalid entry {} in condlist: should be boolean ndarray'.format(i))
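
For illustration, a hedged sketch of the validation above: every entry of condlist must already be a boolean array, and the index reported in the TypeError comes from the new enumerate loop (traceback abbreviated):

>>> x = np.arange(6)
>>> np.select([x < 3, x > 3], [x, x**2], default=-1)
array([ 0,  1,  2, -1, 16, 25])
>>> np.select([x], [x])
Traceback (most recent call last):
    ...
TypeError: invalid entry 0 in condlist: should be boolean ndarray
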
@@ -765,6 +762,30 @@ def copy(a, order='K', subok=False):
>>> x[0] == z[0]
False
+ Note that np.copy is a shallow copy and will not copy object
+ elements within arrays. This is mainly important for arrays
+ containing Python objects. The new array will contain the
+ same object, which may lead to surprises if that object can
+ be modified (is mutable):
+
+ >>> a = np.array([1, 'm', [2, 3, 4]], dtype=object)
+ >>> b = np.copy(a)
+ >>> b[2][0] = 10
+ >>> a
+ array([1, 'm', list([10, 3, 4])], dtype=object)
+
+ To ensure all elements within an ``object`` array are copied,
+ use `copy.deepcopy`:
+
+ >>> import copy
+ >>> a = np.array([1, 'm', [2, 3, 4]], dtype=object)
+ >>> c = copy.deepcopy(a)
+ >>> c[2][0] = 10
+ >>> c
+ array([1, 'm', list([10, 3, 4])], dtype=object)
+ >>> a
+ array([1, 'm', list([2, 3, 4])], dtype=object)
+
"""
return array(a, order=order, subok=subok, copy=True)
@@ -1309,6 +1330,10 @@ def interp(x, xp, fp, left=None, right=None, period=None):
If `xp` or `fp` are not 1-D sequences
If `period == 0`
+ See Also
+ --------
+ scipy.interpolate
+
Notes
-----
The x-coordinate sequence is expected to be increasing, but this is not
@@ -1430,6 +1455,11 @@ def angle(z, deg=False):
arctan2
absolute
+ Notes
+ -----
+ Although the angle of the complex number 0 is undefined, ``numpy.angle(0)``
+ returns the value 0.
+
Examples
--------
>>> np.angle([1.0, 1.0j, 1+1j]) # in radians
@@ -2022,7 +2052,7 @@ class vectorize:
self.pyfunc = pyfunc
self.cache = cache
self.signature = signature
- self._ufunc = None # Caching to improve default performance
+ self._ufunc = {} # Caching to improve default performance
if doc is None:
self.__doc__ = pyfunc.__doc__
@@ -2087,14 +2117,22 @@ class vectorize:
if self.otypes is not None:
otypes = self.otypes
- nout = len(otypes)
- # Note logic here: We only *use* self._ufunc if func is self.pyfunc
- # even though we set self._ufunc regardless.
- if func is self.pyfunc and self._ufunc is not None:
- ufunc = self._ufunc
+ # self._ufunc is a dictionary whose keys are the number of
+ # arguments (i.e. len(args)) and whose values are ufuncs created
+ # by frompyfunc. len(args) can be different for different calls if
+ # self.pyfunc has parameters with default values. We only use the
+ # cache when func is self.pyfunc, which occurs when the call uses
+ # only positional arguments and no arguments are excluded.
+
+ nin = len(args)
+ nout = len(self.otypes)
+ if func is not self.pyfunc or nin not in self._ufunc:
+ ufunc = frompyfunc(func, nin, nout)
else:
- ufunc = self._ufunc = frompyfunc(func, len(args), nout)
+ ufunc = None # We'll get it from self._ufunc
+ if func is self.pyfunc:
+ ufunc = self._ufunc.setdefault(nin, ufunc)
else:
# Get number of outputs and output types by calling the function on
# the first entries of args. We also cache the result to prevent
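
A hedged sketch of what the per-arity cache enables: a vectorized function with a default parameter can be called with different numbers of positional arguments, and each len(args) gets its own frompyfunc ufunc (the function f below is illustrative only):

>>> def f(a, b=10):
...     return a + b
>>> vf = np.vectorize(f, otypes=[int])
>>> vf([1, 2, 3])          # len(args) == 1 -> one cached ufunc
array([11, 12, 13])
>>> vf([1, 2, 3], 100)     # len(args) == 2 -> a second cached ufunc
array([101, 102, 103])
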
@@ -3220,7 +3258,6 @@ def kaiser(M, beta):
>>> plt.show()
"""
- from numpy.dual import i0
if M == 1:
return np.array([1.])
n = arange(0, M)
@@ -3234,10 +3271,17 @@ def _sinc_dispatcher(x):
@array_function_dispatch(_sinc_dispatcher)
def sinc(x):
- """
- Return the sinc function.
+ r"""
+ Return the normalized sinc function.
+
+ The sinc function is :math:`\sin(\pi x)/(\pi x)`.
+
+ .. note::
- The sinc function is :math:`\\sin(\\pi x)/(\\pi x)`.
+ Note the normalization factor of ``pi`` used in the definition.
+ This is the most commonly used definition in signal processing.
+ Use ``sinc(x / np.pi)`` to obtain the unnormalized sinc function
+ :math:`\sin(x)/(x)` that is more common in mathematics.
Parameters
----------
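
A minimal doctest sketch of the two definitions described in the note (values rounded for brevity):

>>> np.sinc(0)                         # defined as 1 at x = 0
1.0
>>> round(np.sinc(0.5), 5)             # normalized: sin(pi/2)/(pi/2) = 2/pi
0.63662
>>> round(np.sinc(0.5 / np.pi), 5)     # unnormalized: sin(0.5)/0.5
0.95885
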
@@ -3834,15 +3878,20 @@ def _quantile_is_valid(q):
return True
+def _lerp(a, b, t, out=None):
+ """ Linearly interpolate from a to b by a factor of t """
+ return add(a*(1 - t), b*t, out=out)
+
+
def _quantile_ureduce_func(a, q, axis=None, out=None, overwrite_input=False,
interpolation='linear', keepdims=False):
a = asarray(a)
- if q.ndim == 0:
- # Do not allow 0-d arrays because following code fails for scalar
- zerod = True
- q = q[None]
- else:
- zerod = False
+
+ # ufuncs cause 0d array results to decay to scalars (see gh-13105), which
+ # makes them problematic for __setitem__ and attribute access. As a
+ # workaround, we call this on the result of every ufunc on a possibly-0d
+ # array.
+ not_scalar = np.asanyarray
# prepare a for partitioning
if overwrite_input:
@@ -3859,9 +3908,14 @@ def _quantile_ureduce_func(a, q, axis=None, out=None, overwrite_input=False,
if axis is None:
axis = 0
- Nx = ap.shape[axis]
- indices = q * (Nx - 1)
+ if q.ndim > 2:
+ # The code below works fine for nd, but it might not have useful
+ # semantics. For now, keep the supported dimensions the same as
+ # before.
+ raise ValueError("q must be a scalar or 1d")
+ Nx = ap.shape[axis]
+ indices = not_scalar(q * (Nx - 1))
# round fractional indices according to interpolation method
if interpolation == 'lower':
indices = floor(indices).astype(intp)
@@ -3878,87 +3932,60 @@ def _quantile_ureduce_func(a, q, axis=None, out=None, overwrite_input=False,
"interpolation can only be 'linear', 'lower' 'higher', "
"'midpoint', or 'nearest'")
- n = np.array(False, dtype=bool) # check for nan's flag
- if np.issubdtype(indices.dtype, np.integer): # take the points along axis
- # Check if the array contains any nan's
- if np.issubdtype(a.dtype, np.inexact):
- indices = concatenate((indices, [-1]))
-
- ap.partition(indices, axis=axis)
- # ensure axis with q-th is first
- ap = np.moveaxis(ap, axis, 0)
- axis = 0
-
- # Check if the array contains any nan's
- if np.issubdtype(a.dtype, np.inexact):
- indices = indices[:-1]
- n = np.isnan(ap[-1:, ...])
+ # The dimensions of `q` are prepended to the output shape, so we need the
+ # axis being sampled from `ap` to be first.
+ ap = np.moveaxis(ap, axis, 0)
+ del axis
- if zerod:
- indices = indices[0]
- r = take(ap, indices, axis=axis, out=out)
+ if np.issubdtype(indices.dtype, np.integer):
+ # take the points along axis
- else: # weight the points above and below the indices
- indices_below = floor(indices).astype(intp)
- indices_above = indices_below + 1
- indices_above[indices_above > Nx - 1] = Nx - 1
-
- # Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact):
- indices_above = concatenate((indices_above, [-1]))
-
- weights_above = indices - indices_below
- weights_below = 1 - weights_above
+ # may contain nan, which would sort to the end
+ ap.partition(concatenate((indices.ravel(), [-1])), axis=0)
+ n = np.isnan(ap[-1])
+ else:
+ # cannot contain nan
+ ap.partition(indices.ravel(), axis=0)
+ n = np.array(False, dtype=bool)
- weights_shape = [1, ] * ap.ndim
- weights_shape[axis] = len(indices)
- weights_below.shape = weights_shape
- weights_above.shape = weights_shape
+ r = take(ap, indices, axis=0, out=out)
- ap.partition(concatenate((indices_below, indices_above)), axis=axis)
+ else:
+ # weight the points above and below the indices
- # ensure axis with q-th is first
- ap = np.moveaxis(ap, axis, 0)
- weights_below = np.moveaxis(weights_below, axis, 0)
- weights_above = np.moveaxis(weights_above, axis, 0)
- axis = 0
+ indices_below = not_scalar(floor(indices)).astype(intp)
+ indices_above = not_scalar(indices_below + 1)
+ indices_above[indices_above > Nx - 1] = Nx - 1
- # Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact):
- indices_above = indices_above[:-1]
- n = np.isnan(ap[-1:, ...])
-
- x1 = take(ap, indices_below, axis=axis) * weights_below
- x2 = take(ap, indices_above, axis=axis) * weights_above
+ # may contain nan, which would sort to the end
+ ap.partition(concatenate((
+ indices_below.ravel(), indices_above.ravel(), [-1]
+ )), axis=0)
+ n = np.isnan(ap[-1])
+ else:
+ # cannot contain nan
+ ap.partition(concatenate((
+ indices_below.ravel(), indices_above.ravel()
+ )), axis=0)
+ n = np.array(False, dtype=bool)
- # ensure axis with q-th is first
- x1 = np.moveaxis(x1, axis, 0)
- x2 = np.moveaxis(x2, axis, 0)
+ weights_shape = indices.shape + (1,) * (ap.ndim - 1)
+ weights_above = not_scalar(indices - indices_below).reshape(weights_shape)
- if zerod:
- x1 = x1.squeeze(0)
- x2 = x2.squeeze(0)
+ x_below = take(ap, indices_below, axis=0)
+ x_above = take(ap, indices_above, axis=0)
- if out is not None:
- r = add(x1, x2, out=out)
- else:
- r = add(x1, x2)
+ r = _lerp(x_below, x_above, weights_above, out=out)
+ # if any slice contained a nan, then all results on that slice are also nan
if np.any(n):
- if zerod:
- if ap.ndim == 1:
- if out is not None:
- out[...] = a.dtype.type(np.nan)
- r = out
- else:
- r = a.dtype.type(np.nan)
- else:
- r[..., n.squeeze(0)] = a.dtype.type(np.nan)
+ if r.ndim == 0 and out is None:
+ # can't write to a scalar
+ r = a.dtype.type(np.nan)
else:
- if r.ndim == 1:
- r[:] = a.dtype.type(np.nan)
- else:
- r[..., n.repeat(q.size, 0)] = a.dtype.type(np.nan)
+ r[..., n] = a.dtype.type(np.nan)
return r
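
A hedged worked example of the linear-interpolation path above: for a = [1, 2, 3, 4] and q = 0.5, the fractional index is 0.5 * (4 - 1) = 1.5, so indices_below/indices_above select 2 and 3 and _lerp combines them with weight 0.5:

>>> np.quantile([1, 2, 3, 4], 0.5)    # _lerp(2, 3, 0.5) = 2*0.5 + 3*0.5
2.5
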
@@ -4207,12 +4234,17 @@ def delete(arr, obj, axis=None):
Parameters
----------
arr : array_like
- Input array.
+ Input array.
obj : slice, int or array of ints
- Indicate indices of sub-arrays to remove along the specified axis.
+ Indicate indices of sub-arrays to remove along the specified axis.
+
+ .. versionchanged:: 1.19.0
+ Boolean indices are now treated as a mask of elements to remove,
+ rather than being cast to the integers 0 and 1.
+
axis : int, optional
- The axis along which to delete the subarray defined by `obj`.
- If `axis` is None, `obj` is applied to the flattened array.
+ The axis along which to delete the subarray defined by `obj`.
+ If `axis` is None, `obj` is applied to the flattened array.
Returns
-------
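
A small illustration of the 1.19.0 behavior documented above, where a boolean obj is a mask of elements to remove rather than a sequence of 0/1 indices:

>>> arr = np.array([1, 2, 3, 4])
>>> np.delete(arr, np.array([True, False, True, False]))
array([2, 4])
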
@@ -4270,20 +4302,11 @@ def delete(arr, obj, axis=None):
if axis is None:
if ndim != 1:
arr = arr.ravel()
+ # needed for np.matrix, which is still not 1d after being ravelled
ndim = arr.ndim
- axis = -1
-
- if ndim == 0:
- # 2013-09-24, 1.9
- warnings.warn(
- "in the future the special handling of scalars will be removed "
- "from delete and raise an error", DeprecationWarning, stacklevel=3)
- if wrap:
- return wrap(arr)
- else:
- return arr.copy(order=arrorder)
-
- axis = normalize_axis_index(axis, ndim)
+ axis = ndim - 1
+ else:
+ axis = normalize_axis_index(axis, ndim)
slobj = [slice(None)]*ndim
N = arr.shape[axis]
@@ -4339,18 +4362,8 @@ def delete(arr, obj, axis=None):
else:
return new
- _obj = obj
- obj = np.asarray(obj)
- # After removing the special handling of booleans and out of
- # bounds values, the conversion to the array can be removed.
- if obj.dtype == bool:
- warnings.warn("in the future insert will treat boolean arrays and "
- "array-likes as boolean index instead of casting it "
- "to integer", FutureWarning, stacklevel=3)
- obj = obj.astype(intp)
- if isinstance(_obj, (int, long, integer)):
+ if isinstance(obj, (int, integer)) and not isinstance(obj, bool):
# optimization for a single value
- obj = obj.item()
if (obj < -N or obj >= N):
raise IndexError(
"index %i is out of bounds for axis %i with "
@@ -4366,35 +4379,23 @@ def delete(arr, obj, axis=None):
slobj2[axis] = slice(obj+1, None)
new[tuple(slobj)] = arr[tuple(slobj2)]
else:
+ _obj = obj
+ obj = np.asarray(obj)
if obj.size == 0 and not isinstance(_obj, np.ndarray):
obj = obj.astype(intp)
- if not np.can_cast(obj, intp, 'same_kind'):
- # obj.size = 1 special case always failed and would just
- # give superfluous warnings.
- # 2013-09-24, 1.9
- warnings.warn(
- "using a non-integer array as obj in delete will result in an "
- "error in the future", DeprecationWarning, stacklevel=3)
- obj = obj.astype(intp)
- keep = ones(N, dtype=bool)
- # Test if there are out of bound indices, this is deprecated
- inside_bounds = (obj < N) & (obj >= -N)
- if not inside_bounds.all():
- # 2013-09-24, 1.9
- warnings.warn(
- "in the future out of bounds indices will raise an error "
- "instead of being ignored by `numpy.delete`.",
- DeprecationWarning, stacklevel=3)
- obj = obj[inside_bounds]
- positive_indices = obj >= 0
- if not positive_indices.all():
- warnings.warn(
- "in the future negative indices will not be ignored by "
- "`numpy.delete`.", FutureWarning, stacklevel=3)
- obj = obj[positive_indices]
+ if obj.dtype == bool:
+ if obj.shape != (N,):
+ raise ValueError('boolean array argument obj to delete '
+ 'must be one dimensional and match the axis '
+ 'length of {}'.format(N))
+
+ # optimization, the other branch is slower
+ keep = ~obj
+ else:
+ keep = ones(N, dtype=bool)
+ keep[obj,] = False
- keep[obj, ] = False
slobj[axis] = keep
new = arr[tuple(slobj)]
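
For reference, a hedged sketch of what the keep-mask branch computes for integer indices, expressed with plain NumPy indexing:

>>> arr = np.arange(5)
>>> np.delete(arr, [0, 2])
array([1, 3, 4])
>>> keep = np.ones(5, dtype=bool)     # equivalent keep-mask construction
>>> keep[[0, 2]] = False
>>> arr[keep]
array([1, 3, 4])
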
@@ -4510,19 +4511,9 @@ def insert(arr, obj, values, axis=None):
if axis is None:
if ndim != 1:
arr = arr.ravel()
+ # needed for np.matrix, which is still not 1d after being ravelled
ndim = arr.ndim
axis = ndim - 1
- elif ndim == 0:
- # 2013-09-24, 1.9
- warnings.warn(
- "in the future the special handling of scalars will be removed "
- "from insert and raise an error", DeprecationWarning, stacklevel=3)
- arr = arr.copy(order=arrorder)
- arr[...] = values
- if wrap:
- return wrap(arr)
- else:
- return arr
else:
axis = normalize_axis_index(axis, ndim)
slobj = [slice(None)]*ndim
@@ -4531,12 +4522,13 @@ def insert(arr, obj, values, axis=None):
if isinstance(obj, slice):
# turn it into a range object
- indices = arange(*obj.indices(N), **{'dtype': intp})
+ indices = arange(*obj.indices(N), dtype=intp)
else:
# need to copy obj, because indices will be changed in-place
indices = np.array(obj)
if indices.dtype == bool:
# See also delete
+ # 2012-10-11, NumPy 1.8
warnings.warn(
"in the future insert will treat boolean arrays and "
"array-likes as a boolean index instead of casting it to "
@@ -4586,13 +4578,6 @@ def insert(arr, obj, values, axis=None):
# Can safely cast the empty list to intp
indices = indices.astype(intp)
- if not np.can_cast(indices, intp, 'same_kind'):
- # 2013-09-24, 1.9
- warnings.warn(
- "using a non-integer array as obj in insert will result in an "
- "error in the future", DeprecationWarning, stacklevel=3)
- indices = indices.astype(intp)
-
indices[indices < 0] += N
numnew = len(indices)