Diffstat (limited to 'numpy/lib')
-rw-r--r--   numpy/lib/__init__.py                  19
-rw-r--r--   numpy/lib/__init__.pyi                  1
-rw-r--r--   numpy/lib/_iotools.py                   4
-rw-r--r--   numpy/lib/arraysetops.py               20
-rw-r--r--   numpy/lib/format.py                    36
-rw-r--r--   numpy/lib/function_base.py            172
-rw-r--r--   numpy/lib/histograms.py                 2
-rw-r--r--   numpy/lib/index_tricks.py             103
-rw-r--r--   numpy/lib/index_tricks.pyi              2
-rw-r--r--   numpy/lib/mixins.py                     1
-rw-r--r--   numpy/lib/nanfunctions.py              24
-rw-r--r--   numpy/lib/npyio.py                     79
-rw-r--r--   numpy/lib/npyio.pyi                     5
-rw-r--r--   numpy/lib/polynomial.py                 4
-rw-r--r--   numpy/lib/shape_base.py                 8
-rw-r--r--   numpy/lib/tests/test_arraysetops.py    39
-rw-r--r--   numpy/lib/tests/test_format.py          3
-rw-r--r--   numpy/lib/tests/test_function_base.py 187
-rw-r--r--   numpy/lib/tests/test_io.py             64
-rw-r--r--   numpy/lib/tests/test_loadtxt.py        37
-rw-r--r--   numpy/lib/tests/test_nanfunctions.py    2
-rw-r--r--   numpy/lib/tests/test_shape_base.py      4
-rw-r--r--   numpy/lib/tests/test_twodim_base.py    11
-rw-r--r--   numpy/lib/tests/test_type_check.py      2
-rw-r--r--   numpy/lib/tests/test_ufunclike.py       6
-rw-r--r--   numpy/lib/twodim_base.py               93
-rw-r--r--   numpy/lib/type_check.py                12
-rw-r--r--   numpy/lib/ufunclike.py                 60
-rw-r--r--   numpy/lib/utils.py                     67
29 files changed, 742 insertions, 325 deletions
diff --git a/numpy/lib/__init__.py b/numpy/lib/__init__.py
index 58166d4b1..d3cc9fee4 100644
--- a/numpy/lib/__init__.py
+++ b/numpy/lib/__init__.py
@@ -11,7 +11,6 @@ Most contains basic functions that are used by several submodules and are
useful to have in the main name-space.
"""
-import math
from numpy.version import version as __version__
@@ -58,7 +57,7 @@ from .arraypad import *
from ._version import *
from numpy.core._multiarray_umath import tracemalloc_domain
-__all__ = ['emath', 'math', 'tracemalloc_domain', 'Arrayterator']
+__all__ = ['emath', 'tracemalloc_domain', 'Arrayterator']
__all__ += type_check.__all__
__all__ += index_tricks.__all__
__all__ += function_base.__all__
@@ -77,3 +76,19 @@ __all__ += histograms.__all__
from numpy._pytesttester import PytestTester
test = PytestTester(__name__)
del PytestTester
+
+def __getattr__(attr):
+ # Warn for deprecated attributes
+ import math
+ import warnings
+
+ if attr == 'math':
+ warnings.warn(
+ "`np.lib.math` is a deprecated alias for the standard library "
+ "`math` module (Deprecated Numpy 1.25). Replace usages of "
+ "`numpy.lib.math` with `math`", DeprecationWarning, stacklevel=2)
+ return math
+ else:
+ raise AttributeError("module {!r} has no attribute "
+ "{!r}".format(__name__, attr))
+
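As a side note (not part of the patch), a minimal sketch of what the new
module-level __getattr__ gives users, assuming NumPy 1.25 behavior: the
attribute still resolves, but now emits a DeprecationWarning.

    import warnings
    import numpy as np

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        m = np.lib.math          # resolved via numpy.lib.__getattr__
    print(m.pi)                  # the standard-library math module
    print(caught[0].category)    # <class 'DeprecationWarning'>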
diff --git a/numpy/lib/__init__.pyi b/numpy/lib/__init__.pyi
index 1fa2d226e..d3553bbcc 100644
--- a/numpy/lib/__init__.pyi
+++ b/numpy/lib/__init__.pyi
@@ -64,7 +64,6 @@ from numpy.lib.function_base import (
digitize as digitize,
cov as cov,
corrcoef as corrcoef,
- msort as msort,
median as median,
sinc as sinc,
hamming as hamming,
diff --git a/numpy/lib/_iotools.py b/numpy/lib/_iotools.py
index 4a5ac1285..534d1b3ee 100644
--- a/numpy/lib/_iotools.py
+++ b/numpy/lib/_iotools.py
@@ -513,8 +513,8 @@ class StringConverter:
(nx.complexfloating, complex, nx.nan + 0j),
# Last, try with the string types (must be last, because
# `_mapper[-1]` is used as default in some cases)
- (nx.unicode_, asunicode, '???'),
- (nx.string_, asbytes, '???'),
+ (nx.str_, asunicode, '???'),
+ (nx.bytes_, asbytes, '???'),
])
@classmethod
diff --git a/numpy/lib/arraysetops.py b/numpy/lib/arraysetops.py
index cf5f47a82..300bbda26 100644
--- a/numpy/lib/arraysetops.py
+++ b/numpy/lib/arraysetops.py
@@ -649,8 +649,24 @@ def in1d(ar1, ar2, assume_unique=False, invert=False, *, kind=None):
ar2_range = int(ar2_max) - int(ar2_min)
# Constraints on whether we can actually use the table method:
- range_safe_from_overflow = ar2_range < np.iinfo(ar2.dtype).max
+ # 1. Assert memory usage is not too large
below_memory_constraint = ar2_range <= 6 * (ar1.size + ar2.size)
+ # 2. Check overflows for (ar2 - ar2_min); dtype=ar2.dtype
+ range_safe_from_overflow = ar2_range <= np.iinfo(ar2.dtype).max
+ # 3. Check overflows for (ar1 - ar2_min); dtype=ar1.dtype
+ if ar1.size > 0:
+ ar1_min = np.min(ar1)
+ ar1_max = np.max(ar1)
+
+ # After masking, the range of ar1 is guaranteed to be
+ # within the range of ar2:
+ ar1_upper = min(int(ar1_max), int(ar2_max))
+ ar1_lower = max(int(ar1_min), int(ar2_min))
+
+ range_safe_from_overflow &= all((
+ ar1_upper - int(ar2_min) <= np.iinfo(ar1.dtype).max,
+ ar1_lower - int(ar2_min) >= np.iinfo(ar1.dtype).min
+ ))
# Optimal performance is for approximately
# log10(size) > (log10(range) - 2.27) / 0.927.
@@ -687,7 +703,7 @@ def in1d(ar1, ar2, assume_unique=False, invert=False, *, kind=None):
elif kind == 'table': # not range_safe_from_overflow
raise RuntimeError(
"You have specified kind='table', "
- "but the range of values in `ar2` exceeds the "
+ "but the range of values in `ar2` or `ar1` exceed the "
"maximum integer of the datatype. "
"Please set `kind` to None or 'sort'."
)
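A minimal sketch of the effect of the new check, mirroring the mixed-dtype
test added below (assuming NumPy 1.25 behavior): shifting int8 values of
`ar1` by `ar2_min` taken from an int16 `ar2` would overflow int8, so
kind="table" now raises instead of silently returning wrong results.

    import numpy as np

    ar1 = np.array([0, 1], dtype=np.int8)
    ar2 = np.array([-128, 0, 127], dtype=np.int16)
    try:
        np.in1d(ar1, ar2, kind="table")    # ar1 - ar2_min overflows int8
    except RuntimeError as exc:
        print(exc)                         # suggests kind=None or 'sort'
    print(np.in1d(ar1, ar2, kind="sort"))  # [ True False]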
diff --git a/numpy/lib/format.py b/numpy/lib/format.py
index 54fd0b0bc..ef50fb19d 100644
--- a/numpy/lib/format.py
+++ b/numpy/lib/format.py
@@ -437,15 +437,15 @@ def _write_array_header(fp, d, version=None):
header.append("'%s': %s, " % (key, repr(value)))
header.append("}")
header = "".join(header)
-
+
# Add some spare space so that the array header can be modified in-place
# when changing the array size, e.g. when growing it by appending data at
- # the end.
+ # the end.
shape = d['shape']
header += " " * ((GROWTH_AXIS_MAX_DIGITS - len(repr(
shape[-1 if d['fortran_order'] else 0]
))) if len(shape) > 0 else 0)
-
+
if version is None:
header = _wrap_header_guess_version(header)
else:
@@ -505,7 +505,7 @@ def read_array_header_1_0(fp, max_header_size=_MAX_HEADER_SIZE):
max_header_size : int, optional
Maximum allowed size of the header. Large headers may not be safe
to load securely and thus require explicitly passing a larger value.
- See :py:meth:`ast.literal_eval()` for details.
+ See :py:func:`ast.literal_eval()` for details.
Raises
------
@@ -532,7 +532,7 @@ def read_array_header_2_0(fp, max_header_size=_MAX_HEADER_SIZE):
max_header_size : int, optional
Maximum allowed size of the header. Large headers may not be safe
to load securely and thus require explicitly passing a larger value.
- See :py:meth:`ast.literal_eval()` for details.
+ See :py:func:`ast.literal_eval()` for details.
Returns
-------
@@ -623,13 +623,27 @@ def _read_array_header(fp, version, max_header_size=_MAX_HEADER_SIZE):
# "descr" : dtype.descr
# Versions (2, 0) and (1, 0) could have been created by a Python 2
# implementation before header filtering was implemented.
- if version <= (2, 0):
- header = _filter_header(header)
+ #
+ # For performance reasons, we try without _filter_header first though
try:
d = safe_eval(header)
except SyntaxError as e:
- msg = "Cannot parse header: {!r}"
- raise ValueError(msg.format(header)) from e
+ if version <= (2, 0):
+ header = _filter_header(header)
+ try:
+ d = safe_eval(header)
+ except SyntaxError as e2:
+ msg = "Cannot parse header: {!r}"
+ raise ValueError(msg.format(header)) from e2
+ else:
+ warnings.warn(
+ "Reading `.npy` or `.npz` file required additional "
+ "header parsing as it was created on Python 2. Save the "
+ "file again to speed up loading and avoid this warning.",
+ UserWarning, stacklevel=4)
+ else:
+ msg = "Cannot parse header: {!r}"
+ raise ValueError(msg.format(header)) from e
if not isinstance(d, dict):
msg = "Header is not a dictionary: {!r}"
raise ValueError(msg.format(d))
@@ -750,7 +764,7 @@ def read_array(fp, allow_pickle=False, pickle_kwargs=None, *,
max_header_size : int, optional
Maximum allowed size of the header. Large headers may not be safe
to load securely and thus require explicitly passing a larger value.
- See :py:meth:`ast.literal_eval()` for details.
+ See :py:func:`ast.literal_eval()` for details.
This option is ignored when `allow_pickle` is passed. In that case
the file is by definition trusted and the limit is unnecessary.
@@ -869,7 +883,7 @@ def open_memmap(filename, mode='r+', dtype=None, shape=None,
max_header_size : int, optional
Maximum allowed size of the header. Large headers may not be safe
to load securely and thus require explicitly passing a larger value.
- See :py:meth:`ast.literal_eval()` for details.
+ See :py:func:`ast.literal_eval()` for details.
Returns
-------
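To see the new header-parsing fallback in action, one can load the
Python-2-era test file that ships with NumPy (a sketch; assumes the test
data directory is installed alongside numpy.lib.tests):

    import os
    import warnings
    import numpy as np
    import numpy.lib.tests as nlt

    path = os.path.join(os.path.dirname(nlt.__file__),
                        'data', 'win64python2.npy')
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        data = np.load(path)
    print(data)                # [1. 1.]
    print(caught[0].category)  # UserWarning suggesting to re-save the file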
diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index 5e666c17e..02e141920 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -24,7 +24,7 @@ from numpy.core import overrides
from numpy.core.function_base import add_newdoc
from numpy.lib.twodim_base import diag
from numpy.core.multiarray import (
- _insert, add_docstring, bincount, normalize_axis_index, _monotonicity,
+ _place, add_docstring, bincount, normalize_axis_index, _monotonicity,
interp as compiled_interp, interp_complex as compiled_interp_complex
)
from numpy.core.umath import _add_newdoc_ufunc as add_newdoc_ufunc
@@ -161,6 +161,8 @@ def rot90(m, k=1, axes=(0, 1)):
Rotate an array by 90 degrees in the plane specified by axes.
Rotation direction is from the first towards the second axis.
+ This means for a 2D array with the default `k` and `axes`, the
+ rotation will be counterclockwise.
Parameters
----------
@@ -1309,6 +1311,8 @@ def gradient(f, *varargs, axis=None, edge_order=1):
if len_axes == 1:
return outvals[0]
+ elif np._using_numpy2_behavior():
+ return tuple(outvals)
else:
return outvals
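A small illustration of the return-type change (a sketch;
np._using_numpy2_behavior is a private NumPy 1.25 flag controlled by the
NumPy-2 behavior opt-in):

    import numpy as np

    res = np.gradient(np.arange(6.0).reshape(2, 3))  # two axes, two arrays
    print(type(res))  # tuple under the NumPy-2 opt-in, list otherwise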
@@ -1947,11 +1951,7 @@ def place(arr, mask, vals):
[44, 55, 44]])
"""
- if not isinstance(arr, np.ndarray):
- raise TypeError("argument 1 must be numpy.ndarray, "
- "not {name}".format(name=type(arr).__name__))
-
- return _insert(arr, mask, vals)
+ return _place(arr, mask, vals)
def disp(mesg, device=None, linefeed=True):
@@ -2117,10 +2117,10 @@ def _create_arrays(broadcast_shape, dim_sizes, list_of_core_dims, dtypes,
@set_module('numpy')
class vectorize:
"""
- vectorize(pyfunc, otypes=None, doc=None, excluded=None, cache=False,
- signature=None)
+ vectorize(pyfunc=np._NoValue, otypes=None, doc=None, excluded=None,
+ cache=False, signature=None)
- Generalized function class.
+ Returns an object that acts like pyfunc, but takes arrays as input.
Define a vectorized function which takes a nested sequence of objects or
numpy arrays as inputs and returns a single numpy array or a tuple of numpy
@@ -2134,8 +2134,9 @@ class vectorize:
Parameters
----------
- pyfunc : callable
+ pyfunc : callable, optional
A python function or method.
+ Can be omitted to produce a decorator with keyword arguments.
otypes : str or list of dtypes, optional
The output data type. It must be specified as either a string of
typecode characters or a list of data type specifiers. There should
@@ -2167,8 +2168,9 @@ class vectorize:
Returns
-------
- vectorized : callable
- Vectorized function.
+ out : callable
+ A vectorized function if ``pyfunc`` was provided,
+ a decorator otherwise.
See Also
--------
@@ -2265,18 +2267,44 @@ class vectorize:
[0., 0., 1., 2., 1., 0.],
[0., 0., 0., 1., 2., 1.]])
+ Decorator syntax is supported. The decorator can be called as
+ a function to provide keyword arguments.
+
+ >>> @np.vectorize
+ ... def identity(x):
+ ... return x
+ ...
+ >>> identity([0, 1, 2])
+ array([0, 1, 2])
+
+ >>> @np.vectorize(otypes=[float])
+ ... def as_float(x):
+ ... return x
+ ...
+ >>> as_float([0, 1, 2])
+ array([0., 1., 2.])
"""
- def __init__(self, pyfunc, otypes=None, doc=None, excluded=None,
- cache=False, signature=None):
+ def __init__(self, pyfunc=np._NoValue, otypes=None, doc=None,
+ excluded=None, cache=False, signature=None):
+
+ if (pyfunc != np._NoValue) and (not callable(pyfunc)):
+ # Splitting the error message to keep
+ # the length below 79 characters.
+ part1 = "When used as a decorator, "
+ part2 = "only accepts keyword arguments."
+ raise TypeError(part1 + part2)
+
self.pyfunc = pyfunc
self.cache = cache
self.signature = signature
- self._ufunc = {} # Caching to improve default performance
+ if pyfunc != np._NoValue and hasattr(pyfunc, '__name__'):
+ self.__name__ = pyfunc.__name__
- if doc is None:
+ self._ufunc = {} # Caching to improve default performance
+ self._doc = None
+ self.__doc__ = doc
+ if doc is None and hasattr(pyfunc, '__doc__'):
self.__doc__ = pyfunc.__doc__
else:
- self.__doc__ = doc
+ self._doc = doc
if isinstance(otypes, str):
for char in otypes:
@@ -2298,7 +2326,15 @@ class vectorize:
else:
self._in_and_out_core_dims = None
- def __call__(self, *args, **kwargs):
+ def _init_stage_2(self, pyfunc, *args, **kwargs):
+ self.__name__ = pyfunc.__name__
+ self.pyfunc = pyfunc
+ if self._doc is None:
+ self.__doc__ = pyfunc.__doc__
+ else:
+ self.__doc__ = self._doc
+
+ def _call_as_normal(self, *args, **kwargs):
"""
Return arrays with the results of `pyfunc` broadcast (vectorized) over
`args` and `kwargs` not in `excluded`.
@@ -2328,6 +2364,13 @@ class vectorize:
return self._vectorize_call(func=func, args=vargs)
+ def __call__(self, *args, **kwargs):
+ if self.pyfunc is np._NoValue:
+ self._init_stage_2(*args, **kwargs)
+ return self
+
+ return self._call_as_normal(*args, **kwargs)
+
def _get_ufunc_and_otypes(self, func, args):
"""Return (ufunc, otypes)."""
# frompyfunc will fail if args is empty
@@ -2693,7 +2736,7 @@ def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None,
if fact <= 0:
warnings.warn("Degrees of freedom <= 0 for slice",
- RuntimeWarning, stacklevel=3)
+ RuntimeWarning, stacklevel=2)
fact = 0.0
X -= avg[:, None]
@@ -2842,7 +2885,7 @@ def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, ddof=np._NoValue, *,
if bias is not np._NoValue or ddof is not np._NoValue:
# 2015-03-15, 1.10
warnings.warn('bias and ddof have no effect and are deprecated',
- DeprecationWarning, stacklevel=3)
+ DeprecationWarning, stacklevel=2)
c = cov(x, y, rowvar, dtype=dtype)
try:
d = diag(c)
@@ -2956,10 +2999,15 @@ def blackman(M):
>>> plt.show()
"""
+ # Ensures at least float64 via 0.0. M should be an integer, but conversion
+ # to double is safe for the supported range of window lengths.
+ values = np.array([0.0, M])
+ M = values[1]
+
if M < 1:
- return array([], dtype=np.result_type(M, 0.0))
+ return array([], dtype=values.dtype)
if M == 1:
- return ones(1, dtype=np.result_type(M, 0.0))
+ return ones(1, dtype=values.dtype)
n = arange(1-M, M, 2)
return 0.42 + 0.5*cos(pi*n/(M-1)) + 0.08*cos(2.0*pi*n/(M-1))
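The practical upshot for blackman and the other window functions changed
below (a sketch, assuming NumPy 1.25 behavior): the result dtype is now at
least float64 no matter how M is typed.

    import numpy as np

    print(np.blackman(5).dtype)              # float64
    print(np.blackman(np.float32(5)).dtype)  # float64, no longer float32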
@@ -3064,10 +3112,15 @@ def bartlett(M):
>>> plt.show()
"""
+ # Ensures at least float64 via 0.0. M should be an integer, but conversion
+ # to double is safe for the supported range of window lengths.
+ values = np.array([0.0, M])
+ M = values[1]
+
if M < 1:
- return array([], dtype=np.result_type(M, 0.0))
+ return array([], dtype=values.dtype)
if M == 1:
- return ones(1, dtype=np.result_type(M, 0.0))
+ return ones(1, dtype=values.dtype)
n = arange(1-M, M, 2)
return where(less_equal(n, 0), 1 + n/(M-1), 1 - n/(M-1))
@@ -3168,10 +3221,15 @@ def hanning(M):
>>> plt.show()
"""
+ # Ensures at least float64 via 0.0. M should be an integer, but conversion
+ # to double is safe for the supported range of window lengths.
+ values = np.array([0.0, M])
+ M = values[1]
+
if M < 1:
- return array([], dtype=np.result_type(M, 0.0))
+ return array([], dtype=values.dtype)
if M == 1:
- return ones(1, dtype=np.result_type(M, 0.0))
+ return ones(1, dtype=values.dtype)
n = arange(1-M, M, 2)
return 0.5 + 0.5*cos(pi*n/(M-1))
@@ -3268,10 +3326,15 @@ def hamming(M):
>>> plt.show()
"""
+ # Ensures at least float64 via 0.0. M should be an integer, but conversion
+ # to double is safe for the supported range of window lengths.
+ values = np.array([0.0, M])
+ M = values[1]
+
if M < 1:
- return array([], dtype=np.result_type(M, 0.0))
+ return array([], dtype=values.dtype)
if M == 1:
- return ones(1, dtype=np.result_type(M, 0.0))
+ return ones(1, dtype=values.dtype)
n = arange(1-M, M, 2)
return 0.54 + 0.46*cos(pi*n/(M-1))
@@ -3547,11 +3610,19 @@ def kaiser(M, beta):
>>> plt.show()
"""
+ # Ensures at least float64 via 0.0. M should be an integer, but conversion
+ # to double is safe for the supported range. (This simplifies the old
+ # result_type logic: with 0.0 strongly typed, result_type is less order
+ # sensitive, but that mainly matters for integers anyway.)
+ values = np.array([0.0, M, beta])
+ M = values[1]
+ beta = values[2]
+
if M == 1:
- return np.ones(1, dtype=np.result_type(M, 0.0))
+ return np.ones(1, dtype=values.dtype)
n = arange(0, M)
alpha = (M-1)/2.0
- return i0(beta * sqrt(1-((n-alpha)/alpha)**2.0))/i0(float(beta))
+ return i0(beta * sqrt(1-((n-alpha)/alpha)**2.0))/i0(beta)
def _sinc_dispatcher(x):
@@ -3682,7 +3753,7 @@ def msort(a):
warnings.warn(
"msort is deprecated, use np.sort(a, axis=0) instead",
DeprecationWarning,
- stacklevel=3,
+ stacklevel=2,
)
b = array(a, subok=True, copy=True)
b.sort(0)
@@ -3937,8 +4008,8 @@ def percentile(a,
a : array_like of real numbers
Input array or object that can be converted to an array.
q : array_like of float
- Percentile or sequence of percentiles to compute, which must be between
- 0 and 100 inclusive.
+ Percentage or sequence of percentages for the percentiles to compute.
+ Values must be between 0 and 100 inclusive.
axis : {int, tuple of int, None}, optional
Axis or axes along which the percentiles are computed. The
default is to compute the percentile(s) along a flattened
@@ -4185,7 +4256,8 @@ def percentile(a,
xlabel='Percentile',
ylabel='Estimated percentile value',
yticks=a)
- ax.legend()
+ ax.legend(bbox_to_anchor=(1.03, 1))
+ plt.tight_layout()
plt.show()
References
@@ -4236,8 +4308,8 @@ def quantile(a,
a : array_like of real numbers
Input array or object that can be converted to an array.
q : array_like of float
- Quantile or sequence of quantiles to compute, which must be between
- 0 and 1 inclusive.
+ Probability or sequence of probabilities for the quantiles to compute.
+ Values must be between 0 and 1 inclusive.
axis : {int, tuple of int, None}, optional
Axis or axes along which the quantiles are computed. The default is
to compute the quantile(s) along a flattened version of the array.
@@ -4291,8 +4363,8 @@ def quantile(a,
Returns
-------
quantile : scalar or ndarray
- If `q` is a single quantile and `axis=None`, then the result
- is a scalar. If multiple quantiles are given, first axis of
+ If `q` is a single probability and `axis=None`, then the result
+ is a scalar. If multiple probability levels are given, first axis of
the result corresponds to the quantiles. The other axes are
the axes that remain after the reduction of `a`. If the input
contains integers or floats smaller than ``float64``, the output
@@ -4909,6 +4981,24 @@ def trapz(y, x=None, dx=1.0, axis=-1):
return ret
+# Because of __array_function__ we wrap everything into a C callable, so
+# `trapz` has no `__code__` or the other attributes of normal Python funcs.
+# SciPy, however, tries to "clone" `trapz` into a new Python function, which
+# requires `__code__` and a few other attributes. So we create a dummy clone
+# and copy over its attributes, allowing SciPy <= 1.10 to work:
+# https://github.com/scipy/scipy/issues/17811
+assert not hasattr(trapz, "__code__")
+
+def _fake_trapz(y, x=None, dx=1.0, axis=-1):
+ return trapz(y, x=x, dx=dx, axis=axis)
+
+
+trapz.__code__ = _fake_trapz.__code__
+trapz.__globals__ = _fake_trapz.__globals__
+trapz.__defaults__ = _fake_trapz.__defaults__
+trapz.__closure__ = _fake_trapz.__closure__
+trapz.__kwdefaults__ = _fake_trapz.__kwdefaults__
+
+
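A hypothetical illustration of the SciPy-style cloning this enables (the
snippet just mirrors what SciPy <= 1.10 does internally; it is not part of
NumPy's API):

    import types
    import numpy as np

    # Rebuild a Python function from the attributes the patch attaches.
    clone = types.FunctionType(
        np.trapz.__code__, np.trapz.__globals__, name="trapz",
        argdefs=np.trapz.__defaults__, closure=np.trapz.__closure__)
    clone.__kwdefaults__ = np.trapz.__kwdefaults__
    print(clone([1, 2, 3]))  # 4.0, same as np.trapz([1, 2, 3])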
def _meshgrid_dispatcher(*xi, copy=None, sparse=None, indexing=None):
return xi
@@ -4917,7 +5007,7 @@ def _meshgrid_dispatcher(*xi, copy=None, sparse=None, indexing=None):
@array_function_dispatch(_meshgrid_dispatcher)
def meshgrid(*xi, copy=True, sparse=False, indexing='xy'):
"""
- Return coordinate matrices from coordinate vectors.
+ Return a list of coordinate matrices from coordinate vectors.
Make N-D coordinate arrays for vectorized evaluations of
N-D scalar/vector fields over N-D grids, given
@@ -4958,7 +5048,7 @@ def meshgrid(*xi, copy=True, sparse=False, indexing='xy'):
Returns
-------
- X1, X2,..., XN : ndarray
+ X1, X2,..., XN : list of ndarrays
For vectors `x1`, `x2`,..., `xn` with lengths ``Ni=len(xi)``,
returns ``(N1, N2, N3,..., Nn)`` shaped arrays if indexing='ij'
or ``(N2, N1, N3,..., Nn)`` shaped arrays if indexing='xy'
@@ -5395,7 +5485,7 @@ def insert(arr, obj, values, axis=None):
warnings.warn(
"in the future insert will treat boolean arrays and "
"array-likes as a boolean index instead of casting it to "
- "integer", FutureWarning, stacklevel=3)
+ "integer", FutureWarning, stacklevel=2)
indices = indices.astype(intp)
# Code after warning period:
#if obj.ndim != 1:
diff --git a/numpy/lib/histograms.py b/numpy/lib/histograms.py
index 0dfa7b4c1..35745e6dd 100644
--- a/numpy/lib/histograms.py
+++ b/numpy/lib/histograms.py
@@ -981,7 +981,7 @@ def histogramdd(sample, bins=10, range=None, density=None, weights=None):
if M != D:
raise ValueError(
'The dimension of bins must be equal to the dimension of the '
- ' sample x.')
+ 'sample x.')
except TypeError:
# bins is an integer
bins = D*[bins]
diff --git a/numpy/lib/index_tricks.py b/numpy/lib/index_tricks.py
index 58dd394e1..6913d2b95 100644
--- a/numpy/lib/index_tricks.py
+++ b/numpy/lib/index_tricks.py
@@ -3,12 +3,11 @@ import sys
import math
import warnings
+import numpy as np
from .._utils import set_module
import numpy.core.numeric as _nx
-from numpy.core.numeric import (
- asarray, ScalarType, array, alltrue, cumprod, arange, ndim
-)
-from numpy.core.numerictypes import find_common_type, issubdtype
+from numpy.core.numeric import ScalarType, array
+from numpy.core.numerictypes import issubdtype
import numpy.matrixlib as matrixlib
from .function_base import diff
@@ -94,7 +93,7 @@ def ix_(*args):
nd = len(args)
for k, new in enumerate(args):
if not isinstance(new, _nx.ndarray):
- new = asarray(new)
+ new = np.asarray(new)
if new.size == 0:
# Explicitly type empty arrays to avoid float default
new = new.astype(_nx.intp)
@@ -211,13 +210,13 @@ class nd_grid:
class MGridClass(nd_grid):
"""
- `nd_grid` instance which returns a dense multi-dimensional "meshgrid".
+ An instance which returns a dense multi-dimensional "meshgrid".
- An instance of `numpy.lib.index_tricks.nd_grid` which returns an dense
- (or fleshed out) mesh-grid when indexed, so that each returned argument
- has the same shape. The dimensions and number of the output arrays are
- equal to the number of indexing dimensions. If the step length is not a
- complex number, then the stop is not inclusive.
+ An instance which returns a dense (or fleshed out) mesh-grid
+ when indexed, so that each returned argument has the same shape.
+ The dimensions and number of the output arrays are equal to the
+ number of indexing dimensions. If the step length is not a complex
+ number, then the stop is not inclusive.
However, if the step length is a **complex number** (e.g. 5j), then
the integer part of its magnitude is interpreted as specifying the
@@ -230,8 +229,7 @@ class MGridClass(nd_grid):
See Also
--------
- lib.index_tricks.nd_grid : class of `ogrid` and `mgrid` objects
- ogrid : like mgrid but returns open (not fleshed out) mesh grids
+ ogrid : like `mgrid` but returns open (not fleshed out) mesh grids
meshgrid: return coordinate matrices from coordinate vectors
r_ : array concatenator
:ref:`how-to-partition`
@@ -263,13 +261,13 @@ mgrid = MGridClass()
class OGridClass(nd_grid):
"""
- `nd_grid` instance which returns an open multi-dimensional "meshgrid".
+ An instance which returns an open multi-dimensional "meshgrid".
- An instance of `numpy.lib.index_tricks.nd_grid` which returns an open
- (i.e. not fleshed out) mesh-grid when indexed, so that only one dimension
- of each returned array is greater than 1. The dimension and number of the
- output arrays are equal to the number of indexing dimensions. If the step
- length is not a complex number, then the stop is not inclusive.
+ An instance which returns an open (i.e. not fleshed out) mesh-grid
+ when indexed, so that only one dimension of each returned array is
+ greater than 1. The dimension and number of the output arrays are
+ equal to the number of indexing dimensions. If the step length is
+ not a complex number, then the stop is not inclusive.
However, if the step length is a **complex number** (e.g. 5j), then
the integer part of its magnitude is interpreted as specifying the
@@ -283,7 +281,6 @@ class OGridClass(nd_grid):
See Also
--------
- np.lib.index_tricks.nd_grid : class of `ogrid` and `mgrid` objects
mgrid : like `ogrid` but returns dense (or fleshed out) mesh grids
meshgrid: return coordinate matrices from coordinate vectors
r_ : array concatenator
@@ -343,9 +340,8 @@ class AxisConcatenator:
axis = self.axis
objs = []
- scalars = []
- arraytypes = []
- scalartypes = []
+ # dtypes or scalars for weak scalar handling in result_type
+ result_type_objs = []
for k, item in enumerate(key):
scalar = False
@@ -391,12 +387,10 @@ class AxisConcatenator:
except (ValueError, TypeError) as e:
raise ValueError("unknown special directive") from e
elif type(item) in ScalarType:
- newobj = array(item, ndmin=ndmin)
- scalars.append(len(objs))
scalar = True
- scalartypes.append(newobj.dtype)
+ newobj = item
else:
- item_ndim = ndim(item)
+ item_ndim = np.ndim(item)
newobj = array(item, copy=False, subok=True, ndmin=ndmin)
if trans1d != -1 and item_ndim < ndmin:
k2 = ndmin - item_ndim
@@ -406,15 +400,20 @@ class AxisConcatenator:
defaxes = list(range(ndmin))
axes = defaxes[:k1] + defaxes[k2:] + defaxes[k1:k2]
newobj = newobj.transpose(axes)
+
objs.append(newobj)
- if not scalar and isinstance(newobj, _nx.ndarray):
- arraytypes.append(newobj.dtype)
+ if scalar:
+ result_type_objs.append(item)
+ else:
+ result_type_objs.append(newobj.dtype)
- # Ensure that scalars won't up-cast unless warranted
- final_dtype = find_common_type(arraytypes, scalartypes)
- if final_dtype is not None:
- for k in scalars:
- objs[k] = objs[k].astype(final_dtype)
+ # Ensure that scalars won't up-cast unless warranted; for zero-length
+ # input this drops through to the error in concatenate.
+ if len(result_type_objs) != 0:
+ final_dtype = _nx.result_type(*result_type_objs)
+ # concatenate could do the cast, but that can be overridden:
+ objs = [array(obj, copy=False, subok=True,
+ ndmin=ndmin, dtype=final_dtype) for obj in objs]
res = self.concatenate(tuple(objs), axis=axis)
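A sketch of the promotion behavior the result_type rewrite preserves
(assuming NumPy 1.25 semantics): Python scalars passed to np.r_ stay
"weak" and do not up-cast the array operands.

    import numpy as np

    a = np.array([1, 2], dtype=np.uint8)
    print(np.r_[a, 3].dtype)  # uint8: the scalar 3 adapts to the array
    print(np.r_[a, a].dtype)  # uint8, unchanged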
@@ -596,7 +595,7 @@ class ndenumerate:
"""
def __init__(self, arr):
- self.iter = asarray(arr).flat
+ self.iter = np.asarray(arr).flat
def __next__(self):
"""
@@ -909,9 +908,9 @@ def fill_diagonal(a, val, wrap=False):
else:
# For more than d=2, the strided formula is only valid for arrays with
# all dimensions equal, so we check first.
- if not alltrue(diff(a.shape) == 0):
+ if not np.all(diff(a.shape) == 0):
raise ValueError("All dimensions of input must be of equal length")
- step = 1 + (cumprod(a.shape[:-1])).sum()
+ step = 1 + (np.cumprod(a.shape[:-1])).sum()
# Write the value out into the diagonal.
a.flat[:end:step] = val
@@ -982,7 +981,7 @@ def diag_indices(n, ndim=2):
[0, 1]]])
"""
- idx = arange(n)
+ idx = np.arange(n)
return (idx,) * ndim
@@ -1009,13 +1008,39 @@ def diag_indices_from(arr):
-----
.. versionadded:: 1.4.0
+ Examples
+ --------
+
+ Create a 4 by 4 array.
+
+ >>> a = np.arange(16).reshape(4, 4)
+ >>> a
+ array([[ 0, 1, 2, 3],
+ [ 4, 5, 6, 7],
+ [ 8, 9, 10, 11],
+ [12, 13, 14, 15]])
+
+ Get the indices of the diagonal elements.
+
+ >>> di = np.diag_indices_from(a)
+ >>> di
+ (array([0, 1, 2, 3]), array([0, 1, 2, 3]))
+
+ >>> a[di]
+ array([ 0, 5, 10, 15])
+
+ This is simply syntactic sugar for diag_indices.
+
+ >>> np.diag_indices(a.shape[0])
+ (array([0, 1, 2, 3]), array([0, 1, 2, 3]))
+
"""
if not arr.ndim >= 2:
raise ValueError("input array must be at least 2-d")
# For more than d=2, the strided formula is only valid for arrays with
# all dimensions equal, so we check first.
- if not alltrue(diff(arr.shape) == 0):
+ if not np.all(diff(arr.shape) == 0):
raise ValueError("All dimensions of input must be of equal length")
return diag_indices(arr.shape[0], arr.ndim)
diff --git a/numpy/lib/index_tricks.pyi b/numpy/lib/index_tricks.pyi
index c9251abd1..29a6b9e2b 100644
--- a/numpy/lib/index_tricks.pyi
+++ b/numpy/lib/index_tricks.pyi
@@ -119,7 +119,7 @@ class AxisConcatenator:
@staticmethod
def makemat(
data: ArrayLike, dtype: DTypeLike = ..., copy: bool = ...
- ) -> _Matrix: ...
+ ) -> _Matrix[Any, Any]: ...
# TODO: Sort out this `__getitem__` method
def __getitem__(self, key: Any) -> Any: ...
diff --git a/numpy/lib/mixins.py b/numpy/lib/mixins.py
index c81239f6b..117cc7851 100644
--- a/numpy/lib/mixins.py
+++ b/numpy/lib/mixins.py
@@ -133,6 +133,7 @@ class NDArrayOperatorsMixin:
.. versionadded:: 1.13
"""
+ __slots__ = ()
# Like np.ndarray, this mixin class implements "Option 1" from the ufunc
# overrides NEP.
diff --git a/numpy/lib/nanfunctions.py b/numpy/lib/nanfunctions.py
index 786d2021e..b3b570860 100644
--- a/numpy/lib/nanfunctions.py
+++ b/numpy/lib/nanfunctions.py
@@ -169,7 +169,7 @@ def _remove_nan_1d(arr1d, overwrite_input=False):
s = np.nonzero(c)[0]
if s.size == arr1d.size:
warnings.warn("All-NaN slice encountered", RuntimeWarning,
- stacklevel=5)
+ stacklevel=6)
return arr1d[:0], True
elif s.size == 0:
return arr1d, overwrite_input
@@ -343,7 +343,7 @@ def nanmin(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue,
res = np.fmin.reduce(a, axis=axis, out=out, **kwargs)
if np.isnan(res).any():
warnings.warn("All-NaN slice encountered", RuntimeWarning,
- stacklevel=3)
+ stacklevel=2)
else:
# Slow, but safe for subclasses of ndarray
a, mask = _replace_nan(a, +np.inf)
@@ -357,7 +357,7 @@ def nanmin(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue,
if np.any(mask):
res = _copyto(res, np.nan, mask)
warnings.warn("All-NaN axis encountered", RuntimeWarning,
- stacklevel=3)
+ stacklevel=2)
return res
@@ -476,7 +476,7 @@ def nanmax(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue,
res = np.fmax.reduce(a, axis=axis, out=out, **kwargs)
if np.isnan(res).any():
warnings.warn("All-NaN slice encountered", RuntimeWarning,
- stacklevel=3)
+ stacklevel=2)
else:
# Slow, but safe for subclasses of ndarray
a, mask = _replace_nan(a, -np.inf)
@@ -490,7 +490,7 @@ def nanmax(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue,
if np.any(mask):
res = _copyto(res, np.nan, mask)
warnings.warn("All-NaN axis encountered", RuntimeWarning,
- stacklevel=3)
+ stacklevel=2)
return res
@@ -1049,7 +1049,7 @@ def nanmean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue,
isbad = (cnt == 0)
if isbad.any():
- warnings.warn("Mean of empty slice", RuntimeWarning, stacklevel=3)
+ warnings.warn("Mean of empty slice", RuntimeWarning, stacklevel=2)
# NaN is the only possible bad value, so no further
# action is needed to handle bad results.
return avg
@@ -1109,7 +1109,7 @@ def _nanmedian_small(a, axis=None, out=None, overwrite_input=False):
m = np.ma.median(a, axis=axis, overwrite_input=overwrite_input)
for i in range(np.count_nonzero(m.mask.ravel())):
warnings.warn("All-NaN slice encountered", RuntimeWarning,
- stacklevel=4)
+ stacklevel=5)
fill_value = np.timedelta64("NaT") if m.dtype.kind == "m" else np.nan
if out is not None:
@@ -1415,8 +1415,8 @@ def nanquantile(
Input array or object that can be converted to an array, containing
nan values to be ignored
q : array_like of float
- Quantile or sequence of quantiles to compute, which must be between
- 0 and 1 inclusive.
+ Probability or sequence of probabilities for the quantiles to compute.
+ Values must be between 0 and 1 inclusive.
axis : {int, tuple of int, None}, optional
Axis or axes along which the quantiles are computed. The
default is to compute the quantile(s) along a flattened
@@ -1476,8 +1476,8 @@ def nanquantile(
Returns
-------
quantile : scalar or ndarray
- If `q` is a single percentile and `axis=None`, then the result
- is a scalar. If multiple quantiles are given, first axis of
+ If `q` is a single probability and `axis=None`, then the result
+ is a scalar. If multiple probability levels are given, first axis of
the result corresponds to the quantiles. The other axes are
the axes that remain after the reduction of `a`. If the input
contains integers or floats smaller than ``float64``, the output
@@ -1763,7 +1763,7 @@ def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue,
isbad = (dof <= 0)
if np.any(isbad):
warnings.warn("Degrees of freedom <= 0 for slice.", RuntimeWarning,
- stacklevel=3)
+ stacklevel=2)
# NaN, inf, or negative numbers are all possible bad
# values, so explicitly replace them with NaN.
var = _copyto(var, np.nan, isbad)
diff --git a/numpy/lib/npyio.py b/numpy/lib/npyio.py
index 4a27c7898..339b1dc62 100644
--- a/numpy/lib/npyio.py
+++ b/numpy/lib/npyio.py
@@ -142,7 +142,7 @@ class NpzFile(Mapping):
max_header_size : int, optional
Maximum allowed size of the header. Large headers may not be safe
to load securely and thus require explicitly passing a larger value.
- See :py:meth:`ast.literal_eval()` for details.
+ See :py:func:`ast.literal_eval()` for details.
This option is ignored when `allow_pickle` is passed. In that case
the file is by definition trusted and the limit is unnecessary.
@@ -167,6 +167,8 @@ class NpzFile(Mapping):
>>> npz = np.load(outfile)
>>> isinstance(npz, np.lib.npyio.NpzFile)
True
+ >>> npz
+ NpzFile 'object' with keys: x, y
>>> sorted(npz.files)
['x', 'y']
>>> npz['x'] # getitem access
@@ -178,6 +180,7 @@ class NpzFile(Mapping):
# Make __exit__ safe if zipfile_factory raises an exception
zip = None
fid = None
+ _MAX_REPR_ARRAY_COUNT = 5
def __init__(self, fid, own_fid=False, allow_pickle=False,
pickle_kwargs=None, *,
@@ -257,7 +260,23 @@ class NpzFile(Mapping):
else:
return self.zip.read(key)
else:
- raise KeyError("%s is not a file in the archive" % key)
+ raise KeyError(f"{key} is not a file in the archive")
+
+ def __contains__(self, key):
+ return (key in self._files or key in self.files)
+
+ def __repr__(self):
+ # Get filename or default to `object`
+ if isinstance(self.fid, str):
+ filename = self.fid
+ else:
+ filename = getattr(self.fid, "name", "object")
+
+ # Get the name of arrays
+ array_names = ', '.join(self.files[:self._MAX_REPR_ARRAY_COUNT])
+ if len(self.files) > self._MAX_REPR_ARRAY_COUNT:
+ array_names += "..."
+ return f"NpzFile {filename!r} with keys: {array_names}"
@set_module('numpy')
@@ -309,7 +328,7 @@ def load(file, mmap_mode=None, allow_pickle=False, fix_imports=True,
max_header_size : int, optional
Maximum allowed size of the header. Large headers may not be safe
to load securely and thus require explicitly passing a larger value.
- See :py:meth:`ast.literal_eval()` for details.
+ See :py:func:`ast.literal_eval()` for details.
This option is ignored when `allow_pickle` is passed. In that case
the file is by definition trusted and the limit is unnecessary.
@@ -327,6 +346,9 @@ def load(file, mmap_mode=None, allow_pickle=False, fix_imports=True,
If ``allow_pickle=True``, but the file cannot be loaded as a pickle.
ValueError
The file contains an object array, but ``allow_pickle=False`` given.
+ EOFError
+ When calling ``np.load`` multiple times on the same file handle,
+ if all data has already been read
See Also
--------
@@ -410,6 +432,8 @@ def load(file, mmap_mode=None, allow_pickle=False, fix_imports=True,
_ZIP_SUFFIX = b'PK\x05\x06' # empty zip files start with this
N = len(format.MAGIC_PREFIX)
magic = fid.read(N)
+ if not magic:
+ raise EOFError("No data left in file")
# If the file size is less than N, we need to make sure not
# to seek past the beginning of the file
fid.seek(-min(N, len(magic)), 1) # back-up
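Taken together, the new NpzFile repr/__contains__ and the EOFError path
behave like this (a sketch, assuming NumPy 1.25 behavior):

    from io import BytesIO
    import numpy as np

    f = BytesIO()
    np.savez(f, x=np.arange(3), y=np.zeros(2))
    f.seek(0)
    npz = np.load(f)
    print(repr(npz))   # NpzFile 'object' with keys: x, y
    print('x' in npz)  # True, via the new __contains__

    g = BytesIO()
    np.save(g, 1)
    g.seek(0)
    np.load(g)         # reads the single array
    try:
        np.load(g)     # nothing left in the stream
    except EOFError as exc:
        print(exc)     # No data left in file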
@@ -760,13 +784,6 @@ def _ensure_ndmin_ndarray(a, *, ndmin: int):
_loadtxt_chunksize = 50000
-def _loadtxt_dispatcher(
- fname, dtype=None, comments=None, delimiter=None,
- converters=None, skiprows=None, usecols=None, unpack=None,
- ndmin=None, encoding=None, max_rows=None, *, like=None):
- return (like,)
-
-
def _check_nonneg_int(value, name="argument"):
try:
operator.index(value)
@@ -1161,10 +1178,10 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
while such lines are counted in `skiprows`.
.. versionadded:: 1.16.0
-
+
.. versionchanged:: 1.23.0
- Lines containing no data, including comment lines (e.g., lines
- starting with '#' or as specified via `comments`) are not counted
+ Lines containing no data, including comment lines (e.g., lines
+ starting with '#' or as specified via `comments`) are not counted
towards `max_rows`.
quotechar : unicode character or None, optional
The character used to denote the start and end of a quoted item.
@@ -1303,6 +1320,14 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
array([('alpha, #42', 10.), ('beta, #64', 2.)],
dtype=[('label', '<U12'), ('value', '<f8')])
+ Quoted fields can be separated by multiple whitespace characters:
+
+ >>> s = StringIO('"alpha, #42" 10.0\n"beta, #64" 2.0\n')
+ >>> dtype = np.dtype([("label", "U12"), ("value", float)])
+ >>> np.loadtxt(s, dtype=dtype, delimiter=None, quotechar='"')
+ array([('alpha, #42', 10.), ('beta, #64', 2.)],
+ dtype=[('label', '<U12'), ('value', '<f8')])
+
Two consecutive quote characters within a quoted field are treated as a
single escaped character:
@@ -1323,10 +1348,10 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
if like is not None:
return _loadtxt_with_like(
- fname, dtype=dtype, comments=comments, delimiter=delimiter,
+ like, fname, dtype=dtype, comments=comments, delimiter=delimiter,
converters=converters, skiprows=skiprows, usecols=usecols,
unpack=unpack, ndmin=ndmin, encoding=encoding,
- max_rows=max_rows, like=like
+ max_rows=max_rows
)
if isinstance(delimiter, bytes):
@@ -1353,9 +1378,7 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
return arr
-_loadtxt_with_like = array_function_dispatch(
- _loadtxt_dispatcher
-)(loadtxt)
+_loadtxt_with_like = array_function_dispatch()(loadtxt)
def _savetxt_dispatcher(fname, X, fmt=None, delimiter=None, newline=None,
@@ -1716,17 +1739,6 @@ def fromregex(file, regexp, dtype, encoding=None):
#####--------------------------------------------------------------------------
-def _genfromtxt_dispatcher(fname, dtype=None, comments=None, delimiter=None,
- skip_header=None, skip_footer=None, converters=None,
- missing_values=None, filling_values=None, usecols=None,
- names=None, excludelist=None, deletechars=None,
- replace_space=None, autostrip=None, case_sensitive=None,
- defaultfmt=None, unpack=None, usemask=None, loose=None,
- invalid_raise=None, max_rows=None, encoding=None,
- *, ndmin=None, like=None):
- return (like,)
-
-
@set_array_function_like_doc
@set_module('numpy')
def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
@@ -1924,7 +1936,7 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
if like is not None:
return _genfromtxt_with_like(
- fname, dtype=dtype, comments=comments, delimiter=delimiter,
+ like, fname, dtype=dtype, comments=comments, delimiter=delimiter,
skip_header=skip_header, skip_footer=skip_footer,
converters=converters, missing_values=missing_values,
filling_values=filling_values, usecols=usecols, names=names,
@@ -1934,7 +1946,6 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
unpack=unpack, usemask=usemask, loose=loose,
invalid_raise=invalid_raise, max_rows=max_rows, encoding=encoding,
ndmin=ndmin,
- like=like
)
_ensure_ndmin_ndarray_check_param(ndmin)
@@ -2327,7 +2338,7 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
column_types = [conv.type for conv in converters]
# Find the columns with strings...
strcolidx = [i for (i, v) in enumerate(column_types)
- if v == np.unicode_]
+ if v == np.str_]
if byte_converters and strcolidx:
# convert strings back to bytes for backward compatibility
@@ -2463,9 +2474,7 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
return output
-_genfromtxt_with_like = array_function_dispatch(
- _genfromtxt_dispatcher
-)(genfromtxt)
+_genfromtxt_with_like = array_function_dispatch()(genfromtxt)
def recfromtxt(fname, **kwargs):
diff --git a/numpy/lib/npyio.pyi b/numpy/lib/npyio.pyi
index 8007b2dc7..ef0f2a5f1 100644
--- a/numpy/lib/npyio.pyi
+++ b/numpy/lib/npyio.pyi
@@ -72,6 +72,7 @@ class NpzFile(Mapping[str, NDArray[Any]]):
files: list[str]
allow_pickle: bool
pickle_kwargs: None | Mapping[str, Any]
+ _MAX_REPR_ARRAY_COUNT: int
# Represent `f` as a mutable property so we can access the type of `self`
@property
def f(self: _T) -> BagObj[_T]: ...
@@ -97,6 +98,8 @@ class NpzFile(Mapping[str, NDArray[Any]]):
def __iter__(self) -> Iterator[str]: ...
def __len__(self) -> int: ...
def __getitem__(self, key: str) -> NDArray[Any]: ...
+ def __contains__(self, key: str) -> bool: ...
+ def __repr__(self) -> str: ...
# NOTE: Returns a `NpzFile` if file is a zip file;
# returns an `ndarray`/`memmap` otherwise
@@ -236,7 +239,7 @@ def genfromtxt(
*,
ndmin: L[0, 1, 2] = ...,
like: None | _SupportsArrayFunc = ...,
-) -> NDArray[float64]: ...
+) -> NDArray[Any]: ...
@overload
def genfromtxt(
fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
diff --git a/numpy/lib/polynomial.py b/numpy/lib/polynomial.py
index 0f7ab0334..3b8db2a95 100644
--- a/numpy/lib/polynomial.py
+++ b/numpy/lib/polynomial.py
@@ -104,7 +104,7 @@ def poly(seq_of_zeros):
References
----------
- .. [1] M. Sullivan and M. Sullivan, III, "Algebra and Trignometry,
+ .. [1] M. Sullivan and M. Sullivan, III, "Algebra and Trigonometry,
Enhanced With Graphing Utilities," Prentice-Hall, pg. 318, 1996.
.. [2] G. Strang, "Linear Algebra and Its Applications, 2nd Edition,"
@@ -672,7 +672,7 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
# warn on rank reduction, which indicates an ill conditioned matrix
if rank != order and not full:
msg = "Polyfit may be poorly conditioned"
- warnings.warn(msg, RankWarning, stacklevel=4)
+ warnings.warn(msg, RankWarning, stacklevel=2)
if full:
return c, resids, rank, s, rcond
diff --git a/numpy/lib/shape_base.py b/numpy/lib/shape_base.py
index 3cb4dc19c..5d8a41bfe 100644
--- a/numpy/lib/shape_base.py
+++ b/numpy/lib/shape_base.py
@@ -643,10 +643,6 @@ def column_stack(tup):
[3, 4]])
"""
- if not overrides.ARRAY_FUNCTION_ENABLED:
- # raise warning if necessary
- _arrays_for_stack_dispatcher(tup, stacklevel=2)
-
arrays = []
for v in tup:
arr = asanyarray(v)
@@ -713,10 +709,6 @@ def dstack(tup):
[[3, 4]]])
"""
- if not overrides.ARRAY_FUNCTION_ENABLED:
- # raise warning if necessary
- _arrays_for_stack_dispatcher(tup, stacklevel=2)
-
arrs = atleast_3d(*tup)
if not isinstance(arrs, list):
arrs = [arrs]
diff --git a/numpy/lib/tests/test_arraysetops.py b/numpy/lib/tests/test_arraysetops.py
index bb07e25a9..a180accbe 100644
--- a/numpy/lib/tests/test_arraysetops.py
+++ b/numpy/lib/tests/test_arraysetops.py
@@ -414,13 +414,48 @@ class TestSetOps:
with pytest.raises(ValueError):
in1d(a, b, kind="table")
+ @pytest.mark.parametrize(
+ "dtype1,dtype2",
+ [
+ (np.int8, np.int16),
+ (np.int16, np.int8),
+ (np.uint8, np.uint16),
+ (np.uint16, np.uint8),
+ (np.uint8, np.int16),
+ (np.int16, np.uint8),
+ ]
+ )
+ @pytest.mark.parametrize("kind", [None, "sort", "table"])
+ def test_in1d_mixed_dtype(self, dtype1, dtype2, kind):
+ """Test that in1d works as expected for mixed dtype input."""
+ is_dtype2_signed = np.issubdtype(dtype2, np.signedinteger)
+ ar1 = np.array([0, 0, 1, 1], dtype=dtype1)
+
+ if is_dtype2_signed:
+ ar2 = np.array([-128, 0, 127], dtype=dtype2)
+ else:
+ ar2 = np.array([127, 0, 255], dtype=dtype2)
+
+ expected = np.array([True, True, False, False])
+
+ expect_failure = kind == "table" and any((
+ dtype1 == np.int8 and dtype2 == np.int16,
+ dtype1 == np.int16 and dtype2 == np.int8
+ ))
+
+ if expect_failure:
+ with pytest.raises(RuntimeError, match="exceed the maximum"):
+ in1d(ar1, ar2, kind=kind)
+ else:
+ assert_array_equal(in1d(ar1, ar2, kind=kind), expected)
+
@pytest.mark.parametrize("kind", [None, "sort", "table"])
def test_in1d_mixed_boolean(self, kind):
"""Test that in1d works as expected for bool/int input."""
for dtype in np.typecodes["AllInteger"]:
a = np.array([True, False, False], dtype=bool)
- b = np.array([1, 1, 1, 1], dtype=dtype)
- expected = np.array([True, False, False], dtype=bool)
+ b = np.array([0, 0, 0, 0], dtype=dtype)
+ expected = np.array([False, True, True], dtype=bool)
assert_array_equal(in1d(a, b, kind=kind), expected)
a, b = b, a
diff --git a/numpy/lib/tests/test_format.py b/numpy/lib/tests/test_format.py
index 6f6406cf8..58d08f1e5 100644
--- a/numpy/lib/tests/test_format.py
+++ b/numpy/lib/tests/test_format.py
@@ -531,7 +531,8 @@ def test_load_padded_dtype(tmpdir, dt):
def test_python2_python3_interoperability():
fname = 'win64python2.npy'
path = os.path.join(os.path.dirname(__file__), 'data', fname)
- data = np.load(path)
+ with pytest.warns(UserWarning, match="Reading.*this warning\\."):
+ data = np.load(path)
assert_array_equal(data, np.ones(2))
def test_pickle_python2_python3():
diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py
index e38a187d8..b0944ec85 100644
--- a/numpy/lib/tests/test_function_base.py
+++ b/numpy/lib/tests/test_function_base.py
@@ -8,7 +8,7 @@ import pytest
import hypothesis
from hypothesis.extra.numpy import arrays
import hypothesis.strategies as st
-
+from functools import partial
import numpy as np
from numpy import ma
@@ -229,8 +229,8 @@ class TestAny:
def test_nd(self):
y1 = [[0, 0, 0], [0, 1, 0], [1, 1, 0]]
assert_(np.any(y1))
- assert_array_equal(np.sometrue(y1, axis=0), [1, 1, 0])
- assert_array_equal(np.sometrue(y1, axis=1), [0, 1, 1])
+ assert_array_equal(np.any(y1, axis=0), [1, 1, 0])
+ assert_array_equal(np.any(y1, axis=1), [0, 1, 1])
class TestAll:
@@ -247,8 +247,8 @@ class TestAll:
def test_nd(self):
y1 = [[0, 0, 1], [0, 1, 1], [1, 1, 1]]
assert_(not np.all(y1))
- assert_array_equal(np.alltrue(y1, axis=0), [0, 0, 1])
- assert_array_equal(np.alltrue(y1, axis=1), [0, 0, 1])
+ assert_array_equal(np.all(y1, axis=0), [0, 0, 1])
+ assert_array_equal(np.all(y1, axis=1), [0, 0, 1])
class TestCopy:
@@ -1217,6 +1217,13 @@ class TestGradient:
dfdx = gradient(f, x)
assert_array_equal(dfdx, [0.5, 0.5])
+ def test_return_type(self):
+ res = np.gradient(([1, 2], [2, 3]))
+ if np._using_numpy2_behavior():
+ assert type(res) is tuple
+ else:
+ assert type(res) is list
+
class TestAngle:
@@ -1780,6 +1787,70 @@ class TestVectorize:
assert_equal(type(r), subclass)
assert_equal(r, m * v)
+ def test_name(self):
+ # See gh-23021
+ @np.vectorize
+ def f2(a, b):
+ return a + b
+
+ assert f2.__name__ == 'f2'
+
+ def test_decorator(self):
+ @vectorize
+ def addsubtract(a, b):
+ if a > b:
+ return a - b
+ else:
+ return a + b
+
+ r = addsubtract([0, 3, 6, 9], [1, 3, 5, 7])
+ assert_array_equal(r, [1, 6, 1, 2])
+
+ def test_docstring(self):
+ @vectorize
+ def f(x):
+ """Docstring"""
+ return x
+
+ if sys.flags.optimize < 2:
+ assert f.__doc__ == "Docstring"
+
+ def test_partial(self):
+ def foo(x, y):
+ return x + y
+
+ bar = partial(foo, 3)
+ vbar = np.vectorize(bar)
+ assert vbar(1) == 4
+
+ def test_signature_otypes_decorator(self):
+ @vectorize(signature='(n)->(n)', otypes=['float64'])
+ def f(x):
+ return x
+
+ r = f([1, 2, 3])
+ assert_equal(r.dtype, np.dtype('float64'))
+ assert_array_equal(r, [1, 2, 3])
+ assert f.__name__ == 'f'
+
+ def test_bad_input(self):
+ with assert_raises(TypeError):
+ A = np.vectorize(pyfunc = 3)
+
+ def test_no_keywords(self):
+ with assert_raises(TypeError):
+ @np.vectorize("string")
+ def foo():
+ return "bar"
+
+ def test_positional_regression_9477(self):
+ # This supplies the first keyword argument as a positional,
+ # to ensure that they are still properly forwarded after the
+ # enhancement for #9477
+ f = vectorize((lambda x: x), ['float64'])
+ r = f([2])
+ assert_equal(r.dtype, np.dtype('float64'))
+
class TestLeaks:
class A:
@@ -3467,9 +3538,20 @@ class TestPercentile:
np.percentile([1, 2, 3, 4.0], q)
+quantile_methods = [
+ 'inverted_cdf', 'averaged_inverted_cdf', 'closest_observation',
+ 'interpolated_inverted_cdf', 'hazen', 'weibull', 'linear',
+ 'median_unbiased', 'normal_unbiased', 'nearest', 'lower', 'higher',
+ 'midpoint']
+
+
class TestQuantile:
# most of this is already tested by TestPercentile
+ def V(self, x, y, alpha):
+ # Identification function used in several tests.
+ return (x >= y) - alpha
+
def test_max_ulp(self):
x = [0.0, 0.2, 0.4]
a = np.quantile(x, 0.45)
@@ -3484,7 +3566,6 @@ class TestQuantile:
assert_equal(np.quantile(x, 1), 3.5)
assert_equal(np.quantile(x, 0.5), 1.75)
- @pytest.mark.xfail(reason="See gh-19154")
def test_correct_quantile_value(self):
a = np.array([True])
tf_quant = np.quantile(True, False)
@@ -3549,11 +3630,7 @@ class TestQuantile:
method="nearest")
assert res.dtype == dtype
- @pytest.mark.parametrize("method",
- ['inverted_cdf', 'averaged_inverted_cdf', 'closest_observation',
- 'interpolated_inverted_cdf', 'hazen', 'weibull', 'linear',
- 'median_unbiased', 'normal_unbiased',
- 'nearest', 'lower', 'higher', 'midpoint'])
+ @pytest.mark.parametrize("method", quantile_methods)
def test_quantile_monotonic(self, method):
# GH 14685
# test that the return value of quantile is monotonic if p0 is ordered
@@ -3584,6 +3661,94 @@ class TestQuantile:
assert np.isscalar(actual)
assert_equal(np.quantile(a, 0.5), np.nan)
+ @pytest.mark.parametrize("method", quantile_methods)
+ @pytest.mark.parametrize("alpha", [0.2, 0.5, 0.9])
+ def test_quantile_identification_equation(self, method, alpha):
+ # Test that the identification equation holds for the empirical
+ # CDF:
+ # E[V(x, Y)] = 0 <=> x is quantile
+ # with Y the random variable for which we have observed values and
+ # V(x, y) the canonical identification function for the quantile (at
+ # level alpha), see
+ # https://doi.org/10.48550/arXiv.0912.0902
+ rng = np.random.default_rng(4321)
+ # We choose n and alpha such that we cover 3 cases:
+ # - n * alpha is an integer
+ # - n * alpha is a float that gets rounded down
+ # - n * alpha is a float that gets rounded up
+ n = 102 # n * alpha = 20.4, 51. , 91.8
+ y = rng.random(n)
+ x = np.quantile(y, alpha, method=method)
+ if method in ("higher",):
+ # These methods do not fulfill the identification equation.
+ assert np.abs(np.mean(self.V(x, y, alpha))) > 0.1 / n
+ elif int(n * alpha) == n * alpha:
+ # We can expect exact results, up to machine precision.
+ assert_allclose(np.mean(self.V(x, y, alpha)), 0, atol=1e-14)
+ else:
+ # V = (x >= y) - alpha cannot sum to zero exactly but within
+ # "sample precision".
+ assert_allclose(np.mean(self.V(x, y, alpha)), 0,
+ atol=1 / n / np.amin([alpha, 1 - alpha]))
+
+ @pytest.mark.parametrize("method", quantile_methods)
+ @pytest.mark.parametrize("alpha", [0.2, 0.5, 0.9])
+ def test_quantile_add_and_multiply_constant(self, method, alpha):
+ # Test that
+ # 1. quantile(c + x) = c + quantile(x)
+ # 2. quantile(c * x) = c * quantile(x)
+ # 3. quantile(-x) = -quantile(x, 1 - alpha)
+ # On empirical quantiles, this equation does not hold exactly.
+ # Koenker (2005) "Quantile Regression" Chapter 2.2.3 calls these
+ # properties equivariance.
+ rng = np.random.default_rng(4321)
+ # We choose n and alpha such that we have cases for
+ # - n * alpha is an integer
+ # - n * alpha is a float that gets rounded down
+ # - n * alpha is a float that gets rounded up
+ n = 102 # n * alpha = 20.4, 51. , 91.8
+ y = rng.random(n)
+ q = np.quantile(y, alpha, method=method)
+ c = 13.5
+
+ # 1
+ assert_allclose(np.quantile(c + y, alpha, method=method), c + q)
+ # 2
+ assert_allclose(np.quantile(c * y, alpha, method=method), c * q)
+ # 3
+ q = -np.quantile(-y, 1 - alpha, method=method)
+ if method == "inverted_cdf":
+ if (
+ n * alpha == int(n * alpha)
+ or np.round(n * alpha) == int(n * alpha) + 1
+ ):
+ assert_allclose(q, np.quantile(y, alpha, method="higher"))
+ else:
+ assert_allclose(q, np.quantile(y, alpha, method="lower"))
+ elif method == "closest_observation":
+ if n * alpha == int(n * alpha):
+ assert_allclose(q, np.quantile(y, alpha, method="higher"))
+ elif np.round(n * alpha) == int(n * alpha) + 1:
+ assert_allclose(
+ q, np.quantile(y, alpha + 1/n, method="higher"))
+ else:
+ assert_allclose(q, np.quantile(y, alpha, method="lower"))
+ elif method == "interpolated_inverted_cdf":
+ assert_allclose(q, np.quantile(y, alpha + 1/n, method=method))
+ elif method == "nearest":
+ if n * alpha == int(n * alpha):
+ assert_allclose(q, np.quantile(y, alpha + 1/n, method=method))
+ else:
+ assert_allclose(q, np.quantile(y, alpha, method=method))
+ elif method == "lower":
+ assert_allclose(q, np.quantile(y, alpha, method="higher"))
+ elif method == "higher":
+ assert_allclose(q, np.quantile(y, alpha, method="lower"))
+ else:
+ # "averaged_inverted_cdf", "hazen", "weibull", "linear",
+ # "median_unbiased", "normal_unbiased", "midpoint"
+ assert_allclose(q, np.quantile(y, alpha, method=method))
+
class TestLerp:
@hypothesis.given(t0=st.floats(allow_nan=False, allow_infinity=False,
diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py
index 4699935ca..c1032df8e 100644
--- a/numpy/lib/tests/test_io.py
+++ b/numpy/lib/tests/test_io.py
@@ -232,6 +232,17 @@ class TestSavezLoad(RoundtripTest):
assert_equal(a, l['file_a'])
assert_equal(b, l['file_b'])
+
+ def test_tuple_getitem_raises(self):
+ # gh-23748
+ a = np.array([1, 2, 3])
+ f = BytesIO()
+ np.savez(f, a=a)
+ f.seek(0)
+ l = np.load(f)
+ with pytest.raises(KeyError, match="(1, 2)"):
+ l[1, 2]
+
def test_BagObj(self):
a = np.array([[1, 2], [3, 4]], float)
b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex)
@@ -321,6 +332,21 @@ class TestSavezLoad(RoundtripTest):
data.close()
assert_(fp.closed)
+ @pytest.mark.parametrize("count, expected_repr", [
+ (1, "NpzFile {fname!r} with keys: arr_0"),
+ (5, "NpzFile {fname!r} with keys: arr_0, arr_1, arr_2, arr_3, arr_4"),
+ # _MAX_REPR_ARRAY_COUNT is 5, so files with more than 5 keys are
+ # expected to end in '...'
+ (6, "NpzFile {fname!r} with keys: arr_0, arr_1, arr_2, arr_3, arr_4..."),
+ ])
+ def test_repr_lists_keys(self, count, expected_repr):
+ a = np.array([[1, 2], [3, 4]], float)
+ with temppath(suffix='.npz') as tmp:
+ np.savez(tmp, *[a]*count)
+ l = np.load(tmp)
+ assert repr(l) == expected_repr.format(fname=tmp)
+ l.close()
+
class TestSaveTxt:
def test_array(self):
@@ -522,7 +548,7 @@ class TestSaveTxt:
def test_unicode(self):
utf8 = b'\xcf\x96'.decode('UTF-8')
- a = np.array([utf8], dtype=np.unicode_)
+ a = np.array([utf8], dtype=np.str_)
with tempdir() as tmpdir:
# set encoding as on windows it may not be unicode even on py3
np.savetxt(os.path.join(tmpdir, 'test.csv'), a, fmt=['%s'],
@@ -530,7 +556,7 @@ class TestSaveTxt:
def test_unicode_roundtrip(self):
utf8 = b'\xcf\x96'.decode('UTF-8')
- a = np.array([utf8], dtype=np.unicode_)
+ a = np.array([utf8], dtype=np.str_)
# our gz wrapper support encoding
suffixes = ['', '.gz']
if HAS_BZ2:
@@ -542,12 +568,12 @@ class TestSaveTxt:
np.savetxt(os.path.join(tmpdir, 'test.csv' + suffix), a,
fmt=['%s'], encoding='UTF-16-LE')
b = np.loadtxt(os.path.join(tmpdir, 'test.csv' + suffix),
- encoding='UTF-16-LE', dtype=np.unicode_)
+ encoding='UTF-16-LE', dtype=np.str_)
assert_array_equal(a, b)
def test_unicode_bytestream(self):
utf8 = b'\xcf\x96'.decode('UTF-8')
- a = np.array([utf8], dtype=np.unicode_)
+ a = np.array([utf8], dtype=np.str_)
s = BytesIO()
np.savetxt(s, a, fmt=['%s'], encoding='UTF-8')
s.seek(0)
@@ -555,7 +581,7 @@ class TestSaveTxt:
def test_unicode_stringstream(self):
utf8 = b'\xcf\x96'.decode('UTF-8')
- a = np.array([utf8], dtype=np.unicode_)
+ a = np.array([utf8], dtype=np.str_)
s = StringIO()
np.savetxt(s, a, fmt=['%s'], encoding='UTF-8')
s.seek(0)
@@ -597,8 +623,8 @@ class TestSaveTxt:
# in our process if needed, see gh-16889
memoryerror_raised = Value(c_bool)
- # Since Python 3.8, the default start method for multiprocessing has
- # been changed from 'fork' to 'spawn' on macOS, causing inconsistency
+ # Since Python 3.8, the default start method for multiprocessing has
+ # been changed from 'fork' to 'spawn' on macOS, causing inconsistency
# on memory sharing model, lead to failed test for check_large_zip
ctx = get_context('fork')
p = ctx.Process(target=check_large_zip, args=(memoryerror_raised,))
@@ -652,12 +678,12 @@ class LoadTxtBase:
with temppath() as path:
with open(path, "wb") as f:
f.write(nonascii.encode("UTF-16"))
- x = self.loadfunc(path, encoding="UTF-16", dtype=np.unicode_)
+ x = self.loadfunc(path, encoding="UTF-16", dtype=np.str_)
assert_array_equal(x, nonascii)
def test_binary_decode(self):
utf16 = b'\xff\xfeh\x04 \x00i\x04 \x00j\x04'
- v = self.loadfunc(BytesIO(utf16), dtype=np.unicode_, encoding='UTF-16')
+ v = self.loadfunc(BytesIO(utf16), dtype=np.str_, encoding='UTF-16')
assert_array_equal(v, np.array(utf16.decode('UTF-16').split()))
def test_converters_decode(self):
@@ -665,7 +691,7 @@ class LoadTxtBase:
c = TextIO()
c.write(b'\xcf\x96')
c.seek(0)
- x = self.loadfunc(c, dtype=np.unicode_,
+ x = self.loadfunc(c, dtype=np.str_,
converters={0: lambda x: x.decode('UTF-8')})
a = np.array([b'\xcf\x96'.decode('UTF-8')])
assert_array_equal(x, a)
@@ -676,7 +702,7 @@ class LoadTxtBase:
with temppath() as path:
with io.open(path, 'wt', encoding='UTF-8') as f:
f.write(utf8)
- x = self.loadfunc(path, dtype=np.unicode_,
+ x = self.loadfunc(path, dtype=np.str_,
converters={0: lambda x: x + 't'},
encoding='UTF-8')
a = np.array([utf8 + 't'])
@@ -1161,7 +1187,7 @@ class TestLoadTxt(LoadTxtBase):
with open(path, "wb") as f:
f.write(butf8)
with open(path, "rb") as f:
- x = np.loadtxt(f, encoding="UTF-8", dtype=np.unicode_)
+ x = np.loadtxt(f, encoding="UTF-8", dtype=np.str_)
assert_array_equal(x, sutf8)
# test broken latin1 conversion people now rely on
with open(path, "rb") as f:
@@ -2219,7 +2245,7 @@ M 33 21.99
ctl = np.array([
["test1", "testNonethe" + utf8.decode("UTF-8"), "test3"],
["test1", "testNonethe" + utf8.decode("UTF-8"), "test3"]],
- dtype=np.unicode_)
+ dtype=np.str_)
assert_array_equal(test, ctl)
# test a mixed dtype
@@ -2262,7 +2288,7 @@ M 33 21.99
["norm1", "norm2", "norm3"],
["norm1", latin1, "norm3"],
["test1", "testNonethe" + utf8, "test3"]],
- dtype=np.unicode_)
+ dtype=np.str_)
assert_array_equal(test, ctl)
def test_recfromtxt(self):
@@ -2737,3 +2763,13 @@ def test_load_refcount():
with assert_no_gc_cycles():
x = np.loadtxt(TextIO("0 1 2 3"), dtype=dt)
assert_equal(x, np.array([((0, 1), (2, 3))], dtype=dt))
+
+def test_load_multiple_arrays_until_eof():
+ f = BytesIO()
+ np.save(f, 1)
+ np.save(f, 2)
+ f.seek(0)
+ assert np.load(f) == 1
+ assert np.load(f) == 2
+ with pytest.raises(EOFError):
+ np.load(f)
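A usage sketch built on the EOFError behaviour this new test pins down: several arrays saved to one stream can be read back until the stream is exhausted.

    import numpy as np
    from io import BytesIO

    f = BytesIO()
    for value in (1, 2, 3):
        np.save(f, value)            # append several arrays to one stream
    f.seek(0)

    arrays = []
    while True:
        try:
            arrays.append(np.load(f))
        except EOFError:             # raised once no further array remains
            break
    # arrays == [array(1), array(2), array(3)]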
diff --git a/numpy/lib/tests/test_loadtxt.py b/numpy/lib/tests/test_loadtxt.py
index 0b8fe3c47..2d805e434 100644
--- a/numpy/lib/tests/test_loadtxt.py
+++ b/numpy/lib/tests/test_loadtxt.py
@@ -244,6 +244,14 @@ def test_converters_negative_indices_with_usecols():
usecols=[0, -1], converters={-1: (lambda x: -1)})
assert_array_equal(res, [[0, -1], [0, -1]])
+
+def test_ragged_error():
+ rows = ["1,2,3", "1,2,3", "4,3,2,1"]
+ with pytest.raises(ValueError,
+ match="the number of columns changed from 3 to 4 at row 3"):
+ np.loadtxt(rows, delimiter=",")
+
+
def test_ragged_usecols():
# usecols, and negative ones, work even with varying number of columns.
txt = StringIO("0,0,XXX\n0,XXX,0,XXX\n0,XXX,XXX,0,XXX\n")
@@ -534,12 +542,27 @@ def test_quoted_field(q):
assert_array_equal(res, expected)
+@pytest.mark.parametrize("q", ('"', "'", "`"))
+def test_quoted_field_with_whitespace_delimiter(q):
+ txt = StringIO(
+ f"{q}alpha, x{q} 2.5\n{q}beta, y{q} 4.5\n{q}gamma, z{q} 5.0\n"
+ )
+ dtype = np.dtype([('f0', 'U8'), ('f1', np.float64)])
+ expected = np.array(
+ [("alpha, x", 2.5), ("beta, y", 4.5), ("gamma, z", 5.0)], dtype=dtype
+ )
+
+ res = np.loadtxt(txt, dtype=dtype, delimiter=None, quotechar=q)
+ assert_array_equal(res, expected)
+
+
def test_quote_support_default():
"""Support for quoted fields is disabled by default."""
txt = StringIO('"lat,long", 45, 30\n')
dtype = np.dtype([('f0', 'U24'), ('f1', np.float64), ('f2', np.float64)])
- with pytest.raises(ValueError, match="the number of columns changed"):
+ with pytest.raises(ValueError,
+ match="the dtype passed requires 3 columns but 4 were"):
np.loadtxt(txt, dtype=dtype, delimiter=",")
# Enable quoting support with non-None value for quotechar param
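For contrast with the default above, a short sketch of the opt-in path: with a non-None quotechar the quoted comma becomes part of the field rather than a delimiter, so the same input parses cleanly.

    import numpy as np
    from io import StringIO

    dtype = np.dtype([('f0', 'U24'), ('f1', np.float64), ('f2', np.float64)])
    res = np.loadtxt(StringIO('"lat,long", 45, 30\n'),
                     dtype=dtype, delimiter=",", quotechar='"')
    # res[0] == ('lat,long', 45.0, 30.0) -- three columns, no ValueError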
@@ -1011,3 +1034,15 @@ def test_control_characters_as_bytes():
"""Byte control characters (comments, delimiter) are supported."""
a = np.loadtxt(StringIO("#header\n1,2,3"), comments=b"#", delimiter=b",")
assert_equal(a, [1, 2, 3])
+
+
+@pytest.mark.filterwarnings('ignore::UserWarning')
+def test_field_growing_cases():
+    # Test empty-field appending/growing (each field still takes 1 character)
+    # to check that appending the final field does not create issues.
+ res = np.loadtxt([""], delimiter=",", dtype=bytes)
+ assert len(res) == 0
+
+ for i in range(1, 1024):
+ res = np.loadtxt(["," * i], delimiter=",", dtype=bytes)
+ assert len(res) == i+1
diff --git a/numpy/lib/tests/test_nanfunctions.py b/numpy/lib/tests/test_nanfunctions.py
index 45cacb792..257de381b 100644
--- a/numpy/lib/tests/test_nanfunctions.py
+++ b/numpy/lib/tests/test_nanfunctions.py
@@ -824,6 +824,7 @@ class TestNanFunctions_Median:
(-3, -1),
]
)
+ @pytest.mark.filterwarnings("ignore:All-NaN slice:RuntimeWarning")
def test_keepdims_out(self, axis):
d = np.ones((3, 5, 7, 11))
# Randomly set some elements to NaN:
@@ -1027,6 +1028,7 @@ class TestNanFunctions_Percentile:
(-3, -1),
]
)
+ @pytest.mark.filterwarnings("ignore:All-NaN slice:RuntimeWarning")
def test_keepdims_out(self, q, axis):
d = np.ones((3, 5, 7, 11))
# Randomly set some elements to NaN:
diff --git a/numpy/lib/tests/test_shape_base.py b/numpy/lib/tests/test_shape_base.py
index 76058cf20..eb6628904 100644
--- a/numpy/lib/tests/test_shape_base.py
+++ b/numpy/lib/tests/test_shape_base.py
@@ -492,7 +492,7 @@ class TestColumnStack:
assert_equal(actual, expected)
def test_generator(self):
- with assert_warns(FutureWarning):
+ with pytest.raises(TypeError, match="arrays to stack must be"):
column_stack((np.arange(3) for _ in range(2)))
@@ -529,7 +529,7 @@ class TestDstack:
assert_array_equal(res, desired)
def test_generator(self):
- with assert_warns(FutureWarning):
+ with pytest.raises(TypeError, match="arrays to stack must be"):
dstack((np.arange(3) for _ in range(2)))
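Since generators now raise instead of warning, callers have to materialise them first; a minimal sketch of the working pattern:

    import numpy as np

    cols = [np.arange(3) for _ in range(2)]   # a list, not a generator
    np.column_stack(cols).shape               # (3, 2)
    np.dstack(cols).shape                     # (1, 3, 2)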
diff --git a/numpy/lib/tests/test_twodim_base.py b/numpy/lib/tests/test_twodim_base.py
index 141f508fd..eb008c600 100644
--- a/numpy/lib/tests/test_twodim_base.py
+++ b/numpy/lib/tests/test_twodim_base.py
@@ -4,20 +4,14 @@
from numpy.testing import (
assert_equal, assert_array_equal, assert_array_max_ulp,
assert_array_almost_equal, assert_raises, assert_
- )
-
+)
from numpy import (
arange, add, fliplr, flipud, zeros, ones, eye, array, diag, histogram2d,
tri, mask_indices, triu_indices, triu_indices_from, tril_indices,
tril_indices_from, vander,
- )
-
+)
import numpy as np
-
-from numpy.core.tests.test_overrides import requires_array_function
-
-
import pytest
@@ -283,7 +277,6 @@ class TestHistogram2d:
assert_array_equal(H, answer)
assert_array_equal(xe, array([0., 0.25, 0.5, 0.75, 1]))
- @requires_array_function
def test_dispatch(self):
class ShouldDispatch:
def __array_function__(self, function, types, args, kwargs):
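The decorator can go because `__array_function__` dispatch is now unconditional. A self-contained sketch of what the remaining test exercises (the class body is abbreviated in the hunk above):

    import numpy as np

    class ShouldDispatch:
        def __array_function__(self, function, types, args, kwargs):
            return "dispatched!"

    np.histogram2d(ShouldDispatch(), [1, 2])  # returns 'dispatched!'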
diff --git a/numpy/lib/tests/test_type_check.py b/numpy/lib/tests/test_type_check.py
index 3f4ca6309..ea0326139 100644
--- a/numpy/lib/tests/test_type_check.py
+++ b/numpy/lib/tests/test_type_check.py
@@ -155,7 +155,7 @@ class TestIscomplex:
def test_fail(self):
z = np.array([-1, 0, 1])
res = iscomplex(z)
- assert_(not np.sometrue(res, axis=0))
+ assert_(not np.any(res, axis=0))
def test_pass(self):
z = np.array([-1j, 1, 0])
diff --git a/numpy/lib/tests/test_ufunclike.py b/numpy/lib/tests/test_ufunclike.py
index c280b6969..fac4f41d0 100644
--- a/numpy/lib/tests/test_ufunclike.py
+++ b/numpy/lib/tests/test_ufunclike.py
@@ -80,12 +80,6 @@ class TestUfunclike:
assert_(isinstance(f0d, MyArray))
assert_equal(f0d.metadata, 'bar')
- def test_deprecated(self):
- # NumPy 1.13.0, 2017-04-26
- assert_warns(DeprecationWarning, ufl.fix, [1, 2], y=nx.empty(2))
- assert_warns(DeprecationWarning, ufl.isposinf, [1, 2], y=nx.empty(2))
- assert_warns(DeprecationWarning, ufl.isneginf, [1, 2], y=nx.empty(2))
-
def test_scalar(self):
x = np.inf
actual = np.isposinf(x)
diff --git a/numpy/lib/twodim_base.py b/numpy/lib/twodim_base.py
index 654ee4cf5..6dcb65651 100644
--- a/numpy/lib/twodim_base.py
+++ b/numpy/lib/twodim_base.py
@@ -155,10 +155,6 @@ def flipud(m):
return m[::-1, ...]
-def _eye_dispatcher(N, M=None, k=None, dtype=None, order=None, *, like=None):
- return (like,)
-
-
@set_array_function_like_doc
@set_module('numpy')
def eye(N, M=None, k=0, dtype=float, order='C', *, like=None):
@@ -209,7 +205,7 @@ def eye(N, M=None, k=0, dtype=float, order='C', *, like=None):
"""
if like is not None:
- return _eye_with_like(N, M=M, k=k, dtype=dtype, order=order, like=like)
+ return _eye_with_like(like, N, M=M, k=k, dtype=dtype, order=order)
if M is None:
M = N
m = zeros((N, M), dtype=dtype, order=order)
@@ -228,9 +224,7 @@ def eye(N, M=None, k=0, dtype=float, order='C', *, like=None):
return m
-_eye_with_like = array_function_dispatch(
- _eye_dispatcher
-)(eye)
+_eye_with_like = array_function_dispatch()(eye)
def _diag_dispatcher(v, k=None):
@@ -369,10 +363,6 @@ def diagflat(v, k=0):
return wrap(res)
-def _tri_dispatcher(N, M=None, k=None, dtype=None, *, like=None):
- return (like,)
-
-
@set_array_function_like_doc
@set_module('numpy')
def tri(N, M=None, k=0, dtype=float, *, like=None):
@@ -416,7 +406,7 @@ def tri(N, M=None, k=0, dtype=float, *, like=None):
"""
if like is not None:
- return _tri_with_like(N, M=M, k=k, dtype=dtype, like=like)
+ return _tri_with_like(like, N, M=M, k=k, dtype=dtype)
if M is None:
M = N
@@ -430,9 +420,7 @@ def tri(N, M=None, k=0, dtype=float, *, like=None):
return m
-_tri_with_like = array_function_dispatch(
- _tri_dispatcher
-)(tri)
+_tri_with_like = array_function_dispatch()(tri)
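The refactor moves `like` into the first positional slot of the dispatched call; the public behaviour is unchanged. A sketch with a hypothetical MyArray showing what `like=` still does:

    import numpy as np

    class MyArray:
        # like= hands the call to this protocol method
        def __array_function__(self, func, types, args, kwargs):
            return f"{func.__name__} handled by MyArray"

    np.tri(3, like=MyArray())  # 'tri handled by MyArray'
    np.eye(3, like=MyArray())  # 'eye handled by MyArray'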
def _trilu_dispatcher(m, k=None):
@@ -766,7 +754,7 @@ def histogram2d(x, y, bins=10, range=None, density=None, weights=None):
>>> xcenters = (xedges[:-1] + xedges[1:]) / 2
>>> ycenters = (yedges[:-1] + yedges[1:]) / 2
>>> im.set_data(xcenters, ycenters, H)
- >>> ax.images.append(im)
+ >>> ax.add_image(im)
>>> plt.show()
It is also possible to construct a 2-D histogram without specifying bin
@@ -995,9 +983,42 @@ def tril_indices_from(arr, k=0):
k : int, optional
Diagonal offset (see `tril` for details).
+ Examples
+ --------
+
+ Create a 4 by 4 array.
+
+ >>> a = np.arange(16).reshape(4, 4)
+ >>> a
+ array([[ 0, 1, 2, 3],
+ [ 4, 5, 6, 7],
+ [ 8, 9, 10, 11],
+ [12, 13, 14, 15]])
+
+ Pass the array to get the indices of the lower triangular elements.
+
+ >>> trili = np.tril_indices_from(a)
+ >>> trili
+ (array([0, 1, 1, 2, 2, 2, 3, 3, 3, 3]), array([0, 0, 1, 0, 1, 2, 0, 1, 2, 3]))
+
+ >>> a[trili]
+ array([ 0, 4, 5, 8, 9, 10, 12, 13, 14, 15])
+
+ This is syntactic sugar for tril_indices().
+
+ >>> np.tril_indices(a.shape[0])
+ (array([0, 1, 1, 2, 2, 2, 3, 3, 3, 3]), array([0, 0, 1, 0, 1, 2, 0, 1, 2, 3]))
+
+ Use the `k` parameter to return the indices for the lower triangular array
+ up to the k-th diagonal.
+
+ >>> trili1 = np.tril_indices_from(a, k=1)
+ >>> a[trili1]
+ array([ 0, 1, 4, 5, 6, 8, 9, 10, 11, 12, 13, 14, 15])
+
See Also
--------
- tril_indices, tril
+ tril_indices, tril, triu_indices_from
Notes
-----
@@ -1114,9 +1135,43 @@ def triu_indices_from(arr, k=0):
triu_indices_from : tuple, shape(2) of ndarray, shape(N)
Indices for the upper-triangle of `arr`.
+ Examples
+ --------
+
+ Create a 4 by 4 array.
+
+ >>> a = np.arange(16).reshape(4, 4)
+ >>> a
+ array([[ 0, 1, 2, 3],
+ [ 4, 5, 6, 7],
+ [ 8, 9, 10, 11],
+ [12, 13, 14, 15]])
+
+ Pass the array to get the indices of the upper triangular elements.
+
+ >>> triui = np.triu_indices_from(a)
+ >>> triui
+ (array([0, 0, 0, 0, 1, 1, 1, 2, 2, 3]), array([0, 1, 2, 3, 1, 2, 3, 2, 3, 3]))
+
+ >>> a[triui]
+ array([ 0, 1, 2, 3, 5, 6, 7, 10, 11, 15])
+
+ This is syntactic sugar for triu_indices().
+
+ >>> np.triu_indices(a.shape[0])
+ (array([0, 0, 0, 0, 1, 1, 1, 2, 2, 3]), array([0, 1, 2, 3, 1, 2, 3, 2, 3, 3]))
+
+ Use the `k` parameter to return the indices for the upper triangular array
+ from the k-th diagonal.
+
+ >>> triuim1 = np.triu_indices_from(a, k=1)
+ >>> a[triuim1]
+ array([ 1, 2, 3, 6, 7, 11])
+
See Also
--------
- triu_indices, triu
+ triu_indices, triu, tril_indices_from
Notes
-----
diff --git a/numpy/lib/type_check.py b/numpy/lib/type_check.py
index 0dc014d76..3f84b80e5 100644
--- a/numpy/lib/type_check.py
+++ b/numpy/lib/type_check.py
@@ -2,7 +2,6 @@
"""
import functools
-import warnings
__all__ = ['iscomplexobj', 'isrealobj', 'imag', 'iscomplex',
'isreal', 'nan_to_num', 'real', 'real_if_close',
@@ -12,7 +11,7 @@ __all__ = ['iscomplexobj', 'isrealobj', 'imag', 'iscomplex',
from .._utils import set_module
import numpy.core.numeric as _nx
from numpy.core.numeric import asarray, asanyarray, isnan, zeros
-from numpy.core import overrides
+from numpy.core import overrides, getlimits
from .ufunclike import isneginf, isposinf
@@ -541,7 +540,8 @@ def real_if_close(a, tol=100):
Input array.
tol : float
Tolerance in machine epsilons for the complex part of the elements
- in the array.
+        in the array. If the tolerance is <= 1, it is used as an absolute
+        tolerance instead of being scaled by machine epsilon.
Returns
-------
@@ -572,11 +572,11 @@ def real_if_close(a, tol=100):
"""
a = asanyarray(a)
- if not issubclass(a.dtype.type, _nx.complexfloating):
+ type_ = a.dtype.type
+ if not issubclass(type_, _nx.complexfloating):
return a
if tol > 1:
- from numpy.core import getlimits
- f = getlimits.finfo(a.dtype.type)
+ f = getlimits.finfo(type_)
tol = f.eps * tol
if _nx.all(_nx.absolute(a.imag) < tol):
a = a.real
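Illustrating the two tolerance regimes the docstring now distinguishes (the sample values are ours):

    import numpy as np

    z = np.array([2.1 + 4e-14j, 5.2 + 3e-15j])
    np.real_if_close(z, tol=1000)   # > 1: scaled by eps; returns array([2.1, 5.2])
    np.real_if_close(z, tol=1e-12)  # <= 1: absolute;     returns array([2.1, 5.2])
    np.real_if_close(z, tol=1e-15)  # too tight: the imaginary parts are kept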
diff --git a/numpy/lib/ufunclike.py b/numpy/lib/ufunclike.py
index a93c4773b..05fe60c5b 100644
--- a/numpy/lib/ufunclike.py
+++ b/numpy/lib/ufunclike.py
@@ -6,72 +6,16 @@ storing results in an output array.
__all__ = ['fix', 'isneginf', 'isposinf']
import numpy.core.numeric as nx
-from numpy.core.overrides import (
- array_function_dispatch, ARRAY_FUNCTION_ENABLED,
-)
+from numpy.core.overrides import array_function_dispatch
import warnings
import functools
-def _deprecate_out_named_y(f):
- """
- Allow the out argument to be passed as the name `y` (deprecated)
-
- In future, this decorator should be removed.
- """
- @functools.wraps(f)
- def func(x, out=None, **kwargs):
- if 'y' in kwargs:
- if 'out' in kwargs:
- raise TypeError(
- "{} got multiple values for argument 'out'/'y'"
- .format(f.__name__)
- )
- out = kwargs.pop('y')
- # NumPy 1.13.0, 2017-04-26
- warnings.warn(
- "The name of the out argument to {} has changed from `y` to "
- "`out`, to match other ufuncs.".format(f.__name__),
- DeprecationWarning, stacklevel=3)
- return f(x, out=out, **kwargs)
-
- return func
-
-
-def _fix_out_named_y(f):
- """
- Allow the out argument to be passed as the name `y` (deprecated)
-
- This decorator should only be used if _deprecate_out_named_y is used on
- a corresponding dispatcher function.
- """
- @functools.wraps(f)
- def func(x, out=None, **kwargs):
- if 'y' in kwargs:
- # we already did error checking in _deprecate_out_named_y
- out = kwargs.pop('y')
- return f(x, out=out, **kwargs)
-
- return func
-
-
-def _fix_and_maybe_deprecate_out_named_y(f):
- """
- Use the appropriate decorator, depending upon if dispatching is being used.
- """
- if ARRAY_FUNCTION_ENABLED:
- return _fix_out_named_y(f)
- else:
- return _deprecate_out_named_y(f)
-
-
-@_deprecate_out_named_y
def _dispatcher(x, out=None):
return (x, out)
@array_function_dispatch(_dispatcher, verify=False, module='numpy')
-@_fix_and_maybe_deprecate_out_named_y
def fix(x, out=None):
"""
Round to nearest integer towards zero.
@@ -125,7 +69,6 @@ def fix(x, out=None):
@array_function_dispatch(_dispatcher, verify=False, module='numpy')
-@_fix_and_maybe_deprecate_out_named_y
def isposinf(x, out=None):
"""
Test element-wise for positive infinity, return result as bool array.
@@ -197,7 +140,6 @@ def isposinf(x, out=None):
@array_function_dispatch(_dispatcher, verify=False, module='numpy')
-@_fix_and_maybe_deprecate_out_named_y
def isneginf(x, out=None):
"""
Test element-wise for negative infinity, return result as bool array.
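With the y= alias gone, the only remaining spelling is the out keyword; for example:

    import numpy as np

    out = np.empty(3)
    np.fix([1.7, -1.7, 2.5], out=out)   # passing y=... now raises TypeError
    # out == array([ 1., -1.,  2.])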
diff --git a/numpy/lib/utils.py b/numpy/lib/utils.py
index 2a9d30b16..095c914db 100644
--- a/numpy/lib/utils.py
+++ b/numpy/lib/utils.py
@@ -5,6 +5,7 @@ import types
import re
import warnings
import functools
+import platform
from .._utils import set_module
from numpy.core.numerictypes import issubclass_, issubsctype, issubdtype
@@ -24,6 +25,8 @@ def show_runtime():
including available intrinsic support and BLAS/LAPACK library
in use
+ .. versionadded:: 1.24.0
+
See Also
--------
show_config : Show libraries in the system on which NumPy was built.
@@ -31,45 +34,20 @@ def show_runtime():
Notes
-----
1. Information is derived with the help of `threadpoolctl <https://pypi.org/project/threadpoolctl/>`_
- library.
+ library if available.
    2. SIMD-related information is derived from ``__cpu_features__``,
       ``__cpu_baseline__`` and ``__cpu_dispatch__``.
- Examples
- --------
- >>> import numpy as np
- >>> np.show_runtime()
- [{'simd_extensions': {'baseline': ['SSE', 'SSE2', 'SSE3'],
- 'found': ['SSSE3',
- 'SSE41',
- 'POPCNT',
- 'SSE42',
- 'AVX',
- 'F16C',
- 'FMA3',
- 'AVX2'],
- 'not_found': ['AVX512F',
- 'AVX512CD',
- 'AVX512_KNL',
- 'AVX512_KNM',
- 'AVX512_SKX',
- 'AVX512_CLX',
- 'AVX512_CNL',
- 'AVX512_ICL']}},
- {'architecture': 'Zen',
- 'filepath': '/usr/lib/x86_64-linux-gnu/openblas-pthread/libopenblasp-r0.3.20.so',
- 'internal_api': 'openblas',
- 'num_threads': 12,
- 'prefix': 'libopenblas',
- 'threading_layer': 'pthreads',
- 'user_api': 'blas',
- 'version': '0.3.20'}]
"""
from numpy.core._multiarray_umath import (
__cpu_features__, __cpu_baseline__, __cpu_dispatch__
)
from pprint import pprint
- config_found = []
+ config_found = [{
+ "numpy_version": np.__version__,
+ "python": sys.version,
+ "uname": platform.uname(),
+ }]
features_found, features_not_found = [], []
for feature in __cpu_dispatch__:
if __cpu_features__[feature]:
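The shape of the new leading entry: show_runtime() now prepends host metadata ahead of the SIMD and BLAS sections. A sketch of that header dict (field names from the diff; values are machine-dependent):

    import platform
    import sys
    import numpy as np

    header = {
        "numpy_version": np.__version__,   # e.g. '1.25.0.dev0+...'
        "python": sys.version,
        "uname": platform.uname(),
    }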
@@ -550,15 +528,16 @@ def _info(obj, output=None):
@set_module('numpy')
def info(object=None, maxwidth=76, output=None, toplevel='numpy'):
"""
- Get help information for a function, class, or module.
+ Get help information for an array, function, class, or module.
Parameters
----------
object : object or str, optional
- Input object or name to get information about. If `object` is a
- numpy object, its docstring is given. If it is a string, available
- modules are searched for matching objects. If None, information
- about `info` itself is returned.
+ Input object or name to get information about. If `object` is
+ an `ndarray` instance, information about the array is printed.
+ If `object` is a numpy object, its docstring is given. If it is
+ a string, available modules are searched for matching objects.
+ If None, information about `info` itself is returned.
maxwidth : int, optional
Printing width.
output : file like object, optional
@@ -597,6 +576,22 @@ def info(object=None, maxwidth=76, output=None, toplevel='numpy'):
*** Repeat reference found in numpy.fft.fftpack ***
*** Total of 3 references found. ***
+ When the argument is an array, information about the array is printed.
+
+ >>> a = np.array([[1 + 2j, 3, -4], [-5j, 6, 0]], dtype=np.complex64)
+ >>> np.info(a)
+ class: ndarray
+ shape: (2, 3)
+ strides: (24, 8)
+ itemsize: 8
+ aligned: True
+ contiguous: True
+ fortran: False
+ data pointer: 0x562b6e0d2860 # may vary
+ byteorder: little
+ byteswap: False
+ type: complex64
+
"""
global _namedict, _dictlist
# Local import to speed up numpy's import time.