Diffstat (limited to 'numpy/lib')
-rw-r--r--  numpy/lib/__init__.pyi                 |   5
-rw-r--r--  numpy/lib/_version.py                  |   4
-rw-r--r--  numpy/lib/arraypad.pyi                 |  93
-rw-r--r--  numpy/lib/arraysetops.py               |   4
-rw-r--r--  numpy/lib/format.py                    |   4
-rw-r--r--  numpy/lib/function_base.py             |  52
-rw-r--r--  numpy/lib/function_base.pyi            |   2
-rw-r--r--  numpy/lib/index_tricks.py              |   7
-rw-r--r--  numpy/lib/index_tricks.pyi             |  33
-rw-r--r--  numpy/lib/polynomial.py                |   9
-rw-r--r--  numpy/lib/stride_tricks.py             |   5
-rw-r--r--  numpy/lib/tests/test__version.py       |   2
-rw-r--r--  numpy/lib/tests/test_arraysetops.py    |  11
-rw-r--r--  numpy/lib/tests/test_function_base.py  |  22
-rw-r--r--  numpy/lib/tests/test_regression.py     |   3
-rw-r--r--  numpy/lib/twodim_base.py               |  37
-rw-r--r--  numpy/lib/twodim_base.pyi              | 265
-rw-r--r--  numpy/lib/type_check.pyi               | 246
-rw-r--r--  numpy/lib/utils.py                     |   2
19 files changed, 695 insertions, 111 deletions
diff --git a/numpy/lib/__init__.pyi b/numpy/lib/__init__.pyi index 2904b6a84..45a283782 100644 --- a/numpy/lib/__init__.pyi +++ b/numpy/lib/__init__.pyi @@ -226,8 +226,11 @@ from numpy.lib.utils import ( safe_eval as safe_eval, ) +from numpy.core.multiarray import ( + tracemalloc_domain as tracemalloc_domain, +) + __all__: List[str] __version__ = version emath = scimath -tracemalloc_domain: int diff --git a/numpy/lib/_version.py b/numpy/lib/_version.py index 00e00e9a7..bfac5f814 100644 --- a/numpy/lib/_version.py +++ b/numpy/lib/_version.py @@ -15,7 +15,7 @@ class NumpyVersion(): """Parse and compare numpy version strings. NumPy has the following versioning scheme (numbers given are examples; they - can be > 9) in principle): + can be > 9 in principle): - Released version: '1.8.0', '1.8.1', etc. - Alpha: '1.8.0a1', '1.8.0a2', etc. @@ -54,7 +54,7 @@ class NumpyVersion(): def __init__(self, vstring): self.vstring = vstring - ver_main = re.match(r'\d\.\d+\.\d+', vstring) + ver_main = re.match(r'\d+\.\d+\.\d+', vstring) if not ver_main: raise ValueError("Not a valid numpy version string") diff --git a/numpy/lib/arraypad.pyi b/numpy/lib/arraypad.pyi index 64e3e1331..df9538dd7 100644 --- a/numpy/lib/arraypad.pyi +++ b/numpy/lib/arraypad.pyi @@ -1,5 +1,94 @@ -from typing import List +import sys +from typing import ( + Any, + Dict, + List, + overload, + Tuple, + TypeVar, +) + +from numpy import ndarray, dtype, generic + +from numpy.typing import ( + ArrayLike, + NDArray, + _ArrayLikeInt, + _NestedSequence, + _SupportsArray, +) + +if sys.version_info >= (3, 8): + from typing import Literal as L, Protocol +else: + from typing_extensions import Literal as L, Protocol + +_SCT = TypeVar("_SCT", bound=generic) + +class _ModeFunc(Protocol): + def __call__( + self, + __vector: NDArray[Any], + __iaxis_pad_width: Tuple[int, int], + __iaxis: int, + __kwargs: Dict[str, Any], + ) -> None: ... + +_ModeKind = L[ + "constant", + "edge", + "linear_ramp", + "maximum", + "mean", + "median", + "minimum", + "reflect", + "symmetric", + "wrap", + "empty", +] + +_ArrayLike = _NestedSequence[_SupportsArray[dtype[_SCT]]] __all__: List[str] -def pad(array, pad_width, mode=..., **kwargs): ... +# TODO: In practice each keyword argument is exclusive to one or more +# specific modes. Consider adding more overloads to express this in the future. + +# Expand `**kwargs` into explicit keyword-only arguments +@overload +def pad( + array: _ArrayLike[_SCT], + pad_width: _ArrayLikeInt, + mode: _ModeKind = ..., + *, + stat_length: None | _ArrayLikeInt = ..., + constant_values: ArrayLike = ..., + end_values: ArrayLike = ..., + reflect_type: L["odd", "even"] = ..., +) -> NDArray[_SCT]: ... +@overload +def pad( + array: ArrayLike, + pad_width: _ArrayLikeInt, + mode: _ModeKind = ..., + *, + stat_length: None | _ArrayLikeInt = ..., + constant_values: ArrayLike = ..., + end_values: ArrayLike = ..., + reflect_type: L["odd", "even"] = ..., +) -> NDArray[Any]: ... +@overload +def pad( + array: _ArrayLike[_SCT], + pad_width: _ArrayLikeInt, + mode: _ModeFunc, + **kwargs: Any, +) -> NDArray[_SCT]: ... +@overload +def pad( + array: ArrayLike, + pad_width: _ArrayLikeInt, + mode: _ModeFunc, + **kwargs: Any, +) -> NDArray[Any]: ... 
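The _version.py hunk above widens the major-version pattern from r'\d' to r'\d+'. A minimal sketch of what this enables, assuming a NumPy build that includes the change (previously '10.0.1' raised ValueError("Not a valid numpy version string")):

from numpy.lib import NumpyVersion

# Multi-digit major versions now parse; comparing against a plain string
# constructs a NumpyVersion internally, so both lines exercise the fix.
assert NumpyVersion('10.0.1') > '1.8.0'
assert NumpyVersion('1.8.0') < '10.0.1'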
diff --git a/numpy/lib/arraysetops.py b/numpy/lib/arraysetops.py index 7600e17be..bd56b6975 100644 --- a/numpy/lib/arraysetops.py +++ b/numpy/lib/arraysetops.py @@ -339,7 +339,9 @@ def _unique1d(ar, return_index=False, return_inverse=False, aux_firstnan = np.searchsorted(np.isnan(aux), True, side='left') else: aux_firstnan = np.searchsorted(aux, aux[-1], side='left') - mask[1:aux_firstnan] = (aux[1:aux_firstnan] != aux[:aux_firstnan - 1]) + if aux_firstnan > 0: + mask[1:aux_firstnan] = ( + aux[1:aux_firstnan] != aux[:aux_firstnan - 1]) mask[aux_firstnan] = True mask[aux_firstnan + 1:] = False else: diff --git a/numpy/lib/format.py b/numpy/lib/format.py index ead6a0420..6ac66c22a 100644 --- a/numpy/lib/format.py +++ b/numpy/lib/format.py @@ -44,7 +44,7 @@ Capabilities read most ``.npy`` files that they have been given without much documentation. -- Allows memory-mapping of the data. See `open_memmep`. +- Allows memory-mapping of the data. See `open_memmap`. - Can be read from a filelike stream object instead of an actual file. @@ -606,7 +606,7 @@ def _read_array_header(fp, version): if EXPECTED_KEYS != d.keys(): keys = sorted(d.keys()) msg = "Header does not contain the correct keys: {!r}" - raise ValueError(msg.format(d.keys())) + raise ValueError(msg.format(keys)) # Sanity-check the values. if (not isinstance(d['shape'], tuple) or diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index 783d45c2f..fdbe698d6 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -88,8 +88,11 @@ def rot90(m, k=1, axes=(0, 1)): Notes ----- - rot90(m, k=1, axes=(1,0)) is the reverse of rot90(m, k=1, axes=(0,1)) - rot90(m, k=1, axes=(1,0)) is equivalent to rot90(m, k=-1, axes=(0,1)) + ``rot90(m, k=1, axes=(1,0))`` is the reverse of + ``rot90(m, k=1, axes=(0,1))`` + + ``rot90(m, k=1, axes=(1,0))`` is equivalent to + ``rot90(m, k=-1, axes=(0,1))`` Examples -------- @@ -1519,7 +1522,7 @@ def unwrap(p, discont=None, axis=-1, *, period=2*pi): p : array_like Input array. discont : float, optional - Maximum discontinuity between values, default is ``period/2``. + Maximum discontinuity between values, default is ``period/2``. Values below ``period/2`` are treated as if they were ``period/2``. To have an effect different from the default, `discont` should be larger than ``period/2``. @@ -1528,7 +1531,7 @@ def unwrap(p, discont=None, axis=-1, *, period=2*pi): period: float, optional Size of the range over which the input wraps. By default, it is ``2 pi``. - + .. versionadded:: 1.21.0 Returns @@ -1542,8 +1545,8 @@ def unwrap(p, discont=None, axis=-1, *, period=2*pi): Notes ----- - If the discontinuity in `p` is smaller than ``period/2``, - but larger than `discont`, no unwrapping is done because taking + If the discontinuity in `p` is smaller than ``period/2``, + but larger than `discont`, no unwrapping is done because taking the complement would only make the discontinuity larger. 
Examples @@ -1576,7 +1579,7 @@ def unwrap(p, discont=None, axis=-1, *, period=2*pi): slice1 = tuple(slice1) dtype = np.result_type(dd, period) if _nx.issubdtype(dtype, _nx.integer): - interval_high, rem = divmod(period, 2) + interval_high, rem = divmod(period, 2) boundary_ambiguous = rem == 0 else: interval_high = period / 2 @@ -1940,11 +1943,19 @@ def _calculate_shapes(broadcast_shape, dim_sizes, list_of_core_dims): for core_dims in list_of_core_dims] -def _create_arrays(broadcast_shape, dim_sizes, list_of_core_dims, dtypes): +def _create_arrays(broadcast_shape, dim_sizes, list_of_core_dims, dtypes, + results=None): """Helper for creating output arrays in vectorize.""" shapes = _calculate_shapes(broadcast_shape, dim_sizes, list_of_core_dims) - arrays = tuple(np.empty(shape, dtype=dtype) - for shape, dtype in zip(shapes, dtypes)) + if dtypes is None: + dtypes = [None] * len(shapes) + if results is None: + arrays = tuple(np.empty(shape=shape, dtype=dtype) + for shape, dtype in zip(shapes, dtypes)) + else: + arrays = tuple(np.empty_like(result, shape=shape, dtype=dtype) + for result, shape, dtype + in zip(results, shapes, dtypes)) return arrays @@ -2290,11 +2301,8 @@ class vectorize: for result, core_dims in zip(results, output_core_dims): _update_dim_sizes(dim_sizes, result, core_dims) - if otypes is None: - otypes = [asarray(result).dtype for result in results] - outputs = _create_arrays(broadcast_shape, dim_sizes, - output_core_dims, otypes) + output_core_dims, otypes, results) for output, result in zip(outputs, results): output[index] = result @@ -4133,13 +4141,13 @@ def trapz(y, x=None, dx=1.0, axis=-1): If `x` is provided, the integration happens in sequence along its elements - they are not sorted. - + Integrate `y` (`x`) along each 1d slice on the given axis, compute :math:`\int y(x) dx`. When `x` is specified, this integrates along the parametric curve, computing :math:`\int_t y(t) dt = \int_t y(t) \left.\frac{dx}{dt}\right|_{x=x(t)} dt`. - + Parameters ---------- y : array_like @@ -4160,7 +4168,7 @@ def trapz(y, x=None, dx=1.0, axis=-1): a single axis by the trapezoidal rule. If 'y' is a 1-dimensional array, then the result is a float. If 'n' is greater than 1, then the result is an 'n-1' dimensional array. - + See Also -------- sum, cumsum @@ -4189,16 +4197,16 @@ def trapz(y, x=None, dx=1.0, axis=-1): 8.0 >>> np.trapz([1,2,3], dx=2) 8.0 - + Using a decreasing `x` corresponds to integrating in reverse: - - >>> np.trapz([1,2,3], x=[8,6,4]) + + >>> np.trapz([1,2,3], x=[8,6,4]) -8.0 - + More generally `x` is used to integrate along a parametric curve. This finds the area of a circle, noting we repeat the sample which closes the curve: - + >>> theta = np.linspace(0, 2 * np.pi, num=1000, endpoint=True) >>> np.trapz(np.cos(theta), x=np.sin(theta)) 3.141571941375841 diff --git a/numpy/lib/function_base.pyi b/numpy/lib/function_base.pyi index da24ab21d..69c615c9c 100644 --- a/numpy/lib/function_base.pyi +++ b/numpy/lib/function_base.pyi @@ -30,7 +30,7 @@ def gradient(f, *varargs, axis=..., edge_order=...): ... def diff(a, n=..., axis=..., prepend = ..., append = ...): ... def interp(x, xp, fp, left=..., right=..., period=...): ... def angle(z, deg=...): ... -def unwrap(p, discont = ..., axis=...): ... +def unwrap(p, discont = ..., axis=..., *, period=...): ... def sort_complex(a): ... def trim_zeros(filt, trim=...): ... def extract(condition, arr): ... 
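Two user-visible behaviours from the function_base.py hunks above, shown as a minimal sketch that assumes a build containing these changes; MyArray is only an illustrative subclass, not part of the patch: the `period` keyword of np.unwrap, and subclass preservation in np.vectorize now that _create_arrays allocates outputs with empty_like from the first results.

import numpy as np

# unwrap with a custom period; an integer period takes the new divmod branch
print(np.unwrap([0, 1, 2, -1, 0], period=4))   # [0 1 2 3 4]

# vectorize with a core-dimension signature keeps the input's ndarray subclass
class MyArray(np.ndarray):
    pass

m = np.eye(3).view(MyArray)
v = np.arange(9.0).reshape(3, 3).view(MyArray)
matvec = np.vectorize(np.matmul, signature='(m,m),(m)->(m)')
print(type(matvec(m, v)).__name__)             # MyArray (plain ndarray before this change)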
diff --git a/numpy/lib/index_tricks.py b/numpy/lib/index_tricks.py index 72d8e9de4..8d1b6e5be 100644 --- a/numpy/lib/index_tricks.py +++ b/numpy/lib/index_tricks.py @@ -201,7 +201,6 @@ class nd_grid: length = int(step) if step != 1: step = (key.stop-start)/float(step-1) - stop = key.stop + step return _nx.arange(0, length, 1, float)*step + start else: return _nx.arange(start, stop, step) @@ -631,7 +630,8 @@ class ndindex: Examples -------- - # dimensions as individual arguments + Dimensions as individual arguments + >>> for index in np.ndindex(3, 2, 1): ... print(index) (0, 0, 0) @@ -641,7 +641,8 @@ class ndindex: (2, 0, 0) (2, 1, 0) - # same dimensions - but in a tuple (3, 2, 1) + Same dimensions - but in a tuple ``(3, 2, 1)`` + >>> for index in np.ndindex((3, 2, 1)): ... print(index) (0, 0, 0) diff --git a/numpy/lib/index_tricks.pyi b/numpy/lib/index_tricks.pyi index a3bfef6b6..0f9ae94a9 100644 --- a/numpy/lib/index_tricks.pyi +++ b/numpy/lib/index_tricks.pyi @@ -44,6 +44,11 @@ from numpy.typing import ( _ShapeLike, ) +from numpy.core.multiarray import ( + unravel_index as unravel_index, + ravel_multi_index as ravel_multi_index, +) + if sys.version_info >= (3, 8): from typing import Literal, SupportsIndex else: @@ -58,34 +63,6 @@ _ArrayType = TypeVar("_ArrayType", bound=ndarray[Any, Any]) __all__: List[str] @overload -def unravel_index( # type: ignore[misc] - indices: Union[int, integer[Any]], - shape: _ShapeLike, - order: _OrderCF = ... -) -> Tuple[intp, ...]: ... -@overload -def unravel_index( - indices: _ArrayLikeInt, - shape: _ShapeLike, - order: _OrderCF = ... -) -> Tuple[NDArray[intp], ...]: ... - -@overload -def ravel_multi_index( # type: ignore[misc] - multi_index: Sequence[Union[int, integer[Any]]], - dims: _ShapeLike, - mode: Union[_ModeKind, Tuple[_ModeKind, ...]] = ..., - order: _OrderCF = ... -) -> intp: ... -@overload -def ravel_multi_index( - multi_index: Sequence[_ArrayLikeInt], - dims: _ShapeLike, - mode: Union[_ModeKind, Tuple[_ModeKind, ...]] = ..., - order: _OrderCF = ... -) -> NDArray[intp]: ... - -@overload def ix_(*args: _NestedSequence[_SupportsDType[_DType]]) -> Tuple[ndarray[Any, _DType], ...]: ... @overload def ix_(*args: _NestedSequence[str]) -> Tuple[NDArray[str_], ...]: ... diff --git a/numpy/lib/polynomial.py b/numpy/lib/polynomial.py index 56fcce621..23021cafa 100644 --- a/numpy/lib/polynomial.py +++ b/numpy/lib/polynomial.py @@ -489,8 +489,11 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False): default) just the coefficients are returned, when True diagnostic information from the singular value decomposition is also returned. w : array_like, shape (M,), optional - Weights to apply to the y-coordinates of the sample points. For - gaussian uncertainties, use 1/sigma (not 1/sigma**2). + Weights. If not None, the weight ``w[i]`` applies to the unsquared + residual ``y[i] - y_hat[i]`` at ``x[i]``. Ideally the weights are + chosen so that the errors of the products ``w[i]*y[i]`` all have the + same variance. When using inverse-variance weighting, use + ``w[i] = 1/sigma(y[i])``. The default value is None. cov : bool or str, optional If given and not `False`, return not just the estimate but also its covariance matrix. By default, the covariance are scaled by @@ -498,7 +501,7 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False): to be unreliable except in a relative sense and everything is scaled such that the reduced chi2 is unity. 
This scaling is omitted if ``cov='unscaled'``, as is relevant for the case that the weights are - 1/sigma**2, with sigma known to be a reliable estimate of the + w = 1/sigma, with sigma known to be a reliable estimate of the uncertainty. Returns diff --git a/numpy/lib/stride_tricks.py b/numpy/lib/stride_tricks.py index 82c8a57c8..5093993a9 100644 --- a/numpy/lib/stride_tricks.py +++ b/numpy/lib/stride_tricks.py @@ -371,8 +371,9 @@ def broadcast_to(array, shape, subok=False): ---------- array : array_like The array to broadcast. - shape : tuple - The shape of the desired array. + shape : tuple or int + The shape of the desired array. A single integer ``i`` is interpreted + as ``(i,)``. subok : bool, optional If True, then sub-classes will be passed-through, otherwise the returned array will be forced to be a base-class array (default). diff --git a/numpy/lib/tests/test__version.py b/numpy/lib/tests/test__version.py index 182504631..e6d41ad93 100644 --- a/numpy/lib/tests/test__version.py +++ b/numpy/lib/tests/test__version.py @@ -7,7 +7,7 @@ from numpy.lib import NumpyVersion def test_main_versions(): assert_(NumpyVersion('1.8.0') == '1.8.0') - for ver in ['1.9.0', '2.0.0', '1.8.1']: + for ver in ['1.9.0', '2.0.0', '1.8.1', '10.0.1']: assert_(NumpyVersion('1.8.0') < ver) for ver in ['1.7.0', '1.7.1', '0.9.9']: diff --git a/numpy/lib/tests/test_arraysetops.py b/numpy/lib/tests/test_arraysetops.py index d62da9efb..13385cd24 100644 --- a/numpy/lib/tests/test_arraysetops.py +++ b/numpy/lib/tests/test_arraysetops.py @@ -610,6 +610,17 @@ class TestUnique: assert_equal(np.unique(a, return_inverse=True), (ua, ua_inv)) assert_equal(np.unique(a, return_counts=True), (ua, ua_cnt)) + # test for gh-19300 + all_nans = [np.nan] * 4 + ua = [np.nan] + ua_idx = [0] + ua_inv = [0, 0, 0, 0] + ua_cnt = [4] + assert_equal(np.unique(all_nans), ua) + assert_equal(np.unique(all_nans, return_index=True), (ua, ua_idx)) + assert_equal(np.unique(all_nans, return_inverse=True), (ua, ua_inv)) + assert_equal(np.unique(all_nans, return_counts=True), (ua, ua_cnt)) + def test_unique_axis_errors(self): assert_raises(TypeError, self._run_axis_tests, object) assert_raises(TypeError, self._run_axis_tests, diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index a4f49a78b..e1b615223 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -1665,6 +1665,26 @@ class TestVectorize: with assert_raises_regex(ValueError, 'new output dimensions'): f(x) + def test_subclasses(self): + class subclass(np.ndarray): + pass + + m = np.array([[1., 0., 0.], + [0., 0., 1.], + [0., 1., 0.]]).view(subclass) + v = np.array([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]]).view(subclass) + # generalized (gufunc) + matvec = np.vectorize(np.matmul, signature='(m,m),(m)->(m)') + r = matvec(m, v) + assert_equal(type(r), subclass) + assert_equal(r, [[1., 3., 2.], [4., 6., 5.], [7., 9., 8.]]) + + # element-wise (ufunc) + mult = np.vectorize(lambda x, y: x*y) + r = mult(m, v) + assert_equal(type(r), subclass) + assert_equal(r, m * v) + class TestLeaks: class A: @@ -1798,7 +1818,7 @@ class TestUnwrap: assert_array_equal(unwrap([1, 1 + 2 * np.pi]), [1, 1]) # check that unwrap maintains continuity assert_(np.all(diff(unwrap(rand(10) * 100)) < np.pi)) - + def test_period(self): # check that unwrap removes jumps greater that 255 assert_array_equal(unwrap([1, 1 + 256], period=255), [1, 2]) diff --git a/numpy/lib/tests/test_regression.py b/numpy/lib/tests/test_regression.py index 
94fac7ef0..373226277 100644 --- a/numpy/lib/tests/test_regression.py +++ b/numpy/lib/tests/test_regression.py @@ -64,8 +64,7 @@ class TestRegression: def test_mem_string_concat(self): # Ticket #469 x = np.array([]) - with pytest.warns(FutureWarning): - np.append(x, 'asdasd\tasdasd') + np.append(x, 'asdasd\tasdasd') def test_poly_div(self): # Ticket #553 diff --git a/numpy/lib/twodim_base.py b/numpy/lib/twodim_base.py index ac5fe1c6b..83c028061 100644 --- a/numpy/lib/twodim_base.py +++ b/numpy/lib/twodim_base.py @@ -439,10 +439,12 @@ def tril(m, k=0): Lower triangle of an array. Return a copy of an array with elements above the `k`-th diagonal zeroed. + For arrays with ``ndim`` exceeding 2, `tril` will apply to the final two + axes. Parameters ---------- - m : array_like, shape (M, N) + m : array_like, shape (..., M, N) Input array. k : int, optional Diagonal above which to zero elements. `k = 0` (the default) is the @@ -450,7 +452,7 @@ def tril(m, k=0): Returns ------- - tril : ndarray, shape (M, N) + tril : ndarray, shape (..., M, N) Lower triangle of `m`, of same shape and data-type as `m`. See Also @@ -465,6 +467,20 @@ def tril(m, k=0): [ 7, 8, 0], [10, 11, 12]]) + >>> np.tril(np.arange(3*4*5).reshape(3, 4, 5)) + array([[[ 0, 0, 0, 0, 0], + [ 5, 6, 0, 0, 0], + [10, 11, 12, 0, 0], + [15, 16, 17, 18, 0]], + [[20, 0, 0, 0, 0], + [25, 26, 0, 0, 0], + [30, 31, 32, 0, 0], + [35, 36, 37, 38, 0]], + [[40, 0, 0, 0, 0], + [45, 46, 0, 0, 0], + [50, 51, 52, 0, 0], + [55, 56, 57, 58, 0]]]) + """ m = asanyarray(m) mask = tri(*m.shape[-2:], k=k, dtype=bool) @@ -478,7 +494,8 @@ def triu(m, k=0): Upper triangle of an array. Return a copy of an array with the elements below the `k`-th diagonal - zeroed. + zeroed. For arrays with ``ndim`` exceeding 2, `triu` will apply to the final + two axes. Please refer to the documentation for `tril` for further details. 
@@ -494,6 +511,20 @@ def triu(m, k=0): [ 0, 8, 9], [ 0, 0, 12]]) + >>> np.triu(np.arange(3*4*5).reshape(3, 4, 5)) + array([[[ 0, 1, 2, 3, 4], + [ 0, 6, 7, 8, 9], + [ 0, 0, 12, 13, 14], + [ 0, 0, 0, 18, 19]], + [[20, 21, 22, 23, 24], + [ 0, 26, 27, 28, 29], + [ 0, 0, 32, 33, 34], + [ 0, 0, 0, 38, 39]], + [[40, 41, 42, 43, 44], + [ 0, 46, 47, 48, 49], + [ 0, 0, 52, 53, 54], + [ 0, 0, 0, 58, 59]]]) + """ m = asanyarray(m) mask = tri(*m.shape[-2:], k=k-1, dtype=bool) diff --git a/numpy/lib/twodim_base.pyi b/numpy/lib/twodim_base.pyi index 79b9511b8..007338d77 100644 --- a/numpy/lib/twodim_base.pyi +++ b/numpy/lib/twodim_base.pyi @@ -1,32 +1,255 @@ -from typing import List, Optional, Any +from typing import ( + Any, + Callable, + List, + Sequence, + overload, + Tuple, + Type, + TypeVar, + Union, +) -from numpy import ndarray, _OrderCF -from numpy.typing import ArrayLike, DTypeLike +from numpy import ( + ndarray, + dtype, + generic, + number, + bool_, + timedelta64, + datetime64, + int_, + intp, + float64, + signedinteger, + floating, + complexfloating, + object_, + _OrderCF, +) + +from numpy.typing import ( + DTypeLike, + _SupportsDType, + ArrayLike, + NDArray, + _NestedSequence, + _SupportsArray, + _ArrayLikeInt_co, + _ArrayLikeFloat_co, + _ArrayLikeComplex_co, + _ArrayLikeObject_co, +) + +_T = TypeVar("_T") +_SCT = TypeVar("_SCT", bound=generic) + +# The returned arrays dtype must be compatible with `np.equal` +_MaskFunc = Callable[ + [NDArray[int_], _T], + NDArray[Union[number[Any], bool_, timedelta64, datetime64, object_]], +] + +_DTypeLike = Union[ + Type[_SCT], + dtype[_SCT], + _SupportsDType[dtype[_SCT]], +] +_ArrayLike = _NestedSequence[_SupportsArray[dtype[_SCT]]] __all__: List[str] -def fliplr(m): ... -def flipud(m): ... +@overload +def fliplr(m: _ArrayLike[_SCT]) -> NDArray[_SCT]: ... +@overload +def fliplr(m: ArrayLike) -> NDArray[Any]: ... + +@overload +def flipud(m: _ArrayLike[_SCT]) -> NDArray[_SCT]: ... +@overload +def flipud(m: ArrayLike) -> NDArray[Any]: ... +@overload def eye( N: int, - M: Optional[int] = ..., + M: None | int = ..., + k: int = ..., + dtype: None = ..., + order: _OrderCF = ..., + *, + like: None | ArrayLike = ..., +) -> NDArray[float64]: ... +@overload +def eye( + N: int, + M: None | int = ..., + k: int = ..., + dtype: _DTypeLike[_SCT] = ..., + order: _OrderCF = ..., + *, + like: None | ArrayLike = ..., +) -> NDArray[_SCT]: ... +@overload +def eye( + N: int, + M: None | int = ..., k: int = ..., dtype: DTypeLike = ..., order: _OrderCF = ..., *, - like: Optional[ArrayLike] = ... -) -> ndarray[Any, Any]: ... - -def diag(v, k=...): ... -def diagflat(v, k=...): ... -def tri(N, M=..., k=..., dtype = ..., *, like=...): ... -def tril(m, k=...): ... -def triu(m, k=...): ... -def vander(x, N=..., increasing=...): ... -def histogram2d(x, y, bins=..., range=..., normed=..., weights=..., density=...): ... -def mask_indices(n, mask_func, k=...): ... -def tril_indices(n, k=..., m=...): ... -def tril_indices_from(arr, k=...): ... -def triu_indices(n, k=..., m=...): ... -def triu_indices_from(arr, k=...): ... + like: None | ArrayLike = ..., +) -> NDArray[Any]: ... + +@overload +def diag(v: _ArrayLike[_SCT], k: int = ...) -> NDArray[_SCT]: ... +@overload +def diag(v: ArrayLike, k: int = ...) -> NDArray[Any]: ... + +@overload +def diagflat(v: _ArrayLike[_SCT], k: int = ...) -> NDArray[_SCT]: ... +@overload +def diagflat(v: ArrayLike, k: int = ...) -> NDArray[Any]: ... 
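The eye/diag/diagflat overloads above only describe behaviour that already exists at runtime; a few spot checks, for reference:

import numpy as np

print(np.eye(3).dtype)                  # float64 -- the dtype=None overload
print(np.eye(3, dtype=np.int64).dtype)  # int64   -- the _DTypeLike[_SCT] overload
print(np.diag([1, 2, 3]))               # 3x3 array with 1, 2, 3 on the diagonal
print(np.diagflat([[1, 2], [3, 4]]))    # flattens its input first: 4x4 diagonal of 1, 2, 3, 4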
+ +@overload +def tri( + N: int, + M: None | int = ..., + k: int = ..., + dtype: None = ..., + *, + like: None | ArrayLike = ... +) -> NDArray[float64]: ... +@overload +def tri( + N: int, + M: None | int = ..., + k: int = ..., + dtype: _DTypeLike[_SCT] = ..., + *, + like: None | ArrayLike = ... +) -> NDArray[_SCT]: ... +@overload +def tri( + N: int, + M: None | int = ..., + k: int = ..., + dtype: DTypeLike = ..., + *, + like: None | ArrayLike = ... +) -> NDArray[Any]: ... + +@overload +def tril(v: _ArrayLike[_SCT], k: int = ...) -> NDArray[_SCT]: ... +@overload +def tril(v: ArrayLike, k: int = ...) -> NDArray[Any]: ... + +@overload +def triu(v: _ArrayLike[_SCT], k: int = ...) -> NDArray[_SCT]: ... +@overload +def triu(v: ArrayLike, k: int = ...) -> NDArray[Any]: ... + +@overload +def vander( # type: ignore[misc] + x: _ArrayLikeInt_co, + N: None | int = ..., + increasing: bool = ..., +) -> NDArray[signedinteger[Any]]: ... +@overload +def vander( # type: ignore[misc] + x: _ArrayLikeFloat_co, + N: None | int = ..., + increasing: bool = ..., +) -> NDArray[floating[Any]]: ... +@overload +def vander( + x: _ArrayLikeComplex_co, + N: None | int = ..., + increasing: bool = ..., +) -> NDArray[complexfloating[Any, Any]]: ... +@overload +def vander( + x: _ArrayLikeObject_co, + N: None | int = ..., + increasing: bool = ..., +) -> NDArray[object_]: ... + +@overload +def histogram2d( # type: ignore[misc] + x: _ArrayLikeFloat_co, + y: _ArrayLikeFloat_co, + bins: int | Sequence[int] = ..., + range: None | _ArrayLikeFloat_co = ..., + normed: None | bool = ..., + weights: None | _ArrayLikeFloat_co = ..., + density: None | bool = ..., +) -> Tuple[ + NDArray[float64], + NDArray[floating[Any]], + NDArray[floating[Any]], +]: ... +@overload +def histogram2d( + x: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co, + bins: int | Sequence[int] = ..., + range: None | _ArrayLikeFloat_co = ..., + normed: None | bool = ..., + weights: None | _ArrayLikeFloat_co = ..., + density: None | bool = ..., +) -> Tuple[ + NDArray[float64], + NDArray[complexfloating[Any, Any]], + NDArray[complexfloating[Any, Any]], +]: ... +@overload # TODO: Sort out `bins` +def histogram2d( + x: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co, + bins: Sequence[_ArrayLikeInt_co], + range: None | _ArrayLikeFloat_co = ..., + normed: None | bool = ..., + weights: None | _ArrayLikeFloat_co = ..., + density: None | bool = ..., +) -> Tuple[ + NDArray[float64], + NDArray[Any], + NDArray[Any], +]: ... + +# NOTE: we're assuming/demanding here the `mask_func` returns +# an ndarray of shape `(n, n)`; otherwise there is the possibility +# of the output tuple having more or less than 2 elements +@overload +def mask_indices( + n: int, + mask_func: _MaskFunc[int], + k: int = ..., +) -> Tuple[NDArray[intp], NDArray[intp]]: ... +@overload +def mask_indices( + n: int, + mask_func: _MaskFunc[_T], + k: _T, +) -> Tuple[NDArray[intp], NDArray[intp]]: ... + +def tril_indices( + n: int, + k: int = ..., + m: None | int = ..., +) -> Tuple[NDArray[int_], NDArray[int_]]: ... + +def tril_indices_from( + arr: NDArray[Any], + k: int = ..., +) -> Tuple[NDArray[int_], NDArray[int_]]: ... + +def triu_indices( + n: int, + k: int = ..., + m: None | int = ..., +) -> Tuple[NDArray[int_], NDArray[int_]]: ... + +def triu_indices_from( + arr: NDArray[Any], + k: int = ..., +) -> Tuple[NDArray[int_], NDArray[int_]]: ... 
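For the mask_indices stub above (and its note about the expected (n, n) mask shape), a short runtime illustration: the function evaluates mask_func on an n x n array and returns the nonzero positions.

import numpy as np

a = np.arange(9).reshape(3, 3)

iu = np.mask_indices(3, np.triu)       # indices of the upper triangle of a 3x3 array
print(a[iu])                           # [0 1 2 4 5 8]

iu1 = np.mask_indices(3, np.triu, 1)   # the extra argument k=1 is passed through to np.triu
print(a[iu1])                          # [1 2 5]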
diff --git a/numpy/lib/type_check.pyi b/numpy/lib/type_check.pyi index 7da02bb9f..fbe325858 100644 --- a/numpy/lib/type_check.pyi +++ b/numpy/lib/type_check.pyi @@ -1,19 +1,235 @@ -from typing import List +import sys +from typing import ( + Any, + Container, + Iterable, + List, + overload, + Type, + TypeVar, +) + +from numpy import ( + dtype, + generic, + bool_, + floating, + float64, + complexfloating, + integer, +) + +from numpy.typing import ( + ArrayLike, + DTypeLike, + NBitBase, + NDArray, + _64Bit, + _SupportsDType, + _ScalarLike_co, + _NestedSequence, + _SupportsArray, + _DTypeLikeComplex, +) + +if sys.version_info >= (3, 8): + from typing import Protocol, Literal as L +else: + from typing_extensions import Protocol, Literal as L + +_T = TypeVar("_T") +_T_co = TypeVar("_T_co", covariant=True) +_SCT = TypeVar("_SCT", bound=generic) +_NBit1 = TypeVar("_NBit1", bound=NBitBase) +_NBit2 = TypeVar("_NBit2", bound=NBitBase) + +_ArrayLike = _NestedSequence[_SupportsArray[dtype[_SCT]]] + +class _SupportsReal(Protocol[_T_co]): + @property + def real(self) -> _T_co: ... + +class _SupportsImag(Protocol[_T_co]): + @property + def imag(self) -> _T_co: ... __all__: List[str] -def mintypecode(typechars, typeset=..., default=...): ... -def asfarray(a, dtype = ...): ... -def real(val): ... -def imag(val): ... -def iscomplex(x): ... -def isreal(x): ... -def iscomplexobj(x): ... -def isrealobj(x): ... -def nan_to_num(x, copy=..., nan=..., posinf=..., neginf=...): ... -def real_if_close(a, tol=...): ... -def typename(char): ... -def common_type(*arrays): ... - -# NOTE: Deprecated +def mintypecode( + typechars: Iterable[str | ArrayLike], + typeset: Container[str] = ..., + default: str = ..., +) -> str: ... + +# `asfarray` ignores dtypes if they're not inexact + +@overload +def asfarray( + a: object, + dtype: None | Type[float] = ..., +) -> NDArray[float64]: ... +@overload +def asfarray( # type: ignore[misc] + a: Any, + dtype: _DTypeLikeComplex, +) -> NDArray[complexfloating[Any, Any]]: ... +@overload +def asfarray( + a: Any, + dtype: DTypeLike, +) -> NDArray[floating[Any]]: ... + +@overload +def real(val: _SupportsReal[_T]) -> _T: ... +@overload +def real(val: ArrayLike) -> NDArray[Any]: ... + +@overload +def imag(val: _SupportsImag[_T]) -> _T: ... +@overload +def imag(val: ArrayLike) -> NDArray[Any]: ... + +@overload +def iscomplex(x: _ScalarLike_co) -> bool_: ... # type: ignore[misc] +@overload +def iscomplex(x: ArrayLike) -> NDArray[bool_]: ... + +@overload +def isreal(x: _ScalarLike_co) -> bool_: ... # type: ignore[misc] +@overload +def isreal(x: ArrayLike) -> NDArray[bool_]: ... + +def iscomplexobj(x: _SupportsDType[dtype[Any]] | ArrayLike) -> bool: ... + +def isrealobj(x: _SupportsDType[dtype[Any]] | ArrayLike) -> bool: ... + +@overload +def nan_to_num( # type: ignore[misc] + x: _SCT, + copy: bool = ..., + nan: float = ..., + posinf: None | float = ..., + neginf: None | float = ..., +) -> _SCT: ... +@overload +def nan_to_num( + x: _ScalarLike_co, + copy: bool = ..., + nan: float = ..., + posinf: None | float = ..., + neginf: None | float = ..., +) -> Any: ... +@overload +def nan_to_num( + x: _ArrayLike[_SCT], + copy: bool = ..., + nan: float = ..., + posinf: None | float = ..., + neginf: None | float = ..., +) -> NDArray[_SCT]: ... +@overload +def nan_to_num( + x: ArrayLike, + copy: bool = ..., + nan: float = ..., + posinf: None | float = ..., + neginf: None | float = ..., +) -> NDArray[Any]: ... 
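A couple of runtime checks matching the asfarray and nan_to_num overloads above; nothing here changes behaviour, the snippet only illustrates what the annotations encode:

import numpy as np

# asfarray ignores non-inexact dtypes and falls back to float64
print(np.asfarray([1, 2, 3]).dtype)                       # float64
print(np.asfarray([1, 2, 3], dtype=np.int8).dtype)        # float64 (integer dtype ignored)
print(np.asfarray([1, 2, 3], dtype=np.complex64).dtype)   # complex64

# nan_to_num: scalar in, scalar out; array-like in, array out
print(np.nan_to_num(np.float32(np.nan)))                  # 0.0
print(np.nan_to_num([np.inf, np.nan, -np.inf]))           # largest finite float64, 0.0, most negative finite float64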
+ +# If one passes a complex array to `real_if_close`, then one is reasonably +# expected to verify the output dtype (so we can return an unsafe union here) + +@overload +def real_if_close( # type: ignore[misc] + a: _ArrayLike[complexfloating[_NBit1, _NBit1]], + tol: float = ..., +) -> NDArray[floating[_NBit1]] | NDArray[complexfloating[_NBit1, _NBit1]]: ... +@overload +def real_if_close( + a: _ArrayLike[_SCT], + tol: float = ..., +) -> NDArray[_SCT]: ... +@overload +def real_if_close( + a: ArrayLike, + tol: float = ..., +) -> NDArray[Any]: ... + +# NOTE: deprecated # def asscalar(a): ... + +@overload +def typename(char: L['S1']) -> L['character']: ... +@overload +def typename(char: L['?']) -> L['bool']: ... +@overload +def typename(char: L['b']) -> L['signed char']: ... +@overload +def typename(char: L['B']) -> L['unsigned char']: ... +@overload +def typename(char: L['h']) -> L['short']: ... +@overload +def typename(char: L['H']) -> L['unsigned short']: ... +@overload +def typename(char: L['i']) -> L['integer']: ... +@overload +def typename(char: L['I']) -> L['unsigned integer']: ... +@overload +def typename(char: L['l']) -> L['long integer']: ... +@overload +def typename(char: L['L']) -> L['unsigned long integer']: ... +@overload +def typename(char: L['q']) -> L['long long integer']: ... +@overload +def typename(char: L['Q']) -> L['unsigned long long integer']: ... +@overload +def typename(char: L['f']) -> L['single precision']: ... +@overload +def typename(char: L['d']) -> L['double precision']: ... +@overload +def typename(char: L['g']) -> L['long precision']: ... +@overload +def typename(char: L['F']) -> L['complex single precision']: ... +@overload +def typename(char: L['D']) -> L['complex double precision']: ... +@overload +def typename(char: L['G']) -> L['complex long double precision']: ... +@overload +def typename(char: L['S']) -> L['string']: ... +@overload +def typename(char: L['U']) -> L['unicode']: ... +@overload +def typename(char: L['V']) -> L['void']: ... +@overload +def typename(char: L['O']) -> L['object']: ... + +@overload +def common_type( # type: ignore[misc] + *arrays: _SupportsDType[dtype[ + integer[Any] + ]] +) -> Type[floating[_64Bit]]: ... +@overload +def common_type( # type: ignore[misc] + *arrays: _SupportsDType[dtype[ + floating[_NBit1] + ]] +) -> Type[floating[_NBit1]]: ... +@overload +def common_type( # type: ignore[misc] + *arrays: _SupportsDType[dtype[ + integer[Any] | floating[_NBit1] + ]] +) -> Type[floating[_NBit1 | _64Bit]]: ... +@overload +def common_type( # type: ignore[misc] + *arrays: _SupportsDType[dtype[ + floating[_NBit1] | complexfloating[_NBit2, _NBit2] + ]] +) -> Type[complexfloating[_NBit1 | _NBit2, _NBit1 | _NBit2]]: ... +@overload +def common_type( + *arrays: _SupportsDType[dtype[ + integer[Any] | floating[_NBit1] | complexfloating[_NBit2, _NBit2] + ]] +) -> Type[complexfloating[_64Bit | _NBit1 | _NBit2, _64Bit | _NBit1 | _NBit2]]: ... diff --git a/numpy/lib/utils.py b/numpy/lib/utils.py index 12a7cacdc..b1a916d4a 100644 --- a/numpy/lib/utils.py +++ b/numpy/lib/utils.py @@ -904,7 +904,7 @@ def _lookfor_generate_cache(module, import_modules, regenerate): sys.stdout = old_stdout sys.stderr = old_stderr # Catch SystemExit, too - except BaseException: + except (Exception, SystemExit): continue for n, v in _getmembers(item): |
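Finally, spot checks for the typename and common_type overloads added to type_check.pyi above (existing runtime behaviour, shown for reference):

import numpy as np

print(np.typename('d'))    # double precision
print(np.typename('S1'))   # character

# integers promote to float64; floats and complexes combine by precision
print(np.common_type(np.arange(3, dtype=np.float32)))                # <class 'numpy.float32'>
print(np.common_type(np.arange(3, dtype=np.float32), np.arange(3)))  # <class 'numpy.float64'>
print(np.common_type(np.arange(3), np.array([1.5j])))                # <class 'numpy.complex128'>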
