author    | Charles Harris <charlesr.harris@gmail.com> | 2021-11-15 17:31:41 -0700
committer | GitHub <noreply@github.com>                | 2021-11-15 17:31:41 -0700
commit    | 376ad691fe4df77e502108d279872f56b30376dc (patch)
tree      | 11635deb5fb17746e7d18035d747edc7c1217a31
parent    | b75fe5766c3739972e8f5c10a85f320f08e74d26 (diff)
parent    | 546c47adae1066411ff7a3e3da5d758236ee90cf (diff)
download  | numpy-376ad691fe4df77e502108d279872f56b30376dc.tar.gz
Merge pull request #20327 from seberg/rename-interpolation
BUG,DEP: Fixup quantile/percentile and rename interpolation->method
-rw-r--r-- | doc/release/upcoming_changes/19857.improvement.rst   |  19
-rw-r--r-- | numpy/core/tests/test_deprecations.py                |  20
-rw-r--r-- | numpy/lib/function_base.py                           | 269
-rw-r--r-- | numpy/lib/function_base.pyi                          |  26
-rw-r--r-- | numpy/lib/nanfunctions.py                            | 161
-rw-r--r-- | numpy/lib/tests/test_function_base.py                | 108
-rw-r--r-- | numpy/lib/tests/test_nanfunctions.py                 |   4
-rw-r--r-- | numpy/typing/tests/data/reveal/lib_function_base.pyi |   4
8 files changed, 366 insertions, 245 deletions
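For readers skimming the diff, the user-visible effect of this merge boils down to the snippet below — a minimal sketch assuming NumPy 1.22 or later; the printed values follow from the 'linear' (Hyndman & Fan type 7) formula and the new test parametrization rather than from any output quoted in the patch.

```python
import warnings

import numpy as np

a = np.array([15.0, 20.0, 35.0, 40.0, 50.0])

# New spelling: the estimation method is selected with ``method=``.
print(np.percentile(a, 40, method="linear"))   # 29.0, Hyndman & Fan type 7 (the default)
print(np.percentile(a, 40, method="nearest"))  # 35.0, one of the NumPy-specific variants

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    # The old keyword still works, but now emits a DeprecationWarning.
    np.percentile(a, 40, interpolation="linear")
    # Passing both keywords at once is rejected outright.
    try:
        np.percentile(a, 40, interpolation="nearest", method="nearest")
    except TypeError as exc:
        print("rejected:", exc)

assert any(issubclass(w.category, DeprecationWarning) for w in caught)
```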
diff --git a/doc/release/upcoming_changes/19857.improvement.rst b/doc/release/upcoming_changes/19857.improvement.rst index e39d413cc..cbeff08b1 100644 --- a/doc/release/upcoming_changes/19857.improvement.rst +++ b/doc/release/upcoming_changes/19857.improvement.rst @@ -1,8 +1,13 @@ -Add new linear interpolation methods for ``quantile`` and ``percentile`` ------------------------------------------------------------------------- +Add new methods for ``quantile`` and ``percentile`` +--------------------------------------------------- -``quantile`` and ``percentile`` now have 13 linear interpolation methods, -nine of which can be found in the scientific literature. -The remaining methods are NumPy specific and are kept for backwards -compatibility. The default is "inclusive" (method 7), whose behavior is equivalent -to the former default "linear". +``quantile`` and ``percentile`` now have have a ``method=`` +keyword argument supporting 13 different methods. +This replaces the ``interpolation=`` keyword argument. + +The methods are now aligned with nine methods which can be +found in scientific literature and the R language. +The remaining methods are the previous discontinuous variations +of the default "linear" one. + +Please see the documentation of `numpy.percentile` for more information. diff --git a/numpy/core/tests/test_deprecations.py b/numpy/core/tests/test_deprecations.py index a1b379d92..94583a5ee 100644 --- a/numpy/core/tests/test_deprecations.py +++ b/numpy/core/tests/test_deprecations.py @@ -1230,3 +1230,23 @@ class TestMachAr(_DeprecationTestCase): def test_deprecated_attr(self): finfo = np.finfo(float) self.assert_deprecated(lambda: getattr(finfo, "machar")) + + +class TestQuantileInterpolationDeprecation(_DeprecationTestCase): + # Deprecated 2021-11-08, NumPy 1.22 + @pytest.mark.parametrize("func", + [np.percentile, np.quantile, np.nanpercentile, np.nanquantile]) + def test_deprecated(self, func): + self.assert_deprecated( + lambda: func([0., 1.], 0., interpolation="linear")) + self.assert_deprecated( + lambda: func([0., 1.], 0., interpolation="nearest")) + + @pytest.mark.parametrize("func", + [np.percentile, np.quantile, np.nanpercentile, np.nanquantile]) + def test_both_passed(self, func): + with warnings.catch_warnings(): + # catch the DeprecationWarning so that it does not raise: + warnings.simplefilter("always", DeprecationWarning) + with pytest.raises(TypeError): + func([0., 1.], 0., interpolation="nearest", method="nearest") diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index 3c9983edf..a215f63d3 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -50,8 +50,8 @@ __all__ = [ 'quantile' ] -# _QuantileInterpolation is a dictionary listing all the supported -# interpolation methods to compute quantile/percentile. +# _QuantileMethods is a dictionary listing all the supported methods to +# compute quantile/percentile. # # Below virtual_index refer to the index of the element where the percentile # would be found in the sorted sample. @@ -61,13 +61,13 @@ __all__ = [ # is made of a integer part (a.k.a 'i' or 'left') and a fractional part # (a.k.a 'g' or 'gamma') # -# Each _QuantileInterpolation have two properties +# Each method in _QuantileMethods has two properties # get_virtual_index : Callable # The function used to compute the virtual_index. # fix_gamma : Callable # A function used for discret methods to force the index to a specific value. 
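Each entry in the `_QuantileMethods` table defined just below pairs a `get_virtual_index` callable with a `fix_gamma` callable, as the comment above describes. The toy re-implementation here shows how those two pieces combine for the default 'linear' entry; `linear_method` and `toy_quantile` are illustrative names, not NumPy internals.

```python
import numpy as np

# Illustrative stand-in for the 'linear' entry: with alpha == beta == 1 the
# virtual index  q*n + (alpha + q*(1 - alpha - beta)) - 1  reduces to (n - 1)*q,
# and continuous methods leave gamma untouched.
linear_method = dict(
    get_virtual_index=lambda n, quantiles: (n - 1) * quantiles,
    fix_gamma=lambda gamma, _: gamma,
)


def toy_quantile(sorted_arr, q, method):
    """Tiny scalar-only version of the lookup that the methods table drives."""
    n = len(sorted_arr)
    virtual = method["get_virtual_index"](n, q)
    left = int(np.floor(virtual))
    right = min(left + 1, n - 1)
    gamma = method["fix_gamma"](virtual - left, virtual)
    # Linear interpolation between the two bracketing order statistics.
    return sorted_arr[left] * (1 - gamma) + sorted_arr[right] * gamma


print(toy_quantile(np.array([15.0, 20.0, 35.0, 40.0, 50.0]), 0.4, linear_method))  # 29.0
```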
-_QuantileInterpolation = dict( - # --- HYNDMAN AND FAN METHODS +_QuantileMethods = dict( + # --- HYNDMAN and FAN METHODS # Discrete methods inverted_cdf=dict( get_virtual_index=lambda n, quantiles: _inverted_cdf(n, quantiles), @@ -3854,7 +3854,7 @@ def _median(a, axis=None, out=None, overwrite_input=False): def _percentile_dispatcher(a, q, axis=None, out=None, overwrite_input=None, - interpolation=None, keepdims=None): + method=None, keepdims=None, *, interpolation=None): return (a, q, out) @@ -3864,8 +3864,10 @@ def percentile(a, axis=None, out=None, overwrite_input=False, - interpolation="linear", - keepdims=False): + method="linear", + keepdims=False, + *, + interpolation=None): """ Compute the q-th percentile of the data along the specified axis. @@ -3893,31 +3895,33 @@ def percentile(a, If True, then allow the input array `a` to be modified by intermediate calculations, to save memory. In this case, the contents of the input `a` after this function completes is undefined. - interpolation : str, optional - This parameter specifies the interpolation method to - use when the desired percentile lies between two data points - There are many different methods, some unique to NumPy. See the - notes for explanation. Options - - * (NPY 1): 'lower' - * (NPY 2): 'higher', - * (NPY 3): 'midpoint' - * (NPY 4): 'nearest' - * (NPY 5): 'linear' - - New options: - - * (H&F 1): 'inverted_cdf' - * (H&F 2): 'averaged_inverted_cdf' - * (H&F 3): 'closest_observation' - * (H&F 4): 'interpolated_inverted_cdf' - * (H&F 5): 'hazen' - * (H&F 6): 'weibull' - * (H&F 7): 'linear' (default) - * (H&F 8): 'median_unbiased' - * (H&F 9): 'normal_unbiased' + method : str, optional + This parameter specifies the method to use for estimating the + percentile. There are many different methods, some unique to NumPy. + See the notes for explanation. The options sorted by their R type + as summarized in the H&F paper [1]_ are: + + 1. 'inverted_cdf' + 2. 'averaged_inverted_cdf' + 3. 'closest_observation' + 4. 'interpolated_inverted_cdf' + 5. 'hazen' + 6. 'weibull' + 7. 'linear' (default) + 8. 'median_unbiased' + 9. 'normal_unbiased' + + The first three methods are discontiuous. NumPy further defines the + following discontinuous variations of the default 'linear' (7.) option: + + * 'lower' + * 'higher', + * 'midpoint' + * 'nearest' .. versionchanged:: 1.22.0 + This argument was previously called "interpolation" and only + offered the "linear" default and last four options. keepdims : bool, optional If this is set to True, the axes which are reduced are left in @@ -3926,6 +3930,11 @@ def percentile(a, .. versionadded:: 1.9.0 + interpolation : str, optional + Deprecated name for the method keyword argument. + + .. deprecated:: 1.22.0 + Returns ------- percentile : scalar or ndarray @@ -3950,16 +3959,16 @@ def percentile(a, Given a vector ``V`` of length ``N``, the q-th percentile of ``V`` is the value ``q/100`` of the way from the minimum to the maximum in a sorted copy of ``V``. The values and distances of the two nearest - neighbors as well as the `interpolation` parameter will determine the + neighbors as well as the `method` parameter will determine the percentile if the normalized ranking does not match the location of ``q`` exactly. This function is the same as the median if ``q=50``, the same as the minimum if ``q=0`` and the same as the maximum if ``q=100``. - This optional `interpolation` parameter specifies the interpolation - method to use when the desired quantile lies between two data points - ``i < j``. 
If ``g`` is the fractional part of the index surrounded by - ``i`` and alpha and beta are correction constants modifying i and j. + This optional `method` parameter specifies the method to use when the + desired quantile lies between two data points ``i < j``. + If ``g`` is the fractional part of the index surrounded by ``i`` and + alpha and beta are correction constants modifying i and j. Below, 'q' is the quantile value, 'n' is the sample size and alpha and beta are constants. @@ -3970,7 +3979,7 @@ def percentile(a, .. math:: i + g = (q - alpha) / ( n - alpha - beta + 1 ) - The different interpolation methods then work as follows + The different methods then work as follows inverted_cdf: method 1 of H&F [1]_. @@ -4075,7 +4084,7 @@ def percentile(a, array([7., 2.]) >>> assert not np.all(a == b) - The different types of interpolation can be visualized graphically: + The different methods can be visualized graphically: .. plot:: @@ -4085,20 +4094,25 @@ def percentile(a, p = np.linspace(0, 100, 6001) ax = plt.gca() lines = [ - ('linear', None), - ('higher', '--'), - ('lower', '--'), - ('nearest', '-.'), - ('midpoint', '-.'), - ] - for interpolation, style in lines: + ('linear', '-', 'C0'), + ('inverted_cdf', ':', 'C1'), + # Almost the same as `inverted_cdf`: + ('averaged_inverted_cdf', '-.', 'C1'), + ('closest_observation', ':', 'C2'), + ('interpolated_inverted_cdf', '--', 'C1'), + ('hazen', '--', 'C3'), + ('weibull', '-.', 'C4'), + ('median_unbiased', '--', 'C5'), + ('normal_unbiased', '-.', 'C6'), + ] + for method, style, color in lines: ax.plot( - p, np.percentile(a, p, interpolation=interpolation), - label=interpolation, linestyle=style) + p, np.percentile(a, p, method=method), + label=method, linestyle=style, color=color) ax.set( - title='Interpolation methods for list: ' + str(a), + title='Percentiles for different methods and data: ' + str(a), xlabel='Percentile', - ylabel='List item returned', + ylabel='Estimated percentile value', yticks=a) ax.legend() plt.show() @@ -4110,16 +4124,19 @@ def percentile(a, The American Statistician, 50(4), pp. 361-365, 1996 """ + if interpolation is not None: + method = _check_interpolation_as_method( + method, interpolation, "percentile") q = np.true_divide(q, 100) q = asanyarray(q) # undo any decay that the ufunc performed (see gh-13105) if not _quantile_is_valid(q): raise ValueError("Percentiles must be in the range [0, 100]") return _quantile_unchecked( - a, q, axis, out, overwrite_input, interpolation, keepdims) + a, q, axis, out, overwrite_input, method, keepdims) def _quantile_dispatcher(a, q, axis=None, out=None, overwrite_input=None, - interpolation=None, keepdims=None): + method=None, keepdims=None, *, interpolation=None): return (a, q, out) @@ -4129,8 +4146,10 @@ def quantile(a, axis=None, out=None, overwrite_input=False, - interpolation="linear", - keepdims=False): + method="linear", + keepdims=False, + *, + interpolation=None): """ Compute the q-th quantile of the data along the specified axis. @@ -4155,37 +4174,44 @@ def quantile(a, intermediate calculations, to save memory. In this case, the contents of the input `a` after this function completes is undefined. - interpolation : str, optional - This parameter specifies the interpolation method to use when the - desired quantile lies between two data points There are many - different methods, some unique to NumPy. See the notes for - explanation. 
Options: - - * (NPY 1): 'lower' - * (NPY 2): 'higher', - * (NPY 3): 'midpoint' - * (NPY 4): 'nearest' - * (NPY 5): 'linear' - - New options: - - * (H&F 1): 'inverted_cdf' - * (H&F 2): 'averaged_inverted_cdf' - * (H&F 3): 'closest_observation' - * (H&F 4): 'interpolated_inverted_cdf' - * (H&F 5): 'hazen' - * (H&F 6): 'weibull' - * (H&F 7): 'linear' (default) - * (H&F 8): 'median_unbiased' - * (H&F 9): 'normal_unbiased' - - .. versionadded:: 1.22.0 + method : str, optional + This parameter specifies the method to use for estimating the + quantile. There are many different methods, some unique to NumPy. + See the notes for explanation. The options sorted by their R type + as summarized in the H&F paper [1]_ are: + + 1. 'inverted_cdf' + 2. 'averaged_inverted_cdf' + 3. 'closest_observation' + 4. 'interpolated_inverted_cdf' + 5. 'hazen' + 6. 'weibull' + 7. 'linear' (default) + 8. 'median_unbiased' + 9. 'normal_unbiased' + + The first three methods are discontiuous. NumPy further defines the + following discontinuous variations of the default 'linear' (7.) option: + + * 'lower' + * 'higher', + * 'midpoint' + * 'nearest' + + .. versionchanged:: 1.22.0 + This argument was previously called "interpolation" and only + offered the "linear" default and last four options. keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the original array `a`. + interpolation : str, optional + Deprecated name for the method keyword argument. + + .. deprecated:: 1.22.0 + Returns ------- quantile : scalar or ndarray @@ -4210,20 +4236,20 @@ def quantile(a, Given a vector ``V`` of length ``N``, the q-th quantile of ``V`` is the value ``q`` of the way from the minimum to the maximum in a sorted copy of ``V``. The values and distances of the two nearest neighbors as well as the - `interpolation` parameter will determine the quantile if the normalized + `method` parameter will determine the quantile if the normalized ranking does not match the location of ``q`` exactly. This function is the same as the median if ``q=0.5``, the same as the minimum if ``q=0.0`` and the same as the maximum if ``q=1.0``. - This optional `interpolation` parameter specifies the interpolation method - to use when the desired quantile lies between two data points ``i < j``. If - ``g`` is the fractional part of the index surrounded by ``i`` and alpha - and beta are correction constants modifying i and j. + This optional `method` parameter specifies the method to use when the + desired quantile lies between two data points ``i < j``. + If ``g`` is the fractional part of the index surrounded by ``i`` and + alpha and beta are correction constants modifying i and j. .. math:: i + g = (q - alpha) / ( n - alpha - beta + 1 ) - The different interpolation methods then work as follows + The different methods then work as follows inverted_cdf: method 1 of H&F [1]_. @@ -4326,6 +4352,8 @@ def quantile(a, array([7., 2.]) >>> assert not np.all(a == b) + See also `numpy.percentile` for a visualization of most methods. + References ---------- .. [1] R. J. Hyndman and Y. Fan, @@ -4333,11 +4361,15 @@ def quantile(a, The American Statistician, 50(4), pp. 
361-365, 1996 """ + if interpolation is not None: + method = _check_interpolation_as_method( + method, interpolation, "quantile") + q = np.asanyarray(q) if not _quantile_is_valid(q): raise ValueError("Quantiles must be in the range [0, 1]") return _quantile_unchecked( - a, q, axis, out, overwrite_input, interpolation, keepdims) + a, q, axis, out, overwrite_input, method, keepdims) def _quantile_unchecked(a, @@ -4345,7 +4377,7 @@ def _quantile_unchecked(a, axis=None, out=None, overwrite_input=False, - interpolation="linear", + method="linear", keepdims=False): """Assumes that q is in [0, 1], and is an ndarray""" r, k = _ureduce(a, @@ -4354,7 +4386,7 @@ def _quantile_unchecked(a, axis=axis, out=out, overwrite_input=overwrite_input, - interpolation=interpolation) + method=method) if keepdims: return r.reshape(q.shape + k) else: @@ -4373,6 +4405,23 @@ def _quantile_is_valid(q): return True +def _check_interpolation_as_method(method, interpolation, fname): + # Deprecated NumPy 1.22, 2021-11-08 + warnings.warn( + f"the `interpolation=` argument to {fname} was renamed to " + "`method=`, which has additional options.\n" + "Users of the modes 'nearest', 'lower', 'higher', or " + "'midpoint' are encouraged to review the method they. " + "(Deprecated NumPy 1.22)", + DeprecationWarning, stacklevel=4) + if method != "linear": + # sanity check, we assume this basically never happens + raise TypeError( + "You shall not pass both `method` and `interpolation`!\n" + "(`interpolation` is Deprecated in favor of `method`)") + return interpolation + + def _compute_virtual_index(n, quantiles, alpha: float, beta: float): """ Compute the floating point indexes of an array for the linear @@ -4398,9 +4447,7 @@ def _compute_virtual_index(n, quantiles, alpha: float, beta: float): ) - 1 -def _get_gamma(virtual_indexes, - previous_indexes, - interpolation: _QuantileInterpolation): +def _get_gamma(virtual_indexes, previous_indexes, method): """ Compute gamma (a.k.a 'm' or 'weight') for the linear interpolation of quantiles. @@ -4410,7 +4457,7 @@ def _get_gamma(virtual_indexes, sample. previous_indexes : array_like The floor values of virtual_indexes. - interpolation : _QuantileInterpolation + interpolation : dict The interpolation method chosen, which may have a specific rule modifying gamma. @@ -4418,7 +4465,7 @@ def _get_gamma(virtual_indexes, by the interpolation method. 
""" gamma = np.asanyarray(virtual_indexes - previous_indexes) - gamma = interpolation["fix_gamma"](gamma, virtual_indexes) + gamma = method["fix_gamma"](gamma, virtual_indexes) return np.asanyarray(gamma) @@ -4447,7 +4494,7 @@ def _lerp(a, b, t, out=None): def _get_gamma_mask(shape, default_value, conditioned_value, where): out = np.full(shape, default_value) - out[where] = conditioned_value + np.copyto(out, conditioned_value, where=where, casting="unsafe") return out @@ -4455,11 +4502,14 @@ def _discret_interpolation_to_boundaries(index, gamma_condition_fun): previous = np.floor(index) next = previous + 1 gamma = index - previous - return _get_gamma_mask(shape=index.shape, - default_value=next, - conditioned_value=previous, - where=gamma_condition_fun(gamma, index) - ).astype(np.intp) + res = _get_gamma_mask(shape=index.shape, + default_value=next, + conditioned_value=previous, + where=gamma_condition_fun(gamma, index) + ).astype(np.intp) + # Some methods can lead to out-of-bound integers, clip them: + res[res < 0] = 0 + return res def _closest_observation(n, quantiles): @@ -4480,7 +4530,7 @@ def _quantile_ureduce_func( axis: int = None, out=None, overwrite_input: bool = False, - interpolation="linear", + method="linear", ) -> np.array: if q.ndim > 2: # The code below works fine for nd, but it might not have useful @@ -4502,7 +4552,7 @@ def _quantile_ureduce_func( result = _quantile(arr, quantiles=q, axis=axis, - interpolation=interpolation, + method=method, out=out) return result @@ -4546,7 +4596,7 @@ def _quantile( arr: np.array, quantiles: np.array, axis: int = -1, - interpolation="linear", + method="linear", out=None, ): """ @@ -4556,8 +4606,8 @@ def _quantile( It computes the quantiles of the array for the given axis. A linear interpolation is performed based on the `interpolation`. - By default, the interpolation is "linear" where - alpha == beta == 1 which performs the 7th method of Hyndman&Fan. + By default, the method is "linear" where alpha == beta == 1 which + performs the 7th method of Hyndman&Fan. With "median_unbiased" we get alpha == beta == 1/3 thus the 8th method of Hyndman&Fan. """ @@ -4574,13 +4624,12 @@ def _quantile( # Virtual because it is a floating point value, not an valid index. # The nearest neighbours are used for interpolation try: - interpolation = _QuantileInterpolation[interpolation] + method = _QuantileMethods[method] except KeyError: raise ValueError( - f"{interpolation!r} is not a valid interpolation. Use one of: " - f"{_QuantileInterpolation.keys()}") from None - virtual_indexes = interpolation["get_virtual_index"](values_count, - quantiles) + f"{method!r} is not a valid method. 
Use one of: " + f"{_QuantileMethods.keys()}") from None + virtual_indexes = method["get_virtual_index"](values_count, quantiles) virtual_indexes = np.asanyarray(virtual_indexes) if np.issubdtype(virtual_indexes.dtype, np.integer): # No interpolation needed, take the points along axis @@ -4614,9 +4663,7 @@ def _quantile( previous = np.take(arr, previous_indexes, axis=DATA_AXIS) next = np.take(arr, next_indexes, axis=DATA_AXIS) # --- Linear interpolation - gamma = _get_gamma(virtual_indexes, - previous_indexes, - interpolation) + gamma = _get_gamma(virtual_indexes, previous_indexes, method) result_shape = virtual_indexes.shape + (1,) * (arr.ndim - 1) gamma = gamma.reshape(result_shape) result = _lerp(previous, diff --git a/numpy/lib/function_base.pyi b/numpy/lib/function_base.pyi index 82c625fed..7e227f9da 100644 --- a/numpy/lib/function_base.pyi +++ b/numpy/lib/function_base.pyi @@ -500,7 +500,7 @@ def median( keepdims: bool = ..., ) -> _ArrayType: ... -_InterpolationKind = L[ +_MethodKind = L[ "inverted_cdf", "averaged_inverted_cdf", "closest_observation", @@ -523,7 +523,7 @@ def percentile( axis: None = ..., out: None = ..., overwrite_input: bool = ..., - interpolation: _InterpolationKind = ..., + method: _MethodKind = ..., keepdims: L[False] = ..., ) -> floating[Any]: ... @overload @@ -533,7 +533,7 @@ def percentile( axis: None = ..., out: None = ..., overwrite_input: bool = ..., - interpolation: _InterpolationKind = ..., + method: _MethodKind = ..., keepdims: L[False] = ..., ) -> complexfloating[Any, Any]: ... @overload @@ -543,7 +543,7 @@ def percentile( axis: None = ..., out: None = ..., overwrite_input: bool = ..., - interpolation: _InterpolationKind = ..., + method: _MethodKind = ..., keepdims: L[False] = ..., ) -> timedelta64: ... @overload @@ -553,7 +553,7 @@ def percentile( axis: None = ..., out: None = ..., overwrite_input: bool = ..., - interpolation: _InterpolationKind = ..., + method: _MethodKind = ..., keepdims: L[False] = ..., ) -> datetime64: ... @overload @@ -563,7 +563,7 @@ def percentile( axis: None = ..., out: None = ..., overwrite_input: bool = ..., - interpolation: _InterpolationKind = ..., + method: _MethodKind = ..., keepdims: L[False] = ..., ) -> Any: ... @overload @@ -573,7 +573,7 @@ def percentile( axis: None = ..., out: None = ..., overwrite_input: bool = ..., - interpolation: _InterpolationKind = ..., + method: _MethodKind = ..., keepdims: L[False] = ..., ) -> NDArray[floating[Any]]: ... @overload @@ -583,7 +583,7 @@ def percentile( axis: None = ..., out: None = ..., overwrite_input: bool = ..., - interpolation: _InterpolationKind = ..., + method: _MethodKind = ..., keepdims: L[False] = ..., ) -> NDArray[complexfloating[Any, Any]]: ... @overload @@ -593,7 +593,7 @@ def percentile( axis: None = ..., out: None = ..., overwrite_input: bool = ..., - interpolation: _InterpolationKind = ..., + method: _MethodKind = ..., keepdims: L[False] = ..., ) -> NDArray[timedelta64]: ... @overload @@ -603,7 +603,7 @@ def percentile( axis: None = ..., out: None = ..., overwrite_input: bool = ..., - interpolation: _InterpolationKind = ..., + method: _MethodKind = ..., keepdims: L[False] = ..., ) -> NDArray[datetime64]: ... @overload @@ -613,7 +613,7 @@ def percentile( axis: None = ..., out: None = ..., overwrite_input: bool = ..., - interpolation: _InterpolationKind = ..., + method: _MethodKind = ..., keepdims: L[False] = ..., ) -> NDArray[object_]: ... 
@overload @@ -623,7 +623,7 @@ def percentile( axis: None | _ShapeLike = ..., out: None = ..., overwrite_input: bool = ..., - interpolation: _InterpolationKind = ..., + method: _MethodKind = ..., keepdims: bool = ..., ) -> Any: ... @overload @@ -633,7 +633,7 @@ def percentile( axis: None | _ShapeLike = ..., out: _ArrayType = ..., overwrite_input: bool = ..., - interpolation: _InterpolationKind = ..., + method: _MethodKind = ..., keepdims: bool = ..., ) -> _ArrayType: ... diff --git a/numpy/lib/nanfunctions.py b/numpy/lib/nanfunctions.py index 7e953be03..d7ea1ca65 100644 --- a/numpy/lib/nanfunctions.py +++ b/numpy/lib/nanfunctions.py @@ -1223,8 +1223,9 @@ def nanmedian(a, axis=None, out=None, overwrite_input=False, keepdims=np._NoValu return r -def _nanpercentile_dispatcher(a, q, axis=None, out=None, overwrite_input=None, - interpolation=None, keepdims=None): +def _nanpercentile_dispatcher( + a, q, axis=None, out=None, overwrite_input=None, + method=None, keepdims=None, *, interpolation=None): return (a, q, out) @@ -1235,8 +1236,10 @@ def nanpercentile( axis=None, out=None, overwrite_input=False, - interpolation="linear", + method="linear", keepdims=np._NoValue, + *, + interpolation=None, ): """ Compute the qth percentile of the data along the specified axis, @@ -1267,31 +1270,33 @@ def nanpercentile( intermediate calculations, to save memory. In this case, the contents of the input `a` after this function completes is undefined. - interpolation : str, optional - This parameter specifies the interpolation method to use when the - desired percentile lies between two data points There are many - different methods, some unique to NumPy. See the notes for - explanation. Options: - - * (NPY 1): 'lower' - * (NPY 2): 'higher', - * (NPY 3): 'midpoint' - * (NPY 4): 'nearest' - * (NPY 5): 'linear' (default) - - New options: - - * (H&F 1): 'inverted_cdf' - * (H&F 2): 'averaged_inverted_cdf' - * (H&F 3): 'closest_observation' - * (H&F 4): 'interpolated_inverted_cdf' - * (H&F 5): 'hazen' - * (H&F 6): 'weibull' - * (H&F 7): 'linear' (default) - * (H&F 8): 'median_unbiased' - * (H&F 9): 'normal_unbiased' + method : str, optional + This parameter specifies the method to use for estimating the + percentile. There are many different methods, some unique to NumPy. + See the notes for explanation. The options sorted by their R type + as summarized in the H&F paper [1]_ are: + + 1. 'inverted_cdf' + 2. 'averaged_inverted_cdf' + 3. 'closest_observation' + 4. 'interpolated_inverted_cdf' + 5. 'hazen' + 6. 'weibull' + 7. 'linear' (default) + 8. 'median_unbiased' + 9. 'normal_unbiased' + + The first three methods are discontiuous. NumPy further defines the + following discontinuous variations of the default 'linear' (7.) option: + + * 'lower' + * 'higher', + * 'midpoint' + * 'nearest' - .. versionadded:: 1.22.0 + .. versionchanged:: 1.22.0 + This argument was previously called "interpolation" and only + offered the "linear" default and last four options. keepdims : bool, optional If this is set to True, the axes which are reduced are left in @@ -1304,6 +1309,11 @@ def nanpercentile( a sub-class and `mean` does not have the kwarg `keepdims` this will raise a RuntimeError. + interpolation : str, optional + Deprecated name for the method keyword argument. + + .. deprecated:: 1.22.0 + Returns ------- percentile : scalar or ndarray @@ -1355,7 +1365,17 @@ def nanpercentile( array([7., 2.]) >>> assert not np.all(a==b) + References + ---------- + .. [1] R. J. Hyndman and Y. 
Fan, + "Sample quantiles in statistical packages," + The American Statistician, 50(4), pp. 361-365, 1996 + """ + if interpolation is not None: + method = function_base._check_interpolation_as_method( + method, interpolation, "nanpercentile") + a = np.asanyarray(a) q = np.true_divide(q, 100.0) # undo any decay that the ufunc performed (see gh-13105) @@ -1363,11 +1383,11 @@ def nanpercentile( if not function_base._quantile_is_valid(q): raise ValueError("Percentiles must be in the range [0, 100]") return _nanquantile_unchecked( - a, q, axis, out, overwrite_input, interpolation, keepdims) + a, q, axis, out, overwrite_input, method, keepdims) def _nanquantile_dispatcher(a, q, axis=None, out=None, overwrite_input=None, - interpolation=None, keepdims=None): + method=None, keepdims=None, *, interpolation=None): return (a, q, out) @@ -1378,8 +1398,10 @@ def nanquantile( axis=None, out=None, overwrite_input=False, - interpolation="linear", + method="linear", keepdims=np._NoValue, + *, + interpolation=None, ): """ Compute the qth quantile of the data along the specified axis, @@ -1408,31 +1430,33 @@ def nanquantile( If True, then allow the input array `a` to be modified by intermediate calculations, to save memory. In this case, the contents of the input `a` after this function completes is undefined. - interpolation : str, optional - This parameter specifies the interpolation method to - use when the desired quantile lies between two data points - There are many different methods, some unique to NumPy. See the - notes for explanation. Options: - - * (NPY 1): 'lower' - * (NPY 2): 'higher', - * (NPY 3): 'midpoint' - * (NPY 4): 'nearest' - * (NPY 5): 'linear' (default) - - New options: - - * (H&F 1): 'inverted_cdf' - * (H&F 2): 'averaged_inverted_cdf' - * (H&F 3): 'closest_observation' - * (H&F 4): 'interpolated_inverted_cdf' - * (H&F 5): 'hazen' - * (H&F 6): 'weibull' - * (H&F 7): 'linear' (default) - * (H&F 8): 'median_unbiased' - * (H&F 9): 'normal_unbiased' + method : str, optional + This parameter specifies the method to use for estimating the + quantile. There are many different methods, some unique to NumPy. + See the notes for explanation. The options sorted by their R type + as summarized in the H&F paper [1]_ are: + + 1. 'inverted_cdf' + 2. 'averaged_inverted_cdf' + 3. 'closest_observation' + 4. 'interpolated_inverted_cdf' + 5. 'hazen' + 6. 'weibull' + 7. 'linear' (default) + 8. 'median_unbiased' + 9. 'normal_unbiased' + + The first three methods are discontiuous. NumPy further defines the + following discontinuous variations of the default 'linear' (7.) option: + + * 'lower' + * 'higher', + * 'midpoint' + * 'nearest' .. versionchanged:: 1.22.0 + This argument was previously called "interpolation" and only + offered the "linear" default and last four options. keepdims : bool, optional If this is set to True, the axes which are reduced are left in @@ -1445,6 +1469,11 @@ def nanquantile( a sub-class and `mean` does not have the kwarg `keepdims` this will raise a RuntimeError. + interpolation : str, optional + Deprecated name for the method keyword argument. + + .. deprecated:: 1.22.0 + Returns ------- quantile : scalar or ndarray @@ -1495,13 +1524,23 @@ def nanquantile( array([7., 2.]) >>> assert not np.all(a==b) + References + ---------- + .. [1] R. J. Hyndman and Y. Fan, + "Sample quantiles in statistical packages," + The American Statistician, 50(4), pp. 
361-365, 1996 + """ + if interpolation is not None: + method = function_base._check_interpolation_as_method( + method, interpolation, "nanquantile") + a = np.asanyarray(a) q = np.asanyarray(q) if not function_base._quantile_is_valid(q): raise ValueError("Quantiles must be in the range [0, 1]") return _nanquantile_unchecked( - a, q, axis, out, overwrite_input, interpolation, keepdims) + a, q, axis, out, overwrite_input, method, keepdims) def _nanquantile_unchecked( @@ -1510,7 +1549,7 @@ def _nanquantile_unchecked( axis=None, out=None, overwrite_input=False, - interpolation="linear", + method="linear", keepdims=np._NoValue, ): """Assumes that q is in [0, 1], and is an ndarray""" @@ -1524,7 +1563,7 @@ def _nanquantile_unchecked( axis=axis, out=out, overwrite_input=overwrite_input, - interpolation=interpolation) + method=method) if keepdims and keepdims is not np._NoValue: return r.reshape(q.shape + k) else: @@ -1532,7 +1571,7 @@ def _nanquantile_unchecked( def _nanquantile_ureduce_func(a, q, axis=None, out=None, overwrite_input=False, - interpolation="linear"): + method="linear"): """ Private function that doesn't support extended axis or keepdims. These methods are extended to this function using _ureduce @@ -1540,10 +1579,10 @@ def _nanquantile_ureduce_func(a, q, axis=None, out=None, overwrite_input=False, """ if axis is None or a.ndim == 1: part = a.ravel() - result = _nanquantile_1d(part, q, overwrite_input, interpolation) + result = _nanquantile_1d(part, q, overwrite_input, method) else: result = np.apply_along_axis(_nanquantile_1d, axis, a, q, - overwrite_input, interpolation) + overwrite_input, method) # apply_along_axis fills in collapsed axis with results. # Move that axis to the beginning to match percentile's # convention. @@ -1555,7 +1594,7 @@ def _nanquantile_ureduce_func(a, q, axis=None, out=None, overwrite_input=False, return result -def _nanquantile_1d(arr1d, q, overwrite_input=False, interpolation="linear"): +def _nanquantile_1d(arr1d, q, overwrite_input=False, method="linear"): """ Private function for rank 1 arrays. Compute quantile ignoring NaNs. 
See nanpercentile for parameter usage @@ -1567,7 +1606,7 @@ def _nanquantile_1d(arr1d, q, overwrite_input=False, interpolation="linear"): return np.full(q.shape, np.nan, dtype=arr1d.dtype)[()] return function_base._quantile_unchecked( - arr1d, q, overwrite_input=overwrite_input, interpolation=interpolation) + arr1d, q, overwrite_input=overwrite_input, method=method) def _nanvar_dispatcher(a, axis=None, dtype=None, out=None, ddof=None, diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index 1c274afae..b67a31b18 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -2867,7 +2867,7 @@ class TestPercentile: assert_equal(np.percentile(x, 50), 1.75) x[1] = np.nan assert_equal(np.percentile(x, 0), np.nan) - assert_equal(np.percentile(x, 0, interpolation='nearest'), np.nan) + assert_equal(np.percentile(x, 0, method='nearest'), np.nan) def test_fraction(self): x = [Fraction(i, 2) for i in range(8)] @@ -2910,7 +2910,7 @@ class TestPercentile: res = np.percentile( arr, 40.0, - interpolation="linear") + method="linear") np.testing.assert_equal(res, np.NAN) np.testing.assert_equal(res.dtype, arr.dtype) @@ -2926,7 +2926,7 @@ class TestPercentile: (np.dtype("O"), np.float64)] @pytest.mark.parametrize(["input_dtype", "expected_dtype"], H_F_TYPE_CODES) - @pytest.mark.parametrize(["interpolation", "expected"], + @pytest.mark.parametrize(["method", "expected"], [("inverted_cdf", 20), ("averaged_inverted_cdf", 27.5), ("closest_observation", 20), @@ -2938,16 +2938,16 @@ class TestPercentile: ("normal_unbiased", 27.125), ]) def test_linear_interpolation(self, - interpolation, + method, expected, input_dtype, expected_dtype): arr = np.asarray([15.0, 20.0, 35.0, 40.0, 50.0], dtype=input_dtype) - actual = np.percentile(arr, 40.0, interpolation=interpolation) + actual = np.percentile(arr, 40.0, method=method) np.testing.assert_almost_equal(actual, expected, 14) - if interpolation in ["inverted_cdf", "closest_observation"]: + if method in ["inverted_cdf", "closest_observation"]: if input_dtype == "O": np.testing.assert_equal(np.asarray(actual).dtype, np.float64) else: @@ -2962,27 +2962,27 @@ class TestPercentile: @pytest.mark.parametrize("dtype", TYPE_CODES) def test_lower_higher(self, dtype): assert_equal(np.percentile(np.arange(10, dtype=dtype), 50, - interpolation='lower'), 4) + method='lower'), 4) assert_equal(np.percentile(np.arange(10, dtype=dtype), 50, - interpolation='higher'), 5) + method='higher'), 5) @pytest.mark.parametrize("dtype", TYPE_CODES) def test_midpoint(self, dtype): assert_equal(np.percentile(np.arange(10, dtype=dtype), 51, - interpolation='midpoint'), 4.5) + method='midpoint'), 4.5) assert_equal(np.percentile(np.arange(9, dtype=dtype) + 1, 50, - interpolation='midpoint'), 5) + method='midpoint'), 5) assert_equal(np.percentile(np.arange(11, dtype=dtype), 51, - interpolation='midpoint'), 5.5) + method='midpoint'), 5.5) assert_equal(np.percentile(np.arange(11, dtype=dtype), 50, - interpolation='midpoint'), 5) + method='midpoint'), 5) @pytest.mark.parametrize("dtype", TYPE_CODES) def test_nearest(self, dtype): assert_equal(np.percentile(np.arange(10, dtype=dtype), 51, - interpolation='nearest'), 5) + method='nearest'), 5) assert_equal(np.percentile(np.arange(10, dtype=dtype), 49, - interpolation='nearest'), 4) + method='nearest'), 4) def test_linear_interpolation_extrapolation(self): arr = np.random.rand(5) @@ -3019,19 +3019,19 @@ class TestPercentile: assert_equal( np.percentile(x, (25, 50, 75), axis=1).shape, 
(3, 3, 5, 6)) assert_equal(np.percentile(x, (25, 50), - interpolation="higher").shape, (2,)) + method="higher").shape, (2,)) assert_equal(np.percentile(x, (25, 50, 75), - interpolation="higher").shape, (3,)) + method="higher").shape, (3,)) assert_equal(np.percentile(x, (25, 50), axis=0, - interpolation="higher").shape, (2, 4, 5, 6)) + method="higher").shape, (2, 4, 5, 6)) assert_equal(np.percentile(x, (25, 50), axis=1, - interpolation="higher").shape, (2, 3, 5, 6)) + method="higher").shape, (2, 3, 5, 6)) assert_equal(np.percentile(x, (25, 50), axis=2, - interpolation="higher").shape, (2, 3, 4, 6)) + method="higher").shape, (2, 3, 4, 6)) assert_equal(np.percentile(x, (25, 50), axis=3, - interpolation="higher").shape, (2, 3, 4, 5)) + method="higher").shape, (2, 3, 4, 5)) assert_equal(np.percentile(x, (25, 50, 75), axis=1, - interpolation="higher").shape, (3, 3, 5, 6)) + method="higher").shape, (3, 3, 5, 6)) def test_scalar_q(self): # test for no empty dimensions for compatibility with old percentile @@ -3057,33 +3057,33 @@ class TestPercentile: # test for no empty dimensions for compatibility with old percentile x = np.arange(12).reshape(3, 4) - assert_equal(np.percentile(x, 50, interpolation='lower'), 5.) + assert_equal(np.percentile(x, 50, method='lower'), 5.) assert_(np.isscalar(np.percentile(x, 50))) r0 = np.array([4., 5., 6., 7.]) - c0 = np.percentile(x, 50, interpolation='lower', axis=0) + c0 = np.percentile(x, 50, method='lower', axis=0) assert_equal(c0, r0) assert_equal(c0.shape, r0.shape) r1 = np.array([1., 5., 9.]) - c1 = np.percentile(x, 50, interpolation='lower', axis=1) + c1 = np.percentile(x, 50, method='lower', axis=1) assert_almost_equal(c1, r1) assert_equal(c1.shape, r1.shape) out = np.empty((), dtype=x.dtype) - c = np.percentile(x, 50, interpolation='lower', out=out) + c = np.percentile(x, 50, method='lower', out=out) assert_equal(c, 5) assert_equal(out, 5) out = np.empty(4, dtype=x.dtype) - c = np.percentile(x, 50, interpolation='lower', axis=0, out=out) + c = np.percentile(x, 50, method='lower', axis=0, out=out) assert_equal(c, r0) assert_equal(out, r0) out = np.empty(3, dtype=x.dtype) - c = np.percentile(x, 50, interpolation='lower', axis=1, out=out) + c = np.percentile(x, 50, method='lower', axis=1, out=out) assert_equal(c, r1) assert_equal(out, r1) def test_exception(self): assert_raises(ValueError, np.percentile, [1, 2], 56, - interpolation='foobar') + method='foobar') assert_raises(ValueError, np.percentile, [1], 101) assert_raises(ValueError, np.percentile, [1], -1) assert_raises(ValueError, np.percentile, [1], list(range(50)) + [101]) @@ -3124,12 +3124,12 @@ class TestPercentile: # q.dim > 1, int r0 = np.array([[0, 1, 2, 3], [4, 5, 6, 7]]) out = np.empty((2, 4), dtype=x.dtype) - c = np.percentile(x, (25, 50), interpolation='lower', axis=0, out=out) + c = np.percentile(x, (25, 50), method='lower', axis=0, out=out) assert_equal(c, r0) assert_equal(out, r0) r1 = np.array([[0, 4, 8], [1, 5, 9]]) out = np.empty((2, 3), dtype=x.dtype) - c = np.percentile(x, (25, 50), interpolation='lower', axis=1, out=out) + c = np.percentile(x, (25, 50), method='lower', axis=1, out=out) assert_equal(c, r1) assert_equal(out, r1) @@ -3146,10 +3146,10 @@ class TestPercentile: assert_array_equal(np.percentile(d, 50, axis=-4).shape, (1, 2, 1)) assert_array_equal(np.percentile(d, 50, axis=2, - interpolation='midpoint').shape, + method='midpoint').shape, (11, 1, 1)) assert_array_equal(np.percentile(d, 50, axis=-2, - interpolation='midpoint').shape, + method='midpoint').shape, (11, 1, 1)) 
assert_array_equal(np.array(np.percentile(d, [10, 50], axis=0)).shape, @@ -3172,10 +3172,10 @@ class TestPercentile: def test_no_p_overwrite(self): p = np.linspace(0., 100., num=5) - np.percentile(np.arange(100.), p, interpolation="midpoint") + np.percentile(np.arange(100.), p, method="midpoint") assert_array_equal(p, np.linspace(0., 100., num=5)) p = np.linspace(0., 100., num=5).tolist() - np.percentile(np.arange(100.), p, interpolation="midpoint") + np.percentile(np.arange(100.), p, method="midpoint") assert_array_equal(p, np.linspace(0., 100., num=5).tolist()) def test_percentile_overwrite(self): @@ -3253,14 +3253,14 @@ class TestPercentile: o = np.zeros((4,)) d = np.ones((3, 4)) assert_equal(np.percentile(d, 0, 0, out=o), o) - assert_equal(np.percentile(d, 0, 0, interpolation='nearest', out=o), o) + assert_equal(np.percentile(d, 0, 0, method='nearest', out=o), o) o = np.zeros((3,)) assert_equal(np.percentile(d, 1, 1, out=o), o) - assert_equal(np.percentile(d, 1, 1, interpolation='nearest', out=o), o) + assert_equal(np.percentile(d, 1, 1, method='nearest', out=o), o) o = np.zeros(()) assert_equal(np.percentile(d, 2, out=o), o) - assert_equal(np.percentile(d, 2, interpolation='nearest', out=o), o) + assert_equal(np.percentile(d, 2, method='nearest', out=o), o) def test_out_nan(self): with warnings.catch_warnings(record=True): @@ -3270,15 +3270,15 @@ class TestPercentile: d[2, 1] = np.nan assert_equal(np.percentile(d, 0, 0, out=o), o) assert_equal( - np.percentile(d, 0, 0, interpolation='nearest', out=o), o) + np.percentile(d, 0, 0, method='nearest', out=o), o) o = np.zeros((3,)) assert_equal(np.percentile(d, 1, 1, out=o), o) assert_equal( - np.percentile(d, 1, 1, interpolation='nearest', out=o), o) + np.percentile(d, 1, 1, method='nearest', out=o), o) o = np.zeros(()) assert_equal(np.percentile(d, 1, out=o), o) assert_equal( - np.percentile(d, 1, interpolation='nearest', out=o), o) + np.percentile(d, 1, method='nearest', out=o), o) def test_nan_behavior(self): a = np.arange(24, dtype=float) @@ -3333,13 +3333,13 @@ class TestPercentile: b[:, 1] = np.nan b[:, 2] = np.nan assert_equal(np.percentile(a, [0.3, 0.6], (0, 2)), b) - # axis02 not zerod with nearest interpolation + # axis02 not zerod with method='nearest' b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4), - [0.3, 0.6], (0, 2), interpolation='nearest') + [0.3, 0.6], (0, 2), method='nearest') b[:, 1] = np.nan b[:, 2] = np.nan assert_equal(np.percentile( - a, [0.3, 0.6], (0, 2), interpolation='nearest'), b) + a, [0.3, 0.6], (0, 2), method='nearest'), b) def test_nan_q(self): # GH18830 @@ -3412,26 +3412,36 @@ class TestQuantile: # this is worth retesting, because quantile does not make a copy p0 = np.array([0, 0.75, 0.25, 0.5, 1.0]) p = p0.copy() - np.quantile(np.arange(100.), p, interpolation="midpoint") + np.quantile(np.arange(100.), p, method="midpoint") assert_array_equal(p, p0) p0 = p0.tolist() p = p.tolist() - np.quantile(np.arange(100.), p, interpolation="midpoint") + np.quantile(np.arange(100.), p, method="midpoint") assert_array_equal(p, p0) @pytest.mark.parametrize("dtype", np.typecodes["AllInteger"]) def test_quantile_preserve_int_type(self, dtype): res = np.quantile(np.array([1, 2], dtype=dtype), [0.5], - interpolation="nearest") + method="nearest") assert res.dtype == dtype - def test_quantile_monotonic(self): + @pytest.mark.parametrize("method", + ['inverted_cdf', 'averaged_inverted_cdf', 'closest_observation', + 'interpolated_inverted_cdf', 'hazen', 'weibull', 'linear', + 'median_unbiased', 'normal_unbiased', 
+ 'nearest', 'lower', 'higher', 'midpoint']) + def test_quantile_monotonic(self, method): # GH 14685 # test that the return value of quantile is monotonic if p0 is ordered - p0 = np.arange(0, 1, 0.01) + # Also tests that the boundary values are not mishandled. + p0 = np.linspace(0, 1, 101) quantile = np.quantile(np.array([0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 1, 1, 9, 9, 9, - 8, 8, 7]) * 0.1, p0) + 8, 8, 7]) * 0.1, p0, method=method) + assert_equal(np.sort(quantile), quantile) + + # Also test one where the number of data points is clearly divisible: + quantile = np.quantile([0., 1., 2., 3.], p0, method=method) assert_equal(np.sort(quantile), quantile) @hypothesis.given( diff --git a/numpy/lib/tests/test_nanfunctions.py b/numpy/lib/tests/test_nanfunctions.py index 126dba495..733a077ea 100644 --- a/numpy/lib/tests/test_nanfunctions.py +++ b/numpy/lib/tests/test_nanfunctions.py @@ -1108,12 +1108,12 @@ class TestNanFunctions_Quantile: # this is worth retesting, because quantile does not make a copy p0 = np.array([0, 0.75, 0.25, 0.5, 1.0]) p = p0.copy() - np.nanquantile(np.arange(100.), p, interpolation="midpoint") + np.nanquantile(np.arange(100.), p, method="midpoint") assert_array_equal(p, p0) p0 = p0.tolist() p = p.tolist() - np.nanquantile(np.arange(100.), p, interpolation="midpoint") + np.nanquantile(np.arange(100.), p, method="midpoint") assert_array_equal(p, p0) @pytest.mark.parametrize("axis", [None, 0, 1]) diff --git a/numpy/typing/tests/data/reveal/lib_function_base.pyi b/numpy/typing/tests/data/reveal/lib_function_base.pyi index 854b955b4..c559eb295 100644 --- a/numpy/typing/tests/data/reveal/lib_function_base.pyi +++ b/numpy/typing/tests/data/reveal/lib_function_base.pyi @@ -144,7 +144,7 @@ reveal_type(np.percentile(AR_O, 50)) # E: Any reveal_type(np.percentile(AR_f8, [50])) # E: ndarray[Any, dtype[floating[Any]]] reveal_type(np.percentile(AR_c16, [50])) # E: ndarray[Any, dtype[complexfloating[Any, Any]]] reveal_type(np.percentile(AR_m, [50])) # E: ndarray[Any, dtype[timedelta64]] -reveal_type(np.percentile(AR_M, [50], interpolation="nearest")) # E: ndarray[Any, dtype[datetime64]] +reveal_type(np.percentile(AR_M, [50], method="nearest")) # E: ndarray[Any, dtype[datetime64]] reveal_type(np.percentile(AR_O, [50])) # E: ndarray[Any, dtype[object_]] reveal_type(np.percentile(AR_f8, [50], keepdims=True)) # E: Any reveal_type(np.percentile(AR_f8, [50], axis=[1])) # E: Any @@ -158,7 +158,7 @@ reveal_type(np.quantile(AR_O, 0.5)) # E: Any reveal_type(np.quantile(AR_f8, [0.5])) # E: ndarray[Any, dtype[floating[Any]]] reveal_type(np.quantile(AR_c16, [0.5])) # E: ndarray[Any, dtype[complexfloating[Any, Any]]] reveal_type(np.quantile(AR_m, [0.5])) # E: ndarray[Any, dtype[timedelta64]] -reveal_type(np.quantile(AR_M, [0.5], interpolation="nearest")) # E: ndarray[Any, dtype[datetime64]] +reveal_type(np.quantile(AR_M, [0.5], method="nearest")) # E: ndarray[Any, dtype[datetime64]] reveal_type(np.quantile(AR_O, [0.5])) # E: ndarray[Any, dtype[object_]] reveal_type(np.quantile(AR_f8, [0.5], keepdims=True)) # E: Any reveal_type(np.quantile(AR_f8, [0.5], axis=[1])) # E: Any |
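The nan-aware wrappers in `numpy/lib/nanfunctions.py` forward the new `method=` keyword (and the deprecated `interpolation=`) to the same machinery, so they behave identically apart from NaN handling. A short usage check with ad-hoc data:

```python
import numpy as np

a = np.array([1.0, 2.0, np.nan, 3.0, 4.0, 10.0])

# The NaN is dropped, so the estimate is computed from [1, 2, 3, 4, 10].
print(np.nanpercentile(a, 40))                    # 2.6  (default 'linear')
print(np.nanpercentile(a, 40, method="lower"))    # 2.0
print(np.nanpercentile(a, 40, method="nearest"))  # 3.0
print(np.nanquantile(a, 0.4, method="midpoint"))  # 2.5
```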