| Field | Value | Date |
|---|---|---|
| author | Stephan Hoyer <shoyer@gmail.com> | 2019-05-20 09:37:18 -0700 |
| committer | GitHub <noreply@github.com> | 2019-05-20 09:37:18 -0700 |
| commit | f9c1502e7ace9b48f0256a77c560aae43763a1f2 (patch) | |
| tree | 1b04f2a79f979f6039a794173560da7153093ac9 | |
| parent | bdd75dff0aa8b77be5784923befb6410fc2f4837 (diff) | |
| download | numpy-f9c1502e7ace9b48f0256a77c560aae43763a1f2.tar.gz | |
BUG: Increment stacklevel for warnings to account for NEP-18 overrides (#13589)
* Increment stacklevel for warnings to account for NEP-18 overrides
For NumPy functions that make use of `__array_function__`, the appropriate
stack level for warnings should generally be increased by 1 to account for
the override function defined in `numpy.core.overrides`.
Fixes GH-13329
* Update numpy/lib/type_check.py
Co-Authored-By: Sebastian Berg <sebastian@sipsolutions.net>
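The reasoning in the commit message is easy to reproduce outside NumPy. The sketch below uses a hypothetical `simple_dispatch` decorator and `my_sum` function (not NumPy's actual `array_function_dispatch` machinery) to show the extra wrapper frame that a NEP-18 style override layer inserts between the caller and the implementation, and why `stacklevel` therefore has to be raised by one for the warning to be attributed to the user's call site.

```python
import functools
import warnings


def simple_dispatch(func):
    """Hypothetical stand-in for the wrapper that an override layer like
    numpy.core.overrides generates: it adds exactly one stack frame between
    the caller and the decorated implementation."""
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # A real NEP-18 dispatcher would look for __array_function__ on the
        # arguments here; this sketch only reproduces the extra frame.
        return func(*args, **kwargs)
    return wrapper


@simple_dispatch
def my_sum(values):
    # stacklevel=1 would blame this line, stacklevel=2 would blame the
    # wrapper above; stacklevel=3 skips both and blames the user's call.
    warnings.warn("my_sum is deprecated in this sketch",
                  DeprecationWarning, stacklevel=3)
    return sum(values)


warnings.simplefilter("always")
my_sum([1, 2, 3])  # the emitted warning is attributed to this line
```

This is the pattern applied throughout the diff below: every `warnings.warn` inside a function wrapped for NEP-18 dispatch gains one frame, so its `stacklevel` is bumped from 2 to 3 (and from 3 to 4, etc., in helpers that are already called indirectly).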
| Mode | File | Lines changed |
|---|---|---|
| -rw-r--r-- | numpy/core/fromnumeric.py | 4 |
| -rw-r--r-- | numpy/lib/function_base.py | 24 |
| -rw-r--r-- | numpy/lib/histograms.py | 11 |
| -rw-r--r-- | numpy/lib/nanfunctions.py | 25 |
| -rw-r--r-- | numpy/lib/polynomial.py | 2 |
| -rw-r--r-- | numpy/lib/shape_base.py | 2 |
| -rw-r--r-- | numpy/lib/type_check.py | 7 |
| -rw-r--r-- | numpy/linalg/linalg.py | 6 |
8 files changed, 44 insertions(+), 37 deletions(-)
```diff
diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py
index 58da8a54b..cbbee463a 100644
--- a/numpy/core/fromnumeric.py
+++ b/numpy/core/fromnumeric.py
@@ -2145,7 +2145,7 @@ def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue,
         warnings.warn(
             "Calling np.sum(generator) is deprecated, and in the future will give a different result. "
             "Use np.sum(np.fromiter(generator)) or the python sum builtin instead.",
-            DeprecationWarning, stacklevel=2)
+            DeprecationWarning, stacklevel=3)
 
         res = _sum_(a)
         if out is not None:
@@ -3569,5 +3569,5 @@ def rank(a):
     warnings.warn(
         "`rank` is deprecated; use the `ndim` attribute or function instead. "
         "To find the rank of a matrix see `numpy.linalg.matrix_rank`.",
-        VisibleDeprecationWarning, stacklevel=2)
+        VisibleDeprecationWarning, stacklevel=3)
     return ndim(a)
diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index 7fa51d683..2a6d39abc 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -682,7 +682,7 @@ def select(condlist, choicelist, default=0):
         # 2014-02-24, 1.9
         warnings.warn("select with an empty condition list is not possible"
                       "and will be deprecated",
-                      DeprecationWarning, stacklevel=2)
+                      DeprecationWarning, stacklevel=3)
         return np.asarray(default)[()]
 
     choicelist = [np.asarray(choice) for choice in choicelist]
@@ -717,7 +717,7 @@ def select(condlist, choicelist, default=0):
         msg = "select condlists containing integer ndarrays is deprecated " \
             "and will be removed in the future. Use `.astype(bool)` to " \
             "convert to bools."
-        warnings.warn(msg, DeprecationWarning, stacklevel=2)
+        warnings.warn(msg, DeprecationWarning, stacklevel=3)
 
     if choicelist[0].ndim == 0:
         # This may be common, so avoid the call.
@@ -2443,7 +2443,7 @@ def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None,
 
     if fact <= 0:
         warnings.warn("Degrees of freedom <= 0 for slice",
-                      RuntimeWarning, stacklevel=2)
+                      RuntimeWarning, stacklevel=3)
         fact = 0.0
 
     X -= avg[:, None]
@@ -2522,7 +2522,7 @@ def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, ddof=np._NoValue):
     if bias is not np._NoValue or ddof is not np._NoValue:
         # 2015-03-15, 1.10
         warnings.warn('bias and ddof have no effect and are deprecated',
-                      DeprecationWarning, stacklevel=2)
+                      DeprecationWarning, stacklevel=3)
     c = cov(x, y, rowvar)
     try:
         d = diag(c)
@@ -4304,7 +4304,7 @@ def delete(arr, obj, axis=None):
         # 2013-09-24, 1.9
         warnings.warn(
             "in the future the special handling of scalars will be removed "
-            "from delete and raise an error", DeprecationWarning, stacklevel=2)
+            "from delete and raise an error", DeprecationWarning, stacklevel=3)
         if wrap:
             return wrap(arr)
         else:
@@ -4373,7 +4373,7 @@ def delete(arr, obj, axis=None):
     if obj.dtype == bool:
         warnings.warn("in the future insert will treat boolean arrays and "
                       "array-likes as boolean index instead of casting it "
-                      "to integer", FutureWarning, stacklevel=2)
+                      "to integer", FutureWarning, stacklevel=3)
         obj = obj.astype(intp)
     if isinstance(_obj, (int, long, integer)):
         # optimization for a single value
@@ -4401,7 +4401,7 @@ def delete(arr, obj, axis=None):
            # 2013-09-24, 1.9
            warnings.warn(
                "using a non-integer array as obj in delete will result in an "
-               "error in the future", DeprecationWarning, stacklevel=2)
+               "error in the future", DeprecationWarning, stacklevel=3)
            obj = obj.astype(intp)
         keep = ones(N, dtype=bool)
@@ -4412,13 +4412,13 @@ def delete(arr, obj, axis=None):
             warnings.warn(
                 "in the future out of bounds indices will raise an error "
                 "instead of being ignored by `numpy.delete`.",
-                DeprecationWarning, stacklevel=2)
+                DeprecationWarning, stacklevel=3)
             obj = obj[inside_bounds]
         positive_indices = obj >= 0
         if not positive_indices.all():
             warnings.warn(
                 "in the future negative indices will not be ignored by "
-                "`numpy.delete`.", FutureWarning, stacklevel=2)
+                "`numpy.delete`.", FutureWarning, stacklevel=3)
             obj = obj[positive_indices]
 
         keep[obj, ] = False
@@ -4543,7 +4543,7 @@ def insert(arr, obj, values, axis=None):
         # 2013-09-24, 1.9
         warnings.warn(
             "in the future the special handling of scalars will be removed "
-            "from insert and raise an error", DeprecationWarning, stacklevel=2)
+            "from insert and raise an error", DeprecationWarning, stacklevel=3)
         arr = arr.copy(order=arrorder)
         arr[...] = values
         if wrap:
@@ -4567,7 +4567,7 @@ def insert(arr, obj, values, axis=None):
         warnings.warn(
             "in the future insert will treat boolean arrays and "
             "array-likes as a boolean index instead of casting it to "
-            "integer", FutureWarning, stacklevel=2)
+            "integer", FutureWarning, stacklevel=3)
         indices = indices.astype(intp)
         # Code after warning period:
         #if obj.ndim != 1:
@@ -4617,7 +4617,7 @@ def insert(arr, obj, values, axis=None):
         # 2013-09-24, 1.9
         warnings.warn(
             "using a non-integer array as obj in insert will result in an "
-            "error in the future", DeprecationWarning, stacklevel=2)
+            "error in the future", DeprecationWarning, stacklevel=3)
         indices = indices.astype(intp)
 
     indices[indices < 0] += N
diff --git a/numpy/lib/histograms.py b/numpy/lib/histograms.py
index ee9a3053c..5bcedc7f4 100644
--- a/numpy/lib/histograms.py
+++ b/numpy/lib/histograms.py
@@ -148,7 +148,8 @@ def _hist_bin_stone(x, range):
     nbins_upper_bound = max(100, int(np.sqrt(n)))
     nbins = min(_range(1, nbins_upper_bound + 1), key=jhat)
     if nbins == nbins_upper_bound:
-        warnings.warn("The number of bins estimated may be suboptimal.", RuntimeWarning, stacklevel=2)
+        warnings.warn("The number of bins estimated may be suboptimal.",
+                      RuntimeWarning, stacklevel=3)
     return ptp_x / nbins
@@ -279,7 +280,7 @@ def _ravel_and_check_weights(a, weights):
     if a.dtype == np.bool_:
         warnings.warn("Converting input from {} to {} for compatibility."
                       .format(a.dtype, np.uint8),
-                      RuntimeWarning, stacklevel=2)
+                      RuntimeWarning, stacklevel=3)
         a = a.astype(np.uint8)
 
     if weights is not None:
@@ -888,7 +889,7 @@ def histogram(a, bins=10, range=None, normed=None, weights=None,
             warnings.warn(
                 "The normed argument is ignored when density is provided. "
                 "In future passing both will result in an error.",
-                DeprecationWarning, stacklevel=2)
+                DeprecationWarning, stacklevel=3)
         normed = None
 
     if density:
@@ -904,7 +905,7 @@ def histogram(a, bins=10, range=None, normed=None, weights=None,
                 "density=True will produce the same result anyway. "
                 "The argument will be removed in a future version of "
                 "numpy.",
-                np.VisibleDeprecationWarning, stacklevel=2)
+                np.VisibleDeprecationWarning, stacklevel=3)
 
         # this normalization is incorrect, but
         db = np.array(np.diff(bin_edges), float)
@@ -915,7 +916,7 @@ def histogram(a, bins=10, range=None, normed=None, weights=None,
             warnings.warn(
                 "Passing normed=False is deprecated, and has no effect. "
                 "Consider passing the density argument instead.",
-                DeprecationWarning, stacklevel=2)
+                DeprecationWarning, stacklevel=3)
         return n, bin_edges
diff --git a/numpy/lib/nanfunctions.py b/numpy/lib/nanfunctions.py
index 77c851fcf..f497aca63 100644
--- a/numpy/lib/nanfunctions.py
+++ b/numpy/lib/nanfunctions.py
@@ -165,7 +165,8 @@ def _remove_nan_1d(arr1d, overwrite_input=False):
     c = np.isnan(arr1d)
     s = np.nonzero(c)[0]
     if s.size == arr1d.size:
-        warnings.warn("All-NaN slice encountered", RuntimeWarning, stacklevel=4)
+        warnings.warn("All-NaN slice encountered", RuntimeWarning,
+                      stacklevel=5)
         return arr1d[:0], True
     elif s.size == 0:
         return arr1d, overwrite_input
@@ -318,7 +319,8 @@ def nanmin(a, axis=None, out=None, keepdims=np._NoValue):
         # which do not implement isnan (gh-9009), or fmin correctly (gh-8975)
         res = np.fmin.reduce(a, axis=axis, out=out, **kwargs)
         if np.isnan(res).any():
-            warnings.warn("All-NaN slice encountered", RuntimeWarning, stacklevel=2)
+            warnings.warn("All-NaN slice encountered", RuntimeWarning,
+                          stacklevel=3)
     else:
         # Slow, but safe for subclasses of ndarray
         a, mask = _replace_nan(a, +np.inf)
@@ -330,7 +332,8 @@ def nanmin(a, axis=None, out=None, keepdims=np._NoValue):
         mask = np.all(mask, axis=axis, **kwargs)
         if np.any(mask):
             res = _copyto(res, np.nan, mask)
-            warnings.warn("All-NaN axis encountered", RuntimeWarning, stacklevel=2)
+            warnings.warn("All-NaN axis encountered", RuntimeWarning,
+                          stacklevel=3)
     return res
@@ -431,7 +434,8 @@ def nanmax(a, axis=None, out=None, keepdims=np._NoValue):
         # which do not implement isnan (gh-9009), or fmax correctly (gh-8975)
         res = np.fmax.reduce(a, axis=axis, out=out, **kwargs)
         if np.isnan(res).any():
-            warnings.warn("All-NaN slice encountered", RuntimeWarning, stacklevel=2)
+            warnings.warn("All-NaN slice encountered", RuntimeWarning,
+                          stacklevel=3)
     else:
         # Slow, but safe for subclasses of ndarray
         a, mask = _replace_nan(a, -np.inf)
@@ -443,7 +447,8 @@ def nanmax(a, axis=None, out=None, keepdims=np._NoValue):
         mask = np.all(mask, axis=axis, **kwargs)
         if np.any(mask):
             res = _copyto(res, np.nan, mask)
-            warnings.warn("All-NaN axis encountered", RuntimeWarning, stacklevel=2)
+            warnings.warn("All-NaN axis encountered", RuntimeWarning,
+                          stacklevel=3)
     return res
@@ -947,7 +952,7 @@ def nanmean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
 
     isbad = (cnt == 0)
     if isbad.any():
-        warnings.warn("Mean of empty slice", RuntimeWarning, stacklevel=2)
+        warnings.warn("Mean of empty slice", RuntimeWarning, stacklevel=3)
         # NaN is the only possible bad value, so no further
         # action is needed to handle bad results.
     return avg
@@ -959,7 +964,7 @@ def _nanmedian1d(arr1d, overwrite_input=False):
     See nanmedian for parameter usage
     """
     arr1d, overwrite_input = _remove_nan_1d(arr1d,
-                                            overwrite_input=overwrite_input)
+                                             overwrite_input=overwrite_input)
     if arr1d.size == 0:
         return np.nan
@@ -1002,7 +1007,8 @@ def _nanmedian_small(a, axis=None, out=None, overwrite_input=False):
     a = np.ma.masked_array(a, np.isnan(a))
     m = np.ma.median(a, axis=axis, overwrite_input=overwrite_input)
     for i in range(np.count_nonzero(m.mask.ravel())):
-        warnings.warn("All-NaN slice encountered", RuntimeWarning, stacklevel=3)
+        warnings.warn("All-NaN slice encountered", RuntimeWarning,
+                      stacklevel=4)
     if out is not None:
         out[...] = m.filled(np.nan)
         return out
@@ -1547,7 +1553,8 @@ def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
 
     isbad = (dof <= 0)
     if np.any(isbad):
-        warnings.warn("Degrees of freedom <= 0 for slice.", RuntimeWarning, stacklevel=2)
+        warnings.warn("Degrees of freedom <= 0 for slice.", RuntimeWarning,
+                      stacklevel=3)
         # NaN, inf, or negative numbers are all possible bad
         # values, so explicitly replace them with NaN.
         var = _copyto(var, np.nan, isbad)
diff --git a/numpy/lib/polynomial.py b/numpy/lib/polynomial.py
index 1f08abf36..2c72f623c 100644
--- a/numpy/lib/polynomial.py
+++ b/numpy/lib/polynomial.py
@@ -634,7 +634,7 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
     # warn on rank reduction, which indicates an ill conditioned matrix
     if rank != order and not full:
         msg = "Polyfit may be poorly conditioned"
-        warnings.warn(msg, RankWarning, stacklevel=3)
+        warnings.warn(msg, RankWarning, stacklevel=4)
 
     if full:
         return c, resids, rank, s, rcond
diff --git a/numpy/lib/shape_base.py b/numpy/lib/shape_base.py
index 8ebe7a695..1b66f3a3e 100644
--- a/numpy/lib/shape_base.py
+++ b/numpy/lib/shape_base.py
@@ -578,7 +578,7 @@ def expand_dims(a, axis):
         # 2017-05-17, 1.13.0
         warnings.warn("Both axis > a.ndim and axis < -a.ndim - 1 are "
                       "deprecated and will raise an AxisError in the future.",
-                      DeprecationWarning, stacklevel=2)
+                      DeprecationWarning, stacklevel=3)
         # When the deprecation period expires, delete this if block,
         if axis < 0:
             axis = axis + a.ndim + 1
diff --git a/numpy/lib/type_check.py b/numpy/lib/type_check.py
index 2b254b6c0..ac4b03a6c 100644
--- a/numpy/lib/type_check.py
+++ b/numpy/lib/type_check.py
@@ -541,6 +541,9 @@ def real_if_close(a, tol=100):
 
 
 def _asscalar_dispatcher(a):
+    # 2018-10-10, 1.16
+    warnings.warn('np.asscalar(a) is deprecated since NumPy v1.16, use '
+                  'a.item() instead', DeprecationWarning, stacklevel=3)
     return (a,)
@@ -569,10 +572,6 @@ def asscalar(a):
     >>> np.asscalar(np.array([24]))
     24
     """
-
-    # 2018-10-10, 1.16
-    warnings.warn('np.asscalar(a) is deprecated since NumPy v1.16, use '
-                  'a.item() instead', DeprecationWarning, stacklevel=1)
     return a.item()
 
 #-----------------------------------------------------------------------------
diff --git a/numpy/linalg/linalg.py b/numpy/linalg/linalg.py
index 0aa8d5ca9..27ec62403 100644
--- a/numpy/linalg/linalg.py
+++ b/numpy/linalg/linalg.py
@@ -890,12 +890,12 @@ def qr(a, mode='reduced'):
             msg = "".join((
                     "The 'full' option is deprecated in favor of 'reduced'.\n",
                     "For backward compatibility let mode default."))
-            warnings.warn(msg, DeprecationWarning, stacklevel=2)
+            warnings.warn(msg, DeprecationWarning, stacklevel=3)
             mode = 'reduced'
         elif mode in ('e', 'economic'):
             # 2013-04-01, 1.8
             msg = "The 'economic' option is deprecated."
-            warnings.warn(msg, DeprecationWarning, stacklevel=2)
+            warnings.warn(msg, DeprecationWarning, stacklevel=3)
             mode = 'economic'
         else:
             raise ValueError("Unrecognized mode '%s'" % mode)
@@ -2245,7 +2245,7 @@ def lstsq(a, b, rcond="warn"):
                       "To use the future default and silence this warning "
                       "we advise to pass `rcond=None`, to keep using the old, "
                       "explicitly pass `rcond=-1`.",
-                      FutureWarning, stacklevel=2)
+                      FutureWarning, stacklevel=3)
         rcond = -1
     if rcond is None:
         rcond = finfo(t).eps * max(n, m)
```