Diffstat (limited to 'numpy/lib')
-rw-r--r-- | numpy/lib/arrayterator.py             |  4
-rw-r--r-- | numpy/lib/format.py                   |  4
-rw-r--r-- | numpy/lib/function_base.py            | 63
-rw-r--r-- | numpy/lib/histograms.py               | 10
-rw-r--r-- | numpy/lib/index_tricks.py             |  4
-rw-r--r-- | numpy/lib/tests/test__iotools.py      |  7
-rw-r--r-- | numpy/lib/tests/test_function_base.py | 41
-rw-r--r-- | numpy/lib/tests/test_io.py            | 19
-rw-r--r-- | numpy/lib/tests/test_regression.py    |  3
-rw-r--r-- | numpy/lib/tests/test_type_check.py    |  2
-rw-r--r-- | numpy/lib/user_array.py               |  8
11 files changed, 71 insertions, 94 deletions
diff --git a/numpy/lib/arrayterator.py b/numpy/lib/arrayterator.py
index 924092995..b9ea21f8e 100644
--- a/numpy/lib/arrayterator.py
+++ b/numpy/lib/arrayterator.py
@@ -10,8 +10,6 @@ a user-specified number of elements.
 from operator import mul
 from functools import reduce

-from numpy.compat import long
-
 __all__ = ['Arrayterator']


@@ -108,7 +106,7 @@ class Arrayterator:
         if slice_ is Ellipsis:
             fixed.extend([slice(None)] * (dims-length+1))
             length = len(fixed)
-        elif isinstance(slice_, (int, long)):
+        elif isinstance(slice_, int):
             fixed.append(slice(slice_, slice_+1, 1))
         else:
             fixed.append(slice_)
diff --git a/numpy/lib/format.py b/numpy/lib/format.py
index e2696c286..2afa4ac10 100644
--- a/numpy/lib/format.py
+++ b/numpy/lib/format.py
@@ -166,7 +166,7 @@ import io
 import warnings
 from numpy.lib.utils import safe_eval
 from numpy.compat import (
-    isfileobj, long, os_fspath, pickle
+    isfileobj, os_fspath, pickle
     )


@@ -594,7 +594,7 @@ def _read_array_header(fp, version):
     # Sanity-check the values.
     if (not isinstance(d['shape'], tuple) or
-            not numpy.all([isinstance(x, (int, long)) for x in d['shape']])):
+            not numpy.all([isinstance(x, int) for x in d['shape']])):
         msg = "shape is not valid: {!r}"
         raise ValueError(msg.format(d['shape']))
     if not isinstance(d['fortran_order'], bool):
diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index a49c34741..bfcf0d316 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -29,7 +29,6 @@ from numpy.core.multiarray import (
     interp as compiled_interp, interp_complex as compiled_interp_complex
     )
 from numpy.core.umath import _add_newdoc_ufunc as add_newdoc_ufunc
-from numpy.compat import long

 import builtins

@@ -4207,12 +4206,17 @@ def delete(arr, obj, axis=None):
     Parameters
     ----------
     arr : array_like
-      Input array.
+        Input array.
     obj : slice, int or array of ints
-      Indicate indices of sub-arrays to remove along the specified axis.
+        Indicate indices of sub-arrays to remove along the specified axis.
+
+        .. versionchanged:: 1.19.0
+            Boolean indices are now treated as a mask of elements to remove,
+            rather than being cast to the integers 0 and 1.
+
     axis : int, optional
-      The axis along which to delete the subarray defined by `obj`.
-      If `axis` is None, `obj` is applied to the flattened array.
+        The axis along which to delete the subarray defined by `obj`.
+        If `axis` is None, `obj` is applied to the flattened array.

     Returns
     -------
@@ -4273,15 +4277,6 @@ def delete(arr, obj, axis=None):
         # needed for np.matrix, which is still not 1d after being ravelled
         ndim = arr.ndim
         axis = ndim - 1
-    elif ndim == 0:
-        # 2013-09-24, 1.9
-        warnings.warn(
-            "in the future the special handling of scalars will be removed "
-            "from delete and raise an error", DeprecationWarning, stacklevel=3)
-        if wrap:
-            return wrap(arr)
-        else:
-            return arr.copy(order=arrorder)
     else:
         axis = normalize_axis_index(axis, ndim)

@@ -4339,19 +4334,8 @@ def delete(arr, obj, axis=None):
         else:
             return new

-    _obj = obj
-    obj = np.asarray(obj)
-    # After removing the special handling of booleans and out of
-    # bounds values, the conversion to the array can be removed.
-    if obj.dtype == bool:
-        # 2012-10-11, NumPy 1.8
-        warnings.warn("in the future insert will treat boolean arrays and "
-                      "array-likes as boolean index instead of casting it "
-                      "to integer", FutureWarning, stacklevel=3)
-        obj = obj.astype(intp)
-    if isinstance(_obj, (int, long, integer)):
+    if isinstance(obj, (int, integer)) and not isinstance(obj, bool):
         # optimization for a single value
-        obj = obj.item()
         if (obj < -N or obj >= N):
             raise IndexError(
                 "index %i is out of bounds for axis %i with "
@@ -4367,11 +4351,23 @@ def delete(arr, obj, axis=None):
         slobj2[axis] = slice(obj+1, None)
         new[tuple(slobj)] = arr[tuple(slobj2)]
     else:
+        _obj = obj
+        obj = np.asarray(obj)
         if obj.size == 0 and not isinstance(_obj, np.ndarray):
             obj = obj.astype(intp)

-        keep = ones(N, dtype=bool)
-        keep[obj, ] = False
+        if obj.dtype == bool:
+            if obj.shape != (N,):
+                raise ValueError('boolean array argument obj to delete '
+                                 'must be one dimensional and match the axis '
+                                 'length of {}'.format(N))
+
+            # optimization, the other branch is slower
+            keep = ~obj
+        else:
+            keep = ones(N, dtype=bool)
+            keep[obj,] = False
+
         slobj[axis] = keep
         new = arr[tuple(slobj)]
@@ -4490,17 +4486,6 @@ def insert(arr, obj, values, axis=None):
         # needed for np.matrix, which is still not 1d after being ravelled
         ndim = arr.ndim
         axis = ndim - 1
-    elif ndim == 0:
-        # 2013-09-24, 1.9
-        warnings.warn(
-            "in the future the special handling of scalars will be removed "
-            "from insert and raise an error", DeprecationWarning, stacklevel=3)
-        arr = arr.copy(order=arrorder)
-        arr[...] = values
-        if wrap:
-            return wrap(arr)
-        else:
-            return arr
     else:
         axis = normalize_axis_index(axis, ndim)
     slobj = [slice(None)]*ndim
diff --git a/numpy/lib/histograms.py b/numpy/lib/histograms.py
index 5358c6846..32d7df117 100644
--- a/numpy/lib/histograms.py
+++ b/numpy/lib/histograms.py
@@ -229,21 +229,21 @@ def _hist_bin_fd(x, range):
 def _hist_bin_auto(x, range):
     """
     Histogram bin estimator that uses the minimum width of the
-    Freedman-Diaconis and Sturges estimators if the FD bandwidth is non zero
-    and the Sturges estimator if the FD bandwidth is 0.
+    Freedman-Diaconis and Sturges estimators if the FD bin width is non-zero.
+    If the bin width from the FD estimator is 0, the Sturges estimator is used.

     The FD estimator is usually the most robust method, but its width
     estimate tends to be too large for small `x` and bad for data with limited
     variance. The Sturges estimator is quite good for small (<1000) datasets
-    and is the default in the R language. This method gives good off the shelf
+    and is the default in the R language. This method gives good off-the-shelf
     behaviour.

     .. versionchanged:: 1.15.0
     If there is limited variance the IQR can be 0, which results in the
     FD bin width being 0 too. This is not a valid bin width, so
     ``np.histogram_bin_edges`` chooses 1 bin instead, which may not be optimal.
-    If the IQR is 0, it's unlikely any variance based estimators will be of
-    use, so we revert to the sturges estimator, which only uses the size of the
+    If the IQR is 0, it's unlikely any variance-based estimators will be of
+    use, so we revert to the Sturges estimator, which only uses the size of the
     dataset in its calculation.

     Parameters
diff --git a/numpy/lib/index_tricks.py b/numpy/lib/index_tricks.py
index 0560bd36d..b4118814d 100644
--- a/numpy/lib/index_tricks.py
+++ b/numpy/lib/index_tricks.py
@@ -597,8 +597,6 @@ class ndenumerate:
     def __iter__(self):
         return self

-    next = __next__
-

 @set_module('numpy')
 class ndindex:
@@ -665,8 +663,6 @@ class ndindex:
         next(self._it)
         return self._it.multi_index

-    next = __next__
-

 # You can do all this with slice() plus a few special objects,
 # but there's a lot to remember. This version is simpler because
diff --git a/numpy/lib/tests/test__iotools.py b/numpy/lib/tests/test__iotools.py
index 1d69d869e..6964c1128 100644
--- a/numpy/lib/tests/test__iotools.py
+++ b/numpy/lib/tests/test__iotools.py
@@ -9,7 +9,6 @@ from numpy.lib._iotools import (
     LineSplitter, NameValidator, StringConverter,
     has_nested_fields, easy_dtype, flatten_dtype
     )
-from numpy.compat import unicode


 class TestLineSplitter:
@@ -179,10 +178,10 @@ class TestStringConverter:
         # note that the longdouble type has been skipped, so the
         # _status increases by 2. Everything should succeed with
         # unicode conversion (5).
-        for s in ['a', u'a', b'a']:
+        for s in ['a', b'a']:
             res = converter.upgrade(s)
-            assert_(type(res) is unicode)
-            assert_equal(res, u'a')
+            assert_(type(res) is str)
+            assert_equal(res, 'a')
             assert_equal(converter._status, 5 + status_offset)

     def test_missing(self):
diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py
index 04b280038..23bf3296d 100644
--- a/numpy/lib/tests/test_function_base.py
+++ b/numpy/lib/tests/test_function_base.py
@@ -21,7 +21,6 @@ from numpy.lib import (
     select, setxor1d, sinc, trapz, trim_zeros, unwrap, unique, vectorize
     )
-from numpy.compat import long


 def get_mat(n):
     data = np.arange(n)
@@ -509,12 +508,11 @@ class TestInsert:
                            insert(a, 1, a[:, 2, :], axis=1))

     def test_0d(self):
-        # This is an error in the future
         a = np.array(1)
-        with warnings.catch_warnings(record=True) as w:
-            warnings.filterwarnings('always', '', DeprecationWarning)
-            assert_equal(insert(a, [], 2, axis=0), np.array(2))
-            assert_(w[0].category is DeprecationWarning)
+        with pytest.raises(np.AxisError):
+            insert(a, [], 2, axis=0)
+        with pytest.raises(TypeError):
+            insert(a, [], 2, axis="nonsense")

     def test_subclass(self):
         class SubClass(np.ndarray):
@@ -810,9 +808,6 @@ class TestDelete:
         a_del = delete(self.a, indices)
         nd_a_del = delete(self.nd_a, indices, axis=1)
         msg = 'Delete failed for obj: %r' % indices
-        # NOTE: The cast should be removed after warning phase for bools
-        if not isinstance(indices, (slice, int, long, np.integer)):
-            indices = np.asarray(indices, dtype=np.intp)
         assert_array_equal(setxor1d(a_del, self.a[indices, ]), self.a,
                            err_msg=msg)
         xor = setxor1d(nd_a_del[0,:, 0], self.nd_a[0, indices, 0])
@@ -828,7 +823,6 @@ class TestDelete:
             self._check_inverse_of_slicing(s)

     def test_fancy(self):
-        # Deprecation/FutureWarning tests should be kept after change.
         self._check_inverse_of_slicing(np.array([[0, 1], [2, 1]]))
         with pytest.raises(IndexError):
             delete(self.a, [100])
@@ -837,14 +831,17 @@
         self._check_inverse_of_slicing([0, -1, 2, 2])

-        with warnings.catch_warnings(record=True) as w:
-            warnings.filterwarnings('always', category=FutureWarning)
-            obj = np.array([True, False, False], dtype=bool)
-            self._check_inverse_of_slicing(obj)
-            # _check_inverse_of_slicing operates on two arrays, so warns twice
-            assert len(w) == 2
-            assert_(w[0].category is FutureWarning)
-            assert_(w[1].category is FutureWarning)
+        self._check_inverse_of_slicing([True, False, False, True, False])
+
+        # not legal, indexing with these would change the dimension
+        with pytest.raises(ValueError):
+            delete(self.a, True)
+        with pytest.raises(ValueError):
+            delete(self.a, False)
+
+        # not enough items
+        with pytest.raises(ValueError):
+            delete(self.a, [False]*4)

     def test_single(self):
         self._check_inverse_of_slicing(0)
@@ -852,10 +849,10 @@
     def test_0d(self):
         a = np.array(1)
-        with warnings.catch_warnings(record=True) as w:
-            warnings.filterwarnings('always', '', DeprecationWarning)
-            assert_equal(delete(a, [], axis=0), a)
-            assert_(w[0].category is DeprecationWarning)
+        with pytest.raises(np.AxisError):
+            delete(a, [], axis=0)
+        with pytest.raises(TypeError):
+            delete(a, [], axis="nonsense")

     def test_subclass(self):
         class SubClass(np.ndarray):
diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py
index 6812d8d68..8ce20a116 100644
--- a/numpy/lib/tests/test_io.py
+++ b/numpy/lib/tests/test_io.py
@@ -13,6 +13,7 @@ from tempfile import NamedTemporaryFile
 from io import BytesIO, StringIO
 from datetime import datetime
 import locale
+from multiprocessing import Process

 import numpy as np
 import numpy.ma as ma
@@ -569,16 +570,20 @@ class TestSaveTxt:
         assert_equal(s.read(), b"%f\n" % 1.)

     @pytest.mark.skipif(sys.platform=='win32', reason="files>4GB may not work")
-    @pytest.mark.skipif(IS_PYPY,
-            reason="GC problems after test, gc.collect does not help. see gh-15775")
     @pytest.mark.slow
     @requires_memory(free_bytes=7e9)
     def test_large_zip(self):
-        # The test takes at least 6GB of memory, writes a file larger than 4GB
-        test_data = np.asarray([np.random.rand(np.random.randint(50,100),4)
-                                for i in range(800000)], dtype=object)
-        with tempdir() as tmpdir:
-            np.savez(os.path.join(tmpdir, 'test.npz'), test_data=test_data)
+        def check_large_zip():
+            # The test takes at least 6GB of memory, writes a file larger than 4GB
+            test_data = np.asarray([np.random.rand(np.random.randint(50,100),4)
+                                    for i in range(800000)], dtype=object)
+            with tempdir() as tmpdir:
+                np.savez(os.path.join(tmpdir, 'test.npz'), test_data=test_data)
+        # run in a subprocess to ensure memory is released on PyPy, see gh-15775
+        p = Process(target=check_large_zip)
+        p.start()
+        p.join()
+        assert p.exitcode == 0


 class LoadTxtBase:
     def check_compressed(self, fopen, suffixes):
diff --git a/numpy/lib/tests/test_regression.py b/numpy/lib/tests/test_regression.py
index a2598990b..55df2a675 100644
--- a/numpy/lib/tests/test_regression.py
+++ b/numpy/lib/tests/test_regression.py
@@ -5,7 +5,6 @@ from numpy.testing import (
     assert_, assert_equal, assert_array_equal, assert_array_almost_equal,
     assert_raises, _assert_valid_refcount,
     )
-from numpy.compat import unicode


 class TestRegression:
@@ -180,7 +179,7 @@ class TestRegression:
         # related to ticket #1405.
         include_dirs = [np.get_include()]
         for path in include_dirs:
-            assert_(isinstance(path, (str, unicode)))
+            assert_(isinstance(path, str))
             assert_(path != '')

     def test_polyder_return_type(self):
diff --git a/numpy/lib/tests/test_type_check.py b/numpy/lib/tests/test_type_check.py
index 47685550a..3f4ca6309 100644
--- a/numpy/lib/tests/test_type_check.py
+++ b/numpy/lib/tests/test_type_check.py
@@ -1,5 +1,4 @@
 import numpy as np
-from numpy.compat import long
 from numpy.testing import (
     assert_, assert_equal, assert_array_equal, assert_raises
     )
@@ -86,7 +85,6 @@ class TestIsscalar:
         assert_(not np.isscalar([3]))
         assert_(not np.isscalar((3,)))
         assert_(np.isscalar(3j))
-        assert_(np.isscalar(long(10)))
         assert_(np.isscalar(4.0))
diff --git a/numpy/lib/user_array.py b/numpy/lib/user_array.py
index 9c266fd6b..0e96b477e 100644
--- a/numpy/lib/user_array.py
+++ b/numpy/lib/user_array.py
@@ -11,7 +11,6 @@ from numpy.core import (
     bitwise_xor, invert, less, less_equal, not_equal, equal, greater,
     greater_equal, shape, reshape, arange, sin, sqrt, transpose
     )
-from numpy.compat import long


 class container:
@@ -196,9 +195,6 @@ class container:
     def __int__(self):
         return self._scalarfunc(int)

-    def __long__(self):
-        return self._scalarfunc(long)
-
     def __hex__(self):
         return self._scalarfunc(hex)

@@ -231,6 +227,10 @@ class container:
         ""
         return self.array.tostring()

+    def tobytes(self):
+        ""
+        return self.array.tobytes()
+
     def byteswap(self):
         ""
         return self._rc(self.array.byteswap())
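
The user-visible change in function_base.py above is that np.delete now treats a boolean obj as a mask of the elements to remove (it must be one-dimensional and match the axis length), and the long-deprecated special handling of 0-d arrays and scalar booleans now raises instead of warning. A minimal sketch of the new semantics, not part of the patch and assuming NumPy >= 1.19:

    import numpy as np

    a = np.arange(5)                    # array([0, 1, 2, 3, 4])

    # Boolean indices are a mask of elements to remove, no longer cast to 0/1.
    mask = np.array([True, False, False, True, False])
    print(np.delete(a, mask))           # [1 2 4]

    # The mask must be one-dimensional and match the axis length.
    try:
        np.delete(a, [True, False])
    except ValueError as exc:
        print("ValueError:", exc)

    # Expired deprecation: 0-d input with an axis now raises instead of warning.
    try:
        np.delete(np.array(1), [], axis=0)
    except np.AxisError as exc:
        print("AxisError:", exc)

For a correctly sized mask this is equivalent to a[~mask], which is exactly the fast path (keep = ~obj) taken in the new code.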
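
The test_io.py change works around gh-15775 by moving the body of test_large_zip into a child process, so the several gigabytes it allocates are returned to the operating system when the child exits rather than lingering in the PyPy heap. A rough standalone sketch of that pattern (the function name and the much smaller array size are illustrative, not taken from the patch):

    from multiprocessing import Process

    import numpy as np

    def allocate_and_save():
        # Stand-in for the real test body: build a large temporary array.
        data = np.random.rand(10_000, 1_000)    # ~80 MB, scaled down for the sketch
        print("child allocated", data.nbytes, "bytes")

    if __name__ == "__main__":
        p = Process(target=allocate_and_save)
        p.start()
        p.join()
        # A non-zero exit code means the child crashed or an assertion failed.
        assert p.exitcode == 0

The parent only inspects p.exitcode, mirroring the assert p.exitcode == 0 added to the test.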