author     Eric Wieser <wieser.eric@gmail.com>    2019-11-11 23:05:31 +0000
committer  Eric Wieser <wieser.eric@gmail.com>    2019-11-13 21:45:50 +0000
commit     6dfe3318400972f414b02e3e12c83c52372f1e00 (patch)
tree       f05556eace312e4a86b8e7bbd865a75a4b199fc5 /numpy/lib
parent     231f59daab0e8ea222a21d524817b2eeb46b9cd7 (diff)
download   numpy-6dfe3318400972f414b02e3e12c83c52372f1e00.tar.gz
MAINT: Remove uses of scalar aliases
Relates to gh-6103
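
Background (not part of the patch): the spellings being replaced are module-level aliases for the Python builtins rather than NumPy scalar types, which is what gh-6103 proposes retiring. A minimal sketch of the distinction, assuming a NumPy release contemporary with this commit (the aliases were later deprecated and then removed):

    import numpy as np

    # The deprecated spellings are plain re-exports of the builtins.
    assert np.unicode is str            # alias removed from npyio/test_io here
    assert np.complex is complex        # alias removed from test_ufunclike here

    # The canonical spellings name actual NumPy scalar types.
    assert issubclass(np.unicode_, str)      # NumPy's unicode string scalar
    assert np.complex_ is np.complex128      # double-precision complex scalar

    # Both spell the same dtype, so the substitution is behaviour-preserving.
    a = np.array(['x'], dtype=np.unicode_)
    print(a.dtype)                           # <U1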
Diffstat (limited to 'numpy/lib')
-rw-r--r--  numpy/lib/npyio.py                  4
-rw-r--r--  numpy/lib/tests/test_io.py         26
-rw-r--r--  numpy/lib/tests/test_ufunclike.py   4
3 files changed, 17 insertions, 17 deletions
diff --git a/numpy/lib/npyio.py b/numpy/lib/npyio.py
index 7e1d4db4f..430d44374 100644
--- a/numpy/lib/npyio.py
+++ b/numpy/lib/npyio.py
@@ -1529,9 +1529,9 @@ def fromregex(file, regexp, dtype, encoding=None):
         dtype = np.dtype(dtype)
 
     content = file.read()
-    if isinstance(content, bytes) and isinstance(regexp, np.unicode):
+    if isinstance(content, bytes) and isinstance(regexp, np.compat.unicode):
         regexp = asbytes(regexp)
-    elif isinstance(content, np.unicode) and isinstance(regexp, bytes):
+    elif isinstance(content, np.compat.unicode) and isinstance(regexp, bytes):
         regexp = asstr(regexp)
 
     if not hasattr(regexp, 'match'):
diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py
index 1181fe986..6e2291ca3 100644
--- a/numpy/lib/tests/test_io.py
+++ b/numpy/lib/tests/test_io.py
@@ -518,7 +518,7 @@ class TestSaveTxt(object):
 
     def test_unicode(self):
         utf8 = b'\xcf\x96'.decode('UTF-8')
-        a = np.array([utf8], dtype=np.unicode)
+        a = np.array([utf8], dtype=np.unicode_)
         with tempdir() as tmpdir:
             # set encoding as on windows it may not be unicode even on py3
             np.savetxt(os.path.join(tmpdir, 'test.csv'), a, fmt=['%s'],
@@ -526,7 +526,7 @@ class TestSaveTxt(object):
 
     def test_unicode_roundtrip(self):
         utf8 = b'\xcf\x96'.decode('UTF-8')
-        a = np.array([utf8], dtype=np.unicode)
+        a = np.array([utf8], dtype=np.unicode_)
         # our gz wrapper support encoding
         suffixes = ['', '.gz']
         # stdlib 2 versions do not support encoding
@@ -540,12 +540,12 @@ class TestSaveTxt(object):
                 np.savetxt(os.path.join(tmpdir, 'test.csv' + suffix), a,
                            fmt=['%s'], encoding='UTF-16-LE')
                 b = np.loadtxt(os.path.join(tmpdir, 'test.csv' + suffix),
-                               encoding='UTF-16-LE', dtype=np.unicode)
+                               encoding='UTF-16-LE', dtype=np.unicode_)
                 assert_array_equal(a, b)
 
     def test_unicode_bytestream(self):
         utf8 = b'\xcf\x96'.decode('UTF-8')
-        a = np.array([utf8], dtype=np.unicode)
+        a = np.array([utf8], dtype=np.unicode_)
         s = BytesIO()
         np.savetxt(s, a, fmt=['%s'], encoding='UTF-8')
         s.seek(0)
@@ -553,7 +553,7 @@ class TestSaveTxt(object):
 
     def test_unicode_stringstream(self):
         utf8 = b'\xcf\x96'.decode('UTF-8')
-        a = np.array([utf8], dtype=np.unicode)
+        a = np.array([utf8], dtype=np.unicode_)
         s = StringIO()
         np.savetxt(s, a, fmt=['%s'], encoding='UTF-8')
         s.seek(0)
@@ -632,12 +632,12 @@ class LoadTxtBase(object):
         with temppath() as path:
             with open(path, "wb") as f:
                 f.write(nonascii.encode("UTF-16"))
-            x = self.loadfunc(path, encoding="UTF-16", dtype=np.unicode)
+            x = self.loadfunc(path, encoding="UTF-16", dtype=np.unicode_)
             assert_array_equal(x, nonascii)
 
     def test_binary_decode(self):
         utf16 = b'\xff\xfeh\x04 \x00i\x04 \x00j\x04'
-        v = self.loadfunc(BytesIO(utf16), dtype=np.unicode, encoding='UTF-16')
+        v = self.loadfunc(BytesIO(utf16), dtype=np.unicode_, encoding='UTF-16')
         assert_array_equal(v, np.array(utf16.decode('UTF-16').split()))
 
     def test_converters_decode(self):
@@ -645,7 +645,7 @@ class LoadTxtBase(object):
         c = TextIO()
         c.write(b'\xcf\x96')
         c.seek(0)
-        x = self.loadfunc(c, dtype=np.unicode,
+        x = self.loadfunc(c, dtype=np.unicode_,
                           converters={0: lambda x: x.decode('UTF-8')})
         a = np.array([b'\xcf\x96'.decode('UTF-8')])
         assert_array_equal(x, a)
@@ -656,7 +656,7 @@ class LoadTxtBase(object):
         with temppath() as path:
             with io.open(path, 'wt', encoding='UTF-8') as f:
                 f.write(utf8)
-            x = self.loadfunc(path, dtype=np.unicode,
+            x = self.loadfunc(path, dtype=np.unicode_,
                               converters={0: lambda x: x + 't'},
                               encoding='UTF-8')
             a = np.array([utf8 + 't'])
@@ -1104,7 +1104,7 @@ class TestLoadTxt(LoadTxtBase):
             with open(path, "wb") as f:
                 f.write(butf8)
             with open(path, "rb") as f:
-                x = np.loadtxt(f, encoding="UTF-8", dtype=np.unicode)
+                x = np.loadtxt(f, encoding="UTF-8", dtype=np.unicode_)
             assert_array_equal(x, sutf8)
             # test broken latin1 conversion people now rely on
             with open(path, "rb") as f:
@@ -1587,7 +1587,7 @@ M 33 21.99
             with open(path, 'wb') as f:
                 f.write(b'skip,skip,2001-01-01' + utf8 + b',1.0,skip')
             test = np.genfromtxt(path, delimiter=",", names=None, dtype=float,
-                                 usecols=(2, 3), converters={2: np.unicode},
+                                 usecols=(2, 3), converters={2: np.compat.unicode},
                                  encoding='UTF-8')
             control = np.array([('2001-01-01' + utf8.decode('UTF-8'), 1.)],
                                dtype=[('', '|U11'), ('', float)])
@@ -2126,7 +2126,7 @@ M 33 21.99
         ctl = np.array([
                  ["test1", "testNonethe" + utf8.decode("UTF-8"), "test3"],
                  ["test1", "testNonethe" + utf8.decode("UTF-8"), "test3"]],
-                dtype=np.unicode)
+                dtype=np.unicode_)
         assert_array_equal(test, ctl)
 
         # test a mixed dtype
@@ -2169,7 +2169,7 @@ M 33 21.99
                  ["norm1", "norm2", "norm3"],
                  ["norm1", latin1, "norm3"],
                  ["test1", "testNonethe" + utf8, "test3"]],
-                dtype=np.unicode)
+                dtype=np.unicode_)
         assert_array_equal(test, ctl)
 
     def test_recfromtxt(self):
diff --git a/numpy/lib/tests/test_ufunclike.py b/numpy/lib/tests/test_ufunclike.py
index 0f06876a1..64280616f 100644
--- a/numpy/lib/tests/test_ufunclike.py
+++ b/numpy/lib/tests/test_ufunclike.py
@@ -21,7 +21,7 @@ class TestUfunclike(object):
         assert_equal(res, tgt)
         assert_equal(out, tgt)
 
-        a = a.astype(np.complex)
+        a = a.astype(np.complex_)
         with assert_raises(TypeError):
             ufl.isposinf(a)
 
@@ -36,7 +36,7 @@ class TestUfunclike(object):
         assert_equal(res, tgt)
         assert_equal(out, tgt)
 
-        a = a.astype(np.complex)
+        a = a.astype(np.complex_)
         with assert_raises(TypeError):
             ufl.isneginf(a)
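
The only non-test change above is in fromregex, which normalizes the pattern type to match the file contents. An illustrative usage sketch of that branch (the buffer contents and pattern below are made up, not taken from the commit): a str pattern combined with binary content goes through the np.compat.unicode check and is converted to bytes before matching.

    import numpy as np
    from io import BytesIO

    # Binary contents plus a str (unicode) pattern exercises the branch
    # touched above: fromregex converts the pattern to bytes internally.
    buf = BytesIO(b"1312 foo\n1534 bar\n444 qux\n")
    arr = np.fromregex(buf, r"(\d+)\s+(...)",
                       dtype=[('num', np.int64), ('key', 'S3')])
    print(arr['num'])    # [1312 1534  444]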