author     Eric Wieser <wieser.eric@gmail.com>    2018-07-31 00:41:28 -0700
committer  GitHub <noreply@github.com>            2018-07-31 00:41:28 -0700
commit     7f4579279a6a6aa07df664b901afa36ab3fc5ce0 (patch)
tree       3524c05c661f4948eabf066b46b5ad3aaf6ad617 /numpy/lib/tests
parent     24960daf3e326591047eb099af840da6e95d0910 (diff)
parent     9bb569c4e0e1cf08128179d157bdab10c8706a97 (diff)
download   numpy-7f4579279a6a6aa07df664b901afa36ab3fc5ce0.tar.gz
Merge branch 'master' into ix_-preserve-type
Diffstat (limited to 'numpy/lib/tests')
-rw-r--r--  numpy/lib/tests/__init__.py                0
-rw-r--r--  numpy/lib/tests/test__datasource.py       79
-rw-r--r--  numpy/lib/tests/test__iotools.py         153
-rw-r--r--  numpy/lib/tests/test__version.py           6
-rw-r--r--  numpy/lib/tests/test_arraypad.py          85
-rw-r--r--  numpy/lib/tests/test_arraysetops.py      173
-rw-r--r--  numpy/lib/tests/test_arrayterator.py       4
-rw-r--r--  numpy/lib/tests/test_financial.py        300
-rw-r--r--  numpy/lib/tests/test_format.py            88
-rw-r--r--  numpy/lib/tests/test_function_base.py   1097
-rw-r--r--  numpy/lib/tests/test_histograms.py       745
-rw-r--r--  numpy/lib/tests/test_index_tricks.py     190
-rw-r--r--  numpy/lib/tests/test_io.py               590
-rw-r--r--  numpy/lib/tests/test_mixins.py           213
-rw-r--r--  numpy/lib/tests/test_nanfunctions.py     193
-rw-r--r--  numpy/lib/tests/test_packbits.py          17
-rw-r--r--  numpy/lib/tests/test_polynomial.py       183
-rw-r--r--  numpy/lib/tests/test_recfunctions.py     171
-rw-r--r--  numpy/lib/tests/test_regression.py        75
-rw-r--r--  numpy/lib/tests/test_shape_base.py       205
-rw-r--r--  numpy/lib/tests/test_stride_tricks.py     19
-rw-r--r--  numpy/lib/tests/test_twodim_base.py      206
-rw-r--r--  numpy/lib/tests/test_type_check.py       112
-rw-r--r--  numpy/lib/tests/test_ufunclike.py         43
-rw-r--r--  numpy/lib/tests/test_utils.py             12
25 files changed, 3428 insertions(+), 1531 deletions(-)
diff --git a/numpy/lib/tests/__init__.py b/numpy/lib/tests/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/numpy/lib/tests/__init__.py
diff --git a/numpy/lib/tests/test__datasource.py b/numpy/lib/tests/test__datasource.py
index f4bece352..32812990c 100644
--- a/numpy/lib/tests/test__datasource.py
+++ b/numpy/lib/tests/test__datasource.py
@@ -5,10 +5,7 @@ import sys
from tempfile import mkdtemp, mkstemp, NamedTemporaryFile
from shutil import rmtree
-from numpy.compat import asbytes
-from numpy.testing import (
- run_module_suite, TestCase, assert_, SkipTest
- )
+from numpy.testing import assert_, assert_equal, assert_raises, SkipTest
import numpy.lib._datasource as datasource
if sys.version_info[0] >= 3:
@@ -53,10 +50,10 @@ http_fakefile = 'fake.txt'
malicious_files = ['/etc/shadow', '../../shadow',
'..\\system.dat', 'c:\\windows\\system.dat']
-magic_line = asbytes('three is the magic number')
+magic_line = b'three is the magic number'
-# Utility functions used by many TestCases
+# Utility functions used by many tests
def valid_textfile(filedir):
# Generate and return a valid temporary file.
fd, path = mkstemp(suffix='.txt', prefix='dstmp_', dir=filedir, text=True)
@@ -96,12 +93,12 @@ def invalid_httpfile():
return http_fakefile
-class TestDataSourceOpen(TestCase):
- def setUp(self):
+class TestDataSourceOpen(object):
+ def setup(self):
self.tmpdir = mkdtemp()
self.ds = datasource.DataSource(self.tmpdir)
- def tearDown(self):
+ def teardown(self):
rmtree(self.tmpdir)
del self.ds
@@ -112,7 +109,7 @@ class TestDataSourceOpen(TestCase):
def test_InvalidHTTP(self):
url = invalid_httpurl()
- self.assertRaises(IOError, self.ds.open, url)
+ assert_raises(IOError, self.ds.open, url)
try:
self.ds.open(url)
except IOError as e:
@@ -120,7 +117,7 @@ class TestDataSourceOpen(TestCase):
assert_(e.errno is None)
def test_InvalidHTTPCacheURLError(self):
- self.assertRaises(URLError, self.ds._cache, invalid_httpurl())
+ assert_raises(URLError, self.ds._cache, invalid_httpurl())
def test_ValidFile(self):
local_file = valid_textfile(self.tmpdir)
@@ -130,7 +127,7 @@ class TestDataSourceOpen(TestCase):
def test_InvalidFile(self):
invalid_file = invalid_textfile(self.tmpdir)
- self.assertRaises(IOError, self.ds.open, invalid_file)
+ assert_raises(IOError, self.ds.open, invalid_file)
def test_ValidGzipFile(self):
try:
@@ -146,7 +143,7 @@ class TestDataSourceOpen(TestCase):
fp = self.ds.open(filepath)
result = fp.readline()
fp.close()
- self.assertEqual(magic_line, result)
+ assert_equal(magic_line, result)
def test_ValidBz2File(self):
try:
@@ -162,15 +159,15 @@ class TestDataSourceOpen(TestCase):
fp = self.ds.open(filepath)
result = fp.readline()
fp.close()
- self.assertEqual(magic_line, result)
+ assert_equal(magic_line, result)
-class TestDataSourceExists(TestCase):
- def setUp(self):
+class TestDataSourceExists(object):
+ def setup(self):
self.tmpdir = mkdtemp()
self.ds = datasource.DataSource(self.tmpdir)
- def tearDown(self):
+ def teardown(self):
rmtree(self.tmpdir)
del self.ds
@@ -178,7 +175,7 @@ class TestDataSourceExists(TestCase):
assert_(self.ds.exists(valid_httpurl()))
def test_InvalidHTTP(self):
- self.assertEqual(self.ds.exists(invalid_httpurl()), False)
+ assert_equal(self.ds.exists(invalid_httpurl()), False)
def test_ValidFile(self):
# Test valid file in destpath
@@ -192,15 +189,15 @@ class TestDataSourceExists(TestCase):
def test_InvalidFile(self):
tmpfile = invalid_textfile(self.tmpdir)
- self.assertEqual(self.ds.exists(tmpfile), False)
+ assert_equal(self.ds.exists(tmpfile), False)
-class TestDataSourceAbspath(TestCase):
- def setUp(self):
+class TestDataSourceAbspath(object):
+ def setup(self):
self.tmpdir = os.path.abspath(mkdtemp())
self.ds = datasource.DataSource(self.tmpdir)
- def tearDown(self):
+ def teardown(self):
rmtree(self.tmpdir)
del self.ds
@@ -208,30 +205,30 @@ class TestDataSourceAbspath(TestCase):
scheme, netloc, upath, pms, qry, frg = urlparse(valid_httpurl())
local_path = os.path.join(self.tmpdir, netloc,
upath.strip(os.sep).strip('/'))
- self.assertEqual(local_path, self.ds.abspath(valid_httpurl()))
+ assert_equal(local_path, self.ds.abspath(valid_httpurl()))
def test_ValidFile(self):
tmpfile = valid_textfile(self.tmpdir)
tmpfilename = os.path.split(tmpfile)[-1]
# Test with filename only
- self.assertEqual(tmpfile, self.ds.abspath(tmpfilename))
+ assert_equal(tmpfile, self.ds.abspath(tmpfilename))
# Test filename with complete path
- self.assertEqual(tmpfile, self.ds.abspath(tmpfile))
+ assert_equal(tmpfile, self.ds.abspath(tmpfile))
def test_InvalidHTTP(self):
scheme, netloc, upath, pms, qry, frg = urlparse(invalid_httpurl())
invalidhttp = os.path.join(self.tmpdir, netloc,
upath.strip(os.sep).strip('/'))
- self.assertNotEqual(invalidhttp, self.ds.abspath(valid_httpurl()))
+ assert_(invalidhttp != self.ds.abspath(valid_httpurl()))
def test_InvalidFile(self):
invalidfile = valid_textfile(self.tmpdir)
tmpfile = valid_textfile(self.tmpdir)
tmpfilename = os.path.split(tmpfile)[-1]
# Test with filename only
- self.assertNotEqual(invalidfile, self.ds.abspath(tmpfilename))
+ assert_(invalidfile != self.ds.abspath(tmpfilename))
# Test filename with complete path
- self.assertNotEqual(invalidfile, self.ds.abspath(tmpfile))
+ assert_(invalidfile != self.ds.abspath(tmpfile))
def test_sandboxing(self):
tmpfile = valid_textfile(self.tmpdir)
@@ -260,12 +257,12 @@ class TestDataSourceAbspath(TestCase):
os.sep = orig_os_sep
-class TestRepositoryAbspath(TestCase):
- def setUp(self):
+class TestRepositoryAbspath(object):
+ def setup(self):
self.tmpdir = os.path.abspath(mkdtemp())
self.repos = datasource.Repository(valid_baseurl(), self.tmpdir)
- def tearDown(self):
+ def teardown(self):
rmtree(self.tmpdir)
del self.repos
@@ -274,7 +271,7 @@ class TestRepositoryAbspath(TestCase):
local_path = os.path.join(self.repos._destpath, netloc,
upath.strip(os.sep).strip('/'))
filepath = self.repos.abspath(valid_httpfile())
- self.assertEqual(local_path, filepath)
+ assert_equal(local_path, filepath)
def test_sandboxing(self):
tmp_path = lambda x: os.path.abspath(self.repos.abspath(x))
@@ -293,12 +290,12 @@ class TestRepositoryAbspath(TestCase):
os.sep = orig_os_sep
-class TestRepositoryExists(TestCase):
- def setUp(self):
+class TestRepositoryExists(object):
+ def setup(self):
self.tmpdir = mkdtemp()
self.repos = datasource.Repository(valid_baseurl(), self.tmpdir)
- def tearDown(self):
+ def teardown(self):
rmtree(self.tmpdir)
del self.repos
@@ -309,7 +306,7 @@ class TestRepositoryExists(TestCase):
def test_InvalidFile(self):
tmpfile = invalid_textfile(self.tmpdir)
- self.assertEqual(self.repos.exists(tmpfile), False)
+ assert_equal(self.repos.exists(tmpfile), False)
def test_RemoveHTTPFile(self):
assert_(self.repos.exists(valid_httpurl()))
@@ -326,11 +323,11 @@ class TestRepositoryExists(TestCase):
assert_(self.repos.exists(tmpfile))
-class TestOpenFunc(TestCase):
- def setUp(self):
+class TestOpenFunc(object):
+ def setup(self):
self.tmpdir = mkdtemp()
- def tearDown(self):
+ def teardown(self):
rmtree(self.tmpdir)
def test_DataSourceOpen(self):
@@ -343,7 +340,3 @@ class TestOpenFunc(TestCase):
fp = datasource.open(local_file)
assert_(fp)
fp.close()
-
-
-if __name__ == "__main__":
- run_module_suite()
diff --git a/numpy/lib/tests/test__iotools.py b/numpy/lib/tests/test__iotools.py
index e0a917a21..b4888f1bd 100644
--- a/numpy/lib/tests/test__iotools.py
+++ b/numpy/lib/tests/test__iotools.py
@@ -5,82 +5,86 @@ import time
from datetime import date
import numpy as np
-from numpy.compat import asbytes, asbytes_nested
from numpy.testing import (
- run_module_suite, TestCase, assert_, assert_equal, assert_allclose,
- assert_raises
+ assert_, assert_equal, assert_allclose, assert_raises,
)
from numpy.lib._iotools import (
LineSplitter, NameValidator, StringConverter,
has_nested_fields, easy_dtype, flatten_dtype
)
+from numpy.compat import unicode
-class TestLineSplitter(TestCase):
+class TestLineSplitter(object):
"Tests the LineSplitter class."
def test_no_delimiter(self):
"Test LineSplitter w/o delimiter"
- strg = asbytes(" 1 2 3 4 5 # test")
+ strg = " 1 2 3 4 5 # test"
test = LineSplitter()(strg)
- assert_equal(test, asbytes_nested(['1', '2', '3', '4', '5']))
+ assert_equal(test, ['1', '2', '3', '4', '5'])
test = LineSplitter('')(strg)
- assert_equal(test, asbytes_nested(['1', '2', '3', '4', '5']))
+ assert_equal(test, ['1', '2', '3', '4', '5'])
def test_space_delimiter(self):
"Test space delimiter"
- strg = asbytes(" 1 2 3 4 5 # test")
- test = LineSplitter(asbytes(' '))(strg)
- assert_equal(test, asbytes_nested(['1', '2', '3', '4', '', '5']))
- test = LineSplitter(asbytes(' '))(strg)
- assert_equal(test, asbytes_nested(['1 2 3 4', '5']))
+ strg = " 1 2 3 4 5 # test"
+ test = LineSplitter(' ')(strg)
+ assert_equal(test, ['1', '2', '3', '4', '', '5'])
+ test = LineSplitter(' ')(strg)
+ assert_equal(test, ['1 2 3 4', '5'])
def test_tab_delimiter(self):
"Test tab delimiter"
- strg = asbytes(" 1\t 2\t 3\t 4\t 5 6")
- test = LineSplitter(asbytes('\t'))(strg)
- assert_equal(test, asbytes_nested(['1', '2', '3', '4', '5 6']))
- strg = asbytes(" 1 2\t 3 4\t 5 6")
- test = LineSplitter(asbytes('\t'))(strg)
- assert_equal(test, asbytes_nested(['1 2', '3 4', '5 6']))
+ strg = " 1\t 2\t 3\t 4\t 5 6"
+ test = LineSplitter('\t')(strg)
+ assert_equal(test, ['1', '2', '3', '4', '5 6'])
+ strg = " 1 2\t 3 4\t 5 6"
+ test = LineSplitter('\t')(strg)
+ assert_equal(test, ['1 2', '3 4', '5 6'])
def test_other_delimiter(self):
"Test LineSplitter on delimiter"
- strg = asbytes("1,2,3,4,,5")
- test = LineSplitter(asbytes(','))(strg)
- assert_equal(test, asbytes_nested(['1', '2', '3', '4', '', '5']))
+ strg = "1,2,3,4,,5"
+ test = LineSplitter(',')(strg)
+ assert_equal(test, ['1', '2', '3', '4', '', '5'])
#
- strg = asbytes(" 1,2,3,4,,5 # test")
- test = LineSplitter(asbytes(','))(strg)
- assert_equal(test, asbytes_nested(['1', '2', '3', '4', '', '5']))
+ strg = " 1,2,3,4,,5 # test"
+ test = LineSplitter(',')(strg)
+ assert_equal(test, ['1', '2', '3', '4', '', '5'])
+
+ # gh-11028 bytes comment/delimiters should get encoded
+ strg = b" 1,2,3,4,,5 % test"
+ test = LineSplitter(delimiter=b',', comments=b'%')(strg)
+ assert_equal(test, ['1', '2', '3', '4', '', '5'])
def test_constant_fixed_width(self):
"Test LineSplitter w/ fixed-width fields"
- strg = asbytes(" 1 2 3 4 5 # test")
+ strg = " 1 2 3 4 5 # test"
test = LineSplitter(3)(strg)
- assert_equal(test, asbytes_nested(['1', '2', '3', '4', '', '5', '']))
+ assert_equal(test, ['1', '2', '3', '4', '', '5', ''])
#
- strg = asbytes(" 1 3 4 5 6# test")
+ strg = " 1 3 4 5 6# test"
test = LineSplitter(20)(strg)
- assert_equal(test, asbytes_nested(['1 3 4 5 6']))
+ assert_equal(test, ['1 3 4 5 6'])
#
- strg = asbytes(" 1 3 4 5 6# test")
+ strg = " 1 3 4 5 6# test"
test = LineSplitter(30)(strg)
- assert_equal(test, asbytes_nested(['1 3 4 5 6']))
+ assert_equal(test, ['1 3 4 5 6'])
def test_variable_fixed_width(self):
- strg = asbytes(" 1 3 4 5 6# test")
+ strg = " 1 3 4 5 6# test"
test = LineSplitter((3, 6, 6, 3))(strg)
- assert_equal(test, asbytes_nested(['1', '3', '4 5', '6']))
+ assert_equal(test, ['1', '3', '4 5', '6'])
#
- strg = asbytes(" 1 3 4 5 6# test")
+ strg = " 1 3 4 5 6# test"
test = LineSplitter((6, 6, 9))(strg)
- assert_equal(test, asbytes_nested(['1', '3 4', '5 6']))
+ assert_equal(test, ['1', '3 4', '5 6'])
# -----------------------------------------------------------------------------
-class TestNameValidator(TestCase):
+class TestNameValidator(object):
def test_case_sensitivity(self):
"Test case sensitivity"
@@ -135,13 +139,10 @@ class TestNameValidator(TestCase):
def _bytes_to_date(s):
- if sys.version_info[0] >= 3:
- return date(*time.strptime(s.decode('latin1'), "%Y-%m-%d")[:3])
- else:
- return date(*time.strptime(s, "%Y-%m-%d")[:3])
+ return date(*time.strptime(s, "%Y-%m-%d")[:3])
-class TestStringConverter(TestCase):
+class TestStringConverter(object):
"Test StringConverter"
def test_creation(self):
@@ -157,39 +158,45 @@ class TestStringConverter(TestCase):
assert_equal(converter._status, 0)
# test int
- assert_equal(converter.upgrade(asbytes('0')), 0)
+ assert_equal(converter.upgrade('0'), 0)
assert_equal(converter._status, 1)
- # On systems where integer defaults to 32-bit, the statuses will be
+ # On systems where long defaults to 32-bit, the statuses will be
# offset by one, so we check for this here.
import numpy.core.numeric as nx
- status_offset = int(nx.dtype(nx.integer).itemsize < nx.dtype(nx.int64).itemsize)
+ status_offset = int(nx.dtype(nx.int_).itemsize < nx.dtype(nx.int64).itemsize)
# test int > 2**32
- assert_equal(converter.upgrade(asbytes('17179869184')), 17179869184)
+ assert_equal(converter.upgrade('17179869184'), 17179869184)
assert_equal(converter._status, 1 + status_offset)
# test float
- assert_allclose(converter.upgrade(asbytes('0.')), 0.0)
+ assert_allclose(converter.upgrade('0.'), 0.0)
assert_equal(converter._status, 2 + status_offset)
# test complex
- assert_equal(converter.upgrade(asbytes('0j')), complex('0j'))
+ assert_equal(converter.upgrade('0j'), complex('0j'))
assert_equal(converter._status, 3 + status_offset)
# test str
- assert_equal(converter.upgrade(asbytes('a')), asbytes('a'))
- assert_equal(converter._status, len(converter._mapper) - 1)
+ # note that the longdouble type has been skipped, so the
+ # _status increases by 2. Everything should succeed with
+ # unicode conversion (5).
+ for s in ['a', u'a', b'a']:
+ res = converter.upgrade(s)
+ assert_(type(res) is unicode)
+ assert_equal(res, u'a')
+ assert_equal(converter._status, 5 + status_offset)
def test_missing(self):
"Tests the use of missing values."
- converter = StringConverter(missing_values=(asbytes('missing'),
- asbytes('missed')))
- converter.upgrade(asbytes('0'))
- assert_equal(converter(asbytes('0')), 0)
- assert_equal(converter(asbytes('')), converter.default)
- assert_equal(converter(asbytes('missing')), converter.default)
- assert_equal(converter(asbytes('missed')), converter.default)
+ converter = StringConverter(missing_values=('missing',
+ 'missed'))
+ converter.upgrade('0')
+ assert_equal(converter('0'), 0)
+ assert_equal(converter(''), converter.default)
+ assert_equal(converter('missing'), converter.default)
+ assert_equal(converter('missed'), converter.default)
try:
converter('miss')
except ValueError:
@@ -200,66 +207,67 @@ class TestStringConverter(TestCase):
dateparser = _bytes_to_date
StringConverter.upgrade_mapper(dateparser, date(2000, 1, 1))
convert = StringConverter(dateparser, date(2000, 1, 1))
- test = convert(asbytes('2001-01-01'))
+ test = convert('2001-01-01')
assert_equal(test, date(2001, 1, 1))
- test = convert(asbytes('2009-01-01'))
+ test = convert('2009-01-01')
assert_equal(test, date(2009, 1, 1))
- test = convert(asbytes(''))
+ test = convert('')
assert_equal(test, date(2000, 1, 1))
def test_string_to_object(self):
"Make sure that string-to-object functions are properly recognized"
+ old_mapper = StringConverter._mapper[:] # copy of list
conv = StringConverter(_bytes_to_date)
- assert_equal(conv._mapper[-2][0](0), 0j)
+ assert_equal(conv._mapper, old_mapper)
assert_(hasattr(conv, 'default'))
def test_keep_default(self):
"Make sure we don't lose an explicit default"
- converter = StringConverter(None, missing_values=asbytes(''),
+ converter = StringConverter(None, missing_values='',
default=-999)
- converter.upgrade(asbytes('3.14159265'))
+ converter.upgrade('3.14159265')
assert_equal(converter.default, -999)
assert_equal(converter.type, np.dtype(float))
#
converter = StringConverter(
- None, missing_values=asbytes(''), default=0)
- converter.upgrade(asbytes('3.14159265'))
+ None, missing_values='', default=0)
+ converter.upgrade('3.14159265')
assert_equal(converter.default, 0)
assert_equal(converter.type, np.dtype(float))
def test_keep_default_zero(self):
"Check that we don't lose a default of 0"
converter = StringConverter(int, default=0,
- missing_values=asbytes("N/A"))
+ missing_values="N/A")
assert_equal(converter.default, 0)
def test_keep_missing_values(self):
"Check that we're not losing missing values"
converter = StringConverter(int, default=0,
- missing_values=asbytes("N/A"))
+ missing_values="N/A")
assert_equal(
- converter.missing_values, set(asbytes_nested(['', 'N/A'])))
+ converter.missing_values, set(['', 'N/A']))
def test_int64_dtype(self):
"Check that int64 integer types can be specified"
converter = StringConverter(np.int64, default=0)
- val = asbytes("-9223372036854775807")
+ val = "-9223372036854775807"
assert_(converter(val) == -9223372036854775807)
- val = asbytes("9223372036854775807")
+ val = "9223372036854775807"
assert_(converter(val) == 9223372036854775807)
def test_uint64_dtype(self):
"Check that uint64 integer types can be specified"
converter = StringConverter(np.uint64, default=0)
- val = asbytes("9223372043271415339")
+ val = "9223372043271415339"
assert_(converter(val) == 9223372043271415339)
-class TestMiscFunctions(TestCase):
+class TestMiscFunctions(object):
def test_has_nested_dtype(self):
"Test has_nested_dtype"
- ndtype = np.dtype(np.float)
+ ndtype = np.dtype(float)
assert_equal(has_nested_fields(ndtype), False)
ndtype = np.dtype([('A', '|S3'), ('B', float)])
assert_equal(has_nested_fields(ndtype), False)
@@ -343,6 +351,3 @@ class TestMiscFunctions(TestCase):
dt = np.dtype([(("a", "A"), "f8"), (("b", "B"), "f8")])
dt_flat = flatten_dtype(dt)
assert_equal(dt_flat, [float, float])
-
-if __name__ == "__main__":
- run_module_suite()
diff --git a/numpy/lib/tests/test__version.py b/numpy/lib/tests/test__version.py
index 993c9d507..8e66a0c03 100644
--- a/numpy/lib/tests/test__version.py
+++ b/numpy/lib/tests/test__version.py
@@ -3,7 +3,7 @@
"""
from __future__ import division, absolute_import, print_function
-from numpy.testing import assert_, run_module_suite, assert_raises
+from numpy.testing import assert_, assert_raises
from numpy.lib import NumpyVersion
@@ -64,7 +64,3 @@ def test_dev0_a_b_rc_mixed():
def test_raises():
for ver in ['1.9', '1,9.0', '1.7.x']:
assert_raises(ValueError, NumpyVersion, ver)
-
-
-if __name__ == "__main__":
- run_module_suite()
diff --git a/numpy/lib/tests/test_arraypad.py b/numpy/lib/tests/test_arraypad.py
index d037962e6..45d624781 100644
--- a/numpy/lib/tests/test_arraypad.py
+++ b/numpy/lib/tests/test_arraypad.py
@@ -4,12 +4,11 @@
from __future__ import division, absolute_import, print_function
import numpy as np
-from numpy.testing import (assert_array_equal, assert_raises, assert_allclose,
- TestCase)
+from numpy.testing import (assert_array_equal, assert_raises, assert_allclose,)
from numpy.lib import pad
-class TestConditionalShortcuts(TestCase):
+class TestConditionalShortcuts(object):
def test_zero_padding_shortcuts(self):
test = np.arange(120).reshape(4, 5, 6)
pad_amt = [(0, 0) for axis in test.shape]
@@ -52,7 +51,7 @@ class TestConditionalShortcuts(TestCase):
pad(test, pad_amt, mode=mode, stat_length=30))
-class TestStatistic(TestCase):
+class TestStatistic(object):
def test_check_mean_stat_length(self):
a = np.arange(100).astype('f')
a = pad(a, ((25, 20), ), 'mean', stat_length=((2, 3), ))
@@ -346,7 +345,7 @@ class TestStatistic(TestCase):
assert_array_equal(a, b)
-class TestConstant(TestCase):
+class TestConstant(object):
def test_check_constant(self):
a = np.arange(100)
a = pad(a, (25, 20), 'constant', constant_values=(10, 20))
@@ -490,8 +489,21 @@ class TestConstant(TestCase):
)
assert_allclose(test, expected)
+ def test_check_large_integers(self):
+ uint64_max = 2 ** 64 - 1
+ arr = np.full(5, uint64_max, dtype=np.uint64)
+ test = np.pad(arr, 1, mode="constant", constant_values=arr.min())
+ expected = np.full(7, uint64_max, dtype=np.uint64)
+ assert_array_equal(test, expected)
-class TestLinearRamp(TestCase):
+ int64_max = 2 ** 63 - 1
+ arr = np.full(5, int64_max, dtype=np.int64)
+ test = np.pad(arr, 1, mode="constant", constant_values=arr.min())
+ expected = np.full(7, int64_max, dtype=np.int64)
+ assert_array_equal(test, expected)
+
+
+class TestLinearRamp(object):
def test_check_simple(self):
a = np.arange(100).astype('f')
a = pad(a, (25, 20), 'linear_ramp', end_values=(4, 5))
@@ -531,7 +543,7 @@ class TestLinearRamp(TestCase):
assert_allclose(test, expected)
-class TestReflect(TestCase):
+class TestReflect(object):
def test_check_simple(self):
a = np.arange(100)
a = pad(a, (25, 20), 'reflect')
@@ -640,8 +652,13 @@ class TestReflect(TestCase):
b = np.array([1, 2, 3, 2, 1, 2, 3, 2, 1, 2, 3])
assert_array_equal(a, b)
+ def test_check_padding_an_empty_array(self):
+ a = pad(np.zeros((0, 3)), ((0,), (1,)), mode='reflect')
+ b = np.zeros((0, 5))
+ assert_array_equal(a, b)
+
-class TestSymmetric(TestCase):
+class TestSymmetric(object):
def test_check_simple(self):
a = np.arange(100)
a = pad(a, (25, 20), 'symmetric')
@@ -775,7 +792,7 @@ class TestSymmetric(TestCase):
assert_array_equal(a, b)
-class TestWrap(TestCase):
+class TestWrap(object):
def test_check_simple(self):
a = np.arange(100)
a = pad(a, (25, 20), 'wrap')
@@ -871,7 +888,7 @@ class TestWrap(TestCase):
assert_array_equal(a, b)
-class TestStatLen(TestCase):
+class TestStatLen(object):
def test_check_simple(self):
a = np.arange(30)
a = np.reshape(a, (6, 5))
@@ -894,7 +911,7 @@ class TestStatLen(TestCase):
assert_array_equal(a, b)
-class TestEdge(TestCase):
+class TestEdge(object):
def test_check_simple(self):
a = np.arange(12)
a = np.reshape(a, (4, 3))
@@ -933,7 +950,7 @@ class TestEdge(TestCase):
assert_array_equal(padded, expected)
-class TestZeroPadWidth(TestCase):
+class TestZeroPadWidth(object):
def test_zero_pad_width(self):
arr = np.arange(30)
arr = np.reshape(arr, (6, 5))
@@ -941,7 +958,7 @@ class TestZeroPadWidth(TestCase):
assert_array_equal(arr, pad(arr, pad_width, mode='constant'))
-class TestLegacyVectorFunction(TestCase):
+class TestLegacyVectorFunction(object):
def test_legacy_vector_functionality(self):
def _padwithtens(vector, pad_width, iaxis, kwargs):
vector[:pad_width[0]] = 10
@@ -963,7 +980,7 @@ class TestLegacyVectorFunction(TestCase):
assert_array_equal(a, b)
-class TestNdarrayPadWidth(TestCase):
+class TestNdarrayPadWidth(object):
def test_check_simple(self):
a = np.arange(12)
a = np.reshape(a, (4, 3))
@@ -984,18 +1001,30 @@ class TestNdarrayPadWidth(TestCase):
assert_array_equal(a, b)
-class TestUnicodeInput(TestCase):
+class TestUnicodeInput(object):
def test_unicode_mode(self):
- try:
- constant_mode = unicode('constant')
- except NameError:
- constant_mode = 'constant'
+ constant_mode = u'constant'
a = np.pad([1], 2, mode=constant_mode)
b = np.array([0, 0, 1, 0, 0])
assert_array_equal(a, b)
-class ValueError1(TestCase):
+class TestObjectInput(object):
+ def test_object_input(self):
+ # Regression test for issue gh-11395.
+ a = np.full((4, 3), None)
+ pad_amt = ((2, 3), (3, 2))
+ b = np.full((9, 8), None)
+ modes = ['edge',
+ 'symmetric',
+ 'reflect',
+ 'wrap',
+ ]
+ for mode in modes:
+ assert_array_equal(pad(a, pad_amt, mode=mode), b)
+
+
+class TestValueError1(object):
def test_check_simple(self):
arr = np.arange(30)
arr = np.reshape(arr, (6, 5))
@@ -1017,8 +1046,14 @@ class ValueError1(TestCase):
assert_raises(ValueError, pad, arr, ((-2, 3), (3, 2)),
**kwargs)
+ def test_check_empty_array(self):
+ assert_raises(ValueError, pad, [], 4, mode='reflect')
+ assert_raises(ValueError, pad, np.ndarray(0), 4, mode='reflect')
+ assert_raises(ValueError, pad, np.zeros((0, 3)), ((1,), (0,)),
+ mode='reflect')
-class ValueError2(TestCase):
+
+class TestValueError2(object):
def test_check_negative_pad_amount(self):
arr = np.arange(30)
arr = np.reshape(arr, (6, 5))
@@ -1027,7 +1062,7 @@ class ValueError2(TestCase):
**kwargs)
-class ValueError3(TestCase):
+class TestValueError3(object):
def test_check_kwarg_not_allowed(self):
arr = np.arange(30).reshape(5, 6)
assert_raises(ValueError, pad, arr, 4, mode='mean',
@@ -1055,7 +1090,7 @@ class ValueError3(TestCase):
mode='constant')
-class TypeError1(TestCase):
+class TestTypeError1(object):
def test_float(self):
arr = np.arange(30)
assert_raises(TypeError, pad, arr, ((-2.1, 3), (3, 2)))
@@ -1083,7 +1118,3 @@ class TypeError1(TestCase):
kwargs = dict(mode='mean', stat_length=(3, ))
assert_raises(TypeError, pad, arr, ((2, 3, 4), (3, 2)),
**kwargs)
-
-
-if __name__ == "__main__":
- np.testing.run_module_suite()
diff --git a/numpy/lib/tests/test_arraysetops.py b/numpy/lib/tests/test_arraysetops.py
index 8b142c264..dace5ade8 100644
--- a/numpy/lib/tests/test_arraysetops.py
+++ b/numpy/lib/tests/test_arraysetops.py
@@ -4,15 +4,15 @@
from __future__ import division, absolute_import, print_function
import numpy as np
-from numpy.testing import (
- run_module_suite, TestCase, assert_array_equal, assert_equal, assert_raises
- )
+import sys
+
+from numpy.testing import assert_array_equal, assert_equal, assert_raises
from numpy.lib.arraysetops import (
- ediff1d, intersect1d, setxor1d, union1d, setdiff1d, unique, in1d
+ ediff1d, intersect1d, setxor1d, union1d, setdiff1d, unique, in1d, isin
)
-class TestSetOps(TestCase):
+class TestSetOps(object):
def test_intersect1d(self):
# unique inputs
@@ -32,7 +32,46 @@ class TestSetOps(TestCase):
assert_array_equal(c, ed)
assert_array_equal([], intersect1d([], []))
-
+
+ def test_intersect1d_indices(self):
+ # unique inputs
+ a = np.array([1, 2, 3, 4])
+ b = np.array([2, 1, 4, 6])
+ c, i1, i2 = intersect1d(a, b, assume_unique=True, return_indices=True)
+ ee = np.array([1, 2, 4])
+ assert_array_equal(c, ee)
+ assert_array_equal(a[i1], ee)
+ assert_array_equal(b[i2], ee)
+
+ # non-unique inputs
+ a = np.array([1, 2, 2, 3, 4, 3, 2])
+ b = np.array([1, 8, 4, 2, 2, 3, 2, 3])
+ c, i1, i2 = intersect1d(a, b, return_indices=True)
+ ef = np.array([1, 2, 3, 4])
+ assert_array_equal(c, ef)
+ assert_array_equal(a[i1], ef)
+ assert_array_equal(b[i2], ef)
+
+ # non1d, unique inputs
+ a = np.array([[2, 4, 5, 6], [7, 8, 1, 15]])
+ b = np.array([[3, 2, 7, 6], [10, 12, 8, 9]])
+ c, i1, i2 = intersect1d(a, b, assume_unique=True, return_indices=True)
+ ui1 = np.unravel_index(i1, a.shape)
+ ui2 = np.unravel_index(i2, b.shape)
+ ea = np.array([2, 6, 7, 8])
+ assert_array_equal(ea, a[ui1])
+ assert_array_equal(ea, b[ui2])
+
+ # non1d, not assumed to be uniqueinputs
+ a = np.array([[2, 4, 5, 6, 6], [4, 7, 8, 7, 2]])
+ b = np.array([[3, 2, 7, 7], [10, 12, 8, 7]])
+ c, i1, i2 = intersect1d(a, b, return_indices=True)
+ ui1 = np.unravel_index(i1, a.shape)
+ ui2 = np.unravel_index(i2, b.shape)
+ ea = np.array([2, 7, 8])
+ assert_array_equal(ea, a[ui1])
+ assert_array_equal(ea, b[ui2])
+
def test_setxor1d(self):
a = np.array([5, 7, 1, 2])
b = np.array([2, 4, 3, 1, 5])
@@ -74,8 +113,46 @@ class TestSetOps(TestCase):
assert_array_equal([1,7,8], ediff1d(two_elem, to_end=[7,8]))
assert_array_equal([7,1], ediff1d(two_elem, to_begin=7))
assert_array_equal([5,6,1], ediff1d(two_elem, to_begin=[5,6]))
- assert(isinstance(ediff1d(np.matrix(1)), np.matrix))
- assert(isinstance(ediff1d(np.matrix(1), to_begin=1), np.matrix))
+
+ def test_isin(self):
+ # the tests for in1d cover most of isin's behavior
+ # if in1d is removed, would need to change those tests to test
+ # isin instead.
+ def _isin_slow(a, b):
+ b = np.asarray(b).flatten().tolist()
+ return a in b
+ isin_slow = np.vectorize(_isin_slow, otypes=[bool], excluded={1})
+ def assert_isin_equal(a, b):
+ x = isin(a, b)
+ y = isin_slow(a, b)
+ assert_array_equal(x, y)
+
+ #multidimensional arrays in both arguments
+ a = np.arange(24).reshape([2, 3, 4])
+ b = np.array([[10, 20, 30], [0, 1, 3], [11, 22, 33]])
+ assert_isin_equal(a, b)
+
+ #array-likes as both arguments
+ c = [(9, 8), (7, 6)]
+ d = (9, 7)
+ assert_isin_equal(c, d)
+
+ #zero-d array:
+ f = np.array(3)
+ assert_isin_equal(f, b)
+ assert_isin_equal(a, f)
+ assert_isin_equal(f, f)
+
+ #scalar:
+ assert_isin_equal(5, b)
+ assert_isin_equal(a, 6)
+ assert_isin_equal(5, 6)
+
+ #empty array-like:
+ x = []
+ assert_isin_equal(x, b)
+ assert_isin_equal(a, x)
+ assert_isin_equal(x, x)
def test_in1d(self):
# we use two different sizes for the b array here to test the
@@ -168,6 +245,37 @@ class TestSetOps(TestCase):
assert_array_equal(in1d(a, long_b, assume_unique=True), ec)
assert_array_equal(in1d(a, long_b, assume_unique=False), ec)
+ def test_in1d_first_array_is_object(self):
+ ar1 = [None]
+ ar2 = np.array([1]*10)
+ expected = np.array([False])
+ result = np.in1d(ar1, ar2)
+ assert_array_equal(result, expected)
+
+ def test_in1d_second_array_is_object(self):
+ ar1 = 1
+ ar2 = np.array([None]*10)
+ expected = np.array([False])
+ result = np.in1d(ar1, ar2)
+ assert_array_equal(result, expected)
+
+ def test_in1d_both_arrays_are_object(self):
+ ar1 = [None]
+ ar2 = np.array([None]*10)
+ expected = np.array([True])
+ result = np.in1d(ar1, ar2)
+ assert_array_equal(result, expected)
+
+ def test_in1d_both_arrays_have_structured_dtype(self):
+ # Test arrays of a structured data type containing an integer field
+ # and a field of dtype `object` allowing for arbitrary Python objects
+ dt = np.dtype([('field1', int), ('field2', object)])
+ ar1 = np.array([(1, None)], dtype=dt)
+ ar2 = np.array([(1, None)]*10, dtype=dt)
+ expected = np.array([True])
+ result = np.in1d(ar1, ar2)
+ assert_array_equal(result, expected)
+
def test_union1d(self):
a = np.array([5, 4, 7, 1, 2])
b = np.array([2, 4, 3, 3, 2, 1, 5])
@@ -176,6 +284,14 @@ class TestSetOps(TestCase):
c = union1d(a, b)
assert_array_equal(c, ec)
+ # Tests gh-10340, arguments to union1d should be
+ # flattened if they are not already 1D
+ x = np.array([[0, 1, 2], [3, 4, 5]])
+ y = np.array([0, 1, 2, 3, 4])
+ ez = np.array([0, 1, 2, 3, 4, 5])
+ z = union1d(x, y)
+ assert_array_equal(z, ez)
+
assert_array_equal([], union1d([], []))
def test_setdiff1d(self):
@@ -212,7 +328,7 @@ class TestSetOps(TestCase):
assert_array_equal(c1, c2)
-class TestUnique(TestCase):
+class TestUnique(object):
def test_unique_1d(self):
@@ -315,13 +431,23 @@ class TestUnique(TestCase):
a2, a2_inv = np.unique(a, return_inverse=True)
assert_array_equal(a2_inv, np.zeros(5))
+ # test for ticket #9137
+ a = []
+ a1_idx = np.unique(a, return_index=True)[1]
+ a2_inv = np.unique(a, return_inverse=True)[1]
+ a3_idx, a3_inv = np.unique(a, return_index=True, return_inverse=True)[1:]
+ assert_equal(a1_idx.dtype, np.intp)
+ assert_equal(a2_inv.dtype, np.intp)
+ assert_equal(a3_idx.dtype, np.intp)
+ assert_equal(a3_inv.dtype, np.intp)
+
def test_unique_axis_errors(self):
assert_raises(TypeError, self._run_axis_tests, object)
assert_raises(TypeError, self._run_axis_tests,
[('a', int), ('b', object)])
- assert_raises(ValueError, unique, np.arange(10), axis=2)
- assert_raises(ValueError, unique, np.arange(10), axis=-2)
+ assert_raises(np.AxisError, unique, np.arange(10), axis=2)
+ assert_raises(np.AxisError, unique, np.arange(10), axis=-2)
def test_unique_axis_list(self):
msg = "Unique failed on list of lists"
@@ -352,6 +478,27 @@ class TestUnique(TestCase):
result = np.array([[-0.0, 0.0]])
assert_array_equal(unique(data, axis=0), result, msg)
+ def test_unique_masked(self):
+ # issue 8664
+ x = np.array([64, 0, 1, 2, 3, 63, 63, 0, 0, 0, 1, 2, 0, 63, 0], dtype='uint8')
+ y = np.ma.masked_equal(x, 0)
+
+ v = np.unique(y)
+ v2, i, c = np.unique(y, return_index=True, return_counts=True)
+
+ msg = 'Unique returned different results when asked for index'
+ assert_array_equal(v.data, v2.data, msg)
+ assert_array_equal(v.mask, v2.mask, msg)
+
+ def test_unique_sort_order_with_axis(self):
+ # These tests fail if sorting along axis is done by treating subarrays
+ # as unsigned byte strings. See gh-10495.
+ fmt = "sort order incorrect for integer type '%s'"
+ for dt in 'bhilq':
+ a = np.array([[-1],[0]], dt)
+ b = np.unique(a, axis=0)
+ assert_array_equal(a, b, fmt % dt)
+
def _run_axis_tests(self, dtype):
data = np.array([[0, 1, 0, 0],
[1, 0, 0, 0],
@@ -392,7 +539,3 @@ class TestUnique(TestCase):
assert_array_equal(uniq[:, inv], data)
msg = "Unique's return_counts=True failed with axis=1"
assert_array_equal(cnt, np.array([2, 1, 1]), msg)
-
-
-if __name__ == "__main__":
- run_module_suite()
diff --git a/numpy/lib/tests/test_arrayterator.py b/numpy/lib/tests/test_arrayterator.py
index 64ad7f4de..2ce4456a5 100644
--- a/numpy/lib/tests/test_arrayterator.py
+++ b/numpy/lib/tests/test_arrayterator.py
@@ -46,7 +46,3 @@ def test():
# Check that all elements are iterated correctly
assert_(list(c.flat) == list(d.flat))
-
-if __name__ == '__main__':
- from numpy.testing import run_module_suite
- run_module_suite()
diff --git a/numpy/lib/tests/test_financial.py b/numpy/lib/tests/test_financial.py
index cc8ba55e5..524915041 100644
--- a/numpy/lib/tests/test_financial.py
+++ b/numpy/lib/tests/test_financial.py
@@ -1,16 +1,22 @@
from __future__ import division, absolute_import, print_function
+from decimal import Decimal
+
import numpy as np
from numpy.testing import (
- run_module_suite, TestCase, assert_, assert_almost_equal,
- assert_allclose, assert_equal
+ assert_, assert_almost_equal, assert_allclose, assert_equal, assert_raises
)
-class TestFinancial(TestCase):
+class TestFinancial(object):
def test_rate(self):
- assert_almost_equal(np.rate(10, 0, -3500, 10000),
- 0.1107, 4)
+ assert_almost_equal(
+ np.rate(10, 0, -3500, 10000),
+ 0.1107, 4)
+
+ def test_rate_decimal(self):
+ rate = np.rate(Decimal('10'), Decimal('0'), Decimal('-3500'), Decimal('10000'))
+ assert_equal(Decimal('0.1106908537142689284704528100'), rate)
def test_irr(self):
v = [-150000, 15000, 25000, 35000, 45000, 60000]
@@ -34,28 +40,84 @@ class TestFinancial(TestCase):
def test_pv(self):
assert_almost_equal(np.pv(0.07, 20, 12000, 0), -127128.17, 2)
+ def test_pv_decimal(self):
+ assert_equal(np.pv(Decimal('0.07'), Decimal('20'), Decimal('12000'), Decimal('0')),
+ Decimal('-127128.1709461939327295222005'))
+
def test_fv(self):
- assert_almost_equal(np.fv(0.075, 20, -2000, 0, 0), 86609.36, 2)
+ assert_equal(np.fv(0.075, 20, -2000, 0, 0), 86609.362673042924)
+
+ def test_fv_decimal(self):
+ assert_equal(np.fv(Decimal('0.075'), Decimal('20'), Decimal('-2000'), 0, 0),
+ Decimal('86609.36267304300040536731624'))
def test_pmt(self):
- res = np.pmt(0.08/12, 5*12, 15000)
+ res = np.pmt(0.08 / 12, 5 * 12, 15000)
tgt = -304.145914
assert_allclose(res, tgt)
# Test the edge case where rate == 0.0
- res = np.pmt(0.0, 5*12, 15000)
+ res = np.pmt(0.0, 5 * 12, 15000)
tgt = -250.0
assert_allclose(res, tgt)
# Test the case where we use broadcast and
# the arguments passed in are arrays.
- res = np.pmt([[0.0, 0.8],[0.3, 0.8]],[12, 3],[2000, 20000])
- tgt = np.array([[-166.66667, -19311.258],[-626.90814, -19311.258]])
+ res = np.pmt([[0.0, 0.8], [0.3, 0.8]], [12, 3], [2000, 20000])
+ tgt = np.array([[-166.66667, -19311.258], [-626.90814, -19311.258]])
assert_allclose(res, tgt)
+ def test_pmt_decimal(self):
+ res = np.pmt(Decimal('0.08') / Decimal('12'), 5 * 12, 15000)
+ tgt = Decimal('-304.1459143262052370338701494')
+ assert_equal(res, tgt)
+ # Test the edge case where rate == 0.0
+ res = np.pmt(Decimal('0'), Decimal('60'), Decimal('15000'))
+ tgt = -250
+ assert_equal(res, tgt)
+ # Test the case where we use broadcast and
+ # the arguments passed in are arrays.
+ res = np.pmt([[Decimal('0'), Decimal('0.8')], [Decimal('0.3'), Decimal('0.8')]],
+ [Decimal('12'), Decimal('3')], [Decimal('2000'), Decimal('20000')])
+ tgt = np.array([[Decimal('-166.6666666666666666666666667'), Decimal('-19311.25827814569536423841060')],
+ [Decimal('-626.9081401700757748402586600'), Decimal('-19311.25827814569536423841060')]])
+
+ # Cannot use the `assert_allclose` because it uses isfinite under the covers
+ # which does not support the Decimal type
+ # See issue: https://github.com/numpy/numpy/issues/9954
+ assert_equal(res[0][0], tgt[0][0])
+ assert_equal(res[0][1], tgt[0][1])
+ assert_equal(res[1][0], tgt[1][0])
+ assert_equal(res[1][1], tgt[1][1])
+
def test_ppmt(self):
- np.round(np.ppmt(0.1/12, 1, 60, 55000), 2) == 710.25
+ assert_equal(np.round(np.ppmt(0.1 / 12, 1, 60, 55000), 2), -710.25)
+
+ def test_ppmt_decimal(self):
+ assert_equal(np.ppmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('60'), Decimal('55000')),
+ Decimal('-710.2541257864217612489830917'))
+
+ # Two tests showing how Decimal is actually getting at a more exact result
+ # .23 / 12 does not come out nicely as a float but does as a decimal
+ def test_ppmt_special_rate(self):
+ assert_equal(np.round(np.ppmt(0.23 / 12, 1, 60, 10000000000), 8), -90238044.232277036)
+
+ def test_ppmt_special_rate_decimal(self):
+ # When rounded out to 8 decimal places like the float based test, this should not equal the same value
+ # as the float, substituted for the decimal
+ def raise_error_because_not_equal():
+ assert_equal(
+ round(np.ppmt(Decimal('0.23') / Decimal('12'), 1, 60, Decimal('10000000000')), 8),
+ Decimal('-90238044.232277036'))
+
+ assert_raises(AssertionError, raise_error_because_not_equal)
+ assert_equal(np.ppmt(Decimal('0.23') / Decimal('12'), 1, 60, Decimal('10000000000')),
+ Decimal('-90238044.2322778884413969909'))
def test_ipmt(self):
- np.round(np.ipmt(0.1/12, 1, 24, 2000), 2) == 16.67
+ assert_almost_equal(np.round(np.ipmt(0.1 / 12, 1, 24, 2000), 2), -16.67)
+
+ def test_ipmt_decimal(self):
+ result = np.ipmt(Decimal('0.1') / Decimal('12'), 1, 24, 2000)
+ assert_equal(result.flat[0], Decimal('-16.66666666666666666666666667'))
def test_nper(self):
assert_almost_equal(np.nper(0.075, -2000, 0, 100000.),
@@ -70,6 +132,11 @@ class TestFinancial(TestCase):
np.npv(0.05, [-15000, 1500, 2500, 3500, 4500, 6000]),
122.89, 2)
+ def test_npv_decimal(self):
+ assert_equal(
+ np.npv(Decimal('0.05'), [-15000, 1500, 2500, 3500, 4500, 6000]),
+ Decimal('122.894854950942692161628715'))
+
def test_mirr(self):
val = [-4500, -800, 800, 800, 600, 600, 800, 800, 700, 3000]
assert_almost_equal(np.mirr(val, 0.08, 0.055), 0.0666, 4)
@@ -83,86 +150,191 @@ class TestFinancial(TestCase):
val = [39000, 30000, 21000, 37000, 46000]
assert_(np.isnan(np.mirr(val, 0.10, 0.12)))
+ def test_mirr_decimal(self):
+ val = [Decimal('-4500'), Decimal('-800'), Decimal('800'), Decimal('800'),
+ Decimal('600'), Decimal('600'), Decimal('800'), Decimal('800'),
+ Decimal('700'), Decimal('3000')]
+ assert_equal(np.mirr(val, Decimal('0.08'), Decimal('0.055')),
+ Decimal('0.066597175031553548874239618'))
+
+ val = [Decimal('-120000'), Decimal('39000'), Decimal('30000'),
+ Decimal('21000'), Decimal('37000'), Decimal('46000')]
+ assert_equal(np.mirr(val, Decimal('0.10'), Decimal('0.12')), Decimal('0.126094130365905145828421880'))
+
+ val = [Decimal('100'), Decimal('200'), Decimal('-50'),
+ Decimal('300'), Decimal('-200')]
+ assert_equal(np.mirr(val, Decimal('0.05'), Decimal('0.06')), Decimal('0.342823387842176663647819868'))
+
+ val = [Decimal('39000'), Decimal('30000'), Decimal('21000'), Decimal('37000'), Decimal('46000')]
+ assert_(np.isnan(np.mirr(val, Decimal('0.10'), Decimal('0.12'))))
+
def test_when(self):
- #begin
- assert_almost_equal(np.rate(10, 20, -3500, 10000, 1),
- np.rate(10, 20, -3500, 10000, 'begin'), 4)
- #end
- assert_almost_equal(np.rate(10, 20, -3500, 10000),
- np.rate(10, 20, -3500, 10000, 'end'), 4)
- assert_almost_equal(np.rate(10, 20, -3500, 10000, 0),
- np.rate(10, 20, -3500, 10000, 'end'), 4)
+ # begin
+ assert_equal(np.rate(10, 20, -3500, 10000, 1),
+ np.rate(10, 20, -3500, 10000, 'begin'))
+ # end
+ assert_equal(np.rate(10, 20, -3500, 10000),
+ np.rate(10, 20, -3500, 10000, 'end'))
+ assert_equal(np.rate(10, 20, -3500, 10000, 0),
+ np.rate(10, 20, -3500, 10000, 'end'))
# begin
- assert_almost_equal(np.pv(0.07, 20, 12000, 0, 1),
- np.pv(0.07, 20, 12000, 0, 'begin'), 2)
+ assert_equal(np.pv(0.07, 20, 12000, 0, 1),
+ np.pv(0.07, 20, 12000, 0, 'begin'))
# end
- assert_almost_equal(np.pv(0.07, 20, 12000, 0),
- np.pv(0.07, 20, 12000, 0, 'end'), 2)
- assert_almost_equal(np.pv(0.07, 20, 12000, 0, 0),
- np.pv(0.07, 20, 12000, 0, 'end'), 2)
+ assert_equal(np.pv(0.07, 20, 12000, 0),
+ np.pv(0.07, 20, 12000, 0, 'end'))
+ assert_equal(np.pv(0.07, 20, 12000, 0, 0),
+ np.pv(0.07, 20, 12000, 0, 'end'))
# begin
- assert_almost_equal(np.fv(0.075, 20, -2000, 0, 1),
- np.fv(0.075, 20, -2000, 0, 'begin'), 4)
+ assert_equal(np.fv(0.075, 20, -2000, 0, 1),
+ np.fv(0.075, 20, -2000, 0, 'begin'))
# end
- assert_almost_equal(np.fv(0.075, 20, -2000, 0),
- np.fv(0.075, 20, -2000, 0, 'end'), 4)
- assert_almost_equal(np.fv(0.075, 20, -2000, 0, 0),
- np.fv(0.075, 20, -2000, 0, 'end'), 4)
+ assert_equal(np.fv(0.075, 20, -2000, 0),
+ np.fv(0.075, 20, -2000, 0, 'end'))
+ assert_equal(np.fv(0.075, 20, -2000, 0, 0),
+ np.fv(0.075, 20, -2000, 0, 'end'))
# begin
- assert_almost_equal(np.pmt(0.08/12, 5*12, 15000., 0, 1),
- np.pmt(0.08/12, 5*12, 15000., 0, 'begin'), 4)
+ assert_equal(np.pmt(0.08 / 12, 5 * 12, 15000., 0, 1),
+ np.pmt(0.08 / 12, 5 * 12, 15000., 0, 'begin'))
# end
- assert_almost_equal(np.pmt(0.08/12, 5*12, 15000., 0),
- np.pmt(0.08/12, 5*12, 15000., 0, 'end'), 4)
- assert_almost_equal(np.pmt(0.08/12, 5*12, 15000., 0, 0),
- np.pmt(0.08/12, 5*12, 15000., 0, 'end'), 4)
+ assert_equal(np.pmt(0.08 / 12, 5 * 12, 15000., 0),
+ np.pmt(0.08 / 12, 5 * 12, 15000., 0, 'end'))
+ assert_equal(np.pmt(0.08 / 12, 5 * 12, 15000., 0, 0),
+ np.pmt(0.08 / 12, 5 * 12, 15000., 0, 'end'))
# begin
- assert_almost_equal(np.ppmt(0.1/12, 1, 60, 55000, 0, 1),
- np.ppmt(0.1/12, 1, 60, 55000, 0, 'begin'), 4)
+ assert_equal(np.ppmt(0.1 / 12, 1, 60, 55000, 0, 1),
+ np.ppmt(0.1 / 12, 1, 60, 55000, 0, 'begin'))
# end
- assert_almost_equal(np.ppmt(0.1/12, 1, 60, 55000, 0),
- np.ppmt(0.1/12, 1, 60, 55000, 0, 'end'), 4)
- assert_almost_equal(np.ppmt(0.1/12, 1, 60, 55000, 0, 0),
- np.ppmt(0.1/12, 1, 60, 55000, 0, 'end'), 4)
+ assert_equal(np.ppmt(0.1 / 12, 1, 60, 55000, 0),
+ np.ppmt(0.1 / 12, 1, 60, 55000, 0, 'end'))
+ assert_equal(np.ppmt(0.1 / 12, 1, 60, 55000, 0, 0),
+ np.ppmt(0.1 / 12, 1, 60, 55000, 0, 'end'))
# begin
- assert_almost_equal(np.ipmt(0.1/12, 1, 24, 2000, 0, 1),
- np.ipmt(0.1/12, 1, 24, 2000, 0, 'begin'), 4)
+ assert_equal(np.ipmt(0.1 / 12, 1, 24, 2000, 0, 1),
+ np.ipmt(0.1 / 12, 1, 24, 2000, 0, 'begin'))
# end
- assert_almost_equal(np.ipmt(0.1/12, 1, 24, 2000, 0),
- np.ipmt(0.1/12, 1, 24, 2000, 0, 'end'), 4)
- assert_almost_equal(np.ipmt(0.1/12, 1, 24, 2000, 0, 0),
- np.ipmt(0.1/12, 1, 24, 2000, 0, 'end'), 4)
+ assert_equal(np.ipmt(0.1 / 12, 1, 24, 2000, 0),
+ np.ipmt(0.1 / 12, 1, 24, 2000, 0, 'end'))
+ assert_equal(np.ipmt(0.1 / 12, 1, 24, 2000, 0, 0),
+ np.ipmt(0.1 / 12, 1, 24, 2000, 0, 'end'))
# begin
- assert_almost_equal(np.nper(0.075, -2000, 0, 100000., 1),
- np.nper(0.075, -2000, 0, 100000., 'begin'), 4)
+ assert_equal(np.nper(0.075, -2000, 0, 100000., 1),
+ np.nper(0.075, -2000, 0, 100000., 'begin'))
# end
- assert_almost_equal(np.nper(0.075, -2000, 0, 100000.),
- np.nper(0.075, -2000, 0, 100000., 'end'), 4)
- assert_almost_equal(np.nper(0.075, -2000, 0, 100000., 0),
- np.nper(0.075, -2000, 0, 100000., 'end'), 4)
+ assert_equal(np.nper(0.075, -2000, 0, 100000.),
+ np.nper(0.075, -2000, 0, 100000., 'end'))
+ assert_equal(np.nper(0.075, -2000, 0, 100000., 0),
+ np.nper(0.075, -2000, 0, 100000., 'end'))
+
+ def test_decimal_with_when(self):
+ """Test that decimals are still supported if the when argument is passed"""
+ # begin
+ assert_equal(np.rate(Decimal('10'), Decimal('20'), Decimal('-3500'), Decimal('10000'), Decimal('1')),
+ np.rate(Decimal('10'), Decimal('20'), Decimal('-3500'), Decimal('10000'), 'begin'))
+ # end
+ assert_equal(np.rate(Decimal('10'), Decimal('20'), Decimal('-3500'), Decimal('10000')),
+ np.rate(Decimal('10'), Decimal('20'), Decimal('-3500'), Decimal('10000'), 'end'))
+ assert_equal(np.rate(Decimal('10'), Decimal('20'), Decimal('-3500'), Decimal('10000'), Decimal('0')),
+ np.rate(Decimal('10'), Decimal('20'), Decimal('-3500'), Decimal('10000'), 'end'))
+
+ # begin
+ assert_equal(np.pv(Decimal('0.07'), Decimal('20'), Decimal('12000'), Decimal('0'), Decimal('1')),
+ np.pv(Decimal('0.07'), Decimal('20'), Decimal('12000'), Decimal('0'), 'begin'))
+ # end
+ assert_equal(np.pv(Decimal('0.07'), Decimal('20'), Decimal('12000'), Decimal('0')),
+ np.pv(Decimal('0.07'), Decimal('20'), Decimal('12000'), Decimal('0'), 'end'))
+ assert_equal(np.pv(Decimal('0.07'), Decimal('20'), Decimal('12000'), Decimal('0'), Decimal('0')),
+ np.pv(Decimal('0.07'), Decimal('20'), Decimal('12000'), Decimal('0'), 'end'))
+
+ # begin
+ assert_equal(np.fv(Decimal('0.075'), Decimal('20'), Decimal('-2000'), Decimal('0'), Decimal('1')),
+ np.fv(Decimal('0.075'), Decimal('20'), Decimal('-2000'), Decimal('0'), 'begin'))
+ # end
+ assert_equal(np.fv(Decimal('0.075'), Decimal('20'), Decimal('-2000'), Decimal('0')),
+ np.fv(Decimal('0.075'), Decimal('20'), Decimal('-2000'), Decimal('0'), 'end'))
+ assert_equal(np.fv(Decimal('0.075'), Decimal('20'), Decimal('-2000'), Decimal('0'), Decimal('0')),
+ np.fv(Decimal('0.075'), Decimal('20'), Decimal('-2000'), Decimal('0'), 'end'))
+
+ # begin
+ assert_equal(np.pmt(Decimal('0.08') / Decimal('12'), Decimal('5') * Decimal('12'), Decimal('15000.'),
+ Decimal('0'), Decimal('1')),
+ np.pmt(Decimal('0.08') / Decimal('12'), Decimal('5') * Decimal('12'), Decimal('15000.'),
+ Decimal('0'), 'begin'))
+ # end
+ assert_equal(np.pmt(Decimal('0.08') / Decimal('12'), Decimal('5') * Decimal('12'), Decimal('15000.'),
+ Decimal('0')),
+ np.pmt(Decimal('0.08') / Decimal('12'), Decimal('5') * Decimal('12'), Decimal('15000.'),
+ Decimal('0'), 'end'))
+ assert_equal(np.pmt(Decimal('0.08') / Decimal('12'), Decimal('5') * Decimal('12'), Decimal('15000.'),
+ Decimal('0'), Decimal('0')),
+ np.pmt(Decimal('0.08') / Decimal('12'), Decimal('5') * Decimal('12'), Decimal('15000.'),
+ Decimal('0'), 'end'))
+
+ # begin
+ assert_equal(np.ppmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('60'), Decimal('55000'),
+ Decimal('0'), Decimal('1')),
+ np.ppmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('60'), Decimal('55000'),
+ Decimal('0'), 'begin'))
+ # end
+ assert_equal(np.ppmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('60'), Decimal('55000'),
+ Decimal('0')),
+ np.ppmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('60'), Decimal('55000'),
+ Decimal('0'), 'end'))
+ assert_equal(np.ppmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('60'), Decimal('55000'),
+ Decimal('0'), Decimal('0')),
+ np.ppmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('60'), Decimal('55000'),
+ Decimal('0'), 'end'))
+
+ # begin
+ assert_equal(np.ipmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('24'), Decimal('2000'),
+ Decimal('0'), Decimal('1')).flat[0],
+ np.ipmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('24'), Decimal('2000'),
+ Decimal('0'), 'begin').flat[0])
+ # end
+ assert_equal(np.ipmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('24'), Decimal('2000'),
+ Decimal('0')).flat[0],
+ np.ipmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('24'), Decimal('2000'),
+ Decimal('0'), 'end').flat[0])
+ assert_equal(np.ipmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('24'), Decimal('2000'),
+ Decimal('0'), Decimal('0')).flat[0],
+ np.ipmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('24'), Decimal('2000'),
+ Decimal('0'), 'end').flat[0])
def test_broadcast(self):
assert_almost_equal(np.nper(0.075, -2000, 0, 100000., [0, 1]),
[21.5449442, 20.76156441], 4)
- assert_almost_equal(np.ipmt(0.1/12, list(range(5)), 24, 2000),
+ assert_almost_equal(np.ipmt(0.1 / 12, list(range(5)), 24, 2000),
[-17.29165168, -16.66666667, -16.03647345,
- -15.40102862, -14.76028842], 4)
+ -15.40102862, -14.76028842], 4)
- assert_almost_equal(np.ppmt(0.1/12, list(range(5)), 24, 2000),
+ assert_almost_equal(np.ppmt(0.1 / 12, list(range(5)), 24, 2000),
[-74.998201, -75.62318601, -76.25337923,
- -76.88882405, -77.52956425], 4)
+ -76.88882405, -77.52956425], 4)
- assert_almost_equal(np.ppmt(0.1/12, list(range(5)), 24, 2000, 0,
+ assert_almost_equal(np.ppmt(0.1 / 12, list(range(5)), 24, 2000, 0,
[0, 0, 1, 'end', 'begin']),
[-74.998201, -75.62318601, -75.62318601,
- -76.88882405, -76.88882405], 4)
+ -76.88882405, -76.88882405], 4)
+
+ def test_broadcast_decimal(self):
+ # Use almost equal because precision is tested in the explicit tests, this test is to ensure
+ # broadcast with Decimal is not broken.
+ assert_almost_equal(np.ipmt(Decimal('0.1') / Decimal('12'), list(range(5)), Decimal('24'), Decimal('2000')),
+ [Decimal('-17.29165168'), Decimal('-16.66666667'), Decimal('-16.03647345'),
+ Decimal('-15.40102862'), Decimal('-14.76028842')], 4)
+
+ assert_almost_equal(np.ppmt(Decimal('0.1') / Decimal('12'), list(range(5)), Decimal('24'), Decimal('2000')),
+ [Decimal('-74.998201'), Decimal('-75.62318601'), Decimal('-76.25337923'),
+ Decimal('-76.88882405'), Decimal('-77.52956425')], 4)
-if __name__ == "__main__":
- run_module_suite()
+ assert_almost_equal(np.ppmt(Decimal('0.1') / Decimal('12'), list(range(5)), Decimal('24'), Decimal('2000'),
+ Decimal('0'), [Decimal('0'), Decimal('0'), Decimal('1'), 'end', 'begin']),
+ [Decimal('-74.998201'), Decimal('-75.62318601'), Decimal('-75.62318601'),
+ Decimal('-76.88882405'), Decimal('-76.88882405')], 4)
diff --git a/numpy/lib/tests/test_format.py b/numpy/lib/tests/test_format.py
index 7cc72e775..c7869c582 100644
--- a/numpy/lib/tests/test_format.py
+++ b/numpy/lib/tests/test_format.py
@@ -1,5 +1,6 @@
from __future__ import division, absolute_import, print_function
+# doctest
r''' Test the .npy file format.
Set up:
@@ -275,19 +276,17 @@ Test the header writing.
"v\x00{'descr': [('x', '>i4', (2,)), ('y', '>f8', (2, 2)), ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n"
"\x16\x02{'descr': [('x', '>i4', (2,)),\n ('Info',\n [('value', '>c16'),\n ('y2', '>f8'),\n ('Info2',\n [('name', '|S2'),\n ('value', '>c16', (2,)),\n ('y3', '>f8', (2,)),\n ('z3', '>u4', (2,))]),\n ('name', '|S2'),\n ('z2', '|b1')]),\n ('color', '|S2'),\n ('info', [('Name', '>U8'), ('Value', '>c16')]),\n ('y', '>f8', (2, 2)),\n ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n"
'''
-
import sys
import os
import shutil
import tempfile
import warnings
+import pytest
from io import BytesIO
import numpy as np
-from numpy.compat import asbytes, asbytes_nested, sixu
from numpy.testing import (
- run_module_suite, assert_, assert_array_equal, assert_raises, raises,
- dec, SkipTest
+ assert_, assert_array_equal, assert_raises, raises, SkipTest
)
from numpy.lib import format
@@ -455,20 +454,20 @@ def assert_equal_(o1, o2):
def test_roundtrip():
for arr in basic_arrays + record_arrays:
arr2 = roundtrip(arr)
- yield assert_array_equal, arr, arr2
+ assert_array_equal(arr, arr2)
def test_roundtrip_randsize():
for arr in basic_arrays + record_arrays:
if arr.dtype != object:
arr2 = roundtrip_randsize(arr)
- yield assert_array_equal, arr, arr2
+ assert_array_equal(arr, arr2)
def test_roundtrip_truncated():
for arr in basic_arrays:
if arr.dtype != object:
- yield assert_raises, ValueError, roundtrip_truncated, arr
+ assert_raises(ValueError, roundtrip_truncated, arr)
def test_long_str():
@@ -478,9 +477,9 @@ def test_long_str():
assert_array_equal(long_str_arr, long_str_arr2)
-@dec.slow
+@pytest.mark.slow
def test_memmap_roundtrip():
- # Fixme: test crashes nose on windows.
+ # Fixme: used to crash on windows
if not (sys.platform == 'win32' or sys.platform == 'cygwin'):
for arr in basic_arrays + record_arrays:
if arr.dtype.hasobject:
@@ -509,7 +508,7 @@ def test_memmap_roundtrip():
fp = open(mfn, 'rb')
memmap_bytes = fp.read()
fp.close()
- yield assert_equal_, normal_bytes, memmap_bytes
+ assert_equal_(normal_bytes, memmap_bytes)
# Check that reading the file using memmap works.
ma = format.open_memmap(nfn, mode='r')
@@ -545,8 +544,8 @@ def test_pickle_python2_python3():
import __builtin__
xrange = __builtin__.xrange
- expected = np.array([None, xrange, sixu('\u512a\u826f'),
- asbytes('\xe4\xb8\x8d\xe8\x89\xaf')],
+ expected = np.array([None, xrange, u'\u512a\u826f',
+ b'\xe4\xb8\x8d\xe8\x89\xaf'],
dtype=object)
for fname in ['py2-objarr.npy', 'py2-objarr.npz',
@@ -616,6 +615,11 @@ def test_version_2_0():
format.write_array(f, d)
assert_(w[0].category is UserWarning)
+ # check alignment of data portion
+ f.seek(0)
+ header = f.readline()
+ assert_(len(header) % format.ARRAY_ALIGN == 0)
+
f.seek(0)
n = format.read_array(f)
assert_array_equal(d, n)
@@ -624,7 +628,7 @@ def test_version_2_0():
assert_raises(ValueError, format.write_array, f, d, (1, 0))
-@dec.slow
+@pytest.mark.slow
def test_version_2_0_memmap():
# requires more than 2 byte for header
dt = [(("%d" % i) * 100, float) for i in range(500)]
@@ -682,23 +686,23 @@ def test_write_version():
raise AssertionError("we should have raised a ValueError for the bad version %r" % (version,))
-bad_version_magic = asbytes_nested([
- '\x93NUMPY\x01\x01',
- '\x93NUMPY\x00\x00',
- '\x93NUMPY\x00\x01',
- '\x93NUMPY\x02\x00',
- '\x93NUMPY\x02\x02',
- '\x93NUMPY\xff\xff',
-])
-malformed_magic = asbytes_nested([
- '\x92NUMPY\x01\x00',
- '\x00NUMPY\x01\x00',
- '\x93numpy\x01\x00',
- '\x93MATLB\x01\x00',
- '\x93NUMPY\x01',
- '\x93NUMPY',
- '',
-])
+bad_version_magic = [
+ b'\x93NUMPY\x01\x01',
+ b'\x93NUMPY\x00\x00',
+ b'\x93NUMPY\x00\x01',
+ b'\x93NUMPY\x02\x00',
+ b'\x93NUMPY\x02\x02',
+ b'\x93NUMPY\xff\xff',
+]
+malformed_magic = [
+ b'\x92NUMPY\x01\x00',
+ b'\x00NUMPY\x01\x00',
+ b'\x93numpy\x01\x00',
+ b'\x93MATLB\x01\x00',
+ b'\x93NUMPY\x01',
+ b'\x93NUMPY',
+ b'',
+]
def test_read_magic():
s1 = BytesIO()
@@ -724,13 +728,13 @@ def test_read_magic():
def test_read_magic_bad_magic():
for magic in malformed_magic:
f = BytesIO(magic)
- yield raises(ValueError)(format.read_magic), f
+ assert_raises(ValueError, format.read_array, f)
def test_read_version_1_0_bad_magic():
for magic in bad_version_magic + malformed_magic:
f = BytesIO(magic)
- yield raises(ValueError)(format.read_array), f
+ assert_raises(ValueError, format.read_array, f)
def test_bad_magic_args():
@@ -759,6 +763,7 @@ def test_read_array_header_1_0():
s.seek(format.MAGIC_LEN)
shape, fortran, dtype = format.read_array_header_1_0(s)
+ assert_(s.tell() % format.ARRAY_ALIGN == 0)
assert_((shape, fortran, dtype) == ((3, 6), False, float))
@@ -771,6 +776,7 @@ def test_read_array_header_2_0():
s.seek(format.MAGIC_LEN)
shape, fortran, dtype = format.read_array_header_2_0(s)
+ assert_(s.tell() % format.ARRAY_ALIGN == 0)
assert_((shape, fortran, dtype) == ((3, 6), False, float))
@@ -778,11 +784,11 @@ def test_bad_header():
# header of length less than 2 should fail
s = BytesIO()
assert_raises(ValueError, format.read_array_header_1_0, s)
- s = BytesIO(asbytes('1'))
+ s = BytesIO(b'1')
assert_raises(ValueError, format.read_array_header_1_0, s)
# header shorter than indicated size should fail
- s = BytesIO(asbytes('\x01\x00'))
+ s = BytesIO(b'\x01\x00')
assert_raises(ValueError, format.read_array_header_1_0, s)
# headers without the exact keys required should fail
@@ -812,7 +818,7 @@ def test_large_file_support():
# avoid actually writing 5GB
import subprocess as sp
sp.check_call(["truncate", "-s", "5368709120", tf_name])
- except:
+ except Exception:
raise SkipTest("Could not create 5GB large file")
# write a small array to the end
with open(tf_name, "wb") as f:
@@ -826,8 +832,9 @@ def test_large_file_support():
assert_array_equal(r, d)
-@dec.slow
-@dec.skipif(np.dtype(np.intp).itemsize < 8, "test requires 64-bit system")
+@pytest.mark.skipif(np.dtype(np.intp).itemsize < 8,
+ reason="test requires 64-bit system")
+@pytest.mark.slow
def test_large_archive():
 # Regression test for saving arrays whose dimensions have a product that
 # doesn't fit in int32. See gh-7598 for details.
@@ -847,5 +854,8 @@ def test_large_archive():
assert_(a.shape == new_a.shape)
-if __name__ == "__main__":
- run_module_suite()
+def test_empty_npz():
+ # Test for gh-9989
+ fname = os.path.join(tempdir, "nothing.npz")
+ np.savez(fname)
+ np.load(fname)
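
Standalone sketch (not part of the patch): the alignment assertions above depend on
numpy.lib.format padding the header so that the array data starts at a multiple of
format.ARRAY_ALIGN. A minimal reproduction of that check, assuming NumPy 1.14 or
later where ARRAY_ALIGN is exposed:

    import io
    import numpy as np
    from numpy.lib import format

    buf = io.BytesIO()
    format.write_array(buf, np.arange(10))   # any small array will do

    buf.seek(0)
    header = buf.readline()                  # magic + version + padded header line
    assert len(header) % format.ARRAY_ALIGN == 0   # data begins on an aligned offset
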
diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py
index f69c24d59..ba5b90e8c 100644
--- a/numpy/lib/tests/test_function_base.py
+++ b/numpy/lib/tests/test_function_base.py
@@ -3,15 +3,17 @@ from __future__ import division, absolute_import, print_function
import operator
import warnings
import sys
+import decimal
+import pytest
import numpy as np
+from numpy import ma
from numpy.testing import (
- run_module_suite, TestCase, assert_, assert_equal, assert_array_equal,
- assert_almost_equal, assert_array_almost_equal, assert_raises,
- assert_allclose, assert_array_max_ulp, assert_warns,
- assert_raises_regex, dec, suppress_warnings
-)
-from numpy.testing.utils import HAS_REFCOUNT
+ assert_, assert_equal, assert_array_equal, assert_almost_equal,
+ assert_array_almost_equal, assert_raises, assert_allclose,
+ assert_array_max_ulp, assert_warns, assert_raises_regex, suppress_warnings,
+ HAS_REFCOUNT,
+ )
import numpy.lib.function_base as nfb
from numpy.random import rand
from numpy.lib import (
@@ -20,7 +22,7 @@ from numpy.lib import (
histogram, histogramdd, i0, insert, interp, kaiser, meshgrid, msort,
piecewise, place, rot90, select, setxor1d, sinc, split, trapz, trim_zeros,
unwrap, unique, vectorize
-)
+ )
from numpy.compat import long
@@ -31,9 +33,9 @@ def get_mat(n):
return data
-class TestRot90(TestCase):
+class TestRot90(object):
def test_basic(self):
- self.assertRaises(ValueError, rot90, np.ones(4))
+ assert_raises(ValueError, rot90, np.ones(4))
assert_raises(ValueError, rot90, np.ones((2,2,2)), axes=(0,1,2))
assert_raises(ValueError, rot90, np.ones((2,2)), axes=(0,2))
assert_raises(ValueError, rot90, np.ones((2,2)), axes=(1,1))
@@ -96,15 +98,16 @@ class TestRot90(TestCase):
for k in range(1,5):
assert_equal(rot90(a, k=k, axes=(2, 0)),
- rot90(a_rot90_20, k=k-1, axes=(2, 0)))
+ rot90(a_rot90_20, k=k-1, axes=(2, 0)))
-class TestFlip(TestCase):
+class TestFlip(object):
def test_axes(self):
- self.assertRaises(ValueError, np.flip, np.ones(4), axis=1)
- self.assertRaises(ValueError, np.flip, np.ones((4, 4)), axis=2)
- self.assertRaises(ValueError, np.flip, np.ones((4, 4)), axis=-3)
+ assert_raises(np.AxisError, np.flip, np.ones(4), axis=1)
+ assert_raises(np.AxisError, np.flip, np.ones((4, 4)), axis=2)
+ assert_raises(np.AxisError, np.flip, np.ones((4, 4)), axis=-3)
+ assert_raises(np.AxisError, np.flip, np.ones((4, 4)), axis=(0, 3))
def test_basic_lr(self):
a = get_mat(4)
@@ -168,10 +171,40 @@ class TestFlip(TestCase):
def test_4d(self):
a = np.arange(2 * 3 * 4 * 5).reshape(2, 3, 4, 5)
for i in range(a.ndim):
- assert_equal(np.flip(a, i), np.flipud(a.swapaxes(0, i)).swapaxes(i, 0))
+ assert_equal(np.flip(a, i),
+ np.flipud(a.swapaxes(0, i)).swapaxes(i, 0))
+ def test_default_axis(self):
+ a = np.array([[1, 2, 3],
+ [4, 5, 6]])
+ b = np.array([[6, 5, 4],
+ [3, 2, 1]])
+ assert_equal(np.flip(a), b)
+
+ def test_multiple_axes(self):
+ a = np.array([[[0, 1],
+ [2, 3]],
+ [[4, 5],
+ [6, 7]]])
+
+ assert_equal(np.flip(a, axis=()), a)
+
+ b = np.array([[[5, 4],
+ [7, 6]],
+ [[1, 0],
+ [3, 2]]])
+
+ assert_equal(np.flip(a, axis=(0, 2)), b)
-class TestAny(TestCase):
+ c = np.array([[[3, 2],
+ [1, 0]],
+ [[7, 6],
+ [5, 4]]])
+
+ assert_equal(np.flip(a, axis=(1, 2)), c)
+
+
+class TestAny(object):
def test_basic(self):
y1 = [0, 0, 1, 0]
@@ -188,7 +221,7 @@ class TestAny(TestCase):
assert_array_equal(np.sometrue(y1, axis=1), [0, 1, 1])
-class TestAll(TestCase):
+class TestAll(object):
def test_basic(self):
y1 = [0, 1, 1, 0]
@@ -206,7 +239,7 @@ class TestAll(TestCase):
assert_array_equal(np.alltrue(y1, axis=1), [0, 0, 1])
-class TestCopy(TestCase):
+class TestCopy(object):
def test_basic(self):
a = np.array([[1, 2], [3, 4]])
@@ -219,7 +252,7 @@ class TestCopy(TestCase):
def test_order(self):
# It turns out that people rely on np.copy() preserving order by
# default; changing this broke scikit-learn:
- # https://github.com/scikit-learn/scikit-learn/commit/7842748cf777412c506a8c0ed28090711d3a3783
+ # github.com/scikit-learn/scikit-learn/commit/7842748cf777412c506a8c0ed28090711d3a3783 # noqa
a = np.array([[1, 2], [3, 4]])
assert_(a.flags.c_contiguous)
assert_(not a.flags.f_contiguous)
@@ -234,7 +267,7 @@ class TestCopy(TestCase):
assert_(a_fort_copy.flags.f_contiguous)
-class TestAverage(TestCase):
+class TestAverage(object):
def test_basic(self):
y1 = np.array([1, 2, 3])
@@ -254,9 +287,6 @@ class TestAverage(TestCase):
assert_almost_equal(y5.mean(0), average(y5, 0))
assert_almost_equal(y5.mean(1), average(y5, 1))
- y6 = np.matrix(rand(5, 5))
- assert_array_equal(y6.mean(0), average(y6, 0))
-
def test_weights(self):
y = np.arange(10)
w = np.arange(10)
@@ -324,14 +354,6 @@ class TestAverage(TestCase):
assert_equal(type(np.average(a)), subclass)
assert_equal(type(np.average(a, weights=w)), subclass)
- # also test matrices
- a = np.matrix([[1,2],[3,4]])
- w = np.matrix([[1,2],[3,4]])
-
- r = np.average(a, axis=0, weights=w)
- assert_equal(type(r), np.matrix)
- assert_equal(r, [[2.5, 10.0/3]])
-
def test_upcasting(self):
types = [('i4', 'i4', 'f8'), ('i4', 'f4', 'f8'), ('f4', 'i4', 'f8'),
('f4', 'f4', 'f4'), ('f4', 'f8', 'f8')]
@@ -340,7 +362,13 @@ class TestAverage(TestCase):
w = np.array([[1,2],[3,4]], dtype=wt)
assert_equal(np.average(a, weights=w).dtype, np.dtype(rt))
-class TestSelect(TestCase):
+ def test_object_dtype(self):
+ a = np.array([decimal.Decimal(x) for x in range(10)])
+ w = np.array([decimal.Decimal(1) for _ in range(10)])
+ w /= w.sum()
+ assert_almost_equal(a.mean(0), average(a, weights=w))
+
+class TestSelect(object):
choices = [np.array([1, 2, 3]),
np.array([4, 5, 6]),
np.array([7, 8, 9])]
@@ -412,7 +440,7 @@ class TestSelect(TestCase):
select(conditions, choices)
-class TestInsert(TestCase):
+class TestInsert(object):
def test_basic(self):
a = [1, 2, 3]
@@ -466,8 +494,8 @@ class TestInsert(TestCase):
insert(a, 1, a[:, 2,:], axis=1))
# invalid axis value
- assert_raises(IndexError, insert, a, 1, a[:, 2, :], axis=3)
- assert_raises(IndexError, insert, a, 1, a[:, 2, :], axis=-4)
+ assert_raises(np.AxisError, insert, a, 1, a[:, 2, :], axis=3)
+ assert_raises(np.AxisError, insert, a, 1, a[:, 2, :], axis=-4)
# negative axis value
a = np.arange(24).reshape((2, 3, 4))
@@ -513,7 +541,7 @@ class TestInsert(TestCase):
assert_array_equal(b[[0, 3]], np.array(val, dtype=b.dtype))
-class TestAmax(TestCase):
+class TestAmax(object):
def test_basic(self):
a = [3, 4, 5, 10, -3, -5, 6.0]
@@ -525,7 +553,7 @@ class TestAmax(TestCase):
assert_equal(np.amax(b, axis=1), [9.0, 10.0, 8.0])
-class TestAmin(TestCase):
+class TestAmin(object):
def test_basic(self):
a = [3, 4, 5, 10, -3, -5, 6.0]
@@ -537,7 +565,7 @@ class TestAmin(TestCase):
assert_equal(np.amin(b, axis=1), [3.0, 4.0, 2.0])
-class TestPtp(TestCase):
+class TestPtp(object):
def test_basic(self):
a = np.array([3, 4, 5, 10, -3, -5, 6.0])
@@ -548,14 +576,18 @@ class TestPtp(TestCase):
assert_equal(b.ptp(axis=0), [5.0, 7.0, 7.0])
assert_equal(b.ptp(axis=-1), [6.0, 6.0, 6.0])
+ assert_equal(b.ptp(axis=0, keepdims=True), [[5.0, 7.0, 7.0]])
+ assert_equal(b.ptp(axis=(0,1), keepdims=True), [[8.0]])
+
-class TestCumsum(TestCase):
+class TestCumsum(object):
def test_basic(self):
ba = [1, 2, 10, 11, 6, 5, 4]
ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]]
for ctype in [np.int8, np.uint8, np.int16, np.uint16, np.int32,
- np.uint32, np.float32, np.float64, np.complex64, np.complex128]:
+ np.uint32, np.float32, np.float64, np.complex64,
+ np.complex128]:
a = np.array(ba, ctype)
a2 = np.array(ba2, ctype)
@@ -571,7 +603,7 @@ class TestCumsum(TestCase):
assert_array_equal(np.cumsum(a2, axis=1), tgt)
-class TestProd(TestCase):
+class TestProd(object):
def test_basic(self):
ba = [1, 2, 10, 11, 6, 5, 4]
@@ -581,8 +613,8 @@ class TestProd(TestCase):
a = np.array(ba, ctype)
a2 = np.array(ba2, ctype)
if ctype in ['1', 'b']:
- self.assertRaises(ArithmeticError, np.prod, a)
- self.assertRaises(ArithmeticError, np.prod, a2, 1)
+ assert_raises(ArithmeticError, np.prod, a)
+ assert_raises(ArithmeticError, np.prod, a2, 1)
else:
assert_equal(a.prod(axis=0), 26400)
assert_array_equal(a2.prod(axis=0),
@@ -591,7 +623,7 @@ class TestProd(TestCase):
np.array([24, 1890, 600], ctype))
-class TestCumprod(TestCase):
+class TestCumprod(object):
def test_basic(self):
ba = [1, 2, 10, 11, 6, 5, 4]
@@ -601,9 +633,9 @@ class TestCumprod(TestCase):
a = np.array(ba, ctype)
a2 = np.array(ba2, ctype)
if ctype in ['1', 'b']:
- self.assertRaises(ArithmeticError, np.cumprod, a)
- self.assertRaises(ArithmeticError, np.cumprod, a2, 1)
- self.assertRaises(ArithmeticError, np.cumprod, a)
+ assert_raises(ArithmeticError, np.cumprod, a)
+ assert_raises(ArithmeticError, np.cumprod, a2, 1)
+ assert_raises(ArithmeticError, np.cumprod, a)
else:
assert_array_equal(np.cumprod(a, axis=-1),
np.array([1, 2, 20, 220,
@@ -618,7 +650,7 @@ class TestCumprod(TestCase):
[10, 30, 120, 600]], ctype))
-class TestDiff(TestCase):
+class TestDiff(object):
def test_basic(self):
x = [1, 4, 6, 7, 12]
@@ -629,6 +661,29 @@ class TestDiff(TestCase):
assert_array_equal(diff(x, n=2), out2)
assert_array_equal(diff(x, n=3), out3)
+ x = [1.1, 2.2, 3.0, -0.2, -0.1]
+ out = np.array([1.1, 0.8, -3.2, 0.1])
+ assert_almost_equal(diff(x), out)
+
+ x = [True, True, False, False]
+ out = np.array([False, True, False])
+ out2 = np.array([True, True])
+ assert_array_equal(diff(x), out)
+ assert_array_equal(diff(x, n=2), out2)
+
+ def test_axis(self):
+ x = np.zeros((10, 20, 30))
+ x[:, 1::2, :] = 1
+ exp = np.ones((10, 19, 30))
+ exp[:, 1::2, :] = -1
+ assert_array_equal(diff(x), np.zeros((10, 20, 29)))
+ assert_array_equal(diff(x, axis=-1), np.zeros((10, 20, 29)))
+ assert_array_equal(diff(x, axis=0), np.zeros((9, 20, 30)))
+ assert_array_equal(diff(x, axis=1), exp)
+ assert_array_equal(diff(x, axis=-2), exp)
+ assert_raises(np.AxisError, diff, x, axis=3)
+ assert_raises(np.AxisError, diff, x, axis=-4)
+
def test_nd(self):
x = 20 * rand(10, 20, 30)
out1 = x[:, :, 1:] - x[:, :, :-1]
@@ -640,10 +695,49 @@ class TestDiff(TestCase):
assert_array_equal(diff(x, axis=0), out3)
assert_array_equal(diff(x, n=2, axis=0), out4)
+ def test_n(self):
+ x = list(range(3))
+ assert_raises(ValueError, diff, x, n=-1)
+ output = [diff(x, n=n) for n in range(1, 5)]
+ expected = [[1, 1], [0], [], []]
+ assert_(diff(x, n=0) is x)
+ for n, (expected, out) in enumerate(zip(expected, output), start=1):
+ assert_(type(out) is np.ndarray)
+ assert_array_equal(out, expected)
+ assert_equal(out.dtype, np.int_)
+ assert_equal(len(out), max(0, len(x) - n))
+
+ def test_times(self):
+ x = np.arange('1066-10-13', '1066-10-16', dtype=np.datetime64)
+ expected = [
+ np.array([1, 1], dtype='timedelta64[D]'),
+ np.array([0], dtype='timedelta64[D]'),
+ ]
+ expected.extend([np.array([], dtype='timedelta64[D]')] * 3)
+ for n, exp in enumerate(expected, start=1):
+ out = diff(x, n=n)
+ assert_array_equal(out, exp)
+ assert_equal(out.dtype, exp.dtype)
+
+ def test_subclass(self):
+ x = ma.array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]],
+ mask=[[False, False], [True, False],
+ [False, True], [True, True], [False, False]])
+ out = diff(x)
+ assert_array_equal(out.data, [[1], [1], [1], [1], [1]])
+ assert_array_equal(out.mask, [[False], [True],
+ [True], [True], [False]])
+ assert_(type(out) is type(x))
+
+ out3 = diff(x, n=3)
+ assert_array_equal(out3.data, [[], [], [], [], []])
+ assert_array_equal(out3.mask, [[], [], [], [], []])
+ assert_(type(out3) is type(x))
+
-class TestDelete(TestCase):
+class TestDelete(object):
- def setUp(self):
+ def setup(self):
self.a = np.arange(5)
self.nd_a = np.arange(5).repeat(2).reshape(1, 5, 2)
@@ -716,7 +810,7 @@ class TestDelete(TestCase):
assert_equal(m.flags.f_contiguous, k.flags.f_contiguous)
-class TestGradient(TestCase):
+class TestGradient(object):
def test_basic(self):
v = [[1, 1], [3, 4]]
@@ -726,14 +820,58 @@ class TestGradient(TestCase):
assert_array_equal(gradient(x), dx)
assert_array_equal(gradient(v), dx)
+ def test_args(self):
+ dx = np.cumsum(np.ones(5))
+ dx_uneven = [1., 2., 5., 9., 11.]
+ f_2d = np.arange(25).reshape(5, 5)
+
+ # distances must be scalars or have size equal to gradient[axis]
+ gradient(np.arange(5), 3.)
+ gradient(np.arange(5), np.array(3.))
+ gradient(np.arange(5), dx)
+ # dy is set equal to dx because scalar
+ gradient(f_2d, 1.5)
+ gradient(f_2d, np.array(1.5))
+
+ gradient(f_2d, dx_uneven, dx_uneven)
+ # mix between even and uneven spaces and
+ # mix between scalar and vector
+ gradient(f_2d, dx, 2)
+
+ # 2D but axis specified
+ gradient(f_2d, dx, axis=1)
+
+ # 2d coordinate arguments are not yet allowed
+ assert_raises_regex(ValueError, '.*scalars or 1d',
+ gradient, f_2d, np.stack([dx]*2, axis=-1), 1)
+
def test_badargs(self):
- # for 2D array, gradient can take 0, 1, or 2 extra args
- x = np.array([[1, 1], [3, 4]])
- assert_raises(SyntaxError, gradient, x, np.array([1., 1.]),
- np.array([1., 1.]), np.array([1., 1.]))
+ f_2d = np.arange(25).reshape(5, 5)
+ x = np.cumsum(np.ones(5))
+
+ # wrong sizes
+ assert_raises(ValueError, gradient, f_2d, x, np.ones(2))
+ assert_raises(ValueError, gradient, f_2d, 1, np.ones(2))
+ assert_raises(ValueError, gradient, f_2d, np.ones(2), np.ones(2))
+ # wrong number of arguments
+ assert_raises(TypeError, gradient, f_2d, x)
+ assert_raises(TypeError, gradient, f_2d, x, axis=(0,1))
+ assert_raises(TypeError, gradient, f_2d, x, x, x)
+ assert_raises(TypeError, gradient, f_2d, 1, 1, 1)
+ assert_raises(TypeError, gradient, f_2d, x, x, axis=1)
+ assert_raises(TypeError, gradient, f_2d, 1, 1, axis=1)
- # disallow arrays as distances, see gh-6847
- assert_raises(ValueError, gradient, np.arange(5), np.ones(5))
+ def test_datetime64(self):
+ # Make sure gradient() can handle special types like datetime64
+ x = np.array(
+ ['1910-08-16', '1910-08-11', '1910-08-10', '1910-08-12',
+ '1910-10-12', '1910-12-12', '1912-12-12'],
+ dtype='datetime64[D]')
+ dx = np.array(
+ [-5, -3, 0, 31, 61, 396, 731],
+ dtype='timedelta64[D]')
+ assert_array_equal(gradient(x), dx)
+ assert_(dx.dtype == np.dtype('timedelta64[D]'))
def test_masked(self):
# Make sure that gradient supports subclasses like masked arrays
@@ -750,29 +888,6 @@ class TestGradient(TestCase):
np.gradient(x2, edge_order=2)
assert_array_equal(x2.mask, [False, False, True, False, False])
- def test_datetime64(self):
- # Make sure gradient() can handle special types like datetime64
- x = np.array(
- ['1910-08-16', '1910-08-11', '1910-08-10', '1910-08-12',
- '1910-10-12', '1910-12-12', '1912-12-12'],
- dtype='datetime64[D]')
- dx = np.array(
- [-5, -3, 0, 31, 61, 396, 731],
- dtype='timedelta64[D]')
- assert_array_equal(gradient(x), dx)
- assert_(dx.dtype == np.dtype('timedelta64[D]'))
-
- def test_timedelta64(self):
- # Make sure gradient() can handle special types like timedelta64
- x = np.array(
- [-5, -3, 10, 12, 61, 321, 300],
- dtype='timedelta64[D]')
- dx = np.array(
- [2, 7, 7, 25, 154, 119, -21],
- dtype='timedelta64[D]')
- assert_array_equal(gradient(x), dx)
- assert_(dx.dtype == np.dtype('timedelta64[D]'))
-
def test_second_order_accurate(self):
 # Testing that the relative numerical error is less than 3% for
# this example problem. This corresponds to second order
@@ -785,6 +900,78 @@ class TestGradient(TestCase):
num_error = np.abs((np.gradient(y, dx, edge_order=2) / analytical) - 1)
assert_(np.all(num_error < 0.03) == True)
+ # test with unevenly spaced
+ np.random.seed(0)
+ x = np.sort(np.random.random(10))
+ y = 2 * x ** 3 + 4 * x ** 2 + 2 * x
+ analytical = 6 * x ** 2 + 8 * x + 2
+ num_error = np.abs((np.gradient(y, x, edge_order=2) / analytical) - 1)
+ assert_(np.all(num_error < 0.03) == True)
+
+ def test_spacing(self):
+ f = np.array([0, 2., 3., 4., 5., 5.])
+ f = np.tile(f, (6,1)) + f.reshape(-1, 1)
+ x_uneven = np.array([0., 0.5, 1., 3., 5., 7.])
+ x_even = np.arange(6.)
+
+ fdx_even_ord1 = np.tile([2., 1.5, 1., 1., 0.5, 0.], (6,1))
+ fdx_even_ord2 = np.tile([2.5, 1.5, 1., 1., 0.5, -0.5], (6,1))
+ fdx_uneven_ord1 = np.tile([4., 3., 1.7, 0.5, 0.25, 0.], (6,1))
+ fdx_uneven_ord2 = np.tile([5., 3., 1.7, 0.5, 0.25, -0.25], (6,1))
+
+ # evenly spaced
+ for edge_order, exp_res in [(1, fdx_even_ord1), (2, fdx_even_ord2)]:
+ res1 = gradient(f, 1., axis=(0,1), edge_order=edge_order)
+ res2 = gradient(f, x_even, x_even,
+ axis=(0,1), edge_order=edge_order)
+ res3 = gradient(f, x_even, x_even,
+ axis=None, edge_order=edge_order)
+ assert_array_equal(res1, res2)
+ assert_array_equal(res2, res3)
+ assert_almost_equal(res1[0], exp_res.T)
+ assert_almost_equal(res1[1], exp_res)
+
+ res1 = gradient(f, 1., axis=0, edge_order=edge_order)
+ res2 = gradient(f, x_even, axis=0, edge_order=edge_order)
+ assert_(res1.shape == res2.shape)
+ assert_almost_equal(res2, exp_res.T)
+
+ res1 = gradient(f, 1., axis=1, edge_order=edge_order)
+ res2 = gradient(f, x_even, axis=1, edge_order=edge_order)
+ assert_(res1.shape == res2.shape)
+ assert_array_equal(res2, exp_res)
+
+ # unevenly spaced
+ for edge_order, exp_res in [(1, fdx_uneven_ord1), (2, fdx_uneven_ord2)]:
+ res1 = gradient(f, x_uneven, x_uneven,
+ axis=(0,1), edge_order=edge_order)
+ res2 = gradient(f, x_uneven, x_uneven,
+ axis=None, edge_order=edge_order)
+ assert_array_equal(res1, res2)
+ assert_almost_equal(res1[0], exp_res.T)
+ assert_almost_equal(res1[1], exp_res)
+
+ res1 = gradient(f, x_uneven, axis=0, edge_order=edge_order)
+ assert_almost_equal(res1, exp_res.T)
+
+ res1 = gradient(f, x_uneven, axis=1, edge_order=edge_order)
+ assert_almost_equal(res1, exp_res)
+
+ # mixed
+ res1 = gradient(f, x_even, x_uneven, axis=(0,1), edge_order=1)
+ res2 = gradient(f, x_uneven, x_even, axis=(1,0), edge_order=1)
+ assert_array_equal(res1[0], res2[1])
+ assert_array_equal(res1[1], res2[0])
+ assert_almost_equal(res1[0], fdx_even_ord1.T)
+ assert_almost_equal(res1[1], fdx_uneven_ord1)
+
+ res1 = gradient(f, x_even, x_uneven, axis=(0,1), edge_order=2)
+ res2 = gradient(f, x_uneven, x_even, axis=(1,0), edge_order=2)
+ assert_array_equal(res1[0], res2[1])
+ assert_array_equal(res1[1], res2[0])
+ assert_almost_equal(res1[0], fdx_even_ord2.T)
+ assert_almost_equal(res1[1], fdx_uneven_ord2)
+
def test_specific_axes(self):
# Testing that gradient can work on a given axis only
v = [[1, 1], [3, 4]]
@@ -802,16 +989,46 @@ class TestGradient(TestCase):
assert_almost_equal(gradient(x, axis=None), gradient(x))
# test vararg order
- assert_array_equal(gradient(x, 2, 3, axis=(1, 0)), [dx[1]/2.0, dx[0]/3.0])
+ assert_array_equal(gradient(x, 2, 3, axis=(1, 0)),
+ [dx[1]/2.0, dx[0]/3.0])
# test maximal number of varargs
- assert_raises(SyntaxError, gradient, x, 1, 2, axis=1)
+ assert_raises(TypeError, gradient, x, 1, 2, axis=1)
+
+ assert_raises(np.AxisError, gradient, x, axis=3)
+ assert_raises(np.AxisError, gradient, x, axis=-3)
+ # assert_raises(TypeError, gradient, x, axis=[1,])
- assert_raises(ValueError, gradient, x, axis=3)
- assert_raises(ValueError, gradient, x, axis=-3)
- assert_raises(TypeError, gradient, x, axis=[1,])
+ def test_timedelta64(self):
+ # Make sure gradient() can handle special types like timedelta64
+ x = np.array(
+ [-5, -3, 10, 12, 61, 321, 300],
+ dtype='timedelta64[D]')
+ dx = np.array(
+ [2, 7, 7, 25, 154, 119, -21],
+ dtype='timedelta64[D]')
+ assert_array_equal(gradient(x), dx)
+ assert_(dx.dtype == np.dtype('timedelta64[D]'))
+ def test_inexact_dtypes(self):
+ for dt in [np.float16, np.float32, np.float64]:
+ # dtypes should not be promoted in a different way to what diff does
+ x = np.array([1, 2, 3], dtype=dt)
+ assert_equal(gradient(x).dtype, np.diff(x).dtype)
-class TestAngle(TestCase):
+ def test_values(self):
+ # needs at least 2 points for edge_order ==1
+ gradient(np.arange(2), edge_order=1)
+ # needs at least 3 points for edge_order ==2
+ gradient(np.arange(3), edge_order=2)
+
+ assert_raises(ValueError, gradient, np.arange(0), edge_order=1)
+ assert_raises(ValueError, gradient, np.arange(0), edge_order=2)
+ assert_raises(ValueError, gradient, np.arange(1), edge_order=1)
+ assert_raises(ValueError, gradient, np.arange(1), edge_order=2)
+ assert_raises(ValueError, gradient, np.arange(2), edge_order=2)
+
+
+class TestAngle(object):
def test_basic(self):
x = [1 + 3j, np.sqrt(2) / 2.0 + 1j * np.sqrt(2) / 2,
@@ -827,7 +1044,7 @@ class TestAngle(TestCase):
assert_array_almost_equal(z, zo, 11)
-class TestTrimZeros(TestCase):
+class TestTrimZeros(object):
"""
Only testing for integer splits.
@@ -850,7 +1067,7 @@ class TestTrimZeros(TestCase):
assert_array_equal(res, np.array([1, 0, 2, 3, 0, 4]))
-class TestExtins(TestCase):
+class TestExtins(object):
def test_basic(self):
a = np.array([1, 3, 2, 1, 2, 3, 3])
@@ -889,7 +1106,7 @@ class TestExtins(TestCase):
assert_array_equal(a, ac)
-class TestVectorize(TestCase):
+class TestVectorize(object):
def test_simple(self):
def addsubtract(a, b):
@@ -948,7 +1165,7 @@ class TestVectorize(TestCase):
import random
try:
vectorize(random.randrange) # Should succeed
- except:
+ except Exception:
raise AssertionError()
def test_keywords2_ticket_2100(self):
@@ -1221,7 +1438,7 @@ class TestVectorize(TestCase):
f(x)
-class TestDigitize(TestCase):
+class TestDigitize(object):
def test_forward(self):
x = np.arange(-6, 5)
@@ -1293,17 +1510,29 @@ class TestDigitize(TestCase):
assert_(not isinstance(digitize(b, a, False), A))
assert_(not isinstance(digitize(b, a, True), A))
+ def test_large_integers_increasing(self):
+ # gh-11022
+ x = 2**54 # loses precision in a float
+ assert_equal(np.digitize(x, [x - 1, x + 1]), 1)
+
+ @pytest.mark.xfail(
+ reason="gh-11022: np.core.multiarray._monoticity loses precision")
+ def test_large_integers_decreasing(self):
+ # gh-11022
+ x = 2**54 # loses precision in a float
+ assert_equal(np.digitize(x, [x + 1, x - 1]), 1)
-class TestUnwrap(TestCase):
+
+class TestUnwrap(object):
def test_simple(self):
- # check that unwrap removes jumps greather that 2*pi
+ # check that unwrap removes jumps greater than 2*pi
assert_array_equal(unwrap([1, 1 + 2 * np.pi]), [1, 1])
- # check that unwrap maintans continuity
+ # check that unwrap maintains continuity
assert_(np.all(diff(unwrap(rand(10) * 100)) < np.pi))
-class TestFilterwindows(TestCase):
+class TestFilterwindows(object):
def test_hanning(self):
# check symmetry
@@ -1334,7 +1563,7 @@ class TestFilterwindows(TestCase):
assert_almost_equal(np.sum(w, axis=0), 3.7800, 4)
-class TestTrapz(TestCase):
+class TestTrapz(object):
def test_simple(self):
x = np.arange(-10, 10, .1)
@@ -1395,18 +1624,8 @@ class TestTrapz(TestCase):
xm = np.ma.array(x, mask=mask)
assert_almost_equal(trapz(y, xm), r)
- def test_matrix(self):
- # Test to make sure matrices give the same answer as ndarrays
- x = np.linspace(0, 5)
- y = x * x
- r = trapz(y, x)
- mx = np.matrix(x)
- my = np.matrix(y)
- mr = trapz(my, mx)
- assert_almost_equal(mr, r)
-
-class TestSinc(TestCase):
+class TestSinc(object):
def test_simple(self):
assert_(sinc(0) == 1)
@@ -1423,502 +1642,7 @@ class TestSinc(TestCase):
assert_array_equal(y1, y3)
-class TestHistogram(TestCase):
-
- def setUp(self):
- pass
-
- def tearDown(self):
- pass
-
- def test_simple(self):
- n = 100
- v = rand(n)
- (a, b) = histogram(v)
- # check if the sum of the bins equals the number of samples
- assert_equal(np.sum(a, axis=0), n)
- # check that the bin counts are evenly spaced when the data is from
- # a linear function
- (a, b) = histogram(np.linspace(0, 10, 100))
- assert_array_equal(a, 10)
-
- def test_one_bin(self):
- # Ticket 632
- hist, edges = histogram([1, 2, 3, 4], [1, 2])
- assert_array_equal(hist, [2, ])
- assert_array_equal(edges, [1, 2])
- assert_raises(ValueError, histogram, [1, 2], bins=0)
- h, e = histogram([1, 2], bins=1)
- assert_equal(h, np.array([2]))
- assert_allclose(e, np.array([1., 2.]))
-
- def test_normed(self):
- # Check that the integral of the density equals 1.
- n = 100
- v = rand(n)
- a, b = histogram(v, normed=True)
- area = np.sum(a * diff(b))
- assert_almost_equal(area, 1)
-
- # Check with non-constant bin widths (buggy but backwards
- # compatible)
- v = np.arange(10)
- bins = [0, 1, 5, 9, 10]
- a, b = histogram(v, bins, normed=True)
- area = np.sum(a * diff(b))
- assert_almost_equal(area, 1)
-
- def test_density(self):
- # Check that the integral of the density equals 1.
- n = 100
- v = rand(n)
- a, b = histogram(v, density=True)
- area = np.sum(a * diff(b))
- assert_almost_equal(area, 1)
-
- # Check with non-constant bin widths
- v = np.arange(10)
- bins = [0, 1, 3, 6, 10]
- a, b = histogram(v, bins, density=True)
- assert_array_equal(a, .1)
- assert_equal(np.sum(a * diff(b)), 1)
-
- # Variale bin widths are especially useful to deal with
- # infinities.
- v = np.arange(10)
- bins = [0, 1, 3, 6, np.inf]
- a, b = histogram(v, bins, density=True)
- assert_array_equal(a, [.1, .1, .1, 0.])
-
- # Taken from a bug report from N. Becker on the numpy-discussion
- # mailing list Aug. 6, 2010.
- counts, dmy = np.histogram(
- [1, 2, 3, 4], [0.5, 1.5, np.inf], density=True)
- assert_equal(counts, [.25, 0])
-
- def test_outliers(self):
- # Check that outliers are not tallied
- a = np.arange(10) + .5
-
- # Lower outliers
- h, b = histogram(a, range=[0, 9])
- assert_equal(h.sum(), 9)
-
- # Upper outliers
- h, b = histogram(a, range=[1, 10])
- assert_equal(h.sum(), 9)
-
- # Normalization
- h, b = histogram(a, range=[1, 9], normed=True)
- assert_almost_equal((h * diff(b)).sum(), 1, decimal=15)
-
- # Weights
- w = np.arange(10) + .5
- h, b = histogram(a, range=[1, 9], weights=w, normed=True)
- assert_equal((h * diff(b)).sum(), 1)
-
- h, b = histogram(a, bins=8, range=[1, 9], weights=w)
- assert_equal(h, w[1:-1])
-
- def test_type(self):
- # Check the type of the returned histogram
- a = np.arange(10) + .5
- h, b = histogram(a)
- assert_(np.issubdtype(h.dtype, int))
-
- h, b = histogram(a, normed=True)
- assert_(np.issubdtype(h.dtype, float))
-
- h, b = histogram(a, weights=np.ones(10, int))
- assert_(np.issubdtype(h.dtype, int))
-
- h, b = histogram(a, weights=np.ones(10, float))
- assert_(np.issubdtype(h.dtype, float))
-
- def test_f32_rounding(self):
- # gh-4799, check that the rounding of the edges works with float32
- x = np.array([276.318359, -69.593948, 21.329449], dtype=np.float32)
- y = np.array([5005.689453, 4481.327637, 6010.369629], dtype=np.float32)
- counts_hist, xedges, yedges = np.histogram2d(x, y, bins=100)
- assert_equal(counts_hist.sum(), 3.)
-
- def test_weights(self):
- v = rand(100)
- w = np.ones(100) * 5
- a, b = histogram(v)
- na, nb = histogram(v, normed=True)
- wa, wb = histogram(v, weights=w)
- nwa, nwb = histogram(v, weights=w, normed=True)
- assert_array_almost_equal(a * 5, wa)
- assert_array_almost_equal(na, nwa)
-
- # Check weights are properly applied.
- v = np.linspace(0, 10, 10)
- w = np.concatenate((np.zeros(5), np.ones(5)))
- wa, wb = histogram(v, bins=np.arange(11), weights=w)
- assert_array_almost_equal(wa, w)
-
- # Check with integer weights
- wa, wb = histogram([1, 2, 2, 4], bins=4, weights=[4, 3, 2, 1])
- assert_array_equal(wa, [4, 5, 0, 1])
- wa, wb = histogram(
- [1, 2, 2, 4], bins=4, weights=[4, 3, 2, 1], normed=True)
- assert_array_almost_equal(wa, np.array([4, 5, 0, 1]) / 10. / 3. * 4)
-
- # Check weights with non-uniform bin widths
- a, b = histogram(
- np.arange(9), [0, 1, 3, 6, 10],
- weights=[2, 1, 1, 1, 1, 1, 1, 1, 1], density=True)
- assert_almost_equal(a, [.2, .1, .1, .075])
-
- def test_exotic_weights(self):
-
- # Test the use of weights that are not integer or floats, but e.g.
- # complex numbers or object types.
-
- # Complex weights
- values = np.array([1.3, 2.5, 2.3])
- weights = np.array([1, -1, 2]) + 1j * np.array([2, 1, 2])
-
- # Check with custom bins
- wa, wb = histogram(values, bins=[0, 2, 3], weights=weights)
- assert_array_almost_equal(wa, np.array([1, 1]) + 1j * np.array([2, 3]))
-
- # Check with even bins
- wa, wb = histogram(values, bins=2, range=[1, 3], weights=weights)
- assert_array_almost_equal(wa, np.array([1, 1]) + 1j * np.array([2, 3]))
-
- # Decimal weights
- from decimal import Decimal
- values = np.array([1.3, 2.5, 2.3])
- weights = np.array([Decimal(1), Decimal(2), Decimal(3)])
-
- # Check with custom bins
- wa, wb = histogram(values, bins=[0, 2, 3], weights=weights)
- assert_array_almost_equal(wa, [Decimal(1), Decimal(5)])
-
- # Check with even bins
- wa, wb = histogram(values, bins=2, range=[1, 3], weights=weights)
- assert_array_almost_equal(wa, [Decimal(1), Decimal(5)])
-
- def test_no_side_effects(self):
- # This is a regression test that ensures that values passed to
- # ``histogram`` are unchanged.
- values = np.array([1.3, 2.5, 2.3])
- np.histogram(values, range=[-10, 10], bins=100)
- assert_array_almost_equal(values, [1.3, 2.5, 2.3])
-
- def test_empty(self):
- a, b = histogram([], bins=([0, 1]))
- assert_array_equal(a, np.array([0]))
- assert_array_equal(b, np.array([0, 1]))
-
- def test_error_binnum_type (self):
- # Tests if right Error is raised if bins argument is float
- vals = np.linspace(0.0, 1.0, num=100)
- histogram(vals, 5)
- assert_raises(TypeError, histogram, vals, 2.4)
-
- def test_finite_range(self):
- # Normal ranges should be fine
- vals = np.linspace(0.0, 1.0, num=100)
- histogram(vals, range=[0.25,0.75])
- assert_raises(ValueError, histogram, vals, range=[np.nan,0.75])
- assert_raises(ValueError, histogram, vals, range=[0.25,np.inf])
-
- def test_bin_edge_cases(self):
- # Ensure that floating-point computations correctly place edge cases.
- arr = np.array([337, 404, 739, 806, 1007, 1811, 2012])
- hist, edges = np.histogram(arr, bins=8296, range=(2, 2280))
- mask = hist > 0
- left_edges = edges[:-1][mask]
- right_edges = edges[1:][mask]
- for x, left, right in zip(arr, left_edges, right_edges):
- self.assertGreaterEqual(x, left)
- self.assertLess(x, right)
-
- def test_last_bin_inclusive_range(self):
- arr = np.array([0., 0., 0., 1., 2., 3., 3., 4., 5.])
- hist, edges = np.histogram(arr, bins=30, range=(-0.5, 5))
- self.assertEqual(hist[-1], 1)
-
-
-class TestHistogramOptimBinNums(TestCase):
- """
- Provide test coverage when using provided estimators for optimal number of
- bins
- """
-
- def test_empty(self):
- estimator_list = ['fd', 'scott', 'rice', 'sturges',
- 'doane', 'sqrt', 'auto']
- # check it can deal with empty data
- for estimator in estimator_list:
- a, b = histogram([], bins=estimator)
- assert_array_equal(a, np.array([0]))
- assert_array_equal(b, np.array([0, 1]))
-
- def test_simple(self):
- """
- Straightforward testing with a mixture of linspace data (for
- consistency). All test values have been precomputed and the values
- shouldn't change
- """
- # Some basic sanity checking, with some fixed data.
- # Checking for the correct number of bins
- basic_test = {50: {'fd': 4, 'scott': 4, 'rice': 8, 'sturges': 7,
- 'doane': 8, 'sqrt': 8, 'auto': 7},
- 500: {'fd': 8, 'scott': 8, 'rice': 16, 'sturges': 10,
- 'doane': 12, 'sqrt': 23, 'auto': 10},
- 5000: {'fd': 17, 'scott': 17, 'rice': 35, 'sturges': 14,
- 'doane': 17, 'sqrt': 71, 'auto': 17}}
-
- for testlen, expectedResults in basic_test.items():
- # Create some sort of non uniform data to test with
- # (2 peak uniform mixture)
- x1 = np.linspace(-10, -1, testlen // 5 * 2)
- x2 = np.linspace(1, 10, testlen // 5 * 3)
- x = np.concatenate((x1, x2))
- for estimator, numbins in expectedResults.items():
- a, b = np.histogram(x, estimator)
- assert_equal(len(a), numbins, err_msg="For the {0} estimator "
- "with datasize of {1}".format(estimator, testlen))
-
- def test_small(self):
- """
- Smaller datasets have the potential to cause issues with the data
- adaptive methods, especially the FD method. All bin numbers have been
- precalculated.
- """
- small_dat = {1: {'fd': 1, 'scott': 1, 'rice': 1, 'sturges': 1,
- 'doane': 1, 'sqrt': 1},
- 2: {'fd': 2, 'scott': 1, 'rice': 3, 'sturges': 2,
- 'doane': 1, 'sqrt': 2},
- 3: {'fd': 2, 'scott': 2, 'rice': 3, 'sturges': 3,
- 'doane': 3, 'sqrt': 2}}
-
- for testlen, expectedResults in small_dat.items():
- testdat = np.arange(testlen)
- for estimator, expbins in expectedResults.items():
- a, b = np.histogram(testdat, estimator)
- assert_equal(len(a), expbins, err_msg="For the {0} estimator "
- "with datasize of {1}".format(estimator, testlen))
-
- def test_incorrect_methods(self):
- """
- Check a Value Error is thrown when an unknown string is passed in
- """
- check_list = ['mad', 'freeman', 'histograms', 'IQR']
- for estimator in check_list:
- assert_raises(ValueError, histogram, [1, 2, 3], estimator)
-
- def test_novariance(self):
- """
- Check that methods handle no variance in data
- Primarily for Scott and FD as the SD and IQR are both 0 in this case
- """
- novar_dataset = np.ones(100)
- novar_resultdict = {'fd': 1, 'scott': 1, 'rice': 1, 'sturges': 1,
- 'doane': 1, 'sqrt': 1, 'auto': 1}
-
- for estimator, numbins in novar_resultdict.items():
- a, b = np.histogram(novar_dataset, estimator)
- assert_equal(len(a), numbins, err_msg="{0} estimator, "
- "No Variance test".format(estimator))
-
- def test_outlier(self):
- """
- Check the FD, Scott and Doane with outliers.
-
- The FD estimates a smaller binwidth since it's less affected by
- outliers. Since the range is so (artificially) large, this means more
- bins, most of which will be empty, but the data of interest usually is
- unaffected. The Scott estimator is more affected and returns fewer bins,
- despite most of the variance being in one area of the data. The Doane
- estimator lies somewhere between the other two.
- """
- xcenter = np.linspace(-10, 10, 50)
- outlier_dataset = np.hstack((np.linspace(-110, -100, 5), xcenter))
-
- outlier_resultdict = {'fd': 21, 'scott': 5, 'doane': 11}
-
- for estimator, numbins in outlier_resultdict.items():
- a, b = np.histogram(outlier_dataset, estimator)
- assert_equal(len(a), numbins)
-
- def test_simple_range(self):
- """
- Straightforward testing with a mixture of linspace data (for
- consistency). Adding in a 3rd mixture that will then be
- completely ignored. All test values have been precomputed and
- the shouldn't change.
- """
- # some basic sanity checking, with some fixed data. Checking for the correct number of bins
- basic_test = {50: {'fd': 8, 'scott': 8, 'rice': 15, 'sturges': 14, 'auto': 14},
- 500: {'fd': 15, 'scott': 16, 'rice': 32, 'sturges': 20, 'auto': 20},
- 5000: {'fd': 33, 'scott': 33, 'rice': 69, 'sturges': 27, 'auto': 33}}
-
- for testlen, expectedResults in basic_test.items():
- # create some sort of non uniform data to test with (3 peak uniform mixture)
- x1 = np.linspace(-10, -1, testlen // 5 * 2)
- x2 = np.linspace(1, 10, testlen // 5 * 3)
- x3 = np.linspace(-100, -50, testlen)
- x = np.hstack((x1, x2, x3))
- for estimator, numbins in expectedResults.items():
- a, b = np.histogram(x, estimator, range = (-20, 20))
- msg = "For the {0} estimator with datasize of {1}".format(estimator, testlen)
- assert_equal(len(a), numbins, err_msg=msg)
-
- def test_simple_weighted(self):
- """
- Check that weighted data raises a TypeError
- """
- estimator_list = ['fd', 'scott', 'rice', 'sturges', 'auto']
- for estimator in estimator_list:
- assert_raises(TypeError, histogram, [1, 2, 3], estimator, weights=[1, 2, 3])
-
-
-class TestHistogramdd(TestCase):
-
- def test_simple(self):
- x = np.array([[-.5, .5, 1.5], [-.5, 1.5, 2.5], [-.5, 2.5, .5],
- [.5, .5, 1.5], [.5, 1.5, 2.5], [.5, 2.5, 2.5]])
- H, edges = histogramdd(x, (2, 3, 3),
- range=[[-1, 1], [0, 3], [0, 3]])
- answer = np.array([[[0, 1, 0], [0, 0, 1], [1, 0, 0]],
- [[0, 1, 0], [0, 0, 1], [0, 0, 1]]])
- assert_array_equal(H, answer)
-
- # Check normalization
- ed = [[-2, 0, 2], [0, 1, 2, 3], [0, 1, 2, 3]]
- H, edges = histogramdd(x, bins=ed, normed=True)
- assert_(np.all(H == answer / 12.))
-
- # Check that H has the correct shape.
- H, edges = histogramdd(x, (2, 3, 4),
- range=[[-1, 1], [0, 3], [0, 4]],
- normed=True)
- answer = np.array([[[0, 1, 0, 0], [0, 0, 1, 0], [1, 0, 0, 0]],
- [[0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 1, 0]]])
- assert_array_almost_equal(H, answer / 6., 4)
- # Check that a sequence of arrays is accepted and H has the correct
- # shape.
- z = [np.squeeze(y) for y in split(x, 3, axis=1)]
- H, edges = histogramdd(
- z, bins=(4, 3, 2), range=[[-2, 2], [0, 3], [0, 2]])
- answer = np.array([[[0, 0], [0, 0], [0, 0]],
- [[0, 1], [0, 0], [1, 0]],
- [[0, 1], [0, 0], [0, 0]],
- [[0, 0], [0, 0], [0, 0]]])
- assert_array_equal(H, answer)
-
- Z = np.zeros((5, 5, 5))
- Z[list(range(5)), list(range(5)), list(range(5))] = 1.
- H, edges = histogramdd([np.arange(5), np.arange(5), np.arange(5)], 5)
- assert_array_equal(H, Z)
-
- def test_shape_3d(self):
- # All possible permutations for bins of different lengths in 3D.
- bins = ((5, 4, 6), (6, 4, 5), (5, 6, 4), (4, 6, 5), (6, 5, 4),
- (4, 5, 6))
- r = rand(10, 3)
- for b in bins:
- H, edges = histogramdd(r, b)
- assert_(H.shape == b)
-
- def test_shape_4d(self):
- # All possible permutations for bins of different lengths in 4D.
- bins = ((7, 4, 5, 6), (4, 5, 7, 6), (5, 6, 4, 7), (7, 6, 5, 4),
- (5, 7, 6, 4), (4, 6, 7, 5), (6, 5, 7, 4), (7, 5, 4, 6),
- (7, 4, 6, 5), (6, 4, 7, 5), (6, 7, 5, 4), (4, 6, 5, 7),
- (4, 7, 5, 6), (5, 4, 6, 7), (5, 7, 4, 6), (6, 7, 4, 5),
- (6, 5, 4, 7), (4, 7, 6, 5), (4, 5, 6, 7), (7, 6, 4, 5),
- (5, 4, 7, 6), (5, 6, 7, 4), (6, 4, 5, 7), (7, 5, 6, 4))
-
- r = rand(10, 4)
- for b in bins:
- H, edges = histogramdd(r, b)
- assert_(H.shape == b)
-
- def test_weights(self):
- v = rand(100, 2)
- hist, edges = histogramdd(v)
- n_hist, edges = histogramdd(v, normed=True)
- w_hist, edges = histogramdd(v, weights=np.ones(100))
- assert_array_equal(w_hist, hist)
- w_hist, edges = histogramdd(v, weights=np.ones(100) * 2, normed=True)
- assert_array_equal(w_hist, n_hist)
- w_hist, edges = histogramdd(v, weights=np.ones(100, int) * 2)
- assert_array_equal(w_hist, 2 * hist)
-
- def test_identical_samples(self):
- x = np.zeros((10, 2), int)
- hist, edges = histogramdd(x, bins=2)
- assert_array_equal(edges[0], np.array([-0.5, 0., 0.5]))
-
- def test_empty(self):
- a, b = histogramdd([[], []], bins=([0, 1], [0, 1]))
- assert_array_max_ulp(a, np.array([[0.]]))
- a, b = np.histogramdd([[], [], []], bins=2)
- assert_array_max_ulp(a, np.zeros((2, 2, 2)))
-
- def test_bins_errors(self):
- # There are two ways to specify bins. Check for the right errors
- # when mixing those.
- x = np.arange(8).reshape(2, 4)
- assert_raises(ValueError, np.histogramdd, x, bins=[-1, 2, 4, 5])
- assert_raises(ValueError, np.histogramdd, x, bins=[1, 0.99, 1, 1])
- assert_raises(
- ValueError, np.histogramdd, x, bins=[1, 1, 1, [1, 2, 2, 3]])
- assert_raises(
- ValueError, np.histogramdd, x, bins=[1, 1, 1, [1, 2, 3, -3]])
- assert_(np.histogramdd(x, bins=[1, 1, 1, [1, 2, 3, 4]]))
-
- def test_inf_edges(self):
- # Test using +/-inf bin edges works. See #1788.
- with np.errstate(invalid='ignore'):
- x = np.arange(6).reshape(3, 2)
- expected = np.array([[1, 0], [0, 1], [0, 1]])
- h, e = np.histogramdd(x, bins=[3, [-np.inf, 2, 10]])
- assert_allclose(h, expected)
- h, e = np.histogramdd(x, bins=[3, np.array([-1, 2, np.inf])])
- assert_allclose(h, expected)
- h, e = np.histogramdd(x, bins=[3, [-np.inf, 3, np.inf]])
- assert_allclose(h, expected)
-
- def test_rightmost_binedge(self):
- # Test event very close to rightmost binedge. See Github issue #4266
- x = [0.9999999995]
- bins = [[0., 0.5, 1.0]]
- hist, _ = histogramdd(x, bins=bins)
- assert_(hist[0] == 0.0)
- assert_(hist[1] == 1.)
- x = [1.0]
- bins = [[0., 0.5, 1.0]]
- hist, _ = histogramdd(x, bins=bins)
- assert_(hist[0] == 0.0)
- assert_(hist[1] == 1.)
- x = [1.0000000001]
- bins = [[0., 0.5, 1.0]]
- hist, _ = histogramdd(x, bins=bins)
- assert_(hist[0] == 0.0)
- assert_(hist[1] == 1.)
- x = [1.0001]
- bins = [[0., 0.5, 1.0]]
- hist, _ = histogramdd(x, bins=bins)
- assert_(hist[0] == 0.0)
- assert_(hist[1] == 0.0)
-
- def test_finite_range(self):
- vals = np.random.random((100, 3))
- histogramdd(vals, range=[[0.0, 1.0], [0.25, 0.75], [0.25, 0.5]])
- assert_raises(ValueError, histogramdd, vals,
- range=[[0.0, 1.0], [0.25, 0.75], [0.25, np.inf]])
- assert_raises(ValueError, histogramdd, vals,
- range=[[0.0, 1.0], [np.nan, 0.75], [0.25, 0.5]])
-
-
-class TestUnique(TestCase):
+class TestUnique(object):
def test_simple(self):
x = np.array([4, 3, 2, 1, 1, 2, 3, 4, 0])
@@ -1930,7 +1654,7 @@ class TestUnique(TestCase):
assert_(np.all(unique(x) == [1 + 1j, 1 + 10j, 5 + 6j, 10]))
-class TestCheckFinite(TestCase):
+class TestCheckFinite(object):
def test_simple(self):
a = [1, 2, 3]
@@ -1947,7 +1671,7 @@ class TestCheckFinite(TestCase):
assert_(a.dtype == np.float64)
-class TestCorrCoef(TestCase):
+class TestCorrCoef(object):
A = np.array(
[[0.15391142, 0.18045767, 0.14197213],
[0.70461506, 0.96474128, 0.27906989],
@@ -2032,7 +1756,7 @@ class TestCorrCoef(TestCase):
assert_(np.all(np.abs(c) <= 1.0))
-class TestCov(TestCase):
+class TestCov(object):
x1 = np.array([[0, 2], [1, 1], [2, 0]]).T
res1 = np.array([[1., -1.], [-1., 1.]])
x2 = np.array([0.0, 1.0, 2.0], ndmin=2)
@@ -2050,7 +1774,9 @@ class TestCov(TestCase):
def test_complex(self):
x = np.array([[1, 2, 3], [1j, 2j, 3j]])
- assert_allclose(cov(x), np.array([[1., -1.j], [1.j, 1.]]))
+ res = np.array([[1., -1.j], [1.j, 1.]])
+ assert_allclose(cov(x), res)
+ assert_allclose(cov(x, aweights=np.ones(3)), res)
def test_xy(self):
x = np.array([[1, 2, 3]])
@@ -2130,7 +1856,7 @@ class TestCov(TestCase):
self.res1)
-class Test_I0(TestCase):
+class Test_I0(object):
def test_simple(self):
assert_almost_equal(
@@ -2156,7 +1882,7 @@ class Test_I0(TestCase):
[1.05884290, 1.06432317]]))
-class TestKaiser(TestCase):
+class TestKaiser(object):
def test_simple(self):
assert_(np.isfinite(kaiser(1, 1.0)))
@@ -2175,7 +1901,7 @@ class TestKaiser(TestCase):
kaiser(3, 4)
-class TestMsort(TestCase):
+class TestMsort(object):
def test_simple(self):
A = np.array([[0.44567325, 0.79115165, 0.54900530],
@@ -2188,7 +1914,7 @@ class TestMsort(TestCase):
[0.64864341, 0.79115165, 0.96098397]]))
-class TestMeshgrid(TestCase):
+class TestMeshgrid(object):
def test_simple(self):
[X, Y] = meshgrid([1, 2, 3], [4, 5, 6, 7])
@@ -2277,7 +2003,7 @@ class TestMeshgrid(TestCase):
assert_equal(x[1, :], X)
-class TestPiecewise(TestCase):
+class TestPiecewise(object):
def test_simple(self):
# Condition is single bool list
@@ -2303,6 +2029,11 @@ class TestPiecewise(TestCase):
x = piecewise([0, 0], [[False, True]], [lambda x:-1])
assert_array_equal(x, [0, -1])
+ assert_raises_regex(ValueError, '1 or 2 functions are expected',
+ piecewise, [0, 0], [[False, True]], [])
+ assert_raises_regex(ValueError, '1 or 2 functions are expected',
+ piecewise, [0, 0], [[False, True]], [1, 2, 3])
+
def test_two_conditions(self):
x = piecewise([1, 2], [[True, False], [False, True]], [3, 4])
assert_array_equal(x, [3, 4])
@@ -2327,7 +2058,7 @@ class TestPiecewise(TestCase):
assert_(y == 0)
x = 5
- y = piecewise(x, [[True], [False]], [1, 0])
+ y = piecewise(x, [True, False], [1, 0])
assert_(y.ndim == 0)
assert_(y == 1)
@@ -2345,6 +2076,17 @@ class TestPiecewise(TestCase):
y = piecewise(x, [x <= 3, (x > 3) * (x <= 5), x > 5], [1, 2, 3])
assert_array_equal(y, 2)
+ assert_raises_regex(ValueError, '2 or 3 functions are expected',
+ piecewise, x, [x <= 3, x > 3], [1])
+ assert_raises_regex(ValueError, '2 or 3 functions are expected',
+ piecewise, x, [x <= 3, x > 3], [1, 1, 1, 1])
+
+ def test_0d_0d_condition(self):
+ x = np.array(3)
+ c = np.array(x > 3)
+ y = piecewise(x, [c], [1, 2])
+ assert_equal(y, 2)
+
def test_multidimensional_extrafunc(self):
x = np.array([[-2.5, -1.5, -0.5],
[0.5, 1.5, 2.5]])
@@ -2353,7 +2095,7 @@ class TestPiecewise(TestCase):
[3., 3., 1.]]))
-class TestBincount(TestCase):
+class TestBincount(object):
def test_simple(self):
y = np.bincount(np.arange(4))
@@ -2379,11 +2121,16 @@ class TestBincount(TestCase):
x = np.array([0, 1, 0, 1, 1])
y = np.bincount(x, minlength=3)
assert_array_equal(y, np.array([2, 3, 0]))
+ x = []
+ y = np.bincount(x, minlength=0)
+ assert_array_equal(y, np.array([]))
def test_with_minlength_smaller_than_maxvalue(self):
x = np.array([0, 1, 1, 2, 2, 3, 3])
y = np.bincount(x, minlength=2)
assert_array_equal(y, np.array([1, 2, 2, 2]))
+ y = np.bincount(x, minlength=0)
+ assert_array_equal(y, np.array([1, 2, 2, 2]))
def test_with_minlength_and_weights(self):
x = np.array([1, 2, 4, 5, 2])
@@ -2407,24 +2154,18 @@ class TestBincount(TestCase):
"'str' object cannot be interpreted",
lambda: np.bincount(x, minlength="foobar"))
assert_raises_regex(ValueError,
- "must be positive",
+ "must not be negative",
lambda: np.bincount(x, minlength=-1))
- assert_raises_regex(ValueError,
- "must be positive",
- lambda: np.bincount(x, minlength=0))
x = np.arange(5)
assert_raises_regex(TypeError,
"'str' object cannot be interpreted",
lambda: np.bincount(x, minlength="foobar"))
assert_raises_regex(ValueError,
- "minlength must be positive",
+ "must not be negative",
lambda: np.bincount(x, minlength=-1))
- assert_raises_regex(ValueError,
- "minlength must be positive",
- lambda: np.bincount(x, minlength=0))
- @dec.skipif(not HAS_REFCOUNT, "python has no sys.getrefcount")
+ @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
def test_dtype_reference_leaks(self):
# gh-6805
intp_refcount = sys.getrefcount(np.dtype(np.intp))
@@ -2441,7 +2182,7 @@ class TestBincount(TestCase):
assert_equal(sys.getrefcount(np.dtype(np.double)), double_refcount)
-class TestInterp(TestCase):
+class TestInterp(object):
def test_exceptions(self):
assert_raises(ValueError, interp, 0, [], [])
@@ -2468,28 +2209,28 @@ class TestInterp(TestCase):
incres = interp(incpts, xp, yp)
decres = interp(decpts, xp, yp)
- inctgt = np.array([1, 1, 1, 1], dtype=np.float)
+ inctgt = np.array([1, 1, 1, 1], dtype=float)
dectgt = inctgt[::-1]
assert_equal(incres, inctgt)
assert_equal(decres, dectgt)
incres = interp(incpts, xp, yp, left=0)
decres = interp(decpts, xp, yp, left=0)
- inctgt = np.array([0, 1, 1, 1], dtype=np.float)
+ inctgt = np.array([0, 1, 1, 1], dtype=float)
dectgt = inctgt[::-1]
assert_equal(incres, inctgt)
assert_equal(decres, dectgt)
incres = interp(incpts, xp, yp, right=2)
decres = interp(decpts, xp, yp, right=2)
- inctgt = np.array([1, 1, 1, 2], dtype=np.float)
+ inctgt = np.array([1, 1, 1, 2], dtype=float)
dectgt = inctgt[::-1]
assert_equal(incres, inctgt)
assert_equal(decres, dectgt)
incres = interp(incpts, xp, yp, left=0, right=2)
decres = interp(decpts, xp, yp, left=0, right=2)
- inctgt = np.array([0, 1, 1, 2], dtype=np.float)
+ inctgt = np.array([0, 1, 1, 2], dtype=float)
dectgt = inctgt[::-1]
assert_equal(incres, inctgt)
assert_equal(decres, dectgt)
@@ -2508,6 +2249,14 @@ class TestInterp(TestCase):
x0 = np.nan
assert_almost_equal(np.interp(x0, x, y), x0)
+ def test_non_finite_behavior(self):
+ x = [1, 2, 2.5, 3, 4]
+ xp = [1, 2, 3, 4]
+ fp = [1, 2, np.inf, 4]
+ assert_almost_equal(np.interp(x, xp, fp), [1, 2, np.inf, np.inf, 4])
+ fp = [1, 2, np.nan, 4]
+ assert_almost_equal(np.interp(x, xp, fp), [1, 2, np.nan, np.nan, 4])
+
def test_complex_interp(self):
# test complex interpolation
x = np.linspace(0, 1, 5)
@@ -2522,6 +2271,12 @@ class TestInterp(TestCase):
x0 = 2.0
right = 2 + 3.0j
assert_almost_equal(np.interp(x0, x, y, right=right), right)
+ # test complex non finite
+ x = [1, 2, 2.5, 3, 4]
+ xp = [1, 2, 3, 4]
+ fp = [1, 2+1j, np.inf, 4]
+ y = [1, 2+1j, np.inf+0.5j, np.inf, 4]
+ assert_almost_equal(np.interp(x, xp, fp), y)
# test complex periodic
x = [-180, -170, -185, 185, -10, -5, 0, 365]
xp = [190, -190, 350, -350]
@@ -2535,8 +2290,17 @@ class TestInterp(TestCase):
y = np.linspace(0, 1, 5)
x0 = np.array(.3)
assert_almost_equal(np.interp(x0, x, y), x0)
- x0 = np.array(.3, dtype=object)
- assert_almost_equal(np.interp(x0, x, y), .3)
+
+ xp = np.array([0, 2, 4])
+ fp = np.array([1, -1, 1])
+
+ actual = np.interp(np.array(1), xp, fp)
+ assert_equal(actual, 0)
+ assert_(isinstance(actual, np.float64))
+
+ actual = np.interp(np.array(4.5), xp, fp, period=4)
+ assert_equal(actual, 0.5)
+ assert_(isinstance(actual, np.float64))
def test_if_len_x_is_small(self):
xp = np.arange(0, 10, 0.0001)
@@ -2559,7 +2323,7 @@ def compare_results(res, desired):
assert_array_equal(res[i], desired[i])
-class TestPercentile(TestCase):
+class TestPercentile(object):
def test_basic(self):
x = np.arange(8) * 0.5
@@ -2660,10 +2424,10 @@ class TestPercentile(TestCase):
interpolation="higher").shape, (3, 3, 5, 6))
def test_scalar_q(self):
- # test for no empty dimensions for compatiblity with old percentile
+ # test for no empty dimensions for compatibility with old percentile
x = np.arange(12).reshape(3, 4)
assert_equal(np.percentile(x, 50), 5.5)
- self.assertTrue(np.isscalar(np.percentile(x, 50)))
+ assert_(np.isscalar(np.percentile(x, 50)))
r0 = np.array([4., 5., 6., 7.])
assert_equal(np.percentile(x, 50, axis=0), r0)
assert_equal(np.percentile(x, 50, axis=0).shape, r0.shape)
@@ -2681,10 +2445,10 @@ class TestPercentile(TestCase):
assert_equal(np.percentile(x, 50, axis=1, out=out), r1)
assert_equal(out, r1)
- # test for no empty dimensions for compatiblity with old percentile
+ # test for no empty dimensions for compatibility with old percentile
x = np.arange(12).reshape(3, 4)
assert_equal(np.percentile(x, 50, interpolation='lower'), 5.)
- self.assertTrue(np.isscalar(np.percentile(x, 50)))
+ assert_(np.isscalar(np.percentile(x, 50)))
r0 = np.array([4., 5., 6., 7.])
c0 = np.percentile(x, 50, interpolation='lower', axis=0)
assert_equal(c0, r0)
@@ -2816,7 +2580,7 @@ class TestPercentile(TestCase):
o = np.random.normal(size=(71, 23))
x = np.dstack([o] * 10)
assert_equal(np.percentile(x, 30, axis=(0, 1)), np.percentile(o, 30))
- x = np.rollaxis(x, -1, 0)
+ x = np.moveaxis(x, -1, 0)
assert_equal(np.percentile(x, 30, axis=(-2, -1)), np.percentile(o, 30))
x = x.swapaxes(0, 1).copy()
assert_equal(np.percentile(x, 30, axis=(0, -1)), np.percentile(o, 30))
@@ -2846,11 +2610,14 @@ class TestPercentile(TestCase):
def test_extended_axis_invalid(self):
d = np.ones((3, 5, 7, 11))
- assert_raises(IndexError, np.percentile, d, axis=-5, q=25)
- assert_raises(IndexError, np.percentile, d, axis=(0, -5), q=25)
- assert_raises(IndexError, np.percentile, d, axis=4, q=25)
- assert_raises(IndexError, np.percentile, d, axis=(0, 4), q=25)
+ assert_raises(np.AxisError, np.percentile, d, axis=-5, q=25)
+ assert_raises(np.AxisError, np.percentile, d, axis=(0, -5), q=25)
+ assert_raises(np.AxisError, np.percentile, d, axis=4, q=25)
+ assert_raises(np.AxisError, np.percentile, d, axis=(0, 4), q=25)
+ # each of these refers to the same axis twice
assert_raises(ValueError, np.percentile, d, axis=(1, 1), q=25)
+ assert_raises(ValueError, np.percentile, d, axis=(-1, -1), q=25)
+ assert_raises(ValueError, np.percentile, d, axis=(3, -1), q=25)
def test_keepdims(self):
d = np.ones((3, 5, 7, 11))
@@ -2987,7 +2754,29 @@ class TestPercentile(TestCase):
a, [0.3, 0.6], (0, 2), interpolation='nearest'), b)
-class TestMedian(TestCase):
+class TestQuantile(object):
+ # most of this is already tested by TestPercentile
+
+ def test_basic(self):
+ x = np.arange(8) * 0.5
+ assert_equal(np.quantile(x, 0), 0.)
+ assert_equal(np.quantile(x, 1), 3.5)
+ assert_equal(np.quantile(x, 0.5), 1.75)
+
+ def test_no_p_overwrite(self):
+ # this is worth retesting, because quantile does not make a copy
+ p0 = np.array([0, 0.75, 0.25, 0.5, 1.0])
+ p = p0.copy()
+ np.quantile(np.arange(100.), p, interpolation="midpoint")
+ assert_array_equal(p, p0)
+
+ p0 = p0.tolist()
+ p = p.tolist()
+ np.quantile(np.arange(100.), p, interpolation="midpoint")
+ assert_array_equal(p, p0)
+
+
+class TestMedian(object):
def test_basic(self):
a0 = np.array(1)
@@ -3194,7 +2983,7 @@ class TestMedian(TestCase):
o = np.random.normal(size=(71, 23))
x = np.dstack([o] * 10)
assert_equal(np.median(x, axis=(0, 1)), np.median(o))
- x = np.rollaxis(x, -1, 0)
+ x = np.moveaxis(x, -1, 0)
assert_equal(np.median(x, axis=(-2, -1)), np.median(o))
x = x.swapaxes(0, 1).copy()
assert_equal(np.median(x, axis=(0, -1)), np.median(o))
@@ -3222,10 +3011,10 @@ class TestMedian(TestCase):
def test_extended_axis_invalid(self):
d = np.ones((3, 5, 7, 11))
- assert_raises(IndexError, np.median, d, axis=-5)
- assert_raises(IndexError, np.median, d, axis=(0, -5))
- assert_raises(IndexError, np.median, d, axis=4)
- assert_raises(IndexError, np.median, d, axis=(0, 4))
+ assert_raises(np.AxisError, np.median, d, axis=-5)
+ assert_raises(np.AxisError, np.median, d, axis=(0, -5))
+ assert_raises(np.AxisError, np.median, d, axis=4)
+ assert_raises(np.AxisError, np.median, d, axis=(0, 4))
assert_raises(ValueError, np.median, d, axis=(1, 1))
def test_keepdims(self):
@@ -3244,7 +3033,7 @@ class TestMedian(TestCase):
(1, 1, 7, 1))
-class TestAdd_newdoc_ufunc(TestCase):
+class TestAdd_newdoc_ufunc(object):
def test_ufunc_arg(self):
assert_raises(TypeError, add_newdoc_ufunc, 2, "blah")
@@ -3254,16 +3043,12 @@ class TestAdd_newdoc_ufunc(TestCase):
assert_raises(TypeError, add_newdoc_ufunc, np.add, 3)
-class TestAdd_newdoc(TestCase):
+class TestAdd_newdoc(object):
- @dec.skipif(sys.flags.optimize == 2)
+ @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO")
def test_add_doc(self):
# test np.add_newdoc
tgt = "Current flat index into the array."
- self.assertEqual(np.core.flatiter.index.__doc__[:len(tgt)], tgt)
- self.assertTrue(len(np.core.ufunc.identity.__doc__) > 300)
- self.assertTrue(len(np.lib.index_tricks.mgrid.__doc__) > 300)
-
-
-if __name__ == "__main__":
- run_module_suite()
+ assert_equal(np.core.flatiter.index.__doc__[:len(tgt)], tgt)
+ assert_(len(np.core.ufunc.identity.__doc__) > 300)
+ assert_(len(np.lib.index_tricks.mgrid.__doc__) > 300)
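
Standalone sketch (not part of the patch): the hunks above exercise behaviour that is
new around this release, multi-axis/default-axis np.flip and np.quantile as the
[0, 1] counterpart of np.percentile. A short illustration of what the new tests
assert, assuming NumPy 1.15 or later where both are available:

    import numpy as np

    a = np.arange(8).reshape(2, 2, 2)
    assert np.array_equal(np.flip(a, axis=()), a)           # empty tuple: nothing flipped
    assert np.array_equal(np.flip(a), a[::-1, ::-1, ::-1])  # default: every axis flipped

    x = np.arange(8) * 0.5
    # quantile takes q in [0, 1]; percentile takes q in [0, 100]
    assert np.quantile(x, 0.5) == np.percentile(x, 50) == 1.75
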
diff --git a/numpy/lib/tests/test_histograms.py b/numpy/lib/tests/test_histograms.py
new file mode 100644
index 000000000..f136b5c81
--- /dev/null
+++ b/numpy/lib/tests/test_histograms.py
@@ -0,0 +1,745 @@
+from __future__ import division, absolute_import, print_function
+
+import numpy as np
+
+from numpy.lib.histograms import histogram, histogramdd, histogram_bin_edges
+from numpy.testing import (
+ assert_, assert_equal, assert_array_equal, assert_almost_equal,
+ assert_array_almost_equal, assert_raises, assert_allclose,
+ assert_array_max_ulp, assert_warns, assert_raises_regex, suppress_warnings,
+ )
+
+
+class TestHistogram(object):
+
+ def setup(self):
+ pass
+
+ def teardown(self):
+ pass
+
+ def test_simple(self):
+ n = 100
+ v = np.random.rand(n)
+ (a, b) = histogram(v)
+ # check if the sum of the bins equals the number of samples
+ assert_equal(np.sum(a, axis=0), n)
+ # check that the bin counts are evenly spaced when the data is from
+ # a linear function
+ (a, b) = histogram(np.linspace(0, 10, 100))
+ assert_array_equal(a, 10)
+
+ def test_one_bin(self):
+ # Ticket 632
+ hist, edges = histogram([1, 2, 3, 4], [1, 2])
+ assert_array_equal(hist, [2, ])
+ assert_array_equal(edges, [1, 2])
+ assert_raises(ValueError, histogram, [1, 2], bins=0)
+ h, e = histogram([1, 2], bins=1)
+ assert_equal(h, np.array([2]))
+ assert_allclose(e, np.array([1., 2.]))
+
+ def test_normed(self):
+ sup = suppress_warnings()
+ with sup:
+ rec = sup.record(np.VisibleDeprecationWarning, '.*normed.*')
+ # Check that the integral of the density equals 1.
+ n = 100
+ v = np.random.rand(n)
+ a, b = histogram(v, normed=True)
+ area = np.sum(a * np.diff(b))
+ assert_almost_equal(area, 1)
+ assert_equal(len(rec), 1)
+
+ sup = suppress_warnings()
+ with sup:
+ rec = sup.record(np.VisibleDeprecationWarning, '.*normed.*')
+ # Check with non-constant bin widths (buggy but backwards
+ # compatible)
+ v = np.arange(10)
+ bins = [0, 1, 5, 9, 10]
+ a, b = histogram(v, bins, normed=True)
+ area = np.sum(a * np.diff(b))
+ assert_almost_equal(area, 1)
+ assert_equal(len(rec), 1)
+
+ def test_density(self):
+ # Check that the integral of the density equals 1.
+ n = 100
+ v = np.random.rand(n)
+ a, b = histogram(v, density=True)
+ area = np.sum(a * np.diff(b))
+ assert_almost_equal(area, 1)
+
+ # Check with non-constant bin widths
+ v = np.arange(10)
+ bins = [0, 1, 3, 6, 10]
+ a, b = histogram(v, bins, density=True)
+ assert_array_equal(a, .1)
+ assert_equal(np.sum(a * np.diff(b)), 1)
+
+ # Test that passing False works too
+ a, b = histogram(v, bins, density=False)
+ assert_array_equal(a, [1, 2, 3, 4])
+
+ # Variable bin widths are especially useful to deal with
+ # infinities.
+ v = np.arange(10)
+ bins = [0, 1, 3, 6, np.inf]
+ a, b = histogram(v, bins, density=True)
+ assert_array_equal(a, [.1, .1, .1, 0.])
+
+ # Taken from a bug report from N. Becker on the numpy-discussion
+ # mailing list Aug. 6, 2010.
+ counts, dmy = np.histogram(
+ [1, 2, 3, 4], [0.5, 1.5, np.inf], density=True)
+ assert_equal(counts, [.25, 0])
+
+ def test_outliers(self):
+ # Check that outliers are not tallied
+ a = np.arange(10) + .5
+
+ # Lower outliers
+ h, b = histogram(a, range=[0, 9])
+ assert_equal(h.sum(), 9)
+
+ # Upper outliers
+ h, b = histogram(a, range=[1, 10])
+ assert_equal(h.sum(), 9)
+
+ # Normalization
+ h, b = histogram(a, range=[1, 9], density=True)
+ assert_almost_equal((h * np.diff(b)).sum(), 1, decimal=15)
+
+ # Weights
+ w = np.arange(10) + .5
+ h, b = histogram(a, range=[1, 9], weights=w, density=True)
+ assert_equal((h * np.diff(b)).sum(), 1)
+
+ h, b = histogram(a, bins=8, range=[1, 9], weights=w)
+ assert_equal(h, w[1:-1])
+
+ def test_type(self):
+ # Check the type of the returned histogram
+ a = np.arange(10) + .5
+ h, b = histogram(a)
+ assert_(np.issubdtype(h.dtype, np.integer))
+
+ h, b = histogram(a, density=True)
+ assert_(np.issubdtype(h.dtype, np.floating))
+
+ h, b = histogram(a, weights=np.ones(10, int))
+ assert_(np.issubdtype(h.dtype, np.integer))
+
+ h, b = histogram(a, weights=np.ones(10, float))
+ assert_(np.issubdtype(h.dtype, np.floating))
+
+ def test_f32_rounding(self):
+ # gh-4799, check that the rounding of the edges works with float32
+ x = np.array([276.318359, -69.593948, 21.329449], dtype=np.float32)
+ y = np.array([5005.689453, 4481.327637, 6010.369629], dtype=np.float32)
+ counts_hist, xedges, yedges = np.histogram2d(x, y, bins=100)
+ assert_equal(counts_hist.sum(), 3.)
+
+ def test_weights(self):
+ v = np.random.rand(100)
+ w = np.ones(100) * 5
+ a, b = histogram(v)
+ na, nb = histogram(v, density=True)
+ wa, wb = histogram(v, weights=w)
+ nwa, nwb = histogram(v, weights=w, density=True)
+ assert_array_almost_equal(a * 5, wa)
+ assert_array_almost_equal(na, nwa)
+
+ # Check weights are properly applied.
+ v = np.linspace(0, 10, 10)
+ w = np.concatenate((np.zeros(5), np.ones(5)))
+ wa, wb = histogram(v, bins=np.arange(11), weights=w)
+ assert_array_almost_equal(wa, w)
+
+ # Check with integer weights
+ wa, wb = histogram([1, 2, 2, 4], bins=4, weights=[4, 3, 2, 1])
+ assert_array_equal(wa, [4, 5, 0, 1])
+ wa, wb = histogram(
+ [1, 2, 2, 4], bins=4, weights=[4, 3, 2, 1], density=True)
+ assert_array_almost_equal(wa, np.array([4, 5, 0, 1]) / 10. / 3. * 4)
+
+ # Check weights with non-uniform bin widths
+ a, b = histogram(
+ np.arange(9), [0, 1, 3, 6, 10],
+ weights=[2, 1, 1, 1, 1, 1, 1, 1, 1], density=True)
+ assert_almost_equal(a, [.2, .1, .1, .075])
+
+ def test_exotic_weights(self):
+
+ # Test the use of weights that are not integers or floats, but e.g.
+ # complex numbers or object types.
+
+ # Complex weights
+ values = np.array([1.3, 2.5, 2.3])
+ weights = np.array([1, -1, 2]) + 1j * np.array([2, 1, 2])
+
+ # Check with custom bins
+ wa, wb = histogram(values, bins=[0, 2, 3], weights=weights)
+ assert_array_almost_equal(wa, np.array([1, 1]) + 1j * np.array([2, 3]))
+
+ # Check with even bins
+ wa, wb = histogram(values, bins=2, range=[1, 3], weights=weights)
+ assert_array_almost_equal(wa, np.array([1, 1]) + 1j * np.array([2, 3]))
+
+ # Decimal weights
+ from decimal import Decimal
+ values = np.array([1.3, 2.5, 2.3])
+ weights = np.array([Decimal(1), Decimal(2), Decimal(3)])
+
+ # Check with custom bins
+ wa, wb = histogram(values, bins=[0, 2, 3], weights=weights)
+ assert_array_almost_equal(wa, [Decimal(1), Decimal(5)])
+
+ # Check with even bins
+ wa, wb = histogram(values, bins=2, range=[1, 3], weights=weights)
+ assert_array_almost_equal(wa, [Decimal(1), Decimal(5)])
+
+ def test_no_side_effects(self):
+ # This is a regression test that ensures that values passed to
+ # ``histogram`` are unchanged.
+ values = np.array([1.3, 2.5, 2.3])
+ np.histogram(values, range=[-10, 10], bins=100)
+ assert_array_almost_equal(values, [1.3, 2.5, 2.3])
+
+ def test_empty(self):
+ a, b = histogram([], bins=([0, 1]))
+ assert_array_equal(a, np.array([0]))
+ assert_array_equal(b, np.array([0, 1]))
+
+ def test_error_binnum_type(self):
+ # Tests that the right error is raised if the bins argument is a float
+ vals = np.linspace(0.0, 1.0, num=100)
+ histogram(vals, 5)
+ assert_raises(TypeError, histogram, vals, 2.4)
+
+ def test_finite_range(self):
+ # Normal ranges should be fine
+ vals = np.linspace(0.0, 1.0, num=100)
+ histogram(vals, range=[0.25,0.75])
+ assert_raises(ValueError, histogram, vals, range=[np.nan,0.75])
+ assert_raises(ValueError, histogram, vals, range=[0.25,np.inf])
+
+ def test_bin_edge_cases(self):
+ # Ensure that floating-point computations correctly place edge cases.
+ arr = np.array([337, 404, 739, 806, 1007, 1811, 2012])
+ hist, edges = np.histogram(arr, bins=8296, range=(2, 2280))
+ mask = hist > 0
+ left_edges = edges[:-1][mask]
+ right_edges = edges[1:][mask]
+ for x, left, right in zip(arr, left_edges, right_edges):
+ assert_(x >= left)
+ assert_(x < right)
+
+ def test_last_bin_inclusive_range(self):
+ arr = np.array([0., 0., 0., 1., 2., 3., 3., 4., 5.])
+ hist, edges = np.histogram(arr, bins=30, range=(-0.5, 5))
+ assert_equal(hist[-1], 1)
+
+ def test_unsigned_monotonicity_check(self):
+ # Ensure a ValueError is raised if the bins are not monotonically increasing
+ # when bins contain unsigned values (see #9222)
+ arr = np.array([2])
+ bins = np.array([1, 3, 1], dtype='uint64')
+ with assert_raises(ValueError):
+ hist, edges = np.histogram(arr, bins=bins)
+
+ def test_object_array_of_0d(self):
+ # gh-7864
+ assert_raises(ValueError,
+ histogram, [np.array([0.4]) for i in range(10)] + [-np.inf])
+ assert_raises(ValueError,
+ histogram, [np.array([0.4]) for i in range(10)] + [np.inf])
+
+ # these should not crash
+ np.histogram([np.array([0.5]) for i in range(10)] + [.500000000000001])
+ np.histogram([np.array([0.5]) for i in range(10)] + [.5])
+
+ def test_some_nan_values(self):
+ # gh-7503
+ one_nan = np.array([0, 1, np.nan])
+ all_nan = np.array([np.nan, np.nan])
+
+ # the internal comparisons with NaN give warnings
+ sup = suppress_warnings()
+ sup.filter(RuntimeWarning)
+ with sup:
+ # can't infer range with nan
+ assert_raises(ValueError, histogram, one_nan, bins='auto')
+ assert_raises(ValueError, histogram, all_nan, bins='auto')
+
+ # explicit range solves the problem
+ h, b = histogram(one_nan, bins='auto', range=(0, 1))
+ assert_equal(h.sum(), 2) # nan is not counted
+ h, b = histogram(all_nan, bins='auto', range=(0, 1))
+ assert_equal(h.sum(), 0) # nan is not counted
+
+ # as does an explicit set of bins
+ h, b = histogram(one_nan, bins=[0, 1])
+ assert_equal(h.sum(), 2) # nan is not counted
+ h, b = histogram(all_nan, bins=[0, 1])
+ assert_equal(h.sum(), 0) # nan is not counted
+
+ def test_datetime(self):
+ begin = np.datetime64('2000-01-01', 'D')
+ offsets = np.array([0, 0, 1, 1, 2, 3, 5, 10, 20])
+ bins = np.array([0, 2, 7, 20])
+ dates = begin + offsets
+ date_bins = begin + bins
+
+ td = np.dtype('timedelta64[D]')
+
+ # Results should be the same for integer offsets or datetime values.
+ # For now, only explicit bins are supported, since linspace does not
+ # work on datetimes or timedeltas
+ d_count, d_edge = histogram(dates, bins=date_bins)
+ t_count, t_edge = histogram(offsets.astype(td), bins=bins.astype(td))
+ i_count, i_edge = histogram(offsets, bins=bins)
+
+ assert_equal(d_count, i_count)
+ assert_equal(t_count, i_count)
+
+ assert_equal((d_edge - begin).astype(int), i_edge)
+ assert_equal(t_edge.astype(int), i_edge)
+
+ assert_equal(d_edge.dtype, dates.dtype)
+ assert_equal(t_edge.dtype, td)
+
+ def do_precision_lower_bound(self, float_small, float_large):
+ eps = np.finfo(float_large).eps
+
+ arr = np.array([1.0], float_small)
+ range = np.array([1.0 + eps, 2.0], float_large)
+
+ # test is looking for behavior when the bounds change between dtypes
+ if range.astype(float_small)[0] != 1:
+ return
+
+ # previously crashed
+ count, x_loc = np.histogram(arr, bins=1, range=range)
+ assert_equal(count, [1])
+
+ # gh-10322 means that the type comes from arr - this may change
+ assert_equal(x_loc.dtype, float_small)
+
+ def do_precision_upper_bound(self, float_small, float_large):
+ eps = np.finfo(float_large).eps
+
+ arr = np.array([1.0], float_small)
+ range = np.array([0.0, 1.0 - eps], float_large)
+
+ # test is looking for behavior when the bounds change between dtypes
+ if range.astype(float_small)[-1] != 1:
+ return
+
+ # previously crashed
+ count, x_loc = np.histogram(arr, bins=1, range=range)
+ assert_equal(count, [1])
+
+ # gh-10322 means that the type comes from arr - this may change
+ assert_equal(x_loc.dtype, float_small)
+
+ def do_precision(self, float_small, float_large):
+ self.do_precision_lower_bound(float_small, float_large)
+ self.do_precision_upper_bound(float_small, float_large)
+
+ def test_precision(self):
+ # not looping results in a useful stack trace upon failure
+ self.do_precision(np.half, np.single)
+ self.do_precision(np.half, np.double)
+ self.do_precision(np.half, np.longdouble)
+ self.do_precision(np.single, np.double)
+ self.do_precision(np.single, np.longdouble)
+ self.do_precision(np.double, np.longdouble)
+
+ def test_histogram_bin_edges(self):
+ hist, e = histogram([1, 2, 3, 4], [1, 2])
+ edges = histogram_bin_edges([1, 2, 3, 4], [1, 2])
+ assert_array_equal(edges, e)
+
+ arr = np.array([0., 0., 0., 1., 2., 3., 3., 4., 5.])
+ hist, e = histogram(arr, bins=30, range=(-0.5, 5))
+ edges = histogram_bin_edges(arr, bins=30, range=(-0.5, 5))
+ assert_array_equal(edges, e)
+
+ hist, e = histogram(arr, bins='auto', range=(0, 1))
+ edges = histogram_bin_edges(arr, bins='auto', range=(0, 1))
+ assert_array_equal(edges, e)
+
+
+class TestHistogramOptimBinNums(object):
+ """
+ Provide test coverage for the supported estimators of the optimal
+ number of bins.
+ """
+
+ def test_empty(self):
+ estimator_list = ['fd', 'scott', 'rice', 'sturges',
+ 'doane', 'sqrt', 'auto']
+ # check it can deal with empty data
+ for estimator in estimator_list:
+ a, b = histogram([], bins=estimator)
+ assert_array_equal(a, np.array([0]))
+ assert_array_equal(b, np.array([0, 1]))
+
+ def test_simple(self):
+ """
+ Straightforward testing with a mixture of linspace data (for
+ consistency). All test values have been precomputed and the values
+ shouldn't change
+ """
+ # Some basic sanity checking, with some fixed data.
+ # Checking for the correct number of bins
+ basic_test = {50: {'fd': 4, 'scott': 4, 'rice': 8, 'sturges': 7,
+ 'doane': 8, 'sqrt': 8, 'auto': 7},
+ 500: {'fd': 8, 'scott': 8, 'rice': 16, 'sturges': 10,
+ 'doane': 12, 'sqrt': 23, 'auto': 10},
+ 5000: {'fd': 17, 'scott': 17, 'rice': 35, 'sturges': 14,
+ 'doane': 17, 'sqrt': 71, 'auto': 17}}
+
+ for testlen, expectedResults in basic_test.items():
+ # Create some sort of non uniform data to test with
+ # (2 peak uniform mixture)
+ x1 = np.linspace(-10, -1, testlen // 5 * 2)
+ x2 = np.linspace(1, 10, testlen // 5 * 3)
+ x = np.concatenate((x1, x2))
+ for estimator, numbins in expectedResults.items():
+ a, b = np.histogram(x, estimator)
+ assert_equal(len(a), numbins, err_msg="For the {0} estimator "
+ "with datasize of {1}".format(estimator, testlen))
+
+ def test_small(self):
+ """
+ Smaller datasets have the potential to cause issues with the data
+ adaptive methods, especially the FD method. All bin numbers have been
+ precalculated.
+ """
+ small_dat = {1: {'fd': 1, 'scott': 1, 'rice': 1, 'sturges': 1,
+ 'doane': 1, 'sqrt': 1},
+ 2: {'fd': 2, 'scott': 1, 'rice': 3, 'sturges': 2,
+ 'doane': 1, 'sqrt': 2},
+ 3: {'fd': 2, 'scott': 2, 'rice': 3, 'sturges': 3,
+ 'doane': 3, 'sqrt': 2}}
+
+ for testlen, expectedResults in small_dat.items():
+ testdat = np.arange(testlen)
+ for estimator, expbins in expectedResults.items():
+ a, b = np.histogram(testdat, estimator)
+ assert_equal(len(a), expbins, err_msg="For the {0} estimator "
+ "with datasize of {1}".format(estimator, testlen))
+
+ def test_incorrect_methods(self):
+ """
+ Check that a ValueError is raised when an unknown string is passed in
+ """
+ check_list = ['mad', 'freeman', 'histograms', 'IQR']
+ for estimator in check_list:
+ assert_raises(ValueError, histogram, [1, 2, 3], estimator)
+
+ def test_novariance(self):
+ """
+ Check that methods handle no variance in data
+ Primarily for Scott and FD as the SD and IQR are both 0 in this case
+ """
+ novar_dataset = np.ones(100)
+ novar_resultdict = {'fd': 1, 'scott': 1, 'rice': 1, 'sturges': 1,
+ 'doane': 1, 'sqrt': 1, 'auto': 1}
+
+ for estimator, numbins in novar_resultdict.items():
+ a, b = np.histogram(novar_dataset, estimator)
+ assert_equal(len(a), numbins, err_msg="{0} estimator, "
+ "No Variance test".format(estimator))
+
+ def test_limited_variance(self):
+ """
+ Check that when the IQR is 0 but variance exists, we return the Sturges
+ value and not the FD value.
+ """
+ lim_var_data = np.ones(1000)
+ lim_var_data[:3] = 0
+ lim_var_data[-4:] = 100
+
+ edges_auto = histogram_bin_edges(lim_var_data, 'auto')
+ assert_equal(edges_auto, np.linspace(0, 100, 12))
+
+ edges_fd = histogram_bin_edges(lim_var_data, 'fd')
+ assert_equal(edges_fd, np.array([0, 100]))
+
+ edges_sturges = histogram_bin_edges(lim_var_data, 'sturges')
+ assert_equal(edges_sturges, np.linspace(0, 100, 12))
+
+ def test_outlier(self):
+ """
+ Check the FD, Scott and Doane with outliers.
+
+ The FD estimates a smaller binwidth since it's less affected by
+ outliers. Since the range is so (artificially) large, this means more
+ bins, most of which will be empty, but the data of interest usually is
+ unaffected. The Scott estimator is more affected and returns fewer bins,
+ despite most of the variance being in one area of the data. The Doane
+ estimator lies somewhere between the other two.
+ """
+ xcenter = np.linspace(-10, 10, 50)
+ outlier_dataset = np.hstack((np.linspace(-110, -100, 5), xcenter))
+
+ outlier_resultdict = {'fd': 21, 'scott': 5, 'doane': 11}
+
+ for estimator, numbins in outlier_resultdict.items():
+ a, b = np.histogram(outlier_dataset, estimator)
+ assert_equal(len(a), numbins)
+
+ def test_simple_range(self):
+ """
+ Straightforward testing with a mixture of linspace data (for
+ consistency). Adding in a 3rd mixture that will then be
+ completely ignored. All test values have been precomputed and
+ they shouldn't change.
+ """
+ # some basic sanity checking, with some fixed data.
+ # Checking for the correct number of bins
+ basic_test = {
+ 50: {'fd': 8, 'scott': 8, 'rice': 15,
+ 'sturges': 14, 'auto': 14},
+ 500: {'fd': 15, 'scott': 16, 'rice': 32,
+ 'sturges': 20, 'auto': 20},
+ 5000: {'fd': 33, 'scott': 33, 'rice': 69,
+ 'sturges': 27, 'auto': 33}
+ }
+
+ for testlen, expectedResults in basic_test.items():
+ # create some sort of non uniform data to test with
+ # (3 peak uniform mixture)
+ x1 = np.linspace(-10, -1, testlen // 5 * 2)
+ x2 = np.linspace(1, 10, testlen // 5 * 3)
+ x3 = np.linspace(-100, -50, testlen)
+ x = np.hstack((x1, x2, x3))
+ for estimator, numbins in expectedResults.items():
+ a, b = np.histogram(x, estimator, range = (-20, 20))
+ msg = "For the {0} estimator".format(estimator)
+ msg += " with datasize of {0}".format(testlen)
+ assert_equal(len(a), numbins, err_msg=msg)
+
+ def test_simple_weighted(self):
+ """
+ Check that weighted data raises a TypeError
+ """
+ estimator_list = ['fd', 'scott', 'rice', 'sturges', 'auto']
+ for estimator in estimator_list:
+ assert_raises(TypeError, histogram, [1, 2, 3],
+ estimator, weights=[1, 2, 3])
+
+
+class TestHistogramdd(object):
+
+ def test_simple(self):
+ x = np.array([[-.5, .5, 1.5], [-.5, 1.5, 2.5], [-.5, 2.5, .5],
+ [.5, .5, 1.5], [.5, 1.5, 2.5], [.5, 2.5, 2.5]])
+ H, edges = histogramdd(x, (2, 3, 3),
+ range=[[-1, 1], [0, 3], [0, 3]])
+ answer = np.array([[[0, 1, 0], [0, 0, 1], [1, 0, 0]],
+ [[0, 1, 0], [0, 0, 1], [0, 0, 1]]])
+ assert_array_equal(H, answer)
+
+ # Check normalization
+ ed = [[-2, 0, 2], [0, 1, 2, 3], [0, 1, 2, 3]]
+ H, edges = histogramdd(x, bins=ed, density=True)
+ assert_(np.all(H == answer / 12.))
+
+ # Check that H has the correct shape.
+ H, edges = histogramdd(x, (2, 3, 4),
+ range=[[-1, 1], [0, 3], [0, 4]],
+ density=True)
+ answer = np.array([[[0, 1, 0, 0], [0, 0, 1, 0], [1, 0, 0, 0]],
+ [[0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 1, 0]]])
+ assert_array_almost_equal(H, answer / 6., 4)
+ # Check that a sequence of arrays is accepted and H has the correct
+ # shape.
+ z = [np.squeeze(y) for y in np.split(x, 3, axis=1)]
+ H, edges = histogramdd(
+ z, bins=(4, 3, 2), range=[[-2, 2], [0, 3], [0, 2]])
+ answer = np.array([[[0, 0], [0, 0], [0, 0]],
+ [[0, 1], [0, 0], [1, 0]],
+ [[0, 1], [0, 0], [0, 0]],
+ [[0, 0], [0, 0], [0, 0]]])
+ assert_array_equal(H, answer)
+
+ Z = np.zeros((5, 5, 5))
+ Z[list(range(5)), list(range(5)), list(range(5))] = 1.
+ H, edges = histogramdd([np.arange(5), np.arange(5), np.arange(5)], 5)
+ assert_array_equal(H, Z)
+
+ def test_shape_3d(self):
+ # All possible permutations for bins of different lengths in 3D.
+ bins = ((5, 4, 6), (6, 4, 5), (5, 6, 4), (4, 6, 5), (6, 5, 4),
+ (4, 5, 6))
+ r = np.random.rand(10, 3)
+ for b in bins:
+ H, edges = histogramdd(r, b)
+ assert_(H.shape == b)
+
+ def test_shape_4d(self):
+ # All possible permutations for bins of different lengths in 4D.
+ bins = ((7, 4, 5, 6), (4, 5, 7, 6), (5, 6, 4, 7), (7, 6, 5, 4),
+ (5, 7, 6, 4), (4, 6, 7, 5), (6, 5, 7, 4), (7, 5, 4, 6),
+ (7, 4, 6, 5), (6, 4, 7, 5), (6, 7, 5, 4), (4, 6, 5, 7),
+ (4, 7, 5, 6), (5, 4, 6, 7), (5, 7, 4, 6), (6, 7, 4, 5),
+ (6, 5, 4, 7), (4, 7, 6, 5), (4, 5, 6, 7), (7, 6, 4, 5),
+ (5, 4, 7, 6), (5, 6, 7, 4), (6, 4, 5, 7), (7, 5, 6, 4))
+
+ r = np.random.rand(10, 4)
+ for b in bins:
+ H, edges = histogramdd(r, b)
+ assert_(H.shape == b)
+
+ def test_weights(self):
+ v = np.random.rand(100, 2)
+ hist, edges = histogramdd(v)
+ n_hist, edges = histogramdd(v, density=True)
+ w_hist, edges = histogramdd(v, weights=np.ones(100))
+ assert_array_equal(w_hist, hist)
+ w_hist, edges = histogramdd(v, weights=np.ones(100) * 2, density=True)
+ assert_array_equal(w_hist, n_hist)
+ w_hist, edges = histogramdd(v, weights=np.ones(100, int) * 2)
+ assert_array_equal(w_hist, 2 * hist)
+
+ def test_identical_samples(self):
+ x = np.zeros((10, 2), int)
+ hist, edges = histogramdd(x, bins=2)
+ assert_array_equal(edges[0], np.array([-0.5, 0., 0.5]))
+
+ def test_empty(self):
+ a, b = histogramdd([[], []], bins=([0, 1], [0, 1]))
+ assert_array_max_ulp(a, np.array([[0.]]))
+ a, b = np.histogramdd([[], [], []], bins=2)
+ assert_array_max_ulp(a, np.zeros((2, 2, 2)))
+
+ def test_bins_errors(self):
+ # There are two ways to specify bins. Check for the right errors
+ # when mixing those.
+ x = np.arange(8).reshape(2, 4)
+ assert_raises(ValueError, np.histogramdd, x, bins=[-1, 2, 4, 5])
+ assert_raises(ValueError, np.histogramdd, x, bins=[1, 0.99, 1, 1])
+ assert_raises(
+ ValueError, np.histogramdd, x, bins=[1, 1, 1, [1, 2, 3, -3]])
+ assert_(np.histogramdd(x, bins=[1, 1, 1, [1, 2, 3, 4]]))
+
+ def test_inf_edges(self):
+ # Test using +/-inf bin edges works. See #1788.
+ with np.errstate(invalid='ignore'):
+ x = np.arange(6).reshape(3, 2)
+ expected = np.array([[1, 0], [0, 1], [0, 1]])
+ h, e = np.histogramdd(x, bins=[3, [-np.inf, 2, 10]])
+ assert_allclose(h, expected)
+ h, e = np.histogramdd(x, bins=[3, np.array([-1, 2, np.inf])])
+ assert_allclose(h, expected)
+ h, e = np.histogramdd(x, bins=[3, [-np.inf, 3, np.inf]])
+ assert_allclose(h, expected)
+
+ def test_rightmost_binedge(self):
+ # Test event very close to rightmost binedge. See Github issue #4266
+ x = [0.9999999995]
+ bins = [[0., 0.5, 1.0]]
+ hist, _ = histogramdd(x, bins=bins)
+ assert_(hist[0] == 0.0)
+ assert_(hist[1] == 1.)
+ x = [1.0]
+ bins = [[0., 0.5, 1.0]]
+ hist, _ = histogramdd(x, bins=bins)
+ assert_(hist[0] == 0.0)
+ assert_(hist[1] == 1.)
+ x = [1.0000000001]
+ bins = [[0., 0.5, 1.0]]
+ hist, _ = histogramdd(x, bins=bins)
+ assert_(hist[0] == 0.0)
+ assert_(hist[1] == 0.0)
+ x = [1.0001]
+ bins = [[0., 0.5, 1.0]]
+ hist, _ = histogramdd(x, bins=bins)
+ assert_(hist[0] == 0.0)
+ assert_(hist[1] == 0.0)
+
+ def test_finite_range(self):
+ vals = np.random.random((100, 3))
+ histogramdd(vals, range=[[0.0, 1.0], [0.25, 0.75], [0.25, 0.5]])
+ assert_raises(ValueError, histogramdd, vals,
+ range=[[0.0, 1.0], [0.25, 0.75], [0.25, np.inf]])
+ assert_raises(ValueError, histogramdd, vals,
+ range=[[0.0, 1.0], [np.nan, 0.75], [0.25, 0.5]])
+
+ def test_equal_edges(self):
+ """ Test that adjacent entries in an edge array can be equal """
+ x = np.array([0, 1, 2])
+ y = np.array([0, 1, 2])
+ x_edges = np.array([0, 2, 2])
+ y_edges = 1
+ hist, edges = histogramdd((x, y), bins=(x_edges, y_edges))
+
+ hist_expected = np.array([
+ [2.],
+ [1.], # x == 2 falls in the final bin
+ ])
+ assert_equal(hist, hist_expected)
+
+ def test_edge_dtype(self):
+ """ Test that if an edge array is input, its type is preserved """
+ x = np.array([0, 10, 20])
+ y = x / 10
+ x_edges = np.array([0, 5, 15, 20])
+ y_edges = x_edges / 10
+ hist, edges = histogramdd((x, y), bins=(x_edges, y_edges))
+
+ assert_equal(edges[0].dtype, x_edges.dtype)
+ assert_equal(edges[1].dtype, y_edges.dtype)
+
+ def test_large_integers(self):
+ big = 2**60 # Too large to represent with a full precision float
+
+ x = np.array([0], np.int64)
+ x_edges = np.array([-1, +1], np.int64)
+ y = big + x
+ y_edges = big + x_edges
+
+ hist, edges = histogramdd((x, y), bins=(x_edges, y_edges))
+
+ assert_equal(hist[0, 0], 1)
+
+ def test_density_non_uniform_2d(self):
+ # Defines the following grid:
+ #
+ # 0 2 8
+ # 0+-+-----+
+ # + | +
+ # + | +
+ # 6+-+-----+
+ # 8+-+-----+
+ x_edges = np.array([0, 2, 8])
+ y_edges = np.array([0, 6, 8])
+ relative_areas = np.array([
+ [3, 9],
+ [1, 3]])
+
+ # ensure the number of points in each region is proportional to its area
+ x = np.array([1] + [1]*3 + [7]*3 + [7]*9)
+ y = np.array([7] + [1]*3 + [7]*3 + [1]*9)
+
+ # sanity check that the above worked as intended
+ hist, edges = histogramdd((y, x), bins=(y_edges, x_edges))
+ assert_equal(hist, relative_areas)
+
+ # resulting histogram should be uniform, since counts and areas are proportional
+ hist, edges = histogramdd((y, x), bins=(y_edges, x_edges), density=True)
+ assert_equal(hist, 1 / (8*8))
+
+ def test_density_non_uniform_1d(self):
+ # compare to histogram to show the results are the same
+ v = np.arange(10)
+ bins = np.array([0, 1, 3, 6, 10])
+ hist, edges = histogram(v, bins, density=True)
+ hist_dd, edges_dd = histogramdd((v,), (bins,), density=True)
+ assert_equal(hist, hist_dd)
+ assert_equal(edges, edges_dd[0])
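For reference, the density tests in the new file above all rest on one identity: with density=True each count is divided by the total number of samples times its bin width, so the histogram integrates to 1. A minimal sketch of that identity (an editorial illustration, not part of the patch), reusing the non-uniform bins from test_density:

import numpy as np

v = np.arange(10)
bins = [0, 1, 3, 6, 10]  # non-uniform bin widths
hist, edges = np.histogram(v, bins, density=True)
# counts [1, 2, 3, 4] divided by (n_samples * bin widths) -> all 0.1
assert np.allclose(hist, np.array([1, 2, 3, 4]) / (10 * np.diff(edges)))
# the density integrates to 1 over the binned range
assert np.isclose(np.sum(hist * np.diff(edges)), 1.0)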
diff --git a/numpy/lib/tests/test_index_tricks.py b/numpy/lib/tests/test_index_tricks.py
index e645114ab..8f76f5d11 100644
--- a/numpy/lib/tests/test_index_tricks.py
+++ b/numpy/lib/tests/test_index_tricks.py
@@ -2,16 +2,16 @@ from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.testing import (
- run_module_suite, TestCase, assert_, assert_equal, assert_array_equal,
- assert_almost_equal, assert_array_almost_equal, assert_raises
+ assert_, assert_equal, assert_array_equal, assert_almost_equal,
+ assert_array_almost_equal, assert_raises, assert_raises_regex
)
from numpy.lib.index_tricks import (
- mgrid, ndenumerate, fill_diagonal, diag_indices, diag_indices_from,
+ mgrid, ogrid, ndenumerate, fill_diagonal, diag_indices, diag_indices_from,
index_exp, ndindex, r_, s_, ix_
)
-class TestRavelUnravelIndex(TestCase):
+class TestRavelUnravelIndex(object):
def test_basic(self):
assert_equal(np.unravel_index(2, (2, 2)), (1, 0))
assert_equal(np.ravel_multi_index((1, 0), (2, 2)), 2)
@@ -110,11 +110,21 @@ class TestRavelUnravelIndex(TestCase):
def test_writeability(self):
# See gh-7269
x, y = np.unravel_index([1, 2, 3], (4, 5))
- self.assertTrue(x.flags.writeable)
- self.assertTrue(y.flags.writeable)
+ assert_(x.flags.writeable)
+ assert_(y.flags.writeable)
-class TestGrid(TestCase):
+ def test_0d(self):
+ # gh-580
+ x = np.unravel_index(0, ())
+ assert_equal(x, ())
+
+ assert_raises_regex(ValueError, "0d array", np.unravel_index, [0], ())
+ assert_raises_regex(
+ ValueError, "out of bounds", np.unravel_index, [1], ())
+
+
+class TestGrid(object):
def test_basic(self):
a = mgrid[-1:1:10j]
b = mgrid[-1:1:0.1]
@@ -146,8 +156,17 @@ class TestGrid(TestCase):
assert_array_almost_equal(d[1, :, 1] - d[1, :, 0],
0.2*np.ones(20, 'd'), 11)
+ def test_sparse(self):
+ grid_full = mgrid[-1:1:10j, -2:2:10j]
+ grid_sparse = ogrid[-1:1:10j, -2:2:10j]
+
+ # sparse grids can be made dense by broadcasting
+ grid_broadcast = np.broadcast_arrays(*grid_sparse)
+ for f, b in zip(grid_full, grid_broadcast):
+ assert_equal(f, b)
+
-class TestConcatenator(TestCase):
+class TestConcatenator(object):
def test_1d(self):
assert_array_equal(r_[1, 2, 3, 4, 5, 6], np.array([1, 2, 3, 4, 5, 6]))
b = np.ones(5)
@@ -174,15 +193,20 @@ class TestConcatenator(TestCase):
assert_array_equal(d[:5, :], b)
assert_array_equal(d[5:, :], c)
+ def test_0d(self):
+ assert_equal(r_[0, np.array(1), 2], [0, 1, 2])
+ assert_equal(r_[[0, 1, 2], np.array(3)], [0, 1, 2, 3])
+ assert_equal(r_[np.array(0), [1, 2, 3]], [0, 1, 2, 3])
-class TestNdenumerate(TestCase):
+
+class TestNdenumerate(object):
def test_basic(self):
a = np.array([[1, 2], [3, 4]])
assert_equal(list(ndenumerate(a)),
[((0, 0), 1), ((0, 1), 2), ((1, 0), 3), ((1, 1), 4)])
-class TestIndexExpression(TestCase):
+class TestIndexExpression(object):
def test_regression_1(self):
# ticket #1196
a = np.arange(2)
@@ -196,9 +220,9 @@ class TestIndexExpression(TestCase):
assert_equal(a[:, :3, [1, 2]], a[s_[:, :3, [1, 2]]])
-class TestIx_(TestCase):
+class TestIx_(object):
def test_regression_1(self):
- # Test empty untyped inputs create ouputs of indexing type, gh-5804
+ # Test empty untyped inputs create outputs of indexing type, gh-5804
a, = np.ix_(range(0))
assert_equal(a.dtype, np.intp)
@@ -217,7 +241,7 @@ class TestIx_(TestCase):
for k, (a, sz) in enumerate(zip(arrays, sizes)):
assert_equal(a.shape[k], sz)
assert_(all(sh == 1 for j, sh in enumerate(a.shape) if j != k))
- assert_(np.issubdtype(a.dtype, int))
+ assert_(np.issubdtype(a.dtype, np.integer))
def test_bool(self):
bool_a = [True, False, True, True]
@@ -243,71 +267,77 @@ def test_c_():
assert_equal(a, [[1, 2, 3, 0, 0, 4, 5, 6]])
-def test_fill_diagonal():
- a = np.zeros((3, 3), int)
- fill_diagonal(a, 5)
- yield (assert_array_equal, a,
- np.array([[5, 0, 0],
- [0, 5, 0],
- [0, 0, 5]]))
-
- #Test tall matrix
- a = np.zeros((10, 3), int)
- fill_diagonal(a, 5)
- yield (assert_array_equal, a,
- np.array([[5, 0, 0],
- [0, 5, 0],
- [0, 0, 5],
- [0, 0, 0],
- [0, 0, 0],
- [0, 0, 0],
- [0, 0, 0],
- [0, 0, 0],
- [0, 0, 0],
- [0, 0, 0]]))
-
- #Test tall matrix wrap
- a = np.zeros((10, 3), int)
- fill_diagonal(a, 5, True)
- yield (assert_array_equal, a,
- np.array([[5, 0, 0],
- [0, 5, 0],
- [0, 0, 5],
- [0, 0, 0],
- [5, 0, 0],
- [0, 5, 0],
- [0, 0, 5],
- [0, 0, 0],
- [5, 0, 0],
- [0, 5, 0]]))
-
- #Test wide matrix
- a = np.zeros((3, 10), int)
- fill_diagonal(a, 5)
- yield (assert_array_equal, a,
- np.array([[5, 0, 0, 0, 0, 0, 0, 0, 0, 0],
- [0, 5, 0, 0, 0, 0, 0, 0, 0, 0],
- [0, 0, 5, 0, 0, 0, 0, 0, 0, 0]]))
-
- # The same function can operate on a 4-d array:
- a = np.zeros((3, 3, 3, 3), int)
- fill_diagonal(a, 4)
- i = np.array([0, 1, 2])
- yield (assert_equal, np.where(a != 0), (i, i, i, i))
+class TestFillDiagonal(object):
+ def test_basic(self):
+ a = np.zeros((3, 3), int)
+ fill_diagonal(a, 5)
+ assert_array_equal(
+ a, np.array([[5, 0, 0],
+ [0, 5, 0],
+ [0, 0, 5]])
+ )
+
+ def test_tall_matrix(self):
+ a = np.zeros((10, 3), int)
+ fill_diagonal(a, 5)
+ assert_array_equal(
+ a, np.array([[5, 0, 0],
+ [0, 5, 0],
+ [0, 0, 5],
+ [0, 0, 0],
+ [0, 0, 0],
+ [0, 0, 0],
+ [0, 0, 0],
+ [0, 0, 0],
+ [0, 0, 0],
+ [0, 0, 0]])
+ )
+
+ def test_tall_matrix_wrap(self):
+ a = np.zeros((10, 3), int)
+ fill_diagonal(a, 5, True)
+ assert_array_equal(
+ a, np.array([[5, 0, 0],
+ [0, 5, 0],
+ [0, 0, 5],
+ [0, 0, 0],
+ [5, 0, 0],
+ [0, 5, 0],
+ [0, 0, 5],
+ [0, 0, 0],
+ [5, 0, 0],
+ [0, 5, 0]])
+ )
+
+ def test_wide_matrix(self):
+ a = np.zeros((3, 10), int)
+ fill_diagonal(a, 5)
+ assert_array_equal(
+ a, np.array([[5, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 5, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 5, 0, 0, 0, 0, 0, 0, 0]])
+ )
+
+ def test_operate_4d_array(self):
+ a = np.zeros((3, 3, 3, 3), int)
+ fill_diagonal(a, 4)
+ i = np.array([0, 1, 2])
+ assert_equal(np.where(a != 0), (i, i, i, i))
def test_diag_indices():
di = diag_indices(4)
a = np.array([[1, 2, 3, 4],
- [5, 6, 7, 8],
- [9, 10, 11, 12],
- [13, 14, 15, 16]])
+ [5, 6, 7, 8],
+ [9, 10, 11, 12],
+ [13, 14, 15, 16]])
a[di] = 100
- yield (assert_array_equal, a,
- np.array([[100, 2, 3, 4],
- [5, 100, 7, 8],
- [9, 10, 100, 12],
- [13, 14, 15, 100]]))
+ assert_array_equal(
+ a, np.array([[100, 2, 3, 4],
+ [5, 100, 7, 8],
+ [9, 10, 100, 12],
+ [13, 14, 15, 100]])
+ )
# Now, we create indices to manipulate a 3-d array:
d3 = diag_indices(2, 3)
@@ -315,12 +345,12 @@ def test_diag_indices():
# And use it to set the diagonal of a zeros array to 1:
a = np.zeros((2, 2, 2), int)
a[d3] = 1
- yield (assert_array_equal, a,
- np.array([[[1, 0],
- [0, 0]],
-
- [[0, 0],
- [0, 1]]]))
+ assert_array_equal(
+ a, np.array([[[1, 0],
+ [0, 0]],
+ [[0, 0],
+ [0, 1]]])
+ )
def test_diag_indices_from():
@@ -352,7 +382,3 @@ def test_ndindex():
# Make sure 0-sized ndindex works correctly
x = list(ndindex(*[0]))
assert_equal(x, [])
-
-
-if __name__ == "__main__":
- run_module_suite()
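For reference, the new TestGrid.test_sparse above relies on ogrid producing open (sparse) grids that broadcast to the dense grids returned by mgrid. A minimal sketch of that relationship (an editorial illustration, not part of the patch):

import numpy as np

dense = np.mgrid[0:3, 0:4]    # two (3, 4) arrays
sparse = np.ogrid[0:3, 0:4]   # a (3, 1) and a (1, 4) array
for d, s in zip(dense, np.broadcast_arrays(*sparse)):
    assert np.array_equal(d, s)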
diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py
index 83fca5b91..f58c9e33d 100644
--- a/numpy/lib/tests/test_io.py
+++ b/numpy/lib/tests/test_io.py
@@ -4,12 +4,16 @@ import sys
import gzip
import os
import threading
-from tempfile import NamedTemporaryFile
import time
import warnings
import gc
-from io import BytesIO
+import io
+import re
+import pytest
+from tempfile import NamedTemporaryFile
+from io import BytesIO, StringIO
from datetime import datetime
+import locale
import numpy as np
import numpy.ma as ma
@@ -17,10 +21,10 @@ from numpy.lib._iotools import ConverterError, ConversionWarning
from numpy.compat import asbytes, bytes, unicode, Path
from numpy.ma.testutils import assert_equal
from numpy.testing import (
- TestCase, run_module_suite, assert_warns, assert_,
- assert_raises_regex, assert_raises, assert_allclose,
- assert_array_equal, temppath, dec, IS_PYPY, suppress_warnings
-)
+ assert_warns, assert_, SkipTest, assert_raises_regex, assert_raises,
+ assert_allclose, assert_array_equal, temppath, tempdir, IS_PYPY,
+ HAS_REFCOUNT, suppress_warnings, assert_no_gc_cycles,
+ )
class TextIO(BytesIO):
@@ -44,6 +48,16 @@ class TextIO(BytesIO):
MAJVER, MINVER = sys.version_info[:2]
IS_64BIT = sys.maxsize > 2**32
+try:
+ import bz2
+ HAS_BZ2 = True
+except ImportError:
+ HAS_BZ2 = False
+try:
+ import lzma
+ HAS_LZMA = True
+except ImportError:
+ HAS_LZMA = False
def strptime(s, fmt=None):
@@ -52,10 +66,9 @@ def strptime(s, fmt=None):
2.5.
"""
- if sys.version_info[0] >= 3:
- return datetime(*time.strptime(s.decode('latin1'), fmt)[:3])
- else:
- return datetime(*time.strptime(s, fmt)[:3])
+ if type(s) == bytes:
+ s = s.decode("latin1")
+ return datetime(*time.strptime(s, fmt)[:3])
class RoundtripTest(object):
@@ -103,8 +116,9 @@ class RoundtripTest(object):
if not isinstance(target_file, BytesIO):
target_file.close()
# holds an open file descriptor so it can't be deleted on win
- if not isinstance(arr_reloaded, np.lib.npyio.NpzFile):
- os.remove(target_file.name)
+ if 'arr_reloaded' in locals():
+ if not isinstance(arr_reloaded, np.lib.npyio.NpzFile):
+ os.remove(target_file.name)
def check_roundtrips(self, a):
self.roundtrip(a)
@@ -143,7 +157,7 @@ class RoundtripTest(object):
a = np.array([1, 2, 3, 4], int)
self.roundtrip(a)
- @np.testing.dec.knownfailureif(sys.platform == 'win32', "Fail on Win32")
+ @pytest.mark.skipif(sys.platform == 'win32', reason="Fails on Win32")
def test_mmap(self):
a = np.array([[1, 2.5], [4, 7.3]])
self.roundtrip(a, file_on_disk=True, load_kwds={'mmap_mode': 'r'})
@@ -155,7 +169,7 @@ class RoundtripTest(object):
a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
self.check_roundtrips(a)
- @dec.slow
+ @pytest.mark.slow
def test_format_2_0(self):
dt = [(("%d" % i) * 100, float) for i in range(500)]
a = np.ones(1000, dtype=dt)
@@ -164,7 +178,7 @@ class RoundtripTest(object):
self.check_roundtrips(a)
-class TestSaveLoad(RoundtripTest, TestCase):
+class TestSaveLoad(RoundtripTest):
def roundtrip(self, *args, **kwargs):
RoundtripTest.roundtrip(self, np.save, *args, **kwargs)
assert_equal(self.arr[0], self.arr_reloaded)
@@ -172,7 +186,7 @@ class TestSaveLoad(RoundtripTest, TestCase):
assert_equal(self.arr[0].flags.fnc, self.arr_reloaded.flags.fnc)
-class TestSavezLoad(RoundtripTest, TestCase):
+class TestSavezLoad(RoundtripTest):
def roundtrip(self, *args, **kwargs):
RoundtripTest.roundtrip(self, np.savez, *args, **kwargs)
try:
@@ -187,8 +201,8 @@ class TestSavezLoad(RoundtripTest, TestCase):
self.arr_reloaded.fid.close()
os.remove(self.arr_reloaded.fid.name)
- @np.testing.dec.skipif(not IS_64BIT, "Works only with 64bit systems")
- @np.testing.dec.slow
+ @pytest.mark.skipif(not IS_64BIT, reason="Needs 64bit platform")
+ @pytest.mark.slow
def test_big_arrays(self):
L = (1 << 31) + 100000
a = np.empty(L, dtype=np.uint8)
@@ -264,7 +278,8 @@ class TestSavezLoad(RoundtripTest, TestCase):
fp.seek(0)
assert_(not fp.closed)
- @np.testing.dec.skipif(IS_PYPY, "context manager required on PyPy")
+ #FIXME: Is this still true?
+ @pytest.mark.skipif(IS_PYPY, reason="Missing context manager on PyPy")
def test_closing_fid(self):
# Test that issue #1517 (too many opened files) remains closed
# It might be a "weak" test since failed to get triggered on
@@ -303,7 +318,7 @@ class TestSavezLoad(RoundtripTest, TestCase):
assert_(fp.closed)
-class TestSaveTxt(TestCase):
+class TestSaveTxt(object):
def test_array(self):
a = np.array([[1, 2], [3, 4]], float)
fmt = "%.18e"
@@ -328,6 +343,12 @@ class TestSaveTxt(TestCase):
lines = c.readlines()
assert_equal(lines, [b'1\n', b'2\n', b'3\n', b'4\n'])
+ def test_0D_3D(self):
+ c = BytesIO()
+ assert_raises(ValueError, np.savetxt, c, np.array(1))
+ assert_raises(ValueError, np.savetxt, c, np.array([[[1], [2]]]))
+
+
def test_record(self):
a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
c = BytesIO()
@@ -357,7 +378,7 @@ class TestSaveTxt(TestCase):
lines = c.readlines()
assert_equal(lines, [b'01 : 2.0\n', b'03 : 4.0\n'])
- # Specify delimiter, should be overiden
+ # Specify delimiter, should be overridden
c = BytesIO()
np.savetxt(c, a, fmt='%02d : %3.1f', delimiter=',')
c.seek(0)
@@ -372,7 +393,7 @@ class TestSaveTxt(TestCase):
# Test the functionality of the header and footer keyword argument.
c = BytesIO()
- a = np.array([(1, 2), (3, 4)], dtype=np.int)
+ a = np.array([(1, 2), (3, 4)], dtype=int)
test_header_footer = 'Test header / footer'
# Test the header keyword argument
np.savetxt(c, a, fmt='%1d', header=test_header_footer)
@@ -447,6 +468,26 @@ class TestSaveTxt(TestCase):
[b'(3.142e+00+2.718e+00j) (3.142e+00+2.718e+00j)\n',
b'(3.142e+00+2.718e+00j) (3.142e+00+2.718e+00j)\n'])
+ def test_complex_negative_exponent(self):
+ # Prior to 1.15, some formats generated x+-yj, gh 7895
+ ncols = 2
+ nrows = 2
+ a = np.zeros((ncols, nrows), dtype=np.complex128)
+ re = np.pi
+ im = np.e
+ a[:] = re - 1.0j * im
+ c = BytesIO()
+ np.savetxt(c, a, fmt='%.3e')
+ c.seek(0)
+ lines = c.readlines()
+ assert_equal(
+ lines,
+ [b' (3.142e+00-2.718e+00j) (3.142e+00-2.718e+00j)\n',
+ b' (3.142e+00-2.718e+00j) (3.142e+00-2.718e+00j)\n'])
+
+
+
+
def test_custom_writer(self):
class CustomWriter(list):
@@ -459,8 +500,136 @@ class TestSaveTxt(TestCase):
b = np.loadtxt(w)
assert_array_equal(a, b)
+ def test_unicode(self):
+ utf8 = b'\xcf\x96'.decode('UTF-8')
+ a = np.array([utf8], dtype=np.unicode)
+ with tempdir() as tmpdir:
+ # set encoding as on windows it may not be unicode even on py3
+ np.savetxt(os.path.join(tmpdir, 'test.csv'), a, fmt=['%s'],
+ encoding='UTF-8')
+
+ def test_unicode_roundtrip(self):
+ utf8 = b'\xcf\x96'.decode('UTF-8')
+ a = np.array([utf8], dtype=np.unicode)
+ # our gz wrapper supports encoding
+ suffixes = ['', '.gz']
+ # the Python 2 stdlib compression modules do not support encoding
+ if MAJVER > 2:
+ if HAS_BZ2:
+ suffixes.append('.bz2')
+ if HAS_LZMA:
+ suffixes.extend(['.xz', '.lzma'])
+ with tempdir() as tmpdir:
+ for suffix in suffixes:
+ np.savetxt(os.path.join(tmpdir, 'test.csv' + suffix), a,
+ fmt=['%s'], encoding='UTF-16-LE')
+ b = np.loadtxt(os.path.join(tmpdir, 'test.csv' + suffix),
+ encoding='UTF-16-LE', dtype=np.unicode)
+ assert_array_equal(a, b)
+
+ def test_unicode_bytestream(self):
+ utf8 = b'\xcf\x96'.decode('UTF-8')
+ a = np.array([utf8], dtype=np.unicode)
+ s = BytesIO()
+ np.savetxt(s, a, fmt=['%s'], encoding='UTF-8')
+ s.seek(0)
+ assert_equal(s.read().decode('UTF-8'), utf8 + '\n')
+
+ def test_unicode_stringstream(self):
+ utf8 = b'\xcf\x96'.decode('UTF-8')
+ a = np.array([utf8], dtype=np.unicode)
+ s = StringIO()
+ np.savetxt(s, a, fmt=['%s'], encoding='UTF-8')
+ s.seek(0)
+ assert_equal(s.read(), utf8 + '\n')
+
+
+class LoadTxtBase(object):
+ def check_compressed(self, fopen, suffixes):
+ # Test that we can load data from a compressed file
+ wanted = np.arange(6).reshape((2, 3))
+ linesep = ('\n', '\r\n', '\r')
+ for sep in linesep:
+ data = '0 1 2' + sep + '3 4 5'
+ for suffix in suffixes:
+ with temppath(suffix=suffix) as name:
+ with fopen(name, mode='wt', encoding='UTF-32-LE') as f:
+ f.write(data)
+ res = self.loadfunc(name, encoding='UTF-32-LE')
+ assert_array_equal(res, wanted)
+ with fopen(name, "rt", encoding='UTF-32-LE') as f:
+ res = self.loadfunc(f)
+ assert_array_equal(res, wanted)
+
+ # Python2 .open does not support encoding
+ @pytest.mark.skipif(MAJVER == 2, reason="Needs Python version >= 3")
+ def test_compressed_gzip(self):
+ self.check_compressed(gzip.open, ('.gz',))
+
+ @pytest.mark.skipif(not HAS_BZ2, reason="Needs bz2")
+ @pytest.mark.skipif(MAJVER == 2, reason="Needs Python version >= 3")
+ def test_compressed_bz2(self):
+ self.check_compressed(bz2.open, ('.bz2',))
+
+ @pytest.mark.skipif(not HAS_LZMA, reason="Needs lzma")
+ @pytest.mark.skipif(MAJVER == 2, reason="Needs Python version >= 3")
+ def test_compressed_lzma(self):
+ self.check_compressed(lzma.open, ('.xz', '.lzma'))
+
+ def test_encoding(self):
+ with temppath() as path:
+ with open(path, "wb") as f:
+ f.write('0.\n1.\n2.'.encode("UTF-16"))
+ x = self.loadfunc(path, encoding="UTF-16")
+ assert_array_equal(x, [0., 1., 2.])
+
+ def test_stringload(self):
+ # umlauts
+ nonascii = b'\xc3\xb6\xc3\xbc\xc3\xb6'.decode("UTF-8")
+ with temppath() as path:
+ with open(path, "wb") as f:
+ f.write(nonascii.encode("UTF-16"))
+ x = self.loadfunc(path, encoding="UTF-16", dtype=np.unicode)
+ assert_array_equal(x, nonascii)
+
+ def test_binary_decode(self):
+ utf16 = b'\xff\xfeh\x04 \x00i\x04 \x00j\x04'
+ v = self.loadfunc(BytesIO(utf16), dtype=np.unicode, encoding='UTF-16')
+ assert_array_equal(v, np.array(utf16.decode('UTF-16').split()))
+
+ def test_converters_decode(self):
+ # test converters that decode strings
+ c = TextIO()
+ c.write(b'\xcf\x96')
+ c.seek(0)
+ x = self.loadfunc(c, dtype=np.unicode,
+ converters={0: lambda x: x.decode('UTF-8')})
+ a = np.array([b'\xcf\x96'.decode('UTF-8')])
+ assert_array_equal(x, a)
+
+ def test_converters_nodecode(self):
+ # test native string converters enabled by setting an encoding
+ utf8 = b'\xcf\x96'.decode('UTF-8')
+ with temppath() as path:
+ with io.open(path, 'wt', encoding='UTF-8') as f:
+ f.write(utf8)
+ x = self.loadfunc(path, dtype=np.unicode,
+ converters={0: lambda x: x + 't'},
+ encoding='UTF-8')
+ a = np.array([utf8 + 't'])
+ assert_array_equal(x, a)
+
+
+class TestLoadTxt(LoadTxtBase):
+ loadfunc = staticmethod(np.loadtxt)
+
+ def setup(self):
+ # lower chunksize for testing
+ self.orig_chunk = np.lib.npyio._loadtxt_chunksize
+ np.lib.npyio._loadtxt_chunksize = 1
+ def teardown(self):
+ np.lib.npyio._loadtxt_chunksize = self.orig_chunk
-class TestLoadTxt(TestCase):
def test_record(self):
c = TextIO()
c.write('1 2\n3 4')
@@ -484,7 +653,7 @@ class TestLoadTxt(TestCase):
c.write('1 2\n3 4')
c.seek(0)
- x = np.loadtxt(c, dtype=np.int)
+ x = np.loadtxt(c, dtype=int)
a = np.array([[1, 2], [3, 4]], int)
assert_array_equal(x, a)
@@ -532,7 +701,7 @@ class TestLoadTxt(TestCase):
c.write('# comment\n1,2,3,5\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
- comments=unicode('#'))
+ comments=u'#')
a = np.array([1, 2, 3, 5], int)
assert_array_equal(x, a)
@@ -720,7 +889,7 @@ class TestLoadTxt(TestCase):
# Test using an explicit dtype with an object
data = """ 1; 2001-01-01
2; 2002-01-31 """
- ndtype = [('idx', int), ('code', np.object)]
+ ndtype = [('idx', int), ('code', object)]
func = lambda s: strptime(s.strip(), "%Y-%m-%d")
converters = {1: func}
test = np.loadtxt(TextIO(data), delimiter=";", dtype=ndtype,
@@ -750,11 +919,11 @@ class TestLoadTxt(TestCase):
# IEEE doubles and floats only, otherwise the float32
# conversion may fail.
tgt = np.logspace(-10, 10, 5).astype(np.float32)
- tgt = np.hstack((tgt, -tgt)).astype(np.float)
+ tgt = np.hstack((tgt, -tgt)).astype(float)
inp = '\n'.join(map(float.hex, tgt))
c = TextIO()
c.write(inp)
- for dt in [np.float, np.float32]:
+ for dt in [float, np.float32]:
c.seek(0)
res = np.loadtxt(c, dtype=dt)
assert_equal(res, tgt, err_msg="%s" % dt)
@@ -764,9 +933,29 @@ class TestLoadTxt(TestCase):
c = TextIO()
c.write("%s %s" % tgt)
c.seek(0)
- res = np.loadtxt(c, dtype=np.complex)
+ res = np.loadtxt(c, dtype=complex)
assert_equal(res, tgt)
+ def test_complex_misformatted(self):
+ # test for backward compatibility
+ # some complex formats used to generate x+-yj
+ a = np.zeros((2, 2), dtype=np.complex128)
+ re = np.pi
+ im = np.e
+ a[:] = re - 1.0j * im
+ c = BytesIO()
+ np.savetxt(c, a, fmt='%.16e')
+ c.seek(0)
+ txt = c.read()
+ c.seek(0)
+ # misformat the sign on the imaginary part, gh 7895
+ txt_bad = txt.replace(b'e+00-', b'e00+-')
+ assert_(txt_bad != txt)
+ c.write(txt_bad)
+ c.seek(0)
+ res = np.loadtxt(c, dtype=complex)
+ assert_equal(res, a)
+
def test_universal_newline(self):
with temppath() as name:
with open(name, 'w') as f:
@@ -862,9 +1051,25 @@ class TestLoadTxt(TestCase):
dt = np.dtype([('x', int), ('a', 'S10'), ('y', int)])
np.loadtxt(c, delimiter=',', dtype=dt, comments=None) # Should succeed
-
-class Testfromregex(TestCase):
- # np.fromregex expects files opened in binary mode.
+ @pytest.mark.skipif(locale.getpreferredencoding() == 'ANSI_X3.4-1968',
+ reason="Wrong preferred encoding")
+ def test_binary_load(self):
+ butf8 = b"5,6,7,\xc3\x95scarscar\n\r15,2,3,hello\n\r"\
+ b"20,2,3,\xc3\x95scar\n\r"
+ sutf8 = butf8.decode("UTF-8").replace("\r", "").splitlines()
+ with temppath() as path:
+ with open(path, "wb") as f:
+ f.write(butf8)
+ with open(path, "rb") as f:
+ x = np.loadtxt(f, encoding="UTF-8", dtype=np.unicode)
+ assert_array_equal(x, sutf8)
+ # test the broken latin1 conversion that people now rely on
+ with open(path, "rb") as f:
+ x = np.loadtxt(f, encoding="UTF-8", dtype="S")
+ x = [b'5,6,7,\xc3\x95scarscar', b'15,2,3,hello', b'20,2,3,\xc3\x95scar']
+ assert_array_equal(x, np.array(x, dtype="S"))
+
+class Testfromregex(object):
def test_record(self):
c = TextIO()
c.write('1.312 foo\n1.534 bar\n4.444 qux')
@@ -897,12 +1102,36 @@ class Testfromregex(TestCase):
a = np.array([(1312,), (1534,), (4444,)], dtype=dt)
assert_array_equal(x, a)
+ def test_record_unicode(self):
+ utf8 = b'\xcf\x96'
+ with temppath() as path:
+ with open(path, 'wb') as f:
+ f.write(b'1.312 foo' + utf8 + b' \n1.534 bar\n4.444 qux')
+
+ dt = [('num', np.float64), ('val', 'U4')]
+ x = np.fromregex(path, r"(?u)([0-9.]+)\s+(\w+)", dt, encoding='UTF-8')
+ a = np.array([(1.312, 'foo' + utf8.decode('UTF-8')), (1.534, 'bar'),
+ (4.444, 'qux')], dtype=dt)
+ assert_array_equal(x, a)
+
+ regexp = re.compile(r"([0-9.]+)\s+(\w+)", re.UNICODE)
+ x = np.fromregex(path, regexp, dt, encoding='UTF-8')
+ assert_array_equal(x, a)
+
+ def test_compiled_bytes(self):
+ regexp = re.compile(b'(\\d)')
+ c = BytesIO(b'123')
+ dt = [('num', np.float64)]
+ a = np.array([1, 2, 3], dtype=dt)
+ x = np.fromregex(c, regexp, dt)
+ assert_array_equal(x, a)
#####--------------------------------------------------------------------------
-class TestFromTxt(TestCase):
- #
+class TestFromTxt(LoadTxtBase):
+ loadfunc = staticmethod(np.genfromtxt)
+
def test_record(self):
# Test w/ explicit dtype
data = TextIO('1 2\n3 4')
@@ -919,7 +1148,7 @@ class TestFromTxt(TestCase):
assert_equal(test, control)
def test_array(self):
- # Test outputing a standard ndarray
+ # Test outputting a standard ndarray
data = TextIO('1 2\n3 4')
control = np.array([[1, 2], [3, 4]], dtype=int)
test = np.ndfromtxt(data, dtype=int)
@@ -1005,7 +1234,10 @@ class TestFromTxt(TestCase):
def test_header(self):
# Test retrieving a header
data = TextIO('gender age weight\nM 64.0 75.0\nF 25.0 60.0')
- test = np.ndfromtxt(data, dtype=None, names=True)
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
+ test = np.ndfromtxt(data, dtype=None, names=True)
+ assert_(w[0].category is np.VisibleDeprecationWarning)
control = {'gender': np.array([b'M', b'F']),
'age': np.array([64.0, 25.0]),
'weight': np.array([75.0, 60.0])}
@@ -1016,7 +1248,10 @@ class TestFromTxt(TestCase):
def test_auto_dtype(self):
# Test the automatic definition of the output dtype
data = TextIO('A 64 75.0 3+4j True\nBCD 25 60.0 5+6j False')
- test = np.ndfromtxt(data, dtype=None)
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
+ test = np.ndfromtxt(data, dtype=None)
+ assert_(w[0].category is np.VisibleDeprecationWarning)
control = [np.array([b'A', b'BCD']),
np.array([64, 25]),
np.array([75.0, 60.0]),
@@ -1062,7 +1297,10 @@ F 35 58.330000
M 33 21.99
""")
# The # is part of the first name and should be deleted automatically.
- test = np.genfromtxt(data, names=True, dtype=None)
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
+ test = np.genfromtxt(data, names=True, dtype=None)
+ assert_(w[0].category is np.VisibleDeprecationWarning)
ctrl = np.array([('M', 21, 72.1), ('F', 35, 58.33), ('M', 33, 21.99)],
dtype=[('gender', '|S1'), ('age', int), ('weight', float)])
assert_equal(test, ctrl)
@@ -1073,14 +1311,27 @@ M 21 72.100000
F 35 58.330000
M 33 21.99
""")
- test = np.genfromtxt(data, names=True, dtype=None)
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
+ test = np.genfromtxt(data, names=True, dtype=None)
+ assert_(w[0].category is np.VisibleDeprecationWarning)
assert_equal(test, ctrl)
+ def test_names_and_comments_none(self):
+ # Tests case when names is true but comments is None (gh-10780)
+ data = TextIO('col1 col2\n 1 2\n 3 4')
+ test = np.genfromtxt(data, dtype=(int, int), comments=None, names=True)
+ control = np.array([(1, 2), (3, 4)], dtype=[('col1', int), ('col2', int)])
+ assert_equal(test, control)
+
def test_autonames_and_usecols(self):
# Tests names and usecols
data = TextIO('A B C D\n aaaa 121 45 9.1')
- test = np.ndfromtxt(data, usecols=('A', 'C', 'D'),
- names=True, dtype=None)
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
+ test = np.ndfromtxt(data, usecols=('A', 'C', 'D'),
+ names=True, dtype=None)
+ assert_(w[0].category is np.VisibleDeprecationWarning)
control = np.array(('aaaa', 45, 9.1),
dtype=[('A', '|S4'), ('C', int), ('D', float)])
assert_equal(test, control)
@@ -1097,8 +1348,12 @@ M 33 21.99
def test_converters_with_usecols_and_names(self):
# Tests names and usecols
data = TextIO('A B C D\n aaaa 121 45 9.1')
- test = np.ndfromtxt(data, usecols=('A', 'C', 'D'), names=True,
- dtype=None, converters={'C': lambda s: 2 * int(s)})
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
+ test = np.ndfromtxt(data, usecols=('A', 'C', 'D'), names=True,
+ dtype=None,
+ converters={'C': lambda s: 2 * int(s)})
+ assert_(w[0].category is np.VisibleDeprecationWarning)
control = np.array(('aaaa', 90, 9.1),
dtype=[('A', '|S4'), ('C', int), ('D', float)])
assert_equal(test, control)
@@ -1177,19 +1432,19 @@ M 33 21.99
conv = {0: int, 1: int, 2: int, 3: lambda r: dmap[r.decode()]}
test = np.recfromcsv(TextIO(dstr,), dtype=dtyp, delimiter=',',
names=None, converters=conv)
- control = np.rec.array([[1,5,-1,0], [2,8,-1,1], [3,3,-2,3]], dtype=dtyp)
+ control = np.rec.array([(1,5,-1,0), (2,8,-1,1), (3,3,-2,3)], dtype=dtyp)
assert_equal(test, control)
dtyp = [('e1','i4'),('e2','i4'),('n', 'i1')]
test = np.recfromcsv(TextIO(dstr,), dtype=dtyp, delimiter=',',
usecols=(0,1,3), names=None, converters=conv)
- control = np.rec.array([[1,5,0], [2,8,1], [3,3,3]], dtype=dtyp)
+ control = np.rec.array([(1,5,0), (2,8,1), (3,3,3)], dtype=dtyp)
assert_equal(test, control)
def test_dtype_with_object(self):
# Test using an explicit dtype with an object
data = """ 1; 2001-01-01
2; 2002-01-31 """
- ndtype = [('idx', int), ('code', np.object)]
+ ndtype = [('idx', int), ('code', object)]
func = lambda s: strptime(s.strip(), "%Y-%m-%d")
converters = {1: func}
test = np.genfromtxt(TextIO(data), delimiter=";", dtype=ndtype,
@@ -1199,7 +1454,7 @@ M 33 21.99
dtype=ndtype)
assert_equal(test, control)
- ndtype = [('nest', [('idx', int), ('code', np.object)])]
+ ndtype = [('nest', [('idx', int), ('code', object)])]
try:
test = np.genfromtxt(TextIO(data), delimiter=";",
dtype=ndtype, converters=converters)
@@ -1218,6 +1473,18 @@ M 33 21.99
dtype=[('', '|S10'), ('', float)])
assert_equal(test, control)
+ def test_utf8_userconverters_with_explicit_dtype(self):
+ utf8 = b'\xcf\x96'
+ with temppath() as path:
+ with open(path, 'wb') as f:
+ f.write(b'skip,skip,2001-01-01' + utf8 + b',1.0,skip')
+ test = np.genfromtxt(path, delimiter=",", names=None, dtype=float,
+ usecols=(2, 3), converters={2: np.unicode},
+ encoding='UTF-8')
+ control = np.array([('2001-01-01' + utf8.decode('UTF-8'), 1.)],
+ dtype=[('', '|U11'), ('', float)])
+ assert_equal(test, control)
+
def test_spacedelimiter(self):
# Test space delimiter
data = TextIO("1 2 3 4 5\n6 7 8 9 10")
@@ -1336,7 +1603,7 @@ M 33 21.99
test = np.mafromtxt(data, dtype=None, **kwargs)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
- dtype=[('A', np.int), ('B', np.int)])
+ dtype=[('A', int), ('B', int)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
#
@@ -1344,7 +1611,7 @@ M 33 21.99
test = np.mafromtxt(data, **kwargs)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
- dtype=[('A', np.float), ('B', np.float)])
+ dtype=[('A', float), ('B', float)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
@@ -1413,7 +1680,7 @@ M 33 21.99
missing_values='-999.0', names=True,)
control = ma.array([(0, 1.5), (2, -1.)],
mask=[(False, False), (False, True)],
- dtype=[('A', np.int), ('B', np.float)])
+ dtype=[('A', int), ('B', float)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
@@ -1544,11 +1811,17 @@ M 33 21.99
# Test autostrip
data = "01/01/2003 , 1.3, abcde"
kwargs = dict(delimiter=",", dtype=None)
- mtest = np.ndfromtxt(TextIO(data), **kwargs)
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
+ mtest = np.ndfromtxt(TextIO(data), **kwargs)
+ assert_(w[0].category is np.VisibleDeprecationWarning)
ctrl = np.array([('01/01/2003 ', 1.3, ' abcde')],
dtype=[('f0', '|S12'), ('f1', float), ('f2', '|S8')])
assert_equal(mtest, ctrl)
- mtest = np.ndfromtxt(TextIO(data), autostrip=True, **kwargs)
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
+ mtest = np.ndfromtxt(TextIO(data), autostrip=True, **kwargs)
+ assert_(w[0].category is np.VisibleDeprecationWarning)
ctrl = np.array([('01/01/2003', 1.3, 'abcde')],
dtype=[('f0', '|S10'), ('f1', float), ('f2', '|S5')])
assert_equal(mtest, ctrl)
@@ -1668,28 +1941,141 @@ M 33 21.99
def test_comments_is_none(self):
# Github issue 329 (None was previously being converted to 'None').
- test = np.genfromtxt(TextIO("test1,testNonetherestofthedata"),
- dtype=None, comments=None, delimiter=',')
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
+ test = np.genfromtxt(TextIO("test1,testNonetherestofthedata"),
+ dtype=None, comments=None, delimiter=',')
+ assert_(w[0].category is np.VisibleDeprecationWarning)
assert_equal(test[1], b'testNonetherestofthedata')
- test = np.genfromtxt(TextIO("test1, testNonetherestofthedata"),
- dtype=None, comments=None, delimiter=',')
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
+ test = np.genfromtxt(TextIO("test1, testNonetherestofthedata"),
+ dtype=None, comments=None, delimiter=',')
+ assert_(w[0].category is np.VisibleDeprecationWarning)
assert_equal(test[1], b' testNonetherestofthedata')
+ def test_latin1(self):
+ latin1 = b'\xf6\xfc\xf6'
+ norm = b"norm1,norm2,norm3\n"
+ enc = b"test1,testNonethe" + latin1 + b",test3\n"
+ s = norm + enc + norm
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
+ test = np.genfromtxt(TextIO(s),
+ dtype=None, comments=None, delimiter=',')
+ assert_(w[0].category is np.VisibleDeprecationWarning)
+ assert_equal(test[1, 0], b"test1")
+ assert_equal(test[1, 1], b"testNonethe" + latin1)
+ assert_equal(test[1, 2], b"test3")
+ test = np.genfromtxt(TextIO(s),
+ dtype=None, comments=None, delimiter=',',
+ encoding='latin1')
+ assert_equal(test[1, 0], u"test1")
+ assert_equal(test[1, 1], u"testNonethe" + latin1.decode('latin1'))
+ assert_equal(test[1, 2], u"test3")
+
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
+ test = np.genfromtxt(TextIO(b"0,testNonethe" + latin1),
+ dtype=None, comments=None, delimiter=',')
+ assert_(w[0].category is np.VisibleDeprecationWarning)
+ assert_equal(test['f0'], 0)
+ assert_equal(test['f1'], b"testNonethe" + latin1)
+
+ def test_binary_decode_autodtype(self):
+ utf16 = b'\xff\xfeh\x04 \x00i\x04 \x00j\x04'
+ v = self.loadfunc(BytesIO(utf16), dtype=None, encoding='UTF-16')
+ assert_array_equal(v, np.array(utf16.decode('UTF-16').split()))
+
+ def test_utf8_byte_encoding(self):
+ utf8 = b"\xcf\x96"
+ norm = b"norm1,norm2,norm3\n"
+ enc = b"test1,testNonethe" + utf8 + b",test3\n"
+ s = norm + enc + norm
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
+ test = np.genfromtxt(TextIO(s),
+ dtype=None, comments=None, delimiter=',')
+ assert_(w[0].category is np.VisibleDeprecationWarning)
+ ctl = np.array([
+ [b'norm1', b'norm2', b'norm3'],
+ [b'test1', b'testNonethe' + utf8, b'test3'],
+ [b'norm1', b'norm2', b'norm3']])
+ assert_array_equal(test, ctl)
+
+ def test_utf8_file(self):
+ utf8 = b"\xcf\x96"
+ latin1 = b"\xf6\xfc\xf6"
+ with temppath() as path:
+ with open(path, "wb") as f:
+ f.write((b"test1,testNonethe" + utf8 + b",test3\n") * 2)
+ test = np.genfromtxt(path, dtype=None, comments=None,
+ delimiter=',', encoding="UTF-8")
+ ctl = np.array([
+ ["test1", "testNonethe" + utf8.decode("UTF-8"), "test3"],
+ ["test1", "testNonethe" + utf8.decode("UTF-8"), "test3"]],
+ dtype=np.unicode)
+ assert_array_equal(test, ctl)
+
+ # test a mixed dtype
+ with open(path, "wb") as f:
+ f.write(b"0,testNonethe" + utf8)
+ test = np.genfromtxt(path, dtype=None, comments=None,
+ delimiter=',', encoding="UTF-8")
+ assert_equal(test['f0'], 0)
+ assert_equal(test['f1'], "testNonethe" + utf8.decode("UTF-8"))
+
+
+ def test_utf8_file_nodtype_unicode(self):
+ # bytes encoding with non-latin1 -> unicode upcast
+ utf8 = u'\u03d6'
+ latin1 = u'\xf6\xfc\xf6'
+
+ # skip the test if the utf8 test string cannot be encoded with the
+ # preferred encoding. The preferred encoding is assumed to be the
+ # default encoding of io.open. This will need to change for pytest,
+ # maybe using pytest.mark.xfail(raises=***).
+ try:
+ encoding = locale.getpreferredencoding()
+ utf8.encode(encoding)
+ except (UnicodeError, ImportError):
+ raise SkipTest('Skipping test_utf8_file_nodtype_unicode, '
+ 'unable to encode utf8 in preferred encoding')
+
+ with temppath() as path:
+ with io.open(path, "wt") as f:
+ f.write(u"norm1,norm2,norm3\n")
+ f.write(u"norm1," + latin1 + u",norm3\n")
+ f.write(u"test1,testNonethe" + utf8 + u",test3\n")
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '',
+ np.VisibleDeprecationWarning)
+ test = np.genfromtxt(path, dtype=None, comments=None,
+ delimiter=',')
+ # Check for warning when encoding not specified.
+ assert_(w[0].category is np.VisibleDeprecationWarning)
+ ctl = np.array([
+ ["norm1", "norm2", "norm3"],
+ ["norm1", latin1, "norm3"],
+ ["test1", "testNonethe" + utf8, "test3"]],
+ dtype=np.unicode)
+ assert_array_equal(test, ctl)
+
def test_recfromtxt(self):
#
data = TextIO('A,B\n0,1\n2,3')
kwargs = dict(delimiter=",", missing_values="N/A", names=True)
test = np.recfromtxt(data, **kwargs)
control = np.array([(0, 1), (2, 3)],
- dtype=[('A', np.int), ('B', np.int)])
- self.assertTrue(isinstance(test, np.recarray))
+ dtype=[('A', int), ('B', int)])
+ assert_(isinstance(test, np.recarray))
assert_equal(test, control)
#
data = TextIO('A,B\n0,1\n2,N/A')
test = np.recfromtxt(data, dtype=None, usemask=True, **kwargs)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
- dtype=[('A', np.int), ('B', np.int)])
+ dtype=[('A', int), ('B', int)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
assert_equal(test.A, [0, 2])
@@ -1700,15 +2086,15 @@ M 33 21.99
kwargs = dict(missing_values="N/A", names=True, case_sensitive=True)
test = np.recfromcsv(data, dtype=None, **kwargs)
control = np.array([(0, 1), (2, 3)],
- dtype=[('A', np.int), ('B', np.int)])
- self.assertTrue(isinstance(test, np.recarray))
+ dtype=[('A', int), ('B', int)])
+ assert_(isinstance(test, np.recarray))
assert_equal(test, control)
#
data = TextIO('A,B\n0,1\n2,N/A')
test = np.recfromcsv(data, dtype=None, usemask=True, **kwargs)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
- dtype=[('A', np.int), ('B', np.int)])
+ dtype=[('A', int), ('B', int)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
assert_equal(test.A, [0, 2])
@@ -1716,16 +2102,23 @@ M 33 21.99
data = TextIO('A,B\n0,1\n2,3')
test = np.recfromcsv(data, missing_values='N/A',)
control = np.array([(0, 1), (2, 3)],
- dtype=[('a', np.int), ('b', np.int)])
- self.assertTrue(isinstance(test, np.recarray))
+ dtype=[('a', int), ('b', int)])
+ assert_(isinstance(test, np.recarray))
assert_equal(test, control)
#
data = TextIO('A,B\n0,1\n2,3')
- dtype = [('a', np.int), ('b', np.float)]
+ dtype = [('a', int), ('b', float)]
test = np.recfromcsv(data, missing_values='N/A', dtype=dtype)
control = np.array([(0, 1), (2, 3)],
dtype=dtype)
- self.assertTrue(isinstance(test, np.recarray))
+ assert_(isinstance(test, np.recarray))
+ assert_equal(test, control)
+
+    # gh-10394
+ data = TextIO('color\n"red"\n"blue"')
+ test = np.recfromcsv(data, converters={0: lambda x: x.strip(b'\"')})
+ control = np.array([('red',), ('blue',)], dtype=[('color', (bytes, 4))])
+ assert_equal(test.dtype, control.dtype)
assert_equal(test, control)
def test_max_rows(self):
@@ -1786,11 +2179,7 @@ M 33 21.99
# Test that we can load data from a filename as well as a file
# object
tgt = np.arange(6).reshape((2, 3))
- if sys.version_info[0] >= 3:
- # python 3k is known to fail for '\r'
- linesep = ('\n', '\r\n')
- else:
- linesep = ('\n', '\r\n', '\r')
+ linesep = ('\n', '\r\n', '\r')
for sep in linesep:
data = '0 1 2' + sep + '3 4 5'
@@ -1800,6 +2189,22 @@ M 33 21.99
res = np.genfromtxt(name)
assert_array_equal(res, tgt)
+ def test_gft_from_gzip(self):
+ # Test that we can load data from a gzipped file
+ wanted = np.arange(6).reshape((2, 3))
+ linesep = ('\n', '\r\n', '\r')
+
+ for sep in linesep:
+ data = '0 1 2' + sep + '3 4 5'
+ s = BytesIO()
+ with gzip.GzipFile(fileobj=s, mode='w') as g:
+ g.write(asbytes(data))
+
+ with temppath(suffix='.gz2') as name:
+ with open(name, 'w') as f:
+ f.write(data)
+ assert_array_equal(np.genfromtxt(name), wanted)
+
def test_gft_using_generator(self):
# gft doesn't work with unicode.
def count():
@@ -1826,7 +2231,7 @@ M 33 21.99
assert_equal(test.dtype.names, ['f0', 'f1', 'f2'])
- assert_(test.dtype['f0'] == np.float)
+ assert_(test.dtype['f0'] == float)
assert_(test.dtype['f1'] == np.int64)
assert_(test.dtype['f2'] == np.integer)
@@ -1835,9 +2240,9 @@ M 33 21.99
assert_equal(test['f2'], 1024)
-class TestPathUsage(TestCase):
+@pytest.mark.skipif(Path is None, reason="No pathlib.Path")
+class TestPathUsage(object):
# Test that pathlib.Path can be used
- @np.testing.dec.skipif(Path is None, "No pathlib.Path")
def test_loadtxt(self):
with temppath(suffix='.txt') as path:
path = Path(path)
@@ -1846,7 +2251,6 @@ class TestPathUsage(TestCase):
x = np.loadtxt(path)
assert_array_equal(x, a)
- @np.testing.dec.skipif(Path is None, "No pathlib.Path")
def test_save_load(self):
# Test that pathlib.Path instances can be used with savez.
with temppath(suffix='.npy') as path:
@@ -1856,7 +2260,6 @@ class TestPathUsage(TestCase):
data = np.load(path)
assert_array_equal(data, a)
- @np.testing.dec.skipif(Path is None, "No pathlib.Path")
def test_savez_load(self):
# Test that pathlib.Path instances can be used with savez.
with temppath(suffix='.npz') as path:
@@ -1864,8 +2267,7 @@ class TestPathUsage(TestCase):
np.savez(path, lab='place holder')
with np.load(path) as data:
assert_array_equal(data['lab'], 'place holder')
-
- @np.testing.dec.skipif(Path is None, "No pathlib.Path")
+
def test_savez_compressed_load(self):
# Test that pathlib.Path instances can be used with savez.
with temppath(suffix='.npz') as path:
@@ -1875,7 +2277,6 @@ class TestPathUsage(TestCase):
assert_array_equal(data['lab'], 'place holder')
data.close()
- @np.testing.dec.skipif(Path is None, "No pathlib.Path")
def test_genfromtxt(self):
with temppath(suffix='.txt') as path:
path = Path(path)
@@ -1884,9 +2285,8 @@ class TestPathUsage(TestCase):
data = np.genfromtxt(path)
assert_array_equal(a, data)
- @np.testing.dec.skipif(Path is None, "No pathlib.Path")
def test_ndfromtxt(self):
- # Test outputing a standard ndarray
+ # Test outputting a standard ndarray
with temppath(suffix='.txt') as path:
path = Path(path)
with path.open('w') as f:
@@ -1896,7 +2296,6 @@ class TestPathUsage(TestCase):
test = np.ndfromtxt(path, dtype=int)
assert_array_equal(test, control)
- @np.testing.dec.skipif(Path is None, "No pathlib.Path")
def test_mafromtxt(self):
# From `test_fancy_dtype_alt` above
with temppath(suffix='.txt') as path:
@@ -1908,7 +2307,6 @@ class TestPathUsage(TestCase):
control = ma.array([(1.0, 2.0, 3.0), (4.0, 5.0, 6.0)])
assert_equal(test, control)
- @np.testing.dec.skipif(Path is None, "No pathlib.Path")
def test_recfromtxt(self):
with temppath(suffix='.txt') as path:
path = Path(path)
@@ -1918,11 +2316,10 @@ class TestPathUsage(TestCase):
kwargs = dict(delimiter=",", missing_values="N/A", names=True)
test = np.recfromtxt(path, **kwargs)
control = np.array([(0, 1), (2, 3)],
- dtype=[('A', np.int), ('B', np.int)])
- self.assertTrue(isinstance(test, np.recarray))
+ dtype=[('A', int), ('B', int)])
+ assert_(isinstance(test, np.recarray))
assert_equal(test, control)
- @np.testing.dec.skipif(Path is None, "No pathlib.Path")
def test_recfromcsv(self):
with temppath(suffix='.txt') as path:
path = Path(path)
@@ -1932,8 +2329,8 @@ class TestPathUsage(TestCase):
kwargs = dict(missing_values="N/A", names=True, case_sensitive=True)
test = np.recfromcsv(path, dtype=None, **kwargs)
control = np.array([(0, 1), (2, 3)],
- dtype=[('A', np.int), ('B', np.int)])
- self.assertTrue(isinstance(test, np.recarray))
+ dtype=[('A', int), ('B', int)])
+ assert_(isinstance(test, np.recarray))
assert_equal(test, control)
@@ -1952,7 +2349,7 @@ def test_gzip_load():
def test_gzip_loadtxt():
- # Thanks to another windows brokeness, we can't use
+ # Thanks to another windows brokenness, we can't use
# NamedTemporaryFile: a file created from this function cannot be
# reopened by another open call. So we first put the gzipped string
# of the test reference array, write it to a securely opened file,
@@ -2010,6 +2407,7 @@ def test_npzfile_dict():
assert_('x' in z.keys())
+@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
def test_load_refcount():
# Check that objects returned by np.load are directly freed based on
# their refcount, rather than needing the gc to collect them.
@@ -2018,17 +2416,5 @@ def test_load_refcount():
np.savez(f, [1, 2, 3])
f.seek(0)
- assert_(gc.isenabled())
- gc.disable()
- try:
- gc.collect()
+ with assert_no_gc_cycles():
np.load(f)
- # gc.collect returns the number of unreachable objects in cycles that
- # were found -- we are checking that no cycles were created by np.load
- n_objects_in_cycles = gc.collect()
- finally:
- gc.enable()
- assert_equal(n_objects_in_cycles, 0)
-
-if __name__ == "__main__":
- run_module_suite()
diff --git a/numpy/lib/tests/test_mixins.py b/numpy/lib/tests/test_mixins.py
new file mode 100644
index 000000000..f2d915502
--- /dev/null
+++ b/numpy/lib/tests/test_mixins.py
@@ -0,0 +1,213 @@
+from __future__ import division, absolute_import, print_function
+
+import numbers
+import operator
+import sys
+
+import numpy as np
+from numpy.testing import assert_, assert_equal, assert_raises
+
+
+PY2 = sys.version_info.major < 3
+
+
+# NOTE: This class should be kept as an exact copy of the example from the
+# docstring for NDArrayOperatorsMixin.
+
+class ArrayLike(np.lib.mixins.NDArrayOperatorsMixin):
+ def __init__(self, value):
+ self.value = np.asarray(value)
+
+ # One might also consider adding the built-in list type to this
+ # list, to support operations like np.add(array_like, list)
+ _HANDLED_TYPES = (np.ndarray, numbers.Number)
+
+ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+ out = kwargs.get('out', ())
+ for x in inputs + out:
+ # Only support operations with instances of _HANDLED_TYPES.
+ # Use ArrayLike instead of type(self) for isinstance to
+ # allow subclasses that don't override __array_ufunc__ to
+ # handle ArrayLike objects.
+ if not isinstance(x, self._HANDLED_TYPES + (ArrayLike,)):
+ return NotImplemented
+
+ # Defer to the implementation of the ufunc on unwrapped values.
+ inputs = tuple(x.value if isinstance(x, ArrayLike) else x
+ for x in inputs)
+ if out:
+ kwargs['out'] = tuple(
+ x.value if isinstance(x, ArrayLike) else x
+ for x in out)
+ result = getattr(ufunc, method)(*inputs, **kwargs)
+
+ if type(result) is tuple:
+ # multiple return values
+ return tuple(type(self)(x) for x in result)
+ elif method == 'at':
+ # no return value
+ return None
+ else:
+ # one return value
+ return type(self)(result)
+
+ def __repr__(self):
+ return '%s(%r)' % (type(self).__name__, self.value)
+
+
+def wrap_array_like(result):
+ if type(result) is tuple:
+ return tuple(ArrayLike(r) for r in result)
+ else:
+ return ArrayLike(result)
+
+
+def _assert_equal_type_and_value(result, expected, err_msg=None):
+ assert_equal(type(result), type(expected), err_msg=err_msg)
+ if isinstance(result, tuple):
+ assert_equal(len(result), len(expected), err_msg=err_msg)
+ for result_item, expected_item in zip(result, expected):
+ _assert_equal_type_and_value(result_item, expected_item, err_msg)
+ else:
+ assert_equal(result.value, expected.value, err_msg=err_msg)
+ assert_equal(getattr(result.value, 'dtype', None),
+ getattr(expected.value, 'dtype', None), err_msg=err_msg)
+
+
+_ALL_BINARY_OPERATORS = [
+ operator.lt,
+ operator.le,
+ operator.eq,
+ operator.ne,
+ operator.gt,
+ operator.ge,
+ operator.add,
+ operator.sub,
+ operator.mul,
+ operator.truediv,
+ operator.floordiv,
+    # TODO: test operator.div, which exists on Python 2 only
+ operator.mod,
+ divmod,
+ pow,
+ operator.lshift,
+ operator.rshift,
+ operator.and_,
+ operator.xor,
+ operator.or_,
+]
+
+
+class TestNDArrayOperatorsMixin(object):
+
+ def test_array_like_add(self):
+
+ def check(result):
+ _assert_equal_type_and_value(result, ArrayLike(0))
+
+ check(ArrayLike(0) + 0)
+ check(0 + ArrayLike(0))
+
+ check(ArrayLike(0) + np.array(0))
+ check(np.array(0) + ArrayLike(0))
+
+ check(ArrayLike(np.array(0)) + 0)
+ check(0 + ArrayLike(np.array(0)))
+
+ check(ArrayLike(np.array(0)) + np.array(0))
+ check(np.array(0) + ArrayLike(np.array(0)))
+
+ def test_inplace(self):
+ array_like = ArrayLike(np.array([0]))
+ array_like += 1
+ _assert_equal_type_and_value(array_like, ArrayLike(np.array([1])))
+
+ array = np.array([0])
+ array += ArrayLike(1)
+ _assert_equal_type_and_value(array, ArrayLike(np.array([1])))
+
+ def test_opt_out(self):
+
+ class OptOut(object):
+ """Object that opts out of __array_ufunc__."""
+ __array_ufunc__ = None
+
+ def __add__(self, other):
+ return self
+
+ def __radd__(self, other):
+ return self
+
+ array_like = ArrayLike(1)
+ opt_out = OptOut()
+
+ # supported operations
+ assert_(array_like + opt_out is opt_out)
+ assert_(opt_out + array_like is opt_out)
+
+ # not supported
+ with assert_raises(TypeError):
+ # don't use the Python default, array_like = array_like + opt_out
+ array_like += opt_out
+ with assert_raises(TypeError):
+ array_like - opt_out
+ with assert_raises(TypeError):
+ opt_out - array_like
+
+ def test_subclass(self):
+
+ class SubArrayLike(ArrayLike):
+ """Should take precedence over ArrayLike."""
+
+ x = ArrayLike(0)
+ y = SubArrayLike(1)
+ _assert_equal_type_and_value(x + y, y)
+ _assert_equal_type_and_value(y + x, y)
+
+ def test_object(self):
+ x = ArrayLike(0)
+ obj = object()
+ with assert_raises(TypeError):
+ x + obj
+ with assert_raises(TypeError):
+ obj + x
+ with assert_raises(TypeError):
+ x += obj
+
+ def test_unary_methods(self):
+ array = np.array([-1, 0, 1, 2])
+ array_like = ArrayLike(array)
+ for op in [operator.neg,
+ operator.pos,
+ abs,
+ operator.invert]:
+ _assert_equal_type_and_value(op(array_like), ArrayLike(op(array)))
+
+ def test_forward_binary_methods(self):
+ array = np.array([-1, 0, 1, 2])
+ array_like = ArrayLike(array)
+ for op in _ALL_BINARY_OPERATORS:
+ expected = wrap_array_like(op(array, 1))
+ actual = op(array_like, 1)
+ err_msg = 'failed for operator {}'.format(op)
+ _assert_equal_type_and_value(expected, actual, err_msg=err_msg)
+
+ def test_reflected_binary_methods(self):
+ for op in _ALL_BINARY_OPERATORS:
+ expected = wrap_array_like(op(2, 1))
+ actual = op(2, ArrayLike(1))
+ err_msg = 'failed for operator {}'.format(op)
+ _assert_equal_type_and_value(expected, actual, err_msg=err_msg)
+
+ def test_ufunc_at(self):
+ array = ArrayLike(np.array([1, 2, 3, 4]))
+ assert_(np.negative.at(array, np.array([0, 1])) is None)
+ _assert_equal_type_and_value(array, ArrayLike([-1, -2, 3, 4]))
+
+ def test_ufunc_two_outputs(self):
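+        # np.frexp has two outputs; both should come back wrapped in ArrayLike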
+ mantissa, exponent = np.frexp(2 ** -3)
+ expected = (ArrayLike(mantissa), ArrayLike(exponent))
+ _assert_equal_type_and_value(
+ np.frexp(ArrayLike(2 ** -3)), expected)
+ _assert_equal_type_and_value(
+ np.frexp(ArrayLike(np.array(2 ** -3))), expected)
diff --git a/numpy/lib/tests/test_nanfunctions.py b/numpy/lib/tests/test_nanfunctions.py
index 2b310457b..504372faf 100644
--- a/numpy/lib/tests/test_nanfunctions.py
+++ b/numpy/lib/tests/test_nanfunctions.py
@@ -4,8 +4,8 @@ import warnings
import numpy as np
from numpy.testing import (
- run_module_suite, TestCase, assert_, assert_equal, assert_almost_equal,
- assert_no_warnings, assert_raises, assert_array_equal, suppress_warnings
+ assert_, assert_equal, assert_almost_equal, assert_no_warnings,
+ assert_raises, assert_array_equal, suppress_warnings
)
@@ -35,7 +35,7 @@ _ndat_zeros = np.array([[0.6244, 0.0, 0.2692, 0.0116, 0.0, 0.1170],
[0.1610, 0.0, 0.0, 0.1859, 0.3146, 0.0]])
-class TestNanFunctions_MinMax(TestCase):
+class TestNanFunctions_MinMax(object):
nanfuncs = [np.nanmin, np.nanmax]
stdfuncs = [np.min, np.max]
@@ -113,47 +113,63 @@ class TestNanFunctions_MinMax(TestCase):
for f in self.nanfuncs:
assert_(f(0.) == 0.)
- def test_matrices(self):
+ def test_subclass(self):
+ class MyNDArray(np.ndarray):
+ pass
+
# Check that it works and that type and
# shape are preserved
- mat = np.matrix(np.eye(3))
+ mine = np.eye(3).view(MyNDArray)
for f in self.nanfuncs:
- res = f(mat, axis=0)
- assert_(isinstance(res, np.matrix))
- assert_(res.shape == (1, 3))
- res = f(mat, axis=1)
- assert_(isinstance(res, np.matrix))
- assert_(res.shape == (3, 1))
- res = f(mat)
- assert_(np.isscalar(res))
+ res = f(mine, axis=0)
+ assert_(isinstance(res, MyNDArray))
+ assert_(res.shape == (3,))
+ res = f(mine, axis=1)
+ assert_(isinstance(res, MyNDArray))
+ assert_(res.shape == (3,))
+ res = f(mine)
+ assert_(res.shape == ())
+
# check that rows of nan are dealt with for subclasses (#4628)
- mat[1] = np.nan
+ mine[1] = np.nan
for f in self.nanfuncs:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
- res = f(mat, axis=0)
- assert_(isinstance(res, np.matrix))
+ res = f(mine, axis=0)
+ assert_(isinstance(res, MyNDArray))
assert_(not np.any(np.isnan(res)))
assert_(len(w) == 0)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
- res = f(mat, axis=1)
- assert_(isinstance(res, np.matrix))
- assert_(np.isnan(res[1, 0]) and not np.isnan(res[0, 0])
- and not np.isnan(res[2, 0]))
+ res = f(mine, axis=1)
+ assert_(isinstance(res, MyNDArray))
+ assert_(np.isnan(res[1]) and not np.isnan(res[0])
+ and not np.isnan(res[2]))
assert_(len(w) == 1, 'no warning raised')
assert_(issubclass(w[0].category, RuntimeWarning))
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
- res = f(mat)
- assert_(np.isscalar(res))
+ res = f(mine)
+ assert_(res.shape == ())
assert_(res != np.nan)
assert_(len(w) == 0)
+ def test_object_array(self):
+ arr = np.array([[1.0, 2.0], [np.nan, 4.0], [np.nan, np.nan]], dtype=object)
+ assert_equal(np.nanmin(arr), 1.0)
+ assert_equal(np.nanmin(arr, axis=0), [1.0, 2.0])
-class TestNanFunctions_ArgminArgmax(TestCase):
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter('always')
+ # assert_equal does not work on object arrays of nan
+ assert_equal(list(np.nanmin(arr, axis=1)), [1.0, 4.0, np.nan])
+ assert_(len(w) == 1, 'no warning raised')
+ assert_(issubclass(w[0].category, RuntimeWarning))
+
+
+class TestNanFunctions_ArgminArgmax(object):
nanfuncs = [np.nanargmin, np.nanargmax]
@@ -197,22 +213,25 @@ class TestNanFunctions_ArgminArgmax(TestCase):
for f in self.nanfuncs:
assert_(f(0.) == 0.)
- def test_matrices(self):
+ def test_subclass(self):
+ class MyNDArray(np.ndarray):
+ pass
+
# Check that it works and that type and
# shape are preserved
- mat = np.matrix(np.eye(3))
+ mine = np.eye(3).view(MyNDArray)
for f in self.nanfuncs:
- res = f(mat, axis=0)
- assert_(isinstance(res, np.matrix))
- assert_(res.shape == (1, 3))
- res = f(mat, axis=1)
- assert_(isinstance(res, np.matrix))
- assert_(res.shape == (3, 1))
- res = f(mat)
- assert_(np.isscalar(res))
+ res = f(mine, axis=0)
+ assert_(isinstance(res, MyNDArray))
+ assert_(res.shape == (3,))
+ res = f(mine, axis=1)
+ assert_(isinstance(res, MyNDArray))
+ assert_(res.shape == (3,))
+ res = f(mine)
+ assert_(res.shape == ())
-class TestNanFunctions_IntTypes(TestCase):
+class TestNanFunctions_IntTypes(object):
int_types = (np.int8, np.int16, np.int32, np.int64, np.uint8,
np.uint16, np.uint32, np.uint64)
@@ -369,22 +388,30 @@ class SharedNanFunctionsTestsMixin(object):
for f in self.nanfuncs:
assert_(f(0.) == 0.)
- def test_matrices(self):
+ def test_subclass(self):
+ class MyNDArray(np.ndarray):
+ pass
+
# Check that it works and that type and
# shape are preserved
- mat = np.matrix(np.eye(3))
+ array = np.eye(3)
+ mine = array.view(MyNDArray)
for f in self.nanfuncs:
- res = f(mat, axis=0)
- assert_(isinstance(res, np.matrix))
- assert_(res.shape == (1, 3))
- res = f(mat, axis=1)
- assert_(isinstance(res, np.matrix))
- assert_(res.shape == (3, 1))
- res = f(mat)
- assert_(np.isscalar(res))
-
-
-class TestNanFunctions_SumProd(TestCase, SharedNanFunctionsTestsMixin):
+ expected_shape = f(array, axis=0).shape
+ res = f(mine, axis=0)
+ assert_(isinstance(res, MyNDArray))
+ assert_(res.shape == expected_shape)
+ expected_shape = f(array, axis=1).shape
+ res = f(mine, axis=1)
+ assert_(isinstance(res, MyNDArray))
+ assert_(res.shape == expected_shape)
+ expected_shape = f(array).shape
+ res = f(mine)
+ assert_(isinstance(res, MyNDArray))
+ assert_(res.shape == expected_shape)
+
+
+class TestNanFunctions_SumProd(SharedNanFunctionsTestsMixin):
nanfuncs = [np.nansum, np.nanprod]
stdfuncs = [np.sum, np.prod]
@@ -418,7 +445,7 @@ class TestNanFunctions_SumProd(TestCase, SharedNanFunctionsTestsMixin):
assert_equal(res, tgt)
-class TestNanFunctions_CumSumProd(TestCase, SharedNanFunctionsTestsMixin):
+class TestNanFunctions_CumSumProd(SharedNanFunctionsTestsMixin):
nanfuncs = [np.nancumsum, np.nancumprod]
stdfuncs = [np.cumsum, np.cumprod]
@@ -469,18 +496,6 @@ class TestNanFunctions_CumSumProd(TestCase, SharedNanFunctionsTestsMixin):
res = f(d, axis=axis)
assert_equal(res.shape, (3, 5, 7, 11))
- def test_matrices(self):
- # Check that it works and that type and
- # shape are preserved
- mat = np.matrix(np.eye(3))
- for f in self.nanfuncs:
- for axis in np.arange(2):
- res = f(mat, axis=axis)
- assert_(isinstance(res, np.matrix))
- assert_(res.shape == (3, 3))
- res = f(mat)
- assert_(res.shape == (1, 3*3))
-
def test_result_values(self):
for axis in (-2, -1, 0, 1, None):
tgt = np.cumprod(_ndat_ones, axis=axis)
@@ -501,7 +516,7 @@ class TestNanFunctions_CumSumProd(TestCase, SharedNanFunctionsTestsMixin):
assert_almost_equal(res, tgt)
-class TestNanFunctions_MeanVarStd(TestCase, SharedNanFunctionsTestsMixin):
+class TestNanFunctions_MeanVarStd(SharedNanFunctionsTestsMixin):
nanfuncs = [np.nanmean, np.nanvar, np.nanstd]
stdfuncs = [np.mean, np.var, np.std]
@@ -573,7 +588,7 @@ class TestNanFunctions_MeanVarStd(TestCase, SharedNanFunctionsTestsMixin):
assert_(len(w) == 0)
-class TestNanFunctions_Median(TestCase):
+class TestNanFunctions_Median(object):
def test_mutation(self):
# Check that passed array is not modified.
@@ -684,10 +699,10 @@ class TestNanFunctions_Median(TestCase):
def test_extended_axis_invalid(self):
d = np.ones((3, 5, 7, 11))
- assert_raises(IndexError, np.nanmedian, d, axis=-5)
- assert_raises(IndexError, np.nanmedian, d, axis=(0, -5))
- assert_raises(IndexError, np.nanmedian, d, axis=4)
- assert_raises(IndexError, np.nanmedian, d, axis=(0, 4))
+ assert_raises(np.AxisError, np.nanmedian, d, axis=-5)
+ assert_raises(np.AxisError, np.nanmedian, d, axis=(0, -5))
+ assert_raises(np.AxisError, np.nanmedian, d, axis=4)
+ assert_raises(np.AxisError, np.nanmedian, d, axis=(0, 4))
assert_raises(ValueError, np.nanmedian, d, axis=(1, 1))
def test_float_special(self):
@@ -737,7 +752,7 @@ class TestNanFunctions_Median(TestCase):
([np.nan] * i) + [-inf] * j)
-class TestNanFunctions_Percentile(TestCase):
+class TestNanFunctions_Percentile(object):
def test_mutation(self):
# Check that passed array is not modified.
@@ -843,10 +858,10 @@ class TestNanFunctions_Percentile(TestCase):
def test_extended_axis_invalid(self):
d = np.ones((3, 5, 7, 11))
- assert_raises(IndexError, np.nanpercentile, d, q=5, axis=-5)
- assert_raises(IndexError, np.nanpercentile, d, q=5, axis=(0, -5))
- assert_raises(IndexError, np.nanpercentile, d, q=5, axis=4)
- assert_raises(IndexError, np.nanpercentile, d, q=5, axis=(0, 4))
+ assert_raises(np.AxisError, np.nanpercentile, d, q=5, axis=-5)
+ assert_raises(np.AxisError, np.nanpercentile, d, q=5, axis=(0, -5))
+ assert_raises(np.AxisError, np.nanpercentile, d, q=5, axis=4)
+ assert_raises(np.AxisError, np.nanpercentile, d, q=5, axis=(0, 4))
assert_raises(ValueError, np.nanpercentile, d, q=5, axis=(1, 1))
def test_multiple_percentiles(self):
@@ -876,5 +891,37 @@ class TestNanFunctions_Percentile(TestCase):
assert_equal(np.nanpercentile(megamat, perc, axis=(1, 2)).shape, (2, 3, 6))
-if __name__ == "__main__":
- run_module_suite()
+class TestNanFunctions_Quantile(object):
+ # most of this is already tested by TestPercentile
+
+ def test_regression(self):
+ ar = np.arange(24).reshape(2, 3, 4).astype(float)
+ ar[0][1] = np.nan
+
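+        # nanquantile uses q in [0, 1]; it should match nanpercentile at 100*q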
+ assert_equal(np.nanquantile(ar, q=0.5), np.nanpercentile(ar, q=50))
+ assert_equal(np.nanquantile(ar, q=0.5, axis=0),
+ np.nanpercentile(ar, q=50, axis=0))
+ assert_equal(np.nanquantile(ar, q=0.5, axis=1),
+ np.nanpercentile(ar, q=50, axis=1))
+ assert_equal(np.nanquantile(ar, q=[0.5], axis=1),
+ np.nanpercentile(ar, q=[50], axis=1))
+ assert_equal(np.nanquantile(ar, q=[0.25, 0.5, 0.75], axis=1),
+ np.nanpercentile(ar, q=[25, 50, 75], axis=1))
+
+ def test_basic(self):
+ x = np.arange(8) * 0.5
+ assert_equal(np.nanquantile(x, 0), 0.)
+ assert_equal(np.nanquantile(x, 1), 3.5)
+ assert_equal(np.nanquantile(x, 0.5), 1.75)
+
+ def test_no_p_overwrite(self):
+ # this is worth retesting, because quantile does not make a copy
+ p0 = np.array([0, 0.75, 0.25, 0.5, 1.0])
+ p = p0.copy()
+ np.nanquantile(np.arange(100.), p, interpolation="midpoint")
+ assert_array_equal(p, p0)
+
+ p0 = p0.tolist()
+ p = p.tolist()
+ np.nanquantile(np.arange(100.), p, interpolation="midpoint")
+ assert_array_equal(p, p0)
diff --git a/numpy/lib/tests/test_packbits.py b/numpy/lib/tests/test_packbits.py
index 4bf505f56..fde5c37f2 100644
--- a/numpy/lib/tests/test_packbits.py
+++ b/numpy/lib/tests/test_packbits.py
@@ -1,9 +1,7 @@
from __future__ import division, absolute_import, print_function
import numpy as np
-from numpy.testing import (
- assert_array_equal, assert_equal, assert_raises, run_module_suite
-)
+from numpy.testing import assert_array_equal, assert_equal, assert_raises
def test_packbits():
@@ -213,6 +211,15 @@ def test_packbits_large():
assert_raises(TypeError, np.packbits, np.array(a, dtype=float))
+def test_packbits_very_large():
+    # test with some larger arrays, gh-8637
+    # the code is covered earlier, but a larger array makes a crash more likely
+ for s in range(950, 1050):
+ for dt in '?bBhHiIlLqQ':
+ x = np.ones((200, s), dtype=bool)
+ np.packbits(x, axis=1)
+
+
def test_unpackbits():
# Copied from the docstring.
a = np.array([[2], [7], [23]], dtype=np.uint8)
@@ -259,7 +266,3 @@ def test_unpackbits_large():
assert_array_equal(np.packbits(np.unpackbits(d, axis=1), axis=1), d)
d = d.T.copy()
assert_array_equal(np.packbits(np.unpackbits(d, axis=0), axis=0), d)
-
-
-if __name__ == "__main__":
- run_module_suite()
diff --git a/numpy/lib/tests/test_polynomial.py b/numpy/lib/tests/test_polynomial.py
index 00dffd3d3..9f7c117a2 100644
--- a/numpy/lib/tests/test_polynomial.py
+++ b/numpy/lib/tests/test_polynomial.py
@@ -1,93 +1,79 @@
from __future__ import division, absolute_import, print_function
-'''
->>> p = np.poly1d([1.,2,3])
->>> p
-poly1d([ 1., 2., 3.])
->>> print(p)
- 2
-1 x + 2 x + 3
->>> q = np.poly1d([3.,2,1])
->>> q
-poly1d([ 3., 2., 1.])
->>> print(q)
- 2
-3 x + 2 x + 1
->>> print(np.poly1d([1.89999+2j, -3j, -5.12345678, 2+1j]))
- 3 2
-(1.9 + 2j) x - 3j x - 5.123 x + (2 + 1j)
->>> print(np.poly1d([-3, -2, -1]))
- 2
--3 x - 2 x - 1
-
->>> p(0)
-3.0
->>> p(5)
-38.0
->>> q(0)
-1.0
->>> q(5)
-86.0
-
->>> p * q
-poly1d([ 3., 8., 14., 8., 3.])
->>> p / q
-(poly1d([ 0.33333333]), poly1d([ 1.33333333, 2.66666667]))
->>> p + q
-poly1d([ 4., 4., 4.])
->>> p - q
-poly1d([-2., 0., 2.])
->>> p ** 4
-poly1d([ 1., 8., 36., 104., 214., 312., 324., 216., 81.])
-
->>> p(q)
-poly1d([ 9., 12., 16., 8., 6.])
->>> q(p)
-poly1d([ 3., 12., 32., 40., 34.])
-
->>> np.asarray(p)
-array([ 1., 2., 3.])
->>> len(p)
-2
-
->>> p[0], p[1], p[2], p[3]
-(3.0, 2.0, 1.0, 0)
-
->>> p.integ()
-poly1d([ 0.33333333, 1. , 3. , 0. ])
->>> p.integ(1)
-poly1d([ 0.33333333, 1. , 3. , 0. ])
->>> p.integ(5)
-poly1d([ 0.00039683, 0.00277778, 0.025 , 0. , 0. ,
- 0. , 0. , 0. ])
->>> p.deriv()
-poly1d([ 2., 2.])
->>> p.deriv(2)
-poly1d([ 2.])
-
->>> q = np.poly1d([1.,2,3], variable='y')
->>> print(q)
- 2
-1 y + 2 y + 3
->>> q = np.poly1d([1.,2,3], variable='lambda')
->>> print(q)
- 2
-1 lambda + 2 lambda + 3
-
->>> np.polydiv(np.poly1d([1,0,-1]), np.poly1d([1,1]))
-(poly1d([ 1., -1.]), poly1d([ 0.]))
-
-'''
import numpy as np
from numpy.testing import (
- run_module_suite, TestCase, assert_, assert_equal, assert_array_equal,
- assert_almost_equal, assert_array_almost_equal, assert_raises, rundocs
+ assert_, assert_equal, assert_array_equal, assert_almost_equal,
+ assert_array_almost_equal, assert_raises
)
-class TestDocs(TestCase):
- def test_doctests(self):
- return rundocs()
+class TestPolynomial(object):
+ def test_poly1d_str_and_repr(self):
+ p = np.poly1d([1., 2, 3])
+ assert_equal(repr(p), 'poly1d([1., 2., 3.])')
+ assert_equal(str(p),
+ ' 2\n'
+ '1 x + 2 x + 3')
+
+ q = np.poly1d([3., 2, 1])
+ assert_equal(repr(q), 'poly1d([3., 2., 1.])')
+ assert_equal(str(q),
+ ' 2\n'
+ '3 x + 2 x + 1')
+
+ r = np.poly1d([1.89999 + 2j, -3j, -5.12345678, 2 + 1j])
+ assert_equal(str(r),
+ ' 3 2\n'
+ '(1.9 + 2j) x - 3j x - 5.123 x + (2 + 1j)')
+
+ assert_equal(str(np.poly1d([-3, -2, -1])),
+ ' 2\n'
+ '-3 x - 2 x - 1')
+
+ def test_poly1d_resolution(self):
+ p = np.poly1d([1., 2, 3])
+ q = np.poly1d([3., 2, 1])
+ assert_equal(p(0), 3.0)
+ assert_equal(p(5), 38.0)
+ assert_equal(q(0), 1.0)
+ assert_equal(q(5), 86.0)
+
+ def test_poly1d_math(self):
+ # here we use some simple coeffs to make calculations easier
+ p = np.poly1d([1., 2, 4])
+ q = np.poly1d([4., 2, 1])
+ assert_equal(p/q, (np.poly1d([0.25]), np.poly1d([1.5, 3.75])))
+ assert_equal(p.integ(), np.poly1d([1/3, 1., 4., 0.]))
+ assert_equal(p.integ(1), np.poly1d([1/3, 1., 4., 0.]))
+
+ p = np.poly1d([1., 2, 3])
+ q = np.poly1d([3., 2, 1])
+ assert_equal(p * q, np.poly1d([3., 8., 14., 8., 3.]))
+ assert_equal(p + q, np.poly1d([4., 4., 4.]))
+ assert_equal(p - q, np.poly1d([-2., 0., 2.]))
+ assert_equal(p ** 4, np.poly1d([1., 8., 36., 104., 214., 312., 324., 216., 81.]))
+ assert_equal(p(q), np.poly1d([9., 12., 16., 8., 6.]))
+ assert_equal(q(p), np.poly1d([3., 12., 32., 40., 34.]))
+ assert_equal(p.deriv(), np.poly1d([2., 2.]))
+ assert_equal(p.deriv(2), np.poly1d([2.]))
+ assert_equal(np.polydiv(np.poly1d([1, 0, -1]), np.poly1d([1, 1])),
+ (np.poly1d([1., -1.]), np.poly1d([0.])))
+
+ def test_poly1d_misc(self):
+ p = np.poly1d([1., 2, 3])
+ assert_equal(np.asarray(p), np.array([1., 2., 3.]))
+ assert_equal(len(p), 2)
+ assert_equal((p[0], p[1], p[2], p[3]), (3.0, 2.0, 1.0, 0))
+
+ def test_poly1d_variable_arg(self):
+ q = np.poly1d([1., 2, 3], variable='y')
+ assert_equal(str(q),
+ ' 2\n'
+ '1 y + 2 y + 3')
+ q = np.poly1d([1., 2, 3], variable='lambda')
+ assert_equal(str(q),
+ ' 2\n'
+ '1 lambda + 2 lambda + 3')
def test_poly(self):
assert_array_almost_equal(np.poly([3, -np.sqrt(2), np.sqrt(2)]),
@@ -213,6 +199,33 @@ class TestDocs(TestCase):
v = np.arange(1, 21)
assert_almost_equal(np.poly(v), np.poly(np.diag(v)))
+ def test_poly_eq(self):
+ p = np.poly1d([1, 2, 3])
+ p2 = np.poly1d([1, 2, 4])
+ assert_equal(p == None, False)
+ assert_equal(p != None, True)
+ assert_equal(p == p, True)
+ assert_equal(p == p2, False)
+ assert_equal(p != p2, True)
+
+ def test_polydiv(self):
+ b = np.poly1d([2, 6, 6, 1])
+ a = np.poly1d([-1j, (1+2j), -(2+1j), 1])
+ q, r = np.polydiv(b, a)
+ assert_equal(q.coeffs.dtype, np.complex128)
+ assert_equal(r.coeffs.dtype, np.complex128)
+ assert_equal(q*a + r, b)
+
+ def test_poly_coeffs_immutable(self):
+ """ Coefficients should not be modifiable """
+ p = np.poly1d([1, 2, 3])
+
+ try:
+ # despite throwing an exception, this used to change state
+ p.coeffs += 1
+ except Exception:
+ pass
+ assert_equal(p.coeffs, [1, 2, 3])
-if __name__ == "__main__":
- run_module_suite()
+ p.coeffs[2] += 10
+ assert_equal(p.coeffs, [1, 2, 3])
diff --git a/numpy/lib/tests/test_recfunctions.py b/numpy/lib/tests/test_recfunctions.py
index 699a04716..d4828bc1f 100644
--- a/numpy/lib/tests/test_recfunctions.py
+++ b/numpy/lib/tests/test_recfunctions.py
@@ -1,23 +1,25 @@
from __future__ import division, absolute_import, print_function
+import pytest
+
import numpy as np
import numpy.ma as ma
from numpy.ma.mrecords import MaskedRecords
from numpy.ma.testutils import assert_equal
-from numpy.testing import TestCase, run_module_suite, assert_
+from numpy.testing import assert_, assert_raises
from numpy.lib.recfunctions import (
drop_fields, rename_fields, get_fieldstructure, recursive_fill_fields,
- find_duplicates, merge_arrays, append_fields, stack_arrays, join_by
- )
+ find_duplicates, merge_arrays, append_fields, stack_arrays, join_by,
+ repack_fields)
get_names = np.lib.recfunctions.get_names
get_names_flat = np.lib.recfunctions.get_names_flat
zip_descr = np.lib.recfunctions.zip_descr
-class TestRecFunctions(TestCase):
+class TestRecFunctions(object):
# Misc tests
- def setUp(self):
+ def setup(self):
x = np.array([1, 2, ])
y = np.array([10, 20, 30])
z = np.array([('A', 1.), ('B', 2.)],
@@ -190,8 +192,20 @@ class TestRecFunctions(TestCase):
assert_equal(sorted(test[-1]), control)
assert_equal(test[0], a[test[-1]])
+ def test_repack_fields(self):
+ dt = np.dtype('u1,f4,i8', align=True)
+ a = np.zeros(2, dtype=dt)
+
+ assert_equal(repack_fields(dt), np.dtype('u1,f4,i8'))
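+        # without alignment padding the itemsize is 1 + 4 + 8 = 13 bytes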
+ assert_equal(repack_fields(a).itemsize, 13)
+ assert_equal(repack_fields(repack_fields(dt), align=True), dt)
+
+ # make sure type is preserved
+ dt = np.dtype((np.record, dt))
+ assert_(repack_fields(dt).type is np.record)
-class TestRecursiveFillFields(TestCase):
+
+class TestRecursiveFillFields(object):
# Test recursive_fill_fields.
def test_simple_flexible(self):
# Test recursive_fill_fields on flexible-array
@@ -214,10 +228,10 @@ class TestRecursiveFillFields(TestCase):
assert_equal(test, control)
-class TestMergeArrays(TestCase):
+class TestMergeArrays(object):
# Test merge_arrays
- def setUp(self):
+ def setup(self):
x = np.array([1, 2, ])
y = np.array([10, 20, 30])
z = np.array(
@@ -347,10 +361,10 @@ class TestMergeArrays(TestCase):
assert_equal(test, control)
-class TestAppendFields(TestCase):
+class TestAppendFields(object):
# Test append_fields
- def setUp(self):
+ def setup(self):
x = np.array([1, 2, ])
y = np.array([10, 20, 30])
z = np.array(
@@ -401,9 +415,9 @@ class TestAppendFields(TestCase):
assert_equal(test, control)
-class TestStackArrays(TestCase):
+class TestStackArrays(object):
# Test stack_arrays
- def setUp(self):
+ def setup(self):
x = np.array([1, 2, ])
y = np.array([10, 20, 30])
z = np.array(
@@ -417,11 +431,11 @@ class TestStackArrays(TestCase):
(_, x, _, _) = self.data
test = stack_arrays((x,))
assert_equal(test, x)
- self.assertTrue(test is x)
+ assert_(test is x)
test = stack_arrays(x)
assert_equal(test, x)
- self.assertTrue(test is x)
+ assert_(test is x)
def test_unnamed_fields(self):
# Tests combinations of arrays w/o named fields
@@ -546,9 +560,38 @@ class TestStackArrays(TestCase):
assert_equal(test, control)
assert_equal(test.mask, control.mask)
-
-class TestJoinBy(TestCase):
- def setUp(self):
+ def test_subdtype(self):
+ z = np.array([
+ ('A', 1), ('B', 2)
+ ], dtype=[('A', '|S3'), ('B', float, (1,))])
+ zz = np.array([
+ ('a', [10.], 100.), ('b', [20.], 200.), ('c', [30.], 300.)
+ ], dtype=[('A', '|S3'), ('B', float, (1,)), ('C', float)])
+
+ res = stack_arrays((z, zz))
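+        # z has no 'C' field, so 'C' is masked for the rows coming from z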
+ expected = ma.array(
+ data=[
+ (b'A', [1.0], 0),
+ (b'B', [2.0], 0),
+ (b'a', [10.0], 100.0),
+ (b'b', [20.0], 200.0),
+ (b'c', [30.0], 300.0)],
+ mask=[
+ (False, [False], True),
+ (False, [False], True),
+ (False, [False], False),
+ (False, [False], False),
+ (False, [False], False)
+ ],
+ dtype=zz.dtype
+ )
+ assert_equal(res.dtype, expected.dtype)
+ assert_equal(res, expected)
+ assert_equal(res.mask, expected.mask)
+
+
+class TestJoinBy(object):
+ def setup(self):
self.a = np.array(list(zip(np.arange(10), np.arange(50, 60),
np.arange(100, 110))),
dtype=[('a', int), ('b', int), ('c', int)])
@@ -588,6 +631,16 @@ class TestJoinBy(TestCase):
dtype=[('a', int), ('b', int),
('c', int), ('d', int)])
+ def test_join_subdtype(self):
+ # tests the bug in https://stackoverflow.com/q/44769632/102441
+ from numpy.lib import recfunctions as rfn
+ foo = np.array([(1,)],
+ dtype=[('key', int)])
+ bar = np.array([(1, np.array([1,2,3]))],
+ dtype=[('key', int), ('value', 'uint16', 3)])
+ res = join_by('key', foo, bar)
+ assert_equal(res, bar.view(ma.MaskedArray))
+
def test_outer_join(self):
a, b = self.a, self.b
@@ -633,10 +686,79 @@ class TestJoinBy(TestCase):
dtype=[('a', int), ('b', int), ('c', int), ('d', int)])
assert_equal(test, control)
+ def test_different_field_order(self):
+ # gh-8940
+ a = np.zeros(3, dtype=[('a', 'i4'), ('b', 'f4'), ('c', 'u1')])
+ b = np.ones(3, dtype=[('c', 'u1'), ('b', 'f4'), ('a', 'i4')])
+ # this should not give a FutureWarning:
+ j = join_by(['c', 'b'], a, b, jointype='inner', usemask=False)
+ assert_equal(j.dtype.names, ['b', 'c', 'a1', 'a2'])
+
+ def test_duplicate_keys(self):
+ a = np.zeros(3, dtype=[('a', 'i4'), ('b', 'f4'), ('c', 'u1')])
+ b = np.ones(3, dtype=[('c', 'u1'), ('b', 'f4'), ('a', 'i4')])
+ assert_raises(ValueError, join_by, ['a', 'b', 'b'], a, b)
+
+ @pytest.mark.xfail(reason="See comment at gh-9343")
+ def test_same_name_different_dtypes_key(self):
+ a_dtype = np.dtype([('key', 'S5'), ('value', '<f4')])
+ b_dtype = np.dtype([('key', 'S10'), ('value', '<f4')])
+ expected_dtype = np.dtype([
+ ('key', 'S10'), ('value1', '<f4'), ('value2', '<f4')])
+
+ a = np.array([('Sarah', 8.0), ('John', 6.0)], dtype=a_dtype)
+ b = np.array([('Sarah', 10.0), ('John', 7.0)], dtype=b_dtype)
+ res = join_by('key', a, b)
+
+ assert_equal(res.dtype, expected_dtype)
+
+ def test_same_name_different_dtypes(self):
+ # gh-9338
+ a_dtype = np.dtype([('key', 'S10'), ('value', '<f4')])
+ b_dtype = np.dtype([('key', 'S10'), ('value', '<f8')])
+ expected_dtype = np.dtype([
+ ('key', '|S10'), ('value1', '<f4'), ('value2', '<f8')])
+
+ a = np.array([('Sarah', 8.0), ('John', 6.0)], dtype=a_dtype)
+ b = np.array([('Sarah', 10.0), ('John', 7.0)], dtype=b_dtype)
+ res = join_by('key', a, b)
-class TestJoinBy2(TestCase):
+ assert_equal(res.dtype, expected_dtype)
+
+ def test_subarray_key(self):
+ a_dtype = np.dtype([('pos', int, 3), ('f', '<f4')])
+ a = np.array([([1, 1, 1], np.pi), ([1, 2, 3], 0.0)], dtype=a_dtype)
+
+ b_dtype = np.dtype([('pos', int, 3), ('g', '<f4')])
+ b = np.array([([1, 1, 1], 3), ([3, 2, 1], 0.0)], dtype=b_dtype)
+
+ expected_dtype = np.dtype([('pos', int, 3), ('f', '<f4'), ('g', '<f4')])
+ expected = np.array([([1, 1, 1], np.pi, 3)], dtype=expected_dtype)
+
+ res = join_by('pos', a, b)
+ assert_equal(res.dtype, expected_dtype)
+ assert_equal(res, expected)
+
+ def test_padded_dtype(self):
+ dt = np.dtype('i1,f4', align=True)
+ dt.names = ('k', 'v')
+        assert_equal(len(dt.descr), 3)  # padding field is inserted
+
+ a = np.array([(1, 3), (3, 2)], dt)
+ b = np.array([(1, 1), (2, 2)], dt)
+ res = join_by('k', a, b)
+
+ # no padding fields remain
+ expected_dtype = np.dtype([
+ ('k', 'i1'), ('v1', 'f4'), ('v2', 'f4')
+ ])
+
+ assert_equal(res.dtype, expected_dtype)
+
+
+class TestJoinBy2(object):
@classmethod
- def setUp(cls):
+ def setup(cls):
cls.a = np.array(list(zip(np.arange(10), np.arange(50, 60),
np.arange(100, 110))),
dtype=[('a', int), ('b', int), ('c', int)])
@@ -660,8 +782,8 @@ class TestJoinBy2(TestCase):
assert_equal(test, control)
def test_no_postfix(self):
- self.assertRaises(ValueError, join_by, 'a', self.a, self.b,
- r1postfix='', r2postfix='')
+ assert_raises(ValueError, join_by, 'a', self.a, self.b,
+ r1postfix='', r2postfix='')
def test_no_r2postfix(self):
# Basic test of join_by no_r2postfix
@@ -699,13 +821,13 @@ class TestJoinBy2(TestCase):
assert_equal(test.dtype, control.dtype)
assert_equal(test, control)
-class TestAppendFieldsObj(TestCase):
+class TestAppendFieldsObj(object):
"""
Test append_fields with arrays containing objects
"""
# https://github.com/numpy/numpy/issues/2346
- def setUp(self):
+ def setup(self):
from datetime import date
self.data = dict(obj=date(2000, 1, 1))
@@ -719,6 +841,3 @@ class TestAppendFieldsObj(TestCase):
control = np.array([(obj, 1.0, 10), (obj, 2.0, 20)],
dtype=[('A', object), ('B', float), ('C', int)])
assert_equal(test, control)
-
-if __name__ == '__main__':
- run_module_suite()
diff --git a/numpy/lib/tests/test_regression.py b/numpy/lib/tests/test_regression.py
index ee50dcfa4..4c46bc46b 100644
--- a/numpy/lib/tests/test_regression.py
+++ b/numpy/lib/tests/test_regression.py
@@ -5,22 +5,19 @@ import sys
import numpy as np
from numpy.testing import (
- run_module_suite, TestCase, assert_, assert_equal, assert_array_equal,
- assert_array_almost_equal, assert_raises
+ assert_, assert_equal, assert_array_equal, assert_array_almost_equal,
+ assert_raises, _assert_valid_refcount,
)
-from numpy.testing.utils import _assert_valid_refcount
from numpy.compat import unicode
-rlevel = 1
-
-class TestRegression(TestCase):
- def test_poly1d(self, level=rlevel):
+class TestRegression(object):
+ def test_poly1d(self):
# Ticket #28
assert_equal(np.poly1d([1]) - np.poly1d([1, 0]),
np.poly1d([-1, 1]))
- def test_cov_parameters(self, level=rlevel):
+ def test_cov_parameters(self):
# Ticket #91
x = np.random.random((3, 3))
y = x.copy()
@@ -28,57 +25,57 @@ class TestRegression(TestCase):
np.cov(y, rowvar=0)
assert_array_equal(x, y)
- def test_mem_digitize(self, level=rlevel):
+ def test_mem_digitize(self):
# Ticket #95
for i in range(100):
np.digitize([1, 2, 3, 4], [1, 3])
np.digitize([0, 1, 2, 3, 4], [1, 3])
- def test_unique_zero_sized(self, level=rlevel):
+ def test_unique_zero_sized(self):
# Ticket #205
assert_array_equal([], np.unique(np.array([])))
- def test_mem_vectorise(self, level=rlevel):
+ def test_mem_vectorise(self):
# Ticket #325
vt = np.vectorize(lambda *args: args)
vt(np.zeros((1, 2, 1)), np.zeros((2, 1, 1)), np.zeros((1, 1, 2)))
vt(np.zeros((1, 2, 1)), np.zeros((2, 1, 1)), np.zeros((1,
1, 2)), np.zeros((2, 2)))
- def test_mgrid_single_element(self, level=rlevel):
+ def test_mgrid_single_element(self):
# Ticket #339
assert_array_equal(np.mgrid[0:0:1j], [0])
assert_array_equal(np.mgrid[0:0], [])
- def test_refcount_vectorize(self, level=rlevel):
+ def test_refcount_vectorize(self):
# Ticket #378
def p(x, y):
return 123
v = np.vectorize(p)
_assert_valid_refcount(v)
- def test_poly1d_nan_roots(self, level=rlevel):
+ def test_poly1d_nan_roots(self):
# Ticket #396
p = np.poly1d([np.nan, np.nan, 1], r=0)
- self.assertRaises(np.linalg.LinAlgError, getattr, p, "r")
+ assert_raises(np.linalg.LinAlgError, getattr, p, "r")
- def test_mem_polymul(self, level=rlevel):
+ def test_mem_polymul(self):
# Ticket #448
np.polymul([], [1.])
- def test_mem_string_concat(self, level=rlevel):
+ def test_mem_string_concat(self):
# Ticket #469
x = np.array([])
np.append(x, 'asdasd\tasdasd')
- def test_poly_div(self, level=rlevel):
+ def test_poly_div(self):
# Ticket #553
u = np.poly1d([1, 2, 3])
v = np.poly1d([1, 2, 3, 4, 5])
q, r = np.polydiv(u, v)
assert_equal(q*v + r, u)
- def test_poly_eq(self, level=rlevel):
+ def test_poly_eq(self):
# Ticket #554
x = np.poly1d([1, 2, 3])
y = np.poly1d([3, 4])
@@ -109,13 +106,13 @@ class TestRegression(TestCase):
def test_polydiv_type(self):
# Make polydiv work for complex types
msg = "Wrong type, should be complex"
- x = np.ones(3, dtype=np.complex)
+ x = np.ones(3, dtype=complex)
q, r = np.polydiv(x, x)
- assert_(q.dtype == np.complex, msg)
+ assert_(q.dtype == complex, msg)
msg = "Wrong type, should be float"
- x = np.ones(3, dtype=np.int)
+ x = np.ones(3, dtype=int)
q, r = np.polydiv(x, x)
- assert_(q.dtype == np.float, msg)
+ assert_(q.dtype == float, msg)
def test_histogramdd_too_many_bins(self):
# Ticket 928.
@@ -124,22 +121,22 @@ class TestRegression(TestCase):
def test_polyint_type(self):
# Ticket #944
msg = "Wrong type, should be complex"
- x = np.ones(3, dtype=np.complex)
- assert_(np.polyint(x).dtype == np.complex, msg)
+ x = np.ones(3, dtype=complex)
+ assert_(np.polyint(x).dtype == complex, msg)
msg = "Wrong type, should be float"
- x = np.ones(3, dtype=np.int)
- assert_(np.polyint(x).dtype == np.float, msg)
+ x = np.ones(3, dtype=int)
+ assert_(np.polyint(x).dtype == float, msg)
def test_ndenumerate_crash(self):
# Ticket 1140
# Shouldn't crash:
list(np.ndenumerate(np.array([[]])))
- def test_asfarray_none(self, level=rlevel):
+ def test_asfarray_none(self):
# Test for changeset r5065
assert_array_equal(np.array([np.nan]), np.asfarray([None]))
- def test_large_fancy_indexing(self, level=rlevel):
+ def test_large_fancy_indexing(self):
# Large enough to fail on 64-bit.
nbits = np.dtype(np.intp).itemsize * 8
thesize = int((2**nbits)**(1.0/5.0)+1)
@@ -156,15 +153,15 @@ class TestRegression(TestCase):
i = np.random.randint(0, n, size=thesize)
a[np.ix_(i, i, i, i, i)]
- self.assertRaises(ValueError, dp)
- self.assertRaises(ValueError, dp2)
+ assert_raises(ValueError, dp)
+ assert_raises(ValueError, dp2)
- def test_void_coercion(self, level=rlevel):
+ def test_void_coercion(self):
dt = np.dtype([('a', 'f4'), ('b', 'i4')])
x = np.zeros((1,), dt)
assert_(np.r_[x, x].dtype == dt)
- def test_who_with_0dim_array(self, level=rlevel):
+ def test_who_with_0dim_array(self):
# ticket #1243
import os
import sys
@@ -174,7 +171,7 @@ class TestRegression(TestCase):
try:
try:
np.who({'foo': np.array(1)})
- except:
+ except Exception:
raise AssertionError("ticket #1243")
finally:
sys.stdout.close()
@@ -206,7 +203,7 @@ class TestRegression(TestCase):
dlist = [np.float64, np.int32, np.int32]
try:
append_fields(base, names, data, dlist)
- except:
+ except Exception:
raise AssertionError()
def test_loadtxt_fields_subarrays(self):
@@ -235,10 +232,10 @@ class TestRegression(TestCase):
def test_nansum_with_boolean(self):
# gh-2978
- a = np.zeros(2, dtype=np.bool)
+ a = np.zeros(2, dtype=bool)
try:
np.nansum(a)
- except:
+ except Exception:
raise AssertionError()
def test_py3_compat(self):
@@ -255,7 +252,3 @@ class TestRegression(TestCase):
raise AssertionError()
finally:
out.close()
-
-
-if __name__ == "__main__":
- run_module_suite()
diff --git a/numpy/lib/tests/test_shape_base.py b/numpy/lib/tests/test_shape_base.py
index 8bdf3d3da..c95894f94 100644
--- a/numpy/lib/tests/test_shape_base.py
+++ b/numpy/lib/tests/test_shape_base.py
@@ -1,23 +1,114 @@
from __future__ import division, absolute_import, print_function
import numpy as np
+import warnings
+import functools
+
from numpy.lib.shape_base import (
apply_along_axis, apply_over_axes, array_split, split, hsplit, dsplit,
- vsplit, dstack, column_stack, kron, tile
+ vsplit, dstack, column_stack, kron, tile, expand_dims, take_along_axis,
+ put_along_axis
)
from numpy.testing import (
- run_module_suite, TestCase, assert_, assert_equal, assert_array_equal,
- assert_raises, assert_warns
+ assert_, assert_equal, assert_array_equal, assert_raises, assert_warns
)
-class TestApplyAlongAxis(TestCase):
+def _add_keepdims(func):
+ """ hack in keepdims behavior into a function taking an axis """
+ @functools.wraps(func)
+ def wrapped(a, axis, **kwargs):
+ res = func(a, axis=axis, **kwargs)
+ if axis is None:
+ axis = 0 # res is now a scalar, so we can insert this anywhere
+ return np.expand_dims(res, axis=axis)
+ return wrapped
+
+
+class TestTakeAlongAxis(object):
+ def test_argequivalent(self):
+ """ Test it translates from arg<func> to <func> """
+ from numpy.random import rand
+ a = rand(3, 4, 5)
+
+ funcs = [
+ (np.sort, np.argsort, dict()),
+ (_add_keepdims(np.min), _add_keepdims(np.argmin), dict()),
+ (_add_keepdims(np.max), _add_keepdims(np.argmax), dict()),
+ (np.partition, np.argpartition, dict(kth=2)),
+ ]
+
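+        # e.g. sort(a, axis) == take_along_axis(a, argsort(a, axis), axis)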
+ for func, argfunc, kwargs in funcs:
+ for axis in list(range(a.ndim)) + [None]:
+ a_func = func(a, axis=axis, **kwargs)
+ ai_func = argfunc(a, axis=axis, **kwargs)
+ assert_equal(a_func, take_along_axis(a, ai_func, axis=axis))
+
+ def test_invalid(self):
+ """ Test it errors when indices has too few dimensions """
+ a = np.ones((10, 10))
+ ai = np.ones((10, 2), dtype=np.intp)
+
+ # sanity check
+ take_along_axis(a, ai, axis=1)
+
+ # not enough indices
+ assert_raises(ValueError, take_along_axis, a, np.array(1), axis=1)
+ # bool arrays not allowed
+ assert_raises(IndexError, take_along_axis, a, ai.astype(bool), axis=1)
+ # float arrays not allowed
+ assert_raises(IndexError, take_along_axis, a, ai.astype(float), axis=1)
+ # invalid axis
+ assert_raises(np.AxisError, take_along_axis, a, ai, axis=10)
+
+ def test_empty(self):
+ """ Test everything is ok with empty results, even with inserted dims """
+ a = np.ones((3, 4, 5))
+ ai = np.ones((3, 0, 5), dtype=np.intp)
+
+ actual = take_along_axis(a, ai, axis=1)
+ assert_equal(actual.shape, ai.shape)
+
+ def test_broadcast(self):
+ """ Test that non-indexing dimensions are broadcast in both directions """
+ a = np.ones((3, 4, 1))
+ ai = np.ones((1, 2, 5), dtype=np.intp)
+ actual = take_along_axis(a, ai, axis=1)
+ assert_equal(actual.shape, (3, 2, 5))
+
+
+class TestPutAlongAxis(object):
+ def test_replace_max(self):
+ a_base = np.array([[10, 30, 20], [60, 40, 50]])
+
+ for axis in list(range(a_base.ndim)) + [None]:
+ # we mutate this in the loop
+ a = a_base.copy()
+
+ # replace the max with a small value
+ i_max = _add_keepdims(np.argmax)(a, axis=axis)
+ put_along_axis(a, i_max, -99, axis=axis)
+
+            # find the new minimum, which should be where the old max was
+ i_min = _add_keepdims(np.argmin)(a, axis=axis)
+
+ assert_equal(i_min, i_max)
+
+ def test_broadcast(self):
+ """ Test that non-indexing dimensions are broadcast in both directions """
+ a = np.ones((3, 4, 1))
+ ai = np.arange(10, dtype=np.intp).reshape((1, 2, 5)) % 4
+ put_along_axis(a, ai, 20, axis=1)
+ assert_equal(take_along_axis(a, ai, axis=1), 20)
+
+
+class TestApplyAlongAxis(object):
def test_simple(self):
a = np.ones((20, 10), 'd')
assert_array_equal(
apply_along_axis(len, 0, a), len(a)*np.ones(a.shape[1]))
- def test_simple101(self, level=11):
+ def test_simple101(self):
a = np.ones((10, 101), 'd')
assert_array_equal(
apply_along_axis(len, 0, a), len(a)*np.ones(a.shape[1]))
@@ -28,19 +119,21 @@ class TestApplyAlongAxis(TestCase):
[[27, 30, 33], [36, 39, 42], [45, 48, 51]])
def test_preserve_subclass(self):
- # this test is particularly malicious because matrix
- # refuses to become 1d
def double(row):
return row * 2
- m = np.matrix([[0, 1], [2, 3]])
- expected = np.matrix([[0, 2], [4, 6]])
+
+ class MyNDArray(np.ndarray):
+ pass
+
+ m = np.array([[0, 1], [2, 3]]).view(MyNDArray)
+ expected = np.array([[0, 2], [4, 6]]).view(MyNDArray)
result = apply_along_axis(double, 0, m)
- assert_(isinstance(result, np.matrix))
+ assert_(isinstance(result, MyNDArray))
assert_array_equal(result, expected)
result = apply_along_axis(double, 1, m)
- assert_(isinstance(result, np.matrix))
+ assert_(isinstance(result, MyNDArray))
assert_array_equal(result, expected)
def test_subclass(self):
@@ -78,7 +171,7 @@ class TestApplyAlongAxis(TestCase):
def test_axis_insertion(self, cls=np.ndarray):
def f1to2(x):
- """produces an assymmetric non-square matrix from x"""
+ """produces an asymmetric non-square matrix from x"""
assert_equal(x.ndim, 1)
return (x[::-1] * x[1:,None]).view(cls)
@@ -122,7 +215,7 @@ class TestApplyAlongAxis(TestCase):
def test_axis_insertion_ma(self):
def f1to2(x):
- """produces an assymmetric non-square matrix from x"""
+ """produces an asymmetric non-square matrix from x"""
assert_equal(x.ndim, 1)
res = x[::-1] * x[1:,None]
return np.ma.masked_where(res%5==0, res)
@@ -159,15 +252,49 @@ class TestApplyAlongAxis(TestCase):
assert_equal(actual, np.ones(10))
assert_raises(ValueError, np.apply_along_axis, empty_to_1, 0, a)
+ def test_with_iterable_object(self):
+ # from issue 5248
+ d = np.array([
+ [set([1, 11]), set([2, 22]), set([3, 33])],
+ [set([4, 44]), set([5, 55]), set([6, 66])]
+ ])
+ actual = np.apply_along_axis(lambda a: set.union(*a), 0, d)
+ expected = np.array([{1, 11, 4, 44}, {2, 22, 5, 55}, {3, 33, 6, 66}])
+
+ assert_equal(actual, expected)
+
+ # issue 8642 - assert_equal doesn't detect this!
+ for i in np.ndindex(actual.shape):
+ assert_equal(type(actual[i]), type(expected[i]))
-class TestApplyOverAxes(TestCase):
+
+class TestApplyOverAxes(object):
def test_simple(self):
a = np.arange(24).reshape(2, 3, 4)
aoa_a = apply_over_axes(np.sum, a, [0, 2])
assert_array_equal(aoa_a, np.array([[[60], [92], [124]]]))
-class TestArraySplit(TestCase):
+class TestExpandDims(object):
+ def test_functionality(self):
+ s = (2, 3, 4, 5)
+ a = np.empty(s)
+ for axis in range(-5, 4):
+ b = expand_dims(a, axis)
+ assert_(b.shape[axis] == 1)
+ assert_(np.squeeze(b).shape == s)
+
+ def test_deprecations(self):
+ # 2017-05-17, 1.13.0
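+        # out-of-range axes are deprecated: they warn instead of raising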
+ s = (2, 3, 4, 5)
+ a = np.empty(s)
+ with warnings.catch_warnings():
+ warnings.simplefilter("always")
+ assert_warns(DeprecationWarning, expand_dims, a, -6)
+ assert_warns(DeprecationWarning, expand_dims, a, 5)
+
+
+class TestArraySplit(object):
def test_integer_0_split(self):
a = np.arange(10)
assert_raises(ValueError, array_split, a, 0)
@@ -292,7 +419,7 @@ class TestArraySplit(TestCase):
compare_results(res, desired)
-class TestSplit(TestCase):
+class TestSplit(object):
# The split function is essentially the same as array_split,
# except that it test if splitting will result in an
# equal split. Only test for this case.
@@ -307,12 +434,12 @@ class TestSplit(TestCase):
a = np.arange(10)
assert_raises(ValueError, split, a, 3)
-class TestColumnStack(TestCase):
+class TestColumnStack(object):
def test_non_iterable(self):
assert_raises(TypeError, column_stack, 1)
-class TestDstack(TestCase):
+class TestDstack(object):
def test_non_iterable(self):
assert_raises(TypeError, dstack, 1)
@@ -347,7 +474,7 @@ class TestDstack(TestCase):
# array_split has more comprehensive test of splitting.
# only do simple test on hsplit, vsplit, and dsplit
-class TestHsplit(TestCase):
+class TestHsplit(object):
"""Only testing for integer splits.
"""
@@ -376,7 +503,7 @@ class TestHsplit(TestCase):
compare_results(res, desired)
-class TestVsplit(TestCase):
+class TestVsplit(object):
"""Only testing for integer splits.
"""
@@ -403,7 +530,7 @@ class TestVsplit(TestCase):
compare_results(res, desired)
-class TestDsplit(TestCase):
+class TestDsplit(object):
# Only testing for integer splits.
def test_non_iterable(self):
assert_raises(ValueError, dsplit, 1, 1)
@@ -436,7 +563,7 @@ class TestDsplit(TestCase):
compare_results(res, desired)
-class TestSqueeze(TestCase):
+class TestSqueeze(object):
def test_basic(self):
from numpy.random import rand
@@ -455,18 +582,12 @@ class TestSqueeze(TestCase):
assert_equal(type(res), np.ndarray)
-class TestKron(TestCase):
+class TestKron(object):
def test_return_type(self):
- a = np.ones([2, 2])
- m = np.asmatrix(a)
- assert_equal(type(kron(a, a)), np.ndarray)
- assert_equal(type(kron(m, m)), np.matrix)
- assert_equal(type(kron(a, m)), np.matrix)
- assert_equal(type(kron(m, a)), np.matrix)
-
class myarray(np.ndarray):
__array_priority__ = 0.0
+ a = np.ones([2, 2])
ma = myarray(a.shape, a.dtype, a.data)
assert_equal(type(kron(a, a)), np.ndarray)
assert_equal(type(kron(ma, ma)), myarray)
@@ -474,7 +595,7 @@ class TestKron(TestCase):
assert_equal(type(kron(ma, a)), myarray)
-class TestTile(TestCase):
+class TestTile(object):
def test_basic(self):
a = np.array([0, 1, 2])
b = [[1, 2], [3, 4]]
@@ -514,26 +635,22 @@ class TestTile(TestCase):
assert_equal(large, klarge)
-class TestMayShareMemory(TestCase):
+class TestMayShareMemory(object):
def test_basic(self):
d = np.ones((50, 60))
d2 = np.ones((30, 60, 6))
- self.assertTrue(np.may_share_memory(d, d))
- self.assertTrue(np.may_share_memory(d, d[::-1]))
- self.assertTrue(np.may_share_memory(d, d[::2]))
- self.assertTrue(np.may_share_memory(d, d[1:, ::-1]))
+ assert_(np.may_share_memory(d, d))
+ assert_(np.may_share_memory(d, d[::-1]))
+ assert_(np.may_share_memory(d, d[::2]))
+ assert_(np.may_share_memory(d, d[1:, ::-1]))
- self.assertFalse(np.may_share_memory(d[::-1], d2))
- self.assertFalse(np.may_share_memory(d[::2], d2))
- self.assertFalse(np.may_share_memory(d[1:, ::-1], d2))
- self.assertTrue(np.may_share_memory(d2[1:, ::-1], d2))
+ assert_(not np.may_share_memory(d[::-1], d2))
+ assert_(not np.may_share_memory(d[::2], d2))
+ assert_(not np.may_share_memory(d[1:, ::-1], d2))
+ assert_(np.may_share_memory(d2[1:, ::-1], d2))
# Utility
def compare_results(res, desired):
for i in range(len(desired)):
assert_array_equal(res[i], desired[i])
-
-
-if __name__ == "__main__":
- run_module_suite()
diff --git a/numpy/lib/tests/test_stride_tricks.py b/numpy/lib/tests/test_stride_tricks.py
index 39a76c2f6..3c2ca8b87 100644
--- a/numpy/lib/tests/test_stride_tricks.py
+++ b/numpy/lib/tests/test_stride_tricks.py
@@ -1,13 +1,13 @@
from __future__ import division, absolute_import, print_function
import numpy as np
+from numpy.core._rational_tests import rational
from numpy.testing import (
- run_module_suite, assert_equal, assert_array_equal,
- assert_raises, assert_
+ assert_equal, assert_array_equal, assert_raises, assert_
)
from numpy.lib.stride_tricks import (
as_strided, broadcast_arrays, _broadcast_shape, broadcast_to
-)
+ )
def assert_shapes_correct(input_shapes, expected_shape):
# Broadcast a list of arrays with the given input shapes and check the
@@ -317,6 +317,13 @@ def test_as_strided():
a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize))
assert_equal(a.dtype, a_view.dtype)
+ # Custom dtypes should not be lost (gh-9161)
+ r = [rational(i) for i in range(4)]
+ a = np.array(r, dtype=rational)
+ a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize))
+ assert_equal(a.dtype, a_view.dtype)
+ assert_array_equal([r] * 3, a_view)
+
def as_strided_writeable():
arr = np.ones(10)
view = as_strided(arr, writeable=False)
@@ -407,7 +414,7 @@ def test_writeable():
_, result = broadcast_arrays(0, original)
assert_equal(result.flags.writeable, False)
- # regresssion test for GH6491
+ # regression test for GH6491
shape = (2,)
strides = [0]
tricky_array = as_strided(np.array(0), shape, strides)
@@ -424,7 +431,3 @@ def test_reference_types():
actual, _ = broadcast_arrays(input_array, np.ones(3))
assert_array_equal(expected, actual)
-
-
-if __name__ == "__main__":
- run_module_suite()
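The new assertions above guard the gh-9161 fix (as_strided dropping custom dtypes). The rational dtype they use lives in a private test-only module, so this illustrative sketch substitutes an ordinary int64 array; the dtype-preservation point is the same:

    import numpy as np
    from numpy.lib.stride_tricks import as_strided

    # View a 1-D array as three repeated rows without copying any data.
    a = np.arange(4, dtype=np.int64)
    view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize))

    assert view.dtype == a.dtype                  # dtype is carried over to the view
    assert (view == np.stack([a] * 3)).all()      # every row aliases the original data
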
diff --git a/numpy/lib/tests/test_twodim_base.py b/numpy/lib/tests/test_twodim_base.py
index 98b8aa39c..bf93b4adb 100644
--- a/numpy/lib/tests/test_twodim_base.py
+++ b/numpy/lib/tests/test_twodim_base.py
@@ -4,18 +4,17 @@
from __future__ import division, absolute_import, print_function
from numpy.testing import (
- TestCase, run_module_suite, assert_equal, assert_array_equal,
- assert_array_max_ulp, assert_array_almost_equal, assert_raises,
+ assert_equal, assert_array_equal, assert_array_max_ulp,
+ assert_array_almost_equal, assert_raises,
)
from numpy import (
- arange, add, fliplr, flipud, zeros, ones, eye, array, diag,
- histogram2d, tri, mask_indices, triu_indices, triu_indices_from,
- tril_indices, tril_indices_from, vander,
+ arange, add, fliplr, flipud, zeros, ones, eye, array, diag, histogram2d,
+ tri, mask_indices, triu_indices, triu_indices_from, tril_indices,
+ tril_indices_from, vander,
)
import numpy as np
-from numpy.compat import asbytes_nested
def get_mat(n):
@@ -24,7 +23,7 @@ def get_mat(n):
return data
-class TestEye(TestCase):
+class TestEye(object):
def test_basic(self):
assert_equal(eye(4),
array([[1, 0, 0, 0],
@@ -91,13 +90,22 @@ class TestEye(TestCase):
def test_strings(self):
assert_equal(eye(2, 2, dtype='S3'),
- asbytes_nested([['1', ''], ['', '1']]))
+ [[b'1', b''], [b'', b'1']])
def test_bool(self):
assert_equal(eye(2, 2, dtype=bool), [[True, False], [False, True]])
+ def test_order(self):
+ mat_c = eye(4, 3, k=-1)
+ mat_f = eye(4, 3, k=-1, order='F')
+ assert_equal(mat_c, mat_f)
+ assert mat_c.flags.c_contiguous
+ assert not mat_c.flags.f_contiguous
+ assert not mat_f.flags.c_contiguous
+ assert mat_f.flags.f_contiguous
-class TestDiag(TestCase):
+
+class TestDiag(object):
def test_vector(self):
vals = (100 * arange(5)).astype('l')
b = zeros((5, 5))
@@ -141,12 +149,12 @@ class TestDiag(TestCase):
assert_equal(diag(A, k=-3), [])
def test_failure(self):
- self.assertRaises(ValueError, diag, [[[1]]])
+ assert_raises(ValueError, diag, [[[1]]])
-class TestFliplr(TestCase):
+class TestFliplr(object):
def test_basic(self):
- self.assertRaises(ValueError, fliplr, ones(4))
+ assert_raises(ValueError, fliplr, ones(4))
a = get_mat(4)
b = a[:, ::-1]
assert_equal(fliplr(a), b)
@@ -157,7 +165,7 @@ class TestFliplr(TestCase):
assert_equal(fliplr(a), b)
-class TestFlipud(TestCase):
+class TestFlipud(object):
def test_basic(self):
a = get_mat(4)
b = a[::-1, :]
@@ -169,7 +177,7 @@ class TestFlipud(TestCase):
assert_equal(flipud(a), b)
-class TestHistogram2d(TestCase):
+class TestHistogram2d(object):
def test_simple(self):
x = array(
[0.41702200, 0.72032449, 1.1437481e-4, 0.302332573, 0.146755891])
@@ -200,7 +208,7 @@ class TestHistogram2d(TestCase):
x = array([1, 1, 2, 3, 4, 4, 4, 5])
y = array([1, 3, 2, 0, 1, 2, 3, 4])
H, xed, yed = histogram2d(
- x, y, (6, 5), range=[[0, 6], [0, 5]], normed=True)
+ x, y, (6, 5), range=[[0, 6], [0, 5]], density=True)
answer = array(
[[0., 0, 0, 0, 0],
[0, 1, 0, 1, 0],
@@ -212,11 +220,11 @@ class TestHistogram2d(TestCase):
assert_array_equal(xed, np.linspace(0, 6, 7))
assert_array_equal(yed, np.linspace(0, 5, 6))
- def test_norm(self):
+ def test_density(self):
x = array([1, 2, 3, 1, 2, 3, 1, 2, 3])
y = array([1, 1, 1, 2, 2, 2, 3, 3, 3])
H, xed, yed = histogram2d(
- x, y, [[1, 2, 3, 5], [1, 2, 3, 5]], normed=True)
+ x, y, [[1, 2, 3, 5], [1, 2, 3, 5]], density=True)
answer = array([[1, 1, .5],
[1, 1, .5],
[.5, .5, .25]])/9.
@@ -236,37 +244,37 @@ class TestHistogram2d(TestCase):
def test_binparameter_combination(self):
x = array(
- [0, 0.09207008, 0.64575234, 0.12875982, 0.47390599,
+ [0, 0.09207008, 0.64575234, 0.12875982, 0.47390599,
0.59944483, 1])
y = array(
- [0, 0.14344267, 0.48988575, 0.30558665, 0.44700682,
+ [0, 0.14344267, 0.48988575, 0.30558665, 0.44700682,
0.15886423, 1])
edges = (0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1)
H, xe, ye = histogram2d(x, y, (edges, 4))
answer = array(
- [[ 2., 0., 0., 0.],
- [ 0., 1., 0., 0.],
- [ 0., 0., 0., 0.],
- [ 0., 0., 0., 0.],
- [ 0., 1., 0., 0.],
- [ 1., 0., 0., 0.],
- [ 0., 1., 0., 0.],
- [ 0., 0., 0., 0.],
- [ 0., 0., 0., 0.],
- [ 0., 0., 0., 1.]])
+ [[2., 0., 0., 0.],
+ [0., 1., 0., 0.],
+ [0., 0., 0., 0.],
+ [0., 0., 0., 0.],
+ [0., 1., 0., 0.],
+ [1., 0., 0., 0.],
+ [0., 1., 0., 0.],
+ [0., 0., 0., 0.],
+ [0., 0., 0., 0.],
+ [0., 0., 0., 1.]])
assert_array_equal(H, answer)
assert_array_equal(ye, array([0., 0.25, 0.5, 0.75, 1]))
H, xe, ye = histogram2d(x, y, (4, edges))
answer = array(
- [[ 1., 1., 0., 1., 0., 0., 0., 0., 0., 0.],
- [ 0., 0., 0., 0., 1., 0., 0., 0., 0., 0.],
- [ 0., 1., 0., 0., 1., 0., 0., 0., 0., 0.],
- [ 0., 0., 0., 0., 0., 0., 0., 0., 0., 1.]])
+ [[1., 1., 0., 1., 0., 0., 0., 0., 0., 0.],
+ [0., 0., 0., 0., 1., 0., 0., 0., 0., 0.],
+ [0., 1., 0., 0., 1., 0., 0., 0., 0., 0.],
+ [0., 0., 0., 0., 0., 0., 0., 0., 0., 1.]])
assert_array_equal(H, answer)
assert_array_equal(xe, array([0., 0.25, 0.5, 0.75, 1]))
-class TestTri(TestCase):
+class TestTri(object):
def test_dtype(self):
out = array([[1, 0, 0],
[1, 1, 0],
@@ -280,11 +288,11 @@ def test_tril_triu_ndim2():
a = np.ones((2, 2), dtype=dtype)
b = np.tril(a)
c = np.triu(a)
- yield assert_array_equal, b, [[1, 0], [1, 1]]
- yield assert_array_equal, c, b.T
+ assert_array_equal(b, [[1, 0], [1, 1]])
+ assert_array_equal(c, b.T)
# should return the same dtype as the original array
- yield assert_equal, b.dtype, a.dtype
- yield assert_equal, c.dtype, a.dtype
+ assert_equal(b.dtype, a.dtype)
+ assert_equal(c.dtype, a.dtype)
def test_tril_triu_ndim3():
@@ -306,10 +314,11 @@ def test_tril_triu_ndim3():
], dtype=dtype)
a_triu_observed = np.triu(a)
a_tril_observed = np.tril(a)
- yield assert_array_equal, a_triu_observed, a_triu_desired
- yield assert_array_equal, a_tril_observed, a_tril_desired
- yield assert_equal, a_triu_observed.dtype, a.dtype
- yield assert_equal, a_tril_observed.dtype, a.dtype
+ assert_array_equal(a_triu_observed, a_triu_desired)
+ assert_array_equal(a_tril_observed, a_tril_desired)
+ assert_equal(a_triu_observed.dtype, a.dtype)
+ assert_equal(a_tril_observed.dtype, a.dtype)
+
def test_tril_triu_with_inf():
# Issue 4859
@@ -350,10 +359,10 @@ def test_mask_indices():
# simple test without offset
iu = mask_indices(3, np.triu)
a = np.arange(9).reshape(3, 3)
- yield (assert_array_equal, a[iu], array([0, 1, 2, 4, 5, 8]))
+ assert_array_equal(a[iu], array([0, 1, 2, 4, 5, 8]))
# Now with an offset
iu1 = mask_indices(3, np.triu, 1)
- yield (assert_array_equal, a[iu1], array([1, 2, 5]))
+ assert_array_equal(a[iu1], array([1, 2, 5]))
def test_tril_indices():
@@ -370,37 +379,37 @@ def test_tril_indices():
b = np.arange(1, 21).reshape(4, 5)
# indexing:
- yield (assert_array_equal, a[il1],
- array([1, 5, 6, 9, 10, 11, 13, 14, 15, 16]))
- yield (assert_array_equal, b[il3],
- array([1, 6, 7, 11, 12, 13, 16, 17, 18, 19]))
+ assert_array_equal(a[il1],
+ array([1, 5, 6, 9, 10, 11, 13, 14, 15, 16]))
+ assert_array_equal(b[il3],
+ array([1, 6, 7, 11, 12, 13, 16, 17, 18, 19]))
# And for assigning values:
a[il1] = -1
- yield (assert_array_equal, a,
- array([[-1, 2, 3, 4],
- [-1, -1, 7, 8],
- [-1, -1, -1, 12],
- [-1, -1, -1, -1]]))
+ assert_array_equal(a,
+ array([[-1, 2, 3, 4],
+ [-1, -1, 7, 8],
+ [-1, -1, -1, 12],
+ [-1, -1, -1, -1]]))
b[il3] = -1
- yield (assert_array_equal, b,
- array([[-1, 2, 3, 4, 5],
- [-1, -1, 8, 9, 10],
- [-1, -1, -1, 14, 15],
- [-1, -1, -1, -1, 20]]))
+ assert_array_equal(b,
+ array([[-1, 2, 3, 4, 5],
+ [-1, -1, 8, 9, 10],
+ [-1, -1, -1, 14, 15],
+ [-1, -1, -1, -1, 20]]))
# These cover almost the whole array (two diagonals right of the main one):
a[il2] = -10
- yield (assert_array_equal, a,
- array([[-10, -10, -10, 4],
- [-10, -10, -10, -10],
- [-10, -10, -10, -10],
- [-10, -10, -10, -10]]))
+ assert_array_equal(a,
+ array([[-10, -10, -10, 4],
+ [-10, -10, -10, -10],
+ [-10, -10, -10, -10],
+ [-10, -10, -10, -10]]))
b[il4] = -10
- yield (assert_array_equal, b,
- array([[-10, -10, -10, 4, 5],
- [-10, -10, -10, -10, 10],
- [-10, -10, -10, -10, -10],
- [-10, -10, -10, -10, -10]]))
+ assert_array_equal(b,
+ array([[-10, -10, -10, 4, 5],
+ [-10, -10, -10, -10, 10],
+ [-10, -10, -10, -10, -10],
+ [-10, -10, -10, -10, -10]]))
class TestTriuIndices(object):
@@ -417,39 +426,40 @@ class TestTriuIndices(object):
b = np.arange(1, 21).reshape(4, 5)
# Both for indexing:
- yield (assert_array_equal, a[iu1],
- array([1, 2, 3, 4, 6, 7, 8, 11, 12, 16]))
- yield (assert_array_equal, b[iu3],
- array([1, 2, 3, 4, 5, 7, 8, 9, 10, 13, 14, 15, 19, 20]))
+ assert_array_equal(a[iu1],
+ array([1, 2, 3, 4, 6, 7, 8, 11, 12, 16]))
+ assert_array_equal(b[iu3],
+ array([1, 2, 3, 4, 5, 7, 8, 9,
+ 10, 13, 14, 15, 19, 20]))
# And for assigning values:
a[iu1] = -1
- yield (assert_array_equal, a,
- array([[-1, -1, -1, -1],
- [5, -1, -1, -1],
- [9, 10, -1, -1],
- [13, 14, 15, -1]]))
+ assert_array_equal(a,
+ array([[-1, -1, -1, -1],
+ [5, -1, -1, -1],
+ [9, 10, -1, -1],
+ [13, 14, 15, -1]]))
b[iu3] = -1
- yield (assert_array_equal, b,
- array([[-1, -1, -1, -1, -1],
- [6, -1, -1, -1, -1],
- [11, 12, -1, -1, -1],
- [16, 17, 18, -1, -1]]))
+ assert_array_equal(b,
+ array([[-1, -1, -1, -1, -1],
+ [6, -1, -1, -1, -1],
+ [11, 12, -1, -1, -1],
+ [16, 17, 18, -1, -1]]))
# These cover almost the whole array (two diagonals right of the
# main one):
a[iu2] = -10
- yield (assert_array_equal, a,
- array([[-1, -1, -10, -10],
- [5, -1, -1, -10],
- [9, 10, -1, -1],
- [13, 14, 15, -1]]))
+ assert_array_equal(a,
+ array([[-1, -1, -10, -10],
+ [5, -1, -1, -10],
+ [9, 10, -1, -1],
+ [13, 14, 15, -1]]))
b[iu4] = -10
- yield (assert_array_equal, b,
- array([[-1, -1, -10, -10, -10],
- [6, -1, -1, -10, -10],
- [11, 12, -1, -1, -10],
- [16, 17, 18, -1, -1]]))
+ assert_array_equal(b,
+ array([[-1, -1, -10, -10, -10],
+ [6, -1, -1, -10, -10],
+ [11, 12, -1, -1, -10],
+ [16, 17, 18, -1, -1]]))
class TestTrilIndicesFrom(object):
@@ -475,12 +485,12 @@ class TestVander(object):
[16, -8, 4, -2, 1],
[81, 27, 9, 3, 1]])
# Check default value of N:
- yield (assert_array_equal, v, powers[:, 1:])
+ assert_array_equal(v, powers[:, 1:])
# Check a range of N values, including 0 and 5 (greater than default)
m = powers.shape[1]
for n in range(6):
v = vander(c, N=n)
- yield (assert_array_equal, v, powers[:, m-n:m])
+ assert_array_equal(v, powers[:, m-n:m])
def test_dtypes(self):
c = array([11, -12, 13], dtype=np.int8)
@@ -488,7 +498,7 @@ class TestVander(object):
expected = np.array([[121, 11, 1],
[144, -12, 1],
[169, 13, 1]])
- yield (assert_array_equal, v, expected)
+ assert_array_equal(v, expected)
c = array([1.0+1j, 1.0-1j])
v = vander(c, N=3)
@@ -497,8 +507,4 @@ class TestVander(object):
# The data is floating point, but the values are small integers,
# so assert_array_equal *should* be safe here (rather than, say,
# assert_array_almost_equal).
- yield (assert_array_equal, v, expected)
-
-
-if __name__ == "__main__":
- run_module_suite()
+ assert_array_equal(v, expected)
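The normed= keyword is replaced by density= throughout this file; with density=True the returned values form a probability density, so integrating over the bin areas gives one. A minimal standalone check (not part of the patch):

    import numpy as np

    x = np.array([1, 2, 3, 1, 2, 3, 1, 2, 3])
    y = np.array([1, 1, 1, 2, 2, 2, 3, 3, 3])

    # density=True divides the counts by the total count and by the bin area.
    H, xedges, yedges = np.histogram2d(x, y, [[1, 2, 3, 5], [1, 2, 3, 5]],
                                       density=True)

    area = np.diff(xedges)[:, None] * np.diff(yedges)[None, :]
    assert np.isclose((H * area).sum(), 1.0)      # the density integrates to one
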
diff --git a/numpy/lib/tests/test_type_check.py b/numpy/lib/tests/test_type_check.py
index 4523e3f24..2982ca31a 100644
--- a/numpy/lib/tests/test_type_check.py
+++ b/numpy/lib/tests/test_type_check.py
@@ -3,7 +3,7 @@ from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.compat import long
from numpy.testing import (
- TestCase, assert_, assert_equal, assert_array_equal, run_module_suite
+ assert_, assert_equal, assert_array_equal, assert_raises
)
from numpy.lib.type_check import (
common_type, mintypecode, isreal, iscomplex, isposinf, isneginf,
@@ -15,7 +15,7 @@ def assert_all(x):
assert_(np.all(x), x)
-class TestCommonType(TestCase):
+class TestCommonType(object):
def test_basic(self):
ai32 = np.array([[1, 2], [3, 4]], dtype=np.int32)
af16 = np.array([[1, 2], [3, 4]], dtype=np.float16)
@@ -31,7 +31,7 @@ class TestCommonType(TestCase):
assert_(common_type(acd) == np.cdouble)
-class TestMintypecode(TestCase):
+class TestMintypecode(object):
def test_default_1(self):
for itype in '1bcsuwil':
@@ -81,7 +81,7 @@ class TestMintypecode(TestCase):
assert_equal(mintypecode('idD'), 'D')
-class TestIsscalar(TestCase):
+class TestIsscalar(object):
def test_basic(self):
assert_(np.isscalar(3))
@@ -92,29 +92,69 @@ class TestIsscalar(TestCase):
assert_(np.isscalar(4.0))
-class TestReal(TestCase):
+class TestReal(object):
def test_real(self):
y = np.random.rand(10,)
assert_array_equal(y, np.real(y))
+ y = np.array(1)
+ out = np.real(y)
+ assert_array_equal(y, out)
+ assert_(isinstance(out, np.ndarray))
+
+ y = 1
+ out = np.real(y)
+ assert_equal(y, out)
+ assert_(not isinstance(out, np.ndarray))
+
def test_cmplx(self):
y = np.random.rand(10,)+1j*np.random.rand(10,)
assert_array_equal(y.real, np.real(y))
+ y = np.array(1 + 1j)
+ out = np.real(y)
+ assert_array_equal(y.real, out)
+ assert_(isinstance(out, np.ndarray))
+
+ y = 1 + 1j
+ out = np.real(y)
+ assert_equal(1.0, out)
+ assert_(not isinstance(out, np.ndarray))
-class TestImag(TestCase):
+
+class TestImag(object):
def test_real(self):
y = np.random.rand(10,)
assert_array_equal(0, np.imag(y))
+ y = np.array(1)
+ out = np.imag(y)
+ assert_array_equal(0, out)
+ assert_(isinstance(out, np.ndarray))
+
+ y = 1
+ out = np.imag(y)
+ assert_equal(0, out)
+ assert_(not isinstance(out, np.ndarray))
+
def test_cmplx(self):
y = np.random.rand(10,)+1j*np.random.rand(10,)
assert_array_equal(y.imag, np.imag(y))
+ y = np.array(1 + 1j)
+ out = np.imag(y)
+ assert_array_equal(y.imag, out)
+ assert_(isinstance(out, np.ndarray))
+
+ y = 1 + 1j
+ out = np.imag(y)
+ assert_equal(1.0, out)
+ assert_(not isinstance(out, np.ndarray))
-class TestIscomplex(TestCase):
+
+class TestIscomplex(object):
def test_fail(self):
z = np.array([-1, 0, 1])
@@ -127,7 +167,7 @@ class TestIscomplex(TestCase):
assert_array_equal(res, [1, 0, 0])
-class TestIsreal(TestCase):
+class TestIsreal(object):
def test_pass(self):
z = np.array([-1, 0, 1j])
@@ -140,7 +180,7 @@ class TestIsreal(TestCase):
assert_array_equal(res, [0, 1, 1])
-class TestIscomplexobj(TestCase):
+class TestIscomplexobj(object):
def test_basic(self):
z = np.array([-1, 0, 1])
@@ -193,7 +233,7 @@ class TestIscomplexobj(TestCase):
assert_(iscomplexobj(a))
-class TestIsrealobj(TestCase):
+class TestIsrealobj(object):
def test_basic(self):
z = np.array([-1, 0, 1])
assert_(isrealobj(z))
@@ -201,7 +241,7 @@ class TestIsrealobj(TestCase):
assert_(not isrealobj(z))
-class TestIsnan(TestCase):
+class TestIsnan(object):
def test_goodvalues(self):
z = np.array((-1., 0., 1.))
@@ -231,7 +271,7 @@ class TestIsnan(TestCase):
assert_all(np.isnan(np.array(0+0j)/0.) == 1)
-class TestIsfinite(TestCase):
+class TestIsfinite(object):
# Fixme, wrong place, isfinite now ufunc
def test_goodvalues(self):
@@ -262,7 +302,7 @@ class TestIsfinite(TestCase):
assert_all(np.isfinite(np.array(1+1j)/0.) == 0)
-class TestIsinf(TestCase):
+class TestIsinf(object):
# Fixme, wrong place, isinf now ufunc
def test_goodvalues(self):
@@ -291,7 +331,7 @@ class TestIsinf(TestCase):
assert_all(np.isinf(np.array((0.,))/0.) == 0)
-class TestIsposinf(TestCase):
+class TestIsposinf(object):
def test_generic(self):
with np.errstate(divide='ignore', invalid='ignore'):
@@ -301,7 +341,7 @@ class TestIsposinf(TestCase):
assert_(vals[2] == 1)
-class TestIsneginf(TestCase):
+class TestIsneginf(object):
def test_generic(self):
with np.errstate(divide='ignore', invalid='ignore'):
@@ -311,7 +351,7 @@ class TestIsneginf(TestCase):
assert_(vals[2] == 0)
-class TestNanToNum(TestCase):
+class TestNanToNum(object):
def test_generic(self):
with np.errstate(divide='ignore', invalid='ignore'):
@@ -319,16 +359,38 @@ class TestNanToNum(TestCase):
assert_all(vals[0] < -1e10) and assert_all(np.isfinite(vals[0]))
assert_(vals[1] == 0)
assert_all(vals[2] > 1e10) and assert_all(np.isfinite(vals[2]))
+ assert_equal(type(vals), np.ndarray)
+
+ # perform the same test but in-place
+ with np.errstate(divide='ignore', invalid='ignore'):
+ vals = np.array((-1., 0, 1))/0.
+ result = nan_to_num(vals, copy=False)
+
+ assert_(result is vals)
+ assert_all(vals[0] < -1e10) and assert_all(np.isfinite(vals[0]))
+ assert_(vals[1] == 0)
+ assert_all(vals[2] > 1e10) and assert_all(np.isfinite(vals[2]))
+ assert_equal(type(vals), np.ndarray)
+
+ def test_array(self):
+ vals = nan_to_num([1])
+ assert_array_equal(vals, np.array([1], int))
+ assert_equal(type(vals), np.ndarray)
def test_integer(self):
vals = nan_to_num(1)
assert_all(vals == 1)
- vals = nan_to_num([1])
- assert_array_equal(vals, np.array([1], np.int))
+ assert_equal(type(vals), np.int_)
+
+ def test_float(self):
+ vals = nan_to_num(1.0)
+ assert_all(vals == 1.0)
+ assert_equal(type(vals), np.float_)
def test_complex_good(self):
vals = nan_to_num(1+1j)
assert_all(vals == 1+1j)
+ assert_equal(type(vals), np.complex_)
def test_complex_bad(self):
with np.errstate(divide='ignore', invalid='ignore'):
@@ -337,6 +399,7 @@ class TestNanToNum(TestCase):
vals = nan_to_num(v)
# !! This is actually (unexpectedly) zero
assert_all(np.isfinite(vals))
+ assert_equal(type(vals), np.complex_)
def test_complex_bad2(self):
with np.errstate(divide='ignore', invalid='ignore'):
@@ -344,6 +407,7 @@ class TestNanToNum(TestCase):
v += np.array(-1+1.j)/0.
vals = nan_to_num(v)
assert_all(np.isfinite(vals))
+ assert_equal(type(vals), np.complex_)
# Fixme
#assert_all(vals.imag > 1e10) and assert_all(np.isfinite(vals))
# !! This is actually (unexpectedly) positive
@@ -352,7 +416,7 @@ class TestNanToNum(TestCase):
#assert_all(vals.real < -1e10) and assert_all(np.isfinite(vals))
-class TestRealIfClose(TestCase):
+class TestRealIfClose(object):
def test_basic(self):
a = np.random.rand(10)
@@ -365,12 +429,14 @@ class TestRealIfClose(TestCase):
assert_all(isrealobj(b))
-class TestArrayConversion(TestCase):
+class TestArrayConversion(object):
def test_asfarray(self):
a = asfarray(np.array([1, 2, 3]))
assert_equal(a.__class__, np.ndarray)
- assert_(np.issubdtype(a.dtype, np.float))
+ assert_(np.issubdtype(a.dtype, np.floating))
-if __name__ == "__main__":
- run_module_suite()
+ # previously this would infer dtypes from arrays, unlike every single
+ # other numpy function
+ assert_raises(TypeError,
+ asfarray, np.array([1, 2, 3]), dtype=np.array(1.0))
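nan_to_num gains in-place behaviour via copy=False in the tests above; the essential contract, sketched outside the test suite, is:

    import numpy as np

    with np.errstate(divide='ignore', invalid='ignore'):
        vals = np.array([-1.0, 0.0, 1.0]) / 0.0   # [-inf, nan, inf]

    # copy=False rewrites the array in place and hands back the same object.
    out = np.nan_to_num(vals, copy=False)

    assert out is vals
    assert np.isfinite(vals).all()
    assert vals[1] == 0.0                         # nan is mapped to zero
    assert vals[0] < -1e10 and vals[2] > 1e10     # infinities become large finite values
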
diff --git a/numpy/lib/tests/test_ufunclike.py b/numpy/lib/tests/test_ufunclike.py
index 97d608ecf..361367b97 100644
--- a/numpy/lib/tests/test_ufunclike.py
+++ b/numpy/lib/tests/test_ufunclike.py
@@ -1,13 +1,14 @@
from __future__ import division, absolute_import, print_function
+import numpy as np
import numpy.core as nx
import numpy.lib.ufunclike as ufl
from numpy.testing import (
- run_module_suite, TestCase, assert_, assert_equal, assert_array_equal
+ assert_, assert_equal, assert_array_equal, assert_warns
)
-class TestUfunclike(TestCase):
+class TestUfunclike(object):
def test_isposinf(self):
a = nx.array([nx.inf, -nx.inf, nx.nan, 0.0, 3.0, -3.0])
@@ -51,9 +52,14 @@ class TestUfunclike(TestCase):
return res
def __array_wrap__(self, obj, context=None):
- obj.metadata = self.metadata
+ if isinstance(obj, MyArray):
+ obj.metadata = self.metadata
return obj
+ def __array_finalize__(self, obj):
+ self.metadata = getattr(obj, 'metadata', None)
+ return self
+
a = nx.array([1.1, -1.1])
m = MyArray(a, metadata='foo')
f = ufl.fix(m)
@@ -61,5 +67,32 @@ class TestUfunclike(TestCase):
assert_(isinstance(f, MyArray))
assert_equal(f.metadata, 'foo')
-if __name__ == "__main__":
- run_module_suite()
+ # check 0d arrays don't decay to scalars
+ m0d = m[0,...]
+ m0d.metadata = 'bar'
+ f0d = ufl.fix(m0d)
+ assert_(isinstance(f0d, MyArray))
+ assert_equal(f0d.metadata, 'bar')
+
+ def test_deprecated(self):
+ # NumPy 1.13.0, 2017-04-26
+ assert_warns(DeprecationWarning, ufl.fix, [1, 2], y=nx.empty(2))
+ assert_warns(DeprecationWarning, ufl.isposinf, [1, 2], y=nx.empty(2))
+ assert_warns(DeprecationWarning, ufl.isneginf, [1, 2], y=nx.empty(2))
+
+ def test_scalar(self):
+ x = np.inf
+ actual = np.isposinf(x)
+ expected = np.True_
+ assert_equal(actual, expected)
+ assert_equal(type(actual), type(expected))
+
+ x = -3.4
+ actual = np.fix(x)
+ expected = np.float64(-3.0)
+ assert_equal(actual, expected)
+ assert_equal(type(actual), type(expected))
+
+ out = np.array(0.0)
+ actual = np.fix(x, out=out)
+ assert_(actual is out)
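The added test_scalar covers np.fix on a plain Python scalar and with an explicit 0-d out array; a condensed sketch of the behaviour being pinned down (illustrative, assuming a NumPy recent enough for the out= keyword, i.e. 1.13+):

    import numpy as np

    # Rounding toward zero on a scalar yields a NumPy scalar, not a 0-d array.
    val = np.fix(-3.4)
    assert val == -3.0
    assert isinstance(val, np.float64)

    # With an explicit 0-d output array, that same array object is returned.
    out = np.array(0.0)
    res = np.fix(-3.4, out=out)
    assert res is out
    assert out == -3.0
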
diff --git a/numpy/lib/tests/test_utils.py b/numpy/lib/tests/test_utils.py
index 92bcdc238..c27c3cbf5 100644
--- a/numpy/lib/tests/test_utils.py
+++ b/numpy/lib/tests/test_utils.py
@@ -1,10 +1,10 @@
from __future__ import division, absolute_import, print_function
import sys
+import pytest
+
from numpy.core import arange
-from numpy.testing import (
- run_module_suite, assert_, assert_equal, assert_raises_regex, dec
- )
+from numpy.testing import assert_, assert_equal, assert_raises_regex
from numpy.lib import deprecate
import numpy.lib.utils as utils
@@ -14,7 +14,7 @@ else:
from StringIO import StringIO
-@dec.skipif(sys.flags.optimize == 2)
+@pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO")
def test_lookfor():
out = StringIO()
utils.lookfor('eigenvalue', module='numpy', output=out,
@@ -65,7 +65,3 @@ def test_byte_bounds():
def test_assert_raises_regex_context_manager():
with assert_raises_regex(ValueError, 'no deprecation warning'):
raise ValueError('no deprecation warning')
-
-
-if __name__ == "__main__":
- run_module_suite()
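The dec.skipif decorator is replaced with its pytest equivalent above; under pytest (assumed to be the test runner here), the marker takes the skip condition plus a reason string:

    import sys
    import pytest

    # Skip when Python runs with -OO, since docstrings are stripped in that mode.
    @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO")
    def test_needs_docstrings():
        # anything that inspects __doc__ (e.g. np.lookfor output) belongs here
        pass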